[
  {
    "path": ".drone.yml",
    "content": "---\nkind: pipeline\ntype: docker\nname: cargo-test\n\nenvironment:\n  RUSTC_WRAPPER: '/root/.cargo/bin/cachepot'\n  CACHEPOT_BUCKET: 'drone-sccache'\n  CACHEPOT_S3_KEY_PREFIX: ci\n  CACHEPOT_REGION: 'us-east-2'\n  CARGO_INCREMENTAL: '0'\n\n__buildenv: &buildenv\n  image: casperlabs/node-build-u1804\n  volumes:\n  - name: rustup\n    path: \"/root/.rustup\"\n  - name: cargo\n    path: \"/root/.cargo\"\n  - name: drone\n    path: \"/drone\"\n  environment:\n    AWS_ACCESS_KEY_ID:\n      from_secret: cachepot_aws_ak\n    AWS_SECRET_ACCESS_KEY:\n      from_secret: cachepot_aws_sk\n\nsteps:\n- name: setup\n  <<: *buildenv\n  commands:\n  - make setup\n\n# The below is duplicated for pull and push\n# due to environment bug with caching.\n- name: cargo-test-pr\n  <<: *buildenv\n  environment:\n    SCCACHE_S3_PUBLIC: true\n  commands:\n  - make check-std-features\n  - make check-testing-features\n  - make test CARGO_FLAGS=--release\n  - make test-contracts CARGO_FLAGS=--release\n  - cachepot --show-stats\n  when:\n    event:\n    - pull_request\n\n- name: cargo-test-push\n  <<: *buildenv\n  commands:\n  - make check-std-features\n  - make check-testing-features\n  - make test CARGO_FLAGS=--release\n  - make test-contracts CARGO_FLAGS=--release\n  - cachepot --show-stats\n  when:\n    event:\n    - push\n\n- name: notify\n  image: plugins/slack\n  settings:\n    webhook:\n      from_secret: slack_webhook\n    template:\n    - |\n      Cargo-Test Pipeline Status: *{{ uppercasefirst build.status }}*\n      Drone Build: <{{ build.link }}|#{{ build.number }}>\n      Commit Link: <https://github.com/{{repo.owner}}/{{repo.name}}/commit/{{build.commit}}|{{ truncate build.commit 10 }}>\n  when:\n    event:\n    - push\n    status:\n    - failure\n    branch:\n    - dev\n    - \"release-*\"\n    - \"feat-*\"\n\nvolumes:\n- name: rustup\n  temp: {}\n- name: cargo\n  temp: {}\n- name: drone\n  temp: {}\n\ntrigger:\n  branch:\n  - trying\n  - staging\n  - dev\n  - 
\"release-*\"\n  - \"feat-*\"\n  event:\n    include:\n    - pull_request\n    - push\n    exclude:\n    - tag\n    - cron\n\n---\nkind: pipeline\ntype: docker\nname: nctl-testing\n\nenvironment:\n  RUSTC_WRAPPER: '/root/.cargo/bin/cachepot'\n  CACHEPOT_BUCKET: 'drone-sccache'\n  CACHEPOT_S3_KEY_PREFIX: ci\n  CACHEPOT_REGION: 'us-east-2'\n  CARGO_INCREMENTAL: '0'\n\n__buildenv: &buildenv\n  image: casperlabs/node-build-u1804\n  volumes:\n  - name: rustup\n    path: \"/root/.rustup\"\n  - name: cargo\n    path: \"/root/.cargo\"\n  - name: drone\n    path: \"/drone\"\n  environment:\n    AWS_ACCESS_KEY_ID:\n      from_secret: cachepot_aws_ak\n    AWS_SECRET_ACCESS_KEY:\n      from_secret: cachepot_aws_sk\n\nsteps:\n- name: setup\n  <<: *buildenv\n  commands:\n  - make setup\n  # `elfx86exts` is used to determine the CPU features used by the compiled node binary.\n  - cargo install elfx86exts --version 0.5.0\n\n- name: nctl-compile\n  <<: *buildenv\n  commands:\n  - bash -c ./ci/nctl_compile.sh\n\n- name: nctl-upgrade-test\n  <<: *buildenv\n  environment:\n    AWS_ACCESS_KEY_ID:\n      from_secret: put-drone-aws-ak\n    AWS_SECRET_ACCESS_KEY:\n      from_secret: put-drone-aws-sk\n  commands:\n  - bash -c ./ci/nctl_upgrade.sh\n\n- name: check CPU features\n  <<: *buildenv\n  commands:\n  - ./ci/check_cpu_features.sh\n\nvolumes:\n- name: rustup\n  temp: {}\n- name: cargo\n  temp: {}\n- name: drone\n  temp: {}\n\ntrigger:\n  branch:\n  - trying\n  - staging\n  event:\n    include:\n    - push\n    exclude:\n    - pull_request\n    - tag\n    - cron\n\n---\nkind: pipeline\ntype: docker\nname: package\n\n__buildenv: &buildenv\n  image: casperlabs/node-build-u1804\n  volumes:\n  - name: rustup\n    path: \"/root/.rustup\"\n  - name: cargo\n    path: \"/root/.cargo\"\n  - name: drone\n    path: \"/drone\"\n  - name: nctl-temp-dir\n    path: \"/tmp/nctl_upgrade_stage\"\n\n__buildenv_upload: &buildenv_upload\n  image: casperlabs/node-build-u1804\n  volumes:\n  - name: rustup\n 
   path: \"/root/.rustup\"\n  - name: cargo\n    path: \"/root/.cargo\"\n  - name: drone\n    path: \"/drone\"\n  - name: nctl-temp-dir\n    path: \"/tmp/nctl_upgrade_stage\"\n  environment:\n    AWS_ACCESS_KEY_ID:\n      from_secret: put-drone-aws-ak\n    AWS_SECRET_ACCESS_KEY:\n      from_secret: put-drone-aws-sk\n\nsteps:\n- name: setup\n  <<: *buildenv\n  commands:\n  - make setup\n\n- name: build-client-contracts\n  <<: *buildenv\n  commands:\n  - make build-client-contracts\n\n- name: stest-wasm-package-push-to-s3\n  image: casperlabs/s3cmd-build:latest\n  environment:\n    AWS_ACCESS_KEY_ID:\n      from_secret: put-drone-aws-ak\n    AWS_SECRET_ACCESS_KEY:\n      from_secret: put-drone-aws-sk\n  commands:\n  - \"./build_wasm_package.sh\"\n\n- name: build-upgrade-package\n  <<: *buildenv\n  commands:\n  - \"./ci/build_update_package.sh\"\n\n- name: upload-to-s3-genesis\n  image: plugins/s3\n  settings:\n    bucket: 'genesis.casper.network'\n    region: 'us-east-2'\n    access_key:\n      from_secret: drone_genesis_key_id\n    secret_key:\n      from_secret: drone_genesis_secret\n    source: \"target/upgrade_build/**/*\"\n    strip_prefix: 'target/upgrade_build/'\n    target: \"/drone/${DRONE_COMMIT}/\"\n\n- name: nctl-s3-build\n  <<: *buildenv_upload\n  commands:\n  - \"aws s3 rm s3://nctl.casper.network/${DRONE_BRANCH} --recursive\"\n  - \"./ci/nctl_upgrade_stage.sh\"\n  when:\n    branch:\n    - dev\n    - \"release-*\"\n\n- name: nctl-bucket-upload\n  image: plugins/s3-sync:latest\n  settings:\n    bucket: 'nctl.casper.network'\n    access_key:\n      from_secret: put-drone-aws-ak\n    secret_key:\n      from_secret: put-drone-aws-sk\n    region: us-east-2\n    source: '../../tmp/nctl_upgrade_stage/'\n    target: \"/${DRONE_BRANCH}/\"\n  volumes:\n  - name: nctl-temp-dir\n    path: /tmp/nctl_upgrade_stage\n  when:\n    branch:\n    - dev\n    - \"release-*\"\n\n- name: notify\n  image: plugins/slack\n  settings:\n    webhook:\n      from_secret: 
slack_webhook\n    template:\n    - |\n      Package Pipeline Status: *{{ uppercasefirst build.status }}*\n      Drone Build: <{{ build.link }}|#{{ build.number }}>\n      Commit Link: <https://github.com/{{repo.owner}}/{{repo.name}}/commit/{{build.commit}}|{{ truncate build.commit 10 }}>\n  when:\n    status:\n    - failure\n\nvolumes:\n- name: rustup\n  temp: {}\n- name: cargo\n  temp: {}\n- name: drone\n  temp: {}\n- name: nctl-temp-dir\n  temp: {}\n\ntrigger:\n  branch:\n  - dev\n  - \"release-*\"\n  - \"feat-*\"\n  event:\n    include:\n    - push\n    exclude:\n    - pull_request\n    - tag\n    - cron\n\n---\nkind: pipeline\ntype: docker\nname: release-by-tag\n\n__buildenv: &buildenv\n  image: casperlabs/node-build-u1804\n  volumes:\n  - name: rustup\n    path: \"/root/.rustup\"\n  - name: cargo\n    path: \"/root/.cargo\"\n  - name: drone\n    path: \"/drone\"\n  - name: nctl-temp-dir\n    path: \"/tmp/nctl_upgrade_stage\"\n\n__buildenv_upload: &buildenv_upload\n  image: casperlabs/node-build-u1804\n  volumes:\n  - name: rustup\n    path: \"/root/.rustup\"\n  - name: cargo\n    path: \"/root/.cargo\"\n  - name: drone\n    path: \"/drone\"\n  - name: nctl-temp-dir\n    path: \"/tmp/nctl_upgrade_stage\"\n  environment:\n    AWS_ACCESS_KEY_ID:\n      from_secret: put-drone-aws-ak\n    AWS_SECRET_ACCESS_KEY:\n      from_secret: put-drone-aws-sk\n\nsteps:\n- name: setup\n  <<: *buildenv\n  commands:\n  - make setup\n\n- name: build-upgrade-package\n  <<: *buildenv\n  commands:\n  - \"./ci/build_update_package.sh\"\n\n- name: publish-github-pre-release\n  image: plugins/github-release\n  settings:\n    api_key:\n      from_secret: github_token\n    checksum:\n    - sha256\n    - md5\n    files:\n    - \"./target/upgrade_build/*/bin.tar.gz\"\n    prerelease:\n    - true\n\n- name: nctl-s3-build\n  <<: *buildenv_upload\n  commands:\n  - \"aws s3 rm s3://nctl.casper.network/${DRONE_TAG} --recursive\"\n  - \"./ci/nctl_upgrade_stage.sh\"\n\n- name: 
nctl-bucket-upload\n  image: plugins/s3-sync:latest\n  settings:\n    bucket: 'nctl.casper.network'\n    access_key:\n      from_secret: put-drone-aws-ak\n    secret_key:\n      from_secret: put-drone-aws-sk\n    region: us-east-2\n    source: '../../tmp/nctl_upgrade_stage/'\n    target: \"/${DRONE_TAG}/\"\n  volumes:\n  - name: nctl-temp-dir\n    path: /tmp/nctl_upgrade_stage\n\n- name: publish-crates\n  <<: *buildenv\n  environment:\n    CARGO_TOKEN:\n      from_secret: crates_io_token\n  commands:\n  - \"./ci/publish_to_crates_io.sh\"\n\n- name: as-contract-publish\n  image: plugins/npm\n  settings:\n    username:\n      from_secret: npm_user\n    token:\n      from_secret: npm_token\n    email:\n      from_secret: npm_email\n    folder:\n      - \"smart_contracts/contract_as\"\n    fail_on_version_conflict:\n      - true\n    access:\n      - \"public\"\n\n- name: notify\n  image: plugins/slack\n  settings:\n    webhook:\n      from_secret: slack_webhook\n    template:\n    - |\n      Casper-Node Release Status: *{{ uppercasefirst build.status }}*\n      Drone Build: <{{ build.link }}|#{{ build.number }}>\n      Commit Link: <https://github.com/{{repo.owner}}/{{repo.name}}/commit/{{build.commit}}|{{ truncate build.commit 10 }}>\n  when:\n    status:\n    - failure\n    - success\n\nvolumes:\n- name: rustup\n  temp: {}\n- name: cargo\n  temp: {}\n- name: drone\n  temp: {}\n- name: nctl-temp-dir\n  temp: {}\n\ntrigger:\n  ref:\n  - refs/tags/v*\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feedback.yml",
    "content": "name: Condor Release Feedback / Issue Form\ndescription: Please share your feedback or issues you face in incorporating Condor Release changes in your application/project.\ntitle: '[Condor-Release]: Specify your feedback/issue briefly'\nlabels:\n  - condor-feedback\nassignees:\n  - devendran-m\n  - piotr-dziubecki\n  - sacherjj\n  - cspramit\n  - SaiProServ\nbody:\n  - type: markdown\n    attributes:\n      value: |\n        This is a feedback form, to consolidate Condor feedback and/or issues.\n  - type: dropdown\n    id: type\n    attributes:\n      label: Category\n      description: Please choose the category that best describes your needs.\n      options:\n        - Feedback\n        - Bug/Issue\n    validations:\n      required: true\n  - type: input\n    id: email\n    attributes:\n      label: Your email address\n      placeholder: john@doe.com\n    validations:\n      required: true\n  - type: input\n    id: project-name\n    attributes:\n      label: Integration Project Name(Optional)\n      placeholder: CasperWallet\n      description: Name of the project with Casper, if applicable\n    validations:\n      required: false\n  - type: dropdown\n    id: casper-network\n    attributes:\n      label: Casper Network\n      description: Please choose the network or environment related to the feedback, bug, or issue.\n      options:\n        - Devnet\n        - Integration-Test\n        - Testnet\n        - Mainnet        \n    validations:\n      required: true\n  - type: dropdown\n    id: node-functionality\n    attributes:\n      label: Node Functionality\n      description: Please specify the primary function of the node on the Casper Network related to the feedback or issue.\n      options:\n        - Node\n        - JSON RPC\n        - Execution Engine\n        - SSE\n        - NCTL\n        - CCTL\n        - Validator\n        - Consensus\n        - Other\n    validations:\n      required: true\n  - type: textarea\n    id: feedback-issue\n 
   attributes:\n      label: Description\n      placeholder: Please elaborate your feedback/ bug or issue here.\n      description: Please provide a detailed description of your feedback, bug, or issue.\n    validations:\n      required: true\n  - type: input\n    id: date-since\n    attributes:\n      label: Date Issue Began(optional)\n      placeholder: dd/mm/yyyy\n      description: When did you first notice this issue? \n    validations:\n      required: false\n  - type: textarea\n    id: attachments\n    attributes:\n      label: Attachments (optional)\n      description: Please attach any logs, screenshots, or links that may help with the analysis.\n    validations:\n      required: false"
  },
  {
    "path": ".github/pull_request_template.md",
    "content": "Please consider the following when creating a PR:\n\n* Provide a useful description of this PR, suitably verbose, aimed at helping reviewers and contributors\n* Update all relevant changelogs\n* Provide a link to the GitHub issue relating to this PR\n* Identify any downstream impact, e.g. on SDKs, smart contracts, etc.\n"
  },
  {
    "path": ".github/workflows/casper-node.yml",
    "content": "---\nname: casper-node\n# runs steps that are OK with normal rust based on ./rust-toolchain.toml\npermissions:\n  contents: read\n\non:\n  push:\n    branches:\n      - dev\n      - trying\n      - staging\n      - 'release-**'\n      - 'feat-**'\n    paths-ignore:\n      - '**.md'\n\n  pull_request:\n    branches:\n      - dev\n      - 'release-**'\n      - 'feat-**'\n    paths-ignore:\n      - '**.md'\n\njobs:\n  lints:\n    name: tests\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v4\n        with:\n          persist-credentials: false\n\n      - name: setup\n        run: make setup\n\n      - name: setup ubuntu\n        run: |\n         sudo apt-get -y install wabt\n\n      - uses: Swatinem/rust-cache@v2\n\n      - name: check-format\n        run: make check-format\n\n      - name: doc\n        run: make doc\n\n      - name: lint\n        run: make lint\n\n      - name: audit\n        run: make audit\n\n      - name: check-std-features\n        run: make check-std-features\n\n      - name: check-testing-features\n        run: make check-testing-features\n\n      - name: test\n        run: make test CARGO_FLAGS=--release\n\n      - name: test-contracts\n        run: make test-contracts CARGO_FLAGS=--release\n"
  },
  {
    "path": ".github/workflows/lints-md.yml",
    "content": "---\nname: lints\npermissions:\n  contents: read\n\non:\n  push:\n    branches:\n      - dev\n      - trying\n      - staging\n      - 'release-**'\n      - 'feat-**'\n    paths:\n      - '**.md'\n\n  pull_request:\n    branches:\n      - dev\n      - 'release-**'\n      - 'feat-**'\n    paths:\n      - '**.md'\n\njobs:\n  lints:\n    name: lints\n    runs-on: ubuntu-latest\n    steps:\n      - run: 'echo \"Markdown only change, no lints required\"'\n"
  },
  {
    "path": ".github/workflows/publish-global-state-update-gen.yml",
    "content": "---\nname: publish-global-state-update-gen\npermissions:\n  contents: read\n  id-token: write\n\non:\n  push:\n    tags:\n      - \"v*\"\n\njobs:\n  publish_deb:\n    strategy:\n      matrix:\n        include:\n          - os: ubuntu-22.04\n            code_name: jammy\n#          - os: ubuntu-24.04\n#            code_name: noble\n\n    runs-on: ${{ matrix.os }}\n\n    steps:\n      - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b #v3.0.2\n\n      - name: Configure AWS credentials\n        uses: aws-actions/configure-aws-credentials@v4\n        with:\n          role-to-assume: ${{ secrets.AWS_ACCESS_ROLE_REPO }}\n          role-session-name: GitHub_to_AWS_via_FederatedOIDC\n          aws-region: ${{ secrets.AWS_ACCESS_REGION_REPO }}\n\n      - name: Install deps\n        run: |\n          echo \"deb http://repo.aptly.info/ squeeze main\" | sudo tee -a /etc/apt/sources.list.d/aptly.list\n          wget -qO - https://www.aptly.info/pubkey.txt | sudo apt-key add -\n          sudo apt-get update\n          sudo apt-get install -y aptly=1.4.0\n          aptly config show\n\n      - name: Import GPG key\n        uses: crazy-max/ghaction-import-gpg@c8bb57c57e8df1be8c73ff3d59deab1dbc00e0d1 #v5.1.0\n        with:\n          gpg_private_key: ${{ secrets.APTLY_GPG_KEY }}\n          passphrase: ${{ secrets.APTLY_GPG_PASS }}\n\n      - name: Install cargo deb\n        run: cargo install cargo-deb\n\n      - name: Cargo build\n        run: cargo build -p global-state-update-gen --release\n\n      - name: Cargo deb\n        run: cargo deb -p global-state-update-gen --no-build --variant ${{ matrix.code_name }}\n\n      - name: Upload binaries to repo\n        env:\n          PLUGIN_REPO_NAME: ${{ secrets.AWS_BUCKET_REPO }}\n          PLUGIN_REGION: ${{ secrets.AWS_ACCESS_REGION_REPO }}\n          PLUGIN_GPG_KEY: ${{ secrets.APTLY_GPG_KEY }}\n          PLUGIN_GPG_PASS: ${{ secrets.APTLY_GPG_PASS }}\n          PLUGIN_ACL: 'private'\n          
PLUGIN_PREFIX: 'releases'\n          PLUGIN_DEB_PATH: './target/debian'\n          PLUGIN_OS_CODENAME: ${{ matrix.code_name }}\n        run: ./ci/publish_deb_to_repo.sh\n\n      - name: Invalidate CloudFront cache\n        run: |\n          aws cloudfront create-invalidation --distribution-id ${{ secrets.AWS_CLOUDFRONT_REPO }} --paths \"/*\"\n"
  },
  {
    "path": ".github/workflows/publish-release-and-crates.yml",
    "content": "---\nname: publish-release-and-crates\npermissions:\n  contents: read\n  id-token: write\n\non:\n  push:\n    tags:\n      - 'v*'\n\njobs:\n  push_release_and_crates:\n    strategy:\n      matrix:\n        include:\n          - os: ubuntu-22.04\n            code_name: jammy\n\n    runs-on: ${{ matrix.os }}\n\n    steps:\n      - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b #v3.0.2\n\n      # jq python and python toml required for build_update_package.sh\n      - name: Install deps\n        run: |\n          sudo apt-get update\n          sudo apt-get install -y jq python3 python3-toml\n          python3 --version\n\n      - name: Install cargo deb\n        run: cargo install cargo-deb\n\n      - name: Build update package\n        run: ./ci/build_update_package.sh\n\n      - name: Publish to crates.io\n        env:\n          CARGO_TOKEN: ${{ secrets.crates_io_token }}\n        run: ./ci/publish_to_crates_io.sh\n\n      # Add config.tar.gz, bin.tar.gz to release\n      - name: Upload files to release\n        uses: svenstaro/upload-release-action@v2\n        with:\n          repo_token: ${{ secrets.GITHUB_TOKEN }}\n          file: target/upgrade_build/*.tar.gz\n          tag: ${{ github.ref }}\n          overwrite: true\n          file_glob: true"
  },
  {
    "path": ".github/workflows/push-artifacts.yml",
    "content": "---\nname: push-artifacts\npermissions:\n  contents: read\n  id-token: write\n\non:\n  push:\n    branches:\n      - dev\n      - 'feat-**'\n      - 'release-**'\n\njobs:\n  push_artifacts:\n    strategy:\n      matrix:\n        include:\n          - os: ubuntu-22.04\n            code_name: jammy\n\n    runs-on: ${{ matrix.os }}\n\n    steps:\n      - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b #v3.0.2\n\n      # Assign AWS PROD role to get access to production cloudfronts and S3 buckets\n      - name: Configure AWS credentials\n        uses: aws-actions/configure-aws-credentials@v4\n        with:\n          role-to-assume: ${{ secrets.AWS_ACCESS_ROLE_GENESIS }}\n          role-session-name: GitHub_to_AWS_via_FederatedOIDC\n          aws-region: ${{ secrets.AWS_ACCESS_REGION_GENESIS }}\n\n      # jq python and python toml required for build_update_package.sh\n      - name: Install deps\n        run: |\n          sudo apt-get update\n          sudo apt-get install -y jq python3 python3-toml\n          python3 --version\n\n      - name: Install cargo deb\n        run: cargo install cargo-deb\n\n      - name: Build update package\n        run: ./ci/build_update_package.sh\n\n      - name: Upload artifacts to S3\n        run: aws s3 sync ./target/upgrade_build/ s3://${{ secrets.AWS_BUCKET_GENESIS }}/artifacts/casper-node/$(git rev-parse HEAD)/\n\n      - name: Upload branch_name.latest file to S3\n        run: aws s3 sync ./target/latest/ s3://${{ secrets.AWS_BUCKET_GENESIS }}/artifacts/casper-node/\n\n      # Required in case of overwrite\n      - name: Invalidate CloudFront cache\n        run: aws cloudfront create-invalidation --distribution-id ${{ secrets.AWS_CLOUDFRONT_GENESIS }} --paths \"/artifacts/casper-node/*\"\n"
  },
  {
    "path": ".gitignore",
    "content": "target_as\n\n# Criterion puts results in wrong directories inside workspace: https://github.com/bheisler/criterion.rs/issues/192\ntarget\n\n# Created by https://www.toptal.com/developers/gitignore/api/rust,node\n# Edit at https://www.toptal.com/developers/gitignore?templates=rust,node\n\n### Node ###\n# Logs\nlogs\n*.log\nnpm-debug.log*\nyarn-debug.log*\nyarn-error.log*\nlerna-debug.log*\n\n# Diagnostic reports (https://nodejs.org/api/report.html)\nreport.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json\n\n# Runtime data\npids\n*.pid\n*.seed\n*.pid.lock\n\n# Directory for instrumented libs generated by jscoverage/JSCover\nlib-cov\n\n# Coverage directory used by tools like istanbul\ncoverage\n*.lcov\n\n# nyc test coverage\n.nyc_output\n\n# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)\n.grunt\n\n# Bower dependency directory (https://bower.io/)\nbower_components\n\n# node-waf configuration\n.lock-wscript\n\n# Compiled binary addons (https://nodejs.org/api/addons.html)\nbuild/Release\n\n# Dependency directories\nnode_modules/\njspm_packages/\n\n# TypeScript v1 declaration files\ntypings/\n\n# TypeScript cache\n*.tsbuildinfo\n\n# Optional npm cache directory\n.npm\n\n# Optional eslint cache\n.eslintcache\n\n# Microbundle cache\n.rpt2_cache/\n.rts2_cache_cjs/\n.rts2_cache_es/\n.rts2_cache_umd/\n\n# Optional REPL history\n.node_repl_history\n\n# Output of 'npm pack'\n*.tgz\n\n# Yarn Integrity file\n.yarn-integrity\n\n# dotenv environment variables file\n.env\n.env.test\n\n# parcel-bundler cache (https://parceljs.org/)\n.cache\n\n# Next.js build output\n.next\n\n# Nuxt.js build / generate output\n.nuxt\ndist\n\n# Gatsby files\n.cache/\n# Comment in the public line in if your project uses Gatsby and not Next.js\n# https://nextjs.org/blog/next-9-1#public-directory-support\n# public\n\n# vuepress build output\n.vuepress/dist\n\n# Serverless directories\n.serverless/\n\n# FuseBox cache\n.fusebox/\n\n# DynamoDB Local 
files\n.dynamodb/\n\n# TernJS port file\n.tern-port\n\n# Stores VSCode versions used for testing VSCode extensions\n.vscode-test\n\n### Rust ###\n# Generated by Cargo\n# will have compiled files and executables\n# /target/\n\n# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries\n# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html\n# Cargo.lock\n\n# These are backup files generated by rustfmt\n**/*.rs.bk\n\n# End of https://www.toptal.com/developers/gitignore/api/rust,node\n\nresources/production/*.wasm\nresources/node-storage/*\nresources/local/chainspec.toml\n\nexecution_engine_testing/test_support/resources/chainspec.toml\n\n# CLion\n.idea/\ncmake-build-debug/\n\n# vscode\n.vscode/\n.dccache\n\n# utils data dirs\n/utils/**/chain-download\n/utils/**/lmdb-data\n\n# OS X\n.DS_Store\n\n# Notes\nnotes\n\n# sw* files in vim\n.*.sw*\n\n# disk use reports\n**/disk_use_report.csv\n\n# index files for VSCode\n.lh/*\n\n*.patch\n\n# direnv-related files\n.envrc\n.direnv/\n"
  },
  {
    "path": "Cargo.toml",
    "content": "[workspace]\n\n# highway-rewards-analysis and highway-state-grapher are temporarily disabled because\n# they use old rewards calculation logic and need to be updated.\n\nmembers = [\n    \"ci/casper_updater\",\n    \"execution_engine\",\n    \"execution_engine_testing/test_support\",\n    \"execution_engine_testing/tests\",\n    \"node\",\n    \"smart_contracts/contract\",\n    \"smart_contracts/contracts/[!.]*/*\",\n    \"storage\",\n    \"types\",\n    \"utils/global-state-update-gen\",\n    \"utils/validation\",\n    \"binary_port\",\n    \"smart_contracts/sdk\",\n    \"smart_contracts/sdk_codegen\",\n    \"smart_contracts/sdk_sys\",\n    \"smart_contracts/macros\",\n    \"cargo_casper\",\n    # \"utils/highway-rewards-analysis\",\n    # \"utils/highway-state-grapher\",\n    \"executor/wasm_common\",\n    \"executor/wasm_interface\",\n    \"executor/wasm_host\",\n    \"executor/wasmer_backend\",\n    \"executor/wasm\",\n]\n\ndefault-members = [\n    \"ci/casper_updater\",\n    \"execution_engine\",\n    \"execution_engine_testing/test_support\",\n    \"execution_engine_testing/tests\",\n    \"node\",\n    \"storage\",\n    \"types\",\n    \"utils/global-state-update-gen\",\n    \"utils/validation\",\n    \"binary_port\",\n    \"smart_contracts/sdk\",\n    \"smart_contracts/sdk_sys\",\n    \"smart_contracts/sdk_codegen\",\n    \"smart_contracts/macros\",\n    # \"utils/highway-rewards-analysis\",\n    # \"utils/highway-state-grapher\",\n]\n\nexclude = [\"utils/nctl/remotes/casper-client-rs\"]\n\nresolver = \"2\"\n\n# Include debug symbols in the release build of `casper-engine-tests` so that `simple-transfer` will yield useful\n# perf data.\n[profile.release.package.casper-engine-tests]\ndebug = true\n\n[profile.release]\ncodegen-units = 1\nlto = true\n\n[profile.bench]\ncodegen-units = 1\nlto = true\n\n[workspace.dependencies]\nnum-derive = \"0.4.2\"\nnum-traits = \"0.2.19\"\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright 2021 Casper Association\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "Makefile",
    "content": "# This supports environments where $HOME/.cargo/env has not been sourced (CI, CLion Makefile runner)\nCARGO  = $(or $(shell which cargo),  $(HOME)/.cargo/bin/cargo)\nRUSTUP = $(or $(shell which rustup), $(HOME)/.cargo/bin/rustup)\n\nPINNED_NIGHTLY := $(shell cat smart_contracts/rust-toolchain)\nPINNED_STABLE  := $(shell sed -nr 's/channel *= *\\\"(.*)\\\"/\\1/p' rust-toolchain.toml)\nWASM_STRIP_VERSION := $(shell wasm-strip --version)\n\nCARGO_OPTS := --locked\nCARGO_PINNED_NIGHTLY := $(CARGO) +$(PINNED_NIGHTLY) $(CARGO_OPTS)\nCARGO := $(CARGO) $(CARGO_OPTS)\n\nDISABLE_LOGGING = RUST_LOG=MatchesNothing\n\n# Rust Contracts\nVM2_CONTRACTS    = $(shell find ./smart_contracts/contracts/vm2 -mindepth 1 -maxdepth 1 -type d -exec basename {} \\;)\nALL_CONTRACTS    = $(shell find ./smart_contracts/contracts/[!.]* -mindepth 1 -maxdepth 1 -not -path \"./smart_contracts/contracts/vm2*\" -type d -exec basename {} \\;)\nCLIENT_CONTRACTS = $(shell find ./smart_contracts/contracts/client -mindepth 1 -maxdepth 1 -type d -exec basename {} \\;)\nCARGO_HOME_REMAP = $(if $(CARGO_HOME),$(CARGO_HOME),$(HOME)/.cargo)\nRUSTC_FLAGS      = \"--remap-path-prefix=$(CARGO_HOME_REMAP)=/home/cargo --remap-path-prefix=$$PWD=/dir\"\n\nCONTRACT_TARGET_DIR       = target/wasm32-unknown-unknown/release\n\nbuild-contract-rs/%:\n\tcd smart_contracts/contracts && RUSTFLAGS=$(RUSTC_FLAGS) $(CARGO) build --verbose --release $(filter-out --release, $(CARGO_FLAGS)) --package $*\n\nbuild-vm2-contract-rs/%:\n\tRUSTFLAGS=$(RUSTC_FLAGS) $(CARGO) run -p cargo-casper --bin cargo-casper -- build-schema --package $*\n\tcd smart_contracts/contracts/vm2 && RUSTFLAGS=$(RUSTC_FLAGS) $(CARGO) build --verbose --release $(filter-out --release, $(CARGO_FLAGS)) --package $*\n\n.PHONY: build-vm2-contracts-rs\nbuild-vm2-contracts-rs: $(patsubst %, build-vm2-contract-rs/%, $(VM2_CONTRACTS))\n\n.PHONY: build-all-contracts-rs\nbuild-all-contracts-rs: $(patsubst %, build-contract-rs/%, $(ALL_CONTRACTS))\n\n.PHONY: 
build-client-contracts-rs\nbuild-client-contracts-rs: $(patsubst %, build-contract-rs/%, $(CLIENT_CONTRACTS))\n\nstrip-contract/%:\n\twasm-strip $(CONTRACT_TARGET_DIR)/$(subst -,_,$*).wasm 2>/dev/null | true\n\n.PHONY: strip-all-contracts\nstrip-all-contracts: $(info Using 'wasm-strip' version $(WASM_STRIP_VERSION)) $(patsubst %, strip-contract/%, $(ALL_CONTRACTS))\n\n.PHONY: strip-client-contracts\nstrip-client-contracts: $(patsubst %, strip-contract/%, $(CLIENT_CONTRACTS))\n\n.PHONY: build-contracts-rs\nbuild-contracts-rs: build-all-contracts-rs strip-all-contracts\n\n.PHONY: build-client-contracts\nbuild-client-contracts: build-client-contracts-rs strip-client-contracts\n\n.PHONY: build-contracts\nbuild-contracts: build-contracts-rs\n\nresources/local/chainspec.toml: generate-chainspec.sh resources/local/chainspec.toml.in\n\t@./$<\n\n.PHONY: test-rs\ntest-rs: resources/local/chainspec.toml build-contracts-rs\n\t$(LEGACY) $(DISABLE_LOGGING) $(CARGO) test --all-features --no-fail-fast $(CARGO_FLAGS) -- --nocapture\n\n.PHONY: resources/local/chainspec.toml\ntest-rs-no-default-features:\n\tcd smart_contracts/contract && $(DISABLE_LOGGING) $(CARGO) test $(CARGO_FLAGS) --no-default-features --features=version-sync\n\n.PHONY: test\ntest: test-rs-no-default-features test-rs\n\n.PHONY: test-contracts-rs\ntest-contracts-rs: build-contracts-rs\n\t$(DISABLE_LOGGING) $(CARGO) test $(CARGO_FLAGS) -p casper-engine-tests -- --ignored --skip repeated_ffi_call_should_gas_out_quickly\n\n.PHONY: test-contracts-timings\ntest-contracts-timings: build-contracts-rs\n\t$(DISABLE_LOGGING) $(CARGO) test --release $(filter-out --release, $(CARGO_FLAGS)) -p casper-engine-tests -- --ignored --test-threads=1 repeated_ffi_call_should_gas_out_quickly\n\n.PHONY: test-contracts\ntest-contracts: test-contracts-rs\n\n.PHONY: check-no-default-features\ncheck-no-default-features:\n\tcd types && $(CARGO) check --all-targets --no-default-features\n\n.PHONY: check-std-features\ncheck-std-features:\n\tcd 
types && $(CARGO) check --all-targets --no-default-features --features=std\n\tcd types && $(CARGO) check --all-targets --features=std\n\tcd smart_contracts/contract && $(CARGO) check --all-targets --no-default-features --features=std\n\tcd smart_contracts/contract && $(CARGO) check --all-targets --features=std\n\ncheck-std-fs-io-features:\n\tcd types && $(CARGO) check --all-targets --features=std-fs-io\n\tcd types && $(CARGO) check --lib --features=std-fs-io\n\ncheck-testing-features:\n\tcd types && $(CARGO) check --all-targets --no-default-features --features=testing\n\tcd types && $(CARGO) check --all-targets --features=testing\n\n.PHONY: check-format\ncheck-format:\n\t$(CARGO_PINNED_NIGHTLY) fmt --all -- --check\n\n.PHONY: format\nformat:\n\t$(CARGO_PINNED_NIGHTLY) fmt --all\n\nlint-contracts-rs:\n\tcd smart_contracts/contracts && $(CARGO) clippy $(patsubst %, -p %, $(ALL_CONTRACTS)) -- -D warnings -A renamed_and_removed_lints\n\n.PHONY: lint\nlint: lint-contracts-rs lint-default-features lint-all-features lint-smart-contracts lint-no-default-features\n\n.PHONY: lint-default-features\nlint-default-features:\n\t$(CARGO) clippy --all-targets -- -D warnings\n\n.PHONY: lint-no-default-features\nlint-no-default-features:\n\t$(CARGO) clippy --all-targets --no-default-features -- -D warnings\n\n.PHONY: lint-all-features\nlint-all-features:\n\t$(CARGO) clippy --all-targets --all-features -- -D warnings\n\n.PHONY: lint-smart-contracts\nlint-smart-contracts:\n\tcd smart_contracts/contract && $(CARGO) clippy --all-targets -- -D warnings -A renamed_and_removed_lints\n\n.PHONY: audit-rs\naudit-rs:\n\t$(CARGO) audit\n\n.PHONY: audit\naudit: audit-rs\n\n.PHONY: doc\ndoc:\n\tRUSTFLAGS=\"-D warnings\" RUSTDOCFLAGS=\"--cfg docsrs\" $(CARGO_PINNED_NIGHTLY) doc --all-features $(CARGO_FLAGS) --no-deps\n\tcd smart_contracts/contract && RUSTFLAGS=\"-D warnings\" RUSTDOCFLAGS=\"--cfg docsrs\" $(CARGO_PINNED_NIGHTLY) doc --all-features $(CARGO_FLAGS) --no-deps\n\n.PHONY: 
check-rs\ncheck: \\\n\tcheck-format \\\n\tdoc \\\n\tlint \\\n\taudit \\\n\tcheck-no-default-features \\\n\tcheck-std-features \\\n\tcheck-std-fs-io-features \\\n\tcheck-testing-features \\\n\ttest-rs \\\n\ttest-rs-no-default-features \\\n\ttest-contracts-rs\n\n.PHONY: clean\nclean:\n\trm -rf resources/local/chainspec.toml\n\t$(CARGO) clean\n\n.PHONY: build-for-packaging\nbuild-for-packaging: build-client-contracts\n\t$(LEGACY) $(CARGO) build --release\n\n.PHONY: package\npackage:\n\tcd contract && $(CARGO) package\n\n.PHONY: publish\npublish:\n\t./publish.sh\n\n.PHONY: bench\nbench: build-contracts-rs\n\t$(CARGO) bench\n\n.PHONY: setup-cargo-packagers\nsetup-cargo-packagers:\n\t$(CARGO) install cargo-deb || exit 0\n\n.PHONY: setup-rs\nsetup-rs:\n\t$(RUSTUP) update\n\t$(RUSTUP) toolchain install $(PINNED_STABLE) $(PINNED_NIGHTLY)\n\t$(RUSTUP) target add --toolchain $(PINNED_STABLE) wasm32-unknown-unknown\n\t$(RUSTUP) target add --toolchain $(PINNED_NIGHTLY) wasm32-unknown-unknown\n\t$(RUSTUP) component add --toolchain $(PINNED_NIGHTLY) rustfmt clippy-preview\n\t$(RUSTUP) component add --toolchain $(PINNED_STABLE) clippy-preview\n\t$(CARGO) install cargo-audit\n\n.PHONY: setup\nsetup: setup-rs\n"
  },
  {
    "path": "README.md",
    "content": "<a href=\"https://casper.network/\"><img src=\"images/Casper-association-logo-new.svg\" alt=\"Casper Network Logo\" width=\"300\" height=\"100\"></a>\n\n# casper-node\n\nReference node for the Casper Blockchain Protocol.\n\n## Casper Blockchain\n\nCasper is the blockchain platform purpose-built to scale opportunity for everyone. Building toward blockchain’s next frontier,\nCasper is designed for real-world applications without sacrificing usability, cost, decentralization, or security. It removes\nthe barriers that prevent mainstream blockchain adoption by making blockchain friendly to use, open to the world, and\nfuture-proof to support innovations today and tomorrow. Guided by open-source principles and built from the ground up to\nempower individuals, the team seeks to provide an equitable foundation made for long-lasting impact. Read more about our\nmission at: https://casper.network\n\nThe Casper MainNet is live.\n- [cspr.live Block Explorer](https://cspr.live)\n\n### Specification\n\n- [Platform Specification](https://docs.casper.network/design)\n- [Highway Consensus Proofs](https://github.com/casper-network/highway/releases/latest)\n- [Zug Consensus Whitepaper](http://arxiv.org/pdf/2205.06314)\n\n### Get Started with Smart Contracts\n- [Writing Smart Contracts](https://docs.casper.network/developers/)\n- [Rust Smart Contract SDK](https://crates.io/crates/cargo-casper)\n- [Rust Smart Contract API Docs](https://docs.rs/casper-contract/latest/casper_contract/contract_api/index.html)\n- [AssemblyScript Smart Contract API](https://www.npmjs.com/package/casper-contract)\n\n### Community\n\n- [Discord Server](https://discord.gg/caspernetwork)\n- [Telegram Channel](https://t.me/casperofficialann)\n- [X (Twitter)](https://x.com/Casper_Network)\n\n\n\n## Running a casper-node from source\n\n### Pre-Requisites for Building\n\n* CMake 3.1.4 or greater\n* [Rust](https://www.rust-lang.org/tools/install)\n* libssl-dev\n* pkg-config\n* gcc\n* g++\n* 
recommended [wasm-strip](https://github.com/WebAssembly/wabt) (used to reduce the size of compiled Wasm)\n\n```sh\n# Ubuntu prerequisites setup example\napt update\napt install cmake libssl-dev pkg-config gcc g++ -y\n# the '-s -- -y' part ensures silent mode. Omit if you want to customize\ncurl https://sh.rustup.rs -sSf | sh -s -- -y\n```\n\n\n### Setup\n\nBefore building a node, prepare your Rust build environment:\n\n```\nmake setup-rs\n```\n\nThe node software can be compiled afterwards:\n\n```\ncargo build -p casper-node --release\n```\n\nThe result will be a `casper-node` binary found in `target/release`.  Copy this somewhere into your\nPATH, or substitute `target/release/casper-node` for `casper-node` in all examples below.\n\n### Running one node\n\nTo run a validator node you will need to specify a config file and launch the validator subcommand, for example\n\n```\ncasper-node validator /etc/casper-node/config.toml\n```\n\nThe node ships with an [example configuration file](resources/local/config.toml) that should be setup first.  There is\nalso a template for a local [chainspec](resources/local/chainspec.toml.in) in the same folder.\n\nFor launching, the following configuration values must be properly set:\n\n| Setting                   | Description |\n| :-------------------------| :---------- |\n| `network.known_addresses` | Must refer to public listening addresses of one or more currently-running nodes.  If the node cannot connect to any of these addresses, it will panic.  The node _can_ be run with this referring to its own address, but it will be equivalent to specifying an empty list for `known_addresses` - i.e. the node will run and listen, but will be reliant on other nodes connecting to it in order to join the network.  This would be normal for the very first node of a network, but all subsequent nodes should normally specify that first  node's public listening address as their `known_addresses`. 
|\n\n__The node will not run properly without another node to connect to.  It is recommended that multiple nodes are run.__\n\n### Running multiple nodes on one machine\n\nThere is a [tool](https://github.com/casper-network/casper-nctl) which automates the process of running multiple nodes on a single machine.\n\nNote that running multiple nodes on a single machine is normally only recommended for test purposes.\n\n## Configuration\n\nIn general nodes are configured through a configuration file, typically named `config.toml`.  This\nfile may reference other files or locations through relative paths.  When it does, note that all\npaths that are not absolute will be resolved relative to `config.toml` directory.\n\n\n### Environment overrides\n\nSome environments may call for overriding options through the environment.  In this\nscenario, the `NODE_CONFIG` environment variable can be used. For example:\nalternatively expressed as\n\n```\nexport NODE_CONFIG=consensus.secret_key_path=secret_keys/node-1.pem;network.known_addresses=[1.2.3.4:34553, 200.201.203.204:34553]\ncasper-node validator /etc/casper-node/config.toml\n```\n\nNote how the semicolon is used to separate configuration overrides here.\n\n### Other environment variables\n\nTo set the threshold at which a warn-level log message is generated for a long-running reactor event, use the env var\n`CL_EVENT_MAX_MICROSECS`.  For example, to set the threshold to 1 millisecond:\n\n```\nCL_EVENT_MAX_MICROSECS=1000\n```\n\nTo set the threshold above which the size of the current scheduler queues will be dumped to logs, use the `CL_EVENT_QUEUE_DUMP_THRESHOLD` variable. For example, to set the threshold to 10000 events:\n\n```\nCL_EVENT_QUEUE_DUMP_THRESHOLD=10000\n```\n\nThis will dump a line to the log if the total number of events in queues exceeds 10000. 
After each dump, the threshold will be automatically increased by 10% to avoid log flooding.\n\nExample log entry:\n```\nCurrent event queue size (11000) is above the threshold (10000): details [(\"FinalitySignature\", 3000), (\"FromStorage\", 1000), (\"NetworkIncoming\", 6500), (\"Regular\", 500)]\n```\n\n## Logging\n\nLogging can be enabled by setting the environment variable `RUST_LOG`.  This can be set to one of the following levels,\nfrom lowest priority to highest: `trace`, `debug`, `info`, `warn`, `error`:\n\n```\nRUST_LOG=info cargo run --release -- validator resources/local/config.toml\n```\n\nIf the environment variable is unset, it is equivalent to setting `RUST_LOG=error`.\n\n### Log message format\n\nA typical log message will look like:\n\n```\nJun 09 01:40:17.315 INFO  [casper_node::components::rpc_server rpc_server.rs:127] starting HTTP server; server_addr=127.0.0.1:7777\n```\n\nThis is comprised of the following parts:\n* timestamp\n* log level\n* full module path (not to be confused with filesystem path) of the source of the message\n* filename and line number of the source of the message\n* message\n\n### Filtering log messages\n\n`RUST_LOG` can be set to enable varying levels for different modules.  
Simply set it to a comma-separated list of\n`module-path=level`, where the module path is as shown above in the typical log message, with the end truncated to suit.\n\nFor example, to enable `trace` level logging for the `network` module in `components`, `info` level for all other\nmodules in `components`, and `warn` level for the remaining codebase:\n\n```\nRUST_LOG=casper_node::components::network=trace,casper_node::comp=info,warn\n```\n\n### Logging network messages and tracing events\n\nSpecial logging targets exist in `net_in` and `net_out` which can be used to log every single network message leaving or\nentering a node when set to trace level:\n\n```\nRUST_LOG=net_in::TRACE,net_out::TRACE\n```\n\nAll messages in these logs are also assigned a unique ID that is different even if the same message is sent to multiple\nnodes. The receiving node will log them using the same ID as the sender, thus enabling the tracing of a message across\nmultiple nodes provided all logs are available.\n\nAnother helpful logging feature is ancestor logging. If the target `dispatch` is set to at least debug level, events\nbeing dispatched will be logged as well. Any event has an id (`ev`) and may have an ancestor (`a`), which is the previous\nevent whose effects caused the resulting event to be scheduled. As an example, if an incoming network message gets\nassigned an ID of `ev=123`, the first round of subsequent events will show `a=123` as their ancestor in the logs.\n\n### Changing the logging filter at runtime\n\nIf necessary, the filter of a running node can be changed using the diagnostics port, using the `set-log-filter`\ncommand. 
See the \"Diagnostics port\" section for details on how to access it.\n\n## Debugging\n\nSome additional debug functionality is available, mainly allowed for inspections of the internal event queue.\n\n### Diagnostics port\n\nIf the configuration option `diagnostics_port.enabled` is set to `true`, a unix socket named `debug.socket` by default can be found next to the configuration while the node is running.\n\n#### Interactive use\n\nThe `debug.socket` can be connected to by tools like `socat` for interactive use:\n\n```sh\nsocat readline unix:/path/to/debug.socket\n```\n\nEntering `help` will show available commands. The `set` command allows configuring the current connection, see `set --help`.\n\n#### Example: Collecting a consensus dump\n\nAfter connecting using `socat` (see above), we set the output format to JSON:\n\n```\nset --output=json\n```\n\nA confirmation will acknowledge the settings change (unless `--quiet=true` is set):\n\n```\n{\n  \"Success\": {\n    \"msg\": \"session unchanged\"\n  }\n}\n```\n\nWe can now call `dump-consensus` to get the _latest_ era serialized in JSON format:\n\n```\ndump-consensus\n{\n  \"Success\": {\n    \"msg\": \"dumping consensus state\"\n  }\n}\n{\"id\":8,\"start_time\":\"2022-03-01T14:54:42.176Z\",\"start_height\":88,\"new_faulty\" ...\n```\n\nAn era other than the latest can be dumped by specifying as a parameter, _e.g._ `dump-consensus 3` will dump the third era. 
See `dump-consensus --help` for details.\n\n#### Example: Dumping the event queue\n\nWith the connection set to JSON output (see previous example), we can also dump the event queues:\n\n```\ndump-queues\n{\n  \"Success\": {\n    \"msg\": \"dumping queues\"\n  }\n}\n{\"queues\":{\"Regular\":[],\"Api\":[],\"Network\":[],\"Control\":[],\"NetworkIncoming\":[]\n}}{\"queues\":{\"Api\":[],\"Regular\":[],\"Control\":[],\"NetworkIncoming\":[],\"Network\":\n[]}}{\"queues\":{\"Network\":[],\"Control\":[],\"Api\":[],\"NetworkIncoming\":[],\"Regular\n\":[]}}\n```\n\nEmpty output will be produced on a node that is working without external pressure, as the queues will be empty most of the time.\n\n\n#### Non-interactive use\n\nThe diagnostics port can also be scripted by sending a newline-terminated list of commands through `socat`. For example, the following sequence of commands will collect a consensus dump without the success-indicating header:\n\n```\nset -o json -q true\ndump-consensus\n```\n\nFor ad-hoc dumps, this can be shortened and piped into `socat`:\n\n```sh\necho -e 'set -o json -q true\\ndump-consensus' | socat - unix-client:debug.socket > consensus-dump.json\n```\n\nThis results in the latest era being dumped into `consensus-dump.json`.\n\n\n## Running a client\n\nSee [the client README](https://github.com/casper-ecosystem/casper-client-rs#readme).\n\n## Running a local network\n\nSee [the nctl utility README](https://github.com/casper-network/casper-nctl#readme).\n\n## Running on an existing network\n\nTo support upgrades with a network, the casper-node is installed using scripts distributed with the\n[casper-node-launcher](https://github.com/casper-network/casper-node-launcher).\n"
  },
  {
    "path": "binary_port/CHANGELOG.md",
    "content": "# Changelog\n\nAll notable changes to this project will be documented in this file.  The format is based on [Keep a Changelog].\n\n[comment]: <> (Added:      new features)\n[comment]: <> (Changed:    changes in existing functionality)\n[comment]: <> (Deprecated: soon-to-be removed features)\n[comment]: <> (Removed:    now removed features)\n[comment]: <> (Fixed:      any bug fixes)\n[comment]: <> (Security:   in case of vulnerabilities)\n\n## [Unreleased]\n\n### Added\n* `ErrorCode` has a new code `117`\n\n## [1.0.0] - \n\n### Added\n* Initial release of node for Casper mainnet."
  },
  {
    "path": "binary_port/Cargo.toml",
    "content": "[package]\nname = \"casper-binary-port\"\nversion = \"1.1.1\"\nedition = \"2018\"\ndescription = \"Types for the casper node binary port\"\ndocumentation = \"https://docs.rs/casper-binary-port\"\nreadme = \"README.md\"\nhomepage = \"https://casper.network\"\nrepository = \"https://github.com/casper-network/casper-node/tree/master/binary_port\"\nlicense = \"Apache-2.0\"\nexclude = [\"proptest-regressions\"]\n\n[dependencies]\nbincode = \"1.3.3\"\nbytes = \"1.0.1\"\ncasper-types = { version = \"7.0.0\", path = \"../types\", features = [\"datasize\", \"json-schema\", \"std\"] }\nnum-derive = { workspace = true }\nnum-traits = { workspace = true }\nonce_cell = { version = \"1.5.2\" }\nrand = \"0.8.3\"\nserde = { version = \"1.0.183\", features = [\"derive\"] }\nstrum = \"0.27\"\nstrum_macros = \"0.27\"\nthiserror = \"1.0.45\"\ntokio-util = { version = \"0.6.4\", features = [\"codec\"] }\ntracing = \"0.1.18\"\n\n[dev-dependencies]\ncasper-types = { path = \"../types\", features = [\"datasize\", \"json-schema\", \"std\", \"testing\"] }\nserde_json = \"1\"\nserde_test = \"1\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustc-args = [\"--cfg\", \"docsrs\"]\n\n[features]\ntesting = [\"rand/default\"]\n"
  },
  {
    "path": "binary_port/README.md",
    "content": "# `casper-binary-port`\n\n[![LOGO](https://raw.githubusercontent.com/casper-network/casper-node/master/images/casper-association-logo-primary.svg)](https://casper.network/)\n\n[![Crates.io](https://img.shields.io/crates/v/casper-hashing)](https://crates.io/crates/casper-binary-port)\n[![Documentation](https://docs.rs/casper-hashing/badge.svg)](https://docs.rs/casper-binary-port)\n[![License](https://img.shields.io/badge/license-Apache-blue)](https://github.com/casper-network/casper-node/blob/master/LICENSE)\n\nTypes for the binary port on a casper network node.\n\n[Node Operator Guide](https://docs.casper.network/operators/)\n\n## License\n\nLicensed under the [Apache License Version 2.0](https://github.com/casper-network/casper-node/blob/master/LICENSE).\n"
  },
  {
    "path": "binary_port/src/balance_response.rs",
    "content": "use std::collections::BTreeMap;\n#[cfg(test)]\nuse std::{collections::VecDeque, iter::FromIterator};\n\n#[cfg(test)]\nuse casper_types::testing::TestRng;\nuse casper_types::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    global_state::TrieMerkleProof,\n    system::mint::BalanceHoldAddrTag,\n    BlockTime, Key, StoredValue, U512,\n};\n#[cfg(test)]\nuse casper_types::{global_state::TrieMerkleProofStep, CLValue};\n#[cfg(test)]\nuse rand::Rng;\n\n/// Response to a balance query.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct BalanceResponse {\n    /// The purses total balance, not considering holds.\n    pub total_balance: U512,\n    /// The available balance (total balance - sum of all active holds).\n    pub available_balance: U512,\n    /// A proof that the given value is present in the Merkle trie.\n    pub total_balance_proof: Box<TrieMerkleProof<Key, StoredValue>>,\n    /// Any time-relevant active holds on the balance.\n    pub balance_holds: BTreeMap<BlockTime, BalanceHoldsWithProof>,\n}\n\nimpl BalanceResponse {\n    #[cfg(test)]\n    pub(crate) fn random(rng: &mut TestRng) -> Self {\n        BalanceResponse {\n            total_balance: rng.gen(),\n            available_balance: rng.gen(),\n            total_balance_proof: Box::new(TrieMerkleProof::new(\n                Key::URef(rng.gen()),\n                StoredValue::CLValue(CLValue::from_t(rng.gen::<i32>()).unwrap()),\n                VecDeque::from_iter([TrieMerkleProofStep::random(rng)]),\n            )),\n            balance_holds: BTreeMap::new(),\n        }\n    }\n}\n\nimpl ToBytes for BalanceResponse {\n    fn to_bytes(&self) -> Result<Vec<u8>, casper_types::bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), casper_types::bytesrepr::Error> {\n        self.total_balance.write_bytes(writer)?;\n        
self.available_balance.write_bytes(writer)?;\n        self.total_balance_proof.write_bytes(writer)?;\n        self.balance_holds.write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.total_balance.serialized_length()\n            + self.available_balance.serialized_length()\n            + self.total_balance_proof.serialized_length()\n            + self.balance_holds.serialized_length()\n    }\n}\n\nimpl FromBytes for BalanceResponse {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), casper_types::bytesrepr::Error> {\n        let (total_balance, remainder) = U512::from_bytes(bytes)?;\n        let (available_balance, remainder) = U512::from_bytes(remainder)?;\n        let (total_balance_proof, remainder) =\n            TrieMerkleProof::<Key, StoredValue>::from_bytes(remainder)?;\n        let (balance_holds, remainder) =\n            BTreeMap::<BlockTime, BalanceHoldsWithProof>::from_bytes(remainder)?;\n        Ok((\n            BalanceResponse {\n                total_balance,\n                available_balance,\n                total_balance_proof: Box::new(total_balance_proof),\n                balance_holds,\n            },\n            remainder,\n        ))\n    }\n}\n\n/// Balance holds with Merkle proofs.\npub type BalanceHoldsWithProof =\n    BTreeMap<BalanceHoldAddrTag, (U512, TrieMerkleProof<Key, StoredValue>)>;\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::testing::TestRng;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = BalanceResponse::random(rng);\n        bytesrepr::test_serialization_roundtrip(&val);\n    }\n}\n"
  },
  {
    "path": "binary_port/src/binary_message.rs",
    "content": "#[cfg(test)]\nuse casper_types::testing::TestRng;\n#[cfg(test)]\nuse rand::Rng;\n\nuse bytes::{Buf, Bytes};\nuse tokio_util::codec::{self};\n\nuse crate::error::Error;\n\ntype LengthEncoding = u32;\nconst LENGTH_ENCODING_SIZE_BYTES: usize = size_of::<LengthEncoding>();\n\n#[derive(Clone, PartialEq, Debug)]\npub struct BinaryMessage(Bytes);\n\nimpl BinaryMessage {\n    pub fn new(payload: Vec<u8>) -> Self {\n        Self(payload.into())\n    }\n\n    pub fn payload(&self) -> &[u8] {\n        &self.0\n    }\n\n    #[cfg(test)]\n    pub(crate) fn random(rng: &mut TestRng) -> Self {\n        let len = rng.gen_range(1..=1024);\n        let payload = std::iter::repeat_with(|| rng.gen()).take(len).collect();\n        BinaryMessage(payload)\n    }\n}\n\n#[derive(Clone, Copy)]\npub struct BinaryMessageCodec {\n    max_message_size_bytes: u32,\n}\n\nimpl BinaryMessageCodec {\n    pub fn new(max_message_size_bytes: u32) -> Self {\n        Self {\n            max_message_size_bytes,\n        }\n    }\n\n    pub fn max_message_size_bytes(&self) -> u32 {\n        self.max_message_size_bytes\n    }\n}\n\nimpl codec::Encoder<BinaryMessage> for BinaryMessageCodec {\n    type Error = Error;\n\n    fn encode(\n        &mut self,\n        item: BinaryMessage,\n        dst: &mut bytes::BytesMut,\n    ) -> Result<(), Self::Error> {\n        let length = item.0.len() as LengthEncoding;\n        if length > self.max_message_size_bytes {\n            return Err(Error::RequestTooLarge {\n                allowed: self.max_message_size_bytes,\n                got: length,\n            });\n        }\n        let length_bytes = length.to_le_bytes();\n        dst.extend(length_bytes.iter().chain(item.0.iter()));\n        Ok(())\n    }\n}\n\nimpl codec::Decoder for BinaryMessageCodec {\n    type Item = BinaryMessage;\n\n    type Error = Error;\n\n    fn decode(&mut self, src: &mut bytes::BytesMut) -> Result<Option<Self::Item>, Self::Error> {\n        let (length, have_full_frame) 
= if let [b1, b2, b3, b4, remainder @ ..] = &src[..] {\n            let length = LengthEncoding::from_le_bytes([*b1, *b2, *b3, *b4]) as usize;\n            if length == 0 {\n                return Err(Error::EmptyRequest);\n            }\n            let remainder_length = remainder.len();\n            (length, remainder_length >= length)\n        } else {\n            // Not enough bytes to read the length.\n            return Ok(None);\n        };\n\n        if length > self.max_message_size_bytes as usize {\n            return Err(Error::RequestTooLarge {\n                allowed: self.max_message_size_bytes,\n                got: length as u32,\n            });\n        }\n\n        if !have_full_frame {\n            // Not enough bytes to read the whole message.\n            return Ok(None);\n        };\n\n        src.advance(LENGTH_ENCODING_SIZE_BYTES);\n        Ok(Some(BinaryMessage(src.split_to(length).freeze())))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use casper_types::testing::TestRng;\n    use rand::Rng;\n    use tokio_util::codec::{Decoder, Encoder};\n\n    use crate::{\n        binary_message::{LengthEncoding, LENGTH_ENCODING_SIZE_BYTES},\n        error::Error,\n        BinaryMessage, BinaryMessageCodec,\n    };\n\n    const MAX_MESSAGE_SIZE_BYTES: u32 = 1024 * 1024;\n\n    #[test]\n    fn binary_message_codec() {\n        let rng = &mut TestRng::new();\n        let val = BinaryMessage::random(rng);\n        let mut codec = BinaryMessageCodec::new(MAX_MESSAGE_SIZE_BYTES);\n        let mut bytes = bytes::BytesMut::new();\n        codec\n            .encode(val.clone(), &mut bytes)\n            .expect(\"should encode\");\n\n        let decoded = codec\n            .decode(&mut bytes)\n            .expect(\"should decode\")\n            .expect(\"should be Some\");\n\n        assert_eq!(val, decoded);\n    }\n\n    #[test]\n    fn should_not_decode_when_not_enough_bytes_to_decode_length() {\n        let rng = &mut TestRng::new();\n        let val = 
BinaryMessage::random(rng);\n        let mut codec = BinaryMessageCodec::new(MAX_MESSAGE_SIZE_BYTES);\n        let mut bytes = bytes::BytesMut::new();\n        codec.encode(val, &mut bytes).expect(\"should encode\");\n\n        let _ = bytes.split_off(LENGTH_ENCODING_SIZE_BYTES / 2);\n        let in_bytes = bytes.clone();\n        assert!(codec.decode(&mut bytes).expect(\"should decode\").is_none());\n\n        // Ensure that the bytes are not consumed.\n        assert_eq!(in_bytes, bytes);\n    }\n\n    #[test]\n    fn should_not_decode_when_not_enough_bytes_to_decode_full_frame() {\n        let rng = &mut TestRng::new();\n        let val = BinaryMessage::random(rng);\n        let mut codec = BinaryMessageCodec::new(MAX_MESSAGE_SIZE_BYTES);\n        let mut bytes = bytes::BytesMut::new();\n        codec.encode(val, &mut bytes).expect(\"should encode\");\n\n        let _ = bytes.split_off(bytes.len() - 1);\n        let in_bytes = bytes.clone();\n        assert!(codec.decode(&mut bytes).expect(\"should decode\").is_none());\n\n        // Ensure that the bytes are not consumed.\n        assert_eq!(in_bytes, bytes);\n    }\n\n    #[test]\n    fn should_leave_remainder_in_buffer() {\n        let rng = &mut TestRng::new();\n        let val = BinaryMessage::random(rng);\n        let mut codec = BinaryMessageCodec::new(MAX_MESSAGE_SIZE_BYTES);\n        let mut bytes = bytes::BytesMut::new();\n        codec.encode(val, &mut bytes).expect(\"should encode\");\n        let suffix = bytes::Bytes::from_static(b\"suffix\");\n        bytes.extend(&suffix);\n\n        let _ = codec.decode(&mut bytes);\n        // Ensure that the bytes are not consumed.\n        assert_eq!(bytes, suffix);\n    }\n\n    #[test]\n    fn encode_should_bail_on_too_large_request() {\n        let mut codec = BinaryMessageCodec::new(MAX_MESSAGE_SIZE_BYTES);\n        let too_large = MAX_MESSAGE_SIZE_BYTES as usize + 1;\n        let val = BinaryMessage::new(vec![0; too_large]);\n        let mut bytes = 
bytes::BytesMut::new();\n        let result = codec.encode(val, &mut bytes).unwrap_err();\n\n        assert!(matches!(result, Error::RequestTooLarge { allowed, got }\n                 if allowed == codec.max_message_size_bytes && got == too_large as u32));\n    }\n\n    #[test]\n    fn should_encode_request_of_maximum_size() {\n        let mut codec = BinaryMessageCodec::new(MAX_MESSAGE_SIZE_BYTES);\n        let just_right_size = MAX_MESSAGE_SIZE_BYTES as usize;\n        let val = BinaryMessage::new(vec![0; just_right_size]);\n        let mut bytes = bytes::BytesMut::new();\n\n        let result = codec.encode(val, &mut bytes);\n        assert!(result.is_ok());\n    }\n\n    #[test]\n    fn decode_should_bail_on_too_large_request() {\n        let rng = &mut TestRng::new();\n        let mut codec = BinaryMessageCodec::new(MAX_MESSAGE_SIZE_BYTES);\n        let mut bytes = bytes::BytesMut::new();\n        let too_large = (codec.max_message_size_bytes + 1) as LengthEncoding;\n        bytes.extend(too_large.to_le_bytes());\n        bytes.extend(std::iter::repeat_with(|| rng.gen::<u8>()).take(too_large as usize));\n\n        let result = codec.decode(&mut bytes).unwrap_err();\n        assert!(matches!(result, Error::RequestTooLarge { allowed, got }\n                 if allowed == codec.max_message_size_bytes && got == too_large));\n    }\n\n    #[test]\n    fn should_decode_request_of_maximum_size() {\n        let rng = &mut TestRng::new();\n        let mut codec = BinaryMessageCodec::new(MAX_MESSAGE_SIZE_BYTES);\n        let mut bytes = bytes::BytesMut::new();\n        let just_right_size = (codec.max_message_size_bytes) as LengthEncoding;\n        bytes.extend(just_right_size.to_le_bytes());\n        bytes.extend(std::iter::repeat_with(|| rng.gen::<u8>()).take(just_right_size as usize));\n\n        let result = codec.decode(&mut bytes);\n        assert!(result.is_ok());\n    }\n\n    #[test]\n    fn should_bail_on_empty_request() {\n        let mut codec = 
BinaryMessageCodec::new(MAX_MESSAGE_SIZE_BYTES);\n        let mut bytes = bytes::BytesMut::new();\n        let empty = 0 as LengthEncoding;\n        bytes.extend(&empty.to_le_bytes());\n\n        let result = codec.decode(&mut bytes).unwrap_err();\n        assert!(matches!(result, Error::EmptyRequest));\n    }\n\n    #[test]\n    fn should_decoded_queued_messages() {\n        let rng = &mut TestRng::new();\n        let count = rng.gen_range(10000..20000);\n        let messages = (0..count)\n            .map(|_| BinaryMessage::random(rng))\n            .collect::<Vec<_>>();\n        let mut codec = BinaryMessageCodec::new(MAX_MESSAGE_SIZE_BYTES);\n        let mut bytes = bytes::BytesMut::new();\n        for msg in &messages {\n            codec\n                .encode(msg.clone(), &mut bytes)\n                .expect(\"should encode\");\n        }\n\n        let mut decoded_messages = vec![];\n        loop {\n            let maybe_message = codec.decode(&mut bytes).expect(\"should decode\");\n            match maybe_message {\n                Some(message) => decoded_messages.push(message),\n                None => break,\n            }\n        }\n        assert_eq!(messages, decoded_messages);\n    }\n\n    #[test]\n    fn should_not_decode_when_read_bytes_extend_max() {\n        const MAX_MESSAGE_BYTES: usize = 1000;\n        let rng = &mut TestRng::new();\n        let mut codec = BinaryMessageCodec::new(MAX_MESSAGE_BYTES as u32);\n        let mut bytes = bytes::BytesMut::new();\n        let some_length = (MAX_MESSAGE_BYTES * 2_usize) as LengthEncoding; //This value doesn't match the\n                                                                           // length of mock_bytes intentionally so we can be sure at what point did the encoder bail -\n                                                                           // we want to ensure that the encoder doesn't read the whole message before it bails\n        bytes.extend(&some_length.to_le_bytes());\n    
    bytes.extend(std::iter::repeat_with(|| rng.gen::<u8>()).take(MAX_MESSAGE_BYTES * 3));\n\n        let message_res = codec.decode(&mut bytes);\n        assert!(message_res.is_err());\n        let err = message_res.err().unwrap();\n        assert!(matches!(\n            err,\n            Error::RequestTooLarge { allowed, got}\n            if allowed == MAX_MESSAGE_BYTES as u32 && got == MAX_MESSAGE_BYTES as u32 * 2,\n        ))\n    }\n}\n"
  },
  {
    "path": "binary_port/src/binary_response.rs",
    "content": "use casper_types::bytesrepr::{self, Bytes, FromBytes, ToBytes};\n\nuse crate::{\n    binary_response_header::BinaryResponseHeader,\n    error_code::ErrorCode,\n    response_type::{PayloadEntity, ResponseType},\n};\n\n#[cfg(test)]\nuse casper_types::testing::TestRng;\n\n/// The response used in the binary port protocol.\n#[derive(Debug, PartialEq)]\npub struct BinaryResponse {\n    /// Header of the binary response.\n    header: BinaryResponseHeader,\n    /// The response.\n    payload: Vec<u8>,\n}\n\nimpl BinaryResponse {\n    /// Creates new empty binary response.\n    pub fn new_empty() -> Self {\n        Self {\n            header: BinaryResponseHeader::new(None),\n            payload: vec![],\n        }\n    }\n\n    /// Creates new binary response with error code.\n    pub fn new_error(error: ErrorCode) -> Self {\n        BinaryResponse {\n            header: BinaryResponseHeader::new_error(error),\n            payload: vec![],\n        }\n    }\n\n    /// Creates new binary response from raw bytes.\n    pub fn from_raw_bytes(payload_type: ResponseType, payload: Vec<u8>) -> Self {\n        BinaryResponse {\n            header: BinaryResponseHeader::new(Some(payload_type)),\n            payload,\n        }\n    }\n\n    /// Creates a new binary response from a value.\n    pub fn from_value<V>(val: V) -> Self\n    where\n        V: ToBytes + PayloadEntity,\n    {\n        ToBytes::to_bytes(&val).map_or(\n            BinaryResponse::new_error(ErrorCode::InternalError),\n            |payload| BinaryResponse {\n                payload,\n                header: BinaryResponseHeader::new(Some(V::RESPONSE_TYPE)),\n            },\n        )\n    }\n\n    /// Creates a new binary response from an optional value.\n    pub fn from_option<V>(opt: Option<V>) -> Self\n    where\n        V: ToBytes + PayloadEntity,\n    {\n        match opt {\n            Some(val) => Self::from_value(val),\n            None => Self::new_empty(),\n        }\n    }\n\n    /// 
Returns true if response is success.\n    pub fn is_success(&self) -> bool {\n        self.header.is_success()\n    }\n\n    /// Returns the error code.\n    pub fn error_code(&self) -> u16 {\n        self.header.error_code()\n    }\n\n    /// Returns the payload type of the response.\n    pub fn returned_data_type_tag(&self) -> Option<u8> {\n        self.header.returned_data_type_tag()\n    }\n\n    /// Returns true if the response means that data has not been found.\n    pub fn is_not_found(&self) -> bool {\n        self.header.is_not_found()\n    }\n\n    /// Returns the payload.\n    pub fn payload(&self) -> &[u8] {\n        self.payload.as_ref()\n    }\n\n    #[cfg(test)]\n    pub(crate) fn random(rng: &mut TestRng) -> Self {\n        Self {\n            header: BinaryResponseHeader::random(rng),\n            payload: rng.random_vec(64..128),\n        }\n    }\n}\n\nimpl ToBytes for BinaryResponse {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        let BinaryResponse { header, payload } = self;\n\n        header.write_bytes(writer)?;\n        payload.write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.header.serialized_length() + self.payload.serialized_length()\n    }\n}\n\nimpl FromBytes for BinaryResponse {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (header, remainder) = FromBytes::from_bytes(bytes)?;\n        let (payload, remainder) = Bytes::from_bytes(remainder)?;\n\n        Ok((\n            BinaryResponse {\n                header,\n                payload: payload.into(),\n            },\n            remainder,\n        ))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::testing::TestRng;\n\n    #[test]\n   
 fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = BinaryResponse::random(rng);\n        bytesrepr::test_serialization_roundtrip(&val);\n    }\n}\n"
  },
  {
    "path": "binary_port/src/binary_response_and_request.rs",
    "content": "use casper_types::bytesrepr::{self, Bytes, FromBytes, ToBytes};\n\nuse crate::{binary_response::BinaryResponse, response_type::PayloadEntity, ResponseType};\n\nuse crate::record_id::RecordId;\n#[cfg(test)]\nuse casper_types::testing::TestRng;\n\n/// The binary response along with the original binary request attached.\n#[derive(Debug, PartialEq)]\npub struct BinaryResponseAndRequest {\n    /// Context of the original request.\n    request: Bytes,\n    /// The response.\n    response: BinaryResponse,\n}\n\nimpl BinaryResponseAndRequest {\n    /// Creates new binary response with the original request attached.\n    pub fn new(data: BinaryResponse, request: Bytes) -> Self {\n        Self {\n            request,\n            response: data,\n        }\n    }\n\n    /// Returns a new binary response with specified data and no original request.\n    pub fn new_test_response<A: PayloadEntity + ToBytes>(\n        record_id: RecordId,\n        data: &A,\n    ) -> BinaryResponseAndRequest {\n        let response = BinaryResponse::from_raw_bytes(\n            ResponseType::from_record_id(record_id, false),\n            data.to_bytes().unwrap(),\n        );\n        Self::new(response, Bytes::from(vec![]))\n    }\n\n    /// Returns a new binary response with specified legacy data and no original request.\n    pub fn new_legacy_test_response<A: PayloadEntity + serde::Serialize>(\n        record_id: RecordId,\n        data: &A,\n    ) -> BinaryResponseAndRequest {\n        let response = BinaryResponse::from_raw_bytes(\n            ResponseType::from_record_id(record_id, true),\n            bincode::serialize(data).unwrap(),\n        );\n        Self::new(response, Bytes::from(vec![]))\n    }\n\n    /// Returns true if response is success.\n    pub fn is_success(&self) -> bool {\n        self.response.is_success()\n    }\n\n    /// Returns the error code.\n    pub fn error_code(&self) -> u16 {\n        self.response.error_code()\n    }\n\n    #[cfg(test)]\n    
pub(crate) fn random(rng: &mut TestRng) -> Self {\n        let bytes = vec![1; 155];\n        Self {\n            request: Bytes::from(bytes),\n            response: BinaryResponse::random(rng),\n        }\n    }\n\n    /// Returns serialized bytes representing the original request.\n    pub fn request(&self) -> &[u8] {\n        &self.request\n    }\n\n    /// Returns the inner binary response.\n    pub fn response(&self) -> &BinaryResponse {\n        &self.response\n    }\n}\n\nimpl ToBytes for BinaryResponseAndRequest {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        let BinaryResponseAndRequest { request, response } = self;\n        request.write_bytes(writer)?;\n        response.write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.request.serialized_length() + self.response.serialized_length()\n    }\n}\n\nimpl FromBytes for BinaryResponseAndRequest {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (request, remainder) = FromBytes::from_bytes(bytes)?;\n        let (response, remainder) = FromBytes::from_bytes(remainder)?;\n\n        Ok((BinaryResponseAndRequest { request, response }, remainder))\n    }\n}\n\nimpl From<BinaryResponseAndRequest> for BinaryResponse {\n    fn from(response_and_request: BinaryResponseAndRequest) -> Self {\n        let BinaryResponseAndRequest { response, .. 
} = response_and_request;\n        response\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::testing::TestRng;\n\n    #[test]\n    fn roundtrip() {\n        let rng = &mut TestRng::new();\n        let bytes = vec![1; 155];\n        let response = BinaryResponse::random(rng);\n        let val = BinaryResponseAndRequest::new(response, Bytes::from(bytes));\n        bytesrepr::test_serialization_roundtrip(&val);\n    }\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = BinaryResponseAndRequest::random(rng);\n        bytesrepr::test_serialization_roundtrip(&val);\n    }\n}\n"
  },
  {
    "path": "binary_port/src/binary_response_header.rs",
    "content": "use crate::{error_code::ErrorCode, response_type::ResponseType};\nuse casper_types::bytesrepr::{self, FromBytes, ToBytes};\n\n#[cfg(test)]\nuse casper_types::testing::TestRng;\n#[cfg(test)]\nuse rand::Rng;\n\n/// Header of the binary response.\n#[derive(Debug, PartialEq)]\npub struct BinaryResponseHeader {\n    binary_response_version: u16,\n    error: u16,\n    returned_data_type_tag: Option<u8>,\n}\n\nimpl BinaryResponseHeader {\n    pub const BINARY_RESPONSE_VERSION: u16 = 1;\n    /// Creates new binary response header representing success.\n    pub fn new(returned_data_type: Option<ResponseType>) -> Self {\n        Self {\n            binary_response_version: Self::BINARY_RESPONSE_VERSION,\n            error: ErrorCode::NoError as u16,\n            returned_data_type_tag: returned_data_type.map(|ty| ty as u8),\n        }\n    }\n\n    /// Creates new binary response header representing error.\n    pub fn new_error(error: ErrorCode) -> Self {\n        Self {\n            binary_response_version: Self::BINARY_RESPONSE_VERSION,\n            error: error as u16,\n            returned_data_type_tag: None,\n        }\n    }\n\n    /// Returns the type of the returned data.\n    pub fn returned_data_type_tag(&self) -> Option<u8> {\n        self.returned_data_type_tag\n    }\n\n    /// Returns the error code.\n    pub fn error_code(&self) -> u16 {\n        self.error\n    }\n\n    /// Returns true if the response represents success.\n    pub fn is_success(&self) -> bool {\n        self.error == ErrorCode::NoError as u16\n    }\n\n    /// Returns true if the response indicates the data was not found.\n    pub fn is_not_found(&self) -> bool {\n        self.error == ErrorCode::NotFound as u16\n    }\n\n    #[cfg(test)]\n    pub(crate) fn random(rng: &mut TestRng) -> Self {\n        let error = rng.gen();\n        let returned_data_type_tag = if rng.gen() { None } else { Some(rng.gen()) };\n\n        BinaryResponseHeader {\n            
binary_response_version: Self::BINARY_RESPONSE_VERSION,\n            error,\n            returned_data_type_tag,\n        }\n    }\n}\n\nimpl ToBytes for BinaryResponseHeader {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        let Self {\n            binary_response_version,\n            error,\n            returned_data_type_tag,\n        } = self;\n\n        binary_response_version.write_bytes(writer)?;\n        error.write_bytes(writer)?;\n        returned_data_type_tag.write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.binary_response_version.serialized_length()\n            + self.error.serialized_length()\n            + self.returned_data_type_tag.serialized_length()\n    }\n}\n\nimpl FromBytes for BinaryResponseHeader {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (binary_response_version, remainder) = FromBytes::from_bytes(bytes)?;\n        let (error, remainder) = FromBytes::from_bytes(remainder)?;\n        let (returned_data_type_tag, remainder) = FromBytes::from_bytes(remainder)?;\n\n        Ok((\n            BinaryResponseHeader {\n                binary_response_version,\n                error,\n                returned_data_type_tag,\n            },\n            remainder,\n        ))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::testing::TestRng;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = BinaryResponseHeader::random(rng);\n        bytesrepr::test_serialization_roundtrip(&val);\n    }\n}\n"
  },
  {
    "path": "binary_port/src/command.rs",
    "content": "use core::convert::TryFrom;\n\nuse casper_types::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    Transaction,\n};\n\nuse crate::get_request::GetRequest;\n\n#[cfg(test)]\nuse casper_types::testing::TestRng;\n#[cfg(test)]\nuse rand::Rng;\n\n/// The header of a binary request.\n#[derive(Debug, PartialEq)]\npub struct CommandHeader {\n    header_version: u16,\n    type_tag: u8,\n    id: u16,\n}\n\nimpl CommandHeader {\n    // Defines the current version of the header, in practice defining the current version of the\n    // binary port protocol. Requests with mismatched header version will be dropped.\n    pub const HEADER_VERSION: u16 = 1;\n\n    /// Creates new binary request header.\n    pub fn new(type_tag: CommandTag, id: u16) -> Self {\n        Self {\n            header_version: Self::HEADER_VERSION,\n            type_tag: type_tag.into(),\n            id,\n        }\n    }\n\n    /// Returns the type tag of the request.\n    pub fn type_tag(&self) -> u8 {\n        self.type_tag\n    }\n\n    /// Returns the request id.\n    pub fn id(&self) -> u16 {\n        self.id\n    }\n\n    /// Returns the header version.\n    pub fn version(&self) -> u16 {\n        self.header_version\n    }\n\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn set_binary_request_version(&mut self, version: u16) {\n        self.header_version = version;\n    }\n\n    #[cfg(test)]\n    pub(crate) fn random(rng: &mut TestRng) -> Self {\n        Self {\n            header_version: rng.gen(),\n            type_tag: CommandTag::random(rng).into(),\n            id: rng.gen(),\n        }\n    }\n}\n\nimpl ToBytes for CommandHeader {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.header_version.write_bytes(writer)?;\n        
self.type_tag.write_bytes(writer)?;\n        self.id.write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.header_version.serialized_length()\n            + self.type_tag.serialized_length()\n            + self.id.serialized_length()\n    }\n}\n\nimpl FromBytes for CommandHeader {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (binary_request_version, remainder) = FromBytes::from_bytes(bytes)?;\n        let (type_tag, remainder) = FromBytes::from_bytes(remainder)?;\n        let (id, remainder) = FromBytes::from_bytes(remainder)?;\n        Ok((\n            CommandHeader {\n                header_version: binary_request_version,\n                type_tag,\n                id,\n            },\n            remainder,\n        ))\n    }\n}\n\n/// A request to the binary access interface.\n#[derive(Debug, PartialEq)]\n\npub enum Command {\n    /// Request to get data from the node\n    Get(GetRequest),\n    /// Request to add a transaction into a blockchain.\n    TryAcceptTransaction {\n        /// Transaction to be handled.\n        transaction: Transaction,\n    },\n    /// Request to execute a transaction speculatively.\n    TrySpeculativeExec {\n        /// Transaction to execute.\n        transaction: Transaction,\n    },\n}\n\nimpl Command {\n    /// Returns the type tag of the request.\n    pub fn tag(&self) -> CommandTag {\n        match self {\n            Command::Get(_) => CommandTag::Get,\n            Command::TryAcceptTransaction { .. } => CommandTag::TryAcceptTransaction,\n            Command::TrySpeculativeExec { .. 
} => CommandTag::TrySpeculativeExec,\n        }\n    }\n\n    #[cfg(test)]\n    pub(crate) fn random(rng: &mut TestRng) -> Self {\n        match CommandTag::random(rng) {\n            CommandTag::Get => Self::Get(GetRequest::random(rng)),\n            CommandTag::TryAcceptTransaction => Self::TryAcceptTransaction {\n                transaction: Transaction::random(rng),\n            },\n            CommandTag::TrySpeculativeExec => Self::TrySpeculativeExec {\n                transaction: Transaction::random(rng),\n            },\n        }\n    }\n}\n\nimpl ToBytes for Command {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            Command::Get(inner) => inner.write_bytes(writer),\n            Command::TryAcceptTransaction { transaction } => transaction.write_bytes(writer),\n            Command::TrySpeculativeExec { transaction } => transaction.write_bytes(writer),\n        }\n    }\n\n    fn serialized_length(&self) -> usize {\n        match self {\n            Command::Get(inner) => inner.serialized_length(),\n            Command::TryAcceptTransaction { transaction } => transaction.serialized_length(),\n            Command::TrySpeculativeExec { transaction } => transaction.serialized_length(),\n        }\n    }\n}\n\nimpl TryFrom<(CommandTag, &[u8])> for Command {\n    type Error = bytesrepr::Error;\n\n    fn try_from((tag, bytes): (CommandTag, &[u8])) -> Result<Self, Self::Error> {\n        let (req, remainder) = match tag {\n            CommandTag::Get => {\n                let (get_request, remainder) = FromBytes::from_bytes(bytes)?;\n                (Command::Get(get_request), remainder)\n            }\n            CommandTag::TryAcceptTransaction => {\n                let (transaction, remainder) = 
FromBytes::from_bytes(bytes)?;\n                (Command::TryAcceptTransaction { transaction }, remainder)\n            }\n            CommandTag::TrySpeculativeExec => {\n                let (transaction, remainder) = FromBytes::from_bytes(bytes)?;\n                (Command::TrySpeculativeExec { transaction }, remainder)\n            }\n        };\n        if !remainder.is_empty() {\n            return Err(bytesrepr::Error::LeftOverBytes);\n        }\n        Ok(req)\n    }\n}\n\n/// The type tag of a binary request.\n#[derive(Debug, PartialEq)]\n#[repr(u8)]\npub enum CommandTag {\n    /// Request to get data from the node\n    Get = 0,\n    /// Request to add a transaction into a blockchain.\n    TryAcceptTransaction = 1,\n    /// Request to execute a transaction speculatively.\n    TrySpeculativeExec = 2,\n}\n\nimpl CommandTag {\n    /// Creates a random `CommandTag`.\n    #[cfg(test)]\n    pub fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..3) {\n            0 => CommandTag::Get,\n            1 => CommandTag::TryAcceptTransaction,\n            2 => CommandTag::TrySpeculativeExec,\n            _ => unreachable!(),\n        }\n    }\n}\n\nimpl TryFrom<u8> for CommandTag {\n    type Error = InvalidCommandTag;\n\n    fn try_from(value: u8) -> Result<Self, Self::Error> {\n        match value {\n            0 => Ok(CommandTag::Get),\n            1 => Ok(CommandTag::TryAcceptTransaction),\n            2 => Ok(CommandTag::TrySpeculativeExec),\n            _ => Err(InvalidCommandTag),\n        }\n    }\n}\n\nimpl From<CommandTag> for u8 {\n    fn from(value: CommandTag) -> Self {\n        value as u8\n    }\n}\n\n/// Error raised when trying to convert an invalid u8 into a `CommandTag`.\npub struct InvalidCommandTag;\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::testing::TestRng;\n\n    #[test]\n    fn header_bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = CommandHeader::random(rng);\n  
      bytesrepr::test_serialization_roundtrip(&val);\n    }\n\n    #[test]\n    fn request_bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = Command::random(rng);\n        let bytes = val.to_bytes().expect(\"should serialize\");\n        assert_eq!(Command::try_from((val.tag(), &bytes[..])), Ok(val));\n    }\n}\n"
  },
  {
    "path": "binary_port/src/dictionary_item_identifier.rs",
    "content": "#[cfg(test)]\nuse casper_types::testing::TestRng;\n#[cfg(test)]\nuse rand::Rng;\n\nuse casper_types::{\n    account::AccountHash,\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    DictionaryAddr, EntityAddr, HashAddr, URef,\n};\n\nconst ACCOUNT_NAMED_KEY_TAG: u8 = 0;\nconst CONTRACT_NAMED_KEY_TAG: u8 = 1;\nconst ENTITY_NAMED_KEY_TAG: u8 = 2;\nconst UREF_TAG: u8 = 3;\nconst DICTIONARY_ITEM_TAG: u8 = 4;\n\n/// Options for dictionary item lookups.\n#[derive(Clone, Debug, PartialEq)]\npub enum DictionaryItemIdentifier {\n    /// Lookup a dictionary item via an accounts named keys.\n    AccountNamedKey {\n        /// The account hash.\n        hash: AccountHash,\n        /// The named key under which the dictionary seed URef is stored.\n        dictionary_name: String,\n        /// The dictionary item key formatted as a string.\n        dictionary_item_key: String,\n    },\n    /// Lookup a dictionary item via a contracts named keys.\n    ContractNamedKey {\n        /// The contract hash.\n        hash: HashAddr,\n        /// The named key under which the dictionary seed URef is stored.\n        dictionary_name: String,\n        /// The dictionary item key formatted as a string.\n        dictionary_item_key: String,\n    },\n    /// Lookup a dictionary item via an entities named keys.\n    EntityNamedKey {\n        /// The entity address.\n        addr: EntityAddr,\n        /// The named key under which the dictionary seed URef is stored.\n        dictionary_name: String,\n        /// The dictionary item key formatted as a string.\n        dictionary_item_key: String,\n    },\n    /// Lookup a dictionary item via its seed URef.\n    URef {\n        /// The dictionary's seed URef.\n        seed_uref: URef,\n        /// The dictionary item key formatted as a string.\n        dictionary_item_key: String,\n    },\n    /// Lookup a dictionary item via its unique key.\n    DictionaryItem(DictionaryAddr),\n}\n\nimpl DictionaryItemIdentifier 
{\n    #[cfg(test)]\n    pub(crate) fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..5) {\n            0 => DictionaryItemIdentifier::AccountNamedKey {\n                hash: rng.gen(),\n                dictionary_name: rng.random_string(32..64),\n                dictionary_item_key: rng.random_string(32..64),\n            },\n            1 => DictionaryItemIdentifier::ContractNamedKey {\n                hash: rng.gen(),\n                dictionary_name: rng.random_string(32..64),\n                dictionary_item_key: rng.random_string(32..64),\n            },\n            2 => DictionaryItemIdentifier::EntityNamedKey {\n                addr: rng.gen(),\n                dictionary_name: rng.random_string(32..64),\n                dictionary_item_key: rng.random_string(32..64),\n            },\n            3 => DictionaryItemIdentifier::URef {\n                seed_uref: rng.gen(),\n                dictionary_item_key: rng.random_string(32..64),\n            },\n            4 => DictionaryItemIdentifier::DictionaryItem(rng.gen()),\n            _ => unreachable!(),\n        }\n    }\n}\n\nimpl ToBytes for DictionaryItemIdentifier {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            DictionaryItemIdentifier::AccountNamedKey {\n                hash: key,\n                dictionary_name,\n                dictionary_item_key,\n            } => {\n                ACCOUNT_NAMED_KEY_TAG.write_bytes(writer)?;\n                key.write_bytes(writer)?;\n                dictionary_name.write_bytes(writer)?;\n                dictionary_item_key.write_bytes(writer)\n            }\n            DictionaryItemIdentifier::ContractNamedKey {\n                hash: key,\n                dictionary_name,\n  
              dictionary_item_key,\n            } => {\n                CONTRACT_NAMED_KEY_TAG.write_bytes(writer)?;\n                key.write_bytes(writer)?;\n                dictionary_name.write_bytes(writer)?;\n                dictionary_item_key.write_bytes(writer)\n            }\n            DictionaryItemIdentifier::EntityNamedKey {\n                addr,\n                dictionary_name,\n                dictionary_item_key,\n            } => {\n                ENTITY_NAMED_KEY_TAG.write_bytes(writer)?;\n                addr.write_bytes(writer)?;\n                dictionary_name.write_bytes(writer)?;\n                dictionary_item_key.write_bytes(writer)\n            }\n            DictionaryItemIdentifier::URef {\n                seed_uref,\n                dictionary_item_key,\n            } => {\n                UREF_TAG.write_bytes(writer)?;\n                seed_uref.write_bytes(writer)?;\n                dictionary_item_key.write_bytes(writer)\n            }\n            DictionaryItemIdentifier::DictionaryItem(addr) => {\n                DICTIONARY_ITEM_TAG.write_bytes(writer)?;\n                addr.write_bytes(writer)\n            }\n        }\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                DictionaryItemIdentifier::AccountNamedKey {\n                    hash,\n                    dictionary_name,\n                    dictionary_item_key,\n                } => {\n                    hash.serialized_length()\n                        + dictionary_name.serialized_length()\n                        + dictionary_item_key.serialized_length()\n                }\n                DictionaryItemIdentifier::ContractNamedKey {\n                    hash,\n                    dictionary_name,\n                    dictionary_item_key,\n                } => {\n                    hash.serialized_length()\n                        + dictionary_name.serialized_length()\n                 
       + dictionary_item_key.serialized_length()\n                }\n                DictionaryItemIdentifier::EntityNamedKey {\n                    addr,\n                    dictionary_name,\n                    dictionary_item_key,\n                } => {\n                    addr.serialized_length()\n                        + dictionary_name.serialized_length()\n                        + dictionary_item_key.serialized_length()\n                }\n                DictionaryItemIdentifier::URef {\n                    seed_uref,\n                    dictionary_item_key,\n                } => seed_uref.serialized_length() + dictionary_item_key.serialized_length(),\n                DictionaryItemIdentifier::DictionaryItem(addr) => addr.serialized_length(),\n            }\n    }\n}\n\nimpl FromBytes for DictionaryItemIdentifier {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            ACCOUNT_NAMED_KEY_TAG => {\n                let (key, remainder) = FromBytes::from_bytes(remainder)?;\n                let (dictionary_name, remainder) = String::from_bytes(remainder)?;\n                let (dictionary_item_key, remainder) = String::from_bytes(remainder)?;\n                Ok((\n                    DictionaryItemIdentifier::AccountNamedKey {\n                        hash: key,\n                        dictionary_name,\n                        dictionary_item_key,\n                    },\n                    remainder,\n                ))\n            }\n            CONTRACT_NAMED_KEY_TAG => {\n                let (key, remainder) = FromBytes::from_bytes(remainder)?;\n                let (dictionary_name, remainder) = String::from_bytes(remainder)?;\n                let (dictionary_item_key, remainder) = String::from_bytes(remainder)?;\n                Ok((\n                    DictionaryItemIdentifier::ContractNamedKey {\n                        hash: key,\n   
                     dictionary_name,\n                        dictionary_item_key,\n                    },\n                    remainder,\n                ))\n            }\n            ENTITY_NAMED_KEY_TAG => {\n                let (addr, remainder) = FromBytes::from_bytes(remainder)?;\n                let (dictionary_name, remainder) = String::from_bytes(remainder)?;\n                let (dictionary_item_key, remainder) = String::from_bytes(remainder)?;\n                Ok((\n                    DictionaryItemIdentifier::EntityNamedKey {\n                        addr,\n                        dictionary_name,\n                        dictionary_item_key,\n                    },\n                    remainder,\n                ))\n            }\n            UREF_TAG => {\n                let (seed_uref, remainder) = FromBytes::from_bytes(remainder)?;\n                let (dictionary_item_key, remainder) = String::from_bytes(remainder)?;\n                Ok((\n                    DictionaryItemIdentifier::URef {\n                        seed_uref,\n                        dictionary_item_key,\n                    },\n                    remainder,\n                ))\n            }\n            DICTIONARY_ITEM_TAG => {\n                let (addr, remainder) = FromBytes::from_bytes(remainder)?;\n                Ok((DictionaryItemIdentifier::DictionaryItem(addr), remainder))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::testing::TestRng;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = DictionaryItemIdentifier::random(rng);\n        bytesrepr::test_serialization_roundtrip(&val);\n    }\n}\n"
  },
  {
    "path": "binary_port/src/entity_qualifier.rs",
    "content": "use super::dictionary_item_identifier::DictionaryItemIdentifier;\nuse crate::{KeyPrefix, PurseIdentifier};\n#[cfg(test)]\nuse casper_types::testing::TestRng;\nuse casper_types::{\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    Key, KeyTag,\n};\n#[cfg(test)]\nuse rand::Rng;\n\nconst ITEM_TAG: u8 = 0;\nconst ALL_ITEMS_TAG: u8 = 1;\nconst DICTIONARY_ITEM_TAG: u8 = 2;\nconst BALANCE_TAG: u8 = 3;\nconst ITEMS_BY_PREFIX_TAG: u8 = 4;\n\n/// A request to get data from the global state.\n#[derive(Clone, Debug, PartialEq)]\npub enum GlobalStateEntityQualifier {\n    /// Gets an item from the global state.\n    Item {\n        /// Key under which data is stored.\n        base_key: Key,\n        /// Path under which the value is stored.\n        path: Vec<String>,\n    },\n    /// Get all items under the given key tag.\n    AllItems {\n        /// Key tag\n        key_tag: KeyTag,\n    },\n    /// Get a dictionary item by its identifier.\n    DictionaryItem {\n        /// Dictionary item identifier.\n        identifier: DictionaryItemIdentifier,\n    },\n    /// Get balance by state root and purse.\n    Balance {\n        /// Purse identifier.\n        purse_identifier: PurseIdentifier,\n    },\n    ItemsByPrefix {\n        /// Key prefix to search for.\n        key_prefix: KeyPrefix,\n    },\n}\n\nimpl GlobalStateEntityQualifier {\n    #[cfg(test)]\n    pub(crate) fn random(rng: &mut TestRng) -> Self {\n        let gen_range = TestRng::gen_range(rng, 0..5);\n        random_for_variant(gen_range, rng)\n    }\n}\n\n#[cfg(test)]\nfn random_for_variant(gen_range: u8, rng: &mut TestRng) -> GlobalStateEntityQualifier {\n    match gen_range {\n        ITEM_TAG => {\n            let path_count = rng.gen_range(10..20);\n            GlobalStateEntityQualifier::Item {\n                base_key: rng.gen(),\n                path: std::iter::repeat_with(|| rng.random_string(32..64))\n                    .take(path_count)\n                    
.collect(),\n            }\n        }\n        ALL_ITEMS_TAG => GlobalStateEntityQualifier::AllItems {\n            key_tag: KeyTag::random(rng),\n        },\n        DICTIONARY_ITEM_TAG => GlobalStateEntityQualifier::DictionaryItem {\n            identifier: DictionaryItemIdentifier::random(rng),\n        },\n        BALANCE_TAG => GlobalStateEntityQualifier::Balance {\n            purse_identifier: PurseIdentifier::random(rng),\n        },\n        ITEMS_BY_PREFIX_TAG => GlobalStateEntityQualifier::ItemsByPrefix {\n            key_prefix: KeyPrefix::random(rng),\n        },\n        _ => unreachable!(),\n    }\n}\n\nimpl ToBytes for GlobalStateEntityQualifier {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            GlobalStateEntityQualifier::Item { base_key, path } => {\n                ITEM_TAG.write_bytes(writer)?;\n                base_key.write_bytes(writer)?;\n                path.write_bytes(writer)\n            }\n            GlobalStateEntityQualifier::AllItems { key_tag } => {\n                ALL_ITEMS_TAG.write_bytes(writer)?;\n                key_tag.write_bytes(writer)\n            }\n            GlobalStateEntityQualifier::DictionaryItem { identifier } => {\n                DICTIONARY_ITEM_TAG.write_bytes(writer)?;\n                identifier.write_bytes(writer)\n            }\n            GlobalStateEntityQualifier::Balance { purse_identifier } => {\n                BALANCE_TAG.write_bytes(writer)?;\n                purse_identifier.write_bytes(writer)\n            }\n            GlobalStateEntityQualifier::ItemsByPrefix { key_prefix } => {\n                ITEMS_BY_PREFIX_TAG.write_bytes(writer)?;\n                key_prefix.write_bytes(writer)\n            }\n        }\n    }\n\n    fn 
serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                GlobalStateEntityQualifier::Item { base_key, path } => {\n                    base_key.serialized_length() + path.serialized_length()\n                }\n                GlobalStateEntityQualifier::AllItems { key_tag } => key_tag.serialized_length(),\n                GlobalStateEntityQualifier::DictionaryItem { identifier } => {\n                    identifier.serialized_length()\n                }\n                GlobalStateEntityQualifier::Balance { purse_identifier } => {\n                    purse_identifier.serialized_length()\n                }\n                GlobalStateEntityQualifier::ItemsByPrefix { key_prefix } => {\n                    key_prefix.serialized_length()\n                }\n            }\n    }\n}\n\nimpl FromBytes for GlobalStateEntityQualifier {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            ITEM_TAG => {\n                let (base_key, remainder) = FromBytes::from_bytes(remainder)?;\n                let (path, remainder) = FromBytes::from_bytes(remainder)?;\n                Ok((\n                    GlobalStateEntityQualifier::Item { base_key, path },\n                    remainder,\n                ))\n            }\n            ALL_ITEMS_TAG => {\n                let (key_tag, remainder) = FromBytes::from_bytes(remainder)?;\n                Ok((GlobalStateEntityQualifier::AllItems { key_tag }, remainder))\n            }\n            DICTIONARY_ITEM_TAG => {\n                let (identifier, remainder) = FromBytes::from_bytes(remainder)?;\n                Ok((\n                    GlobalStateEntityQualifier::DictionaryItem { identifier },\n                    remainder,\n                ))\n            }\n            BALANCE_TAG => {\n                let (purse_identifier, remainder) = 
FromBytes::from_bytes(remainder)?;\n                Ok((\n                    GlobalStateEntityQualifier::Balance { purse_identifier },\n                    remainder,\n                ))\n            }\n            ITEMS_BY_PREFIX_TAG => {\n                let (key_prefix, remainder) = FromBytes::from_bytes(remainder)?;\n                Ok((\n                    GlobalStateEntityQualifier::ItemsByPrefix { key_prefix },\n                    remainder,\n                ))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::testing::TestRng;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        for i in 0..5 {\n            let qualifier = random_for_variant(i, rng);\n            bytesrepr::test_serialization_roundtrip(&qualifier);\n        }\n    }\n}\n"
  },
  {
    "path": "binary_port/src/era_identifier.rs",
    "content": "#[cfg(test)]\nuse casper_types::testing::TestRng;\n#[cfg(test)]\nuse rand::Rng;\n\nuse casper_types::{\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    BlockIdentifier, EraId,\n};\n\nconst ERA_TAG: u8 = 0;\nconst BLOCK_TAG: u8 = 1;\n\n/// Identifier for an era.\n#[derive(Clone, Debug, PartialEq)]\npub enum EraIdentifier {\n    Era(EraId),\n    Block(BlockIdentifier),\n}\n\nimpl EraIdentifier {\n    #[cfg(test)]\n    pub(crate) fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..2) {\n            ERA_TAG => EraIdentifier::Era(EraId::random(rng)),\n            BLOCK_TAG => EraIdentifier::Block(BlockIdentifier::random(rng)),\n            _ => unreachable!(),\n        }\n    }\n}\n\nimpl ToBytes for EraIdentifier {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            EraIdentifier::Era(era_id) => {\n                ERA_TAG.write_bytes(writer)?;\n                era_id.write_bytes(writer)\n            }\n            EraIdentifier::Block(block_id) => {\n                BLOCK_TAG.write_bytes(writer)?;\n                block_id.write_bytes(writer)\n            }\n        }\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                EraIdentifier::Era(era_id) => era_id.serialized_length(),\n                EraIdentifier::Block(block_id) => block_id.serialized_length(),\n            }\n    }\n}\n\nimpl FromBytes for EraIdentifier {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            ERA_TAG => {\n                let (era_id, remainder) = EraId::from_bytes(remainder)?;\n                
Ok((EraIdentifier::Era(era_id), remainder))\n            }\n            BLOCK_TAG => {\n                let (block_id, remainder) = BlockIdentifier::from_bytes(remainder)?;\n                Ok((EraIdentifier::Block(block_id), remainder))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::testing::TestRng;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = EraIdentifier::random(rng);\n        bytesrepr::test_serialization_roundtrip(&val);\n    }\n}\n"
  },
  {
    "path": "binary_port/src/error.rs",
    "content": "use thiserror::Error;\n\n#[derive(Error, Debug)]\npub enum Error {\n    #[error(\"Invalid command tag ({0})\")]\n    InvalidCommandTag(u8),\n    #[error(\"Request too large: allowed {allowed} bytes, got {got} bytes\")]\n    RequestTooLarge { allowed: u32, got: u32 },\n    #[error(\"Empty request\")]\n    EmptyRequest,\n    #[error(transparent)]\n    Io(#[from] std::io::Error),\n    #[error(transparent)]\n    BytesRepr(#[from] casper_types::bytesrepr::Error),\n}\n"
  },
  {
    "path": "binary_port/src/error_code.rs",
    "content": "use core::{convert::TryFrom, fmt};\n\nuse casper_types::{InvalidDeploy, InvalidTransaction, InvalidTransactionV1};\n\nuse num_derive::FromPrimitive;\nuse num_traits::FromPrimitive;\n#[cfg(test)]\nuse strum_macros::EnumIter;\n\n/// The error code indicating the result of handling the binary request.\n#[derive(Debug, Copy, Clone, thiserror::Error, Eq, PartialEq, FromPrimitive)]\n#[repr(u16)]\n#[cfg_attr(test, derive(EnumIter))]\npub enum ErrorCode {\n    /// Request executed correctly.\n    #[error(\"request executed correctly\")]\n    NoError = 0,\n    /// This function is disabled.\n    #[error(\"this function is disabled\")]\n    FunctionDisabled = 1,\n    /// Data not found.\n    #[error(\"data not found\")]\n    NotFound = 2,\n    /// Root not found.\n    #[error(\"root not found\")]\n    RootNotFound = 3,\n    /// Invalid item variant.\n    #[error(\"invalid item variant\")]\n    InvalidItemVariant = 4,\n    /// Wasm preprocessing.\n    #[error(\"wasm preprocessing\")]\n    WasmPreprocessing = 5,\n    /// Internal error.\n    #[error(\"internal error\")]\n    InternalError = 6,\n    /// The query failed.\n    #[error(\"the query failed\")]\n    FailedQuery = 7,\n    /// Bad request.\n    #[error(\"bad request\")]\n    BadRequest = 8,\n    /// Received an unsupported type of request.\n    #[error(\"unsupported request\")]\n    UnsupportedRequest = 9,\n    /// Dictionary URef not found.\n    #[error(\"dictionary URef not found\")]\n    DictionaryURefNotFound = 10,\n    /// This node has no complete blocks.\n    #[error(\"no complete blocks\")]\n    NoCompleteBlocks = 11,\n    /// The deploy had an invalid chain name\n    #[error(\"the deploy had an invalid chain name\")]\n    InvalidDeployChainName = 12,\n    /// Deploy dependencies are no longer supported\n    #[error(\"the dependencies for this transaction are no longer supported\")]\n    InvalidDeployDependenciesNoLongerSupported = 13,\n    /// The deploy sent to the network had an excessive 
size\n    #[error(\"the deploy had an excessive size\")]\n    InvalidDeployExcessiveSize = 14,\n    /// The deploy sent to the network had an excessive time to live\n    #[error(\"the deploy had an excessive time to live\")]\n    InvalidDeployExcessiveTimeToLive = 15,\n    /// The deploy sent to the network had a timestamp referencing a time that has yet to occur.\n    #[error(\"the deploys timestamp is in the future\")]\n    InvalidDeployTimestampInFuture = 16,\n    /// The deploy sent to the network had an invalid body hash\n    #[error(\"the deploy had an invalid body hash\")]\n    InvalidDeployBodyHash = 17,\n    /// The deploy sent to the network had an invalid deploy hash i.e. the provided deploy hash\n    /// didn't match the derived deploy hash\n    #[error(\"the deploy had an invalid deploy hash\")]\n    InvalidDeployHash = 18,\n    /// The deploy sent to the network had an empty approval set\n    #[error(\"the deploy had no approvals\")]\n    InvalidDeployEmptyApprovals = 19,\n    /// The deploy sent to the network had an invalid approval\n    #[error(\"the deploy had an invalid approval\")]\n    InvalidDeployApproval = 20,\n    /// The deploy sent to the network had an excessive session args length\n    #[error(\"the deploy had an excessive session args length\")]\n    InvalidDeployExcessiveSessionArgsLength = 21,\n    /// The deploy sent to the network had an excessive payment args length\n    #[error(\"the deploy had an excessive payment args length\")]\n    InvalidDeployExcessivePaymentArgsLength = 22,\n    /// The deploy sent to the network had a missing payment amount\n    #[error(\"the deploy had a missing payment amount\")]\n    InvalidDeployMissingPaymentAmount = 23,\n    /// The deploy sent to the network had a payment amount that was not parseable\n    #[error(\"the deploy sent to the network had a payment amount that was unable to be parsed\")]\n    InvalidDeployFailedToParsePaymentAmount = 24,\n    /// The deploy sent to the network exceeded 
the block gas limit\n    #[error(\"the deploy sent to the network exceeded the block gas limit\")]\n    InvalidDeployExceededBlockGasLimit = 25,\n    /// The deploy sent to the network was missing a transfer amount\n    #[error(\"the deploy sent to the network was missing a transfer amount\")]\n    InvalidDeployMissingTransferAmount = 26,\n    /// The deploy sent to the network had a transfer amount that was unable to be parseable\n    #[error(\"the deploy sent to the network had a transfer amount that was unable to be parsed\")]\n    InvalidDeployFailedToParseTransferAmount = 27,\n    /// The deploy sent to the network had a transfer amount that was insufficient\n    #[error(\"the deploy sent to the network had an insufficient transfer amount\")]\n    InvalidDeployInsufficientTransferAmount = 28,\n    /// The deploy sent to the network had excessive approvals\n    #[error(\"the deploy sent to the network had excessive approvals\")]\n    InvalidDeployExcessiveApprovals = 29,\n    /// The network was unable to calculate the gas limit for the deploy\n    #[error(\"the network was unable to calculate the gas limit associated with the deploy\")]\n    InvalidDeployUnableToCalculateGasLimit = 30,\n    /// The network was unable to calculate the gas cost for the deploy\n    #[error(\"the network was unable to calculate the gas cost for the deploy\")]\n    InvalidDeployUnableToCalculateGasCost = 31,\n    /// The deploy sent to the network was invalid for an unspecified reason\n    #[error(\"the deploy sent to the network was invalid for an unspecified reason\")]\n    InvalidDeployUnspecified = 32,\n    /// The transaction sent to the network had an invalid chain name\n    #[error(\"the transaction sent to the network had an invalid chain name\")]\n    InvalidTransactionChainName = 33,\n    /// The transaction sent to the network had an excessive size\n    #[error(\"the transaction sent to the network had an excessive size\")]\n    InvalidTransactionExcessiveSize = 34,\n    
/// The transaction sent to the network had an excessive time to live\n    #[error(\"the transaction sent to the network had an excessive time to live\")]\n    InvalidTransactionExcessiveTimeToLive = 35,\n    /// The transaction sent to the network had a timestamp located in the future.\n    #[error(\"the transaction sent to the network had a timestamp that has not yet occurred\")]\n    InvalidTransactionTimestampInFuture = 36,\n    /// The transaction sent to the network had a provided body hash that conflicted with hash\n    /// derived by the network\n    #[error(\"the transaction sent to the network had an invalid body hash\")]\n    InvalidTransactionBodyHash = 37,\n    /// The transaction sent to the network had a provided hash that conflicted with the hash\n    /// derived by the network\n    #[error(\"the transaction sent to the network had an invalid hash\")]\n    InvalidTransactionHash = 38,\n    /// The transaction sent to the network had an empty approvals set\n    #[error(\"the transaction sent to the network had no approvals\")]\n    InvalidTransactionEmptyApprovals = 39,\n    /// The transaction sent to the network had an invalid approval\n    #[error(\"the transaction sent to the network had an invalid approval\")]\n    InvalidTransactionInvalidApproval = 40,\n    /// The transaction sent to the network had excessive args length\n    #[error(\"the transaction sent to the network had excessive args length\")]\n    InvalidTransactionExcessiveArgsLength = 41,\n    /// The transaction sent to the network had excessive approvals\n    #[error(\"the transaction sent to the network had excessive approvals\")]\n    InvalidTransactionExcessiveApprovals = 42,\n    /// The transaction sent to the network exceeds the block gas limit\n    #[error(\"the transaction sent to the network exceeds the networks block gas limit\")]\n    InvalidTransactionExceedsBlockGasLimit = 43,\n    /// The transaction sent to the network had a missing arg\n    #[error(\"the 
transaction sent to the network was missing an argument\")]\n    InvalidTransactionMissingArg = 44,\n    /// The transaction sent to the network had an argument with an unexpected type\n    #[error(\"the transaction sent to the network had an unexpected argument type\")]\n    InvalidTransactionUnexpectedArgType = 45,\n    /// The transaction sent to the network had an invalid argument\n    #[error(\"the transaction sent to the network had an invalid argument\")]\n    InvalidTransactionInvalidArg = 46,\n    /// The transaction sent to the network had an insufficient transfer amount\n    #[error(\"the transaction sent to the network had an insufficient transfer amount\")]\n    InvalidTransactionInsufficientTransferAmount = 47,\n    /// The transaction sent to the network had a custom entry point when it should have a non\n    /// custom entry point.\n    #[error(\"the native transaction sent to the network should not have a custom entry point\")]\n    InvalidTransactionEntryPointCannotBeCustom = 48,\n    /// The transaction sent to the network had a standard entry point when it must be custom.\n    #[error(\"the non-native transaction sent to the network must have a custom entry point\")]\n    InvalidTransactionEntryPointMustBeCustom = 49,\n    /// The transaction sent to the network had empty module bytes\n    #[error(\"the transaction sent to the network had empty module bytes\")]\n    InvalidTransactionEmptyModuleBytes = 50,\n    /// The transaction sent to the network had an invalid gas price conversion\n    #[error(\"the transaction sent to the network had an invalid gas price conversion\")]\n    InvalidTransactionGasPriceConversion = 51,\n    /// The network was unable to calculate the gas limit for the transaction sent.\n    #[error(\"the network was unable to calculate the gas limit for the transaction sent\")]\n    InvalidTransactionUnableToCalculateGasLimit = 52,\n    /// The network was unable to calculate the gas cost for the transaction sent.\n    
#[error(\"the network was unable to calculate the gas cost for the transaction sent.\")]\n    InvalidTransactionUnableToCalculateGasCost = 53,\n    /// The transaction sent to the network had an invalid pricing mode\n    #[error(\"the transaction sent to the network had an invalid pricing mode\")]\n    InvalidTransactionPricingMode = 54,\n    /// The transaction sent to the network was invalid for an unspecified reason\n    #[error(\"the transaction sent to the network was invalid for an unspecified reason\")]\n    InvalidTransactionUnspecified = 55,\n    /// As the various enums are tagged non_exhaustive, it is possible that in the future none of\n    /// these previous errors cover the error that occurred, therefore we need some catchall in\n    /// the case that nothing else works.\n    #[error(\"the transaction or deploy sent to the network was invalid for an unspecified reason\")]\n    InvalidTransactionOrDeployUnspecified = 56,\n    /// The switch block for the requested era was not found\n    #[error(\"the switch block for the requested era was not found\")]\n    SwitchBlockNotFound = 57,\n    #[error(\"the parent of the switch block for the requested era was not found\")]\n    /// The parent of the switch block for the requested era was not found\n    SwitchBlockParentNotFound = 58,\n    #[error(\"cannot serve rewards stored in V1 format\")]\n    /// Cannot serve rewards stored in V1 format\n    UnsupportedRewardsV1Request = 59,\n    /// Invalid binary request header versions.\n    #[error(\"binary request header versions mismatch\")]\n    CommandHeaderVersionMismatch = 60,\n    /// Blockchain is empty\n    #[error(\"blockchain is empty\")]\n    EmptyBlockchain = 61,\n    /// Expected deploy, but got transaction\n    #[error(\"expected deploy, got transaction\")]\n    ExpectedDeploy = 62,\n    /// Expected transaction, but got deploy\n    #[error(\"expected transaction V1, got deploy\")]\n    ExpectedTransaction = 63,\n    /// Transaction has expired\n    
#[error(\"transaction has expired\")]\n    TransactionExpired = 64,\n    /// Transactions parameters are missing or incorrect\n    #[error(\"missing or incorrect transaction parameters\")]\n    MissingOrIncorrectParameters = 65,\n    /// No such addressable entity\n    #[error(\"no such addressable entity\")]\n    NoSuchAddressableEntity = 66,\n    // No such contract at hash\n    #[error(\"no such contract at hash\")]\n    NoSuchContractAtHash = 67,\n    /// No such entry point\n    #[error(\"no such entry point\")]\n    NoSuchEntryPoint = 68,\n    /// No such package at hash\n    #[error(\"no such package at hash\")]\n    NoSuchPackageAtHash = 69,\n    /// Invalid entity at version\n    #[error(\"invalid entity at version\")]\n    InvalidEntityAtVersion = 70,\n    /// Disabled entity at version\n    #[error(\"disabled entity at version\")]\n    DisabledEntityAtVersion = 71,\n    /// Missing entity at version\n    #[error(\"missing entity at version\")]\n    MissingEntityAtVersion = 72,\n    /// Invalid associated keys\n    #[error(\"invalid associated keys\")]\n    InvalidAssociatedKeys = 73,\n    /// Insufficient signature weight\n    #[error(\"insufficient signature weight\")]\n    InsufficientSignatureWeight = 74,\n    /// Insufficient balance\n    #[error(\"insufficient balance\")]\n    InsufficientBalance = 75,\n    /// Unknown balance\n    #[error(\"unknown balance\")]\n    UnknownBalance = 76,\n    /// Invalid payment variant for deploy\n    #[error(\"invalid payment variant for deploy\")]\n    DeployInvalidPaymentVariant = 77,\n    /// Missing payment amount for deploy\n    #[error(\"missing payment amount for deploy\")]\n    DeployMissingPaymentAmount = 78,\n    /// Failed to parse payment amount for deploy\n    #[error(\"failed to parse payment amount for deploy\")]\n    DeployFailedToParsePaymentAmount = 79,\n    /// Missing transfer target for deploy\n    #[error(\"missing transfer target for deploy\")]\n    DeployMissingTransferTarget = 80,\n    /// 
Missing module bytes for deploy\n    #[error(\"missing module bytes for deploy\")]\n    DeployMissingModuleBytes = 81,\n    /// Entry point cannot be 'call'\n    #[error(\"entry point cannot be 'call'\")]\n    InvalidTransactionEntryPointCannotBeCall = 82,\n    /// Invalid transaction lane\n    #[error(\"invalid transaction lane\")]\n    InvalidTransactionInvalidTransactionLane = 83,\n    /// Gas price tolerance too low\n    #[error(\"gas price tolerance too low\")]\n    GasPriceToleranceTooLow = 84,\n    /// Received V1 Transaction for spec exec.\n    #[error(\"received v1 transaction for speculative execution\")]\n    ReceivedV1Transaction = 85,\n    /// Purse was not found for given identifier.\n    #[error(\"purse was not found for given identifier\")]\n    PurseNotFound = 86,\n    /// Too many requests per second.\n    #[error(\"request was throttled\")]\n    RequestThrottled = 87,\n    /// Expected named arguments.\n    #[error(\"expected named arguments\")]\n    ExpectedNamedArguments = 88,\n    /// Invalid transaction runtime.\n    #[error(\"invalid transaction runtime\")]\n    InvalidTransactionRuntime = 89,\n    /// Key in transfer request malformed\n    #[error(\"malformed transfer record key\")]\n    TransferRecordMalformedKey = 90,\n    /// Malformed information request\n    #[error(\"malformed information request\")]\n    MalformedInformationRequest = 91,\n    /// Malformed binary version\n    #[error(\"not enough bytes to read version of the binary request header\")]\n    TooLittleBytesForRequestHeaderVersion = 92,\n    /// Malformed command header version\n    #[error(\"malformed command header version\")]\n    MalformedCommandHeaderVersion = 93,\n    /// Malformed header\n    #[error(\"malformed command header\")]\n    MalformedCommandHeader = 94,\n    /// Malformed command\n    #[error(\"malformed command\")]\n    MalformedCommand = 95,\n    /// No matching lane for transaction\n    #[error(\"couldn't associate a transaction lane with the 
transaction\")]\n    InvalidTransactionNoLaneMatches = 96,\n    /// Entry point must be 'call'\n    #[error(\"entry point must be 'call'\")]\n    InvalidTransactionEntryPointMustBeCall = 97,\n    /// One of the payloads field cannot be deserialized\n    #[error(\"One of the payloads field cannot be deserialized\")]\n    InvalidTransactionCannotDeserializeField = 98,\n    /// Can't calculate hash of the payload fields\n    #[error(\"Can't calculate hash of the payload fields\")]\n    InvalidTransactionCannotCalculateFieldsHash = 99,\n    /// Unexpected fields in payload\n    #[error(\"Unexpected fields in payload\")]\n    InvalidTransactionUnexpectedFields = 100,\n    /// Expected bytes arguments\n    #[error(\"expected bytes arguments\")]\n    InvalidTransactionExpectedBytesArguments = 101,\n    /// Missing seed field in transaction\n    #[error(\"Missing seed field in transaction\")]\n    InvalidTransactionMissingSeed = 102,\n    /// Pricing mode not supported\n    #[error(\"Pricing mode not supported\")]\n    PricingModeNotSupported = 103,\n    /// Gas limit not supported\n    #[error(\"Gas limit not supported\")]\n    InvalidDeployGasLimitNotSupported = 104,\n    /// Invalid runtime for Transaction::Deploy\n    #[error(\"Invalid runtime for Transaction::Deploy\")]\n    InvalidDeployInvalidRuntime = 105,\n    /// Deploy exceeds wasm lane gas limit\n    #[error(\"Transaction::Deploy exceeds lane gas limit\")]\n    InvalidDeployExceededWasmLaneGasLimit = 106,\n    /// Invalid runtime for Transaction::Deploy\n    #[error(\"Invalid payment amount for Transaction::Deploy\")]\n    InvalidDeployInvalidPaymentAmount = 107,\n    /// Insufficient burn amount for Transaction::V1\n    #[error(\"Insufficient burn amount for Transaction::V1\")]\n    InvalidTransactionInsufficientBurnAmount = 108,\n    /// Invalid payment amount for Transaction::V1\n    #[error(\"Invalid payment amount for Transaction::V1\")]\n    InvalidTransactionInvalidPaymentAmount = 109,\n    /// 
Unexpected entry point for Transaction::V1\n    #[error(\"Unexpected entry point for Transaction::V1\")]\n    InvalidTransactionUnexpectedEntryPoint = 110,\n    /// Cannot serialize transaction\n    #[error(\"Transaction has malformed binary representation\")]\n    TransactionHasMalformedBinaryRepresentation = 111,\n    #[error(\"Transaction includes an argument named amount with a value below a relevant limit\")]\n    InsufficientAmountArgValue = 112,\n    #[error(\n        \"Transaction attempts to set a minimum delegation amount below the lowest allowed value\"\n    )]\n    InvalidMinimumDelegationAmount = 113,\n    #[error(\n        \"Transaction attempts to set a maximum delegation amount above the highest allowed value\"\n    )]\n    InvalidMaximumDelegationAmount = 114,\n    #[error(\"Transaction attempts to set a reserved slots count above the highest allowed value\")]\n    InvalidReservedSlots = 115,\n    #[error(\"Transaction attempts to set a delegation amount above the highest allowed value\")]\n    InvalidDelegationAmount = 116,\n    #[error(\"the transaction invocation target is unsupported under V2 runtime\")]\n    UnsupportedInvocationTarget = 117,\n}\n\nimpl TryFrom<u16> for ErrorCode {\n    type Error = UnknownErrorCode;\n\n    fn try_from(value: u16) -> Result<Self, Self::Error> {\n        FromPrimitive::from_u16(value).ok_or(UnknownErrorCode)\n    }\n}\n\n/// Error indicating that the error code is unknown.\n#[derive(Debug, Clone, Copy)]\npub struct UnknownErrorCode;\n\nimpl fmt::Display for UnknownErrorCode {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        write!(f, \"unknown node error code\")\n    }\n}\n\nimpl std::error::Error for UnknownErrorCode {}\n\nimpl From<InvalidTransaction> for ErrorCode {\n    fn from(value: InvalidTransaction) -> Self {\n        match value {\n            InvalidTransaction::Deploy(invalid_deploy) => ErrorCode::from(invalid_deploy),\n            InvalidTransaction::V1(invalid_transaction) 
=> ErrorCode::from(invalid_transaction),\n            _ => ErrorCode::InvalidTransactionOrDeployUnspecified,\n        }\n    }\n}\n\nimpl From<InvalidDeploy> for ErrorCode {\n    fn from(value: InvalidDeploy) -> Self {\n        match value {\n            InvalidDeploy::InvalidChainName { .. } => ErrorCode::InvalidDeployChainName,\n            InvalidDeploy::DependenciesNoLongerSupported => {\n                ErrorCode::InvalidDeployDependenciesNoLongerSupported\n            }\n            InvalidDeploy::ExcessiveSize(_) => ErrorCode::InvalidDeployExcessiveSize,\n            InvalidDeploy::ExcessiveTimeToLive { .. } => {\n                ErrorCode::InvalidDeployExcessiveTimeToLive\n            }\n            InvalidDeploy::TimestampInFuture { .. } => ErrorCode::InvalidDeployTimestampInFuture,\n            InvalidDeploy::InvalidBodyHash => ErrorCode::InvalidDeployBodyHash,\n            InvalidDeploy::InvalidDeployHash => ErrorCode::InvalidDeployHash,\n            InvalidDeploy::EmptyApprovals => ErrorCode::InvalidDeployEmptyApprovals,\n            InvalidDeploy::InvalidApproval { .. } => ErrorCode::InvalidDeployApproval,\n            InvalidDeploy::ExcessiveSessionArgsLength { .. } => {\n                ErrorCode::InvalidDeployExcessiveSessionArgsLength\n            }\n            InvalidDeploy::ExcessivePaymentArgsLength { .. } => {\n                ErrorCode::InvalidDeployExcessivePaymentArgsLength\n            }\n            InvalidDeploy::MissingPaymentAmount => ErrorCode::InvalidDeployMissingPaymentAmount,\n            InvalidDeploy::FailedToParsePaymentAmount => {\n                ErrorCode::InvalidDeployFailedToParsePaymentAmount\n            }\n            InvalidDeploy::ExceededBlockGasLimit { .. 
} => {\n                ErrorCode::InvalidDeployExceededBlockGasLimit\n            }\n            InvalidDeploy::MissingTransferAmount => ErrorCode::InvalidDeployMissingTransferAmount,\n            InvalidDeploy::FailedToParseTransferAmount => {\n                ErrorCode::InvalidDeployFailedToParseTransferAmount\n            }\n            InvalidDeploy::InsufficientTransferAmount { .. } => {\n                ErrorCode::InvalidDeployInsufficientTransferAmount\n            }\n            InvalidDeploy::ExcessiveApprovals { .. } => ErrorCode::InvalidDeployExcessiveApprovals,\n            InvalidDeploy::UnableToCalculateGasLimit => {\n                ErrorCode::InvalidDeployUnableToCalculateGasLimit\n            }\n            InvalidDeploy::UnableToCalculateGasCost => {\n                ErrorCode::InvalidDeployUnableToCalculateGasCost\n            }\n            InvalidDeploy::GasPriceToleranceTooLow { .. } => ErrorCode::GasPriceToleranceTooLow,\n            InvalidDeploy::GasLimitNotSupported => ErrorCode::InvalidDeployGasLimitNotSupported,\n            InvalidDeploy::InvalidRuntime => ErrorCode::InvalidDeployInvalidRuntime,\n            InvalidDeploy::NoLaneMatch => ErrorCode::InvalidTransactionNoLaneMatches,\n            InvalidDeploy::ExceededLaneGasLimit { .. } => {\n                ErrorCode::InvalidDeployExceededWasmLaneGasLimit\n            }\n            InvalidDeploy::InvalidPaymentAmount => ErrorCode::InvalidDeployInvalidPaymentAmount,\n            InvalidDeploy::PricingModeNotSupported => ErrorCode::PricingModeNotSupported,\n            _ => ErrorCode::InvalidDeployUnspecified,\n        }\n    }\n}\n\nimpl From<InvalidTransactionV1> for ErrorCode {\n    fn from(value: InvalidTransactionV1) -> Self {\n        match value {\n            InvalidTransactionV1::InvalidChainName { .. 
} => ErrorCode::InvalidTransactionChainName,\n            InvalidTransactionV1::ExcessiveSize(_) => ErrorCode::InvalidTransactionExcessiveSize,\n            InvalidTransactionV1::ExcessiveTimeToLive { .. } => {\n                ErrorCode::InvalidTransactionExcessiveTimeToLive\n            }\n            InvalidTransactionV1::TimestampInFuture { .. } => {\n                ErrorCode::InvalidTransactionTimestampInFuture\n            }\n            InvalidTransactionV1::InvalidBodyHash => ErrorCode::InvalidTransactionBodyHash,\n            InvalidTransactionV1::InvalidTransactionHash => ErrorCode::InvalidTransactionHash,\n            InvalidTransactionV1::EmptyApprovals => ErrorCode::InvalidTransactionEmptyApprovals,\n            InvalidTransactionV1::InvalidApproval { .. } => {\n                ErrorCode::InvalidTransactionInvalidApproval\n            }\n            InvalidTransactionV1::ExcessiveArgsLength { .. } => {\n                ErrorCode::InvalidTransactionExcessiveArgsLength\n            }\n            InvalidTransactionV1::ExcessiveApprovals { .. } => {\n                ErrorCode::InvalidTransactionExcessiveApprovals\n            }\n            InvalidTransactionV1::ExceedsBlockGasLimit { .. } => {\n                ErrorCode::InvalidTransactionExceedsBlockGasLimit\n            }\n            InvalidTransactionV1::MissingArg { .. } => ErrorCode::InvalidTransactionMissingArg,\n            InvalidTransactionV1::UnexpectedArgType { .. } => {\n                ErrorCode::InvalidTransactionUnexpectedArgType\n            }\n            InvalidTransactionV1::InvalidArg { .. } => ErrorCode::InvalidTransactionInvalidArg,\n            InvalidTransactionV1::InsufficientTransferAmount { .. } => {\n                ErrorCode::InvalidTransactionInsufficientTransferAmount\n            }\n            InvalidTransactionV1::EntryPointCannotBeCustom { .. 
} => {\n                ErrorCode::InvalidTransactionEntryPointCannotBeCustom\n            }\n            InvalidTransactionV1::EntryPointMustBeCustom { .. } => {\n                ErrorCode::InvalidTransactionEntryPointMustBeCustom\n            }\n            InvalidTransactionV1::EmptyModuleBytes => ErrorCode::InvalidTransactionEmptyModuleBytes,\n            InvalidTransactionV1::GasPriceConversion { .. } => {\n                ErrorCode::InvalidTransactionGasPriceConversion\n            }\n            InvalidTransactionV1::UnableToCalculateGasLimit => {\n                ErrorCode::InvalidTransactionUnableToCalculateGasLimit\n            }\n            InvalidTransactionV1::UnableToCalculateGasCost => {\n                ErrorCode::InvalidTransactionUnableToCalculateGasCost\n            }\n            InvalidTransactionV1::InvalidPricingMode { .. } => {\n                ErrorCode::InvalidTransactionPricingMode\n            }\n            InvalidTransactionV1::EntryPointCannotBeCall => {\n                ErrorCode::InvalidTransactionEntryPointCannotBeCall\n            }\n            InvalidTransactionV1::InvalidTransactionLane(_) => {\n                ErrorCode::InvalidTransactionInvalidTransactionLane\n            }\n            InvalidTransactionV1::GasPriceToleranceTooLow { .. } => {\n                ErrorCode::GasPriceToleranceTooLow\n            }\n            InvalidTransactionV1::ExpectedNamedArguments => ErrorCode::ExpectedNamedArguments,\n            InvalidTransactionV1::InvalidTransactionRuntime { .. } => {\n                ErrorCode::InvalidTransactionRuntime\n            }\n            InvalidTransactionV1::NoLaneMatch => ErrorCode::InvalidTransactionNoLaneMatches,\n            InvalidTransactionV1::EntryPointMustBeCall { .. } => {\n                ErrorCode::InvalidTransactionEntryPointMustBeCall\n            }\n            InvalidTransactionV1::CouldNotDeserializeField { .. 
} => {\n                ErrorCode::InvalidTransactionCannotDeserializeField\n            }\n            InvalidTransactionV1::CannotCalculateFieldsHash => {\n                ErrorCode::InvalidTransactionCannotCalculateFieldsHash\n            }\n            InvalidTransactionV1::UnexpectedTransactionFieldEntries => {\n                ErrorCode::InvalidTransactionUnexpectedFields\n            }\n            InvalidTransactionV1::ExpectedBytesArguments => {\n                ErrorCode::InvalidTransactionExpectedBytesArguments\n            }\n            InvalidTransactionV1::MissingSeed => ErrorCode::InvalidTransactionMissingSeed,\n            InvalidTransactionV1::PricingModeNotSupported => ErrorCode::PricingModeNotSupported,\n            InvalidTransactionV1::InsufficientBurnAmount { .. } => {\n                ErrorCode::InvalidTransactionInsufficientBurnAmount\n            }\n            InvalidTransactionV1::InvalidPaymentAmount => {\n                ErrorCode::InvalidTransactionInvalidPaymentAmount\n            }\n            InvalidTransactionV1::UnexpectedEntryPoint { .. } => {\n                ErrorCode::InvalidTransactionUnexpectedEntryPoint\n            }\n            InvalidTransactionV1::CouldNotSerializeTransaction { .. } => {\n                ErrorCode::TransactionHasMalformedBinaryRepresentation\n            }\n            InvalidTransactionV1::InsufficientAmount { .. } => {\n                ErrorCode::InsufficientAmountArgValue\n            }\n            InvalidTransactionV1::InvalidMinimumDelegationAmount { .. } => {\n                ErrorCode::InvalidMinimumDelegationAmount\n            }\n            InvalidTransactionV1::InvalidMaximumDelegationAmount { .. } => {\n                ErrorCode::InvalidMaximumDelegationAmount\n            }\n            InvalidTransactionV1::InvalidReservedSlots { .. } => ErrorCode::InvalidReservedSlots,\n            InvalidTransactionV1::InvalidDelegationAmount { .. 
} => {\n                ErrorCode::InvalidDelegationAmount\n            }\n            InvalidTransactionV1::UnsupportedInvocationTarget { .. } => {\n                ErrorCode::UnsupportedInvocationTarget\n            }\n            _other => ErrorCode::InvalidTransactionUnspecified,\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::convert::TryFrom;\n\n    use crate::ErrorCode;\n    use casper_types::{InvalidDeploy, InvalidTransactionV1};\n    use strum::IntoEnumIterator;\n\n    #[test]\n    fn verify_all_invalid_transaction_v1_errors_have_error_codes() {\n        for error in InvalidTransactionV1::iter() {\n            let code = ErrorCode::from(error.clone());\n            assert_ne!(\n                code,\n                ErrorCode::InvalidTransactionUnspecified,\n                \"Seems like InvalidTransactionV1 {error:?} has no corresponding error code\"\n            );\n            assert_ne!(\n                code,\n                ErrorCode::InvalidDeployUnspecified,\n                \"Seems like InvalidTransactionV1 {error:?} has no corresponding error code\"\n            )\n        }\n    }\n\n    #[test]\n    fn verify_all_invalid_deploy_errors_have_error_codes() {\n        for error in InvalidDeploy::iter() {\n            let code = ErrorCode::from(error.clone());\n            assert_ne!(\n                code,\n                ErrorCode::InvalidTransactionUnspecified,\n                \"Seems like InvalidDeploy {error} has no corresponding error code\"\n            );\n            assert_ne!(\n                code,\n                ErrorCode::InvalidDeployUnspecified,\n                \"Seems like InvalidDeploy {error} has no corresponding error code\"\n            )\n        }\n    }\n\n    #[test]\n    fn try_from_decoded_all_variants() {\n        for variant in ErrorCode::iter() {\n            let as_int = variant as u16;\n            let decoded = ErrorCode::try_from(as_int);\n            assert!(\n                decoded.is_ok(),\n 
               \"variant {} not covered by TryFrom<u16> implementation\",\n                as_int\n            );\n            assert_eq!(decoded.unwrap(), variant);\n        }\n    }\n}\n"
  },
  {
    "path": "binary_port/src/get_request.rs",
    "content": "use casper_types::{\n    bytesrepr::{self, Bytes, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    Digest,\n};\n\nuse crate::state_request::GlobalStateRequest;\n\n#[cfg(test)]\nuse casper_types::testing::TestRng;\n#[cfg(test)]\nuse rand::Rng;\n\nconst RECORD_TAG: u8 = 0;\nconst INFORMATION_TAG: u8 = 1;\nconst STATE_TAG: u8 = 2;\nconst TRIE_TAG: u8 = 3;\n\n/// A request to get data from the node.\n#[derive(Clone, Debug, PartialEq)]\npub enum GetRequest {\n    /// Retrieves a record from the node.\n    Record {\n        /// Type tag of the record to retrieve.\n        record_type_tag: u16,\n        /// Key encoded into bytes.\n        key: Vec<u8>,\n    },\n    /// Retrieves information from the node.\n    Information {\n        /// Type tag of the information to retrieve.\n        info_type_tag: u16,\n        /// Key encoded into bytes.\n        key: Vec<u8>,\n    },\n    /// Retrieves data from the global state.\n    State(Box<GlobalStateRequest>),\n    /// Get a trie by its Digest.\n    Trie {\n        /// A trie key.\n        trie_key: Digest,\n    },\n}\n\nimpl GetRequest {\n    #[cfg(test)]\n    pub(crate) fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..4) {\n            0 => GetRequest::Record {\n                record_type_tag: rng.gen(),\n                key: rng.random_vec(16..32),\n            },\n            1 => GetRequest::Information {\n                info_type_tag: rng.gen(),\n                key: rng.random_vec(16..32),\n            },\n            2 => GetRequest::State(Box::new(GlobalStateRequest::random(rng))),\n            3 => GetRequest::Trie {\n                trie_key: Digest::random(rng),\n            },\n            _ => unreachable!(),\n        }\n    }\n}\n\nimpl ToBytes for GetRequest {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn 
write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            GetRequest::Record {\n                record_type_tag,\n                key,\n            } => {\n                RECORD_TAG.write_bytes(writer)?;\n                record_type_tag.write_bytes(writer)?;\n                key.write_bytes(writer)\n            }\n            GetRequest::Information { info_type_tag, key } => {\n                INFORMATION_TAG.write_bytes(writer)?;\n                info_type_tag.write_bytes(writer)?;\n                key.write_bytes(writer)\n            }\n            GetRequest::State(req) => {\n                STATE_TAG.write_bytes(writer)?;\n                req.write_bytes(writer)\n            }\n            GetRequest::Trie { trie_key } => {\n                TRIE_TAG.write_bytes(writer)?;\n                trie_key.write_bytes(writer)\n            }\n        }\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                GetRequest::Record {\n                    record_type_tag,\n                    key,\n                } => record_type_tag.serialized_length() + key.serialized_length(),\n                GetRequest::Information { info_type_tag, key } => {\n                    info_type_tag.serialized_length() + key.serialized_length()\n                }\n                GetRequest::State(req) => req.serialized_length(),\n                GetRequest::Trie { trie_key } => trie_key.serialized_length(),\n            }\n    }\n}\n\nimpl FromBytes for GetRequest {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = FromBytes::from_bytes(bytes)?;\n        match tag {\n            RECORD_TAG => {\n                let (record_type_tag, remainder) = FromBytes::from_bytes(remainder)?;\n                let (key, remainder) = Bytes::from_bytes(remainder)?;\n                Ok((\n                    GetRequest::Record 
{\n                        record_type_tag,\n                        key: key.into(),\n                    },\n                    remainder,\n                ))\n            }\n            INFORMATION_TAG => {\n                let (info_type_tag, remainder) = FromBytes::from_bytes(remainder)?;\n                let (key, remainder) = Bytes::from_bytes(remainder)?;\n                Ok((\n                    GetRequest::Information {\n                        info_type_tag,\n                        key: key.into(),\n                    },\n                    remainder,\n                ))\n            }\n            STATE_TAG => {\n                let (req, remainder) = FromBytes::from_bytes(remainder)?;\n                Ok((GetRequest::State(Box::new(req)), remainder))\n            }\n            TRIE_TAG => {\n                let (trie_key, remainder) = FromBytes::from_bytes(remainder)?;\n                Ok((GetRequest::Trie { trie_key }, remainder))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::testing::TestRng;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = GetRequest::random(rng);\n        bytesrepr::test_serialization_roundtrip(&val);\n    }\n}\n"
  },
  {
    "path": "binary_port/src/global_state_query_result.rs",
    "content": "//! The result of the query for the global state value.\n\nuse casper_types::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    global_state::TrieMerkleProof,\n    Key, StoredValue,\n};\n\n#[cfg(test)]\nuse casper_types::testing::TestRng;\n\n#[cfg(test)]\nuse casper_types::{ByteCode, ByteCodeKind};\nuse serde::Serialize;\n\n/// Carries the successful result of the global state query.\n#[derive(Debug, PartialEq, Clone, Serialize)]\npub struct GlobalStateQueryResult {\n    /// Stored value.\n    value: StoredValue,\n    /// Proof.\n    merkle_proof: Vec<TrieMerkleProof<Key, StoredValue>>,\n}\n\nimpl GlobalStateQueryResult {\n    /// Creates the global state query result.\n    pub fn new(value: StoredValue, merkle_proof: Vec<TrieMerkleProof<Key, StoredValue>>) -> Self {\n        Self {\n            value,\n            merkle_proof,\n        }\n    }\n\n    /// Returns the stored value.\n    pub fn value(&self) -> &StoredValue {\n        &self.value\n    }\n\n    /// Returns the stored value and the merkle proof.\n    pub fn into_inner(self) -> (StoredValue, Vec<TrieMerkleProof<Key, StoredValue>>) {\n        (self.value, self.merkle_proof)\n    }\n\n    #[cfg(test)]\n    pub(crate) fn random_invalid(rng: &mut TestRng) -> Self {\n        use casper_types::{global_state::TrieMerkleProofStep, CLValue};\n        use rand::Rng;\n        // Note: This does NOT create a logically-valid struct. 
Instance created by this function\n        // should be used in `bytesrepr` tests only.\n\n        let mut merkle_proof = vec![];\n        for _ in 0..rng.gen_range(0..10) {\n            let stored_value = StoredValue::CLValue(\n                CLValue::from_t(rng.gen::<i32>()).expect(\"should create CLValue\"),\n            );\n            let steps = (0..rng.gen_range(0..10))\n                .map(|_| TrieMerkleProofStep::random(rng))\n                .collect();\n            merkle_proof.push(TrieMerkleProof::new(rng.gen(), stored_value, steps));\n        }\n\n        Self {\n            value: StoredValue::ByteCode(ByteCode::new(\n                ByteCodeKind::V1CasperWasm,\n                rng.random_vec(10..20),\n            )),\n            merkle_proof,\n        }\n    }\n}\n\nimpl ToBytes for GlobalStateQueryResult {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        let GlobalStateQueryResult {\n            value,\n            merkle_proof,\n        } = self;\n        value.write_bytes(writer)?;\n        merkle_proof.write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.value.serialized_length() + self.merkle_proof.serialized_length()\n    }\n}\n\nimpl FromBytes for GlobalStateQueryResult {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (value, remainder) = FromBytes::from_bytes(bytes)?;\n        let (merkle_proof, remainder) = FromBytes::from_bytes(remainder)?;\n        Ok((\n            GlobalStateQueryResult {\n                value,\n                merkle_proof,\n            },\n            remainder,\n        ))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::testing::TestRng;\n\n    #[test]\n    fn 
bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = GlobalStateQueryResult::random_invalid(rng);\n        bytesrepr::test_serialization_roundtrip(&val);\n    }\n}\n"
  },
  {
    "path": "binary_port/src/information_request.rs",
    "content": "use core::convert::TryFrom;\n\n#[cfg(test)]\nuse rand::Rng;\n\nuse crate::{get_request::GetRequest, EraIdentifier};\n#[cfg(test)]\nuse casper_types::testing::TestRng;\nuse casper_types::{\n    account::AccountHash,\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    contracts::{ContractHash, ContractPackageHash},\n    system::auction::DelegatorKind,\n    BlockIdentifier, EntityAddr, GlobalStateIdentifier, PackageAddr, PublicKey, TransactionHash,\n};\n\n/// Request for information from the node.\n#[derive(Clone, Debug, PartialEq)]\npub enum InformationRequest {\n    /// Returns the block header by an identifier, no identifier indicates the latest block.\n    BlockHeader(Option<BlockIdentifier>),\n    /// Returns the block with signatures by an identifier, no identifier indicates the latest\n    /// block.\n    BlockWithSignatures(Option<BlockIdentifier>),\n    /// Returns a transaction with approvals and execution info for a given hash.\n    Transaction {\n        /// Hash of the transaction to retrieve.\n        hash: TransactionHash,\n        /// Whether to return the deploy with the finalized approvals substituted.\n        with_finalized_approvals: bool,\n    },\n    /// Returns connected peers.\n    Peers,\n    /// Returns node uptime.\n    Uptime,\n    /// Returns last progress of the sync process.\n    LastProgress,\n    /// Returns current state of the main reactor.\n    ReactorState,\n    /// Returns network name.\n    NetworkName,\n    /// Returns consensus validator changes.\n    ConsensusValidatorChanges,\n    /// Returns status of the BlockSynchronizer.\n    BlockSynchronizerStatus,\n    /// Returns the available block range.\n    AvailableBlockRange,\n    /// Returns info about next upgrade.\n    NextUpgrade,\n    /// Returns consensus status.\n    ConsensusStatus,\n    /// Returns chainspec raw bytes.\n    ChainspecRawBytes,\n    /// Returns the status information of the node.\n    NodeStatus,\n    /// Returns the 
latest switch block header.\n    LatestSwitchBlockHeader,\n    /// Returns the reward for a validator or a delegator in a specific era.\n    Reward {\n        /// Identifier of the era to get the reward for. Must point to either a switch block or\n        /// a valid `EraId`. If `None`, the reward for the latest switch block is returned.\n        era_identifier: Option<EraIdentifier>,\n        /// Public key of the validator to get the reward for.\n        validator: Box<PublicKey>,\n        /// Identity of the delegator to get the reward for.\n        /// If `None`, the reward for the validator is returned.\n        delegator: Option<Box<DelegatorKind>>,\n    },\n    /// Returns the current Casper protocol version.\n    ProtocolVersion,\n    /// Returns the contract package by an identifier.\n    Package {\n        /// Global state identifier, `None` means \"latest block state\".\n        state_identifier: Option<GlobalStateIdentifier>,\n        /// Identifier of the contract package to retrieve.\n        identifier: PackageIdentifier,\n    },\n    /// Returns the entity by an identifier.\n    Entity {\n        /// Global state identifier, `None` means \"latest block state\".\n        state_identifier: Option<GlobalStateIdentifier>,\n        /// Identifier of the entity to retrieve.\n        identifier: EntityIdentifier,\n        /// Whether to return the bytecode with the entity.\n        include_bytecode: bool,\n    },\n}\n\nimpl InformationRequest {\n    /// Returns the tag of the request.\n    pub fn tag(&self) -> InformationRequestTag {\n        match self {\n            InformationRequest::BlockHeader(_) => InformationRequestTag::BlockHeader,\n            InformationRequest::BlockWithSignatures(_) => {\n                InformationRequestTag::BlockWithSignatures\n            }\n            InformationRequest::Transaction { .. 
} => InformationRequestTag::Transaction,\n            InformationRequest::Peers => InformationRequestTag::Peers,\n            InformationRequest::Uptime => InformationRequestTag::Uptime,\n            InformationRequest::LastProgress => InformationRequestTag::LastProgress,\n            InformationRequest::ReactorState => InformationRequestTag::ReactorState,\n            InformationRequest::NetworkName => InformationRequestTag::NetworkName,\n            InformationRequest::ConsensusValidatorChanges => {\n                InformationRequestTag::ConsensusValidatorChanges\n            }\n            InformationRequest::BlockSynchronizerStatus => {\n                InformationRequestTag::BlockSynchronizerStatus\n            }\n            InformationRequest::AvailableBlockRange => InformationRequestTag::AvailableBlockRange,\n            InformationRequest::NextUpgrade => InformationRequestTag::NextUpgrade,\n            InformationRequest::ConsensusStatus => InformationRequestTag::ConsensusStatus,\n            InformationRequest::ChainspecRawBytes => InformationRequestTag::ChainspecRawBytes,\n            InformationRequest::NodeStatus => InformationRequestTag::NodeStatus,\n            InformationRequest::LatestSwitchBlockHeader => {\n                InformationRequestTag::LatestSwitchBlockHeader\n            }\n            InformationRequest::Reward { .. } => InformationRequestTag::Reward,\n            InformationRequest::ProtocolVersion => InformationRequestTag::ProtocolVersion,\n            InformationRequest::Package { .. } => InformationRequestTag::Package,\n            InformationRequest::Entity { .. 
} => InformationRequestTag::Entity,\n        }\n    }\n\n    #[cfg(test)]\n    pub(crate) fn random(rng: &mut TestRng) -> Self {\n        match InformationRequestTag::random(rng) {\n            InformationRequestTag::BlockHeader => InformationRequest::BlockHeader(\n                rng.gen::<bool>().then(|| BlockIdentifier::random(rng)),\n            ),\n            InformationRequestTag::BlockWithSignatures => InformationRequest::BlockWithSignatures(\n                rng.gen::<bool>().then(|| BlockIdentifier::random(rng)),\n            ),\n            InformationRequestTag::Transaction => InformationRequest::Transaction {\n                hash: TransactionHash::random(rng),\n                with_finalized_approvals: rng.gen(),\n            },\n            InformationRequestTag::Peers => InformationRequest::Peers,\n            InformationRequestTag::Uptime => InformationRequest::Uptime,\n            InformationRequestTag::LastProgress => InformationRequest::LastProgress,\n            InformationRequestTag::ReactorState => InformationRequest::ReactorState,\n            InformationRequestTag::NetworkName => InformationRequest::NetworkName,\n            InformationRequestTag::ConsensusValidatorChanges => {\n                InformationRequest::ConsensusValidatorChanges\n            }\n            InformationRequestTag::BlockSynchronizerStatus => {\n                InformationRequest::BlockSynchronizerStatus\n            }\n            InformationRequestTag::AvailableBlockRange => InformationRequest::AvailableBlockRange,\n            InformationRequestTag::NextUpgrade => InformationRequest::NextUpgrade,\n            InformationRequestTag::ConsensusStatus => InformationRequest::ConsensusStatus,\n            InformationRequestTag::ChainspecRawBytes => InformationRequest::ChainspecRawBytes,\n            InformationRequestTag::NodeStatus => InformationRequest::NodeStatus,\n            InformationRequestTag::LatestSwitchBlockHeader => {\n                
InformationRequest::LatestSwitchBlockHeader\n            }\n            InformationRequestTag::Reward => InformationRequest::Reward {\n                era_identifier: rng.gen::<bool>().then(|| EraIdentifier::random(rng)),\n                validator: PublicKey::random(rng).into(),\n                delegator: rng\n                    .gen::<bool>()\n                    .then(|| Box::new(DelegatorKind::PublicKey(PublicKey::random(rng)))),\n            },\n            InformationRequestTag::ProtocolVersion => InformationRequest::ProtocolVersion,\n            InformationRequestTag::Package => InformationRequest::Package {\n                state_identifier: rng\n                    .gen::<bool>()\n                    .then(|| GlobalStateIdentifier::random(rng)),\n                identifier: PackageIdentifier::random(rng),\n            },\n            InformationRequestTag::Entity => InformationRequest::Entity {\n                state_identifier: rng\n                    .gen::<bool>()\n                    .then(|| GlobalStateIdentifier::random(rng)),\n                identifier: EntityIdentifier::random(rng),\n                include_bytecode: rng.gen(),\n            },\n        }\n    }\n}\n\nimpl ToBytes for InformationRequest {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            InformationRequest::BlockHeader(block_identifier) => {\n                block_identifier.write_bytes(writer)\n            }\n            InformationRequest::BlockWithSignatures(block_identifier) => {\n                block_identifier.write_bytes(writer)\n            }\n            InformationRequest::Transaction {\n                hash,\n                with_finalized_approvals,\n            } => {\n                
hash.write_bytes(writer)?;\n                with_finalized_approvals.write_bytes(writer)\n            }\n            InformationRequest::Peers\n            | InformationRequest::Uptime\n            | InformationRequest::LastProgress\n            | InformationRequest::ReactorState\n            | InformationRequest::NetworkName\n            | InformationRequest::ConsensusValidatorChanges\n            | InformationRequest::BlockSynchronizerStatus\n            | InformationRequest::AvailableBlockRange\n            | InformationRequest::NextUpgrade\n            | InformationRequest::ConsensusStatus\n            | InformationRequest::ChainspecRawBytes\n            | InformationRequest::NodeStatus\n            | InformationRequest::LatestSwitchBlockHeader\n            | InformationRequest::ProtocolVersion => Ok(()),\n            InformationRequest::Reward {\n                era_identifier,\n                validator,\n                delegator,\n            } => {\n                era_identifier.write_bytes(writer)?;\n                validator.write_bytes(writer)?;\n                delegator.as_deref().write_bytes(writer)?;\n                Ok(())\n            }\n            InformationRequest::Package {\n                state_identifier,\n                identifier,\n            } => {\n                state_identifier.write_bytes(writer)?;\n                identifier.write_bytes(writer)\n            }\n            InformationRequest::Entity {\n                state_identifier,\n                identifier,\n                include_bytecode,\n            } => {\n                state_identifier.write_bytes(writer)?;\n                identifier.write_bytes(writer)?;\n                include_bytecode.write_bytes(writer)\n            }\n        }\n    }\n\n    fn serialized_length(&self) -> usize {\n        match self {\n            InformationRequest::BlockHeader(block_identifier) => {\n                block_identifier.serialized_length()\n            }\n            
InformationRequest::BlockWithSignatures(block_identifier) => {\n                block_identifier.serialized_length()\n            }\n            InformationRequest::Transaction {\n                hash,\n                with_finalized_approvals,\n            } => hash.serialized_length() + with_finalized_approvals.serialized_length(),\n            InformationRequest::Peers\n            | InformationRequest::Uptime\n            | InformationRequest::LastProgress\n            | InformationRequest::ReactorState\n            | InformationRequest::NetworkName\n            | InformationRequest::ConsensusValidatorChanges\n            | InformationRequest::BlockSynchronizerStatus\n            | InformationRequest::AvailableBlockRange\n            | InformationRequest::NextUpgrade\n            | InformationRequest::ConsensusStatus\n            | InformationRequest::ChainspecRawBytes\n            | InformationRequest::NodeStatus\n            | InformationRequest::LatestSwitchBlockHeader\n            | InformationRequest::ProtocolVersion => 0,\n            InformationRequest::Reward {\n                era_identifier,\n                validator,\n                delegator,\n            } => {\n                era_identifier.serialized_length()\n                    + validator.serialized_length()\n                    + delegator.as_deref().serialized_length()\n            }\n            InformationRequest::Package {\n                state_identifier,\n                identifier,\n            } => state_identifier.serialized_length() + identifier.serialized_length(),\n            InformationRequest::Entity {\n                state_identifier,\n                identifier,\n                include_bytecode,\n            } => {\n                state_identifier.serialized_length()\n                    + identifier.serialized_length()\n                    + include_bytecode.serialized_length()\n            }\n        }\n    }\n}\n\nimpl TryFrom<(InformationRequestTag, &[u8])> for 
InformationRequest {\n    type Error = bytesrepr::Error;\n\n    fn try_from((tag, key_bytes): (InformationRequestTag, &[u8])) -> Result<Self, Self::Error> {\n        let (req, remainder) = match tag {\n            InformationRequestTag::BlockHeader => {\n                let (block_identifier, remainder) = FromBytes::from_bytes(key_bytes)?;\n                (InformationRequest::BlockHeader(block_identifier), remainder)\n            }\n            InformationRequestTag::BlockWithSignatures => {\n                let (block_identifier, remainder) = FromBytes::from_bytes(key_bytes)?;\n                (\n                    InformationRequest::BlockWithSignatures(block_identifier),\n                    remainder,\n                )\n            }\n            InformationRequestTag::Transaction => {\n                let (hash, remainder) = FromBytes::from_bytes(key_bytes)?;\n                let (with_finalized_approvals, remainder) = FromBytes::from_bytes(remainder)?;\n                (\n                    InformationRequest::Transaction {\n                        hash,\n                        with_finalized_approvals,\n                    },\n                    remainder,\n                )\n            }\n            InformationRequestTag::Peers => (InformationRequest::Peers, key_bytes),\n            InformationRequestTag::Uptime => (InformationRequest::Uptime, key_bytes),\n            InformationRequestTag::LastProgress => (InformationRequest::LastProgress, key_bytes),\n            InformationRequestTag::ReactorState => (InformationRequest::ReactorState, key_bytes),\n            InformationRequestTag::NetworkName => (InformationRequest::NetworkName, key_bytes),\n            InformationRequestTag::ConsensusValidatorChanges => {\n                (InformationRequest::ConsensusValidatorChanges, key_bytes)\n            }\n            InformationRequestTag::BlockSynchronizerStatus => {\n                (InformationRequest::BlockSynchronizerStatus, key_bytes)\n            
}\n            InformationRequestTag::AvailableBlockRange => {\n                (InformationRequest::AvailableBlockRange, key_bytes)\n            }\n            InformationRequestTag::NextUpgrade => (InformationRequest::NextUpgrade, key_bytes),\n            InformationRequestTag::ConsensusStatus => {\n                (InformationRequest::ConsensusStatus, key_bytes)\n            }\n            InformationRequestTag::ChainspecRawBytes => {\n                (InformationRequest::ChainspecRawBytes, key_bytes)\n            }\n            InformationRequestTag::NodeStatus => (InformationRequest::NodeStatus, key_bytes),\n            InformationRequestTag::LatestSwitchBlockHeader => {\n                (InformationRequest::LatestSwitchBlockHeader, key_bytes)\n            }\n            InformationRequestTag::Reward => {\n                let (era_identifier, remainder) = <Option<EraIdentifier>>::from_bytes(key_bytes)?;\n                let (validator, remainder) = PublicKey::from_bytes(remainder)?;\n                let (delegator, remainder) = <Option<DelegatorKind>>::from_bytes(remainder)?;\n                (\n                    InformationRequest::Reward {\n                        era_identifier,\n                        validator: Box::new(validator),\n                        delegator: delegator.map(Box::new),\n                    },\n                    remainder,\n                )\n            }\n            InformationRequestTag::ProtocolVersion => {\n                (InformationRequest::ProtocolVersion, key_bytes)\n            }\n            InformationRequestTag::Package => {\n                let (state_identifier, remainder) = FromBytes::from_bytes(key_bytes)?;\n                let (identifier, remainder) = FromBytes::from_bytes(remainder)?;\n                (\n                    InformationRequest::Package {\n                        state_identifier,\n                        identifier,\n                    },\n                    remainder,\n                )\n 
           }\n            InformationRequestTag::Entity => {\n                let (state_identifier, remainder) = FromBytes::from_bytes(key_bytes)?;\n                let (identifier, remainder) = FromBytes::from_bytes(remainder)?;\n                let (include_bytecode, remainder) = FromBytes::from_bytes(remainder)?;\n                (\n                    InformationRequest::Entity {\n                        state_identifier,\n                        identifier,\n                        include_bytecode,\n                    },\n                    remainder,\n                )\n            }\n        };\n        if !remainder.is_empty() {\n            return Err(bytesrepr::Error::LeftOverBytes);\n        }\n        Ok(req)\n    }\n}\n\nimpl TryFrom<InformationRequest> for GetRequest {\n    type Error = bytesrepr::Error;\n\n    fn try_from(request: InformationRequest) -> Result<Self, Self::Error> {\n        Ok(GetRequest::Information {\n            info_type_tag: request.tag().into(),\n            key: request.to_bytes()?,\n        })\n    }\n}\n\n/// Identifier of an information request.\n#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]\n#[repr(u16)]\npub enum InformationRequestTag {\n    /// Block header request.\n    BlockHeader = 0,\n    /// Block with signatures request.\n    BlockWithSignatures = 1,\n    /// Transaction request.\n    Transaction = 2,\n    /// Peers request.\n    Peers = 3,\n    /// Uptime request.\n    Uptime = 4,\n    /// Last progress request.\n    LastProgress = 5,\n    /// Reactor state request.\n    ReactorState = 6,\n    /// Network name request.\n    NetworkName = 7,\n    /// Consensus validator changes request.\n    ConsensusValidatorChanges = 8,\n    /// Block synchronizer status request.\n    BlockSynchronizerStatus = 9,\n    /// Available block range request.\n    AvailableBlockRange = 10,\n    /// Next upgrade request.\n    NextUpgrade = 11,\n    /// Consensus status request.\n    ConsensusStatus = 12,\n    /// Chainspec raw 
bytes request.\n    ChainspecRawBytes = 13,\n    /// Node status request.\n    NodeStatus = 14,\n    /// Latest switch block header request.\n    LatestSwitchBlockHeader = 15,\n    /// Reward for a validator or a delegator in a specific era.\n    Reward = 16,\n    /// Protocol version request.\n    ProtocolVersion = 17,\n    /// Contract package request.\n    Package = 18,\n    /// Addressable entity request.\n    Entity = 19,\n}\n\nimpl InformationRequestTag {\n    #[cfg(test)]\n    pub(crate) fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..20) {\n            0 => InformationRequestTag::BlockHeader,\n            1 => InformationRequestTag::BlockWithSignatures,\n            2 => InformationRequestTag::Transaction,\n            3 => InformationRequestTag::Peers,\n            4 => InformationRequestTag::Uptime,\n            5 => InformationRequestTag::LastProgress,\n            6 => InformationRequestTag::ReactorState,\n            7 => InformationRequestTag::NetworkName,\n            8 => InformationRequestTag::ConsensusValidatorChanges,\n            9 => InformationRequestTag::BlockSynchronizerStatus,\n            10 => InformationRequestTag::AvailableBlockRange,\n            11 => InformationRequestTag::NextUpgrade,\n            12 => InformationRequestTag::ConsensusStatus,\n            13 => InformationRequestTag::ChainspecRawBytes,\n            14 => InformationRequestTag::NodeStatus,\n            15 => InformationRequestTag::LatestSwitchBlockHeader,\n            16 => InformationRequestTag::Reward,\n            17 => InformationRequestTag::ProtocolVersion,\n            18 => InformationRequestTag::Package,\n            19 => InformationRequestTag::Entity,\n            _ => unreachable!(),\n        }\n    }\n}\n\nimpl TryFrom<u16> for InformationRequestTag {\n    type Error = UnknownInformationRequestTag;\n\n    fn try_from(value: u16) -> Result<Self, Self::Error> {\n        match value {\n            0 => 
Ok(InformationRequestTag::BlockHeader),\n            1 => Ok(InformationRequestTag::BlockWithSignatures),\n            2 => Ok(InformationRequestTag::Transaction),\n            3 => Ok(InformationRequestTag::Peers),\n            4 => Ok(InformationRequestTag::Uptime),\n            5 => Ok(InformationRequestTag::LastProgress),\n            6 => Ok(InformationRequestTag::ReactorState),\n            7 => Ok(InformationRequestTag::NetworkName),\n            8 => Ok(InformationRequestTag::ConsensusValidatorChanges),\n            9 => Ok(InformationRequestTag::BlockSynchronizerStatus),\n            10 => Ok(InformationRequestTag::AvailableBlockRange),\n            11 => Ok(InformationRequestTag::NextUpgrade),\n            12 => Ok(InformationRequestTag::ConsensusStatus),\n            13 => Ok(InformationRequestTag::ChainspecRawBytes),\n            14 => Ok(InformationRequestTag::NodeStatus),\n            15 => Ok(InformationRequestTag::LatestSwitchBlockHeader),\n            16 => Ok(InformationRequestTag::Reward),\n            17 => Ok(InformationRequestTag::ProtocolVersion),\n            18 => Ok(InformationRequestTag::Package),\n            19 => Ok(InformationRequestTag::Entity),\n            _ => Err(UnknownInformationRequestTag(value)),\n        }\n    }\n}\n\nimpl From<InformationRequestTag> for u16 {\n    fn from(value: InformationRequestTag) -> Self {\n        value as u16\n    }\n}\n\n/// Error returned when trying to convert a `u16` into an `InformationRequestTag`.\n#[derive(Debug, Clone, PartialEq)]\npub struct UnknownInformationRequestTag(u16);\n\n#[derive(Debug, Clone, PartialEq)]\npub enum EntityIdentifier {\n    ContractHash(ContractHash),\n    AccountHash(AccountHash),\n    PublicKey(PublicKey),\n    EntityAddr(EntityAddr),\n}\n\nimpl EntityIdentifier {\n    #[cfg(test)]\n    pub(crate) fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..4) {\n            0 => EntityIdentifier::ContractHash(ContractHash::new(rng.gen())),\n            1 => 
EntityIdentifier::PublicKey(PublicKey::random(rng)),\n            2 => EntityIdentifier::AccountHash(AccountHash::new(rng.gen())),\n            3 => EntityIdentifier::EntityAddr(rng.gen()),\n            _ => unreachable!(),\n        }\n    }\n}\n\nimpl FromBytes for EntityIdentifier {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        let (identifier, remainder) = match tag {\n            0 => {\n                let (hash, remainder) = FromBytes::from_bytes(remainder)?;\n                (EntityIdentifier::ContractHash(hash), remainder)\n            }\n            1 => {\n                let (key, remainder) = FromBytes::from_bytes(remainder)?;\n                (EntityIdentifier::PublicKey(key), remainder)\n            }\n            2 => {\n                let (hash, remainder) = FromBytes::from_bytes(remainder)?;\n                (EntityIdentifier::AccountHash(hash), remainder)\n            }\n            3 => {\n                let (entity, remainder) = FromBytes::from_bytes(remainder)?;\n                (EntityIdentifier::EntityAddr(entity), remainder)\n            }\n            _ => return Err(bytesrepr::Error::Formatting),\n        };\n        Ok((identifier, remainder))\n    }\n}\n\nimpl ToBytes for EntityIdentifier {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        let tag: u8 = match self {\n            EntityIdentifier::ContractHash(_) => 0,\n            EntityIdentifier::PublicKey(_) => 1,\n            EntityIdentifier::AccountHash(_) => 2,\n            EntityIdentifier::EntityAddr(_) => 3,\n        };\n        tag.write_bytes(writer)?;\n        match self {\n            EntityIdentifier::ContractHash(hash) => 
hash.write_bytes(writer),\n            EntityIdentifier::PublicKey(key) => key.write_bytes(writer),\n            EntityIdentifier::AccountHash(hash) => hash.write_bytes(writer),\n            EntityIdentifier::EntityAddr(entity) => entity.write_bytes(writer),\n        }\n    }\n\n    fn serialized_length(&self) -> usize {\n        let identifier_length = match self {\n            EntityIdentifier::ContractHash(hash) => hash.serialized_length(),\n            EntityIdentifier::PublicKey(key) => key.serialized_length(),\n            EntityIdentifier::AccountHash(hash) => hash.serialized_length(),\n            EntityIdentifier::EntityAddr(entity) => entity.serialized_length(),\n        };\n        U8_SERIALIZED_LENGTH + identifier_length\n    }\n}\n\n#[derive(Debug, Clone, PartialEq)]\npub enum PackageIdentifier {\n    ContractPackageHash(ContractPackageHash),\n    PackageAddr(PackageAddr),\n}\n\nimpl PackageIdentifier {\n    #[cfg(test)]\n    pub(crate) fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..2) {\n            0 => PackageIdentifier::ContractPackageHash(ContractPackageHash::new(rng.gen())),\n            1 => PackageIdentifier::PackageAddr(rng.gen()),\n            _ => unreachable!(),\n        }\n    }\n}\n\nimpl FromBytes for PackageIdentifier {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        let (identifier, remainder) = match tag {\n            0 => {\n                let (hash, remainder) = FromBytes::from_bytes(remainder)?;\n                (PackageIdentifier::ContractPackageHash(hash), remainder)\n            }\n            1 => {\n                let (addr, remainder) = FromBytes::from_bytes(remainder)?;\n                (PackageIdentifier::PackageAddr(addr), remainder)\n            }\n            _ => return Err(bytesrepr::Error::Formatting),\n        };\n        Ok((identifier, remainder))\n    }\n}\n\nimpl ToBytes for PackageIdentifier 
{\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        let tag: u8 = match self {\n            PackageIdentifier::ContractPackageHash(_) => 0,\n            PackageIdentifier::PackageAddr(_) => 1,\n        };\n        tag.write_bytes(writer)?;\n        match self {\n            PackageIdentifier::ContractPackageHash(hash) => hash.write_bytes(writer),\n            PackageIdentifier::PackageAddr(addr) => addr.write_bytes(writer),\n        }\n    }\n\n    fn serialized_length(&self) -> usize {\n        let identifier_length = match self {\n            PackageIdentifier::ContractPackageHash(hash) => hash.serialized_length(),\n            PackageIdentifier::PackageAddr(addr) => addr.serialized_length(),\n        };\n        U8_SERIALIZED_LENGTH + identifier_length\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::testing::TestRng;\n\n    #[test]\n    fn tag_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = InformationRequestTag::random(rng);\n        let tag = u16::from(val);\n        assert_eq!(InformationRequestTag::try_from(tag), Ok(val));\n    }\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = InformationRequest::random(rng);\n        let bytes = val.to_bytes().expect(\"should serialize\");\n        assert_eq!(\n            InformationRequest::try_from((val.tag(), &bytes[..])),\n            Ok(val)\n        );\n    }\n\n    #[test]\n    fn entity_identifier_bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = EntityIdentifier::random(rng);\n        let bytes = val.to_bytes().expect(\"should serialize\");\n        assert_eq!(bytesrepr::deserialize_from_slice(bytes), Ok(val));\n    }\n\n    
#[test]\n    fn package_identifier_bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = PackageIdentifier::random(rng);\n        let bytes = val.to_bytes().expect(\"should serialize\");\n        assert_eq!(bytesrepr::deserialize_from_slice(bytes), Ok(val));\n    }\n}\n"
  },
  {
    "path": "binary_port/src/key_prefix.rs",
    "content": "#[cfg(any(feature = \"testing\", test))]\nuse casper_types::testing::TestRng;\nuse casper_types::{\n    account::AccountHash,\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    contract_messages::TopicNameHash,\n    system::{auction::BidAddrTag, mint::BalanceHoldAddrTag},\n    EntityAddr, KeyTag, URefAddr,\n};\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n\n/// Key prefixes used for querying the global state.\n#[derive(Debug, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)]\npub enum KeyPrefix {\n    /// Retrieves all delegator bid addresses for a given validator.\n    DelegatorBidAddrsByValidator(AccountHash),\n    /// Retrieves all messages for a given entity.\n    MessagesByEntity(EntityAddr),\n    /// Retrieves all messages for a given entity and topic.\n    MessagesByEntityAndTopic(EntityAddr, TopicNameHash),\n    /// Retrieves all named keys for a given entity.\n    NamedKeysByEntity(EntityAddr),\n    /// Retrieves all gas balance holds for a given purse.\n    GasBalanceHoldsByPurse(URefAddr),\n    /// Retrieves all processing balance holds for a given purse.\n    ProcessingBalanceHoldsByPurse(URefAddr),\n    /// Retrieves all V1 entry points for a given entity.\n    EntryPointsV1ByEntity(EntityAddr),\n    /// Retrieves all V2 entry points for a given entity.\n    EntryPointsV2ByEntity(EntityAddr),\n}\n\nimpl KeyPrefix {\n    /// Returns a random `KeyPrefix`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..8) {\n            0 => KeyPrefix::DelegatorBidAddrsByValidator(rng.gen()),\n            1 => KeyPrefix::MessagesByEntity(rng.gen()),\n            2 => KeyPrefix::MessagesByEntityAndTopic(rng.gen(), rng.gen()),\n            3 => KeyPrefix::NamedKeysByEntity(rng.gen()),\n            4 => KeyPrefix::GasBalanceHoldsByPurse(rng.gen()),\n            5 => KeyPrefix::ProcessingBalanceHoldsByPurse(rng.gen()),\n            6 => 
KeyPrefix::EntryPointsV1ByEntity(rng.gen()),\n            7 => KeyPrefix::EntryPointsV2ByEntity(rng.gen()),\n            _ => unreachable!(),\n        }\n    }\n}\n\nimpl ToBytes for KeyPrefix {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::unchecked_allocate_buffer(self);\n        self.write_bytes(&mut result)?;\n        Ok(result)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            KeyPrefix::DelegatorBidAddrsByValidator(validator) => {\n                writer.push(KeyTag::BidAddr as u8);\n                writer.push(BidAddrTag::DelegatedAccount as u8);\n                validator.write_bytes(writer)?;\n            }\n            KeyPrefix::MessagesByEntity(entity) => {\n                writer.push(KeyTag::Message as u8);\n                entity.write_bytes(writer)?;\n            }\n            KeyPrefix::MessagesByEntityAndTopic(entity, topic) => {\n                writer.push(KeyTag::Message as u8);\n                entity.write_bytes(writer)?;\n                topic.write_bytes(writer)?;\n            }\n            KeyPrefix::NamedKeysByEntity(entity) => {\n                writer.push(KeyTag::NamedKey as u8);\n                entity.write_bytes(writer)?;\n            }\n            KeyPrefix::GasBalanceHoldsByPurse(uref) => {\n                writer.push(KeyTag::BalanceHold as u8);\n                writer.push(BalanceHoldAddrTag::Gas as u8);\n                uref.write_bytes(writer)?;\n            }\n            KeyPrefix::ProcessingBalanceHoldsByPurse(uref) => {\n                writer.push(KeyTag::BalanceHold as u8);\n                writer.push(BalanceHoldAddrTag::Processing as u8);\n                uref.write_bytes(writer)?;\n            }\n            KeyPrefix::EntryPointsV1ByEntity(entity) => {\n                writer.push(KeyTag::EntryPoint as u8);\n                writer.push(0);\n                
entity.write_bytes(writer)?;\n            }\n            KeyPrefix::EntryPointsV2ByEntity(entity) => {\n                writer.push(KeyTag::EntryPoint as u8);\n                writer.push(1);\n                entity.write_bytes(writer)?;\n            }\n        }\n        Ok(())\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                KeyPrefix::DelegatorBidAddrsByValidator(validator) => {\n                    U8_SERIALIZED_LENGTH + validator.serialized_length()\n                }\n                KeyPrefix::MessagesByEntity(entity) => entity.serialized_length(),\n                KeyPrefix::MessagesByEntityAndTopic(entity, topic) => {\n                    entity.serialized_length() + topic.serialized_length()\n                }\n                KeyPrefix::NamedKeysByEntity(entity) => entity.serialized_length(),\n                KeyPrefix::GasBalanceHoldsByPurse(uref) => {\n                    U8_SERIALIZED_LENGTH + uref.serialized_length()\n                }\n                KeyPrefix::ProcessingBalanceHoldsByPurse(uref) => {\n                    U8_SERIALIZED_LENGTH + uref.serialized_length()\n                }\n                KeyPrefix::EntryPointsV1ByEntity(entity) => {\n                    U8_SERIALIZED_LENGTH + entity.serialized_length()\n                }\n                KeyPrefix::EntryPointsV2ByEntity(entity) => {\n                    U8_SERIALIZED_LENGTH + entity.serialized_length()\n                }\n            }\n    }\n}\n\nimpl FromBytes for KeyPrefix {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        let result = match tag {\n            tag if tag == KeyTag::BidAddr as u8 => {\n                let (bid_addr_tag, remainder) = u8::from_bytes(remainder)?;\n                match bid_addr_tag {\n                    tag if tag == BidAddrTag::DelegatedAccount as u8 => {\n                     
   let (validator, remainder) = AccountHash::from_bytes(remainder)?;\n                        (\n                            KeyPrefix::DelegatorBidAddrsByValidator(validator),\n                            remainder,\n                        )\n                    }\n                    _ => return Err(bytesrepr::Error::Formatting),\n                }\n            }\n            tag if tag == KeyTag::Message as u8 => {\n                let (entity_addr, remainder) = EntityAddr::from_bytes(remainder)?;\n                if remainder.is_empty() {\n                    (KeyPrefix::MessagesByEntity(entity_addr), remainder)\n                } else {\n                    let (topic, remainder) = TopicNameHash::from_bytes(remainder)?;\n                    (\n                        KeyPrefix::MessagesByEntityAndTopic(entity_addr, topic),\n                        remainder,\n                    )\n                }\n            }\n            tag if tag == KeyTag::NamedKey as u8 => {\n                let (entity, remainder) = EntityAddr::from_bytes(remainder)?;\n                (KeyPrefix::NamedKeysByEntity(entity), remainder)\n            }\n            tag if tag == KeyTag::BalanceHold as u8 => {\n                let (balance_hold_addr_tag, remainder) = u8::from_bytes(remainder)?;\n                let (uref, remainder) = URefAddr::from_bytes(remainder)?;\n                match balance_hold_addr_tag {\n                    tag if tag == BalanceHoldAddrTag::Gas as u8 => {\n                        (KeyPrefix::GasBalanceHoldsByPurse(uref), remainder)\n                    }\n                    tag if tag == BalanceHoldAddrTag::Processing as u8 => {\n                        (KeyPrefix::ProcessingBalanceHoldsByPurse(uref), remainder)\n                    }\n                    _ => return Err(bytesrepr::Error::Formatting),\n                }\n            }\n            tag if tag == KeyTag::EntryPoint as u8 => {\n                let (entry_point_type, remainder) = 
u8::from_bytes(remainder)?;\n                let (entity, remainder) = EntityAddr::from_bytes(remainder)?;\n                match entry_point_type {\n                    0 => (KeyPrefix::EntryPointsV1ByEntity(entity), remainder),\n                    1 => (KeyPrefix::EntryPointsV2ByEntity(entity), remainder),\n                    _ => return Err(bytesrepr::Error::Formatting),\n                }\n            }\n            _ => return Err(bytesrepr::Error::Formatting),\n        };\n        Ok(result)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let key_prefix = KeyPrefix::random(rng);\n        bytesrepr::test_serialization_roundtrip(&key_prefix);\n    }\n}\n"
  },
  {
    "path": "binary_port/src/lib.rs",
    "content": "//! A Rust library for types used by the binary port of a casper node.\n\nmod balance_response;\nmod binary_message;\nmod binary_response;\nmod binary_response_and_request;\nmod binary_response_header;\nmod command;\nmod dictionary_item_identifier;\nmod entity_qualifier;\nmod era_identifier;\nmod error;\nmod error_code;\nmod get_request;\nmod global_state_query_result;\nmod information_request;\nmod key_prefix;\nmod minimal_block_info;\nmod node_status;\nmod purse_identifier;\npub mod record_id;\nmod response_type;\nmod speculative_execution_result;\nmod state_request;\nmod type_wrappers;\n\npub use balance_response::BalanceResponse;\npub use binary_message::{BinaryMessage, BinaryMessageCodec};\npub use binary_response::BinaryResponse;\npub use binary_response_and_request::BinaryResponseAndRequest;\npub use binary_response_header::BinaryResponseHeader;\npub use command::{Command, CommandHeader, CommandTag};\npub use dictionary_item_identifier::DictionaryItemIdentifier;\npub use entity_qualifier::GlobalStateEntityQualifier;\npub use era_identifier::EraIdentifier;\npub use error::Error;\npub use error_code::ErrorCode;\npub use get_request::GetRequest;\npub use global_state_query_result::GlobalStateQueryResult;\npub use information_request::{\n    EntityIdentifier, InformationRequest, InformationRequestTag, PackageIdentifier,\n};\npub use key_prefix::KeyPrefix;\npub use minimal_block_info::MinimalBlockInfo;\npub use node_status::NodeStatus;\npub use purse_identifier::PurseIdentifier;\npub use record_id::{RecordId, UnknownRecordId};\npub use response_type::{PayloadEntity, ResponseType};\npub use speculative_execution_result::SpeculativeExecutionResult;\npub use state_request::GlobalStateRequest;\npub use type_wrappers::{\n    AccountInformation, AddressableEntityInformation, ConsensusStatus, ConsensusValidatorChanges,\n    ContractInformation, DictionaryQueryResult, GetTrieFullResult, LastProgress, NetworkName,\n    ReactorStateName, RewardResponse, 
TransactionWithExecutionInfo, Uptime, ValueWithProof,\n};\n"
  },
  {
    "path": "binary_port/src/minimal_block_info.rs",
    "content": "use casper_types::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    Block, BlockHash, Digest, EraId, PublicKey, Timestamp,\n};\nuse serde::{Deserialize, Serialize};\n\n#[cfg(test)]\nuse rand::Rng;\n\n#[cfg(test)]\nuse casper_types::testing::TestRng;\n\n/// Minimal info about a `Block` needed to satisfy the node status request.\n#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]\n#[serde(deny_unknown_fields)]\npub struct MinimalBlockInfo {\n    hash: BlockHash,\n    timestamp: Timestamp,\n    era_id: EraId,\n    height: u64,\n    state_root_hash: Digest,\n    creator: PublicKey,\n}\n\nimpl MinimalBlockInfo {\n    #[cfg(test)]\n    pub(crate) fn random(rng: &mut TestRng) -> Self {\n        Self {\n            hash: BlockHash::random(rng),\n            timestamp: Timestamp::random(rng),\n            era_id: EraId::random(rng),\n            height: rng.gen(),\n            state_root_hash: Digest::random(rng),\n            creator: PublicKey::random(rng),\n        }\n    }\n}\n\nimpl FromBytes for MinimalBlockInfo {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (hash, remainder) = BlockHash::from_bytes(bytes)?;\n        let (timestamp, remainder) = Timestamp::from_bytes(remainder)?;\n        let (era_id, remainder) = EraId::from_bytes(remainder)?;\n        let (height, remainder) = u64::from_bytes(remainder)?;\n        let (state_root_hash, remainder) = Digest::from_bytes(remainder)?;\n        let (creator, remainder) = PublicKey::from_bytes(remainder)?;\n        Ok((\n            MinimalBlockInfo {\n                hash,\n                timestamp,\n                era_id,\n                height,\n                state_root_hash,\n                creator,\n            },\n            remainder,\n        ))\n    }\n}\n\nimpl ToBytes for MinimalBlockInfo {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        
self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.hash.write_bytes(writer)?;\n        self.timestamp.write_bytes(writer)?;\n        self.era_id.write_bytes(writer)?;\n        self.height.write_bytes(writer)?;\n        self.state_root_hash.write_bytes(writer)?;\n        self.creator.write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.hash.serialized_length()\n            + self.timestamp.serialized_length()\n            + self.era_id.serialized_length()\n            + self.height.serialized_length()\n            + self.state_root_hash.serialized_length()\n            + self.creator.serialized_length()\n    }\n}\n\nimpl From<Block> for MinimalBlockInfo {\n    fn from(block: Block) -> Self {\n        let proposer = match &block {\n            Block::V1(v1) => v1.proposer().clone(),\n            Block::V2(v2) => v2.proposer().clone(),\n        };\n\n        MinimalBlockInfo {\n            hash: *block.hash(),\n            timestamp: block.timestamp(),\n            era_id: block.era_id(),\n            height: block.height(),\n            state_root_hash: *block.state_root_hash(),\n            creator: proposer,\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::testing::TestRng;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = MinimalBlockInfo::random(rng);\n        bytesrepr::test_serialization_roundtrip(&val);\n    }\n}\n"
  },
  {
    "path": "binary_port/src/node_status.rs",
    "content": "use casper_types::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    AvailableBlockRange, BlockHash, BlockSynchronizerStatus, Digest, NextUpgrade, Peers,\n    ProtocolVersion, PublicKey, TimeDiff, Timestamp,\n};\n\n#[cfg(test)]\nuse casper_types::testing::TestRng;\n#[cfg(test)]\nuse rand::Rng;\nuse serde::Serialize;\n\nuse crate::{minimal_block_info::MinimalBlockInfo, type_wrappers::ReactorStateName};\n\n/// Status information about the node.\n#[derive(Debug, PartialEq, Serialize)]\npub struct NodeStatus {\n    /// The current protocol version.\n    pub protocol_version: ProtocolVersion,\n    /// The node ID and network address of each connected peer.\n    pub peers: Peers,\n    /// The compiled node version.\n    pub build_version: String,\n    /// The chainspec name.\n    pub chainspec_name: String,\n    /// The state root hash of the lowest block in the available block range.\n    pub starting_state_root_hash: Digest,\n    /// The minimal info of the last block from the linear chain.\n    pub last_added_block_info: Option<MinimalBlockInfo>,\n    /// Our public signing key.\n    pub our_public_signing_key: Option<PublicKey>,\n    /// The next round length if this node is a validator.\n    pub round_length: Option<TimeDiff>,\n    /// Information about the next scheduled upgrade.\n    pub next_upgrade: Option<NextUpgrade>,\n    /// Time that passed since the node has started.\n    pub uptime: TimeDiff,\n    /// The current state of node reactor.\n    pub reactor_state: ReactorStateName,\n    /// Timestamp of the last recorded progress in the reactor.\n    pub last_progress: Timestamp,\n    /// The available block range in storage.\n    pub available_block_range: AvailableBlockRange,\n    /// The status of the block synchronizer builders.\n    pub block_sync: BlockSynchronizerStatus,\n    /// The hash of the latest switch block.\n    pub latest_switch_block_hash: Option<BlockHash>,\n}\n\nimpl NodeStatus {\n    #[cfg(test)]\n    pub(crate) fn 
random(rng: &mut TestRng) -> Self {\n        Self {\n            protocol_version: ProtocolVersion::from_parts(rng.gen(), rng.gen(), rng.gen()),\n            peers: Peers::random(rng),\n            build_version: rng.random_string(5..10),\n            chainspec_name: rng.random_string(5..10),\n            starting_state_root_hash: Digest::random(rng),\n            last_added_block_info: rng.gen::<bool>().then_some(MinimalBlockInfo::random(rng)),\n            our_public_signing_key: rng.gen::<bool>().then_some(PublicKey::random(rng)),\n            round_length: rng\n                .gen::<bool>()\n                .then_some(TimeDiff::from_millis(rng.gen())),\n            next_upgrade: rng.gen::<bool>().then_some(NextUpgrade::random(rng)),\n            uptime: TimeDiff::from_millis(rng.gen()),\n            reactor_state: ReactorStateName::new(rng.random_string(5..10)),\n            last_progress: Timestamp::random(rng),\n            available_block_range: AvailableBlockRange::random(rng),\n            block_sync: BlockSynchronizerStatus::random(rng),\n            latest_switch_block_hash: rng.gen::<bool>().then_some(BlockHash::random(rng)),\n        }\n    }\n}\n\nimpl FromBytes for NodeStatus {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (protocol_version, remainder) = ProtocolVersion::from_bytes(bytes)?;\n        let (peers, remainder) = Peers::from_bytes(remainder)?;\n        let (build_version, remainder) = String::from_bytes(remainder)?;\n        let (chainspec_name, remainder) = String::from_bytes(remainder)?;\n        let (starting_state_root_hash, remainder) = Digest::from_bytes(remainder)?;\n        let (last_added_block_info, remainder) = Option::<MinimalBlockInfo>::from_bytes(remainder)?;\n        let (our_public_signing_key, remainder) = Option::<PublicKey>::from_bytes(remainder)?;\n        let (round_length, remainder) = Option::<TimeDiff>::from_bytes(remainder)?;\n        let (next_upgrade, remainder) = 
Option::<NextUpgrade>::from_bytes(remainder)?;\n        let (uptime, remainder) = TimeDiff::from_bytes(remainder)?;\n        let (reactor_state, remainder) = ReactorStateName::from_bytes(remainder)?;\n        let (last_progress, remainder) = Timestamp::from_bytes(remainder)?;\n        let (available_block_range, remainder) = AvailableBlockRange::from_bytes(remainder)?;\n        let (block_sync, remainder) = BlockSynchronizerStatus::from_bytes(remainder)?;\n        let (latest_switch_block_hash, remainder) = Option::<BlockHash>::from_bytes(remainder)?;\n        Ok((\n            NodeStatus {\n                protocol_version,\n                peers,\n                build_version,\n                chainspec_name,\n                starting_state_root_hash,\n                last_added_block_info,\n                our_public_signing_key,\n                round_length,\n                next_upgrade,\n                uptime,\n                reactor_state,\n                last_progress,\n                available_block_range,\n                block_sync,\n                latest_switch_block_hash,\n            },\n            remainder,\n        ))\n    }\n}\n\nimpl ToBytes for NodeStatus {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        let NodeStatus {\n            protocol_version,\n            peers,\n            build_version,\n            chainspec_name,\n            starting_state_root_hash,\n            last_added_block_info,\n            our_public_signing_key,\n            round_length,\n            next_upgrade,\n            uptime,\n            reactor_state,\n            last_progress,\n            available_block_range,\n            block_sync,\n            latest_switch_block_hash,\n        } = self;\n        
protocol_version.write_bytes(writer)?;\n        peers.write_bytes(writer)?;\n        build_version.write_bytes(writer)?;\n        chainspec_name.write_bytes(writer)?;\n        starting_state_root_hash.write_bytes(writer)?;\n        last_added_block_info.write_bytes(writer)?;\n        our_public_signing_key.write_bytes(writer)?;\n        round_length.write_bytes(writer)?;\n        next_upgrade.write_bytes(writer)?;\n        uptime.write_bytes(writer)?;\n        reactor_state.write_bytes(writer)?;\n        last_progress.write_bytes(writer)?;\n        available_block_range.write_bytes(writer)?;\n        block_sync.write_bytes(writer)?;\n        latest_switch_block_hash.write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.protocol_version.serialized_length()\n            + self.peers.serialized_length()\n            + self.build_version.serialized_length()\n            + self.chainspec_name.serialized_length()\n            + self.starting_state_root_hash.serialized_length()\n            + self.last_added_block_info.serialized_length()\n            + self.our_public_signing_key.serialized_length()\n            + self.round_length.serialized_length()\n            + self.next_upgrade.serialized_length()\n            + self.uptime.serialized_length()\n            + self.reactor_state.serialized_length()\n            + self.last_progress.serialized_length()\n            + self.available_block_range.serialized_length()\n            + self.block_sync.serialized_length()\n            + self.latest_switch_block_hash.serialized_length()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::testing::TestRng;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = NodeStatus::random(rng);\n        bytesrepr::test_serialization_roundtrip(&val);\n    }\n}\n"
  },
  {
    "path": "binary_port/src/purse_identifier.rs",
    "content": "#[cfg(test)]\nuse casper_types::testing::TestRng;\n#[cfg(test)]\nuse rand::Rng;\n\nuse casper_types::{\n    account::AccountHash,\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    EntityAddr, PublicKey, URef,\n};\n\nconst PAYMENT_PURSE_TAG: u8 = 0;\nconst ACCUMULATE_PURSE_TAG: u8 = 1;\nconst UREF_PURSE_TAG: u8 = 2;\nconst PUBLIC_KEY_PURSE_TAG: u8 = 3;\nconst ACCOUNT_PURSE_TAG: u8 = 4;\nconst ENTITY_PURSE_TAG: u8 = 5;\n\n/// Identifier for balance lookup.\n#[derive(Clone, Debug, PartialEq)]\npub enum PurseIdentifier {\n    Payment,\n    Accumulate,\n    Purse(URef),\n    PublicKey(PublicKey),\n    Account(AccountHash),\n    Entity(EntityAddr),\n}\n\nimpl PurseIdentifier {\n    #[cfg(test)]\n    pub(crate) fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..6) {\n            PAYMENT_PURSE_TAG => PurseIdentifier::Payment,\n            ACCUMULATE_PURSE_TAG => PurseIdentifier::Accumulate,\n            UREF_PURSE_TAG => PurseIdentifier::Purse(rng.gen()),\n            PUBLIC_KEY_PURSE_TAG => PurseIdentifier::PublicKey(PublicKey::random(rng)),\n            ACCOUNT_PURSE_TAG => PurseIdentifier::Account(rng.gen()),\n            ENTITY_PURSE_TAG => PurseIdentifier::Entity(rng.gen()),\n            _ => unreachable!(),\n        }\n    }\n}\n\nimpl ToBytes for PurseIdentifier {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            PurseIdentifier::Payment => PAYMENT_PURSE_TAG.write_bytes(writer),\n            PurseIdentifier::Accumulate => ACCUMULATE_PURSE_TAG.write_bytes(writer),\n            PurseIdentifier::Purse(uref) => {\n                UREF_PURSE_TAG.write_bytes(writer)?;\n                uref.write_bytes(writer)\n            }\n            
PurseIdentifier::PublicKey(key) => {\n                PUBLIC_KEY_PURSE_TAG.write_bytes(writer)?;\n                key.write_bytes(writer)\n            }\n            PurseIdentifier::Account(account) => {\n                ACCOUNT_PURSE_TAG.write_bytes(writer)?;\n                account.write_bytes(writer)\n            }\n            PurseIdentifier::Entity(entity) => {\n                ENTITY_PURSE_TAG.write_bytes(writer)?;\n                entity.write_bytes(writer)\n            }\n        }\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                PurseIdentifier::Payment => 0,\n                PurseIdentifier::Accumulate => 0,\n                PurseIdentifier::Purse(uref) => uref.serialized_length(),\n                PurseIdentifier::PublicKey(key) => key.serialized_length(),\n                PurseIdentifier::Account(account) => account.serialized_length(),\n                PurseIdentifier::Entity(entity) => entity.serialized_length(),\n            }\n    }\n}\n\nimpl FromBytes for PurseIdentifier {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            PAYMENT_PURSE_TAG => Ok((PurseIdentifier::Payment, remainder)),\n            ACCUMULATE_PURSE_TAG => Ok((PurseIdentifier::Accumulate, remainder)),\n            UREF_PURSE_TAG => {\n                let (uref, remainder) = URef::from_bytes(remainder)?;\n                Ok((PurseIdentifier::Purse(uref), remainder))\n            }\n            PUBLIC_KEY_PURSE_TAG => {\n                let (key, remainder) = PublicKey::from_bytes(remainder)?;\n                Ok((PurseIdentifier::PublicKey(key), remainder))\n            }\n            ACCOUNT_PURSE_TAG => {\n                let (account, remainder) = AccountHash::from_bytes(remainder)?;\n                Ok((PurseIdentifier::Account(account), remainder))\n            }\n            
ENTITY_PURSE_TAG => {\n                let (entity, remainder) = EntityAddr::from_bytes(remainder)?;\n                Ok((PurseIdentifier::Entity(entity), remainder))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::testing::TestRng;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = PurseIdentifier::random(rng);\n        bytesrepr::test_serialization_roundtrip(&val);\n    }\n}\n"
  },
  {
    "path": "binary_port/src/record_id.rs",
    "content": "use core::convert::TryFrom;\n\n#[cfg(test)]\nuse rand::Rng;\nuse serde::Serialize;\n\n#[cfg(test)]\nuse casper_types::testing::TestRng;\n#[cfg(any(feature = \"testing\", test))]\nuse strum::IntoEnumIterator;\n#[cfg(any(feature = \"testing\", test))]\nuse strum_macros::EnumIter;\n\n/// An identifier of a record type.\n#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Serialize)]\n#[repr(u16)]\n#[cfg_attr(any(feature = \"testing\", test), derive(EnumIter))]\npub enum RecordId {\n    /// Refers to `BlockHeader` record.\n    BlockHeader = 0,\n    /// Refers to `BlockBody` record.\n    BlockBody = 1,\n    /// Refers to `ApprovalsHashes` record.\n    ApprovalsHashes = 2,\n    /// Refers to `BlockMetadata` record.\n    BlockMetadata = 3,\n    /// Refers to `Transaction` record.\n    Transaction = 4,\n    /// Refers to `ExecutionResult` record.\n    ExecutionResult = 5,\n    /// Refers to `Transfer` record.\n    Transfer = 6,\n    /// Refers to `FinalizedTransactionApprovals` record.\n    FinalizedTransactionApprovals = 7,\n}\n\nimpl RecordId {\n    #[cfg(test)]\n    pub fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..8) {\n            0 => RecordId::BlockHeader,\n            1 => RecordId::BlockBody,\n            2 => RecordId::ApprovalsHashes,\n            3 => RecordId::BlockMetadata,\n            4 => RecordId::Transaction,\n            5 => RecordId::ExecutionResult,\n            6 => RecordId::Transfer,\n            7 => RecordId::FinalizedTransactionApprovals,\n            _ => unreachable!(),\n        }\n    }\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn all() -> impl Iterator<Item = RecordId> {\n        RecordId::iter()\n    }\n}\n\nimpl TryFrom<u16> for RecordId {\n    type Error = UnknownRecordId;\n\n    fn try_from(value: u16) -> Result<Self, Self::Error> {\n        match value {\n            0 => Ok(RecordId::BlockHeader),\n            1 => Ok(RecordId::BlockBody),\n            2 => 
Ok(RecordId::ApprovalsHashes),\n            3 => Ok(RecordId::BlockMetadata),\n            4 => Ok(RecordId::Transaction),\n            5 => Ok(RecordId::ExecutionResult),\n            6 => Ok(RecordId::Transfer),\n            7 => Ok(RecordId::FinalizedTransactionApprovals),\n            _ => Err(UnknownRecordId(value)),\n        }\n    }\n}\n\nimpl From<RecordId> for u16 {\n    fn from(value: RecordId) -> Self {\n        value as u16\n    }\n}\n\nimpl core::fmt::Display for RecordId {\n    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {\n        match self {\n            RecordId::BlockHeader => write!(f, \"BlockHeader\"),\n            RecordId::BlockBody => write!(f, \"BlockBody\"),\n            RecordId::ApprovalsHashes => write!(f, \"ApprovalsHashes\"),\n            RecordId::BlockMetadata => write!(f, \"BlockMetadata\"),\n            RecordId::Transaction => write!(f, \"Transaction\"),\n            RecordId::ExecutionResult => write!(f, \"ExecutionResult\"),\n            RecordId::Transfer => write!(f, \"Transfer\"),\n            RecordId::FinalizedTransactionApprovals => write!(f, \"FinalizedTransactionApprovals\"),\n        }\n    }\n}\n\n/// Error returned when trying to convert a `u16` into a `RecordId`.\n#[derive(Debug, PartialEq, Eq)]\npub struct UnknownRecordId(u16);\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::testing::TestRng;\n\n    #[test]\n    fn tag_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = RecordId::random(rng);\n        let tag = u16::from(val);\n        assert_eq!(RecordId::try_from(tag), Ok(val));\n    }\n}\n"
  },
  {
    "path": "binary_port/src/response_type.rs",
    "content": "//! The payload type.\n\nuse core::{convert::TryFrom, fmt};\n\n#[cfg(test)]\nuse rand::Rng;\n\n#[cfg(test)]\nuse casper_types::testing::TestRng;\nuse casper_types::{\n    contracts::ContractPackage,\n    execution::{ExecutionResult, ExecutionResultV1},\n    AvailableBlockRange, BlockBody, BlockBodyV1, BlockHeader, BlockHeaderV1, BlockSignatures,\n    BlockSignaturesV1, BlockSynchronizerStatus, BlockWithSignatures, ChainspecRawBytes, Deploy,\n    NextUpgrade, Package, Peers, ProtocolVersion, StoredValue, Transaction, Transfer,\n};\n\nuse crate::{\n    global_state_query_result::GlobalStateQueryResult,\n    node_status::NodeStatus,\n    speculative_execution_result::SpeculativeExecutionResult,\n    type_wrappers::{\n        ConsensusStatus, ConsensusValidatorChanges, GetTrieFullResult, LastProgress, NetworkName,\n        ReactorStateName, RewardResponse,\n    },\n    AccountInformation, AddressableEntityInformation, BalanceResponse, ContractInformation,\n    DictionaryQueryResult, RecordId, TransactionWithExecutionInfo, Uptime, ValueWithProof,\n};\n\n/// A type of the payload being returned in a binary response.\n#[derive(Debug, Copy, Clone, PartialEq, Eq)]\n#[repr(u8)]\npub enum ResponseType {\n    /// Legacy version of the block header.\n    BlockHeaderV1,\n    /// Block header.\n    BlockHeader,\n    /// Legacy version of the block body.\n    BlockBodyV1,\n    /// Block body.\n    BlockBody,\n    /// Legacy version of the approvals hashes.\n    ApprovalsHashesV1,\n    /// Approvals hashes\n    ApprovalsHashes,\n    /// Legacy version of the block signatures.\n    BlockSignaturesV1,\n    /// Block signatures.\n    BlockSignatures,\n    /// Deploy.\n    Deploy,\n    /// Transaction.\n    Transaction,\n    /// Legacy version of the execution result.\n    ExecutionResultV1,\n    /// Execution result.\n    ExecutionResult,\n    /// Wasm V1 execution result.\n    WasmV1Result,\n    /// Transfers.\n    Transfers,\n    /// Finalized deploy approvals.\n    
FinalizedDeployApprovals,\n    /// Finalized approvals.\n    FinalizedApprovals,\n    /// Block with signatures.\n    BlockWithSignatures,\n    /// Transaction with approvals and execution info.\n    TransactionWithExecutionInfo,\n    /// Peers.\n    Peers,\n    /// Last progress.\n    LastProgress,\n    /// State of the reactor.\n    ReactorState,\n    /// Network name.\n    NetworkName,\n    /// Consensus validator changes.\n    ConsensusValidatorChanges, // return type in `effects.rs` will be turned into dedicated type.\n    /// Status of the block synchronizer.\n    BlockSynchronizerStatus,\n    /// Available block range.\n    AvailableBlockRange,\n    /// Information about the next network upgrade.\n    NextUpgrade,\n    /// Consensus status.\n    ConsensusStatus, // return type in `effects.rs` will be turned into dedicated type.\n    /// Chainspec represented as raw bytes.\n    ChainspecRawBytes,\n    /// Uptime.\n    Uptime,\n    /// Result of checking if given block is in the highest available block range.\n    HighestBlockSequenceCheckResult,\n    /// Result of the speculative execution,\n    SpeculativeExecutionResult,\n    /// Result of querying global state,\n    GlobalStateQueryResult,\n    /// Result of querying global state for all values under a specified key.\n    StoredValues,\n    /// Result of querying global state for a full trie.\n    GetTrieFullResult,\n    /// Node status.\n    NodeStatus,\n    /// Result of querying for a dictionary item.\n    DictionaryQueryResult,\n    /// Balance query response.\n    BalanceResponse,\n    /// Reward response.\n    Reward,\n    /// Protocol version.\n    ProtocolVersion,\n    /// Contract package with Merkle proof.\n    ContractPackageWithProof,\n    /// Contract information.\n    ContractInformation,\n    /// Account information.\n    AccountInformation,\n    /// Package with Merkle proof.\n    PackageWithProof,\n    /// Addressable entity information.\n    AddressableEntityInformation,\n}\n\nimpl 
ResponseType {\n    pub fn from_record_id(record_id: RecordId, is_legacy: bool) -> Self {\n        match (is_legacy, record_id) {\n            (true, RecordId::BlockHeader) => Self::BlockHeaderV1,\n            (true, RecordId::BlockBody) => Self::BlockBodyV1,\n            (true, RecordId::ApprovalsHashes) => Self::ApprovalsHashesV1,\n            (true, RecordId::BlockMetadata) => Self::BlockSignaturesV1,\n            (true, RecordId::Transaction) => Self::Deploy,\n            (true, RecordId::ExecutionResult) => Self::ExecutionResultV1,\n            (true, RecordId::Transfer) => Self::Transfers,\n            (true, RecordId::FinalizedTransactionApprovals) => Self::FinalizedDeployApprovals,\n            (false, RecordId::BlockHeader) => Self::BlockHeader,\n            (false, RecordId::BlockBody) => Self::BlockBody,\n            (false, RecordId::ApprovalsHashes) => Self::ApprovalsHashes,\n            (false, RecordId::BlockMetadata) => Self::BlockSignatures,\n            (false, RecordId::Transaction) => Self::Transaction,\n            (false, RecordId::ExecutionResult) => Self::ExecutionResult,\n            (false, RecordId::Transfer) => Self::Transfers,\n            (false, RecordId::FinalizedTransactionApprovals) => Self::FinalizedApprovals,\n        }\n    }\n\n    #[cfg(test)]\n    pub(crate) fn random(rng: &mut TestRng) -> Self {\n        Self::try_from(rng.gen_range(0..44)).unwrap()\n    }\n}\n\nimpl TryFrom<u8> for ResponseType {\n    type Error = ();\n\n    fn try_from(v: u8) -> Result<Self, Self::Error> {\n        match v {\n            x if x == ResponseType::BlockHeaderV1 as u8 => Ok(ResponseType::BlockHeaderV1),\n            x if x == ResponseType::BlockHeader as u8 => Ok(ResponseType::BlockHeader),\n            x if x == ResponseType::BlockBodyV1 as u8 => Ok(ResponseType::BlockBodyV1),\n            x if x == ResponseType::BlockBody as u8 => Ok(ResponseType::BlockBody),\n            x if x == ResponseType::ApprovalsHashesV1 as u8 => 
Ok(ResponseType::ApprovalsHashesV1),\n            x if x == ResponseType::ApprovalsHashes as u8 => Ok(ResponseType::ApprovalsHashes),\n            x if x == ResponseType::BlockSignaturesV1 as u8 => Ok(ResponseType::BlockSignaturesV1),\n            x if x == ResponseType::BlockSignatures as u8 => Ok(ResponseType::BlockSignatures),\n            x if x == ResponseType::Deploy as u8 => Ok(ResponseType::Deploy),\n            x if x == ResponseType::Transaction as u8 => Ok(ResponseType::Transaction),\n            x if x == ResponseType::ExecutionResultV1 as u8 => Ok(ResponseType::ExecutionResultV1),\n            x if x == ResponseType::ExecutionResult as u8 => Ok(ResponseType::ExecutionResult),\n            x if x == ResponseType::Transfers as u8 => Ok(ResponseType::Transfers),\n            x if x == ResponseType::FinalizedDeployApprovals as u8 => {\n                Ok(ResponseType::FinalizedDeployApprovals)\n            }\n            x if x == ResponseType::FinalizedApprovals as u8 => {\n                Ok(ResponseType::FinalizedApprovals)\n            }\n            x if x == ResponseType::BlockWithSignatures as u8 => {\n                Ok(ResponseType::BlockWithSignatures)\n            }\n            x if x == ResponseType::TransactionWithExecutionInfo as u8 => {\n                Ok(ResponseType::TransactionWithExecutionInfo)\n            }\n            x if x == ResponseType::Peers as u8 => Ok(ResponseType::Peers),\n            x if x == ResponseType::Uptime as u8 => Ok(ResponseType::Uptime),\n            x if x == ResponseType::LastProgress as u8 => Ok(ResponseType::LastProgress),\n            x if x == ResponseType::ReactorState as u8 => Ok(ResponseType::ReactorState),\n            x if x == ResponseType::NetworkName as u8 => Ok(ResponseType::NetworkName),\n            x if x == ResponseType::ConsensusValidatorChanges as u8 => {\n                Ok(ResponseType::ConsensusValidatorChanges)\n            }\n            x if x == ResponseType::BlockSynchronizerStatus 
as u8 => {\n                Ok(ResponseType::BlockSynchronizerStatus)\n            }\n            x if x == ResponseType::AvailableBlockRange as u8 => {\n                Ok(ResponseType::AvailableBlockRange)\n            }\n            x if x == ResponseType::NextUpgrade as u8 => Ok(ResponseType::NextUpgrade),\n            x if x == ResponseType::ConsensusStatus as u8 => Ok(ResponseType::ConsensusStatus),\n            x if x == ResponseType::ChainspecRawBytes as u8 => Ok(ResponseType::ChainspecRawBytes),\n            x if x == ResponseType::HighestBlockSequenceCheckResult as u8 => {\n                Ok(ResponseType::HighestBlockSequenceCheckResult)\n            }\n            x if x == ResponseType::SpeculativeExecutionResult as u8 => {\n                Ok(ResponseType::SpeculativeExecutionResult)\n            }\n            x if x == ResponseType::GlobalStateQueryResult as u8 => {\n                Ok(ResponseType::GlobalStateQueryResult)\n            }\n            x if x == ResponseType::StoredValues as u8 => Ok(ResponseType::StoredValues),\n            x if x == ResponseType::GetTrieFullResult as u8 => Ok(ResponseType::GetTrieFullResult),\n            x if x == ResponseType::NodeStatus as u8 => Ok(ResponseType::NodeStatus),\n            x if x == ResponseType::DictionaryQueryResult as u8 => {\n                Ok(ResponseType::DictionaryQueryResult)\n            }\n            x if x == ResponseType::WasmV1Result as u8 => Ok(ResponseType::WasmV1Result),\n            x if x == ResponseType::BalanceResponse as u8 => Ok(ResponseType::BalanceResponse),\n            x if x == ResponseType::Reward as u8 => Ok(ResponseType::Reward),\n            x if x == ResponseType::ProtocolVersion as u8 => Ok(ResponseType::ProtocolVersion),\n            x if x == ResponseType::ContractPackageWithProof as u8 => {\n                Ok(ResponseType::ContractPackageWithProof)\n            }\n            x if x == ResponseType::ContractInformation as u8 => {\n                
Ok(ResponseType::ContractInformation)\n            }\n            x if x == ResponseType::AccountInformation as u8 => {\n                Ok(ResponseType::AccountInformation)\n            }\n            x if x == ResponseType::PackageWithProof as u8 => Ok(ResponseType::PackageWithProof),\n            x if x == ResponseType::AddressableEntityInformation as u8 => {\n                Ok(ResponseType::AddressableEntityInformation)\n            }\n            _ => Err(()),\n        }\n    }\n}\n\nimpl From<ResponseType> for u8 {\n    fn from(value: ResponseType) -> Self {\n        value as u8\n    }\n}\n\nimpl fmt::Display for ResponseType {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        match self {\n            ResponseType::BlockHeaderV1 => write!(f, \"BlockHeaderV1\"),\n            ResponseType::BlockHeader => write!(f, \"BlockHeader\"),\n            ResponseType::BlockBodyV1 => write!(f, \"BlockBodyV1\"),\n            ResponseType::BlockBody => write!(f, \"BlockBody\"),\n            ResponseType::ApprovalsHashesV1 => write!(f, \"ApprovalsHashesV1\"),\n            ResponseType::ApprovalsHashes => write!(f, \"ApprovalsHashes\"),\n            ResponseType::BlockSignaturesV1 => write!(f, \"BlockSignaturesV1\"),\n            ResponseType::BlockSignatures => write!(f, \"BlockSignatures\"),\n            ResponseType::Deploy => write!(f, \"Deploy\"),\n            ResponseType::Transaction => write!(f, \"Transaction\"),\n            ResponseType::ExecutionResultV1 => write!(f, \"ExecutionResultV1\"),\n            ResponseType::ExecutionResult => write!(f, \"ExecutionResult\"),\n            ResponseType::Transfers => write!(f, \"Transfers\"),\n            ResponseType::FinalizedDeployApprovals => write!(f, \"FinalizedDeployApprovals\"),\n            ResponseType::FinalizedApprovals => write!(f, \"FinalizedApprovals\"),\n            ResponseType::BlockWithSignatures => write!(f, \"BlockWithSignatures\"),\n            
ResponseType::TransactionWithExecutionInfo => write!(f, \"TransactionWithExecutionInfo\"),\n            ResponseType::Peers => write!(f, \"Peers\"),\n            ResponseType::LastProgress => write!(f, \"LastProgress\"),\n            ResponseType::ReactorState => write!(f, \"ReactorState\"),\n            ResponseType::NetworkName => write!(f, \"NetworkName\"),\n            ResponseType::ConsensusValidatorChanges => write!(f, \"ConsensusValidatorChanges\"),\n            ResponseType::BlockSynchronizerStatus => write!(f, \"BlockSynchronizerStatus\"),\n            ResponseType::AvailableBlockRange => write!(f, \"AvailableBlockRange\"),\n            ResponseType::NextUpgrade => write!(f, \"NextUpgrade\"),\n            ResponseType::ConsensusStatus => write!(f, \"ConsensusStatus\"),\n            ResponseType::ChainspecRawBytes => write!(f, \"ChainspecRawBytes\"),\n            ResponseType::Uptime => write!(f, \"Uptime\"),\n            ResponseType::HighestBlockSequenceCheckResult => {\n                write!(f, \"HighestBlockSequenceCheckResult\")\n            }\n            ResponseType::SpeculativeExecutionResult => write!(f, \"SpeculativeExecutionResult\"),\n            ResponseType::GlobalStateQueryResult => write!(f, \"GlobalStateQueryResult\"),\n            ResponseType::StoredValues => write!(f, \"StoredValues\"),\n            ResponseType::GetTrieFullResult => write!(f, \"GetTrieFullResult\"),\n            ResponseType::NodeStatus => write!(f, \"NodeStatus\"),\n            ResponseType::WasmV1Result => write!(f, \"WasmV1Result\"),\n            ResponseType::DictionaryQueryResult => write!(f, \"DictionaryQueryResult\"),\n            ResponseType::BalanceResponse => write!(f, \"BalanceResponse\"),\n            ResponseType::Reward => write!(f, \"Reward\"),\n            ResponseType::ProtocolVersion => write!(f, \"ProtocolVersion\"),\n            ResponseType::ContractPackageWithProof => write!(f, \"ContractPackageWithProof\"),\n            
ResponseType::ContractInformation => write!(f, \"ContractInformation\"),\n            ResponseType::AccountInformation => write!(f, \"AccountInformation\"),\n            ResponseType::PackageWithProof => write!(f, \"PackageWithProof\"),\n            ResponseType::AddressableEntityInformation => {\n                write!(f, \"AddressableEntityInformation\")\n            }\n        }\n    }\n}\n\n/// Represents an entity that can be sent as a payload.\npub trait PayloadEntity {\n    /// Returns the payload type of the entity.\n    const RESPONSE_TYPE: ResponseType;\n}\n\nimpl PayloadEntity for Transaction {\n    const RESPONSE_TYPE: ResponseType = ResponseType::Transaction;\n}\n\nimpl PayloadEntity for Deploy {\n    const RESPONSE_TYPE: ResponseType = ResponseType::Deploy;\n}\n\nimpl PayloadEntity for BlockHeader {\n    const RESPONSE_TYPE: ResponseType = ResponseType::BlockHeader;\n}\n\nimpl PayloadEntity for BlockHeaderV1 {\n    const RESPONSE_TYPE: ResponseType = ResponseType::BlockHeaderV1;\n}\n\nimpl PayloadEntity for BlockBody {\n    const RESPONSE_TYPE: ResponseType = ResponseType::BlockBody;\n}\n\nimpl PayloadEntity for BlockBodyV1 {\n    const RESPONSE_TYPE: ResponseType = ResponseType::BlockBodyV1;\n}\n\nimpl PayloadEntity for BlockSignatures {\n    const RESPONSE_TYPE: ResponseType = ResponseType::BlockSignatures;\n}\n\nimpl PayloadEntity for BlockSignaturesV1 {\n    const RESPONSE_TYPE: ResponseType = ResponseType::BlockSignaturesV1;\n}\n\nimpl PayloadEntity for ExecutionResult {\n    const RESPONSE_TYPE: ResponseType = ResponseType::ExecutionResult;\n}\n\nimpl PayloadEntity for ExecutionResultV1 {\n    const RESPONSE_TYPE: ResponseType = ResponseType::ExecutionResultV1;\n}\n\nimpl PayloadEntity for BlockWithSignatures {\n    const RESPONSE_TYPE: ResponseType = ResponseType::BlockWithSignatures;\n}\n\nimpl PayloadEntity for TransactionWithExecutionInfo {\n    const RESPONSE_TYPE: ResponseType = ResponseType::TransactionWithExecutionInfo;\n}\n\nimpl 
PayloadEntity for Peers {\n    const RESPONSE_TYPE: ResponseType = ResponseType::Peers;\n}\n\nimpl PayloadEntity for Vec<Transfer> {\n    const RESPONSE_TYPE: ResponseType = ResponseType::Transfers;\n}\n\nimpl PayloadEntity for AvailableBlockRange {\n    const RESPONSE_TYPE: ResponseType = ResponseType::AvailableBlockRange;\n}\n\nimpl PayloadEntity for ChainspecRawBytes {\n    const RESPONSE_TYPE: ResponseType = ResponseType::ChainspecRawBytes;\n}\n\nimpl PayloadEntity for ConsensusValidatorChanges {\n    const RESPONSE_TYPE: ResponseType = ResponseType::ConsensusValidatorChanges;\n}\n\nimpl PayloadEntity for GlobalStateQueryResult {\n    const RESPONSE_TYPE: ResponseType = ResponseType::GlobalStateQueryResult;\n}\n\nimpl PayloadEntity for DictionaryQueryResult {\n    const RESPONSE_TYPE: ResponseType = ResponseType::DictionaryQueryResult;\n}\n\nimpl PayloadEntity for Vec<StoredValue> {\n    const RESPONSE_TYPE: ResponseType = ResponseType::StoredValues;\n}\n\nimpl PayloadEntity for GetTrieFullResult {\n    const RESPONSE_TYPE: ResponseType = ResponseType::GetTrieFullResult;\n}\n\nimpl PayloadEntity for SpeculativeExecutionResult {\n    const RESPONSE_TYPE: ResponseType = ResponseType::SpeculativeExecutionResult;\n}\n\nimpl PayloadEntity for NodeStatus {\n    const RESPONSE_TYPE: ResponseType = ResponseType::NodeStatus;\n}\n\nimpl PayloadEntity for NextUpgrade {\n    const RESPONSE_TYPE: ResponseType = ResponseType::NextUpgrade;\n}\n\nimpl PayloadEntity for Uptime {\n    const RESPONSE_TYPE: ResponseType = ResponseType::Uptime;\n}\n\nimpl PayloadEntity for LastProgress {\n    const RESPONSE_TYPE: ResponseType = ResponseType::LastProgress;\n}\n\nimpl PayloadEntity for ReactorStateName {\n    const RESPONSE_TYPE: ResponseType = ResponseType::ReactorState;\n}\n\nimpl PayloadEntity for NetworkName {\n    const RESPONSE_TYPE: ResponseType = ResponseType::NetworkName;\n}\n\nimpl PayloadEntity for BlockSynchronizerStatus {\n    const RESPONSE_TYPE: ResponseType = 
ResponseType::BlockSynchronizerStatus;\n}\n\nimpl PayloadEntity for ConsensusStatus {\n    const RESPONSE_TYPE: ResponseType = ResponseType::ConsensusStatus;\n}\n\nimpl PayloadEntity for BalanceResponse {\n    const RESPONSE_TYPE: ResponseType = ResponseType::BalanceResponse;\n}\n\nimpl PayloadEntity for RewardResponse {\n    const RESPONSE_TYPE: ResponseType = ResponseType::Reward;\n}\n\nimpl PayloadEntity for ProtocolVersion {\n    const RESPONSE_TYPE: ResponseType = ResponseType::ProtocolVersion;\n}\n\nimpl PayloadEntity for ValueWithProof<ContractPackage> {\n    const RESPONSE_TYPE: ResponseType = ResponseType::ContractPackageWithProof;\n}\n\nimpl PayloadEntity for ContractInformation {\n    const RESPONSE_TYPE: ResponseType = ResponseType::ContractInformation;\n}\n\nimpl PayloadEntity for AccountInformation {\n    const RESPONSE_TYPE: ResponseType = ResponseType::AccountInformation;\n}\n\nimpl PayloadEntity for ValueWithProof<Package> {\n    const RESPONSE_TYPE: ResponseType = ResponseType::PackageWithProof;\n}\n\nimpl PayloadEntity for AddressableEntityInformation {\n    const RESPONSE_TYPE: ResponseType = ResponseType::AddressableEntityInformation;\n}\n\nimpl<T> PayloadEntity for Box<T>\nwhere\n    T: PayloadEntity,\n{\n    const RESPONSE_TYPE: ResponseType = T::RESPONSE_TYPE;\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::testing::TestRng;\n\n    #[test]\n    fn convert_u8_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = ResponseType::random(rng);\n        assert_eq!(ResponseType::try_from(val as u8), Ok(val));\n    }\n}\n"
  },
  {
    "path": "binary_port/src/speculative_execution_result.rs",
    "content": "use once_cell::sync::Lazy;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"testing\", test))]\nuse rand::distributions::{Alphanumeric, DistString};\n\n#[cfg(any(feature = \"testing\", test))]\nuse casper_types::testing::TestRng;\nuse casper_types::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    contract_messages::Messages,\n    execution::Effects,\n    BlockHash, Digest, Gas, InvalidTransaction, Transfer,\n};\n\nstatic SPECULATIVE_EXECUTION_RESULT: Lazy<SpeculativeExecutionResult> = Lazy::new(|| {\n    SpeculativeExecutionResult::new(\n        BlockHash::new(Digest::from([0; Digest::LENGTH])),\n        vec![],\n        Gas::zero(),\n        Gas::zero(),\n        Effects::new(),\n        Messages::new(),\n        None,\n    )\n});\n\n#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)]\npub struct SpeculativeExecutionResult {\n    /// Block hash against which the execution was performed.\n    block_hash: BlockHash,\n    /// List of transfers that happened during execution.\n    transfers: Vec<Transfer>,\n    /// Gas limit.\n    limit: Gas,\n    /// Gas consumed.\n    consumed: Gas,\n    /// Execution effects.\n    effects: Effects,\n    /// Messages emitted during execution.\n    messages: Messages,\n    /// Did the wasm execute successfully?\n    error: Option<String>,\n}\n\nimpl SpeculativeExecutionResult {\n    pub fn new(\n        block_hash: BlockHash,\n        transfers: Vec<Transfer>,\n        limit: Gas,\n        consumed: Gas,\n        effects: Effects,\n        messages: Messages,\n        error: Option<String>,\n    ) -> Self {\n        SpeculativeExecutionResult {\n            transfers,\n            limit,\n            consumed,\n            effects,\n            messages,\n            error,\n            block_hash,\n        }\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    pub fn example() -> 
&'static Self {\n        &SPECULATIVE_EXECUTION_RESULT\n    }\n\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        use casper_types::contract_messages::Message;\n\n        let random_messages = |rng: &mut TestRng| -> Messages {\n            let count = rng.gen_range(16..128);\n            std::iter::repeat_with(|| Message::random(rng))\n                .take(count)\n                .collect()\n        };\n\n        SpeculativeExecutionResult {\n            block_hash: BlockHash::new(rng.gen()),\n            transfers: vec![Transfer::random(rng)],\n            limit: Gas::random(rng),\n            consumed: Gas::random(rng),\n            effects: Effects::random(rng),\n            messages: random_messages(rng),\n            error: if rng.gen() {\n                None\n            } else {\n                let count = rng.gen_range(16..128);\n                Some(Alphanumeric.sample_string(rng, count))\n            },\n        }\n    }\n}\n\nimpl From<InvalidTransaction> for SpeculativeExecutionResult {\n    fn from(invalid_transaction: InvalidTransaction) -> Self {\n        SpeculativeExecutionResult {\n            transfers: Default::default(),\n            limit: Default::default(),\n            consumed: Default::default(),\n            effects: Default::default(),\n            messages: Default::default(),\n            error: Some(format!(\"{}\", invalid_transaction)),\n            block_hash: Default::default(),\n        }\n    }\n}\n\nimpl ToBytes for SpeculativeExecutionResult {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        ToBytes::serialized_length(&self.transfers)\n            + ToBytes::serialized_length(&self.limit)\n            + ToBytes::serialized_length(&self.consumed)\n            + 
ToBytes::serialized_length(&self.effects)\n            + ToBytes::serialized_length(&self.messages)\n            + ToBytes::serialized_length(&self.error)\n            + ToBytes::serialized_length(&self.block_hash)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.transfers.write_bytes(writer)?;\n        self.limit.write_bytes(writer)?;\n        self.consumed.write_bytes(writer)?;\n        self.effects.write_bytes(writer)?;\n        self.messages.write_bytes(writer)?;\n        self.error.write_bytes(writer)?;\n        self.block_hash.write_bytes(writer)\n    }\n}\n\nimpl FromBytes for SpeculativeExecutionResult {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (transfers, bytes) = Vec::<Transfer>::from_bytes(bytes)?;\n        let (limit, bytes) = Gas::from_bytes(bytes)?;\n        let (consumed, bytes) = Gas::from_bytes(bytes)?;\n        let (effects, bytes) = Effects::from_bytes(bytes)?;\n        let (messages, bytes) = Messages::from_bytes(bytes)?;\n        let (error, bytes) = Option::<String>::from_bytes(bytes)?;\n        let (block_hash, bytes) = BlockHash::from_bytes(bytes)?;\n        Ok((\n            SpeculativeExecutionResult {\n                transfers,\n                limit,\n                consumed,\n                effects,\n                messages,\n                error,\n                block_hash,\n            },\n            bytes,\n        ))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::testing::TestRng;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = SpeculativeExecutionResult::random(rng);\n        bytesrepr::test_serialization_roundtrip(&val);\n    }\n}\n"
  },
  {
    "path": "binary_port/src/state_request.rs",
    "content": "use std::fmt::{Display, Formatter, Result as DisplayResult};\n\nuse crate::entity_qualifier::GlobalStateEntityQualifier;\n#[cfg(test)]\nuse casper_types::testing::TestRng;\nuse casper_types::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    GlobalStateIdentifier,\n};\n#[cfg(test)]\nuse rand::Rng;\n\n/// A request to get data from the global state.\n#[derive(Clone, Debug, PartialEq)]\npub struct GlobalStateRequest {\n    /// Global state identifier, `None` means \"latest block state\".\n    state_identifier: Option<GlobalStateIdentifier>,\n    /// qualifier that points to a specific item (or items) in the global state.\n    qualifier: GlobalStateEntityQualifier,\n}\n\nimpl GlobalStateRequest {\n    pub fn new(\n        state_identifier: Option<GlobalStateIdentifier>,\n        qualifier: GlobalStateEntityQualifier,\n    ) -> Self {\n        GlobalStateRequest {\n            state_identifier,\n            qualifier,\n        }\n    }\n    pub fn destructure(self) -> (Option<GlobalStateIdentifier>, GlobalStateEntityQualifier) {\n        (self.state_identifier, self.qualifier)\n    }\n\n    pub fn state_identifier(self) -> Option<GlobalStateIdentifier> {\n        self.state_identifier\n    }\n\n    #[cfg(test)]\n    pub(crate) fn random(rng: &mut TestRng) -> Self {\n        let state_identifier = rng\n            .gen::<bool>()\n            .then(|| GlobalStateIdentifier::random(rng));\n        let qualifier = GlobalStateEntityQualifier::random(rng);\n        Self {\n            state_identifier,\n            qualifier,\n        }\n    }\n}\n\nimpl ToBytes for GlobalStateRequest {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.state_identifier.write_bytes(writer)?;\n        self.qualifier.write_bytes(writer)?;\n   
     Ok(())\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.state_identifier.serialized_length() + self.qualifier.serialized_length()\n    }\n}\n\nimpl FromBytes for GlobalStateRequest {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (state_identifier, remainder) = FromBytes::from_bytes(bytes)?;\n        let (qualifier, remainder) = FromBytes::from_bytes(remainder)?;\n        Ok((\n            GlobalStateRequest {\n                state_identifier,\n                qualifier,\n            },\n            remainder,\n        ))\n    }\n}\n\nimpl Display for GlobalStateRequest {\n    fn fmt(&self, f: &mut Formatter<'_>) -> DisplayResult {\n        match self.qualifier {\n            GlobalStateEntityQualifier::Item { base_key, .. } => {\n                write!(f, \"get item from global state ({})\", base_key)\n            }\n            GlobalStateEntityQualifier::AllItems { key_tag, .. } => {\n                write!(f, \"get all items ({})\", key_tag)\n            }\n            GlobalStateEntityQualifier::DictionaryItem { .. } => {\n                write!(f, \"get dictionary item\")\n            }\n            GlobalStateEntityQualifier::Balance { .. } => {\n                write!(f, \"get balance by state root\",)\n            }\n            GlobalStateEntityQualifier::ItemsByPrefix { .. } => {\n                write!(f, \"get items by prefix\")\n            }\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::testing::TestRng;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        let val = GlobalStateRequest::random(rng);\n        bytesrepr::test_serialization_roundtrip(&val);\n    }\n}\n"
  },
  {
    "path": "binary_port/src/type_wrappers.rs",
    "content": "use core::{convert::TryFrom, num::TryFromIntError, time::Duration};\nuse std::collections::BTreeMap;\n\nuse casper_types::{\n    bytesrepr::{self, Bytes, FromBytes, ToBytes},\n    contracts::ContractHash,\n    global_state::TrieMerkleProof,\n    system::auction::DelegationRate,\n    Account, AddressableEntity, BlockHash, ByteCode, Contract, ContractWasm, EntityAddr, EraId,\n    ExecutionInfo, Key, PublicKey, StoredValue, TimeDiff, Timestamp, Transaction, ValidatorChange,\n    U512,\n};\nuse serde::Serialize;\n\nuse super::GlobalStateQueryResult;\n\n// `bytesrepr` implementations for type wrappers are repetitive, hence this macro helper. We should\n// get rid of this after we introduce the proper \"bytesrepr-derive\" proc macro.\nmacro_rules! impl_bytesrepr_for_type_wrapper {\n    ($t:ident) => {\n        impl ToBytes for $t {\n            fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n                self.0.to_bytes()\n            }\n\n            fn serialized_length(&self) -> usize {\n                self.0.serialized_length()\n            }\n\n            fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n                self.0.write_bytes(writer)\n            }\n        }\n\n        impl FromBytes for $t {\n            fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n                let (inner, remainder) = FromBytes::from_bytes(bytes)?;\n                Ok(($t(inner), remainder))\n            }\n        }\n    };\n}\n\n/// Type representing uptime.\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)]\npub struct Uptime(u64);\n\nimpl Uptime {\n    /// Constructs new uptime.\n    pub fn new(value: u64) -> Self {\n        Self(value)\n    }\n\n    /// Retrieve the inner value.\n    pub fn into_inner(self) -> u64 {\n        self.0\n    }\n}\n\nimpl From<Uptime> for Duration {\n    fn from(uptime: Uptime) -> Self {\n        Duration::from_secs(uptime.0)\n    }\n}\n\nimpl 
TryFrom<Uptime> for TimeDiff {\n    type Error = TryFromIntError;\n\n    fn try_from(uptime: Uptime) -> Result<Self, Self::Error> {\n        u32::try_from(uptime.0).map(TimeDiff::from_seconds)\n    }\n}\n\n/// Type representing changes in consensus validators.\n#[derive(Debug, PartialEq, Eq, Serialize)]\npub struct ConsensusValidatorChanges(BTreeMap<PublicKey, Vec<(EraId, ValidatorChange)>>);\n\nimpl ConsensusValidatorChanges {\n    /// Constructs new consensus validator changes.\n    pub fn new(value: BTreeMap<PublicKey, Vec<(EraId, ValidatorChange)>>) -> Self {\n        Self(value)\n    }\n\n    /// Retrieve the inner value.\n    pub fn into_inner(self) -> BTreeMap<PublicKey, Vec<(EraId, ValidatorChange)>> {\n        self.0\n    }\n}\n\nimpl From<ConsensusValidatorChanges> for BTreeMap<PublicKey, Vec<(EraId, ValidatorChange)>> {\n    fn from(consensus_validator_changes: ConsensusValidatorChanges) -> Self {\n        consensus_validator_changes.0\n    }\n}\n\n/// Type representing network name.\n#[derive(Debug, PartialEq, Eq, Serialize)]\npub struct NetworkName(String);\n\nimpl NetworkName {\n    /// Constructs new network name.\n    pub fn new(value: impl ToString) -> Self {\n        Self(value.to_string())\n    }\n\n    /// Retrieve the inner value.\n    pub fn into_inner(self) -> String {\n        self.0\n    }\n}\n\nimpl From<NetworkName> for String {\n    fn from(network_name: NetworkName) -> Self {\n        network_name.0\n    }\n}\n\n/// Type representing the reactor state name.\n#[derive(Debug, PartialEq, Eq, Serialize)]\npub struct ReactorStateName(String);\n\nimpl ReactorStateName {\n    /// Constructs new reactor state name.\n    pub fn new(value: impl ToString) -> Self {\n        Self(value.to_string())\n    }\n\n    /// Retrieve the name as a `String`.\n    pub fn into_inner(self) -> String {\n        self.0\n    }\n}\n\nimpl From<ReactorStateName> for String {\n    fn from(reactor_state: ReactorStateName) -> Self {\n        reactor_state.0\n    
}\n}\n\n/// Type representing last progress of the sync process.\n#[derive(Debug, PartialEq, Eq, Serialize)]\npub struct LastProgress(Timestamp);\n\nimpl LastProgress {\n    /// Constructs new last progress.\n    pub fn new(value: Timestamp) -> Self {\n        Self(value)\n    }\n\n    /// Retrieve the inner value.\n    pub fn into_inner(self) -> Timestamp {\n        self.0\n    }\n}\n\nimpl From<LastProgress> for Timestamp {\n    fn from(last_progress: LastProgress) -> Self {\n        last_progress.0\n    }\n}\n\n/// Type representing results of the get full trie request.\n#[derive(Debug, PartialEq, Eq)]\npub struct GetTrieFullResult(Option<Bytes>);\n\nimpl GetTrieFullResult {\n    /// Constructs new get trie result.\n    pub fn new(value: Option<Bytes>) -> Self {\n        Self(value)\n    }\n\n    /// Returns the inner value.\n    pub fn into_inner(self) -> Option<Bytes> {\n        self.0\n    }\n}\n\n/// Type representing the reward of a validator or a delegator.\n#[derive(Debug, PartialEq, Eq, Serialize)]\npub struct RewardResponse {\n    amount: U512,\n    era_id: EraId,\n    delegation_rate: DelegationRate,\n    switch_block_hash: BlockHash,\n}\n\nimpl RewardResponse {\n    /// Constructs new reward response.\n    pub fn new(\n        amount: U512,\n        era_id: EraId,\n        delegation_rate: DelegationRate,\n        switch_block_hash: BlockHash,\n    ) -> Self {\n        Self {\n            amount,\n            era_id,\n            delegation_rate,\n            switch_block_hash,\n        }\n    }\n\n    /// Returns the amount of the reward.\n    pub fn amount(&self) -> U512 {\n        self.amount\n    }\n\n    /// Returns the era ID.\n    pub fn era_id(&self) -> EraId {\n        self.era_id\n    }\n\n    /// Returns the delegation rate of the validator.\n    pub fn delegation_rate(&self) -> DelegationRate {\n        self.delegation_rate\n    }\n\n    /// Returns the switch block hash at which the reward was distributed.\n    pub fn 
switch_block_hash(&self) -> BlockHash {\n        self.switch_block_hash\n    }\n}\n\nimpl ToBytes for RewardResponse {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.amount.serialized_length()\n            + self.era_id.serialized_length()\n            + self.delegation_rate.serialized_length()\n            + self.switch_block_hash.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.amount.write_bytes(writer)?;\n        self.era_id.write_bytes(writer)?;\n        self.delegation_rate.write_bytes(writer)?;\n        self.switch_block_hash.write_bytes(writer)\n    }\n}\n\nimpl FromBytes for RewardResponse {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (amount, remainder) = FromBytes::from_bytes(bytes)?;\n        let (era_id, remainder) = FromBytes::from_bytes(remainder)?;\n        let (delegation_rate, remainder) = FromBytes::from_bytes(remainder)?;\n        let (switch_block_hash, remainder) = FromBytes::from_bytes(remainder)?;\n        Ok((\n            RewardResponse::new(amount, era_id, delegation_rate, switch_block_hash),\n            remainder,\n        ))\n    }\n}\n\n/// Describes the consensus status.\n#[derive(Debug, PartialEq, Eq, Serialize)]\npub struct ConsensusStatus {\n    validator_public_key: PublicKey,\n    round_length: Option<TimeDiff>,\n}\n\nimpl ConsensusStatus {\n    /// Constructs new consensus status.\n    pub fn new(validator_public_key: PublicKey, round_length: Option<TimeDiff>) -> Self {\n        Self {\n            validator_public_key,\n            round_length,\n        }\n    }\n\n    /// Returns the validator public key.\n    pub fn validator_public_key(&self) -> &PublicKey {\n        &self.validator_public_key\n    
}\n\n    /// Returns the round length.\n    pub fn round_length(&self) -> Option<TimeDiff> {\n        self.round_length\n    }\n}\n\nimpl ToBytes for ConsensusStatus {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.validator_public_key.serialized_length() + self.round_length.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.validator_public_key.write_bytes(writer)?;\n        self.round_length.write_bytes(writer)\n    }\n}\n\nimpl FromBytes for ConsensusStatus {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (validator_public_key, remainder) = FromBytes::from_bytes(bytes)?;\n        let (round_length, remainder) = FromBytes::from_bytes(remainder)?;\n        Ok((\n            ConsensusStatus::new(validator_public_key, round_length),\n            remainder,\n        ))\n    }\n}\n\n/// A transaction with execution info.\n#[derive(Debug, PartialEq, Eq, Serialize)]\npub struct TransactionWithExecutionInfo {\n    transaction: Transaction,\n    execution_info: Option<ExecutionInfo>,\n}\n\nimpl TransactionWithExecutionInfo {\n    /// Constructs new transaction with execution info.\n    pub fn new(transaction: Transaction, execution_info: Option<ExecutionInfo>) -> Self {\n        Self {\n            transaction,\n            execution_info,\n        }\n    }\n\n    /// Converts `self` into the transaction and execution info.\n    pub fn into_inner(self) -> (Transaction, Option<ExecutionInfo>) {\n        (self.transaction, self.execution_info)\n    }\n}\n\nimpl ToBytes for TransactionWithExecutionInfo {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut 
buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.transaction.serialized_length() + self.execution_info.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.transaction.write_bytes(writer)?;\n        self.execution_info.write_bytes(writer)\n    }\n}\n\nimpl FromBytes for TransactionWithExecutionInfo {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (transaction, remainder) = FromBytes::from_bytes(bytes)?;\n        let (execution_info, remainder) = FromBytes::from_bytes(remainder)?;\n        Ok((\n            TransactionWithExecutionInfo::new(transaction, execution_info),\n            remainder,\n        ))\n    }\n}\n\n/// A query result for a dictionary item, contains the dictionary item key and a global state query\n/// result.\n#[derive(Debug, Clone, PartialEq)]\npub struct DictionaryQueryResult {\n    key: Key,\n    query_result: GlobalStateQueryResult,\n}\n\nimpl DictionaryQueryResult {\n    /// Constructs new dictionary query result.\n    pub fn new(key: Key, query_result: GlobalStateQueryResult) -> Self {\n        Self { key, query_result }\n    }\n\n    /// Converts `self` into the dictionary item key and global state query result.\n    pub fn into_inner(self) -> (Key, GlobalStateQueryResult) {\n        (self.key, self.query_result)\n    }\n}\n\nimpl ToBytes for DictionaryQueryResult {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.key.write_bytes(writer)?;\n        self.query_result.write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.key.serialized_length() + self.query_result.serialized_length()\n    }\n}\n\nimpl 
FromBytes for DictionaryQueryResult {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (key, remainder) = FromBytes::from_bytes(bytes)?;\n        let (query_result, remainder) = FromBytes::from_bytes(remainder)?;\n        Ok((DictionaryQueryResult::new(key, query_result), remainder))\n    }\n}\n\n/// An account with its associated merkle proof.\n#[derive(Debug, PartialEq)]\npub struct AccountInformation {\n    account: Account,\n    merkle_proof: Vec<TrieMerkleProof<Key, StoredValue>>,\n}\n\nimpl AccountInformation {\n    /// Constructs a new `AccountResponse`.\n    pub fn new(account: Account, merkle_proof: Vec<TrieMerkleProof<Key, StoredValue>>) -> Self {\n        Self {\n            account,\n            merkle_proof,\n        }\n    }\n\n    /// Returns the inner `Account`.\n    pub fn account(&self) -> &Account {\n        &self.account\n    }\n\n    /// Returns the merkle proof.\n    pub fn merkle_proof(&self) -> &Vec<TrieMerkleProof<Key, StoredValue>> {\n        &self.merkle_proof\n    }\n\n    /// Converts `self` into the account and merkle proof.\n    pub fn into_inner(self) -> (Account, Vec<TrieMerkleProof<Key, StoredValue>>) {\n        (self.account, self.merkle_proof)\n    }\n}\n\nimpl ToBytes for AccountInformation {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.account.write_bytes(writer)?;\n        self.merkle_proof.write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.account.serialized_length() + self.merkle_proof.serialized_length()\n    }\n}\n\nimpl FromBytes for AccountInformation {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (account, remainder) = FromBytes::from_bytes(bytes)?;\n       
 let (merkle_proof, remainder) = FromBytes::from_bytes(remainder)?;\n        Ok((AccountInformation::new(account, merkle_proof), remainder))\n    }\n}\n\n/// A contract with its associated Wasm and merkle proof.\n#[derive(Debug, PartialEq)]\npub struct ContractInformation {\n    hash: ContractHash,\n    contract: ValueWithProof<Contract>,\n    wasm: Option<ValueWithProof<ContractWasm>>,\n}\n\nimpl ContractInformation {\n    /// Constructs new `ContractInformation`.\n    pub fn new(\n        hash: ContractHash,\n        contract: ValueWithProof<Contract>,\n        wasm: Option<ValueWithProof<ContractWasm>>,\n    ) -> Self {\n        Self {\n            hash,\n            contract,\n            wasm,\n        }\n    }\n\n    /// Returns the hash of the contract.\n    pub fn hash(&self) -> ContractHash {\n        self.hash\n    }\n\n    /// Returns the inner `Contract`.\n    pub fn contract(&self) -> &Contract {\n        &self.contract.value\n    }\n\n    /// Returns the Merkle proof of the contract.\n    pub fn contract_proof(&self) -> &Vec<TrieMerkleProof<Key, StoredValue>> {\n        &self.contract.merkle_proof\n    }\n\n    /// Returns the inner `ContractWasm` with its proof.\n    pub fn wasm(&self) -> Option<&ValueWithProof<ContractWasm>> {\n        self.wasm.as_ref()\n    }\n\n    /// Converts `self` into the contract hash, contract and Wasm.\n    pub fn into_inner(\n        self,\n    ) -> (\n        ContractHash,\n        ValueWithProof<Contract>,\n        Option<ValueWithProof<ContractWasm>>,\n    ) {\n        (self.hash, self.contract, self.wasm)\n    }\n}\n\nimpl ToBytes for ContractInformation {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.hash.write_bytes(writer)?;\n        self.contract.write_bytes(writer)?;\n    
    self.wasm.write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.hash.serialized_length()\n            + self.contract.serialized_length()\n            + self.wasm.serialized_length()\n    }\n}\n\nimpl FromBytes for ContractInformation {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (hash, remainder) = FromBytes::from_bytes(bytes)?;\n        let (contract, remainder) = FromBytes::from_bytes(remainder)?;\n        let (wasm, remainder) = FromBytes::from_bytes(remainder)?;\n        Ok((ContractInformation::new(hash, contract, wasm), remainder))\n    }\n}\n\n/// A contract entity with its associated ByteCode.\n#[derive(Debug, PartialEq)]\npub struct AddressableEntityInformation {\n    addr: EntityAddr,\n    entity: ValueWithProof<AddressableEntity>,\n    bytecode: Option<ValueWithProof<ByteCode>>,\n}\n\nimpl AddressableEntityInformation {\n    /// Constructs new contract entity with ByteCode.\n    pub fn new(\n        addr: EntityAddr,\n        entity: ValueWithProof<AddressableEntity>,\n        bytecode: Option<ValueWithProof<ByteCode>>,\n    ) -> Self {\n        Self {\n            addr,\n            entity,\n            bytecode,\n        }\n    }\n\n    /// Returns the entity address.\n    pub fn addr(&self) -> EntityAddr {\n        self.addr\n    }\n\n    /// Returns the inner `AddressableEntity`.\n    pub fn entity(&self) -> &AddressableEntity {\n        &self.entity.value\n    }\n\n    /// Returns the inner `ByteCodeWithProof`.\n    pub fn entity_merkle_proof(&self) -> &Vec<TrieMerkleProof<Key, StoredValue>> {\n        &self.entity.merkle_proof\n    }\n\n    /// Returns the inner `ByteCode`.\n    pub fn bytecode(&self) -> Option<&ValueWithProof<ByteCode>> {\n        self.bytecode.as_ref()\n    }\n\n    /// Converts `self` into the entity address, entity and ByteCode.\n    pub fn into_inner(\n        self,\n    ) -> (\n        EntityAddr,\n        ValueWithProof<AddressableEntity>,\n 
       Option<ValueWithProof<ByteCode>>,\n    ) {\n        (self.addr, self.entity, self.bytecode)\n    }\n}\n\nimpl ToBytes for AddressableEntityInformation {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.addr.write_bytes(writer)?;\n        self.entity.write_bytes(writer)?;\n        self.bytecode.write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.addr.serialized_length()\n            + self.entity.serialized_length()\n            + self.bytecode.serialized_length()\n    }\n}\n\nimpl FromBytes for AddressableEntityInformation {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (addr, remainder) = FromBytes::from_bytes(bytes)?;\n        let (entity, remainder) = FromBytes::from_bytes(remainder)?;\n        let (bytecode, remainder) = FromBytes::from_bytes(remainder)?;\n        Ok((\n            AddressableEntityInformation::new(addr, entity, bytecode),\n            remainder,\n        ))\n    }\n}\n\n/// A value with its associated Merkle proof.\n#[derive(Debug, PartialEq)]\npub struct ValueWithProof<T> {\n    value: T,\n    merkle_proof: Vec<TrieMerkleProof<Key, StoredValue>>,\n}\n\nimpl<T> ValueWithProof<T> {\n    /// Constructs a new `ValueWithProof`.\n    pub fn new(value: T, merkle_proof: Vec<TrieMerkleProof<Key, StoredValue>>) -> Self {\n        Self {\n            value,\n            merkle_proof,\n        }\n    }\n\n    /// Returns the value.\n    pub fn value(&self) -> &T {\n        &self.value\n    }\n\n    /// Returns the Merkle proof.\n    pub fn merkle_proof(&self) -> &[TrieMerkleProof<Key, StoredValue>] {\n        &self.merkle_proof\n    }\n\n    /// Converts `self` into the value and Merkle proof.\n    pub fn into_inner(self) -> (T, 
Vec<TrieMerkleProof<Key, StoredValue>>) {\n        (self.value, self.merkle_proof)\n    }\n}\n\nimpl<T: ToBytes> ToBytes for ValueWithProof<T> {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.value.write_bytes(writer)?;\n        self.merkle_proof.write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.value.serialized_length() + self.merkle_proof.serialized_length()\n    }\n}\n\nimpl<T: FromBytes> FromBytes for ValueWithProof<T> {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (value, remainder) = FromBytes::from_bytes(bytes)?;\n        let (merkle_proof, remainder) = FromBytes::from_bytes(remainder)?;\n        Ok((ValueWithProof::new(value, merkle_proof), remainder))\n    }\n}\n\nimpl_bytesrepr_for_type_wrapper!(Uptime);\nimpl_bytesrepr_for_type_wrapper!(ConsensusValidatorChanges);\nimpl_bytesrepr_for_type_wrapper!(NetworkName);\nimpl_bytesrepr_for_type_wrapper!(ReactorStateName);\nimpl_bytesrepr_for_type_wrapper!(LastProgress);\nimpl_bytesrepr_for_type_wrapper!(GetTrieFullResult);\n\n#[cfg(test)]\nmod tests {\n    use core::iter::FromIterator;\n    use rand::Rng;\n\n    use super::*;\n    use casper_types::{\n        contracts::ContractPackageHash, execution::ExecutionResult, testing::TestRng, BlockHash,\n        CLValue, ContractWasmHash, StoredValue,\n    };\n\n    #[test]\n    fn uptime_roundtrip() {\n        let rng = &mut TestRng::new();\n        bytesrepr::test_serialization_roundtrip(&Uptime::new(rng.gen()));\n    }\n\n    #[test]\n    fn consensus_validator_changes_roundtrip() {\n        let rng = &mut TestRng::new();\n        let map = BTreeMap::from_iter([(\n            PublicKey::random(rng),\n            vec![(EraId::random(rng), 
ValidatorChange::random(rng))],\n        )]);\n        bytesrepr::test_serialization_roundtrip(&ConsensusValidatorChanges::new(map));\n    }\n\n    #[test]\n    fn network_name_roundtrip() {\n        let rng = &mut TestRng::new();\n        bytesrepr::test_serialization_roundtrip(&NetworkName::new(rng.random_string(5..20)));\n    }\n\n    #[test]\n    fn reactor_state_name_roundtrip() {\n        let rng = &mut TestRng::new();\n        bytesrepr::test_serialization_roundtrip(&ReactorStateName::new(rng.random_string(5..20)));\n    }\n\n    #[test]\n    fn last_progress_roundtrip() {\n        let rng = &mut TestRng::new();\n        bytesrepr::test_serialization_roundtrip(&LastProgress::new(Timestamp::random(rng)));\n    }\n\n    #[test]\n    fn get_trie_full_result_roundtrip() {\n        let rng = &mut TestRng::new();\n        bytesrepr::test_serialization_roundtrip(&GetTrieFullResult::new(rng.gen()));\n    }\n\n    #[test]\n    fn reward_roundtrip() {\n        let rng = &mut TestRng::new();\n        bytesrepr::test_serialization_roundtrip(&RewardResponse::new(\n            rng.gen(),\n            EraId::random(rng),\n            rng.gen(),\n            BlockHash::random(rng),\n        ));\n    }\n\n    #[test]\n    fn consensus_status_roundtrip() {\n        let rng = &mut TestRng::new();\n        bytesrepr::test_serialization_roundtrip(&ConsensusStatus::new(\n            PublicKey::random(rng),\n            Some(TimeDiff::from_millis(rng.gen())),\n        ));\n    }\n\n    #[test]\n    fn transaction_with_execution_info_roundtrip() {\n        let rng = &mut TestRng::new();\n        bytesrepr::test_serialization_roundtrip(&TransactionWithExecutionInfo::new(\n            Transaction::random(rng),\n            rng.gen::<bool>().then(|| ExecutionInfo {\n                block_hash: BlockHash::random(rng),\n                block_height: rng.gen(),\n                execution_result: rng.gen::<bool>().then(|| ExecutionResult::random(rng)),\n            }),\n        ));\n    
}\n\n    #[test]\n    fn dictionary_query_result_roundtrip() {\n        let rng = &mut TestRng::new();\n        bytesrepr::test_serialization_roundtrip(&DictionaryQueryResult::new(\n            Key::Account(rng.gen()),\n            GlobalStateQueryResult::new(\n                StoredValue::CLValue(CLValue::from_t(rng.gen::<i32>()).unwrap()),\n                vec![],\n            ),\n        ));\n    }\n\n    #[test]\n    fn contract_with_wasm_roundtrip() {\n        let rng = &mut TestRng::new();\n        bytesrepr::test_serialization_roundtrip(&ContractInformation::new(\n            ContractHash::new(rng.gen()),\n            ValueWithProof::new(\n                Contract::new(\n                    ContractPackageHash::new(rng.gen()),\n                    ContractWasmHash::new(rng.gen()),\n                    Default::default(),\n                    Default::default(),\n                    Default::default(),\n                ),\n                Default::default(),\n            ),\n            rng.gen::<bool>().then(|| {\n                ValueWithProof::new(\n                    ContractWasm::new(rng.random_vec(10..50)),\n                    Default::default(),\n                )\n            }),\n        ));\n    }\n\n    #[test]\n    fn addressable_entity_with_byte_code_roundtrip() {\n        let rng = &mut TestRng::new();\n        bytesrepr::test_serialization_roundtrip(&AddressableEntityInformation::new(\n            rng.gen(),\n            ValueWithProof::new(AddressableEntity::example().clone(), Default::default()),\n            rng.gen::<bool>().then(|| {\n                ValueWithProof::new(\n                    ByteCode::new(rng.gen(), rng.random_vec(10..50)),\n                    Default::default(),\n                )\n            }),\n        ));\n    }\n}\n"
  },
  {
    "path": "bors.toml",
    "content": "# require the below checks before bors merges anything\nstatus = [\n  \"continuous-integration/drone/push\"\n]\n\n# Ensure that reviewers (all maintainers!) can't merge their own PRs without review.\n# This works because Github doesn't allow self-review.\nrequired_approvals = 1\n\n# Number of seconds from when a merge commit is created to when its statuses must pass.\ntimeout_sec = 10800 #3h\n\n# A marker in the PR description that indicates boilerplate that does not belong in the merge-commit message.\ncut_body_after = \"## Overview\"\n"
  },
  {
    "path": "build_wasm_package.sh",
    "content": "#!/bin/bash\n\nabspath() {\n  # generate absolute path from relative path\n  # $1     : relative filename\n  # return : absolute path\n  if [ -d \"$1\" ]; then\n    # dir\n    (cd \"$1\"; pwd)\n  elif [ -f \"$1\" ]; then\n    # file\n    if [[ $1 == */* ]]; then\n      echo \"$(cd \"${1%/*}\"; pwd)/${1##*/}\"\n    else\n      echo \"$(pwd)/$1\"\n    fi\n  fi\n}\n\nexport RUN_DIR=$(dirname $(abspath $0))\nNODE_CONFIG_FILE=\"$RUN_DIR/node/Cargo.toml\"\n# have to be sed instead of grep -oP to work in alpine docker image\nexport WASM_PACKAGE_VERSION=\"$(grep ^version $NODE_CONFIG_FILE | sed -e s'/.*= \"//' | sed -e s'/\".*//')\"\nexport CL_WASM_DIR=\"$RUN_DIR/target/wasm32-unknown-unknown/release\"\nexport CL_OUTPUT_S3_DIR=\"$RUN_DIR/s3_artifacts/${WASM_PACKAGE_VERSION}\"\nexport CL_WASM_PACKAGE=\"$CL_OUTPUT_S3_DIR/casper-contracts.tar.gz\"\nexport CL_S3_BUCKET='casperlabs-cicd-artifacts'\nexport CL_S3_LOCATION=\"wasm_contracts/${WASM_PACKAGE_VERSION}\"\n\nif [ ! -d $CL_OUTPUT_S3_DIR ]; then\n  mkdir -p \"${CL_OUTPUT_S3_DIR}\"\nfi\n# package all wasm files\necho \"[INFO] Checking if wasm files are ready under the path $CL_WASM_DIR\"\nif [ -d \"$CL_WASM_DIR\" ]; then\n  ls -al $CL_WASM_DIR/*wasm\n  echo \"[INFO] Creating a tar.gz package: $CL_WASM_PACKAGE\"\n  pushd $CL_WASM_DIR\n  tar zvcf $CL_WASM_PACKAGE *wasm\n  popd\nelse\n  echo \"[ERROR] No wasm dir: $CL_WASM_DIR\"\n  exit 1\nfi\n\n# upload to s3\nif [ -z \"$AWS_SECRET_ACCESS_KEY\" ] || [ -z \"$AWS_ACCESS_KEY_ID\" ]; then\n    log \"ERROR: AWS KEYS needed to run. Contact SRE.\"\n    exit 1\nelse\n    s3cmd put ${CL_WASM_PACKAGE} s3://${CL_S3_BUCKET}/${CL_S3_LOCATION}/casper-contracts.tar.gz\nfi\n"
  },
  {
    "path": "cargo_casper/Cargo.toml",
    "content": "[package]\nname = \"cargo-casper\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[dependencies]\ncasper-contract-sdk-sys = { path = \"../smart_contracts/sdk_sys\" }\ncasper-contract-sdk = { path = \"../smart_contracts/sdk\", features = [\"__abi_generator\"] }\nclap = { version = \"4.4.11\", features = [\"derive\"] }\nclap-cargo = { version = \"0.14.0\", features = [\"cargo_metadata\"] }\nlibloading = \"0.8.6\"\ninclude_dir = \"0.7.4\"\nanyhow = \"1.0.86\"\nserde = { version = \"1.0\", features = [\"derive\"] }\nserde_json = { version = \"1.0.140\" }\ncargo_metadata = \"0.19.2\"\nwabt = \"0.10.0\"\nonce_cell = \"1.21.3\"\ncrossterm = \"0.29.0\"\nthiserror = \"2.0.12\"\natty = \"0.2.14\"\n"
  },
  {
    "path": "cargo_casper/build.rs",
    "content": "use std::env;\n\nfn main() {\n    match env::var(\"TARGET\") {\n        Ok(target) => {\n            println!(\"cargo:rustc-env=TARGET={}\", target);\n        }\n        Err(_) => {\n            println!(\"cargo:warning=Failed to obtain target triple\");\n        }\n    }\n}\n"
  },
  {
    "path": "cargo_casper/project_template/Cargo.toml",
    "content": "[package]\nname = \"project-template\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[lib]\ncrate-type = [\"cdylib\", \"rlib\"]\n\n[dependencies]\ncasper-contract-macros = \"0.1.0\"\ncasper-contract-sdk = \"0.1.0\""
  },
  {
    "path": "cargo_casper/project_template/src/lib.rs",
    "content": "#![cfg_attr(target_arch = \"wasm32\", no_main)]\n#![cfg_attr(target_arch = \"wasm32\", no_std)]\n\nuse casper_contract_sdk::prelude::*;\n\n#[casper(contract_state)]\npub struct Contract {\n    counter: u64,\n}\n\nimpl Default for Contract {\n    fn default() -> Self {\n        panic!(\"Unable to instantiate contract without a constructor!\");\n    }\n}\n\n#[casper]\nimpl Contract {\n    #[casper(constructor)]\n    pub fn new() -> Self {\n        Self { counter: 0 }\n    }\n\n    #[casper(constructor)]\n    pub fn default() -> Self {\n        Self::new()\n    }\n\n    pub fn increase(&mut self) {\n        self.counter += 1;\n    }\n\n    pub fn get(&self) -> u64 {\n        self.counter\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_counter() {\n        let mut counter = Contract::new();\n        assert_eq!(counter.get(), 0);\n        counter.increase();\n        assert_eq!(counter.get(), 1);\n        counter.increase();\n        assert_eq!(counter.get(), 2);\n    }\n}\n"
  },
  {
    "path": "cargo_casper/src/cli/build.rs",
    "content": "use std::{io::Cursor, path::PathBuf, process::Command};\n\nuse anyhow::Context;\n\nuse crate::compilation::CompileJob;\n\n/// The `build` subcommand flow.\npub fn build_impl(\n    package_name: Option<&str>,\n    output_dir: Option<PathBuf>,\n    embed_schema: bool,\n) -> Result<(), anyhow::Error> {\n    // Build the contract package targetting wasm32-unknown-unknown without\n    // extra feature flags - this is the production contract wasm file.\n    //\n    // Optionally (but by default) create an entrypoint in the wasm that will have\n    // embedded schema JSON file for discoverability (aka internal schema).\n    let production_wasm_path = if embed_schema {\n        // Build the schema first\n        let mut buffer = Cursor::new(Vec::new());\n        super::build_schema::build_schema_impl(package_name, &mut buffer)\n            .context(\"Failed to build contract schema\")?;\n\n        let contract_schema =\n            String::from_utf8(buffer.into_inner()).context(\"Failed to read contract schema\")?;\n\n        // Build the contract with above schema injected\n        eprintln!(\"🔨 Step 2: Building contract with schema injected...\");\n        let production_wasm_path = CompileJob::new(\n            package_name,\n            None,\n            vec![(\"__CARGO_CASPER_INJECT_SCHEMA_MARKER\", &contract_schema)],\n        )\n        .dispatch(\n            \"wasm32-unknown-unknown\",\n            [\"casper-contract-sdk/__embed_schema\"],\n        )\n        .context(\"Failed to compile user wasm\")?\n        .get_artifact_by_extension(\"wasm\")\n        .context(\"Build artifacts for contract wasm didn't include a wasm file\")?;\n\n        // Write the schema next to the wasm\n        let schema_file_path = production_wasm_path.with_extension(\"json\");\n\n        std::fs::create_dir_all(schema_file_path.parent().unwrap())\n            .context(\"Failed creating directory for wasm schema\")?;\n\n        std::fs::write(&schema_file_path, 
contract_schema)\n            .context(\"Failed writing contract schema\")?;\n\n        production_wasm_path\n    } else {\n        // Compile and move to specified output directory\n        eprintln!(\"🔨 Step 2: Building contract...\");\n        CompileJob::new(package_name, None, vec![])\n            .dispatch(\"wasm32-unknown-unknown\", Option::<String>::None)\n            .context(\"Failed to compile user wasm\")?\n            .get_artifact_by_extension(\"wasm\")\n            .context(\"Failed extracting build artifacts to directory\")?\n    };\n\n    // Run wasm optimizations passes that will shrink the size of the wasm.\n    eprintln!(\"🔨 Step 3: Applying optimizations...\");\n    Command::new(\"wasm-strip\")\n        .args([&production_wasm_path])\n        .spawn()\n        .context(\"Failed to execute wasm-strip command. Is wabt installed?\")?;\n\n    // Move to output_dir if specified\n    let mut out_wasm_path = production_wasm_path.clone();\n    let mut out_schema_path = None;\n\n    if let Some(output_dir) = output_dir {\n        out_wasm_path = output_dir\n            .join(out_wasm_path.file_stem().unwrap())\n            .with_extension(\"wasm\");\n        std::fs::rename(&production_wasm_path, &out_wasm_path)\n            .context(\"Couldn't write to the specified output directory.\")?;\n    }\n\n    if embed_schema {\n        out_schema_path = Some(out_wasm_path.with_extension(\"json\"));\n        let production_schema_path = production_wasm_path.with_extension(\"json\");\n        std::fs::rename(&production_schema_path, out_schema_path.as_ref().unwrap())\n            .context(\"Couldn't write to the specified output directory.\")?;\n    }\n\n    // Report paths\n    eprintln!(\"✅ Completed. Build artifacts:\");\n    eprintln!(\"{:?}\", out_wasm_path.canonicalize()?);\n    if let Some(schema_path) = out_schema_path {\n        eprintln!(\"{:?}\", schema_path.canonicalize()?);\n    }\n\n    Ok(())\n}\n"
  },
  {
    "path": "cargo_casper/src/cli/build_schema/artifact.rs",
    "content": "use std::{mem::MaybeUninit, path::Path};\n\nuse libloading::{Library, Symbol};\n\nconst COLLECT_SCHEMA_FUNC: &str = \"__cargo_casper_collect_schema\";\n\ntype CollectSchema = unsafe extern \"C\" fn(size_ptr: *mut u64) -> *mut u8;\n\npub(crate) struct Artifact {\n    library: Library,\n}\n\nimpl Artifact {\n    pub(crate) fn from_path<P: AsRef<Path>>(\n        artifact_path: P,\n    ) -> Result<Artifact, libloading::Error> {\n        let library = unsafe { libloading::Library::new(artifact_path.as_ref()) }?;\n\n        Ok(Self { library })\n    }\n\n    /// Collects schema from the built artifact.\n    ///\n    /// This returns a [`serde_json::Value`] to skip validation of a `Schema` object structure which\n    /// (in theory) can differ.\n    pub(crate) fn collect_schema(&self) -> serde_json::Result<serde_json::Value> {\n        let collect_schema: Symbol<CollectSchema> =\n            unsafe { self.library.get(COLLECT_SCHEMA_FUNC.as_bytes()).unwrap() };\n\n        let json_bytes = {\n            let mut value = MaybeUninit::uninit();\n            let leaked_json_bytes = unsafe { collect_schema(value.as_mut_ptr()) };\n            let size = unsafe { value.assume_init() };\n            let length: usize = size.try_into().unwrap();\n            unsafe { Vec::from_raw_parts(leaked_json_bytes, length, length) }\n        };\n\n        serde_json::from_slice(&json_bytes)\n    }\n}\n"
  },
  {
    "path": "cargo_casper/src/cli/build_schema.rs",
    "content": "mod artifact;\n\nuse std::{env::consts::DLL_EXTENSION, ffi::OsStr, io::Write, path::PathBuf};\n\nuse anyhow::Context;\nuse artifact::Artifact;\nuse cargo_metadata::MetadataCommand;\n\nuse crate::compilation::CompileJob;\n\n/// The `build-schema` subcommand flow. The schema is written to the specified\n/// [`Write`] implementer.\npub fn build_schema_impl<W: Write>(\n    package_name: Option<&str>,\n    output_writer: &mut W,\n) -> Result<(), anyhow::Error> {\n    // Compile contract package to a native library with extra code that will\n    // produce ABI information including entrypoints, types, etc.\n    eprintln!(\"🔨 Step 1: Building contract schema...\");\n\n    let rustflags = {\n        let current = std::env::var(\"RUSTFLAGS\").unwrap_or_default();\n        format!(\"-C link-dead-code {current}\")\n    };\n\n    let compilation = CompileJob::new(package_name, None, vec![(\"RUSTFLAGS\", &rustflags)]);\n\n    // Get all of the direct user contract dependencies.\n    //\n    // This is a naive approach -- if a dep is feature gated, it won't be resolved correctly.\n    // In practice, we only care about casper-contract-sdk and casper-macros being used, and there\n    // is little to no reason to feature gate them. 
So this approach should be good enough.\n    let dependencies: Vec<String> = {\n        let metadata = MetadataCommand::new().exec()?;\n\n        // Find the root package (the one whose manifest path matches our Cargo.toml)\n        let package = match package_name {\n            Some(package_name) => metadata\n                .packages\n                .iter()\n                .find(|p| p.name == package_name)\n                .context(\"Root package not found in metadata\")?,\n            None => {\n                let manifest_path_target = PathBuf::from(\"./Cargo.toml\").canonicalize()?;\n                metadata\n                    .packages\n                    .iter()\n                    .find(|p| p.manifest_path.canonicalize().unwrap() == manifest_path_target)\n                    .context(\"Root package not found in metadata\")?\n            }\n        };\n\n        // Extract the direct dependency names from the package.\n        package\n            .dependencies\n            .iter()\n            .map(|dep| dep.name.clone())\n            .collect()\n    };\n\n    // Determine extra features based on the dependencies detected\n    let mut features = Vec::new();\n\n    if dependencies.contains(&\"casper-contract-sdk\".into()) {\n        features.push(\"casper-contract-sdk/__abi_generator\".to_owned());\n    }\n\n    if dependencies.contains(&\"casper-macros\".into()) {\n        features.push(\"casper-macros/__abi_generator\".to_owned());\n    }\n\n    let build_result = compilation\n        .dispatch(env!(\"TARGET\"), &features)\n        .context(\"ABI-rich wasm compilation failure\")?;\n\n    // Extract ABI information from the built contract\n    let artifact_path = build_result\n        .artifacts()\n        .iter()\n        .find(|x| x.extension() == Some(OsStr::new(DLL_EXTENSION)))\n        .context(\"Failed loading the built contract\")?;\n\n    let artifact = Artifact::from_path(artifact_path).context(\"Load library\")?;\n    let collected = 
artifact.collect_schema().context(\"Collect schema\")?;\n    serde_json::to_writer(output_writer, &collected).context(\"Serialize collected schema\")?;\n    Ok(())\n}\n"
  },
  {
    "path": "cargo_casper/src/cli/new.rs",
    "content": "use std::path::PathBuf;\n\nuse anyhow::Context;\n\nuse include_dir::{include_dir, Dir};\n\nstatic TEMPLATE_DIR: Dir = include_dir!(\"$CARGO_MANIFEST_DIR/project_template\");\nconst TEMPLATE_NAME_MARKER: &str = \"project_template\";\n\n/// The `new` subcommand flow.\npub fn new_impl(name: &str) -> Result<(), anyhow::Error> {\n    let name = name\n        .trim()\n        .to_lowercase()\n        .split_whitespace()\n        .collect::<Vec<_>>()\n        .join(\"-\");\n\n    let template_dir = super::extract_embedded_dir(&PathBuf::from(&name), &TEMPLATE_DIR)\n        .context(\"Failed extracting template directory\")?;\n\n    let toml_path = template_dir.join(\"Cargo.toml\");\n\n    let toml_content = std::fs::read_to_string(&toml_path)\n        .context(\"Failed reading template Cargo.toml file\")?\n        .replace(TEMPLATE_NAME_MARKER, &name);\n\n    std::fs::write(toml_path, toml_content).context(\"Failed updating template Cargo.toml file\")?;\n\n    Ok(())\n}\n"
  },
  {
    "path": "cargo_casper/src/cli.rs",
    "content": "use std::{\n    io,\n    path::{Path, PathBuf},\n};\n\nuse clap::Subcommand;\nuse include_dir::{Dir, DirEntry};\n\npub mod build;\npub mod build_schema;\npub mod new;\n\n/// Writes the binary-embedded directory into a filesystem directory.\n/// Returns the path to the extracted dir.\npub(crate) fn extract_embedded_dir(target: &Path, dir: &Dir) -> io::Result<PathBuf> {\n    // Ensure the target directory exists.\n    std::fs::create_dir_all(target)?;\n\n    // Iterate over each entry in the directory.\n    for entry in dir.entries() {\n        match entry {\n            DirEntry::File(file) => {\n                let file_path = target.join(file.path());\n                if let Some(parent) = file_path.parent() {\n                    std::fs::create_dir_all(parent)?;\n                }\n                std::fs::write(file_path, file.contents())?;\n            }\n            DirEntry::Dir(sub_dir) => {\n                extract_embedded_dir(target, sub_dir)?;\n            }\n        }\n    }\n\n    Ok(target.into())\n}\n\n#[derive(Debug, Subcommand)]\npub(crate) enum Command {\n    /// Build the JSON schema of the contract.\n    BuildSchema {\n        /// Where should the build artifacts be saved?\n        #[arg(short, long)]\n        output: Option<PathBuf>,\n        /// The cargo workspace\n        #[command(flatten)]\n        workspace: clap_cargo::Workspace,\n    },\n    /// Build the contract with its JSON schema embedded.\n    Build {\n        /// Where should the build artifacts be saved?\n        #[arg(short, long)]\n        output: Option<PathBuf>,\n        /// Should the schema be embedded and exposed in the contract? 
(Default: true)\n        #[arg(short, long)]\n        embed_schema: Option<bool>,\n        /// The cargo workspace\n        #[command(flatten)]\n        workspace: clap_cargo::Workspace,\n    },\n    /// Creates a new VM2 smart contract project from a template.\n    New {\n        /// Name of the project to create\n        name: String,\n    },\n}\n\n#[derive(Debug, clap::Parser)]\npub(crate) struct Cli {\n    #[command(subcommand)]\n    pub command: Command,\n}\n"
  },
  {
    "path": "cargo_casper/src/compilation.rs",
    "content": "use std::{\n    ffi::OsStr,\n    path::PathBuf,\n    process::{Command, Stdio},\n};\n\nuse anyhow::{anyhow, Result};\n\nuse crate::utils::command_runner::{self, DEFAULT_MAX_LINES};\n\n/// Represents a job to compile a Cargo project.\npub(crate) struct CompileJob<'a> {\n    package_name: Option<&'a str>,\n    features: Vec<String>,\n    env_vars: Vec<(&'a str, &'a str)>,\n    in_dir: Option<PathBuf>,\n}\n\nimpl<'a> CompileJob<'a> {\n    /// Creates a new compile job with the given manifest path, optional features,\n    /// and environmental variables.\n    pub fn new(\n        package_name: Option<&'a str>,\n        features: Option<Vec<String>>,\n        env_vars: Vec<(&'a str, &'a str)>,\n    ) -> Self {\n        Self {\n            package_name,\n            features: features.unwrap_or_default(),\n            env_vars,\n            in_dir: None,\n        }\n    }\n\n    /// Dispatches the compilation job. This builds the Cargo project into a temporary target\n    /// directory.\n    pub fn dispatch<T, I, S>(&self, target: T, extra_features: I) -> Result<CompilationResults>\n    where\n        T: Into<String>,\n        I: IntoIterator<Item = S>,\n        S: Into<String>,\n    {\n        let target: String = target.into();\n\n        // Merge the configured features with any extra features\n        let mut features = self.features.clone();\n        features.extend(extra_features.into_iter().map(Into::into));\n        let features_str = features.join(\",\");\n\n        let mut build_args = vec![\"build\"];\n\n        if let Some(package_name) = self.package_name {\n            build_args.push(\"-p\");\n            build_args.push(package_name);\n        }\n\n        build_args.extend_from_slice(&[\n            \"--target\",\n            target.as_str(),\n            \"--features\",\n            &features_str,\n            \"--lib\",\n            \"--release\",\n            \"--color=always\",\n            
\"--message-format=json-diagnostic-rendered-ansi\",\n        ]);\n\n        // Run the cargo build command and capture the output\n        let mut command = Command::new(\"cargo\");\n        command.args(&build_args);\n        command.stdout(Stdio::piped());\n        command.stderr(Stdio::piped());\n        for (key, value) in &self.env_vars {\n            command.env(key, value);\n        }\n\n        if let Some(in_directory) = &self.in_dir {\n            command.current_dir(in_directory);\n        }\n\n        // Run the process and capture the output from both stdout and stderr.\n        let handle = command_runner::run_process(&mut command)?;\n\n        let mut log_trail = command_runner::LogTrailBuilder::new()\n            .max_lines(DEFAULT_MAX_LINES)\n            .interactive(command_runner::Interactive::Auto)\n            .build();\n        let mut artifacts = Vec::new();\n        for line in &handle.receiver {\n            match line {\n                command_runner::Line::Stdout(line) => {\n                    match serde_json::from_str::<cargo_metadata::Message>(&line.to_string())\n                        .expect(\"Parse\")\n                    {\n                        cargo_metadata::Message::CompilerArtifact(artifact) => {\n                            for artifact in &artifact.filenames {\n                                let path = PathBuf::from(artifact);\n                                if path\n                                    .parent()\n                                    .and_then(|p| p.file_name())\n                                    .and_then(OsStr::to_str)\n                                    != Some(\"deps\")\n                                {\n                                    artifacts.push(PathBuf::from(artifact));\n                                }\n                            }\n                        }\n                        cargo_metadata::Message::CompilerMessage(compiler_message) => {\n                            
log_trail.push_line(compiler_message.to_string())?;\n                        }\n                        cargo_metadata::Message::BuildScriptExecuted(_build_script) => {}\n                        cargo_metadata::Message::BuildFinished(_build_finished) => {}\n                        cargo_metadata::Message::TextLine(text) => log_trail.push_line(text)?,\n                        _ => todo!(),\n                    }\n                }\n                command_runner::Line::Stderr(line) => {\n                    log_trail.push_line(line)?;\n                }\n            }\n        }\n\n        match handle.wait() {\n            Ok(()) => {\n                // Process completed successfully.\n            }\n            Err(command_runner::Outcome::Io(error)) => {\n                return Err(anyhow!(\"Cargo build failed with error code: {error}\"));\n            }\n            Err(command_runner::Outcome::ErrorCode(code)) => {\n                return Err(anyhow!(\"Cargo build failed with error code: {code}\"));\n            }\n            Err(command_runner::Outcome::Signal(signal)) => {\n                return Err(anyhow!(\"Cargo build was terminated by signal: {signal}\"));\n            }\n        }\n\n        Ok(CompilationResults { artifacts })\n    }\n}\n\n/// Results of a compilation job.\npub(crate) struct CompilationResults {\n    artifacts: Vec<PathBuf>,\n}\n\nimpl CompilationResults {\n    /// Returns a slice of paths to the build artifacts.\n    pub fn artifacts(&self) -> &[PathBuf] {\n        &self.artifacts\n    }\n\n    pub fn get_artifact_by_extension(&self, extension: &str) -> Option<PathBuf> {\n        self.artifacts()\n            .iter()\n            .find(|x| x.extension().and_then(|y| y.to_str()) == Some(extension))\n            .map(|x| x.into())\n    }\n}\n"
  },
  {
    "path": "cargo_casper/src/main.rs",
    "content": "use std::{fs::File, io::Write};\n\nuse clap::Parser;\nuse cli::{Cli, Command};\n\npub(crate) mod cli;\npub(crate) mod compilation;\npub mod utils;\n\nfn main() -> anyhow::Result<()> {\n    let cli = Cli::parse();\n    match cli.command {\n        Command::BuildSchema { output, workspace } => {\n            // If user specified an output path, write there.\n            // Otherwise print to standard output.\n            let mut schema_writer: Box<dyn Write> = match output {\n                Some(path) => Box::new(File::create(path)?),\n                None => Box::new(std::io::stdout()),\n            };\n\n            // Select the package to build\n            let package_name = workspace.package.first().map(|x| x.as_str());\n\n            cli::build_schema::build_schema_impl(package_name, &mut schema_writer)?\n        }\n        Command::Build {\n            output,\n            embed_schema,\n            workspace,\n        } => {\n            // Select the package to build\n            let package_name = workspace.package.first().map(|x| x.as_str());\n\n            cli::build::build_impl(package_name, output, embed_schema.unwrap_or(true))?\n        }\n        Command::New { name } => cli::new::new_impl(&name)?,\n    }\n    Ok(())\n}\n"
  },
  {
    "path": "cargo_casper/src/utils/command_runner.rs",
    "content": "use std::{\n    collections::VecDeque,\n    fmt::{Display, Formatter},\n    io::{self, BufRead, BufReader, Write},\n    os::unix::process::ExitStatusExt,\n    process::{Command, Stdio},\n    sync::mpsc,\n    thread,\n};\n\nuse atty::Stream;\nuse crossterm::{cursor, style, terminal, QueueableCommand};\nuse thiserror::Error;\n\n#[derive(Debug, Error)]\npub enum Outcome {\n    #[error(\"Input/Output error: {0}\")]\n    Io(#[from] io::Error),\n    #[error(\"Subprocess exited with error code: {0}\")]\n    ErrorCode(i32),\n    #[error(\"Subprocess terminated by signal: {0}\")]\n    Signal(i32),\n}\n\n#[derive(Debug)]\npub enum Line {\n    Stdout(String),\n    Stderr(String),\n}\n\nimpl Display for Line {\n    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {\n        match self {\n            Line::Stdout(text) => write!(f, \"{}\", text),\n            Line::Stderr(text) => write!(f, \"{}\", text),\n        }\n    }\n}\n\n/// Maximum number of lines to keep in the rolling log.\npub const DEFAULT_MAX_LINES: usize = 10;\n\n#[derive(Debug)]\npub struct ProcessHandle {\n    pub receiver: mpsc::Receiver<Line>,\n    pub stdout_thread_handle: thread::JoinHandle<()>,\n    pub stderr_thread_handle: thread::JoinHandle<()>,\n    pub child: std::process::Child,\n}\n\nimpl ProcessHandle {\n    pub fn wait(mut self) -> Result<(), Outcome> {\n        // Ensure the reader threads have completed.\n        self.stdout_thread_handle\n            .join()\n            .expect(\"Stdout thread panicked\");\n        self.stderr_thread_handle\n            .join()\n            .expect(\"Stderr thread panicked\");\n\n        // Wait for the subprocess to finish.\n        let exit_status = self.child.wait().expect(\"Failed to wait on child process\");\n\n        match exit_status.code() {\n            Some(code) => {\n                if code == 0 {\n                    // Subprocess completed successfully.\n                    Ok(())\n                } else {\n               
     // Subprocess exited with error code.\n                    Err(Outcome::ErrorCode(code))\n                }\n            }\n            None => {\n                // Subprocess terminated by signal.\n                if let Some(signal) = exit_status.signal() {\n                    // Subprocess terminated by signal\n                    Err(Outcome::Signal(signal))\n                } else {\n                    unreachable!(\"Unexpected exit status: {:?}\", exit_status);\n                }\n            }\n        }\n    }\n}\n\n/// Runs a subprocess and captures its output.\n///\n/// Returns a `ProcessHandle` that can be used to read the output and wait for the process to\n/// finish.\n///\n/// Lines captured are available in a `receiver` attribute and can be piped to a `LogTrail`\n/// instance.\npub fn run_process(command: &mut Command) -> io::Result<ProcessHandle> {\n    // Spawn the subprocess with stdout and stderr piped.\n    let mut child = command\n        .stdout(Stdio::piped())\n        .stderr(Stdio::piped())\n        .spawn()?;\n\n    // Take the stdout and stderr handles.\n    let stdout_pipe = child.stdout.take().expect(\"Failed to capture stdout\");\n    let stderr_pipe = child.stderr.take().expect(\"Failed to capture stderr\");\n\n    // Create a channel to receive lines from both stdout and stderr.\n    let (tx, rx) = mpsc::channel();\n\n    // Spawn a thread to read stdout.\n    let stdout_thread = thread::spawn({\n        let tx = tx.clone();\n\n        move || {\n            let reader = BufReader::new(stdout_pipe);\n            for line in reader.lines() {\n                if let Ok(line_text) = line {\n                    // If send fails, the main thread is likely gone.\n                    if tx.send(Line::Stdout(line_text)).is_err() {\n                        break;\n                    }\n                } else {\n                    break;\n                }\n            }\n        }\n    });\n\n    // Spawn a second thread to read 
stderr.\n\n    let stderr_thread = thread::spawn({\n        let tx_err = tx.clone();\n        move || {\n            let reader = BufReader::new(stderr_pipe);\n            for line in reader.lines() {\n                if let Ok(line_text) = line {\n                    if tx_err.send(Line::Stderr(line_text)).is_err() {\n                        break;\n                    }\n                } else {\n                    break;\n                }\n            }\n        }\n    });\n\n    // Drop the extra sender so that the channel closes when both threads finish.\n    drop(tx);\n\n    Ok(ProcessHandle {\n        receiver: rx,\n        stdout_thread_handle: stdout_thread,\n        stderr_thread_handle: stderr_thread,\n        child,\n    })\n}\n\n/// Enum representing the interactive mode for the log trail.\npub enum Interactive {\n    /// Program will figure it out if a logs can be printed interactively.\n    Auto,\n    /// Interactive mode is enabled.\n    Yes,\n    /// Interactive mode is disabled.\n    No,\n}\n\nimpl Interactive {\n    /// Check if the interactive mode is enabled.\n    pub fn is_enabled(&self) -> bool {\n        match self {\n            Interactive::Auto => atty::is(Stream::Stdout),\n            Interactive::Yes => true,\n            Interactive::No => false,\n        }\n    }\n}\n/// A stateful log trail that maintains a rolling window of log lines.\npub struct LogTrail {\n    max_lines: usize,\n    interactive: Interactive,\n    current_lines: VecDeque<String>,\n    printed_lines: usize,\n    stdout: std::io::Stdout,\n}\n\nimpl LogTrail {\n    /// Create a new LogTrail.\n    ///\n    /// * `max_lines` specifies how many lines to keep in the rolling window.\n    /// * `interactive` should be true when you want the dynamic updating behavior (e.g. 
when\n    ///   running in a terminal).\n    pub fn new(max_lines: usize, interactive: Interactive) -> Self {\n        Self {\n            max_lines,\n            interactive,\n            current_lines: VecDeque::with_capacity(max_lines),\n            printed_lines: 0,\n            stdout: io::stdout(),\n        }\n    }\n\n    /// Push a new line into the log trail.\n    ///\n    /// This method tracks the line numbering and either updates the dynamic window (if interactive)\n    /// or prints the new line immediately.\n    pub fn push_line<S: Into<String>>(&mut self, line: S) -> io::Result<()> {\n        let line_text = line.into();\n        if self.interactive.is_enabled() {\n            // Maintain a rolling window of at most max_lines.\n            if self.current_lines.len() == self.max_lines {\n                self.current_lines.pop_front();\n            }\n            self.current_lines.push_back(line_text);\n            // Move the cursor up by the number of previously printed lines plus one extra\n            // (e.g. 
if a static header line is printed above the log).\n            if self.printed_lines > 0 {\n                self.stdout\n                    .queue(cursor::MoveUp(self.printed_lines as u16))?;\n            }\n            // Clear everything from the current cursor position downward.\n            self.stdout\n                .queue(terminal::Clear(terminal::ClearType::FromCursorDown))?;\n\n            // Reprint the rolling buffer with each line prefixed.\n            for text in self.current_lines.iter() {\n                self.stdout.queue(style::Print(text))?;\n                self.stdout.queue(style::Print(\"\\n\"))?;\n            }\n            self.printed_lines = self.current_lines.len();\n        } else {\n            // In non-interactive mode simply print the line.\n            self.stdout.queue(style::Print(line_text))?;\n            self.stdout.queue(style::Print(\"\\n\"))?;\n        }\n        self.stdout.flush()?;\n        Ok(())\n    }\n}\n\n/// Builder for creating a `LogTrail` instance.\n#[derive(Default)]\npub struct LogTrailBuilder {\n    max_lines: Option<usize>,\n    interactive: Option<Interactive>,\n}\n\nimpl LogTrailBuilder {\n    /// Creates a new builder with default values.\n    pub fn new() -> Self {\n        Self::default()\n    }\n\n    /// Sets the maximum number of lines for the rolling log.\n    pub fn max_lines(mut self, max_lines: usize) -> Self {\n        self.max_lines = Some(max_lines);\n        self\n    }\n\n    /// Sets whether the log trail should be interactive.\n    pub fn interactive(mut self, interactive: Interactive) -> Self {\n        self.interactive = Some(interactive);\n        self\n    }\n\n    /// Builds the `LogTrail` instance.\n    pub fn build(self) -> LogTrail {\n        let max_lines = self.max_lines.expect(\"Max lines must be set\");\n        let interactive = self.interactive.expect(\"Interactive mode must be set\");\n        LogTrail::new(max_lines, interactive)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n   
 use super::*;\n\n    #[test]\n    fn test_run_process() {\n        // This test will run the `echo` command, which should always succeed.\n        let result = run_process(Command::new(\"echo\").args([\"Hello, world!\"]))\n            .expect(\"Failed to run process\");\n        assert!(result.wait().is_ok());\n    }\n\n    #[test]\n    fn test_run_interactive_process() {\n        // This test will run the `echo` command, which should always succeed.\n        let result = run_process(Command::new(\"echo\").args([\"Hello, world!\"]))\n            .expect(\"Failed to run process\");\n        assert!(result.wait().is_ok());\n    }\n\n    #[test]\n    fn test_run_process_failure() {\n        // This test will run a non-existent command, which should fail.\n        let result = run_process(&mut Command::new(\"non_existent_command\"))\n            .expect_err(\"Failed to run process\");\n        assert_eq!(result.kind(), io::ErrorKind::NotFound);\n    }\n\n    #[test]\n    fn test_run_process_with_env() {\n        // This test will run the `env` command to print environment variables.\n        let handle = run_process(Command::new(\"env\").envs([(\"TEST_VAR\", \"test_value\")]))\n            .expect(\"Failed to run process\");\n\n        let captured_lines: Vec<String> = handle\n            .receiver\n            .into_iter()\n            .map(|line| line.to_string())\n            .collect();\n        let output = captured_lines.join(\"\\n\");\n        assert!(output.contains(\"TEST_VAR=test_value\"));\n    }\n}\n"
  },
  {
    "path": "cargo_casper/src/utils.rs",
    "content": "pub mod command_runner;\n"
  },
  {
    "path": "cargo_casper/test.py",
    "content": "#!/usr/bin/env python\nimport itertools\nimport time\nimport sys\nimport os\n\nfor a in itertools.count():\n    if a % 2 == 0:\n        print(f'line {a}')\n    else:\n        print(f'error line {a}', file=sys.stderr)\n    time.sleep(0.05)\n    if a == 44:\n        os.kill(os.getpid(), 9)\n    if a > 100:\n        break\n\nprint('Goodbye')\n"
  },
  {
    "path": "ci/build_update_package.sh",
    "content": "#!/usr/bin/env bash\n\n# This script will build\n#  - bin.tar.gz\n#  - config.tar.gz\n#  - version.json\n# in target/upgrade_build\n\nset -e\n\nif command -v jq >&2; then\n  echo \"jq installed\"\nelse\n  echo \"ERROR: jq is not installed and required\"\n  exit 1\nfi\n\nROOT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")/..\" >/dev/null 2>&1 && pwd)\"\nLATEST_DIR=\"$ROOT_DIR/target/latest\"\nGENESIS_FILES_DIR=\"$ROOT_DIR/resources/production\"\nNODE_BUILD_TARGET=\"$ROOT_DIR/target/release/casper-node\"\nNODE_BUILD_DIR=\"$ROOT_DIR/node\"\nUPGRADE_DIR=\"$ROOT_DIR/target/upgrade_build/\"\nBIN_DIR=\"$UPGRADE_DIR/bin\"\nCONFIG_DIR=\"$UPGRADE_DIR/config\"\nGIT_HASH=$(git rev-parse HEAD)\nBRANCH_NAME=$(git branch --show-current)\nPROTOCOL_VERSION=$(cat \"$GENESIS_FILES_DIR/chainspec.toml\" | python3 -c \"import sys, toml; print(toml.load(sys.stdin)['protocol']['version'].replace('.','_'))\")\nNODE_VERSION=$(cat \"$NODE_BUILD_DIR/Cargo.toml\" | python3 -c \"import sys, toml; print(toml.load(sys.stdin)['package']['version'])\")\n\necho \"Creating $BRANCH_NAME.latest file\"\nmkdir -p \"$LATEST_DIR\"\necho -n \"$GIT_HASH\" > \"$LATEST_DIR/$BRANCH_NAME.latest\"\n\necho \"Building casper-node\"\ncd \"$NODE_BUILD_DIR\" || exit\ncargo build --release\n\necho \"Building global-state-update-gen\"\ncd \"$ROOT_DIR\" || exit\ncargo build --release --package global-state-update-gen\ncargo deb --package global-state-update-gen\nmkdir -p \"$UPGRADE_DIR\"\ncp \"$ROOT_DIR/target/debian/\"* \"$UPGRADE_DIR\" || exit\n\necho \"Generating bin README.md\"\nmkdir -p \"$BIN_DIR\"\nreadme=\"$BIN_DIR/README.md\"\n{\n  echo \"Build for Ubuntu 20.04.\"\n  echo \"\"\n  echo \"To run on other platforms, build from https://github.com/casper-network/casper-node\"\n  echo \" cd node\"\n  echo \" cargo build --release\"\n  echo \"\"\n  echo \"git commit hash: $GIT_HASH\"\n} > \"$readme\"\n\necho \"Packaging bin.tar.gz\"\nmkdir -p \"$BIN_DIR\"\ncp \"$NODE_BUILD_TARGET\" \"$BIN_DIR\"\n# To get 
no path in tar, need to cd in.\ncd \"$BIN_DIR\" || exit\ntar -czvf \"../bin.tar.gz\" .\ncd ..\nrm -rf \"$BIN_DIR\"\n\necho \"Packaging config.tar.gz\"\nmkdir -p \"$CONFIG_DIR\"\ncp \"$GENESIS_FILES_DIR/chainspec.toml\" \"$CONFIG_DIR\"\ncp \"$GENESIS_FILES_DIR/config-example.toml\" \"$CONFIG_DIR\"\ncp \"$GENESIS_FILES_DIR/accounts.toml\" \"$CONFIG_DIR\"\n# To get no path in tar, need to cd in.\ncd \"$CONFIG_DIR\" || exit\ntar -czvf \"../config.tar.gz\" .\ncd ..\nrm -rf \"$CONFIG_DIR\"\n\necho \"Building version.json\"\njq --null-input \\\n--arg\tbranch \"$BRANCH_NAME\" \\\n--arg version \"$NODE_VERSION\" \\\n--arg pv \"$PROTOCOL_VERSION\" \\\n--arg ghash \"$GIT_HASH\" \\\n--arg now \"$(jq -nr 'now | strftime(\"%Y-%m-%dT%H:%M:%SZ\")')\" \\\n--arg files \"$(ls \"$UPGRADE_DIR\" | jq -nRc '[inputs]')\" \\\n'{\"branch\": $branch, \"version\": $version, \"protocol_version\": $pv, \"git-hash\": $ghash, \"timestamp\": $now, \"files\": $files}' \\\n> \"$UPGRADE_DIR/version.json\"\n"
  },
  {
    "path": "ci/casper_updater/Cargo.toml",
    "content": "[package]\nauthors = [\"Joe Sacher <joe.sacher@casper.network>\"]\ndescription = \"A tool to update versions of all published Casper packages.\"\nedition = \"2021\"\nlicense = \"Apache-2.0\"\nname = \"casper-updater\"\nreadme = \"README.md\"\nversion = \"0.4.0\"\n\n[dependencies]\nclap = { version = \"4.2.7\", features = [\"cargo\", \"deprecated\", \"wrap_help\"] }\nonce_cell = \"1.17.1\"\nregex = \"1.8.1\"\nsemver = \"1.0.17\"\n"
  },
  {
    "path": "ci/casper_updater/README.md",
    "content": "# casper-updater\n\nA tool to update versions of all published Casper packages.\n\n# Usage\n\nThe tool iterates through each published package, asking for a new version for each or automatically bumping the major, minor or patch version if `--bump=[major|minor|patch]` was specified.  Once a valid version is specified, all files dependent on that version are updated.\n\nIf you run the tool from its own directory it will expect to find the casper-node root directory at '../..'.  Alternatively, you can give the path to the casper-node root directory via `--root-dir`.    \n\nTo see a list of files which will be affected, or to check that the tool's regex matches are up to date, run the tool with `--dry-run`.\n"
  },
  {
    "path": "ci/casper_updater/src/dependent_file.rs",
    "content": "use std::{\n    fs,\n    path::{Path, PathBuf},\n};\n\nuse regex::Regex;\n\n/// A file which is dependent on the version of a certain Casper crate.\npub struct DependentFile {\n    /// Full path to the file.\n    path: PathBuf,\n    /// Regex applicable to the portion to be updated.\n    regex: Regex,\n    /// Function which generates the replacement string once the updated version is known.\n    replacement: fn(&str) -> String,\n}\n\nimpl DependentFile {\n    pub fn new<P: AsRef<Path>>(\n        relative_path: P,\n        regex: Regex,\n        replacement: fn(&str) -> String,\n    ) -> Self {\n        let path = crate::root_dir().join(relative_path);\n        let dependent_file = DependentFile {\n            path,\n            regex,\n            replacement,\n        };\n        let contents = dependent_file.contents();\n        assert!(\n            dependent_file.regex.find(&contents).is_some(),\n            \"regex '{}' failed to get a match in {}\",\n            dependent_file.regex,\n            dependent_file.path.display()\n        );\n        dependent_file\n    }\n\n    pub fn update(&self, updated_version: &str) {\n        let contents = self.contents();\n        let updated_contents = self\n            .regex\n            .replace_all(&contents, (self.replacement)(updated_version).as_str());\n        fs::write(&self.path, updated_contents.as_ref())\n            .unwrap_or_else(|error| panic!(\"should write {}: {:?}\", self.path.display(), error));\n    }\n\n    pub fn path(&self) -> &Path {\n        &self.path\n    }\n\n    pub fn relative_path(&self) -> &Path {\n        self.path\n            .strip_prefix(crate::root_dir())\n            .expect(\"should strip prefix\")\n    }\n\n    pub fn contents(&self) -> String {\n        fs::read_to_string(&self.path)\n            .unwrap_or_else(|error| panic!(\"should read {}: {:?}\", self.path.display(), error))\n    }\n}\n"
  },
  {
    "path": "ci/casper_updater/src/main.rs",
    "content": "//! A tool to update versions of all published Casper packages.\n\n#![warn(unused, missing_copy_implementations, missing_docs)]\n#![deny(\n    deprecated_in_future,\n    future_incompatible,\n    macro_use_extern_crate,\n    rust_2018_idioms,\n    nonstandard_style,\n    single_use_lifetimes,\n    trivial_casts,\n    trivial_numeric_casts,\n    unsafe_code,\n    unstable_features,\n    unused_import_braces,\n    unused_lifetimes,\n    unused_qualifications,\n    unused_results,\n    warnings,\n    clippy::all\n)]\n#![forbid(\n    arithmetic_overflow,\n    invalid_type_param_default,\n    macro_expanded_macro_exports_accessed_by_absolute_paths,\n    mutable_transmutes,\n    no_mangle_const_items,\n    overflowing_literals,\n    pub_use_of_private_extern_crate,\n    unknown_crate_types\n)]\n\nmod dependent_file;\nmod package;\nmod regex_data;\n\nuse std::{\n    env,\n    path::{Path, PathBuf},\n    process::Command,\n};\n\nuse clap::{\n    builder::{PathBufValueParser, PossibleValue},\n    crate_version, Arg, ArgAction, Command as App,\n};\nuse once_cell::sync::Lazy;\nuse semver::Version;\n\nuse package::Package;\n\nconst APP_NAME: &str = \"Casper Updater\";\n\nconst ROOT_DIR_ARG_NAME: &str = \"root-dir\";\nconst ROOT_DIR_ARG_SHORT: char = 'r';\nconst ROOT_DIR_ARG_VALUE_NAME: &str = \"PATH\";\nconst ROOT_DIR_ARG_HELP: &str =\n    \"Path to casper-node root directory.  If not supplied, assumes it is at ../..\";\n\nconst BUMP_ARG_NAME: &str = \"bump\";\nconst BUMP_ARG_SHORT: char = 'b';\nconst BUMP_ARG_VALUE_NAME: &str = \"VERSION-COMPONENT\";\nconst BUMP_ARG_HELP: &str =\n    \"Increases all crates' versions automatically without asking for user input.  For a crate at \\\n    version x.y.z, the version will be bumped to (x+1).0.0, x.(y+1).0, or x.y.(z+1) depending on \\\n    which version component is specified.  
If this option is specified, --activation-point must \\\n    also be specified.\";\nconst MAJOR: &str = \"major\";\nconst MINOR: &str = \"minor\";\nconst PATCH: &str = \"patch\";\n\nconst DRY_RUN_ARG_NAME: &str = \"dry-run\";\nconst DRY_RUN_ARG_SHORT: char = 'd';\nconst DRY_RUN_ARG_HELP: &str = \"Checks all regexes get matches in current casper-node repo\";\n\nconst ALLOW_EARLIER_VERSION_NAME: &str = \"allow-earlier-version\";\nconst ALLOW_EARLIER_VERSION_HELP: &str = \"Allows manual setting of version earlier than current\";\n\n#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]\npub(crate) enum BumpVersion {\n    Major,\n    Minor,\n    Patch,\n}\n\nimpl BumpVersion {\n    pub(crate) fn update(self, current_version: &Version) -> Version {\n        match self {\n            BumpVersion::Major => Version::new(current_version.major + 1, 0, 0),\n            BumpVersion::Minor => Version::new(current_version.major, current_version.minor + 1, 0),\n            BumpVersion::Patch => Version::new(\n                current_version.major,\n                current_version.minor,\n                current_version.patch + 1,\n            ),\n        }\n    }\n}\n\nstruct Args {\n    root_dir: PathBuf,\n    bump_version: Option<BumpVersion>,\n    dry_run: bool,\n    allow_earlier_version: bool,\n}\n\n/// The full path to the casper-node root directory.\npub(crate) fn root_dir() -> &'static Path {\n    &ARGS.root_dir\n}\n\n/// The version component to bump, if any.\npub(crate) fn bump_version() -> Option<BumpVersion> {\n    ARGS.bump_version\n}\n\n/// Whether we're doing a dry run or not.\npub(crate) fn is_dry_run() -> bool {\n    ARGS.dry_run\n}\n\n/// If we allow reverting version to previous (used for master back to previous release branch)\npub(crate) fn allow_earlier_version() -> bool {\n    ARGS.allow_earlier_version\n}\n\nstatic ARGS: Lazy<Args> = Lazy::new(get_args);\n\nfn get_args() -> Args {\n    let arg_matches = App::new(APP_NAME)\n        
.version(crate_version!())\n        .arg(\n            Arg::new(ROOT_DIR_ARG_NAME)\n                .long(ROOT_DIR_ARG_NAME)\n                .short(ROOT_DIR_ARG_SHORT)\n                .value_name(ROOT_DIR_ARG_VALUE_NAME)\n                .help(ROOT_DIR_ARG_HELP)\n                .value_parser(PathBufValueParser::new()),\n        )\n        .arg(\n            Arg::new(BUMP_ARG_NAME)\n                .long(BUMP_ARG_NAME)\n                .short(BUMP_ARG_SHORT)\n                .value_name(BUMP_ARG_VALUE_NAME)\n                .help(BUMP_ARG_HELP)\n                .value_parser([\n                    PossibleValue::new(MAJOR),\n                    PossibleValue::new(MINOR),\n                    PossibleValue::new(PATCH),\n                ]),\n        )\n        .arg(\n            Arg::new(DRY_RUN_ARG_NAME)\n                .long(DRY_RUN_ARG_NAME)\n                .short(DRY_RUN_ARG_SHORT)\n                .action(ArgAction::SetTrue)\n                .help(DRY_RUN_ARG_HELP),\n        )\n        .arg(\n            Arg::new(ALLOW_EARLIER_VERSION_NAME)\n                .long(ALLOW_EARLIER_VERSION_NAME)\n                .action(ArgAction::SetTrue)\n                .help(ALLOW_EARLIER_VERSION_HELP),\n        )\n        .get_matches();\n\n    let root_dir = match arg_matches.get_one::<PathBuf>(ROOT_DIR_ARG_NAME) {\n        Some(path) => path.clone(),\n        None => env::current_dir()\n            .expect(\"should be able to access current working dir\")\n            .parent()\n            .expect(\"current working dir should have parent\")\n            .parent()\n            .expect(\"current working dir should have two parents\")\n            .to_path_buf(),\n    };\n\n    let bump_version = arg_matches\n        .get_one::<&str>(BUMP_ARG_NAME)\n        .map(|value| match *value {\n            MAJOR => BumpVersion::Major,\n            MINOR => BumpVersion::Minor,\n            PATCH => BumpVersion::Patch,\n            _ => unreachable!(),\n        });\n\n    let dry_run = 
arg_matches.get_flag(DRY_RUN_ARG_NAME);\n\n    let allow_earlier_version = arg_matches.get_flag(ALLOW_EARLIER_VERSION_NAME);\n\n    Args {\n        root_dir,\n        bump_version,\n        dry_run,\n        allow_earlier_version,\n    }\n}\n\nfn main() {\n    let rust_packages = [\n        Package::cargo(\"types\", &regex_data::types::DEPENDENT_FILES),\n        Package::cargo(\"binary_port\", &regex_data::binary_port::DEPENDENT_FILES),\n        Package::cargo(\"storage\", &regex_data::storage::DEPENDENT_FILES),\n        Package::cargo(\n            \"execution_engine\",\n            &regex_data::execution_engine::DEPENDENT_FILES,\n        ),\n        Package::cargo(\n            \"execution_engine_testing/test_support\",\n            &regex_data::execution_engine_testing_test_support::DEPENDENT_FILES,\n        ),\n        Package::cargo(\"node\", &regex_data::node::DEPENDENT_FILES),\n        Package::cargo(\n            \"smart_contracts/contract\",\n            &regex_data::smart_contracts_contract::DEPENDENT_FILES,\n        ),\n        Package::cargo(\n            \"smart_contracts/sdk_sys\",\n            &regex_data::smart_contracts_sdk_sys::DEPENDENT_FILES,\n        ),\n        Package::cargo(\n            \"smart_contracts/sdk\",\n            &regex_data::smart_contracts_sdk::DEPENDENT_FILES,\n        ),\n        Package::cargo(\n            \"smart_contracts/sdk_codegen\",\n            &regex_data::smart_contracts_sdk_codegen::DEPENDENT_FILES,\n        ),\n        Package::cargo(\n            \"smart_contracts/macros\",\n            &regex_data::smart_contracts_macros::DEPENDENT_FILES,\n        ),\n        Package::cargo(\n            \"executor/wasm_common\",\n            &regex_data::executor_wasm_common::DEPENDENT_FILES,\n        ),\n        Package::cargo(\n            \"executor/wasm_interface\",\n            &regex_data::executor_wasm_interface::DEPENDENT_FILES,\n        ),\n        Package::cargo(\n            \"executor/wasm_host\",\n            
&regex_data::executor_wasm_host::DEPENDENT_FILES,\n        ),\n        Package::cargo(\n            \"executor/wasmer_backend\",\n            &regex_data::executor_wasmer_backend::DEPENDENT_FILES,\n        ),\n        Package::cargo(\"executor/wasm\", &regex_data::executor_wasm::DEPENDENT_FILES),\n    ];\n\n    for rust_package in &rust_packages {\n        rust_package.update()\n    }\n\n    // Update Cargo.lock if this isn't a dry run.\n    if !is_dry_run() {\n        let mut command = Command::new(env!(\"CARGO\"));\n        let _ = command\n            .current_dir(root_dir())\n            .arg(\"update\")\n            .arg(\"--offline\");\n        for rust_package in &rust_packages {\n            let _ = command.arg(\"--package\").arg(rust_package.name());\n        }\n        let status = command\n            .status()\n            .unwrap_or_else(|error| panic!(\"Failed to execute '{:?}': {}\", command, error));\n        assert!(status.success(), \"Failed to update Cargo.lock\");\n    }\n}\n"
  },
  {
    "path": "ci/casper_updater/src/package.rs",
    "content": "use std::{\n    io::{self, Write},\n    path::Path,\n};\n\nuse regex::Regex;\nuse semver::Version;\n\nuse crate::{\n    dependent_file::DependentFile,\n    regex_data::{MANIFEST_NAME_REGEX, MANIFEST_VERSION_REGEX},\n};\n\nconst CAPTURE_INDEX: usize = 2;\n\n/// Represents a published Casper crate or AssemblyScript package which may need its version\n/// updated.\npub struct Package {\n    /// This package's name as specified in its manifest.\n    name: String,\n    /// This package's current version as specified in its manifest.\n    current_version: Version,\n    /// Files which must be updated if this package's version is changed, including this package's\n    /// own manifest file.  The other files will often be from a different package.\n    dependent_files: &'static Vec<DependentFile>,\n}\n\ntrait PackageConsts {\n    const MANIFEST: &'static str;\n    fn name_regex() -> &'static Regex;\n    fn version_regex() -> &'static Regex;\n}\n\nstruct CargoPackage;\n\nimpl PackageConsts for CargoPackage {\n    const MANIFEST: &'static str = \"Cargo.toml\";\n\n    fn name_regex() -> &'static Regex {\n        &MANIFEST_NAME_REGEX\n    }\n\n    fn version_regex() -> &'static Regex {\n        &MANIFEST_VERSION_REGEX\n    }\n}\n\n#[allow(clippy::ptr_arg)]\nimpl Package {\n    pub fn cargo<P: AsRef<Path>>(\n        relative_path: P,\n        dependent_files: &'static Vec<DependentFile>,\n    ) -> Self {\n        Self::new::<_, CargoPackage>(relative_path, dependent_files)\n    }\n\n    pub fn name(&self) -> &str {\n        &self.name\n    }\n\n    fn new<P: AsRef<Path>, T: PackageConsts>(\n        relative_path: P,\n        dependent_files: &'static Vec<DependentFile>,\n    ) -> Self {\n        let manifest_path = crate::root_dir().join(&relative_path).join(T::MANIFEST);\n\n        let manifest = dependent_files\n            .iter()\n            .find(|&file| file.path() == manifest_path)\n            .unwrap_or_else(|| {\n                panic!(\n              
      \"{} should be a dependent file of {}\",\n                    manifest_path.display(),\n                    relative_path.as_ref().display()\n                )\n            });\n        let contents = manifest.contents();\n\n        let find_value = |regex: &Regex| {\n            regex\n                .captures(&contents)\n                .unwrap_or_else(|| {\n                    panic!(\n                        \"should find package name and version in {}\",\n                        manifest_path.display()\n                    )\n                })\n                .get(CAPTURE_INDEX)\n                .unwrap_or_else(|| {\n                    panic!(\n                        \"package name and version should be regex capture at index {} in {}\",\n                        CAPTURE_INDEX,\n                        manifest_path.display()\n                    )\n                })\n                .as_str()\n                .to_string()\n        };\n\n        let name = find_value(T::name_regex());\n        let version = find_value(T::version_regex());\n        let current_version = Version::parse(&version).expect(\"should parse current version\");\n\n        Package {\n            name,\n            current_version,\n            dependent_files,\n        }\n    }\n\n    pub fn update(&self) {\n        if crate::is_dry_run() {\n            println!(\n                \"Current version of {} is {}\",\n                self.name, self.current_version\n            );\n            if let Some(bump_version) = crate::bump_version() {\n                let updated_version = bump_version.update(&self.current_version);\n                println!(\"Will be updated to {}\", updated_version);\n            }\n            println!(\"Files affected by this package's version:\");\n            for dependent_file in self.dependent_files {\n                println!(\"\\t* {}\", dependent_file.relative_path().display());\n            }\n            println!();\n            return;\n     
   }\n\n        let updated_version = match crate::bump_version() {\n            None => match get_updated_version_from_user(&self.name, &self.current_version) {\n                Some(version) => version,\n                None => return,\n            },\n            Some(bump_version) => bump_version.update(&self.current_version),\n        };\n\n        for dependent_file in self.dependent_files {\n            dependent_file.update(&updated_version.to_string());\n        }\n\n        println!(\n            \"Updated {} from {} to {}.\",\n            self.name, self.current_version, updated_version\n        );\n    }\n}\n\npub fn get_updated_version_from_user(name: &str, current_version: &Version) -> Option<Version> {\n    loop {\n        print!(\n            \"Current {} version is {}.  Enter new version (leave blank for unchanged): \",\n            name, current_version\n        );\n        io::stdout().flush().expect(\"should flush stdout\");\n        let mut input = String::new();\n        match io::stdin().read_line(&mut input) {\n            Ok(_) => {\n                input = input.trim_end().to_string();\n                if input.is_empty() {\n                    return None;\n                }\n\n                let new_version = match Version::parse(&input) {\n                    Ok(version) => version,\n                    Err(error) => {\n                        println!(\"\\n{} is not a valid version: {}.\", input, error);\n                        continue;\n                    }\n                };\n\n                if new_version < *current_version {\n                    println!(\n                        \"Updated version ({}) is lower than current version ({})\",\n                        new_version, current_version\n                    );\n                    if crate::allow_earlier_version() {\n                        println!(\"Allowing earlier version due to flag.\")\n                    } else {\n                        continue;\n             
       }\n                }\n\n                return if new_version == *current_version {\n                    None\n                } else {\n                    Some(new_version)\n                };\n            }\n            Err(error) => println!(\"\\nFailed to read from stdin: {}.\", error),\n        }\n    }\n}\n"
  },
  {
    "path": "ci/casper_updater/src/regex_data.rs",
    "content": "#![allow(clippy::wildcard_imports)]\n\nuse once_cell::sync::Lazy;\nuse regex::Regex;\n\nuse crate::dependent_file::DependentFile;\n\npub static MANIFEST_NAME_REGEX: Lazy<Regex> =\n    Lazy::new(|| Regex::new(r#\"(?m)(^name = )\"([^\"]+)\"#).unwrap());\npub static MANIFEST_VERSION_REGEX: Lazy<Regex> =\n    Lazy::new(|| Regex::new(r#\"(?m)(^version = )\"([^\"]+)\"#).unwrap());\n\nfn replacement(updated_version: &str) -> String {\n    format!(r#\"$1\"{}\"#, updated_version)\n}\n\nfn replacement_with_slash(updated_version: &str) -> String {\n    format!(r#\"$1/{}\"#, updated_version)\n}\n\npub static TYPES_VERSION_REGEX: Lazy<Regex> =\n    Lazy::new(|| Regex::new(r#\"(?m)(^casper-types = \\{[^\\}]*version = )\"(?:[^\"]+)\"#).unwrap());\n\npub mod binary_port {\n    use super::*;\n\n    pub static DEPENDENT_FILES: Lazy<Vec<DependentFile>> = Lazy::new(|| {\n        vec![\n            DependentFile::new(\n                \"binary_port/Cargo.toml\",\n                MANIFEST_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"node/Cargo.toml\",\n                Regex::new(r#\"(?m)(^casper-binary-port = \\{[^\\}]*version = )\"(?:[^\"]+)\"#).unwrap(),\n                replacement,\n            ),\n        ]\n    });\n}\n\npub mod types {\n    use super::*;\n\n    pub static DEPENDENT_FILES: Lazy<Vec<DependentFile>> = Lazy::new(|| {\n        vec![\n            DependentFile::new(\n                \"types/Cargo.toml\",\n                MANIFEST_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"types/src/lib.rs\",\n                Regex::new(\n                    r#\"(?m)(#!\\[doc\\(html_root_url = \"https://docs.rs/casper-types)/(?:[^\"]+)\"#,\n                )\n                .unwrap(),\n                replacement_with_slash,\n            ),\n            DependentFile::new(\n                \"binary_port/Cargo.toml\",\n 
               TYPES_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"storage/Cargo.toml\",\n                TYPES_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"execution_engine/Cargo.toml\",\n                TYPES_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"execution_engine_testing/test_support/Cargo.toml\",\n                TYPES_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\"node/Cargo.toml\", TYPES_VERSION_REGEX.clone(), replacement),\n            DependentFile::new(\n                \"smart_contracts/contract/Cargo.toml\",\n                TYPES_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"executor/wasm/Cargo.toml\",\n                TYPES_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"executor/wasm_host/Cargo.toml\",\n                TYPES_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"executor/wasm_interface/Cargo.toml\",\n                TYPES_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"executor/wasmer_backend/Cargo.toml\",\n                TYPES_VERSION_REGEX.clone(),\n                replacement,\n            ),\n        ]\n    });\n}\n\npub static STORAGE_VERSION_REGEX: Lazy<Regex> =\n    Lazy::new(|| Regex::new(r#\"(?m)(^casper-storage = \\{[^\\}]*version = )\"(?:[^\"]+)\"#).unwrap());\npub mod storage {\n    use super::*;\n\n    pub static DEPENDENT_FILES: Lazy<Vec<DependentFile>> = Lazy::new(|| {\n        vec![\n            DependentFile::new(\n                
\"storage/Cargo.toml\",\n                MANIFEST_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"storage/src/lib.rs\",\n                Regex::new(\n                    r#\"(?m)(#!\\[doc\\(html_root_url = \"https://docs.rs/casper-storage)/(?:[^\"]+)\"#,\n                )\n                .unwrap(),\n                replacement_with_slash,\n            ),\n            DependentFile::new(\n                \"execution_engine/Cargo.toml\",\n                STORAGE_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"execution_engine_testing/test_support/Cargo.toml\",\n                STORAGE_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"node/Cargo.toml\",\n                STORAGE_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"executor/wasm/Cargo.toml\",\n                STORAGE_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"executor/wasm_host/Cargo.toml\",\n                STORAGE_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"executor/wasm_interface/Cargo.toml\",\n                STORAGE_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"executor/wasmer_backend/Cargo.toml\",\n                STORAGE_VERSION_REGEX.clone(),\n                replacement,\n            ),\n        ]\n    });\n}\n\npub static EXECUTION_ENGINE_VERSION_REGEX: Lazy<Regex> = Lazy::new(|| {\n    Regex::new(r#\"(?m)(^casper-execution-engine = \\{[^\\}]*version = )\"(?:[^\"]+)\"#).unwrap()\n});\npub mod execution_engine {\n    use super::*;\n\n    pub static DEPENDENT_FILES: 
Lazy<Vec<DependentFile>> = Lazy::new(|| {\n        vec![\n            DependentFile::new(\n                \"execution_engine/Cargo.toml\",\n                MANIFEST_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"execution_engine/src/lib.rs\",\n                Regex::new(r#\"(?m)(#!\\[doc\\(html_root_url = \"https://docs.rs/casper-execution-engine)/(?:[^\"]+)\"#).unwrap(),\n                replacement_with_slash,\n            ),\n                DependentFile::new(\n                    \"execution_engine_testing/test_support/Cargo.toml\",\n                    EXECUTION_ENGINE_VERSION_REGEX.clone(),\n                    replacement,\n                ),\n                DependentFile::new(\n                    \"node/Cargo.toml\",\n                    EXECUTION_ENGINE_VERSION_REGEX.clone(),\n                    replacement,\n                ),\n            DependentFile::new(\n                \"executor/wasm/Cargo.toml\",\n                EXECUTION_ENGINE_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            ]\n    });\n}\n\npub mod execution_engine_testing_test_support {\n    use super::*;\n\n    pub static DEPENDENT_FILES: Lazy<Vec<DependentFile>> = Lazy::new(|| {\n        vec![\n            DependentFile::new(\n                \"execution_engine_testing/test_support/Cargo.toml\",\n                MANIFEST_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"execution_engine_testing/test_support/src/lib.rs\",\n                Regex::new(r#\"(?m)(#!\\[doc\\(html_root_url = \"https://docs.rs/casper-engine-test-support)/(?:[^\"]+)\"#).unwrap(),\n                replacement_with_slash,\n            ),\n        ]\n    });\n}\n\npub mod node {\n    use super::*;\n\n    pub static DEPENDENT_FILES: Lazy<Vec<DependentFile>> = Lazy::new(|| {\n        vec![\n            DependentFile::new(\n                
\"node/Cargo.toml\",\n                MANIFEST_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"node/src/lib.rs\",\n                Regex::new(\n                    r#\"(?m)(#!\\[doc\\(html_root_url = \"https://docs.rs/casper-node)/(?:[^\"]+)\"#,\n                )\n                .unwrap(),\n                replacement_with_slash,\n            ),\n        ]\n    });\n}\n\npub mod smart_contracts_contract {\n    use super::*;\n\n    pub static DEPENDENT_FILES: Lazy<Vec<DependentFile>> = Lazy::new(|| {\n        vec![\n            DependentFile::new(\n                \"smart_contracts/contract/Cargo.toml\",\n                MANIFEST_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"smart_contracts/contract/src/lib.rs\",\n                Regex::new(\n                    r#\"(?m)(#!\\[doc\\(html_root_url = \"https://docs.rs/casper-contract)/(?:[^\"]+)\"#,\n                )\n                .unwrap(),\n                replacement_with_slash,\n            ),\n        ]\n    });\n}\n\npub static SMART_CONTRACTS_SDK_SYS_VERSION_REGEX: Lazy<Regex> = Lazy::new(|| {\n    Regex::new(r#\"(?m)(^casper-contract-sdk-sys = \\{[^\\}]*version = )\"(?:[^\"]+)\"#).unwrap()\n});\n\npub mod smart_contracts_sdk_sys {\n    use super::*;\n\n    pub static DEPENDENT_FILES: Lazy<Vec<DependentFile>> = Lazy::new(|| {\n        vec![\n            DependentFile::new(\n                \"smart_contracts/sdk_sys/Cargo.toml\",\n                MANIFEST_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"executor/wasm_common/Cargo.toml\",\n                SMART_CONTRACTS_SDK_SYS_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"executor/wasmer_backend/Cargo.toml\",\n                
SMART_CONTRACTS_SDK_SYS_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"smart_contracts/macros/Cargo.toml\",\n                SMART_CONTRACTS_SDK_SYS_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"smart_contracts/sdk/Cargo.toml\",\n                SMART_CONTRACTS_SDK_SYS_VERSION_REGEX.clone(),\n                replacement,\n            ),\n        ]\n    });\n}\n\npub mod smart_contracts_sdk {\n    use super::*;\n\n    pub static DEPENDENT_FILES: Lazy<Vec<DependentFile>> = Lazy::new(|| {\n        vec![\n            DependentFile::new(\n                \"smart_contracts/sdk/Cargo.toml\",\n                MANIFEST_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"smart_contracts/sdk_codegen/Cargo.toml\",\n                Regex::new(r#\"(?m)(^casper-contract-sdk = \\{[^\\}]*version = )\"(?:[^\"]+)\"#).unwrap(),\n                replacement,\n            ),\n        ]\n    });\n}\n\npub mod smart_contracts_sdk_codegen {\n    use super::*;\n\n    pub static DEPENDENT_FILES: Lazy<Vec<DependentFile>> = Lazy::new(|| {\n        vec![DependentFile::new(\n            \"smart_contracts/sdk_codegen/Cargo.toml\",\n            MANIFEST_VERSION_REGEX.clone(),\n            replacement,\n        )]\n    });\n}\npub mod smart_contracts_macros {\n    use super::*;\n\n    pub static DEPENDENT_FILES: Lazy<Vec<DependentFile>> = Lazy::new(|| {\n        vec![\n            DependentFile::new(\n                \"smart_contracts/macros/Cargo.toml\",\n                MANIFEST_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"smart_contracts/sdk/Cargo.toml\",\n                Regex::new(r#\"(?m)(^casper-contract-macros = \\{[^\\}]*version = )\"(?:[^\"]+)\"#)\n                    .unwrap(),\n                
replacement,\n            ),\n        ]\n    });\n}\n\npub static EXECUTOR_WASM_COMMON_VERSION_REGEX: Lazy<Regex> = Lazy::new(|| {\n    Regex::new(r#\"(?m)(^casper-executor-wasm-common = \\{[^\\}]*version = )\"(?:[^\"]+)\"#).unwrap()\n});\npub mod executor_wasm_common {\n    use super::*;\n\n    pub static DEPENDENT_FILES: Lazy<Vec<DependentFile>> = Lazy::new(|| {\n        vec![\n            DependentFile::new(\n                \"executor/wasm_common/Cargo.toml\",\n                MANIFEST_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"executor/wasm/Cargo.toml\",\n                EXECUTOR_WASM_COMMON_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"executor/wasm_host/Cargo.toml\",\n                EXECUTOR_WASM_COMMON_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"executor/wasm_interface/Cargo.toml\",\n                EXECUTOR_WASM_COMMON_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"executor/wasmer_backend/Cargo.toml\",\n                EXECUTOR_WASM_COMMON_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"smart_contracts/macros/Cargo.toml\",\n                EXECUTOR_WASM_COMMON_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"smart_contracts/sdk/Cargo.toml\",\n                EXECUTOR_WASM_COMMON_VERSION_REGEX.clone(),\n                replacement,\n            ),\n        ]\n    });\n}\n\npub static EXECUTOR_WASM_INTERFACE_VERSION_REGEX: Lazy<Regex> = Lazy::new(|| {\n    Regex::new(r#\"(?m)(^casper-executor-wasm-interface = \\{[^\\}]*version = )\"(?:[^\"]+)\"#).unwrap()\n});\npub mod executor_wasm_interface {\n    use super::*;\n\n 
   pub static DEPENDENT_FILES: Lazy<Vec<DependentFile>> = Lazy::new(|| {\n        vec![\n            DependentFile::new(\n                \"executor/wasm_interface/Cargo.toml\",\n                MANIFEST_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"executor/wasm/Cargo.toml\",\n                EXECUTOR_WASM_INTERFACE_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"executor/wasm_host/Cargo.toml\",\n                EXECUTOR_WASM_INTERFACE_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"executor/wasmer_backend/Cargo.toml\",\n                EXECUTOR_WASM_INTERFACE_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"node/Cargo.toml\",\n                EXECUTOR_WASM_INTERFACE_VERSION_REGEX.clone(),\n                replacement,\n            ),\n        ]\n    });\n}\n\npub static EXECUTOR_WASM_HOST_VERSION_REGEX: Lazy<Regex> = Lazy::new(|| {\n    Regex::new(r#\"(?m)(^casper-executor-wasm-host = \\{[^\\}]*version = )\"(?:[^\"]+)\"#).unwrap()\n});\npub mod executor_wasm_host {\n    use super::*;\n\n    pub static DEPENDENT_FILES: Lazy<Vec<DependentFile>> = Lazy::new(|| {\n        vec![\n            DependentFile::new(\n                \"executor/wasm_host/Cargo.toml\",\n                MANIFEST_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"executor/wasm/Cargo.toml\",\n                EXECUTOR_WASM_HOST_VERSION_REGEX.clone(),\n                replacement,\n            ),\n        ]\n    });\n}\n\npub static EXECUTOR_WASMER_BACKEND_VERSION_REGEX: Lazy<Regex> = Lazy::new(|| {\n    Regex::new(r#\"(?m)(^casper-executor-wasmer-backend = \\{[^\\}]*version = )\"(?:[^\"]+)\"#).unwrap()\n});\npub mod executor_wasmer_backend 
{\n    use super::*;\n\n    pub static DEPENDENT_FILES: Lazy<Vec<DependentFile>> = Lazy::new(|| {\n        vec![\n            DependentFile::new(\n                \"executor/wasmer_backend/Cargo.toml\",\n                MANIFEST_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"executor/wasm/Cargo.toml\",\n                EXECUTOR_WASMER_BACKEND_VERSION_REGEX.clone(),\n                replacement,\n            ),\n        ]\n    });\n}\n\npub mod executor_wasm {\n    use super::*;\n\n    pub static DEPENDENT_FILES: Lazy<Vec<DependentFile>> = Lazy::new(|| {\n        vec![\n            DependentFile::new(\n                \"executor/wasm/Cargo.toml\",\n                MANIFEST_VERSION_REGEX.clone(),\n                replacement,\n            ),\n            DependentFile::new(\n                \"node/Cargo.toml\",\n                Regex::new(r#\"(?m)(^casper-executor-wasm = \\{[^\\}]*version = )\"(?:[^\"]+)\"#)\n                    .unwrap(),\n                replacement,\n            ),\n        ]\n    });\n}\n"
  },
  {
    "path": "ci/check_cpu_features.sh",
    "content": "#!/usr/bin/env bash\n\n# Ensure that there has not been a change in CPU features used.\n\nset -e\n\ncd $(dirname $0)/..\n\ncargo build --release --bin casper-node\nutils/dump-cpu-features.sh target/release/casper-node > current-build-cpu-features.txt\nif [[ $(comm -23 current-build-cpu-features.txt ci/cpu-features-1.4.13-release.txt) ]]; then\n    exit 1\nfi\necho \"Check passed, instruction set extensions in node binary have not been changed since 1.4.13\"\n"
  },
  {
    "path": "ci/ci.json",
    "content": "{\n  \"external_deps\": {\n    \"casper-client-rs\": {\n      \"github_repo_url\": \"https://github.com/casper-ecosystem/casper-client-rs.git\",\n      \"branch\": \"dev\"\n    },\n    \"casper-node-launcher\": {\n      \"github_repo_url\": \"https://github.com/casper-network/casper-node-launcher.git\",\n      \"branch\": \"main\"\n    },\n    \"casper-sidecar\": {\n      \"github_repo_url\": \"https://github.com/casper-network/casper-sidecar\",\n      \"branch\": \"dev\"\n    },\n    \"casper-nctl\": {\n      \"github_repo_url\": \"https://github.com/casper-network/casper-nctl\",\n      \"branch\": \"dev\"\n    }\n  },\n  \"nctl_upgrade_tests\": {\n    \"protocol_1\": \"1.5.6\"\n  }\n}"
  },
  {
    "path": "ci/cpu-features-1.4.13-release.txt",
    "content": "AVX\nAVX2\nBMI\nCMOV\nMODE64\nNOVLX\nPCLMUL\nSHA\nSSE1\nSSE2\nSSE3\nSSE41\nSSSE3\n"
  },
  {
    "path": "ci/markdown-link-check-config.json",
    "content": "{\n  \"ignorePatterns\": [\n    {\n      \"pattern\": \"^http://localhost.*\"\n    }\n  ],\n  \"httpHeaders\": [\n    {\n      \"urls\": [\"https://crates.io\"],\n      \"headers\": {\n        \"Accept\": \"text/html\"\n      }\n    }\n  ],\n  \"timeout\": \"10s\",\n  \"retryOn429\": true,\n  \"retryCount\": 5,\n  \"fallbackRetryDelay\": \"2s\"\n}\n"
  },
  {
    "path": "ci/markdown_link_check.sh",
    "content": "#!/usr/bin/env bash\nset -e\n\nROOT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")/..\" >/dev/null 2>&1 && pwd)\"\nCONFIG_PATH=\"$ROOT_DIR/ci/markdown-link-check-config.json\"\npushd \"$ROOT_DIR\"\n\nFILES=($(find . -name \"*.md\" -not -path \".*/node_modules/*\"))\n\nfor file in \"${FILES[@]}\"; do\n    markdown-link-check -v -r -p -c \"$CONFIG_PATH\" \"$file\"\ndone\npopd\n"
  },
  {
    "path": "ci/nctl_compile.sh",
    "content": "#!/usr/bin/env bash\nset -e\nshopt -s expand_aliases\n\nROOT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")/..\" >/dev/null 2>&1 && pwd)\"\nJSON_CONFIG_FILE=\"$ROOT_DIR/ci/ci.json\"\nJSON_KEYS=($(jq -r '.external_deps | keys[]' \"$JSON_CONFIG_FILE\"))\n\nfunction clone_external_repo() {\n    local NAME=${1}\n    local JSON_FILE=${2}\n    local URL\n    local BRANCH\n    local CLONE_REPO_PATH\n\n    CLONE_REPO_PATH=\"$ROOT_DIR/../$NAME\"\n    URL=$(jq -r \".external_deps.\\\"${NAME}\\\".github_repo_url\" \"$JSON_FILE\")\n    BRANCH=$(jq -r \".external_deps.\\\"${NAME}\\\".branch\" \"$JSON_FILE\")\n\n    if [ ! -d \"$CLONE_REPO_PATH\" ]; then\n        echo \"... cloning $NAME: branch=$BRANCH\"\n        git clone -b \"$BRANCH\" \"$URL\" \"$CLONE_REPO_PATH\"\n    else\n        echo \"skipping clone of $NAME: directory already exists.\"\n    fi\n}\n\n# Clone external dependencies\nfor i in \"${JSON_KEYS[@]}\"; do\n    clone_external_repo \"$i\" \"$JSON_CONFIG_FILE\"\ndone\n\nNCTL_HOME=\"$ROOT_DIR/../casper-nctl\"\nNCTL_CASPER_HOME=\"$ROOT_DIR\"\n\nif [ ! -d \"$NCTL_HOME\" ]; then\n    echo \"ERROR: nctl was not set up correctly, check ci/ci.json, exiting...\"\n    exit 1\nfi\n\n# Activate Environment\npushd \"$ROOT_DIR\"\nsource \"$NCTL_HOME/activate\"\npopd\n\n# NCTL Build\nnctl-compile\ncachepot --show-stats\n"
  },
  {
    "path": "ci/nctl_upgrade.sh",
    "content": "#!/usr/bin/env bash\nset -e\nshopt -s expand_aliases\n\nROOT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")/..\" >/dev/null 2>&1 && pwd)\"\nNCTL_HOME=\"$ROOT_DIR/../casper-nctl\"\nNCTL_CASPER_HOME=\"$ROOT_DIR\"\n\n# Activate Environment\npushd \"$ROOT_DIR\"\nsource \"$NCTL_HOME/activate\"\n\n# Call compile wrapper for client, launcher, and nctl-compile\nbash -c \"$ROOT_DIR/ci/nctl_compile.sh\"\n\nfunction main() {\n    local TEST_ID=${1}\n    local SKIP_SETUP=${2}\n    if [ \"$SKIP_SETUP\" != \"true\" ]; then\n\n        # NCTL Build\n        pushd \"$ROOT_DIR\"\n        nctl-compile\n\n        # Clear Old Stages\n        log \"removing old remotes and stages\"\n        nctl-stage-teardown\n        rm -rf $(get_path_to_stages)\n        rm -rf $(get_path_to_remotes)\n\n        # Stage\n        get_remotes\n        stage_remotes\n        build_from_settings_file\n        source \"$NCTL/sh/staging/set_override_tomls.sh\" upgrade_test='true'\n    fi\n\n    if [ -z \"$TEST_ID\" ]; then\n        log \"tooling needs to be updated to deal with AddressableEntity after 2.0 upgrade\"\n        log \"disabling for now\"\n        # PR CI tests\n        start_upgrade_scenario_1\n#        start_upgrade_scenario_3\n#        start_upgrade_scenario_11\n    else\n        start_upgrade_scenario_\"$TEST_ID\"\n    fi\n}\n\n# Pulls down remotely staged file\n# from s3 bucket to NCTL remotes directory.\nfunction get_remotes() {\n    local CI_JSON_CONFIG_FILE\n    local PROTO_1\n\n    CI_JSON_CONFIG_FILE=\"$NCTL_CASPER_HOME/ci/ci.json\"\n    PROTO_1=$(jq -r '.nctl_upgrade_tests.\"protocol_1\"' \"$CI_JSON_CONFIG_FILE\")\n    nctl-stage-set-remotes \"$PROTO_1\"\n}\n\n# Sets up settings.sh for CI test.\n# If local arg is passed it will skip this step\n# and use whats currently in settings.sh\n#   arg: local is for debug testing only\nfunction stage_remotes() {\n    local PATH_TO_STAGE\n\n    PATH_TO_STAGE=\"$(get_path_to_stage 1)\"\n    dev_branch_settings 
\"$PATH_TO_STAGE\"\n}\n\n# Generates stage-1 directory for test execution\n# Just here for a log message\nfunction build_from_settings_file() {\n    log \"... setting build from settings.sh file\"\n    nctl-stage-build-from-settings\n}\n\n# Produces settings.sh needed for CI testing.\n# It will always setup latest RC -> minor incremented by 1.\n# i.e: if current RC is 1.2 then dev will be setup as 1.3\nfunction dev_branch_settings() {\n    local PATH_TO_STAGE=${1}\n    local STARTING_VERSION=${2}\n    local INCREMENT\n    local RC_VERSION\n\n    pushd \"$(get_path_to_remotes)\"\n    RC_VERSION=\"$(ls --group-directories-first -d */ | sort -r | head -n 1 | tr -d '/')\"\n\n    [[ \"$RC_VERSION\" =~ (.*[^0-9])([0-9])(.)([0-9]+) ]] && INCREMENT=\"2.0${BASH_REMATCH[3]}${BASH_REMATCH[4]}\"\n\n    RC_VERSION=$(echo \"$RC_VERSION\" | sed 's/\\./\\_/g')\n    INCREMENT=$(echo \"$INCREMENT\" | sed 's/\\./\\_/g')\n\n    # check if a version to start at was given\n    if [ ! -z $STARTING_VERSION ]; then\n        # overwrite start version\n        RC_VERSION=$(echo \"$STARTING_VERSION\" | sed 's/\\./\\_/g')\n    fi\n\n    mkdir -p \"$(get_path_to_stage '1')\"\n\n    cat <<EOF > \"$(get_path_to_stage_settings 1)\"\nexport NCTL_STAGE_SHORT_NAME=\"YOUR-SHORT-NAME\"\n\nexport NCTL_STAGE_DESCRIPTION=\"YOUR-DESCRIPTION\"\n\nexport NCTL_STAGE_TARGETS=(\n    \"${RC_VERSION}:remote\"\n    \"${INCREMENT}:local\"\n)\nEOF\n    cat \"$(get_path_to_stage_settings 1)\"\n    popd\n}\n\n# Kicks off the scenario\n# Just here for a log message\nfunction start_upgrade_scenario_1() {\n    log \"... Starting Upgrade Scenario 1\"\n    nctl-exec-upgrade-scenario-1\n}\n\nfunction start_upgrade_scenario_3() {\n    log \"... Starting Upgrade Scenario 3\"\n    nctl-exec-upgrade-scenario-3\n}\n\nfunction start_upgrade_scenario_4() {\n    log \"... Starting Upgrade Scenario 4\"\n    nctl-exec-upgrade-scenario-4\n}\n\nfunction start_upgrade_scenario_5() {\n    log \"... 
Starting Upgrade Scenario 5\"\n    nctl-exec-upgrade-scenario-5\n}\n\nfunction start_upgrade_scenario_6() {\n    log \"... Starting Upgrade Scenario 6\"\n    nctl-exec-upgrade-scenario-6\n}\n\nfunction start_upgrade_scenario_7() {\n    log \"... Starting Upgrade Scenario 7\"\n    nctl-exec-upgrade-scenario-7\n}\n\nfunction start_upgrade_scenario_8() {\n    log \"... Starting Upgrade Scenario 8\"\n    nctl-exec-upgrade-scenario-8\n}\n\nfunction start_upgrade_scenario_9() {\n    log \"... Starting Upgrade Scenario 9\"\n    nctl-exec-upgrade-scenario-9\n}\n\nfunction start_upgrade_scenario_10() {\n    log \"... Setting up custom starting version\"\n    local PATH_TO_STAGE\n\n    PATH_TO_STAGE=\"$(get_path_to_stage 1)\"\n\n    log \"... downloading remote for 1.4.5\"\n    nctl-stage-set-remotes \"1.4.5\"\n\n    log \"... tearing down old stages\"\n    nctl-stage-teardown\n\n    log \"... creating new stage\"\n    dev_branch_settings \"$PATH_TO_STAGE\" \"1.4.5\"\n    build_from_settings_file\n\n    log \"... Starting Upgrade Scenario 10\"\n    nctl-exec-upgrade-scenario-10\n}\n\nfunction start_upgrade_scenario_11() {\n    log \"... Starting Upgrade Scenario 11\"\n    nctl-exec-upgrade-scenario-11\n}\n\nfunction start_upgrade_scenario_12() {\n    log \"... Setting up custom starting version\"\n    local PATH_TO_STAGE\n\n    PATH_TO_STAGE=\"$(get_path_to_stage 1)\"\n\n    log \"... downloading remote for 1.3.0\"\n    nctl-stage-set-remotes \"1.3.0\"\n\n    log \"... tearing down old stages\"\n    nctl-stage-teardown\n\n    log \"... creating new stage\"\n    dev_branch_settings \"$PATH_TO_STAGE\" \"1.3.0\"\n    build_from_settings_file\n\n    log \"... Starting Upgrade Scenario 12\"\n    nctl-exec-upgrade-scenario-12\n}\n\nfunction start_upgrade_scenario_13() {\n    log \"... Setting up custom starting version\"\n    local PATH_TO_STAGE\n\n    PATH_TO_STAGE=\"$(get_path_to_stage 1)\"\n\n    log \"... 
downloading remote for 1.4.13\"\n    nctl-stage-set-remotes \"1.4.13\"\n\n    log \"... tearing down old stages\"\n    nctl-stage-teardown\n\n    log \"... creating new stage\"\n    dev_branch_settings \"$PATH_TO_STAGE\" \"1.4.13\"\n    build_from_settings_file\n\n    log \"... Starting Upgrade Scenario 13\"\n    nctl-exec-upgrade-scenario-13\n}\n\nfunction start_upgrade_scenario_14() {\n    log \"... Setting up custom starting version\"\n    local PATH_TO_STAGE\n\n    PATH_TO_STAGE=\"$(get_path_to_stage 1)\"\n\n    log \"... downloading remote for 1.4.13\"\n    nctl-stage-set-remotes \"1.4.13\"\n\n    log \"... tearing down old stages\"\n    nctl-stage-teardown\n\n    log \"... creating new stage\"\n    dev_branch_settings \"$PATH_TO_STAGE\" \"1.4.13\"\n    build_from_settings_file\n\n    log \"... Starting Upgrade Scenario 14\"\n    nctl-exec-upgrade-scenario-14\n}\n\n# ----------------------------------------------------------------\n# ENTRY POINT\n# ----------------------------------------------------------------\n\nunset TEST_ID\nunset SKIP_SETUP\n\nfor ARGUMENT in \"$@\"; do\n    KEY=$(echo \"$ARGUMENT\" | cut -f1 -d=)\n    VALUE=$(echo \"$ARGUMENT\" | cut -f2 -d=)\n    case \"$KEY\" in\n        test_id) TEST_ID=${VALUE} ;;\n        skip_setup) SKIP_SETUP=${VALUE} ;;\n        *) ;;\n    esac\ndone\n\nmain \"$TEST_ID\" \"$SKIP_SETUP\"\n"
  },
  {
    "path": "ci/nctl_upgrade_stage.sh",
    "content": "#!/usr/bin/env bash\n\n# Script used to group everything needed for nctl upgrade remotes.\n\nset -e\nshopt -s expand_aliases\n\ntrap clean_up EXIT\n\nfunction clean_up() {\n    local EXIT_CODE=$?\n\n    if [ \"$EXIT_CODE\" = '0' ] && [ ! -z ${DRONE} ]; then\n        # Running in CI so don't cleanup stage dir\n        echo \"Script completed successfully!\"\n        return\n    fi\n\n    if [ -d \"$TEMP_STAGE_DIR\" ]; then\n        echo \"Script exited $EXIT_CODE\"\n        echo \"... Removing stage dir: $TEMP_STAGE_DIR\"\n        rm -rf \"$TEMP_STAGE_DIR\"\n        exit \"$EXIT_CODE\"\n    fi\n}\n\n# DIRECTORIES\nROOT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")/..\" >/dev/null 2>&1 && pwd)\"\nBIN_BUILD_DIR=\"$ROOT_DIR/target/release\"\nWASM_BUILD_DIR=\"$ROOT_DIR/target/wasm32-unknown-unknown/release\"\nCONFIG_DIR=\"$ROOT_DIR/resources/local\"\nTEMP_STAGE_DIR='/tmp/nctl_upgrade_stage'\n\n# FILES\nBIN_ARRAY=(casper-node)\n\nWASM_ARRAY=(add_bid.wasm \\\n            delegate.wasm \\\n            transfer_to_account_u512.wasm \\\n            undelegate.wasm \\\n            withdraw_bid.wasm)\n\nCONFIG_ARRAY=(chainspec.toml.in config.toml accounts.toml)\n\n# Create temporary staging directory\nif [ ! 
-d \"$TEMP_STAGE_DIR\" ]; then\n    mkdir -p '/tmp/nctl_upgrade_stage'\nfi\n\n# Ensure files are built\ncd \"$ROOT_DIR\"\ncargo build --release --package casper-node\nmake build-contract-rs/activate-bid\nmake build-contract-rs/add-bid\nmake build-contract-rs/delegate\nmake build-contract-rs/named-purse-payment\nmake build-contract-rs/transfer-to-account-u512\nmake build-contract-rs/undelegate\nmake build-contract-rs/withdraw-bid\n\n# Copy binaries to staging dir\nfor i in \"${BIN_ARRAY[@]}\"; do\n    if [ -f \"$BIN_BUILD_DIR/$i\" ]; then\n        echo \"Copying $BIN_BUILD_DIR/$i to $TEMP_STAGE_DIR\"\n        cp \"$BIN_BUILD_DIR/$i\" \"$TEMP_STAGE_DIR\"\n    else\n        echo \"ERROR: $BIN_BUILD_DIR/$i not found!\"\n        exit 1\n    fi\n    echo \"\"\ndone\n\n# Copy wasm to staging dir\nfor i in \"${WASM_ARRAY[@]}\"; do\n    if [ -f \"$WASM_BUILD_DIR/$i\" ]; then\n        echo \"Copying $WASM_BUILD_DIR/$i to $TEMP_STAGE_DIR\"\n        cp \"$WASM_BUILD_DIR/$i\" \"$TEMP_STAGE_DIR\"\n    else\n        echo \"ERROR: $WASM_BUILD_DIR/$i not found!\"\n        exit 2\n    fi\n    echo \"\"\ndone\n\n# Copy configs to staging dir\nfor i in \"${CONFIG_ARRAY[@]}\"; do\n    if [ -f \"$CONFIG_DIR/$i\" ]; then\n        echo \"Copying $CONFIG_DIR/$i to $TEMP_STAGE_DIR\"\n        cp \"$CONFIG_DIR/$i\" \"$TEMP_STAGE_DIR\"\n    else\n        echo \"ERROR: $CONFIG_DIR/$i not found!\"\n        exit 3\n    fi\n    echo \"\"\ndone\n"
  },
  {
    "path": "ci/nightly-test.sh",
    "content": "#!/usr/bin/env bash\nset -e\nshopt -s expand_aliases\n\nDRONE_ROOT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")/..\" >/dev/null 2>&1 && pwd)\"\nNCTL_HOME=\"$DRONE_ROOT_DIR/../casper-nctl\"\nNCTL_CASPER_HOME=\"$DRONE_ROOT_DIR\"\n\nSCENARIOS_DIR=\"$NCTL_HOME/sh/scenarios\"\nSCENARIOS_CHAINSPEC_DIR=\"$SCENARIOS_DIR/chainspecs\"\nSCENARIOS_ACCOUNTS_DIR=\"$SCENARIOS_DIR/accounts_toml\"\nSCENARIOS_CONFIGS_DIR=\"$SCENARIOS_DIR/configs\"\n\nNCTL_CLIENT_BRANCH=\"${DRONE_BRANCH:='dev'}\"\n\n# Activate Environment\npushd \"$DRONE_ROOT_DIR\"\nsource \"$NCTL_HOME/activate\"\n\n# Call compile wrapper for client, launcher, and nctl-compile\nbash -c \"$DRONE_ROOT_DIR/ci/nctl_compile.sh\"\n\nfunction start_run_teardown() {\n    local RUN_CMD=$1\n    local TEST_NAME\n    local STAGE_TOML_DIR\n    local SETUP_ARGS\n    local CONFIG_TOML\n    local CHAINSPEC_TOML\n    local ACCOUNTS_TOML\n\n    # Capture test prefix for custom file checks\n    TEST_NAME=\"$(echo $RUN_CMD | awk -F'.sh' '{ print $1 }')\"\n    STAGE_TOML_DIR=\"$NCTL/overrides\"\n    CONFIG_TOML=\"$STAGE_TOML_DIR/$TEST_NAME.config.toml\"\n    CHAINSPEC_TOML=\"$STAGE_TOML_DIR/$TEST_NAME.chainspec.toml.in\"\n    ACCOUNTS_TOML=\"$STAGE_TOML_DIR/$TEST_NAME.accounts.toml\"\n\n    # Really-really make sure nothing is leftover\n    nctl-assets-teardown\n\n    # Overrides chainspec.toml\n    if [ -f \"$CHAINSPEC_TOML\" ]; then\n        SETUP_ARGS+=(\"chainspec_path=$CHAINSPEC_TOML\")\n    fi\n\n    # Overrides accounts.toml\n    if [ -f \"$ACCOUNTS_TOML\" ]; then\n        SETUP_ARGS+=(\"accounts_path=$ACCOUNTS_TOML\")\n    fi\n\n    # Overrides config.toml\n    if [ -f \"$CONFIG_TOML\" ]; then\n        SETUP_ARGS+=(\"config_path=$CONFIG_TOML\")\n    fi\n\n    # Setup nctl files for test\n    echo \"Setting up network: nctl-assets-setup ${SETUP_ARGS[@]}\"\n    nctl-assets-setup \"${SETUP_ARGS[@]}\"\n    sleep 1\n\n    # Start nctl network\n    nctl-start\n    echo \"Sleeping 10s to allow network startup\"\n    
sleep 10\n\n    # Run passed in test\n    pushd \"$SCENARIOS_DIR\"\n    echo \"Starting scenario: $RUN_CMD\"\n    # Don't quote the cmd\n    source $RUN_CMD\n\n    # Cleanup after test completion\n    popd\n    nctl-assets-teardown\n    sleep 1\n}\n\nfunction run_test_and_count {\n    CASPER_NCTL_NIGHTLY_TEST_COUNT=$((CASPER_NCTL_NIGHTLY_TEST_COUNT+1))\n    eval $1\n}\n\nfunction run_nightly_upgrade_test() {\n    # setup only needed the first time\n    run_test_and_count 'bash -c \"./ci/nctl_upgrade.sh test_id=4\"'\n    run_test_and_count 'bash -c \"./ci/nctl_upgrade.sh test_id=5 skip_setup=true\"'\n    run_test_and_count 'bash -c \"./ci/nctl_upgrade.sh test_id=6 skip_setup=true\"'\n    run_test_and_count 'bash -c \"./ci/nctl_upgrade.sh test_id=7 skip_setup=true\"'\n    run_test_and_count 'bash -c \"./ci/nctl_upgrade.sh test_id=8 skip_setup=true\"'\n    run_test_and_count 'bash -c \"./ci/nctl_upgrade.sh test_id=9 skip_setup=true\"'\n    run_test_and_count 'bash -c \"./ci/nctl_upgrade.sh test_id=10\"'\n    run_test_and_count 'bash -c \"./ci/nctl_upgrade.sh test_id=11\"'\n    run_test_and_count 'bash -c \"./ci/nctl_upgrade.sh test_id=12\"'\n    run_test_and_count 'bash -c \"./ci/nctl_upgrade.sh test_id=13\"'\n    run_test_and_count 'bash -c \"./ci/nctl_upgrade.sh test_id=14\"'\n}\n\nfunction run_soundness_test() {\n    echo \"Starting network soundness test\"\n\n    # Really-really make sure nothing is leftover\n    nctl-assets-teardown\n\n    $NCTL/sh/scenarios/network_soundness.py\n\n    # Clean up after the test\n    nctl-assets-teardown\n}\n\nCASPER_NCTL_NIGHTLY_TEST_COUNT=0\n\nsource \"$NCTL/sh/staging/set_override_tomls.sh\"\nrun_test_and_count 'start_run_teardown \"client.sh\"'\nrun_test_and_count 'start_run_teardown \"itst01.sh\"'\nrun_test_and_count 'start_run_teardown \"itst01_private_chain.sh\"'\nrun_test_and_count 'start_run_teardown \"itst02.sh\"'\nrun_test_and_count 'start_run_teardown \"itst02_private_chain.sh\"'\nrun_test_and_count 'start_run_teardown 
\"itst11.sh\"'\nrun_test_and_count 'start_run_teardown \"itst11_private_chain.sh\"'\nrun_test_and_count 'start_run_teardown \"itst13.sh\"'\nrun_test_and_count 'start_run_teardown \"itst14.sh\"'\nrun_test_and_count 'start_run_teardown \"itst14_private_chain.sh\"'\nrun_test_and_count 'start_run_teardown \"bond_its.sh\"'\nrun_test_and_count 'start_run_teardown \"emergency_upgrade_test.sh\"'\nrun_test_and_count 'start_run_teardown \"emergency_upgrade_test_balances.sh\"'\nrun_test_and_count 'start_run_teardown \"upgrade_after_emergency_upgrade_test.sh\"'\nrun_test_and_count 'start_run_teardown \"sync_test.sh timeout=500\"'\nrun_test_and_count 'start_run_teardown \"swap_validator_set.sh\"'\nrun_test_and_count 'start_run_teardown \"sync_upgrade_test.sh node=6 era=5 timeout=500\"'\nrun_test_and_count 'start_run_teardown \"validators_disconnect.sh\"'\nrun_test_and_count 'start_run_teardown \"event_stream.sh\"'\nrun_test_and_count 'start_run_teardown \"regression_4771.sh\"'\n# Without start_run_teardown - these ones perform their own assets setup, network start and teardown\nrun_test_and_count 'source \"$SCENARIOS_DIR/upgrade_after_emergency_upgrade_test_pre_1.5.sh\"'\nrun_test_and_count 'source \"$SCENARIOS_DIR/regression_3976.sh\"'\n\nrun_nightly_upgrade_test\n\nrun_test_and_count 'run_soundness_test'\n\n# Run these last as they occasionally fail (see https://github.com/casper-network/casper-node/issues/2973)\nrun_test_and_count 'start_run_teardown \"itst06.sh\"'\nrun_test_and_count 'start_run_teardown \"itst06_private_chain.sh\"'\nrun_test_and_count 'start_run_teardown \"itst07.sh\"'\nrun_test_and_count 'start_run_teardown \"itst07_private_chain.sh\"'\n\necho \"All tests passed. Test count: $CASPER_NCTL_NIGHTLY_TEST_COUNT\""
  },
  {
    "path": "ci/publish_deb_to_repo.sh",
    "content": "#!/usr/bin/env bash\nset -e\n\n# Verify all variables are present\nif [[ -z $PLUGIN_GPG_KEY || -z $PLUGIN_GPG_PASS || -z $PLUGIN_REGION \\\n        || -z $PLUGIN_REPO_NAME || -z $PLUGIN_ACL || -z $PLUGIN_PREFIX \\\n        || -z $PLUGIN_DEB_PATH || -z $PLUGIN_OS_CODENAME ]]; then\n    echo \"ERROR: Environment Variable Missing!\"\n    exit 1\nfi\n\n# Verify if it's the first time publishing. Will need to know later.\n# Probably an easier way to do this check :)\nEXISTS_RET=$(aws s3 ls s3://\"$PLUGIN_REPO_NAME\"/releases/dists/ --region \"$PLUGIN_REGION\" | grep \"$PLUGIN_OS_CODENAME\") || EXISTS_RET=\"false\"\n\n# Sanity Check for later\nif [ \"$EXISTS_RET\" = \"false\" ]; then\n    echo \"First time uploading repo!\"\nelse\n    echo \"Repo Exists! Defaulting to publish update...\"\nfi\n\n### APTLY SECTION\n\n# Move old config file to use in jq query\nmv ~/.aptly.conf ~/.aptly.conf.orig\n\n# Inject ENV Variables and save as .aptly.conf\njq --arg region \"$PLUGIN_REGION\" --arg bucket \"$PLUGIN_REPO_NAME\" --arg acl \"$PLUGIN_ACL\" --arg prefix \"$PLUGIN_PREFIX\"   '.S3PublishEndpoints[$bucket] = {\"region\":$region, \"bucket\":$bucket, \"acl\": $acl, \"prefix\": $prefix}' ~/.aptly.conf.orig > ~/.aptly.conf\n\n# If aptly repo DOESN'T exist locally already\nif [ ! \"$(aptly repo list | grep $PLUGIN_OS_CODENAME)\" ]; then\n    aptly repo create -distribution=\"$PLUGIN_OS_CODENAME\" -component=main \"release-$PLUGIN_OS_CODENAME\"\nfi\n\n# If aptly mirror DOESN'T exist locally already\nif [ ! \"$(aptly mirror list | grep $PLUGIN_OS_CODENAME)\" ] && [ ! \"$EXISTS_RET\" = \"false\" ] ; then\n    aptly mirror create -ignore-signatures \"local-repo-$PLUGIN_OS_CODENAME\" https://\"${PLUGIN_REPO_NAME}\"/\"${PLUGIN_PREFIX}\"/ \"${PLUGIN_OS_CODENAME}\" main\nfi\n\n# When it's not the first time uploading.\nif [ ! 
\"$EXISTS_RET\" = \"false\" ]; then\n    aptly mirror update -ignore-signatures \"local-repo-$PLUGIN_OS_CODENAME\"\n    # Found an article that said using 'Name' will select all packages for us\n    aptly repo import \"local-repo-$PLUGIN_OS_CODENAME\" \"release-$PLUGIN_OS_CODENAME\" Name\nfi\n\n# Add .debs to the local repo\naptly repo add -force-replace \"release-$PLUGIN_OS_CODENAME\" \"$PLUGIN_DEB_PATH\"/*.deb\n\n# Publish to S3\nif [ ! \"$(aptly publish list | grep $PLUGIN_REPO_NAME | grep $PLUGIN_OS_CODENAME)\" ]; then\n    # If the repo is new\n    aptly publish repo -batch -force-overwrite -passphrase=\"$PLUGIN_GPG_PASS\" \"release-$PLUGIN_OS_CODENAME\" s3:\"${PLUGIN_REPO_NAME}\":\nelse\n    # If the repo exists\n    aptly publish update -batch -force-overwrite -passphrase=\"$PLUGIN_GPG_PASS\" \"$PLUGIN_OS_CODENAME\" s3:\"${PLUGIN_REPO_NAME}\":\nfi\n"
  },
  {
    "path": "ci/publish_to_crates_io.sh",
    "content": "#!/usr/bin/env bash\n\nset -eu -o pipefail\n\nCRATES_URL=https://crates.io/api/v1/crates\nROOT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")/..\" >/dev/null 2>&1 && pwd)\"\n\nrun_curl() {\n    set +e\n    CURL_OUTPUT=$(curl -s $1)\n    set -e\n    local EXIT_CODE=$?\n    if [[ $EXIT_CODE -ne 0 ]]; then\n        printf \"curl -s %s failed with exit code %d\\n\\n\" $1 $EXIT_CODE\n        exit 1\n    fi\n}\n\ncheck_python_has_toml() {\n    set +e\n    python3 -c \"import toml\" 2>/dev/null\n    if [[ $? -ne 0 ]]; then\n        printf \"Ensure you have 'toml' installed for Python3\\n\"\n        printf \"e.g. run\\n\"\n        printf \"    pip3 install toml --user\\n\\n\"\n        exit 3\n    fi\n    set -e\n}\n\nlocal_version() {\n    local CRATE_DIR=\"$1\"\n    printf \"Local version:         \"\n    LOCAL_VERSION=$(cat \"$ROOT_DIR/$CRATE_DIR/Cargo.toml\" | python3 -c \"import sys, toml; print(toml.load(sys.stdin)['package']['version'])\")\n    printf \"%s\\n\" $LOCAL_VERSION\n}\n\nmax_version_in_crates_io() {\n    local CRATE=$1\n    printf \"Max published version: \"\n    run_curl $CRATES_URL/$CRATE\n    if [[ \"$CURL_OUTPUT\" == \"{\\\"errors\\\":[{\\\"detail\\\":\\\"Not Found\\\"}]}\" ]]; then\n        CRATES_IO_VERSION=\"N/A (not found in crates.io)\"\n    else\n        CRATES_IO_VERSION=$(echo \"$CURL_OUTPUT\" | python3 -c \"import sys, json; print(json.load(sys.stdin)['crate']['max_version'])\")\n    fi\n    printf \"%s\\n\" \"$CRATES_IO_VERSION\"\n}\n\npublish() {\n    local CRATE_DIR=\"$1\"\n    local CRATE_NAME=$(cat $ROOT_DIR/$CRATE_DIR/Cargo.toml | python3 -c \"import sys, toml; print(toml.load(sys.stdin)['package']['name'])\")\n    printf \"%s\\n\" $CRATE_NAME\n\n    max_version_in_crates_io $CRATE_NAME\n\n    local_version \"$CRATE_DIR\"\n\n    if [[ \"$LOCAL_VERSION\" == \"$CRATES_IO_VERSION\" ]]; then\n        printf \"Skipping\\n\"\n    else\n        printf \"Publishing...\\n\"\n        pushd $ROOT_DIR/$CRATE_DIR >/dev/null\n        
set +u\n        cargo publish \"${@:2}\" --token ${CARGO_TOKEN}\n        set -u\n        popd >/dev/null\n        printf \"Published version %s\\n\" $LOCAL_VERSION\n        printf \"Sleeping for 60 seconds...\\n\"\n        sleep 60\n    fi\n    printf \"================================================================================\\n\\n\"\n}\n\ncheck_python_has_toml\n\n# These are the subdirs of casper-node which contain packages for publishing.  They should remain ordered from\n# least-dependent to most.\n#\n# Header format for dependencies:\n# <directory> (<crate name>)\n#     <dependent crate name>\n\n# types (casper-types) -> None\npublish types\n\n# storage (casper-storage)\n#     casper-types\npublish storage\n\n# binary_port (casper-binary-port)\n#     casper-types\npublish binary_port\n\n# execution-engine (casper-execution-engine)\n#     casper-storage\n#     casper-types\npublish execution_engine\n\n# execution_engine_testing/test_support (casper-engine-test-support)\n#     casper-storage\n#     casper-types\n#     casper-execution-engine\npublish execution_engine_testing/test_support\n\n# smart_contracts/contract (casper-contract)\n#     casper-types\npublish smart_contracts/contract\n\n# smart_contracts/sdk_sys (casper-contract-sdk_sys) -> None\npublish smart_contracts/sdk_sys\n\n# executor/wasm_common (casper-executor-wasm_common)\n#     casper-contract-sdk_sys\npublish executor/wasm_common\n\n# smart_contracts/macros (casper-contract-macros)\n#     casper-executor-wasm_common\n#     casper-contract-sdk_sys\npublish smart_contracts/macros\n\n# smart_contracts/sdk (casper-contract-sdk)\n#     casper-contract-sdk_sys\n#     casper-executor-wasm_common\n#     casper-contract-macros\npublish smart_contracts/sdk\n\n# smart_contracts/sdk_codegen (casper-contract-sdk_codegen)\n#     casper-contract-sdk\npublish smart_contracts/sdk_codegen\n\n# executor/wasm_interface (casper-executor-wasm_interface)\n#     casper-executor-wasm_common\n#     
casper-storage\n#     casper-types\npublish executor/wasm_interface\n\n# executor/wasm_host (casper-executor-wasm_host)\n#     casper-executor-wasm_common\n#     casper-executor-wasm_interface\n#     casper-storage\n#     casper-types\npublish executor/wasm_host\n\n# executor/wasmer_backend (casper-executor-wasmer_backend)\n#     casper-executor-wasm_common\n#     casper-executor-wasm_interface\n#     casper-executor-wasm_host\n#     casper-storage\n#     casper-contract-sdk_sys\n#     casper-types\npublish executor/wasmer_backend\n\n# executor/wasm (casper-executor-wasm)\n#     casper-executor-wasm_common\n#     casper-executor-wasm_host\n#     casper-executor-wasm_interface\n#     casper-executor-wasmer_backend\n#     casper-storage\n#     casper-types\n#     casper-execution-engine\npublish executor/wasm\n\n# node (casper-node)\n#     casper-binary-port\n#     casper-storage\n#     casper-types\n#     casper-execution-engine\n#     casper-executor-wasm\n#     casper-executor-wasm_interface\npublish node\n"
  },
  {
    "path": "ci/test_casper-node_deb.sh",
    "content": "#!/usr/bin/env bash\nset -e\n\n# This script is used to test install of casper-node.\n# It will also detect issues with chainspec.toml and config-example.toml.\n# Often, new settings for casper-node get missed in resources/production directory\n# as most developers test using resources/local.  This will display any crashes from\n# configuration in CI, rather than requiring manual work with a release.\n\nDEB_NAME=\"casper-node\"\n\necho \"$1/$DEB_NAME*.deb\"\napt-get install -y \"$1\"/target/debian/\"$DEB_NAME\"*.deb\n\nif ! type \"$DEB_NAME\" > /dev/null; then\n  exit 1\nfi\n\ncp \"$1/resources/production/chainspec.toml\" /etc/casper/chainspec.toml\n\n# Replace timestamp with future time in chainspec.toml to not get start after genesis error\nFUTURE_TIME=$(date -d '+1 hour' --utc +%FT%TZ)\nsed -i \"/timestamp =/c\\timestamp = \\'$FUTURE_TIME\\'\" /etc/casper/chainspec.toml\n\nTEST_RUN_OUTPUT=\"$1/casper_node_run_output\"\n# This will fail for no keys, but will fail for config.toml or chainspec.toml first\ncasper-node validator /etc/casper/config.toml &> \"$TEST_RUN_OUTPUT\" || true\n\napt-get remove -y \"$DEB_NAME\"\n\nEXPECTED_TEXT=\"secret key load failed: could not read '/etc/casper/validator_keys/secret_key.pem'\"\nif grep < \"$TEST_RUN_OUTPUT\" -q \"$EXPECTED_TEXT\"; then\n    echo \"Found key failures as expected\"\nelse\n    echo \"#################################\"\n    echo \"Expected key failures, not found.\"\n    echo \"Assume this is configuration related for config-example.toml or chainspec.toml in resources/production.\"\n    echo \"Run log:\"\n    cat \"$TEST_RUN_OUTPUT\"\n    exit 1\nfi\n"
  },
  {
    "path": "ci/test_deb_install.sh",
    "content": "#!/usr/bin/env bash\nset -ex\n\necho \"$1\"/\"$2\"*.deb\napt-get install -y \"$1\"/target/debian/\"$2\"*.deb\n\nif ! type \"$2\" > /dev/null; then\n  exit 1\nfi\n\napt-get remove -y \"$2\"\n"
  },
  {
    "path": "ci/upgrade_package_s3_storage.sh",
    "content": "#!/usr/bin/env bash\n\nset -e\n\n# This script allows uploading, downloading and purging of files to genesis.casper.network s3 for storing\n# possible upgrade package releases to promote to a network or use for testing.\n\n# Using drone/GIT_HASH/PROTOCOL_VERSION as s3 bucket location in genesis.casper.network\n\n# Check python has toml for getting PROTOCOL_VERSION\nset +e\npython3 -c \"import toml\" 2>/dev/null\nif [[ $? -ne 0 ]]; then\n  echo \"Ensure you have 'toml' installed for Python3\"\n  echo \"e.g. run\"\n  echo \"    python3 -m pip install toml --user\"\n  echo \"\"\n  exit 3\nfi\nset -e\n\nROOT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")/..\" >/dev/null 2>&1 && pwd)\"\nGENESIS_FILES_DIR=\"$ROOT_DIR/resources/production\"\n\nPROTOCOL_VERSION=$(cat \"$GENESIS_FILES_DIR/chainspec.toml\" | python3 -c \"import sys, toml; print(toml.load(sys.stdin)['protocol']['version'].replace('.','_'))\")\necho \"Protocol version: $PROTOCOL_VERSION\"\nGIT_HASH=$(git rev-parse HEAD)\necho \"Git hash: $GIT_HASH\"\n\nvalid_commands=(\"put\" \"get\" \"del\")\nACTION=$1\nif [[ \" ${valid_commands[*]} \" != *\" $ACTION \"* ]]; then\n  echo \"Invalid command passed: $ACTION\"\n  echo \"Possible commands are:\"\n  echo \" put <local source with ending />\"\n  echo \" get <local target>\"\n  echo \" del \"\n  exit 1\nfi\n\nif [[ \"$ACTION\" != \"del\" ]]; then\n  LOCAL=$2\n\n  if [ -z \"$LOCAL\" ]; then\n    echo \"Local path not provided\"\n    exit 1\n  fi\nfi\n\necho \"CL_VAULT_TOKEN: '${CL_VAULT_TOKEN}'\"\necho \"CL_VAULT_HOST: '${CL_VAULT_HOST}'\"\n# get aws credentials files\nCL_VAULT_URL=\"${CL_VAULT_HOST}/v1/sre/cicd/s3/aws_credentials\"\nCREDENTIALS=$(curl -s -q -H \"X-Vault-Token: $CL_VAULT_TOKEN\" -X GET \"$CL_VAULT_URL\")\n# get just the body required by s3cmd, strip off vault payload\nAWS_ACCESS_KEY_ID=$(echo \"$CREDENTIALS\" | jq -r .data.cicd_agent_to_s3.aws_access_key)\nexport AWS_ACCESS_KEY_ID\nAWS_SECRET_ACCESS_KEY=$(echo \"$CREDENTIALS\" | jq -r 
.data.cicd_agent_to_s3.aws_secret_key)\nexport AWS_SECRET_ACCESS_KEY\n\nCL_S3_BUCKET=\"genesis.casper.network\"\nCL_S3_LOCATION=\"drone/$GIT_HASH\"\n\ncase \"$ACTION\" in\n\"put\")\n  echo \"sync ${LOCAL} s3://${CL_S3_BUCKET}/${CL_S3_LOCATION}/${PROTOCOL_VERSION}/\"\n  s3cmd sync \"${LOCAL}\" \"s3://${CL_S3_BUCKET}/${CL_S3_LOCATION}/${PROTOCOL_VERSION}/\"\n  ;;\n\"get\")\n  echo \"sync s3://${CL_S3_BUCKET}/${CL_S3_LOCATION}/${PROTOCOL_VERSION}/ ${LOCAL}\"\n  s3cmd sync \"s3://${CL_S3_BUCKET}/${CL_S3_LOCATION}/${PROTOCOL_VERSION}/\" \"${LOCAL}\"\n  ;;\n\"del\")\n  echo \"del --recursive s3://${CL_S3_BUCKET}/${CL_S3_LOCATION}\"\n  s3cmd del --recursive \"s3://${CL_S3_BUCKET}/${CL_S3_LOCATION}\"\n  ;;\nesac\n"
  },
  {
    "path": "docker_make.sh",
    "content": "\n# Images used in this script are built in Casper/buildenv repo\n\n# This allows make commands without local build environment setup or\n# using an OS version other than locally installed.\n\nset -e\n\npackage=\"docker_make.sh\"\nvalid_docker_images=(\"node-build-u1804\" \"node-build-u2004\")\ndefault_image=${valid_docker_images[0]}\nfunction help {\n  echo \"$package - issue make commands in docker environment\"\n  echo \" \"\n  echo \"$package [options] command\"\n  echo \" \"\n  echo \"options:\"\n  echo \"-h, --help             show brief help\"\n  echo \"--image=DOCKER_IMAGE   specify docker image to use: ${valid_docker_images[*]}\"\n  echo \"                       Image will default to ${default_image} if not given\"\n  echo\n  echo \"Ex: '$package all' will execute 'make all' on ${default_image}.\"\n  echo\n  echo \"Note: The results will be in your ./target or ./target-as directory and\"\n  echo \"      might not be compatible with your local system\"\n  exit 0\n}\n\nwhile test $# -gt 0; do\n  case \"$1\" in\n    -h|--help)\n      help\n      ;;\n    --image*)\n      image=`echo $1 | sed -e 's/^[^=]*=//g'`\n      if [[ \" ${valid_docker_images[*]} \" == *\" $image \"* ]]; then\n        docker_image=$image\n      else\n        echo \"Invalid docker image passed in: $image\"\n        echo \"Possible images are: ${valid_docker_images[*]}.\"\n      fi\n      shift\n      ;;\n    *)\n      break\n      ;;\n  esac\ndone\n\nif [ -z \"$1\" ]; then\n  echo \"make command not given.\"\n#  echo \"Using 'list' to show targets.\"\n#  make_command=\"list\"\n  exit 1\nelse\n  make_command=\"$1\"\nfi\n\nif [ -z \"$docker_image\" ]; then\n  echo \"Defaulting build image to ${default_image}.\"\n  docker_image=${default_image}\nfi\n\ndocker pull casperlabs/${docker_image}:latest\n\n# Getting user and group to chown/chgrp target folder from root at end.\n# Cannot use the --user trick as cached .cargo in image is owned by root.\ncommand=\"cd /casper-node; make 
${make_command}; chown -R -f $(id -u):$(id -g) ./target ./target_as ./execution_engine_testing/casper_casper;\"\ndocker run --rm --volume $(pwd):/casper-node casperlabs/${docker_image}:latest /bin/bash -c \"${command}\""
  },
  {
    "path": "execution_engine/CHANGELOG.md",
    "content": "# Changelog\n\nAll notable changes to this project will be documented in this file.  The format is based on [Keep a Changelog].\n\n[comment]: <> (Added:      new features)\n[comment]: <> (Changed:    changes in existing functionality)\n[comment]: <> (Deprecated: soon-to-be removed features)\n[comment]: <> (Removed:    now removed features)\n[comment]: <> (Fixed:      any bug fixes)\n[comment]: <> (Security:   in case of vulnerabilities)\n\n## 9.0.0\n\n### Added\n\n* Added `RewardsHandling` support to the execution engine\n* Added `RewardsHandling` field to the struct `EngineConfig`\n\n## 8.0.0\n\n### Added\n\n- Add support for a factory pattern on the host side.\n- struct casper_execution_engine::engine_state::engine_config::EngineConfig\n- struct casper_execution_engine::engine_state::engine_config::EngineConfigBuilder\n- const casper_execution_engine::engine_state::engine_config::DEFAULT_ALLOW_AUCTION_BIDS: bool\n- const casper_execution_engine::engine_state::engine_config::DEFAULT_ALLOW_UNRESTRICTED_TRANSFERS: bool\n- const casper_execution_engine::engine_state::engine_config::DEFAULT_BALANCE_HOLD_INTERVAL: casper_types::timestamp::TimeDiff\n- const casper_execution_engine::engine_state::engine_config::DEFAULT_COMPUTE_REWARDS: bool\n- const casper_execution_engine::engine_state::engine_config::DEFAULT_ENABLE_ENTITY: bool\n- const casper_execution_engine::engine_state::engine_config::DEFAULT_MAXIMUM_DELEGATION_AMOUNT: u64\n- const casper_execution_engine::engine_state::engine_config::DEFAULT_MAX_ASSOCIATED_KEYS: u32\n- const casper_execution_engine::engine_state::engine_config::DEFAULT_MAX_DELEGATORS_PER_VALIDATOR: u32\n- const casper_execution_engine::engine_state::engine_config::DEFAULT_MAX_QUERY_DEPTH: u64\n- const casper_execution_engine::engine_state::engine_config::DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT: u32\n- const casper_execution_engine::engine_state::engine_config::DEFAULT_MAX_STORED_VALUE_SIZE: u32\n- const 
casper_execution_engine::engine_state::engine_config::DEFAULT_MINIMUM_DELEGATION_AMOUNT: u64\n- const casper_execution_engine::engine_state::engine_config::DEFAULT_PROTOCOL_VERSION: casper_types::protocol_version::ProtocolVersion\n- const casper_execution_engine::engine_state::engine_config::DEFAULT_STRICT_ARGUMENT_CHECKING: bool\n- const casper_execution_engine::engine_state::engine_config::DEFAULT_VESTING_SCHEDULE_LENGTH_MILLIS: u64\n- enum casper_execution_engine::engine_state::Error\n- enum casper_execution_engine::engine_state::ExecutableItem\n- enum casper_execution_engine::engine_state::InvalidRequest\n- enum casper_execution_engine::engine_state::SessionInputData<'a>\n- struct casper_execution_engine::engine_state::BlockInfo\n- struct casper_execution_engine::engine_state::EngineConfig\n- struct casper_execution_engine::engine_state::EngineConfigBuilder\n- struct casper_execution_engine::engine_state::ExecutionEngineV1\n- struct casper_execution_engine::engine_state::SessionDataDeploy<'a>\n- struct casper_execution_engine::engine_state::SessionDataV1<'a>\n- struct casper_execution_engine::engine_state::WasmV1Request\n- struct casper_execution_engine::engine_state::WasmV1Result\n- const casper_execution_engine::engine_state::DEFAULT_MAX_QUERY_DEPTH: u64\n- const casper_execution_engine::engine_state::DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT: u32\n- const casper_execution_engine::engine_state::MAX_PAYMENT_AMOUNT: u64\n- const casper_execution_engine::engine_state::WASMLESS_TRANSFER_FIXED_GAS_PRICE: u8\n- static casper_execution_engine::engine_state::MAX_PAYMENT: once_cell::sync::Lazy<casper_types::uint::macro_code::U512>\n- enum casper_execution_engine::execution::ExecError\n- enum casper_execution_engine::resolvers::error::ResolverError\n- trait casper_execution_engine::resolvers::memory_resolver::MemoryResolver\n- const casper_execution_engine::runtime::cryptography::DIGEST_LENGTH: usize\n- fn casper_execution_engine::runtime::cryptography::blake2b<T: 
core::convert::AsRef<[u8]>>(data: T) -> [u8; 32]\n- fn casper_execution_engine::runtime::cryptography::blake3<T: core::convert::AsRef<[u8]>>(data: T) -> [u8; 32]\n- fn casper_execution_engine::runtime::cryptography::sha256<T: core::convert::AsRef<[u8]>>(data: T) -> [u8; 32]\n- struct casper_execution_engine::runtime::stack::RuntimeStack\n- struct casper_execution_engine::runtime::stack::RuntimeStackOverflow\n- type casper_execution_engine::runtime::stack::RuntimeStackFrame = casper_types::system::caller::Caller\n- enum casper_execution_engine::runtime::PreprocessingError\n- enum casper_execution_engine::runtime::WasmValidationError\n- struct casper_execution_engine::runtime::Runtime<'a, R>\n- struct casper_execution_engine::runtime::RuntimeStack\n- struct casper_execution_engine::runtime::RuntimeStackOverflow\n- const casper_execution_engine::runtime::DEFAULT_BR_TABLE_MAX_SIZE: u32\n- const casper_execution_engine::runtime::DEFAULT_MAX_GLOBALS: u32\n- const casper_execution_engine::runtime::DEFAULT_MAX_PARAMETER_COUNT: u32\n- const casper_execution_engine::runtime::DEFAULT_MAX_TABLE_SIZE: u32\n- fn casper_execution_engine::runtime::cycles_for_instruction(instruction: &casper_wasm::elements::ops::Instruction) -> u32\n- fn casper_execution_engine::runtime::preprocess(wasm_config: casper_types::chainspec::vm_config::wasm_config::WasmConfig, module_bytes: &[u8]) -> core::result::Result<casper_wasm::elements::module::Module, casper_execution_engine::runtime::PreprocessingError>\n- type casper_execution_engine::runtime::RuntimeStackFrame = casper_types::system::caller::Caller\n- enum casper_execution_engine::runtime_context::AllowInstallUpgrade\n- struct casper_execution_engine::runtime_context::RuntimeContext<'a, R>\n- const casper_execution_engine::runtime_context::RANDOM_BYTES_COUNT: usize\n\n### Removed\n\n- struct casper_execution_engine::config::Config\n- enum casper_execution_engine::core::engine_state::balance::BalanceResult\n- struct 
casper_execution_engine::core::engine_state::balance::BalanceRequest\n- struct casper_execution_engine::core::engine_state::chainspec_registry::ChainspecRegistry\n- struct casper_execution_engine::core::engine_state::checksum_registry::ChecksumRegistry\n- struct casper_execution_engine::core::engine_state::deploy_item::DeployItem\n- enum casper_execution_engine::core::engine_state::engine_config::FeeHandling\n- enum casper_execution_engine::core::engine_state::engine_config::RefundHandling\n- struct casper_execution_engine::core::engine_state::engine_config::EngineConfig\n- struct casper_execution_engine::core::engine_state::engine_config::EngineConfigBuilder\n- const casper_execution_engine::core::engine_state::engine_config::DEFAULT_ALLOW_AUCTION_BIDS: bool\n- const casper_execution_engine::core::engine_state::engine_config::DEFAULT_ALLOW_UNRESTRICTED_TRANSFERS: bool\n- const casper_execution_engine::core::engine_state::engine_config::DEFAULT_FEE_HANDLING\n- const casper_execution_engine::core::engine_state::engine_config::DEFAULT_MAX_ASSOCIATED_KEYS\n- const casper_execution_engine::core::engine_state::engine_config::DEFAULT_MAX_QUERY_DEPTH\n- const casper_execution_engine::core::engine_state::engine_config::DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT\n- const casper_execution_engine::core::engine_state::engine_config::DEFAULT_MAX_STORED_VALUE_SIZE\n- const casper_execution_engine::core::engine_state::engine_config::DEFAULT_MINIMUM_BID_AMOUNT\n- const casper_execution_engine::core::engine_state::engine_config::DEFAULT_MINIMUM_DELEGATION_AMOUNT\n- const casper_execution_engine::core::engine_state::engine_config::DEFAULT_REFUND_HANDLING\n- const casper_execution_engine::core::engine_state::engine_config::DEFAULT_STRICT_ARGUMENT_CHECKING\n- const casper_execution_engine::core::engine_state::engine_config::DEFAULT_VESTING_SCHEDULE_LENGTH_MILLIS\n- enum casper_execution_engine::core::engine_state::era_validators::GetEraValidatorsError\n- struct 
casper_execution_engine::core::engine_state::era_validators::GetEraValidatorsRequest\n- enum casper_execution_engine::core::engine_state::executable_deploy_item::ContractIdentifier\n- enum casper_execution_engine::core::engine_state::executable_deploy_item::ContractPackageIdentifier\n- enum casper_execution_engine::core::engine_state::executable_deploy_item::DeployKind\n- enum casper_execution_engine::core::engine_state::executable_deploy_item::ExecutableDeployItem\n- enum casper_execution_engine::core::engine_state::executable_deploy_item::ExecutionKind\n- struct casper_execution_engine::core::engine_state::executable_deploy_item::ExecutableDeployItemDiscriminantsIter\n- struct casper_execution_engine::core::engine_state::execute_request::ExecuteRequest\n- struct casper_execution_engine::core::engine_state::execution_effect::ExecutionEffect\n- enum casper_execution_engine::core::engine_state::execution_result::ExecutionResult\n- enum casper_execution_engine::core::engine_state::execution_result::ForcedTransferResult\n- struct casper_execution_engine::core::engine_state::execution_result::ExecutionResultBuilder\n- type casper_execution_engine::core::engine_state::execution_result::ExecutionResults = alloc::collections::vec_deque::VecDeque<casper_execution_engine::core::engine_state::execution_result::ExecutionResult>\n- enum casper_execution_engine::core::engine_state::genesis::GenesisAccount\n- enum casper_execution_engine::core::engine_state::genesis::GenesisError\n- struct casper_execution_engine::core::engine_state::genesis::AdministratorAccount\n- struct casper_execution_engine::core::engine_state::genesis::ExecConfig\n- struct casper_execution_engine::core::engine_state::genesis::ExecConfigBuilder\n- struct casper_execution_engine::core::engine_state::genesis::GenesisConfig\n- struct casper_execution_engine::core::engine_state::genesis::GenesisSuccess\n- struct casper_execution_engine::core::engine_state::genesis::GenesisValidator\n- const 
casper_execution_engine::core::engine_state::genesis::DEFAULT_AUCTION_DELAY: u64\n- const casper_execution_engine::core::engine_state::genesis::DEFAULT_GENESIS_TIMESTAMP_MILLIS: u64\n- const casper_execution_engine::core::engine_state::genesis::DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS: u64\n- const casper_execution_engine::core::engine_state::genesis::DEFAULT_ROUND_SEIGNIORAGE_RATE: num_rational::Ratio<u64>\n- const casper_execution_engine::core::engine_state::genesis::DEFAULT_UNBONDING_DELAY: u64\n- const casper_execution_engine::core::engine_state::genesis::DEFAULT_VALIDATOR_SLOTS: u32\n- enum casper_execution_engine::core::engine_state::get_bids::GetBidsResult\n- struct casper_execution_engine::core::engine_state::get_bids::GetBidsRequest\n- enum casper_execution_engine::core::engine_state::op::Op\n- enum casper_execution_engine::core::engine_state::query::QueryResult\n- struct casper_execution_engine::core::engine_state::query::QueryRequest\n- struct casper_execution_engine::core::engine_state::run_genesis_request::RunGenesisRequest\n- enum casper_execution_engine::core::engine_state::step::StepError\n- struct casper_execution_engine::core::engine_state::step::EvictItem\n- struct casper_execution_engine::core::engine_state::step::RewardItem\n- struct casper_execution_engine::core::engine_state::step::SlashItem\n- struct casper_execution_engine::core::engine_state::step::StepRequest\n- struct casper_execution_engine::core::engine_state::step::StepSuccess\n- struct casper_execution_engine::core::engine_state::system_contract_registry::SystemContractRegistry\n- enum casper_execution_engine::core::engine_state::upgrade::ProtocolUpgradeError\n- struct casper_execution_engine::core::engine_state::upgrade::UpgradeConfig\n- struct casper_execution_engine::core::engine_state::upgrade::UpgradeSuccess\n- enum casper_execution_engine::core::engine_state::BalanceResult\n- enum casper_execution_engine::core::engine_state::Error\n- enum 
casper_execution_engine::core::engine_state::ExecError\n- enum casper_execution_engine::core::engine_state::ExecutableDeployItem\n- enum casper_execution_engine::core::engine_state::ExecutionResult\n- enum casper_execution_engine::core::engine_state::ForcedTransferResult\n- enum casper_execution_engine::core::engine_state::GenesisAccount\n- enum casper_execution_engine::core::engine_state::GetBidsResult\n- enum casper_execution_engine::core::engine_state::GetEraValidatorsError\n- enum casper_execution_engine::core::engine_state::PruneResult\n- enum casper_execution_engine::core::engine_state::QueryResult\n- enum casper_execution_engine::core::engine_state::StepError\n- enum casper_execution_engine::core::engine_state::TransferTargetMode\n- struct casper_execution_engine::core::engine_state::BalanceRequest\n- struct casper_execution_engine::core::engine_state::ChainspecRegistry\n- struct casper_execution_engine::core::engine_state::ChecksumRegistry\n- struct casper_execution_engine::core::engine_state::DeployItem\n- struct casper_execution_engine::core::engine_state::EngineConfig\n- struct casper_execution_engine::core::engine_state::EngineConfigBuilder\n- struct casper_execution_engine::core::engine_state::EngineState<S>\n- struct casper_execution_engine::core::engine_state::ExecConfig\n- struct casper_execution_engine::core::engine_state::ExecuteRequest\n- struct casper_execution_engine::core::engine_state::GenesisConfig\n- struct casper_execution_engine::core::engine_state::GenesisSuccess\n- struct casper_execution_engine::core::engine_state::GetBidsRequest\n- struct casper_execution_engine::core::engine_state::GetEraValidatorsRequest\n- struct casper_execution_engine::core::engine_state::PruneConfig\n- struct casper_execution_engine::core::engine_state::QueryRequest\n- struct casper_execution_engine::core::engine_state::RewardItem\n- struct casper_execution_engine::core::engine_state::RunGenesisRequest\n- struct 
casper_execution_engine::core::engine_state::SlashItem\n- struct casper_execution_engine::core::engine_state::StepRequest\n- struct casper_execution_engine::core::engine_state::StepSuccess\n- struct casper_execution_engine::core::engine_state::SystemContractRegistry\n- struct casper_execution_engine::core::engine_state::TransferArgs\n- struct casper_execution_engine::core::engine_state::TransferRuntimeArgsBuilder\n- struct casper_execution_engine::core::engine_state::UpgradeConfig\n- struct casper_execution_engine::core::engine_state::UpgradeSuccess\n- const casper_execution_engine::core::engine_state::DEFAULT_MAX_QUERY_DEPTH: u64\n- const casper_execution_engine::core::engine_state::DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT: u32\n- const casper_execution_engine::core::engine_state::MAX_PAYMENT_AMOUNT: u64\n- const casper_execution_engine::core::engine_state::WASMLESS_TRANSFER_FIXED_GAS_PRICE: u64\n- static casper_execution_engine::core::engine_state::MAX_PAYMENT: once_cell::sync::Lazy<casper_types::uint::macro_code::U512>\n- enum casper_execution_engine::core::execution::Error\n- enum casper_execution_engine::core::resolvers::error::ResolverError\n- trait casper_execution_engine::core::resolvers::memory_resolver::MemoryResolver\n- struct casper_execution_engine::core::runtime::stack::RuntimeStack\n- struct casper_execution_engine::core::runtime::stack::RuntimeStackOverflow\n- type casper_execution_engine::core::runtime::stack::RuntimeStackFrame = casper_types::system::call_stack_element::CallStackElement\n- struct casper_execution_engine::core::runtime::Runtime<'a, R>\n- struct casper_execution_engine::core::runtime_context::RuntimeContext<'a, R>\n- const casper_execution_engine::core::runtime_context::RANDOM_BYTES_COUNT: usize\n- fn casper_execution_engine::core::runtime_context::validate_group_membership(contract_package: &casper_types::contracts::ContractPackage, access: &casper_types::contracts::EntryPointAccess, validator: impl 
core::ops::function::Fn(&casper_types::uref::URef) -> bool) -> core::result::Result<(), casper_execution_engine::core::engine_state::ExecError>\n- enum casper_execution_engine::core::tracking_copy::AddResult\n- enum casper_execution_engine::core::tracking_copy::TrackingCopyQueryResult\n- enum casper_execution_engine::core::tracking_copy::ValidationError\n- struct casper_execution_engine::core::tracking_copy::TrackingCopy<R>\n- struct casper_execution_engine::core::tracking_copy::TrackingCopyCache<M>\n- trait casper_execution_engine::core::tracking_copy::TrackingCopyExt<R>\n- fn casper_execution_engine::core::tracking_copy::validate_balance_proof(hash: &casper_hashing::Digest, balance_proof: &casper_execution_engine::storage::trie::merkle_proof::TrieMerkleProof<casper_types::key::Key, casper_types::stored_value::StoredValue>, expected_purse_key: casper_types::key::Key, expected_motes: &casper_types::uint::macro_code::U512) -> core::result::Result<(), casper_execution_engine::core::tracking_copy::ValidationError>\n- fn casper_execution_engine::core::tracking_copy::validate_query_proof(hash: &casper_hashing::Digest, proofs: &[casper_execution_engine::storage::trie::merkle_proof::TrieMerkleProof<casper_types::key::Key, casper_types::stored_value::StoredValue>], expected_first_key: &casper_types::key::Key, path: &[alloc::string::String], expected_value: &casper_types::stored_value::StoredValue) -> core::result::Result<(), casper_execution_engine::core::tracking_copy::ValidationError>\n- enum casper_execution_engine::core::ValidationError\n- const casper_execution_engine::core::ADDRESS_LENGTH: usize\n- fn casper_execution_engine::core::validate_balance_proof(hash: &casper_hashing::Digest, balance_proof: &casper_execution_engine::storage::trie::merkle_proof::TrieMerkleProof<casper_types::key::Key, casper_types::stored_value::StoredValue>, expected_purse_key: casper_types::key::Key, expected_motes: &casper_types::uint::macro_code::U512) -> core::result::Result<(), 
casper_execution_engine::core::tracking_copy::ValidationError>\n- fn casper_execution_engine::core::validate_query_proof(hash: &casper_hashing::Digest, proofs: &[casper_execution_engine::storage::trie::merkle_proof::TrieMerkleProof<casper_types::key::Key, casper_types::stored_value::StoredValue>], expected_first_key: &casper_types::key::Key, path: &[alloc::string::String], expected_value: &casper_types::stored_value::StoredValue) -> core::result::Result<(), casper_execution_engine::core::tracking_copy::ValidationError>\n- type casper_execution_engine::core::Address = [u8; 32]\n- struct casper_execution_engine::shared::additive_map::AdditiveMap<K, V, S>\n- struct casper_execution_engine::shared::execution_journal::ExecutionJournal\n- struct casper_execution_engine::shared::host_function_costs::HostFunction<T>\n- struct casper_execution_engine::shared::host_function_costs::HostFunctionCosts\n- type casper_execution_engine::shared::host_function_costs::Cost = u32\n- enum casper_execution_engine::shared::logging::Style\n- struct casper_execution_engine::shared::logging::Settings\n- fn casper_execution_engine::shared::logging::initialize(settings: casper_execution_engine::shared::logging::Settings) -> core::result::Result<(), log::SetLoggerError>\n- fn casper_execution_engine::shared::logging::log_details(\\_log_level: log::Level, \\_message_format: alloc::string::String, \\_properties: alloc::collections::btree::map::BTreeMap<&str, alloc::string::String>)\n- fn casper_execution_engine::shared::logging::log_host_function_metrics(\\_host_function: &str, \\_properties: alloc::collections::btree::map::BTreeMap<&str, alloc::string::String>)\n- struct casper_execution_engine::shared::newtypes::CorrelationId\n- struct casper_execution_engine::shared::opcode_costs::BrTableCost\n- struct casper_execution_engine::shared::opcode_costs::ControlFlowCosts\n- struct casper_execution_engine::shared::opcode_costs::OpcodeCosts\n- const 
casper_execution_engine::shared::opcode_costs::DEFAULT_ADD_COST: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_BIT_COST: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONST_COST: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_BLOCK_OPCODE: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_BR_IF_OPCODE: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_BR_OPCODE: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_BR_TABLE_MULTIPLIER: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_BR_TABLE_OPCODE: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_CALL_INDIRECT_OPCODE: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_CALL_OPCODE: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_DROP_OPCODE: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_ELSE_OPCODE: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_END_OPCODE: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_IF_OPCODE: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_LOOP_OPCODE: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_RETURN_OPCODE: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_SELECT_OPCODE: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONVERSION_COST: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_CURRENT_MEMORY_COST: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_DIV_COST: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_GLOBAL_COST: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_GROW_MEMORY_COST: u32\n- const 
casper_execution_engine::shared::opcode_costs::DEFAULT_INTEGER_COMPARISON_COST: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_LOAD_COST: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_LOCAL_COST: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_MUL_COST: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_NOP_COST: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_STORE_COST: u32\n- const casper_execution_engine::shared::opcode_costs::DEFAULT_UNREACHABLE_COST: u32\n- struct casper_execution_engine::shared::storage_costs::StorageCosts\n- const casper_execution_engine::shared::storage_costs::DEFAULT_GAS_PER_BYTE_COST: u32\n- struct casper_execution_engine::shared::system_config::auction_costs::AuctionCosts\n- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_ACTIVATE_BID_COST: u32\n- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_ADD_BID_COST: u64\n- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_DELEGATE_COST: u32\n- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_DISTRIBUTE_COST: u32\n- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_GET_ERA_VALIDATORS_COST: u32\n- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_READ_ERA_ID_COST: u32\n- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_READ_SEIGNIORAGE_RECIPIENTS_COST: u32\n- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_REDELEGATE_COST: u32\n- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_RUN_AUCTION_COST: u32\n- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_SLASH_COST: u32\n- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_UNDELEGATE_COST: u32\n- const 
casper_execution_engine::shared::system_config::auction_costs::DEFAULT_WITHDRAW_BID_COST: u32\n- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_WITHDRAW_DELEGATOR_REWARD_COST: u32\n- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_WITHDRAW_VALIDATOR_REWARD_COST: u32\n- struct casper_execution_engine::shared::system_config::handle_payment_costs::HandlePaymentCosts\n- const casper_execution_engine::shared::system_config::handle_payment_costs::DEFAULT_FINALIZE_PAYMENT_COST: u32\n- const casper_execution_engine::shared::system_config::handle_payment_costs::DEFAULT_GET_PAYMENT_PURSE_COST: u32\n- const casper_execution_engine::shared::system_config::handle_payment_costs::DEFAULT_GET_REFUND_PURSE_COST: u32\n- const casper_execution_engine::shared::system_config::handle_payment_costs::DEFAULT_SET_REFUND_PURSE_COST: u32\n- struct casper_execution_engine::shared::system_config::mint_costs::MintCosts\n- const casper_execution_engine::shared::system_config::mint_costs::DEFAULT_BALANCE_COST: u32\n- const casper_execution_engine::shared::system_config::mint_costs::DEFAULT_CREATE_COST: u32\n- const casper_execution_engine::shared::system_config::mint_costs::DEFAULT_MINT_COST: u32\n- const casper_execution_engine::shared::system_config::mint_costs::DEFAULT_MINT_INTO_EXISTING_PURSE_COST: u32\n- const casper_execution_engine::shared::system_config::mint_costs::DEFAULT_READ_BASE_ROUND_REWARD_COST: u32\n- const casper_execution_engine::shared::system_config::mint_costs::DEFAULT_REDUCE_TOTAL_SUPPLY_COST: u32\n- const casper_execution_engine::shared::system_config::mint_costs::DEFAULT_TRANSFER_COST: u32\n- struct casper_execution_engine::shared::system_config::standard_payment_costs::StandardPaymentCosts\n- struct casper_execution_engine::shared::system_config::SystemConfig\n- const casper_execution_engine::shared::system_config::DEFAULT_WASMLESS_TRANSFER_COST: u32\n- fn 
casper_execution_engine::shared::test_utils::mocked_account(account_hash: casper_types::account::account_hash::AccountHash) -> alloc::vec::Vec<(casper_types::key::Key, casper_types::stored_value::StoredValue)>\n- enum casper_execution_engine::shared::transform::Error\n- enum casper_execution_engine::shared::transform::Transform\n- static casper_execution_engine::shared::utils::OS_PAGE_SIZE: once_cell::sync::Lazy<usize>\n- fn casper_execution_engine::shared::utils::check_multiple_of_page_size(value: usize)\n- fn casper_execution_engine::shared::utils::jsonify<T>(value: T, pretty_print: bool) -> alloc::string::String where T: serde::ser::Serialize\n- struct casper_execution_engine::shared::wasm_config::WasmConfig\n- const casper_execution_engine::shared::wasm_config::DEFAULT_MAX_STACK_HEIGHT: u32\n- const casper_execution_engine::shared::wasm_config::DEFAULT_WASM_MAX_MEMORY: u32\n- enum casper_execution_engine::shared::wasm_prep::PreprocessingError\n- enum casper_execution_engine::shared::wasm_prep::WasmValidationError\n- const casper_execution_engine::shared::wasm_prep::DEFAULT_BR_TABLE_MAX_SIZE: u32\n- const casper_execution_engine::shared::wasm_prep::DEFAULT_MAX_GLOBALS: u32\n- const casper_execution_engine::shared::wasm_prep::DEFAULT_MAX_PARAMETER_COUNT: u32\n- const casper_execution_engine::shared::wasm_prep::DEFAULT_MAX_TABLE_SIZE: u32\n- fn casper_execution_engine::shared::wasm_prep::deserialize(module_bytes: &[u8]) -> core::result::Result<casper_wasm::elements::module::Module, casper_execution_engine::shared::wasm_prep::PreprocessingError>\n- fn casper_execution_engine::shared::wasm_prep::get_module_from_entry_points(entry_point_names: alloc::vec::Vec<&str>, module: casper_wasm::elements::module::Module) -> core::result::Result<alloc::vec::Vec<u8>, casper_execution_engine::core::engine_state::ExecError>\n- fn casper_execution_engine::shared::wasm_prep::preprocess(wasm_config: casper_execution_engine::shared::wasm_config::WasmConfig, module_bytes: &[u8]) -> 
core::result::Result<casper_wasm::elements::module::Module, casper_execution_engine::shared::wasm_prep::PreprocessingError>\n- enum casper_execution_engine::storage::error::in_memory::Error\n- enum casper_execution_engine::storage::error::lmdb::Error\n- enum casper_execution_engine::storage::error::Error\n- struct casper_execution_engine::storage::global_state::in_memory::InMemoryGlobalState\n- struct casper_execution_engine::storage::global_state::lmdb::LmdbGlobalState\n- struct casper_execution_engine::storage::global_state::scratch::ScratchGlobalState\n- enum casper_execution_engine::storage::global_state::CommitError\n- trait casper_execution_engine::storage::global_state::CommitProvider: casper_execution_engine::storage::global_state::StateProvider\n- trait casper_execution_engine::storage::global_state::StateProvider\n- trait casper_execution_engine::storage::global_state::StateReader<K, V>\n- fn casper_execution_engine::storage::global_state::commit<'a, R, S, H, E>(environment: &'a R, store: &S, correlation_id: casper_execution_engine::shared::newtypes::CorrelationId, prestate_hash: casper_hashing::Digest, effects: casper_execution_engine::shared::additive_map::AdditiveMap<casper_types::key::Key, casper_execution_engine::shared::transform::Transform, H>) -> core::result::Result<casper_hashing::Digest, E> where R: casper_execution_engine::storage::transaction_source::TransactionSource<'a, Handle = <S as casper_execution_engine::storage::store::Store>::Handle>, S: casper_execution_engine::storage::trie_store::TrieStore<casper_types::key::Key, casper_types::stored_value::StoredValue>, <S as casper_execution_engine::storage::store::Store>::Error: core::convert::From<<R as casper_execution_engine::storage::transaction_source::TransactionSource>::Error>, E: core::convert::From<<R as casper_execution_engine::storage::transaction_source::TransactionSource>::Error> + core::convert::From<<S as casper_execution_engine::storage::store::Store>::Error> + 
core::convert::From<casper_types::bytesrepr::Error> + core::convert::From<casper_execution_engine::storage::global_state::CommitError>, H: core::hash::BuildHasher\n- fn casper_execution_engine::storage::global_state::put_stored_values<'a, R, S, E>(environment: &'a R, store: &S, correlation_id: casper_execution_engine::shared::newtypes::CorrelationId, prestate_hash: casper_hashing::Digest, stored_values: std::collections::hash::map::HashMap<casper_types::key::Key, casper_types::stored_value::StoredValue>) -> core::result::Result<casper_hashing::Digest, E> where R: casper_execution_engine::storage::transaction_source::TransactionSource<'a, Handle = <S as casper_execution_engine::storage::store::Store>::Handle>, S: casper_execution_engine::storage::trie_store::TrieStore<casper_types::key::Key, casper_types::stored_value::StoredValue>, <S as casper_execution_engine::storage::store::Store>::Error: core::convert::From<<R as casper_execution_engine::storage::transaction_source::TransactionSource>::Error>, E: core::convert::From<<R as casper_execution_engine::storage::transaction_source::TransactionSource>::Error> + core::convert::From<<S as casper_execution_engine::storage::store::Store>::Error> + core::convert::From<casper_types::bytesrepr::Error> + core::convert::From<casper_execution_engine::storage::global_state::CommitError>\n- trait casper_execution_engine::storage::store::Store<K, V>\n- trait casper_execution_engine::storage::store::StoreExt<K, V>: casper_execution_engine::storage::store::Store<K, V>\n- struct casper_execution_engine::storage::transaction_source::in_memory::InMemoryEnvironment\n- struct casper_execution_engine::storage::transaction_source::in_memory::InMemoryReadTransaction\n- struct casper_execution_engine::storage::transaction_source::in_memory::InMemoryReadWriteTransaction<'a>\n- struct casper_execution_engine::storage::transaction_source::lmdb::LmdbEnvironment\n- trait casper_execution_engine::storage::transaction_source::Readable: 
casper_execution_engine::storage::transaction_source::Transaction\n- trait casper_execution_engine::storage::transaction_source::Transaction: core::marker::Sized\n- trait casper_execution_engine::storage::transaction_source::TransactionSource<'a>\n- trait casper_execution_engine::storage::transaction_source::Writable: casper_execution_engine::storage::transaction_source::Transaction\n- fn casper_execution_engine::storage::transaction_source::Writable::write(&mut self, handle: Self::Handle, key: &[u8], value: &[u8]) -> core::result::Result<(), Self::Error>\n- impl<'a> casper_execution_engine::storage::transaction_source::Writable for lmdb::transaction::RwTransaction<'a>\n- fn lmdb::transaction::RwTransaction<'a>::write(&mut self, handle: Self::Handle, key: &[u8], value: &[u8]) -> core::result::Result<(), Self::Error>\n- enum casper_execution_engine::storage::trie::merkle_proof::TrieMerkleProofStep\n- struct casper_execution_engine::storage::trie::merkle_proof::TrieMerkleProof<K, V>\n- enum casper_execution_engine::storage::trie::DescendantsIterator<'a>\n- enum casper_execution_engine::storage::trie::Pointer\n- enum casper_execution_engine::storage::trie::Trie<K, V>\n- struct casper_execution_engine::storage::trie::PointerBlock\n- struct casper_execution_engine::storage::trie::TrieRaw\n- type casper_execution_engine::storage::trie::Parents<K, V> = alloc::vec::Vec<(u8, casper_execution_engine::storage::trie::Trie<K, V>)>\n- type casper_execution_engine::storage::trie::PointerBlockArray = [casper_execution_engine::storage::trie::PointerBlockValue; 256]\n- type casper_execution_engine::storage::trie::PointerBlockValue = core::option::Option<casper_execution_engine::storage::trie::Pointer>\n- struct casper_execution_engine::storage::trie_store::in_memory::InMemoryTrieStore\n- struct casper_execution_engine::storage::trie_store::lmdb::LmdbTrieStore\n- trait casper_execution_engine::storage::trie_store::TrieStore<K, V>: 
casper_execution_engine::storage::store::Store<casper_hashing::Digest, casper_execution_engine::storage::trie::Trie<K, V>>\n- macro casper_execution_engine::make_array_newtype!\n\n\n## 7.0.1\n\n### Changed\n* Change the cost of `wasm.storage_costs.gas_per_byte` and `shared::storage_costs::DEFAULT_GAS_PER_BYTE_COST` from `630_000` to `1_117_587`.\n* Change the cost of the host function `casper_add_associated_key` from `9_000` to `1_200_000`.\n* Change the cost of the argument `entry_points_size` of host function `casper_add_contract_version` from `0` to `120_000`.\n* Change the cost of the host function `casper_blake2b` and its argument `in_size` from `200` and `0` respectively to `1_200_000` and `120_000`.\n* Change the cost of the host function `casper_call_contract` and its arguments `entry_point_name_size` and `runtime_args_size` from `4_500`, `0` and `420` respectively to `300_000_000`, `120_000` and `120_000`.\n* Change the cost of the host function `casper_call_versioned_contract` and the arguments `entry_point_name_size` and `runtime_args_size` from `4_500`, `0` and `420` respectively to `300_000_000`, `120_000` and `120_000`.\n* Change the cost of the host function `casper_get_balance` from `3_800` to `3_000_000`.\n* Change the cost of arguments `name_size` and `dest_size` of host function `casper_get_named_arg` from `0` to `120_000`.\n* Change the cost of the host function `casper_put_key` and its arguments `name_size` and `key_size` from `38_000`, `1_100` and `0` respectively to `100_000_000`, `120_000` and `120_000`.\n* Change the cost of the host function `casper_read_value` and its argument `key_size` from `6_000` and `0` respectively to `60_000` and `120_000`.\n* Change the cost of the argument `urefs_size` of host function `casper_remove_contract_user_group_urefs` from `0` to `120_000`.\n* Change the cost of the host function `casper_transfer_from_purse_to_purse` from `82_000` to `82_000_000`.\n\n\n\n## [Unreleased] (node 1.5.4)\n## 7.0.0\n\n### 
Added\n* Add chainspec option `core.allow_unrestricted_transfers` that, if enabled, allows token transfers between any two peers. Disabling this option makes sense only for private chains.\n* Add chainspec option `core.allow_auction_bids` that, if enabled, allows auction entrypoints `delegate` and `add_bid` to operate. Disabling this option makes sense only for private chains.\n* Add chainspec option `core.compute_rewards` that, if enabled, computes rewards for each era. Disabling this option makes sense only for private chains.\n* Add chainspec option `core.refund_handling` that specifies how payment refunds are handled.\n* Add chainspec option `core.fee_handling` that specifies how transaction fees are handled.\n* Add chainspec option `core.administrators` that, if set, contains list of administrator accounts. This option makes sense only for private chains.\n* Add support for a new FFI function `enable_contract_version` for enabling a specific version of a contract.\n\n### Changed\n* `current stack height` is written to `stderr` in case `Trap(Unreachable)` error is encountered during Wasm execution.\n* Tweak upgrade logic transforming withdraw purses to early exit if possible.\n* Lower the default gas costs of opcodes.\n  - Set the cost for branching opcodes to 35,000 (`br`, `br_if`, `br_table`).\n  - Set the cost for call opcodes to 68,000 (`call`, `call_indirect`).\n* Default value for round seigniorage rate is halved to `7/175070816` due to reduction in block times, to maintain current seigniorage rate (per unit of time).\n* Refund ratio is changed from 0% to 99%.\n\n\n\n## 6.0.0\n\n### Changed\n* Default value for `max_stack_height` is increased to 500.\n* Replace usage of `parity-wasm` and `wasmi` with Casper forks `casper-wasm` and `casper-wasmi` respectively.\n\n### Fixed\n* Fix incorrect handling of unbonding purses for validators that were also evicted in that era.\n* Fix issue with one-time code used for migrating data to support redelegations.\n\n### 
Security\n* Fix unbounded memory allocation issue while parsing Wasm.\n\n\n\n## 5.0.0\n\n### Added\n* Add a new entry point `redelegate` to the Auction system contract which allows users to redelegate to another validator without having to unbond. The function signature for the entrypoint is: `redelegate(delegator: PublicKey, validator: PublicKey, amount: U512, new_validator: PublicKey)`\n* Add a new type `ChainspecRegistry` which contains the hashes of the `chainspec.toml` and will optionally contain the hashes for `accounts.toml` and `global_state.toml`.\n* Add ability to enable strict args checking when executing a contract; i.e. that all non-optional args are provided and of the correct `CLType`.\n\n### Changed\n* Fix some integer casts.\n* Change both genesis and upgrade functions to write `ChainspecRegistry` under the fixed `Key::ChainspecRegistry`.\n* Lift the temporary limit of the size of individual values stored in global state.\n* Providing incorrect Wasm for execution will cause the default 2.5CSPR to be charged.\n* Update the default `control_flow` opcode cost from `440` to `440_000`.\n\n\n\n## 4.0.0\n\n### Changed\n* Update dependencies (in particular `casper-types` to v2.0.0 due to additional `Key` variant, requiring a major version bump here).\n\n\n\n## 3.1.1\n\n### Changed\n* Update the following constant values to match settings in production chainspec:\n  * `DEFAULT_RET_VALUE_SIZE_WEIGHT`\n  * `DEFAULT_CONTROL_FLOW_CALL_OPCODE`\n  * `DEFAULT_CONTROL_FLOW_CALL_INDIRECT_OPCODE`\n  * `DEFAULT_GAS_PER_BYTE_COST`\n  * `DEFAULT_ADD_BID_COST`\n  * `DEFAULT_WITHDRAW_BID_COST`\n  * `DEFAULT_DELEGATE_COST`\n  * `DEFAULT_UNDELEGATE_COST`\n  * `DEFAULT_MAX_STACK_HEIGHT`\n\n\n\n## 3.1.0\n\n### Added\n* Add `commit_prune` functionality to support pruning of entries in global storage.\n\n### Changed\n* Update to use `casper-wasm-utils`; a patched fork of the archived `wasm-utils`.\n\n\n\n## 3.0.0\n\n### Changed\n* Implement more precise control over opcode 
costs that lowers the gas cost.\n* Increase cost of `withdraw_bid` and `undelegate` auction entry points to 2.5CSPR.\n\n\n\n## 2.0.1\n\n### Security\n* Implement checks before preprocessing Wasm to avoid potential OOM when initializing table section.\n* Implement checks before preprocessing Wasm to avoid references to undeclared functions or globals.\n* Implement checks before preprocessing Wasm to avoid possibility to import internal host functions.\n\n\n## 2.0.0 - 2022-05-11\n\n### Changed\n* Change contract runtime to allow caching global state changes during execution of a single block, also avoiding writing interstitial data to global state.\n\n\n\n## 1.5.0 - 2022-04-05\n\n### Changed\n* Temporarily limit the size of individual values stored in global state.\n\n### Security\n* `amount` argument is now required for transactions wanting to send tokens using account's main purse. It is now an upper limit on all tokens being transferred within the transaction.\n* Significant rework around the responsibilities of the executor, runtime and runtime context objects, with a focus on removing alternate execution paths where unintended escalation of privilege was possible.\n* Attenuate the main purse URef to remove WRITE permissions by default when returned via `ret` or passed as a runtime argument.\n* Fix a potential panic during Wasm preprocessing.\n* `get_era_validators` performs a query rather than execution.\n\n\n\n## 1.4.4 - 2021-12-29\n\n### Changed\n* No longer checksum-hex encode hash digest and address types.\n\n\n\n## 1.4.3 - 2021-12-06\n\n### Changed\n* Auction contract now handles minting into an existing purse.\n* Default maximum stack size in `WasmConfig` changed to 188.\n* Default behavior of LMDB changed to use [`NO_READAHEAD`](https://docs.rs/lmdb/0.8.0/lmdb/struct.EnvironmentFlags.html#associatedconstant.NO_READAHEAD)\n\n### Fixed\n* Fix a case where an unlocked and partially unbonded genesis validator with smaller stake incorrectly occupies slot for a 
non-genesis validator with higher stake.\n\n\n\n## [1.4.2] - 2021-11-11\n\n### Changed\n* Execution transforms are returned in their insertion order.\n\n### Removed\n* Removed `SystemContractCache` as it was not being used anymore.\n\n## [1.4.0] - 2021-10-04\n\n### Added\n* Added genesis validation step to ensure there are more genesis validators than validator slots.\n* Added support for passing a public key as a `target` argument in native transfers.\n* Added a `max_associated_keys` configuration option for a hard limit of associated keys under accounts.\n\n### Changed\n* Documented `storage` module and children.\n* Reduced visibility to `pub(crate)` in several areas, allowing some dead code to be noticed and pruned.\n* Support building and testing using stable Rust.\n* Increase price of `create_purse` to 2.5CSPR.\n* Increase price of native transfer to 100 million motes (0.1 CSPR).\n* Improve doc comments to clarify behavior of the bidding functionality.\n* Document `core` and `shared` modules and their children.\n* Change parameters to `LmdbEnvironment`'s constructor enabling manual flushing to disk.\n\n### Fixed\n* Fix a case where user could potentially supply a refund purse as a payment purse.\n\n\n\n## [1.3.0] - 2021-07-19\n\n### Changed\n* Update pinned version of Rust to `nightly-2021-06-17`.\n\n\n\n## [1.2.0] - 2021-05-27\n\n### Added\n* Add validation that the delegated amount of each genesis account is non-zero.\n* Add `activate-bid` client contract.\n* Add a check in `Mint::transfer` that the source has `Read` permissions.\n\n### Changed\n* Change to Apache 2.0 license.\n* Remove the strict expectation that minor and patch protocol versions must always increase by 1.\n\n### Removed\n* Remove `RootNotFound` error struct.\n\n\n\n## [1.1.1] - 2021-04-19\n\nNo changes.\n\n\n\n## [1.1.0] - 2021-04-13 [YANKED]\n\nNo changes.\n\n\n\n## [1.0.1] - 2021-04-08\n\nNo changes.\n\n\n\n## [1.0.0] - 2021-03-30\n\n### Added\n* Initial release of execution engine for 
Casper mainnet.\n\n\n\n[Keep a Changelog]: https://keepachangelog.com/en/1.0.0\n[unreleased]: https://github.com/casper-network/casper-node/compare/37d561634adf73dab40fffa7f1f1ee47e80bf8a1...dev\n[1.4.2]: https://github.com/casper-network/casper-node/compare/v1.4.0...37d561634adf73dab40fffa7f1f1ee47e80bf8a1\n[1.4.0]: https://github.com/casper-network/casper-node/compare/v1.3.0...v1.4.0\n[1.3.0]: https://github.com/casper-network/casper-node/compare/v1.2.0...v1.3.0\n[1.2.0]: https://github.com/casper-network/casper-node/compare/v1.1.1...v1.2.0\n[1.1.1]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1\n[1.1.0]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1\n[1.0.1]: https://github.com/casper-network/casper-node/compare/v1.0.0...v1.0.1\n[1.0.0]: https://github.com/casper-network/casper-node/releases/tag/v1.0.0\n"
  },
  {
    "path": "execution_engine/Cargo.toml",
    "content": "[package]\nname = \"casper-execution-engine\"\nversion = \"9.0.0\" # when updating, also update 'html_root_url' in lib.rs\nauthors = [\"Henry Till <henrytill@gmail.com>\", \"Ed Hastings <ed@casperlabs.io>\", \"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\ndescription = \"Casper execution engine crates.\"\nreadme = \"README.md\"\ndocumentation = \"https://docs.rs/casper-execution-engine\"\nhomepage = \"https://casper.network\"\nrepository = \"https://github.com/casper-network/casper-node/tree/master/execution_engine\"\nlicense = \"Apache-2.0\"\n\n[dependencies]\nanyhow = \"1.0.33\"\nbase16 = \"0.2.1\"\nbincode = \"1.3.1\"\nblake2 = { version = \"0.10.6\", default-features = false }\nblake3 = { version = \"1.5.0\", default-features = false, features = [\"pure\"] }\nsha2 = { version = \"0.10.8\", default-features = false }\ncasper-storage = { version = \"5.0.0\", path = \"../storage\", default-features = true }\ncasper-types = { version = \"7.0.0\", path = \"../types\", default-features = false, features = [\"datasize\", \"gens\", \"json-schema\", \"std\"] }\ncasper-wasm = { version = \"1.0.0\", default-features = false, features = [\"sign_ext\", \"call_indirect_overlong\"] }\ncasper-wasm-utils = { version = \"4.0.0\", default-features = false, features = [\"sign_ext\", \"call_indirect_overlong\"] }\ncasper-wasmi = { version = \"1.0.0\", features = [\"sign_ext\", \"call_indirect_overlong\"] }\ndatasize = \"0.2.4\"\neither = \"1.8.1\"\nhex-buffer-serde = \"0.2.1\"\nhex_fmt = \"0.3.0\"\nhostname = \"0.3.0\"\nhumantime = \"2\"\nitertools = \"0.10.3\"\nlinked-hash-map = \"0.5.3\"\nlog = { version = \"0.4.8\", features = [\"std\", \"serde\", \"kv_unstable\"] }\nnum = { version = \"0.4.0\", default-features = false }\nnum-derive = { workspace = true }\nnum-rational = { version = \"0.4.0\", features = [\"serde\"] }\nnum-traits = { workspace = true }\nnum_cpus = \"1\"\nonce_cell = \"1.5.2\"\nproptest = { version = \"1.0.0\", optional = true 
}\nrand = \"0.8.3\"\nrand_chacha = \"0.3.0\"\nschemars = { version = \"0.8.16\", features = [\"preserve_order\"] }\nserde = { version = \"1\", features = [\"derive\"] }\nserde_bytes = \"0.11.5\"\nserde_json = { version = \"1\", features = [\"preserve_order\"] }\nstrum = { version = \"0.24.1\", features = [\"strum_macros\", \"derive\"], optional = true }\ntempfile = \"3.4.0\"\nthiserror = \"1.0.18\"\ntracing = \"0.1.18\"\nuint = \"0.9.0\"\nclap = { version = \"4.5.21\", features = [\"derive\"] }\ntoml = \"0.8.19\"\nwat = \"1.220.0\"\n\n[dev-dependencies]\nassert_matches = \"1.3.0\"\ncasper-types = { path = \"../types\", features = [\"datasize\", \"json-schema\", \"testing\", \"std\"] }\ncriterion = \"0.5.1\"\nproptest = \"1.0.0\"\ntempfile = \"3.4.0\"\nwalrus = \"0.20.2\"\n\n[features]\ndefault = [\"gens\"]\n# DEPRECATED\ngens = [\"casper-types/testing\", \"proptest\", \"strum\"]\ntest-support = []\n\n[[bench]]\nname = \"trie_bench\"\nharness = false\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n"
  },
  {
    "path": "execution_engine/README.md",
    "content": "# `casper-execution-engine`\n\n[![LOGO](https://raw.githubusercontent.com/casper-network/casper-node/master/images/casper-association-logo-primary.svg)](https://casper.network/)\n\n[![Crates.io](https://img.shields.io/crates/v/casper-execution-engine)](https://crates.io/crates/casper-execution-engine)\n[![Documentation](https://docs.rs/casper-execution-engine/badge.svg)](https://docs.rs/casper-execution-engine)\n[![License](https://img.shields.io/badge/license-Apache-blue)](https://github.com/casper-network/casper-node/blob/master/LICENSE)\n\nThe main component of the Casper Wasm execution engine.\n\n## License\n\nLicensed under the [Apache License Version 2.0](https://github.com/casper-network/casper-node/blob/master/LICENSE)."
  },
  {
    "path": "execution_engine/benches/trie_bench.rs",
    "content": "use criterion::{black_box, criterion_group, criterion_main, Bencher, Criterion};\n\nuse casper_storage::global_state::trie::{PointerBlock, Trie};\nuse casper_types::{\n    account::AccountHash,\n    addressable_entity::EntityKindTag,\n    bytesrepr::{FromBytes, ToBytes},\n    global_state::Pointer,\n    AddressableEntityHash, CLValue, Digest, Key, StoredValue,\n};\n\nfn serialize_trie_leaf(b: &mut Bencher) {\n    let contract_key = Key::addressable_entity_key(\n        EntityKindTag::SmartContract,\n        AddressableEntityHash::new([42; 32]),\n    );\n    let leaf = Trie::Leaf {\n        key: Key::Account(AccountHash::new([0; 32])),\n        value: StoredValue::CLValue(CLValue::from_t(contract_key).unwrap()),\n    };\n    b.iter(|| ToBytes::to_bytes(black_box(&leaf)));\n}\n\nfn deserialize_trie_leaf(b: &mut Bencher) {\n    let contract_key: Key = Key::addressable_entity_key(\n        EntityKindTag::SmartContract,\n        AddressableEntityHash::new([42; 32]),\n    );\n    let leaf = Trie::Leaf {\n        key: Key::Account(AccountHash::new([0; 32])),\n        value: StoredValue::CLValue(CLValue::from_t(contract_key).unwrap()),\n    };\n    let leaf_bytes = leaf.to_bytes().unwrap();\n    b.iter(|| Trie::<Key, StoredValue>::from_bytes(black_box(&leaf_bytes)));\n}\n\nfn serialize_trie_node(b: &mut Bencher) {\n    let node = Trie::<Key, StoredValue>::Node {\n        pointer_block: Box::<PointerBlock>::default(),\n    };\n    b.iter(|| ToBytes::to_bytes(black_box(&node)));\n}\n\nfn deserialize_trie_node(b: &mut Bencher) {\n    let node = Trie::<Key, StoredValue>::Node {\n        pointer_block: Box::<PointerBlock>::default(),\n    };\n    let node_bytes = node.to_bytes().unwrap();\n\n    b.iter(|| Trie::<Key, StoredValue>::from_bytes(black_box(&node_bytes)));\n}\n\nfn serialize_trie_node_pointer(b: &mut Bencher) {\n    let node = Trie::<Key, StoredValue>::extension(\n        (0..255).collect(),\n        Pointer::NodePointer(Digest::hash([0; 32])),\n    
);\n\n    b.iter(|| ToBytes::to_bytes(black_box(&node)));\n}\n\nfn deserialize_trie_node_pointer(b: &mut Bencher) {\n    let node = Trie::<Key, StoredValue>::extension(\n        (0..255).collect(),\n        Pointer::NodePointer(Digest::hash([0; 32])),\n    );\n    let node_bytes = node.to_bytes().unwrap();\n\n    b.iter(|| Trie::<Key, StoredValue>::from_bytes(black_box(&node_bytes)));\n}\n\nfn trie_bench(c: &mut Criterion) {\n    c.bench_function(\"serialize_trie_leaf\", serialize_trie_leaf);\n    c.bench_function(\"deserialize_trie_leaf\", deserialize_trie_leaf);\n    c.bench_function(\"serialize_trie_node\", serialize_trie_node);\n    c.bench_function(\"deserialize_trie_node\", deserialize_trie_node);\n    c.bench_function(\"serialize_trie_node_pointer\", serialize_trie_node_pointer);\n    c.bench_function(\n        \"deserialize_trie_node_pointer\",\n        deserialize_trie_node_pointer,\n    );\n}\n\ncriterion_group!(benches, trie_bench);\ncriterion_main!(benches);\n"
  },
  {
    "path": "execution_engine/src/bin/run_wasm.rs",
    "content": "use std::{\n    fs,\n    path::{Path, PathBuf},\n    time::{Duration, Instant},\n};\n\nuse casper_types::WasmConfig;\n\nuse casper_execution_engine::runtime;\nuse casper_wasmi::{\n    memory_units::Pages, Externals, FuncInstance, HostError, ImportsBuilder, MemoryInstance,\n    ModuleImportResolver, ModuleInstance, RuntimeValue, Signature,\n};\n\nfn prepare_instance(module_bytes: &[u8], chainspec: &ChainspecConfig) -> casper_wasmi::ModuleRef {\n    let wasm_module = runtime::preprocess(chainspec.wasm_config, module_bytes).unwrap();\n    let module = casper_wasmi::Module::from_casper_wasm_module(wasm_module).unwrap();\n    let resolver = MinimalWasmiResolver::default();\n    let mut imports = ImportsBuilder::new();\n    imports.push_resolver(\"env\", &resolver);\n    let not_started_module = ModuleInstance::new(&module, &imports).unwrap();\n\n    assert!(!not_started_module.has_start());\n\n    let instance = not_started_module.not_started_instance();\n    instance.clone()\n}\n\nstruct RunWasmInfo {\n    elapsed: Duration,\n    gas_used: u64,\n}\n\nfn run_wasm(\n    module_bytes: Vec<u8>,\n    cli_args: &Args,\n    chainspec: &ChainspecConfig,\n    func_name: &str,\n) -> (\n    Result<Option<RuntimeValue>, casper_wasmi::Error>,\n    RunWasmInfo,\n) {\n    println!(\n        \"Invoke export {:?} with args {:?}\",\n        func_name, cli_args.args\n    );\n\n    let instance = prepare_instance(&module_bytes, chainspec);\n\n    let params = {\n        let export = instance.export_by_name(func_name).unwrap();\n        let func = export.as_func().unwrap();\n        func.signature().params().to_owned()\n    };\n\n    let args = {\n        assert_eq!(\n            cli_args.args.len(),\n            params.len(),\n            \"Not enough arguments supplied\"\n        );\n        let mut vec = Vec::new();\n        for (input_arg, func_arg) in cli_args.args.iter().zip(params.into_iter()) {\n            let value = match func_arg {\n                
casper_wasmi::ValueType::I32 => {\n                    casper_wasmi::RuntimeValue::I32(input_arg.parse().unwrap())\n                }\n                casper_wasmi::ValueType::I64 => {\n                    casper_wasmi::RuntimeValue::I64(input_arg.parse().unwrap())\n                }\n                casper_wasmi::ValueType::F32 => todo!(),\n                casper_wasmi::ValueType::F64 => todo!(),\n            };\n            vec.push(value);\n        }\n        vec\n    };\n\n    let start = Instant::now();\n\n    let gas_limit = cli_args\n        .gas_limit\n        .unwrap_or(chainspec.transaction_config.block_gas_limit);\n\n    let mut externals = MinimalWasmiExternals::new(0, gas_limit);\n    let result: Result<Option<RuntimeValue>, casper_wasmi::Error> =\n        instance\n            .clone()\n            .invoke_export(func_name, &args, &mut externals);\n\n    let info = RunWasmInfo {\n        elapsed: start.elapsed(),\n        gas_used: externals.gas_used,\n    };\n\n    (result, info)\n}\nuse clap::Parser;\nuse serde::Deserialize;\n\n#[derive(Parser, Clone, Debug)]\n#[command(author, version, about, long_about = None)]\nstruct Args {\n    #[arg(value_name = \"MODULE\")]\n    wasm_file: PathBuf,\n    #[arg(long = \"gas_limit\")]\n    gas_limit: Option<u64>,\n    #[arg(long = \"invoke\", value_name = \"FUNCTION\")]\n    invoke: Option<String>,\n    /// Arguments given to the Wasm module or the invoked function.\n    #[arg(value_name = \"ARGS\")]\n    args: Vec<String>,\n    #[arg(short, long)]\n    chainspec_file: Option<PathBuf>,\n}\n\nfn load_wasm_file<P: AsRef<Path>>(path: P) -> Vec<u8> {\n    let path = path.as_ref();\n    let bytes = fs::read(path).expect(\"valid file\");\n    match path.extension() {\n        Some(ext) if ext.eq_ignore_ascii_case(\"wat\") => {\n            wat::parse_bytes(&bytes).expect(\"valid wat\").into_owned()\n        }\n        None | Some(_) => bytes,\n    }\n}\n\n#[derive(Deserialize, Clone, Default, Debug)]\nstruct 
TransactionConfig {\n    block_gas_limit: u64,\n}\n\n/// in the chainspec file, it can continue to be parsed as an `ChainspecConfig`.\n#[derive(Deserialize, Clone, Default, Debug)]\nstruct ChainspecConfig {\n    /// WasmConfig.\n    #[serde(rename = \"wasm\")]\n    pub wasm_config: WasmConfig,\n    #[serde(rename = \"transactions\")]\n    pub transaction_config: TransactionConfig,\n}\n\nfn main() {\n    let args = Args::parse();\n\n    let chainspec_file = args.clone().chainspec_file.expect(\"chainspec file\");\n    println!(\"Using chainspec file {:?}\", chainspec_file.display());\n    let chainspec_data = fs::read_to_string(chainspec_file.as_path()).expect(\"valid file\");\n    let chainspec_config: ChainspecConfig =\n        toml::from_str(&chainspec_data).expect(\"valid chainspec\");\n\n    let wasm_bytes = load_wasm_file(&args.wasm_file);\n\n    if let Some(ref func_name) = args.invoke {\n        let (result, info) = run_wasm(wasm_bytes, &args, &chainspec_config, func_name);\n\n        println!(\"result: {:?}\", result);\n        println!(\"elapsed: {:?}\", info.elapsed);\n        println!(\"gas used: {}\", info.gas_used);\n    }\n}\n\n#[derive(Default)]\nstruct MinimalWasmiResolver(());\n\n#[derive(Debug)]\nstruct MinimalWasmiExternals {\n    gas_used: u64,\n    block_gas_limit: u64,\n}\n\nimpl MinimalWasmiExternals {\n    fn new(gas_used: u64, block_gas_limit: u64) -> Self {\n        Self {\n            gas_used,\n            block_gas_limit,\n        }\n    }\n}\n\nconst GAS_FUNC_IDX: usize = 0;\n\nimpl ModuleImportResolver for MinimalWasmiResolver {\n    fn resolve_func(\n        &self,\n        field_name: &str,\n        _signature: &casper_wasmi::Signature,\n    ) -> Result<casper_wasmi::FuncRef, casper_wasmi::Error> {\n        if field_name == \"gas\" {\n            Ok(FuncInstance::alloc_host(\n                Signature::new(&[casper_wasmi::ValueType::I32; 1][..], None),\n                GAS_FUNC_IDX,\n            ))\n        } else {\n            
Err(casper_wasmi::Error::Instantiation(format!(\n                \"Export {} not found\",\n                field_name\n            )))\n        }\n    }\n\n    fn resolve_memory(\n        &self,\n        field_name: &str,\n        memory_type: &casper_wasmi::MemoryDescriptor,\n    ) -> Result<casper_wasmi::MemoryRef, casper_wasmi::Error> {\n        if field_name == \"memory\" {\n            Ok(MemoryInstance::alloc(\n                Pages(memory_type.initial() as usize),\n                memory_type.maximum().map(|x| Pages(x as usize)),\n            )?)\n        } else {\n            panic!(\"invalid exported memory name {}\", field_name);\n        }\n    }\n}\n\n#[derive(thiserror::Error, Debug)]\n#[error(\"gas limit\")]\nstruct GasLimit;\n\nimpl HostError for GasLimit {}\n\nimpl Externals for MinimalWasmiExternals {\n    fn invoke_index(\n        &mut self,\n        index: usize,\n        args: casper_wasmi::RuntimeArgs,\n    ) -> Result<Option<casper_wasmi::RuntimeValue>, casper_wasmi::Trap> {\n        if index == GAS_FUNC_IDX {\n            let gas_used: u32 = args.nth_checked(0)?;\n            // match gas_used.checked_add(\n            match self.gas_used.checked_add(gas_used.into()) {\n                Some(new_gas_used) if new_gas_used > self.block_gas_limit => {\n                    return Err(GasLimit.into());\n                }\n                Some(new_gas_used) => {\n                    // dbg!(&new_gas_used, &self.block_gas_limit);\n                    self.gas_used = new_gas_used;\n                }\n                None => {\n                    unreachable!();\n                }\n            }\n            Ok(None)\n        } else {\n            unreachable!();\n        }\n    }\n}\n"
  },
  {
    "path": "execution_engine/src/engine_state/engine_config.rs",
    "content": "//! Support for runtime configuration of the execution engine - as an integral property of the\n//! `EngineState` instance.\n\nuse std::collections::BTreeSet;\n\nuse num_rational::Ratio;\nuse num_traits::One;\n\nuse casper_types::{\n    account::AccountHash, FeeHandling, ProtocolVersion, PublicKey, RefundHandling, RewardsHandling,\n    StorageCosts, SystemConfig, TimeDiff, WasmConfig, DEFAULT_FEE_HANDLING,\n    DEFAULT_MINIMUM_BID_AMOUNT, DEFAULT_REFUND_HANDLING,\n};\n\n/// Default value for a maximum query depth configuration option.\npub const DEFAULT_MAX_QUERY_DEPTH: u64 = 5;\n/// Default value for maximum associated keys configuration option.\npub const DEFAULT_MAX_ASSOCIATED_KEYS: u32 = 100;\n/// Default value for maximum runtime call stack height configuration option.\npub const DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT: u32 = 12;\n/// Default max serialized size of `StoredValue`s.\n#[deprecated(\n    since = \"3.2.0\",\n    note = \"not used in `casper-execution-engine` config anymore\"\n)]\npub const DEFAULT_MAX_STORED_VALUE_SIZE: u32 = 8 * 1024 * 1024;\n/// Default value for minimum delegation amount in motes.\npub const DEFAULT_MINIMUM_DELEGATION_AMOUNT: u64 = 500 * 1_000_000_000;\n/// Default value for maximum delegation amount in motes.\npub const DEFAULT_MAXIMUM_DELEGATION_AMOUNT: u64 = 1_000_000_000 * 1_000_000_000;\n/// Default value for strict argument checking.\npub const DEFAULT_STRICT_ARGUMENT_CHECKING: bool = false;\n/// 91 days / 7 days in a week = 13 weeks\n/// Length of total vesting schedule in days.\nconst VESTING_SCHEDULE_LENGTH_DAYS: usize = 91;\nconst DAY_MILLIS: usize = 24 * 60 * 60 * 1000;\n/// Default length of total vesting schedule period expressed in days.\npub const DEFAULT_VESTING_SCHEDULE_LENGTH_MILLIS: u64 =\n    VESTING_SCHEDULE_LENGTH_DAYS as u64 * DAY_MILLIS as u64;\n/// Default maximum number of delegators per validator.\npub const DEFAULT_MAX_DELEGATORS_PER_VALIDATOR: u32 = 1200;\n/// Default value for allowing 
auction bids.\npub const DEFAULT_ALLOW_AUCTION_BIDS: bool = true;\n/// Default value for allowing unrestricted transfers.\npub const DEFAULT_ALLOW_UNRESTRICTED_TRANSFERS: bool = true;\n/// Default compute rewards.\npub const DEFAULT_COMPUTE_REWARDS: bool = true;\n/// Default protocol version.\npub const DEFAULT_PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::V2_0_0;\n/// Default period for balance holds to decay (currently 24 hours).\npub const DEFAULT_BALANCE_HOLD_INTERVAL: TimeDiff = TimeDiff::from_seconds(24 * 60 * 60);\n\n/// Default entity flag.\npub const DEFAULT_ENABLE_ENTITY: bool = false;\n\npub(crate) const DEFAULT_TRAP_ON_AMBIGUOUS_ENTITY_VERSION: bool = false;\n\n/// The runtime configuration of the execution engine\n#[derive(Debug, Clone)]\npub struct EngineConfig {\n    /// Maximum number of associated keys (i.e. map of\n    /// [`AccountHash`](AccountHash)s to\n    /// [`Weight`](casper_types::account::Weight)s) for a single account.\n    max_associated_keys: u32,\n    max_runtime_call_stack_height: u32,\n    minimum_delegation_amount: u64,\n    maximum_delegation_amount: u64,\n    minimum_bid_amount: u64,\n    /// This flag indicates if arguments passed to contracts are checked against the defined types.\n    strict_argument_checking: bool,\n    /// Vesting schedule period in milliseconds.\n    vesting_schedule_period_millis: u64,\n    max_delegators_per_validator: u32,\n    wasm_config: WasmConfig,\n    system_config: SystemConfig,\n    protocol_version: ProtocolVersion,\n    /// A private network specifies a list of administrative accounts.\n    pub(crate) administrative_accounts: BTreeSet<AccountHash>,\n    /// Auction entrypoints such as \"add_bid\" or \"delegate\" are disabled if this flag is set to\n    /// `false`.\n    pub(crate) allow_auction_bids: bool,\n    /// Allow unrestricted transfers between normal accounts.\n    ///\n    /// If set to `true` accounts can transfer tokens between themselves without restrictions. 
If\n    /// set to `false` tokens can be transferred only from normal accounts to administrators\n    /// and administrators to normal accounts but not normal accounts to normal accounts.\n    pub(crate) allow_unrestricted_transfers: bool,\n    /// Refund handling config.\n    pub(crate) refund_handling: RefundHandling,\n    /// Fee handling.\n    pub(crate) fee_handling: FeeHandling,\n    /// Compute auction rewards.\n    pub(crate) compute_rewards: bool,\n    pub(crate) enable_entity: bool,\n    pub(crate) trap_on_ambiguous_entity_version: bool,\n    pub(crate) rewards_handling: RewardsHandling,\n    storage_costs: StorageCosts,\n}\n\nimpl Default for EngineConfig {\n    fn default() -> Self {\n        EngineConfig {\n            max_associated_keys: DEFAULT_MAX_ASSOCIATED_KEYS,\n            max_runtime_call_stack_height: DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT,\n            minimum_delegation_amount: DEFAULT_MINIMUM_DELEGATION_AMOUNT,\n            maximum_delegation_amount: DEFAULT_MAXIMUM_DELEGATION_AMOUNT,\n            minimum_bid_amount: DEFAULT_MINIMUM_BID_AMOUNT,\n            strict_argument_checking: DEFAULT_STRICT_ARGUMENT_CHECKING,\n            vesting_schedule_period_millis: DEFAULT_VESTING_SCHEDULE_LENGTH_MILLIS,\n            max_delegators_per_validator: DEFAULT_MAX_DELEGATORS_PER_VALIDATOR,\n            wasm_config: WasmConfig::default(),\n            system_config: SystemConfig::default(),\n            administrative_accounts: Default::default(),\n            allow_auction_bids: DEFAULT_ALLOW_AUCTION_BIDS,\n            allow_unrestricted_transfers: DEFAULT_ALLOW_UNRESTRICTED_TRANSFERS,\n            refund_handling: DEFAULT_REFUND_HANDLING,\n            fee_handling: DEFAULT_FEE_HANDLING,\n            compute_rewards: DEFAULT_COMPUTE_REWARDS,\n            protocol_version: DEFAULT_PROTOCOL_VERSION,\n            enable_entity: DEFAULT_ENABLE_ENTITY,\n            trap_on_ambiguous_entity_version: DEFAULT_TRAP_ON_AMBIGUOUS_ENTITY_VERSION,\n            
rewards_handling: RewardsHandling::Standard,\n            storage_costs: Default::default(),\n        }\n    }\n}\n\nimpl EngineConfig {\n    /// Returns the current max associated keys config.\n    pub fn max_associated_keys(&self) -> u32 {\n        self.max_associated_keys\n    }\n\n    /// Returns the current max runtime call stack height config.\n    pub fn max_runtime_call_stack_height(&self) -> u32 {\n        self.max_runtime_call_stack_height\n    }\n\n    /// Returns the current wasm config.\n    pub fn wasm_config(&self) -> &WasmConfig {\n        &self.wasm_config\n    }\n\n    /// Returns the current system config.\n    pub fn system_config(&self) -> &SystemConfig {\n        &self.system_config\n    }\n\n    /// Returns the current protocol version.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n\n    /// Returns the minimum delegation amount in motes.\n    pub fn minimum_delegation_amount(&self) -> u64 {\n        self.minimum_delegation_amount\n    }\n\n    /// Returns the maximum delegation amount in motes.\n    pub fn maximum_delegation_amount(&self) -> u64 {\n        self.maximum_delegation_amount\n    }\n\n    /// Returns the minimum delegation amount in motes.\n    pub fn minimum_bid_amount(&self) -> u64 {\n        self.minimum_bid_amount\n    }\n\n    /// Get the engine config's strict argument checking flag.\n    pub fn strict_argument_checking(&self) -> bool {\n        self.strict_argument_checking\n    }\n\n    /// Get the vesting schedule period.\n    pub fn vesting_schedule_period_millis(&self) -> u64 {\n        self.vesting_schedule_period_millis\n    }\n\n    /// Get the max delegators per validator\n    pub fn max_delegators_per_validator(&self) -> u32 {\n        self.max_delegators_per_validator\n    }\n\n    /// Returns the engine config's administrative accounts.\n    pub fn administrative_accounts(&self) -> &BTreeSet<AccountHash> {\n        &self.administrative_accounts\n    }\n\n    /// 
Returns true if auction bids are allowed.\n    pub fn allow_auction_bids(&self) -> bool {\n        self.allow_auction_bids\n    }\n\n    /// Returns true if unrestricted transfers are allowed.\n    pub fn allow_unrestricted_transfers(&self) -> bool {\n        self.allow_unrestricted_transfers\n    }\n\n    /// Checks if an account hash is an administrator.\n    pub(crate) fn is_administrator(&self, account_hash: &AccountHash) -> bool {\n        self.administrative_accounts.contains(account_hash)\n    }\n\n    /// Returns the engine config's refund ratio.\n    pub fn refund_handling(&self) -> RefundHandling {\n        self.refund_handling\n    }\n\n    /// Returns the engine config's fee handling strategy.\n    pub fn fee_handling(&self) -> FeeHandling {\n        self.fee_handling\n    }\n\n    /// Returns the engine config's storage_costs.\n    pub fn storage_costs(&self) -> &StorageCosts {\n        &self.storage_costs\n    }\n\n    /// Returns the engine config's compute rewards flag.\n    pub fn compute_rewards(&self) -> bool {\n        self.compute_rewards\n    }\n\n    /// Returns the `trap_on_ambiguous_entity_version` flag.\n    pub fn trap_on_ambiguous_entity_version(&self) -> bool {\n        self.trap_on_ambiguous_entity_version\n    }\n\n    /// Returns the current configuration for rewards handling.\n    pub fn rewards_handling(&self) -> RewardsHandling {\n        self.rewards_handling.clone()\n    }\n\n    /// Sets the protocol version of the config.\n    ///\n    /// NOTE: This is only useful to the WasmTestBuilder for emulating a network upgrade, and hence\n    /// is subject to change or deletion without notice.\n    #[doc(hidden)]\n    pub fn set_protocol_version(&mut self, protocol_version: ProtocolVersion) {\n        self.protocol_version = protocol_version;\n    }\n\n    /// Sets the `wasm_config.max_memory` to `new_value`.\n    #[cfg(feature = \"test-support\")]\n    pub fn set_max_memory(&mut self, new_value: u32) {\n        
*self.wasm_config.v1_mut().max_memory_mut() = new_value;\n    }\n}\n\n/// A builder for an [`EngineConfig`].\n///\n/// Any field that isn't specified will be defaulted.  See [the module docs](index.html) for the set\n/// of default values.\n#[derive(Default, Debug)]\npub struct EngineConfigBuilder {\n    max_query_depth: Option<u64>,\n    max_associated_keys: Option<u32>,\n    max_runtime_call_stack_height: Option<u32>,\n    minimum_delegation_amount: Option<u64>,\n    maximum_delegation_amount: Option<u64>,\n    minimum_bid_amount: Option<u64>,\n    strict_argument_checking: Option<bool>,\n    vesting_schedule_period_millis: Option<u64>,\n    max_delegators_per_validator: Option<u32>,\n    wasm_config: Option<WasmConfig>,\n    system_config: Option<SystemConfig>,\n    protocol_version: Option<ProtocolVersion>,\n    administrative_accounts: Option<BTreeSet<PublicKey>>,\n    allow_auction_bids: Option<bool>,\n    allow_unrestricted_transfers: Option<bool>,\n    refund_handling: Option<RefundHandling>,\n    fee_handling: Option<FeeHandling>,\n    compute_rewards: Option<bool>,\n    balance_hold_interval: Option<TimeDiff>,\n    enable_entity: Option<bool>,\n    trap_on_ambiguous_entity_version: Option<bool>,\n    rewards_handling: Option<RewardsHandling>,\n    storage_costs: Option<StorageCosts>,\n}\n\nimpl EngineConfigBuilder {\n    /// Creates a new `EngineConfig` builder.\n    pub fn new() -> Self {\n        EngineConfigBuilder::default()\n    }\n\n    /// Sets the max query depth config option.\n    pub fn with_max_query_depth(mut self, max_query_depth: u64) -> Self {\n        self.max_query_depth = Some(max_query_depth);\n        self\n    }\n\n    /// Sets the max associated keys config option.\n    pub fn with_max_associated_keys(mut self, max_associated_keys: u32) -> Self {\n        self.max_associated_keys = Some(max_associated_keys);\n        self\n    }\n\n    /// Sets the max runtime call stack height config option.\n    pub fn 
with_max_runtime_call_stack_height(\n        mut self,\n        max_runtime_call_stack_height: u32,\n    ) -> Self {\n        self.max_runtime_call_stack_height = Some(max_runtime_call_stack_height);\n        self\n    }\n\n    /// Sets the strict argument checking config option.\n    pub fn with_strict_argument_checking(mut self, value: bool) -> Self {\n        self.strict_argument_checking = Some(value);\n        self\n    }\n\n    /// Sets the vesting schedule period millis config option.\n    pub fn with_vesting_schedule_period_millis(mut self, value: u64) -> Self {\n        self.vesting_schedule_period_millis = Some(value);\n        self\n    }\n\n    /// Sets the max delegators per validator config option.\n    pub fn with_max_delegators_per_validator(mut self, value: u32) -> Self {\n        self.max_delegators_per_validator = Some(value);\n        self\n    }\n\n    /// Sets the wasm config options.\n    pub fn with_wasm_config(mut self, wasm_config: WasmConfig) -> Self {\n        self.wasm_config = Some(wasm_config);\n        self\n    }\n\n    /// Sets the system config options.\n    pub fn with_system_config(mut self, system_config: SystemConfig) -> Self {\n        self.system_config = Some(system_config);\n        self\n    }\n\n    /// Sets the protocol version.\n    pub fn with_protocol_version(mut self, protocol_version: ProtocolVersion) -> Self {\n        self.protocol_version = Some(protocol_version);\n        self\n    }\n\n    /// Sets the maximum wasm stack height config option.\n    pub fn with_wasm_max_stack_height(mut self, wasm_stack_height: u32) -> Self {\n        let wasm_config = self.wasm_config.get_or_insert_with(WasmConfig::default);\n        *wasm_config.v1_mut().max_stack_height_mut() = wasm_stack_height;\n        self\n    }\n\n    /// Sets the minimum delegation amount config option.\n    pub fn with_minimum_delegation_amount(mut self, minimum_delegation_amount: u64) -> Self {\n        self.minimum_delegation_amount = 
Some(minimum_delegation_amount);\n        self\n    }\n\n    /// Sets the maximum delegation amount config option.\n    pub fn with_maximum_delegation_amount(mut self, maximum_delegation_amount: u64) -> Self {\n        self.maximum_delegation_amount = Some(maximum_delegation_amount);\n        self\n    }\n\n    /// Sets the minimum bid amount config option.\n    pub fn with_minimum_bid_amount(mut self, minimum_bid_amount: u64) -> Self {\n        self.minimum_bid_amount = Some(minimum_bid_amount);\n        self\n    }\n\n    /// Sets the administrative accounts.\n    pub fn with_administrative_accounts(\n        mut self,\n        administrator_accounts: BTreeSet<PublicKey>,\n    ) -> Self {\n        self.administrative_accounts = Some(administrator_accounts);\n        self\n    }\n\n    /// Sets the allow auction bids config option.\n    pub fn with_allow_auction_bids(mut self, allow_auction_bids: bool) -> Self {\n        self.allow_auction_bids = Some(allow_auction_bids);\n        self\n    }\n\n    /// Sets the allow unrestricted transfers config option.\n    pub fn with_allow_unrestricted_transfers(mut self, allow_unrestricted_transfers: bool) -> Self {\n        self.allow_unrestricted_transfers = Some(allow_unrestricted_transfers);\n        self\n    }\n\n    /// Sets the refund handling config option.\n    pub fn with_refund_handling(mut self, refund_handling: RefundHandling) -> Self {\n        match refund_handling {\n            RefundHandling::Refund { refund_ratio } | RefundHandling::Burn { refund_ratio } => {\n                debug_assert!(\n                    refund_ratio <= Ratio::one(),\n                    \"refund ratio should be in the range of [0, 1]\"\n                );\n            }\n            RefundHandling::NoRefund => {\n                //noop\n            }\n        }\n\n        self.refund_handling = Some(refund_handling);\n        self\n    }\n\n    /// Sets fee handling config option.\n    pub fn with_fee_handling(mut self, 
fee_handling: FeeHandling) -> Self {\n        self.fee_handling = Some(fee_handling);\n        self\n    }\n\n    /// Sets compute rewards config option.\n    pub fn with_compute_rewards(mut self, compute_rewards: bool) -> Self {\n        self.compute_rewards = Some(compute_rewards);\n        self\n    }\n\n    /// Sets balance hold interval config option.\n    pub fn balance_hold_interval(mut self, balance_hold_interval: TimeDiff) -> Self {\n        self.balance_hold_interval = Some(balance_hold_interval);\n        self\n    }\n\n    /// Sets the enable entity flag.\n    pub fn with_enable_entity(mut self, enable_entity: bool) -> Self {\n        self.enable_entity = Some(enable_entity);\n        self\n    }\n\n    /// Sets the flag if the runtime returns an error on entity version collision.\n    pub fn with_trap_on_ambiguous_entity_version(\n        mut self,\n        trap_on_ambiguous_entity_version: bool,\n    ) -> Self {\n        self.trap_on_ambiguous_entity_version = Some(trap_on_ambiguous_entity_version);\n        self\n    }\n\n    /// Sets the storage_costs config option.\n    pub fn with_storage_costs(mut self, storage_costs: StorageCosts) -> Self {\n        self.storage_costs = Some(storage_costs);\n        self\n    }\n\n    /// Builds a new [`EngineConfig`] object.\n    pub fn build(self) -> EngineConfig {\n        let max_associated_keys = self\n            .max_associated_keys\n            .unwrap_or(DEFAULT_MAX_ASSOCIATED_KEYS);\n        let max_runtime_call_stack_height = self\n            .max_runtime_call_stack_height\n            .unwrap_or(DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT);\n        let minimum_delegation_amount = self\n            .minimum_delegation_amount\n            .unwrap_or(DEFAULT_MINIMUM_DELEGATION_AMOUNT);\n        let maximum_delegation_amount = self\n            .maximum_delegation_amount\n            .unwrap_or(DEFAULT_MAXIMUM_DELEGATION_AMOUNT);\n        let minimum_bid_amount = self\n            .minimum_bid_amount\n       
     .unwrap_or(DEFAULT_MINIMUM_BID_AMOUNT);\n        let wasm_config = self.wasm_config.unwrap_or_default();\n        let system_config = self.system_config.unwrap_or_default();\n        let protocol_version = self.protocol_version.unwrap_or(DEFAULT_PROTOCOL_VERSION);\n        let administrative_accounts = {\n            self.administrative_accounts\n                .unwrap_or_default()\n                .iter()\n                .map(PublicKey::to_account_hash)\n                .collect()\n        };\n        let allow_auction_bids = self\n            .allow_auction_bids\n            .unwrap_or(DEFAULT_ALLOW_AUCTION_BIDS);\n        let allow_unrestricted_transfers = self\n            .allow_unrestricted_transfers\n            .unwrap_or(DEFAULT_ALLOW_UNRESTRICTED_TRANSFERS);\n        let refund_handling = self.refund_handling.unwrap_or(DEFAULT_REFUND_HANDLING);\n        let fee_handling = self.fee_handling.unwrap_or(DEFAULT_FEE_HANDLING);\n\n        let strict_argument_checking = self\n            .strict_argument_checking\n            .unwrap_or(DEFAULT_STRICT_ARGUMENT_CHECKING);\n        let vesting_schedule_period_millis = self\n            .vesting_schedule_period_millis\n            .unwrap_or(DEFAULT_VESTING_SCHEDULE_LENGTH_MILLIS);\n        let max_delegators_per_validator = self\n            .max_delegators_per_validator\n            .unwrap_or(DEFAULT_MAX_DELEGATORS_PER_VALIDATOR);\n        let compute_rewards = self.compute_rewards.unwrap_or(DEFAULT_COMPUTE_REWARDS);\n        let enable_entity = self.enable_entity.unwrap_or(DEFAULT_ENABLE_ENTITY);\n        let trap_on_ambiguous_entity_version = self\n            .trap_on_ambiguous_entity_version\n            .unwrap_or(DEFAULT_TRAP_ON_AMBIGUOUS_ENTITY_VERSION);\n        let storage_costs = self.storage_costs.unwrap_or_default();\n        let rewards_handling = self.rewards_handling.unwrap_or(RewardsHandling::Standard);\n\n        EngineConfig {\n            max_associated_keys,\n            
max_runtime_call_stack_height,\n            minimum_delegation_amount,\n            maximum_delegation_amount,\n            minimum_bid_amount,\n            wasm_config,\n            system_config,\n            protocol_version,\n            administrative_accounts,\n            allow_auction_bids,\n            allow_unrestricted_transfers,\n            refund_handling,\n            fee_handling,\n            strict_argument_checking,\n            vesting_schedule_period_millis,\n            max_delegators_per_validator,\n            compute_rewards,\n            enable_entity,\n            trap_on_ambiguous_entity_version,\n            rewards_handling,\n            storage_costs,\n        }\n    }\n}\n"
  },
  {
    "path": "execution_engine/src/engine_state/error.rs",
    "content": "//! Definition of all the possible outcomes of the operation on an `EngineState` instance.\nuse datasize::DataSize;\nuse thiserror::Error;\n\nuse casper_storage::{system::transfer::TransferError, tracking_copy::TrackingCopyError};\nuse casper_types::{bytesrepr, system::mint, ApiError, Digest, Key, ProtocolVersion};\n\nuse super::InvalidRequest;\nuse crate::{\n    execution::ExecError,\n    runtime::{stack, PreprocessingError},\n};\n\n/// Engine state errors.\n#[derive(Clone, Error, Debug)]\n#[non_exhaustive]\npub enum Error {\n    /// Specified state root hash is not found.\n    #[error(\"Root not found: {0}\")]\n    RootNotFound(Digest),\n    /// Protocol version used in the deploy is invalid.\n    #[error(\"Invalid protocol version: {0}\")]\n    InvalidProtocolVersion(ProtocolVersion),\n    /// WASM preprocessing error.\n    #[error(\"Wasm preprocessing error: {0}\")]\n    WasmPreprocessing(#[from] PreprocessingError),\n    /// Contract execution error.\n    #[error(transparent)]\n    Exec(ExecError),\n    /// Serialization/deserialization error.\n    #[error(\"Bytesrepr error: {0}\")]\n    Bytesrepr(String),\n    /// Mint error.\n    #[error(\"Mint error: {0}\")]\n    Mint(String),\n    /// Invalid key variant.\n    #[error(\"Unsupported key type: {0}\")]\n    InvalidKeyVariant(Key),\n    /// An attempt to push to the runtime stack while already at the maximum height.\n    #[error(\"Runtime stack overflow\")]\n    RuntimeStackOverflow,\n    /// Storage error.\n    #[error(\"Tracking copy error: {0}\")]\n    TrackingCopy(TrackingCopyError),\n    /// Native transfer error.\n    #[error(\"Transfer error: {0}\")]\n    Transfer(TransferError),\n    /// Could not derive a valid item to execute.\n    #[error(\"Invalid executable item: {0}\")]\n    InvalidExecutableItem(#[from] InvalidRequest),\n}\n\nimpl Error {\n    /// Creates an [`enum@Error`] instance of an [`Error::Exec`] variant with an API\n    /// error-compatible object.\n    ///\n    /// This 
method should be used only by native code that has to mimic logic of a WASM executed\n    /// code.\n    pub fn reverter(api_error: impl Into<ApiError>) -> Error {\n        Error::Exec(ExecError::Revert(api_error.into()))\n    }\n}\n\nimpl From<TransferError> for Error {\n    fn from(err: TransferError) -> Self {\n        Error::Transfer(err)\n    }\n}\n\nimpl From<ExecError> for Error {\n    fn from(error: ExecError) -> Self {\n        match error {\n            ExecError::WasmPreprocessing(preprocessing_error) => {\n                Error::WasmPreprocessing(preprocessing_error)\n            }\n            _ => Error::Exec(error),\n        }\n    }\n}\n\nimpl From<bytesrepr::Error> for Error {\n    fn from(error: bytesrepr::Error) -> Self {\n        Error::Bytesrepr(format!(\"{}\", error))\n    }\n}\n\nimpl From<mint::Error> for Error {\n    fn from(error: mint::Error) -> Self {\n        Error::Mint(format!(\"{}\", error))\n    }\n}\n\nimpl From<stack::RuntimeStackOverflow> for Error {\n    fn from(_: stack::RuntimeStackOverflow) -> Self {\n        Self::RuntimeStackOverflow\n    }\n}\n\nimpl From<TrackingCopyError> for Error {\n    fn from(e: TrackingCopyError) -> Self {\n        Error::TrackingCopy(e)\n    }\n}\n\nimpl DataSize for Error {\n    const IS_DYNAMIC: bool = true;\n\n    const STATIC_HEAP_SIZE: usize = 0;\n\n    #[inline]\n    fn estimate_heap_size(&self) -> usize {\n        12 // TODO: replace with some actual estimation depending on the variant\n    }\n}\n"
  },
  {
    "path": "execution_engine/src/engine_state/execution_kind.rs",
    "content": "//! Units of execution.\n\nuse casper_types::{\n    bytesrepr::Bytes,\n    contracts::{NamedKeys, ProtocolVersionMajor},\n    AddressableEntityHash, EntityVersion, Key, PackageHash, TransactionInvocationTarget,\n};\n\nuse super::{wasm_v1::SessionKind, Error, ExecutableItem};\nuse crate::execution::ExecError;\n\n/// The type of execution about to be performed.\n#[derive(Clone, Debug)]\npub(crate) enum ExecutionKind<'a> {\n    /// Standard (non-specialized) Wasm bytes related to a transaction of version 1 or later.\n    Standard(&'a Bytes),\n    /// Wasm bytes which install or upgrade a stored entity.\n    InstallerUpgrader(&'a Bytes),\n    /// Stored contract.\n    Stored {\n        /// AddressableEntity's hash.\n        entity_hash: AddressableEntityHash,\n        /// Entry point.\n        entry_point: String,\n    },\n    /// Standard (non-specialized) Wasm bytes related to a `Deploy`.\n    ///\n    /// This is equivalent to the `Standard` variant with the exception that this kind will be\n    /// allowed to install or upgrade stored entities to retain existing (pre-node 2.0) behavior.\n    Deploy(&'a Bytes),\n    /// A call to an entity/contract in a package/contract package.\n    VersionedCall {\n        package_hash: PackageHash,\n        entity_version: Option<EntityVersion>,\n        protocol_version_major: Option<ProtocolVersionMajor>,\n        /// Entry point.\n        entry_point: String,\n    },\n}\n\nimpl<'a> ExecutionKind<'a> {\n    pub(crate) fn new(\n        named_keys: &NamedKeys,\n        executable_item: &'a ExecutableItem,\n        entry_point: String,\n    ) -> Result<Self, Error> {\n        match executable_item {\n            ExecutableItem::Invocation(target) => {\n                Self::new_direct_invocation(named_keys, target, entry_point)\n            }\n            ExecutableItem::PaymentBytes(module_bytes)\n            | ExecutableItem::SessionBytes {\n                kind: SessionKind::GenericBytecode,\n                
module_bytes,\n            } => Ok(ExecutionKind::Standard(module_bytes)),\n            ExecutableItem::SessionBytes {\n                kind: SessionKind::InstallUpgradeBytecode,\n                module_bytes,\n            } => Ok(ExecutionKind::InstallerUpgrader(module_bytes)),\n            ExecutableItem::Deploy(module_bytes) => Ok(ExecutionKind::Deploy(module_bytes)),\n        }\n    }\n\n    fn new_direct_invocation(\n        named_keys: &NamedKeys,\n        target: &TransactionInvocationTarget,\n        entry_point: String,\n    ) -> Result<Self, Error> {\n        let entity_hash = match target {\n            TransactionInvocationTarget::ByHash(addr) => AddressableEntityHash::new(*addr),\n            TransactionInvocationTarget::ByName(alias) => {\n                let entity_key = named_keys\n                    .get(alias)\n                    .ok_or_else(|| Error::Exec(ExecError::NamedKeyNotFound(alias.clone())))?;\n\n                match entity_key {\n                    Key::Hash(hash) => AddressableEntityHash::new(*hash),\n                    Key::AddressableEntity(entity_addr) => {\n                        AddressableEntityHash::new(entity_addr.value())\n                    }\n                    _ => return Err(Error::InvalidKeyVariant(*entity_key)),\n                }\n            }\n            TransactionInvocationTarget::ByPackageHash {\n                addr,\n                version,\n                protocol_version_major,\n            } => {\n                let package_hash = PackageHash::from(*addr);\n                return Ok(Self::VersionedCall {\n                    package_hash,\n                    entity_version: *version,\n                    protocol_version_major: *protocol_version_major,\n                    entry_point,\n                });\n            }\n            TransactionInvocationTarget::ByPackageName {\n                name,\n                version,\n                protocol_version_major,\n            } => {\n            
    let package_key = named_keys\n                    .get(name)\n                    .ok_or_else(|| Error::Exec(ExecError::NamedKeyNotFound(name.to_string())))?;\n\n                let package_hash = match package_key {\n                    Key::Hash(hash) | Key::SmartContract(hash) => PackageHash::new(*hash),\n                    _ => return Err(Error::InvalidKeyVariant(*package_key)),\n                };\n                return Ok(Self::VersionedCall {\n                    package_hash,\n                    entity_version: *version,\n                    protocol_version_major: *protocol_version_major,\n                    entry_point,\n                });\n            }\n        };\n        Ok(ExecutionKind::Stored {\n            entity_hash,\n            entry_point,\n        })\n    }\n}\n"
  },
  {
    "path": "execution_engine/src/engine_state/mod.rs",
    "content": "//!  This module contains all the execution related code.\npub mod engine_config;\nmod error;\npub(crate) mod execution_kind;\nmod wasm_v1;\n\nuse std::{cell::RefCell, collections::BTreeSet, rc::Rc};\n\nuse casper_types::{\n    account::AccountHash, Gas, InitiatorAddr, Key, Phase, RuntimeArgs, StoredValue, TransactionHash,\n};\n\nuse casper_storage::{\n    global_state::{\n        error::Error as GlobalStateError,\n        state::{StateProvider, StateReader},\n    },\n    tracking_copy::{TrackingCopyEntityExt, TrackingCopyError},\n    TrackingCopy,\n};\n\nuse crate::{execution::Executor, runtime::RuntimeStack};\npub use engine_config::{\n    EngineConfig, EngineConfigBuilder, DEFAULT_MAX_QUERY_DEPTH,\n    DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT,\n};\npub use error::Error;\nuse execution_kind::ExecutionKind;\npub use wasm_v1::{\n    BlockInfo, ExecutableItem, InvalidRequest, SessionDataDeploy, SessionDataV1, SessionInputData,\n    WasmV1Request, WasmV1Result,\n};\n\n/// Gas/motes conversion rate of wasmless transfer cost is always 1 regardless of what user wants to\n/// pay.\npub const WASMLESS_TRANSFER_FIXED_GAS_PRICE: u8 = 1;\n\n/// The public api of the v1 execution engine, as of protocol version 2.0.0\n#[derive(Debug, Clone, Default)]\npub struct ExecutionEngineV1 {\n    config: EngineConfig,\n}\n\nimpl ExecutionEngineV1 {\n    /// Creates new execution engine.\n    pub fn new(config: EngineConfig) -> ExecutionEngineV1 {\n        ExecutionEngineV1 { config }\n    }\n\n    /// Returns engine config.\n    pub fn config(&self) -> &EngineConfig {\n        &self.config\n    }\n\n    /// Executes wasm, and that's all. 
Does not commit or handle payment or anything else.\n    pub fn execute(\n        &self,\n        state_provider: &impl StateProvider,\n        wasm_v1_request: WasmV1Request,\n    ) -> WasmV1Result {\n        let WasmV1Request {\n            block_info,\n            transaction_hash,\n            gas_limit,\n            initiator_addr,\n            executable_item,\n            entry_point,\n            args,\n            authorization_keys,\n            phase,\n        } = wasm_v1_request;\n        // NOTE to core engineers: it is intended for the EE to ONLY execute wasm targeting the\n        // casper v1 virtual machine. it should not handle native behavior, database / global state\n        // interaction, payment processing, or anything other than its single function.\n        // A good deal of effort has been put into removing all such behaviors; please do not\n        // come along and start adding it back.\n\n        let account_hash = initiator_addr.account_hash();\n        let protocol_version = self.config.protocol_version();\n        let state_hash = block_info.state_hash;\n        let tc = match state_provider.tracking_copy(state_hash) {\n            Ok(Some(tracking_copy)) => Rc::new(RefCell::new(tracking_copy)),\n            Ok(None) => return WasmV1Result::root_not_found(gas_limit, state_hash),\n            Err(gse) => {\n                return WasmV1Result::precondition_failure(\n                    gas_limit,\n                    Error::TrackingCopy(TrackingCopyError::Storage(gse)),\n                )\n            }\n        };\n        let (runtime_footprint, entity_addr) = {\n            match tc.borrow_mut().authorized_runtime_footprint_by_account(\n                protocol_version,\n                account_hash,\n                &authorization_keys,\n                &self.config().administrative_accounts,\n            ) {\n                Ok((runtime_footprint, entity_hash)) => (runtime_footprint, entity_hash),\n                Err(tce) => {\n 
                   return WasmV1Result::precondition_failure(gas_limit, Error::TrackingCopy(tce))\n                }\n            }\n        };\n        let mut named_keys = runtime_footprint.named_keys().clone();\n        let execution_kind = match ExecutionKind::new(&named_keys, &executable_item, entry_point) {\n            Ok(execution_kind) => execution_kind,\n            Err(ese) => return WasmV1Result::precondition_failure(gas_limit, ese),\n        };\n        let access_rights = runtime_footprint.extract_access_rights(entity_addr.value());\n        Executor::new(self.config().clone()).exec(\n            execution_kind,\n            args,\n            entity_addr,\n            Rc::new(RefCell::new(runtime_footprint)),\n            &mut named_keys,\n            access_rights,\n            authorization_keys,\n            account_hash,\n            block_info,\n            transaction_hash,\n            gas_limit,\n            Rc::clone(&tc),\n            phase,\n            RuntimeStack::from_account_hash(\n                account_hash,\n                self.config.max_runtime_call_stack_height() as usize,\n            ),\n        )\n    }\n\n    /// Executes wasm, and that's all. Does not commit or handle payment or anything else.\n    #[allow(clippy::too_many_arguments)]\n    pub fn execute_with_tracking_copy<R>(\n        &self,\n        tracking_copy: TrackingCopy<R>,\n        block_info: BlockInfo,\n        transaction_hash: TransactionHash,\n        gas_limit: Gas,\n        initiator_addr: InitiatorAddr,\n        executable_item: ExecutableItem,\n        entry_point: String,\n        args: RuntimeArgs,\n        authorization_keys: BTreeSet<AccountHash>,\n        phase: Phase,\n    ) -> WasmV1Result\n    where\n        R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n    {\n        // NOTE to core engineers: it is intended for the EE to ONLY execute wasm targeting the\n        // casper v1 virtual machine. 
it should not handle native behavior, database / global state\n        // interaction, payment processing, or anything other than its single function.\n        // A good deal of effort has been put into removing all such behaviors; please do not\n        // come along and start adding it back.\n\n        let account_hash = initiator_addr.account_hash();\n        let protocol_version = self.config.protocol_version();\n        let tc = Rc::new(RefCell::new(tracking_copy));\n        let (runtime_footprint, entity_addr) = {\n            match tc.borrow_mut().authorized_runtime_footprint_by_account(\n                protocol_version,\n                account_hash,\n                &authorization_keys,\n                &self.config().administrative_accounts,\n            ) {\n                Ok((addressable_entity, entity_hash)) => (addressable_entity, entity_hash),\n                Err(tce) => {\n                    return WasmV1Result::precondition_failure(gas_limit, Error::TrackingCopy(tce))\n                }\n            }\n        };\n        let mut named_keys = runtime_footprint.named_keys().clone();\n        let execution_kind = match ExecutionKind::new(&named_keys, &executable_item, entry_point) {\n            Ok(execution_kind) => execution_kind,\n            Err(ese) => return WasmV1Result::precondition_failure(gas_limit, ese),\n        };\n        let access_rights = runtime_footprint.extract_access_rights(entity_addr.value());\n        Executor::new(self.config().clone()).exec(\n            execution_kind,\n            args,\n            entity_addr,\n            Rc::new(RefCell::new(runtime_footprint)),\n            &mut named_keys,\n            access_rights,\n            authorization_keys,\n            account_hash,\n            block_info,\n            transaction_hash,\n            gas_limit,\n            Rc::clone(&tc),\n            phase,\n            RuntimeStack::from_account_hash(\n                account_hash,\n                
self.config.max_runtime_call_stack_height() as usize,\n            ),\n        )\n    }\n}\n"
  },
  {
    "path": "execution_engine/src/engine_state/wasm_v1.rs",
    "content": "use std::{collections::BTreeSet, convert::TryFrom};\n\nuse serde::Serialize;\nuse thiserror::Error;\n\nuse casper_storage::{data_access_layer::TransferResult, tracking_copy::TrackingCopyCache};\nuse casper_types::{\n    account::AccountHash,\n    bytesrepr::Bytes,\n    contract_messages::Messages,\n    execution::{Effects, TransformKindV2},\n    BlockHash, BlockTime, CLValue, DeployHash, Digest, ExecutableDeployItem, Gas, InitiatorAddr,\n    Key, Phase, PricingMode, ProtocolVersion, RuntimeArgs, TransactionEntryPoint, TransactionHash,\n    TransactionInvocationTarget, TransactionTarget, TransactionV1Hash, Transfer, URefAddr, U512,\n};\n\nuse crate::engine_state::Error as EngineError;\n\nconst DEFAULT_ENTRY_POINT: &str = \"call\";\n\n/// Structure that needs to be filled with data so the engine can assemble wasm for deploy.\npub struct SessionDataDeploy<'a> {\n    deploy_hash: &'a DeployHash,\n    session: &'a ExecutableDeployItem,\n    initiator_addr: &'a InitiatorAddr,\n    signers: BTreeSet<AccountHash>,\n    is_standard_payment: bool,\n}\n\nimpl<'a> SessionDataDeploy<'a> {\n    /// Constructor\n    pub fn new(\n        deploy_hash: &'a DeployHash,\n        session: &'a ExecutableDeployItem,\n        initiator_addr: &'a InitiatorAddr,\n        signers: BTreeSet<AccountHash>,\n        is_standard_payment: bool,\n    ) -> Self {\n        Self {\n            deploy_hash,\n            session,\n            initiator_addr,\n            signers,\n            is_standard_payment,\n        }\n    }\n\n    /// Deploy hash of the deploy\n    pub fn deploy_hash(&self) -> &DeployHash {\n        self.deploy_hash\n    }\n\n    /// executable item of the deploy\n    pub fn session(&self) -> &ExecutableDeployItem {\n        self.session\n    }\n\n    /// initiator address of the deploy\n    pub fn initiator_addr(&self) -> &InitiatorAddr {\n        self.initiator_addr\n    }\n\n    /// signers of the deploy\n    pub fn signers(&self) -> BTreeSet<AccountHash> {\n   
     self.signers.clone()\n    }\n}\n\n/// Structure that needs to be filled with data so the engine can assemble wasm for v1.\npub struct SessionDataV1<'a> {\n    args: &'a RuntimeArgs,\n    target: &'a TransactionTarget,\n    entry_point: &'a TransactionEntryPoint,\n    is_install_upgrade: bool,\n    hash: &'a TransactionV1Hash,\n    pricing_mode: &'a PricingMode,\n    initiator_addr: &'a InitiatorAddr,\n    signers: BTreeSet<AccountHash>,\n    is_standard_payment: bool,\n}\n\nimpl<'a> SessionDataV1<'a> {\n    #[allow(clippy::too_many_arguments)]\n    /// Constructor\n    pub fn new(\n        args: &'a RuntimeArgs,\n        target: &'a TransactionTarget,\n        entry_point: &'a TransactionEntryPoint,\n        is_install_upgrade: bool,\n        hash: &'a TransactionV1Hash,\n        pricing_mode: &'a PricingMode,\n        initiator_addr: &'a InitiatorAddr,\n        signers: BTreeSet<AccountHash>,\n        is_standard_payment: bool,\n    ) -> Self {\n        Self {\n            args,\n            target,\n            entry_point,\n            is_install_upgrade,\n            hash,\n            pricing_mode,\n            initiator_addr,\n            signers,\n            is_standard_payment,\n        }\n    }\n\n    /// Runtime args passed with the transaction.\n    pub fn args(&self) -> &RuntimeArgs {\n        self.args\n    }\n\n    /// Target of the transaction.\n    pub fn target(&self) -> &TransactionTarget {\n        self.target\n    }\n\n    /// Entry point of the transaction\n    pub fn entry_point(&self) -> &TransactionEntryPoint {\n        self.entry_point\n    }\n\n    /// Should session be allowed to perform install/upgrade operations\n    pub fn is_install_upgrade(&self) -> bool {\n        self.is_install_upgrade\n    }\n\n    /// Hash of the transaction\n    pub fn hash(&self) -> &TransactionV1Hash {\n        self.hash\n    }\n\n    /// initiator address of the transaction\n    pub fn initiator_addr(&self) -> &InitiatorAddr {\n        
self.initiator_addr\n    }\n\n    /// signers of the transaction\n    pub fn signers(&self) -> BTreeSet<AccountHash> {\n        self.signers.clone()\n    }\n\n    /// Pricing mode of the transaction\n    pub fn pricing_mode(&self) -> &PricingMode {\n        self.pricing_mode\n    }\n}\n\n/// Wrapper enum abstracting data for assembling WasmV1Requests\npub enum SessionInputData<'a> {\n    /// Variant for sessions created from deploy transactions\n    DeploySessionData {\n        /// Deploy session data\n        data: SessionDataDeploy<'a>,\n    },\n    /// Variant for sessions created from v1 transactions\n    SessionDataV1 {\n        /// v1 session data\n        data: SessionDataV1<'a>,\n    },\n}\n\nimpl SessionInputData<'_> {\n    /// Transaction hash for the session\n    pub fn transaction_hash(&self) -> TransactionHash {\n        match self {\n            SessionInputData::DeploySessionData { data } => {\n                TransactionHash::Deploy(*data.deploy_hash())\n            }\n            SessionInputData::SessionDataV1 { data } => TransactionHash::V1(*data.hash()),\n        }\n    }\n\n    /// Initiator address for the session\n    pub fn initiator_addr(&self) -> &InitiatorAddr {\n        match self {\n            SessionInputData::DeploySessionData { data } => data.initiator_addr(),\n            SessionInputData::SessionDataV1 { data } => data.initiator_addr(),\n        }\n    }\n\n    /// Signers for the session\n    pub fn signers(&self) -> BTreeSet<AccountHash> {\n        match self {\n            SessionInputData::DeploySessionData { data } => data.signers(),\n            SessionInputData::SessionDataV1 { data } => data.signers(),\n        }\n    }\n\n    /// determines if the transaction from which this session data was created is a standard payment\n    pub fn is_standard_payment(&self) -> bool {\n        match self {\n            SessionInputData::DeploySessionData { data } => data.is_standard_payment,\n            SessionInputData::SessionDataV1 { 
data } => data.is_standard_payment,\n        }\n    }\n\n    /// Is install upgrade allowed?\n    pub fn is_install_upgrade_allowed(&self) -> bool {\n        match self {\n            SessionInputData::DeploySessionData { .. } => true,\n            SessionInputData::SessionDataV1 { data } => data.is_install_upgrade,\n        }\n    }\n}\n\n/// Error returned if constructing a new [`WasmV1Request`] fails.\n#[derive(Clone, Eq, PartialEq, Error, Serialize, Debug)]\npub enum InvalidRequest {\n    /// Missing custom payment.\n    #[error(\"custom payment not found for {0}\")]\n    CustomPaymentNotFound(TransactionHash),\n    /// Unexpected variant.\n    #[error(\"unexpected variant for {0} attempting {1}\")]\n    UnexpectedVariant(TransactionHash, String),\n    /// Unsupported mode.\n    #[error(\"unsupported mode for {0} attempting {1}\")]\n    UnsupportedMode(TransactionHash, String),\n    /// Invalid entry point.\n    #[error(\"invalid entry point for {0} attempting {1}\")]\n    InvalidEntryPoint(TransactionHash, String),\n    /// Invalid target.\n    #[error(\"invalid target for {0} attempting {1}\")]\n    InvalidTarget(TransactionHash, String),\n    /// Unsupported category.\n    #[error(\"invalid category for {0} attempting {1}\")]\n    InvalidCategory(TransactionHash, String),\n}\n\n#[derive(Debug, Clone)]\npub enum SessionKind {\n    InstallUpgradeBytecode,\n    GenericBytecode,\n}\n\n/// The item to be executed.\n#[derive(Debug, Clone)]\npub enum ExecutableItem {\n    /// Deploy model byte code.\n    Deploy(Bytes),\n    /// Payment byte code.\n    PaymentBytes(Bytes),\n    /// Session byte code.\n    SessionBytes {\n        /// The kind of session.\n        kind: SessionKind,\n        /// The compiled Wasm.\n        module_bytes: Bytes,\n    },\n    /// An attempt to invoke a stored entity or package.\n    Invocation(TransactionInvocationTarget),\n}\n\nimpl ExecutableItem {\n    /// Is install upgrade allowed?\n    pub fn is_install_upgrade_allowed(&self) -> 
bool {\n        match self {\n            ExecutableItem::Deploy(_) => true,\n            ExecutableItem::PaymentBytes(_) | ExecutableItem::Invocation(_) => false,\n            ExecutableItem::SessionBytes { kind, .. } => {\n                matches!(kind, SessionKind::InstallUpgradeBytecode)\n            }\n        }\n    }\n}\n\n/// Block info.\n#[derive(Copy, Clone, Debug)]\npub struct BlockInfo {\n    /// State root hash of the global state in which the transaction will be executed.\n    pub state_hash: Digest,\n    /// Block time represented as a unix timestamp.\n    pub block_time: BlockTime,\n    /// Parent block hash\n    pub parent_block_hash: BlockHash,\n    /// Block height\n    pub block_height: u64,\n    /// Protocol version\n    pub protocol_version: ProtocolVersion,\n}\n\nimpl BlockInfo {\n    /// A new instance of `[BlockInfo]`.\n    pub fn new(\n        state_hash: Digest,\n        block_time: BlockTime,\n        parent_block_hash: BlockHash,\n        block_height: u64,\n        protocol_version: ProtocolVersion,\n    ) -> Self {\n        BlockInfo {\n            state_hash,\n            block_time,\n            parent_block_hash,\n            block_height,\n            protocol_version,\n        }\n    }\n\n    /// Apply different state hash.\n    pub fn with_state_hash(&mut self, state_hash: Digest) {\n        self.state_hash = state_hash;\n    }\n\n    /// State hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Block time.\n    pub fn block_time(&self) -> BlockTime {\n        self.block_time\n    }\n\n    /// Parent block hash.\n    pub fn parent_block_hash(&self) -> BlockHash {\n        self.parent_block_hash\n    }\n\n    /// Block height.\n    pub fn block_height(&self) -> u64 {\n        self.block_height\n    }\n\n    /// Protocol version.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n}\n\n/// A request to execute the given Wasm on the V1 
runtime.\n#[derive(Debug)]\npub struct WasmV1Request {\n    /// Block info.\n    pub block_info: BlockInfo,\n    /// The hash identifying the transaction.\n    pub transaction_hash: TransactionHash,\n    /// The number of Motes per unit of Gas to be paid for execution.\n    pub gas_limit: Gas,\n    /// The transaction's initiator.\n    pub initiator_addr: InitiatorAddr,\n    /// The executable item.\n    pub executable_item: ExecutableItem,\n    /// The entry point to call when executing.\n    pub entry_point: String,\n    /// The runtime args.\n    pub args: RuntimeArgs,\n    /// The account hashes of the signers of the transaction.\n    pub authorization_keys: BTreeSet<AccountHash>,\n    /// Execution phase.\n    pub phase: Phase,\n}\n\nimpl WasmV1Request {\n    /// New from executable deploy item or InvalidRequest error.\n    pub fn new_from_executable_deploy_item(\n        block_info: BlockInfo,\n        gas_limit: Gas,\n        transaction_hash: TransactionHash,\n        initiator_addr: InitiatorAddr,\n        authorization_keys: BTreeSet<AccountHash>,\n        session_item: &ExecutableDeployItem,\n    ) -> Result<Self, InvalidRequest> {\n        let executable_info =\n            build_session_info_for_executable_item(session_item, transaction_hash)?;\n        Ok(Self::new_from_executable_info(\n            block_info,\n            gas_limit,\n            transaction_hash,\n            initiator_addr,\n            authorization_keys,\n            executable_info,\n        ))\n    }\n\n    /// New payment from executable deploy item or InvalidRequest error.\n    pub fn new_payment_from_executable_deploy_item(\n        block_info: BlockInfo,\n        gas_limit: Gas,\n        transaction_hash: TransactionHash,\n        initiator_addr: InitiatorAddr,\n        authorization_keys: BTreeSet<AccountHash>,\n        payment_item: &ExecutableDeployItem,\n    ) -> Result<Self, InvalidRequest> {\n        let executable_info =\n            
build_payment_info_for_executable_item(payment_item, transaction_hash)?;\n        Ok(Self::new_from_executable_info(\n            block_info,\n            gas_limit,\n            transaction_hash,\n            initiator_addr,\n            authorization_keys,\n            executable_info,\n        ))\n    }\n\n    pub(crate) fn new_from_executable_info(\n        block_info: BlockInfo,\n        gas_limit: Gas,\n        transaction_hash: TransactionHash,\n        initiator_addr: InitiatorAddr,\n        authorization_keys: BTreeSet<AccountHash>,\n        executable_info: impl Executable,\n    ) -> Self {\n        let executable_item = executable_info.item();\n        Self {\n            block_info,\n            transaction_hash,\n            gas_limit,\n            initiator_addr,\n            authorization_keys,\n            executable_item,\n            entry_point: executable_info.entry_point().clone(),\n            args: executable_info.args().clone(),\n            phase: executable_info.phase(),\n        }\n    }\n\n    /// Creates a new request from a transaction for use as the session code.\n    pub fn new_session(\n        block_info: BlockInfo,\n        gas_limit: Gas,\n        session_input_data: &SessionInputData,\n    ) -> Result<Self, InvalidRequest> {\n        let session_info = SessionInfo::try_from(session_input_data)?;\n        let transaction_hash = session_input_data.transaction_hash();\n        let initiator_addr = session_input_data.initiator_addr().clone();\n        let authorization_keys = session_input_data.signers().clone();\n        Ok(WasmV1Request::new_from_executable_info(\n            block_info,\n            gas_limit,\n            transaction_hash,\n            initiator_addr,\n            authorization_keys,\n            session_info,\n        ))\n    }\n\n    /// Creates a new request from a transaction for use as custom payment.\n    pub fn new_custom_payment(\n        block_info: BlockInfo,\n        gas_limit: Gas,\n        
session_input_data: &SessionInputData,\n    ) -> Result<Self, InvalidRequest> {\n        let payment_info = PaymentInfo::try_from(session_input_data)?;\n        let transaction_hash = session_input_data.transaction_hash();\n        let initiator_addr = session_input_data.initiator_addr().clone();\n        let authorization_keys = session_input_data.signers().clone();\n        Ok(WasmV1Request::new_from_executable_info(\n            block_info,\n            gas_limit,\n            transaction_hash,\n            initiator_addr,\n            authorization_keys,\n            payment_info,\n        ))\n    }\n}\n\n/// Wasm v1 result.\n#[derive(Clone, Debug)]\npub struct WasmV1Result {\n    /// List of transfers that happened during execution.\n    transfers: Vec<Transfer>,\n    /// Gas limit.\n    limit: Gas,\n    /// Gas consumed.\n    consumed: Gas,\n    /// Execution effects.\n    effects: Effects,\n    /// Messages emitted during execution.\n    messages: Messages,\n    /// Did the wasm execute successfully?\n    error: Option<EngineError>,\n    /// Result captured from a ret call.\n    ret: Option<CLValue>,\n    /// Tracking copy cache captured during execution.\n    cache: Option<TrackingCopyCache>,\n}\n\nimpl WasmV1Result {\n    /// Creates a new instance.\n    #[allow(clippy::too_many_arguments)]\n    pub fn new(\n        limit: Gas,\n        consumed: Gas,\n        effects: Effects,\n        transfers: Vec<Transfer>,\n        messages: Messages,\n        error: Option<EngineError>,\n        ret: Option<CLValue>,\n        cache: Option<TrackingCopyCache>,\n    ) -> Self {\n        WasmV1Result {\n            limit,\n            consumed,\n            effects,\n            transfers,\n            messages,\n            error,\n            ret,\n            cache,\n        }\n    }\n\n    /// Error, if any.\n    pub fn error(&self) -> Option<&EngineError> {\n        self.error.as_ref()\n    }\n\n    /// List of transfers that happened during execution.\n    pub fn 
transfers(&self) -> &Vec<Transfer> {\n        &self.transfers\n    }\n\n    /// Gas limit.\n    pub fn limit(&self) -> Gas {\n        self.limit\n    }\n\n    /// Gas consumed.\n    pub fn consumed(&self) -> Gas {\n        self.consumed\n    }\n\n    /// Execution effects.\n    pub fn effects(&self) -> &Effects {\n        &self.effects\n    }\n\n    /// Tracking copy cache captured during execution.\n    pub fn cache(&self) -> Option<&TrackingCopyCache> {\n        self.cache.as_ref()\n    }\n\n    /// Messages emitted during execution.\n    pub fn messages(&self) -> &Messages {\n        &self.messages\n    }\n\n    /// Result captured from a ret call.\n    pub fn ret(&self) -> Option<&CLValue> {\n        self.ret.as_ref()\n    }\n\n    /// Root not found.\n    pub fn root_not_found(gas_limit: Gas, state_hash: Digest) -> Self {\n        WasmV1Result {\n            transfers: Vec::default(),\n            effects: Effects::new(),\n            messages: Vec::default(),\n            limit: gas_limit,\n            consumed: Gas::zero(),\n            error: Some(EngineError::RootNotFound(state_hash)),\n            ret: None,\n            cache: None,\n        }\n    }\n\n    /// Precondition failure.\n    pub fn precondition_failure(gas_limit: Gas, error: EngineError) -> Self {\n        WasmV1Result {\n            transfers: Vec::default(),\n            effects: Effects::new(),\n            messages: Vec::default(),\n            limit: gas_limit,\n            consumed: Gas::zero(),\n            error: Some(error),\n            ret: None,\n            cache: None,\n        }\n    }\n\n    /// Failed to transform transaction into an executable item.\n    pub fn invalid_executable_item(gas_limit: Gas, error: InvalidRequest) -> Self {\n        WasmV1Result {\n            transfers: Vec::default(),\n            effects: Effects::new(),\n            messages: Vec::default(),\n            limit: gas_limit,\n            consumed: Gas::zero(),\n            error: 
Some(EngineError::InvalidExecutableItem(error)),\n            ret: None,\n            cache: None,\n        }\n    }\n\n    /// Returns `true` if this is a precondition failure.\n    ///\n    /// Precondition variant is further described as an execution failure which does not have any\n    /// effects, and has a gas cost of 0.\n    pub fn has_precondition_failure(&self) -> bool {\n        self.error.is_some() && self.consumed == Gas::zero() && self.effects.is_empty()\n    }\n\n    /// Converts a transfer result to an execution result.\n    pub fn from_transfer_result(transfer_result: TransferResult, consumed: Gas) -> Option<Self> {\n        // NOTE: for native / wasmless operations limit and consumed are always equal, and\n        // we can get away with simplifying to one or the other here.\n        // this is NOT true of wasm based operations however.\n        match transfer_result {\n            TransferResult::RootNotFound => None,\n            TransferResult::Success {\n                transfers,\n                effects,\n                cache,\n            } => Some(WasmV1Result {\n                transfers,\n                limit: consumed,\n                consumed,\n                effects,\n                messages: Messages::default(),\n                error: None,\n                ret: None,\n                cache: Some(cache),\n            }),\n            TransferResult::Failure(te) => {\n                Some(WasmV1Result {\n                    transfers: vec![],\n                    limit: consumed,\n                    consumed,\n                    effects: Effects::default(), // currently not returning effects on failure\n                    messages: Messages::default(),\n                    error: Some(EngineError::Transfer(te)),\n                    ret: None,\n                    cache: None,\n                })\n            }\n        }\n    }\n\n    /// Checks effects for an AddUInt512 transform to a balance at imputed addr\n    /// and for 
exactly the imputed amount.\n    pub fn balance_increased_by_amount(&self, addr: URefAddr, amount: U512) -> bool {\n        if self.effects.is_empty() || self.effects.transforms().is_empty() {\n            return false;\n        }\n\n        let key = Key::Balance(addr);\n        if let Some(transform) = self.effects.transforms().iter().find(|x| x.key() == &key) {\n            if let TransformKindV2::AddUInt512(added) = transform.kind() {\n                return *added == amount;\n            }\n        }\n        false\n    }\n}\n\n/// Helper struct to carry item, entry_point, and arg info for a `WasmV1Request`.\nstruct ExecutableInfo {\n    item: ExecutableItem,\n    entry_point: String,\n    args: RuntimeArgs,\n}\n\npub(crate) trait Executable {\n    fn item(&self) -> ExecutableItem;\n    fn entry_point(&self) -> &String;\n    fn args(&self) -> &RuntimeArgs;\n    fn phase(&self) -> Phase;\n}\n\n/// New type for hanging session specific impl's off of.\nstruct SessionInfo(ExecutableInfo);\n\nimpl Executable for SessionInfo {\n    fn item(&self) -> ExecutableItem {\n        self.0.item.clone()\n    }\n\n    fn entry_point(&self) -> &String {\n        &self.0.entry_point\n    }\n\n    fn args(&self) -> &RuntimeArgs {\n        &self.0.args\n    }\n\n    fn phase(&self) -> Phase {\n        Phase::Session\n    }\n}\n\nimpl TryFrom<&SessionInputData<'_>> for PaymentInfo {\n    type Error = InvalidRequest;\n\n    fn try_from(input_data: &SessionInputData) -> Result<Self, Self::Error> {\n        match input_data {\n            SessionInputData::DeploySessionData { data } => PaymentInfo::try_from(data),\n            SessionInputData::SessionDataV1 { data } => PaymentInfo::try_from(data),\n        }\n    }\n}\n\nimpl TryFrom<&SessionInputData<'_>> for SessionInfo {\n    type Error = InvalidRequest;\n\n    fn try_from(input_data: &SessionInputData) -> Result<Self, Self::Error> {\n        match input_data {\n            SessionInputData::DeploySessionData { data } => 
SessionInfo::try_from(data),\n            SessionInputData::SessionDataV1 { data } => SessionInfo::try_from(data),\n        }\n    }\n}\n\nimpl TryFrom<&SessionDataDeploy<'_>> for SessionInfo {\n    type Error = InvalidRequest;\n\n    fn try_from(deploy_data: &SessionDataDeploy) -> Result<Self, Self::Error> {\n        let transaction_hash = TransactionHash::Deploy(*deploy_data.deploy_hash());\n        let session_item = deploy_data.session();\n        build_session_info_for_executable_item(session_item, transaction_hash)\n    }\n}\n\nfn build_session_info_for_executable_item(\n    session_item: &ExecutableDeployItem,\n    transaction_hash: TransactionHash,\n) -> Result<SessionInfo, InvalidRequest> {\n    let session: ExecutableItem;\n    let session_entry_point: String;\n    let session_args: RuntimeArgs;\n    match session_item {\n        ExecutableDeployItem::ModuleBytes { module_bytes, args } => {\n            session = ExecutableItem::Deploy(module_bytes.clone());\n            session_entry_point = DEFAULT_ENTRY_POINT.to_string();\n            session_args = args.clone();\n        }\n        ExecutableDeployItem::StoredContractByHash {\n            hash,\n            entry_point,\n            args,\n        } => {\n            session = ExecutableItem::Invocation(\n                TransactionInvocationTarget::new_invocable_entity((*hash).into()),\n            );\n            session_entry_point = entry_point.clone();\n            session_args = args.clone();\n        }\n        ExecutableDeployItem::StoredContractByName {\n            name,\n            entry_point,\n            args,\n        } => {\n            session = ExecutableItem::Invocation(\n                TransactionInvocationTarget::new_invocable_entity_alias(name.clone()),\n            );\n            session_entry_point = entry_point.clone();\n            session_args = args.clone();\n        }\n        ExecutableDeployItem::StoredVersionedContractByHash {\n            hash,\n            
version,\n            entry_point,\n            args,\n        } => {\n            session = ExecutableItem::Invocation(TransactionInvocationTarget::ByPackageHash {\n                addr: hash.value(),\n                version: *version,\n                protocol_version_major: None,\n            });\n            session_entry_point = entry_point.clone();\n            session_args = args.clone();\n        }\n        ExecutableDeployItem::StoredVersionedContractByName {\n            name,\n            version,\n            entry_point,\n            args,\n        } => {\n            session = ExecutableItem::Invocation(TransactionInvocationTarget::ByPackageName {\n                name: name.to_owned(),\n                version: *version,\n                protocol_version_major: None,\n            });\n            session_entry_point = entry_point.clone();\n            session_args = args.clone();\n        }\n        ExecutableDeployItem::Transfer { .. } => {\n            return Err(InvalidRequest::UnsupportedMode(\n                transaction_hash,\n                session_item.to_string(),\n            ));\n        }\n    }\n\n    Ok(SessionInfo(ExecutableInfo {\n        item: session,\n        entry_point: session_entry_point,\n        args: session_args,\n    }))\n}\n\nimpl TryFrom<&SessionDataV1<'_>> for SessionInfo {\n    type Error = InvalidRequest;\n\n    fn try_from(v1_txn: &SessionDataV1) -> Result<Self, Self::Error> {\n        let transaction_hash = TransactionHash::V1(*v1_txn.hash());\n        let args = v1_txn.args().clone();\n        let session = match v1_txn.target() {\n            TransactionTarget::Native => {\n                return Err(InvalidRequest::InvalidTarget(\n                    transaction_hash,\n                    v1_txn.target().to_string(),\n                ));\n            }\n            TransactionTarget::Stored { id, .. 
} => {\n                let TransactionEntryPoint::Custom(entry_point) = v1_txn.entry_point() else {\n                    return Err(InvalidRequest::InvalidEntryPoint(\n                        transaction_hash,\n                        v1_txn.entry_point().to_string(),\n                    ));\n                };\n                let item = ExecutableItem::Invocation(id.clone());\n                ExecutableInfo {\n                    item,\n                    entry_point: entry_point.clone(),\n                    args,\n                }\n            }\n            TransactionTarget::Session { module_bytes, .. } => {\n                if *v1_txn.entry_point() != TransactionEntryPoint::Call {\n                    return Err(InvalidRequest::InvalidEntryPoint(\n                        transaction_hash,\n                        v1_txn.entry_point().to_string(),\n                    ));\n                };\n                let kind = if v1_txn.is_install_upgrade() {\n                    SessionKind::InstallUpgradeBytecode\n                } else {\n                    SessionKind::GenericBytecode\n                };\n                let item = ExecutableItem::SessionBytes {\n                    kind,\n                    module_bytes: module_bytes.clone(),\n                };\n                ExecutableInfo {\n                    item,\n                    entry_point: DEFAULT_ENTRY_POINT.to_owned(),\n                    args,\n                }\n            }\n        };\n\n        Ok(SessionInfo(session))\n    }\n}\n/// New type for hanging payment specific impl's off of.\nstruct PaymentInfo(ExecutableInfo);\n\nimpl Executable for PaymentInfo {\n    fn item(&self) -> ExecutableItem {\n        self.0.item.clone()\n    }\n\n    fn entry_point(&self) -> &String {\n        &self.0.entry_point\n    }\n\n    fn args(&self) -> &RuntimeArgs {\n        &self.0.args\n    }\n\n    fn phase(&self) -> Phase {\n        Phase::Payment\n    }\n}\n\nimpl 
TryFrom<&SessionDataDeploy<'_>> for PaymentInfo {\n    type Error = InvalidRequest;\n\n    fn try_from(deploy_data: &SessionDataDeploy) -> Result<Self, Self::Error> {\n        let payment_item = deploy_data.session();\n        let transaction_hash = TransactionHash::Deploy(*deploy_data.deploy_hash());\n        build_payment_info_for_executable_item(payment_item, transaction_hash)\n    }\n}\n\nfn build_payment_info_for_executable_item(\n    payment_item: &ExecutableDeployItem,\n    transaction_hash: TransactionHash,\n) -> Result<PaymentInfo, InvalidRequest> {\n    match payment_item {\n        ExecutableDeployItem::ModuleBytes { module_bytes, args } => {\n            let payment = if module_bytes.is_empty() {\n                return Err(InvalidRequest::UnsupportedMode(\n                    transaction_hash,\n                    \"standard payment is no longer handled by the execution engine\".to_string(),\n                ));\n            } else {\n                ExecutableItem::PaymentBytes(module_bytes.clone())\n            };\n            Ok(PaymentInfo(ExecutableInfo {\n                item: payment,\n                entry_point: DEFAULT_ENTRY_POINT.to_string(),\n                args: args.clone(),\n            }))\n        }\n        ExecutableDeployItem::StoredContractByHash {\n            hash,\n            args,\n            entry_point,\n        } => Ok(PaymentInfo(ExecutableInfo {\n            item: ExecutableItem::Invocation(TransactionInvocationTarget::ByHash(hash.value())),\n            entry_point: entry_point.clone(),\n            args: args.clone(),\n        })),\n        ExecutableDeployItem::StoredContractByName {\n            name,\n            args,\n            entry_point,\n        } => Ok(PaymentInfo(ExecutableInfo {\n            item: ExecutableItem::Invocation(TransactionInvocationTarget::ByName(name.clone())),\n            entry_point: entry_point.clone(),\n            args: args.clone(),\n        })),\n        
ExecutableDeployItem::StoredVersionedContractByHash {\n            args,\n            hash,\n            version,\n            entry_point,\n        } => Ok(PaymentInfo(ExecutableInfo {\n            item: ExecutableItem::Invocation(TransactionInvocationTarget::ByPackageHash {\n                addr: hash.value(),\n                version: *version,\n                protocol_version_major: None,\n            }),\n            entry_point: entry_point.clone(),\n            args: args.clone(),\n        })),\n        ExecutableDeployItem::StoredVersionedContractByName {\n            name,\n            version,\n            args,\n            entry_point,\n        } => Ok(PaymentInfo(ExecutableInfo {\n            item: ExecutableItem::Invocation(TransactionInvocationTarget::ByPackageName {\n                name: name.clone(),\n                version: *version,\n                protocol_version_major: None,\n            }),\n            entry_point: entry_point.clone(),\n            args: args.clone(),\n        })),\n        ExecutableDeployItem::Transfer { .. } => Err(InvalidRequest::UnexpectedVariant(\n            transaction_hash,\n            \"payment item\".to_string(),\n        )),\n    }\n}\n\nimpl TryFrom<&SessionDataV1<'_>> for PaymentInfo {\n    type Error = InvalidRequest;\n\n    fn try_from(v1_txn: &SessionDataV1) -> Result<Self, Self::Error> {\n        let transaction_hash = TransactionHash::V1(*v1_txn.hash());\n        match v1_txn.pricing_mode() {\n            mode @ PricingMode::PaymentLimited {\n                standard_payment, ..\n            } => {\n                if *standard_payment {\n                    return Err(InvalidRequest::UnsupportedMode(\n                        transaction_hash,\n                        mode.to_string(),\n                    ));\n                }\n            }\n            mode @ PricingMode::Fixed { .. } | mode @ PricingMode::Prepaid { .. 
} => {\n                return Err(InvalidRequest::UnsupportedMode(\n                    transaction_hash,\n                    mode.to_string(),\n                ));\n            }\n        };\n\n        let payment = match v1_txn.target() {\n            TransactionTarget::Session { module_bytes, .. } => {\n                if *v1_txn.entry_point() != TransactionEntryPoint::Call {\n                    return Err(InvalidRequest::InvalidEntryPoint(\n                        transaction_hash,\n                        v1_txn.entry_point().to_string(),\n                    ));\n                };\n                let item = ExecutableItem::PaymentBytes(module_bytes.clone());\n                ExecutableInfo {\n                    item,\n                    entry_point: DEFAULT_ENTRY_POINT.to_owned(),\n                    args: v1_txn.args().clone(),\n                }\n            }\n            TransactionTarget::Native | TransactionTarget::Stored { .. } => {\n                return Err(InvalidRequest::InvalidTarget(\n                    transaction_hash,\n                    v1_txn.target().to_string(),\n                ));\n            }\n        };\n\n        Ok(PaymentInfo(payment))\n    }\n}\n"
  },
  {
    "path": "execution_engine/src/execution/error.rs",
    "content": "//! Execution error and supporting code.\nuse std::str::Utf8Error;\nuse thiserror::Error;\n\nuse casper_storage::{global_state, tracking_copy::TrackingCopyError};\n\nuse casper_types::{\n    account::{AddKeyFailure, RemoveKeyFailure, SetThresholdFailure, UpdateKeyFailure},\n    bytesrepr,\n    execution::TransformError,\n    system, AccessRights, AddressableEntityHash, ApiError, ByteCodeHash, CLType, CLValueError,\n    ContractRuntimeTag, EntityVersionKey, Key, PackageHash, StoredValueTypeMismatch, URef,\n};\nuse casper_wasm::elements;\n\nuse crate::{\n    resolvers::error::ResolverError,\n    runtime::{stack, PreprocessingError},\n};\n\n/// Possible execution errors.\n#[derive(Error, Debug, Clone)]\n#[non_exhaustive]\npub enum Error {\n    /// WASM interpreter error.\n    #[error(\"Interpreter error: {}\", _0)]\n    Interpreter(String),\n    /// Storage error.\n    #[error(\"Storage error: {}\", _0)]\n    Storage(global_state::error::Error),\n    /// Failed to (de)serialize bytes.\n    #[error(\"Serialization error: {}\", _0)]\n    BytesRepr(bytesrepr::Error),\n    /// Unable to find named key.\n    #[error(\"Named key {} not found\", _0)]\n    NamedKeyNotFound(String),\n    /// Unable to find a key.\n    #[error(\"Key {} not found\", _0)]\n    KeyNotFound(Key),\n    /// Unable to find an account.\n    #[error(\"Account {:?} not found\", _0)]\n    AccountNotFound(Key),\n    /// Type mismatch error.\n    #[error(\"{}\", _0)]\n    TypeMismatch(StoredValueTypeMismatch),\n    /// Invalid access.\n    #[error(\"Invalid access rights: {}\", required)]\n    InvalidAccess {\n        /// Required access rights of the operation.\n        required: AccessRights,\n    },\n    /// Forged reference error.\n    #[error(\"Forged reference: {}\", _0)]\n    ForgedReference(URef),\n    /// Unable to find a function.\n    #[error(\"Function not found: {}\", _0)]\n    FunctionNotFound(String),\n    /// Parity WASM error.\n    #[error(\"{}\", _0)]\n    
ParityWasm(elements::Error),\n    /// Error optimizing WASM.\n    #[error(\"WASM optimizer error\")]\n    WasmOptimizer,\n    /// Execution exceeded the gas limit.\n    #[error(\"Out of gas error\")]\n    GasLimit,\n    /// A stored smart contract called a ret function.\n    #[error(\"Return\")]\n    Ret(Vec<URef>),\n    /// Error using WASM host function resolver.\n    #[error(\"Resolver error: {}\", _0)]\n    Resolver(ResolverError),\n    /// Reverts execution with a provided status\n    #[error(\"{}\", _0)]\n    Revert(ApiError),\n    /// Error adding an associated key.\n    #[error(\"{}\", _0)]\n    AddKeyFailure(AddKeyFailure),\n    /// Error removing an associated key.\n    #[error(\"{}\", _0)]\n    RemoveKeyFailure(RemoveKeyFailure),\n    /// Error updating an associated key.\n    #[error(\"{}\", _0)]\n    UpdateKeyFailure(UpdateKeyFailure),\n    /// Error setting threshold on associated key.\n    #[error(\"{}\", _0)]\n    SetThresholdFailure(SetThresholdFailure),\n    /// Error executing system contract.\n    #[error(\"{}\", _0)]\n    SystemContract(system::Error),\n    /// Weight of all used associated keys does not meet account's deploy threshold.\n    #[error(\"Deployment authorization failure\")]\n    DeploymentAuthorizationFailure,\n    /// Host buffer expected a value to be present.\n    #[error(\"Expected return value\")]\n    ExpectedReturnValue,\n    /// Error calling a host function in a wrong context.\n    #[error(\"Invalid context\")]\n    InvalidContext,\n    /// Unable to execute a deploy with invalid major protocol version.\n    #[error(\"Incompatible protocol major version. 
Expected version {expected} but actual version is {actual}\")]\n    IncompatibleProtocolMajorVersion {\n        /// Expected major version.\n        expected: u32,\n        /// Actual major version supplied.\n        actual: u32,\n    },\n    /// Error converting a CLValue.\n    #[error(\"{0}\")]\n    CLValue(CLValueError),\n    /// WASM bytes contains an unsupported \"start\" section.\n    #[error(\"Unsupported Wasm start\")]\n    UnsupportedWasmStart,\n    /// Contract package has no active contract versions.\n    #[error(\"No active contract versions for contract package\")]\n    NoActiveEntityVersions(PackageHash),\n    /// Invalid entity version supplied.\n    #[error(\"Invalid entity version: {}\", _0)]\n    InvalidEntityVersion(EntityVersionKey),\n    /// Invalid entity version supplied.\n    #[error(\"Disabled entity version: {}\", _0)]\n    DisabledEntityVersion(EntityVersionKey),\n    /// Invalid entity version supplied.\n    #[error(\"Missing entity version: {}\", _0)]\n    MissingEntityVersion(EntityVersionKey),\n    /// Contract does not have specified entry point.\n    #[error(\"No such method: {}\", _0)]\n    NoSuchMethod(String),\n    /// Contract does\n    #[error(\"Error calling a template entry point: {}\", _0)]\n    TemplateMethod(String),\n    /// Error processing WASM bytes.\n    #[error(\"Wasm preprocessing error: {}\", _0)]\n    WasmPreprocessing(PreprocessingError),\n    /// Unexpected variant of a stored value.\n    #[error(\"Unexpected variant of a stored value\")]\n    UnexpectedStoredValueVariant,\n    /// Error upgrading a locked contract package.\n    #[error(\"A locked contract cannot be upgraded\")]\n    LockedEntity(PackageHash),\n    /// Unable to find a contract by a specified hash address.\n    #[error(\"Invalid contract: {}\", _0)]\n    InvalidEntity(AddressableEntityHash),\n    /// Unable to find the WASM bytes specified by a hash address.\n    #[error(\"Invalid contract WASM: {}\", _0)]\n    InvalidByteCode(ByteCodeHash),\n   
 /// Error calling a smart contract with a missing argument.\n    #[error(\"Missing argument: {name}\")]\n    MissingArgument {\n        /// Name of the required argument.\n        name: String,\n    },\n    /// Error writing a dictionary item key which exceeded maximum allowed length.\n    #[error(\"Dictionary item key exceeded maximum length\")]\n    DictionaryItemKeyExceedsLength,\n    /// Missing system contract hash.\n    #[error(\"Missing system contract hash: {0}\")]\n    MissingSystemContractHash(String),\n    /// An attempt to push to the runtime stack which is already at the maximum height.\n    #[error(\"Runtime stack overflow\")]\n    RuntimeStackOverflow,\n    /// The runtime stack is `None`.\n    #[error(\"Runtime stack missing\")]\n    MissingRuntimeStack,\n    /// Contract is disabled.\n    #[error(\"Contract is disabled\")]\n    DisabledEntity(AddressableEntityHash),\n    /// Transform error.\n    #[error(transparent)]\n    Transform(TransformError),\n    /// Invalid key\n    #[error(\"Invalid key {0}\")]\n    UnexpectedKeyVariant(Key),\n    /// Failed to transfer tokens on a private chain.\n    #[error(\"Failed to transfer with unrestricted transfers disabled\")]\n    DisabledUnrestrictedTransfers,\n    /// Storage error.\n    #[error(\"Tracking copy error: {0}\")]\n    TrackingCopy(TrackingCopyError),\n    /// Weight of all used associated keys does not meet entity's upgrade threshold.\n    #[error(\"Deployment authorization failure\")]\n    UpgradeAuthorizationFailure,\n    /// The EntryPoints contains an invalid entry.\n    #[error(\"The EntryPoints contains an invalid entry\")]\n    InvalidEntryPointType,\n    /// Invalid operation.\n    #[error(\"The imputed operation is invalid\")]\n    InvalidImputedOperation,\n    /// Invalid string encoding.\n    #[error(\"Invalid UTF-8 string encoding: {0}\")]\n    InvalidUtf8Encoding(Utf8Error),\n    /// Incompatible transaction runtime.\n    #[error(\"Incompatible runtime: {0}\")]\n    
IncompatibleRuntime(ContractRuntimeTag),\n    /// No matching entity version key.\n    #[error(\"No matching entity version key\")]\n    NoMatchingEntityVersionKey,\n    /// Ambiguous entity version and unable to determine entity version key.\n    #[error(\"Ambiguous entity version\")]\n    AmbiguousEntityVersion,\n}\n\nimpl From<PreprocessingError> for Error {\n    fn from(error: PreprocessingError) -> Self {\n        Error::WasmPreprocessing(error)\n    }\n}\n\nimpl From<casper_wasm_utils::OptimizerError> for Error {\n    fn from(_optimizer_error: casper_wasm_utils::OptimizerError) -> Self {\n        Error::WasmOptimizer\n    }\n}\n\nimpl Error {\n    /// Returns new type mismatch error.\n    pub fn type_mismatch(expected: CLType, found: CLType) -> Error {\n        Error::TypeMismatch(StoredValueTypeMismatch::new(\n            format!(\"{:?}\", expected),\n            format!(\"{:?}\", found),\n        ))\n    }\n}\n\nimpl casper_wasmi::HostError for Error {}\n\nimpl From<casper_wasmi::Error> for Error {\n    fn from(error: casper_wasmi::Error) -> Self {\n        match error\n            .as_host_error()\n            .and_then(|host_error| host_error.downcast_ref::<Error>())\n        {\n            Some(error) => error.clone(),\n            None => Error::Interpreter(error.into()),\n        }\n    }\n}\n\nimpl From<global_state::error::Error> for Error {\n    fn from(e: global_state::error::Error) -> Self {\n        Error::Storage(e)\n    }\n}\n\nimpl From<bytesrepr::Error> for Error {\n    fn from(e: bytesrepr::Error) -> Self {\n        Error::BytesRepr(e)\n    }\n}\n\nimpl From<elements::Error> for Error {\n    fn from(e: elements::Error) -> Self {\n        Error::ParityWasm(e)\n    }\n}\n\nimpl From<ResolverError> for Error {\n    fn from(err: ResolverError) -> Self {\n        Error::Resolver(err)\n    }\n}\n\nimpl From<AddKeyFailure> for Error {\n    fn from(err: AddKeyFailure) -> Self {\n        Error::AddKeyFailure(err)\n    }\n}\n\nimpl 
From<RemoveKeyFailure> for Error {\n    fn from(err: RemoveKeyFailure) -> Self {\n        Error::RemoveKeyFailure(err)\n    }\n}\n\nimpl From<UpdateKeyFailure> for Error {\n    fn from(err: UpdateKeyFailure) -> Self {\n        Error::UpdateKeyFailure(err)\n    }\n}\n\nimpl From<SetThresholdFailure> for Error {\n    fn from(err: SetThresholdFailure) -> Self {\n        Error::SetThresholdFailure(err)\n    }\n}\n\nimpl From<system::Error> for Error {\n    fn from(error: system::Error) -> Self {\n        Error::SystemContract(error)\n    }\n}\n\nimpl From<CLValueError> for Error {\n    fn from(e: CLValueError) -> Self {\n        Error::CLValue(e)\n    }\n}\n\nimpl From<stack::RuntimeStackOverflow> for Error {\n    fn from(_: stack::RuntimeStackOverflow) -> Self {\n        Error::RuntimeStackOverflow\n    }\n}\n\nimpl From<TrackingCopyError> for Error {\n    fn from(e: TrackingCopyError) -> Self {\n        Error::TrackingCopy(e)\n    }\n}\n"
  },
  {
    "path": "execution_engine/src/execution/executor.rs",
    "content": "use std::{cell::RefCell, collections::BTreeSet, rc::Rc};\n\nuse casper_storage::{\n    global_state::{error::Error as GlobalStateError, state::StateReader},\n    tracking_copy::TrackingCopy,\n    AddressGenerator,\n};\nuse casper_types::{\n    account::AccountHash, contract_messages::Messages, contracts::NamedKeys, execution::Effects,\n    ContextAccessRights, EntityAddr, EntryPointType, Gas, Key, Phase, RuntimeArgs,\n    RuntimeFootprint, StoredValue, TransactionHash, U512,\n};\n\nuse crate::{\n    engine_state::{execution_kind::ExecutionKind, BlockInfo, EngineConfig, WasmV1Result},\n    execution::ExecError,\n    runtime::{Runtime, RuntimeStack},\n    runtime_context::{AllowInstallUpgrade, RuntimeContext},\n};\n\nconst ARG_AMOUNT: &str = \"amount\";\n\nfn try_get_amount(runtime_args: &RuntimeArgs) -> Result<U512, ExecError> {\n    runtime_args\n        .try_get_number(ARG_AMOUNT)\n        .map_err(ExecError::from)\n}\n\n/// Executor object deals with execution of WASM modules.\npub struct Executor {\n    config: EngineConfig,\n}\n\nimpl Executor {\n    /// Creates new executor object.\n    pub fn new(config: EngineConfig) -> Self {\n        Executor { config }\n    }\n\n    /// Executes a WASM module.\n    ///\n    /// This method checks if a given contract hash is a system contract, and then short circuits to\n    /// a specific native implementation of it. 
Otherwise, a supplied WASM module is executed.\n    #[allow(clippy::too_many_arguments)]\n    pub(crate) fn exec<R>(\n        &self,\n        execution_kind: ExecutionKind,\n        args: RuntimeArgs,\n        entity_addr: EntityAddr,\n        runtime_footprint: Rc<RefCell<RuntimeFootprint>>,\n        named_keys: &mut NamedKeys,\n        access_rights: ContextAccessRights,\n        authorization_keys: BTreeSet<AccountHash>,\n        account_hash: AccountHash,\n        block_info: BlockInfo,\n        txn_hash: TransactionHash,\n        gas_limit: Gas,\n        tracking_copy: Rc<RefCell<TrackingCopy<R>>>,\n        phase: Phase,\n        stack: RuntimeStack,\n    ) -> WasmV1Result\n    where\n        R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n    {\n        let spending_limit: U512 = match try_get_amount(&args) {\n            Ok(spending_limit) => spending_limit,\n            Err(error) => {\n                return WasmV1Result::new(\n                    gas_limit,\n                    Gas::zero(),\n                    Effects::default(),\n                    Vec::default(),\n                    Vec::default(),\n                    Some(error.into()),\n                    None,\n                    None,\n                );\n            }\n        };\n\n        let address_generator = {\n            let generator = AddressGenerator::new(txn_hash.as_ref(), phase);\n            Rc::new(RefCell::new(generator))\n        };\n\n        let context_key = if self.config.enable_entity {\n            Key::AddressableEntity(entity_addr)\n        } else {\n            match entity_addr {\n                EntityAddr::System(hash) | EntityAddr::SmartContract(hash) => Key::Hash(hash),\n                EntityAddr::Account(hash) => Key::Account(AccountHash::new(hash)),\n            }\n        };\n\n        let allow_install_upgrade = match execution_kind {\n            ExecutionKind::InstallerUpgrader(_)\n            | ExecutionKind::Stored { .. 
}\n            | ExecutionKind::VersionedCall { .. }\n            | ExecutionKind::Deploy(_) => AllowInstallUpgrade::Allowed,\n            ExecutionKind::Standard(_) => AllowInstallUpgrade::Forbidden,\n        };\n\n        let context = self.create_runtime_context(\n            named_keys,\n            runtime_footprint,\n            context_key,\n            authorization_keys,\n            access_rights,\n            account_hash,\n            address_generator,\n            tracking_copy,\n            block_info,\n            txn_hash,\n            phase,\n            args.clone(),\n            gas_limit,\n            spending_limit,\n            EntryPointType::Caller,\n            allow_install_upgrade,\n        );\n\n        let mut runtime = Runtime::new(context);\n\n        let result = match execution_kind {\n            ExecutionKind::Standard(module_bytes)\n            | ExecutionKind::InstallerUpgrader(module_bytes)\n            | ExecutionKind::Deploy(module_bytes) => {\n                runtime.execute_module_bytes(module_bytes, stack)\n            }\n            ExecutionKind::Stored {\n                entity_hash,\n                entry_point,\n            } => {\n                // These args are passed through here as they are required to construct the new\n                // `Runtime` during the contract's execution (i.e. 
inside\n                // `Runtime::execute_contract`).\n                runtime.call_contract_with_stack(entity_hash, &entry_point, args, stack)\n            }\n            ExecutionKind::VersionedCall {\n                package_hash,\n                entity_version,\n                protocol_version_major,\n                entry_point,\n            } => runtime.call_package_version_with_stack(\n                package_hash,\n                protocol_version_major,\n                entity_version,\n                entry_point,\n                args,\n                stack,\n            ),\n        };\n        match result {\n            Ok(ret) => WasmV1Result::new(\n                gas_limit,\n                runtime.context().gas_counter(),\n                runtime.context().effects(),\n                runtime.context().transfers().to_owned(),\n                runtime.context().messages(),\n                None,\n                Some(ret),\n                Some(runtime.context().cache()),\n            ),\n            Err(error) => WasmV1Result::new(\n                gas_limit,\n                runtime.context().gas_counter(),\n                Effects::new(),\n                vec![],\n                Messages::new(),\n                Some(error.into()),\n                None,\n                None,\n            ),\n        }\n    }\n\n    /// Creates new runtime context.\n    #[allow(clippy::too_many_arguments)]\n    fn create_runtime_context<'a, R>(\n        &self,\n        named_keys: &'a mut NamedKeys,\n        runtime_footprint: Rc<RefCell<RuntimeFootprint>>,\n        context_key: Key,\n        authorization_keys: BTreeSet<AccountHash>,\n        access_rights: ContextAccessRights,\n        account_hash: AccountHash,\n        address_generator: Rc<RefCell<AddressGenerator>>,\n        tracking_copy: Rc<RefCell<TrackingCopy<R>>>,\n        block_info: BlockInfo,\n        txn_hash: TransactionHash,\n        phase: Phase,\n        runtime_args: RuntimeArgs,\n      
  gas_limit: Gas,\n        remaining_spending_limit: U512,\n        entry_point_type: EntryPointType,\n        allow_install_upgrade: AllowInstallUpgrade,\n    ) -> RuntimeContext<'a, R>\n    where\n        R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n    {\n        let gas_counter = Gas::default();\n        let transfers = Vec::default();\n\n        RuntimeContext::new(\n            named_keys,\n            runtime_footprint,\n            context_key,\n            authorization_keys,\n            access_rights,\n            account_hash,\n            address_generator,\n            tracking_copy,\n            self.config.clone(),\n            block_info,\n            txn_hash,\n            phase,\n            runtime_args,\n            gas_limit,\n            gas_counter,\n            transfers,\n            remaining_spending_limit,\n            entry_point_type,\n            allow_install_upgrade,\n        )\n    }\n}\n"
  },
  {
    "path": "execution_engine/src/execution/mod.rs",
    "content": "//! Code execution.\nmod error;\n#[macro_use]\nmod executor;\n\npub use self::error::Error as ExecError;\npub(crate) use self::executor::Executor;\n"
  },
  {
    "path": "execution_engine/src/lib.rs",
    "content": "//! The engine which executes smart contracts on the Casper network.\n\n#![doc(html_root_url = \"https://docs.rs/casper-execution-engine/9.0.0\")]\n#![doc(\n    html_favicon_url = \"https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon_48.png\",\n    html_logo_url = \"https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon.png\",\n    test(attr(deny(warnings)))\n)]\n#![warn(\n    missing_docs,\n    trivial_casts,\n    trivial_numeric_casts,\n    unused_qualifications\n)]\n#![cfg_attr(docsrs, feature(doc_auto_cfg))]\n\npub mod engine_state;\npub mod execution;\npub mod resolvers;\npub mod runtime;\npub mod runtime_context;\n"
  },
  {
    "path": "execution_engine/src/resolvers/error.rs",
    "content": "//! Errors that may be emitted by a host function resolver.\nuse thiserror::Error;\n\nuse casper_types::ProtocolVersion;\n\n/// Error conditions of a host function resolver.\n#[derive(Error, Debug, Copy, Clone)]\n#[non_exhaustive]\npub enum ResolverError {\n    /// Unknown protocol version.\n    #[error(\"Unknown protocol version: {}\", _0)]\n    UnknownProtocolVersion(ProtocolVersion),\n    /// WASM module does not export a memory section.\n    #[error(\"No imported memory\")]\n    NoImportedMemory,\n}\n"
  },
  {
    "path": "execution_engine/src/resolvers/memory_resolver.rs",
    "content": "//! This module contains resolver of a memory section of the WASM code.\nuse casper_wasmi::MemoryRef;\n\nuse super::error::ResolverError;\n\n/// This trait takes care of returning an instance of allocated memory.\n///\n/// This happens once the WASM program tries to resolve \"memory\". Whenever\n/// contract didn't request a memory this method should return an Error.\npub trait MemoryResolver {\n    /// Returns a memory instance.\n    fn memory_ref(&self) -> Result<MemoryRef, ResolverError>;\n}\n"
  },
  {
    "path": "execution_engine/src/resolvers/mod.rs",
    "content": "//! This module is responsible for resolving host functions from within the WASM engine.\npub mod error;\npub mod memory_resolver;\npub(crate) mod v1_function_index;\nmod v1_resolver;\n\nuse casper_wasmi::ModuleImportResolver;\n\nuse casper_types::ProtocolVersion;\n\nuse self::error::ResolverError;\nuse super::engine_state::EngineConfig;\nuse crate::resolvers::memory_resolver::MemoryResolver;\n\n/// Creates a module resolver for given protocol version.\n///\n/// * `protocol_version` Version of the protocol. Can't be lower than 1.\npub(crate) fn create_module_resolver(\n    _protocol_version: ProtocolVersion,\n    engine_config: &EngineConfig,\n) -> Result<impl ModuleImportResolver + MemoryResolver, ResolverError> {\n    Ok(v1_resolver::RuntimeModuleImportResolver::new(\n        engine_config.wasm_config().v1().max_memory(),\n    ))\n    // if in future it is necessary to pick a different resolver\n    // based on the protocol version, modify this logic accordingly\n    // if there is an unsupported / unknown protocol version return the following error:\n    // Err(ResolverError::UnknownProtocolVersion(protocol_version))\n}\n\n#[cfg(test)]\nmod tests {\n    use casper_types::ProtocolVersion;\n\n    use super::*;\n\n    #[test]\n    fn resolve_invalid_module() {\n        // NOTE: we are currently not enforcing underlying logic\n        assert!(\n            create_module_resolver(ProtocolVersion::default(), &EngineConfig::default()).is_ok()\n        );\n    }\n\n    #[test]\n    fn protocol_version_1_always_resolves() {\n        assert!(create_module_resolver(ProtocolVersion::V1_0_0, &EngineConfig::default()).is_ok());\n    }\n}\n"
  },
  {
    "path": "execution_engine/src/resolvers/v1_function_index.rs",
    "content": "//! WASM host function resolver for protocol version 1.x.x.\nuse num_derive::{FromPrimitive, ToPrimitive};\nuse num_traits::{FromPrimitive, ToPrimitive};\n\n/// Enum representing unique IDs of host functions supported in major version 1.\n#[derive(Debug, PartialEq, FromPrimitive, ToPrimitive, Clone, Copy)]\n#[repr(usize)]\npub(crate) enum FunctionIndex {\n    WriteFuncIndex,\n    ReadFuncIndex,\n    AddFuncIndex,\n    NewFuncIndex,\n    RetFuncIndex,\n    CallContractFuncIndex,\n    GetKeyFuncIndex,\n    GasFuncIndex,\n    HasKeyFuncIndex,\n    PutKeyFuncIndex,\n    IsValidURefFnIndex,\n    RevertFuncIndex,\n    AddAssociatedKeyFuncIndex,\n    RemoveAssociatedKeyFuncIndex,\n    UpdateAssociatedKeyFuncIndex,\n    SetActionThresholdFuncIndex,\n    LoadNamedKeysFuncIndex,\n    RemoveKeyFuncIndex,\n    GetCallerIndex,\n    GetBlocktimeIndex,\n    CreatePurseIndex,\n    TransferToAccountIndex,\n    TransferFromPurseToAccountIndex,\n    TransferFromPurseToPurseIndex,\n    GetBalanceIndex,\n    GetPhaseIndex,\n    GetSystemContractIndex,\n    GetMainPurseIndex,\n    ReadHostBufferIndex,\n    CreateContractPackageAtHash,\n    AddContractVersion,\n    AddContractVersionWithMessageTopics,\n    AddPackageVersionWithMessageTopics,\n    DisableContractVersion,\n    CallVersionedContract,\n    CreateContractUserGroup,\n    #[cfg(feature = \"test-support\")]\n    PrintIndex,\n    GetRuntimeArgsizeIndex,\n    GetRuntimeArgIndex,\n    RemoveContractUserGroupIndex,\n    ExtendContractUserGroupURefsIndex,\n    RemoveContractUserGroupURefsIndex,\n    Blake2b,\n    NewDictionaryFuncIndex,\n    DictionaryGetFuncIndex,\n    DictionaryPutFuncIndex,\n    LoadCallStack,\n    LoadAuthorizationKeys,\n    RandomBytes,\n    DictionaryReadFuncIndex,\n    EnableContractVersion,\n    ManageMessageTopic,\n    EmitMessage,\n    LoadCallerInformation,\n    GetBlockInfoIndex,\n    GenericHash,\n    RecoverSecp256k1,\n    VerifySignature,\n    CallPackageVersion,\n}\n\nimpl 
From<FunctionIndex> for usize {\n    fn from(index: FunctionIndex) -> usize {\n        // NOTE: This can't fail as `FunctionIndex` is represented by usize,\n        // so this serves mostly as a syntax sugar.\n        index.to_usize().unwrap()\n    }\n}\n\nimpl TryFrom<usize> for FunctionIndex {\n    type Error = &'static str;\n    fn try_from(value: usize) -> Result<Self, Self::Error> {\n        FromPrimitive::from_usize(value).ok_or(\"Invalid function index\")\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::FunctionIndex;\n\n    #[test]\n    fn primitive_to_enum() {\n        FunctionIndex::try_from(19).expect(\"Unable to create enum from number\");\n    }\n\n    #[test]\n    fn enum_to_primitive() {\n        let element = FunctionIndex::UpdateAssociatedKeyFuncIndex;\n        let _primitive: usize = element.into();\n    }\n\n    #[test]\n    fn invalid_index() {\n        assert!(FunctionIndex::try_from(123_456_789usize).is_err());\n    }\n}\n"
  },
  {
    "path": "execution_engine/src/resolvers/v1_resolver.rs",
    "content": "use std::cell::RefCell;\n\nuse casper_wasmi::{\n    memory_units::Pages, Error as InterpreterError, FuncInstance, FuncRef, MemoryDescriptor,\n    MemoryInstance, MemoryRef, ModuleImportResolver, Signature, ValueType,\n};\n\nuse super::{\n    error::ResolverError, memory_resolver::MemoryResolver, v1_function_index::FunctionIndex,\n};\n\npub(crate) struct RuntimeModuleImportResolver {\n    memory: RefCell<Option<MemoryRef>>,\n    max_memory: u32,\n}\n\nimpl RuntimeModuleImportResolver {\n    pub(crate) fn new(max_memory: u32) -> Self {\n        Self {\n            memory: RefCell::new(None),\n            max_memory,\n        }\n    }\n}\n\nimpl MemoryResolver for RuntimeModuleImportResolver {\n    fn memory_ref(&self) -> Result<MemoryRef, ResolverError> {\n        self.memory\n            .borrow()\n            .as_ref()\n            .map(Clone::clone)\n            .ok_or(ResolverError::NoImportedMemory)\n    }\n}\n\nimpl ModuleImportResolver for RuntimeModuleImportResolver {\n    fn resolve_func(\n        &self,\n        field_name: &str,\n        _signature: &Signature,\n    ) -> Result<FuncRef, InterpreterError> {\n        let func_ref = match field_name {\n            \"casper_read_value\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 3][..], Some(ValueType::I32)),\n                FunctionIndex::ReadFuncIndex.into(),\n            ),\n            \"casper_load_named_keys\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 2][..], Some(ValueType::I32)),\n                FunctionIndex::LoadNamedKeysFuncIndex.into(),\n            ),\n            \"casper_write\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 4][..], None),\n                FunctionIndex::WriteFuncIndex.into(),\n            ),\n            \"casper_add\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 4][..], None),\n                
FunctionIndex::AddFuncIndex.into(),\n            ),\n            \"casper_new_uref\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 3][..], None),\n                FunctionIndex::NewFuncIndex.into(),\n            ),\n            \"casper_ret\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 2][..], None),\n                FunctionIndex::RetFuncIndex.into(),\n            ),\n            \"casper_get_key\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 5][..], Some(ValueType::I32)),\n                FunctionIndex::GetKeyFuncIndex.into(),\n            ),\n            \"casper_has_key\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 2][..], Some(ValueType::I32)),\n                FunctionIndex::HasKeyFuncIndex.into(),\n            ),\n            \"casper_put_key\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 4][..], None),\n                FunctionIndex::PutKeyFuncIndex.into(),\n            ),\n            \"gas\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 1][..], None),\n                FunctionIndex::GasFuncIndex.into(),\n            ),\n            \"casper_is_valid_uref\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 2][..], Some(ValueType::I32)),\n                FunctionIndex::IsValidURefFnIndex.into(),\n            ),\n            \"casper_revert\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 1][..], None),\n                FunctionIndex::RevertFuncIndex.into(),\n            ),\n            \"casper_add_associated_key\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 3][..], Some(ValueType::I32)),\n                FunctionIndex::AddAssociatedKeyFuncIndex.into(),\n            ),\n            \"casper_remove_associated_key\" => FuncInstance::alloc_host(\n                
Signature::new(&[ValueType::I32; 2][..], Some(ValueType::I32)),\n                FunctionIndex::RemoveAssociatedKeyFuncIndex.into(),\n            ),\n            \"casper_update_associated_key\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 3][..], Some(ValueType::I32)),\n                FunctionIndex::UpdateAssociatedKeyFuncIndex.into(),\n            ),\n            \"casper_set_action_threshold\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 2][..], Some(ValueType::I32)),\n                FunctionIndex::SetActionThresholdFuncIndex.into(),\n            ),\n            \"casper_remove_key\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 2][..], None),\n                FunctionIndex::RemoveKeyFuncIndex.into(),\n            ),\n            \"casper_get_caller\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 1][..], Some(ValueType::I32)),\n                FunctionIndex::GetCallerIndex.into(),\n            ),\n            \"casper_get_blocktime\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 1][..], None),\n                FunctionIndex::GetBlocktimeIndex.into(),\n            ),\n            \"casper_create_purse\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 2][..], Some(ValueType::I32)),\n                FunctionIndex::CreatePurseIndex.into(),\n            ),\n            \"casper_transfer_to_account\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 7][..], Some(ValueType::I32)),\n                FunctionIndex::TransferToAccountIndex.into(),\n            ),\n            \"casper_transfer_from_purse_to_account\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 9][..], Some(ValueType::I32)),\n                FunctionIndex::TransferFromPurseToAccountIndex.into(),\n            ),\n            
\"casper_transfer_from_purse_to_purse\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 8][..], Some(ValueType::I32)),\n                FunctionIndex::TransferFromPurseToPurseIndex.into(),\n            ),\n            \"casper_get_balance\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 3][..], Some(ValueType::I32)),\n                FunctionIndex::GetBalanceIndex.into(),\n            ),\n            \"casper_get_phase\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 1][..], None),\n                FunctionIndex::GetPhaseIndex.into(),\n            ),\n            \"casper_get_system_contract\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 3][..], Some(ValueType::I32)),\n                FunctionIndex::GetSystemContractIndex.into(),\n            ),\n            \"casper_get_main_purse\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 1][..], None),\n                FunctionIndex::GetMainPurseIndex.into(),\n            ),\n            \"casper_read_host_buffer\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 3][..], Some(ValueType::I32)),\n                FunctionIndex::ReadHostBufferIndex.into(),\n            ),\n            \"casper_create_contract_package_at_hash\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 3][..], None),\n                FunctionIndex::CreateContractPackageAtHash.into(),\n            ),\n            \"casper_create_contract_user_group\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 8][..], Some(ValueType::I32)),\n                FunctionIndex::CreateContractUserGroup.into(),\n            ),\n            \"casper_add_contract_version\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 10][..], Some(ValueType::I32)),\n                
FunctionIndex::AddContractVersion.into(),\n            ),\n            \"casper_add_contract_version_with_message_topics\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 11][..], Some(ValueType::I32)),\n                FunctionIndex::AddContractVersionWithMessageTopics.into(),\n            ),\n            \"casper_add_package_version_with_message_topics\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 11][..], Some(ValueType::I32)),\n                FunctionIndex::AddPackageVersionWithMessageTopics.into(),\n            ),\n            \"casper_disable_contract_version\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 4][..], Some(ValueType::I32)),\n                FunctionIndex::DisableContractVersion.into(),\n            ),\n            \"casper_call_contract\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 7][..], Some(ValueType::I32)),\n                FunctionIndex::CallContractFuncIndex.into(),\n            ),\n            \"casper_call_versioned_contract\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 9][..], Some(ValueType::I32)),\n                FunctionIndex::CallVersionedContract.into(),\n            ),\n            \"casper_get_named_arg_size\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 3][..], Some(ValueType::I32)),\n                FunctionIndex::GetRuntimeArgsizeIndex.into(),\n            ),\n            \"casper_get_named_arg\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 4][..], Some(ValueType::I32)),\n                FunctionIndex::GetRuntimeArgIndex.into(),\n            ),\n            \"casper_remove_contract_user_group\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 4][..], Some(ValueType::I32)),\n                FunctionIndex::RemoveContractUserGroupIndex.into(),\n            ),\n      
      \"casper_provision_contract_user_group_uref\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 5][..], Some(ValueType::I32)),\n                FunctionIndex::ExtendContractUserGroupURefsIndex.into(),\n            ),\n            \"casper_remove_contract_user_group_urefs\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 6][..], Some(ValueType::I32)),\n                FunctionIndex::RemoveContractUserGroupURefsIndex.into(),\n            ),\n            \"casper_blake2b\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 4][..], Some(ValueType::I32)),\n                FunctionIndex::Blake2b.into(),\n            ),\n            \"casper_load_call_stack\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 2][..], Some(ValueType::I32)),\n                FunctionIndex::LoadCallStack.into(),\n            ),\n            \"casper_load_caller_information\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 3][..], Some(ValueType::I32)),\n                FunctionIndex::LoadCallerInformation.into(),\n            ),\n            #[cfg(feature = \"test-support\")]\n            \"casper_print\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 2][..], None),\n                FunctionIndex::PrintIndex.into(),\n            ),\n            \"casper_dictionary_get\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 5][..], Some(ValueType::I32)),\n                FunctionIndex::DictionaryGetFuncIndex.into(),\n            ),\n            \"casper_dictionary_read\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 3][..], Some(ValueType::I32)),\n                FunctionIndex::DictionaryReadFuncIndex.into(),\n            ),\n            \"casper_dictionary_put\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 6][..], 
Some(ValueType::I32)),\n                FunctionIndex::DictionaryPutFuncIndex.into(),\n            ),\n            \"casper_new_dictionary\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 1][..], Some(ValueType::I32)),\n                FunctionIndex::NewDictionaryFuncIndex.into(),\n            ),\n            \"casper_load_authorization_keys\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 2][..], Some(ValueType::I32)),\n                FunctionIndex::LoadAuthorizationKeys.into(),\n            ),\n            \"casper_random_bytes\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 2][..], Some(ValueType::I32)),\n                FunctionIndex::RandomBytes.into(),\n            ),\n            \"casper_enable_contract_version\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 4][..], Some(ValueType::I32)),\n                FunctionIndex::EnableContractVersion.into(),\n            ),\n            \"casper_manage_message_topic\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 4][..], Some(ValueType::I32)),\n                FunctionIndex::ManageMessageTopic.into(),\n            ),\n            \"casper_emit_message\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 4][..], Some(ValueType::I32)),\n                FunctionIndex::EmitMessage.into(),\n            ),\n            \"casper_get_block_info\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 2][..], None),\n                FunctionIndex::GetBlockInfoIndex.into(),\n            ),\n            \"casper_generic_hash\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 5][..], Some(ValueType::I32)),\n                FunctionIndex::GenericHash.into(),\n            ),\n            \"casper_recover_secp256k1\" => FuncInstance::alloc_host(\n                
Signature::new(&[ValueType::I32; 6][..], Some(ValueType::I32)),\n                FunctionIndex::RecoverSecp256k1.into(),\n            ),\n            \"casper_verify_signature\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 6][..], Some(ValueType::I32)),\n                FunctionIndex::VerifySignature.into(),\n            ),\n            \"casper_call_package_version\" => FuncInstance::alloc_host(\n                Signature::new(&[ValueType::I32; 11][..], Some(ValueType::I32)),\n                FunctionIndex::CallPackageVersion.into(),\n            ),\n            _ => {\n                return Err(InterpreterError::Function(format!(\n                    \"host module doesn't export function with name {}\",\n                    field_name\n                )));\n            }\n        };\n        Ok(func_ref)\n    }\n\n    fn resolve_memory(\n        &self,\n        field_name: &str,\n        descriptor: &MemoryDescriptor,\n    ) -> Result<MemoryRef, InterpreterError> {\n        if field_name == \"memory\" {\n            match &mut *self.memory.borrow_mut() {\n                Some(_) => {\n                    // Even though most wat -> wasm compilers don't allow multiple memory entries,\n                    // we should make sure we won't accidentally allocate twice.\n                    Err(InterpreterError::Instantiation(\n                        \"Memory is already instantiated\".into(),\n                    ))\n                }\n                memory_ref @ None => {\n                    // Any memory entry in the wasm file without max specified is changed into an\n                    // entry with hardcoded max value. 
This way `maximum` below is never\n                    // unspecified, but for safety reasons we'll still default it.\n                    let descriptor_max = descriptor.maximum().unwrap_or(self.max_memory);\n                    // Checks if wasm's memory entry has too much initial memory or non-default max\n                    // memory pages exceeds the limit.\n                    if descriptor.initial() > descriptor_max || descriptor_max > self.max_memory {\n                        return Err(InterpreterError::Instantiation(\n                            \"Module requested too much memory\".into(),\n                        ));\n                    }\n                    // Note: each \"page\" is 64 KiB\n                    let mem = MemoryInstance::alloc(\n                        Pages(descriptor.initial() as usize),\n                        descriptor.maximum().map(|x| Pages(x as usize)),\n                    )?;\n                    *memory_ref = Some(mem.clone());\n                    Ok(mem)\n                }\n            }\n        } else {\n            Err(InterpreterError::Instantiation(\n                \"Memory imported under unknown name\".to_owned(),\n            ))\n        }\n    }\n}\n"
  },
  {
    "path": "execution_engine/src/runtime/args.rs",
    "content": "use casper_wasmi::{FromValue, RuntimeArgs, Trap};\n\npub(crate) trait Args\nwhere\n    Self: Sized,\n{\n    fn parse(args: RuntimeArgs) -> Result<Self, Trap>;\n}\n\nimpl<T1> Args for (T1,)\nwhere\n    T1: FromValue + Sized,\n{\n    fn parse(args: RuntimeArgs) -> Result<Self, Trap> {\n        let a0: T1 = args.nth_checked(0)?;\n        Ok((a0,))\n    }\n}\n\nimpl<T1, T2> Args for (T1, T2)\nwhere\n    T1: FromValue + Sized,\n    T2: FromValue + Sized,\n{\n    fn parse(args: RuntimeArgs) -> Result<Self, Trap> {\n        let a0: T1 = args.nth_checked(0)?;\n        let a1: T2 = args.nth_checked(1)?;\n        Ok((a0, a1))\n    }\n}\n\nimpl<T1, T2, T3> Args for (T1, T2, T3)\nwhere\n    T1: FromValue + Sized,\n    T2: FromValue + Sized,\n    T3: FromValue + Sized,\n{\n    fn parse(args: RuntimeArgs) -> Result<Self, Trap> {\n        let a0: T1 = args.nth_checked(0)?;\n        let a1: T2 = args.nth_checked(1)?;\n        let a2: T3 = args.nth_checked(2)?;\n        Ok((a0, a1, a2))\n    }\n}\n\nimpl<T1, T2, T3, T4> Args for (T1, T2, T3, T4)\nwhere\n    T1: FromValue + Sized,\n    T2: FromValue + Sized,\n    T3: FromValue + Sized,\n    T4: FromValue + Sized,\n{\n    fn parse(args: RuntimeArgs) -> Result<Self, Trap> {\n        let a0: T1 = args.nth_checked(0)?;\n        let a1: T2 = args.nth_checked(1)?;\n        let a2: T3 = args.nth_checked(2)?;\n        let a3: T4 = args.nth_checked(3)?;\n        Ok((a0, a1, a2, a3))\n    }\n}\n\nimpl<T1, T2, T3, T4, T5> Args for (T1, T2, T3, T4, T5)\nwhere\n    T1: FromValue + Sized,\n    T2: FromValue + Sized,\n    T3: FromValue + Sized,\n    T4: FromValue + Sized,\n    T5: FromValue + Sized,\n{\n    fn parse(args: RuntimeArgs) -> Result<Self, Trap> {\n        let a0: T1 = args.nth_checked(0)?;\n        let a1: T2 = args.nth_checked(1)?;\n        let a2: T3 = args.nth_checked(2)?;\n        let a3: T4 = args.nth_checked(3)?;\n        let a4: T5 = args.nth_checked(4)?;\n        Ok((a0, a1, a2, a3, a4))\n    }\n}\n\nimpl<T1, 
T2, T3, T4, T5, T6> Args for (T1, T2, T3, T4, T5, T6)\nwhere\n    T1: FromValue + Sized,\n    T2: FromValue + Sized,\n    T3: FromValue + Sized,\n    T4: FromValue + Sized,\n    T5: FromValue + Sized,\n    T6: FromValue + Sized,\n{\n    fn parse(args: RuntimeArgs) -> Result<Self, Trap> {\n        let a0: T1 = args.nth_checked(0)?;\n        let a1: T2 = args.nth_checked(1)?;\n        let a2: T3 = args.nth_checked(2)?;\n        let a3: T4 = args.nth_checked(3)?;\n        let a4: T5 = args.nth_checked(4)?;\n        let a5: T6 = args.nth_checked(5)?;\n        Ok((a0, a1, a2, a3, a4, a5))\n    }\n}\n\nimpl<T1, T2, T3, T4, T5, T6, T7> Args for (T1, T2, T3, T4, T5, T6, T7)\nwhere\n    T1: FromValue + Sized,\n    T2: FromValue + Sized,\n    T3: FromValue + Sized,\n    T4: FromValue + Sized,\n    T5: FromValue + Sized,\n    T6: FromValue + Sized,\n    T7: FromValue + Sized,\n{\n    fn parse(args: RuntimeArgs) -> Result<Self, Trap> {\n        let a0: T1 = args.nth_checked(0)?;\n        let a1: T2 = args.nth_checked(1)?;\n        let a2: T3 = args.nth_checked(2)?;\n        let a3: T4 = args.nth_checked(3)?;\n        let a4: T5 = args.nth_checked(4)?;\n        let a5: T6 = args.nth_checked(5)?;\n        let a6: T7 = args.nth_checked(6)?;\n        Ok((a0, a1, a2, a3, a4, a5, a6))\n    }\n}\n\nimpl<T1, T2, T3, T4, T5, T6, T7, T8> Args for (T1, T2, T3, T4, T5, T6, T7, T8)\nwhere\n    T1: FromValue + Sized,\n    T2: FromValue + Sized,\n    T3: FromValue + Sized,\n    T4: FromValue + Sized,\n    T5: FromValue + Sized,\n    T6: FromValue + Sized,\n    T7: FromValue + Sized,\n    T8: FromValue + Sized,\n{\n    fn parse(args: RuntimeArgs) -> Result<Self, Trap> {\n        let a0: T1 = args.nth_checked(0)?;\n        let a1: T2 = args.nth_checked(1)?;\n        let a2: T3 = args.nth_checked(2)?;\n        let a3: T4 = args.nth_checked(3)?;\n        let a4: T5 = args.nth_checked(4)?;\n        let a5: T6 = args.nth_checked(5)?;\n        let a6: T7 = args.nth_checked(6)?;\n        let a7: T8 
= args.nth_checked(7)?;\n        Ok((a0, a1, a2, a3, a4, a5, a6, a7))\n    }\n}\n\nimpl<T1, T2, T3, T4, T5, T6, T7, T8, T9> Args for (T1, T2, T3, T4, T5, T6, T7, T8, T9)\nwhere\n    T1: FromValue + Sized,\n    T2: FromValue + Sized,\n    T3: FromValue + Sized,\n    T4: FromValue + Sized,\n    T5: FromValue + Sized,\n    T6: FromValue + Sized,\n    T7: FromValue + Sized,\n    T8: FromValue + Sized,\n    T9: FromValue + Sized,\n{\n    fn parse(args: RuntimeArgs) -> Result<Self, Trap> {\n        let a0: T1 = args.nth_checked(0)?;\n        let a1: T2 = args.nth_checked(1)?;\n        let a2: T3 = args.nth_checked(2)?;\n        let a3: T4 = args.nth_checked(3)?;\n        let a4: T5 = args.nth_checked(4)?;\n        let a5: T6 = args.nth_checked(5)?;\n        let a6: T7 = args.nth_checked(6)?;\n        let a7: T8 = args.nth_checked(7)?;\n        let a8: T9 = args.nth_checked(8)?;\n        Ok((a0, a1, a2, a3, a4, a5, a6, a7, a8))\n    }\n}\n\nimpl<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> Args for (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10)\nwhere\n    T1: FromValue + Sized,\n    T2: FromValue + Sized,\n    T3: FromValue + Sized,\n    T4: FromValue + Sized,\n    T5: FromValue + Sized,\n    T6: FromValue + Sized,\n    T7: FromValue + Sized,\n    T8: FromValue + Sized,\n    T9: FromValue + Sized,\n    T10: FromValue + Sized,\n{\n    fn parse(args: RuntimeArgs) -> Result<Self, Trap> {\n        let a0: T1 = args.nth_checked(0)?;\n        let a1: T2 = args.nth_checked(1)?;\n        let a2: T3 = args.nth_checked(2)?;\n        let a3: T4 = args.nth_checked(3)?;\n        let a4: T5 = args.nth_checked(4)?;\n        let a5: T6 = args.nth_checked(5)?;\n        let a6: T7 = args.nth_checked(6)?;\n        let a7: T8 = args.nth_checked(7)?;\n        let a8: T9 = args.nth_checked(8)?;\n        let a9: T10 = args.nth_checked(9)?;\n        Ok((a0, a1, a2, a3, a4, a5, a6, a7, a8, a9))\n    }\n}\n\nimpl<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> Args\n    for (T1, T2, T3, T4, T5, T6, T7, T8, 
T9, T10, T11)\nwhere\n    T1: FromValue + Sized,\n    T2: FromValue + Sized,\n    T3: FromValue + Sized,\n    T4: FromValue + Sized,\n    T5: FromValue + Sized,\n    T6: FromValue + Sized,\n    T7: FromValue + Sized,\n    T8: FromValue + Sized,\n    T9: FromValue + Sized,\n    T10: FromValue + Sized,\n    T11: FromValue + Sized,\n{\n    fn parse(args: RuntimeArgs) -> Result<Self, Trap> {\n        let a0: T1 = args.nth_checked(0)?;\n        let a1: T2 = args.nth_checked(1)?;\n        let a2: T3 = args.nth_checked(2)?;\n        let a3: T4 = args.nth_checked(3)?;\n        let a4: T5 = args.nth_checked(4)?;\n        let a5: T6 = args.nth_checked(5)?;\n        let a6: T7 = args.nth_checked(6)?;\n        let a7: T8 = args.nth_checked(7)?;\n        let a8: T9 = args.nth_checked(8)?;\n        let a9: T10 = args.nth_checked(9)?;\n        let a10: T11 = args.nth_checked(10)?;\n        Ok((a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10))\n    }\n}\n"
  },
  {
    "path": "execution_engine/src/runtime/auction_internal.rs",
    "content": "use std::collections::BTreeSet;\nuse tracing::{debug, error};\n\nuse casper_storage::{\n    global_state::{error::Error as GlobalStateError, state::StateReader},\n    system::{\n        auction::{\n            providers::{AccountProvider, MintProvider, RuntimeProvider, StorageProvider},\n            Auction,\n        },\n        mint::Mint,\n    },\n};\nuse casper_types::{\n    account::AccountHash,\n    bytesrepr::{FromBytes, ToBytes},\n    system::{\n        auction::{BidAddr, BidKind, EraInfo, Error, Unbond, UnbondEra, UnbondKind},\n        mint,\n    },\n    AccessRights, CLTyped, CLValue, Key, KeyTag, PublicKey, RuntimeArgs, StoredValue, URef, U512,\n};\n\nuse super::Runtime;\nuse crate::execution::ExecError;\n\nimpl From<ExecError> for Option<Error> {\n    fn from(exec_error: ExecError) -> Self {\n        match exec_error {\n            // This is used to propagate [`execution::Error::GasLimit`] to make sure [`Auction`]\n            // contract running natively supports propagating gas limit errors without a panic.\n            ExecError::GasLimit => Some(Error::GasLimit),\n            // There are possibly other exec errors happening but such translation would be lossy.\n            _ => None,\n        }\n    }\n}\n\nimpl<R> StorageProvider for Runtime<'_, R>\nwhere\n    R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn read<T: FromBytes + CLTyped>(&mut self, uref: URef) -> Result<Option<T>, Error> {\n        match self.context.read_gs(&uref.into()) {\n            Ok(Some(StoredValue::CLValue(cl_value))) => {\n                Ok(Some(cl_value.into_t().map_err(|_| Error::CLValue)?))\n            }\n            Ok(Some(_)) => {\n                error!(\"StorageProvider::read: unexpected StoredValue variant\");\n                Err(Error::Storage)\n            }\n            Ok(None) => Ok(None),\n            Err(ExecError::BytesRepr(_)) => Err(Error::Serialization),\n            // NOTE: This extra condition is needed to 
correctly propagate GasLimit to the user. See\n            // also [`Runtime::reverter`] and [`to_auction_error`]\n            Err(ExecError::GasLimit) => Err(Error::GasLimit),\n            Err(err) => {\n                error!(\"StorageProvider::read: {:?}\", err);\n                Err(Error::Storage)\n            }\n        }\n    }\n\n    fn write<T: ToBytes + CLTyped>(&mut self, uref: URef, value: T) -> Result<(), Error> {\n        let cl_value = CLValue::from_t(value).map_err(|_| Error::CLValue)?;\n        self.context\n            .metered_write_gs(uref.into(), StoredValue::CLValue(cl_value))\n            .map_err(|exec_error| {\n                error!(\"StorageProvider::write: {:?}\", exec_error);\n                <Option<Error>>::from(exec_error).unwrap_or(Error::Storage)\n            })\n    }\n\n    fn read_bid(&mut self, key: &Key) -> Result<Option<BidKind>, Error> {\n        match self.context.read_gs(key) {\n            Ok(Some(StoredValue::BidKind(bid_kind))) => Ok(Some(bid_kind)),\n            Ok(Some(_)) => {\n                error!(\"StorageProvider::read_bid: unexpected StoredValue variant\");\n                Err(Error::Storage)\n            }\n            Ok(None) => Ok(None),\n            Err(ExecError::BytesRepr(_)) => Err(Error::Serialization),\n            // NOTE: This extra condition is needed to correctly propagate GasLimit to the user. 
See\n            // also [`Runtime::reverter`] and [`to_auction_error`]\n            Err(ExecError::GasLimit) => Err(Error::GasLimit),\n            Err(err) => {\n                error!(\"StorageProvider::read_bid: {:?}\", err);\n                Err(Error::Storage)\n            }\n        }\n    }\n\n    fn write_bid(&mut self, key: Key, bid_kind: BidKind) -> Result<(), Error> {\n        self.context\n            .metered_write_gs_unsafe(key, StoredValue::BidKind(bid_kind))\n            .map_err(|exec_error| {\n                error!(\"StorageProvider::write_bid: {:?}\", exec_error);\n                <Option<Error>>::from(exec_error).unwrap_or(Error::Storage)\n            })\n    }\n\n    fn read_unbond(&mut self, bid_addr: BidAddr) -> Result<Option<Unbond>, Error> {\n        match self.context.read_gs(&Key::BidAddr(bid_addr)) {\n            Ok(Some(StoredValue::BidKind(BidKind::Unbond(unbonds)))) => Ok(Some(*unbonds)),\n            Ok(Some(_)) => {\n                error!(\"StorageProvider::read_unbonds: unexpected StoredValue variant\");\n                Err(Error::Storage)\n            }\n            Ok(None) => Ok(None),\n            Err(ExecError::BytesRepr(_)) => Err(Error::Serialization),\n            // NOTE: This extra condition is needed to correctly propagate GasLimit to the user. 
See\n            // also [`Runtime::reverter`] and [`to_auction_error`]\n            Err(ExecError::GasLimit) => Err(Error::GasLimit),\n            Err(err) => {\n                error!(\"StorageProvider::read_unbonds: {:?}\", err);\n                Err(Error::Storage)\n            }\n        }\n    }\n\n    fn write_unbond(&mut self, bid_addr: BidAddr, unbond: Option<Unbond>) -> Result<(), Error> {\n        let unbond_key = Key::BidAddr(bid_addr);\n        match unbond {\n            Some(unbond) => self\n                .context\n                .metered_write_gs_unsafe(\n                    unbond_key,\n                    StoredValue::BidKind(BidKind::Unbond(Box::new(unbond))),\n                )\n                .map_err(|exec_error| {\n                    error!(\"StorageProvider::write_unbond: {:?}\", exec_error);\n                    <Option<Error>>::from(exec_error).unwrap_or(Error::Storage)\n                }),\n            None => {\n                self.context.prune_gs_unsafe(unbond_key);\n                Ok(())\n            }\n        }\n    }\n\n    fn record_era_info(&mut self, era_info: EraInfo) -> Result<(), Error> {\n        Runtime::record_era_info(self, era_info)\n            .map_err(|exec_error| <Option<Error>>::from(exec_error).unwrap_or(Error::RecordEraInfo))\n    }\n\n    fn prune_bid(&mut self, bid_addr: BidAddr) {\n        Runtime::prune(self, bid_addr.into());\n    }\n}\n\nimpl<R> RuntimeProvider for Runtime<'_, R>\nwhere\n    R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn get_caller(&self) -> AccountHash {\n        self.context.get_initiator()\n    }\n\n    fn is_allowed_session_caller(&self, account_hash: &AccountHash) -> bool {\n        Runtime::is_allowed_session_caller(self, account_hash)\n    }\n\n    fn is_valid_uref(&self, uref: URef) -> bool {\n        self.context.validate_uref(&uref).is_ok()\n    }\n\n    fn named_keys_get(&self, name: &str) -> Option<Key> {\n        
self.context.named_keys_get(name).cloned()\n    }\n\n    fn get_keys(&mut self, key_tag: &KeyTag) -> Result<BTreeSet<Key>, Error> {\n        self.context.get_keys(key_tag).map_err(|err| {\n            error!(%key_tag, \"RuntimeProvider::get_keys: {:?}\", err);\n            Error::Storage\n        })\n    }\n\n    fn get_keys_by_prefix(&mut self, prefix: &[u8]) -> Result<Vec<Key>, Error> {\n        self.context\n            .get_keys_with_prefix(prefix)\n            .map_err(|exec_error| {\n                error!(\"RuntimeProvider::get_keys_by_prefix: {:?}\", exec_error);\n                <Option<Error>>::from(exec_error).unwrap_or(Error::Storage)\n            })\n    }\n\n    fn delegator_count(&mut self, bid_addr: &BidAddr) -> Result<usize, Error> {\n        let delegated_accounts = {\n            let prefix = bid_addr.delegated_account_prefix()?;\n            let keys = self\n                .context\n                .get_keys_with_prefix(&prefix)\n                .map_err(|exec_error| {\n                    error!(\"RuntimeProvider::delegator_count accounts {:?}\", exec_error);\n                    <Option<Error>>::from(exec_error).unwrap_or(Error::Storage)\n                })?;\n            keys.len()\n        };\n        let delegated_purses = {\n            let prefix = bid_addr.delegated_purse_prefix()?;\n            let keys = self\n                .context\n                .get_keys_with_prefix(&prefix)\n                .map_err(|exec_error| {\n                    error!(\"RuntimeProvider::delegator_count purses {:?}\", exec_error);\n                    <Option<Error>>::from(exec_error).unwrap_or(Error::Storage)\n                })?;\n            keys.len()\n        };\n        Ok(delegated_accounts.saturating_add(delegated_purses))\n    }\n\n    fn reservation_count(&mut self, bid_addr: &BidAddr) -> Result<usize, Error> {\n        let reserved_accounts = {\n            let reservation_prefix = bid_addr.reserved_account_prefix()?;\n            let 
reservation_keys = self\n                .context\n                .get_keys_with_prefix(&reservation_prefix)\n                .map_err(|exec_error| {\n                    error!(\"RuntimeProvider::reservation_count {:?}\", exec_error);\n                    <Option<Error>>::from(exec_error).unwrap_or(Error::Storage)\n                })?;\n            reservation_keys.len()\n        };\n        let reserved_purses = {\n            let reservation_prefix = bid_addr.reserved_purse_prefix()?;\n            let reservation_keys = self\n                .context\n                .get_keys_with_prefix(&reservation_prefix)\n                .map_err(|exec_error| {\n                    error!(\"RuntimeProvider::reservation_count {:?}\", exec_error);\n                    <Option<Error>>::from(exec_error).unwrap_or(Error::Storage)\n                })?;\n            reservation_keys.len()\n        };\n        Ok(reserved_accounts.saturating_add(reserved_purses))\n    }\n\n    fn used_reservation_count(&mut self, bid_addr: &BidAddr) -> Result<usize, Error> {\n        let reservation_account_prefix = bid_addr.reserved_account_prefix()?;\n        let reservation_purse_prefix = bid_addr.reserved_purse_prefix()?;\n\n        let reservation_keys = {\n            let mut ret = self\n                .context\n                .get_keys_with_prefix(&reservation_account_prefix)\n                .map_err(|exec_error| {\n                    error!(\"RuntimeProvider::reservation_count {:?}\", exec_error);\n                    <Option<Error>>::from(exec_error).unwrap_or(Error::Storage)\n                })?;\n            let purses = self\n                .context\n                .get_keys_with_prefix(&reservation_purse_prefix)\n                .map_err(|exec_error| {\n                    error!(\"RuntimeProvider::reservation_count {:?}\", exec_error);\n                    <Option<Error>>::from(exec_error).unwrap_or(Error::Storage)\n                })?;\n            ret.extend(purses);\n        
    ret\n        };\n\n        let mut used = 0;\n        for reservation_key in reservation_keys {\n            if let Key::BidAddr(BidAddr::ReservedDelegationAccount {\n                validator,\n                delegator,\n            }) = reservation_key\n            {\n                let key_to_check = Key::BidAddr(BidAddr::DelegatedAccount {\n                    validator,\n                    delegator,\n                });\n                if let Ok(Some(_)) = self.context.read_gs(&key_to_check) {\n                    used += 1;\n                }\n            }\n            if let Key::BidAddr(BidAddr::ReservedDelegationPurse {\n                validator,\n                delegator,\n            }) = reservation_key\n            {\n                let key_to_check = Key::BidAddr(BidAddr::DelegatedPurse {\n                    validator,\n                    delegator,\n                });\n                if let Ok(Some(_)) = self.context.read_gs(&key_to_check) {\n                    used += 1;\n                }\n            }\n        }\n        Ok(used)\n    }\n\n    fn vesting_schedule_period_millis(&self) -> u64 {\n        self.context\n            .engine_config()\n            .vesting_schedule_period_millis()\n    }\n\n    fn allow_auction_bids(&self) -> bool {\n        self.context.engine_config().allow_auction_bids()\n    }\n\n    fn should_compute_rewards(&self) -> bool {\n        self.context.engine_config().compute_rewards()\n    }\n}\n\nimpl<R> MintProvider for Runtime<'_, R>\nwhere\n    R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn unbond(&mut self, unbond_kind: &UnbondKind, unbond_era: &UnbondEra) -> Result<(), Error> {\n        let is_delegator = unbond_kind.is_delegator();\n        let (purse, maybe_account_hash) = match unbond_kind {\n            UnbondKind::Validator(pk) | UnbondKind::DelegatedPublicKey(pk) => {\n                let account_hash = pk.to_account_hash();\n                let maybe_value = self\n   
                 .context\n                    .read_gs_unsafe(&Key::Account(account_hash))\n                    .map_err(|exec_error| {\n                        error!(\"MintProvider::unbond: {:?}\", exec_error);\n                        <Option<Error>>::from(exec_error).unwrap_or(Error::Storage)\n                    })?;\n\n                match maybe_value {\n                    Some(StoredValue::Account(account)) => {\n                        (account.main_purse(), Some(account_hash))\n                    }\n                    Some(StoredValue::CLValue(cl_value)) => {\n                        let entity_key: Key = cl_value.into_t().map_err(|_| Error::CLValue)?;\n                        match self.context.read_gs_unsafe(&entity_key) {\n                            Ok(Some(StoredValue::AddressableEntity(entity))) => {\n                                (entity.main_purse(), Some(account_hash))\n                            }\n                            Ok(Some(StoredValue::CLValue(_))) => {\n                                return Err(Error::CLValue);\n                            }\n                            Ok(Some(_)) => {\n                                return if is_delegator {\n                                    Err(Error::DelegatorNotFound)\n                                } else {\n                                    Err(Error::ValidatorNotFound)\n                                }\n                            }\n                            Ok(None) => {\n                                return Err(Error::InvalidPublicKey);\n                            }\n                            Err(exec_error) => {\n                                error!(\"MintProvider::unbond: {:?}\", exec_error);\n                                return Err(\n                                    <Option<Error>>::from(exec_error).unwrap_or(Error::Storage)\n                                );\n                            }\n                        }\n                    }\n                 
   Some(_) => return Err(Error::UnexpectedStoredValueVariant),\n                    None => return Err(Error::InvalidPublicKey),\n                }\n            }\n            UnbondKind::DelegatedPurse(addr) => {\n                let purse = URef::new(*addr, AccessRights::READ_ADD_WRITE);\n                match self.balance(purse) {\n                    Ok(Some(_)) => (purse, None),\n                    Ok(None) => return Err(Error::MissingPurse),\n                    Err(err) => {\n                        error!(\"MintProvider::unbond delegated purse: {:?}\", err);\n                        return Err(Error::MintError);\n                    }\n                }\n            }\n        };\n\n        self.mint_transfer_direct(\n            maybe_account_hash,\n            *unbond_era.bonding_purse(),\n            purse,\n            *unbond_era.amount(),\n            None,\n        )\n        .map_err(|_| Error::Transfer)?\n        .map_err(|_| Error::Transfer)?;\n        Ok(())\n    }\n\n    /// Allows optimized auction and mint interaction.\n    /// Intended to be used only by system contracts to manage staked purses.\n    /// NOTE: Never expose this through FFI.\n    fn mint_transfer_direct(\n        &mut self,\n        to: Option<AccountHash>,\n        source: URef,\n        target: URef,\n        amount: U512,\n        id: Option<u64>,\n    ) -> Result<Result<(), mint::Error>, Error> {\n        let is_main_purse_transfer = self\n            .context\n            .runtime_footprint()\n            .borrow()\n            .main_purse()\n            .expect(\"didnt have purse\")\n            .addr()\n            == source.addr();\n        let has_perms = is_main_purse_transfer\n            || (source.is_writeable() && self.context.validate_uref(&source).is_ok());\n        if !(has_perms || self.context.get_initiator() == PublicKey::System.to_account_hash()) {\n            return Err(Error::InvalidCaller);\n        }\n\n        let args_values = 
RuntimeArgs::try_new(|args| {\n            args.insert(mint::ARG_TO, to)?;\n            args.insert(mint::ARG_SOURCE, source)?;\n            args.insert(mint::ARG_TARGET, target)?;\n            args.insert(mint::ARG_AMOUNT, amount)?;\n            args.insert(mint::ARG_ID, id)?;\n            Ok(())\n        })\n        .map_err(|_| Error::CLValue)?;\n\n        let gas_counter = self.gas_counter();\n\n        self.context\n            .access_rights_extend(&[source, target.into_add()]);\n\n        let mint_hash = self.get_mint_hash().map_err(|exec_error| {\n            <Option<Error>>::from(exec_error).unwrap_or(Error::MissingValue)\n        })?;\n\n        let cl_value = self\n            .call_contract(mint_hash, mint::METHOD_TRANSFER, args_values)\n            .map_err(|exec_error| <Option<Error>>::from(exec_error).unwrap_or(Error::Transfer))?;\n\n        self.set_gas_counter(gas_counter);\n        cl_value.into_t().map_err(|_| Error::CLValue)\n    }\n\n    fn mint_into_existing_purse(\n        &mut self,\n        amount: U512,\n        existing_purse: URef,\n    ) -> Result<(), Error> {\n        if self.context.get_initiator() != PublicKey::System.to_account_hash() {\n            return Err(Error::InvalidCaller);\n        }\n\n        let args_values = RuntimeArgs::try_new(|args| {\n            args.insert(mint::ARG_AMOUNT, amount)?;\n            args.insert(mint::ARG_PURSE, existing_purse)?;\n            Ok(())\n        })\n        .map_err(|_| Error::CLValue)?;\n\n        let gas_counter = self.gas_counter();\n\n        let mint_hash = self.get_mint_hash().map_err(|exec_error| {\n            <Option<Error>>::from(exec_error).unwrap_or(Error::MissingValue)\n        })?;\n\n        let cl_value = self\n            .call_contract(\n                mint_hash,\n                mint::METHOD_MINT_INTO_EXISTING_PURSE,\n                args_values,\n            )\n            .map_err(|error| <Option<Error>>::from(error).unwrap_or(Error::MintError))?;\n        
self.set_gas_counter(gas_counter);\n        cl_value\n            .into_t::<Result<(), mint::Error>>()\n            .map_err(|_| Error::CLValue)?\n            .map_err(|_| Error::MintError)\n    }\n\n    fn create_purse(&mut self) -> Result<URef, Error> {\n        Runtime::create_purse(self).map_err(|exec_error| {\n            <Option<Error>>::from(exec_error).unwrap_or(Error::CreatePurseFailed)\n        })\n    }\n\n    fn available_balance(&mut self, purse: URef) -> Result<Option<U512>, Error> {\n        Runtime::available_balance(self, purse)\n            .map_err(|exec_error| <Option<Error>>::from(exec_error).unwrap_or(Error::GetBalance))\n    }\n\n    fn read_base_round_reward(&mut self) -> Result<U512, Error> {\n        let mint_hash = self.get_mint_hash().map_err(|exec_error| {\n            <Option<Error>>::from(exec_error).unwrap_or(Error::MissingValue)\n        })?;\n        self.mint_read_base_round_reward(mint_hash)\n            .map_err(|exec_error| <Option<Error>>::from(exec_error).unwrap_or(Error::MintReward))\n    }\n\n    fn mint(&mut self, amount: U512) -> Result<URef, Error> {\n        let mint_hash = self.get_mint_hash().map_err(|exec_error| {\n            <Option<Error>>::from(exec_error).unwrap_or(Error::MissingValue)\n        })?;\n        self.mint_mint(mint_hash, amount)\n            .map_err(|exec_error| <Option<Error>>::from(exec_error).unwrap_or(Error::MintError))\n    }\n\n    fn reduce_total_supply(&mut self, amount: U512) -> Result<(), Error> {\n        let mint_hash = self.get_mint_hash().map_err(|exec_error| {\n            <Option<Error>>::from(exec_error).unwrap_or(Error::MissingValue)\n        })?;\n        self.mint_reduce_total_supply(mint_hash, amount)\n            .map_err(|exec_error| {\n                <Option<Error>>::from(exec_error).unwrap_or(Error::MintReduceTotalSupply)\n            })\n    }\n}\n\nimpl<R> AccountProvider for Runtime<'_, R>\nwhere\n    R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    
fn get_main_purse(&self) -> Result<URef, Error> {\n        // NOTE: this is used by the system and is not (and should not be made to be) accessible\n        // from userland.\n        match Runtime::context(self)\n            .runtime_footprint()\n            .borrow()\n            .main_purse()\n        {\n            None => {\n                debug!(\"runtime attempt to access non-existent main purse\");\n                Err(Error::InvalidContext)\n            }\n            Some(purse) => Ok(purse),\n        }\n    }\n\n    /// Set main purse.\n    fn set_main_purse(&mut self, purse: URef) {\n        Runtime::context(self)\n            .runtime_footprint()\n            .borrow_mut()\n            .set_main_purse(purse);\n    }\n}\n\nimpl<R> Auction for Runtime<'_, R> where R: StateReader<Key, StoredValue, Error = GlobalStateError> {}\n"
  },
  {
    "path": "execution_engine/src/runtime/cryptography.rs",
    "content": "//! Cryptography module containing hashing functions used internally\n//! by the execution engine\n\nuse blake2::{\n    digest::{Update, VariableOutput},\n    Blake2bVar,\n};\nuse sha2::{Digest, Sha256};\n\n/// The number of bytes in a hash.\n/// All hash functions in this module have a digest length of 32.\npub const DIGEST_LENGTH: usize = 32;\n\n/// The 32-byte digest blake2b hash function\npub fn blake2b<T: AsRef<[u8]>>(data: T) -> [u8; DIGEST_LENGTH] {\n    let mut result = [0; DIGEST_LENGTH];\n    // NOTE: Assumed safe as `BLAKE2B_DIGEST_LENGTH` is a valid value for a hasher\n    let mut hasher = Blake2bVar::new(DIGEST_LENGTH).expect(\"should create hasher\");\n\n    hasher.update(data.as_ref());\n\n    // NOTE: This should never fail, because result is exactly DIGEST_LENGTH long\n    hasher.finalize_variable(&mut result).ok();\n\n    result\n}\n\n/// The 32-byte digest blake3 hash function\npub fn blake3<T: AsRef<[u8]>>(data: T) -> [u8; DIGEST_LENGTH] {\n    let mut result = [0; DIGEST_LENGTH];\n    let mut hasher = blake3::Hasher::new();\n\n    hasher.update(data.as_ref());\n    let hash = hasher.finalize();\n    let hash_bytes: &[u8; DIGEST_LENGTH] = hash.as_bytes();\n    result.copy_from_slice(hash_bytes);\n    result\n}\n\n/// The 32-byte digest sha256 hash function\npub fn sha256<T: AsRef<[u8]>>(data: T) -> [u8; DIGEST_LENGTH] {\n    Sha256::digest(data).into()\n}\n"
  },
  {
    "path": "execution_engine/src/runtime/externals.rs",
    "content": "use std::{\n    collections::{BTreeMap, BTreeSet},\n    convert::TryFrom,\n};\n\nuse casper_wasmi::{Externals, RuntimeArgs, RuntimeValue, Trap};\n\nuse casper_storage::global_state::{error::Error as GlobalStateError, state::StateReader};\nuse casper_types::{\n    account::AccountHash,\n    addressable_entity::{EntityEntryPoint, EntryPoints},\n    api_error,\n    bytesrepr::{self, ToBytes},\n    contract_messages::MessageTopicOperation,\n    contracts::{\n        ContractPackageHash, EntryPoints as ContractEntryPoints, NamedKeys, ProtocolVersionMajor,\n    },\n    AddressableEntityHash, ApiError, EntityVersion, Gas, Group, HashAlgorithm, HostFunction,\n    HostFunctionCost, Key, PackageHash, PackageStatus, PublicKey, Signature, StoredValue, URef,\n    U512, UREF_SERIALIZED_LENGTH,\n};\n\nuse super::{args::Args, ExecError, Runtime};\nuse crate::{resolvers::v1_function_index::FunctionIndex, runtime::cryptography};\n\nimpl<R> Externals for Runtime<'_, R>\nwhere\n    R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn invoke_index(\n        &mut self,\n        index: usize,\n        args: RuntimeArgs,\n    ) -> Result<Option<RuntimeValue>, Trap> {\n        let func = FunctionIndex::try_from(index).expect(\"unknown function index\");\n\n        let host_function_costs =\n            (*self.context.engine_config().wasm_config().v1()).take_host_function_costs();\n\n        match func {\n            FunctionIndex::ReadFuncIndex => {\n                // args(0) = pointer to key in Wasm memory\n                // args(1) = size of key in Wasm memory\n                // args(2) = pointer to output size (output param)\n                let (key_ptr, key_size, output_size_ptr) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.read_value,\n                    [key_ptr, key_size, output_size_ptr],\n                )?;\n                let ret = self.read(key_ptr, key_size, 
output_size_ptr)?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            FunctionIndex::LoadNamedKeysFuncIndex => {\n                // args(0) = pointer to amount of keys (output)\n                // args(1) = pointer to amount of serialized bytes (output)\n                let (total_keys_ptr, result_size_ptr) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.load_named_keys,\n                    [total_keys_ptr, result_size_ptr],\n                )?;\n                let ret = self.load_named_keys(total_keys_ptr, result_size_ptr)?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            FunctionIndex::WriteFuncIndex => {\n                // args(0) = pointer to key in Wasm memory\n                // args(1) = size of key\n                // args(2) = pointer to value\n                // args(3) = size of value\n                let (key_ptr, key_size, value_ptr, value_size) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.write,\n                    [key_ptr, key_size, value_ptr, value_size],\n                )?;\n                self.write(key_ptr, key_size, value_ptr, value_size)?;\n                Ok(None)\n            }\n\n            FunctionIndex::AddFuncIndex => {\n                // args(0) = pointer to key in Wasm memory\n                // args(1) = size of key\n                // args(2) = pointer to value\n                // args(3) = size of value\n                let (key_ptr, key_size, value_ptr, value_size) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.add,\n                    [key_ptr, key_size, value_ptr, value_size],\n                )?;\n                self.add(key_ptr, key_size, value_ptr, value_size)?;\n                Ok(None)\n            }\n\n            
FunctionIndex::NewFuncIndex => {\n                // args(0) = pointer to uref destination in Wasm memory\n                // args(1) = pointer to initial value\n                // args(2) = size of initial value\n                let (uref_ptr, value_ptr, value_size) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.new_uref,\n                    [uref_ptr, value_ptr, value_size],\n                )?;\n                self.new_uref(uref_ptr, value_ptr, value_size)?;\n                Ok(None)\n            }\n\n            FunctionIndex::RetFuncIndex => {\n                // args(0) = pointer to value\n                // args(1) = size of value\n                let (value_ptr, value_size) = Args::parse(args)?;\n                self.charge_host_function_call(&host_function_costs.ret, [value_ptr, value_size])?;\n                Err(self.ret(value_ptr, value_size as usize))\n            }\n\n            FunctionIndex::GetKeyFuncIndex => {\n                // args(0) = pointer to key name in Wasm memory\n                // args(1) = size of key name\n                // args(2) = pointer to output buffer for serialized key\n                // args(3) = size of output buffer\n                // args(4) = pointer to bytes written\n                let (name_ptr, name_size, output_ptr, output_size, bytes_written) =\n                    Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.get_key,\n                    [name_ptr, name_size, output_ptr, output_size, bytes_written],\n                )?;\n                let ret = self.load_key(\n                    name_ptr,\n                    name_size,\n                    output_ptr,\n                    output_size as usize,\n                    bytes_written,\n                )?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            
FunctionIndex::HasKeyFuncIndex => {\n                // args(0) = pointer to key name in Wasm memory\n                // args(1) = size of key name\n                let (name_ptr, name_size) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.has_key,\n                    [name_ptr, name_size],\n                )?;\n                let result = self.has_key(name_ptr, name_size)?;\n                Ok(Some(RuntimeValue::I32(result)))\n            }\n\n            FunctionIndex::PutKeyFuncIndex => {\n                // args(0) = pointer to key name in Wasm memory\n                // args(1) = size of key name\n                // args(2) = pointer to key in Wasm memory\n                // args(3) = size of key\n                let (name_ptr, name_size, key_ptr, key_size) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.put_key,\n                    [name_ptr, name_size, key_ptr, key_size],\n                )?;\n                self.put_key(name_ptr, name_size, key_ptr, key_size)?;\n                Ok(None)\n            }\n\n            FunctionIndex::RemoveKeyFuncIndex => {\n                // args(0) = pointer to key name in Wasm memory\n                // args(1) = size of key name\n                let (name_ptr, name_size) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.remove_key,\n                    [name_ptr, name_size],\n                )?;\n                self.remove_key(name_ptr, name_size)?;\n                Ok(None)\n            }\n\n            FunctionIndex::GetCallerIndex => {\n                // args(0) = pointer where a size of serialized bytes will be stored\n                let (output_size_ptr,) = Args::parse(args)?;\n                self.charge_host_function_call(&host_function_costs.get_caller, [output_size_ptr])?;\n                let ret = 
self.get_caller(output_size_ptr)?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            FunctionIndex::GetBlocktimeIndex => {\n                // args(0) = pointer to Wasm memory where to write.\n                let (dest_ptr,) = Args::parse(args)?;\n                self.charge_host_function_call(&host_function_costs.get_blocktime, [dest_ptr])?;\n                self.get_blocktime(dest_ptr)?;\n                Ok(None)\n            }\n\n            FunctionIndex::GasFuncIndex => {\n                let (gas_arg,): (u32,) = Args::parse(args)?;\n                // Gas is special cased internal host function and for accounting purposes it isn't\n                // represented in protocol data.\n                self.gas(Gas::new(gas_arg))?;\n                Ok(None)\n            }\n\n            FunctionIndex::IsValidURefFnIndex => {\n                // args(0) = pointer to value to validate\n                // args(1) = size of value\n                let (uref_ptr, uref_size) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.is_valid_uref,\n                    [uref_ptr, uref_size],\n                )?;\n                Ok(Some(RuntimeValue::I32(i32::from(\n                    self.is_valid_uref(uref_ptr, uref_size)?,\n                ))))\n            }\n\n            FunctionIndex::RevertFuncIndex => {\n                // args(0) = status u32\n                let (status,) = Args::parse(args)?;\n                self.charge_host_function_call(&host_function_costs.revert, [status])?;\n                Err(self.revert(status))\n            }\n\n            FunctionIndex::AddAssociatedKeyFuncIndex => {\n                // args(0) = pointer to array of bytes of an account hash\n                // args(1) = size of an account hash\n                // args(2) = weight of the key\n                let (account_hash_ptr, account_hash_size, weight_value) = 
Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.add_associated_key,\n                    [\n                        account_hash_ptr,\n                        account_hash_size,\n                        weight_value as HostFunctionCost,\n                    ],\n                )?;\n                let value = self.add_associated_key(\n                    account_hash_ptr,\n                    account_hash_size as usize,\n                    weight_value,\n                )?;\n                Ok(Some(RuntimeValue::I32(value)))\n            }\n\n            FunctionIndex::RemoveAssociatedKeyFuncIndex => {\n                // args(0) = pointer to array of bytes of an account hash\n                // args(1) = size of an account hash\n                let (account_hash_ptr, account_hash_size) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.remove_associated_key,\n                    [account_hash_ptr, account_hash_size],\n                )?;\n                let value =\n                    self.remove_associated_key(account_hash_ptr, account_hash_size as usize)?;\n                Ok(Some(RuntimeValue::I32(value)))\n            }\n\n            FunctionIndex::UpdateAssociatedKeyFuncIndex => {\n                // args(0) = pointer to array of bytes of an account hash\n                // args(1) = size of an account hash\n                // args(2) = weight of the key\n                let (account_hash_ptr, account_hash_size, weight_value) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.update_associated_key,\n                    [\n                        account_hash_ptr,\n                        account_hash_size,\n                        weight_value as HostFunctionCost,\n                    ],\n                )?;\n                let value = self.update_associated_key(\n       
             account_hash_ptr,\n                    account_hash_size as usize,\n                    weight_value,\n                )?;\n                Ok(Some(RuntimeValue::I32(value)))\n            }\n\n            FunctionIndex::SetActionThresholdFuncIndex => {\n                // args(0) = action type\n                // args(1) = new threshold\n                let (action_type_value, threshold_value) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.set_action_threshold,\n                    [action_type_value, threshold_value as HostFunctionCost],\n                )?;\n                let value = self.set_action_threshold(action_type_value, threshold_value)?;\n                Ok(Some(RuntimeValue::I32(value)))\n            }\n\n            FunctionIndex::CreatePurseIndex => {\n                // args(0) = pointer to array for return value\n                // args(1) = length of array for return value\n                let (dest_ptr, dest_size) = Args::parse(args)?;\n\n                self.charge_host_function_call(\n                    &host_function_costs.create_purse,\n                    [dest_ptr, dest_size],\n                )?;\n\n                let result = if (dest_size as usize) < UREF_SERIALIZED_LENGTH {\n                    Err(ApiError::PurseNotCreated)\n                } else {\n                    let purse = self.create_purse()?;\n                    let purse_bytes = purse.into_bytes().map_err(ExecError::BytesRepr)?;\n                    self.try_get_memory()?\n                        .set(dest_ptr, &purse_bytes)\n                        .map_err(|e| ExecError::Interpreter(e.into()))?;\n                    Ok(())\n                };\n\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(result))))\n            }\n\n            FunctionIndex::TransferToAccountIndex => {\n                // args(0) = pointer to array of bytes of an account hash\n                // args(1) 
= length of array of bytes of an account hash\n                // args(2) = pointer to array of bytes of an amount\n                // args(3) = length of array of bytes of an amount\n                // args(4) = pointer to array of bytes of an id\n                // args(5) = length of array of bytes of an id\n                // args(6) = pointer to a value where new value will be set\n                let (key_ptr, key_size, amount_ptr, amount_size, id_ptr, id_size, result_ptr) =\n                    Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.transfer_to_account,\n                    [\n                        key_ptr,\n                        key_size,\n                        amount_ptr,\n                        amount_size,\n                        id_ptr,\n                        id_size,\n                        result_ptr,\n                    ],\n                )?;\n                let account_hash: AccountHash = {\n                    let bytes = self.bytes_from_mem(key_ptr, key_size as usize)?;\n                    bytesrepr::deserialize_from_slice(bytes).map_err(ExecError::BytesRepr)?\n                };\n                let amount: U512 = {\n                    let bytes = self.bytes_from_mem(amount_ptr, amount_size as usize)?;\n                    bytesrepr::deserialize_from_slice(bytes).map_err(ExecError::BytesRepr)?\n                };\n                let id: Option<u64> = {\n                    let bytes = self.bytes_from_mem(id_ptr, id_size as usize)?;\n                    bytesrepr::deserialize_from_slice(bytes).map_err(ExecError::BytesRepr)?\n                };\n\n                let ret = match self.transfer_to_account(account_hash, amount, id)? 
{\n                    Ok(transferred_to) => {\n                        let result_value: u32 = transferred_to as u32;\n                        let result_value_bytes = result_value.to_le_bytes();\n                        self.try_get_memory()?\n                            .set(result_ptr, &result_value_bytes)\n                            .map_err(|error| ExecError::Interpreter(error.into()))?;\n                        Ok(())\n                    }\n                    Err(api_error) => Err(api_error),\n                };\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            FunctionIndex::TransferFromPurseToAccountIndex => {\n                // args(0) = pointer to array of bytes in Wasm memory of a source purse\n                // args(1) = length of array of bytes in Wasm memory of a source purse\n                // args(2) = pointer to array of bytes in Wasm memory of an account hash\n                // args(3) = length of array of bytes in Wasm memory of an account hash\n                // args(4) = pointer to array of bytes in Wasm memory of an amount\n                // args(5) = length of array of bytes in Wasm memory of an amount\n                // args(6) = pointer to array of bytes in Wasm memory of an id\n                // args(7) = length of array of bytes in Wasm memory of an id\n                // args(8) = pointer to a value where value of `TransferredTo` enum will be set\n                let (\n                    source_ptr,\n                    source_size,\n                    key_ptr,\n                    key_size,\n                    amount_ptr,\n                    amount_size,\n                    id_ptr,\n                    id_size,\n                    result_ptr,\n                ) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.transfer_from_purse_to_account,\n                    [\n                        source_ptr,\n     
                   source_size,\n                        key_ptr,\n                        key_size,\n                        amount_ptr,\n                        amount_size,\n                        id_ptr,\n                        id_size,\n                        result_ptr,\n                    ],\n                )?;\n                let source_purse = {\n                    let bytes = self.bytes_from_mem(source_ptr, source_size as usize)?;\n                    bytesrepr::deserialize_from_slice(bytes).map_err(ExecError::BytesRepr)?\n                };\n                let account_hash: AccountHash = {\n                    let bytes = self.bytes_from_mem(key_ptr, key_size as usize)?;\n                    bytesrepr::deserialize_from_slice(bytes).map_err(ExecError::BytesRepr)?\n                };\n                let amount: U512 = {\n                    let bytes = self.bytes_from_mem(amount_ptr, amount_size as usize)?;\n                    bytesrepr::deserialize_from_slice(bytes).map_err(ExecError::BytesRepr)?\n                };\n                let id: Option<u64> = {\n                    let bytes = self.bytes_from_mem(id_ptr, id_size as usize)?;\n                    bytesrepr::deserialize_from_slice(bytes).map_err(ExecError::BytesRepr)?\n                };\n                let ret = match self.transfer_from_purse_to_account_hash(\n                    source_purse,\n                    account_hash,\n                    amount,\n                    id,\n                )? 
{\n                    Ok(transferred_to) => {\n                        let result_value: u32 = transferred_to as u32;\n                        let result_value_bytes = result_value.to_le_bytes();\n                        self.try_get_memory()?\n                            .set(result_ptr, &result_value_bytes)\n                            .map_err(|error| ExecError::Interpreter(error.into()))?;\n                        Ok(())\n                    }\n                    Err(api_error) => Err(api_error),\n                };\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            FunctionIndex::TransferFromPurseToPurseIndex => {\n                // args(0) = pointer to array of bytes in Wasm memory of a source purse\n                // args(1) = length of array of bytes in Wasm memory of a source purse\n                // args(2) = pointer to array of bytes in Wasm memory of a target purse\n                // args(3) = length of array of bytes in Wasm memory of a target purse\n                // args(4) = pointer to array of bytes in Wasm memory of an amount\n                // args(5) = length of array of bytes in Wasm memory of an amount\n                // args(6) = pointer to array of bytes in Wasm memory of an id\n                // args(7) = length of array of bytes in Wasm memory of an id\n                let (\n                    source_ptr,\n                    source_size,\n                    target_ptr,\n                    target_size,\n                    amount_ptr,\n                    amount_size,\n                    id_ptr,\n                    id_size,\n                ) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.transfer_from_purse_to_purse,\n                    [\n                        source_ptr,\n                        source_size,\n                        target_ptr,\n                        target_size,\n                      
  amount_ptr,\n                        amount_size,\n                        id_ptr,\n                        id_size,\n                    ],\n                )?;\n\n                let source: URef = {\n                    let bytes = self.bytes_from_mem(source_ptr, source_size as usize)?;\n                    bytesrepr::deserialize_from_slice(bytes).map_err(ExecError::BytesRepr)?\n                };\n\n                let target: URef = {\n                    let bytes = self.bytes_from_mem(target_ptr, target_size as usize)?;\n                    bytesrepr::deserialize_from_slice(bytes).map_err(ExecError::BytesRepr)?\n                };\n\n                let amount: U512 = {\n                    let bytes = self.bytes_from_mem(amount_ptr, amount_size as usize)?;\n                    bytesrepr::deserialize_from_slice(bytes).map_err(ExecError::BytesRepr)?\n                };\n\n                let id: Option<u64> = {\n                    let bytes = self.bytes_from_mem(id_ptr, id_size as usize)?;\n                    bytesrepr::deserialize_from_slice(bytes).map_err(ExecError::BytesRepr)?\n                };\n\n                let ret = self.transfer_from_purse_to_purse(source, target, amount, id)?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            FunctionIndex::GetBalanceIndex => {\n                // args(0) = pointer to purse input\n                // args(1) = length of purse\n                // args(2) = pointer to output size (output)\n                let (ptr, ptr_size, output_size_ptr) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.get_balance,\n                    [ptr, ptr_size, output_size_ptr],\n                )?;\n                let ret = self.get_balance_host_buffer(ptr, ptr_size as usize, output_size_ptr)?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            
FunctionIndex::GetPhaseIndex => {\n                // args(0) = pointer to Wasm memory where to write.\n                let (dest_ptr,) = Args::parse(args)?;\n                self.charge_host_function_call(&host_function_costs.get_phase, [dest_ptr])?;\n                self.get_phase(dest_ptr)?;\n                Ok(None)\n            }\n\n            FunctionIndex::GetSystemContractIndex => {\n                // args(0) = system contract index\n                // args(1) = dest pointer for storing serialized result\n                // args(2) = dest pointer size\n                let (system_contract_index, dest_ptr, dest_size) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.get_system_contract,\n                    [system_contract_index, dest_ptr, dest_size],\n                )?;\n                let ret = self.get_system_contract(system_contract_index, dest_ptr, dest_size)?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            FunctionIndex::GetMainPurseIndex => {\n                // args(0) = pointer to Wasm memory where to write.\n                let (dest_ptr,) = Args::parse(args)?;\n                self.charge_host_function_call(&host_function_costs.get_main_purse, [dest_ptr])?;\n                self.get_main_purse(dest_ptr)?;\n                Ok(None)\n            }\n\n            FunctionIndex::ReadHostBufferIndex => {\n                // args(0) = pointer to Wasm memory where to write size.\n                let (dest_ptr, dest_size, bytes_written_ptr) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.read_host_buffer,\n                    [dest_ptr, dest_size, bytes_written_ptr],\n                )?;\n                let ret = self.read_host_buffer(dest_ptr, dest_size as usize, bytes_written_ptr)?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            
}\n\n            FunctionIndex::CreateContractPackageAtHash => {\n                // args(0) = pointer to wasm memory where to write 32-byte Hash address\n                // args(1) = pointer to wasm memory where to write 32-byte access key address\n                // args(2) = boolean flag to determine if the contract can be versioned\n                let (hash_dest_ptr, access_dest_ptr, is_locked) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.create_contract_package_at_hash,\n                    [hash_dest_ptr, access_dest_ptr],\n                )?;\n                let package_status = PackageStatus::new(is_locked);\n                let (hash_addr, access_addr) =\n                    self.create_contract_package_at_hash(package_status)?;\n\n                self.function_address(hash_addr, hash_dest_ptr)?;\n                self.function_address(access_addr, access_dest_ptr)?;\n                Ok(None)\n            }\n\n            FunctionIndex::CreateContractUserGroup => {\n                // args(0) = pointer to package key in wasm memory\n                // args(1) = size of package key in wasm memory\n                // args(2) = pointer to group label in wasm memory\n                // args(3) = size of group label in wasm memory\n                // args(4) = number of new urefs to generate for the group\n                // args(5) = pointer to existing_urefs in wasm memory\n                // args(6) = size of existing_urefs in wasm memory\n                // args(7) = pointer to location to write size of output (written to host buffer)\n                let (\n                    package_key_ptr,\n                    package_key_size,\n                    label_ptr,\n                    label_size,\n                    num_new_urefs,\n                    existing_urefs_ptr,\n                    existing_urefs_size,\n                    output_size_ptr,\n                ) = 
Args::parse(args)?;\n\n                self.charge_host_function_call(\n                    &host_function_costs.create_contract_user_group,\n                    [\n                        package_key_ptr,\n                        package_key_size,\n                        label_ptr,\n                        label_size,\n                        num_new_urefs,\n                        existing_urefs_ptr,\n                        existing_urefs_size,\n                        output_size_ptr,\n                    ],\n                )?;\n\n                let contract_package_hash: PackageHash =\n                    self.t_from_mem(package_key_ptr, package_key_size)?;\n                let label: String = self.t_from_mem(label_ptr, label_size)?;\n                let existing_urefs: BTreeSet<URef> =\n                    self.t_from_mem(existing_urefs_ptr, existing_urefs_size)?;\n\n                let ret = self.create_contract_user_group(\n                    contract_package_hash,\n                    label,\n                    num_new_urefs,\n                    existing_urefs,\n                    output_size_ptr,\n                )?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            FunctionIndex::AddContractVersion => {\n                // args(0) = pointer to package key in wasm memory\n                // args(1) = size of package key in wasm memory\n                // args(2) = pointer to entity version in wasm memory\n                // args(3) = pointer to entrypoints in wasm memory\n                // args(4) = size of entrypoints in wasm memory\n                // args(5) = pointer to named keys in wasm memory\n                // args(6) = size of named keys in wasm memory\n                // args(7) = pointer to output buffer for serialized key\n                // args(8) = size of output buffer\n                // args(9) = pointer to bytes written\n                let (\n                    contract_package_hash_ptr,\n                    contract_package_hash_size,\n         
           version_ptr,\n                    entry_points_ptr,\n                    entry_points_size,\n                    named_keys_ptr,\n                    named_keys_size,\n                    output_ptr,\n                    output_size,\n                    bytes_written_ptr,\n                ) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.add_contract_version,\n                    [\n                        contract_package_hash_ptr,\n                        contract_package_hash_size,\n                        version_ptr,\n                        entry_points_ptr,\n                        entry_points_size,\n                        named_keys_ptr,\n                        named_keys_size,\n                        output_ptr,\n                        output_size,\n                        bytes_written_ptr,\n                    ],\n                )?;\n\n                let contract_package_hash: ContractPackageHash =\n                    self.t_from_mem(contract_package_hash_ptr, contract_package_hash_size)?;\n                let package_hash = PackageHash::new(contract_package_hash.value());\n                let entry_points: EntryPoints = {\n                    let contract_entry_points: ContractEntryPoints =\n                        self.t_from_mem(entry_points_ptr, entry_points_size)?;\n\n                    let points: Vec<EntityEntryPoint> = contract_entry_points\n                        .take_entry_points()\n                        .into_iter()\n                        .map(EntityEntryPoint::from)\n                        .collect();\n\n                    points.into()\n                };\n                let named_keys: NamedKeys = self.t_from_mem(named_keys_ptr, named_keys_size)?;\n                let ret = self.add_contract_version(\n                    package_hash,\n                    version_ptr,\n                    entry_points,\n                    named_keys,\n            
        BTreeMap::new(),\n                    output_ptr,\n                )?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n            FunctionIndex::AddContractVersionWithMessageTopics => {\n                // args(0)  = pointer to package hash in wasm memory\n                // args(1)  = size of package hash in wasm memory\n                // args(2)  = pointer to entity version in wasm memory\n                // args(3)  = pointer to entrypoints in wasm memory\n                // args(4)  = size of entrypoints in wasm memory\n                // args(5)  = pointer to named keys in wasm memory\n                // args(6)  = size of named keys in wasm memory\n                // args(7)  = pointer to the new topic names in wasm memory\n                // args(8)  = size of the new topic names in wasm memory\n                // args(9)  = pointer to output buffer for serialized key\n                // args(10) = size of output buffer\n                let (\n                    contract_package_hash_ptr,\n                    contract_package_hash_size,\n                    version_ptr,\n                    entry_points_ptr,\n                    entry_points_size,\n                    named_keys_ptr,\n                    named_keys_size,\n                    message_topics_ptr,\n                    message_topics_size,\n                    output_ptr,\n                    output_size,\n                ) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.add_contract_version_with_message_topics,\n                    [\n                        contract_package_hash_ptr,\n                        contract_package_hash_size,\n                        version_ptr,\n                        entry_points_ptr,\n                        entry_points_size,\n                        named_keys_ptr,\n                        named_keys_size,\n                        
message_topics_ptr,\n                        message_topics_size,\n                        output_ptr,\n                        output_size,\n                    ],\n                )?;\n\n                // Exit if unable to return output.\n                if output_size < 32 {\n                    // `output_size` must be >= actual length of serialized hash bytes\n                    return Ok(Some(RuntimeValue::I32(api_error::i32_from(Err(\n                        ApiError::BufferTooSmall,\n                    )))));\n                }\n\n                let package_hash: PackageHash =\n                    self.t_from_mem(contract_package_hash_ptr, contract_package_hash_size)?;\n                let entry_points: EntryPoints =\n                    self.t_from_mem(entry_points_ptr, entry_points_size)?;\n                let named_keys: NamedKeys = self.t_from_mem(named_keys_ptr, named_keys_size)?;\n                let message_topics: BTreeMap<String, MessageTopicOperation> =\n                    self.t_from_mem(message_topics_ptr, message_topics_size)?;\n\n                // Check that the names of the topics that are added are within the configured\n                // limits.\n                let message_limits = self.context.engine_config().wasm_config().messages_limits();\n                for (topic_name, _) in\n                    message_topics\n                        .iter()\n                        .filter(|(_, operation)| match operation {\n                            MessageTopicOperation::Add => true,\n                        })\n                {\n                    if topic_name.len() > message_limits.max_topic_name_size() as usize {\n                        return Ok(Some(RuntimeValue::I32(api_error::i32_from(Err(\n                            ApiError::MaxTopicNameSizeExceeded,\n                        )))));\n                    }\n                }\n\n                let ret = self.add_contract_version(\n                    package_hash,\n          
          version_ptr,\n                    entry_points,\n                    named_keys,\n                    message_topics,\n                    output_ptr,\n                )?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            FunctionIndex::AddPackageVersionWithMessageTopics => {\n                // args(0)  = pointer to package hash in wasm memory\n                // args(1)  = size of package hash in wasm memory\n                // args(2)  = pointer to entity version in wasm memory\n                // args(3)  = pointer to entrypoints in wasm memory\n                // args(4)  = size of entrypoints in wasm memory\n                // args(5)  = pointer to named keys in wasm memory\n                // args(6)  = size of named keys in wasm memory\n                // args(7)  = pointer to the new topic names in wasm memory\n                // args(8)  = size of the new topic names in wasm memory\n                // args(9)  = pointer to output buffer for serialized key\n                // args(10) = size of output buffer\n                let (\n                    contract_package_hash_ptr,\n                    contract_package_hash_size,\n                    version_ptr,\n                    entry_points_ptr,\n                    entry_points_size,\n                    named_keys_ptr,\n                    named_keys_size,\n                    message_topics,\n                    message_topics_size,\n                    output_ptr,\n                    output_size,\n                ) = Args::parse(args)?;\n\n                self.charge_host_function_call(\n                    &host_function_costs.add_package_version_with_message_topics,\n                    [\n                        contract_package_hash_ptr,\n                        contract_package_hash_size,\n                        version_ptr,\n                        entry_points_ptr,\n                        entry_points_size,\n                       
 named_keys_ptr,\n                        named_keys_size,\n                        message_topics,\n                        message_topics_size,\n                        output_ptr,\n                        output_size,\n                    ],\n                )?;\n\n                // Exit if unable to return output.\n                if output_size < 32 {\n                    // `output_size` must be >= actual length of serialized hash bytes\n                    return Ok(Some(RuntimeValue::I32(api_error::i32_from(Err(\n                        ApiError::BufferTooSmall,\n                    )))));\n                }\n\n                let package_hash: PackageHash =\n                    self.t_from_mem(contract_package_hash_ptr, contract_package_hash_size)?;\n                let entry_points: EntryPoints =\n                    self.t_from_mem(entry_points_ptr, entry_points_size)?;\n                let named_keys: NamedKeys = self.t_from_mem(named_keys_ptr, named_keys_size)?;\n                let message_topics: BTreeMap<String, MessageTopicOperation> =\n                    self.t_from_mem(message_topics, message_topics_size)?;\n\n                // Check that the names of the topics that are added are within the configured\n                // limits.\n                let message_limits = self.context.engine_config().wasm_config().messages_limits();\n                for (topic_name, _) in\n                    message_topics\n                        .iter()\n                        .filter(|(_, operation)| match operation {\n                            MessageTopicOperation::Add => true,\n                        })\n                {\n                    if topic_name.len() > message_limits.max_topic_name_size() as usize {\n                        return Ok(Some(RuntimeValue::I32(api_error::i32_from(Err(\n                            ApiError::MaxTopicNameSizeExceeded,\n                        )))));\n                    }\n                }\n\n                let 
ret = self.add_contract_version(\n                    package_hash,\n                    version_ptr,\n                    entry_points,\n                    named_keys,\n                    message_topics,\n                    output_ptr,\n                )?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            FunctionIndex::DisableContractVersion => {\n                // args(0) = pointer to package hash in wasm memory\n                // args(1) = size of package hash in wasm memory\n                // args(2) = pointer to contract hash in wasm memory\n                // args(3) = size of contract hash in wasm memory\n                let (package_key_ptr, package_key_size, contract_hash_ptr, contract_hash_size) =\n                    Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.disable_contract_version,\n                    [\n                        package_key_ptr,\n                        package_key_size,\n                        contract_hash_ptr,\n                        contract_hash_size,\n                    ],\n                )?;\n                let contract_package_hash = self.t_from_mem(package_key_ptr, package_key_size)?;\n                let contract_hash = self.t_from_mem(contract_hash_ptr, contract_hash_size)?;\n\n                let result = self.disable_contract_version(contract_package_hash, contract_hash)?;\n\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(result))))\n            }\n\n            FunctionIndex::CallContractFuncIndex => {\n                // args(0) = pointer to contract hash where contract is at in global state\n                // args(1) = size of contract hash\n                // args(2) = pointer to entry point\n                // args(3) = size of entry point\n                // args(4) = pointer to function arguments in Wasm memory\n                // args(5) = size of arguments\n            
    // args(6) = pointer to result size (output)\n                let (\n                    contract_hash_ptr,\n                    contract_hash_size,\n                    entry_point_name_ptr,\n                    entry_point_name_size,\n                    args_ptr,\n                    args_size,\n                    result_size_ptr,\n                ) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.call_contract,\n                    [\n                        contract_hash_ptr,\n                        contract_hash_size,\n                        entry_point_name_ptr,\n                        entry_point_name_size,\n                        args_ptr,\n                        args_size,\n                        result_size_ptr,\n                    ],\n                )?;\n\n                let contract_hash: AddressableEntityHash =\n                    self.t_from_mem(contract_hash_ptr, contract_hash_size)?;\n                let entry_point_name: String =\n                    self.t_from_mem(entry_point_name_ptr, entry_point_name_size)?;\n                let args_bytes: Vec<u8> = {\n                    let args_size: u32 = args_size;\n                    self.bytes_from_mem(args_ptr, args_size as usize)?.to_vec()\n                };\n\n                let ret = self.call_contract_host_buffer(\n                    contract_hash,\n                    &entry_point_name,\n                    &args_bytes,\n                    result_size_ptr,\n                )?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            FunctionIndex::CallVersionedContract => {\n                // args(0) = pointer to contract_package_hash where contract is at in global state\n                // args(1) = size of contract_package_hash\n                // args(2) = pointer to contract version in wasm memory\n                // args(3) = size of contract version in wasm 
memory\n                // args(4) = pointer to method name in wasm memory\n                // args(5) = size of method name in wasm memory\n                // args(6) = pointer to function arguments in Wasm memory\n                // args(7) = size of arguments\n                // args(8) = pointer to result size (output)\n                let (\n                    contract_package_hash_ptr,\n                    contract_package_hash_size,\n                    contract_version_ptr,\n                    contract_package_size,\n                    entry_point_name_ptr,\n                    entry_point_name_size,\n                    args_ptr,\n                    args_size,\n                    result_size_ptr,\n                ) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.call_versioned_contract,\n                    [\n                        contract_package_hash_ptr,\n                        contract_package_hash_size,\n                        contract_version_ptr,\n                        contract_package_size,\n                        entry_point_name_ptr,\n                        entry_point_name_size,\n                        args_ptr,\n                        args_size,\n                        result_size_ptr,\n                    ],\n                )?;\n\n                let contract_package_hash: PackageHash =\n                    self.t_from_mem(contract_package_hash_ptr, contract_package_hash_size)?;\n                let contract_version: Option<EntityVersion> =\n                    self.t_from_mem(contract_version_ptr, contract_package_size)?;\n                let entry_point_name: String =\n                    self.t_from_mem(entry_point_name_ptr, entry_point_name_size)?;\n                let args_bytes: Vec<u8> = {\n                    let args_size: u32 = args_size;\n                    self.bytes_from_mem(args_ptr, args_size as usize)?.to_vec()\n                };\n\n            
    let ret = self.call_versioned_contract_host_buffer(\n                    contract_package_hash,\n                    contract_version,\n                    entry_point_name,\n                    &args_bytes,\n                    result_size_ptr,\n                )?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            #[cfg(feature = \"test-support\")]\n            FunctionIndex::PrintIndex => {\n                let (text_ptr, text_size) = Args::parse(args)?;\n                self.charge_host_function_call(&host_function_costs.print, [text_ptr, text_size])?;\n                self.print(text_ptr, text_size)?;\n                Ok(None)\n            }\n\n            FunctionIndex::GetRuntimeArgsizeIndex => {\n                // args(0) = pointer to name of host runtime arg to load\n                // args(1) = size of name of the host runtime arg\n                // args(2) = pointer to a argument size (output)\n                let (name_ptr, name_size, size_ptr) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.get_named_arg_size,\n                    [name_ptr, name_size, size_ptr],\n                )?;\n                let ret = self.get_named_arg_size(name_ptr, name_size as usize, size_ptr)?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            FunctionIndex::GetRuntimeArgIndex => {\n                // args(0) = pointer to serialized argument name\n                // args(1) = size of serialized argument name\n                // args(2) = pointer to output pointer where host will write argument bytes\n                // args(3) = size of available data under output pointer\n                let (name_ptr, name_size, dest_ptr, dest_size) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.get_named_arg,\n                    [name_ptr, name_size, 
dest_ptr, dest_size],\n                )?;\n                let ret =\n                    self.get_named_arg(name_ptr, name_size as usize, dest_ptr, dest_size as usize)?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            FunctionIndex::RemoveContractUserGroupIndex => {\n                // args(0) = pointer to package key in wasm memory\n                // args(1) = size of package key in wasm memory\n                // args(2) = pointer to serialized group label\n                // args(3) = size of serialized group label\n                let (package_key_ptr, package_key_size, label_ptr, label_size) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.remove_contract_user_group,\n                    [package_key_ptr, package_key_size, label_ptr, label_size],\n                )?;\n                let package_key = self.t_from_mem(package_key_ptr, package_key_size)?;\n                let label: Group = self.t_from_mem(label_ptr, label_size)?;\n\n                let ret = self.remove_contract_user_group(package_key, label)?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            FunctionIndex::ExtendContractUserGroupURefsIndex => {\n                // args(0) = pointer to package key in wasm memory\n                // args(1) = size of package key in wasm memory\n                // args(2) = pointer to label name\n                // args(3) = label size bytes\n                // args(4) = output of size value of host bytes data\n                let (package_ptr, package_size, label_ptr, label_size, value_size_ptr) =\n                    Args::parse(args)?;\n\n                self.charge_host_function_call(\n                    &host_function_costs.provision_contract_user_group_uref,\n                    [\n                        package_ptr,\n                        package_size,\n                        
label_ptr,\n                        label_size,\n                        value_size_ptr,\n                    ],\n                )?;\n                let ret = self.provision_contract_user_group_uref(\n                    package_ptr,\n                    package_size,\n                    label_ptr,\n                    label_size,\n                    value_size_ptr,\n                )?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            FunctionIndex::RemoveContractUserGroupURefsIndex => {\n                // args(0) = pointer to package key in wasm memory\n                // args(1) = size of package key in wasm memory\n                // args(2) = pointer to label name\n                // args(3) = label size bytes\n                // args(4) = pointer to urefs\n                // args(5) = size of urefs pointer\n                let (package_ptr, package_size, label_ptr, label_size, urefs_ptr, urefs_size) =\n                    Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.remove_contract_user_group_urefs,\n                    [\n                        package_ptr,\n                        package_size,\n                        label_ptr,\n                        label_size,\n                        urefs_ptr,\n                        urefs_size,\n                    ],\n                )?;\n                let ret = self.remove_contract_user_group_urefs(\n                    package_ptr,\n                    package_size,\n                    label_ptr,\n                    label_size,\n                    urefs_ptr,\n                    urefs_size,\n                )?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            FunctionIndex::Blake2b => {\n                let (in_ptr, in_size, out_ptr, out_size) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    
&host_function_costs.blake2b,\n                    [in_ptr, in_size, out_ptr, out_size],\n                )?;\n                let digest =\n                    self.checked_memory_slice(in_ptr as usize, in_size as usize, |input| {\n                        cryptography::blake2b(input)\n                    })?;\n\n                let result = if digest.len() != out_size as usize {\n                    Err(ApiError::BufferTooSmall)\n                } else {\n                    Ok(())\n                };\n                if result.is_err() {\n                    return Ok(Some(RuntimeValue::I32(api_error::i32_from(result))));\n                }\n\n                self.try_get_memory()?\n                    .set(out_ptr, &digest)\n                    .map_err(|error| ExecError::Interpreter(error.into()))?;\n                Ok(Some(RuntimeValue::I32(0)))\n            }\n\n            FunctionIndex::NewDictionaryFuncIndex => {\n                // args(0) = pointer to output size (output param)\n                let (output_size_ptr,): (u32,) = Args::parse(args)?;\n                const UREF_LEN: u32 = 33u32;\n                self.charge_host_function_call(&host_function_costs.new_uref, [0, 0, UREF_LEN])?;\n                let ret = self.new_dictionary(output_size_ptr)?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            FunctionIndex::DictionaryGetFuncIndex => {\n                // args(0) = pointer to uref in Wasm memory\n                // args(1) = size of uref in Wasm memory\n                // args(2) = pointer to key bytes pointer in Wasm memory\n                // args(3) = pointer to key bytes size in Wasm memory\n                // args(4) = pointer to output size (output param)\n                let (uref_ptr, uref_size, key_bytes_ptr, key_bytes_size, output_size_ptr): (\n                    _,\n                    u32,\n                    _,\n                    u32,\n                    _,\n                ) = 
Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.dictionary_get,\n                    [key_bytes_ptr, key_bytes_size, output_size_ptr],\n                )?;\n                let ret = self.dictionary_get(\n                    uref_ptr,\n                    uref_size,\n                    key_bytes_ptr,\n                    key_bytes_size,\n                    output_size_ptr,\n                )?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            FunctionIndex::DictionaryPutFuncIndex => {\n                // args(0) = pointer to uref in Wasm memory\n                // args(1) = size of uref in Wasm memory\n                // args(2) = pointer to key bytes pointer in Wasm memory\n                // args(3) = pointer to key bytes size in Wasm memory\n                // args(4) = pointer to value bytes pointer in Wasm memory\n                // args(5) = pointer to value bytes size in Wasm memory\n                let (uref_ptr, uref_size, key_bytes_ptr, key_bytes_size, value_ptr, value_ptr_size): (_, u32, _, u32, _, u32) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.dictionary_put,\n                    [key_bytes_ptr, key_bytes_size, value_ptr, value_ptr_size],\n                )?;\n                let ret = self.dictionary_put(\n                    uref_ptr,\n                    uref_size,\n                    key_bytes_ptr,\n                    key_bytes_size,\n                    value_ptr,\n                    value_ptr_size,\n                )?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            FunctionIndex::DictionaryReadFuncIndex => {\n                // args(0) = pointer to key in Wasm memory\n                // args(1) = size of key in Wasm memory\n                // args(2) = pointer to output size (output param)\n                
let (key_ptr, key_size, output_size_ptr) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.read_value,\n                    [key_ptr, key_size, output_size_ptr],\n                )?;\n                let ret = self.dictionary_read(key_ptr, key_size, output_size_ptr)?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            FunctionIndex::LoadCallStack => {\n                // args(0) (Output) Pointer to number of elements in the call stack.\n                // args(1) (Output) Pointer to size in bytes of the serialized call stack.\n                let (call_stack_len_ptr, result_size_ptr) = Args::parse(args)?;\n\n                self.charge_host_function_call(\n                    &HostFunction::fixed(10_000),\n                    [call_stack_len_ptr, result_size_ptr],\n                )?;\n                let ret = self.load_call_stack(call_stack_len_ptr, result_size_ptr)?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            FunctionIndex::LoadCallerInformation => {\n                // args(0) (Input) Type of action\n                // args(1) (Output) Pointer to number of elements in the call stack.\n                // args(2) (Output) Pointer to size in bytes of the serialized call stack.\n                let (action, call_stack_len_ptr, result_size_ptr) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &HostFunction::fixed(10_000),\n                    [0, call_stack_len_ptr, result_size_ptr],\n                )?;\n                let ret =\n                    self.load_caller_information(action, call_stack_len_ptr, result_size_ptr)?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            FunctionIndex::LoadAuthorizationKeys => {\n                // args(0) (Output) Pointer to number of authorization keys.\n             
   // args(1) (Output) Pointer to size in bytes of the total bytes.\n                let (len_ptr, result_size_ptr) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &HostFunction::fixed(10_000),\n                    [len_ptr, result_size_ptr],\n                )?;\n                let ret = self.load_authorization_keys(len_ptr, result_size_ptr)?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n\n            FunctionIndex::RandomBytes => {\n                let (out_ptr, out_size) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.random_bytes,\n                    [out_ptr, out_size],\n                )?;\n\n                let random_bytes = self.context.random_bytes()?;\n\n                let result = if random_bytes.len() != out_size as usize {\n                    Err(ApiError::BufferTooSmall)\n                } else {\n                    Ok(())\n                };\n                if result.is_err() {\n                    return Ok(Some(RuntimeValue::I32(api_error::i32_from(result))));\n                }\n\n                self.try_get_memory()?\n                    .set(out_ptr, &random_bytes)\n                    .map_err(|error| ExecError::Interpreter(error.into()))?;\n\n                Ok(Some(RuntimeValue::I32(0)))\n            }\n\n            FunctionIndex::EnableContractVersion => {\n                // args(0) = pointer to package hash in wasm memory\n                // args(1) = size of package hash in wasm memory\n                // args(2) = pointer to contract hash in wasm memory\n                // args(3) = size of contract hash in wasm memory\n                let (package_key_ptr, package_key_size, contract_hash_ptr, contract_hash_size) =\n                    Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.enable_contract_version,\n           
         [\n                        package_key_ptr,\n                        package_key_size,\n                        contract_hash_ptr,\n                        contract_hash_size,\n                    ],\n                )?;\n                let contract_package_hash = self.t_from_mem(package_key_ptr, package_key_size)?;\n                let contract_hash = self.t_from_mem(contract_hash_ptr, contract_hash_size)?;\n\n                let result = self.enable_contract_version(contract_package_hash, contract_hash)?;\n\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(result))))\n            }\n\n            FunctionIndex::ManageMessageTopic => {\n                // args(0) = pointer to the serialized topic name string in wasm memory\n                // args(1) = size of the serialized topic name string in wasm memory\n                // args(2) = pointer to the operation to be performed for the specified topic\n                // args(3) = size of the operation\n                let (topic_name_ptr, topic_name_size, operation_ptr, operation_size) =\n                    Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.manage_message_topic,\n                    [\n                        topic_name_ptr,\n                        topic_name_size,\n                        operation_ptr,\n                        operation_size,\n                    ],\n                )?;\n\n                let limits = self.context.engine_config().wasm_config().messages_limits();\n\n                if topic_name_size > limits.max_topic_name_size() {\n                    return Ok(Some(RuntimeValue::I32(api_error::i32_from(Err(\n                        ApiError::MaxTopicNameSizeExceeded,\n                    )))));\n                }\n\n                let topic_name_bytes =\n                    self.bytes_from_mem(topic_name_ptr, topic_name_size as usize)?;\n                let topic_name = 
std::str::from_utf8(&topic_name_bytes)\n                    .map_err(|e| Trap::from(ExecError::InvalidUtf8Encoding(e)))?;\n\n                if operation_size as usize > MessageTopicOperation::max_serialized_len() {\n                    return Err(Trap::from(ExecError::InvalidImputedOperation));\n                }\n                let topic_operation = self\n                    .t_from_mem(operation_ptr, operation_size)\n                    .map_err(|_e| Trap::from(ExecError::InvalidImputedOperation))?;\n\n                // only allow managing messages from stored contracts\n                if !self.context.get_context_key().is_smart_contract_key() {\n                    return Err(Trap::from(ExecError::InvalidContext));\n                }\n\n                let result = match topic_operation {\n                    MessageTopicOperation::Add => {\n                        self.add_message_topic(topic_name).map_err(Trap::from)?\n                    }\n                };\n\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(result))))\n            }\n\n            FunctionIndex::EmitMessage => {\n                // args(0) = pointer to the serialized topic name string in wasm memory\n                // args(1) = size of the serialized name string in wasm memory\n                // args(2) = pointer to the serialized message payload in wasm memory\n                // args(3) = size of the serialized message payload in wasm memory\n                let (topic_name_ptr, topic_name_size, message_ptr, message_size) =\n                    Args::parse(args)?;\n\n                // Charge for the call to emit message. 
This increases for every message emitted\n                // within an execution so we're not using the static value from the wasm config.\n                self.context\n                    .charge_gas(Gas::new(self.context.emit_message_cost()))?;\n                // Charge for parameter weights.\n                self.charge_host_function_call(\n                    &HostFunction::new(0, host_function_costs.emit_message.arguments()),\n                    &[topic_name_ptr, topic_name_size, message_ptr, message_size],\n                )?;\n\n                let limits = self.context.engine_config().wasm_config().messages_limits();\n\n                if topic_name_size > limits.max_topic_name_size() {\n                    return Ok(Some(RuntimeValue::I32(api_error::i32_from(Err(\n                        ApiError::MaxTopicNameSizeExceeded,\n                    )))));\n                }\n\n                if message_size > limits.max_message_size() {\n                    return Ok(Some(RuntimeValue::I32(api_error::i32_from(Err(\n                        ApiError::MessageTooLarge,\n                    )))));\n                }\n\n                let topic_name_bytes =\n                    self.bytes_from_mem(topic_name_ptr, topic_name_size as usize)?;\n                let topic_name = std::str::from_utf8(&topic_name_bytes)\n                    .map_err(|e| Trap::from(ExecError::InvalidUtf8Encoding(e)))?;\n\n                let message = self.t_from_mem(message_ptr, message_size)?;\n\n                let result = self.emit_message(topic_name, message)?;\n                if result.is_ok() {\n                    // Increase the cost for the next call to emit a message.\n                    let new_cost = self\n                        .context\n                        .emit_message_cost()\n                        .checked_add(host_function_costs.cost_increase_per_message.into())\n                        .ok_or(ExecError::GasLimit)?;\n                    
self.context.set_emit_message_cost(new_cost);\n                }\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(result))))\n            }\n\n            FunctionIndex::GetBlockInfoIndex => {\n                // args(0) = field selector\n                // args(1) = pointer to output pointer where host will write argument bytes\n                let (field_idx, dest_ptr): (u8, u32) = Args::parse(args)?;\n\n                self.charge_host_function_call(&host_function_costs.get_block_info, [0u32, 0u32])?;\n                self.get_block_info(field_idx, dest_ptr)?;\n                Ok(None)\n            }\n\n            FunctionIndex::GenericHash => {\n                // args(0) = pointer to input in Wasm memory\n                // args(1) = size of input in Wasm memory\n                // args(2) = integer representation of HashAlgorithm enum variant\n                // args(3) = pointer to output pointer in Wasm memory\n                // args(4) = size of output\n                let (in_ptr, in_size, hash_algo_type, out_ptr, out_size) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.generic_hash,\n                    [in_ptr, in_size, hash_algo_type, out_ptr, out_size],\n                )?;\n                let hash_algo_type = match HashAlgorithm::try_from(hash_algo_type as u8) {\n                    Ok(v) => v,\n                    Err(_e) => {\n                        return Ok(Some(RuntimeValue::I32(api_error::i32_from(Err(\n                            ApiError::InvalidArgument,\n                        )))))\n                    }\n                };\n\n                let digest =\n                    self.checked_memory_slice(in_ptr as usize, in_size as usize, |input| {\n                        match hash_algo_type {\n                            HashAlgorithm::Blake2b => cryptography::blake2b(input),\n                            HashAlgorithm::Blake3 => 
cryptography::blake3(input),\n                            HashAlgorithm::Sha256 => cryptography::sha256(input),\n                        }\n                    })?;\n\n                let result = if digest.len() > out_size as usize {\n                    Err(ApiError::BufferTooSmall)\n                } else {\n                    Ok(())\n                };\n\n                if result.is_err() {\n                    return Ok(Some(RuntimeValue::I32(api_error::i32_from(result))));\n                }\n\n                if self.try_get_memory()?.set(out_ptr, &digest).is_err() {\n                    return Ok(Some(RuntimeValue::I32(\n                        u32::from(ApiError::HostBufferEmpty) as i32,\n                    )));\n                }\n\n                Ok(Some(RuntimeValue::I32(0)))\n            }\n\n            FunctionIndex::RecoverSecp256k1 => {\n                // args(0) = pointer to input bytes in memory\n                // args(1) = length of input bytes in memory\n                // args(2) = pointer to signature bytes in memory\n                // args(3) = length of signature bytes in memory\n                // args(4) = pointer to public key buffer in memory (size is fixed)\n                // args(5) = the recovery id\n\n                let (\n                    data_ptr,\n                    data_size,\n                    signature_ptr,\n                    signature_size,\n                    public_key_ptr,\n                    recovery_id,\n                ) = Args::parse(args)?;\n\n                self.charge_host_function_call(\n                    &host_function_costs.recover_secp256k1,\n                    [\n                        data_ptr,\n                        data_size,\n                        signature_ptr,\n                        signature_size,\n                        public_key_ptr,\n                        recovery_id,\n                    ],\n                )?;\n\n                if recovery_id >= 4 {\n               
     return Ok(Some(RuntimeValue::I32(\n                        u32::from(ApiError::InvalidArgument) as i32,\n                    )));\n                }\n\n                let data = self.bytes_from_mem(data_ptr, data_size as usize)?;\n                let signature: Signature = self.t_from_mem(signature_ptr, signature_size)?;\n\n                let Ok(public_key) =\n                    casper_types::crypto::recover_secp256k1(data, &signature, recovery_id as u8)\n                else {\n                    return Ok(Some(RuntimeValue::I32(\n                        u32::from(ApiError::InvalidArgument) as i32,\n                    )));\n                };\n\n                let Ok(key_bytes) = public_key.to_bytes() else {\n                    return Ok(Some(RuntimeValue::I32(\n                        u32::from(ApiError::OutOfMemory) as i32\n                    )));\n                };\n\n                if self\n                    .try_get_memory()?\n                    .set(public_key_ptr, &key_bytes)\n                    .is_err()\n                {\n                    return Ok(Some(RuntimeValue::I32(\n                        u32::from(ApiError::HostBufferEmpty) as i32,\n                    )));\n                }\n\n                Ok(Some(RuntimeValue::I32(0)))\n            }\n\n            FunctionIndex::VerifySignature => {\n                // args(0) = pointer to message bytes in memory\n                // args(1) = length of message bytes\n                // args(2) = pointer to signature bytes in memory\n                // args(3) = length of signature bytes\n                // args(4) = pointer to public key bytes in memory\n                // args(5) = length of public key bytes\n                let (\n                    message_ptr,\n                    message_size,\n                    signature_ptr,\n                    signature_size,\n                    public_key_ptr,\n                    public_key_size,\n                ) = 
Args::parse(args)?;\n\n                self.charge_host_function_call(\n                    &host_function_costs.verify_signature,\n                    [\n                        message_ptr,\n                        message_size,\n                        signature_ptr,\n                        signature_size,\n                        public_key_ptr,\n                        public_key_size,\n                    ],\n                )?;\n\n                let message = self.bytes_from_mem(message_ptr, message_size as usize)?;\n                let signature: Signature = self.t_from_mem(signature_ptr, signature_size)?;\n                let public_key: PublicKey = self.t_from_mem(public_key_ptr, public_key_size)?;\n\n                if casper_types::crypto::verify(message, &signature, &public_key).is_err() {\n                    return Ok(Some(RuntimeValue::I32(\n                        u32::from(ApiError::InvalidArgument) as i32,\n                    )));\n                }\n\n                Ok(Some(RuntimeValue::I32(0)))\n            }\n            FunctionIndex::CallPackageVersion => {\n                // args(0) = pointer to contract_package_hash where contract is at in global state\n                // args(1) = size of contract_package_hash\n                // args(2) = pointer to major version in wasm memory\n                // args(3) = size of major version in wasm memory\n                // args(3) = pointer to contract version in wasm memory\n                // args(4) = size of contract version in wasm memory\n                // args(5) = pointer to method name in wasm memory\n                // args(6) = size of method name in wasm memory\n                // args(7) = pointer to function arguments in Wasm memory\n                // args(8) = size of arguments\n                // args(9) = pointer to result size (output)\n                let (\n                    contract_package_hash_ptr,\n                    contract_package_hash_size,\n                  
  major_version_ptr,\n                    major_version_size,\n                    contract_version_ptr,\n                    contract_version_size,\n                    entry_point_name_ptr,\n                    entry_point_name_size,\n                    args_ptr,\n                    args_size,\n                    result_size_ptr,\n                ) = Args::parse(args)?;\n                self.charge_host_function_call(\n                    &host_function_costs.call_package_version,\n                    [\n                        contract_package_hash_ptr,\n                        contract_package_hash_size,\n                        major_version_ptr,\n                        major_version_size,\n                        contract_version_ptr,\n                        contract_version_size,\n                        entry_point_name_ptr,\n                        entry_point_name_size,\n                        args_ptr,\n                        args_size,\n                        result_size_ptr,\n                    ],\n                )?;\n\n                let contract_package_hash: PackageHash =\n                    self.t_from_mem(contract_package_hash_ptr, contract_package_hash_size)?;\n                let contract_version: Option<EntityVersion> =\n                    self.t_from_mem(contract_version_ptr, contract_version_size)?;\n                let major_version: Option<ProtocolVersionMajor> =\n                    self.t_from_mem(major_version_ptr, major_version_size)?;\n                let entry_point_name: String =\n                    self.t_from_mem(entry_point_name_ptr, entry_point_name_size)?;\n                let args_bytes: Vec<u8> = {\n                    let args_size: u32 = args_size;\n                    self.bytes_from_mem(args_ptr, args_size as usize)?.to_vec()\n                };\n\n                let ret = self.call_package_version_host_buffer(\n                    contract_package_hash,\n                    major_version,\n                  
  contract_version,\n                    entry_point_name,\n                    &args_bytes,\n                    result_size_ptr,\n                )?;\n                Ok(Some(RuntimeValue::I32(api_error::i32_from(ret))))\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "execution_engine/src/runtime/handle_payment_internal.rs",
    "content": "use casper_storage::global_state::{error::Error as GlobalStateError, state::StateReader};\nuse std::collections::BTreeSet;\n\nuse casper_types::{\n    account::AccountHash, addressable_entity::NamedKeyAddr, system::handle_payment::Error, Account,\n    CLValue, Contract, FeeHandling, Key, Phase, RefundHandling, StoredValue, TransferredTo, URef,\n    U512,\n};\n\nuse casper_storage::system::handle_payment::{\n    mint_provider::MintProvider, runtime_provider::RuntimeProvider,\n    storage_provider::StorageProvider, HandlePayment,\n};\n\nuse crate::{execution::ExecError, runtime::Runtime};\n\nimpl From<ExecError> for Option<Error> {\n    fn from(exec_error: ExecError) -> Self {\n        match exec_error {\n            // This is used to propagate [`ExecError::GasLimit`] to make sure\n            // [`HandlePayment`] contract running natively supports propagating gas limit\n            // errors without a panic.\n            ExecError::GasLimit => Some(Error::GasLimit),\n            // There are possibly other exec errors happening but such translation would be lossy.\n            _ => None,\n        }\n    }\n}\n\nimpl<R> MintProvider for Runtime<'_, R>\nwhere\n    R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn transfer_purse_to_account(\n        &mut self,\n        source: URef,\n        target: AccountHash,\n        amount: U512,\n    ) -> Result<TransferredTo, Error> {\n        match self.transfer_from_purse_to_account_hash(source, target, amount, None) {\n            Ok(Ok(transferred_to)) => Ok(transferred_to),\n            Ok(Err(_mint_error)) => Err(Error::Transfer),\n            Err(exec_error) => Err(<Option<Error>>::from(exec_error).unwrap_or(Error::Transfer)),\n        }\n    }\n\n    fn transfer_purse_to_purse(\n        &mut self,\n        source: URef,\n        target: URef,\n        amount: U512,\n    ) -> Result<(), Error> {\n        let contract_hash = match self.get_mint_hash() {\n            Ok(mint_hash) => 
mint_hash,\n            Err(exec_error) => {\n                return Err(<Option<Error>>::from(exec_error).unwrap_or(Error::Transfer));\n            }\n        };\n        match self.mint_transfer(contract_hash, None, source, target, amount, None) {\n            Ok(Ok(_)) => Ok(()),\n            Ok(Err(_mint_error)) => Err(Error::Transfer),\n            Err(exec_error) => Err(<Option<Error>>::from(exec_error).unwrap_or(Error::Transfer)),\n        }\n    }\n\n    fn available_balance(&mut self, purse: URef) -> Result<Option<U512>, Error> {\n        Runtime::available_balance(self, purse)\n            .map_err(|exec_error| <Option<Error>>::from(exec_error).unwrap_or(Error::GetBalance))\n    }\n\n    fn reduce_total_supply(&mut self, amount: U512) -> Result<(), Error> {\n        let contract_hash = match self.get_mint_hash() {\n            Ok(mint_hash) => mint_hash,\n            Err(exec_error) => {\n                return Err(<Option<Error>>::from(exec_error).unwrap_or(Error::Transfer));\n            }\n        };\n        if let Err(exec_error) = self.mint_reduce_total_supply(contract_hash, amount) {\n            Err(<Option<Error>>::from(exec_error).unwrap_or(Error::ReduceTotalSupply))\n        } else {\n            Ok(())\n        }\n    }\n}\n\nimpl<R> RuntimeProvider for Runtime<'_, R>\nwhere\n    R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn get_key(&mut self, name: &str) -> Option<Key> {\n        match self.context.named_keys_get(name).cloned() {\n            None => match self.context.get_context_key() {\n                Key::AddressableEntity(entity_addr) => {\n                    let key = if let Ok(addr) =\n                        NamedKeyAddr::new_from_string(entity_addr, name.to_string())\n                    {\n                        Key::NamedKey(addr)\n                    } else {\n                        return None;\n                    };\n                    if let Ok(Some(StoredValue::NamedKey(value))) = 
self.context.read_gs(&key) {\n                        value.get_key().ok()\n                    } else {\n                        None\n                    }\n                }\n                Key::Hash(_) => {\n                    match self\n                        .context\n                        .read_gs_typed::<Contract>(&self.context.get_context_key())\n                    {\n                        Ok(contract) => contract.named_keys().get(name).copied(),\n                        Err(_) => None,\n                    }\n                }\n                Key::Account(_) => {\n                    match self\n                        .context\n                        .read_gs_typed::<Account>(&self.context.get_context_key())\n                    {\n                        Ok(account) => account.named_keys().get(name).copied(),\n                        Err(_) => None,\n                    }\n                }\n                _ => None,\n            },\n            Some(key) => Some(key),\n        }\n    }\n\n    fn put_key(&mut self, name: &str, key: Key) -> Result<(), Error> {\n        self.context\n            .put_key(name.to_string(), key)\n            .map_err(|exec_error| <Option<Error>>::from(exec_error).unwrap_or(Error::PutKey))\n    }\n\n    fn remove_key(&mut self, name: &str) -> Result<(), Error> {\n        self.context\n            .remove_key(name)\n            .map_err(|exec_error| <Option<Error>>::from(exec_error).unwrap_or(Error::RemoveKey))\n    }\n\n    fn get_phase(&self) -> Phase {\n        self.context.phase()\n    }\n\n    fn get_caller(&self) -> AccountHash {\n        self.context.get_initiator()\n    }\n\n    fn refund_handling(&self) -> RefundHandling {\n        self.context.engine_config().refund_handling()\n    }\n\n    fn fee_handling(&self) -> FeeHandling {\n        self.context.engine_config().fee_handling()\n    }\n\n    fn administrative_accounts(&self) -> BTreeSet<AccountHash> {\n        self.context\n            
.engine_config()\n            .administrative_accounts()\n            .clone()\n    }\n}\n\nimpl<R> StorageProvider for Runtime<'_, R>\nwhere\n    R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn write_balance(&mut self, purse_uref: URef, amount: U512) -> Result<(), Error> {\n        let cl_amount = CLValue::from_t(amount).map_err(|_| Error::Storage)?;\n        self.context\n            .metered_write_gs_unsafe(Key::Balance(purse_uref.addr()), cl_amount)\n            .map_err(|exec_error| <Option<Error>>::from(exec_error).unwrap_or(Error::Storage))?;\n        Ok(())\n    }\n}\n\nimpl<R> HandlePayment for Runtime<'_, R> where\n    R: StateReader<Key, StoredValue, Error = GlobalStateError>\n{\n}\n"
  },
  {
    "path": "execution_engine/src/runtime/host_function_flag.rs",
    "content": "use std::{cell::Cell, rc::Rc};\n\nuse tracing::error;\n\n/// A flag to indicate whether the current runtime call is made within the scope of a host function.\n///\n/// The flag is backed by an `Rc<Cell<u64>>`, meaning that clones will all share state.\n#[derive(Default, Clone)]\npub(super) struct HostFunctionFlag {\n    /// A counter which, if non-zero, indicates that the `HostFunctionFlag` is `true`.\n    counter: Rc<Cell<u64>>,\n}\n\nimpl HostFunctionFlag {\n    /// Returns `true` if this `HostFunctionFlag` has entered any number of host function scopes\n    /// without having exited them all.\n    pub(super) fn is_in_host_function_scope(&self) -> bool {\n        self.counter.get() != 0\n    }\n\n    /// Must be called when entering a host function scope.\n    ///\n    /// The returned `ScopedHostFunctionFlag` must be kept alive for the duration of the host\n    /// function call.  While at least one such `ScopedHostFunctionFlag` exists,\n    /// `is_in_host_function_scope()` returns `true`.\n    #[must_use]\n    pub(super) fn enter_host_function_scope(&self) -> ScopedHostFunctionFlag {\n        let new_count = self.counter.get().checked_add(1).unwrap_or_else(|| {\n            error!(\"checked_add failure in host function flag counter\");\n            debug_assert!(false, \"checked_add failure in host function flag counter\");\n            u64::MAX\n        });\n        self.counter.set(new_count);\n        ScopedHostFunctionFlag {\n            counter: self.counter.clone(),\n        }\n    }\n}\n\npub(super) struct ScopedHostFunctionFlag {\n    counter: Rc<Cell<u64>>,\n}\n\nimpl Drop for ScopedHostFunctionFlag {\n    fn drop(&mut self) {\n        let new_count = self.counter.get().checked_sub(1).unwrap_or_else(|| {\n            error!(\"checked_sub failure in host function flag counter\");\n            debug_assert!(false, \"checked_sub failure in host function flag counter\");\n            0\n        });\n        self.counter.set(new_count);\n   
 }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn should_handle_multiple_scopes() {\n        let flag = HostFunctionFlag::default();\n        assert!(!flag.is_in_host_function_scope());\n\n        {\n            let _outer_scope = flag.enter_host_function_scope();\n            assert_eq!(flag.counter.get(), 1);\n            assert!(flag.is_in_host_function_scope());\n\n            {\n                let _inner_scope = flag.enter_host_function_scope();\n                assert_eq!(flag.counter.get(), 2);\n                assert!(flag.is_in_host_function_scope());\n            }\n\n            assert_eq!(flag.counter.get(), 1);\n            assert!(flag.is_in_host_function_scope());\n\n            {\n                let cloned_flag = flag.clone();\n                assert_eq!(cloned_flag.counter.get(), 1);\n                assert!(cloned_flag.is_in_host_function_scope());\n                assert!(flag.is_in_host_function_scope());\n\n                let _inner_scope = cloned_flag.enter_host_function_scope();\n                assert_eq!(cloned_flag.counter.get(), 2);\n                assert!(cloned_flag.is_in_host_function_scope());\n                assert!(flag.is_in_host_function_scope());\n            }\n\n            assert_eq!(flag.counter.get(), 1);\n            assert!(flag.is_in_host_function_scope());\n        }\n\n        assert_eq!(flag.counter.get(), 0);\n        assert!(!flag.is_in_host_function_scope());\n    }\n}\n"
  },
  {
    "path": "execution_engine/src/runtime/mint_internal.rs",
    "content": "use tracing::error;\n\nuse casper_storage::{\n    global_state::{error::Error as GlobalStateError, state::StateReader},\n    system::{\n        error::ProviderError,\n        mint::{\n            runtime_provider::RuntimeProvider, storage_provider::StorageProvider,\n            system_provider::SystemProvider, Mint,\n        },\n    },\n};\nuse casper_types::{\n    account::AccountHash,\n    bytesrepr::{FromBytes, ToBytes},\n    system::{mint::Error, Caller},\n    CLTyped, CLValue, Key, Phase, RuntimeFootprint, StoredValue, SystemHashRegistry, URef, U512,\n};\n\nuse super::Runtime;\nuse crate::execution::ExecError;\n\nimpl From<ExecError> for Option<Error> {\n    fn from(exec_error: ExecError) -> Self {\n        match exec_error {\n            // This is used to propagate [`ExecError::GasLimit`] to make sure [`Mint`]\n            // contract running natively supports propagating gas limit errors without a panic.\n            ExecError::GasLimit => Some(Error::GasLimit),\n            ExecError::ForgedReference(_) => Some(Error::ForgedReference),\n            // There are possibly other exec errors happening but such translation would be lossy.\n            _ => None,\n        }\n    }\n}\n\nimpl<R> RuntimeProvider for Runtime<'_, R>\nwhere\n    R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn get_caller(&self) -> AccountHash {\n        self.context.get_initiator()\n    }\n\n    fn get_immediate_caller(&self) -> Option<Caller> {\n        Runtime::<'_, R>::get_immediate_caller(self).cloned()\n    }\n\n    fn is_called_from_standard_payment(&self) -> bool {\n        self.context.phase() == Phase::Payment && self.module.is_none()\n    }\n\n    fn get_system_entity_registry(&self) -> Result<SystemHashRegistry, ProviderError> {\n        self.context.system_entity_registry().map_err(|err| {\n            error!(%err, \"unable to obtain system entity registry during transfer\");\n            ProviderError::SystemEntityRegistry\n        
})\n    }\n\n    fn runtime_footprint_by_account_hash(\n        &mut self,\n        account_hash: AccountHash,\n    ) -> Result<Option<RuntimeFootprint>, ProviderError> {\n        self.context\n            .runtime_footprint_by_account_hash(account_hash)\n            .map_err(|err| {\n                error!(%err, \"error getting runtime footprint by account hash\");\n                ProviderError::AccountHash(account_hash)\n            })\n    }\n\n    fn get_phase(&self) -> Phase {\n        self.context.phase()\n    }\n\n    fn get_key(&self, name: &str) -> Option<Key> {\n        self.context.named_keys_get(name).cloned()\n    }\n\n    fn get_approved_spending_limit(&self) -> U512 {\n        self.context.remaining_spending_limit()\n    }\n\n    fn sub_approved_spending_limit(&mut self, transferred: U512) {\n        // We're ignoring the result here since we always check first\n        // if there is still enough spending limit left.\n        self.context.subtract_amount_spent(transferred);\n    }\n\n    fn get_main_purse(&self) -> Option<URef> {\n        self.context.runtime_footprint().borrow().main_purse()\n    }\n\n    fn is_administrator(&self, account_hash: &AccountHash) -> bool {\n        self.context.engine_config().is_administrator(account_hash)\n    }\n\n    fn allow_unrestricted_transfers(&self) -> bool {\n        self.context.engine_config().allow_unrestricted_transfers()\n    }\n\n    /// Validate URef against context access rights.\n    fn is_valid_uref(&self, uref: &URef) -> bool {\n        self.context.access_rights().has_access_rights_to_uref(uref)\n    }\n}\n\nimpl<R> StorageProvider for Runtime<'_, R>\nwhere\n    R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn new_uref<T: CLTyped + ToBytes>(&mut self, init: T) -> Result<URef, Error> {\n        let cl_value: CLValue = CLValue::from_t(init).map_err(|_| Error::CLValue)?;\n        self.context\n            .new_uref(StoredValue::CLValue(cl_value))\n            
.map_err(|exec_error| <Option<Error>>::from(exec_error).unwrap_or(Error::NewURef))\n    }\n\n    fn read<T: CLTyped + FromBytes>(&mut self, uref: URef) -> Result<Option<T>, Error> {\n        let maybe_value = self\n            .context\n            .read_gs(&Key::URef(uref))\n            .map_err(|exec_error| <Option<Error>>::from(exec_error).unwrap_or(Error::Storage))?;\n        match maybe_value {\n            Some(StoredValue::CLValue(value)) => {\n                let value = CLValue::into_t(value).map_err(|_| Error::CLValue)?;\n                Ok(Some(value))\n            }\n            Some(_cl_value) => Err(Error::CLValue),\n            None => Ok(None),\n        }\n    }\n\n    fn write_amount(&mut self, uref: URef, amount: U512) -> Result<(), Error> {\n        let cl_value = CLValue::from_t(amount).map_err(|_| Error::CLValue)?;\n        self.context\n            .metered_write_gs(Key::URef(uref), StoredValue::CLValue(cl_value))\n            .map_err(|exec_error| <Option<Error>>::from(exec_error).unwrap_or(Error::Storage))\n    }\n\n    fn add<T: CLTyped + ToBytes>(&mut self, uref: URef, value: T) -> Result<(), Error> {\n        let cl_value = CLValue::from_t(value).map_err(|_| Error::CLValue)?;\n        self.context\n            .metered_add_gs(uref, cl_value)\n            .map_err(|exec_error| <Option<Error>>::from(exec_error).unwrap_or(Error::Storage))\n    }\n\n    fn total_balance(&mut self, purse: URef) -> Result<U512, Error> {\n        Runtime::total_balance(self, purse)\n            .map_err(|exec_error| <Option<Error>>::from(exec_error).unwrap_or(Error::Storage))\n    }\n\n    fn available_balance(&mut self, purse: URef) -> Result<Option<U512>, Error> {\n        Runtime::available_balance(self, purse)\n            .map_err(|exec_error| <Option<Error>>::from(exec_error).unwrap_or(Error::Storage))\n    }\n\n    fn write_balance(&mut self, uref: URef, balance: U512) -> Result<(), Error> {\n        let cl_value = CLValue::from_t(balance).map_err(|_| 
Error::CLValue)?;\n        self.context\n            .metered_write_gs_unsafe(Key::Balance(uref.addr()), StoredValue::CLValue(cl_value))\n            .map_err(|exec_error| <Option<Error>>::from(exec_error).unwrap_or(Error::Storage))\n    }\n\n    fn add_balance(&mut self, uref: URef, value: U512) -> Result<(), Error> {\n        let cl_value = CLValue::from_t(value).map_err(|_| Error::CLValue)?;\n        self.context\n            .metered_add_gs_unsafe(Key::Balance(uref.addr()), StoredValue::CLValue(cl_value))\n            .map_err(|exec_error| <Option<Error>>::from(exec_error).unwrap_or(Error::Storage))\n    }\n}\n\nimpl<R> SystemProvider for Runtime<'_, R>\nwhere\n    R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn record_transfer(\n        &mut self,\n        maybe_to: Option<AccountHash>,\n        source: URef,\n        target: URef,\n        amount: U512,\n        id: Option<u64>,\n    ) -> Result<(), Error> {\n        let result = Runtime::record_transfer(self, maybe_to, source, target, amount, id);\n        result.map_err(|exec_error| {\n            <Option<Error>>::from(exec_error).unwrap_or(Error::RecordTransferFailure)\n        })\n    }\n}\n\nimpl<R> Mint for Runtime<'_, R>\nwhere\n    R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn purse_exists(&mut self, uref: URef) -> Result<bool, Error> {\n        let maybe_value = self\n            .context\n            .read_gs(&Key::Balance(uref.addr()))\n            .map_err(|exec_error| <Option<Error>>::from(exec_error).unwrap_or(Error::Storage))?;\n        match maybe_value {\n            Some(StoredValue::CLValue(value)) => Ok(*value.cl_type() == U512::cl_type()),\n            Some(_non_cl_value) => Err(Error::CLValue),\n            None => Ok(false),\n        }\n    }\n}\n"
  },
  {
    "path": "execution_engine/src/runtime/mod.rs",
    "content": "//! This module contains executor state of the WASM code.\nmod args;\nmod auction_internal;\npub mod cryptography;\nmod externals;\nmod handle_payment_internal;\nmod host_function_flag;\nmod mint_internal;\npub mod stack;\nmod utils;\npub(crate) mod wasm_prep;\n\nuse std::{\n    cmp,\n    collections::{BTreeMap, BTreeSet},\n    convert::{TryFrom, TryInto},\n    iter::FromIterator,\n};\n\nuse casper_wasm::elements::Module;\nuse casper_wasmi::{MemoryRef, Trap, TrapCode};\nuse tracing::{debug, error, warn};\n\n#[cfg(feature = \"test-support\")]\nuse casper_wasmi::RuntimeValue;\nuse itertools::Itertools;\nuse num_rational::Ratio;\n\nuse casper_storage::{\n    global_state::{error::Error as GlobalStateError, state::StateReader},\n    system::{auction::Auction, handle_payment::HandlePayment, mint::Mint},\n    tracking_copy::TrackingCopyExt,\n};\nuse casper_types::{\n    account::{\n        Account, AccountHash, AddKeyFailure, RemoveKeyFailure, SetThresholdFailure,\n        UpdateKeyFailure,\n    },\n    addressable_entity::{\n        self, ActionThresholds, ActionType, AddressableEntity, AddressableEntityHash,\n        AssociatedKeys, ContractRuntimeTag, EntityEntryPoint, EntryPointAccess, EntryPointType,\n        EntryPoints, MessageTopicError, MessageTopics, NamedKeyAddr, NamedKeyValue, Parameter,\n        Weight, DEFAULT_ENTRY_POINT_NAME,\n    },\n    bytesrepr::{self, Bytes, FromBytes, ToBytes},\n    contract_messages::{\n        Message, MessageAddr, MessagePayload, MessageTopicOperation, MessageTopicSummary,\n    },\n    contracts::{\n        ContractHash, ContractPackage, ContractPackageHash, ContractPackageStatus,\n        ContractVersions, DisabledVersions, NamedKeys, ProtocolVersionMajor,\n    },\n    system::{\n        self,\n        auction::{self, DelegatorKind, EraInfo, MINIMUM_DELEGATION_RATE_KEY},\n        handle_payment,\n        mint::{self, MINT_SUSTAIN_PURSE_KEY},\n        CallStackElement, Caller, CallerInfo, SystemEntityType, 
AUCTION, HANDLE_PAYMENT, MINT,\n        STANDARD_PAYMENT,\n    },\n    AccessRights, ApiError, BlockGlobalAddr, BlockTime, ByteCode, ByteCodeAddr, ByteCodeHash,\n    ByteCodeKind, CLTyped, CLValue, ContextAccessRights, Contract, ContractWasm, EntityAddr,\n    EntityKind, EntityVersion, EntityVersionKey, EntityVersions, Gas, GrantedAccess, Group, Groups,\n    HashAddr, HostFunction, HostFunctionCost, InitiatorAddr, Key, NamedArg, Package, PackageHash,\n    PackageStatus, Phase, PublicKey, RewardsHandling, RuntimeArgs, RuntimeFootprint, StoredValue,\n    Transfer, TransferResult, TransferV2, TransferredTo, URef, DICTIONARY_ITEM_KEY_MAX_LENGTH,\n    U512,\n};\n\nuse crate::{\n    execution::ExecError, runtime::host_function_flag::HostFunctionFlag,\n    runtime_context::RuntimeContext,\n};\npub use stack::{RuntimeStack, RuntimeStackFrame, RuntimeStackOverflow};\npub use wasm_prep::{\n    cycles_for_instruction, preprocess, PreprocessingError, WasmValidationError,\n    DEFAULT_BR_TABLE_MAX_SIZE, DEFAULT_MAX_GLOBALS, DEFAULT_MAX_PARAMETER_COUNT,\n    DEFAULT_MAX_TABLE_SIZE,\n};\n\n#[derive(Debug)]\nenum CallContractIdentifier {\n    Contract {\n        contract_hash: HashAddr,\n    },\n    ContractPackage {\n        contract_package_hash: HashAddr,\n        version: Option<EntityVersion>,\n        protocol_version_major: Option<ProtocolVersionMajor>,\n    },\n}\n\n#[repr(u8)]\nenum CallerInformation {\n    Initiator = 0,\n    Immediate = 1,\n    FullCallChain = 2,\n}\n\nimpl TryFrom<u8> for CallerInformation {\n    type Error = ApiError;\n\n    fn try_from(value: u8) -> Result<Self, Self::Error> {\n        match value {\n            0 => Ok(CallerInformation::Initiator),\n            1 => Ok(CallerInformation::Immediate),\n            2 => Ok(CallerInformation::FullCallChain),\n            _ => Err(ApiError::InvalidCallerInfoRequest),\n        }\n    }\n}\n\n/// Represents the runtime properties of a WASM execution.\npub struct Runtime<'a, R> {\n    context: 
RuntimeContext<'a, R>,\n    memory: Option<MemoryRef>,\n    module: Option<Module>,\n    host_buffer: Option<CLValue>,\n    stack: Option<RuntimeStack>,\n    host_function_flag: HostFunctionFlag,\n}\n\nimpl<'a, R> Runtime<'a, R>\nwhere\n    R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    /// Creates a new runtime instance.\n    pub(crate) fn new(context: RuntimeContext<'a, R>) -> Self {\n        Runtime {\n            context,\n            memory: None,\n            module: None,\n            host_buffer: None,\n            stack: None,\n            host_function_flag: HostFunctionFlag::default(),\n        }\n    }\n\n    /// Creates a new runtime instance by cloning the config, and host function flag from `self`.\n    fn new_invocation_runtime(\n        &self,\n        context: RuntimeContext<'a, R>,\n        module: Module,\n        memory: MemoryRef,\n        stack: RuntimeStack,\n    ) -> Self {\n        Self::check_preconditions(&stack);\n        Runtime {\n            context,\n            memory: Some(memory),\n            module: Some(module),\n            host_buffer: None,\n            stack: Some(stack),\n            host_function_flag: self.host_function_flag.clone(),\n        }\n    }\n\n    /// Creates a new runtime instance with a stack from `self`.\n    pub(crate) fn new_with_stack(\n        &self,\n        context: RuntimeContext<'a, R>,\n        stack: RuntimeStack,\n    ) -> Self {\n        Self::check_preconditions(&stack);\n        Runtime {\n            context,\n            memory: None,\n            module: None,\n            host_buffer: None,\n            stack: Some(stack),\n            host_function_flag: self.host_function_flag.clone(),\n        }\n    }\n\n    /// Preconditions that would render the system inconsistent if violated. 
Those are strictly\n    /// programming errors.\n    fn check_preconditions(stack: &RuntimeStack) {\n        if stack.is_empty() {\n            error!(\"Call stack should not be empty while creating a new Runtime instance\");\n            debug_assert!(false);\n        }\n\n        if stack.first_frame().unwrap().contract_hash().is_some() {\n            error!(\"First element of the call stack should always represent a Session call\");\n            debug_assert!(false);\n        }\n    }\n\n    /// Returns the context.\n    pub(crate) fn context(&self) -> &RuntimeContext<'a, R> {\n        &self.context\n    }\n\n    fn gas(&mut self, amount: Gas) -> Result<(), ExecError> {\n        self.context.charge_gas(amount)\n    }\n\n    /// Returns current gas counter.\n    fn gas_counter(&self) -> Gas {\n        self.context.gas_counter()\n    }\n\n    /// Sets new gas counter value.\n    fn set_gas_counter(&mut self, new_gas_counter: Gas) {\n        self.context.set_gas_counter(new_gas_counter);\n    }\n\n    /// Charge for a system contract call.\n    ///\n    /// This method does not charge for system contract calls if the immediate caller is a system\n    /// contract or if we're currently within the scope of a host function call. This avoids\n    /// misleading gas charges if one system contract calls other system contract (e.g. auction\n    /// contract calls into mint to create new purses).\n    pub(crate) fn charge_system_contract_call<T>(&mut self, amount: T) -> Result<(), ExecError>\n    where\n        T: Into<Gas>,\n    {\n        if self.is_system_immediate_caller()? 
|| self.host_function_flag.is_in_host_function_scope()\n        {\n            return Ok(());\n        }\n\n        self.context.charge_system_contract_call(amount)\n    }\n\n    fn checked_memory_slice<Ret>(\n        &self,\n        offset: usize,\n        size: usize,\n        func: impl FnOnce(&[u8]) -> Ret,\n    ) -> Result<Ret, ExecError> {\n        // This is mostly copied from a private function `MemoryInstance::checked_memory_region`\n        // that calls a user defined function with a validated slice of memory. This allows\n        // usage patterns that does not involve copying data onto heap first i.e. deserialize\n        // values without copying data first, etc.\n        // NOTE: Depending on the VM backend used in future, this may change, as not all VMs may\n        // support direct memory access.\n        self.try_get_memory()?\n            .with_direct_access(|buffer| {\n                let end = offset.checked_add(size).ok_or_else(|| {\n                    casper_wasmi::Error::Memory(format!(\n                        \"trying to access memory block of size {} from offset {}\",\n                        size, offset\n                    ))\n                })?;\n\n                if end > buffer.len() {\n                    return Err(casper_wasmi::Error::Memory(format!(\n                        \"trying to access region [{}..{}] in memory [0..{}]\",\n                        offset,\n                        end,\n                        buffer.len(),\n                    )));\n                }\n\n                Ok(func(&buffer[offset..end]))\n            })\n            .map_err(Into::into)\n    }\n\n    /// Returns bytes from the WASM memory instance.\n    #[inline]\n    fn bytes_from_mem(&self, ptr: u32, size: usize) -> Result<Vec<u8>, ExecError> {\n        self.checked_memory_slice(ptr as usize, size, |data| data.to_vec())\n    }\n\n    /// Returns a deserialized type from the WASM memory instance.\n    #[inline]\n    fn t_from_mem<T: 
FromBytes>(&self, ptr: u32, size: u32) -> Result<T, ExecError> {\n        let result = self.checked_memory_slice(ptr as usize, size as usize, |data| {\n            bytesrepr::deserialize_from_slice(data)\n        })?;\n        Ok(result?)\n    }\n\n    /// Reads key (defined as `key_ptr` and `key_size` tuple) from Wasm memory.\n    #[inline]\n    fn key_from_mem(&mut self, key_ptr: u32, key_size: u32) -> Result<Key, ExecError> {\n        self.t_from_mem(key_ptr, key_size)\n    }\n\n    /// Reads `CLValue` (defined as `cl_value_ptr` and `cl_value_size` tuple) from Wasm memory.\n    #[inline]\n    fn cl_value_from_mem(\n        &mut self,\n        cl_value_ptr: u32,\n        cl_value_size: u32,\n    ) -> Result<CLValue, ExecError> {\n        self.t_from_mem(cl_value_ptr, cl_value_size)\n    }\n\n    /// Returns a deserialized string from the WASM memory instance.\n    #[inline]\n    fn string_from_mem(&self, ptr: u32, size: u32) -> Result<String, Trap> {\n        self.t_from_mem(ptr, size).map_err(Trap::from)\n    }\n\n    fn get_module_from_entry_points(\n        &mut self,\n        entry_points: &EntryPoints,\n    ) -> Result<Vec<u8>, ExecError> {\n        let module = self.try_get_module()?.clone();\n        let entry_point_names: Vec<&str> = entry_points.keys().map(|s| s.as_str()).collect();\n        let module_bytes = wasm_prep::get_module_from_entry_points(entry_point_names, module)?;\n        Ok(module_bytes)\n    }\n\n    #[allow(clippy::wrong_self_convention)]\n    fn is_valid_uref(&self, uref_ptr: u32, uref_size: u32) -> Result<bool, Trap> {\n        let uref: URef = self.t_from_mem(uref_ptr, uref_size)?;\n        Ok(self.context.validate_uref(&uref).is_ok())\n    }\n\n    /// Load the uref known by the given name into the Wasm memory\n    fn load_key(\n        &mut self,\n        name_ptr: u32,\n        name_size: u32,\n        output_ptr: u32,\n        output_size: usize,\n        bytes_written_ptr: u32,\n    ) -> Result<Result<(), ApiError>, Trap> {\n    
    let name = self.string_from_mem(name_ptr, name_size)?;\n\n        // Get a key and serialize it\n        let key = match self.context.named_keys_get(&name) {\n            Some(key) => key,\n            None => {\n                return Ok(Err(ApiError::MissingKey));\n            }\n        };\n\n        let key_bytes = match key.to_bytes() {\n            Ok(bytes) => bytes,\n            Err(error) => return Ok(Err(error.into())),\n        };\n\n        // `output_size` has to be greater or equal to the actual length of serialized Key bytes\n        if output_size < key_bytes.len() {\n            return Ok(Err(ApiError::BufferTooSmall));\n        }\n\n        // Set serialized Key bytes into the output buffer\n        if let Err(error) = self.try_get_memory()?.set(output_ptr, &key_bytes) {\n            return Err(ExecError::Interpreter(error.into()).into());\n        }\n\n        // SAFETY: For all practical purposes following conversion is assumed to be safe\n        let bytes_size: u32 = key_bytes\n            .len()\n            .try_into()\n            .expect(\"Keys should not serialize to many bytes\");\n        let size_bytes = bytes_size.to_le_bytes(); // Wasm is little-endian\n        if let Err(error) = self.try_get_memory()?.set(bytes_written_ptr, &size_bytes) {\n            return Err(ExecError::Interpreter(error.into()).into());\n        }\n\n        Ok(Ok(()))\n    }\n\n    fn has_key(&mut self, name_ptr: u32, name_size: u32) -> Result<i32, Trap> {\n        let name = self.string_from_mem(name_ptr, name_size)?;\n        if self.context.named_keys_contains_key(&name) {\n            Ok(0)\n        } else {\n            Ok(1)\n        }\n    }\n\n    fn put_key(\n        &mut self,\n        name_ptr: u32,\n        name_size: u32,\n        key_ptr: u32,\n        key_size: u32,\n    ) -> Result<(), Trap> {\n        let name = self.string_from_mem(name_ptr, name_size)?;\n        let key = self.key_from_mem(key_ptr, key_size)?;\n\n        if let 
Some(payment_purse) = self.context.maybe_payment_purse() {\n            if Key::URef(payment_purse).normalize() == key.normalize() {\n                warn!(\"attempt to put_key payment purse\");\n                return Err(Into::into(ExecError::Revert(ApiError::HandlePayment(\n                    handle_payment::Error::AttemptToPersistPaymentPurse as u8,\n                ))));\n            }\n        }\n        self.context.put_key(name, key).map_err(Into::into)\n    }\n\n    fn remove_key(&mut self, name_ptr: u32, name_size: u32) -> Result<(), Trap> {\n        let name = self.string_from_mem(name_ptr, name_size)?;\n        self.context.remove_key(&name)?;\n        Ok(())\n    }\n\n    /// Writes runtime context's account main purse to dest_ptr in the Wasm memory.\n    fn get_main_purse(&mut self, dest_ptr: u32) -> Result<(), Trap> {\n        let purse = self.context.get_main_purse()?;\n        let purse_bytes = purse.into_bytes().map_err(ExecError::BytesRepr)?;\n        self.try_get_memory()?\n            .set(dest_ptr, &purse_bytes)\n            .map_err(|e| ExecError::Interpreter(e.into()).into())\n    }\n\n    /// Writes caller (deploy) account public key to output_size_ptr in the Wasm\n    /// memory.\n    fn get_caller(&mut self, output_size_ptr: u32) -> Result<Result<(), ApiError>, Trap> {\n        if !self.can_write_to_host_buffer() {\n            // Exit early if the host buffer is already occupied\n            return Ok(Err(ApiError::HostBufferFull));\n        }\n        let value = CLValue::from_t(self.context.get_initiator()).map_err(ExecError::CLValue)?;\n        let value_size = value.inner_bytes().len();\n\n        // Save serialized public key into host buffer\n        if let Err(error) = self.write_host_buffer(value) {\n            return Ok(Err(error));\n        }\n\n        // Write output\n        let output_size_bytes = value_size.to_le_bytes(); // Wasm is little-endian\n        if let Err(error) = self\n            .try_get_memory()?\n         
   .set(output_size_ptr, &output_size_bytes)\n        {\n            return Err(ExecError::Interpreter(error.into()).into());\n        }\n        Ok(Ok(()))\n    }\n\n    /// Gets the immediate caller of the current execution\n    fn get_immediate_caller(&self) -> Option<&RuntimeStackFrame> {\n        self.stack.as_ref().and_then(|stack| stack.previous_frame())\n    }\n\n    /// Checks if immediate caller is of session type of the same account as the provided account\n    /// hash.\n    fn is_allowed_session_caller(&self, provided_account_hash: &AccountHash) -> bool {\n        if self.context.get_initiator() == PublicKey::System.to_account_hash() {\n            return true;\n        }\n\n        if let Some(Caller::Initiator { account_hash }) = self.get_immediate_caller() {\n            return account_hash == provided_account_hash;\n        }\n        false\n    }\n\n    /// Writes runtime context's phase to dest_ptr in the Wasm memory.\n    fn get_phase(&mut self, dest_ptr: u32) -> Result<(), Trap> {\n        let phase = self.context.phase();\n        let bytes = phase.into_bytes().map_err(ExecError::BytesRepr)?;\n        self.try_get_memory()?\n            .set(dest_ptr, &bytes)\n            .map_err(|e| ExecError::Interpreter(e.into()).into())\n    }\n\n    /// Writes requested field from runtime context's block info to dest_ptr in the Wasm memory.\n    fn get_block_info(&self, field_idx: u8, dest_ptr: u32) -> Result<(), Trap> {\n        if field_idx == 0 {\n            // original functionality\n            return self.get_blocktime(dest_ptr);\n        }\n        let block_info = self.context.get_block_info();\n\n        let mut data: Vec<u8> = vec![];\n        if field_idx == 1 {\n            data = block_info\n                .block_height()\n                .into_bytes()\n                .map_err(ExecError::BytesRepr)?;\n        }\n        if field_idx == 2 {\n            data = block_info\n                .parent_block_hash()\n                
.into_bytes()\n                .map_err(ExecError::BytesRepr)?;\n        }\n        if field_idx == 3 {\n            data = block_info\n                .state_hash()\n                .into_bytes()\n                .map_err(ExecError::BytesRepr)?;\n        }\n        if field_idx == 4 {\n            data = self\n                .context\n                .protocol_version()\n                .into_bytes()\n                .map_err(ExecError::BytesRepr)?;\n        }\n        if field_idx == 5 {\n            data = self\n                .context\n                .engine_config()\n                .enable_entity\n                .into_bytes()\n                .map_err(ExecError::BytesRepr)?;\n        }\n        if data.is_empty() {\n            Err(ExecError::InvalidImputedOperation.into())\n        } else {\n            Ok(self\n                .try_get_memory()?\n                .set(dest_ptr, &data)\n                .map_err(|e| ExecError::Interpreter(e.into()))?)\n        }\n    }\n\n    /// Writes current blocktime to dest_ptr in Wasm memory.\n    fn get_blocktime(&self, dest_ptr: u32) -> Result<(), Trap> {\n        let block_info = self.context.get_block_info();\n        let blocktime = block_info\n            .block_time()\n            .into_bytes()\n            .map_err(ExecError::BytesRepr)?;\n        self.try_get_memory()?\n            .set(dest_ptr, &blocktime)\n            .map_err(|e| ExecError::Interpreter(e.into()).into())\n    }\n\n    /// Load the uref known by the given name into the Wasm memory\n    fn load_call_stack(\n        &mut self,\n        // (Output) Pointer to number of elements in the call stack.\n        call_stack_len_ptr: u32,\n        // (Output) Pointer to size in bytes of the serialized call stack.\n        result_size_ptr: u32,\n    ) -> Result<Result<(), ApiError>, Trap> {\n        if !self.can_write_to_host_buffer() {\n            // Exit early if the host buffer is already occupied\n            return 
Ok(Err(ApiError::HostBufferFull));\n        }\n        let call_stack: Vec<CallStackElement> = match self.try_get_stack() {\n            Ok(stack) => {\n                let caller = stack.call_stack_elements();\n                caller.iter().map_into().collect_vec()\n            }\n            Err(_error) => return Ok(Err(ApiError::Unhandled)),\n        };\n        let call_stack_len: u32 = match call_stack.len().try_into() {\n            Ok(value) => value,\n            Err(_) => return Ok(Err(ApiError::OutOfMemory)),\n        };\n        let call_stack_len_bytes = call_stack_len.to_le_bytes();\n\n        if let Err(error) = self\n            .try_get_memory()?\n            .set(call_stack_len_ptr, &call_stack_len_bytes)\n        {\n            return Err(ExecError::Interpreter(error.into()).into());\n        }\n\n        if call_stack_len == 0 {\n            return Ok(Ok(()));\n        }\n\n        let call_stack_cl_value = CLValue::from_t(call_stack).map_err(ExecError::CLValue)?;\n\n        let call_stack_cl_value_bytes_len: u32 =\n            match call_stack_cl_value.inner_bytes().len().try_into() {\n                Ok(value) => value,\n                Err(_) => return Ok(Err(ApiError::OutOfMemory)),\n            };\n\n        if let Err(error) = self.write_host_buffer(call_stack_cl_value) {\n            return Ok(Err(error));\n        }\n\n        let call_stack_cl_value_bytes_len_bytes = call_stack_cl_value_bytes_len.to_le_bytes();\n\n        if let Err(error) = self\n            .try_get_memory()?\n            .set(result_size_ptr, &call_stack_cl_value_bytes_len_bytes)\n        {\n            return Err(ExecError::Interpreter(error.into()).into());\n        }\n\n        Ok(Ok(()))\n    }\n\n    /// Returns information about the call stack based on a given action.\n    fn load_caller_information(\n        &mut self,\n        information: u8,\n        // (Output) Pointer to number of elements in the call stack.\n        call_stack_len_ptr: u32,\n        // 
(Output) Pointer to size in bytes of the serialized call stack.\n        result_size_ptr: u32,\n    ) -> Result<Result<(), ApiError>, Trap> {\n        if !self.can_write_to_host_buffer() {\n            // Exit early if the host buffer is already occupied\n            return Ok(Err(ApiError::HostBufferFull));\n        }\n\n        let caller_info = match CallerInformation::try_from(information) {\n            Ok(info) => info,\n            Err(error) => return Ok(Err(error)),\n        };\n\n        let caller = match caller_info {\n            CallerInformation::Initiator => {\n                let initiator_account_hash = self.context.get_initiator();\n                let caller = Caller::initiator(initiator_account_hash);\n                match CallerInfo::try_from(caller) {\n                    Ok(caller_info) => {\n                        vec![caller_info]\n                    }\n                    Err(_) => return Ok(Err(ApiError::CLTypeMismatch)),\n                }\n            }\n            CallerInformation::Immediate => match self.get_immediate_caller() {\n                Some(frame) => match CallerInfo::try_from(*frame) {\n                    Ok(immediate_info) => {\n                        vec![immediate_info]\n                    }\n                    Err(_) => return Ok(Err(ApiError::CLTypeMismatch)),\n                },\n                None => return Ok(Err(ApiError::Unhandled)),\n            },\n            CallerInformation::FullCallChain => match self.try_get_stack() {\n                Ok(call_stack) => {\n                    let call_stack = call_stack.call_stack_elements().clone();\n\n                    let mut ret = vec![];\n                    for caller in call_stack {\n                        match CallerInfo::try_from(caller) {\n                            Ok(info) => ret.push(info),\n                            Err(_) => return Ok(Err(ApiError::CLTypeMismatch)),\n                        }\n                    }\n                    
ret\n                }\n                Err(_) => return Ok(Err(ApiError::Unhandled)),\n            },\n        };\n\n        let call_stack_len: u32 = match caller.len().try_into() {\n            Ok(value) => value,\n            Err(_) => return Ok(Err(ApiError::OutOfMemory)),\n        };\n        let call_stack_len_bytes = call_stack_len.to_le_bytes();\n\n        if let Err(error) = self\n            .try_get_memory()?\n            .set(call_stack_len_ptr, &call_stack_len_bytes)\n        {\n            return Err(ExecError::Interpreter(error.into()).into());\n        }\n\n        if call_stack_len == 0 {\n            return Ok(Ok(()));\n        }\n\n        let call_stack_cl_value = CLValue::from_t(caller).map_err(ExecError::CLValue)?;\n\n        let call_stack_cl_value_bytes_len: u32 =\n            match call_stack_cl_value.inner_bytes().len().try_into() {\n                Ok(value) => value,\n                Err(_) => return Ok(Err(ApiError::OutOfMemory)),\n            };\n\n        if let Err(error) = self.write_host_buffer(call_stack_cl_value) {\n            return Ok(Err(error));\n        }\n\n        let call_stack_cl_value_bytes_len_bytes = call_stack_cl_value_bytes_len.to_le_bytes();\n\n        if let Err(error) = self\n            .try_get_memory()?\n            .set(result_size_ptr, &call_stack_cl_value_bytes_len_bytes)\n        {\n            return Err(ExecError::Interpreter(error.into()).into());\n        }\n\n        Ok(Ok(()))\n    }\n\n    /// Return some bytes from the memory and terminate the current `sub_call`. 
Note that the return\n    /// type is `Trap`, indicating that this function will always kill the current Wasm instance.\n    fn ret(&mut self, value_ptr: u32, value_size: usize) -> Trap {\n        self.host_buffer = None;\n\n        let mem_get =\n            self.checked_memory_slice(value_ptr as usize, value_size, |data| data.to_vec());\n\n        match mem_get {\n            Ok(buf) => {\n                // Set the result field in the runtime and return the proper element of the `Error`\n                // enum indicating that the reason for exiting the module was a call to ret.\n                self.host_buffer = bytesrepr::deserialize_from_slice(buf).ok();\n\n                let urefs = match &self.host_buffer {\n                    Some(buf) => utils::extract_urefs(buf),\n                    None => Ok(vec![]),\n                };\n                match urefs {\n                    Ok(urefs) => {\n                        for uref in &urefs {\n                            if let Err(error) = self.context.validate_uref(uref) {\n                                return Trap::from(error);\n                            }\n                        }\n                        ExecError::Ret(urefs).into()\n                    }\n                    Err(e) => e.into(),\n                }\n            }\n            Err(e) => e.into(),\n        }\n    }\n\n    /// Checks if a [`HashAddr`] corresponds to a system contract.\n    fn is_system_contract(&self, hash_addr: HashAddr) -> Result<bool, ExecError> {\n        self.context.is_system_addressable_entity(&hash_addr)\n    }\n\n    fn get_named_argument<T: FromBytes + CLTyped>(\n        args: &RuntimeArgs,\n        name: &str,\n    ) -> Result<T, ExecError> {\n        let arg: CLValue = args\n            .get(name)\n            .cloned()\n            .ok_or(ExecError::Revert(ApiError::MissingArgument))?;\n        arg.into_t()\n            .map_err(|_| ExecError::Revert(ApiError::InvalidArgument))\n    }\n\n    fn 
try_get_named_argument<T: FromBytes + CLTyped>(\n        args: &RuntimeArgs,\n        name: &str,\n    ) -> Result<Option<T>, ExecError> {\n        match args.get(name) {\n            Some(arg) => {\n                let arg = arg\n                    .clone()\n                    .into_t()\n                    .map_err(|_| ExecError::Revert(ApiError::InvalidArgument))?;\n                Ok(Some(arg))\n            }\n            None => Ok(None),\n        }\n    }\n\n    fn reverter<T: Into<ApiError>>(error: T) -> ExecError {\n        let api_error: ApiError = error.into();\n        // NOTE: This is special casing needed to keep the native system contracts propagate\n        // GasLimit properly to the user. Once support for wasm system contract will be dropped this\n        // won't be necessary anymore.\n        match api_error {\n            ApiError::Mint(mint_error) if mint_error == mint::Error::GasLimit as u8 => {\n                ExecError::GasLimit\n            }\n            ApiError::AuctionError(auction_error)\n                if auction_error == auction::Error::GasLimit as u8 =>\n            {\n                ExecError::GasLimit\n            }\n            ApiError::HandlePayment(handle_payment_error)\n                if handle_payment_error == handle_payment::Error::GasLimit as u8 =>\n            {\n                ExecError::GasLimit\n            }\n            api_error => ExecError::Revert(api_error),\n        }\n    }\n\n    /// Calls host mint contract.\n    fn call_host_mint(\n        &mut self,\n        entry_point_name: &str,\n        runtime_args: &RuntimeArgs,\n        access_rights: ContextAccessRights,\n        stack: RuntimeStack,\n    ) -> Result<CLValue, ExecError> {\n        let gas_counter = self.gas_counter();\n\n        let mint_hash = self.context.get_system_contract(MINT)?;\n        let mint_addr = EntityAddr::new_system(mint_hash.value());\n        let mint_key = if self.context.engine_config().enable_entity {\n            
Key::AddressableEntity(EntityAddr::System(mint_hash.value()))\n        } else {\n            Key::Hash(mint_hash.value())\n        };\n\n        let mint_named_keys = self\n            .context\n            .state()\n            .borrow_mut()\n            .get_named_keys(mint_addr)?;\n\n        let mut named_keys = mint_named_keys;\n\n        let runtime_context = self.context.new_from_self(\n            mint_key,\n            EntryPointType::Called,\n            &mut named_keys,\n            access_rights,\n            runtime_args.to_owned(),\n        );\n\n        let mut mint_runtime = self.new_with_stack(runtime_context, stack);\n\n        let engine_config = self.context.engine_config();\n        let system_config = engine_config.system_config();\n        let mint_costs = system_config.mint_costs();\n\n        let result = match entry_point_name {\n            // Type: `fn mint(amount: U512) -> Result<URef, ExecError>`\n            mint::METHOD_MINT => (|| {\n                mint_runtime.charge_system_contract_call(mint_costs.mint)?;\n\n                let amount: U512 = Self::get_named_argument(runtime_args, mint::ARG_AMOUNT)?;\n                let result: Result<URef, mint::Error> = mint_runtime.mint(amount);\n                if let Err(mint::Error::GasLimit) = result {\n                    return Err(ExecError::GasLimit);\n                }\n                CLValue::from_t(result).map_err(Self::reverter)\n            })(),\n            mint::METHOD_REDUCE_TOTAL_SUPPLY => (|| {\n                mint_runtime.charge_system_contract_call(mint_costs.reduce_total_supply)?;\n\n                let amount: U512 = Self::get_named_argument(runtime_args, mint::ARG_AMOUNT)?;\n                let result: Result<(), mint::Error> = mint_runtime.reduce_total_supply(amount);\n                CLValue::from_t(result).map_err(Self::reverter)\n            })(),\n            mint::METHOD_BURN => (|| {\n                
mint_runtime.charge_system_contract_call(mint_costs.burn)?;\n\n                let purse: URef = Self::get_named_argument(runtime_args, mint::ARG_PURSE)?;\n                let amount: U512 = Self::get_named_argument(runtime_args, mint::ARG_AMOUNT)?;\n                let result: Result<(), mint::Error> = mint_runtime.burn(purse, amount);\n                CLValue::from_t(result).map_err(Self::reverter)\n            })(),\n            // Type: `fn create() -> URef`\n            mint::METHOD_CREATE => (|| {\n                mint_runtime.charge_system_contract_call(mint_costs.create)?;\n\n                let uref = mint_runtime.mint(U512::zero()).map_err(Self::reverter)?;\n                CLValue::from_t(uref).map_err(Self::reverter)\n            })(),\n            // Type: `fn balance(purse: URef) -> Option<U512>`\n            mint::METHOD_BALANCE => (|| {\n                mint_runtime.charge_system_contract_call(mint_costs.balance)?;\n\n                let uref: URef = Self::get_named_argument(runtime_args, mint::ARG_PURSE)?;\n\n                let maybe_balance: Option<U512> =\n                    mint_runtime.balance(uref).map_err(Self::reverter)?;\n                CLValue::from_t(maybe_balance).map_err(Self::reverter)\n            })(),\n            // Type: `fn transfer(maybe_to: Option<AccountHash>, source: URef, target: URef, amount:\n            // U512, id: Option<u64>) -> Result<(), ExecError>`\n            mint::METHOD_TRANSFER => (|| {\n                mint_runtime.charge_system_contract_call(mint_costs.transfer)?;\n\n                let maybe_to: Option<AccountHash> =\n                    Self::get_named_argument(runtime_args, mint::ARG_TO)?;\n                let source: URef = Self::get_named_argument(runtime_args, mint::ARG_SOURCE)?;\n                let target: URef = Self::get_named_argument(runtime_args, mint::ARG_TARGET)?;\n                let amount: U512 = Self::get_named_argument(runtime_args, mint::ARG_AMOUNT)?;\n                let id: 
Option<u64> = Self::get_named_argument(runtime_args, mint::ARG_ID)?;\n                let result: Result<(), mint::Error> =\n                    mint_runtime.transfer(maybe_to, source, target, amount, id);\n\n                CLValue::from_t(result).map_err(Self::reverter)\n            })(),\n            // Type: `fn read_base_round_reward() -> Result<U512, ExecError>`\n            mint::METHOD_READ_BASE_ROUND_REWARD => (|| {\n                mint_runtime.charge_system_contract_call(mint_costs.read_base_round_reward)?;\n\n                let result: U512 = mint_runtime\n                    .read_base_round_reward()\n                    .map_err(Self::reverter)?;\n                CLValue::from_t(result).map_err(Self::reverter)\n            })(),\n            mint::METHOD_MINT_INTO_EXISTING_PURSE => (|| {\n                mint_runtime.charge_system_contract_call(mint_costs.mint_into_existing_purse)?;\n\n                let amount: U512 = Self::get_named_argument(runtime_args, mint::ARG_AMOUNT)?;\n                let existing_purse: URef = Self::get_named_argument(runtime_args, mint::ARG_PURSE)?;\n\n                let result: Result<(), mint::Error> =\n                    mint_runtime.mint_into_existing_purse(existing_purse, amount);\n                CLValue::from_t(result).map_err(Self::reverter)\n            })(),\n            _ => {\n                // Code should never reach this point as existence of the entrypoint is validated\n                // before reaching this point.\n                Ok(CLValue::unit())\n            }\n        };\n\n        // Charge just for the amount that particular entry point cost - using gas cost from the\n        // isolated runtime might have a recursive costs whenever system contract calls other system\n        // contract.\n        self.gas(\n            mint_runtime\n                .gas_counter()\n                .checked_sub(gas_counter)\n                .unwrap_or(gas_counter),\n        )?;\n\n        // Result still 
contains a result, but the entrypoints logic does not exit early on errors.\n        let ret = result?;\n\n        // Update outer spending approved limit.\n        self.context\n            .set_remaining_spending_limit(mint_runtime.context.remaining_spending_limit());\n\n        let urefs = utils::extract_urefs(&ret)?;\n        self.context.access_rights_extend(&urefs);\n        {\n            let transfers = self.context.transfers_mut();\n            mint_runtime.context.transfers().clone_into(transfers);\n        }\n        Ok(ret)\n    }\n\n    /// Calls host `handle_payment` contract.\n    fn call_host_handle_payment(\n        &mut self,\n        entry_point_name: &str,\n        runtime_args: &RuntimeArgs,\n        access_rights: ContextAccessRights,\n        stack: RuntimeStack,\n    ) -> Result<CLValue, ExecError> {\n        let gas_counter = self.gas_counter();\n\n        let handle_payment_hash = self.context.get_system_contract(HANDLE_PAYMENT)?;\n        let handle_payment_key = if self.context.engine_config().enable_entity {\n            Key::AddressableEntity(EntityAddr::System(handle_payment_hash.value()))\n        } else {\n            Key::Hash(handle_payment_hash.value())\n        };\n\n        let handle_payment_named_keys = self\n            .context\n            .state()\n            .borrow_mut()\n            .get_named_keys(EntityAddr::System(handle_payment_hash.value()))?;\n\n        let mut named_keys = handle_payment_named_keys;\n\n        let runtime_context = self.context.new_from_self(\n            handle_payment_key,\n            EntryPointType::Called,\n            &mut named_keys,\n            access_rights,\n            runtime_args.to_owned(),\n        );\n\n        let mut runtime = self.new_with_stack(runtime_context, stack);\n\n        let engine_config = self.context.engine_config();\n        let system_config = engine_config.system_config();\n        let handle_payment_costs = system_config.handle_payment_costs();\n\n        
let result = match entry_point_name {\n            handle_payment::METHOD_GET_PAYMENT_PURSE => {\n                runtime.charge_system_contract_call(handle_payment_costs.get_payment_purse)?;\n                match self.context.maybe_payment_purse() {\n                    Some(payment_purse) => CLValue::from_t(payment_purse).map_err(Self::reverter),\n                    None => {\n                        let payment_purse = runtime.get_payment_purse().map_err(Self::reverter)?;\n                        self.context.set_payment_purse(payment_purse);\n                        CLValue::from_t(payment_purse).map_err(Self::reverter)\n                    }\n                }\n            }\n            handle_payment::METHOD_SET_REFUND_PURSE => (|| {\n                runtime.charge_system_contract_call(handle_payment_costs.set_refund_purse)?;\n\n                let purse: URef =\n                    Self::get_named_argument(runtime_args, handle_payment::ARG_PURSE)?;\n                runtime.set_refund_purse(purse).map_err(Self::reverter)?;\n                CLValue::from_t(()).map_err(Self::reverter)\n            })(),\n            handle_payment::METHOD_GET_REFUND_PURSE => (|| {\n                runtime.charge_system_contract_call(handle_payment_costs.get_refund_purse)?;\n\n                let maybe_purse = runtime.get_refund_purse().map_err(Self::reverter)?;\n                CLValue::from_t(maybe_purse).map_err(Self::reverter)\n            })(),\n            _ => {\n                // Code should never reach here as existence of the entrypoint is validated before\n                // reaching this point.\n                Ok(CLValue::unit())\n            }\n        };\n\n        self.gas(\n            runtime\n                .gas_counter()\n                .checked_sub(gas_counter)\n                .unwrap_or(gas_counter),\n        )?;\n\n        let ret = result?;\n\n        let urefs = utils::extract_urefs(&ret)?;\n        self.context.access_rights_extend(&urefs);\n     
   {\n            let transfers = self.context.transfers_mut();\n            runtime.context.transfers().clone_into(transfers);\n        }\n        Ok(ret)\n    }\n\n    /// Calls host auction contract.\n    fn call_host_auction(\n        &mut self,\n        entry_point_name: &str,\n        runtime_args: &RuntimeArgs,\n        access_rights: ContextAccessRights,\n        stack: RuntimeStack,\n    ) -> Result<CLValue, ExecError> {\n        let gas_counter = self.gas_counter();\n\n        let auction_hash = self.context.get_system_contract(AUCTION)?;\n        let auction_key = if self.context.engine_config().enable_entity {\n            Key::AddressableEntity(EntityAddr::System(auction_hash.value()))\n        } else {\n            Key::Hash(auction_hash.value())\n        };\n\n        let auction_named_keys = self\n            .context\n            .state()\n            .borrow_mut()\n            .get_named_keys(EntityAddr::System(auction_hash.value()))?;\n\n        let mut named_keys = auction_named_keys;\n\n        let runtime_context = self.context.new_from_self(\n            auction_key,\n            EntryPointType::Called,\n            &mut named_keys,\n            access_rights,\n            runtime_args.to_owned(),\n        );\n\n        let mut runtime = self.new_with_stack(runtime_context, stack);\n\n        let engine_config = self.context.engine_config();\n        let system_config = engine_config.system_config();\n        let auction_costs = system_config.auction_costs();\n\n        let result = match entry_point_name {\n            auction::METHOD_GET_ERA_VALIDATORS => (|| {\n                runtime.charge_system_contract_call::<u64>(auction_costs.get_era_validators)?;\n\n                let result = runtime.get_era_validators().map_err(Self::reverter)?;\n\n                CLValue::from_t(result).map_err(Self::reverter)\n            })(),\n\n            auction::METHOD_ADD_BID => (|| {\n                
runtime.charge_system_contract_call(auction_costs.add_bid)?;\n                let public_key = Self::get_named_argument(runtime_args, auction::ARG_PUBLIC_KEY)?;\n                let delegation_rate =\n                    Self::get_named_argument(runtime_args, auction::ARG_DELEGATION_RATE)?;\n                let amount = Self::get_named_argument(runtime_args, auction::ARG_AMOUNT)?;\n\n                let global_minimum_delegation_amount =\n                    self.context.engine_config().minimum_delegation_amount();\n                let minimum_delegation_amount = Self::try_get_named_argument(\n                    runtime_args,\n                    auction::ARG_MINIMUM_DELEGATION_AMOUNT,\n                )?;\n\n                let global_maximum_delegation_amount =\n                    self.context.engine_config().maximum_delegation_amount();\n                let maximum_delegation_amount = Self::try_get_named_argument(\n                    runtime_args,\n                    auction::ARG_MAXIMUM_DELEGATION_AMOUNT,\n                )?;\n\n                let reserved_slots =\n                    Self::try_get_named_argument(runtime_args, auction::ARG_RESERVED_SLOTS)?\n                        .unwrap_or(0);\n\n                let max_delegators_per_validator =\n                    self.context.engine_config().max_delegators_per_validator();\n                let minimum_delegation_rate = self.get_minimum_delegation_rate()?;\n                let minimum_bid_amount = self.context().engine_config().minimum_bid_amount();\n\n                let result = runtime\n                    .add_bid(\n                        public_key,\n                        delegation_rate,\n                        amount,\n                        minimum_delegation_amount,\n                        maximum_delegation_amount,\n                        minimum_bid_amount,\n                        max_delegators_per_validator,\n                        reserved_slots,\n                        
global_minimum_delegation_amount,\n                        global_maximum_delegation_amount,\n                        minimum_delegation_rate,\n                    )\n                    .map_err(Self::reverter)?;\n\n                CLValue::from_t(result).map_err(Self::reverter)\n            })(),\n\n            auction::METHOD_WITHDRAW_BID => (|| {\n                runtime.charge_system_contract_call(auction_costs.withdraw_bid)?;\n\n                let public_key = Self::get_named_argument(runtime_args, auction::ARG_PUBLIC_KEY)?;\n                let amount = Self::get_named_argument(runtime_args, auction::ARG_AMOUNT)?;\n                let min_bid_amount = self.context.engine_config().minimum_bid_amount();\n\n                let result = runtime\n                    .withdraw_bid(public_key, amount, min_bid_amount)\n                    .map_err(Self::reverter)?;\n                CLValue::from_t(result).map_err(Self::reverter)\n            })(),\n\n            auction::METHOD_DELEGATE => (|| {\n                runtime.charge_system_contract_call(auction_costs.delegate)?;\n\n                let delegator = {\n                    match Self::get_named_argument(runtime_args, auction::ARG_DELEGATOR) {\n                        Ok(pk) => DelegatorKind::PublicKey(pk),\n                        Err(_) => {\n                            let uref: URef = match Self::get_named_argument(\n                                runtime_args,\n                                auction::ARG_DELEGATOR_PURSE,\n                            ) {\n                                Ok(uref) => uref,\n                                Err(err) => {\n                                    debug!(%err, \"failed to get delegator purse argument\");\n                                    return Err(err);\n                                }\n                            };\n                            DelegatorKind::Purse(uref.addr())\n                        }\n                    }\n                };\n          
      let validator = Self::get_named_argument(runtime_args, auction::ARG_VALIDATOR)?;\n                let amount = Self::get_named_argument(runtime_args, auction::ARG_AMOUNT)?;\n\n                let max_delegators_per_validator =\n                    self.context.engine_config().max_delegators_per_validator();\n\n                let result = runtime\n                    .delegate(delegator, validator, amount, max_delegators_per_validator)\n                    .map_err(Self::reverter)?;\n\n                CLValue::from_t(result).map_err(Self::reverter)\n            })(),\n\n            auction::METHOD_UNDELEGATE => (|| {\n                runtime.charge_system_contract_call(auction_costs.undelegate)?;\n\n                let delegator = {\n                    match Self::get_named_argument(runtime_args, auction::ARG_DELEGATOR) {\n                        Ok(pk) => DelegatorKind::PublicKey(pk),\n                        Err(_) => {\n                            let uref: URef = match Self::get_named_argument(\n                                runtime_args,\n                                auction::ARG_DELEGATOR_PURSE,\n                            ) {\n                                Ok(uref) => uref,\n                                Err(err) => {\n                                    debug!(%err, \"failed to get delegator purse argument\");\n                                    return Err(err);\n                                }\n                            };\n                            DelegatorKind::Purse(uref.addr())\n                        }\n                    }\n                };\n                let validator = Self::get_named_argument(runtime_args, auction::ARG_VALIDATOR)?;\n                let amount = Self::get_named_argument(runtime_args, auction::ARG_AMOUNT)?;\n\n                let result = runtime\n                    .undelegate(delegator, validator, amount)\n                    .map_err(Self::reverter)?;\n\n                
CLValue::from_t(result).map_err(Self::reverter)\n            })(),\n\n            auction::METHOD_REDELEGATE => (|| {\n                runtime.charge_system_contract_call(auction_costs.redelegate)?;\n\n                let delegator = {\n                    match Self::get_named_argument(runtime_args, auction::ARG_DELEGATOR) {\n                        Ok(pk) => DelegatorKind::PublicKey(pk),\n                        Err(_) => {\n                            let uref: URef = match Self::get_named_argument(\n                                runtime_args,\n                                auction::ARG_DELEGATOR_PURSE,\n                            ) {\n                                Ok(uref) => uref,\n                                Err(err) => {\n                                    debug!(%err, \"failed to get delegator purse argument\");\n                                    return Err(err);\n                                }\n                            };\n                            DelegatorKind::Purse(uref.addr())\n                        }\n                    }\n                };\n                let validator = Self::get_named_argument(runtime_args, auction::ARG_VALIDATOR)?;\n                let amount = Self::get_named_argument(runtime_args, auction::ARG_AMOUNT)?;\n                let new_validator =\n                    Self::get_named_argument(runtime_args, auction::ARG_NEW_VALIDATOR)?;\n\n                let result = runtime\n                    .redelegate(delegator, validator, amount, new_validator)\n                    .map_err(Self::reverter)?;\n\n                CLValue::from_t(result).map_err(Self::reverter)\n            })(),\n\n            auction::METHOD_RUN_AUCTION => (|| {\n                runtime.charge_system_contract_call(auction_costs.run_auction)?;\n\n                let era_end_timestamp_millis =\n                    Self::get_named_argument(runtime_args, auction::ARG_ERA_END_TIMESTAMP_MILLIS)?;\n                let evicted_validators =\n     
               Self::get_named_argument(runtime_args, auction::ARG_EVICTED_VALIDATORS)?;\n\n                let max_delegators_per_validator =\n                    self.context.engine_config().max_delegators_per_validator();\n                let minimum_bid_amount = self.context.engine_config().minimum_bid_amount();\n                runtime\n                    .run_auction(\n                        era_end_timestamp_millis,\n                        evicted_validators,\n                        max_delegators_per_validator,\n                        true,\n                        Ratio::new_raw(U512::from(1), U512::from(5)),\n                        minimum_bid_amount,\n                    )\n                    .map_err(Self::reverter)?;\n\n                CLValue::from_t(()).map_err(Self::reverter)\n            })(),\n\n            // Type: `fn slash(validator_public_keys: &[PublicKey]) -> Result<(), ExecError>`\n            auction::METHOD_SLASH => (|| {\n                runtime.charge_system_contract_call(auction_costs.slash)?;\n\n                let validator_public_keys =\n                    Self::get_named_argument(runtime_args, auction::ARG_VALIDATOR_PUBLIC_KEYS)?;\n                runtime\n                    .slash(validator_public_keys)\n                    .map_err(Self::reverter)?;\n                CLValue::from_t(()).map_err(Self::reverter)\n            })(),\n\n            // Type: `fn distribute(reward_factors: BTreeMap<PublicKey, u512>) -> Result<(),\n            // ExecError>`\n            auction::METHOD_DISTRIBUTE => (|| {\n                runtime.charge_system_contract_call(auction_costs.distribute)?;\n                let rewards_handling = self.context().engine_config().rewards_handling();\n                let rewards = Self::get_named_argument(runtime_args, auction::ARG_REWARDS_MAP)?;\n\n                let sustain_purse = match rewards_handling {\n                    RewardsHandling::Standard => None,\n                    
RewardsHandling::Sustain { .. } => {\n                        let sustain_purse = {\n                            let mint_hash = self.context.get_system_contract(AUCTION)?;\n                            match self\n                                .context\n                                .state()\n                                .borrow_mut()\n                                .get_named_keys(EntityAddr::System(mint_hash.value()))?\n                                .get(MINT_SUSTAIN_PURSE_KEY)\n                            {\n                                Some(Key::URef(uref)) => Some(*uref),\n                                Some(_) | None => None,\n                            }\n                        };\n\n                        sustain_purse\n                    }\n                };\n\n                runtime\n                    .distribute(rewards, sustain_purse, rewards_handling)\n                    .map_err(Self::reverter)?;\n                CLValue::from_t(()).map_err(Self::reverter)\n            })(),\n\n            // Type: `fn read_era_id() -> Result<EraId, ExecError>`\n            auction::METHOD_READ_ERA_ID => (|| {\n                runtime.charge_system_contract_call(auction_costs.read_era_id)?;\n\n                let result = runtime.read_era_id().map_err(Self::reverter)?;\n                CLValue::from_t(result).map_err(Self::reverter)\n            })(),\n\n            auction::METHOD_ACTIVATE_BID => (|| {\n                runtime.charge_system_contract_call(auction_costs.activate_bid)?;\n\n                let validator = Self::get_named_argument(runtime_args, auction::ARG_VALIDATOR)?;\n\n                runtime\n                    .activate_bid(validator, engine_config.minimum_bid_amount())\n                    .map_err(Self::reverter)?;\n\n                CLValue::from_t(()).map_err(Self::reverter)\n            })(),\n            auction::METHOD_CHANGE_BID_PUBLIC_KEY => (|| {\n                
runtime.charge_system_contract_call(auction_costs.change_bid_public_key)?;\n\n                let public_key = Self::get_named_argument(runtime_args, auction::ARG_PUBLIC_KEY)?;\n                let new_public_key =\n                    Self::get_named_argument(runtime_args, auction::ARG_NEW_PUBLIC_KEY)?;\n\n                runtime\n                    .change_bid_public_key(public_key, new_public_key)\n                    .map_err(Self::reverter)?;\n\n                CLValue::from_t(()).map_err(Self::reverter)\n            })(),\n            auction::METHOD_ADD_RESERVATIONS => (|| {\n                runtime.charge_system_contract_call(auction_costs.add_reservations)?;\n\n                let reservations =\n                    Self::get_named_argument(runtime_args, auction::ARG_RESERVATIONS)?;\n\n                let minimum_delegation_rate = self.get_minimum_delegation_rate()?;\n                runtime\n                    .add_reservations(reservations, minimum_delegation_rate)\n                    .map_err(Self::reverter)?;\n\n                CLValue::from_t(()).map_err(Self::reverter)\n            })(),\n            auction::METHOD_CANCEL_RESERVATIONS => (|| {\n                runtime.charge_system_contract_call(auction_costs.cancel_reservations)?;\n\n                let validator = Self::get_named_argument(runtime_args, auction::ARG_VALIDATOR)?;\n                let delegators = Self::get_named_argument(runtime_args, auction::ARG_DELEGATORS)?;\n                let max_delegators_per_validator =\n                    self.context.engine_config().max_delegators_per_validator();\n\n                runtime\n                    .cancel_reservations(validator, delegators, max_delegators_per_validator)\n                    .map_err(Self::reverter)?;\n\n                CLValue::from_t(()).map_err(Self::reverter)\n            })(),\n            _ => {\n                // Code should never reach here as existence of the entrypoint is validated before\n                // 
reaching this point.\n                Ok(CLValue::unit())\n            }\n        };\n\n        // Charge for the gas spent during execution in an isolated runtime.\n        self.gas(\n            runtime\n                .gas_counter()\n                .checked_sub(gas_counter)\n                .unwrap_or(gas_counter),\n        )?;\n\n        // Result still contains a result, but the entrypoints logic does not exit early on errors.\n        let ret = result?;\n\n        let urefs = utils::extract_urefs(&ret)?;\n        self.context.access_rights_extend(&urefs);\n        {\n            let transfers = self.context.transfers_mut();\n            runtime.context.transfers().clone_into(transfers);\n        }\n\n        Ok(ret)\n    }\n\n    /// Call a contract by pushing a stack element onto the frame.\n    pub(crate) fn call_contract_with_stack(\n        &mut self,\n        contract_hash: AddressableEntityHash,\n        entry_point_name: &str,\n        args: RuntimeArgs,\n        stack: RuntimeStack,\n    ) -> Result<CLValue, ExecError> {\n        self.stack = Some(stack);\n\n        self.call_contract(contract_hash, entry_point_name, args)\n    }\n\n    /// Call a version within a package by pushing a stack element onto the frame.\n    pub fn call_package_version_with_stack(\n        &mut self,\n        contract_package_hash: PackageHash,\n        protocol_version_major: Option<ProtocolVersionMajor>,\n        version: Option<EntityVersion>,\n        entry_point_name: String,\n        args: RuntimeArgs,\n        stack: RuntimeStack,\n    ) -> Result<CLValue, ExecError> {\n        self.stack = Some(stack);\n\n        self.call_package_version(\n            contract_package_hash,\n            protocol_version_major,\n            version,\n            entry_point_name,\n            args,\n        )\n    }\n\n    pub(crate) fn execute_module_bytes(\n        &mut self,\n        module_bytes: &Bytes,\n        stack: RuntimeStack,\n    ) -> Result<CLValue, ExecError> {\n    
    let protocol_version = self.context.protocol_version();\n        let engine_config = self.context.engine_config();\n        let wasm_config = engine_config.wasm_config();\n        #[cfg(feature = \"test-support\")]\n        let max_stack_height = wasm_config.v1().max_stack_height();\n        let module = preprocess(*wasm_config, module_bytes)?;\n        let (instance, memory) =\n            utils::instance_and_memory(module.clone(), protocol_version, engine_config)?;\n        self.memory = Some(memory);\n        self.module = Some(module);\n        self.stack = Some(stack);\n        self.context.set_args(utils::attenuate_uref_in_args(\n            self.context.args().clone(),\n            self.context\n                .runtime_footprint()\n                .borrow()\n                .main_purse()\n                .expect(\"line 1183\")\n                .addr(),\n            AccessRights::WRITE,\n        )?);\n\n        let result = instance.invoke_export(DEFAULT_ENTRY_POINT_NAME, &[], self);\n\n        let error = match result {\n            Err(error) => error,\n            // If `Ok` and the `host_buffer` is `None`, the contract's execution succeeded but did\n            // not explicitly call `runtime::ret()`.  
Treat as though the execution\n            // returned the unit type `()` as per Rust functions which don't specify a\n            // return value.\n            Ok(_) => {\n                return Ok(self.take_host_buffer().unwrap_or(CLValue::from_t(())?));\n            }\n        };\n\n        #[cfg(feature = \"test-support\")]\n        dump_runtime_stack_info(instance, max_stack_height);\n\n        if let Some(host_error) = error.as_host_error() {\n            // If the \"error\" was in fact a trap caused by calling `ret` then\n            // this is normal operation and we should return the value captured\n            // in the Runtime result field.\n            let downcasted_error = host_error.downcast_ref::<ExecError>();\n            return match downcasted_error {\n                Some(ExecError::Ret(ref _ret_urefs)) => self\n                    .take_host_buffer()\n                    .ok_or(ExecError::ExpectedReturnValue),\n                Some(error) => Err(error.clone()),\n                None => Err(ExecError::Interpreter(host_error.to_string())),\n            };\n        }\n        Err(ExecError::Interpreter(error.into()))\n    }\n\n    /// Calls contract living under a `key`, with supplied `args`.\n    pub fn call_contract(\n        &mut self,\n        contract_hash: AddressableEntityHash,\n        entry_point_name: &str,\n        args: RuntimeArgs,\n    ) -> Result<CLValue, ExecError> {\n        let contract_hash = contract_hash.value();\n        let identifier = CallContractIdentifier::Contract { contract_hash };\n\n        self.execute_contract(identifier, entry_point_name, args)\n    }\n\n    /// Calls `version` of the contract living at `key`, invoking `method` with\n    /// supplied `args`. 
This function also checks the args conform with the\n    /// types given in the contract header.\n    pub fn call_versioned_contract(\n        &mut self,\n        contract_package_hash: PackageHash,\n        contract_version: Option<EntityVersion>,\n        entry_point_name: String,\n        args: RuntimeArgs,\n    ) -> Result<CLValue, ExecError> {\n        self.call_package_version(\n            contract_package_hash,\n            None,\n            contract_version,\n            entry_point_name,\n            args,\n        )\n    }\n\n    /// Calls `version` of the contract living at `key`, invoking `method` with\n    /// supplied `args`. This function also checks the args conform with the\n    /// types given in the contract header.\n    pub fn call_package_version(\n        &mut self,\n        contract_package_hash: PackageHash,\n        protocol_version_major: Option<ProtocolVersionMajor>,\n        version: Option<EntityVersion>,\n        entry_point_name: String,\n        args: RuntimeArgs,\n    ) -> Result<CLValue, ExecError> {\n        /*\n        m e\n        - - :   pick the highest enabled version, considering the major protocol version first, then the entity version\n        - + :   walk down from the highest major protocol version, so highest..2.+ then 1.+\n                   If there is only one, its that one (no guessing)\n                   If there are more than one, but the entity version is in the range of only one major version\n                   If there is a collision,\n                      It would be safer to error on this collision, however\n                      we are making a best attempt and picking the highest protocol version\n                      add a chainspec setting to either error or pick the highest in this ambigious case\n                      pick the highest protocol version\n        + - :   pick the highest enabled entity version for the given major\n        + + :   pick the version key based on +.+\n        */\n\n      
  let contract_package_hash = contract_package_hash.value();\n        let identifier = CallContractIdentifier::ContractPackage {\n            contract_package_hash,\n            version,\n            protocol_version_major,\n        };\n\n        self.execute_contract(identifier, &entry_point_name, args)\n    }\n\n    fn get_protocol_version_for_entity_version(\n        &self,\n        entity_version: EntityVersion,\n        package: &Package,\n    ) -> Result<EntityVersionKey, ExecError> {\n        let enabled_versions = package.enabled_versions();\n        let current_protocol_version_major = self.context.protocol_version().value().major;\n\n        let mut possible_versions = vec![];\n\n        for protocol_version_major in (1..=current_protocol_version_major).rev() {\n            let entity_version_key = EntityVersionKey::new(protocol_version_major, entity_version);\n            // If there is a corresponding addr then its an enabled valid entity version key\n            if enabled_versions.get(&entity_version_key).is_some() {\n                possible_versions.push(entity_version_key)\n            }\n        }\n\n        if possible_versions.is_empty() {\n            return Err(ExecError::NoMatchingEntityVersionKey);\n        }\n\n        if possible_versions.len() > 1\n            && self\n                .context\n                .engine_config()\n                .trap_on_ambiguous_entity_version\n        {\n            return Err(ExecError::AmbiguousEntityVersion);\n        }\n\n        // If possible versions has more than one, then the element to be popped\n        // will be the version key which has the same entity version, but the highest protocol\n        // version If there is only one version key matching the entity version then we will\n        // correctly pop the singular element in the possible versions.\n        // This sort is load bearing.\n        possible_versions.sort();\n        // This unwrap is safe as long as we exit early on possible 
versions being empty\n        let entity_version_key = possible_versions.pop().unwrap();\n        Ok(entity_version_key)\n    }\n\n    fn get_key_from_entity_addr(&self, entity_addr: EntityAddr) -> Key {\n        if self.context().engine_config().enable_entity {\n            Key::AddressableEntity(entity_addr)\n        } else {\n            match entity_addr {\n                EntityAddr::System(system_hash_addr) => Key::Hash(system_hash_addr),\n                EntityAddr::Account(hash_addr) => Key::Account(AccountHash::new(hash_addr)),\n                EntityAddr::SmartContract(contract_hash_addr) => Key::Hash(contract_hash_addr),\n            }\n        }\n    }\n\n    fn get_context_key_for_contract_call(\n        &self,\n        entity_addr: EntityAddr,\n        entry_point: &EntityEntryPoint,\n    ) -> Result<Key, ExecError> {\n        let current = self.context.entry_point_type();\n        let next = entry_point.entry_point_type();\n        match (current, next) {\n            (EntryPointType::Called, EntryPointType::Caller) => {\n                // Session code can't be called from Contract code for security reasons.\n                Err(ExecError::InvalidContext)\n            }\n            (EntryPointType::Factory, EntryPointType::Caller) => {\n                // Session code can't be called from Installer code for security reasons.\n                Err(ExecError::InvalidContext)\n            }\n            (EntryPointType::Caller, EntryPointType::Caller) => {\n                // Session code called from session reuses current base key\n                Ok(self.context.get_context_key())\n            }\n            (EntryPointType::Caller, EntryPointType::Called)\n            | (EntryPointType::Called, EntryPointType::Called) => {\n                Ok(self.get_key_from_entity_addr(entity_addr))\n            }\n            _ => {\n                // Any other combination (installer, normal, etc.) 
is a contract context.\n                Ok(self.get_key_from_entity_addr(entity_addr))\n            }\n        }\n    }\n\n    fn try_get_memory(&self) -> Result<&MemoryRef, ExecError> {\n        self.memory.as_ref().ok_or(ExecError::WasmPreprocessing(\n            PreprocessingError::MissingMemorySection,\n        ))\n    }\n\n    fn try_get_module(&self) -> Result<&Module, ExecError> {\n        self.module.as_ref().ok_or(ExecError::WasmPreprocessing(\n            PreprocessingError::MissingModule,\n        ))\n    }\n\n    fn try_get_stack(&self) -> Result<&RuntimeStack, ExecError> {\n        self.stack.as_ref().ok_or(ExecError::MissingRuntimeStack)\n    }\n\n    fn maybe_system_type(&self, hash_addr: HashAddr) -> Option<SystemEntityType> {\n        let is_mint = self.is_mint(hash_addr);\n        if is_mint.is_some() {\n            return is_mint;\n        };\n\n        let is_auction = self.is_auction(hash_addr);\n        if is_auction.is_some() {\n            return is_auction;\n        };\n        let is_handle = self.is_handle_payment(hash_addr);\n        if is_handle.is_some() {\n            return is_handle;\n        };\n\n        None\n    }\n\n    fn is_mint(&self, hash_addr: HashAddr) -> Option<SystemEntityType> {\n        let hash = match self.context.get_system_contract(MINT) {\n            Ok(hash) => hash,\n            Err(_) => {\n                error!(\"Failed to get system mint contract hash\");\n                return None;\n            }\n        };\n        if hash.value() == hash_addr {\n            Some(SystemEntityType::Mint)\n        } else {\n            None\n        }\n    }\n\n    /// Checks if current context is the `handle_payment` system contract.\n    fn is_handle_payment(&self, hash_addr: HashAddr) -> Option<SystemEntityType> {\n        let hash = match self.context.get_system_contract(HANDLE_PAYMENT) {\n            Ok(hash) => hash,\n            Err(_) => {\n                error!(\"Failed to get system handle payment contract 
hash\");\n                return None;\n            }\n        };\n        if hash.value() == hash_addr {\n            Some(SystemEntityType::HandlePayment)\n        } else {\n            None\n        }\n    }\n\n    /// Checks if given hash is the auction system contract.\n    fn is_auction(&self, hash_addr: HashAddr) -> Option<SystemEntityType> {\n        let hash = match self.context.get_system_contract(AUCTION) {\n            Ok(hash) => hash,\n            Err(_) => {\n                error!(\"Failed to get system auction contract hash\");\n                return None;\n            }\n        };\n\n        if hash.value() == hash_addr {\n            Some(SystemEntityType::Auction)\n        } else {\n            None\n        }\n    }\n\n    fn execute_contract(\n        &mut self,\n        identifier: CallContractIdentifier,\n        entry_point_name: &str,\n        args: RuntimeArgs,\n    ) -> Result<CLValue, ExecError> {\n        let (footprint, entity_addr, package) = match identifier {\n            CallContractIdentifier::Contract { contract_hash } => {\n                let entity_addr = if self.context.is_system_addressable_entity(&contract_hash)? {\n                    EntityAddr::new_system(contract_hash)\n                } else {\n                    EntityAddr::new_smart_contract(contract_hash)\n                };\n                let footprint = match self.context.read_gs(&Key::Hash(contract_hash))? 
{\n                    Some(StoredValue::Contract(contract)) => {\n                        if self.context.engine_config().enable_entity {\n                            self.migrate_contract_and_contract_package(contract_hash)?;\n                        };\n\n                        let maybe_system_entity_type = self.maybe_system_type(contract_hash);\n\n                        RuntimeFootprint::new_contract_footprint(\n                            ContractHash::new(contract_hash),\n                            contract,\n                            maybe_system_entity_type,\n                        )\n                    }\n                    Some(_) | None => {\n                        if !self.context.engine_config().enable_entity {\n                            return Err(ExecError::KeyNotFound(Key::Hash(contract_hash)));\n                        }\n                        let key = Key::AddressableEntity(entity_addr);\n                        let entity = self.context.read_gs_typed::<AddressableEntity>(&key)?;\n                        let entity_named_keys = self\n                            .context\n                            .state()\n                            .borrow_mut()\n                            .get_named_keys(entity_addr)?;\n                        let entry_points = self.context.get_casper_vm_v1_entry_point(key)?;\n                        RuntimeFootprint::new_entity_footprint(\n                            entity_addr,\n                            entity,\n                            entity_named_keys,\n                            entry_points,\n                        )\n                    }\n                };\n\n                let package_hash = footprint.package_hash().ok_or(ExecError::InvalidContext)?;\n                let package: Package = self.context.get_package(package_hash)?;\n\n                // System contract hashes are disabled at upgrade point\n                let is_calling_system_contract = 
self.is_system_contract(contract_hash)?;\n\n                let entity_hash = AddressableEntityHash::new(contract_hash);\n\n                // Check if provided contract hash is disabled\n                let is_contract_enabled = package.is_entity_enabled(&entity_addr);\n\n                if !is_calling_system_contract && !is_contract_enabled {\n                    return Err(ExecError::DisabledEntity(entity_hash));\n                }\n\n                (footprint, entity_addr, package)\n            }\n            CallContractIdentifier::ContractPackage {\n                contract_package_hash,\n                version,\n                protocol_version_major,\n            } => {\n                let package = self.context.get_package(contract_package_hash)?;\n                let entity_version_key = match (version, protocol_version_major) {\n                    (Some(entity_version), Some(major)) => {\n                        EntityVersionKey::new(major, entity_version)\n                    }\n                    (None, Some(major)) => package.current_entity_version_for(major),\n                    (Some(entity_version), None) => {\n                        match self.get_protocol_version_for_entity_version(entity_version, &package)\n                        {\n                            Ok(entity_version_key) => entity_version_key,\n                            Err(err) => {\n                                return Err(err);\n                            }\n                        }\n                    }\n                    (None, None) => match package.current_entity_version() {\n                        Some(v) => v,\n                        None => {\n                            return Err(ExecError::NoActiveEntityVersions(\n                                contract_package_hash.into(),\n                            ));\n                        }\n                    },\n                };\n\n                if package.is_version_missing(entity_version_key) {\n     
               return Err(ExecError::MissingEntityVersion(entity_version_key));\n                }\n\n                if !package.is_version_enabled(entity_version_key) {\n                    return Err(ExecError::DisabledEntityVersion(entity_version_key));\n                }\n\n                let hash_addr = package\n                    .lookup_entity_hash(entity_version_key)\n                    .copied()\n                    .ok_or(ExecError::MissingEntityVersion(entity_version_key))?\n                    .value();\n\n                let entity_addr = if self.context.is_system_addressable_entity(&hash_addr)? {\n                    EntityAddr::new_system(hash_addr)\n                } else {\n                    EntityAddr::new_smart_contract(hash_addr)\n                };\n\n                let footprint = match self.context.read_gs(&Key::Hash(hash_addr))? {\n                    Some(StoredValue::Contract(contract)) => {\n                        if self.context.engine_config().enable_entity {\n                            self.migrate_contract_and_contract_package(hash_addr)?;\n                        };\n                        let maybe_system_entity_type = self.maybe_system_type(hash_addr);\n                        RuntimeFootprint::new_contract_footprint(\n                            ContractHash::new(hash_addr),\n                            contract,\n                            maybe_system_entity_type,\n                        )\n                    }\n                    Some(_) | None => {\n                        if !self.context.engine_config().enable_entity {\n                            return Err(ExecError::KeyNotFound(Key::Hash(hash_addr)));\n                        }\n                        let key = Key::AddressableEntity(entity_addr);\n                        let entity = self.context.read_gs_typed::<AddressableEntity>(&key)?;\n                        let entity_named_keys = self\n                            .context\n                           
 .state()\n                            .borrow_mut()\n                            .get_named_keys(entity_addr)?;\n                        let entry_points = self.context.get_casper_vm_v1_entry_point(key)?;\n                        RuntimeFootprint::new_entity_footprint(\n                            entity_addr,\n                            entity,\n                            entity_named_keys,\n                            entry_points,\n                        )\n                    }\n                };\n\n                (footprint, entity_addr, package)\n            }\n        };\n\n        if let EntityKind::Account(_) = footprint.entity_kind() {\n            return Err(ExecError::InvalidContext);\n        }\n\n        let entry_point = match footprint.entry_points().get(entry_point_name) {\n            Some(entry_point) => entry_point,\n            None => {\n                match footprint.entity_kind() {\n                    EntityKind::System(_) => {\n                        self.charge_system_contract_call(\n                            self.context()\n                                .engine_config()\n                                .system_config()\n                                .no_such_entrypoint(),\n                        )?;\n                    }\n                    EntityKind::Account(_) => {}\n                    EntityKind::SmartContract(_) => {}\n                }\n                return Err(ExecError::NoSuchMethod(entry_point_name.to_owned()));\n            }\n        };\n\n        let entry_point_type = entry_point.entry_point_type();\n\n        if self.context.engine_config().enable_entity && entry_point_type.is_invalid_context() {\n            return Err(ExecError::InvalidContext);\n        }\n\n        // Get contract entry point hash\n        // if public, allowed\n        // if group, restricted to user group access\n        // if template, not allowed\n        self.validate_entry_point_access(&package, entry_point_name, 
entry_point.access())?;\n        if self.context.engine_config().strict_argument_checking() {\n            let entry_point_args_lookup: BTreeMap<&str, &Parameter> = entry_point\n                .args()\n                .iter()\n                .map(|param| (param.name(), param))\n                .collect();\n\n            let args_lookup: BTreeMap<&str, &NamedArg> = args\n                .named_args()\n                .map(|named_arg| (named_arg.name(), named_arg))\n                .collect();\n\n            // variable ensure args type(s) match defined args of entry point\n            for (param_name, param) in entry_point_args_lookup {\n                if let Some(named_arg) = args_lookup.get(param_name) {\n                    if param.cl_type() != named_arg.cl_value().cl_type() {\n                        return Err(ExecError::type_mismatch(\n                            param.cl_type().clone(),\n                            named_arg.cl_value().cl_type().clone(),\n                        ));\n                    }\n                } else if !param.cl_type().is_option() {\n                    return Err(ExecError::MissingArgument {\n                        name: param.name().to_string(),\n                    });\n                }\n            }\n        }\n\n        let entity_hash = AddressableEntityHash::new(entity_addr.value());\n\n        if !self\n            .context\n            .engine_config()\n            .administrative_accounts()\n            .is_empty()\n            && !package.is_entity_enabled(&entity_addr)\n            && !self\n                .context\n                .is_system_addressable_entity(&entity_addr.value())?\n        {\n            return Err(ExecError::DisabledEntity(entity_hash));\n        }\n\n        // if session the caller's context\n        // else the called contract's context\n        let context_entity_key =\n            self.get_context_key_for_contract_call(entity_addr, entry_point)?;\n\n        let context_entity_hash = 
context_entity_key\n            .into_entity_hash_addr()\n            .ok_or(ExecError::UnexpectedKeyVariant(context_entity_key))?;\n\n        let (should_attenuate_urefs, should_validate_urefs) = {\n            // Determines if this call originated from the system account based on a first\n            // element of the call stack.\n            let is_system_account =\n                self.context.get_initiator() == PublicKey::System.to_account_hash();\n            // Is the immediate caller a system contract, such as when the auction calls the mint.\n            let is_caller_system_contract =\n                self.is_system_contract(self.context.access_rights().context_key())?;\n            // Checks if the contract we're about to call is a system contract.\n            let is_calling_system_contract = self.is_system_contract(context_entity_hash)?;\n            // uref attenuation is necessary in the following circumstances:\n            //   the originating account (aka the caller) is not the system account and\n            //   the immediate caller is either a normal account or a normal contract and\n            //   the target contract about to be called is a normal contract\n            let should_attenuate_urefs =\n                !is_system_account && !is_caller_system_contract && !is_calling_system_contract;\n            let should_validate_urefs = !is_caller_system_contract || !is_calling_system_contract;\n            (should_attenuate_urefs, should_validate_urefs)\n        };\n        let runtime_args = if should_attenuate_urefs {\n            // Main purse URefs should be attenuated only when a non-system contract is executed by\n            // a non-system account to avoid possible phishing attack scenarios.\n            utils::attenuate_uref_in_args(\n                args,\n                self.context\n                    .runtime_footprint()\n                    .borrow()\n                    .main_purse()\n                    .expect(\"need purse 
for attenuation\")\n                    .addr(),\n                AccessRights::WRITE,\n            )?\n        } else {\n            args\n        };\n\n        let extended_access_rights = {\n            let mut all_urefs = vec![];\n            for arg in runtime_args.to_values() {\n                let urefs = utils::extract_urefs(arg)?;\n                if should_validate_urefs {\n                    for uref in &urefs {\n                        self.context.validate_uref(uref)?;\n                    }\n                }\n                all_urefs.extend(urefs);\n            }\n            all_urefs\n        };\n\n        let (mut named_keys, access_rights) = match entry_point_type {\n            EntryPointType::Caller => {\n                let mut access_rights = self\n                    .context\n                    .runtime_footprint()\n                    .borrow()\n                    .extract_access_rights(context_entity_hash);\n                access_rights.extend(&extended_access_rights);\n\n                let named_keys = self\n                    .context\n                    .runtime_footprint()\n                    .borrow()\n                    .named_keys()\n                    .clone();\n\n                (named_keys, access_rights)\n            }\n            EntryPointType::Called | EntryPointType::Factory => {\n                let mut access_rights = footprint.extract_access_rights(entity_hash.value());\n                access_rights.extend(&extended_access_rights);\n                let named_keys = footprint.named_keys().clone();\n                (named_keys, access_rights)\n            }\n        };\n\n        let stack = {\n            let mut stack = self.try_get_stack()?.clone();\n\n            let package_hash = match footprint.package_hash() {\n                Some(hash) => PackageHash::new(hash),\n                None => {\n                    return Err(ExecError::UnexpectedStoredValueVariant);\n                }\n            };\n\n  
          let caller = if self.context.engine_config().enable_entity {\n                Caller::entity(package_hash, entity_addr)\n            } else {\n                Caller::smart_contract(\n                    ContractPackageHash::new(package_hash.value()),\n                    ContractHash::new(entity_addr.value()),\n                )\n            };\n\n            stack.push(caller)?;\n\n            stack\n        };\n\n        if let EntityKind::System(system_contract_type) = footprint.entity_kind() {\n            let entry_point_name = entry_point.name();\n\n            match system_contract_type {\n                SystemEntityType::Mint => {\n                    return self.call_host_mint(\n                        entry_point_name,\n                        &runtime_args,\n                        access_rights,\n                        stack,\n                    );\n                }\n                SystemEntityType::HandlePayment => {\n                    return self.call_host_handle_payment(\n                        entry_point_name,\n                        &runtime_args,\n                        access_rights,\n                        stack,\n                    );\n                }\n                SystemEntityType::Auction => {\n                    return self.call_host_auction(\n                        entry_point_name,\n                        &runtime_args,\n                        access_rights,\n                        stack,\n                    );\n                }\n                // Not callable\n                SystemEntityType::StandardPayment => {}\n            }\n        }\n\n        let module: Module = {\n            let byte_code_addr = footprint.wasm_hash().ok_or(ExecError::InvalidContext)?;\n\n            let byte_code_key = match footprint.entity_kind() {\n                EntityKind::System(_) | EntityKind::Account(_) => {\n                    Key::ByteCode(ByteCodeAddr::Empty)\n                }\n                
EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1) => {\n                    if self.context.engine_config().enable_entity {\n                        Key::ByteCode(ByteCodeAddr::new_wasm_addr(byte_code_addr))\n                    } else {\n                        Key::Hash(byte_code_addr)\n                    }\n                }\n                EntityKind::SmartContract(runtime @ ContractRuntimeTag::VmCasperV2) => {\n                    return Err(ExecError::IncompatibleRuntime(runtime));\n                }\n            };\n\n            let byte_code: ByteCode = match self.context.read_gs(&byte_code_key)? {\n                Some(StoredValue::ContractWasm(wasm)) => {\n                    ByteCode::new(ByteCodeKind::V1CasperWasm, wasm.take_bytes())\n                }\n                Some(StoredValue::ByteCode(byte_code)) => byte_code,\n                Some(_) => {\n                    return Err(ExecError::InvalidByteCode(ByteCodeHash::new(\n                        byte_code_addr,\n                    )))\n                }\n                None => return Err(ExecError::KeyNotFound(byte_code_key)),\n            };\n\n            casper_wasm::deserialize_buffer(byte_code.bytes())?\n        };\n\n        let context = self.context.new_from_self(\n            context_entity_key,\n            entry_point.entry_point_type(),\n            &mut named_keys,\n            access_rights,\n            runtime_args,\n        );\n\n        let (instance, memory) = utils::instance_and_memory(\n            module.clone(),\n            self.context.protocol_version(),\n            self.context.engine_config(),\n        )?;\n        let runtime = &mut Runtime::new_invocation_runtime(self, context, module, memory, stack);\n        let result = instance.invoke_export(entry_point.name(), &[], runtime);\n        // The `runtime`'s context was initialized with our counter from before the call and any gas\n        // charged by the sub-call was added to its counter - so let's 
copy the correct value of the\n        // counter from there to our counter. Do the same for the message cost tracking.\n        self.context.set_gas_counter(runtime.context.gas_counter());\n        self.context\n            .set_emit_message_cost(runtime.context.emit_message_cost());\n        let transfers = self.context.transfers_mut();\n        runtime.context.transfers().clone_into(transfers);\n\n        match result {\n            Ok(_) => {\n                // If `Ok` and the `host_buffer` is `None`, the contract's execution succeeded but\n                // did not explicitly call `runtime::ret()`.  Treat as though the\n                // execution returned the unit type `()` as per Rust functions which\n                // don't specify a return value.\n                if self.context.entry_point_type() == EntryPointType::Caller\n                    && runtime.context.entry_point_type() == EntryPointType::Caller\n                {\n                    // Overwrites parent's named keys with child's new named key but only when\n                    // running session code.\n                    *self.context.named_keys_mut() = runtime.context.named_keys().clone();\n                }\n                self.context\n                    .set_remaining_spending_limit(runtime.context.remaining_spending_limit());\n                Ok(runtime.take_host_buffer().unwrap_or(CLValue::from_t(())?))\n            }\n            Err(error) => {\n                #[cfg(feature = \"test-support\")]\n                dump_runtime_stack_info(\n                    instance,\n                    self.context\n                        .engine_config()\n                        .wasm_config()\n                        .v1()\n                        .max_stack_height(),\n                );\n                if let Some(host_error) = error.as_host_error() {\n                    // If the \"error\" was in fact a trap caused by calling `ret` then this is normal\n                    // operation 
and we should return the value captured in the Runtime result\n                    // field.\n                    let downcasted_error = host_error.downcast_ref::<ExecError>();\n                    return match downcasted_error {\n                        Some(ExecError::Ret(ref ret_urefs)) => {\n                            // Insert extra urefs returned from call.\n                            // Those returned URef's are guaranteed to be valid as they were already\n                            // validated in the `ret` call inside context we ret from.\n                            self.context.access_rights_extend(ret_urefs);\n                            if self.context.entry_point_type() == EntryPointType::Caller\n                                && runtime.context.entry_point_type() == EntryPointType::Caller\n                            {\n                                // Overwrites parent's named keys with child's new named key but\n                                // only when running session code.\n                                *self.context.named_keys_mut() =\n                                    runtime.context.named_keys().clone();\n                            }\n                            // Stored contracts are expected to always call a `ret` function,\n                            // otherwise it's an error.\n                            runtime\n                                .take_host_buffer()\n                                .ok_or(ExecError::ExpectedReturnValue)\n                        }\n                        Some(error) => Err(error.clone()),\n                        None => Err(ExecError::Interpreter(host_error.to_string())),\n                    };\n                }\n                Err(ExecError::Interpreter(error.into()))\n            }\n        }\n    }\n\n    fn call_contract_host_buffer(\n        &mut self,\n        contract_hash: AddressableEntityHash,\n        entry_point_name: &str,\n        args_bytes: &[u8],\n        
result_size_ptr: u32,\n    ) -> Result<Result<(), ApiError>, ExecError> {\n        // Exit early if the host buffer is already occupied\n        if let Err(err) = self.check_host_buffer() {\n            return Ok(Err(err));\n        }\n        let args: RuntimeArgs = bytesrepr::deserialize_from_slice(args_bytes)?;\n\n        if let Some(payment_purse) = self.context.maybe_payment_purse() {\n            for named_arg in args.named_args() {\n                if utils::extract_urefs(named_arg.cl_value())?\n                    .into_iter()\n                    .any(|uref| uref.remove_access_rights() == payment_purse.remove_access_rights())\n                {\n                    warn!(\"attempt to call_contract with payment purse\");\n\n                    return Err(Into::into(ExecError::Revert(ApiError::HandlePayment(\n                        handle_payment::Error::AttemptToPersistPaymentPurse as u8,\n                    ))));\n                }\n            }\n        }\n\n        let result = self.call_contract(contract_hash, entry_point_name, args)?;\n        self.manage_call_contract_host_buffer(result_size_ptr, result)\n    }\n\n    fn call_versioned_contract_host_buffer(\n        &mut self,\n        contract_package_hash: PackageHash,\n        contract_version: Option<EntityVersion>,\n        entry_point_name: String,\n        args_bytes: &[u8],\n        result_size_ptr: u32,\n    ) -> Result<Result<(), ApiError>, ExecError> {\n        // Exit early if the host buffer is already occupied\n        if let Err(err) = self.check_host_buffer() {\n            return Ok(Err(err));\n        }\n        let args: RuntimeArgs = bytesrepr::deserialize_from_slice(args_bytes)?;\n\n        if let Some(payment_purse) = self.context.maybe_payment_purse() {\n            for named_arg in args.named_args() {\n                if utils::extract_urefs(named_arg.cl_value())?\n                    .into_iter()\n                    .any(|uref| uref.remove_access_rights() == 
payment_purse.remove_access_rights())\n                {\n                    warn!(\"attempt to call_versioned_contract with payment purse\");\n\n                    return Err(Into::into(ExecError::Revert(ApiError::HandlePayment(\n                        handle_payment::Error::AttemptToPersistPaymentPurse as u8,\n                    ))));\n                }\n            }\n        }\n\n        let result = self.call_versioned_contract(\n            contract_package_hash,\n            contract_version,\n            entry_point_name,\n            args,\n        )?;\n        self.manage_call_contract_host_buffer(result_size_ptr, result)\n    }\n\n    fn call_package_version_host_buffer(\n        &mut self,\n        contract_package_hash: PackageHash,\n        protocol_version_major: Option<ProtocolVersionMajor>,\n        contract_version: Option<EntityVersion>,\n        entry_point_name: String,\n        args_bytes: &[u8],\n        result_size_ptr: u32,\n    ) -> Result<Result<(), ApiError>, ExecError> {\n        // Exit early if the host buffer is already occupied\n        if let Err(err) = self.check_host_buffer() {\n            return Ok(Err(err));\n        }\n        let args: RuntimeArgs = bytesrepr::deserialize_from_slice(args_bytes)?;\n\n        if let Some(payment_purse) = self.context.maybe_payment_purse() {\n            for named_arg in args.named_args() {\n                if utils::extract_urefs(named_arg.cl_value())?\n                    .into_iter()\n                    .any(|uref| uref.remove_access_rights() == payment_purse.remove_access_rights())\n                {\n                    warn!(\"attempt to call_versioned_contract with payment purse\");\n\n                    return Err(Into::into(ExecError::Revert(ApiError::HandlePayment(\n                        handle_payment::Error::AttemptToPersistPaymentPurse as u8,\n                    ))));\n                }\n            }\n        }\n\n        let result = self.call_package_version(\n          
  contract_package_hash,\n            protocol_version_major,\n            contract_version,\n            entry_point_name,\n            args,\n        )?;\n        self.manage_call_contract_host_buffer(result_size_ptr, result)\n    }\n\n    fn check_host_buffer(&mut self) -> Result<(), ApiError> {\n        if !self.can_write_to_host_buffer() {\n            Err(ApiError::HostBufferFull)\n        } else {\n            Ok(())\n        }\n    }\n\n    fn manage_call_contract_host_buffer(\n        &mut self,\n        result_size_ptr: u32,\n        result: CLValue,\n    ) -> Result<Result<(), ApiError>, ExecError> {\n        let result_size: u32 = match result.inner_bytes().len().try_into() {\n            Ok(value) => value,\n            Err(_) => return Ok(Err(ApiError::OutOfMemory)),\n        };\n\n        // leave the host buffer set to `None` if there's nothing to write there\n        if result_size != 0 {\n            if let Err(error) = self.write_host_buffer(result) {\n                return Ok(Err(error));\n            }\n        }\n\n        let result_size_bytes = result_size.to_le_bytes(); // Wasm is little-endian\n        if let Err(error) = self\n            .try_get_memory()?\n            .set(result_size_ptr, &result_size_bytes)\n        {\n            return Err(ExecError::Interpreter(error.into()));\n        }\n\n        Ok(Ok(()))\n    }\n\n    fn load_named_keys(\n        &mut self,\n        total_keys_ptr: u32,\n        result_size_ptr: u32,\n    ) -> Result<Result<(), ApiError>, Trap> {\n        if !self.can_write_to_host_buffer() {\n            // Exit early if the host buffer is already occupied\n            return Ok(Err(ApiError::HostBufferFull));\n        }\n\n        let total_keys: u32 = match self.context.named_keys().len().try_into() {\n            Ok(value) => value,\n            Err(_) => return Ok(Err(ApiError::OutOfMemory)),\n        };\n\n        let total_keys_bytes = total_keys.to_le_bytes();\n        if let Err(error) = self\n       
     .try_get_memory()?\n            .set(total_keys_ptr, &total_keys_bytes)\n        {\n            return Err(ExecError::Interpreter(error.into()).into());\n        }\n\n        if total_keys == 0 {\n            // No need to do anything else, we leave host buffer empty.\n            return Ok(Ok(()));\n        }\n\n        let named_keys =\n            CLValue::from_t(self.context.named_keys().clone()).map_err(ExecError::CLValue)?;\n\n        let length: u32 = match named_keys.inner_bytes().len().try_into() {\n            Ok(value) => value,\n            Err(_) => return Ok(Err(ApiError::BufferTooSmall)),\n        };\n\n        if let Err(error) = self.write_host_buffer(named_keys) {\n            return Ok(Err(error));\n        }\n\n        let length_bytes = length.to_le_bytes();\n        if let Err(error) = self.try_get_memory()?.set(result_size_ptr, &length_bytes) {\n            return Err(ExecError::Interpreter(error.into()).into());\n        }\n\n        Ok(Ok(()))\n    }\n\n    fn create_contract_package(\n        &mut self,\n        is_locked: PackageStatus,\n    ) -> Result<(ContractPackage, URef), ExecError> {\n        let access_key = self.context.new_unit_uref()?;\n        let package_status = match is_locked {\n            PackageStatus::Locked => ContractPackageStatus::Locked,\n            PackageStatus::Unlocked => ContractPackageStatus::Unlocked,\n        };\n\n        let contract_package = ContractPackage::new(\n            access_key,\n            ContractVersions::default(),\n            DisabledVersions::default(),\n            Groups::default(),\n            package_status,\n        );\n\n        Ok((contract_package, access_key))\n    }\n\n    fn create_package(&mut self, is_locked: PackageStatus) -> Result<(Package, URef), ExecError> {\n        let access_key = self.context.new_unit_uref()?;\n        let contract_package = Package::new(\n            EntityVersions::new(),\n            BTreeSet::new(),\n            Groups::new(),\n          
    /// Creates a package under a freshly generated hash address and stores it in
    /// global state — a new-style `Package` under `Key::SmartContract` when the
    /// entity feature is enabled, otherwise a legacy `ContractPackage` under
    /// `Key::Hash`.
    ///
    /// Returns `(package_hash_addr, access_uref_addr)`.
    fn create_contract_package_at_hash(
        &mut self,
        lock_status: PackageStatus,
    ) -> Result<([u8; 32], [u8; 32]), ExecError> {
        let addr = self.context.new_hash_address()?;
        let access_key = if self.context.engine_config().enable_entity {
            let (package, access_key) = self.create_package(lock_status)?;
            self.context
                .metered_write_gs_unsafe(Key::SmartContract(addr), package)?;
            access_key
        } else {
            let (package, access_key) = self.create_contract_package(lock_status)?;
            self.context
                .metered_write_gs_unsafe(Key::Hash(addr), package)?;
            access_key
        };
        Ok((addr, access_key.addr()))
    }

    /// Legacy (`ContractPackage`) variant of user-group creation: mints
    /// `num_new_urefs` unit URefs, merges them with `existing_urefs` into a new
    /// group named `label`, writes the new URefs to the host buffer (size to
    /// `output_size_ptr`) and persists the updated package to global state.
    ///
    /// Inner `Err(ApiError)` reports group/URef limit violations to the caller;
    /// outer `Err(ExecError)` reports execution failures.
    fn create_contract_user_group_by_contract_package(
        &mut self,
        contract_package_hash: PackageHash,
        label: String,
        num_new_urefs: u32,
        mut existing_urefs: BTreeSet<URef>,
        output_size_ptr: u32,
    ) -> Result<Result<(), ApiError>, ExecError> {
        let mut contract_package: ContractPackage = self
            .context
            .get_validated_contract_package(contract_package_hash.value())?;

        let groups = contract_package.groups_mut();
        let new_group = Group::new(label);

        // Ensure group does not already exist
        if groups.contains(&new_group) {
            return Ok(Err(addressable_entity::Error::GroupAlreadyExists.into()));
        }

        // Ensure there are not too many groups
        if groups.len() >= (addressable_entity::MAX_GROUPS as usize) {
            return Ok(Err(addressable_entity::Error::MaxGroupsExceeded.into()));
        }

        // Ensure there are not too many urefs
        let total_urefs: usize =
            groups.total_urefs() + (num_new_urefs as usize) + existing_urefs.len();
        if total_urefs > addressable_entity::MAX_TOTAL_UREFS {
            let err = addressable_entity::Error::MaxTotalURefsExceeded;
            return Ok(Err(ApiError::ContractHeader(err as u8)));
        }

        // Proceed with creating user group
        let mut new_urefs = Vec::with_capacity(num_new_urefs as usize);
        for _ in 0..num_new_urefs {
            let u = self.context.new_unit_uref()?;
            new_urefs.push(u);
        }

        for u in new_urefs.iter().cloned() {
            existing_urefs.insert(u);
        }
        groups.insert(new_group, existing_urefs);

        // check we can write to the host buffer
        if let Err(err) = self.check_host_buffer() {
            return Ok(Err(err));
        }
        // create CLValue for return value
        let new_urefs_value = CLValue::from_t(new_urefs)?;
        let value_size = new_urefs_value.inner_bytes().len();
        // write return value to buffer
        if let Err(err) = self.write_host_buffer(new_urefs_value) {
            return Ok(Err(err));
        }
        // Write return value size to output location
        let output_size_bytes = value_size.to_le_bytes(); // Wasm is little-endian
        if let Err(error) = self
            .try_get_memory()?
            .set(output_size_ptr, &output_size_bytes)
        {
            return Err(ExecError::Interpreter(error.into()));
        }

        // Write updated package to the global state
        self.context.metered_write_gs_unsafe(
            ContractPackageHash::new(contract_package_hash.value()),
            contract_package,
        )?;

        Ok(Ok(()))
    }
    /// Creates a user group on a package. Delegates to the legacy
    /// `ContractPackage` variant when the entity feature is disabled; otherwise
    /// performs the same limit checks and host-buffer protocol against the
    /// new-style `Package`.
    ///
    /// Inner `Err(ApiError)` reports group/URef limit violations to the caller;
    /// outer `Err(ExecError)` reports execution failures.
    fn create_contract_user_group(
        &mut self,
        contract_package_hash: PackageHash,
        label: String,
        num_new_urefs: u32,
        mut existing_urefs: BTreeSet<URef>,
        output_size_ptr: u32,
    ) -> Result<Result<(), ApiError>, ExecError> {
        if !self.context.engine_config().enable_entity {
            return self.create_contract_user_group_by_contract_package(
                contract_package_hash,
                label,
                num_new_urefs,
                existing_urefs,
                output_size_ptr,
            );
        };

        let mut contract_package: Package =
            self.context.get_validated_package(contract_package_hash)?;

        let groups = contract_package.groups_mut();
        let new_group = Group::new(label);

        // Ensure group does not already exist
        if groups.contains(&new_group) {
            return Ok(Err(addressable_entity::Error::GroupAlreadyExists.into()));
        }

        // Ensure there are not too many groups
        if groups.len() >= (addressable_entity::MAX_GROUPS as usize) {
            return Ok(Err(addressable_entity::Error::MaxGroupsExceeded.into()));
        }

        // Ensure there are not too many urefs
        let total_urefs: usize =
            groups.total_urefs() + (num_new_urefs as usize) + existing_urefs.len();
        if total_urefs > addressable_entity::MAX_TOTAL_UREFS {
            let err = addressable_entity::Error::MaxTotalURefsExceeded;
            return Ok(Err(ApiError::ContractHeader(err as u8)));
        }

        // Proceed with creating user group
        let mut new_urefs = Vec::with_capacity(num_new_urefs as usize);
        for _ in 0..num_new_urefs {
            let u = self.context.new_unit_uref()?;
            new_urefs.push(u);
        }

        for u in new_urefs.iter().cloned() {
            existing_urefs.insert(u);
        }
        groups.insert(new_group, existing_urefs);

        // check we can write to the host buffer
        if let Err(err) = self.check_host_buffer() {
            return Ok(Err(err));
        }
        // create CLValue for return value
        let new_urefs_value = CLValue::from_t(new_urefs)?;
        let value_size = new_urefs_value.inner_bytes().len();
        // write return value to buffer
        if let Err(err) = self.write_host_buffer(new_urefs_value) {
            return Ok(Err(err));
        }
        // Write return value size to output location
        let output_size_bytes = value_size.to_le_bytes(); // Wasm is little-endian
        if let Err(error) = self
            .try_get_memory()?
            .set(output_size_ptr, &output_size_bytes)
        {
            return Err(ExecError::Interpreter(error.into()));
        }

        // Write updated package to the global state
        self.context
            .metered_write_gs_unsafe(contract_package_hash, contract_package)?;

        Ok(Ok(()))
    }
write return value to buffer\n        if let Err(err) = self.write_host_buffer(new_urefs_value) {\n            return Ok(Err(err));\n        }\n        // Write return value size to output location\n        let output_size_bytes = value_size.to_le_bytes(); // Wasm is little-endian\n        if let Err(error) = self\n            .try_get_memory()?\n            .set(output_size_ptr, &output_size_bytes)\n        {\n            return Err(ExecError::Interpreter(error.into()));\n        }\n\n        // Write updated package to the global state\n        self.context\n            .metered_write_gs_unsafe(contract_package_hash, contract_package)?;\n\n        Ok(Ok(()))\n    }\n\n    #[allow(clippy::too_many_arguments)]\n    fn add_contract_version(\n        &mut self,\n        package_hash: PackageHash,\n        version_ptr: u32,\n        entry_points: EntryPoints,\n        named_keys: NamedKeys,\n        message_topics: BTreeMap<String, MessageTopicOperation>,\n        output_ptr: u32,\n    ) -> Result<Result<(), ApiError>, ExecError> {\n        if self.context.engine_config().enable_entity {\n            self.add_contract_version_by_package(\n                package_hash,\n                version_ptr,\n                entry_points,\n                named_keys,\n                message_topics,\n                output_ptr,\n            )\n        } else {\n            self.add_contract_version_by_contract_package(\n                package_hash.value(),\n                version_ptr,\n                entry_points,\n                named_keys,\n                message_topics,\n                output_ptr,\n            )\n        }\n    }\n\n    #[allow(clippy::too_many_arguments)]\n    fn add_contract_version_by_contract_package(\n        &mut self,\n        contract_package_hash: HashAddr,\n        version_ptr: u32,\n        entry_points: EntryPoints,\n        mut named_keys: NamedKeys,\n        message_topics: BTreeMap<String, MessageTopicOperation>,\n        output_ptr: 
u32,\n    ) -> Result<Result<(), ApiError>, ExecError> {\n        if !self.context.install_upgrade_allowed() {\n            // NOTE: This is not a permission check on the caller,\n            // it is enforcing the rule that only legacy standard deploys (which are grandfathered)\n            // and install / upgrade transactions are allowed to call this method\n            return Ok(Err(ApiError::NotAllowedToAddContractVersion));\n        }\n\n        // if entry_points.contains_stored_session() {\n        //     // As of 2.0 we do not allow stored session logic to be\n        //     // installed or upgraded. Pre-existing stored\n        //     // session logic is still callable.\n        //     return Err(ExecError::InvalidEntryPointType);\n        // }\n\n        self.context\n            .validate_key(&Key::Hash(contract_package_hash))?;\n\n        let mut contract_package: ContractPackage = self\n            .context\n            .get_validated_contract_package(contract_package_hash)?;\n\n        let version = contract_package.current_contract_version();\n\n        // Return an error if the contract is locked and has some version associated with it.\n        if contract_package.is_locked() && version.is_some() {\n            return Err(ExecError::LockedEntity(PackageHash::new(\n                contract_package_hash,\n            )));\n        }\n\n        for (_, key) in named_keys.iter() {\n            self.context.validate_key(key)?\n        }\n\n        let contract_wasm_hash = self.context.new_hash_address()?;\n        let contract_wasm = {\n            let module_bytes = self.get_module_from_entry_points(&entry_points)?;\n            ContractWasm::new(module_bytes)\n        };\n\n        let contract_hash_addr: HashAddr = self.context.new_hash_address()?;\n        let contract_entity_addr = EntityAddr::SmartContract(contract_hash_addr);\n\n        let protocol_version = self.context.protocol_version();\n        let major = 
protocol_version.value().major;\n\n        let maybe_previous_hash =\n            if let Some(previous_contract_hash) = contract_package.current_contract_hash() {\n                let previous_contract: Contract =\n                    self.context.read_gs_typed(&previous_contract_hash.into())?;\n\n                let previous_named_keys = previous_contract.take_named_keys();\n                named_keys.append(previous_named_keys);\n                Some(EntityAddr::SmartContract(previous_contract_hash.value()))\n            } else {\n                None\n            };\n\n        if let Err(err) = self.carry_forward_message_topics(\n            maybe_previous_hash,\n            contract_entity_addr,\n            message_topics,\n        )? {\n            return Ok(Err(err));\n        };\n\n        let contract_package_hash = ContractPackageHash::new(contract_package_hash);\n        let contract = Contract::new(\n            contract_package_hash,\n            contract_wasm_hash.into(),\n            named_keys,\n            entry_points.into(),\n            protocol_version,\n        );\n\n        let insert_contract_result =\n            contract_package.insert_contract_version(major, contract_hash_addr.into());\n\n        self.context\n            .metered_write_gs_unsafe(Key::Hash(contract_wasm_hash), contract_wasm)?;\n        self.context\n            .metered_write_gs_unsafe(Key::Hash(contract_hash_addr), contract)?;\n        self.context\n            .metered_write_gs_unsafe(Key::Hash(contract_package_hash.value()), contract_package)?;\n\n        // set return values to buffer\n        {\n            let hash_bytes = match contract_hash_addr.to_bytes() {\n                Ok(bytes) => bytes,\n                Err(error) => return Ok(Err(error.into())),\n            };\n\n            // Set serialized hash bytes into the output buffer\n            if let Err(error) = self.try_get_memory()?.set(output_ptr, &hash_bytes) {\n                return 
Err(ExecError::Interpreter(error.into()));\n            }\n\n            // Set version into VM shared memory\n            let version_value: u32 = insert_contract_result.contract_version();\n            let version_bytes = version_value.to_le_bytes();\n            if let Err(error) = self.try_get_memory()?.set(version_ptr, &version_bytes) {\n                return Err(ExecError::Interpreter(error.into()));\n            }\n        }\n\n        Ok(Ok(()))\n    }\n\n    #[allow(clippy::too_many_arguments)]\n    fn add_contract_version_by_package(\n        &mut self,\n        package_hash: PackageHash,\n        version_ptr: u32,\n        entry_points: EntryPoints,\n        mut named_keys: NamedKeys,\n        message_topics: BTreeMap<String, MessageTopicOperation>,\n        output_ptr: u32,\n    ) -> Result<Result<(), ApiError>, ExecError> {\n        if !self.context.install_upgrade_allowed() {\n            // NOTE: This is not a permission check on the caller,\n            // it is enforcing the rule that only legacy standard deploys (which are grandfathered)\n            // and install / upgrade transactions are allowed to call this method\n            return Ok(Err(ApiError::NotAllowedToAddContractVersion));\n        }\n\n        if entry_points.contains_stored_session() {\n            // As of 2.0 we do not allow stored session logic to be\n            // installed or upgraded. 
Pre-existing stored\n            // session logic is still callable.\n            return Err(ExecError::InvalidEntryPointType);\n        }\n\n        let mut package = self.context.get_package(package_hash.value())?;\n\n        // Return an error if the contract is locked and has some version associated with it.\n        if package.is_locked() {\n            return Err(ExecError::LockedEntity(package_hash));\n        }\n\n        let (\n            main_purse,\n            previous_named_keys,\n            action_thresholds,\n            associated_keys,\n            previous_hash_addr,\n        ) = self.new_version_entity_parts(&package)?;\n\n        // We generate the byte code hash because a byte code record\n        // must exist for a contract record to exist.\n        let byte_code_hash = self.context.new_hash_address()?;\n\n        let hash_addr = self.context.new_hash_address()?;\n        let entity_addr = EntityAddr::SmartContract(hash_addr);\n\n        if let Err(err) =\n            self.carry_forward_message_topics(previous_hash_addr, entity_addr, message_topics)?\n        {\n            return Ok(Err(err));\n        };\n\n        let protocol_version = self.context.protocol_version();\n\n        let insert_entity_version_result =\n            package.insert_entity_version(protocol_version.value().major, entity_addr);\n\n        let byte_code = {\n            let module_bytes = self.get_module_from_entry_points(&entry_points)?;\n            ByteCode::new(ByteCodeKind::V1CasperWasm, module_bytes)\n        };\n\n        self.context.metered_write_gs_unsafe(\n            Key::ByteCode(ByteCodeAddr::new_wasm_addr(byte_code_hash)),\n            byte_code,\n        )?;\n\n        let entity_addr = EntityAddr::new_smart_contract(hash_addr);\n\n        {\n            // DO NOT EXTRACT INTO SEPARATE FUNCTION.\n            for (_, key) in named_keys.iter() {\n                // Validate all the imputed named keys\n                // against the installers 
permissions\n                self.context.validate_key(key)?;\n            }\n            // Carry forward named keys from previous version\n            // Grant all the imputed named keys + previous named keys.\n            named_keys.append(previous_named_keys);\n            for (name, key) in named_keys.iter() {\n                let named_key_value =\n                    StoredValue::NamedKey(NamedKeyValue::from_concrete_values(*key, name.clone())?);\n                let named_key_addr = NamedKeyAddr::new_from_string(entity_addr, name.clone())?;\n                self.context\n                    .metered_write_gs_unsafe(Key::NamedKey(named_key_addr), named_key_value)?;\n            }\n            self.context.write_entry_points(entity_addr, entry_points)?;\n        }\n\n        let entity = AddressableEntity::new(\n            package_hash,\n            byte_code_hash.into(),\n            protocol_version,\n            main_purse,\n            associated_keys,\n            action_thresholds,\n            EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1),\n        );\n        let entity_key = Key::AddressableEntity(entity_addr);\n        self.context.metered_write_gs_unsafe(entity_key, entity)?;\n        self.context\n            .metered_write_gs_unsafe(package_hash, package)?;\n\n        // set return values to buffer\n        {\n            let hash_bytes = match hash_addr.to_bytes() {\n                Ok(bytes) => bytes,\n                Err(error) => return Ok(Err(error.into())),\n            };\n\n            // Set serialized hash bytes into the output buffer\n            if let Err(error) = self.try_get_memory()?.set(output_ptr, &hash_bytes) {\n                return Err(ExecError::Interpreter(error.into()));\n            }\n\n            // Set version into VM shared memory\n            let version_value: u32 = insert_entity_version_result.entity_version();\n            let version_bytes = version_value.to_le_bytes();\n            if let 
Err(error) = self.try_get_memory()?.set(version_ptr, &version_bytes) {\n                return Err(ExecError::Interpreter(error.into()));\n            }\n        }\n\n        Ok(Ok(()))\n    }\n\n    fn carry_forward_message_topics(\n        &mut self,\n        previous_entity_addr: Option<EntityAddr>,\n        entity_addr: EntityAddr,\n        message_topics: BTreeMap<String, MessageTopicOperation>,\n    ) -> Result<Result<(), ApiError>, ExecError> {\n        let mut previous_message_topics = match previous_entity_addr {\n            Some(previous_hash) => self.context.get_message_topics(previous_hash)?,\n            None => MessageTopics::default(),\n        };\n\n        let max_topics_per_contract = self\n            .context\n            .engine_config()\n            .wasm_config()\n            .messages_limits()\n            .max_topics_per_contract();\n\n        let topics_to_add = message_topics\n            .iter()\n            .filter(|(_, operation)| match operation {\n                MessageTopicOperation::Add => true,\n            });\n        // Check if registering the new topics would exceed the limit per contract\n        if previous_message_topics.len() + topics_to_add.clone().count()\n            > max_topics_per_contract as usize\n        {\n            return Ok(Err(ApiError::from(MessageTopicError::MaxTopicsExceeded)));\n        }\n\n        // Extend the previous topics with the newly added ones.\n        for (new_topic, _) in topics_to_add {\n            let topic_name_hash = cryptography::blake2b(new_topic.as_bytes()).into();\n            if let Err(e) = previous_message_topics.add_topic(new_topic.as_str(), topic_name_hash) {\n                return Ok(Err(e.into()));\n            }\n        }\n\n        for (topic_name, topic_hash) in previous_message_topics.iter() {\n            let topic_key = Key::message_topic(entity_addr, *topic_hash);\n            let block_time = self.context.get_block_info().block_time();\n            let summary = 
StoredValue::MessageTopic(MessageTopicSummary::new(\n                0,\n                block_time,\n                topic_name.clone(),\n            ));\n            self.context.metered_write_gs_unsafe(topic_key, summary)?;\n        }\n        Ok(Ok(()))\n    }\n\n    fn new_version_entity_parts(\n        &mut self,\n        package: &Package,\n    ) -> Result<\n        (\n            URef,\n            NamedKeys,\n            ActionThresholds,\n            AssociatedKeys,\n            Option<EntityAddr>,\n        ),\n        ExecError,\n    > {\n        if let Some(previous_entity_hash) = package.current_entity_hash() {\n            let previous_entity_key = Key::AddressableEntity(previous_entity_hash);\n            let (mut previous_entity, requires_purse_creation) =\n                self.context.get_contract_entity(previous_entity_key)?;\n\n            let action_thresholds = previous_entity.action_thresholds().clone();\n\n            let associated_keys = previous_entity.associated_keys().clone();\n            // STEP 1: LOAD THE CONTRACT AND CHECK IF CALLER IS IN ASSOCIATED KEYS WITH ENOUGH\n            // WEIGHT     TO UPGRADE (COMPARE TO THE ACTION THRESHOLD FOR UPGRADE\n            // ACTION). 
STEP 2: IF CALLER IS NOT IN CONTRACTS ASSOCIATED KEYS\n            //    CHECK FOR LEGACY UREFADDR UNDER KEY:HASH(PACKAGEADDR)\n            //    IF FOUND,\n            //      call validate_uref(that uref)\n            //    IF VALID,\n            //      create the new contract version carrying forward previous state including\n            // associated keys      BUT add the caller to the associated keys with\n            // weight == to the action threshold for upgrade ELSE, error\n            if !previous_entity.can_upgrade_with(self.context.authorization_keys()) {\n                // Check if the calling entity must be grandfathered into the new\n                // addressable entity format\n                let account_hash = self.context.get_initiator();\n\n                let access_key = match self\n                    .context\n                    .read_gs(&Key::Hash(previous_entity.package_hash().value()))?\n                {\n                    Some(StoredValue::ContractPackage(contract_package)) => {\n                        contract_package.access_key()\n                    }\n                    Some(StoredValue::CLValue(cl_value)) => {\n                        let (_key, uref) = cl_value\n                            .into_t::<(Key, URef)>()\n                            .map_err(ExecError::CLValue)?;\n                        uref\n                    }\n                    Some(_other) => return Err(ExecError::UnexpectedStoredValueVariant),\n                    None => {\n                        return Err(ExecError::UpgradeAuthorizationFailure);\n                    }\n                };\n\n                let has_access = self.context.validate_uref(&access_key).is_ok();\n\n                if has_access && !associated_keys.contains_key(&account_hash) {\n                    previous_entity.add_associated_key(\n                        account_hash,\n                        *action_thresholds.upgrade_management(),\n                    )?;\n             
   } else {\n                    return Err(ExecError::UpgradeAuthorizationFailure);\n                }\n            }\n\n            let main_purse = if requires_purse_creation {\n                self.create_purse()?\n            } else {\n                previous_entity.main_purse()\n            };\n\n            let associated_keys = previous_entity.associated_keys().clone();\n\n            let previous_named_keys = self.context.get_named_keys(previous_entity_key)?;\n\n            return Ok((\n                main_purse,\n                previous_named_keys,\n                action_thresholds,\n                associated_keys,\n                Some(previous_entity_hash),\n            ));\n        }\n\n        Ok((\n            self.create_purse()?,\n            NamedKeys::new(),\n            ActionThresholds::default(),\n            AssociatedKeys::new(self.context.get_initiator(), Weight::new(1)),\n            None,\n        ))\n    }\n\n    fn disable_contract_version(\n        &mut self,\n        contract_package_hash: PackageHash,\n        contract_hash: AddressableEntityHash,\n    ) -> Result<Result<(), ApiError>, ExecError> {\n        if self.context.engine_config().enable_entity {\n            let contract_package_key = Key::SmartContract(contract_package_hash.value());\n            self.context.validate_key(&contract_package_key)?;\n\n            let mut contract_package: Package =\n                self.context.get_validated_package(contract_package_hash)?;\n\n            if contract_package.is_locked() {\n                return Err(ExecError::LockedEntity(contract_package_hash));\n            }\n\n            if let Err(err) = contract_package\n                .disable_entity_version(EntityAddr::SmartContract(contract_hash.value()))\n            {\n                return Ok(Err(err.into()));\n            }\n\n            self.context\n                .metered_write_gs_unsafe(contract_package_key, contract_package)?;\n        } else {\n            let 
contract_package_key = Key::Hash(contract_package_hash.value());\n            self.context.validate_key(&contract_package_key)?;\n\n            let mut contract_package: ContractPackage = self\n                .context\n                .get_validated_contract_package(contract_package_hash.value())?;\n\n            if contract_package.is_locked() {\n                return Err(ExecError::LockedEntity(PackageHash::new(\n                    contract_package_hash.value(),\n                )));\n            }\n            let contract_hash = ContractHash::new(contract_hash.value());\n\n            if let Err(err) = contract_package.disable_contract_version(contract_hash) {\n                return Ok(Err(err.into()));\n            }\n\n            self.context\n                .metered_write_gs_unsafe(contract_package_key, contract_package)?;\n        }\n\n        Ok(Ok(()))\n    }\n\n    fn enable_contract_version(\n        &mut self,\n        contract_package_hash: PackageHash,\n        contract_hash: AddressableEntityHash,\n    ) -> Result<Result<(), ApiError>, ExecError> {\n        if self.context.engine_config().enable_entity {\n            let contract_package_key = Key::SmartContract(contract_package_hash.value());\n            self.context.validate_key(&contract_package_key)?;\n\n            let mut contract_package: Package =\n                self.context.get_validated_package(contract_package_hash)?;\n\n            if contract_package.is_locked() {\n                return Err(ExecError::LockedEntity(contract_package_hash));\n            }\n\n            if let Err(err) =\n                contract_package.enable_version(EntityAddr::SmartContract(contract_hash.value()))\n            {\n                return Ok(Err(err.into()));\n            }\n\n            self.context\n                .metered_write_gs_unsafe(contract_package_key, contract_package)?;\n        } else {\n            let contract_package_key = Key::Hash(contract_package_hash.value());\n            
self.context.validate_key(&contract_package_key)?;\n\n            let mut contract_package: ContractPackage = self\n                .context\n                .get_validated_contract_package(contract_package_hash.value())?;\n\n            if contract_package.is_locked() {\n                return Err(ExecError::LockedEntity(PackageHash::new(\n                    contract_package_hash.value(),\n                )));\n            }\n            let contract_hash = ContractHash::new(contract_hash.value());\n\n            if let Err(err) = contract_package.enable_contract_version(contract_hash) {\n                return Ok(Err(err.into()));\n            }\n\n            self.context\n                .metered_write_gs_unsafe(contract_package_key, contract_package)?;\n        }\n\n        Ok(Ok(()))\n    }\n\n    /// Writes function address (`hash_bytes`) into the Wasm memory (at\n    /// `dest_ptr` pointer).\n    fn function_address(&mut self, hash_bytes: [u8; 32], dest_ptr: u32) -> Result<(), Trap> {\n        self.try_get_memory()?\n            .set(dest_ptr, &hash_bytes)\n            .map_err(|e| ExecError::Interpreter(e.into()).into())\n    }\n\n    /// Generates new unforgeable reference and adds it to the context's\n    /// access_rights set.\n    fn new_uref(&mut self, uref_ptr: u32, value_ptr: u32, value_size: u32) -> Result<(), Trap> {\n        let cl_value = self.cl_value_from_mem(value_ptr, value_size)?; // read initial value from memory\n        let uref = self.context.new_uref(StoredValue::CLValue(cl_value))?;\n        self.try_get_memory()?\n            .set(uref_ptr, &uref.into_bytes().map_err(ExecError::BytesRepr)?)\n            .map_err(|e| ExecError::Interpreter(e.into()).into())\n    }\n\n    /// Writes `value` under `key` in GlobalState.\n    fn write(\n        &mut self,\n        key_ptr: u32,\n        key_size: u32,\n        value_ptr: u32,\n        value_size: u32,\n    ) -> Result<(), Trap> {\n        let key = self.key_from_mem(key_ptr, 
key_size)?;\n        let cl_value = self.cl_value_from_mem(value_ptr, value_size)?;\n        self.context\n            .metered_write_gs(key, cl_value)\n            .map_err(Into::into)\n    }\n\n    /// Records a transfer.\n    fn record_transfer(\n        &mut self,\n        maybe_to: Option<AccountHash>,\n        source: URef,\n        target: URef,\n        amount: U512,\n        id: Option<u64>,\n    ) -> Result<(), ExecError> {\n        if self.context.get_context_key() != self.context.get_system_entity_key(MINT)? {\n            return Err(ExecError::InvalidContext);\n        }\n\n        if self.context.phase() != Phase::Session {\n            return Ok(());\n        }\n\n        let txn_hash = self.context.get_transaction_hash();\n        let from = InitiatorAddr::AccountHash(self.context.get_initiator());\n        let fee = Gas::from(\n            self.context\n                .engine_config()\n                .system_config()\n                .mint_costs()\n                .transfer,\n        );\n        let transfer = Transfer::V2(TransferV2::new(\n            txn_hash, from, maybe_to, source, target, amount, fee, id,\n        ));\n        self.context.transfers_mut().push(transfer);\n        Ok(())\n    }\n\n    /// Records given auction info at a given era id\n    fn record_era_info(&mut self, era_info: EraInfo) -> Result<(), ExecError> {\n        if self.context.get_initiator() != PublicKey::System.to_account_hash() {\n            return Err(ExecError::InvalidContext);\n        }\n\n        if self.context.get_context_key() != self.context.get_system_entity_key(AUCTION)? 
{\n            return Err(ExecError::InvalidContext);\n        }\n\n        if self.context.phase() != Phase::Session {\n            return Ok(());\n        }\n\n        self.context.write_era_info(Key::EraSummary, era_info);\n\n        Ok(())\n    }\n\n    /// Adds `value` to the cell that `key` points at.\n    fn add(\n        &mut self,\n        key_ptr: u32,\n        key_size: u32,\n        value_ptr: u32,\n        value_size: u32,\n    ) -> Result<(), Trap> {\n        let key = self.key_from_mem(key_ptr, key_size)?;\n        let cl_value = self.cl_value_from_mem(value_ptr, value_size)?;\n        self.context\n            .metered_add_gs(key, cl_value)\n            .map_err(Into::into)\n    }\n\n    /// Reads value from the GS living under key specified by `key_ptr` and\n    /// `key_size`. Wasm and host communicate through memory that Wasm\n    /// module exports. If contract wants to pass data to the host, it has\n    /// to tell it [the host] where this data lives in the exported memory\n    /// (pass its pointer and length).\n    fn read(\n        &mut self,\n        key_ptr: u32,\n        key_size: u32,\n        output_size_ptr: u32,\n    ) -> Result<Result<(), ApiError>, Trap> {\n        if !self.can_write_to_host_buffer() {\n            // Exit early if the host buffer is already occupied\n            return Ok(Err(ApiError::HostBufferFull));\n        }\n\n        let key = self.key_from_mem(key_ptr, key_size)?;\n        let cl_value = match self.context.read_gs(&key)? 
{\n            Some(stored_value) => {\n                CLValue::try_from(stored_value).map_err(ExecError::TypeMismatch)?\n            }\n            None => return Ok(Err(ApiError::ValueNotFound)),\n        };\n\n        let value_size: u32 = match cl_value.inner_bytes().len().try_into() {\n            Ok(value) => value,\n            Err(_) => return Ok(Err(ApiError::BufferTooSmall)),\n        };\n\n        if let Err(error) = self.write_host_buffer(cl_value) {\n            return Ok(Err(error));\n        }\n\n        let value_bytes = value_size.to_le_bytes(); // Wasm is little-endian\n        if let Err(error) = self.try_get_memory()?.set(output_size_ptr, &value_bytes) {\n            return Err(ExecError::Interpreter(error.into()).into());\n        }\n\n        Ok(Ok(()))\n    }\n\n    /// Reverts contract execution with a status specified.\n    fn revert(&mut self, status: u32) -> Trap {\n        ExecError::Revert(status.into()).into()\n    }\n\n    /// Checks if a caller can manage its own associated keys and thresholds.\n    ///\n    /// On some private chains with administrator keys configured this requires that the caller is\n    /// an admin to be able to manage its own keys. 
If the caller is not an administrator then the
    /// deploy has to be signed by an administrator.
    fn can_manage_keys(&self) -> bool {
        if self
            .context
            .engine_config()
            .administrative_accounts()
            .is_empty()
        {
            // Public chain
            return self
                .context
                .runtime_footprint()
                .borrow()
                .can_manage_keys_with(self.context.authorization_keys());
        }

        // Private/permissioned chain: administrators always may manage keys.
        if self
            .context
            .engine_config()
            .is_administrator(&self.context.get_initiator())
        {
            return true;
        }

        // If caller is not an admin, check if deploy was co-signed by admin account.
        self.context.is_authorized_by_admin()
    }

    /// Deserializes an `AccountHash` from wasm memory and attempts to add it as an
    /// associated key with weight `weight_value`.
    ///
    /// Returns `Ok(0)` on success, a positive `AddKeyFailure` discriminant on a domain
    /// failure (including `PermissionDenied` when `can_manage_keys()` is false), or a
    /// `Trap` for any other execution error.
    fn add_associated_key(
        &mut self,
        account_hash_ptr: u32,
        account_hash_size: usize,
        weight_value: u8,
    ) -> Result<i32, Trap> {
        let account_hash = {
            // Account hash as serialized bytes
            let source_serialized = self.bytes_from_mem(account_hash_ptr, account_hash_size)?;
            // Account hash deserialized
            let source: AccountHash = bytesrepr::deserialize_from_slice(source_serialized)
                .map_err(ExecError::BytesRepr)?;
            source
        };
        let weight = Weight::new(weight_value);

        if !self.can_manage_keys() {
            return Ok(AddKeyFailure::PermissionDenied as i32);
        }

        match self.context.add_associated_key(account_hash, weight) {
            Ok(_) => Ok(0),
            // This relies on the fact that `AddKeyFailure` is represented as
            // i32 and first variant start with number `1`, so all other variants
            // are greater than the first one, so it's safe to assume `0` is success,
            // and any error is greater than 0.
            Err(ExecError::AddKeyFailure(e)) => Ok(e as i32),
            // Any other variant just pass as `Trap`
            Err(e) => Err(e.into()),
        }
    }

    /// Deserializes an `AccountHash` from wasm memory and attempts to remove it from the
    /// associated keys.
    ///
    /// Returns `Ok(0)` on success, a positive `RemoveKeyFailure` discriminant on a
    /// domain failure, or a `Trap` for any other execution error.
    fn remove_associated_key(
        &mut self,
        account_hash_ptr: u32,
        account_hash_size: usize,
    ) -> Result<i32, Trap> {
        let account_hash = {
            // Account hash as serialized bytes
            let source_serialized = self.bytes_from_mem(account_hash_ptr, account_hash_size)?;
            // Account hash deserialized
            let source: AccountHash = bytesrepr::deserialize_from_slice(source_serialized)
                .map_err(ExecError::BytesRepr)?;
            source
        };

        if !self.can_manage_keys() {
            return Ok(RemoveKeyFailure::PermissionDenied as i32);
        }

        match self.context.remove_associated_key(account_hash) {
            Ok(_) => Ok(0),
            // Same i32 error-code convention as `add_associated_key` above.
            Err(ExecError::RemoveKeyFailure(e)) => Ok(e as i32),
            Err(e) => Err(e.into()),
        }
    }

    /// Deserializes an `AccountHash` from wasm memory and attempts to change the weight
    /// of that associated key to `weight_value`.
    ///
    /// Returns `Ok(0)` on success, a positive `UpdateKeyFailure` discriminant on a
    /// domain failure, or a `Trap` for any other execution error.
    fn update_associated_key(
        &mut self,
        account_hash_ptr: u32,
        account_hash_size: usize,
        weight_value: u8,
    ) -> Result<i32, Trap> {
        let account_hash = {
            // Account hash as serialized bytes
            let source_serialized = self.bytes_from_mem(account_hash_ptr, account_hash_size)?;
            // Account hash deserialized
            let source: AccountHash = bytesrepr::deserialize_from_slice(source_serialized)
                .map_err(ExecError::BytesRepr)?;
            source
        };
        let weight = Weight::new(weight_value);

        if !self.can_manage_keys() {
            return Ok(UpdateKeyFailure::PermissionDenied as i32);
        }

        match self.context.update_associated_key(account_hash, weight) {
            Ok(_) => Ok(0),
            // This relies on the fact that `UpdateKeyFailure` is represented as
            // i32 and first variant start with number `1`, so all other variants
            // are greater than the first one, so it's safe to assume `0` is success,
            // and any error is greater than 0.
            Err(ExecError::UpdateKeyFailure(e)) => Ok(e as i32),
            // Any other variant just pass as `Trap`
            Err(e) => Err(e.into()),
        }
    }

    /// Sets the action threshold identified by `action_type_value` to `threshold_value`.
    ///
    /// Returns `Ok(0)` on success, a positive `SetThresholdFailure` discriminant on a
    /// domain failure. An unrecognized `action_type_value` traps with `Unreachable`
    /// rather than returning an error code.
    fn set_action_threshold(
        &mut self,
        action_type_value: u32,
        threshold_value: u8,
    ) -> Result<i32, Trap> {
        if !self.can_manage_keys() {
            return Ok(SetThresholdFailure::PermissionDeniedError as i32);
        }

        match ActionType::try_from(action_type_value) {
            Ok(action_type) => {
                let threshold = Weight::new(threshold_value);
                match self.context.set_action_threshold(action_type, threshold) {
                    Ok(_) => Ok(0),
                    Err(ExecError::SetThresholdFailure(e)) => Ok(e as i32),
                    Err(error) => Err(error.into()),
                }
            }
            Err(_) => Err(Trap::Code(TrapCode::Unreachable)),
        }
    }

    /// Looks up the public mint contract key in the context's protocol data.
    ///
    /// Returned URef is already attenuated depending on the calling account.
    fn get_mint_hash(&self) -> Result<AddressableEntityHash, ExecError> {
        self.context.get_system_contract(MINT)
    }

    /// Looks up the public handle payment contract key in the context's protocol data.
    ///
    /// Returned URef is already attenuated depending on the calling account.
    fn get_handle_payment_hash(&self) -> Result<AddressableEntityHash, ExecError> {
        self.context.get_system_contract(HANDLE_PAYMENT)
    }

    /// Looks up the public standard payment contract key in the context's protocol data.
    ///
    /// Returned URef is already attenuated depending on the calling account.
    fn get_standard_payment_hash(&self) 
-> Result<AddressableEntityHash, ExecError> {\n        self.context.get_system_contract(STANDARD_PAYMENT)\n    }\n\n    /// Looks up the public auction contract key in the context's protocol data.\n    ///\n    /// Returned URef is already attenuated depending on the calling account.\n    fn get_auction_hash(&self) -> Result<AddressableEntityHash, ExecError> {\n        self.context.get_system_contract(AUCTION)\n    }\n\n    /// Calls the `read_base_round_reward` method on the mint contract at the given mint\n    /// contract key\n    fn mint_read_base_round_reward(\n        &mut self,\n        mint_contract_hash: AddressableEntityHash,\n    ) -> Result<U512, ExecError> {\n        let gas_counter = self.gas_counter();\n        let call_result = self.call_contract(\n            mint_contract_hash,\n            mint::METHOD_READ_BASE_ROUND_REWARD,\n            RuntimeArgs::default(),\n        );\n        self.set_gas_counter(gas_counter);\n\n        let reward = call_result?.into_t()?;\n        Ok(reward)\n    }\n\n    /// Calls the `mint` method on the mint contract at the given mint\n    /// contract key\n    fn mint_mint(\n        &mut self,\n        mint_contract_hash: AddressableEntityHash,\n        amount: U512,\n    ) -> Result<URef, ExecError> {\n        let gas_counter = self.gas_counter();\n        let runtime_args = {\n            let mut runtime_args = RuntimeArgs::new();\n            runtime_args.insert(mint::ARG_AMOUNT, amount)?;\n            runtime_args\n        };\n        let call_result = self.call_contract(mint_contract_hash, mint::METHOD_MINT, runtime_args);\n        self.set_gas_counter(gas_counter);\n\n        let result: Result<URef, mint::Error> = call_result?.into_t()?;\n        Ok(result.map_err(system::Error::from)?)\n    }\n\n    /// Calls the `reduce_total_supply` method on the mint contract at the given mint\n    /// contract key\n    fn mint_reduce_total_supply(\n        &mut self,\n        mint_contract_hash: AddressableEntityHash,\n   
     amount: U512,\n    ) -> Result<(), ExecError> {\n        let gas_counter = self.gas_counter();\n        let runtime_args = {\n            let mut runtime_args = RuntimeArgs::new();\n            runtime_args.insert(mint::ARG_AMOUNT, amount)?;\n            runtime_args\n        };\n        let call_result = self.call_contract(\n            mint_contract_hash,\n            mint::METHOD_REDUCE_TOTAL_SUPPLY,\n            runtime_args,\n        );\n        self.set_gas_counter(gas_counter);\n\n        let result: Result<(), mint::Error> = call_result?.into_t()?;\n        Ok(result.map_err(system::Error::from)?)\n    }\n\n    /// Calls the \"create\" method on the mint contract at the given mint\n    /// contract key\n    fn mint_create(\n        &mut self,\n        mint_contract_hash: AddressableEntityHash,\n    ) -> Result<URef, ExecError> {\n        let result =\n            self.call_contract(mint_contract_hash, mint::METHOD_CREATE, RuntimeArgs::new());\n        let purse = result?.into_t()?;\n        Ok(purse)\n    }\n\n    fn create_purse(&mut self) -> Result<URef, ExecError> {\n        let _scoped_host_function_flag = self.host_function_flag.enter_host_function_scope();\n        self.mint_create(self.get_mint_hash()?)\n    }\n\n    /// Calls the \"transfer\" method on the mint contract at the given mint\n    /// contract key\n    fn mint_transfer(\n        &mut self,\n        mint_contract_hash: AddressableEntityHash,\n        to: Option<AccountHash>,\n        source: URef,\n        target: URef,\n        amount: U512,\n        id: Option<u64>,\n    ) -> Result<Result<(), mint::Error>, ExecError> {\n        self.context.validate_uref(&source)?;\n\n        let args_values = {\n            let mut runtime_args = RuntimeArgs::new();\n            runtime_args.insert(mint::ARG_TO, to)?;\n            runtime_args.insert(mint::ARG_SOURCE, source)?;\n            runtime_args.insert(mint::ARG_TARGET, target)?;\n            runtime_args.insert(mint::ARG_AMOUNT, 
amount)?;
            runtime_args.insert(mint::ARG_ID, id)?;
            runtime_args
        };

        // Do not charge the caller for gas consumed by the mint's transfer itself.
        let gas_counter = self.gas_counter();
        let call_result =
            self.call_contract(mint_contract_hash, mint::METHOD_TRANSFER, args_values);
        self.set_gas_counter(gas_counter);

        Ok(call_result?.into_t()?)
    }

    /// Creates a new account at `target` hash, transferring `amount`
    /// of motes from `source` purse to the new account's main purse.
    ///
    /// Returns the nested-result convention used throughout transfers: the outer
    /// `Err` is a hard execution error, the inner `Err` is a user-visible transfer
    /// failure (e.g. insufficient funds).
    fn transfer_to_new_account(
        &mut self,
        source: URef,
        target: AccountHash,
        amount: U512,
        id: Option<u64>,
    ) -> Result<TransferResult, ExecError> {
        let mint_contract_hash = self.get_mint_hash()?;

        let allow_unrestricted_transfers =
            self.context.engine_config().allow_unrestricted_transfers();

        // On chains with restricted transfers, only the system account, an admin
        // initiator, or a transfer to an admin target is permitted.
        if !allow_unrestricted_transfers
            && self.context.get_initiator() != PublicKey::System.to_account_hash()
            && !self
                .context
                .engine_config()
                .is_administrator(&self.context.get_initiator())
            && !self.context.engine_config().is_administrator(&target)
        {
            return Err(ExecError::DisabledUnrestrictedTransfers);
        }

        // A precondition check that verifies that the transfer can be done
        // as the source purse has enough funds to cover the transfer.
        if amount > self.available_balance(source)?.unwrap_or_default() {
            return Ok(Err(mint::Error::InsufficientFunds.into()));
        }

        let target_purse = self.mint_create(mint_contract_hash)?;

        if source == target_purse {
            return Ok(Err(mint::Error::EqualSourceAndTarget.into()));
        }

        let result = self.mint_transfer(
            mint_contract_hash,
            Some(target),
            source,
            target_purse.with_access_rights(AccessRights::ADD),
            amount,
            id,
        );

        // We granted a temporary access rights bit to newly created main purse as part of
        // `mint_create` call, and we need to remove it to avoid leakage of access rights.

        self.context
            .remove_access(target_purse.addr(), target_purse.access_rights());

        match result? {
            Ok(()) => {
                let main_purse = target_purse;
                // Legacy (pre-addressable-entity) path: write a plain `Account` record.
                if !self.context.engine_config().enable_entity {
                    let account = Account::create(target, NamedKeys::new(), target_purse);
                    self.context.metered_write_gs_unsafe(
                        Key::Account(target),
                        StoredValue::Account(account),
                    )?;
                    return Ok(Ok(TransferredTo::NewAccount));
                }

                // Addressable-entity path: materialize an entity + locked package and an
                // indirection CLValue under the account key pointing at the entity.
                let protocol_version = self.context.protocol_version();
                let byte_code_hash = ByteCodeHash::default();
                let entity_hash = AddressableEntityHash::new(target.value());
                let package_hash = PackageHash::new(self.context.new_hash_address()?);

                // New account controls itself with weight 1.
                let associated_keys = AssociatedKeys::new(target, Weight::new(1));

                let entity = AddressableEntity::new(
                    package_hash,
                    byte_code_hash,
                    protocol_version,
                    main_purse,
                    associated_keys,
                    ActionThresholds::default(),
                    EntityKind::Account(target),
                );

                let package = {
                    let mut package = Package::new(
                        EntityVersions::default(),
                        BTreeSet::default(),
                        Groups::default(),
                        PackageStatus::Locked,
                    );
                    package.insert_entity_version(
                        protocol_version.value().major,
                        EntityAddr::Account(target.value()),
                    );
                    package
                };

                let entity_key: Key = entity.entity_key(entity_hash);

                self.context
                    .metered_write_gs_unsafe(entity_key, StoredValue::AddressableEntity(entity))?;

                let contract_package_key: Key = package_hash.into();

                self.context.metered_write_gs_unsafe(
                    contract_package_key,
                    StoredValue::SmartContract(package),
                )?;

                // Account key stores a CLValue forwarding to the entity key.
                let contract_by_account = CLValue::from_t(entity_key)?;

                let target_key = Key::Account(target);

                self.context.metered_write_gs_unsafe(
                    target_key,
                    StoredValue::CLValue(contract_by_account),
                )?;

                Ok(Ok(TransferredTo::NewAccount))
            }
            Err(mint_error) => Ok(Err(mint_error.into())),
        }
    }

    /// Transferring a given amount of motes from the given source purse to the
    /// new account's purse. Requires that the [`URef`]s have already
    /// been created by the mint contract (or are the genesis account's).
    fn transfer_to_existing_account(
        &mut self,
        to: Option<AccountHash>,
        source: URef,
        target: URef,
        amount: U512,
        id: Option<u64>,
    ) -> Result<TransferResult, ExecError> {
        let mint_contract_key = self.get_mint_hash()?;

        match self.mint_transfer(mint_contract_key, to, source, target, amount, id)? {
            Ok(()) => Ok(Ok(TransferredTo::ExistingAccount)),
            Err(error) => Ok(Err(error.into())),
        }
    }

    /// Transfers `amount` of motes from default purse of the account to
    /// `target` account. 
If that account does not exist, creates one.
    fn transfer_to_account(
        &mut self,
        target: AccountHash,
        amount: U512,
        id: Option<u64>,
    ) -> Result<TransferResult, ExecError> {
        // Source is always the caller's main purse.
        let source = self.context.get_main_purse()?;
        self.transfer_from_purse_to_account_hash(source, target, amount, id)
    }

    /// Transfers `amount` of motes from `source` purse to `target` account's main purse.
    /// If that account does not exist, creates one.
    fn transfer_from_purse_to_account_hash(
        &mut self,
        source: URef,
        target: AccountHash,
        amount: U512,
        id: Option<u64>,
    ) -> Result<TransferResult, ExecError> {
        let _scoped_host_function_flag = self.host_function_flag.enter_host_function_scope();
        let target_key = Key::Account(target);

        // Look up the account at the given key
        match self.context.read_gs(&target_key)? {
            None => {
                // If no account exists, create a new account and transfer the amount to its
                // main purse.

                self.transfer_to_new_account(source, target, amount, id)
            }
            Some(StoredValue::CLValue(entity_key_value)) => {
                // Account key holds an indirection to an addressable entity.
                // Attenuate the target main purse
                let entity_key = CLValue::into_t::<Key>(entity_key_value)?;
                let target_uref = if let Some(StoredValue::AddressableEntity(entity)) =
                    self.context.read_gs(&entity_key)?
                {
                    entity.main_purse_add_only()
                } else {
                    // Indirection did not resolve to an entity: report which hash was
                    // invalid, or an unexpected key variant if no hash can be derived.
                    let contract_hash = if let Some(entity_hash) = entity_key
                        .into_entity_hash_addr()
                        .map(AddressableEntityHash::new)
                    {
                        entity_hash
                    } else {
                        return Err(ExecError::UnexpectedKeyVariant(entity_key));
                    };
                    return Err(ExecError::InvalidEntity(contract_hash));
                };

                // Transfer to self (same purse) is a no-op success.
                if source.with_access_rights(AccessRights::ADD) == target_uref {
                    return Ok(Ok(TransferredTo::ExistingAccount));
                }

                // Upsert ADD access to caller on target allowing deposit of motes; this will be
                // revoked after the transfer is completed if caller did not already have ADD access
                let granted_access = self.context.grant_access(target_uref);

                // If an account exists, transfer the amount to its purse
                let transfer_result = self.transfer_to_existing_account(
                    Some(target),
                    source,
                    target_uref,
                    amount,
                    id,
                );

                // Remove from caller temporarily granted ADD access on target.
                if let GrantedAccess::Granted {
                    uref_addr,
                    newly_granted_access_rights,
                } = granted_access
                {
                    self.context
                        .remove_access(uref_addr, newly_granted_access_rights)
                }
                transfer_result
            }
            Some(StoredValue::Account(account)) => {
                // Legacy account record: delegate to the account-based path.
                self.transfer_from_purse_to_account(source, &account, amount, id)
            }
            Some(_) => {
                // If some other value exists, return an error
                Err(ExecError::AccountNotFound(target_key))
            }
        }
    }

    /// Transfers `amount` of motes from `source` purse to the main purse of the given
    /// (legacy) `target_account`, temporarily granting ADD access on the target purse
    /// for the duration of the transfer.
    fn transfer_from_purse_to_account(
        &mut self,
        source: URef,
        target_account: &Account,
        amount: U512,
        id: Option<u64>,
    ) -> Result<TransferResult, ExecError> {
        // Attenuate the target main purse
        let target_uref = target_account.main_purse_add_only();

        // Transfer to self (same purse) is a no-op success.
        if source.with_access_rights(AccessRights::ADD) == target_uref {
            return Ok(Ok(TransferredTo::ExistingAccount));
        }

        // Grant ADD access to caller on target allowing deposit of motes; this will be
        // revoked after the transfer is completed if caller did not already have ADD access
        let granted_access = self.context.grant_access(target_uref);

        // If an account exists, transfer the amount to its purse
        let transfer_result = self.transfer_to_existing_account(
            Some(target_account.account_hash()),
            source,
            target_uref,
            amount,
            id,
        );

        // Remove from caller temporarily granted ADD access on target.
        if let GrantedAccess::Granted {
            uref_addr,
            newly_granted_access_rights,
        } = granted_access
        {
            self.context
                .remove_access(uref_addr, newly_granted_access_rights)
        }
        transfer_result
    }

    /// Transfers `amount` of motes from `source` purse to `target` purse.
    fn transfer_from_purse_to_purse(
        &mut self,
        source: URef,
        target: URef,
        amount: U512,
        id: Option<u64>,
    ) -> Result<Result<(), mint::Error>, ExecError> {
        self.context.validate_uref(&source)?;
        let mint_contract_key = self.get_mint_hash()?;
        match self.mint_transfer(mint_contract_key, None, source, target, amount, id)? 
{
            Ok(()) => Ok(Ok(())),
            Err(mint_error) => Ok(Err(mint_error)),
        }
    }

    /// Returns the total balance of `purse` as a raw `U512`.
    fn total_balance(&mut self, purse: URef) -> Result<U512, ExecError> {
        match self.context.total_balance(&purse) {
            Ok(motes) => Ok(motes.value()),
            Err(err) => Err(err),
        }
    }

    /// Returns the available balance of `purse` as a raw `U512`.
    ///
    /// Note: in this implementation the success path always yields `Ok(Some(..))`;
    /// errors are propagated via `Err` rather than `Ok(None)`.
    fn available_balance(&mut self, purse: URef) -> Result<Option<U512>, ExecError> {
        match self.context.available_balance(&purse) {
            Ok(motes) => Ok(Some(motes.value())),
            Err(err) => Err(err),
        }
    }

    /// Reads a serialized `URef` purse from wasm memory, looks up its available
    /// balance, stores the balance CLValue in the host buffer, and writes the
    /// serialized size (little-endian u32) to `output_size_ptr`.
    ///
    /// Outer `Err` is a hard execution error; inner `Err` is an `ApiError` returned
    /// to the guest (e.g. `HostBufferFull` when the buffer is occupied).
    fn get_balance_host_buffer(
        &mut self,
        purse_ptr: u32,
        purse_size: usize,
        output_size_ptr: u32,
    ) -> Result<Result<(), ApiError>, ExecError> {
        if !self.can_write_to_host_buffer() {
            // Exit early if the host buffer is already occupied
            return Ok(Err(ApiError::HostBufferFull));
        }

        let purse: URef = {
            let bytes = self.bytes_from_mem(purse_ptr, purse_size)?;
            match bytesrepr::deserialize_from_slice(bytes) {
                Ok(purse) => purse,
                Err(error) => return Ok(Err(error.into())),
            }
        };

        let balance = match self.available_balance(purse)? {
            Some(balance) => balance,
            None => return Ok(Err(ApiError::InvalidPurse)),
        };

        let balance_cl_value = match CLValue::from_t(balance) {
            Ok(cl_value) => cl_value,
            Err(error) => return Ok(Err(error.into())),
        };

        let balance_size = balance_cl_value.inner_bytes().len() as i32;
        if let Err(error) = self.write_host_buffer(balance_cl_value) {
            return Ok(Err(error));
        }

        let balance_size_bytes = balance_size.to_le_bytes(); // Wasm is little-endian
        if let Err(error) = self
            .try_get_memory()?
            .set(output_size_ptr, &balance_size_bytes)
        {
            return Err(ExecError::Interpreter(error.into()));
        }

        Ok(Ok(()))
    }

    /// Writes the hash of the system contract selected by `system_contract_index`
    /// into wasm memory at `dest_ptr`.
    ///
    /// NOTE(review): `_dest_size` is accepted but not validated; the hash bytes are
    /// written unconditionally — callers must guarantee the destination buffer is
    /// large enough (confirm against the host ABI).
    fn get_system_contract(
        &mut self,
        system_contract_index: u32,
        dest_ptr: u32,
        _dest_size: u32,
    ) -> Result<Result<(), ApiError>, Trap> {
        let hash: AddressableEntityHash = match SystemEntityType::try_from(system_contract_index) {
            Ok(SystemEntityType::Mint) => self.get_mint_hash()?,
            Ok(SystemEntityType::HandlePayment) => self.get_handle_payment_hash()?,
            Ok(SystemEntityType::StandardPayment) => self.get_standard_payment_hash()?,
            Ok(SystemEntityType::Auction) => self.get_auction_hash()?,
            Err(error) => return Ok(Err(error)),
        };

        match self.try_get_memory()?.set(dest_ptr, hash.as_ref()) {
            Ok(_) => Ok(Ok(())),
            Err(error) => Err(ExecError::Interpreter(error.into()).into()),
        }
    }

    /// If host_buffer set, clears the host_buffer and returns value, else None
    pub fn take_host_buffer(&mut self) -> Option<CLValue> {
        self.host_buffer.take()
    }

    /// Checks if a write to host buffer can happen.
    ///
    /// This will check if the host buffer is empty.
    fn 
can_write_to_host_buffer(&self) -> bool {
        self.host_buffer.is_none()
    }

    /// Overwrites data in host buffer only if it's in empty state
    fn write_host_buffer(&mut self, data: CLValue) -> Result<(), ApiError> {
        match self.host_buffer {
            Some(_) => return Err(ApiError::HostBufferFull),
            None => self.host_buffer = Some(data),
        }
        Ok(())
    }

    /// Drains the host buffer into wasm memory at `dest_ptr` (capacity `dest_size`),
    /// writing the number of bytes copied (little-endian u32) to `bytes_written_ptr`.
    ///
    /// Outer `Err` is a hard execution error; inner `Err` is an `ApiError` returned
    /// to the guest (`HostBufferEmpty`, `OutOfMemory`, `BufferTooSmall`).
    fn read_host_buffer(
        &mut self,
        dest_ptr: u32,
        dest_size: usize,
        bytes_written_ptr: u32,
    ) -> Result<Result<(), ApiError>, ExecError> {
        let (_cl_type, serialized_value) = match self.take_host_buffer() {
            None => return Ok(Err(ApiError::HostBufferEmpty)),
            Some(cl_value) => cl_value.destructure(),
        };

        if serialized_value.len() > u32::MAX as usize {
            return Ok(Err(ApiError::OutOfMemory));
        }
        if serialized_value.len() > dest_size {
            return Ok(Err(ApiError::BufferTooSmall));
        }

        // Slice data, so if `dest_size` is larger than host_buffer size, it will take host_buffer
        // as whole.
        // NOTE(review): after the `BufferTooSmall` guard above, `serialized_value.len()`
        // is always <= `dest_size`, so this `min` is defensive rather than required.
        let sliced_buf = &serialized_value[..cmp::min(dest_size, serialized_value.len())];
        if let Err(error) = self.try_get_memory()?.set(dest_ptr, sliced_buf) {
            return Err(ExecError::Interpreter(error.into()));
        }

        // Never panics because we check that `serialized_value.len()` fits in `u32`.
        let bytes_written: u32 = sliced_buf
            .len()
            .try_into()
            .expect("Size of buffer should fit within limit");
        let bytes_written_data = bytes_written.to_le_bytes();

        if let Err(error) = self
            .try_get_memory()?
            .set(bytes_written_ptr, &bytes_written_data)
        {
            return Err(ExecError::Interpreter(error.into()));
        }

        Ok(Ok(()))
    }

    /// Debug helper (test builds only): reads a string from wasm memory and prints it.
    #[cfg(feature = "test-support")]
    fn print(&mut self, text_ptr: u32, text_size: u32) -> Result<(), Trap> {
        let text = self.string_from_mem(text_ptr, text_size)?;
        println!("{}", text); // this println! is intentional
        Ok(())
    }

    /// Looks up the named runtime argument identified by the string at
    /// `name_ptr`/`name_size` and writes its serialized byte length (little-endian
    /// u32) to `size_ptr`. Inner `Err` is `MissingArgument` for an unknown name or
    /// `OutOfMemory` when the size does not fit a `u32`.
    fn get_named_arg_size(
        &mut self,
        name_ptr: u32,
        name_size: usize,
        size_ptr: u32,
    ) -> Result<Result<(), ApiError>, Trap> {
        let name_bytes = self.bytes_from_mem(name_ptr, name_size)?;
        let name = String::from_utf8_lossy(&name_bytes);

        let arg_size: u32 = match self.context.args().get(&name) {
            Some(arg) if arg.inner_bytes().len() > u32::MAX as usize => {
                return Ok(Err(ApiError::OutOfMemory));
            }
            Some(arg) => {
                // SAFETY: Safe to unwrap as we asserted length above
                arg.inner_bytes()
                    .len()
                    .try_into()
                    .expect("Should fit within the range")
            }
            None => return Ok(Err(ApiError::MissingArgument)),
        };

        let arg_size_bytes = arg_size.to_le_bytes(); // Wasm is little-endian

        if let Err(e) = self.try_get_memory()?.set(size_ptr, &arg_size_bytes) {
            return Err(ExecError::Interpreter(e.into()).into());
        }

        Ok(Ok(()))
    }

    /// Copies the named runtime argument's serialized bytes into wasm memory at
    /// `output_ptr`. Inner `Err` is `MissingArgument` for an unknown name or
    /// `OutOfMemory` when the argument is larger than `output_size`.
    fn get_named_arg(
        &mut self,
        name_ptr: u32,
        name_size: usize,
        output_ptr: u32,
        output_size: usize,
    ) -> Result<Result<(), ApiError>, Trap> {
        let name_bytes = self.bytes_from_mem(name_ptr, name_size)?;
        let name = String::from_utf8_lossy(&name_bytes);

        let arg = match self.context.args().get(&name) {
            Some(arg) => arg,
            None => return Ok(Err(ApiError::MissingArgument)),
        };

        if arg.inner_bytes().len() > output_size {
            return Ok(Err(ApiError::OutOfMemory));
        }

  if let Err(error) = self\n            .try_get_memory()?\n            .set(output_ptr, &arg.inner_bytes()[..output_size])\n        {\n            return Err(ExecError::Interpreter(error.into()).into());\n        }\n\n        Ok(Ok(()))\n    }\n\n    /// Enforce group access restrictions (if any) on attempts to call an `EntryPoint`.\n    fn validate_entry_point_access(\n        &self,\n        package: &Package,\n        name: &str,\n        access: &EntryPointAccess,\n    ) -> Result<(), ExecError> {\n        match access {\n            EntryPointAccess::Public => Ok(()),\n            EntryPointAccess::Groups(group_names) => {\n                if group_names.is_empty() {\n                    // Exits early in a special case of empty list of groups regardless of the group\n                    // checking logic below it.\n                    return Err(ExecError::InvalidContext);\n                }\n\n                let find_result = group_names.iter().find(|&group_name| {\n                    package\n                        .groups()\n                        .get(group_name)\n                        .and_then(|urefs| {\n                            urefs\n                                .iter()\n                                .find(|&uref| self.context.validate_uref(uref).is_ok())\n                        })\n                        .is_some()\n                });\n\n                if find_result.is_none() {\n                    return Err(ExecError::InvalidContext);\n                }\n\n                Ok(())\n            }\n            EntryPointAccess::Template => Err(ExecError::TemplateMethod(name.to_string())),\n        }\n    }\n\n    /// Remove a user group from access to a contract\n    fn remove_contract_user_group(\n        &mut self,\n        package_key: PackageHash,\n        label: Group,\n    ) -> Result<Result<(), ApiError>, ExecError> {\n        if self.context.engine_config().enable_entity {\n            let mut package: Package = 
self.context.get_validated_package(package_key)?;\n            let group_to_remove = Group::new(label);\n\n            // Ensure group exists in groups\n            if !package.groups().contains(&group_to_remove) {\n                return Ok(Err(addressable_entity::Error::GroupDoesNotExist.into()));\n            }\n\n            // Remove group if it is not referenced by at least one entry_point in active versions.\n            let versions = package.versions();\n            for entity_hash in versions.contract_hashes() {\n                let entry_points = {\n                    self.context\n                        .get_casper_vm_v1_entry_point(Key::AddressableEntity(*entity_hash))?\n                };\n                for entry_point in entry_points.take_entry_points() {\n                    match entry_point.access() {\n                        EntryPointAccess::Public | EntryPointAccess::Template => {\n                            continue;\n                        }\n                        EntryPointAccess::Groups(groups) => {\n                            if groups.contains(&group_to_remove) {\n                                return Ok(Err(addressable_entity::Error::GroupInUse.into()));\n                            }\n                        }\n                    }\n                }\n            }\n\n            if !package.remove_group(&group_to_remove) {\n                return Ok(Err(addressable_entity::Error::GroupInUse.into()));\n            }\n\n            // Write updated package to the global state\n            self.context.metered_write_gs_unsafe(package_key, package)?;\n        } else {\n            let mut contract_package = self\n                .context\n                .get_validated_contract_package(package_key.value())?;\n            let group_to_remove = Group::new(label);\n\n            // Ensure group exists in groups\n            if !contract_package.groups().contains(&group_to_remove) {\n                return 
Ok(Err(addressable_entity::Error::GroupDoesNotExist.into()));\n            }\n\n            // Remove group if it is not referenced by at least one entry_point in active versions.\n            for (_version, contract_hash) in contract_package.versions().iter() {\n                let entry_points = {\n                    self.context\n                        .get_casper_vm_v1_entry_point(Key::contract_entity_key(\n                            AddressableEntityHash::new(contract_hash.value()),\n                        ))?\n                };\n                for entry_point in entry_points.take_entry_points() {\n                    match entry_point.access() {\n                        EntryPointAccess::Public | EntryPointAccess::Template => {\n                            continue;\n                        }\n                        EntryPointAccess::Groups(groups) => {\n                            if groups.contains(&group_to_remove) {\n                                return Ok(Err(addressable_entity::Error::GroupInUse.into()));\n                            }\n                        }\n                    }\n                }\n            }\n\n            if !contract_package.remove_group(&group_to_remove) {\n                return Ok(Err(addressable_entity::Error::GroupInUse.into()));\n            }\n\n            // Write updated package to the global state\n            self.context.metered_write_gs_unsafe(\n                ContractPackageHash::new(package_key.value()),\n                contract_package,\n            )?;\n        }\n        Ok(Ok(()))\n    }\n\n    #[allow(clippy::too_many_arguments)]\n    fn provision_contract_user_group_uref(\n        &mut self,\n        package_ptr: u32,\n        package_size: u32,\n        label_ptr: u32,\n        label_size: u32,\n        output_size_ptr: u32,\n    ) -> Result<Result<(), ApiError>, ExecError> {\n        let contract_package_hash = self.t_from_mem(package_ptr, package_size)?;\n        let label: String = 
self.t_from_mem(label_ptr, label_size)?;\n        let new_uref = if self.context.engine_config().enable_entity {\n            let mut contract_package = self.context.get_validated_package(contract_package_hash)?;\n            let groups = contract_package.groups_mut();\n\n            let group_label = Group::new(label);\n\n            // Ensure there are not too many urefs\n            if groups.total_urefs() + 1 > addressable_entity::MAX_TOTAL_UREFS {\n                return Ok(Err(addressable_entity::Error::MaxTotalURefsExceeded.into()));\n            }\n\n            // Ensure given group exists and does not exceed limits\n            let group = match groups.get_mut(&group_label) {\n                Some(group) if group.len() + 1 > addressable_entity::MAX_GROUPS as usize => {\n                    // Ensures there are not too many groups to fit in amount of new urefs\n                    return Ok(Err(addressable_entity::Error::MaxTotalURefsExceeded.into()));\n                }\n                Some(group) => group,\n                None => return Ok(Err(addressable_entity::Error::GroupDoesNotExist.into())),\n            };\n\n            // Proceed with creating new URefs\n            let new_uref = self.context.new_unit_uref()?;\n            if !group.insert(new_uref) {\n                return Ok(Err(addressable_entity::Error::URefAlreadyExists.into()));\n            }\n\n            // Write updated package to the global state\n            self.context\n                .metered_write_gs_unsafe(contract_package_hash, contract_package)?;\n            new_uref\n        } else {\n            let mut contract_package = self\n                .context\n                .get_validated_contract_package(contract_package_hash.value())?;\n            let groups = contract_package.groups_mut();\n\n            let group_label = Group::new(label);\n\n            // Ensure there are not too many urefs\n            if groups.total_urefs() + 1 > 
addressable_entity::MAX_TOTAL_UREFS {\n                return Ok(Err(addressable_entity::Error::MaxTotalURefsExceeded.into()));\n            }\n\n            // Ensure given group exists and does not exceed limits\n            let group = match groups.get_mut(&group_label) {\n                Some(group) if group.len() + 1 > addressable_entity::MAX_GROUPS as usize => {\n                    // Ensures there are not too many groups to fit in amount of new urefs\n                    return Ok(Err(addressable_entity::Error::MaxTotalURefsExceeded.into()));\n                }\n                Some(group) => group,\n                None => return Ok(Err(addressable_entity::Error::GroupDoesNotExist.into())),\n            };\n\n            // Proceed with creating new URefs\n            let new_uref = self.context.new_unit_uref()?;\n            if !group.insert(new_uref) {\n                return Ok(Err(addressable_entity::Error::URefAlreadyExists.into()));\n            }\n\n            // Write updated package to the global state\n            self.context.metered_write_gs_unsafe(\n                ContractPackageHash::new(contract_package_hash.value()),\n                contract_package,\n            )?;\n            new_uref\n        };\n\n        // check we can write to the host buffer\n        if let Err(err) = self.check_host_buffer() {\n            return Ok(Err(err));\n        }\n        // create CLValue for return value\n        let new_uref_value = CLValue::from_t(new_uref)?;\n        let value_size = new_uref_value.inner_bytes().len();\n        // write return value to buffer\n        if let Err(err) = self.write_host_buffer(new_uref_value) {\n            return Ok(Err(err));\n        }\n        // Write return value size to output location\n        let output_size_bytes = value_size.to_le_bytes(); // Wasm is little-endian\n        if let Err(error) = self\n            .try_get_memory()?\n            .set(output_size_ptr, &output_size_bytes)\n        {\n           
 return Err(ExecError::Interpreter(error.into()));\n        }\n\n        Ok(Ok(()))\n    }\n\n    #[allow(clippy::too_many_arguments)]\n    fn remove_contract_user_group_urefs(\n        &mut self,\n        package_ptr: u32,\n        package_size: u32,\n        label_ptr: u32,\n        label_size: u32,\n        urefs_ptr: u32,\n        urefs_size: u32,\n    ) -> Result<Result<(), ApiError>, ExecError> {\n        let contract_package_hash: PackageHash = self.t_from_mem(package_ptr, package_size)?;\n        let label: String = self.t_from_mem(label_ptr, label_size)?;\n        let urefs: BTreeSet<URef> = self.t_from_mem(urefs_ptr, urefs_size)?;\n\n        if self.context.engine_config().enable_entity {\n            let mut contract_package = self.context.get_validated_package(contract_package_hash)?;\n\n            let groups = contract_package.groups_mut();\n            let group_label = Group::new(label);\n\n            let group = match groups.get_mut(&group_label) {\n                Some(group) => group,\n                None => return Ok(Err(addressable_entity::Error::GroupDoesNotExist.into())),\n            };\n\n            if urefs.is_empty() {\n                return Ok(Ok(()));\n            }\n\n            for uref in urefs {\n                if !group.remove(&uref) {\n                    return Ok(Err(addressable_entity::Error::UnableToRemoveURef.into()));\n                }\n            }\n            // Write updated package to the global state\n            self.context\n                .metered_write_gs_unsafe(contract_package_hash, contract_package)?;\n        } else {\n            let contract_package_hash = ContractPackageHash::new(contract_package_hash.value());\n            let mut contract_package = self\n                .context\n                .get_validated_contract_package(contract_package_hash.value())?;\n\n            let groups = contract_package.groups_mut();\n            let group_label = Group::new(label);\n\n            let group = 
match groups.get_mut(&group_label) {\n                Some(group) => group,\n                None => return Ok(Err(addressable_entity::Error::GroupDoesNotExist.into())),\n            };\n\n            if urefs.is_empty() {\n                return Ok(Ok(()));\n            }\n\n            for uref in urefs {\n                if !group.remove(&uref) {\n                    return Ok(Err(addressable_entity::Error::UnableToRemoveURef.into()));\n                }\n            }\n            // Write updated package to the global state\n            self.context\n                .metered_write_gs_unsafe(contract_package_hash, contract_package)?;\n        }\n\n        Ok(Ok(()))\n    }\n\n    /// Calculate gas cost for a host function\n    fn charge_host_function_call<T>(\n        &mut self,\n        host_function: &HostFunction<T>,\n        weights: T,\n    ) -> Result<(), Trap>\n    where\n        T: AsRef<[HostFunctionCost]> + Copy,\n    {\n        let cost = host_function\n            .calculate_gas_cost(weights)\n            .ok_or(ExecError::GasLimit)?; // Overflowing gas calculation means gas limit was exceeded\n        self.gas(cost)?;\n        Ok(())\n    }\n\n    /// Creates a dictionary\n    fn new_dictionary(&mut self, output_size_ptr: u32) -> Result<Result<(), ApiError>, ExecError> {\n        // check we can write to the host buffer\n        if let Err(err) = self.check_host_buffer() {\n            return Ok(Err(err));\n        }\n\n        // Create new URef\n        let new_uref = self.context.new_unit_uref()?;\n\n        // create CLValue for return value\n        let new_uref_value = CLValue::from_t(new_uref)?;\n        let value_size = new_uref_value.inner_bytes().len();\n        // write return value to buffer\n        if let Err(err) = self.write_host_buffer(new_uref_value) {\n            return Ok(Err(err));\n        }\n        // Write return value size to output location\n        let output_size_bytes = value_size.to_le_bytes(); // Wasm is 
little-endian\n        if let Err(error) = self\n            .try_get_memory()?\n            .set(output_size_ptr, &output_size_bytes)\n        {\n            return Err(ExecError::Interpreter(error.into()));\n        }\n\n        Ok(Ok(()))\n    }\n\n    /// Reads the `value` under a `key` in a dictionary\n    fn dictionary_get(\n        &mut self,\n        uref_ptr: u32,\n        uref_size: u32,\n        dictionary_item_key_bytes_ptr: u32,\n        dictionary_item_key_bytes_size: u32,\n        output_size_ptr: u32,\n    ) -> Result<Result<(), ApiError>, Trap> {\n        // check we can write to the host buffer\n        if let Err(err) = self.check_host_buffer() {\n            return Ok(Err(err));\n        }\n\n        let uref: URef = self.t_from_mem(uref_ptr, uref_size)?;\n        let dictionary_item_key = self.checked_memory_slice(\n            dictionary_item_key_bytes_ptr as usize,\n            dictionary_item_key_bytes_size as usize,\n            |utf8_bytes| std::str::from_utf8(utf8_bytes).map(ToOwned::to_owned),\n        )?;\n\n        let dictionary_item_key = if let Ok(item_key) = dictionary_item_key {\n            item_key\n        } else {\n            return Ok(Err(ApiError::InvalidDictionaryItemKey));\n        };\n\n        let cl_value = match self.context.dictionary_get(uref, &dictionary_item_key)? 
{\n            Some(cl_value) => cl_value,\n            None => return Ok(Err(ApiError::ValueNotFound)),\n        };\n\n        let value_size: u32 = match cl_value.inner_bytes().len().try_into() {\n            Ok(value) => value,\n            Err(_) => return Ok(Err(ApiError::BufferTooSmall)),\n        };\n\n        if let Err(error) = self.write_host_buffer(cl_value) {\n            return Ok(Err(error));\n        }\n\n        let value_bytes = value_size.to_le_bytes(); // Wasm is little-endian\n        if let Err(error) = self.try_get_memory()?.set(output_size_ptr, &value_bytes) {\n            return Err(ExecError::Interpreter(error.into()).into());\n        }\n\n        Ok(Ok(()))\n    }\n\n    /// Reads the `value` under a `Key::Dictionary`.\n    fn dictionary_read(\n        &mut self,\n        key_ptr: u32,\n        key_size: u32,\n        output_size_ptr: u32,\n    ) -> Result<Result<(), ApiError>, Trap> {\n        if !self.can_write_to_host_buffer() {\n            // Exit early if the host buffer is already occupied\n            return Ok(Err(ApiError::HostBufferFull));\n        }\n\n        let dictionary_key = self.key_from_mem(key_ptr, key_size)?;\n        let cl_value = match self.context.dictionary_read(dictionary_key)? 
{\n            Some(cl_value) => cl_value,\n            None => return Ok(Err(ApiError::ValueNotFound)),\n        };\n\n        let value_size: u32 = match cl_value.inner_bytes().len().try_into() {\n            Ok(value) => value,\n            Err(_) => return Ok(Err(ApiError::BufferTooSmall)),\n        };\n\n        if let Err(error) = self.write_host_buffer(cl_value) {\n            return Ok(Err(error));\n        }\n\n        let value_bytes = value_size.to_le_bytes(); // Wasm is little-endian\n        if let Err(error) = self.try_get_memory()?.set(output_size_ptr, &value_bytes) {\n            return Err(ExecError::Interpreter(error.into()).into());\n        }\n\n        Ok(Ok(()))\n    }\n\n    /// Writes a `key`, `value` pair in a dictionary\n    fn dictionary_put(\n        &mut self,\n        uref_ptr: u32,\n        uref_size: u32,\n        key_ptr: u32,\n        key_size: u32,\n        value_ptr: u32,\n        value_size: u32,\n    ) -> Result<Result<(), ApiError>, Trap> {\n        let uref: URef = self.t_from_mem(uref_ptr, uref_size)?;\n        let dictionary_item_key_bytes = {\n            if (key_size as usize) > DICTIONARY_ITEM_KEY_MAX_LENGTH {\n                return Ok(Err(ApiError::DictionaryItemKeyExceedsLength));\n            }\n            self.checked_memory_slice(key_ptr as usize, key_size as usize, |data| {\n                std::str::from_utf8(data).map(ToOwned::to_owned)\n            })?\n        };\n\n        let dictionary_item_key = if let Ok(item_key) = dictionary_item_key_bytes {\n            item_key\n        } else {\n            return Ok(Err(ApiError::InvalidDictionaryItemKey));\n        };\n        let cl_value = self.cl_value_from_mem(value_ptr, value_size)?;\n        if let Err(e) = self\n            .context\n            .dictionary_put(uref, &dictionary_item_key, cl_value)\n        {\n            return Err(Trap::from(e));\n        }\n        Ok(Ok(()))\n    }\n\n    /// Checks if immediate caller is a system contract or account.\n 
   ///\n    /// For cases where call stack is only the session code, then this method returns `true` if the\n    /// caller is system, or `false` otherwise.\n    fn is_system_immediate_caller(&self) -> Result<bool, ExecError> {\n        let immediate_caller = match self.get_immediate_caller() {\n            Some(call_stack_element) => call_stack_element,\n            None => {\n                // Immediate caller is assumed to exist at a time this check is run.\n                return Ok(false);\n            }\n        };\n\n        match immediate_caller {\n            Caller::Initiator { account_hash } => {\n                // This case can happen during genesis where we're setting up purses for accounts.\n                Ok(account_hash == &PublicKey::System.to_account_hash())\n            }\n            Caller::SmartContract { contract_hash, .. } => Ok(self\n                .context\n                .is_system_addressable_entity(&contract_hash.value())?),\n            Caller::Entity { entity_addr, .. 
} => Ok(self\n                .context\n                .is_system_addressable_entity(&entity_addr.value())?),\n        }\n    }\n\n    fn load_authorization_keys(\n        &mut self,\n        len_ptr: u32,\n        result_size_ptr: u32,\n    ) -> Result<Result<(), ApiError>, Trap> {\n        if !self.can_write_to_host_buffer() {\n            // Exit early if the host buffer is already occupied\n            return Ok(Err(ApiError::HostBufferFull));\n        }\n\n        // A set of keys is converted into a vector so it can be written to a host buffer\n        let authorization_keys = Vec::from_iter(self.context.authorization_keys().clone());\n\n        let total_keys: u32 = match authorization_keys.len().try_into() {\n            Ok(value) => value,\n            Err(_) => return Ok(Err(ApiError::OutOfMemory)),\n        };\n        let total_keys_bytes = total_keys.to_le_bytes();\n        if let Err(error) = self.try_get_memory()?.set(len_ptr, &total_keys_bytes) {\n            return Err(ExecError::Interpreter(error.into()).into());\n        }\n\n        if total_keys == 0 {\n            // No need to do anything else, we leave host buffer empty.\n            return Ok(Ok(()));\n        }\n\n        let authorization_keys = CLValue::from_t(authorization_keys).map_err(ExecError::CLValue)?;\n\n        let length: u32 = match authorization_keys.inner_bytes().len().try_into() {\n            Ok(value) => value,\n            Err(_) => return Ok(Err(ApiError::OutOfMemory)),\n        };\n        if let Err(error) = self.write_host_buffer(authorization_keys) {\n            return Ok(Err(error));\n        }\n\n        let length_bytes = length.to_le_bytes();\n        if let Err(error) = self.try_get_memory()?.set(result_size_ptr, &length_bytes) {\n            return Err(ExecError::Interpreter(error.into()).into());\n        }\n\n        Ok(Ok(()))\n    }\n\n    fn prune(&mut self, key: Key) {\n        self.context.prune_gs_unsafe(key);\n    }\n\n    pub(crate) fn 
migrate_contract_and_contract_package(\n        &mut self,\n        hash_addr: HashAddr,\n    ) -> Result<AddressableEntity, ExecError> {\n        let protocol_version = self.context.protocol_version();\n        let contract = self.context.get_contract(ContractHash::new(hash_addr))?;\n        let package_hash = contract.contract_package_hash();\n        self.context\n            .migrate_package(package_hash, protocol_version)?;\n        let entity_hash = AddressableEntityHash::new(hash_addr);\n        let key = Key::contract_entity_key(entity_hash);\n        self.context.read_gs_typed(&key)\n    }\n\n    fn add_message_topic(&mut self, topic_name: &str) -> Result<Result<(), ApiError>, ExecError> {\n        let topic_hash = cryptography::blake2b(topic_name).into();\n\n        self.context\n            .add_message_topic(topic_name, topic_hash)\n            .map(|ret| ret.map_err(ApiError::from))\n    }\n\n    fn emit_message(\n        &mut self,\n        topic_name: &str,\n        message: MessagePayload,\n    ) -> Result<Result<(), ApiError>, Trap> {\n        let entity_addr = self.context.context_key_to_entity_addr()?;\n\n        let topic_name_hash = cryptography::blake2b(topic_name).into();\n        let topic_key = Key::Message(MessageAddr::new_topic_addr(entity_addr, topic_name_hash));\n\n        // Check if the topic exists and get the summary.\n        let Some(StoredValue::MessageTopic(prev_topic_summary)) =\n            self.context.read_gs(&topic_key)?\n        else {\n            return Ok(Err(ApiError::MessageTopicNotRegistered));\n        };\n\n        let current_blocktime = self.context.get_block_info().block_time();\n        let topic_message_index = if prev_topic_summary.blocktime() != current_blocktime {\n            for index in 1..prev_topic_summary.message_count() {\n                self.context\n                    .prune_gs_unsafe(Key::message(entity_addr, topic_name_hash, index));\n            }\n            0\n        } else {\n            
prev_topic_summary.message_count()\n        };\n\n        let block_message_index: u64 = match self\n            .context\n            .read_gs(&Key::BlockGlobal(BlockGlobalAddr::MessageCount))?\n        {\n            Some(stored_value) => {\n                let (prev_block_time, prev_count): (BlockTime, u64) = CLValue::into_t(\n                    CLValue::try_from(stored_value).map_err(ExecError::TypeMismatch)?,\n                )\n                .map_err(ExecError::CLValue)?;\n                if prev_block_time == current_blocktime {\n                    prev_count\n                } else {\n                    0\n                }\n            }\n            None => 0,\n        };\n\n        let Some(topic_message_count) = topic_message_index.checked_add(1) else {\n            return Ok(Err(ApiError::MessageTopicFull));\n        };\n\n        let Some(block_message_count) = block_message_index.checked_add(1) else {\n            return Ok(Err(ApiError::MaxMessagesPerBlockExceeded));\n        };\n\n        self.context.metered_emit_message(\n            topic_key,\n            current_blocktime,\n            block_message_count,\n            topic_message_count,\n            Message::new(\n                entity_addr,\n                message,\n                topic_name.to_string(),\n                topic_name_hash,\n                topic_message_index,\n                block_message_index,\n            ),\n        )?;\n        Ok(Ok(()))\n    }\n\n    fn get_minimum_delegation_rate(&self) -> Result<u8, ExecError> {\n        let auction_contract_hash = self.context.get_system_contract(AUCTION)?;\n        let auction_named_keys = self\n            .context\n            .state()\n            .borrow_mut()\n            .get_named_keys(EntityAddr::System(auction_contract_hash.value()))?;\n        let minimum_delegation_rate_key =\n            auction_named_keys.get(MINIMUM_DELEGATION_RATE_KEY).ok_or(\n                
ExecError::NamedKeyNotFound(MINIMUM_DELEGATION_RATE_KEY.to_string()),\n            )?;\n        let stored_value = self\n            .context\n            .state()\n            .borrow_mut()\n            .read(minimum_delegation_rate_key)?\n            .ok_or(ExecError::KeyNotFound(*minimum_delegation_rate_key))?;\n        if let StoredValue::CLValue(cl_value) = stored_value {\n            let minimum_delegation_rate: u8 = cl_value.into_t().map_err(ExecError::CLValue)?;\n            Ok(minimum_delegation_rate)\n        } else {\n            Err(ExecError::UnexpectedStoredValueVariant)\n        }\n    }\n}\n\n#[cfg(feature = \"test-support\")]\nfn dump_runtime_stack_info(instance: casper_wasmi::ModuleRef, max_stack_height: u32) {\n    let globals = instance.globals();\n    let Some(current_runtime_call_stack_height) = globals.last() else {\n        return;\n    };\n\n    if let RuntimeValue::I32(current_runtime_call_stack_height) =\n        current_runtime_call_stack_height.get()\n    {\n        if current_runtime_call_stack_height > max_stack_height as i32 {\n            eprintln!(\"runtime stack overflow, current={current_runtime_call_stack_height}, max={max_stack_height}\");\n        }\n    };\n}\n"
  },
  {
    "path": "execution_engine/src/runtime/stack.rs",
    "content": "//! Runtime stacks.\nuse casper_types::{account::AccountHash, system::Caller};\n\n/// A runtime stack frame.\n///\n/// Currently it aliases to a [`Caller`].\n///\n/// NOTE: Once we need to add more data to a stack frame we should make this a newtype, rather than\n/// change [`Caller`].\npub type RuntimeStackFrame = Caller;\n\n/// The runtime stack.\n#[derive(Clone, Debug)]\npub struct RuntimeStack {\n    frames: Vec<RuntimeStackFrame>,\n    max_height: usize,\n}\n\n/// Error returned on an attempt to pop off an empty stack.\n#[cfg(test)]\n#[derive(Debug)]\nstruct RuntimeStackUnderflow;\n\n/// Error returned on an attempt to push to a stack already at the maximum height.\n#[derive(Debug)]\npub struct RuntimeStackOverflow;\n\nimpl RuntimeStack {\n    /// Creates an empty stack.\n    pub fn new(max_height: usize) -> Self {\n        Self {\n            frames: Vec::with_capacity(max_height),\n            max_height,\n        }\n    }\n\n    /// Creates a stack with one entry.\n    pub fn new_with_frame(max_height: usize, frame: RuntimeStackFrame) -> Self {\n        let mut frames = Vec::with_capacity(max_height);\n        frames.push(frame);\n        Self { frames, max_height }\n    }\n\n    /// Is the stack empty?\n    pub fn is_empty(&self) -> bool {\n        self.frames.is_empty()\n    }\n\n    /// The height of the stack.\n    pub fn len(&self) -> usize {\n        self.frames.len()\n    }\n\n    /// The current stack frame.\n    pub fn current_frame(&self) -> Option<&RuntimeStackFrame> {\n        self.frames.last()\n    }\n\n    /// The previous stack frame.\n    pub fn previous_frame(&self) -> Option<&RuntimeStackFrame> {\n        self.frames.iter().nth_back(1)\n    }\n\n    /// The first stack frame.\n    pub fn first_frame(&self) -> Option<&RuntimeStackFrame> {\n        self.frames.first()\n    }\n\n    /// Pops the current frame from the stack.\n    #[cfg(test)]\n    fn pop(&mut self) -> Result<(), RuntimeStackUnderflow> {\n        
self.frames.pop().ok_or(RuntimeStackUnderflow)?;\n        Ok(())\n    }\n\n    /// Pushes a frame onto the stack.\n    pub fn push(&mut self, frame: RuntimeStackFrame) -> Result<(), RuntimeStackOverflow> {\n        if self.len() < self.max_height {\n            self.frames.push(frame);\n            Ok(())\n        } else {\n            Err(RuntimeStackOverflow)\n        }\n    }\n\n    // It is here for backwards compatibility only.\n    /// A view of the stack in the previous stack format.\n    pub fn call_stack_elements(&self) -> &Vec<Caller> {\n        &self.frames\n    }\n\n    /// Returns a stack with exactly one session element with the associated account hash.\n    pub fn from_account_hash(account_hash: AccountHash, max_height: usize) -> Self {\n        RuntimeStack {\n            frames: vec![Caller::initiator(account_hash)],\n            max_height,\n        }\n    }\n}\n\n#[cfg(test)]\nmod test {\n    use core::convert::TryInto;\n\n    use casper_types::account::{AccountHash, ACCOUNT_HASH_LENGTH};\n\n    use super::*;\n\n    fn nth_frame(n: usize) -> Caller {\n        let mut bytes = [0_u8; ACCOUNT_HASH_LENGTH];\n        let n: u32 = n.try_into().unwrap();\n        bytes[0..4].copy_from_slice(&n.to_le_bytes());\n        Caller::initiator(AccountHash::new(bytes))\n    }\n\n    #[allow(clippy::redundant_clone)]\n    #[test]\n    fn stack_should_respect_max_height_after_clone() {\n        const MAX_HEIGHT: usize = 3;\n        let mut stack = RuntimeStack::new(MAX_HEIGHT);\n        stack.push(nth_frame(1)).unwrap();\n\n        let mut stack2 = stack.clone();\n        stack2.push(nth_frame(2)).unwrap();\n        stack2.push(nth_frame(3)).unwrap();\n        stack2.push(nth_frame(4)).unwrap_err();\n        assert_eq!(stack2.len(), MAX_HEIGHT);\n    }\n\n    #[test]\n    fn stack_should_work_as_expected() {\n        const MAX_HEIGHT: usize = 6;\n\n        let mut stack = RuntimeStack::new(MAX_HEIGHT);\n        assert!(stack.is_empty());\n        
assert_eq!(stack.len(), 0);\n        assert_eq!(stack.current_frame(), None);\n        assert_eq!(stack.previous_frame(), None);\n        assert_eq!(stack.first_frame(), None);\n\n        stack.push(nth_frame(0)).unwrap();\n        assert!(!stack.is_empty());\n        assert_eq!(stack.len(), 1);\n        assert_eq!(stack.current_frame(), Some(&nth_frame(0)));\n        assert_eq!(stack.previous_frame(), None);\n        assert_eq!(stack.first_frame(), Some(&nth_frame(0)));\n\n        let mut n: usize = 1;\n        while stack.push(nth_frame(n)).is_ok() {\n            n += 1;\n            assert!(!stack.is_empty());\n            assert_eq!(stack.len(), n);\n            assert_eq!(stack.current_frame(), Some(&nth_frame(n - 1)));\n            assert_eq!(stack.previous_frame(), Some(&nth_frame(n - 2)));\n            assert_eq!(stack.first_frame(), Some(&nth_frame(0)));\n        }\n        assert!(!stack.is_empty());\n        assert_eq!(stack.len(), MAX_HEIGHT);\n        assert_eq!(stack.current_frame(), Some(&nth_frame(MAX_HEIGHT - 1)));\n        assert_eq!(stack.previous_frame(), Some(&nth_frame(MAX_HEIGHT - 2)));\n        assert_eq!(stack.first_frame(), Some(&nth_frame(0)));\n\n        while stack.len() >= 3 {\n            stack.pop().unwrap();\n            n = n.checked_sub(1).unwrap();\n            assert!(!stack.is_empty());\n            assert_eq!(stack.len(), n);\n            assert_eq!(stack.current_frame(), Some(&nth_frame(n - 1)));\n            assert_eq!(stack.previous_frame(), Some(&nth_frame(n - 2)));\n            assert_eq!(stack.first_frame(), Some(&nth_frame(0)));\n        }\n\n        stack.pop().unwrap();\n        assert!(!stack.is_empty());\n        assert_eq!(stack.len(), 1);\n        assert_eq!(stack.current_frame(), Some(&nth_frame(0)));\n        assert_eq!(stack.previous_frame(), None);\n        assert_eq!(stack.first_frame(), Some(&nth_frame(0)));\n\n        stack.pop().unwrap();\n        assert!(stack.is_empty());\n        assert_eq!(stack.len(), 
0);\n        assert_eq!(stack.current_frame(), None);\n        assert_eq!(stack.previous_frame(), None);\n        assert_eq!(stack.first_frame(), None);\n\n        assert!(stack.pop().is_err());\n    }\n}\n"
  },
  {
    "path": "execution_engine/src/runtime/standard_payment_internal.rs",
    "content": "use casper_storage::global_state::{error::Error as GlobalStateError, state::StateReader};\nuse casper_types::{\n    account::Account,\n    system::{handle_payment, mint},\n    ApiError, Key, RuntimeArgs, StoredValue, TransferredTo, URef, U512,\n};\n\nuse casper_storage::system::standard_payment::{\n    account_provider::AccountProvider, handle_payment_provider::HandlePaymentProvider,\n    mint_provider::MintProvider, StandardPayment,\n};\n\nuse crate::{execution, runtime::Runtime};\n\npub(crate) const METHOD_GET_PAYMENT_PURSE: &str = \"get_payment_purse\";\n\nimpl From<execution::Error> for Option<ApiError> {\n    fn from(exec_error: execution::Error) -> Self {\n        match exec_error {\n            // This is used to propagate [`execution::Error::GasLimit`] to make sure\n            // [`StandardPayment`] contract running natively supports propagating gas limit\n            // errors without a panic.\n            execution::Error::GasLimit => Some(mint::Error::GasLimit.into()),\n            // There are possibly other exec errors happening but such translation would be lossy.\n            _ => None,\n        }\n    }\n}\n\nimpl<'a, R> AccountProvider for Runtime<'a, R>\nwhere\n    R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn get_main_purse(&mut self) -> Result<URef, ApiError> {\n        self.context.get_main_purse().map_err(|exec_error| {\n            <Option<ApiError>>::from(exec_error).unwrap_or(ApiError::InvalidPurse)\n        })\n    }\n}\n\nimpl<'a, R> MintProvider for Runtime<'a, R>\nwhere\n    R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn transfer_purse_to_account(\n        &mut self,\n        source: URef,\n        target_account: &Account,\n        amount: U512,\n    ) -> Result<(), ApiError> {\n        match Runtime::transfer_from_purse_to_account(self, source, target_account, amount, None) {\n            Ok(Ok(TransferredTo::ExistingAccount)) => Ok(()),\n            
Ok(Ok(TransferredTo::NewAccount)) => Ok(()),\n            Ok(Err(error)) => Err(error),\n            Err(_error) => Err(ApiError::Transfer),\n        }\n    }\n}\n\nimpl<'a, R> HandlePaymentProvider for Runtime<'a, R>\nwhere\n    R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn get_payment_purse(&mut self) -> Result<URef, ApiError> {\n        let hash = self\n            .get_handle_payment_contract()\n            .map_err(|_| ApiError::MissingSystemContractHash)?;\n\n        let cl_value = self\n            .call_contract(hash, METHOD_GET_PAYMENT_PURSE, RuntimeArgs::new())\n            .map_err(|exec_error| {\n                let maybe_api_error: Option<ApiError> = exec_error.into();\n                maybe_api_error\n                    .unwrap_or_else(|| handle_payment::Error::PaymentPurseNotFound.into())\n            })?;\n\n        let payment_purse_ref: URef = cl_value.into_t()?;\n        Ok(payment_purse_ref)\n    }\n}\n\nimpl<'a, R> StandardPayment for Runtime<'a, R> where\n    R: StateReader<Key, StoredValue, Error = GlobalStateError>\n{\n}\n"
  },
  {
    "path": "execution_engine/src/runtime/utils.rs",
    "content": "use std::collections::BTreeMap;\n\nuse casper_wasm::elements::Module;\nuse casper_wasmi::{ImportsBuilder, MemoryRef, ModuleInstance, ModuleRef};\n\nuse casper_types::{\n    contracts::NamedKeys, AccessRights, CLType, CLValue, Key, ProtocolVersion, PublicKey,\n    RuntimeArgs, URef, URefAddr, U128, U256, U512,\n};\n\nuse crate::{\n    engine_state::EngineConfig,\n    execution::ExecError,\n    resolvers::{self, memory_resolver::MemoryResolver},\n};\n\n/// Creates an WASM module instance and a memory instance.\n///\n/// This ensures that a memory instance is properly resolved into a pre-allocated memory area, and a\n/// host function resolver is attached to the module.\n///\n/// The WASM module is also validated to not have a \"start\" section as we currently don't support\n/// running it.\n///\n/// Both [`ModuleRef`] and a [`MemoryRef`] are ready to be executed.\npub(super) fn instance_and_memory(\n    parity_module: Module,\n    protocol_version: ProtocolVersion,\n    engine_config: &EngineConfig,\n) -> Result<(ModuleRef, MemoryRef), ExecError> {\n    let module = casper_wasmi::Module::from_casper_wasm_module(parity_module)?;\n    let resolver = resolvers::create_module_resolver(protocol_version, engine_config)?;\n    let mut imports = ImportsBuilder::new();\n    imports.push_resolver(\"env\", &resolver);\n    let not_started_module = ModuleInstance::new(&module, &imports)?;\n    if not_started_module.has_start() {\n        return Err(ExecError::UnsupportedWasmStart);\n    }\n    let instance = not_started_module.not_started_instance().clone();\n    let memory = resolver.memory_ref()?;\n    Ok((instance, memory))\n}\n\n/// Removes `rights_to_disable` from all urefs in `args` matching the address `uref_addr`.\npub(super) fn attenuate_uref_in_args(\n    mut args: RuntimeArgs,\n    uref_addr: URefAddr,\n    rights_to_disable: AccessRights,\n) -> Result<RuntimeArgs, ExecError> {\n    for arg in args.named_args_mut() {\n        *arg.cl_value_mut() = 
rewrite_urefs(arg.cl_value().clone(), |uref| {\n            if uref.addr() == uref_addr {\n                uref.disable_access_rights(rights_to_disable);\n            }\n        })?;\n    }\n\n    Ok(args)\n}\n\n/// Extracts a copy of every uref able to be deserialized from `cl_value`.\npub(super) fn extract_urefs(cl_value: &CLValue) -> Result<Vec<URef>, ExecError> {\n    let mut vec: Vec<URef> = Default::default();\n    rewrite_urefs(cl_value.clone(), |uref| {\n        vec.push(*uref);\n    })?;\n    Ok(vec)\n}\n\n/// Executes `func` on every uref able to be deserialized from `cl_value` and returns the resulting\n/// re-serialized `CLValue`.\n#[allow(clippy::cognitive_complexity)]\nfn rewrite_urefs(cl_value: CLValue, mut func: impl FnMut(&mut URef)) -> Result<CLValue, ExecError> {\n    let ret = match cl_value.cl_type() {\n        CLType::Bool\n        | CLType::I32\n        | CLType::I64\n        | CLType::U8\n        | CLType::U32\n        | CLType::U64\n        | CLType::U128\n        | CLType::U256\n        | CLType::U512\n        | CLType::Unit\n        | CLType::String\n        | CLType::PublicKey\n        | CLType::Any => cl_value,\n        CLType::Option(ty) => match **ty {\n            CLType::URef => {\n                let mut opt: Option<URef> = cl_value.to_owned().into_t()?;\n                opt.iter_mut().for_each(func);\n                CLValue::from_t(opt)?\n            }\n            CLType::Key => {\n                let mut opt: Option<Key> = cl_value.to_owned().into_t()?;\n                opt.iter_mut().filter_map(Key::as_uref_mut).for_each(func);\n                CLValue::from_t(opt)?\n            }\n            _ => cl_value,\n        },\n        CLType::List(ty) => match **ty {\n            CLType::URef => {\n                let mut urefs: Vec<URef> = cl_value.to_owned().into_t()?;\n                urefs.iter_mut().for_each(func);\n                CLValue::from_t(urefs)?\n            }\n            CLType::Key => {\n                let mut 
keys: Vec<Key> = cl_value.to_owned().into_t()?;\n                keys.iter_mut().filter_map(Key::as_uref_mut).for_each(func);\n                CLValue::from_t(keys)?\n            }\n            _ => cl_value,\n        },\n        CLType::ByteArray(_) => cl_value,\n        CLType::Result { ok, err } => match (&**ok, &**err) {\n            (CLType::URef, CLType::Bool) => {\n                let mut res: Result<URef, bool> = cl_value.to_owned().into_t()?;\n                res.iter_mut().for_each(func);\n                CLValue::from_t(res)?\n            }\n            (CLType::URef, CLType::I32) => {\n                let mut res: Result<URef, i32> = cl_value.to_owned().into_t()?;\n                res.iter_mut().for_each(func);\n                CLValue::from_t(res)?\n            }\n            (CLType::URef, CLType::I64) => {\n                let mut res: Result<URef, i64> = cl_value.to_owned().into_t()?;\n                res.iter_mut().for_each(func);\n                CLValue::from_t(res)?\n            }\n            (CLType::URef, CLType::U8) => {\n                let mut res: Result<URef, u8> = cl_value.to_owned().into_t()?;\n                res.iter_mut().for_each(func);\n                CLValue::from_t(res)?\n            }\n            (CLType::URef, CLType::U32) => {\n                let mut res: Result<URef, u32> = cl_value.to_owned().into_t()?;\n                res.iter_mut().for_each(func);\n                CLValue::from_t(res)?\n            }\n            (CLType::URef, CLType::U64) => {\n                let mut res: Result<URef, u64> = cl_value.to_owned().into_t()?;\n                res.iter_mut().for_each(func);\n                CLValue::from_t(res)?\n            }\n            (CLType::URef, CLType::U128) => {\n                let mut res: Result<URef, U128> = cl_value.to_owned().into_t()?;\n                res.iter_mut().for_each(func);\n                CLValue::from_t(res)?\n            }\n            (CLType::URef, CLType::U256) => {\n                let 
mut res: Result<URef, U256> = cl_value.to_owned().into_t()?;\n                res.iter_mut().for_each(func);\n                CLValue::from_t(res)?\n            }\n            (CLType::URef, CLType::U512) => {\n                let mut res: Result<URef, U512> = cl_value.to_owned().into_t()?;\n                res.iter_mut().for_each(func);\n                CLValue::from_t(res)?\n            }\n            (CLType::URef, CLType::Unit) => {\n                let mut res: Result<URef, ()> = cl_value.to_owned().into_t()?;\n                res.iter_mut().for_each(func);\n                CLValue::from_t(res)?\n            }\n            (CLType::URef, CLType::String) => {\n                let mut res: Result<URef, String> = cl_value.to_owned().into_t()?;\n                res.iter_mut().for_each(func);\n                CLValue::from_t(res)?\n            }\n            (CLType::URef, CLType::Key) => {\n                let mut res: Result<URef, Key> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(uref) => func(uref),\n                    Err(Key::URef(uref)) => func(uref),\n                    Err(_) => {}\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::URef, CLType::URef) => {\n                let mut res: Result<URef, URef> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(uref) => func(uref),\n                    Err(uref) => func(uref),\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::Key, CLType::Bool) => {\n                let mut res: Result<Key, bool> = cl_value.to_owned().into_t()?;\n                if let Ok(Key::URef(uref)) = &mut res {\n                    func(uref);\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::Key, CLType::I32) => {\n                let mut res: Result<Key, i32> = cl_value.to_owned().into_t()?;\n                if let 
Ok(Key::URef(uref)) = &mut res {\n                    func(uref);\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::Key, CLType::I64) => {\n                let mut res: Result<Key, i64> = cl_value.to_owned().into_t()?;\n                if let Ok(Key::URef(uref)) = &mut res {\n                    func(uref);\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::Key, CLType::U8) => {\n                let mut res: Result<Key, u8> = cl_value.to_owned().into_t()?;\n                if let Ok(Key::URef(uref)) = &mut res {\n                    func(uref);\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::Key, CLType::U32) => {\n                let mut res: Result<Key, u32> = cl_value.to_owned().into_t()?;\n                if let Ok(Key::URef(uref)) = &mut res {\n                    func(uref);\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::Key, CLType::U64) => {\n                let mut res: Result<Key, u64> = cl_value.to_owned().into_t()?;\n                if let Ok(Key::URef(uref)) = &mut res {\n                    func(uref);\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::Key, CLType::U128) => {\n                let mut res: Result<Key, U128> = cl_value.to_owned().into_t()?;\n                if let Ok(Key::URef(uref)) = &mut res {\n                    func(uref);\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::Key, CLType::U256) => {\n                let mut res: Result<Key, U256> = cl_value.to_owned().into_t()?;\n                if let Ok(Key::URef(uref)) = &mut res {\n                    func(uref);\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::Key, CLType::U512) => {\n                let mut res: Result<Key, U512> = cl_value.to_owned().into_t()?;\n   
             if let Ok(Key::URef(uref)) = &mut res {\n                    func(uref);\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::Key, CLType::Unit) => {\n                let mut res: Result<Key, ()> = cl_value.to_owned().into_t()?;\n                if let Ok(Key::URef(uref)) = &mut res {\n                    func(uref);\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::Key, CLType::String) => {\n                let mut res: Result<Key, String> = cl_value.to_owned().into_t()?;\n                if let Ok(Key::URef(uref)) = &mut res {\n                    func(uref);\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::Key, CLType::URef) => {\n                let mut res: Result<Key, URef> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(Key::URef(uref)) => func(uref),\n                    Ok(_) => {}\n                    Err(uref) => func(uref),\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::Key, CLType::Key) => {\n                let mut res: Result<Key, Key> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(Key::URef(uref)) => func(uref),\n                    Err(Key::URef(uref)) => func(uref),\n                    Ok(_) | Err(_) => {}\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::Bool, CLType::URef) => {\n                let mut res: Result<bool, URef> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(_) => {}\n                    Err(uref) => func(uref),\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::I32, CLType::URef) => {\n                let mut res: Result<i32, URef> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(_) 
=> {}\n                    Err(uref) => func(uref),\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::I64, CLType::URef) => {\n                let mut res: Result<i64, URef> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(_) => {}\n                    Err(uref) => func(uref),\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::U8, CLType::URef) => {\n                let mut res: Result<u8, URef> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(_) => {}\n                    Err(uref) => func(uref),\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::U32, CLType::URef) => {\n                let mut res: Result<u32, URef> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(_) => {}\n                    Err(uref) => func(uref),\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::U64, CLType::URef) => {\n                let mut res: Result<u64, URef> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(_) => {}\n                    Err(uref) => func(uref),\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::U128, CLType::URef) => {\n                let mut res: Result<U128, URef> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(_) => {}\n                    Err(uref) => func(uref),\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::U256, CLType::URef) => {\n                let mut res: Result<U256, URef> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(_) => {}\n                    Err(uref) => func(uref),\n                }\n                CLValue::from_t(res)?\n         
   }\n            (CLType::U512, CLType::URef) => {\n                let mut res: Result<U512, URef> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(_) => {}\n                    Err(uref) => func(uref),\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::Unit, CLType::URef) => {\n                let mut res: Result<(), URef> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(_) => {}\n                    Err(uref) => func(uref),\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::String, CLType::URef) => {\n                let mut res: Result<String, URef> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(_) => {}\n                    Err(uref) => func(uref),\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::Bool, CLType::Key) => {\n                let mut res: Result<bool, Key> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(_) => {}\n                    Err(Key::URef(uref)) => func(uref),\n                    Err(_) => {}\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::I32, CLType::Key) => {\n                let mut res: Result<i32, Key> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(_) => {}\n                    Err(Key::URef(uref)) => func(uref),\n                    Err(_) => {}\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::I64, CLType::Key) => {\n                let mut res: Result<i64, Key> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(_) => {}\n                    Err(Key::URef(uref)) => func(uref),\n                    Err(_) => {}\n                }\n                
CLValue::from_t(res)?\n            }\n            (CLType::U8, CLType::Key) => {\n                let mut res: Result<u8, Key> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(_) => {}\n                    Err(Key::URef(uref)) => func(uref),\n                    Err(_) => {}\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::U32, CLType::Key) => {\n                let mut res: Result<u32, Key> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(_) => {}\n                    Err(Key::URef(uref)) => func(uref),\n                    Err(_) => {}\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::U64, CLType::Key) => {\n                let mut res: Result<u64, Key> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(_) => {}\n                    Err(Key::URef(uref)) => func(uref),\n                    Err(_) => {}\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::U128, CLType::Key) => {\n                let mut res: Result<U128, Key> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(_) => {}\n                    Err(Key::URef(uref)) => func(uref),\n                    Err(_) => {}\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::U256, CLType::Key) => {\n                let mut res: Result<U256, Key> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(_) => {}\n                    Err(Key::URef(uref)) => func(uref),\n                    Err(_) => {}\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::U512, CLType::Key) => {\n                let mut res: Result<U512, Key> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                   
 Ok(_) => {}\n                    Err(Key::URef(uref)) => func(uref),\n                    Err(_) => {}\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::Unit, CLType::Key) => {\n                let mut res: Result<(), Key> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(_) => {}\n                    Err(Key::URef(uref)) => func(uref),\n                    Err(_) => {}\n                }\n                CLValue::from_t(res)?\n            }\n            (CLType::String, CLType::Key) => {\n                let mut res: Result<String, Key> = cl_value.to_owned().into_t()?;\n                match &mut res {\n                    Ok(_) => {}\n                    Err(Key::URef(uref)) => func(uref),\n                    Err(_) => {}\n                }\n                CLValue::from_t(res)?\n            }\n            (_, _) => cl_value,\n        },\n        CLType::Map { key, value } => match (&**key, &**value) {\n            (CLType::URef, CLType::Bool) => {\n                let mut map: BTreeMap<URef, bool> = cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, v)| {\n                        func(&mut k);\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::URef, CLType::I32) => {\n                let mut map: BTreeMap<URef, i32> = cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, v)| {\n                        func(&mut k);\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::URef, CLType::I64) => {\n                let mut map: BTreeMap<URef, i64> = cl_value.to_owned().into_t()?;\n                map = map\n         
           .into_iter()\n                    .map(|(mut k, v)| {\n                        func(&mut k);\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::URef, CLType::U8) => {\n                let mut map: BTreeMap<URef, u8> = cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, v)| {\n                        func(&mut k);\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::URef, CLType::U32) => {\n                let mut map: BTreeMap<URef, u32> = cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, v)| {\n                        func(&mut k);\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::URef, CLType::U64) => {\n                let mut map: BTreeMap<URef, u64> = cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, v)| {\n                        func(&mut k);\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::URef, CLType::U128) => {\n                let mut map: BTreeMap<URef, U128> = cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, v)| {\n                        func(&mut k);\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::URef, CLType::U256) => {\n                let mut map: BTreeMap<URef, U256> = 
cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, v)| {\n                        func(&mut k);\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::URef, CLType::U512) => {\n                let mut map: BTreeMap<URef, U512> = cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, v)| {\n                        func(&mut k);\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::URef, CLType::Unit) => {\n                let mut map: BTreeMap<URef, ()> = cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, v)| {\n                        func(&mut k);\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::URef, CLType::String) => {\n                let mut map: BTreeMap<URef, String> = cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, v)| {\n                        func(&mut k);\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::URef, CLType::Key) => {\n                let mut map: BTreeMap<URef, Key> = cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, mut v)| {\n                        func(&mut k);\n                        v.as_uref_mut().iter_mut().for_each(|v| func(v));\n                        (k, v)\n                    })\n                    
.collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::URef, CLType::URef) => {\n                let mut map: BTreeMap<URef, URef> = cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, mut v)| {\n                        func(&mut k);\n                        func(&mut v);\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::Key, CLType::Bool) => {\n                let mut map: BTreeMap<Key, bool> = cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, v)| {\n                        k.as_uref_mut().iter_mut().for_each(|k| func(k));\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::Key, CLType::I32) => {\n                let mut map: BTreeMap<Key, i32> = cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, v)| {\n                        k.as_uref_mut().iter_mut().for_each(|k| func(k));\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::Key, CLType::I64) => {\n                let mut map: BTreeMap<Key, i64> = cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, v)| {\n                        k.as_uref_mut().iter_mut().for_each(|k| func(k));\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::Key, CLType::U8) => {\n                let mut map: BTreeMap<Key, u8> = cl_value.to_owned().into_t()?;\n       
         map = map\n                    .into_iter()\n                    .map(|(mut k, v)| {\n                        k.as_uref_mut().iter_mut().for_each(|k| func(k));\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::Key, CLType::U32) => {\n                let mut map: BTreeMap<Key, u32> = cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, v)| {\n                        k.as_uref_mut().iter_mut().for_each(|k| func(k));\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::Key, CLType::U64) => {\n                let mut map: BTreeMap<Key, u64> = cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, v)| {\n                        k.as_uref_mut().iter_mut().for_each(|k| func(k));\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::Key, CLType::U128) => {\n                let mut map: BTreeMap<Key, U128> = cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, v)| {\n                        k.as_uref_mut().iter_mut().for_each(|k| func(k));\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::Key, CLType::U256) => {\n                let mut map: BTreeMap<Key, U256> = cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, v)| {\n                        k.as_uref_mut().iter_mut().for_each(|k| func(k));\n                        (k, v)\n      
              })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::Key, CLType::U512) => {\n                let mut map: BTreeMap<Key, U512> = cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, v)| {\n                        k.as_uref_mut().iter_mut().for_each(|k| func(k));\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::Key, CLType::Unit) => {\n                let mut map: BTreeMap<Key, ()> = cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, v)| {\n                        k.as_uref_mut().iter_mut().for_each(|k| func(k));\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::Key, CLType::String) => {\n                let mut map: BTreeMap<Key, String> = cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, v)| {\n                        k.as_uref_mut().iter_mut().for_each(|k| func(k));\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::Key, CLType::URef) => {\n                let mut map: BTreeMap<Key, URef> = cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, mut v)| {\n                        k.as_uref_mut().iter_mut().for_each(|k| func(k));\n                        func(&mut v);\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::Key, CLType::Key) => {\n         
       let mut map: BTreeMap<Key, Key> = cl_value.to_owned().into_t()?;\n                map = map\n                    .into_iter()\n                    .map(|(mut k, mut v)| {\n                        k.as_uref_mut().iter_mut().for_each(|k| func(k));\n                        v.as_uref_mut().iter_mut().for_each(|v| func(v));\n                        (k, v)\n                    })\n                    .collect();\n                CLValue::from_t(map)?\n            }\n            (CLType::Bool, CLType::URef) => {\n                let mut map: BTreeMap<bool, URef> = cl_value.to_owned().into_t()?;\n                map.values_mut().for_each(func);\n                CLValue::from_t(map)?\n            }\n            (CLType::I32, CLType::URef) => {\n                let mut map: BTreeMap<i32, URef> = cl_value.to_owned().into_t()?;\n                map.values_mut().for_each(func);\n                CLValue::from_t(map)?\n            }\n            (CLType::I64, CLType::URef) => {\n                let mut map: BTreeMap<i64, URef> = cl_value.to_owned().into_t()?;\n                map.values_mut().for_each(func);\n                CLValue::from_t(map)?\n            }\n            (CLType::U8, CLType::URef) => {\n                let mut map: BTreeMap<u8, URef> = cl_value.to_owned().into_t()?;\n                map.values_mut().for_each(func);\n                CLValue::from_t(map)?\n            }\n            (CLType::U32, CLType::URef) => {\n                let mut map: BTreeMap<u32, URef> = cl_value.to_owned().into_t()?;\n                map.values_mut().for_each(func);\n                CLValue::from_t(map)?\n            }\n            (CLType::U64, CLType::URef) => {\n                let mut map: BTreeMap<u64, URef> = cl_value.to_owned().into_t()?;\n                map.values_mut().for_each(func);\n                CLValue::from_t(map)?\n            }\n            (CLType::U128, CLType::URef) => {\n                let mut map: BTreeMap<U128, URef> = 
cl_value.to_owned().into_t()?;\n                map.values_mut().for_each(func);\n                CLValue::from_t(map)?\n            }\n            (CLType::U256, CLType::URef) => {\n                let mut map: BTreeMap<U256, URef> = cl_value.to_owned().into_t()?;\n                map.values_mut().for_each(func);\n                CLValue::from_t(map)?\n            }\n            (CLType::U512, CLType::URef) => {\n                let mut map: BTreeMap<U512, URef> = cl_value.to_owned().into_t()?;\n                map.values_mut().for_each(func);\n                CLValue::from_t(map)?\n            }\n            (CLType::Unit, CLType::URef) => {\n                let mut map: BTreeMap<(), URef> = cl_value.to_owned().into_t()?;\n                map.values_mut().for_each(func);\n                CLValue::from_t(map)?\n            }\n            (CLType::String, CLType::URef) => {\n                let mut map: BTreeMap<String, URef> = cl_value.to_owned().into_t()?;\n                map.values_mut().for_each(func);\n                CLValue::from_t(map)?\n            }\n            (CLType::PublicKey, CLType::URef) => {\n                let mut map: BTreeMap<PublicKey, URef> = cl_value.to_owned().into_t()?;\n                map.values_mut().for_each(func);\n                CLValue::from_t(map)?\n            }\n            (CLType::Bool, CLType::Key) => {\n                let mut map: BTreeMap<bool, Key> = cl_value.to_owned().into_t()?;\n                map.values_mut().filter_map(Key::as_uref_mut).for_each(func);\n                CLValue::from_t(map)?\n            }\n            (CLType::I32, CLType::Key) => {\n                let mut map: BTreeMap<i32, Key> = cl_value.to_owned().into_t()?;\n                map.values_mut().filter_map(Key::as_uref_mut).for_each(func);\n                CLValue::from_t(map)?\n            }\n            (CLType::I64, CLType::Key) => {\n                let mut map: BTreeMap<i64, Key> = cl_value.to_owned().into_t()?;\n                
map.values_mut().filter_map(Key::as_uref_mut).for_each(func);\n                CLValue::from_t(map)?\n            }\n            (CLType::U8, CLType::Key) => {\n                let mut map: BTreeMap<u8, Key> = cl_value.to_owned().into_t()?;\n                map.values_mut().filter_map(Key::as_uref_mut).for_each(func);\n                CLValue::from_t(map)?\n            }\n            (CLType::U32, CLType::Key) => {\n                let mut map: BTreeMap<u32, Key> = cl_value.to_owned().into_t()?;\n                map.values_mut().filter_map(Key::as_uref_mut).for_each(func);\n                CLValue::from_t(map)?\n            }\n            (CLType::U64, CLType::Key) => {\n                let mut map: BTreeMap<u64, Key> = cl_value.to_owned().into_t()?;\n                map.values_mut().filter_map(Key::as_uref_mut).for_each(func);\n                CLValue::from_t(map)?\n            }\n            (CLType::U128, CLType::Key) => {\n                let mut map: BTreeMap<U128, Key> = cl_value.to_owned().into_t()?;\n                map.values_mut().filter_map(Key::as_uref_mut).for_each(func);\n                CLValue::from_t(map)?\n            }\n            (CLType::U256, CLType::Key) => {\n                let mut map: BTreeMap<U256, Key> = cl_value.to_owned().into_t()?;\n                map.values_mut().filter_map(Key::as_uref_mut).for_each(func);\n                CLValue::from_t(map)?\n            }\n            (CLType::U512, CLType::Key) => {\n                let mut map: BTreeMap<U512, Key> = cl_value.to_owned().into_t()?;\n                map.values_mut().filter_map(Key::as_uref_mut).for_each(func);\n                CLValue::from_t(map)?\n            }\n            (CLType::Unit, CLType::Key) => {\n                let mut map: BTreeMap<(), Key> = cl_value.to_owned().into_t()?;\n                map.values_mut().filter_map(Key::as_uref_mut).for_each(func);\n                CLValue::from_t(map)?\n            }\n            (CLType::String, CLType::Key) => {\n           
     let mut map: NamedKeys = cl_value.to_owned().into_t()?;\n                map.keys_mut().filter_map(Key::as_uref_mut).for_each(func);\n                CLValue::from_t(map)?\n            }\n            (CLType::PublicKey, CLType::Key) => {\n                let mut map: BTreeMap<PublicKey, Key> = cl_value.to_owned().into_t()?;\n                map.values_mut().filter_map(Key::as_uref_mut).for_each(func);\n                CLValue::from_t(map)?\n            }\n            (_, _) => cl_value,\n        },\n        CLType::Tuple1([ty]) => match **ty {\n            CLType::URef => {\n                let mut val: (URef,) = cl_value.to_owned().into_t()?;\n                func(&mut val.0);\n                CLValue::from_t(val)?\n            }\n            CLType::Key => {\n                let mut val: (Key,) = cl_value.to_owned().into_t()?;\n                val.0.as_uref_mut().iter_mut().for_each(|v| func(v));\n                CLValue::from_t(val)?\n            }\n            _ => cl_value,\n        },\n        CLType::Tuple2([ty1, ty2]) => match (&**ty1, &**ty2) {\n            (CLType::URef, CLType::Bool) => {\n                let mut val: (URef, bool) = cl_value.to_owned().into_t()?;\n                func(&mut val.0);\n                CLValue::from_t(val)?\n            }\n            (CLType::URef, CLType::I32) => {\n                let mut val: (URef, i32) = cl_value.to_owned().into_t()?;\n                func(&mut val.0);\n                CLValue::from_t(val)?\n            }\n            (CLType::URef, CLType::I64) => {\n                let mut val: (URef, i64) = cl_value.to_owned().into_t()?;\n                func(&mut val.0);\n                CLValue::from_t(val)?\n            }\n            (CLType::URef, CLType::U8) => {\n                let mut val: (URef, u8) = cl_value.to_owned().into_t()?;\n                func(&mut val.0);\n                CLValue::from_t(val)?\n            }\n            (CLType::URef, CLType::U32) => {\n                let mut val: (URef, 
u32) = cl_value.to_owned().into_t()?;\n                func(&mut val.0);\n                CLValue::from_t(val)?\n            }\n            (CLType::URef, CLType::U64) => {\n                let mut val: (URef, u64) = cl_value.to_owned().into_t()?;\n                func(&mut val.0);\n                CLValue::from_t(val)?\n            }\n            (CLType::URef, CLType::U128) => {\n                let mut val: (URef, U128) = cl_value.to_owned().into_t()?;\n                func(&mut val.0);\n                CLValue::from_t(val)?\n            }\n            (CLType::URef, CLType::U256) => {\n                let mut val: (URef, U256) = cl_value.to_owned().into_t()?;\n                func(&mut val.0);\n                CLValue::from_t(val)?\n            }\n            (CLType::URef, CLType::U512) => {\n                let mut val: (URef, U512) = cl_value.to_owned().into_t()?;\n                func(&mut val.0);\n                CLValue::from_t(val)?\n            }\n            (CLType::URef, CLType::Unit) => {\n                let mut val: (URef, ()) = cl_value.to_owned().into_t()?;\n                func(&mut val.0);\n                CLValue::from_t(val)?\n            }\n            (CLType::URef, CLType::String) => {\n                let mut val: (URef, String) = cl_value.to_owned().into_t()?;\n                func(&mut val.0);\n                CLValue::from_t(val)?\n            }\n            (CLType::URef, CLType::Key) => {\n                let mut val: (URef, Key) = cl_value.to_owned().into_t()?;\n                func(&mut val.0);\n                val.1.as_uref_mut().iter_mut().for_each(|v| func(v));\n                CLValue::from_t(val)?\n            }\n            (CLType::URef, CLType::URef) => {\n                let mut val: (URef, URef) = cl_value.to_owned().into_t()?;\n                func(&mut val.0);\n                func(&mut val.1);\n                CLValue::from_t(val)?\n            }\n            (CLType::Key, CLType::Bool) => {\n                let mut 
val: (Key, bool) = cl_value.to_owned().into_t()?;\n                val.0.as_uref_mut().iter_mut().for_each(|v| func(v));\n                CLValue::from_t(val)?\n            }\n            (CLType::Key, CLType::I32) => {\n                let mut val: (Key, i32) = cl_value.to_owned().into_t()?;\n                val.0.as_uref_mut().iter_mut().for_each(|v| func(v));\n                CLValue::from_t(val)?\n            }\n            (CLType::Key, CLType::I64) => {\n                let mut val: (Key, i64) = cl_value.to_owned().into_t()?;\n                val.0.as_uref_mut().iter_mut().for_each(|v| func(v));\n                CLValue::from_t(val)?\n            }\n            (CLType::Key, CLType::U8) => {\n                let mut val: (Key, u8) = cl_value.to_owned().into_t()?;\n                val.0.as_uref_mut().iter_mut().for_each(|v| func(v));\n                CLValue::from_t(val)?\n            }\n            (CLType::Key, CLType::U32) => {\n                let mut val: (Key, u32) = cl_value.to_owned().into_t()?;\n                val.0.as_uref_mut().iter_mut().for_each(|v| func(v));\n                CLValue::from_t(val)?\n            }\n            (CLType::Key, CLType::U64) => {\n                let mut val: (Key, u64) = cl_value.to_owned().into_t()?;\n                val.0.as_uref_mut().iter_mut().for_each(|v| func(v));\n                CLValue::from_t(val)?\n            }\n            (CLType::Key, CLType::U128) => {\n                let mut val: (Key, U128) = cl_value.to_owned().into_t()?;\n                val.0.as_uref_mut().iter_mut().for_each(|v| func(v));\n                CLValue::from_t(val)?\n            }\n            (CLType::Key, CLType::U256) => {\n                let mut val: (Key, U256) = cl_value.to_owned().into_t()?;\n                val.0.as_uref_mut().iter_mut().for_each(|v| func(v));\n                CLValue::from_t(val)?\n            }\n            (CLType::Key, CLType::U512) => {\n                let mut val: (Key, U512) = 
cl_value.to_owned().into_t()?;\n                val.0.as_uref_mut().iter_mut().for_each(|v| func(v));\n                CLValue::from_t(val)?\n            }\n            (CLType::Key, CLType::Unit) => {\n                let mut val: (Key, ()) = cl_value.to_owned().into_t()?;\n                val.0.as_uref_mut().iter_mut().for_each(|v| func(v));\n                CLValue::from_t(val)?\n            }\n            (CLType::Key, CLType::String) => {\n                let mut val: (Key, String) = cl_value.to_owned().into_t()?;\n                val.0.as_uref_mut().iter_mut().for_each(|v| func(v));\n                CLValue::from_t(val)?\n            }\n            (CLType::Key, CLType::URef) => {\n                let mut val: (Key, URef) = cl_value.to_owned().into_t()?;\n                val.0.as_uref_mut().iter_mut().for_each(|v| func(v));\n                func(&mut val.1);\n                CLValue::from_t(val)?\n            }\n            (CLType::Key, CLType::Key) => {\n                let mut val: (Key, Key) = cl_value.to_owned().into_t()?;\n                val.0.as_uref_mut().iter_mut().for_each(|v| func(v));\n                val.1.as_uref_mut().iter_mut().for_each(|v| func(v));\n                CLValue::from_t(val)?\n            }\n            (CLType::Bool, CLType::URef) => {\n                let mut val: (bool, URef) = cl_value.to_owned().into_t()?;\n                func(&mut val.1);\n                CLValue::from_t(val)?\n            }\n            (CLType::I32, CLType::URef) => {\n                let mut val: (i32, URef) = cl_value.to_owned().into_t()?;\n                func(&mut val.1);\n                CLValue::from_t(val)?\n            }\n            (CLType::I64, CLType::URef) => {\n                let mut val: (i64, URef) = cl_value.to_owned().into_t()?;\n                func(&mut val.1);\n                CLValue::from_t(val)?\n            }\n            (CLType::U8, CLType::URef) => {\n                let mut val: (u8, URef) = cl_value.to_owned().into_t()?;\n  
              func(&mut val.1);\n                CLValue::from_t(val)?\n            }\n            (CLType::U32, CLType::URef) => {\n                let mut val: (u32, URef) = cl_value.to_owned().into_t()?;\n                func(&mut val.1);\n                CLValue::from_t(val)?\n            }\n            (CLType::U64, CLType::URef) => {\n                let mut val: (u64, URef) = cl_value.to_owned().into_t()?;\n                func(&mut val.1);\n                CLValue::from_t(val)?\n            }\n            (CLType::U128, CLType::URef) => {\n                let mut val: (U128, URef) = cl_value.to_owned().into_t()?;\n                func(&mut val.1);\n                CLValue::from_t(val)?\n            }\n            (CLType::U256, CLType::URef) => {\n                let mut val: (U256, URef) = cl_value.to_owned().into_t()?;\n                func(&mut val.1);\n                CLValue::from_t(val)?\n            }\n            (CLType::U512, CLType::URef) => {\n                let mut val: (U512, URef) = cl_value.to_owned().into_t()?;\n                func(&mut val.1);\n                CLValue::from_t(val)?\n            }\n            (CLType::Unit, CLType::URef) => {\n                let mut val: ((), URef) = cl_value.to_owned().into_t()?;\n                func(&mut val.1);\n                CLValue::from_t(val)?\n            }\n            (CLType::String, CLType::URef) => {\n                let mut val: (String, URef) = cl_value.to_owned().into_t()?;\n                func(&mut val.1);\n                CLValue::from_t(val)?\n            }\n            (CLType::Bool, CLType::Key) => {\n                let mut val: (bool, Key) = cl_value.to_owned().into_t()?;\n                val.1.as_uref_mut().iter_mut().for_each(|v| func(v));\n                CLValue::from_t(val)?\n            }\n            (CLType::I32, CLType::Key) => {\n                let mut val: (i32, Key) = cl_value.to_owned().into_t()?;\n                val.1.as_uref_mut().iter_mut().for_each(|v| 
func(v));\n                CLValue::from_t(val)?\n            }\n            (CLType::I64, CLType::Key) => {\n                let mut val: (i64, Key) = cl_value.to_owned().into_t()?;\n                val.1.as_uref_mut().iter_mut().for_each(|v| func(v));\n                CLValue::from_t(val)?\n            }\n            (CLType::U8, CLType::Key) => {\n                let mut val: (u8, Key) = cl_value.to_owned().into_t()?;\n                val.1.as_uref_mut().iter_mut().for_each(|v| func(v));\n                CLValue::from_t(val)?\n            }\n            (CLType::U32, CLType::Key) => {\n                let mut val: (u32, Key) = cl_value.to_owned().into_t()?;\n                val.1.as_uref_mut().iter_mut().for_each(|v| func(v));\n                CLValue::from_t(val)?\n            }\n            (CLType::U64, CLType::Key) => {\n                let mut val: (u64, Key) = cl_value.to_owned().into_t()?;\n                val.1.as_uref_mut().iter_mut().for_each(|v| func(v));\n                CLValue::from_t(val)?\n            }\n            (CLType::U128, CLType::Key) => {\n                let mut val: (U128, Key) = cl_value.to_owned().into_t()?;\n                val.1.as_uref_mut().iter_mut().for_each(|v| func(v));\n                CLValue::from_t(val)?\n            }\n            (CLType::U256, CLType::Key) => {\n                let mut val: (U256, Key) = cl_value.to_owned().into_t()?;\n                val.1.as_uref_mut().iter_mut().for_each(|v| func(v));\n                CLValue::from_t(val)?\n            }\n            (CLType::U512, CLType::Key) => {\n                let mut val: (U512, Key) = cl_value.to_owned().into_t()?;\n                val.1.as_uref_mut().iter_mut().for_each(|v| func(v));\n                CLValue::from_t(val)?\n            }\n            (CLType::Unit, CLType::Key) => {\n                let mut val: ((), Key) = cl_value.to_owned().into_t()?;\n                val.1.as_uref_mut().iter_mut().for_each(|v| func(v));\n                
CLValue::from_t(val)?\n            }\n            (CLType::String, CLType::Key) => {\n                let mut val: (String, Key) = cl_value.to_owned().into_t()?;\n                val.1.as_uref_mut().iter_mut().for_each(|v| func(v));\n                CLValue::from_t(val)?\n            }\n            (_, _) => cl_value,\n        },\n        CLType::Tuple3(_) => cl_value,\n        CLType::Key => {\n            let mut key: Key = cl_value.to_t()?;\n            key.as_uref_mut().iter_mut().for_each(|v| func(v));\n            CLValue::from_t(key)?\n        }\n        CLType::URef => {\n            let mut uref: URef = cl_value.to_t()?;\n            func(&mut uref);\n            CLValue::from_t(uref)?\n        }\n    };\n    Ok(ret)\n}\n\n#[cfg(test)]\nmod tests {\n    use std::collections::BTreeMap;\n\n    use proptest::{\n        array::uniform32,\n        collection::{btree_map, vec},\n        option,\n        prelude::*,\n        result,\n    };\n\n    use casper_types::{\n        gens::*, runtime_args, AccessRights, CLType, CLValue, Key, PublicKey, SecretKey, URef,\n    };\n\n    use super::*;\n\n    fn cl_value_with_urefs_arb() -> impl Strategy<Value = (CLValue, Vec<URef>)> {\n        // If compiler brings you here it most probably means you've added a variant to `CLType`\n        // enum but forgot to add generator for it.\n        let stub: Option<CLType> = None;\n        if let Some(cl_type) = stub {\n            match cl_type {\n                CLType::Bool\n                | CLType::I32\n                | CLType::I64\n                | CLType::U8\n                | CLType::U32\n                | CLType::U64\n                | CLType::U128\n                | CLType::U256\n                | CLType::U512\n                | CLType::Unit\n                | CLType::String\n                | CLType::Key\n                | CLType::URef\n                | CLType::Option(_)\n                | CLType::List(_)\n                | CLType::ByteArray(..)\n                | 
CLType::Result { .. }\n                | CLType::Map { .. }\n                | CLType::Tuple1(_)\n                | CLType::Tuple2(_)\n                | CLType::Tuple3(_)\n                | CLType::PublicKey\n                | CLType::Any => (),\n            }\n        };\n\n        prop_oneof![\n            Just((CLValue::from_t(()).expect(\"should create CLValue\"), vec![])),\n            any::<bool>()\n                .prop_map(|x| (CLValue::from_t(x).expect(\"should create CLValue\"), vec![])),\n            any::<i32>().prop_map(|x| (CLValue::from_t(x).expect(\"should create CLValue\"), vec![])),\n            any::<i64>().prop_map(|x| (CLValue::from_t(x).expect(\"should create CLValue\"), vec![])),\n            any::<u8>().prop_map(|x| (CLValue::from_t(x).expect(\"should create CLValue\"), vec![])),\n            any::<u32>().prop_map(|x| (CLValue::from_t(x).expect(\"should create CLValue\"), vec![])),\n            any::<u64>().prop_map(|x| (CLValue::from_t(x).expect(\"should create CLValue\"), vec![])),\n            u128_arb().prop_map(|x| (CLValue::from_t(x).expect(\"should create CLValue\"), vec![])),\n            u256_arb().prop_map(|x| (CLValue::from_t(x).expect(\"should create CLValue\"), vec![])),\n            u512_arb().prop_map(|x| (CLValue::from_t(x).expect(\"should create CLValue\"), vec![])),\n            key_arb().prop_map(|x| {\n                let urefs = x.as_uref().into_iter().cloned().collect();\n                (CLValue::from_t(x).expect(\"should create CLValue\"), urefs)\n            }),\n            uref_arb().prop_map(|x| (CLValue::from_t(x).expect(\"should create CLValue\"), vec![x])),\n            \".*\".prop_map(|x: String| (CLValue::from_t(x).expect(\"should create CLValue\"), vec![])),\n            option::of(any::<u64>())\n                .prop_map(|x| (CLValue::from_t(x).expect(\"should create CLValue\"), vec![])),\n            option::of(uref_arb()).prop_map(|x| {\n                let urefs = x.iter().cloned().collect();\n           
     (CLValue::from_t(x).expect(\"should create CLValue\"), urefs)\n            }),\n            option::of(key_arb()).prop_map(|x| {\n                let urefs = x.iter().filter_map(Key::as_uref).cloned().collect();\n                (CLValue::from_t(x).expect(\"should create CLValue\"), urefs)\n            }),\n            vec(any::<i32>(), 0..100)\n                .prop_map(|x| (CLValue::from_t(x).expect(\"should create CLValue\"), vec![])),\n            vec(uref_arb(), 0..100).prop_map(|x| (\n                CLValue::from_t(x.clone()).expect(\"should create CLValue\"),\n                x\n            )),\n            vec(key_arb(), 0..100).prop_map(|x| (\n                CLValue::from_t(x.clone()).expect(\"should create CLValue\"),\n                x.into_iter().filter_map(Key::into_uref).collect()\n            )),\n            uniform32(any::<u8>())\n                .prop_map(|x| (CLValue::from_t(x).expect(\"should create CLValue\"), vec![])),\n            result::maybe_err(key_arb(), \".*\").prop_map(|x| {\n                let urefs = match &x {\n                    Ok(key) => key.as_uref().into_iter().cloned().collect(),\n                    Err(_) => vec![],\n                };\n                (CLValue::from_t(x).expect(\"should create CLValue\"), urefs)\n            }),\n            result::maybe_ok(\".*\", uref_arb()).prop_map(|x| {\n                let urefs = match &x {\n                    Ok(_) => vec![],\n                    Err(uref) => vec![*uref],\n                };\n                (CLValue::from_t(x).expect(\"should create CLValue\"), urefs)\n            }),\n            btree_map(\".*\", u512_arb(), 0..100)\n                .prop_map(|x| (CLValue::from_t(x).expect(\"should create CLValue\"), vec![])),\n            btree_map(uref_arb(), u512_arb(), 0..100).prop_map(|x| {\n                let urefs = x.keys().cloned().collect();\n                (CLValue::from_t(x).expect(\"should create CLValue\"), urefs)\n            }),\n            
btree_map(\".*\", uref_arb(), 0..100).prop_map(|x| {\n                let urefs = x.values().cloned().collect();\n                (CLValue::from_t(x).expect(\"should create CLValue\"), urefs)\n            }),\n            btree_map(uref_arb(), key_arb(), 0..100).prop_map(|x| {\n                let urefs: Vec<URef> = x\n                    .clone()\n                    .into_iter()\n                    .flat_map(|(k, v)| {\n                        vec![Some(k), v.into_uref()]\n                            .into_iter()\n                            .flatten()\n                            .collect::<Vec<URef>>()\n                    })\n                    .collect();\n                (CLValue::from_t(x).expect(\"should create CLValue\"), urefs)\n            }),\n            btree_map(key_arb(), uref_arb(), 0..100).prop_map(|x| {\n                let urefs: Vec<URef> = x\n                    .clone()\n                    .into_iter()\n                    .flat_map(|(k, v)| {\n                        vec![k.into_uref(), Some(v)]\n                            .into_iter()\n                            .flatten()\n                            .collect::<Vec<URef>>()\n                    })\n                    .collect();\n                (CLValue::from_t(x).expect(\"should create CLValue\"), urefs)\n            }),\n            btree_map(key_arb(), key_arb(), 0..100).prop_map(|x| {\n                let urefs: Vec<URef> = x\n                    .clone()\n                    .into_iter()\n                    .flat_map(|(k, v)| {\n                        vec![k.into_uref(), v.into_uref()]\n                            .into_iter()\n                            .flatten()\n                            .collect::<Vec<URef>>()\n                    })\n                    .collect();\n                (CLValue::from_t(x).expect(\"should create CLValue\"), urefs)\n            }),\n            (any::<bool>())\n                .prop_map(|x| (CLValue::from_t(x).expect(\"should create 
CLValue\"), vec![])),\n            (uref_arb())\n                .prop_map(|x| (CLValue::from_t(x).expect(\"should create CLValue\"), vec![x])),\n            (any::<bool>(), any::<i32>())\n                .prop_map(|x| (CLValue::from_t(x).expect(\"should create CLValue\"), vec![])),\n            (uref_arb(), any::<i32>()).prop_map(|x| {\n                let uref = x.0;\n                (\n                    CLValue::from_t(x).expect(\"should create CLValue\"),\n                    vec![uref],\n                )\n            }),\n            (any::<i32>(), key_arb()).prop_map(|x| {\n                let urefs = x.1.as_uref().into_iter().cloned().collect();\n                (CLValue::from_t(x).expect(\"should create CLValue\"), urefs)\n            }),\n            (uref_arb(), key_arb()).prop_map(|x| {\n                let mut urefs = vec![x.0];\n                urefs.extend(x.1.as_uref().into_iter().cloned());\n                (CLValue::from_t(x).expect(\"should create CLValue\"), urefs)\n            }),\n        ]\n    }\n\n    proptest! 
{\n        #[test]\n        fn should_extract_urefs((cl_value, urefs) in cl_value_with_urefs_arb()) {\n            let extracted_urefs = extract_urefs(&cl_value).unwrap();\n            prop_assert_eq!(extracted_urefs, urefs);\n        }\n    }\n\n    #[test]\n    fn extract_from_public_keys_to_urefs_map() {\n        let uref = URef::new([43; 32], AccessRights::READ_ADD_WRITE);\n        let mut map = BTreeMap::new();\n        map.insert(\n            PublicKey::from(\n                &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(),\n            ),\n            uref,\n        );\n        let cl_value = CLValue::from_t(map).unwrap();\n        assert_eq!(extract_urefs(&cl_value).unwrap(), vec![uref]);\n    }\n\n    #[test]\n    fn extract_from_public_keys_to_uref_keys_map() {\n        let uref = URef::new([43; 32], AccessRights::READ_ADD_WRITE);\n        let key = Key::from(uref);\n        let mut map = BTreeMap::new();\n        map.insert(\n            PublicKey::from(\n                &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(),\n            ),\n            key,\n        );\n        let cl_value = CLValue::from_t(map).unwrap();\n        assert_eq!(extract_urefs(&cl_value).unwrap(), vec![uref]);\n    }\n\n    #[test]\n    fn should_modify_urefs() {\n        let uref_1 = URef::new([1; 32], AccessRights::READ_ADD_WRITE);\n        let uref_2 = URef::new([2; 32], AccessRights::READ_ADD_WRITE);\n        let uref_3 = URef::new([3; 32], AccessRights::READ_ADD_WRITE);\n\n        let args = runtime_args! 
{\n            \"uref1\" => uref_1,\n            \"uref2\" => Some(uref_1),\n            \"uref3\" => vec![uref_2, uref_1, uref_3],\n            \"uref4\" => vec![Key::from(uref_3), Key::from(uref_2), Key::from(uref_1)],\n        };\n\n        let args = attenuate_uref_in_args(args, uref_1.addr(), AccessRights::WRITE).unwrap();\n\n        let arg = args.get(\"uref1\").unwrap().clone();\n        let lhs = arg.into_t::<URef>().unwrap();\n        let rhs = uref_1.with_access_rights(AccessRights::READ_ADD);\n        assert_eq!(lhs, rhs);\n\n        let arg = args.get(\"uref2\").unwrap().clone();\n        let lhs = arg.into_t::<Option<URef>>().unwrap();\n        let rhs = uref_1.with_access_rights(AccessRights::READ_ADD);\n        assert_eq!(lhs, Some(rhs));\n\n        let arg = args.get(\"uref3\").unwrap().clone();\n        let lhs = arg.into_t::<Vec<URef>>().unwrap();\n        let rhs = vec![\n            uref_2.with_access_rights(AccessRights::READ_ADD_WRITE),\n            uref_1.with_access_rights(AccessRights::READ_ADD),\n            uref_3.with_access_rights(AccessRights::READ_ADD_WRITE),\n        ];\n        assert_eq!(lhs, rhs);\n\n        let arg = args.get(\"uref4\").unwrap().clone();\n        let lhs = arg.into_t::<Vec<Key>>().unwrap();\n        let rhs = vec![\n            Key::from(uref_3.with_access_rights(AccessRights::READ_ADD_WRITE)),\n            Key::from(uref_2.with_access_rights(AccessRights::READ_ADD_WRITE)),\n            Key::from(uref_1.with_access_rights(AccessRights::READ_ADD)),\n        ];\n        assert_eq!(lhs, rhs);\n    }\n}\n"
  },
  {
    "path": "execution_engine/src/runtime/wasm_prep.rs",
    "content": "//! Preprocessing of Wasm modules.\nuse std::{convert::TryInto, num::NonZeroU32};\n\nuse thiserror::Error;\n\nuse casper_types::{OpcodeCosts, WasmConfig};\nuse casper_wasm::elements::{\n    self, External, Instruction, Internal, MemorySection, Module, Section, SignExtInstruction,\n    TableType, Type,\n};\nuse casper_wasm_utils::{\n    self,\n    rules::{MemoryGrowCost, Rules},\n    stack_height,\n};\n\nuse crate::execution::ExecError;\n\nconst ATOMIC_OPCODE_PREFIX: u8 = 0xfe;\nconst BULK_OPCODE_PREFIX: u8 = 0xfc;\nconst SIMD_OPCODE_PREFIX: u8 = 0xfd;\n\nconst DEFAULT_GAS_MODULE_NAME: &str = \"env\";\n/// Name of the internal gas function injected by [`casper_wasm_utils::inject_gas_counter`].\nconst INTERNAL_GAS_FUNCTION_NAME: &str = \"gas\";\n\n/// We only allow maximum of 4k function pointers in a table section.\npub const DEFAULT_MAX_TABLE_SIZE: u32 = 4096;\n/// Maximum number of elements that can appear as immediate value to the br_table instruction.\npub const DEFAULT_BR_TABLE_MAX_SIZE: u32 = 256;\n/// Maximum number of global a module is allowed to declare.\npub const DEFAULT_MAX_GLOBALS: u32 = 256;\n/// Maximum number of parameters a function can have.\npub const DEFAULT_MAX_PARAMETER_COUNT: u32 = 256;\n\n/// An error emitted by the Wasm preprocessor.\n#[derive(Debug, Clone, Error)]\n#[non_exhaustive]\npub enum WasmValidationError {\n    /// Initial table size outside allowed bounds.\n    #[error(\"initial table size of {actual} exceeds allowed limit of {max}\")]\n    InitialTableSizeExceeded {\n        /// Allowed maximum table size.\n        max: u32,\n        /// Actual initial table size specified in the Wasm.\n        actual: u32,\n    },\n    /// Maximum table size outside allowed bounds.\n    #[error(\"maximum table size of {actual} exceeds allowed limit of {max}\")]\n    MaxTableSizeExceeded {\n        /// Allowed maximum table size.\n        max: u32,\n        /// Actual max table size specified in the Wasm.\n        actual: u32,\n   
 },\n    /// Number of the tables in a Wasm must be at most one.\n    #[error(\"the number of tables must be at most one\")]\n    MoreThanOneTable,\n    /// Length of a br_table exceeded the maximum allowed size.\n    #[error(\"maximum br_table size of {actual} exceeds allowed limit of {max}\")]\n    BrTableSizeExceeded {\n        /// Maximum allowed br_table length.\n        max: u32,\n        /// Actual size of a br_table in the code.\n        actual: usize,\n    },\n    /// Declared number of globals exceeds allowed limit.\n    #[error(\"declared number of globals ({actual}) exceeds allowed limit of {max}\")]\n    TooManyGlobals {\n        /// Maximum allowed globals.\n        max: u32,\n        /// Actual number of globals declared in the Wasm.\n        actual: usize,\n    },\n    /// Module declares a function type with too many parameters.\n    #[error(\"use of a function type with too many parameters (limit of {max} but function declares {actual})\")]\n    TooManyParameters {\n        /// Maximum allowed parameters.\n        max: u32,\n        /// Actual number of parameters a function has in the Wasm.\n        actual: usize,\n    },\n    /// Module tries to import a function that the host does not provide.\n    #[error(\"module imports a non-existent function\")]\n    MissingHostFunction,\n    /// Opcode for a global access refers to a non-existing global\n    #[error(\"opcode for a global access refers to non-existing global index {index}\")]\n    IncorrectGlobalOperation {\n        /// Provided index.\n        index: u32,\n    },\n    /// Missing function index.\n    #[error(\"missing function index {index}\")]\n    MissingFunctionIndex {\n        /// Provided index.\n        index: u32,\n    },\n    /// Missing function type.\n    #[error(\"missing type index {index}\")]\n    MissingFunctionType {\n        /// Provided index.\n        index: u32,\n    },\n}\n\n/// An error emitted by the Wasm preprocessor.\n#[derive(Debug, Clone, 
Error)]\n#[non_exhaustive]\npub enum PreprocessingError {\n    /// Unable to deserialize Wasm bytes.\n    #[error(\"Deserialization error: {0}\")]\n    Deserialize(String),\n    /// Found opcodes forbidden by gas rules.\n    #[error(\n        \"Encountered operation forbidden by gas rules. Consult instruction -> metering config map\"\n    )]\n    OperationForbiddenByGasRules,\n    /// Stack limiter was unable to instrument the binary.\n    #[error(\"Stack limiter error\")]\n    StackLimiter,\n    /// Wasm bytes is missing memory section.\n    #[error(\"Memory section should exist\")]\n    MissingMemorySection,\n    /// The module is missing.\n    #[error(\"Missing module\")]\n    MissingModule,\n    /// Unable to validate wasm bytes.\n    #[error(\"Wasm validation error: {0}\")]\n    WasmValidation(#[from] WasmValidationError),\n}\n\nimpl From<elements::Error> for PreprocessingError {\n    fn from(error: elements::Error) -> Self {\n        PreprocessingError::Deserialize(error.to_string())\n    }\n}\n\n/// Ensures that all the references to functions and global variables in the wasm bytecode are\n/// properly declared.\n///\n/// This validates that:\n///\n/// - Start function points to a function declared in the Wasm bytecode\n/// - All exported functions are pointing to functions declared in the Wasm bytecode\n/// - `call` instructions reference a function declared in the Wasm bytecode.\n/// - `global.set`, `global.get` instructions are referencing an existing global declared in the\n///   Wasm bytecode.\n/// - All members of the \"elem\" section point at functions declared in the Wasm bytecode.\nfn ensure_valid_access(module: &Module) -> Result<(), WasmValidationError> {\n    let function_types_count = module\n        .type_section()\n        .map(|ts| ts.types().len())\n        .unwrap_or_default();\n\n    let mut function_count = 0_u32;\n    if let Some(import_section) = module.import_section() {\n        for import_entry in import_section.entries() {\n         
   if let External::Function(function_type_index) = import_entry.external() {\n                if (*function_type_index as usize) < function_types_count {\n                    function_count = function_count.saturating_add(1);\n                } else {\n                    return Err(WasmValidationError::MissingFunctionType {\n                        index: *function_type_index,\n                    });\n                }\n            }\n        }\n    }\n    if let Some(function_section) = module.function_section() {\n        for function_entry in function_section.entries() {\n            let function_type_index = function_entry.type_ref();\n            if (function_type_index as usize) < function_types_count {\n                function_count = function_count.saturating_add(1);\n            } else {\n                return Err(WasmValidationError::MissingFunctionType {\n                    index: function_type_index,\n                });\n            }\n        }\n    }\n\n    if let Some(function_index) = module.start_section() {\n        ensure_valid_function_index(function_index, function_count)?;\n    }\n    if let Some(export_section) = module.export_section() {\n        for export_entry in export_section.entries() {\n            if let Internal::Function(function_index) = export_entry.internal() {\n                ensure_valid_function_index(*function_index, function_count)?;\n            }\n        }\n    }\n\n    if let Some(code_section) = module.code_section() {\n        let global_len = module\n            .global_section()\n            .map(|global_section| global_section.entries().len())\n            .unwrap_or(0);\n\n        for instr in code_section\n            .bodies()\n            .iter()\n            .flat_map(|body| body.code().elements())\n        {\n            match instr {\n                Instruction::Call(idx) => {\n                    ensure_valid_function_index(*idx, function_count)?;\n                }\n                
Instruction::GetGlobal(idx) | Instruction::SetGlobal(idx)\n                    if *idx as usize >= global_len =>\n                {\n                    return Err(WasmValidationError::IncorrectGlobalOperation { index: *idx });\n                }\n                _ => {}\n            }\n        }\n    }\n\n    if let Some(element_section) = module.elements_section() {\n        for element_segment in element_section.entries() {\n            for idx in element_segment.members() {\n                ensure_valid_function_index(*idx, function_count)?;\n            }\n        }\n    }\n\n    Ok(())\n}\n\nfn ensure_valid_function_index(index: u32, function_count: u32) -> Result<(), WasmValidationError> {\n    if index >= function_count {\n        return Err(WasmValidationError::MissingFunctionIndex { index });\n    }\n    Ok(())\n}\n\n/// Checks if given wasm module contains a non-empty memory section.\nfn memory_section(module: &Module) -> Option<&MemorySection> {\n    for section in module.sections() {\n        if let Section::Memory(section) = section {\n            return if section.entries().is_empty() {\n                None\n            } else {\n                Some(section)\n            };\n        }\n    }\n    None\n}\n\n/// Ensures (table) section has at most one table entry, and initial, and maximum values are\n/// normalized.\n///\n/// If a maximum value is not specified it will be defaulted to 4k to prevent OOM.\nfn ensure_table_size_limit(mut module: Module, limit: u32) -> Result<Module, WasmValidationError> {\n    if let Some(sect) = module.table_section_mut() {\n        // Table section is optional and there can be at most one.\n        if sect.entries().len() > 1 {\n            return Err(WasmValidationError::MoreThanOneTable);\n        }\n\n        if let Some(table_entry) = sect.entries_mut().first_mut() {\n            let initial = table_entry.limits().initial();\n            if initial > limit {\n                return 
Err(WasmValidationError::InitialTableSizeExceeded {\n                    max: limit,\n                    actual: initial,\n                });\n            }\n\n            match table_entry.limits().maximum() {\n                Some(max) => {\n                    if max > limit {\n                        return Err(WasmValidationError::MaxTableSizeExceeded {\n                            max: limit,\n                            actual: max,\n                        });\n                    }\n                }\n                None => {\n                    // rewrite wasm and provide a maximum limit for a table section\n                    *table_entry = TableType::new(initial, Some(limit))\n                }\n            }\n        }\n    }\n\n    Ok(module)\n}\n\n/// Ensure that any `br_table` instruction adheres to its immediate value limit.\nfn ensure_br_table_size_limit(module: &Module, limit: u32) -> Result<(), WasmValidationError> {\n    let code_section = if let Some(type_section) = module.code_section() {\n        type_section\n    } else {\n        return Ok(());\n    };\n    for instr in code_section\n        .bodies()\n        .iter()\n        .flat_map(|body| body.code().elements())\n    {\n        if let Instruction::BrTable(br_table_data) = instr {\n            if br_table_data.table.len() > limit as usize {\n                return Err(WasmValidationError::BrTableSizeExceeded {\n                    max: limit,\n                    actual: br_table_data.table.len(),\n                });\n            }\n        }\n    }\n    Ok(())\n}\n\n/// Ensures that module doesn't declare too many globals.\n///\n/// Globals are not limited through the `stack_height` as locals are. 
Neither does\n/// the linear memory limit `memory_pages` applies to them.\nfn ensure_global_variable_limit(module: &Module, limit: u32) -> Result<(), WasmValidationError> {\n    if let Some(global_section) = module.global_section() {\n        let actual = global_section.entries().len();\n        if actual > limit as usize {\n            return Err(WasmValidationError::TooManyGlobals { max: limit, actual });\n        }\n    }\n    Ok(())\n}\n\n/// Ensure maximum numbers of parameters a function can have.\n///\n/// Those need to be limited to prevent a potentially exploitable interaction with\n/// the stack height instrumentation: The costs of executing the stack height\n/// instrumentation for an indirectly called function scales linearly with the amount\n/// of parameters of this function. Because the stack height instrumentation itself is\n/// is not weight metered its costs must be static (via this limit) and included in\n/// the costs of the instructions that cause them (call, call_indirect).\nfn ensure_parameter_limit(module: &Module, limit: u32) -> Result<(), WasmValidationError> {\n    let type_section = if let Some(type_section) = module.type_section() {\n        type_section\n    } else {\n        return Ok(());\n    };\n\n    for Type::Function(func) in type_section.types() {\n        let actual = func.params().len();\n        if actual > limit as usize {\n            return Err(WasmValidationError::TooManyParameters { max: limit, actual });\n        }\n    }\n\n    Ok(())\n}\n\n/// Ensures that Wasm module has valid imports.\nfn ensure_valid_imports(module: &Module) -> Result<(), WasmValidationError> {\n    let import_entries = module\n        .import_section()\n        .map(|is| is.entries())\n        .unwrap_or(&[]);\n\n    // Gas counter is currently considered an implementation detail.\n    //\n    // If a wasm module tries to import it will be rejected.\n\n    for import in import_entries {\n        if import.module() == DEFAULT_GAS_MODULE_NAME\n     
       && import.field() == INTERNAL_GAS_FUNCTION_NAME\n        {\n            return Err(WasmValidationError::MissingHostFunction);\n        }\n    }\n\n    Ok(())\n}\n\n/// Preprocesses Wasm bytes and returns a module.\n///\n/// This process consists of a few steps:\n/// - Validate that the given bytes contain a memory section, and check the memory page limit.\n/// - Inject gas counters into the code, which makes it possible for the executed Wasm to be charged\n///   for opcodes; this also validates opcodes and ensures that there are no forbidden opcodes in\n///   use, such as floating point opcodes.\n/// - Ensure that the code has a maximum stack height.\n///\n/// In case the preprocessing rules can't be applied, an error is returned.\n/// Otherwise, this method returns a valid module ready to be executed safely on the host.\npub fn preprocess(\n    wasm_config: WasmConfig,\n    module_bytes: &[u8],\n) -> Result<Module, PreprocessingError> {\n    let module = deserialize(module_bytes)?;\n\n    ensure_valid_access(&module)?;\n\n    if memory_section(&module).is_none() {\n        // `casper_wasm_utils::externalize_mem` expects a non-empty memory section to exist in the\n        // module, and panics otherwise.\n        return Err(PreprocessingError::MissingMemorySection);\n    }\n\n    let module = ensure_table_size_limit(module, DEFAULT_MAX_TABLE_SIZE)?;\n    ensure_br_table_size_limit(&module, DEFAULT_BR_TABLE_MAX_SIZE)?;\n    ensure_global_variable_limit(&module, DEFAULT_MAX_GLOBALS)?;\n    ensure_parameter_limit(&module, DEFAULT_MAX_PARAMETER_COUNT)?;\n    ensure_valid_imports(&module)?;\n\n    let costs = RuledOpcodeCosts(wasm_config.v1().opcode_costs());\n    let module = casper_wasm_utils::externalize_mem(module, None, wasm_config.v1().max_memory());\n    let module = casper_wasm_utils::inject_gas_counter(module, &costs, DEFAULT_GAS_MODULE_NAME)\n        .map_err(|_| PreprocessingError::OperationForbiddenByGasRules)?;\n    let module = 
stack_height::inject_limiter(module, wasm_config.v1().max_stack_height())\n        .map_err(|_| PreprocessingError::StackLimiter)?;\n    Ok(module)\n}\n\n/// Returns a parity Module from the given bytes without making modifications or checking limits.\npub fn deserialize(module_bytes: &[u8]) -> Result<Module, PreprocessingError> {\n    casper_wasm::deserialize_buffer::<Module>(module_bytes).map_err(|deserialize_error| {\n        match deserialize_error {\n            casper_wasm::SerializationError::UnknownOpcode(BULK_OPCODE_PREFIX) => {\n                PreprocessingError::Deserialize(\n                    \"Bulk memory operations are not supported\".to_string(),\n                )\n            }\n            casper_wasm::SerializationError::UnknownOpcode(SIMD_OPCODE_PREFIX) => {\n                PreprocessingError::Deserialize(\"SIMD operations are not supported\".to_string())\n            }\n            casper_wasm::SerializationError::UnknownOpcode(ATOMIC_OPCODE_PREFIX) => {\n                PreprocessingError::Deserialize(\"Atomic operations are not supported\".to_string())\n            }\n            casper_wasm::SerializationError::UnknownOpcode(_) => {\n                PreprocessingError::Deserialize(\"Encountered an unsupported operation\".to_string())\n            }\n            casper_wasm::SerializationError::Other(\n                \"Enable the multi_value feature to deserialize more than one function result\",\n            ) => {\n                // Due to the way casper-wasm crate works, it's always deserializes opcodes\n                // from multi_value proposal but if the feature is not enabled, then it will\n                // error with very specific message (as compared to other extensions).\n                //\n                // That's OK since we'd prefer to not inspect deserialized bytecode. 
We\n                // can simply replace the error message with a more user friendly one.\n                PreprocessingError::Deserialize(\n                    \"Multi value extension is not supported\".to_string(),\n                )\n            }\n            _ => deserialize_error.into(),\n        }\n    })\n}\n\n/// Creates new wasm module from entry points.\npub fn get_module_from_entry_points(\n    entry_point_names: Vec<&str>,\n    mut module: Module,\n) -> Result<Vec<u8>, ExecError> {\n    let export_section = module\n        .export_section()\n        .ok_or_else(|| ExecError::FunctionNotFound(String::from(\"Missing Export Section\")))?;\n\n    let maybe_missing_name: Option<String> = entry_point_names\n        .iter()\n        .find(|name| {\n            !export_section\n                .entries()\n                .iter()\n                .any(|export_entry| export_entry.field() == **name)\n        })\n        .map(|s| String::from(*s));\n\n    match maybe_missing_name {\n        Some(missing_name) => Err(ExecError::FunctionNotFound(missing_name)),\n        None => {\n            casper_wasm_utils::optimize(&mut module, entry_point_names)?;\n            casper_wasm::serialize(module).map_err(ExecError::ParityWasm)\n        }\n    }\n}\n\n/// Returns the cost of executing a single instruction.\n///\n/// This is benchmarked on a reference hardware, and calculated based on the multiplies of the\n/// cheapest opcode (nop) in the given instruction.\n///\n/// For instance, nop will always have cycle cost of 1, and all other opcodes will have a multiple\n/// of that.\n///\n/// The number of cycles for each instruction correlates, but not directly, to the reference x86_64\n/// CPU cycles it takes to execute the instruction as the interpreter does extra work to invoke an\n/// instruction.\npub fn cycles_for_instruction(instruction: &Instruction) -> u32 {\n    match instruction {\n        // The following instructions signal the beginning of a block, loop, or if 
construct. They\n        // don't have any static cost. Validated in benchmarks.\n        Instruction::Loop(_) => 1,\n        Instruction::Block(_) => 1,\n        Instruction::Else => 1,\n        Instruction::End => 1,\n\n        Instruction::Unreachable => 1,\n        Instruction::Nop => 1,\n\n        Instruction::If(_) => 3,\n\n        // These instructions are resuming execution from previously saved location (produced by\n        // loop or block).\n        Instruction::Br(_) => 1,\n        Instruction::BrIf(_) => 3,\n        Instruction::BrTable(_) => 5,\n\n        Instruction::Return => 1,\n\n        // Call opcodes are charged for each of the opcode individually. Validated in benchmarks.\n        Instruction::Call(_) => 22,\n        Instruction::CallIndirect(_, _) => 27,\n\n        Instruction::Drop => 1,\n\n        // Select opcode is validated in benchmarks.\n        Instruction::Select => 11,\n\n        Instruction::GetLocal(_) | Instruction::SetLocal(_) | Instruction::TeeLocal(_) => 5,\n\n        Instruction::GetGlobal(_) => 7,\n        Instruction::SetGlobal(_) => 5,\n\n        Instruction::I64Load32S(_, _)\n        | Instruction::F32Load(_, _)\n        | Instruction::F64Load(_, _)\n        | Instruction::I32Load(_, _)\n        | Instruction::I64Load(_, _)\n        | Instruction::I32Load8S(_, _)\n        | Instruction::I64Load32U(_, _)\n        | Instruction::I64Load8U(_, _)\n        | Instruction::I64Load8S(_, _)\n        | Instruction::I32Load8U(_, _)\n        | Instruction::I64Load16U(_, _)\n        | Instruction::I32Load16U(_, _)\n        | Instruction::I64Load16S(_, _)\n        | Instruction::I32Load16S(_, _) => 8,\n\n        Instruction::I32Store(_, _)\n        | Instruction::I64Store(_, _)\n        | Instruction::F32Store(_, _)\n        | Instruction::F64Store(_, _)\n        | Instruction::I32Store8(_, _)\n        | Instruction::I32Store16(_, _)\n        | Instruction::I64Store8(_, _)\n        | Instruction::I64Store16(_, _)\n        | 
Instruction::I64Store32(_, _) => 4,\n\n        Instruction::CurrentMemory(_) => 5,\n        Instruction::GrowMemory(_) => 5,\n\n        Instruction::I32Const(_)\n        | Instruction::I64Const(_)\n        | Instruction::F32Const(_)\n        | Instruction::F64Const(_) => 5,\n\n        Instruction::I32Eqz\n        | Instruction::I32Eq\n        | Instruction::I32Ne\n        | Instruction::I32LtS\n        | Instruction::I32LtU\n        | Instruction::I32GtS\n        | Instruction::I32GtU\n        | Instruction::I32LeS\n        | Instruction::I32LeU\n        | Instruction::I32GeS\n        | Instruction::I32GeU\n        | Instruction::I64Eqz\n        | Instruction::I64Eq\n        | Instruction::I64Ne\n        | Instruction::I64LtS\n        | Instruction::I64LtU\n        | Instruction::I64GtS\n        | Instruction::I64GtU\n        | Instruction::I64LeS\n        | Instruction::I64LeU\n        | Instruction::I64GeS\n        | Instruction::I64GeU => 5,\n\n        Instruction::F32Eq\n        | Instruction::F32Ne\n        | Instruction::F32Lt\n        | Instruction::F32Gt\n        | Instruction::F32Le\n        | Instruction::F32Ge\n        | Instruction::F64Eq\n        | Instruction::F64Ne\n        | Instruction::F64Lt\n        | Instruction::F64Gt\n        | Instruction::F64Le\n        | Instruction::F64Ge => 5,\n\n        Instruction::I32Clz | Instruction::I32Ctz | Instruction::I32Popcnt => 5,\n\n        Instruction::I32Add | Instruction::I32Sub => 5,\n\n        Instruction::I32Mul => 5,\n\n        Instruction::I32DivS\n        | Instruction::I32DivU\n        | Instruction::I32RemS\n        | Instruction::I32RemU => 5,\n\n        Instruction::I32And\n        | Instruction::I32Or\n        | Instruction::I32Xor\n        | Instruction::I32Shl\n        | Instruction::I32ShrS\n        | Instruction::I32ShrU\n        | Instruction::I32Rotl\n        | Instruction::I32Rotr\n        | Instruction::I64Clz\n        | Instruction::I64Ctz\n        | Instruction::I64Popcnt => 5,\n\n     
   Instruction::I64Add | Instruction::I64Sub => 5,\n        Instruction::I64Mul => 5,\n\n        Instruction::I64DivS\n        | Instruction::I64DivU\n        | Instruction::I64RemS\n        | Instruction::I64RemU => 5,\n\n        Instruction::I64And\n        | Instruction::I64Or\n        | Instruction::I64Xor\n        | Instruction::I64Shl\n        | Instruction::I64ShrS\n        | Instruction::I64ShrU\n        | Instruction::I64Rotl\n        | Instruction::I64Rotr => 5,\n\n        Instruction::F32Abs\n        | Instruction::F32Neg\n        | Instruction::F32Ceil\n        | Instruction::F32Floor\n        | Instruction::F32Trunc\n        | Instruction::F32Nearest\n        | Instruction::F32Sqrt\n        | Instruction::F32Add\n        | Instruction::F32Sub\n        | Instruction::F32Mul\n        | Instruction::F32Div\n        | Instruction::F32Min\n        | Instruction::F32Max\n        | Instruction::F32Copysign\n        | Instruction::F64Abs\n        | Instruction::F64Neg\n        | Instruction::F64Ceil\n        | Instruction::F64Floor\n        | Instruction::F64Trunc\n        | Instruction::F64Nearest\n        | Instruction::F64Sqrt\n        | Instruction::F64Add\n        | Instruction::F64Sub\n        | Instruction::F64Mul\n        | Instruction::F64Div\n        | Instruction::F64Min\n        | Instruction::F64Max\n        | Instruction::F64Copysign => 5,\n\n        Instruction::I32WrapI64 | Instruction::I64ExtendSI32 | Instruction::I64ExtendUI32 => 5,\n\n        Instruction::F32ConvertSI32\n        | Instruction::F32ConvertUI32\n        | Instruction::F32ConvertSI64\n        | Instruction::F32ConvertUI64\n        | Instruction::F32DemoteF64\n        | Instruction::F64ConvertSI32\n        | Instruction::F64ConvertUI32\n        | Instruction::F64ConvertSI64\n        | Instruction::F64ConvertUI64\n        | Instruction::F64PromoteF32 => 5,\n\n        // Unsupported reinterpretation operators for floats.\n        Instruction::I32ReinterpretF32\n        | 
Instruction::I64ReinterpretF64\n        | Instruction::F32ReinterpretI32\n        | Instruction::F64ReinterpretI64 => 5,\n\n        Instruction::SignExt(SignExtInstruction::I32Extend8S)\n        | Instruction::SignExt(SignExtInstruction::I32Extend16S)\n        | Instruction::SignExt(SignExtInstruction::I64Extend8S)\n        | Instruction::SignExt(SignExtInstruction::I64Extend16S)\n        | Instruction::SignExt(SignExtInstruction::I64Extend32S) => 5,\n\n        Instruction::I32TruncUF32 | Instruction::I64TruncSF32 => 40,\n\n        Instruction::I32TruncSF32 | Instruction::I64TruncUF32 => 42,\n\n        Instruction::I32TruncSF64\n        | Instruction::I32TruncUF64\n        | Instruction::I64TruncUF64\n        | Instruction::I64TruncSF64 => 195,\n    }\n}\n\nstruct RuledOpcodeCosts(OpcodeCosts);\n\nimpl RuledOpcodeCosts {\n    /// Returns the cost multiplier of executing a single instruction.\n    fn instruction_cost_multiplier(&self, instruction: &Instruction) -> Option<u32> {\n        let costs = self.0;\n\n        // Obtain the gas cost multiplier for the instruction.\n        match instruction {\n            Instruction::Unreachable => Some(costs.unreachable),\n            Instruction::Nop => Some(costs.nop),\n\n            // Control flow class of opcodes is charged for each of the opcode individually.\n            Instruction::Block(_) => Some(costs.control_flow.block),\n            Instruction::Loop(_) => Some(costs.control_flow.op_loop),\n            Instruction::If(_) => Some(costs.control_flow.op_if),\n            Instruction::Else => Some(costs.control_flow.op_else),\n            Instruction::End => Some(costs.control_flow.end),\n            Instruction::Br(_) => Some(costs.control_flow.br),\n            Instruction::BrIf(_) => Some(costs.control_flow.br_if),\n            Instruction::BrTable(br_table_data) => {\n                // If we're unable to fit table size in `u32` to measure the cost, then such wasm\n                // would be rejected. 
This is unlikely scenario as we impose a limit\n                // for the amount of targets a `br_table` opcode can contain.\n                let br_table_size: u32 = br_table_data.table.len().try_into().ok()?;\n\n                let br_table_cost = costs.control_flow.br_table.cost;\n\n                let table_size_part =\n                    br_table_size.checked_mul(costs.control_flow.br_table.size_multiplier)?;\n\n                let br_table_cost = br_table_cost.checked_add(table_size_part)?;\n                Some(br_table_cost)\n            }\n            Instruction::Return => Some(costs.control_flow.op_return),\n            Instruction::Call(_) => Some(costs.control_flow.call),\n            Instruction::CallIndirect(_, _) => Some(costs.control_flow.call_indirect),\n            Instruction::Drop => Some(costs.control_flow.drop),\n            Instruction::Select => Some(costs.control_flow.select),\n\n            Instruction::GetLocal(_) | Instruction::SetLocal(_) | Instruction::TeeLocal(_) => {\n                Some(costs.local)\n            }\n            Instruction::GetGlobal(_) | Instruction::SetGlobal(_) => Some(costs.global),\n\n            Instruction::I32Load(_, _)\n            | Instruction::I64Load(_, _)\n            | Instruction::F32Load(_, _)\n            | Instruction::F64Load(_, _)\n            | Instruction::I32Load8S(_, _)\n            | Instruction::I32Load8U(_, _)\n            | Instruction::I32Load16S(_, _)\n            | Instruction::I32Load16U(_, _)\n            | Instruction::I64Load8S(_, _)\n            | Instruction::I64Load8U(_, _)\n            | Instruction::I64Load16S(_, _)\n            | Instruction::I64Load16U(_, _)\n            | Instruction::I64Load32S(_, _)\n            | Instruction::I64Load32U(_, _) => Some(costs.load),\n\n            Instruction::I32Store(_, _)\n            | Instruction::I64Store(_, _)\n            | Instruction::F32Store(_, _)\n            | Instruction::F64Store(_, _)\n            | 
Instruction::I32Store8(_, _)\n            | Instruction::I32Store16(_, _)\n            | Instruction::I64Store8(_, _)\n            | Instruction::I64Store16(_, _)\n            | Instruction::I64Store32(_, _) => Some(costs.store),\n\n            Instruction::CurrentMemory(_) => Some(costs.current_memory),\n            Instruction::GrowMemory(_) => Some(costs.grow_memory),\n\n            Instruction::I32Const(_) | Instruction::I64Const(_) => Some(costs.op_const),\n\n            Instruction::F32Const(_) | Instruction::F64Const(_) => None, // float_const\n\n            Instruction::I32Eqz\n            | Instruction::I32Eq\n            | Instruction::I32Ne\n            | Instruction::I32LtS\n            | Instruction::I32LtU\n            | Instruction::I32GtS\n            | Instruction::I32GtU\n            | Instruction::I32LeS\n            | Instruction::I32LeU\n            | Instruction::I32GeS\n            | Instruction::I32GeU\n            | Instruction::I64Eqz\n            | Instruction::I64Eq\n            | Instruction::I64Ne\n            | Instruction::I64LtS\n            | Instruction::I64LtU\n            | Instruction::I64GtS\n            | Instruction::I64GtU\n            | Instruction::I64LeS\n            | Instruction::I64LeU\n            | Instruction::I64GeS\n            | Instruction::I64GeU => Some(costs.integer_comparison),\n\n            Instruction::F32Eq\n            | Instruction::F32Ne\n            | Instruction::F32Lt\n            | Instruction::F32Gt\n            | Instruction::F32Le\n            | Instruction::F32Ge\n            | Instruction::F64Eq\n            | Instruction::F64Ne\n            | Instruction::F64Lt\n            | Instruction::F64Gt\n            | Instruction::F64Le\n            | Instruction::F64Ge => None, // Unsupported comparison operators for floats.\n\n            Instruction::I32Clz | Instruction::I32Ctz | Instruction::I32Popcnt => Some(costs.bit),\n\n            Instruction::I32Add | Instruction::I32Sub => 
Some(costs.add),\n\n            Instruction::I32Mul => Some(costs.mul),\n\n            Instruction::I32DivS\n            | Instruction::I32DivU\n            | Instruction::I32RemS\n            | Instruction::I32RemU => Some(costs.div),\n\n            Instruction::I32And\n            | Instruction::I32Or\n            | Instruction::I32Xor\n            | Instruction::I32Shl\n            | Instruction::I32ShrS\n            | Instruction::I32ShrU\n            | Instruction::I32Rotl\n            | Instruction::I32Rotr\n            | Instruction::I64Clz\n            | Instruction::I64Ctz\n            | Instruction::I64Popcnt => Some(costs.bit),\n\n            Instruction::I64Add | Instruction::I64Sub => Some(costs.add),\n            Instruction::I64Mul => Some(costs.mul),\n\n            Instruction::I64DivS\n            | Instruction::I64DivU\n            | Instruction::I64RemS\n            | Instruction::I64RemU => Some(costs.div),\n\n            Instruction::I64And\n            | Instruction::I64Or\n            | Instruction::I64Xor\n            | Instruction::I64Shl\n            | Instruction::I64ShrS\n            | Instruction::I64ShrU\n            | Instruction::I64Rotl\n            | Instruction::I64Rotr => Some(costs.bit),\n\n            Instruction::F32Abs\n            | Instruction::F32Neg\n            | Instruction::F32Ceil\n            | Instruction::F32Floor\n            | Instruction::F32Trunc\n            | Instruction::F32Nearest\n            | Instruction::F32Sqrt\n            | Instruction::F32Add\n            | Instruction::F32Sub\n            | Instruction::F32Mul\n            | Instruction::F32Div\n            | Instruction::F32Min\n            | Instruction::F32Max\n            | Instruction::F32Copysign\n            | Instruction::F64Abs\n            | Instruction::F64Neg\n            | Instruction::F64Ceil\n            | Instruction::F64Floor\n            | Instruction::F64Trunc\n            | Instruction::F64Nearest\n            | 
Instruction::F64Sqrt\n            | Instruction::F64Add\n            | Instruction::F64Sub\n            | Instruction::F64Mul\n            | Instruction::F64Div\n            | Instruction::F64Min\n            | Instruction::F64Max\n            | Instruction::F64Copysign => None, // Unsupported math operators for floats.\n\n            Instruction::I32WrapI64 | Instruction::I64ExtendSI32 | Instruction::I64ExtendUI32 => {\n                Some(costs.conversion)\n            }\n\n            Instruction::I32TruncSF32\n            | Instruction::I32TruncUF32\n            | Instruction::I32TruncSF64\n            | Instruction::I32TruncUF64\n            | Instruction::I64TruncSF32\n            | Instruction::I64TruncUF32\n            | Instruction::I64TruncSF64\n            | Instruction::I64TruncUF64\n            | Instruction::F32ConvertSI32\n            | Instruction::F32ConvertUI32\n            | Instruction::F32ConvertSI64\n            | Instruction::F32ConvertUI64\n            | Instruction::F32DemoteF64\n            | Instruction::F64ConvertSI32\n            | Instruction::F64ConvertUI32\n            | Instruction::F64ConvertSI64\n            | Instruction::F64ConvertUI64\n            | Instruction::F64PromoteF32 => None, // Unsupported conversion operators for floats.\n\n            // Unsupported reinterpretation operators for floats.\n            Instruction::I32ReinterpretF32\n            | Instruction::I64ReinterpretF64\n            | Instruction::F32ReinterpretI32\n            | Instruction::F64ReinterpretI64 => None,\n\n            Instruction::SignExt(_) => Some(costs.sign),\n        }\n    }\n}\n\nimpl Rules for RuledOpcodeCosts {\n    fn instruction_cost(&self, instruction: &Instruction) -> Option<u32> {\n        // The number of cycles for each instruction correlates, but not directly, to the reference\n        // x86_64 CPU cycles.\n        let cycles = cycles_for_instruction(instruction);\n\n        // The cost of executing an instruction is the 
number of cycles times the cost of a nop.\n        let multiplier = self.instruction_cost_multiplier(instruction)?;\n\n        cycles.checked_mul(multiplier)\n    }\n\n    fn memory_grow_cost(&self) -> Option<MemoryGrowCost> {\n        NonZeroU32::new(self.0.grow_memory).map(MemoryGrowCost::Linear)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use casper_types::addressable_entity::DEFAULT_ENTRY_POINT_NAME;\n    use casper_wasm::{\n        builder,\n        elements::{CodeSection, Instructions},\n    };\n    use walrus::{FunctionBuilder, ModuleConfig, ValType};\n\n    use super::*;\n\n    #[test]\n    fn should_not_panic_on_empty_memory() {\n        // These bytes were generated during fuzz testing and are compiled from Wasm which\n        // deserializes to a `Module` with a memory section containing no entries.\n        const MODULE_BYTES_WITH_EMPTY_MEMORY: [u8; 61] = [\n            0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00, 0x01, 0x09, 0x02, 0x60, 0x01, 0x7f,\n            0x01, 0x7f, 0x60, 0x00, 0x00, 0x03, 0x03, 0x02, 0x00, 0x01, 0x05, 0x01, 0x00, 0x08,\n            0x01, 0x01, 0x0a, 0x1d, 0x02, 0x18, 0x00, 0x20, 0x00, 0x41, 0x80, 0x80, 0x82, 0x80,\n            0x78, 0x70, 0x41, 0x80, 0x82, 0x80, 0x80, 0x7e, 0x4f, 0x22, 0x00, 0x1a, 0x20, 0x00,\n            0x0f, 0x0b, 0x02, 0x00, 0x0b,\n        ];\n\n        match preprocess(WasmConfig::default(), &MODULE_BYTES_WITH_EMPTY_MEMORY).unwrap_err() {\n            PreprocessingError::MissingMemorySection => (),\n            error => panic!(\"expected MissingMemorySection, got {:?}\", error),\n        }\n    }\n\n    #[test]\n    fn should_not_overflow_in_export_section() {\n        let module = builder::module()\n            .function()\n            .signature()\n            .build()\n            .body()\n            .with_instructions(Instructions::new(vec![Instruction::Nop, Instruction::End]))\n            .build()\n            .build()\n            .export()\n            .field(DEFAULT_ENTRY_POINT_NAME)\n         
   .internal()\n            .func(u32::MAX)\n            .build()\n            // Memory section is mandatory\n            .memory()\n            .build()\n            .build();\n        let module_bytes = casper_wasm::serialize(module).expect(\"should serialize\");\n        let error = preprocess(WasmConfig::default(), &module_bytes)\n            .expect_err(\"should fail with an error\");\n        assert!(\n            matches!(\n                &error,\n                PreprocessingError::WasmValidation(WasmValidationError::MissingFunctionIndex { index: missing_index })\n                if *missing_index == u32::MAX\n            ),\n            \"{:?}\",\n            error,\n        );\n    }\n\n    #[test]\n    fn should_not_overflow_in_element_section() {\n        const CALL_FN_IDX: u32 = 0;\n\n        let module = builder::module()\n            .function()\n            .signature()\n            .build()\n            .body()\n            .with_instructions(Instructions::new(vec![Instruction::Nop, Instruction::End]))\n            .build()\n            .build()\n            // Export above function\n            .export()\n            .field(DEFAULT_ENTRY_POINT_NAME)\n            .internal()\n            .func(CALL_FN_IDX)\n            .build()\n            .table()\n            .with_element(u32::MAX, vec![u32::MAX])\n            .build()\n            // Memory section is mandatory\n            .memory()\n            .build()\n            .build();\n        let module_bytes = casper_wasm::serialize(module).expect(\"should serialize\");\n        let error = preprocess(WasmConfig::default(), &module_bytes)\n            .expect_err(\"should fail with an error\");\n        assert!(\n            matches!(\n                &error,\n                PreprocessingError::WasmValidation(WasmValidationError::MissingFunctionIndex { index: missing_index })\n                if *missing_index == u32::MAX\n            ),\n            \"{:?}\",\n            error,\n        );\n   
 }\n\n    #[test]\n    fn should_not_overflow_in_call_opcode() {\n        let module = builder::module()\n            .function()\n            .signature()\n            .build()\n            .body()\n            .with_instructions(Instructions::new(vec![\n                Instruction::Call(u32::MAX),\n                Instruction::End,\n            ]))\n            .build()\n            .build()\n            // Export above function\n            .export()\n            .field(DEFAULT_ENTRY_POINT_NAME)\n            .build()\n            // .with_sections(vec![Section::Start(u32::MAX)])\n            // Memory section is mandatory\n            .memory()\n            .build()\n            .build();\n        let module_bytes = casper_wasm::serialize(module).expect(\"should serialize\");\n        let error = preprocess(WasmConfig::default(), &module_bytes)\n            .expect_err(\"should fail with an error\");\n        assert!(\n            matches!(\n                &error,\n                PreprocessingError::WasmValidation(WasmValidationError::MissingFunctionIndex { index: missing_index })\n                if *missing_index == u32::MAX\n            ),\n            \"{:?}\",\n            error,\n        );\n    }\n\n    #[test]\n    fn should_not_overflow_in_start_section_without_code_section() {\n        let module = builder::module()\n            .with_section(Section::Start(u32::MAX))\n            .memory()\n            .build()\n            .build();\n        let module_bytes = casper_wasm::serialize(module).expect(\"should serialize\");\n\n        let error = preprocess(WasmConfig::default(), &module_bytes)\n            .expect_err(\"should fail with an error\");\n        assert!(\n            matches!(\n                &error,\n                PreprocessingError::WasmValidation(WasmValidationError::MissingFunctionIndex { index: missing_index })\n                if *missing_index == u32::MAX\n            ),\n            \"{:?}\",\n            error,\n        );\n   
 }\n\n    #[test]\n    fn should_not_overflow_in_start_section_with_code() {\n        let module = builder::module()\n            .with_section(Section::Start(u32::MAX))\n            .with_section(Section::Code(CodeSection::with_bodies(Vec::new())))\n            .memory()\n            .build()\n            .build();\n        let module_bytes = casper_wasm::serialize(module).expect(\"should serialize\");\n        let error = preprocess(WasmConfig::default(), &module_bytes)\n            .expect_err(\"should fail with an error\");\n        assert!(\n            matches!(\n                &error,\n                PreprocessingError::WasmValidation(WasmValidationError::MissingFunctionIndex { index: missing_index })\n                if *missing_index == u32::MAX\n            ),\n            \"{:?}\",\n            error,\n        );\n    }\n\n    #[test]\n    fn should_not_accept_multi_value_proposal_wasm() {\n        let module_bytes = {\n            let mut module = walrus::Module::with_config(ModuleConfig::new());\n\n            let _memory_id = module.memories.add_local(false, 11, None);\n\n            let mut func_with_locals =\n                FunctionBuilder::new(&mut module.types, &[], &[ValType::I32, ValType::I64]);\n\n            func_with_locals.func_body().i64_const(0).i32_const(1);\n\n            let func_with_locals = func_with_locals.finish(vec![], &mut module.funcs);\n\n            let mut call_func = FunctionBuilder::new(&mut module.types, &[], &[]);\n\n            call_func.func_body().call(func_with_locals);\n\n            let call = call_func.finish(Vec::new(), &mut module.funcs);\n\n            module.exports.add(DEFAULT_ENTRY_POINT_NAME, call);\n\n            module.emit_wasm()\n        };\n        let error = preprocess(WasmConfig::default(), &module_bytes)\n            .expect_err(\"should fail with an error\");\n        assert!(\n            matches!(&error, PreprocessingError::Deserialize(msg)\n            if msg == \"Multi value extension is not 
supported\"),\n            \"{:?}\",\n            error,\n        );\n    }\n\n    #[test]\n    fn should_not_accept_atomics_proposal_wasm() {\n        let module_bytes = {\n            let mut module = walrus::Module::with_config(ModuleConfig::new());\n\n            let _memory_id = module.memories.add_local(false, 11, None);\n\n            let mut func_with_atomics = FunctionBuilder::new(&mut module.types, &[], &[]);\n\n            func_with_atomics.func_body().atomic_fence();\n\n            let func_with_atomics = func_with_atomics.finish(vec![], &mut module.funcs);\n\n            let mut call_func = FunctionBuilder::new(&mut module.types, &[], &[]);\n\n            call_func.func_body().call(func_with_atomics);\n\n            let call = call_func.finish(Vec::new(), &mut module.funcs);\n\n            module.exports.add(DEFAULT_ENTRY_POINT_NAME, call);\n\n            module.emit_wasm()\n        };\n        let error = preprocess(WasmConfig::default(), &module_bytes)\n            .expect_err(\"should fail with an error\");\n        assert!(\n            matches!(&error, PreprocessingError::Deserialize(msg)\n            if msg == \"Atomic operations are not supported\"),\n            \"{:?}\",\n            error,\n        );\n    }\n\n    #[test]\n    fn should_not_accept_bulk_proposal_wasm() {\n        let module_bytes = {\n            let mut module = walrus::Module::with_config(ModuleConfig::new());\n\n            let memory_id = module.memories.add_local(false, 11, None);\n\n            let mut func_with_bulk = FunctionBuilder::new(&mut module.types, &[], &[]);\n\n            func_with_bulk.func_body().memory_copy(memory_id, memory_id);\n\n            let func_with_bulk = func_with_bulk.finish(vec![], &mut module.funcs);\n\n            let mut call_func = FunctionBuilder::new(&mut module.types, &[], &[]);\n\n            call_func.func_body().call(func_with_bulk);\n\n            let call = call_func.finish(Vec::new(), &mut module.funcs);\n\n            
module.exports.add(DEFAULT_ENTRY_POINT_NAME, call);\n\n            module.emit_wasm()\n        };\n        let error = preprocess(WasmConfig::default(), &module_bytes)\n            .expect_err(\"should fail with an error\");\n        assert!(\n            matches!(&error, PreprocessingError::Deserialize(msg)\n            if msg == \"Bulk memory operations are not supported\"),\n            \"{:?}\",\n            error,\n        );\n    }\n\n    #[test]\n    fn should_not_accept_simd_proposal_wasm() {\n        let module_bytes = {\n            let mut module = walrus::Module::with_config(ModuleConfig::new());\n\n            let _memory_id = module.memories.add_local(false, 11, None);\n\n            let mut func_with_simd = FunctionBuilder::new(&mut module.types, &[], &[]);\n\n            func_with_simd.func_body().v128_bitselect();\n\n            let func_with_simd = func_with_simd.finish(vec![], &mut module.funcs);\n\n            let mut call_func = FunctionBuilder::new(&mut module.types, &[], &[]);\n\n            call_func.func_body().call(func_with_simd);\n\n            let call = call_func.finish(Vec::new(), &mut module.funcs);\n\n            module.exports.add(DEFAULT_ENTRY_POINT_NAME, call);\n\n            module.emit_wasm()\n        };\n        let error = preprocess(WasmConfig::default(), &module_bytes)\n            .expect_err(\"should fail with an error\");\n        assert!(\n            matches!(&error, PreprocessingError::Deserialize(msg)\n            if msg == \"SIMD operations are not supported\"),\n            \"{:?}\",\n            error,\n        );\n    }\n}\n"
  },
  {
    "path": "execution_engine/src/runtime_context/mod.rs",
    "content": "//! The context of execution of WASM code.\n\n#[cfg(test)]\nmod tests;\n\nuse std::{\n    cell::RefCell,\n    collections::BTreeSet,\n    convert::{TryFrom, TryInto},\n    fmt::Debug,\n    rc::Rc,\n};\n\nuse tracing::error;\n\nuse casper_storage::{\n    global_state::{error::Error as GlobalStateError, state::StateReader},\n    tracking_copy::{\n        AddResult, TrackingCopy, TrackingCopyCache, TrackingCopyEntityExt, TrackingCopyError,\n        TrackingCopyExt,\n    },\n    AddressGenerator,\n};\n\nuse casper_types::{\n    account::{\n        Account, AccountHash, AddKeyFailure, RemoveKeyFailure, SetThresholdFailure,\n        UpdateKeyFailure,\n    },\n    addressable_entity::{\n        ActionType, EntityKindTag, MessageTopicError, MessageTopics, NamedKeyAddr, NamedKeyValue,\n        Weight,\n    },\n    bytesrepr::ToBytes,\n    contract_messages::{Message, MessageAddr, MessageTopicSummary, Messages, TopicNameHash},\n    contracts::{ContractHash, ContractPackage, ContractPackageHash, NamedKeys},\n    execution::Effects,\n    handle_stored_dictionary_value,\n    system::auction::EraInfo,\n    AccessRights, AddressableEntity, AddressableEntityHash, BlockTime, CLType, CLValue,\n    CLValueDictionary, ContextAccessRights, Contract, EntityAddr, EntryPointAddr, EntryPointType,\n    EntryPointValue, EntryPoints, Gas, GrantedAccess, HashAddr, Key, KeyTag, Motes, Package,\n    PackageHash, Phase, ProtocolVersion, RuntimeArgs, RuntimeFootprint, StoredValue,\n    StoredValueTypeMismatch, SystemHashRegistry, TransactionHash, Transfer, URef, URefAddr,\n    DICTIONARY_ITEM_KEY_MAX_LENGTH, KEY_HASH_LENGTH, U512,\n};\n\nuse crate::{\n    engine_state::{BlockInfo, EngineConfig},\n    execution::ExecError,\n};\n\n/// Number of bytes returned from the `random_bytes` function.\npub const RANDOM_BYTES_COUNT: usize = 32;\n\n/// Whether the execution is permitted to call FFI `casper_add_contract_version()` or not.\n#[derive(Copy, Clone, PartialEq, Eq, Debug)]\npub enum 
AllowInstallUpgrade {\n    /// Allowed.\n    Allowed,\n    /// Forbidden.\n    Forbidden,\n}\n\n/// Holds information specific to the deployed contract.\npub struct RuntimeContext<'a, R> {\n    tracking_copy: Rc<RefCell<TrackingCopy<R>>>,\n    // Enables look up of specific uref based on human-readable name\n    named_keys: &'a mut NamedKeys,\n    // Used to check uref is known before use (prevents forging urefs)\n    access_rights: ContextAccessRights,\n    args: RuntimeArgs,\n    authorization_keys: BTreeSet<AccountHash>,\n    block_info: BlockInfo,\n    transaction_hash: TransactionHash,\n    gas_limit: Gas,\n    gas_counter: Gas,\n    address_generator: Rc<RefCell<AddressGenerator>>,\n    phase: Phase,\n    engine_config: EngineConfig,\n    entry_point_type: EntryPointType,\n    transfers: Vec<Transfer>,\n    remaining_spending_limit: U512,\n\n    // Original account/contract for read only tasks taken before execution\n    runtime_footprint: Rc<RefCell<RuntimeFootprint>>,\n    // Key pointing to the account / contract / entity context this instance is tied to\n    context_key: Key,\n    account_hash: AccountHash,\n    emit_message_cost: U512,\n    allow_install_upgrade: AllowInstallUpgrade,\n    payment_purse: Option<URef>,\n}\n\nimpl<'a, R> RuntimeContext<'a, R>\nwhere\n    R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    /// Creates new runtime context where we don't already have one.\n    ///\n    /// Where we already have a runtime context, consider using `new_from_self()`.\n    #[allow(clippy::too_many_arguments)]\n    pub fn new(\n        named_keys: &'a mut NamedKeys,\n        runtime_footprint: Rc<RefCell<RuntimeFootprint>>,\n        context_key: Key,\n        authorization_keys: BTreeSet<AccountHash>,\n        access_rights: ContextAccessRights,\n        account_hash: AccountHash,\n        address_generator: Rc<RefCell<AddressGenerator>>,\n        tracking_copy: Rc<RefCell<TrackingCopy<R>>>,\n        engine_config: EngineConfig,\n    
    block_info: BlockInfo,\n        transaction_hash: TransactionHash,\n        phase: Phase,\n        args: RuntimeArgs,\n        gas_limit: Gas,\n        gas_counter: Gas,\n        transfers: Vec<Transfer>,\n        remaining_spending_limit: U512,\n        entry_point_type: EntryPointType,\n        allow_install_upgrade: AllowInstallUpgrade,\n    ) -> Self {\n        let emit_message_cost = (*engine_config.wasm_config().v1())\n            .take_host_function_costs()\n            .emit_message\n            .cost()\n            .into();\n        RuntimeContext {\n            tracking_copy,\n            entry_point_type,\n            named_keys,\n            access_rights,\n            args,\n            runtime_footprint,\n            context_key,\n            authorization_keys,\n            account_hash,\n            block_info,\n            transaction_hash,\n            gas_limit,\n            gas_counter,\n            address_generator,\n            phase,\n            engine_config,\n            transfers,\n            remaining_spending_limit,\n            emit_message_cost,\n            allow_install_upgrade,\n            payment_purse: None,\n        }\n    }\n\n    /// Creates new runtime context cloning values from self.\n    #[allow(clippy::too_many_arguments)]\n    pub fn new_from_self(\n        &self,\n        context_key: Key,\n        entry_point_type: EntryPointType,\n        named_keys: &'a mut NamedKeys,\n        access_rights: ContextAccessRights,\n        runtime_args: RuntimeArgs,\n    ) -> Self {\n        let runtime_footprint = self.runtime_footprint.clone();\n        let authorization_keys = self.authorization_keys.clone();\n        let account_hash = self.account_hash;\n\n        let address_generator = self.address_generator.clone();\n        let tracking_copy = self.state();\n        let engine_config = self.engine_config.clone();\n\n        let block_info = self.block_info;\n        let transaction_hash = self.transaction_hash;\n        
let phase = self.phase;\n\n        let gas_limit = self.gas_limit;\n        let gas_counter = self.gas_counter;\n        let remaining_spending_limit = self.remaining_spending_limit();\n\n        let transfers = self.transfers.clone();\n        let payment_purse = self.payment_purse;\n\n        RuntimeContext {\n            tracking_copy,\n            entry_point_type,\n            named_keys,\n            access_rights,\n            args: runtime_args,\n            runtime_footprint,\n            context_key,\n            authorization_keys,\n            account_hash,\n            block_info,\n            transaction_hash,\n            gas_limit,\n            gas_counter,\n            address_generator,\n            phase,\n            engine_config,\n            transfers,\n            remaining_spending_limit,\n            emit_message_cost: self.emit_message_cost,\n            allow_install_upgrade: self.allow_install_upgrade,\n            payment_purse,\n        }\n    }\n\n    /// Returns all authorization keys for this deploy.\n    pub fn authorization_keys(&self) -> &BTreeSet<AccountHash> {\n        &self.authorization_keys\n    }\n\n    /// Returns a named key by a name if it exists.\n    pub fn named_keys_get(&self, name: &str) -> Option<&Key> {\n        self.named_keys.get(name)\n    }\n\n    /// Returns named keys.\n    pub fn named_keys(&self) -> &NamedKeys {\n        self.named_keys\n    }\n\n    /// Returns a mutable reference to named keys.\n    pub fn named_keys_mut(&mut self) -> &mut NamedKeys {\n        self.named_keys\n    }\n\n    /// Checks if named keys contains a key referenced by name.\n    pub fn named_keys_contains_key(&self, name: &str) -> bool {\n        self.named_keys.contains(name)\n    }\n\n    /// Returns the payment purse, if set.\n    pub fn maybe_payment_purse(&self) -> Option<URef> {\n        self.payment_purse\n    }\n\n    /// Sets the payment purse to the imputed uref.\n    pub fn set_payment_purse(&mut self, uref: URef) {\n 
       self.payment_purse = Some(uref);\n    }\n\n    /// Returns an instance of the engine config.\n    pub fn engine_config(&self) -> &EngineConfig {\n        &self.engine_config\n    }\n\n    /// Helper function to avoid duplication in `remove_uref`.\n    fn remove_key_from_contract(\n        &mut self,\n        key: Key,\n        mut contract: Contract,\n        name: &str,\n    ) -> Result<(), ExecError> {\n        if contract.remove_named_key(name).is_none() {\n            return Ok(());\n        }\n        self.metered_write_gs_unsafe(key, contract)?;\n        Ok(())\n    }\n\n    /// Helper function to avoid duplication in `remove_uref`.\n    fn remove_key_from_entity(&mut self, name: &str) -> Result<(), ExecError> {\n        let key = self.context_key;\n        match key {\n            Key::AddressableEntity(entity_addr) => {\n                let named_key =\n                    NamedKeyAddr::new_from_string(entity_addr, name.to_string())?.into();\n                if let Some(StoredValue::NamedKey(_)) = self.read_gs(&named_key)? 
{\n                    self.prune_gs_unsafe(named_key);\n                }\n            }\n            account_hash @ Key::Account(_) => {\n                let account: Account = {\n                    let mut account: Account = self.read_gs_typed(&account_hash)?;\n                    account.named_keys_mut().remove(name);\n                    account\n                };\n                self.named_keys.remove(name);\n                let account_value = self.account_to_validated_value(account)?;\n                self.metered_write_gs_unsafe(account_hash, account_value)?;\n            }\n            contract_uref @ Key::URef(_) => {\n                let contract: Contract = {\n                    let value: StoredValue = self\n                        .tracking_copy\n                        .borrow_mut()\n                        .read(&contract_uref)?\n                        .ok_or(ExecError::KeyNotFound(contract_uref))?;\n\n                    value.try_into().map_err(ExecError::TypeMismatch)?\n                };\n\n                self.named_keys.remove(name);\n                self.remove_key_from_contract(contract_uref, contract, name)?\n            }\n            contract_hash @ Key::Hash(_) => {\n                let contract: Contract = self.read_gs_typed(&contract_hash)?;\n                self.named_keys.remove(name);\n                self.remove_key_from_contract(contract_hash, contract, name)?\n            }\n            _ => return Err(ExecError::UnexpectedKeyVariant(key)),\n        }\n        Ok(())\n    }\n\n    /// Remove Key from the `named_keys` map of the current context.\n    /// It removes both from the ephemeral map (RuntimeContext::named_keys) but\n    /// also the to-be-persisted map (in the TrackingCopy/GlobalState).\n    pub fn remove_key(&mut self, name: &str) -> Result<(), ExecError> {\n        self.named_keys.remove(name);\n        self.remove_key_from_entity(name)\n    }\n\n    /// Returns block info.\n    pub fn get_block_info(&self) -> 
BlockInfo {\n        self.block_info\n    }\n\n    /// Returns the transaction hash.\n    pub fn get_transaction_hash(&self) -> TransactionHash {\n        self.transaction_hash\n    }\n\n    /// Extends access rights with a new map.\n    pub fn access_rights_extend(&mut self, urefs: &[URef]) {\n        self.access_rights.extend(urefs);\n    }\n\n    /// Returns a mapping of access rights for each [`URef`]s address.\n    pub fn access_rights(&self) -> &ContextAccessRights {\n        &self.access_rights\n    }\n\n    /// Returns footprint of the caller.\n    pub fn runtime_footprint(&self) -> Rc<RefCell<RuntimeFootprint>> {\n        Rc::clone(&self.runtime_footprint)\n    }\n\n    /// Returns arguments.\n    pub fn args(&self) -> &RuntimeArgs {\n        &self.args\n    }\n\n    pub(crate) fn set_args(&mut self, args: RuntimeArgs) {\n        self.args = args\n    }\n\n    /// Returns new shared instance of an address generator.\n    pub fn address_generator(&self) -> Rc<RefCell<AddressGenerator>> {\n        Rc::clone(&self.address_generator)\n    }\n\n    /// Returns new shared instance of a tracking copy.\n    pub(super) fn state(&self) -> Rc<RefCell<TrackingCopy<R>>> {\n        Rc::clone(&self.tracking_copy)\n    }\n\n    /// Returns the gas limit.\n    pub fn gas_limit(&self) -> Gas {\n        self.gas_limit\n    }\n\n    /// Returns the current gas counter.\n    pub fn gas_counter(&self) -> Gas {\n        self.gas_counter\n    }\n\n    /// Sets the gas counter to a new value.\n    pub fn set_gas_counter(&mut self, new_gas_counter: Gas) {\n        self.gas_counter = new_gas_counter;\n    }\n\n    /// Returns the context key for this instance.\n    pub fn get_context_key(&self) -> Key {\n        self.context_key\n    }\n\n    /// Returns the initiator of the call chain.\n    pub fn get_initiator(&self) -> AccountHash {\n        self.account_hash\n    }\n\n    /// Returns the protocol version.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        
self.block_info.protocol_version()\n    }\n\n    /// Returns the current phase.\n    pub fn phase(&self) -> Phase {\n        self.phase\n    }\n\n    /// Returns `true` if the execution is permitted to call `casper_add_contract_version()`.\n    pub fn install_upgrade_allowed(&self) -> bool {\n        self.allow_install_upgrade == AllowInstallUpgrade::Allowed\n    }\n\n    /// Generates new deterministic hash for uses as an address.\n    pub fn new_hash_address(&mut self) -> Result<[u8; KEY_HASH_LENGTH], ExecError> {\n        Ok(self.address_generator.borrow_mut().new_hash_address())\n    }\n\n    /// Returns 32 pseudo random bytes.\n    pub fn random_bytes(&mut self) -> Result<[u8; RANDOM_BYTES_COUNT], ExecError> {\n        Ok(self.address_generator.borrow_mut().create_address())\n    }\n\n    /// Creates new [`URef`] instance.\n    pub fn new_uref(&mut self, value: StoredValue) -> Result<URef, ExecError> {\n        let uref = self\n            .address_generator\n            .borrow_mut()\n            .new_uref(AccessRights::READ_ADD_WRITE);\n        self.insert_uref(uref);\n        self.metered_write_gs(Key::URef(uref), value)?;\n        Ok(uref)\n    }\n\n    /// Creates a new URef where the value it stores is CLType::Unit.\n    pub(crate) fn new_unit_uref(&mut self) -> Result<URef, ExecError> {\n        self.new_uref(StoredValue::CLValue(CLValue::unit()))\n    }\n\n    /// Puts `key` to the map of named keys of current context.\n    pub fn put_key(&mut self, name: String, key: Key) -> Result<(), ExecError> {\n        // No need to perform actual validation on the base key because an account or contract (i.e.\n        // the element stored under `base_key`) is allowed to add new named keys to itself.\n        match self.get_context_key() {\n            Key::Account(_) | Key::Hash(_) => {\n                let named_key_value = StoredValue::CLValue(CLValue::from_t((name.clone(), key))?);\n                self.validate_value(&named_key_value)?;\n                
self.metered_add_gs_unsafe(self.get_context_key(), named_key_value)?;\n                self.insert_named_key(name, key);\n            }\n            Key::AddressableEntity(entity_addr) => {\n                let named_key_value =\n                    StoredValue::NamedKey(NamedKeyValue::from_concrete_values(key, name.clone())?);\n                self.validate_value(&named_key_value)?;\n                let named_key_addr = NamedKeyAddr::new_from_string(entity_addr, name.clone())?;\n                self.metered_write_gs_unsafe(Key::NamedKey(named_key_addr), named_key_value)?;\n                self.insert_named_key(name, key);\n            }\n            _ => return Err(ExecError::InvalidContext),\n        }\n\n        Ok(())\n    }\n\n    pub(crate) fn get_message_topics(\n        &mut self,\n        hash_addr: EntityAddr,\n    ) -> Result<MessageTopics, ExecError> {\n        self.tracking_copy\n            .borrow_mut()\n            .get_message_topics(hash_addr)\n            .map_err(Into::into)\n    }\n\n    pub(crate) fn get_named_keys(&mut self, entity_key: Key) -> Result<NamedKeys, ExecError> {\n        let entity_addr = if let Key::AddressableEntity(entity_addr) = entity_key {\n            entity_addr\n        } else {\n            return Err(ExecError::UnexpectedKeyVariant(entity_key));\n        };\n        self.tracking_copy\n            .borrow_mut()\n            .get_named_keys(entity_addr)\n            .map_err(Into::into)\n    }\n\n    pub(crate) fn write_entry_points(\n        &mut self,\n        entity_addr: EntityAddr,\n        entry_points: EntryPoints,\n    ) -> Result<(), ExecError> {\n        if entry_points.is_empty() {\n            return Ok(());\n        }\n\n        for entry_point in entry_points.take_entry_points() {\n            let entry_point_addr =\n                EntryPointAddr::new_v1_entry_point_addr(entity_addr, entry_point.name())?;\n            let entry_point_value =\n                
StoredValue::EntryPoint(EntryPointValue::V1CasperVm(entry_point));\n            self.metered_write_gs_unsafe(Key::EntryPoint(entry_point_addr), entry_point_value)?;\n        }\n\n        Ok(())\n    }\n\n    pub(crate) fn get_casper_vm_v1_entry_point(\n        &mut self,\n        entity_key: Key,\n    ) -> Result<EntryPoints, ExecError> {\n        let entity_addr = if let Key::AddressableEntity(entity_addr) = entity_key {\n            entity_addr\n        } else {\n            return Err(ExecError::UnexpectedKeyVariant(entity_key));\n        };\n\n        self.tracking_copy\n            .borrow_mut()\n            .get_v1_entry_points(entity_addr)\n            .map_err(Into::into)\n    }\n\n    /// Reads the total balance of a purse [`URef`].\n    ///\n    /// Currently address of a purse [`URef`] is also a hash in the [`Key::Hash`] space.\n    pub(crate) fn total_balance(&mut self, purse_uref: &URef) -> Result<Motes, ExecError> {\n        let key = Key::URef(*purse_uref);\n        let total = self\n            .tracking_copy\n            .borrow_mut()\n            .get_total_balance(key)\n            .map_err(ExecError::TrackingCopy)?;\n        Ok(total)\n    }\n\n    /// Reads the available balance of a purse [`URef`].\n    ///\n    /// Currently address of a purse [`URef`] is also a hash in the [`Key::Hash`] space.\n    pub(crate) fn available_balance(&mut self, purse_uref: &URef) -> Result<Motes, ExecError> {\n        let key = Key::URef(*purse_uref);\n        self.tracking_copy\n            .borrow_mut()\n            .get_available_balance(key)\n            .map_err(ExecError::TrackingCopy)\n    }\n\n    /// Read a stored value under a [`Key`].\n    pub fn read_gs(&mut self, key: &Key) -> Result<Option<StoredValue>, ExecError> {\n        self.validate_readable(key)?;\n        self.validate_key(key)?;\n\n        let maybe_stored_value = self.tracking_copy.borrow_mut().read(key)?;\n\n        let stored_value = match maybe_stored_value {\n            
Some(stored_value) => handle_stored_dictionary_value(*key, stored_value)?,\n            None => return Ok(None),\n        };\n\n        Ok(Some(stored_value))\n    }\n\n    /// Reads a value from a global state directly.\n    ///\n    /// # Usage\n    ///\n    /// DO NOT EXPOSE THIS VIA THE FFI - This function bypasses security checks and should be used\n    /// with caution.\n    pub fn read_gs_unsafe(&mut self, key: &Key) -> Result<Option<StoredValue>, ExecError> {\n        self.tracking_copy\n            .borrow_mut()\n            .read(key)\n            .map_err(Into::into)\n    }\n\n    /// This method is a wrapper over `read_gs` in the sense that it extracts the type held by a\n    /// `StoredValue` stored in the global state in a type safe manner.\n    ///\n    /// This is useful if you want to get the exact type from global state.\n    pub fn read_gs_typed<T>(&mut self, key: &Key) -> Result<T, ExecError>\n    where\n        T: TryFrom<StoredValue, Error = StoredValueTypeMismatch>,\n        T::Error: Debug,\n    {\n        let value = match self.read_gs(key)? 
{\n            None => return Err(ExecError::KeyNotFound(*key)),\n            Some(value) => value,\n        };\n\n        value\n            .try_into()\n            .map_err(|error| ExecError::TrackingCopy(TrackingCopyError::TypeMismatch(error)))\n    }\n\n    /// Returns all keys based on the tag prefix.\n    pub fn get_keys(&mut self, key_tag: &KeyTag) -> Result<BTreeSet<Key>, ExecError> {\n        self.tracking_copy\n            .borrow_mut()\n            .get_keys(key_tag)\n            .map_err(Into::into)\n    }\n\n    /// Returns all key's that start with prefix, if any.\n    pub fn get_keys_with_prefix(&mut self, prefix: &[u8]) -> Result<Vec<Key>, ExecError> {\n        self.tracking_copy\n            .borrow_mut()\n            .reader()\n            .keys_with_prefix(prefix)\n            .map_err(Into::into)\n    }\n\n    /// Write an era info instance to the global state.\n    pub fn write_era_info(&mut self, key: Key, value: EraInfo) {\n        if let Key::EraSummary = key {\n            // Writing an `EraInfo` for 100 validators will not exceed write size limit.\n            self.tracking_copy\n                .borrow_mut()\n                .write(key, StoredValue::EraInfo(value));\n        } else {\n            panic!(\"Do not use this function for writing non-era-info keys\")\n        }\n    }\n\n    /// Creates validated instance of `StoredValue` from `account`.\n    fn account_to_validated_value(&self, account: Account) -> Result<StoredValue, ExecError> {\n        let value = StoredValue::Account(account);\n        self.validate_value(&value)?;\n        Ok(value)\n    }\n\n    /// Write an account to the global state.\n    pub fn write_account(&mut self, key: Key, account: Account) -> Result<(), ExecError> {\n        if let Key::Account(_) = key {\n            self.validate_key(&key)?;\n            let account_value = self.account_to_validated_value(account)?;\n            self.metered_write_gs_unsafe(key, account_value)?;\n            Ok(())\n      
  } else {\n            panic!(\"Do not use this function for writing non-account keys\")\n        }\n    }\n\n    /// Read an account from the global state.\n    pub fn read_account(&mut self, key: &Key) -> Result<Option<StoredValue>, ExecError> {\n        if let Key::Account(_) = key {\n            self.validate_key(key)?;\n            self.tracking_copy\n                .borrow_mut()\n                .read(key)\n                .map_err(Into::into)\n        } else {\n            panic!(\"Do not use this function for reading from non-account keys\")\n        }\n    }\n\n    /// Adds a named key.\n    ///\n    /// If given `Key` refers to an [`URef`] then it extends the runtime context's access rights\n    /// with the URef's access rights.\n    fn insert_named_key(&mut self, name: String, key: Key) {\n        if let Key::URef(uref) = key {\n            self.insert_uref(uref);\n        }\n        self.named_keys.insert(name, key);\n    }\n\n    /// Adds a new [`URef`] into the context.\n    ///\n    /// Once an [`URef`] is inserted, it's considered a valid [`URef`] in this runtime context.\n    fn insert_uref(&mut self, uref: URef) {\n        self.access_rights.extend(&[uref])\n    }\n\n    /// Grants access to a [`URef`]; unless access was pre-existing.\n    pub fn grant_access(&mut self, uref: URef) -> GrantedAccess {\n        self.access_rights.grant_access(uref)\n    }\n\n    /// Removes an access right from the current runtime context.\n    pub fn remove_access(&mut self, uref_addr: URefAddr, access_rights: AccessRights) {\n        self.access_rights.remove_access(uref_addr, access_rights)\n    }\n\n    /// Returns a copy of the current effects of a tracking copy.\n    pub fn effects(&self) -> Effects {\n        self.tracking_copy.borrow().effects()\n    }\n\n    /// Returns a copy of the current messages of a tracking copy.\n    pub fn messages(&self) -> Messages {\n        self.tracking_copy.borrow().messages()\n    }\n\n    /// Returns a copy of the 
current named keys of a tracking copy.\n    pub fn cache(&self) -> TrackingCopyCache {\n        self.tracking_copy.borrow().cache()\n    }\n\n    /// Returns the cost charged for the last emitted message.\n    pub fn emit_message_cost(&self) -> U512 {\n        self.emit_message_cost\n    }\n\n    /// Sets the cost charged for the last emitted message.\n    pub fn set_emit_message_cost(&mut self, cost: U512) {\n        self.emit_message_cost = cost\n    }\n\n    /// Returns list of transfers.\n    pub fn transfers(&self) -> &Vec<Transfer> {\n        &self.transfers\n    }\n\n    /// Returns mutable list of transfers.\n    pub fn transfers_mut(&mut self) -> &mut Vec<Transfer> {\n        &mut self.transfers\n    }\n\n    fn validate_cl_value(&self, cl_value: &CLValue) -> Result<(), ExecError> {\n        match cl_value.cl_type() {\n            CLType::Bool\n            | CLType::I32\n            | CLType::I64\n            | CLType::U8\n            | CLType::U32\n            | CLType::U64\n            | CLType::U128\n            | CLType::U256\n            | CLType::U512\n            | CLType::Unit\n            | CLType::String\n            | CLType::Option(_)\n            | CLType::List(_)\n            | CLType::ByteArray(..)\n            | CLType::Result { .. }\n            | CLType::Map { .. 
}\n            | CLType::Tuple1(_)\n            | CLType::Tuple3(_)\n            | CLType::Any\n            | CLType::PublicKey => Ok(()),\n            CLType::Key => {\n                let key: Key = cl_value.to_t()?;\n                self.validate_key(&key)\n            }\n            CLType::URef => {\n                let uref: URef = cl_value.to_t()?;\n                self.validate_uref(&uref)\n            }\n            tuple @ CLType::Tuple2(_) if *tuple == casper_types::named_key_type() => {\n                let (_name, key): (String, Key) = cl_value.to_t()?;\n                self.validate_key(&key)\n            }\n            CLType::Tuple2(_) => Ok(()),\n        }\n    }\n\n    /// Validates whether keys used in the `value` are not forged.\n    pub(crate) fn validate_value(&self, value: &StoredValue) -> Result<(), ExecError> {\n        match value {\n            StoredValue::CLValue(cl_value) => self.validate_cl_value(cl_value),\n            StoredValue::NamedKey(named_key_value) => {\n                self.validate_cl_value(named_key_value.get_key_as_cl_value())?;\n                self.validate_cl_value(named_key_value.get_name_as_cl_value())\n            }\n            StoredValue::Account(_)\n            | StoredValue::ByteCode(_)\n            | StoredValue::Contract(_)\n            | StoredValue::AddressableEntity(_)\n            | StoredValue::SmartContract(_)\n            | StoredValue::Transfer(_)\n            | StoredValue::DeployInfo(_)\n            | StoredValue::EraInfo(_)\n            | StoredValue::Bid(_)\n            | StoredValue::BidKind(_)\n            | StoredValue::Withdraw(_)\n            | StoredValue::Unbonding(_)\n            | StoredValue::ContractPackage(_)\n            | StoredValue::ContractWasm(_)\n            | StoredValue::MessageTopic(_)\n            | StoredValue::Message(_)\n            | StoredValue::Prepayment(_)\n            | StoredValue::EntryPoint(_)\n            | StoredValue::RawBytes(_) => Ok(()),\n        }\n    
}\n\n    pub(crate) fn context_key_to_entity_addr(&self) -> Result<EntityAddr, ExecError> {\n        match self.context_key {\n            Key::Account(account_hash) => Ok(EntityAddr::Account(account_hash.value())),\n            Key::Hash(hash) => {\n                if self.is_system_addressable_entity(&hash)? {\n                    Ok(EntityAddr::System(hash))\n                } else {\n                    Ok(EntityAddr::SmartContract(hash))\n                }\n            }\n            Key::AddressableEntity(addr) => Ok(addr),\n            _ => Err(ExecError::UnexpectedKeyVariant(self.context_key)),\n        }\n    }\n\n    /// Validates whether key is not forged (whether it can be found in the\n    /// `named_keys`) and whether the version of a key that contract wants\n    /// to use, has access rights that are less powerful than access rights'\n    /// of the key in the `named_keys`.\n    pub(crate) fn validate_key(&self, key: &Key) -> Result<(), ExecError> {\n        let uref = match key {\n            Key::URef(uref) => uref,\n            _ => return Ok(()),\n        };\n        self.validate_uref(uref)\n    }\n\n    /// Validate [`URef`] access rights.\n    ///\n    /// Returns unit if [`URef`]s address exists in the context, and has correct access rights bit\n    /// set.\n    pub(crate) fn validate_uref(&self, uref: &URef) -> Result<(), ExecError> {\n        if self.access_rights.has_access_rights_to_uref(uref) {\n            Ok(())\n        } else {\n            Err(ExecError::ForgedReference(*uref))\n        }\n    }\n\n    /// Validates if a [`Key`] refers to a [`URef`] and has a read bit set.\n    fn validate_readable(&self, key: &Key) -> Result<(), ExecError> {\n        if self.is_readable(key) {\n            Ok(())\n        } else {\n            Err(ExecError::InvalidAccess {\n                required: AccessRights::READ,\n            })\n        }\n    }\n\n    /// Validates if a [`Key`] refers to a [`URef`] and has a add bit set.\n    fn 
validate_addable(&self, key: &Key) -> Result<(), ExecError> {\n        if self.is_addable(key) {\n            Ok(())\n        } else {\n            Err(ExecError::InvalidAccess {\n                required: AccessRights::ADD,\n            })\n        }\n    }\n\n    /// Validates if a [`Key`] refers to a [`URef`] and has a write bit set.\n    pub(crate) fn validate_writeable(&self, key: &Key) -> Result<(), ExecError> {\n        if self.is_writeable(key) {\n            Ok(())\n        } else {\n            Err(ExecError::InvalidAccess {\n                required: AccessRights::WRITE,\n            })\n        }\n    }\n\n    /// Tests whether reading from the `key` is valid.\n    pub fn is_readable(&self, key: &Key) -> bool {\n        match self.context_key_to_entity_addr() {\n            Ok(entity_addr) => key.is_readable(&entity_addr),\n            Err(error) => {\n                error!(?error, \"entity_key is unexpected key variant\");\n                panic!(\"is_readable: entity_key is unexpected key variant\");\n            }\n        }\n    }\n\n    /// Tests whether addition to `key` is valid.\n    pub fn is_addable(&self, key: &Key) -> bool {\n        match self.context_key_to_entity_addr() {\n            Ok(entity_addr) => key.is_addable(&entity_addr),\n            Err(error) => {\n                error!(?error, \"entity_key is unexpected key variant\");\n                panic!(\"is_addable: entity_key is unexpected key variant\");\n            }\n        }\n    }\n\n    /// Tests whether writing to `key` is valid.\n    pub fn is_writeable(&self, key: &Key) -> bool {\n        match self.context_key_to_entity_addr() {\n            Ok(entity_addr) => key.is_writeable(&entity_addr),\n            Err(error) => {\n                error!(?error, \"entity_key is unexpected key variant\");\n                panic!(\"is_writeable: entity_key is unexpected key variant\");\n            }\n        }\n    }\n\n    /// Safely charge the specified amount of gas, up to the 
available gas limit.\n    ///\n    /// Returns [`Error::GasLimit`] if gas limit exceeded and `()` if not.\n    /// Intuition about the return value sense is to answer the question 'are we\n    /// allowed to continue?'\n    pub(crate) fn charge_gas(&mut self, gas: Gas) -> Result<(), ExecError> {\n        let prev = self.gas_counter();\n        let gas_limit = self.gas_limit();\n        // gas charge overflow protection\n        match prev.checked_add(gas) {\n            None => {\n                self.set_gas_counter(gas_limit);\n                Err(ExecError::GasLimit)\n            }\n            Some(val) if val > gas_limit => {\n                self.set_gas_counter(gas_limit);\n                Err(ExecError::GasLimit)\n            }\n            Some(val) => {\n                self.set_gas_counter(val);\n                Ok(())\n            }\n        }\n    }\n\n    /// Checks if we are calling a system addressable entity.\n    pub(crate) fn is_system_addressable_entity(\n        &self,\n        hash_addr: &HashAddr,\n    ) -> Result<bool, ExecError> {\n        Ok(self.system_entity_registry()?.exists(hash_addr))\n    }\n\n    /// Charges gas for specified amount of bytes used.\n    fn charge_gas_storage(&mut self, bytes_count: usize) -> Result<(), ExecError> {\n        if let Some(hash_addr) = self.get_context_key().into_entity_hash_addr() {\n            if self.is_system_addressable_entity(&hash_addr)? 
{\n                // Don't charge storage used while executing a system contract.\n                return Ok(());\n            }\n        }\n\n        let storage_costs = self.engine_config.storage_costs();\n\n        let gas_cost = storage_costs.calculate_gas_cost(bytes_count);\n\n        self.charge_gas(gas_cost)\n    }\n\n    /// Charges gas for using a host system contract's entrypoint.\n    pub(crate) fn charge_system_contract_call<T>(&mut self, call_cost: T) -> Result<(), ExecError>\n    where\n        T: Into<Gas>,\n    {\n        let amount: Gas = call_cost.into();\n        self.charge_gas(amount)\n    }\n\n    /// Prune a key from the global state.\n    ///\n    /// Use with caution - there is no validation done as the key is assumed to be validated\n    /// already.\n    pub(crate) fn prune_gs_unsafe<K>(&mut self, key: K)\n    where\n        K: Into<Key>,\n    {\n        self.tracking_copy.borrow_mut().prune(key.into());\n    }\n\n    pub(crate) fn migrate_package(\n        &mut self,\n        contract_package_hash: ContractPackageHash,\n        protocol_version: ProtocolVersion,\n    ) -> Result<(), ExecError> {\n        self.tracking_copy\n            .borrow_mut()\n            .migrate_package(Key::Hash(contract_package_hash.value()), protocol_version)\n            .map_err(ExecError::TrackingCopy)\n    }\n\n    /// Writes data to global state with a measurement.\n    ///\n    /// Use with caution - there is no validation done as the key is assumed to be validated\n    /// already.\n    pub(crate) fn metered_write_gs_unsafe<K, V>(\n        &mut self,\n        key: K,\n        value: V,\n    ) -> Result<(), ExecError>\n    where\n        K: Into<Key>,\n        V: Into<StoredValue>,\n    {\n        let stored_value = value.into();\n\n        // Charge for amount as measured by serialized length\n        let bytes_count = stored_value.serialized_length();\n        self.charge_gas_storage(bytes_count)?;\n\n        self.tracking_copy\n            
.borrow_mut()\n            .write(key.into(), stored_value);\n        Ok(())\n    }\n\n    /// Emits message and writes message summary to global state with a measurement.\n    pub(crate) fn metered_emit_message(\n        &mut self,\n        topic_key: Key,\n        block_time: BlockTime,\n        block_message_count: u64,\n        topic_message_count: u32,\n        message: Message,\n    ) -> Result<(), ExecError> {\n        let topic_value = StoredValue::MessageTopic(MessageTopicSummary::new(\n            topic_message_count,\n            block_time,\n            message.topic_name().to_owned(),\n        ));\n        let message_key = message.message_key();\n        let message_value = StoredValue::Message(message.checksum().map_err(ExecError::BytesRepr)?);\n\n        let block_message_count_value =\n            StoredValue::CLValue(CLValue::from_t((block_time, block_message_count))?);\n\n        // Charge for amount as measured by serialized length\n        let bytes_count = topic_value.serialized_length()\n            + message_value.serialized_length()\n            + block_message_count_value.serialized_length();\n        self.charge_gas_storage(bytes_count)?;\n\n        self.tracking_copy.borrow_mut().emit_message(\n            topic_key,\n            topic_value,\n            message_key,\n            message_value,\n            block_message_count_value,\n            message,\n        );\n        Ok(())\n    }\n\n    /// Writes data to a global state and charges for bytes stored.\n    ///\n    /// This method performs full validation of the key to be written.\n    pub(crate) fn metered_write_gs<T>(&mut self, key: Key, value: T) -> Result<(), ExecError>\n    where\n        T: Into<StoredValue>,\n    {\n        let stored_value = value.into();\n        self.validate_writeable(&key)?;\n        self.validate_key(&key)?;\n        self.validate_value(&stored_value)?;\n        self.metered_write_gs_unsafe(key, stored_value)\n    }\n\n    /// Adds data to a global 
state key and charges for bytes stored.\n    pub(crate) fn metered_add_gs_unsafe(\n        &mut self,\n        key: Key,\n        value: StoredValue,\n    ) -> Result<(), ExecError> {\n        let value_bytes_count = value.serialized_length();\n        self.charge_gas_storage(value_bytes_count)?;\n\n        match self.tracking_copy.borrow_mut().add(key, value) {\n            Err(storage_error) => Err(storage_error.into()),\n            Ok(AddResult::Success) => Ok(()),\n            Ok(AddResult::KeyNotFound(key)) => Err(ExecError::KeyNotFound(key)),\n            Ok(AddResult::TypeMismatch(type_mismatch)) => {\n                Err(ExecError::TypeMismatch(type_mismatch))\n            }\n            Ok(AddResult::Serialization(error)) => Err(ExecError::BytesRepr(error)),\n            Ok(AddResult::Transform(error)) => Err(ExecError::Transform(error)),\n        }\n    }\n\n    /// Adds `value` to the `key`. The premise for being able to `add` value is\n    /// that the type of it value can be added (is a Monoid). 
If the\n    /// values can't be added, either because they're not a Monoid or if the\n    /// value stored under `key` has different type, then `TypeMismatch`\n    /// errors is returned.\n    pub(crate) fn metered_add_gs<K, V>(&mut self, key: K, value: V) -> Result<(), ExecError>\n    where\n        K: Into<Key>,\n        V: Into<StoredValue>,\n    {\n        let key = key.into();\n        let value = value.into();\n        self.validate_addable(&key)?;\n        self.validate_key(&key)?;\n        self.validate_value(&value)?;\n        self.metered_add_gs_unsafe(key, value)\n    }\n\n    /// Adds new associated key.\n    pub(crate) fn add_associated_key(\n        &mut self,\n        account_hash: AccountHash,\n        weight: Weight,\n    ) -> Result<(), ExecError> {\n        let context_key = self.context_key;\n        let entity_addr = self.context_key_to_entity_addr()?;\n\n        if EntryPointType::Caller == self.entry_point_type\n            && entity_addr.tag() != EntityKindTag::Account\n        {\n            // Exit early with error to avoid mutations\n            return Err(AddKeyFailure::PermissionDenied.into());\n        }\n\n        if self.engine_config.enable_entity {\n            // Get the current entity record\n            let entity = {\n                let mut entity: AddressableEntity = self.read_gs_typed(&context_key)?;\n                // enforce max keys limit\n                if entity.associated_keys().len()\n                    >= (self.engine_config.max_associated_keys() as usize)\n                {\n                    return Err(ExecError::AddKeyFailure(AddKeyFailure::MaxKeysLimit));\n                }\n\n                // Exit early in case of error without updating global state\n                entity\n                    .add_associated_key(account_hash, weight)\n                    .map_err(ExecError::from)?;\n                entity\n            };\n\n            self.metered_write_gs_unsafe(\n                context_key,\n         
       self.addressable_entity_to_validated_value(entity)?,\n            )?;\n        } else {\n            // Take an account out of the global state\n            let account = {\n                let mut account: Account = self.read_gs_typed(&context_key)?;\n\n                if account.associated_keys().len() as u32\n                    >= (self.engine_config.max_associated_keys())\n                {\n                    return Err(ExecError::AddKeyFailure(AddKeyFailure::MaxKeysLimit));\n                }\n\n                // Exit early in case of error without updating global state\n                let result = account.add_associated_key(\n                    account_hash,\n                    casper_types::account::Weight::new(weight.value()),\n                );\n\n                result.map_err(ExecError::from)?;\n                account\n            };\n\n            let account_value = self.account_to_validated_value(account)?;\n\n            self.metered_write_gs_unsafe(context_key, account_value)?;\n        }\n\n        Ok(())\n    }\n\n    /// Remove associated key.\n    pub(crate) fn remove_associated_key(\n        &mut self,\n        account_hash: AccountHash,\n    ) -> Result<(), ExecError> {\n        let context_key = self.context_key;\n        let entity_addr = self.context_key_to_entity_addr()?;\n\n        if EntryPointType::Caller == self.entry_point_type\n            && entity_addr.tag() != EntityKindTag::Account\n        {\n            // Exit early with error to avoid mutations\n            return Err(RemoveKeyFailure::PermissionDenied.into());\n        }\n\n        if !self\n            .runtime_footprint()\n            .borrow()\n            .can_manage_keys_with(&self.authorization_keys)\n        {\n            // Exit early if authorization keys weight doesn't exceed required\n            // key management threshold\n            return Err(RemoveKeyFailure::PermissionDenied.into());\n        }\n\n        if self.engine_config.enable_entity 
{\n            // Get the current entity record\n            let entity = {\n                let mut entity: AddressableEntity = self.read_gs_typed(&context_key)?;\n\n                // Exit early in case of error without updating global state\n                entity\n                    .remove_associated_key(account_hash)\n                    .map_err(ExecError::from)?;\n                entity\n            };\n\n            self.metered_write_gs_unsafe(\n                context_key,\n                self.addressable_entity_to_validated_value(entity)?,\n            )?;\n        } else {\n            // Take an account out of the global state\n            let account = {\n                let mut account: Account = self.read_gs_typed(&context_key)?;\n\n                // Exit early in case of error without updating global state\n                account\n                    .remove_associated_key(account_hash)\n                    .map_err(ExecError::from)?;\n                account\n            };\n\n            let account_value = self.account_to_validated_value(account)?;\n\n            self.metered_write_gs_unsafe(context_key, account_value)?;\n        }\n\n        Ok(())\n    }\n\n    /// Update associated key.\n    pub(crate) fn update_associated_key(\n        &mut self,\n        account_hash: AccountHash,\n        weight: Weight,\n    ) -> Result<(), ExecError> {\n        let context_key = self.context_key;\n        let entity_addr = self.context_key_to_entity_addr()?;\n\n        if EntryPointType::Caller == self.entry_point_type\n            && entity_addr.tag() != EntityKindTag::Account\n        {\n            // Exit early with error to avoid mutations\n            return Err(UpdateKeyFailure::PermissionDenied.into());\n        }\n\n        if !self\n            .runtime_footprint()\n            .borrow()\n            .can_manage_keys_with(&self.authorization_keys)\n        {\n            // Exit early if authorization keys weight doesn't exceed required\n  
          // key management threshold\n            return Err(UpdateKeyFailure::PermissionDenied.into());\n        }\n\n        if self.engine_config.enable_entity {\n            // Get the current entity record\n            let entity = {\n                let mut entity: AddressableEntity = self.read_gs_typed(&context_key)?;\n\n                // Exit early in case of error without updating global state\n                entity\n                    .update_associated_key(account_hash, weight)\n                    .map_err(ExecError::from)?;\n                entity\n            };\n\n            self.metered_write_gs_unsafe(\n                context_key,\n                self.addressable_entity_to_validated_value(entity)?,\n            )?;\n        } else {\n            // Take an account out of the global state\n            let account = {\n                let mut account: Account = self.read_gs_typed(&context_key)?;\n\n                // Exit early in case of error without updating global state\n                account\n                    .update_associated_key(\n                        account_hash,\n                        casper_types::account::Weight::new(weight.value()),\n                    )\n                    .map_err(ExecError::from)?;\n                account\n            };\n\n            let account_value = self.account_to_validated_value(account)?;\n\n            self.metered_write_gs_unsafe(context_key, account_value)?;\n        }\n\n        Ok(())\n    }\n\n    pub(crate) fn is_authorized_by_admin(&self) -> bool {\n        self.engine_config\n            .administrative_accounts()\n            .intersection(&self.authorization_keys)\n            .next()\n            .is_some()\n    }\n    /// Gets given contract package with its access_key validated against current context.\n    pub(crate) fn get_validated_contract_package(\n        &mut self,\n        package_hash: HashAddr,\n    ) -> Result<ContractPackage, ExecError> {\n        let 
package_hash_key = Key::Hash(package_hash);\n        self.validate_key(&package_hash_key)?;\n        let contract_package: ContractPackage = self.read_gs_typed(&package_hash_key)?;\n\n        if !self.is_authorized_by_admin() {\n            self.validate_uref(&contract_package.access_key())?;\n        }\n\n        Ok(contract_package)\n    }\n\n    /// Set threshold of an associated key.\n    pub(crate) fn set_action_threshold(\n        &mut self,\n        action_type: ActionType,\n        threshold: Weight,\n    ) -> Result<(), ExecError> {\n        let context_key = self.context_key;\n        let entity_addr = self.context_key_to_entity_addr()?;\n\n        if EntryPointType::Caller == self.entry_point_type\n            && entity_addr.tag() != EntityKindTag::Account\n        {\n            // Exit early with error to avoid mutations\n            return Err(SetThresholdFailure::PermissionDeniedError.into());\n        }\n\n        if self.engine_config.enable_entity {\n            // Take an addressable entity out of the global state\n            let mut entity: AddressableEntity = self.read_gs_typed(&context_key)?;\n\n            // Exit early in case of error without updating global state\n            if self.is_authorized_by_admin() {\n                entity.set_action_threshold_unchecked(action_type, threshold)\n            } else {\n                entity.set_action_threshold(action_type, threshold)\n            }\n            .map_err(ExecError::from)?;\n\n            let entity_value = self.addressable_entity_to_validated_value(entity)?;\n\n            self.metered_write_gs_unsafe(context_key, entity_value)?;\n        } else {\n            // Converts an account's public key into a URef\n            let key = Key::Account(AccountHash::new(entity_addr.value()));\n\n            // Take an account out of the global state\n            let mut account: Account = self.read_gs_typed(&key)?;\n\n            // Exit early in case of error without updating global 
state\n            let action_type = match action_type {\n                ActionType::Deployment => casper_types::account::ActionType::Deployment,\n                ActionType::KeyManagement => casper_types::account::ActionType::KeyManagement,\n                ActionType::UpgradeManagement => return Err(ExecError::InvalidContext),\n            };\n\n            let threshold = casper_types::account::Weight::new(threshold.value());\n\n            if self.is_authorized_by_admin() {\n                account.set_action_threshold_unchecked(action_type, threshold)\n            } else {\n                account.set_action_threshold(action_type, threshold)\n            }\n            .map_err(ExecError::from)?;\n\n            let account_value = self.account_to_validated_value(account)?;\n\n            self.metered_write_gs_unsafe(key, account_value)?;\n        }\n\n        Ok(())\n    }\n\n    fn addressable_entity_to_validated_value(\n        &self,\n        entity: AddressableEntity,\n    ) -> Result<StoredValue, ExecError> {\n        let value = StoredValue::AddressableEntity(entity);\n        self.validate_value(&value)?;\n        Ok(value)\n    }\n\n    pub(crate) fn runtime_footprint_by_account_hash(\n        &mut self,\n        account_hash: AccountHash,\n    ) -> Result<Option<RuntimeFootprint>, ExecError> {\n        if self.engine_config.enable_entity {\n            match self.read_gs(&Key::Account(account_hash))? {\n                Some(StoredValue::CLValue(cl_value)) => {\n                    let key: Key = cl_value.into_t().map_err(ExecError::CLValue)?;\n                    match self.read_gs(&key)? 
{\n                        Some(StoredValue::AddressableEntity(addressable_entity)) => {\n                            let entity_addr = EntityAddr::Account(account_hash.value());\n                            let named_keys = self.get_named_keys(key)?;\n                            let entry_points = self.get_casper_vm_v1_entry_point(key)?;\n                            let footprint = RuntimeFootprint::new_entity_footprint(\n                                entity_addr,\n                                addressable_entity,\n                                named_keys,\n                                entry_points,\n                            );\n                            Ok(Some(footprint))\n                        }\n                        Some(_other_variant_2) => Err(ExecError::UnexpectedStoredValueVariant),\n                        None => Ok(None),\n                    }\n                }\n                Some(_other_variant_1) => Err(ExecError::UnexpectedStoredValueVariant),\n                None => Ok(None),\n            }\n        } else {\n            match self.read_gs(&Key::Account(account_hash))? 
{\n                Some(StoredValue::Account(account)) => {\n                    Ok(Some(RuntimeFootprint::new_account_footprint(account)))\n                }\n                Some(_other_variant_1) => Err(ExecError::UnexpectedStoredValueVariant),\n                None => Ok(None),\n            }\n        }\n    }\n\n    /// Gets main purse id\n    pub fn get_main_purse(&mut self) -> Result<URef, ExecError> {\n        let main_purse = self\n            .runtime_footprint()\n            .borrow()\n            .main_purse()\n            .ok_or(ExecError::InvalidContext)?;\n        Ok(main_purse)\n    }\n\n    /// Gets entry point type.\n    pub fn entry_point_type(&self) -> EntryPointType {\n        self.entry_point_type\n    }\n\n    /// Gets given contract package with its access_key validated against current context.\n    pub(crate) fn get_validated_package(\n        &mut self,\n        package_hash: PackageHash,\n    ) -> Result<Package, ExecError> {\n        let package_hash_key = Key::from(package_hash);\n        self.validate_key(&package_hash_key)?;\n        let contract_package = if self.engine_config.enable_entity {\n            self.read_gs_typed::<Package>(&Key::SmartContract(package_hash.value()))?\n        } else {\n            let cp = self.read_gs_typed::<ContractPackage>(&Key::Hash(package_hash.value()))?;\n            cp.into()\n        };\n        Ok(contract_package)\n    }\n\n    pub(crate) fn get_package(&mut self, package_hash: HashAddr) -> Result<Package, ExecError> {\n        self.tracking_copy\n            .borrow_mut()\n            .get_package(package_hash)\n            .map_err(Into::into)\n    }\n\n    pub(crate) fn get_contract(\n        &mut self,\n        contract_hash: ContractHash,\n    ) -> Result<Contract, ExecError> {\n        self.tracking_copy\n            .borrow_mut()\n            .get_contract(contract_hash)\n            .map_err(Into::into)\n    }\n\n    pub(crate) fn get_contract_entity(\n        &mut self,\n        
entity_key: Key,\n    ) -> Result<(AddressableEntity, bool), ExecError> {\n        let entity_hash = if let Some(entity_hash) = entity_key.into_entity_hash() {\n            entity_hash\n        } else {\n            return Err(ExecError::UnexpectedKeyVariant(entity_key));\n        };\n\n        let mut tc = self.tracking_copy.borrow_mut();\n\n        let key = Key::contract_entity_key(entity_hash);\n        match tc.read(&key)? {\n            Some(StoredValue::AddressableEntity(entity)) => Ok((entity, false)),\n            Some(other) => Err(ExecError::TypeMismatch(StoredValueTypeMismatch::new(\n                \"AddressableEntity\".to_string(),\n                other.type_name(),\n            ))),\n            None => match tc.read(&Key::Hash(entity_hash.value()))? {\n                Some(StoredValue::Contract(contract)) => Ok((contract.into(), true)),\n                Some(other) => Err(ExecError::TypeMismatch(StoredValueTypeMismatch::new(\n                    \"Contract\".to_string(),\n                    other.type_name(),\n                ))),\n                None => Err(TrackingCopyError::KeyNotFound(key).into()),\n            },\n        }\n    }\n\n    /// Gets a dictionary item key from a dictionary referenced by a `uref`.\n    pub(crate) fn dictionary_get(\n        &mut self,\n        uref: URef,\n        dictionary_item_key: &str,\n    ) -> Result<Option<CLValue>, ExecError> {\n        self.validate_readable(&uref.into())?;\n        self.validate_key(&uref.into())?;\n        let dictionary_item_key_bytes = dictionary_item_key.as_bytes();\n\n        if dictionary_item_key_bytes.len() > DICTIONARY_ITEM_KEY_MAX_LENGTH {\n            return Err(ExecError::DictionaryItemKeyExceedsLength);\n        }\n\n        let dictionary_key = Key::dictionary(uref, dictionary_item_key_bytes);\n        self.dictionary_read(dictionary_key)\n    }\n\n    /// Gets a dictionary value from a dictionary `Key`.\n    pub(crate) fn dictionary_read(\n        &mut self,\n        
dictionary_key: Key,\n    ) -> Result<Option<CLValue>, ExecError> {\n        let maybe_stored_value = self\n            .tracking_copy\n            .borrow_mut()\n            .read(&dictionary_key)\n            .map_err(Into::<ExecError>::into)?;\n\n        if let Some(stored_value) = maybe_stored_value {\n            let stored_value = handle_stored_dictionary_value(dictionary_key, stored_value)?;\n            let cl_value = CLValue::try_from(stored_value).map_err(ExecError::TypeMismatch)?;\n            Ok(Some(cl_value))\n        } else {\n            Ok(None)\n        }\n    }\n\n    /// Puts a dictionary item key from a dictionary referenced by a `uref`.\n    pub fn dictionary_put(\n        &mut self,\n        seed_uref: URef,\n        dictionary_item_key: &str,\n        cl_value: CLValue,\n    ) -> Result<(), ExecError> {\n        let dictionary_item_key_bytes = dictionary_item_key.as_bytes();\n\n        if dictionary_item_key_bytes.len() > DICTIONARY_ITEM_KEY_MAX_LENGTH {\n            return Err(ExecError::DictionaryItemKeyExceedsLength);\n        }\n\n        self.validate_writeable(&seed_uref.into())?;\n        self.validate_uref(&seed_uref)?;\n\n        self.validate_cl_value(&cl_value)?;\n\n        let wrapped_cl_value = {\n            let dictionary_value = CLValueDictionary::new(\n                cl_value,\n                seed_uref.addr().to_vec(),\n                dictionary_item_key_bytes.to_vec(),\n            );\n            CLValue::from_t(dictionary_value).map_err(ExecError::from)?\n        };\n\n        let dictionary_key = Key::dictionary(seed_uref, dictionary_item_key_bytes);\n        self.metered_write_gs_unsafe(dictionary_key, wrapped_cl_value)?;\n        Ok(())\n    }\n\n    /// Gets system contract by name.\n    pub(crate) fn get_system_contract(\n        &self,\n        name: &str,\n    ) -> Result<AddressableEntityHash, ExecError> {\n        let registry = self.system_entity_registry()?;\n        let hash = 
registry.get(name).ok_or_else(|| {\n            error!(\"Missing system contract hash: {}\", name);\n            ExecError::MissingSystemContractHash(name.to_string())\n        })?;\n        Ok(AddressableEntityHash::new(*hash))\n    }\n\n    pub(crate) fn get_system_entity_key(&self, name: &str) -> Result<Key, ExecError> {\n        let system_entity_hash = self.get_system_contract(name)?;\n        if self.engine_config.enable_entity {\n            Ok(Key::addressable_entity_key(\n                EntityKindTag::System,\n                system_entity_hash,\n            ))\n        } else {\n            Ok(Key::Hash(system_entity_hash.value()))\n        }\n    }\n\n    /// Returns system entity registry by querying the global state.\n    pub fn system_entity_registry(&self) -> Result<SystemHashRegistry, ExecError> {\n        self.tracking_copy\n            .borrow_mut()\n            .get_system_entity_registry()\n            .map_err(|err| {\n                error!(\"Missing system entity registry\");\n                ExecError::TrackingCopy(err)\n            })\n    }\n\n    pub(super) fn remaining_spending_limit(&self) -> U512 {\n        self.remaining_spending_limit\n    }\n\n    /// Subtract spent amount from the main purse spending limit.\n    pub(crate) fn subtract_amount_spent(&mut self, amount: U512) -> Option<U512> {\n        if let Some(res) = self.remaining_spending_limit.checked_sub(amount) {\n            self.remaining_spending_limit = res;\n            Some(self.remaining_spending_limit)\n        } else {\n            error!(\n                limit = %self.remaining_spending_limit,\n                spent = %amount,\n                \"exceeded main purse spending limit\"\n            );\n            self.remaining_spending_limit = U512::zero();\n            None\n        }\n    }\n\n    /// Sets a new spending limit.\n    /// Should be called after inner context returns - if tokens were spent there, it must count\n    /// towards global limit for the 
whole deploy execution.\n    pub(crate) fn set_remaining_spending_limit(&mut self, amount: U512) {\n        self.remaining_spending_limit = amount;\n    }\n\n    /// Adds new message topic.\n    pub(crate) fn add_message_topic(\n        &mut self,\n        topic_name: &str,\n        topic_name_hash: TopicNameHash,\n    ) -> Result<Result<(), MessageTopicError>, ExecError> {\n        let entity_addr = self.context_key_to_entity_addr()?;\n\n        // Take the addressable entity out of the global state\n        {\n            let mut message_topics = self\n                .tracking_copy\n                .borrow_mut()\n                .get_message_topics(entity_addr)?;\n\n            let max_topics_per_contract = self\n                .engine_config\n                .wasm_config()\n                .messages_limits()\n                .max_topics_per_contract();\n\n            if message_topics.len() >= max_topics_per_contract as usize {\n                return Ok(Err(MessageTopicError::MaxTopicsExceeded));\n            }\n\n            if let Err(e) = message_topics.add_topic(topic_name, topic_name_hash) {\n                return Ok(Err(e));\n            }\n        }\n\n        let topic_key = Key::Message(MessageAddr::new_topic_addr(entity_addr, topic_name_hash));\n        let block_time = self.block_info.block_time();\n        let summary = StoredValue::MessageTopic(MessageTopicSummary::new(\n            0,\n            block_time,\n            topic_name.to_string(),\n        ));\n\n        self.metered_write_gs_unsafe(topic_key, summary)?;\n\n        Ok(Ok(()))\n    }\n}\n"
  },
  {
    "path": "execution_engine/src/runtime_context/tests.rs",
    "content": "use std::{cell::RefCell, collections::BTreeSet, convert::TryInto, iter::FromIterator, rc::Rc};\n\nuse rand::RngCore;\n\nuse casper_storage::{\n    global_state::state::lmdb::LmdbGlobalStateView, tracking_copy::new_temporary_tracking_copy,\n    AddressGenerator, TrackingCopy,\n};\n\nuse super::{AllowInstallUpgrade, ExecError, RuntimeContext};\nuse crate::engine_state::{BlockInfo, EngineConfig, EngineConfigBuilder};\nuse casper_types::{\n    account::{\n        AccountHash, AddKeyFailure, RemoveKeyFailure, SetThresholdFailure, ACCOUNT_HASH_LENGTH,\n    },\n    addressable_entity::{ActionType, AssociatedKeys, EntryPoints, Weight},\n    bytesrepr::ToBytes,\n    contracts::NamedKeys,\n    execution::TransformKindV2,\n    system::{AUCTION, HANDLE_PAYMENT, MINT, STANDARD_PAYMENT},\n    AccessRights, AddressableEntity, AddressableEntityHash, BlockGlobalAddr, BlockHash, BlockTime,\n    ByteCodeHash, CLValue, ContextAccessRights, Digest, EntityAddr, EntityKind, EntryPointType,\n    Gas, HashAddr, Key, PackageHash, Phase, ProtocolVersion, PublicKey, RuntimeArgs,\n    RuntimeFootprint, SecretKey, StoredValue, SystemHashRegistry, Tagged, Timestamp,\n    TransactionHash, TransactionV1Hash, URef, KEY_HASH_LENGTH, U256, U512,\n};\nuse tempfile::TempDir;\n\nconst TXN_HASH_RAW: [u8; 32] = [1u8; 32];\nconst PHASE: Phase = Phase::Session;\nconst GAS_LIMIT: u64 = 500_000_000_000_000u64;\n\nfn test_engine_config() -> EngineConfig {\n    EngineConfig::default()\n}\n\nfn new_tracking_copy(\n    account_hash: AccountHash,\n    init_entity_key: Key,\n    init_entity: AddressableEntity,\n) -> (TrackingCopy<LmdbGlobalStateView>, TempDir) {\n    let entity_key_cl_value = CLValue::from_t(init_entity_key).expect(\"must convert to cl value\");\n\n    let initial_data = [\n        (init_entity_key, StoredValue::AddressableEntity(init_entity)),\n        (\n            Key::Account(account_hash),\n            StoredValue::CLValue(entity_key_cl_value),\n        ),\n    ];\n    
new_temporary_tracking_copy(initial_data, None, true)\n}\n\nfn new_addressable_entity_with_purse(\n    account_hash: AccountHash,\n    entity_hash: AddressableEntityHash,\n    entity_kind: EntityKind,\n    purse: [u8; 32],\n) -> (Key, Key, AddressableEntity) {\n    let associated_keys = AssociatedKeys::new(account_hash, Weight::new(1));\n    let entity = AddressableEntity::new(\n        PackageHash::default(),\n        ByteCodeHash::default(),\n        ProtocolVersion::V2_0_0,\n        URef::new(purse, AccessRights::READ_ADD_WRITE),\n        associated_keys,\n        Default::default(),\n        entity_kind,\n    );\n    let account_key = Key::Account(account_hash);\n    let contract_key = Key::addressable_entity_key(entity_kind.tag(), entity_hash);\n\n    (account_key, contract_key, entity)\n}\n\nfn new_addressable_entity(\n    account_hash: AccountHash,\n    entity_hash: AddressableEntityHash,\n) -> (Key, Key, AddressableEntity) {\n    new_addressable_entity_with_purse(\n        account_hash,\n        entity_hash,\n        EntityKind::Account(account_hash),\n        [0; 32],\n    )\n}\n\n// create random account key.\nfn random_account_key<G: RngCore>(entropy_source: &mut G) -> Key {\n    let mut key = [0u8; 32];\n    entropy_source.fill_bytes(&mut key);\n    Key::Account(AccountHash::new(key))\n}\n\n// create random contract key.\nfn random_contract_key<G: RngCore>(entropy_source: &mut G) -> Key {\n    let mut key_hash = [0u8; 32];\n    entropy_source.fill_bytes(&mut key_hash);\n    Key::AddressableEntity(EntityAddr::SmartContract(key_hash))\n}\n\n// Create URef Key.\nfn create_uref_as_key(address_generator: &mut AddressGenerator, rights: AccessRights) -> Key {\n    let address = address_generator.create_address();\n    Key::URef(URef::new(address, rights))\n}\n\nfn random_hash<G: RngCore>(entropy_source: &mut G) -> Key {\n    let mut key = [0u8; KEY_HASH_LENGTH];\n    entropy_source.fill_bytes(&mut key);\n    Key::Hash(key)\n}\n\nfn new_runtime_context<'a>(\n   
 addressable_entity: &'a AddressableEntity,\n    account_hash: AccountHash,\n    entity_address: Key,\n    named_keys: &'a mut NamedKeys,\n    access_rights: ContextAccessRights,\n    address_generator: AddressGenerator,\n) -> (RuntimeContext<'a, LmdbGlobalStateView>, TempDir) {\n    let (mut tracking_copy, tempdir) =\n        new_tracking_copy(account_hash, entity_address, addressable_entity.clone());\n\n    let mint_hash = HashAddr::default();\n\n    let default_system_registry = {\n        let mut registry = SystemHashRegistry::new();\n        registry.insert(MINT.to_string(), mint_hash);\n        registry.insert(HANDLE_PAYMENT.to_string(), HashAddr::default());\n        registry.insert(STANDARD_PAYMENT.to_string(), HashAddr::default());\n        registry.insert(AUCTION.to_string(), HashAddr::default());\n        StoredValue::CLValue(CLValue::from_t(registry).unwrap())\n    };\n\n    tracking_copy.write(Key::SystemEntityRegistry, default_system_registry);\n    tracking_copy.write(\n        Key::Account(account_hash),\n        StoredValue::CLValue(CLValue::from_t(entity_address).expect(\"must get cl_value\")),\n    );\n    tracking_copy.write(\n        entity_address,\n        StoredValue::AddressableEntity(addressable_entity.clone()),\n    );\n\n    // write block time to gs\n    let now = Timestamp::now();\n    let cl_value = CLValue::from_t(now.millis()).expect(\"should get cl_value\");\n    let stored_value = StoredValue::CLValue(cl_value);\n    tracking_copy.write(Key::BlockGlobal(BlockGlobalAddr::BlockTime), stored_value);\n\n    // write protocol version to gs\n    let protocol_version = ProtocolVersion::V2_0_0;\n    let cl_value = CLValue::from_t(protocol_version.destructure()).expect(\"should get cl_value\");\n    let stored_value = StoredValue::CLValue(cl_value);\n    tracking_copy.write(\n        Key::BlockGlobal(BlockGlobalAddr::ProtocolVersion),\n        stored_value,\n    );\n\n    // write the addressable entity flag to gs\n    let cl_value = 
CLValue::from_t(false).expect(\"should get cl_value\");\n    let stored_value = StoredValue::CLValue(cl_value);\n    tracking_copy.write(\n        Key::BlockGlobal(BlockGlobalAddr::AddressableEntity),\n        stored_value,\n    );\n\n    let addr = match entity_address {\n        Key::AddressableEntity(entity_addr) => entity_addr,\n        Key::Account(account_hash) => EntityAddr::Account(account_hash.value()),\n        Key::Hash(hash) => EntityAddr::SmartContract(hash),\n        _ => panic!(\"unexpected key\"),\n    };\n\n    let runtime_footprint = RuntimeFootprint::new_entity_footprint(\n        addr,\n        addressable_entity.clone(),\n        named_keys.clone(),\n        EntryPoints::new(),\n    );\n\n    let engine_config = {\n        let config_builder = EngineConfigBuilder::new();\n        config_builder.with_enable_entity(true).build()\n    };\n\n    let runtime_context = RuntimeContext::new(\n        named_keys,\n        Rc::new(RefCell::new(runtime_footprint)),\n        entity_address,\n        BTreeSet::from_iter(vec![account_hash]),\n        access_rights,\n        account_hash,\n        Rc::new(RefCell::new(address_generator)),\n        Rc::new(RefCell::new(tracking_copy)),\n        engine_config,\n        BlockInfo::new(\n            Digest::default(),\n            BlockTime::new(0),\n            BlockHash::default(),\n            0,\n            ProtocolVersion::V2_0_0,\n        ),\n        TransactionHash::V1(TransactionV1Hash::from_raw([1u8; 32])),\n        Phase::Session,\n        RuntimeArgs::new(),\n        Gas::new(U512::from(GAS_LIMIT)),\n        Gas::default(),\n        Vec::default(),\n        U512::MAX,\n        EntryPointType::Caller,\n        AllowInstallUpgrade::Forbidden,\n    );\n\n    (runtime_context, tempdir)\n}\n\n#[allow(clippy::assertions_on_constants)]\nfn assert_forged_reference<T>(result: Result<T, ExecError>) {\n    match result {\n        Err(ExecError::ForgedReference(_)) => assert!(true),\n        _ => panic!(\"Error. 
Test should have failed with ForgedReference error but didn't.\"),\n    }\n}\n\n#[allow(clippy::assertions_on_constants)]\nfn assert_invalid_access<T: std::fmt::Debug>(\n    result: Result<T, ExecError>,\n    expecting: AccessRights,\n) {\n    match result {\n        Err(ExecError::InvalidAccess { required }) if required == expecting => assert!(true),\n        other => panic!(\n            \"Error. Test should have failed with InvalidAccess error but didn't: {:?}.\",\n            other\n        ),\n    }\n}\n\nfn build_runtime_context_and_execute<T, F>(\n    mut named_keys: NamedKeys,\n    functor: F,\n) -> Result<T, ExecError>\nwhere\n    F: FnOnce(RuntimeContext<LmdbGlobalStateView>) -> Result<T, ExecError>,\n{\n    let secret_key = SecretKey::ed25519_from_bytes([222; SecretKey::ED25519_LENGTH])\n        .expect(\"should create secret key\");\n    let public_key = PublicKey::from(&secret_key);\n    let account_hash = public_key.to_account_hash();\n    let entity_hash = AddressableEntityHash::new([10u8; 32]);\n    let deploy_hash = [1u8; 32];\n    let (_, entity_key, addressable_entity) =\n        new_addressable_entity(public_key.to_account_hash(), entity_hash);\n\n    let address_generator = AddressGenerator::new(&deploy_hash, Phase::Session);\n    let access_rights = addressable_entity.extract_access_rights(entity_hash, &named_keys);\n    let (runtime_context, _tempdir) = new_runtime_context(\n        &addressable_entity,\n        account_hash,\n        entity_key,\n        &mut named_keys,\n        access_rights,\n        address_generator,\n    );\n\n    functor(runtime_context)\n}\n\n#[track_caller]\nfn last_transform_kind_on_addressable_entity(\n    runtime_context: &RuntimeContext<LmdbGlobalStateView>,\n) -> TransformKindV2 {\n    let key = runtime_context.context_key;\n    runtime_context\n        .effects()\n        .transforms()\n        .iter()\n        .rev()\n        .find_map(|transform| (transform.key() == &key).then(|| transform.kind().clone()))\n 
       .unwrap()\n}\n\n#[test]\nfn use_uref_valid() {\n    // Test fixture\n    let mut rng = AddressGenerator::new(&TXN_HASH_RAW, PHASE);\n    let uref_as_key = create_uref_as_key(&mut rng, AccessRights::READ_WRITE);\n    let mut named_keys = NamedKeys::new();\n    named_keys.insert(String::new(), uref_as_key);\n    // Use uref as the key to perform an action on the global state.\n    // This should succeed because the uref is valid.\n    let value = StoredValue::CLValue(CLValue::from_t(43_i32).unwrap());\n    let result = build_runtime_context_and_execute(named_keys, |mut rc| {\n        rc.metered_write_gs(uref_as_key, value)\n    });\n    result.expect(\"writing using valid uref should succeed\");\n}\n\n#[test]\nfn use_uref_forged() {\n    // Test fixture\n    let mut rng = AddressGenerator::new(&TXN_HASH_RAW, PHASE);\n    let uref = create_uref_as_key(&mut rng, AccessRights::READ_WRITE);\n    let named_keys = NamedKeys::new();\n    // named_keys.insert(String::new(), Key::from(uref));\n    let value = StoredValue::CLValue(CLValue::from_t(43_i32).unwrap());\n    let result =\n        build_runtime_context_and_execute(named_keys, |mut rc| rc.metered_write_gs(uref, value));\n\n    assert_forged_reference(result);\n}\n\n#[test]\nfn account_key_not_writeable() {\n    let mut rng = rand::thread_rng();\n    let acc_key = random_account_key(&mut rng);\n    let result = build_runtime_context_and_execute(NamedKeys::new(), |mut rc| {\n        rc.metered_write_gs(\n            acc_key,\n            StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()),\n        )\n    });\n    assert_invalid_access(result, AccessRights::WRITE);\n}\n\n#[test]\nfn entity_key_readable_valid() {\n    // Entity key is readable if it is a \"base\" key - current context of the\n    // execution.\n    let result = build_runtime_context_and_execute(NamedKeys::new(), |rc| {\n        let context_key = rc.get_context_key();\n        let runtime_footprint = rc.runtime_footprint();\n\n        let 
entity_hash = runtime_footprint.borrow().hash_addr();\n        let key_hash = context_key.into_entity_hash_addr().expect(\"must get hash\");\n\n        assert_eq!(entity_hash, key_hash);\n        Ok(())\n    });\n\n    assert!(result.is_ok());\n}\n\n#[test]\nfn account_key_addable_returns_type_mismatch() {\n    // Account key is not addable anymore as we do not store an account underneath they key\n    // but instead there is a CLValue which acts as an indirection to the corresponding entity.\n    let mut rng = AddressGenerator::new(&TXN_HASH_RAW, PHASE);\n    let uref_as_key = create_uref_as_key(&mut rng, AccessRights::READ);\n    let mut named_keys = NamedKeys::new();\n    named_keys.insert(String::new(), uref_as_key);\n    let result = build_runtime_context_and_execute(named_keys, |mut rc| {\n        let account_key: Key = rc.account_hash.into();\n        let uref_name = \"NewURef\".to_owned();\n        let named_key = StoredValue::CLValue(CLValue::from_t((uref_name, uref_as_key)).unwrap());\n\n        rc.metered_add_gs(account_key, named_key)\n    });\n\n    assert!(result.is_err());\n}\n\n#[test]\nfn account_key_addable_invalid() {\n    // Account key is NOT addable if it is a \"base\" key - current context of the\n    // execution.\n    let mut rng = rand::thread_rng();\n    let other_acc_key = random_account_key(&mut rng);\n\n    let result = build_runtime_context_and_execute(NamedKeys::new(), |mut rc| {\n        rc.metered_add_gs(\n            other_acc_key,\n            StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()),\n        )\n    });\n\n    assert_invalid_access(result, AccessRights::ADD);\n}\n\n#[test]\nfn contract_key_readable_valid() {\n    // Account key is readable if it is a \"base\" key - current context of the\n    // execution.\n    let mut rng = rand::thread_rng();\n    let contract_key = random_contract_key(&mut rng);\n    let result =\n        build_runtime_context_and_execute(NamedKeys::new(), |mut rc| rc.read_gs(&contract_key));\n\n 
   assert!(result.is_ok());\n}\n\n#[test]\nfn contract_key_not_writeable() {\n    // Account key is readable if it is a \"base\" key - current context of the\n    // execution.\n    let mut rng = rand::thread_rng();\n    let contract_key = random_contract_key(&mut rng);\n    let result = build_runtime_context_and_execute(NamedKeys::new(), |mut rc| {\n        rc.metered_write_gs(\n            contract_key,\n            StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()),\n        )\n    });\n\n    assert_invalid_access(result, AccessRights::WRITE);\n}\n\n#[test]\nfn contract_key_addable_valid() {\n    // Contract key is addable if it is a \"base\" key - current context of the execution.\n    let account_hash = AccountHash::new([0u8; 32]);\n    let entity_hash = AddressableEntityHash::new([1u8; 32]);\n    let (_account_key, entity_key, entity) = new_addressable_entity(account_hash, entity_hash);\n    let authorization_keys = BTreeSet::from_iter(vec![account_hash]);\n    let mut address_generator = AddressGenerator::new(&TXN_HASH_RAW, PHASE);\n\n    let mut rng = rand::thread_rng();\n    let contract_key = random_contract_key(&mut rng);\n    let entity_as_stored_value = StoredValue::AddressableEntity(AddressableEntity::default());\n    let mut access_rights = entity_as_stored_value\n        .as_addressable_entity()\n        .unwrap()\n        .extract_access_rights(AddressableEntityHash::default(), &NamedKeys::new());\n\n    let (tracking_copy, _tempdir) = new_tracking_copy(account_hash, entity_key, entity);\n    let tracking_copy = Rc::new(RefCell::new(tracking_copy));\n    tracking_copy\n        .borrow_mut()\n        .write(contract_key, entity_as_stored_value.clone());\n\n    let default_system_registry = {\n        let mut registry = SystemHashRegistry::new();\n        registry.insert(MINT.to_string(), HashAddr::default());\n        registry.insert(HANDLE_PAYMENT.to_string(), HashAddr::default());\n        registry.insert(STANDARD_PAYMENT.to_string(), 
HashAddr::default());\n        registry.insert(AUCTION.to_string(), HashAddr::default());\n        StoredValue::CLValue(CLValue::from_t(registry).unwrap())\n    };\n\n    tracking_copy\n        .borrow_mut()\n        .write(Key::SystemEntityRegistry, default_system_registry);\n\n    let uref_as_key = create_uref_as_key(&mut address_generator, AccessRights::WRITE);\n    let uref_name = \"NewURef\".to_owned();\n    let named_uref_tuple =\n        StoredValue::CLValue(CLValue::from_t((uref_name.clone(), uref_as_key)).unwrap());\n    let mut named_keys = NamedKeys::new();\n    named_keys.insert(uref_name, uref_as_key);\n\n    access_rights.extend(&[uref_as_key.into_uref().expect(\"should be a URef\")]);\n\n    let addr = match contract_key {\n        Key::AddressableEntity(entity_addr) => entity_addr,\n        Key::Account(account_hash) => EntityAddr::Account(account_hash.value()),\n        Key::Hash(hash) => EntityAddr::SmartContract(hash),\n        _ => panic!(\"unexpected key\"),\n    };\n\n    let runtime_footprint = RuntimeFootprint::new_entity_footprint(\n        addr,\n        AddressableEntity::default(),\n        named_keys.clone(),\n        EntryPoints::new(),\n    );\n\n    let mut runtime_context = RuntimeContext::new(\n        &mut named_keys,\n        Rc::new(RefCell::new(runtime_footprint)),\n        contract_key,\n        authorization_keys,\n        access_rights,\n        account_hash,\n        Rc::new(RefCell::new(address_generator)),\n        Rc::clone(&tracking_copy),\n        EngineConfig::default(),\n        BlockInfo::new(\n            Digest::default(),\n            BlockTime::new(0),\n            BlockHash::default(),\n            0,\n            ProtocolVersion::V2_0_0,\n        ),\n        TransactionHash::V1(TransactionV1Hash::from_raw(TXN_HASH_RAW)),\n        PHASE,\n        RuntimeArgs::new(),\n        Gas::new(U512::from(GAS_LIMIT)),\n        Gas::default(),\n        Vec::default(),\n        U512::zero(),\n        
EntryPointType::Caller,\n        AllowInstallUpgrade::Forbidden,\n    );\n\n    assert!(runtime_context\n        .metered_add_gs(contract_key, named_uref_tuple)\n        .is_err())\n}\n\n#[test]\nfn contract_key_addable_invalid() {\n    let account_hash = AccountHash::new([0u8; 32]);\n    let entity_hash = AddressableEntityHash::new([1u8; 32]);\n    let (_, entity_key, entity) = new_addressable_entity(account_hash, entity_hash);\n    let authorization_keys = BTreeSet::from_iter(vec![account_hash]);\n    let mut address_generator = AddressGenerator::new(&TXN_HASH_RAW, PHASE);\n    let mut rng = rand::thread_rng();\n    let contract_key = random_contract_key(&mut rng);\n\n    let other_contract_key = random_contract_key(&mut rng);\n    let contract = StoredValue::AddressableEntity(AddressableEntity::default());\n    let mut access_rights = contract\n        .as_addressable_entity()\n        .unwrap()\n        .extract_access_rights(AddressableEntityHash::default(), &NamedKeys::new());\n    let (tracking_copy, _tempdir) = new_tracking_copy(account_hash, entity_key, entity.clone());\n    let tracking_copy = Rc::new(RefCell::new(tracking_copy));\n\n    tracking_copy.borrow_mut().write(contract_key, contract);\n\n    let uref_as_key = create_uref_as_key(&mut address_generator, AccessRights::WRITE);\n    let uref_name = \"NewURef\".to_owned();\n    let named_uref_tuple = StoredValue::CLValue(CLValue::from_t((uref_name, uref_as_key)).unwrap());\n\n    let mut named_keys = NamedKeys::new();\n    named_keys.insert(String::new(), uref_as_key);\n\n    access_rights.extend(&[uref_as_key.into_uref().expect(\"should be a URef\")]);\n\n    let addr = match entity_key {\n        Key::AddressableEntity(entity_addr) => entity_addr,\n        Key::Account(account_hash) => EntityAddr::Account(account_hash.value()),\n        Key::Hash(hash) => EntityAddr::SmartContract(hash),\n        _ => panic!(\"unexpected key\"),\n    };\n\n    let runtime_footprint = 
RuntimeFootprint::new_entity_footprint(\n        addr,\n        AddressableEntity::default(),\n        named_keys.clone(),\n        EntryPoints::new(),\n    );\n\n    let mut runtime_context = RuntimeContext::new(\n        &mut named_keys,\n        Rc::new(RefCell::new(runtime_footprint)),\n        other_contract_key,\n        authorization_keys,\n        access_rights,\n        account_hash,\n        Rc::new(RefCell::new(address_generator)),\n        Rc::clone(&tracking_copy),\n        EngineConfig::default(),\n        BlockInfo::new(\n            Digest::default(),\n            BlockTime::new(0),\n            BlockHash::default(),\n            0,\n            ProtocolVersion::V2_0_0,\n        ),\n        TransactionHash::V1(TransactionV1Hash::from_raw(TXN_HASH_RAW)),\n        PHASE,\n        RuntimeArgs::new(),\n        Gas::new(U512::from(GAS_LIMIT)),\n        Gas::default(),\n        Vec::default(),\n        U512::zero(),\n        EntryPointType::Caller,\n        AllowInstallUpgrade::Forbidden,\n    );\n\n    let result = runtime_context.metered_add_gs(contract_key, named_uref_tuple);\n\n    assert_invalid_access(result, AccessRights::ADD);\n}\n\n#[test]\nfn uref_key_readable_valid() {\n    let mut rng = AddressGenerator::new(&TXN_HASH_RAW, PHASE);\n    let uref_key = create_uref_as_key(&mut rng, AccessRights::READ);\n\n    let mut named_keys = NamedKeys::new();\n    named_keys.insert(String::new(), uref_key);\n\n    let result = build_runtime_context_and_execute(named_keys, |mut rc| rc.read_gs(&uref_key));\n    assert!(result.is_ok());\n}\n\n#[test]\nfn uref_key_readable_invalid() {\n    let mut rng = AddressGenerator::new(&TXN_HASH_RAW, PHASE);\n    let uref_key = create_uref_as_key(&mut rng, AccessRights::WRITE);\n\n    let mut named_keys = NamedKeys::new();\n    named_keys.insert(String::new(), uref_key);\n\n    let result = build_runtime_context_and_execute(named_keys, |mut rc| rc.read_gs(&uref_key));\n    assert_invalid_access(result, 
AccessRights::READ);\n}\n\n#[test]\nfn uref_key_writeable_valid() {\n    let mut rng = AddressGenerator::new(&TXN_HASH_RAW, PHASE);\n    let uref_key = create_uref_as_key(&mut rng, AccessRights::WRITE);\n\n    let mut named_keys = NamedKeys::new();\n    named_keys.insert(String::new(), uref_key);\n\n    let result = build_runtime_context_and_execute(named_keys, |mut rc| {\n        rc.metered_write_gs(\n            uref_key,\n            StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()),\n        )\n    });\n    assert!(result.is_ok());\n}\n\n#[test]\nfn uref_key_writeable_invalid() {\n    let mut rng = AddressGenerator::new(&TXN_HASH_RAW, PHASE);\n    let uref_key = create_uref_as_key(&mut rng, AccessRights::READ);\n\n    let mut named_keys = NamedKeys::new();\n    named_keys.insert(String::new(), uref_key);\n\n    let result = build_runtime_context_and_execute(named_keys, |mut rc| {\n        rc.metered_write_gs(\n            uref_key,\n            StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()),\n        )\n    });\n    assert_invalid_access(result, AccessRights::WRITE);\n}\n\n#[test]\nfn uref_key_addable_valid() {\n    let mut rng = AddressGenerator::new(&TXN_HASH_RAW, PHASE);\n    let uref_key = create_uref_as_key(&mut rng, AccessRights::ADD_WRITE);\n\n    let mut named_keys = NamedKeys::new();\n    named_keys.insert(String::new(), uref_key);\n\n    let result = build_runtime_context_and_execute(named_keys, |mut rc| {\n        rc.metered_write_gs(uref_key, CLValue::from_t(10_i32).unwrap())\n            .expect(\"Writing to the GlobalState should work.\");\n        rc.metered_add_gs(uref_key, CLValue::from_t(1_i32).unwrap())\n    });\n    assert!(result.is_ok());\n}\n\n#[test]\nfn uref_key_addable_invalid() {\n    let mut rng = AddressGenerator::new(&TXN_HASH_RAW, PHASE);\n    let uref_key = create_uref_as_key(&mut rng, AccessRights::WRITE);\n\n    let mut named_keys = NamedKeys::new();\n    named_keys.insert(String::new(), uref_key);\n\n    let result 
= build_runtime_context_and_execute(named_keys, |mut rc| {\n        rc.metered_add_gs(\n            uref_key,\n            StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()),\n        )\n    });\n    assert_invalid_access(result, AccessRights::ADD);\n}\n\n#[test]\nfn hash_key_is_not_writeable() {\n    // values under hash's are immutable\n    let functor = |runtime_context: RuntimeContext<LmdbGlobalStateView>| {\n        let mut rng = rand::thread_rng();\n        let key = random_hash(&mut rng);\n        runtime_context.validate_writeable(&key)\n    };\n    let result = build_runtime_context_and_execute(NamedKeys::new(), functor);\n    assert!(result.is_err())\n}\n\n#[test]\nfn hash_key_is_not_addable() {\n    // values under hashes are immutable\n    let functor = |runtime_context: RuntimeContext<LmdbGlobalStateView>| {\n        let mut rng = rand::thread_rng();\n        let key = random_hash(&mut rng);\n        runtime_context.validate_addable(&key)\n    };\n    let result = build_runtime_context_and_execute(NamedKeys::new(), functor);\n    assert!(result.is_err())\n}\n\n#[test]\nfn manage_associated_keys() {\n    // Testing a valid case only - successfully added a key, and successfully removed,\n    // making sure `account_dirty` mutated\n    let named_keys = NamedKeys::new();\n    let functor = |mut runtime_context: RuntimeContext<LmdbGlobalStateView>| {\n        let account_hash = AccountHash::new([42; 32]);\n        let weight = Weight::new(155);\n\n        // Add a key (this doesn't check for all invariants as `add_key`\n        // is already tested in different place)\n        runtime_context\n            .add_associated_key(account_hash, weight)\n            .expect(\"Unable to add key\");\n\n        let transform_kind = last_transform_kind_on_addressable_entity(&runtime_context);\n        let entity = match transform_kind {\n            TransformKindV2::Write(StoredValue::AddressableEntity(entity)) => entity,\n            _ => panic!(\"Invalid 
transform operation found\"),\n        };\n        entity\n            .associated_keys()\n            .get(&account_hash)\n            .expect(\"Account hash wasn't added to associated keys\");\n\n        let new_weight = Weight::new(100);\n        runtime_context\n            .update_associated_key(account_hash, new_weight)\n            .expect(\"Unable to update key\");\n\n        let transform_kind = last_transform_kind_on_addressable_entity(&runtime_context);\n        let entity = match transform_kind {\n            TransformKindV2::Write(StoredValue::AddressableEntity(entity)) => entity,\n            _ => panic!(\"Invalid transform operation found\"),\n        };\n        let value = entity\n            .associated_keys()\n            .get(&account_hash)\n            .expect(\"Account hash wasn't added to associated keys\");\n\n        assert_eq!(value, &new_weight, \"value was not updated\");\n\n        // Remove a key that was already added\n        runtime_context\n            .remove_associated_key(account_hash)\n            .expect(\"Unable to remove key\");\n\n        // Verify\n        let transform_kind = last_transform_kind_on_addressable_entity(&runtime_context);\n        let entity = match transform_kind {\n            TransformKindV2::Write(StoredValue::AddressableEntity(entity)) => entity,\n            _ => panic!(\"Invalid transform operation found\"),\n        };\n\n        let actual = entity.associated_keys().get(&account_hash);\n\n        assert!(actual.is_none());\n\n        // Remove a key that was already removed\n        runtime_context\n            .remove_associated_key(account_hash)\n            .expect_err(\"A non existing key was unexpectedly removed again\");\n\n        Ok(())\n    };\n    let _ = build_runtime_context_and_execute(named_keys, functor);\n}\n\n#[test]\nfn action_thresholds_management() {\n    // Testing a valid case only - successfully added a key, and successfully removed,\n    // making sure `account_dirty` 
mutated\n    let named_keys = NamedKeys::new();\n    let functor = |mut runtime_context: RuntimeContext<LmdbGlobalStateView>| {\n        let entity_hash_by_account_hash =\n            CLValue::from_t(Key::Hash([2; 32])).expect(\"must convert to cl_value\");\n\n        runtime_context\n            .metered_write_gs_unsafe(\n                Key::Account(AccountHash::new([42; 32])),\n                entity_hash_by_account_hash,\n            )\n            .expect(\"must write key to gs\");\n\n        runtime_context\n            .add_associated_key(AccountHash::new([42; 32]), Weight::new(254))\n            .expect(\"Unable to add associated key with maximum weight\");\n        runtime_context\n            .set_action_threshold(ActionType::KeyManagement, Weight::new(253))\n            .expect(\"Unable to set action threshold KeyManagement\");\n        runtime_context\n            .set_action_threshold(ActionType::Deployment, Weight::new(252))\n            .expect(\"Unable to set action threshold Deployment\");\n\n        let transform_kind = last_transform_kind_on_addressable_entity(&runtime_context);\n        let mutated_entity = match transform_kind {\n            TransformKindV2::Write(StoredValue::AddressableEntity(entity)) => entity,\n            _ => panic!(\"Invalid transform operation found\"),\n        };\n\n        assert_eq!(\n            mutated_entity.action_thresholds().deployment(),\n            &Weight::new(252)\n        );\n        assert_eq!(\n            mutated_entity.action_thresholds().key_management(),\n            &Weight::new(253)\n        );\n\n        runtime_context\n            .set_action_threshold(ActionType::Deployment, Weight::new(255))\n            .expect_err(\"Shouldn't be able to set deployment threshold higher than key management\");\n\n        Ok(())\n    };\n    let _ = build_runtime_context_and_execute(named_keys, functor);\n}\n\n#[test]\nfn should_verify_ownership_before_adding_key() {\n    // Testing a valid case only - 
successfully added a key, and successfully removed,\n    // making sure `account_dirty` mutated\n    let named_keys = NamedKeys::new();\n    let functor = |mut runtime_context: RuntimeContext<LmdbGlobalStateView>| {\n        // Overwrites a `context_key` to a different one before doing any operation as\n        // account `[0; 32]`\n        let entity_hash_by_account_hash =\n            CLValue::from_t(Key::Hash([2; 32])).expect(\"must convert to cl_value\");\n\n        runtime_context\n            .metered_write_gs_unsafe(\n                Key::Account(AccountHash::new([84; 32])),\n                entity_hash_by_account_hash,\n            )\n            .expect(\"must write key to gs\");\n\n        runtime_context\n            .metered_write_gs_unsafe(Key::Hash([1; 32]), AddressableEntity::default())\n            .expect(\"must write key to gs\");\n\n        runtime_context.context_key = Key::Hash([1; 32]);\n\n        let err = runtime_context\n            .add_associated_key(AccountHash::new([84; 32]), Weight::new(123))\n            .expect_err(\"This operation should return error\");\n\n        match err {\n            ExecError::UnexpectedKeyVariant(_) => {\n                // This is the v2.0.0 error as this test is currently using Key::Hash\n                // instead of Key::AddressableEntity\n            }\n            ExecError::AddKeyFailure(AddKeyFailure::PermissionDenied) => {}\n            e => panic!(\"Invalid error variant: {:?}\", e),\n        }\n\n        Ok(())\n    };\n    let _ = build_runtime_context_and_execute(named_keys, functor);\n}\n\n#[test]\nfn should_verify_ownership_before_removing_a_key() {\n    // Testing a valid case only - successfully added a key, and successfully removed,\n    // making sure `account_dirty` mutated\n    let named_keys = NamedKeys::new();\n    let functor = |mut runtime_context: RuntimeContext<LmdbGlobalStateView>| {\n        // Overwrites a `context_key` to a different one before doing any operation as\n        
// account `[0; 32]`\n        runtime_context.context_key = Key::Hash([1; 32]);\n\n        let err = runtime_context\n            .remove_associated_key(AccountHash::new([84; 32]))\n            .expect_err(\"This operation should return error\");\n\n        match err {\n            ExecError::UnexpectedKeyVariant(_) => {\n                // this is the v2.0 error because this test is currently using\n                // Key::Hash instead of Key::AddressableEntity\n            }\n            ExecError::RemoveKeyFailure(RemoveKeyFailure::PermissionDenied) => {}\n            ref e => panic!(\"Invalid error variant: {:?}\", e),\n        }\n\n        Ok(())\n    };\n    let _ = build_runtime_context_and_execute(named_keys, functor);\n}\n\n#[test]\nfn should_verify_ownership_before_setting_action_threshold() {\n    // Testing a valid case only - successfully added a key, and successfully removed,\n    // making sure `account_dirty` mutated\n    let named_keys = NamedKeys::new();\n    let functor = |mut runtime_context: RuntimeContext<LmdbGlobalStateView>| {\n        // Overwrites a `context_key` to a different one before doing any operation as\n        // account `[0; 32]`\n        runtime_context.context_key = Key::Hash([1; 32]);\n\n        let err = runtime_context\n            .set_action_threshold(ActionType::Deployment, Weight::new(123))\n            .expect_err(\"This operation should return error\");\n\n        match err {\n            ExecError::UnexpectedKeyVariant(_) => {\n                // this is what is returned under protocol version 2.0 because Key::Hash(_) is\n                // deprecated.\n            }\n            ExecError::SetThresholdFailure(SetThresholdFailure::PermissionDeniedError) => {}\n            ref e => panic!(\"Invalid error variant: {:?}\", e),\n        }\n\n        Ok(())\n    };\n    let _ = build_runtime_context_and_execute(named_keys, functor);\n}\n\n#[test]\nfn remove_uref_works() {\n    // Test that `remove_uref` removes Key from 
both ephemeral representation\n    // which is one of the current RuntimeContext, and also puts that change\n    // into the `TrackingCopy` so that it's later committed to the GlobalState.\n    let deploy_hash = [1u8; 32];\n    let mut address_generator = AddressGenerator::new(&deploy_hash, Phase::Session);\n    let uref_name = \"Foo\".to_owned();\n    let uref_key = create_uref_as_key(&mut address_generator, AccessRights::READ);\n    let account_hash = AccountHash::new([0u8; 32]);\n    let entity_hash = AddressableEntityHash::new([0u8; 32]);\n    let mut named_keys = NamedKeys::new();\n    named_keys.insert(uref_name.clone(), uref_key);\n    let (_, entity_key, addressable_entity) = new_addressable_entity(account_hash, entity_hash);\n\n    let access_rights = addressable_entity.extract_access_rights(entity_hash, &named_keys);\n\n    let (mut runtime_context, _tempdir) = new_runtime_context(\n        &addressable_entity,\n        account_hash,\n        entity_key,\n        &mut named_keys,\n        access_rights,\n        address_generator,\n    );\n\n    assert!(runtime_context.named_keys_contains_key(&uref_name));\n    assert!(runtime_context.remove_key(&uref_name).is_ok());\n    // It is valid to retain the access right for the given runtime context\n    // even if you remove the URef from the named keys.\n    assert!(runtime_context.validate_key(&uref_key).is_ok());\n    assert!(!runtime_context.named_keys_contains_key(&uref_name));\n\n    let entity_named_keys = runtime_context\n        .get_named_keys(entity_key)\n        .expect(\"must get named keys for entity\");\n    assert!(!entity_named_keys.contains(&uref_name));\n    // The next time the account is used, the access right is gone for the removed\n    // named key.\n\n    let next_session_access_rights = addressable_entity.extract_access_rights(\n        AddressableEntityHash::new(account_hash.value()),\n        &entity_named_keys,\n    );\n    let address_generator = AddressGenerator::new(&deploy_hash, 
Phase::Session);\n\n    let (runtime_context, _tempdir) = new_runtime_context(\n        &addressable_entity,\n        account_hash,\n        entity_key,\n        &mut named_keys,\n        next_session_access_rights,\n        address_generator,\n    );\n    assert!(runtime_context.validate_key(&uref_key).is_err());\n}\n\n#[test]\nfn an_accounts_access_rights_should_include_main_purse() {\n    let test_main_purse = URef::new([42u8; 32], AccessRights::READ_ADD_WRITE);\n    // All other access rights except for main purse are extracted from named keys.\n    let account_hash = AccountHash::new([0u8; 32]);\n    let entity_hash = AddressableEntityHash::new([1u8; 32]);\n    let named_keys = NamedKeys::new();\n    let (_context_key, _, entity) = new_addressable_entity_with_purse(\n        account_hash,\n        entity_hash,\n        EntityKind::Account(account_hash),\n        test_main_purse.addr(),\n    );\n    assert!(\n        named_keys.is_empty(),\n        \"Named keys does not contain main purse\"\n    );\n    let access_rights = entity.extract_access_rights(entity_hash, &named_keys);\n    assert!(\n        access_rights.has_access_rights_to_uref(&test_main_purse),\n        \"Main purse should be included in access rights\"\n    );\n}\n\n#[test]\nfn validate_valid_purse_of_an_account() {\n    // Tests that URef which matches a purse of a given context gets validated\n    let test_main_purse = URef::new([42u8; 32], AccessRights::READ_ADD_WRITE);\n\n    let mut named_keys = NamedKeys::new();\n    named_keys.insert(\"entry\".to_string(), Key::from(test_main_purse));\n\n    let deploy_hash = [1u8; 32];\n    let account_hash = AccountHash::new([0u8; 32]);\n    let entity_hash = AddressableEntityHash::new([1u8; 32]);\n    let (context_key, _, entity) = new_addressable_entity_with_purse(\n        account_hash,\n        entity_hash,\n        EntityKind::Account(account_hash),\n        test_main_purse.addr(),\n    );\n\n    let mut access_rights = 
entity.extract_access_rights(entity_hash, &named_keys);\n    access_rights.extend(&[test_main_purse]);\n\n    let address_generator = AddressGenerator::new(&deploy_hash, Phase::Session);\n    let (runtime_context, _tempdir) = new_runtime_context(\n        &entity,\n        account_hash,\n        context_key,\n        &mut named_keys,\n        access_rights,\n        address_generator,\n    );\n\n    // URef that has the same id as purse of an account gets validated\n    // successfully.\n    assert!(runtime_context.validate_uref(&test_main_purse).is_ok());\n\n    let purse = test_main_purse.with_access_rights(AccessRights::READ);\n    assert!(runtime_context.validate_uref(&purse).is_ok());\n    let purse = test_main_purse.with_access_rights(AccessRights::ADD);\n    assert!(runtime_context.validate_uref(&purse).is_ok());\n    let purse = test_main_purse.with_access_rights(AccessRights::WRITE);\n    assert!(runtime_context.validate_uref(&purse).is_ok());\n\n    // Purse ID that doesn't match account's purse should fail as it's also not\n    // in known urefs.\n    let purse = URef::new([53; 32], AccessRights::READ_ADD_WRITE);\n    assert!(runtime_context.validate_uref(&purse).is_err());\n}\n\n#[test]\nfn should_meter_for_gas_storage_write() {\n    // Test fixture\n    let mut rng = AddressGenerator::new(&TXN_HASH_RAW, PHASE);\n    let uref_as_key = create_uref_as_key(&mut rng, AccessRights::READ_WRITE);\n\n    let mut named_keys = NamedKeys::new();\n    named_keys.insert(\"entry\".to_string(), uref_as_key);\n\n    let value = StoredValue::CLValue(CLValue::from_t(43_i32).unwrap());\n    let expected_write_cost = test_engine_config()\n        .storage_costs()\n        .calculate_gas_cost(value.serialized_length());\n\n    let (gas_usage_before, gas_usage_after) =\n        build_runtime_context_and_execute(named_keys, |mut rc| {\n            let gas_before = rc.gas_counter();\n            rc.metered_write_gs(uref_as_key, value)\n                .expect(\"should 
write\");\n            let gas_after = rc.gas_counter();\n            Ok((gas_before, gas_after))\n        })\n        .expect(\"should run test\");\n\n    assert!(\n        gas_usage_after > gas_usage_before,\n        \"{} <= {}\",\n        gas_usage_after,\n        gas_usage_before\n    );\n\n    assert_eq!(\n        Some(gas_usage_after),\n        gas_usage_before.checked_add(expected_write_cost)\n    );\n}\n\n#[test]\nfn should_meter_for_gas_storage_add() {\n    // Test fixture\n    let mut rng = AddressGenerator::new(&TXN_HASH_RAW, PHASE);\n    let uref_as_key = create_uref_as_key(&mut rng, AccessRights::ADD_WRITE);\n\n    let mut named_keys = NamedKeys::new();\n    named_keys.insert(\"entry\".to_string(), uref_as_key);\n\n    let value = StoredValue::CLValue(CLValue::from_t(43_i32).unwrap());\n    let expected_add_cost = test_engine_config()\n        .storage_costs()\n        .calculate_gas_cost(value.serialized_length());\n\n    let (gas_usage_before, gas_usage_after) =\n        build_runtime_context_and_execute(named_keys, |mut rc| {\n            rc.metered_write_gs(uref_as_key, value.clone())\n                .expect(\"should write\");\n            let gas_before = rc.gas_counter();\n            rc.metered_add_gs(uref_as_key, value).expect(\"should add\");\n            let gas_after = rc.gas_counter();\n            Ok((gas_before, gas_after))\n        })\n        .expect(\"should run test\");\n\n    assert!(\n        gas_usage_after > gas_usage_before,\n        \"{} <= {}\",\n        gas_usage_after,\n        gas_usage_before\n    );\n\n    assert_eq!(\n        Some(gas_usage_after),\n        gas_usage_before.checked_add(expected_add_cost)\n    );\n}\n\n#[test]\nfn associated_keys_add_full() {\n    let final_add_result = build_runtime_context_and_execute(NamedKeys::new(), |mut rc| {\n        let associated_keys_before = rc.runtime_footprint().borrow().associated_keys().len();\n\n        for count in 0..(rc.engine_config.max_associated_keys() as usize - 
associated_keys_before) {\n            let account_hash = {\n                let mut addr = [0; ACCOUNT_HASH_LENGTH];\n                U256::from(count).to_big_endian(&mut addr);\n                AccountHash::new(addr)\n            };\n            let weight = Weight::new(count.try_into().unwrap());\n            rc.add_associated_key(account_hash, weight)\n                .unwrap_or_else(|e| panic!(\"should add key {}: {:?}\", count, e));\n        }\n\n        rc.add_associated_key(AccountHash::new([42; 32]), Weight::new(42))\n    });\n\n    assert!(matches!(\n        final_add_result.expect_err(\"should error out\"),\n        ExecError::AddKeyFailure(AddKeyFailure::MaxKeysLimit)\n    ));\n}\n"
  },
  {
    "path": "execution_engine_testing/test_support/CHANGELOG.md",
    "content": "# Changelog\n\nAll notable changes to this project will be documented in this file.  The format is based on [Keep a Changelog].\n\n[comment]: <> (Added:      new features)\n[comment]: <> (Changed:    changes in existing functionality)\n[comment]: <> (Deprecated: soon-to-be removed features)\n[comment]: <> (Removed:    now removed features)\n[comment]: <> (Fixed:      any bug fixes)\n[comment]: <> (Security:   in case of vulnerabilities)\n\n## 9.0.0\n\n### Added\n\n* Added support for `RewardsHandling` to the execution engine testing crate\n* Added support for `minimum_delegation_rate` to the execution engine testing crate\n\n## 7.0.1\n\n### Added\n* Provide `from_chainspec_path` and `max_associated_keys` helper methods on `ChainspecConfig`.\n* Provide functions for converting from `ChainspecConfig` to `EngineConfig`.\n* Provide `try_exec` method on `WasmTestBuilder` for fallible contract execution.\n* Provide `PRODUCTION_CHAINSPEC_PATH`: a lazy static defining the path to the production chainspec.toml file.\n\n\n\n## 7.0.0\n\n### Added\n* Provide `calculate_refund_amount` on `WasmTestBuilder`.\n* Provide `upgrade_with_upgrade_request_and_config` on `WasmTestBuilder`.\n\n\n\n## 6.0.0\n\n### Changed\n* Update `casper-execution-engine` dependency.\n* Handle evict items in the `WasmTestBuilder` when advancing eras or calling `step`.\n\n\n\n## 5.0.0\n\n### Added\n* Add `WasmTestBuilder::get_execution_journals` method for returning execution journals for all test runs.\n* Add support to load values from a given Chainspec.\n* Add static and constants that represent Casper-mainnet chainspec values. These values will change as new ProtocolVersions are added. 
The current values reflect ones used in the 1.5.0 ProtocolVersion.\n* Add `WasmTestBuilder::advance_era`, `WasmTestBuilder::advance_eras_by`, and `WasmTestBuilder::advance_eras_by_default_auction_delay` to advance chain and run auction contract in test environment.\n\n### Changed\n* `WasmTestBuilder::get_transforms` is deprecated in favor of `WasmTestBuilder::get_execution_journals`.\n* `deploy_hash` field is now defaulted to a random value rather than zeros in `DeployItemBuilder`.\n\n\n\n## 4.0.0\n\n### Changed\n* Update dependencies (in particular `casper-types` to v2.0.0 due to additional `Key` variant, requiring a major version bump here).\n\n\n\n## 3.1.1\n\n### Changed\n* Update chainspec values used in `PRODUCTION_RUN_GENESIS_REQUEST` to match those of Mainnet protocol version 1.4.15.\n\n\n\n## 3.1.0\n\n### Added\n* Add support for `commit_prune` of `casper-execution-engine`.\n\n\n\n## 3.0.0\n\n### Changed\n* Version bump only to match major version bump of `casper-execution-engine` dependency.\n\n\n\n## 2.3.0 [YANKED]\n\n### Added\n* Add `ChainspecConfig` to support parsing a chainspec.\n\n\n\n## 2.2.0\n\n### Added\n* Add some auction and transfer test support functions for reuse among benchmarks and unit tests.\n\n### Deprecated\n* Deprecated the `DEFAULT_RUN_GENESIS_REQUEST` in favor of `PRODUCTION_RUN_GENESIS_REQUEST`.\n\n\n\n## 2.1.0\n\n### Added\n* Add further helper methods to `WasmTestBuilder`.\n\n\n\n## 2.0.3 - 2021-12-06\n\n### Added\n* Added `WasmTestBuilder::get_balance_keys` function.\n\n\n\n## 2.0.2 - 2021-11-24\n\n### Changed\n* Revert the change to the path detection logic applied in v2.0.1.\n\n\n\n## [2.0.1] - 2021-11-4\n\n### Changed\n* Change the path detection logic for compiled Wasm as used by the casper-node monorepo.\n\n### Deprecated\n* Deprecate the `test-support` feature.  
It had and continues to have no effect when enabled.\n\n\n\n## [2.0.0] - 2021-11-01\n\n### Added\n* Provide fine-grained support for testing all aspects of smart contract execution, including:\n    * `WasmTestBuilder` for building and running a test to exercise a smart contract\n    * `DeployItemBuilder` for building a `DeployItem` from a smart contract\n    * `ExecuteRequestBuilder` for building an `ExecuteRequest` to execute a given smart contract\n    * `AdditiveMapDiff` to allow easy comparison of two AdditiveMaps\n    * `StepRequestBuilder` for building a `StepRequest` (generally only used by the execution engine itself)\n    * `UpgradeRequestBuilder` for building an `UpgradeRequest` (generally only used by the execution engine itself)\n* Provide `LmdbWasmTestBuilder` can be used where global state needs to be persisted after execution of a smart contract\n* Provide several helper functions in `utils` module\n* Provide several default consts and statics useful across many test scenarios\n\n### Removed\n* Remove coarse-grained support and newtypes for testing smart contracts, including removal of:\n    * `Account`\n    * `AccountHash`\n    * `Error`\n    * `Session`\n    * `SessionBuilder`\n    * `SessionTransferInfo`\n    * `TestContext`\n    * `TestContextBuilder`\n    * `Value`\n* Remove `InMemoryWasmTestBuilder`.\n\n\n\n## [1.4.0] - 2021-10-04\n\n### Changed\n* Support building and testing using stable Rust.\n\n\n\n## [1.3.0] - 2021-07-19\n\n### Changed\n* Update pinned version of Rust to `nightly-2021-06-17`.\n\n\n\n## [1.2.0] - 2021-05-28\n\n### Changed\n* Change to Apache 2.0 license.\n\n\n\n## [1.1.1] - 2021-04-19\n\nNo changes.\n\n\n\n## [1.1.0] - 2021-04-13 [YANKED]\n\nNo changes.\n\n\n\n## [1.0.1] - 2021-04-08\n\nNo changes.\n\n\n\n## [1.0.0] - 2021-03-30\n\n### Added\n* Initial release of execution-engine test support framework compatible with Casper mainnet.\n\n\n\n[Keep a Changelog]: https://keepachangelog.com/en/1.0.0\n[unreleased]: 
https://github.com/casper-network/casper-node/compare/04f48a467...dev\n[2.0.1]: https://github.com/casper-network/casper-node/compare/13585abcf...04f48a467\n[2.0.0]: https://github.com/casper-network/casper-node/compare/v1.4.0...13585abcf\n[1.4.0]: https://github.com/casper-network/casper-node/compare/v1.3.0...v1.4.0\n[1.3.0]: https://github.com/casper-network/casper-node/compare/v1.2.0...v1.3.0\n[1.2.0]: https://github.com/casper-network/casper-node/compare/v1.1.1...v1.2.0\n[1.1.1]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1\n[1.1.0]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1\n[1.0.1]: https://github.com/casper-network/casper-node/compare/v1.0.0...v1.0.1\n[1.0.0]: https://github.com/casper-network/casper-node/releases/tag/v1.0.0\n"
  },
  {
    "path": "execution_engine_testing/test_support/Cargo.toml",
    "content": "[package]\nname = \"casper-engine-test-support\"\nversion = \"9.0.0\" # when updating, also update 'html_root_url' in lib.rs\nauthors = [\"Fraser Hutchison <fraser@casperlabs.io>\", \"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\ndescription = \"Library to support testing of Wasm smart contracts for use on the Casper network.\"\ndocumentation = \"https://docs.rs/casper-engine-test-support\"\nreadme = \"README.md\"\nhomepage = \"https://casper.network\"\nrepository = \"https://github.com/casper-network/casper-node/tree/master/execution_engine_testing/test_support\"\nlicense = \"Apache-2.0\"\n\n[dependencies]\nblake2 = \"0.9.0\"\ncasper-storage = { version = \"5.0.0\", path = \"../../storage\" }\ncasper-types = { version = \"7.0.0\", path = \"../../types\" }\nenv_logger = \"0.10.0\"\ncasper-execution-engine = { version = \"9.0.0\", path = \"../../execution_engine\", features = [\"test-support\"] }\nhumantime = \"2\"\nfilesize = \"0.2.0\"\nlmdb-rkv = \"0.14\"\nlog = \"0.4.14\"\nnum-rational = \"0.4.0\"\nnum-traits = { workspace = true }\nonce_cell = \"1.8.0\"\nrand = \"0.8.4\"\nserde = { version = \"1\", features = [\"derive\", \"rc\"] }\ntempfile = \"3.4.0\"\ntoml = \"0.5.6\"\n\n[dev-dependencies]\ncasper-types = { version = \"7.0.0\", path = \"../../types\", features = [\"std\"] }\nversion-sync = \"0.9.3\"\n\n[build-dependencies]\ntoml_edit = \"=0.21.0\"\nhumantime = \"2\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n"
  },
  {
    "path": "execution_engine_testing/test_support/README.md",
    "content": "# `casper-engine-test-support`\n\n[![LOGO](https://raw.githubusercontent.com/casper-network/casper-node/master/images/casper-association-logo-primary.svg)](https://casper.network/)\n\n[![Crates.io](https://img.shields.io/crates/v/casper-engine-test-support)](https://crates.io/crates/casper-engine-test-support)\n[![Documentation](https://docs.rs/casper-engine-test-support/badge.svg)](https://docs.rs/casper-engine-test-support)\n[![License](https://img.shields.io/badge/license-Apache-blue)](https://github.com/casper-network/casper-node/blob/master/LICENSE)\n\nA library to support testing of Wasm smart contracts for use on the Casper network.\n\n## License\n\nLicensed under the [Apache License Version 2.0](../../LICENSE).\n"
  },
  {
    "path": "execution_engine_testing/test_support/build.rs",
    "content": "use humantime::format_rfc3339;\nuse std::{\n    env, fs,\n    path::Path,\n    time::{Duration, SystemTime},\n};\nuse toml_edit::{value, Document};\n\nfn main() {\n    let manifest_dir = env::var(\"CARGO_MANIFEST_DIR\").unwrap();\n    let input_chainspec = Path::new(&manifest_dir)\n        .join(\"resources\")\n        .join(\"chainspec.toml.in\");\n    let output_chainspec = Path::new(&manifest_dir)\n        .join(\"resources\")\n        .join(\"chainspec.toml\");\n\n    println!(\"cargo:rerun-if-changed={}\", input_chainspec.display());\n\n    let toml = fs::read_to_string(input_chainspec).expect(\"could not read chainspec.toml.in\");\n    let mut doc = toml\n        .parse::<Document>()\n        .expect(\"invalid document in chainspec.toml.in\");\n    let activation_point = SystemTime::now() + Duration::from_secs(40);\n    doc[\"protocol\"][\"activation_point\"] = value(format_rfc3339(activation_point).to_string());\n\n    fs::write(output_chainspec, doc.to_string()).expect(\"could not write chainspec.toml\");\n}\n"
  },
  {
    "path": "execution_engine_testing/test_support/src/chainspec_config.rs",
    "content": "use std::{\n    convert::TryFrom,\n    fs, io,\n    path::{Path, PathBuf},\n};\n\nuse log::error;\nuse once_cell::sync::Lazy;\nuse serde::Deserialize;\n\nuse casper_execution_engine::engine_state::{EngineConfig, EngineConfigBuilder};\nuse casper_storage::data_access_layer::GenesisRequest;\nuse casper_types::{\n    system::auction::VESTING_SCHEDULE_LENGTH_MILLIS, CoreConfig, FeeHandling, GenesisAccount,\n    GenesisConfig, MintCosts, PricingHandling, ProtocolVersion, RefundHandling, StorageCosts,\n    SystemConfig, TimeDiff, WasmConfig,\n};\n\nuse crate::{\n    GenesisConfigBuilder, DEFAULT_ACCOUNTS, DEFAULT_CHAINSPEC_REGISTRY,\n    DEFAULT_GENESIS_CONFIG_HASH, DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_MAX_QUERY_DEPTH,\n};\n\n/// The name of the chainspec file on disk.\npub const CHAINSPEC_NAME: &str = \"chainspec.toml\";\n\n/// Symlink to chainspec.\npub static CHAINSPEC_SYMLINK: Lazy<PathBuf> = Lazy::new(|| {\n    PathBuf::from(env!(\"CARGO_MANIFEST_DIR\"))\n        .join(\"resources/\")\n        .join(CHAINSPEC_NAME)\n});\n\n#[derive(Debug)]\n#[allow(clippy::enum_variant_names)]\npub enum Error {\n    FailedToLoadChainspec {\n        /// Path that failed to be read.\n        path: PathBuf,\n        /// The underlying OS error.\n        error: io::Error,\n    },\n    FailedToParseChainspec(toml::de::Error),\n    Validation,\n}\n\n/// This struct can be parsed from a TOML-encoded chainspec file.  
It means that as the\n/// chainspec format changes over versions, as long as we maintain the core config in this form\n/// in the chainspec file, it can continue to be parsed as an `ChainspecConfig`.\n#[derive(Deserialize, Clone, Default, Debug)]\npub struct ChainspecConfig {\n    /// CoreConfig\n    #[serde(rename = \"core\")]\n    pub core_config: CoreConfig,\n    /// WasmConfig.\n    #[serde(rename = \"wasm\")]\n    pub wasm_config: WasmConfig,\n    /// SystemConfig\n    #[serde(rename = \"system_costs\")]\n    pub system_costs_config: SystemConfig,\n    /// Storage costs.\n    pub storage_costs: StorageCosts,\n}\n\nimpl ChainspecConfig {\n    fn from_bytes(bytes: &[u8]) -> Result<Self, Error> {\n        let chainspec_config: ChainspecConfig =\n            toml::from_slice(bytes).map_err(Error::FailedToParseChainspec)?;\n\n        if !chainspec_config.is_valid() {\n            return Err(Error::Validation);\n        }\n\n        Ok(chainspec_config)\n    }\n\n    fn from_path<P: AsRef<Path>>(path: P) -> Result<Self, Error> {\n        let path = path.as_ref();\n        let bytes = fs::read(path).map_err(|error| Error::FailedToLoadChainspec {\n            path: path.to_path_buf(),\n            error,\n        })?;\n        ChainspecConfig::from_bytes(&bytes)\n    }\n\n    /// Load from path.\n    pub fn from_chainspec_path<P: AsRef<Path>>(filename: P) -> Result<Self, Error> {\n        Self::from_path(filename)\n    }\n\n    fn is_valid(&self) -> bool {\n        if self.core_config.vesting_schedule_period\n            > TimeDiff::from_millis(VESTING_SCHEDULE_LENGTH_MILLIS)\n        {\n            error!(\n                \"vesting schedule period too long (actual {}; maximum {})\",\n                self.core_config.vesting_schedule_period.millis(),\n                VESTING_SCHEDULE_LENGTH_MILLIS,\n            );\n            return false;\n        }\n\n        true\n    }\n\n    pub(crate) fn create_genesis_request_from_chainspec<P: AsRef<Path>>(\n        filename: 
P,\n        genesis_accounts: Vec<GenesisAccount>,\n        protocol_version: ProtocolVersion,\n    ) -> Result<GenesisRequest, Error> {\n        ChainspecConfig::from_path(filename)?\n            .create_genesis_request(genesis_accounts, protocol_version)\n    }\n\n    /// Create genesis request from self.\n    pub fn create_genesis_request(\n        &self,\n        genesis_accounts: Vec<GenesisAccount>,\n        protocol_version: ProtocolVersion,\n    ) -> Result<GenesisRequest, Error> {\n        // if you get a compilation error here, make sure to update the builder below accordingly\n        let ChainspecConfig {\n            core_config,\n            wasm_config,\n            system_costs_config,\n            storage_costs,\n        } = self;\n        let CoreConfig {\n            validator_slots,\n            auction_delay,\n            locked_funds_period,\n            unbonding_delay,\n            round_seigniorage_rate,\n            enable_addressable_entity,\n            minimum_delegation_rate,\n            ..\n        } = core_config;\n\n        let genesis_config = GenesisConfigBuilder::new()\n            .with_accounts(genesis_accounts)\n            .with_wasm_config(*wasm_config)\n            .with_system_config(*system_costs_config)\n            .with_validator_slots(*validator_slots)\n            .with_auction_delay(*auction_delay)\n            .with_locked_funds_period_millis(locked_funds_period.millis())\n            .with_round_seigniorage_rate(*round_seigniorage_rate)\n            .with_unbonding_delay(*unbonding_delay)\n            .with_genesis_timestamp_millis(DEFAULT_GENESIS_TIMESTAMP_MILLIS)\n            .with_storage_costs(*storage_costs)\n            .with_enable_addressable_entity(*enable_addressable_entity)\n            .with_minimum_delegation_rate(*minimum_delegation_rate)\n            .build();\n\n        Ok(GenesisRequest::new(\n            DEFAULT_GENESIS_CONFIG_HASH,\n            protocol_version,\n            genesis_config,\n   
         DEFAULT_CHAINSPEC_REGISTRY.clone(),\n        ))\n    }\n\n    /// Create a `RunGenesisRequest` using values from the local `chainspec.toml`.\n    pub fn create_genesis_request_from_local_chainspec(\n        genesis_accounts: Vec<GenesisAccount>,\n        protocol_version: ProtocolVersion,\n    ) -> Result<GenesisRequest, Error> {\n        Self::create_genesis_request_from_chainspec(\n            &*CHAINSPEC_SYMLINK,\n            genesis_accounts,\n            protocol_version,\n        )\n    }\n\n    /// Sets the minimum delegation rate config option.\n    pub fn with_minimum_delegation_rate(mut self, minimum_delegation_rate: u8) -> Self {\n        self.core_config.minimum_delegation_rate = minimum_delegation_rate;\n        self\n    }\n\n    /// Sets the max associated keys config option.\n    pub fn with_max_associated_keys(&mut self, value: u32) -> &mut Self {\n        self.core_config.max_associated_keys = value;\n        self\n    }\n\n    /// Sets the vesting schedule period millis config option.\n    pub fn with_vesting_schedule_period_millis(mut self, value: u64) -> Self {\n        self.core_config.vesting_schedule_period = TimeDiff::from_millis(value);\n        self\n    }\n\n    /// Sets the max delegators per validator config option.\n    pub fn with_max_delegators_per_validator(mut self, value: u32) -> Self {\n        self.core_config.max_delegators_per_validator = value;\n        self\n    }\n\n    /// Sets the minimum delegation amount config option.\n    pub fn with_minimum_delegation_amount(mut self, minimum_delegation_amount: u64) -> Self {\n        self.core_config.minimum_delegation_amount = minimum_delegation_amount;\n        self\n    }\n\n    /// Sets fee handling config option.\n    pub fn with_fee_handling(mut self, fee_handling: FeeHandling) -> Self {\n        self.core_config.fee_handling = fee_handling;\n        self\n    }\n\n    /// Sets wasm config option.\n    pub fn with_wasm_config(mut self, wasm_config: 
WasmConfig) -> Self {\n        self.wasm_config = wasm_config;\n        self\n    }\n\n    /// Sets mint costs.\n    pub fn with_mint_costs(self, mint_costs: MintCosts) -> Self {\n        self.system_costs_config.with_mint_costs(mint_costs);\n        self\n    }\n\n    /// Sets wasm max stack height.\n    pub fn with_wasm_max_stack_height(mut self, max_stack_height: u32) -> Self {\n        *self.wasm_config.v1_mut().max_stack_height_mut() = max_stack_height;\n        self\n    }\n\n    /// Sets refund handling config option.\n    pub fn with_refund_handling(mut self, refund_handling: RefundHandling) -> Self {\n        self.core_config.refund_handling = refund_handling;\n        self\n    }\n\n    /// Sets pricing handling config option.\n    pub fn with_pricing_handling(mut self, pricing_handling: PricingHandling) -> Self {\n        self.core_config.pricing_handling = pricing_handling;\n        self\n    }\n\n    /// Sets strict argument checking.\n    pub fn with_strict_argument_checking(mut self, strict_argument_checking: bool) -> Self {\n        self.core_config.strict_argument_checking = strict_argument_checking;\n        self\n    }\n\n    /// Sets the enable addressable entity flag.\n    pub fn with_enable_addressable_entity(mut self, enable_addressable_entity: bool) -> Self {\n        self.core_config.enable_addressable_entity = enable_addressable_entity;\n        self\n    }\n\n    /// Returns the `max_associated_keys` setting from the core config.\n    pub fn max_associated_keys(&self) -> u32 {\n        self.core_config.max_associated_keys\n    }\n\n    /// Returns an engine config.\n    pub fn engine_config(&self) -> EngineConfig {\n        EngineConfigBuilder::new()\n            .with_max_query_depth(DEFAULT_MAX_QUERY_DEPTH)\n            .with_max_associated_keys(self.core_config.max_associated_keys)\n            .with_max_runtime_call_stack_height(self.core_config.max_runtime_call_stack_height)\n            
.with_minimum_delegation_amount(self.core_config.minimum_delegation_amount)\n            .with_strict_argument_checking(self.core_config.strict_argument_checking)\n            .with_vesting_schedule_period_millis(self.core_config.vesting_schedule_period.millis())\n            .with_max_delegators_per_validator(self.core_config.max_delegators_per_validator)\n            .with_wasm_config(self.wasm_config)\n            .with_system_config(self.system_costs_config)\n            .with_administrative_accounts(self.core_config.administrators.clone())\n            .with_allow_auction_bids(self.core_config.allow_auction_bids)\n            .with_allow_unrestricted_transfers(self.core_config.allow_unrestricted_transfers)\n            .with_refund_handling(self.core_config.refund_handling)\n            .with_fee_handling(self.core_config.fee_handling)\n            .with_enable_entity(self.core_config.enable_addressable_entity)\n            .with_storage_costs(self.storage_costs)\n            .build()\n    }\n}\n\nimpl From<ChainspecConfig> for EngineConfig {\n    fn from(chainspec_config: ChainspecConfig) -> Self {\n        EngineConfigBuilder::new()\n            .with_max_query_depth(DEFAULT_MAX_QUERY_DEPTH)\n            .with_max_associated_keys(chainspec_config.core_config.max_associated_keys)\n            .with_max_runtime_call_stack_height(\n                chainspec_config.core_config.max_runtime_call_stack_height,\n            )\n            .with_minimum_delegation_amount(chainspec_config.core_config.minimum_delegation_amount)\n            .with_strict_argument_checking(chainspec_config.core_config.strict_argument_checking)\n            .with_vesting_schedule_period_millis(\n                chainspec_config\n                    .core_config\n                    .vesting_schedule_period\n                    .millis(),\n            )\n            .with_max_delegators_per_validator(\n                chainspec_config.core_config.max_delegators_per_validator,\n            
)\n            .with_wasm_config(chainspec_config.wasm_config)\n            .with_system_config(chainspec_config.system_costs_config)\n            .with_enable_entity(chainspec_config.core_config.enable_addressable_entity)\n            .build()\n    }\n}\n\nimpl TryFrom<ChainspecConfig> for GenesisConfig {\n    type Error = Error;\n\n    fn try_from(chainspec_config: ChainspecConfig) -> Result<Self, Self::Error> {\n        Ok(GenesisConfigBuilder::new()\n            .with_accounts(DEFAULT_ACCOUNTS.clone())\n            .with_wasm_config(chainspec_config.wasm_config)\n            .with_system_config(chainspec_config.system_costs_config)\n            .with_validator_slots(chainspec_config.core_config.validator_slots)\n            .with_auction_delay(chainspec_config.core_config.auction_delay)\n            .with_locked_funds_period_millis(\n                chainspec_config.core_config.locked_funds_period.millis(),\n            )\n            .with_round_seigniorage_rate(chainspec_config.core_config.round_seigniorage_rate)\n            .with_unbonding_delay(chainspec_config.core_config.unbonding_delay)\n            .with_genesis_timestamp_millis(DEFAULT_GENESIS_TIMESTAMP_MILLIS)\n            .with_storage_costs(chainspec_config.storage_costs)\n            .with_enable_addressable_entity(chainspec_config.core_config.enable_addressable_entity)\n            .with_minimum_delegation_rate(chainspec_config.core_config.minimum_delegation_rate)\n            .build())\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::{convert::TryFrom, path::PathBuf};\n\n    use casper_types::GenesisConfig;\n    use once_cell::sync::Lazy;\n\n    use super::{ChainspecConfig, CHAINSPEC_NAME};\n\n    pub static LOCAL_PATH: Lazy<PathBuf> =\n        Lazy::new(|| PathBuf::from(env!(\"CARGO_MANIFEST_DIR\")).join(\"../../resources/local/\"));\n\n    #[test]\n    fn should_load_chainspec_config_from_chainspec() {\n        let path = &LOCAL_PATH.join(CHAINSPEC_NAME);\n        let chainspec_config = 
ChainspecConfig::from_chainspec_path(path).unwrap();\n        // Check that the loaded values match the values present in the local chainspec.\n        assert_eq!(chainspec_config.core_config.auction_delay, 1);\n    }\n\n    #[test]\n    fn should_get_exec_config_from_chainspec_values() {\n        let path = &LOCAL_PATH.join(CHAINSPEC_NAME);\n        let chainspec_config = ChainspecConfig::from_chainspec_path(path).unwrap();\n        let config = GenesisConfig::try_from(chainspec_config).unwrap();\n        assert_eq!(config.auction_delay(), 1)\n    }\n}\n"
  },
  {
    "path": "execution_engine_testing/test_support/src/deploy_item.rs",
    "content": "//! Units of account-triggered execution.\n\nuse std::collections::BTreeSet;\n\nuse casper_execution_engine::engine_state::{BlockInfo, InvalidRequest, WasmV1Request};\nuse casper_types::{\n    account::AccountHash, Deploy, DeployHash, ExecutableDeployItem, Gas, InitiatorAddr,\n    TransactionHash,\n};\n\n/// Definition of a deploy with all the details that make it possible to execute it.\n/// Corresponds to the similarly-named IPC protobuf message.\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct DeployItem {\n    /// Address that created and signed this deploy. This address will be used as a context for\n    /// executing session code.\n    pub address: AccountHash,\n    /// Session code.\n    pub session: ExecutableDeployItem,\n    /// Payment code.\n    pub payment: ExecutableDeployItem,\n    /// Gas price specified for this deploy by the user.\n    pub gas_price: u8,\n    /// List of accounts that signed this deploy.\n    pub authorization_keys: BTreeSet<AccountHash>,\n    /// A unique identifier of the deploy.\n    /// Currently it is the hash of the deploy header (see `DeployHeader` in the `types` crate).\n    pub deploy_hash: DeployHash,\n}\n\nimpl DeployItem {\n    /// Creates a [`DeployItem`].\n    pub fn new(\n        address: AccountHash,\n        session: ExecutableDeployItem,\n        payment: ExecutableDeployItem,\n        gas_price: u8,\n        authorization_keys: BTreeSet<AccountHash>,\n        deploy_hash: DeployHash,\n    ) -> Self {\n        DeployItem {\n            address,\n            session,\n            payment,\n            gas_price,\n            authorization_keys,\n            deploy_hash,\n        }\n    }\n\n    /// Is this a native transfer?\n    pub fn is_native_transfer(&self) -> bool {\n        matches!(self.session, ExecutableDeployItem::Transfer { .. 
})\n    }\n\n    /// Creates a new request from a deploy item for use as the session code.\n    pub fn new_session_from_deploy_item(\n        &self,\n        block_info: BlockInfo,\n        gas_limit: Gas,\n    ) -> Result<WasmV1Request, InvalidRequest> {\n        let address = &self.address;\n        let session = &self.session;\n        let authorization_keys = &self.authorization_keys;\n        let deploy_hash = &self.deploy_hash;\n\n        let transaction_hash = TransactionHash::Deploy(*deploy_hash);\n        let initiator_addr = InitiatorAddr::AccountHash(*address);\n        let authorization_keys = authorization_keys.clone();\n        WasmV1Request::new_from_executable_deploy_item(\n            block_info,\n            gas_limit,\n            transaction_hash,\n            initiator_addr,\n            authorization_keys,\n            session,\n        )\n    }\n\n    /// Creates a new request from a deploy item for use as custom payment.\n    pub fn new_custom_payment_from_deploy_item(\n        &self,\n        block_info: BlockInfo,\n        gas_limit: Gas,\n    ) -> Result<WasmV1Request, InvalidRequest> {\n        let address = &self.address;\n        let payment = &self.payment;\n        let authorization_keys = &self.authorization_keys;\n        let deploy_hash = &self.deploy_hash;\n\n        let transaction_hash = TransactionHash::Deploy(*deploy_hash);\n        let initiator_addr = InitiatorAddr::AccountHash(*address);\n        let authorization_keys = authorization_keys.clone();\n\n        WasmV1Request::new_payment_from_executable_deploy_item(\n            block_info,\n            gas_limit,\n            transaction_hash,\n            initiator_addr,\n            authorization_keys,\n            payment,\n        )\n    }\n}\n\nimpl From<Deploy> for DeployItem {\n    fn from(deploy: Deploy) -> Self {\n        let address = deploy.header().account().to_account_hash();\n        let authorization_keys = deploy\n            .approvals()\n            
.iter()\n            .map(|approval| approval.signer().to_account_hash())\n            .collect();\n\n        DeployItem::new(\n            address,\n            deploy.session().clone(),\n            deploy.payment().clone(),\n            deploy.header().gas_price() as u8,\n            authorization_keys,\n            DeployHash::new(*deploy.hash().inner()),\n        )\n    }\n}\n"
  },
  {
    "path": "execution_engine_testing/test_support/src/deploy_item_builder.rs",
    "content": "use std::{collections::BTreeSet, path::Path};\n\nuse rand::Rng;\n\nuse casper_types::{\n    account::AccountHash, bytesrepr::Bytes, contracts::ContractPackageHash, AddressableEntityHash,\n    DeployHash, EntityVersion, ExecutableDeployItem, HashAddr, PackageHash, RuntimeArgs,\n};\n\nuse crate::{deploy_item::DeployItem, utils, DEFAULT_GAS_PRICE};\n\n#[derive(Default)]\nstruct DeployItemData {\n    pub address: Option<AccountHash>,\n    pub payment_code: Option<ExecutableDeployItem>,\n    pub session_code: Option<ExecutableDeployItem>,\n    pub gas_price: u8,\n    pub authorization_keys: BTreeSet<AccountHash>,\n    pub deploy_hash: Option<DeployHash>,\n}\n\n/// Builds a [`DeployItem`].\npub struct DeployItemBuilder {\n    deploy_item: DeployItemData,\n}\n\nimpl DeployItemBuilder {\n    /// Returns a new [`DeployItemBuilder`] struct.\n    pub fn new() -> Self {\n        Default::default()\n    }\n\n    /// Sets the address of the deploy.\n    pub fn with_address(mut self, address: AccountHash) -> Self {\n        self.deploy_item.address = Some(address);\n        self\n    }\n\n    /// Sets the payment bytes for the deploy.\n    pub fn with_payment_bytes<T: Into<Bytes>>(\n        mut self,\n        module_bytes: T,\n        args: RuntimeArgs,\n    ) -> Self {\n        self.deploy_item.payment_code = Some(ExecutableDeployItem::ModuleBytes {\n            module_bytes: module_bytes.into(),\n            args,\n        });\n        self\n    }\n\n    /// Sets the payment bytes of the deploy to an empty Vec.\n    pub fn with_standard_payment(self, args: RuntimeArgs) -> Self {\n        self.with_payment_bytes(vec![], args)\n    }\n\n    /// Sets the payment bytes of a deploy by reading a file and passing [`RuntimeArgs`].\n    pub fn with_payment_code<T: AsRef<Path>>(self, file_name: T, args: RuntimeArgs) -> Self {\n        let module_bytes = utils::read_wasm_file(file_name);\n        self.with_payment_bytes(module_bytes, args)\n    }\n\n    /// Sets payment 
code of the deploy with contract hash.\n    pub fn with_stored_payment_hash(\n        mut self,\n        hash: AddressableEntityHash,\n        entry_point: &str,\n        args: RuntimeArgs,\n    ) -> Self {\n        self.deploy_item.payment_code = Some(ExecutableDeployItem::StoredContractByHash {\n            hash: hash.into(),\n            entry_point: entry_point.into(),\n            args,\n        });\n        self\n    }\n\n    /// Sets the payment code of the deploy with a named key.\n    pub fn with_stored_payment_named_key(\n        mut self,\n        uref_name: &str,\n        entry_point_name: &str,\n        args: RuntimeArgs,\n    ) -> Self {\n        self.deploy_item.payment_code = Some(ExecutableDeployItem::StoredContractByName {\n            name: uref_name.to_owned(),\n            entry_point: entry_point_name.into(),\n            args,\n        });\n        self\n    }\n\n    /// Sets the payment code of the deploy with a contract package hash.\n    pub fn with_stored_versioned_payment_hash(\n        mut self,\n        package_hash: PackageHash,\n        entry_point: &str,\n        args: RuntimeArgs,\n    ) -> Self {\n        self.deploy_item.payment_code = Some(ExecutableDeployItem::StoredVersionedContractByHash {\n            hash: ContractPackageHash::new(package_hash.value()),\n            version: None,\n            entry_point: entry_point.into(),\n            args,\n        });\n        self\n    }\n\n    /// Sets the payment code of the deploy with versioned contract stored under a named key.\n    pub fn with_stored_versioned_payment_named_key(\n        mut self,\n        uref_name: &str,\n        entry_point_name: &str,\n        args: RuntimeArgs,\n    ) -> Self {\n        self.deploy_item.payment_code = Some(ExecutableDeployItem::StoredVersionedContractByName {\n            name: uref_name.to_owned(),\n            version: None,\n            entry_point: entry_point_name.into(),\n            args,\n        });\n        self\n    }\n\n    /// 
Sets the session bytes for the deploy.\n    pub fn with_session_bytes<T: Into<Bytes>>(\n        mut self,\n        module_bytes: T,\n        args: RuntimeArgs,\n    ) -> Self {\n        self.deploy_item.session_code = Some(ExecutableDeployItem::ModuleBytes {\n            module_bytes: module_bytes.into(),\n            args,\n        });\n        self\n    }\n\n    /// Sets the session code for the deploy using a wasm file.\n    pub fn with_session_code<T: AsRef<Path>>(self, file_name: T, args: RuntimeArgs) -> Self {\n        let module_bytes = utils::read_wasm_file(file_name);\n        self.with_session_bytes(module_bytes, args)\n    }\n\n    /// Sets the session code of the deploy as a native transfer.\n    pub fn with_transfer_args(mut self, args: RuntimeArgs) -> Self {\n        self.deploy_item.session_code = Some(ExecutableDeployItem::Transfer { args });\n        self\n    }\n\n    /// Sets the session code for the deploy with a stored contract hash, entrypoint and runtime\n    /// arguments.\n    pub fn with_stored_session_hash(\n        mut self,\n        hash: AddressableEntityHash,\n        entry_point: &str,\n        args: RuntimeArgs,\n    ) -> Self {\n        self.deploy_item.session_code = Some(ExecutableDeployItem::StoredContractByHash {\n            hash: hash.into(),\n            entry_point: entry_point.into(),\n            args,\n        });\n        self\n    }\n\n    /// Sets the session code of the deploy by using a contract stored under a named key.\n    pub fn with_stored_session_named_key(\n        mut self,\n        name: &str,\n        entry_point: &str,\n        args: RuntimeArgs,\n    ) -> Self {\n        self.deploy_item.session_code = Some(ExecutableDeployItem::StoredContractByName {\n            name: name.to_owned(),\n            entry_point: entry_point.into(),\n            args,\n        });\n        self\n    }\n\n    /// Sets the session code of the deploy with a versioned contract stored under a named key.\n    pub fn 
with_stored_versioned_contract_by_name(\n        mut self,\n        name: &str,\n        version: Option<EntityVersion>,\n        entry_point: &str,\n        args: RuntimeArgs,\n    ) -> Self {\n        self.deploy_item.session_code = Some(ExecutableDeployItem::StoredVersionedContractByName {\n            name: name.to_owned(),\n            version,\n            entry_point: entry_point.to_owned(),\n            args,\n        });\n        self\n    }\n\n    /// Sets the session code of the deploy with a stored, versioned contract by contract hash.\n    pub fn with_stored_versioned_contract_by_hash(\n        mut self,\n        hash: HashAddr,\n        version: Option<EntityVersion>,\n        entry_point: &str,\n        args: RuntimeArgs,\n    ) -> Self {\n        self.deploy_item.session_code = Some(ExecutableDeployItem::StoredVersionedContractByHash {\n            hash: hash.into(),\n            version,\n            entry_point: entry_point.to_owned(),\n            args,\n        });\n        self\n    }\n\n    /// Sets the payment code of the deploy with a versioned contract stored under a named key.\n    pub fn with_stored_versioned_payment_contract_by_name(\n        mut self,\n        key_name: &str,\n        version: Option<EntityVersion>,\n        entry_point: &str,\n        args: RuntimeArgs,\n    ) -> Self {\n        self.deploy_item.payment_code = Some(ExecutableDeployItem::StoredVersionedContractByName {\n            name: key_name.to_owned(),\n            version,\n            entry_point: entry_point.to_owned(),\n            args,\n        });\n        self\n    }\n\n    /// Sets the payment code of the deploy using a stored versioned contract by contract hash.\n    pub fn with_stored_versioned_payment_contract_by_hash(\n        mut self,\n        hash: HashAddr,\n        version: Option<EntityVersion>,\n        entry_point: &str,\n        args: RuntimeArgs,\n    ) -> Self {\n        self.deploy_item.payment_code = 
Some(ExecutableDeployItem::StoredVersionedContractByHash {\n            hash: hash.into(),\n            version,\n            entry_point: entry_point.to_owned(),\n            args,\n        });\n        self\n    }\n\n    /// Sets authorization keys for the deploy.\n    pub fn with_authorization_keys(mut self, authorization_keys: &[AccountHash]) -> Self {\n        self.deploy_item.authorization_keys = authorization_keys.iter().copied().collect();\n        self\n    }\n\n    /// Sets the gas price for the deploy.\n    pub fn with_gas_price(mut self, gas_price: u8) -> Self {\n        self.deploy_item.gas_price = gas_price;\n        self\n    }\n\n    /// Sets the hash of the deploy.\n    pub fn with_deploy_hash(mut self, hash: [u8; 32]) -> Self {\n        self.deploy_item.deploy_hash = Some(DeployHash::from_raw(hash));\n        self\n    }\n\n    /// Consumes self and returns a [`DeployItem`].\n    pub fn build(self) -> DeployItem {\n        DeployItem {\n            address: self\n                .deploy_item\n                .address\n                .unwrap_or_else(|| AccountHash::new([0u8; 32])),\n            session: self\n                .deploy_item\n                .session_code\n                .expect(\"should have session code\"),\n            payment: self\n                .deploy_item\n                .payment_code\n                .expect(\"should have payment code\"),\n            gas_price: self.deploy_item.gas_price,\n            authorization_keys: self.deploy_item.authorization_keys,\n            deploy_hash: self\n                .deploy_item\n                .deploy_hash\n                .unwrap_or_else(|| DeployHash::from_raw(rand::thread_rng().gen())),\n        }\n    }\n}\n\nimpl Default for DeployItemBuilder {\n    fn default() -> Self {\n        let deploy_item = DeployItemData {\n            gas_price: DEFAULT_GAS_PRICE,\n            ..Default::default()\n        };\n        DeployItemBuilder { deploy_item }\n    }\n}\n\n#[cfg(test)]\nmod 
tests {\n    use super::*;\n\n    #[test]\n    fn should_not_default_deploy_hash_to_zeros_if_not_specified() {\n        let address = AccountHash::new([42; 32]);\n        let deploy = DeployItemBuilder::new()\n            .with_address(address)\n            .with_authorization_keys(&[address])\n            .with_session_bytes(Vec::new(), RuntimeArgs::new())\n            .with_payment_bytes(Vec::new(), RuntimeArgs::new())\n            .build();\n        assert_ne!(deploy.deploy_hash, DeployHash::default());\n    }\n}\n"
  },
  {
    "path": "execution_engine_testing/test_support/src/execute_request_builder.rs",
    "content": "use std::collections::BTreeSet;\n\nuse casper_execution_engine::engine_state::{\n    BlockInfo, ExecutableItem, SessionDataV1, SessionInputData, WasmV1Request,\n};\nuse casper_types::{\n    account::AccountHash, addressable_entity::DEFAULT_ENTRY_POINT_NAME,\n    contracts::ProtocolVersionMajor, runtime_args, AddressableEntityHash, BlockHash, BlockTime,\n    Digest, EntityVersion, Gas, InitiatorAddr, PackageHash, Phase, PricingMode, ProtocolVersion,\n    RuntimeArgs, TransactionEntryPoint, TransactionHash, TransactionInvocationTarget,\n    TransactionRuntimeParams, TransactionTarget, TransactionV1Hash,\n};\n\nuse crate::{\n    deploy_item::DeployItem, DeployItemBuilder, ARG_AMOUNT, DEFAULT_BLOCK_TIME, DEFAULT_PAYMENT,\n    DEFAULT_PROTOCOL_VERSION,\n};\n\n/// A request comprising a [`WasmV1Request`] for use as session code, and an optional custom\n/// payment `WasmV1Request`.\n#[derive(Debug)]\npub struct ExecuteRequest {\n    /// The session request.\n    pub session: WasmV1Request,\n    /// The optional custom payment request.\n    pub custom_payment: Option<WasmV1Request>,\n}\n\nimpl ExecuteRequest {\n    /// Is install upgrade allowed?\n    pub fn is_install_upgrade_allowed(&self) -> bool {\n        self.session.executable_item.is_install_upgrade_allowed()\n    }\n}\n\n/// Builds an [`ExecuteRequest`].\n#[derive(Debug)]\npub struct ExecuteRequestBuilder {\n    state_hash: Digest,\n    block_time: BlockTime,\n    block_height: u64,\n    parent_block_hash: BlockHash,\n    protocol_version: ProtocolVersion,\n    transaction_hash: TransactionHash,\n    initiator_addr: InitiatorAddr,\n    payment: Option<ExecutableItem>,\n    payment_gas_limit: Gas,\n    payment_entry_point: String,\n    payment_args: RuntimeArgs,\n    session: ExecutableItem,\n    session_gas_limit: Gas,\n    session_entry_point: String,\n    session_args: RuntimeArgs,\n    authorization_keys: BTreeSet<AccountHash>,\n}\n\nconst DEFAULT_GAS_LIMIT: u64 = 5_000_u64 * 
10u64.pow(9);\n\nimpl ExecuteRequestBuilder {\n    /// The default value used for `WasmV1Request::state_hash`.\n    pub const DEFAULT_STATE_HASH: Digest = Digest::from_raw([1; 32]);\n    /// The default value used for `WasmV1Request::transaction_hash`.\n    pub const DEFAULT_TRANSACTION_HASH: TransactionHash =\n        TransactionHash::V1(TransactionV1Hash::from_raw([2; 32]));\n    /// The default value used for `WasmV1Request::entry_point`.\n    pub const DEFAULT_ENTRY_POINT: &'static str = \"call\";\n    /// The default protocol version stored in the BlockInfo\n    pub const DEFAULT_PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::V2_0_0;\n\n    /// Converts a `SessionInputData` into an `ExecuteRequestBuilder`.\n    pub fn from_session_input_data_for_protocol_version(\n        session_input_data: &SessionInputData,\n        protocol_version: ProtocolVersion,\n    ) -> Self {\n        let block_info = BlockInfo::new(\n            Self::DEFAULT_STATE_HASH,\n            BlockTime::new(DEFAULT_BLOCK_TIME),\n            BlockHash::default(),\n            0,\n            protocol_version,\n        );\n        let authorization_keys = session_input_data.signers();\n        let session =\n            WasmV1Request::new_session(block_info, Gas::new(DEFAULT_GAS_LIMIT), session_input_data)\n                .unwrap();\n\n        let payment: Option<ExecutableItem>;\n        let payment_gas_limit: Gas;\n        let payment_entry_point: String;\n        let payment_args: RuntimeArgs;\n        if session_input_data.is_standard_payment() {\n            payment = None;\n            payment_gas_limit = Gas::zero();\n            payment_entry_point = DEFAULT_ENTRY_POINT_NAME.to_string();\n            payment_args = RuntimeArgs::new();\n        } else {\n            let block_info = BlockInfo::new(\n                Self::DEFAULT_STATE_HASH,\n                BlockTime::new(DEFAULT_BLOCK_TIME),\n                BlockHash::default(),\n                0,\n                
protocol_version,\n            );\n            let request = WasmV1Request::new_custom_payment(\n                block_info,\n                Gas::new(DEFAULT_GAS_LIMIT),\n                session_input_data,\n            )\n            .unwrap();\n            payment = Some(request.executable_item);\n            payment_gas_limit = request.gas_limit;\n            payment_entry_point = request.entry_point;\n            payment_args = request.args;\n        }\n\n        ExecuteRequestBuilder {\n            state_hash: session.block_info.state_hash,\n            block_time: session.block_info.block_time,\n            block_height: session.block_info.block_height,\n            parent_block_hash: session.block_info.parent_block_hash,\n            protocol_version: session.block_info.protocol_version,\n            transaction_hash: session.transaction_hash,\n            initiator_addr: session.initiator_addr,\n            payment,\n            payment_gas_limit,\n            payment_entry_point,\n            payment_args,\n            session: session.executable_item,\n            session_gas_limit: session.gas_limit,\n            session_entry_point: session.entry_point,\n            session_args: session.args,\n            authorization_keys,\n        }\n    }\n\n    /// Converts a `SessionInputData` into an `ExecuteRequestBuilder`.\n    pub fn from_session_input_data(session_input_data: &SessionInputData) -> Self {\n        Self::from_session_input_data_for_protocol_version(\n            session_input_data,\n            DEFAULT_PROTOCOL_VERSION,\n        )\n    }\n\n    /// Converts a `DeployItem` into an `ExecuteRequestBuilder`.\n    pub fn from_deploy_item(deploy_item: &DeployItem) -> Self {\n        Self::from_deploy_item_for_protocol_version(deploy_item, DEFAULT_PROTOCOL_VERSION)\n    }\n\n    /// Converts a `DeployItem` into an `ExecuteRequestBuilder`.\n    pub fn from_deploy_item_for_protocol_version(\n        deploy_item: &DeployItem,\n        protocol_version: 
ProtocolVersion,\n    ) -> Self {\n        let authorization_keys = deploy_item.authorization_keys.clone();\n        let block_info = BlockInfo::new(\n            Self::DEFAULT_STATE_HASH,\n            BlockTime::new(DEFAULT_BLOCK_TIME),\n            BlockHash::default(),\n            0,\n            protocol_version,\n        );\n        let session = deploy_item\n            .new_session_from_deploy_item(block_info, Gas::new(DEFAULT_GAS_LIMIT))\n            .unwrap();\n\n        let payment: Option<ExecutableItem>;\n        let payment_gas_limit: Gas;\n        let payment_entry_point: String;\n        let payment_args: RuntimeArgs;\n        if deploy_item.payment.is_standard_payment(Phase::Payment) {\n            payment = None;\n            payment_gas_limit = Gas::zero();\n            payment_entry_point = DEFAULT_ENTRY_POINT_NAME.to_string();\n            payment_args = RuntimeArgs::new();\n        } else {\n            let block_info = BlockInfo::new(\n                Self::DEFAULT_STATE_HASH,\n                BlockTime::new(DEFAULT_BLOCK_TIME),\n                BlockHash::default(),\n                0,\n                protocol_version,\n            );\n            let request = deploy_item\n                .new_custom_payment_from_deploy_item(block_info, Gas::new(DEFAULT_GAS_LIMIT))\n                .unwrap();\n            payment = Some(request.executable_item);\n            payment_gas_limit = request.gas_limit;\n            payment_entry_point = request.entry_point;\n            payment_args = request.args;\n        }\n\n        ExecuteRequestBuilder {\n            state_hash: session.block_info.state_hash,\n            block_time: session.block_info.block_time,\n            block_height: session.block_info.block_height,\n            parent_block_hash: session.block_info.parent_block_hash,\n            protocol_version: session.block_info.protocol_version,\n            transaction_hash: session.transaction_hash,\n            initiator_addr: 
session.initiator_addr,\n            payment,\n            payment_gas_limit,\n            payment_entry_point,\n            payment_args,\n            session: session.executable_item,\n            session_gas_limit: session.gas_limit,\n            session_entry_point: session.entry_point,\n            session_args: session.args,\n            authorization_keys,\n        }\n    }\n\n    /// Returns an [`ExecuteRequest`] derived from a deploy with standard dependencies.\n    pub fn standard(\n        account_hash: AccountHash,\n        session_file: &str,\n        session_args: RuntimeArgs,\n    ) -> Self {\n        Self::standard_with_protocol_version(\n            account_hash,\n            session_file,\n            session_args,\n            DEFAULT_PROTOCOL_VERSION,\n        )\n    }\n\n    /// Returns an [`ExecuteRequest`] derived from a deploy with standard dependencies.\n    pub fn standard_with_protocol_version(\n        account_hash: AccountHash,\n        session_file: &str,\n        session_args: RuntimeArgs,\n        protocol_version: ProtocolVersion,\n    ) -> Self {\n        let deploy_item = DeployItemBuilder::new()\n            .with_address(account_hash)\n            .with_session_code(session_file, session_args)\n            .with_standard_payment(runtime_args! {\n                ARG_AMOUNT => *DEFAULT_PAYMENT\n            })\n            .with_authorization_keys(&[account_hash])\n            .build();\n        Self::from_deploy_item_for_protocol_version(&deploy_item, protocol_version)\n    }\n\n    /// Returns an [`ExecuteRequest`] derived from a deploy with session module bytes.\n    pub fn module_bytes(\n        account_hash: AccountHash,\n        module_bytes: Vec<u8>,\n        session_args: RuntimeArgs,\n    ) -> Self {\n        let deploy_item = DeployItemBuilder::new()\n            .with_address(account_hash)\n            .with_session_bytes(module_bytes, session_args)\n            .with_standard_payment(runtime_args! 
{\n                ARG_AMOUNT => *DEFAULT_PAYMENT\n            })\n            .with_authorization_keys(&[account_hash])\n            .build();\n        Self::from_deploy_item(&deploy_item)\n    }\n\n    /// Returns an [`ExecuteRequest`] derived from a deploy with a session item that will call a\n    /// stored contract by hash.\n    pub fn contract_call_by_hash(\n        sender: AccountHash,\n        contract_hash: AddressableEntityHash,\n        entry_point: &str,\n        args: RuntimeArgs,\n    ) -> Self {\n        let deploy_item = DeployItemBuilder::new()\n            .with_address(sender)\n            .with_stored_session_hash(contract_hash, entry_point, args)\n            .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, })\n            .with_authorization_keys(&[sender])\n            .build();\n        Self::from_deploy_item(&deploy_item)\n    }\n\n    /// Returns an [`ExecuteRequest`] derived from a deploy with a session item that will call a\n    /// stored contract by name.\n    pub fn contract_call_by_name(\n        sender: AccountHash,\n        contract_name: &str,\n        entry_point: &str,\n        args: RuntimeArgs,\n    ) -> Self {\n        let deploy_item = DeployItemBuilder::new()\n            .with_address(sender)\n            .with_stored_session_named_key(contract_name, entry_point, args)\n            .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, })\n            .with_authorization_keys(&[sender])\n            .build();\n        Self::from_deploy_item(&deploy_item)\n    }\n\n    /// Returns an [`ExecuteRequest`] derived from a deploy with a session item that will call a\n    /// versioned stored contract by hash.\n    pub fn contract_call_by_hash_versioned_with_major(\n        sender: AccountHash,\n        contract_package_hash: PackageHash,\n        version: Option<EntityVersion>,\n        protocol_version_major: Option<ProtocolVersionMajor>,\n        entry_point_name: &str,\n        args: RuntimeArgs,\n    ) -> Self {\n        let initiator_addr = InitiatorAddr::AccountHash(sender);\n        let target = TransactionTarget::Stored {\n            id: TransactionInvocationTarget::ByPackageHash {\n                addr: contract_package_hash.value(),\n                version,\n                protocol_version_major,\n            },\n            runtime: TransactionRuntimeParams::VmCasperV1,\n        };\n        let entry_point = TransactionEntryPoint::Custom(entry_point_name.to_owned());\n        let hash = TransactionV1Hash::from_raw([1; 32]);\n        let pricing_mode = PricingMode::PaymentLimited {\n            payment_amount: DEFAULT_PAYMENT.as_u64(),\n            gas_price_tolerance: 1,\n            standard_payment: true,\n        };\n        let mut signers = BTreeSet::new();\n        signers.insert(sender);\n        let session_input_data = SessionInputData::SessionDataV1 {\n            data: SessionDataV1::new(\n                &args,\n                &target,\n                &entry_point,\n                false,\n                &hash,\n                &pricing_mode,\n                &initiator_addr,\n                signers,\n                pricing_mode.is_standard_payment(),\n            ),\n        };\n        Self::from_session_input_data(&session_input_data)\n    }\n\n    /// Returns an [`ExecuteRequest`] derived from a deploy with a session item that will 
call a\n    /// versioned stored contract by hash.\n    pub fn versioned_contract_call_by_hash(\n        sender: AccountHash,\n        contract_package_hash: PackageHash,\n        version: Option<EntityVersion>,\n        entry_point_name: &str,\n        args: RuntimeArgs,\n    ) -> Self {\n        let deploy_item = DeployItemBuilder::new()\n            .with_address(sender)\n            .with_stored_versioned_contract_by_hash(\n                contract_package_hash.value(),\n                version,\n                entry_point_name,\n                args,\n            )\n            .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, })\n            .with_authorization_keys(&[sender])\n            .build();\n        Self::from_deploy_item(&deploy_item)\n    }\n\n    /// Returns an [`ExecuteRequest`] derived from a deploy with a session item that will call a\n    /// versioned stored contract by name.\n    pub fn contract_call_by_name_versioned_with_major(\n        sender: AccountHash,\n        contract_name: &str,\n        version: Option<EntityVersion>,\n        protocol_version_major: Option<ProtocolVersionMajor>,\n        entry_point_name: &str,\n        args: RuntimeArgs,\n    ) -> Self {\n        let initiator_addr = InitiatorAddr::AccountHash(sender);\n        let target = TransactionTarget::Stored {\n            id: TransactionInvocationTarget::ByPackageName {\n                name: contract_name.to_owned(),\n                version,\n                protocol_version_major,\n            },\n            runtime: TransactionRuntimeParams::VmCasperV1,\n        };\n        let entry_point = TransactionEntryPoint::Custom(entry_point_name.to_owned());\n        let hash = TransactionV1Hash::from_raw([1; 32]);\n        let pricing_mode = PricingMode::PaymentLimited {\n            payment_amount: DEFAULT_PAYMENT.as_u64(),\n            gas_price_tolerance: 1,\n            standard_payment: true,\n        };\n        let mut signers = 
BTreeSet::new();\n        signers.insert(sender);\n        let session_input_data = SessionInputData::SessionDataV1 {\n            data: SessionDataV1::new(\n                &args,\n                &target,\n                &entry_point,\n                false,\n                &hash,\n                &pricing_mode,\n                &initiator_addr,\n                signers,\n                pricing_mode.is_standard_payment(),\n            ),\n        };\n        Self::from_session_input_data(&session_input_data)\n    }\n\n    /// Returns an [`ExecuteRequest`] derived from a deploy with a session item that will call a\n    /// versioned stored contract by name.\n    pub fn versioned_contract_call_by_name(\n        sender: AccountHash,\n        contract_name: &str,\n        version: Option<EntityVersion>,\n        entry_point_name: &str,\n        args: RuntimeArgs,\n    ) -> Self {\n        let deploy_item = DeployItemBuilder::new()\n            .with_address(sender)\n            .with_stored_versioned_contract_by_name(contract_name, version, entry_point_name, args)\n            .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, })\n            .with_authorization_keys(&[sender])\n            .build();\n        Self::from_deploy_item(&deploy_item)\n    }\n\n    /// Sets the block time of the [`WasmV1Request`]s.\n    pub fn with_block_time<T: Into<BlockTime>>(mut self, block_time: T) -> Self {\n        self.block_time = block_time.into();\n        self\n    }\n\n    /// Sets the block height of the [`WasmV1Request`]s.\n    pub fn with_block_height(mut self, block_height: u64) -> Self {\n        self.block_height = block_height;\n        self\n    }\n\n    /// Sets the parent block hash of the [`WasmV1Request`]s.\n    pub fn with_parent_block_hash(mut self, parent_block_hash: BlockHash) -> Self {\n        self.parent_block_hash = parent_block_hash;\n        self\n    }\n\n    /// Sets the parent block hash of the [`WasmV1Request`]s.\n    pub fn with_state_hash(mut self, state_hash: Digest) -> Self {\n        self.state_hash = state_hash;\n        self\n    }\n\n    /// Sets the authorization keys used by the [`WasmV1Request`]s.\n    pub fn with_authorization_keys(mut self, authorization_keys: BTreeSet<AccountHash>) -> Self {\n        self.authorization_keys = authorization_keys;\n        self\n    }\n\n    /// Sets the protocol version for the execution request\n    pub fn with_protocol_version(mut self, protocol_version: ProtocolVersion) -> Self {\n        self.protocol_version = protocol_version;\n        self\n    }\n\n    /// Consumes self and returns an `ExecuteRequest`.\n    pub fn build(self) -> ExecuteRequest {\n        let ExecuteRequestBuilder {\n            state_hash,\n            block_time,\n            block_height,\n            parent_block_hash,\n            protocol_version,\n            transaction_hash,\n            initiator_addr,\n            payment,\n            payment_gas_limit,\n            payment_entry_point,\n            payment_args,\n            session,\n            session_gas_limit,\n            session_entry_point,\n       
     session_args,\n            authorization_keys,\n        } = self;\n\n        let block_info = BlockInfo::new(\n            state_hash,\n            block_time,\n            parent_block_hash,\n            block_height,\n            protocol_version,\n        );\n        let maybe_custom_payment = payment.map(|executable_item| WasmV1Request {\n            block_info,\n            transaction_hash,\n            gas_limit: payment_gas_limit,\n            initiator_addr: initiator_addr.clone(),\n            executable_item,\n            entry_point: payment_entry_point,\n            args: payment_args,\n            authorization_keys: authorization_keys.clone(),\n            phase: Phase::Payment,\n        });\n\n        let session = WasmV1Request {\n            block_info,\n            transaction_hash,\n            gas_limit: session_gas_limit,\n            initiator_addr,\n            executable_item: session,\n            entry_point: session_entry_point,\n            args: session_args,\n            authorization_keys,\n            phase: Phase::Session,\n        };\n\n        ExecuteRequest {\n            session,\n            custom_payment: maybe_custom_payment,\n        }\n    }\n}\n"
  },
  {
    "path": "execution_engine_testing/test_support/src/genesis_config_builder.rs",
    "content": "//! A builder for an [`GenesisConfig`].\nuse casper_execution_engine::engine_state::engine_config::DEFAULT_ENABLE_ENTITY;\nuse casper_types::{\n    system::auction::DelegationRate, GenesisAccount, GenesisConfig, HoldBalanceHandling,\n    StorageCosts, SystemConfig, WasmConfig,\n};\nuse num_rational::Ratio;\n\nuse crate::{\n    DEFAULT_AUCTION_DELAY, DEFAULT_GAS_HOLD_BALANCE_HANDLING, DEFAULT_GAS_HOLD_INTERVAL_MILLIS,\n    DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS,\n    DEFAULT_ROUND_SEIGNIORAGE_RATE, DEFAULT_UNBONDING_DELAY, DEFAULT_VALIDATOR_SLOTS,\n};\n\n/// A builder for an [`GenesisConfig`].\n///\n/// Any field that isn't specified will be defaulted.  See [the module docs](index.html) for the set\n/// of default values.\n#[derive(Default, Debug)]\npub struct GenesisConfigBuilder {\n    accounts: Option<Vec<GenesisAccount>>,\n    wasm_config: Option<WasmConfig>,\n    system_config: Option<SystemConfig>,\n    validator_slots: Option<u32>,\n    auction_delay: Option<u64>,\n    locked_funds_period_millis: Option<u64>,\n    round_seigniorage_rate: Option<Ratio<u64>>,\n    unbonding_delay: Option<u64>,\n    genesis_timestamp_millis: Option<u64>,\n    gas_hold_balance_handling: Option<HoldBalanceHandling>,\n    gas_hold_interval_millis: Option<u64>,\n    enable_addressable_entity: Option<bool>,\n    rewards_ratio: Option<Ratio<u64>>,\n    storage_costs: Option<StorageCosts>,\n    minimum_delegation_rate: DelegationRate,\n}\n\nimpl GenesisConfigBuilder {\n    /// Creates a new `ExecConfig` builder.\n    pub fn new() -> Self {\n        GenesisConfigBuilder::default()\n    }\n\n    /// Sets the genesis accounts.\n    pub fn with_accounts(mut self, accounts: Vec<GenesisAccount>) -> Self {\n        self.accounts = Some(accounts);\n        self\n    }\n\n    /// Sets the Wasm config options.\n    pub fn with_wasm_config(mut self, wasm_config: WasmConfig) -> Self {\n        self.wasm_config = Some(wasm_config);\n        self\n    
}\n\n    /// Sets the system config options.\n    pub fn with_system_config(mut self, system_config: SystemConfig) -> Self {\n        self.system_config = Some(system_config);\n        self\n    }\n\n    /// Sets the validator slots config option.\n    pub fn with_validator_slots(mut self, validator_slots: u32) -> Self {\n        self.validator_slots = Some(validator_slots);\n        self\n    }\n\n    /// Sets the auction delay config option.\n    pub fn with_auction_delay(mut self, auction_delay: u64) -> Self {\n        self.auction_delay = Some(auction_delay);\n        self\n    }\n\n    /// Sets the locked funds period config option.\n    pub fn with_locked_funds_period_millis(mut self, locked_funds_period_millis: u64) -> Self {\n        self.locked_funds_period_millis = Some(locked_funds_period_millis);\n        self\n    }\n\n    /// Sets the round seigniorage rate config option.\n    pub fn with_round_seigniorage_rate(mut self, round_seigniorage_rate: Ratio<u64>) -> Self {\n        self.round_seigniorage_rate = Some(round_seigniorage_rate);\n        self\n    }\n\n    /// Sets the unbonding delay config option.\n    pub fn with_unbonding_delay(mut self, unbonding_delay: u64) -> Self {\n        self.unbonding_delay = Some(unbonding_delay);\n        self\n    }\n\n    /// Sets the genesis timestamp config option.\n    pub fn with_genesis_timestamp_millis(mut self, genesis_timestamp_millis: u64) -> Self {\n        self.genesis_timestamp_millis = Some(genesis_timestamp_millis);\n        self\n    }\n\n    /// Sets the enable addressable entity flag.\n    pub fn with_enable_addressable_entity(mut self, enable_addressable_entity: bool) -> Self {\n        self.enable_addressable_entity = Some(enable_addressable_entity);\n        self\n    }\n\n    /// Sets the rewards ratio.\n    pub fn with_rewards_ratio(mut self, rewards_ratio: Ratio<u64>) -> Self {\n        self.rewards_ratio = Some(rewards_ratio);\n        self\n    }\n\n    /// Sets the storage_costs 
handling.\n    pub fn with_storage_costs(mut self, storage_costs: StorageCosts) -> Self {\n        self.storage_costs = Some(storage_costs);\n        self\n    }\n\n    /// Sets the minimum delegation rate config option.\n    pub fn with_minimum_delegation_rate(mut self, minimum_delegation_rate: DelegationRate) -> Self {\n        self.minimum_delegation_rate = minimum_delegation_rate;\n        self\n    }\n\n    /// Builds a new [`GenesisConfig`] object.\n    pub fn build(self) -> GenesisConfig {\n        GenesisConfig::new(\n            self.accounts.unwrap_or_default(),\n            self.wasm_config.unwrap_or_default(),\n            self.system_config.unwrap_or_default(),\n            self.validator_slots.unwrap_or(DEFAULT_VALIDATOR_SLOTS),\n            self.auction_delay.unwrap_or(DEFAULT_AUCTION_DELAY),\n            self.locked_funds_period_millis\n                .unwrap_or(DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS),\n            self.round_seigniorage_rate\n                .unwrap_or(DEFAULT_ROUND_SEIGNIORAGE_RATE),\n            self.unbonding_delay.unwrap_or(DEFAULT_UNBONDING_DELAY),\n            self.genesis_timestamp_millis\n                .unwrap_or(DEFAULT_GENESIS_TIMESTAMP_MILLIS),\n            self.gas_hold_balance_handling\n                .unwrap_or(DEFAULT_GAS_HOLD_BALANCE_HANDLING),\n            self.gas_hold_interval_millis\n                .unwrap_or(DEFAULT_GAS_HOLD_INTERVAL_MILLIS),\n            self.enable_addressable_entity\n                .unwrap_or(DEFAULT_ENABLE_ENTITY),\n            self.rewards_ratio,\n            self.storage_costs.unwrap_or_default(),\n            self.minimum_delegation_rate,\n        )\n    }\n}\n"
  },
  {
    "path": "execution_engine_testing/test_support/src/lib.rs",
    "content": "//! A library to support testing of Wasm smart contracts for use on the Casper Platform.\n\n#![doc(html_root_url = \"https://docs.rs/casper-engine-test-support/9.0.0\")]\n#![doc(\n    html_favicon_url = \"https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon_48.png\",\n    html_logo_url = \"https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon.png\",\n    test(attr(deny(warnings)))\n)]\n#![warn(missing_docs)]\n\nmod chainspec_config;\npub mod deploy_item;\nmod deploy_item_builder;\nmod execute_request_builder;\npub mod genesis_config_builder;\nmod step_request_builder;\nmod transfer_request_builder;\nmod upgrade_request_builder;\npub mod utils;\nmod wasm_test_builder;\n\npub(crate) use genesis_config_builder::GenesisConfigBuilder;\nuse num_rational::Ratio;\nuse once_cell::sync::Lazy;\n\nuse casper_storage::data_access_layer::GenesisRequest;\nuse casper_types::{\n    account::AccountHash, testing::TestRng, ChainspecRegistry, Digest, GenesisAccount,\n    GenesisConfig, HoldBalanceHandling, Motes, ProtocolVersion, PublicKey, SecretKey, StorageCosts,\n    SystemConfig, WasmConfig, WasmV1Config, U512,\n};\n\npub use chainspec_config::{ChainspecConfig, CHAINSPEC_SYMLINK};\npub use deploy_item_builder::DeployItemBuilder;\npub use execute_request_builder::{ExecuteRequest, ExecuteRequestBuilder};\npub use step_request_builder::StepRequestBuilder;\npub use transfer_request_builder::TransferRequestBuilder;\npub use upgrade_request_builder::UpgradeRequestBuilder;\npub use wasm_test_builder::{EntityWithNamedKeys, LmdbWasmTestBuilder, WasmTestBuilder};\n\n/// Default number of validator slots.\npub const DEFAULT_VALIDATOR_SLOTS: u32 = 5;\n/// Default auction delay.\npub const DEFAULT_AUCTION_DELAY: u64 = 1;\n/// Default lock-in period is currently zero.\npub const DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS: u64 = 0;\n/// Default length of total vesting schedule is currently zero.\npub 
const DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS: u64 = 0;\n\n/// Default number of eras that need to pass to be able to withdraw unbonded funds.\npub const DEFAULT_UNBONDING_DELAY: u64 = 7;\n\n/// Round seigniorage rate represented as a fraction of the total supply.\n///\n/// Annual issuance: 8%\n/// Minimum round length: 2^14 ms\n/// Ticks per year: 31536000000\n///\n/// (1+0.08)^((2^14)/31536000000)-1 is expressed as a fractional number below.\npub const DEFAULT_ROUND_SEIGNIORAGE_RATE: Ratio<u64> = Ratio::new_raw(1, 4200000000000000000);\n\n/// Default chain name.\npub const DEFAULT_CHAIN_NAME: &str = \"casper-execution-engine-testing\";\n/// Default genesis timestamp in milliseconds.\npub const DEFAULT_GENESIS_TIMESTAMP_MILLIS: u64 = 0;\n/// Default block time.\npub const DEFAULT_BLOCK_TIME: u64 = 0;\n/// Default gas price.\npub const DEFAULT_GAS_PRICE: u8 = 1;\n/// Amount named argument.\npub const ARG_AMOUNT: &str = \"amount\";\n/// Timestamp increment in milliseconds.\npub const TIMESTAMP_MILLIS_INCREMENT: u64 = 30_000; // 30 seconds\n/// Default gas hold balance handling.\npub const DEFAULT_GAS_HOLD_BALANCE_HANDLING: HoldBalanceHandling = HoldBalanceHandling::Accrued;\n/// Default gas hold interval in milliseconds.\npub const DEFAULT_GAS_HOLD_INTERVAL_MILLIS: u64 = 24 * 60 * 60 * 60;\n\n/// Default value for maximum associated keys configuration option.\npub const DEFAULT_MAX_ASSOCIATED_KEYS: u32 = 100;\n\n/// Default value for a maximum query depth configuration option.\npub const DEFAULT_MAX_QUERY_DEPTH: u64 = 5;\n/// Default value for maximum runtime call stack height configuration option.\npub const DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT: u32 = 12;\n/// Default value for minimum delegation amount in motes.\npub const DEFAULT_MINIMUM_DELEGATION_AMOUNT: u64 = 500 * 1_000_000_000;\n/// Default value for maximum delegation amount in motes.\npub const DEFAULT_MAXIMUM_DELEGATION_AMOUNT: u64 = 1_000_000_000 * 1_000_000_000;\n\n/// Default genesis config hash.\npub 
const DEFAULT_GENESIS_CONFIG_HASH: Digest = Digest::from_raw([42; 32]);\n\n/// Default account secret key.\npub static DEFAULT_ACCOUNT_SECRET_KEY: Lazy<SecretKey> =\n    Lazy::new(|| SecretKey::ed25519_from_bytes([199; SecretKey::ED25519_LENGTH]).unwrap());\n/// Default account public key.\npub static DEFAULT_ACCOUNT_PUBLIC_KEY: Lazy<PublicKey> =\n    Lazy::new(|| PublicKey::from(&*DEFAULT_ACCOUNT_SECRET_KEY));\n/// Default test account address.\npub static DEFAULT_ACCOUNT_ADDR: Lazy<AccountHash> =\n    Lazy::new(|| AccountHash::from(&*DEFAULT_ACCOUNT_PUBLIC_KEY));\n// NOTE: declaring DEFAULT_ACCOUNT_KEY as *DEFAULT_ACCOUNT_ADDR causes tests to stall.\n/// Default account key.\npub static DEFAULT_ACCOUNT_KEY: Lazy<AccountHash> =\n    Lazy::new(|| AccountHash::from(&*DEFAULT_ACCOUNT_PUBLIC_KEY));\n/// Default initial balance of a test account in motes.\npub const DEFAULT_ACCOUNT_INITIAL_BALANCE: u64 = 10_000_000_000_000_000_000_u64;\n/// Minimal amount for a transfer that creates new accounts.\npub const MINIMUM_ACCOUNT_CREATION_BALANCE: u64 = 7_500_000_000_000_000_u64;\n/// Default proposer public key.\npub static DEFAULT_PROPOSER_PUBLIC_KEY: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([198; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\n/// Default proposer address.\npub static DEFAULT_PROPOSER_ADDR: Lazy<AccountHash> =\n    Lazy::new(|| AccountHash::from(&*DEFAULT_PROPOSER_PUBLIC_KEY));\n\n/// Default public key to associate with the sustain purse.\npub static DEFAULT_SUSTAIN_PUBLIC_KEY: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([207; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\n\n/// Default accounts.\npub static DEFAULT_ACCOUNTS: Lazy<Vec<GenesisAccount>> = Lazy::new(|| {\n    let mut ret = Vec::new();\n    let genesis_account = GenesisAccount::account(\n        DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n        
Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n        None,\n    );\n    ret.push(genesis_account);\n    let proposer_account = GenesisAccount::account(\n        DEFAULT_PROPOSER_PUBLIC_KEY.clone(),\n        Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n        None,\n    );\n    ret.push(proposer_account);\n    let rng = &mut TestRng::new();\n    for _ in 0..10 {\n        let filler_account = GenesisAccount::account(\n            PublicKey::random(rng),\n            Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n            None,\n        );\n        ret.push(filler_account);\n    }\n    ret\n});\n/// Default [`ProtocolVersion`].\npub const DEFAULT_PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::V2_0_0;\n/// Default payment.\npub static DEFAULT_PAYMENT: Lazy<U512> = Lazy::new(|| U512::from(10_000_000_000_000u64));\n/// Default [`WasmConfig`].\npub static DEFAULT_WASM_CONFIG: Lazy<WasmConfig> = Lazy::new(WasmConfig::default);\n/// Default [`WasmV1Config`].\npub static DEFAULT_WASM_V1_CONFIG: Lazy<WasmV1Config> = Lazy::new(WasmV1Config::default);\n/// Default [`SystemConfig`].\npub static DEFAULT_SYSTEM_CONFIG: Lazy<SystemConfig> = Lazy::new(SystemConfig::default);\n/// Default [`StorageCosts`].\npub static DEFAULT_STORAGE_COSTS: Lazy<StorageCosts> = Lazy::new(StorageCosts::default);\n\n/// Default [`GenesisConfig`].\npub static DEFAULT_EXEC_CONFIG: Lazy<GenesisConfig> = Lazy::new(|| {\n    GenesisConfigBuilder::default()\n        .with_accounts(DEFAULT_ACCOUNTS.clone())\n        .with_wasm_config(*DEFAULT_WASM_CONFIG)\n        .with_system_config(*DEFAULT_SYSTEM_CONFIG)\n        .with_validator_slots(DEFAULT_VALIDATOR_SLOTS)\n        .with_auction_delay(DEFAULT_AUCTION_DELAY)\n        .with_locked_funds_period_millis(DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS)\n        .with_round_seigniorage_rate(DEFAULT_ROUND_SEIGNIORAGE_RATE)\n        .with_unbonding_delay(DEFAULT_UNBONDING_DELAY)\n        .with_genesis_timestamp_millis(DEFAULT_GENESIS_TIMESTAMP_MILLIS)\n        
.with_storage_costs(*DEFAULT_STORAGE_COSTS)\n        .build()\n});\n\n/// Default [`ChainspecRegistry`].\npub static DEFAULT_CHAINSPEC_REGISTRY: Lazy<ChainspecRegistry> =\n    Lazy::new(|| ChainspecRegistry::new_with_genesis(&[1, 2, 3], &[4, 5, 6]));\n\n/// A [`GenesisRequest`] using cost tables matching those used in Casper Mainnet.\npub static LOCAL_GENESIS_REQUEST: Lazy<GenesisRequest> = Lazy::new(|| {\n    ChainspecConfig::create_genesis_request_from_local_chainspec(\n        DEFAULT_ACCOUNTS.clone(),\n        DEFAULT_PROTOCOL_VERSION,\n    )\n    .expect(\"must create the request\")\n});\n/// Round seigniorage rate from the production chainspec.\npub static PRODUCTION_ROUND_SEIGNIORAGE_RATE: Lazy<Ratio<u64>> = Lazy::new(|| {\n    let chainspec = ChainspecConfig::from_chainspec_path(&*CHAINSPEC_SYMLINK)\n        .expect(\"must create chainspec_config\");\n    chainspec.core_config.round_seigniorage_rate\n});\n/// System address.\npub static SYSTEM_ADDR: Lazy<AccountHash> = Lazy::new(|| PublicKey::System.to_account_hash());\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::MessageLimits;\n\n    #[test]\n    fn defaults_should_match_production_chainspec_values() {\n        let production = ChainspecConfig::from_chainspec_path(&*CHAINSPEC_SYMLINK).unwrap();\n        // No need to test `CoreConfig::validator_slots`.\n        assert_eq!(production.core_config.auction_delay, DEFAULT_AUCTION_DELAY);\n        assert_eq!(\n            production.core_config.locked_funds_period.millis(),\n            DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS\n        );\n        assert_eq!(\n            production.core_config.unbonding_delay,\n            DEFAULT_UNBONDING_DELAY\n        );\n        assert_eq!(\n            production.core_config.round_seigniorage_rate.reduced(),\n            DEFAULT_ROUND_SEIGNIORAGE_RATE.reduced()\n        );\n        assert_eq!(\n            production.core_config.max_associated_keys,\n            DEFAULT_MAX_ASSOCIATED_KEYS\n        
);\n        assert_eq!(\n            production.core_config.max_runtime_call_stack_height,\n            DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT\n        );\n        assert_eq!(\n            production.core_config.minimum_delegation_amount,\n            DEFAULT_MINIMUM_DELEGATION_AMOUNT\n        );\n        assert_eq!(\n            production.core_config.maximum_delegation_amount,\n            DEFAULT_MAXIMUM_DELEGATION_AMOUNT\n        );\n\n        assert_eq!(\n            production.wasm_config.messages_limits(),\n            MessageLimits::default()\n        );\n\n        assert_eq!(production.wasm_config.v1(), &WasmV1Config::default());\n\n        assert_eq!(production.system_costs_config, SystemConfig::default());\n    }\n}\n"
  },
  {
    "path": "execution_engine_testing/test_support/src/step_request_builder.rs",
    "content": "use casper_storage::{\n    data_access_layer::{EvictItem, RewardItem, SlashItem, StepRequest},\n    system::runtime_native::{Config, TransferConfig},\n};\nuse casper_types::{Digest, EraId, ProtocolVersion};\n\n/// Builder for creating a [`StepRequest`].\n#[derive(Debug, Clone)]\npub struct StepRequestBuilder {\n    runtime_config: Config,\n    parent_state_hash: Digest,\n    protocol_version: ProtocolVersion,\n    slash_items: Vec<SlashItem>,\n    reward_items: Vec<RewardItem>,\n    evict_items: Vec<EvictItem>,\n    run_auction: bool,\n    next_era_id: EraId,\n    era_end_timestamp_millis: u64,\n}\n\nimpl StepRequestBuilder {\n    /// Returns a new `StepRequestBuilder`.\n    pub fn new() -> Self {\n        Default::default()\n    }\n\n    /// Sets config.\n    pub fn with_runtime_config(mut self, runtime_config: Config) -> Self {\n        self.runtime_config = runtime_config;\n        self\n    }\n\n    /// Sets `transfer_config` to the imputed value.\n    pub fn with_transfer_config(mut self, transfer_config: TransferConfig) -> Self {\n        self.runtime_config = self.runtime_config.set_transfer_config(transfer_config);\n        self\n    }\n\n    /// Sets `parent_state_hash` to the given [`Digest`].\n    pub fn with_parent_state_hash(mut self, parent_state_hash: Digest) -> Self {\n        self.parent_state_hash = parent_state_hash;\n        self\n    }\n\n    /// Sets `protocol_version` to the given [`ProtocolVersion`].\n    pub fn with_protocol_version(mut self, protocol_version: ProtocolVersion) -> Self {\n        self.protocol_version = protocol_version;\n        self\n    }\n\n    /// Pushes the given [`SlashItem`] into `slash_items`.\n    pub fn with_slash_item(mut self, slash_item: SlashItem) -> Self {\n        self.slash_items.push(slash_item);\n        self\n    }\n\n    /// Pushes the given [`RewardItem`] into `reward_items`.\n    pub fn with_reward_item(mut self, reward_item: RewardItem) -> Self {\n        
self.reward_items.push(reward_item);\n        self\n    }\n\n    /// Pushes the given [`EvictItem`] into `evict_items`.\n    pub fn with_evict_item(mut self, evict_item: EvictItem) -> Self {\n        self.evict_items.push(evict_item);\n        self\n    }\n\n    /// Pushes the given vector of [`EvictItem`] into `evict_items`.\n    pub fn with_evict_items(mut self, evict_items: impl IntoIterator<Item = EvictItem>) -> Self {\n        self.evict_items.extend(evict_items);\n        self\n    }\n\n    /// Sets `run_auction`.\n    pub fn with_run_auction(mut self, run_auction: bool) -> Self {\n        self.run_auction = run_auction;\n        self\n    }\n\n    /// Sets `next_era_id` to the given [`EraId`].\n    pub fn with_next_era_id(mut self, next_era_id: EraId) -> Self {\n        self.next_era_id = next_era_id;\n        self\n    }\n\n    /// Sets `era_end_timestamp_millis`.\n    pub fn with_era_end_timestamp_millis(mut self, era_end_timestamp_millis: u64) -> Self {\n        self.era_end_timestamp_millis = era_end_timestamp_millis;\n        self\n    }\n\n    /// Consumes the [`StepRequestBuilder`] and returns a [`StepRequest`].\n    pub fn build(self) -> StepRequest {\n        StepRequest::new(\n            self.runtime_config,\n            self.parent_state_hash,\n            self.protocol_version,\n            self.slash_items,\n            self.evict_items,\n            self.next_era_id,\n            self.era_end_timestamp_millis,\n        )\n    }\n}\n\nimpl Default for StepRequestBuilder {\n    fn default() -> Self {\n        StepRequestBuilder {\n            runtime_config: Default::default(),\n            parent_state_hash: Default::default(),\n            protocol_version: Default::default(),\n            slash_items: Default::default(),\n            evict_items: Default::default(),\n            run_auction: true, //<-- run_auction by default\n            next_era_id: Default::default(),\n            era_end_timestamp_millis: Default::default(),\n            
reward_items: Default::default(),\n        }\n    }\n}\n"
  },
  {
    "path": "execution_engine_testing/test_support/src/transfer_request_builder.rs",
    "content": "use std::{\n    collections::{BTreeMap, BTreeSet},\n    iter,\n};\n\nuse blake2::{\n    digest::{Update, VariableOutput},\n    VarBlake2b,\n};\nuse casper_execution_engine::engine_state::engine_config::DEFAULT_ENABLE_ENTITY;\nuse num_rational::Ratio;\n\nuse casper_storage::{\n    data_access_layer::TransferRequest,\n    system::runtime_native::{Config as NativeRuntimeConfig, TransferConfig},\n};\nuse casper_types::{\n    account::AccountHash,\n    bytesrepr::ToBytes,\n    system::mint::{ARG_AMOUNT, ARG_ID, ARG_SOURCE, ARG_TARGET},\n    BlockTime, CLValue, Digest, FeeHandling, Gas, InitiatorAddr, ProtocolVersion, RefundHandling,\n    RewardsHandling, RuntimeArgs, TransactionHash, TransactionV1Hash, TransferTarget, URef,\n    DEFAULT_GAS_HOLD_INTERVAL, U512,\n};\n\nuse crate::{\n    DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_PUBLIC_KEY, DEFAULT_BLOCK_TIME, DEFAULT_PROTOCOL_VERSION,\n};\n\n/// Builds a [`TransferRequest`].\n#[derive(Debug)]\npub struct TransferRequestBuilder {\n    config: NativeRuntimeConfig,\n    state_hash: Digest,\n    block_time: BlockTime,\n    protocol_version: ProtocolVersion,\n    transaction_hash: Option<TransactionHash>,\n    initiator: InitiatorAddr,\n    authorization_keys: BTreeSet<AccountHash>,\n    args: BTreeMap<String, CLValue>,\n    gas: Gas,\n}\n\nimpl TransferRequestBuilder {\n    /// The default value used for `TransferRequest::config`.\n    pub const DEFAULT_CONFIG: NativeRuntimeConfig = NativeRuntimeConfig::new(\n        TransferConfig::Unadministered,\n        FeeHandling::PayToProposer,\n        RefundHandling::Refund {\n            refund_ratio: Ratio::new_raw(99, 100),\n        },\n        0,\n        true,\n        true,\n        0,\n        500_000_000_000,\n        500_000_000_000,\n        1_000_000_000_000_000_000,\n        DEFAULT_GAS_HOLD_INTERVAL.millis(),\n        false,\n        Ratio::new_raw(U512::zero(), U512::zero()),\n        DEFAULT_ENABLE_ENTITY,\n        2_500_000_000,\n        
RewardsHandling::Standard,\n    );\n    /// The default value used for `TransferRequest::state_hash`.\n    pub const DEFAULT_STATE_HASH: Digest = Digest::from_raw([1; 32]);\n    /// The default value used for `TransferRequest::gas`.\n    pub const DEFAULT_GAS: u64 = 2_500_000_000;\n\n    /// Constructs a new `TransferRequestBuilder`.\n    pub fn new<A: Into<U512>, T: Into<TransferTarget>>(amount: A, target: T) -> Self {\n        let mut args = BTreeMap::new();\n        let _ = args.insert(\n            ARG_AMOUNT.to_string(),\n            CLValue::from_t(amount.into()).unwrap(),\n        );\n        let _ = args.insert(\n            ARG_ID.to_string(),\n            CLValue::from_t(Option::<u64>::None).unwrap(),\n        );\n        let target_value = match target.into() {\n            TransferTarget::PublicKey(public_key) => CLValue::from_t(public_key),\n            TransferTarget::AccountHash(account_hash) => CLValue::from_t(account_hash),\n            TransferTarget::URef(uref) => CLValue::from_t(uref),\n        }\n        .unwrap();\n        let _ = args.insert(ARG_TARGET.to_string(), target_value);\n        TransferRequestBuilder {\n            config: Self::DEFAULT_CONFIG,\n            state_hash: Self::DEFAULT_STATE_HASH,\n            block_time: BlockTime::new(DEFAULT_BLOCK_TIME),\n            protocol_version: DEFAULT_PROTOCOL_VERSION,\n            transaction_hash: None,\n            initiator: InitiatorAddr::PublicKey(DEFAULT_ACCOUNT_PUBLIC_KEY.clone()),\n            authorization_keys: iter::once(*DEFAULT_ACCOUNT_ADDR).collect(),\n            args,\n            gas: Gas::new(Self::DEFAULT_GAS),\n        }\n    }\n\n    /// Sets the native runtime config of the [`TransferRequest`].\n    pub fn with_native_runtime_config(mut self, config: NativeRuntimeConfig) -> Self {\n        self.config = config;\n        self\n    }\n\n    /// Sets the block time of the [`TransferRequest`].\n    pub fn with_block_time(mut self, block_time: u64) -> Self {\n        
self.block_time = BlockTime::new(block_time);\n        self\n    }\n\n    /// Sets the protocol version used by the [`TransferRequest`].\n    pub fn with_protocol_version(mut self, protocol_version: ProtocolVersion) -> Self {\n        self.protocol_version = protocol_version;\n        self\n    }\n\n    /// Sets the transaction hash used by the [`TransferRequest`].\n    pub fn with_transaction_hash(mut self, transaction_hash: TransactionHash) -> Self {\n        self.transaction_hash = Some(transaction_hash);\n        self\n    }\n\n    /// Sets the initiator used by the [`TransferRequest`], and adds its account hash to the set of\n    /// authorization keys.\n    pub fn with_initiator<T: Into<InitiatorAddr>>(mut self, initiator: T) -> Self {\n        self.initiator = initiator.into();\n        let _ = self\n            .authorization_keys\n            .insert(self.initiator.account_hash());\n        self\n    }\n\n    /// Sets the authorization keys used by the [`TransferRequest`].\n    pub fn with_authorization_keys<T: IntoIterator<Item = AccountHash>>(\n        mut self,\n        authorization_keys: T,\n    ) -> Self {\n        self.authorization_keys = authorization_keys.into_iter().collect();\n        self\n    }\n\n    /// Adds the \"source\" runtime arg, replacing the existing one if it exists.\n    pub fn with_source(mut self, source: URef) -> Self {\n        let value = CLValue::from_t(source).unwrap();\n        let _ = self.args.insert(ARG_SOURCE.to_string(), value);\n        self\n    }\n\n    /// Adds the \"id\" runtime arg, replacing the existing one if it exists..\n    pub fn with_transfer_id(mut self, id: u64) -> Self {\n        let value = CLValue::from_t(Some(id)).unwrap();\n        let _ = self.args.insert(ARG_ID.to_string(), value);\n        self\n    }\n\n    /// Consumes self and returns a `TransferRequest`.\n    ///\n    /// If a transaction hash was not provided, the blake2b hash of the contents of the other fields\n    /// will be calculated, 
so that different requests will have different transaction hashes.  Note\n    /// that this generated hash is not the same as what would have been generated on an actual\n    /// `Transaction` for an equivalent request.\n    pub fn build(self) -> TransferRequest {\n        let txn_hash = match self.transaction_hash {\n            Some(txn_hash) => txn_hash,\n            None => {\n                let mut result = [0; 32];\n                let mut hasher = VarBlake2b::new(32).unwrap();\n\n                match &self.config.transfer_config() {\n                    TransferConfig::Administered {\n                        administrative_accounts,\n                        allow_unrestricted_transfers,\n                    } => hasher.update(\n                        (administrative_accounts, allow_unrestricted_transfers)\n                            .to_bytes()\n                            .unwrap(),\n                    ),\n                    TransferConfig::Unadministered => {\n                        hasher.update([1]);\n                    }\n                }\n                hasher.update(self.config.fee_handling().to_bytes().unwrap());\n                hasher.update(self.config.refund_handling().to_bytes().unwrap());\n                hasher.update(\n                    self.config\n                        .vesting_schedule_period_millis()\n                        .to_bytes()\n                        .unwrap(),\n                );\n                hasher.update(self.config.allow_auction_bids().to_bytes().unwrap());\n                hasher.update(self.config.compute_rewards().to_bytes().unwrap());\n                hasher.update(\n                    self.config\n                        .max_delegators_per_validator()\n                        .to_bytes()\n                        .unwrap(),\n                );\n                hasher.update(\n                    self.config\n                        .global_minimum_delegation_amount()\n                        
.to_bytes()\n                        .unwrap(),\n                );\n                hasher.update(self.state_hash);\n                hasher.update(self.block_time.to_bytes().unwrap());\n                hasher.update(self.protocol_version.to_bytes().unwrap());\n                hasher.update(self.initiator.to_bytes().unwrap());\n                hasher.update(self.authorization_keys.to_bytes().unwrap());\n                hasher.update(self.args.to_bytes().unwrap());\n                hasher.update(self.gas.to_bytes().unwrap());\n                hasher.finalize_variable(|slice| {\n                    result.copy_from_slice(slice);\n                });\n                TransactionHash::V1(TransactionV1Hash::from_raw(result))\n            }\n        };\n\n        TransferRequest::with_runtime_args(\n            self.config,\n            self.state_hash,\n            self.protocol_version,\n            txn_hash,\n            self.initiator,\n            self.authorization_keys,\n            RuntimeArgs::from(self.args),\n        )\n    }\n\n    /// Sets the runtime args used by the [`TransferRequest`].\n    ///\n    /// NOTE: This is not generally useful for creating a valid `TransferRequest`, and hence is\n    /// subject to change or deletion without notice.\n    #[doc(hidden)]\n    pub fn with_args(mut self, args: RuntimeArgs) -> Self {\n        self.args = args\n            .named_args()\n            .map(|named_arg| (named_arg.name().to_string(), named_arg.cl_value().clone()))\n            .collect();\n        self\n    }\n}\n"
  },
  {
    "path": "execution_engine_testing/test_support/src/upgrade_request_builder.rs",
    "content": "use std::collections::BTreeMap;\n\nuse num_rational::Ratio;\n\nuse casper_types::{\n    system::auction::DelegationRate, ChainspecRegistry, Digest, EraId, FeeHandling,\n    HoldBalanceHandling, Key, ProtocolUpgradeConfig, ProtocolVersion, RewardsHandling, StoredValue,\n};\n\n/// Builds an `UpgradeConfig`.\npub struct UpgradeRequestBuilder {\n    pre_state_hash: Digest,\n    current_protocol_version: ProtocolVersion,\n    new_protocol_version: ProtocolVersion,\n    activation_point: Option<EraId>,\n    new_gas_hold_handling: Option<HoldBalanceHandling>,\n    new_gas_hold_interval: Option<u64>,\n    new_validator_slots: Option<u32>,\n    new_auction_delay: Option<u64>,\n    new_locked_funds_period_millis: Option<u64>,\n    new_round_seigniorage_rate: Option<Ratio<u64>>,\n    new_unbonding_delay: Option<u64>,\n    global_state_update: BTreeMap<Key, StoredValue>,\n    chainspec_registry: ChainspecRegistry,\n    fee_handling: FeeHandling,\n    validator_minimum_bid_amount: u64,\n    maximum_delegation_amount: u64,\n    minimum_delegation_amount: u64,\n    enable_addressable_entity: bool,\n    rewards_handling: RewardsHandling,\n    new_minimum_delegation_rate: Option<DelegationRate>,\n}\n\nimpl UpgradeRequestBuilder {\n    /// Returns a new `UpgradeRequestBuilder`.\n    pub fn new() -> Self {\n        Default::default()\n    }\n\n    /// Sets a pre-state hash using a [`Digest`].\n    pub fn with_pre_state_hash(mut self, pre_state_hash: Digest) -> Self {\n        self.pre_state_hash = pre_state_hash;\n        self\n    }\n\n    /// Sets `current_protocol_version` to the given [`ProtocolVersion`].\n    pub fn with_current_protocol_version(mut self, protocol_version: ProtocolVersion) -> Self {\n        self.current_protocol_version = protocol_version;\n        self\n    }\n\n    /// Sets `new_protocol_version` to the given [`ProtocolVersion`].\n    pub fn with_new_protocol_version(mut self, protocol_version: ProtocolVersion) -> Self {\n        
self.new_protocol_version = protocol_version;\n        self\n    }\n\n    /// Sets `with_new_gas_hold_handling`.\n    pub fn with_new_gas_hold_handling(mut self, gas_hold_handling: HoldBalanceHandling) -> Self {\n        self.new_gas_hold_handling = Some(gas_hold_handling);\n        self\n    }\n\n    /// Sets `with_new_gas_hold_interval`.\n    pub fn with_new_gas_hold_interval(mut self, gas_hold_interval: u64) -> Self {\n        self.new_gas_hold_interval = Some(gas_hold_interval);\n        self\n    }\n\n    /// Sets `new_validator_slots`.\n    pub fn with_new_validator_slots(mut self, new_validator_slots: u32) -> Self {\n        self.new_validator_slots = Some(new_validator_slots);\n        self\n    }\n\n    /// Sets `new_auction_delay`.\n    pub fn with_new_auction_delay(mut self, new_auction_delay: u64) -> Self {\n        self.new_auction_delay = Some(new_auction_delay);\n        self\n    }\n\n    /// Sets `new_locked_funds_period_millis`.\n    pub fn with_new_locked_funds_period_millis(\n        mut self,\n        new_locked_funds_period_millis: u64,\n    ) -> Self {\n        self.new_locked_funds_period_millis = Some(new_locked_funds_period_millis);\n        self\n    }\n\n    /// Sets `new_round_seigniorage_rate`.\n    pub fn with_new_round_seigniorage_rate(mut self, rate: Ratio<u64>) -> Self {\n        self.new_round_seigniorage_rate = Some(rate);\n        self\n    }\n\n    /// Sets `new_unbonding_delay`.\n    pub fn with_new_unbonding_delay(mut self, unbonding_delay: u64) -> Self {\n        self.new_unbonding_delay = Some(unbonding_delay);\n        self\n    }\n\n    /// Sets `global_state_update`.\n    pub fn with_global_state_update(\n        mut self,\n        global_state_update: BTreeMap<Key, StoredValue>,\n    ) -> Self {\n        self.global_state_update = global_state_update;\n        self\n    }\n\n    /// Sets `activation_point`.\n    pub fn with_activation_point(mut self, activation_point: EraId) -> Self {\n        self.activation_point = 
Some(activation_point);\n        self\n    }\n\n    /// Sets the Chainspec registry.\n    pub fn with_chainspec_registry(mut self, chainspec_registry: ChainspecRegistry) -> Self {\n        self.chainspec_registry = chainspec_registry;\n        self\n    }\n\n    /// Sets the fee handling.\n    pub fn with_fee_handling(mut self, fee_handling: FeeHandling) -> Self {\n        self.fee_handling = fee_handling;\n        self\n    }\n\n    /// Set the validator minimum bid amount.\n    pub fn with_validator_minimum_bid_amount(mut self, validator_minimum_bid_amount: u64) -> Self {\n        self.validator_minimum_bid_amount = validator_minimum_bid_amount;\n        self\n    }\n\n    /// Sets the maximum delegation for the validators bid during migration.\n    pub fn with_maximum_delegation_amount(mut self, maximum_delegation_amount: u64) -> Self {\n        self.maximum_delegation_amount = maximum_delegation_amount;\n        self\n    }\n\n    /// Sets the minimum delegation for the validators bid during migration.\n    pub fn with_minimum_delegation_amount(mut self, minimum_delegation_amount: u64) -> Self {\n        self.minimum_delegation_amount = minimum_delegation_amount;\n        self\n    }\n\n    /// Sets the enable entity flag.\n    pub fn with_enable_addressable_entity(mut self, enable_entity: bool) -> Self {\n        self.enable_addressable_entity = enable_entity;\n        self\n    }\n\n    /// Sets the rewards handling\n    pub fn with_rewards_handling(mut self, rewards_handling: RewardsHandling) -> Self {\n        self.rewards_handling = rewards_handling;\n        self\n    }\n\n    /// Sets the minimum delegation rate for validator bids and reservations.\n    pub fn with_new_minimum_delegation_rate(\n        mut self,\n        new_minimum_delegation_rate: DelegationRate,\n    ) -> Self {\n        self.new_minimum_delegation_rate = Some(new_minimum_delegation_rate);\n        self\n    }\n\n    /// Consumes the `UpgradeRequestBuilder` and returns an 
[`ProtocolUpgradeConfig`].\n    pub fn build(self) -> ProtocolUpgradeConfig {\n        ProtocolUpgradeConfig::new(\n            self.pre_state_hash,\n            self.current_protocol_version,\n            self.new_protocol_version,\n            self.activation_point,\n            self.new_gas_hold_handling,\n            self.new_gas_hold_interval,\n            self.new_validator_slots,\n            self.new_auction_delay,\n            self.new_locked_funds_period_millis,\n            self.new_round_seigniorage_rate,\n            self.new_unbonding_delay,\n            self.global_state_update,\n            self.chainspec_registry,\n            self.fee_handling,\n            self.validator_minimum_bid_amount,\n            self.maximum_delegation_amount,\n            self.minimum_delegation_amount,\n            self.enable_addressable_entity,\n            self.rewards_handling,\n            self.new_minimum_delegation_rate,\n        )\n    }\n}\n\nimpl Default for UpgradeRequestBuilder {\n    fn default() -> UpgradeRequestBuilder {\n        UpgradeRequestBuilder {\n            pre_state_hash: Default::default(),\n            current_protocol_version: Default::default(),\n            new_protocol_version: Default::default(),\n            activation_point: None,\n            new_gas_hold_handling: None,\n            new_gas_hold_interval: None,\n            new_validator_slots: None,\n            new_auction_delay: None,\n            new_locked_funds_period_millis: None,\n            new_round_seigniorage_rate: None,\n            new_unbonding_delay: None,\n            global_state_update: Default::default(),\n            chainspec_registry: ChainspecRegistry::new_with_optional_global_state(&[], None),\n            fee_handling: FeeHandling::default(),\n            validator_minimum_bid_amount: 2_500_000_000_000u64,\n            maximum_delegation_amount: u64::MAX,\n            minimum_delegation_amount: 0,\n            enable_addressable_entity: false,\n            
rewards_handling: RewardsHandling::Standard,\n            new_minimum_delegation_rate: None,\n        }\n    }\n}\n"
  },
  {
    "path": "execution_engine_testing/test_support/src/utils.rs",
    "content": "//! Utility types and functions for working with execution engine tests.\n\nuse std::{\n    env, fs,\n    path::{Path, PathBuf},\n};\n\nuse once_cell::sync::Lazy;\n\nuse casper_execution_engine::engine_state::{Error, WasmV1Result};\nuse casper_storage::data_access_layer::GenesisRequest;\nuse casper_types::{bytesrepr::Bytes, GenesisAccount, GenesisConfig};\n\nuse super::{DEFAULT_ROUND_SEIGNIORAGE_RATE, DEFAULT_SYSTEM_CONFIG, DEFAULT_UNBONDING_DELAY};\nuse crate::{\n    GenesisConfigBuilder, DEFAULT_AUCTION_DELAY, DEFAULT_CHAINSPEC_REGISTRY,\n    DEFAULT_GENESIS_CONFIG_HASH, DEFAULT_GENESIS_TIMESTAMP_MILLIS,\n    DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_PROTOCOL_VERSION, DEFAULT_STORAGE_COSTS,\n    DEFAULT_VALIDATOR_SLOTS, DEFAULT_WASM_CONFIG,\n};\n\nstatic RUST_WORKSPACE_PATH: Lazy<PathBuf> = Lazy::new(|| {\n    let path = Path::new(env!(\"CARGO_MANIFEST_DIR\"))\n        .parent()\n        .and_then(Path::parent)\n        .expect(\"CARGO_MANIFEST_DIR should have parent\");\n    assert!(\n        path.exists(),\n        \"Workspace path {} does not exists\",\n        path.display()\n    );\n    path.to_path_buf()\n});\n// The location of compiled Wasm files if compiled from the Rust sources within the casper-node\n// repo, i.e. 'casper-node/target/wasm32-unknown-unknown/release/'.\nstatic RUST_WORKSPACE_WASM_PATH: Lazy<PathBuf> = Lazy::new(|| {\n    let path = RUST_WORKSPACE_PATH\n        .join(\"target\")\n        .join(\"wasm32-unknown-unknown\")\n        .join(\"release\");\n    assert!(\n        path.exists() || RUST_TOOL_WASM_PATH.exists(),\n        \"Rust Wasm path {} does not exists\",\n        path.display()\n    );\n    path\n});\n// The location of compiled Wasm files if running from within the 'tests' crate generated by the\n// cargo_casper tool, i.e. 
'wasm/'.\nstatic RUST_TOOL_WASM_PATH: Lazy<PathBuf> = Lazy::new(|| {\n    env::current_dir()\n        .expect(\"should get current working dir\")\n        .join(\"wasm\")\n});\n// The location of compiled Wasm files if compiled from the Rust sources within the casper-node\n// repo where `CARGO_TARGET_DIR` is set, i.e.\n// '<CARGO_TARGET_DIR>/wasm32-unknown-unknown/release/'.\nstatic MAYBE_CARGO_TARGET_DIR_WASM_PATH: Lazy<Option<PathBuf>> = Lazy::new(|| {\n    let maybe_target = std::env::var(\"CARGO_TARGET_DIR\").ok();\n    maybe_target.as_ref().map(|path| {\n        Path::new(path)\n            .join(\"wasm32-unknown-unknown\")\n            .join(\"release\")\n    })\n});\nstatic WASM_PATHS: Lazy<Vec<PathBuf>> = Lazy::new(get_compiled_wasm_paths);\n\n/// Constructs a list of paths that should be considered while looking for a compiled wasm file.\nfn get_compiled_wasm_paths() -> Vec<PathBuf> {\n    let mut ret = vec![\n        RUST_WORKSPACE_WASM_PATH.clone(),\n        RUST_TOOL_WASM_PATH.clone(),\n    ];\n    if let Some(cargo_target_dir_wasm_path) = &*MAYBE_CARGO_TARGET_DIR_WASM_PATH {\n        ret.push(cargo_target_dir_wasm_path.clone());\n    };\n    ret\n}\n\n/// Reads a given compiled contract file based on path\npub fn read_wasm_file<T: AsRef<Path>>(contract_file: T) -> Bytes {\n    let mut attempted_paths = vec![];\n\n    if contract_file.as_ref().is_relative() {\n        // Find first path to a given file found in a list of paths\n        for wasm_path in WASM_PATHS.iter() {\n            let mut filename = wasm_path.clone();\n            filename.push(contract_file.as_ref());\n            if let Ok(wasm_bytes) = fs::read(&filename) {\n                return Bytes::from(wasm_bytes);\n            }\n            attempted_paths.push(filename);\n        }\n    }\n    // Try just opening in case the arg is a valid path relative to current working dir, or is a\n    // valid absolute path.\n    if let Ok(wasm_bytes) = fs::read(contract_file.as_ref()) {\n        
return Bytes::from(wasm_bytes);\n    }\n    attempted_paths.push(contract_file.as_ref().to_owned());\n\n    let mut error_msg =\n        \"\\nFailed to open compiled Wasm file.  Tried the following locations:\\n\".to_string();\n    for attempted_path in attempted_paths {\n        error_msg = format!(\"{}    - {}\\n\", error_msg, attempted_path.display());\n    }\n\n    panic!(\"{}\\n\", error_msg);\n}\n\n/// Returns an [`GenesisConfig`].\npub fn create_genesis_config(accounts: Vec<GenesisAccount>) -> GenesisConfig {\n    let wasm_config = *DEFAULT_WASM_CONFIG;\n    let system_config = *DEFAULT_SYSTEM_CONFIG;\n    let validator_slots = DEFAULT_VALIDATOR_SLOTS;\n    let auction_delay = DEFAULT_AUCTION_DELAY;\n    let locked_funds_period_millis = DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS;\n    let round_seigniorage_rate = DEFAULT_ROUND_SEIGNIORAGE_RATE;\n    let unbonding_delay = DEFAULT_UNBONDING_DELAY;\n    let genesis_timestamp_millis = DEFAULT_GENESIS_TIMESTAMP_MILLIS;\n    let storage_costs = *DEFAULT_STORAGE_COSTS;\n\n    GenesisConfigBuilder::default()\n        .with_accounts(accounts)\n        .with_wasm_config(wasm_config)\n        .with_system_config(system_config)\n        .with_validator_slots(validator_slots)\n        .with_auction_delay(auction_delay)\n        .with_locked_funds_period_millis(locked_funds_period_millis)\n        .with_round_seigniorage_rate(round_seigniorage_rate)\n        .with_unbonding_delay(unbonding_delay)\n        .with_genesis_timestamp_millis(genesis_timestamp_millis)\n        .with_storage_costs(storage_costs)\n        .build()\n}\n\n/// Returns a [`GenesisRequest`].\npub fn create_run_genesis_request(accounts: Vec<GenesisAccount>) -> GenesisRequest {\n    let config = create_genesis_config(accounts);\n    GenesisRequest::new(\n        DEFAULT_GENESIS_CONFIG_HASH,\n        DEFAULT_PROTOCOL_VERSION,\n        config,\n        DEFAULT_CHAINSPEC_REGISTRY.clone(),\n    )\n}\n\n/// Returns an error if the `ExecutionResult` has an 
error.\n///\n/// # Panics\n/// * Panics if the result does not have a precondition failure.\n/// * Panics if result.as_error() is `None`.\npub fn get_precondition_failure(exec_result: &WasmV1Result) -> &Error {\n    assert!(\n        exec_result.has_precondition_failure(),\n        \"should be a precondition failure\"\n    );\n    exec_result.error().expect(\"should have an error\")\n}\n"
  },
  {
    "path": "execution_engine_testing/test_support/src/wasm_test_builder.rs",
    "content": "use std::{\n    collections::{BTreeMap, BTreeSet},\n    convert::TryFrom,\n    ffi::OsStr,\n    fs,\n    iter::{self, FromIterator},\n    ops::Deref,\n    path::{Path, PathBuf},\n    rc::Rc,\n    sync::Arc,\n};\n\nuse filesize::PathExt;\nuse lmdb::DatabaseFlags;\nuse num_rational::Ratio;\nuse num_traits::{CheckedMul, Zero};\nuse tempfile::TempDir;\n\nuse casper_execution_engine::engine_state::{\n    EngineConfig, Error, ExecutionEngineV1, WasmV1Request, WasmV1Result, DEFAULT_MAX_QUERY_DEPTH,\n};\nuse casper_storage::{\n    data_access_layer::{\n        balance::BalanceHandling, AuctionMethod, BalanceIdentifier, BalanceRequest, BalanceResult,\n        BiddingRequest, BiddingResult, BidsRequest, BlockRewardsRequest, BlockRewardsResult,\n        BlockStore, DataAccessLayer, EraValidatorsRequest, EraValidatorsResult, FeeRequest,\n        FeeResult, FlushRequest, FlushResult, GenesisRequest, GenesisResult, HandleFeeMode,\n        HandleFeeRequest, HandleFeeResult, MessageTopicsRequest, MessageTopicsResult,\n        ProofHandling, ProtocolUpgradeRequest, ProtocolUpgradeResult, PruneRequest, PruneResult,\n        QueryRequest, QueryResult, RoundSeigniorageRateRequest, RoundSeigniorageRateResult,\n        StepRequest, StepResult, SystemEntityRegistryPayload, SystemEntityRegistryRequest,\n        SystemEntityRegistryResult, SystemEntityRegistrySelector, TotalSupplyRequest,\n        TotalSupplyResult, TransferRequest, TrieRequest,\n    },\n    global_state::{\n        state::{\n            lmdb::LmdbGlobalState, scratch::ScratchGlobalState, CommitProvider, ScratchProvider,\n            StateProvider, StateReader,\n        },\n        transaction_source::lmdb::LmdbEnvironment,\n        trie::Trie,\n        trie_store::lmdb::LmdbTrieStore,\n    },\n    system::runtime_native::{Config as NativeRuntimeConfig, TransferConfig},\n    tracking_copy::{TrackingCopyEntityExt, TrackingCopyExt},\n    AddressGenerator,\n};\n\nuse casper_types::{\n    
account::AccountHash,\n    addressable_entity::{EntityKindTag, MessageTopics, NamedKeyAddr},\n    bytesrepr::{self, FromBytes},\n    contracts::{ContractHash, NamedKeys},\n    execution::Effects,\n    global_state::TrieMerkleProof,\n    runtime_args,\n    system::{\n        auction::{\n            BidAddrTag, BidKind, EraValidators, Unbond, UnbondKind, UnbondingPurse, ValidatorBid,\n            ValidatorWeights, WithdrawPurses, ARG_ERA_END_TIMESTAMP_MILLIS, ARG_EVICTED_VALIDATORS,\n            AUCTION_DELAY_KEY, ERA_ID_KEY, METHOD_RUN_AUCTION, UNBONDING_DELAY_KEY,\n        },\n        mint::{MINT_GAS_HOLD_HANDLING_KEY, MINT_GAS_HOLD_INTERVAL_KEY},\n        AUCTION, HANDLE_PAYMENT, MINT, STANDARD_PAYMENT,\n    },\n    AccessRights, Account, AddressableEntity, AddressableEntityHash, AuctionCosts, BlockGlobalAddr,\n    BlockTime, ByteCode, ByteCodeAddr, ByteCodeHash, CLTyped, CLValue, Contract, Digest,\n    EntityAddr, EntryPoints, EraId, FeeHandling, Gas, HandlePaymentCosts, HoldBalanceHandling,\n    InitiatorAddr, Key, KeyTag, MintCosts, Motes, Package, PackageHash, Phase,\n    ProtocolUpgradeConfig, ProtocolVersion, PublicKey, RefundHandling, RewardsHandling,\n    StoredValue, SystemHashRegistry, TransactionHash, TransactionV1Hash, URef, OS_PAGE_SIZE, U512,\n};\n\nuse crate::{\n    chainspec_config::{ChainspecConfig, CHAINSPEC_SYMLINK},\n    ExecuteRequest, ExecuteRequestBuilder, StepRequestBuilder, DEFAULT_GAS_PRICE,\n    DEFAULT_PROPOSER_ADDR, DEFAULT_PROTOCOL_VERSION, SYSTEM_ADDR,\n};\n\n/// LMDB initial map size is calculated based on DEFAULT_LMDB_PAGES and systems page size.\npub(crate) const DEFAULT_LMDB_PAGES: usize = 256_000_000;\n\n/// LMDB max readers\n///\n/// The default value is chosen to be the same as the node itself.\npub(crate) const DEFAULT_MAX_READERS: u32 = 512;\n\n/// This is appended to the data dir path provided to the `LmdbWasmTestBuilder`\".\nconst GLOBAL_STATE_DIR: &str = \"global_state\";\n\n/// A wrapper structure that groups an entity 
alongside its namedkeys.\n#[derive(Debug)]\npub struct EntityWithNamedKeys {\n    entity: AddressableEntity,\n    named_keys: NamedKeys,\n}\n\nimpl EntityWithNamedKeys {\n    /// Creates a new instance of an Entity with its NamedKeys.\n    pub fn new(entity: AddressableEntity, named_keys: NamedKeys) -> Self {\n        Self { entity, named_keys }\n    }\n\n    /// Returns a reference to the Entity.\n    pub fn entity(&self) -> AddressableEntity {\n        self.entity.clone()\n    }\n\n    /// Returns a reference to the main purse for the inner entity.\n    pub fn main_purse(&self) -> URef {\n        self.entity.main_purse()\n    }\n\n    /// Returns a reference to the NamedKeys.\n    pub fn named_keys(&self) -> &NamedKeys {\n        &self.named_keys\n    }\n}\n\n/// Builder for simple WASM test\npub struct WasmTestBuilder<S> {\n    /// Data access layer.\n    data_access_layer: Arc<S>,\n    /// [`ExecutionEngineV1`] is wrapped in [`Rc`] to work around a missing [`Clone`]\n    /// implementation.\n    execution_engine: Rc<ExecutionEngineV1>,\n    /// The chainspec.\n    chainspec: ChainspecConfig,\n    exec_results: Vec<WasmV1Result>,\n    upgrade_results: Vec<ProtocolUpgradeResult>,\n    prune_results: Vec<PruneResult>,\n    genesis_hash: Option<Digest>,\n    /// Post state hash.\n    post_state_hash: Option<Digest>,\n    /// Cached effects after successful runs i.e. 
`effects[0]` is the collection of effects for\n    /// first exec call, etc.\n    effects: Vec<Effects>,\n    /// Genesis effects.\n    genesis_effects: Option<Effects>,\n    /// Cached system account.\n    system_account: Option<AddressableEntity>,\n    /// Scratch global state used for in-memory execution and commit optimization.\n    scratch_global_state: Option<ScratchGlobalState>,\n    /// Global state dir, for implementations that define one.\n    global_state_dir: Option<PathBuf>,\n    /// Temporary directory, for implementation that uses one.\n    temp_dir: Option<Rc<TempDir>>,\n}\n\nimpl<S: ScratchProvider> WasmTestBuilder<S> {\n    /// Commit scratch to global state, and reset the scratch cache.\n    pub fn write_scratch_to_db(&mut self) -> &mut Self {\n        let prestate_hash = self.post_state_hash.expect(\"Should have genesis hash\");\n        if let Some(scratch) = self.scratch_global_state.take() {\n            let new_state_root = self\n                .data_access_layer\n                .write_scratch_to_db(prestate_hash, scratch)\n                .unwrap();\n            self.post_state_hash = Some(new_state_root);\n        }\n        self\n    }\n    /// Flushes the LMDB environment to disk.\n    pub fn flush_environment(&self) {\n        let request = FlushRequest::new();\n        if let FlushResult::Failure(gse) = self.data_access_layer.flush(request) {\n            panic!(\"flush failed: {:?}\", gse)\n        }\n    }\n\n    /// Execute and commit transforms from an ExecuteRequest into a scratch global state.\n    /// You MUST call write_scratch_to_lmdb to flush these changes to LmdbGlobalState.\n    #[allow(deprecated)]\n    pub fn scratch_exec_and_commit(&mut self, mut exec_request: WasmV1Request) -> &mut Self {\n        if self.scratch_global_state.is_none() {\n            self.scratch_global_state = Some(self.data_access_layer.get_scratch_global_state());\n        }\n\n        let cached_state = self\n            .scratch_global_state\n    
        .as_ref()\n            .expect(\"scratch state should exist\");\n\n        let state_hash = self.post_state_hash.expect(\"expected post_state_hash\");\n        exec_request.block_info.with_state_hash(state_hash);\n\n        // First execute the request against our scratch global state.\n        let execution_result = self.execution_engine.execute(cached_state, exec_request);\n        let _post_state_hash = cached_state\n            .commit_effects(\n                self.post_state_hash.expect(\"requires a post_state_hash\"),\n                execution_result.effects().clone(),\n            )\n            .expect(\"should commit\");\n\n        // Save transforms and execution results for WasmTestBuilder.\n        self.effects.push(execution_result.effects().clone());\n        self.exec_results.push(execution_result);\n        self\n    }\n}\n\nimpl<S> Clone for WasmTestBuilder<S> {\n    fn clone(&self) -> Self {\n        WasmTestBuilder {\n            data_access_layer: Arc::clone(&self.data_access_layer),\n            execution_engine: Rc::clone(&self.execution_engine),\n            chainspec: self.chainspec.clone(),\n            exec_results: self.exec_results.clone(),\n            upgrade_results: self.upgrade_results.clone(),\n            prune_results: self.prune_results.clone(),\n            genesis_hash: self.genesis_hash,\n            post_state_hash: self.post_state_hash,\n            effects: self.effects.clone(),\n            genesis_effects: self.genesis_effects.clone(),\n            system_account: self.system_account.clone(),\n            scratch_global_state: None,\n            global_state_dir: self.global_state_dir.clone(),\n            temp_dir: self.temp_dir.clone(),\n        }\n    }\n}\n\n#[derive(Copy, Clone, Debug)]\nenum GlobalStateMode {\n    /// Creates empty lmdb database with specified flags\n    Create(DatabaseFlags),\n    /// Opens existing database\n    Open(Digest),\n}\n\nimpl GlobalStateMode {\n    fn post_state_hash(self) -> 
Option<Digest> {\n        match self {\n            GlobalStateMode::Create(_) => None,\n            GlobalStateMode::Open(post_state_hash) => Some(post_state_hash),\n        }\n    }\n}\n\n/// Wasm test builder where state is held in LMDB.\npub type LmdbWasmTestBuilder = WasmTestBuilder<DataAccessLayer<LmdbGlobalState>>;\n\nimpl Default for LmdbWasmTestBuilder {\n    fn default() -> Self {\n        Self::new_temporary_with_chainspec(&*CHAINSPEC_SYMLINK)\n    }\n}\n\nimpl LmdbWasmTestBuilder {\n    /// Upgrades the execution engine using the scratch trie.\n    pub fn upgrade_using_scratch(\n        &mut self,\n        upgrade_config: &mut ProtocolUpgradeConfig,\n    ) -> &mut Self {\n        let pre_state_hash = self.post_state_hash.expect(\"should have state hash\");\n        upgrade_config.with_pre_state_hash(pre_state_hash);\n\n        let scratch_state = self.data_access_layer.get_scratch_global_state();\n        let pre_state_hash = upgrade_config.pre_state_hash();\n        let req = ProtocolUpgradeRequest::new(upgrade_config.clone());\n        let result = {\n            let result = scratch_state.protocol_upgrade(req);\n            if let ProtocolUpgradeResult::Success { effects, .. 
} = result {\n                let post_state_hash = self\n                    .data_access_layer\n                    .write_scratch_to_db(pre_state_hash, scratch_state)\n                    .unwrap();\n                self.post_state_hash = Some(post_state_hash);\n                let mut engine_config = self.chainspec.engine_config();\n                let new_protocol_version = upgrade_config.new_protocol_version();\n                engine_config.set_protocol_version(new_protocol_version);\n                self.execution_engine = Rc::new(ExecutionEngineV1::new(engine_config));\n                ProtocolUpgradeResult::Success {\n                    post_state_hash,\n                    effects,\n                }\n            } else {\n                result\n            }\n        };\n        self.upgrade_results.push(result);\n        self\n    }\n\n    /// Returns an [`LmdbWasmTestBuilder`] with configuration.\n    pub fn new_with_config<T: AsRef<OsStr> + ?Sized>(\n        data_dir: &T,\n        chainspec: ChainspecConfig,\n    ) -> Self {\n        let _ = env_logger::try_init();\n        let page_size = *OS_PAGE_SIZE;\n        let global_state_dir = Self::global_state_dir(data_dir);\n        Self::create_global_state_dir(&global_state_dir);\n        let environment = Arc::new(\n            LmdbEnvironment::new(\n                &global_state_dir,\n                page_size * DEFAULT_LMDB_PAGES,\n                DEFAULT_MAX_READERS,\n                true,\n            )\n            .expect(\"should create LmdbEnvironment\"),\n        );\n        let trie_store = Arc::new(\n            LmdbTrieStore::new(&environment, None, DatabaseFlags::empty())\n                .expect(\"should create LmdbTrieStore\"),\n        );\n\n        let max_query_depth = DEFAULT_MAX_QUERY_DEPTH;\n        let enable_addressable_entity = chainspec.core_config.enable_addressable_entity;\n        let global_state = LmdbGlobalState::empty(\n            environment,\n            
trie_store,\n            max_query_depth,\n            enable_addressable_entity,\n        )\n        .expect(\"should create LmdbGlobalState\");\n\n        let data_access_layer = Arc::new(DataAccessLayer {\n            block_store: BlockStore::new(),\n            state: global_state,\n            max_query_depth,\n            enable_addressable_entity,\n        });\n\n        let engine_config = chainspec.engine_config();\n        let engine_state = ExecutionEngineV1::new(engine_config);\n\n        WasmTestBuilder {\n            data_access_layer,\n            execution_engine: Rc::new(engine_state),\n            chainspec,\n            exec_results: Vec::new(),\n            upgrade_results: Vec::new(),\n            prune_results: Vec::new(),\n            genesis_hash: None,\n            post_state_hash: None,\n            effects: Vec::new(),\n            system_account: None,\n            genesis_effects: None,\n            scratch_global_state: None,\n            global_state_dir: Some(global_state_dir),\n            temp_dir: None,\n        }\n    }\n\n    fn create_or_open<T: AsRef<Path>>(\n        global_state_dir: T,\n        chainspec: ChainspecConfig,\n        protocol_version: ProtocolVersion,\n        mode: GlobalStateMode,\n    ) -> Self {\n        let _ = env_logger::try_init();\n        let page_size = *OS_PAGE_SIZE;\n\n        match mode {\n            GlobalStateMode::Create(_database_flags) => {}\n            GlobalStateMode::Open(_post_state_hash) => {\n                Self::create_global_state_dir(&global_state_dir)\n            }\n        }\n\n        let environment = LmdbEnvironment::new(\n            &global_state_dir,\n            page_size * DEFAULT_LMDB_PAGES,\n            DEFAULT_MAX_READERS,\n            true,\n        )\n        .expect(\"should create LmdbEnvironment\");\n\n        let max_query_depth = DEFAULT_MAX_QUERY_DEPTH;\n\n        let enable_addressable_entity = chainspec.core_config.enable_addressable_entity;\n        let 
global_state = match mode {\n            GlobalStateMode::Create(database_flags) => {\n                let trie_store = LmdbTrieStore::new(&environment, None, database_flags)\n                    .expect(\"should open LmdbTrieStore\");\n                LmdbGlobalState::empty(\n                    Arc::new(environment),\n                    Arc::new(trie_store),\n                    max_query_depth,\n                    enable_addressable_entity,\n                )\n                .expect(\"should create LmdbGlobalState\")\n            }\n            GlobalStateMode::Open(post_state_hash) => {\n                let trie_store =\n                    LmdbTrieStore::open(&environment, None).expect(\"should open LmdbTrieStore\");\n                LmdbGlobalState::new(\n                    Arc::new(environment),\n                    Arc::new(trie_store),\n                    post_state_hash,\n                    max_query_depth,\n                    enable_addressable_entity,\n                )\n            }\n        };\n\n        let data_access_layer = Arc::new(DataAccessLayer {\n            block_store: BlockStore::new(),\n            state: global_state,\n            max_query_depth,\n            enable_addressable_entity,\n        });\n        let mut engine_config = chainspec.engine_config();\n        engine_config.set_protocol_version(protocol_version);\n        let engine_state = ExecutionEngineV1::new(engine_config);\n\n        let post_state_hash = mode.post_state_hash();\n\n        let builder = WasmTestBuilder {\n            data_access_layer,\n            execution_engine: Rc::new(engine_state),\n            chainspec,\n            exec_results: Vec::new(),\n            upgrade_results: Vec::new(),\n            prune_results: Vec::new(),\n            genesis_hash: None,\n            post_state_hash,\n            effects: Vec::new(),\n            genesis_effects: None,\n            system_account: None,\n            scratch_global_state: None,\n            
global_state_dir: Some(global_state_dir.as_ref().to_path_buf()),\n            temp_dir: None,\n        };\n\n        builder\n    }\n\n    /// Returns an [`LmdbWasmTestBuilder`] with configuration and values from\n    /// a given chainspec.\n    pub fn new_with_chainspec<T: AsRef<OsStr> + ?Sized, P: AsRef<Path>>(\n        data_dir: &T,\n        chainspec_path: P,\n    ) -> Self {\n        let chainspec_config = ChainspecConfig::from_chainspec_path(chainspec_path)\n            .expect(\"must build chainspec configuration\");\n\n        Self::new_with_config(data_dir, chainspec_config)\n    }\n\n    /// Returns an [`LmdbWasmTestBuilder`] with configuration and values from\n    /// the production chainspec.\n    pub fn new_with_production_chainspec<T: AsRef<OsStr> + ?Sized>(data_dir: &T) -> Self {\n        Self::new_with_chainspec(data_dir, &*CHAINSPEC_SYMLINK)\n    }\n\n    /// Returns a new [`LmdbWasmTestBuilder`].\n    pub fn new<T: AsRef<OsStr> + ?Sized>(data_dir: &T) -> Self {\n        Self::new_with_config(data_dir, Default::default())\n    }\n\n    /// Creates a new instance of builder using the supplied configurations, opening wrapped LMDBs\n    /// (e.g. in the Trie and Data stores) rather than creating them.\n    pub fn open<T: AsRef<OsStr> + ?Sized>(\n        data_dir: &T,\n        chainspec: ChainspecConfig,\n        protocol_version: ProtocolVersion,\n        post_state_hash: Digest,\n    ) -> Self {\n        let global_state_path = Self::global_state_dir(data_dir);\n        Self::open_raw(\n            global_state_path,\n            chainspec,\n            protocol_version,\n            post_state_hash,\n        )\n    }\n\n    /// Creates a new instance of builder using the supplied configurations, opening wrapped LMDBs\n    /// (e.g. 
in the Trie and Data stores) rather than creating them.\n    /// Differs from `open` in that it doesn't append `GLOBAL_STATE_DIR` to the supplied path.\n    pub fn open_raw<T: AsRef<Path>>(\n        global_state_dir: T,\n        chainspec: ChainspecConfig,\n        protocol_version: ProtocolVersion,\n        post_state_hash: Digest,\n    ) -> Self {\n        Self::create_or_open(\n            global_state_dir,\n            chainspec,\n            protocol_version,\n            GlobalStateMode::Open(post_state_hash),\n        )\n    }\n\n    /// Creates new temporary lmdb builder with an engine config instance.\n    ///\n    /// Once [`LmdbWasmTestBuilder`] instance goes out of scope a global state directory will be\n    /// removed as well.\n    pub fn new_temporary_with_config(chainspec: ChainspecConfig) -> Self {\n        let temp_dir = tempfile::tempdir().unwrap();\n\n        let database_flags = DatabaseFlags::default();\n\n        let mut builder = Self::create_or_open(\n            temp_dir.path(),\n            chainspec,\n            DEFAULT_PROTOCOL_VERSION,\n            GlobalStateMode::Create(database_flags),\n        );\n\n        builder.temp_dir = Some(Rc::new(temp_dir));\n\n        builder\n    }\n\n    /// Creates new temporary lmdb builder with a path to a chainspec to load.\n    ///\n    /// Once [`LmdbWasmTestBuilder`] instance goes out of scope a global state directory will be\n    /// removed as well.\n    pub fn new_temporary_with_chainspec<P: AsRef<Path>>(chainspec_path: P) -> Self {\n        let chainspec = ChainspecConfig::from_chainspec_path(chainspec_path)\n            .expect(\"must build chainspec configuration\");\n\n        Self::new_temporary_with_config(chainspec)\n    }\n\n    fn create_global_state_dir<T: AsRef<Path>>(global_state_path: T) {\n        fs::create_dir_all(&global_state_path).unwrap_or_else(|_| {\n            panic!(\n                \"Expected to create {}\",\n                global_state_path.as_ref().display()\n     
       )\n        });\n    }\n\n    fn global_state_dir<T: AsRef<OsStr> + ?Sized>(data_dir: &T) -> PathBuf {\n        let mut path = PathBuf::from(data_dir);\n        path.push(GLOBAL_STATE_DIR);\n        path\n    }\n\n    /// Returns the file size on disk of the backing lmdb file behind LmdbGlobalState.\n    pub fn lmdb_on_disk_size(&self) -> Option<u64> {\n        if let Some(path) = self.global_state_dir.as_ref() {\n            let mut path = path.clone();\n            path.push(\"data.lmdb\");\n            return path.as_path().size_on_disk().ok();\n        }\n        None\n    }\n\n    /// run step against scratch global state.\n    pub fn step_with_scratch(&mut self, step_request: StepRequest) -> &mut Self {\n        if self.scratch_global_state.is_none() {\n            self.scratch_global_state = Some(self.data_access_layer.get_scratch_global_state());\n        }\n\n        let cached_state = self\n            .scratch_global_state\n            .as_ref()\n            .expect(\"scratch state should exist\");\n\n        match cached_state.step(step_request) {\n            StepResult::RootNotFound => {\n                panic!(\"Root not found\")\n            }\n            StepResult::Failure(err) => {\n                panic!(\"{:?}\", err)\n            }\n            StepResult::Success { .. 
} => {}\n        }\n        self\n    }\n\n    /// Runs a [`TransferRequest`] and commits the resulting effects.\n    pub fn transfer_and_commit(&mut self, mut transfer_request: TransferRequest) -> &mut Self {\n        let pre_state_hash = self.post_state_hash.expect(\"expected post_state_hash\");\n        transfer_request.set_state_hash_and_config(pre_state_hash, self.native_runtime_config());\n        let transfer_result = self.data_access_layer.transfer(transfer_request);\n        let gas = Gas::new(self.chainspec.system_costs_config.mint_costs().transfer);\n        let execution_result = WasmV1Result::from_transfer_result(transfer_result, gas)\n            .expect(\"transfer result should map to wasm v1 result\");\n        let effects = execution_result.effects().clone();\n        self.effects.push(effects.clone());\n        self.exec_results.push(execution_result);\n        self.commit_transforms(pre_state_hash, effects);\n        self\n    }\n}\n\nimpl<S> WasmTestBuilder<S>\nwhere\n    S: StateProvider + CommitProvider,\n{\n    /// Takes a [`GenesisRequest`], executes the request and returns Self.\n    pub fn run_genesis(&mut self, request: GenesisRequest) -> &mut Self {\n        match self.data_access_layer.genesis(request) {\n            GenesisResult::Fatal(msg) => {\n                panic!(\"{}\", msg);\n            }\n            GenesisResult::Failure(err) => {\n                panic!(\"{:?}\", err);\n            }\n            GenesisResult::Success {\n                post_state_hash,\n                effects,\n            } => {\n                self.genesis_hash = Some(post_state_hash);\n                self.post_state_hash = Some(post_state_hash);\n                self.system_account = self.get_entity_by_account_hash(*SYSTEM_ADDR);\n                self.genesis_effects = Some(effects);\n            }\n        }\n        self\n    }\n\n    fn query_system_entity_registry(\n        &self,\n        post_state_hash: Option<Digest>,\n    ) -> 
Option<SystemHashRegistry> {\n        match self.query(post_state_hash, Key::SystemEntityRegistry, &[]) {\n            Ok(StoredValue::CLValue(cl_registry)) => {\n                let system_entity_registry =\n                    CLValue::into_t::<SystemHashRegistry>(cl_registry).unwrap();\n                Some(system_entity_registry)\n            }\n            Ok(_) => None,\n            Err(_) => None,\n        }\n    }\n\n    /// Queries state for a [`StoredValue`].\n    pub fn query(\n        &self,\n        maybe_post_state: Option<Digest>,\n        base_key: Key,\n        path: &[String],\n    ) -> Result<StoredValue, String> {\n        let post_state = maybe_post_state\n            .or(self.post_state_hash)\n            .expect(\"builder must have a post-state hash\");\n\n        let query_request = QueryRequest::new(post_state, base_key, path.to_vec());\n\n        let query_result = self.data_access_layer.query(query_request);\n        if let QueryResult::Success { value, .. } = query_result {\n            return Ok(value.deref().clone());\n        }\n\n        Err(format!(\"{:?}\", query_result))\n    }\n\n    /// Retrieves the message topics for the given hash addr.\n    pub fn message_topics(\n        &self,\n        maybe_post_state: Option<Digest>,\n        entity_addr: EntityAddr,\n    ) -> Result<MessageTopics, String> {\n        let post_state = maybe_post_state\n            .or(self.post_state_hash)\n            .expect(\"builder must have a post-state hash\");\n\n        let request = MessageTopicsRequest::new(post_state, entity_addr);\n        let result = self.data_access_layer.message_topics(request);\n        if let MessageTopicsResult::Success { message_topics } = result {\n            return Ok(message_topics);\n        }\n\n        Err(format!(\"{:?}\", result))\n    }\n\n    /// Query a named key in global state by account hash.\n    pub fn query_named_key_by_account_hash(\n        &self,\n        maybe_post_state: Option<Digest>,\n        
account_hash: AccountHash,\n        name: &str,\n    ) -> Result<StoredValue, String> {\n        let entity_addr = self\n            .get_entity_hash_by_account_hash(account_hash)\n            .map(|entity_hash| EntityAddr::new_account(entity_hash.value()))\n            .expect(\"must get EntityAddr\");\n        self.query_named_key(maybe_post_state, entity_addr, name)\n    }\n\n    /// Query a named key.\n    pub fn query_named_key(\n        &self,\n        maybe_post_state: Option<Digest>,\n        entity_addr: EntityAddr,\n        name: &str,\n    ) -> Result<StoredValue, String> {\n        let named_key_addr = NamedKeyAddr::new_from_string(entity_addr, name.to_string())\n            .expect(\"could not create named key address\");\n        let empty_path: Vec<String> = vec![];\n        let maybe_stored_value = self\n            .query(maybe_post_state, Key::NamedKey(named_key_addr), &empty_path)\n            .expect(\"no stored value found\");\n        let key = maybe_stored_value\n            .as_cl_value()\n            .map(|cl_val| CLValue::into_t::<Key>(cl_val.clone()))\n            .expect(\"must be cl_value\")\n            .expect(\"must get key\");\n        self.query(maybe_post_state, key, &[])\n    }\n\n    /// Queries state for a dictionary item.\n    pub fn query_dictionary_item(\n        &self,\n        maybe_post_state: Option<Digest>,\n        dictionary_seed_uref: URef,\n        dictionary_item_key: &str,\n    ) -> Result<StoredValue, String> {\n        let dictionary_address =\n            Key::dictionary(dictionary_seed_uref, dictionary_item_key.as_bytes());\n        let empty_path: Vec<String> = vec![];\n        self.query(maybe_post_state, dictionary_address, &empty_path)\n    }\n\n    /// Queries for a [`StoredValue`] and returns the [`StoredValue`] and a Merkle proof.\n    pub fn query_with_proof(\n        &self,\n        maybe_post_state: Option<Digest>,\n        base_key: Key,\n        path: &[String],\n    ) -> Result<(StoredValue, 
Vec<TrieMerkleProof<Key, StoredValue>>), String> {\n        let post_state = maybe_post_state\n            .or(self.post_state_hash)\n            .expect(\"builder must have a post-state hash\");\n\n        let path_vec: Vec<String> = path.to_vec();\n\n        let query_request = QueryRequest::new(post_state, base_key, path_vec);\n\n        let query_result = self.data_access_layer.query(query_request);\n\n        if let QueryResult::Success { value, proofs } = query_result {\n            return Ok((value.deref().clone(), proofs));\n        }\n\n        panic! {\"{:?}\", query_result};\n    }\n\n    /// Queries for the total supply of token.\n    /// # Panics\n    /// Panics if the total supply can't be found.\n    pub fn total_supply(\n        &self,\n        protocol_version: ProtocolVersion,\n        maybe_post_state: Option<Digest>,\n    ) -> U512 {\n        let post_state = maybe_post_state\n            .or(self.post_state_hash)\n            .expect(\"builder must have a post-state hash\");\n        let result = self\n            .data_access_layer\n            .total_supply(TotalSupplyRequest::new(post_state, protocol_version));\n        if let TotalSupplyResult::Success { total_supply } = result {\n            total_supply\n        } else {\n            panic!(\"total supply should exist at every root hash {:?}\", result);\n        }\n    }\n\n    /// Queries for the round seigniorage rate.\n    /// # Panics\n    /// Panics if the total supply or seigniorage rate can't be found.\n    pub fn round_seigniorage_rate(\n        &mut self,\n        maybe_post_state: Option<Digest>,\n        protocol_version: ProtocolVersion,\n    ) -> Ratio<U512> {\n        let post_state = maybe_post_state\n            .or(self.post_state_hash)\n            .expect(\"builder must have a post-state hash\");\n        let result =\n            self.data_access_layer\n                .round_seigniorage_rate(RoundSeigniorageRateRequest::new(\n                    post_state,\n          
          protocol_version,\n                ));\n        if let RoundSeigniorageRateResult::Success { rate } = result {\n            rate\n        } else {\n            panic!(\n                \"round seigniorage rate should exist at every root hash {:?}\",\n                result\n            );\n        }\n    }\n\n    /// Queries for the base round reward.\n    /// # Panics\n    /// Panics if the total supply or seigniorage rate can't be found.\n    pub fn base_round_reward(\n        &mut self,\n        maybe_post_state: Option<Digest>,\n        protocol_version: ProtocolVersion,\n    ) -> U512 {\n        let post_state = maybe_post_state\n            .or(self.post_state_hash)\n            .expect(\"builder must have a post-state hash\");\n        let total_supply = self.total_supply(protocol_version, Some(post_state));\n        let rate = self.round_seigniorage_rate(Some(post_state), protocol_version);\n        rate.checked_mul(&Ratio::from(total_supply))\n            .map(|ratio| ratio.to_integer())\n            .expect(\"must get base round reward\")\n    }\n\n    /// Direct auction interactions for stake management.\n    pub fn bidding(\n        &mut self,\n        maybe_post_state: Option<Digest>,\n        protocol_version: ProtocolVersion,\n        initiator: InitiatorAddr,\n        auction_method: AuctionMethod,\n    ) -> BiddingResult {\n        let post_state = maybe_post_state\n            .or(self.post_state_hash)\n            .expect(\"builder must have a post-state hash\");\n\n        let transaction_hash = TransactionHash::V1(TransactionV1Hash::default());\n        let authorization_keys = BTreeSet::from_iter(iter::once(initiator.account_hash()));\n\n        let config = &self.chainspec;\n        let fee_handling = config.core_config.fee_handling;\n        let refund_handling = config.core_config.refund_handling;\n        let vesting_schedule_period_millis = config.core_config.vesting_schedule_period.millis();\n        let allow_auction_bids = 
config.core_config.allow_auction_bids;\n        let compute_rewards = config.core_config.compute_rewards;\n        let max_delegators_per_validator = config.core_config.max_delegators_per_validator;\n        let minimum_bid_amount = config.core_config.minimum_bid_amount;\n        let minimum_delegation_amount = config.core_config.minimum_delegation_amount;\n        let maximum_delegation_amount = config.core_config.maximum_delegation_amount;\n        let balance_hold_interval = config.core_config.gas_hold_interval.millis();\n        let include_credits = config.core_config.fee_handling == FeeHandling::NoFee;\n        let credit_cap = Ratio::new_raw(\n            U512::from(*config.core_config.validator_credit_cap.numer()),\n            U512::from(*config.core_config.validator_credit_cap.denom()),\n        );\n        let enable_addressable_entity = config.core_config.enable_addressable_entity;\n        let native_runtime_config = casper_storage::system::runtime_native::Config::new(\n            TransferConfig::Unadministered,\n            fee_handling,\n            refund_handling,\n            vesting_schedule_period_millis,\n            allow_auction_bids,\n            compute_rewards,\n            max_delegators_per_validator,\n            minimum_bid_amount,\n            minimum_delegation_amount,\n            maximum_delegation_amount,\n            balance_hold_interval,\n            include_credits,\n            credit_cap,\n            enable_addressable_entity,\n            config.system_costs_config.mint_costs().transfer,\n            config.core_config.rewards_handling.clone(),\n        );\n\n        let bidding_req = BiddingRequest::new(\n            native_runtime_config,\n            post_state,\n            protocol_version,\n            transaction_hash,\n            initiator,\n            authorization_keys,\n            auction_method,\n        );\n        self.data_access_layer().bidding(bidding_req)\n    }\n\n    /// Runs an optional custom 
payment [`WasmV1Request`] and a session `WasmV1Request`.\n    ///\n    /// If the custom payment is `Some` and its execution fails, the session request is not\n    /// attempted.\n    pub fn exec_wasm_v1(&mut self, mut request: WasmV1Request) -> &mut Self {\n        let state_hash = self.post_state_hash.expect(\"expected post_state_hash\");\n        request.block_info.with_state_hash(state_hash);\n        let result = self\n            .execution_engine\n            .execute(self.data_access_layer.as_ref(), request);\n        let effects = result.effects().clone();\n        self.exec_results.push(result);\n        self.effects.push(effects);\n        self\n    }\n\n    /// Runs an [`ExecuteRequest`].\n    pub fn exec(&mut self, mut exec_request: ExecuteRequest) -> &mut Self {\n        let mut effects = Effects::new();\n        if let Some(mut payment) = exec_request.custom_payment {\n            let state_hash = self.post_state_hash.expect(\"expected post_state_hash\");\n            payment.block_info.with_state_hash(state_hash);\n            let payment_result = self\n                .execution_engine\n                .execute(self.data_access_layer.as_ref(), payment);\n            // If executing payment code failed, record this and exit without attempting session\n            // execution.\n            effects = payment_result.effects().clone();\n            let payment_failed = payment_result.error().is_some();\n            self.exec_results.push(payment_result);\n            if payment_failed {\n                self.effects.push(effects);\n                return self;\n            }\n        }\n        let state_hash = self.post_state_hash.expect(\"expected post_state_hash\");\n        exec_request.session.block_info.with_state_hash(state_hash);\n\n        let session_result = self\n            .execution_engine\n            .execute(self.data_access_layer.as_ref(), exec_request.session);\n        // Cache transformations\n        
effects.append(session_result.effects().clone());\n        self.effects.push(effects);\n        self.exec_results.push(session_result);\n        self\n    }\n\n    /// Commit effects of previous exec call on the latest post-state hash.\n    pub fn commit(&mut self) -> &mut Self {\n        let prestate_hash = self.post_state_hash.expect(\"Should have genesis hash\");\n\n        let effects = self.effects.last().cloned().unwrap_or_default();\n\n        self.commit_transforms(prestate_hash, effects)\n    }\n\n    /// Runs a commit request, expects a successful response, and\n    /// overwrites existing cached post state hash with a new one.\n    pub fn commit_transforms(&mut self, pre_state_hash: Digest, effects: Effects) -> &mut Self {\n        let post_state_hash = self\n            .data_access_layer\n            .commit_effects(pre_state_hash, effects)\n            .expect(\"should commit\");\n        self.post_state_hash = Some(post_state_hash);\n        self\n    }\n\n    /// Upgrades the execution engine.\n    pub fn upgrade(&mut self, upgrade_config: &mut ProtocolUpgradeConfig) -> &mut Self {\n        let pre_state_hash = self.post_state_hash.expect(\"should have state hash\");\n        upgrade_config.with_pre_state_hash(pre_state_hash);\n\n        let req = ProtocolUpgradeRequest::new(upgrade_config.clone());\n\n        let result = self.data_access_layer.protocol_upgrade(req);\n\n        if let ProtocolUpgradeResult::Success {\n            post_state_hash, ..\n        } = result\n        {\n            let mut engine_config = self.chainspec.engine_config();\n            engine_config.set_protocol_version(upgrade_config.new_protocol_version());\n            self.execution_engine = Rc::new(ExecutionEngineV1::new(engine_config));\n            self.post_state_hash = Some(post_state_hash);\n        }\n\n        self.upgrade_results.push(result);\n        self\n    }\n\n    /// This ONLY executes the run_auction logic of the auction. 
If you are testing\n    /// specifically that function, this is sufficient. However, to match the standard\n    /// end of era auction behavior the comprehensive `step` function should be used instead.\n    pub fn run_auction(\n        &mut self,\n        era_end_timestamp_millis: u64,\n        evicted_validators: Vec<PublicKey>,\n    ) -> &mut Self {\n        let auction = self.get_auction_contract_hash();\n        let exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n            *SYSTEM_ADDR,\n            auction,\n            METHOD_RUN_AUCTION,\n            runtime_args! {\n                ARG_ERA_END_TIMESTAMP_MILLIS => era_end_timestamp_millis,\n                ARG_EVICTED_VALIDATORS => evicted_validators,\n            },\n        )\n        .build();\n        self.exec(exec_request).expect_success().commit()\n    }\n\n    /// Increments engine state at end of era (rewards, auction, unbond, etc.).\n    pub fn step(&mut self, step_request: StepRequest) -> StepResult {\n        let step_result = self.data_access_layer.step(step_request);\n\n        if let StepResult::Success {\n            post_state_hash, ..\n        } = step_result\n        {\n            self.post_state_hash = Some(post_state_hash);\n        }\n\n        step_result\n    }\n\n    fn native_runtime_config(&self) -> NativeRuntimeConfig {\n        let administrators: BTreeSet<AccountHash> = self\n            .chainspec\n            .core_config\n            .administrators\n            .iter()\n            .map(|x| x.to_account_hash())\n            .collect();\n        let allow_unrestricted = self.chainspec.core_config.allow_unrestricted_transfers;\n        let transfer_config = TransferConfig::new(administrators, allow_unrestricted);\n        let include_credits = self.chainspec.core_config.fee_handling == FeeHandling::NoFee;\n        let credit_cap = Ratio::new_raw(\n            U512::from(*self.chainspec.core_config.validator_credit_cap.numer()),\n            
U512::from(*self.chainspec.core_config.validator_credit_cap.denom()),\n        );\n        NativeRuntimeConfig::new(\n            transfer_config,\n            self.chainspec.core_config.fee_handling,\n            self.chainspec.core_config.refund_handling,\n            self.chainspec.core_config.vesting_schedule_period.millis(),\n            self.chainspec.core_config.allow_auction_bids,\n            self.chainspec.core_config.compute_rewards,\n            self.chainspec.core_config.max_delegators_per_validator,\n            self.chainspec.core_config.minimum_bid_amount,\n            self.chainspec.core_config.minimum_delegation_amount,\n            self.chainspec.core_config.maximum_delegation_amount,\n            self.chainspec.core_config.gas_hold_interval.millis(),\n            include_credits,\n            credit_cap,\n            self.chainspec.core_config.enable_addressable_entity,\n            self.chainspec.system_costs_config.mint_costs().transfer,\n            self.chainspec.core_config.rewards_handling.clone(),\n        )\n    }\n\n    /// Distribute fees.\n    pub fn distribute_fees(\n        &mut self,\n        pre_state_hash: Option<Digest>,\n        protocol_version: ProtocolVersion,\n        block_time: u64,\n    ) -> FeeResult {\n        let native_runtime_config = self.native_runtime_config();\n\n        let pre_state_hash = pre_state_hash.or(self.post_state_hash).unwrap();\n        let fee_req = FeeRequest::new(\n            native_runtime_config,\n            pre_state_hash,\n            protocol_version,\n            block_time.into(),\n        );\n        let fee_result = self.data_access_layer.distribute_fees(fee_req);\n\n        if let FeeResult::Success {\n            post_state_hash, ..\n        } = fee_result\n        {\n            self.post_state_hash = Some(post_state_hash);\n        }\n\n        fee_result\n    }\n\n    /// Distributes the rewards.\n    pub fn distribute(\n        &mut self,\n        pre_state_hash: 
Option<Digest>,\n        protocol_version: ProtocolVersion,\n        rewards: BTreeMap<PublicKey, Vec<U512>>,\n        block_time: u64,\n    ) -> BlockRewardsResult {\n        let pre_state_hash = pre_state_hash.or(self.post_state_hash).unwrap();\n        let native_runtime_config = self.native_runtime_config();\n        let distribute_req = BlockRewardsRequest::new(\n            native_runtime_config,\n            pre_state_hash,\n            protocol_version,\n            BlockTime::new(block_time),\n            rewards,\n        );\n        let distribute_block_rewards_result = self\n            .data_access_layer\n            .distribute_block_rewards(distribute_req);\n\n        if let BlockRewardsResult::Success {\n            post_state_hash, ..\n        } = distribute_block_rewards_result\n        {\n            self.post_state_hash = Some(post_state_hash);\n        }\n\n        distribute_block_rewards_result\n    }\n\n    /// Distributes the rewards.\n    pub fn distribute_with_rewards_handling(\n        &mut self,\n        pre_state_hash: Option<Digest>,\n        protocol_version: ProtocolVersion,\n        rewards: BTreeMap<PublicKey, Vec<U512>>,\n        block_time: u64,\n        rewards_handling: RewardsHandling,\n    ) -> BlockRewardsResult {\n        let pre_state_hash = pre_state_hash.or(self.post_state_hash).unwrap();\n        let administrators: BTreeSet<AccountHash> = self\n            .chainspec\n            .core_config\n            .administrators\n            .iter()\n            .map(|x| x.to_account_hash())\n            .collect();\n        let allow_unrestricted = self.chainspec.core_config.allow_unrestricted_transfers;\n        let transfer_config = TransferConfig::new(administrators, allow_unrestricted);\n        let include_credits = self.chainspec.core_config.fee_handling == FeeHandling::NoFee;\n        let credit_cap = Ratio::new_raw(\n            U512::from(*self.chainspec.core_config.validator_credit_cap.numer()),\n            
U512::from(*self.chainspec.core_config.validator_credit_cap.denom()),\n        );\n\n        let native_runtime_config = NativeRuntimeConfig::new(\n            transfer_config,\n            self.chainspec.core_config.fee_handling,\n            self.chainspec.core_config.refund_handling,\n            self.chainspec.core_config.vesting_schedule_period.millis(),\n            self.chainspec.core_config.allow_auction_bids,\n            self.chainspec.core_config.compute_rewards,\n            self.chainspec.core_config.max_delegators_per_validator,\n            self.chainspec.core_config.minimum_bid_amount,\n            self.chainspec.core_config.minimum_delegation_amount,\n            self.chainspec.core_config.maximum_delegation_amount,\n            self.chainspec.core_config.gas_hold_interval.millis(),\n            include_credits,\n            credit_cap,\n            self.chainspec.core_config.enable_addressable_entity,\n            self.chainspec.system_costs_config.mint_costs().transfer,\n            rewards_handling,\n        );\n\n        let distribute_req = BlockRewardsRequest::new(\n            native_runtime_config,\n            pre_state_hash,\n            protocol_version,\n            BlockTime::new(block_time),\n            rewards,\n        );\n        let distribute_block_rewards_result = self\n            .data_access_layer\n            .distribute_block_rewards(distribute_req);\n\n        if let BlockRewardsResult::Success {\n            post_state_hash, ..\n        } = distribute_block_rewards_result\n        {\n            self.post_state_hash = Some(post_state_hash);\n        }\n\n        distribute_block_rewards_result\n    }\n\n    /// Finalizes payment for a transaction\n    pub fn handle_fee(\n        &mut self,\n        pre_state_hash: Option<Digest>,\n        protocol_version: ProtocolVersion,\n        transaction_hash: TransactionHash,\n        handle_fee_mode: HandleFeeMode,\n    ) -> HandleFeeResult {\n        let pre_state_hash = 
pre_state_hash.or(self.post_state_hash).unwrap();\n        let native_runtime_config = self.native_runtime_config();\n        let handle_fee_request = HandleFeeRequest::new(\n            native_runtime_config,\n            pre_state_hash,\n            protocol_version,\n            transaction_hash,\n            handle_fee_mode,\n        );\n        let handle_fee_result = self.data_access_layer.handle_fee(handle_fee_request);\n        if let HandleFeeResult::Success { effects, .. } = &handle_fee_result {\n            self.commit_transforms(pre_state_hash, effects.clone());\n        }\n\n        handle_fee_result\n    }\n\n    /// Expects a successful run\n    #[track_caller]\n    pub fn expect_success(&mut self) -> &mut Self {\n        let exec_result = self\n            .get_last_exec_result()\n            .expect(\"Expected to be called after exec()\");\n        if exec_result.error().is_some() {\n            panic!(\n                \"Expected successful execution result, but instead got: {:#?}\",\n                exec_result,\n            );\n        }\n        self\n    }\n\n    /// Expects a failed run\n    pub fn expect_failure(&mut self) -> &mut Self {\n        let exec_result = self\n            .get_last_exec_result()\n            .expect(\"Expected to be called after exec()\");\n        if exec_result.error().is_none() {\n            panic!(\n                \"Expected failed execution result, but instead got: {:?}\",\n                exec_result,\n            );\n        }\n        self\n    }\n\n    /// Returns `true` if the last exec had an error, otherwise returns false.\n    #[track_caller]\n    pub fn is_error(&self) -> bool {\n        self.get_last_exec_result()\n            .expect(\"Expected to be called after exec()\")\n            .error()\n            .is_some()\n    }\n\n    /// Returns an `engine_state::Error` if the last exec had an error, otherwise `None`.\n    #[track_caller]\n    pub fn get_error(&self) -> Option<Error> {\n        
self.get_last_exec_result()\n            .expect(\"Expected to be called after exec()\")\n            .error()\n            .cloned()\n    }\n\n    /// Returns the error message of the last exec.\n    #[track_caller]\n    pub fn get_error_message(&self) -> Option<String> {\n        self.get_last_exec_result()\n            .expect(\"Expected to be called after exec()\")\n            .error()\n            .map(|error| error.to_string())\n    }\n\n    /// Gets `Effects` of all previous runs.\n    #[track_caller]\n    pub fn get_effects(&self) -> Vec<Effects> {\n        self.effects.clone()\n    }\n\n    /// Gets genesis account (if present)\n    pub fn get_genesis_account(&self) -> &AddressableEntity {\n        self.system_account\n            .as_ref()\n            .expect(\"Unable to obtain genesis account. Please run genesis first.\")\n    }\n\n    /// Returns the [`AddressableEntityHash`] of the mint, panics if it can't be found.\n    pub fn get_mint_contract_hash(&self) -> AddressableEntityHash {\n        self.get_system_entity_hash(MINT)\n            .expect(\"Unable to obtain mint contract. Please run genesis first.\")\n    }\n\n    /// Returns the [`AddressableEntityHash`] of the \"handle payment\" contract, panics if it can't\n    /// be found.\n    pub fn get_handle_payment_contract_hash(&self) -> AddressableEntityHash {\n        self.get_system_entity_hash(HANDLE_PAYMENT)\n            .expect(\"Unable to obtain handle payment contract. Please run genesis first.\")\n    }\n\n    /// Returns the [`AddressableEntityHash`] of the \"standard payment\" contract, panics if it can't\n    /// be found.\n    pub fn get_standard_payment_contract_hash(&self) -> AddressableEntityHash {\n        self.get_system_entity_hash(STANDARD_PAYMENT)\n            .expect(\"Unable to obtain standard payment contract. 
Please run genesis first.\")\n    }\n\n    fn get_system_entity_hash(&self, contract_name: &str) -> Option<AddressableEntityHash> {\n        self.query_system_entity_registry(self.post_state_hash)?\n            .get(contract_name)\n            .map(|hash| AddressableEntityHash::new(*hash))\n    }\n\n    /// Returns the [`AddressableEntityHash`] of the \"auction\" contract, panics if it can't be\n    /// found.\n    pub fn get_auction_contract_hash(&self) -> AddressableEntityHash {\n        self.get_system_entity_hash(AUCTION)\n            .expect(\"Unable to obtain auction contract. Please run genesis first.\")\n    }\n\n    /// Returns genesis effects, panics if there aren't any.\n    pub fn get_genesis_effects(&self) -> &Effects {\n        self.genesis_effects\n            .as_ref()\n            .expect(\"should have genesis transforms\")\n    }\n\n    /// Returns the genesis hash, panics if it can't be found.\n    pub fn get_genesis_hash(&self) -> Digest {\n        self.genesis_hash\n            .expect(\"Genesis hash should be present. 
Should be called after run_genesis.\")\n    }\n\n    /// Returns the post state hash, panics if it can't be found.\n    pub fn get_post_state_hash(&self) -> Digest {\n        self.post_state_hash.expect(\"Should have post-state hash.\")\n    }\n\n    /// The chainspec configured settings for this builder.\n    pub fn chainspec(&self) -> &ChainspecConfig {\n        &self.chainspec\n    }\n\n    /// The current engine config\n    pub fn engine_config(&self) -> &EngineConfig {\n        self.execution_engine.config()\n    }\n\n    /// Update chainspec\n    pub fn with_chainspec(&mut self, chainspec: ChainspecConfig) -> &mut Self {\n        self.chainspec = chainspec;\n        self.execution_engine = Rc::new(ExecutionEngineV1::new(self.chainspec.engine_config()));\n        self\n    }\n\n    /// Update the engine config of the builder.\n    pub fn with_engine_config(&mut self, engine_config: EngineConfig) -> &mut Self {\n        self.execution_engine = Rc::new(ExecutionEngineV1::new(engine_config));\n        self\n    }\n\n    /// Sets blocktime into global state.\n    pub fn with_block_time(&mut self, block_time: BlockTime) -> &mut Self {\n        if let Some(state_root_hash) = self.post_state_hash {\n            let mut tracking_copy = self\n                .data_access_layer\n                .tracking_copy(state_root_hash)\n                .expect(\"should not error on checkout\")\n                .expect(\"should checkout tracking copy\");\n\n            let cl_value = CLValue::from_t(block_time.value()).expect(\"should get cl value\");\n            tracking_copy.write(\n                Key::BlockGlobal(BlockGlobalAddr::BlockTime),\n                StoredValue::CLValue(cl_value),\n            );\n            self.commit_transforms(state_root_hash, tracking_copy.effects());\n        }\n\n        self\n    }\n\n    /// Writes a set of keys and values to global state.\n    pub fn write_data_and_commit(\n        &mut self,\n        data: impl Iterator<Item = (Key, 
StoredValue)>,\n    ) -> &mut Self {\n        if let Some(state_root_hash) = self.post_state_hash {\n            let mut tracking_copy = self\n                .data_access_layer\n                .tracking_copy(state_root_hash)\n                .expect(\"should not error on checkout\")\n                .expect(\"should checkout tracking copy\");\n\n            for (key, val) in data {\n                tracking_copy.write(key, val);\n            }\n\n            self.commit_transforms(state_root_hash, tracking_copy.effects());\n        }\n        self\n    }\n\n    /// Sets gas hold config into global state.\n    pub fn with_gas_hold_config(\n        &mut self,\n        handling: HoldBalanceHandling,\n        interval: u64,\n    ) -> &mut Self {\n        if let Some(state_root_hash) = self.post_state_hash {\n            let mut tracking_copy = self\n                .data_access_layer\n                .tracking_copy(state_root_hash)\n                .expect(\"should not error on checkout\")\n                .expect(\"should checkout tracking copy\");\n\n            let registry = tracking_copy\n                .get_system_entity_registry()\n                .expect(\"should have registry\");\n            let mint = *registry.get(\"mint\").expect(\"should have mint\");\n            let mint_addr = EntityAddr::new_system(mint);\n            let named_keys = tracking_copy\n                .get_named_keys(mint_addr)\n                .expect(\"should have named keys\");\n\n            let mut address_generator =\n                AddressGenerator::new(state_root_hash.as_ref(), Phase::System);\n\n            // gas handling\n            let uref = address_generator.new_uref(AccessRights::READ_ADD_WRITE);\n            let stored_value = StoredValue::CLValue(\n                CLValue::from_t(handling.tag()).expect(\"should turn handling tag into CLValue\"),\n            );\n\n            tracking_copy\n                .upsert_uref_to_named_keys(\n                    
mint_addr,\n                    MINT_GAS_HOLD_HANDLING_KEY,\n                    &named_keys,\n                    uref,\n                    stored_value,\n                )\n                .expect(\"should upsert gas handling\");\n\n            // gas interval\n            let uref = address_generator.new_uref(AccessRights::READ_ADD_WRITE);\n            let stored_value = StoredValue::CLValue(\n                CLValue::from_t(interval).expect(\"should turn gas interval into CLValue\"),\n            );\n\n            tracking_copy\n                .upsert_uref_to_named_keys(\n                    mint_addr,\n                    MINT_GAS_HOLD_INTERVAL_KEY,\n                    &named_keys,\n                    uref,\n                    stored_value,\n                )\n                .expect(\"should upsert gas interval\");\n\n            self.commit_transforms(state_root_hash, tracking_copy.effects());\n        }\n        self\n    }\n\n    /// Returns the engine state.\n    pub fn get_engine_state(&self) -> &ExecutionEngineV1 {\n        &self.execution_engine\n    }\n\n    /// Returns the engine state.\n    pub fn data_access_layer(&self) -> &S {\n        &self.data_access_layer\n    }\n\n    /// Returns the last results execs.\n    pub fn get_last_exec_result(&self) -> Option<WasmV1Result> {\n        self.exec_results.last().cloned()\n    }\n\n    /// Returns the owned results of a specific exec.\n    pub fn get_exec_result_owned(&self, index: usize) -> Option<WasmV1Result> {\n        self.exec_results.get(index).cloned()\n    }\n\n    /// Returns a count of exec results.\n    pub fn get_exec_results_count(&self) -> usize {\n        self.exec_results.len()\n    }\n\n    /// Returns a `Result` containing an [`ProtocolUpgradeResult`].\n    pub fn get_upgrade_result(&self, index: usize) -> Option<&ProtocolUpgradeResult> {\n        self.upgrade_results.get(index)\n    }\n\n    /// Expects upgrade success.\n    pub fn expect_upgrade_success(&mut self) -> &mut Self 
{\n        // Check first result, as only first result is interesting for a simple test\n        let result = self\n            .upgrade_results\n            .last()\n            .expect(\"Expected to be called after a system upgrade.\");\n\n        assert!(result.is_success(), \"Expected success, got: {:?}\", result);\n\n        self\n    }\n\n    /// Expect failure of the protocol upgrade.\n    pub fn expect_upgrade_failure(&mut self) -> &mut Self {\n        // Check first result, as only first result is interesting for a simple test\n        let result = self\n            .upgrade_results\n            .last()\n            .expect(\"Expected to be called after a system upgrade.\");\n\n        assert!(result.is_err(), \"Expected Failure got {:?}\", result);\n\n        self\n    }\n\n    /// Returns the `Account` if present.\n    pub fn get_account(&self, account_hash: AccountHash) -> Option<Account> {\n        let stored_value = self\n            .query(None, Key::Account(account_hash), &[])\n            .expect(\"must have stored value\");\n\n        stored_value.into_account()\n    }\n\n    /// Returns the \"handle payment\" contract, panics if it can't be found.\n    pub fn get_handle_payment_contract(&self) -> EntityWithNamedKeys {\n        let hash = self\n            .get_system_entity_hash(HANDLE_PAYMENT)\n            .expect(\"should have handle payment contract\");\n\n        let handle_payment_contract = if self.chainspec.core_config.enable_addressable_entity {\n            Key::addressable_entity_key(EntityKindTag::System, hash)\n        } else {\n            Key::Hash(hash.value())\n        };\n        let stored_value = self\n            .query(None, handle_payment_contract, &[])\n            .expect(\"must have stored value\");\n        match stored_value {\n            StoredValue::Contract(contract) => {\n                let named_keys = contract.named_keys().clone();\n                let entity = AddressableEntity::from(contract);\n                
EntityWithNamedKeys::new(entity, named_keys)\n            }\n            StoredValue::AddressableEntity(entity) => {\n                let named_keys = self.get_named_keys(EntityAddr::System(hash.value()));\n                EntityWithNamedKeys::new(entity, named_keys)\n            }\n            _ => panic!(\"unhandled stored value\"),\n        }\n    }\n\n    /// Returns the balance of a purse, panics if the balance can't be parsed into a `U512`.\n    pub fn get_purse_balance(&self, purse: URef) -> U512 {\n        let base_key = Key::Balance(purse.addr());\n        self.query(None, base_key, &[])\n            .and_then(|v| CLValue::try_from(v).map_err(|error| format!(\"{:?}\", error)))\n            .and_then(|cl_value| cl_value.into_t().map_err(|error| format!(\"{:?}\", error)))\n            .expect(\"should parse balance into a U512\")\n    }\n\n    /// Returns a `BalanceResult` for a purse, panics if the balance can't be found.\n    pub fn get_purse_balance_result_with_proofs(\n        &self,\n        protocol_version: ProtocolVersion,\n        balance_identifier: BalanceIdentifier,\n    ) -> BalanceResult {\n        let balance_handling = BalanceHandling::Available;\n        let proof_handling = ProofHandling::Proofs;\n        let state_root_hash: Digest = self.post_state_hash.expect(\"should have post_state_hash\");\n        let request = BalanceRequest::new(\n            state_root_hash,\n            protocol_version,\n            balance_identifier,\n            balance_handling,\n            proof_handling,\n        );\n        self.data_access_layer.balance(request)\n    }\n\n    /// Returns a `BalanceResult` for a purse using a `PublicKey`.\n    pub fn get_public_key_balance_result_with_proofs(\n        &self,\n        protocol_version: ProtocolVersion,\n        public_key: PublicKey,\n    ) -> BalanceResult {\n        let state_root_hash: Digest = self.post_state_hash.expect(\"should have post_state_hash\");\n        let balance_handling = 
BalanceHandling::Available;\n        let proof_handling = ProofHandling::Proofs;\n        let request = BalanceRequest::from_public_key(\n            state_root_hash,\n            protocol_version,\n            public_key,\n            balance_handling,\n            proof_handling,\n        );\n        self.data_access_layer.balance(request)\n    }\n\n    /// Gets the purse balance of a proposer.\n    pub fn get_proposer_purse_balance(&self) -> U512 {\n        let proposer_contract = self\n            .get_entity_by_account_hash(*DEFAULT_PROPOSER_ADDR)\n            .expect(\"proposer account should exist\");\n        self.get_purse_balance(proposer_contract.main_purse())\n    }\n\n    /// Gets the contract hash associated with a given account hash.\n    pub fn get_entity_hash_by_account_hash(\n        &self,\n        account_hash: AccountHash,\n    ) -> Option<AddressableEntityHash> {\n        match self.query(None, Key::Account(account_hash), &[]).ok() {\n            Some(StoredValue::Account(_)) => Some(AddressableEntityHash::new(account_hash.value())),\n            Some(StoredValue::CLValue(cl_value)) => {\n                let entity_key = CLValue::into_t::<Key>(cl_value).expect(\"must have contract hash\");\n                entity_key.into_entity_hash()\n            }\n            Some(_) | None => None,\n        }\n    }\n\n    /// Returns an Entity alongside its named keys queried by its account hash.\n    pub fn get_entity_with_named_keys_by_account_hash(\n        &self,\n        account_hash: AccountHash,\n    ) -> Option<EntityWithNamedKeys> {\n        if let Some(entity) = self.get_entity_by_account_hash(account_hash) {\n            let entity_named_keys = self.get_named_keys_by_account_hash(account_hash);\n            return Some(EntityWithNamedKeys::new(entity, entity_named_keys));\n        };\n\n        None\n    }\n\n    /// Returns an Entity alongside its named keys queried by its entity hash.\n    pub fn get_entity_with_named_keys_by_entity_hash(\n  
      &self,\n        entity_hash: AddressableEntityHash,\n    ) -> Option<EntityWithNamedKeys> {\n        match self.get_addressable_entity(entity_hash) {\n            Some(entity) => {\n                let named_keys = self.get_named_keys(entity.entity_addr(entity_hash));\n                Some(EntityWithNamedKeys::new(entity, named_keys))\n            }\n            None => None,\n        }\n    }\n\n    /// Queries for an `Account`.\n    pub fn get_entity_by_account_hash(\n        &self,\n        account_hash: AccountHash,\n    ) -> Option<AddressableEntity> {\n        match self.query(None, Key::Account(account_hash), &[]).ok() {\n            Some(StoredValue::Account(account)) => Some(AddressableEntity::from(account)),\n            Some(StoredValue::CLValue(cl_value)) => {\n                let entity_key = CLValue::into_t::<Key>(cl_value).expect(\"must have entity key\");\n                match self.query(None, entity_key, &[]) {\n                    Ok(StoredValue::AddressableEntity(entity)) => Some(entity),\n                    Ok(_) | Err(_) => None,\n                }\n            }\n            Some(_other_variant) => None,\n            None => None,\n        }\n    }\n\n    /// Queries for an `AddressableEntity` and panics if it can't be found.\n    pub fn get_expected_addressable_entity_by_account_hash(\n        &self,\n        account_hash: AccountHash,\n    ) -> AddressableEntity {\n        self.get_entity_by_account_hash(account_hash)\n            .expect(\"account to exist\")\n    }\n\n    /// Queries for an addressable entity by `AddressableEntityHash`.\n    pub fn get_addressable_entity(\n        &self,\n        entity_hash: AddressableEntityHash,\n    ) -> Option<AddressableEntity> {\n        if !self.chainspec.core_config.enable_addressable_entity {\n            let contract_hash = ContractHash::new(entity_hash.value());\n            return self\n                .get_contract(contract_hash)\n                .map(AddressableEntity::from);\n       
 }\n\n        let entity_key = Key::addressable_entity_key(EntityKindTag::SmartContract, entity_hash);\n\n        let value: StoredValue = match self.query(None, entity_key, &[]) {\n            Ok(stored_value) => stored_value,\n            Err(_) => self\n                .query(\n                    None,\n                    Key::addressable_entity_key(EntityKindTag::System, entity_hash),\n                    &[],\n                )\n                .ok()?,\n        };\n\n        if let StoredValue::AddressableEntity(entity) = value {\n            Some(entity)\n        } else {\n            None\n        }\n    }\n\n    /// Retrieve a Contract from global state.\n    pub fn get_contract(&self, contract_hash: ContractHash) -> Option<Contract> {\n        let contract_value: StoredValue = self\n            .query(None, contract_hash.into(), &[])\n            .expect(\"should have contract value\");\n\n        if let StoredValue::Contract(contract) = contract_value {\n            Some(contract)\n        } else {\n            None\n        }\n    }\n\n    /// Queries for byte code by `ByteCodeAddr` and returns an `Option<ByteCode>`.\n    pub fn get_byte_code(&self, byte_code_hash: ByteCodeHash) -> Option<ByteCode> {\n        let byte_code_key = Key::byte_code_key(ByteCodeAddr::new_wasm_addr(byte_code_hash.value()));\n\n        let byte_code_value: StoredValue = self\n            .query(None, byte_code_key, &[])\n            .expect(\"should have contract value\");\n\n        if let StoredValue::ByteCode(byte_code) = byte_code_value {\n            Some(byte_code)\n        } else {\n            None\n        }\n    }\n\n    /// Queries for a contract package by `PackageHash`.\n    pub fn get_package(&self, package_hash: PackageHash) -> Option<Package> {\n        let key = if self.chainspec.core_config.enable_addressable_entity {\n            Key::SmartContract(package_hash.value())\n        } else {\n            Key::Hash(package_hash.value())\n        };\n        let 
contract_value: StoredValue = self\n            .query(None, key, &[])\n            .expect(\"should have package value\");\n\n        match contract_value {\n            StoredValue::ContractPackage(contract_package) => Some(contract_package.into()),\n            StoredValue::SmartContract(package) => Some(package),\n            _ => None,\n        }\n    }\n\n    /// Returns how much gas execution consumed / used.\n    pub fn exec_consumed(&self, index: usize) -> Gas {\n        self.exec_results\n            .get(index)\n            .map(WasmV1Result::consumed)\n            .unwrap()\n    }\n\n    /// Returns the `Gas` cost of the last exec.\n    pub fn last_exec_gas_consumed(&self) -> Gas {\n        self.exec_results\n            .last()\n            .map(WasmV1Result::consumed)\n            .unwrap()\n    }\n\n    /// Assert that last error is the expected one.\n    ///\n    /// NOTE: we're using string-based representation for checking equality\n    /// as the `Error` type does not implement `Eq` (many of its subvariants don't).\n    pub fn assert_error(&self, expected_error: Error) {\n        match self.get_error() {\n            Some(error) => assert_eq!(format!(\"{:?}\", expected_error), format!(\"{:?}\", error)),\n            None => panic!(\"expected error ({:?}) got success\", expected_error),\n        }\n    }\n\n    /// Gets [`EraValidators`].\n    pub fn get_era_validators(&mut self) -> EraValidators {\n        let state_hash = self.get_post_state_hash();\n        let request = EraValidatorsRequest::new(state_hash);\n        let result = self.data_access_layer.era_validators(request);\n\n        if let EraValidatorsResult::Success { era_validators } = result {\n            era_validators\n        } else {\n            panic!(\"get era validators should be available\");\n        }\n    }\n\n    /// Gets [`ValidatorWeights`] for a given [`EraId`].\n    pub fn get_validator_weights(&mut self, era_id: EraId) -> Option<ValidatorWeights> {\n        let mut 
result = self.get_era_validators();\n        result.remove(&era_id)\n    }\n\n    /// Gets [`Vec<BidKind>`].\n    pub fn get_bids(&mut self) -> Vec<BidKind> {\n        let get_bids_request = BidsRequest::new(self.get_post_state_hash());\n\n        let get_bids_result = self.data_access_layer.bids(get_bids_request);\n\n        get_bids_result.into_option().unwrap()\n    }\n\n    /// Returns named keys for an account entity by its account hash.\n    pub fn get_named_keys_by_account_hash(&self, account_hash: AccountHash) -> NamedKeys {\n        let entity_hash = self\n            .get_entity_hash_by_account_hash(account_hash)\n            .expect(\"must have entity hash\");\n        let entity_addr = EntityAddr::new_account(entity_hash.value());\n        self.get_named_keys(entity_addr)\n    }\n\n    /// Returns the named keys for a system contract.\n    pub fn get_named_keys_for_system_contract(\n        &self,\n        system_entity_hash: AddressableEntityHash,\n    ) -> NamedKeys {\n        self.get_named_keys(EntityAddr::System(system_entity_hash.value()))\n    }\n\n    /// Returns the named keys for a system contract.\n    pub fn get_named_keys_for_contract(&self, contract_hash: AddressableEntityHash) -> NamedKeys {\n        self.get_named_keys(EntityAddr::SmartContract(contract_hash.value()))\n    }\n\n    /// Get the named keys for an entity.\n    pub fn get_named_keys(&self, entity_addr: EntityAddr) -> NamedKeys {\n        let state_root_hash = self.get_post_state_hash();\n\n        let tracking_copy = self\n            .data_access_layer\n            .tracking_copy(state_root_hash)\n            .unwrap()\n            .unwrap();\n\n        tracking_copy\n            .get_named_keys(entity_addr)\n            .expect(\"should have named keys\")\n    }\n\n    /// Gets [`BTreeMap<UnbondKind, Unbond>`].\n    pub fn get_unbonds(&mut self) -> BTreeMap<UnbondKind, Vec<Unbond>> {\n        let state_root_hash = self.get_post_state_hash();\n\n        let tracking_copy = 
self\n            .data_access_layer\n            .tracking_copy(state_root_hash)\n            .unwrap()\n            .unwrap();\n\n        let reader = tracking_copy.reader();\n\n        let unbond_keys = reader\n            .keys_with_prefix(&[KeyTag::BidAddr as u8])\n            .unwrap_or_default();\n\n        let mut ret = BTreeMap::new();\n\n        for key in unbond_keys.into_iter() {\n            if let Ok(Some(StoredValue::BidKind(BidKind::Unbond(unbond)))) = reader.read(&key) {\n                let unbond_kind = unbond.unbond_kind();\n                match ret.get_mut(unbond_kind) {\n                    None => {\n                        let _ = ret.insert(unbond_kind.clone(), vec![*unbond]);\n                    }\n                    Some(unbonds) => unbonds.push(*unbond),\n                };\n            }\n        }\n\n        ret\n    }\n\n    /// Retrieve the bid for a validator by their public key.\n    pub fn get_validator_bid(&mut self, validator_public_key: PublicKey) -> Option<ValidatorBid> {\n        let state_root_hash = self.get_post_state_hash();\n\n        let tracking_copy = self\n            .data_access_layer\n            .tracking_copy(state_root_hash)\n            .unwrap()\n            .unwrap();\n\n        let reader = tracking_copy.reader();\n\n        let validator_keys = reader\n            .keys_with_prefix(&[KeyTag::BidAddr as u8, BidAddrTag::Validator as u8])\n            .unwrap_or_default();\n\n        for key in validator_keys.into_iter() {\n            if let Ok(Some(StoredValue::BidKind(BidKind::Validator(bid)))) = reader.read(&key) {\n                if bid.validator_public_key() == &validator_public_key {\n                    return Some(*bid);\n                }\n            }\n        }\n\n        None\n    }\n\n    /// Gets [`BTreeMap<AccountHash, Vec<UnbondingPurse>>`].\n    pub fn get_unbonding_purses(&mut self) -> BTreeMap<AccountHash, Vec<UnbondingPurse>> {\n        let state_root_hash = 
self.get_post_state_hash();\n\n        let tracking_copy = self\n            .data_access_layer\n            .tracking_copy(state_root_hash)\n            .unwrap()\n            .unwrap();\n\n        let reader = tracking_copy.reader();\n\n        let unbond_keys = reader\n            .keys_with_prefix(&[KeyTag::Unbond as u8])\n            .unwrap_or_default();\n\n        let mut ret = BTreeMap::new();\n\n        for key in unbond_keys.into_iter() {\n            let read_result = reader.read(&key);\n            if let (Key::Unbond(account_hash), Ok(Some(StoredValue::Unbonding(unbonding_purses)))) =\n                (key, read_result)\n            {\n                ret.insert(account_hash, unbonding_purses);\n            }\n        }\n\n        ret\n    }\n\n    /// Gets [`WithdrawPurses`].\n    pub fn get_withdraw_purses(&mut self) -> WithdrawPurses {\n        let state_root_hash = self.get_post_state_hash();\n\n        let tracking_copy = self\n            .data_access_layer\n            .tracking_copy(state_root_hash)\n            .unwrap()\n            .unwrap();\n\n        let reader = tracking_copy.reader();\n\n        let withdraws_keys = reader\n            .keys_with_prefix(&[KeyTag::Withdraw as u8])\n            .unwrap_or_default();\n\n        let mut ret = BTreeMap::new();\n\n        for key in withdraws_keys.into_iter() {\n            let read_result = reader.read(&key);\n            if let (Key::Withdraw(account_hash), Ok(Some(StoredValue::Withdraw(withdraw_purses)))) =\n                (key, read_result)\n            {\n                ret.insert(account_hash, withdraw_purses);\n            }\n        }\n\n        ret\n    }\n\n    /// Gets all `[Key::Balance]`s in global state.\n    pub fn get_balance_keys(&self) -> Vec<Key> {\n        self.get_keys(KeyTag::Balance).unwrap_or_default()\n    }\n\n    /// Gets all keys in global state by a prefix.\n    pub fn get_keys(\n        &self,\n        tag: KeyTag,\n    ) -> Result<Vec<Key>, 
casper_storage::global_state::error::Error> {\n        let state_root_hash = self.get_post_state_hash();\n\n        let tracking_copy = self\n            .data_access_layer\n            .tracking_copy(state_root_hash)\n            .unwrap()\n            .unwrap();\n\n        let reader = tracking_copy.reader();\n\n        reader.keys_with_prefix(&[tag as u8])\n    }\n\n    /// Gets all entry points for a given entity\n    pub fn get_entry_points(&self, entity_addr: EntityAddr) -> EntryPoints {\n        let state_root_hash = self.get_post_state_hash();\n\n        let tracking_copy = self\n            .data_access_layer\n            .tracking_copy(state_root_hash)\n            .unwrap()\n            .unwrap();\n\n        tracking_copy\n            .get_v1_entry_points(entity_addr)\n            .expect(\"must get entry points\")\n    }\n\n    /// Gets a stored value from a contract's named keys.\n    pub fn get_value<T>(&mut self, entity_addr: EntityAddr, name: &str) -> T\n    where\n        T: FromBytes + CLTyped,\n    {\n        let named_keys = self.get_named_keys(entity_addr);\n\n        let key = named_keys.get(name).expect(\"should have named key\");\n        let stored_value = self.query(None, *key, &[]).expect(\"should query\");\n        let cl_value = stored_value.into_cl_value().expect(\"should be cl value\");\n        let result: T = cl_value.into_t().expect(\"should convert\");\n        result\n    }\n\n    /// Gets an [`EraId`].\n    pub fn get_era(&mut self) -> EraId {\n        let auction_contract = self.get_auction_contract_hash();\n        self.get_value(EntityAddr::System(auction_contract.value()), ERA_ID_KEY)\n    }\n\n    /// Gets the auction delay.\n    pub fn get_auction_delay(&mut self) -> u64 {\n        let auction_contract = self.get_auction_contract_hash();\n        self.get_value(\n            EntityAddr::System(auction_contract.value()),\n            AUCTION_DELAY_KEY,\n        )\n    }\n\n    /// Gets the unbonding delay\n    pub fn 
get_unbonding_delay(&mut self) -> u64 {\n        let auction_contract = self.get_auction_contract_hash();\n        self.get_value(\n            EntityAddr::System(auction_contract.value()),\n            UNBONDING_DELAY_KEY,\n        )\n    }\n\n    fn system_entity_key(&self, request: SystemEntityRegistryRequest) -> Key {\n        let result = self.data_access_layer.system_entity_registry(request);\n        if let SystemEntityRegistryResult::Success { payload, .. } = result {\n            match payload {\n                SystemEntityRegistryPayload::All(_) => {\n                    panic!(\"asked for auction, got entire registry\");\n                }\n                SystemEntityRegistryPayload::EntityKey(key) => key,\n            }\n        } else {\n            panic!(\"{:?}\", result)\n        }\n    }\n\n    /// Gets the [`AddressableEntityHash`] of the system auction contract, panics if it can't be\n    /// found.\n    pub fn get_system_auction_hash(&self) -> AddressableEntityHash {\n        let state_root_hash = self.get_post_state_hash();\n        let request = SystemEntityRegistryRequest::new(\n            state_root_hash,\n            ProtocolVersion::V2_0_0,\n            SystemEntityRegistrySelector::auction(),\n            self.chainspec.core_config.enable_addressable_entity,\n        );\n        self.system_entity_key(request)\n            .into_entity_hash()\n            .expect(\"should downcast\")\n    }\n\n    /// Gets the [`AddressableEntityHash`] of the system mint contract, panics if it can't be found.\n    pub fn get_system_mint_hash(&self) -> AddressableEntityHash {\n        let state_root_hash = self.get_post_state_hash();\n        let request = SystemEntityRegistryRequest::new(\n            state_root_hash,\n            ProtocolVersion::V2_0_0,\n            SystemEntityRegistrySelector::mint(),\n            self.chainspec.core_config.enable_addressable_entity,\n        );\n        self.system_entity_key(request)\n            
.into_entity_hash()\n            .expect(\"should downcast\")\n    }\n\n    /// Gets the [`AddressableEntityHash`] of the system handle payment contract, panics if it can't\n    /// be found.\n    pub fn get_system_handle_payment_hash(\n        &self,\n        protocol_version: ProtocolVersion,\n    ) -> AddressableEntityHash {\n        let state_root_hash = self.get_post_state_hash();\n        let request = SystemEntityRegistryRequest::new(\n            state_root_hash,\n            protocol_version,\n            SystemEntityRegistrySelector::handle_payment(),\n            self.chainspec.core_config.enable_addressable_entity,\n        );\n        self.system_entity_key(request)\n            .into_entity_hash()\n            .expect(\"should downcast\")\n    }\n\n    /// Resets the `exec_results`, `upgrade_results` and `transform` fields.\n    pub fn clear_results(&mut self) -> &mut Self {\n        self.exec_results = Vec::new();\n        self.upgrade_results = Vec::new();\n        self.effects = Vec::new();\n        self\n    }\n\n    /// Advances eras by num_eras\n    pub fn advance_eras_by(&mut self, num_eras: u64) {\n        let step_request_builder = StepRequestBuilder::new()\n            .with_protocol_version(ProtocolVersion::V2_0_0)\n            .with_runtime_config(self.native_runtime_config())\n            .with_run_auction(true);\n\n        for _ in 0..num_eras {\n            let state_hash = self.get_post_state_hash();\n            let step_request = step_request_builder\n                .clone()\n                .with_parent_state_hash(state_hash)\n                .with_next_era_id(self.get_era().successor())\n                .build();\n\n            match self.step(step_request) {\n                StepResult::RootNotFound => panic!(\"Root not found {:?}\", state_hash),\n                StepResult::Failure(err) => panic!(\"{:?}\", err),\n                StepResult::Success { .. 
} => {\n                    // noop\n                }\n            }\n        }\n    }\n\n    /// Advances eras by configured amount\n    pub fn advance_eras_by_default_auction_delay(&mut self) {\n        let auction_delay = self.get_auction_delay();\n        self.advance_eras_by(auction_delay + 1);\n    }\n\n    /// Advances by a single era.\n    pub fn advance_era(&mut self) {\n        self.advance_eras_by(1);\n    }\n\n    /// Returns an initialized step request builder.\n    pub fn step_request_builder(&mut self) -> StepRequestBuilder {\n        StepRequestBuilder::new()\n            .with_parent_state_hash(self.get_post_state_hash())\n            .with_protocol_version(ProtocolVersion::V2_0_0)\n            .with_runtime_config(self.native_runtime_config())\n    }\n\n    /// Returns a trie by hash.\n    pub fn get_trie(&mut self, state_hash: Digest) -> Option<Trie<Key, StoredValue>> {\n        let req = TrieRequest::new(state_hash, None);\n        self.data_access_layer()\n            .trie(req)\n            .into_raw()\n            .unwrap()\n            .map(|bytes| bytesrepr::deserialize(bytes.into_inner().into()).unwrap())\n    }\n\n    /// Returns the costs related to interacting with the auction system contract.\n    pub fn get_auction_costs(&self) -> AuctionCosts {\n        *self.chainspec.system_costs_config.auction_costs()\n    }\n\n    /// Returns the costs related to interacting with the mint system contract.\n    pub fn get_mint_costs(&self) -> MintCosts {\n        *self.chainspec.system_costs_config.mint_costs()\n    }\n\n    /// Returns the costs related to interacting with the handle payment system contract.\n    pub fn get_handle_payment_costs(&self) -> HandlePaymentCosts {\n        *self.chainspec.system_costs_config.handle_payment_costs()\n    }\n\n    /// Commits a prune of leaf nodes from the tip of the merkle trie.\n    pub fn commit_prune(&mut self, prune_config: PruneRequest) -> &mut Self {\n        let result = 
self.data_access_layer.prune(prune_config);\n\n        if let PruneResult::Success {\n            post_state_hash,\n            effects,\n        } = &result\n        {\n            self.post_state_hash = Some(*post_state_hash);\n            self.effects.push(effects.clone());\n        }\n\n        self.prune_results.push(result);\n        self\n    }\n\n    /// Returns a `Result` containing a [`PruneResult`].\n    pub fn get_prune_result(&self, index: usize) -> Option<&PruneResult> {\n        self.prune_results.get(index)\n    }\n\n    /// Expects a prune success.\n    pub fn expect_prune_success(&mut self) -> &mut Self {\n        // Check first result, as only first result is interesting for a simple test\n        let result = self\n            .prune_results\n            .last()\n            .expect(\"Expected to be called after a system upgrade.\");\n\n        match result {\n            PruneResult::RootNotFound => panic!(\"Root not found\"),\n            PruneResult::MissingKey => panic!(\"Does not exists\"),\n            PruneResult::Failure(tce) => {\n                panic!(\"{:?}\", tce);\n            }\n            PruneResult::Success { .. 
} => {}\n        }\n\n        self\n    }\n\n    /// Calculates refunded amount from a last execution request.\n    pub fn calculate_refund_amount(&self, payment_amount: U512) -> U512 {\n        let gas_amount = Motes::from_gas(self.last_exec_gas_consumed(), DEFAULT_GAS_PRICE)\n            .expect(\"should create motes from gas\");\n\n        let refund_ratio = match self.chainspec.core_config.refund_handling {\n            RefundHandling::Refund { refund_ratio } | RefundHandling::Burn { refund_ratio } => {\n                refund_ratio\n            }\n            RefundHandling::NoRefund => Ratio::zero(),\n        };\n\n        let (numer, denom) = refund_ratio.into();\n        let refund_ratio = Ratio::new_raw(U512::from(numer), U512::from(denom));\n\n        // amount declared to be paid in payment code MINUS gas spent in last execution.\n        let refundable_amount = Ratio::from(payment_amount) - Ratio::from(gas_amount.value());\n        (refundable_amount * refund_ratio).to_integer()\n    }\n}\n"
  },
  {
    "path": "execution_engine_testing/test_support/tests/version_numbers.rs",
    "content": "#[test]\nfn test_html_root_url() {\n    version_sync::assert_html_root_url_updated!(\"src/lib.rs\");\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/Cargo.toml",
    "content": "[package]\nname = \"casper-engine-tests\"\nversion = \"0.1.0\"\nauthors = [\"Ed Hastings <ed@casper.network>, Henry Till <henrytill@gmail.com>\"]\nedition = \"2021\"\n\n[dependencies]\nbase16 = \"0.2.1\"\ncasper-engine-test-support = { path = \"../test_support\" }\ncasper-execution-engine = { path = \"../../execution_engine\", features = [\"test-support\"] }\ncasper-storage = { path = \"../../storage\" }\ncasper-types = { path = \"../../types\", default-features = false, features = [\"datasize\", \"json-schema\"] }\ned25519-dalek = { version = \"2.1.1\", default-features = false, features = [\"alloc\", \"zeroize\"] }\ncasper-wasm = \"1.0.0\"\nclap = \"2\"\nfs_extra = \"1.2.0\"\nlog = \"0.4.8\"\nrand = \"0.8.3\"\nserde = \"1\"\nserde_json = \"1\"\ntempfile = \"3.4.0\"\nwat = \"1.219.1\"\nwasmprinter = \"0.219.0\"\nwalrus = \"0.20.2\"\n\n[dev-dependencies]\nassert_matches = \"1.3.0\"\ncriterion = { version = \"0.5.1\", features = [\"html_reports\"]}\ndictionary = { path = \"../../smart_contracts/contracts/test/dictionary\", default-features = false }\ndictionary-call = { path = \"../../smart_contracts/contracts/test/dictionary-call\", default-features = false }\nget-call-stack-recursive-subcall = { path = \"../../smart_contracts/contracts/test/get-call-stack-recursive-subcall\", default-features = false }\ngh-1470-regression = { path = \"../../smart_contracts/contracts/test/gh-1470-regression\", default-features = false }\ngh-1470-regression-call = { path = \"../../smart_contracts/contracts/test/gh-1470-regression-call\", default-features = false }\nlmdb-rkv = \"0.14\"\nnum-rational = \"0.4.0\"\nnum-traits = { workspace = true }\nonce_cell = \"1.5.2\"\nregex = \"1.5.4\"\nwalrus = \"0.20.2\"\nwat = \"1.0.47\"\n\n[features]\nfixture-generators = []\n"
  },
  {
    "path": "execution_engine_testing/tests/fixtures/call_stack_fixture/state.json",
    "content": "{\n  \"genesis_request\": {\n    \"protocol_version\": \"1.0.0\"\n  },\n  \"post_state_hash\": \"8d90f686f4d3906ca63ea4ac6b0b72348605c95069e946c1f5b32496907d7fec\"\n}"
  },
  {
    "path": "execution_engine_testing/tests/fixtures/counter_contract/state.json",
    "content": "{\n  \"genesis_request\": {\n    \"protocol_version\": \"1.0.0\"\n  },\n  \"post_state_hash\": \"e8ac57060d30935c297b7565fcb17b1591edb4f2a9b9738682d493b9556e32cf\"\n}"
  },
  {
    "path": "execution_engine_testing/tests/fixtures/delegator_amount/state.json",
    "content": "{\n  \"genesis_request\": {\n    \"protocol_version\": \"2.0.0\"\n  },\n  \"post_state_hash\": \"09fefcac7cff208fa98018a20008b258f6d10fade632a1d1bd4bd5768cf5b3d7\"\n}"
  },
  {
    "path": "execution_engine_testing/tests/fixtures/disabled_versions/state.json",
    "content": "{\n  \"genesis_request\": {\n    \"protocol_version\": \"1.0.0\"\n  },\n  \"post_state_hash\": \"b06fd206cd3719a18c212bef2f21adbef24086982c92250a8368929625693841\"\n}"
  },
  {
    "path": "execution_engine_testing/tests/fixtures/gh_3208/state.json",
    "content": "{\n  \"description\": \"Default proposer account is also a genesis validator with a vesting schedule already initialized\",\n  \"genesis_request\": {\n    \"chainspec_registry\": {\n      \"chainspec_raw_hash\": \"11c0e79b71c3976ccd0c02d1310e2516c08edc9d8b6f57ccd680d63a4d8e72da\",\n      \"genesis_accounts_raw_hash\": \"0afd4a04d7720da9922f2b40249989faf4ff8096e1ed49bee615bb6cb1ee4f7d\",\n      \"global_state_raw_hash\": null\n    },\n    \"ee_config\": {\n      \"accounts\": [\n        {\n          \"Account\": {\n            \"balance\": \"100000000000000000\",\n            \"public_key\": \"01d5af25e204ad03d0a26e236996404f1be51a60948bcc026cd084a83690b756d3\",\n            \"validator\": null\n          }\n        },\n        {\n          \"Account\": {\n            \"balance\": \"100000000000000000\",\n            \"public_key\": \"01bb47d33d777b4559bb917d1825827421c4a6b1b9737f12e1c58ea4305af88b74\",\n            \"validator\": {\n              \"bonded_amount\": \"1000000000000\",\n              \"delegation_rate\": 15\n            }\n          }\n        }\n      ],\n      \"auction_delay\": 3,\n      \"genesis_timestamp_millis\": 0,\n      \"locked_funds_period_millis\": 7776000000,\n      \"round_seigniorage_rate\": [\n        6414,\n        623437335209\n      ],\n      \"system_config\": {\n        \"auction_costs\": {\n          \"activate_bid\": 10000,\n          \"add_bid\": 10000,\n          \"delegate\": 10000,\n          \"distribute\": 10000,\n          \"get_era_validators\": 10000,\n          \"read_era_id\": 10000,\n          \"read_seigniorage_recipients\": 10000,\n          \"run_auction\": 10000,\n          \"slash\": 10000,\n          \"undelegate\": 10000,\n          \"withdraw_bid\": 10000,\n          \"withdraw_delegator_reward\": 10000,\n          \"withdraw_validator_reward\": 10000\n        },\n        \"handle_payment_costs\": {\n          \"finalize_payment\": 10000,\n          \"get_payment_purse\": 10000,\n          
\"get_refund_purse\": 10000,\n          \"set_refund_purse\": 10000\n        },\n        \"mint_costs\": {\n          \"balance\": 10000,\n          \"create\": 2500000000,\n          \"mint\": 2500000000,\n          \"read_base_round_reward\": 10000,\n          \"reduce_total_supply\": 10000,\n          \"transfer\": 10000\n        },\n        \"standard_payment_costs\": {\n          \"pay\": 10000\n        },\n        \"wasmless_transfer_cost\": 100000000\n      },\n      \"unbonding_delay\": 14,\n      \"validator_slots\": 5,\n      \"wasm_config\": {\n        \"host_function_costs\": {\n          \"add\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 5800\n          },\n          \"add_associated_key\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 9000\n          },\n          \"add_contract_version\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"blake2b\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"call_contract\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              420,\n              0\n            ],\n            \"cost\": 4500\n          },\n          \"call_versioned_contract\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          
\"create_contract_package_at_hash\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"create_contract_user_group\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"create_purse\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 2500000000\n          },\n          \"dictionary_get\": {\n            \"arguments\": [\n              0,\n              590,\n              0\n            ],\n            \"cost\": 5500\n          },\n          \"dictionary_put\": {\n            \"arguments\": [\n              0,\n              1800,\n              0,\n              520\n            ],\n            \"cost\": 9500\n          },\n          \"disable_contract_version\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"get_balance\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 3800\n          },\n          \"get_blocktime\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 330\n          },\n          \"get_caller\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 380\n          },\n          \"get_key\": {\n            \"arguments\": [\n              0,\n              440,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 2000\n          },\n          \"get_main_purse\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 1300\n          },\n          \"get_named_arg\": {\n            
\"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"get_named_arg_size\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"get_phase\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 710\n          },\n          \"get_system_contract\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 1100\n          },\n          \"has_key\": {\n            \"arguments\": [\n              0,\n              840\n            ],\n            \"cost\": 1500\n          },\n          \"is_valid_uref\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 760\n          },\n          \"load_named_keys\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 42000\n          },\n          \"new_uref\": {\n            \"arguments\": [\n              0,\n              0,\n              590\n            ],\n            \"cost\": 17000\n          },\n          \"print\": {\n            \"arguments\": [\n              0,\n              4600\n            ],\n            \"cost\": 20000\n          },\n          \"provision_contract_user_group_uref\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"put_key\": {\n            \"arguments\": [\n              0,\n              1100,\n              0,\n              0\n            ],\n            \"cost\": 38000\n          },\n          \"random_bytes\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          
\"read_host_buffer\": {\n            \"arguments\": [\n              0,\n              310,\n              0\n            ],\n            \"cost\": 3500\n          },\n          \"read_value\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 6000\n          },\n          \"remove_associated_key\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 4200\n          },\n          \"remove_contract_user_group\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"remove_contract_user_group_urefs\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"remove_key\": {\n            \"arguments\": [\n              0,\n              3200\n            ],\n            \"cost\": 61000\n          },\n          \"ret\": {\n            \"arguments\": [\n              0,\n              420\n            ],\n            \"cost\": 23000\n          },\n          \"revert\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 500\n          },\n          \"set_action_threshold\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 74000\n          },\n          \"transfer_from_purse_to_account\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 2500000000\n          },\n          \"transfer_from_purse_to_purse\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n  
            0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 82000\n          },\n          \"transfer_to_account\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 2500000000\n          },\n          \"update_associated_key\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 4200\n          },\n          \"write\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              980\n            ],\n            \"cost\": 14000\n          }\n        },\n        \"max_memory\": 64,\n        \"max_stack_height\": 188,\n        \"opcode_costs\": {\n          \"add\": 210,\n          \"bit\": 300,\n          \"const\": 110,\n          \"control_flow\": 440,\n          \"conversion\": 420,\n          \"current_memory\": 290,\n          \"div\": 320,\n          \"global\": 390,\n          \"grow_memory\": 240000,\n          \"integer_comparison\": 250,\n          \"load\": 2500,\n          \"local\": 390,\n          \"mul\": 240,\n          \"nop\": 200,\n          \"regular\": 210,\n          \"store\": 4700,\n          \"unreachable\": 270\n        },\n        \"storage_costs\": {\n          \"gas_per_byte\": 625000\n        }\n      }\n    },\n    \"genesis_config_hash\": \"2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\",\n    \"protocol_version\": \"1.0.0\"\n  },\n  \"post_state_hash\": \"a180ca1ca4cb5f9a6a1430886c89689584d5b69a2d1cd862cd458cd459d788c0\"\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/fixtures/gh_3710/state.json",
    "content": "{\n  \"genesis_request\": {\n    \"ee_config\": {\n      \"accounts\": [\n        {\n          \"Account\": {\n            \"balance\": \"100000000000000000\",\n            \"public_key\": \"01d5af25e204ad03d0a26e236996404f1be51a60948bcc026cd084a83690b756d3\",\n            \"validator\": null\n          }\n        },\n        {\n          \"Account\": {\n            \"balance\": \"100000000000000000\",\n            \"public_key\": \"01bb47d33d777b4559bb917d1825827421c4a6b1b9737f12e1c58ea4305af88b74\",\n            \"validator\": null\n          }\n        }\n      ],\n      \"auction_delay\": 1,\n      \"genesis_timestamp_millis\": 0,\n      \"locked_funds_period_millis\": 7776000000,\n      \"round_seigniorage_rate\": [\n        7,\n        87535408\n      ],\n      \"system_config\": {\n        \"auction_costs\": {\n          \"activate_bid\": 10000,\n          \"add_bid\": 2500000000,\n          \"delegate\": 2500000000,\n          \"distribute\": 10000,\n          \"get_era_validators\": 10000,\n          \"read_era_id\": 10000,\n          \"read_seigniorage_recipients\": 10000,\n          \"run_auction\": 10000,\n          \"slash\": 10000,\n          \"undelegate\": 2500000000,\n          \"withdraw_bid\": 2500000000,\n          \"withdraw_delegator_reward\": 10000,\n          \"withdraw_validator_reward\": 10000\n        },\n        \"handle_payment_costs\": {\n          \"finalize_payment\": 10000,\n          \"get_payment_purse\": 10000,\n          \"get_refund_purse\": 10000,\n          \"set_refund_purse\": 10000\n        },\n        \"mint_costs\": {\n          \"balance\": 10000,\n          \"create\": 2500000000,\n          \"mint\": 2500000000,\n          \"read_base_round_reward\": 10000,\n          \"reduce_total_supply\": 10000,\n          \"transfer\": 10000\n        },\n        \"standard_payment_costs\": {\n          \"pay\": 10000\n        },\n        \"wasmless_transfer_cost\": 100000000\n      },\n      \"unbonding_delay\": 
7,\n      \"validator_slots\": 100,\n      \"wasm_config\": {\n        \"host_function_costs\": {\n          \"add\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 5800\n          },\n          \"add_associated_key\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 9000\n          },\n          \"add_contract_version\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"blake2b\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"call_contract\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              420,\n              0\n            ],\n            \"cost\": 4500\n          },\n          \"call_versioned_contract\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"create_contract_package_at_hash\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"create_contract_user_group\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"create_purse\": {\n            \"arguments\": [\n        
      0,\n              0\n            ],\n            \"cost\": 2500000000\n          },\n          \"dictionary_get\": {\n            \"arguments\": [\n              0,\n              590,\n              0\n            ],\n            \"cost\": 5500\n          },\n          \"dictionary_put\": {\n            \"arguments\": [\n              0,\n              1800,\n              0,\n              520\n            ],\n            \"cost\": 9500\n          },\n          \"disable_contract_version\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"get_balance\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 3800\n          },\n          \"get_blocktime\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 330\n          },\n          \"get_caller\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 380\n          },\n          \"get_key\": {\n            \"arguments\": [\n              0,\n              440,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 2000\n          },\n          \"get_main_purse\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 1300\n          },\n          \"get_named_arg\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"get_named_arg_size\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"get_phase\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 710\n          },\n          \"get_system_contract\": {\n            
\"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 1100\n          },\n          \"has_key\": {\n            \"arguments\": [\n              0,\n              840\n            ],\n            \"cost\": 1500\n          },\n          \"is_valid_uref\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 760\n          },\n          \"load_named_keys\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 42000\n          },\n          \"new_uref\": {\n            \"arguments\": [\n              0,\n              0,\n              590\n            ],\n            \"cost\": 17000\n          },\n          \"print\": {\n            \"arguments\": [\n              0,\n              4600\n            ],\n            \"cost\": 20000\n          },\n          \"provision_contract_user_group_uref\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"put_key\": {\n            \"arguments\": [\n              0,\n              1100,\n              0,\n              0\n            ],\n            \"cost\": 38000\n          },\n          \"read_host_buffer\": {\n            \"arguments\": [\n              0,\n              310,\n              0\n            ],\n            \"cost\": 3500\n          },\n          \"read_value\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 6000\n          },\n          \"remove_associated_key\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 4200\n          },\n          \"remove_contract_user_group\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n           
 ],\n            \"cost\": 200\n          },\n          \"remove_contract_user_group_urefs\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"remove_key\": {\n            \"arguments\": [\n              0,\n              3200\n            ],\n            \"cost\": 61000\n          },\n          \"ret\": {\n            \"arguments\": [\n              0,\n              420000\n            ],\n            \"cost\": 23000\n          },\n          \"revert\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 500\n          },\n          \"set_action_threshold\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 74000\n          },\n          \"transfer_from_purse_to_account\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 2500000000\n          },\n          \"transfer_from_purse_to_purse\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 82000\n          },\n          \"transfer_to_account\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 2500000000\n          },\n          \"update_associated_key\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 4200\n          },\n          \"write\": {\n            \"arguments\": [\n              0,\n              
0,\n              0,\n              980\n            ],\n            \"cost\": 14000\n          }\n        },\n        \"max_memory\": 64,\n        \"max_stack_height\": 188,\n        \"opcode_costs\": {\n          \"add\": 210,\n          \"bit\": 300,\n          \"const\": 110,\n          \"control_flow\": {\n            \"block\": 440,\n            \"br\": 440000,\n            \"br_if\": 440000,\n            \"br_table\": {\n              \"cost\": 440000,\n              \"size_multiplier\": 100\n            },\n            \"call\": 440,\n            \"call_indirect\": 440,\n            \"drop\": 440,\n            \"else\": 440,\n            \"end\": 440,\n            \"if\": 440,\n            \"loop\": 440,\n            \"return\": 440,\n            \"select\": 440\n          },\n          \"conversion\": 420,\n          \"current_memory\": 290,\n          \"div\": 320,\n          \"global\": 390,\n          \"grow_memory\": 240000,\n          \"integer_comparison\": 250,\n          \"load\": 2500,\n          \"local\": 390,\n          \"mul\": 240,\n          \"nop\": 200,\n          \"store\": 4700,\n          \"unreachable\": 270\n        },\n        \"storage_costs\": {\n          \"gas_per_byte\": 630000\n        }\n      }\n    },\n    \"genesis_config_hash\": \"2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\",\n    \"protocol_version\": \"1.0.0\"\n  },\n  \"post_state_hash\": \"ca42d66dd3ca95adfb92e56afc96353af5a888a258220d3c6b439e02c7b66306\"\n}"
  },
  {
    "path": "execution_engine_testing/tests/fixtures/groups/state.json",
    "content": "{\n  \"genesis_request\": {\n    \"protocol_version\": \"2.0.0\"\n  },\n  \"post_state_hash\": \"b899bb0ccee3c734859a075fff3a71196a84eca1366e6bc3c04a2253fec1fb3c\"\n}"
  },
  {
    "path": "execution_engine_testing/tests/fixtures/release_1_2_0/state.json",
    "content": "{\n  \"genesis_request\": {\n    \"genesis_config_hash\": \"2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\",\n    \"protocol_version\": \"1.2.0\",\n    \"ee_config\": {\n      \"accounts\": [\n        {\n          \"Account\": {\n            \"public_key\": \"01d5af25e204ad03d0a26e236996404f1be51a60948bcc026cd084a83690b756d3\",\n            \"balance\": \"100000000000000000\",\n            \"validator\": null\n          }\n        },\n        {\n          \"Account\": {\n            \"public_key\": \"01bb47d33d777b4559bb917d1825827421c4a6b1b9737f12e1c58ea4305af88b74\",\n            \"balance\": \"100000000000000000\",\n            \"validator\": null\n          }\n        }\n      ],\n      \"wasm_config\": {\n        \"max_memory\": 64,\n        \"max_stack_height\": 65536,\n        \"opcode_costs\": {\n          \"bit\": 300,\n          \"add\": 210,\n          \"mul\": 240,\n          \"div\": 320,\n          \"load\": 2500,\n          \"store\": 4700,\n          \"const\": 110,\n          \"local\": 390,\n          \"global\": 390,\n          \"control_flow\": 440,\n          \"integer_comparison\": 250,\n          \"conversion\": 420,\n          \"unreachable\": 270,\n          \"nop\": 200,\n          \"current_memory\": 290,\n          \"grow_memory\": 240000,\n          \"regular\": 210\n        },\n        \"storage_costs\": {\n          \"gas_per_byte\": 625000\n        },\n        \"host_function_costs\": {\n          \"read_value\": {\n            \"cost\": 6000,\n            \"arguments\": [\n              0,\n              0,\n              0\n            ]\n          },\n          \"read_value_local\": {\n            \"cost\": 5500,\n            \"arguments\": [\n              0,\n              590,\n              0\n            ]\n          },\n          \"write\": {\n            \"cost\": 14000,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              980\n            ]\n  
        },\n          \"write_local\": {\n            \"cost\": 9500,\n            \"arguments\": [\n              0,\n              1800,\n              0,\n              520\n            ]\n          },\n          \"add\": {\n            \"cost\": 5800,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ]\n          },\n          \"new_uref\": {\n            \"cost\": 17000,\n            \"arguments\": [\n              0,\n              0,\n              590\n            ]\n          },\n          \"load_named_keys\": {\n            \"cost\": 42000,\n            \"arguments\": [\n              0,\n              0\n            ]\n          },\n          \"ret\": {\n            \"cost\": 23000,\n            \"arguments\": [\n              0,\n              420\n            ]\n          },\n          \"get_key\": {\n            \"cost\": 2000,\n            \"arguments\": [\n              0,\n              440,\n              0,\n              0,\n              0\n            ]\n          },\n          \"has_key\": {\n            \"cost\": 1500,\n            \"arguments\": [\n              0,\n              840\n            ]\n          },\n          \"put_key\": {\n            \"cost\": 38000,\n            \"arguments\": [\n              0,\n              1100,\n              0,\n              0\n            ]\n          },\n          \"remove_key\": {\n            \"cost\": 61000,\n            \"arguments\": [\n              0,\n              3200\n            ]\n          },\n          \"revert\": {\n            \"cost\": 500,\n            \"arguments\": [\n              0\n            ]\n          },\n          \"is_valid_uref\": {\n            \"cost\": 760,\n            \"arguments\": [\n              0,\n              0\n            ]\n          },\n          \"add_associated_key\": {\n            \"cost\": 9000,\n            \"arguments\": [\n              0,\n              0,\n              0\n   
         ]\n          },\n          \"remove_associated_key\": {\n            \"cost\": 4200,\n            \"arguments\": [\n              0,\n              0\n            ]\n          },\n          \"update_associated_key\": {\n            \"cost\": 4200,\n            \"arguments\": [\n              0,\n              0,\n              0\n            ]\n          },\n          \"set_action_threshold\": {\n            \"cost\": 74000,\n            \"arguments\": [\n              0,\n              0\n            ]\n          },\n          \"get_caller\": {\n            \"cost\": 380,\n            \"arguments\": [\n              0\n            ]\n          },\n          \"get_blocktime\": {\n            \"cost\": 330,\n            \"arguments\": [\n              0\n            ]\n          },\n          \"create_purse\": {\n            \"cost\": 170000,\n            \"arguments\": [\n              0,\n              0\n            ]\n          },\n          \"transfer_to_account\": {\n            \"cost\": 24000,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ]\n          },\n          \"transfer_from_purse_to_account\": {\n            \"cost\": 160000,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ]\n          },\n          \"transfer_from_purse_to_purse\": {\n            \"cost\": 82000,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ]\n          },\n          \"get_balance\": {\n            \"cost\": 3800,\n            \"arguments\": [\n              0,\n              0,\n              0\n            ]\n          },\n          \"get_phase\": {\n            
\"cost\": 710,\n            \"arguments\": [\n              0\n            ]\n          },\n          \"get_system_contract\": {\n            \"cost\": 1100,\n            \"arguments\": [\n              0,\n              0,\n              0\n            ]\n          },\n          \"get_main_purse\": {\n            \"cost\": 1300,\n            \"arguments\": [\n              0\n            ]\n          },\n          \"read_host_buffer\": {\n            \"cost\": 3500,\n            \"arguments\": [\n              0,\n              310,\n              0\n            ]\n          },\n          \"create_contract_package_at_hash\": {\n            \"cost\": 200,\n            \"arguments\": [\n              0,\n              0\n            ]\n          },\n          \"create_contract_user_group\": {\n            \"cost\": 200,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ]\n          },\n          \"add_contract_version\": {\n            \"cost\": 200,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ]\n          },\n          \"disable_contract_version\": {\n            \"cost\": 200,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ]\n          },\n          \"call_contract\": {\n            \"cost\": 4500,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              420,\n              0\n            ]\n          },\n          \"call_versioned_contract\": {\n            \"cost\": 200,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n     
         0,\n              0,\n              0\n            ]\n          },\n          \"get_named_arg_size\": {\n            \"cost\": 200,\n            \"arguments\": [\n              0,\n              0,\n              0\n            ]\n          },\n          \"get_named_arg\": {\n            \"cost\": 200,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ]\n          },\n          \"remove_contract_user_group\": {\n            \"cost\": 200,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ]\n          },\n          \"provision_contract_user_group_uref\": {\n            \"cost\": 200,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0\n            ]\n          },\n          \"remove_contract_user_group_urefs\": {\n            \"cost\": 200,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ]\n          },\n          \"print\": {\n            \"cost\": 20000,\n            \"arguments\": [\n              0,\n              4600\n            ]\n          },\n          \"blake2b\": {\n            \"cost\": 200,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ]\n          }\n        }\n      },\n      \"system_config\": {\n        \"wasmless_transfer_cost\": 10000,\n        \"auction_costs\": {\n          \"get_era_validators\": 10000,\n          \"read_seigniorage_recipients\": 10000,\n          \"add_bid\": 10000,\n          \"withdraw_bid\": 10000,\n          \"delegate\": 10000,\n          \"undelegate\": 10000,\n          \"run_auction\": 10000,\n          \"slash\": 10000,\n          \"distribute\": 10000,\n          \"withdraw_delegator_reward\": 10000,\n          \"withdraw_validator_reward\": 
10000,\n          \"read_era_id\": 10000,\n          \"activate_bid\": 10000\n        },\n        \"mint_costs\": {\n          \"mint\": 10000,\n          \"reduce_total_supply\": 10000,\n          \"create\": 10000,\n          \"balance\": 10000,\n          \"transfer\": 10000,\n          \"read_base_round_reward\": 10000\n        },\n        \"handle_payment_costs\": {\n          \"get_payment_purse\": 10000,\n          \"set_refund_purse\": 10000,\n          \"get_refund_purse\": 10000,\n          \"finalize_payment\": 10000\n        },\n        \"standard_payment_costs\": {\n          \"pay\": 10000\n        }\n      },\n      \"validator_slots\": 5,\n      \"auction_delay\": 3,\n      \"locked_funds_period_millis\": 7776000000,\n      \"round_seigniorage_rate\": [\n        6414,\n        623437335209\n      ],\n      \"unbonding_delay\": 14,\n      \"genesis_timestamp_millis\": 0\n    }\n  },\n  \"post_state_hash\": \"483aa1dc35286904ac958f38b71080b78d0904465ef9596b364506996dd5f0cc\"\n}"
  },
  {
    "path": "execution_engine_testing/tests/fixtures/release_1_3_1/state.json",
    "content": "{\n  \"genesis_request\": {\n    \"ee_config\": {\n      \"accounts\": [\n        {\n          \"Account\": {\n            \"balance\": \"100000000000000000\",\n            \"public_key\": \"01d5af25e204ad03d0a26e236996404f1be51a60948bcc026cd084a83690b756d3\",\n            \"validator\": null\n          }\n        },\n        {\n          \"Account\": {\n            \"balance\": \"100000000000000000\",\n            \"public_key\": \"01bb47d33d777b4559bb917d1825827421c4a6b1b9737f12e1c58ea4305af88b74\",\n            \"validator\": null\n          }\n        }\n      ],\n      \"auction_delay\": 3,\n      \"genesis_timestamp_millis\": 0,\n      \"locked_funds_period_millis\": 7776000000,\n      \"round_seigniorage_rate\": [\n        6414,\n        623437335209\n      ],\n      \"system_config\": {\n        \"auction_costs\": {\n          \"activate_bid\": 10000,\n          \"add_bid\": 10000,\n          \"delegate\": 10000,\n          \"distribute\": 10000,\n          \"get_era_validators\": 10000,\n          \"read_era_id\": 10000,\n          \"read_seigniorage_recipients\": 10000,\n          \"run_auction\": 10000,\n          \"slash\": 10000,\n          \"undelegate\": 10000,\n          \"withdraw_bid\": 10000,\n          \"withdraw_delegator_reward\": 10000,\n          \"withdraw_validator_reward\": 10000\n        },\n        \"handle_payment_costs\": {\n          \"finalize_payment\": 10000,\n          \"get_payment_purse\": 10000,\n          \"get_refund_purse\": 10000,\n          \"set_refund_purse\": 10000\n        },\n        \"mint_costs\": {\n          \"balance\": 10000,\n          \"create\": 10000,\n          \"mint\": 10000,\n          \"read_base_round_reward\": 10000,\n          \"reduce_total_supply\": 10000,\n          \"transfer\": 10000\n        },\n        \"standard_payment_costs\": {\n          \"pay\": 10000\n        },\n        \"wasmless_transfer_cost\": 10000\n      },\n      \"unbonding_delay\": 14,\n      
\"validator_slots\": 5,\n      \"wasm_config\": {\n        \"host_function_costs\": {\n          \"add\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 5800\n          },\n          \"add_associated_key\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 9000\n          },\n          \"add_contract_version\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"blake2b\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"call_contract\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              420,\n              0\n            ],\n            \"cost\": 4500\n          },\n          \"call_versioned_contract\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"create_contract_package_at_hash\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"create_contract_user_group\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"create_purse\": {\n            \"arguments\": [\n              0,\n  
            0\n            ],\n            \"cost\": 170000\n          },\n          \"dictionary_get\": {\n            \"arguments\": [\n              0,\n              590,\n              0\n            ],\n            \"cost\": 5500\n          },\n          \"dictionary_put\": {\n            \"arguments\": [\n              0,\n              1800,\n              0,\n              520\n            ],\n            \"cost\": 9500\n          },\n          \"disable_contract_version\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"get_balance\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 3800\n          },\n          \"get_blocktime\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 330\n          },\n          \"get_caller\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 380\n          },\n          \"get_key\": {\n            \"arguments\": [\n              0,\n              440,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 2000\n          },\n          \"get_main_purse\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 1300\n          },\n          \"get_named_arg\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"get_named_arg_size\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"get_phase\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 710\n          },\n          \"get_system_contract\": {\n            \"arguments\": [\n 
             0,\n              0,\n              0\n            ],\n            \"cost\": 1100\n          },\n          \"has_key\": {\n            \"arguments\": [\n              0,\n              840\n            ],\n            \"cost\": 1500\n          },\n          \"is_valid_uref\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 760\n          },\n          \"load_named_keys\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 42000\n          },\n          \"new_uref\": {\n            \"arguments\": [\n              0,\n              0,\n              590\n            ],\n            \"cost\": 17000\n          },\n          \"print\": {\n            \"arguments\": [\n              0,\n              4600\n            ],\n            \"cost\": 20000\n          },\n          \"provision_contract_user_group_uref\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"put_key\": {\n            \"arguments\": [\n              0,\n              1100,\n              0,\n              0\n            ],\n            \"cost\": 38000\n          },\n          \"read_host_buffer\": {\n            \"arguments\": [\n              0,\n              310,\n              0\n            ],\n            \"cost\": 3500\n          },\n          \"read_value\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 6000\n          },\n          \"remove_associated_key\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 4200\n          },\n          \"remove_contract_user_group\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            
\"cost\": 200\n          },\n          \"remove_contract_user_group_urefs\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"remove_key\": {\n            \"arguments\": [\n              0,\n              3200\n            ],\n            \"cost\": 61000\n          },\n          \"ret\": {\n            \"arguments\": [\n              0,\n              420\n            ],\n            \"cost\": 23000\n          },\n          \"revert\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 500\n          },\n          \"set_action_threshold\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 74000\n          },\n          \"transfer_from_purse_to_account\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 160000\n          },\n          \"transfer_from_purse_to_purse\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 82000\n          },\n          \"transfer_to_account\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 24000\n          },\n          \"update_associated_key\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 4200\n          },\n          \"write\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n          
    980\n            ],\n            \"cost\": 14000\n          }\n        },\n        \"max_memory\": 64,\n        \"max_stack_height\": 65536,\n        \"opcode_costs\": {\n          \"add\": 210,\n          \"bit\": 300,\n          \"const\": 110,\n          \"control_flow\": 440,\n          \"conversion\": 420,\n          \"current_memory\": 290,\n          \"div\": 320,\n          \"global\": 390,\n          \"grow_memory\": 240000,\n          \"integer_comparison\": 250,\n          \"load\": 2500,\n          \"local\": 390,\n          \"mul\": 240,\n          \"nop\": 200,\n          \"regular\": 210,\n          \"store\": 4700,\n          \"unreachable\": 270\n        },\n        \"storage_costs\": {\n          \"gas_per_byte\": 625000\n        }\n      }\n    },\n    \"genesis_config_hash\": \"2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\",\n    \"protocol_version\": \"1.0.0\"\n  },\n  \"post_state_hash\": \"cedf51e7a23890fc14873d6c4da7076d80d79ed95038dd80710d45dbab4c822b\"\n}"
  },
  {
    "path": "execution_engine_testing/tests/fixtures/release_1_4_2/state.json",
    "content": "{\n  \"genesis_request\": {\n    \"ee_config\": {\n      \"accounts\": [\n        {\n          \"Account\": {\n            \"balance\": \"100000000000000000\",\n            \"public_key\": \"01d5AF25e204AD03D0a26e236996404F1be51a60948bcc026cD084a83690B756d3\",\n            \"validator\": null\n          }\n        },\n        {\n          \"Account\": {\n            \"balance\": \"100000000000000000\",\n            \"public_key\": \"01BB47d33d777B4559Bb917d1825827421C4a6B1b9737F12e1C58EA4305aF88b74\",\n            \"validator\": null\n          }\n        },\n        {\n          \"Account\": {\n            \"balance\": \"7500000000000000\",\n            \"public_key\": \"01cA57eEd30e4a7274Ef4C648F56F58F880B20D2CA25725D9e5C13C83C08c09aEB\",\n            \"validator\": null\n          }\n        },\n        {\n          \"Account\": {\n            \"balance\": \"7500000000000000\",\n            \"public_key\": \"0153840868cB293a6e0a636B1f2245BEfBe1988fB287Bd6A9D84dF5Df4A519dd11\",\n            \"validator\": null\n          }\n        }\n      ],\n      \"auction_delay\": 3,\n      \"genesis_timestamp_millis\": 0,\n      \"locked_funds_period_millis\": 7776000000,\n      \"round_seigniorage_rate\": [\n        6414,\n        623437335209\n      ],\n      \"system_config\": {\n        \"auction_costs\": {\n          \"activate_bid\": 10000,\n          \"add_bid\": 10000,\n          \"delegate\": 10000,\n          \"distribute\": 10000,\n          \"get_era_validators\": 10000,\n          \"read_era_id\": 10000,\n          \"read_seigniorage_recipients\": 10000,\n          \"run_auction\": 10000,\n          \"slash\": 10000,\n          \"undelegate\": 10000,\n          \"withdraw_bid\": 10000,\n          \"withdraw_delegator_reward\": 10000,\n          \"withdraw_validator_reward\": 10000\n        },\n        \"handle_payment_costs\": {\n          \"finalize_payment\": 10000,\n          \"get_payment_purse\": 10000,\n          \"get_refund_purse\": 
10000,\n          \"set_refund_purse\": 10000\n        },\n        \"mint_costs\": {\n          \"balance\": 10000,\n          \"create\": 2500000000,\n          \"mint\": 2500000000,\n          \"read_base_round_reward\": 10000,\n          \"reduce_total_supply\": 10000,\n          \"transfer\": 10000\n        },\n        \"standard_payment_costs\": {\n          \"pay\": 10000\n        },\n        \"wasmless_transfer_cost\": 100000000\n      },\n      \"unbonding_delay\": 14,\n      \"validator_slots\": 5,\n      \"wasm_config\": {\n        \"host_function_costs\": {\n          \"add\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 5800\n          },\n          \"add_associated_key\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 9000\n          },\n          \"add_contract_version\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"blake2b\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"call_contract\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              420,\n              0\n            ],\n            \"cost\": 4500\n          },\n          \"call_versioned_contract\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          
\"create_contract_package_at_hash\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"create_contract_user_group\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"create_purse\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 2500000000\n          },\n          \"dictionary_get\": {\n            \"arguments\": [\n              0,\n              590,\n              0\n            ],\n            \"cost\": 5500\n          },\n          \"dictionary_put\": {\n            \"arguments\": [\n              0,\n              1800,\n              0,\n              520\n            ],\n            \"cost\": 9500\n          },\n          \"disable_contract_version\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"get_balance\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 3800\n          },\n          \"get_blocktime\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 330\n          },\n          \"get_caller\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 380\n          },\n          \"get_key\": {\n            \"arguments\": [\n              0,\n              440,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 2000\n          },\n          \"get_main_purse\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 1300\n          },\n          \"get_named_arg\": {\n            
\"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"get_named_arg_size\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"get_phase\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 710\n          },\n          \"get_system_contract\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 1100\n          },\n          \"has_key\": {\n            \"arguments\": [\n              0,\n              840\n            ],\n            \"cost\": 1500\n          },\n          \"is_valid_uref\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 760\n          },\n          \"load_named_keys\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 42000\n          },\n          \"new_uref\": {\n            \"arguments\": [\n              0,\n              0,\n              590\n            ],\n            \"cost\": 17000\n          },\n          \"print\": {\n            \"arguments\": [\n              0,\n              4600\n            ],\n            \"cost\": 20000\n          },\n          \"provision_contract_user_group_uref\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"put_key\": {\n            \"arguments\": [\n              0,\n              1100,\n              0,\n              0\n            ],\n            \"cost\": 38000\n          },\n          \"read_host_buffer\": {\n            \"arguments\": [\n              0,\n              310,\n              0\n            ],\n            \"cost\": 
3500\n          },\n          \"read_value\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 6000\n          },\n          \"remove_associated_key\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 4200\n          },\n          \"remove_contract_user_group\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"remove_contract_user_group_urefs\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"remove_key\": {\n            \"arguments\": [\n              0,\n              3200\n            ],\n            \"cost\": 61000\n          },\n          \"ret\": {\n            \"arguments\": [\n              0,\n              420\n            ],\n            \"cost\": 23000\n          },\n          \"revert\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 500\n          },\n          \"set_action_threshold\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 74000\n          },\n          \"transfer_from_purse_to_account\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 2500000000\n          },\n          \"transfer_from_purse_to_purse\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 82000\n          },\n          
\"transfer_to_account\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 2500000000\n          },\n          \"update_associated_key\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 4200\n          },\n          \"write\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              980\n            ],\n            \"cost\": 14000\n          }\n        },\n        \"max_memory\": 64,\n        \"max_stack_height\": 65536,\n        \"opcode_costs\": {\n          \"add\": 210,\n          \"bit\": 300,\n          \"const\": 110,\n          \"control_flow\": 440,\n          \"conversion\": 420,\n          \"current_memory\": 290,\n          \"div\": 320,\n          \"global\": 390,\n          \"grow_memory\": 240000,\n          \"integer_comparison\": 250,\n          \"load\": 2500,\n          \"local\": 390,\n          \"mul\": 240,\n          \"nop\": 200,\n          \"regular\": 210,\n          \"store\": 4700,\n          \"unreachable\": 270\n        },\n        \"storage_costs\": {\n          \"gas_per_byte\": 625000\n        }\n      }\n    },\n    \"genesis_config_hash\": \"2a2a2A2A2a2a2a2A2a2A2A2A2a2A2a2a2A2a2A2A2a2A2A2a2a2A2a2a2A2A2a2a\",\n    \"protocol_version\": \"1.0.0\"\n  },\n  \"post_state_hash\": \"AaF255056ED8966704422AfaC3B9c5F95c9Cc8Fc90E777939c82c31451e826C9\"\n}"
  },
  {
    "path": "execution_engine_testing/tests/fixtures/release_1_4_3/state.json",
    "content": "{\n  \"genesis_request\": {\n    \"ee_config\": {\n      \"accounts\": [\n        {\n          \"Account\": {\n            \"balance\": \"100000000000000000\",\n            \"public_key\": \"01d5AF25e204AD03D0a26e236996404F1be51a60948bcc026cD084a83690B756d3\",\n            \"validator\": null\n          }\n        },\n        {\n          \"Account\": {\n            \"balance\": \"100000000000000000\",\n            \"public_key\": \"01BB47d33d777B4559Bb917d1825827421C4a6B1b9737F12e1C58EA4305aF88b74\",\n            \"validator\": null\n          }\n        },\n        {\n          \"Account\": {\n            \"balance\": \"7500000000000000\",\n            \"public_key\": \"0197fFc883c80Bee7237EF95d9B9B703d4AD63e60A21e605867682B75b8b3f4303\",\n            \"validator\": {\n              \"bonded_amount\": \"100000000\",\n              \"delegation_rate\": 0\n            }\n          }\n        },\n        {\n          \"Account\": {\n            \"balance\": \"7500000000000000\",\n            \"public_key\": \"01466627D52773c9D5cDA6C8FD28ea31eC7b94e68Aa8d42E2AD31A75dc8d24ed07\",\n            \"validator\": {\n              \"bonded_amount\": \"200000000\",\n              \"delegation_rate\": 0\n            }\n          }\n        },\n        {\n          \"Account\": {\n            \"balance\": \"7500000000000000\",\n            \"public_key\": \"01cA57eEd30e4a7274Ef4C648F56F58F880B20D2CA25725D9e5C13C83C08c09aEB\",\n            \"validator\": null\n          }\n        },\n        {\n          \"Account\": {\n            \"balance\": \"7500000000000000\",\n            \"public_key\": \"0153840868cB293a6e0a636B1f2245BEfBe1988fB287Bd6A9D84dF5Df4A519dd11\",\n            \"validator\": null\n          }\n        },\n        {\n          \"Account\": {\n            \"balance\": \"7500000000000000\",\n            \"public_key\": \"01fC947730F49eB01427a66e050733294d9e520e545c7a27125A780634e0860a27\",\n            \"validator\": null\n          }\n        }\n 
     ],\n      \"auction_delay\": 3,\n      \"genesis_timestamp_millis\": 0,\n      \"locked_funds_period_millis\": 7776000000,\n      \"round_seigniorage_rate\": [\n        6414,\n        623437335209\n      ],\n      \"system_config\": {\n        \"auction_costs\": {\n          \"activate_bid\": 10000,\n          \"add_bid\": 10000,\n          \"delegate\": 10000,\n          \"distribute\": 10000,\n          \"get_era_validators\": 10000,\n          \"read_era_id\": 10000,\n          \"read_seigniorage_recipients\": 10000,\n          \"run_auction\": 10000,\n          \"slash\": 10000,\n          \"undelegate\": 10000,\n          \"withdraw_bid\": 10000,\n          \"withdraw_delegator_reward\": 10000,\n          \"withdraw_validator_reward\": 10000\n        },\n        \"handle_payment_costs\": {\n          \"finalize_payment\": 10000,\n          \"get_payment_purse\": 10000,\n          \"get_refund_purse\": 10000,\n          \"set_refund_purse\": 10000\n        },\n        \"mint_costs\": {\n          \"balance\": 10000,\n          \"create\": 2500000000,\n          \"mint\": 2500000000,\n          \"read_base_round_reward\": 10000,\n          \"reduce_total_supply\": 10000,\n          \"transfer\": 10000\n        },\n        \"standard_payment_costs\": {\n          \"pay\": 10000\n        },\n        \"wasmless_transfer_cost\": 100000000\n      },\n      \"unbonding_delay\": 14,\n      \"validator_slots\": 5,\n      \"wasm_config\": {\n        \"host_function_costs\": {\n          \"add\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 5800\n          },\n          \"add_associated_key\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 9000\n          },\n          \"add_contract_version\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              
0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"blake2b\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"call_contract\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              420,\n              0\n            ],\n            \"cost\": 4500\n          },\n          \"call_versioned_contract\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"create_contract_package_at_hash\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"create_contract_user_group\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"create_purse\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 2500000000\n          },\n          \"dictionary_get\": {\n            \"arguments\": [\n              0,\n              590,\n              0\n            ],\n            \"cost\": 5500\n          },\n          \"dictionary_put\": {\n            \"arguments\": [\n              0,\n              1800,\n              0,\n              520\n            ],\n            \"cost\": 9500\n          },\n          \"disable_contract_version\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n    
          0\n            ],\n            \"cost\": 200\n          },\n          \"get_balance\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 3800\n          },\n          \"get_blocktime\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 330\n          },\n          \"get_caller\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 380\n          },\n          \"get_key\": {\n            \"arguments\": [\n              0,\n              440,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 2000\n          },\n          \"get_main_purse\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 1300\n          },\n          \"get_named_arg\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"get_named_arg_size\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"get_phase\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 710\n          },\n          \"get_system_contract\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 1100\n          },\n          \"has_key\": {\n            \"arguments\": [\n              0,\n              840\n            ],\n            \"cost\": 1500\n          },\n          \"is_valid_uref\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 760\n          },\n          \"load_named_keys\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 42000\n          },\n      
    \"new_uref\": {\n            \"arguments\": [\n              0,\n              0,\n              590\n            ],\n            \"cost\": 17000\n          },\n          \"print\": {\n            \"arguments\": [\n              0,\n              4600\n            ],\n            \"cost\": 20000\n          },\n          \"provision_contract_user_group_uref\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"put_key\": {\n            \"arguments\": [\n              0,\n              1100,\n              0,\n              0\n            ],\n            \"cost\": 38000\n          },\n          \"read_host_buffer\": {\n            \"arguments\": [\n              0,\n              310,\n              0\n            ],\n            \"cost\": 3500\n          },\n          \"read_value\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 6000\n          },\n          \"remove_associated_key\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 4200\n          },\n          \"remove_contract_user_group\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"remove_contract_user_group_urefs\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"remove_key\": {\n            \"arguments\": [\n              0,\n              3200\n            ],\n            \"cost\": 61000\n          },\n          \"ret\": {\n            \"arguments\": [\n              0,\n              420\n            ],\n            \"cost\": 23000\n          },\n   
       \"revert\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 500\n          },\n          \"set_action_threshold\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 74000\n          },\n          \"transfer_from_purse_to_account\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 2500000000\n          },\n          \"transfer_from_purse_to_purse\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 82000\n          },\n          \"transfer_to_account\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 2500000000\n          },\n          \"update_associated_key\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 4200\n          },\n          \"write\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              980\n            ],\n            \"cost\": 14000\n          }\n        },\n        \"max_memory\": 64,\n        \"max_stack_height\": 65536,\n        \"opcode_costs\": {\n          \"add\": 210,\n          \"bit\": 300,\n          \"const\": 110,\n          \"control_flow\": 440,\n          \"conversion\": 420,\n          \"current_memory\": 290,\n          \"div\": 320,\n          \"global\": 390,\n          \"grow_memory\": 240000,\n          \"integer_comparison\": 250,\n          \"load\": 2500,\n          \"local\": 390,\n          \"mul\": 240,\n          
\"nop\": 200,\n          \"regular\": 210,\n          \"store\": 4700,\n          \"unreachable\": 270\n        },\n        \"storage_costs\": {\n          \"gas_per_byte\": 625000\n        }\n      }\n    },\n    \"genesis_config_hash\": \"2a2a2A2A2a2a2a2A2a2A2A2A2a2A2a2a2A2a2A2A2a2A2A2a2a2A2a2a2A2A2a2a\",\n    \"protocol_version\": \"1.0.0\"\n  },\n  \"post_state_hash\": \"916dCD18De4C08947b2C85Da4d26FEf5C92E27Cb559EdfD23A4D2f4780a3f422\"\n}"
  },
  {
    "path": "execution_engine_testing/tests/fixtures/release_1_4_4/state.json",
    "content": "{\n  \"genesis_request\": {\n    \"ee_config\": {\n      \"accounts\": [\n        {\n          \"Account\": {\n            \"balance\": \"100000000000000000\",\n            \"public_key\": \"01d5af25e204ad03d0a26e236996404f1be51a60948bcc026cd084a83690b756d3\",\n            \"validator\": null\n          }\n        },\n        {\n          \"Account\": {\n            \"balance\": \"100000000000000000\",\n            \"public_key\": \"01bb47d33d777b4559bb917d1825827421c4a6b1b9737f12e1c58ea4305af88b74\",\n            \"validator\": null\n          }\n        }\n      ],\n      \"auction_delay\": 3,\n      \"genesis_timestamp_millis\": 0,\n      \"locked_funds_period_millis\": 7776000000,\n      \"round_seigniorage_rate\": [\n        6414,\n        623437335209\n      ],\n      \"system_config\": {\n        \"auction_costs\": {\n          \"activate_bid\": 10000,\n          \"add_bid\": 10000,\n          \"delegate\": 10000,\n          \"distribute\": 10000,\n          \"get_era_validators\": 10000,\n          \"read_era_id\": 10000,\n          \"read_seigniorage_recipients\": 10000,\n          \"run_auction\": 10000,\n          \"slash\": 10000,\n          \"undelegate\": 10000,\n          \"withdraw_bid\": 10000,\n          \"withdraw_delegator_reward\": 10000,\n          \"withdraw_validator_reward\": 10000\n        },\n        \"handle_payment_costs\": {\n          \"finalize_payment\": 10000,\n          \"get_payment_purse\": 10000,\n          \"get_refund_purse\": 10000,\n          \"set_refund_purse\": 10000\n        },\n        \"mint_costs\": {\n          \"balance\": 10000,\n          \"create\": 2500000000,\n          \"mint\": 2500000000,\n          \"read_base_round_reward\": 10000,\n          \"reduce_total_supply\": 10000,\n          \"transfer\": 10000\n        },\n        \"standard_payment_costs\": {\n          \"pay\": 10000\n        },\n        \"wasmless_transfer_cost\": 100000000\n      },\n      \"unbonding_delay\": 14,\n      
\"validator_slots\": 5,\n      \"wasm_config\": {\n        \"host_function_costs\": {\n          \"add\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 5800\n          },\n          \"add_associated_key\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 9000\n          },\n          \"add_contract_version\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"blake2b\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"call_contract\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              420,\n              0\n            ],\n            \"cost\": 4500\n          },\n          \"call_versioned_contract\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"create_contract_package_at_hash\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"create_contract_user_group\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"create_purse\": {\n            \"arguments\": [\n              0,\n  
            0\n            ],\n            \"cost\": 2500000000\n          },\n          \"dictionary_get\": {\n            \"arguments\": [\n              0,\n              590,\n              0\n            ],\n            \"cost\": 5500\n          },\n          \"dictionary_put\": {\n            \"arguments\": [\n              0,\n              1800,\n              0,\n              520\n            ],\n            \"cost\": 9500\n          },\n          \"disable_contract_version\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"get_balance\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 3800\n          },\n          \"get_blocktime\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 330\n          },\n          \"get_caller\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 380\n          },\n          \"get_key\": {\n            \"arguments\": [\n              0,\n              440,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 2000\n          },\n          \"get_main_purse\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 1300\n          },\n          \"get_named_arg\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"get_named_arg_size\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"get_phase\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 710\n          },\n          \"get_system_contract\": {\n            \"arguments\": 
[\n              0,\n              0,\n              0\n            ],\n            \"cost\": 1100\n          },\n          \"has_key\": {\n            \"arguments\": [\n              0,\n              840\n            ],\n            \"cost\": 1500\n          },\n          \"is_valid_uref\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 760\n          },\n          \"load_named_keys\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 42000\n          },\n          \"new_uref\": {\n            \"arguments\": [\n              0,\n              0,\n              590\n            ],\n            \"cost\": 17000\n          },\n          \"print\": {\n            \"arguments\": [\n              0,\n              4600\n            ],\n            \"cost\": 20000\n          },\n          \"provision_contract_user_group_uref\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"put_key\": {\n            \"arguments\": [\n              0,\n              1100,\n              0,\n              0\n            ],\n            \"cost\": 38000\n          },\n          \"read_host_buffer\": {\n            \"arguments\": [\n              0,\n              310,\n              0\n            ],\n            \"cost\": 3500\n          },\n          \"read_value\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 6000\n          },\n          \"remove_associated_key\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 4200\n          },\n          \"remove_contract_user_group\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n          
  \"cost\": 200\n          },\n          \"remove_contract_user_group_urefs\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"remove_key\": {\n            \"arguments\": [\n              0,\n              3200\n            ],\n            \"cost\": 61000\n          },\n          \"ret\": {\n            \"arguments\": [\n              0,\n              420\n            ],\n            \"cost\": 23000\n          },\n          \"revert\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 500\n          },\n          \"set_action_threshold\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 74000\n          },\n          \"transfer_from_purse_to_account\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 2500000000\n          },\n          \"transfer_from_purse_to_purse\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 82000\n          },\n          \"transfer_to_account\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 2500000000\n          },\n          \"update_associated_key\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 4200\n          },\n          \"write\": {\n            \"arguments\": [\n              0,\n              0,\n              
0,\n              980\n            ],\n            \"cost\": 14000\n          }\n        },\n        \"max_memory\": 64,\n        \"max_stack_height\": 188,\n        \"opcode_costs\": {\n          \"add\": 210,\n          \"bit\": 300,\n          \"const\": 110,\n          \"control_flow\": 440,\n          \"conversion\": 420,\n          \"current_memory\": 290,\n          \"div\": 320,\n          \"global\": 390,\n          \"grow_memory\": 240000,\n          \"integer_comparison\": 250,\n          \"load\": 2500,\n          \"local\": 390,\n          \"mul\": 240,\n          \"nop\": 200,\n          \"regular\": 210,\n          \"store\": 4700,\n          \"unreachable\": 270\n        },\n        \"storage_costs\": {\n          \"gas_per_byte\": 625000\n        }\n      }\n    },\n    \"genesis_config_hash\": \"2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\",\n    \"protocol_version\": \"1.0.0\"\n  },\n  \"post_state_hash\": \"f3047c087de9e9bac49de080e061e3313c7227ddc9ce22b8c9c617c397569987\"\n}"
  },
  {
    "path": "execution_engine_testing/tests/fixtures/release_1_4_5/state.json",
    "content": "{\n  \"genesis_request\": {\n    \"ee_config\": {\n      \"accounts\": [\n        {\n          \"Account\": {\n            \"balance\": \"100000000000000000\",\n            \"public_key\": \"01d5af25e204ad03d0a26e236996404f1be51a60948bcc026cd084a83690b756d3\",\n            \"validator\": null\n          }\n        },\n        {\n          \"Account\": {\n            \"balance\": \"100000000000000000\",\n            \"public_key\": \"01bb47d33d777b4559bb917d1825827421c4a6b1b9737f12e1c58ea4305af88b74\",\n            \"validator\": null\n          }\n        }\n      ],\n      \"auction_delay\": 3,\n      \"genesis_timestamp_millis\": 0,\n      \"locked_funds_period_millis\": 7776000000,\n      \"round_seigniorage_rate\": [\n        6414,\n        623437335209\n      ],\n      \"system_config\": {\n        \"auction_costs\": {\n          \"activate_bid\": 10000,\n          \"add_bid\": 10000,\n          \"delegate\": 10000,\n          \"distribute\": 10000,\n          \"get_era_validators\": 10000,\n          \"read_era_id\": 10000,\n          \"read_seigniorage_recipients\": 10000,\n          \"run_auction\": 10000,\n          \"slash\": 10000,\n          \"undelegate\": 10000,\n          \"withdraw_bid\": 10000,\n          \"withdraw_delegator_reward\": 10000,\n          \"withdraw_validator_reward\": 10000\n        },\n        \"handle_payment_costs\": {\n          \"finalize_payment\": 10000,\n          \"get_payment_purse\": 10000,\n          \"get_refund_purse\": 10000,\n          \"set_refund_purse\": 10000\n        },\n        \"mint_costs\": {\n          \"balance\": 10000,\n          \"create\": 2500000000,\n          \"mint\": 2500000000,\n          \"read_base_round_reward\": 10000,\n          \"reduce_total_supply\": 10000,\n          \"transfer\": 10000\n        },\n        \"standard_payment_costs\": {\n          \"pay\": 10000\n        },\n        \"wasmless_transfer_cost\": 100000000\n      },\n      \"unbonding_delay\": 14,\n      
\"validator_slots\": 5,\n      \"wasm_config\": {\n        \"host_function_costs\": {\n          \"add\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 5800\n          },\n          \"add_associated_key\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 9000\n          },\n          \"add_contract_version\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"blake2b\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"call_contract\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              420,\n              0\n            ],\n            \"cost\": 4500\n          },\n          \"call_versioned_contract\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"create_contract_package_at_hash\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"create_contract_user_group\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"create_purse\": {\n            \"arguments\": [\n              0,\n  
            0\n            ],\n            \"cost\": 2500000000\n          },\n          \"dictionary_get\": {\n            \"arguments\": [\n              0,\n              590,\n              0\n            ],\n            \"cost\": 5500\n          },\n          \"dictionary_put\": {\n            \"arguments\": [\n              0,\n              1800,\n              0,\n              520\n            ],\n            \"cost\": 9500\n          },\n          \"disable_contract_version\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"get_balance\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 3800\n          },\n          \"get_blocktime\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 330\n          },\n          \"get_caller\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 380\n          },\n          \"get_key\": {\n            \"arguments\": [\n              0,\n              440,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 2000\n          },\n          \"get_main_purse\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 1300\n          },\n          \"get_named_arg\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"get_named_arg_size\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"get_phase\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 710\n          },\n          \"get_system_contract\": {\n            \"arguments\": 
[\n              0,\n              0,\n              0\n            ],\n            \"cost\": 1100\n          },\n          \"has_key\": {\n            \"arguments\": [\n              0,\n              840\n            ],\n            \"cost\": 1500\n          },\n          \"is_valid_uref\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 760\n          },\n          \"load_named_keys\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 42000\n          },\n          \"new_uref\": {\n            \"arguments\": [\n              0,\n              0,\n              590\n            ],\n            \"cost\": 17000\n          },\n          \"print\": {\n            \"arguments\": [\n              0,\n              4600\n            ],\n            \"cost\": 20000\n          },\n          \"provision_contract_user_group_uref\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"put_key\": {\n            \"arguments\": [\n              0,\n              1100,\n              0,\n              0\n            ],\n            \"cost\": 38000\n          },\n          \"read_host_buffer\": {\n            \"arguments\": [\n              0,\n              310,\n              0\n            ],\n            \"cost\": 3500\n          },\n          \"read_value\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 6000\n          },\n          \"remove_associated_key\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 4200\n          },\n          \"remove_contract_user_group\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ],\n          
  \"cost\": 200\n          },\n          \"remove_contract_user_group_urefs\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 200\n          },\n          \"remove_key\": {\n            \"arguments\": [\n              0,\n              3200\n            ],\n            \"cost\": 61000\n          },\n          \"ret\": {\n            \"arguments\": [\n              0,\n              420\n            ],\n            \"cost\": 23000\n          },\n          \"revert\": {\n            \"arguments\": [\n              0\n            ],\n            \"cost\": 500\n          },\n          \"set_action_threshold\": {\n            \"arguments\": [\n              0,\n              0\n            ],\n            \"cost\": 74000\n          },\n          \"transfer_from_purse_to_account\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 2500000000\n          },\n          \"transfer_from_purse_to_purse\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 82000\n          },\n          \"transfer_to_account\": {\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ],\n            \"cost\": 2500000000\n          },\n          \"update_associated_key\": {\n            \"arguments\": [\n              0,\n              0,\n              0\n            ],\n            \"cost\": 4200\n          },\n          \"write\": {\n            \"arguments\": [\n              0,\n              0,\n              
0,\n              980\n            ],\n            \"cost\": 14000\n          }\n        },\n        \"max_memory\": 64,\n        \"max_stack_height\": 188,\n        \"opcode_costs\": {\n          \"add\": 210,\n          \"bit\": 300,\n          \"const\": 110,\n          \"control_flow\": 440,\n          \"conversion\": 420,\n          \"current_memory\": 290,\n          \"div\": 320,\n          \"global\": 390,\n          \"grow_memory\": 240000,\n          \"integer_comparison\": 250,\n          \"load\": 2500,\n          \"local\": 390,\n          \"mul\": 240,\n          \"nop\": 200,\n          \"regular\": 210,\n          \"store\": 4700,\n          \"unreachable\": 270\n        },\n        \"storage_costs\": {\n          \"gas_per_byte\": 625000\n        }\n      }\n    },\n    \"genesis_config_hash\": \"2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\",\n    \"protocol_version\": \"1.0.0\"\n  },\n  \"post_state_hash\": \"805c44ea515eb44273ab2368201283baf598db5d4ad1f416669201a7a390918b\"\n}"
  },
  {
    "path": "execution_engine_testing/tests/fixtures/release_1_5_8/state.json",
    "content": "{\n  \"genesis_request\": {\n    \"protocol_version\": \"1.0.0\"\n  },\n  \"post_state_hash\": \"29bbd3e40c68462422db2a7bb144e71e53607a1b7d9bcbdacecef22c998de8e3\"\n}"
  },
  {
    "path": "execution_engine_testing/tests/fixtures/three_version_fixture/state.json",
    "content": "{\n  \"genesis_request\": {\n    \"protocol_version\": \"1.0.0\"\n  },\n  \"post_state_hash\": \"30da4f641f4297d77600cb0319153c6e3f3353b0905231954a19a726896b28e5\"\n}"
  },
  {
    "path": "execution_engine_testing/tests/fixtures/upgrade_thresholds/state.json",
    "content": "{\n  \"genesis_request\": {\n    \"genesis_config_hash\": \"2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\",\n    \"protocol_version\": \"1.0.0\",\n    \"ee_config\": {\n      \"accounts\": [\n        {\n          \"Account\": {\n            \"public_key\": \"01d5af25e204ad03d0a26e236996404f1be51a60948bcc026cd084a83690b756d3\",\n            \"balance\": \"100000000000000000\",\n            \"validator\": null\n          }\n        },\n        {\n          \"Account\": {\n            \"public_key\": \"01bb47d33d777b4559bb917d1825827421c4a6b1b9737f12e1c58ea4305af88b74\",\n            \"balance\": \"100000000000000000\",\n            \"validator\": null\n          }\n        }\n      ],\n      \"wasm_config\": {\n        \"max_memory\": 64,\n        \"max_stack_height\": 500,\n        \"opcode_costs\": {\n          \"bit\": 300,\n          \"add\": 210,\n          \"mul\": 240,\n          \"div\": 320,\n          \"load\": 2500,\n          \"store\": 4700,\n          \"const\": 110,\n          \"local\": 390,\n          \"global\": 390,\n          \"integer_comparison\": 250,\n          \"conversion\": 420,\n          \"unreachable\": 270,\n          \"nop\": 200,\n          \"current_memory\": 290,\n          \"grow_memory\": 240000,\n          \"control_flow\": {\n            \"block\": 440,\n            \"loop\": 440,\n            \"if\": 440,\n            \"else\": 440,\n            \"end\": 440,\n            \"br\": 440000,\n            \"br_if\": 440000,\n            \"return\": 440,\n            \"call\": 140000,\n            \"call_indirect\": 140000,\n            \"drop\": 440,\n            \"select\": 440,\n            \"br_table\": {\n              \"cost\": 440000,\n              \"size_multiplier\": 100\n            }\n          }\n        },\n        \"storage_costs\": {\n          \"gas_per_byte\": 630000\n        },\n        \"host_function_costs\": {\n          \"read_value\": {\n            \"cost\": 6000,\n         
   \"arguments\": [\n              0,\n              0,\n              0\n            ]\n          },\n          \"dictionary_get\": {\n            \"cost\": 5500,\n            \"arguments\": [\n              0,\n              590,\n              0\n            ]\n          },\n          \"write\": {\n            \"cost\": 14000,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              980\n            ]\n          },\n          \"dictionary_put\": {\n            \"cost\": 9500,\n            \"arguments\": [\n              0,\n              1800,\n              0,\n              520\n            ]\n          },\n          \"add\": {\n            \"cost\": 5800,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ]\n          },\n          \"new_uref\": {\n            \"cost\": 17000,\n            \"arguments\": [\n              0,\n              0,\n              590\n            ]\n          },\n          \"load_named_keys\": {\n            \"cost\": 42000,\n            \"arguments\": [\n              0,\n              0\n            ]\n          },\n          \"ret\": {\n            \"cost\": 23000,\n            \"arguments\": [\n              0,\n              420000\n            ]\n          },\n          \"get_key\": {\n            \"cost\": 2000,\n            \"arguments\": [\n              0,\n              440,\n              0,\n              0,\n              0\n            ]\n          },\n          \"has_key\": {\n            \"cost\": 1500,\n            \"arguments\": [\n              0,\n              840\n            ]\n          },\n          \"put_key\": {\n            \"cost\": 38000,\n            \"arguments\": [\n              0,\n              1100,\n              0,\n              0\n            ]\n          },\n          \"remove_key\": {\n            \"cost\": 61000,\n            \"arguments\": [\n              0,\n              3200\n     
       ]\n          },\n          \"revert\": {\n            \"cost\": 500,\n            \"arguments\": [\n              0\n            ]\n          },\n          \"is_valid_uref\": {\n            \"cost\": 760,\n            \"arguments\": [\n              0,\n              0\n            ]\n          },\n          \"add_associated_key\": {\n            \"cost\": 9000,\n            \"arguments\": [\n              0,\n              0,\n              0\n            ]\n          },\n          \"remove_associated_key\": {\n            \"cost\": 4200,\n            \"arguments\": [\n              0,\n              0\n            ]\n          },\n          \"update_associated_key\": {\n            \"cost\": 4200,\n            \"arguments\": [\n              0,\n              0,\n              0\n            ]\n          },\n          \"set_action_threshold\": {\n            \"cost\": 74000,\n            \"arguments\": [\n              0,\n              0\n            ]\n          },\n          \"get_caller\": {\n            \"cost\": 380,\n            \"arguments\": [\n              0\n            ]\n          },\n          \"get_blocktime\": {\n            \"cost\": 330,\n            \"arguments\": [\n              0\n            ]\n          },\n          \"create_purse\": {\n            \"cost\": 2500000000,\n            \"arguments\": [\n              0,\n              0\n            ]\n          },\n          \"transfer_to_account\": {\n            \"cost\": 2500000000,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ]\n          },\n          \"transfer_from_purse_to_account\": {\n            \"cost\": 2500000000,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ]\n          },\n          
\"transfer_from_purse_to_purse\": {\n            \"cost\": 82000,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ]\n          },\n          \"get_balance\": {\n            \"cost\": 3800,\n            \"arguments\": [\n              0,\n              0,\n              0\n            ]\n          },\n          \"get_phase\": {\n            \"cost\": 710,\n            \"arguments\": [\n              0\n            ]\n          },\n          \"get_system_contract\": {\n            \"cost\": 1100,\n            \"arguments\": [\n              0,\n              0,\n              0\n            ]\n          },\n          \"get_main_purse\": {\n            \"cost\": 1300,\n            \"arguments\": [\n              0\n            ]\n          },\n          \"read_host_buffer\": {\n            \"cost\": 3500,\n            \"arguments\": [\n              0,\n              310,\n              0\n            ]\n          },\n          \"create_contract_package_at_hash\": {\n            \"cost\": 200,\n            \"arguments\": [\n              0,\n              0\n            ]\n          },\n          \"create_contract_user_group\": {\n            \"cost\": 200,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ]\n          },\n          \"add_contract_version\": {\n            \"cost\": 200,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ]\n          },\n          \"disable_contract_version\": {\n            \"cost\": 200,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n         
   ]\n          },\n          \"call_contract\": {\n            \"cost\": 4500,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              420,\n              0\n            ]\n          },\n          \"call_versioned_contract\": {\n            \"cost\": 4500,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              0,\n              420,\n              0\n            ]\n          },\n          \"get_named_arg_size\": {\n            \"cost\": 200,\n            \"arguments\": [\n              0,\n              0,\n              0\n            ]\n          },\n          \"get_named_arg\": {\n            \"cost\": 200,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ]\n          },\n          \"remove_contract_user_group\": {\n            \"cost\": 200,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ]\n          },\n          \"provision_contract_user_group_uref\": {\n            \"cost\": 200,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0\n            ]\n          },\n          \"remove_contract_user_group_urefs\": {\n            \"cost\": 200,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0,\n              0,\n              0\n            ]\n          },\n          \"print\": {\n            \"cost\": 20000,\n            \"arguments\": [\n              0,\n              4600\n            ]\n          },\n          \"blake2b\": {\n            \"cost\": 200,\n            \"arguments\": [\n              0,\n              0,\n              0,\n              0\n            ]\n          },\n          \"random_bytes\": {\n            \"cost\": 200,\n  
          \"arguments\": [\n              0,\n              0\n            ]\n          }\n        }\n      },\n      \"system_config\": {\n        \"wasmless_transfer_cost\": 100000000,\n        \"auction_costs\": {\n          \"get_era_validators\": 10000,\n          \"read_seigniorage_recipients\": 10000,\n          \"add_bid\": 2500000000,\n          \"withdraw_bid\": 2500000000,\n          \"delegate\": 2500000000,\n          \"undelegate\": 2500000000,\n          \"run_auction\": 10000,\n          \"slash\": 10000,\n          \"distribute\": 10000,\n          \"withdraw_delegator_reward\": 10000,\n          \"withdraw_validator_reward\": 10000,\n          \"read_era_id\": 10000,\n          \"activate_bid\": 10000,\n          \"redelegate\": 2500000000\n        },\n        \"mint_costs\": {\n          \"mint\": 2500000000,\n          \"reduce_total_supply\": 10000,\n          \"create\": 2500000000,\n          \"balance\": 10000,\n          \"transfer\": 10000,\n          \"read_base_round_reward\": 10000,\n          \"mint_into_existing_purse\": 2500000000\n        },\n        \"handle_payment_costs\": {\n          \"get_payment_purse\": 10000,\n          \"set_refund_purse\": 10000,\n          \"get_refund_purse\": 10000,\n          \"finalize_payment\": 10000\n        },\n        \"standard_payment_costs\": {\n          \"pay\": 10000\n        }\n      },\n      \"validator_slots\": 100,\n      \"auction_delay\": 1,\n      \"locked_funds_period_millis\": 7776000000,\n      \"round_seigniorage_rate\": [\n        7,\n        87535408\n      ],\n      \"unbonding_delay\": 7,\n      \"genesis_timestamp_millis\": 0\n    },\n    \"chainspec_registry\": {\n      \"chainspec_raw_hash\": \"11c0e79b71c3976ccd0c02d1310e2516c08edc9d8b6f57ccd680d63a4d8e72da\",\n      \"genesis_accounts_raw_hash\": \"0afd4a04d7720da9922f2b40249989faf4ff8096e1ed49bee615bb6cb1ee4f7d\",\n      \"global_state_raw_hash\": null\n    }\n  },\n  \"post_state_hash\": 
\"059d4fbbc1048314fd58111bbb6e733626de760b771793a181822e984fdd72a9\"\n}"
  },
  {
    "path": "execution_engine_testing/tests/fixtures/validator_minimum_bid/state.json",
    "content": "{\n  \"genesis_request\": {\n    \"protocol_version\": \"1.0.0\"\n  },\n  \"post_state_hash\": \"b39980efe0651ef956d8a9af5c197bc6a11af2f164d7f3ebe583a6a3172f57a0\"\n}"
  },
  {
    "path": "execution_engine_testing/tests/src/lib.rs",
    "content": "pub mod lmdb_fixture;\npub mod wasm_utils;\npub use casper_engine_test_support::genesis_config_builder::GenesisConfigBuilder;\n\n#[cfg(test)]\nmod test;\n"
  },
  {
    "path": "execution_engine_testing/tests/src/lmdb_fixture.rs",
    "content": "use std::{\n    env,\n    fs::File,\n    io::Write,\n    path::{Path, PathBuf},\n};\n\nuse fs_extra::dir;\nuse serde::{Deserialize, Serialize};\nuse serde_json::json;\nuse tempfile::TempDir;\n\nuse casper_engine_test_support::{ChainspecConfig, LmdbWasmTestBuilder};\nuse casper_storage::data_access_layer::GenesisRequest;\n#[cfg(test)]\nuse casper_types::{AccessRights, Key, URef};\nuse casper_types::{Digest, ProtocolVersion};\n\npub const RELEASE_1_2_0: &str = \"release_1_2_0\";\npub const RELEASE_1_3_1: &str = \"release_1_3_1\";\npub const RELEASE_1_4_2: &str = \"release_1_4_2\";\npub const RELEASE_1_4_3: &str = \"release_1_4_3\";\npub const RELEASE_1_4_4: &str = \"release_1_4_4\";\npub const RELEASE_1_4_5: &str = \"release_1_4_5\";\npub const RELEASE_1_5_8: &str = \"release_1_5_8\";\nconst STATE_JSON_FILE: &str = \"state.json\";\nconst FIXTURES_DIRECTORY: &str = \"fixtures\";\nconst GENESIS_PROTOCOL_VERSION_FIELD: &str = \"protocol_version\";\n\n#[cfg(test)]\nconst RUN_FIXTURE_GENERATORS_ENV: &str = \"RUN_FIXTURE_GENERATORS\";\n\n#[cfg(test)]\npub(crate) fn is_fixture_generator_enabled() -> bool {\n    env::var_os(RUN_FIXTURE_GENERATORS_ENV).is_some()\n}\n\n/// This is a special place in the global state where fixture contains a registry.\n#[cfg(test)]\npub(crate) const ENTRY_REGISTRY_SPECIAL_ADDRESS: Key =\n    Key::URef(URef::new([0u8; 32], AccessRights::all()));\n\nfn path_to_lmdb_fixtures() -> PathBuf {\n    Path::new(env!(\"CARGO_MANIFEST_DIR\")).join(FIXTURES_DIRECTORY)\n}\n\n/// Contains serialized genesis config.\n#[derive(Serialize, Deserialize)]\npub struct LmdbFixtureState {\n    /// Serializes as unstructured JSON value because [`GenesisRequest`] might change over time\n    /// and likely old fixture might not deserialize cleanly in the future.\n    pub genesis_request: serde_json::Value,\n    pub post_state_hash: Digest,\n}\n\nimpl LmdbFixtureState {\n    pub fn genesis_protocol_version(&self) -> ProtocolVersion {\n        
serde_json::from_value(\n            self.genesis_request\n                .get(GENESIS_PROTOCOL_VERSION_FIELD)\n                .cloned()\n                .unwrap(),\n        )\n        .expect(\"should have protocol version field\")\n    }\n}\n\n/// Creates a [`LmdbWasmTestBuilder`] from a named fixture directory.\n///\n/// As part of this process a new temporary directory will be created to store LMDB files from given\n/// fixture, and a builder will be created using it.\n///\n/// This function returns a triple of the builder, a [`LmdbFixtureState`] which contains serialized\n/// genesis request for given fixture, and a temporary directory which has to be kept in scope.\npub fn builder_from_global_state_fixture(\n    fixture_name: &str,\n) -> (LmdbWasmTestBuilder, LmdbFixtureState, TempDir) {\n    let source = path_to_lmdb_fixtures().join(fixture_name);\n    let to = tempfile::tempdir().expect(\"should create temp dir\");\n    fs_extra::copy_items(&[source], &to, &dir::CopyOptions::default())\n        .expect(\"should copy global state fixture\");\n\n    let path_to_state = to.path().join(fixture_name).join(STATE_JSON_FILE);\n    let lmdb_fixture_state: LmdbFixtureState =\n        serde_json::from_reader(File::open(path_to_state).unwrap()).unwrap();\n    let path_to_gs = to.path().join(fixture_name);\n\n    (\n        LmdbWasmTestBuilder::open(\n            &path_to_gs,\n            ChainspecConfig::default(),\n            lmdb_fixture_state.genesis_protocol_version(),\n            lmdb_fixture_state.post_state_hash,\n        ),\n        lmdb_fixture_state,\n        to,\n    )\n}\n\npub fn builder_from_global_state_fixture_with_enable_ae(\n    fixture_name: &str,\n    enable_addressable_entity: bool,\n) -> (LmdbWasmTestBuilder, LmdbFixtureState, TempDir) {\n    let source = path_to_lmdb_fixtures().join(fixture_name);\n    let to = tempfile::tempdir().expect(\"should create temp dir\");\n    fs_extra::copy_items(&[source], &to, &dir::CopyOptions::default())\n     
   .expect(\"should copy global state fixture\");\n\n    let path_to_state = to.path().join(fixture_name).join(STATE_JSON_FILE);\n    let lmdb_fixture_state: LmdbFixtureState =\n        serde_json::from_reader(File::open(path_to_state).unwrap()).unwrap();\n    let path_to_gs = to.path().join(fixture_name);\n\n    (\n        LmdbWasmTestBuilder::open(\n            &path_to_gs,\n            ChainspecConfig::default().with_enable_addressable_entity(enable_addressable_entity),\n            lmdb_fixture_state.genesis_protocol_version(),\n            lmdb_fixture_state.post_state_hash,\n        ),\n        lmdb_fixture_state,\n        to,\n    )\n}\n\n/// Creates a new fixture with a name.\n///\n/// This process is currently manual. The process to do this is to check out a release branch, call\n/// this function to generate (i.e. `generate_fixture(\"release_1_3_0\")`) and persist it in version\n/// control.\npub fn generate_fixture(\n    name: &str,\n    genesis_request: GenesisRequest,\n    post_genesis_setup: impl FnOnce(&mut LmdbWasmTestBuilder),\n) -> Result<(), Box<dyn std::error::Error>> {\n    let lmdb_fixtures_root = path_to_lmdb_fixtures();\n    let fixture_root = lmdb_fixtures_root.join(name);\n\n    let path_to_data_lmdb = fixture_root.join(\"global_state\").join(\"data.lmdb\");\n    if path_to_data_lmdb.exists() {\n        eprintln!(\n            \"Lmdb fixture located at {} already exists. If you need to re-generate a fixture to ensure a serialization \\\n            changes are backwards compatible please make sure you are running a specific version, or a past commit. 
\\\n            Skipping.\",\n            path_to_data_lmdb.display()\n        );\n        return Ok(());\n    }\n\n    let chainspec = ChainspecConfig::default();\n    let mut builder = LmdbWasmTestBuilder::new_with_config(&fixture_root, chainspec);\n\n    builder.run_genesis(genesis_request.clone());\n\n    // You can customize the fixture post genesis with a callable.\n    post_genesis_setup(&mut builder);\n\n    let post_state_hash = builder.get_post_state_hash();\n\n    let genesis_request_json = json!({\n        GENESIS_PROTOCOL_VERSION_FIELD: genesis_request.protocol_version(),\n    });\n\n    let state = LmdbFixtureState {\n        genesis_request: genesis_request_json,\n        post_state_hash,\n    };\n    let serialized_state = serde_json::to_string_pretty(&state)?;\n\n    let path_to_state_file = fixture_root.join(STATE_JSON_FILE);\n\n    let mut f = File::create(path_to_state_file)?;\n    f.write_all(serialized_state.as_bytes())?;\n    Ok(())\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/calling_packages_by_version_query.rs",
    "content": "use std::collections::BTreeSet;\n\n/// This test assumes that the provided fixture has v1 and v2 installed in protocol version 1\nuse casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, UpgradeRequestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_EXEC_CONFIG, DEFAULT_GENESIS_CONFIG_HASH, DEFAULT_PROTOCOL_VERSION,\n};\nuse casper_execution_engine::{\n    engine_state::{EngineConfigBuilder, Error, SessionDataV1, SessionInputData},\n    execution::ExecError,\n};\nuse casper_storage::data_access_layer::GenesisRequest;\nuse casper_types::{\n    contracts::ProtocolVersionMajor, runtime_args, AddressableEntityHash, ChainspecRegistry,\n    EntityVersion, EntityVersionKey, EraId, HashAddr, HoldBalanceHandling, Key, NamedKeys,\n    PackageHash, PricingMode, ProtocolVersion, RuntimeArgs, StoredValue, Timestamp,\n    TransactionEntryPoint, TransactionInvocationTarget, TransactionRuntimeParams,\n    TransactionTarget, TransactionV1Hash,\n};\nuse once_cell::sync::Lazy;\nuse rand::Rng;\n\nstatic V3_0_0: Lazy<ProtocolVersion> = Lazy::new(|| ProtocolVersion::from_parts(3, 0, 0));\n\nconst DISABLE_CONTRACT: &str = \"disable_contract.wasm\";\n\nstatic CURRENT_PROTOCOL_MAJOR: Lazy<u32> = Lazy::new(|| DEFAULT_PROTOCOL_VERSION.value().major);\n\nconst CONTRACT_WASM: &str = \"key_putter.wasm\";\n\n#[ignore]\n#[test]\nfn should_call_package_hash_by_exact_version() {\n    let mut builder = prepare_v1_builder();\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n\n    exec_put_key_by_package_hash(&mut builder, Some(1), Some(1), ProtocolVersion::V1_0_0);\n\n    let hash_of_1_1 = get_contract_hash_for_specific_version(&mut builder, 1, 1).unwrap();\n    let value = get_value_of_named_key_for_contract_hash_as_str(\n        &mut builder,\n        hash_of_1_1,\n        \"key_placeholder\",\n    )\n    .unwrap();\n    assert_eq!(value, 
\"key_putter_v1\");\n}\n\n#[ignore]\n#[test]\nfn should_call_package_name_by_exact_version() {\n    let mut builder = prepare_v1_builder();\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n\n    exec_put_key_by_package_name(&mut builder, Some(1), Some(1), ProtocolVersion::V1_0_0);\n\n    let hash_of_1_1 = get_contract_hash_for_specific_version(&mut builder, 1, 1).unwrap();\n    let value = get_value_of_named_key_for_contract_hash_as_str(\n        &mut builder,\n        hash_of_1_1,\n        \"key_placeholder\",\n    )\n    .unwrap();\n    assert_eq!(value, \"key_putter_v1\");\n}\n\n#[ignore]\n#[test]\nfn should_call_package_hash_by_exact_version_after_protocol_version_change() {\n    let mut builder = prepare_v1_builder();\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n\n    upgrade_version(&mut builder, 2, false);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0);\n\n    exec_put_key_by_package_hash(&mut builder, Some(1), Some(1), ProtocolVersion::V2_0_0);\n\n    let hash_of_1_1 = get_contract_hash_for_specific_version(&mut builder, 1, 1).unwrap();\n    let value = get_value_of_named_key_for_contract_hash_as_str(\n        &mut builder,\n        hash_of_1_1,\n        \"key_placeholder\",\n    )\n    .unwrap();\n    assert_eq!(value, \"key_putter_v1\");\n}\n\n#[ignore]\n#[test]\nfn should_call_package_name_by_exact_version_after_protocol_version_change() {\n    let mut builder = prepare_v1_builder();\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n    upgrade_version(&mut builder, 2, false);\n\n    exec_put_key_by_package_name(&mut builder, Some(1), Some(1), ProtocolVersion::V2_0_0);\n\n    let hash_of_1_1 = get_contract_hash_for_specific_version(&mut builder, 1, 1).unwrap();\n    let 
value = get_value_of_named_key_for_contract_hash_as_str(\n        &mut builder,\n        hash_of_1_1,\n        \"key_placeholder\",\n    )\n    .unwrap();\n    assert_eq!(value, \"key_putter_v1\");\n}\n\n#[ignore]\n#[test]\nfn should_call_by_hash_newest_version_when_only_major_specified() {\n    let mut builder = prepare_v1_builder();\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n    upgrade_version(&mut builder, 2, false);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0);\n\n    exec_put_key_by_package_hash(&mut builder, Some(1), None, ProtocolVersion::V2_0_0);\n\n    let hash_of_1_2 = get_contract_hash_for_specific_version(&mut builder, 1, 2).unwrap();\n    let value = get_value_of_named_key_for_contract_hash_as_str(\n        &mut builder,\n        hash_of_1_2,\n        \"key_placeholder\",\n    )\n    .unwrap();\n    assert_eq!(value, \"key_putter_v2\");\n\n    disable_contract_version(&mut builder, 1, 2);\n    // After disabling 1.2, selecting by major protocol 1 should point to 1.1\n\n    exec_put_key_by_package_hash(&mut builder, Some(1), None, ProtocolVersion::V2_0_0);\n\n    let hash_of_1_1 = get_contract_hash_for_specific_version(&mut builder, 1, 1).unwrap();\n    let value = get_value_of_named_key_for_contract_hash_as_str(\n        &mut builder,\n        hash_of_1_1,\n        \"key_placeholder\",\n    )\n    .unwrap();\n    assert_eq!(value, \"key_putter_v1\");\n}\n\n#[ignore]\n#[test]\nfn should_call_by_name_newest_version_when_only_major_specified() {\n    let mut builder = prepare_v1_builder();\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n    upgrade_version(&mut builder, 2, false);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0);\n\n    exec_put_key_by_package_name(&mut builder, Some(1), None, ProtocolVersion::V2_0_0);\n\n    let 
hash_of_1_2 = get_contract_hash_for_specific_version(&mut builder, 1, 2).unwrap();\n    let value = get_value_of_named_key_for_contract_hash_as_str(\n        &mut builder,\n        hash_of_1_2,\n        \"key_placeholder\",\n    )\n    .unwrap();\n    assert_eq!(value, \"key_putter_v2\");\n\n    disable_contract_version(&mut builder, 1, 2);\n    // After disabling 1.2, selecting by major protocol 1 should point to 1.1\n\n    exec_put_key_by_package_name(&mut builder, Some(1), None, ProtocolVersion::V2_0_0);\n\n    let hash_of_1_1 = get_contract_hash_for_specific_version(&mut builder, 1, 1).unwrap();\n    let value = get_value_of_named_key_for_contract_hash_as_str(\n        &mut builder,\n        hash_of_1_1,\n        \"key_placeholder\",\n    )\n    .unwrap();\n    assert_eq!(value, \"key_putter_v1\");\n}\n\n#[ignore]\n#[test]\nfn should_call_by_hash_the_newest_version() {\n    let mut builder = prepare_v1_builder();\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n    upgrade_version(&mut builder, 2, false);\n\n    exec_put_key_by_package_hash(&mut builder, None, None, ProtocolVersion::V1_0_0);\n\n    let hash_of_1_2 = get_contract_hash_for_specific_version(&mut builder, 1, 2).unwrap();\n    let value = get_value_of_named_key_for_contract_hash_as_str(\n        &mut builder,\n        hash_of_1_2,\n        \"key_placeholder\",\n    )\n    .unwrap();\n    assert_eq!(value, \"key_putter_v2\");\n\n    upgrade_version(&mut builder, 2, false);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0);\n\n    exec_put_key_by_package_hash(&mut builder, None, None, ProtocolVersion::V2_0_0);\n\n    let hash_of_2_2 =\n        get_contract_hash_for_specific_version(&mut builder, *CURRENT_PROTOCOL_MAJOR, 2).unwrap();\n    let value = get_value_of_named_key_for_contract_hash_as_str(\n        &mut builder,\n        
hash_of_2_2,\n        \"key_placeholder\",\n    )\n    .unwrap();\n    assert_eq!(value, \"key_putter_v4\");\n\n    disable_contract_version(&mut builder, 2, 2);\n    // After disabling 2.2, selecting newest should point to 2.1\n\n    exec_put_key_by_package_hash(&mut builder, None, None, ProtocolVersion::V2_0_0);\n\n    let hash_of_2_1 = get_contract_hash_for_specific_version(&mut builder, 2, 1).unwrap();\n    let value = get_value_of_named_key_for_contract_hash_as_str(\n        &mut builder,\n        hash_of_2_1,\n        \"key_placeholder\",\n    )\n    .unwrap();\n    assert_eq!(value, \"key_putter_v3\");\n}\n\n#[ignore]\n#[test]\nfn should_call_by_name_the_newest_version() {\n    let mut builder = prepare_v1_builder();\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n\n    exec_put_key_by_package_name(&mut builder, None, None, ProtocolVersion::V1_0_0);\n\n    let hash_of_1_2 = get_contract_hash_for_specific_version(&mut builder, 1, 2).unwrap();\n    let value = get_value_of_named_key_for_contract_hash_as_str(\n        &mut builder,\n        hash_of_1_2,\n        \"key_placeholder\",\n    )\n    .unwrap();\n    assert_eq!(value, \"key_putter_v2\");\n\n    upgrade_version(&mut builder, 2, false);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0);\n\n    exec_put_key_by_package_name(&mut builder, None, None, ProtocolVersion::V2_0_0);\n\n    let hash_of_2_2 =\n        get_contract_hash_for_specific_version(&mut builder, *CURRENT_PROTOCOL_MAJOR, 2).unwrap();\n    let value = get_value_of_named_key_for_contract_hash_as_str(\n        &mut builder,\n        hash_of_2_2,\n        \"key_placeholder\",\n    )\n    .unwrap();\n    assert_eq!(value, \"key_putter_v4\");\n\n    disable_contract_version(&mut builder, 2, 2);\n    // After disabling 2.2, selecting newest should point to 2.1\n\n    
exec_put_key_by_package_name(&mut builder, None, None, ProtocolVersion::V2_0_0);\n\n    let hash_of_2_1 = get_contract_hash_for_specific_version(&mut builder, 2, 1).unwrap();\n    let value = get_value_of_named_key_for_contract_hash_as_str(\n        &mut builder,\n        hash_of_2_1,\n        \"key_placeholder\",\n    )\n    .unwrap();\n    assert_eq!(value, \"key_putter_v3\");\n}\n\n#[ignore]\n#[test]\nfn when_disamiguous_calls_are_enabled_should_call_by_hash_querying_by_version() {\n    let mut builder = prepare_v1_builder();\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n\n    exec_put_key_by_package_hash(&mut builder, None, Some(1), ProtocolVersion::V1_0_0);\n\n    let hash_of_1_1 = get_contract_hash_for_specific_version(&mut builder, 1, 1).unwrap();\n    let value = get_value_of_named_key_for_contract_hash_as_str(\n        &mut builder,\n        hash_of_1_1,\n        \"key_placeholder\",\n    )\n    .unwrap();\n    assert_eq!(value, \"key_putter_v1\");\n\n    upgrade_version(&mut builder, 2, false);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0);\n\n    exec_put_key_by_package_hash(&mut builder, None, Some(1), ProtocolVersion::V2_0_0);\n\n    let hash_of_2_1 = get_contract_hash_for_specific_version(&mut builder, 2, 1).unwrap();\n    let value = get_value_of_named_key_for_contract_hash_as_str(\n        &mut builder,\n        hash_of_2_1,\n        \"key_placeholder\",\n    )\n    .unwrap();\n    assert_eq!(value, \"key_putter_v3\");\n\n    disable_contract_version(&mut builder, *CURRENT_PROTOCOL_MAJOR, 1);\n    exec_put_key_by_package_hash(&mut builder, None, Some(1), ProtocolVersion::V2_0_0);\n\n    let hash_of_1_1 = get_contract_hash_for_specific_version(&mut builder, 1, 1).unwrap();\n    let value = get_value_of_named_key_for_contract_hash_as_str(\n        &mut builder,\n        
hash_of_1_1,\n        \"key_placeholder\",\n    )\n    .unwrap();\n    assert_eq!(value, \"key_putter_v1\");\n}\n\n#[ignore]\n#[test]\nfn when_disamiguous_calls_are_disabled_then_ambiguous_call_by_hash_will_fail() {\n    let mut builder = prepare_v1_builder();\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n    upgrade_version(&mut builder, 2, true);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0);\n\n    let package_hash = get_package_hash(&mut builder);\n    let target = TransactionInvocationTarget::ByPackageHash {\n        addr: package_hash,\n        version: Some(1),\n        protocol_version_major: None,\n    };\n    let request = builder_for_calling_entrypoint(\n        \"put_key\".to_owned(),\n        target,\n        RuntimeArgs::default(),\n        ProtocolVersion::V2_0_0,\n    );\n    builder.exec(request).expect_failure().commit();\n    let error = builder\n        .get_last_exec_result()\n        .unwrap()\n        .error()\n        .unwrap()\n        .clone();\n    assert!(matches!(\n        error,\n        Error::Exec(ExecError::AmbiguousEntityVersion)\n    ))\n}\n\n#[ignore]\n#[test]\nfn when_disamiguous_calls_are_disabled_then_ambiguous_call_by_name_will_fail() {\n    let mut builder = prepare_v1_builder();\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n    upgrade_version(&mut builder, 2, true);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0);\n\n    let target = TransactionInvocationTarget::ByPackageName {\n        name: \"package_name\".to_owned(),\n        version: Some(1),\n        protocol_version_major: None,\n    };\n    let request = builder_for_calling_entrypoint(\n        
\"put_key\".to_owned(),\n        target,\n        RuntimeArgs::default(),\n        ProtocolVersion::V2_0_0,\n    );\n    builder.exec(request).expect_failure().commit();\n    let error = builder\n        .get_last_exec_result()\n        .unwrap()\n        .error()\n        .unwrap()\n        .clone();\n    assert!(matches!(\n        error,\n        Error::Exec(ExecError::AmbiguousEntityVersion)\n    ))\n}\n\n#[ignore]\n#[test]\nfn calling_by_package_hash_should_work_when_more_then_two_protocol_versions() {\n    let mut builder = prepare_v1_builder();\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n    install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0);\n    upgrade_version(&mut builder, 2, false);\n    upgrade_version(&mut builder, 3, false);\n    install(&mut builder, CONTRACT_WASM, *V3_0_0);\n\n    exec_put_key_by_package_hash(&mut builder, None, Some(1), *V3_0_0);\n    assert_contract_version_hash_placeholder_value(&mut builder, 3, 1, \"key_putter_v3\");\n\n    install(&mut builder, CONTRACT_WASM, *V3_0_0);\n    exec_put_key_by_package_hash(&mut builder, None, None, *V3_0_0);\n    assert_contract_version_hash_placeholder_value(&mut builder, 3, 2, \"key_putter_v4\");\n\n    exec_put_key_by_package_hash(&mut builder, Some(1), None, *V3_0_0);\n    assert_contract_version_hash_placeholder_value(&mut builder, 1, 2, \"key_putter_v2\");\n}\n\nfn assert_contract_version_hash_placeholder_value(\n    builder: &mut LmdbWasmTestBuilder,\n    protocol_version: ProtocolVersionMajor,\n    entity_version: EntityVersion,\n    expected_value: &str,\n) {\n    let hash_of_contract =\n        get_contract_hash_for_specific_version(builder, protocol_version, entity_version).unwrap();\n    let value = get_value_of_named_key_for_contract_hash_as_str(\n        builder,\n        hash_of_contract,\n        \"key_placeholder\",\n    )\n    .unwrap();\n    assert_eq!(value, expected_value);\n}\n\nfn install(builder: &mut LmdbWasmTestBuilder, file_name: &str, 
protocol_version: ProtocolVersion) {\n    let install_request = ExecuteRequestBuilder::standard_with_protocol_version(\n        *DEFAULT_ACCOUNT_ADDR,\n        file_name,\n        RuntimeArgs::default(),\n        protocol_version,\n    )\n    .build();\n    builder.exec(install_request).expect_success().commit();\n}\n\nfn get_value_of_named_key_for_contract_hash_as_str(\n    builder: &mut LmdbWasmTestBuilder,\n    hash: HashAddr,\n    key_name: &str,\n) -> Option<String> {\n    let get_named_keys_for_contract_hash = get_named_keys_for_contract_hash(builder, hash);\n    get_named_keys_for_contract_hash\n        .get(key_name)\n        .and_then(|key| match builder.query(None, *key, &[]) {\n            Ok(v) => match v {\n                StoredValue::CLValue(cl_value) => cl_value.into_t().ok(),\n                _ => panic!(\"Unexpected stored value kind\"),\n            },\n            Err(_) => None,\n        })\n}\n\nfn disable_contract_version(\n    builder: &mut LmdbWasmTestBuilder,\n    protocol_version_major: ProtocolVersionMajor,\n    version: EntityVersion,\n) {\n    let package_hash = get_package_hash(builder);\n    let hash =\n        get_contract_hash_for_specific_version(builder, protocol_version_major, version).unwrap();\n    let stored_entity_hash = AddressableEntityHash::new(hash);\n    let disable_request = {\n        let session_args = runtime_args! 
{\n            \"contract_package_hash\" => package_hash,\n            \"contract_hash\" => stored_entity_hash,\n        };\n\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, DISABLE_CONTRACT, session_args)\n            .build()\n    };\n    builder.exec(disable_request).expect_success().commit();\n}\n\nfn get_named_keys_for_contract_hash(\n    builder: &mut LmdbWasmTestBuilder,\n    hash: HashAddr,\n) -> NamedKeys {\n    builder.get_named_keys_for_contract(AddressableEntityHash::new(hash))\n}\n\nfn call_contract_entrypoint(\n    builder: &mut LmdbWasmTestBuilder,\n    entry_point: String,\n    id: TransactionInvocationTarget,\n    args: RuntimeArgs,\n    protocol_version: ProtocolVersion,\n) {\n    let request = builder_for_calling_entrypoint(entry_point, id, args, protocol_version);\n    builder.exec(request).expect_success().commit();\n}\n\nfn builder_for_calling_entrypoint(\n    entry_point: String,\n    id: TransactionInvocationTarget,\n    args: RuntimeArgs,\n    protocol_version: ProtocolVersion,\n) -> casper_engine_test_support::ExecuteRequest {\n    let target = TransactionTarget::Stored {\n        id,\n        runtime: TransactionRuntimeParams::VmCasperV1,\n    };\n    let entry_point = TransactionEntryPoint::Custom(entry_point);\n    let v1_hash = TransactionV1Hash::from_raw([5; 32]);\n    let mut signers = BTreeSet::new();\n    signers.insert(*DEFAULT_ACCOUNT_ADDR);\n    let pricing_mode = PricingMode::PaymentLimited {\n        payment_amount: 2_500_000,\n        gas_price_tolerance: 1,\n        standard_payment: true,\n    };\n    let initiator_addr = casper_types::InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR);\n    let session_data_v1 = SessionDataV1::new(\n        &args,\n        &target,\n        &entry_point,\n        true,\n        &v1_hash,\n        &pricing_mode,\n        &initiator_addr,\n        signers,\n        true,\n    );\n    let session_input_data = SessionInputData::SessionDataV1 {\n        data: session_data_v1,\n   
 };\n    ExecuteRequestBuilder::from_session_input_data_for_protocol_version(\n        &session_input_data,\n        protocol_version,\n    )\n    .build()\n}\n\nfn get_package_hash(builder: &mut LmdbWasmTestBuilder) -> [u8; 32] {\n    let account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n    let get = account.named_keys().get(\"package_name\");\n    let package_key = get.unwrap();\n    let package_hash = match package_key {\n        Key::Hash(hash) => hash,\n        _ => {\n            panic!(\"COULDN'T HANLDE\")\n        }\n    };\n    *package_hash\n}\n\nfn get_contract_hash_for_specific_version(\n    builder: &mut LmdbWasmTestBuilder,\n    protocol_version_major: ProtocolVersionMajor,\n    version: EntityVersion,\n) -> Option<HashAddr> {\n    let maybe_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR);\n    let account = maybe_account.unwrap();\n    let get = account.named_keys().get(\"package_name\");\n    let package_key = get.unwrap();\n    let package_hash = match package_key {\n        Key::Hash(hash) => hash,\n        _ => {\n            panic!(\"COULDN'T HANLDE THE KEY\")\n        }\n    };\n    let package = builder\n        .get_package(PackageHash::new(*package_hash))\n        .unwrap();\n    let key = EntityVersionKey::new(protocol_version_major, version);\n    package.versions().get(&key).map(|x| x.value())\n}\n\nfn exec_put_key_by_package_name(\n    builder: &mut LmdbWasmTestBuilder,\n    protocol_version_major: Option<ProtocolVersionMajor>,\n    version: Option<EntityVersion>,\n    protocol_version: ProtocolVersion,\n) {\n    let target = TransactionInvocationTarget::ByPackageName {\n        name: \"package_name\".to_owned(),\n        version,\n        protocol_version_major,\n    };\n    call_contract_entrypoint(\n        builder,\n        \"put_key\".to_owned(),\n        target,\n        RuntimeArgs::default(),\n        protocol_version,\n    )\n}\n\nfn exec_put_key_by_package_hash(\n    builder: &mut LmdbWasmTestBuilder,\n    
protocol_version_major: Option<ProtocolVersionMajor>,\n    version: Option<EntityVersion>,\n    protocol_version: ProtocolVersion,\n) {\n    let package_hash = get_package_hash(builder);\n    let target = TransactionInvocationTarget::ByPackageHash {\n        addr: package_hash,\n        version,\n        protocol_version_major,\n    };\n    call_contract_entrypoint(\n        builder,\n        \"put_key\".to_owned(),\n        target,\n        RuntimeArgs::default(),\n        protocol_version,\n    )\n}\n\nfn upgrade_version(\n    builder: &mut LmdbWasmTestBuilder,\n    new_protocol_version_major: ProtocolVersionMajor,\n    should_trap_on_ambiguous_entity_version: bool,\n) {\n    if new_protocol_version_major <= 1 {\n        panic!(\"Can't upgrade to 1 or 0 major version\");\n    }\n    let current_protocol_version =\n        ProtocolVersion::from_parts(new_protocol_version_major - 1, 0, 0);\n    let new_protocol_version = ProtocolVersion::from_parts(new_protocol_version_major, 0, 0);\n\n    let activation_point = EraId::new(0u64);\n\n    let mut upgrade_request = UpgradeRequestBuilder::new()\n        .with_current_protocol_version(current_protocol_version)\n        .with_new_protocol_version(new_protocol_version)\n        .with_activation_point(activation_point)\n        .with_new_gas_hold_handling(HoldBalanceHandling::Accrued)\n        .with_new_gas_hold_interval(24 * 60 * 60 * 60)\n        .with_enable_addressable_entity(false)\n        .build();\n    let config = EngineConfigBuilder::new()\n        .with_trap_on_ambiguous_entity_version(should_trap_on_ambiguous_entity_version)\n        .build();\n    builder\n        .with_block_time(Timestamp::now().into())\n        .upgrade_using_scratch(&mut upgrade_request)\n        .expect_upgrade_success();\n    builder.with_engine_config(config);\n}\n\nfn prepare_v1_builder() -> LmdbWasmTestBuilder {\n    let mut rng = rand::thread_rng();\n    let chainspec_bytes = rng.gen::<[u8; 32]>();\n    let genesis_account = 
rng.gen::<[u8; 32]>();\n    let chainspec_registry =\n        ChainspecRegistry::new_with_genesis(&chainspec_bytes, &genesis_account);\n\n    let run_genesis_request = GenesisRequest::new(\n        DEFAULT_GENESIS_CONFIG_HASH,\n        ProtocolVersion::V1_0_0,\n        DEFAULT_EXEC_CONFIG.clone(),\n        chainspec_registry,\n    );\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(run_genesis_request);\n    builder\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/chainspec_registry.rs",
    "content": "use rand::Rng;\nuse tempfile::TempDir;\n\nuse casper_engine_test_support::{\n    LmdbWasmTestBuilder, UpgradeRequestBuilder, DEFAULT_EXEC_CONFIG, DEFAULT_GENESIS_CONFIG_HASH,\n    DEFAULT_PROTOCOL_VERSION, LOCAL_GENESIS_REQUEST,\n};\nuse casper_storage::data_access_layer::GenesisRequest;\nuse casper_types::{ChainspecRegistry, Digest, EraId, Key, ProtocolVersion};\n\nuse crate::lmdb_fixture;\n\nconst DEFAULT_ACTIVATION_POINT: EraId = EraId::new(1);\n\nconst OLD_PROTOCOL_VERSION: ProtocolVersion = DEFAULT_PROTOCOL_VERSION;\nconst NEW_PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::from_parts(\n    OLD_PROTOCOL_VERSION.value().major,\n    OLD_PROTOCOL_VERSION.value().minor,\n    OLD_PROTOCOL_VERSION.value().patch + 1,\n);\n\n#[ignore]\n#[test]\nfn should_commit_chainspec_registry_during_genesis() {\n    let mut rng = rand::thread_rng();\n    let chainspec_bytes = rng.gen::<[u8; 32]>();\n    let genesis_account = rng.gen::<[u8; 32]>();\n    let chainspec_bytes_hash = Digest::hash(chainspec_bytes);\n    let genesis_account_hash = Digest::hash(genesis_account);\n\n    let chainspec_registry =\n        ChainspecRegistry::new_with_genesis(&chainspec_bytes, &genesis_account);\n\n    let run_genesis_request = GenesisRequest::new(\n        DEFAULT_GENESIS_CONFIG_HASH,\n        DEFAULT_PROTOCOL_VERSION,\n        DEFAULT_EXEC_CONFIG.clone(),\n        chainspec_registry,\n    );\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(run_genesis_request);\n\n    let queried_registry = builder\n        .query(None, Key::ChainspecRegistry, &[])\n        .expect(\"must have entry under Key::ChainspecRegistry\")\n        .as_cl_value()\n        .expect(\"must have underlying cl_value\")\n        .to_owned()\n        .into_t::<ChainspecRegistry>()\n        .expect(\"must convert to chainspec registry\");\n\n    let queried_chainspec_hash = queried_registry.chainspec_raw_hash();\n    assert_eq!(*queried_chainspec_hash, 
chainspec_bytes_hash);\n\n    let queried_accounts_hash = queried_registry\n        .genesis_accounts_raw_hash()\n        .expect(\"must have entry for genesis accounts\");\n    assert_eq!(*queried_accounts_hash, genesis_account_hash);\n}\n\n#[ignore]\n#[test]\n#[should_panic]\nfn should_fail_to_commit_genesis_when_missing_genesis_accounts_hash() {\n    let mut rng = rand::thread_rng();\n    let chainspec_bytes = rng.gen::<[u8; 32]>();\n\n    let incomplete_chainspec_registry =\n        ChainspecRegistry::new_with_optional_global_state(&chainspec_bytes, None);\n\n    let run_genesis_request = GenesisRequest::new(\n        DEFAULT_GENESIS_CONFIG_HASH,\n        DEFAULT_PROTOCOL_VERSION,\n        DEFAULT_EXEC_CONFIG.clone(),\n        incomplete_chainspec_registry,\n    );\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(run_genesis_request);\n}\n\n#[derive(Copy, Clone, Eq, PartialEq, Debug)]\nstruct TestConfig {\n    with_global_state_bytes: bool,\n    from_v1_4_4: bool,\n}\n\nfn should_upgrade_chainspec_registry(cfg: TestConfig) {\n    let mut rng = rand::thread_rng();\n    let data_dir = TempDir::new().expect(\"should create temp dir\");\n\n    let mut builder = if cfg.from_v1_4_4 {\n        let (builder, _lmdb_fixture_state, _temp_dir) =\n            lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_4_4);\n        builder\n    } else {\n        let mut builder = LmdbWasmTestBuilder::new(data_dir.path());\n        builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n        builder\n    };\n\n    let chainspec_bytes = rng.gen::<[u8; 32]>();\n    let global_state_bytes = rng.gen::<[u8; 32]>();\n    let chainspec_bytes_hash = Digest::hash(chainspec_bytes);\n    let global_state_bytes_hash = Digest::hash(global_state_bytes);\n\n    let upgraded_chainspec_registry = ChainspecRegistry::new_with_optional_global_state(\n        &chainspec_bytes,\n        cfg.with_global_state_bytes\n            
.then_some(global_state_bytes.as_slice()),\n    );\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(OLD_PROTOCOL_VERSION)\n            .with_new_protocol_version(NEW_PROTOCOL_VERSION)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .with_chainspec_registry(upgraded_chainspec_registry)\n            .build()\n    };\n\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let queried_registry = builder\n        .query(None, Key::ChainspecRegistry, &[])\n        .expect(\"must have entry under Key::ChainspecRegistry\")\n        .as_cl_value()\n        .expect(\"must have underlying cl_value\")\n        .to_owned()\n        .into_t::<ChainspecRegistry>()\n        .expect(\"must convert to chainspec registry\");\n\n    // There should be no entry for the genesis accounts once the upgrade has completed.\n    assert!(queried_registry.genesis_accounts_raw_hash().is_none());\n\n    let queried_chainspec_hash = queried_registry.chainspec_raw_hash();\n    assert_eq!(*queried_chainspec_hash, chainspec_bytes_hash);\n\n    if cfg.with_global_state_bytes {\n        let queried_global_state_toml_hash = queried_registry.global_state_raw_hash().unwrap();\n        assert_eq!(*queried_global_state_toml_hash, global_state_bytes_hash);\n    } else {\n        assert!(queried_registry.global_state_raw_hash().is_none());\n    }\n}\n\n#[ignore]\n#[test]\nfn should_upgrade_chainspec_registry_with_global_state_hash() {\n    let cfg = TestConfig {\n        with_global_state_bytes: true,\n        from_v1_4_4: false,\n    };\n    should_upgrade_chainspec_registry(cfg)\n}\n\n#[ignore]\n#[test]\nfn should_upgrade_chainspec_registry_without_global_state_hash() {\n    let cfg = TestConfig {\n        with_global_state_bytes: false,\n        from_v1_4_4: false,\n    };\n    should_upgrade_chainspec_registry(cfg)\n}\n\n#[ignore]\n#[test]\nfn 
should_upgrade_chainspec_registry_with_global_state_hash_from_v1_4_4() {\n    let cfg = TestConfig {\n        with_global_state_bytes: true,\n        from_v1_4_4: true,\n    };\n    should_upgrade_chainspec_registry(cfg)\n}\n\n#[ignore]\n#[test]\nfn should_upgrade_chainspec_registry_without_global_state_hash_from_v1_4_4() {\n    let cfg = TestConfig {\n        with_global_state_bytes: false,\n        from_v1_4_4: true,\n    };\n    should_upgrade_chainspec_registry(cfg)\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/check_transfer_success.rs",
    "content": "use std::path::PathBuf;\n\nuse casper_engine_test_support::{\n    utils::create_genesis_config, DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder,\n    DEFAULT_ACCOUNTS, DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE,\n    DEFAULT_ACCOUNT_PUBLIC_KEY, DEFAULT_CHAINSPEC_REGISTRY, DEFAULT_GENESIS_CONFIG_HASH,\n    DEFAULT_PAYMENT, DEFAULT_PROTOCOL_VERSION,\n};\nuse casper_storage::data_access_layer::GenesisRequest;\nuse casper_types::{runtime_args, GenesisAccount, Key, Motes, U512};\n\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_DESTINATION: &str = \"destination\";\nconst TRANSFER_WASM: &str = \"transfer_main_purse_to_new_purse.wasm\";\nconst NEW_PURSE_NAME: &str = \"test_purse\";\nconst FIRST_TRANSFER_AMOUNT: u64 = 142;\nconst SECOND_TRANSFER_AMOUNT: u64 = 250;\n\n#[ignore]\n#[test]\nfn test_check_transfer_success_with_source_only() {\n    // create a genesis account.\n    let genesis_account = GenesisAccount::account(\n        DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n        Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n        None,\n    );\n\n    // add the account to the genesis config.\n    let mut accounts = vec![genesis_account];\n    accounts.extend((*DEFAULT_ACCOUNTS).clone());\n    let genesis_config = create_genesis_config(accounts);\n    let genesis_request = GenesisRequest::new(\n        DEFAULT_GENESIS_CONFIG_HASH,\n        DEFAULT_PROTOCOL_VERSION,\n        genesis_config,\n        DEFAULT_CHAINSPEC_REGISTRY.clone(),\n    );\n\n    // Doing a transfer from main purse to create new purse and store URef under NEW_PURSE_NAME.\n    let transfer_amount = U512::from(FIRST_TRANSFER_AMOUNT);\n    let path = PathBuf::from(TRANSFER_WASM);\n    let session_args = runtime_args! {\n        ARG_DESTINATION => NEW_PURSE_NAME,\n        ARG_AMOUNT => transfer_amount\n    };\n\n    // build the deploy.\n    let deploy_item = DeployItemBuilder::new()\n        .with_standard_payment(runtime_args! 
{ARG_AMOUNT => *DEFAULT_PAYMENT})\n        .with_session_code(path, session_args)\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([42; 32])\n        .build();\n\n    // build a request to execute the deploy.\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(genesis_request).commit();\n\n    // we need this to figure out what the transfer fee is.\n    let proposer_starting_balance = builder.get_proposer_purse_balance();\n\n    // Getting main purse URef to verify transfer\n    let source_purse = builder\n        .get_expected_addressable_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .main_purse();\n\n    builder.exec(exec_request).commit().expect_success();\n\n    let transaction_fee = builder.get_proposer_purse_balance() - proposer_starting_balance;\n    let expected_source_ending_balance = Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE)\n        .checked_sub(Motes::new(transfer_amount))\n        .unwrap()\n        .checked_sub(Motes::new(transaction_fee))\n        .unwrap();\n    let actual_source_ending_balance = Motes::new(builder.get_purse_balance(source_purse));\n\n    assert_eq!(expected_source_ending_balance, actual_source_ending_balance);\n}\n\n#[ignore]\n#[test]\nfn test_check_transfer_success_with_source_only_errors() {\n    let genesis_account = GenesisAccount::account(\n        DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n        Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n        None,\n    );\n\n    let mut accounts = vec![genesis_account];\n    accounts.extend((*DEFAULT_ACCOUNTS).clone());\n    let genesis_config = create_genesis_config(accounts);\n    let genesis_request = GenesisRequest::new(\n        DEFAULT_GENESIS_CONFIG_HASH,\n        DEFAULT_PROTOCOL_VERSION,\n        genesis_config,\n        DEFAULT_CHAINSPEC_REGISTRY.clone(),\n    );\n\n    
// Doing a transfer from main purse to create new purse and store Uref under NEW_PURSE_NAME.\n    let transfer_amount = U512::from(FIRST_TRANSFER_AMOUNT);\n    // Setup mismatch between transfer_amount performed and given to trigger assertion.\n    let wrong_transfer_amount = transfer_amount - U512::from(100u64);\n\n    let path = PathBuf::from(TRANSFER_WASM);\n    let session_args = runtime_args! {\n        ARG_DESTINATION => NEW_PURSE_NAME,\n        ARG_AMOUNT => wrong_transfer_amount\n    };\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_standard_payment(runtime_args! {ARG_AMOUNT => *DEFAULT_PAYMENT})\n        .with_session_code(path, session_args)\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([42; 32])\n        .build();\n\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    // Set up test builder and run genesis.\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(genesis_request).commit();\n\n    // compare proposer balance before and after the transaction to get the tx fee.\n    let proposer_starting_balance = builder.get_proposer_purse_balance();\n    let source_purse = builder\n        .get_expected_addressable_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .main_purse();\n\n    builder.exec(exec_request).commit().expect_success();\n\n    let transaction_fee = builder.get_proposer_purse_balance() - proposer_starting_balance;\n    let expected_source_ending_balance = Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE)\n        .checked_sub(Motes::new(transfer_amount))\n        .unwrap()\n        .checked_sub(Motes::new(transaction_fee))\n        .unwrap();\n    let actual_source_ending_balance = Motes::new(builder.get_purse_balance(source_purse));\n\n    assert!(expected_source_ending_balance != actual_source_ending_balance);\n}\n\n#[ignore]\n#[test]\nfn 
test_check_transfer_success_with_source_and_target() {\n    let genesis_account = GenesisAccount::account(\n        DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n        Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n        None,\n    );\n\n    let mut accounts = vec![genesis_account];\n    accounts.extend((*DEFAULT_ACCOUNTS).clone());\n    let genesis_config = create_genesis_config(accounts);\n    let genesis_request = GenesisRequest::new(\n        DEFAULT_GENESIS_CONFIG_HASH,\n        DEFAULT_PROTOCOL_VERSION,\n        genesis_config,\n        DEFAULT_CHAINSPEC_REGISTRY.clone(),\n    );\n\n    let transfer_amount = U512::from(SECOND_TRANSFER_AMOUNT);\n    // Doing a transfer from main purse to create new purse and store URef under NEW_PURSE_NAME.\n    let path = PathBuf::from(TRANSFER_WASM);\n    let session_args = runtime_args! {\n        ARG_DESTINATION => NEW_PURSE_NAME,\n        ARG_AMOUNT => transfer_amount\n    };\n    let deploy_item = DeployItemBuilder::new()\n        .with_standard_payment(runtime_args! 
{ARG_AMOUNT => *DEFAULT_PAYMENT})\n        .with_session_code(path, session_args)\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([42; 32])\n        .build();\n\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(genesis_request).commit();\n\n    // we need this to figure out what the transfer fee is.\n    let proposer_starting_balance = builder.get_proposer_purse_balance();\n\n    // Getting main purse URef to verify transfer\n    let source_purse = builder\n        .get_expected_addressable_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .main_purse();\n\n    builder.exec(exec_request).commit().expect_success();\n\n    let transaction_fee = builder.get_proposer_purse_balance() - proposer_starting_balance;\n    let expected_source_ending_balance = Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE)\n        .checked_sub(Motes::new(transfer_amount))\n        .unwrap()\n        .checked_sub(Motes::new(transaction_fee))\n        .unwrap();\n    let actual_source_ending_balance = Motes::new(builder.get_purse_balance(source_purse));\n\n    assert_eq!(expected_source_ending_balance, actual_source_ending_balance);\n\n    // retrieve newly created purse URef\n    builder\n        .query(\n            None,\n            Key::Account(*DEFAULT_ACCOUNT_ADDR),\n            &[NEW_PURSE_NAME.to_string()],\n        )\n        .expect(\"new purse should exist\");\n\n    // let target_purse = builder\n\n    let named_keys = builder.get_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR);\n\n    let target_purse = named_keys\n        .get(NEW_PURSE_NAME)\n        .expect(\"value\")\n        .into_uref()\n        .expect(\"uref\");\n\n    let expected_balance = U512::from(SECOND_TRANSFER_AMOUNT);\n    let target_balance = builder.get_purse_balance(target_purse);\n\n    
assert_eq!(expected_balance, target_balance);\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/account/associated_keys.rs",
    "content": "use once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT,\n    LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::{\n    engine_state::{EngineConfigBuilder, Error},\n    execution::ExecError,\n};\nuse casper_types::{\n    account::AccountHash, addressable_entity::Weight, runtime_args, ApiError, U512,\n};\n\nconst CONTRACT_ADD_UPDATE_ASSOCIATED_KEY: &str = \"add_update_associated_key.wasm\";\nconst CONTRACT_REMOVE_ASSOCIATED_KEY: &str = \"remove_associated_key.wasm\";\nconst CONTRACT_TRANSFER_PURSE_TO_ACCOUNT: &str = \"transfer_purse_to_account.wasm\";\nconst ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]);\nconst ARG_ACCOUNT: &str = \"account\";\n\nstatic ACCOUNT_1_INITIAL_FUND: Lazy<U512> = Lazy::new(|| *DEFAULT_PAYMENT * 10);\n\n#[ignore]\n#[test]\nfn should_manage_associated_key() {\n    // for a given account, should be able to add a new associated key and update\n    // that key\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_PURSE_TO_ACCOUNT,\n        runtime_args! { \"target\" => ACCOUNT_1_ADDR, \"amount\" => *ACCOUNT_1_INITIAL_FUND },\n    )\n    .build();\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        ACCOUNT_1_ADDR,\n        CONTRACT_ADD_UPDATE_ASSOCIATED_KEY,\n        runtime_args! 
{ \"account\" => *DEFAULT_ACCOUNT_ADDR, },\n    )\n    .build();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()).commit();\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let genesis_key = *DEFAULT_ACCOUNT_ADDR;\n\n    let contract_1 = builder\n        .get_entity_by_account_hash(ACCOUNT_1_ADDR)\n        .expect(\"should have account\");\n\n    let gen_weight = contract_1\n        .associated_keys()\n        .get(&genesis_key)\n        .expect(\"weight\");\n\n    let expected_weight = Weight::new(2);\n    assert_eq!(*gen_weight, expected_weight, \"unexpected weight\");\n\n    let exec_request_3 = ExecuteRequestBuilder::standard(\n        ACCOUNT_1_ADDR,\n        CONTRACT_REMOVE_ASSOCIATED_KEY,\n        runtime_args! { ARG_ACCOUNT => *DEFAULT_ACCOUNT_ADDR, },\n    )\n    .build();\n\n    builder.exec(exec_request_3).expect_success().commit();\n\n    let contract_1 = builder\n        .get_entity_by_account_hash(ACCOUNT_1_ADDR)\n        .expect(\"should have account\");\n\n    let new_weight = contract_1.associated_keys().get(&genesis_key);\n\n    assert_eq!(new_weight, None, \"key should be removed\");\n\n    let is_error = builder.is_error();\n    assert!(!is_error);\n}\n\n#[ignore]\n#[test]\nfn should_remove_associated_key_when_at_max_allowed_cap() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    let engine_config = EngineConfigBuilder::new()\n        .with_max_associated_keys(2)\n        .build();\n\n    builder\n        .with_engine_config(engine_config)\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .commit();\n\n    assert_eq!(builder.get_engine_state().config().max_associated_keys(), 2);\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_PURSE_TO_ACCOUNT,\n        runtime_args! 
{ \"target\" => ACCOUNT_1_ADDR, \"amount\" => *ACCOUNT_1_INITIAL_FUND },\n    )\n    .build();\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        ACCOUNT_1_ADDR,\n        CONTRACT_ADD_UPDATE_ASSOCIATED_KEY,\n        runtime_args! { \"account\" => *DEFAULT_ACCOUNT_ADDR, },\n    )\n    .build();\n\n    builder.exec(exec_request_1).expect_success().commit();\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let exec_request_3 = ExecuteRequestBuilder::standard(\n        ACCOUNT_1_ADDR,\n        CONTRACT_ADD_UPDATE_ASSOCIATED_KEY,\n        runtime_args! { \"account\" => *DEFAULT_ACCOUNT_ADDR, },\n    )\n    .build();\n\n    builder.exec(exec_request_3).expect_failure();\n\n    let error = builder.get_error().expect(\"we asserted the failure\");\n\n    assert!(matches!(\n        error,\n        Error::Exec(ExecError::Revert(ApiError::MaxKeysLimit))\n    ));\n\n    let exec_request_4 = ExecuteRequestBuilder::standard(\n        ACCOUNT_1_ADDR,\n        CONTRACT_REMOVE_ASSOCIATED_KEY,\n        runtime_args! { ARG_ACCOUNT => *DEFAULT_ACCOUNT_ADDR, },\n    )\n    .build();\n\n    builder.exec(exec_request_4).expect_success().commit();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/account/authorized_keys.rs",
    "content": "use casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder,\n    ARG_AMOUNT, DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::{\n    engine_state::{self, Error},\n    execution::ExecError,\n};\nuse casper_storage::{system::transfer::TransferError, tracking_copy::TrackingCopyError};\nuse casper_types::{account::AccountHash, addressable_entity::Weight, runtime_args, U512};\n\nconst CONTRACT_ADD_ASSOCIATED_KEY: &str = \"add_associated_key.wasm\";\nconst CONTRACT_ADD_UPDATE_ASSOCIATED_KEY: &str = \"add_update_associated_key.wasm\";\nconst CONTRACT_SET_ACTION_THRESHOLDS: &str = \"set_action_thresholds.wasm\";\nconst ARG_KEY_MANAGEMENT_THRESHOLD: &str = \"key_management_threshold\";\nconst ARG_DEPLOY_THRESHOLD: &str = \"deploy_threshold\";\nconst ARG_ACCOUNT: &str = \"account\";\nconst ARG_WEIGHT: &str = \"weight\";\nconst KEY_1: AccountHash = AccountHash::new([254; 32]);\nconst KEY_2: AccountHash = AccountHash::new([253; 32]);\nconst KEY_2_WEIGHT: Weight = Weight::new(100);\nconst KEY_3: AccountHash = AccountHash::new([252; 32]);\n\n#[ignore]\n#[test]\nfn should_deploy_with_authorized_identity_key() {\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_SET_ACTION_THRESHOLDS,\n        runtime_args! 
{\n            ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(1),\n            ARG_DEPLOY_THRESHOLD => Weight::new(1),\n        },\n    )\n    .build();\n    // Basic deploy with single key\n    LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .commit()\n        .expect_success();\n}\n\n#[ignore]\n#[test]\nfn should_raise_auth_failure_with_invalid_key() {\n    // tests that authorized keys that does not belong to account raises\n    // Error::Authorization\n    assert_ne!(*DEFAULT_ACCOUNT_ADDR, KEY_1);\n\n    let deploy = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        .with_session_code(\n            CONTRACT_SET_ACTION_THRESHOLDS,\n            runtime_args! {\n                ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(1),\n                ARG_DEPLOY_THRESHOLD => Weight::new(1)\n            },\n        )\n        .with_deploy_hash([1u8; 32])\n        .with_authorization_keys(&[KEY_1])\n        .build();\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy).build();\n\n    // Basic deploy with single key\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .commit();\n\n    let deploy_result = builder\n        .get_exec_result_owned(0)\n        .expect(\"should have exec response\");\n\n    assert!(\n        deploy_result.has_precondition_failure(),\n        \"{:?}\",\n        deploy_result\n    );\n    let message = format!(\"{}\", deploy_result.error().unwrap());\n\n    assert_eq!(\n        message,\n        format!(\n            \"{}\",\n            engine_state::Error::TrackingCopy(TrackingCopyError::Authorization)\n        )\n    )\n}\n\n#[ignore]\n#[test]\nfn should_raise_auth_failure_with_invalid_keys() {\n    // tests that authorized keys that does not 
belong to account raises\n    // Error::Authorization\n    assert_ne!(*DEFAULT_ACCOUNT_ADDR, KEY_1);\n    assert_ne!(*DEFAULT_ACCOUNT_ADDR, KEY_2);\n    assert_ne!(*DEFAULT_ACCOUNT_ADDR, KEY_3);\n\n    let deploy = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        .with_session_code(\n            CONTRACT_SET_ACTION_THRESHOLDS,\n            runtime_args! {\n                ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(1),\n                ARG_DEPLOY_THRESHOLD => Weight::new(1)\n            },\n        )\n        .with_deploy_hash([1u8; 32])\n        .with_authorization_keys(&[KEY_2, KEY_1, KEY_3])\n        .build();\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy).build();\n\n    // Basic deploy with single key\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .commit();\n\n    let deploy_result = builder\n        .get_exec_result_owned(0)\n        .expect(\"should have exec response\");\n\n    assert!(deploy_result.has_precondition_failure());\n    let message = format!(\"{}\", deploy_result.error().unwrap());\n\n    assert_eq!(\n        message,\n        format!(\n            \"{}\",\n            engine_state::Error::TrackingCopy(TrackingCopyError::Authorization)\n        )\n    )\n}\n\n#[ignore]\n#[test]\nfn should_raise_deploy_authorization_failure() {\n    // tests that authorized keys needs sufficient cumulative weight\n    assert_ne!(*DEFAULT_ACCOUNT_ADDR, KEY_1);\n    assert_ne!(*DEFAULT_ACCOUNT_ADDR, KEY_2);\n    assert_ne!(*DEFAULT_ACCOUNT_ADDR, KEY_3);\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_ADD_UPDATE_ASSOCIATED_KEY,\n        runtime_args! 
{ ARG_ACCOUNT => KEY_1, },\n    )\n    .build();\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_ADD_UPDATE_ASSOCIATED_KEY,\n        runtime_args! { ARG_ACCOUNT => KEY_2, },\n    )\n    .build();\n    let exec_request_3 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_ADD_UPDATE_ASSOCIATED_KEY,\n        runtime_args! { ARG_ACCOUNT => KEY_3, },\n    )\n    .build();\n    // Deploy threshold is equal to 3, keymgmnt is still 1.\n    // Even after verifying weights and thresholds to not\n    // lock out the account, those values should work as\n    // account now has 1. identity key with weight=1 and\n    // a key with weight=2.\n    let exec_request_4 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_SET_ACTION_THRESHOLDS,\n        runtime_args! {\n            ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(4),\n            ARG_DEPLOY_THRESHOLD => Weight::new(3)\n        },\n    )\n    .build();\n    // Basic deploy with single key\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        // Reusing a test contract that would add new key\n        .exec(exec_request_1)\n        .expect_success()\n        .commit()\n        .exec(exec_request_2)\n        .expect_success()\n        .commit()\n        .exec(exec_request_3)\n        .expect_success()\n        .commit()\n        // This should execute successfully - change deploy and key management\n        // thresholds.\n        .exec(exec_request_4)\n        .expect_success()\n        .commit();\n\n    let deploy = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        // Next deploy will see deploy threshold == 4, keymgmnt == 5\n        .with_session_code(\n            CONTRACT_SET_ACTION_THRESHOLDS,\n            runtime_args! {\n                ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(5),\n                ARG_DEPLOY_THRESHOLD => Weight::new(4)\n            }, //args\n        )\n        .with_deploy_hash([5u8; 32])\n        .with_authorization_keys(&[KEY_1])\n        .build();\n    let exec_request_5 = ExecuteRequestBuilder::from_deploy_item(&deploy).build();\n\n    // With deploy threshold == 3 using single secondary key\n    // with weight == 2 should raise deploy authorization failure.\n    builder.clear_results().exec(exec_request_5).commit();\n\n    {\n        let deploy_result = builder\n            .get_exec_result_owned(0)\n            .expect(\"should have exec response\");\n\n        assert!(deploy_result.has_precondition_failure());\n        let message = format!(\"{}\", deploy_result.error().unwrap());\n        assert!(message.contains(&format!(\"{}\", ExecError::DeploymentAuthorizationFailure)))\n    }\n    let deploy = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        // change deployment threshold to 4\n        .with_session_code(\n            CONTRACT_SET_ACTION_THRESHOLDS,\n            runtime_args! 
{\n                ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(6),\n                ARG_DEPLOY_THRESHOLD => Weight::new(5)\n            },\n        )\n        .with_deploy_hash([6u8; 32])\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR, KEY_1, KEY_2, KEY_3])\n        .build();\n    let exec_request_6 = ExecuteRequestBuilder::from_deploy_item(&deploy).build();\n    // identity key (w: 1) and KEY_1 (w: 2) passes threshold of 3\n    builder\n        .clear_results()\n        .exec(exec_request_6)\n        .expect_success()\n        .commit();\n\n    let deploy = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        // change deployment threshold to 4\n        .with_session_code(\n            CONTRACT_SET_ACTION_THRESHOLDS,\n            runtime_args! {\n                ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(0),\n                ARG_DEPLOY_THRESHOLD => Weight::new(0)\n            }, //args\n        )\n        .with_deploy_hash([6u8; 32])\n        .with_authorization_keys(&[KEY_2, KEY_1])\n        .build();\n    let exec_request_7 = ExecuteRequestBuilder::from_deploy_item(&deploy).build();\n\n    // deployment threshold is now 4\n    // failure: KEY_2 weight + KEY_1 weight < deployment threshold\n    // let result4 = builder.clear_results()\n    builder.clear_results().exec(exec_request_7).commit();\n\n    {\n        let deploy_result = builder\n            .get_exec_result_owned(0)\n            .expect(\"should have exec response\");\n\n        assert!(deploy_result.has_precondition_failure());\n        let message = format!(\"{}\", deploy_result.error().unwrap());\n        assert!(message.contains(&format!(\"{}\", ExecError::DeploymentAuthorizationFailure)))\n    }\n\n    let deploy = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        // change deployment threshold to 4\n        .with_session_code(\n            CONTRACT_SET_ACTION_THRESHOLDS,\n            runtime_args! {\n                ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(0),\n                ARG_DEPLOY_THRESHOLD => Weight::new(0)\n            }, //args\n        )\n        .with_deploy_hash([8u8; 32])\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR, KEY_1, KEY_2, KEY_3])\n        .build();\n    let exec_request_8 = ExecuteRequestBuilder::from_deploy_item(&deploy).build();\n\n    // success: identity key weight + KEY_1 weight + KEY_2 weight >= deployment\n    // threshold\n    builder\n        .clear_results()\n        .exec(exec_request_8)\n        .commit()\n        .expect_success();\n}\n\n#[ignore]\n#[test]\nfn should_authorize_deploy_with_multiple_keys() {\n    // tests that authorized keys needs sufficient cumulative weight\n    // and each of the associated keys is greater than threshold\n    assert_ne!(*DEFAULT_ACCOUNT_ADDR, KEY_1);\n    assert_ne!(*DEFAULT_ACCOUNT_ADDR, KEY_2);\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_ADD_UPDATE_ASSOCIATED_KEY,\n        runtime_args! { ARG_ACCOUNT => KEY_1, },\n    )\n    .build();\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_ADD_UPDATE_ASSOCIATED_KEY,\n        runtime_args! 
{ ARG_ACCOUNT => KEY_2, },\n    )\n    .build();\n    // Basic deploy with single key\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        // Reusing a test contract that would add new key\n        .exec(exec_request_1)\n        .expect_success()\n        .commit()\n        .exec(exec_request_2)\n        .expect_success()\n        .commit();\n\n    // KEY_1 (w: 2) KEY_2 (w: 2) each passes default threshold of 1\n\n    let deploy = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        .with_session_code(\n            CONTRACT_SET_ACTION_THRESHOLDS,\n            runtime_args! {\n                ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(0),\n                ARG_DEPLOY_THRESHOLD => Weight::new(0),\n            },\n        )\n        .with_deploy_hash([36; 32])\n        .with_authorization_keys(&[KEY_2, KEY_1])\n        .build();\n    let exec_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy).build();\n\n    builder.exec(exec_request_3).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_not_authorize_deploy_with_duplicated_keys() {\n    // tests that authorized keys needs sufficient cumulative weight\n    // and each of the associated keys is greater than threshold\n\n    assert_ne!(*DEFAULT_ACCOUNT_ADDR, KEY_1);\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_ADD_UPDATE_ASSOCIATED_KEY,\n        runtime_args! { ARG_ACCOUNT => KEY_1, },\n    )\n    .build();\n\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_ADD_ASSOCIATED_KEY,\n        runtime_args! 
{\n            ARG_ACCOUNT => KEY_2,\n            ARG_WEIGHT => KEY_2_WEIGHT,\n        },\n    )\n    .build();\n\n    let exec_request_3 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_SET_ACTION_THRESHOLDS,\n        runtime_args! {\n            ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(4),\n            ARG_DEPLOY_THRESHOLD => Weight::new(3)\n        },\n    )\n    .build();\n    // Basic deploy with single key\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder\n        // Reusing a test contract that would add new key\n        .exec(exec_request_1)\n        .expect_success()\n        .commit();\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    builder.exec(exec_request_3).expect_success().commit();\n\n    let deploy = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_standard_payment(runtime_args! {\n            ARG_AMOUNT => *DEFAULT_PAYMENT,\n        })\n        .with_session_code(\n            CONTRACT_SET_ACTION_THRESHOLDS,\n            runtime_args! 
{\n                ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(0),\n                ARG_DEPLOY_THRESHOLD => Weight::new(0)\n            },\n        )\n        .with_deploy_hash([3u8; 32])\n        .with_authorization_keys(&[\n            KEY_1, KEY_1, KEY_1, KEY_1, KEY_1, KEY_1, KEY_1, KEY_1, KEY_1, KEY_1,\n        ])\n        .build();\n    let exec_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy).build();\n    builder.clear_results().exec(exec_request_3).commit();\n    let deploy_result = builder\n        .get_exec_result_owned(0)\n        .expect(\"should have exec response\");\n\n    assert!(\n        deploy_result.has_precondition_failure(),\n        \"{:?}\",\n        deploy_result\n    );\n    let message = format!(\"{}\", deploy_result.error().unwrap());\n    assert!(message.contains(&format!(\n        \"{}\",\n        TrackingCopyError::DeploymentAuthorizationFailure\n    )))\n}\n\n#[ignore]\n#[test]\nfn should_not_authorize_transfer_without_deploy_key_threshold() {\n    // tests that authorized keys needs sufficient cumulative weight\n    // and each of the associated keys is greater than threshold\n    let transfer_amount = U512::from(1);\n\n    assert_ne!(*DEFAULT_ACCOUNT_ADDR, KEY_1);\n    assert_ne!(*DEFAULT_ACCOUNT_ADDR, KEY_2);\n\n    let add_key_1_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_ADD_UPDATE_ASSOCIATED_KEY,\n        runtime_args! { ARG_ACCOUNT => KEY_1, },\n    )\n    .build();\n    let add_key_2_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_ADD_UPDATE_ASSOCIATED_KEY,\n        runtime_args! { ARG_ACCOUNT => KEY_2, },\n    )\n    .build();\n    let update_thresholds_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_SET_ACTION_THRESHOLDS,\n        runtime_args! 
{\n            ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(5),\n            ARG_DEPLOY_THRESHOLD => Weight::new(5),\n        },\n    )\n    .build();\n\n    // Basic deploy with single key\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        // Reusing a test contract that would add new key\n        .exec(add_key_1_request)\n        .expect_success()\n        .commit();\n\n    builder.exec(add_key_2_request).expect_success().commit();\n\n    builder\n        .exec(update_thresholds_request)\n        .expect_success()\n        .commit();\n\n    // KEY_1 (w: 2) DEFAULT_ACCOUNT (w: 1) does not pass deploy threshold of 5\n    let transfer_request_1 = TransferRequestBuilder::new(transfer_amount, KEY_2)\n        .with_authorization_keys([KEY_1, *DEFAULT_ACCOUNT_ADDR])\n        .build();\n\n    builder.transfer_and_commit(transfer_request_1);\n\n    let response = builder\n        .get_exec_result_owned(3)\n        .expect(\"should have response\");\n    let error = response.error().expect(\"should have error\");\n    assert!(matches!(\n        error,\n        Error::Transfer(TransferError::TrackingCopy(\n            TrackingCopyError::DeploymentAuthorizationFailure\n        ))\n    ));\n\n    // KEY_1 (w: 2) KEY_2 (w: 2) DEFAULT_ACCOUNT_ADDR (w: 1) each passes threshold of 5\n    let transfer_request = TransferRequestBuilder::new(transfer_amount, KEY_2)\n        .with_authorization_keys([KEY_2, KEY_1, *DEFAULT_ACCOUNT_ADDR])\n        .build();\n\n    builder\n        .transfer_and_commit(transfer_request)\n        .expect_success();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/account/key_management_thresholds.rs",
    "content": "use casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, ARG_AMOUNT,\n    DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{account::AccountHash, runtime_args};\n\nconst CONTRACT_KEY_MANAGEMENT_THRESHOLDS: &str = \"key_management_thresholds.wasm\";\n\nconst ARG_STAGE: &str = \"stage\";\n\n#[ignore]\n#[test]\nfn should_verify_key_management_permission_with_low_weight() {\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_KEY_MANAGEMENT_THRESHOLDS,\n        runtime_args! { ARG_STAGE => String::from(\"init\") },\n    )\n    .build();\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_KEY_MANAGEMENT_THRESHOLDS,\n        runtime_args! { ARG_STAGE => String::from(\"test-permission-denied\") },\n    )\n    .build();\n    LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request_1)\n        .expect_success()\n        .commit()\n        .exec(exec_request_2)\n        .expect_success()\n        .commit();\n}\n\n#[ignore]\n#[test]\nfn should_verify_key_management_permission_with_sufficient_weight() {\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_KEY_MANAGEMENT_THRESHOLDS,\n        runtime_args! { ARG_STAGE => String::from(\"init\") },\n    )\n    .build();\n    let deploy = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        // This test verifies that all key management operations succeed\n        .with_session_code(\n            \"key_management_thresholds.wasm\",\n            runtime_args! 
{ ARG_STAGE => String::from(\"test-key-mgmnt-succeed\") },\n        )\n        .with_deploy_hash([2u8; 32])\n        .with_authorization_keys(&[\n            *DEFAULT_ACCOUNT_ADDR,\n            // Key [42; 32] is created in init stage\n            AccountHash::new([42; 32]),\n        ])\n        .build();\n    let exec_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy).build();\n    LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request_1)\n        .expect_success()\n        .commit()\n        .exec(exec_request_2)\n        .expect_success()\n        .commit();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/account/mod.rs",
    "content": "mod associated_keys;\nmod authorized_keys;\nmod key_management_thresholds;\nmod named_keys;\nmod named_keys_stored;\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/account/named_keys.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{bytesrepr::FromBytes, runtime_args, CLTyped, CLValue, Key, U512};\n\nconst CONTRACT_NAMED_KEYS: &str = \"named_keys.wasm\";\nconst EXPECTED_UREF_VALUE: u64 = 123_456_789u64;\n\nconst KEY1: &str = \"hello-world\";\nconst KEY2: &str = \"big-value\";\n\nconst COMMAND_CREATE_UREF1: &str = \"create-uref1\";\nconst COMMAND_CREATE_UREF2: &str = \"create-uref2\";\nconst COMMAND_REMOVE_UREF1: &str = \"remove-uref1\";\nconst COMMAND_REMOVE_UREF2: &str = \"remove-uref2\";\nconst COMMAND_TEST_READ_UREF1: &str = \"test-read-uref1\";\nconst COMMAND_TEST_READ_UREF2: &str = \"test-read-uref2\";\nconst COMMAND_INCREASE_UREF2: &str = \"increase-uref2\";\nconst COMMAND_OVERWRITE_UREF2: &str = \"overwrite-uref2\";\nconst ARG_COMMAND: &str = \"command\";\n\nfn run_command(builder: &mut LmdbWasmTestBuilder, command: &str) {\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_NAMED_KEYS,\n        runtime_args! 
{ ARG_COMMAND => command },\n    )\n    .build();\n    builder.exec(exec_request).commit().expect_success();\n}\n\nfn read_value<T: CLTyped + FromBytes>(builder: &mut LmdbWasmTestBuilder, key: Key) -> T {\n    CLValue::try_from(builder.query(None, key, &[]).expect(\"should have value\"))\n        .expect(\"should have CLValue\")\n        .into_t()\n        .expect(\"should convert successfully\")\n}\n\n#[ignore]\n#[test]\nfn should_run_named_keys_contract() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    run_command(&mut builder, COMMAND_CREATE_UREF1);\n\n    let named_keys = builder.get_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR);\n\n    assert!(named_keys.contains(KEY1));\n    assert!(!named_keys.contains(KEY2));\n\n    run_command(&mut builder, COMMAND_CREATE_UREF2);\n\n    let named_keys = builder.get_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR);\n\n    let uref1 = *named_keys.get(KEY1).expect(\"should have key\");\n    let uref2 = *named_keys.get(KEY2).expect(\"should have key\");\n    let value1: String = read_value(&mut builder, uref1);\n    let value2: U512 = read_value(&mut builder, uref2);\n    assert_eq!(value1, \"Hello, world!\");\n    assert_eq!(value2, U512::max_value());\n\n    run_command(&mut builder, COMMAND_TEST_READ_UREF1);\n\n    run_command(&mut builder, COMMAND_REMOVE_UREF1);\n\n    let named_keys = builder.get_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR);\n\n    assert!(!named_keys.contains(KEY1));\n    assert!(named_keys.contains(KEY2));\n\n    run_command(&mut builder, COMMAND_TEST_READ_UREF2);\n\n    run_command(&mut builder, COMMAND_INCREASE_UREF2);\n\n    let named_keys = builder.get_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR);\n\n    let uref2 = *named_keys.get(KEY2).expect(\"should have key\");\n    let value2: U512 = read_value(&mut builder, uref2);\n    assert_eq!(value2, U512::zero());\n\n    run_command(&mut builder, 
COMMAND_OVERWRITE_UREF2);\n\n    let named_keys = builder.get_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR);\n\n    let uref2 = *named_keys.get(KEY2).expect(\"should have key\");\n    let value2: U512 = read_value(&mut builder, uref2);\n    assert_eq!(value2, U512::from(EXPECTED_UREF_VALUE));\n\n    run_command(&mut builder, COMMAND_REMOVE_UREF2);\n\n    let named_keys = builder.get_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR);\n\n    assert!(!named_keys.contains(KEY1));\n    assert!(!named_keys.contains(KEY2));\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/account/named_keys_stored.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::execution::ExecError;\nuse casper_types::{runtime_args, ApiError, RuntimeArgs};\n\nconst CONTRACT_HASH_NAME: &str = \"contract_stored\";\nconst ENTRY_POINT_CONTRACT: &str = \"named_keys_contract\";\nconst ENTRY_POINT_SESSION: &str = \"named_keys_session\";\nconst ENTRY_POINT_CONTRACT_TO_CONTRACT: &str = \"named_keys_contract_to_contract\";\n\n#[ignore]\n#[test]\nfn should_run_stored_named_keys_contract() {\n    let mut builder = setup();\n    let exec_request_1 = ExecuteRequestBuilder::contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_HASH_NAME,\n        ENTRY_POINT_CONTRACT,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request_1).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_run_stored_named_keys_session() {\n    let mut builder = setup();\n\n    let exec_request_1 = ExecuteRequestBuilder::contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_HASH_NAME,\n        ENTRY_POINT_SESSION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request_1).expect_failure();\n\n    let expected_error =\n        casper_execution_engine::engine_state::Error::Exec(ExecError::Revert(ApiError::User(0)));\n\n    builder.assert_error(expected_error)\n}\n\n#[ignore]\n#[test]\nfn should_run_stored_named_keys_contract_to_contract() {\n    let mut builder = setup();\n    let exec_request_1 = ExecuteRequestBuilder::contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_HASH_NAME,\n        ENTRY_POINT_CONTRACT_TO_CONTRACT,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request_1).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_run_stored_named_keys_module_bytes_to_contract() {\n    let mut builder = setup();\n    let 
exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        \"named_keys_stored_call.wasm\",\n        runtime_args! {\n            \"entry_point\" => ENTRY_POINT_CONTRACT,\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_1).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_run_stored_named_keys_module_bytes_to_contract_to_contract() {\n    let mut builder = setup();\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        \"named_keys_stored_call.wasm\",\n        runtime_args! {\n            \"entry_point\" => ENTRY_POINT_CONTRACT_TO_CONTRACT,\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_1).expect_success().commit();\n}\n\nfn setup() -> LmdbWasmTestBuilder {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        \"named_keys_stored.wasm\",\n        RuntimeArgs::default(),\n    )\n    .build();\n    builder.exec(exec_request_1).expect_success().commit();\n    builder\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/add_contract_version.rs",
    "content": "use std::collections::{BTreeMap, BTreeSet};\n\nuse crate::lmdb_fixture;\nuse casper_engine_test_support::{\n    utils, ExecuteRequestBuilder, LmdbWasmTestBuilder, UpgradeRequestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_ACCOUNT_SECRET_KEY, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::{\n    engine_state::{Error as StateError, SessionDataV1, SessionInputData},\n    execution::ExecError,\n};\nuse casper_types::{\n    bytesrepr::{Bytes, ToBytes},\n    ApiError, BlockTime, Digest, EraId, InitiatorAddr, Key, PricingMode, ProtocolVersion,\n    PublicKey, RuntimeArgs, SecretKey, TimeDiff, Timestamp, Transaction, TransactionArgs,\n    TransactionEntryPoint, TransactionRuntimeParams, TransactionScheduling, TransactionTarget,\n    TransactionV1, TransactionV1Payload,\n};\n\nconst CONTRACT: &str = \"do_nothing_stored.wasm\";\nconst CHAIN_NAME: &str = \"a\";\nconst BLOCK_TIME: BlockTime = BlockTime::new(10);\n\npub(crate) const ARGS_MAP_KEY: u16 = 0;\npub(crate) const TARGET_MAP_KEY: u16 = 1;\npub(crate) const ENTRY_POINT_MAP_KEY: u16 = 2;\npub(crate) const SCHEDULING_MAP_KEY: u16 = 3;\n\n#[ignore]\n#[test]\nfn should_allow_add_contract_version_via_deploy() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()).commit();\n\n    let deploy_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, CONTRACT, RuntimeArgs::new())\n            .build();\n\n    builder.exec(deploy_request).expect_success().commit();\n}\n\nfn try_add_contract_version(\n    is_install_upgrade: bool,\n    should_succeed: bool,\n    mut builder: LmdbWasmTestBuilder,\n) {\n    let module_bytes = utils::read_wasm_file(CONTRACT);\n\n    let txn = new_transaction_v1_session(\n        is_install_upgrade,\n        module_bytes,\n        TransactionRuntimeParams::VmCasperV1,\n        &DEFAULT_ACCOUNT_SECRET_KEY,\n    );\n\n    let txn_request = {\n        let wrapped = Transaction::from(txn.clone()).clone();\n 
       let initiator_addr = txn.initiator_addr();\n        let is_standard_payment = if let PricingMode::PaymentLimited {\n            standard_payment, ..\n        } = txn.pricing_mode()\n        {\n            *standard_payment\n        } else {\n            true\n        };\n        let tx_args = txn\n            .deserialize_field::<TransactionArgs>(ARGS_MAP_KEY)\n            .unwrap();\n        let args = tx_args.as_named().unwrap();\n        let target = txn\n            .deserialize_field::<TransactionTarget>(TARGET_MAP_KEY)\n            .unwrap();\n        let entry_point = txn\n            .deserialize_field::<TransactionEntryPoint>(ENTRY_POINT_MAP_KEY)\n            .unwrap();\n        let session_input_data = to_v1_session_input_data(\n            is_standard_payment,\n            initiator_addr,\n            args,\n            &target,\n            &entry_point,\n            &wrapped,\n        );\n        assert_eq!(\n            session_input_data.is_install_upgrade_allowed(),\n            is_install_upgrade,\n            \"session_input_data should match imputed arg\"\n        );\n        ExecuteRequestBuilder::from_session_input_data(&session_input_data)\n            .with_block_time(BLOCK_TIME)\n            .build()\n    };\n    assert_eq!(\n        txn_request.is_install_upgrade_allowed(),\n        is_install_upgrade,\n        \"txn_request should match imputed arg\"\n    );\n    builder.exec(txn_request);\n\n    if should_succeed {\n        builder.expect_success();\n    } else {\n        builder.assert_error(StateError::Exec(ExecError::Revert(\n            ApiError::NotAllowedToAddContractVersion,\n        )))\n    }\n}\n\npub fn new_transaction_v1_session(\n    is_install_upgrade: bool,\n    module_bytes: Bytes,\n    runtime: TransactionRuntimeParams,\n    secret_key: &SecretKey,\n) -> TransactionV1 {\n    let timestamp = Timestamp::now();\n\n    let target = TransactionTarget::Session {\n        is_install_upgrade,\n        module_bytes,\n       
 runtime,\n    };\n    let args = TransactionArgs::Named(RuntimeArgs::new());\n    let entry_point = TransactionEntryPoint::Call;\n    let scheduling = TransactionScheduling::Standard;\n    let mut fields: BTreeMap<u16, Bytes> = BTreeMap::new();\n\n    fields.insert(ARGS_MAP_KEY, args.to_bytes().unwrap().into());\n    fields.insert(TARGET_MAP_KEY, target.to_bytes().unwrap().into());\n    fields.insert(ENTRY_POINT_MAP_KEY, entry_point.to_bytes().unwrap().into());\n    fields.insert(SCHEDULING_MAP_KEY, scheduling.to_bytes().unwrap().into());\n\n    let public_key = PublicKey::from(secret_key);\n    let initiator_addr = InitiatorAddr::from(public_key);\n    build_transaction(\n        CHAIN_NAME.to_string(),\n        timestamp,\n        TimeDiff::from_millis(30 * 60 * 1_000),\n        PricingMode::Fixed {\n            gas_price_tolerance: 5,\n            additional_computation_factor: 0,\n        },\n        fields,\n        initiator_addr,\n        secret_key,\n    )\n}\n\nfn build_transaction(\n    chain_name: String,\n    timestamp: Timestamp,\n    ttl: TimeDiff,\n    pricing_mode: PricingMode,\n    fields: BTreeMap<u16, Bytes>,\n    initiator_addr: InitiatorAddr,\n    secret_key: &SecretKey,\n) -> TransactionV1 {\n    let transaction_v1_payload = TransactionV1Payload::new(\n        chain_name,\n        timestamp,\n        ttl,\n        pricing_mode,\n        initiator_addr,\n        fields,\n    );\n    let hash = Digest::hash(\n        transaction_v1_payload\n            .to_bytes()\n            .unwrap_or_else(|error| panic!(\"should serialize body: {}\", error)),\n    );\n    let mut transaction = TransactionV1::new(hash.into(), transaction_v1_payload, BTreeSet::new());\n    transaction.sign(secret_key);\n    transaction\n}\n\nfn to_v1_session_input_data<'a>(\n    is_standard_payment: bool,\n    initiator_addr: &'a InitiatorAddr,\n    args: &'a RuntimeArgs,\n    target: &'a TransactionTarget,\n    entry_point: &'a TransactionEntryPoint,\n    txn: &'a 
Transaction,\n) -> SessionInputData<'a> {\n    let is_install_upgrade = match target {\n        TransactionTarget::Session {\n            is_install_upgrade, ..\n        } => *is_install_upgrade,\n        _ => false,\n    };\n    match txn {\n        Transaction::Deploy(_) => panic!(\"unexpected deploy transaction\"),\n        Transaction::V1(transaction_v1) => {\n            let data = SessionDataV1::new(\n                args,\n                target,\n                entry_point,\n                is_install_upgrade,\n                transaction_v1.hash(),\n                transaction_v1.pricing_mode(),\n                initiator_addr,\n                txn.signers().clone(),\n                is_standard_payment,\n            );\n            SessionInputData::SessionDataV1 { data }\n        }\n    }\n}\n\n#[ignore]\n#[test]\nfn should_allow_add_contract_version_via_transaction_v1_installer_upgrader() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()).commit();\n    try_add_contract_version(true, true, builder)\n}\n\n#[ignore]\n#[test]\nfn should_disallow_add_contract_version_via_transaction_v1_standard() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()).commit();\n    try_add_contract_version(false, false, builder)\n}\n\n#[ignore]\n#[test]\nfn should_allow_1x_user_to_add_contract_version_via_transaction_v1_installer_upgrader() {\n    let (mut builder, lmdb_fixture_state, _temp_dir) =\n        lmdb_fixture::builder_from_global_state_fixture_with_enable_ae(\n            lmdb_fixture::RELEASE_1_5_8,\n            true,\n        );\n    let old_protocol_version = lmdb_fixture_state.genesis_protocol_version();\n\n    let mut upgrade_request = UpgradeRequestBuilder::new()\n        .with_current_protocol_version(old_protocol_version)\n        .with_new_protocol_version(ProtocolVersion::from_parts(2, 0, 0))\n        
.with_activation_point(EraId::new(1))\n        .with_enable_addressable_entity(true)\n        .build();\n\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let account_as_1x = builder\n        .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[])\n        .expect(\"must have stored value\")\n        .as_account()\n        .is_some();\n\n    assert!(account_as_1x);\n    try_add_contract_version(true, true, builder)\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/auction.rs",
    "content": "use casper_engine_test_support::{\n    ChainspecConfig, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNTS,\n    DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_PUBLIC_KEY, DEFAULT_PROTOCOL_VERSION,\n};\nuse casper_execution_engine::{engine_state::Error, execution::ExecError};\nuse casper_types::{\n    runtime_args,\n    system::auction::{ARG_AMOUNT, ARG_DELEGATION_RATE, ARG_PUBLIC_KEY},\n    ApiError, U512,\n};\nuse once_cell::sync::Lazy;\nuse std::path::PathBuf;\n\nconst ADD_BIDS_WASM: &str = \"auction_bids.wasm\";\nconst ARG_ENTRY_POINT: &str = \"entry_point\";\n/// The name of the chainspec file on disk.\npub const CHAINSPEC_NAME: &str = \"chainspec.toml\";\npub static LOCAL_PATH: Lazy<PathBuf> =\n    Lazy::new(|| PathBuf::from(env!(\"CARGO_MANIFEST_DIR\")).join(\"../../resources/local/\"));\n\n#[ignore]\n#[test]\nfn add_auction_should_fail_when_delegation_rate_not_met() {\n    let path = LOCAL_PATH.join(CHAINSPEC_NAME);\n    let mut chainspec =\n        ChainspecConfig::from_chainspec_path(path).expect(\"must build chainspec configuration\");\n    chainspec = chainspec.with_minimum_delegation_rate(20);\n    let mut builder = LmdbWasmTestBuilder::new_temporary_with_config(chainspec.clone());\n    let genesis_request = chainspec\n        .create_genesis_request(DEFAULT_ACCOUNTS.clone(), DEFAULT_PROTOCOL_VERSION)\n        .unwrap();\n    builder.run_genesis(genesis_request);\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        ADD_BIDS_WASM,\n        runtime_args! 
{\n            ARG_ENTRY_POINT => \"add_bid\",\n            ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            ARG_AMOUNT => U512::from(10_000_000_000_000u64),\n            ARG_DELEGATION_RATE => 19u8,\n        },\n    )\n    .build();\n\n    let commit = builder.exec(exec_request).commit();\n    commit.expect_failure();\n    let last_exec_result = commit\n        .get_last_exec_result()\n        .expect(\"Expected to be called after exec()\");\n    assert!(matches!(\n        last_exec_result.error().cloned(),\n        Some(Error::Exec(ExecError::Revert(ApiError::AuctionError(64))))\n    ));\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/create_purse.rs",
    "content": "use once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT,\n    LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{account::AccountHash, runtime_args, U512};\n\nconst CONTRACT_CREATE_PURSE_01: &str = \"create_purse_01.wasm\";\nconst CONTRACT_TRANSFER_PURSE_TO_ACCOUNT: &str = \"transfer_purse_to_account.wasm\";\nconst ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]);\nconst TEST_PURSE_NAME: &str = \"test_purse\";\nconst ARG_PURSE_NAME: &str = \"purse_name\";\n\nstatic ACCOUNT_1_INITIAL_BALANCE: Lazy<U512> = Lazy::new(|| *DEFAULT_PAYMENT);\n\n#[ignore]\n#[test]\nfn should_insert_account_into_named_keys() {\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_PURSE_TO_ACCOUNT,\n        runtime_args! { \"target\" => ACCOUNT_1_ADDR, \"amount\" => *ACCOUNT_1_INITIAL_BALANCE},\n    )\n    .build();\n\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        ACCOUNT_1_ADDR,\n        CONTRACT_CREATE_PURSE_01,\n        runtime_args! { ARG_PURSE_NAME => TEST_PURSE_NAME },\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let contract_1 = builder\n        .get_entity_with_named_keys_by_account_hash(ACCOUNT_1_ADDR)\n        .expect(\"should have account\");\n\n    assert!(\n        contract_1.named_keys().contains(TEST_PURSE_NAME),\n        \"contract_1 named_keys should include test purse\"\n    );\n}\n\n#[ignore]\n#[test]\nfn should_create_usable_purse() {\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_PURSE_TO_ACCOUNT,\n        runtime_args! 
{ \"target\" => ACCOUNT_1_ADDR, \"amount\" => *ACCOUNT_1_INITIAL_BALANCE},\n    )\n    .build();\n\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        ACCOUNT_1_ADDR,\n        CONTRACT_CREATE_PURSE_01,\n        runtime_args! { ARG_PURSE_NAME => TEST_PURSE_NAME },\n    )\n    .build();\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request_1)\n        .expect_success()\n        .commit()\n        .exec(exec_request_2)\n        .expect_success()\n        .commit();\n\n    let contract_1 = builder\n        .get_entity_with_named_keys_by_account_hash(ACCOUNT_1_ADDR)\n        .expect(\"should have account\");\n\n    let purse = contract_1\n        .named_keys()\n        .get(TEST_PURSE_NAME)\n        .expect(\"should have known key\")\n        .into_uref()\n        .expect(\"should have uref\");\n\n    let purse_balance = builder.get_purse_balance(purse);\n    assert!(\n        purse_balance.is_zero(),\n        \"when created directly a purse has 0 balance\"\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/dictionary.rs",
    "content": "use std::{convert::TryFrom, path::PathBuf};\n\nuse casper_engine_test_support::{\n    utils::create_genesis_config, DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder,\n    TransferRequestBuilder, ARG_AMOUNT, DEFAULT_ACCOUNTS, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_ACCOUNT_INITIAL_BALANCE, DEFAULT_ACCOUNT_PUBLIC_KEY, DEFAULT_CHAINSPEC_REGISTRY,\n    DEFAULT_GENESIS_CONFIG_HASH, DEFAULT_PAYMENT, DEFAULT_PROTOCOL_VERSION, LOCAL_GENESIS_REQUEST,\n    MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse casper_execution_engine::{engine_state::Error as EngineError, execution::ExecError};\nuse casper_storage::data_access_layer::GenesisRequest;\nuse casper_types::{\n    account::AccountHash, addressable_entity::EntityKindTag, runtime_args, AccessRights,\n    AddressableEntityHash, ApiError, CLType, CLValue, GenesisAccount, Key, Motes, RuntimeArgs,\n    StoredValue,\n};\n\nuse dictionary_call::{NEW_DICTIONARY_ITEM_KEY, NEW_DICTIONARY_VALUE};\n\nconst DICTIONARY_WASM: &str = \"dictionary.wasm\";\nconst DICTIONARY_CALL_WASM: &str = \"dictionary_call.wasm\";\nconst DICTIONARY_ITEM_KEY_CHECK: &str = \"dictionary-item-key-check.wasm\";\nconst DICTIONARY_READ: &str = \"dictionary_read.wasm\";\nconst READ_FROM_KEY: &str = \"read_from_key.wasm\";\nconst ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]);\n\nfn setup() -> (LmdbWasmTestBuilder, AddressableEntityHash) {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let fund_request =\n        TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, ACCOUNT_1_ADDR).build();\n\n    let install_contract_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        DICTIONARY_WASM,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.transfer_and_commit(fund_request).expect_success();\n\n    builder\n        .exec(install_contract_request)\n        .commit()\n        .expect_success();\n\n    let 
default_account_entity = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have default account\");\n\n    assert!(default_account_entity\n        .named_keys()\n        .contains(dictionary::MALICIOUS_KEY_NAME));\n    assert!(default_account_entity\n        .named_keys()\n        .contains(dictionary::DICTIONARY_REF));\n\n    let entity_hash = default_account_entity\n        .named_keys()\n        .get(dictionary::CONTRACT_HASH_NAME)\n        .cloned()\n        .and_then(Key::into_entity_hash)\n        .expect(\"should have hash\");\n\n    (builder, entity_hash)\n}\n\nfn query_dictionary_item(\n    builder: &LmdbWasmTestBuilder,\n    key: Key,\n    dictionary_name: Option<String>,\n    dictionary_item_key: String,\n) -> Result<StoredValue, String> {\n    let empty_path = vec![];\n    let dictionary_key_bytes = dictionary_item_key.as_bytes();\n    let address = match key {\n        Key::Hash(_) => {\n            if dictionary_name.is_none() {\n                return Err(\"No dictionary name was provided\".to_string());\n            }\n            let name = dictionary_name.unwrap();\n            let named_keys = builder\n                .query(None, key, &[])?\n                .as_contract()\n                .expect(\"must get contract\")\n                .named_keys()\n                .clone();\n\n            let dictionary_uref = named_keys\n                .get(&name)\n                .and_then(Key::as_uref)\n                .ok_or_else(|| \"No dictionary uref was found in named keys\".to_string())?;\n\n            Key::dictionary(*dictionary_uref, dictionary_key_bytes)\n        }\n        Key::Account(_) => {\n            if dictionary_name.is_none() {\n                return Err(\"No dictionary name was provided\".to_string());\n            }\n            let stored_value = builder.query(None, key, &[])?;\n            match stored_value {\n                StoredValue::CLValue(cl_value) => {\n          
          let entity_hash: AddressableEntityHash = CLValue::into_t::<Key>(cl_value)\n                        .expect(\"must convert to contract hash\")\n                        .into_entity_hash()\n                        .expect(\"must convert to contract hash\");\n\n                    let entity_key =\n                        Key::addressable_entity_key(EntityKindTag::Account, entity_hash);\n\n                    return query_dictionary_item(\n                        builder,\n                        entity_key,\n                        dictionary_name,\n                        dictionary_item_key,\n                    );\n                }\n                StoredValue::Account(account) => {\n                    if let Some(name) = dictionary_name {\n                        let dictionary_uref = account\n                            .named_keys()\n                            .get(&name)\n                            .and_then(Key::as_uref)\n                            .ok_or_else(|| {\n                                \"No dictionary uref was found in named keys\".to_string()\n                            })?;\n\n                        Key::dictionary(*dictionary_uref, dictionary_key_bytes)\n                    } else {\n                        return Err(\"No dictionary name was provided\".to_string());\n                    }\n                }\n                _ => return Err(\"Unhandled stored value\".to_string()),\n            }\n        }\n        Key::AddressableEntity(entity_addr) => {\n            if let Some(name) = dictionary_name {\n                let stored_value = builder.query(None, key, &[])?;\n\n                match &stored_value {\n                    StoredValue::AddressableEntity(_) => {}\n                    _ => {\n                        return Err(\n                            \"Provided base key is nether an account or a contract\".to_string()\n                        );\n                    }\n                };\n\n                let 
named_keys = builder.get_named_keys(entity_addr);\n\n                let dictionary_uref = named_keys\n                    .get(&name)\n                    .and_then(Key::as_uref)\n                    .ok_or_else(|| \"No dictionary uref was found in named keys\".to_string())?;\n\n                Key::dictionary(*dictionary_uref, dictionary_key_bytes)\n            } else {\n                return Err(\"No dictionary name was provided\".to_string());\n            }\n        }\n        Key::URef(uref) => Key::dictionary(uref, dictionary_key_bytes),\n        Key::Dictionary(address) => Key::Dictionary(address),\n        _ => return Err(\"Unsupported key type for a query to a dictionary item\".to_string()),\n    };\n    builder.query(None, address, &empty_path)\n}\n\n#[ignore]\n#[test]\nfn should_modify_with_owned_access_rights() {\n    let (mut builder, contract_hash) = setup();\n\n    let modify_write_request_1 = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        dictionary::MODIFY_WRITE_ENTRYPOINT,\n        RuntimeArgs::default(),\n    )\n    .build();\n    let modify_write_request_2 = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        dictionary::MODIFY_WRITE_ENTRYPOINT,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let contract = builder\n        .get_entity_with_named_keys_by_entity_hash(contract_hash)\n        .expect(\"should have account\");\n\n    let stored_dictionary_key = contract\n        .named_keys()\n        .get(dictionary::DICTIONARY_NAME)\n        .expect(\"dictionary\");\n    let dictionary_seed_uref = stored_dictionary_key.into_uref().expect(\"should be uref\");\n\n    let key_bytes = dictionary::DICTIONARY_PUT_KEY.as_bytes();\n    let dictionary_key = Key::dictionary(dictionary_seed_uref, key_bytes);\n\n    builder\n        .exec(modify_write_request_1)\n        .commit()\n        .expect_success();\n\n    let 
stored_value = builder\n        .query(None, dictionary_seed_uref.into(), &[])\n        .expect(\"should have value\");\n    let dictionary_uref_value = stored_value\n        .as_cl_value()\n        .cloned()\n        .expect(\"should have cl value\");\n    assert_eq!(\n        dictionary_uref_value.cl_type(),\n        &CLType::Unit,\n        \"created dictionary uref should be unit\"\n    );\n\n    let stored_value = builder\n        .query(None, dictionary_key, &[])\n        .expect(\"should have value\");\n    let dictionary_value = stored_value\n        .as_cl_value()\n        .cloned()\n        .expect(\"should have cl value\");\n\n    let value: String = dictionary_value.into_t().expect(\"should be a string\");\n    assert_eq!(value, \"Hello, world!\");\n\n    builder\n        .exec(modify_write_request_2)\n        .commit()\n        .expect_success();\n\n    let stored_value = builder\n        .query(None, dictionary_key, &[])\n        .expect(\"should have value\");\n    let dictionary_value = stored_value\n        .as_cl_value()\n        .cloned()\n        .expect(\"should have cl value\");\n\n    let value: String = dictionary_value.into_t().expect(\"should be a string\");\n    assert_eq!(value, \"Hello, world! Hello, world!\");\n}\n\n#[ignore]\n#[test]\nfn should_not_write_with_read_access_rights() {\n    let (mut builder, contract_hash) = setup();\n\n    let call_request = ExecuteRequestBuilder::standard(\n        ACCOUNT_1_ADDR,\n        DICTIONARY_CALL_WASM,\n        runtime_args! 
{\n            dictionary_call::ARG_OPERATION => dictionary_call::OP_WRITE,\n            dictionary_call::ARG_SHARE_UREF_ENTRYPOINT => dictionary::SHARE_RO_ENTRYPOINT,\n            dictionary_call::ARG_CONTRACT_HASH => contract_hash,\n        },\n    )\n    .build();\n\n    builder.exec(call_request).commit();\n\n    let exec_result = builder.get_last_exec_result().expect(\"should have results\");\n    let error = exec_result.error().expect(\"should have error\");\n    assert!(\n        matches!(\n            error,\n            EngineError::Exec(ExecError::InvalidAccess {\n                required: AccessRights::WRITE\n            })\n        ),\n        \"Received error {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn should_read_with_read_access_rights() {\n    let (mut builder, contract_hash) = setup();\n\n    let call_request = ExecuteRequestBuilder::standard(\n        ACCOUNT_1_ADDR,\n        DICTIONARY_CALL_WASM,\n        runtime_args! {\n            dictionary_call::ARG_OPERATION => dictionary_call::OP_READ,\n            dictionary_call::ARG_SHARE_UREF_ENTRYPOINT => dictionary::SHARE_RO_ENTRYPOINT,\n            dictionary_call::ARG_CONTRACT_HASH => contract_hash,\n        },\n    )\n    .build();\n\n    builder.exec(call_request).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_not_read_with_write_access_rights() {\n    let (mut builder, contract_hash) = setup();\n\n    let call_request = ExecuteRequestBuilder::standard(\n        ACCOUNT_1_ADDR,\n        DICTIONARY_CALL_WASM,\n        runtime_args! 
{\n            dictionary_call::ARG_OPERATION => dictionary_call::OP_READ,\n            dictionary_call::ARG_SHARE_UREF_ENTRYPOINT => dictionary::SHARE_W_ENTRYPOINT,\n            dictionary_call::ARG_CONTRACT_HASH => contract_hash,\n        },\n    )\n    .build();\n\n    builder.exec(call_request).commit();\n\n    let exec_result = builder.get_last_exec_result().expect(\"should have results\");\n    let error = exec_result.error().expect(\"should have error\");\n    assert!(\n        matches!(\n            error,\n            EngineError::Exec(ExecError::InvalidAccess {\n                required: AccessRights::READ\n            })\n        ),\n        \"Received error {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn should_write_with_write_access_rights() {\n    let (mut builder, contract_hash) = setup();\n\n    let call_request = ExecuteRequestBuilder::standard(\n        ACCOUNT_1_ADDR,\n        DICTIONARY_CALL_WASM,\n        runtime_args! {\n            dictionary_call::ARG_OPERATION => dictionary_call::OP_WRITE,\n            dictionary_call::ARG_SHARE_UREF_ENTRYPOINT => dictionary::SHARE_W_ENTRYPOINT,\n            dictionary_call::ARG_CONTRACT_HASH => contract_hash,\n        },\n    )\n    .build();\n\n    builder.exec(call_request).commit();\n\n    let contract = builder\n        .get_entity_with_named_keys_by_entity_hash(contract_hash)\n        .expect(\"should have account\");\n\n    let stored_dictionary_key = contract\n        .named_keys()\n        .get(dictionary::DICTIONARY_NAME)\n        .expect(\"dictionary\");\n    let dictionary_root_uref = stored_dictionary_key.into_uref().expect(\"should be uref\");\n\n    let dictionary_key = Key::dictionary(dictionary_root_uref, NEW_DICTIONARY_ITEM_KEY.as_bytes());\n\n    let result = builder\n        .query(None, dictionary_key, &[])\n        .expect(\"should query\");\n    let value = result.as_cl_value().cloned().expect(\"should have cl value\");\n    let value: String = 
value.into_t().expect(\"should get string\");\n    assert_eq!(value, NEW_DICTIONARY_VALUE);\n}\n\n#[ignore]\n#[test]\nfn should_not_write_with_forged_uref() {\n    let (mut builder, contract_hash) = setup();\n\n    let contract = builder\n        .get_entity_with_named_keys_by_entity_hash(contract_hash)\n        .expect(\"should have account\");\n\n    let stored_dictionary_key = contract\n        .named_keys()\n        .get(dictionary::DICTIONARY_NAME)\n        .expect(\"dictionary\");\n    let dictionary_root_uref = stored_dictionary_key.into_uref().expect(\"should be uref\");\n\n    // Do some extra forging on the uref\n    let forged_uref = dictionary_root_uref.into_read_add_write();\n\n    let call_request = ExecuteRequestBuilder::standard(\n        ACCOUNT_1_ADDR,\n        DICTIONARY_CALL_WASM,\n        runtime_args! {\n            dictionary_call::ARG_OPERATION => dictionary_call::OP_FORGED_UREF_WRITE,\n            dictionary_call::ARG_FORGED_UREF => forged_uref,\n        },\n    )\n    .build();\n\n    builder.exec(call_request).commit();\n\n    let exec_result = builder.get_last_exec_result().expect(\"should have results\");\n    let error = exec_result.error().expect(\"should have error\");\n    assert!(\n        matches!(\n            error,\n            EngineError::Exec(ExecError::ForgedReference(uref))\n            if *uref == forged_uref\n        ),\n        \"Received error {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn should_fail_put_with_invalid_dictionary_item_key() {\n    let (mut builder, contract_hash) = setup();\n    let contract = builder\n        .get_entity_with_named_keys_by_entity_hash(contract_hash)\n        .expect(\"should have account\");\n\n    let _stored_dictionary_key = contract\n        .named_keys()\n        .get(dictionary::DICTIONARY_NAME)\n        .expect(\"dictionary\");\n\n    let call_request = ExecuteRequestBuilder::standard(\n        ACCOUNT_1_ADDR,\n        DICTIONARY_CALL_WASM,\n        runtime_args! 
{\n            dictionary_call::ARG_OPERATION => dictionary_call::OP_INVALID_PUT_DICTIONARY_ITEM_KEY,\n            dictionary_call::ARG_CONTRACT_HASH => contract_hash\n        },\n    )\n    .build();\n\n    builder.exec(call_request).commit();\n    let exec_result = builder.get_last_exec_result().expect(\"should have results\");\n    let error = exec_result.error().expect(\"should have error\");\n    assert!(\n        matches!(\n            error,\n            EngineError::Exec(ExecError::Revert(ApiError::InvalidDictionaryItemKey))\n        ),\n        \"Received error {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn should_fail_get_with_invalid_dictionary_item_key() {\n    let (mut builder, contract_hash) = setup();\n    let contract = builder\n        .get_entity_with_named_keys_by_entity_hash(contract_hash)\n        .expect(\"should have account\");\n\n    let _stored_dictionary_key = contract\n        .named_keys()\n        .get(dictionary::DICTIONARY_NAME)\n        .expect(\"dictionary\");\n\n    let call_request = ExecuteRequestBuilder::standard(\n        ACCOUNT_1_ADDR,\n        DICTIONARY_CALL_WASM,\n        runtime_args! 
{\n            dictionary_call::ARG_OPERATION => dictionary_call::OP_INVALID_GET_DICTIONARY_ITEM_KEY,\n            dictionary_call::ARG_CONTRACT_HASH => contract_hash\n        },\n    )\n    .build();\n\n    builder.exec(call_request).commit();\n    let exec_result = builder.get_last_exec_result().expect(\"should have results\");\n    let error = exec_result.error().expect(\"should have error\");\n    assert!(\n        matches!(\n            error,\n            EngineError::Exec(ExecError::Revert(ApiError::InvalidDictionaryItemKey))\n        ),\n        \"Received error {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn dictionary_put_should_fail_with_large_item_key() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let fund_request =\n        TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, ACCOUNT_1_ADDR).build();\n\n    let install_contract_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        DICTIONARY_ITEM_KEY_CHECK,\n        runtime_args! 
{\n            \"dictionary-operation\" => \"put\"\n        },\n    )\n    .build();\n\n    builder.transfer_and_commit(fund_request).expect_success();\n    builder.exec(install_contract_request).commit();\n    let exec_result = builder.get_last_exec_result().expect(\"should have results\");\n    let error = exec_result.error().expect(\"should have error\");\n    assert!(\n        matches!(\n            error,\n            EngineError::Exec(ExecError::Revert(ApiError::DictionaryItemKeyExceedsLength))\n        ),\n        \"Received error {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn dictionary_get_should_fail_with_large_item_key() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let fund_request =\n        TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, ACCOUNT_1_ADDR).build();\n\n    let install_contract_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        DICTIONARY_ITEM_KEY_CHECK,\n        runtime_args! 
{\n            \"dictionary-operation\" => \"get\"\n        },\n    )\n    .build();\n\n    builder.transfer_and_commit(fund_request).expect_success();\n    builder.exec(install_contract_request).commit();\n    let exec_result = builder.get_last_exec_result().expect(\"should have results\");\n    let error = exec_result.error().expect(\"should have error\");\n    assert!(\n        matches!(\n            error,\n            EngineError::Exec(ExecError::Revert(ApiError::DictionaryItemKeyExceedsLength))\n        ),\n        \"Received error {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn should_query_dictionary_items_with_test_builder() {\n    let genesis_account = GenesisAccount::account(\n        DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n        Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n        None,\n    );\n\n    let mut accounts = vec![genesis_account];\n    accounts.extend((*DEFAULT_ACCOUNTS).clone());\n    let genesis_config = create_genesis_config(accounts);\n    let genesis_request = GenesisRequest::new(\n        DEFAULT_GENESIS_CONFIG_HASH,\n        DEFAULT_PROTOCOL_VERSION,\n        genesis_config,\n        DEFAULT_CHAINSPEC_REGISTRY.clone(),\n    );\n\n    let dictionary_code = PathBuf::from(DICTIONARY_WASM);\n    let deploy_item = DeployItemBuilder::new()\n        .with_standard_payment(runtime_args! 
{ARG_AMOUNT => *DEFAULT_PAYMENT})\n        .with_session_code(dictionary_code, RuntimeArgs::new())\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([42; 32])\n        .build();\n\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(genesis_request).commit();\n\n    builder.exec(exec_request).commit().expect_success();\n\n    let default_account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let entity_hash = default_account\n        .named_keys()\n        .get(dictionary::CONTRACT_HASH_NAME)\n        .expect(\"should have contract\")\n        .into_entity_hash()\n        .expect(\"should have hash\");\n\n    let dictionary_uref = default_account\n        .named_keys()\n        .get(dictionary::DICTIONARY_REF)\n        .expect(\"should have dictionary uref\")\n        .into_uref()\n        .expect(\"should have URef\");\n\n    {\n        // Query through account's named keys\n        let queried_value = query_dictionary_item(\n            &builder,\n            Key::from(*DEFAULT_ACCOUNT_ADDR),\n            Some(dictionary::DICTIONARY_REF.to_string()),\n            dictionary::DEFAULT_DICTIONARY_NAME.to_string(),\n        )\n        .expect(\"should query\");\n        let value = CLValue::try_from(queried_value).expect(\"should have cl value\");\n        let value: String = value.into_t().expect(\"should be string\");\n        assert_eq!(value, dictionary::DEFAULT_DICTIONARY_VALUE);\n    }\n\n    {\n        // Query through account's named keys\n        let queried_value = query_dictionary_item(\n            &builder,\n            Key::from(*DEFAULT_ACCOUNT_ADDR),\n            Some(dictionary::DICTIONARY_REF.to_string()),\n            
dictionary::DEFAULT_DICTIONARY_NAME.to_string(),\n        )\n        .expect(\"should query\");\n        let value = CLValue::try_from(queried_value).expect(\"should have cl value\");\n        let value: String = value.into_t().expect(\"should be string\");\n        assert_eq!(value, dictionary::DEFAULT_DICTIONARY_VALUE);\n    }\n\n    {\n        // Query through contract's named keys\n        let queried_value = query_dictionary_item(\n            &builder,\n            Key::Hash(entity_hash.value()),\n            Some(dictionary::DICTIONARY_NAME.to_string()),\n            dictionary::DEFAULT_DICTIONARY_NAME.to_string(),\n        )\n        .expect(\"should query\");\n        let value = CLValue::try_from(queried_value).expect(\"should have cl value\");\n        let value: String = value.into_t().expect(\"should be string\");\n        assert_eq!(value, dictionary::DEFAULT_DICTIONARY_VALUE);\n    }\n\n    {\n        // Query through dictionary URef itself\n        let queried_value = query_dictionary_item(\n            &builder,\n            Key::from(dictionary_uref),\n            None,\n            dictionary::DEFAULT_DICTIONARY_NAME.to_string(),\n        )\n        .expect(\"should query\");\n        let value = CLValue::try_from(queried_value).expect(\"should have cl value\");\n        let value: String = value.into_t().expect(\"should be string\");\n        assert_eq!(value, dictionary::DEFAULT_DICTIONARY_VALUE);\n    }\n\n    {\n        // Query by computed dictionary item key\n        let dictionary_item_name = dictionary::DEFAULT_DICTIONARY_NAME.as_bytes();\n        let dictionary_item_key = Key::dictionary(dictionary_uref, dictionary_item_name);\n\n        let queried_value =\n            query_dictionary_item(&builder, dictionary_item_key, None, String::new())\n                .expect(\"should query\");\n        let value = CLValue::try_from(queried_value).expect(\"should have cl value\");\n        let value: String = value.into_t().expect(\"should be 
string\");\n        assert_eq!(value, dictionary::DEFAULT_DICTIONARY_VALUE);\n    }\n}\n\n#[ignore]\n#[test]\nfn should_be_able_to_perform_dictionary_read() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let dictionary_session_call =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, DICTIONARY_READ, RuntimeArgs::new())\n            .build();\n\n    builder\n        .exec(dictionary_session_call)\n        .expect_success()\n        .commit();\n}\n\n#[ignore]\n#[test]\nfn should_be_able_to_perform_read_from_key() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let read_from_key_session_call =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, READ_FROM_KEY, RuntimeArgs::new())\n            .build();\n\n    builder\n        .exec(read_from_key_session_call)\n        .expect_success()\n        .commit();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/generic_hash.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{runtime_args, HashAlgorithm};\n\nconst GENERIC_HASH_WASM: &str = \"generic_hash.wasm\";\n\n#[ignore]\n#[test]\nfn should_run_generic_hash_blake2() {\n    LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                GENERIC_HASH_WASM,\n                runtime_args! {\n                    \"data\" => \"blake2 hash test\",\n                    \"algorithm\" => HashAlgorithm::Blake2b as u8,\n                    \"expected\" => [0x0A, 0x24, 0xA2, 0xDF, 0x30, 0x46, 0x1F, 0xA9, 0x69, 0x36, 0x67, 0x97, 0xE4, 0xD4, 0x30, 0xA1, 0x13, 0xC6, 0xCE, 0xE2, 0x78, 0xB5, 0xEF, 0x63, 0xBD, 0x5D, 0x00, 0xA0, 0xA6, 0x61, 0x1E, 0x29]\n                },\n            )\n            .build(),\n        )\n        .expect_success()\n        .commit();\n}\n\n#[ignore]\n#[test]\nfn should_run_generic_hash_blake3() {\n    LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                GENERIC_HASH_WASM,\n                runtime_args! 
{\n                    \"data\" => \"blake3 hash test\",\n                    \"algorithm\" => HashAlgorithm::Blake3 as u8,\n                    \"expected\" => [0x01, 0x65, 0x7D, 0x50, 0x0C, 0x51, 0x9B, 0xB6, 0x8D, 0x01, 0x26, 0x53, 0x66, 0xE2, 0x72, 0x2E, 0x1A, 0x05, 0x65, 0x2E, 0xD7, 0x0C, 0x77, 0xB0, 0x06, 0x80, 0xF8, 0xE8, 0x9E, 0xF9, 0x0F, 0xA1]\n                },\n            )\n            .build(),\n        )\n        .expect_success()\n        .commit();\n}\n\n#[ignore]\n#[test]\nfn should_run_generic_hash_sha256() {\n    LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                GENERIC_HASH_WASM,\n                runtime_args! {\n                    \"data\" => \"sha256 hash test\",\n                    \"algorithm\" => HashAlgorithm::Sha256 as u8,\n                    \"expected\" => [0x29, 0xD2, 0xC7, 0x7B, 0x39, 0x7F, 0xF6, 0x9E, 0x25, 0x0D, 0x81, 0xA3, 0xBA, 0xBB, 0x32, 0xDE, 0xFF, 0x3C, 0x2D, 0x06, 0xC9, 0x8E, 0x5E, 0x73, 0x60, 0x54, 0x3C, 0xE4, 0x91, 0xAC, 0x81, 0xCA]\n                },\n            )\n            .build(),\n        )\n        .expect_success()\n        .commit();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/get_arg.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{runtime_args, ApiError, RuntimeArgs, U512};\n\nconst CONTRACT_GET_ARG: &str = \"get_arg.wasm\";\nconst ARG0_VALUE: &str = \"Hello, world!\";\nconst ARG1_VALUE: u64 = 42;\nconst ARG_VALUE0: &str = \"value0\";\nconst ARG_VALUE1: &str = \"value1\";\n\n/// Calls get_arg contract and returns Ok(()) in case no error, or String which is the error message\n/// returned by the engine\nfn call_get_arg(args: RuntimeArgs) -> Result<(), String> {\n    let exec_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, CONTRACT_GET_ARG, args).build();\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .commit();\n\n    if !builder.is_error() {\n        return Ok(());\n    }\n\n    let error_message = builder.get_error_message().expect(\"should have a result\");\n\n    Err(error_message)\n}\n\n#[ignore]\n#[test]\nfn should_use_passed_argument() {\n    let args = runtime_args! {\n        ARG_VALUE0 => ARG0_VALUE,\n        ARG_VALUE1 => U512::from(ARG1_VALUE),\n    };\n    call_get_arg(args).expect(\"Should successfully call get_arg with 2 valid args\");\n}\n\n#[ignore]\n#[test]\nfn should_revert_with_missing_arg() {\n    assert!(call_get_arg(RuntimeArgs::default())\n        .expect_err(\"should fail\")\n        .contains(&format!(\"{:?}\", ApiError::MissingArgument),));\n    assert!(\n        call_get_arg(runtime_args! { ARG_VALUE0 => String::from(ARG0_VALUE) })\n            .expect_err(\"should fail\")\n            .contains(&format!(\"{:?}\", ApiError::MissingArgument))\n    );\n}\n\n#[ignore]\n#[test]\nfn should_revert_with_invalid_argument() {\n    let res1 =\n        call_get_arg(runtime_args! 
{ARG_VALUE0 =>  U512::from(123)}).expect_err(\"should fail\");\n    assert!(\n        res1.contains(&format!(\"{:?}\", ApiError::InvalidArgument,)),\n        \"res1: {:?}\",\n        res1\n    );\n\n    let res2 = call_get_arg(runtime_args! {\n        ARG_VALUE0 => String::from(ARG0_VALUE),\n        ARG_VALUE1 => String::from(\"this is expected to be U512\"),\n    })\n    .expect_err(\"should fail\");\n\n    assert!(\n        res2.contains(&format!(\"{:?}\", ApiError::InvalidArgument,)),\n        \"res2:{:?}\",\n        res2\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/get_block_info.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{bytesrepr::ToBytes, runtime_args, BlockHash, ProtocolVersion};\n\nconst CONTRACT_GET_BLOCKINFO: &str = \"get_blockinfo.wasm\";\nconst ARG_FIELD_IDX: &str = \"field_idx\";\n\nconst FIELD_IDX_BLOCK_TIME: u8 = 0;\nconst ARG_KNOWN_BLOCK_TIME: &str = \"known_block_time\";\n\n#[ignore]\n#[test]\nfn should_run_get_block_time() {\n    let block_time: u64 = 42;\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_GET_BLOCKINFO,\n        runtime_args! {\n            ARG_FIELD_IDX => FIELD_IDX_BLOCK_TIME,\n            ARG_KNOWN_BLOCK_TIME => block_time\n        },\n    )\n    .with_block_time(block_time)\n    .build();\n    LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .commit()\n        .expect_success();\n}\n\nconst FIELD_IDX_BLOCK_HEIGHT: u8 = 1;\nconst ARG_KNOWN_BLOCK_HEIGHT: &str = \"known_block_height\";\n\n#[ignore]\n#[test]\nfn should_run_get_block_height() {\n    let block_height: u64 = 1;\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_GET_BLOCKINFO,\n        runtime_args! 
{\n            ARG_FIELD_IDX => FIELD_IDX_BLOCK_HEIGHT,\n            ARG_KNOWN_BLOCK_HEIGHT => block_height\n        },\n    )\n    .with_block_height(block_height)\n    .build();\n    LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .expect_success()\n        .commit();\n}\n\nconst FIELD_IDX_PARENT_BLOCK_HASH: u8 = 2;\nconst ARG_KNOWN_BLOCK_PARENT_HASH: &str = \"known_block_parent_hash\";\n\n#[ignore]\n#[test]\nfn should_run_get_block_parent_hash() {\n    let block_hash = BlockHash::default();\n    let digest = block_hash.inner();\n    let digest_bytes = digest.to_bytes().expect(\"should serialize\");\n    let bytes = casper_types::bytesrepr::Bytes::from(digest_bytes);\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_GET_BLOCKINFO,\n        runtime_args! {\n            ARG_FIELD_IDX => FIELD_IDX_PARENT_BLOCK_HASH,\n            ARG_KNOWN_BLOCK_PARENT_HASH => bytes\n        },\n    )\n    .with_parent_block_hash(block_hash)\n    .build();\n    LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .expect_success()\n        .commit();\n}\n\nconst FIELD_IDX_STATE_HASH: u8 = 3;\nconst ARG_KNOWN_STATE_HASH: &str = \"known_state_hash\";\n\n#[ignore]\n#[test]\nfn should_run_get_state_hash() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let state_hash = builder.get_post_state_hash();\n    let digest_bytes = state_hash.to_bytes().expect(\"should serialize\");\n    let bytes = casper_types::bytesrepr::Bytes::from(digest_bytes);\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_GET_BLOCKINFO,\n        runtime_args! 
{\n            ARG_FIELD_IDX => FIELD_IDX_STATE_HASH,\n            ARG_KNOWN_STATE_HASH => bytes\n        },\n    )\n    .with_state_hash(state_hash)\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n}\n\nconst FIELD_IDX_PROTOCOL_VERSION: u8 = 4;\nconst ARG_KNOWN_PROTOCOL_VERSION: &str = \"known_protocol_version\";\n\n#[ignore]\n#[test]\nfn should_run_get_protocol_version() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let protocol_version = ProtocolVersion::V2_0_0;\n    let protocol_version_bytes = protocol_version.to_bytes().expect(\"should_serialize\");\n    let bytes = casper_types::bytesrepr::Bytes::from(protocol_version_bytes);\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_GET_BLOCKINFO,\n        runtime_args! {\n            ARG_FIELD_IDX => FIELD_IDX_PROTOCOL_VERSION,\n            ARG_KNOWN_PROTOCOL_VERSION => bytes\n        },\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n}\n\nconst FIELD_IDX_ADDRESSABLE_ENTITY: u8 = 5;\nconst ARG_KNOWN_ADDRESSABLE_ENTITY: &str = \"known_addressable_entity\";\n\n#[ignore]\n#[test]\nfn should_run_get_addressable_entity() {\n    let addressable_entity: bool = false;\n    let addressable_entity_bytes = addressable_entity.to_bytes().expect(\"should_serialize\");\n    let bytes = casper_types::bytesrepr::Bytes::from(addressable_entity_bytes);\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_GET_BLOCKINFO,\n        runtime_args! {\n            ARG_FIELD_IDX => FIELD_IDX_ADDRESSABLE_ENTITY,\n            ARG_KNOWN_ADDRESSABLE_ENTITY => bytes\n        },\n    )\n    .build();\n\n    LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .commit()\n        .expect_success();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/get_blocktime.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::runtime_args;\n\nconst CONTRACT_GET_BLOCKTIME: &str = \"get_blocktime.wasm\";\nconst ARG_KNOWN_BLOCK_TIME: &str = \"known_block_time\";\n\n#[ignore]\n#[test]\nfn should_run_get_blocktime_contract() {\n    let block_time: u64 = 42;\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_GET_BLOCKTIME,\n        runtime_args! { ARG_KNOWN_BLOCK_TIME => block_time },\n    )\n    .with_block_time(block_time)\n    .build();\n    LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .commit()\n        .expect_success();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/get_call_stack.rs",
    "content": "use num_traits::One;\n\nuse casper_engine_test_support::{\n    ExecuteRequest, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR,\n    LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::{engine_state::Error as CoreError, execution::ExecError};\nuse casper_types::{\n    account::{Account, AccountHash},\n    contracts::{ContractHash, ContractPackageHash},\n    runtime_args,\n    system::{Caller, CallerInfo},\n    CLValue, EntityAddr, EntryPointType, HashAddr, Key, PackageHash, StoredValue, U512,\n};\n\nuse get_call_stack_recursive_subcall::{\n    Call, ContractAddress, ARG_CALLS, ARG_CURRENT_DEPTH, METHOD_FORWARDER_CONTRACT_NAME,\n    METHOD_FORWARDER_SESSION_NAME,\n};\n\nconst CONTRACT_RECURSIVE_SUBCALL: &str = \"get_call_stack_recursive_subcall.wasm\";\nconst CONTRACT_CALL_RECURSIVE_SUBCALL: &str = \"get_call_stack_call_recursive_subcall.wasm\";\n\nconst CONTRACT_PACKAGE_NAME: &str = \"forwarder\";\nconst CONTRACT_NAME: &str = \"our_contract_name\";\n\nconst CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT: &str = METHOD_FORWARDER_CONTRACT_NAME;\nconst CONTRACT_FORWARDER_ENTRYPOINT_SESSION: &str = METHOD_FORWARDER_SESSION_NAME;\n\nfn stored_session(contract_hash: ContractHash) -> Call {\n    Call {\n        contract_address: ContractAddress::ContractHash(contract_hash),\n        target_method: CONTRACT_FORWARDER_ENTRYPOINT_SESSION.to_string(),\n        entry_point_type: EntryPointType::Caller,\n    }\n}\n\nfn stored_versioned_session(contract_package_hash: ContractPackageHash) -> Call {\n    Call {\n        contract_address: ContractAddress::ContractPackageHash(contract_package_hash),\n        target_method: CONTRACT_FORWARDER_ENTRYPOINT_SESSION.to_string(),\n        entry_point_type: EntryPointType::Caller,\n    }\n}\n\nfn stored_contract(contract_hash: ContractHash) -> Call {\n    Call {\n        contract_address: ContractAddress::ContractHash(contract_hash),\n        target_method: CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT.to_string(),\n  
      entry_point_type: EntryPointType::Called,\n    }\n}\n\nfn stored_versioned_contract(contract_package_hash: ContractPackageHash) -> Call {\n    Call {\n        contract_address: ContractAddress::ContractPackageHash(contract_package_hash),\n        target_method: CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT.to_string(),\n        entry_point_type: EntryPointType::Called,\n    }\n}\n\nfn store_contract(builder: &mut LmdbWasmTestBuilder, session_filename: &str) {\n    let store_contract_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, session_filename, runtime_args! {})\n            .build();\n    builder\n        .exec(store_contract_request)\n        .commit()\n        .expect_success();\n}\n\nfn execute_and_assert_result(\n    call_depth: usize,\n    builder: &mut LmdbWasmTestBuilder,\n    execute_request: ExecuteRequest,\n    is_invalid_context: bool,\n) {\n    if call_depth == 0 {\n        builder.exec(execute_request).commit().expect_success();\n    } else if is_invalid_context {\n        builder.exec(execute_request).commit().expect_failure();\n        let error = builder.get_error().expect(\"must have an error\");\n        assert!(matches!(\n            error,\n            // Call chains have stored contract trying to call stored session which we don't\n            // support and is an actual error.\n            CoreError::Exec(ExecError::InvalidContext)\n        ));\n    }\n}\n\n// Constant from the contracts used in the tests below.\nconst LARGE_AMOUNT: u64 = 1_500_000_000_000;\n\n// In the payment or session phase, this test will try to transfer `len + 1` times\n// a fixed amount of `1_500_000_000_000` from the main purse of the account.\n// We need to provide an explicit approval via passing that as an `amount` argument.\npub fn approved_amount(idx: usize) -> U512 {\n    U512::from(LARGE_AMOUNT * (idx + 1) as u64)\n}\n\ntrait AccountExt {\n    fn get_hash(&self, key: &str) -> HashAddr;\n}\n\nimpl AccountExt for Account {\n    fn 
get_hash(&self, key: &str) -> HashAddr {\n        self.named_keys()\n            .get(key)\n            .cloned()\n            .and_then(Key::into_hash_addr)\n            .unwrap()\n    }\n}\n\ntrait BuilderExt {\n    fn get_call_stack_from_session_context(&mut self, stored_call_stack_key: &str) -> Vec<Caller>;\n\n    fn get_call_stack_from_contract_context(\n        &mut self,\n        stored_call_stack_key: &str,\n        contract_package_hash: HashAddr,\n    ) -> Vec<Caller>;\n}\n\nimpl BuilderExt for LmdbWasmTestBuilder {\n    fn get_call_stack_from_session_context(&mut self, stored_call_stack_key: &str) -> Vec<Caller> {\n        let cl_value = self\n            .query(\n                None,\n                (*DEFAULT_ACCOUNT_ADDR).into(),\n                &[stored_call_stack_key.to_string()],\n            )\n            .unwrap();\n\n        let caller_info = cl_value\n            .into_cl_value()\n            .map(CLValue::into_t::<Vec<CallerInfo>>)\n            .unwrap()\n            .unwrap();\n\n        let mut callers = vec![];\n\n        for info in caller_info {\n            let kind = info.kind();\n            match kind {\n                0 => {\n                    let account_hash = info\n                        .get_field_by_index(0)\n                        .map(|val| {\n                            val.to_t::<Option<AccountHash>>()\n                                .expect(\"must convert out of cl_value\")\n                        })\n                        .expect(\"must have index 0 in fields\")\n                        .expect(\"account hash must be some\");\n                    callers.push(Caller::Initiator { account_hash });\n                }\n                3 => {\n                    let package_hash = info\n                        .get_field_by_index(1)\n                        .map(|val| {\n                            val.to_t::<Option<PackageHash>>()\n                                .expect(\"must convert out of cl_value\")\n         
               })\n                        .expect(\"must have index 1 in fields\")\n                        .expect(\"package hash must be some\");\n                    let entity_addr = info\n                        .get_field_by_index(3)\n                        .map(|val| {\n                            val.to_t::<Option<EntityAddr>>()\n                                .expect(\"must convert out of cl_value\")\n                        })\n                        .expect(\"must have index 3 in fields\")\n                        .expect(\"entity addr must be some\");\n                    callers.push(Caller::Entity {\n                        package_hash,\n                        entity_addr,\n                    });\n                }\n                4 => {\n                    let contract_package_hash = info\n                        .get_field_by_index(2)\n                        .map(|val| {\n                            val.to_t::<Option<ContractPackageHash>>()\n                                .expect(\"must convert out of cl_value\")\n                        })\n                        .expect(\"must have index 2 in fields\")\n                        .expect(\"contract package hash must be some\");\n                    let contract_hash = info\n                        .get_field_by_index(4)\n                        .map(|val| {\n                            val.to_t::<Option<ContractHash>>()\n                                .expect(\"must convert out of cl_value\")\n                        })\n                        .expect(\"must have index 4 in fields\")\n                        .expect(\"contract hash must be some\");\n                    callers.push(Caller::SmartContract {\n                        contract_package_hash,\n                        contract_hash,\n                    });\n                }\n                _ => panic!(\"unhandled kind\"),\n            }\n        }\n\n        callers\n    }\n\n    fn get_call_stack_from_contract_context(\n    
    &mut self,\n        stored_call_stack_key: &str,\n        contract_package_hash: HashAddr,\n    ) -> Vec<Caller> {\n        let value = self\n            .query(None, Key::Hash(contract_package_hash), &[])\n            .unwrap();\n\n        let contract_package = match value {\n            StoredValue::ContractPackage(package) => package,\n            _ => panic!(\"unreachable\"),\n        };\n\n        let current_contract_hash = contract_package.current_contract_hash().unwrap();\n\n        let cl_value = self\n            .query(\n                None,\n                current_contract_hash.into(),\n                &[stored_call_stack_key.to_string()],\n            )\n            .unwrap();\n\n        let stack_elements = cl_value\n            .into_cl_value()\n            .map(CLValue::into_t::<Vec<CallerInfo>>)\n            .unwrap()\n            .unwrap();\n\n        let mut callers = vec![];\n\n        for info in stack_elements {\n            let kind = info.kind();\n            match kind {\n                0 => {\n                    let account_hash = info\n                        .get_field_by_index(0)\n                        .map(|val| {\n                            val.to_t::<Option<AccountHash>>()\n                                .expect(\"must convert out of cl_value\")\n                        })\n                        .expect(\"must have index 0 in fields\")\n                        .expect(\"account hash must be some\");\n                    callers.push(Caller::Initiator { account_hash });\n                }\n                3 => {\n                    let package_hash = info\n                        .get_field_by_index(1)\n                        .map(|val| {\n                            val.to_t::<Option<PackageHash>>()\n                                .expect(\"must convert out of cl_value\")\n                        })\n                        .expect(\"must have index 1 in fields\")\n                        .expect(\"package hash must 
be some\");\n                    let entity_addr = info\n                        .get_field_by_index(3)\n                        .map(|val| {\n                            val.to_t::<Option<EntityAddr>>()\n                                .expect(\"must convert out of cl_value\")\n                        })\n                        .expect(\"must have index 3 in fields\")\n                        .expect(\"entity addr must be some\");\n                    callers.push(Caller::Entity {\n                        package_hash,\n                        entity_addr,\n                    });\n                }\n                4 => {\n                    let contract_package_hash = info\n                        .get_field_by_index(2)\n                        .map(|val| {\n                            val.to_t::<Option<ContractPackageHash>>()\n                                .expect(\"must convert out of cl_value\")\n                        })\n                        .expect(\"must have index 2 in fields\")\n                        .expect(\"contract package hash must be some\");\n                    let contract_hash = info\n                        .get_field_by_index(4)\n                        .map(|val| {\n                            val.to_t::<Option<ContractHash>>()\n                                .expect(\"must convert out of cl_value\")\n                        })\n                        .expect(\"must have index 4 in fields\")\n                        .expect(\"contract hash must be some\");\n                    callers.push(Caller::SmartContract {\n                        contract_package_hash,\n                        contract_hash,\n                    });\n                }\n                _ => panic!(\"unhandled kind\"),\n            }\n        }\n\n        callers\n    }\n}\n\nfn setup() -> LmdbWasmTestBuilder {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n    store_contract(&mut builder, 
CONTRACT_RECURSIVE_SUBCALL);\n    builder\n}\n\nfn assert_each_context_has_correct_call_stack_info(\n    builder: &mut LmdbWasmTestBuilder,\n    top_level_call: Call,\n    mut subcalls: Vec<Call>,\n    current_contract_package_hash: HashAddr,\n) {\n    let mut calls = vec![top_level_call];\n    calls.append(&mut subcalls);\n\n    // query for and verify that all the elements in the call stack match their\n    // pre-defined Call element\n    for (i, call) in calls.iter().enumerate() {\n        let stored_call_stack_key = format!(\"call_stack-{}\", i);\n        // we need to know where to look for the call stack information\n        let call_stack = match call.entry_point_type {\n            EntryPointType::Called | EntryPointType::Factory => builder\n                .get_call_stack_from_contract_context(\n                    &stored_call_stack_key,\n                    current_contract_package_hash,\n                ),\n            EntryPointType::Caller => {\n                builder.get_call_stack_from_session_context(&stored_call_stack_key)\n            }\n        };\n        assert_eq!(\n            call_stack.len(),\n            i + 2,\n            \"call stack len was an unexpected size {}, should be {} {:#?}\",\n            call_stack.len(),\n            i + 2,\n            call_stack,\n        );\n        let (head, rest) = call_stack.split_at(usize::one());\n\n        assert_eq!(\n            head,\n            [Caller::Initiator {\n                account_hash: *DEFAULT_ACCOUNT_ADDR,\n            }],\n        );\n        assert_call_stack_matches_calls(rest.to_vec(), &calls);\n    }\n}\n\nfn assert_invalid_context(builder: &mut LmdbWasmTestBuilder, depth: usize) {\n    if depth == 0 {\n        builder.expect_success();\n    } else {\n        let error = builder.get_error().unwrap();\n        assert!(matches!(\n            error,\n            casper_execution_engine::engine_state::Error::Exec(ExecError::InvalidContext)\n        ));\n    }\n}\n\nfn 
assert_each_context_has_correct_call_stack_info_module_bytes(\n    builder: &mut LmdbWasmTestBuilder,\n    subcalls: Vec<Call>,\n    current_contract_package_hash: HashAddr,\n) {\n    let stored_call_stack_key = format!(\"call_stack-{}\", 0);\n    let call_stack = builder.get_call_stack_from_session_context(&stored_call_stack_key);\n    let (head, _) = call_stack.split_at(usize::one());\n    assert_eq!(\n        head,\n        [Caller::Initiator {\n            account_hash: *DEFAULT_ACCOUNT_ADDR,\n        }],\n    );\n\n    for (i, call) in (1..=subcalls.len()).zip(subcalls.iter()) {\n        let stored_call_stack_key = format!(\"call_stack-{}\", i);\n        // we need to know where to look for the call stack information\n        let call_stack = match call.entry_point_type {\n            EntryPointType::Called | EntryPointType::Factory => builder\n                .get_call_stack_from_contract_context(\n                    &stored_call_stack_key,\n                    current_contract_package_hash,\n                ),\n            EntryPointType::Caller => {\n                builder.get_call_stack_from_session_context(&stored_call_stack_key)\n            }\n        };\n        let (head, rest) = call_stack.split_at(usize::one());\n        assert_eq!(\n            head,\n            [Caller::Initiator {\n                account_hash: *DEFAULT_ACCOUNT_ADDR,\n            }],\n        );\n        assert_call_stack_matches_calls(rest.to_vec(), &subcalls);\n    }\n}\n\nfn assert_call_stack_matches_calls(call_stack: Vec<Caller>, calls: &[Call]) {\n    for (index, expected_call_stack_element) in call_stack.iter().enumerate() {\n        let maybe_call = calls.get(index);\n        match (maybe_call, expected_call_stack_element) {\n            // Versioned Call with EntryPointType::Contract\n            (\n                Some(Call {\n                    entry_point_type,\n                    contract_address:\n                        
ContractAddress::ContractPackageHash(current_contract_package_hash),\n                    ..\n                }),\n                Caller::SmartContract {\n                    contract_package_hash,\n                    ..\n                },\n            ) if *entry_point_type == EntryPointType::Called\n                && contract_package_hash.value() == current_contract_package_hash.value() => {}\n\n            // Unversioned Call with EntryPointType::Called\n            (\n                Some(Call {\n                    entry_point_type,\n                    contract_address: ContractAddress::ContractHash(current_contract_hash),\n                    ..\n                }),\n                Caller::SmartContract { contract_hash, .. },\n            ) if *entry_point_type == EntryPointType::Called\n                && contract_hash.value() == current_contract_hash.value() => {}\n\n            // Versioned Call with EntryPointType::Session\n            (\n                Some(Call {\n                    entry_point_type,\n                    contract_address:\n                        ContractAddress::ContractPackageHash(current_contract_package_hash),\n                    ..\n                }),\n                Caller::SmartContract {\n                    contract_package_hash,\n                    ..\n                },\n            ) if *entry_point_type == EntryPointType::Caller\n                && *contract_package_hash == *current_contract_package_hash => {}\n\n            // Unversioned Call with EntryPointType::Session\n            (\n                Some(Call {\n                    entry_point_type,\n                    contract_address: ContractAddress::ContractHash(current_contract_hash),\n                    ..\n                }),\n                Caller::SmartContract { contract_hash, .. 
},\n            ) if *entry_point_type == EntryPointType::Caller\n                && contract_hash.value() == current_contract_hash.value() => {}\n\n            _ => panic!(\n                \"call stack element {:#?} didn't match expected call {:#?} at index {}, {:#?}\",\n                expected_call_stack_element, maybe_call, index, call_stack,\n            ),\n        }\n    }\n}\n\nmod session {\n\n    use casper_engine_test_support::{ExecuteRequestBuilder, DEFAULT_ACCOUNT_ADDR};\n    use casper_types::{execution::TransformKindV2, runtime_args, system::mint, Key};\n\n    use super::{\n        approved_amount, AccountExt, ARG_CALLS, ARG_CURRENT_DEPTH, CONTRACT_CALL_RECURSIVE_SUBCALL,\n        CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n        CONTRACT_NAME, CONTRACT_PACKAGE_NAME,\n    };\n\n    // DEPTHS should not contain 1, as it will eliminate the initial element from the subcalls\n    // vector\n    const DEPTHS: &[usize] = &[0, 2, 5, 10];\n\n    // Session + recursive subcall\n\n    #[ignore]\n    #[test]\n    fn session_bytes_to_stored_versioned_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let subcalls =\n                vec![super::stored_versioned_contract(current_contract_package_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_CALL_RECURSIVE_SUBCALL,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info_module_bytes(\n                &mut builder,\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn session_bytes_to_stored_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_contract(current_contract_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_CALL_RECURSIVE_SUBCALL,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info_module_bytes(\n                &mut builder,\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn session_bytes_to_stored_versioned_contract_to_stored_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![\n                    super::stored_versioned_contract(current_contract_package_hash.into());\n                    len.saturating_sub(1)\n                ];\n            if *len > 0 {\n                subcalls.push(super::stored_contract(current_contract_hash.into()));\n            }\n\n            let execute_request = ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_CALL_RECURSIVE_SUBCALL,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info_module_bytes(\n                &mut builder,\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn session_bytes_to_stored_contract_to_stored_versioned_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![super::stored_contract(current_contract_hash.into()); len.saturating_sub(1)];\n            if *len > 0 {\n                subcalls.push(super::stored_versioned_contract(\n                    current_contract_package_hash.into(),\n                ));\n            }\n\n            let execute_request = ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_CALL_RECURSIVE_SUBCALL,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info_module_bytes(\n                &mut builder,\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn session_bytes_to_stored_versioned_session_to_stored_versioned_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            println!(\"{:?}\", default_account);\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let mut subcalls =\n                vec![\n                    super::stored_versioned_session(current_contract_package_hash.into());\n                    len.saturating_sub(1)\n                ];\n            if *len > 0 {\n                subcalls.push(super::stored_versioned_contract(\n                    current_contract_package_hash.into(),\n                ));\n            }\n\n            let execute_request = ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_CALL_RECURSIVE_SUBCALL,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info_module_bytes(\n                &mut builder,\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn session_bytes_to_stored_versioned_session_to_stored_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![\n                    super::stored_versioned_session(current_contract_package_hash.into());\n                    len.saturating_sub(1)\n                ];\n            if *len > 0 {\n                subcalls.push(super::stored_contract(current_contract_hash.into()));\n            }\n\n            let execute_request = ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_CALL_RECURSIVE_SUBCALL,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info_module_bytes(\n                &mut builder,\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn session_bytes_to_stored_session_to_stored_versioned_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![super::stored_session(current_contract_hash.into()); len.saturating_sub(1)];\n            if *len > 0 {\n                subcalls.push(super::stored_versioned_contract(\n                    current_contract_package_hash.into(),\n                ));\n            }\n\n            let execute_request = ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_CALL_RECURSIVE_SUBCALL,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info_module_bytes(\n                &mut builder,\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn session_bytes_to_stored_session_to_stored_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![super::stored_session(current_contract_hash.into()); len.saturating_sub(1)];\n            if *len > 0 {\n                subcalls.push(super::stored_contract(current_contract_hash.into()));\n            }\n\n            let execute_request = ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_CALL_RECURSIVE_SUBCALL,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info_module_bytes(\n                &mut builder,\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn session_bytes_to_stored_versioned_session() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let subcalls =\n                vec![super::stored_versioned_session(current_contract_package_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_CALL_RECURSIVE_SUBCALL,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info_module_bytes(\n                &mut builder,\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn session_bytes_to_stored_versioned_session_to_stored_session() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![\n                    super::stored_versioned_session(current_contract_package_hash.into());\n                    len.saturating_sub(1)\n                ];\n            if *len > 0 {\n                subcalls.push(super::stored_session(current_contract_hash.into()));\n            }\n\n            let execute_request = ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_CALL_RECURSIVE_SUBCALL,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info_module_bytes(\n                &mut builder,\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn session_bytes_to_stored_session_to_stored_versioned_session() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![super::stored_session(current_contract_hash.into()); len.saturating_sub(1)];\n            if *len > 0 {\n                subcalls.push(super::stored_versioned_session(\n                    current_contract_package_hash.into(),\n                ));\n            }\n\n            let execute_request = ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_CALL_RECURSIVE_SUBCALL,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info_module_bytes(\n                &mut builder,\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn session_bytes_to_stored_session() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_session(current_contract_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_CALL_RECURSIVE_SUBCALL,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info_module_bytes(\n                &mut builder,\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    // Session + recursive subcall failure cases\n\n    #[ignore]\n    #[test]\n    fn session_bytes_to_stored_versioned_contract_to_stored_versioned_session_should_fail() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let mut subcalls =\n                vec![\n                    super::stored_versioned_contract(current_contract_package_hash.into());\n                    len.saturating_sub(1)\n                ];\n            if *len > 0 {\n                subcalls.push(super::stored_versioned_session(\n                    current_contract_package_hash.into(),\n                ));\n            }\n\n            let execute_request = ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_CALL_RECURSIVE_SUBCALL,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn session_bytes_to_stored_versioned_contract_to_stored_session_should_fail() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![\n                    super::stored_versioned_contract(current_contract_package_hash.into());\n                    len.saturating_sub(1)\n                ];\n            if *len > 0 {\n                subcalls.push(super::stored_session(current_contract_hash.into()));\n            }\n\n            let execute_request = ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_CALL_RECURSIVE_SUBCALL,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn session_bytes_to_stored_contract_to_stored_versioned_session_should_fail() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![super::stored_contract(current_contract_hash.into()); len.saturating_sub(1)];\n            if *len > 0 {\n                subcalls.push(super::stored_versioned_session(\n                    current_contract_package_hash.into(),\n                ));\n            }\n\n            let execute_request = ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_CALL_RECURSIVE_SUBCALL,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn session_bytes_to_stored_contract_to_stored_session_should_fail() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![super::stored_contract(current_contract_hash.into()); len.saturating_sub(1)];\n            if *len > 0 {\n                subcalls.push(super::stored_session(current_contract_hash.into()));\n            }\n\n            let execute_request = ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_CALL_RECURSIVE_SUBCALL,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    // Stored contract + recursive subcall\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_contract_by_name_to_stored_versioned_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let subcalls =\n                vec![super::stored_versioned_contract(current_contract_package_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_PACKAGE_NAME,\n                None,\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_versioned_contract(current_contract_package_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_contract_by_hash_to_stored_versioned_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let subcalls =\n                vec![super::stored_versioned_contract(current_contract_package_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_package_hash.into(),\n                None,\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            let effects = builder.get_effects().last().unwrap().clone();\n\n            let key = if builder.chainspec().core_config.enable_addressable_entity {\n                Key::SmartContract(current_contract_package_hash)\n            } else {\n                Key::Hash(current_contract_package_hash)\n            };\n\n            assert!(\n                effects\n                    .transforms()\n                    .iter()\n                    .any(|transform| transform.key() == &key\n                        && transform.kind() == &TransformKindV2::Identity),\n                \"Missing `Identity` transform for a contract package being called.\"\n            );\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_versioned_contract(current_contract_package_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_contract_by_name_to_stored_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_contract(current_contract_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_PACKAGE_NAME,\n              
  None,\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! {\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_versioned_contract(current_contract_package_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_contract_by_hash_to_stored_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_contract(current_contract_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_package_hash.into(),\n                None,\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_versioned_contract(current_contract_package_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_contract_by_name_to_stored_versioned_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls =\n                vec![super::stored_versioned_contract(current_contract_package_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_NAME,\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_contract(current_contract_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_contract_by_hash_to_stored_versioned_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls =\n                vec![super::stored_versioned_contract(current_contract_package_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_hash.into(),\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            let effects = builder.get_effects().last().unwrap().clone();\n\n            assert!(\n                effects.transforms().iter().any(|transform| transform.key()\n                    == &Key::Hash(current_contract_hash)\n                    && transform.kind() == &TransformKindV2::Identity),\n                \"Missing `Identity` transform for a contract being called.\"\n            );\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_versioned_contract(current_contract_package_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_contract_by_name_to_stored_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_contract(current_contract_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_NAME,\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_contract(current_contract_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_contract_by_hash_to_stored_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_contract(current_contract_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_hash.into(),\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_contract(current_contract_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    // Stored contract + recursive subcall failure cases\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_contract_by_name_to_stored_versioned_session_should_fail() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let subcalls =\n                vec![super::stored_versioned_session(current_contract_package_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_PACKAGE_NAME,\n                None,\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_contract_by_hash_to_stored_versioned_session_should_fail() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let subcalls =\n                vec![super::stored_versioned_session(current_contract_package_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_package_hash.into(),\n                None,\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_contract_by_name_to_stored_session_should_fail() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_session(current_contract_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_PACKAGE_NAME,\n                None,\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_contract_by_hash_to_stored_session_should_fail() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_session(current_contract_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_package_hash.into(),\n                None,\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_contract_by_name_to_stored_versioned_session_should_fail() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let subcalls =\n                vec![super::stored_versioned_session(current_contract_package_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_NAME,\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_contract_by_hash_to_stored_versioned_session_should_fail() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls =\n                vec![super::stored_versioned_session(current_contract_package_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_hash.into(),\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_contract_by_name_to_stored_session_should_fail() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_session(current_contract_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_NAME,\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_contract_by_hash_to_stored_session_should_fail() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_session(current_contract_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_hash.into(),\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_contract_by_name_to_stored_versioned_contract_to_stored_versioned_session_should_fail(\n    ) {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let mut subcalls =\n                vec![\n                    super::stored_versioned_contract(current_contract_package_hash.into());\n                    len.saturating_sub(1)\n                ];\n            if *len > 0 {\n                subcalls.push(super::stored_versioned_session(\n                    current_contract_package_hash.into(),\n                ))\n            }\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_PACKAGE_NAME,\n                None,\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_contract_by_hash_to_stored_versioned_contract_to_stored_session_should_fail(\n    ) {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![\n                    super::stored_versioned_contract(current_contract_package_hash.into());\n                    len.saturating_sub(1)\n                ];\n            if *len > 0 {\n                subcalls.push(super::stored_session(current_contract_hash.into()))\n            }\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_package_hash.into(),\n                None,\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_contract_by_name_to_stored_contract_to_stored_versioned_session_should_fail(\n    ) {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![super::stored_contract(current_contract_hash.into()); len.saturating_sub(1)];\n            if *len > 0 {\n                subcalls.push(super::stored_versioned_session(\n                    current_contract_package_hash.into(),\n                ))\n            }\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_PACKAGE_NAME,\n                None,\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_contract_by_hash_to_stored_contract_to_stored_session_should_fail() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![super::stored_contract(current_contract_hash.into()); len.saturating_sub(1)];\n            if *len > 0 {\n                subcalls.push(super::stored_session(current_contract_hash.into()))\n            }\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_package_hash.into(),\n                None,\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_contract_by_name_to_stored_versioned_contract_to_stored_versioned_session_should_fail(\n    ) {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let mut subcalls =\n                vec![\n                    super::stored_versioned_contract(current_contract_package_hash.into());\n                    len.saturating_sub(1)\n                ];\n            if *len > 0 {\n                subcalls.push(super::stored_versioned_session(\n                    current_contract_package_hash.into(),\n                ))\n            }\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_NAME,\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_contract_by_hash_to_stored_versioned_contract_to_stored_session_should_fail() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![\n                    super::stored_versioned_contract(current_contract_package_hash.into());\n                    len.saturating_sub(1)\n                ];\n            if *len > 0 {\n                subcalls.push(super::stored_session(current_contract_hash.into()))\n            }\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_hash.into(),\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_contract_by_name_to_stored_contract_to_stored_versioned_session_should_fail() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![super::stored_contract(current_contract_hash.into()); len.saturating_sub(1)];\n            if *len > 0 {\n                subcalls.push(super::stored_versioned_session(\n                    current_contract_package_hash.into(),\n                ))\n            }\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_NAME,\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_contract_by_hash_to_stored_contract_to_stored_session_should_fail() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![super::stored_contract(current_contract_hash.into()); len.saturating_sub(1)];\n            if *len > 0 {\n                subcalls.push(super::stored_session(current_contract_hash.into()))\n            }\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_hash.into(),\n                CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    // Stored session + recursive subcall\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_session_by_name_to_stored_versioned_session() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let subcalls =\n                vec![super::stored_versioned_session(current_contract_package_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_PACKAGE_NAME,\n                None,\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_versioned_session(current_contract_package_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_session_by_hash_to_stored_versioned_session() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let subcalls =\n                vec![super::stored_versioned_session(current_contract_package_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_package_hash.into(),\n                None,\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_versioned_session(current_contract_package_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_session_by_name_to_stored_session() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_session(current_contract_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_PACKAGE_NAME,\n                None,\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_versioned_session(current_contract_package_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_session_by_hash_to_stored_session() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_session(current_contract_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_package_hash.into(),\n                None,\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_versioned_session(current_contract_package_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_session_by_name_to_stored_versioned_session() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls =\n                vec![super::stored_versioned_session(current_contract_package_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_NAME,\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_session(current_contract_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_session_by_hash_to_stored_versioned_session() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls =\n                vec![super::stored_versioned_session(current_contract_package_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_hash.into(),\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_versioned_session(current_contract_package_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_session_by_name_to_stored_session() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_session(current_contract_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_NAME,\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_session(current_contract_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_session_by_hash_to_stored_session() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_session(current_contract_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_hash.into(),\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_session(current_contract_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_session_by_name_to_stored_versioned_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let subcalls =\n                vec![super::stored_versioned_contract(current_contract_package_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_PACKAGE_NAME,\n                None,\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_versioned_session(current_contract_package_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_session_by_hash_to_stored_versioned_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let subcalls =\n                vec![super::stored_versioned_contract(current_contract_package_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_package_hash.into(),\n                None,\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_versioned_session(current_contract_package_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_session_by_name_to_stored_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_contract(current_contract_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_PACKAGE_NAME,\n                None,\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_versioned_session(current_contract_package_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_session_by_hash_to_stored_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_contract(current_contract_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_package_hash.into(),\n                None,\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_versioned_session(current_contract_package_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_session_by_name_to_stored_versioned_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls =\n                vec![super::stored_versioned_contract(current_contract_package_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_NAME,\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_session(current_contract_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_session_by_hash_to_stored_versioned_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls =\n                vec![super::stored_versioned_contract(current_contract_package_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_hash.into(),\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_session(current_contract_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_session_by_name_to_stored_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_contract(current_contract_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_NAME,\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_session(current_contract_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_session_by_hash_to_stored_contract() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_contract(current_contract_hash.into()); *len];\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_hash.into(),\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit().expect_success();\n\n            super::assert_each_context_has_correct_call_stack_info(\n                &mut builder,\n                super::stored_session(current_contract_hash.into()),\n                subcalls,\n                current_contract_package_hash,\n            );\n        }\n    }\n\n    // Stored session + recursive subcall failure cases\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_session_by_name_to_stored_versioned_contract_to_stored_versioned_session_should_fail(\n    ) {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let mut subcalls =\n                vec![\n                    super::stored_versioned_contract(current_contract_package_hash.into());\n                    len.saturating_sub(1)\n                ];\n            if *len > 0 {\n                subcalls.push(super::stored_versioned_session(\n                    current_contract_package_hash.into(),\n                ))\n            }\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_PACKAGE_NAME,\n                None,\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_session_by_hash_to_stored_versioned_contract_to_stored_session_should_fail()\n    {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![\n                    super::stored_versioned_contract(current_contract_package_hash.into());\n                    len.saturating_sub(1)\n                ];\n            if *len > 0 {\n                subcalls.push(super::stored_session(current_contract_hash.into()))\n            }\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_package_hash.into(),\n                None,\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_session_by_name_to_stored_contract_to_stored_versioned_session_should_fail()\n    {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![super::stored_contract(current_contract_hash.into()); len.saturating_sub(1)];\n            if *len > 0 {\n                subcalls.push(super::stored_versioned_session(\n                    current_contract_package_hash.into(),\n                ))\n            }\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_PACKAGE_NAME,\n                None,\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_session_by_hash_to_stored_contract_to_stored_session_should_fail() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![super::stored_contract(current_contract_hash.into()); len.saturating_sub(1)];\n            if *len > 0 {\n                subcalls.push(super::stored_session(current_contract_hash.into()))\n            }\n\n            let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_package_hash.into(),\n                None,\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_session_by_name_to_stored_versioned_contract_to_stored_versioned_session_should_fail()\n    {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let mut subcalls =\n                vec![\n                    super::stored_versioned_contract(current_contract_package_hash.into());\n                    len.saturating_sub(1)\n                ];\n            if *len > 0 {\n                subcalls.push(super::stored_versioned_session(\n                    current_contract_package_hash.into(),\n                ))\n            }\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_NAME,\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_session_by_hash_to_stored_versioned_contract_to_stored_session_should_fail() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![\n                    super::stored_versioned_contract(current_contract_package_hash.into());\n                    len.saturating_sub(1)\n                ];\n            if *len > 0 {\n                subcalls.push(super::stored_session(current_contract_hash.into()))\n            }\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_hash.into(),\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_session_by_name_to_stored_contract_to_stored_versioned_session_should_fail() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![super::stored_contract(current_contract_hash.into()); len.saturating_sub(1)];\n            if *len > 0 {\n                subcalls.push(super::stored_versioned_session(\n                    current_contract_package_hash.into(),\n                ))\n            }\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_NAME,\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_session_by_hash_to_stored_contract_to_stored_session_should_fail() {\n        for len in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![super::stored_contract(current_contract_hash.into()); len.saturating_sub(1)];\n            if *len > 0 {\n                subcalls.push(super::stored_session(current_contract_hash.into()))\n            }\n\n            let execute_request = ExecuteRequestBuilder::contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                current_contract_hash.into(),\n                CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                runtime_args! 
{\n                    ARG_CALLS => subcalls.clone(),\n                    ARG_CURRENT_DEPTH => 0u8,\n                    mint::ARG_AMOUNT => approved_amount(*len),\n                },\n            )\n            .build();\n\n            builder.exec(execute_request).commit();\n\n            super::assert_invalid_context(&mut builder, *len);\n        }\n    }\n}\n\nmod payment {\n    use std::iter;\n\n    use rand::Rng;\n\n    use casper_engine_test_support::{\n        DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR,\n    };\n    use casper_types::{runtime_args, system::mint, HashAddr, RuntimeArgs};\n    use get_call_stack_recursive_subcall::Call;\n\n    use crate::wasm_utils;\n\n    use super::{\n        approved_amount, AccountExt, ARG_CALLS, ARG_CURRENT_DEPTH, CONTRACT_CALL_RECURSIVE_SUBCALL,\n        CONTRACT_FORWARDER_ENTRYPOINT_SESSION, CONTRACT_NAME, CONTRACT_PACKAGE_NAME,\n    };\n\n    // DEPTHS should not contain 1, as it will eliminate the initial element from the subcalls\n    // vector.  Going further than 6 will hit the gas limit.\n    const DEPTHS: &[usize] = &[0, 6, 10];\n\n    fn execute(\n        builder: &mut LmdbWasmTestBuilder,\n        call_depth: usize,\n        subcalls: Vec<Call>,\n        is_invalid_context: bool,\n    ) {\n        let execute_request = {\n            let mut rng = rand::thread_rng();\n            let deploy_hash = rng.gen();\n            let sender = *DEFAULT_ACCOUNT_ADDR;\n            let args = runtime_args! 
{\n                ARG_CALLS => subcalls,\n                ARG_CURRENT_DEPTH => 0u8,\n                mint::ARG_AMOUNT => approved_amount(call_depth),\n            };\n            let deploy = DeployItemBuilder::new()\n                .with_address(sender)\n                .with_payment_code(CONTRACT_CALL_RECURSIVE_SUBCALL, args)\n                .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::default())\n                .with_authorization_keys(&[sender])\n                .with_deploy_hash(deploy_hash)\n                .build();\n            ExecuteRequestBuilder::from_deploy_item(&deploy).build()\n        };\n\n        super::execute_and_assert_result(call_depth, builder, execute_request, is_invalid_context);\n    }\n\n    fn execute_stored_payment_by_package_name(\n        builder: &mut LmdbWasmTestBuilder,\n        call_depth: usize,\n        subcalls: Vec<Call>,\n    ) {\n        let execute_request = {\n            let mut rng = rand::thread_rng();\n            let deploy_hash = rng.gen();\n\n            let sender = *DEFAULT_ACCOUNT_ADDR;\n\n            let args = runtime_args! 
{\n                ARG_CALLS => subcalls,\n                ARG_CURRENT_DEPTH => 0u8,\n                mint::ARG_AMOUNT => approved_amount(call_depth),\n            };\n\n            let deploy = DeployItemBuilder::new()\n                .with_address(sender)\n                .with_stored_versioned_payment_contract_by_name(\n                    CONTRACT_PACKAGE_NAME,\n                    None,\n                    CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                    args,\n                )\n                .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::default())\n                .with_authorization_keys(&[sender])\n                .with_deploy_hash(deploy_hash)\n                .build();\n\n            ExecuteRequestBuilder::from_deploy_item(&deploy).build()\n        };\n\n        super::execute_and_assert_result(call_depth, builder, execute_request, false);\n    }\n\n    fn execute_stored_payment_by_package_hash(\n        builder: &mut LmdbWasmTestBuilder,\n        call_depth: usize,\n        subcalls: Vec<Call>,\n        current_contract_package_hash: HashAddr,\n    ) {\n        let execute_request = {\n            let mut rng = rand::thread_rng();\n            let deploy_hash = rng.gen();\n            let sender = *DEFAULT_ACCOUNT_ADDR;\n            let args = runtime_args! 
{\n                ARG_CALLS => subcalls,\n                ARG_CURRENT_DEPTH => 0u8,\n                mint::ARG_AMOUNT => approved_amount(call_depth),\n            };\n            let deploy = DeployItemBuilder::new()\n                .with_address(sender)\n                .with_stored_versioned_payment_contract_by_hash(\n                    current_contract_package_hash,\n                    None,\n                    CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                    args,\n                )\n                .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::default())\n                .with_authorization_keys(&[sender])\n                .with_deploy_hash(deploy_hash)\n                .build();\n            ExecuteRequestBuilder::from_deploy_item(&deploy).build()\n        };\n\n        super::execute_and_assert_result(call_depth, builder, execute_request, false);\n    }\n\n    fn execute_stored_payment_by_contract_name(\n        builder: &mut LmdbWasmTestBuilder,\n        call_depth: usize,\n        subcalls: Vec<Call>,\n    ) {\n        let execute_request = {\n            let mut rng = rand::thread_rng();\n            let deploy_hash = rng.gen();\n\n            let sender = *DEFAULT_ACCOUNT_ADDR;\n\n            let args = runtime_args! 
{\n                ARG_CALLS => subcalls,\n                ARG_CURRENT_DEPTH => 0u8,\n                mint::ARG_AMOUNT => approved_amount(call_depth),\n            };\n\n            let deploy = DeployItemBuilder::new()\n                .with_address(sender)\n                .with_stored_payment_named_key(\n                    CONTRACT_NAME,\n                    CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                    args,\n                )\n                .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::default())\n                .with_authorization_keys(&[sender])\n                .with_deploy_hash(deploy_hash)\n                .build();\n\n            ExecuteRequestBuilder::from_deploy_item(&deploy).build()\n        };\n\n        super::execute_and_assert_result(call_depth, builder, execute_request, false);\n    }\n\n    fn execute_stored_payment_by_contract_hash(\n        builder: &mut LmdbWasmTestBuilder,\n        call_depth: usize,\n        subcalls: Vec<Call>,\n        current_contract_hash: HashAddr,\n    ) {\n        let execute_request = {\n            let mut rng = rand::thread_rng();\n            let deploy_hash = rng.gen();\n            let sender = *DEFAULT_ACCOUNT_ADDR;\n            let args = runtime_args! 
{\n                ARG_CALLS => subcalls,\n                ARG_CURRENT_DEPTH => 0u8,\n                mint::ARG_AMOUNT => approved_amount(call_depth),\n            };\n            let deploy = DeployItemBuilder::new()\n                .with_address(sender)\n                .with_stored_payment_hash(\n                    current_contract_hash.into(),\n                    CONTRACT_FORWARDER_ENTRYPOINT_SESSION,\n                    args,\n                )\n                .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::default())\n                .with_authorization_keys(&[sender])\n                .with_deploy_hash(deploy_hash)\n                .build();\n            ExecuteRequestBuilder::from_deploy_item(&deploy).build()\n        };\n\n        super::execute_and_assert_result(call_depth, builder, execute_request, false);\n    }\n\n    // Session + recursive subcall\n\n    #[ignore]\n    #[test]\n    fn payment_bytes_to_stored_versioned_session_to_stored_versioned_contract() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let mut subcalls =\n                vec![\n                    super::stored_versioned_session(current_contract_package_hash.into());\n                    call_depth.saturating_sub(1)\n                ];\n            if *call_depth > 0 {\n                subcalls.push(super::stored_versioned_contract(\n                    current_contract_package_hash.into(),\n                ));\n            }\n\n            execute(&mut builder, *call_depth, subcalls, false);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn payment_bytes_to_stored_versioned_session_to_stored_contract() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = 
builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![\n                    super::stored_versioned_session(current_contract_package_hash.into());\n                    call_depth.saturating_sub(1)\n                ];\n            if *call_depth > 0 {\n                subcalls.push(super::stored_contract(current_contract_hash.into()));\n            }\n\n            execute(&mut builder, *call_depth, subcalls, false);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn payment_bytes_to_stored_session_to_stored_versioned_contract() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls = vec![\n                super::stored_session(current_contract_hash.into());\n                call_depth.saturating_sub(1)\n            ];\n            if *call_depth > 0 {\n                subcalls.push(super::stored_versioned_contract(\n                    current_contract_package_hash.into(),\n                ));\n            }\n\n            execute(&mut builder, *call_depth, subcalls, false)\n        }\n    }\n\n    // Payment logic is tethered to a low gas amount. 
It is not forbidden to attempt to do calls\n    // however they are expensive and if you exceed the gas limit it should fail with a\n    // GasLimit error.\n    #[ignore]\n    #[test]\n    fn payment_bytes_to_stored_contract_to_stored_session() {\n        let call_depth = 5usize;\n        let mut builder = super::setup();\n        let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n        let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n        let subcalls = vec![\n            super::stored_contract(current_contract_hash.into()),\n            super::stored_session(current_contract_hash.into()),\n        ];\n        execute(&mut builder, call_depth, subcalls, true)\n    }\n\n    #[ignore]\n    #[test]\n    fn payment_bytes_to_stored_session_to_stored_contract_() {\n        let call_depth = 5usize;\n        let mut builder = super::setup();\n        let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n        let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n        let subcalls = iter::repeat_with(|| {\n            [\n                super::stored_session(current_contract_hash.into()),\n                super::stored_contract(current_contract_hash.into()),\n            ]\n        })\n        .take(call_depth)\n        .flatten();\n        execute(&mut builder, call_depth, subcalls.collect(), false)\n    }\n\n    // Session + recursive subcall failure cases\n\n    #[ignore]\n    #[test]\n    fn payment_bytes_to_stored_versioned_contract_to_stored_versioned_session_should_fail() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let mut subcalls =\n                vec![\n                    
super::stored_versioned_contract(current_contract_package_hash.into());\n                    call_depth.saturating_sub(1)\n                ];\n            if *call_depth > 0 {\n                subcalls.push(super::stored_versioned_session(\n                    current_contract_package_hash.into(),\n                ));\n            }\n\n            execute(&mut builder, *call_depth, subcalls, true)\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn payment_bytes_to_stored_versioned_contract_to_stored_session_should_fail() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![\n                    super::stored_versioned_contract(current_contract_package_hash.into());\n                    call_depth.saturating_sub(1)\n                ];\n            if *call_depth > 0 {\n                subcalls.push(super::stored_session(current_contract_hash.into()));\n            }\n\n            execute(&mut builder, *call_depth, subcalls, true)\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn payment_bytes_to_stored_contract_to_stored_versioned_session_should_fail() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls = vec![\n                super::stored_contract(current_contract_hash.into());\n                call_depth.saturating_sub(1)\n            ];\n            if *call_depth > 0 {\n   
             subcalls.push(super::stored_versioned_session(\n                    current_contract_package_hash.into(),\n                ));\n            }\n\n            execute(&mut builder, *call_depth, subcalls, true)\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn payment_bytes_to_stored_contract_to_stored_session_should_fail() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls = vec![\n                super::stored_contract(current_contract_hash.into());\n                call_depth.saturating_sub(1)\n            ];\n            if *call_depth > 0 {\n                subcalls.push(super::stored_session(current_contract_hash.into()));\n            }\n\n            execute(&mut builder, *call_depth, subcalls, true)\n        }\n    }\n\n    // Stored session + recursive subcall\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_payment_by_name_to_stored_versioned_session() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let subcalls =\n                vec![\n                    super::stored_versioned_session(current_contract_package_hash.into());\n                    *call_depth\n                ];\n\n            execute_stored_payment_by_package_name(&mut builder, *call_depth, subcalls);\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_payment_by_hash_to_stored_versioned_session() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            
let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let subcalls =\n                vec![\n                    super::stored_versioned_session(current_contract_package_hash.into());\n                    *call_depth\n                ];\n\n            execute_stored_payment_by_package_hash(\n                &mut builder,\n                *call_depth,\n                subcalls,\n                current_contract_package_hash,\n            )\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_payment_by_name_to_stored_session() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_session(current_contract_hash.into()); *call_depth];\n\n            execute_stored_payment_by_package_name(&mut builder, *call_depth, subcalls)\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_payment_by_hash_to_stored_session() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_session(current_contract_hash.into()); *call_depth];\n\n            execute_stored_payment_by_package_hash(\n                &mut builder,\n                *call_depth,\n                subcalls,\n                current_contract_package_hash,\n            )\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_payment_by_name_to_stored_versioned_session() {\n        for call_depth in DEPTHS {\n            let mut builder = 
super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let subcalls =\n                vec![\n                    super::stored_versioned_session(current_contract_package_hash.into());\n                    *call_depth\n                ];\n\n            execute_stored_payment_by_contract_name(&mut builder, *call_depth, subcalls)\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_payment_by_hash_to_stored_versioned_session() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls =\n                vec![\n                    super::stored_versioned_session(current_contract_package_hash.into());\n                    *call_depth\n                ];\n\n            execute_stored_payment_by_contract_hash(\n                &mut builder,\n                *call_depth,\n                subcalls,\n                current_contract_hash,\n            )\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_payment_by_name_to_stored_session() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_session(current_contract_hash.into()); *call_depth];\n\n            execute_stored_payment_by_contract_name(&mut builder, *call_depth, subcalls)\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_payment_by_hash_to_stored_session() 
{\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_session(current_contract_hash.into()); *call_depth];\n\n            execute_stored_payment_by_contract_hash(\n                &mut builder,\n                *call_depth,\n                subcalls,\n                current_contract_hash,\n            )\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_payment_by_name_to_stored_versioned_contract() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let subcalls =\n                vec![\n                    super::stored_versioned_contract(current_contract_package_hash.into());\n                    *call_depth\n                ];\n\n            execute_stored_payment_by_contract_name(&mut builder, *call_depth, subcalls)\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_payment_by_hash_to_stored_versioned_contract() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let subcalls =\n                vec![\n                    super::stored_versioned_contract(current_contract_package_hash.into());\n                    *call_depth\n                ];\n\n            execute_stored_payment_by_package_hash(\n                &mut builder,\n                *call_depth,\n                subcalls,\n                
current_contract_package_hash,\n            )\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_payment_by_name_to_stored_contract() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_contract(current_contract_hash.into()); *call_depth];\n\n            execute_stored_payment_by_package_name(&mut builder, *call_depth, subcalls)\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_payment_by_hash_to_stored_contract() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_contract(current_contract_hash.into()); *call_depth];\n\n            execute_stored_payment_by_package_hash(\n                &mut builder,\n                *call_depth,\n                subcalls,\n                current_contract_package_hash,\n            )\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_payment_by_name_to_stored_versioned_contract() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let subcalls =\n                vec![\n                    super::stored_versioned_contract(current_contract_package_hash.into());\n                    *call_depth\n                ];\n\n            
execute_stored_payment_by_contract_name(&mut builder, *call_depth, subcalls)\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_payment_by_hash_to_stored_versioned_contract() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls =\n                vec![\n                    super::stored_versioned_contract(current_contract_package_hash.into());\n                    *call_depth\n                ];\n\n            execute_stored_payment_by_contract_hash(\n                &mut builder,\n                *call_depth,\n                subcalls,\n                current_contract_hash,\n            )\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_payment_by_name_to_stored_contract() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_contract(current_contract_hash.into()); *call_depth];\n\n            execute_stored_payment_by_contract_name(&mut builder, *call_depth, subcalls)\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_payment_by_hash_to_stored_contract() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            println!(\"DA {:?}\", default_account);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let subcalls = vec![super::stored_contract(current_contract_hash.into()); 
*call_depth];\n\n            execute_stored_payment_by_contract_hash(\n                &mut builder,\n                *call_depth,\n                subcalls,\n                current_contract_hash,\n            )\n        }\n    }\n\n    // Stored session + recursive subcall failure cases\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_payment_by_name_to_stored_versioned_contract_to_stored_versioned_session_should_fail(\n    ) {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let mut subcalls =\n                vec![\n                    super::stored_versioned_contract(current_contract_package_hash.into());\n                    call_depth.saturating_sub(1)\n                ];\n            if *call_depth > 0 {\n                subcalls.push(super::stored_versioned_session(\n                    current_contract_package_hash.into(),\n                ))\n            }\n\n            execute_stored_payment_by_package_name(&mut builder, *call_depth, subcalls)\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_payment_by_hash_to_stored_versioned_contract_to_stored_session_should_fail()\n    {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![\n                    super::stored_versioned_contract(current_contract_package_hash.into());\n                    call_depth.saturating_sub(1)\n                ];\n            if *call_depth > 0 {\n                
subcalls.push(super::stored_session(current_contract_hash.into()))\n            }\n\n            execute_stored_payment_by_package_hash(\n                &mut builder,\n                *call_depth,\n                subcalls,\n                current_contract_package_hash,\n            )\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_payment_by_name_to_stored_contract_to_stored_versioned_session_should_fail()\n    {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls = vec![\n                super::stored_contract(current_contract_hash.into());\n                call_depth.saturating_sub(1)\n            ];\n            if *call_depth > 0 {\n                subcalls.push(super::stored_versioned_session(\n                    current_contract_package_hash.into(),\n                ))\n            }\n\n            execute_stored_payment_by_package_name(&mut builder, *call_depth, subcalls)\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_versioned_payment_by_hash_to_stored_contract_to_stored_session_should_fail() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls = vec![\n                super::stored_contract(current_contract_hash.into());\n                call_depth.saturating_sub(1)\n            ];\n            if *call_depth > 0 {\n                
subcalls.push(super::stored_session(current_contract_hash.into()))\n            }\n\n            execute_stored_payment_by_package_hash(\n                &mut builder,\n                *call_depth,\n                subcalls,\n                current_contract_package_hash,\n            )\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_payment_by_name_to_stored_versioned_contract_to_stored_versioned_session_should_fail()\n    {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n\n            let mut subcalls =\n                vec![\n                    super::stored_versioned_contract(current_contract_package_hash.into());\n                    call_depth.saturating_sub(1)\n                ];\n            if *call_depth > 0 {\n                subcalls.push(super::stored_versioned_session(\n                    current_contract_package_hash.into(),\n                ))\n            }\n\n            execute_stored_payment_by_contract_name(&mut builder, *call_depth, subcalls)\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_session_by_hash_to_stored_versioned_contract_to_stored_session_should_fail() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls =\n                vec![\n                    super::stored_versioned_contract(current_contract_package_hash.into());\n                    call_depth.saturating_sub(1)\n                ];\n            if *call_depth > 0 {\n                
subcalls.push(super::stored_session(current_contract_hash.into()))\n            }\n\n            execute_stored_payment_by_contract_hash(\n                &mut builder,\n                *call_depth,\n                subcalls,\n                current_contract_hash,\n            )\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_payment_by_name_to_stored_contract_to_stored_versioned_session_should_fail() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME);\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls = vec![\n                super::stored_contract(current_contract_hash.into());\n                call_depth.saturating_sub(1)\n            ];\n            if *call_depth > 0 {\n                subcalls.push(super::stored_versioned_session(\n                    current_contract_package_hash.into(),\n                ))\n            }\n\n            execute_stored_payment_by_contract_name(&mut builder, *call_depth, subcalls)\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn stored_payment_by_name_to_stored_contract_to_stored_session_should_fail() {\n        for call_depth in DEPTHS {\n            let mut builder = super::setup();\n            let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap();\n            let current_contract_hash = default_account.get_hash(CONTRACT_NAME);\n\n            let mut subcalls = vec![\n                super::stored_contract(current_contract_hash.into());\n                call_depth.saturating_sub(1)\n            ];\n            if *call_depth > 0 {\n                subcalls.push(super::stored_session(current_contract_hash.into()))\n            }\n\n            execute_stored_payment_by_contract_name(&mut builder, 
*call_depth, subcalls)\n        }\n    }\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/get_caller.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT,\n    LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{\n    account::AccountHash,\n    contracts::{ContractHash, ContractPackageHash},\n    runtime_args,\n    system::{Caller, CallerInfo},\n    CLValue, EntityAddr,\n};\n\nconst CONTRACT_GET_CALLER: &str = \"get_caller.wasm\";\nconst CONTRACT_GET_CALLER_SUBCALL: &str = \"get_caller_subcall.wasm\";\nconst CONTRACT_TRANSFER_PURSE_TO_ACCOUNT: &str = \"transfer_purse_to_account.wasm\";\nconst LOAD_CALLER_INFORMATION: &str = \"load_caller_info.wasm\";\nconst ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]);\nconst LOAD_CALLER_INFO_HASH: &str = \"load_caller_info_contract_hash\";\nconst LOAD_CALLER_INFO_PACKAGE_HASH: &str = \"load_caller_info_package\";\n\n#[ignore]\n#[test]\nfn should_run_get_caller_contract() {\n    LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_GET_CALLER,\n                runtime_args! {\"account\" => *DEFAULT_ACCOUNT_ADDR},\n            )\n            .build(),\n        )\n        .expect_success()\n        .commit();\n}\n\n#[ignore]\n#[test]\nfn should_run_get_caller_contract_other_account() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder\n        .exec(\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_TRANSFER_PURSE_TO_ACCOUNT,\n                runtime_args! 
{\"target\" => ACCOUNT_1_ADDR, \"amount\"=> *DEFAULT_PAYMENT},\n            )\n            .build(),\n        )\n        .expect_success()\n        .commit();\n\n    builder\n        .exec(\n            ExecuteRequestBuilder::standard(\n                ACCOUNT_1_ADDR,\n                CONTRACT_GET_CALLER,\n                runtime_args! {\"account\" => ACCOUNT_1_ADDR},\n            )\n            .build(),\n        )\n        .expect_success()\n        .commit();\n}\n\n#[ignore]\n#[test]\nfn should_run_get_caller_subcall_contract() {\n    {\n        let mut builder = LmdbWasmTestBuilder::default();\n        builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n        builder\n            .exec(\n                ExecuteRequestBuilder::standard(\n                    *DEFAULT_ACCOUNT_ADDR,\n                    CONTRACT_GET_CALLER_SUBCALL,\n                    runtime_args! {\"account\" => *DEFAULT_ACCOUNT_ADDR},\n                )\n                .build(),\n            )\n            .expect_success()\n            .commit();\n    }\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_TRANSFER_PURSE_TO_ACCOUNT,\n                runtime_args! {\"target\" => ACCOUNT_1_ADDR, \"amount\"=>*DEFAULT_PAYMENT},\n            )\n            .build(),\n        )\n        .expect_success()\n        .commit();\n    builder\n        .exec(\n            ExecuteRequestBuilder::standard(\n                ACCOUNT_1_ADDR,\n                CONTRACT_GET_CALLER_SUBCALL,\n                runtime_args! 
{\"account\" => ACCOUNT_1_ADDR},\n            )\n            .build(),\n        )\n        .expect_success()\n        .commit();\n}\n\n#[ignore]\n#[test]\nfn should_load_caller_information_based_on_action() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                LOAD_CALLER_INFORMATION,\n                runtime_args! {},\n            )\n            .build(),\n        )\n        .expect_success()\n        .commit();\n\n    let caller_info_entity_hash = builder\n        .get_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .get(LOAD_CALLER_INFO_HASH)\n        .expect(\"must have caller info entity key\")\n        .into_entity_hash()\n        .expect(\"must get addressable entity hash\");\n\n    let initiator_call_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        caller_info_entity_hash,\n        \"initiator\",\n        runtime_args! {},\n    )\n    .build();\n\n    builder\n        .exec(initiator_call_request)\n        .expect_success()\n        .commit();\n\n    let immediate_call_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        caller_info_entity_hash,\n        \"get_immediate_caller\",\n        runtime_args! {},\n    )\n    .build();\n\n    builder\n        .exec(immediate_call_request)\n        .expect_success()\n        .commit();\n\n    let initiator_call_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        caller_info_entity_hash,\n        \"get_full_stack\",\n        runtime_args! 
{},\n    )\n    .build();\n\n    builder\n        .exec(initiator_call_request)\n        .expect_success()\n        .commit();\n\n    let info_named_keys =\n        builder.get_named_keys(EntityAddr::SmartContract(caller_info_entity_hash.value()));\n\n    let initiator = *info_named_keys\n        .get(\"initiator\")\n        .expect(\"must have key entry for initiator\");\n\n    let initiator_account_hash = builder\n        .query(None, initiator, &[])\n        .expect(\"must have stored value\")\n        .as_cl_value()\n        .map(|cl_val| CLValue::into_t(cl_val.clone()))\n        .expect(\"must have cl value\")\n        .expect(\"must get account hash\");\n\n    assert_eq!(*DEFAULT_ACCOUNT_ADDR, initiator_account_hash);\n\n    let immediate = *info_named_keys\n        .get(\"immediate\")\n        .expect(\"must have key entry for initiator\");\n\n    let caller: CallerInfo = builder\n        .query(None, immediate, &[])\n        .expect(\"must have stored value\")\n        .as_cl_value()\n        .map(|cl_val| CLValue::into_t(cl_val.clone()))\n        .expect(\"must have cl value\")\n        .expect(\"must get caller\");\n\n    let expected_caller = CallerInfo::try_from(Caller::initiator(*DEFAULT_ACCOUNT_ADDR))\n        .expect(\"must get caller info\");\n\n    assert_eq!(expected_caller, caller);\n\n    let full = *info_named_keys\n        .get(\"full\")\n        .expect(\"must have key entry for full call stack\");\n\n    let full_call_stack: Vec<CallerInfo> = builder\n        .query(None, full, &[])\n        .expect(\"must have stored value\")\n        .as_cl_value()\n        .map(|cl_val| CLValue::into_t(cl_val.clone()))\n        .expect(\"must have cl value\")\n        .expect(\"must get full call stack\");\n\n    let package_hash = builder\n        .get_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .get(LOAD_CALLER_INFO_PACKAGE_HASH)\n        .expect(\"must get package key\")\n        .into_hash_addr()\n        
.map(ContractPackageHash::new)\n        .expect(\"must get package hash\");\n\n    let frame = CallerInfo::try_from(Caller::smart_contract(\n        package_hash,\n        ContractHash::new(caller_info_entity_hash.value()),\n    ))\n    .expect(\"must get frame\");\n    let expected_stack = vec![expected_caller, frame];\n    assert_eq!(expected_stack, full_call_stack);\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/get_phase.rs",
    "content": "use casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{runtime_args, Phase};\n\nconst ARG_PHASE: &str = \"phase\";\nconst ARG_AMOUNT: &str = \"amount\";\n\n#[ignore]\n#[test]\nfn should_run_get_phase_contract() {\n    let default_account = *DEFAULT_ACCOUNT_ADDR;\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_deploy_hash([1; 32])\n        .with_session_code(\n            \"get_phase.wasm\",\n            runtime_args! { ARG_PHASE => Phase::Session },\n        )\n        .with_payment_code(\n            \"get_phase_payment.wasm\",\n            runtime_args! {\n                ARG_PHASE => Phase::Payment,\n                ARG_AMOUNT => *DEFAULT_PAYMENT\n            },\n        )\n        .with_authorization_keys(&[default_account])\n        .build();\n\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .commit()\n        .expect_success();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/list_authorization_keys.rs",
    "content": "use casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder,\n    DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse casper_execution_engine::{engine_state::Error, execution::ExecError};\nuse casper_types::{\n    account::AccountHash, addressable_entity::Weight, runtime_args,\n    system::standard_payment::ARG_AMOUNT, ApiError, PublicKey, SecretKey,\n};\nuse once_cell::sync::Lazy;\n\nconst ARG_ACCOUNT: &str = \"account\";\nconst ARG_WEIGHT: &str = \"weight\";\nconst DEFAULT_WEIGHT: Weight = Weight::new(1);\n\nconst CONTRACT_ADD_ASSOCIATED_KEY: &str = \"add_associated_key.wasm\";\n\nconst CONTRACT_LIST_AUTHORIZATION_KEYS: &str = \"list_authorization_keys.wasm\";\nconst ARG_EXPECTED_AUTHORIZATION_KEYS: &str = \"expected_authorization_keys\";\n\nstatic ACCOUNT_1_SECRET_KEY: Lazy<SecretKey> =\n    Lazy::new(|| SecretKey::secp256k1_from_bytes([234u8; 32]).unwrap());\nstatic ACCOUNT_1_PUBLIC_KEY: Lazy<PublicKey> =\n    Lazy::new(|| PublicKey::from(&*ACCOUNT_1_SECRET_KEY));\nstatic ACCOUNT_1_ADDR: Lazy<AccountHash> = Lazy::new(|| ACCOUNT_1_PUBLIC_KEY.to_account_hash());\n\nstatic ACCOUNT_2_SECRET_KEY: Lazy<SecretKey> =\n    Lazy::new(|| SecretKey::secp256k1_from_bytes([243u8; 32]).unwrap());\nstatic ACCOUNT_2_PUBLIC_KEY: Lazy<PublicKey> =\n    Lazy::new(|| PublicKey::from(&*ACCOUNT_2_SECRET_KEY));\nstatic ACCOUNT_2_ADDR: Lazy<AccountHash> = Lazy::new(|| ACCOUNT_2_PUBLIC_KEY.to_account_hash());\n\nconst USER_ERROR_ASSERTION: u16 = 0;\n\n#[ignore]\n#[test]\nfn should_list_authorization_keys() {\n    assert!(\n        test_match(\n            *DEFAULT_ACCOUNT_ADDR,\n            vec![*DEFAULT_ACCOUNT_ADDR],\n            vec![*DEFAULT_ACCOUNT_ADDR],\n        ),\n        \"one signature should match the expected authorization key\"\n    );\n    assert!(\n        !test_match(\n            *DEFAULT_ACCOUNT_ADDR,\n            vec![*ACCOUNT_2_ADDR, 
*DEFAULT_ACCOUNT_ADDR],\n            vec![*DEFAULT_ACCOUNT_ADDR, *ACCOUNT_1_ADDR],\n        ),\n        \"two signatures are off by one\"\n    );\n    assert!(\n        test_match(\n            *DEFAULT_ACCOUNT_ADDR,\n            vec![*ACCOUNT_2_ADDR, *DEFAULT_ACCOUNT_ADDR],\n            vec![*DEFAULT_ACCOUNT_ADDR, *ACCOUNT_2_ADDR],\n        ),\n        \"two signatures should match the expected list\"\n    );\n    assert!(\n        test_match(\n            *ACCOUNT_1_ADDR,\n            vec![*ACCOUNT_1_ADDR],\n            vec![*ACCOUNT_1_ADDR],\n        ),\n        \"one signature should match the output for non-default account\"\n    );\n\n    assert!(\n        test_match(\n            *DEFAULT_ACCOUNT_ADDR,\n            vec![*ACCOUNT_2_ADDR, *DEFAULT_ACCOUNT_ADDR, *ACCOUNT_1_ADDR],\n            vec![*ACCOUNT_1_ADDR, *ACCOUNT_2_ADDR, *DEFAULT_ACCOUNT_ADDR],\n        ),\n        \"multisig matches expected list\"\n    );\n    assert!(\n        !test_match(\n            *DEFAULT_ACCOUNT_ADDR,\n            vec![*ACCOUNT_2_ADDR, *DEFAULT_ACCOUNT_ADDR, *ACCOUNT_1_ADDR],\n            vec![],\n        ),\n        \"multisig is not empty\"\n    );\n    assert!(\n        !test_match(\n            *DEFAULT_ACCOUNT_ADDR,\n            vec![*ACCOUNT_2_ADDR, *DEFAULT_ACCOUNT_ADDR, *ACCOUNT_1_ADDR],\n            vec![*ACCOUNT_2_ADDR, *ACCOUNT_1_ADDR],\n        ),\n        \"multisig does not include caller account\"\n    );\n}\n\nfn test_match(\n    caller: AccountHash,\n    signatures: Vec<AccountHash>,\n    expected_authorization_keys: Vec<AccountHash>,\n) -> bool {\n    let mut builder = setup();\n    let session_args = runtime_args! {\n        ARG_EXPECTED_AUTHORIZATION_KEYS => expected_authorization_keys\n    };\n    let deploy_hash = [42; 32];\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(caller)\n        .with_session_code(CONTRACT_LIST_AUTHORIZATION_KEYS, session_args)\n        .with_standard_payment(runtime_args! 
{\n            ARG_AMOUNT => *DEFAULT_PAYMENT\n        })\n        .with_authorization_keys(&signatures)\n        .with_deploy_hash(deploy_hash)\n        .build();\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n    builder.exec(exec_request).commit();\n\n    match builder.get_error() {\n        Some(Error::Exec(ExecError::Revert(ApiError::User(USER_ERROR_ASSERTION)))) => false,\n        Some(error) => panic!(\"Unexpected error {:?}\", error),\n        None => {\n            // Success\n            true\n        }\n    }\n}\n\nfn setup() -> LmdbWasmTestBuilder {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    for account in [*ACCOUNT_1_ADDR, *ACCOUNT_2_ADDR] {\n        let add_key_request = {\n            let session_args = runtime_args! {\n                ARG_ACCOUNT => account,\n                ARG_WEIGHT => DEFAULT_WEIGHT,\n            };\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_ADD_ASSOCIATED_KEY,\n                session_args,\n            )\n            .build()\n        };\n\n        let transfer_request =\n            TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, account).build();\n\n        builder.exec(add_key_request).expect_success().commit();\n        builder\n            .transfer_and_commit(transfer_request)\n            .expect_success();\n    }\n\n    builder\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/list_named_keys.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{account::AccountHash, contracts::NamedKeys, runtime_args, Key};\n\nconst CONTRACT_LIST_NAMED_KEYS: &str = \"list_named_keys.wasm\";\nconst NEW_NAME_ACCOUNT: &str = \"Account\";\nconst NEW_NAME_HASH: &str = \"Hash\";\nconst ARG_INITIAL_NAMED_KEYS: &str = \"initial_named_args\";\nconst ARG_NEW_NAMED_KEYS: &str = \"new_named_keys\";\n\n#[ignore]\n#[test]\nfn should_list_named_keys() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let initial_named_keys: NamedKeys = NamedKeys::new();\n\n    let new_named_keys = {\n        let account_hash = AccountHash::new([1; 32]);\n        let mut named_keys = NamedKeys::new();\n        assert!(named_keys\n            .insert(NEW_NAME_ACCOUNT.to_string(), Key::Account(account_hash))\n            .is_none());\n        assert!(named_keys\n            .insert(NEW_NAME_HASH.to_string(), Key::Hash([2; 32]))\n            .is_none());\n        named_keys\n    };\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_LIST_NAMED_KEYS,\n        runtime_args! {\n            ARG_INITIAL_NAMED_KEYS => initial_named_keys,\n            ARG_NEW_NAMED_KEYS => new_named_keys,\n        },\n    )\n    .build();\n\n    builder.exec(exec_request).commit().expect_success();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/main_purse.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT,\n    LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{account::AccountHash, runtime_args};\n\nconst CONTRACT_MAIN_PURSE: &str = \"main_purse.wasm\";\nconst CONTRACT_TRANSFER_PURSE_TO_ACCOUNT: &str = \"transfer_purse_to_account.wasm\";\nconst ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]);\nconst ARG_TARGET: &str = \"target\";\nconst ARG_AMOUNT: &str = \"amount\";\n\n#[ignore]\n#[test]\nfn should_run_main_purse_contract_default_account() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    let builder = builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have contract for default account\");\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_MAIN_PURSE,\n        runtime_args! { \"purse\" => default_account.main_purse() },\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_run_main_purse_contract_account_1() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_PURSE_TO_ACCOUNT,\n        runtime_args! { ARG_TARGET => ACCOUNT_1_ADDR, ARG_AMOUNT => *DEFAULT_PAYMENT },\n    )\n    .build();\n\n    let builder = builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request_1)\n        .expect_success()\n        .commit();\n\n    let account_1 = builder\n        .get_entity_by_account_hash(ACCOUNT_1_ADDR)\n        .expect(\"should get account\");\n\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        ACCOUNT_1_ADDR,\n        CONTRACT_MAIN_PURSE,\n        runtime_args! 
{ \"purse\" => account_1.main_purse() },\n    )\n    .build();\n\n    builder.exec(exec_request_2).expect_success().commit();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/mint_purse.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n    MINIMUM_ACCOUNT_CREATION_BALANCE, SYSTEM_ADDR,\n};\nuse casper_types::{runtime_args, RuntimeArgs, U512};\n\nconst CONTRACT_MINT_PURSE: &str = \"mint_purse.wasm\";\nconst CONTRACT_TRANSFER_TO_ACCOUNT: &str = \"transfer_to_account_u512.wasm\";\nconst TRANSFER_AMOUNT: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE;\n\n#[ignore]\n#[test]\nfn should_run_mint_purse_contract() {\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! { \"target\" => *SYSTEM_ADDR, \"amount\" => U512::from(TRANSFER_AMOUNT) },\n    )\n    .build();\n    let exec_request_2 =\n        ExecuteRequestBuilder::standard(*SYSTEM_ADDR, CONTRACT_MINT_PURSE, RuntimeArgs::default())\n            .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request_1).commit().expect_success();\n    builder.exec(exec_request_2).commit().expect_success();\n}\n\n#[ignore]\n#[test]\nfn should_not_allow_non_system_accounts_to_mint() {\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_MINT_PURSE,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    assert!(LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .commit()\n        .is_error());\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/mod.rs",
    "content": "mod account;\nmod add_contract_version;\nmod auction;\nmod create_purse;\nmod dictionary;\nmod generic_hash;\nmod get_arg;\nmod get_block_info;\nmod get_blocktime;\nmod get_call_stack;\nmod get_caller;\nmod get_phase;\nmod list_authorization_keys;\nmod list_named_keys;\nmod main_purse;\nmod mint_purse;\nmod multisig_authorization;\nmod named_dictionaries;\nmod recover_secp256k1;\nmod revert;\nmod runtime;\nmod subcall;\nmod transfer;\nmod transfer_cached;\nmod verify_signature;\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/multisig_authorization.rs",
    "content": "use casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::{engine_state::Error, execution::ExecError};\nuse casper_types::{\n    account::AccountHash, addressable_entity::Weight, runtime_args, ApiError, RuntimeArgs,\n};\n\nconst CONTRACT_ADD_ASSOCIATED_KEY: &str = \"add_associated_key.wasm\";\nconst CONTRACT_MULTISIG_AUTHORIZATION: &str = \"multisig_authorization.wasm\";\nconst CONTRACT_KEY: &str = \"contract\";\n\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_ACCOUNT: &str = \"account\";\nconst ARG_WEIGHT: &str = \"weight\";\nconst DEFAULT_WEIGHT: Weight = Weight::new(1);\nconst ENTRYPOINT_A: &str = \"entrypoint_a\";\nconst ENTRYPOINT_B: &str = \"entrypoint_b\";\n\nconst ROLE_A_KEYS: [AccountHash; 3] = [\n    AccountHash::new([1; 32]),\n    AccountHash::new([2; 32]),\n    AccountHash::new([3; 32]),\n];\n\nconst ROLE_B_KEYS: [AccountHash; 3] = [\n    AccountHash::new([4; 32]),\n    AccountHash::new([5; 32]),\n    AccountHash::new([6; 32]),\n];\n\nconst USER_ERROR_PERMISSION_DENIED: u16 = 0;\n\n#[ignore]\n#[test]\nfn should_verify_multisig_authorization_key_roles() {\n    // Role A tests\n    assert!(\n        !test_multisig_auth(\n            *DEFAULT_ACCOUNT_ADDR,\n            ENTRYPOINT_A,\n            &[*DEFAULT_ACCOUNT_ADDR,]\n        ),\n        \"entrypoint A does not work with identity key\"\n    );\n    assert!(\n        test_multisig_auth(\n            *DEFAULT_ACCOUNT_ADDR,\n            ENTRYPOINT_A,\n            &[*DEFAULT_ACCOUNT_ADDR, ROLE_A_KEYS[0],]\n        ),\n        \"entrypoint A works with addional role A keys\"\n    );\n    assert!(\n        !test_multisig_auth(\n            *DEFAULT_ACCOUNT_ADDR,\n            ENTRYPOINT_A,\n            &[*DEFAULT_ACCOUNT_ADDR, ROLE_B_KEYS[0],]\n        ),\n        \"entrypoint A does not allow role B key\"\n    );\n    assert!(\n        
!test_multisig_auth(\n            *DEFAULT_ACCOUNT_ADDR,\n            ENTRYPOINT_A,\n            &[\n                *DEFAULT_ACCOUNT_ADDR,\n                ROLE_B_KEYS[2],\n                ROLE_B_KEYS[1],\n                ROLE_B_KEYS[0],\n            ]\n        ),\n        \"entrypoint A does not allow role B keys\"\n    );\n    assert!(\n        test_multisig_auth(\n            *DEFAULT_ACCOUNT_ADDR,\n            ENTRYPOINT_A,\n            &[\n                *DEFAULT_ACCOUNT_ADDR,\n                ROLE_A_KEYS[2],\n                ROLE_A_KEYS[1],\n                ROLE_A_KEYS[0],\n            ]\n        ),\n        \"entrypoint A works with all role A keys\"\n    );\n\n    // Role B tests\n    assert!(\n        !test_multisig_auth(\n            *DEFAULT_ACCOUNT_ADDR,\n            ENTRYPOINT_B,\n            &[*DEFAULT_ACCOUNT_ADDR,]\n        ),\n        \"entrypoint B does not work with identity key\"\n    );\n    assert!(\n        test_multisig_auth(\n            *DEFAULT_ACCOUNT_ADDR,\n            ENTRYPOINT_B,\n            &[*DEFAULT_ACCOUNT_ADDR, ROLE_B_KEYS[0],]\n        ),\n        \"entrypoint B works with additional role B keys\"\n    );\n    assert!(\n        !test_multisig_auth(\n            *DEFAULT_ACCOUNT_ADDR,\n            ENTRYPOINT_B,\n            &[*DEFAULT_ACCOUNT_ADDR, ROLE_A_KEYS[0],]\n        ),\n        \"entrypoint B does not allow role A key\"\n    );\n    assert!(\n        !test_multisig_auth(\n            *DEFAULT_ACCOUNT_ADDR,\n            ENTRYPOINT_B,\n            &[\n                *DEFAULT_ACCOUNT_ADDR,\n                ROLE_A_KEYS[2],\n                ROLE_A_KEYS[1],\n                ROLE_A_KEYS[0],\n            ]\n        ),\n        \"entrypoint B does not allow role A keys\"\n    );\n    assert!(\n        test_multisig_auth(\n            *DEFAULT_ACCOUNT_ADDR,\n            ENTRYPOINT_B,\n            &[\n                *DEFAULT_ACCOUNT_ADDR,\n                ROLE_B_KEYS[2],\n                ROLE_B_KEYS[1],\n                
ROLE_B_KEYS[0],\n            ]\n        ),\n        \"entrypoint B works with all role B keys\"\n    );\n}\n\nfn test_multisig_auth(\n    caller: AccountHash,\n    entry_point: &str,\n    authorization_keys: &[AccountHash],\n) -> bool {\n    let mut builder = setup();\n    let session_args = runtime_args! {};\n    let payment_args = runtime_args! {\n        ARG_AMOUNT => *DEFAULT_PAYMENT\n    };\n    let deploy_hash = [42; 32];\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(caller)\n        .with_stored_session_named_key(CONTRACT_KEY, entry_point, session_args)\n        .with_standard_payment(payment_args)\n        .with_authorization_keys(authorization_keys)\n        .with_deploy_hash(deploy_hash)\n        .build();\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n    builder.exec(exec_request).commit();\n\n    match builder.get_error() {\n        Some(Error::Exec(ExecError::Revert(ApiError::User(USER_ERROR_PERMISSION_DENIED)))) => false,\n        Some(error) => panic!(\"Unexpected error {:?}\", error),\n        None => {\n            // Success\n            true\n        }\n    }\n}\n\nfn setup() -> LmdbWasmTestBuilder {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    for account in ROLE_A_KEYS.iter().chain(&ROLE_B_KEYS) {\n        let add_key_request = {\n            let session_args = runtime_args! 
{\n                ARG_ACCOUNT => *account,\n                ARG_WEIGHT => DEFAULT_WEIGHT,\n            };\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_ADD_ASSOCIATED_KEY,\n                session_args,\n            )\n            .build()\n        };\n\n        builder.exec(add_key_request).expect_success().commit();\n    }\n    let install_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_MULTISIG_AUTHORIZATION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(install_request).expect_success().commit();\n\n    builder\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/named_dictionaries.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::runtime_args;\nuse rand::{rngs::StdRng, Rng, SeedableRng};\n\n#[ignore]\n#[test]\nfn named_dictionaries_should_work_as_expected() {\n    // Types from `smart_contracts/contracts/test/named-dictionary-test/src/main.rs`.\n    type DictIndex = u8;\n    type KeySeed = u8;\n    type Value = u8;\n\n    let mut rng = StdRng::seed_from_u64(0);\n\n    let puts: Vec<(DictIndex, KeySeed, Value)> = (0..1_000)\n        .map(|_| (rng.gen_range(0..9), rng.gen_range(0..20), rng.gen()))\n        .collect();\n\n    let builder = &mut LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n    builder\n        .exec(\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                \"named-dictionary-test.wasm\",\n                runtime_args! { \"puts\" => puts },\n            )\n            .build(),\n        )\n        .expect_success();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/recover_secp256k1.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{\n    bytesrepr::{Bytes, ToBytes},\n    runtime_args, PublicKey, SecretKey, Signature,\n};\n\nconst RECOVER_SECP256K1_WASM: &str = \"recover_secp256k1.wasm\";\n\n#[ignore]\n#[test]\nfn should_recover_secp256k1() {\n    let message = String::from(\"Recovery test\");\n    let message_bytes = message.as_bytes();\n    let signing_key = SecretKey::generate_secp256k1().unwrap();\n    let public_key = PublicKey::from(&signing_key);\n\n    let (signature, recovery_id) = match signing_key {\n        SecretKey::Secp256k1(signing_key) => signing_key.sign_recoverable(message_bytes).unwrap(),\n        _ => panic!(\"PK recovery mechanism only works with Secp256k1 keys\"),\n    };\n\n    let signature = Signature::Secp256k1(signature);\n    let signature_bytes: Bytes = signature.to_bytes().unwrap().into();\n    let recovery_id = recovery_id.to_byte();\n\n    LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                RECOVER_SECP256K1_WASM,\n                runtime_args! 
{\n                    \"message\" => message,\n                    \"signature_bytes\" => signature_bytes,\n                    \"recovery_id\" => recovery_id,\n                    \"expected\" => public_key\n                },\n            )\n            .build(),\n        )\n        .expect_success()\n        .commit();\n}\n\n#[ignore]\n#[test]\nfn should_fail_recover_secp256k1_recovery_id_out_of_range() {\n    let message = String::from(\"Recovery test\");\n    let message_bytes = message.as_bytes();\n    let signing_key = SecretKey::generate_secp256k1().unwrap();\n    let public_key = PublicKey::from(&signing_key);\n\n    let (signature, _) = match signing_key {\n        SecretKey::Secp256k1(signing_key) => signing_key.sign_recoverable(message_bytes).unwrap(),\n        _ => panic!(\"PK recovery mechanism only works with Secp256k1 keys\"),\n    };\n\n    let signature = Signature::Secp256k1(signature);\n    let signature_bytes: Bytes = signature.to_bytes().unwrap().into();\n    let recovery_id = 4;\n\n    LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                RECOVER_SECP256K1_WASM,\n                runtime_args! 
{\n                    \"message\" => message,\n                    \"signature_bytes\" => signature_bytes,\n                    \"recovery_id\" => recovery_id,\n                    \"expected\" => public_key\n                },\n            )\n            .build(),\n        )\n        .expect_failure()\n        .commit();\n}\n\n#[ignore]\n#[test]\nfn should_fail_recover_secp256k1_pk_mismatch() {\n    let message = String::from(\"Recovery test\");\n    let message_bytes = message.as_bytes();\n    let signing_key = SecretKey::generate_secp256k1().unwrap();\n\n    let (signature, _) = match signing_key {\n        SecretKey::Secp256k1(signing_key) => signing_key.sign_recoverable(message_bytes).unwrap(),\n        _ => panic!(\"PK recovery mechanism only works with Secp256k1 keys\"),\n    };\n\n    let signature = Signature::Secp256k1(signature);\n    let signature_bytes: Bytes = signature.to_bytes().unwrap().into();\n    let recovery_id = 4;\n\n    LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                RECOVER_SECP256K1_WASM,\n                runtime_args! {\n                    \"message\" => message,\n                    \"signature_bytes\" => signature_bytes,\n                    \"recovery_id\" => recovery_id,\n                    \"expected\" => PublicKey::System\n                },\n            )\n            .build(),\n        )\n        .expect_failure()\n        .commit();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/revert.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::RuntimeArgs;\n\nconst REVERT_WASM: &str = \"revert.wasm\";\n\n#[ignore]\n#[test]\nfn should_revert() {\n    let exec_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, REVERT_WASM, RuntimeArgs::default())\n            .build();\n    LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .commit()\n        .is_error();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/runtime.rs",
    "content": "use std::collections::HashSet;\n\nuse rand::Rng;\n\nuse casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::{\n    runtime::{cryptography, cryptography::DIGEST_LENGTH},\n    runtime_context::RANDOM_BYTES_COUNT,\n};\nuse casper_storage::address_generator::ADDRESS_LENGTH;\nuse casper_types::runtime_args;\n\nconst ARG_BYTES: &str = \"bytes\";\nconst ARG_AMOUNT: &str = \"amount\";\n\nconst BLAKE2B_WASM: &str = \"blake2b.wasm\";\nconst HASH_RESULT: &str = \"hash_result\";\n\nconst RANDOM_BYTES_WASM: &str = \"random_bytes.wasm\";\nconst RANDOM_BYTES_RESULT: &str = \"random_bytes_result\";\n\nconst RANDOM_BYTES_PAYMENT_WASM: &str = \"random_bytes_payment.wasm\";\nconst RANDOM_BYTES_PAYMENT_RESULT: &str = \"random_bytes_payment_result\";\n\nfn get_value<const COUNT: usize>(builder: &LmdbWasmTestBuilder, result: &str) -> [u8; COUNT] {\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let uref = account.named_keys().get(result).expect(\"should have value\");\n\n    builder\n        .query(None, *uref, &[])\n        .expect(\"should query\")\n        .into_cl_value()\n        .expect(\"should be CLValue\")\n        .into_t()\n        .expect(\"should convert\")\n}\n\n#[ignore]\n#[test]\nfn should_return_different_random_bytes_on_different_phases() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let mut rng = rand::thread_rng();\n    let deploy_hash = rng.gen();\n    let address = *DEFAULT_ACCOUNT_ADDR;\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(address)\n        .with_session_code(RANDOM_BYTES_WASM, runtime_args! {})\n        .with_payment_code(\n            RANDOM_BYTES_PAYMENT_WASM,\n            runtime_args! 
{\n                ARG_AMOUNT => *DEFAULT_PAYMENT\n            },\n        )\n        .with_authorization_keys(&[address])\n        .with_deploy_hash(deploy_hash)\n        .build();\n    let execute_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(execute_request).commit().expect_success();\n\n    let session_generated_bytes = get_value::<RANDOM_BYTES_COUNT>(&builder, RANDOM_BYTES_RESULT);\n    let payment_generated_bytes =\n        get_value::<ADDRESS_LENGTH>(&builder, RANDOM_BYTES_PAYMENT_RESULT);\n\n    assert_ne!(session_generated_bytes, payment_generated_bytes)\n}\n\n#[ignore]\n#[test]\nfn should_return_different_random_bytes_on_each_call() {\n    const RUNS: usize = 10;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let all_addresses: HashSet<_> = (0..RUNS)\n        .map(|_| {\n            let exec_request = ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                RANDOM_BYTES_WASM,\n                runtime_args! {},\n            )\n            .build();\n\n            builder.exec(exec_request).commit().expect_success();\n\n            get_value::<RANDOM_BYTES_COUNT>(&builder, RANDOM_BYTES_RESULT)\n        })\n        .collect();\n\n    // Assert that each address is unique.\n    assert_eq!(all_addresses.len(), RUNS)\n}\n\n#[ignore]\n#[test]\nfn should_hash() {\n    const INPUT_LENGTH: usize = 32;\n    const RUNS: usize = 100;\n\n    let mut rng = rand::thread_rng();\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    for _ in 0..RUNS {\n        let input: [u8; INPUT_LENGTH] = rng.gen();\n\n        let exec_request = ExecuteRequestBuilder::standard(\n            *DEFAULT_ACCOUNT_ADDR,\n            BLAKE2B_WASM,\n            runtime_args! 
{\n                ARG_BYTES => input\n            },\n        )\n        .build();\n\n        builder.exec(exec_request).commit().expect_success();\n\n        let digest = get_value::<DIGEST_LENGTH>(&builder, HASH_RESULT);\n        let expected_digest = cryptography::blake2b(input);\n        assert_eq!(digest, expected_digest);\n    }\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/subcall.rs",
    "content": "use num_traits::cast::AsPrimitive;\n\nuse casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{runtime_args, RuntimeArgs, StorageCosts, ENTITY_INITIAL_VERSION, U512};\n\nconst ARG_TARGET: &str = \"target_contract\";\nconst ARG_GAS_AMOUNT: &str = \"gas_amount\";\nconst ARG_METHOD_NAME: &str = \"method_name\";\n\n#[ignore]\n#[test]\nfn should_enforce_subcall_consumption() {\n    const CONTRACT_NAME: &str = \"measure_gas_subcall.wasm\";\n    const DO_NOTHING: &str = \"do-nothing\";\n    const DO_SOMETHING: &str = \"do-something\";\n    const NO_SUBCALL: &str = \"no-subcall\";\n\n    let do_nothing_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_NAME,\n        runtime_args! { ARG_TARGET => DO_NOTHING },\n    )\n    .build();\n\n    let do_something_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_NAME,\n        runtime_args! { ARG_TARGET => DO_SOMETHING },\n    )\n    .build();\n\n    let no_subcall_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_NAME,\n        runtime_args! 
{ ARG_TARGET => NO_SUBCALL },\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(do_nothing_request).expect_success().commit();\n\n    builder.exec(do_something_request).expect_success().commit();\n\n    builder.exec(no_subcall_request).expect_success().commit();\n\n    let do_nothing_consumed = builder.exec_consumed(0);\n\n    let do_something_consumed = builder.exec_consumed(1);\n\n    let no_subcall_consumed = builder.exec_consumed(2);\n\n    assert_ne!(\n        do_nothing_consumed, do_something_consumed,\n        \"should have different consumeds\"\n    );\n\n    assert_ne!(\n        no_subcall_consumed, do_something_consumed,\n        \"should have different consumeds\"\n    );\n\n    assert!(\n        do_nothing_consumed < do_something_consumed,\n        \"should consume more to do something via subcall\"\n    );\n\n    assert!(\n        no_subcall_consumed < do_nothing_consumed,\n        \"do nothing in a subcall should consume more than no subcall\"\n    );\n}\n\n#[ignore]\n#[test]\nfn should_add_all_gas_consumed_for_subcall() {\n    const CONTRACT_NAME: &str = \"add_gas_subcall.wasm\";\n    const ADD_GAS_FROM_SESSION: &str = \"add-gas-from-session\";\n    const ADD_GAS_VIA_SUBCALL: &str = \"add-gas-via-subcall\";\n\n    let gas_to_add: U512 = U512::from(1024);\n\n    let gas_to_add_as_arg: u32 = gas_to_add.as_();\n\n    let add_zero_gas_from_session_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_NAME,\n        runtime_args! {\n            ARG_GAS_AMOUNT => 0,\n            ARG_METHOD_NAME => ADD_GAS_FROM_SESSION,\n        },\n    )\n    .build();\n\n    let add_some_gas_from_session_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_NAME,\n        runtime_args! 
{\n            ARG_GAS_AMOUNT => gas_to_add_as_arg,\n            ARG_METHOD_NAME => ADD_GAS_FROM_SESSION,\n        },\n    )\n    .build();\n\n    let add_zero_gas_via_subcall_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_NAME,\n        runtime_args! {\n            ARG_GAS_AMOUNT => 0,\n            ARG_METHOD_NAME => ADD_GAS_VIA_SUBCALL,\n        },\n    )\n    .build();\n\n    let add_some_gas_via_subcall_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_NAME,\n        runtime_args! {\n            ARG_GAS_AMOUNT => gas_to_add_as_arg,\n            ARG_METHOD_NAME => ADD_GAS_VIA_SUBCALL,\n        },\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder\n        .exec(add_zero_gas_from_session_request)\n        .expect_success()\n        .commit();\n    builder\n        .exec(add_some_gas_from_session_request)\n        .expect_success()\n        .commit();\n    builder\n        .exec(add_zero_gas_via_subcall_request)\n        .expect_success()\n        .commit();\n    builder\n        .exec(add_some_gas_via_subcall_request)\n        .expect_success()\n        .commit();\n\n    let add_zero_gas_from_session_consumed = builder.exec_consumed(0);\n    let add_some_gas_from_session_consumed = builder.exec_consumed(1);\n    let add_zero_gas_via_subcall_consumed = builder.exec_consumed(2);\n    let add_some_gas_via_subcall_consumed = builder.exec_consumed(3);\n\n    let expected_gas = U512::from(StorageCosts::default().gas_per_byte()) * gas_to_add;\n    assert!(\n        add_zero_gas_from_session_consumed.value() < add_zero_gas_via_subcall_consumed.value(),\n        \"subcall expected to consume more gas due to storing contract\"\n    );\n    assert!(add_some_gas_from_session_consumed.value() > expected_gas);\n    assert!(add_some_gas_via_subcall_consumed.value() > 
expected_gas);\n}\n\n#[ignore]\n#[test]\nfn expensive_subcall_should_consume_more() {\n    const DO_NOTHING: &str = \"do_nothing_stored.wasm\";\n    const EXPENSIVE_CALCULATION: &str = \"expensive_calculation.wasm\";\n    const DO_NOTHING_PACKAGE_HASH_KEY_NAME: &str = \"do_nothing_package_hash\";\n    const EXPENSIVE_CALCULATION_KEY: &str = \"expensive-calculation\";\n    const ENTRY_FUNCTION_NAME: &str = \"delegate\";\n\n    let store_do_nothing_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, DO_NOTHING, RuntimeArgs::default())\n            .build();\n\n    let store_calculation_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        EXPENSIVE_CALCULATION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    // store the contracts first\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder\n        .exec(store_do_nothing_request)\n        .expect_success()\n        .commit();\n\n    builder\n        .exec(store_calculation_request)\n        .expect_success()\n        .commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should get account\");\n\n    let expensive_calculation_contract_hash = account\n        .named_keys()\n        .get(EXPENSIVE_CALCULATION_KEY)\n        .expect(\"should get expensive_calculation contract hash\")\n        .into_entity_hash()\n        .expect(\"should get hash\");\n\n    // execute the contracts via subcalls\n\n    let call_do_nothing_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        DO_NOTHING_PACKAGE_HASH_KEY_NAME,\n        Some(ENTITY_INITIAL_VERSION),\n        ENTRY_FUNCTION_NAME,\n        RuntimeArgs::new(),\n    )\n    .build();\n\n    let call_expensive_calculation_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n   
     expensive_calculation_contract_hash,\n        \"calculate\",\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder\n        .exec(call_do_nothing_request)\n        .expect_success()\n        .commit();\n\n    builder\n        .exec(call_expensive_calculation_request)\n        .expect_success()\n        .commit();\n\n    let do_nothing_consumed = builder.exec_consumed(2);\n\n    let expensive_calculation_consumed = builder.exec_consumed(3);\n\n    assert!(\n        do_nothing_consumed < expensive_calculation_consumed,\n        \"calculation consumed should be higher than doing nothing consumed\"\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/transfer.rs",
    "content": "use assert_matches::assert_matches;\nuse once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_ACCOUNT_INITIAL_BALANCE, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST,\n    MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse casper_execution_engine::{engine_state::Error as EngineError, execution::ExecError};\nuse casper_types::{\n    account::AccountHash,\n    runtime_args,\n    system::{handle_payment, mint},\n    ApiError, PublicKey, SecretKey, U512,\n};\n\nconst CONTRACT_TRANSFER_PURSE_TO_ACCOUNT: &str = \"transfer_purse_to_account.wasm\";\nconst CONTRACT_TRANSFER_TO_ACCOUNT: &str = \"transfer_to_account_u512.wasm\";\nconst CONTRACT_TRANSFER_TO_PUBLIC_KEY: &str = \"transfer_to_public_key.wasm\";\nconst CONTRACT_TRANSFER_PURSE_TO_PUBLIC_KEY: &str = \"transfer_purse_to_public_key.wasm\";\nconst CONTRACT_TRANSFER_TO_NAMED_PURSE: &str = \"transfer_to_named_purse.wasm\";\n\nstatic TRANSFER_1_AMOUNT: Lazy<U512> =\n    Lazy::new(|| U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) + 1000);\nstatic TRANSFER_2_AMOUNT: Lazy<U512> = Lazy::new(|| U512::from(750));\nstatic TRANSFER_2_AMOUNT_WITH_ADV: Lazy<U512> = Lazy::new(|| *DEFAULT_PAYMENT + *TRANSFER_2_AMOUNT);\nstatic TRANSFER_TOO_MUCH: Lazy<U512> = Lazy::new(|| U512::from(u64::MAX));\nstatic ACCOUNT_1_INITIAL_BALANCE: Lazy<U512> =\n    Lazy::new(|| U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE));\n\nstatic ACCOUNT_1_SECRET_KEY: Lazy<SecretKey> =\n    Lazy::new(|| SecretKey::secp256k1_from_bytes([234u8; 32]).unwrap());\nstatic ACCOUNT_1_PUBLIC_KEY: Lazy<PublicKey> =\n    Lazy::new(|| PublicKey::from(&*ACCOUNT_1_SECRET_KEY));\nstatic ACCOUNT_1_ADDR: Lazy<AccountHash> = Lazy::new(|| ACCOUNT_1_PUBLIC_KEY.to_account_hash());\n\nstatic ACCOUNT_2_SECRET_KEY: Lazy<SecretKey> =\n    Lazy::new(|| SecretKey::secp256k1_from_bytes([210u8; 32]).unwrap());\nstatic ACCOUNT_2_PUBLIC_KEY: Lazy<PublicKey> =\n    Lazy::new(|| 
PublicKey::from(&*ACCOUNT_2_SECRET_KEY));\nstatic ACCOUNT_2_ADDR: Lazy<AccountHash> = Lazy::new(|| ACCOUNT_2_PUBLIC_KEY.to_account_hash());\n\nconst ARG_TARGET: &str = \"target\";\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_SOURCE_PURSE: &str = \"source_purse\";\nconst ARG_PURSE_NAME: &str = \"purse_name\";\nconst TEST_PURSE: &str = \"test_purse\";\n\n#[ignore]\n#[test]\nfn should_transfer_to_account() {\n    let transfer_amount: U512 = *TRANSFER_1_AMOUNT;\n\n    // Run genesis\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should get account\");\n\n    let default_account_purse = default_account.main_purse();\n\n    // Check genesis account balance\n    let initial_account_balance = builder.get_purse_balance(default_account_purse);\n\n    // Exec transfer contract\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{ ARG_TARGET => *ACCOUNT_1_ADDR, ARG_AMOUNT => *TRANSFER_1_AMOUNT },\n    )\n    .build();\n\n    let proposer_reward_starting_balance = builder.get_proposer_purse_balance();\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    // Check genesis account balance\n\n    let modified_balance = builder.get_purse_balance(default_account_purse);\n\n    let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance;\n\n    assert_eq!(\n        modified_balance,\n        initial_account_balance - transaction_fee - transfer_amount\n    );\n\n    let handle_payment = builder.get_handle_payment_contract();\n    let payment_purse = (*handle_payment\n        .named_keys()\n        .get(handle_payment::PAYMENT_PURSE_KEY)\n        .unwrap())\n    .into_uref()\n    .unwrap();\n    assert_eq!(builder.get_purse_balance(payment_purse), U512::zero());\n}\n\n#[ignore]\n#[test]\nfn should_transfer_to_public_key() {\n    let transfer_amount: U512 = *TRANSFER_1_AMOUNT;\n\n    // Run genesis\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should get account\");\n\n    let default_account_purse = default_account.main_purse();\n\n    // Check genesis account balance\n    let initial_account_balance = builder.get_purse_balance(default_account_purse);\n\n    // Exec transfer contract\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_PUBLIC_KEY,\n        runtime_args! 
{ ARG_TARGET => ACCOUNT_1_PUBLIC_KEY.clone(), ARG_AMOUNT => *TRANSFER_1_AMOUNT },\n    )\n    .build();\n\n    let proposer_reward_starting_balance = builder.get_proposer_purse_balance();\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    // Check genesis account balance\n\n    let modified_balance = builder.get_purse_balance(default_account_purse);\n\n    let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance;\n\n    assert_eq!(\n        modified_balance,\n        initial_account_balance - transaction_fee - transfer_amount\n    );\n\n    let handle_payment = builder.get_handle_payment_contract();\n    let payment_purse = (*handle_payment\n        .named_keys()\n        .get(handle_payment::PAYMENT_PURSE_KEY)\n        .unwrap())\n    .into_uref()\n    .unwrap();\n    assert_eq!(builder.get_purse_balance(payment_purse), U512::zero());\n}\n\n#[ignore]\n#[test]\nfn should_transfer_from_purse_to_public_key() {\n    // Run genesis\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    // Create a funded purse, and store it in named keys\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_NAMED_PURSE,\n        runtime_args! 
{\n            ARG_PURSE_NAME => TEST_PURSE,\n            ARG_AMOUNT => *TRANSFER_1_AMOUNT,\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let default_account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should get account\");\n    let default_account_purse = default_account.entity().main_purse();\n\n    // Check genesis account balance\n    let initial_account_balance = builder.get_purse_balance(default_account_purse);\n\n    let test_purse = default_account\n        .named_keys()\n        .get(TEST_PURSE)\n        .unwrap()\n        .into_uref()\n        .expect(\"should have test purse\");\n\n    let test_purse_balanace_before = builder.get_purse_balance(test_purse);\n\n    // Exec transfer contract\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_PURSE_TO_PUBLIC_KEY,\n        runtime_args! {\n            ARG_SOURCE_PURSE => test_purse,\n            ARG_TARGET => ACCOUNT_1_PUBLIC_KEY.clone(),\n            ARG_AMOUNT => *TRANSFER_1_AMOUNT,\n        },\n    )\n    .build();\n\n    let proposer_reward_starting_balance = builder.get_proposer_purse_balance();\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    // Check genesis account balance\n\n    let modified_balance = builder.get_purse_balance(default_account_purse);\n\n    let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance;\n\n    assert_eq!(modified_balance, initial_account_balance - transaction_fee);\n\n    let test_purse_balanace_after = builder.get_purse_balance(test_purse);\n    assert_eq!(\n        test_purse_balanace_after,\n        test_purse_balanace_before - *TRANSFER_1_AMOUNT\n    );\n\n    let handle_payment = builder.get_handle_payment_contract();\n    let payment_purse = (*handle_payment\n        .named_keys()\n        
.get(handle_payment::PAYMENT_PURSE_KEY)\n        .unwrap())\n    .into_uref()\n    .unwrap();\n    assert_eq!(builder.get_purse_balance(payment_purse), U512::zero());\n}\n\n#[ignore]\n#[test]\nfn should_transfer_from_account_to_account() {\n    let initial_genesis_amount: U512 = U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE);\n    let transfer_1_amount: U512 = *TRANSFER_1_AMOUNT;\n    let transfer_2_amount: U512 = *TRANSFER_2_AMOUNT;\n\n    // Run genesis\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    let builder = builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should get account\");\n\n    let default_account_purse = default_account.main_purse();\n\n    // Check genesis account balance\n    let genesis_balance = builder.get_purse_balance(default_account_purse);\n\n    assert_eq!(genesis_balance, initial_genesis_amount,);\n\n    // Exec transfer 1 contract\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{ ARG_TARGET => *ACCOUNT_1_ADDR, ARG_AMOUNT => *TRANSFER_1_AMOUNT },\n    )\n    .build();\n\n    let proposer_reward_starting_balance_1 = builder.get_proposer_purse_balance();\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let modified_balance = builder.get_purse_balance(default_account_purse);\n\n    let transaction_fee_1 =\n        builder.get_proposer_purse_balance() - proposer_reward_starting_balance_1;\n\n    let expected_balance = initial_genesis_amount - transaction_fee_1 - transfer_1_amount;\n\n    assert_eq!(modified_balance, expected_balance);\n\n    // Check account 1 balance\n    let account_1 = builder\n        .get_entity_by_account_hash(*ACCOUNT_1_ADDR)\n        .expect(\"should have account 1\");\n    let account_1_purse = account_1.main_purse();\n    let account_1_balance = builder.get_purse_balance(account_1_purse);\n\n    assert_eq!(account_1_balance, transfer_1_amount,);\n\n    // Exec transfer 2 contract\n\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *ACCOUNT_1_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{ ARG_TARGET => *ACCOUNT_2_ADDR, ARG_AMOUNT => *TRANSFER_2_AMOUNT },\n    )\n    .build();\n\n    let proposer_reward_starting_balance_2 = builder.get_proposer_purse_balance();\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let transaction_fee_2 =\n        builder.get_proposer_purse_balance() - proposer_reward_starting_balance_2;\n\n    let account_2 = builder\n        .get_entity_by_account_hash(*ACCOUNT_2_ADDR)\n        .expect(\"should have account 2\");\n\n    let account_2_purse = account_2.main_purse();\n\n    // Check account 1 balance\n\n    let account_1_balance = builder.get_purse_balance(account_1_purse);\n\n    assert_eq!(\n        account_1_balance,\n        transfer_1_amount - transaction_fee_2 - transfer_2_amount\n    );\n\n    let account_2_balance = builder.get_purse_balance(account_2_purse);\n\n    assert_eq!(account_2_balance, transfer_2_amount,);\n}\n\n#[ignore]\n#[test]\nfn should_transfer_to_existing_account() {\n    let initial_genesis_amount: U512 = U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE);\n    let transfer_1_amount: U512 = *TRANSFER_1_AMOUNT;\n    let transfer_2_amount: U512 = *TRANSFER_2_AMOUNT;\n\n    // Run genesis\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    let builder = builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should get account\");\n\n    let default_account_purse = default_account.main_purse();\n\n    // Check genesis account balance\n    let genesis_balance = builder.get_purse_balance(default_account_purse);\n\n    assert_eq!(genesis_balance, initial_genesis_amount,);\n\n    // Exec transfer 1 contract\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{ ARG_TARGET => *ACCOUNT_1_ADDR, ARG_AMOUNT => *TRANSFER_1_AMOUNT },\n    )\n    .build();\n\n    let proposer_reward_starting_balance_1 = builder.get_proposer_purse_balance();\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    // Exec transfer contract\n\n    let account_1 = builder\n        .get_entity_by_account_hash(*ACCOUNT_1_ADDR)\n        .expect(\"should get account\");\n\n    let account_1_purse = account_1.main_purse();\n\n    // Check genesis account balance\n\n    let genesis_balance = builder.get_purse_balance(default_account_purse);\n\n    let transaction_fee_1 =\n        builder.get_proposer_purse_balance() - proposer_reward_starting_balance_1;\n\n    assert_eq!(\n        genesis_balance,\n        initial_genesis_amount - transaction_fee_1 - transfer_1_amount\n    );\n\n    // Check account 1 balance\n\n    let account_1_balance = builder.get_purse_balance(account_1_purse);\n\n    assert_eq!(account_1_balance, transfer_1_amount,);\n\n    // Exec transfer contract\n\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *ACCOUNT_1_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{ ARG_TARGET => *ACCOUNT_2_ADDR, ARG_AMOUNT => *TRANSFER_2_AMOUNT },\n    )\n    .build();\n\n    let proposer_reward_starting_balance_2 = builder.get_proposer_purse_balance();\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let account_2 = builder\n        .get_entity_by_account_hash(*ACCOUNT_2_ADDR)\n        .expect(\"should get account\");\n\n    let account_2_purse = account_2.main_purse();\n\n    // Check account 1 balance\n\n    let account_1_balance = builder.get_purse_balance(account_1_purse);\n\n    let transaction_fee_2 =\n        builder.get_proposer_purse_balance() - proposer_reward_starting_balance_2;\n\n    assert_eq!(\n        account_1_balance,\n        transfer_1_amount - transaction_fee_2 - transfer_2_amount,\n    );\n\n    // Check account 2 balance\n\n    let account_2_balance_transform = builder.get_purse_balance(account_2_purse);\n\n    assert_eq!(account_2_balance_transform, transfer_2_amount);\n}\n\n#[ignore]\n#[test]\nfn should_fail_when_insufficient_funds() {\n    // Run genesis\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! { ARG_TARGET => *ACCOUNT_1_ADDR, ARG_AMOUNT => *TRANSFER_1_AMOUNT },\n    )\n    .build();\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *ACCOUNT_1_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! { ARG_TARGET => *ACCOUNT_2_ADDR, ARG_AMOUNT => *TRANSFER_2_AMOUNT_WITH_ADV },\n    )\n    .build();\n\n    let exec_request_3 = ExecuteRequestBuilder::standard(\n        *ACCOUNT_1_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{ ARG_TARGET => *ACCOUNT_2_ADDR, ARG_AMOUNT => *TRANSFER_TOO_MUCH },\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        // Exec transfer contract\n        .exec(exec_request_1)\n        .expect_success()\n        .commit()\n        // Exec transfer contract\n        .exec(exec_request_2)\n        .expect_success()\n        .commit()\n        // Exec transfer contract\n        .exec(exec_request_3)\n        .commit();\n\n    let exec_result = builder\n        .get_exec_result_owned(2)\n        .expect(\"should have exec response\");\n    let exec_result = exec_result.error().expect(\"should have error\");\n    let error = assert_matches!(exec_result, EngineError::Exec(ExecError::Revert(e)) => e, \"{:?}\", exec_result);\n    assert_eq!(*error, ApiError::from(mint::Error::InsufficientFunds));\n}\n\n#[ignore]\n#[allow(unused)]\n#[test]\nfn should_transfer_total_amount() {\n    // NOTE: as of protocol version 2.0.0 the execution engine is no longer responsible\n    // for payment, refund, or fee handling...thus\n    // full transactions executed via the node are subject to payment, fee, refund,\n    // etc based upon chainspec settings, but when using the EE directly as is done\n    // in this test, there is no charge and all transfers are at face value.\n    fn balance_checker(bldr: &mut LmdbWasmTestBuilder, account_hash: AccountHash) -> U512 {\n        let entity = bldr\n            .get_entity_by_account_hash(account_hash)\n            .expect(\"should have account entity\");\n        let entity_main_purse = entity.main_purse();\n        bldr.get_purse_balance(entity_main_purse)\n    }\n    fn commit(bldr: &mut LmdbWasmTestBuilder, req_bldr: ExecuteRequestBuilder) {\n        let req = req_bldr.build();\n        bldr.exec(req).expect_success().commit();\n    }\n    fn genesis() -> LmdbWasmTestBuilder {\n        let mut builder = LmdbWasmTestBuilder::default();\n       
 builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n        builder\n    }\n\n    let mut builder = genesis();\n\n    let balance_x_initial = balance_checker(&mut builder, *DEFAULT_ACCOUNT_ADDR);\n    let amount_to_fund = *ACCOUNT_1_INITIAL_BALANCE;\n\n    // fund account 1 from default account\n    commit(\n        &mut builder,\n        ExecuteRequestBuilder::standard(\n            *DEFAULT_ACCOUNT_ADDR,\n            CONTRACT_TRANSFER_PURSE_TO_ACCOUNT,\n            runtime_args! { \"target\" => *ACCOUNT_1_ADDR, \"amount\" => amount_to_fund },\n        ),\n    );\n    let balance_x_out = balance_checker(&mut builder, *DEFAULT_ACCOUNT_ADDR);\n    assert_eq!(\n        balance_x_initial - amount_to_fund,\n        balance_x_out,\n        \"funded amount should be deducted from funder's balance\"\n    );\n    let balance_y_initial = balance_checker(&mut builder, *ACCOUNT_1_ADDR);\n    assert_eq!(\n        amount_to_fund, balance_y_initial,\n        \"receiving account's balance should match funding amount\"\n    );\n    let diff = balance_x_initial - balance_y_initial;\n    assert_eq!(\n        diff, balance_x_out,\n        \"funder's balance difference should equal funded amount\"\n    );\n\n    // transfer it to a different account\n    commit(\n        &mut builder,\n        ExecuteRequestBuilder::standard(\n            *ACCOUNT_1_ADDR,\n            CONTRACT_TRANSFER_PURSE_TO_ACCOUNT,\n            runtime_args! 
{ \"target\" => *ACCOUNT_2_ADDR, \"amount\" => balance_y_initial },\n        ),\n    );\n    let balance_y_out = balance_checker(&mut builder, *ACCOUNT_1_ADDR);\n    assert_eq!(\n        balance_y_initial - amount_to_fund,\n        balance_y_out,\n        \"funded amount should be deducted from funder's balance\"\n    );\n    let balance_z_initial = balance_checker(&mut builder, *ACCOUNT_2_ADDR);\n    assert_eq!(\n        amount_to_fund, balance_z_initial,\n        \"receiving account's balance should match funding amount\"\n    );\n    let diff = balance_y_initial - balance_z_initial;\n    assert_eq!(\n        diff, balance_y_out,\n        \"funder's balance difference should equal funded amount\"\n    );\n\n    // transfer it back to originator\n    commit(\n        &mut builder,\n        ExecuteRequestBuilder::standard(\n            *ACCOUNT_2_ADDR,\n            CONTRACT_TRANSFER_PURSE_TO_ACCOUNT,\n            runtime_args! { \"target\" => *DEFAULT_ACCOUNT_ADDR, \"amount\" => balance_z_initial },\n        ),\n    );\n    let balance_x_in = balance_checker(&mut builder, *DEFAULT_ACCOUNT_ADDR);\n    let balance_z_out = balance_checker(&mut builder, *ACCOUNT_2_ADDR);\n    assert_eq!(\n        U512::zero(),\n        balance_z_out,\n        \"trampoline account should be zero'd\"\n    );\n    assert_eq!(\n        balance_x_initial, balance_x_in,\n        \"original balance should be restored\"\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/transfer_cached.rs",
    "content": "use once_cell::sync::Lazy;\nuse tempfile::TempDir;\n\nuse casper_engine_test_support::{\n    LmdbWasmTestBuilder, TransferRequestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_ACCOUNT_INITIAL_BALANCE, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{account::AccountHash, MintCosts, PublicKey, SecretKey, U512};\n\n/// The maximum amount of motes that payment code execution can cost.\nconst TRANSFER_MOTES_AMOUNT: u64 = 2_500_000_000;\n\nstatic TRANSFER_AMOUNT: Lazy<U512> = Lazy::new(|| U512::from(TRANSFER_MOTES_AMOUNT));\n\nstatic ACCOUNT_1_SECRET_KEY: Lazy<SecretKey> =\n    Lazy::new(|| SecretKey::secp256k1_from_bytes([234u8; 32]).unwrap());\nstatic ACCOUNT_1_PUBLIC_KEY: Lazy<PublicKey> =\n    Lazy::new(|| PublicKey::from(&*ACCOUNT_1_SECRET_KEY));\nstatic ACCOUNT_1_ADDR: Lazy<AccountHash> = Lazy::new(|| ACCOUNT_1_PUBLIC_KEY.to_account_hash());\n\nstatic ACCOUNT_2_SECRET_KEY: Lazy<SecretKey> =\n    Lazy::new(|| SecretKey::secp256k1_from_bytes([210u8; 32]).unwrap());\nstatic ACCOUNT_2_PUBLIC_KEY: Lazy<PublicKey> =\n    Lazy::new(|| PublicKey::from(&*ACCOUNT_2_SECRET_KEY));\nstatic ACCOUNT_2_ADDR: Lazy<AccountHash> = Lazy::new(|| ACCOUNT_2_PUBLIC_KEY.to_account_hash());\n\n#[ignore]\n#[test]\nfn should_transfer_to_account() {\n    let data_dir = TempDir::new().expect(\"should create temp dir\");\n    let mut builder = LmdbWasmTestBuilder::new(data_dir.path());\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let pre_state_hash = builder.get_post_state_hash();\n\n    // Default account to account 1\n    let transfer_request = TransferRequestBuilder::new(1, *ACCOUNT_1_ADDR).build();\n    builder\n        .transfer_and_commit(transfer_request)\n        .expect_success();\n\n    assert_ne!(\n        pre_state_hash,\n        builder.get_post_state_hash(),\n        \"post state hash didn't change...\"\n    );\n\n    let default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should get default 
account\");\n\n    let account1 = builder\n        .get_entity_by_account_hash(*ACCOUNT_1_ADDR)\n        .expect(\"should get account 1\");\n\n    let default_account_balance = builder.get_purse_balance(default_account.main_purse());\n    let default_expected_balance = U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE) - (U512::one());\n    assert_eq!(\n        default_account_balance, default_expected_balance,\n        \"default account balance should reflect the transfer\",\n    );\n\n    let account_1_balance = builder.get_purse_balance(account1.main_purse());\n    assert_eq!(\n        account_1_balance,\n        U512::one(),\n        \"account 1 balance should have been exactly one (1)\"\n    );\n}\n\n#[ignore]\n#[test]\nfn should_transfer_multiple_times() {\n    let data_dir = TempDir::new().expect(\"should create temp dir\");\n    let mut builder = LmdbWasmTestBuilder::new(data_dir.path());\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let pre_state_hash = builder.get_post_state_hash();\n\n    // Default account to account 1\n    // We must first transfer the amount account 1 will transfer to account 2, along with the fee\n    // account 1 will need to pay for that transfer.\n    let transfer_request = TransferRequestBuilder::new(\n        *TRANSFER_AMOUNT + MintCosts::default().transfer,\n        *ACCOUNT_1_ADDR,\n    )\n    .build();\n    builder\n        .transfer_and_commit(transfer_request)\n        .expect_success();\n\n    let transfer_request = TransferRequestBuilder::new(*TRANSFER_AMOUNT, *ACCOUNT_2_ADDR)\n        .with_initiator(*ACCOUNT_1_ADDR)\n        .build();\n    builder\n        .transfer_and_commit(transfer_request)\n        .expect_success();\n\n    // Double spend test for account 1\n    let transfer_request = TransferRequestBuilder::new(*TRANSFER_AMOUNT, *ACCOUNT_2_ADDR)\n        .with_initiator(*ACCOUNT_1_ADDR)\n        .build();\n    builder\n        .transfer_and_commit(transfer_request)\n        .expect_failure();\n\n    
assert_ne!(\n        pre_state_hash,\n        builder.get_post_state_hash(),\n        \"post state hash didn't change...\"\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_api/verify_signature.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{\n    bytesrepr::{Bytes, ToBytes},\n    runtime_args, PublicKey, SecretKey, Signature,\n};\nuse ed25519_dalek::Signer;\n\nconst VERIFY_SIGNATURE_WASM: &str = \"verify_signature.wasm\";\n\n#[ignore]\n#[test]\nfn should_verify_secp256k1_signature() {\n    let message = String::from(\"Recovery test\");\n    let message_bytes = message.as_bytes();\n    let signing_key = SecretKey::generate_secp256k1().unwrap();\n    let public_key = PublicKey::from(&signing_key);\n\n    let (signature, _) = match signing_key {\n        SecretKey::Secp256k1(signing_key) => signing_key.sign_recoverable(message_bytes).unwrap(),\n        _ => panic!(\"Expected a Secp256k1 key\"),\n    };\n\n    let signature = Signature::Secp256k1(signature);\n    let signature_bytes: Bytes = signature.to_bytes().unwrap().into();\n\n    LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                VERIFY_SIGNATURE_WASM,\n                runtime_args! 
{\n                    \"message\" => message,\n                    \"signature_bytes\" => signature_bytes,\n                    \"public_key\" => public_key,\n                },\n            )\n            .build(),\n        )\n        .expect_success()\n        .commit();\n}\n\n#[ignore]\n#[test]\nfn should_verify_ed25519_signature() {\n    let message = String::from(\"Recovery test\");\n    let message_bytes = message.as_bytes();\n    let signing_key = SecretKey::generate_ed25519().unwrap();\n    let public_key = PublicKey::from(&signing_key);\n\n    let signature = match signing_key {\n        SecretKey::Ed25519(signing_key) => signing_key.sign(message_bytes),\n        _ => panic!(\"Expected an Ed25519 key\"),\n    };\n\n    let signature = Signature::Ed25519(signature);\n    let signature_bytes: Bytes = signature.to_bytes().unwrap().into();\n\n    LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                VERIFY_SIGNATURE_WASM,\n                runtime_args! 
{\n                    \"message\" => message,\n                    \"signature_bytes\" => signature_bytes,\n                    \"public_key\" => public_key,\n                },\n            )\n            .build(),\n        )\n        .expect_success()\n        .commit();\n}\n\n#[ignore]\n#[test]\nfn should_fail_verify_secp256k1_signature() {\n    let message = String::from(\"Recovery test\");\n    let message_bytes = message.as_bytes();\n    let signing_key = SecretKey::generate_secp256k1().unwrap();\n    let unrelated_key = PublicKey::from(&SecretKey::generate_secp256k1().unwrap());\n\n    let (signature, _) = match signing_key {\n        SecretKey::Secp256k1(signing_key) => signing_key.sign_recoverable(message_bytes).unwrap(),\n        _ => panic!(\"Expected a Secp256k1 key\"),\n    };\n\n    let signature = Signature::Secp256k1(signature);\n    let signature_bytes: Bytes = signature.to_bytes().unwrap().into();\n\n    LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                VERIFY_SIGNATURE_WASM,\n                runtime_args! 
{\n                    \"message\" => message,\n                    \"signature_bytes\" => signature_bytes,\n                    \"public_key\" => unrelated_key,\n                },\n            )\n            .build(),\n        )\n        .expect_failure()\n        .commit();\n}\n\n#[ignore]\n#[test]\nfn should_fail_verify_ed25519_signature() {\n    let message = String::from(\"Recovery test\");\n    let message_bytes = message.as_bytes();\n    let signing_key = SecretKey::generate_ed25519().unwrap();\n    let unrelated_key = PublicKey::from(&SecretKey::generate_ed25519().unwrap());\n\n    let signature = match signing_key {\n        SecretKey::Ed25519(signing_key) => signing_key.sign(message_bytes),\n        _ => panic!(\"Expected an Ed25519 key\"),\n    };\n\n    let signature = Signature::Ed25519(signature);\n    let signature_bytes: Bytes = signature.to_bytes().unwrap().into();\n\n    LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                VERIFY_SIGNATURE_WASM,\n                runtime_args! {\n                    \"message\" => message,\n                    \"signature_bytes\" => signature_bytes,\n                    \"public_key\" => unrelated_key,\n                },\n            )\n            .build(),\n        )\n        .expect_failure()\n        .commit();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_context.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\n\nuse casper_types::{runtime_args, Key, RuntimeArgs, ENTITY_INITIAL_VERSION};\n\nconst CONTRACT_HEADERS: &str = \"contract_context.wasm\";\nconst PACKAGE_HASH_KEY: &str = \"package_hash_key\";\nconst PACKAGE_ACCESS_KEY: &str = \"package_access_key\";\nconst CONTRACT_HASH_KEY: &str = \"contract_hash_key\";\n\nconst CONTRACT_CODE_TEST: &str = \"contract_code_test\";\n\nconst NEW_KEY: &str = \"new_key\";\n\nconst CONTRACT_VERSION: &str = \"contract_version\";\n\n#[ignore]\n#[test]\nfn should_enforce_intended_execution_contexts() {\n    // This test runs a contract that extends the same key with more data after every call.\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_HEADERS,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let exec_request_3 = ExecuteRequestBuilder::versioned_contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        PACKAGE_HASH_KEY,\n        Some(ENTITY_INITIAL_VERSION),\n        CONTRACT_CODE_TEST,\n        runtime_args! 
{},\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    builder.exec(exec_request_3).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have contract\");\n\n    let _package_hash = account\n        .named_keys()\n        .get(PACKAGE_HASH_KEY)\n        .expect(\"should have contract package\");\n    let _access_uref = account\n        .named_keys()\n        .get(PACKAGE_ACCESS_KEY)\n        .expect(\"should have package hash\");\n\n    assert!(account.named_keys().get(NEW_KEY).is_none());\n\n    // Check version\n\n    let contract_version_stored = builder\n        .query(\n            None,\n            Key::Account(*DEFAULT_ACCOUNT_ADDR),\n            &[CONTRACT_VERSION.to_string()],\n        )\n        .expect(\"should query account\")\n        .as_cl_value()\n        .cloned()\n        .expect(\"should be cl value\");\n    assert_eq!(contract_version_stored.into_t::<u32>().unwrap(), 1u32);\n}\n\n#[ignore]\n#[test]\nfn should_enforce_intended_execution_context_direct_by_name() {\n    // This test runs a contract that extends the same key with more data after every call.\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_HEADERS,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let exec_request_3 = ExecuteRequestBuilder::contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_HASH_KEY,\n        CONTRACT_CODE_TEST,\n        runtime_args! 
{},\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    builder.exec(exec_request_3).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have contract\");\n\n    let _package_hash = account\n        .named_keys()\n        .get(PACKAGE_HASH_KEY)\n        .expect(\"should have contract package\");\n    let _access_uref = account\n        .named_keys()\n        .get(PACKAGE_ACCESS_KEY)\n        .expect(\"should have package hash\");\n\n    assert!(account.named_keys().get(NEW_KEY).is_none());\n}\n\n#[ignore]\n#[test]\nfn should_enforce_intended_execution_context_direct_by_hash() {\n    // This test runs a contract that extends the same key with more data after every call.\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_HEADERS,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have default account\");\n\n    let contract_hash = account\n        .named_keys()\n        .get(CONTRACT_HASH_KEY)\n        .expect(\"should have contract hash\")\n        .into_entity_hash();\n\n    let contract_hash = contract_hash.unwrap();\n\n    let exec_request_3 = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        CONTRACT_CODE_TEST,\n        runtime_args! 
{},\n    )\n    .build();\n\n    builder.exec(exec_request_3).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have contract\");\n\n    let _package_hash = account\n        .named_keys()\n        .get(PACKAGE_HASH_KEY)\n        .expect(\"should have contract package\");\n    let _access_uref = account\n        .named_keys()\n        .get(PACKAGE_ACCESS_KEY)\n        .expect(\"should have package hash\");\n\n    assert!(account.named_keys().get(NEW_KEY).is_none())\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/contract_messages.rs",
    "content": "use num_traits::Zero;\nuse std::cell::RefCell;\n\nuse casper_execution_engine::runtime::cryptography;\n\nuse casper_engine_test_support::{\n    ChainspecConfig, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_BLOCK_TIME, LOCAL_GENESIS_REQUEST,\n};\n\nuse casper_types::{\n    addressable_entity::MessageTopics,\n    bytesrepr::ToBytes,\n    contract_messages::{MessageChecksum, MessagePayload, MessageTopicSummary, TopicNameHash},\n    runtime_args, AddressableEntityHash, BlockGlobalAddr, BlockTime, CLValue, CoreConfig, Digest,\n    EntityAddr, HostFunction, HostFunctionCostsV1, HostFunctionCostsV2, Key, MessageLimits,\n    OpcodeCosts, RuntimeArgs, StorageCosts, StoredValue, SystemConfig, WasmConfig, WasmV1Config,\n    WasmV2Config, DEFAULT_MAX_STACK_HEIGHT, DEFAULT_WASM_MAX_MEMORY, U512,\n};\n\nconst MESSAGE_EMITTER_INSTALLER_WASM: &str = \"contract_messages_emitter.wasm\";\nconst MESSAGE_EMITTER_UPGRADER_WASM: &str = \"contract_messages_upgrader.wasm\";\nconst MESSAGE_EMITTER_FROM_ACCOUNT: &str = \"contract_messages_from_account.wasm\";\nconst MESSAGE_EMITTER_PACKAGE_HASH_KEY_NAME: &str = \"messages_emitter_package_hash\";\nconst MESSAGE_EMITTER_GENERIC_TOPIC: &str = \"generic_messages\";\nconst MESSAGE_EMITTER_UPGRADED_TOPIC: &str = \"new_topic_after_upgrade\";\nconst ENTRY_POINT_EMIT_MESSAGE: &str = \"emit_message\";\nconst ENTRY_POINT_EMIT_MULTIPLE_MESSAGES: &str = \"emit_multiple_messages\";\nconst ENTRY_POINT_EMIT_MESSAGE_FROM_EACH_VERSION: &str = \"emit_message_from_each_version\";\nconst ARG_NUM_MESSAGES_TO_EMIT: &str = \"num_messages_to_emit\";\nconst ARG_TOPIC_NAME: &str = \"topic_name\";\nconst ENTRY_POINT_ADD_TOPIC: &str = \"add_topic\";\nconst ARG_MESSAGE_SUFFIX_NAME: &str = \"message_suffix\";\nconst ARG_REGISTER_DEFAULT_TOPIC_WITH_INIT: &str = \"register_default_topic_with_init\";\n\nconst EMITTER_MESSAGE_PREFIX: &str = \"generic message: \";\n\n// Number of messages that will be emitted when calling 
`ENTRY_POINT_EMIT_MESSAGE_FROM_EACH_VERSION`\nconst EMIT_MESSAGE_FROM_EACH_VERSION_NUM_MESSAGES: u32 = 3;\n\nfn install_messages_emitter_contract(\n    builder: &RefCell<LmdbWasmTestBuilder>,\n    use_initializer: bool,\n) -> AddressableEntityHash {\n    // Request to install the contract that will be emitting messages.\n    let install_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        MESSAGE_EMITTER_INSTALLER_WASM,\n        runtime_args! {\n            ARG_REGISTER_DEFAULT_TOPIC_WITH_INIT => use_initializer,\n        },\n    )\n    .build();\n\n    // Execute the request to install the message emitting contract.\n    // This will also register a topic for the contract to emit messages on.\n    builder\n        .borrow_mut()\n        .exec(install_request)\n        .expect_success()\n        .commit();\n\n    // Get the contract package for the messages_emitter.\n    let query_result = builder\n        .borrow_mut()\n        .query(\n            None,\n            Key::from(*DEFAULT_ACCOUNT_ADDR),\n            &[MESSAGE_EMITTER_PACKAGE_HASH_KEY_NAME.into()],\n        )\n        .expect(\"should query\");\n\n    let message_emitter_package = if let StoredValue::ContractPackage(package) = query_result {\n        package\n    } else {\n        panic!(\"Stored value is not a contract package: {:?}\", query_result);\n    };\n\n    // Get the contract hash of the messages_emitter contract.\n    message_emitter_package\n        .versions()\n        .values()\n        .last()\n        .map(|contract_hash| AddressableEntityHash::new(contract_hash.value()))\n        .expect(\"Should have contract hash\")\n}\n\nfn upgrade_messages_emitter_contract(\n    builder: &RefCell<LmdbWasmTestBuilder>,\n    use_initializer: bool,\n    expect_failure: bool,\n) -> AddressableEntityHash {\n    let upgrade_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        MESSAGE_EMITTER_UPGRADER_WASM,\n        runtime_args! 
{\n            ARG_REGISTER_DEFAULT_TOPIC_WITH_INIT => use_initializer,\n        },\n    )\n    .build();\n\n    // let new_topics = BTreeMap::from([(\n    //     MESSAGE_EMITTER_GENERIC_TOPIC.to_string(),\n    //     MessageTopicOperation::Add,\n    // )]);\n\n    // println!(\"{}\", new_topics.into_bytes().unwrap().len());\n\n    // Execute the request to upgrade the message emitting contract.\n    // This will also register a new topic for the contract to emit messages on.\n    if expect_failure {\n        builder\n            .borrow_mut()\n            .exec(upgrade_request)\n            .expect_failure()\n            .commit();\n    } else {\n        builder\n            .borrow_mut()\n            .exec(upgrade_request)\n            .expect_success()\n            .commit();\n    }\n\n    // Get the contract package for the upgraded messages emitter contract.\n    let query_result = builder\n        .borrow_mut()\n        .query(\n            None,\n            Key::from(*DEFAULT_ACCOUNT_ADDR),\n            &[MESSAGE_EMITTER_PACKAGE_HASH_KEY_NAME.into()],\n        )\n        .expect(\"should query\");\n\n    let message_emitter_package = if let StoredValue::ContractPackage(package) = query_result {\n        package\n    } else {\n        panic!(\"Stored value is not a contract package: {:?}\", query_result);\n    };\n\n    // Get the contract hash of the latest version of the messages emitter contract.\n    message_emitter_package\n        .versions()\n        .values()\n        .last()\n        .map(|contract_hash| AddressableEntityHash::new(contract_hash.value()))\n        .expect(\"Should have contract hash\")\n}\n\nfn emit_message_with_suffix(\n    builder: &RefCell<LmdbWasmTestBuilder>,\n    suffix: &str,\n    contract_hash: &AddressableEntityHash,\n    block_time: u64,\n) {\n    let emit_message_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        *contract_hash,\n        ENTRY_POINT_EMIT_MESSAGE,\n        
runtime_args! {\n            ARG_MESSAGE_SUFFIX_NAME => suffix,\n        },\n    )\n    .with_block_time(block_time)\n    .build();\n\n    builder\n        .borrow_mut()\n        .exec(emit_message_request)\n        .expect_success()\n        .commit();\n}\n\nstruct ContractQueryView<'a> {\n    builder: &'a RefCell<LmdbWasmTestBuilder>,\n    contract_hash: AddressableEntityHash,\n}\n\nimpl<'a> ContractQueryView<'a> {\n    fn new(\n        builder: &'a RefCell<LmdbWasmTestBuilder>,\n        contract_hash: AddressableEntityHash,\n    ) -> Self {\n        Self {\n            builder,\n            contract_hash,\n        }\n    }\n\n    fn message_topics(&self) -> MessageTopics {\n        let message_topics_result = self\n            .builder\n            .borrow_mut()\n            .message_topics(None, EntityAddr::SmartContract(self.contract_hash.value()))\n            .expect(\"must get message topics\");\n\n        message_topics_result\n    }\n\n    fn message_topic(&self, topic_name_hash: TopicNameHash) -> MessageTopicSummary {\n        let query_result = self\n            .builder\n            .borrow_mut()\n            .query(\n                None,\n                Key::message_topic(\n                    EntityAddr::SmartContract(self.contract_hash.value()),\n                    topic_name_hash,\n                ),\n                &[],\n            )\n            .expect(\"should query\");\n\n        match query_result {\n            StoredValue::MessageTopic(summary) => summary,\n            _ => {\n                panic!(\n                    \"Stored value is not a message topic summary: {:?}\",\n                    query_result\n                );\n            }\n        }\n    }\n\n    fn message_summary(\n        &self,\n        topic_name_hash: TopicNameHash,\n        message_index: u32,\n        state_hash: Option<Digest>,\n    ) -> Result<MessageChecksum, String> {\n        let query_result = self.builder.borrow_mut().query(\n            
state_hash,\n            Key::message(\n                EntityAddr::SmartContract(self.contract_hash.value()),\n                topic_name_hash,\n                message_index,\n            ),\n            &[],\n        )?;\n\n        match query_result {\n            StoredValue::Message(summary) => Ok(summary),\n            _ => panic!(\"Stored value is not a message summary: {:?}\", query_result),\n        }\n    }\n}\n\n#[ignore]\n#[test]\nfn should_emit_messages() {\n    let builder = RefCell::new(LmdbWasmTestBuilder::default());\n    builder\n        .borrow_mut()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let contract_hash = install_messages_emitter_contract(&builder, true);\n    let query_view = ContractQueryView::new(&builder, contract_hash);\n\n    let message_topics = query_view.message_topics();\n\n    let (topic_name, message_topic_hash) = message_topics\n        .iter()\n        .next()\n        .expect(\"should have at least one topic\");\n\n    assert_eq!(topic_name, &MESSAGE_EMITTER_GENERIC_TOPIC.to_string());\n    // Check that the topic exists for the installed contract.\n    assert_eq!(\n        query_view\n            .message_topic(*message_topic_hash)\n            .message_count(),\n        0\n    );\n\n    // Now call the entry point to emit some messages.\n    emit_message_with_suffix(&builder, \"test\", &contract_hash, DEFAULT_BLOCK_TIME);\n    let expected_message = MessagePayload::from(format!(\"{}{}\", EMITTER_MESSAGE_PREFIX, \"test\"));\n    let expected_message_hash = cryptography::blake2b(\n        [\n            0u64.to_bytes().unwrap(),\n            expected_message.to_bytes().unwrap(),\n        ]\n        .concat(),\n    );\n    let queried_message_summary = query_view\n        .message_summary(*message_topic_hash, 0, None)\n        .expect(\"should have value\")\n        .value();\n    assert_eq!(expected_message_hash, queried_message_summary);\n    assert_eq!(\n        query_view\n            
.message_topic(*message_topic_hash)\n            .message_count(),\n        1\n    );\n\n    // call again to emit a new message and check that the index in the topic incremented.\n    emit_message_with_suffix(&builder, \"test\", &contract_hash, DEFAULT_BLOCK_TIME);\n    let expected_message_hash = cryptography::blake2b(\n        [\n            1u64.to_bytes().unwrap(),\n            expected_message.to_bytes().unwrap(),\n        ]\n        .concat(),\n    );\n    let queried_message_summary = query_view\n        .message_summary(*message_topic_hash, 1, None)\n        .expect(\"should have value\")\n        .value();\n    assert_eq!(expected_message_hash, queried_message_summary);\n    assert_eq!(\n        query_view\n            .message_topic(*message_topic_hash)\n            .message_count(),\n        2\n    );\n\n    let first_block_state_hash = builder.borrow().get_post_state_hash();\n\n    // call to emit a new message but in another block.\n    emit_message_with_suffix(\n        &builder,\n        \"new block time\",\n        &contract_hash,\n        DEFAULT_BLOCK_TIME + 1,\n    );\n    let expected_message =\n        MessagePayload::from(format!(\"{}{}\", EMITTER_MESSAGE_PREFIX, \"new block time\"));\n    let expected_message_hash = cryptography::blake2b(\n        [\n            0u64.to_bytes().unwrap(),\n            expected_message.to_bytes().unwrap(),\n        ]\n        .concat(),\n    );\n    let queried_message_summary = query_view\n        .message_summary(*message_topic_hash, 0, None)\n        .expect(\"should have value\")\n        .value();\n    assert_eq!(expected_message_hash, queried_message_summary);\n    assert_eq!(\n        query_view\n            .message_topic(*message_topic_hash)\n            .message_count(),\n        1\n    );\n\n    // old messages should be pruned from tip and inaccessible at the latest state hash.\n    assert!(query_view\n        .message_summary(*message_topic_hash, 1, None)\n        .is_err());\n\n    // old 
messages should still be discoverable at a state hash before pruning.\n    assert!(query_view\n        .message_summary(*message_topic_hash, 1, Some(first_block_state_hash))\n        .is_ok());\n}\n\n#[ignore]\n#[test]\nfn should_emit_message_on_empty_topic_in_new_block() {\n    let builder = RefCell::new(LmdbWasmTestBuilder::default());\n    builder\n        .borrow_mut()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let contract_hash = install_messages_emitter_contract(&builder, true);\n    let query_view = ContractQueryView::new(&builder, contract_hash);\n\n    let message_topics = query_view.message_topics();\n\n    let (_, message_topic_hash) = message_topics\n        .iter()\n        .next()\n        .expect(\"should have at least one topic\");\n\n    assert_eq!(\n        query_view\n            .message_topic(*message_topic_hash)\n            .message_count(),\n        0\n    );\n\n    emit_message_with_suffix(\n        &builder,\n        \"new block time\",\n        &contract_hash,\n        DEFAULT_BLOCK_TIME + 1,\n    );\n    assert_eq!(\n        query_view\n            .message_topic(*message_topic_hash)\n            .message_count(),\n        1\n    );\n}\n\n#[ignore]\n#[test]\nfn should_add_topics() {\n    let builder = RefCell::new(LmdbWasmTestBuilder::default());\n    builder\n        .borrow_mut()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone());\n    let contract_hash = install_messages_emitter_contract(&builder, true);\n    let query_view = ContractQueryView::new(&builder, contract_hash);\n\n    let add_topic_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        ENTRY_POINT_ADD_TOPIC,\n        runtime_args! 
{\n            ARG_TOPIC_NAME => \"topic_1\",\n        },\n    )\n    .build();\n\n    builder\n        .borrow_mut()\n        .exec(add_topic_request)\n        .expect_success()\n        .commit();\n\n    let topic_1_hash = *query_view\n        .message_topics()\n        .get(\"topic_1\")\n        .expect(\"should have added topic `topic_1\");\n    assert_eq!(query_view.message_topic(topic_1_hash).message_count(), 0);\n\n    let add_topic_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        ENTRY_POINT_ADD_TOPIC,\n        runtime_args! {\n            ARG_TOPIC_NAME => \"topic_2\",\n        },\n    )\n    .build();\n\n    builder\n        .borrow_mut()\n        .exec(add_topic_request)\n        .expect_success()\n        .commit();\n\n    let topic_2_hash = *query_view\n        .message_topics()\n        .get(\"topic_2\")\n        .expect(\"should have added topic `topic_2\");\n\n    assert!(query_view.message_topics().get(\"topic_1\").is_some());\n    assert_eq!(query_view.message_topic(topic_1_hash).message_count(), 0);\n    assert_eq!(query_view.message_topic(topic_2_hash).message_count(), 0);\n}\n\n#[ignore]\n#[test]\nfn should_not_add_duplicate_topics() {\n    let builder = RefCell::new(LmdbWasmTestBuilder::default());\n    builder\n        .borrow_mut()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let contract_hash = install_messages_emitter_contract(&builder, true);\n    let query_view = ContractQueryView::new(&builder, contract_hash);\n    let message_topics = query_view.message_topics();\n    let (first_topic_name, _) = message_topics\n        .iter()\n        .next()\n        .expect(\"should have at least one topic\");\n\n    let add_topic_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        ENTRY_POINT_ADD_TOPIC,\n        runtime_args! 
{\n            ARG_TOPIC_NAME => first_topic_name,\n        },\n    )\n    .build();\n\n    builder\n        .borrow_mut()\n        .exec(add_topic_request)\n        .expect_failure()\n        .commit();\n}\n\n#[ignore]\n#[test]\nfn should_not_exceed_configured_limits() {\n    let chainspec = {\n        let default_wasm_v1_config = WasmV1Config::default();\n        let default_wasm_v2_config = WasmV2Config::default();\n        let wasm_v1_config = WasmV1Config::new(\n            default_wasm_v1_config.max_memory(),\n            default_wasm_v1_config.max_stack_height(),\n            default_wasm_v1_config.opcode_costs(),\n            default_wasm_v1_config.take_host_function_costs(),\n        );\n        let wasm_v2_config = WasmV2Config::new(\n            default_wasm_v2_config.max_memory(),\n            default_wasm_v2_config.opcode_costs(),\n            default_wasm_v2_config.take_host_function_costs(),\n        );\n        let wasm_config = WasmConfig::new(\n            MessageLimits {\n                max_topic_name_size: 32,\n                max_message_size: 100,\n                max_topics_per_contract: 2,\n            },\n            wasm_v1_config,\n            wasm_v2_config,\n        );\n        ChainspecConfig {\n            system_costs_config: SystemConfig::default(),\n            core_config: CoreConfig::default(),\n            wasm_config,\n            storage_costs: StorageCosts::default(),\n        }\n    };\n\n    let builder = RefCell::new(LmdbWasmTestBuilder::new_temporary_with_config(chainspec));\n    builder\n        .borrow_mut()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let contract_hash = install_messages_emitter_contract(&builder, true);\n\n    // if the topic larger than the limit, registering should fail.\n    // string is 33 bytes > limit established above\n    let too_large_topic_name = std::str::from_utf8(&[0x4du8; 33]).unwrap();\n    let add_topic_request = ExecuteRequestBuilder::contract_call_by_hash(\n        
*DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        ENTRY_POINT_ADD_TOPIC,\n        runtime_args! {\n            ARG_TOPIC_NAME => too_large_topic_name,\n        },\n    )\n    .build();\n\n    builder\n        .borrow_mut()\n        .exec(add_topic_request)\n        .expect_failure()\n        .commit();\n\n    // if the topic name is equal to the limit, registering should work.\n    // string is 32 bytes == limit established above\n    let topic_name_at_limit = std::str::from_utf8(&[0x4du8; 32]).unwrap();\n    let add_topic_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        ENTRY_POINT_ADD_TOPIC,\n        runtime_args! {\n            ARG_TOPIC_NAME => topic_name_at_limit,\n        },\n    )\n    .build();\n\n    builder\n        .borrow_mut()\n        .exec(add_topic_request)\n        .expect_success()\n        .commit();\n\n    // Check that the max number of topics limit is enforced.\n    // 2 topics are already registered, so registering another topic should\n    // fail since the limit is already reached.\n    let add_topic_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        ENTRY_POINT_ADD_TOPIC,\n        runtime_args! {\n            ARG_TOPIC_NAME => \"topic_1\",\n        },\n    )\n    .build();\n\n    builder\n        .borrow_mut()\n        .exec(add_topic_request)\n        .expect_failure()\n        .commit();\n\n    // Check message size limit\n    let large_message = std::str::from_utf8(&[0x4du8; 128]).unwrap();\n    let emit_message_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        ENTRY_POINT_EMIT_MESSAGE,\n        runtime_args! 
{\n            ARG_MESSAGE_SUFFIX_NAME => large_message,\n        },\n    )\n    .build();\n\n    builder\n        .borrow_mut()\n        .exec(emit_message_request)\n        .expect_failure()\n        .commit();\n}\n\nfn should_carry_message_topics_on_upgraded_contract(use_initializer: bool) {\n    let builder = RefCell::new(LmdbWasmTestBuilder::default());\n    builder\n        .borrow_mut()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let _ = install_messages_emitter_contract(&builder, true);\n    let contract_hash = upgrade_messages_emitter_contract(&builder, use_initializer, false);\n    let query_view = ContractQueryView::new(&builder, contract_hash);\n\n    let message_topics = query_view.message_topics();\n    assert_eq!(message_topics.len(), 2);\n    let mut expected_topic_names = 0;\n    for (topic_name, topic_hash) in message_topics.iter() {\n        if topic_name == MESSAGE_EMITTER_GENERIC_TOPIC\n            || topic_name == MESSAGE_EMITTER_UPGRADED_TOPIC\n        {\n            expected_topic_names += 1;\n        }\n\n        assert_eq!(query_view.message_topic(*topic_hash).message_count(), 0);\n    }\n    assert_eq!(expected_topic_names, 2);\n}\n\n#[ignore]\n#[test]\nfn should_carry_message_topics_on_upgraded_contract_with_initializer() {\n    should_carry_message_topics_on_upgraded_contract(true);\n}\n\n#[ignore]\n#[test]\nfn should_carry_message_topics_on_upgraded_contract_without_initializer() {\n    should_carry_message_topics_on_upgraded_contract(false);\n}\n\n#[ignore]\n#[test]\nfn should_not_emit_messages_from_account() {\n    let builder = RefCell::new(LmdbWasmTestBuilder::default());\n    builder\n        .borrow_mut()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    // Request to run a deploy that tries to register a message topic without a stored contract.\n    let install_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        MESSAGE_EMITTER_FROM_ACCOUNT,\n        
RuntimeArgs::default(),\n    )\n    .build();\n\n    // Expect to fail since topics can only be registered by stored contracts.\n    builder\n        .borrow_mut()\n        .exec(install_request)\n        .expect_failure()\n        .commit();\n}\n\n#[ignore]\n#[test]\nfn should_charge_expected_gas_for_storage() {\n    const GAS_PER_BYTE_COST: u32 = 100;\n\n    let chainspec = {\n        let wasm_v1_config = WasmV1Config::new(\n            DEFAULT_WASM_MAX_MEMORY,\n            DEFAULT_MAX_STACK_HEIGHT,\n            OpcodeCosts::zero(),\n            HostFunctionCostsV1::zero(),\n        );\n        let wasm_v2_config = WasmV2Config::new(\n            DEFAULT_WASM_MAX_MEMORY,\n            OpcodeCosts::zero(),\n            HostFunctionCostsV2::zero(),\n        );\n        let wasm_config = WasmConfig::new(MessageLimits::default(), wasm_v1_config, wasm_v2_config);\n        ChainspecConfig {\n            wasm_config,\n            core_config: CoreConfig::default(),\n            system_costs_config: SystemConfig::default(),\n            storage_costs: StorageCosts::new(GAS_PER_BYTE_COST),\n        }\n    };\n\n    let builder = RefCell::new(LmdbWasmTestBuilder::new_temporary_with_config(chainspec));\n    builder\n        .borrow_mut()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let contract_hash = install_messages_emitter_contract(&builder, true);\n\n    let topic_name = \"consume_topic\";\n\n    // check the consume of adding a new topic\n    let add_topic_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        ENTRY_POINT_ADD_TOPIC,\n        runtime_args! 
{\n            ARG_TOPIC_NAME => topic_name,\n        },\n    )\n    .build();\n\n    builder\n        .borrow_mut()\n        .exec(add_topic_request)\n        .expect_success()\n        .commit();\n\n    let add_topic_consumed = builder.borrow().last_exec_gas_consumed().value();\n\n    let default_topic_summary =\n        MessageTopicSummary::new(0, BlockTime::new(0), topic_name.to_string());\n    let written_size_expected =\n        StoredValue::MessageTopic(default_topic_summary.clone()).serialized_length();\n    assert_eq!(\n        U512::from(written_size_expected * GAS_PER_BYTE_COST as usize),\n        add_topic_consumed\n    );\n\n    let message_topic =\n        MessageTopicSummary::new(0, BlockTime::new(0), \"generic_messages\".to_string());\n    emit_message_with_suffix(&builder, \"test\", &contract_hash, DEFAULT_BLOCK_TIME);\n    // check that the storage consume charged is variable since the message topic hash a variable\n    // string field with message size that is emitted.\n    let written_size_expected = StoredValue::Message(MessageChecksum([0; 32])).serialized_length()\n        + StoredValue::MessageTopic(message_topic).serialized_length()\n        + StoredValue::CLValue(CLValue::from_t((BlockTime::new(0), 0u64)).unwrap())\n            .serialized_length();\n    let emit_message_gas_consumed = builder.borrow().last_exec_gas_consumed().value();\n    assert_eq!(\n        U512::from(written_size_expected * GAS_PER_BYTE_COST as usize),\n        emit_message_gas_consumed\n    );\n\n    emit_message_with_suffix(&builder, \"test 12345\", &contract_hash, DEFAULT_BLOCK_TIME);\n    let written_size_expected = StoredValue::Message(MessageChecksum([0; 32])).serialized_length()\n        + StoredValue::MessageTopic(MessageTopicSummary::new(\n            0,\n            BlockTime::new(0),\n            \"generic_messages\".to_string(),\n        ))\n        .serialized_length()\n        + StoredValue::CLValue(CLValue::from_t((BlockTime::new(0), 0u64)).unwrap())\n   
         .serialized_length();\n    let emit_message_gas_consumed = builder.borrow().last_exec_gas_consumed().value();\n    assert_eq!(\n        U512::from(written_size_expected * GAS_PER_BYTE_COST as usize),\n        emit_message_gas_consumed\n    );\n\n    // emitting messages in a different block will also prune the old entries so check the consumed.\n    emit_message_with_suffix(\n        &builder,\n        \"message in different block\",\n        &contract_hash,\n        DEFAULT_BLOCK_TIME + 1,\n    );\n    let emit_message_gas_consumed = builder.borrow().last_exec_gas_consumed().value();\n    assert_eq!(\n        U512::from(written_size_expected * GAS_PER_BYTE_COST as usize),\n        emit_message_gas_consumed\n    );\n}\n\n#[ignore]\n#[test]\nfn should_charge_increasing_gas_consumed_for_multiple_messages_emitted() {\n    const FIRST_MESSAGE_EMIT_COST: u32 = 100;\n    const COST_INCREASE_PER_MESSAGE: u32 = 50;\n    const fn emit_consumed_per_execution(num_messages: u32) -> u32 {\n        FIRST_MESSAGE_EMIT_COST * num_messages\n            + (num_messages - 1) * num_messages / 2 * COST_INCREASE_PER_MESSAGE\n    }\n\n    const MESSAGES_TO_EMIT: u32 = 4;\n    const EMIT_MULTIPLE_EXPECTED_COST: u32 = emit_consumed_per_execution(MESSAGES_TO_EMIT);\n    const EMIT_MESSAGES_FROM_MULTIPLE_CONTRACTS: u32 =\n        emit_consumed_per_execution(EMIT_MESSAGE_FROM_EACH_VERSION_NUM_MESSAGES);\n    let chainspec = {\n        let wasm_v1_config = WasmV1Config::new(\n            DEFAULT_WASM_MAX_MEMORY,\n            DEFAULT_MAX_STACK_HEIGHT,\n            OpcodeCosts::zero(),\n            HostFunctionCostsV1 {\n                emit_message: HostFunction::fixed(FIRST_MESSAGE_EMIT_COST),\n                cost_increase_per_message: COST_INCREASE_PER_MESSAGE,\n                ..Zero::zero()\n            },\n        );\n        let wasm_v2_config = WasmV2Config::new(\n            DEFAULT_WASM_MAX_MEMORY,\n            OpcodeCosts::zero(),\n            
HostFunctionCostsV2::default(),\n        );\n        let wasm_config = WasmConfig::new(MessageLimits::default(), wasm_v1_config, wasm_v2_config);\n        ChainspecConfig {\n            wasm_config,\n            core_config: CoreConfig::default(),\n            system_costs_config: SystemConfig::default(),\n            storage_costs: StorageCosts::zero(),\n        }\n    };\n\n    let builder = RefCell::new(LmdbWasmTestBuilder::new_temporary_with_config(chainspec));\n    builder\n        .borrow_mut()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let contract_hash = install_messages_emitter_contract(&builder, true);\n\n    // Emit one message in this execution. Cost should be `FIRST_MESSAGE_EMIT_COST`.\n    emit_message_with_suffix(&builder, \"test\", &contract_hash, DEFAULT_BLOCK_TIME);\n    let emit_message_gas_consume = builder.borrow().last_exec_gas_consumed().value();\n    assert_eq!(emit_message_gas_consume, FIRST_MESSAGE_EMIT_COST.into());\n\n    // Emit multiple messages in this execution. Cost should increase for each message emitted.\n    let emit_messages_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        ENTRY_POINT_EMIT_MULTIPLE_MESSAGES,\n        runtime_args! 
{\n            ARG_NUM_MESSAGES_TO_EMIT => MESSAGES_TO_EMIT,\n        },\n    )\n    .build();\n    builder\n        .borrow_mut()\n        .exec(emit_messages_request)\n        .expect_success()\n        .commit();\n\n    let emit_multiple_messages_consume = builder.borrow().last_exec_gas_consumed().value();\n    assert_eq!(\n        emit_multiple_messages_consume,\n        EMIT_MULTIPLE_EXPECTED_COST.into()\n    );\n\n    // Try another execution where we emit a single message.\n    // Cost should be `FIRST_MESSAGE_EMIT_COST`\n    emit_message_with_suffix(&builder, \"test\", &contract_hash, DEFAULT_BLOCK_TIME);\n    let emit_message_gas_consume = builder.borrow().last_exec_gas_consumed().value();\n    assert_eq!(emit_message_gas_consume, FIRST_MESSAGE_EMIT_COST.into());\n\n    // Check gas consume when multiple messages are emitted from different contracts.\n    let contract_hash = upgrade_messages_emitter_contract(&builder, true, false);\n    let emit_message_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        ENTRY_POINT_EMIT_MESSAGE_FROM_EACH_VERSION,\n        runtime_args! 
{\n            ARG_MESSAGE_SUFFIX_NAME => \"test message\",\n        },\n    )\n    .build();\n\n    builder\n        .borrow_mut()\n        .exec(emit_message_request)\n        .expect_success()\n        .commit();\n\n    // 3 messages are emitted by this execution so the consume would be:\n    // `EMIT_MESSAGES_FROM_MULTIPLE_CONTRACTS`\n    let emit_message_gas_consume = builder.borrow().last_exec_gas_consumed().value();\n    assert_eq!(\n        emit_message_gas_consume,\n        U512::from(EMIT_MESSAGES_FROM_MULTIPLE_CONTRACTS)\n    );\n}\n\n#[ignore]\n#[test]\nfn should_register_topic_on_contract_creation() {\n    let builder = RefCell::new(LmdbWasmTestBuilder::default());\n    builder\n        .borrow_mut()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let contract_hash = install_messages_emitter_contract(&builder, false);\n    let query_view = ContractQueryView::new(&builder, contract_hash);\n\n    let message_topics = query_view.message_topics();\n    let (topic_name, message_topic_hash) = message_topics\n        .iter()\n        .next()\n        .expect(\"should have at least one topic\");\n\n    assert_eq!(topic_name, &MESSAGE_EMITTER_GENERIC_TOPIC.to_string());\n    // Check that the topic exists for the installed contract.\n    assert_eq!(\n        query_view\n            .message_topic(*message_topic_hash)\n            .message_count(),\n        0\n    );\n}\n\n#[ignore]\n#[test]\nfn should_not_exceed_configured_topic_name_limits_on_contract_upgrade_no_init() {\n    let chainspec = {\n        let default_wasm_v1_config = WasmV1Config::default();\n        let default_wasm_v2_config = WasmV2Config::default();\n        let wasm_v1_config = WasmV1Config::new(\n            default_wasm_v1_config.max_memory(),\n            default_wasm_v1_config.max_stack_height(),\n            default_wasm_v1_config.opcode_costs(),\n            default_wasm_v1_config.take_host_function_costs(),\n        );\n        let wasm_v2_config = WasmV2Config::new(\n    
        default_wasm_v2_config.max_memory(),\n            default_wasm_v2_config.opcode_costs(),\n            default_wasm_v2_config.take_host_function_costs(),\n        );\n        let wasm_config = WasmConfig::new(\n            MessageLimits {\n                max_topic_name_size: 16, //length of MESSAGE_EMITTER_GENERIC_TOPIC\n                max_message_size: 100,\n                max_topics_per_contract: 3,\n            },\n            wasm_v1_config,\n            wasm_v2_config,\n        );\n        ChainspecConfig {\n            wasm_config,\n            core_config: CoreConfig::default(),\n            system_costs_config: SystemConfig::default(),\n            storage_costs: StorageCosts::default(),\n        }\n    };\n\n    let builder = RefCell::new(LmdbWasmTestBuilder::new_temporary_with_config(chainspec));\n    builder\n        .borrow_mut()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let _ = install_messages_emitter_contract(&builder, false);\n    let _ = upgrade_messages_emitter_contract(&builder, false, true);\n}\n\n#[ignore]\n#[test]\nfn should_not_exceed_configured_max_topics_per_contract_upgrade_no_init() {\n    let chainspec = {\n        let default_wasm_v1_config = WasmV1Config::default();\n        let wasm_v1_config = WasmV1Config::new(\n            default_wasm_v1_config.max_memory(),\n            default_wasm_v1_config.max_stack_height(),\n            default_wasm_v1_config.opcode_costs(),\n            default_wasm_v1_config.take_host_function_costs(),\n        );\n        let default_wasm_v2_config = WasmV2Config::default();\n        let wasm_v2_config = WasmV2Config::new(\n            default_wasm_v2_config.max_memory(),\n            default_wasm_v2_config.opcode_costs(),\n            default_wasm_v2_config.take_host_function_costs(),\n        );\n        let wasm_config = WasmConfig::new(\n            MessageLimits {\n                max_topic_name_size: 32,\n                max_message_size: 100,\n                
max_topics_per_contract: 1, /* only allow 1 topic. Since on upgrade previous\n                                             * topics carry over, the upgrade should fail. */\n            },\n            wasm_v1_config,\n            wasm_v2_config,\n        );\n        ChainspecConfig {\n            wasm_config,\n            system_costs_config: SystemConfig::default(),\n            core_config: CoreConfig::default(),\n            storage_costs: StorageCosts::default(),\n        }\n    };\n\n    let builder = RefCell::new(LmdbWasmTestBuilder::new_temporary_with_config(chainspec));\n    builder\n        .borrow_mut()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let _ = install_messages_emitter_contract(&builder, false);\n    let _ = upgrade_messages_emitter_contract(&builder, false, true);\n}\n\n#[ignore]\n#[test]\nfn should_produce_per_block_message_ordering() {\n    let builder = RefCell::new(LmdbWasmTestBuilder::default());\n    builder\n        .borrow_mut()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let emitter_contract_hash = install_messages_emitter_contract(&builder, true);\n    let query_view = ContractQueryView::new(&builder, emitter_contract_hash);\n\n    let message_topics = query_view.message_topics();\n    let (_, message_topic_hash) = message_topics\n        .iter()\n        .next()\n        .expect(\"should have at least one topic\");\n\n    let assert_last_message_block_index = |expected_index: u64| {\n        assert_eq!(\n            builder\n                .borrow()\n                .get_last_exec_result()\n                .unwrap()\n                .messages()\n                .first()\n                .unwrap()\n                .block_index(),\n            expected_index\n        )\n    };\n\n    let query_message_count = || -> Option<(BlockTime, u64)> {\n        let query_result =\n            builder\n                .borrow_mut()\n                .query(None, Key::BlockGlobal(BlockGlobalAddr::MessageCount), 
&[]);\n\n        match query_result {\n            Ok(StoredValue::CLValue(cl_value)) => Some(cl_value.into_t().unwrap()),\n            Err(_) => None,\n            _ => panic!(\"Stored value is not a CLvalue: {:?}\", query_result),\n        }\n    };\n\n    // Emit the first message in the block. It should have block index 0.\n    emit_message_with_suffix(\n        &builder,\n        \"test 0\",\n        &emitter_contract_hash,\n        DEFAULT_BLOCK_TIME,\n    );\n    assert_last_message_block_index(0);\n    assert_eq!(\n        query_message_count(),\n        Some((BlockTime::new(DEFAULT_BLOCK_TIME), 1))\n    );\n\n    let expected_message = MessagePayload::from(format!(\"{}{}\", EMITTER_MESSAGE_PREFIX, \"test 0\"));\n    let expected_message_hash = cryptography::blake2b(\n        [\n            0u64.to_bytes().unwrap(),\n            expected_message.to_bytes().unwrap(),\n        ]\n        .concat(),\n    );\n    let queried_message_summary = query_view\n        .message_summary(*message_topic_hash, 0, None)\n        .expect(\"should have value\")\n        .value();\n    assert_eq!(expected_message_hash, queried_message_summary);\n\n    // Emit the second message in the same block. 
It should have block index 1.\n    emit_message_with_suffix(\n        &builder,\n        \"test 1\",\n        &emitter_contract_hash,\n        DEFAULT_BLOCK_TIME,\n    );\n    assert_last_message_block_index(1);\n    assert_eq!(\n        query_message_count(),\n        Some((BlockTime::new(DEFAULT_BLOCK_TIME), 2))\n    );\n\n    let expected_message = MessagePayload::from(format!(\"{}{}\", EMITTER_MESSAGE_PREFIX, \"test 1\"));\n    let expected_message_hash = cryptography::blake2b(\n        [\n            1u64.to_bytes().unwrap(),\n            expected_message.to_bytes().unwrap(),\n        ]\n        .concat(),\n    );\n    let queried_message_summary = query_view\n        .message_summary(*message_topic_hash, 1, None)\n        .expect(\"should have value\")\n        .value();\n    assert_eq!(expected_message_hash, queried_message_summary);\n\n    // Upgrade the message emitter contract end emit a message from this contract in the same block\n    // as before. The block index of the message should be 2 since the block hasn't changed.\n    let upgraded_contract_hash = upgrade_messages_emitter_contract(&builder, true, false);\n    let upgraded_contract_query_view = ContractQueryView::new(&builder, upgraded_contract_hash);\n\n    let upgraded_topics = upgraded_contract_query_view.message_topics();\n    let upgraded_message_topic_hash = upgraded_topics\n        .get(MESSAGE_EMITTER_UPGRADED_TOPIC)\n        .expect(\"should have upgraded topic\");\n\n    let emit_message_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        upgraded_contract_hash,\n        \"upgraded_emit_message\",\n        runtime_args! 
{\n            ARG_MESSAGE_SUFFIX_NAME => \"test 2\",\n        },\n    )\n    .with_block_time(DEFAULT_BLOCK_TIME)\n    .build();\n\n    builder\n        .borrow_mut()\n        .exec(emit_message_request)\n        .expect_success()\n        .commit();\n    assert_last_message_block_index(2);\n    assert_eq!(\n        query_message_count(),\n        Some((BlockTime::new(DEFAULT_BLOCK_TIME), 3))\n    );\n\n    let expected_message = MessagePayload::from(format!(\"{}{}\", EMITTER_MESSAGE_PREFIX, \"test 2\"));\n    let expected_message_hash = cryptography::blake2b(\n        [\n            2u64.to_bytes().unwrap(),\n            expected_message.to_bytes().unwrap(),\n        ]\n        .concat(),\n    );\n    let queried_message_summary = upgraded_contract_query_view\n        .message_summary(*upgraded_message_topic_hash, 0, None)\n        .expect(\"should have value\")\n        .value();\n    assert_eq!(expected_message_hash, queried_message_summary);\n\n    // Now emit a message in a different block. 
The block index should be 0 since it's the first\n    // message in the new block.\n    emit_message_with_suffix(\n        &builder,\n        \"test 3\",\n        &emitter_contract_hash,\n        DEFAULT_BLOCK_TIME + 1,\n    );\n    assert_last_message_block_index(0);\n    assert_eq!(\n        query_message_count(),\n        Some((BlockTime::new(DEFAULT_BLOCK_TIME + 1), 1))\n    );\n    let expected_message = MessagePayload::from(format!(\"{}{}\", EMITTER_MESSAGE_PREFIX, \"test 3\"));\n    let expected_message_hash = cryptography::blake2b(\n        [\n            0u64.to_bytes().unwrap(),\n            expected_message.to_bytes().unwrap(),\n        ]\n        .concat(),\n    );\n    let queried_message_summary = query_view\n        .message_summary(*message_topic_hash, 0, None)\n        .expect(\"should have value\")\n        .value();\n    assert_eq!(expected_message_hash, queried_message_summary);\n}\n\n#[ignore]\n#[test]\nfn emit_message_should_consume_variable_gas_based_on_topic_and_message_size() {\n    const MESSAGE_EMIT_COST: u32 = 1_000_000;\n\n    const COST_PER_MESSAGE_TOPIC_NAME_SIZE: u32 = 2;\n    const COST_PER_MESSAGE_LENGTH: u32 = 1_000;\n    const MESSAGE_SUFFIX: &str = \"test\";\n\n    let chainspec = {\n        let wasm_v1_config = WasmV1Config::new(\n            DEFAULT_WASM_MAX_MEMORY,\n            DEFAULT_MAX_STACK_HEIGHT,\n            OpcodeCosts::zero(),\n            HostFunctionCostsV1 {\n                emit_message: HostFunction::new(\n                    MESSAGE_EMIT_COST,\n                    [\n                        0,\n                        COST_PER_MESSAGE_TOPIC_NAME_SIZE,\n                        0,\n                        COST_PER_MESSAGE_LENGTH,\n                    ],\n                ),\n                ..Zero::zero()\n            },\n        );\n        let wasm_v2_config = WasmV2Config::new(\n            DEFAULT_WASM_MAX_MEMORY,\n            OpcodeCosts::zero(),\n            HostFunctionCostsV2::default(),\n        );\n     
   let wasm_config = WasmConfig::new(MessageLimits::default(), wasm_v1_config, wasm_v2_config);\n        ChainspecConfig {\n            wasm_config,\n            core_config: CoreConfig::default(),\n            system_costs_config: SystemConfig::default(),\n            storage_costs: StorageCosts::zero(),\n        }\n    };\n\n    let builder = RefCell::new(LmdbWasmTestBuilder::new_temporary_with_config(chainspec));\n    builder\n        .borrow_mut()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let contract_hash = install_messages_emitter_contract(&builder, true);\n\n    // Emit one message in this execution. Cost should be consume of the call to emit message +\n    // consume charged for message topic name length + consume for message payload size.\n    emit_message_with_suffix(&builder, MESSAGE_SUFFIX, &contract_hash, DEFAULT_BLOCK_TIME);\n    let emit_message_gas_consume = builder.borrow().last_exec_gas_consumed().value();\n    let payload: MessagePayload = format!(\"{}{}\", EMITTER_MESSAGE_PREFIX, MESSAGE_SUFFIX).into();\n    let expected_consume = MESSAGE_EMIT_COST\n        + COST_PER_MESSAGE_TOPIC_NAME_SIZE * MESSAGE_EMITTER_GENERIC_TOPIC.len() as u32\n        + COST_PER_MESSAGE_LENGTH * payload.serialized_length() as u32;\n    assert_eq!(emit_message_gas_consume, expected_consume.into());\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/counter_factory.rs",
    "content": "use std::{collections::BTreeSet, iter::FromIterator};\n\nuse crate::wasm_utils;\nuse casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::{engine_state::Error, execution::ExecError};\nuse casper_types::{\n    addressable_entity::{EntityKindTag, DEFAULT_ENTRY_POINT_NAME},\n    runtime_args, AddressableEntityHash, ByteCodeAddr, Key, RuntimeArgs, U512,\n};\n\nconst CONTRACT_COUNTER_FACTORY: &str = \"counter_factory.wasm\";\nconst CONTRACT_FACTORY_DEFAULT_ENTRY_POINT: &str = \"contract_factory_default\";\nconst CONTRACT_FACTORY_ENTRY_POINT: &str = \"contract_factory\";\nconst DECREASE_ENTRY_POINT: &str = \"decrement\";\nconst INCREASE_ENTRY_POINT: &str = \"increment\";\nconst ARG_INITIAL_VALUE: &str = \"initial_value\";\nconst ARG_NAME: &str = \"name\";\nconst NEW_COUNTER_1_NAME: &str = \"new-counter-1\";\nconst NEW_COUNTER_2_NAME: &str = \"new-counter-2\";\n\n#[ignore]\n#[test]\nfn should_not_call_undefined_entrypoints_on_factory() {\n    let (mut builder, contract_hash) = setup();\n\n    let exec_request_1 = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        DEFAULT_ENTRY_POINT_NAME, // should not be able to call \"call\" entry point\n        RuntimeArgs::new(),\n    )\n    .build();\n\n    builder.exec(exec_request_1).commit();\n\n    let no_such_method_1 = builder.get_error().expect(\"should have error\");\n\n    assert!(\n        matches!(no_such_method_1, Error::Exec(ExecError::NoSuchMethod(function_name)) if function_name == DEFAULT_ENTRY_POINT_NAME)\n    );\n\n    // Can't call abstract entry point \"increase\" on the factory.\n\n    let exec_request_2 = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        INCREASE_ENTRY_POINT,\n        RuntimeArgs::new(),\n    )\n    .build();\n\n    builder.exec(exec_request_2).commit();\n\n 
   let no_such_method_2 = builder.get_error().expect(\"should have error\");\n\n    assert!(\n        matches!(&no_such_method_2, Error::Exec(ExecError::TemplateMethod(function_name)) if function_name == INCREASE_ENTRY_POINT),\n        \"{:?}\",\n        &no_such_method_2\n    );\n\n    // Can't call abstract entry point \"decrease\" on the factory.\n\n    let exec_request_3 = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        DECREASE_ENTRY_POINT,\n        RuntimeArgs::new(),\n    )\n    .build();\n\n    builder.exec(exec_request_3).commit();\n\n    let no_such_method_3 = builder.get_error().expect(\"should have error\");\n\n    assert!(\n        matches!(&no_such_method_3, Error::Exec(ExecError::TemplateMethod(function_name)) if function_name == DECREASE_ENTRY_POINT),\n        \"{:?}\",\n        &no_such_method_3\n    );\n}\n\n#[ignore]\n#[test]\nfn contract_factory_wasm_should_have_expected_exports() {\n    let (builder, contract_hash) = setup();\n\n    let enable_entity = builder.chainspec().core_config.enable_addressable_entity;\n\n    let bytes = if enable_entity {\n        let factory_contract = builder\n            .query(\n                None,\n                Key::addressable_entity_key(EntityKindTag::SmartContract, contract_hash),\n                &[],\n            )\n            .expect(\"should have contract\")\n            .as_addressable_entity()\n            .cloned()\n            .expect(\"should be contract\");\n\n        let factory_contract_byte_code_key = Key::byte_code_key(ByteCodeAddr::new_wasm_addr(\n            factory_contract.byte_code_addr(),\n        ));\n\n        let factory_contract_wasm = builder\n            .query(None, factory_contract_byte_code_key, &[])\n            .expect(\"should have contract wasm\")\n            .as_byte_code()\n            .cloned()\n            .expect(\"should have wasm\");\n        factory_contract_wasm.take_bytes()\n    } else {\n        let 
factory_contract = builder\n            .query(None, Key::Hash(contract_hash.value()), &[])\n            .expect(\"should have contract\")\n            .as_contract()\n            .cloned()\n            .expect(\"should be contract\");\n\n        let factory_contract_byte_code_key =\n            Key::Hash(factory_contract.contract_wasm_hash().value());\n\n        let factory_contract_wasm = builder\n            .query(None, factory_contract_byte_code_key, &[])\n            .expect(\"should have contract wasm\")\n            .as_contract_wasm()\n            .cloned()\n            .expect(\"should have wasm\");\n\n        factory_contract_wasm.take_bytes()\n    };\n\n    let factory_wasm_exports = wasm_utils::get_wasm_exports(&bytes);\n    let expected_entrypoints = BTreeSet::from_iter([\n        INCREASE_ENTRY_POINT.to_string(),\n        DECREASE_ENTRY_POINT.to_string(),\n        CONTRACT_FACTORY_ENTRY_POINT.to_string(),\n        CONTRACT_FACTORY_DEFAULT_ENTRY_POINT.to_string(),\n    ]);\n    assert_eq!(factory_wasm_exports, expected_entrypoints);\n}\n\n#[ignore]\n#[test]\nfn should_install_and_use_factory_pattern() {\n    let (mut builder, contract_hash) = setup();\n\n    // Call a factory entrypoint\n    let exec_request_1 = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        CONTRACT_FACTORY_ENTRY_POINT,\n        runtime_args! {\n            ARG_NAME => NEW_COUNTER_1_NAME,\n            ARG_INITIAL_VALUE => U512::one(),\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_1).commit().expect_success();\n\n    // Call a different factory entrypoint that accepts different set of arguments\n    let exec_request_2 = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        CONTRACT_FACTORY_DEFAULT_ENTRY_POINT,\n        runtime_args! 
{\n            ARG_NAME => NEW_COUNTER_2_NAME,\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_2).commit().expect_success();\n\n    let counter_factory_contract = builder\n        .get_entity_with_named_keys_by_entity_hash(contract_hash)\n        .expect(\"should have contract hash\");\n\n    let new_counter_1 = counter_factory_contract\n        .named_keys()\n        .get(NEW_COUNTER_1_NAME)\n        .expect(\"new counter should exist\")\n        .into_entity_hash()\n        .unwrap();\n\n    let new_counter_1_contract = builder\n        .get_addressable_entity(new_counter_1)\n        .expect(\"should have contract instance\");\n\n    let new_counter_2 = counter_factory_contract\n        .named_keys()\n        .get(NEW_COUNTER_2_NAME)\n        .expect(\"new counter should exist\")\n        .into_entity_hash()\n        .unwrap();\n\n    let _new_counter_2_contract = builder\n        .get_addressable_entity(new_counter_2)\n        .expect(\"should have contract instance\");\n\n    let counter_1_wasm = if builder.chainspec().core_config.enable_addressable_entity {\n        builder\n            .query(\n                None,\n                Key::byte_code_key(ByteCodeAddr::new_wasm_addr(\n                    new_counter_1_contract.byte_code_addr(),\n                )),\n                &[],\n            )\n            .expect(\"should have contract wasm\")\n            .as_byte_code()\n            .cloned()\n            .expect(\"should have wasm\")\n            .take_bytes()\n    } else {\n        builder\n            .query(\n                None,\n                Key::Hash(new_counter_1_contract.byte_code_addr()),\n                &[],\n            )\n            .expect(\"should have contract wasm\")\n            .as_contract_wasm()\n            .cloned()\n            .expect(\"should have wasm\")\n            .take_bytes()\n    };\n\n    let new_counter_1_exports = wasm_utils::get_wasm_exports(&counter_1_wasm);\n    assert_eq!(\n        
new_counter_1_exports,\n        BTreeSet::from_iter([\n            INCREASE_ENTRY_POINT.to_string(),\n            DECREASE_ENTRY_POINT.to_string()\n        ])\n    );\n\n    let increment_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        new_counter_1,\n        INCREASE_ENTRY_POINT,\n        RuntimeArgs::new(),\n    )\n    .build();\n\n    builder.exec(increment_request).commit().expect_success();\n\n    let decrement_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        new_counter_1,\n        DECREASE_ENTRY_POINT,\n        RuntimeArgs::new(),\n    )\n    .build();\n\n    builder.exec(decrement_request).commit().expect_success();\n}\n\nfn setup() -> (LmdbWasmTestBuilder, AddressableEntityHash) {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_COUNTER_FACTORY,\n        RuntimeArgs::new(),\n    )\n    .build();\n\n    builder.exec(exec_request).commit().expect_success();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have entity for account\");\n\n    let contract_hash_key = account\n        .named_keys()\n        .get(\"factory_hash\")\n        .expect(\"should have factory hash\");\n\n    (builder, contract_hash_key.into_entity_hash().unwrap())\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/deploy/context_association.rs",
    "content": "use casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_ACCOUNT_KEY, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST,\n};\n\nuse casper_types::{\n    runtime_args,\n    system::{AUCTION, HANDLE_PAYMENT, MINT},\n};\n\nconst SYSTEM_CONTRACT_HASHES_WASM: &str = \"system_contract_hashes.wasm\";\nconst ARG_AMOUNT: &str = \"amount\";\n\n#[ignore]\n#[test]\nfn should_put_system_contract_hashes_to_account_context() {\n    let payment_purse_amount = *DEFAULT_PAYMENT;\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_session_code(SYSTEM_CONTRACT_HASHES_WASM, runtime_args! {})\n        .with_standard_payment(runtime_args! { ARG_AMOUNT => payment_purse_amount})\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY])\n        .with_deploy_hash([1; 32])\n        .build();\n\n    let request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(request)\n        .expect_success()\n        .commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"account should exist\");\n\n    let named_keys = account.named_keys();\n\n    assert!(named_keys.contains(MINT), \"should contain mint\");\n    assert!(\n        named_keys.contains(HANDLE_PAYMENT),\n        \"should contain handle payment\"\n    );\n    assert!(named_keys.contains(AUCTION), \"should contain auction\");\n\n    assert_eq!(\n        named_keys\n            .get(MINT)\n            .unwrap()\n            .into_entity_hash_addr()\n            .expect(\"should be a hash\"),\n        builder.get_mint_contract_hash().value(),\n        \"mint_contract_hash should match\"\n    );\n    assert_eq!(\n        named_keys\n            .get(HANDLE_PAYMENT)\n           
 .unwrap()\n            .into_entity_hash_addr()\n            .expect(\"should be a hash\"),\n        builder.get_handle_payment_contract_hash().value(),\n        \"handle_payment_contract_hash should match\"\n    );\n    assert_eq!(\n        named_keys\n            .get(AUCTION)\n            .unwrap()\n            .into_entity_hash_addr()\n            .expect(\"should be a hash\"),\n        builder.get_auction_contract_hash().value(),\n        \"auction_contract_hash should match\"\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/deploy/mod.rs",
    "content": "mod context_association;\nmod non_standard_payment;\nmod preconditions;\nmod receipts;\nmod stored_contracts;\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/deploy/non_standard_payment.rs",
    "content": "use casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_PAYMENT, DEFAULT_PROTOCOL_VERSION, LOCAL_GENESIS_REQUEST,\n    MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse casper_execution_engine::{engine_state::BlockInfo, execution::ExecError};\nuse casper_storage::data_access_layer::BalanceIdentifier;\nuse casper_types::{\n    account::AccountHash, runtime_args, ApiError, BlockHash, Digest, Gas, RuntimeArgs, Timestamp,\n    U512,\n};\n\nconst ACCOUNT_1_ADDR: AccountHash = AccountHash::new([42u8; 32]);\nconst DO_NOTHING_WASM: &str = \"do_nothing.wasm\";\nconst CONTRACT_TRANSFER_TO_ACCOUNT: &str = \"transfer_to_account_u512.wasm\";\nconst TRANSFER_MAIN_PURSE_TO_NEW_PURSE_WASM: &str = \"transfer_main_purse_to_new_purse.wasm\";\nconst PAYMENT_PURSE_PERSIST_WASM: &str = \"payment_purse_persist.wasm\";\nconst NAMED_PURSE_PAYMENT_WASM: &str = \"named_purse_payment.wasm\";\nconst ARG_TARGET: &str = \"target\";\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_PURSE_NAME: &str = \"purse_name\";\nconst ARG_DESTINATION: &str = \"destination\";\n\n#[ignore]\n#[test]\nfn should_charge_non_main_purse() {\n    // as account_1, create & fund a new purse and use that to pay for something\n    // instead of account_1 main purse\n    const TEST_PURSE_NAME: &str = \"test-purse\";\n\n    let account_1_account_hash = ACCOUNT_1_ADDR;\n    let account_1_funding_amount = U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE);\n    let account_1_purse_funding_amount = *DEFAULT_PAYMENT;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    let setup_exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{ ARG_TARGET => ACCOUNT_1_ADDR, ARG_AMOUNT => account_1_funding_amount },\n    )\n    .build();\n\n    let create_purse_exec_request = ExecuteRequestBuilder::standard(\n        ACCOUNT_1_ADDR,\n        TRANSFER_MAIN_PURSE_TO_NEW_PURSE_WASM,\n        runtime_args! { ARG_DESTINATION => TEST_PURSE_NAME, ARG_AMOUNT => account_1_purse_funding_amount },\n    )\n        .build();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder\n        .exec(setup_exec_request)\n        .expect_success()\n        .commit()\n        .exec(create_purse_exec_request)\n        .expect_success()\n        .commit();\n\n    // get account_1\n    let account_1 = builder\n        .get_entity_with_named_keys_by_account_hash(ACCOUNT_1_ADDR)\n        .expect(\"should have account\");\n    // get purse\n    let purse_key = account_1.named_keys().get(TEST_PURSE_NAME).unwrap();\n    let purse = purse_key.into_uref().expect(\"should have uref\");\n    let purse_starting_balance = builder.get_purse_balance(purse);\n\n    assert_eq!(\n        purse_starting_balance, account_1_purse_funding_amount,\n        \"purse should be funded with expected amount, which in this case is also == to the amount to be paid\"\n    );\n\n    // in this test, we're just going to pay everything in the purse to\n    // keep the math easy.\n    let amount_to_be_paid = account_1_purse_funding_amount;\n    // should be able to pay for exec using new purse\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(ACCOUNT_1_ADDR)\n        .with_session_code(DO_NOTHING_WASM, RuntimeArgs::default())\n        .with_payment_code(\n            NAMED_PURSE_PAYMENT_WASM,\n            runtime_args! 
{\n                ARG_PURSE_NAME => TEST_PURSE_NAME,\n                ARG_AMOUNT => amount_to_be_paid\n            },\n        )\n        .with_authorization_keys(&[account_1_account_hash])\n        .with_deploy_hash([3; 32])\n        .build();\n\n    let block_time = Timestamp::now().millis();\n    let parent_block_hash = BlockHash::default();\n    let block_info = BlockInfo::new(\n        Digest::default(),\n        block_time.into(),\n        parent_block_hash,\n        1,\n        DEFAULT_PROTOCOL_VERSION,\n    );\n    builder\n        .exec_wasm_v1(\n            deploy_item\n                .new_custom_payment_from_deploy_item(block_info, Gas::from(12_500_000_000_u64))\n                .expect(\"should be valid req\"),\n        )\n        .expect_success()\n        .commit();\n\n    let payment_purse_balance = builder\n        .get_purse_balance_result_with_proofs(DEFAULT_PROTOCOL_VERSION, BalanceIdentifier::Payment);\n\n    assert!(\n        payment_purse_balance.is_success(),\n        \"payment purse balance check should succeed\"\n    );\n\n    let paid_amount = *payment_purse_balance\n        .available_balance()\n        .expect(\"should have payment amount\");\n\n    assert_eq!(\n        paid_amount, amount_to_be_paid,\n        \"purse resting balance should equal funding amount minus exec costs\"\n    );\n\n    let purse_final_balance = builder.get_purse_balance(purse);\n\n    assert_eq!(\n        purse_final_balance,\n        U512::zero(),\n        \"since we zero'd out the paying purse, the final balance should be zero\"\n    );\n}\n\nconst ARG_METHOD: &str = \"method\";\n\n#[ignore]\n#[test]\nfn should_not_allow_custom_payment_purse_persistence_1() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let account_hash = *DEFAULT_ACCOUNT_ADDR;\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(account_hash)\n        .with_session_code(DO_NOTHING_WASM, 
RuntimeArgs::default())\n        .with_payment_code(\n            PAYMENT_PURSE_PERSIST_WASM,\n            runtime_args! {ARG_AMOUNT => *DEFAULT_PAYMENT, ARG_METHOD => \"put_key\"},\n        )\n        .with_deploy_hash([1; 32])\n        .with_authorization_keys(&[account_hash])\n        .build();\n    let block_info = BlockInfo::new(\n        Digest::default(),\n        Timestamp::now().millis().into(),\n        BlockHash::default(),\n        1,\n        DEFAULT_PROTOCOL_VERSION,\n    );\n    let limit = Gas::from(12_500_000_000_u64);\n\n    let request = deploy_item\n        .new_custom_payment_from_deploy_item(block_info, limit)\n        .expect(\"should be valid req\");\n\n    builder.exec_wasm_v1(request).expect_failure();\n\n    builder.assert_error(casper_execution_engine::engine_state::Error::Exec(\n        ExecError::Revert(ApiError::HandlePayment(40)),\n    ));\n}\n\n#[ignore]\n#[test]\nfn should_not_allow_custom_payment_purse_persistence_2() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let account_hash = *DEFAULT_ACCOUNT_ADDR;\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(account_hash)\n        .with_session_code(DO_NOTHING_WASM, RuntimeArgs::default())\n        .with_payment_code(\n            PAYMENT_PURSE_PERSIST_WASM,\n            runtime_args! 
{ARG_AMOUNT => *DEFAULT_PAYMENT, ARG_METHOD => \"call_contract\"},\n        )\n        .with_deploy_hash([1; 32])\n        .with_authorization_keys(&[account_hash])\n        .build();\n    let block_info = BlockInfo::new(\n        Digest::default(),\n        Timestamp::now().millis().into(),\n        BlockHash::default(),\n        1,\n        DEFAULT_PROTOCOL_VERSION,\n    );\n    let limit = Gas::from(12_500_000_000_u64);\n\n    let request = deploy_item\n        .new_custom_payment_from_deploy_item(block_info, limit)\n        .expect(\"should be valid req\");\n\n    builder.exec_wasm_v1(request).expect_failure();\n\n    builder.assert_error(casper_execution_engine::engine_state::Error::Exec(\n        ExecError::Revert(ApiError::HandlePayment(40)),\n    ));\n}\n\n#[ignore]\n#[test]\nfn should_not_allow_custom_payment_purse_persistence_3() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let account_hash = *DEFAULT_ACCOUNT_ADDR;\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(account_hash)\n        .with_session_code(DO_NOTHING_WASM, RuntimeArgs::default())\n        .with_payment_code(\n            PAYMENT_PURSE_PERSIST_WASM,\n            runtime_args! 
{ARG_AMOUNT => *DEFAULT_PAYMENT, ARG_METHOD => \"call_versioned_contract\"},\n        )\n        .with_deploy_hash([1; 32])\n        .with_authorization_keys(&[account_hash])\n        .build();\n    let block_info = BlockInfo::new(\n        Digest::default(),\n        Timestamp::now().millis().into(),\n        BlockHash::default(),\n        1,\n        DEFAULT_PROTOCOL_VERSION,\n    );\n    let limit = Gas::from(12_500_000_000_u64);\n\n    let request = deploy_item\n        .new_custom_payment_from_deploy_item(block_info, limit)\n        .expect(\"should be valid req\");\n\n    builder.exec_wasm_v1(request).expect_failure();\n\n    builder.assert_error(casper_execution_engine::engine_state::Error::Exec(\n        ExecError::Revert(ApiError::HandlePayment(40)),\n    ));\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/deploy/preconditions.rs",
    "content": "use assert_matches::assert_matches;\n\nuse casper_engine_test_support::{\n    utils, DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR,\n    LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::engine_state::Error;\nuse casper_storage::tracking_copy::TrackingCopyError;\nuse casper_types::{account::AccountHash, runtime_args, RuntimeArgs, U512};\n\nconst ACCOUNT_1_ADDR: AccountHash = AccountHash::new([42u8; 32]);\nconst ARG_AMOUNT: &str = \"amount\";\n\n#[ignore]\n#[test]\nfn should_raise_precondition_authorization_failure_invalid_account() {\n    let account_1_account_hash = ACCOUNT_1_ADDR;\n    let nonexistent_account_addr = AccountHash::new([99u8; 32]);\n    let payment_purse_amount = 10_000_000;\n    let transferred_amount = 1;\n\n    let deploy_item = DeployItemBuilder::new()\n            .with_address(*DEFAULT_ACCOUNT_ADDR)\n            .with_deploy_hash([1; 32])\n            .with_session_code(\n                \"transfer_purse_to_account.wasm\",\n                runtime_args! { \"target\" =>account_1_account_hash, \"amount\" => U512::from(transferred_amount) },\n            )\n            // .with_address(nonexistent_account_addr)\n            .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => U512::from(payment_purse_amount) })\n            .with_authorization_keys(&[nonexistent_account_addr])\n            .build();\n\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .commit();\n\n    let response = builder\n        .get_exec_result_owned(0)\n        .expect(\"there should be a response\");\n\n    let precondition_failure = utils::get_precondition_failure(&response);\n    assert_matches!(\n        precondition_failure,\n        Error::TrackingCopy(TrackingCopyError::Authorization)\n    );\n}\n\n#[ignore]\n#[test]\nfn should_raise_precondition_authorization_failure_empty_authorized_keys() {\n    let empty_keys: [AccountHash; 0] = [];\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_session_code(\"do_nothing.wasm\", RuntimeArgs::default())\n        .with_standard_payment(RuntimeArgs::default())\n        .with_deploy_hash([1; 32])\n        // empty authorization keys to force error\n        .with_authorization_keys(&empty_keys)\n        .build();\n\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .commit();\n\n    let response = builder\n        .get_exec_result_owned(0)\n        .expect(\"there should be a response\");\n\n    let precondition_failure = utils::get_precondition_failure(&response);\n    assert_matches!(\n        precondition_failure,\n        Error::TrackingCopy(TrackingCopyError::Authorization)\n    );\n}\n\n#[ignore]\n#[test]\nfn should_raise_precondition_authorization_failure_invalid_authorized_keys() {\n    let account_1_account_hash = ACCOUNT_1_ADDR;\n    let 
nonexistent_account_addr = AccountHash::new([99u8; 32]);\n    let payment_purse_amount = 10_000_000;\n    let transferred_amount = 1;\n\n    let deploy_item = DeployItemBuilder::new()\n            .with_address(*DEFAULT_ACCOUNT_ADDR)\n            .with_deploy_hash([1; 32])\n            .with_session_code(\n                \"transfer_purse_to_account.wasm\",\n                runtime_args! { \"target\" =>account_1_account_hash, \"amount\" => U512::from(transferred_amount) },\n            )\n            .with_standard_payment(runtime_args! { ARG_AMOUNT => U512::from(payment_purse_amount) })\n            // invalid authorization key to force error\n            .with_authorization_keys(&[nonexistent_account_addr])\n            .build();\n\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .commit();\n\n    let response = builder\n        .get_exec_result_owned(0)\n        .expect(\"there should be a response\");\n\n    let precondition_failure = utils::get_precondition_failure(&response);\n    assert_matches!(\n        precondition_failure,\n        Error::TrackingCopy(TrackingCopyError::Authorization)\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/deploy/receipts.rs",
    "content": "use std::collections::{BTreeMap, BTreeSet};\n\nuse once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder, DEFAULT_ACCOUNT_ADDR,\n    LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{\n    account::AccountHash, runtime_args, system::mint, AccessRights, Gas, InitiatorAddr, PublicKey,\n    SecretKey, Transfer, TransferV2, U512,\n};\n\nconst CONTRACT_TRANSFER_PURSE_TO_ACCOUNT: &str = \"transfer_purse_to_account.wasm\";\nconst CONTRACT_TRANSFER_PURSE_TO_ACCOUNT_WITH_ID: &str = \"transfer_purse_to_account_with_id.wasm\";\nconst TRANSFER_ARG_TARGET: &str = \"target\";\nconst TRANSFER_ARG_AMOUNT: &str = \"amount\";\nconst TRANSFER_ARG_ID: &str = \"id\";\n\nconst CONTRACT_TRANSFER_PURSE_TO_ACCOUNTS: &str = \"transfer_purse_to_accounts.wasm\";\nconst TRANSFER_ARG_TARGETS: &str = \"targets\";\n\nconst CONTRACT_TRANSFER_PURSE_TO_ACCOUNTS_STORED: &str = \"transfer_purse_to_accounts_stored.wasm\";\nconst CONTRACT_TRANSFER_PURSE_TO_ACCOUNTS_SUBCALL: &str = \"transfer_purse_to_accounts_subcall.wasm\";\n\nconst HASH_KEY_NAME: &str = \"transfer_purse_to_accounts_hash\";\nconst PURSE_NAME: &str = \"purse\";\n\nstatic ALICE_KEY: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([3; 32]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic BOB_KEY: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([5; 32]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic CAROL_KEY: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([7; 32]).unwrap();\n    PublicKey::from(&secret_key)\n});\n\nstatic ALICE_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*ALICE_KEY));\nstatic BOB_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*BOB_KEY));\nstatic CAROL_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*CAROL_KEY));\n\nstatic TRANSFER_AMOUNT_1: Lazy<U512> = Lazy::new(|| 
U512::from(100_100_000));\nstatic TRANSFER_AMOUNT_2: Lazy<U512> = Lazy::new(|| U512::from(200_100_000));\nstatic TRANSFER_AMOUNT_3: Lazy<U512> = Lazy::new(|| U512::from(300_100_000));\n\n#[ignore]\n#[test]\nfn should_record_wasmless_transfer() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let id = 0;\n\n    let transfer_request = TransferRequestBuilder::new(*TRANSFER_AMOUNT_1, *ALICE_ADDR)\n        .with_transfer_id(id)\n        .build();\n\n    let txn_hash = transfer_request.transaction_hash();\n\n    builder\n        .transfer_and_commit(transfer_request)\n        .expect_success();\n\n    let default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have default account\");\n\n    let alice_account = builder\n        .get_entity_by_account_hash(*ALICE_ADDR)\n        .expect(\"should have Alice's account\");\n\n    let alice_attenuated_main_purse = alice_account\n        .main_purse()\n        .with_access_rights(AccessRights::ADD);\n\n    let execution_result = builder\n        .get_last_exec_result()\n        .expect(\"Expected execution results.\");\n\n    let transfers = execution_result.transfers();\n    assert_eq!(transfers.len(), 1);\n\n    let Transfer::V2(transfer) = transfers[0].clone() else {\n        panic!(\"wrong transfer variant\");\n    };\n\n    assert_eq!(transfer.transaction_hash, txn_hash);\n    assert_eq!(\n        transfer.from,\n        InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR)\n    );\n    assert_eq!(transfer.to, Some(*ALICE_ADDR));\n    assert_eq!(transfer.source, default_account.main_purse());\n    assert_eq!(transfer.target, alice_attenuated_main_purse);\n    assert_eq!(transfer.amount, *TRANSFER_AMOUNT_1);\n    assert_eq!(\n        transfer.gas,\n        Gas::from(\n            builder\n                .chainspec()\n                .system_costs_config\n                .mint_costs()\n                
.transfer\n        )\n    );\n    assert_eq!(transfer.id, Some(id));\n}\n\n#[ignore]\n#[test]\nfn should_record_wasm_transfer() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let transfer_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_PURSE_TO_ACCOUNT,\n        runtime_args! {\n            TRANSFER_ARG_TARGET => *ALICE_ADDR,\n            TRANSFER_ARG_AMOUNT => *TRANSFER_AMOUNT_1\n        },\n    )\n    .build();\n\n    let txn_hash = transfer_request.session.transaction_hash;\n\n    builder.exec(transfer_request).commit().expect_success();\n\n    let default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have default account\");\n\n    let alice_account = builder\n        .get_entity_by_account_hash(*ALICE_ADDR)\n        .expect(\"should have Alice's account\");\n\n    let alice_attenuated_main_purse = alice_account\n        .main_purse()\n        .with_access_rights(AccessRights::ADD);\n\n    let execution_result = builder\n        .get_last_exec_result()\n        .expect(\"Expected execution results.\");\n\n    assert_ne!(execution_result.consumed(), Gas::zero());\n    let transfers = execution_result.transfers();\n    assert_eq!(transfers.len(), 1);\n\n    let Transfer::V2(transfer) = transfers[0].clone() else {\n        panic!(\"wrong transfer variant\");\n    };\n\n    assert_eq!(transfer.transaction_hash, txn_hash);\n    assert_eq!(\n        transfer.from,\n        InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR)\n    );\n    assert_eq!(transfer.source, default_account.main_purse());\n    assert_eq!(transfer.target, alice_attenuated_main_purse);\n    assert_eq!(transfer.amount, *TRANSFER_AMOUNT_1);\n    assert_eq!(\n        transfer.gas,\n        Gas::from(\n            builder\n                .chainspec()\n                .system_costs_config\n                
.mint_costs()\n                .transfer\n        )\n    )\n}\n\n#[ignore]\n#[test]\nfn should_record_wasm_transfer_with_id() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let id = Some(0);\n\n    let transfer_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_PURSE_TO_ACCOUNT_WITH_ID,\n        runtime_args! {\n            TRANSFER_ARG_TARGET => *ALICE_ADDR,\n            TRANSFER_ARG_AMOUNT => *TRANSFER_AMOUNT_1,\n            TRANSFER_ARG_ID => id\n        },\n    )\n    .build();\n\n    let txn_hash = transfer_request.session.transaction_hash;\n\n    builder.exec(transfer_request).commit().expect_success();\n\n    let default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have default account\");\n\n    let alice_account = builder\n        .get_entity_by_account_hash(*ALICE_ADDR)\n        .expect(\"should have Alice's account\");\n\n    let alice_attenuated_main_purse = alice_account\n        .main_purse()\n        .with_access_rights(AccessRights::ADD);\n\n    let execution_result = builder\n        .get_last_exec_result()\n        .expect(\"Expected execution results.\");\n\n    assert_ne!(execution_result.consumed(), Gas::zero());\n    let transfers = execution_result.transfers();\n    assert_eq!(transfers.len(), 1);\n\n    let Transfer::V2(transfer) = transfers[0].clone() else {\n        panic!(\"wrong transfer variant\");\n    };\n\n    assert_eq!(transfer.transaction_hash, txn_hash);\n    assert_eq!(\n        transfer.from,\n        InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR)\n    );\n    assert_eq!(transfer.source, default_account.main_purse());\n    assert_eq!(transfer.target, alice_attenuated_main_purse);\n    assert_eq!(transfer.amount, *TRANSFER_AMOUNT_1);\n    assert_eq!(\n        transfer.gas,\n        Gas::from(\n            builder\n                .chainspec()\n  
              .system_costs_config\n                .mint_costs()\n                .transfer\n        )\n    );\n    assert_eq!(transfer.id, id);\n}\n\n#[ignore]\n#[test]\nfn should_record_wasm_transfers() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let alice_id = Some(0);\n    let bob_id = Some(1);\n    let carol_id = Some(2);\n\n    let targets: BTreeMap<AccountHash, (U512, Option<u64>)> = {\n        let mut tmp = BTreeMap::new();\n        tmp.insert(*ALICE_ADDR, (*TRANSFER_AMOUNT_1, alice_id));\n        tmp.insert(*BOB_ADDR, (*TRANSFER_AMOUNT_2, bob_id));\n        tmp.insert(*CAROL_ADDR, (*TRANSFER_AMOUNT_3, carol_id));\n        tmp\n    };\n\n    let transfer_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_PURSE_TO_ACCOUNTS,\n        runtime_args! {\n            mint::ARG_AMOUNT => *TRANSFER_AMOUNT_1 + *TRANSFER_AMOUNT_2 + *TRANSFER_AMOUNT_3,\n            TRANSFER_ARG_TARGETS => targets,\n        },\n    )\n    .build();\n\n    let txn_hash = transfer_request.session.transaction_hash;\n\n    builder.exec(transfer_request).commit().expect_success();\n\n    let default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have default account\");\n\n    let alice_account = builder\n        .get_entity_by_account_hash(*ALICE_ADDR)\n        .expect(\"should have Alice's account\");\n\n    let bob_account = builder\n        .get_entity_by_account_hash(*BOB_ADDR)\n        .expect(\"should have Bob's account\");\n\n    let carol_account = builder\n        .get_entity_by_account_hash(*CAROL_ADDR)\n        .expect(\"should have Carol's account\");\n\n    let alice_attenuated_main_purse = alice_account\n        .main_purse()\n        .with_access_rights(AccessRights::ADD);\n\n    let bob_attenuated_main_purse = bob_account\n        .main_purse()\n        
.with_access_rights(AccessRights::ADD);\n\n    let carol_attenuated_main_purse = carol_account\n        .main_purse()\n        .with_access_rights(AccessRights::ADD);\n\n    let execution_result = builder\n        .get_last_exec_result()\n        .expect(\"Expected execution results.\");\n\n    assert_ne!(execution_result.consumed(), Gas::zero());\n    const EXPECTED_LENGTH: usize = 3;\n    assert_eq!(execution_result.transfers().len(), EXPECTED_LENGTH);\n    assert_eq!(\n        execution_result\n            .transfers()\n            .iter()\n            .cloned()\n            .collect::<BTreeSet<Transfer>>()\n            .len(),\n        EXPECTED_LENGTH\n    );\n\n    let transfers: BTreeSet<Transfer> = {\n        let mut tmp = BTreeSet::new();\n        for transfer in execution_result.transfers() {\n            tmp.insert(transfer.clone());\n        }\n        tmp\n    };\n\n    assert_eq!(transfers.len(), EXPECTED_LENGTH);\n\n    assert!(transfers.contains(&Transfer::V2(TransferV2 {\n        transaction_hash: txn_hash,\n        from: InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR),\n        to: Some(*ALICE_ADDR),\n        source: default_account.main_purse(),\n        target: alice_attenuated_main_purse,\n        amount: *TRANSFER_AMOUNT_1,\n        gas: Gas::from(\n            builder\n                .chainspec()\n                .system_costs_config\n                .mint_costs()\n                .transfer\n        ),\n        id: alice_id,\n    })));\n\n    assert!(transfers.contains(&Transfer::V2(TransferV2 {\n        transaction_hash: txn_hash,\n        from: InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR),\n        to: Some(*BOB_ADDR),\n        source: default_account.main_purse(),\n        target: bob_attenuated_main_purse,\n        amount: *TRANSFER_AMOUNT_2,\n        gas: Gas::from(\n            builder\n                .chainspec()\n                .system_costs_config\n                .mint_costs()\n                .transfer\n        ),\n        
id: bob_id,\n    })));\n\n    assert!(transfers.contains(&Transfer::V2(TransferV2 {\n        transaction_hash: txn_hash,\n        from: InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR),\n        to: Some(*CAROL_ADDR),\n        source: default_account.main_purse(),\n        target: carol_attenuated_main_purse,\n        amount: *TRANSFER_AMOUNT_3,\n        gas: Gas::from(\n            builder\n                .chainspec()\n                .system_costs_config\n                .mint_costs()\n                .transfer\n        ),\n        id: carol_id,\n    })));\n}\n\n#[ignore]\n#[test]\nfn should_record_wasm_transfers_with_subcall() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let alice_id = Some(0);\n    let bob_id = Some(1);\n    let carol_id = Some(2);\n\n    let total_transfer_amount = *TRANSFER_AMOUNT_1 + *TRANSFER_AMOUNT_2 + *TRANSFER_AMOUNT_3;\n\n    let store_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_PURSE_TO_ACCOUNTS_STORED,\n        runtime_args! {\n            mint::ARG_AMOUNT => total_transfer_amount,\n        },\n    )\n    .build();\n\n    let targets: BTreeMap<AccountHash, (U512, Option<u64>)> = {\n        let mut tmp = BTreeMap::new();\n        tmp.insert(*ALICE_ADDR, (*TRANSFER_AMOUNT_1, alice_id));\n        tmp.insert(*BOB_ADDR, (*TRANSFER_AMOUNT_2, bob_id));\n        tmp.insert(*CAROL_ADDR, (*TRANSFER_AMOUNT_3, carol_id));\n        tmp\n    };\n\n    let transfer_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_PURSE_TO_ACCOUNTS_SUBCALL,\n        runtime_args! 
{\n            mint::ARG_AMOUNT => total_transfer_amount,\n            TRANSFER_ARG_TARGETS => targets,\n        },\n    )\n    .build();\n\n    let transfer_txn_hash = transfer_request.session.transaction_hash;\n\n    builder.exec(store_request).commit().expect_success();\n    builder.exec(transfer_request).commit().expect_success();\n\n    let default_account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have default account\");\n\n    let entity_hash = default_account\n        .named_keys()\n        .get(HASH_KEY_NAME)\n        .unwrap()\n        .into_entity_hash()\n        .expect(\"should have contract hash\");\n\n    let contract = builder\n        .get_entity_with_named_keys_by_entity_hash(entity_hash)\n        .expect(\"should have stored contract\");\n\n    let contract_purse = contract\n        .named_keys()\n        .get(PURSE_NAME)\n        .unwrap()\n        .into_uref()\n        .expect(\"should have purse\");\n\n    let alice_account = builder\n        .get_entity_by_account_hash(*ALICE_ADDR)\n        .expect(\"should have Alice's account\");\n\n    let bob_account = builder\n        .get_entity_by_account_hash(*BOB_ADDR)\n        .expect(\"should have Bob's account\");\n\n    let carol_account = builder\n        .get_entity_by_account_hash(*CAROL_ADDR)\n        .expect(\"should have Carol's account\");\n\n    let alice_attenuated_main_purse = alice_account\n        .main_purse()\n        .with_access_rights(AccessRights::ADD);\n\n    let bob_attenuated_main_purse = bob_account\n        .main_purse()\n        .with_access_rights(AccessRights::ADD);\n\n    let carol_attenuated_main_purse = carol_account\n        .main_purse()\n        .with_access_rights(AccessRights::ADD);\n\n    let execution_result = builder\n        .get_last_exec_result()\n        .expect(\"Expected execution results.\");\n\n    /*\n    assert_eq!(txn_info.transaction_hash, transfer_txn_hash);\n    assert_eq!(\n   
     txn_info.from,\n        InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR)\n    );\n    assert_eq!(txn_info.source, default_account.main_purse());\n    */\n\n    assert_ne!(execution_result.consumed(), Gas::zero());\n    const EXPECTED_LENGTH: usize = 6;\n    assert_eq!(execution_result.transfers().len(), EXPECTED_LENGTH);\n    assert_eq!(\n        execution_result\n            .transfers()\n            .iter()\n            .cloned()\n            .collect::<BTreeSet<Transfer>>()\n            .len(),\n        EXPECTED_LENGTH\n    );\n\n    let transfer_counts: BTreeMap<Transfer, usize> = {\n        let mut tmp = BTreeMap::new();\n        for transfer in execution_result.transfers() {\n            tmp.entry(transfer.clone())\n                .and_modify(|i| *i += 1)\n                .or_insert(1);\n        }\n        tmp\n    };\n\n    let session_expected_alice = Transfer::V2(TransferV2 {\n        transaction_hash: transfer_txn_hash,\n        from: InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR),\n        to: Some(*ALICE_ADDR),\n        source: default_account.main_purse(),\n        target: alice_attenuated_main_purse,\n        amount: *TRANSFER_AMOUNT_1,\n        gas: Gas::from(\n            builder\n                .chainspec()\n                .system_costs_config\n                .mint_costs()\n                .transfer,\n        ),\n        id: alice_id,\n    });\n\n    let session_expected_bob = Transfer::V2(TransferV2 {\n        transaction_hash: transfer_txn_hash,\n        from: InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR),\n        to: Some(*BOB_ADDR),\n        source: default_account.main_purse(),\n        target: bob_attenuated_main_purse,\n        amount: *TRANSFER_AMOUNT_2,\n        gas: Gas::from(\n            builder\n                .chainspec()\n                .system_costs_config\n                .mint_costs()\n                .transfer,\n        ),\n        id: bob_id,\n    });\n\n    let session_expected_carol = 
Transfer::V2(TransferV2 {\n        transaction_hash: transfer_txn_hash,\n        from: InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR),\n        to: Some(*CAROL_ADDR),\n        source: default_account.main_purse(),\n        target: carol_attenuated_main_purse,\n        amount: *TRANSFER_AMOUNT_3,\n        gas: Gas::from(\n            builder\n                .chainspec()\n                .system_costs_config\n                .mint_costs()\n                .transfer,\n        ),\n        id: carol_id,\n    });\n\n    const SESSION_EXPECTED_COUNT: Option<usize> = Some(1);\n    for (i, expected) in [\n        session_expected_alice,\n        session_expected_bob,\n        session_expected_carol,\n    ]\n    .iter()\n    .enumerate()\n    {\n        assert_eq!(\n            transfer_counts.get(expected).cloned(),\n            SESSION_EXPECTED_COUNT,\n            \"transfer {} has unexpected value\",\n            i\n        );\n    }\n\n    let stored_expected_alice = Transfer::V2(TransferV2 {\n        transaction_hash: transfer_txn_hash,\n        from: InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR),\n        to: Some(*ALICE_ADDR),\n        source: contract_purse,\n        target: alice_attenuated_main_purse,\n        amount: *TRANSFER_AMOUNT_1,\n        gas: Gas::from(\n            builder\n                .chainspec()\n                .system_costs_config\n                .mint_costs()\n                .transfer,\n        ),\n        id: alice_id,\n    });\n\n    let stored_expected_bob = Transfer::V2(TransferV2 {\n        transaction_hash: transfer_txn_hash,\n        from: InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR),\n        to: Some(*BOB_ADDR),\n        source: contract_purse,\n        target: bob_attenuated_main_purse,\n        amount: *TRANSFER_AMOUNT_2,\n        gas: Gas::from(\n            builder\n                .chainspec()\n                .system_costs_config\n                .mint_costs()\n                .transfer,\n        ),\n        id: 
bob_id,\n    });\n\n    let stored_expected_carol = Transfer::V2(TransferV2 {\n        transaction_hash: transfer_txn_hash,\n        from: InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR),\n        to: Some(*CAROL_ADDR),\n        source: contract_purse,\n        target: carol_attenuated_main_purse,\n        amount: *TRANSFER_AMOUNT_3,\n        gas: Gas::from(\n            builder\n                .chainspec()\n                .system_costs_config\n                .mint_costs()\n                .transfer,\n        ),\n        id: carol_id,\n    });\n\n    const STORED_EXPECTED_COUNT: Option<usize> = Some(1);\n    for (i, expected) in [\n        stored_expected_alice,\n        stored_expected_bob,\n        stored_expected_carol,\n    ]\n    .iter()\n    .enumerate()\n    {\n        assert_eq!(\n            transfer_counts.get(expected).cloned(),\n            STORED_EXPECTED_COUNT,\n            \"transfer {} has unexpected value\",\n            i\n        );\n    }\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/deploy/stored_contracts.rs",
    "content": "use assert_matches::assert_matches;\nuse casper_engine_test_support::{\n    DeployItemBuilder, EntityWithNamedKeys, ExecuteRequestBuilder, LmdbWasmTestBuilder,\n    UpgradeRequestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE,\n    DEFAULT_ACCOUNT_KEY, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::{engine_state::Error, execution::ExecError};\nuse casper_types::{\n    account::AccountHash, runtime_args, EntityVersion, EraId, HashAddr, ProtocolVersion,\n    RuntimeArgs, ENTITY_INITIAL_VERSION, U512,\n};\n\nconst ACCOUNT_1_ADDR: AccountHash = AccountHash::new([42u8; 32]);\nconst DEFAULT_ACTIVATION_POINT: EraId = EraId::new(1);\nconst DO_NOTHING_NAME: &str = \"do_nothing\";\nconst DO_NOTHING_CONTRACT_PACKAGE_HASH_NAME: &str = \"do_nothing_package_hash\";\nconst DO_NOTHING_CONTRACT_HASH_NAME: &str = \"do_nothing_hash\";\nconst INITIAL_VERSION: EntityVersion = ENTITY_INITIAL_VERSION;\nconst ENTRY_FUNCTION_NAME: &str = \"delegate\";\nconst PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::V2_0_0;\nconst STORED_PAYMENT_CONTRACT_NAME: &str = \"test_payment_stored.wasm\";\nconst STORED_PAYMENT_CONTRACT_HASH_NAME: &str = \"test_payment_hash\";\nconst STORED_PAYMENT_CONTRACT_PACKAGE_HASH_NAME: &str = \"test_payment_package_hash\";\nconst PAY_ENTRYPOINT: &str = \"pay\";\nconst TRANSFER_PURSE_TO_ACCOUNT_CONTRACT_NAME: &str = \"transfer_purse_to_account\";\n\nconst ARG_TARGET: &str = \"target\";\nconst ARG_AMOUNT: &str = \"amount\";\n\n/// Prepares a upgrade request with pre-loaded deploy code, and new protocol version.\nfn make_upgrade_request(new_protocol_version: ProtocolVersion) -> UpgradeRequestBuilder {\n    UpgradeRequestBuilder::new()\n        .with_current_protocol_version(PROTOCOL_VERSION)\n        .with_new_protocol_version(new_protocol_version)\n        .with_activation_point(DEFAULT_ACTIVATION_POINT)\n}\n\nfn install_custom_payment(\n    builder: &mut LmdbWasmTestBuilder,\n) -> (EntityWithNamedKeys, 
HashAddr, U512) {\n    // store payment contract\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        STORED_PAYMENT_CONTRACT_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).commit();\n\n    let default_account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    // check account named keys\n    let package_hash = default_account\n        .named_keys()\n        .get(STORED_PAYMENT_CONTRACT_PACKAGE_HASH_NAME)\n        .expect(\"key should exist\")\n        .into_hash_addr()\n        .expect(\"should be a hash\");\n\n    let exec_cost = builder.get_last_exec_result().unwrap().consumed().value();\n\n    (default_account, package_hash, exec_cost)\n}\n\n#[ignore]\n#[test]\nfn should_exec_non_stored_code() {\n    // using the new execute logic, passing code for both payment and session\n    // should work exactly as it did with the original exec logic\n\n    let account_1_account_hash = ACCOUNT_1_ADDR;\n    let payment_purse_amount = *DEFAULT_PAYMENT;\n    let transferred_amount = 1;\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_session_code(\n            format!(\"{}.wasm\", TRANSFER_PURSE_TO_ACCOUNT_CONTRACT_NAME),\n            runtime_args! {\n                ARG_TARGET => account_1_account_hash,\n                ARG_AMOUNT => U512::from(transferred_amount)\n            },\n        )\n        .with_standard_payment(runtime_args! 
{\n            ARG_AMOUNT => payment_purse_amount,\n        })\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY])\n        .with_deploy_hash([1; 32])\n        .build();\n\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let proposer_reward_starting_balance = builder.get_proposer_purse_balance();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance;\n\n    let default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should get genesis account\");\n    let modified_balance: U512 = builder.get_purse_balance(default_account.main_purse());\n\n    let initial_balance: U512 = U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE);\n\n    assert_ne!(\n        modified_balance, initial_balance,\n        \"balance should be less than initial balance\"\n    );\n\n    let tally = transaction_fee + U512::from(transferred_amount) + modified_balance;\n\n    assert_eq!(\n        initial_balance, tally,\n        \"no net resources should be gained or lost post-distribution\"\n    );\n}\n\n#[ignore]\n#[test]\nfn should_fail_if_calling_non_existent_entry_point() {\n    let payment_purse_amount = *DEFAULT_PAYMENT;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    // first, store payment contract with entry point named \"pay\"\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        STORED_PAYMENT_CONTRACT_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).commit();\n\n    let default_account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have contract associated 
with default account\");\n    let stored_payment_contract_hash = default_account\n        .named_keys()\n        .get(STORED_PAYMENT_CONTRACT_HASH_NAME)\n        .expect(\"should have standard_payment named key\")\n        .into_entity_hash_addr()\n        .expect(\"standard_payment should be an uref\");\n\n    // next make another deploy that attempts to use the stored payment logic\n    // but passing the name for an entry point that does not exist.\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_session_code(format!(\"{}.wasm\", DO_NOTHING_NAME), RuntimeArgs::default())\n        .with_stored_payment_hash(\n            stored_payment_contract_hash.into(),\n            \"electric-boogaloo\",\n            runtime_args! { ARG_AMOUNT => payment_purse_amount },\n        )\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY])\n        .with_deploy_hash([1; 32])\n        .build();\n\n    let exec_request_stored_payment = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(exec_request_stored_payment).commit();\n\n    assert!(\n        builder.is_error(),\n        \"calling a non-existent entry point should not work\"\n    );\n\n    let expected_error = Error::Exec(ExecError::NoSuchMethod(\"electric-boogaloo\".to_string()));\n\n    builder.assert_error(expected_error);\n}\n\n#[ignore]\n#[test]\nfn should_exec_stored_code_by_hash() {\n    let default_payment = *DEFAULT_PAYMENT;\n\n    // genesis\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    // store payment\n    let (_, custom_payment_package_hash, _) = install_custom_payment(&mut builder);\n\n    let transferred_amount = U512::one();\n\n    // next make another deploy that USES stored payment logic\n\n    {\n        let deploy_item = DeployItemBuilder::new()\n            .with_address(*DEFAULT_ACCOUNT_ADDR)\n            
.with_stored_versioned_payment_contract_by_hash(\n                custom_payment_package_hash,\n                Some(ENTITY_INITIAL_VERSION),\n                PAY_ENTRYPOINT,\n                runtime_args! {\n                    ARG_AMOUNT => default_payment,\n                },\n            )\n            .with_session_code(\n                format!(\"{}.wasm\", TRANSFER_PURSE_TO_ACCOUNT_CONTRACT_NAME),\n                runtime_args! { ARG_TARGET => ACCOUNT_1_ADDR, ARG_AMOUNT => transferred_amount },\n            )\n            .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY])\n            .with_deploy_hash([2; 32])\n            .build();\n\n        let transfer_using_stored_payment =\n            ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n        builder.exec(transfer_using_stored_payment).expect_failure();\n    }\n\n    let error = builder.get_error().unwrap();\n\n    assert_matches!(error, Error::Exec(ExecError::ForgedReference(_)))\n}\n\n#[ignore]\n#[test]\nfn should_not_transfer_above_balance_using_stored_payment_code_by_hash() {\n    let payment_purse_amount = *DEFAULT_PAYMENT;\n\n    // genesis\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    // store payment\n    let (default_account, hash, _) = install_custom_payment(&mut builder);\n    let starting_balance = builder.get_purse_balance(default_account.main_purse());\n\n    let transferred_amount = starting_balance - *DEFAULT_PAYMENT + U512::one();\n\n    let account_1_account_hash = ACCOUNT_1_ADDR;\n    let deploy_item = DeployItemBuilder::new()\n            .with_address(*DEFAULT_ACCOUNT_ADDR)\n            .with_session_code(\n                format!(\"{}.wasm\", TRANSFER_PURSE_TO_ACCOUNT_CONTRACT_NAME),\n                runtime_args! 
{ ARG_TARGET => account_1_account_hash, ARG_AMOUNT => transferred_amount },\n            )\n            .with_stored_versioned_payment_contract_by_hash(\n                hash,\n                Some(ENTITY_INITIAL_VERSION),\n                PAY_ENTRYPOINT,\n                runtime_args! {\n                    ARG_AMOUNT => payment_purse_amount,\n                },\n            )\n            .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY])\n            .with_deploy_hash([2; 32])\n            .build();\n\n    let exec_request_stored_payment = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder\n        .exec(exec_request_stored_payment)\n        .expect_failure()\n        .commit();\n\n    let error = builder.get_error().unwrap();\n\n    assert_matches!(error, Error::Exec(ExecError::ForgedReference(_)))\n}\n\n#[ignore]\n#[allow(unused)]\n#[test]\nfn should_empty_account_using_stored_payment_code_by_hash() {\n    let payment_purse_amount = *DEFAULT_PAYMENT;\n\n    // genesis\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    // store payment\n\n    let (default_account, hash, _) = install_custom_payment(&mut builder);\n    let starting_balance = builder.get_purse_balance(default_account.main_purse());\n\n    // verify stored contract functions as expected by checking all the maths\n\n    let transferred_amount = starting_balance - *DEFAULT_PAYMENT;\n\n    {\n        let account_1_account_hash = ACCOUNT_1_ADDR;\n        let deploy_item = DeployItemBuilder::new()\n                .with_address(*DEFAULT_ACCOUNT_ADDR)\n                .with_session_code(\n                    format!(\"{}.wasm\", TRANSFER_PURSE_TO_ACCOUNT_CONTRACT_NAME),\n                    runtime_args! 
{ ARG_TARGET => account_1_account_hash, ARG_AMOUNT => transferred_amount },\n                )\n                .with_stored_versioned_payment_contract_by_hash(\n                    hash,\n                    Some(ENTITY_INITIAL_VERSION),\n                    PAY_ENTRYPOINT,\n                    runtime_args! {\n                        ARG_AMOUNT => payment_purse_amount,\n                    },\n                )\n                .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY])\n                .with_deploy_hash([2; 32])\n                .build();\n\n        let exec_request_stored_payment =\n            ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n        builder.exec(exec_request_stored_payment).expect_failure();\n    }\n\n    let error = builder.get_error().expect(\"must have error\");\n\n    assert_matches!(error, Error::Exec(ExecError::ForgedReference(_)))\n}\n\n#[ignore]\n#[test]\nfn should_exec_stored_code_by_named_hash() {\n    let payment_purse_amount = *DEFAULT_PAYMENT;\n\n    // genesis\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    install_custom_payment(&mut builder);\n\n    // verify stored contract functions as expected by checking all the maths\n\n    let transferred_amount = 1;\n\n    {\n        let account_1_account_hash = ACCOUNT_1_ADDR;\n        let deploy_item = DeployItemBuilder::new()\n                .with_address(*DEFAULT_ACCOUNT_ADDR)\n                .with_session_code(\n                    format!(\"{}.wasm\", TRANSFER_PURSE_TO_ACCOUNT_CONTRACT_NAME),\n                    runtime_args! { ARG_TARGET => account_1_account_hash, ARG_AMOUNT => U512::from(transferred_amount) },\n                )\n                .with_stored_versioned_payment_contract_by_name(\n                    STORED_PAYMENT_CONTRACT_PACKAGE_HASH_NAME,\n                    Some(ENTITY_INITIAL_VERSION),\n                    PAY_ENTRYPOINT,\n                    runtime_args! 
{\n                        ARG_AMOUNT => payment_purse_amount,\n                    },\n                )\n                .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY])\n                .with_deploy_hash([2; 32])\n                .build();\n\n        let exec_request_stored_payment =\n            ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n        builder.exec(exec_request_stored_payment).expect_failure();\n\n        let error = builder.get_error().unwrap();\n\n        assert_matches!(error, Error::Exec(ExecError::ForgedReference(_)))\n    }\n}\n\n#[ignore]\n#[test]\nfn should_fail_session_stored_at_named_key_with_incompatible_major_version() {\n    let payment_purse_amount = *DEFAULT_PAYMENT;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    // first, store payment contract for v1.0.0\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        &format!(\"{}_stored.wasm\", DO_NOTHING_NAME),\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request_1).commit();\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        STORED_PAYMENT_CONTRACT_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).commit();\n\n    let default_account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have contract associated with default account\");\n    assert!(\n        default_account\n            .named_keys()\n            .contains(DO_NOTHING_CONTRACT_HASH_NAME),\n        \"do_nothing should be present in named keys\"\n    );\n\n    let stored_payment_contract_hash = default_account\n        .named_keys()\n        .get(STORED_PAYMENT_CONTRACT_HASH_NAME)\n        
.expect(\"should have standard_payment named key\")\n        .into_entity_hash_addr()\n        .expect(\"standard_payment should be an uref\");\n    //\n    // upgrade with new wasm costs with modified mint for given version\n    //\n    let sem_ver = PROTOCOL_VERSION.value();\n    let new_protocol_version =\n        ProtocolVersion::from_parts(sem_ver.major + 1, sem_ver.minor, sem_ver.patch);\n\n    let mut upgrade_request = make_upgrade_request(new_protocol_version).build();\n\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    // Call stored session code\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_stored_session_named_key(\n            DO_NOTHING_CONTRACT_HASH_NAME,\n            ENTRY_FUNCTION_NAME,\n            RuntimeArgs::new(),\n        )\n        .with_stored_payment_hash(\n            stored_payment_contract_hash.into(),\n            PAY_ENTRYPOINT,\n            runtime_args! 
{ ARG_AMOUNT => payment_purse_amount },\n        )\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY])\n        .with_deploy_hash([2; 32])\n        .build();\n\n    let exec_request_stored_payment = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(exec_request_stored_payment).commit();\n\n    assert!(\n        builder.is_error(),\n        \"calling a session module with increased major protocol version should be error\",\n    );\n    let _error = builder.get_error().expect(\"must have error\");\n    // println!(\"error {:?}\", error);\n    // assert!(matches!(\n    //     error,\n    //     Error::Exec(ExecError::IncompatibleProtocolMajorVersion {\n    //         expected: 3,\n    //         actual: 2\n    //     })\n    // ))\n}\n\n#[ignore]\n#[test]\nfn should_execute_stored_payment_and_session_code_with_new_major_version() {\n    let payment_purse_amount = *DEFAULT_PAYMENT;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    //\n    // upgrade with new wasm costs with modified mint for given version\n    //\n    let sem_ver = PROTOCOL_VERSION.value();\n    let new_protocol_version =\n        ProtocolVersion::from_parts(sem_ver.major + 1, sem_ver.minor, sem_ver.patch);\n\n    let mut upgrade_request = make_upgrade_request(new_protocol_version).build();\n\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    // first, store payment contract for v2.0.0\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        STORED_PAYMENT_CONTRACT_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        &format!(\"{}_stored.wasm\", DO_NOTHING_NAME),\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    // store both contracts\n    
builder.exec(exec_request_1).expect_success().commit();\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    // query both stored contracts by their named keys\n    let default_account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have contract\");\n    let test_payment_stored_hash = default_account\n        .named_keys()\n        .get(STORED_PAYMENT_CONTRACT_HASH_NAME)\n        .expect(\"standard_payment should be present in named keys\")\n        .into_entity_hash_addr()\n        .expect(\"standard_payment named key should be hash\");\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_stored_versioned_contract_by_name(\n            DO_NOTHING_CONTRACT_PACKAGE_HASH_NAME,\n            Some(INITIAL_VERSION),\n            ENTRY_FUNCTION_NAME,\n            RuntimeArgs::new(),\n        )\n        .with_stored_payment_hash(\n            test_payment_stored_hash.into(),\n            PAY_ENTRYPOINT,\n            runtime_args! 
{ ARG_AMOUNT => payment_purse_amount },\n        )\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY])\n        .with_deploy_hash([3; 32])\n        .build();\n\n    let exec_request_stored_payment = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder\n        .clear_results()\n        .exec(exec_request_stored_payment)\n        .expect_failure();\n\n    let error = builder.get_error().unwrap();\n\n    assert_matches!(error, Error::Exec(ExecError::ForgedReference(_)))\n}\n\n// We are currently not enforcing major version compliance to permit optimistic retro-compatibility\n//  if we start enforcing this in the future, the following tests should be restored and patched up\n//  to whatever the relevant protocol versions are at that time.\n// #[ignore]\n// #[test]\n// fn should_fail_payment_stored_at_hash_with_incompatible_major_version() {\n//     let payment_purse_amount = *DEFAULT_PAYMENT;\n//\n//     let default_account_hash = *DEFAULT_ACCOUNT_ADDR;\n//     // first, store payment contract\n//     let exec_request = ExecuteRequestBuilder::standard(\n//         default_account_hash,\n//         STORED_PAYMENT_CONTRACT_NAME,\n//         RuntimeArgs::default(),\n//     )\n//     .build();\n//\n//     let mut builder = LmdbWasmTestBuilder::default();\n//     builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n//\n//     builder.exec(exec_request).expect_success().commit();\n//\n//     let default_account = builder\n//         .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n//         .expect(\"must have contract associated with default account\");\n//\n//     let stored_payment_key = *default_account\n//         .named_keys()\n//         .get(STORED_PAYMENT_CONTRACT_HASH_NAME)\n//         .expect(\"should have stored payment key\");\n//\n//     let _stored_payment = builder\n//         .query(None, stored_payment_key, &[])\n//         .expect(\"should have stored payement\");\n//\n//     let 
stored_payment_contract_hash = stored_payment_key\n//         .into_entity_hash_addr()\n//         .expect(\"standard_payment should be an uref\");\n//\n//     //\n//     // upgrade with new wasm costs with modified mint for given version to avoid missing wasm\n// costs     // table that's queried early\n//     //\n//     let sem_ver = PROTOCOL_VERSION.value();\n//     let new_protocol_version =\n//         ProtocolVersion::from_parts(sem_ver.major + 1, sem_ver.minor, sem_ver.patch);\n//\n//     let mut upgrade_request = make_upgrade_request(new_protocol_version).build();\n//\n//     builder\n//         .upgrade(&mut upgrade_request)\n//         .expect_upgrade_success();\n//\n//     // next make another deploy that USES stored payment logic\n//     let deploy_item = DeployItemBuilder::new()\n//         .with_address(*DEFAULT_ACCOUNT_ADDR)\n//         .with_session_code(format!(\"{}.wasm\", DO_NOTHING_NAME), RuntimeArgs::default())\n//         .with_stored_payment_hash(\n//             stored_payment_contract_hash.into(),\n//             PAY_ENTRYPOINT,\n//             runtime_args! 
{ ARG_AMOUNT => payment_purse_amount },\n//         )\n//         .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY])\n//         .with_deploy_hash([2; 32])\n//         .build();\n//\n//     let exec_request_stored_payment =\n// ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n//\n//     builder.exec(exec_request_stored_payment).commit();\n//\n//     assert!(\n//         builder.is_error(),\n//         \"calling a payment module with increased major protocol version should be error\"\n//     );\n//\n//     let expected_error = Error::Exec(ExecError::IncompatibleProtocolMajorVersion {\n//         expected: 3,\n//         actual: 2,\n//     });\n//\n//     builder.assert_error(expected_error);\n// }\n\n// #[ignore]\n// #[test]\n// fn should_fail_session_stored_at_named_key_with_missing_new_major_version() {\n//     let payment_purse_amount = *DEFAULT_PAYMENT;\n//\n//     let mut builder = LmdbWasmTestBuilder::default();\n//     builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n//\n//     // first, store payment contract for v1.0.0\n//     let exec_request_1 = ExecuteRequestBuilder::standard(\n//         *DEFAULT_ACCOUNT_ADDR,\n//         &format!(\"{}_stored.wasm\", DO_NOTHING_NAME),\n//         RuntimeArgs::default(),\n//     )\n//     .build();\n//     let exec_request_2 = ExecuteRequestBuilder::standard(\n//         *DEFAULT_ACCOUNT_ADDR,\n//         STORED_PAYMENT_CONTRACT_NAME,\n//         RuntimeArgs::default(),\n//     )\n//     .build();\n//\n//     builder.exec(exec_request_1).commit();\n//     builder.exec(exec_request_2).commit();\n//\n//     let default_account = builder\n//         .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n//         .expect(\"must have contract\");\n//     assert!(\n//         default_account\n//             .named_keys()\n//             .contains(DO_NOTHING_CONTRACT_HASH_NAME),\n//         \"do_nothing should be present in named keys\"\n//     );\n//\n//     //\n//     // upgrade with new wasm 
costs with modified mint for given version\n//     //\n//     let sem_ver = PROTOCOL_VERSION.value();\n//     let new_protocol_version =\n//         ProtocolVersion::from_parts(sem_ver.major + 1, sem_ver.minor, sem_ver.patch);\n//\n//     let mut upgrade_request = make_upgrade_request(new_protocol_version).build();\n//\n//     builder\n//         .upgrade(&mut upgrade_request)\n//         .expect_upgrade_success();\n//\n//     // Call stored session code\n//\n//     let deploy_item = DeployItemBuilder::new()\n//         .with_address(*DEFAULT_ACCOUNT_ADDR)\n//         .with_stored_versioned_contract_by_name(\n//             DO_NOTHING_CONTRACT_PACKAGE_HASH_NAME,\n//             Some(INITIAL_VERSION),\n//             ENTRY_FUNCTION_NAME,\n//             RuntimeArgs::new(),\n//         )\n//         .with_stored_versioned_payment_contract_by_name(\n//             STORED_PAYMENT_CONTRACT_PACKAGE_HASH_NAME,\n//             Some(INITIAL_VERSION),\n//             PAY_ENTRYPOINT,\n//             runtime_args! 
{\n//                 ARG_AMOUNT => payment_purse_amount,\n//             },\n//         )\n//         .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY])\n//         .with_deploy_hash([2; 32])\n//         .build();\n//\n//     let exec_request_stored_payment =\n// ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n//\n//     builder.exec(exec_request_stored_payment).commit();\n//\n//     assert!(\n//         builder.is_error(),\n//         \"calling a session module with increased major protocol version should be error\",\n//     );\n//\n//     let entity_version_key = EntityVersionKey::new(3, 1);\n//\n//     let expected_error = Error::Exec(ExecError::MissingEntityVersion(entity_version_key));\n//\n//     builder.assert_error(expected_error);\n// }\n//\n// #[ignore]\n// #[test]\n// fn should_fail_session_stored_at_hash_with_incompatible_major_version() {\n//     let payment_purse_amount = *DEFAULT_PAYMENT;\n//\n//     let mut builder = LmdbWasmTestBuilder::default();\n//     builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n//\n//     // first, store payment contract for v1.0.0\n//     let exec_request_1 = ExecuteRequestBuilder::standard(\n//         *DEFAULT_ACCOUNT_ADDR,\n//         &format!(\"{}_stored.wasm\", DO_NOTHING_NAME),\n//         RuntimeArgs::default(),\n//     )\n//     .build();\n//\n//     let mut builder = LmdbWasmTestBuilder::default();\n//     builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n//\n//     builder.exec(exec_request_1).commit();\n//\n//     let exec_request = ExecuteRequestBuilder::standard(\n//         *DEFAULT_ACCOUNT_ADDR,\n//         STORED_PAYMENT_CONTRACT_NAME,\n//         RuntimeArgs::default(),\n//     )\n//     .build();\n//\n//     builder.exec(exec_request).commit();\n//\n//     //\n//     // upgrade with new wasm costs with modified mint for given version\n//     //\n//     let sem_ver = PROTOCOL_VERSION.value();\n//     let new_protocol_version =\n//         ProtocolVersion::from_parts(sem_ver.major + 
1, sem_ver.minor, sem_ver.patch);\n//\n//     let mut upgrade_request = make_upgrade_request(new_protocol_version).build();\n//\n//     builder\n//         .upgrade(&mut upgrade_request)\n//         .expect_upgrade_success();\n//\n//     // Call stored session code\n//\n//     // query both stored contracts by their named keys\n//     let default_account = builder\n//         .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n//         .expect(\"must have contract\");\n//     let test_payment_stored_hash = default_account\n//         .named_keys()\n//         .get(STORED_PAYMENT_CONTRACT_HASH_NAME)\n//         .expect(\"standard_payment should be present in named keys\")\n//         .into_entity_hash_addr()\n//         .expect(\"standard_payment named key should be hash\");\n//\n//     let deploy_item = DeployItemBuilder::new()\n//         .with_address(*DEFAULT_ACCOUNT_ADDR)\n//         .with_stored_session_named_key(\n//             DO_NOTHING_CONTRACT_HASH_NAME,\n//             ENTRY_FUNCTION_NAME,\n//             RuntimeArgs::new(),\n//         )\n//         .with_stored_payment_hash(\n//             test_payment_stored_hash.into(),\n//             PAY_ENTRYPOINT,\n//             runtime_args! 
{ ARG_AMOUNT => payment_purse_amount },\n//         )\n//         .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY])\n//         .with_deploy_hash([2; 32])\n//         .build();\n//\n//     let exec_request_stored_payment =\n// ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n//\n//     builder.exec(exec_request_stored_payment).commit();\n//\n//     assert!(\n//         builder.is_error(),\n//         \"calling a session module with increased major protocol version should be error\",\n//     );\n//     let error = builder.get_error().expect(\"must have error\");\n//     assert!(\n//         matches!(\n//             error,\n//             Error::Exec(ExecError::IncompatibleProtocolMajorVersion {\n//                 expected: 3,\n//                 actual: 2\n//             }),\n//         ),\n//         \"Error does not match: {:?}\",\n//         error\n//     )\n// }\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/explorer/faucet.rs",
    "content": "use num_rational::Ratio;\n\nuse casper_execution_engine::{engine_state, execution::ExecError};\n\nuse casper_engine_test_support::{\n    ChainspecConfig, DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder,\n    TransferRequestBuilder, CHAINSPEC_SYMLINK, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{\n    account::AccountHash, runtime_args, ApiError, FeeHandling, Key, PricingHandling, PublicKey,\n    RefundHandling, SecretKey, Transfer, U512,\n};\n\n// test constants.\nuse super::{\n    faucet_test_helpers::{\n        get_faucet_entity_hash, get_faucet_purse, query_stored_value, FaucetDeployHelper,\n        FaucetInstallSessionRequestBuilder, FundAccountRequestBuilder,\n    },\n    ARG_AMOUNT, ARG_AVAILABLE_AMOUNT, ARG_DISTRIBUTIONS_PER_INTERVAL, ARG_ID, ARG_TARGET,\n    ARG_TIME_INTERVAL, AUTHORIZED_ACCOUNT_NAMED_KEY, AVAILABLE_AMOUNT_NAMED_KEY,\n    DISTRIBUTIONS_PER_INTERVAL_NAMED_KEY, ENTRY_POINT_FAUCET, ENTRY_POINT_SET_VARIABLES,\n    FAUCET_CONTRACT_NAMED_KEY, FAUCET_FUND_AMOUNT, FAUCET_ID, FAUCET_INSTALLER_SESSION,\n    FAUCET_PURSE_NAMED_KEY, FAUCET_TIME_INTERVAL, INSTALLER_ACCOUNT, INSTALLER_FUND_AMOUNT,\n    INSTALLER_NAMED_KEY, LAST_DISTRIBUTION_TIME_NAMED_KEY, REMAINING_REQUESTS_NAMED_KEY,\n    TIME_INTERVAL_NAMED_KEY, TWO_HOURS_AS_MILLIS,\n};\n\n/// User error variant defined in the faucet contract.\nconst FAUCET_CALL_BY_USER_WITH_AUTHORIZED_ACCOUNT_SET: u16 = 25;\n\n#[ignore]\n#[test]\nfn should_install_faucet_contract() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let fund_installer_account_request = FundAccountRequestBuilder::new()\n        .with_target_account(INSTALLER_ACCOUNT)\n        .with_fund_amount(U512::from(INSTALLER_FUND_AMOUNT))\n        .build();\n\n    builder\n        .transfer_and_commit(fund_installer_account_request)\n        .expect_success();\n\n    builder\n        
.exec(FaucetInstallSessionRequestBuilder::new().build())\n        .expect_success()\n        .commit();\n\n    let installer_named_keys = builder\n        .get_entity_with_named_keys_by_account_hash(INSTALLER_ACCOUNT)\n        .expect(\"must have entity\")\n        .named_keys()\n        .clone();\n\n    assert!(installer_named_keys\n        .get(&format!(\"{}_{}\", FAUCET_CONTRACT_NAMED_KEY, FAUCET_ID))\n        .is_some());\n\n    let faucet_purse_id = format!(\"{}_{}\", FAUCET_PURSE_NAMED_KEY, FAUCET_ID);\n    assert!(installer_named_keys.get(&faucet_purse_id).is_some());\n\n    let faucet_named_key = Key::Hash(\n        installer_named_keys\n            .get(&format!(\"{}_{}\", FAUCET_CONTRACT_NAMED_KEY, FAUCET_ID))\n            .expect(\"failed to find faucet named key\")\n            .into_entity_hash_addr()\n            .expect(\"must get hash addr\"),\n    );\n\n    // check installer is set.\n    builder\n        .query(None, faucet_named_key, &[INSTALLER_NAMED_KEY.to_string()])\n        .expect(\"failed to find installer named key\");\n\n    // check time interval\n    builder\n        .query(\n            None,\n            faucet_named_key,\n            &[TIME_INTERVAL_NAMED_KEY.to_string()],\n        )\n        .expect(\"failed to find time interval named key\");\n\n    // check last distribution time\n    builder\n        .query(\n            None,\n            faucet_named_key,\n            &[LAST_DISTRIBUTION_TIME_NAMED_KEY.to_string()],\n        )\n        .expect(\"failed to find last distribution named key\");\n\n    // check faucet purse\n    builder\n        .query(\n            None,\n            faucet_named_key,\n            &[FAUCET_PURSE_NAMED_KEY.to_string()],\n        )\n        .expect(\"failed to find faucet purse named key\");\n\n    // check available amount\n    builder\n        .query(\n            None,\n            faucet_named_key,\n            &[AVAILABLE_AMOUNT_NAMED_KEY.to_string()],\n        )\n        .expect(\"failed to 
find available amount named key\");\n\n    // check remaining requests\n    builder\n        .query(\n            None,\n            faucet_named_key,\n            &[REMAINING_REQUESTS_NAMED_KEY.to_string()],\n        )\n        .expect(\"failed to find remaining requests named key\");\n\n    builder\n        .query(\n            None,\n            faucet_named_key,\n            &[AUTHORIZED_ACCOUNT_NAMED_KEY.to_string()],\n        )\n        .expect(\"failed to find authorized account named key\");\n}\n\n#[ignore]\n#[test]\nfn should_allow_installer_to_set_variables() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let mut helper = FaucetDeployHelper::new()\n        .with_installer_account(INSTALLER_ACCOUNT)\n        .with_installer_fund_amount(U512::from(INSTALLER_FUND_AMOUNT))\n        .with_faucet_purse_fund_amount(U512::from(FAUCET_FUND_AMOUNT))\n        .with_faucet_available_amount(Some(U512::from(FAUCET_FUND_AMOUNT)))\n        .with_faucet_distributions_per_interval(Some(2))\n        .with_faucet_time_interval(Some(FAUCET_TIME_INTERVAL));\n\n    builder\n        .transfer_and_commit(helper.fund_installer_request())\n        .expect_success();\n\n    builder\n        .exec(helper.faucet_install_request())\n        .expect_success()\n        .commit();\n\n    let faucet_contract_hash = helper.query_and_set_faucet_contract_hash(&builder);\n    let faucet_entity_key = Key::Hash(faucet_contract_hash.value());\n\n    assert_eq!(\n        helper.query_faucet_purse_balance(&builder),\n        helper.faucet_purse_fund_amount()\n    );\n\n    let available_amount: U512 = query_stored_value(\n        &mut builder,\n        faucet_entity_key,\n        vec![AVAILABLE_AMOUNT_NAMED_KEY.to_string()],\n    );\n\n    // the available amount per interval will be zero until the installer calls\n    // the set_variable entrypoint to finish setup.\n    assert_eq!(available_amount, U512::zero());\n\n    let 
time_interval: u64 = query_stored_value(\n        &mut builder,\n        faucet_entity_key,\n        vec![TIME_INTERVAL_NAMED_KEY.to_string()],\n    );\n\n    // defaults to around two hours.\n    assert_eq!(time_interval, TWO_HOURS_AS_MILLIS);\n\n    let distributions_per_interval: u64 = query_stored_value(\n        &mut builder,\n        faucet_entity_key,\n        vec![DISTRIBUTIONS_PER_INTERVAL_NAMED_KEY.to_string()],\n    );\n\n    assert_eq!(distributions_per_interval, 0u64);\n\n    builder\n        .exec(helper.faucet_config_request())\n        .expect_success()\n        .commit();\n\n    let available_amount: U512 = query_stored_value(\n        &mut builder,\n        faucet_entity_key,\n        vec![AVAILABLE_AMOUNT_NAMED_KEY.to_string()],\n    );\n\n    assert_eq!(available_amount, helper.faucet_purse_fund_amount());\n\n    let time_interval: u64 = query_stored_value(\n        &mut builder,\n        faucet_entity_key,\n        vec![TIME_INTERVAL_NAMED_KEY.to_string()],\n    );\n\n    assert_eq!(time_interval, helper.faucet_time_interval().unwrap());\n\n    let distributions_per_interval: u64 = query_stored_value(\n        &mut builder,\n        faucet_entity_key,\n        vec![DISTRIBUTIONS_PER_INTERVAL_NAMED_KEY.to_string()],\n    );\n\n    assert_eq!(\n        distributions_per_interval,\n        helper.faucet_distributions_per_interval().unwrap()\n    );\n}\n\n#[ignore]\n#[test]\nfn should_fund_new_account() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let faucet_purse_fund_amount = U512::from(9_000_000_000u64);\n    let faucet_distributions_per_interval = 3;\n\n    let mut helper = FaucetDeployHelper::default()\n        .with_faucet_purse_fund_amount(faucet_purse_fund_amount)\n        .with_faucet_available_amount(Some(faucet_purse_fund_amount))\n        .with_faucet_distributions_per_interval(Some(faucet_distributions_per_interval));\n\n    builder\n        
.transfer_and_commit(helper.fund_installer_request())\n        .expect_success();\n\n    builder\n        .exec(helper.faucet_install_request())\n        .expect_success()\n        .commit();\n\n    helper.query_and_set_faucet_contract_hash(&builder);\n\n    builder\n        .exec(helper.faucet_config_request())\n        .expect_success()\n        .commit();\n\n    let new_account = AccountHash::new([7u8; 32]);\n\n    let new_account_fund_amount = U512::from(5_000_000_000u64);\n    let fund_new_account_request = helper\n        .new_faucet_fund_request_builder()\n        .with_installer_account(helper.installer_account())\n        .with_arg_target(new_account)\n        .with_arg_fund_amount(new_account_fund_amount)\n        .build();\n\n    let faucet_purse_uref = helper.query_faucet_purse(&builder);\n    let faucet_purse_balance_before = builder.get_purse_balance(faucet_purse_uref);\n\n    builder\n        .exec(fund_new_account_request)\n        .expect_success()\n        .commit();\n\n    let faucet_purse_balance_after = builder.get_purse_balance(faucet_purse_uref);\n\n    assert_eq!(\n        faucet_purse_balance_after,\n        faucet_purse_balance_before - new_account_fund_amount\n    );\n\n    let new_account_actual_purse_balance = builder.get_purse_balance(\n        builder\n            .get_expected_addressable_entity_by_account_hash(new_account)\n            .main_purse(),\n    );\n\n    assert_eq!(new_account_actual_purse_balance, new_account_fund_amount);\n}\n\n#[ignore]\n#[test]\nfn should_fund_existing_account() {\n    let user_account = AccountHash::new([7u8; 32]);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let faucet_purse_fund_amount = U512::from(9_000_000_000u64);\n    let faucet_distributions_per_interval = 3;\n\n    let mut helper = FaucetDeployHelper::default()\n        .with_faucet_purse_fund_amount(faucet_purse_fund_amount)\n        
.with_faucet_available_amount(Some(faucet_purse_fund_amount))\n        .with_faucet_distributions_per_interval(Some(faucet_distributions_per_interval));\n\n    builder\n        .transfer_and_commit(helper.fund_installer_request())\n        .expect_success();\n\n    let user_account_initial_balance = U512::from(15_000_000_000u64);\n\n    let fund_user_request = FundAccountRequestBuilder::new()\n        .with_target_account(user_account)\n        .with_fund_amount(user_account_initial_balance)\n        .build();\n\n    builder\n        .transfer_and_commit(fund_user_request)\n        .expect_success();\n\n    builder\n        .exec(helper.faucet_install_request())\n        .expect_success()\n        .commit();\n\n    helper.query_and_set_faucet_contract_hash(&builder);\n\n    builder\n        .exec(helper.faucet_config_request())\n        .expect_success()\n        .commit();\n\n    builder\n        .exec(\n            helper\n                .new_faucet_fund_request_builder()\n                .with_user_account(user_account)\n                .with_payment_amount(user_account_initial_balance)\n                .build(),\n        )\n        .expect_success()\n        .commit();\n\n    let exec_result = builder\n        .get_last_exec_result()\n        .expect(\"must have last exec result\");\n    let transfer = exec_result.transfers().first().expect(\"must have transfer\");\n\n    let one_distribution = Ratio::new(\n        faucet_purse_fund_amount,\n        faucet_distributions_per_interval.into(),\n    )\n    .to_integer();\n    assert!(\n        matches!(transfer, Transfer::V2(v2) if v2.amount == one_distribution),\n        \"{:?}\",\n        transfer\n    );\n}\n\n#[ignore]\n#[test]\nfn should_allow_installer_to_fund_freely() {\n    let installer_account = AccountHash::new([1u8; 32]);\n    let user_account = AccountHash::new([2u8; 32]);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let 
faucet_fund_amount = U512::from(200_000_000_000u64);\n    let half_of_faucet_fund_amount = faucet_fund_amount / 2;\n    let assigned_distributions_per_time_interval = 2u64;\n    let mut helper = FaucetDeployHelper::new()\n        .with_installer_account(installer_account)\n        .with_installer_fund_amount(INSTALLER_FUND_AMOUNT.into())\n        .with_faucet_purse_fund_amount(faucet_fund_amount)\n        .with_faucet_available_amount(Some(half_of_faucet_fund_amount))\n        .with_faucet_distributions_per_interval(Some(assigned_distributions_per_time_interval))\n        .with_faucet_time_interval(Some(10_000u64));\n\n    builder\n        .transfer_and_commit(helper.fund_installer_request())\n        .expect_success();\n\n    builder\n        .exec(helper.faucet_install_request())\n        .expect_success()\n        .commit();\n\n    helper.query_and_set_faucet_contract_hash(&builder);\n\n    let faucet_contract_hash = get_faucet_entity_hash(&builder, installer_account);\n    let faucet_entity_key = Key::Hash(faucet_contract_hash.value());\n    let faucet_purse = get_faucet_purse(&builder, installer_account);\n\n    let faucet_purse_balance = builder.get_purse_balance(faucet_purse);\n    assert_eq!(faucet_purse_balance, faucet_fund_amount);\n\n    let available_amount = query_stored_value::<U512>(\n        &mut builder,\n        faucet_entity_key,\n        [AVAILABLE_AMOUNT_NAMED_KEY.to_string()].into(),\n    );\n\n    // the available amount per interval should be zero until the installer calls\n    // the set_variable entrypoint to finish setup.\n    assert_eq!(available_amount, U512::zero());\n\n    builder\n        .exec(helper.faucet_config_request())\n        .expect_success()\n        .commit();\n\n    let available_amount = query_stored_value::<U512>(\n        &mut builder,\n        faucet_entity_key,\n        [AVAILABLE_AMOUNT_NAMED_KEY.to_string()].into(),\n    );\n\n    assert_eq!(available_amount, half_of_faucet_fund_amount);\n\n    let 
user_fund_amount = U512::from(3_000_000_000u64);\n    // This would only allow other callers to fund twice in this interval,\n    // but the installer can fund as many times as they want.\n    let num_funds = 3;\n\n    for _ in 0..num_funds {\n        let faucet_call_by_installer = helper\n            .new_faucet_fund_request_builder()\n            .with_installer_account(helper.installer_account())\n            .with_arg_fund_amount(user_fund_amount)\n            .with_arg_target(user_account)\n            .build();\n\n        builder\n            .exec(faucet_call_by_installer)\n            .expect_success()\n            .commit();\n    }\n\n    let faucet_purse_balance = builder.get_purse_balance(faucet_purse);\n    assert_eq!(\n        faucet_purse_balance,\n        faucet_fund_amount - user_fund_amount * num_funds,\n        \"faucet purse balance must match expected amount after {} faucet calls\",\n        num_funds\n    );\n\n    // check the balance of the user's main purse\n    let user_main_purse_balance_after = builder.get_purse_balance(\n        builder\n            .get_expected_addressable_entity_by_account_hash(user_account)\n            .main_purse(),\n    );\n\n    assert_eq!(user_main_purse_balance_after, user_fund_amount * num_funds);\n}\n\n#[ignore]\n#[test]\nfn should_not_fund_if_zero_distributions_per_interval() {\n    let installer_account = AccountHash::new([1u8; 32]);\n    let user_account = AccountHash::new([2u8; 32]);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    // Fund installer account\n    let fund_installer_account_request = FundAccountRequestBuilder::new()\n        .with_target_account(installer_account)\n        .with_fund_amount(INSTALLER_FUND_AMOUNT.into())\n        .build();\n\n    builder\n        .transfer_and_commit(fund_installer_account_request)\n        .expect_success();\n\n    let faucet_fund_amount = U512::from(400_000_000_000_000u64);\n\n    let 
installer_session_request = ExecuteRequestBuilder::standard(\n        installer_account,\n        FAUCET_INSTALLER_SESSION,\n        runtime_args! {ARG_ID => FAUCET_ID, ARG_AMOUNT => faucet_fund_amount},\n    )\n    .build();\n\n    builder\n        .exec(installer_session_request)\n        .expect_success()\n        .commit();\n\n    let installer_call_faucet_request = ExecuteRequestBuilder::contract_call_by_name(\n        installer_account,\n        &format!(\"{}_{}\", FAUCET_CONTRACT_NAMED_KEY, FAUCET_ID),\n        ENTRY_POINT_FAUCET,\n        runtime_args! {ARG_TARGET => user_account},\n    )\n    .build();\n\n    builder\n        .exec(installer_call_faucet_request)\n        .expect_failure()\n        .commit();\n}\n\n#[ignore]\n#[test]\nfn should_allow_funding_by_an_authorized_account() {\n    let installer_account = AccountHash::new([1u8; 32]);\n\n    let authorized_account_public_key = {\n        let secret_key =\n            SecretKey::ed25519_from_bytes([2u8; 32]).expect(\"failed to construct secret key\");\n        PublicKey::from(&secret_key)\n    };\n\n    let authorized_account = authorized_account_public_key.to_account_hash();\n    let user_account = AccountHash::new([3u8; 32]);\n    let faucet_fund_amount = U512::from(400_000_000_000_000u64);\n    let half_of_faucet_fund_amount = faucet_fund_amount / 2;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let mut helper = FaucetDeployHelper::new()\n        .with_installer_account(installer_account)\n        .with_installer_fund_amount(INSTALLER_FUND_AMOUNT.into())\n        .with_faucet_purse_fund_amount(faucet_fund_amount)\n        .with_faucet_available_amount(Some(half_of_faucet_fund_amount))\n        .with_faucet_distributions_per_interval(Some(2u64))\n        .with_faucet_time_interval(Some(10_000u64));\n\n    builder\n        .transfer_and_commit(helper.fund_installer_request())\n        .expect_success();\n\n    builder\n      
  .exec(helper.faucet_install_request())\n        .expect_success()\n        .commit();\n\n    helper.query_and_set_faucet_contract_hash(&builder);\n\n    builder\n        .exec(helper.faucet_config_request())\n        .expect_success()\n        .commit();\n\n    let installer_named_keys = builder\n        .get_entity_with_named_keys_by_account_hash(installer_account)\n        .expect(\"must have entity\")\n        .named_keys()\n        .clone();\n\n    let faucet_named_key = installer_named_keys\n        .get(&format!(\"{}_{}\", FAUCET_CONTRACT_NAMED_KEY, FAUCET_ID))\n        .expect(\"failed to find faucet named key\");\n\n    let hash = faucet_named_key.into_entity_hash().expect(\n        \"must convert to entity hash\\\n    \",\n    );\n    let key = Key::Hash(hash.value());\n\n    let maybe_authorized_account_public_key = builder\n        .query(None, key, &[AUTHORIZED_ACCOUNT_NAMED_KEY.to_string()])\n        .expect(\"failed to find authorized account named key\")\n        .as_cl_value()\n        .expect(\"failed to convert into cl value\")\n        .clone()\n        .into_t::<Option<PublicKey>>()\n        .expect(\"failed to convert into optional public key\");\n\n    assert_eq!(maybe_authorized_account_public_key, None::<PublicKey>);\n\n    let faucet_authorize_account_request = helper\n        .new_faucet_authorize_account_request_builder()\n        .with_authorized_user_public_key(Some(authorized_account_public_key.clone()))\n        .build();\n\n    builder\n        .exec(faucet_authorize_account_request)\n        .expect_success()\n        .commit();\n\n    let maybe_authorized_account_public_key = builder\n        .query(None, key, &[AUTHORIZED_ACCOUNT_NAMED_KEY.to_string()])\n        .expect(\"failed to find authorized account named key\")\n        .as_cl_value()\n        .expect(\"failed to convert into cl value\")\n        .clone()\n        .into_t::<Option<PublicKey>>()\n        .expect(\"failed to convert into optional public key\");\n\n    
assert_eq!(\n        maybe_authorized_account_public_key,\n        Some(authorized_account_public_key.clone())\n    );\n\n    let authorized_account_fund_amount = U512::from(10_000_000_000u64);\n    let faucet_fund_authorized_account_by_installer_request = helper\n        .new_faucet_fund_request_builder()\n        .with_arg_fund_amount(authorized_account_fund_amount)\n        .with_arg_target(authorized_account_public_key.to_account_hash())\n        .build();\n\n    builder\n        .exec(faucet_fund_authorized_account_by_installer_request)\n        .expect_success()\n        .commit();\n\n    let user_fund_amount = U512::from(10_000_000_000u64);\n    let faucet_fund_user_by_authorized_account_request = helper\n        .new_faucet_fund_request_builder()\n        .with_authorized_account(authorized_account)\n        .with_arg_fund_amount(user_fund_amount)\n        .with_arg_target(user_account)\n        .with_payment_amount(user_fund_amount)\n        .build();\n\n    builder\n        .exec(faucet_fund_user_by_authorized_account_request)\n        .expect_success()\n        .commit();\n\n    let user_main_purse_balance_after = builder.get_purse_balance(\n        builder\n            .get_expected_addressable_entity_by_account_hash(user_account)\n            .main_purse(),\n    );\n    assert_eq!(user_main_purse_balance_after, user_fund_amount);\n\n    // A user cannot fund themselves if there is an authorized account.\n    let faucet_fund_by_user_request = helper\n        .new_faucet_fund_request_builder()\n        .with_user_account(user_account)\n        .with_payment_amount(user_fund_amount)\n        .build();\n\n    builder\n        .exec(faucet_fund_by_user_request)\n        .expect_failure()\n        .commit();\n\n    let exec_result = builder\n        .get_last_exec_result()\n        .expect(\"failed to get exec results\");\n\n    let error = exec_result.error().unwrap();\n    assert!(\n        matches!(\n            error,\n            
engine_state::Error::Exec(ExecError::Revert(ApiError::User(\n                FAUCET_CALL_BY_USER_WITH_AUTHORIZED_ACCOUNT_SET\n            )))\n        ),\n        \"{:?}\",\n        error,\n    );\n}\n\n#[ignore]\n#[test]\nfn faucet_costs() {\n    // This test will fail if execution costs vary.  The expected costs should not be updated\n    // without understanding why the cost has changed.  If the costs do change, it should be\n    // reflected in the \"Costs by Entry Point\" section of the faucet crate's README.md.\n    const EXPECTED_FAUCET_INSTALL_COST: u64 = 149_181_711_315;\n    const EXPECTED_FAUCET_INSTALL_COST_ALT: u64 = 149_230_872_143;\n\n    const EXPECTED_FAUCET_SET_VARIABLES_COST: u64 = 79_463_750;\n\n    const EXPECTED_FAUCET_CALL_BY_INSTALLER_COST: u64 = 2_652_633_308;\n\n    const EXPECTED_FAUCET_CALL_BY_USER_COST: u64 = 2_558_333_326;\n\n    let installer_account = AccountHash::new([1u8; 32]);\n    let user_account: AccountHash = AccountHash::new([2u8; 32]);\n\n    let chainspec = ChainspecConfig::from_chainspec_path(&*CHAINSPEC_SYMLINK)\n        .expect(\"must build chainspec configuration\");\n    let chainspec_config = chainspec\n        .with_fee_handling(FeeHandling::NoFee)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_pricing_handling(PricingHandling::Fixed);\n    LmdbWasmTestBuilder::new_temporary_with_config(chainspec_config);\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let fund_installer_account_request =\n        TransferRequestBuilder::new(INSTALLER_FUND_AMOUNT, installer_account).build();\n\n    builder\n        .transfer_and_commit(fund_installer_account_request)\n        .expect_success();\n\n    let faucet_fund_amount = U512::from(400_000_000_000_000u64);\n    let installer_session_request = ExecuteRequestBuilder::standard(\n        installer_account,\n        FAUCET_INSTALLER_SESSION,\n        runtime_args! 
{ARG_ID => FAUCET_ID, ARG_AMOUNT => faucet_fund_amount },\n    )\n    .build();\n\n    builder\n        .exec(installer_session_request)\n        .expect_success()\n        .commit();\n\n    let faucet_install_cost = builder.last_exec_gas_consumed();\n\n    let assigned_time_interval = 10_000u64;\n    let assigned_distributions_per_interval = 2u64;\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(installer_account)\n        .with_authorization_keys(&[installer_account])\n        .with_stored_session_named_key(\n            &format!(\"{}_{}\", FAUCET_CONTRACT_NAMED_KEY, FAUCET_ID),\n            ENTRY_POINT_SET_VARIABLES,\n            runtime_args! {\n                ARG_AVAILABLE_AMOUNT => Some(faucet_fund_amount),\n                ARG_TIME_INTERVAL => Some(assigned_time_interval),\n                ARG_DISTRIBUTIONS_PER_INTERVAL => Some(assigned_distributions_per_interval)\n            },\n        )\n        .with_standard_payment(runtime_args! {ARG_AMOUNT => *DEFAULT_PAYMENT})\n        .with_deploy_hash([3; 32])\n        .build();\n\n    let installer_set_variable_request =\n        ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder\n        .exec(installer_set_variable_request)\n        .expect_success()\n        .commit();\n\n    let faucet_set_variables_cost = builder.last_exec_gas_consumed();\n\n    let user_fund_amount = U512::from(10_000_000_000u64);\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(installer_account)\n        .with_authorization_keys(&[installer_account])\n        .with_stored_session_named_key(\n            &format!(\"{}_{}\", FAUCET_CONTRACT_NAMED_KEY, FAUCET_ID),\n            ENTRY_POINT_FAUCET,\n            runtime_args! {ARG_TARGET => user_account, ARG_AMOUNT => user_fund_amount, ARG_ID => <Option<u64>>::None},\n        )\n        .with_standard_payment(runtime_args! 
{ARG_AMOUNT => *DEFAULT_PAYMENT})\n        .with_deploy_hash([4; 32])\n        .build();\n\n    let faucet_call_by_installer = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder\n        .exec(faucet_call_by_installer)\n        .expect_success()\n        .commit();\n\n    let faucet_call_by_installer_cost = builder.last_exec_gas_consumed();\n\n    let faucet_contract_hash = get_faucet_entity_hash(&builder, installer_account);\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(user_account)\n        .with_authorization_keys(&[user_account])\n        .with_stored_session_hash(\n            faucet_contract_hash,\n            ENTRY_POINT_FAUCET,\n            runtime_args! {ARG_TARGET => user_account, ARG_ID => <Option<u64>>::None},\n        )\n        .with_standard_payment(runtime_args! {ARG_AMOUNT => user_fund_amount})\n        .with_deploy_hash([4; 32])\n        .build();\n\n    let faucet_call_by_user_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder\n        .exec(faucet_call_by_user_request)\n        .expect_success()\n        .commit();\n\n    let faucet_call_by_user_cost = builder.last_exec_gas_consumed();\n\n    let mut costs_as_expected = true;\n    let cost_64 = faucet_install_cost.value().as_u64();\n    if cost_64 != EXPECTED_FAUCET_INSTALL_COST && cost_64 != EXPECTED_FAUCET_INSTALL_COST_ALT {\n        costs_as_expected = false;\n        eprintln!(\n            \"faucet_install_cost wrong: expected: {}, got: {}\",\n            EXPECTED_FAUCET_INSTALL_COST,\n            faucet_install_cost.value().as_u64()\n        );\n    }\n\n    if faucet_set_variables_cost.value().as_u64() != EXPECTED_FAUCET_SET_VARIABLES_COST {\n        costs_as_expected = false;\n        eprintln!(\n            \"faucet_set_variables_cost wrong: expected: {}, got: {}\",\n            EXPECTED_FAUCET_SET_VARIABLES_COST,\n            faucet_set_variables_cost.value().as_u64()\n        );\n    }\n\n    
if faucet_call_by_installer_cost.value().as_u64() != EXPECTED_FAUCET_CALL_BY_INSTALLER_COST {\n        costs_as_expected = false;\n        eprintln!(\n            \"faucet_call_by_installer_cost wrong: expected: {}, got: {}\",\n            EXPECTED_FAUCET_CALL_BY_INSTALLER_COST,\n            faucet_call_by_installer_cost.value().as_u64()\n        );\n    }\n\n    if faucet_call_by_user_cost.value().as_u64() != EXPECTED_FAUCET_CALL_BY_USER_COST {\n        costs_as_expected = false;\n        eprintln!(\n            \"faucet_call_by_user_cost wrong: expected: {}, got: {}\",\n            EXPECTED_FAUCET_CALL_BY_USER_COST,\n            faucet_call_by_user_cost.value().as_u64()\n        );\n    }\n    assert!(costs_as_expected);\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/explorer/faucet_test_helpers.rs",
    "content": "use rand::Rng;\n\nuse casper_engine_test_support::{\n    DeployItemBuilder, EntityWithNamedKeys, ExecuteRequest, ExecuteRequestBuilder,\n    LmdbWasmTestBuilder, TransferRequestBuilder, DEFAULT_PAYMENT,\n};\nuse casper_storage::data_access_layer::TransferRequest;\nuse casper_types::{\n    account::AccountHash, bytesrepr::FromBytes, runtime_args, AddressableEntityHash, CLTyped, Key,\n    PublicKey, URef, U512,\n};\n\nuse super::{\n    ARG_AMOUNT, ARG_AVAILABLE_AMOUNT, ARG_DISTRIBUTIONS_PER_INTERVAL, ARG_ID, ARG_TARGET,\n    ARG_TIME_INTERVAL, ENTRY_POINT_AUTHORIZE_TO, ENTRY_POINT_FAUCET, ENTRY_POINT_SET_VARIABLES,\n    FAUCET_CONTRACT_NAMED_KEY, FAUCET_FUND_AMOUNT, FAUCET_ID, FAUCET_INSTALLER_SESSION,\n    FAUCET_PURSE_NAMED_KEY, INSTALLER_ACCOUNT, INSTALLER_FUND_AMOUNT,\n};\n\n#[derive(Clone, Copy, Debug)]\npub struct FundAccountRequestBuilder {\n    target_account: AccountHash,\n    fund_amount: U512,\n    fund_id: Option<u64>,\n}\n\nimpl FundAccountRequestBuilder {\n    pub fn new() -> Self {\n        Self::default()\n    }\n\n    pub fn with_target_account(mut self, account_hash: AccountHash) -> Self {\n        self.target_account = account_hash;\n        self\n    }\n\n    pub fn with_fund_amount(mut self, fund_amount: U512) -> Self {\n        self.fund_amount = fund_amount;\n        self\n    }\n\n    pub fn with_fund_id(mut self, fund_id: Option<u64>) -> Self {\n        self.fund_id = fund_id;\n        self\n    }\n\n    pub fn build(&self) -> TransferRequest {\n        let mut builder = TransferRequestBuilder::new(self.fund_amount, self.target_account);\n        if let Some(id) = self.fund_id {\n            builder = builder.with_transfer_id(id);\n        }\n        builder.build()\n    }\n}\n\nimpl Default for FundAccountRequestBuilder {\n    fn default() -> Self {\n        Self {\n            target_account: INSTALLER_ACCOUNT,\n            fund_amount: U512::from(INSTALLER_FUND_AMOUNT),\n            fund_id: None,\n        }\n    
}\n}\n\n#[derive(Clone, Debug)]\npub struct FaucetInstallSessionRequestBuilder {\n    installer_account: AccountHash,\n    faucet_installer_session: String,\n    faucet_id: u64,\n    faucet_fund_amount: U512,\n}\n\nimpl FaucetInstallSessionRequestBuilder {\n    pub fn new() -> Self {\n        Self::default()\n    }\n\n    pub fn with_installer_account(mut self, installer_account: AccountHash) -> Self {\n        self.installer_account = installer_account;\n        self\n    }\n\n    pub fn with_faucet_installer_session(mut self, installer_session: &str) -> Self {\n        self.faucet_installer_session = installer_session.to_string();\n        self\n    }\n\n    pub fn with_faucet_id(mut self, faucet_id: u64) -> Self {\n        self.faucet_id = faucet_id;\n        self\n    }\n\n    pub fn with_faucet_fund_amount(mut self, faucet_fund_amount: U512) -> Self {\n        self.faucet_fund_amount = faucet_fund_amount;\n        self\n    }\n\n    pub fn build(&self) -> ExecuteRequest {\n        ExecuteRequestBuilder::standard(\n            self.installer_account,\n            &self.faucet_installer_session,\n            runtime_args! 
{\n                ARG_ID => self.faucet_id,\n                ARG_AMOUNT => self.faucet_fund_amount\n            },\n        )\n        .build()\n    }\n}\n\nimpl Default for FaucetInstallSessionRequestBuilder {\n    fn default() -> Self {\n        Self {\n            installer_account: INSTALLER_ACCOUNT,\n            faucet_installer_session: FAUCET_INSTALLER_SESSION.to_string(),\n            faucet_id: FAUCET_ID,\n            faucet_fund_amount: FAUCET_FUND_AMOUNT.into(),\n        }\n    }\n}\n\n#[derive(Debug, Copy, Clone)]\npub struct FaucetConfigRequestBuilder {\n    installer_account: AccountHash,\n    faucet_contract_hash: Option<AddressableEntityHash>,\n    available_amount: Option<U512>,\n    time_interval: Option<u64>,\n    distributions_per_interval: Option<u64>,\n}\n\nimpl FaucetConfigRequestBuilder {\n    pub fn with_installer_account(mut self, installer_account: AccountHash) -> Self {\n        self.installer_account = installer_account;\n        self\n    }\n\n    pub fn with_faucet_contract_hash(mut self, contract_hash: AddressableEntityHash) -> Self {\n        self.faucet_contract_hash = Some(contract_hash);\n        self\n    }\n\n    pub fn with_available_amount(mut self, available_amount: Option<U512>) -> Self {\n        self.available_amount = available_amount;\n        self\n    }\n\n    pub fn with_time_interval(mut self, time_interval: Option<u64>) -> Self {\n        self.time_interval = time_interval;\n        self\n    }\n\n    pub fn with_distributions_per_interval(\n        mut self,\n        distributions_per_interval: Option<u64>,\n    ) -> Self {\n        self.distributions_per_interval = distributions_per_interval;\n        self\n    }\n\n    pub fn build(&self) -> ExecuteRequest {\n        ExecuteRequestBuilder::contract_call_by_hash(\n            self.installer_account,\n            self.faucet_contract_hash\n                .expect(\"must supply faucet contract hash\"),\n            ENTRY_POINT_SET_VARIABLES,\n            
runtime_args! {\n                ARG_AVAILABLE_AMOUNT => self.available_amount,\n                ARG_TIME_INTERVAL => self.time_interval,\n                ARG_DISTRIBUTIONS_PER_INTERVAL => self.distributions_per_interval\n            },\n        )\n        .build()\n    }\n}\n\nimpl Default for FaucetConfigRequestBuilder {\n    fn default() -> Self {\n        Self {\n            installer_account: INSTALLER_ACCOUNT,\n            faucet_contract_hash: None,\n            available_amount: None,\n            time_interval: None,\n            distributions_per_interval: None,\n        }\n    }\n}\n\npub struct FaucetAuthorizeAccountRequestBuilder {\n    installer_account: AccountHash,\n    authorized_account_public_key: Option<PublicKey>,\n    faucet_contract_hash: Option<AddressableEntityHash>,\n}\n\nimpl FaucetAuthorizeAccountRequestBuilder {\n    pub fn new() -> FaucetAuthorizeAccountRequestBuilder {\n        FaucetAuthorizeAccountRequestBuilder::default()\n    }\n\n    pub fn with_faucet_contract_hash(\n        mut self,\n        faucet_contract_hash: Option<AddressableEntityHash>,\n    ) -> Self {\n        self.faucet_contract_hash = faucet_contract_hash;\n        self\n    }\n\n    pub fn with_installer_account(mut self, installer_account: AccountHash) -> Self {\n        self.installer_account = installer_account;\n        self\n    }\n\n    pub fn with_authorized_user_public_key(\n        mut self,\n        authorized_account_public_key: Option<PublicKey>,\n    ) -> Self {\n        self.authorized_account_public_key = authorized_account_public_key;\n        self\n    }\n\n    pub fn build(self) -> ExecuteRequest {\n        ExecuteRequestBuilder::contract_call_by_hash(\n            self.installer_account,\n            self.faucet_contract_hash\n                .expect(\"must supply faucet contract hash\"),\n            ENTRY_POINT_AUTHORIZE_TO,\n            runtime_args! 
{ARG_TARGET => self.authorized_account_public_key},\n        )\n        .build()\n    }\n}\n\nimpl Default for FaucetAuthorizeAccountRequestBuilder {\n    fn default() -> Self {\n        Self {\n            installer_account: INSTALLER_ACCOUNT,\n            authorized_account_public_key: None,\n            faucet_contract_hash: None,\n        }\n    }\n}\n\nenum FaucetCallerAccount {\n    Installer(AccountHash),\n    Authorized(AccountHash),\n    User(AccountHash),\n}\n\nimpl FaucetCallerAccount {\n    pub fn account_hash(&self) -> AccountHash {\n        match self {\n            FaucetCallerAccount::Installer(account_hash)\n            | FaucetCallerAccount::Authorized(account_hash)\n            | FaucetCallerAccount::User(account_hash) => *account_hash,\n        }\n    }\n}\n\npub struct FaucetFundRequestBuilder {\n    faucet_contract_hash: Option<AddressableEntityHash>,\n    caller_account: FaucetCallerAccount,\n    arg_target: Option<AccountHash>,\n    arg_fund_amount: Option<U512>,\n    arg_id: Option<u64>,\n    payment_amount: U512,\n    block_time: Option<u64>,\n}\n\nimpl FaucetFundRequestBuilder {\n    pub fn new() -> Self {\n        Self::default()\n    }\n\n    pub fn with_installer_account(mut self, installer_account: AccountHash) -> Self {\n        self.caller_account = FaucetCallerAccount::Installer(installer_account);\n        self\n    }\n\n    pub fn with_authorized_account(mut self, authorized_account: AccountHash) -> Self {\n        self.caller_account = FaucetCallerAccount::Authorized(authorized_account);\n        self\n    }\n\n    pub fn with_user_account(mut self, user_account: AccountHash) -> Self {\n        self.caller_account = FaucetCallerAccount::User(user_account);\n        self\n    }\n\n    pub fn with_arg_fund_amount(mut self, fund_amount: U512) -> Self {\n        self.arg_fund_amount = Some(fund_amount);\n        self\n    }\n\n    pub fn with_arg_target(mut self, target: AccountHash) -> Self {\n        self.arg_target = 
Some(target);\n        self\n    }\n\n    pub fn with_faucet_contract_hash(\n        mut self,\n        faucet_contract_hash: AddressableEntityHash,\n    ) -> Self {\n        self.faucet_contract_hash = Some(faucet_contract_hash);\n        self\n    }\n\n    pub fn with_payment_amount(mut self, payment_amount: U512) -> Self {\n        self.payment_amount = payment_amount;\n        self\n    }\n\n    pub fn build(self) -> ExecuteRequest {\n        let mut rng = rand::thread_rng();\n\n        let deploy_item = DeployItemBuilder::new()\n            .with_address(self.caller_account.account_hash())\n            .with_authorization_keys(&[self.caller_account.account_hash()])\n            .with_stored_session_hash(\n                self.faucet_contract_hash\n                    .expect(\"must supply faucet contract hash\"),\n                ENTRY_POINT_FAUCET,\n                match self.caller_account {\n                    FaucetCallerAccount::Installer(_)\n                    | FaucetCallerAccount::Authorized(_) => runtime_args! {\n                        ARG_TARGET => self.arg_target.expect(\"must supply arg target when calling as installer or authorized account\"),\n                        ARG_AMOUNT => self.arg_fund_amount.expect(\"must supply arg amount when calling as installer or authorized account\"),\n                        ARG_ID => self.arg_id\n                    },\n                    FaucetCallerAccount::User(_) => runtime_args! {\n                       ARG_ID => self.arg_id\n                    },\n                },\n            )\n            .with_standard_payment(runtime_args! 
{ARG_AMOUNT => self.payment_amount})\n            .with_deploy_hash(rng.gen())\n            .build();\n\n        match self.block_time {\n            Some(block_time) => ExecuteRequestBuilder::from_deploy_item(&deploy_item)\n                .with_block_time(block_time)\n                .build(),\n            None => ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(),\n        }\n    }\n}\n\nimpl Default for FaucetFundRequestBuilder {\n    fn default() -> Self {\n        Self {\n            arg_fund_amount: None,\n            payment_amount: *DEFAULT_PAYMENT,\n            faucet_contract_hash: None,\n            caller_account: FaucetCallerAccount::Installer(INSTALLER_ACCOUNT),\n            arg_target: None,\n            arg_id: None,\n            block_time: None,\n        }\n    }\n}\n\npub fn query_stored_value<T: CLTyped + FromBytes>(\n    builder: &mut LmdbWasmTestBuilder,\n    base_key: Key,\n    path: Vec<String>,\n) -> T {\n    builder\n        .query(None, base_key, &path)\n        .expect(\"must have stored value\")\n        .as_cl_value()\n        .cloned()\n        .expect(\"must have cl value\")\n        .into_t::<T>()\n        .expect(\"must get value\")\n}\n\npub fn get_faucet_entity_hash(\n    builder: &LmdbWasmTestBuilder,\n    installer_account: AccountHash,\n) -> AddressableEntityHash {\n    builder\n        .get_entity_with_named_keys_by_account_hash(installer_account)\n        .unwrap()\n        .named_keys()\n        .get(&format!(\"{}_{}\", FAUCET_CONTRACT_NAMED_KEY, FAUCET_ID))\n        .cloned()\n        .and_then(Key::into_entity_hash_addr)\n        .map(AddressableEntityHash::new)\n        .expect(\"failed to find faucet contract\")\n}\n\npub fn get_faucet_entity(\n    builder: &LmdbWasmTestBuilder,\n    installer_account: AccountHash,\n) -> EntityWithNamedKeys {\n    builder\n        .get_entity_with_named_keys_by_entity_hash(get_faucet_entity_hash(\n            builder,\n            installer_account,\n        ))\n        
.expect(\"failed to find faucet contract\")\n}\n\npub fn get_faucet_purse(builder: &LmdbWasmTestBuilder, installer_account: AccountHash) -> URef {\n    get_faucet_entity(builder, installer_account)\n        .named_keys()\n        .get(FAUCET_PURSE_NAMED_KEY)\n        .cloned()\n        .and_then(Key::into_uref)\n        .expect(\"failed to find faucet purse\")\n}\n\npub struct FaucetDeployHelper {\n    installer_account: AccountHash,\n    installer_fund_amount: U512,\n    installer_fund_id: Option<u64>,\n    authorized_user_public_key: Option<PublicKey>,\n    faucet_purse_fund_amount: U512,\n    faucet_installer_session: String,\n    faucet_id: u64,\n    faucet_contract_hash: Option<AddressableEntityHash>,\n    faucet_distributions_per_interval: Option<u64>,\n    faucet_available_amount: Option<U512>,\n    faucet_time_interval: Option<u64>,\n    fund_account_request_builder: FundAccountRequestBuilder,\n    pub faucet_install_session_request_builder: FaucetInstallSessionRequestBuilder,\n    pub faucet_config_request_builder: FaucetConfigRequestBuilder,\n}\n\nimpl FaucetDeployHelper {\n    pub fn new() -> Self {\n        Self::default()\n    }\n\n    pub fn installer_account(&self) -> AccountHash {\n        self.installer_account\n    }\n\n    pub fn with_installer_account(mut self, installer_account: AccountHash) -> Self {\n        self.installer_account = installer_account;\n        self\n    }\n\n    pub fn with_installer_fund_amount(mut self, installer_fund_amount: U512) -> Self {\n        self.installer_fund_amount = installer_fund_amount;\n        self\n    }\n\n    pub fn with_faucet_purse_fund_amount(mut self, faucet_purse_fund_amount: U512) -> Self {\n        self.faucet_purse_fund_amount = faucet_purse_fund_amount;\n        self\n    }\n\n    pub fn with_faucet_available_amount(mut self, available_amount: Option<U512>) -> Self {\n        self.faucet_available_amount = available_amount;\n        self\n    }\n\n    pub fn 
with_faucet_distributions_per_interval(\n        mut self,\n        distributions_per_interval: Option<u64>,\n    ) -> Self {\n        self.faucet_distributions_per_interval = distributions_per_interval;\n        self\n    }\n\n    pub fn with_faucet_time_interval(mut self, time_interval_ms: Option<u64>) -> Self {\n        self.faucet_time_interval = time_interval_ms;\n        self\n    }\n\n    pub fn query_and_set_faucet_contract_hash(\n        &mut self,\n        builder: &LmdbWasmTestBuilder,\n    ) -> AddressableEntityHash {\n        let contract_hash = get_faucet_entity_hash(builder, self.installer_account());\n        self.faucet_contract_hash = Some(contract_hash);\n\n        contract_hash\n    }\n\n    pub fn query_faucet_purse(&self, builder: &LmdbWasmTestBuilder) -> URef {\n        get_faucet_purse(builder, self.installer_account())\n    }\n\n    pub fn query_faucet_purse_balance(&self, builder: &LmdbWasmTestBuilder) -> U512 {\n        let faucet_purse = self.query_faucet_purse(builder);\n        builder.get_purse_balance(faucet_purse)\n    }\n\n    pub fn faucet_purse_fund_amount(&self) -> U512 {\n        self.faucet_purse_fund_amount\n    }\n\n    pub fn faucet_contract_hash(&self) -> Option<AddressableEntityHash> {\n        self.faucet_contract_hash\n    }\n\n    pub fn faucet_distributions_per_interval(&self) -> Option<u64> {\n        self.faucet_distributions_per_interval\n    }\n\n    pub fn faucet_time_interval(&self) -> Option<u64> {\n        self.faucet_time_interval\n    }\n\n    pub fn fund_installer_request(&self) -> TransferRequest {\n        self.fund_account_request_builder\n            .with_target_account(self.installer_account)\n            .with_fund_amount(self.installer_fund_amount)\n            .with_fund_id(self.installer_fund_id)\n            .build()\n    }\n\n    pub fn faucet_install_request(&self) -> ExecuteRequest {\n        self.faucet_install_session_request_builder\n            .clone()\n            
.with_installer_account(self.installer_account)\n            .with_faucet_id(self.faucet_id)\n            .with_faucet_fund_amount(self.faucet_purse_fund_amount)\n            .with_faucet_installer_session(&self.faucet_installer_session)\n            .build()\n    }\n\n    pub fn faucet_config_request(&self) -> ExecuteRequest {\n        self.faucet_config_request_builder\n            .with_installer_account(self.installer_account())\n            .with_faucet_contract_hash(\n                self.faucet_contract_hash()\n                    .expect(\"must supply faucet contract hash\"),\n            )\n            .with_distributions_per_interval(self.faucet_distributions_per_interval)\n            .with_available_amount(self.faucet_available_amount)\n            .with_time_interval(self.faucet_time_interval)\n            .build()\n    }\n\n    pub fn new_faucet_fund_request_builder(&self) -> FaucetFundRequestBuilder {\n        FaucetFundRequestBuilder::new().with_faucet_contract_hash(\n            self.faucet_contract_hash()\n                .expect(\"must supply faucet contract hash\"),\n        )\n    }\n\n    pub fn new_faucet_authorize_account_request_builder(\n        &self,\n    ) -> FaucetAuthorizeAccountRequestBuilder {\n        FaucetAuthorizeAccountRequestBuilder::new()\n            .with_installer_account(self.installer_account)\n            .with_authorized_user_public_key(self.authorized_user_public_key.clone())\n            .with_faucet_contract_hash(self.faucet_contract_hash)\n    }\n}\n\nimpl Default for FaucetDeployHelper {\n    fn default() -> Self {\n        Self {\n            installer_fund_amount: U512::from(INSTALLER_FUND_AMOUNT),\n            installer_account: INSTALLER_ACCOUNT,\n            installer_fund_id: None,\n            authorized_user_public_key: None,\n            faucet_installer_session: FAUCET_INSTALLER_SESSION.to_string(),\n            faucet_id: FAUCET_ID,\n            faucet_purse_fund_amount: 
U512::from(FAUCET_FUND_AMOUNT),\n            faucet_contract_hash: None,\n            faucet_distributions_per_interval: None,\n            faucet_available_amount: None,\n            faucet_time_interval: None,\n            fund_account_request_builder: Default::default(),\n            faucet_install_session_request_builder: Default::default(),\n            faucet_config_request_builder: Default::default(),\n        }\n    }\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/explorer/mod.rs",
    "content": "mod faucet;\npub mod faucet_test_helpers;\n\nuse casper_types::account::AccountHash;\n\n// Test constants.\npub const FAUCET_INSTALLER_SESSION: &str = \"faucet_stored.wasm\";\npub const FAUCET_CONTRACT_NAMED_KEY: &str = \"faucet\";\npub const INSTALLER_FUND_AMOUNT: u64 = 500_000_000_000_000;\npub const TWO_HOURS_AS_MILLIS: u64 = 7_200_000;\npub const FAUCET_ID: u64 = 1337;\npub const INSTALLER_ACCOUNT: AccountHash = AccountHash::new([1u8; 32]);\npub const FAUCET_FUND_AMOUNT: u64 = 500_000u64;\npub const FAUCET_TIME_INTERVAL: u64 = 10_000;\n\n// contract args and entry points.\npub const ARG_TARGET: &str = \"target\";\npub const ARG_AMOUNT: &str = \"amount\";\npub const ARG_ID: &str = \"id\";\npub const ARG_AVAILABLE_AMOUNT: &str = \"available_amount\";\npub const ARG_TIME_INTERVAL: &str = \"time_interval\";\npub const ARG_DISTRIBUTIONS_PER_INTERVAL: &str = \"distributions_per_interval\";\npub const ENTRY_POINT_FAUCET: &str = \"call_faucet\";\npub const ENTRY_POINT_SET_VARIABLES: &str = \"set_variables\";\npub const ENTRY_POINT_AUTHORIZE_TO: &str = \"authorize_to\";\n\n// stored contract named keys.\npub const AVAILABLE_AMOUNT_NAMED_KEY: &str = \"available_amount\";\npub const TIME_INTERVAL_NAMED_KEY: &str = \"time_interval\";\npub const LAST_DISTRIBUTION_TIME_NAMED_KEY: &str = \"last_distribution_time\";\npub const FAUCET_PURSE_NAMED_KEY: &str = \"faucet_purse\";\npub const INSTALLER_NAMED_KEY: &str = \"installer\";\npub const DISTRIBUTIONS_PER_INTERVAL_NAMED_KEY: &str = \"distributions_per_interval\";\npub const REMAINING_REQUESTS_NAMED_KEY: &str = \"remaining_requests\";\npub const AUTHORIZED_ACCOUNT_NAMED_KEY: &str = \"authorized_account\";\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/get_balance.rs",
    "content": "use once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    LmdbWasmTestBuilder, TransferRequestBuilder, LOCAL_GENESIS_REQUEST,\n};\nuse casper_storage::{\n    data_access_layer::BalanceIdentifier,\n    tracking_copy::{self, ValidationError},\n};\nuse casper_types::{\n    account::AccountHash, AccessRights, Digest, Key, ProtocolVersion, PublicKey, SecretKey, URef,\n    U512,\n};\n\nstatic ALICE_KEY: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic ALICE_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*ALICE_KEY));\n\nstatic TRANSFER_AMOUNT_1: Lazy<U512> = Lazy::new(|| U512::from(100_000_000));\n\n#[ignore]\n#[test]\nfn get_balance_should_work() {\n    let protocol_version = ProtocolVersion::V2_0_0;\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let block_time = 1_000_000;\n    let transfer_request = TransferRequestBuilder::new(*TRANSFER_AMOUNT_1, *ALICE_ADDR)\n        .with_block_time(block_time)\n        .build();\n\n    builder\n        .transfer_and_commit(transfer_request)\n        .expect_success();\n\n    let alice_account = builder\n        .get_entity_by_account_hash(*ALICE_ADDR)\n        .expect(\"should have Alice's account\");\n\n    let alice_main_purse = alice_account.main_purse();\n\n    let alice_balance_result = builder.get_purse_balance_result_with_proofs(\n        protocol_version,\n        BalanceIdentifier::Purse(alice_main_purse),\n    );\n\n    let alice_balance = alice_balance_result\n        .available_balance()\n        .cloned()\n        .expect(\"should have motes\");\n\n    assert_eq!(alice_balance, *TRANSFER_AMOUNT_1);\n\n    let state_root_hash = builder.get_post_state_hash();\n\n    let proofs_result = alice_balance_result\n        .proofs_result()\n        .expect(\"should have proofs result\");\n    let 
balance_proof = proofs_result\n        .total_balance_proof()\n        .expect(\"should have proofs\")\n        .clone();\n\n    assert!(tracking_copy::validate_balance_proof(\n        &state_root_hash,\n        &balance_proof,\n        alice_main_purse.into(),\n        &alice_balance,\n    )\n    .is_ok());\n\n    let bogus_key = Key::Hash([1u8; 32]);\n    assert_eq!(\n        tracking_copy::validate_balance_proof(\n            &state_root_hash,\n            &balance_proof,\n            bogus_key.to_owned(),\n            &alice_balance,\n        ),\n        Err(ValidationError::KeyIsNotAURef(bogus_key))\n    );\n\n    let bogus_uref: Key = Key::URef(URef::new([3u8; 32], AccessRights::READ_ADD_WRITE));\n    assert_eq!(\n        tracking_copy::validate_balance_proof(\n            &state_root_hash,\n            &balance_proof,\n            bogus_uref,\n            &alice_balance,\n        ),\n        Err(ValidationError::UnexpectedKey)\n    );\n\n    let bogus_hash = Digest::hash([5u8; 32]);\n    assert_eq!(\n        tracking_copy::validate_balance_proof(\n            &bogus_hash,\n            &balance_proof,\n            alice_main_purse.into(),\n            &alice_balance,\n        ),\n        Err(ValidationError::InvalidProofHash)\n    );\n\n    let bogus_motes = U512::from(1337);\n    assert_eq!(\n        tracking_copy::validate_balance_proof(\n            &state_root_hash,\n            &balance_proof,\n            alice_main_purse.into(),\n            &bogus_motes,\n        ),\n        Err(ValidationError::UnexpectedValue)\n    );\n}\n\n#[ignore]\n#[test]\nfn get_balance_using_public_key_should_work() {\n    let protocol_version = ProtocolVersion::V2_0_0;\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let block_time = 1_000_000;\n    let transfer_request = TransferRequestBuilder::new(*TRANSFER_AMOUNT_1, *ALICE_ADDR)\n        .with_block_time(block_time)\n        .build();\n\n    builder\n    
    .transfer_and_commit(transfer_request)\n        .expect_success();\n\n    let alice_account = builder\n        .get_entity_by_account_hash(*ALICE_ADDR)\n        .expect(\"should have Alice's account\");\n\n    let alice_main_purse = alice_account.main_purse();\n\n    let alice_balance_result =\n        builder.get_public_key_balance_result_with_proofs(protocol_version, ALICE_KEY.clone());\n\n    let alice_balance = alice_balance_result\n        .available_balance()\n        .cloned()\n        .expect(\"should have motes\");\n\n    assert_eq!(alice_balance, *TRANSFER_AMOUNT_1);\n\n    let state_root_hash = builder.get_post_state_hash();\n\n    let proofs_result = alice_balance_result\n        .proofs_result()\n        .expect(\"should have proofs result\");\n    let balance_proof = proofs_result\n        .total_balance_proof()\n        .expect(\"should have proofs\")\n        .clone();\n\n    assert!(tracking_copy::validate_balance_proof(\n        &state_root_hash,\n        &balance_proof,\n        alice_main_purse.into(),\n        &alice_balance,\n    )\n    .is_ok());\n\n    let bogus_key = Key::Hash([1u8; 32]);\n    assert_eq!(\n        tracking_copy::validate_balance_proof(\n            &state_root_hash,\n            &balance_proof,\n            bogus_key.to_owned(),\n            &alice_balance,\n        ),\n        Err(ValidationError::KeyIsNotAURef(bogus_key))\n    );\n\n    let bogus_uref: Key = Key::URef(URef::new([3u8; 32], AccessRights::READ_ADD_WRITE));\n    assert_eq!(\n        tracking_copy::validate_balance_proof(\n            &state_root_hash,\n            &balance_proof,\n            bogus_uref,\n            &alice_balance,\n        ),\n        Err(ValidationError::UnexpectedKey)\n    );\n\n    let bogus_hash = Digest::hash([5u8; 32]);\n    assert_eq!(\n        tracking_copy::validate_balance_proof(\n            &bogus_hash,\n            &balance_proof,\n            alice_main_purse.into(),\n            &alice_balance,\n        ),\n        
Err(ValidationError::InvalidProofHash)\n    );\n\n    let bogus_motes = U512::from(1337);\n    assert_eq!(\n        tracking_copy::validate_balance_proof(\n            &state_root_hash,\n            &balance_proof,\n            alice_main_purse.into(),\n            &bogus_motes,\n        ),\n        Err(ValidationError::UnexpectedValue)\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/groups.rs",
    "content": "#![allow(deprecated)]\n\nuse assert_matches::assert_matches;\nuse once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, UpgradeRequestBuilder,\n    DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, DEFAULT_PROTOCOL_VERSION, LOCAL_GENESIS_REQUEST,\n    MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse casper_execution_engine::{engine_state::Error, execution::ExecError};\nuse casper_types::{\n    account::AccountHash,\n    contracts::{ContractPackageHash, CONTRACT_INITIAL_VERSION},\n    runtime_args, Key, PackageHash, RuntimeArgs, U512,\n};\n\nuse crate::wasm_utils;\n\nconst CONTRACT_GROUPS: &str = \"groups.wasm\";\nconst PACKAGE_HASH_KEY: &str = \"package_hash_key\";\nconst PACKAGE_ACCESS_KEY: &str = \"package_access_key\";\nconst RESTRICTED_SESSION: &str = \"restricted_session\";\nconst RESTRICTED_CONTRACT: &str = \"restricted_contract\";\nconst RESTRICTED_SESSION_CALLER: &str = \"restricted_session_caller\";\nconst UNRESTRICTED_CONTRACT_CALLER: &str = \"unrestricted_contract_caller\";\nconst PACKAGE_HASH_ARG: &str = \"package_hash\";\nconst ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]);\nconst CONTRACT_TRANSFER_TO_ACCOUNT: &str = \"transfer_to_account_u512.wasm\";\nconst RESTRICTED_CONTRACT_CALLER_AS_SESSION: &str = \"restricted_contract_caller_as_session\";\nconst UNCALLABLE_SESSION: &str = \"uncallable_session\";\nconst UNCALLABLE_CONTRACT: &str = \"uncallable_contract\";\nconst CALL_RESTRICTED_ENTRY_POINTS: &str = \"call_restricted_entry_points\";\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_TARGET: &str = \"target\";\n\nstatic TRANSFER_1_AMOUNT: Lazy<U512> =\n    Lazy::new(|| U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) + 1000);\n\nfn setup_from_lmdb_fixture() -> LmdbWasmTestBuilder {\n    // let (mut builder, _, _) = lmdb_fixture::builder_from_global_state_fixture(GROUPS_FIXTURE);\n    // builder.with_block_time(Timestamp::now().into());\n    // 
builder.with_gas_hold_config(HoldBalanceHandling::default(), 1200u64);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_GROUPS,\n        RuntimeArgs::default(),\n    )\n    .build();\n    builder.exec(exec_request_1).expect_success().commit();\n    builder\n}\n\n#[ignore]\n#[test]\nfn should_call_group_restricted_session() {\n    let mut builder = setup_from_lmdb_fixture();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have default contract\");\n\n    let _package_hash = account\n        .named_keys()\n        .get(PACKAGE_HASH_KEY)\n        .expect(\"should have contract package\");\n    let _access_uref = account\n        .named_keys()\n        .get(PACKAGE_ACCESS_KEY)\n        .expect(\"should have package hash\");\n\n    // This inserts package as an argument because this test\n    // can work from different accounts which might not have the same keys in their session\n    // code.\n    let exec_request_2 = ExecuteRequestBuilder::versioned_contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        PACKAGE_HASH_KEY,\n        None,\n        RESTRICTED_SESSION,\n        runtime_args! 
{},\n    )\n    .build();\n\n    builder.exec(exec_request_2).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_call_group_restricted_session_caller() {\n    let mut builder = setup_from_lmdb_fixture();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have default contract package\");\n\n    let package_hash = account\n        .named_keys()\n        .get(PACKAGE_HASH_KEY)\n        .expect(\"should have contract package\");\n    let _access_uref = account\n        .named_keys()\n        .get(PACKAGE_ACCESS_KEY)\n        .expect(\"should have package hash\");\n\n    // This inserts package as an argument because this test\n    // can work from different accounts which might not have the same keys in their session\n    // code.\n    let exec_request_2 = ExecuteRequestBuilder::versioned_contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        PACKAGE_HASH_KEY,\n        None,\n        RESTRICTED_SESSION,\n        runtime_args! {\n            PACKAGE_HASH_ARG => package_hash.into_package_hash()\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_2).expect_success().commit();\n}\n\n#[test]\n#[ignore]\nfn should_not_call_restricted_session_from_wrong_account() {\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{ ARG_TARGET => ACCOUNT_1_ADDR, ARG_AMOUNT => *TRANSFER_1_AMOUNT },\n    )\n    .build();\n\n    let mut builder = setup_from_lmdb_fixture();\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have default contract package\");\n    let package_hash = account\n        .named_keys()\n        .get(PACKAGE_HASH_KEY)\n        .expect(\"should have contract package\");\n    let _access_uref = account\n        .named_keys()\n        .get(PACKAGE_ACCESS_KEY)\n        .expect(\"should have package hash\");\n\n    let args = runtime_args! {};\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(ACCOUNT_1_ADDR)\n        .with_stored_versioned_contract_by_hash(\n            package_hash.into_package_addr().expect(\"should be hash\"),\n            None,\n            RESTRICTED_SESSION,\n            args,\n        )\n        .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        .with_authorization_keys(&[ACCOUNT_1_ADDR])\n        .with_deploy_hash([3; 32])\n        .build();\n\n    let exec_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(exec_request_3).commit();\n\n    let _account = builder\n        .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[])\n        .expect(\"should query account\");\n\n    let response = builder\n        .get_last_exec_result()\n        .expect(\"should have last response\");\n    let error = response.error().expect(\"should have error\");\n    assert_matches!(error, Error::Exec(ExecError::InvalidContext));\n}\n\n#[test]\n#[ignore]\nfn should_not_call_restricted_session_caller_from_wrong_account() {\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{ ARG_TARGET => ACCOUNT_1_ADDR, ARG_AMOUNT => *TRANSFER_1_AMOUNT },\n    )\n    .build();\n\n    let mut builder = setup_from_lmdb_fixture();\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have contract\");\n\n    let package_hash = *account\n        .named_keys()\n        .get(PACKAGE_HASH_KEY)\n        .expect(\"should have contract package\");\n    let _access_uref = account\n        .named_keys()\n        .get(PACKAGE_ACCESS_KEY)\n        .expect(\"should have package hash\");\n\n    let package_hash = package_hash\n        .into_package_hash()\n        .map(|package_hash| Key::Hash(package_hash.value()))\n        .expect(\"must get Key::Hash\");\n\n    let args = runtime_args! {\n        \"package_hash\" => package_hash,\n    };\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(ACCOUNT_1_ADDR)\n        .with_stored_versioned_contract_by_hash(\n            package_hash.into_package_addr().expect(\"should be hash\"),\n            None,\n            RESTRICTED_SESSION_CALLER,\n            args,\n        )\n        .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        .with_authorization_keys(&[ACCOUNT_1_ADDR])\n        .with_deploy_hash([3; 32])\n        .build();\n\n    let exec_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(exec_request_3).expect_failure();\n\n    let _account = builder\n        .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[])\n        .expect(\"should query account\");\n\n    let response = builder\n        .get_last_exec_result()\n        .expect(\"should have last response\");\n    let error = response.error().expect(\"should have error\");\n    assert_matches!(error, Error::Exec(ExecError::InvalidContext));\n}\n\n#[ignore]\n#[test]\nfn should_call_group_restricted_contract() {\n    let mut builder = setup_from_lmdb_fixture();\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_new_protocol_version(DEFAULT_PROTOCOL_VERSION)\n            .with_enable_addressable_entity(false)\n            .build()\n    };\n\n    builder.upgrade(&mut upgrade_request);\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have default contract package\");\n\n    let package_hash = account\n        .named_keys()\n        .get(PACKAGE_HASH_KEY)\n        .expect(\"should have contract package\");\n    let _access_uref = account\n        .named_keys()\n        .get(PACKAGE_ACCESS_KEY)\n        .expect(\"should have package hash\");\n\n    // This inserts package as an argument because this test\n    // can work from different accounts which might not have the same keys in their session\n    // code.\n    let args = runtime_args! {\n        PACKAGE_HASH_ARG => *package_hash,\n    };\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_stored_versioned_contract_by_name(PACKAGE_HASH_KEY, None, RESTRICTED_CONTRACT, args)\n        .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([3; 32])\n        .build();\n\n    let exec_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let _account = builder\n        .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[])\n        .expect(\"should query account\");\n}\n\n#[ignore]\n#[test]\nfn should_not_call_group_restricted_contract_from_wrong_account() {\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! { ARG_TARGET => ACCOUNT_1_ADDR, ARG_AMOUNT => *TRANSFER_1_AMOUNT },\n    )\n    .build();\n\n    let mut builder = setup_from_lmdb_fixture();\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have default contract package\");\n\n    let package_hash = account\n        .named_keys()\n        .get(PACKAGE_HASH_KEY)\n        .expect(\"should have contract package\");\n    let _access_uref = account\n        .named_keys()\n        .get(PACKAGE_ACCESS_KEY)\n        .expect(\"should have package hash\");\n\n    // This inserts package as an argument because this test\n    // can work from different accounts which might not have the same keys in their session\n    // code.\n    let args = runtime_args! {\n        PACKAGE_HASH_ARG => *package_hash,\n    };\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(ACCOUNT_1_ADDR)\n        .with_stored_versioned_contract_by_hash(\n            package_hash.into_package_addr().expect(\"should be hash\"),\n            None,\n            RESTRICTED_CONTRACT,\n            args,\n        )\n        .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        .with_authorization_keys(&[ACCOUNT_1_ADDR])\n        .with_deploy_hash([3; 32])\n        .build();\n\n    let exec_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(exec_request_3).commit();\n\n    let response = builder\n        .get_last_exec_result()\n        .expect(\"should have last response\");\n    let error = response.error().expect(\"should have error\");\n    assert_matches!(error, Error::Exec(ExecError::InvalidContext));\n}\n\n#[ignore]\n#[test]\nfn should_call_group_unrestricted_contract_caller() {\n    let mut builder = setup_from_lmdb_fixture();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have default contract package\");\n\n    let package_hash = account\n        .named_keys()\n        .get(PACKAGE_HASH_KEY)\n        .expect(\"should have contract package\");\n    let _access_uref = account\n        .named_keys()\n        .get(PACKAGE_ACCESS_KEY)\n        .expect(\"should have package hash\");\n\n    let package_hash = package_hash\n        .into_package_hash()\n        .map(|package_hash| ContractPackageHash::new(package_hash.value()))\n        .expect(\"must get Key::Hash\");\n\n    let args = runtime_args! {\n        PACKAGE_HASH_ARG => package_hash,\n    };\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_stored_versioned_contract_by_name(\n            PACKAGE_HASH_KEY,\n            None,\n            UNRESTRICTED_CONTRACT_CALLER,\n            args,\n        )\n        .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([3; 32])\n        .build();\n\n    let exec_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let _account = builder\n        .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[])\n        .expect(\"should query account\");\n}\n\n#[ignore]\n#[test]\nfn should_call_unrestricted_contract_caller_from_different_account() {\n    let mut builder = setup_from_lmdb_fixture();\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! { ARG_TARGET => ACCOUNT_1_ADDR, ARG_AMOUNT => *TRANSFER_1_AMOUNT },\n    )\n    .build();\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have contract\");\n\n    let package_hash = *account\n        .named_keys()\n        .get(PACKAGE_HASH_KEY)\n        .expect(\"should have contract package\");\n    let _access_uref = account\n        .named_keys()\n        .get(PACKAGE_ACCESS_KEY)\n        .expect(\"should have package hash\");\n\n    let package_hash = package_hash\n        .into_package_hash()\n        .map(|package_hash| ContractPackageHash::new(package_hash.value()))\n        .expect(\"must get Key::Hash\");\n\n    let exec_request_2 = ExecuteRequestBuilder::versioned_contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        PackageHash::new(package_hash.value()),\n        None,\n        UNRESTRICTED_CONTRACT_CALLER,\n        runtime_args! 
{\n            PACKAGE_HASH_ARG => package_hash,\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_2).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_call_group_restricted_contract_as_session() {\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! { ARG_TARGET => ACCOUNT_1_ADDR, ARG_AMOUNT => *TRANSFER_1_AMOUNT },\n    )\n    .build();\n\n    let mut builder = setup_from_lmdb_fixture();\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have default contract package\");\n\n    let package_hash = *account\n        .named_keys()\n        .get(PACKAGE_HASH_KEY)\n        .expect(\"should have contract package\");\n    let _access_uref = account\n        .named_keys()\n        .get(PACKAGE_ACCESS_KEY)\n        .expect(\"should have package hash\");\n\n    let package_hash = package_hash\n        .into_package_hash()\n        .map(|package_hash| ContractPackageHash::new(package_hash.value()))\n        .expect(\"must get Key::Hash\");\n\n    // This inserts package as an argument because this test\n    // can work from different accounts which might not have the same keys in their session\n    // code.\n    let exec_request_3 = ExecuteRequestBuilder::versioned_contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        PackageHash::new(package_hash.value()),\n        None,\n        RESTRICTED_CONTRACT_CALLER_AS_SESSION,\n        runtime_args! 
{\n            PACKAGE_HASH_ARG => package_hash,\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_3).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_call_group_restricted_contract_as_session_from_wrong_account() {\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! { ARG_TARGET => ACCOUNT_1_ADDR, ARG_AMOUNT => *TRANSFER_1_AMOUNT },\n    )\n    .build();\n\n    let mut builder = setup_from_lmdb_fixture();\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have contract\");\n\n    let package_hash = *account\n        .named_keys()\n        .get(PACKAGE_HASH_KEY)\n        .expect(\"should have contract package\");\n    let _access_uref = account\n        .named_keys()\n        .get(PACKAGE_ACCESS_KEY)\n        .expect(\"should have package hash\");\n\n    let hash = package_hash\n        .into_package_hash()\n        .expect(\"must convert to package hash\");\n\n    let package_key = package_hash\n        .into_package_hash()\n        .map(|package_hash| ContractPackageHash::new(package_hash.value()))\n        .expect(\"must get Key::Hash\");\n\n    // This inserts package as an argument because this test\n    // can work from different accounts which might not have the same keys in their session\n    // code.\n    let exec_request_3 = ExecuteRequestBuilder::versioned_contract_call_by_hash(\n        ACCOUNT_1_ADDR,\n        hash,\n        Some(CONTRACT_INITIAL_VERSION),\n        RESTRICTED_CONTRACT_CALLER_AS_SESSION,\n        runtime_args! 
{\n            PACKAGE_HASH_ARG => package_key,\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_3).expect_failure();\n\n    let response = builder\n        .get_last_exec_result()\n        .expect(\"should have last response\");\n    let error = response.error().expect(\"should have error\");\n    assert_matches!(error, Error::Exec(ExecError::InvalidContext));\n}\n\n#[ignore]\n#[test]\nfn should_not_call_uncallable_contract_from_deploy() {\n    let mut builder = setup_from_lmdb_fixture();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have default contract package\");\n\n    let package_hash = *account\n        .named_keys()\n        .get(PACKAGE_HASH_KEY)\n        .expect(\"should have contract package\");\n    let _access_uref = account\n        .named_keys()\n        .get(PACKAGE_ACCESS_KEY)\n        .expect(\"should have package hash\");\n\n    let package_hash = package_hash\n        .into_package_hash()\n        .map(|package_hash| Key::Hash(package_hash.value()))\n        .expect(\"must get Key::Hash\");\n\n    // This inserts package as an argument because this test\n    // can work from different accounts which might not have the same keys in their session\n    // code.\n\n    let args = runtime_args! {\n        PACKAGE_HASH_ARG => package_hash,\n    };\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_stored_versioned_contract_by_name(PACKAGE_HASH_KEY, None, UNCALLABLE_SESSION, args)\n        .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([3; 32])\n        .build();\n\n    let exec_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(exec_request_2).commit();\n    let response = builder\n        .get_last_exec_result()\n        .expect(\"should have last response\");\n    let error = response.error().expect(\"should have error\");\n    assert_matches!(error, Error::Exec(ExecError::InvalidContext));\n\n    let args = runtime_args! {\n        PACKAGE_HASH_ARG => package_hash,\n    };\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_stored_versioned_contract_by_name(\n            PACKAGE_HASH_KEY,\n            None,\n            CALL_RESTRICTED_ENTRY_POINTS,\n            args,\n        )\n        .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([6; 32])\n        .build();\n\n    let exec_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(exec_request_3).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_not_call_uncallable_session_from_deploy() {\n    let mut builder = setup_from_lmdb_fixture();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have contract\");\n\n    let package_hash = *account\n        .named_keys()\n        .get(PACKAGE_HASH_KEY)\n        .expect(\"should have contract package\");\n    let _access_uref = account\n        .named_keys()\n        .get(PACKAGE_ACCESS_KEY)\n        .expect(\"should have package hash\");\n\n    let package_hash = package_hash\n        .into_package_hash()\n        .map(|package_hash| Key::Hash(package_hash.value()))\n        .expect(\"must get Key::Hash\");\n\n    // This inserts package as 
an argument because this test\n    // can work from different accounts which might not have the same keys in their session\n    // code.\n    let args = runtime_args! {\n        PACKAGE_HASH_ARG => package_hash,\n    };\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_stored_versioned_contract_by_name(PACKAGE_HASH_KEY, None, UNCALLABLE_CONTRACT, args)\n        .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([3; 32])\n        .build();\n\n    let exec_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(exec_request_2).commit();\n    let response = builder\n        .get_last_exec_result()\n        .expect(\"should have last response\");\n    let error = response.error().expect(\"should have error\");\n    assert_matches!(error, Error::Exec(ExecError::InvalidContext));\n\n    let args = runtime_args! {\n        PACKAGE_HASH_ARG => package_hash,\n    };\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_stored_versioned_contract_by_name(\n            PACKAGE_HASH_KEY,\n            None,\n            CALL_RESTRICTED_ENTRY_POINTS,\n            args,\n        )\n        .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([6; 32])\n        .build();\n\n    let exec_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n    builder.exec(exec_request_3).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_not_call_group_restricted_stored_payment_code_from_invalid_account() {\n    // This test calls a stored payment code that is restricted with a group access using an account\n    // that does not have any of the group urefs in context.\n\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! { ARG_TARGET => ACCOUNT_1_ADDR, ARG_AMOUNT => *TRANSFER_1_AMOUNT },\n    )\n    .build();\n    let mut builder = setup_from_lmdb_fixture();\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have default contract package\");\n\n    let package_hash = account\n        .named_keys()\n        .get(PACKAGE_HASH_KEY)\n        .expect(\"should have contract package\");\n    let _access_uref = account\n        .named_keys()\n        .get(PACKAGE_ACCESS_KEY)\n        .expect(\"should have package hash\");\n\n    let args = runtime_args! 
{\n        \"amount\" => *DEFAULT_PAYMENT,\n    };\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(ACCOUNT_1_ADDR)\n        .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::default())\n        .with_stored_versioned_payment_contract_by_hash(\n            package_hash\n                .into_package_addr()\n                .expect(\"must have created package hash\"),\n            None,\n            \"restricted_standard_payment\",\n            args,\n        )\n        .with_authorization_keys(&[ACCOUNT_1_ADDR])\n        .with_deploy_hash([3; 32])\n        .build();\n\n    let exec_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(exec_request_3).commit();\n\n    let _account = builder\n        .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[])\n        .expect(\"should query account\");\n\n    let response = builder\n        .get_last_exec_result()\n        .expect(\"should have last response\");\n    let error = response.error().expect(\"should have error\");\n    assert_matches!(error, Error::Exec(ExecError::InvalidContext));\n}\n\n#[ignore]\n#[test]\nfn should_call_group_restricted_stored_payment_code() {\n    // This test calls a stored payment code that is restricted with a group access using an account\n    // that contains urefs from the group.\n\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{ ARG_TARGET => ACCOUNT_1_ADDR, ARG_AMOUNT => *TRANSFER_1_AMOUNT },\n    )\n    .build();\n\n    let mut builder = setup_from_lmdb_fixture();\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have default contract package\");\n\n    let package_hash = account\n        .named_keys()\n        .get(PACKAGE_HASH_KEY)\n        .expect(\"should have contract package\");\n    let _access_uref = account\n        .named_keys()\n        .get(PACKAGE_ACCESS_KEY)\n        .expect(\"should have package hash\");\n\n    let args = runtime_args! {\n        \"amount\" => *DEFAULT_PAYMENT,\n    };\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::default())\n        // .with_stored_versioned_contract_by_name(name, version, entry_point, args)\n        .with_stored_versioned_payment_contract_by_hash(\n            package_hash\n                .into_package_addr()\n                .expect(\"must have created package hash\"),\n            None,\n            \"restricted_standard_payment\",\n            args,\n        )\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([3; 32])\n        .build();\n\n    let exec_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(exec_request_3).expect_success().commit();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/host_function_costs.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{bytesrepr::Bytes, runtime_args, AddressableEntityHash, RuntimeArgs};\n\nconst HOST_FUNCTION_COSTS_NAME: &str = \"host_function_costs.wasm\";\nconst CONTRACT_KEY_NAME: &str = \"contract\";\n\nconst DO_NOTHING_NAME: &str = \"do_nothing\";\nconst DO_SOMETHING_NAME: &str = \"do_something\";\nconst CALLS_DO_NOTHING_LEVEL1_NAME: &str = \"calls_do_nothing_level1\";\nconst CALLS_DO_NOTHING_LEVEL2_NAME: &str = \"calls_do_nothing_level2\";\nconst ARG_BYTES: &str = \"bytes\";\nconst ARG_SIZE_FUNCTION_CALL_1_NAME: &str = \"arg_size_function_call_1\";\nconst ARG_SIZE_FUNCTION_CALL_100_NAME: &str = \"arg_size_function_call_100\";\n\n#[ignore]\n#[test]\nfn should_measure_gas_cost() {\n    // This test runs a contract that's after every call extends the same key with\n    // more data\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        HOST_FUNCTION_COSTS_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    // Create Accounts\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let contract_hash: AddressableEntityHash = account\n        .named_keys()\n        .get(CONTRACT_KEY_NAME)\n        .expect(\"contract hash\")\n        .into_entity_hash_addr()\n        .expect(\"should be hash\")\n        .into();\n\n    //\n    // Measure do nothing\n    //\n\n    let exec_request_2 = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        DO_NOTHING_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    
builder.exec(exec_request_2).expect_success().commit();\n\n    let do_nothing_cost = builder.last_exec_gas_consumed().value();\n\n    //\n    // Measure opcodes (doing something)\n    //\n    let exec_request_2 = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        DO_SOMETHING_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let do_something_cost = builder.last_exec_gas_consumed().value();\n    assert!(\n        !do_something_cost.is_zero(),\n        \"executing nothing should cost zero\"\n    );\n    assert!(do_something_cost > do_nothing_cost);\n}\n\n#[ignore]\n#[test]\nfn should_measure_nested_host_function_call_cost() {\n    // This test runs a contract that's after every call extends the same key with\n    // more data\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        HOST_FUNCTION_COSTS_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    // Create Accounts\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let contract_hash: AddressableEntityHash = account\n        .named_keys()\n        .get(CONTRACT_KEY_NAME)\n        .expect(\"contract hash\")\n        .into_entity_hash_addr()\n        .expect(\"should be hash\")\n        .into();\n\n    //\n    // Measure level 1 - nested call to 'do nothing'\n    //\n\n    let exec_request_2 = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        CALLS_DO_NOTHING_LEVEL1_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    
builder.exec(exec_request_2).expect_success().commit();\n    let level_1_cost = builder.last_exec_gas_consumed().value();\n\n    assert!(\n        !level_1_cost.is_zero(),\n        \"executing nested call should not cost zero\"\n    );\n\n    //\n    // Measure level 2 - call to an entrypoint that calls 'do nothing'\n    //\n\n    let exec_request_3 = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        CALLS_DO_NOTHING_LEVEL2_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request_3).expect_success().commit();\n    let level_2_cost = builder.last_exec_gas_consumed().value();\n\n    assert!(\n        !level_2_cost.is_zero(),\n        \"executing nested call should not cost zero\"\n    );\n\n    assert!(\n        level_2_cost > level_1_cost,\n        \"call to level2 should be greater than level1 call but {} <= {}\",\n        level_2_cost,\n        level_1_cost,\n    );\n}\n\n#[ignore]\n#[test]\nfn should_measure_argument_size_in_host_function_call() {\n    // Checks if calling a contract with large arguments affects costs\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        HOST_FUNCTION_COSTS_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    // Create Accounts\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let contract_hash: AddressableEntityHash = account\n        .named_keys()\n        .get(CONTRACT_KEY_NAME)\n        .expect(\"contract hash\")\n        .into_entity_hash_addr()\n        .expect(\"should be hash\")\n        .into();\n\n    //\n    // Measurement 1 - empty vector (argument with 0 bytes value)\n    //\n    let 
exec_request_2 = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        ARG_SIZE_FUNCTION_CALL_1_NAME,\n        runtime_args! {\n            ARG_BYTES => Bytes::new(),\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_2).expect_success().commit();\n    let call_1_cost = builder.last_exec_gas_consumed().value();\n\n    assert!(\n        !call_1_cost.is_zero(),\n        \"executing nested call should not cost zero\"\n    );\n\n    //\n    // Measurement  level 2 - argument that's vector of 100 bytes\n    //\n\n    let exec_request_3 = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        ARG_SIZE_FUNCTION_CALL_100_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request_3).expect_success().commit();\n    let call_2_cost = builder.last_exec_gas_consumed().value();\n\n    assert!(\n        call_2_cost > call_1_cost,\n        \"call 1 {} call 2 {}\",\n        call_1_cost,\n        call_2_cost\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/manage_groups.rs",
    "content": "use std::collections::BTreeSet;\n\nuse assert_matches::assert_matches;\nuse once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::{engine_state::Error, execution::ExecError};\nuse casper_types::{\n    addressable_entity::{self, MAX_GROUPS},\n    runtime_args, Group, RuntimeArgs, ENTITY_INITIAL_VERSION,\n};\n\nconst CONTRACT_GROUPS: &str = \"manage_groups.wasm\";\nconst PACKAGE_HASH_KEY: &str = \"package_hash_key\";\nconst PACKAGE_ACCESS_KEY: &str = \"package_access_key\";\nconst CREATE_GROUP: &str = \"create_group\";\nconst REMOVE_GROUP: &str = \"remove_group\";\nconst EXTEND_GROUP_UREFS: &str = \"extend_group_urefs\";\nconst REMOVE_GROUP_UREFS: &str = \"remove_group_urefs\";\nconst GROUP_NAME_ARG: &str = \"group_name\";\nconst NEW_UREFS_COUNT: u64 = 3;\nconst GROUP_1_NAME: &str = \"Group 1\";\nconst TOTAL_NEW_UREFS_ARG: &str = \"total_new_urefs\";\nconst TOTAL_EXISTING_UREFS_ARG: &str = \"total_existing_urefs\";\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_UREF_INDICES: &str = \"uref_indices\";\n\nstatic DEFAULT_CREATE_GROUP_ARGS: Lazy<RuntimeArgs> = Lazy::new(|| {\n    runtime_args! 
{\n        GROUP_NAME_ARG => GROUP_1_NAME,\n        TOTAL_NEW_UREFS_ARG => 1u64,\n        TOTAL_EXISTING_UREFS_ARG => 1u64,\n    }\n});\n\n#[ignore]\n#[test]\nfn should_create_and_remove_group() {\n    // This test runs a contract that's after every call extends the same key with\n    // more data\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_GROUPS,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let entity = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have entity\");\n\n    let package_hash = entity\n        .named_keys()\n        .get(PACKAGE_HASH_KEY)\n        .expect(\"should have package\");\n    let _access_uref = entity\n        .named_keys()\n        .get(PACKAGE_ACCESS_KEY)\n        .expect(\"should have package hash\");\n\n    let exec_request_2 = {\n        // This inserts package as an argument because this test\n        // can work from different accounts which might not have the same keys in their session\n        // code.\n        let deploy = DeployItemBuilder::new()\n            .with_address(*DEFAULT_ACCOUNT_ADDR)\n            .with_stored_versioned_contract_by_name(\n                PACKAGE_HASH_KEY,\n                Some(ENTITY_INITIAL_VERSION),\n                CREATE_GROUP,\n                DEFAULT_CREATE_GROUP_ARGS.clone(),\n            )\n            .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT })\n            .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n            .with_deploy_hash([3; 32])\n            .build();\n\n        ExecuteRequestBuilder::from_deploy_item(&deploy).build()\n    };\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let query_result = builder\n        .query(None, *package_hash, &[])\n        .expect(\"should have result\");\n    let contract_package = query_result\n        .as_contract_package()\n        .expect(\"should be package\");\n    assert_eq!(contract_package.groups().len(), 1);\n    let group_1 = contract_package\n        .groups()\n        .get(&Group::new(GROUP_1_NAME))\n        .expect(\"should have group\");\n    assert_eq!(group_1.len(), 2);\n\n    let exec_request_3 = {\n        // This inserts package as an argument because this test\n        // can work from different accounts which might not have the same keys in their session\n        // code.\n        let args = runtime_args! {\n            GROUP_NAME_ARG => GROUP_1_NAME,\n        };\n        let deploy = DeployItemBuilder::new()\n            .with_address(*DEFAULT_ACCOUNT_ADDR)\n            .with_stored_versioned_contract_by_name(\n                PACKAGE_HASH_KEY,\n                Some(ENTITY_INITIAL_VERSION),\n                REMOVE_GROUP,\n                args,\n            )\n            .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT })\n            .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n            .with_deploy_hash([3; 32])\n            .build();\n\n        ExecuteRequestBuilder::from_deploy_item(&deploy).build()\n    };\n\n    builder.exec(exec_request_3).expect_success().commit();\n\n    let query_result = builder\n        .query(None, *package_hash, &[])\n        .expect(\"should have result\");\n    let contract_package = query_result\n        .as_contract_package()\n        .expect(\"should be package\");\n    assert_eq!(\n        contract_package.groups().get(&Group::new(GROUP_1_NAME)),\n        None\n    );\n}\n\n#[ignore]\n#[test]\nfn should_create_and_extend_user_group() {\n    // This test runs a contract that's after every call extends the same key with\n    // more data\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_GROUPS,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have contract\");\n\n    let package_hash = account\n        .named_keys()\n        .get(PACKAGE_HASH_KEY)\n        .expect(\"should have contract package\");\n    let _access_uref = account\n        .named_keys()\n        .get(PACKAGE_ACCESS_KEY)\n        .expect(\"should have package hash\");\n\n    let exec_request_2 = {\n        // This inserts package as an argument because this test\n        // can work from different accounts which might not have the same keys in their session\n        // code.\n        let deploy = DeployItemBuilder::new()\n            .with_address(*DEFAULT_ACCOUNT_ADDR)\n            .with_stored_versioned_contract_by_name(\n                PACKAGE_HASH_KEY,\n               
 Some(ENTITY_INITIAL_VERSION),\n                CREATE_GROUP,\n                DEFAULT_CREATE_GROUP_ARGS.clone(),\n            )\n            .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT })\n            .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n            .with_deploy_hash([5; 32])\n            .build();\n\n        ExecuteRequestBuilder::from_deploy_item(&deploy).build()\n    };\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let query_result = builder\n        .query(None, *package_hash, &[])\n        .expect(\"should have result\");\n    let contract_package = query_result\n        .as_contract_package()\n        .expect(\"should be package\");\n    assert_eq!(contract_package.groups().len(), 1);\n    let group_1 = contract_package\n        .groups()\n        .get(&Group::new(GROUP_1_NAME))\n        .expect(\"should have group\");\n    assert_eq!(group_1.len(), 2);\n\n    let exec_request_3 = {\n        // This inserts package as an argument because this test\n        // can work from different accounts which might not have the same keys in their session\n        // code.\n        let args = runtime_args! {\n            GROUP_NAME_ARG => GROUP_1_NAME,\n            TOTAL_NEW_UREFS_ARG => NEW_UREFS_COUNT,\n        };\n        let deploy = DeployItemBuilder::new()\n            .with_address(*DEFAULT_ACCOUNT_ADDR)\n            .with_stored_versioned_contract_by_name(\n                PACKAGE_HASH_KEY,\n                Some(ENTITY_INITIAL_VERSION),\n                EXTEND_GROUP_UREFS,\n                args,\n            )\n            .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT })\n            .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n            .with_deploy_hash([3; 32])\n            .build();\n\n        ExecuteRequestBuilder::from_deploy_item(&deploy).build()\n    };\n\n    builder.exec(exec_request_3).expect_success().commit();\n\n    let query_result = builder\n        .query(None, *package_hash, &[])\n        .expect(\"should have result\");\n    let contract_package = query_result\n        .as_contract_package()\n        .expect(\"should be package\");\n    let group_1_extended = contract_package\n        .groups()\n        .get(&Group::new(GROUP_1_NAME))\n        .expect(\"should have group\");\n    assert!(group_1_extended.len() > group_1.len());\n    // Calculates how many new urefs were created\n    let new_urefs: BTreeSet<_> = group_1_extended.difference(group_1).collect();\n    assert_eq!(new_urefs.len(), NEW_UREFS_COUNT as usize);\n}\n\n#[ignore]\n#[test]\nfn should_create_and_remove_urefs_from_group() {\n    // This test runs a contract that's after every call extends the same key with\n    // more data\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_GROUPS,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have contract\");\n    let package_hash = account\n        .named_keys()\n        .get(PACKAGE_HASH_KEY)\n        .expect(\"should have contract package\");\n    let _access_uref = account\n        .named_keys()\n        .get(PACKAGE_ACCESS_KEY)\n        .expect(\"should have package hash\");\n\n    let exec_request_2 = {\n        // This inserts package as an argument because this test\n        // can work from 
different accounts which might not have the same keys in their session\n        // code.\n        let deploy = DeployItemBuilder::new()\n            .with_address(*DEFAULT_ACCOUNT_ADDR)\n            .with_stored_versioned_contract_by_name(\n                PACKAGE_HASH_KEY,\n                Some(ENTITY_INITIAL_VERSION),\n                CREATE_GROUP,\n                DEFAULT_CREATE_GROUP_ARGS.clone(),\n            )\n            .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT })\n            .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n            .with_deploy_hash([3; 32])\n            .build();\n\n        ExecuteRequestBuilder::from_deploy_item(&deploy).build()\n    };\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let query_result = builder\n        .query(None, *package_hash, &[])\n        .expect(\"should have result\");\n    let contract_package = query_result\n        .as_contract_package()\n        .expect(\"should be package\");\n    assert_eq!(contract_package.groups().len(), 1);\n    let group_1 = contract_package\n        .groups()\n        .get(&Group::new(GROUP_1_NAME))\n        .expect(\"should have group\");\n    assert_eq!(group_1.len(), 2);\n\n    let exec_request_3 = {\n        // This inserts package as an argument because this test can work from different accounts\n        // which might not have the same keys in their session code.\n        let args = runtime_args! {\n            GROUP_NAME_ARG => GROUP_1_NAME,\n            // We're passing indices of urefs inside a group rather than URef values as group urefs\n            // aren't part of the access rights. 
This test will read a ContractPackage instance, get\n            // the group by its name, and remove URefs by their indices.\n            ARG_UREF_INDICES => vec![0u64, 1u64],\n        };\n        let deploy = DeployItemBuilder::new()\n            .with_address(*DEFAULT_ACCOUNT_ADDR)\n            .with_stored_versioned_contract_by_name(\n                PACKAGE_HASH_KEY,\n                Some(ENTITY_INITIAL_VERSION),\n                REMOVE_GROUP_UREFS,\n                args,\n            )\n            .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT })\n            .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n            .with_deploy_hash([3; 32])\n            .build();\n\n        ExecuteRequestBuilder::from_deploy_item(&deploy).build()\n    };\n\n    builder.exec(exec_request_3).expect_success().commit();\n\n    let query_result = builder\n        .query(None, *package_hash, &[])\n        .expect(\"should have result\");\n    let contract_package = query_result\n        .as_contract_package()\n        .expect(\"should be package\");\n    let group_1_modified = contract_package\n        .groups()\n        .get(&Group::new(GROUP_1_NAME))\n        .expect(\"should have group 1\");\n    assert!(group_1_modified.len() < group_1.len());\n}\n\n#[ignore]\n#[test]\nfn should_limit_max_urefs_while_extending() {\n    // This test runs a contract that's after every call extends the same key with\n    // more data\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_GROUPS,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have contract\");\n    let package_hash = account\n        
.named_keys()\n        .get(PACKAGE_HASH_KEY)\n        .expect(\"should have contract package\");\n    let _access_uref = account\n        .named_keys()\n        .get(PACKAGE_ACCESS_KEY)\n        .expect(\"should have package hash\");\n\n    let exec_request_2 = {\n        // This inserts package as an argument because this test\n        // can work from different accounts which might not have the same keys in their session\n        // code.\n        let deploy = DeployItemBuilder::new()\n            .with_address(*DEFAULT_ACCOUNT_ADDR)\n            .with_stored_versioned_contract_by_name(\n                PACKAGE_HASH_KEY,\n                Some(ENTITY_INITIAL_VERSION),\n                CREATE_GROUP,\n                DEFAULT_CREATE_GROUP_ARGS.clone(),\n            )\n            .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT })\n            .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n            .with_deploy_hash([3; 32])\n            .build();\n\n        ExecuteRequestBuilder::from_deploy_item(&deploy).build()\n    };\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let query_result = builder\n        .query(None, *package_hash, &[])\n        .expect(\"should have result\");\n    let contract_package = query_result\n        .as_contract_package()\n        .expect(\"should be package\");\n    assert_eq!(contract_package.groups().len(), 1);\n    let group_1 = contract_package\n        .groups()\n        .get(&Group::new(GROUP_1_NAME))\n        .expect(\"should have group\");\n    assert_eq!(group_1.len(), 2);\n\n    let exec_request_3 = {\n        // This inserts package as an argument because this test\n        // can work from different accounts which might not have the same keys in their session\n        // code.\n        let args = runtime_args! 
{\n            GROUP_NAME_ARG => GROUP_1_NAME,\n            TOTAL_NEW_UREFS_ARG => 8u64,\n        };\n        let deploy = DeployItemBuilder::new()\n            .with_address(*DEFAULT_ACCOUNT_ADDR)\n            .with_stored_versioned_contract_by_name(\n                PACKAGE_HASH_KEY,\n                Some(ENTITY_INITIAL_VERSION),\n                EXTEND_GROUP_UREFS,\n                args,\n            )\n            .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT })\n            .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n            .with_deploy_hash([5; 32])\n            .build();\n\n        ExecuteRequestBuilder::from_deploy_item(&deploy).build()\n    };\n\n    let exec_request_4 = {\n        // This inserts package as an argument because this test\n        // can work from different accounts which might not have the same keys in their session\n        // code.\n        let args = runtime_args! {\n            GROUP_NAME_ARG => GROUP_1_NAME,\n            // Exceeds by 1\n            TOTAL_NEW_UREFS_ARG => 1u64,\n        };\n        let deploy = DeployItemBuilder::new()\n            .with_address(*DEFAULT_ACCOUNT_ADDR)\n            .with_stored_versioned_contract_by_name(\n                PACKAGE_HASH_KEY,\n                Some(ENTITY_INITIAL_VERSION),\n                EXTEND_GROUP_UREFS,\n                args,\n            )\n            .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT })\n            .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n            .with_deploy_hash([32; 32])\n            .build();\n\n        ExecuteRequestBuilder::from_deploy_item(&deploy).build()\n    };\n\n    builder.exec(exec_request_3).expect_success().commit();\n\n    let query_result = builder\n        .query(None, *package_hash, &[])\n        .expect(\"should have result\");\n    let contract_package = query_result\n        .as_contract_package()\n        .expect(\"should be package\");\n    let group_1_modified = contract_package\n        .groups()\n        .get(&Group::new(GROUP_1_NAME))\n        .expect(\"should have group 1\");\n    assert_eq!(group_1_modified.len(), MAX_GROUPS as usize);\n\n    // Tries to exceed the limit by 1\n    builder.exec(exec_request_4).commit();\n\n    let exec_response = builder\n        .get_last_exec_result()\n        .expect(\"should have last response\");\n    let error = exec_response.error().expect(\"should have error\");\n    let error = assert_matches!(error, Error::Exec(ExecError::Revert(e)) => e);\n    assert_eq!(\n        error,\n        &addressable_entity::Error::MaxTotalURefsExceeded.into()\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/mod.rs",
    "content": "mod calling_packages_by_version_query;\nmod chainspec_registry;\nmod check_transfer_success;\nmod contract_api;\nmod contract_context;\nmod contract_messages;\nmod counter_factory;\nmod deploy;\nmod explorer;\nmod get_balance;\nmod groups;\nmod host_function_costs;\nmod manage_groups;\nmod private_chain;\nmod regression;\nmod stack_overflow;\nmod step;\nmod storage_costs;\nmod system_contracts;\nmod system_costs;\nmod tutorial;\nmod upgrade;\nmod wasmless_transfer;\n\n// NOTE: the original execution engine also handled charging for gas costs\n// and these integration tests commonly would, in addition to other behavior being tested,\n// also check that expected payment handling was being done.\n// As of 2.0 compliant execution engines no longer handle payment...\n// all payment handling is done in the node prior to engaging native logic or an execution target\n// and all testing of payment handling occurs within the node tests.\n// Thus these ee integration tests cannot (and should not) test changes to balances related\n// to costs as they once did. Instead they should (and only can) test that gas limits are\n// correctly applied and enforced and that non-cost transfers work properly.\n// Because many tests included balance checks with expectations around payment handling in\n// addition to whatever else they were testing, they required adjustment.\n// In some cases the names of the tests included terms such as 'should_charge_' or 'should_cost_'\n// which is no longer true and require the name of the test be adjusted to reflect the new reality.\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/private_chain/fees_accumulation.rs",
    "content": "use std::collections::BTreeSet;\n\nuse casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder, UpgradeRequestBuilder,\n    DEFAULT_ACCOUNT_ADDR, DEFAULT_BLOCK_TIME, DEFAULT_PROPOSER_ADDR, DEFAULT_PROTOCOL_VERSION,\n    LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse casper_types::{\n    account::AccountHash, system::handle_payment::ACCUMULATION_PURSE_KEY, EntityAddr, EraId,\n    FeeHandling, Key, ProtocolVersion, RuntimeArgs, U512,\n};\n\nuse crate::{\n    lmdb_fixture,\n    test::private_chain::{self, ACCOUNT_1_ADDR, DEFAULT_ADMIN_ACCOUNT_ADDR},\n    wasm_utils,\n};\n\nconst OLD_PROTOCOL_VERSION: ProtocolVersion = DEFAULT_PROTOCOL_VERSION;\nconst NEW_PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::from_parts(\n    OLD_PROTOCOL_VERSION.value().major,\n    OLD_PROTOCOL_VERSION.value().minor,\n    OLD_PROTOCOL_VERSION.value().patch + 1,\n);\n\n#[ignore]\n#[test]\nfn default_genesis_config_should_not_have_rewards_purse() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let handle_payment = builder.get_handle_payment_contract_hash();\n    let handle_payment_contract =\n        builder.get_named_keys(EntityAddr::System(handle_payment.value()));\n\n    assert!(\n        handle_payment_contract.contains(ACCUMULATION_PURSE_KEY),\n        \"Did not find rewards purse in handle payment's named keys {:?}\",\n        handle_payment_contract\n    );\n}\n\n#[ignore]\n#[test]\nfn should_finalize_and_accumulate_rewards_purse() {\n    let mut builder = private_chain::setup_genesis_only();\n\n    let handle_payment = builder.get_handle_payment_contract_hash();\n    let handle_payment_1 = builder.get_named_keys(EntityAddr::System(handle_payment.value()));\n\n    let rewards_purse_key = handle_payment_1\n        .get(ACCUMULATION_PURSE_KEY)\n        .expect(\"should have rewards purse\");\n    let rewards_purse_uref = 
rewards_purse_key.into_uref().expect(\"should be uref\");\n    assert_eq!(builder.get_purse_balance(rewards_purse_uref), U512::zero());\n\n    let exec_request_1 = ExecuteRequestBuilder::module_bytes(\n        *DEFAULT_ADMIN_ACCOUNT_ADDR,\n        wasm_utils::do_minimum_bytes(),\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let handle_payment_2 = builder.get_named_keys(EntityAddr::System(handle_payment.value()));\n\n    assert_eq!(\n        handle_payment_1, handle_payment_2,\n        \"none of the named keys should change before and after execution\"\n    );\n\n    let _transfer_request =\n        TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, *ACCOUNT_1_ADDR)\n            .with_initiator(*DEFAULT_ADMIN_ACCOUNT_ADDR)\n            .build();\n}\n\n#[ignore]\n#[allow(unused)]\n// #[test]\nfn should_accumulate_deploy_fees() {\n    let mut builder = super::private_chain_setup();\n\n    // Check handle payments has rewards purse\n    let handle_payment_hash = builder.get_handle_payment_contract_hash();\n    let handle_payment_contract =\n        builder.get_named_keys(EntityAddr::System(handle_payment_hash.value()));\n\n    let rewards_purse = handle_payment_contract\n        .get(ACCUMULATION_PURSE_KEY)\n        .unwrap()\n        .into_uref()\n        .expect(\"should be uref\");\n\n    // At this point rewards purse balance is not zero as the `private_chain_setup` executes bunch\n    // of deploys before\n    let rewards_balance_before = builder.get_purse_balance(rewards_purse);\n\n    let exec_request = ExecuteRequestBuilder::module_bytes(\n        *DEFAULT_ADMIN_ACCOUNT_ADDR,\n        wasm_utils::do_minimum_bytes(),\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    // let exec_request_proposer = exec_request.proposer.clone();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let handle_payment_after =\n        
builder.get_named_keys(EntityAddr::System(handle_payment_hash.value()));\n\n    assert_eq!(\n        handle_payment_after.get(ACCUMULATION_PURSE_KEY),\n        handle_payment_contract.get(ACCUMULATION_PURSE_KEY),\n        \"keys should not change before and after deploy has been processed\",\n    );\n\n    let rewards_purse = handle_payment_contract\n        .get(ACCUMULATION_PURSE_KEY)\n        .unwrap()\n        .into_uref()\n        .expect(\"should be uref\");\n    let rewards_balance_after = builder.get_purse_balance(rewards_purse);\n    assert!(\n        rewards_balance_after > rewards_balance_before,\n        \"rewards balance should increase\"\n    );\n\n    // // Ensures default proposer didn't receive any funds\n    // let proposer_account = builder\n    //     .get_entity_by_account_hash(exec_request_proposer.to_account_hash())\n    //     .expect(\"should have proposer account\");\n    //\n    // assert_eq!(\n    //     builder.get_purse_balance(proposer_account.main_purse()),\n    //     U512::zero()\n    // );\n}\n\n#[ignore]\n#[allow(unused)]\n// #[test]\nfn should_distribute_accumulated_fees_to_admins() {\n    let mut builder = super::private_chain_setup();\n\n    let handle_payment_hash = builder.get_handle_payment_contract_hash();\n    let handle_payment = builder.get_named_keys(EntityAddr::System(handle_payment_hash.value()));\n\n    let accumulation_purse = handle_payment\n        .get(ACCUMULATION_PURSE_KEY)\n        .expect(\"handle payment should have named key\")\n        .into_uref()\n        .expect(\"accumulation purse should be an uref\");\n\n    let exec_request_1 = ExecuteRequestBuilder::module_bytes(\n        *DEFAULT_ADMIN_ACCOUNT_ADDR,\n        wasm_utils::do_minimum_bytes(),\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let accumulated_purse_balance_before_exec = builder.get_purse_balance(accumulation_purse);\n    assert!(accumulated_purse_balance_before_exec.is_zero());\n\n    
builder.exec(exec_request_1).expect_success().commit();\n\n    // At this point rewards purse balance is not zero as the `private_chain_setup` executes bunch\n    // of deploys before\n    let accumulated_purse_balance_after_exec = builder.get_purse_balance(accumulation_purse);\n    assert!(!accumulated_purse_balance_after_exec.is_zero());\n\n    let admin = builder\n        .get_entity_by_account_hash(*DEFAULT_ADMIN_ACCOUNT_ADDR)\n        .expect(\"should have admin account\");\n    let admin_balance_before = builder.get_purse_balance(admin.main_purse());\n\n    let mut administrative_accounts: BTreeSet<AccountHash> = BTreeSet::new();\n    administrative_accounts.insert(*DEFAULT_ADMIN_ACCOUNT_ADDR);\n\n    let result = builder.distribute_fees(None, DEFAULT_PROTOCOL_VERSION, DEFAULT_BLOCK_TIME);\n\n    assert!(result.is_success(), \"expected success not: {:?}\", result);\n\n    let accumulated_purse_balance_after_distribute = builder.get_purse_balance(accumulation_purse);\n\n    assert!(\n        accumulated_purse_balance_after_distribute < accumulated_purse_balance_after_exec,\n        \"accumulated purse balance should be distributed ({} >= {})\",\n        accumulated_purse_balance_after_distribute,\n        accumulated_purse_balance_after_exec\n    );\n\n    let admin_balance_after = builder.get_purse_balance(admin.main_purse());\n\n    assert!(\n        admin_balance_after > admin_balance_before,\n        \"admin balance should grow after distributing accumulated purse\"\n    );\n}\n\n#[ignore]\n#[allow(unused)]\n// #[test]\nfn should_accumulate_fees_after_upgrade() {\n    let (mut builder, _lmdb_fixture_state, _temp_dir) =\n        lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_4_5);\n\n    // Ensures default proposer didn't receive any funds\n    let proposer_account = builder\n        .query(None, Key::Account(*DEFAULT_PROPOSER_ADDR), &[])\n        .expect(\"should have proposer account\")\n        .into_account()\n        
.expect(\"should have legacy Account under the Key::Account variant\");\n\n    let proposer_balance_before = builder.get_purse_balance(proposer_account.main_purse());\n\n    // Check handle payments has rewards purse\n    let handle_payment_hash = builder.get_handle_payment_contract_hash();\n\n    let handle_payment_contract = builder\n        .query(None, Key::Hash(handle_payment_hash.value()), &[])\n        .expect(\"should have handle payment contract\")\n        .into_contract()\n        .expect(\"should have legacy Contract under the Key::Contract variant\");\n\n    assert!(\n        handle_payment_contract\n            .named_keys()\n            .get(ACCUMULATION_PURSE_KEY)\n            .is_none(),\n        \"should not have accumulation purse in a persisted state\"\n    );\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(OLD_PROTOCOL_VERSION)\n            .with_new_protocol_version(NEW_PROTOCOL_VERSION)\n            .with_activation_point(EraId::default())\n            .with_fee_handling(FeeHandling::Accumulate)\n            .build()\n    };\n\n    let updated_chainspec = builder\n        .chainspec()\n        .clone()\n        .with_fee_handling(FeeHandling::Accumulate);\n\n    builder.with_chainspec(updated_chainspec);\n\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n    // Check handle payments has rewards purse\n    let handle_payment_hash = builder.get_handle_payment_contract_hash();\n    let handle_payment_contract =\n        builder.get_named_keys(EntityAddr::System(handle_payment_hash.value()));\n    let rewards_purse = handle_payment_contract\n        .get(ACCUMULATION_PURSE_KEY)\n        .expect(\"should have accumulation purse\")\n        .into_uref()\n        .expect(\"should be uref\");\n\n    // At this point rewards purse balance is not zero as the `private_chain_setup` executes bunch\n    // of deploys before\n    let 
rewards_balance_before = builder.get_purse_balance(rewards_purse);\n\n    let exec_request = ExecuteRequestBuilder::module_bytes(\n        *DEFAULT_ACCOUNT_ADDR,\n        wasm_utils::do_minimum_bytes(),\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let handle_payment_after =\n        builder.get_named_keys(EntityAddr::System(handle_payment_hash.value()));\n\n    assert_eq!(\n        handle_payment_after.get(ACCUMULATION_PURSE_KEY),\n        handle_payment_contract.get(ACCUMULATION_PURSE_KEY),\n        \"keys should not change before and after deploy has been processed\",\n    );\n\n    let rewards_purse = handle_payment_contract\n        .get(ACCUMULATION_PURSE_KEY)\n        .unwrap()\n        .into_uref()\n        .expect(\"should be uref\");\n    let rewards_balance_after = builder.get_purse_balance(rewards_purse);\n    assert!(\n        rewards_balance_after > rewards_balance_before,\n        \"rewards balance should increase\"\n    );\n\n    let proposer_balance_after = builder.get_purse_balance(proposer_account.main_purse());\n    assert_eq!(\n        proposer_balance_before, proposer_balance_after,\n        \"proposer should not receive any more funds after switching to accumulation\"\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/private_chain/management.rs",
    "content": "use casper_engine_test_support::{\n    ChainspecConfig, DeployItemBuilder, ExecuteRequest, ExecuteRequestBuilder, LmdbWasmTestBuilder,\n    TransferRequestBuilder, DEFAULT_AUCTION_DELAY, DEFAULT_CHAINSPEC_REGISTRY,\n    DEFAULT_GENESIS_CONFIG_HASH, DEFAULT_GENESIS_TIMESTAMP_MILLIS,\n    DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_PAYMENT, DEFAULT_PROTOCOL_VERSION,\n    DEFAULT_ROUND_SEIGNIORAGE_RATE, DEFAULT_STORAGE_COSTS, DEFAULT_SYSTEM_CONFIG,\n    DEFAULT_UNBONDING_DELAY, DEFAULT_VALIDATOR_SLOTS, DEFAULT_WASM_CONFIG,\n};\nuse casper_execution_engine::{engine_state::Error, execution::ExecError};\nuse casper_storage::{data_access_layer::GenesisRequest, tracking_copy::TrackingCopyError};\nuse casper_types::{\n    account::{AccountHash, Weight},\n    bytesrepr::ToBytes,\n    runtime_args,\n    system::{\n        auction::{self, DelegationRate},\n        mint,\n        standard_payment::{self, ARG_AMOUNT},\n    },\n    AddressableEntityHash, ApiError, CLType, CLValue, CoreConfig, EntityAddr, GenesisAccount, Key,\n    Package, PackageHash, RuntimeArgs, U512,\n};\nuse tempfile::TempDir;\n\nuse crate::{\n    test::private_chain::{\n        self, ACCOUNT_2_ADDR, ADMIN_1_ACCOUNT_ADDR, PRIVATE_CHAIN_ALLOW_AUCTION_BIDS,\n        PRIVATE_CHAIN_COMPUTE_REWARDS, VALIDATOR_1_PUBLIC_KEY,\n    },\n    wasm_utils, GenesisConfigBuilder,\n};\n\nuse super::{\n    ACCOUNT_1_ADDR, ACCOUNT_1_PUBLIC_KEY, DEFAULT_ADMIN_ACCOUNT_ADDR,\n    PRIVATE_CHAIN_DEFAULT_ACCOUNTS, PRIVATE_CHAIN_FEE_HANDLING,\n    PRIVATE_CHAIN_GENESIS_ADMIN_ACCOUNTS, PRIVATE_CHAIN_GENESIS_ADMIN_SET,\n    PRIVATE_CHAIN_REFUND_HANDLING,\n};\n\nconst ADD_ASSOCIATED_KEY_CONTRACT: &str = \"add_associated_key.wasm\";\nconst REMOVE_ASSOCIATED_KEY_CONTRACT: &str = \"remove_associated_key.wasm\";\nconst SET_ACTION_THRESHOLDS_CONTRACT: &str = \"set_action_thresholds.wasm\";\nconst UPDATE_ASSOCIATED_KEY_CONTRACT: &str = \"update_associated_key.wasm\";\nconst DISABLE_CONTRACT: &str = 
\"disable_contract.wasm\";\nconst ENABLE_CONTRACT: &str = \"enable_contract.wasm\";\nconst TRANSFER_TO_ACCOUNT_CONTRACT: &&str = &\"transfer_to_account.wasm\";\nconst ARG_CONTRACT_PACKAGE_HASH: &str = \"contract_package_hash\";\nconst ARG_CONTRACT_HASH: &str = \"contract_hash\";\n\nconst ARG_ACCOUNT: &str = \"account\";\nconst ARG_WEIGHT: &str = \"weight\";\n\nconst ARG_KEY_MANAGEMENT_THRESHOLD: &str = \"key_management_threshold\";\nconst ARG_DEPLOY_THRESHOLD: &str = \"deploy_threshold\";\nconst DO_NOTHING_HASH_NAME: &str = \"do_nothing_hash\";\n\nconst DO_NOTHING_STORED_CONTRACT: &str = \"do_nothing_stored.wasm\";\nconst CALL_CONTRACT_PROXY: &str = \"call_contract.wasm\";\nconst DELEGATE_ENTRYPOINT: &str = \"delegate\";\n\nconst TEST_PAYMENT_STORED_CONTRACT: &str = \"test_payment_stored.wasm\";\nconst TEST_PAYMENT_STORED_HASH_NAME: &str = \"test_payment_hash\";\nconst PAY_ENTRYPOINT: &str = \"pay\";\n\n#[should_panic(expected = \"DuplicatedAdministratorEntry\")]\n#[ignore]\n#[test]\nfn should_not_run_genesis_with_duplicated_administrator_accounts() {\n    let core_config = CoreConfig {\n        administrators: PRIVATE_CHAIN_GENESIS_ADMIN_SET.clone(),\n        ..Default::default()\n    };\n    let chainspec = ChainspecConfig {\n        core_config,\n        wasm_config: Default::default(),\n        system_costs_config: Default::default(),\n        storage_costs: Default::default(),\n    };\n\n    let data_dir = TempDir::new().expect(\"should create temp dir\");\n    let mut builder = LmdbWasmTestBuilder::new_with_config(data_dir.as_ref(), chainspec);\n\n    let duplicated_administrator_accounts = {\n        let mut accounts = PRIVATE_CHAIN_DEFAULT_ACCOUNTS.clone();\n\n        let genesis_admins = PRIVATE_CHAIN_GENESIS_ADMIN_ACCOUNTS\n            .clone()\n            .into_iter()\n            .map(GenesisAccount::from);\n        accounts.extend(genesis_admins);\n        accounts\n    };\n\n    let genesis_config = GenesisConfigBuilder::default()\n        
.with_accounts(duplicated_administrator_accounts)\n        .with_wasm_config(*DEFAULT_WASM_CONFIG)\n        .with_system_config(*DEFAULT_SYSTEM_CONFIG)\n        .with_validator_slots(DEFAULT_VALIDATOR_SLOTS)\n        .with_auction_delay(DEFAULT_AUCTION_DELAY)\n        .with_locked_funds_period_millis(DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS)\n        .with_round_seigniorage_rate(DEFAULT_ROUND_SEIGNIORAGE_RATE)\n        .with_unbonding_delay(DEFAULT_UNBONDING_DELAY)\n        .with_genesis_timestamp_millis(DEFAULT_GENESIS_TIMESTAMP_MILLIS)\n        .with_storage_costs(*DEFAULT_STORAGE_COSTS)\n        .build();\n\n    let modified_genesis_request = GenesisRequest::new(\n        DEFAULT_GENESIS_CONFIG_HASH,\n        DEFAULT_PROTOCOL_VERSION,\n        genesis_config,\n        DEFAULT_CHAINSPEC_REGISTRY.clone(),\n    );\n\n    builder.run_genesis(modified_genesis_request);\n}\n\n#[ignore]\n#[test]\nfn genesis_accounts_should_not_update_key_weight() {\n    let mut builder = super::private_chain_setup();\n\n    let exec_request_1 = {\n        let session_args = runtime_args! {\n            ARG_ACCOUNT => *ACCOUNT_1_ADDR,\n            ARG_WEIGHT => Weight::MAX,\n        };\n        ExecuteRequestBuilder::standard(\n            *ACCOUNT_1_ADDR,\n            UPDATE_ASSOCIATED_KEY_CONTRACT,\n            session_args,\n        )\n        .build()\n    };\n\n    builder.exec(exec_request_1).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(\n            error,\n            Error::Exec(ExecError::Revert(ApiError::PermissionDenied))\n        ),\n        \"{:?}\",\n        error\n    );\n\n    let exec_request_2 = {\n        let session_args = runtime_args! 
{\n            ARG_ACCOUNT => *DEFAULT_ADMIN_ACCOUNT_ADDR,\n            ARG_WEIGHT => Weight::new(1),\n        };\n        ExecuteRequestBuilder::standard(\n            *ACCOUNT_1_ADDR,\n            UPDATE_ASSOCIATED_KEY_CONTRACT,\n            session_args,\n        )\n        .build()\n    };\n\n    builder.exec(exec_request_2).expect_failure().commit();\n}\n\n#[ignore]\n#[test]\nfn genesis_accounts_should_not_modify_action_thresholds() {\n    let mut builder = super::private_chain_setup();\n\n    let exec_request = {\n        let session_args = runtime_args! {\n            ARG_DEPLOY_THRESHOLD => Weight::new(1),\n            ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(1),\n        };\n        ExecuteRequestBuilder::standard(\n            *ACCOUNT_1_ADDR,\n            SET_ACTION_THRESHOLDS_CONTRACT,\n            session_args,\n        )\n        .build()\n    };\n\n    builder.exec(exec_request).expect_failure().commit();\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(\n            error,\n            Error::Exec(ExecError::Revert(ApiError::PermissionDenied))\n        ),\n        \"{:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn genesis_accounts_should_not_add_associated_keys() {\n    let secondary_account_hash = AccountHash::new([55; 32]);\n\n    let mut builder = super::private_chain_setup();\n\n    let exec_request = {\n        let session_args = runtime_args! 
{\n            ARG_ACCOUNT => secondary_account_hash,\n            ARG_WEIGHT => Weight::MAX,\n        };\n        ExecuteRequestBuilder::standard(*ACCOUNT_1_ADDR, ADD_ASSOCIATED_KEY_CONTRACT, session_args)\n            .build()\n    };\n\n    builder.exec(exec_request).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(\n            error,\n            Error::Exec(ExecError::Revert(ApiError::PermissionDenied))\n        ),\n        \"{:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn genesis_accounts_should_not_remove_associated_keys() {\n    let secondary_account_hash = AccountHash::new([55; 32]);\n\n    let mut builder = super::private_chain_setup();\n\n    let session_args = runtime_args! {\n        ARG_ACCOUNT => secondary_account_hash,\n        ARG_WEIGHT => Weight::MAX,\n    };\n\n    let account_hash = *ACCOUNT_1_ADDR;\n    let deploy_hash: [u8; 32] = [55; 32];\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(account_hash)\n        .with_session_code(ADD_ASSOCIATED_KEY_CONTRACT, session_args)\n        .with_standard_payment(runtime_args! {\n            ARG_AMOUNT => *DEFAULT_PAYMENT\n        })\n        .with_authorization_keys(&[*ADMIN_1_ACCOUNT_ADDR])\n        .with_deploy_hash(deploy_hash)\n        .build();\n\n    let add_associated_key_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder\n        .exec(add_associated_key_request)\n        .expect_success()\n        .commit();\n\n    let remove_associated_key_request = {\n        let session_args = runtime_args! 
{\n            ARG_ACCOUNT => secondary_account_hash,\n        };\n        ExecuteRequestBuilder::standard(\n            *ACCOUNT_1_ADDR,\n            REMOVE_ASSOCIATED_KEY_CONTRACT,\n            session_args,\n        )\n        .build()\n    };\n\n    builder\n        .exec(remove_associated_key_request)\n        .expect_failure()\n        .commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(\n            error,\n            Error::Exec(ExecError::Revert(ApiError::PermissionDenied))\n        ),\n        \"{:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn administrator_account_should_disable_any_account() {\n    let mut builder = super::private_chain_setup();\n\n    let account_1_genesis = builder\n        .get_entity_by_account_hash(*ACCOUNT_1_ADDR)\n        .expect(\"should have account 1 after genesis\");\n\n    // Account 1 can deploy after genesis\n    let exec_request_1 = ExecuteRequestBuilder::module_bytes(\n        *ACCOUNT_1_ADDR,\n        wasm_utils::do_minimum_bytes(),\n        RuntimeArgs::default(),\n    )\n    .build();\n    builder.exec(exec_request_1).expect_success().commit();\n\n    // Disable account 1\n    let session_args = runtime_args! {\n        ARG_DEPLOY_THRESHOLD => Weight::MAX,\n        ARG_KEY_MANAGEMENT_THRESHOLD => Weight::MAX,\n    };\n\n    let sender = *ACCOUNT_1_ADDR;\n    let deploy_hash = [54; 32];\n\n    // Here, deploy is sent as an account, but signed by an administrator.\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(sender)\n        .with_session_code(SET_ACTION_THRESHOLDS_CONTRACT, session_args)\n        .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        .with_authorization_keys(&[*DEFAULT_ADMIN_ACCOUNT_ADDR])\n        .with_deploy_hash(deploy_hash)\n        .build();\n\n    let disable_request_1 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(disable_request_1).expect_success().commit();\n    // Account 1 can not deploy after freezing\n    let exec_request_2 = ExecuteRequestBuilder::module_bytes(\n        *ACCOUNT_1_ADDR,\n        wasm_utils::do_minimum_bytes(),\n        RuntimeArgs::default(),\n    )\n    .build();\n    builder.exec(exec_request_2).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(matches!(\n        error,\n        Error::TrackingCopy(TrackingCopyError::DeploymentAuthorizationFailure)\n    ));\n\n    let account_1_disabled = builder\n        .get_entity_by_account_hash(*ACCOUNT_1_ADDR)\n        .expect(\"should have account 1 after genesis\");\n    assert_ne!(\n        account_1_genesis, account_1_disabled,\n        \"account 1 should be modified\"\n    );\n\n    // Unfreeze account 1\n    let session_args = runtime_args! {\n        ARG_DEPLOY_THRESHOLD => Weight::new(1),\n        ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(0),\n    };\n\n    let sender = *ACCOUNT_1_ADDR;\n    let deploy_hash = [53; 32];\n\n    // Here, deploy is sent as an account, but signed by an administrator.\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(sender)\n        .with_session_code(SET_ACTION_THRESHOLDS_CONTRACT, session_args)\n        .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        .with_authorization_keys(&[*DEFAULT_ADMIN_ACCOUNT_ADDR])\n        .with_deploy_hash(deploy_hash)\n        .build();\n\n    let enable_request_1 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let session_args = runtime_args! 
{\n        ARG_DEPLOY_THRESHOLD => Weight::new(0),\n        ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(1),\n    };\n\n    let sender = *ACCOUNT_1_ADDR;\n    let deploy_hash = [52; 32];\n\n    // Here, deploy is sent as an account, but signed by an administrator.\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(sender)\n        .with_session_code(SET_ACTION_THRESHOLDS_CONTRACT, session_args)\n        .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        .with_authorization_keys(&[*DEFAULT_ADMIN_ACCOUNT_ADDR])\n        .with_deploy_hash(deploy_hash)\n        .build();\n\n    let enable_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(enable_request_1).expect_success().commit();\n    builder.exec(enable_request_2).expect_success().commit();\n\n    // Account 1 can deploy after unfreezing\n    let exec_request_3 = ExecuteRequestBuilder::module_bytes(\n        *ACCOUNT_1_ADDR,\n        wasm_utils::do_minimum_bytes(),\n        RuntimeArgs::default(),\n    )\n    .build();\n    builder.exec(exec_request_3).expect_success().commit();\n\n    let account_1_unfrozen = builder\n        .get_entity_by_account_hash(*ACCOUNT_1_ADDR)\n        .expect(\"should have account 1 after genesis\");\n    assert_eq!(\n        account_1_genesis, account_1_unfrozen,\n        \"account 1 should be modified back to genesis state\"\n    );\n}\n\n#[ignore]\n#[test]\nfn native_transfer_should_create_new_private_account() {\n    let mut builder = super::private_chain_setup();\n\n    // Account 1 can deploy after genesis\n    let transfer_request = TransferRequestBuilder::new(1, *ACCOUNT_2_ADDR)\n        .with_initiator(*DEFAULT_ADMIN_ACCOUNT_ADDR)\n        .build();\n\n    builder\n        .transfer_and_commit(transfer_request)\n        .expect_success();\n\n    let _account_2 = builder\n        .get_entity_by_account_hash(*ACCOUNT_2_ADDR)\n        .expect(\"should have account 1 after 
transfer\");\n}\n\n#[ignore]\n#[test]\nfn wasm_transfer_should_create_new_private_account() {\n    let mut builder = super::private_chain_setup();\n\n    // Account 1 can deploy after genesis\n    let transfer_args = runtime_args! {\n        mint::ARG_TARGET => *ACCOUNT_2_ADDR,\n        mint::ARG_AMOUNT => 1u64,\n    };\n    let transfer_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ADMIN_ACCOUNT_ADDR,\n        TRANSFER_TO_ACCOUNT_CONTRACT,\n        transfer_args,\n    )\n    .build();\n\n    builder.exec(transfer_request).expect_success().commit();\n\n    let _account_2 = builder\n        .get_entity_by_account_hash(*ACCOUNT_2_ADDR)\n        .expect(\"should have account 1 after genesis\");\n}\n\n#[ignore]\n#[test]\nfn administrator_account_should_disable_any_contract_used_as_session() {\n    let mut builder = super::private_chain_setup();\n\n    let store_contract_request = ExecuteRequestBuilder::standard(\n        *ACCOUNT_1_ADDR,\n        DO_NOTHING_STORED_CONTRACT,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder\n        .exec(store_contract_request)\n        .expect_success()\n        .commit();\n\n    let account_1_genesis = builder\n        .get_entity_with_named_keys_by_account_hash(*ACCOUNT_1_ADDR)\n        .expect(\"should have account 1 after genesis\");\n\n    let stored_entity_key = account_1_genesis\n        .named_keys()\n        .get(DO_NOTHING_HASH_NAME)\n        .unwrap();\n\n    let stored_entity_hash = stored_entity_key\n        .into_entity_hash_addr()\n        .map(AddressableEntityHash::new)\n        .expect(\"should have stored contract hash\");\n\n    let do_nothing_contract_package_key = {\n        let addressable_entity = builder\n            .get_addressable_entity(stored_entity_hash)\n            .expect(\"should be entity\");\n        Key::Hash(addressable_entity.package_hash().value())\n    };\n\n    let contract_package_before = Package::try_from(\n        builder\n            .query(None, 
do_nothing_contract_package_key, &[])\n            .expect(\"should query\"),\n    )\n    .expect(\"should be contract package\");\n\n    let stored_entity_addr = stored_entity_key\n        .into_hash_addr()\n        .map(EntityAddr::SmartContract)\n        .expect(\"must get entity addr\");\n\n    assert!(\n        contract_package_before.is_entity_enabled(&stored_entity_addr),\n        \"newly stored contract should be enabled\"\n    );\n\n    // Account 1 can deploy after genesis\n    let exec_request_1 = ExecuteRequestBuilder::contract_call_by_name(\n        *ACCOUNT_1_ADDR,\n        DO_NOTHING_HASH_NAME,\n        DELEGATE_ENTRYPOINT,\n        RuntimeArgs::default(),\n    )\n    .build();\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let do_nothing_contract_package_hash =\n        PackageHash::new(do_nothing_contract_package_key.into_hash_addr().unwrap());\n\n    // Disable stored contract\n    let disable_request = {\n        let session_args = runtime_args! {\n            ARG_CONTRACT_PACKAGE_HASH => do_nothing_contract_package_hash,\n            ARG_CONTRACT_HASH => stored_entity_hash,\n        };\n\n        ExecuteRequestBuilder::standard(*DEFAULT_ADMIN_ACCOUNT_ADDR, DISABLE_CONTRACT, session_args)\n            .build()\n    };\n\n    builder.exec(disable_request).expect_success().commit();\n\n    let contract_package_after_disable = Package::try_from(\n        builder\n            .query(None, do_nothing_contract_package_key, &[])\n            .expect(\"should query\"),\n    )\n    .expect(\"should be contract package\");\n\n    assert_ne!(\n        contract_package_before, contract_package_after_disable,\n        \"contract package should be disabled\"\n    );\n    assert!(!contract_package_after_disable.is_entity_enabled(&stored_entity_addr),);\n\n    let call_delegate_requests_1 = {\n        // Unable to call disabled stored contract directly\n        let call_delegate_by_name = ExecuteRequestBuilder::contract_call_by_name(\n       
     *ACCOUNT_1_ADDR,\n            DO_NOTHING_HASH_NAME,\n            DELEGATE_ENTRYPOINT,\n            RuntimeArgs::default(),\n        )\n        .build();\n\n        let call_delegate_by_hash = ExecuteRequestBuilder::contract_call_by_hash(\n            *ACCOUNT_1_ADDR,\n            stored_entity_hash,\n            DELEGATE_ENTRYPOINT,\n            RuntimeArgs::default(),\n        )\n        .build();\n\n        let call_delegate_from_wasm = make_call_contract_session_request(\n            *ACCOUNT_1_ADDR,\n            stored_entity_hash,\n            DELEGATE_ENTRYPOINT,\n            RuntimeArgs::default(),\n        );\n\n        vec![\n            call_delegate_by_name,\n            call_delegate_by_hash,\n            call_delegate_from_wasm,\n        ]\n    };\n\n    for call_delegate_request in call_delegate_requests_1 {\n        builder\n            .exec(call_delegate_request)\n            .expect_failure()\n            .commit();\n        let error = builder.get_error().expect(\"should have error\");\n        assert!(\n            matches!(\n                error,\n                Error::Exec(ExecError::DisabledEntity(disabled_contract_hash))\n                if disabled_contract_hash == stored_entity_hash\n            ),\n            \"expected disabled contract error, found {:?}\",\n            error\n        );\n    }\n\n    // Enable stored contract\n    let enable_request = {\n        let session_args = runtime_args! 
{\n            ARG_CONTRACT_PACKAGE_HASH => do_nothing_contract_package_hash,\n            ARG_CONTRACT_HASH => stored_entity_hash,\n        };\n\n        ExecuteRequestBuilder::standard(*DEFAULT_ADMIN_ACCOUNT_ADDR, ENABLE_CONTRACT, session_args)\n            .build()\n    };\n\n    builder.exec(enable_request).expect_success().commit();\n\n    let call_delegate_requests_2 = {\n        // Unable to call disabled stored contract directly\n        let call_delegate_by_name = ExecuteRequestBuilder::contract_call_by_name(\n            *ACCOUNT_1_ADDR,\n            DO_NOTHING_HASH_NAME,\n            DELEGATE_ENTRYPOINT,\n            RuntimeArgs::default(),\n        )\n        .build();\n\n        let call_delegate_by_hash = ExecuteRequestBuilder::contract_call_by_hash(\n            *ACCOUNT_1_ADDR,\n            stored_entity_hash,\n            DELEGATE_ENTRYPOINT,\n            RuntimeArgs::default(),\n        )\n        .build();\n\n        let call_delegate_from_wasm = make_call_contract_session_request(\n            *ACCOUNT_1_ADDR,\n            stored_entity_hash,\n            DELEGATE_ENTRYPOINT,\n            RuntimeArgs::default(),\n        );\n\n        vec![\n            call_delegate_by_name,\n            call_delegate_by_hash,\n            call_delegate_from_wasm,\n        ]\n    };\n\n    for exec_request in call_delegate_requests_2 {\n        builder.exec(exec_request).expect_success().commit();\n    }\n}\n\n#[ignore]\n#[test]\nfn administrator_account_should_disable_any_contract_used_as_payment() {\n    // We'll simulate enabled unrestricted transfers here to test if stored payment contract is\n    // disabled.\n    let mut builder = private_chain::custom_setup_genesis_only(\n        PRIVATE_CHAIN_ALLOW_AUCTION_BIDS,\n        true,\n        PRIVATE_CHAIN_REFUND_HANDLING,\n        PRIVATE_CHAIN_FEE_HANDLING,\n        PRIVATE_CHAIN_COMPUTE_REWARDS,\n    );\n\n    let store_contract_request = ExecuteRequestBuilder::standard(\n        *ACCOUNT_1_ADDR,\n        
TEST_PAYMENT_STORED_CONTRACT,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder\n        .exec(store_contract_request)\n        .expect_success()\n        .commit();\n\n    let account_1_genesis = builder\n        .get_entity_with_named_keys_by_account_hash(*ACCOUNT_1_ADDR)\n        .expect(\"should have account 1 after genesis\");\n\n    let stored_entity_key = account_1_genesis\n        .named_keys()\n        .get(TEST_PAYMENT_STORED_HASH_NAME)\n        .unwrap();\n\n    let stored_entity_hash = stored_entity_key\n        .into_entity_hash_addr()\n        .map(AddressableEntityHash::new)\n        .expect(\"should have stored entity hash\");\n\n    let addressable_entity = builder\n        .get_addressable_entity(stored_entity_hash)\n        .expect(\"should be addressable entity\");\n    let test_payment_stored_package_key = { Key::Hash(addressable_entity.package_hash().value()) };\n\n    let test_payment_stored_package_hash =\n        PackageHash::new(addressable_entity.package_hash().value());\n\n    let contract_package_before = Package::try_from(\n        builder\n            .query(None, test_payment_stored_package_key, &[])\n            .expect(\"should query\"),\n    )\n    .expect(\"should be contract package\");\n    let stored_entity_addr = stored_entity_key\n        .into_entity_addr()\n        .expect(\"must get entity addr\");\n    assert!(\n        contract_package_before.is_entity_enabled(&stored_entity_addr),\n        \"newly stored contract should be enabled\"\n    );\n\n    // Account 1 can deploy after genesis\n    let sender = *ACCOUNT_1_ADDR;\n    let deploy_hash = [100; 32];\n\n    let payment_args = runtime_args! 
{\n        standard_payment::ARG_AMOUNT => *DEFAULT_PAYMENT,\n    };\n    let session_args = RuntimeArgs::default();\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(sender)\n        .with_session_bytes(wasm_utils::do_minimum_bytes(), session_args)\n        .with_stored_payment_named_key(TEST_PAYMENT_STORED_HASH_NAME, PAY_ENTRYPOINT, payment_args)\n        .with_authorization_keys(&[sender])\n        .with_deploy_hash(deploy_hash)\n        .build();\n    let exec_request_1 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(exec_request_1).expect_failure();\n\n    // Disable payment contract\n    let disable_request = {\n        let session_args = runtime_args! {\n            ARG_CONTRACT_PACKAGE_HASH => test_payment_stored_package_hash,\n            ARG_CONTRACT_HASH => stored_entity_hash,\n        };\n\n        ExecuteRequestBuilder::standard(*DEFAULT_ADMIN_ACCOUNT_ADDR, DISABLE_CONTRACT, session_args)\n            .build()\n    };\n\n    builder.exec(disable_request).expect_success().commit();\n\n    let contract_package_after_disable = Package::try_from(\n        builder\n            .query(None, test_payment_stored_package_key, &[])\n            .expect(\"should query\"),\n    )\n    .expect(\"should be contract package\");\n\n    assert_ne!(\n        contract_package_before, contract_package_after_disable,\n        \"contract package should be disabled\"\n    );\n    assert!(!contract_package_after_disable.is_entity_enabled(&stored_entity_addr),);\n\n    let payment_args = runtime_args! 
{\n        standard_payment::ARG_AMOUNT => *DEFAULT_PAYMENT,\n    };\n    let session_args = RuntimeArgs::default();\n\n    let sender = *ACCOUNT_1_ADDR;\n    let deploy_hash = [100; 32];\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(sender)\n        .with_session_bytes(wasm_utils::do_minimum_bytes(), session_args.clone())\n        .with_stored_payment_named_key(\n            TEST_PAYMENT_STORED_HASH_NAME,\n            PAY_ENTRYPOINT,\n            payment_args.clone(),\n        )\n        .with_authorization_keys(&[sender])\n        .with_deploy_hash(deploy_hash)\n        .build();\n    let call_by_name = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(sender)\n        .with_session_bytes(wasm_utils::do_minimum_bytes(), session_args)\n        .with_stored_payment_hash(stored_entity_hash, PAY_ENTRYPOINT, payment_args)\n        .with_authorization_keys(&[sender])\n        .with_deploy_hash(deploy_hash)\n        .build();\n    let call_by_hash = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    for execute_request in [call_by_name, call_by_hash] {\n        builder.exec(execute_request).expect_failure().commit();\n        let error = builder.get_error().expect(\"should have error\");\n        assert!(\n            matches!(\n                error,\n                Error::Exec(ExecError::DisabledEntity(disabled_contract_hash))\n                if disabled_contract_hash == stored_entity_hash\n            ),\n            \"expected disabled contract error, found {:?}\",\n            error\n        );\n    }\n\n    // Enable stored contract\n    let enable_request = {\n        let session_args = runtime_args! 
{\n            ARG_CONTRACT_PACKAGE_HASH => test_payment_stored_package_hash,\n            ARG_CONTRACT_HASH => stored_entity_hash,\n        };\n\n        ExecuteRequestBuilder::standard(*DEFAULT_ADMIN_ACCOUNT_ADDR, ENABLE_CONTRACT, session_args)\n            .build()\n    };\n\n    builder.exec(enable_request).expect_success().commit();\n\n    let payment_args = runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT };\n    let session_args = RuntimeArgs::default();\n    let sender = *ACCOUNT_1_ADDR;\n    let deploy_hash = [100; 32];\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(sender)\n        .with_session_bytes(wasm_utils::do_minimum_bytes(), session_args.clone())\n        .with_stored_payment_named_key(\n            TEST_PAYMENT_STORED_HASH_NAME,\n            PAY_ENTRYPOINT,\n            payment_args.clone(),\n        )\n        .with_authorization_keys(&[sender])\n        .with_deploy_hash(deploy_hash)\n        .build();\n    let call_by_name = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(sender)\n        .with_session_bytes(wasm_utils::do_minimum_bytes(), session_args)\n        .with_stored_payment_hash(stored_entity_hash, PAY_ENTRYPOINT, payment_args)\n        .with_authorization_keys(&[sender])\n        .with_deploy_hash(deploy_hash)\n        .build();\n    let call_by_hash = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    for exec_request in [call_by_name, call_by_hash] {\n        builder.exec(exec_request).expect_failure();\n    }\n}\n\n#[ignore]\n#[test]\nfn should_not_allow_add_bid_on_private_chain() {\n    let mut builder = super::private_chain_setup();\n\n    let delegation_rate: DelegationRate = 4;\n    let session_args = runtime_args! 
{\n        auction::ARG_PUBLIC_KEY => ACCOUNT_1_PUBLIC_KEY.clone(),\n        auction::ARG_AMOUNT => U512::one(),\n        auction::ARG_DELEGATION_RATE => delegation_rate,\n    };\n\n    let exec_request =\n        ExecuteRequestBuilder::standard(*ACCOUNT_1_ADDR, \"add_bid.wasm\", session_args).build();\n\n    builder.exec(exec_request).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n\n    assert!(\n        matches!(\n            error,\n            Error::Exec(ExecError::Revert(api_error))\n            if api_error == auction::Error::AuctionBidsDisabled.into(),\n        ),\n        \"{:?}\",\n        error,\n    );\n}\n\n#[ignore]\n#[test]\nfn should_not_allow_delegate_on_private_chain() {\n    let mut builder = super::private_chain_setup();\n\n    let session_args = runtime_args! {\n        auction::ARG_DELEGATOR => ACCOUNT_1_PUBLIC_KEY.clone(),\n        auction::ARG_VALIDATOR => VALIDATOR_1_PUBLIC_KEY.clone(),\n        auction::ARG_AMOUNT => U512::one(),\n    };\n\n    let exec_request =\n        ExecuteRequestBuilder::standard(*ACCOUNT_1_ADDR, \"delegate.wasm\", session_args).build();\n\n    builder.exec(exec_request).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n\n    assert!(\n        matches!(\n            error,\n            Error::Exec(ExecError::Revert(api_error))\n            if api_error == auction::Error::AuctionBidsDisabled.into()\n        ),\n        \"{:?}\",\n        error\n    );\n    // Redelegation would not work since delegate, and add_bid are disabled on private chains\n    // therefore there is nothing to test.\n}\n\nfn make_call_contract_session_request(\n    account_hash: AccountHash,\n    contract_hash: AddressableEntityHash,\n    entrypoint: &str,\n    arguments: RuntimeArgs,\n) -> ExecuteRequest {\n    let arguments_any = {\n        let arg_bytes = arguments.to_bytes().unwrap();\n        CLValue::from_components(CLType::Any, arg_bytes)\n  
  };\n\n    let mut session_args = runtime_args! {\n        \"entrypoint\" => entrypoint,\n        \"contract_hash\" => contract_hash,\n    };\n    session_args.insert_cl_value(\"arguments\", arguments_any);\n\n    ExecuteRequestBuilder::standard(account_hash, CALL_CONTRACT_PROXY, session_args).build()\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/private_chain/restricted_auction.rs",
    "content": "use casper_engine_test_support::{\n    StepRequestBuilder, DEFAULT_BLOCK_TIME, DEFAULT_GENESIS_TIMESTAMP_MILLIS,\n    DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_PROTOCOL_VERSION, TIMESTAMP_MILLIS_INCREMENT,\n};\nuse casper_storage::data_access_layer::RewardItem;\nuse casper_types::{system::auction::SeigniorageAllocation, Key, U512};\n\nuse crate::test::private_chain::{PRIVATE_CHAIN_GENESIS_VALIDATORS, VALIDATOR_1_PUBLIC_KEY};\n\n#[ignore]\n#[test]\nfn should_not_distribute_rewards_but_compute_next_set() {\n    const VALIDATOR_1_REWARD_FACTOR: u64 = 0;\n\n    let mut timestamp_millis =\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS;\n\n    let mut builder = super::private_chain_setup();\n\n    let protocol_version = DEFAULT_PROTOCOL_VERSION;\n    // initial token supply\n    let initial_supply = builder.total_supply(protocol_version, None);\n\n    for _ in 0..3 {\n        builder.distribute(\n            None,\n            DEFAULT_PROTOCOL_VERSION,\n            IntoIterator::into_iter([(VALIDATOR_1_PUBLIC_KEY.clone(), vec![U512::from(0)])])\n                .collect(),\n            DEFAULT_BLOCK_TIME,\n        );\n        let step_request = StepRequestBuilder::new()\n            .with_parent_state_hash(builder.get_post_state_hash())\n            .with_protocol_version(DEFAULT_PROTOCOL_VERSION)\n            .with_next_era_id(builder.get_era().successor())\n            .with_era_end_timestamp_millis(timestamp_millis)\n            .with_run_auction(true)\n            .with_reward_item(RewardItem::new(\n                VALIDATOR_1_PUBLIC_KEY.clone(),\n                VALIDATOR_1_REWARD_FACTOR,\n            ))\n            .build();\n        assert!(\n            builder.step(step_request).is_success(),\n            \"should execute step\"\n        );\n        timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n    }\n\n    let last_trusted_era = builder.get_era();\n\n    builder.distribute(\n        None,\n        
DEFAULT_PROTOCOL_VERSION,\n        IntoIterator::into_iter([(VALIDATOR_1_PUBLIC_KEY.clone(), vec![U512::from(0)])]).collect(),\n        DEFAULT_BLOCK_TIME,\n    );\n\n    let step_request = StepRequestBuilder::new()\n        .with_parent_state_hash(builder.get_post_state_hash())\n        .with_protocol_version(DEFAULT_PROTOCOL_VERSION)\n        .with_reward_item(RewardItem::new(\n            VALIDATOR_1_PUBLIC_KEY.clone(),\n            VALIDATOR_1_REWARD_FACTOR,\n        ))\n        .with_next_era_id(last_trusted_era.successor())\n        .with_era_end_timestamp_millis(timestamp_millis)\n        .with_run_auction(true)\n        .build();\n\n    assert!(\n        builder.step(step_request).is_success(),\n        \"should execute step\"\n    );\n\n    let era_info = {\n        let era_info_value = builder\n            .query(None, Key::EraSummary, &[])\n            .expect(\"should have value\");\n\n        era_info_value\n            .as_era_info()\n            .cloned()\n            .expect(\"should be era info\")\n    };\n\n    const EXPECTED_VALIDATOR_1_PAYOUT: U512 = U512::zero();\n\n    assert_eq!(\n        era_info.seigniorage_allocations().len(),\n        PRIVATE_CHAIN_GENESIS_VALIDATORS.len(),\n        \"running auction should not increase number of validators\",\n    );\n\n    assert!(\n        matches!(\n            era_info.select(VALIDATOR_1_PUBLIC_KEY.clone()).next(),\n            Some(SeigniorageAllocation::Validator { validator_public_key, amount })\n            if *validator_public_key == *VALIDATOR_1_PUBLIC_KEY && *amount == EXPECTED_VALIDATOR_1_PAYOUT\n        ),\n        \"era info is {:?}\",\n        era_info\n    );\n\n    let total_supply_after_distribution = builder.total_supply(protocol_version, None);\n    assert_eq!(\n        initial_supply, total_supply_after_distribution,\n        \"total supply of tokens should not increase after an auction is ran\"\n    )\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/private_chain/unrestricted_transfers.rs",
    "content": "use casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, TransferRequestBuilder, DEFAULT_PAYMENT,\n    MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse casper_execution_engine::{engine_state::Error, execution::ExecError};\nuse casper_storage::system::transfer::TransferError;\nuse casper_types::{\n    account::AccountHash,\n    runtime_args,\n    system::{mint, standard_payment},\n    Key, PublicKey, RuntimeArgs, StoredValue, URef, U512,\n};\n\nuse crate::{test::private_chain::ADMIN_1_ACCOUNT_ADDR, wasm_utils};\n\nuse super::{ACCOUNT_1_ADDR, ACCOUNT_2_ADDR, DEFAULT_ADMIN_ACCOUNT_ADDR};\n\nconst TRANSFER_TO_ACCOUNT_U512_CONTRACT: &str = \"transfer_to_account_u512.wasm\";\nconst TRANSFER_TO_NAMED_PURSE_CONTRACT: &str = \"transfer_to_named_purse.wasm\";\n\nconst TEST_PURSE: &str = \"test\";\nconst ARG_PURSE_NAME: &str = \"purse_name\";\nconst ARG_AMOUNT: &str = \"amount\";\n\nconst TEST_PAYMENT_STORED_CONTRACT: &str = \"test_payment_stored.wasm\";\nconst TEST_PAYMENT_STORED_HASH_NAME: &str = \"test_payment_hash\";\n\n#[ignore]\n#[test]\nfn should_restrict_native_transfer_to_from_non_administrators() {\n    let mut builder = super::private_chain_setup();\n\n    let fund_transfer_1 =\n        TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, *ACCOUNT_1_ADDR)\n            .with_initiator(*DEFAULT_ADMIN_ACCOUNT_ADDR)\n            .build();\n\n    // Admin can transfer funds to create new account.\n    builder\n        .transfer_and_commit(fund_transfer_1)\n        .expect_success();\n\n    let transfer_request_1 = TransferRequestBuilder::new(1, *ACCOUNT_2_ADDR)\n        .with_initiator(*ACCOUNT_1_ADDR)\n        .build();\n\n    // User can't transfer funds to non administrator (it doesn't matter if this would create a new\n    // account or not...the receiver must be an EXISTING administrator account\n    builder\n        .transfer_and_commit(transfer_request_1)\n        .expect_failure();\n\n    let error = 
builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(\n            error,\n            Error::Transfer(TransferError::RestrictedTransferAttempted)\n        ),\n        \"expected RestrictedTransferAttempted error, found {:?}\",\n        error\n    );\n\n    let transfer_request_2 = TransferRequestBuilder::new(1, *DEFAULT_ADMIN_ACCOUNT_ADDR)\n        .with_initiator(*ACCOUNT_1_ADDR)\n        .build();\n\n    // User can transfer funds back to admin.\n    builder\n        .transfer_and_commit(transfer_request_2)\n        .expect_success();\n}\n\n#[ignore]\n#[test]\nfn should_restrict_wasm_transfer_to_from_non_administrators() {\n    let mut builder = super::private_chain_setup();\n\n    let fund_transfer_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ADMIN_ACCOUNT_ADDR,\n        TRANSFER_TO_ACCOUNT_U512_CONTRACT,\n        runtime_args! {\n            mint::ARG_TARGET => *ACCOUNT_1_ADDR,\n            mint::ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE),\n        },\n    )\n    .build();\n\n    // Admin can transfer funds to create new account.\n    builder.exec(fund_transfer_1).expect_success().commit();\n\n    let transfer_request_1 = ExecuteRequestBuilder::standard(\n        *ACCOUNT_1_ADDR,\n        TRANSFER_TO_ACCOUNT_U512_CONTRACT,\n        runtime_args! {\n            mint::ARG_TARGET => *ACCOUNT_2_ADDR,\n            mint::ARG_AMOUNT => U512::one(),\n        },\n    )\n    .build();\n\n    // User can't transfer funds to create new account.\n    builder.exec(transfer_request_1).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(error, Error::Exec(ExecError::DisabledUnrestrictedTransfers)),\n        \"expected DisabledUnrestrictedTransfers error, found {:?}\",\n        error\n    );\n\n    let transfer_request_2 = ExecuteRequestBuilder::standard(\n        *ACCOUNT_1_ADDR,\n        TRANSFER_TO_ACCOUNT_U512_CONTRACT,\n        runtime_args! 
{\n            mint::ARG_TARGET => *DEFAULT_ADMIN_ACCOUNT_ADDR,\n            mint::ARG_AMOUNT => U512::one(),\n        },\n    )\n    .build();\n\n    // User can transfer funds back to admin.\n    builder.exec(transfer_request_2).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_noop_self_transfer() {\n    let mut builder = super::private_chain_setup();\n\n    let session_args = runtime_args! {\n        ARG_PURSE_NAME => TEST_PURSE,\n        ARG_AMOUNT => U512::zero(), // create empty purse without transfer\n    };\n    let create_purse_request = ExecuteRequestBuilder::standard(\n        *ACCOUNT_1_ADDR,\n        TRANSFER_TO_NAMED_PURSE_CONTRACT,\n        session_args,\n    )\n    .build();\n    builder.exec(create_purse_request).expect_success().commit();\n\n    let mint_contract_hash = builder.get_mint_contract_hash();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*ACCOUNT_1_ADDR)\n        .expect(\"should have account\");\n    let maybe_to: Option<AccountHash> = None;\n    let source: URef = account.main_purse();\n    let target: URef = account\n        .named_keys()\n        .get(TEST_PURSE)\n        .unwrap()\n        .into_uref()\n        .expect(\"should be uref\");\n    let amount: U512 = U512::one();\n    let id: Option<u64> = None;\n\n    let session_args = runtime_args! 
{\n        mint::ARG_TO => maybe_to,\n        mint::ARG_SOURCE => source,\n        mint::ARG_TARGET => target,\n        mint::ARG_AMOUNT => amount,\n        mint::ARG_ID => id,\n    };\n\n    let exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *ACCOUNT_1_ADDR,\n        mint_contract_hash,\n        mint::METHOD_TRANSFER,\n        session_args,\n    )\n    .build();\n    builder.exec(exec_request).expect_success().commit();\n\n    // Transfer technically succeeded but the result of mint::Error was discarded so we have to\n    // ensure that purse has 0 balance.\n    let value = builder\n        .query(None, Key::Balance(target.addr()), &[])\n        .unwrap();\n    let value: U512 = if let StoredValue::CLValue(cl_value) = value {\n        cl_value.into_t().unwrap()\n    } else {\n        panic!(\"should be a CLValue\");\n    };\n    assert_eq!(value, U512::zero());\n}\n\n#[ignore]\n#[test]\nfn should_allow_admin_to_native_transfer_from_own_purse() {\n    let mut builder = super::private_chain_setup();\n\n    let session_args = runtime_args! 
{\n        ARG_PURSE_NAME => TEST_PURSE,\n        ARG_AMOUNT => U512::zero(), // create empty purse without transfer\n    };\n    let create_purse_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ADMIN_ACCOUNT_ADDR,\n        TRANSFER_TO_NAMED_PURSE_CONTRACT,\n        session_args,\n    )\n    .build();\n    builder.exec(create_purse_request).expect_success().commit();\n\n    let mint_contract_hash = builder.get_mint_contract_hash();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ADMIN_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n    let maybe_to: Option<AccountHash> = None;\n    let source: URef = account.main_purse();\n    let target: URef = account\n        .named_keys()\n        .get(TEST_PURSE)\n        .unwrap()\n        .into_uref()\n        .expect(\"should be uref\");\n    let amount: U512 = U512::one();\n    let id: Option<u64> = None;\n\n    let session_args = runtime_args! {\n        mint::ARG_TO => maybe_to,\n        mint::ARG_SOURCE => source,\n        mint::ARG_TARGET => target,\n        mint::ARG_AMOUNT => amount,\n        mint::ARG_ID => id,\n    };\n\n    let exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ADMIN_ACCOUNT_ADDR,\n        mint_contract_hash,\n        mint::METHOD_TRANSFER,\n        session_args,\n    )\n    .build();\n    builder.exec(exec_request).expect_success().commit();\n\n    // Transfer technically succeeded but the result of mint::Error was discarded so we have to\n    // ensure that purse has 0 balance.\n    let value = builder\n        .query(None, Key::Balance(target.addr()), &[])\n        .unwrap();\n    let value: U512 = if let StoredValue::CLValue(cl_value) = value {\n        cl_value.into_t().unwrap()\n    } else {\n        panic!(\"should be a CLValue\");\n    };\n    assert_eq!(value, amount);\n}\n\n#[ignore]\n#[test]\nfn should_not_allow_wasm_transfer_from_non_administrator_to_misc_purse() {\n    let mut builder = 
super::private_chain_setup();\n\n    let session_args = runtime_args! {\n        ARG_PURSE_NAME => TEST_PURSE,\n        ARG_AMOUNT => U512::one(),\n    };\n    let create_purse_request = ExecuteRequestBuilder::standard(\n        *ACCOUNT_1_ADDR,\n        TRANSFER_TO_NAMED_PURSE_CONTRACT,\n        session_args,\n    )\n    .build();\n    builder.exec(create_purse_request).expect_failure().commit();\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(\n            error,\n            Error::Exec(ExecError::Revert(revert)) if revert == mint::Error::DisabledUnrestrictedTransfers.into()\n        ),\n        \"expected DisabledUnrestrictedTransfers error, found {:?}\",\n        error\n    )\n}\n\n#[ignore]\n#[test]\nfn should_allow_wasm_transfer_from_administrator() {\n    let mut builder = super::private_chain_setup();\n\n    let session_args = runtime_args! {\n        ARG_PURSE_NAME => TEST_PURSE,\n        ARG_AMOUNT => U512::one(),\n    };\n    let create_purse_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ADMIN_ACCOUNT_ADDR,\n        TRANSFER_TO_NAMED_PURSE_CONTRACT,\n        session_args,\n    )\n    .build();\n    builder.exec(create_purse_request).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_not_allow_native_transfer_from_non_administrator_to_misc_purse() {\n    let mut builder = super::private_chain_setup();\n\n    let session_args = runtime_args! 
{\n        ARG_PURSE_NAME => TEST_PURSE,\n        ARG_AMOUNT => U512::zero(), // we can't transfer in private chain mode, so we'll just create empty valid purse\n    };\n    let create_purse_request = ExecuteRequestBuilder::standard(\n        *ACCOUNT_1_ADDR,\n        TRANSFER_TO_NAMED_PURSE_CONTRACT,\n        session_args,\n    )\n    .build();\n    builder.exec(create_purse_request).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*ACCOUNT_1_ADDR)\n        .expect(\"should have account\");\n    let source = account.main_purse();\n    let target = account\n        .named_keys()\n        .get(TEST_PURSE)\n        .unwrap()\n        .into_uref()\n        .expect(\"should be uref\");\n\n    let transfer_request = TransferRequestBuilder::new(1, target)\n        .with_initiator(*ACCOUNT_1_ADDR)\n        .with_source(source)\n        .build();\n\n    builder\n        .transfer_and_commit(transfer_request)\n        .expect_failure();\n\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(\n            error,\n            Error::Transfer(TransferError::UnableToVerifyTargetIsAdmin)\n        ),\n        \"expected UnableToVerifyTargetIsAdmin error, found {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn should_allow_native_transfer_to_administrator_from_misc_purse() {\n    let mut builder = super::private_chain_setup();\n\n    let session_args = runtime_args! 
{\n        ARG_PURSE_NAME => TEST_PURSE,\n        ARG_AMOUNT => U512::zero(), // we can't transfer in private chain mode, so we'll just create empty valid purse\n    };\n    let create_purse_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ADMIN_ACCOUNT_ADDR,\n        TRANSFER_TO_NAMED_PURSE_CONTRACT,\n        session_args,\n    )\n    .build();\n    builder.exec(create_purse_request).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ADMIN_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n    let source = account.main_purse();\n    let target = account\n        .named_keys()\n        .get(TEST_PURSE)\n        .unwrap()\n        .into_uref()\n        .expect(\"should be uref\");\n\n    let transfer_request = TransferRequestBuilder::new(1, target)\n        .with_initiator(*DEFAULT_ADMIN_ACCOUNT_ADDR)\n        .with_source(source)\n        .build();\n\n    builder\n        .transfer_and_commit(transfer_request)\n        .expect_success();\n}\n\n#[ignore]\n#[test]\nfn should_not_allow_wasm_transfer_from_non_administrator_to_known_purse() {\n    let mut builder = super::private_chain_setup();\n\n    let store_contract_request = ExecuteRequestBuilder::standard(\n        *ACCOUNT_1_ADDR,\n        \"contract_funds.wasm\",\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder\n        .exec(store_contract_request)\n        .expect_success()\n        .commit();\n\n    let transfer_request = ExecuteRequestBuilder::standard(\n        *ACCOUNT_1_ADDR,\n        \"contract_funds_call.wasm\",\n        runtime_args! 
{\n            ARG_AMOUNT => U512::one(),\n        },\n    )\n    .build();\n\n    builder.exec(transfer_request).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(\n            error,\n            Error::Exec(ExecError::Revert(revert)) if revert == mint::Error::DisabledUnrestrictedTransfers.into()\n        ),\n        \"expected DisabledUnrestrictedTransfers error, found {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[allow(unused)]\n#[test]\nfn should_not_allow_payment_to_purse_in_stored_payment() {\n    // This effectively disables any custom payment code\n    let mut builder = super::private_chain_setup();\n\n    let store_contract_request = ExecuteRequestBuilder::standard(\n        *ACCOUNT_1_ADDR,\n        TEST_PAYMENT_STORED_CONTRACT,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder\n        .exec(store_contract_request)\n        .expect_success()\n        .commit();\n\n    // Account 1 can deploy after genesis\n    let sender = *ACCOUNT_1_ADDR;\n    let deploy_hash = [100; 32];\n\n    let payment_args = runtime_args! 
{\n        standard_payment::ARG_AMOUNT => *DEFAULT_PAYMENT,\n    };\n    let session_args = RuntimeArgs::default();\n\n    const PAY_ENTRYPOINT: &str = \"pay\";\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(sender)\n        .with_session_bytes(wasm_utils::do_minimum_bytes(), session_args)\n        .with_stored_payment_named_key(TEST_PAYMENT_STORED_HASH_NAME, PAY_ENTRYPOINT, payment_args)\n        .with_authorization_keys(&[sender])\n        .with_deploy_hash(deploy_hash)\n        .build();\n    let exec_request_1 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(exec_request_1).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(error, Error::Exec(ExecError::ForgedReference(_))),\n        \"expected ForgedReference error, found {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn should_not_allow_direct_mint_transfer_with_system_addr_specified() {\n    // This test executes mint's transfer entrypoint with a SYSTEM_ADDR as to field in attempt to\n    // avoid restrictions.\n    let mut builder = super::private_chain_setup();\n\n    let fund_transfer_1 = ExecuteRequestBuilder::standard(\n        *ACCOUNT_1_ADDR,\n        \"mint_transfer_proxy.wasm\",\n        runtime_args! 
{\n            \"to\" => Some(PublicKey::System.to_account_hash()),\n            \"amount\" => U512::from(1u64),\n        },\n    )\n    .build();\n\n    // should fail because the imputed TO arg is not valid if PublicKey::System in this flow\n    builder.exec(fund_transfer_1).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(error, Error::Exec(ExecError::Revert(revert)) if revert == mint::Error::DisabledUnrestrictedTransfers.into()),\n        \"expected DisabledUnrestrictedTransfers error, found {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn should_not_allow_direct_mint_transfer_with_an_admin_in_to_field() {\n    // This test executes mint's transfer entrypoint with a SYSTEM_ADDR as to field in attempt to\n    // avoid restrictions.\n    let mut builder = super::private_chain_setup();\n\n    let fund_transfer_1 = ExecuteRequestBuilder::standard(\n        *ACCOUNT_1_ADDR,\n        \"mint_transfer_proxy.wasm\",\n        runtime_args! 
{\n            \"to\" => Some(*ADMIN_1_ACCOUNT_ADDR),\n            \"amount\" => U512::from(1u64),\n        },\n    )\n    .build();\n\n    // Admin can transfer funds to create new account.\n    builder.exec(fund_transfer_1).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(error, Error::Exec(ExecError::Revert(revert)) if revert == mint::Error::DisabledUnrestrictedTransfers.into()),\n        \"expected DisabledUnrestrictedTransfers error, found {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn should_allow_mint_transfer_without_to_field_from_admin() {\n    // This test executes mint's transfer entrypoint with a SYSTEM_ADDR as to field in attempt to\n    // avoid restrictions.\n    let mut builder = super::private_chain_setup();\n\n    let fund_transfer_1 = ExecuteRequestBuilder::standard(\n        *ADMIN_1_ACCOUNT_ADDR,\n        \"mint_transfer_proxy.wasm\",\n        runtime_args! {\n            \"to\" => None::<AccountHash>,\n            \"amount\" => U512::from(1u64),\n        },\n    )\n    .build();\n\n    // Admin can transfer funds to create new account.\n    builder.exec(fund_transfer_1).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_not_allow_transfer_without_to_field_from_non_admin() {\n    // This test executes mint's transfer entrypoint with a SYSTEM_ADDR as to field in attempt to\n    // avoid restrictions.\n    let mut builder = super::private_chain_setup();\n\n    let fund_transfer_1 = ExecuteRequestBuilder::standard(\n        *ACCOUNT_1_ADDR,\n        \"mint_transfer_proxy.wasm\",\n        runtime_args! 
{\n            \"to\" => None::<AccountHash>,\n            \"amount\" => U512::from(1u64),\n        },\n    )\n    .build();\n\n    // Admin can transfer funds to create new account.\n    builder.exec(fund_transfer_1).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(error, Error::Exec(ExecError::Revert(revert)) if revert == mint::Error::DisabledUnrestrictedTransfers.into()),\n        \"expected DisabledUnrestrictedTransfers error, found {:?}\",\n        error\n    );\n}\n\n// #[ignore]\n// #[allow(unused)]\n// #[test]\n// fn should_not_allow_custom_payment() {\n//     let mut builder = super::private_chain_setup();\n//\n//     // Account 1 can deploy after genesis\n//     let sender = *ACCOUNT_1_ADDR;\n//     let deploy_hash = [100; 32];\n//\n//     let payment_amount = *DEFAULT_PAYMENT + U512::from(1u64);\n//\n//     let payment_args = runtime_args! {\n//         standard_payment::ARG_AMOUNT => payment_amount,\n//     };\n//     let session_args = RuntimeArgs::default();\n//\n//     let deploy_item = DeployItemBuilder::new()\n//         .with_address(sender)\n//         .with_session_bytes(wasm_utils::do_minimum_bytes(), session_args)\n//         .with_payment_code(\"non_standard_payment.wasm\", payment_args)\n//         .with_authorization_keys(&[sender])\n//         .with_deploy_hash(deploy_hash)\n//         .build();\n//     let exec_request_1 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n//\n//     builder.exec(exec_request_1).expect_failure();\n// }\n//\n// #[ignore]\n// #[test]\n// fn should_allow_wasm_transfer_to_system() {\n//     let mut builder = super::private_chain_setup();\n//\n//     // Account 1 can deploy after genesis\n//     let sender = *ACCOUNT_1_ADDR;\n//     let deploy_hash = [100; 32];\n//\n//     let payment_amount = *DEFAULT_PAYMENT + U512::from(1u64);\n//\n//     let payment_args = runtime_args! 
{\n//         standard_payment::ARG_AMOUNT => payment_amount,\n//     };\n//     let session_args = runtime_args! {\n//         \"target\" => *SYSTEM_ADDR,\n//         \"amount\" => U512::one(),\n//     };\n//\n//     let deploy_item = DeployItemBuilder::new()\n//         .with_address(sender)\n//         .with_session_code(\"transfer_to_account_u512.wasm\", session_args)\n//         .with_standard_payment(payment_args)\n//         .with_authorization_keys(&[sender])\n//         .with_deploy_hash(deploy_hash)\n//         .build();\n//     let exec_request_1 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n//\n//     builder.exec(exec_request_1).expect_success().commit();\n//\n//     let handle_payment_contract = builder.get_named_keys(EntityAddr::System(\n//         builder.get_handle_payment_contract_hash().value(),\n//     ));\n//     let payment_purse_key = handle_payment_contract\n//         .get(handle_payment::PAYMENT_PURSE_KEY)\n//         .unwrap();\n//     let payment_purse_uref = payment_purse_key.into_uref().unwrap();\n//     println!(\"payment uref: {payment_purse_uref}\");\n//     assert_eq!(\n//         builder.get_purse_balance(payment_purse_uref),\n//         U512::zero(),\n//         \"after finalizing a private chain a payment purse should be empty\"\n//     );\n// }\n//\n// #[ignore]\n// #[test]\n// fn should_allow_native_transfer_to_administrator() {\n//     let mut builder = super::private_chain_setup();\n//\n//     let payment_purse_uref = {\n//         let handle_payment_contract = builder.get_named_keys(EntityAddr::System(\n//             builder.get_handle_payment_contract_hash().value(),\n//         ));\n//         let payment_purse_key = handle_payment_contract\n//             .get(handle_payment::PAYMENT_PURSE_KEY)\n//             .unwrap();\n//         payment_purse_key.into_uref().unwrap()\n//     };\n//\n//     assert_eq!(\n//         builder.get_purse_balance(payment_purse_uref),\n//         U512::zero(),\n//         
\"payment purse should be empty\"\n//     );\n//\n//     let fund_transfer_1 =\n//         TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, *SYSTEM_ADDR)\n//             .with_initiator(*DEFAULT_ADMIN_ACCOUNT_ADDR)\n//             .build();\n//\n//     builder\n//         .transfer_and_commit(fund_transfer_1)\n//         .expect_success();\n//\n//     assert_eq!(\n//         builder.get_purse_balance(payment_purse_uref),\n//         U512::zero(),\n//         \"after finalizing a private chain a payment purse should be empty\"\n//     );\n// }\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/private_chain.rs",
    "content": "mod fees_accumulation;\npub mod management;\nmod restricted_auction;\nmod unrestricted_transfers;\n\nuse casper_engine_test_support::{\n    genesis_config_builder::GenesisConfigBuilder, ChainspecConfig, LmdbWasmTestBuilder,\n    DEFAULT_ACCOUNT_INITIAL_BALANCE, DEFAULT_AUCTION_DELAY, DEFAULT_CHAINSPEC_REGISTRY,\n    DEFAULT_GENESIS_CONFIG_HASH, DEFAULT_GENESIS_TIMESTAMP_MILLIS,\n    DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_PROPOSER_PUBLIC_KEY, DEFAULT_PROTOCOL_VERSION,\n    DEFAULT_ROUND_SEIGNIORAGE_RATE, DEFAULT_STORAGE_COSTS, DEFAULT_SYSTEM_CONFIG,\n    DEFAULT_UNBONDING_DELAY, DEFAULT_VALIDATOR_SLOTS, DEFAULT_WASM_CONFIG,\n};\nuse num_rational::Ratio;\nuse once_cell::sync::Lazy;\nuse std::collections::{BTreeMap, BTreeSet};\n\nuse casper_storage::data_access_layer::GenesisRequest;\nuse casper_types::{\n    account::AccountHash, system::auction::DELEGATION_RATE_DENOMINATOR, AdministratorAccount,\n    CoreConfig, FeeHandling, GenesisAccount, GenesisConfig, GenesisValidator, HostFunction,\n    HostFunctionCostsV1, MessageLimits, Motes, OpcodeCosts, PublicKey, RefundHandling, SecretKey,\n    StorageCosts, WasmConfig, WasmV1Config, WasmV2Config, DEFAULT_MAX_STACK_HEIGHT,\n    DEFAULT_WASM_MAX_MEMORY, U512,\n};\nuse tempfile::TempDir;\n\nstatic VALIDATOR_1_SECRET_KEY: Lazy<SecretKey> =\n    Lazy::new(|| SecretKey::secp256k1_from_bytes([244; 32]).unwrap());\nstatic VALIDATOR_1_PUBLIC_KEY: Lazy<PublicKey> =\n    Lazy::new(|| PublicKey::from(&*VALIDATOR_1_SECRET_KEY));\n\nconst DEFAULT_VALIDATOR_BONDED_AMOUNT: U512 = U512([u64::MAX, 0, 0, 0, 0, 0, 0, 0]);\n\nstatic DEFAULT_ADMIN_ACCOUNT_SECRET_KEY: Lazy<SecretKey> =\n    Lazy::new(|| SecretKey::secp256k1_from_bytes([250; 32]).unwrap());\nstatic DEFAULT_ADMIN_ACCOUNT_PUBLIC_KEY: Lazy<PublicKey> =\n    Lazy::new(|| PublicKey::from(&*DEFAULT_ADMIN_ACCOUNT_SECRET_KEY));\nstatic DEFAULT_ADMIN_ACCOUNT_ADDR: Lazy<AccountHash> =\n    Lazy::new(|| DEFAULT_ADMIN_ACCOUNT_PUBLIC_KEY.to_account_hash());\n\nstatic 
ADMIN_1_ACCOUNT_SECRET_KEY: Lazy<SecretKey> =\n    Lazy::new(|| SecretKey::secp256k1_from_bytes([240; 32]).unwrap());\nstatic ADMIN_1_ACCOUNT_PUBLIC_KEY: Lazy<PublicKey> =\n    Lazy::new(|| PublicKey::from(&*ADMIN_1_ACCOUNT_SECRET_KEY));\nstatic ADMIN_1_ACCOUNT_ADDR: Lazy<AccountHash> =\n    Lazy::new(|| ADMIN_1_ACCOUNT_PUBLIC_KEY.to_account_hash());\n\nstatic ACCOUNT_1_SECRET_KEY: Lazy<SecretKey> =\n    Lazy::new(|| SecretKey::secp256k1_from_bytes([251; 32]).unwrap());\nstatic ACCOUNT_1_PUBLIC_KEY: Lazy<PublicKey> =\n    Lazy::new(|| PublicKey::from(&*ACCOUNT_1_SECRET_KEY));\nstatic ACCOUNT_1_ADDR: Lazy<AccountHash> = Lazy::new(|| ACCOUNT_1_PUBLIC_KEY.to_account_hash());\n\nstatic ACCOUNT_2_SECRET_KEY: Lazy<SecretKey> =\n    Lazy::new(|| SecretKey::secp256k1_from_bytes([241; 32]).unwrap());\nstatic ACCOUNT_2_PUBLIC_KEY: Lazy<PublicKey> =\n    Lazy::new(|| PublicKey::from(&*ACCOUNT_2_SECRET_KEY));\nstatic ACCOUNT_2_ADDR: Lazy<AccountHash> = Lazy::new(|| ACCOUNT_2_PUBLIC_KEY.to_account_hash());\n\nconst ADMIN_ACCOUNT_INITIAL_BALANCE: U512 = U512([100_000_000_000_000_000u64, 0, 0, 0, 0, 0, 0, 0]);\n\nconst PRIVATE_CHAIN_ALLOW_AUCTION_BIDS: bool = false;\nconst PRIVATE_CHAIN_ALLOW_UNRESTRICTED_TRANSFERS: bool = false;\n\nstatic PRIVATE_CHAIN_GENESIS_ADMIN_ACCOUNTS: Lazy<Vec<AdministratorAccount>> = Lazy::new(|| {\n    let default_admin = AdministratorAccount::new(\n        DEFAULT_ADMIN_ACCOUNT_PUBLIC_KEY.clone(),\n        Motes::new(ADMIN_ACCOUNT_INITIAL_BALANCE),\n    );\n    let admin_1 = AdministratorAccount::new(\n        ADMIN_1_ACCOUNT_PUBLIC_KEY.clone(),\n        Motes::new(ADMIN_ACCOUNT_INITIAL_BALANCE),\n    );\n    vec![default_admin, admin_1]\n});\n\nstatic PRIVATE_CHAIN_GENESIS_ADMIN_SET: Lazy<BTreeSet<PublicKey>> = Lazy::new(|| {\n    PRIVATE_CHAIN_GENESIS_ADMIN_ACCOUNTS\n        .iter()\n        .map(|admin| admin.public_key().clone())\n        .collect()\n});\n\nstatic PRIVATE_CHAIN_GENESIS_VALIDATORS: Lazy<BTreeMap<PublicKey, GenesisValidator>> =\n    
Lazy::new(|| {\n        let public_key = VALIDATOR_1_PUBLIC_KEY.clone();\n        let genesis_validator_1 = GenesisValidator::new(\n            Motes::new(DEFAULT_VALIDATOR_BONDED_AMOUNT),\n            DELEGATION_RATE_DENOMINATOR,\n        );\n        let mut genesis_validators = BTreeMap::new();\n        genesis_validators.insert(public_key, genesis_validator_1);\n        genesis_validators\n    });\n\nstatic PRIVATE_CHAIN_DEFAULT_ACCOUNTS: Lazy<Vec<GenesisAccount>> = Lazy::new(|| {\n    let mut default_accounts = Vec::new();\n\n    let proposer_account =\n        GenesisAccount::account(DEFAULT_PROPOSER_PUBLIC_KEY.clone(), Motes::zero(), None);\n    default_accounts.push(proposer_account);\n\n    // One normal account that starts at genesis\n    default_accounts.push(GenesisAccount::account(\n        ACCOUNT_1_PUBLIC_KEY.clone(),\n        Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n        None,\n    ));\n\n    // Set up genesis validators\n    {\n        let public_key = VALIDATOR_1_PUBLIC_KEY.clone();\n        let genesis_validator = PRIVATE_CHAIN_GENESIS_VALIDATORS[&public_key];\n        default_accounts.push(GenesisAccount::Account {\n            public_key,\n            // Genesis validators for a private network doesn't have balances, but they are part of\n            // fixed set of validators\n            balance: Motes::zero(),\n            validator: Some(genesis_validator),\n        });\n    }\n\n    let admin_accounts = PRIVATE_CHAIN_GENESIS_ADMIN_ACCOUNTS.clone();\n    let genesis_admins = admin_accounts.into_iter().map(GenesisAccount::from);\n    default_accounts.extend(genesis_admins);\n\n    default_accounts\n});\n\nconst PRIVATE_CHAIN_REFUND_HANDLING: RefundHandling = RefundHandling::Refund {\n    refund_ratio: Ratio::new_raw(1, 1),\n};\nconst PRIVATE_CHAIN_FEE_HANDLING: FeeHandling = FeeHandling::Accumulate;\nconst PRIVATE_CHAIN_COMPUTE_REWARDS: bool = false;\n\nstatic DEFUALT_PRIVATE_CHAIN_EXEC_CONFIG: Lazy<GenesisConfig> = Lazy::new(|| {\n   
 GenesisConfigBuilder::default()\n        .with_accounts(PRIVATE_CHAIN_DEFAULT_ACCOUNTS.clone())\n        .with_wasm_config(*DEFAULT_WASM_CONFIG)\n        .with_system_config(*DEFAULT_SYSTEM_CONFIG)\n        .with_validator_slots(DEFAULT_VALIDATOR_SLOTS)\n        .with_auction_delay(DEFAULT_AUCTION_DELAY)\n        .with_locked_funds_period_millis(DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS)\n        .with_round_seigniorage_rate(DEFAULT_ROUND_SEIGNIORAGE_RATE)\n        .with_unbonding_delay(DEFAULT_UNBONDING_DELAY)\n        .with_genesis_timestamp_millis(DEFAULT_GENESIS_TIMESTAMP_MILLIS)\n        .with_storage_costs(*DEFAULT_STORAGE_COSTS)\n        .build()\n});\n\nstatic DEFAULT_PRIVATE_CHAIN_GENESIS: Lazy<GenesisRequest> = Lazy::new(|| {\n    GenesisRequest::new(\n        DEFAULT_GENESIS_CONFIG_HASH,\n        DEFAULT_PROTOCOL_VERSION,\n        DEFUALT_PRIVATE_CHAIN_EXEC_CONFIG.clone(),\n        DEFAULT_CHAINSPEC_REGISTRY.clone(),\n    )\n});\n\nfn custom_setup_genesis_only(\n    allow_auction_bids: bool,\n    allow_unrestricted_transfers: bool,\n    refund_handling: RefundHandling,\n    fee_handling: FeeHandling,\n    compute_rewards: bool,\n) -> LmdbWasmTestBuilder {\n    let engine_config = make_private_chain_config(\n        allow_auction_bids,\n        allow_unrestricted_transfers,\n        refund_handling,\n        fee_handling,\n        compute_rewards,\n    );\n    let data_dir = TempDir::new().expect(\"should create temp dir\");\n    let mut builder = LmdbWasmTestBuilder::new_with_config(data_dir.as_ref(), engine_config);\n    builder.run_genesis(DEFAULT_PRIVATE_CHAIN_GENESIS.clone());\n    builder\n}\n\nfn setup_genesis_only() -> LmdbWasmTestBuilder {\n    custom_setup_genesis_only(\n        PRIVATE_CHAIN_ALLOW_AUCTION_BIDS,\n        PRIVATE_CHAIN_ALLOW_UNRESTRICTED_TRANSFERS,\n        PRIVATE_CHAIN_REFUND_HANDLING,\n        PRIVATE_CHAIN_FEE_HANDLING,\n        PRIVATE_CHAIN_COMPUTE_REWARDS,\n    )\n}\n\nfn make_wasm_config() -> WasmConfig {\n    let 
host_functions = HostFunctionCostsV1 {\n        // Required for non-standard payment that transfers to a system account.\n        // Depends on a bug filled to lower transfer host functions to be able to freely transfer\n        // funds inside payment code.\n        transfer_from_purse_to_account: HostFunction::fixed(0),\n        ..HostFunctionCostsV1::default()\n    };\n    let wasm_v1_config = WasmV1Config::new(\n        DEFAULT_WASM_MAX_MEMORY,\n        DEFAULT_MAX_STACK_HEIGHT,\n        OpcodeCosts::default(),\n        host_functions,\n    );\n    let wasm_v2_config = WasmV2Config::default();\n    WasmConfig::new(MessageLimits::default(), wasm_v1_config, wasm_v2_config)\n}\n\nfn make_private_chain_config(\n    allow_auction_bids: bool,\n    allow_unrestricted_transfers: bool,\n    refund_handling: RefundHandling,\n    fee_handling: FeeHandling,\n    compute_rewards: bool,\n) -> ChainspecConfig {\n    let administrators = PRIVATE_CHAIN_GENESIS_ADMIN_SET.clone();\n    let core_config = CoreConfig {\n        administrators,\n        allow_auction_bids,\n        allow_unrestricted_transfers,\n        refund_handling,\n        fee_handling,\n        compute_rewards,\n        ..Default::default()\n    };\n    let wasm_config = make_wasm_config();\n    let storage_costs = StorageCosts::default();\n    ChainspecConfig {\n        core_config,\n        wasm_config,\n        system_costs_config: Default::default(),\n        storage_costs,\n    }\n}\n\nfn private_chain_setup() -> LmdbWasmTestBuilder {\n    custom_setup_genesis_only(\n        PRIVATE_CHAIN_ALLOW_AUCTION_BIDS,\n        PRIVATE_CHAIN_ALLOW_UNRESTRICTED_TRANSFERS,\n        PRIVATE_CHAIN_REFUND_HANDLING,\n        PRIVATE_CHAIN_FEE_HANDLING,\n        PRIVATE_CHAIN_COMPUTE_REWARDS,\n    )\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_1045.rs",
    "content": "use num_traits::Zero;\nuse std::collections::BTreeSet;\n\nuse casper_engine_test_support::{\n    utils, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNTS, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_AUCTION_DELAY, DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS,\n    MINIMUM_ACCOUNT_CREATION_BALANCE, SYSTEM_ADDR, TIMESTAMP_MILLIS_INCREMENT,\n};\nuse casper_types::{\n    runtime_args,\n    system::auction::{DelegationRate, ARG_VALIDATOR_PUBLIC_KEYS, INITIAL_ERA_ID, METHOD_SLASH},\n    GenesisAccount, GenesisValidator, Motes, PublicKey, SecretKey, U512,\n};\nuse once_cell::sync::Lazy;\n\nconst CONTRACT_TRANSFER_TO_ACCOUNT: &str = \"transfer_to_account_u512.wasm\";\n\nconst ARG_AMOUNT: &str = \"amount\";\n\nconst TRANSFER_AMOUNT: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE + 1000;\n\nstatic ACCOUNT_1_PK: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([200; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nconst ACCOUNT_1_BALANCE: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE;\nconst ACCOUNT_1_BOND: u64 = 100_000;\n\nstatic ACCOUNT_2_PK: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([202; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nconst ACCOUNT_2_BALANCE: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE;\nconst ACCOUNT_2_BOND: u64 = 200_000;\n\nstatic ACCOUNT_3_PK: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([204; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nconst ACCOUNT_3_BALANCE: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE;\nconst ACCOUNT_3_BOND: u64 = 200_000;\n\nstatic ACCOUNT_4_PK: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([206; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nconst ACCOUNT_4_BALANCE: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE;\nconst ACCOUNT_4_BOND: u64 = 
200_000;\n\n#[ignore]\n#[test]\nfn should_run_ee_1045_squash_validators() {\n    let account_1 = GenesisAccount::account(\n        ACCOUNT_1_PK.clone(),\n        Motes::new(ACCOUNT_1_BALANCE),\n        Some(GenesisValidator::new(\n            Motes::new(ACCOUNT_1_BOND),\n            DelegationRate::zero(),\n        )),\n    );\n    let account_2 = GenesisAccount::account(\n        ACCOUNT_2_PK.clone(),\n        Motes::new(ACCOUNT_2_BALANCE),\n        Some(GenesisValidator::new(\n            Motes::new(ACCOUNT_2_BOND),\n            DelegationRate::zero(),\n        )),\n    );\n    let account_3 = GenesisAccount::account(\n        ACCOUNT_3_PK.clone(),\n        Motes::new(ACCOUNT_3_BALANCE),\n        Some(GenesisValidator::new(\n            Motes::new(ACCOUNT_3_BOND),\n            DelegationRate::zero(),\n        )),\n    );\n    let account_4 = GenesisAccount::account(\n        ACCOUNT_4_PK.clone(),\n        Motes::new(ACCOUNT_4_BALANCE),\n        Some(GenesisValidator::new(\n            Motes::new(ACCOUNT_4_BOND),\n            DelegationRate::zero(),\n        )),\n    );\n\n    let round_1_validator_squash = vec![ACCOUNT_2_PK.clone(), ACCOUNT_4_PK.clone()];\n    let round_2_validator_squash = vec![ACCOUNT_1_PK.clone(), ACCOUNT_3_PK.clone()];\n\n    let extra_accounts = vec![account_1, account_2, account_3, account_4];\n\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        tmp.extend(extra_accounts);\n        tmp\n    };\n\n    let mut timestamp_millis =\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS;\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let transfer_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            \"target\" => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n\n    let genesis_validator_weights = builder\n        .get_validator_weights(INITIAL_ERA_ID)\n        .expect(\"should have genesis validator weights\");\n\n    let mut new_era_id = INITIAL_ERA_ID + DEFAULT_AUCTION_DELAY + 1;\n    assert!(builder.get_validator_weights(new_era_id).is_none());\n    assert!(builder.get_validator_weights(new_era_id - 1).is_some());\n\n    builder.exec(transfer_request_1).expect_success().commit();\n\n    let auction_contract = builder.get_auction_contract_hash();\n\n    let squash_request_1 = {\n        let args = runtime_args! {\n            ARG_VALIDATOR_PUBLIC_KEYS => round_1_validator_squash.clone(),\n        };\n        ExecuteRequestBuilder::contract_call_by_hash(\n            *SYSTEM_ADDR,\n            auction_contract,\n            METHOD_SLASH,\n            args,\n        )\n        .build()\n    };\n\n    let squash_request_2 = {\n        let args = runtime_args! 
{\n            ARG_VALIDATOR_PUBLIC_KEYS => round_2_validator_squash.clone(),\n        };\n        ExecuteRequestBuilder::contract_call_by_hash(\n            *SYSTEM_ADDR,\n            auction_contract,\n            METHOD_SLASH,\n            args,\n        )\n        .build()\n    };\n\n    //\n    // ROUND 1\n    //\n    builder.exec(squash_request_1).expect_success().commit();\n\n    // new_era_id += 1;\n    assert!(builder.get_validator_weights(new_era_id).is_none());\n    assert!(builder.get_validator_weights(new_era_id - 1).is_some());\n\n    builder.run_auction(timestamp_millis, Vec::new());\n    timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n\n    let post_round_1_auction_weights = builder\n        .get_validator_weights(new_era_id)\n        .expect(\"should have new era validator weights computed\");\n\n    assert_ne!(genesis_validator_weights, post_round_1_auction_weights);\n\n    let lhs: BTreeSet<_> = genesis_validator_weights.keys().cloned().collect();\n    let rhs: BTreeSet<_> = post_round_1_auction_weights.keys().cloned().collect();\n    assert_eq!(\n        lhs.difference(&rhs).cloned().collect::<BTreeSet<_>>(),\n        round_1_validator_squash.into_iter().collect()\n    );\n\n    //\n    // ROUND 2\n    //\n    builder.exec(squash_request_2).expect_success().commit();\n    new_era_id += 1;\n    assert!(builder.get_validator_weights(new_era_id).is_none());\n    assert!(builder.get_validator_weights(new_era_id - 1).is_some());\n\n    builder.run_auction(timestamp_millis, Vec::new());\n\n    let post_round_2_auction_weights = builder\n        .get_validator_weights(new_era_id)\n        .expect(\"should have new era validator weights computed\");\n\n    assert_ne!(genesis_validator_weights, post_round_2_auction_weights);\n\n    let lhs: BTreeSet<_> = post_round_1_auction_weights.keys().cloned().collect();\n    let rhs: BTreeSet<_> = post_round_2_auction_weights.keys().cloned().collect();\n    assert_eq!(\n        
lhs.difference(&rhs).cloned().collect::<BTreeSet<_>>(),\n        round_2_validator_squash.into_iter().collect()\n    );\n\n    assert!(post_round_2_auction_weights.is_empty()); // all validators are squashed\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_1071.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{EntityAddr, RuntimeArgs};\n\nconst CONTRACT_EE_1071_REGRESSION: &str = \"ee_1071_regression.wasm\";\nconst CONTRACT_HASH_NAME: &str = \"contract\";\nconst NEW_UREF_ENTRYPOINT: &str = \"new_uref\";\n\n#[ignore]\n#[test]\nfn should_run_ee_1071_regression() {\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_EE_1071_REGRESSION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let contract_hash = (*account\n        .named_keys()\n        .get(CONTRACT_HASH_NAME)\n        .expect(\"should have hash\"))\n    .into_entity_hash_addr()\n    .expect(\"should be hash\")\n    .into();\n\n    let exec_request_2 = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        NEW_UREF_ENTRYPOINT,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let contract_before = builder.get_named_keys(EntityAddr::SmartContract(contract_hash.value()));\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let contract_after = builder.get_named_keys(EntityAddr::SmartContract(contract_hash.value()));\n\n    assert_ne!(\n        contract_after, contract_before,\n        \"contract object should be modified\"\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_1103.rs",
    "content": "use num_traits::Zero;\nuse once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    utils, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNTS,\n    DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS,\n    MINIMUM_ACCOUNT_CREATION_BALANCE, SYSTEM_ADDR, TIMESTAMP_MILLIS_INCREMENT,\n};\nuse casper_types::{\n    account::AccountHash,\n    runtime_args,\n    system::auction::{DelegationRate, ARG_DELEGATOR, ARG_VALIDATOR},\n    GenesisAccount, GenesisValidator, Motes, PublicKey, SecretKey, U512,\n};\n\nconst ARG_TARGET: &str = \"target\";\nconst ARG_AMOUNT: &str = \"amount\";\n\nconst CONTRACT_TRANSFER_TO_ACCOUNT: &str = \"transfer_to_account_u512.wasm\";\nconst CONTRACT_DELEGATE: &str = \"delegate.wasm\";\nconst TRANSFER_AMOUNT: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE;\n\nstatic FAUCET: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([1; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic VALIDATOR_1: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic VALIDATOR_2: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([5; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic VALIDATOR_3: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([7; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic DELEGATOR_1: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([203; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic DELEGATOR_2: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([205; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic DELEGATOR_3: Lazy<PublicKey> = 
Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([207; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\n\n// These values were chosen to correspond to the values in accounts.toml\n// at the time of their introduction.\n\nstatic FAUCET_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*FAUCET));\nstatic FAUCET_BALANCE: Lazy<U512> = Lazy::new(|| U512::from(100_000_000_000_000_000u64));\nstatic VALIDATOR_1_BALANCE: Lazy<U512> = Lazy::new(|| U512::from(100_000_000_000_000_000u64));\nstatic VALIDATOR_2_BALANCE: Lazy<U512> = Lazy::new(|| U512::from(100_000_000_000_000_000u64));\nstatic VALIDATOR_3_BALANCE: Lazy<U512> = Lazy::new(|| U512::from(100_000_000_000_000_000u64));\nstatic VALIDATOR_1_STAKE: Lazy<U512> = Lazy::new(|| U512::from(500_000_000_000_000_000u64));\nstatic VALIDATOR_2_STAKE: Lazy<U512> = Lazy::new(|| U512::from(400_000_000_000_000u64));\nstatic VALIDATOR_3_STAKE: Lazy<U512> = Lazy::new(|| U512::from(300_000_000_000_000u64));\nstatic DELEGATOR_1_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*DELEGATOR_1));\nstatic DELEGATOR_2_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*DELEGATOR_2));\nstatic DELEGATOR_3_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*DELEGATOR_3));\nstatic DELEGATOR_1_BALANCE: Lazy<U512> = Lazy::new(|| U512::from(1_000_000_000_000_000u64));\nstatic DELEGATOR_2_BALANCE: Lazy<U512> = Lazy::new(|| U512::from(1_000_000_000_000_000u64));\nstatic DELEGATOR_3_BALANCE: Lazy<U512> = Lazy::new(|| U512::from(1_000_000_000_000_000u64));\nstatic DELEGATOR_1_STAKE: Lazy<U512> = Lazy::new(|| U512::from(500_000_000_000_000u64));\nstatic DELEGATOR_2_STAKE: Lazy<U512> = Lazy::new(|| U512::from(400_000_000_000_000u64));\nstatic DELEGATOR_3_STAKE: Lazy<U512> = Lazy::new(|| U512::from(300_000_000_000_000u64));\n\n#[ignore]\n#[test]\nfn validator_scores_should_reflect_delegates() {\n    let accounts = {\n        let faucet = GenesisAccount::account(FAUCET.clone(), 
Motes::new(*FAUCET_BALANCE), None);\n        let validator_1 = GenesisAccount::account(\n            VALIDATOR_1.clone(),\n            Motes::new(*VALIDATOR_1_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(*VALIDATOR_1_STAKE),\n                DelegationRate::zero(),\n            )),\n        );\n        let validator_2 = GenesisAccount::account(\n            VALIDATOR_2.clone(),\n            Motes::new(*VALIDATOR_2_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(*VALIDATOR_2_STAKE),\n                DelegationRate::zero(),\n            )),\n        );\n        let validator_3 = GenesisAccount::account(\n            VALIDATOR_3.clone(),\n            Motes::new(*VALIDATOR_3_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(*VALIDATOR_3_STAKE),\n                DelegationRate::zero(),\n            )),\n        );\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        tmp.push(faucet);\n        tmp.push(validator_1);\n        tmp.push(validator_2);\n        tmp.push(validator_3);\n        tmp\n    };\n\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *FAUCET_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *FAUCET_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => *DELEGATOR_1_BALANCE\n        },\n    )\n    .build();\n\n    let delegator_2_fund_request = ExecuteRequestBuilder::standard(\n        *FAUCET_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *DELEGATOR_2_ADDR,\n            ARG_AMOUNT => *DELEGATOR_2_BALANCE\n        },\n    )\n    .build();\n\n    let delegator_3_fund_request = ExecuteRequestBuilder::standard(\n        *FAUCET_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_3_ADDR,\n            ARG_AMOUNT => *DELEGATOR_3_BALANCE\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        delegator_1_fund_request,\n        delegator_2_fund_request,\n        delegator_3_fund_request,\n    ];\n\n    let mut timestamp_millis =\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n    builder.run_genesis(run_genesis_request);\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    let mut era = builder.get_era();\n    let auction_delay = builder.get_auction_delay();\n\n    // Check initial weights\n    {\n        let era_weights = builder\n            .get_validator_weights(era)\n            .expect(\"should get validator weights\");\n\n        assert_eq!(era_weights.get(&VALIDATOR_1), Some(&*VALIDATOR_1_STAKE));\n        assert_eq!(era_weights.get(&VALIDATOR_2), Some(&*VALIDATOR_2_STAKE));\n        assert_eq!(era_weights.get(&VALIDATOR_3), Some(&*VALIDATOR_3_STAKE));\n    }\n\n    // Check weights after auction_delay eras\n    {\n        for _ in 0..=auction_delay {\n            builder.run_auction(timestamp_millis, Vec::new());\n            timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n        }\n\n        era = builder.get_era();\n        assert_eq!(builder.get_auction_delay(), auction_delay);\n\n        let era_weights = builder\n            .get_validator_weights(era + auction_delay)\n            .expect(\"should get validator 
weights\");\n\n        assert_eq!(era_weights.get(&VALIDATOR_1), Some(&*VALIDATOR_1_STAKE));\n        assert_eq!(era_weights.get(&VALIDATOR_2), Some(&*VALIDATOR_2_STAKE));\n        assert_eq!(era_weights.get(&VALIDATOR_3), Some(&*VALIDATOR_3_STAKE));\n    }\n\n    // Check weights after Delegator 1 delegates to Validator 1 (and auction_delay)\n    {\n        let delegator_1_delegate_request = ExecuteRequestBuilder::standard(\n            *DELEGATOR_1_ADDR,\n            CONTRACT_DELEGATE,\n            runtime_args! {\n                ARG_AMOUNT => *DELEGATOR_1_STAKE,\n                ARG_VALIDATOR => VALIDATOR_1.clone(),\n                ARG_DELEGATOR => DELEGATOR_1.clone(),\n            },\n        )\n        .build();\n\n        builder\n            .exec(delegator_1_delegate_request)\n            .commit()\n            .expect_success();\n\n        for _ in 0..=auction_delay {\n            builder.run_auction(timestamp_millis, Vec::new());\n            timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n        }\n\n        era = builder.get_era();\n        assert_eq!(builder.get_auction_delay(), auction_delay);\n\n        let era_weights = builder\n            .get_validator_weights(era)\n            .expect(\"should get validator weights\");\n\n        let validator_1_expected_stake = *VALIDATOR_1_STAKE + *DELEGATOR_1_STAKE;\n\n        let validator_2_expected_stake = *VALIDATOR_2_STAKE;\n\n        let validator_3_expected_stake = *VALIDATOR_3_STAKE;\n\n        assert_eq!(\n            era_weights.get(&VALIDATOR_1),\n            Some(&validator_1_expected_stake)\n        );\n        assert_eq!(\n            era_weights.get(&VALIDATOR_2),\n            Some(&validator_2_expected_stake)\n        );\n        assert_eq!(\n            era_weights.get(&VALIDATOR_3),\n            Some(&validator_3_expected_stake)\n        );\n    }\n\n    // Check weights after Delegator 2 delegates to Validator 1 (and auction_delay)\n    {\n        let delegator_2_delegate_request = 
ExecuteRequestBuilder::standard(\n            *DELEGATOR_2_ADDR,\n            CONTRACT_DELEGATE,\n            runtime_args! {\n                ARG_AMOUNT => *DELEGATOR_2_STAKE,\n                ARG_VALIDATOR => VALIDATOR_1.clone(),\n                ARG_DELEGATOR => DELEGATOR_2.clone(),\n            },\n        )\n        .build();\n\n        builder\n            .exec(delegator_2_delegate_request)\n            .commit()\n            .expect_success();\n\n        for _ in 0..=auction_delay {\n            builder.run_auction(timestamp_millis, Vec::new());\n            timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n        }\n\n        era = builder.get_era();\n        assert_eq!(builder.get_auction_delay(), auction_delay);\n\n        let era_weights = builder\n            .get_validator_weights(era)\n            .expect(\"should get validator weights\");\n\n        let validator_1_expected_stake =\n            *VALIDATOR_1_STAKE + *DELEGATOR_1_STAKE + *DELEGATOR_2_STAKE;\n\n        let validator_2_expected_stake = *VALIDATOR_2_STAKE;\n\n        let validator_3_expected_stake = *VALIDATOR_3_STAKE;\n\n        assert_eq!(\n            era_weights.get(&VALIDATOR_1),\n            Some(&validator_1_expected_stake)\n        );\n        assert_eq!(\n            era_weights.get(&VALIDATOR_2),\n            Some(&validator_2_expected_stake)\n        );\n        assert_eq!(\n            era_weights.get(&VALIDATOR_3),\n            Some(&validator_3_expected_stake)\n        );\n    }\n\n    // Check weights after Delegator 3 delegates to Validator 2 (and auction_delay)\n    {\n        let delegator_3_delegate_request = ExecuteRequestBuilder::standard(\n            *DELEGATOR_3_ADDR,\n            CONTRACT_DELEGATE,\n            runtime_args! 
{\n                ARG_AMOUNT => *DELEGATOR_3_STAKE,\n                ARG_VALIDATOR => VALIDATOR_2.clone(),\n                ARG_DELEGATOR => DELEGATOR_3.clone(),\n            },\n        )\n        .build();\n\n        builder\n            .exec(delegator_3_delegate_request)\n            .commit()\n            .expect_success();\n\n        for _ in 0..=auction_delay {\n            builder.run_auction(timestamp_millis, Vec::new());\n            timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n        }\n        era = builder.get_era();\n        assert_eq!(builder.get_auction_delay(), auction_delay);\n\n        let era_weights = builder\n            .get_validator_weights(era)\n            .expect(\"should get validator weights\");\n\n        let validator_1_expected_stake =\n            *VALIDATOR_1_STAKE + *DELEGATOR_1_STAKE + *DELEGATOR_2_STAKE;\n\n        let validator_2_expected_stake = *VALIDATOR_2_STAKE + *DELEGATOR_3_STAKE;\n\n        let validator_3_expected_stake = *VALIDATOR_3_STAKE;\n\n        assert_eq!(\n            era_weights.get(&VALIDATOR_1),\n            Some(&validator_1_expected_stake)\n        );\n        assert_eq!(\n            era_weights.get(&VALIDATOR_2),\n            Some(&validator_2_expected_stake)\n        );\n        assert_eq!(\n            era_weights.get(&VALIDATOR_3),\n            Some(&validator_3_expected_stake)\n        );\n    }\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_1119.rs",
    "content": "use num_traits::Zero;\nuse once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    utils, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNTS, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_ACCOUNT_INITIAL_BALANCE, DEFAULT_ACCOUNT_PUBLIC_KEY, DEFAULT_GENESIS_TIMESTAMP_MILLIS,\n    DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, MINIMUM_ACCOUNT_CREATION_BALANCE, SYSTEM_ADDR,\n};\nuse casper_execution_engine::engine_state::engine_config::DEFAULT_MINIMUM_DELEGATION_AMOUNT;\nuse casper_types::{\n    account::AccountHash,\n    runtime_args,\n    system::{\n        auction::{\n            BidsExt, DelegationRate, UnbondKind, ARG_DELEGATOR, ARG_VALIDATOR,\n            ARG_VALIDATOR_PUBLIC_KEYS, METHOD_SLASH,\n        },\n        mint::TOTAL_SUPPLY_KEY,\n    },\n    EntityAddr, GenesisAccount, GenesisValidator, Motes, PublicKey, SecretKey, U512,\n};\n\nconst CONTRACT_TRANSFER_TO_ACCOUNT: &str = \"transfer_to_account_u512.wasm\";\nconst CONTRACT_WITHDRAW_BID: &str = \"withdraw_bid.wasm\";\nconst CONTRACT_DELEGATE: &str = \"delegate.wasm\";\nconst CONTRACT_UNDELEGATE: &str = \"undelegate.wasm\";\n\nconst DELEGATE_AMOUNT_1: u64 = 95_000 + DEFAULT_MINIMUM_DELEGATION_AMOUNT;\nconst UNDELEGATE_AMOUNT_1: u64 = 17_000;\n\nconst TRANSFER_AMOUNT: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE;\n\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_PUBLIC_KEY: &str = \"public_key\";\n\nstatic VALIDATOR_1: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic VALIDATOR_1_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*VALIDATOR_1));\nconst VALIDATOR_1_STAKE: u64 = 250_000;\n\nconst VESTING_WEEKS: u64 = 14;\n\n#[ignore]\n#[test]\nfn should_slash_validator_and_their_delegators() {\n    let accounts = {\n        let validator_1 = GenesisAccount::account(\n            VALIDATOR_1.clone(),\n            Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n            
Some(GenesisValidator::new(\n                Motes::new(VALIDATOR_1_STAKE),\n                DelegationRate::zero(),\n            )),\n        );\n\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        tmp.push(validator_1);\n        tmp\n    };\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(run_genesis_request);\n\n    let fund_system_exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            \"target\" => *SYSTEM_ADDR,\n            \"amount\" => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    builder\n        .exec(fund_system_exec_request)\n        .expect_success()\n        .commit();\n\n    let auction = builder.get_auction_contract_hash();\n\n    //\n    // Validator delegates funds on other genesis validator\n    //\n\n    let delegate_exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n        },\n    )\n    .build();\n\n    builder\n        .exec(delegate_exec_request)\n        .expect_success()\n        .commit();\n\n    let bids = builder.get_bids();\n    let validator_1_bid = bids.validator_bid(&VALIDATOR_1).expect(\"should have bid\");\n    let bid_purse = validator_1_bid.bonding_purse();\n    assert_eq!(\n        builder.get_purse_balance(*bid_purse),\n        U512::from(VALIDATOR_1_STAKE),\n    );\n\n    let unbond_purses = builder.get_unbonds();\n    assert_eq!(unbond_purses.len(), 0);\n\n    //\n    // Unlock funds of genesis validators\n    //\n    builder.run_auction(\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS,\n        Vec::new(),\n    );\n\n    //\n    // Partial unbond through undelegate on other genesis validator\n    //\n\n    let unbond_amount = U512::from(VALIDATOR_1_STAKE / VESTING_WEEKS);\n\n    let undelegate_exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_UNDELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(UNDELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n        },\n    )\n    .build();\n    builder\n        .exec(undelegate_exec_request)\n        .commit()\n        .expect_success();\n\n    //\n    // Other genesis validator withdraws his bid\n    //\n\n    let unbond_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_WITHDRAW_BID,\n        runtime_args! 
{\n            ARG_AMOUNT => unbond_amount,\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n        },\n    )\n    .build();\n\n    builder.exec(unbond_request).expect_success().commit();\n\n    let unbond_purses = builder.get_unbonds();\n    assert_eq!(unbond_purses.len(), 2);\n\n    let unbond_kind = UnbondKind::Validator(VALIDATOR_1.clone());\n\n    let unbonds = unbond_purses\n        .get(&unbond_kind)\n        .cloned()\n        .expect(\"should have unbond\");\n    assert_eq!(unbonds.len(), 1);\n    let unbond = unbonds.first().expect(\"must get unbond\");\n    assert_eq!(unbond.eras().len(), 1);\n    assert_eq!(unbond.validator_public_key(), &*VALIDATOR_1,);\n    assert_eq!(\n        unbond.unbond_kind(),\n        &UnbondKind::Validator(VALIDATOR_1.clone()),\n    );\n    assert!(unbond.is_validator());\n    let era = unbond.eras().first().expect(\"should have eras\");\n    assert_eq!(era.amount(), &unbond_amount);\n\n    assert!(\n        unbond_purses.contains_key(&unbond_kind),\n        \"should be part of unbonds\"\n    );\n\n    let slash_request_1 = ExecuteRequestBuilder::contract_call_by_hash(\n        *SYSTEM_ADDR,\n        auction,\n        METHOD_SLASH,\n        runtime_args! 
{\n            ARG_VALIDATOR_PUBLIC_KEYS => vec![DEFAULT_ACCOUNT_PUBLIC_KEY.clone()]\n        },\n    )\n    .build();\n\n    builder.exec(slash_request_1).expect_success().commit();\n\n    let unbond_purses_noop = builder.get_unbonds();\n    assert_eq!(\n        unbond_purses, unbond_purses_noop,\n        \"slashing default validator should be noop because no unbonding was done\"\n    );\n\n    let bids = builder.get_bids();\n    assert!(!bids.is_empty());\n    bids.validator_bid(&VALIDATOR_1).expect(\"bids should exist\");\n\n    //\n    // Slash - only `withdraw_bid` amount is slashed\n    //\n    let total_supply_before_slashing: U512 = builder.get_value(\n        EntityAddr::System(builder.get_mint_contract_hash().value()),\n        TOTAL_SUPPLY_KEY,\n    );\n\n    let slash_request_2 = ExecuteRequestBuilder::contract_call_by_hash(\n        *SYSTEM_ADDR,\n        auction,\n        METHOD_SLASH,\n        runtime_args! {\n            ARG_VALIDATOR_PUBLIC_KEYS => vec![VALIDATOR_1.clone()]\n        },\n    )\n    .build();\n\n    builder.exec(slash_request_2).expect_success().commit();\n\n    let unbond_purses = builder.get_unbonds();\n    assert_eq!(unbond_purses.len(), 0);\n\n    let bids = builder.get_bids();\n    assert!(bids.validator_bid(&VALIDATOR_1).is_none());\n\n    let total_supply_after_slashing: U512 = builder.get_value(\n        EntityAddr::System(builder.get_mint_contract_hash().value()),\n        TOTAL_SUPPLY_KEY,\n    );\n\n    assert_eq!(\n        total_supply_after_slashing + VALIDATOR_1_STAKE + DELEGATE_AMOUNT_1,\n        total_supply_before_slashing,\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_1120.rs",
    "content": "use std::{collections::BTreeSet, iter::FromIterator};\n\nuse num_traits::Zero;\nuse once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    utils, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNTS, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_ACCOUNT_INITIAL_BALANCE, MINIMUM_ACCOUNT_CREATION_BALANCE, SYSTEM_ADDR,\n};\nuse casper_execution_engine::engine_state::engine_config::DEFAULT_MINIMUM_DELEGATION_AMOUNT;\nuse casper_types::{\n    account::AccountHash,\n    runtime_args,\n    system::auction::{\n        BidKind, BidsExt, DelegationRate, DelegatorKind, UnbondKind, ARG_DELEGATOR, ARG_VALIDATOR,\n        ARG_VALIDATOR_PUBLIC_KEYS, METHOD_SLASH,\n    },\n    GenesisAccount, GenesisValidator, Motes, PublicKey, SecretKey, U512,\n};\n\nconst CONTRACT_TRANSFER_TO_ACCOUNT: &str = \"transfer_to_account_u512.wasm\";\nconst CONTRACT_DELEGATE: &str = \"delegate.wasm\";\nconst CONTRACT_UNDELEGATE: &str = \"undelegate.wasm\";\n\nconst DELEGATE_AMOUNT_1: u64 = 1 + DEFAULT_MINIMUM_DELEGATION_AMOUNT;\nconst DELEGATE_AMOUNT_2: u64 = 2 + DEFAULT_MINIMUM_DELEGATION_AMOUNT;\nconst DELEGATE_AMOUNT_3: u64 = 3 + DEFAULT_MINIMUM_DELEGATION_AMOUNT;\nconst UNDELEGATE_AMOUNT_1: u64 = 1;\nconst UNDELEGATE_AMOUNT_2: u64 = 2;\nconst UNDELEGATE_AMOUNT_3: u64 = 3;\n\nconst TRANSFER_AMOUNT: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE;\n\nconst ARG_AMOUNT: &str = \"amount\";\n\nstatic VALIDATOR_1: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic VALIDATOR_2: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([4; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic DELEGATOR_1: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([5; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\n\nstatic VALIDATOR_2_ADDR: Lazy<AccountHash> = 
Lazy::new(|| AccountHash::from(&*VALIDATOR_2));\nstatic DELEGATOR_1_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*DELEGATOR_1));\n\nconst VALIDATOR_1_STAKE: u64 = 250_000;\nconst VALIDATOR_2_STAKE: u64 = 350_000;\n\n#[ignore]\n#[test]\nfn should_run_ee_1120_slash_delegators() {\n    let accounts = {\n        let validator_1 = GenesisAccount::account(\n            VALIDATOR_1.clone(),\n            Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(VALIDATOR_1_STAKE),\n                DelegationRate::zero(),\n            )),\n        );\n        let validator_2 = GenesisAccount::account(\n            VALIDATOR_2.clone(),\n            Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(VALIDATOR_2_STAKE),\n                DelegationRate::zero(),\n            )),\n        );\n\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        tmp.push(validator_1);\n        tmp.push(validator_2);\n        tmp\n    };\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(run_genesis_request);\n\n    let transfer_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            \"target\" => *SYSTEM_ADDR,\n            \"amount\" => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    builder.exec(transfer_request_1).expect_success().commit();\n\n    let transfer_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            \"target\" => *DELEGATOR_1_ADDR,\n            \"amount\" => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    builder.exec(transfer_request_2).expect_success().commit();\n\n    let auction = builder.get_auction_contract_hash();\n\n    // Validator delegates funds to other genesis validator\n\n    let delegate_exec_request_1 = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => VALIDATOR_2.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegate_exec_request_2 = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_2),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegate_exec_request_3 = ExecuteRequestBuilder::standard(\n        *VALIDATOR_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_3),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => VALIDATOR_2.clone(),\n        },\n    )\n    .build();\n\n    builder\n        .exec(delegate_exec_request_1)\n        .expect_success()\n        .commit();\n\n    builder\n        .exec(delegate_exec_request_2)\n        .expect_success()\n        .commit();\n\n    builder\n        .exec(delegate_exec_request_3)\n        .expect_success()\n        .commit();\n\n    // Ensure that initial bid entries exist for validator 1 and validator 2\n    let initial_bids = builder.get_bids();\n    let key_map = initial_bids.delegator_map();\n    let initial_bids_keys = key_map.keys().cloned().collect::<BTreeSet<_>>();\n    assert_eq!(\n        initial_bids_keys,\n        BTreeSet::from_iter(vec![VALIDATOR_2.clone(), VALIDATOR_1.clone()])\n    );\n\n    let initial_unbond_purses = builder.get_unbonds();\n    assert_eq!(initial_unbond_purses.len(), 0);\n\n    // DELEGATOR_1 partially unbonds from VALIDATOR_1\n    let undelegate_request_1 = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_UNDELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(UNDELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    // DELEGATOR_1 partially unbonds from VALIDATOR_2\n    let undelegate_request_2 = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_UNDELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(UNDELEGATE_AMOUNT_2),\n            ARG_VALIDATOR => VALIDATOR_2.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    // VALIDATOR_2 partially unbonds from VALIDATOR_1\n    let undelegate_request_3 = ExecuteRequestBuilder::standard(\n        *VALIDATOR_2_ADDR,\n        CONTRACT_UNDELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(UNDELEGATE_AMOUNT_3),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => VALIDATOR_2.clone(),\n        },\n    )\n    .build();\n\n    let expected_unbond_keys = (&*DELEGATOR_1, &*VALIDATOR_2);\n    builder.exec(undelegate_request_1).expect_success().commit();\n    builder.exec(undelegate_request_2).expect_success().commit();\n    builder.exec(undelegate_request_3).expect_success().commit();\n\n    // Check unbonding purses before slashing\n    let unbond_purses_before = builder.get_unbonds();\n    // should be an unbonding purse for each distinct undelegator\n    unbond_purses_before.contains_key(&UnbondKind::Validator(expected_unbond_keys.1.clone()));\n    let delegator_unbond = unbond_purses_before\n        .get(&UnbondKind::DelegatedPublicKey(\n            expected_unbond_keys.0.clone(),\n        ))\n        .expect(\"should have entry\");\n    println!(\"du {:?}\", delegator_unbond);\n    assert_eq!(\n        delegator_unbond.len(),\n        2,\n        \"this entity undelegated from 2 different validators\"\n    );\n    let undelegate_from_v1 = delegator_unbond[1]\n        .eras()\n        .first()\n        .expect(\"should have entry\");\n    assert_eq!(undelegate_from_v1.amount().as_u64(), UNDELEGATE_AMOUNT_1);\n    let undelegate_from_v2 = delegator_unbond[0]\n        .eras()\n        .first()\n        .expect(\"should have entry\");\n    assert_eq!(undelegate_from_v2.amount().as_u64(), UNDELEGATE_AMOUNT_2);\n\n    let dual_role_unbond = unbond_purses_before\n        .get(&UnbondKind::DelegatedPublicKey(expected_unbond_keys.1.clone()))\n        .expect(\"should have entry for entity that is both a validator and has also delegated to a different validator then unbonded from that other validator\");\n    assert_eq!(\n        dual_role_unbond.len(),\n        1,\n        \"this entity undelegated from 1 validator\"\n    );\n    let undelegate_from_v1 = dual_role_unbond[0]\n        .eras()\n  
      .first()\n        .expect(\"should have entry\");\n    assert_eq!(undelegate_from_v1.amount().as_u64(), UNDELEGATE_AMOUNT_3);\n\n    // Check bids before slashing\n\n    let bids_before: Vec<BidKind> = builder\n        .get_bids()\n        .into_iter()\n        .filter(|bid| !bid.is_unbond())\n        .collect();\n    /*\n        There should be 5 total bids at this point:\n        VALIDATOR1 and VALIDATOR2 each have a validator bid\n        DELEGATOR1 is delegated to each of them for 2 more bids\n        VALIDATOR2 is also delegated to VALIDATOR1 for 1 more bid\n    */\n    assert_eq!(bids_before.len(), 5);\n    let bids_before_keys = bids_before\n        .delegator_map()\n        .keys()\n        .cloned()\n        .collect::<BTreeSet<_>>();\n\n    assert_eq!(\n        bids_before_keys, initial_bids_keys,\n        \"prior to taking action, keys should match initial keys\"\n    );\n\n    let slash_request_1 = ExecuteRequestBuilder::contract_call_by_hash(\n        *SYSTEM_ADDR,\n        auction,\n        METHOD_SLASH,\n        runtime_args! 
{\n            ARG_VALIDATOR_PUBLIC_KEYS => vec![VALIDATOR_2.clone()]\n        },\n    )\n    .build();\n\n    builder.exec(slash_request_1).expect_success().commit();\n\n    // Compare bids after slashing validator 2\n    let bids_after: Vec<BidKind> = builder\n        .get_bids()\n        .into_iter()\n        .filter(|bid| !bid.is_unbond())\n        .collect();\n    assert_ne!(bids_before, bids_after);\n    /*\n        there should be 3 total bids at this point:\n        VALIDATOR1 was not slashed, and their bid remains\n        DELEGATOR1 is still delegated to VALIDATOR1 and their bid remains\n        VALIDATOR2's validator bid was slashed (and removed), but they are\n            also delegated to VALIDATOR1 and that delegation bid remains\n    */\n    assert_eq!(bids_after.len(), 3);\n    assert!(bids_after.validator_bid(&VALIDATOR_2).is_none());\n\n    let validator_1_bid = bids_after\n        .validator_bid(&VALIDATOR_1)\n        .expect(\"should have validator1 bid\");\n    let delegators = bids_after\n        .delegators_by_validator_public_key(validator_1_bid.validator_public_key())\n        .expect(\"should have delegators\");\n    assert_eq!(delegators.len(), 2);\n\n    bids_after.delegator_by_kind(&VALIDATOR_1, &DelegatorKind::PublicKey(VALIDATOR_2.clone())).expect(\"the delegation record from VALIDATOR2 should exist on VALIDATOR1, in this particular and unusual edge case\");\n    bids_after\n        .delegator_by_kind(&VALIDATOR_1, &DelegatorKind::PublicKey(DELEGATOR_1.clone()))\n        .expect(\"the delegation record from DELEGATOR_1 should exist on VALIDATOR1\");\n\n    let unbond_purses_after = builder.get_unbonds();\n    assert_ne!(unbond_purses_before, unbond_purses_after);\n    assert!(!unbond_purses_after.contains_key(&UnbondKind::Validator(VALIDATOR_1.clone())));\n    assert!(unbond_purses_after.contains_key(&UnbondKind::DelegatedPublicKey(DELEGATOR_1.clone())));\n    
assert!(unbond_purses_after.contains_key(&UnbondKind::DelegatedPublicKey(VALIDATOR_2.clone())));\n\n    // slash validator 1 to clear remaining bids and unbonding purses\n    let slash_request_2 = ExecuteRequestBuilder::contract_call_by_hash(\n        *SYSTEM_ADDR,\n        auction,\n        METHOD_SLASH,\n        runtime_args! {\n            ARG_VALIDATOR_PUBLIC_KEYS => vec![VALIDATOR_1.clone()]\n        },\n    )\n    .build();\n\n    builder.exec(slash_request_2).expect_success().commit();\n\n    let bids_after = builder.get_bids();\n    assert_eq!(\n        bids_after.len(),\n        0,\n        \"we slashed everybody so there should be no bids remaining\"\n    );\n\n    let unbond_purses_after = builder.get_unbonds();\n    assert_eq!(\n        unbond_purses_after.len(),\n        0,\n        \"we slashed everybody currently unbonded so there should be no unbonds remaining\"\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_1129.rs",
    "content": "use casper_wasm::builder;\nuse num_traits::Zero;\nuse once_cell::sync::Lazy;\n\nuse casper_types::{GenesisAccount, GenesisValidator, Key};\n\nuse casper_engine_test_support::{\n    utils, DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNTS,\n    DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, DEFAULT_ACCOUNT_PUBLIC_KEY,\n    DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::{\n    engine_state::Error, execution::ExecError, runtime::PreprocessingError,\n};\nuse casper_types::{\n    account::AccountHash,\n    addressable_entity::DEFAULT_ENTRY_POINT_NAME,\n    runtime_args,\n    system::auction::{self, DelegationRate},\n    Motes, PublicKey, RuntimeArgs, SecretKey, DEFAULT_DELEGATE_COST, U512,\n};\n\nuse crate::wasm_utils;\n\nconst ENTRY_POINT_NAME: &str = \"create_purse\";\nconst CONTRACT_KEY: &str = \"contract\";\nconst ACCESS_KEY: &str = \"access\";\n\nconst CONTRACT_EE_1129_REGRESSION: &str = \"ee_1129_regression.wasm\";\nconst ARG_AMOUNT: &str = \"amount\";\n\nstatic VALIDATOR_1: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic VALIDATOR_1_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*VALIDATOR_1));\nconst VALIDATOR_1_STAKE: u64 = 250_000;\nstatic UNDERFUNDED_DELEGATE_AMOUNT: Lazy<U512> = Lazy::new(|| U512::from(1));\nstatic UNDERFUNDED_ADD_BID_AMOUNT: Lazy<U512> = Lazy::new(|| U512::from(1));\nstatic CALL_STORED_CONTRACT_OVERHEAD: Lazy<U512> = Lazy::new(|| U512::from(10_001));\n\n#[ignore]\n#[allow(unused)]\n// #[test]\nfn should_run_ee_1129_underfunded_delegate_call() {\n    assert!(U512::from(DEFAULT_DELEGATE_COST) > *UNDERFUNDED_DELEGATE_AMOUNT);\n\n    let accounts = {\n        let validator_1 = GenesisAccount::account(\n            VALIDATOR_1.clone(),\n            Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n            
Some(GenesisValidator::new(\n                Motes::new(VALIDATOR_1_STAKE),\n                DelegationRate::zero(),\n            )),\n        );\n\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        tmp.push(validator_1);\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(run_genesis_request);\n\n    let auction = builder.get_auction_contract_hash();\n\n    let bid_amount = U512::from(100_000_000_000_000u64);\n\n    let deploy_hash = [42; 32];\n\n    let args = runtime_args! {\n        auction::ARG_DELEGATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n        auction::ARG_VALIDATOR => VALIDATOR_1.clone(),\n        auction::ARG_AMOUNT => bid_amount,\n    };\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_stored_session_hash(auction, auction::METHOD_DELEGATE, args)\n        .with_standard_payment(runtime_args! 
{\n            ARG_AMOUNT => *UNDERFUNDED_DELEGATE_AMOUNT, // underfunded deploy\n        })\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash(deploy_hash)\n        .build();\n\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(exec_request).commit();\n\n    let error = builder\n        .get_last_exec_result()\n        .expect(\"should have results\")\n        .error()\n        .cloned()\n        .expect(\"should have error\");\n\n    assert!(\n        matches!(error, Error::Exec(ExecError::GasLimit)),\n        \"Unexpected error {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[allow(unused)]\n// #[test]\nfn should_run_ee_1129_underfunded_add_bid_call() {\n    let accounts = {\n        let validator_1 = GenesisAccount::account(\n            VALIDATOR_1.clone(),\n            Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n            None,\n        );\n\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        tmp.push(validator_1);\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(run_genesis_request);\n\n    let auction = builder.get_auction_contract_hash();\n\n    let deploy_hash = [42; 32];\n\n    let delegation_rate: DelegationRate = 10;\n\n    let args = runtime_args! {\n            auction::ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n            auction::ARG_AMOUNT => *UNDERFUNDED_ADD_BID_AMOUNT,\n            auction::ARG_DELEGATION_RATE => delegation_rate,\n    };\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*VALIDATOR_1_ADDR)\n        .with_stored_session_hash(auction, auction::METHOD_ADD_BID, args)\n        .with_standard_payment(runtime_args! 
{\n            ARG_AMOUNT => *UNDERFUNDED_DELEGATE_AMOUNT,\n        })\n        .with_authorization_keys(&[*VALIDATOR_1_ADDR])\n        .with_deploy_hash(deploy_hash)\n        .build();\n\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(exec_request).commit();\n\n    let error = builder\n        .get_last_exec_result()\n        .expect(\"should have results\")\n        .error()\n        .cloned()\n        .expect(\"should have error\");\n\n    assert!(\n        matches!(error, Error::Exec(ExecError::GasLimit)),\n        \"Unexpected error {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[allow(unused)]\n// #[test]\nfn should_run_ee_1129_underfunded_mint_contract_call() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let install_exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_EE_1129_REGRESSION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_stored_session_named_key(CONTRACT_KEY, ENTRY_POINT_NAME, RuntimeArgs::default())\n        .with_standard_payment(runtime_args! 
{\n            ARG_AMOUNT => *CALL_STORED_CONTRACT_OVERHEAD,\n        })\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([42; 32])\n        .build();\n\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(install_exec_request).expect_success().commit();\n\n    builder.exec(exec_request).commit();\n\n    let error = builder\n        .get_last_exec_result()\n        .expect(\"should have results\")\n        .error()\n        .cloned()\n        .expect(\"should have error\");\n\n    assert!(\n        matches!(error, Error::Exec(ExecError::GasLimit)),\n        \"Unexpected error {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn should_not_panic_when_calling_session_contract_by_uref() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let install_exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_EE_1129_REGRESSION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_stored_session_named_key(ACCESS_KEY, ENTRY_POINT_NAME, RuntimeArgs::default())\n        .with_standard_payment(runtime_args! 
{\n            ARG_AMOUNT => *CALL_STORED_CONTRACT_OVERHEAD,\n        })\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([42; 32])\n        .build();\n\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(install_exec_request).expect_success().commit();\n\n    builder.exec(exec_request).commit();\n\n    let error = builder\n        .get_last_exec_result()\n        .expect(\"should have results\")\n        .error()\n        .cloned()\n        .expect(\"should have error\");\n\n    assert!(\n        matches!(error, Error::InvalidKeyVariant(Key::URef(_))),\n        \"Unexpected error {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn should_not_panic_when_calling_payment_contract_by_uref() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let install_exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_EE_1129_REGRESSION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::new())\n        .with_stored_payment_named_key(ACCESS_KEY, ENTRY_POINT_NAME, RuntimeArgs::new())\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([42; 32])\n        .build();\n\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(install_exec_request).expect_success().commit();\n\n    builder.exec(exec_request).commit();\n\n    let error = builder\n        .get_last_exec_result()\n        .expect(\"should have results\")\n        .error()\n        .cloned()\n        .expect(\"should have error\");\n\n    assert!(\n        matches!(error, Error::InvalidKeyVariant(Key::URef(_))),\n        \"Unexpected error {:?}\",\n        
error\n    );\n}\n\n#[ignore]\n#[test]\nfn should_not_panic_when_calling_contract_package_by_uref() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let install_exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_EE_1129_REGRESSION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_stored_versioned_contract_by_name(\n            ACCESS_KEY,\n            None,\n            ENTRY_POINT_NAME,\n            RuntimeArgs::default(),\n        )\n        .with_standard_payment(runtime_args! {\n            ARG_AMOUNT => *CALL_STORED_CONTRACT_OVERHEAD,\n        })\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([42; 32])\n        .build();\n\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(install_exec_request).expect_success().commit();\n\n    builder.exec(exec_request).commit();\n\n    let error = builder\n        .get_last_exec_result()\n        .expect(\"should have results\")\n        .error()\n        .cloned()\n        .expect(\"should have error\");\n\n    assert!(\n        matches!(error, Error::InvalidKeyVariant(Key::URef(_))),\n        \"Unexpected error {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn should_not_panic_when_calling_payment_versioned_contract_by_uref() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let install_exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_EE_1129_REGRESSION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_session_bytes(wasm_utils::do_nothing_bytes(), 
RuntimeArgs::new())\n        .with_stored_versioned_payment_contract_by_name(\n            ACCESS_KEY,\n            None,\n            ENTRY_POINT_NAME,\n            RuntimeArgs::new(),\n        )\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([42; 32])\n        .build();\n\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(install_exec_request).expect_success().commit();\n\n    builder.exec(exec_request).commit();\n\n    let error = builder\n        .get_last_exec_result()\n        .expect(\"should have results\")\n        .error()\n        .cloned()\n        .expect(\"should have error\");\n    assert!(\n        matches!(error, Error::InvalidKeyVariant(Key::URef(_))),\n        \"Unexpected error {:?}\",\n        error\n    );\n}\n\nfn do_nothing_without_memory() -> Vec<u8> {\n    let module = builder::module()\n        .function()\n        // A signature with 0 params and no return type\n        .signature()\n        .build()\n        .body()\n        .build()\n        .build()\n        // Export above function\n        .export()\n        .field(DEFAULT_ENTRY_POINT_NAME)\n        .build()\n        .build();\n    casper_wasm::serialize(module).expect(\"should serialize\")\n}\n\n#[ignore]\n#[test]\nfn should_not_panic_when_calling_module_without_memory() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_session_bytes(do_nothing_without_memory(), RuntimeArgs::new())\n        .with_standard_payment(runtime_args! 
{\n            ARG_AMOUNT => *DEFAULT_PAYMENT,\n        })\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([42; 32])\n        .build();\n\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(exec_request).commit();\n\n    let error = builder\n        .get_last_exec_result()\n        .expect(\"should have results\")\n        .error()\n        .cloned()\n        .expect(\"should have error\");\n\n    assert!(\n        matches!(\n            error,\n            Error::WasmPreprocessing(PreprocessingError::MissingMemorySection)\n        ),\n        \"Unexpected error {:?}\",\n        error\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_1152.rs",
    "content": "use num_traits::Zero;\nuse once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    utils, ExecuteRequestBuilder, LmdbWasmTestBuilder, StepRequestBuilder, DEFAULT_ACCOUNTS,\n    DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, DEFAULT_GENESIS_TIMESTAMP_MILLIS,\n    DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, MINIMUM_ACCOUNT_CREATION_BALANCE,\n    TIMESTAMP_MILLIS_INCREMENT,\n};\nuse casper_execution_engine::engine_state::engine_config::DEFAULT_MINIMUM_DELEGATION_AMOUNT;\nuse casper_types::{\n    account::AccountHash,\n    runtime_args,\n    system::auction::{self, DelegationRate, INITIAL_ERA_ID},\n    GenesisAccount, GenesisValidator, Motes, ProtocolVersion, PublicKey, SecretKey, U512,\n};\n\nconst CONTRACT_TRANSFER_TO_ACCOUNT: &str = \"transfer_to_account_u512.wasm\";\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_TARGET: &str = \"target\";\n\nstatic DELEGATOR_1_SECRET_KEY: Lazy<SecretKey> =\n    Lazy::new(|| SecretKey::ed25519_from_bytes([226; SecretKey::ED25519_LENGTH]).unwrap());\nstatic VALIDATOR_1_SECRET_KEY: Lazy<SecretKey> =\n    Lazy::new(|| SecretKey::ed25519_from_bytes([227; SecretKey::ED25519_LENGTH]).unwrap());\n\nstatic VALIDATOR_1: Lazy<PublicKey> = Lazy::new(|| PublicKey::from(&*VALIDATOR_1_SECRET_KEY));\nstatic DELEGATOR_1: Lazy<PublicKey> = Lazy::new(|| PublicKey::from(&*DELEGATOR_1_SECRET_KEY));\nstatic DELEGATOR_1_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*DELEGATOR_1));\n\nconst VALIDATOR_STAKE: u64 = 1_000_000_000;\nconst DELEGATE_AMOUNT: u64 = 1_234_567 + DEFAULT_MINIMUM_DELEGATION_AMOUNT;\n\n#[ignore]\n#[test]\nfn should_run_ee_1152_regression_test() {\n    let accounts = {\n        let validator_1 = GenesisAccount::account(\n            VALIDATOR_1.clone(),\n            Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(VALIDATOR_STAKE),\n                DelegationRate::zero(),\n            )),\n        );\n        let validator_2 = 
GenesisAccount::account(\n            DELEGATOR_1.clone(),\n            Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(VALIDATOR_STAKE),\n                DelegationRate::zero(),\n            )),\n        );\n\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        tmp.push(validator_1);\n        tmp.push(validator_2);\n        tmp\n    };\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let fund_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => PublicKey::System.to_account_hash(),\n            ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE)\n        },\n    )\n    .build();\n\n    let fund_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! { ARG_TARGET => *DELEGATOR_1_ADDR, ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) },\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n\n    builder.exec(fund_request_1).commit().expect_success();\n    builder.exec(fund_request_2).commit().expect_success();\n\n    let auction_hash = builder.get_auction_contract_hash();\n\n    let delegate_request_1 = ExecuteRequestBuilder::contract_call_by_hash(\n        *DELEGATOR_1_ADDR,\n        auction_hash,\n        auction::METHOD_DELEGATE,\n        runtime_args! {\n            auction::ARG_DELEGATOR => DELEGATOR_1.clone(),\n            auction::ARG_VALIDATOR => VALIDATOR_1.clone(),\n            auction::ARG_AMOUNT => U512::from(DELEGATE_AMOUNT),\n        },\n    )\n    .build();\n\n    let undelegate_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DELEGATOR_1_ADDR,\n        auction_hash,\n        auction::METHOD_UNDELEGATE,\n        runtime_args! 
{\n            auction::ARG_DELEGATOR => DELEGATOR_1.clone(),\n            auction::ARG_VALIDATOR => VALIDATOR_1.clone(),\n            auction::ARG_AMOUNT => U512::from(DELEGATE_AMOUNT),\n        },\n    )\n    .build();\n\n    builder.exec(delegate_request_1).expect_success().commit();\n\n    let mut timestamp_millis =\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS;\n\n    // In reality a step request is made, but to simplify the test I'm just calling the auction part\n    // only.\n    builder.run_auction(timestamp_millis, Vec::new());\n    timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n\n    builder.run_auction(timestamp_millis, Vec::new());\n    timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n\n    builder.run_auction(timestamp_millis, Vec::new()); // At this point paying out rewards would fail\n    timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n\n    builder.run_auction(timestamp_millis, Vec::new());\n    timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n\n    let era_validators = builder.get_era_validators();\n\n    assert!(!era_validators.is_empty());\n\n    let (era_id, _) = era_validators\n        .into_iter()\n        .last()\n        .expect(\"should have last element\");\n    assert!(era_id > INITIAL_ERA_ID, \"{}\", era_id);\n\n    builder.exec(undelegate_request).expect_success().commit();\n\n    let step_request = StepRequestBuilder::new()\n        .with_parent_state_hash(builder.get_post_state_hash())\n        .with_protocol_version(ProtocolVersion::V1_0_0)\n        // Next era id is used for returning future era validators, which we don't need to inspect\n        // in this test.\n        .with_next_era_id(era_id)\n        .with_era_end_timestamp_millis(timestamp_millis);\n\n    builder.step(step_request.build());\n\n    builder.run_auction(timestamp_millis, Vec::new());\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_1160.rs",
    "content": "use casper_engine_test_support::{\n    LmdbWasmTestBuilder, TransferRequestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_ACCOUNT_INITIAL_BALANCE, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{account::AccountHash, U512};\n\nconst ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]);\n\n#[ignore]\n#[test]\nfn ee_1160_wasmless_transfer_should_empty_account() {\n    let transfer_amount = U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should get default_account\");\n\n    let no_wasm_transfer_request_1 =\n        TransferRequestBuilder::new(transfer_amount, ACCOUNT_1_ADDR).build();\n    builder\n        .transfer_and_commit(no_wasm_transfer_request_1)\n        .expect_success();\n\n    let last_result = builder.get_exec_result_owned(0).unwrap();\n\n    assert!(last_result.error().is_none(), \"{:?}\", last_result);\n    assert!(!last_result.transfers().is_empty());\n\n    let default_account_balance_after = builder.get_purse_balance(default_account.main_purse());\n\n    let account_1 = builder\n        .get_entity_by_account_hash(ACCOUNT_1_ADDR)\n        .expect(\"should get default_account\");\n    let account_1_balance = builder.get_purse_balance(account_1.main_purse());\n\n    assert_eq!(default_account_balance_after, U512::zero());\n    assert_eq!(account_1_balance, transfer_amount);\n}\n\n#[ignore]\n#[test]\nfn ee_1160_transfer_larger_than_balance_should_fail() {\n    let transfer_amount = U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE)\n        // One above the available balance to transfer should raise an InsufficientPayment already\n        + U512::one();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let no_wasm_transfer_request_1 =\n        
TransferRequestBuilder::new(transfer_amount, ACCOUNT_1_ADDR).build();\n    builder.transfer_and_commit(no_wasm_transfer_request_1);\n\n    let last_result = builder.get_exec_result_owned(0).unwrap();\n\n    assert!(\n        last_result.error().is_some(),\n        \"Expected error but last result is {:?}\",\n        last_result\n    );\n    assert!(\n        last_result.transfers().is_empty(),\n        \"Expected empty list of transfers\"\n    );\n}\n\n#[ignore]\n#[test]\nfn ee_1160_large_wasmless_transfer_should_avoid_overflow() {\n    let transfer_amount = U512::max_value();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let no_wasm_transfer_request_1 =\n        TransferRequestBuilder::new(transfer_amount, ACCOUNT_1_ADDR).build();\n    builder.transfer_and_commit(no_wasm_transfer_request_1);\n\n    let last_result = builder.get_exec_result_owned(0).unwrap();\n\n    assert!(\n        last_result.error().is_some(),\n        \"Expected error but last result is {:?}\",\n        last_result\n    );\n    assert!(\n        last_result.transfers().is_empty(),\n        \"Expected empty list of transfers\"\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_1163.rs",
    "content": "use casper_engine_test_support::{\n    LmdbWasmTestBuilder, TransferRequestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::engine_state::Error;\nuse casper_storage::{data_access_layer::TransferRequest, system::transfer::TransferError};\nuse casper_types::{\n    account::AccountHash, system::handle_payment, Gas, MintCosts, Motes, RuntimeArgs, SystemConfig,\n    U512,\n};\n\nconst ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]);\n\nfn setup() -> LmdbWasmTestBuilder {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n    builder\n}\n\nfn should_enforce_limit_for_user_error(\n    builder: &mut LmdbWasmTestBuilder,\n    request: TransferRequest,\n) -> Error {\n    let transfer_cost = Gas::from(SystemConfig::default().mint_costs().transfer);\n\n    builder.transfer_and_commit(request);\n\n    let response = builder\n        .get_exec_result_owned(0)\n        .expect(\"should have result\");\n\n    assert_eq!(response.limit(), transfer_cost);\n    assert_eq!(response.consumed(), transfer_cost);\n\n    let handle_payment = builder.get_handle_payment_contract();\n    let payment_purse = handle_payment\n        .named_keys()\n        .get(handle_payment::PAYMENT_PURSE_KEY)\n        .expect(\"should have handle payment payment purse\")\n        .into_uref()\n        .expect(\"should have uref\");\n    let payment_purse_balance = builder.get_purse_balance(payment_purse);\n\n    assert_eq!(payment_purse_balance, U512::zero());\n\n    response.error().cloned().expect(\"should have error\")\n}\n\n#[ignore]\n#[test]\nfn should_enforce_system_host_gas_limit() {\n    // implies 1:1 gas/motes conversion rate regardless of gas price\n    let transfer_amount = Motes::new(U512::one());\n\n    let transfer_request = TransferRequestBuilder::new(transfer_amount.value(), ACCOUNT_1_ADDR)\n        .with_initiator(*DEFAULT_ACCOUNT_ADDR)\n        .build();\n\n    let 
mut builder = setup();\n    let default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have default account\");\n    let main_purse = default_account.main_purse();\n    let purse_balance_before = builder.get_purse_balance(main_purse);\n\n    builder\n        .transfer_and_commit(transfer_request)\n        .expect_success();\n\n    let purse_balance_after = builder.get_purse_balance(main_purse);\n\n    let transfer_cost = Gas::from(MintCosts::default().transfer);\n    let response = builder\n        .get_exec_result_owned(0)\n        .expect(\"should have result\");\n    assert_eq!(\n        response.limit(),\n        transfer_cost,\n        \"expected actual limit is {}\",\n        transfer_cost\n    );\n    assert_eq!(\n        purse_balance_before - transfer_amount.value(),\n        purse_balance_after\n    );\n}\n\n#[ignore]\n#[test]\nfn should_detect_wasmless_transfer_missing_args() {\n    let transfer_args = RuntimeArgs::new();\n    let transfer_request = TransferRequestBuilder::new(1, AccountHash::default())\n        .with_args(transfer_args)\n        .build();\n\n    let mut builder = setup();\n    let error = should_enforce_limit_for_user_error(&mut builder, transfer_request);\n\n    assert!(matches!(\n        error,\n        Error::Transfer(TransferError::MissingArgument)\n    ));\n}\n\n#[ignore]\n#[test]\nfn should_detect_wasmless_transfer_invalid_purse() {\n    let mut builder = setup();\n    let default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have default account\");\n    let main_purse = default_account.main_purse();\n\n    let transfer_request = TransferRequestBuilder::new(1, main_purse).build();\n\n    let error = should_enforce_limit_for_user_error(&mut builder, transfer_request);\n    assert!(matches!(\n        error,\n        Error::Transfer(TransferError::InvalidPurse)\n    ));\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_1174.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_PUBLIC_KEY,\n    LOCAL_GENESIS_REQUEST,\n};\n\nuse casper_execution_engine::{engine_state::Error, execution::ExecError};\nuse casper_types::{\n    runtime_args,\n    system::{\n        self,\n        auction::{self, DelegationRate},\n    },\n    ApiError, DEFAULT_MINIMUM_BID_AMOUNT, U512,\n};\n\nconst LARGE_DELEGATION_RATE: DelegationRate = 101;\n\n#[ignore]\n#[test]\nfn should_run_ee_1174_delegation_rate_too_high() {\n    let bid_amount = U512::from(DEFAULT_MINIMUM_BID_AMOUNT);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let auction = builder.get_auction_contract_hash();\n\n    let args = runtime_args! {\n        auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n        auction::ARG_AMOUNT => bid_amount,\n        auction::ARG_DELEGATION_RATE => LARGE_DELEGATION_RATE,\n    };\n\n    let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        auction,\n        auction::METHOD_ADD_BID,\n        args,\n    )\n    .build();\n\n    builder.exec(add_bid_request).commit();\n\n    let error = builder\n        .get_last_exec_result()\n        .expect(\"should have results\")\n        .error()\n        .cloned()\n        .expect(\"should have error\");\n\n    assert!(matches!(\n        error,\n        Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error))) if auction_error == system::auction::Error::DelegationRateTooLarge as u8));\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_1217.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_PUBLIC_KEY,\n    LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse casper_execution_engine::{\n    engine_state::{engine_config::DEFAULT_MINIMUM_DELEGATION_AMOUNT, Error as CoreError},\n    execution::ExecError,\n};\nuse casper_types::{\n    runtime_args, system::auction, ApiError, PublicKey, SecretKey, DEFAULT_MINIMUM_BID_AMOUNT, U512,\n};\nuse once_cell::sync::Lazy;\n\nconst CONTRACT_REGRESSION: &str = \"ee_1217_regression.wasm\";\nconst CONTRACT_ADD_BID: &str = \"add_bid.wasm\";\nconst CONTRACT_WITHDRAW_BID: &str = \"withdraw_bid.wasm\";\n\nconst PACKAGE_NAME: &str = \"call_auction\";\nconst CONTRACT_ADD_BID_ENTRYPOINT_SESSION: &str = \"add_bid_session\";\nconst CONTRACT_ADD_BID_ENTRYPOINT_CONTRACT: &str = \"add_bid_contract\";\nconst CONTRACT_WITHDRAW_BID_ENTRYPOINT_SESSION: &str = \"withdraw_bid_session\";\nconst CONTRACT_WITHDRAW_BID_ENTRYPOINT_CONTRACT: &str = \"withdraw_bid_contract\";\nconst CONTRACT_DELEGATE_ENTRYPOINT_SESSION: &str = \"delegate_session\";\nconst CONTRACT_DELEGATE_ENTRYPOINT_CONTRACT: &str = \"delegate_contract\";\nconst CONTRACT_UNDELEGATE_ENTRYPOINT_SESSION: &str = \"undelegate_session\";\nconst CONTRACT_UNDELEGATE_ENTRYPOINT_CONTRACT: &str = \"undelegate_contract\";\nconst CONTRACT_ACTIVATE_BID_ENTRYPOINT_CONTRACT: &str = \"activate_bid_contract\";\nconst CONTRACT_ACTIVATE_BID_ENTRYPOINT_SESSION: &str = \"activate_bid_session\";\n\nstatic VALIDATOR_PUBLIC_KEY: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([33; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\n\n#[ignore]\n#[test]\nfn should_fail_to_add_bid_from_stored_session_code() {\n    let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone();\n\n    let store_call_auction_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        
CONTRACT_REGRESSION,\n        runtime_args! {},\n    )\n    .build();\n\n    let add_bid_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        PACKAGE_NAME,\n        None,\n        CONTRACT_ADD_BID_ENTRYPOINT_SESSION,\n        runtime_args! {\n            auction::ARG_PUBLIC_KEY => default_public_key_arg,\n        },\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder\n        .exec(store_call_auction_request)\n        .commit()\n        .expect_success();\n\n    builder.exec(add_bid_request);\n\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(error, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(\n            auction_error,\n        ))) if auction_error == auction::Error::InvalidContext as u8)\n    );\n}\n\n#[ignore]\n#[test]\nfn should_fail_to_add_bid_from_stored_contract_code() {\n    let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone();\n\n    let store_call_auction_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_REGRESSION,\n        runtime_args! {},\n    )\n    .build();\n\n    let add_bid_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        PACKAGE_NAME,\n        None,\n        CONTRACT_ADD_BID_ENTRYPOINT_CONTRACT,\n        runtime_args! 
{\n            auction::ARG_PUBLIC_KEY => default_public_key_arg,\n        },\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder\n        .exec(store_call_auction_request)\n        .commit()\n        .expect_success();\n\n    builder.exec(add_bid_request);\n\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(error, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(\n            auction_error,\n        ))) if auction_error == auction::Error::InvalidContext as u8)\n    );\n}\n\n#[ignore]\n#[test]\nfn should_fail_to_withdraw_bid_from_stored_session_code() {\n    let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone();\n\n    let add_bid_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT), // smaller amount results in Error::BondTooSmall\n            auction::ARG_PUBLIC_KEY => default_public_key_arg.clone(),\n            auction::ARG_DELEGATION_RATE => 0u8,\n        },\n    )\n    .build();\n\n    let store_call_auction_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_REGRESSION,\n        runtime_args! {},\n    )\n    .build();\n\n    let withdraw_bid_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        PACKAGE_NAME,\n        None,\n        CONTRACT_WITHDRAW_BID_ENTRYPOINT_SESSION,\n        runtime_args! 
{\n            auction::ARG_PUBLIC_KEY => default_public_key_arg,\n        },\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(add_bid_request).commit().expect_success();\n\n    builder\n        .exec(store_call_auction_request)\n        .commit()\n        .expect_success();\n\n    builder.exec(withdraw_bid_request);\n\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(error, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(\n            auction_error,\n        ))) if auction_error == auction::Error::InvalidContext as u8)\n    );\n}\n\n#[ignore]\n#[test]\nfn should_fail_to_withdraw_bid_from_stored_contract_code() {\n    let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone();\n\n    let add_bid_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT), // smaller amount results in Error::BondTooSmall\n            auction::ARG_PUBLIC_KEY => default_public_key_arg.clone(),\n            auction::ARG_DELEGATION_RATE => 0u8,\n        },\n    )\n    .build();\n\n    let store_call_auction_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_REGRESSION,\n        runtime_args! {},\n    )\n    .build();\n\n    let withdraw_bid_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        PACKAGE_NAME,\n        None,\n        CONTRACT_WITHDRAW_BID_ENTRYPOINT_CONTRACT,\n        runtime_args! 
{\n            auction::ARG_PUBLIC_KEY => default_public_key_arg,\n        },\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(add_bid_request).commit().expect_success();\n\n    builder\n        .exec(store_call_auction_request)\n        .commit()\n        .expect_success();\n\n    builder.exec(withdraw_bid_request);\n\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(error, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(\n            auction_error,\n        ))) if auction_error == auction::Error::InvalidContext as u8)\n    );\n}\n\n#[ignore]\n#[test]\nfn should_fail_to_delegate_from_stored_session_code() {\n    let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone();\n\n    let validator_public_key_arg = VALIDATOR_PUBLIC_KEY.clone();\n    let validator_addr = VALIDATOR_PUBLIC_KEY.to_account_hash();\n\n    let validator_fund_request = {\n        const CONTRACT_TRANSFER_TO_ACCOUNT: &str = \"transfer_to_account_u512.wasm\";\n        const ARG_AMOUNT: &str = \"amount\";\n        const ARG_TARGET: &str = \"target\";\n\n        ExecuteRequestBuilder::standard(\n            *DEFAULT_ACCOUNT_ADDR,\n            CONTRACT_TRANSFER_TO_ACCOUNT,\n            runtime_args! {\n                ARG_TARGET => validator_addr,\n                ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE)\n            },\n        )\n        .build()\n    };\n\n    let add_bid_request = ExecuteRequestBuilder::standard(\n        VALIDATOR_PUBLIC_KEY.to_account_hash(),\n        CONTRACT_ADD_BID,\n        runtime_args! 
{\n            auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT), // smaller amount results in Error::BondTooSmall\n            auction::ARG_PUBLIC_KEY => validator_public_key_arg.clone(),\n            auction::ARG_DELEGATION_RATE => 0u8,\n        },\n    )\n    .build();\n\n    let store_call_auction_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_REGRESSION,\n        runtime_args! {},\n    )\n    .build();\n\n    let delegate_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        PACKAGE_NAME,\n        None,\n        CONTRACT_DELEGATE_ENTRYPOINT_SESSION,\n        runtime_args! {\n            auction::ARG_DELEGATOR => default_public_key_arg,\n            auction::ARG_VALIDATOR => validator_public_key_arg,\n        },\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder\n        .exec(validator_fund_request)\n        .commit()\n        .expect_success();\n\n    builder.exec(add_bid_request).commit().expect_success();\n\n    builder\n        .exec(store_call_auction_request)\n        .commit()\n        .expect_success();\n\n    builder.exec(delegate_request);\n\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(error, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(\n            auction_error,\n        ))) if auction_error == auction::Error::InvalidContext as u8)\n    );\n}\n\n#[ignore]\n#[test]\nfn should_fail_to_delegate_from_stored_contract_code() {\n    let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone();\n\n    let validator_public_key_arg = VALIDATOR_PUBLIC_KEY.clone();\n    let validator_addr = VALIDATOR_PUBLIC_KEY.to_account_hash();\n\n    let validator_fund_request = {\n        const CONTRACT_TRANSFER_TO_ACCOUNT: &str = \"transfer_to_account_u512.wasm\";\n        const 
ARG_AMOUNT: &str = \"amount\";\n        const ARG_TARGET: &str = \"target\";\n\n        ExecuteRequestBuilder::standard(\n            *DEFAULT_ACCOUNT_ADDR,\n            CONTRACT_TRANSFER_TO_ACCOUNT,\n            runtime_args! {\n                ARG_TARGET => validator_addr,\n                ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE)\n            },\n        )\n        .build()\n    };\n\n    let add_bid_request = ExecuteRequestBuilder::standard(\n        VALIDATOR_PUBLIC_KEY.to_account_hash(),\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT), // smaller amount results in Error::BondTooSmall\n            auction::ARG_PUBLIC_KEY => validator_public_key_arg.clone(),\n            auction::ARG_DELEGATION_RATE => 0u8,\n        },\n    )\n    .build();\n\n    let store_call_auction_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_REGRESSION,\n        runtime_args! {},\n    )\n    .build();\n\n    let delegate_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        PACKAGE_NAME,\n        None,\n        CONTRACT_DELEGATE_ENTRYPOINT_CONTRACT,\n        runtime_args! 
{\n            auction::ARG_DELEGATOR => default_public_key_arg,\n            auction::ARG_VALIDATOR => validator_public_key_arg,\n        },\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder\n        .exec(validator_fund_request)\n        .commit()\n        .expect_success();\n\n    builder.exec(add_bid_request).commit().expect_success();\n\n    builder\n        .exec(store_call_auction_request)\n        .commit()\n        .expect_success();\n\n    builder.exec(delegate_request);\n\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(error, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(\n            auction_error,\n        ))) if auction_error == auction::Error::InvalidContext as u8)\n    );\n}\n\n#[ignore]\n#[test]\nfn should_fail_to_undelegate_from_stored_session_code() {\n    let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone();\n\n    let validator_public_key_arg = VALIDATOR_PUBLIC_KEY.clone();\n    let validator_addr = VALIDATOR_PUBLIC_KEY.to_account_hash();\n\n    let validator_fund_request = {\n        const CONTRACT_TRANSFER_TO_ACCOUNT: &str = \"transfer_to_account_u512.wasm\";\n        const ARG_AMOUNT: &str = \"amount\";\n        const ARG_TARGET: &str = \"target\";\n\n        ExecuteRequestBuilder::standard(\n            *DEFAULT_ACCOUNT_ADDR,\n            CONTRACT_TRANSFER_TO_ACCOUNT,\n            runtime_args! {\n                ARG_TARGET => validator_addr,\n                ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE)\n            },\n        )\n        .build()\n    };\n\n    let add_bid_request = ExecuteRequestBuilder::standard(\n        VALIDATOR_PUBLIC_KEY.to_account_hash(),\n        CONTRACT_ADD_BID,\n        runtime_args! 
{\n            auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT), // smaller amount results in Error::BondTooSmall\n            auction::ARG_PUBLIC_KEY => validator_public_key_arg.clone(),\n            auction::ARG_DELEGATION_RATE => 0u8,\n        },\n    )\n    .build();\n\n    let store_call_auction_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_REGRESSION,\n        runtime_args! {},\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let delegate_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        builder.get_auction_contract_hash(),\n        auction::METHOD_DELEGATE,\n        runtime_args! {\n            auction::ARG_DELEGATOR => default_public_key_arg.clone(),\n            auction::ARG_VALIDATOR => validator_public_key_arg.clone(),\n            auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n        },\n    )\n    .build();\n\n    let undelegate_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        PACKAGE_NAME,\n        None,\n        CONTRACT_UNDELEGATE_ENTRYPOINT_SESSION,\n        runtime_args! 
{\n            auction::ARG_DELEGATOR => default_public_key_arg,\n            auction::ARG_VALIDATOR => validator_public_key_arg,\n        },\n    )\n    .build();\n\n    builder\n        .exec(validator_fund_request)\n        .commit()\n        .expect_success();\n\n    builder.exec(add_bid_request).commit().expect_success();\n\n    builder\n        .exec(store_call_auction_request)\n        .commit()\n        .expect_success();\n\n    builder.exec(delegate_request).commit().expect_success();\n\n    builder.exec(undelegate_request).commit();\n\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(error, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(\n            auction_error,\n        ))) if auction_error == auction::Error::InvalidContext as u8)\n    );\n}\n\n#[ignore]\n#[test]\nfn should_fail_to_undelegate_from_stored_contract_code() {\n    let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone();\n\n    let validator_public_key_arg = VALIDATOR_PUBLIC_KEY.clone();\n    let validator_addr = VALIDATOR_PUBLIC_KEY.to_account_hash();\n\n    let validator_fund_request = {\n        const CONTRACT_TRANSFER_TO_ACCOUNT: &str = \"transfer_to_account_u512.wasm\";\n        const ARG_AMOUNT: &str = \"amount\";\n        const ARG_TARGET: &str = \"target\";\n\n        ExecuteRequestBuilder::standard(\n            *DEFAULT_ACCOUNT_ADDR,\n            CONTRACT_TRANSFER_TO_ACCOUNT,\n            runtime_args! {\n                ARG_TARGET => validator_addr,\n                ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE)\n            },\n        )\n        .build()\n    };\n\n    let add_bid_request = ExecuteRequestBuilder::standard(\n        VALIDATOR_PUBLIC_KEY.to_account_hash(),\n        CONTRACT_ADD_BID,\n        runtime_args! 
{\n            auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT), // smaller amount results in Error::BondTooSmall\n            auction::ARG_PUBLIC_KEY => validator_public_key_arg.clone(),\n            auction::ARG_DELEGATION_RATE => 0u8,\n        },\n    )\n    .build();\n\n    let store_call_auction_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_REGRESSION,\n        runtime_args! {},\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let delegate_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        builder.get_auction_contract_hash(),\n        auction::METHOD_DELEGATE,\n        runtime_args! {\n            auction::ARG_DELEGATOR => default_public_key_arg.clone(),\n            auction::ARG_VALIDATOR => validator_public_key_arg.clone(),\n            auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n        },\n    )\n    .build();\n\n    let undelegate_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        PACKAGE_NAME,\n        None,\n        CONTRACT_UNDELEGATE_ENTRYPOINT_CONTRACT,\n        runtime_args! 
{\n            auction::ARG_DELEGATOR => default_public_key_arg,\n            auction::ARG_VALIDATOR => validator_public_key_arg,\n        },\n    )\n    .build();\n\n    builder\n        .exec(validator_fund_request)\n        .commit()\n        .expect_success();\n\n    builder.exec(add_bid_request).commit().expect_success();\n\n    builder\n        .exec(store_call_auction_request)\n        .commit()\n        .expect_success();\n\n    builder.exec(delegate_request).commit().expect_success();\n\n    builder.exec(undelegate_request);\n\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(error, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(\n            auction_error,\n        ))) if auction_error == auction::Error::InvalidContext as u8)\n    );\n}\n\n#[ignore]\n#[test]\nfn should_fail_to_activate_bid_from_stored_session_code() {\n    let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone();\n\n    let add_bid_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT), // smaller amount results in Error::BondTooSmall\n            auction::ARG_PUBLIC_KEY => default_public_key_arg.clone(),\n            auction::ARG_DELEGATION_RATE => 0u8,\n        },\n    )\n    .build();\n\n    let withdraw_bid_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_WITHDRAW_BID,\n        runtime_args! {\n            auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT), // smaller amount results in Error::BondTooSmall\n            auction::ARG_PUBLIC_KEY => default_public_key_arg.clone(),\n        },\n    )\n    .build();\n\n    let store_call_auction_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_REGRESSION,\n        runtime_args! 
{},\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(add_bid_request).commit().expect_success();\n    builder.exec(withdraw_bid_request).commit().expect_success();\n\n    builder\n        .exec(store_call_auction_request)\n        .commit()\n        .expect_success();\n\n    let activate_bid_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        PACKAGE_NAME,\n        None,\n        CONTRACT_ACTIVATE_BID_ENTRYPOINT_SESSION,\n        runtime_args! {\n            auction::ARG_VALIDATOR => default_public_key_arg,\n        },\n    )\n    .build();\n\n    builder.exec(activate_bid_request);\n\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(error, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(\n            auction_error,\n        ))) if auction_error == auction::Error::InvalidContext as u8)\n    );\n}\n\n#[ignore]\n#[test]\nfn should_fail_to_activate_bid_from_stored_contract_code() {\n    let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone();\n\n    let add_bid_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT), // smaller amount results in Error::BondTooSmall\n            auction::ARG_PUBLIC_KEY => default_public_key_arg.clone(),\n            auction::ARG_DELEGATION_RATE => 0u8,\n        },\n    )\n    .build();\n\n    let withdraw_bid_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_WITHDRAW_BID,\n        runtime_args! 
{\n            auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT), // smaller amount results in Error::BondTooSmall\n            auction::ARG_PUBLIC_KEY => default_public_key_arg.clone(),\n        },\n    )\n    .build();\n\n    let store_call_auction_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_REGRESSION,\n        runtime_args! {},\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(add_bid_request).commit().expect_success();\n    builder.exec(withdraw_bid_request).commit().expect_success();\n\n    builder\n        .exec(store_call_auction_request)\n        .commit()\n        .expect_success();\n\n    let activate_bid_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        PACKAGE_NAME,\n        None,\n        CONTRACT_ACTIVATE_BID_ENTRYPOINT_CONTRACT,\n        runtime_args! {\n            auction::ARG_VALIDATOR => default_public_key_arg,\n        },\n    )\n    .build();\n\n    builder.exec(activate_bid_request);\n\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(error, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(\n            auction_error,\n        ))) if auction_error == auction::Error::InvalidContext as u8)\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_1225.rs",
    "content": "use casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST,\n};\n\nuse casper_types::{runtime_args, RuntimeArgs};\n\nconst ARG_AMOUNT: &str = \"amount\";\nconst EE_1225_REGRESSION_CONTRACT: &str = \"ee_1225_regression.wasm\";\nconst DO_NOTHING_CONTRACT: &str = \"do_nothing.wasm\";\n\n#[should_panic(expected = \"Finalization\")]\n#[ignore]\n#[allow(unused)]\n// #[test]\nfn should_run_ee_1225_verify_finalize_payment_invariants() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let deploy = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_payment_code(\n            EE_1225_REGRESSION_CONTRACT,\n            runtime_args! {\n                ARG_AMOUNT => *DEFAULT_PAYMENT,\n            },\n        )\n        .with_session_code(DO_NOTHING_CONTRACT, RuntimeArgs::default())\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([2; 32])\n        .build();\n\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy).build();\n\n    builder.exec(exec_request).expect_success().commit();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_221.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::RuntimeArgs;\n\nconst CONTRACT_EE_221_REGRESSION: &str = \"ee_221_regression.wasm\";\n\n#[ignore]\n#[test]\nfn should_run_ee_221_get_uref_regression_test() {\n    // This test runs a contract that's after every call extends the same key with\n    // more data\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_EE_221_REGRESSION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .expect_success()\n        .commit();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_401.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::RuntimeArgs;\n\nconst CONTRACT_EE_401_REGRESSION: &str = \"ee_401_regression.wasm\";\nconst CONTRACT_EE_401_REGRESSION_CALL: &str = \"ee_401_regression_call.wasm\";\n\n#[ignore]\n#[test]\nfn should_execute_contracts_which_provide_extra_urefs() {\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_EE_401_REGRESSION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_EE_401_REGRESSION_CALL,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request_1).expect_success().commit();\n    builder.exec(exec_request_2).expect_success().commit();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_441.rs",
    "content": "use casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, ARG_AMOUNT,\n    DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{runtime_args, Key, URef};\n\nconst EE_441_RNG_STATE: &str = \"ee_441_rng_state.wasm\";\n\nfn get_uref(key: Key) -> URef {\n    match key {\n        Key::URef(uref) => uref,\n        _ => panic!(\"Key {:?} is not an URef\", key),\n    }\n}\n\nfn do_pass(pass: &str) -> (URef, URef) {\n    // This test runs a contract that's after every call extends the same key with\n    // more data\n    let deploy = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        .with_session_code(\n            EE_441_RNG_STATE,\n            runtime_args! {\n                \"flag\" => pass,\n            },\n        )\n        .with_deploy_hash([1u8; 32])\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .build();\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy).build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .expect_success()\n        .commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    (\n        get_uref(*account.named_keys().get(\"uref1\").unwrap()),\n        get_uref(*account.named_keys().get(\"uref2\").unwrap()),\n    )\n}\n\n#[ignore]\n#[test]\nfn should_properly_pass_rng_state_to_subcontracts() {\n    // the baseline pass, no subcalls\n    let (pass1_uref1, pass1_uref2) = do_pass(\"pass1\");\n    // second pass does a subcall that does nothing, should be consistent with pass1\n    let (pass2_uref1, pass2_uref2) = do_pass(\"pass2\");\n    // second pass calls new_uref, and uref2 is 
returned from a sub call\n    let (pass3_uref1, pass3_uref2) = do_pass(\"pass3\");\n\n    // First urefs from each pass should yield same results where pass1 is the\n    // baseline\n    assert_eq!(pass1_uref1.addr(), pass2_uref1.addr());\n    assert_eq!(pass2_uref1.addr(), pass3_uref1.addr());\n\n    // Second urefs from each pass should yield the same result where pass1 is the\n    // baseline\n    assert_eq!(pass1_uref2.addr(), pass2_uref2.addr());\n    assert_eq!(pass2_uref2.addr(), pass3_uref2.addr());\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_460.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{\n    addressable_entity::EntityKindTag, execution::TransformKindV2, runtime_args, Key, U512,\n};\n\nconst CONTRACT_EE_460_REGRESSION: &str = \"ee_460_regression.wasm\";\n\nconst ARG_AMOUNT: &str = \"amount\";\n\n#[ignore]\n#[test]\nfn should_run_ee_460_no_side_effects_on_error_regression() {\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_EE_460_REGRESSION,\n        runtime_args! { ARG_AMOUNT => U512::max_value() },\n    )\n    .build();\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request_1)\n        .expect_success()\n        .commit();\n\n    // In this regression test it is verified that no new urefs are created on the\n    // mint uref, which should mean no new purses are created in case of\n    // transfer error. This is considered sufficient cause to confirm that the\n    // mint uref is left untouched.\n    let mint_entity_key =\n        Key::addressable_entity_key(EntityKindTag::System, builder.get_mint_contract_hash());\n\n    let effects = &builder.get_effects()[0];\n    let mint_transforms = effects\n        .transforms()\n        .iter()\n        .find(|transform| transform.key() == &mint_entity_key)\n        // Skips the Identity writes introduced since payment code execution for brevity of the\n        // check\n        .filter(|transform| transform.kind() != &TransformKindV2::Identity);\n    assert!(mint_transforms.is_none());\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_468.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::{engine_state::Error, execution::ExecError};\nuse casper_types::{bytesrepr, RuntimeArgs};\n\nconst CONTRACT_DESERIALIZE_ERROR: &str = \"deserialize_error.wasm\";\n\n#[ignore]\n#[test]\nfn should_not_fail_deserializing() {\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_DESERIALIZE_ERROR,\n        RuntimeArgs::new(),\n    )\n    .build();\n\n    let error = LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .commit()\n        .get_error();\n\n    assert!(\n        matches!(\n            error,\n            Some(Error::Exec(ExecError::BytesRepr(\n                bytesrepr::Error::EarlyEndOfStream\n            )))\n        ),\n        \"{:?}\",\n        error\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_470.rs",
    "content": "use std::sync::Arc;\n\nuse casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::engine_state::engine_config::DEFAULT_ENABLE_ENTITY;\nuse casper_storage::global_state::{\n    state::{lmdb::LmdbGlobalState, StateProvider},\n    transaction_source::lmdb::LmdbEnvironment,\n    trie_store::lmdb::LmdbTrieStore,\n};\nuse casper_types::RuntimeArgs;\nuse lmdb::DatabaseFlags;\n\nconst CONTRACT_DO_NOTHING: &str = \"do_nothing.wasm\";\n\n#[ignore]\n#[test]\nfn regression_test_genesis_hash_mismatch() {\n    let mut builder_base = LmdbWasmTestBuilder::default();\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_DO_NOTHING,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    // Step 1.\n    let builder = builder_base.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    // This is trie's post state hash after calling run_genesis endpoint.\n    // Step 1a)\n    let genesis_run_hash = builder.get_genesis_hash();\n    let genesis_transforms = builder.get_genesis_effects().clone();\n\n    let empty_root_hash = {\n        let gs = {\n            let tempdir = tempfile::tempdir().expect(\"should create tempdir\");\n            let lmdb_environment = LmdbEnvironment::new(tempdir.path(), 1024 * 1024, 32, false)\n                .expect(\"should create lmdb environment\");\n            let lmdb_trie_store =\n                LmdbTrieStore::new(&lmdb_environment, None, DatabaseFlags::default())\n                    .expect(\"should create lmdb trie store\");\n\n            LmdbGlobalState::empty(\n                Arc::new(lmdb_environment),\n                Arc::new(lmdb_trie_store),\n                6,\n                DEFAULT_ENABLE_ENTITY,\n            )\n            .expect(\"Empty GlobalState.\")\n        };\n        gs.empty_root()\n    };\n\n    // This is trie's post state hash after committing 
genesis effects on top of\n    // empty trie. Step 1b)\n    let genesis_transforms_hash = builder\n        .commit_transforms(empty_root_hash, genesis_transforms)\n        .get_post_state_hash();\n\n    // They should match.\n    assert_eq!(genesis_run_hash, genesis_transforms_hash);\n\n    // Step 2.\n    builder.exec(exec_request_1).commit().expect_success();\n\n    // No step 3.\n    // Step 4.\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    // Step 4a)\n    let second_genesis_run_hash = builder.get_genesis_hash();\n    let second_genesis_transforms = builder.get_genesis_effects().clone();\n\n    // Step 4b)\n    let second_genesis_transforms_hash = builder\n        .commit_transforms(empty_root_hash, second_genesis_transforms)\n        .get_post_state_hash();\n\n    assert_eq!(second_genesis_run_hash, second_genesis_transforms_hash);\n\n    assert_eq!(second_genesis_run_hash, genesis_run_hash);\n    assert_eq!(second_genesis_transforms_hash, genesis_transforms_hash);\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_532.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::engine_state::Error;\nuse casper_storage::tracking_copy::TrackingCopyError;\nuse casper_types::{account::AccountHash, RuntimeArgs};\n\nconst CONTRACT_EE_532_REGRESSION: &str = \"ee_532_regression.wasm\";\nconst UNKNOWN_ADDR: AccountHash = AccountHash::new([42u8; 32]);\n\n#[ignore]\n#[test]\nfn should_run_ee_532_non_existent_account_regression_test() {\n    let exec_request = ExecuteRequestBuilder::standard(\n        UNKNOWN_ADDR,\n        CONTRACT_EE_532_REGRESSION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .commit();\n\n    let deploy_result = builder\n        .get_exec_result_owned(0)\n        .expect(\"should have exec response\");\n\n    assert!(\n        deploy_result.has_precondition_failure(),\n        \"expected precondition failure\"\n    );\n\n    let message = deploy_result.error().map(|err| format!(\"{}\", err));\n    assert_eq!(\n        message,\n        Some(format!(\n            \"{}\",\n            Error::TrackingCopy(TrackingCopyError::KeyNotFound(UNKNOWN_ADDR.into()))\n        )),\n        \"expected Error::Authorization\"\n    )\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_536.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::RuntimeArgs;\n\nconst CONTRACT_EE_536_REGRESSION: &str = \"ee_536_regression.wasm\";\n\n#[ignore]\n#[test]\nfn should_run_ee_536_associated_account_management_regression() {\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_EE_536_REGRESSION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .expect_success()\n        .commit();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_539.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{addressable_entity::Weight, runtime_args};\n\nconst CONTRACT_EE_539_REGRESSION: &str = \"ee_539_regression.wasm\";\nconst ARG_KEY_MANAGEMENT_THRESHOLD: &str = \"key_management_threshold\";\nconst ARG_DEPLOYMENT_THRESHOLD: &str = \"deployment_threshold\";\n\n#[ignore]\n#[test]\nfn should_run_ee_539_serialize_action_thresholds_regression() {\n    // This test runs a contract that's after every call extends the same key with\n    // more data\n    let exec_request = ExecuteRequestBuilder::standard(\n       *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_EE_539_REGRESSION,\n        runtime_args! { ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(4), ARG_DEPLOYMENT_THRESHOLD => Weight::new(3) },\n    )\n        .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .expect_success()\n        .commit();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_549.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::RuntimeArgs;\n\nconst CONTRACT_EE_549_REGRESSION: &str = \"ee_549_regression.wasm\";\n\n#[ignore]\n#[test]\nfn should_run_ee_549_set_refund_regression() {\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_EE_549_REGRESSION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request);\n\n    // Execution should encounter an error because set_refund\n    // is not allowed to be called during session execution.\n    assert!(builder.is_error());\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_550.rs",
    "content": "use casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, ARG_AMOUNT,\n    DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{account::AccountHash, runtime_args};\n\nconst PASS_INIT_REMOVE: &str = \"init_remove\";\nconst PASS_TEST_REMOVE: &str = \"test_remove\";\nconst PASS_INIT_UPDATE: &str = \"init_update\";\nconst PASS_TEST_UPDATE: &str = \"test_update\";\n\nconst CONTRACT_EE_550_REGRESSION: &str = \"ee_550_regression.wasm\";\nconst KEY_2_ADDR: [u8; 32] = [101; 32];\nconst DEPLOY_HASH: [u8; 32] = [42; 32];\nconst ARG_PASS: &str = \"pass\";\n\n#[ignore]\n#[test]\nfn should_run_ee_550_remove_with_saturated_threshold_regression() {\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_EE_550_REGRESSION,\n        runtime_args! { ARG_PASS => String::from(PASS_INIT_REMOVE) },\n    )\n    .build();\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_session_code(\n            CONTRACT_EE_550_REGRESSION,\n            runtime_args! { ARG_PASS => String::from(PASS_TEST_REMOVE) },\n        )\n        .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR, AccountHash::new(KEY_2_ADDR)])\n        .with_deploy_hash(DEPLOY_HASH)\n        .build();\n\n    let exec_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request_1)\n        .expect_success()\n        .commit()\n        .exec(exec_request_2)\n        .expect_success()\n        .commit();\n}\n\n#[ignore]\n#[test]\nfn should_run_ee_550_update_with_saturated_threshold_regression() {\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_EE_550_REGRESSION,\n        runtime_args! { ARG_PASS => String::from(PASS_INIT_UPDATE) },\n    )\n    .build();\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_session_code(\n            CONTRACT_EE_550_REGRESSION,\n            runtime_args! { ARG_PASS => String::from(PASS_TEST_UPDATE) },\n        )\n        .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR, AccountHash::new(KEY_2_ADDR)])\n        .with_deploy_hash(DEPLOY_HASH)\n        .build();\n\n    let exec_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request_1)\n        .expect_success()\n        .commit()\n        .exec(exec_request_2)\n        .expect_success()\n        .commit();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_572.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT,\n    LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{account::AccountHash, runtime_args, Key, U512};\n\nconst CONTRACT_CREATE: &str = \"ee_572_regression_create.wasm\";\nconst CONTRACT_ESCALATE: &str = \"ee_572_regression_escalate.wasm\";\nconst CONTRACT_TRANSFER: &str = \"transfer_purse_to_account.wasm\";\nconst CREATE: &str = \"create\";\n\nconst ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]);\nconst ACCOUNT_2_ADDR: AccountHash = AccountHash::new([2u8; 32]);\n\n#[ignore]\n#[test]\nfn should_run_ee_572_regression() {\n    let account_amount: U512 = *DEFAULT_PAYMENT + U512::from(100);\n    let account_1_creation_args = runtime_args! {\n        \"target\" => ACCOUNT_1_ADDR,\n        \"amount\" => account_amount\n    };\n    let account_2_creation_args = runtime_args! {\n        \"target\" => ACCOUNT_2_ADDR,\n        \"amount\" => account_amount,\n    };\n\n    // This test runs a contract that's after every call extends the same key with\n    // more data\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER,\n        account_1_creation_args,\n    )\n    .build();\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER,\n        account_2_creation_args.clone(),\n    )\n    .build();\n\n    let exec_request_3 =\n        ExecuteRequestBuilder::standard(ACCOUNT_1_ADDR, CONTRACT_CREATE, account_2_creation_args)\n            .build();\n\n    // Create Accounts\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request_1)\n        .expect_success()\n        .commit();\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    // Store the creation contract\n    
builder.exec(exec_request_3).expect_success().commit();\n\n    let contract: Key = {\n        let account = builder\n            .get_entity_with_named_keys_by_account_hash(ACCOUNT_1_ADDR)\n            .expect(\"must have default contract package\");\n        *account\n            .named_keys()\n            .get(CREATE)\n            .expect(\"Could not find contract pointer\")\n    };\n\n    // Attempt to forge a new URef with escalated privileges\n    let exec_request_4 = ExecuteRequestBuilder::standard(\n        ACCOUNT_2_ADDR,\n        CONTRACT_ESCALATE,\n        runtime_args! {\n            \"contract_hash\" => contract.into_entity_hash_addr().expect(\"should be hash\"),\n        },\n    )\n    .build();\n\n    // Attempt to forge a new URef with escalated privileges\n    let _ = builder\n        .exec(exec_request_4)\n        .get_exec_result_owned(3)\n        .expect(\"should have a response\");\n\n    let error_message = builder.get_error_message().unwrap();\n\n    assert!(\n        error_message.contains(\"Forged reference\"),\n        \"{}\",\n        error_message\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_584.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{execution::TransformKindV2, RuntimeArgs, StoredValue};\n\nconst CONTRACT_EE_584_REGRESSION: &str = \"ee_584_regression.wasm\";\n\n#[ignore]\n#[allow(unused)]\n// #[test]\nfn should_run_ee_584_no_errored_session_transforms() {\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_EE_584_REGRESSION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request);\n\n    assert!(builder.is_error());\n\n    let effects = &builder.get_effects()[0];\n\n    assert!(!effects.transforms().iter().any(|transform| {\n        if let TransformKindV2::Write(StoredValue::CLValue(cl_value)) = transform.kind() {\n            cl_value.to_owned().into_t::<String>().unwrap_or_default() == \"Hello, World!\"\n        } else {\n            false\n        }\n    }));\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_597.rs",
    "content": "use once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    utils, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNTS,\n    MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse casper_types::{\n    account::AccountHash, system::auction, ApiError, GenesisAccount, Motes, PublicKey, RuntimeArgs,\n    SecretKey,\n};\n\nconst CONTRACT_EE_597_REGRESSION: &str = \"ee_597_regression.wasm\";\n\nstatic VALID_PUBLIC_KEY: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic VALID_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*VALID_PUBLIC_KEY));\nconst VALID_BALANCE: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE;\n\n#[ignore]\n#[test]\nfn should_fail_when_bonding_amount_is_zero_ee_597_regression() {\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account =\n            GenesisAccount::account(VALID_PUBLIC_KEY.clone(), Motes::new(VALID_BALANCE), None);\n        tmp.push(account);\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        *VALID_ADDR,\n        CONTRACT_EE_597_REGRESSION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(run_genesis_request)\n        .exec(exec_request)\n        .commit();\n\n    let error_message = builder.get_error_message().expect(\"should have a result\");\n\n    // Error::BondTooSmall => 5,\n    assert!(\n        error_message.contains(&format!(\n            \"{:?}\",\n            ApiError::from(auction::Error::BondTooSmall)\n        )),\n        \"{}\",\n        error_message\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_598.rs",
    "content": "use num_traits::Zero;\nuse once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    utils, DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNTS,\n    DEFAULT_ACCOUNT_ADDR,\n};\nuse casper_types::{\n    account::AccountHash, runtime_args, system::auction::DelegationRate, GenesisAccount,\n    GenesisValidator, Motes, PublicKey, SecretKey, U512,\n};\n\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_PUBLIC_KEY: &str = \"public_key\";\nconst ARG_ENTRY_POINT: &str = \"entry_point\";\nconst ARG_ACCOUNT_HASH: &str = \"account_hash\";\n\nconst CONTRACT_AUCTION_BIDDING: &str = \"auction_bidding.wasm\";\n\nstatic ACCOUNT_1_PK: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([4; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\n\nconst GENESIS_VALIDATOR_STAKE: u64 = 50_000;\n\nstatic ACCOUNT_1_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*ACCOUNT_1_PK));\nstatic ACCOUNT_1_FUND: Lazy<U512> = Lazy::new(|| U512::from(10_000_000_000_000u64));\nstatic ACCOUNT_1_BALANCE: Lazy<U512> = Lazy::new(|| *ACCOUNT_1_FUND + 100_000);\nstatic ACCOUNT_1_BOND: Lazy<U512> = Lazy::new(|| U512::from(25_000));\n\n#[ignore]\n#[test]\nfn should_handle_unbond_for_more_than_stake_as_full_unbond_of_stake_ee_598_regression() {\n    let secret_key = SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap();\n    let public_key = PublicKey::from(&secret_key);\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account = GenesisAccount::account(\n            public_key,\n            Motes::new(GENESIS_VALIDATOR_STAKE)\n                .checked_mul(Motes::new(2))\n                .unwrap(),\n            Some(GenesisValidator::new(\n                Motes::new(GENESIS_VALIDATOR_STAKE),\n                DelegationRate::zero(),\n            )),\n        );\n        tmp.push(account);\n        tmp\n    };\n\n    let 
run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let seed_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_AUCTION_BIDDING,\n        runtime_args! {\n            ARG_ENTRY_POINT => \"seed_new_account\",\n            ARG_ACCOUNT_HASH => *ACCOUNT_1_ADDR,\n            ARG_AMOUNT => *ACCOUNT_1_BALANCE,\n        },\n    )\n    .build();\n    let deploy = DeployItemBuilder::new()\n        .with_address(*ACCOUNT_1_ADDR)\n        .with_standard_payment(runtime_args! { ARG_AMOUNT => *ACCOUNT_1_FUND })\n        .with_session_code(\n            \"ee_598_regression.wasm\",\n            runtime_args! {\n                ARG_AMOUNT => *ACCOUNT_1_BOND,\n                ARG_PUBLIC_KEY => ACCOUNT_1_PK.clone(),\n            },\n        )\n        .with_deploy_hash([2u8; 32])\n        .with_authorization_keys(&[*ACCOUNT_1_ADDR])\n        .build();\n    let combined_bond_and_unbond_request = ExecuteRequestBuilder::from_deploy_item(&deploy).build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(run_genesis_request);\n\n    builder.exec(seed_request).expect_success().commit();\n\n    builder\n        .exec(combined_bond_and_unbond_request)\n        .expect_failure()\n        .commit();\n\n    let err = builder.get_error().expect(\"should have error\");\n    assert_eq!(\n        \"ApiError::AuctionError(UnbondTooLarge) [64532]\",\n        err.to_string()\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_599.rs",
    "content": "use once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT,\n    LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{account::AccountHash, runtime_args, U512};\n\nconst CONTRACT_EE_599_REGRESSION: &str = \"ee_599_regression.wasm\";\nconst CONTRACT_TRANSFER_TO_ACCOUNT: &str = \"transfer_to_account_u512.wasm\";\nconst DONATION_PURSE_COPY_KEY: &str = \"donation_purse_copy\";\nconst EXPECTED_ERROR: &str = \"Forged reference\";\nconst TRANSFER_FUNDS_KEY: &str = \"transfer_funds\";\nconst VICTIM_ADDR: AccountHash = AccountHash::new([42; 32]);\n\nstatic VICTIM_INITIAL_FUNDS: Lazy<U512> = Lazy::new(|| *DEFAULT_PAYMENT * 10);\n\nfn setup() -> LmdbWasmTestBuilder {\n    // Creates victim account\n    let exec_request_1 = {\n        let args = runtime_args! {\n            \"target\" => VICTIM_ADDR,\n            \"amount\" => *VICTIM_INITIAL_FUNDS,\n        };\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, args)\n            .build()\n    };\n\n    // Deploy contract\n    let exec_request_2 = {\n        let args = runtime_args! 
{\n            \"method\" => \"install\".to_string(),\n        };\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, CONTRACT_EE_599_REGRESSION, args)\n            .build()\n    };\n\n    let mut ctx = LmdbWasmTestBuilder::default();\n    ctx.run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request_1)\n        .expect_success()\n        .commit()\n        .exec(exec_request_2)\n        .expect_success()\n        .commit()\n        .clear_results();\n    ctx\n}\n\n#[ignore]\n#[test]\nfn should_not_be_able_to_transfer_funds_with_transfer_purse_to_purse() {\n    let mut builder = setup();\n\n    let victim_account = builder\n        .get_entity_by_account_hash(VICTIM_ADDR)\n        .expect(\"should have victim account\");\n\n    let default_account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have default account\");\n    let transfer_funds = default_account\n        .named_keys()\n        .get(TRANSFER_FUNDS_KEY)\n        .cloned()\n        .unwrap_or_else(|| panic!(\"should have {}\", TRANSFER_FUNDS_KEY));\n    let donation_purse_copy_key = default_account\n        .named_keys()\n        .get(DONATION_PURSE_COPY_KEY)\n        .cloned()\n        .unwrap_or_else(|| panic!(\"should have {}\", DONATION_PURSE_COPY_KEY));\n\n    let donation_purse_copy = donation_purse_copy_key.into_uref().expect(\"should be uref\");\n\n    let exec_request_3 = {\n        let args = runtime_args! 
{\n            \"method\" => \"call\",\n            \"contract_key\" => transfer_funds.into_entity_hash_addr().expect(\"should be hash\"),\n            \"sub_contract_method_fwd\" => \"transfer_from_purse_to_purse_ext\",\n        };\n        ExecuteRequestBuilder::standard(VICTIM_ADDR, CONTRACT_EE_599_REGRESSION, args).build()\n    };\n\n    let proposer_reward_starting_balance = builder.get_proposer_purse_balance();\n\n    builder.exec(exec_request_3).commit();\n\n    let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance;\n\n    let error_msg = builder.get_error_message().expect(\"should have error\");\n    assert!(\n        error_msg.contains(EXPECTED_ERROR),\n        \"Got error: {}\",\n        error_msg\n    );\n\n    let victim_balance_after = builder.get_purse_balance(victim_account.main_purse());\n\n    assert_eq!(\n        *VICTIM_INITIAL_FUNDS - transaction_fee,\n        victim_balance_after\n    );\n\n    assert_eq!(builder.get_purse_balance(donation_purse_copy), U512::zero(),);\n}\n\n#[ignore]\n#[test]\nfn should_not_be_able_to_transfer_funds_with_transfer_from_purse_to_account() {\n    let mut builder = setup();\n\n    let victim_account = builder\n        .get_entity_by_account_hash(VICTIM_ADDR)\n        .expect(\"should have victim account\");\n\n    let default_account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have default account\");\n\n    let default_account_balance = builder.get_purse_balance(default_account.main_purse());\n\n    let transfer_funds = default_account\n        .named_keys()\n        .get(TRANSFER_FUNDS_KEY)\n        .cloned()\n        .unwrap_or_else(|| panic!(\"should have {}\", TRANSFER_FUNDS_KEY));\n    let donation_purse_copy_key = default_account\n        .named_keys()\n        .get(DONATION_PURSE_COPY_KEY)\n        .cloned()\n        .unwrap_or_else(|| panic!(\"should have {}\", DONATION_PURSE_COPY_KEY));\n\n    let 
donation_purse_copy = donation_purse_copy_key.into_uref().expect(\"should be uref\");\n\n    let exec_request_3 = {\n        let args = runtime_args! {\n            \"method\" => \"call\".to_string(),\n            \"contract_key\" => transfer_funds.into_entity_hash_addr().expect(\"should get key\"),\n            \"sub_contract_method_fwd\" => \"transfer_from_purse_to_account_ext\",\n        };\n        ExecuteRequestBuilder::standard(VICTIM_ADDR, CONTRACT_EE_599_REGRESSION, args).build()\n    };\n\n    let proposer_reward_starting_balance = builder.get_proposer_purse_balance();\n\n    builder.exec(exec_request_3).commit();\n\n    let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance;\n\n    let error_msg = builder.get_error_message().expect(\"should have error\");\n    assert!(\n        error_msg.contains(EXPECTED_ERROR),\n        \"Got error: {}\",\n        error_msg\n    );\n\n    let victim_balance_after = builder.get_purse_balance(victim_account.main_purse());\n\n    assert_eq!(\n        *VICTIM_INITIAL_FUNDS - transaction_fee,\n        victim_balance_after\n    );\n    // In this variant of test `donation_purse` is left unchanged i.e. 
zero balance\n    assert_eq!(builder.get_purse_balance(donation_purse_copy), U512::zero(),);\n\n    // Main purse of the contract owner is unchanged\n    let updated_default_account_balance = builder.get_purse_balance(default_account.main_purse());\n\n    assert_eq!(\n        updated_default_account_balance - default_account_balance,\n        U512::zero(),\n    )\n}\n\n#[ignore]\n#[test]\nfn should_not_be_able_to_transfer_funds_with_transfer_to_account() {\n    let mut builder = setup();\n\n    let victim_account = builder\n        .get_entity_by_account_hash(VICTIM_ADDR)\n        .expect(\"should have victim account\");\n\n    let default_account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have default account\");\n\n    let default_account_balance = builder.get_purse_balance(default_account.main_purse());\n\n    let transfer_funds = default_account\n        .named_keys()\n        .get(TRANSFER_FUNDS_KEY)\n        .cloned()\n        .unwrap_or_else(|| panic!(\"should have {}\", TRANSFER_FUNDS_KEY));\n    let donation_purse_copy_key = default_account\n        .named_keys()\n        .get(DONATION_PURSE_COPY_KEY)\n        .cloned()\n        .unwrap_or_else(|| panic!(\"should have {}\", DONATION_PURSE_COPY_KEY));\n\n    let donation_purse_copy = donation_purse_copy_key.into_uref().expect(\"should be uref\");\n\n    let exec_request_3 = {\n        let args = runtime_args! 
{\n            \"method\" => \"call\",\n            \"contract_key\" => transfer_funds.into_entity_hash_addr().expect(\"should be hash\"),\n            \"sub_contract_method_fwd\" => \"transfer_to_account_ext\",\n        };\n        ExecuteRequestBuilder::standard(VICTIM_ADDR, CONTRACT_EE_599_REGRESSION, args).build()\n    };\n\n    let proposer_reward_starting_balance = builder.get_proposer_purse_balance();\n\n    builder.exec(exec_request_3).commit();\n\n    let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance;\n\n    let error_msg = builder.get_error_message().expect(\"should have error\");\n    assert!(\n        error_msg.contains(EXPECTED_ERROR),\n        \"Got error: {}\",\n        error_msg\n    );\n\n    let victim_balance_after = builder.get_purse_balance(victim_account.main_purse());\n\n    assert_eq!(\n        *VICTIM_INITIAL_FUNDS - transaction_fee,\n        victim_balance_after\n    );\n\n    // In this variant of test `donation_purse` is left unchanged i.e. 
zero balance\n    assert_eq!(builder.get_purse_balance(donation_purse_copy), U512::zero(),);\n\n    // Verify that default account's balance didn't change\n    let updated_default_account_balance = builder.get_purse_balance(default_account.main_purse());\n\n    assert_eq!(\n        updated_default_account_balance - default_account_balance,\n        U512::zero(),\n    )\n}\n\n#[ignore]\n#[test]\nfn should_not_be_able_to_get_main_purse_in_invalid_builder() {\n    let mut builder = setup();\n\n    let victim_account = builder\n        .get_entity_with_named_keys_by_account_hash(VICTIM_ADDR)\n        .expect(\"should have victim account\");\n\n    let default_account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have default account\");\n\n    let transfer_funds = default_account\n        .named_keys()\n        .get(TRANSFER_FUNDS_KEY)\n        .cloned()\n        .unwrap_or_else(|| panic!(\"should have {}\", TRANSFER_FUNDS_KEY));\n\n    let exec_request_3 = {\n        let args = runtime_args! 
{\n            \"method\" => \"call\".to_string(),\n            \"contract_key\" => transfer_funds.into_entity_hash_addr().expect(\"should be hash\"),\n            \"sub_contract_method_fwd\" => \"transfer_to_account_ext\",\n        };\n        ExecuteRequestBuilder::standard(VICTIM_ADDR, CONTRACT_EE_599_REGRESSION, args).build()\n    };\n\n    let victim_balance_before = builder.get_purse_balance(victim_account.main_purse());\n\n    let proposer_reward_starting_balance = builder.get_proposer_purse_balance();\n\n    builder.exec(exec_request_3).commit();\n\n    let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance;\n\n    let error_msg = builder.get_error_message().expect(\"should have error\");\n    assert!(\n        error_msg.contains(EXPECTED_ERROR),\n        \"Got error: {}\",\n        error_msg\n    );\n\n    let victim_balance_after = builder.get_purse_balance(victim_account.main_purse());\n\n    assert_eq!(\n        victim_balance_before - transaction_fee,\n        victim_balance_after\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_601.rs",
    "content": "use casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{runtime_args, CLValue, EntityAddr, RuntimeArgs, StoredValue};\n\nconst ARG_AMOUNT: &str = \"amount\";\n\n#[ignore]\n#[test]\nfn should_run_ee_601_pay_session_new_uref_collision() {\n    let genesis_account_hash = *DEFAULT_ACCOUNT_ADDR;\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_deploy_hash([1; 32])\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_payment_code(\n            \"ee_601_regression.wasm\",\n            runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT },\n        )\n        .with_session_code(\"ee_601_regression.wasm\", RuntimeArgs::default())\n        .with_authorization_keys(&[genesis_account_hash])\n        .build();\n\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .expect_success()\n        .commit();\n\n    let hash = *DEFAULT_ACCOUNT_ADDR;\n    let named_keys = builder.get_named_keys(EntityAddr::Account(hash.value()));\n\n    let payment_uref = *named_keys\n        .get(\"new_uref_result-payment\")\n        .expect(\"payment uref should exist\");\n\n    let session_uref = *named_keys\n        .get(\"new_uref_result-session\")\n        .expect(\"session uref should exist\");\n\n    assert_ne!(\n        payment_uref, session_uref,\n        \"payment and session code should not create same uref\"\n    );\n\n    let payment_value: StoredValue = builder\n        .query(None, payment_uref, &[])\n        .expect(\"should find payment value\");\n\n    assert_eq!(\n        payment_value,\n        StoredValue::CLValue(CLValue::from_t(\"payment\".to_string()).unwrap()),\n        \"expected payment\"\n    );\n\n    let 
session_value: StoredValue = builder\n        .query(None, session_uref, &[])\n        .expect(\"should find session value\");\n\n    assert_eq!(\n        session_value,\n        StoredValue::CLValue(CLValue::from_t(\"session\".to_string()).unwrap()),\n        \"expected session\"\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_771.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::RuntimeArgs;\n\nconst CONTRACT_EE_771_REGRESSION: &str = \"ee_771_regression.wasm\";\n\n#[ignore]\n#[test]\nfn should_run_ee_771_regression() {\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_EE_771_REGRESSION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .commit();\n\n    let exec_result = builder\n        .get_exec_result_owned(0)\n        .expect(\"should have a response\");\n\n    let error = exec_result.error().expect(\"should have error\");\n    assert_eq!(\n        format!(\"{}\", error),\n        \"Function not found: functiondoesnotexist\"\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_890.rs",
    "content": "use casper_wasm::{self, builder};\n\nuse casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, ARG_AMOUNT,\n    DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{addressable_entity::DEFAULT_ENTRY_POINT_NAME, runtime_args, RuntimeArgs};\n\nconst DO_NOTHING_WASM: &str = \"do_nothing.wasm\";\n\n// NOTE: Apparently rustc does not emit \"start\" when targeting wasm32\n// Ref: https://github.com/rustwasm/team/issues/108\n\n/// Creates minimal session code that does nothing but has start node\nfn make_do_nothing_with_start() -> Vec<u8> {\n    let module = builder::module()\n        .function()\n        // A signature with 0 params and no return type\n        .signature()\n        .build()\n        // main() marks given function as a start() node\n        .main()\n        .body()\n        .build()\n        .build()\n        // Export above function\n        .export()\n        .field(DEFAULT_ENTRY_POINT_NAME)\n        .build()\n        // Memory section is mandatory\n        .memory()\n        .build()\n        .build();\n\n    casper_wasm::serialize(module).expect(\"should serialize\")\n}\n\n#[ignore]\n#[test]\nfn should_run_ee_890_gracefully_reject_start_node_in_session() {\n    let wasm_binary = make_do_nothing_with_start();\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_session_bytes(wasm_binary, RuntimeArgs::new())\n        .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, })\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([123; 32])\n        .build();\n\n    let exec_request_1 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request_1)\n        .commit();\n    let message = builder.get_error_message().expect(\"should fail\");\n    assert!(\n        message.contains(\"Unsupported Wasm start\"),\n        \"Error message {:?} does not contain expected pattern\",\n        message\n    );\n}\n\n#[ignore]\n#[test]\nfn should_run_ee_890_gracefully_reject_start_node_in_payment() {\n    let wasm_binary = make_do_nothing_with_start();\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_session_code(DO_NOTHING_WASM, RuntimeArgs::new())\n        .with_payment_bytes(wasm_binary, RuntimeArgs::new())\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([123; 32])\n        .build();\n\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .commit();\n    let message = builder.get_error_message().expect(\"should fail\");\n    assert!(\n        message.contains(\"Unsupported Wasm start\"),\n        \"Error message {:?} does not contain expected pattern\",\n        message\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/ee_966.rs",
    "content": "use assert_matches::assert_matches;\nuse casper_wasm::builder;\nuse once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequest, ExecuteRequestBuilder, LmdbWasmTestBuilder,\n    UpgradeRequestBuilder, ARG_AMOUNT, DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT,\n    DEFAULT_PROTOCOL_VERSION, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::{engine_state::Error, execution::ExecError};\nuse casper_types::{\n    addressable_entity::DEFAULT_ENTRY_POINT_NAME, runtime_args, ApiError, EraId,\n    HostFunctionCostsV1, HostFunctionCostsV2, MessageLimits, OpcodeCosts, ProtocolVersion,\n    RuntimeArgs, WasmConfig, WasmV1Config, WasmV2Config, DEFAULT_MAX_STACK_HEIGHT,\n    DEFAULT_WASM_MAX_MEMORY,\n};\n\nconst CONTRACT_EE_966_REGRESSION: &str = \"ee_966_regression.wasm\";\nconst MINIMUM_INITIAL_MEMORY: u32 = 16;\nconst DEFAULT_ACTIVATION_POINT: EraId = EraId::new(0);\n\nstatic DOUBLED_WASM_MEMORY_LIMIT: Lazy<WasmConfig> = Lazy::new(|| {\n    let wasm_v1_config = WasmV1Config::new(\n        DEFAULT_WASM_MAX_MEMORY * 2,\n        DEFAULT_MAX_STACK_HEIGHT,\n        OpcodeCosts::default(),\n        HostFunctionCostsV1::default(),\n    );\n    let wasm_v2_config = WasmV2Config::new(\n        DEFAULT_WASM_MAX_MEMORY * 2,\n        OpcodeCosts::default(),\n        HostFunctionCostsV2::default(),\n    );\n    WasmConfig::new(MessageLimits::default(), wasm_v1_config, wasm_v2_config)\n});\nconst NEW_PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::from_parts(\n    DEFAULT_PROTOCOL_VERSION.value().major,\n    DEFAULT_PROTOCOL_VERSION.value().minor,\n    DEFAULT_PROTOCOL_VERSION.value().patch + 1,\n);\n\nfn make_session_code_with_memory_pages(initial_pages: u32, max_pages: Option<u32>) -> Vec<u8> {\n    let module = builder::module()\n        .function()\n        // A signature with 0 params and no return type\n        .signature()\n        .build()\n        .body()\n        .build()\n        .build()\n        // Export above 
function\n        .export()\n        .field(DEFAULT_ENTRY_POINT_NAME)\n        .build()\n        // Memory section is mandatory\n        .memory()\n        // Produces entry `(memory (0) initial_pages [max_pages])`\n        .with_min(initial_pages)\n        .with_max(max_pages)\n        .build()\n        .build();\n    casper_wasm::serialize(module).expect(\"should serialize\")\n}\n\nfn make_request_with_session_bytes(session_code: Vec<u8>) -> ExecuteRequest {\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_session_bytes(session_code, RuntimeArgs::new())\n        .with_standard_payment(runtime_args! {\n            ARG_AMOUNT => *DEFAULT_PAYMENT\n        })\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([42; 32])\n        .build();\n\n    ExecuteRequestBuilder::from_deploy_item(&deploy_item).build()\n}\n\n#[ignore]\n#[test]\nfn should_run_ee_966_with_zero_min_and_zero_max_memory() {\n    // A contract that has initial memory pages of 0 and maximum memory pages of 0 is valid\n    let session_code = make_session_code_with_memory_pages(0, Some(0));\n\n    let exec_request = make_request_with_session_bytes(session_code);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request).commit().expect_success();\n}\n\n#[ignore]\n#[test]\nfn should_run_ee_966_cant_have_too_much_initial_memory() {\n    let session_code = make_session_code_with_memory_pages(DEFAULT_WASM_MAX_MEMORY + 1, None);\n\n    let exec_request = make_request_with_session_bytes(session_code);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request).commit();\n\n    let exec_result = &builder\n        .get_exec_result_owned(0)\n        .expect(\"should have exec response\");\n    let error = exec_result.error().expect(\"should 
have error\");\n    assert_matches!(error, Error::Exec(ExecError::Interpreter(_)));\n}\n\n#[ignore]\n#[test]\nfn should_run_ee_966_should_request_exactly_maximum() {\n    let session_code =\n        make_session_code_with_memory_pages(DEFAULT_WASM_MAX_MEMORY, Some(DEFAULT_WASM_MAX_MEMORY));\n\n    let exec_request = make_request_with_session_bytes(session_code);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request).commit().expect_success();\n}\n\n#[ignore]\n#[test]\nfn should_run_ee_966_should_request_exactly_maximum_as_initial() {\n    let session_code = make_session_code_with_memory_pages(DEFAULT_WASM_MAX_MEMORY, None);\n\n    let exec_request = make_request_with_session_bytes(session_code);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request).commit().expect_success();\n}\n\n#[ignore]\n#[test]\nfn should_run_ee_966_cant_have_too_much_max_memory() {\n    let session_code = make_session_code_with_memory_pages(\n        MINIMUM_INITIAL_MEMORY,\n        Some(DEFAULT_WASM_MAX_MEMORY + 1),\n    );\n\n    let exec_request = make_request_with_session_bytes(session_code);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request).commit();\n\n    let exec_result = &builder\n        .get_exec_result_owned(0)\n        .expect(\"should have exec response\");\n    let error = exec_result.error().expect(\"should have error\");\n    assert_matches!(error, Error::Exec(ExecError::Interpreter(_)));\n}\n\n#[ignore]\n#[test]\nfn should_run_ee_966_cant_have_way_too_much_max_memory() {\n    let session_code = make_session_code_with_memory_pages(\n        MINIMUM_INITIAL_MEMORY,\n        Some(DEFAULT_WASM_MAX_MEMORY + 42),\n    );\n\n    let exec_request = 
make_request_with_session_bytes(session_code);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request).commit();\n\n    let exec_result = &builder\n        .get_exec_result_owned(0)\n        .expect(\"should have exec response\");\n    let error = exec_result.error().expect(\"should have error\");\n    assert_matches!(error, Error::Exec(ExecError::Interpreter(_)));\n}\n\n#[ignore]\n#[test]\nfn should_run_ee_966_cant_have_larger_initial_than_max_memory() {\n    let session_code =\n        make_session_code_with_memory_pages(DEFAULT_WASM_MAX_MEMORY, Some(MINIMUM_INITIAL_MEMORY));\n\n    let exec_request = make_request_with_session_bytes(session_code);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request).commit();\n\n    let exec_result = &builder\n        .get_exec_result_owned(0)\n        .expect(\"should have exec response\");\n    let error = exec_result.error().expect(\"should have error\");\n    assert_matches!(error, Error::Exec(ExecError::Interpreter(_)));\n}\n\n#[ignore]\n#[test]\nfn should_run_ee_966_regression_fail_when_growing_mem_past_max() {\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_EE_966_REGRESSION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request).commit();\n\n    let exec_result = &builder\n        .get_exec_result_owned(0)\n        .expect(\"should have exec response\");\n    let error = exec_result.error().expect(\"should have error\");\n    assert_matches!(error, Error::Exec(ExecError::Revert(ApiError::OutOfMemory)));\n}\n\n#[ignore]\n#[test]\nfn should_run_ee_966_regression_when_growing_mem_after_upgrade() {\n    let exec_request_1 = 
ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_EE_966_REGRESSION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request_1).commit();\n\n    //\n    // This request should fail - as it's exceeding default memory limit\n    //\n\n    let exec_result = &builder\n        .get_exec_result_owned(0)\n        .expect(\"should have exec response\");\n    let error = exec_result.error().expect(\"should have error\");\n    assert_matches!(error, Error::Exec(ExecError::Revert(ApiError::OutOfMemory)));\n\n    //\n    // Upgrade default memory limit\n    //\n\n    let mut upgrade_request = UpgradeRequestBuilder::new()\n        .with_current_protocol_version(DEFAULT_PROTOCOL_VERSION)\n        .with_new_protocol_version(NEW_PROTOCOL_VERSION)\n        .with_activation_point(DEFAULT_ACTIVATION_POINT)\n        .build();\n\n    let updated_chainspec = builder\n        .chainspec()\n        .clone()\n        .with_wasm_config(*DOUBLED_WASM_MEMORY_LIMIT);\n\n    builder\n        .with_chainspec(updated_chainspec)\n        .upgrade(&mut upgrade_request);\n\n    //\n    // Now this request is working as the maximum memory limit is doubled.\n    //\n\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_EE_966_REGRESSION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request_2).commit().expect_success();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/gh_1470.rs",
    "content": "use std::collections::BTreeMap;\n\nuse casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder, UpgradeRequestBuilder,\n    DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_PUBLIC_KEY, LOCAL_GENESIS_REQUEST,\n    MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse casper_execution_engine::{engine_state::Error, execution::ExecError};\nuse casper_types::{\n    account::AccountHash,\n    runtime_args,\n    system::{auction, auction::DelegationRate},\n    AccessRights, AddressableEntityHash, CLTyped, CLValue, Digest, EraId, HoldBalanceHandling, Key,\n    PackageHash, ProtocolVersion, RuntimeArgs, StoredValue, StoredValueTypeMismatch,\n    SystemHashRegistry, Timestamp, URef, U512,\n};\n\nuse crate::lmdb_fixture;\n\nconst ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]);\nconst GH_1470_REGRESSION: &str = \"gh_1470_regression.wasm\";\nconst GH_1470_REGRESSION_CALL: &str = \"gh_1470_regression_call.wasm\";\nconst DEFAULT_ACTIVATION_POINT: EraId = EraId::new(1);\n\nconst CONTRACT_ADD_BID: &str = \"add_bid.wasm\";\nconst BOND_AMOUNT: u64 = 42;\nconst BID_DELEGATION_RATE: DelegationRate = auction::DELEGATION_RATE_DENOMINATOR;\n\nconst CONTRACT_TRANSFER_TO_ACCOUNT: &str = \"transfer_to_account_u512.wasm\";\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_TARGET: &str = \"target\";\n\nconst PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::V1_0_0;\n\nfn setup() -> LmdbWasmTestBuilder {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let transfer = TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, ACCOUNT_1_ADDR)\n        .with_transfer_id(42)\n        .build();\n\n    builder.transfer_and_commit(transfer).expect_success();\n\n    let sem_ver = PROTOCOL_VERSION.value();\n    let new_protocol_version =\n        ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1);\n\n    let updated_chainspec = builder\n        
.chainspec()\n        .clone()\n        .with_strict_argument_checking(true);\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(PROTOCOL_VERSION)\n            .with_new_protocol_version(new_protocol_version)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .build()\n    };\n\n    builder\n        .with_chainspec(updated_chainspec)\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    builder\n}\n\nfn apply_global_state_update(\n    builder: &LmdbWasmTestBuilder,\n    post_state_hash: Digest,\n) -> BTreeMap<Key, StoredValue> {\n    let key = URef::new([0u8; 32], AccessRights::all()).into();\n\n    let system_contract_hashes = builder\n        .query(Some(post_state_hash), key, &Vec::new())\n        .expect(\"Must have stored system contract hashes\")\n        .as_cl_value()\n        .expect(\"must be CLValue\")\n        .clone()\n        .into_t::<SystemHashRegistry>()\n        .expect(\"must convert to btree map\");\n\n    let mut global_state_update = BTreeMap::<Key, StoredValue>::new();\n    let registry = CLValue::from_t(system_contract_hashes)\n        .expect(\"must convert to StoredValue\")\n        .into();\n\n    global_state_update.insert(Key::SystemEntityRegistry, registry);\n\n    global_state_update\n}\n\n#[ignore]\n#[test]\nfn gh_1470_call_contract_should_verify_group_access() {\n    let mut builder = setup();\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        GH_1470_REGRESSION,\n        RuntimeArgs::new(),\n    )\n    .build();\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have default contract package\");\n\n    let entity_hash_key = account\n        .named_keys()\n        .get(gh_1470_regression::CONTRACT_HASH_NAME)\n        
.cloned()\n        .unwrap();\n    let entity_hash = entity_hash_key\n        .into_entity_hash_addr()\n        .map(AddressableEntityHash::new)\n        .unwrap();\n    let package_hash_key = account\n        .named_keys()\n        .get(gh_1470_regression::PACKAGE_HASH_NAME)\n        .cloned()\n        .unwrap();\n    let package_hash = package_hash_key\n        .into_package_addr()\n        .map(PackageHash::new)\n        .unwrap();\n\n    let call_contract_request = {\n        let args = runtime_args! {\n            gh_1470_regression_call::ARG_TEST_METHOD => gh_1470_regression_call::METHOD_CALL_DO_NOTHING,\n            gh_1470_regression_call::ARG_CONTRACT_HASH => entity_hash,\n        };\n        ExecuteRequestBuilder::standard(ACCOUNT_1_ADDR, GH_1470_REGRESSION_CALL, args).build()\n    };\n\n    builder.exec(call_contract_request).commit();\n\n    let exec_result = builder\n        .get_last_exec_result()\n        .expect(\"should have last response\");\n    let call_contract_error = exec_result.error().cloned().expect(\"should have error\");\n\n    let call_versioned_contract_request = {\n        let args = runtime_args! 
{\n            gh_1470_regression_call::ARG_TEST_METHOD => gh_1470_regression_call::METHOD_CALL_VERSIONED_DO_NOTHING,\n            gh_1470_regression_call::ARG_CONTRACT_PACKAGE_HASH => package_hash,\n        };\n        ExecuteRequestBuilder::standard(ACCOUNT_1_ADDR, GH_1470_REGRESSION_CALL, args).build()\n    };\n\n    builder.exec(call_versioned_contract_request).commit();\n\n    let exec_result = builder\n        .get_last_exec_result()\n        .expect(\"should have last response\");\n    let call_versioned_contract_error = exec_result.error().expect(\"should have error\");\n\n    match (&call_contract_error, &call_versioned_contract_error) {\n        (Error::Exec(ExecError::InvalidContext), Error::Exec(ExecError::InvalidContext)) => (),\n        _ => panic!(\"Both variants should raise same error.\"),\n    }\n\n    assert!(matches!(\n        call_versioned_contract_error,\n        Error::Exec(ExecError::InvalidContext)\n    ));\n    assert!(matches!(\n        call_contract_error,\n        Error::Exec(ExecError::InvalidContext)\n    ));\n}\n\n// #[ignore]\n// #[test]\n// fn gh_1470_call_contract_should_verify_invalid_arguments_length() {\n//     let mut builder = setup();\n\n//     let exec_request_1 = ExecuteRequestBuilder::standard(\n//         *DEFAULT_ACCOUNT_ADDR,\n//         GH_1470_REGRESSION,\n//         RuntimeArgs::new(),\n//     )\n//     .build();\n\n//     builder.exec(exec_request_1).expect_success().commit();\n\n//     let account_stored_value = builder\n//         .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[])\n//         .unwrap();\n//     let account = account_stored_value.as_account().cloned().unwrap();\n\n//     let contract_hash_key = account\n//         .named_keys()\n//         .get(gh_1470_regression::CONTRACT_HASH_NAME)\n//         .cloned()\n//         .unwrap();\n//     let contract_hash = contract_hash_key\n//         .into_hash()\n//         .map(ContractHash::new)\n//         .unwrap();\n//     let contract_package_hash_key 
= account\n//         .named_keys()\n//         .get(gh_1470_regression::CONTRACT_PACKAGE_HASH_NAME)\n//         .cloned()\n//         .unwrap();\n//     let contract_package_hash = contract_package_hash_key\n//         .into_hash()\n//         .map(ContractPackageHash::new)\n//         .unwrap();\n\n//     let call_contract_request = {\n//         let args = runtime_args! {\n//             gh_1470_regression_call::ARG_TEST_METHOD =>\n// gh_1470_regression_call::METHOD_CALL_DO_NOTHING_NO_ARGS,\n// gh_1470_regression_call::ARG_CONTRACT_HASH => contract_hash,         };\n//         ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, GH_1470_REGRESSION_CALL, args)\n//             .build()\n//     };\n\n//     builder.exec(call_contract_request).commit();\n\n//     let response = builder\n//         .get_last_exec_result()\n//         .expect(\"should have last response\");\n//     assert_eq!(response.len(), 1);\n//     let exec_response = response.last().expect(\"should have response\");\n//     let call_contract_error = exec_response\n//         .as_error()\n//         .cloned()\n//         .expect(\"should have error\");\n\n//     let call_versioned_contract_request = {\n//         let args = runtime_args! 
{\n//             gh_1470_regression_call::ARG_TEST_METHOD =>\n// gh_1470_regression_call::METHOD_CALL_VERSIONED_DO_NOTHING_NO_ARGS,\n// gh_1470_regression_call::ARG_CONTRACT_PACKAGE_HASH => contract_package_hash,         };\n//         ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, GH_1470_REGRESSION_CALL, args)\n//             .build()\n//     };\n\n//     builder.exec(call_versioned_contract_request).commit();\n\n//     let response = builder\n//         .get_last_exec_result()\n//         .expect(\"should have last response\");\n//     assert_eq!(response.len(), 1);\n//     let exec_response = response.last().expect(\"should have response\");\n//     let call_versioned_contract_error = exec_response.as_error().expect(\"should have error\");\n\n//     match (&call_contract_error, &call_versioned_contract_error) {\n//         (\n//             Error::Exec(ExecError::MissingArgument { name: lhs_name }),\n//             Error::Exec(ExecError::MissingArgument { name: rhs_name }),\n//         ) if lhs_name == rhs_name => (),\n//         _ => panic!(\n//             \"Both variants should raise same error: lhs={:?} rhs={:?}\",\n//             call_contract_error, call_versioned_contract_error\n//         ),\n//     }\n\n//     assert!(\n//         matches!(\n//             &call_versioned_contract_error,\n//             Error::Exec(ExecError::MissingArgument {\n//                 name,\n//             })\n//             if name == gh_1470_regression::ARG1\n//         ),\n//         \"{:?}\",\n//         call_versioned_contract_error\n//     );\n//     assert!(\n//         matches!(\n//             &call_contract_error,\n//             Error::Exec(ExecError::MissingArgument {\n//                 name,\n//             })\n//             if name == gh_1470_regression::ARG1\n//         ),\n//         \"{:?}\",\n//         call_contract_error\n//     );\n// }\n\n#[ignore]\n#[test]\nfn gh_1470_call_contract_should_ignore_optional_args() {\n    let mut builder = 
setup();\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        GH_1470_REGRESSION,\n        RuntimeArgs::new(),\n    )\n    .build();\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have default contract package\");\n\n    let contract_hash_key = account\n        .named_keys()\n        .get(gh_1470_regression::CONTRACT_HASH_NAME)\n        .cloned()\n        .unwrap();\n    let entity_hash = contract_hash_key\n        .into_entity_hash_addr()\n        .map(AddressableEntityHash::new)\n        .unwrap();\n    let package_hash_key = account\n        .named_keys()\n        .get(gh_1470_regression::PACKAGE_HASH_NAME)\n        .cloned()\n        .unwrap();\n    let package_hash = package_hash_key\n        .into_package_addr()\n        .map(PackageHash::new)\n        .unwrap();\n\n    let call_contract_request = {\n        let args = runtime_args! {\n            gh_1470_regression_call::ARG_TEST_METHOD => gh_1470_regression_call::METHOD_CALL_DO_NOTHING_NO_OPTIONALS,\n            gh_1470_regression_call::ARG_CONTRACT_HASH => entity_hash,\n        };\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, GH_1470_REGRESSION_CALL, args)\n            .build()\n    };\n\n    builder\n        .exec(call_contract_request)\n        .expect_success()\n        .commit();\n\n    let call_versioned_contract_request = {\n        let args = runtime_args! 
{\n            gh_1470_regression_call::ARG_TEST_METHOD => gh_1470_regression_call::METHOD_CALL_VERSIONED_DO_NOTHING_NO_OPTIONALS,\n            gh_1470_regression_call::ARG_CONTRACT_PACKAGE_HASH => package_hash,\n        };\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, GH_1470_REGRESSION_CALL, args)\n            .build()\n    };\n\n    builder\n        .exec(call_versioned_contract_request)\n        .expect_success()\n        .commit();\n}\n\n#[ignore]\n#[test]\nfn gh_1470_call_contract_should_not_accept_extra_args() {\n    let mut builder = setup();\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        GH_1470_REGRESSION,\n        RuntimeArgs::new(),\n    )\n    .build();\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have default contract package\");\n\n    let contract_hash_key = account\n        .named_keys()\n        .get(gh_1470_regression::CONTRACT_HASH_NAME)\n        .cloned()\n        .unwrap();\n    let entity_hash = contract_hash_key\n        .into_entity_hash_addr()\n        .map(AddressableEntityHash::new)\n        .unwrap();\n    let package_hash_key = account\n        .named_keys()\n        .get(gh_1470_regression::PACKAGE_HASH_NAME)\n        .cloned()\n        .unwrap();\n    let package_hash = package_hash_key\n        .into_package_addr()\n        .map(PackageHash::new)\n        .unwrap();\n\n    let call_contract_request = {\n        let args = runtime_args! 
{\n            gh_1470_regression_call::ARG_TEST_METHOD => gh_1470_regression_call::METHOD_CALL_DO_NOTHING_EXTRA,\n            gh_1470_regression_call::ARG_CONTRACT_HASH => entity_hash,\n        };\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, GH_1470_REGRESSION_CALL, args)\n            .build()\n    };\n\n    builder\n        .exec(call_contract_request)\n        .expect_success()\n        .commit();\n\n    let call_versioned_contract_request = {\n        let args = runtime_args! {\n            gh_1470_regression_call::ARG_TEST_METHOD => gh_1470_regression_call::METHOD_CALL_VERSIONED_DO_NOTHING_EXTRA,\n            gh_1470_regression_call::ARG_CONTRACT_PACKAGE_HASH => package_hash,\n        };\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, GH_1470_REGRESSION_CALL, args)\n            .build()\n    };\n\n    builder\n        .exec(call_versioned_contract_request)\n        .expect_success()\n        .commit();\n}\n\n#[ignore]\n#[test]\nfn gh_1470_call_contract_should_verify_wrong_argument_types() {\n    let mut builder = setup();\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        GH_1470_REGRESSION,\n        RuntimeArgs::new(),\n    )\n    .build();\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have contract\");\n\n    let entity_hash_key = account\n        .named_keys()\n        .get(gh_1470_regression::CONTRACT_HASH_NAME)\n        .cloned()\n        .unwrap();\n    let entity_hash = entity_hash_key\n        .into_entity_hash_addr()\n        .map(AddressableEntityHash::new)\n        .unwrap();\n    let package_hash_key = account\n        .named_keys()\n        .get(gh_1470_regression::PACKAGE_HASH_NAME)\n        .cloned()\n        .unwrap();\n    let package_hash = package_hash_key\n        .into_package_addr()\n        
.map(PackageHash::new)\n        .unwrap();\n\n    let call_contract_request = {\n        let args = runtime_args! {\n                    gh_1470_regression_call::ARG_TEST_METHOD =>\n        gh_1470_regression_call::METHOD_CALL_DO_NOTHING_TYPE_MISMATCH,\n        gh_1470_regression_call::ARG_CONTRACT_HASH => entity_hash,         };\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, GH_1470_REGRESSION_CALL, args)\n            .build()\n    };\n\n    builder.exec(call_contract_request).commit();\n\n    let exec_result = builder\n        .get_last_exec_result()\n        .expect(\"should have last response\");\n    let call_contract_error = exec_result.error().cloned().expect(\"should have error\");\n\n    let call_versioned_contract_request = {\n        let args = runtime_args! {\n                    gh_1470_regression_call::ARG_TEST_METHOD =>\n        gh_1470_regression_call::METHOD_CALL_VERSIONED_DO_NOTHING_TYPE_MISMATCH,\n        gh_1470_regression_call::ARG_CONTRACT_PACKAGE_HASH => package_hash,         };\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, GH_1470_REGRESSION_CALL, args)\n            .build()\n    };\n\n    builder.exec(call_versioned_contract_request).commit();\n\n    let exec_result = builder\n        .get_last_exec_result()\n        .expect(\"should have last response\");\n    let call_versioned_contract_error = exec_result.error().expect(\"should have error\");\n\n    let expected = gh_1470_regression::Arg1Type::cl_type();\n    let found = gh_1470_regression::Arg3Type::cl_type();\n\n    let expected_type_mismatch =\n        StoredValueTypeMismatch::new(format!(\"{:?}\", expected), format!(\"{:?}\", found));\n\n    match (&call_contract_error, &call_versioned_contract_error) {\n        (\n            Error::Exec(ExecError::TypeMismatch(lhs_type_mismatch)),\n            Error::Exec(ExecError::TypeMismatch(rhs_type_mismatch)),\n        ) if lhs_type_mismatch == &expected_type_mismatch\n            && rhs_type_mismatch == 
&expected_type_mismatch => {}\n        _ => panic!(\n            \"Both variants should raise same error: lhs={:?} rhs={:?}\",\n            call_contract_error, call_versioned_contract_error\n        ),\n    }\n\n    assert!(matches!(\n        call_versioned_contract_error,\n        Error::Exec(ExecError::TypeMismatch(type_mismatch))\n            if *type_mismatch == expected_type_mismatch\n    ));\n    assert!(matches!(\n        call_contract_error,\n        Error::Exec(ExecError::TypeMismatch(type_mismatch))\n            if type_mismatch == expected_type_mismatch\n    ));\n}\n\n#[ignore]\n#[test]\nfn gh_1470_call_contract_should_verify_wrong_optional_argument_types() {\n    let mut builder = setup();\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        GH_1470_REGRESSION,\n        RuntimeArgs::new(),\n    )\n    .build();\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have default contract package\");\n\n    let entity_hash_key = account\n        .named_keys()\n        .get(gh_1470_regression::CONTRACT_HASH_NAME)\n        .cloned()\n        .unwrap();\n    let entity_hash = entity_hash_key\n        .into_entity_hash_addr()\n        .map(AddressableEntityHash::new)\n        .unwrap();\n    let package_hash_key = account\n        .named_keys()\n        .get(gh_1470_regression::PACKAGE_HASH_NAME)\n        .cloned()\n        .unwrap();\n    let package_hash = package_hash_key\n        .into_package_addr()\n        .map(PackageHash::new)\n        .unwrap();\n\n    let call_contract_request = {\n        let args = runtime_args! 
{\n            gh_1470_regression_call::ARG_TEST_METHOD =>\n            gh_1470_regression_call::METHOD_CALL_DO_NOTHING_OPTIONAL_TYPE_MISMATCH,\n            gh_1470_regression_call::ARG_CONTRACT_HASH => entity_hash,\n        };\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, GH_1470_REGRESSION_CALL, args)\n            .build()\n    };\n\n    builder\n        .exec(call_contract_request)\n        .expect_failure()\n        .commit();\n\n    let exec_result = builder\n        .get_last_exec_result()\n        .expect(\"should have last response\");\n    let call_contract_error = exec_result.error().cloned().expect(\"should have error\");\n\n    let call_versioned_contract_request = {\n        let args = runtime_args! {\n            gh_1470_regression_call::ARG_TEST_METHOD => gh_1470_regression_call::METHOD_CALL_VERSIONED_DO_NOTHING_OPTIONAL_TYPE_MISMATCH,\n            gh_1470_regression_call::ARG_CONTRACT_PACKAGE_HASH => package_hash,\n        };\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, GH_1470_REGRESSION_CALL, args)\n            .build()\n    };\n\n    builder.exec(call_versioned_contract_request).commit();\n\n    let exec_result = builder\n        .get_last_exec_result()\n        .expect(\"should have last response\");\n    let call_versioned_contract_error = exec_result.error().expect(\"should have error\");\n\n    let expected = gh_1470_regression::Arg3Type::cl_type();\n    let found = gh_1470_regression::Arg4Type::cl_type();\n\n    let expected_type_mismatch =\n        StoredValueTypeMismatch::new(format!(\"{:?}\", expected), format!(\"{:?}\", found));\n\n    match (&call_contract_error, &call_versioned_contract_error) {\n        (\n            Error::Exec(ExecError::TypeMismatch(lhs_type_mismatch)),\n            Error::Exec(ExecError::TypeMismatch(rhs_type_mismatch)),\n        ) if lhs_type_mismatch == &expected_type_mismatch\n            && rhs_type_mismatch == &expected_type_mismatch => {}\n        _ => panic!(\n          
  \"Both variants should raise same error: lhs={:?} rhs={:?}\",\n            call_contract_error, call_versioned_contract_error\n        ),\n    }\n\n    assert!(matches!(\n        call_versioned_contract_error,\n        Error::Exec(ExecError::TypeMismatch(type_mismatch))\n        if *type_mismatch == expected_type_mismatch\n    ));\n    assert!(matches!(\n        call_contract_error,\n        Error::Exec(ExecError::TypeMismatch(type_mismatch))\n        if type_mismatch == expected_type_mismatch\n    ));\n}\n\n#[ignore]\n#[test]\nfn should_transfer_after_major_version_bump_from_1_2_0() {\n    let (mut builder, lmdb_fixture_state, _temp_dir) =\n        lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_3_1);\n\n    let previous_protocol_version = lmdb_fixture_state.genesis_protocol_version();\n\n    let current_protocol_version = lmdb_fixture_state.genesis_protocol_version();\n\n    let new_protocol_version =\n        ProtocolVersion::from_parts(current_protocol_version.value().major + 1, 0, 0);\n\n    let global_state_update =\n        apply_global_state_update(&builder, lmdb_fixture_state.post_state_hash);\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(previous_protocol_version)\n            .with_new_protocol_version(new_protocol_version)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .with_global_state_update(global_state_update)\n            .with_new_gas_hold_handling(HoldBalanceHandling::Accrued)\n            .with_new_gas_hold_interval(1200u64)\n            .build()\n    };\n\n    builder\n        .with_block_time(Timestamp::now().into())\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let transfer = TransferRequestBuilder::new(1, AccountHash::new([3; 32]))\n        .with_transfer_id(1)\n        .build();\n\n    builder.transfer_and_commit(transfer).expect_success();\n}\n\n#[ignore]\n#[test]\nfn 
should_transfer_after_minor_version_bump_from_1_2_0() {\n    let (mut builder, lmdb_fixture_state, _temp_dir) =\n        lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_3_1);\n\n    let current_protocol_version = lmdb_fixture_state.genesis_protocol_version();\n\n    let new_protocol_version = ProtocolVersion::from_parts(\n        current_protocol_version.value().major,\n        current_protocol_version.value().minor + 1,\n        0,\n    );\n\n    let global_state_update =\n        apply_global_state_update(&builder, lmdb_fixture_state.post_state_hash);\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(current_protocol_version)\n            .with_new_protocol_version(new_protocol_version)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .with_global_state_update(global_state_update)\n            .with_new_gas_hold_handling(HoldBalanceHandling::Accrued)\n            .with_new_gas_hold_interval(1200u64)\n            .build()\n    };\n\n    builder\n        .with_block_time(Timestamp::now().into())\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let transfer = TransferRequestBuilder::new(1, AccountHash::new([3; 32]))\n        .with_transfer_id(1)\n        .build();\n    builder.transfer_and_commit(transfer).expect_success();\n}\n\n#[ignore]\n#[test]\nfn should_add_bid_after_major_bump() {\n    let (mut builder, lmdb_fixture_state, _temp_dir) =\n        lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_3_1);\n\n    let current_protocol_version = lmdb_fixture_state.genesis_protocol_version();\n\n    let new_protocol_version =\n        ProtocolVersion::from_parts(current_protocol_version.value().major + 1, 0, 0);\n\n    let global_state_update =\n        apply_global_state_update(&builder, lmdb_fixture_state.post_state_hash);\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n  
          .with_current_protocol_version(current_protocol_version)\n            .with_new_protocol_version(new_protocol_version)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .with_global_state_update(global_state_update)\n            .with_new_gas_hold_handling(HoldBalanceHandling::Accrued)\n            .with_new_gas_hold_interval(1200u64)\n            .build()\n    };\n\n    builder\n        .with_block_time(Timestamp::now().into())\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let add_bid_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            auction::ARG_AMOUNT => U512::from(BOND_AMOUNT),\n            auction::ARG_DELEGATION_RATE => BID_DELEGATION_RATE,\n        },\n    )\n    .build();\n\n    builder.exec(add_bid_request).expect_success().commit();\n\n    let _default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have default account\");\n}\n\n#[ignore]\n#[test]\nfn should_add_bid_after_minor_bump() {\n    let (mut builder, lmdb_fixture_state, _temp_dir) =\n        lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_3_1);\n\n    let current_protocol_version = lmdb_fixture_state.genesis_protocol_version();\n\n    let new_protocol_version = ProtocolVersion::from_parts(\n        current_protocol_version.value().major,\n        current_protocol_version.value().minor + 1,\n        0,\n    );\n\n    let global_state_update =\n        apply_global_state_update(&builder, lmdb_fixture_state.post_state_hash);\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(current_protocol_version)\n            .with_new_protocol_version(new_protocol_version)\n            
.with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .with_global_state_update(global_state_update)\n            .with_new_gas_hold_handling(HoldBalanceHandling::Accrued)\n            .with_new_gas_hold_interval(1200u64)\n            .build()\n    };\n\n    builder\n        .with_block_time(Timestamp::now().into())\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let add_bid_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            auction::ARG_AMOUNT => U512::from(BOND_AMOUNT),\n            auction::ARG_DELEGATION_RATE => BID_DELEGATION_RATE,\n        },\n    )\n    .build();\n\n    builder.exec(add_bid_request).expect_success().commit();\n\n    let _default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have default account\");\n}\n\n#[ignore]\n#[test]\nfn should_wasm_transfer_after_major_bump() {\n    let (mut builder, lmdb_fixture_state, _temp_dir) =\n        lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_3_1);\n\n    let current_protocol_version = lmdb_fixture_state.genesis_protocol_version();\n\n    let new_protocol_version =\n        ProtocolVersion::from_parts(current_protocol_version.value().major + 1, 0, 0);\n\n    let global_state_update =\n        apply_global_state_update(&builder, lmdb_fixture_state.post_state_hash);\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(current_protocol_version)\n            .with_new_protocol_version(new_protocol_version)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .with_global_state_update(global_state_update)\n            .with_new_gas_hold_handling(HoldBalanceHandling::Accrued)\n            .with_new_gas_hold_interval(1200u64)\n            
.build()\n    };\n\n    builder\n        .with_block_time(Timestamp::now().into())\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let wasm_transfer = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_AMOUNT => U512::one(),\n            ARG_TARGET => AccountHash::new([1; 32]),\n        },\n    )\n    .build();\n\n    builder.exec(wasm_transfer).expect_success().commit();\n\n    let _default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have default account\");\n}\n\n#[ignore]\n#[test]\nfn should_wasm_transfer_after_minor_bump() {\n    let (mut builder, lmdb_fixture_state, _temp_dir) =\n        lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_3_1);\n\n    let current_protocol_version = lmdb_fixture_state.genesis_protocol_version();\n\n    let new_protocol_version = ProtocolVersion::from_parts(\n        current_protocol_version.value().major,\n        current_protocol_version.value().minor + 1,\n        0,\n    );\n\n    let global_state_update =\n        apply_global_state_update(&builder, lmdb_fixture_state.post_state_hash);\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(current_protocol_version)\n            .with_new_protocol_version(new_protocol_version)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .with_global_state_update(global_state_update)\n            .with_new_gas_hold_handling(HoldBalanceHandling::Accrued)\n            .with_new_gas_hold_interval(1200u64)\n            .build()\n    };\n\n    builder\n        .with_block_time(Timestamp::now().into())\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let wasm_transfer = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        
CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_AMOUNT => U512::one(),\n            ARG_TARGET => AccountHash::new([1; 32]),\n        },\n    )\n    .build();\n\n    builder.exec(wasm_transfer).expect_success().commit();\n\n    let _default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have default account\");\n}\n\n#[ignore]\n#[test]\nfn should_upgrade_from_1_3_1_rel_fixture() {\n    let (mut builder, lmdb_fixture_state, _temp_dir) =\n        lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_3_1);\n\n    let previous_protocol_version = lmdb_fixture_state.genesis_protocol_version();\n\n    let new_protocol_version = ProtocolVersion::from_parts(\n        previous_protocol_version.value().major,\n        previous_protocol_version.value().minor + 1,\n        0,\n    );\n\n    let global_state_update =\n        apply_global_state_update(&builder, lmdb_fixture_state.post_state_hash);\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(previous_protocol_version)\n            .with_new_protocol_version(new_protocol_version)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .with_global_state_update(global_state_update)\n            .build()\n    };\n\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/gh_1688.rs",
    "content": "use casper_engine_test_support::{\n    deploy_item::DeployItem, DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder,\n    DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{\n    runtime_args, system::standard_payment::ARG_AMOUNT, AddressableEntityHash, PackageHash,\n    RuntimeArgs,\n};\n\nconst GH_1688_REGRESSION: &str = \"gh_1688_regression.wasm\";\n\nconst METHOD_PUT_KEY: &str = \"put_key\";\nconst NEW_KEY_NAME: &str = \"Hello\";\nconst PACKAGE_KEY: &str = \"contract_package\";\nconst CONTRACT_HASH_KEY: &str = \"contract_hash\";\n\nfn setup() -> (LmdbWasmTestBuilder, PackageHash, AddressableEntityHash) {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let install_contract_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        GH_1688_REGRESSION,\n        runtime_args! {},\n    )\n    .build();\n\n    builder\n        .exec(install_contract_request_1)\n        .expect_success()\n        .commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .unwrap();\n\n    let package_hash_key = account\n        .named_keys()\n        .get(PACKAGE_KEY)\n        .expect(\"should have package hash\");\n\n    let entity_hash_key = account\n        .named_keys()\n        .get(CONTRACT_HASH_KEY)\n        .expect(\"should have hash\");\n\n    let contract_package_hash = package_hash_key\n        .into_hash_addr()\n        .map(PackageHash::new)\n        .expect(\"should be hash\");\n\n    let entity_hash = entity_hash_key\n        .into_entity_hash_addr()\n        .map(AddressableEntityHash::new)\n        .expect(\"should be hash\");\n\n    (builder, contract_package_hash, entity_hash)\n}\n\nfn test(deploy_item_builder: impl FnOnce(PackageHash, AddressableEntityHash) -> DeployItem) {\n    let (mut builder, contract_package_hash, contract_hash) = 
setup();\n\n    let deploy_item = deploy_item_builder(contract_package_hash, contract_hash);\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n    builder.exec(exec_request).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .unwrap();\n    let contract = builder\n        .get_entity_with_named_keys_by_entity_hash(contract_hash)\n        .expect(\"should have contract\");\n\n    assert!(\n        contract.named_keys().contains(NEW_KEY_NAME),\n        \"expected {} in {:?}\",\n        NEW_KEY_NAME,\n        contract.named_keys()\n    );\n    assert!(\n        !account.named_keys().contains(NEW_KEY_NAME),\n        \"unexpected {} in {:?}\",\n        NEW_KEY_NAME,\n        contract.named_keys()\n    );\n}\n\n#[ignore]\n#[test]\nfn should_run_gh_1688_regression_stored_versioned_contract_by_hash() {\n    test(|contract_package_hash, _contract_hash| {\n        DeployItemBuilder::new()\n            .with_address(*DEFAULT_ACCOUNT_ADDR)\n            .with_stored_versioned_contract_by_hash(\n                contract_package_hash.value(),\n                None,\n                METHOD_PUT_KEY,\n                RuntimeArgs::default(),\n            )\n            .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, })\n            .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n            .with_deploy_hash([42; 32])\n            .build()\n    });\n}\n\n#[ignore]\n#[test]\nfn should_run_gh_1688_regression_stored_versioned_contract_by_name() {\n    test(|_contract_package_hash, _contract_hash| {\n        DeployItemBuilder::new()\n            .with_address(*DEFAULT_ACCOUNT_ADDR)\n            .with_stored_versioned_contract_by_name(\n                PACKAGE_KEY,\n                None,\n                METHOD_PUT_KEY,\n                RuntimeArgs::default(),\n            )\n            .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, })\n            .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n            .with_deploy_hash([42; 32])\n            .build()\n    });\n}\n\n#[ignore]\n#[test]\nfn should_run_gh_1688_regression_stored_contract_by_hash() {\n    test(|_contract_package_hash, contract_hash| {\n        DeployItemBuilder::new()\n            .with_address(*DEFAULT_ACCOUNT_ADDR)\n            .with_stored_session_hash(contract_hash, METHOD_PUT_KEY, RuntimeArgs::default())\n            .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, })\n            .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n            .with_deploy_hash([42; 32])\n            .build()\n    });\n}\n\n#[ignore]\n#[test]\nfn should_run_gh_1688_regression_stored_contract_by_name() {\n    test(|_contract_package_hash, _contract_hash| {\n        DeployItemBuilder::new()\n            .with_address(*DEFAULT_ACCOUNT_ADDR)\n            .with_stored_session_named_key(\n                CONTRACT_HASH_KEY,\n                METHOD_PUT_KEY,\n                RuntimeArgs::default(),\n            )\n            .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, })\n            .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n            .with_deploy_hash([42; 32])\n            .build()\n    });\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/gh_1902.rs",
    "content": "use num_rational::Ratio;\nuse once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequest, ExecuteRequestBuilder, LmdbWasmTestBuilder,\n    TransferRequestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_PUBLIC_KEY,\n    LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse casper_execution_engine::engine_state::engine_config::DEFAULT_MINIMUM_DELEGATION_AMOUNT;\nuse casper_types::{\n    account::AccountHash,\n    runtime_args,\n    system::{\n        auction::{self, DelegationRate},\n        standard_payment,\n    },\n    FeeHandling, Gas, PublicKey, RefundHandling, SecretKey, U512,\n};\n\nconst BOND_AMOUNT: u64 = 42;\nconst DELEGATE_AMOUNT: u64 = 100 + DEFAULT_MINIMUM_DELEGATION_AMOUNT;\nconst DELEGATION_RATE: DelegationRate = 10;\n\nstatic ACCOUNT_1_SECRET_KEY: Lazy<SecretKey> =\n    Lazy::new(|| SecretKey::secp256k1_from_bytes([99; 32]).unwrap());\nstatic ACCOUNT_1_PUBLIC_KEY: Lazy<PublicKey> =\n    Lazy::new(|| PublicKey::from(&*ACCOUNT_1_SECRET_KEY));\nstatic ACCOUNT_1_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*ACCOUNT_1_PUBLIC_KEY));\n\nfn setup() -> LmdbWasmTestBuilder {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    let chainspec = builder\n        .chainspec()\n        .clone()\n        .with_refund_handling(RefundHandling::Refund {\n            refund_ratio: Ratio::new(1, 1),\n        })\n        .with_fee_handling(FeeHandling::PayToProposer);\n    builder.with_chainspec(chainspec);\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let transfer_request =\n        TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, *ACCOUNT_1_ADDR).build();\n    builder\n        .transfer_and_commit(transfer_request)\n        .expect_success();\n    builder\n}\n\nfn exec_and_assert_costs(\n    builder: &mut LmdbWasmTestBuilder,\n    exec_request: ExecuteRequest,\n    expected_gas_cost: Gas,\n) {\n    
builder.exec(exec_request).expect_success().commit();\n    assert_eq!(builder.last_exec_gas_consumed(), expected_gas_cost);\n}\n\n#[ignore]\n#[test]\nfn should_not_charge_for_create_purse_in_first_time_bond() {\n    let mut builder = setup();\n\n    let bond_amount = U512::from(BOND_AMOUNT);\n    // This amount should be enough to make first time add_bid call.\n    let add_bid_cost = builder.get_auction_costs().add_bid;\n\n    let pay_cost = builder\n        .chainspec()\n        .system_costs_config\n        .standard_payment_costs()\n        .pay;\n\n    let add_bid_payment_amount = U512::from(add_bid_cost + pay_cost as u64) * 2;\n\n    let sender = *DEFAULT_ACCOUNT_ADDR;\n    let contract_hash = builder.get_auction_contract_hash();\n    let entry_point = auction::METHOD_ADD_BID;\n    let payment_args = runtime_args! { standard_payment::ARG_AMOUNT => add_bid_payment_amount, };\n    let session_args = runtime_args! {\n        auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n        auction::ARG_AMOUNT => bond_amount,\n        auction::ARG_DELEGATION_RATE => DELEGATION_RATE,\n    };\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(sender)\n        .with_stored_session_hash(contract_hash, entry_point, session_args)\n        .with_standard_payment(payment_args)\n        .with_authorization_keys(&[sender])\n        .with_deploy_hash([43; 32])\n        .build();\n\n    let add_bid_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    exec_and_assert_costs(&mut builder, add_bid_request, Gas::from(add_bid_cost));\n\n    let delegate_cost = builder.get_auction_costs().delegate;\n    let delegate_payment_amount = U512::from(delegate_cost);\n    let delegate_amount = U512::from(DELEGATE_AMOUNT);\n\n    let sender = *ACCOUNT_1_ADDR;\n    let contract_hash = builder.get_auction_contract_hash();\n    let entry_point = auction::METHOD_DELEGATE;\n    let payment_args = runtime_args! 
{\n        standard_payment::ARG_AMOUNT => delegate_payment_amount,\n    };\n    let session_args = runtime_args! {\n        auction::ARG_DELEGATOR => ACCOUNT_1_PUBLIC_KEY.clone(),\n        auction::ARG_VALIDATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n        auction::ARG_AMOUNT => delegate_amount,\n    };\n    let deploy_hash = [55; 32];\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(sender)\n        .with_stored_session_hash(contract_hash, entry_point, session_args)\n        .with_standard_payment(payment_args)\n        .with_authorization_keys(&[sender])\n        .with_deploy_hash(deploy_hash)\n        .build();\n\n    let delegate_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    exec_and_assert_costs(&mut builder, delegate_request, Gas::from(delegate_cost));\n\n    let undelegate_cost = builder.get_auction_costs().undelegate;\n    let undelegate_payment_amount = U512::from(undelegate_cost);\n    let undelegate_amount = delegate_amount;\n\n    let sender = *ACCOUNT_1_ADDR;\n    let contract_hash = builder.get_auction_contract_hash();\n    let entry_point = auction::METHOD_UNDELEGATE;\n    let payment_args = runtime_args! {\n        standard_payment::ARG_AMOUNT => undelegate_payment_amount,\n    };\n    let session_args = runtime_args! 
{\n        auction::ARG_DELEGATOR => ACCOUNT_1_PUBLIC_KEY.clone(),\n        auction::ARG_VALIDATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n        auction::ARG_AMOUNT => undelegate_amount,\n    };\n    let deploy_hash = [56; 32];\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(sender)\n        .with_stored_session_hash(contract_hash, entry_point, session_args)\n        .with_standard_payment(payment_args)\n        .with_authorization_keys(&[sender])\n        .with_deploy_hash(deploy_hash)\n        .build();\n\n    let undelegate_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    exec_and_assert_costs(&mut builder, undelegate_request, Gas::from(undelegate_cost));\n\n    let unbond_amount = bond_amount;\n    // This amount should be enough to make first time add_bid call.\n    let withdraw_bid_cost = builder.get_auction_costs().withdraw_bid;\n    let withdraw_bid_payment_amount = U512::from(withdraw_bid_cost);\n\n    let sender = *DEFAULT_ACCOUNT_ADDR;\n    let contract_hash = builder.get_auction_contract_hash();\n    let entry_point = auction::METHOD_WITHDRAW_BID;\n    let payment_args =\n        runtime_args! { standard_payment::ARG_AMOUNT => withdraw_bid_payment_amount, };\n    let session_args = runtime_args! {\n        auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n        auction::ARG_AMOUNT => unbond_amount,\n    };\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(sender)\n        .with_stored_session_hash(contract_hash, entry_point, session_args)\n        .with_standard_payment(payment_args)\n        .with_authorization_keys(&[sender])\n        .with_deploy_hash([58; 32])\n        .build();\n\n    let withdraw_bid_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    exec_and_assert_costs(\n        &mut builder,\n        withdraw_bid_request,\n        Gas::from(withdraw_bid_cost),\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/gh_1931.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{RuntimeArgs, StoredValue};\n\nconst CONTRACT_NAME: &str = \"do_nothing_stored.wasm\";\nconst CONTRACT_PACKAGE_NAMED_KEY: &str = \"do_nothing_package_hash\";\n\n#[ignore]\n#[test]\nfn should_query_contract_package() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()).commit();\n\n    let install_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, CONTRACT_NAME, RuntimeArgs::new())\n            .build();\n\n    builder.exec(install_request).expect_success().commit();\n\n    let contract_package_hash = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .unwrap()\n        .named_keys()\n        .clone()\n        .get(CONTRACT_PACKAGE_NAMED_KEY)\n        .expect(\"failed to get contract package named key.\")\n        .to_owned();\n\n    let contract_package = builder\n        .query(None, contract_package_hash, &[])\n        .expect(\"failed to find contract package\");\n\n    assert!(matches!(contract_package, StoredValue::ContractPackage(_)));\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/gh_2280.rs",
    "content": "use once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, UpgradeRequestBuilder,\n    DEFAULT_ACCOUNT_ADDR, DEFAULT_PROTOCOL_VERSION, LOCAL_GENESIS_REQUEST,\n    MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse casper_types::{\n    account::AccountHash, runtime_args, system::mint, AddressableEntityHash, EraId, Gas,\n    HostFunction, HostFunctionCost, HostFunctionCostsV1, Key, MintCosts, Motes,\n    ProtocolUpgradeConfig, ProtocolVersion, PublicKey, SecretKey, WasmConfig, WasmV1Config,\n    WasmV2Config, DEFAULT_MAX_STACK_HEIGHT, DEFAULT_WASM_MAX_MEMORY, U512,\n};\n\nconst TRANSFER_TO_ACCOUNT_CONTRACT: &str = \"transfer_to_account.wasm\";\nconst TRANSFER_PURSE_TO_ACCOUNT_CONTRACT: &str = \"transfer_purse_to_account.wasm\";\nconst GH_2280_REGRESSION_CONTRACT: &str = \"gh_2280_regression.wasm\";\nconst GH_2280_REGRESSION_CALL_CONTRACT: &str = \"gh_2280_regression_call.wasm\";\nconst CREATE_PURSE_01_CONTRACT: &str = \"create_purse_01.wasm\";\nconst FAUCET_NAME: &str = \"faucet\";\n\nstatic ACCOUNT_1_PK: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([200; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic ACCOUNT_1_ADDR: Lazy<AccountHash> = Lazy::new(|| ACCOUNT_1_PK.to_account_hash());\n\nstatic ACCOUNT_2_PK: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([202; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic ACCOUNT_2_ADDR: Lazy<AccountHash> = Lazy::new(|| ACCOUNT_2_PK.to_account_hash());\n\nstatic ACCOUNT_3_PK: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([204; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic ACCOUNT_3_ADDR: Lazy<AccountHash> = Lazy::new(|| ACCOUNT_3_PK.to_account_hash());\n\nconst ARG_TARGET: &str = \"target\";\nconst ARG_AMOUNT: &str = 
\"amount\";\n\nconst TOKEN_AMOUNT: u64 = 1_000_000;\n\nconst ARG_PURSE_NAME: &str = \"purse_name\";\nconst TEST_PURSE_NAME: &str = \"test\";\n\nconst OLD_PROTOCOL_VERSION: ProtocolVersion = DEFAULT_PROTOCOL_VERSION;\nconst NEW_PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::from_parts(\n    OLD_PROTOCOL_VERSION.value().major,\n    OLD_PROTOCOL_VERSION.value().minor,\n    OLD_PROTOCOL_VERSION.value().patch + 1,\n);\nconst DEFAULT_ACTIVATION_POINT: EraId = EraId::new(1);\n\nconst HOST_FUNCTION_COST_CHANGE: HostFunctionCost = 13_730_593; // random prime number\n\nconst ARG_FAUCET_FUNDS: &str = \"faucet_initial_balance\";\nconst HASH_KEY_NAME: &str = \"gh_2280_hash\";\nconst ARG_CONTRACT_HASH: &str = \"contract_hash\";\n\n#[ignore]\n#[test]\nfn gh_2280_transfer_should_always_cost_the_same_gas() {\n    let session_file = TRANSFER_TO_ACCOUNT_CONTRACT;\n    let account_hash = *DEFAULT_ACCOUNT_ADDR;\n\n    let (mut builder, _) = setup();\n\n    let faucet_args_1 = runtime_args! {\n        ARG_TARGET => *ACCOUNT_1_ADDR,\n        ARG_AMOUNT => TOKEN_AMOUNT,\n    };\n\n    let fund_request_1 =\n        ExecuteRequestBuilder::standard(account_hash, session_file, faucet_args_1).build();\n    builder.exec(fund_request_1).expect_success().commit();\n\n    let gas_cost_1 = builder.last_exec_gas_consumed();\n\n    // Next time pay exactly the amount that was reported which should be also the minimum you\n    // should be able to pay next time.\n    let payment_amount = Motes::from_gas(gas_cost_1, 1).unwrap();\n\n    let deploy_hash: [u8; 32] = [55; 32];\n    let faucet_args_2 = runtime_args! {\n        ARG_TARGET => *ACCOUNT_2_ADDR,\n        ARG_AMOUNT => TOKEN_AMOUNT,\n    };\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(account_hash)\n        .with_session_code(session_file, faucet_args_2)\n        // + default_create_purse_cost\n        .with_standard_payment(runtime_args! 
{\n            ARG_AMOUNT => payment_amount.value()\n        })\n        .with_authorization_keys(&[account_hash])\n        .with_deploy_hash(deploy_hash)\n        .build();\n\n    let fund_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n    builder.exec(fund_request_2).expect_success().commit();\n\n    let gas_cost_2 = builder.last_exec_gas_consumed();\n\n    assert_eq!(gas_cost_1, gas_cost_2);\n\n    // Increase \"transfer_to_account\" host function call exactly by X, so we can assert that\n    // transfer cost increased by exactly X without hidden fees.\n    let default_host_function_costs = HostFunctionCostsV1::default();\n\n    let default_transfer_to_account_cost = default_host_function_costs.transfer_to_account.cost();\n    let new_transfer_to_account_cost = default_transfer_to_account_cost\n        .checked_add(HOST_FUNCTION_COST_CHANGE)\n        .expect(\"should add without overflow\");\n    let new_transfer_to_account = HostFunction::fixed(new_transfer_to_account_cost);\n\n    let new_host_function_costs = HostFunctionCostsV1 {\n        transfer_to_account: new_transfer_to_account,\n        ..default_host_function_costs\n    };\n\n    let new_wasm_config =\n        make_wasm_config(new_host_function_costs, builder.chainspec().wasm_config);\n\n    // Inflate affected system contract entry point cost to the maximum\n    let new_mint_create_cost = u32::MAX;\n    let new_mint_costs = MintCosts {\n        create: new_mint_create_cost,\n        ..Default::default()\n    };\n\n    let updated_chainspec = builder\n        .chainspec()\n        .clone()\n        .with_wasm_config(new_wasm_config)\n        .with_mint_costs(new_mint_costs);\n\n    builder.with_chainspec(updated_chainspec);\n\n    let mut upgrade_request = make_upgrade_request();\n    builder.upgrade(&mut upgrade_request);\n\n    let deploy_hash: [u8; 32] = [77; 32];\n    let faucet_args_3 = runtime_args! 
{\n        ARG_TARGET => *ACCOUNT_3_ADDR,\n        ARG_AMOUNT => TOKEN_AMOUNT,\n    };\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(account_hash)\n        .with_session_code(session_file, faucet_args_3)\n        .with_standard_payment(runtime_args! {\n            ARG_AMOUNT => payment_amount.value() + HOST_FUNCTION_COST_CHANGE\n        })\n        .with_authorization_keys(&[account_hash])\n        .with_deploy_hash(deploy_hash)\n        .build();\n\n    let fund_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(fund_request_3).expect_success().commit();\n\n    let gas_cost_3 = builder.last_exec_gas_consumed();\n\n    assert!(gas_cost_3 > gas_cost_1);\n    assert!(gas_cost_3 > gas_cost_2);\n}\n\n#[ignore]\n#[test]\nfn gh_2280_create_purse_should_always_cost_the_same_gas() {\n    let account_hash = *DEFAULT_ACCOUNT_ADDR;\n    let session_file = CREATE_PURSE_01_CONTRACT;\n\n    let (mut builder, _) = setup();\n\n    let create_purse_args_1 = runtime_args! {\n        ARG_PURSE_NAME => TEST_PURSE_NAME\n    };\n\n    let fund_request_1 =\n        ExecuteRequestBuilder::standard(account_hash, session_file, create_purse_args_1).build();\n    builder.exec(fund_request_1).expect_success().commit();\n\n    let gas_cost_1 = builder.last_exec_gas_consumed();\n\n    // Next time pay exactly the amount that was reported which should be also the minimum you\n    // should be able to pay next time.\n    let payment_amount = Motes::from_gas(gas_cost_1, 1).unwrap();\n\n    let deploy_hash: [u8; 32] = [55; 32];\n    let create_purse_args_2 = runtime_args! {\n        ARG_PURSE_NAME => TEST_PURSE_NAME,\n    };\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(account_hash)\n        .with_session_code(session_file, create_purse_args_2)\n        // + default_create_purse_cost\n        .with_standard_payment(runtime_args! 
{\n            ARG_AMOUNT => payment_amount.value()\n        })\n        .with_authorization_keys(&[account_hash])\n        .with_deploy_hash(deploy_hash)\n        .build();\n\n    let fund_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n    builder.exec(fund_request_2).expect_success().commit();\n\n    let gas_cost_2 = builder.last_exec_gas_consumed();\n\n    assert_eq!(gas_cost_1, gas_cost_2);\n\n    let mut upgrade_request = make_upgrade_request();\n\n    // Increase \"transfer_to_account\" host function call exactly by X, so we can assert that\n    // transfer cost increased by exactly X without hidden fees.\n    let host_function_costs = builder\n        .chainspec()\n        .wasm_config\n        .v1()\n        .take_host_function_costs();\n\n    let default_create_purse_cost = host_function_costs.create_purse.cost();\n    let new_create_purse_cost = default_create_purse_cost\n        .checked_add(HOST_FUNCTION_COST_CHANGE)\n        .expect(\"should add without overflow\");\n    let new_create_purse = HostFunction::fixed(new_create_purse_cost);\n\n    let new_host_function_costs = HostFunctionCostsV1 {\n        create_purse: new_create_purse,\n        ..host_function_costs\n    };\n\n    let new_wasm_config =\n        make_wasm_config(new_host_function_costs, builder.chainspec().wasm_config);\n\n    // Inflate affected system contract entry point cost to the maximum\n    let new_mint_create_cost = u32::MAX;\n    let new_mint_costs = MintCosts {\n        create: new_mint_create_cost,\n        ..Default::default()\n    };\n\n    let updated_chainspec = builder\n        .chainspec()\n        .clone()\n        .with_wasm_config(new_wasm_config)\n        .with_mint_costs(new_mint_costs);\n\n    builder\n        .with_chainspec(updated_chainspec)\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let deploy_hash: [u8; 32] = [77; 32];\n    let create_purse_args_3 = runtime_args! 
{\n        ARG_PURSE_NAME => TEST_PURSE_NAME,\n    };\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(account_hash)\n        .with_session_code(session_file, create_purse_args_3)\n        .with_standard_payment(runtime_args! {\n            ARG_AMOUNT => payment_amount.value() + HOST_FUNCTION_COST_CHANGE\n        })\n        .with_authorization_keys(&[account_hash])\n        .with_deploy_hash(deploy_hash)\n        .build();\n\n    let fund_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(fund_request_3).expect_success().commit();\n\n    let gas_cost_3 = builder.last_exec_gas_consumed();\n\n    assert!(gas_cost_3 > gas_cost_1);\n    assert!(gas_cost_3 > gas_cost_2);\n\n    let gas_cost_diff = gas_cost_3.checked_sub(gas_cost_2).unwrap_or_default();\n    assert_eq!(\n        gas_cost_diff,\n        Gas::new(U512::from(HOST_FUNCTION_COST_CHANGE))\n    );\n}\n\n#[ignore]\n#[test]\nfn gh_2280_transfer_purse_to_account_should_always_cost_the_same_gas() {\n    let account_hash = *DEFAULT_ACCOUNT_ADDR;\n    let session_file = TRANSFER_PURSE_TO_ACCOUNT_CONTRACT;\n\n    let (mut builder, _) = setup();\n\n    let faucet_args_1 = runtime_args! {\n        ARG_TARGET => *ACCOUNT_1_ADDR,\n        ARG_AMOUNT => U512::from(TOKEN_AMOUNT),\n    };\n\n    let fund_request_1 =\n        ExecuteRequestBuilder::standard(account_hash, session_file, faucet_args_1).build();\n    builder.exec(fund_request_1).expect_success().commit();\n\n    let gas_cost_1 = builder.last_exec_gas_consumed();\n\n    // Next time pay exactly the amount that was reported which should be also the minimum you\n    // should be able to pay next time.\n    let payment_amount = Motes::from_gas(gas_cost_1, 1).unwrap();\n\n    let deploy_hash: [u8; 32] = [55; 32];\n    let faucet_args_2 = runtime_args! 
{\n        ARG_TARGET => *ACCOUNT_2_ADDR,\n        ARG_AMOUNT => U512::from(TOKEN_AMOUNT),\n    };\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(account_hash)\n        .with_session_code(TRANSFER_PURSE_TO_ACCOUNT_CONTRACT, faucet_args_2)\n        // + default_create_purse_cost\n        .with_standard_payment(runtime_args! {\n            ARG_AMOUNT => payment_amount.value()\n        })\n        .with_authorization_keys(&[account_hash])\n        .with_deploy_hash(deploy_hash)\n        .build();\n\n    let fund_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n    builder.exec(fund_request_2).expect_success().commit();\n\n    let gas_cost_2 = builder.last_exec_gas_consumed();\n\n    assert_eq!(gas_cost_1, gas_cost_2);\n\n    let mut upgrade_request = make_upgrade_request();\n\n    // Increase \"transfer_to_account\" host function call exactly by X, so we can assert that\n    // transfer cost increased by exactly X without hidden fees.\n    let default_host_function_costs = HostFunctionCostsV1::default();\n\n    let default_transfer_from_purse_to_account_cost = default_host_function_costs\n        .transfer_from_purse_to_account\n        .cost();\n    let new_transfer_from_purse_to_account_cost = default_transfer_from_purse_to_account_cost\n        .checked_add(HOST_FUNCTION_COST_CHANGE)\n        .expect(\"should add without overflow\");\n    let new_transfer_from_purse_to_account =\n        HostFunction::fixed(new_transfer_from_purse_to_account_cost);\n\n    let new_host_function_costs = HostFunctionCostsV1 {\n        transfer_from_purse_to_account: new_transfer_from_purse_to_account,\n        ..default_host_function_costs\n    };\n\n    let new_wasm_config =\n        make_wasm_config(new_host_function_costs, builder.chainspec().wasm_config);\n\n    // Inflate affected system contract entry point cost to the maximum\n    let new_mint_create_cost = u32::MAX;\n    let new_mint_costs = MintCosts {\n        create: 
new_mint_create_cost,\n        ..Default::default()\n    };\n\n    let updated_chainspec = builder\n        .chainspec()\n        .clone()\n        .with_wasm_config(new_wasm_config)\n        .with_mint_costs(new_mint_costs);\n\n    builder\n        .with_chainspec(updated_chainspec)\n        .upgrade(&mut upgrade_request);\n\n    let deploy_hash: [u8; 32] = [77; 32];\n    let faucet_args_3 = runtime_args! {\n        ARG_TARGET => *ACCOUNT_3_ADDR,\n        ARG_AMOUNT => U512::from(TOKEN_AMOUNT),\n    };\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(account_hash)\n        .with_session_code(session_file, faucet_args_3)\n        .with_standard_payment(runtime_args! {\n            ARG_AMOUNT => payment_amount.value() + HOST_FUNCTION_COST_CHANGE\n        })\n        .with_authorization_keys(&[account_hash])\n        .with_deploy_hash(deploy_hash)\n        .build();\n\n    let fund_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(fund_request_3).expect_success().commit();\n\n    let gas_cost_3 = builder.last_exec_gas_consumed();\n\n    assert!(gas_cost_3 > gas_cost_1);\n    assert!(gas_cost_3 > gas_cost_2);\n}\n\n#[ignore]\n#[test]\nfn gh_2280_stored_transfer_to_account_should_always_cost_the_same_gas() {\n    let account_hash = *DEFAULT_ACCOUNT_ADDR;\n    let entry_point = FAUCET_NAME;\n\n    let (mut builder, TestContext { gh_2280_regression }) = setup();\n\n    let faucet_args_1 = runtime_args! 
{\n        ARG_TARGET => *ACCOUNT_1_ADDR,\n    };\n\n    let fund_request_1 = ExecuteRequestBuilder::contract_call_by_hash(\n        account_hash,\n        gh_2280_regression,\n        entry_point,\n        faucet_args_1,\n    )\n    .build();\n    builder.exec(fund_request_1).expect_success().commit();\n\n    let gas_cost_1 = builder.last_exec_gas_consumed();\n\n    // Next time pay exactly the amount that was reported which should be also the minimum you\n    // should be able to pay next time.\n    let payment_amount = Motes::from_gas(gas_cost_1, 1).unwrap();\n\n    let deploy_hash: [u8; 32] = [55; 32];\n    let faucet_args_2 = runtime_args! {\n        ARG_TARGET => *ACCOUNT_2_ADDR,\n    };\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(account_hash)\n        .with_stored_session_hash(gh_2280_regression, entry_point, faucet_args_2)\n        // + default_create_purse_cost\n        .with_standard_payment(runtime_args! {\n            ARG_AMOUNT => payment_amount.value()\n        })\n        .with_authorization_keys(&[account_hash])\n        .with_deploy_hash(deploy_hash)\n        .build();\n\n    let fund_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n    builder.exec(fund_request_2).expect_success().commit();\n\n    let gas_cost_2 = builder.last_exec_gas_consumed();\n\n    assert_eq!(gas_cost_1, gas_cost_2);\n\n    let mut upgrade_request = make_upgrade_request();\n\n    // Increase \"transfer_to_account\" host function call exactly by X, so we can assert that\n    // transfer cost increased by exactly X without hidden fees.\n    let default_host_function_costs = HostFunctionCostsV1::default();\n\n    let default_transfer_from_purse_to_account_cost = default_host_function_costs\n        .transfer_from_purse_to_account\n        .cost();\n    let new_transfer_from_purse_to_account_cost = default_transfer_from_purse_to_account_cost\n        .checked_add(HOST_FUNCTION_COST_CHANGE)\n        .expect(\"should add 
without overflow\");\n    let new_transfer_from_purse_to_account =\n        HostFunction::fixed(new_transfer_from_purse_to_account_cost);\n\n    let new_host_function_costs = HostFunctionCostsV1 {\n        transfer_from_purse_to_account: new_transfer_from_purse_to_account,\n        ..default_host_function_costs\n    };\n\n    let new_wasm_config =\n        make_wasm_config(new_host_function_costs, builder.chainspec().wasm_config);\n\n    // Inflate affected system contract entry point cost to the maximum\n    let new_mint_create_cost = u32::MAX;\n    let new_mint_costs = MintCosts {\n        create: new_mint_create_cost,\n        ..Default::default()\n    };\n\n    let updated_chainspec = builder\n        .chainspec()\n        .clone()\n        .with_wasm_config(new_wasm_config)\n        .with_mint_costs(new_mint_costs);\n\n    builder\n        .with_chainspec(updated_chainspec)\n        .upgrade(&mut upgrade_request);\n\n    let deploy_hash: [u8; 32] = [77; 32];\n    let faucet_args_3 = runtime_args! {\n        ARG_TARGET => *ACCOUNT_3_ADDR,\n    };\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(account_hash)\n        .with_stored_session_hash(gh_2280_regression, entry_point, faucet_args_3)\n        .with_standard_payment(runtime_args! 
{\n            ARG_AMOUNT => payment_amount.value() + HOST_FUNCTION_COST_CHANGE\n        })\n        .with_authorization_keys(&[account_hash])\n        .with_deploy_hash(deploy_hash)\n        .build();\n\n    let fund_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(fund_request_3).expect_success().commit();\n\n    let gas_cost_3 = builder.last_exec_gas_consumed();\n\n    assert!(gas_cost_3 > gas_cost_1, \"{} <= {}\", gas_cost_3, gas_cost_1);\n    assert!(gas_cost_3 > gas_cost_2);\n}\n\n#[ignore]\n#[test]\nfn gh_2280_stored_faucet_call_should_cost_the_same() {\n    let session_file = GH_2280_REGRESSION_CALL_CONTRACT;\n    let account_hash = *DEFAULT_ACCOUNT_ADDR;\n\n    let (mut builder, TestContext { gh_2280_regression }) = setup();\n\n    let faucet_args_1 = runtime_args! {\n        ARG_CONTRACT_HASH => gh_2280_regression,\n        ARG_TARGET => *ACCOUNT_1_ADDR,\n    };\n\n    let fund_request_1 =\n        ExecuteRequestBuilder::standard(account_hash, session_file, faucet_args_1).build();\n    builder.exec(fund_request_1).expect_success().commit();\n\n    let gas_cost_1 = builder.last_exec_gas_consumed();\n\n    // Next time pay exactly the amount that was reported which should be also the minimum you\n    // should be able to pay next time.\n    let payment_amount = Motes::from_gas(gas_cost_1, 1).unwrap();\n\n    let deploy_hash: [u8; 32] = [55; 32];\n    let faucet_args_2 = runtime_args! {\n        ARG_CONTRACT_HASH => gh_2280_regression,\n        ARG_TARGET => *ACCOUNT_2_ADDR,\n    };\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(account_hash)\n        .with_session_code(session_file, faucet_args_2)\n        // + default_create_purse_cost\n        .with_standard_payment(runtime_args! 
{\n            ARG_AMOUNT => payment_amount.value()\n        })\n        .with_authorization_keys(&[account_hash])\n        .with_deploy_hash(deploy_hash)\n        .build();\n\n    let fund_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n    builder.exec(fund_request_2).expect_success().commit();\n\n    let gas_cost_2 = builder.last_exec_gas_consumed();\n\n    assert_eq!(gas_cost_1, gas_cost_2);\n\n    let mut upgrade_request = make_upgrade_request();\n\n    // Increase \"transfer_to_account\" host function call exactly by X, so we can assert that\n    // transfer cost increased by exactly X without hidden fees.\n    let default_host_function_costs = HostFunctionCostsV1::default();\n\n    let default_transfer_from_purse_to_account_cost = default_host_function_costs\n        .transfer_from_purse_to_account\n        .cost();\n    let new_transfer_from_purse_to_account_cost = default_transfer_from_purse_to_account_cost\n        .checked_add(HOST_FUNCTION_COST_CHANGE)\n        .expect(\"should add without overflow\");\n    let new_transfer_from_purse_to_account =\n        HostFunction::fixed(new_transfer_from_purse_to_account_cost);\n\n    let new_host_function_costs = HostFunctionCostsV1 {\n        transfer_from_purse_to_account: new_transfer_from_purse_to_account,\n        ..default_host_function_costs\n    };\n\n    let new_wasm_config =\n        make_wasm_config(new_host_function_costs, builder.chainspec().wasm_config);\n\n    // Inflate affected system contract entry point cost to the maximum\n    let new_mint_create_cost = u32::MAX;\n    let new_mint_costs = MintCosts {\n        create: new_mint_create_cost,\n        ..Default::default()\n    };\n\n    let updated_chainspec = builder\n        .chainspec()\n        .clone()\n        .with_wasm_config(new_wasm_config)\n        .with_mint_costs(new_mint_costs);\n\n    builder\n        .with_chainspec(updated_chainspec)\n        .upgrade(&mut upgrade_request);\n\n    let deploy_hash: [u8; 
32] = [77; 32];\n    let faucet_args_3 = runtime_args! {\n        ARG_CONTRACT_HASH => gh_2280_regression,\n        ARG_TARGET => *ACCOUNT_3_ADDR,\n    };\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(account_hash)\n        .with_session_code(session_file, faucet_args_3)\n        .with_standard_payment(runtime_args! {\n            ARG_AMOUNT => payment_amount.value() + HOST_FUNCTION_COST_CHANGE\n        })\n        .with_authorization_keys(&[account_hash])\n        .with_deploy_hash(deploy_hash)\n        .build();\n\n    let fund_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(fund_request_3).expect_success().commit();\n\n    let gas_cost_3 = builder.last_exec_gas_consumed();\n\n    assert!(gas_cost_3 > gas_cost_1, \"{} <= {}\", gas_cost_3, gas_cost_1);\n    assert!(gas_cost_3 > gas_cost_2);\n}\n\nstruct TestContext {\n    gh_2280_regression: AddressableEntityHash,\n}\n\nfn setup() -> (LmdbWasmTestBuilder, TestContext) {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let session_args = runtime_args! 
{\n        mint::ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE),\n        ARG_FAUCET_FUNDS => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE),\n    };\n    let install_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        GH_2280_REGRESSION_CONTRACT,\n        session_args,\n    )\n    .build();\n\n    builder.exec(install_request).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n    let gh_2280_regression = account\n        .named_keys()\n        .get(HASH_KEY_NAME)\n        .cloned()\n        .and_then(Key::into_entity_hash_addr)\n        .map(AddressableEntityHash::new)\n        .expect(\"should have key\");\n\n    (builder, TestContext { gh_2280_regression })\n}\n\nfn make_wasm_config(\n    new_host_function_costs: HostFunctionCostsV1,\n    old_wasm_config: WasmConfig,\n) -> WasmConfig {\n    let wasm_v1_config = WasmV1Config::new(\n        DEFAULT_WASM_MAX_MEMORY,\n        DEFAULT_MAX_STACK_HEIGHT,\n        old_wasm_config.v1().opcode_costs(),\n        new_host_function_costs,\n    );\n    let wasm_v2_config = WasmV2Config::default();\n    WasmConfig::new(\n        old_wasm_config.messages_limits(),\n        wasm_v1_config,\n        wasm_v2_config,\n    )\n}\n\nfn make_upgrade_request() -> ProtocolUpgradeConfig {\n    UpgradeRequestBuilder::new()\n        .with_current_protocol_version(OLD_PROTOCOL_VERSION)\n        .with_new_protocol_version(NEW_PROTOCOL_VERSION)\n        .with_activation_point(DEFAULT_ACTIVATION_POINT)\n        .build()\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/gh_3097.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::{engine_state::Error, execution::ExecError};\nuse casper_types::{\n    runtime_args, AddressableEntityHash, EntityVersionKey, PackageHash, RuntimeArgs,\n};\nuse gh_1470_regression::PACKAGE_HASH_NAME;\n\nconst GH_3097_REGRESSION_WASM: &str = \"gh_3097_regression.wasm\";\nconst GH_3097_REGRESSION_CALL_WASM: &str = \"gh_3097_regression_call.wasm\";\nconst DO_SOMETHING_ENTRYPOINT: &str = \"do_something\";\nconst DISABLED_CONTRACT_HASH_KEY: &str = \"disabled_contract_hash\";\nconst ENABLED_CONTRACT_HASH_KEY: &str = \"enabled_contract_hash\";\nconst CONTRACT_PACKAGE_HASH_KEY: &str = \"contract_package_hash\";\nconst ARG_METHOD: &str = \"method\";\nconst ARG_CONTRACT_HASH_KEY: &str = \"contract_hash_key\";\nconst ARG_MAJOR_VERSION: &str = \"major_version\";\nconst ARG_CONTRACT_VERSION: &str = \"contract_version\";\nconst METHOD_CALL_CONTRACT: &str = \"call_contract\";\nconst METHOD_CALL_VERSIONED_CONTRACT: &str = \"call_versioned_contract\";\n\n#[ignore]\n#[test]\nfn should_run_regression() {\n    // This test runs a contract that's after every call extends the same key with\n    // more data\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        GH_3097_REGRESSION_WASM,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .expect_success()\n        .commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n    let disabled_contract_hash = account\n        .named_keys()\n        .get(DISABLED_CONTRACT_HASH_KEY)\n        .unwrap()\n        .into_entity_hash_addr()\n        
.map(AddressableEntityHash::new)\n        .unwrap();\n    let enabled_contract_hash = account\n        .named_keys()\n        .get(ENABLED_CONTRACT_HASH_KEY)\n        .unwrap()\n        .into_entity_hash_addr()\n        .map(AddressableEntityHash::new)\n        .unwrap();\n    let contract_package_hash = account\n        .named_keys()\n        .get(CONTRACT_PACKAGE_HASH_KEY)\n        .unwrap()\n        .into_package_addr()\n        .map(PackageHash::new)\n        .unwrap();\n\n    // Versioned contract calls by name\n\n    let direct_call_latest_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        PACKAGE_HASH_NAME,\n        None,\n        DO_SOMETHING_ENTRYPOINT,\n        RuntimeArgs::new(),\n    )\n    .build();\n\n    let direct_call_v2_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        PACKAGE_HASH_NAME,\n        Some(2),\n        DO_SOMETHING_ENTRYPOINT,\n        RuntimeArgs::new(),\n    )\n    .build();\n    let direct_call_v2_request_with_major =\n        ExecuteRequestBuilder::contract_call_by_name_versioned_with_major(\n            *DEFAULT_ACCOUNT_ADDR,\n            PACKAGE_HASH_NAME,\n            Some(2),\n            Some(2),\n            DO_SOMETHING_ENTRYPOINT,\n            RuntimeArgs::new(),\n        )\n        .build();\n\n    let direct_call_v1_request = ExecuteRequestBuilder::versioned_contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        PACKAGE_HASH_NAME,\n        Some(1),\n        DO_SOMETHING_ENTRYPOINT,\n        RuntimeArgs::new(),\n    )\n    .build();\n\n    let direct_call_v1_request_with_major =\n        ExecuteRequestBuilder::contract_call_by_name_versioned_with_major(\n            *DEFAULT_ACCOUNT_ADDR,\n            PACKAGE_HASH_NAME,\n            Some(1),\n            Some(2),\n            DO_SOMETHING_ENTRYPOINT,\n            RuntimeArgs::new(),\n        )\n        .build();\n\n    builder\n        
.exec(direct_call_latest_request)\n        .expect_success()\n        .commit();\n\n    builder\n        .exec(direct_call_v2_request)\n        .expect_success()\n        .commit();\n\n    builder\n        .exec(direct_call_v2_request_with_major)\n        .expect_success()\n        .commit();\n\n    builder\n        .exec(direct_call_v1_request)\n        .expect_failure()\n        .commit();\n\n    builder\n        .exec(direct_call_v1_request_with_major)\n        .expect_failure()\n        .commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(\n            error,\n            Error::Exec(\n                ExecError::DisabledEntityVersion(version)\n            )\n            if version == EntityVersionKey::new(2, 1),\n        ),\n        \"Expected disabled contract version, found {:?}\",\n        error,\n    );\n\n    // Versioned contract calls by hash\n\n    let direct_call_latest_request = ExecuteRequestBuilder::versioned_contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_package_hash,\n        None,\n        DO_SOMETHING_ENTRYPOINT,\n        RuntimeArgs::new(),\n    )\n    .build();\n\n    let direct_call_v2_request = ExecuteRequestBuilder::versioned_contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_package_hash,\n        Some(2),\n        DO_SOMETHING_ENTRYPOINT,\n        RuntimeArgs::new(),\n    )\n    .build();\n    let direct_call_v2_request_with_major =\n        ExecuteRequestBuilder::contract_call_by_hash_versioned_with_major(\n            *DEFAULT_ACCOUNT_ADDR,\n            contract_package_hash,\n            Some(2),\n            Some(2),\n            DO_SOMETHING_ENTRYPOINT,\n            RuntimeArgs::new(),\n        )\n        .build();\n\n    let direct_call_v1_request = ExecuteRequestBuilder::versioned_contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_package_hash,\n        Some(1),\n        DO_SOMETHING_ENTRYPOINT,\n        
RuntimeArgs::new(),\n    )\n    .build();\n\n    let direct_call_v1_request_with_major =\n        ExecuteRequestBuilder::contract_call_by_hash_versioned_with_major(\n            *DEFAULT_ACCOUNT_ADDR,\n            contract_package_hash,\n            Some(1),\n            Some(2),\n            DO_SOMETHING_ENTRYPOINT,\n            RuntimeArgs::new(),\n        )\n        .build();\n\n    builder\n        .exec(direct_call_latest_request)\n        .expect_success()\n        .commit();\n\n    builder\n        .exec(direct_call_v2_request)\n        .expect_success()\n        .commit();\n\n    builder\n        .exec(direct_call_v2_request_with_major)\n        .expect_success()\n        .commit();\n\n    builder\n        .exec(direct_call_v1_request)\n        .expect_failure()\n        .commit();\n\n    builder\n        .exec(direct_call_v1_request_with_major)\n        .expect_failure()\n        .commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(\n            error,\n            Error::Exec(\n                ExecError::DisabledEntityVersion(version)\n            )\n            if version == EntityVersionKey::new(2, 1),\n        ),\n        \"Expected disabled contract version, found {:?}\",\n        error,\n    );\n\n    // Versioned call from a session wasm\n\n    let session_call_v1_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        GH_3097_REGRESSION_CALL_WASM,\n        runtime_args! {\n            ARG_METHOD => METHOD_CALL_VERSIONED_CONTRACT,\n            ARG_MAJOR_VERSION => 2u32,\n            ARG_CONTRACT_VERSION => Some(1u32),\n        },\n    )\n    .build();\n\n    let session_call_v2_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        GH_3097_REGRESSION_CALL_WASM,\n        runtime_args! 
{\n            ARG_METHOD => METHOD_CALL_VERSIONED_CONTRACT,\n            ARG_MAJOR_VERSION => 2u32,\n            ARG_CONTRACT_VERSION => Some(2u32),\n        },\n    )\n    .build();\n\n    let session_call_latest_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        GH_3097_REGRESSION_CALL_WASM,\n        runtime_args! {\n            ARG_METHOD => METHOD_CALL_VERSIONED_CONTRACT,\n            ARG_MAJOR_VERSION => 2u32,\n            ARG_CONTRACT_VERSION => Option::<u32>::None,\n        },\n    )\n    .build();\n\n    builder\n        .exec(session_call_latest_request)\n        .expect_success()\n        .commit();\n\n    builder\n        .exec(session_call_v2_request)\n        .expect_success()\n        .commit();\n\n    builder\n        .exec(session_call_v1_request)\n        .expect_failure()\n        .commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(\n            error,\n            Error::Exec(\n                ExecError::DisabledEntityVersion(version)\n            )\n            if version == EntityVersionKey::new(2, 1),\n        ),\n        \"Expected disabled contract version, found {:?}\",\n        error,\n    );\n\n    // Call by contract hashes\n\n    let call_by_hash_v2_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        enabled_contract_hash,\n        DO_SOMETHING_ENTRYPOINT,\n        RuntimeArgs::new(),\n    )\n    .build();\n\n    let call_by_hash_v2_request_with_major = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        enabled_contract_hash,\n        DO_SOMETHING_ENTRYPOINT,\n        RuntimeArgs::new(),\n    )\n    .build();\n\n    builder\n        .exec(call_by_hash_v2_request)\n        .expect_success()\n        .commit();\n\n    builder\n        .exec(call_by_hash_v2_request_with_major)\n        .expect_success()\n        .commit();\n\n    let call_by_name_v2_request = 
ExecuteRequestBuilder::contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        ENABLED_CONTRACT_HASH_KEY,\n        DO_SOMETHING_ENTRYPOINT,\n        RuntimeArgs::new(),\n    )\n    .build();\n    builder\n        .exec(call_by_name_v2_request)\n        .expect_success()\n        .commit();\n\n    // This direct contract by name/hash should fail\n    let call_by_hash_v1_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        disabled_contract_hash,\n        DO_SOMETHING_ENTRYPOINT,\n        RuntimeArgs::new(),\n    )\n    .build();\n    builder\n        .exec(call_by_hash_v1_request)\n        .expect_failure()\n        .commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(\n            error,\n            Error::Exec(\n                ExecError::DisabledEntity(contract_hash)\n            )\n            if contract_hash == disabled_contract_hash\n        ),\n        \"Expected invalid contract version, found {:?}\",\n        error,\n    );\n\n    // This direct contract by name/hash should fail\n    let call_by_name_v1_request = ExecuteRequestBuilder::contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        DISABLED_CONTRACT_HASH_KEY,\n        DO_SOMETHING_ENTRYPOINT,\n        RuntimeArgs::new(),\n    )\n    .build();\n    builder\n        .exec(call_by_name_v1_request)\n        .expect_failure()\n        .commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(\n            error,\n            Error::Exec(\n                ExecError::DisabledEntity(contract_hash)\n            )\n            if contract_hash == disabled_contract_hash\n        ),\n        \"Expected invalid contract version, found {:?}\",\n        error,\n    );\n\n    // Session calls into hashes\n\n    let session_call_hash_v1_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        
GH_3097_REGRESSION_CALL_WASM,\n        runtime_args! {\n            ARG_METHOD => METHOD_CALL_CONTRACT,\n            ARG_CONTRACT_HASH_KEY => DISABLED_CONTRACT_HASH_KEY,\n        },\n    )\n    .build();\n\n    let session_call_hash_v2_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        GH_3097_REGRESSION_CALL_WASM,\n        runtime_args! {\n            ARG_METHOD => METHOD_CALL_CONTRACT,\n            ARG_CONTRACT_HASH_KEY => ENABLED_CONTRACT_HASH_KEY,\n        },\n    )\n    .build();\n\n    builder\n        .exec(session_call_hash_v1_request)\n        .expect_failure()\n        .commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(\n            error,\n            Error::Exec(\n                ExecError::DisabledEntity(contract_hash)\n            )\n            if contract_hash == disabled_contract_hash\n        ),\n        \"Expected invalid contract version, found {:?}\",\n        error,\n    );\n\n    builder\n        .exec(session_call_hash_v2_request)\n        .expect_success()\n        .commit();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/gh_3208.rs",
    "content": "use once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    genesis_config_builder::GenesisConfigBuilder, utils, ChainspecConfig, DeployItemBuilder,\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, StepRequestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_ACCOUNT_INITIAL_BALANCE, DEFAULT_ACCOUNT_PUBLIC_KEY, DEFAULT_CHAINSPEC_REGISTRY,\n    DEFAULT_GENESIS_CONFIG_HASH, DEFAULT_GENESIS_TIMESTAMP_MILLIS,\n    DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_PAYMENT, DEFAULT_PROPOSER_ADDR,\n    DEFAULT_PROPOSER_PUBLIC_KEY, DEFAULT_PROTOCOL_VERSION, DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS,\n};\nuse casper_execution_engine::{\n    engine_state::{self},\n    execution::ExecError,\n};\nuse casper_storage::data_access_layer::GenesisRequest;\nuse casper_types::{\n    runtime_args,\n    system::{\n        auction::{self, BidAddr, DelegationRate},\n        standard_payment,\n    },\n    ApiError, GenesisAccount, GenesisValidator, Key, Motes, StoredValue,\n    DEFAULT_MINIMUM_BID_AMOUNT, U512,\n};\n\nuse crate::lmdb_fixture;\n\nstatic DEFAULT_PROPOSER_ACCOUNT_INITIAL_STAKE: Lazy<U512> =\n    Lazy::new(|| U512::from(1_000_000_000_000u64));\n\nstatic ACCOUNTS_WITH_GENESIS_VALIDATORS: Lazy<Vec<GenesisAccount>> = Lazy::new(|| {\n    vec![\n        GenesisAccount::account(\n            DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n            None,\n        ),\n        GenesisAccount::account(\n            DEFAULT_PROPOSER_PUBLIC_KEY.clone(),\n            Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(*DEFAULT_PROPOSER_ACCOUNT_INITIAL_STAKE),\n                15,\n            )),\n        ),\n    ]\n});\nconst DAY_MILLIS: usize = 24 * 60 * 60 * 1000;\nconst DAYS_IN_WEEK: usize = 7;\nconst WEEK_MILLIS: usize = DAYS_IN_WEEK * DAY_MILLIS;\nconst VESTING_SCHEDULE_LENGTH_DAYS: usize = 91;\nconst LOCKED_AMOUNTS_LENGTH: usize = (VESTING_SCHEDULE_LENGTH_DAYS / 
DAYS_IN_WEEK) + 1;\n\nconst LMDB_FIXTURE_NAME: &str = \"gh_3208\";\n\n#[ignore]\n#[test]\nfn should_run_regression_with_already_initialized_fixed_schedule() {\n    let (builder, _lmdb_fixture_state, _temp_dir) =\n        lmdb_fixture::builder_from_global_state_fixture(LMDB_FIXTURE_NAME);\n\n    let bid_key = Key::Bid(*DEFAULT_PROPOSER_ADDR);\n\n    let stored_value = builder.query(None, bid_key, &[]).unwrap();\n    if let StoredValue::Bid(bid) = stored_value {\n        assert!(\n            bid.is_locked_with_vesting_schedule(7776000000, DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS)\n        );\n        let vesting_schedule = bid\n            .vesting_schedule()\n            .expect(\"should have a schedule initialized already\");\n\n        let initial_stake = *DEFAULT_PROPOSER_ACCOUNT_INITIAL_STAKE;\n\n        let total_vested_amounts = {\n            let mut total_vested_amounts = U512::zero();\n\n            for i in 0..LOCKED_AMOUNTS_LENGTH {\n                let timestamp =\n                    vesting_schedule.initial_release_timestamp_millis() + (WEEK_MILLIS * i) as u64;\n                if let Some(locked_amount) = vesting_schedule.locked_amount(timestamp) {\n                    let current_vested_amount =\n                        initial_stake - locked_amount - total_vested_amounts;\n                    total_vested_amounts += current_vested_amount\n                }\n            }\n\n            total_vested_amounts\n        };\n\n        assert_eq!(total_vested_amounts, initial_stake);\n    } else {\n        panic!(\"unexpected StoredValue variant.\")\n    }\n}\n\n#[ignore]\n#[test]\nfn should_initialize_default_vesting_schedule() {\n    let genesis_request =\n        utils::create_run_genesis_request(ACCOUNTS_WITH_GENESIS_VALIDATORS.clone());\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(genesis_request);\n\n    let bid_addr = BidAddr::from(*DEFAULT_PROPOSER_ADDR);\n    let stored_value_before = builder\n        
.query(None, bid_addr.into(), &[])\n        .expect(\"should query proposers bid\");\n\n    let bid_before = if let StoredValue::BidKind(bid) = stored_value_before {\n        bid\n    } else {\n        panic!(\"Expected a bid variant in the global state\");\n    };\n\n    let bid_vesting_schedule = bid_before\n        .vesting_schedule()\n        .expect(\"genesis validator should have vesting schedule\");\n\n    assert!(\n        bid_vesting_schedule.locked_amounts().is_none(),\n        \"initial funds release is not yet processed\"\n    );\n\n    let mut era_end_timestamp_millis = DEFAULT_GENESIS_TIMESTAMP_MILLIS;\n\n    era_end_timestamp_millis += DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS;\n\n    assert!(\n        builder\n            .step(\n                StepRequestBuilder::default()\n                    .with_era_end_timestamp_millis(era_end_timestamp_millis)\n                    .with_parent_state_hash(builder.get_post_state_hash())\n                    .with_protocol_version(DEFAULT_PROTOCOL_VERSION)\n                    .build(),\n            )\n            .is_success(),\n        \"should run step to initialize a schedule\"\n    );\n\n    let stored_value_after = builder\n        .query(None, bid_addr.into(), &[])\n        .expect(\"should query proposers bid\");\n\n    let bid_after = if let StoredValue::BidKind(bid) = stored_value_after {\n        bid\n    } else {\n        panic!(\"Expected a bid variant in the global state\");\n    };\n\n    let bid_vesting_schedule = bid_after\n        .vesting_schedule()\n        .expect(\"genesis validator should have vesting schedule\");\n\n    assert!(\n        bid_vesting_schedule.locked_amounts().is_some(),\n        \"initial funds release is initialized\"\n    );\n}\n\n#[ignore]\n#[test]\nfn should_immediatelly_unbond_genesis_validator_with_zero_day_vesting_schedule() {\n    let vesting_schedule_period_millis = 0;\n\n    let exec_config = {\n        let accounts = ACCOUNTS_WITH_GENESIS_VALIDATORS.clone();\n        
GenesisConfigBuilder::new().with_accounts(accounts).build()\n    };\n\n    let genesis_request = GenesisRequest::new(\n        DEFAULT_GENESIS_CONFIG_HASH,\n        DEFAULT_PROTOCOL_VERSION,\n        exec_config,\n        DEFAULT_CHAINSPEC_REGISTRY.clone(),\n    );\n\n    let engine_config = ChainspecConfig::default()\n        .with_vesting_schedule_period_millis(vesting_schedule_period_millis);\n\n    let mut builder = LmdbWasmTestBuilder::new_temporary_with_config(engine_config);\n    builder.run_genesis(genesis_request);\n\n    let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        builder.get_auction_contract_hash(),\n        auction::METHOD_ADD_BID,\n        runtime_args! {\n            auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT),\n            auction::ARG_DELEGATION_RATE => 10 as DelegationRate,\n        },\n    )\n    .build();\n\n    builder.exec(add_bid_request).expect_success().commit();\n\n    let sender = *DEFAULT_PROPOSER_ADDR;\n    let contract_hash = builder.get_auction_contract_hash();\n    let entry_point = auction::METHOD_WITHDRAW_BID;\n    let payment_args = runtime_args! { standard_payment::ARG_AMOUNT => *DEFAULT_PAYMENT, };\n    let session_args = runtime_args! 
{\n        auction::ARG_PUBLIC_KEY => DEFAULT_PROPOSER_PUBLIC_KEY.clone(),\n        auction::ARG_AMOUNT => *DEFAULT_PROPOSER_ACCOUNT_INITIAL_STAKE,\n    };\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(sender)\n        .with_stored_session_hash(contract_hash, entry_point, session_args)\n        .with_standard_payment(payment_args)\n        .with_authorization_keys(&[sender])\n        .with_deploy_hash([58; 32])\n        .build();\n\n    let withdraw_bid_request_1 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let sender = *DEFAULT_PROPOSER_ADDR;\n    let contract_hash = builder.get_auction_contract_hash();\n    let entry_point = auction::METHOD_WITHDRAW_BID;\n    let payment_args = runtime_args! { standard_payment::ARG_AMOUNT => *DEFAULT_PAYMENT, };\n    let session_args = runtime_args! {\n        auction::ARG_PUBLIC_KEY => DEFAULT_PROPOSER_PUBLIC_KEY.clone(),\n        auction::ARG_AMOUNT => *DEFAULT_PROPOSER_ACCOUNT_INITIAL_STAKE,\n    };\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(sender)\n        .with_stored_session_hash(contract_hash, entry_point, session_args)\n        .with_standard_payment(payment_args)\n        .with_authorization_keys(&[sender])\n        .with_deploy_hash([59; 32])\n        .build();\n\n    let withdraw_bid_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder\n        .exec(withdraw_bid_request_1)\n        .expect_failure()\n        .commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(error, engine_state::Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error))) if auction_error == auction::Error::ValidatorFundsLocked as u8),\n        \"vesting schedule is not yet initialized\"\n    );\n\n    let mut era_end_timestamp_millis = DEFAULT_GENESIS_TIMESTAMP_MILLIS;\n\n    assert!(\n        builder\n            .step(\n                
StepRequestBuilder::default()\n                    .with_era_end_timestamp_millis(era_end_timestamp_millis)\n                    .with_parent_state_hash(builder.get_post_state_hash())\n                    .with_protocol_version(DEFAULT_PROTOCOL_VERSION)\n                    .with_run_auction(true)\n                    .build(),\n            )\n            .is_success(),\n        \"should run step to initialize a schedule\"\n    );\n\n    era_end_timestamp_millis += DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS;\n\n    assert!(\n        builder\n            .step(\n                StepRequestBuilder::default()\n                    .with_era_end_timestamp_millis(era_end_timestamp_millis)\n                    .with_parent_state_hash(builder.get_post_state_hash())\n                    .with_protocol_version(DEFAULT_PROTOCOL_VERSION)\n                    .with_run_auction(true)\n                    .build(),\n            )\n            .is_success(),\n        \"should run step to initialize a schedule\"\n    );\n\n    builder\n        .exec(withdraw_bid_request_2)\n        .expect_success()\n        .commit();\n}\n\n#[ignore]\n#[test]\nfn should_immediatelly_unbond_genesis_validator_with_zero_day_vesting_schedule_and_zero_day_lock() {\n    let vesting_schedule_period_millis = 0;\n    let locked_funds_period_millis = 0;\n\n    let exec_config = {\n        let accounts = ACCOUNTS_WITH_GENESIS_VALIDATORS.clone();\n        GenesisConfigBuilder::new()\n            .with_accounts(accounts)\n            .with_locked_funds_period_millis(locked_funds_period_millis)\n            .build()\n    };\n\n    let genesis_request = GenesisRequest::new(\n        DEFAULT_GENESIS_CONFIG_HASH,\n        DEFAULT_PROTOCOL_VERSION,\n        exec_config,\n        DEFAULT_CHAINSPEC_REGISTRY.clone(),\n    );\n\n    let chainspec = ChainspecConfig::default()\n        .with_vesting_schedule_period_millis(vesting_schedule_period_millis);\n\n    let mut builder = 
LmdbWasmTestBuilder::new_temporary_with_config(chainspec);\n    builder.run_genesis(genesis_request);\n\n    let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        builder.get_auction_contract_hash(),\n        auction::METHOD_ADD_BID,\n        runtime_args! {\n            auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT),\n            auction::ARG_DELEGATION_RATE => 10 as DelegationRate,\n        },\n    )\n    .build();\n\n    builder.exec(add_bid_request).expect_success().commit();\n\n    let era_end_timestamp_millis = DEFAULT_GENESIS_TIMESTAMP_MILLIS;\n\n    assert!(\n        builder\n            .step(\n                StepRequestBuilder::default()\n                    .with_era_end_timestamp_millis(era_end_timestamp_millis)\n                    .with_parent_state_hash(builder.get_post_state_hash())\n                    .with_protocol_version(DEFAULT_PROTOCOL_VERSION)\n                    .with_run_auction(true)\n                    .build(),\n            )\n            .is_success(),\n        \"should run step to initialize a schedule\"\n    );\n\n    let sender = *DEFAULT_PROPOSER_ADDR;\n    let contract_hash = builder.get_auction_contract_hash();\n    let entry_point = auction::METHOD_WITHDRAW_BID;\n    let payment_args = runtime_args! { standard_payment::ARG_AMOUNT => *DEFAULT_PAYMENT, };\n    let session_args = runtime_args! 
{\n        auction::ARG_PUBLIC_KEY => DEFAULT_PROPOSER_PUBLIC_KEY.clone(),\n        auction::ARG_AMOUNT => *DEFAULT_PROPOSER_ACCOUNT_INITIAL_STAKE,\n    };\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(sender)\n        .with_stored_session_hash(contract_hash, entry_point, session_args)\n        .with_standard_payment(payment_args)\n        .with_authorization_keys(&[sender])\n        .with_deploy_hash([58; 32])\n        .build();\n\n    let withdraw_bid_request_1 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder\n        .exec(withdraw_bid_request_1)\n        .expect_success()\n        .commit();\n}\n\n#[cfg(feature = \"fixture-generators\")]\nmod fixture {\n    use casper_engine_test_support::{\n        utils, StepRequestBuilder, DEFAULT_GENESIS_TIMESTAMP_MILLIS,\n        DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_PROTOCOL_VERSION,\n    };\n\n    use crate::lmdb_fixture;\n\n    use super::{ACCOUNTS_WITH_GENESIS_VALIDATORS, LMDB_FIXTURE_NAME};\n\n    #[ignore]\n    #[test]\n    fn generate_gh_3208_fixture() {\n        let genesis_request =\n            utils::create_run_genesis_request(ACCOUNTS_WITH_GENESIS_VALIDATORS.clone());\n\n        lmdb_fixture::generate_fixture(LMDB_FIXTURE_NAME, genesis_request, |builder| {\n            let era_end_timestamp_millis =\n                DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS;\n\n            // Move forward the clock and initialize vesting schedule with 13 weeks after initial 90\n            // days lock up.\n            builder.step(\n                StepRequestBuilder::default()\n                    .with_era_end_timestamp_millis(era_end_timestamp_millis)\n                    .with_parent_state_hash(builder.get_post_state_hash())\n                    .with_protocol_version(DEFAULT_PROTOCOL_VERSION)\n                    .build(),\n            );\n        })\n        .unwrap();\n    }\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/gh_3710.rs",
    "content": "use std::{collections::BTreeSet, convert::TryInto, iter::FromIterator};\n\nuse casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, StepRequestBuilder, WasmTestBuilder,\n    DEFAULT_ACCOUNT_PUBLIC_KEY, DEFAULT_PROPOSER_PUBLIC_KEY, LOCAL_GENESIS_REQUEST,\n};\nuse casper_storage::{\n    data_access_layer::{PruneRequest, PruneResult},\n    global_state::state::{CommitProvider, StateProvider},\n};\nuse casper_types::{\n    runtime_args,\n    system::auction::{self, DelegationRate},\n    Digest, EraId, Key, KeyTag, ProtocolVersion, PublicKey, U512,\n};\n\nuse crate::lmdb_fixture;\n\nconst FIXTURE_N_ERAS: usize = 10;\n\nconst GH_3710_FIXTURE: &str = \"gh_3710\";\n\n#[ignore]\n#[test]\nfn gh_3710_commit_prune_with_empty_keys_should_be_noop() {\n    let (mut builder, _lmdb_fixture_state, _temp_dir) =\n        lmdb_fixture::builder_from_global_state_fixture(GH_3710_FIXTURE);\n\n    let prune_config = PruneRequest::new(builder.get_post_state_hash(), Vec::new());\n\n    builder.commit_prune(prune_config).expect_prune_success();\n}\n\n#[ignore]\n#[test]\nfn gh_3710_commit_prune_should_validate_state_root_hash() {\n    let (mut builder, _lmdb_fixture_state, _temp_dir) =\n        lmdb_fixture::builder_from_global_state_fixture(GH_3710_FIXTURE);\n\n    let prune_config = PruneRequest::new(Digest::hash(\"foobar\"), Vec::new());\n\n    builder.commit_prune(prune_config);\n\n    let prune_result = builder\n        .get_prune_result(0)\n        .expect(\"should have prune result\");\n    assert!(builder.get_prune_result(1).is_none());\n\n    assert!(\n        matches!(prune_result, PruneResult::RootNotFound),\n        \"{:?}\",\n        prune_result\n    );\n}\n\n#[ignore]\n#[test]\nfn gh_3710_commit_prune_should_delete_values() {\n    let (mut builder, lmdb_fixture_state, _temp_dir) =\n        lmdb_fixture::builder_from_global_state_fixture(GH_3710_FIXTURE);\n\n    let auction_delay: u64 = lmdb_fixture_state\n        .genesis_request\n       
 .get(\"ee_config\")\n        .expect(\"should have ee_config\")\n        .get(\"auction_delay\")\n        .expect(\"should have auction delay\")\n        .as_i64()\n        .expect(\"auction delay should be integer\")\n        .try_into()\n        .expect(\"auction delay should be positive\");\n\n    let keys_before_prune = builder\n        .get_keys(KeyTag::EraInfo)\n        .expect(\"should obtain all given keys\");\n\n    assert_eq!(\n        keys_before_prune.len(),\n        FIXTURE_N_ERAS + 1 + auction_delay as usize\n    );\n\n    let batch_1: Vec<Key> = (0..FIXTURE_N_ERAS)\n        .map(|i| EraId::new(i.try_into().unwrap()))\n        .map(Key::EraInfo)\n        .collect();\n\n    let batch_2: Vec<Key> = (FIXTURE_N_ERAS..FIXTURE_N_ERAS + 1 + auction_delay as usize)\n        .map(|i| EraId::new(i.try_into().unwrap()))\n        .map(Key::EraInfo)\n        .collect();\n\n    assert_eq!(\n        BTreeSet::from_iter(batch_1.iter())\n            .union(&BTreeSet::from_iter(batch_2.iter()))\n            .collect::<BTreeSet<_>>()\n            .len(),\n        keys_before_prune.len(),\n        \"sanity check\"\n    );\n\n    // Process prune of first batch\n    let pre_state_hash = builder.get_post_state_hash();\n\n    let prune_config_1 = PruneRequest::new(pre_state_hash, batch_1);\n\n    builder.commit_prune(prune_config_1).expect_prune_success();\n    let post_state_hash_batch_1 = builder.get_post_state_hash();\n    assert_ne!(pre_state_hash, post_state_hash_batch_1);\n\n    let keys_after_batch_1_prune = builder\n        .get_keys(KeyTag::EraInfo)\n        .expect(\"should obtain all given keys\");\n\n    assert_eq!(keys_after_batch_1_prune.len(), 2);\n\n    // Process prune of second batch\n    let pre_state_hash = builder.get_post_state_hash();\n\n    let prune_config_2 = PruneRequest::new(pre_state_hash, batch_2);\n    builder.commit_prune(prune_config_2).expect_prune_success();\n    let post_state_hash_batch_2 = builder.get_post_state_hash();\n    
assert_ne!(pre_state_hash, post_state_hash_batch_2);\n\n    let keys_after_batch_2_prune = builder\n        .get_keys(KeyTag::EraInfo)\n        .expect(\"should obtain all given keys\");\n\n    assert_eq!(keys_after_batch_2_prune.len(), 0);\n}\n\nconst DEFAULT_REWARD_AMOUNT: u64 = 1_000_000;\n\nfn add_validator_and_wait_for_rotation<S>(builder: &mut WasmTestBuilder<S>, public_key: &PublicKey)\nwhere\n    S: StateProvider + CommitProvider,\n{\n    const DELEGATION_RATE: DelegationRate = 10;\n\n    let args = runtime_args! {\n        auction::ARG_PUBLIC_KEY => public_key.clone(),\n        auction::ARG_DELEGATION_RATE => DELEGATION_RATE,\n        auction::ARG_AMOUNT => U512::from(DEFAULT_REWARD_AMOUNT),\n    };\n\n    let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash(\n        public_key.to_account_hash(),\n        builder.get_auction_contract_hash(),\n        auction::METHOD_ADD_BID,\n        args,\n    )\n    .build();\n\n    builder.exec(add_bid_request).expect_success().commit();\n\n    // compute N eras\n\n    let current_era_id = builder.get_era();\n\n    // eras current..=delay + 1 without rewards (default genesis validator is not a\n    // validator yet)\n    for era_counter in current_era_id.iter(builder.get_auction_delay() + 1) {\n        let step_request = StepRequestBuilder::new()\n            .with_parent_state_hash(builder.get_post_state_hash())\n            .with_protocol_version(ProtocolVersion::V1_0_0)\n            .with_next_era_id(era_counter)\n            // no rewards as default validator is not a validator yet\n            .build();\n        builder.step(step_request);\n    }\n}\n\nfn distribute_rewards<S>(\n    builder: &mut WasmTestBuilder<S>,\n    block_height: u64,\n    proposer: &PublicKey,\n    amount: U512,\n) where\n    S: StateProvider + CommitProvider,\n{\n    builder.distribute(\n        None,\n        ProtocolVersion::V1_0_0,\n        IntoIterator::into_iter([(proposer.clone(), vec![amount])]).collect(),\n        
block_height,\n    );\n}\n\n#[ignore]\n#[test]\nfn gh_3710_should_produce_era_summary_in_a_step() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    add_validator_and_wait_for_rotation(&mut builder, &DEFAULT_ACCOUNT_PUBLIC_KEY);\n    distribute_rewards(&mut builder, 1, &DEFAULT_ACCOUNT_PUBLIC_KEY, 0.into());\n\n    let era_info_keys = builder.get_keys(KeyTag::EraInfo).unwrap();\n    assert_eq!(era_info_keys, Vec::new());\n\n    let era_summary_1 = builder\n        .query(None, Key::EraSummary, &[])\n        .expect(\"should query era summary\");\n\n    let era_summary_1 = era_summary_1.as_era_info().expect(\"era summary\");\n\n    // Reward another validator to observe that the summary changes.\n    add_validator_and_wait_for_rotation(&mut builder, &DEFAULT_PROPOSER_PUBLIC_KEY);\n    distribute_rewards(&mut builder, 2, &DEFAULT_PROPOSER_PUBLIC_KEY, 1.into());\n\n    let era_summary_2 = builder\n        .query(None, Key::EraSummary, &[])\n        .expect(\"should query era summary\");\n\n    let era_summary_2 = era_summary_2.as_era_info().expect(\"era summary\");\n\n    assert_ne!(era_summary_1, era_summary_2);\n\n    let era_info_keys = builder.get_keys(KeyTag::EraInfo).unwrap();\n    assert_eq!(era_info_keys, Vec::new());\n\n    // As a sanity check ensure there's just a single era summary per tip\n    assert_eq!(\n        builder\n            .get_keys(KeyTag::EraSummary)\n            .expect(\"should get all era summary keys\")\n            .len(),\n        1\n    );\n}\n\nmod fixture {\n    use std::collections::BTreeMap;\n\n    use casper_engine_test_support::{\n        ExecuteRequestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_PUBLIC_KEY,\n        LOCAL_GENESIS_REQUEST,\n    };\n    use casper_types::{\n        runtime_args,\n        system::auction::{EraInfo, SeigniorageAllocation},\n        EraId, Key, KeyTag, StoredValue, U512,\n    };\n\n    use super::{FIXTURE_N_ERAS, 
GH_3710_FIXTURE};\n    use crate::lmdb_fixture;\n\n    #[ignore = \"RUN_FIXTURE_GENERATORS env var should be enabled\"]\n    #[test]\n    fn generate_call_stack_fixture() {\n        const CALL_STACK_FIXTURE: &str = \"call_stack_fixture\";\n        const CONTRACT_RECURSIVE_SUBCALL: &str = \"get_call_stack_recursive_subcall.wasm\";\n\n        if !lmdb_fixture::is_fixture_generator_enabled() {\n            println!(\"Enable the RUN_FIXTURE_GENERATORS variable\");\n            return;\n        }\n\n        let genesis_request = LOCAL_GENESIS_REQUEST.clone();\n\n        lmdb_fixture::generate_fixture(CALL_STACK_FIXTURE, genesis_request, |builder| {\n            let execute_request = ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                CONTRACT_RECURSIVE_SUBCALL,\n                runtime_args! {},\n            )\n            .build();\n\n            builder.exec(execute_request).expect_success().commit();\n        })\n        .unwrap();\n    }\n\n    #[ignore = \"RUN_FIXTURE_GENERATORS env var should be enabled\"]\n    #[test]\n    fn generate_groups_fixture() {\n        const GROUPS_FIXTURE: &str = \"groups\";\n        const GROUPS_WASM: &str = \"groups.wasm\";\n\n        if !lmdb_fixture::is_fixture_generator_enabled() {\n            println!(\"Enable the RUN_FIXTURE_GENERATORS variable\");\n            return;\n        }\n\n        let genesis_request = LOCAL_GENESIS_REQUEST.clone();\n\n        lmdb_fixture::generate_fixture(GROUPS_FIXTURE, genesis_request, |builder| {\n            let execute_request = ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                GROUPS_WASM,\n                runtime_args! 
{},\n            )\n            .build();\n\n            builder.exec(execute_request).expect_success().commit();\n        })\n        .unwrap();\n    }\n\n    #[ignore = \"RUN_FIXTURE_GENERATORS env var should be enabled\"]\n    #[test]\n    fn generate_era_info_bloat_fixture() {\n        if !lmdb_fixture::is_fixture_generator_enabled() {\n            println!(\"Enable the RUN_FIXTURE_GENERATORS variable\");\n            return;\n        }\n        // To generate this fixture again you have to re-run this code release-1.4.13.\n        let genesis_request = LOCAL_GENESIS_REQUEST.clone();\n        lmdb_fixture::generate_fixture(GH_3710_FIXTURE, genesis_request, |builder| {\n            super::add_validator_and_wait_for_rotation(builder, &DEFAULT_ACCOUNT_PUBLIC_KEY);\n\n            // N more eras that pays out rewards\n            super::distribute_rewards(builder, 0, &DEFAULT_ACCOUNT_PUBLIC_KEY, 0.into());\n\n            let last_era_info = EraId::new(builder.get_auction_delay() + FIXTURE_N_ERAS as u64);\n            let last_era_info_key = Key::EraInfo(last_era_info);\n\n            let keys = builder.get_keys(KeyTag::EraInfo).unwrap();\n            let mut keys_lookup = BTreeMap::new();\n            for key in &keys {\n                keys_lookup.insert(key, ());\n            }\n\n            assert!(keys_lookup.contains_key(&last_era_info_key));\n            assert_eq!(keys_lookup.keys().last().copied(), Some(&last_era_info_key));\n\n            // all era infos should have unique rewards that are in increasing order\n            let stored_values: Vec<StoredValue> = keys_lookup\n                .keys()\n                .map(|key| builder.query(None, **key, &[]).unwrap())\n                .collect();\n\n            let era_infos: Vec<&EraInfo> = stored_values\n                .iter()\n                .filter_map(StoredValue::as_era_info)\n                .collect();\n\n            let rewards: Vec<&U512> = era_infos\n                .iter()\n                
.flat_map(|era_info| era_info.seigniorage_allocations())\n                .map(|seigniorage| match seigniorage {\n                    SeigniorageAllocation::Validator {\n                        validator_public_key,\n                        amount,\n                    } if validator_public_key == &*DEFAULT_ACCOUNT_PUBLIC_KEY => amount,\n                    SeigniorageAllocation::Validator { .. } => panic!(\"Unexpected validator\"),\n                    SeigniorageAllocation::Delegator { .. }\n                    | SeigniorageAllocation::DelegatorKind { .. } => panic!(\"No delegators\"),\n                })\n                .collect();\n\n            let sorted_rewards = {\n                let mut vec = rewards.clone();\n                vec.sort();\n                vec\n            };\n            assert_eq!(rewards, sorted_rewards);\n\n            assert!(\n                rewards.first().unwrap() < rewards.last().unwrap(),\n                \"{:?}\",\n                rewards\n            );\n        })\n        .unwrap();\n    }\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/gh_4898.rs",
    "content": "use casper_engine_test_support::{\n    utils, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\n\nuse casper_types::runtime_args;\n\nconst ARG_DATA: &str = \"data\";\nconst GH_4898_REGRESSION_WASM: &str = \"gh_4898_regression.wasm\";\n\n#[ignore]\n#[test]\nfn should_not_contain_f64_opcodes() {\n    let module_bytes = utils::read_wasm_file(GH_4898_REGRESSION_WASM);\n    let wat = wasmprinter::print_bytes(module_bytes).expect(\"WASM parse error\");\n    assert!(!wat.contains(\"f64.\"), \"WASM contains f64 opcodes\");\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        GH_4898_REGRESSION_WASM,\n        runtime_args! {\n            ARG_DATA => \"account-hash-2c4a11c062a8a337bfc97e27fd66291caeb2c65865dcb5d3ef3759c4c97efecb\"\n        },\n    )\n    .build();\n\n    builder.exec(exec_request).commit();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/gov_116.rs",
    "content": "use std::{collections::BTreeSet, iter::FromIterator};\n\nuse num_traits::Zero;\nuse once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    genesis_config_builder::GenesisConfigBuilder, utils, ChainspecConfig, ExecuteRequestBuilder,\n    LmdbWasmTestBuilder, TransferRequestBuilder, DEFAULT_ACCOUNTS, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_ACCOUNT_PUBLIC_KEY, DEFAULT_CHAINSPEC_REGISTRY, DEFAULT_GENESIS_CONFIG_HASH,\n    DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_PROTOCOL_VERSION,\n    DEFAULT_VALIDATOR_SLOTS, MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse casper_storage::data_access_layer::GenesisRequest;\nuse casper_types::{\n    runtime_args,\n    system::auction::{self, DelegationRate, EraValidators, VESTING_SCHEDULE_LENGTH_MILLIS},\n    GenesisAccount, GenesisValidator, Motes, PublicKey, SecretKey, DEFAULT_MINIMUM_BID_AMOUNT,\n    U256, U512,\n};\n\nconst MINIMUM_BONDED_AMOUNT: u64 = 1_000;\n\n/// Validator with smallest stake will withdraw most of his stake to ensure we did move time forward\n/// to unlock his whole vesting schedule.\nconst WITHDRAW_AMOUNT: u64 = MINIMUM_BONDED_AMOUNT - DEFAULT_MINIMUM_BID_AMOUNT;\n\n/// Initial lockup period\nconst VESTING_BASE: u64 = DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS;\n\nconst DAY_MILLIS: u64 = 24 * 60 * 60 * 1000;\nconst WEEK_MILLIS: u64 = 7 * DAY_MILLIS;\nconst DELEGATION_RATE: DelegationRate = 0;\n\n/// Simplified vesting weeks for testing purposes. 
Each element is used as an argument to\n/// run_auction call.\nconst VESTING_WEEKS: [u64; 3] = [\n    // Passes the vesting schedule (aka initial lockup + schedule length)\n    VESTING_BASE + VESTING_SCHEDULE_LENGTH_MILLIS,\n    // One week after\n    VESTING_BASE + VESTING_SCHEDULE_LENGTH_MILLIS + WEEK_MILLIS,\n    // Two weeks after\n    VESTING_BASE + VESTING_SCHEDULE_LENGTH_MILLIS + (2 * WEEK_MILLIS),\n];\n\nstatic GENESIS_VALIDATOR_PUBLIC_KEYS: Lazy<BTreeSet<PublicKey>> = Lazy::new(|| {\n    let mut set = BTreeSet::new();\n    for i in 1..=DEFAULT_VALIDATOR_SLOTS {\n        let mut secret_key_bytes = [255u8; 32];\n        U256::from(i).to_big_endian(&mut secret_key_bytes);\n        let secret_key = SecretKey::secp256k1_from_bytes(secret_key_bytes).unwrap();\n        let public_key = PublicKey::from(&secret_key);\n        set.insert(public_key);\n    }\n    set\n});\n\nstatic GENESIS_VALIDATORS: Lazy<Vec<GenesisAccount>> = Lazy::new(|| {\n    let mut vec = Vec::with_capacity(GENESIS_VALIDATOR_PUBLIC_KEYS.len());\n\n    for (index, public_key) in GENESIS_VALIDATOR_PUBLIC_KEYS.iter().enumerate() {\n        let bond = MINIMUM_BONDED_AMOUNT + index as u64;\n        let account = GenesisAccount::account(\n            public_key.clone(),\n            Motes::new(MINIMUM_ACCOUNT_CREATION_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(bond),\n                DelegationRate::zero(),\n            )),\n        );\n        vec.push(account);\n    }\n\n    vec\n});\n\nstatic LOWEST_STAKE_VALIDATOR: Lazy<PublicKey> = Lazy::new(|| {\n    let mut genesis_accounts: Vec<&GenesisAccount> = GENESIS_ACCOUNTS.iter().collect();\n    genesis_accounts.sort_by_key(|genesis_account| genesis_account.staked_amount());\n\n    // Finds a genesis validator with lowest stake\n    let genesis_account = genesis_accounts\n        .into_iter()\n        .find(|genesis_account| {\n            genesis_account.is_validator() && genesis_account.staked_amount() > 
Motes::zero()\n        })\n        .unwrap();\n\n    assert_eq!(\n        genesis_account.staked_amount(),\n        Motes::new(MINIMUM_BONDED_AMOUNT)\n    );\n\n    genesis_account.public_key()\n});\n\nstatic GENESIS_ACCOUNTS: Lazy<Vec<GenesisAccount>> = Lazy::new(|| {\n    let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n    tmp.append(&mut GENESIS_VALIDATORS.clone());\n    tmp\n});\n\nfn initialize_builder() -> LmdbWasmTestBuilder {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    let run_genesis_request = utils::create_run_genesis_request(GENESIS_ACCOUNTS.clone());\n    builder.run_genesis(run_genesis_request);\n\n    let fund_request = TransferRequestBuilder::new(\n        MINIMUM_ACCOUNT_CREATION_BALANCE,\n        PublicKey::System.to_account_hash(),\n    )\n    .build();\n\n    builder.transfer_and_commit(fund_request).expect_success();\n\n    builder\n}\n\n#[ignore]\n#[test]\nfn should_not_retain_genesis_validator_slot_protection_after_vesting_period_elapsed() {\n    let lowest_stake_validator_addr = LOWEST_STAKE_VALIDATOR.to_account_hash();\n\n    let mut builder = initialize_builder();\n\n    // Unlock all funds of genesis validator\n    builder.run_auction(VESTING_WEEKS[0], Vec::new());\n\n    let era_validators_1: EraValidators = builder.get_era_validators();\n\n    let (last_era_1, weights_1) = era_validators_1.iter().last().unwrap();\n    let genesis_validator_stake_1 = weights_1.get(&LOWEST_STAKE_VALIDATOR).unwrap();\n    let next_validator_set_1 = BTreeSet::from_iter(weights_1.keys().cloned());\n    assert_eq!(\n        next_validator_set_1,\n        GENESIS_VALIDATOR_PUBLIC_KEYS.clone(),\n        \"expected validator set should be unchanged\"\n    );\n\n    let withdraw_bid_request = {\n        let auction_hash = builder.get_auction_contract_hash();\n        let session_args = runtime_args! 
{\n            auction::ARG_PUBLIC_KEY => LOWEST_STAKE_VALIDATOR.clone(),\n            auction::ARG_AMOUNT => U512::from(WITHDRAW_AMOUNT),\n        };\n        ExecuteRequestBuilder::contract_call_by_hash(\n            lowest_stake_validator_addr,\n            auction_hash,\n            auction::METHOD_WITHDRAW_BID,\n            session_args,\n        )\n        .build()\n    };\n\n    builder.exec(withdraw_bid_request).expect_success().commit();\n\n    builder.run_auction(VESTING_WEEKS[1], Vec::new());\n\n    let era_validators_2: EraValidators = builder.get_era_validators();\n\n    let (last_era_2, weights_2) = era_validators_2.iter().last().unwrap();\n    assert!(last_era_2 > last_era_1);\n    let genesis_validator_stake_2 = weights_2.get(&LOWEST_STAKE_VALIDATOR).unwrap();\n\n    let next_validator_set_2 = BTreeSet::from_iter(weights_2.keys().cloned());\n    assert_eq!(next_validator_set_2, GENESIS_VALIDATOR_PUBLIC_KEYS.clone());\n\n    assert!(\n        genesis_validator_stake_1 > genesis_validator_stake_2,\n        \"stake should decrease in future era\"\n    );\n\n    let stake_diff = if genesis_validator_stake_1 > genesis_validator_stake_2 {\n        genesis_validator_stake_1 - genesis_validator_stake_2\n    } else {\n        genesis_validator_stake_2 - genesis_validator_stake_1\n    };\n\n    assert_eq!(stake_diff, U512::from(WITHDRAW_AMOUNT));\n\n    // Add nonfounding validator higher than `unbonding_account` has after unlocking & withdrawing\n\n    // New validator bids with the original stake of unbonding_account to take his place in future\n    // era. We know that unbonding_account has now smaller stake than before.\n\n    let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        builder.get_auction_contract_hash(),\n        auction::METHOD_ADD_BID,\n        runtime_args! 
{\n            auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            auction::ARG_AMOUNT => *genesis_validator_stake_1,\n            auction::ARG_DELEGATION_RATE => DELEGATION_RATE,\n        },\n    )\n    .build();\n\n    builder.exec(add_bid_request).expect_success().commit();\n\n    builder.run_auction(VESTING_WEEKS[2], Vec::new());\n\n    let era_validators_3: EraValidators = builder.get_era_validators();\n    let (last_era_3, weights_3) = era_validators_3.iter().last().unwrap();\n    assert!(last_era_3 > last_era_2);\n\n    assert_eq!(\n        weights_3.len(),\n        DEFAULT_VALIDATOR_SLOTS as usize,\n        \"auction incorrectly computed more than slots than available\"\n    );\n\n    assert!(\n        weights_3.contains_key(&*DEFAULT_ACCOUNT_PUBLIC_KEY),\n        \"new non-genesis validator should replace a genesis validator with smaller stake\"\n    );\n\n    assert!(\n        !weights_3.contains_key(&LOWEST_STAKE_VALIDATOR),\n        \"unbonded account should be out of the set\"\n    );\n\n    let next_validator_set_3 = BTreeSet::from_iter(weights_3.keys().cloned());\n    let expected_validators = {\n        let mut pks = GENESIS_VALIDATOR_PUBLIC_KEYS.clone();\n        pks.remove(&LOWEST_STAKE_VALIDATOR);\n        pks.insert(DEFAULT_ACCOUNT_PUBLIC_KEY.clone());\n        pks\n    };\n    assert_eq!(\n        next_validator_set_3, expected_validators,\n        \"actual next validator set does not match expected validator set\"\n    );\n}\n\n#[ignore]\n#[test]\nfn should_retain_genesis_validator_slot_protection() {\n    const CASPER_VESTING_SCHEDULE_PERIOD_MILLIS: u64 = 91 * DAY_MILLIS;\n    const CASPER_LOCKED_FUNDS_PERIOD_MILLIS: u64 = 90 * DAY_MILLIS;\n    const CASPER_VESTING_BASE: u64 =\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + CASPER_LOCKED_FUNDS_PERIOD_MILLIS;\n\n    let mut builder = {\n        let chainspec = ChainspecConfig::default()\n            
.with_vesting_schedule_period_millis(CASPER_VESTING_SCHEDULE_PERIOD_MILLIS);\n\n        let run_genesis_request = {\n            let accounts = GENESIS_ACCOUNTS.clone();\n            let exec_config = GenesisConfigBuilder::default()\n                .with_accounts(accounts)\n                .with_locked_funds_period_millis(CASPER_LOCKED_FUNDS_PERIOD_MILLIS)\n                .build();\n\n            GenesisRequest::new(\n                DEFAULT_GENESIS_CONFIG_HASH,\n                DEFAULT_PROTOCOL_VERSION,\n                exec_config,\n                DEFAULT_CHAINSPEC_REGISTRY.clone(),\n            )\n        };\n\n        let mut builder = LmdbWasmTestBuilder::new_temporary_with_config(chainspec);\n        builder.run_genesis(run_genesis_request);\n\n        let fund_request = TransferRequestBuilder::new(\n            MINIMUM_ACCOUNT_CREATION_BALANCE,\n            PublicKey::System.to_account_hash(),\n        )\n        .build();\n\n        builder.transfer_and_commit(fund_request).expect_success();\n\n        builder\n    };\n\n    let era_validators_1: EraValidators = builder.get_era_validators();\n\n    let (last_era_1, weights_1) = era_validators_1.iter().last().unwrap();\n    let genesis_validator_stake_1 = weights_1.get(&LOWEST_STAKE_VALIDATOR).unwrap();\n    // One higher than the lowest stake\n    let winning_stake = *genesis_validator_stake_1 + U512::one();\n    let next_validator_set_1 = BTreeSet::from_iter(weights_1.keys().cloned());\n    assert_eq!(\n        next_validator_set_1,\n        GENESIS_VALIDATOR_PUBLIC_KEYS.clone(),\n        \"expected validator set should be unchanged\"\n    );\n\n    builder.run_auction(CASPER_VESTING_BASE, Vec::new());\n\n    let era_validators_2: EraValidators = builder.get_era_validators();\n\n    let (last_era_2, weights_2) = era_validators_2.iter().last().unwrap();\n    assert!(last_era_2 > last_era_1);\n    let next_validator_set_2 = BTreeSet::from_iter(weights_2.keys().cloned());\n    
assert_eq!(next_validator_set_2, GENESIS_VALIDATOR_PUBLIC_KEYS.clone());\n\n    let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        builder.get_auction_contract_hash(),\n        auction::METHOD_ADD_BID,\n        runtime_args! {\n            auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            auction::ARG_AMOUNT => winning_stake,\n            auction::ARG_DELEGATION_RATE => DELEGATION_RATE,\n        },\n    )\n    .build();\n\n    builder.exec(add_bid_request).expect_success().commit();\n\n    builder.run_auction(CASPER_VESTING_BASE + WEEK_MILLIS, Vec::new());\n\n    // All genesis validator slots are protected after ~1 week\n    let era_validators_3: EraValidators = builder.get_era_validators();\n    let (last_era_3, weights_3) = era_validators_3.iter().last().unwrap();\n    assert!(last_era_3 > last_era_2);\n    let next_validator_set_3 = BTreeSet::from_iter(weights_3.keys().cloned());\n    assert_eq!(next_validator_set_3, GENESIS_VALIDATOR_PUBLIC_KEYS.clone());\n\n    // After 13 weeks ~ 91 days lowest stake validator is dropped and replaced with higher bid\n    builder.run_auction(\n        CASPER_VESTING_BASE + VESTING_SCHEDULE_LENGTH_MILLIS,\n        Vec::new(),\n    );\n\n    let era_validators_4: EraValidators = builder.get_era_validators();\n    let (last_era_4, weights_4) = era_validators_4.iter().last().unwrap();\n    assert!(last_era_4 > last_era_3);\n    let next_validator_set_4 = BTreeSet::from_iter(weights_4.keys().cloned());\n    let expected_validators = {\n        let mut pks = GENESIS_VALIDATOR_PUBLIC_KEYS.clone();\n        pks.remove(&LOWEST_STAKE_VALIDATOR);\n        pks.insert(DEFAULT_ACCOUNT_PUBLIC_KEY.clone());\n        pks\n    };\n    assert_eq!(\n        next_validator_set_4, expected_validators,\n        \"actual next validator set does not match expected validator set\"\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/gov_42.rs",
    "content": "// This test focuses on testing whether we charge for\n// WASM files that are malformed (unparseable).\n\n// If we're provided with malformed file, we should charge.\n// The exception is the \"empty wasm\" when send as\n// a payment, because in such case we use the \"default payment\"\n// instead.\n\n// For increased security, we also test some other cases in this test\n// like gas overflow (which is a runtime error).\n\n// Other potential test cases:\n// 1. Wasm with unsupported \"start\" section - tested in `ee_890` (but without asserting the\n// charge)\n\nuse casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{runtime_args, Gas, RuntimeArgs};\n\nuse crate::{\n    test::regression::test_utils::{\n        make_gas_counter_overflow, make_module_with_start_section,\n        make_module_without_memory_section,\n    },\n    wasm_utils,\n};\n\nconst ARG_AMOUNT: &str = \"amount\";\n\n#[derive(Copy, Clone, Debug)]\nenum ExecutionPhase {\n    Payment,\n    Session,\n}\n\nfn run_test_case(input_wasm_bytes: &[u8], expected_error: &str, execution_phase: ExecutionPhase) {\n    let payment_amount = *DEFAULT_PAYMENT;\n\n    let account_hash = *DEFAULT_ACCOUNT_ADDR;\n    let session_args = RuntimeArgs::default();\n    let deploy_hash = [42; 32];\n\n    let (deploy_item_builder, expected_error_message) = match execution_phase {\n        ExecutionPhase::Payment => (\n            DeployItemBuilder::new()\n                .with_payment_bytes(\n                    input_wasm_bytes.to_vec(),\n                    runtime_args! 
{ARG_AMOUNT => payment_amount,},\n                )\n                .with_session_bytes(wasm_utils::do_nothing_bytes(), session_args),\n            expected_error,\n        ),\n        ExecutionPhase::Session => (\n            DeployItemBuilder::new()\n                .with_session_bytes(input_wasm_bytes.to_vec(), session_args)\n                .with_standard_payment(runtime_args! {ARG_AMOUNT => payment_amount,}),\n            expected_error,\n        ),\n    };\n    let deploy_item = deploy_item_builder\n        .with_address(account_hash)\n        .with_authorization_keys(&[account_hash])\n        .with_deploy_hash(deploy_hash)\n        .build();\n    let do_minimum_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let empty_wasm_in_payment = match execution_phase {\n        ExecutionPhase::Payment => input_wasm_bytes.is_empty(),\n        ExecutionPhase::Session => false,\n    };\n\n    if empty_wasm_in_payment {\n        // Special case: We expect success, since default payment will be used instead.\n        builder.exec(do_minimum_request).expect_success().commit();\n    } else {\n        builder.exec(do_minimum_request).expect_failure().commit();\n\n        let actual_error = builder.get_error().expect(\"should have error\").to_string();\n        assert!(actual_error.contains(expected_error_message));\n\n        let gas = builder.last_exec_gas_consumed();\n        assert_eq!(gas, Gas::zero());\n    }\n}\n\n#[ignore]\n#[test]\nfn should_charge_payment_with_incorrect_wasm_file_invalid_magic_number() {\n    const WASM_BYTES: &[u8] = &[1, 2, 3, 4, 5]; // Correct WASM magic bytes are: 0x00 0x61 0x73 0x6d (\"\\0asm\")\n    let execution_phase = ExecutionPhase::Payment;\n    let expected_error = \" Invalid magic number at start of file\";\n    run_test_case(WASM_BYTES, expected_error, 
execution_phase)\n}\n\n#[ignore]\n#[test]\nfn should_charge_session_with_incorrect_wasm_file_invalid_magic_number() {\n    const WASM_BYTES: &[u8] = &[1, 2, 3, 4, 5]; // Correct WASM magic bytes are: 0x00 0x61 0x73 0x6d (\"\\0asm\")\n    let execution_phase = ExecutionPhase::Session;\n    let expected_error = \"Invalid magic number at start of file\";\n    run_test_case(WASM_BYTES, expected_error, execution_phase)\n}\n\n#[ignore]\n#[test]\nfn should_fail_to_charge_payment_with_incorrect_wasm_file_empty_bytes() {\n    const WASM_BYTES: &[u8] = &[];\n    let execution_phase = ExecutionPhase::Payment;\n    let expected_error = \"I/O Error: UnexpectedEof\";\n    run_test_case(WASM_BYTES, expected_error, execution_phase)\n}\n\n#[ignore]\n#[test]\nfn should_charge_session_with_incorrect_wasm_file_empty_bytes() {\n    const WASM_BYTES: &[u8] = &[];\n    let execution_phase = ExecutionPhase::Session;\n    let expected_error = \"I/O Error: UnexpectedEof\";\n    run_test_case(WASM_BYTES, expected_error, execution_phase)\n}\n\n#[ignore]\n#[test]\nfn should_charge_payment_with_incorrect_wasm_correct_magic_number_incomplete_module() {\n    const WASM_BYTES: &[u8] = &[\n        0x00, 0x61, 0x73, 0x6D, 0x01, 0x00, 0x00, 0x00, 0x01, 0x35, 0x09, 0x60, 0x02, 0x7F, 0x7F,\n        0x01, 0x7F, 0x60, 0x03, 0x7F, 0x7F, 0x7F, 0x01, 0x7F, 0x60, 0x01, 0x7F, 0x00, 0x60, 0x00,\n        0x00, 0x60, 0x01, 0x7F, 0x01, 0x7F, 0x60, 0x03, 0x7F, 0x7F, 0x7F, 0x00, 0x60, 0x05, 0x7F,\n        0x7F, 0x7F, 0x7F, 0x7F, 0x01, 0x7F, 0x60, 0x02, 0x7F, 0x7F, 0x00, 0x60, 0x04, 0x7F, 0x7F,\n        0x7F, 0x7F, 0x00, 0x02, 0x50, 0x03, 0x03, 0x65, 0x6E, 0x76, 0x16, 0x63, 0x61, 0x73, 0x70,\n        0x65, 0x72, 0x5F, 0x6C, 0x6F, 0x61, 0x64, 0x5F, 0x6E, 0x61, 0x6D, 0x65, 0x64, 0x5F, 0x6B,\n        0x65, 0x79, 0x73, 0x00, 0x00, 0x03, 0x65, 0x6E, 0x76, 0x17, 0x63, 0x61, 0x73, 0x70, 0x65,\n        0x72, 0x5F, 0x72, 0x65, 0x61, 0x64, 0x5F, 0x68, 0x6F, 0x73, 0x74, 0x5F, 0x62, 0x75, 0x66,\n        0x66, 0x65, 0x72, 0x00, 
0x01, 0x03, 0x65, 0x6E, 0x76, 0x0D, 0x63, 0x61, 0x73, 0x70, 0x65,\n        0x72, 0x5F, 0x72, 0x65, 0x76, 0x65, 0x72, 0x74, 0x00,\n    ];\n    let execution_phase = ExecutionPhase::Payment;\n    let expected_error = \"I/O Error: UnexpectedEof\";\n    run_test_case(WASM_BYTES, expected_error, execution_phase)\n}\n\n#[ignore]\n#[test]\nfn should_charge_session_with_incorrect_wasm_correct_magic_number_incomplete_module() {\n    const WASM_BYTES: &[u8] = &[\n        0x00, 0x61, 0x73, 0x6D, 0x01, 0x00, 0x00, 0x00, 0x01, 0x35, 0x09, 0x60, 0x02, 0x7F, 0x7F,\n        0x01, 0x7F, 0x60, 0x03, 0x7F, 0x7F, 0x7F, 0x01, 0x7F, 0x60, 0x01, 0x7F, 0x00, 0x60, 0x00,\n        0x00, 0x60, 0x01, 0x7F, 0x01, 0x7F, 0x60, 0x03, 0x7F, 0x7F, 0x7F, 0x00, 0x60, 0x05, 0x7F,\n        0x7F, 0x7F, 0x7F, 0x7F, 0x01, 0x7F, 0x60, 0x02, 0x7F, 0x7F, 0x00, 0x60, 0x04, 0x7F, 0x7F,\n        0x7F, 0x7F, 0x00, 0x02, 0x50, 0x03, 0x03, 0x65, 0x6E, 0x76, 0x16, 0x63, 0x61, 0x73, 0x70,\n        0x65, 0x72, 0x5F, 0x6C, 0x6F, 0x61, 0x64, 0x5F, 0x6E, 0x61, 0x6D, 0x65, 0x64, 0x5F, 0x6B,\n        0x65, 0x79, 0x73, 0x00, 0x00, 0x03, 0x65, 0x6E, 0x76, 0x17, 0x63, 0x61, 0x73, 0x70, 0x65,\n        0x72, 0x5F, 0x72, 0x65, 0x61, 0x64, 0x5F, 0x68, 0x6F, 0x73, 0x74, 0x5F, 0x62, 0x75, 0x66,\n        0x66, 0x65, 0x72, 0x00, 0x01, 0x03, 0x65, 0x6E, 0x76, 0x0D, 0x63, 0x61, 0x73, 0x70, 0x65,\n        0x72, 0x5F, 0x72, 0x65, 0x76, 0x65, 0x72, 0x74, 0x00,\n    ];\n    let execution_phase = ExecutionPhase::Session;\n    let expected_error = \"I/O Error: UnexpectedEof\";\n    run_test_case(WASM_BYTES, expected_error, execution_phase)\n}\n\n#[ignore]\n#[test]\nfn should_charge_payment_with_incorrect_wasm_gas_counter_overflow() {\n    let wasm_bytes = make_gas_counter_overflow();\n    let execution_phase = ExecutionPhase::Payment;\n    let expected_error = \"Encountered operation forbidden by gas rules\";\n    run_test_case(&wasm_bytes, expected_error, execution_phase)\n}\n\n#[ignore]\n#[test]\nfn 
should_charge_session_with_incorrect_wasm_gas_counter_overflow() {\n    let wasm_bytes = make_gas_counter_overflow();\n    let execution_phase = ExecutionPhase::Session;\n    let expected_error = \"Encountered operation forbidden by gas rules\";\n    run_test_case(&wasm_bytes, expected_error, execution_phase)\n}\n\n#[ignore]\n#[test]\nfn should_charge_payment_with_incorrect_wasm_no_memory_section() {\n    let wasm_bytes = make_module_without_memory_section();\n    let execution_phase = ExecutionPhase::Payment;\n    let expected_error = \"Memory section should exist\";\n    run_test_case(&wasm_bytes, expected_error, execution_phase)\n}\n\n#[ignore]\n#[test]\nfn should_charge_session_with_incorrect_wasm_no_memory_section() {\n    let wasm_bytes = make_module_without_memory_section();\n    let execution_phase = ExecutionPhase::Session;\n    let expected_error = \"Memory section should exist\";\n    run_test_case(&wasm_bytes, expected_error, execution_phase)\n}\n\n#[ignore]\n#[test]\nfn should_charge_payment_with_incorrect_wasm_start_section() {\n    let wasm_bytes = make_module_with_start_section();\n    let execution_phase = ExecutionPhase::Payment;\n    let expected_error = \"Unsupported Wasm start\";\n    run_test_case(&wasm_bytes, expected_error, execution_phase)\n}\n\n#[ignore]\n#[test]\nfn should_charge_session_with_incorrect_wasm_start_section() {\n    let wasm_bytes = make_module_with_start_section();\n    let execution_phase = ExecutionPhase::Session;\n    let expected_error = \"Unsupported Wasm start\";\n    run_test_case(&wasm_bytes, expected_error, execution_phase)\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/gov_427.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_WASM_V1_CONFIG,\n    LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::{engine_state::Error, execution::ExecError};\nuse casper_types::{addressable_entity::DEFAULT_ENTRY_POINT_NAME, RuntimeArgs};\nuse walrus::{ir::Value, FunctionBuilder, Module, ModuleConfig, ValType};\n\n/// Creates a wasm with a function that contains local section with types in `repeated_pattern`\n/// repeated `repeat_count` times with additional `extra_types` appended at the end of local group.\nfn make_arbitrary_local_count(\n    repeat_count: usize,\n    repeat_pattern: &[ValType],\n    extra_types: &[ValType],\n) -> Vec<u8> {\n    let mut module = Module::with_config(ModuleConfig::new());\n\n    let _memory_id = module.memories.add_local(false, 11, None);\n\n    let mut func_with_locals = FunctionBuilder::new(&mut module.types, &[], &[]);\n\n    let mut locals = Vec::new();\n    for _ in 0..repeat_count {\n        for val_type in repeat_pattern {\n            let local = module.locals.add(*val_type);\n            locals.push((local, *val_type));\n        }\n    }\n\n    for extra_type in extra_types {\n        let local = module.locals.add(*extra_type);\n        locals.push((local, *extra_type));\n    }\n\n    for (i, (local, val_type)) in locals.into_iter().enumerate() {\n        let value = match val_type {\n            ValType::I32 => Value::I32(i.try_into().unwrap()),\n            ValType::I64 => Value::I64(i.try_into().unwrap()),\n            ValType::F32 => Value::F32(i as f32),\n            ValType::F64 => Value::F64(i as f64),\n            ValType::V128 => Value::V128(i.try_into().unwrap()),\n            ValType::Externref | ValType::Funcref => todo!(\"{:?}\", val_type),\n        };\n        func_with_locals.func_body().const_(value).local_set(local);\n    }\n\n    let func_with_locals = func_with_locals.finish(vec![], &mut module.funcs);\n\n    let 
mut call_func = FunctionBuilder::new(&mut module.types, &[], &[]);\n\n    call_func.func_body().call(func_with_locals);\n\n    let call = call_func.finish(Vec::new(), &mut module.funcs);\n\n    module.exports.add(DEFAULT_ENTRY_POINT_NAME, call);\n\n    module.emit_wasm()\n}\n\n#[ignore]\n#[test]\nfn too_many_locals_should_exceed_stack_height() {\n    const CALL_COST: usize = 1;\n    let extra_types = [ValType::I32];\n    let repeat_pattern = [ValType::I64];\n    let max_stack_height = DEFAULT_WASM_V1_CONFIG.max_stack_height() as usize;\n\n    let success_wasm_bytes: Vec<u8> = make_arbitrary_local_count(\n        max_stack_height - extra_types.len() - CALL_COST - 1,\n        &repeat_pattern,\n        &extra_types,\n    );\n\n    let failing_wasm_bytes: Vec<u8> = make_arbitrary_local_count(\n        max_stack_height - extra_types.len() - CALL_COST,\n        &repeat_pattern,\n        &extra_types,\n    );\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let success_request = ExecuteRequestBuilder::module_bytes(\n        *DEFAULT_ACCOUNT_ADDR,\n        success_wasm_bytes,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(success_request).expect_success().commit();\n\n    let failing_request = ExecuteRequestBuilder::module_bytes(\n        *DEFAULT_ACCOUNT_ADDR,\n        failing_wasm_bytes,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(failing_request).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n\n    // Here we pass the preprocess stage, but we fail at stack height limiter as we do have very\n    // restrictive default stack height.\n    assert!(\n        matches!(\n            &error,\n            Error::Exec(ExecError::Interpreter(s)) if s.contains(\"Unreachable\")\n        ),\n        \"{:?}\",\n        error\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/gov_74.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, UpgradeRequestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_PROTOCOL_VERSION, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::{\n    engine_state::Error,\n    execution::ExecError,\n    runtime::{PreprocessingError, WasmValidationError, DEFAULT_MAX_PARAMETER_COUNT},\n};\nuse casper_types::{EraId, ProtocolVersion, RuntimeArgs, WasmV1Config};\n\nuse crate::wasm_utils;\n\nconst ARITY_INTERPRETER_LIMIT: usize = DEFAULT_MAX_PARAMETER_COUNT as usize;\nconst DEFAULT_ACTIVATION_POINT: EraId = EraId::new(1);\nconst I32_WAT_TYPE: &str = \"i64\";\nconst NEW_WASM_STACK_HEIGHT: u32 = 16;\nconst OLD_PROTOCOL_VERSION: ProtocolVersion = DEFAULT_PROTOCOL_VERSION;\nconst NEW_PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::from_parts(\n    OLD_PROTOCOL_VERSION.value().major,\n    OLD_PROTOCOL_VERSION.value().minor,\n    OLD_PROTOCOL_VERSION.value().patch + 1,\n);\n\nfn initialize_builder() -> LmdbWasmTestBuilder {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n    builder\n}\n\n#[ignore]\n#[test]\nfn should_pass_max_parameter_count() {\n    let mut builder = initialize_builder();\n\n    // This runs out of the interpreter stack limit\n    let module_bytes = wasm_utils::make_n_arg_call_bytes(ARITY_INTERPRETER_LIMIT, I32_WAT_TYPE)\n        .expect(\"should make wasm bytes\");\n\n    let exec = ExecuteRequestBuilder::module_bytes(\n        *DEFAULT_ACCOUNT_ADDR,\n        module_bytes,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec).expect_success().commit();\n\n    let module_bytes = wasm_utils::make_n_arg_call_bytes(ARITY_INTERPRETER_LIMIT + 1, I32_WAT_TYPE)\n        .expect(\"should make wasm bytes\");\n\n    let exec = ExecuteRequestBuilder::module_bytes(\n        *DEFAULT_ACCOUNT_ADDR,\n        module_bytes,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    
builder.exec(exec).expect_failure().commit();\n    let error = builder.get_error().expect(\"should have error\");\n\n    assert!(\n        matches!(\n            error,\n            Error::WasmPreprocessing(PreprocessingError::WasmValidation(\n                WasmValidationError::TooManyParameters {\n                    max: 256,\n                    actual: 257\n                }\n            ))\n        ),\n        \"{:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn should_observe_stack_height_limit() {\n    let mut builder = initialize_builder();\n\n    assert!(WasmV1Config::default().max_stack_height() > NEW_WASM_STACK_HEIGHT);\n\n    // This runs out of the interpreter stack limit\n    let exec_request_1 = {\n        let module_bytes =\n            wasm_utils::make_n_arg_call_bytes(NEW_WASM_STACK_HEIGHT as usize, I32_WAT_TYPE)\n                .expect(\"should make wasm bytes\");\n\n        ExecuteRequestBuilder::module_bytes(\n            *DEFAULT_ACCOUNT_ADDR,\n            module_bytes,\n            RuntimeArgs::default(),\n        )\n        .build()\n    };\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    {\n        let updated_chainspec = builder\n            .chainspec()\n            .clone()\n            .with_wasm_max_stack_height(NEW_WASM_STACK_HEIGHT);\n\n        builder.with_chainspec(updated_chainspec);\n\n        let mut upgrade_request = UpgradeRequestBuilder::new()\n            .with_current_protocol_version(OLD_PROTOCOL_VERSION)\n            .with_new_protocol_version(NEW_PROTOCOL_VERSION)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .build();\n\n        builder.upgrade(&mut upgrade_request);\n    }\n\n    // This runs out of the interpreter stack limit.\n    // An amount of args equal to the new limit fails because there's overhead of `fn call` that\n    // adds 1 to the height.\n    let exec_request_2 = {\n        let module_bytes =\n            
wasm_utils::make_n_arg_call_bytes(NEW_WASM_STACK_HEIGHT as usize, I32_WAT_TYPE)\n                .expect(\"should make wasm bytes\");\n\n        ExecuteRequestBuilder::module_bytes(\n            *DEFAULT_ACCOUNT_ADDR,\n            module_bytes,\n            RuntimeArgs::default(),\n        )\n        .build()\n    };\n\n    builder.exec(exec_request_2).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(&error, Error::Exec(ExecError::Interpreter(s)) if s.contains(\"Unreachable\")),\n        \"{:?}\",\n        error\n    );\n\n    // But new limit minus one runs fine\n    let exec_request_3 = {\n        let module_bytes =\n            wasm_utils::make_n_arg_call_bytes(NEW_WASM_STACK_HEIGHT as usize - 1, I32_WAT_TYPE)\n                .expect(\"should make wasm bytes\");\n\n        ExecuteRequestBuilder::module_bytes(\n            *DEFAULT_ACCOUNT_ADDR,\n            module_bytes,\n            RuntimeArgs::default(),\n        )\n        .build()\n    };\n\n    builder.exec(exec_request_3).expect_success().commit();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/gov_89_regression.rs",
    "content": "use std::{\n    collections::BTreeSet,\n    convert::TryInto,\n    time::{Duration, SystemTime, UNIX_EPOCH},\n};\n\nuse num_traits::Zero;\nuse once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    utils, LmdbWasmTestBuilder, StepRequestBuilder, DEFAULT_ACCOUNTS,\n};\nuse casper_storage::data_access_layer::{SlashItem, StepResult};\nuse casper_types::{\n    execution::TransformKindV2,\n    system::auction::{\n        BidsExt, DelegationRate, SeigniorageRecipientsSnapshotV2,\n        SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY,\n    },\n    CLValue, EntityAddr, EraId, GenesisAccount, GenesisValidator, Key, Motes, ProtocolVersion,\n    PublicKey, SecretKey, StoredValue, U512,\n};\n\nstatic ACCOUNT_1_PUBLIC_KEY: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([200; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nconst ACCOUNT_1_BALANCE: u64 = 100_000_000;\nconst ACCOUNT_1_BOND: u64 = 100_000_000;\n\nstatic ACCOUNT_2_PUBLIC_KEY: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([202; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nconst ACCOUNT_2_BALANCE: u64 = 200_000_000;\nconst ACCOUNT_2_BOND: u64 = 200_000_000;\n\nfn initialize_builder() -> LmdbWasmTestBuilder {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            ACCOUNT_1_PUBLIC_KEY.clone(),\n            Motes::new(ACCOUNT_1_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_1_BOND),\n                DelegationRate::zero(),\n            )),\n        );\n        let account_2 = GenesisAccount::account(\n            ACCOUNT_2_PUBLIC_KEY.clone(),\n            Motes::new(ACCOUNT_2_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_2_BOND),\n    
            DelegationRate::zero(),\n            )),\n        );\n        tmp.push(account_1);\n        tmp.push(account_2);\n        tmp\n    };\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n    builder.run_genesis(run_genesis_request);\n    builder\n}\n\n#[ignore]\n#[test]\nfn should_not_create_any_purse() {\n    let mut builder = initialize_builder();\n    let auction_hash = builder.get_auction_contract_hash();\n\n    let mut now = SystemTime::now();\n    let eras_end_timestamp_millis_1 = now.duration_since(UNIX_EPOCH).expect(\"Time went backwards\");\n\n    now += Duration::from_secs(60 * 60);\n    let eras_end_timestamp_millis_2 = now.duration_since(UNIX_EPOCH).expect(\"Time went backwards\");\n\n    assert!(eras_end_timestamp_millis_2 > eras_end_timestamp_millis_1);\n\n    let step_request_1 = StepRequestBuilder::new()\n        .with_parent_state_hash(builder.get_post_state_hash())\n        .with_protocol_version(ProtocolVersion::V1_0_0)\n        .with_slash_item(SlashItem::new(ACCOUNT_1_PUBLIC_KEY.clone()))\n        .with_next_era_id(EraId::from(1))\n        .with_era_end_timestamp_millis(eras_end_timestamp_millis_1.as_millis().try_into().unwrap())\n        .build();\n\n    let before_auction_seigniorage: SeigniorageRecipientsSnapshotV2 = builder.get_value(\n        EntityAddr::System(auction_hash.value()),\n        SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY,\n    );\n\n    let bids_before_slashing = builder.get_bids();\n    assert!(\n        bids_before_slashing.contains_validator_public_key(&ACCOUNT_1_PUBLIC_KEY),\n        \"should have entry in the genesis bids table {:?}\",\n        bids_before_slashing\n    );\n\n    let effects_1 = match builder.step(step_request_1) {\n        StepResult::Failure(_) => {\n            panic!(\"step_request_1: Failure\")\n        }\n        StepResult::RootNotFound => {\n            panic!(\"step_request_1: RootNotFound\")\n        }\n        StepResult::Success { effects, .. 
} => effects,\n    };\n\n    assert!(\n        builder\n            .query(\n                None,\n                Key::Unbond(ACCOUNT_1_PUBLIC_KEY.to_account_hash()),\n                &[],\n            )\n            .is_err(),\n        \"slash does not unbond\"\n    );\n\n    let bids_after_slashing = builder.get_bids();\n    assert!(\n        !bids_after_slashing.contains_validator_public_key(&ACCOUNT_1_PUBLIC_KEY),\n        \"should not have entry after slashing {:?}\",\n        bids_after_slashing\n    );\n\n    let bids_after_slashing = builder.get_bids();\n    let account_1_bid = bids_after_slashing.validator_bid(&ACCOUNT_1_PUBLIC_KEY);\n    assert!(account_1_bid.is_none());\n\n    let bids_after_slashing = builder.get_bids();\n    assert_ne!(\n        bids_before_slashing, bids_after_slashing,\n        \"bids table should be different before and after slashing\"\n    );\n\n    // seigniorage snapshot should have changed after auction\n    let after_auction_seigniorage: SeigniorageRecipientsSnapshotV2 = builder.get_value(\n        EntityAddr::System(auction_hash.value()),\n        SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY,\n    );\n    assert!(\n        !after_auction_seigniorage\n            .keys()\n            .all(|key| before_auction_seigniorage.contains_key(key)),\n        \"run auction should have changed seigniorage keys\"\n    );\n\n    let step_request_2 = StepRequestBuilder::new()\n        .with_parent_state_hash(builder.get_post_state_hash())\n        .with_protocol_version(ProtocolVersion::V1_0_0)\n        .with_slash_item(SlashItem::new(ACCOUNT_1_PUBLIC_KEY.clone()))\n        .with_next_era_id(EraId::from(2))\n        .with_era_end_timestamp_millis(eras_end_timestamp_millis_2.as_millis().try_into().unwrap())\n        .build();\n\n    let effects_2 = match builder.step(step_request_2) {\n        StepResult::RootNotFound | StepResult::Failure(_) => {\n            panic!(\"step_request_2: failed to step\")\n        }\n        StepResult::Success { 
effects, .. } => effects,\n    };\n\n    let cl_u512_zero = CLValue::from_t(U512::zero()).unwrap();\n\n    let balances_1: BTreeSet<Key> = effects_1\n        .transforms()\n        .iter()\n        .filter_map(|transform| match transform.kind() {\n            TransformKindV2::Write(StoredValue::CLValue(cl_value))\n                if transform.key().as_balance().is_some() && cl_value == &cl_u512_zero =>\n            {\n                Some(*transform.key())\n            }\n            _ => None,\n        })\n        .collect();\n\n    assert_eq!(balances_1.len(), 0, \"distribute should not create purses\");\n\n    let balances_2: BTreeSet<Key> = effects_2\n        .transforms()\n        .iter()\n        .filter_map(|transform| match transform.kind() {\n            TransformKindV2::Write(StoredValue::CLValue(cl_value))\n                if transform.key().as_balance().is_some() && cl_value == &cl_u512_zero =>\n            {\n                Some(*transform.key())\n            }\n            _ => None,\n        })\n        .collect();\n\n    assert_eq!(balances_2.len(), 0, \"distribute should not create purses\");\n\n    let common_keys: BTreeSet<_> = balances_1.intersection(&balances_2).collect();\n    assert_eq!(common_keys.len(), 0, \"there should be no commmon Key::Balance keys with Transfer::Write(0) in two distinct step requests\");\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/host_function_metrics_size_and_gas_cost.rs",
    "content": "use casper_engine_test_support::{\n    utils, DeployItemBuilder, ExecuteRequest, ExecuteRequestBuilder, LmdbWasmTestBuilder,\n    DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse casper_execution_engine::{engine_state, execution::ExecError};\nuse casper_types::{\n    account::{AccountHash, ACCOUNT_HASH_LENGTH},\n    bytesrepr::Bytes,\n    runtime_args,\n    system::standard_payment,\n    ApiError, U512,\n};\n\nconst CONTRACT_HOST_FUNCTION_METRICS: &str = \"host_function_metrics.wasm\";\nconst CONTRACT_TRANSFER_TO_ACCOUNT_U512: &str = \"transfer_to_account_u512.wasm\";\n\n// This value is not systemic, as code is added the size of WASM will increase,\n// you can change this value to reflect the increase in WASM size.\nconst HOST_FUNCTION_METRICS_STANDARD_SIZE: usize = 160_000;\nconst HOST_FUNCTION_METRICS_STANDARD_GAS_COST: u64 = 475_000_000_000;\n\n/// Acceptable size regression/improvement in percentage.\nconst SIZE_MARGIN: usize = 5;\n/// Acceptable gas cost regression/improvement in percentage.\nconst GAS_COST_MARGIN: u64 = 5;\n\nconst HOST_FUNCTION_METRICS_MAX_SIZE: usize =\n    HOST_FUNCTION_METRICS_STANDARD_SIZE * (100 + SIZE_MARGIN) / 100;\nconst HOST_FUNCTION_METRICS_MAX_GAS_COST: u64 =\n    HOST_FUNCTION_METRICS_STANDARD_GAS_COST * (100 + GAS_COST_MARGIN) / 100;\n\nconst ACCOUNT0_ADDR: AccountHash = AccountHash::new([42; ACCOUNT_HASH_LENGTH]);\nconst ACCOUNT1_ADDR: AccountHash = AccountHash::new([43; ACCOUNT_HASH_LENGTH]);\n\nconst ARG_TARGET: &str = \"target\";\nconst ARG_AMOUNT: &str = \"amount\";\n\nconst ARG_SEED: &str = \"seed\";\nconst ARG_OTHERS: &str = \"others\";\nconst EXPECTED_REVERT_VALUE: u16 = 9;\nconst SEED_VALUE: u64 = 821_577_831_833_715_345;\nconst TRANSFER_FROM_MAIN_PURSE_AMOUNT: u64 = 2_000_000_u64;\n\n#[ignore]\n#[test]\nfn host_function_metrics_has_acceptable_size() {\n    let size = utils::read_wasm_file(CONTRACT_HOST_FUNCTION_METRICS).len();\n    assert!(\n      
  size <= HOST_FUNCTION_METRICS_MAX_SIZE,\n        \"Performance regression: contract host-function-metrics became {} bytes long; up to {} bytes long would be acceptable.\",\n        size,\n        HOST_FUNCTION_METRICS_MAX_SIZE\n    );\n    println!(\n        \"contract host-function-metrics byte size: {}, ubound: {}\",\n        size, HOST_FUNCTION_METRICS_MAX_SIZE\n    )\n}\n\nfn create_account_exec_request(address: AccountHash) -> ExecuteRequest {\n    ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT_U512,\n        runtime_args! {\n            ARG_TARGET => address,\n            ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE),\n        },\n    )\n    .build()\n}\n\n#[ignore]\n#[test]\nfn host_function_metrics_has_acceptable_gas_cost() {\n    let mut builder = setup();\n\n    let seed: u64 = SEED_VALUE;\n    let random_bytes = {\n        let mut random_bytes = vec![0_u8; 10_000];\n        for (i, byte) in random_bytes.iter_mut().enumerate() {\n            *byte = i.checked_rem(256).unwrap().try_into().unwrap();\n        }\n        random_bytes\n    };\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(ACCOUNT0_ADDR)\n        .with_deploy_hash([55; 32])\n        .with_session_code(\n            CONTRACT_HOST_FUNCTION_METRICS,\n            runtime_args! {\n                ARG_SEED => seed,\n                ARG_OTHERS => (Bytes::from(random_bytes), ACCOUNT0_ADDR, ACCOUNT1_ADDR),\n                ARG_AMOUNT => TRANSFER_FROM_MAIN_PURSE_AMOUNT,\n            },\n        )\n        .with_standard_payment(runtime_args! 
{ standard_payment::ARG_AMOUNT => *DEFAULT_PAYMENT })\n        .with_authorization_keys(&[ACCOUNT0_ADDR])\n        .build();\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(exec_request);\n\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::Exec(ExecError::Revert(ApiError::User(user_error)))\n            if user_error == EXPECTED_REVERT_VALUE\n        ),\n        \"Expected revert but actual error is {:?}\",\n        error\n    );\n\n    let gas_cost = builder.last_exec_gas_consumed().value();\n    assert!(\n        gas_cost <= U512::from(HOST_FUNCTION_METRICS_MAX_GAS_COST),\n        \"Performance regression: contract host-function-metrics used {} gas; it should use no more than {} gas.\",\n        gas_cost,\n        HOST_FUNCTION_METRICS_MAX_GAS_COST\n    );\n}\n\nfn setup() -> LmdbWasmTestBuilder {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(create_account_exec_request(ACCOUNT0_ADDR))\n        .expect_success()\n        .commit()\n        .exec(create_account_exec_request(ACCOUNT1_ADDR))\n        .expect_success()\n        .commit();\n    builder\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/mod.rs",
    "content": "mod ee_1045;\nmod ee_1071;\nmod ee_1103;\nmod ee_1119;\nmod ee_1120;\nmod ee_1129;\nmod ee_1152;\nmod ee_1160;\nmod ee_1163;\nmod ee_1174;\nmod ee_1217;\nmod ee_1225;\nmod ee_221;\nmod ee_401;\nmod ee_441;\nmod ee_460;\nmod ee_468;\nmod ee_470;\nmod ee_532;\nmod ee_536;\nmod ee_539;\nmod ee_549;\nmod ee_550;\nmod ee_572;\nmod ee_584;\nmod ee_597;\nmod ee_598;\nmod ee_599;\nmod ee_601;\nmod ee_771;\nmod ee_890;\nmod ee_966;\nmod gh_1470;\nmod gh_1688;\nmod gh_1902;\nmod gh_1931;\nmod gh_2280;\nmod gh_3097;\nmod gh_3208;\nmod gh_3710;\nmod gh_4898;\nmod gov_116;\nmod gov_42;\nmod gov_427;\nmod gov_74;\nmod gov_89_regression;\nmod host_function_metrics_size_and_gas_cost;\nmod regression_20210707;\nmod regression_20210831;\nmod regression_20210924;\nmod regression_20211110;\nmod regression_20220119;\nmod regression_20220204;\nmod regression_20220207;\nmod regression_20220208;\nmod regression_20220211;\nmod regression_20220217;\nmod regression_20220221;\nmod regression_20220222;\nmod regression_20220223;\nmod regression_20220224;\nmod regression_20220303;\nmod regression_20220727;\nmod regression_20240105;\nmod regression_20250812;\nmod slow_input;\npub(crate) mod test_utils;\nmod transforms_must_be_ordered;\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/regression_20210707.rs",
    "content": "use once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    DeployItemBuilder, EntityWithNamedKeys, ExecuteRequest, ExecuteRequestBuilder,\n    LmdbWasmTestBuilder, TransferRequestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT,\n    LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse casper_execution_engine::{engine_state::Error as CoreError, execution::ExecError};\nuse casper_storage::{data_access_layer::TransferRequest, system::transfer::TransferError};\nuse casper_types::{\n    account::AccountHash, runtime_args, system::mint, AccessRights, AddressableEntityHash,\n    PublicKey, RuntimeArgs, SecretKey, URef, U512,\n};\n\nuse crate::wasm_utils;\n\nconst HARDCODED_UREF: URef = URef::new([42; 32], AccessRights::READ_ADD_WRITE);\nconst CONTRACT_HASH_NAME: &str = \"contract_hash\";\n\nconst METHOD_SEND_TO_ACCOUNT: &str = \"send_to_account\";\nconst METHOD_SEND_TO_PURSE: &str = \"send_to_purse\";\nconst METHOD_HARDCODED_PURSE_SRC: &str = \"hardcoded_purse_src\";\nconst METHOD_STORED_PAYMENT: &str = \"stored_payment\";\nconst METHOD_HARDCODED_PAYMENT: &str = \"hardcoded_payment\";\n\nconst ARG_SOURCE: &str = \"source\";\nconst ARG_RECIPIENT: &str = \"recipient\";\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_TARGET: &str = \"target\";\n\nconst REGRESSION_20210707: &str = \"regression_20210707.wasm\";\n\nstatic ALICE_KEY: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic ALICE_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*ALICE_KEY));\n\nstatic BOB_KEY: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([4; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic BOB_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*BOB_KEY));\n\nfn setup_regression_contract() -> ExecuteRequest {\n    ExecuteRequestBuilder::standard(\n       
 *DEFAULT_ACCOUNT_ADDR,\n        REGRESSION_20210707,\n        runtime_args! {\n            mint::ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE),\n        },\n    )\n    .build()\n}\n\nfn transfer(sender: AccountHash, target: AccountHash, amount: u64) -> TransferRequest {\n    TransferRequestBuilder::new(amount, target)\n        .with_initiator(sender)\n        .build()\n}\n\nfn get_account_entity_hash(entity: &EntityWithNamedKeys) -> AddressableEntityHash {\n    entity\n        .named_keys()\n        .get(CONTRACT_HASH_NAME)\n        .cloned()\n        .expect(\"should have contract hash\")\n        .into_entity_hash_addr()\n        .map(AddressableEntityHash::new)\n        .unwrap()\n}\n\nfn assert_forged_uref_error(error: CoreError, forged_uref: URef) {\n    assert!(\n        matches!(error, CoreError::Exec(ExecError::ForgedReference(uref)) if uref == forged_uref),\n        \"Expected forged uref {:?} but received {:?}\",\n        forged_uref,\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn should_transfer_funds_from_contract_to_new_account() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let store_request = setup_regression_contract();\n\n    let fund_request = transfer(\n        *DEFAULT_ACCOUNT_ADDR,\n        *ALICE_ADDR,\n        MINIMUM_ACCOUNT_CREATION_BALANCE,\n    );\n\n    builder.exec(store_request).commit().expect_success();\n    builder.transfer_and_commit(fund_request).expect_success();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .unwrap();\n\n    let contract_hash = get_account_entity_hash(&account);\n\n    assert!(builder.get_entity_by_account_hash(*BOB_ADDR).is_none());\n\n    let call_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        METHOD_SEND_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_RECIPIENT => *BOB_ADDR,\n            ARG_AMOUNT => U512::from(700_000_000_000u64),\n        },\n    )\n    .build();\n\n    builder.exec(call_request).commit().expect_success();\n}\n\n#[ignore]\n#[test]\nfn should_transfer_funds_from_contract_to_existing_account() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let store_request = setup_regression_contract();\n\n    let fund_request_1 = transfer(\n        *DEFAULT_ACCOUNT_ADDR,\n        *ALICE_ADDR,\n        MINIMUM_ACCOUNT_CREATION_BALANCE,\n    );\n\n    let fund_request_2 = transfer(\n        *DEFAULT_ACCOUNT_ADDR,\n        *BOB_ADDR,\n        MINIMUM_ACCOUNT_CREATION_BALANCE,\n    );\n\n    builder.exec(store_request).commit().expect_success();\n    builder.transfer_and_commit(fund_request_1).expect_success();\n    builder.transfer_and_commit(fund_request_2).expect_success();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .unwrap();\n\n    let contract_hash = get_account_entity_hash(&account);\n\n    let call_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        METHOD_SEND_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_RECIPIENT => *BOB_ADDR,\n            ARG_AMOUNT => U512::from(700_000_000_000u64),\n        },\n    )\n    .build();\n\n    builder.exec(call_request).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_not_transfer_funds_from_forged_purse_to_account_native_transfer() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let store_request = setup_regression_contract();\n\n    let fund_request = transfer(\n        *DEFAULT_ACCOUNT_ADDR,\n        *ALICE_ADDR,\n        MINIMUM_ACCOUNT_CREATION_BALANCE,\n    );\n\n    builder.exec(store_request).commit().expect_success();\n    builder.transfer_and_commit(fund_request).expect_success();\n\n    let take_from = builder.get_expected_addressable_entity_by_account_hash(*ALICE_ADDR);\n    let alice_main_purse = take_from.main_purse();\n\n    let transfer_request = TransferRequestBuilder::new(700_000_000_000_u64, *BOB_ADDR)\n        .with_source(alice_main_purse)\n        .build();\n\n    builder.transfer_and_commit(transfer_request);\n\n    let error = builder.get_error().expect(\"should have error\");\n\n    assert!(\n        matches!(error, CoreError::Transfer(TransferError::ForgedReference(uref)) if uref == alice_main_purse),\n        \"Expected forged uref {:?} but received {:?}\",\n        alice_main_purse,\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn should_not_transfer_funds_from_forged_purse_to_owned_purse() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let store_request = setup_regression_contract();\n\n    let fund_request_1 = transfer(\n        *DEFAULT_ACCOUNT_ADDR,\n        *ALICE_ADDR,\n        MINIMUM_ACCOUNT_CREATION_BALANCE,\n    );\n\n    let fund_request_2 = transfer(\n        *DEFAULT_ACCOUNT_ADDR,\n        *BOB_ADDR,\n        MINIMUM_ACCOUNT_CREATION_BALANCE,\n    );\n\n    
builder.exec(store_request).commit().expect_success();\n    builder.transfer_and_commit(fund_request_1).expect_success();\n    builder.transfer_and_commit(fund_request_2).expect_success();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .unwrap();\n\n    let bob = builder\n        .get_entity_with_named_keys_by_account_hash(*BOB_ADDR)\n        .unwrap();\n    let bob_main_purse = bob.main_purse();\n\n    let contract_hash = get_account_entity_hash(&account);\n\n    let call_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        METHOD_SEND_TO_PURSE,\n        runtime_args! {\n            ARG_TARGET => bob_main_purse,\n            ARG_AMOUNT => U512::from(700_000_000_000u64),\n        },\n    )\n    .build();\n\n    builder.exec(call_request).commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n\n    assert_forged_uref_error(error, bob_main_purse);\n}\n\n#[ignore]\n#[test]\nfn should_not_transfer_funds_into_bob_purse() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let store_request = setup_regression_contract();\n\n    let fund_request_1 = transfer(\n        *DEFAULT_ACCOUNT_ADDR,\n        *BOB_ADDR,\n        MINIMUM_ACCOUNT_CREATION_BALANCE,\n    );\n\n    builder.exec(store_request).commit().expect_success();\n    builder.transfer_and_commit(fund_request_1).expect_success();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .unwrap();\n\n    let bob = builder.get_expected_addressable_entity_by_account_hash(*BOB_ADDR);\n    let bob_main_purse = bob.main_purse();\n\n    let contract_hash = get_account_entity_hash(&account);\n\n    let call_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        
METHOD_SEND_TO_PURSE,\n        runtime_args! {\n            ARG_TARGET => bob_main_purse,\n            ARG_AMOUNT => U512::from(700_000_000_000u64),\n        },\n    )\n    .build();\n\n    builder.exec(call_request).commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n\n    assert_forged_uref_error(error, bob_main_purse);\n}\n\n#[ignore]\n#[test]\nfn should_not_transfer_from_hardcoded_purse() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let store_request = setup_regression_contract();\n\n    let fund_request_1 = transfer(\n        *DEFAULT_ACCOUNT_ADDR,\n        *BOB_ADDR,\n        MINIMUM_ACCOUNT_CREATION_BALANCE,\n    );\n\n    builder.exec(store_request).commit().expect_success();\n    builder.transfer_and_commit(fund_request_1).expect_success();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .unwrap();\n\n    let contract_hash = get_account_entity_hash(&account);\n\n    let call_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        METHOD_HARDCODED_PURSE_SRC,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(700_000_000_000u64),\n        },\n    )\n    .build();\n\n    builder.exec(call_request).commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n\n    assert_forged_uref_error(error, HARDCODED_UREF);\n}\n\n#[ignore]\n#[allow(unused)]\n//#[test]\nfn should_not_refund_to_bob_and_charge_alice() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let store_request = setup_regression_contract();\n\n    let fund_request_1 = transfer(\n        *DEFAULT_ACCOUNT_ADDR,\n        *ALICE_ADDR,\n        MINIMUM_ACCOUNT_CREATION_BALANCE,\n    );\n\n    let fund_request_2 = transfer(\n        *DEFAULT_ACCOUNT_ADDR,\n        *BOB_ADDR,\n        MINIMUM_ACCOUNT_CREATION_BALANCE,\n    );\n\n    builder.exec(store_request).commit().expect_success();\n    builder.transfer_and_commit(fund_request_1).expect_success();\n    builder.transfer_and_commit(fund_request_2).expect_success();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .unwrap();\n\n    let bob = builder.get_expected_addressable_entity_by_account_hash(*BOB_ADDR);\n    let bob_main_purse = bob.main_purse();\n\n    let contract_hash = get_account_entity_hash(&account);\n\n    let args = runtime_args! 
{\n        ARG_SOURCE => bob_main_purse,\n        ARG_AMOUNT => *DEFAULT_PAYMENT,\n    };\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::default())\n        .with_stored_payment_hash(contract_hash, METHOD_STORED_PAYMENT, args)\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([77; 32])\n        .build();\n\n    let call_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(call_request).commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n\n    assert_forged_uref_error(error, bob_main_purse);\n}\n\n#[ignore]\n#[test]\nfn should_not_charge_alice_for_execution() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let store_request = setup_regression_contract();\n\n    let fund_request_1 = transfer(\n        *DEFAULT_ACCOUNT_ADDR,\n        *ALICE_ADDR,\n        MINIMUM_ACCOUNT_CREATION_BALANCE,\n    );\n\n    let fund_request_2 = transfer(\n        *DEFAULT_ACCOUNT_ADDR,\n        *BOB_ADDR,\n        MINIMUM_ACCOUNT_CREATION_BALANCE,\n    );\n\n    builder.exec(store_request).commit().expect_success();\n    builder.transfer_and_commit(fund_request_1).expect_success();\n    builder.transfer_and_commit(fund_request_2).expect_success();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .unwrap();\n\n    let bob = builder.get_expected_addressable_entity_by_account_hash(*BOB_ADDR);\n    let bob_main_purse = bob.main_purse();\n\n    let contract_hash = get_account_entity_hash(&account);\n\n    let args = runtime_args! 
{\n        ARG_SOURCE => bob_main_purse,\n        ARG_AMOUNT => *DEFAULT_PAYMENT,\n    };\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        // Just do nothing if ever we'd get into session execution\n        .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::default())\n        .with_stored_payment_hash(contract_hash, METHOD_STORED_PAYMENT, args)\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([77; 32])\n        .build();\n\n    let call_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(call_request).commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n\n    assert_forged_uref_error(error, bob_main_purse);\n}\n\n#[ignore]\n#[test]\nfn should_not_charge_for_execution_from_hardcoded_purse() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let store_request = setup_regression_contract();\n\n    let fund_request_1 = transfer(\n        *DEFAULT_ACCOUNT_ADDR,\n        *ALICE_ADDR,\n        MINIMUM_ACCOUNT_CREATION_BALANCE,\n    );\n\n    let fund_request_2 = transfer(\n        *DEFAULT_ACCOUNT_ADDR,\n        *BOB_ADDR,\n        MINIMUM_ACCOUNT_CREATION_BALANCE,\n    );\n\n    builder.exec(store_request).commit().expect_success();\n    builder.transfer_and_commit(fund_request_1).expect_success();\n    builder.transfer_and_commit(fund_request_2).expect_success();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .unwrap();\n\n    let contract_hash = get_account_entity_hash(&account);\n\n    let args = runtime_args! 
{\n        ARG_AMOUNT => *DEFAULT_PAYMENT,\n    };\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        // Just do nothing if ever we'd get into session execution\n        .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::default())\n        .with_stored_payment_hash(contract_hash, METHOD_HARDCODED_PAYMENT, args)\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([77; 32])\n        .build();\n\n    let call_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(call_request).commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n\n    assert_forged_uref_error(error, HARDCODED_UREF);\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/regression_20210831.rs",
    "content": "use once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_ACCOUNT_PUBLIC_KEY, LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse casper_execution_engine::{\n    engine_state::{engine_config::DEFAULT_MINIMUM_DELEGATION_AMOUNT, Error as CoreError},\n    execution::ExecError,\n};\nuse casper_types::{\n    account::AccountHash,\n    runtime_args,\n    system::auction::{self, BidsExt, DelegationRate},\n    ApiError, PublicKey, RuntimeArgs, SecretKey, U512,\n};\n\nstatic ACCOUNT_1_SECRET_KEY: Lazy<SecretKey> =\n    Lazy::new(|| SecretKey::secp256k1_from_bytes([57; 32]).unwrap());\nstatic ACCOUNT_1_PUBLIC_KEY: Lazy<PublicKey> =\n    Lazy::new(|| PublicKey::from(&*ACCOUNT_1_SECRET_KEY));\nstatic ACCOUNT_1_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*ACCOUNT_1_PUBLIC_KEY));\n\nstatic ACCOUNT_2_SECRET_KEY: Lazy<SecretKey> =\n    Lazy::new(|| SecretKey::secp256k1_from_bytes([75; 32]).unwrap());\nstatic ACCOUNT_2_PUBLIC_KEY: Lazy<PublicKey> =\n    Lazy::new(|| PublicKey::from(&*ACCOUNT_2_SECRET_KEY));\nstatic ACCOUNT_2_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*ACCOUNT_2_PUBLIC_KEY));\n\nconst CONTRACT_REGRESSION_20210831: &str = \"regression_20210831.wasm\";\n\nconst METHOD_ADD_BID_PROXY_CALL: &str = \"add_bid_proxy_call\";\nconst METHOD_WITHDRAW_PROXY_CALL: &str = \"withdraw_proxy_call\";\nconst METHOD_DELEGATE_PROXY_CALL: &str = \"delegate_proxy_call\";\nconst METHOD_UNDELEGATE_PROXY_CALL: &str = \"undelegate_proxy_call\";\nconst METHOD_ACTIVATE_BID_CALL: &str = \"activate_bid_proxy_call\";\n\nconst CONTRACT_HASH_NAME: &str = \"contract_hash\";\n\nconst BID_DELEGATION_RATE: DelegationRate = 42;\nstatic BID_AMOUNT: Lazy<U512> = Lazy::new(|| U512::from(1_000_000));\nstatic DELEGATE_AMOUNT: Lazy<U512> = Lazy::new(|| U512::from(500_000));\n\nfn setup() -> LmdbWasmTestBuilder {\n    let mut builder = 
LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let transfer_request_1 =\n        TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, *ACCOUNT_1_ADDR).build();\n\n    builder\n        .transfer_and_commit(transfer_request_1)\n        .expect_success();\n\n    let transfer_request_2 =\n        TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, *ACCOUNT_2_ADDR).build();\n\n    builder\n        .transfer_and_commit(transfer_request_2)\n        .expect_success();\n\n    let install_request_1 = ExecuteRequestBuilder::standard(\n        *ACCOUNT_2_ADDR,\n        CONTRACT_REGRESSION_20210831,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(install_request_1).expect_success().commit();\n\n    builder\n}\n\n#[ignore]\n#[test]\nfn regression_20210831_should_fail_to_add_bid() {\n    let mut builder = setup();\n\n    let sender = *ACCOUNT_2_ADDR;\n    let add_bid_args = runtime_args! 
{\n        auction::ARG_PUBLIC_KEY => ACCOUNT_1_PUBLIC_KEY.clone(),\n        auction::ARG_AMOUNT => *BID_AMOUNT,\n        auction::ARG_DELEGATION_RATE => BID_DELEGATION_RATE,\n    };\n\n    let add_bid_request_1 = ExecuteRequestBuilder::contract_call_by_hash(\n        sender,\n        builder.get_auction_contract_hash(),\n        auction::METHOD_ADD_BID,\n        add_bid_args.clone(),\n    )\n    .build();\n\n    builder.exec(add_bid_request_1);\n\n    let error_1 = builder\n        .get_error()\n        .expect(\"attempt 1 should raise invalid context\");\n    assert!(\n        matches!(error_1, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(error_code))) if error_code == auction::Error::InvalidContext as u8),\n        \"{:?}\",\n        error_1\n    );\n\n    // ACCOUNT_2 attempts to add a bid for ACCOUNT_1 through a proxy\n    let add_bid_request_2 = ExecuteRequestBuilder::contract_call_by_name(\n        sender,\n        CONTRACT_HASH_NAME,\n        METHOD_ADD_BID_PROXY_CALL,\n        add_bid_args,\n    )\n    .build();\n\n    builder.exec(add_bid_request_2).commit();\n\n    let error_2 = builder\n        .get_error()\n        .expect(\"attempt 2 should raise invalid context\");\n    assert!(\n        matches!(error_2, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(error_code))) if error_code == auction::Error::InvalidContext as u8),\n        \"{:?}\",\n        error_2\n    );\n}\n\n#[ignore]\n#[test]\nfn regression_20210831_should_fail_to_delegate() {\n    let mut builder = setup();\n\n    let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        builder.get_auction_contract_hash(),\n        auction::METHOD_ADD_BID,\n        runtime_args! 
{\n            auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            auction::ARG_AMOUNT => *BID_AMOUNT,\n            auction::ARG_DELEGATION_RATE => BID_DELEGATION_RATE,\n        },\n    )\n    .build();\n\n    builder.exec(add_bid_request).expect_success().commit();\n\n    let sender = *ACCOUNT_2_ADDR;\n    let delegate_args = runtime_args! {\n        auction::ARG_VALIDATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n        auction::ARG_DELEGATOR => ACCOUNT_1_PUBLIC_KEY.clone(),\n        auction::ARG_AMOUNT => *DELEGATE_AMOUNT,\n    };\n\n    let delegate_request_1 = ExecuteRequestBuilder::contract_call_by_hash(\n        sender,\n        builder.get_auction_contract_hash(),\n        auction::METHOD_DELEGATE,\n        delegate_args.clone(),\n    )\n    .build();\n\n    builder.exec(delegate_request_1);\n\n    let error_1 = builder\n        .get_error()\n        .expect(\"attempt 1 should raise invalid context\");\n    assert!(\n        matches!(error_1, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(error_code))) if error_code == auction::Error::InvalidContext as u8),\n        \"{:?}\",\n        error_1\n    );\n\n    // ACCOUNT_2 attempts to delegate on behalf of ACCOUNT_1 through a proxy\n    let delegate_request_2 = ExecuteRequestBuilder::contract_call_by_name(\n        sender,\n        CONTRACT_HASH_NAME,\n        METHOD_DELEGATE_PROXY_CALL,\n        delegate_args,\n    )\n    .build();\n\n    builder.exec(delegate_request_2).commit();\n\n    let error_2 = builder\n        .get_error()\n        .expect(\"attempt 2 should raise invalid context\");\n    assert!(\n        matches!(error_2, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(error_code))) if error_code == auction::Error::InvalidContext as u8),\n        \"{:?}\",\n        error_2\n    );\n}\n\n#[ignore]\n#[test]\nfn regression_20210831_should_fail_to_withdraw_bid() {\n    let mut builder = setup();\n\n    let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash(\n        
*ACCOUNT_1_ADDR,\n        builder.get_auction_contract_hash(),\n        auction::METHOD_ADD_BID,\n        runtime_args! {\n            auction::ARG_PUBLIC_KEY => ACCOUNT_1_PUBLIC_KEY.clone(),\n            auction::ARG_AMOUNT => *BID_AMOUNT,\n            auction::ARG_DELEGATION_RATE => BID_DELEGATION_RATE,\n        },\n    )\n    .build();\n\n    builder.exec(add_bid_request).expect_success().commit();\n\n    let bids = builder.get_bids();\n    let account_1_bid_before = bids\n        .validator_bid(&ACCOUNT_1_PUBLIC_KEY)\n        .expect(\"validator bid should exist\");\n    assert_eq!(\n        builder.get_purse_balance(*account_1_bid_before.bonding_purse()),\n        *BID_AMOUNT,\n    );\n    assert!(\n        !account_1_bid_before.inactive(),\n        \"newly added bid should be active\"\n    );\n\n    let sender = *ACCOUNT_2_ADDR;\n    let withdraw_bid_args = runtime_args! {\n        auction::ARG_PUBLIC_KEY => ACCOUNT_1_PUBLIC_KEY.clone(),\n        auction::ARG_AMOUNT => *BID_AMOUNT,\n    };\n\n    // ACCOUNT_2 unbonds ACCOUNT_1 by a direct auction contract call\n    let exec_request_1 = ExecuteRequestBuilder::contract_call_by_hash(\n        sender,\n        builder.get_auction_contract_hash(),\n        auction::METHOD_WITHDRAW_BID,\n        withdraw_bid_args.clone(),\n    )\n    .build();\n\n    builder.exec(exec_request_1).commit();\n\n    let error_1 = builder\n        .get_error()\n        .expect(\"attempt 1 should raise invalid context\");\n    assert!(\n        matches!(error_1, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(error_code))) if error_code == auction::Error::InvalidContext as u8),\n        \"{:?}\",\n        error_1\n    );\n\n    // ACCOUNT_2 unbonds ACCOUNT_1 through a proxy\n    let exec_request_2 = ExecuteRequestBuilder::contract_call_by_name(\n        sender,\n        CONTRACT_HASH_NAME,\n        METHOD_WITHDRAW_PROXY_CALL,\n        withdraw_bid_args,\n    )\n    .build();\n\n    builder.exec(exec_request_2).commit();\n\n    
let error_2 = builder\n        .get_error()\n        .expect(\"attempt 2 should raise invalid context\");\n    assert!(\n        matches!(error_2, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(error_code))) if error_code == auction::Error::InvalidContext as u8),\n        \"{:?}\",\n        error_2\n    );\n\n    let bids = builder.get_bids();\n    let account_1_bid_after = bids\n        .validator_bid(&ACCOUNT_1_PUBLIC_KEY)\n        .expect(\"after bid should exist\");\n\n    assert_eq!(\n        account_1_bid_after, account_1_bid_before,\n        \"bids before and after malicious attempt should be equal\"\n    );\n}\n\n#[ignore]\n#[test]\nfn regression_20210831_should_fail_to_undelegate_bid() {\n    let mut builder = setup();\n\n    let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        builder.get_auction_contract_hash(),\n        auction::METHOD_ADD_BID,\n        runtime_args! {\n            auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            auction::ARG_AMOUNT => *BID_AMOUNT,\n            auction::ARG_DELEGATION_RATE => BID_DELEGATION_RATE,\n        },\n    )\n    .build();\n\n    let delegate_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *ACCOUNT_1_ADDR,\n        builder.get_auction_contract_hash(),\n        auction::METHOD_DELEGATE,\n        runtime_args! 
{\n            auction::ARG_VALIDATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            auction::ARG_DELEGATOR => ACCOUNT_1_PUBLIC_KEY.clone(),\n            auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n        },\n    )\n    .build();\n\n    builder.exec(add_bid_request).expect_success().commit();\n    builder.exec(delegate_request).expect_success().commit();\n\n    let bids = builder.get_bids();\n    let default_account_bid_before = bids\n        .validator_bid(&DEFAULT_ACCOUNT_PUBLIC_KEY)\n        .expect(\"should have bid\");\n    assert_eq!(\n        builder.get_purse_balance(*default_account_bid_before.bonding_purse()),\n        *BID_AMOUNT,\n    );\n    assert!(\n        !default_account_bid_before.inactive(),\n        \"newly added bid should be active\"\n    );\n\n    let sender = *ACCOUNT_2_ADDR;\n    let undelegate_args = runtime_args! {\n        auction::ARG_VALIDATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n        auction::ARG_DELEGATOR => ACCOUNT_1_PUBLIC_KEY.clone(),\n        auction::ARG_AMOUNT => *BID_AMOUNT,\n    };\n\n    // ACCOUNT_2 undelegates ACCOUNT_1 by a direct auction contract call\n    let exec_request_1 = ExecuteRequestBuilder::contract_call_by_hash(\n        sender,\n        builder.get_auction_contract_hash(),\n        auction::METHOD_UNDELEGATE,\n        undelegate_args.clone(),\n    )\n    .build();\n\n    builder.exec(exec_request_1).commit();\n\n    let error_1 = builder\n        .get_error()\n        .expect(\"attempt 1 should raise invalid context\");\n    assert!(\n        matches!(error_1, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(error_code))) if error_code == auction::Error::InvalidContext as u8),\n        \"{:?}\",\n        error_1\n    );\n\n    // ACCOUNT_2 undelegates ACCOUNT_1 through a proxy\n    let exec_request_2 = ExecuteRequestBuilder::contract_call_by_name(\n        sender,\n        CONTRACT_HASH_NAME,\n        METHOD_UNDELEGATE_PROXY_CALL,\n        undelegate_args,\n    
)\n    .build();\n\n    builder.exec(exec_request_2).commit();\n\n    let error_2 = builder\n        .get_error()\n        .expect(\"attempt 2 should raise invalid context\");\n    assert!(\n        matches!(error_2, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(error_code))) if error_code == auction::Error::InvalidContext as u8),\n        \"{:?}\",\n        error_2\n    );\n\n    let bids = builder.get_bids();\n    let default_account_bid_after = bids\n        .validator_bid(&DEFAULT_ACCOUNT_PUBLIC_KEY)\n        .expect(\"should have bid\");\n\n    assert_eq!(\n        default_account_bid_after, default_account_bid_before,\n        \"bids before and after malicious attempt should be equal\"\n    );\n}\n\n#[ignore]\n#[test]\nfn regression_20210831_should_fail_to_activate_bid() {\n    let mut builder = setup();\n\n    let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        builder.get_auction_contract_hash(),\n        auction::METHOD_ADD_BID,\n        runtime_args! {\n            auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            auction::ARG_AMOUNT => *BID_AMOUNT,\n            auction::ARG_DELEGATION_RATE => BID_DELEGATION_RATE,\n        },\n    )\n    .build();\n\n    builder.exec(add_bid_request).expect_success().commit();\n\n    let bids = builder.get_bids();\n    let bid = bids\n        .validator_bid(&DEFAULT_ACCOUNT_PUBLIC_KEY)\n        .expect(\"should have bid\");\n    assert!(!bid.inactive());\n\n    let withdraw_bid_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        builder.get_auction_contract_hash(),\n        auction::METHOD_WITHDRAW_BID,\n        runtime_args! 
{\n            auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            auction::ARG_AMOUNT => *BID_AMOUNT,\n        },\n    )\n    .build();\n\n    builder.exec(withdraw_bid_request).expect_success().commit();\n\n    let bids = builder.get_bids();\n    let bid = bids.validator_bid(&DEFAULT_ACCOUNT_PUBLIC_KEY);\n    assert!(bid.is_none());\n\n    let sender = *ACCOUNT_2_ADDR;\n    let activate_bid_args = runtime_args! {\n        auction::ARG_VALIDATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n    };\n\n    let activate_bid_request_1 = ExecuteRequestBuilder::contract_call_by_hash(\n        sender,\n        builder.get_auction_contract_hash(),\n        auction::METHOD_ACTIVATE_BID,\n        activate_bid_args.clone(),\n    )\n    .build();\n\n    builder.exec(activate_bid_request_1);\n\n    let error_1 = builder\n        .get_error()\n        .expect(\"attempt 1 should raise invalid context\");\n    assert!(\n        matches!(error_1, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(error_code))) if error_code == auction::Error::InvalidContext as u8),\n        \"{:?}\",\n        error_1\n    );\n\n    let activate_bid_request_2 = ExecuteRequestBuilder::contract_call_by_name(\n        sender,\n        CONTRACT_HASH_NAME,\n        METHOD_ACTIVATE_BID_CALL,\n        activate_bid_args,\n    )\n    .build();\n\n    builder.exec(activate_bid_request_2).commit();\n\n    let error_2 = builder\n        .get_error()\n        .expect(\"attempt 2 should raise invalid context\");\n    assert!(\n        matches!(error_2, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(error_code))) if error_code == auction::Error::InvalidContext as u8),\n        \"{:?}\",\n        error_2\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/regression_20210924.rs",
    "content": "use casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR,\n    LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{runtime_args, Gas, RuntimeArgs, DEFAULT_NOP_COST, U512};\n\nuse crate::wasm_utils;\n\nconst ARG_AMOUNT: &str = \"amount\";\n\n#[ignore]\n#[test]\nfn should_charge_minimum_for_do_nothing_session() {\n    let minimum_deploy_payment = U512::from(0);\n\n    let account_hash = *DEFAULT_ACCOUNT_ADDR;\n    let session_args = RuntimeArgs::default();\n    let deploy_hash = [42; 32];\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(account_hash)\n        .with_session_bytes(wasm_utils::do_nothing_bytes(), session_args)\n        .with_standard_payment(runtime_args! {\n            ARG_AMOUNT => minimum_deploy_payment,\n        })\n        .with_authorization_keys(&[account_hash])\n        .with_deploy_hash(deploy_hash)\n        .build();\n\n    let do_nothing_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(do_nothing_request).commit();\n\n    let gas = builder.last_exec_gas_consumed();\n    assert_eq!(gas, Gas::zero());\n}\n\n#[ignore]\n#[test]\nfn should_execute_do_minimum_session() {\n    let minimum_deploy_payment = U512::from(DEFAULT_NOP_COST);\n\n    let account_hash = *DEFAULT_ACCOUNT_ADDR;\n    let session_args = RuntimeArgs::default();\n    let deploy_hash = [42; 32];\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(account_hash)\n        .with_session_bytes(wasm_utils::do_minimum_bytes(), session_args)\n        .with_standard_payment(runtime_args! 
{\n            ARG_AMOUNT => minimum_deploy_payment,\n        })\n        .with_authorization_keys(&[account_hash])\n        .with_deploy_hash(deploy_hash)\n        .build();\n\n    let do_minimum_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(do_minimum_request).expect_success().commit();\n\n    let gas = builder.last_exec_gas_consumed();\n    assert_eq!(gas, Gas::from(DEFAULT_NOP_COST));\n}\n\n#[ignore]\n#[test]\nfn should_charge_minimum_for_do_nothing_payment() {\n    let minimum_deploy_payment = U512::from(0);\n\n    let account_hash = *DEFAULT_ACCOUNT_ADDR;\n    let session_args = RuntimeArgs::default();\n    let deploy_hash = [42; 32];\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(account_hash)\n        .with_session_bytes(wasm_utils::do_nothing_bytes(), session_args)\n        .with_payment_bytes(\n            wasm_utils::do_nothing_bytes(),\n            runtime_args! {\n                ARG_AMOUNT => minimum_deploy_payment,\n            },\n        )\n        .with_authorization_keys(&[account_hash])\n        .with_deploy_hash(deploy_hash)\n        .build();\n\n    let do_nothing_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(do_nothing_request).commit();\n\n    let gas = builder.last_exec_gas_consumed();\n    assert_eq!(gas, Gas::zero());\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/regression_20211110.rs",
    "content": "use casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder,\n    LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::{engine_state::Error as CoreError, execution::ExecError};\nuse casper_types::{\n    account::AccountHash, runtime_args, system::standard_payment, AddressableEntityHash, Key, U512,\n};\n\nconst RECURSE_ENTRYPOINT: &str = \"recurse\";\nconst ARG_TARGET: &str = \"target\";\nconst CONTRACT_HASH_NAME: &str = \"regression-contract-hash\";\nconst REGRESSION_20211110_CONTRACT: &str = \"regression_20211110.wasm\";\n\nconst ACCOUNT_1_ADDR: AccountHash = AccountHash::new([111; 32]);\nconst INSTALL_COST: u64 = 40_000_000_000;\nconst STARTING_BALANCE: u64 = 100_000_000_000;\n\n#[ignore]\n#[test]\nfn regression_20211110() {\n    let mut funds: u64 = STARTING_BALANCE;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let transfer_request = TransferRequestBuilder::new(funds, ACCOUNT_1_ADDR).build();\n\n    let session_args = runtime_args! {};\n    let payment_args = runtime_args! 
{\n        standard_payment::ARG_AMOUNT => U512::from(INSTALL_COST)\n    };\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(ACCOUNT_1_ADDR)\n        .with_standard_payment(payment_args)\n        .with_session_code(REGRESSION_20211110_CONTRACT, session_args)\n        .with_authorization_keys(&[ACCOUNT_1_ADDR])\n        .with_deploy_hash([42; 32])\n        .build();\n\n    let install_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder\n        .transfer_and_commit(transfer_request)\n        .expect_success();\n    builder.exec(install_request).expect_success().commit();\n\n    funds = funds.checked_sub(INSTALL_COST).unwrap();\n\n    let contract_hash = match builder\n        .get_entity_with_named_keys_by_account_hash(ACCOUNT_1_ADDR)\n        .unwrap()\n        .named_keys()\n        .get(CONTRACT_HASH_NAME)\n        .unwrap()\n    {\n        Key::AddressableEntity(entity_addr) => AddressableEntityHash::new(entity_addr.value()),\n        _ => panic!(\"Couldn't find regression contract.\"),\n    };\n\n    let payment_args = runtime_args! {\n        standard_payment::ARG_AMOUNT => U512::from(funds),\n    };\n    let session_args = runtime_args! {\n        ARG_TARGET => contract_hash\n    };\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(ACCOUNT_1_ADDR)\n        .with_standard_payment(payment_args)\n        .with_stored_session_hash(contract_hash, RECURSE_ENTRYPOINT, session_args)\n        .with_authorization_keys(&[ACCOUNT_1_ADDR])\n        .with_deploy_hash([43; 32])\n        .build();\n    let recurse_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(recurse_request).expect_failure();\n\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(matches!(\n        error,\n        CoreError::Exec(ExecError::RuntimeStackOverflow)\n    ));\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/regression_20220119.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::RuntimeArgs;\n\nconst REGRESSION_20220119_CONTRACT: &str = \"regression_20220119.wasm\";\n\n#[ignore]\n#[test]\nfn should_create_purse() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        REGRESSION_20220119_CONTRACT,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/regression_20220204.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::{engine_state, execution::ExecError};\nuse casper_types::{runtime_args, AccessRights, RuntimeArgs};\n\nconst REGRESSION_20220204_CONTRACT: &str = \"regression_20220204.wasm\";\nconst REGRESSION_20220204_CALL_CONTRACT: &str = \"regression_20220204_call.wasm\";\nconst REGRESSION_20220204_NONTRIVIAL_CONTRACT: &str = \"regression_20220204_nontrivial.wasm\";\n\nconst NONTRIVIAL_ARG_AS_CONTRACT: &str = \"nontrivial_arg_as_contract\";\nconst ARG_ENTRYPOINT: &str = \"entrypoint\";\nconst ARG_PURSE: &str = \"purse\";\nconst ARG_NEW_ACCESS_RIGHTS: &str = \"new_access_rights\";\nconst TRANSFER_AS_CONTRACT: &str = \"transfer_as_contract\";\n\nconst CONTRACT_HASH_NAME: &str = \"regression-contract-hash\";\n\n#[ignore]\n#[test]\nfn regression_20220204_as_contract() {\n    let contract = REGRESSION_20220204_CALL_CONTRACT;\n    let entrypoint = TRANSFER_AS_CONTRACT;\n    let new_access_rights = AccessRights::READ_ADD_WRITE;\n    let expected = AccessRights::READ_ADD_WRITE;\n    let mut builder = setup();\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract,\n        runtime_args! 
{\n            ARG_NEW_ACCESS_RIGHTS => new_access_rights.bits(),\n            ARG_ENTRYPOINT => entrypoint,\n        },\n    )\n    .build();\n    builder.exec(exec_request_2).commit();\n    let account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n    let main_purse = account.main_purse().with_access_rights(expected);\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::Exec(ExecError::ForgedReference(forged_uref))\n            if forged_uref == main_purse\n        ),\n        \"Expected revert but received {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn regression_20220204_as_contract_attenuated() {\n    let contract = REGRESSION_20220204_CALL_CONTRACT;\n    let entrypoint = TRANSFER_AS_CONTRACT;\n    let new_access_rights = AccessRights::READ;\n    let expected = AccessRights::READ_ADD_WRITE;\n    let mut builder = setup();\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract,\n        runtime_args! 
{\n            ARG_NEW_ACCESS_RIGHTS => new_access_rights.bits(),\n            ARG_ENTRYPOINT => entrypoint,\n        },\n    )\n    .build();\n    builder.exec(exec_request_2).commit();\n    let account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n    let main_purse = account.main_purse().with_access_rights(expected);\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::Exec(ExecError::ForgedReference(forged_uref))\n            if forged_uref == main_purse\n        ),\n        \"Expected revert but received {:?}\",\n        error\n    );\n    let contract = REGRESSION_20220204_CALL_CONTRACT;\n    let entrypoint = TRANSFER_AS_CONTRACT;\n    let new_access_rights = AccessRights::WRITE;\n    let expected = AccessRights::READ_ADD_WRITE;\n    let mut builder = setup();\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract,\n        runtime_args! 
{\n            ARG_NEW_ACCESS_RIGHTS => new_access_rights.bits(),\n            ARG_ENTRYPOINT => entrypoint,\n        },\n    )\n    .build();\n    builder.exec(exec_request_2).commit();\n    let account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n    let main_purse = account.main_purse().with_access_rights(expected);\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::Exec(ExecError::ForgedReference(forged_uref))\n            if forged_uref == main_purse\n        ),\n        \"Expected revert but received {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn regression_20220204_as_contract_by_hash() {\n    let entrypoint = TRANSFER_AS_CONTRACT;\n    let new_access_rights = AccessRights::READ_ADD_WRITE;\n    let expected = AccessRights::READ_ADD_WRITE;\n    let mut builder = setup();\n    let account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n    let main_purse = account.main_purse();\n    let exec_request = ExecuteRequestBuilder::contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_HASH_NAME,\n        entrypoint,\n        runtime_args! 
{\n            ARG_NEW_ACCESS_RIGHTS => new_access_rights.bits(),\n            ARG_PURSE => main_purse,\n        },\n    )\n    .build();\n    builder.exec(exec_request).commit();\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::Exec(ExecError::ForgedReference(forged_uref))\n            if forged_uref == main_purse.with_access_rights(expected)\n        ),\n        \"Expected revert but received {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn regression_20220204_nontrivial_arg_as_contract() {\n    let contract = REGRESSION_20220204_NONTRIVIAL_CONTRACT;\n    let entrypoint = NONTRIVIAL_ARG_AS_CONTRACT;\n    let new_access_rights = AccessRights::READ_ADD_WRITE;\n    let expected = AccessRights::READ_ADD_WRITE;\n    let mut builder = setup();\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract,\n        runtime_args! 
{\n            ARG_NEW_ACCESS_RIGHTS => new_access_rights.bits(),\n            ARG_ENTRYPOINT => entrypoint,\n        },\n    )\n    .build();\n    builder.exec(exec_request_2).commit();\n    let account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n    let main_purse = account.main_purse().with_access_rights(expected);\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::Exec(ExecError::ForgedReference(forged_uref))\n            if forged_uref == main_purse\n        ),\n        \"Expected revert but received {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn regression_20220204_as_contract_by_hash_attenuated() {\n    let entrypoint = TRANSFER_AS_CONTRACT;\n    let new_access_rights = AccessRights::READ;\n    let expected = AccessRights::READ_ADD_WRITE;\n    let mut builder = setup();\n    let account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n    let main_purse = account.main_purse();\n    let exec_request = ExecuteRequestBuilder::contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_HASH_NAME,\n        entrypoint,\n        runtime_args! 
{\n            ARG_NEW_ACCESS_RIGHTS => new_access_rights.bits(),\n            ARG_PURSE => main_purse,\n        },\n    )\n    .build();\n    builder.exec(exec_request).commit();\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::Exec(ExecError::ForgedReference(forged_uref))\n            if forged_uref == main_purse.with_access_rights(expected)\n        ),\n        \"Expected revert but received {:?}\",\n        error\n    );\n    let entrypoint = TRANSFER_AS_CONTRACT;\n    let new_access_rights = AccessRights::WRITE;\n    let expected = AccessRights::READ_ADD_WRITE;\n    let mut builder = setup();\n    let account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n    let main_purse = account.main_purse();\n    let exec_request = ExecuteRequestBuilder::contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_HASH_NAME,\n        entrypoint,\n        runtime_args! 
{\n            ARG_NEW_ACCESS_RIGHTS => new_access_rights.bits(),\n            ARG_PURSE => main_purse,\n        },\n    )\n    .build();\n    builder.exec(exec_request).commit();\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::Exec(ExecError::ForgedReference(forged_uref))\n            if forged_uref == main_purse.with_access_rights(expected)\n        ),\n        \"Expected revert but received {:?}\",\n        error\n    );\n}\n\nfn setup() -> LmdbWasmTestBuilder {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n    let install_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        REGRESSION_20220204_CONTRACT,\n        RuntimeArgs::default(),\n    )\n    .build();\n    builder.exec(install_request).expect_success().commit();\n\n    builder\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/regression_20220207.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::{engine_state::Error, execution::ExecError};\nuse casper_types::{account::AccountHash, runtime_args, system::mint, ApiError, U512};\n\nconst REGRESSION_20220207_CONTRACT: &str = \"regression_20220207.wasm\";\nconst ARG_AMOUNT_TO_SEND: &str = \"amount_to_send\";\nconst ACCOUNT_1_ADDR: AccountHash = AccountHash::new([111; 32]);\n\nconst UNAPPROVED_SPENDING_AMOUNT_ERR: Error = Error::Exec(ExecError::Revert(ApiError::Mint(\n    mint::Error::UnapprovedSpendingAmount as u8,\n)));\n\n#[ignore]\n#[test]\nfn should_not_transfer_above_approved_limit() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let args = runtime_args! {\n        mint::ARG_AMOUNT => U512::from(1000u64), // What we approved.\n        ARG_AMOUNT_TO_SEND => U512::from(1100u64), // What contract is trying to send.\n        mint::ARG_TARGET => ACCOUNT_1_ADDR,\n    };\n\n    let exec_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, REGRESSION_20220207_CONTRACT, args)\n            .build();\n\n    builder\n        .exec(exec_request)\n        .assert_error(UNAPPROVED_SPENDING_AMOUNT_ERR);\n}\n\n#[ignore]\n#[test]\nfn should_transfer_within_approved_limit() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let args = runtime_args! 
{\n        mint::ARG_AMOUNT => U512::from(1000u64),\n        ARG_AMOUNT_TO_SEND => U512::from(100u64),\n        mint::ARG_TARGET => ACCOUNT_1_ADDR,\n    };\n\n    let exec_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, REGRESSION_20220207_CONTRACT, args)\n            .build();\n\n    builder.exec(exec_request).expect_success();\n}\n\n#[ignore]\n#[test]\nfn should_fail_without_amount_arg() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let args = runtime_args! {\n        // If `amount` arg is absent, host assumes that limit is 0.\n        // This should fail then.\n        ARG_AMOUNT_TO_SEND => U512::from(100u64),\n        mint::ARG_TARGET => ACCOUNT_1_ADDR,\n    };\n\n    let exec_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, REGRESSION_20220207_CONTRACT, args)\n            .build();\n\n    builder\n        .exec(exec_request)\n        .assert_error(UNAPPROVED_SPENDING_AMOUNT_ERR);\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/regression_20220208.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::{engine_state::Error, execution::ExecError};\nuse casper_types::{account::AccountHash, runtime_args, system::mint, ApiError, U512};\n\nconst REGRESSION_20220208_CONTRACT: &str = \"regression_20220208.wasm\";\nconst ARG_AMOUNT_PART_1: &str = \"amount_part_1\";\nconst ARG_AMOUNT_PART_2: &str = \"amount_part_2\";\nconst ACCOUNT_1_ADDR: AccountHash = AccountHash::new([111; 32]);\n\nconst UNAPPROVED_SPENDING_AMOUNT_ERR: Error = Error::Exec(ExecError::Revert(ApiError::Mint(\n    mint::Error::UnapprovedSpendingAmount as u8,\n)));\n\n#[ignore]\n#[test]\nfn should_transfer_within_approved_limit_multiple_transfers() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let part_1 = U512::from(100u64);\n    let part_2 = U512::from(100u64);\n    let transfers_limit = part_1 + part_2;\n\n    let args = runtime_args! {\n        ARG_AMOUNT_PART_1 => part_1,\n        ARG_AMOUNT_PART_2 => part_2,\n        mint::ARG_AMOUNT => transfers_limit,\n        mint::ARG_TARGET => ACCOUNT_1_ADDR,\n    };\n\n    let exec_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, REGRESSION_20220208_CONTRACT, args)\n            .build();\n\n    builder.exec(exec_request).expect_success();\n}\n\n#[ignore]\n#[test]\nfn should_not_transfer_above_approved_limit_multiple_transfers() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let part_1 = U512::from(100u64);\n    let part_2 = U512::from(100u64);\n    let transfers_limit = part_1 + part_2 - U512::one();\n\n    let args = runtime_args! 
{\n        ARG_AMOUNT_PART_1 => part_1,\n        ARG_AMOUNT_PART_2 => part_2,\n        mint::ARG_AMOUNT => transfers_limit,\n        mint::ARG_TARGET => ACCOUNT_1_ADDR,\n    };\n\n    let exec_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, REGRESSION_20220208_CONTRACT, args)\n            .build();\n\n    builder\n        .exec(exec_request)\n        .assert_error(UNAPPROVED_SPENDING_AMOUNT_ERR);\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/regression_20220211.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::{engine_state, execution::ExecError};\nuse casper_types::{runtime_args, AccessRights, RuntimeArgs, URef};\n\nconst REGRESSION_20220211_CONTRACT: &str = \"regression_20220211.wasm\";\nconst REGRESSION_20220211_CALL_CONTRACT: &str = \"regression_20220211_call.wasm\";\nconst RET_AS_CONTRACT: &str = \"ret_as_contract\";\nconst RET_AS_SESSION: &str = \"ret_as_contract\";\nconst PUT_KEY_AS_SESSION: &str = \"put_key_as_session\";\nconst PUT_KEY_AS_CONTRACT: &str = \"put_key_as_contract\";\nconst READ_AS_SESSION: &str = \"read_as_session\";\nconst READ_AS_CONTRACT: &str = \"read_as_contract\";\nconst WRITE_AS_SESSION: &str = \"write_as_session\";\nconst WRITE_AS_CONTRACT: &str = \"write_as_contract\";\nconst ADD_AS_SESSION: &str = \"add_as_session\";\nconst ADD_AS_CONTRACT: &str = \"add_as_contract\";\nconst ARG_ENTRYPOINT: &str = \"entrypoint\";\n\n#[ignore]\n#[test]\nfn regression_20220211_ret_as_contract() {\n    test(RET_AS_CONTRACT);\n}\n\n#[ignore]\n#[test]\nfn regression_20220211_ret_as_session() {\n    test(RET_AS_SESSION);\n}\n\nfn setup() -> LmdbWasmTestBuilder {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n    let install_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        REGRESSION_20220211_CONTRACT,\n        RuntimeArgs::default(),\n    )\n    .build();\n    builder.exec(install_request).expect_success().commit();\n\n    builder\n}\n\nfn test(entrypoint: &str) {\n    let mut builder = setup();\n\n    let expected_forged_uref = URef::default().with_access_rights(AccessRights::READ_ADD_WRITE);\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        REGRESSION_20220211_CALL_CONTRACT,\n        runtime_args! 
{\n            ARG_ENTRYPOINT => entrypoint,\n        },\n    )\n    .build();\n    builder.exec(exec_request).commit();\n\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::Exec(ExecError::ForgedReference(forged_uref))\n            if forged_uref == expected_forged_uref\n        ),\n        \"Expected revert but received {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn regression_20220211_put_key_as_session() {\n    test(PUT_KEY_AS_SESSION);\n}\n\n#[ignore]\n#[test]\nfn regression_20220211_put_key_as_contract() {\n    test(PUT_KEY_AS_CONTRACT);\n}\n\n#[ignore]\n#[test]\nfn regression_20220211_read_as_session() {\n    test(READ_AS_SESSION);\n}\n\n#[ignore]\n#[test]\nfn regression_20220211_read_as_contract() {\n    test(READ_AS_CONTRACT);\n}\n\n#[ignore]\n#[test]\nfn regression_20220211_write_as_session() {\n    test(WRITE_AS_SESSION);\n}\n\n#[ignore]\n#[test]\nfn regression_20220211_write_as_contract() {\n    test(WRITE_AS_CONTRACT);\n}\n\n#[ignore]\n#[test]\nfn regression_20220211_add_as_session() {\n    test(ADD_AS_SESSION);\n}\n\n#[ignore]\n#[test]\nfn regression_20220211_add_as_contract() {\n    test(ADD_AS_CONTRACT);\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/regression_20220217.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder, DEFAULT_ACCOUNT_ADDR,\n    LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse casper_execution_engine::{engine_state, execution::ExecError};\nuse casper_types::{account::AccountHash, runtime_args, system::mint, AccessRights, URef, U512};\n\nconst TRANSFER_TO_NAMED_PURSE_CONTRACT: &str = \"transfer_to_named_purse.wasm\";\n\nconst ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]);\nconst ARG_PURSE_NAME: &str = \"purse_name\";\nconst ARG_AMOUNT: &str = \"amount\";\nconst DEFAULT_PURSE_BALANCE: u64 = 1_000_000_000;\nconst PURSE_1: &str = \"purse_1\";\nconst PURSE_2: &str = \"purse_2\";\nconst ACCOUNT_1_PURSE: &str = \"purse_3\";\n\n#[ignore]\n#[test]\nfn regression_20220217_transfer_mint_by_hash_from_main_purse() {\n    let mut builder = setup();\n\n    let default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n    let default_purse = default_account.main_purse();\n    let account_1 = builder\n        .get_entity_by_account_hash(ACCOUNT_1_ADDR)\n        .expect(\"should have account\");\n    let account_1_purse = account_1.main_purse();\n\n    let mint_hash = builder.get_mint_contract_hash();\n\n    let exec_request_2 = ExecuteRequestBuilder::contract_call_by_hash(\n        ACCOUNT_1_ADDR,\n        mint_hash,\n        mint::METHOD_TRANSFER,\n        runtime_args! 
{\n            mint::ARG_TO => Some(ACCOUNT_1_ADDR),\n            mint::ARG_SOURCE => default_purse,\n            mint::ARG_TARGET => account_1_purse,\n            mint::ARG_AMOUNT => U512::one(),\n            mint::ARG_ID => Some(1u64),\n        },\n    )\n    .build();\n    builder.exec(exec_request_2).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::Exec(ExecError::ForgedReference(forged_uref))\n            if forged_uref == default_purse.with_access_rights(AccessRights::READ_ADD_WRITE),\n        ),\n        \"Expected {:?} revert but received {:?}\",\n        default_purse,\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn regression_20220217_transfer_mint_by_package_hash_from_main_purse() {\n    let mut builder = setup();\n\n    let default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n    let default_purse = default_account.main_purse();\n    let account_1 = builder\n        .get_entity_by_account_hash(ACCOUNT_1_ADDR)\n        .expect(\"should have account\");\n    let account_1_purse = account_1.main_purse();\n\n    let mint_hash = builder.get_mint_contract_hash();\n\n    let mint = builder\n        .get_addressable_entity(mint_hash)\n        .expect(\"should have mint contract\");\n    let mint_package_hash = mint.package_hash();\n\n    let exec_request = ExecuteRequestBuilder::versioned_contract_call_by_hash(\n        ACCOUNT_1_ADDR,\n        mint_package_hash,\n        None,\n        mint::METHOD_TRANSFER,\n        runtime_args! 
{\n            mint::ARG_TO => Some(ACCOUNT_1_ADDR),\n            mint::ARG_SOURCE => default_purse,\n            mint::ARG_TARGET => account_1_purse,\n            mint::ARG_AMOUNT => U512::one(),\n            mint::ARG_ID => Some(1u64),\n        },\n    )\n    .build();\n    builder.exec(exec_request).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::Exec(ExecError::ForgedReference(forged_uref))\n            if forged_uref == default_purse.with_access_rights(AccessRights::READ_ADD_WRITE),\n        ),\n        \"Expected {:?} revert but received {:?}\",\n        default_purse,\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn regression_20220217_mint_by_hash_transfer_from_other_purse() {\n    let mut builder = setup();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n    let purse_1 = account\n        .named_keys()\n        .get(PURSE_1)\n        .unwrap()\n        .into_uref()\n        .expect(\"should have purse 1\");\n    let purse_2 = account\n        .named_keys()\n        .get(PURSE_2)\n        .unwrap()\n        .into_uref()\n        .expect(\"should have purse 2\");\n\n    let mint_hash = builder.get_mint_contract_hash();\n\n    let exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        mint_hash,\n        mint::METHOD_TRANSFER,\n        runtime_args! 
{\n            mint::ARG_TO => Some(ACCOUNT_1_ADDR),\n            mint::ARG_SOURCE => purse_1,\n            mint::ARG_TARGET => purse_2,\n            mint::ARG_AMOUNT => U512::one(),\n            mint::ARG_ID => Some(1u64),\n        },\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn regression_20220217_mint_by_hash_transfer_from_someones_purse() {\n    let mut builder = setup();\n\n    let account_1 = builder\n        .get_entity_with_named_keys_by_account_hash(ACCOUNT_1_ADDR)\n        .expect(\"should have account\");\n    let account_1_purse = account_1\n        .named_keys()\n        .get(ACCOUNT_1_PURSE)\n        .unwrap()\n        .into_uref()\n        .expect(\"should have account main purse\");\n\n    let default_account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let purse_1 = default_account\n        .named_keys()\n        .get(PURSE_1)\n        .unwrap()\n        .into_uref()\n        .expect(\"should have purse 1\");\n\n    let mint_hash = builder.get_mint_contract_hash();\n\n    let exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n        ACCOUNT_1_ADDR,\n        mint_hash,\n        mint::METHOD_TRANSFER,\n        runtime_args! 
{\n            mint::ARG_SOURCE => purse_1,\n            mint::ARG_AMOUNT => U512::one(),\n            mint::ARG_TARGET => account_1_purse,\n            mint::ARG_TO => Some(ACCOUNT_1_ADDR),\n        },\n    )\n    .build();\n\n    builder.exec(exec_request).commit();\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::Exec(ExecError::ForgedReference(forged_uref))\n            if forged_uref == purse_1\n        ),\n        \"Expected forged uref but received {:?}\",\n        error,\n    );\n}\n\n#[ignore]\n#[test]\nfn regression_20220217_should_not_transfer_funds_on_unrelated_purses() {\n    let mut builder = setup();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n    let purse_1 = account\n        .named_keys()\n        .get(PURSE_1)\n        .unwrap()\n        .into_uref()\n        .expect(\"should have purse 1\");\n    let purse_2 = account\n        .named_keys()\n        .get(PURSE_2)\n        .unwrap()\n        .into_uref()\n        .expect(\"should have purse 2\");\n\n    let mint_hash = builder.get_mint_contract_hash();\n\n    let exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n        ACCOUNT_1_ADDR,\n        mint_hash,\n        mint::METHOD_TRANSFER,\n        runtime_args! 
{\n            mint::ARG_TO => Some(*DEFAULT_ACCOUNT_ADDR),\n            mint::ARG_SOURCE => purse_1,\n            mint::ARG_TARGET => purse_2,\n            mint::ARG_AMOUNT => U512::one(),\n            mint::ARG_ID => Some(1u64),\n        },\n    )\n    .build();\n    builder.exec(exec_request).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::Exec(ExecError::ForgedReference(forged_uref))\n            if forged_uref == purse_1\n        ),\n        \"Expected forged uref but received {:?}\",\n        error,\n    );\n}\n\nfn setup() -> LmdbWasmTestBuilder {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let fund_account_1_request =\n        TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, ACCOUNT_1_ADDR).build();\n    let fund_purse_1_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        TRANSFER_TO_NAMED_PURSE_CONTRACT,\n        runtime_args! {\n            ARG_PURSE_NAME => PURSE_1,\n            ARG_AMOUNT => U512::from(DEFAULT_PURSE_BALANCE),\n        },\n    )\n    .build();\n    let fund_purse_2_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        TRANSFER_TO_NAMED_PURSE_CONTRACT,\n        runtime_args! {\n            ARG_PURSE_NAME => PURSE_2,\n            ARG_AMOUNT => U512::from(DEFAULT_PURSE_BALANCE),\n        },\n    )\n    .build();\n    let fund_purse_3_request = ExecuteRequestBuilder::standard(\n        ACCOUNT_1_ADDR,\n        TRANSFER_TO_NAMED_PURSE_CONTRACT,\n        runtime_args! 
{\n            ARG_PURSE_NAME => ACCOUNT_1_PURSE,\n            ARG_AMOUNT => U512::from(DEFAULT_PURSE_BALANCE),\n        },\n    )\n    .build();\n\n    builder\n        .transfer_and_commit(fund_account_1_request)\n        .expect_success();\n    builder.exec(fund_purse_1_request).expect_success().commit();\n    builder.exec(fund_purse_2_request).expect_success().commit();\n    builder.exec(fund_purse_3_request).expect_success().commit();\n    builder\n}\n\n#[ignore]\n#[test]\nfn regression_20220217_auction_add_bid_directly() {\n    let mut builder = setup();\n\n    let default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n    let default_purse = default_account.main_purse();\n    let account_1 = builder\n        .get_entity_by_account_hash(ACCOUNT_1_ADDR)\n        .expect(\"should have account\");\n    let account_1_purse = account_1.main_purse();\n\n    let mint_hash = builder.get_mint_contract_hash();\n\n    let exec_request_2 = ExecuteRequestBuilder::contract_call_by_hash(\n        ACCOUNT_1_ADDR,\n        mint_hash,\n        mint::METHOD_TRANSFER,\n        runtime_args! 
{\n            mint::ARG_TO => Some(ACCOUNT_1_ADDR),\n            mint::ARG_SOURCE => default_purse,\n            mint::ARG_TARGET => account_1_purse,\n            mint::ARG_AMOUNT => U512::one(),\n            mint::ARG_ID => Some(1u64),\n        },\n    )\n    .build();\n    builder.exec(exec_request_2).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::Exec(ExecError::ForgedReference(forged_uref))\n            if forged_uref == default_purse.with_access_rights(AccessRights::READ_ADD_WRITE),\n        ),\n        \"Expected {:?} revert but received {:?}\",\n        default_purse,\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn regression_20220217_() {\n    let mut builder = setup();\n\n    let default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n    let default_purse = default_account.main_purse();\n    let account_1 = builder\n        .get_entity_by_account_hash(ACCOUNT_1_ADDR)\n        .expect(\"should have account\");\n    let account_1_purse = account_1.main_purse();\n\n    let mint_hash = builder.get_mint_contract_hash();\n\n    let exec_request_2 = ExecuteRequestBuilder::contract_call_by_hash(\n        ACCOUNT_1_ADDR,\n        mint_hash,\n        mint::METHOD_TRANSFER,\n        runtime_args! 
{\n            mint::ARG_TO => Some(ACCOUNT_1_ADDR),\n            mint::ARG_SOURCE => default_purse,\n            mint::ARG_TARGET => account_1_purse,\n            mint::ARG_AMOUNT => U512::one(),\n            mint::ARG_ID => Some(1u64),\n        },\n    )\n    .build();\n    builder.exec(exec_request_2).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::Exec(ExecError::ForgedReference(forged_uref))\n            if forged_uref == default_purse.with_access_rights(AccessRights::READ_ADD_WRITE),\n        ),\n        \"Expected {:?} revert but received {:?}\",\n        default_purse,\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn mint_by_hash_transfer_should_fail_because_lack_of_target_uref_access() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let mint_hash = builder.get_mint_contract_hash();\n\n    let source = default_account.main_purse();\n    // This could be any URef to which the caller has no access rights.\n    let target = URef::default();\n\n    let id = Some(0u64);\n\n    let transfer_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        mint_hash,\n        mint::METHOD_TRANSFER,\n        runtime_args! {\n            mint::ARG_TO => Some(*DEFAULT_ACCOUNT_ADDR),\n            mint::ARG_SOURCE => source,\n            mint::ARG_TARGET => target,\n            mint::ARG_AMOUNT => U512::one(),\n            mint::ARG_ID => id,\n        },\n    )\n    .build();\n\n    // Previously we would allow deposit in this flow to a purse without explicit ADD access. 
We\n    // still allow that in some other flows, but due to code complexity, this is no longer\n    // supported.\n    builder.exec(transfer_request).expect_failure();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/regression_20220221.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, StepRequestBuilder, TransferRequestBuilder,\n    UpgradeRequestBuilder, DEFAULT_AUCTION_DELAY, DEFAULT_GENESIS_TIMESTAMP_MILLIS,\n    DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_PROTOCOL_VERSION, LOCAL_GENESIS_REQUEST,\n    MINIMUM_ACCOUNT_CREATION_BALANCE, TIMESTAMP_MILLIS_INCREMENT,\n};\nuse casper_execution_engine::engine_state::DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT;\nuse casper_types::{\n    runtime_args,\n    system::auction::{self, DelegationRate, INITIAL_ERA_ID},\n    EraId, ProtocolVersion, PublicKey, SecretKey, U256, U512,\n};\n\nconst VALIDATOR_STAKE: u64 = 1_000_000_000;\n\nconst DEFAULT_ACTIVATION_POINT: EraId = EraId::new(1);\n\nconst OLD_PROTOCOL_VERSION: ProtocolVersion = DEFAULT_PROTOCOL_VERSION;\nconst NEW_PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::from_parts(\n    OLD_PROTOCOL_VERSION.value().major,\n    OLD_PROTOCOL_VERSION.value().minor,\n    OLD_PROTOCOL_VERSION.value().patch + 1,\n);\n\nfn generate_secret_keys() -> impl Iterator<Item = SecretKey> {\n    (1u64..).map(|i| {\n        let u256 = U256::from(i);\n        let mut u256_bytes = [0u8; 32];\n        u256.to_big_endian(&mut u256_bytes);\n        SecretKey::ed25519_from_bytes(u256_bytes).expect(\"should create secret key\")\n    })\n}\n\nfn generate_public_keys() -> impl Iterator<Item = PublicKey> {\n    generate_secret_keys().map(|secret_key| PublicKey::from(&secret_key))\n}\n\n#[ignore]\n#[test]\nfn regression_20220221_should_distribute_to_many_validators() {\n    // distribute funds in a scenario where validator slots is greater than or equal to max runtime\n    // stack height\n\n    let mut public_keys = generate_public_keys();\n\n    let fund_request =\n        TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, PublicKey::System).build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let mut 
upgrade_request = UpgradeRequestBuilder::default()\n        .with_new_validator_slots(DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT + 1)\n        .with_pre_state_hash(builder.get_post_state_hash())\n        .with_current_protocol_version(OLD_PROTOCOL_VERSION)\n        .with_new_protocol_version(NEW_PROTOCOL_VERSION)\n        .with_activation_point(DEFAULT_ACTIVATION_POINT)\n        .build();\n\n    builder.upgrade(&mut upgrade_request);\n\n    builder.transfer_and_commit(fund_request).expect_success();\n\n    // Add validators\n    for _ in 0..DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT {\n        let public_key = public_keys.next().unwrap();\n\n        let transfer_request = TransferRequestBuilder::new(\n            MINIMUM_ACCOUNT_CREATION_BALANCE / 10,\n            public_key.to_account_hash(),\n        )\n        .build();\n\n        builder\n            .transfer_and_commit(transfer_request)\n            .expect_success();\n\n        let delegation_rate: DelegationRate = 10;\n\n        let session_args = runtime_args! 
{\n            auction::ARG_PUBLIC_KEY => public_key.clone(),\n            auction::ARG_AMOUNT => U512::from(VALIDATOR_STAKE),\n            auction::ARG_DELEGATION_RATE => delegation_rate,\n        };\n\n        let execute_request = ExecuteRequestBuilder::contract_call_by_hash(\n            public_key.to_account_hash(),\n            builder.get_auction_contract_hash(),\n            auction::METHOD_ADD_BID,\n            session_args,\n        )\n        .build();\n\n        builder.exec(execute_request).expect_success().commit();\n    }\n\n    let mut timestamp_millis =\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS;\n\n    for _ in 0..=DEFAULT_AUCTION_DELAY {\n        builder.run_auction(timestamp_millis, Vec::new());\n        timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n    }\n\n    let era_validators = builder.get_era_validators();\n\n    assert!(!era_validators.is_empty());\n\n    let (era_id, trusted_era_validators) = era_validators\n        .into_iter()\n        .last()\n        .expect(\"should have last element\");\n    assert!(era_id > INITIAL_ERA_ID, \"{}\", era_id);\n\n    let step_request = StepRequestBuilder::new()\n        .with_parent_state_hash(builder.get_post_state_hash())\n        .with_protocol_version(NEW_PROTOCOL_VERSION)\n        // Next era id is used for returning future era validators, which we don't need to inspect\n        // in this test.\n        .with_next_era_id(era_id)\n        .with_era_end_timestamp_millis(timestamp_millis);\n\n    assert_eq!(\n        trusted_era_validators.len(),\n        DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT as usize\n    );\n\n    let step_request = step_request.build();\n\n    assert!(builder.step(step_request).is_success(), \"should run step\");\n\n    builder.run_auction(timestamp_millis, Vec::new());\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/regression_20220222.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder, DEFAULT_ACCOUNT_ADDR,\n    LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse casper_execution_engine::{engine_state, execution::ExecError};\nuse casper_types::{account::AccountHash, runtime_args, U512};\n\nconst ALICE_ADDR: AccountHash = AccountHash::new([42; 32]);\n\n#[ignore]\n#[test]\nfn regression_20220222_escalate() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let transfer_request =\n        TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, ALICE_ADDR).build();\n\n    builder\n        .transfer_and_commit(transfer_request)\n        .expect_success();\n\n    let alice = builder\n        .get_entity_by_account_hash(ALICE_ADDR)\n        .expect(\"should have account\");\n\n    let alice_main_purse = alice.main_purse();\n\n    // Getting main purse URef to verify transfer\n    let _source_purse = builder\n        .get_expected_addressable_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .main_purse();\n\n    let session_args = runtime_args! {\n        \"alice_purse_addr\" => alice_main_purse.addr(),\n        \"amount\" => U512::MAX,\n    };\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        \"regression_20220222.wasm\",\n        session_args,\n    )\n    .build();\n    builder.exec(exec_request).expect_failure();\n\n    let error = builder.get_error().expect(\"should have error\");\n\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::Exec(ExecError::ForgedReference(forged_uref))\n            if forged_uref == alice_main_purse.into_add()\n        ),\n        \"Expected revert but received {:?}\",\n        error\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/regression_20220223.rs",
    "content": "use casper_types::system::mint;\nuse once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_PUBLIC_KEY,\n    LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse casper_execution_engine::{\n    engine_state, engine_state::engine_config::DEFAULT_MINIMUM_DELEGATION_AMOUNT,\n    execution::ExecError,\n};\nuse casper_types::{\n    self,\n    account::AccountHash,\n    api_error::ApiError,\n    runtime_args,\n    system::auction::{\n        DelegationRate, ARG_AMOUNT, ARG_DELEGATION_RATE, ARG_DELEGATOR, ARG_PUBLIC_KEY,\n        ARG_VALIDATOR,\n    },\n    PublicKey, SecretKey, U512,\n};\n\nconst CONTRACT_TRANSFER_TO_ACCOUNT: &str = \"transfer_to_account_u512.wasm\";\nconst CONTRACT_TRANSFER_TO_NAMED_PURSE: &str = \"transfer_to_named_purse.wasm\";\n\nconst CONTRACT_REGRESSION_ADD_BID: &str = \"regression_add_bid.wasm\";\nconst CONTRACT_ADD_BID: &str = \"add_bid.wasm\";\n\nconst CONTRACT_REGRESSION_DELEGATE: &str = \"regression_delegate.wasm\";\nconst CONTRACT_DELEGATE: &str = \"delegate.wasm\";\n\nconst CONTRACT_REGRESSION_TRANSFER: &str = \"regression_transfer.wasm\";\n\nconst ARG_TARGET: &str = \"target\";\nconst ARG_PURSE_NAME: &str = \"purse_name\";\nconst TEST_PURSE: &str = \"test_purse\";\nconst TRANSFER_AMOUNT: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE + 1000;\nconst ADD_BID_AMOUNT_1: u64 = 95_000;\nconst ADD_BID_DELEGATION_RATE_1: DelegationRate = 10;\nconst DELEGATE_AMOUNT_1: u64 = 125_000 + DEFAULT_MINIMUM_DELEGATION_AMOUNT;\n\nstatic VALIDATOR_1_PUBLIC_KEY: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic VALIDATOR_1_ADDR: Lazy<AccountHash> =\n    Lazy::new(|| AccountHash::from(&*VALIDATOR_1_PUBLIC_KEY));\n\n#[ignore]\n#[test]\nfn should_fail_to_add_new_bid_over_the_approved_amount() {\n    let mut builder = setup();\n\n  
  let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_REGRESSION_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => VALIDATOR_1_PUBLIC_KEY.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    builder.exec(validator_1_add_bid_request).expect_failure();\n\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::Exec(ExecError::Revert(ApiError::Mint(mint_error)))\n            if mint_error == mint::Error::UnapprovedSpendingAmount as u8\n        ),\n        \"Expected unapproved spending amount error but received {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn should_fail_to_add_into_existing_bid_over_the_approved_amount() {\n    let mut builder = setup();\n\n    let validator_1_add_bid_request_1 = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => VALIDATOR_1_PUBLIC_KEY.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request_2 = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_REGRESSION_ADD_BID,\n        runtime_args! 
{\n            ARG_PUBLIC_KEY => VALIDATOR_1_PUBLIC_KEY.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    builder\n        .exec(validator_1_add_bid_request_1)\n        .expect_success()\n        .commit();\n    builder.exec(validator_1_add_bid_request_2).expect_failure();\n\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::Exec(ExecError::Revert(ApiError::Mint(mint_error)))\n            if mint_error == mint::Error::UnapprovedSpendingAmount as u8\n        ),\n        \"Expected unapproved spending amount error but received {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn should_fail_to_add_new_delegator_over_the_approved_amount() {\n    let mut builder = setup();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => VALIDATOR_1_PUBLIC_KEY.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    builder\n        .exec(validator_1_add_bid_request)\n        .expect_success()\n        .commit();\n\n    let delegator_1_delegate_requestr = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_REGRESSION_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => VALIDATOR_1_PUBLIC_KEY.clone(),\n            ARG_DELEGATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n        },\n    )\n    .build();\n\n    builder\n        .exec(delegator_1_delegate_requestr)\n        .expect_failure()\n        .commit();\n\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::Exec(ExecError::Revert(ApiError::Mint(mint_error)))\n            if mint_error == mint::Error::UnapprovedSpendingAmount as u8\n        ),\n        \"Expected unapproved spending amount error but received {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn should_fail_to_update_existing_delegator_over_the_approved_amount() {\n    let mut builder = setup();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => VALIDATOR_1_PUBLIC_KEY.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    let delegator_1_delegate_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => VALIDATOR_1_PUBLIC_KEY.clone(),\n            ARG_DELEGATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_delegate_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_REGRESSION_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => VALIDATOR_1_PUBLIC_KEY.clone(),\n            ARG_DELEGATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n        },\n    )\n    .build();\n\n    builder\n        .exec(validator_1_add_bid_request)\n        .expect_success()\n        .commit();\n\n    builder\n        .exec(delegator_1_delegate_request_1)\n        .expect_success()\n        .commit();\n\n    builder\n        .exec(delegator_1_delegate_request_2)\n        .expect_failure()\n        .commit();\n\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::Exec(ExecError::Revert(ApiError::Mint(mint_error)))\n            if mint_error == mint::Error::UnapprovedSpendingAmount as u8\n        ),\n        \"Expected unapproved spending amount error but received {:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn should_fail_to_mint_transfer_over_the_limit() {\n    let mut builder = setup();\n\n    let default_account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have default account\");\n\n    let test_purse_2 = default_account\n        .named_keys()\n        .get(TEST_PURSE)\n        .unwrap()\n        .into_uref()\n        .expect(\"should have test purse 2\");\n\n    let args = runtime_args! 
{\n        mint::ARG_TO => Option::<AccountHash>::None,\n        mint::ARG_TARGET => test_purse_2,\n        mint::ARG_AMOUNT => U512::one(),\n        mint::ARG_ID => Some(1u64),\n    };\n    let transfer_request_1 =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, CONTRACT_REGRESSION_TRANSFER, args)\n            .build();\n\n    builder.exec(transfer_request_1).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::Exec(ExecError::Revert(ApiError::Mint(mint_error)))\n            if mint_error == mint::Error::UnapprovedSpendingAmount as u8\n        ),\n        \"Expected unapproved spending amount error but received {:?}\",\n        error\n    );\n}\n\nfn setup() -> LmdbWasmTestBuilder {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let create_purse_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_NAMED_PURSE,\n        runtime_args! {\n            ARG_PURSE_NAME => TEST_PURSE,\n            ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE),\n        },\n    )\n    .build();\n\n    builder\n        .exec(validator_1_fund_request)\n        .expect_success()\n        .commit();\n\n    builder.exec(create_purse_request).expect_success().commit();\n\n    builder\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/regression_20220224.rs",
    "content": "use casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::{engine_state, execution::ExecError};\nuse casper_types::{runtime_args, system::mint, ApiError, RuntimeArgs};\n\nconst CONTRACT_REGRESSION_PAYMENT: &str = \"regression_payment.wasm\";\nconst CONTRACT_REVERT: &str = \"revert.wasm\";\n\n#[ignore]\n#[test]\nfn should_not_transfer_above_approved_limit_in_payment_code() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let account_hash = *DEFAULT_ACCOUNT_ADDR;\n    let deploy_hash: [u8; 32] = [42; 32];\n    let payment_args = runtime_args! {\n        \"amount\" => *DEFAULT_PAYMENT,\n    };\n    let session_args = RuntimeArgs::default();\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(account_hash)\n        .with_session_code(CONTRACT_REVERT, session_args)\n        .with_payment_code(CONTRACT_REGRESSION_PAYMENT, payment_args)\n        .with_authorization_keys(&[account_hash])\n        .with_deploy_hash(deploy_hash)\n        .build();\n\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(exec_request).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should have returned an error\");\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::Exec(ExecError::Revert(ApiError::Mint(mint_error)))\n            if mint_error == mint::Error::UnapprovedSpendingAmount as u8\n        ),\n        \"Expected unapproved spending amount error but received {:?}\",\n        error\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/regression_20220303.rs",
    "content": "use std::{\n    collections::BTreeMap,\n    time::{Duration, Instant},\n};\n\nuse casper_engine_test_support::{LmdbWasmTestBuilder, UpgradeRequestBuilder};\nuse casper_types::{\n    contracts::ContractHash,\n    system::{self, mint},\n    AccessRights, CLValue, Digest, EntityAddr, EntryPoints, EraId, Key, ProtocolVersion,\n    StoredValue, SystemHashRegistry, URef,\n};\nuse rand::Rng;\n\nuse crate::lmdb_fixture::{self, ENTRY_REGISTRY_SPECIAL_ADDRESS};\n\nconst DEFAULT_ACTIVATION_POINT: EraId = EraId::new(1);\n\n#[ignore]\n#[test]\nfn should_update_contract_metadata_at_upgrade_with_major_bump() {\n    test_upgrade(1, 0, 0, 0);\n}\n\n#[ignore]\n#[test]\nfn should_update_contract_metadata_at_upgrade_with_minor_bump() {\n    test_upgrade(0, 1, 0, 0);\n}\n\n#[ignore]\n#[test]\nfn should_update_contract_metadata_at_upgrade_with_patch_bump() {\n    test_upgrade(0, 0, 1, 0);\n}\n\n#[ignore]\n#[test]\nfn test_upgrade_with_global_state_update_entries() {\n    test_upgrade(0, 0, 1, 20000);\n}\n\nfn test_upgrade(major_bump: u32, minor_bump: u32, patch_bump: u32, upgrade_entries: u32) {\n    let (mut builder, lmdb_fixture_state, _temp_dir) =\n        lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_3_1);\n    let mint_contract_hash = {\n        let stored_value: StoredValue = builder\n            .query(None, ENTRY_REGISTRY_SPECIAL_ADDRESS, &[])\n            .expect(\"should query system entity registry\");\n        let cl_value = stored_value\n            .as_cl_value()\n            .cloned()\n            .expect(\"should have cl value\");\n        let registry: SystemHashRegistry = cl_value.into_t().expect(\"should have system registry\");\n        registry\n            .get(system::MINT)\n            .cloned()\n            .expect(\"should contract hash\")\n    };\n    let old_protocol_version = lmdb_fixture_state.genesis_protocol_version();\n\n    let legacy_mint_hash = ContractHash::new(mint_contract_hash);\n\n    let 
old_mint_contract = builder\n        .get_contract(legacy_mint_hash)\n        .expect(\"should have mint contract\");\n    assert_eq!(old_mint_contract.protocol_version(), old_protocol_version);\n    let new_protocol_version = ProtocolVersion::from_parts(\n        old_protocol_version.value().major + major_bump,\n        old_protocol_version.value().minor + minor_bump,\n        old_protocol_version.value().patch + patch_bump,\n    );\n    let mut global_state_update =\n        apply_global_state_update(&builder, lmdb_fixture_state.post_state_hash);\n\n    let mut rng = casper_types::testing::TestRng::new();\n    if upgrade_entries > 0 {\n        for _ in 0..upgrade_entries {\n            global_state_update.insert(\n                Key::URef(URef::new(rng.gen(), AccessRights::empty())),\n                StoredValue::CLValue(CLValue::from_t(rng.gen::<u64>()).unwrap()),\n            );\n        }\n    }\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(old_protocol_version)\n            .with_new_protocol_version(new_protocol_version)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .with_global_state_update(global_state_update)\n            .build()\n    };\n    let start = Instant::now();\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n    let elapsed = start.elapsed();\n    assert!(\n        elapsed < Duration::from_secs(40),\n        \"upgrade took too long! 
{} (millis)\",\n        elapsed.as_millis()\n    );\n    let new_contract = builder\n        .get_addressable_entity(mint_contract_hash.into())\n        .expect(\"should have mint contract\");\n    assert_eq!(\n        old_mint_contract.contract_package_hash().value(),\n        new_contract.package_hash().value()\n    );\n    assert_eq!(\n        old_mint_contract.contract_wasm_hash().value(),\n        new_contract.byte_code_hash().value()\n    );\n    let new_entry_points = builder.get_entry_points(EntityAddr::System(mint_contract_hash));\n    let old_entry_points = EntryPoints::from(old_mint_contract.entry_points().clone());\n    assert_ne!(&old_entry_points, &new_entry_points);\n    assert_eq!(\n        &new_entry_points,\n        &mint::mint_entry_points(),\n        \"should have new entrypoints written\"\n    );\n    assert_eq!(new_contract.protocol_version(), new_protocol_version);\n}\n\nfn apply_global_state_update(\n    builder: &LmdbWasmTestBuilder,\n    post_state_hash: Digest,\n) -> BTreeMap<Key, StoredValue> {\n    let key = URef::new([0u8; 32], AccessRights::all()).into();\n\n    let system_contract_hashes = builder\n        .query(Some(post_state_hash), key, &Vec::new())\n        .expect(\"Must have stored system contract hashes\")\n        .as_cl_value()\n        .expect(\"must be CLValue\")\n        .clone()\n        .into_t::<SystemHashRegistry>()\n        .expect(\"must convert to btree map\");\n\n    let mut global_state_update = BTreeMap::<Key, StoredValue>::new();\n    let registry = CLValue::from_t(system_contract_hashes)\n        .expect(\"must convert to StoredValue\")\n        .into();\n\n    global_state_update.insert(Key::SystemEntityRegistry, registry);\n\n    global_state_update\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/regression_20220727.rs",
    "content": "use std::fmt::Write;\n\nuse casper_wasm::{\n    builder,\n    elements::{Instruction, Instructions},\n};\n\nuse casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::{\n    engine_state,\n    execution::ExecError,\n    runtime::{\n        PreprocessingError, WasmValidationError, DEFAULT_BR_TABLE_MAX_SIZE, DEFAULT_MAX_GLOBALS,\n        DEFAULT_MAX_PARAMETER_COUNT, DEFAULT_MAX_TABLE_SIZE,\n    },\n};\nuse casper_types::{addressable_entity::DEFAULT_ENTRY_POINT_NAME, RuntimeArgs};\n\nuse crate::wasm_utils;\n\nconst OOM_INIT: (u32, Option<u32>) = (2805655325, None);\nconst FAILURE_ONE_ABOVE_LIMIT: (u32, Option<u32>) = (DEFAULT_MAX_TABLE_SIZE + 1, None);\nconst FAILURE_MAX_ABOVE_LIMIT: (u32, Option<u32>) = (DEFAULT_MAX_TABLE_SIZE, Some(u32::MAX));\nconst FAILURE_INIT_ABOVE_LIMIT: (u32, Option<u32>) =\n    (DEFAULT_MAX_TABLE_SIZE, Some(DEFAULT_MAX_TABLE_SIZE + 1));\nconst ALLOWED_NO_MAX: (u32, Option<u32>) = (DEFAULT_MAX_TABLE_SIZE, None);\nconst ALLOWED_LIMITS: (u32, Option<u32>) = (DEFAULT_MAX_TABLE_SIZE, Some(DEFAULT_MAX_TABLE_SIZE));\n// Anything larger than that fails wasmi interpreter with a runtime stack overflow.\nconst FAILING_BR_TABLE_SIZE: usize = DEFAULT_BR_TABLE_MAX_SIZE as usize + 1;\nconst FAILING_GLOBALS_SIZE: usize = DEFAULT_MAX_PARAMETER_COUNT as usize + 1;\nconst FAILING_PARAMS_COUNT: usize = DEFAULT_MAX_PARAMETER_COUNT as usize + 1;\n\nfn make_oom_payload(initial: u32, maximum: Option<u32>) -> Vec<u8> {\n    let mut bounds = initial.to_string();\n    if let Some(max) = maximum {\n        bounds += \" \";\n        bounds += &max.to_string();\n    }\n\n    let wat = format!(\n        r#\"(module\n            (table (;0;) {} funcref)\n            (memory (;0;) 0)\n            (export \"call\" (func $call))\n            (func $call))\n            \"#,\n        bounds\n    );\n    wat::parse_str(wat).expect(\"should parse 
wat\")\n}\n\n#[ignore]\n#[test]\nfn should_not_oom() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let initial_size_exceeded = vec![OOM_INIT, FAILURE_ONE_ABOVE_LIMIT];\n\n    let max_size_exceeded = vec![FAILURE_MAX_ABOVE_LIMIT, FAILURE_INIT_ABOVE_LIMIT];\n\n    for (initial, maximum) in initial_size_exceeded {\n        let module_bytes = make_oom_payload(initial, maximum);\n        let exec_request = ExecuteRequestBuilder::module_bytes(\n            *DEFAULT_ACCOUNT_ADDR,\n            module_bytes,\n            RuntimeArgs::default(),\n        )\n        .build();\n\n        builder.exec(exec_request).expect_failure().commit();\n\n        let error = builder.get_error().unwrap();\n\n        assert!(\n            matches!(\n                error,\n                engine_state::Error::WasmPreprocessing(PreprocessingError::WasmValidation(WasmValidationError::InitialTableSizeExceeded { max, actual }))\n                if max == DEFAULT_MAX_TABLE_SIZE && actual == initial\n            ),\n            \"{:?}\",\n            error\n        );\n    }\n\n    for (initial, maximum) in max_size_exceeded {\n        let module_bytes = make_oom_payload(initial, maximum);\n        let exec_request = ExecuteRequestBuilder::module_bytes(\n            *DEFAULT_ACCOUNT_ADDR,\n            module_bytes,\n            RuntimeArgs::default(),\n        )\n        .build();\n\n        builder.exec(exec_request).expect_failure().commit();\n\n        let error = builder.get_error().unwrap();\n\n        assert!(\n            matches!(\n                error,\n                engine_state::Error::WasmPreprocessing(PreprocessingError::WasmValidation(WasmValidationError::MaxTableSizeExceeded { max, actual }))\n                if max == DEFAULT_MAX_TABLE_SIZE && Some(actual) == maximum\n            ),\n            \"{initial} {maximum:?} {:?}\",\n            error\n        );\n    }\n}\n\n#[ignore]\n#[test]\nfn 
should_pass_table_validation() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let passing_test_cases = vec![ALLOWED_NO_MAX, ALLOWED_LIMITS];\n\n    for (initial, maximum) in passing_test_cases {\n        let module_bytes = make_oom_payload(initial, maximum);\n        let exec_request = ExecuteRequestBuilder::module_bytes(\n            *DEFAULT_ACCOUNT_ADDR,\n            module_bytes,\n            RuntimeArgs::default(),\n        )\n        .build();\n\n        builder.exec(exec_request).expect_success().commit();\n    }\n}\n\n#[ignore]\n#[test]\nfn should_pass_elem_section() {\n    // more functions than elements - wasmi doesn't allocate\n    let elem_does_not_fit_err = test_element_section(0, None, DEFAULT_MAX_TABLE_SIZE);\n    assert!(\n        matches!(\n            elem_does_not_fit_err,\n            Some(engine_state::Error::Exec(ExecError::Interpreter(ref msg)))\n            if msg == \"elements segment does not fit\"\n        ),\n        \"{:?}\",\n        elem_does_not_fit_err\n    );\n\n    // wasmi assumes table size and function pointers are equal\n    assert!(test_element_section(\n        DEFAULT_MAX_TABLE_SIZE,\n        Some(DEFAULT_MAX_TABLE_SIZE),\n        DEFAULT_MAX_TABLE_SIZE\n    )\n    .is_none());\n}\n\nfn test_element_section(\n    table_init: u32,\n    table_max: Option<u32>,\n    function_count: u32,\n) -> Option<engine_state::Error> {\n    // Ensures proper initialization of table elements for different number of function pointers\n    //\n    // This should ensure there's no hidden lazy allocation and initialization that might still\n    // overallocate memory, burn cpu cycles allocating etc.\n\n    // (module\n    //     (table 0 1 anyfunc)\n    //     (memory $0 1)\n    //     (export \"memory\" (memory $0))\n    //     (export \"foo1\" (func $foo1))\n    //     (export \"foo2\" (func $foo2))\n    //     (export \"foo3\" (func $foo3))\n    //     (export \"main\" 
(func $main))\n    //     (func $foo1 (; 0 ;)\n    //     )\n    //     (func $foo2 (; 1 ;)\n    //     )\n    //     (func $foo3 (; 2 ;)\n    //     )\n    //     (func $main (; 3 ;) (result i32)\n    //      (i32.const 0)\n    //     )\n    //     (elem (i32.const 0) $foo1 $foo2 $foo3)\n    // )\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let mut wat = String::new();\n\n    if let Some(max) = table_max {\n        writeln!(\n            wat,\n            r#\"(module\n            (table {table_init} {max} funcref)\"#\n        )\n        .unwrap();\n    } else {\n        writeln!(\n            wat,\n            r#\"(module\n            (table {table_init} funcref)\"#\n        )\n        .unwrap();\n    }\n\n    wat += \"(memory $0 1)\\n\";\n    wat += r#\"(export \"memory\" (memory $0))\"#;\n    wat += \"\\n\";\n    wat += r#\"(export \"call\" (func $call))\"#;\n    wat += \"\\n\";\n    for i in 0..function_count {\n        writeln!(wat, r#\"(export \"foo{i}\" (func $foo{i}))\"#).unwrap();\n    }\n    for i in 0..function_count {\n        writeln!(wat, \"(func $foo{i} (; 0 ;))\").unwrap();\n    }\n    wat += \"(func $call)\\n\";\n    wat += \"\\n\";\n    wat += \"(elem (i32.const 0) \";\n    for i in 0..function_count {\n        write!(wat, \"$foo{i} \").unwrap();\n    }\n    wat += \")\\n\";\n    wat += \")\";\n\n    std::fs::write(\"/tmp/elem.wat\", &wat).unwrap();\n\n    let module_bytes = wat::parse_str(wat).unwrap();\n    let exec_request = ExecuteRequestBuilder::module_bytes(\n        *DEFAULT_ACCOUNT_ADDR,\n        module_bytes,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).commit();\n\n    builder.get_error()\n}\n\n#[ignore]\n#[test]\nfn should_not_allow_more_than_one_table() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let module = builder::module()\n        // 
table 1\n        .table()\n        .with_min(0)\n        .build()\n        // table 2\n        .table()\n        .with_min(1)\n        .build()\n        .function()\n        // A signature with 0 params and no return type\n        .signature()\n        .build()\n        .body()\n        // Generated instructions for our entrypoint\n        .with_instructions(Instructions::new(vec![Instruction::Nop, Instruction::End]))\n        .build()\n        .build()\n        // Export above function\n        .export()\n        .field(DEFAULT_ENTRY_POINT_NAME)\n        .build()\n        // Memory section is mandatory\n        .memory()\n        .build()\n        .build();\n    let module_bytes = casper_wasm::serialize(module).expect(\"should serialize\");\n\n    let exec_request = ExecuteRequestBuilder::module_bytes(\n        *DEFAULT_ACCOUNT_ADDR,\n        module_bytes,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).expect_failure().commit();\n\n    let error = builder.get_error().unwrap();\n\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::WasmPreprocessing(PreprocessingError::WasmValidation(\n                WasmValidationError::MoreThanOneTable\n            ))\n        ),\n        \"{:?}\",\n        error\n    );\n}\n\n/// Generates arbitrary length br_table opcode trying to exploit memory allocation in the wasm\n/// parsing code.\nfn make_arbitrary_br_table(size: usize) -> Result<Vec<u8>, Box<dyn std::error::Error>> {\n    // (module\n    // (type (;0;) (func (param i32) (result i32)))\n    // (type (;1;) (func))\n    // (func (;0;) (type 0) (param i32) (result i32)\n    //   block  ;; label = @1\n    //     block  ;; label = @2\n    //       block  ;; label = @3\n    //         block  ;; label = @4\n    //           local.get 0\n    //           br_table 2 (;@2;) 1 (;@3;) 0 (;@4;) 3 (;@1;)\n    //         end\n    //         i32.const 100\n    //         return\n    //       end\n    //       
i32.const 101\n    //       return\n    //     end\n    //     i32.const 102\n    //     return\n    //   end\n    //   i32.const 103\n    //   return)\n    // (func (;1;) (type 1)\n    //   i32.const 0\n    //   call 0\n    //   drop)\n    // (memory (;0;) 0)\n    // (export \"call\" (func 1)))\n\n    let mut src = String::new();\n    writeln!(src, \"(module\")?;\n    writeln!(src, \"(memory (;0;) 0)\")?;\n    writeln!(src, r#\"(export \"call\" (func $call))\"#)?;\n    writeln!(src, r#\"(func $switch_like (param $p i32) (result i32)\"#)?;\n\n    let mut bottom = \";;\\n(local.get $p)\\n\".to_string();\n    bottom += \"(br_table\\n\";\n\n    for (br_table_offset, n) in (0..=size - 1).rev().enumerate() {\n        writeln!(bottom, \"  {n} ;; param == {br_table_offset} => (br {n})\")?; // p == 0 => (br n)\n    }\n    writeln!(bottom, \"{size})) ;; else => (br {size})\")?;\n\n    bottom += \";;\";\n\n    for n in 0..=size {\n        let mut wrap = String::new();\n        writeln!(wrap, \"(block\")?;\n        writeln!(wrap, \"{bottom}\")?;\n        writeln!(wrap, \"(i32.const {val})\", val = 100 + n)?;\n        writeln!(wrap, \"(return))\")?;\n        bottom = wrap;\n    }\n\n    writeln!(src, \"{bottom}\")?;\n\n    writeln!(\n        src,\n        r#\"(func $call (drop (call $switch_like (i32.const 0))))\"#\n    )?;\n\n    writeln!(src, \")\")?;\n\n    let module_bytes = wat::parse_str(&src)?;\n    Ok(module_bytes)\n}\n\n#[ignore]\n#[test]\nfn should_allow_large_br_table() {\n    // Anything larger than that fails wasmi interpreter with a runtime stack overflow.\n    let module_bytes = make_arbitrary_br_table(DEFAULT_BR_TABLE_MAX_SIZE as usize)\n        .expect(\"should create module bytes\");\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let exec_request = ExecuteRequestBuilder::module_bytes(\n        *DEFAULT_ACCOUNT_ADDR,\n        module_bytes,\n        RuntimeArgs::default(),\n    )\n    
.build();\n\n    builder.exec(exec_request).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_not_allow_large_br_table() {\n    let module_bytes =\n        make_arbitrary_br_table(FAILING_BR_TABLE_SIZE).expect(\"should create module bytes\");\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let exec_request = ExecuteRequestBuilder::module_bytes(\n        *DEFAULT_ACCOUNT_ADDR,\n        module_bytes,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should fail\");\n\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::WasmPreprocessing(PreprocessingError::WasmValidation(WasmValidationError::BrTableSizeExceeded { max, actual }))\n            if max == DEFAULT_BR_TABLE_MAX_SIZE && actual == FAILING_BR_TABLE_SIZE\n        ),\n        \"{:?}\",\n        error,\n    );\n}\n\nfn make_arbitrary_global(size: usize) -> Result<Vec<u8>, Box<dyn std::error::Error>> {\n    // (module\n    //   (memory $0 1)\n    //   (global $global0 i32 (i32.const 1))\n    //   (global $global1 i32 (i32.const 2))\n    //   (global $global2 i32 (i32.const 3))\n    //   (func (export \"call\")\n    //     global.get $global0\n    //     global.get $global1\n    //     global.get $global2\n    //     i32.add\n    //     i32.add\n    //     drop\n    //   )\n    // )\n    let mut src = String::new();\n    writeln!(src, \"(module\")?;\n    writeln!(src, \"  (memory $0 1)\")?;\n\n    for i in 0..size {\n        writeln!(\n            src,\n            \"  (global $global{i} i32 (i32.const {value}))\",\n            value = i + 1\n        )?;\n    }\n\n    writeln!(src, r#\"  (func (export \"call\")\"#)?;\n    debug_assert!(size >= 2);\n    writeln!(src, \"    global.get $global{last}\", last = size - 2)?;\n    writeln!(src, \"    global.get $global{last}\", last = size - 
1)?;\n    writeln!(src, \"    i32.add\")?;\n    writeln!(src, \"    drop\")?; // drop the result\n    writeln!(src, \"  )\")?;\n    writeln!(src, \")\")?;\n    let module_bytes = wat::parse_str(&src)?;\n    Ok(module_bytes)\n}\n\n#[ignore]\n#[test]\nfn should_allow_multiple_globals() {\n    let module_bytes =\n        make_arbitrary_global(DEFAULT_MAX_GLOBALS as usize).expect(\"should make arbitrary global\");\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let exec_request = ExecuteRequestBuilder::module_bytes(\n        *DEFAULT_ACCOUNT_ADDR,\n        module_bytes,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_not_allow_too_many_globals() {\n    let module_bytes =\n        make_arbitrary_global(FAILING_GLOBALS_SIZE).expect(\"should make arbitrary global\");\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let exec_request = ExecuteRequestBuilder::module_bytes(\n        *DEFAULT_ACCOUNT_ADDR,\n        module_bytes,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should fail\");\n\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::WasmPreprocessing(PreprocessingError::WasmValidation(WasmValidationError::TooManyGlobals { max, actual }))\n            if max == DEFAULT_MAX_GLOBALS && actual == FAILING_GLOBALS_SIZE\n        ),\n        \"{:?}\",\n        error,\n    );\n}\n\n#[ignore]\n#[test]\nfn should_verify_max_param_count() {\n    let module_bytes_max_params =\n        wasm_utils::make_n_arg_call_bytes(DEFAULT_MAX_PARAMETER_COUNT as usize, \"i32\")\n            .expect(\"should create wasm bytes\");\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    
builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let exec_request = ExecuteRequestBuilder::module_bytes(\n        *DEFAULT_ACCOUNT_ADDR,\n        module_bytes_max_params,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let module_bytes_100_params =\n        wasm_utils::make_n_arg_call_bytes(100, \"i32\").expect(\"should create wasm bytes\");\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let exec_request = ExecuteRequestBuilder::module_bytes(\n        *DEFAULT_ACCOUNT_ADDR,\n        module_bytes_100_params,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_not_allow_too_many_params() {\n    let module_bytes = wasm_utils::make_n_arg_call_bytes(FAILING_PARAMS_COUNT, \"i32\")\n        .expect(\"should create wasm bytes\");\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let exec_request = ExecuteRequestBuilder::module_bytes(\n        *DEFAULT_ACCOUNT_ADDR,\n        module_bytes,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should fail\");\n\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::WasmPreprocessing(PreprocessingError::WasmValidation(WasmValidationError::TooManyParameters { max, actual }))\n            if max == DEFAULT_MAX_PARAMETER_COUNT && actual == FAILING_PARAMS_COUNT\n        ),\n        \"{:?}\",\n        error,\n    );\n}\n\n#[ignore]\n#[test]\nfn should_not_allow_to_import_gas_function() {\n    let module_bytes = wat::parse_str(\n        r#\"(module\n            (func $gas (import \"env\" \"gas\") (param i32))\n            (memory $0 1)\n        )\"#,\n    )\n    
.unwrap();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let exec_request = ExecuteRequestBuilder::module_bytes(\n        *DEFAULT_ACCOUNT_ADDR,\n        module_bytes,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).expect_failure().commit();\n\n    let error = builder.get_error().expect(\"should fail\");\n\n    assert!(\n        matches!(\n            error,\n            engine_state::Error::WasmPreprocessing(PreprocessingError::WasmValidation(\n                WasmValidationError::MissingHostFunction\n            ))\n        ),\n        \"{:?}\",\n        error,\n    );\n}\n\n#[ignore]\n#[test]\nfn should_not_get_non_existing_global() {\n    let get_undeclared_global = r#\"(module\n        (memory $memory 16)\n        (export \"call\" (func $call_fn))\n        (func $call_fn\n            global.get 0\n            drop\n        )\n    )\"#;\n\n    test_non_existing_global(get_undeclared_global, 0);\n}\n\n#[ignore]\n#[test]\nfn should_not_get_global_above_declared_range() {\n    let get_undeclared_global = r#\"(module\n        (memory $memory 16)\n        (export \"call\" (func $call_fn))\n        (func $call_fn\n            global.get 3\n            drop\n        )\n        (global $global0 i32 (i32.const 0))\n        (global $global1 i32 (i32.const 1))\n        (global $global256 i32 (i32.const 2))\n    )\"#;\n\n    test_non_existing_global(get_undeclared_global, 3);\n}\n\n#[ignore]\n#[test]\nfn should_not_set_non_existing_global() {\n    let set_undeclared_global = r#\"(module\n        (memory $memory 16)\n        (export \"call\" (func $call_fn))\n        (func $call_fn\n            i32.const 123\n            global.set 0\n            drop\n        )\n    )\"#;\n\n    test_non_existing_global(set_undeclared_global, 0);\n}\n\n#[ignore]\n#[test]\nfn should_not_set_non_existing_global_u32_max() {\n    let set_undeclared_global = format!(\n        
r#\"(module\n        (memory $memory 16)\n        (export \"call\" (func $call_fn))\n        (func $call_fn\n            i32.const 0\n            global.set {index}\n        )\n        (global $global0 (mut i32) (i32.const 0))\n        (global $global1 (mut i32) (i32.const 1))\n        (global $global256 (mut i32) (i32.const 2))\n    )\"#,\n        index = u32::MAX\n    );\n\n    test_non_existing_global(&set_undeclared_global, u32::MAX);\n}\n\n#[ignore]\n#[test]\nfn should_not_get_non_existing_global_u32_max() {\n    let set_undeclared_global = format!(\n        r#\"(module\n        (memory $memory 16)\n        (export \"call\" (func $call_fn))\n        (func $call_fn\n            global.get {index}\n            drop\n        )\n        (global $global0 (mut i32) (i32.const 0))\n    )\"#,\n        index = u32::MAX\n    );\n\n    test_non_existing_global(&set_undeclared_global, u32::MAX);\n}\n\n#[ignore]\n#[test]\nfn should_not_set_non_existing_global_above_declared_range() {\n    let set_undeclared_global = r#\"(module\n        (memory $memory 16)\n        (export \"call\" (func $call_fn))\n        (func $call_fn\n            i32.const 0\n            global.set 123\n        )\n        (global $global0 (mut i32) (i32.const 0))\n        (global $global1 (mut i32) (i32.const 1))\n        (global $global256 (mut i32) (i32.const 2))\n    )\"#;\n\n    test_non_existing_global(set_undeclared_global, 123);\n}\n\nfn test_non_existing_global(module_wat: &str, index: u32) {\n    let module_bytes = wat::parse_str(module_wat).unwrap();\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n    let exec_request = ExecuteRequestBuilder::module_bytes(\n        *DEFAULT_ACCOUNT_ADDR,\n        module_bytes,\n        RuntimeArgs::default(),\n    )\n    .build();\n    builder.exec(exec_request).expect_failure().commit();\n    let error = builder.get_error().expect(\"should fail\");\n    assert!(\n        matches!(\n            
error,\n            engine_state::Error::WasmPreprocessing(PreprocessingError::WasmValidation(WasmValidationError::IncorrectGlobalOperation { index: incorrect_index }))\n            if incorrect_index == index\n        ),\n        \"{:?}\",\n        error,\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/regression_20240105.rs",
    "content": "mod repeated_ffi_call_should_gas_out_quickly {\n    use std::{\n        env,\n        sync::mpsc::{self, RecvTimeoutError},\n        thread,\n        time::{Duration, Instant},\n    };\n\n    use casper_execution_engine::{engine_state::Error, execution::ExecError};\n    use rand::Rng;\n    use tempfile::TempDir;\n\n    use casper_engine_test_support::{\n        ChainspecConfig, DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder,\n        CHAINSPEC_SYMLINK, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n    };\n    use casper_types::{\n        account::AccountHash, runtime_args, testing::TestRng, ProtocolVersion, RuntimeArgs,\n        DICTIONARY_ITEM_KEY_MAX_LENGTH, U512,\n    };\n\n    const CONTRACT: &str = \"regression_20240105.wasm\";\n    const TIMEOUT: Duration = Duration::from_secs(4);\n    const PAYMENT_AMOUNT: u64 = 1_000_000_000_000_u64;\n\n    fn production_max_associated_keys() -> u8 {\n        let chainspec_config = ChainspecConfig::from_chainspec_path(&*CHAINSPEC_SYMLINK).unwrap();\n        chainspec_config.max_associated_keys().try_into().unwrap()\n    }\n\n    struct Fixture {\n        builder: LmdbWasmTestBuilder,\n        data_dir: TempDir,\n        rng: TestRng,\n    }\n\n    impl Fixture {\n        fn new() -> Self {\n            let data_dir = TempDir::new().unwrap();\n            let mut builder = LmdbWasmTestBuilder::new_with_production_chainspec(data_dir.path());\n            builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()).commit();\n            let rng = TestRng::new();\n            Fixture {\n                builder,\n                data_dir,\n                rng,\n            }\n        }\n\n        /// Calls regression_20240105.wasm with some setup function.  
Execution is expected to\n        /// succeed.\n        fn execute_setup(&mut self, session_args: RuntimeArgs) {\n            let deploy = DeployItemBuilder::new()\n                .with_address(*DEFAULT_ACCOUNT_ADDR)\n                .with_session_code(CONTRACT, session_args)\n                .with_standard_payment(runtime_args! { \"amount\" => U512::from(PAYMENT_AMOUNT * 4) })\n                .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n                .with_deploy_hash(self.rng.gen())\n                .build();\n            let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy).build();\n            self.builder.exec(exec_request).expect_success().commit();\n        }\n\n        /// Calls regression_20240105.wasm with expectation of execution failure due to running out\n        /// of gas within the duration specified in `TIMEOUT`.\n        fn execute_with_timeout(self, session_args: RuntimeArgs, extra_auth_keys: u8) {\n            if cfg!(debug_assertions) {\n                println!(\"not testing in debug mode\");\n                return;\n            }\n            let (tx, rx) = mpsc::channel();\n            let Fixture {\n                builder,\n                data_dir,\n                mut rng,\n            } = self;\n            let post_state_hash = builder.get_post_state_hash();\n            let mut auth_keys = Vec::new();\n            auth_keys.push(*DEFAULT_ACCOUNT_ADDR);\n            for i in 1..=extra_auth_keys {\n                auth_keys.push(AccountHash::new([i; 32]));\n            }\n            let executor = thread::spawn(move || {\n                let deploy = DeployItemBuilder::new()\n                    .with_address(*DEFAULT_ACCOUNT_ADDR)\n                    .with_session_code(CONTRACT, session_args)\n                    .with_standard_payment(runtime_args! 
{ \"amount\" => U512::from(PAYMENT_AMOUNT) })\n                    .with_authorization_keys(&auth_keys)\n                    .with_deploy_hash(rng.gen())\n                    .build();\n                let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy).build();\n\n                let mut chainspec_config =\n                    ChainspecConfig::from_chainspec_path(&*CHAINSPEC_SYMLINK).unwrap();\n                // Increase the `max_memory` available in order to avoid hitting unreachable\n                // instruction during execution.\n                *chainspec_config.wasm_config.v1_mut().max_memory_mut() = 10_000;\n                let mut builder = LmdbWasmTestBuilder::open(\n                    data_dir.path(),\n                    chainspec_config,\n                    ProtocolVersion::V2_0_0,\n                    post_state_hash,\n                );\n\n                builder.exec(exec_request);\n                let error = builder.get_error().unwrap();\n                let _ = tx.send(error);\n            });\n\n            let timeout = if let Ok(value) = env::var(\"CL_TEST_TIMEOUT_SECS\") {\n                Duration::from_secs(value.parse().expect(\"should parse as u64\"))\n            } else {\n                TIMEOUT\n            };\n            let start = Instant::now();\n            let receiver_result = rx.recv_timeout(timeout);\n            executor.join().unwrap();\n            match receiver_result {\n                Ok(error) => {\n                    assert!(\n                        matches!(error, Error::Exec(ExecError::GasLimit)),\n                        \"expected gas limit error, but got {:?}\",\n                        error\n                    );\n                }\n                Err(RecvTimeoutError::Timeout) => {\n                    panic!(\n                        \"execution should take less than {} seconds, but took {} seconds \",\n                        timeout.as_secs_f32(),\n                        
start.elapsed().as_secs_f32(),\n                    )\n                }\n                Err(RecvTimeoutError::Disconnected) => unreachable!(),\n            }\n        }\n    }\n\n    #[ignore]\n    #[test]\n    fn write_small() {\n        let session_args = runtime_args! {\n            \"fn\" => \"write\",\n            \"len\" => 1_u32,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn write_large() {\n        let session_args = runtime_args! {\n            \"fn\" => \"write\",\n            \"len\" => 100_000_u32,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn read_missing() {\n        let session_args = runtime_args! {\n            \"fn\" => \"read\",\n            \"len\" => Option::<u32>::None,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn read_small() {\n        let session_args = runtime_args! {\n            \"fn\" => \"read\",\n            \"len\" => Some(1_u32),\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn read_large() {\n        let session_args = runtime_args! {\n            \"fn\" => \"read\",\n            \"len\" => Some(100_000_u32),\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn add_small() {\n        let session_args = runtime_args! {\n            \"fn\" => \"add\",\n            \"large\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn add_large() {\n        let session_args = runtime_args! 
{\n            \"fn\" => \"add\",\n            \"large\" => true\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn new_uref_small() {\n        let session_args = runtime_args! {\n            \"fn\" => \"new\",\n            \"len\" => 1_u32,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn new_uref_large() {\n        let session_args = runtime_args! {\n            \"fn\" => \"new\",\n            \"len\" => 1_000_u32,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn call_contract_small_runtime_args() {\n        let session_args = runtime_args! {\n            \"fn\" => \"call_contract\",\n            \"args_len\" => 1_u32\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn call_contract_large_runtime_args() {\n        let session_args = runtime_args! {\n            \"fn\" => \"call_contract\",\n            \"args_len\" => 1_024_u32\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn get_key_small_name_missing_key() {\n        let session_args = runtime_args! {\n            \"fn\" => \"get_key\",\n            \"large_name\" => false,\n            \"large_key\" => Option::<bool>::None\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn get_key_small_name_small_key() {\n        let session_args = runtime_args! {\n            \"fn\" => \"get_key\",\n            \"large_name\" => false,\n            \"large_key\" => Some(false)\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn get_key_small_name_large_key() {\n        let session_args = runtime_args! 
{\n            \"fn\" => \"get_key\",\n            \"large_name\" => false,\n            \"large_key\" => Some(true)\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn get_key_large_name_small_key() {\n        let session_args = runtime_args! {\n            \"fn\" => \"get_key\",\n            \"large_name\" => true,\n            \"large_key\" => Some(false)\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn get_key_large_name_large_key() {\n        let session_args = runtime_args! {\n            \"fn\" => \"get_key\",\n            \"large_name\" => true,\n            \"large_key\" => Some(true)\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn has_key_small_name_missing_key() {\n        let session_args = runtime_args! {\n            \"fn\" => \"has_key\",\n            \"large_name\" => false,\n            \"key_exists\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn has_key_small_name_existing_key() {\n        let session_args = runtime_args! {\n            \"fn\" => \"has_key\",\n            \"large_name\" => false,\n            \"key_exists\" => true\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn has_key_large_name_missing_key() {\n        let session_args = runtime_args! {\n            \"fn\" => \"has_key\",\n            \"large_name\" => true,\n            \"key_exists\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn has_key_large_name_existing_key() {\n        let session_args = runtime_args! 
{\n            \"fn\" => \"has_key\",\n            \"large_name\" => true,\n            \"key_exists\" => true\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn put_key_small_name_small_key() {\n        let session_args = runtime_args! {\n            \"fn\" => \"put_key\",\n            \"large_name\" => false,\n            \"large_key\" => false,\n            \"num_keys\" => Option::<u32>::None\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn put_key_small_name_large_key() {\n        let session_args = runtime_args! {\n            \"fn\" => \"put_key\",\n            \"large_name\" => false,\n            \"large_key\" => true,\n            \"num_keys\" => Option::<u32>::None\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn put_key_large_name_small_key() {\n        let session_args = runtime_args! {\n            \"fn\" => \"put_key\",\n            \"large_name\" => true,\n            \"large_key\" => false,\n            \"num_keys\" => Option::<u32>::None\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn put_key_large_name_large_key() {\n        let session_args = runtime_args! {\n            \"fn\" => \"put_key\",\n            \"large_name\" => true,\n            \"large_key\" => true,\n            \"num_keys\" => Option::<u32>::None\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn is_valid_uref_for_invalid() {\n        let session_args = runtime_args! {\n            \"fn\" => \"is_valid_uref\",\n            \"valid\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn is_valid_uref_for_valid() {\n        let session_args = runtime_args! 
{\n            \"fn\" => \"is_valid_uref\",\n            \"valid\" => true\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn add_and_remove_associated_key() {\n        let session_args = runtime_args! {\n            \"fn\" => \"add_associated_key\",\n            \"remove_after_adding\" => true,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn add_associated_key_duplicated() {\n        let session_args = runtime_args! {\n            \"fn\" => \"add_associated_key\",\n            \"remove_after_adding\" => false,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn remove_associated_key_non_existent() {\n        let session_args = runtime_args! { \"fn\" => \"remove_associated_key\" };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn update_associated_key_non_existent() {\n        let session_args = runtime_args! {\n            \"fn\" => \"update_associated_key\",\n            \"exists\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn update_associated_key_existing() {\n        let session_args = runtime_args! {\n            \"fn\" => \"update_associated_key\",\n            \"exists\" => true\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn set_action_threshold() {\n        let session_args = runtime_args! { \"fn\" => \"set_action_threshold\" };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn load_named_keys_empty() {\n        let session_args = runtime_args! 
{\n            \"fn\" => \"load_named_keys\",\n            \"num_keys\" => 0_u32,\n            \"large_name\" => true\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn load_named_keys_one_key_small_name() {\n        let num_keys = 1_u32;\n        let mut fixture = Fixture::new();\n        let session_args = runtime_args! {\n            \"fn\" => \"put_key\",\n            \"large_name\" => false,\n            \"large_key\" => true,\n            \"num_keys\" => Some(num_keys),\n        };\n        fixture.execute_setup(session_args);\n        let session_args = runtime_args! {\n            \"fn\" => \"load_named_keys\",\n            \"num_keys\" => num_keys\n        };\n        fixture.execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn load_named_keys_one_key_large_name() {\n        let num_keys = 1_u32;\n        let mut fixture = Fixture::new();\n        let session_args = runtime_args! {\n            \"fn\" => \"put_key\",\n            \"large_name\" => true,\n            \"large_key\" => true,\n            \"num_keys\" => Some(num_keys),\n        };\n        fixture.execute_setup(session_args);\n        let session_args = runtime_args! {\n            \"fn\" => \"load_named_keys\",\n            \"num_keys\" => num_keys,\n        };\n        fixture.execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn load_named_keys_many_keys_small_name() {\n        let num_keys = 1_000_u32;\n        let mut fixture = Fixture::new();\n        let session_args = runtime_args! {\n            \"fn\" => \"put_key\",\n            \"large_name\" => false,\n            \"large_key\" => true,\n            \"num_keys\" => Some(num_keys),\n        };\n        fixture.execute_setup(session_args);\n        let session_args = runtime_args! 
{\n            \"fn\" => \"load_named_keys\",\n            \"num_keys\" => num_keys,\n        };\n        fixture.execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn load_named_keys_many_keys_large_name() {\n        let num_keys = 10_u32;\n        let mut fixture = Fixture::new();\n        let session_args = runtime_args! {\n            \"fn\" => \"put_key\",\n            \"large_name\" => true,\n            \"large_key\" => true,\n            \"num_keys\" => Some(num_keys),\n        };\n        fixture.execute_setup(session_args);\n        let session_args = runtime_args! {\n            \"fn\" => \"load_named_keys\",\n            \"num_keys\" => num_keys,\n        };\n        fixture.execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn remove_key_small_name() {\n        let session_args = runtime_args! {\n            \"fn\" => \"remove_key\",\n            \"large_name\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn remove_key_large_name() {\n        let session_args = runtime_args! {\n            \"fn\" => \"remove_key\",\n            \"large_name\" => true\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn get_caller() {\n        let session_args = runtime_args! { \"fn\" => \"get_caller\" };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn get_blocktime() {\n        let session_args = runtime_args! { \"fn\" => \"get_blocktime\" };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn create_purse() {\n        let session_args = runtime_args! 
{ \"fn\" => \"create_purse\" };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn transfer_to_account_create_account() {\n        let session_args = runtime_args! {\n            \"fn\" => \"transfer_to_account\",\n            \"account_exists\" => false,\n            \"amount\" => U512::MAX\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn transfer_to_account_existing_account() {\n        let session_args = runtime_args! {\n            \"fn\" => \"transfer_to_account\",\n            \"account_exists\" => true,\n            \"amount\" => U512::MAX\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn transfer_from_purse_to_account_create_account() {\n        let session_args = runtime_args! {\n            \"fn\" => \"transfer_from_purse_to_account\",\n            \"account_exists\" => false,\n            \"amount\" => U512::MAX\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn transfer_from_purse_to_account_existing_account() {\n        let session_args = runtime_args! {\n            \"fn\" => \"transfer_from_purse_to_account\",\n            \"account_exists\" => true,\n            \"amount\" => U512::MAX\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn transfer_from_purse_to_purse() {\n        let session_args = runtime_args! {\n            \"fn\" => \"transfer_from_purse_to_purse\",\n            \"amount\" => U512::MAX\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn get_balance_non_existent_purse() {\n        let session_args = runtime_args! 
{\n            \"fn\" => \"get_balance\",\n            \"purse_exists\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn get_balance_existing_purse() {\n        let session_args = runtime_args! {\n            \"fn\" => \"get_balance\",\n            \"purse_exists\" => true\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn get_phase() {\n        let session_args = runtime_args! { \"fn\" => \"get_phase\" };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn get_system_contract() {\n        let session_args = runtime_args! { \"fn\" => \"get_system_contract\" };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn get_main_purse() {\n        let session_args = runtime_args! { \"fn\" => \"get_main_purse\" };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn read_host_buffer_empty() {\n        let session_args = runtime_args! { \"fn\" => \"read_host_buffer\" };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn create_contract_package_at_hash() {\n        let session_args = runtime_args! { \"fn\" => \"create_contract_package_at_hash\" };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn add_contract_version_no_entry_points_no_named_keys() {\n        let session_args = runtime_args! {\n            \"fn\" => \"add_contract_version\",\n            \"entry_points_len\" => 0_u32,\n            \"named_keys_len\" => 0_u32,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn add_contract_version_small_entry_points_small_named_keys() {\n        let session_args = runtime_args! 
{\n            \"fn\" => \"add_contract_version\",\n            \"entry_points_len\" => 1_u32,\n            \"named_keys_len\" => 1_u32,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn add_contract_version_small_entry_points_large_named_keys() {\n        let session_args = runtime_args! {\n            \"fn\" => \"add_contract_version\",\n            \"entry_points_len\" => 1_u32,\n            \"named_keys_len\" => 100_u32,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn add_contract_version_large_entry_points_small_named_keys() {\n        let session_args = runtime_args! {\n            \"fn\" => \"add_contract_version\",\n            \"entry_points_len\" => 100_u32,\n            \"named_keys_len\" => 1_u32,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn add_contract_version_large_entry_points_large_named_keys() {\n        let session_args = runtime_args! {\n            \"fn\" => \"add_contract_version\",\n            \"entry_points_len\" => 100_u32,\n            \"named_keys_len\" => 100_u32,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn disable_contract_version() {\n        let session_args = runtime_args! { \"fn\" => \"disable_contract_version\" };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn call_versioned_contract_small_runtime_args() {\n        let session_args = runtime_args! {\n            \"fn\" => \"call_versioned_contract\",\n            \"args_len\" => 1_u32\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn call_versioned_contract_large_runtime_args() {\n        let session_args = runtime_args! 
{\n            \"fn\" => \"call_versioned_contract\",\n            \"args_len\" => 1_024_u32\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn create_contract_user_group_small_label_no_new_urefs_no_existing_urefs() {\n        let session_args = runtime_args! {\n            \"fn\" => \"create_contract_user_group\",\n            \"label_len\" => 1_u32,\n            \"num_new_urefs\" => 0_u8,\n            \"num_existing_urefs\" => 0_u8,\n            \"allow_exceeding_max_groups\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn create_contract_user_group_small_label_no_new_urefs_few_existing_urefs() {\n        let session_args = runtime_args! {\n            \"fn\" => \"create_contract_user_group\",\n            \"label_len\" => 1_u32,\n            \"num_new_urefs\" => 0_u8,\n            \"num_existing_urefs\" => 1_u8,\n            \"allow_exceeding_max_groups\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn create_contract_user_group_small_label_no_new_urefs_many_existing_urefs() {\n        let session_args = runtime_args! {\n            \"fn\" => \"create_contract_user_group\",\n            \"label_len\" => 1_u32,\n            \"num_new_urefs\" => 0_u8,\n            \"num_existing_urefs\" => 10_u8,\n            \"allow_exceeding_max_groups\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn create_contract_user_group_small_label_few_new_urefs_no_existing_urefs() {\n        let session_args = runtime_args! 
{\n            \"fn\" => \"create_contract_user_group\",\n            \"label_len\" => 1_u32,\n            \"num_new_urefs\" => 1_u8,\n            \"num_existing_urefs\" => 0_u8,\n            \"allow_exceeding_max_groups\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn create_contract_user_group_small_label_few_new_urefs_few_existing_urefs() {\n        let session_args = runtime_args! {\n            \"fn\" => \"create_contract_user_group\",\n            \"label_len\" => 1_u32,\n            \"num_new_urefs\" => 1_u8,\n            \"num_existing_urefs\" => 1_u8,\n            \"allow_exceeding_max_groups\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn create_contract_user_group_small_label_few_new_urefs_many_existing_urefs() {\n        let session_args = runtime_args! {\n            \"fn\" => \"create_contract_user_group\",\n            \"label_len\" => 1_u32,\n            \"num_new_urefs\" => 1_u8,\n            \"num_existing_urefs\" => 5_u8,\n            \"allow_exceeding_max_groups\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn create_contract_user_group_small_label_many_new_urefs_no_existing_urefs() {\n        let session_args = runtime_args! {\n            \"fn\" => \"create_contract_user_group\",\n            \"label_len\" => 1_u32,\n            \"num_new_urefs\" => 5_u8,\n            \"num_existing_urefs\" => 0_u8,\n            \"allow_exceeding_max_groups\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn create_contract_user_group_small_label_many_new_urefs_few_existing_urefs() {\n        let session_args = runtime_args! 
{\n            \"fn\" => \"create_contract_user_group\",\n            \"label_len\" => 1_u32,\n            \"num_new_urefs\" => 5_u8,\n            \"num_existing_urefs\" => 1_u8,\n            \"allow_exceeding_max_groups\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn create_contract_user_group_small_label_many_new_urefs_many_existing_urefs() {\n        let session_args = runtime_args! {\n            \"fn\" => \"create_contract_user_group\",\n            \"label_len\" => 1_u32,\n            \"num_new_urefs\" => 5_u8,\n            \"num_existing_urefs\" => 5_u8,\n            \"allow_exceeding_max_groups\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn create_contract_user_group_large_label_no_new_urefs_no_existing_urefs() {\n        let session_args = runtime_args! {\n            \"fn\" => \"create_contract_user_group\",\n            \"label_len\" => 1_000_000_u32,\n            \"num_new_urefs\" => 0_u8,\n            \"num_existing_urefs\" => 0_u8,\n            \"allow_exceeding_max_groups\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn create_contract_user_group_large_label_no_new_urefs_few_existing_urefs() {\n        let session_args = runtime_args! {\n            \"fn\" => \"create_contract_user_group\",\n            \"label_len\" => 1_000_000_u32,\n            \"num_new_urefs\" => 0_u8,\n            \"num_existing_urefs\" => 1_u8,\n            \"allow_exceeding_max_groups\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn create_contract_user_group_large_label_no_new_urefs_many_existing_urefs() {\n        let session_args = runtime_args! 
{\n            \"fn\" => \"create_contract_user_group\",\n            \"label_len\" => 1_000_000_u32,\n            \"num_new_urefs\" => 0_u8,\n            \"num_existing_urefs\" => 10_u8,\n            \"allow_exceeding_max_groups\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn create_contract_user_group_large_label_few_new_urefs_no_existing_urefs() {\n        let session_args = runtime_args! {\n            \"fn\" => \"create_contract_user_group\",\n            \"label_len\" => 1_000_000_u32,\n            \"num_new_urefs\" => 1_u8,\n            \"num_existing_urefs\" => 0_u8,\n            \"allow_exceeding_max_groups\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn create_contract_user_group_large_label_few_new_urefs_few_existing_urefs() {\n        let session_args = runtime_args! {\n            \"fn\" => \"create_contract_user_group\",\n            \"label_len\" => 1_000_000_u32,\n            \"num_new_urefs\" => 1_u8,\n            \"num_existing_urefs\" => 1_u8,\n            \"allow_exceeding_max_groups\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn create_contract_user_group_large_label_few_new_urefs_many_existing_urefs() {\n        let session_args = runtime_args! {\n            \"fn\" => \"create_contract_user_group\",\n            \"label_len\" => 1_000_000_u32,\n            \"num_new_urefs\" => 1_u8,\n            \"num_existing_urefs\" => 5_u8,\n            \"allow_exceeding_max_groups\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn create_contract_user_group_large_label_many_new_urefs_no_existing_urefs() {\n        let session_args = runtime_args! 
{\n            \"fn\" => \"create_contract_user_group\",\n            \"label_len\" => 1_000_000_u32,\n            \"num_new_urefs\" => 5_u8,\n            \"num_existing_urefs\" => 0_u8,\n            \"allow_exceeding_max_groups\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn create_contract_user_group_large_label_many_new_urefs_few_existing_urefs() {\n        let session_args = runtime_args! {\n            \"fn\" => \"create_contract_user_group\",\n            \"label_len\" => 1_000_000_u32,\n            \"num_new_urefs\" => 5_u8,\n            \"num_existing_urefs\" => 1_u8,\n            \"allow_exceeding_max_groups\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn create_contract_user_group_large_label_many_new_urefs_many_existing_urefs() {\n        let session_args = runtime_args! {\n            \"fn\" => \"create_contract_user_group\",\n            \"label_len\" => 1_000_000_u32,\n            \"num_new_urefs\" => 5_u8,\n            \"num_existing_urefs\" => 5_u8,\n            \"allow_exceeding_max_groups\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn create_contract_user_group_failure_max_urefs_exceeded() {\n        let session_args = runtime_args! {\n            \"fn\" => \"create_contract_user_group\",\n            \"label_len\" => 1_u32,\n            \"num_new_urefs\" => u8::MAX,\n            \"num_existing_urefs\" => 0_u8,\n            \"allow_exceeding_max_groups\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn create_contract_user_group_failure_max_groups_exceeded() {\n        let session_args = runtime_args! 
{\n            \"fn\" => \"create_contract_user_group\",\n            \"label_len\" => 1_u32,\n            \"num_new_urefs\" => 0_u8,\n            \"num_existing_urefs\" => 0_u8,\n            \"allow_exceeding_max_groups\" => true\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    // #[ignore]\n    // #[test]\n    // fn print_small() {\n    //     let session_args = runtime_args! {\n    //         \"fn\" => \"print\",\n    //         \"num_chars\" => 1_u32\n    //     };\n    //     Fixture::new().execute_with_timeout(session_args, 0)\n    // }\n    //\n    // #[ignore]\n    // #[test]\n    // fn print_large() {\n    //     let session_args = runtime_args! {\n    //         \"fn\" => \"print\",\n    //         \"num_chars\" => 1_000_000_u32\n    //     };\n    //     Fixture::new().execute_with_timeout(session_args, 0)\n    // }\n\n    #[ignore]\n    #[test]\n    fn get_runtime_arg_size_zero() {\n        let session_args = runtime_args! {\n            \"fn\" => \"get_runtime_arg_size\",\n            \"arg\" => ()\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn get_runtime_arg_size_small() {\n        let session_args = runtime_args! {\n            \"fn\" => \"get_runtime_arg_size\",\n            \"arg\" => 1_u8\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn get_runtime_arg_size_large() {\n        let session_args = runtime_args! {\n            \"fn\" => \"get_runtime_arg_size\",\n            \"arg\" => [1_u8; 1_000_000]\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn get_runtime_arg_zero_size() {\n        let session_args = runtime_args! 
{\n            \"fn\" => \"get_runtime_arg\",\n            \"arg\" => ()\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn get_runtime_arg_small_size() {\n        let session_args = runtime_args! {\n            \"fn\" => \"get_runtime_arg\",\n            \"arg\" => 1_u8\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn get_runtime_arg_large_size() {\n        let session_args = runtime_args! {\n            \"fn\" => \"get_runtime_arg\",\n            \"arg\" => [1_u8; 1_000_000]\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn remove_contract_user_group() {\n        let session_args = runtime_args! { \"fn\" => \"remove_contract_user_group\" };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn extend_contract_user_group_urefs_and_remove_as_required() {\n        let session_args = runtime_args! {\n            \"fn\" => \"extend_contract_user_group_urefs\",\n            \"allow_exceeding_max_urefs\" => false\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn extend_contract_user_group_urefs_failure_max_urefs_exceeded() {\n        let session_args = runtime_args! {\n            \"fn\" => \"extend_contract_user_group_urefs\",\n            \"allow_exceeding_max_urefs\" => true\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn remove_contract_user_group_urefs() {\n        let session_args = runtime_args! { \"fn\" => \"remove_contract_user_group_urefs\" };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn blake2b_small() {\n        let session_args = runtime_args! 
{\n            \"fn\" => \"blake2b\",\n            \"len\" => 1_u32\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn blake2b_large() {\n        let session_args = runtime_args! {\n            \"fn\" => \"blake2b\",\n            \"len\" => 1_000_000_u32\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn new_dictionary() {\n        let session_args = runtime_args! { \"fn\" => \"new_dictionary\" };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn dictionary_get_small_name_small_value() {\n        let session_args = runtime_args! {\n            \"fn\" => \"dictionary_get\",\n            \"name_len\" => 1_u32,\n            \"value_len\" => 1_u32,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn dictionary_get_small_name_large_value() {\n        let session_args = runtime_args! {\n            \"fn\" => \"dictionary_get\",\n            \"name_len\" => 1_u32,\n            \"value_len\" => 1_000_u32,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn dictionary_get_large_name_small_value() {\n        let session_args = runtime_args! {\n            \"fn\" => \"dictionary_get\",\n            \"name_len\" => DICTIONARY_ITEM_KEY_MAX_LENGTH as u32 - 4,\n            \"value_len\" => 1_u32,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn dictionary_get_large_name_large_value() {\n        let session_args = runtime_args! 
{\n            \"fn\" => \"dictionary_get\",\n            \"name_len\" => DICTIONARY_ITEM_KEY_MAX_LENGTH as u32 - 4,\n            \"value_len\" => 1_000_u32,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn dictionary_put_small_name_small_value() {\n        let session_args = runtime_args! {\n            \"fn\" => \"dictionary_put\",\n            \"name_len\" => 1_u32,\n            \"value_len\" => 1_u32,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn dictionary_put_small_name_large_value() {\n        let session_args = runtime_args! {\n            \"fn\" => \"dictionary_put\",\n            \"name_len\" => 1_u32,\n            \"value_len\" => 1_000_u32,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn dictionary_put_large_name_small_value() {\n        let session_args = runtime_args! {\n            \"fn\" => \"dictionary_put\",\n            \"name_len\" => DICTIONARY_ITEM_KEY_MAX_LENGTH as u32 - 4,\n            \"value_len\" => 1_u32,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn dictionary_put_large_name_large_value() {\n        let session_args = runtime_args! {\n            \"fn\" => \"dictionary_put\",\n            \"name_len\" => DICTIONARY_ITEM_KEY_MAX_LENGTH as u32 - 4,\n            \"value_len\" => 1_000_u32,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn load_call_stack() {\n        let session_args = runtime_args! { \"fn\" => \"load_call_stack\" };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn load_authorization_keys_small() {\n        let session_args = runtime_args! 
{\n            \"fn\" => \"load_authorization_keys\",\n            \"setup\" => false,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn load_authorization_keys_large() {\n        let session_args = runtime_args! {\n            \"fn\" => \"load_authorization_keys\",\n            \"setup\" => true,\n        };\n        let mut fixture = Fixture::new();\n        fixture.execute_setup(session_args);\n        let session_args = runtime_args! {\n            \"fn\" => \"load_authorization_keys\",\n            \"setup\" => false,\n        };\n        fixture.execute_with_timeout(session_args, production_max_associated_keys() - 1)\n    }\n\n    #[ignore]\n    #[test]\n    fn random_bytes() {\n        let session_args = runtime_args! { \"fn\" => \"random_bytes\" };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn dictionary_read_small_name_small_value() {\n        let session_args = runtime_args! {\n            \"fn\" => \"dictionary_read\",\n            \"name_len\" => 1_u32,\n            \"value_len\" => 1_u32,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn dictionary_read_small_name_large_value() {\n        let session_args = runtime_args! {\n            \"fn\" => \"dictionary_read\",\n            \"name_len\" => 1_u32,\n            \"value_len\" => 1_000_u32,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn dictionary_read_large_name_small_value() {\n        let session_args = runtime_args! 
{\n            \"fn\" => \"dictionary_read\",\n            \"name_len\" => DICTIONARY_ITEM_KEY_MAX_LENGTH as u32 - 4,\n            \"value_len\" => 1_u32,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn dictionary_read_large_name_large_value() {\n        let session_args = runtime_args! {\n            \"fn\" => \"dictionary_read\",\n            \"name_len\" => DICTIONARY_ITEM_KEY_MAX_LENGTH as u32 - 4,\n            \"value_len\" => 1_000_u32,\n        };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n\n    #[ignore]\n    #[test]\n    fn enable_contract_version() {\n        let session_args = runtime_args! { \"fn\" => \"enable_contract_version\" };\n        Fixture::new().execute_with_timeout(session_args, 0)\n    }\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/regression_20250812.rs",
    "content": "use casper_engine_test_support::{\n    ChainspecConfig, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNTS,\n    DEFAULT_ACCOUNT_ADDR, DEFAULT_PROTOCOL_VERSION,\n};\nuse casper_types::RuntimeArgs;\n\nconst DO_NOTHING_CONTRACT: &str = \"do_nothing_stored.wasm\";\n\n#[ignore]\n#[test]\nfn should_correctly_install_and_add_contract_version_with_ae_turned_on() {\n    let chainspec = ChainspecConfig::default().with_enable_addressable_entity(true);\n    let genesis_request = chainspec\n        .create_genesis_request(DEFAULT_ACCOUNTS.to_vec(), DEFAULT_PROTOCOL_VERSION)\n        .expect(\"must create genesis request\");\n\n    let mut builder = LmdbWasmTestBuilder::new_temporary_with_config(chainspec);\n    builder.run_genesis(genesis_request).commit();\n\n    let install_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        DO_NOTHING_CONTRACT,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let install_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        DO_NOTHING_CONTRACT,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(install_request_1).expect_success().commit();\n    builder.exec(install_request_2).expect_success().commit();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/slow_input.rs",
    "content": "use std::mem;\n\nuse casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{\n    addressable_entity::DEFAULT_ENTRY_POINT_NAME, Gas, RuntimeArgs,\n    DEFAULT_CONTROL_FLOW_BR_TABLE_MULTIPLIER,\n};\n\nuse walrus::{ir::BinaryOp, FunctionBuilder, InstrSeqBuilder, Module, ModuleConfig, ValType};\n\n#[ignore]\n#[test]\nfn should_charge_extra_per_amount_of_br_table_elements() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    const FIXED_BLOCK_AMOUNT: usize = 256;\n    const N_ELEMENTS: u32 = 5;\n    const M_ELEMENTS: u32 = 168;\n\n    let br_table_min_elements = fixed_cost_br_table(FIXED_BLOCK_AMOUNT, N_ELEMENTS);\n    let br_table_max_elements = fixed_cost_br_table(FIXED_BLOCK_AMOUNT, M_ELEMENTS);\n\n    assert_ne!(&br_table_min_elements, &br_table_max_elements);\n\n    let exec_request_1 = ExecuteRequestBuilder::module_bytes(\n        *DEFAULT_ACCOUNT_ADDR,\n        br_table_min_elements,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let exec_request_2 = ExecuteRequestBuilder::module_bytes(\n        *DEFAULT_ACCOUNT_ADDR,\n        br_table_max_elements,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let gas_cost_1 = builder.last_exec_gas_consumed();\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let gas_cost_2 = builder.last_exec_gas_consumed();\n\n    assert!(\n        gas_cost_2 > gas_cost_1,\n        \"larger br_table should cost more gas\"\n    );\n\n    let br_table_cycles = 5;\n\n    assert_eq!(\n        gas_cost_2.checked_sub(gas_cost_1),\n        Some(Gas::from(\n            (M_ELEMENTS - N_ELEMENTS) * DEFAULT_CONTROL_FLOW_BR_TABLE_MULTIPLIER * br_table_cycles\n        )),\n        \"the cost difference should equal to exactly the size of br_table difference \"\n    
);\n}\n\n#[allow(dead_code)]\nfn cpu_burner_br_if(iterations: i64) -> Vec<u8> {\n    let mut module = Module::with_config(ModuleConfig::new());\n\n    let _memory_id = module.memories.add_local(false, 11, None);\n\n    let mut loop_func = FunctionBuilder::new(&mut module.types, &[ValType::I64], &[]);\n\n    let var_counter = module.locals.add(ValType::I64);\n    let var_i = module.locals.add(ValType::I64);\n\n    loop_func\n        .func_body()\n        // i := 0\n        .i64_const(0)\n        .local_set(var_i)\n        .loop_(None, |loop_| {\n            let loop_id = loop_.id();\n            loop_. // loop:\n                // i += 1\n                local_get(var_i)\n                .i64_const(1)\n                .binop(BinaryOp::I64Add)\n                // if i < iterations {\n                .local_tee(var_i)\n                .local_get(var_counter)\n                .binop(BinaryOp::I64LtU)\n                // goto loop\n                // }\n                .br_if(loop_id);\n        });\n\n    let loop_func = loop_func.finish(vec![var_counter], &mut module.funcs);\n\n    let mut call_func = FunctionBuilder::new(&mut module.types, &[], &[]);\n    call_func.func_body().i64_const(iterations).call(loop_func);\n\n    let call_func = call_func.finish(Vec::new(), &mut module.funcs);\n\n    module.exports.add(DEFAULT_ENTRY_POINT_NAME, call_func);\n\n    module.emit_wasm()\n}\n\n#[allow(dead_code)]\nfn cpu_burner_br_table(iterations: i64) -> Vec<u8> {\n    let mut module = Module::with_config(ModuleConfig::new());\n\n    let _memory_id = module.memories.add_local(false, 11, None);\n\n    let mut loop_func = FunctionBuilder::new(&mut module.types, &[ValType::I64], &[]);\n\n    let param_iterations = module.locals.add(ValType::I64);\n    let local_i = module.locals.add(ValType::I64);\n\n    loop_func\n        .func_body()\n        // i := 0\n        .i64_const(0)\n        .local_set(local_i)\n        .block(None, |loop_break| {\n            let loop_break_id = 
loop_break.id();\n\n            loop_break.loop_(None, |while_loop| {\n                let while_loop_id = while_loop.id(); // loop:\n\n                while_loop\n                    .block(None, |while_loop_inner| {\n                        let while_loop_inner_id = while_loop_inner.id();\n                        // counter += 1\n                        while_loop_inner\n                            .local_get(local_i)\n                            .i64_const(1)\n                            .binop(BinaryOp::I64Add)\n                            // switch (i < counter) {\n                            .local_tee(local_i)\n                            .local_get(param_iterations)\n                            .binop(BinaryOp::I64LtU)\n                            .br_table(\n                                vec![\n                                    // case 0: break;\n                                    loop_break_id,\n                                    // case 1: continue; (goto while_loop)\n                                    while_loop_id,\n                                ]\n                                .into(),\n                                // default: throw()\n                                while_loop_inner_id,\n                            );\n                    })\n                    // the \"throw\"\n                    .unreachable();\n            });\n        });\n\n    let loop_func = loop_func.finish(vec![param_iterations], &mut module.funcs);\n\n    let mut call_func = FunctionBuilder::new(&mut module.types, &[], &[]);\n    call_func.func_body().i64_const(iterations).call(loop_func);\n\n    let call_func = call_func.finish(Vec::new(), &mut module.funcs);\n\n    module.exports.add(DEFAULT_ENTRY_POINT_NAME, call_func);\n\n    module.emit_wasm()\n}\n\n/// Creates Wasm bytes with fixed amount of `block`s but with a `br_table` of a variable size.\n///\n/// Gas cost of executing `fixed_cost_br_table(n + m)` should be greater than\n/// 
`fixed_cost_br_table(n)` by exactly `br_table.entry_cost * m` iff m > 0.\nfn fixed_cost_br_table(total_labels: usize, br_table_element_size: u32) -> Vec<u8> {\n    assert!((br_table_element_size as usize) < total_labels);\n\n    let mut module = Module::with_config(ModuleConfig::new());\n\n    let _memory_id = module.memories.add_local(false, 11, None);\n\n    let mut br_table_func = FunctionBuilder::new(&mut module.types, &[ValType::I32], &[]);\n\n    let param_jump_label = module.locals.add(ValType::I32);\n\n    fn recursive_block_generator(\n        current_block: &mut InstrSeqBuilder,\n        mut recursive_step_fn: impl FnMut(&mut InstrSeqBuilder) -> bool,\n    ) {\n        if !recursive_step_fn(current_block) {\n            current_block.block(None, |nested_block| {\n                recursive_block_generator(nested_block, recursive_step_fn);\n            });\n        }\n    }\n\n    br_table_func.func_body().block(None, |outer_block| {\n        // Outer block becames the \"default\" jump label for `br_table`.\n        let outer_block_id = outer_block.id();\n\n        // Count of recursive iterations left\n        let mut counter = total_labels;\n\n        // Labels are extended with newly generated labels at each recursive step\n        let mut labels = Vec::new();\n\n        // Generates nested blocks\n        recursive_block_generator(outer_block, |step| {\n            // Save current nested block in labels.\n            labels.push(step.id());\n\n            if counter == 0 {\n                // At the tail of this recursive generator we'll create a `br_table` with variable\n                // amount of labels depending on this function parameter.\n                let labels = mem::take(&mut labels);\n                let sliced_labels = labels.as_slice()[..br_table_element_size as usize].to_vec();\n\n                // Code at the tail block\n                step.local_get(param_jump_label)\n                    .br_table(sliced_labels.into(), 
outer_block_id);\n\n                // True means this is a tail call, and we won't go deeper\n                true\n            } else {\n                counter -= 1;\n                // Go deeper\n                false\n            }\n        })\n    });\n\n    let br_table_func = br_table_func.finish(vec![param_jump_label], &mut module.funcs);\n\n    let mut call_func = FunctionBuilder::new(&mut module.types, &[], &[]);\n    call_func\n        .func_body()\n        // Call `br_table_func` with 0 as the jump label,\n        // Specific value does not change the cost, so as long as it will generate valid wasm it's\n        // ok.\n        .i32_const(0)\n        .call(br_table_func);\n\n    let call_func = call_func.finish(Vec::new(), &mut module.funcs);\n\n    module.exports.add(DEFAULT_ENTRY_POINT_NAME, call_func);\n\n    module.emit_wasm()\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/test_utils.rs",
    "content": "use casper_engine_test_support::DEFAULT_WASM_V1_CONFIG;\nuse casper_types::addressable_entity::DEFAULT_ENTRY_POINT_NAME;\nuse casper_wasm::{\n    builder,\n    elements::{Instruction, Instructions},\n};\n\n/// Prepare malicious payload with amount of opcodes that could potentially overflow injected gas\n/// counter.\npub(crate) fn make_gas_counter_overflow() -> Vec<u8> {\n    let opcode_costs = DEFAULT_WASM_V1_CONFIG.opcode_costs();\n\n    // Create a lot of `nop` opcodes to potentially overflow gas injector's batching counter.\n    let upper_bound = (u32::MAX as usize / opcode_costs.nop as usize) + 1;\n\n    let instructions = {\n        let mut instructions = vec![Instruction::Nop; upper_bound];\n        instructions.push(Instruction::End);\n        Instructions::new(instructions)\n    };\n\n    let module = builder::module()\n        .function()\n        // A signature with 0 params and no return type\n        .signature()\n        .build()\n        .body()\n        // Generated instructions for our entrypoint\n        .with_instructions(instructions)\n        .build()\n        .build()\n        // Export above function\n        .export()\n        .field(DEFAULT_ENTRY_POINT_NAME)\n        .build()\n        // Memory section is mandatory\n        .memory()\n        .build()\n        .build();\n    casper_wasm::serialize(module).expect(\"should serialize\")\n}\n\n/// Prepare malicious payload in a form of a wasm module without memory section.\npub(crate) fn make_module_without_memory_section() -> Vec<u8> {\n    // Create some opcodes.\n    let upper_bound = 10;\n\n    let instructions = {\n        let mut instructions = vec![Instruction::Nop; upper_bound];\n        instructions.push(Instruction::End);\n        Instructions::new(instructions)\n    };\n\n    let module = builder::module()\n        .function()\n        // A signature with 0 params and no return type\n        .signature()\n        .build()\n        .body()\n        // Generated 
instructions for our entrypoint\n        .with_instructions(instructions)\n        .build()\n        .build()\n        // Export above function\n        .export()\n        .field(DEFAULT_ENTRY_POINT_NAME)\n        .build()\n        .build();\n    casper_wasm::serialize(module).expect(\"should serialize\")\n}\n\n/// Prepare malicious payload in a form of a wasm module with forbidden start section.\npub(crate) fn make_module_with_start_section() -> Vec<u8> {\n    let module = r#\"\n        (module\n            (memory 1)\n            (start 0)\n            (func (export \"call\")\n            )\n        )\n    \"#;\n    wat::parse_str(module).expect(\"should parse wat\")\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/regression/transforms_must_be_ordered.rs",
    "content": "//! Tests whether transforms produced by contracts appear ordered in the effects.\nuse core::convert::TryInto;\n\nuse rand::{rngs::StdRng, Rng, SeedableRng};\n\nuse casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR,\n    LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{\n    execution::TransformKindV2, runtime_args, system::standard_payment, AddressableEntityHash, Key,\n    URef, U512,\n};\n\n#[ignore]\n#[test]\nfn contract_transforms_should_be_ordered_in_the_effects() {\n    // This many URefs will be created in the contract.\n    const N_UREFS: u32 = 100;\n    // This many operations will be scattered among these URefs.\n    const N_OPS: usize = 1000;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let mut rng = StdRng::seed_from_u64(0);\n\n    let execution_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        \"ordered-transforms.wasm\",\n        runtime_args! 
{ \"n\" => N_UREFS },\n    )\n    .build();\n\n    // Installs the contract and creates the URefs, all initialized to `0_i32`.\n    builder.exec(execution_request).expect_success().commit();\n\n    let contract_hash = match builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .unwrap()\n        .named_keys()\n        .get(\"ordered-transforms-contract-hash\")\n        .unwrap()\n    {\n        Key::AddressableEntity(entity_addr) => AddressableEntityHash::new(entity_addr.value()),\n        _ => panic!(\"Couldn't find ordered-transforms contract.\"),\n    };\n\n    // List of operations to be performed by the contract.\n    // An operation is a tuple (t, i, v) where:\n    // * `t` is the operation type: 0 for reading, 1 for writing and 2 for adding;\n    // * `i` is the URef index;\n    // * `v` is the value to write or add (always zero for reads).\n    let operations: Vec<(u8, u32, i32)> = (0..N_OPS)\n        .map(|_| {\n            let t: u8 = rng.gen_range(0..3);\n            let i: u32 = rng.gen_range(0..N_UREFS);\n            if t == 0 {\n                (t, i, 0)\n            } else {\n                (t, i, rng.gen())\n            }\n        })\n        .collect();\n\n    builder\n        .exec(\n            ExecuteRequestBuilder::from_deploy_item(\n                &DeployItemBuilder::new()\n                    .with_address(*DEFAULT_ACCOUNT_ADDR)\n                    .with_standard_payment(runtime_args! {\n                        standard_payment::ARG_AMOUNT => U512::from(150_000_000_000_u64),\n                    })\n                    .with_stored_session_hash(\n                        contract_hash,\n                        \"perform_operations\",\n                        runtime_args! 
{\n                            \"operations\" => operations.clone(),\n                        },\n                    )\n                    .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n                    .with_deploy_hash(rng.gen())\n                    .build(),\n            )\n            .build(),\n        )\n        .expect_success()\n        .commit();\n\n    let exec_result = builder.get_exec_result_owned(1).unwrap();\n    let effects = exec_result.effects();\n\n    let contract = builder\n        .get_entity_with_named_keys_by_entity_hash(contract_hash)\n        .unwrap();\n    let urefs: Vec<URef> = (0..N_UREFS)\n        .map(\n            |i| match contract.named_keys().get(&format!(\"uref-{}\", i)).unwrap() {\n                Key::URef(uref) => *uref,\n                _ => panic!(\"Expected a URef.\"),\n            },\n        )\n        .collect();\n\n    assert!(effects\n        .transforms()\n        .iter()\n        .filter_map(|transform| {\n            let uref = match transform.key() {\n                Key::URef(uref) => uref,\n                _ => return None,\n            };\n            let uref_index: u32 = match urefs\n                .iter()\n                .enumerate()\n                .find(|(_, u)| u.addr() == uref.addr())\n            {\n                Some((i, _)) => i.try_into().unwrap(),\n                None => return None,\n            };\n            let (type_index, value): (u8, i32) = match transform.kind() {\n                TransformKindV2::Identity => (0, 0),\n                TransformKindV2::Write(sv) => {\n                    let v: i32 = sv.as_cl_value().unwrap().clone().into_t().unwrap();\n                    (1, v)\n                }\n                TransformKindV2::AddInt32(v) => (2, *v),\n                _ => panic!(\"Invalid transform.\"),\n            };\n            Some((type_index, uref_index, value))\n        })\n        .eq(operations.into_iter()));\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/stack_overflow.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::{engine_state::Error, execution::ExecError};\nuse casper_types::RuntimeArgs;\n\n#[ignore]\n#[test]\nfn runtime_stack_overflow_should_cause_unreachable_error() {\n    // Create an unconstrained recursive call\n    let wat = r#\"(module\n        (func $call (call $call))\n        (export \"call\" (func $call))\n        (memory $memory 1)\n      )\"#;\n\n    let module_bytes = wat::parse_str(wat).unwrap();\n\n    let do_stack_overflow_request = ExecuteRequestBuilder::module_bytes(\n        *DEFAULT_ACCOUNT_ADDR,\n        module_bytes,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n    builder\n        .exec(do_stack_overflow_request)\n        .expect_failure()\n        .commit();\n\n    let error = builder.get_error().expect(\"should have error\");\n    assert!(\n        matches!(&error, Error::Exec(ExecError::Interpreter(s)) if s.contains(\"Unreachable\")),\n        \"{:?}\",\n        error\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/step.rs",
    "content": "use num_traits::Zero;\nuse once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    utils, LmdbWasmTestBuilder, StepRequestBuilder, DEFAULT_ACCOUNTS,\n};\nuse casper_storage::data_access_layer::SlashItem;\nuse casper_types::{\n    system::{\n        auction::{\n            BidsExt, DelegationRate, SeigniorageRecipientsSnapshotV2,\n            SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY,\n        },\n        mint::TOTAL_SUPPLY_KEY,\n    },\n    CLValue, EntityAddr, EraId, GenesisAccount, GenesisValidator, Key, Motes, ProtocolVersion,\n    PublicKey, SecretKey, U512,\n};\n\nstatic ACCOUNT_1_PK: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([200; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nconst ACCOUNT_1_BALANCE: u64 = 100_000_000;\nconst ACCOUNT_1_BOND: u64 = 100_000_000;\n\nstatic ACCOUNT_2_PK: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([202; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nconst ACCOUNT_2_BALANCE: u64 = 200_000_000;\nconst ACCOUNT_2_BOND: u64 = 200_000_000;\n\nfn get_named_key(builder: &mut LmdbWasmTestBuilder, entity_hash: EntityAddr, name: &str) -> Key {\n    *builder\n        .get_named_keys(entity_hash)\n        .get(name)\n        .expect(\"should have bid purses\")\n}\n\nfn initialize_builder() -> LmdbWasmTestBuilder {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            ACCOUNT_1_PK.clone(),\n            Motes::new(ACCOUNT_1_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_1_BOND),\n                DelegationRate::zero(),\n            )),\n        );\n        let account_2 = GenesisAccount::account(\n            ACCOUNT_2_PK.clone(),\n            Motes::new(ACCOUNT_2_BALANCE),\n            
Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_2_BOND),\n                DelegationRate::zero(),\n            )),\n        );\n        tmp.push(account_1);\n        tmp.push(account_2);\n        tmp\n    };\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n    builder.run_genesis(run_genesis_request);\n    builder\n}\n\n/// Should be able to step slashing, rewards, and run auction.\n#[ignore]\n#[test]\nfn should_step() {\n    let mut builder = initialize_builder();\n    let step_request_builder = builder.step_request_builder();\n\n    let step_request = step_request_builder\n        .with_slash_item(SlashItem::new(ACCOUNT_1_PK.clone()))\n        .with_next_era_id(EraId::from(1))\n        .build();\n\n    let auction_hash = builder.get_auction_contract_hash();\n\n    let before_auction_seigniorage: SeigniorageRecipientsSnapshotV2 = builder.get_value(\n        EntityAddr::System(auction_hash.value()),\n        SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY,\n    );\n\n    let bids_before_slashing = builder.get_bids();\n    let account_1_bid = bids_before_slashing\n        .validator_bid(&ACCOUNT_1_PK)\n        .expect(\"should have account1 bid\");\n    assert!(!account_1_bid.inactive(), \"bid should not be inactive\");\n    assert!(\n        !account_1_bid.staked_amount().is_zero(),\n        \"bid amount should not be 0\"\n    );\n\n    assert!(builder.step(step_request).is_success(), \"should step\");\n\n    let bids_after_slashing = builder.get_bids();\n    assert!(bids_after_slashing.validator_bid(&ACCOUNT_1_PK).is_none());\n\n    assert_ne!(\n        bids_before_slashing, bids_after_slashing,\n        \"bids table should be different before and after slashing\"\n    );\n\n    // seigniorage snapshot should have changed after auction\n    let after_auction_seigniorage: SeigniorageRecipientsSnapshotV2 = builder.get_value(\n        EntityAddr::System(auction_hash.value()),\n        SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY,\n    );\n    
assert!(\n        !after_auction_seigniorage\n            .keys()\n            .all(|key| before_auction_seigniorage.contains_key(key)),\n        \"run auction should have changed seigniorage keys\"\n    );\n}\n\n/// Should be able to step slashing, rewards, and run auction.\n#[ignore]\n#[test]\nfn should_adjust_total_supply() {\n    let mut builder = initialize_builder();\n    let maybe_post_state_hash = Some(builder.get_post_state_hash());\n\n    let mint_hash = builder.get_mint_contract_hash();\n\n    // should check total supply before step\n    let total_supply_key = get_named_key(\n        &mut builder,\n        EntityAddr::System(mint_hash.value()),\n        TOTAL_SUPPLY_KEY,\n    )\n    .into_uref()\n    .expect(\"should be uref\");\n\n    let starting_total_supply = CLValue::try_from(\n        builder\n            .query(maybe_post_state_hash, total_supply_key.into(), &[])\n            .expect(\"should have total supply\"),\n    )\n    .expect(\"should be a CLValue\")\n    .into_t::<U512>()\n    .expect(\"should be U512\");\n\n    // slash\n    let step_request = StepRequestBuilder::new()\n        .with_parent_state_hash(builder.get_post_state_hash())\n        .with_protocol_version(ProtocolVersion::V1_0_0)\n        .with_slash_item(SlashItem::new(ACCOUNT_1_PK.clone()))\n        .with_slash_item(SlashItem::new(ACCOUNT_2_PK.clone()))\n        .with_next_era_id(EraId::from(1))\n        .build();\n\n    assert!(builder.step(step_request).is_success(), \"should step\");\n\n    let maybe_post_state_hash = Some(builder.get_post_state_hash());\n\n    // should check total supply after step\n    let modified_total_supply = CLValue::try_from(\n        builder\n            .query(maybe_post_state_hash, total_supply_key.into(), &[])\n            .expect(\"should have total supply\"),\n    )\n    .expect(\"should be a CLValue\")\n    .into_t::<U512>()\n    .expect(\"should be U512\");\n\n    assert!(\n        modified_total_supply < starting_total_supply,\n        
\"total supply should be reduced due to slashing\"\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/storage_costs.rs",
    "content": "use std::collections::{BTreeMap, BTreeSet};\n\nuse num_rational::Ratio;\nuse num_traits::Zero;\nuse once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, UpgradeRequestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_ACCOUNT_PUBLIC_KEY, DEFAULT_PROTOCOL_VERSION, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{\n    bytesrepr::{Bytes, ToBytes},\n    contracts::{ContractHash, ContractPackage, ContractVersionKey},\n    runtime_args,\n    system::{\n        auction::{self, DelegationRate},\n        AUCTION,\n    },\n    AddressableEntityHash, BrTableCost, CLValue, ControlFlowCosts, EraId, Gas, Group, Groups,\n    HostFunctionCostsV1, HostFunctionCostsV2, Key, MessageLimits, OpcodeCosts, ProtocolVersion,\n    RuntimeArgs, StorageCosts, StoredValue, URef, WasmConfig, WasmV1Config, WasmV2Config,\n    DEFAULT_ADD_BID_COST, DEFAULT_MAX_STACK_HEIGHT, DEFAULT_WASM_MAX_MEMORY, U512,\n};\n\nconst DEFAULT_ACTIVATION_POINT: EraId = EraId::new(0);\nconst STORAGE_COSTS_NAME: &str = \"storage_costs.wasm\";\nconst SYSTEM_CONTRACT_HASHES_NAME: &str = \"system_contract_hashes.wasm\";\nconst DO_NOTHING_WASM: &str = \"do_nothing.wasm\";\nconst CONTRACT_KEY_NAME: &str = \"contract\";\n\nconst WRITE_FUNCTION_SMALL_NAME: &str = \"write_function_small\";\nconst WRITE_FUNCTION_LARGE_NAME: &str = \"write_function_large\";\nconst ADD_FUNCTION_SMALL_NAME: &str = \"add_function_small\";\nconst ADD_FUNCTION_LARGE_NAME: &str = \"add_function_large\";\nconst NEW_UREF_FUNCTION: &str = \"new_uref_function\";\nconst PUT_KEY_FUNCTION: &str = \"put_key_function\";\nconst REMOVE_KEY_FUNCTION: &str = \"remove_key_function\";\nconst CREATE_CONTRACT_PACKAGE_AT_HASH_FUNCTION: &str = \"create_contract_package_at_hash_function\";\nconst CREATE_CONTRACT_USER_GROUP_FUNCTION_FUNCTION: &str = \"create_contract_user_group_function\";\nconst PROVISION_UREFS_FUNCTION: &str = \"provision_urefs_function\";\nconst REMOVE_CONTRACT_USER_GROUP_FUNCTION: &str = 
\"remove_contract_user_group_function\";\nconst NEW_UREF_SUBCALL_FUNCTION: &str = \"new_uref_subcall\";\n\nconst WRITE_SMALL_VALUE: &[u8] = b\"1\";\nconst WRITE_LARGE_VALUE: &[u8] = b\"1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111\";\n\nconst ADD_SMALL_VALUE: u64 = 1;\nconst ADD_LARGE_VALUE: u64 = u64::MAX;\n\nconst NEW_OPCODE_COSTS: OpcodeCosts = OpcodeCosts {\n    bit: 0,\n    add: 0,\n    mul: 0,\n    div: 0,\n    load: 0,\n    store: 0,\n    op_const: 0,\n    local: 0,\n    global: 0,\n    control_flow: ControlFlowCosts {\n        block: 0,\n        op_loop: 0,\n        op_if: 0,\n        op_else: 0,\n        end: 0,\n        br: 0,\n        br_if: 0,\n        br_table: BrTableCost {\n            cost: 0,\n            size_multiplier: 0,\n        },\n        op_return: 0,\n        call: 0,\n        call_indirect: 0,\n        drop: 0,\n        select: 0,\n    },\n    integer_comparison: 0,\n    conversion: 0,\n    unreachable: 0,\n    nop: 0,\n    current_memory: 0,\n    grow_memory: 0,\n    sign: 0,\n};\n\nstatic NEW_HOST_FUNCTION_COSTS: Lazy<HostFunctionCostsV1> = Lazy::new(HostFunctionCostsV1::zero);\nstatic NEW_HOST_FUNCTION_COSTS_V2: Lazy<HostFunctionCostsV2> = Lazy::new(HostFunctionCostsV2::zero);\nstatic NO_COSTS_WASM_CONFIG: Lazy<WasmConfig> = Lazy::new(|| {\n    let wasm_v1_config = WasmV1Config::new(\n        DEFAULT_WASM_MAX_MEMORY,\n        DEFAULT_MAX_STACK_HEIGHT,\n        NEW_OPCODE_COSTS,\n        *NEW_HOST_FUNCTION_COSTS,\n    );\n    let wasm_v2_config = WasmV2Config::new(\n        DEFAULT_WASM_MAX_MEMORY,\n        NEW_OPCODE_COSTS,\n        *NEW_HOST_FUNCTION_COSTS_V2,\n    );\n    WasmConfig::new(MessageLimits::default(), wasm_v1_config, wasm_v2_config)\n});\n\nstatic NEW_PROTOCOL_VERSION: Lazy<ProtocolVersion> = Lazy::new(|| {\n    ProtocolVersion::from_parts(\n        DEFAULT_PROTOCOL_VERSION.value().major,\n        DEFAULT_PROTOCOL_VERSION.value().minor,\n        
DEFAULT_PROTOCOL_VERSION.value().patch + 1,\n    )\n});\n\n/*\nNOTE: in this test suite, to isolation specific micro function,\nwe are using specific costs that are not indicative of production values\n\nDo not interpret statements in this test suite as global statements of fact\nrather, they are self-reflective.\n\nFor instance, \"should not charge for x\" does not mean production usage would allow zero\ncost host interaction. It only means in this controlled setup we have isolated that value\nfor fine grained testing.\n*/\n\nfn initialize_isolated_storage_costs() -> LmdbWasmTestBuilder {\n    // This test runs a contract that's after every call extends the same key with\n    // more data\n    let mut builder = LmdbWasmTestBuilder::default();\n    //\n    // Isolate storage costs without host function costs, and without opcode costs\n    //\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let mut upgrade_request = UpgradeRequestBuilder::new()\n        .with_current_protocol_version(DEFAULT_PROTOCOL_VERSION)\n        .with_new_protocol_version(*NEW_PROTOCOL_VERSION)\n        .with_activation_point(DEFAULT_ACTIVATION_POINT)\n        .build();\n\n    let updated_chainspec = builder\n        .chainspec()\n        .clone()\n        .with_wasm_config(*NO_COSTS_WASM_CONFIG);\n\n    builder\n        .with_chainspec(updated_chainspec)\n        .upgrade(&mut upgrade_request);\n\n    builder\n}\n\n#[ignore]\n#[test]\nfn should_verify_isolate_host_side_payment_code_is_free() {\n    let mut builder = initialize_isolated_storage_costs();\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        DO_NOTHING_WASM,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n    let balance_before = builder.get_purse_balance(account.main_purse());\n\n    let proposer_reward_starting_balance = 
builder.get_proposer_purse_balance();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance;\n\n    let balance_after = builder.get_purse_balance(account.main_purse());\n\n    assert_eq!(\n        balance_after,\n        balance_before - transaction_fee,\n        \"balance before and after should match\"\n    );\n    assert_eq!(builder.last_exec_gas_consumed().value(), U512::zero());\n}\n\n#[ignore]\n#[test]\nfn should_verify_isolated_auction_storage_is_free() {\n    const BOND_AMOUNT: u64 = 42;\n    const DELEGATION_RATE: DelegationRate = 10;\n\n    let mut builder = initialize_isolated_storage_costs();\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        SYSTEM_CONTRACT_HASHES_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n    builder.exec(exec_request).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let bond_amount = U512::from(BOND_AMOUNT);\n\n    let exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        account\n            .named_keys()\n            .get(AUCTION)\n            .unwrap()\n            .into_entity_hash_addr()\n            .unwrap()\n            .into(),\n        auction::METHOD_ADD_BID,\n        runtime_args! 
{\n            auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            auction::ARG_AMOUNT => bond_amount,\n            auction::ARG_DELEGATION_RATE => DELEGATION_RATE,\n        },\n    )\n    .build();\n\n    let balance_before = builder.get_purse_balance(account.main_purse());\n\n    let proposer_reward_starting_balance = builder.get_proposer_purse_balance();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let balance_after = builder.get_purse_balance(account.main_purse());\n\n    let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance;\n\n    let expected = balance_before - bond_amount - transaction_fee;\n\n    assert_eq!(\n        balance_after,\n        expected,\n        \"before and after should match; off by: {}\",\n        expected - balance_after\n    );\n    assert_eq!(\n        builder.last_exec_gas_consumed().value(),\n        U512::from(DEFAULT_ADD_BID_COST)\n    );\n}\n\n#[ignore]\n#[test]\nfn should_measure_gas_cost_for_storage_usage_write() {\n    let cost_per_byte = U512::from(StorageCosts::default().gas_per_byte());\n\n    let mut builder = initialize_isolated_storage_costs();\n\n    let install_exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        STORAGE_COSTS_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(install_exec_request).expect_success().commit();\n\n    assert!(!builder.last_exec_gas_consumed().value().is_zero());\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let contract_hash: AddressableEntityHash = account\n        .named_keys()\n        .get(CONTRACT_KEY_NAME)\n        .expect(\"contract hash\")\n        .into_entity_hash()\n        .expect(\"should be hash\");\n\n    //\n    // Measure  small write\n    //\n\n    let small_write_function_cost = {\n        let mut builder_a = 
builder.clone();\n\n        let small_write_exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n            *DEFAULT_ACCOUNT_ADDR,\n            contract_hash,\n            WRITE_FUNCTION_SMALL_NAME,\n            RuntimeArgs::default(),\n        )\n        .build();\n\n        builder_a\n            .exec(small_write_exec_request)\n            .expect_success()\n            .commit();\n\n        builder_a.last_exec_gas_consumed()\n    };\n\n    let expected_small_write_data =\n        StoredValue::from(CLValue::from_t(Bytes::from(WRITE_SMALL_VALUE.to_vec())).unwrap());\n\n    let expected_small_cost = U512::from(expected_small_write_data.serialized_length());\n\n    let small_write_cost = Ratio::new(small_write_function_cost.value(), cost_per_byte);\n\n    assert_eq!(\n        small_write_cost.fract().to_integer(),\n        U512::zero(),\n        \"small cost does not divide without remainder\"\n    );\n    assert!(\n        small_write_cost.to_integer() >= expected_small_cost,\n        \"small write function call should cost at least the expected amount\"\n    );\n\n    //\n    // Measure large write\n    //\n\n    let large_write_function_cost = {\n        let mut builder_b = builder;\n\n        let large_write_exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n            *DEFAULT_ACCOUNT_ADDR,\n            contract_hash,\n            WRITE_FUNCTION_LARGE_NAME,\n            RuntimeArgs::default(),\n        )\n        .build();\n\n        builder_b\n            .exec(large_write_exec_request)\n            .expect_success()\n            .commit();\n\n        builder_b.last_exec_gas_consumed()\n    };\n\n    let expected_large_write_data =\n        StoredValue::from(CLValue::from_t(Bytes::from(WRITE_LARGE_VALUE.to_vec())).unwrap());\n    let expected_large_cost = U512::from(expected_large_write_data.serialized_length());\n\n    let large_write_cost = Ratio::new(large_write_function_cost.value(), cost_per_byte);\n\n    assert_eq!(\n        
large_write_cost.fract().to_integer(),\n        U512::zero(),\n        \"cost does not divide without remainder\"\n    );\n    assert!(\n        large_write_cost.to_integer() >= expected_large_cost,\n        \"difference between large and small cost at least the expected write amount {}\",\n        expected_large_cost,\n    );\n}\n\n#[ignore]\n#[test]\nfn should_measure_unisolated_gas_cost_for_storage_usage_write() {\n    let cost_per_byte = U512::from(StorageCosts::default().gas_per_byte());\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let install_exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        STORAGE_COSTS_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(install_exec_request).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let contract_hash: AddressableEntityHash = account\n        .named_keys()\n        .get(CONTRACT_KEY_NAME)\n        .expect(\"contract hash\")\n        .into_entity_hash()\n        .expect(\"should be hash\");\n\n    //\n    // Measure  small write\n    //\n\n    let small_write_function_cost = {\n        let mut builder_a = builder.clone();\n\n        let small_write_exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n            *DEFAULT_ACCOUNT_ADDR,\n            contract_hash,\n            WRITE_FUNCTION_SMALL_NAME,\n            RuntimeArgs::default(),\n        )\n        .build();\n\n        builder_a\n            .exec(small_write_exec_request)\n            .expect_success()\n            .commit();\n\n        builder_a.last_exec_gas_consumed()\n    };\n\n    let expected_small_write_data =\n        StoredValue::from(CLValue::from_t(Bytes::from(WRITE_SMALL_VALUE.to_vec())).unwrap());\n\n    let expected_small_cost = 
U512::from(expected_small_write_data.serialized_length());\n\n    let small_write_cost = Ratio::new(small_write_function_cost.value(), cost_per_byte);\n\n    assert_eq!(\n        small_write_cost.fract().to_integer(),\n        U512::zero(),\n        \"small cost does not divide without remainder\"\n    );\n    assert!(\n        small_write_cost.to_integer() >= expected_small_cost,\n        \"small write function call should cost at least the expected amount\"\n    );\n\n    //\n    // Measure large write\n    //\n\n    let large_write_function_cost = {\n        let mut builder_b = builder;\n\n        let large_write_exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n            *DEFAULT_ACCOUNT_ADDR,\n            contract_hash,\n            WRITE_FUNCTION_LARGE_NAME,\n            RuntimeArgs::default(),\n        )\n        .build();\n\n        builder_b\n            .exec(large_write_exec_request)\n            .expect_success()\n            .commit();\n\n        builder_b.last_exec_gas_consumed()\n    };\n\n    let expected_large_write_data =\n        StoredValue::from(CLValue::from_t(Bytes::from(WRITE_LARGE_VALUE.to_vec())).unwrap());\n    let expected_large_cost = U512::from(expected_large_write_data.serialized_length());\n\n    let large_write_cost = Ratio::new(large_write_function_cost.value(), cost_per_byte);\n\n    assert_eq!(\n        large_write_cost.fract().to_integer(),\n        U512::zero(),\n        \"cost does not divide without remainder\"\n    );\n    assert!(\n        large_write_cost.to_integer() >= expected_large_cost,\n        \"difference between large and small cost at least the expected write amount {}\",\n        expected_large_cost,\n    );\n}\n\n#[ignore]\n#[test]\nfn should_measure_gas_cost_for_storage_usage_add() {\n    let cost_per_byte = U512::from(StorageCosts::default().gas_per_byte());\n\n    let mut builder = initialize_isolated_storage_costs();\n\n    let install_exec_request = ExecuteRequestBuilder::standard(\n        
*DEFAULT_ACCOUNT_ADDR,\n        STORAGE_COSTS_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(install_exec_request).expect_success().commit();\n\n    // let mut builder_a = builder.clone();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let contract_hash: AddressableEntityHash = account\n        .named_keys()\n        .get(CONTRACT_KEY_NAME)\n        .expect(\"contract hash\")\n        .into_entity_hash()\n        .expect(\"should be hash\");\n\n    //\n    // Measure small add\n    //\n\n    let small_add_function_cost = {\n        let mut builder_a = builder.clone();\n\n        let small_add_exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n            *DEFAULT_ACCOUNT_ADDR,\n            contract_hash,\n            ADD_FUNCTION_SMALL_NAME,\n            RuntimeArgs::default(),\n        )\n        .build();\n\n        builder_a\n            .exec(small_add_exec_request)\n            .expect_success()\n            .commit();\n\n        builder_a.last_exec_gas_consumed()\n    };\n\n    let expected_small_add_data =\n        StoredValue::from(CLValue::from_t(U512::from(ADD_SMALL_VALUE)).unwrap());\n\n    let expected_small_cost = U512::from(expected_small_add_data.serialized_length());\n\n    let small_add_cost = Ratio::new(small_add_function_cost.value(), cost_per_byte);\n\n    assert_eq!(\n        small_add_cost.fract().to_integer(),\n        U512::zero(),\n        \"small cost does not divide without remainder\"\n    );\n    assert!(\n        small_add_cost.to_integer() >= expected_small_cost,\n        \"small write function call should cost at least the expected amount\"\n    );\n\n    //\n    // Measure large add\n    //\n\n    let large_add_function_cost = {\n        let mut builder_b = builder;\n\n        let large_write_exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n            
*DEFAULT_ACCOUNT_ADDR,\n            contract_hash,\n            ADD_FUNCTION_LARGE_NAME,\n            RuntimeArgs::default(),\n        )\n        .build();\n\n        builder_b\n            .exec(large_write_exec_request)\n            .expect_success()\n            .commit();\n\n        builder_b.last_exec_gas_consumed()\n    };\n\n    let expected_large_write_data =\n        StoredValue::from(CLValue::from_t(U512::from(ADD_LARGE_VALUE)).unwrap());\n    let expected_large_cost = U512::from(expected_large_write_data.serialized_length());\n\n    assert!(expected_large_cost > expected_small_cost);\n\n    let large_write_cost = Ratio::new(large_add_function_cost.value(), cost_per_byte);\n\n    assert_eq!(\n        large_write_cost.fract().to_integer(),\n        U512::zero(),\n        \"cost does not divide without remainder\"\n    );\n    assert!(\n        large_write_cost.to_integer() >= expected_large_cost,\n        \"difference between large and small cost at least the expected write amount {}\",\n        expected_large_cost,\n    );\n}\n\n#[ignore]\n#[test]\nfn should_measure_unisolated_gas_cost_for_storage_usage_add() {\n    let cost_per_byte = U512::from(StorageCosts::default().gas_per_byte());\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let install_exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        STORAGE_COSTS_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(install_exec_request).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let contract_hash: AddressableEntityHash = account\n        .named_keys()\n        .get(CONTRACT_KEY_NAME)\n        .expect(\"contract hash\")\n        .into_entity_hash()\n        .expect(\"should be hash\");\n\n    //\n    // Measure small add\n    //\n\n    
let small_add_function_cost = {\n        let mut builder_a = builder.clone();\n\n        let small_add_exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n            *DEFAULT_ACCOUNT_ADDR,\n            contract_hash,\n            ADD_FUNCTION_SMALL_NAME,\n            RuntimeArgs::default(),\n        )\n        .build();\n\n        builder_a\n            .exec(small_add_exec_request)\n            .expect_success()\n            .commit();\n\n        builder_a.last_exec_gas_consumed()\n    };\n\n    let expected_small_add_data =\n        StoredValue::from(CLValue::from_t(U512::from(ADD_SMALL_VALUE)).unwrap());\n\n    let expected_small_cost = U512::from(expected_small_add_data.serialized_length());\n\n    let small_add_cost = Ratio::new(small_add_function_cost.value(), cost_per_byte);\n\n    assert_eq!(\n        small_add_cost.fract().to_integer(),\n        U512::zero(),\n        \"small cost does not divide without remainder\"\n    );\n    assert!(\n        small_add_cost.to_integer() >= expected_small_cost,\n        \"small write function call should cost at least the expected amount\"\n    );\n\n    //\n    // Measure large add\n    //\n\n    let large_add_function_cost = {\n        let mut builder_b = builder;\n\n        let large_write_exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n            *DEFAULT_ACCOUNT_ADDR,\n            contract_hash,\n            ADD_FUNCTION_LARGE_NAME,\n            RuntimeArgs::default(),\n        )\n        .build();\n\n        builder_b\n            .exec(large_write_exec_request)\n            .expect_success()\n            .commit();\n\n        builder_b.last_exec_gas_consumed()\n    };\n\n    let expected_large_write_data =\n        StoredValue::from(CLValue::from_t(U512::from(ADD_LARGE_VALUE)).unwrap());\n    let expected_large_cost = U512::from(expected_large_write_data.serialized_length());\n\n    assert!(expected_large_cost > expected_small_cost);\n\n    let large_write_cost = 
Ratio::new(large_add_function_cost.value(), cost_per_byte);\n\n    assert_eq!(\n        large_write_cost.fract().to_integer(),\n        U512::zero(),\n        \"cost does not divide without remainder\"\n    );\n    assert!(\n        large_write_cost.to_integer() >= expected_large_cost,\n        \"difference between large and small cost at least the expected write amount {}\",\n        expected_large_cost,\n    );\n}\n\n#[ignore]\n#[test]\nfn should_verify_new_uref_storage_cost() {\n    let mut builder = initialize_isolated_storage_costs();\n\n    let install_exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        STORAGE_COSTS_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(install_exec_request).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let contract_hash: AddressableEntityHash = account\n        .named_keys()\n        .get(CONTRACT_KEY_NAME)\n        .expect(\"contract hash\")\n        .into_entity_hash()\n        .expect(\"should be hash\");\n\n    let exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        NEW_UREF_FUNCTION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    assert_eq!(\n        // should charge for storage of a u64 behind a URef\n        builder.last_exec_gas_consumed(),\n        StorageCosts::default().calculate_gas_cost(\n            StoredValue::CLValue(CLValue::from_t(0u64).expect(\"should create CLValue\"))\n                .serialized_length()\n        )\n    )\n}\n\n#[ignore]\n#[test]\nfn should_verify_put_key_is_charging_for_storage() {\n    let mut builder = initialize_isolated_storage_costs();\n\n    let install_exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n   
     STORAGE_COSTS_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(install_exec_request).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let contract_hash: AddressableEntityHash = account\n        .named_keys()\n        .get(CONTRACT_KEY_NAME)\n        .expect(\"contract hash\")\n        .into_entity_hash()\n        .expect(\"should be hash\");\n\n    let exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        PUT_KEY_FUNCTION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    assert_eq!(\n        // should charge for storage of a named key\n        builder.last_exec_gas_consumed(),\n        StorageCosts::default().calculate_gas_cost(\n            StoredValue::CLValue(\n                CLValue::from_t((\"new_key\".to_string(), Key::Hash([0u8; 32]))).unwrap()\n            )\n            .serialized_length()\n        ),\n    )\n}\n\n#[ignore]\n#[test]\nfn should_verify_remove_key_is_not_charging_for_storage() {\n    let mut builder = initialize_isolated_storage_costs();\n\n    let install_exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        STORAGE_COSTS_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(install_exec_request).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let contract_hash: AddressableEntityHash = account\n        .named_keys()\n        .get(CONTRACT_KEY_NAME)\n        .expect(\"contract hash\")\n        .into_entity_hash()\n        .expect(\"should be hash\");\n\n    let exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n        
*DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        REMOVE_KEY_FUNCTION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    if builder.chainspec().core_config.enable_addressable_entity {\n        assert_eq!(\n            // should charge zero, because we do not charge for storage when removing a key\n            builder.last_exec_gas_consumed(),\n            StorageCosts::default().calculate_gas_cost(0),\n        )\n    } else {\n        assert!(builder.last_exec_gas_consumed() > Gas::zero())\n    }\n}\n\n#[ignore]\n#[test]\nfn should_verify_create_contract_at_hash_is_charging_for_storage() {\n    let mut builder = initialize_isolated_storage_costs();\n\n    let install_exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        STORAGE_COSTS_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(install_exec_request).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let contract_hash: AddressableEntityHash = account\n        .named_keys()\n        .get(CONTRACT_KEY_NAME)\n        .expect(\"contract hash\")\n        .into_entity_hash()\n        .expect(\"should be hash\");\n\n    let exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        CREATE_CONTRACT_PACKAGE_AT_HASH_FUNCTION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    assert_eq!(\n        // should charge at least enough for storage of a package and unit CLValue (for a URef)\n        builder.last_exec_gas_consumed(),\n        StorageCosts::default().calculate_gas_cost(\n            StoredValue::ContractPackage(ContractPackage::default()).serialized_length()\n                + 
StoredValue::CLValue(CLValue::unit()).serialized_length()\n        )\n    )\n}\n\n#[ignore]\n#[test]\nfn should_verify_create_contract_user_group_is_charging_for_storage() {\n    let mut builder = initialize_isolated_storage_costs();\n\n    let install_exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        STORAGE_COSTS_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(install_exec_request).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let contract_hash: AddressableEntityHash = account\n        .named_keys()\n        .get(CONTRACT_KEY_NAME)\n        .expect(\"contract hash\")\n        .into_entity_hash()\n        .expect(\"should be hash\");\n\n    let exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        CREATE_CONTRACT_USER_GROUP_FUNCTION_FUNCTION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let mut groups = Groups::new();\n    groups.insert(Group::new(\"Label\"), BTreeSet::new());\n\n    let mut package = ContractPackage::new(\n        URef::default(),\n        [(ContractVersionKey::new(2, 1), ContractHash::new([0u8; 32]))]\n            .iter()\n            .cloned()\n            .collect::<BTreeMap<_, _>>(),\n        Default::default(),\n        groups,\n        Default::default(),\n    );\n\n    assert_eq!(\n        // should charge for storage of the new package\n        builder.last_exec_gas_consumed(),\n        StorageCosts::default()\n            .calculate_gas_cost(StoredValue::ContractPackage(package.clone()).serialized_length()),\n    );\n\n    let exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        PROVISION_UREFS_FUNCTION,\n        
RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    package\n        .groups_mut()\n        .get_mut(&Group::new(\"Label\"))\n        .unwrap()\n        .insert(URef::new([0u8; 32], Default::default()));\n\n    assert!(\n        // should charge for storage of the new package and a unit CLValue (for a URef)\n        builder.last_exec_gas_consumed() > Gas::zero()\n    );\n\n    let exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        REMOVE_CONTRACT_USER_GROUP_FUNCTION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    package.remove_group(&Group::new(\"Label\"));\n\n    assert!(\n        // should charge for storage of the new package\n        builder.last_exec_gas_consumed() > Gas::zero()\n    )\n}\n\n#[ignore]\n#[test]\nfn should_verify_subcall_new_uref_is_charging_for_storage() {\n    let mut builder = initialize_isolated_storage_costs();\n\n    let install_exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        STORAGE_COSTS_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(install_exec_request).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let contract_hash: AddressableEntityHash = account\n        .named_keys()\n        .get(CONTRACT_KEY_NAME)\n        .expect(\"contract hash\")\n        .into_entity_hash()\n        .expect(\"should be hash\");\n\n    let exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        CREATE_CONTRACT_USER_GROUP_FUNCTION_FUNCTION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let 
exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        PROVISION_UREFS_FUNCTION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        contract_hash,\n        NEW_UREF_SUBCALL_FUNCTION,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    assert_eq!(\n        // should charge for storage of a u64 behind a URef\n        builder.last_exec_gas_consumed(),\n        StorageCosts::default().calculate_gas_cost(\n            StoredValue::CLValue(CLValue::from_t(0u64).expect(\"should create CLValue\"))\n                .serialized_length()\n        )\n    )\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs",
    "content": "use assert_matches::assert_matches;\nuse num_traits::{One, Zero};\nuse once_cell::sync::Lazy;\nuse std::{\n    collections::{BTreeMap, BTreeSet},\n    iter::FromIterator,\n};\nuse tempfile::TempDir;\n\nuse casper_engine_test_support::{\n    genesis_config_builder::GenesisConfigBuilder, utils, ChainspecConfig, ExecuteRequestBuilder,\n    LmdbWasmTestBuilder, StepRequestBuilder, UpgradeRequestBuilder, DEFAULT_ACCOUNTS,\n    DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, DEFAULT_AUCTION_DELAY,\n    DEFAULT_CHAINSPEC_REGISTRY, DEFAULT_EXEC_CONFIG, DEFAULT_GENESIS_CONFIG_HASH,\n    DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS,\n    DEFAULT_MAXIMUM_DELEGATION_AMOUNT, DEFAULT_PROTOCOL_VERSION, DEFAULT_ROUND_SEIGNIORAGE_RATE,\n    DEFAULT_UNBONDING_DELAY, LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE, SYSTEM_ADDR,\n    TIMESTAMP_MILLIS_INCREMENT,\n};\nuse casper_execution_engine::{\n    engine_state::{engine_config::DEFAULT_MINIMUM_DELEGATION_AMOUNT, Error},\n    execution::ExecError,\n};\nuse casper_storage::data_access_layer::{AuctionMethod, GenesisRequest, HandleFeeMode};\n\nuse crate::lmdb_fixture;\nuse casper_types::{\n    self,\n    account::AccountHash,\n    api_error::ApiError,\n    runtime_args,\n    system::auction::{\n        self, BidAddr, BidKind, BidsExt, DelegationRate, DelegatorKind, EraValidators,\n        Error as AuctionError, UnbondKind, ValidatorWeights, ARG_AMOUNT, ARG_DELEGATION_RATE,\n        ARG_DELEGATOR, ARG_ENTRY_POINT, ARG_MAXIMUM_DELEGATION_AMOUNT,\n        ARG_MINIMUM_DELEGATION_AMOUNT, ARG_NEW_PUBLIC_KEY, ARG_NEW_VALIDATOR, ARG_PUBLIC_KEY,\n        ARG_REWARDS_MAP, ARG_VALIDATOR, ERA_ID_KEY, INITIAL_ERA_ID, METHOD_DISTRIBUTE,\n    },\n    EntityAddr, EraId, GenesisAccount, GenesisValidator, HoldBalanceHandling, Key, Motes,\n    ProtocolVersion, PublicKey, SecretKey, TransactionHash, DEFAULT_MINIMUM_BID_AMOUNT, U256, U512,\n};\n\nconst ARG_TARGET: &str = \"target\";\n\nconst 
CONTRACT_TRANSFER_TO_ACCOUNT: &str = \"transfer_to_account_u512.wasm\";\nconst CONTRACT_ACTIVATE_BID: &str = \"activate_bid.wasm\";\nconst CONTRACT_ADD_BID: &str = \"add_bid.wasm\";\nconst CONTRACT_WITHDRAW_BID: &str = \"withdraw_bid.wasm\";\nconst CONTRACT_DELEGATE: &str = \"delegate.wasm\";\nconst CONTRACT_UNDELEGATE: &str = \"undelegate.wasm\";\nconst CONTRACT_REDELEGATE: &str = \"redelegate.wasm\";\nconst CONTRACT_CHANGE_BID_PUBLIC_KEY: &str = \"change_bid_public_key.wasm\";\n\nconst TRANSFER_AMOUNT: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE + 1000;\n\nconst ADD_BID_AMOUNT_1: u64 = 95_000;\nconst ADD_BID_AMOUNT_2: u64 = 47_500;\nconst ADD_BID_AMOUNT_3: u64 = 200_000;\nconst ADD_BID_DELEGATION_RATE_1: DelegationRate = 10;\nconst BID_AMOUNT_2: u64 = 5_000;\nconst ADD_BID_DELEGATION_RATE_2: DelegationRate = 15;\nconst WITHDRAW_BID_AMOUNT_2: u64 = 15_000;\nconst ADD_BID_DELEGATION_RATE_3: DelegationRate = 20;\n\nconst DELEGATE_AMOUNT_1: u64 = 125_000 + DEFAULT_MINIMUM_DELEGATION_AMOUNT;\nconst DELEGATE_AMOUNT_2: u64 = 15_000 + DEFAULT_MINIMUM_DELEGATION_AMOUNT;\nconst UNDELEGATE_AMOUNT_1: u64 = 35_000;\nconst UNDELEGATE_AMOUNT_2: u64 = 5_000;\n\nconst SYSTEM_TRANSFER_AMOUNT: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE;\n\nconst WEEK_MILLIS: u64 = 7 * 24 * 60 * 60 * 1000;\n\nstatic NON_FOUNDER_VALIDATOR_1_PK: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic NON_FOUNDER_VALIDATOR_1_ADDR: Lazy<AccountHash> =\n    Lazy::new(|| AccountHash::from(&*NON_FOUNDER_VALIDATOR_1_PK));\n\nstatic NON_FOUNDER_VALIDATOR_2_PK: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([4; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic NON_FOUNDER_VALIDATOR_2_ADDR: Lazy<AccountHash> =\n    Lazy::new(|| AccountHash::from(&*NON_FOUNDER_VALIDATOR_2_PK));\n\nstatic NON_FOUNDER_VALIDATOR_3_PK: Lazy<PublicKey> = 
Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([5; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic NON_FOUNDER_VALIDATOR_3_ADDR: Lazy<AccountHash> =\n    Lazy::new(|| AccountHash::from(&*NON_FOUNDER_VALIDATOR_3_PK));\n\nstatic ACCOUNT_1_PK: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([200; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic ACCOUNT_1_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*ACCOUNT_1_PK));\nconst ACCOUNT_1_BALANCE: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE;\nconst ACCOUNT_1_BOND: u64 = 100_000;\n\nstatic ACCOUNT_2_PK: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([202; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic ACCOUNT_2_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*ACCOUNT_2_PK));\nconst ACCOUNT_2_BALANCE: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE;\nconst ACCOUNT_2_BOND: u64 = 200_000;\n\nstatic BID_ACCOUNT_1_PK: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([204; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic BID_ACCOUNT_1_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*BID_ACCOUNT_1_PK));\nconst BID_ACCOUNT_1_BALANCE: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE;\nconst BID_ACCOUNT_1_BOND: u64 = 200_000;\n\nstatic BID_ACCOUNT_2_PK: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([206; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic BID_ACCOUNT_2_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*BID_ACCOUNT_2_PK));\nconst BID_ACCOUNT_2_BALANCE: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE;\n\nstatic VALIDATOR_1: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap();\n    
PublicKey::from(&secret_key)\n});\nstatic DELEGATOR_1: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([205; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic DELEGATOR_2: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([207; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic VALIDATOR_1_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*VALIDATOR_1));\nstatic DELEGATOR_1_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*DELEGATOR_1));\nstatic DELEGATOR_2_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*DELEGATOR_2));\nconst VALIDATOR_1_STAKE: u64 = 1_000_000;\nconst DELEGATOR_1_STAKE: u64 = 1_500_000 + DEFAULT_MINIMUM_DELEGATION_AMOUNT;\nconst DELEGATOR_1_BALANCE: u64 = DEFAULT_ACCOUNT_INITIAL_BALANCE;\nconst DELEGATOR_2_STAKE: u64 = 2_000_000 + DEFAULT_MINIMUM_DELEGATION_AMOUNT;\nconst DELEGATOR_2_BALANCE: u64 = DEFAULT_ACCOUNT_INITIAL_BALANCE;\n\nconst VALIDATOR_1_DELEGATION_RATE: DelegationRate = 0;\n\nconst EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS: u64 =\n    DEFAULT_GENESIS_TIMESTAMP_MILLIS + CASPER_LOCKED_FUNDS_PERIOD_MILLIS;\n\nconst WEEK_TIMESTAMPS: [u64; 14] = [\n    EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS,\n    EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS + WEEK_MILLIS,\n    EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS + (WEEK_MILLIS * 2),\n    EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS + (WEEK_MILLIS * 3),\n    EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS + (WEEK_MILLIS * 4),\n    EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS + (WEEK_MILLIS * 5),\n    EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS + (WEEK_MILLIS * 6),\n    EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS + (WEEK_MILLIS * 7),\n    EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS + (WEEK_MILLIS * 8),\n    EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS + (WEEK_MILLIS * 9),\n    EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS + (WEEK_MILLIS * 10),\n    
EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS + (WEEK_MILLIS * 11),\n    EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS + (WEEK_MILLIS * 12),\n    EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS + (WEEK_MILLIS * 13),\n];\n\nconst DAY_MILLIS: u64 = 24 * 60 * 60 * 1000;\nconst CASPER_VESTING_SCHEDULE_PERIOD_MILLIS: u64 = 91 * DAY_MILLIS;\nconst CASPER_LOCKED_FUNDS_PERIOD_MILLIS: u64 = 90 * DAY_MILLIS;\n\n#[ignore]\n#[test]\nfn should_add_new_bid() {\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            BID_ACCOUNT_1_PK.clone(),\n            Motes::new(BID_ACCOUNT_1_BALANCE),\n            None,\n        );\n        tmp.push(account_1);\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! 
{\n            ARG_PUBLIC_KEY => BID_ACCOUNT_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let bids = builder.get_bids();\n\n    assert_eq!(bids.len(), 1);\n    let active_bid = bids.validator_bid(&BID_ACCOUNT_1_PK.clone()).unwrap();\n    assert_eq!(\n        builder.get_purse_balance(*active_bid.bonding_purse()),\n        U512::from(ADD_BID_AMOUNT_1)\n    );\n    assert_eq!(*active_bid.delegation_rate(), ADD_BID_DELEGATION_RATE_1);\n}\n\n#[ignore]\n#[test]\nfn should_add_new_bid_with_limits() {\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            BID_ACCOUNT_1_PK.clone(),\n            Motes::new(BID_ACCOUNT_1_BALANCE),\n            None,\n        );\n        tmp.push(account_1);\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n\n    let exec_request_0 = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => BID_ACCOUNT_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n            // Below global minimum.\n            ARG_MINIMUM_DELEGATION_AMOUNT => 1_000_000_000u64,\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_0).expect_failure();\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! 
{\n            ARG_PUBLIC_KEY => BID_ACCOUNT_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n            ARG_MINIMUM_DELEGATION_AMOUNT => 600_000_000_000u64,\n            ARG_MAXIMUM_DELEGATION_AMOUNT => 900_000_000_000u64,\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let bids = builder.get_bids();\n\n    assert_eq!(bids.len(), 1);\n    let active_bid = bids.validator_bid(&BID_ACCOUNT_1_PK.clone()).unwrap();\n    assert_eq!(\n        builder.get_purse_balance(*active_bid.bonding_purse()),\n        U512::from(ADD_BID_AMOUNT_1)\n    );\n    assert_eq!(*active_bid.delegation_rate(), ADD_BID_DELEGATION_RATE_1);\n}\n\n#[ignore]\n#[test]\nfn should_increase_existing_bid() {\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            BID_ACCOUNT_1_PK.clone(),\n            Motes::new(BID_ACCOUNT_1_BALANCE),\n            None,\n        );\n        tmp.push(account_1);\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => BID_ACCOUNT_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    // 2nd bid top-up\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! 
{\n            ARG_PUBLIC_KEY => BID_ACCOUNT_1_PK.clone(),\n            ARG_AMOUNT => U512::from(BID_AMOUNT_2),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_2,\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let bids = builder.get_bids();\n\n    assert_eq!(bids.len(), 1);\n\n    let active_bid = bids.validator_bid(&BID_ACCOUNT_1_PK.clone()).unwrap();\n    assert_eq!(\n        builder.get_purse_balance(*active_bid.bonding_purse()),\n        U512::from(ADD_BID_AMOUNT_1 + BID_AMOUNT_2)\n    );\n    assert_eq!(*active_bid.delegation_rate(), ADD_BID_DELEGATION_RATE_2);\n}\n\n#[ignore]\n#[test]\nfn should_decrease_existing_bid() {\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            BID_ACCOUNT_1_PK.clone(),\n            Motes::new(BID_ACCOUNT_1_BALANCE),\n            None,\n        );\n        tmp.push(account_1);\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n\n    let bid_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => BID_ACCOUNT_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n    builder.exec(bid_request).expect_success().commit();\n\n    // withdraw some amount\n    let withdraw_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_WITHDRAW_BID,\n        runtime_args! 
{\n            ARG_PUBLIC_KEY => BID_ACCOUNT_1_PK.clone(),\n            ARG_AMOUNT => U512::from(WITHDRAW_BID_AMOUNT_2),\n        },\n    )\n    .build();\n    builder.exec(withdraw_request).commit().expect_success();\n\n    let bids: Vec<_> = builder\n        .get_bids()\n        .into_iter()\n        .filter(|bid| !bid.is_unbond())\n        .collect();\n\n    assert_eq!(bids.len(), 1);\n\n    let active_bid = bids.validator_bid(&BID_ACCOUNT_1_PK.clone()).unwrap();\n    assert_eq!(\n        builder.get_purse_balance(*active_bid.bonding_purse()),\n        // Since we don't pay out immediately `WITHDRAW_BID_AMOUNT_2` is locked in unbonding queue\n        U512::from(ADD_BID_AMOUNT_1)\n    );\n    let unbonds = builder.get_unbonds();\n    let unbond_kind = UnbondKind::Validator(BID_ACCOUNT_1_PK.clone());\n    let unbonds = unbonds.get(&unbond_kind).expect(\"should have unbonded\");\n    let unbond = unbonds.first().expect(\"must have at least an unbond\");\n    assert_eq!(unbond.eras().len(), 1);\n    assert_eq!(unbond.unbond_kind(), &unbond_kind);\n    assert_eq!(unbond.validator_public_key(), &*BID_ACCOUNT_1_PK);\n\n    let era = unbond.eras().first().expect(\"should have era\");\n    // `WITHDRAW_BID_AMOUNT_2` is in unbonding list\n    assert_eq!(era.amount(), &U512::from(WITHDRAW_BID_AMOUNT_2),);\n    assert_eq!(era.era_of_creation(), INITIAL_ERA_ID,);\n}\n\n#[ignore]\n#[test]\nfn should_run_delegate_and_undelegate() {\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            BID_ACCOUNT_1_PK.clone(),\n            Motes::new(BID_ACCOUNT_1_BALANCE),\n            None,\n        );\n        tmp.push(account_1);\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n\n    let transfer_request_1 = 
ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let transfer_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    // non-founding validator request\n    let add_bid_request_1 = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    builder.exec(transfer_request_1).expect_success().commit();\n    builder.exec(transfer_request_2).expect_success().commit();\n    builder.exec(add_bid_request_1).expect_success().commit();\n\n    let auction_hash = builder.get_auction_contract_hash();\n\n    let bids: Vec<BidKind> = builder\n        .get_bids()\n        .into_iter()\n        .filter(|bid| !bid.is_unbond())\n        .collect();\n    assert_eq!(bids.len(), 1);\n    let active_bid = bids.validator_bid(&NON_FOUNDER_VALIDATOR_1_PK).unwrap();\n    assert_eq!(\n        builder.get_purse_balance(*active_bid.bonding_purse()),\n        U512::from(ADD_BID_AMOUNT_1)\n    );\n    assert_eq!(*active_bid.delegation_rate(), ADD_BID_DELEGATION_RATE_1);\n\n    let auction_key = Key::Hash(auction_hash.value());\n\n    let auction_stored_value = builder\n        .query(None, auction_key, &[])\n        .expect(\"should query auction hash\");\n    let _auction = auction_stored_value\n        .as_contract()\n        .expect(\"should be 
contract\");\n\n    //\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_1).commit().expect_success();\n\n    let bids: Vec<_> = builder\n        .get_bids()\n        .into_iter()\n        .filter(|bid| !bid.is_unbond())\n        .collect();\n    assert_eq!(bids.len(), 2);\n    let delegators = bids\n        .delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_1_PK)\n        .expect(\"should have delegators\");\n    assert_eq!(delegators.len(), 1);\n    let delegator = bids\n        .delegator_by_kind(\n            &NON_FOUNDER_VALIDATOR_1_PK,\n            &DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()),\n        )\n        .expect(\"should have account1 delegation\");\n    let delegated_amount_1 = delegator.staked_amount();\n    assert_eq!(delegated_amount_1, U512::from(DELEGATE_AMOUNT_1));\n\n    // 2nd bid top-up\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_2),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_2).commit().expect_success();\n\n    let bids: Vec<_> = builder\n        .get_bids()\n        .into_iter()\n        .filter(|bid| !bid.is_unbond())\n        .collect();\n    assert_eq!(bids.len(), 2);\n    let delegators = bids\n        .delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_1_PK)\n        .expect(\"should have delegators\");\n    assert_eq!(delegators.len(), 1);\n    let delegator = bids\n        .delegator_by_kind(\n            &NON_FOUNDER_VALIDATOR_1_PK,\n            &DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()),\n        )\n        .expect(\"should have account1 delegation\");\n    let delegated_amount_1 = delegator.staked_amount();\n    assert_eq!(\n        delegated_amount_1,\n        U512::from(DELEGATE_AMOUNT_1 + DELEGATE_AMOUNT_2)\n    );\n\n    let exec_request_3 = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_UNDELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(UNDELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n        },\n    )\n    .build();\n    builder.exec(exec_request_3).expect_success().commit();\n\n    let bids: Vec<_> = builder\n        .get_bids()\n        .into_iter()\n        .filter(|bid| !bid.is_unbond())\n        .collect();\n    assert_eq!(bids.len(), 2);\n    let delegators = bids\n        .delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_1_PK)\n        .expect(\"should have delegators\");\n    assert_eq!(delegators.len(), 1);\n    let delegator = bids\n        .delegator_by_kind(\n            &NON_FOUNDER_VALIDATOR_1_PK,\n            &DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()),\n        )\n        .expect(\"should have account1 delegation\");\n    let delegated_amount_1 = delegator.staked_amount();\n    assert_eq!(\n        delegated_amount_1,\n        U512::from(DELEGATE_AMOUNT_1 + DELEGATE_AMOUNT_2 - UNDELEGATE_AMOUNT_1)\n    );\n\n    let unbonding_purses = builder.get_unbonds();\n    assert_eq!(unbonding_purses.len(), 1);\n\n    let unbond_kind = UnbondKind::DelegatedPublicKey(BID_ACCOUNT_1_PK.clone());\n    let unbond = unbonding_purses\n        .get(&unbond_kind)\n        .expect(\"should have unbonding purse for non founder validator\");\n    let unbond = unbond.first().expect(\"must get unbond\");\n    assert_eq!(unbond.eras().len(), 1);\n    assert_eq!(unbond.validator_public_key(), &*NON_FOUNDER_VALIDATOR_1_PK);\n    assert_eq!(unbond.unbond_kind(), &unbond_kind);\n    assert!(!unbond.is_validator());\n    let era = unbond.eras().first().expect(\"should have era\");\n    assert_eq!(era.amount(), &U512::from(UNDELEGATE_AMOUNT_1));\n    assert_eq!(era.era_of_creation(), INITIAL_ERA_ID);\n}\n\n#[ignore]\n#[test]\nfn should_run_delegate_with_delegation_amount_limits() {\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = 
DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            BID_ACCOUNT_1_PK.clone(),\n            Motes::new(BID_ACCOUNT_1_BALANCE),\n            None,\n        );\n        tmp.push(account_1);\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n\n    let transfer_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    // non-founding validator request\n    let add_bid_request_1 = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n            ARG_MINIMUM_DELEGATION_AMOUNT => DELEGATE_AMOUNT_1,\n            ARG_MAXIMUM_DELEGATION_AMOUNT => DELEGATE_AMOUNT_1,\n        },\n    )\n    .build();\n\n    builder.exec(transfer_request).expect_success().commit();\n    builder.exec(add_bid_request_1).expect_success().commit();\n\n    let bids = builder.get_bids();\n    assert_eq!(bids.len(), 1);\n    let active_bid = bids.validator_bid(&NON_FOUNDER_VALIDATOR_1_PK).unwrap();\n    assert_eq!(\n        builder.get_purse_balance(*active_bid.bonding_purse()),\n        U512::from(ADD_BID_AMOUNT_1)\n    );\n    assert_eq!(*active_bid.delegation_rate(), ADD_BID_DELEGATION_RATE_1);\n\n    let exec_request_0 = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1 - 1),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_0).expect_failure();\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1 + 1),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_1).expect_failure();\n\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_2).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_forcibly_undelegate_after_setting_validator_limits() {\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(SYSTEM_TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_2_validator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_2),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => DELEGATOR_2.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        validator_1_fund_request,\n        delegator_1_fund_request,\n        delegator_2_fund_request,\n        validator_1_add_bid_request,\n        delegator_1_validator_1_delegate_request,\n        delegator_2_validator_1_delegate_request,\n    ];\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    // builder.advance_eras_by_default_auction_delay();\n\n    let bids: Vec<_> = builder\n        .get_bids()\n        .into_iter()\n        .filter(|bid| !bid.is_unbond())\n        .collect();\n    assert_eq!(bids.len(), 3);\n\n    let auction_delay = builder.get_auction_delay();\n    // new_era is the first era in the future where new era validator weights will be calculated\n    let new_era = INITIAL_ERA_ID + auction_delay + 1;\n    assert!(builder.get_validator_weights(new_era).is_none());\n    assert_eq!(\n        builder.get_validator_weights(new_era - 1).unwrap(),\n        builder.get_validator_weights(INITIAL_ERA_ID).unwrap()\n    );\n\n    builder.run_auction(\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS,\n        Vec::new(),\n    );\n\n    let validator_weights: ValidatorWeights = builder\n        .get_validator_weights(new_era)\n        .expect(\"should have first era validator weights\");\n\n    assert_eq!(\n        *validator_weights.get(&NON_FOUNDER_VALIDATOR_1_PK).unwrap(),\n        U512::from(ADD_BID_AMOUNT_1 + DELEGATE_AMOUNT_1 + DELEGATE_AMOUNT_2)\n    );\n\n    // set delegation limits\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n     
   *NON_FOUNDER_VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_AMOUNT => U512::from(1_000),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n            ARG_MINIMUM_DELEGATION_AMOUNT => DELEGATE_AMOUNT_2 + 1_000,  // 100\n            ARG_MAXIMUM_DELEGATION_AMOUNT => DELEGATE_AMOUNT_1 - 1_000,  // 1000\n        },\n    )\n    .build();\n\n    builder\n        .exec(validator_1_add_bid_request)\n        .expect_success()\n        .commit();\n\n    let bids: Vec<_> = builder\n        .get_bids()\n        .into_iter()\n        .filter(|bid| !bid.is_unbond())\n        .collect();\n    assert_eq!(bids.len(), 2);\n\n    let bids: Vec<_> = builder\n        .get_bids()\n        .into_iter()\n        .filter(|bid| !bid.is_unbond())\n        .collect();\n    assert_eq!(bids.len(), 2);\n\n    assert!(builder.get_validator_weights(new_era + 1).is_none());\n\n    builder.run_auction(\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS,\n        Vec::new(),\n    );\n\n    let validator_weights: ValidatorWeights = builder\n        .get_validator_weights(new_era + 1)\n        .expect(\"should have first era validator weights\");\n\n    assert_eq!(\n        *validator_weights.get(&NON_FOUNDER_VALIDATOR_1_PK).unwrap(),\n        // The validator has now bid ADD_BID_AMOUNT_1 + 1_000.\n        // Delegator 1's delegation has been decreased to the maximum of DELEGATE_AMOUNT_1 - 1_000.\n        // Delegator 2's delegation was below minimum, so it has been completely unbonded.\n        U512::from(ADD_BID_AMOUNT_1 + 1_000 + DELEGATE_AMOUNT_1 - 1_000)\n    );\n\n    let unbonding_purses = builder.get_unbonds();\n    let unbond_kind = UnbondKind::DelegatedPublicKey(DELEGATOR_1.clone());\n    let delegator_1 = unbonding_purses\n        .get(&unbond_kind)\n        .expect(\"should have delegator_1\")\n        .first()\n        .expect(\"must get 
unbond\");\n\n    let delegator_1_unbonding = delegator_1\n        .eras()\n        .first()\n        .expect(\"should have delegator_1 unbonding\");\n\n    let overage = 1_000;\n\n    assert_eq!(\n        delegator_1_unbonding.amount(),\n        &U512::from(overage),\n        \"expected delegator_1 amount to match\"\n    );\n\n    let unbond_kind = UnbondKind::DelegatedPublicKey(DELEGATOR_2.clone());\n    let delegator_2 = unbonding_purses\n        .get(&unbond_kind)\n        .expect(\"should have delegator_2\");\n\n    let delegator_2_unbonding = delegator_2\n        .first()\n        .expect(\"must have unbond\")\n        .eras()\n        .first()\n        .expect(\"should have era\");\n\n    assert_eq!(\n        delegator_2_unbonding.amount(),\n        &U512::from(DELEGATE_AMOUNT_2),\n        \"expected delegator_2 amount to match\"\n    );\n}\n\n#[ignore]\n#[test]\nfn should_not_allow_delegator_stake_range_during_vesting() {\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            BID_ACCOUNT_1_PK.clone(),\n            Motes::new(BID_ACCOUNT_1_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(BID_ACCOUNT_1_BOND),\n                DelegationRate::zero(),\n            )),\n        );\n        tmp.push(account_1);\n        tmp\n    };\n\n    let genesis_config = GenesisConfigBuilder::new()\n        .with_accounts(accounts)\n        .with_locked_funds_period_millis(1)\n        .build();\n\n    let run_genesis_request = GenesisRequest::new(\n        DEFAULT_GENESIS_CONFIG_HASH,\n        DEFAULT_PROTOCOL_VERSION,\n        genesis_config,\n        DEFAULT_CHAINSPEC_REGISTRY.clone(),\n    );\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n    // need to step past genesis era\n    builder.advance_era();\n\n    // attempt to change delegation limits\n    let validator_1_add_bid_request = 
ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => BID_ACCOUNT_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ACCOUNT_1_BOND),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n            ARG_MINIMUM_DELEGATION_AMOUNT => DEFAULT_MINIMUM_DELEGATION_AMOUNT,\n            ARG_MAXIMUM_DELEGATION_AMOUNT => DEFAULT_MAXIMUM_DELEGATION_AMOUNT - 2,\n        },\n    )\n    .build();\n\n    builder.exec(validator_1_add_bid_request).expect_failure();\n\n    let error = builder.get_error().expect(\"must have error\");\n    let err_str = format!(\"{}\", error);\n    assert!(\n        err_str.starts_with(\"ApiError::AuctionError(VestingLockout)\"),\n        \"should get vesting lockout error\"\n    );\n}\n\n#[ignore]\n#[test]\nfn should_allow_delegator_stake_range_change_if_no_vesting() {\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            BID_ACCOUNT_1_PK.clone(),\n            Motes::new(BID_ACCOUNT_1_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(BID_ACCOUNT_1_BOND),\n                DelegationRate::zero(),\n            )),\n        );\n        tmp.push(account_1);\n        tmp\n    };\n\n    let genesis_config = GenesisConfigBuilder::new()\n        .with_accounts(accounts)\n        .with_locked_funds_period_millis(0)\n        .build();\n\n    let run_genesis_request = GenesisRequest::new(\n        DEFAULT_GENESIS_CONFIG_HASH,\n        DEFAULT_PROTOCOL_VERSION,\n        genesis_config,\n        DEFAULT_CHAINSPEC_REGISTRY.clone(),\n    );\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n    // need to step past genesis era\n    builder.advance_era();\n\n    // attempt to change delegation limits\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        
*BID_ACCOUNT_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => BID_ACCOUNT_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ACCOUNT_1_BOND),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n            ARG_MINIMUM_DELEGATION_AMOUNT => DEFAULT_MINIMUM_DELEGATION_AMOUNT,\n            ARG_MAXIMUM_DELEGATION_AMOUNT => DEFAULT_MAXIMUM_DELEGATION_AMOUNT - 2,\n        },\n    )\n    .build();\n\n    builder\n        .exec(validator_1_add_bid_request)\n        .expect_success()\n        .commit();\n}\n\n#[ignore]\n#[test]\nfn should_calculate_era_validators() {\n    assert_ne!(*ACCOUNT_1_ADDR, *ACCOUNT_2_ADDR,);\n    assert_ne!(*ACCOUNT_2_ADDR, *BID_ACCOUNT_1_ADDR,);\n    assert_ne!(*ACCOUNT_2_ADDR, *DEFAULT_ACCOUNT_ADDR,);\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            ACCOUNT_1_PK.clone(),\n            Motes::new(ACCOUNT_1_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_1_BOND),\n                DelegationRate::zero(),\n            )),\n        );\n        let account_2 = GenesisAccount::account(\n            ACCOUNT_2_PK.clone(),\n            Motes::new(ACCOUNT_2_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_2_BOND),\n                DelegationRate::zero(),\n            )),\n        );\n        let account_3 = GenesisAccount::account(\n            BID_ACCOUNT_1_PK.clone(),\n            Motes::new(BID_ACCOUNT_1_BALANCE),\n            None,\n        );\n        tmp.push(account_1);\n        tmp.push(account_2);\n        tmp.push(account_3);\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n\n    let transfer_request_1 = ExecuteRequestBuilder::standard(\n        
*DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n    let transfer_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let auction_hash = builder.get_auction_contract_hash();\n    let bids = builder.get_bids();\n    assert_eq!(bids.len(), 2, \"founding validators {:?}\", bids);\n\n    // Verify first era validators\n    let first_validator_weights: ValidatorWeights = builder\n        .get_validator_weights(INITIAL_ERA_ID)\n        .expect(\"should have first era validator weights\");\n    assert_eq!(\n        first_validator_weights\n            .keys()\n            .cloned()\n            .collect::<BTreeSet<_>>(),\n        BTreeSet::from_iter(vec![ACCOUNT_1_PK.clone(), ACCOUNT_2_PK.clone()])\n    );\n\n    builder.exec(transfer_request_1).commit().expect_success();\n    builder.exec(transfer_request_2).commit().expect_success();\n\n    // non-founding validator request\n    let add_bid_request_1 = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! 
{\n            ARG_PUBLIC_KEY => BID_ACCOUNT_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    builder.exec(add_bid_request_1).commit().expect_success();\n\n    let pre_era_id: EraId = builder.get_value(EntityAddr::System(auction_hash.value()), ERA_ID_KEY);\n    assert_eq!(pre_era_id, EraId::from(0));\n\n    builder.run_auction(\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS,\n        Vec::new(),\n    );\n\n    let post_era_id: EraId =\n        builder.get_value(EntityAddr::System(auction_hash.value()), ERA_ID_KEY);\n    assert_eq!(post_era_id, EraId::from(1));\n\n    let era_validators: EraValidators = builder.get_era_validators();\n\n    // Check if there are no missing eras after the calculation, but we don't care about what the\n    // elements are\n    let auction_delay = builder.get_auction_delay();\n    let eras: Vec<_> = era_validators.keys().copied().collect();\n    assert!(!era_validators.is_empty());\n    assert!(era_validators.len() >= auction_delay as usize); // definitely more than 1 element\n    let (first_era, _) = era_validators.iter().min().unwrap();\n    let (last_era, _) = era_validators.iter().max().unwrap();\n    let expected_eras: Vec<EraId> = {\n        let lo: u64 = (*first_era).into();\n        let hi: u64 = (*last_era).into();\n        (lo..=hi).map(EraId::from).collect()\n    };\n    assert_eq!(eras, expected_eras, \"Eras {:?}\", eras);\n\n    assert!(post_era_id > EraId::from(0));\n    let consensus_next_era_id: EraId = post_era_id + auction_delay + 1;\n\n    let snapshot_size = auction_delay as usize + 2;\n    assert_eq!(\n        era_validators.len(),\n        snapshot_size,\n        \"era_id={} {:?}\",\n        consensus_next_era_id,\n        era_validators\n    ); // eraindex==1 - ran once\n\n    let lookup_era_id = consensus_next_era_id - 1;\n\n    let validator_weights = 
era_validators\n        .get(&lookup_era_id) // indexed from 0\n        .unwrap_or_else(|| {\n            panic!(\n                \"should have era_index=={} entry {:?}\",\n                consensus_next_era_id, era_validators\n            )\n        });\n    assert_eq!(\n        validator_weights.len(),\n        3,\n        \"{:?} {:?}\",\n        era_validators,\n        validator_weights\n    ); //2 genesis validators \"winners\"\n    assert_eq!(\n        validator_weights\n            .get(&BID_ACCOUNT_1_PK)\n            .expect(\"should have bid account in this era\"),\n        &U512::from(ADD_BID_AMOUNT_1)\n    );\n\n    // Check validator weights using the API\n    let era_validators_result = builder\n        .get_validator_weights(lookup_era_id)\n        .expect(\"should have validator weights\");\n    assert_eq!(era_validators_result, *validator_weights);\n\n    // Make sure looked up era validators are different than initial era validators\n    assert_ne!(era_validators_result, first_validator_weights);\n}\n\n#[ignore]\n#[test]\nfn should_get_first_seigniorage_recipients() {\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            ACCOUNT_1_PK.clone(),\n            Motes::new(ACCOUNT_1_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_1_BOND),\n                DelegationRate::zero(),\n            )),\n        );\n        let account_2 = GenesisAccount::account(\n            ACCOUNT_2_PK.clone(),\n            Motes::new(ACCOUNT_2_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_2_BOND),\n                DelegationRate::zero(),\n            )),\n        );\n        tmp.push(account_1);\n        tmp.push(account_2);\n        tmp\n    };\n\n    // We can't use `utils::create_run_genesis_request` as the snapshot used an auction delay of 3.\n    let auction_delay = 3;\n    let exec_config = 
GenesisConfigBuilder::new()\n        .with_accounts(accounts)\n        .with_auction_delay(auction_delay)\n        .with_locked_funds_period_millis(CASPER_LOCKED_FUNDS_PERIOD_MILLIS)\n        .build();\n    let run_genesis_request = GenesisRequest::new(\n        DEFAULT_GENESIS_CONFIG_HASH,\n        DEFAULT_PROTOCOL_VERSION,\n        exec_config,\n        DEFAULT_CHAINSPEC_REGISTRY.clone(),\n    );\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n\n    let transfer_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let bids = builder.get_bids();\n    assert_eq!(bids.len(), 2);\n\n    let founding_validator_1 = bids\n        .validator_bid(&ACCOUNT_1_PK)\n        .expect(\"should have account 1 pk\");\n    assert_eq!(\n        founding_validator_1\n            .vesting_schedule()\n            .map(|vesting_schedule| vesting_schedule.initial_release_timestamp_millis()),\n        Some(DEFAULT_GENESIS_TIMESTAMP_MILLIS + CASPER_LOCKED_FUNDS_PERIOD_MILLIS)\n    );\n\n    let founding_validator_2 = bids\n        .validator_bid(&ACCOUNT_2_PK)\n        .expect(\"should have account 2 pk\");\n    assert_eq!(\n        founding_validator_2\n            .vesting_schedule()\n            .map(|vesting_schedule| vesting_schedule.initial_release_timestamp_millis()),\n        Some(DEFAULT_GENESIS_TIMESTAMP_MILLIS + CASPER_LOCKED_FUNDS_PERIOD_MILLIS)\n    );\n\n    builder.exec(transfer_request_1).commit().expect_success();\n\n    // run_auction should be executed first\n    builder.run_auction(\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + CASPER_LOCKED_FUNDS_PERIOD_MILLIS,\n        Vec::new(),\n    );\n\n    let mut era_validators: EraValidators = builder.get_era_validators();\n    let auction_delay = 
builder.get_auction_delay();\n    let snapshot_size = auction_delay as usize + 2;\n\n    assert_eq!(era_validators.len(), snapshot_size, \"{:?}\", era_validators); // eraindex==1 - ran once\n\n    assert!(era_validators.contains_key(&(EraId::from(auction_delay).successor())));\n\n    let era_id = EraId::from(auction_delay);\n\n    let validator_weights = era_validators.remove(&era_id).unwrap_or_else(|| {\n        panic!(\n            \"should have era_index=={} entry {:?}\",\n            era_id, era_validators\n        )\n    });\n    // 2 genesis validators \"winners\" with non-zero bond\n    assert_eq!(validator_weights.len(), 2, \"{:?}\", validator_weights);\n    assert_eq!(\n        validator_weights.get(&ACCOUNT_1_PK).unwrap(),\n        &U512::from(ACCOUNT_1_BOND)\n    );\n    assert_eq!(\n        validator_weights.get(&ACCOUNT_2_PK).unwrap(),\n        &U512::from(ACCOUNT_2_BOND)\n    );\n\n    let first_validator_weights = builder\n        .get_validator_weights(era_id)\n        .expect(\"should have validator weights\");\n    assert_eq!(first_validator_weights, validator_weights);\n}\n\n#[ignore]\n#[test]\nfn should_release_founder_stake() {\n    const NEW_MINIMUM_DELEGATION_AMOUNT: u64 = 0;\n\n    // ACCOUNT_1_BOND / 14 = 7_142\n    const EXPECTED_WEEKLY_RELEASE: u64 = 7_142;\n\n    const EXPECTED_REMAINDER: u64 = 12;\n\n    const EXPECTED_LOCKED_AMOUNTS: [u64; 14] = [\n        92858, 85716, 78574, 71432, 64290, 57148, 50006, 42864, 35722, 28580, 21438, 14296, 7154, 0,\n    ];\n\n    let expected_locked_amounts: Vec<U512> = EXPECTED_LOCKED_AMOUNTS\n        .iter()\n        .cloned()\n        .map(U512::from)\n        .collect();\n\n    let expect_unbond_success = |builder: &mut LmdbWasmTestBuilder, amount: u64| {\n        let partial_unbond = ExecuteRequestBuilder::standard(\n            *ACCOUNT_1_ADDR,\n            CONTRACT_WITHDRAW_BID,\n            runtime_args! 
{\n                ARG_PUBLIC_KEY => ACCOUNT_1_PK.clone(),\n                ARG_AMOUNT => U512::from(amount),\n            },\n        )\n        .build();\n\n        builder.exec(partial_unbond).commit().expect_success();\n    };\n\n    let expect_unbond_failure = |builder: &mut LmdbWasmTestBuilder, amount: u64| {\n        let full_unbond = ExecuteRequestBuilder::standard(\n            *ACCOUNT_1_ADDR,\n            CONTRACT_WITHDRAW_BID,\n            runtime_args! {\n                ARG_PUBLIC_KEY => ACCOUNT_1_PK.clone(),\n                ARG_AMOUNT => U512::from(amount),\n            },\n        )\n        .build();\n\n        builder.exec(full_unbond).commit();\n\n        let error = builder\n            .get_last_exec_result()\n            .expect(\"should have last exec result\")\n            .error()\n            .cloned()\n            .expect(\"should have error\");\n        assert_matches!(\n            error,\n            Error::Exec(ExecError::Revert(ApiError::AuctionError(15)))\n        );\n    };\n\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            ACCOUNT_1_PK.clone(),\n            Motes::new(ACCOUNT_1_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_1_BOND),\n                DelegationRate::zero(),\n            )),\n        );\n        tmp.push(account_1);\n        tmp\n    };\n\n    //let run_genesis_request = utils::create_run_genesis_request(accounts);\n    let run_genesis_request = {\n        let exec_config = GenesisConfigBuilder::default()\n            .with_accounts(accounts)\n            .with_locked_funds_period_millis(CASPER_LOCKED_FUNDS_PERIOD_MILLIS)\n            .build();\n\n        GenesisRequest::new(\n            DEFAULT_GENESIS_CONFIG_HASH,\n            DEFAULT_PROTOCOL_VERSION,\n            exec_config,\n            DEFAULT_CHAINSPEC_REGISTRY.clone(),\n        )\n    };\n\n    let chainspec = 
ChainspecConfig::default()\n        .with_minimum_delegation_amount(NEW_MINIMUM_DELEGATION_AMOUNT)\n        .with_vesting_schedule_period_millis(CASPER_VESTING_SCHEDULE_PERIOD_MILLIS);\n\n    let mut builder = LmdbWasmTestBuilder::new_temporary_with_config(chainspec);\n\n    builder.run_genesis(run_genesis_request);\n\n    let fund_system_account = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE / 10)\n        },\n    )\n    .build();\n\n    builder.exec(fund_system_account).commit().expect_success();\n\n    // Check bid and its vesting schedule\n    {\n        let bids = builder.get_bids();\n        assert_eq!(bids.len(), 1);\n\n        let entry = bids.validator_bid(&ACCOUNT_1_PK).unwrap();\n        let vesting_schedule = entry.vesting_schedule().unwrap();\n\n        let initial_release = vesting_schedule.initial_release_timestamp_millis();\n        assert_eq!(initial_release, EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS);\n\n        let locked_amounts = vesting_schedule.locked_amounts().map(|arr| arr.to_vec());\n        assert!(locked_amounts.is_none());\n    }\n\n    builder.run_auction(DEFAULT_GENESIS_TIMESTAMP_MILLIS, Vec::new());\n\n    {\n        // Attempt unbond of one mote\n        expect_unbond_failure(&mut builder, u64::one());\n    }\n\n    builder.run_auction(WEEK_TIMESTAMPS[0], Vec::new());\n\n    // Check bid and its vesting schedule\n    {\n        let bids = builder.get_bids();\n        assert_eq!(bids.len(), 1);\n\n        let entry = bids.validator_bid(&ACCOUNT_1_PK).unwrap();\n        let vesting_schedule = entry.vesting_schedule().unwrap();\n\n        let initial_release = vesting_schedule.initial_release_timestamp_millis();\n        assert_eq!(initial_release, EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS);\n\n        let locked_amounts = 
vesting_schedule.locked_amounts().map(|arr| arr.to_vec());\n        assert_eq!(locked_amounts, Some(expected_locked_amounts));\n    }\n\n    let mut total_unbonded = 0;\n\n    {\n        // Attempt full unbond\n        expect_unbond_failure(&mut builder, ACCOUNT_1_BOND);\n\n        // Attempt unbond of released amount\n        expect_unbond_success(&mut builder, EXPECTED_WEEKLY_RELEASE);\n\n        total_unbonded += EXPECTED_WEEKLY_RELEASE;\n\n        assert_eq!(ACCOUNT_1_BOND - total_unbonded, EXPECTED_LOCKED_AMOUNTS[0])\n    }\n\n    for i in 1..13 {\n        // Run auction forward by almost a week\n        builder.run_auction(WEEK_TIMESTAMPS[i] - 1, Vec::new());\n\n        // Attempt unbond of 1 mote\n        expect_unbond_failure(&mut builder, u64::one());\n\n        // Run auction forward by one millisecond\n        builder.run_auction(WEEK_TIMESTAMPS[i], Vec::new());\n\n        // Attempt unbond of more than weekly release\n        expect_unbond_failure(&mut builder, EXPECTED_WEEKLY_RELEASE + 1);\n\n        // Attempt unbond of released amount\n        expect_unbond_success(&mut builder, EXPECTED_WEEKLY_RELEASE);\n\n        total_unbonded += EXPECTED_WEEKLY_RELEASE;\n\n        assert_eq!(ACCOUNT_1_BOND - total_unbonded, EXPECTED_LOCKED_AMOUNTS[i])\n    }\n\n    {\n        // Run auction forward by almost a week\n        builder.run_auction(WEEK_TIMESTAMPS[13] - 1, Vec::new());\n\n        // Attempt unbond of 1 mote\n        expect_unbond_failure(&mut builder, u64::one());\n\n        // Run auction forward by one millisecond\n        builder.run_auction(WEEK_TIMESTAMPS[13], Vec::new());\n\n        // Attempt unbond of released amount + remainder\n        expect_unbond_success(&mut builder, EXPECTED_WEEKLY_RELEASE + EXPECTED_REMAINDER);\n\n        total_unbonded += EXPECTED_WEEKLY_RELEASE + EXPECTED_REMAINDER;\n\n        assert_eq!(ACCOUNT_1_BOND - total_unbonded, EXPECTED_LOCKED_AMOUNTS[13])\n    }\n\n    assert_eq!(ACCOUNT_1_BOND, 
total_unbonded);\n}\n\n#[ignore]\n#[test]\nfn should_fail_to_get_era_validators() {\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            ACCOUNT_1_PK.clone(),\n            Motes::new(ACCOUNT_1_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_1_BOND),\n                DelegationRate::zero(),\n            )),\n        );\n        tmp.push(account_1);\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n\n    assert_eq!(\n        builder.get_validator_weights(EraId::MAX),\n        None,\n        \"should not have era validators for invalid era\"\n    );\n}\n\n#[ignore]\n#[test]\nfn should_use_era_validators_endpoint_for_first_era() {\n    let extra_accounts = vec![GenesisAccount::account(\n        ACCOUNT_1_PK.clone(),\n        Motes::new(ACCOUNT_1_BALANCE),\n        Some(GenesisValidator::new(\n            Motes::new(ACCOUNT_1_BOND),\n            DelegationRate::zero(),\n        )),\n    )];\n\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        tmp.extend(extra_accounts);\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n\n    let validator_weights = builder\n        .get_validator_weights(INITIAL_ERA_ID)\n        .expect(\"should have validator weights for era 0\");\n\n    assert_eq!(validator_weights.len(), 1);\n    assert_eq!(validator_weights[&ACCOUNT_1_PK], ACCOUNT_1_BOND.into());\n\n    let era_validators: EraValidators = builder.get_era_validators();\n    assert_eq!(era_validators[&EraId::from(0)], validator_weights);\n}\n\n#[ignore]\n#[test]\nfn 
should_calculate_era_validators_multiple_new_bids() {\n    assert_ne!(*ACCOUNT_1_ADDR, *ACCOUNT_2_ADDR,);\n    assert_ne!(*ACCOUNT_2_ADDR, *BID_ACCOUNT_1_ADDR,);\n    assert_ne!(*ACCOUNT_2_ADDR, *DEFAULT_ACCOUNT_ADDR,);\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            ACCOUNT_1_PK.clone(),\n            Motes::new(ACCOUNT_1_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_1_BOND),\n                DelegationRate::zero(),\n            )),\n        );\n        let account_2 = GenesisAccount::account(\n            ACCOUNT_2_PK.clone(),\n            Motes::new(ACCOUNT_2_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_2_BOND),\n                DelegationRate::zero(),\n            )),\n        );\n        let account_3 = GenesisAccount::account(\n            BID_ACCOUNT_1_PK.clone(),\n            Motes::new(BID_ACCOUNT_1_BALANCE),\n            None,\n        );\n        let account_4 = GenesisAccount::account(\n            BID_ACCOUNT_2_PK.clone(),\n            Motes::new(BID_ACCOUNT_2_BALANCE),\n            None,\n        );\n        tmp.push(account_1);\n        tmp.push(account_2);\n        tmp.push(account_3);\n        tmp.push(account_4);\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n\n    let genesis_validator_weights = builder\n        .get_validator_weights(INITIAL_ERA_ID)\n        .expect(\"should have genesis validators for initial era\");\n    let auction_delay = builder.get_auction_delay();\n    // new_era is the first era in the future where new era validator weights will be calculated\n    let new_era = INITIAL_ERA_ID + auction_delay + 1;\n    assert!(builder.get_validator_weights(new_era).is_none());\n    assert_eq!(\n   
     builder.get_validator_weights(new_era - 1).unwrap(),\n        builder.get_validator_weights(INITIAL_ERA_ID).unwrap()\n    );\n\n    assert_eq!(\n        genesis_validator_weights\n            .keys()\n            .cloned()\n            .collect::<BTreeSet<_>>(),\n        BTreeSet::from_iter(vec![ACCOUNT_1_PK.clone(), ACCOUNT_2_PK.clone()])\n    );\n\n    // Fund additional accounts\n    for target in &[\n        *SYSTEM_ADDR,\n        *NON_FOUNDER_VALIDATOR_1_ADDR,\n        *NON_FOUNDER_VALIDATOR_2_ADDR,\n    ] {\n        let transfer_request_1 = ExecuteRequestBuilder::standard(\n            *DEFAULT_ACCOUNT_ADDR,\n            CONTRACT_TRANSFER_TO_ACCOUNT,\n            runtime_args! {\n                ARG_TARGET => *target,\n                ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n            },\n        )\n        .build();\n        builder.exec(transfer_request_1).commit().expect_success();\n    }\n\n    // non-founding validator request\n    let add_bid_request_1 = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => BID_ACCOUNT_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n    let add_bid_request_2 = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_2_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! 
{\n            ARG_PUBLIC_KEY => BID_ACCOUNT_2_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_2),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_2,\n        },\n    )\n    .build();\n\n    builder.exec(add_bid_request_1).commit().expect_success();\n    builder.exec(add_bid_request_2).commit().expect_success();\n\n    // run auction and compute validators for new era\n    builder.run_auction(\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS,\n        Vec::new(),\n    );\n    // Verify first era validators\n    let new_validator_weights: ValidatorWeights = builder\n        .get_validator_weights(new_era)\n        .expect(\"should have first era validator weights\");\n\n    // check that the new computed era has exactly the state we expect\n    let lhs = new_validator_weights\n        .keys()\n        .cloned()\n        .collect::<BTreeSet<_>>();\n\n    let rhs = BTreeSet::from_iter(vec![\n        ACCOUNT_1_PK.clone(),\n        ACCOUNT_2_PK.clone(),\n        BID_ACCOUNT_1_PK.clone(),\n        BID_ACCOUNT_2_PK.clone(),\n    ]);\n\n    assert_eq!(lhs, rhs);\n\n    // make sure that new validators are exactly those that were part of add_bid requests\n    let new_validators: BTreeSet<_> = rhs\n        .difference(&genesis_validator_weights.keys().cloned().collect())\n        .cloned()\n        .collect();\n    assert_eq!(\n        new_validators,\n        BTreeSet::from_iter(vec![BID_ACCOUNT_1_PK.clone(), BID_ACCOUNT_2_PK.clone()])\n    );\n}\n\n#[ignore]\n#[test]\nfn undelegated_funds_should_be_released() {\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(SYSTEM_TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *BID_ACCOUNT_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        delegator_1_fund_request,\n        validator_1_fund_request,\n        validator_1_add_bid_request,\n        delegator_1_validator_1_delegate_request,\n    ];\n\n    let mut timestamp_millis =\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    for _ in 0..5 {\n        builder.run_auction(timestamp_millis, Vec::new());\n        timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n    }\n\n    let delegator_1_undelegate_purse = builder\n        .get_entity_by_account_hash(*BID_ACCOUNT_1_ADDR)\n        .expect(\"should have default account\")\n        .main_purse();\n\n    let delegator_1_undelegate_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_UNDELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(UNDELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n        },\n    )\n    .build();\n\n    builder\n        .exec(delegator_1_undelegate_request)\n        .commit()\n        .expect_success();\n\n    let delegator_1_purse_balance_before = builder.get_purse_balance(delegator_1_undelegate_purse);\n\n    let unbonding_delay = builder.get_unbonding_delay();\n\n    for _ in 0..=unbonding_delay {\n        let delegator_1_undelegate_purse_balance =\n            builder.get_purse_balance(delegator_1_undelegate_purse);\n        assert_eq!(\n            delegator_1_purse_balance_before,\n            delegator_1_undelegate_purse_balance\n        );\n\n        builder.run_auction(timestamp_millis, Vec::new());\n        timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n    }\n\n    let delegator_1_undelegate_purse_balance =\n        builder.get_purse_balance(delegator_1_undelegate_purse);\n    assert_eq!(\n        delegator_1_undelegate_purse_balance,\n        delegator_1_purse_balance_before + U512::from(UNDELEGATE_AMOUNT_1)\n    )\n}\n\n#[ignore]\n#[test]\nfn fully_undelegated_funds_should_be_released() {\n    const SYSTEM_TRANSFER_AMOUNT: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE;\n\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(SYSTEM_TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *BID_ACCOUNT_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        delegator_1_fund_request,\n        validator_1_fund_request,\n        validator_1_add_bid_request,\n        delegator_1_validator_1_delegate_request,\n    ];\n\n    let mut timestamp_millis =\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    for _ in 0..5 {\n        builder.run_auction(timestamp_millis, Vec::new());\n        timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n    }\n\n    let delegator_1_undelegate_purse = builder\n        .get_entity_by_account_hash(*BID_ACCOUNT_1_ADDR)\n        .expect(\"should have default account\")\n        .main_purse();\n\n    let delegator_1_undelegate_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_UNDELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n        },\n    )\n    .build();\n\n    builder\n        .exec(delegator_1_undelegate_request)\n        .commit()\n        .expect_success();\n\n    let delegator_1_purse_balance_before = builder.get_purse_balance(delegator_1_undelegate_purse);\n\n    let unbonding_delay = builder.get_unbonding_delay();\n\n    for _ in 0..=unbonding_delay {\n        let delegator_1_undelegate_purse_balance =\n            builder.get_purse_balance(delegator_1_undelegate_purse);\n        assert_eq!(\n            delegator_1_undelegate_purse_balance,\n            delegator_1_purse_balance_before\n        );\n        builder.run_auction(timestamp_millis, Vec::new());\n        timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n    }\n\n    let delegator_1_undelegate_purse_after =\n        builder.get_purse_balance(delegator_1_undelegate_purse);\n\n    assert_eq!(\n        delegator_1_undelegate_purse_after - delegator_1_purse_balance_before,\n        U512::from(DELEGATE_AMOUNT_1)\n    )\n}\n\n#[ignore]\n#[test]\nfn should_undelegate_delegators_when_validator_unbonds() {\n    const VALIDATOR_1_REMAINING_BID: u64 = DEFAULT_MINIMUM_BID_AMOUNT;\n    const VALIDATOR_1_WITHDRAW_AMOUNT: u64 = VALIDATOR_1_STAKE - VALIDATOR_1_REMAINING_BID;\n\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE),\n            ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_2_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_2_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_2.clone(),\n        },\n    )\n    .build();\n\n    let validator_1_partial_withdraw_bid = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_WITHDRAW_BID,\n        runtime_args! 
{\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n            ARG_AMOUNT => U512::from(VALIDATOR_1_WITHDRAW_AMOUNT),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        validator_1_fund_request,\n        delegator_1_fund_request,\n        delegator_2_fund_request,\n        validator_1_add_bid_request,\n        delegator_1_delegate_request,\n        delegator_2_delegate_request,\n        validator_1_partial_withdraw_bid,\n    ];\n\n    let mut timestamp_millis =\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    for _ in 0..5 {\n        builder.run_auction(timestamp_millis, Vec::new());\n        timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n    }\n\n    let bids_before = builder.get_bids();\n    let validator_1_bid = bids_before\n        .validator_bid(&VALIDATOR_1)\n        .expect(\"should have validator 1 bid\");\n    let delegators = bids_before\n        .delegators_by_validator_public_key(validator_1_bid.validator_public_key())\n        .expect(\"should have delegators\");\n    let delegator_kinds = delegators\n        .iter()\n        .map(|x| x.delegator_kind())\n        .cloned()\n        .collect::<BTreeSet<DelegatorKind>>();\n    assert_eq!(\n        delegator_kinds,\n        BTreeSet::from_iter(vec![\n            DelegatorKind::PublicKey(DELEGATOR_1.clone()),\n            DelegatorKind::PublicKey(DELEGATOR_2.clone())\n        ])\n    );\n\n    // Validator partially unbonds and only one entry is present\n    let unbonding_purses_before = builder.get_unbonds();\n    let unbond_kind = UnbondKind::Validator(VALIDATOR_1.clone());\n    let unbond = unbonding_purses_before[&unbond_kind]\n        .first()\n        .expect(\"must get 
unbond\");\n    assert_eq!(unbond.eras().len(), 1);\n    let unbond = &unbonding_purses_before[&unbond_kind]\n        .first()\n        .expect(\"must have unbond\");\n    assert_eq!(\n        unbond.unbond_kind(),\n        &UnbondKind::Validator(VALIDATOR_1.clone())\n    );\n\n    let validator_1_withdraw_bid = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_WITHDRAW_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n            ARG_AMOUNT => U512::from(VALIDATOR_1_REMAINING_BID),\n        },\n    )\n    .build();\n\n    builder\n        .exec(validator_1_withdraw_bid)\n        .commit()\n        .expect_success();\n\n    let bids_after = builder.get_bids();\n    assert!(bids_after.validator_bid(&VALIDATOR_1).is_none());\n\n    let unbonding_purses_after = builder.get_unbonds();\n    assert_ne!(unbonding_purses_after, unbonding_purses_before);\n\n    let unbond_kind = UnbondKind::Validator(VALIDATOR_1.clone());\n    let validator1 = unbonding_purses_after\n        .get(&unbond_kind)\n        .expect(\"should have validator1\")\n        .first()\n        .expect(\"must have unbond\");\n\n    let validator1_unbonding = validator1.eras().first().expect(\"should have eras\");\n\n    assert_eq!(\n        validator1_unbonding.amount(),\n        &U512::from(VALIDATOR_1_WITHDRAW_AMOUNT),\n        \"expected validator1 amount to match\"\n    );\n\n    let unbond_kind = UnbondKind::DelegatedPublicKey(DELEGATOR_1.clone());\n    let delegator1 = unbonding_purses_after\n        .get(&unbond_kind)\n        .expect(\"should have delegator1\")\n        .first()\n        .expect(\"must have unbond\");\n\n    let delegator1_unbonding = delegator1.eras().first().expect(\"should have eras\");\n\n    assert_eq!(\n        delegator1_unbonding.amount(),\n        &U512::from(DELEGATOR_1_STAKE),\n        \"expected delegator1 amount to match\"\n    );\n\n    let unbond_kind = 
UnbondKind::DelegatedPublicKey(DELEGATOR_2.clone());\n    let delegator2 = unbonding_purses_after\n        .get(&unbond_kind)\n        .expect(\"should have delegator2\")\n        .first()\n        .expect(\"must have unbond\");\n\n    let delegator2_unbonding = delegator2.eras().first().expect(\"should have eras\");\n\n    assert_eq!(\n        delegator2_unbonding.amount(),\n        &U512::from(DELEGATOR_2_STAKE),\n        \"expected delegator2 amount to match\"\n    );\n\n    // Process unbonding requests to verify delegators recevied their stakes\n    let validator_1 = builder\n        .get_entity_by_account_hash(*VALIDATOR_1_ADDR)\n        .expect(\"should have validator 1 account\");\n    let validator_1_balance_before = builder.get_purse_balance(validator_1.main_purse());\n\n    let delegator_1 = builder\n        .get_entity_by_account_hash(*DELEGATOR_1_ADDR)\n        .expect(\"should have delegator 1 account\");\n    let delegator_1_balance_before = builder.get_purse_balance(delegator_1.main_purse());\n\n    let delegator_2 = builder\n        .get_entity_by_account_hash(*DELEGATOR_2_ADDR)\n        .expect(\"should have delegator 1 account\");\n    let delegator_2_balance_before = builder.get_purse_balance(delegator_2.main_purse());\n\n    for _ in 0..=DEFAULT_UNBONDING_DELAY {\n        builder.run_auction(timestamp_millis, Vec::new());\n        timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n    }\n\n    let validator_1_balance_after = builder.get_purse_balance(validator_1.main_purse());\n    let delegator_1_balance_after = builder.get_purse_balance(delegator_1.main_purse());\n    let delegator_2_balance_after = builder.get_purse_balance(delegator_2.main_purse());\n\n    assert_eq!(\n        validator_1_balance_before + U512::from(VALIDATOR_1_STAKE),\n        validator_1_balance_after\n    );\n    assert_eq!(\n        delegator_1_balance_before + U512::from(DELEGATOR_1_STAKE),\n        delegator_1_balance_after\n    );\n    assert_eq!(\n        
delegator_2_balance_before + U512::from(DELEGATOR_2_STAKE),\n        delegator_2_balance_after\n    );\n}\n\n#[ignore]\n#[test]\nfn should_undelegate_delegators_when_validator_fully_unbonds() {\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE),\n            ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_2_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_2_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_2.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        validator_1_fund_request,\n        delegator_1_fund_request,\n        delegator_2_fund_request,\n        validator_1_add_bid_request,\n        delegator_1_delegate_request,\n        delegator_2_delegate_request,\n    ];\n\n    let mut timestamp_millis =\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    // Fully unbond\n    let validator_1_withdraw_bid = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_WITHDRAW_BID,\n        runtime_args! 
{\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n            ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE),\n        },\n    )\n    .build();\n\n    builder\n        .exec(validator_1_withdraw_bid)\n        .commit()\n        .expect_success();\n\n    let bids_after = builder.get_bids();\n    assert!(bids_after.validator_bid(&VALIDATOR_1).is_none());\n\n    let unbonding_purses_before = builder.get_unbonds();\n\n    let unbond_kind = UnbondKind::Validator(VALIDATOR_1.clone());\n    let validator_1_era = unbonding_purses_before\n        .get(&unbond_kind)\n        .expect(\"should have unbonding purse\")\n        .first()\n        .expect(\"must have unbond\")\n        .eras()\n        .first()\n        .expect(\"should have era\");\n\n    let unbond_kind = UnbondKind::DelegatedPublicKey(DELEGATOR_1.clone());\n    let delegator_1_unbonding_purse = unbonding_purses_before\n        .get(&unbond_kind)\n        .expect(\"should have unbonding purse entry\")\n        .first()\n        .expect(\"must have unbond\")\n        .eras()\n        .first()\n        .expect(\"should have unbonding purse\");\n\n    let unbond_kind = UnbondKind::DelegatedPublicKey(DELEGATOR_2.clone());\n    let delegator_2_unbonding_purse = unbonding_purses_before\n        .get(&unbond_kind)\n        .expect(\"should have unbonding purse entry\")\n        .first()\n        .expect(\"must have unbond\")\n        .eras()\n        .first()\n        .expect(\"should have unbonding purse\");\n\n    assert_eq!(validator_1_era.amount(), &U512::from(VALIDATOR_1_STAKE));\n    assert_eq!(\n        delegator_1_unbonding_purse.amount(),\n        &U512::from(DELEGATOR_1_STAKE)\n    );\n    assert_eq!(\n        delegator_2_unbonding_purse.amount(),\n        &U512::from(DELEGATOR_2_STAKE)\n    );\n\n    // Process unbonding requests to verify delegators received their stakes\n    let validator_1 = builder\n        .get_entity_by_account_hash(*VALIDATOR_1_ADDR)\n        .expect(\"should have validator 1 
account\");\n    let validator_1_balance_before = builder.get_purse_balance(validator_1.main_purse());\n\n    let delegator_1 = builder\n        .get_entity_by_account_hash(*DELEGATOR_1_ADDR)\n        .expect(\"should have delegator 1 account\");\n    let delegator_1_balance_before = builder.get_purse_balance(delegator_1.main_purse());\n\n    let delegator_2 = builder\n        .get_entity_by_account_hash(*DELEGATOR_2_ADDR)\n        .expect(\"should have delegator 1 account\");\n    let delegator_2_balance_before = builder.get_purse_balance(delegator_2.main_purse());\n\n    for _ in 0..=DEFAULT_UNBONDING_DELAY {\n        builder.run_auction(timestamp_millis, Vec::new());\n        timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n    }\n\n    let validator_1_balance_after = builder.get_purse_balance(validator_1.main_purse());\n    let delegator_1_balance_after = builder.get_purse_balance(delegator_1.main_purse());\n    let delegator_2_balance_after = builder.get_purse_balance(delegator_2.main_purse());\n\n    assert_eq!(\n        validator_1_balance_before + U512::from(VALIDATOR_1_STAKE),\n        validator_1_balance_after\n    );\n    assert_eq!(\n        delegator_1_balance_before + U512::from(DELEGATOR_1_STAKE),\n        delegator_1_balance_after\n    );\n    assert_eq!(\n        delegator_2_balance_before + U512::from(DELEGATOR_2_STAKE),\n        delegator_2_balance_after\n    );\n}\n\n#[ignore]\n#[test]\nfn should_undelegate_delegators_when_validator_unbonds_below_minimum_bid_amount() {\n    const VALIDATOR_1_REMAINING_BID: u64 = DEFAULT_MINIMUM_BID_AMOUNT - 1;\n    const VALIDATOR_1_WITHDRAW_AMOUNT: u64 = VALIDATOR_1_STAKE - VALIDATOR_1_REMAINING_BID;\n\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE),\n            ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_2_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATOR_2_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_2.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        validator_1_fund_request,\n        delegator_1_fund_request,\n        delegator_2_fund_request,\n        validator_1_add_bid_request,\n        delegator_1_delegate_request,\n        delegator_2_delegate_request,\n    ];\n\n    let mut timestamp_millis = DEFAULT_GENESIS_TIMESTAMP_MILLIS;\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    // Try to unbond partially. Stake would fall below minimum bid which should force a full\n    // unbonding.\n    let validator_1_withdraw_bid = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_WITHDRAW_BID,\n        runtime_args! 
{\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n            ARG_AMOUNT => U512::from(VALIDATOR_1_WITHDRAW_AMOUNT),\n        },\n    )\n    .build();\n\n    builder\n        .exec(validator_1_withdraw_bid)\n        .commit()\n        .expect_success();\n\n    let bids_after = builder.get_bids();\n    assert!(bids_after.validator_bid(&VALIDATOR_1).is_none());\n\n    let unbonding_purses_before = builder.get_unbonds();\n\n    let unbond_kind = UnbondKind::Validator(VALIDATOR_1.clone());\n    let validator_1_era = unbonding_purses_before\n        .get(&unbond_kind)\n        .expect(\"should have unbonding purse\")\n        .first()\n        .expect(\"must have unbond\")\n        .eras()\n        .first()\n        .expect(\"should have era\");\n\n    let unbond_kind = UnbondKind::DelegatedPublicKey(DELEGATOR_1.clone());\n    let delegator_1_unbonding_purse = unbonding_purses_before\n        .get(&unbond_kind)\n        .expect(\"should have unbonding purse entry\")\n        .first()\n        .expect(\"must have unbond\")\n        .eras()\n        .first()\n        .expect(\"should have unbonding purse\");\n\n    let unbond_kind = UnbondKind::DelegatedPublicKey(DELEGATOR_2.clone());\n    let delegator_2_unbonding_purse = unbonding_purses_before\n        .get(&unbond_kind)\n        .expect(\"should have unbonding purse entry\")\n        .first()\n        .expect(\"must have unbond\")\n        .eras()\n        .first()\n        .expect(\"should have unbonding purse\");\n\n    assert_eq!(validator_1_era.amount(), &U512::from(VALIDATOR_1_STAKE));\n    assert_eq!(\n        delegator_1_unbonding_purse.amount(),\n        &U512::from(DELEGATOR_1_STAKE)\n    );\n    assert_eq!(\n        delegator_2_unbonding_purse.amount(),\n        &U512::from(DELEGATOR_2_STAKE)\n    );\n\n    // Process unbonding requests to verify delegators received their stakes\n    let validator_1 = builder\n        .get_entity_by_account_hash(*VALIDATOR_1_ADDR)\n        .expect(\"should have 
validator 1 account\");\n    let validator_1_balance_before = builder.get_purse_balance(validator_1.main_purse());\n\n    let delegator_1 = builder\n        .get_entity_by_account_hash(*DELEGATOR_1_ADDR)\n        .expect(\"should have delegator 1 account\");\n    let delegator_1_balance_before = builder.get_purse_balance(delegator_1.main_purse());\n\n    let delegator_2 = builder\n        .get_entity_by_account_hash(*DELEGATOR_2_ADDR)\n        .expect(\"should have delegator 1 account\");\n    let delegator_2_balance_before = builder.get_purse_balance(delegator_2.main_purse());\n\n    for _ in 0..=DEFAULT_UNBONDING_DELAY {\n        builder.run_auction(timestamp_millis, Vec::new());\n        timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n    }\n\n    let validator_1_balance_after = builder.get_purse_balance(validator_1.main_purse());\n    let delegator_1_balance_after = builder.get_purse_balance(delegator_1.main_purse());\n    let delegator_2_balance_after = builder.get_purse_balance(delegator_2.main_purse());\n\n    assert_eq!(\n        validator_1_balance_before + U512::from(VALIDATOR_1_STAKE),\n        validator_1_balance_after\n    );\n    assert_eq!(\n        delegator_1_balance_before + U512::from(DELEGATOR_1_STAKE),\n        delegator_1_balance_after\n    );\n    assert_eq!(\n        delegator_2_balance_before + U512::from(DELEGATOR_2_STAKE),\n        delegator_2_balance_after\n    );\n}\n\n#[ignore]\n#[test]\nfn should_handle_evictions() {\n    let activate_bid = |builder: &mut LmdbWasmTestBuilder, validator_public_key: PublicKey| {\n        const ARG_VALIDATOR: &str = \"validator\";\n        let run_request = ExecuteRequestBuilder::standard(\n            AccountHash::from(&validator_public_key),\n            CONTRACT_ACTIVATE_BID,\n            runtime_args! 
{\n                ARG_VALIDATOR => validator_public_key,\n            },\n        )\n        .build();\n        builder.exec(run_request).expect_success().commit();\n    };\n\n    let latest_validators = |builder: &mut LmdbWasmTestBuilder| {\n        let era_validators: EraValidators = builder.get_era_validators();\n        let validators = era_validators\n            .iter()\n            .next_back()\n            .map(|(_era_id, validators)| validators)\n            .expect(\"should have validators\");\n        validators.keys().cloned().collect::<BTreeSet<PublicKey>>()\n    };\n\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            ACCOUNT_1_PK.clone(),\n            Motes::new(ACCOUNT_1_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_1_BOND),\n                DelegationRate::zero(),\n            )),\n        );\n        let account_2 = GenesisAccount::account(\n            ACCOUNT_2_PK.clone(),\n            Motes::new(ACCOUNT_2_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_2_BOND),\n                DelegationRate::zero(),\n            )),\n        );\n        let account_3 = GenesisAccount::account(\n            BID_ACCOUNT_1_PK.clone(),\n            Motes::new(BID_ACCOUNT_1_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(300_000),\n                DelegationRate::zero(),\n            )),\n        );\n        let account_4 = GenesisAccount::account(\n            BID_ACCOUNT_2_PK.clone(),\n            Motes::new(BID_ACCOUNT_2_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(400_000),\n                DelegationRate::zero(),\n            )),\n        );\n        tmp.push(account_1);\n        tmp.push(account_2);\n        tmp.push(account_3);\n        tmp.push(account_4);\n        tmp\n    };\n\n    let system_fund_request = 
ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            \"target\" => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(SYSTEM_TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let mut timestamp = DEFAULT_GENESIS_TIMESTAMP_MILLIS;\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n\n    builder.exec(system_fund_request).expect_success().commit();\n\n    // No evictions\n    builder.run_auction(timestamp, Vec::new());\n    timestamp += WEEK_MILLIS;\n\n    assert_eq!(\n        latest_validators(&mut builder),\n        BTreeSet::from_iter(vec![\n            ACCOUNT_1_PK.clone(),\n            ACCOUNT_2_PK.clone(),\n            BID_ACCOUNT_1_PK.clone(),\n            BID_ACCOUNT_2_PK.clone(),\n        ])\n    );\n\n    // Evict BID_ACCOUNT_1_PK and BID_ACCOUNT_2_PK\n    builder.run_auction(\n        timestamp,\n        vec![BID_ACCOUNT_1_PK.clone(), BID_ACCOUNT_2_PK.clone()],\n    );\n    timestamp += WEEK_MILLIS;\n\n    assert_eq!(\n        latest_validators(&mut builder),\n        BTreeSet::from_iter(vec![ACCOUNT_1_PK.clone(), ACCOUNT_2_PK.clone()])\n    );\n\n    // Activate BID_ACCOUNT_1_PK\n    activate_bid(&mut builder, BID_ACCOUNT_1_PK.clone());\n    builder.run_auction(timestamp, Vec::new());\n    timestamp += WEEK_MILLIS;\n\n    assert_eq!(\n        latest_validators(&mut builder),\n        BTreeSet::from_iter(vec![\n            ACCOUNT_1_PK.clone(),\n            ACCOUNT_2_PK.clone(),\n            BID_ACCOUNT_1_PK.clone(),\n        ])\n    );\n\n    // Activate BID_ACCOUNT_2_PK\n    activate_bid(&mut builder, BID_ACCOUNT_2_PK.clone());\n    builder.run_auction(timestamp, Vec::new());\n    timestamp += WEEK_MILLIS;\n\n    assert_eq!(\n        latest_validators(&mut builder),\n        BTreeSet::from_iter(vec![\n            
ACCOUNT_1_PK.clone(),\n            ACCOUNT_2_PK.clone(),\n            BID_ACCOUNT_1_PK.clone(),\n            BID_ACCOUNT_2_PK.clone(),\n        ])\n    );\n\n    // Evict all validators\n    builder.run_auction(\n        timestamp,\n        vec![\n            ACCOUNT_1_PK.clone(),\n            ACCOUNT_2_PK.clone(),\n            BID_ACCOUNT_1_PK.clone(),\n            BID_ACCOUNT_2_PK.clone(),\n        ],\n    );\n    timestamp += WEEK_MILLIS;\n\n    assert_eq!(latest_validators(&mut builder), BTreeSet::new());\n\n    // Activate all validators\n    for validator in &[\n        ACCOUNT_1_PK.clone(),\n        ACCOUNT_2_PK.clone(),\n        BID_ACCOUNT_1_PK.clone(),\n        BID_ACCOUNT_2_PK.clone(),\n    ] {\n        activate_bid(&mut builder, validator.clone());\n    }\n    builder.run_auction(timestamp, Vec::new());\n\n    assert_eq!(\n        latest_validators(&mut builder),\n        BTreeSet::from_iter(vec![\n            ACCOUNT_1_PK.clone(),\n            ACCOUNT_2_PK.clone(),\n            BID_ACCOUNT_1_PK.clone(),\n            BID_ACCOUNT_2_PK.clone(),\n        ])\n    );\n}\n\n#[should_panic(expected = \"OrphanedDelegator\")]\n#[ignore]\n#[test]\nfn should_validate_orphaned_genesis_delegators() {\n    let missing_validator_secret_key = SecretKey::ed25519_from_bytes([123; 32]).unwrap();\n    let missing_validator = PublicKey::from(&missing_validator_secret_key);\n\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            ACCOUNT_1_PK.clone(),\n            Motes::new(ACCOUNT_1_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_1_BOND),\n                DelegationRate::zero(),\n            )),\n        );\n        let account_2 = GenesisAccount::account(\n            ACCOUNT_2_PK.clone(),\n            Motes::new(ACCOUNT_2_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_2_BOND),\n                
DelegationRate::zero(),\n            )),\n        );\n        let delegator_1 = GenesisAccount::delegator(\n            ACCOUNT_1_PK.clone(),\n            DELEGATOR_1.clone(),\n            Motes::new(DELEGATOR_1_BALANCE),\n            Motes::new(DELEGATOR_1_STAKE),\n        );\n        let orphaned_delegator = GenesisAccount::delegator(\n            missing_validator,\n            DELEGATOR_1.clone(),\n            Motes::new(DELEGATOR_1_BALANCE),\n            Motes::new(DELEGATOR_1_STAKE),\n        );\n        tmp.push(account_1);\n        tmp.push(account_2);\n        tmp.push(delegator_1);\n        tmp.push(orphaned_delegator);\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n}\n\n#[should_panic(expected = \"DuplicatedDelegatorEntry\")]\n#[ignore]\n#[test]\nfn should_validate_duplicated_genesis_delegators() {\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            ACCOUNT_1_PK.clone(),\n            Motes::new(ACCOUNT_1_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_1_BOND),\n                DelegationRate::zero(),\n            )),\n        );\n        let account_2 = GenesisAccount::account(\n            ACCOUNT_2_PK.clone(),\n            Motes::new(ACCOUNT_2_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_2_BOND),\n                DelegationRate::zero(),\n            )),\n        );\n        let delegator_1 = GenesisAccount::delegator(\n            ACCOUNT_1_PK.clone(),\n            DELEGATOR_1.clone(),\n            Motes::new(DELEGATOR_1_BALANCE),\n            Motes::new(DELEGATOR_1_STAKE),\n        );\n        let duplicated_delegator_1 = GenesisAccount::delegator(\n            ACCOUNT_1_PK.clone(),\n            
DELEGATOR_1.clone(),\n            Motes::new(DELEGATOR_1_BALANCE),\n            Motes::new(DELEGATOR_1_STAKE),\n        );\n        let duplicated_delegator_2 = GenesisAccount::delegator(\n            ACCOUNT_1_PK.clone(),\n            DELEGATOR_2.clone(),\n            Motes::new(DELEGATOR_2_BALANCE),\n            Motes::new(DELEGATOR_2_STAKE),\n        );\n        tmp.push(account_1);\n        tmp.push(account_2);\n        tmp.push(delegator_1);\n        tmp.push(duplicated_delegator_1);\n        tmp.push(duplicated_delegator_2);\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n}\n\n#[should_panic(expected = \"InvalidDelegationRate\")]\n#[ignore]\n#[test]\nfn should_validate_delegation_rate_of_genesis_validator() {\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            ACCOUNT_1_PK.clone(),\n            Motes::new(ACCOUNT_1_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_1_BOND),\n                DelegationRate::MAX,\n            )),\n        );\n        tmp.push(account_1);\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n}\n\n#[should_panic(expected = \"InvalidBondAmount\")]\n#[ignore]\n#[test]\nfn should_validate_bond_amount_of_genesis_validator() {\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            ACCOUNT_1_PK.clone(),\n            Motes::new(ACCOUNT_1_BALANCE),\n            Some(GenesisValidator::new(Motes::zero(), DelegationRate::zero())),\n        );\n        tmp.push(account_1);\n        tmp\n    };\n\n    
let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n}\n\n#[ignore]\n#[test]\nfn should_setup_genesis_delegators() {\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            ACCOUNT_1_PK.clone(),\n            Motes::new(ACCOUNT_1_BALANCE),\n            Some(GenesisValidator::new(Motes::new(ACCOUNT_1_BOND), 80)),\n        );\n        let account_2 = GenesisAccount::account(\n            ACCOUNT_2_PK.clone(),\n            Motes::new(ACCOUNT_2_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_2_BOND),\n                DelegationRate::zero(),\n            )),\n        );\n        let delegator_1 = GenesisAccount::delegator(\n            ACCOUNT_1_PK.clone(),\n            DELEGATOR_1.clone(),\n            Motes::new(DELEGATOR_1_BALANCE),\n            Motes::new(DELEGATOR_1_STAKE),\n        );\n        tmp.push(account_1);\n        tmp.push(account_2);\n        tmp.push(delegator_1);\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n\n    let _account_1 = builder\n        .get_entity_by_account_hash(*ACCOUNT_1_ADDR)\n        .expect(\"should install account 1\");\n    let _account_2 = builder\n        .get_entity_by_account_hash(*ACCOUNT_2_ADDR)\n        .expect(\"should install account 2\");\n\n    let delegator_1 = builder\n        .get_entity_by_account_hash(*DELEGATOR_1_ADDR)\n        .expect(\"should install delegator 1\");\n    assert_eq!(\n        builder.get_purse_balance(delegator_1.main_purse()),\n        U512::from(DELEGATOR_1_BALANCE)\n    );\n\n    let bids = builder.get_bids();\n    let key_map = bids.delegator_map();\n    let validator_keys = 
key_map.keys().cloned().collect::<BTreeSet<_>>();\n    assert_eq!(\n        validator_keys,\n        BTreeSet::from_iter(vec![ACCOUNT_1_PK.clone(), ACCOUNT_2_PK.clone()])\n    );\n\n    let account_1_bid_entry = bids\n        .validator_bid(&ACCOUNT_1_PK)\n        .expect(\"should have account 1 bid\");\n    assert_eq!(*account_1_bid_entry.delegation_rate(), 80);\n    let delegators = bids\n        .delegators_by_validator_public_key(&ACCOUNT_1_PK)\n        .expect(\"should have delegators\");\n    assert_eq!(delegators.len(), 1);\n    let delegator = delegators.first().expect(\"should have delegator\");\n    assert_eq!(\n        delegator.delegator_kind(),\n        &DelegatorKind::PublicKey(DELEGATOR_1.clone()),\n        \"should be DELEGATOR_1\"\n    );\n    assert_eq!(delegator.staked_amount(), U512::from(DELEGATOR_1_STAKE));\n}\n\n#[ignore]\n#[test]\nfn should_not_partially_undelegate_uninitialized_vesting_schedule() {\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let validator_1 = GenesisAccount::account(\n            VALIDATOR_1.clone(),\n            Motes::new(VALIDATOR_1_STAKE),\n            Some(GenesisValidator::new(\n                Motes::new(VALIDATOR_1_STAKE),\n                DelegationRate::zero(),\n            )),\n        );\n        let delegator_1 = GenesisAccount::delegator(\n            VALIDATOR_1.clone(),\n            DELEGATOR_1.clone(),\n            Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n            Motes::new(DELEGATOR_1_STAKE),\n        );\n        tmp.push(validator_1);\n        tmp.push(delegator_1);\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n\n    let fund_delegator_account = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE)\n        },\n    )\n    .build();\n    builder\n        .exec(fund_delegator_account)\n        .commit()\n        .expect_success();\n\n    let partial_undelegate = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_UNDELEGATE,\n        runtime_args! {\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE - 1),\n        },\n    )\n    .build();\n\n    builder.exec(partial_undelegate).commit();\n    let error = builder\n        .get_last_exec_result()\n        .expect(\"should have last exec result\")\n        .error()\n        .cloned()\n        .expect(\"should have error\");\n\n    assert!(matches!(\n        error,\n        Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error)))\n        if auction_error == auction::Error::DelegatorFundsLocked as u8\n    ));\n}\n\n#[ignore]\n#[test]\nfn should_not_fully_undelegate_uninitialized_vesting_schedule() {\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let validator_1 = GenesisAccount::account(\n            VALIDATOR_1.clone(),\n            Motes::new(VALIDATOR_1_STAKE),\n            Some(GenesisValidator::new(\n                Motes::new(VALIDATOR_1_STAKE),\n                DelegationRate::zero(),\n            )),\n        );\n        let delegator_1 = GenesisAccount::delegator(\n            VALIDATOR_1.clone(),\n            DELEGATOR_1.clone(),\n            Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n            Motes::new(DELEGATOR_1_STAKE),\n        );\n        tmp.push(validator_1);\n        tmp.push(delegator_1);\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    
builder.run_genesis(run_genesis_request);\n\n    let fund_delegator_account = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE)\n        },\n    )\n    .build();\n    builder\n        .exec(fund_delegator_account)\n        .commit()\n        .expect_success();\n\n    let full_undelegate = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_UNDELEGATE,\n        runtime_args! {\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n        },\n    )\n    .build();\n\n    builder.exec(full_undelegate).commit();\n    let error = builder\n        .get_last_exec_result()\n        .expect(\"should have last exec result\")\n        .error()\n        .cloned()\n        .expect(\"should have error\");\n\n    assert!(matches!(\n        error,\n        Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error)))\n        if auction_error == auction::Error::DelegatorFundsLocked as u8\n    ));\n}\n\n#[ignore]\n#[test]\nfn should_not_undelegate_vfta_holder_stake() {\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let validator_1 = GenesisAccount::account(\n            VALIDATOR_1.clone(),\n            Motes::new(VALIDATOR_1_STAKE),\n            Some(GenesisValidator::new(\n                Motes::new(VALIDATOR_1_STAKE),\n                DelegationRate::zero(),\n            )),\n        );\n        let delegator_1 = GenesisAccount::delegator(\n            VALIDATOR_1.clone(),\n            DELEGATOR_1.clone(),\n            Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n            Motes::new(DELEGATOR_1_STAKE),\n        );\n        tmp.push(validator_1);\n        tmp.push(delegator_1);\n        
tmp\n    };\n\n    let run_genesis_request = {\n        let exec_config = GenesisConfigBuilder::default()\n            .with_accounts(accounts)\n            .with_locked_funds_period_millis(CASPER_LOCKED_FUNDS_PERIOD_MILLIS)\n            .build();\n\n        GenesisRequest::new(\n            DEFAULT_GENESIS_CONFIG_HASH,\n            DEFAULT_PROTOCOL_VERSION,\n            exec_config,\n            DEFAULT_CHAINSPEC_REGISTRY.clone(),\n        )\n    };\n    let chainspec = ChainspecConfig::default()\n        .with_vesting_schedule_period_millis(CASPER_VESTING_SCHEDULE_PERIOD_MILLIS);\n\n    let mut builder = LmdbWasmTestBuilder::new_temporary_with_config(chainspec);\n\n    builder.run_genesis(run_genesis_request);\n\n    let post_genesis_requests = {\n        let fund_delegator_account = ExecuteRequestBuilder::standard(\n            *DEFAULT_ACCOUNT_ADDR,\n            CONTRACT_TRANSFER_TO_ACCOUNT,\n            runtime_args! {\n                ARG_TARGET => *DELEGATOR_1_ADDR,\n                ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE)\n            },\n        )\n        .build();\n\n        let fund_system_account = ExecuteRequestBuilder::standard(\n            *DEFAULT_ACCOUNT_ADDR,\n            CONTRACT_TRANSFER_TO_ACCOUNT,\n            runtime_args! 
{\n                ARG_TARGET => *SYSTEM_ADDR,\n                ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE)\n            },\n        )\n        .build();\n\n        vec![fund_system_account, fund_delegator_account]\n    };\n\n    for post_genesis_request in post_genesis_requests {\n        builder.exec(post_genesis_request).commit().expect_success();\n    }\n\n    {\n        let bids = builder.get_bids();\n        let delegator = bids\n            .delegator_by_kind(&VALIDATOR_1, &DelegatorKind::PublicKey(DELEGATOR_1.clone()))\n            .expect(\"should have delegator\");\n        let vesting_schedule = delegator\n            .vesting_schedule()\n            .expect(\"should have delegator vesting schedule\");\n        assert!(\n            vesting_schedule.locked_amounts().is_none(),\n            \"should not be locked\"\n        );\n    }\n\n    builder.run_auction(WEEK_TIMESTAMPS[0], Vec::new());\n\n    {\n        let bids = builder.get_bids();\n        let delegator = bids\n            .delegator_by_kind(&VALIDATOR_1, &DelegatorKind::PublicKey(DELEGATOR_1.clone()))\n            .expect(\"should have delegator\");\n        let vesting_schedule = delegator\n            .vesting_schedule()\n            .expect(\"should have vesting schedule\");\n        assert!(\n            vesting_schedule.locked_amounts().is_some(),\n            \"should be locked\"\n        );\n    }\n\n    let partial_unbond = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_UNDELEGATE,\n        runtime_args! 
{\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE - 1),\n        },\n    )\n    .build();\n    builder.exec(partial_unbond).commit();\n    let error = builder\n        .get_last_exec_result()\n        .expect(\"should have last exec result\")\n        .error()\n        .cloned()\n        .expect(\"should have error\");\n\n    assert!(matches!(\n        error,\n        Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error)))\n        if auction_error == auction::Error::DelegatorFundsLocked as u8\n    ));\n}\n\n#[ignore]\n#[test]\nfn should_release_vfta_holder_stake() {\n    const EXPECTED_WEEKLY_RELEASE: u64 =\n        (DELEGATOR_1_STAKE - DEFAULT_MINIMUM_DELEGATION_AMOUNT) / 14;\n    const DELEGATOR_VFTA_STAKE: u64 = DELEGATOR_1_STAKE - DEFAULT_MINIMUM_DELEGATION_AMOUNT;\n    const EXPECTED_REMAINDER: u64 = 12;\n    const NEW_MINIMUM_DELEGATION_AMOUNT: u64 = 0;\n    const EXPECTED_LOCKED_AMOUNTS: [u64; 14] = [\n        1392858, 1285716, 1178574, 1071432, 964290, 857148, 750006, 642864, 535722, 428580, 321438,\n        214296, 107154, 0,\n    ];\n\n    let expected_locked_amounts: Vec<U512> = EXPECTED_LOCKED_AMOUNTS\n        .iter()\n        .cloned()\n        .map(U512::from)\n        .collect();\n\n    let expect_undelegate_success = |builder: &mut LmdbWasmTestBuilder, amount: u64| {\n        let partial_unbond = ExecuteRequestBuilder::standard(\n            *DELEGATOR_1_ADDR,\n            CONTRACT_UNDELEGATE,\n            runtime_args! 
{\n                ARG_VALIDATOR => ACCOUNT_1_PK.clone(),\n                ARG_DELEGATOR => DELEGATOR_1.clone(),\n                ARG_AMOUNT => U512::from(amount),\n            },\n        )\n        .build();\n\n        builder.exec(partial_unbond).commit().expect_success();\n    };\n\n    let expect_undelegate_failure = |builder: &mut LmdbWasmTestBuilder, amount: u64| {\n        let full_undelegate = ExecuteRequestBuilder::standard(\n            *DELEGATOR_1_ADDR,\n            CONTRACT_UNDELEGATE,\n            runtime_args! {\n                ARG_VALIDATOR => ACCOUNT_1_PK.clone(),\n                ARG_DELEGATOR => DELEGATOR_1.clone(),\n                ARG_AMOUNT => U512::from(amount),\n            },\n        )\n        .build();\n\n        builder.exec(full_undelegate).commit();\n\n        let error = builder\n            .get_last_exec_result()\n            .expect(\"should have last exec result\")\n            .error()\n            .cloned()\n            .expect(\"should have error\");\n\n        assert!(\n            matches!(\n                error,\n                Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error)))\n                if auction_error == auction::Error::DelegatorFundsLocked as u8\n            ),\n            \"{:?}\",\n            error\n        );\n    };\n\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            ACCOUNT_1_PK.clone(),\n            Motes::new(ACCOUNT_1_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_1_BOND),\n                DelegationRate::zero(),\n            )),\n        );\n        let delegator_1 = GenesisAccount::delegator(\n            ACCOUNT_1_PK.clone(),\n            DELEGATOR_1.clone(),\n            Motes::new(DELEGATOR_1_BALANCE),\n            Motes::new(DELEGATOR_VFTA_STAKE),\n        );\n        tmp.push(account_1);\n        tmp.push(delegator_1);\n        
tmp\n    };\n\n    let run_genesis_request = {\n        let genesis_config = GenesisConfigBuilder::default()\n            .with_accounts(accounts)\n            .with_locked_funds_period_millis(CASPER_LOCKED_FUNDS_PERIOD_MILLIS)\n            .build();\n\n        GenesisRequest::new(\n            DEFAULT_GENESIS_CONFIG_HASH,\n            DEFAULT_PROTOCOL_VERSION,\n            genesis_config,\n            DEFAULT_CHAINSPEC_REGISTRY.clone(),\n        )\n    };\n\n    let chainspec = ChainspecConfig::default()\n        .with_vesting_schedule_period_millis(CASPER_VESTING_SCHEDULE_PERIOD_MILLIS)\n        .with_minimum_delegation_amount(NEW_MINIMUM_DELEGATION_AMOUNT);\n\n    let mut builder = LmdbWasmTestBuilder::new_temporary_with_config(chainspec);\n\n    builder.run_genesis(run_genesis_request);\n\n    let fund_delegator_account = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE)\n        },\n    )\n    .build();\n    builder\n        .exec(fund_delegator_account)\n        .commit()\n        .expect_success();\n\n    let fund_system_account = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE)\n        },\n    )\n    .build();\n\n    builder.exec(fund_system_account).commit().expect_success();\n\n    // Check bid and its vesting schedule\n    {\n        let bids = builder.get_bids();\n        assert_eq!(bids.len(), 2);\n        let delegator = bids\n            .delegator_by_kind(\n                &ACCOUNT_1_PK,\n                &DelegatorKind::PublicKey(DELEGATOR_1.clone()),\n            )\n            .expect(\"should have delegator\");\n\n        let vesting_schedule = delegator\n            .vesting_schedule()\n            .expect(\"should have delegator vesting schedule\");\n\n        let initial_release = vesting_schedule.initial_release_timestamp_millis();\n        assert_eq!(initial_release, EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS);\n\n        let locked_amounts = vesting_schedule.locked_amounts().map(|arr| arr.to_vec());\n        assert!(locked_amounts.is_none());\n    }\n\n    builder.run_auction(DEFAULT_GENESIS_TIMESTAMP_MILLIS, Vec::new());\n\n    {\n        // Attempt unbond of one mote\n        expect_undelegate_failure(&mut builder, u64::one());\n    }\n\n    builder.run_auction(WEEK_TIMESTAMPS[0], Vec::new());\n\n    // Check bid and its vesting schedule\n    {\n        let bids = builder.get_bids();\n        assert_eq!(bids.len(), 2);\n        let delegator = bids\n            .delegator_by_kind(\n                &ACCOUNT_1_PK,\n                &DelegatorKind::PublicKey(DELEGATOR_1.clone()),\n            )\n            .expect(\"should have delegator\");\n\n        let vesting_schedule = delegator\n            .vesting_schedule()\n            .expect(\"should have delegator vesting schedule\");\n\n        let initial_release = vesting_schedule.initial_release_timestamp_millis();\n        assert_eq!(initial_release, EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS);\n\n        let locked_amounts = 
vesting_schedule.locked_amounts().map(|arr| arr.to_vec());\n        assert_eq!(locked_amounts, Some(expected_locked_amounts));\n    }\n\n    let mut total_unbonded = 0;\n\n    {\n        // Attempt full unbond\n        expect_undelegate_failure(&mut builder, DELEGATOR_VFTA_STAKE);\n\n        // Attempt unbond of released amount\n        expect_undelegate_success(&mut builder, EXPECTED_WEEKLY_RELEASE);\n\n        total_unbonded += EXPECTED_WEEKLY_RELEASE;\n\n        assert_eq!(\n            DELEGATOR_VFTA_STAKE - total_unbonded,\n            EXPECTED_LOCKED_AMOUNTS[0]\n        )\n    }\n\n    for i in 1..13 {\n        // Run auction forward by almost a week\n        builder.run_auction(WEEK_TIMESTAMPS[i] - 1, Vec::new());\n\n        // Attempt unbond of 1 mote\n        expect_undelegate_failure(&mut builder, u64::one());\n\n        // Run auction forward by one millisecond\n        builder.run_auction(WEEK_TIMESTAMPS[i], Vec::new());\n\n        // Attempt unbond of more than weekly release\n        expect_undelegate_failure(&mut builder, EXPECTED_WEEKLY_RELEASE + 1);\n\n        // Attempt unbond of released amount\n        expect_undelegate_success(&mut builder, EXPECTED_WEEKLY_RELEASE);\n\n        total_unbonded += EXPECTED_WEEKLY_RELEASE;\n\n        assert_eq!(\n            DELEGATOR_VFTA_STAKE - total_unbonded,\n            EXPECTED_LOCKED_AMOUNTS[i]\n        )\n    }\n\n    {\n        // Run auction forward by almost a week\n        builder.run_auction(WEEK_TIMESTAMPS[13] - 1, Vec::new());\n\n        // Attempt unbond of 1 mote\n        expect_undelegate_failure(&mut builder, u64::one());\n\n        // Run auction forward by one millisecond\n        builder.run_auction(WEEK_TIMESTAMPS[13], Vec::new());\n\n        // Attempt unbond of released amount + remainder\n        expect_undelegate_success(&mut builder, EXPECTED_WEEKLY_RELEASE + EXPECTED_REMAINDER);\n\n        total_unbonded += EXPECTED_WEEKLY_RELEASE + EXPECTED_REMAINDER;\n\n        assert_eq!(\n          
  DELEGATOR_VFTA_STAKE - total_unbonded,\n            EXPECTED_LOCKED_AMOUNTS[13]\n        )\n    }\n\n    assert_eq!(DELEGATOR_VFTA_STAKE, total_unbonded);\n}\n\n#[ignore]\n#[test]\nfn should_reset_delegators_stake_after_slashing() {\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(SYSTEM_TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *BID_ACCOUNT_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *BID_ACCOUNT_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! 
{\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    let validator_2_add_bid_request = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_2_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_2_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_2),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_2,\n        },\n    )\n    .build();\n\n    let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_validator_2_delegate_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_2_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n        },\n    )\n    .build();\n\n    let delegator_2_validator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_2),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(),\n        },\n    )\n    .build();\n\n    let delegator_2_validator_2_delegate_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_2),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_2_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        delegator_1_fund_request,\n        delegator_2_fund_request,\n        validator_1_fund_request,\n        validator_2_fund_request,\n        validator_1_add_bid_request,\n        validator_2_add_bid_request,\n        delegator_1_validator_1_delegate_request,\n        delegator_1_validator_2_delegate_request,\n        delegator_2_validator_1_delegate_request,\n        delegator_2_validator_2_delegate_request,\n    ];\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    for request in post_genesis_requests {\n        builder.exec(request).expect_success().commit();\n    }\n\n    let auction_hash = builder.get_auction_contract_hash();\n\n    // Check bids before slashing\n\n    let bids_1 = builder.get_bids();\n    let _ = bids_1\n        .validator_total_stake(&NON_FOUNDER_VALIDATOR_1_PK)\n        .expect(\"should have total stake\");\n\n    let validator_1_delegator_stakes_1 = {\n        match bids_1.delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_1_PK) {\n            None => U512::zero(),\n            Some(delegators) => delegators.iter().map(|x| x.staked_amount()).sum(),\n        }\n    };\n\n    assert!(validator_1_delegator_stakes_1 > U512::zero());\n\n    let validator_2_delegator_stakes_1 = {\n        match bids_1.delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_2_PK) {\n            None => U512::zero(),\n            Some(delegators) => delegators.iter().map(|x| x.staked_amount()).sum(),\n        }\n    };\n    assert!(validator_2_delegator_stakes_1 > U512::zero());\n\n    let slash_request_1 = ExecuteRequestBuilder::contract_call_by_hash(\n        *SYSTEM_ADDR,\n        
auction_hash,\n        auction::METHOD_SLASH,\n        runtime_args! {\n            auction::ARG_VALIDATOR_PUBLIC_KEYS => vec![\n               NON_FOUNDER_VALIDATOR_2_PK.clone(),\n            ]\n        },\n    )\n    .build();\n\n    builder.exec(slash_request_1).expect_success().commit();\n\n    // Compare bids after slashing validator 2\n    let bids_2 = builder.get_bids();\n    assert_ne!(bids_1, bids_2);\n\n    let _ = bids_2\n        .validator_bid(&NON_FOUNDER_VALIDATOR_1_PK)\n        .expect(\"should have bids\");\n    let validator_1_delegator_stakes_2 = {\n        match bids_1.delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_1_PK) {\n            None => U512::zero(),\n            Some(delegators) => delegators.iter().map(|x| x.staked_amount()).sum(),\n        }\n    };\n    assert!(validator_1_delegator_stakes_2 > U512::zero());\n\n    assert!(bids_2.validator_bid(&NON_FOUNDER_VALIDATOR_2_PK).is_none());\n\n    // Validator 1 total delegated stake did not change\n    assert_eq!(\n        validator_1_delegator_stakes_2,\n        validator_1_delegator_stakes_1\n    );\n\n    let slash_request_2 = ExecuteRequestBuilder::contract_call_by_hash(\n        *SYSTEM_ADDR,\n        auction_hash,\n        auction::METHOD_SLASH,\n        runtime_args! 
{\n            auction::ARG_VALIDATOR_PUBLIC_KEYS => vec![\n                NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ]\n        },\n    )\n    .build();\n\n    builder.exec(slash_request_2).expect_success().commit();\n\n    // Compare bids after slashing validator 2\n    let bids_3 = builder.get_bids();\n    assert_ne!(bids_3, bids_2);\n    assert_ne!(bids_3, bids_1);\n\n    assert!(bids_3.validator_bid(&NON_FOUNDER_VALIDATOR_1_PK).is_none());\n    let validator_1_delegator_stakes_3 = {\n        match bids_3.delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_1_PK) {\n            None => U512::zero(),\n            Some(delegators) => delegators.iter().map(|x| x.staked_amount()).sum(),\n        }\n    };\n\n    assert_ne!(\n        validator_1_delegator_stakes_3,\n        validator_1_delegator_stakes_1\n    );\n    assert_ne!(\n        validator_1_delegator_stakes_3,\n        validator_1_delegator_stakes_2\n    );\n\n    // Validator 1 total delegated stake is set to 0\n    assert_eq!(validator_1_delegator_stakes_3, U512::zero());\n}\n\n#[should_panic(expected = \"InvalidDelegatedAmount\")]\n#[ignore]\n#[test]\nfn should_validate_genesis_delegators_bond_amount() {\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            ACCOUNT_1_PK.clone(),\n            Motes::new(ACCOUNT_1_BALANCE),\n            Some(GenesisValidator::new(Motes::new(ACCOUNT_1_BOND), 80)),\n        );\n        let account_2 = GenesisAccount::account(\n            ACCOUNT_2_PK.clone(),\n            Motes::new(ACCOUNT_2_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_2_BOND),\n                DelegationRate::zero(),\n            )),\n        );\n        let delegator_1 = GenesisAccount::delegator(\n            ACCOUNT_1_PK.clone(),\n            DELEGATOR_1.clone(),\n            Motes::new(DELEGATOR_1_BALANCE),\n            Motes::zero(),\n        );\n  
      tmp.push(account_1);\n        tmp.push(account_2);\n        tmp.push(delegator_1);\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n}\n\nfn check_validator_slots_for_accounts(accounts: usize) {\n    let accounts = {\n        let range = 1..=accounts;\n\n        let mut tmp: Vec<GenesisAccount> = Vec::with_capacity(accounts);\n\n        for count in range.map(U256::from) {\n            let secret_key = {\n                let mut secret_key_bytes = [0; 32];\n                count.to_big_endian(&mut secret_key_bytes);\n                SecretKey::ed25519_from_bytes(secret_key_bytes).expect(\"should create ed25519 key\")\n            };\n\n            let public_key = PublicKey::from(&secret_key);\n\n            let account = GenesisAccount::account(\n                public_key,\n                Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n                Some(GenesisValidator::new(Motes::new(ACCOUNT_1_BOND), 80)),\n            );\n\n            tmp.push(account)\n        }\n\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n}\n\n#[should_panic(expected = \"InvalidValidatorSlots\")]\n#[ignore]\n#[test]\nfn should_fail_with_more_accounts_than_slots() {\n    check_validator_slots_for_accounts(DEFAULT_EXEC_CONFIG.validator_slots() as usize + 1);\n}\n\n#[ignore]\n#[test]\nfn should_run_genesis_with_exact_validator_slots() {\n    check_validator_slots_for_accounts(DEFAULT_EXEC_CONFIG.validator_slots() as usize);\n}\n\n#[ignore]\n#[test]\nfn should_delegate_and_redelegate() {\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(SYSTEM_TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *BID_ACCOUNT_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    let validator_2_add_bid_request = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_2_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! 
{\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_2_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_2),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        delegator_1_fund_request,\n        validator_1_fund_request,\n        validator_2_fund_request,\n        validator_1_add_bid_request,\n        validator_2_add_bid_request,\n        delegator_1_validator_1_delegate_request,\n    ];\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    builder.advance_eras_by_default_auction_delay();\n\n    let delegator_1_undelegate_purse = builder\n        .get_entity_by_account_hash(*BID_ACCOUNT_1_ADDR)\n        .expect(\"should have default account\")\n        .main_purse();\n\n    let delegator_1_redelegate_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_REDELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(UNDELEGATE_AMOUNT_1 + DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n            ARG_NEW_VALIDATOR => NON_FOUNDER_VALIDATOR_2_PK.clone()\n        },\n    )\n    .build();\n\n    builder\n        .exec(delegator_1_redelegate_request)\n        .commit()\n        .expect_success();\n\n    let unbond_kind = UnbondKind::DelegatedPublicKey(BID_ACCOUNT_1_PK.clone());\n    let after_redelegation = builder\n        .get_unbonds()\n        .get(&unbond_kind)\n        .expect(\"must have unbond\")\n        .first()\n        .expect(\"must have an entry for the unbond\")\n        .eras()\n        .len();\n\n    assert_eq!(1, after_redelegation);\n\n    let delegator_1_purse_balance_before = builder.get_purse_balance(delegator_1_undelegate_purse);\n\n    for _ in 0..=DEFAULT_UNBONDING_DELAY {\n        let delegator_1_redelegate_purse_balance =\n            builder.get_purse_balance(delegator_1_undelegate_purse);\n        assert_eq!(\n            delegator_1_purse_balance_before,\n            delegator_1_redelegate_purse_balance\n        );\n        builder.advance_era()\n    }\n\n    // Since a redelegation has been processed no funds should have transferred back to the purse.\n    let delegator_1_purse_balance_after = builder.get_purse_balance(delegator_1_undelegate_purse);\n\n    assert_eq!(\n        delegator_1_purse_balance_before,\n        delegator_1_purse_balance_after\n    );\n\n    let bids = builder.get_bids();\n    assert_eq!(bids.len(), 3);\n\n    assert!(\n        bids.delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_1_PK)\n            .is_none(),\n        \"fully unbonded\"\n    );\n\n    let delegators = bids\n        .delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_2_PK)\n        .expect(\"should have delegators\");\n    assert_eq!(delegators.len(), 1);\n    let delegator = bids\n        
.delegator_by_kind(\n            &NON_FOUNDER_VALIDATOR_2_PK,\n            &DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()),\n        )\n        .expect(\"should have delegator\");\n    let redelegated_amount_1 = delegator.staked_amount();\n    assert_eq!(\n        redelegated_amount_1,\n        U512::from(DELEGATE_AMOUNT_1),\n        \"expected full unbond\"\n    );\n}\n\n#[ignore]\n#[test]\nfn should_handle_redelegation_to_inactive_validator() {\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! 
{\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    let validator_2_add_bid_request = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_2_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_2_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_2),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_2_validator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => DELEGATOR_2.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        validator_1_fund_request,\n        validator_2_fund_request,\n        delegator_1_fund_request,\n        delegator_2_fund_request,\n        validator_1_add_bid_request,\n        validator_2_add_bid_request,\n        delegator_1_validator_1_delegate_request,\n        delegator_2_validator_1_delegate_request,\n    ];\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    builder.advance_eras_by_default_auction_delay();\n\n    let invalid_redelegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_REDELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(UNDELEGATE_AMOUNT_1 + DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n            ARG_NEW_VALIDATOR => BID_ACCOUNT_1_PK.clone()\n        },\n    )\n    .build();\n\n    builder.exec(invalid_redelegate_request).expect_failure();\n\n    let error = builder.get_error().expect(\"expected error\");\n    let str = format!(\"{}\", error);\n    assert!(\n        str.starts_with(\"ApiError::AuctionError(RedelegationValidatorNotFound)\"),\n        \"expected RedelegationValidatorNotFound\"\n    )\n}\n\n#[ignore]\n#[test]\nfn should_enforce_minimum_delegation_amount() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let transfer_to_validator_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let transfer_to_delegator_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *BID_ACCOUNT_1_ADDR,\n            ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE)\n        },\n    )\n    .build();\n\n    let post_genesis_request = vec![transfer_to_validator_1, transfer_to_delegator_1];\n\n    for request in post_genesis_request {\n        builder.exec(request).expect_success().commit();\n    }\n\n    let add_bid_request_1 = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! 
{\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    builder.exec(add_bid_request_1).expect_success().commit();\n\n    for _ in 0..=builder.get_auction_delay() {\n        let step_request = StepRequestBuilder::new()\n            .with_parent_state_hash(builder.get_post_state_hash())\n            .with_protocol_version(ProtocolVersion::V1_0_0)\n            .with_next_era_id(builder.get_era().successor())\n            .with_run_auction(true)\n            .build();\n        assert!(\n            builder.step(step_request).is_success(),\n            \"must execute step request\"\n        );\n    }\n\n    let delegation_request_1 = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(100u64),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n        },\n    )\n    .build();\n\n    // The delegation amount is below the default value of 500 CSPR,\n    // therefore the delegation should not succeed.\n    builder.exec(delegation_request_1).expect_failure();\n\n    let error = builder.get_error().expect(\"must get error\");\n    assert!(matches!(\n        error,\n        Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error)))\n        if auction_error == AuctionError::DelegationAmountTooSmall as u8));\n}\n\n#[ignore]\n#[test]\nfn should_allow_delegations_with_minimal_floor_amount() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let transfer_to_validator_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let transfer_to_delegator_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *BID_ACCOUNT_1_ADDR,\n            ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE)\n        },\n    )\n    .build();\n\n    let transfer_to_delegator_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *BID_ACCOUNT_2_ADDR,\n            ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE)\n        },\n    )\n    .build();\n\n    let post_genesis_request = vec![\n        transfer_to_validator_1,\n        transfer_to_delegator_1,\n        transfer_to_delegator_2,\n    ];\n\n    for request in post_genesis_request {\n        builder.exec(request).expect_success().commit();\n    }\n\n    let add_bid_request_1 = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! 
{\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    builder.exec(add_bid_request_1).expect_success().commit();\n\n    for _ in 0..=builder.get_auction_delay() {\n        let step_request = StepRequestBuilder::new()\n            .with_parent_state_hash(builder.get_post_state_hash())\n            .with_protocol_version(ProtocolVersion::V1_0_0)\n            .with_next_era_id(builder.get_era().successor())\n            .with_run_auction(true)\n            .build();\n\n        assert!(\n            builder.step(step_request).is_success(),\n            \"must execute step request\"\n        );\n    }\n\n    let delegation_request_1 = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT - 1),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n        },\n    )\n    .build();\n\n    // The delegation amount is below the default value of 500 CSPR,\n    // therefore the delegation should not succeed.\n    builder.exec(delegation_request_1).expect_failure();\n\n    let error = builder.get_error().expect(\"must get error\");\n\n    assert!(matches!(\n        error,\n        Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error)))\n        if auction_error == AuctionError::DelegationAmountTooSmall as u8));\n\n    let delegation_request_2 = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(),\n        },\n    )\n    .build();\n\n    builder.exec(delegation_request_2).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_enforce_max_delegators_per_validator_cap() {\n    let chainspec = ChainspecConfig::default().with_max_delegators_per_validator(2u32);\n\n    let data_dir = TempDir::new().expect(\"should create temp dir\");\n    let mut builder = LmdbWasmTestBuilder::new_with_config(data_dir.path(), chainspec);\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let transfer_to_validator_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let transfer_to_delegator_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *BID_ACCOUNT_1_ADDR,\n            ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE)\n        },\n    )\n    .build();\n\n    let transfer_to_delegator_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *BID_ACCOUNT_2_ADDR,\n            ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE)\n        },\n    )\n    .build();\n\n    let transfer_to_delegator_3 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE)\n        },\n    )\n    .build();\n\n    let post_genesis_request = vec![\n        transfer_to_validator_1,\n        transfer_to_delegator_1,\n        transfer_to_delegator_2,\n        transfer_to_delegator_3,\n    ];\n\n    for request in post_genesis_request {\n        builder.exec(request).expect_success().commit();\n    }\n\n    let add_bid_request_1 = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    builder.exec(add_bid_request_1).expect_success().commit();\n\n    for _ in 0..=builder.get_auction_delay() {\n        let step_request = StepRequestBuilder::new()\n            .with_parent_state_hash(builder.get_post_state_hash())\n            .with_protocol_version(ProtocolVersion::V1_0_0)\n            .with_next_era_id(builder.get_era().successor())\n            .with_run_auction(true)\n            .build();\n\n        assert!(\n            builder.step(step_request).is_success(),\n            \"must execute step request\"\n        );\n    }\n\n    let delegation_request_1 = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n        },\n    )\n    .build();\n\n    let delegation_request_2 = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(),\n        },\n    )\n    .build();\n\n    let delegation_requests = [delegation_request_1, delegation_request_2];\n\n    for request in delegation_requests {\n        builder.exec(request).expect_success().commit();\n    }\n\n    let delegation_request_3 = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    builder.exec(delegation_request_3).expect_failure();\n\n    let error = builder.get_error().expect(\"must get error\");\n\n    assert!(matches!(\n        error,\n        Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error)))\n        if auction_error == AuctionError::ExceededDelegatorSizeLimit as u8));\n\n    let delegator_2_staked_amount = {\n        let bids = builder.get_bids();\n        let delegator = bids\n            .delegator_by_kind(\n                &NON_FOUNDER_VALIDATOR_1_PK,\n                &DelegatorKind::PublicKey(BID_ACCOUNT_2_PK.clone()),\n            )\n            .expect(\"should have delegator bid\");\n        delegator.staked_amount()\n    };\n\n    let undelegation_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_2_ADDR,\n        CONTRACT_UNDELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => delegator_2_staked_amount,\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(),\n        },\n    )\n    .build();\n\n    builder.exec(undelegation_request).expect_success().commit();\n\n    let bids = builder.get_bids();\n\n    let current_delegator_count = bids\n        .delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_1_PK)\n        .expect(\"must have bid record\")\n        .iter()\n        .filter(|x| x.staked_amount() > U512::zero())\n        .collect::<Vec<&auction::DelegatorBid>>()\n        .len();\n\n    assert_eq!(current_delegator_count, 1);\n\n    let delegation_request_3 = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    builder.exec(delegation_request_3).expect_success().commit();\n\n    let bids = builder.get_bids();\n    let current_delegator_count = bids\n        .delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_1_PK)\n        .expect(\"must have bid record\")\n        .len();\n\n    assert_eq!(current_delegator_count, 2);\n}\n\n#[ignore]\n#[test]\nfn should_transfer_to_main_purse_in_case_of_redelegation_past_max_delegation_cap() {\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let transfer_to_delegator_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *BID_ACCOUNT_1_ADDR,\n            ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE)\n        },\n    )\n    .build();\n\n    let transfer_to_delegator_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *BID_ACCOUNT_2_ADDR,\n            ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    let validator_2_add_bid_request = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_2_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_2_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_2),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_validator_2_delegate_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_2_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        validator_1_fund_request,\n        validator_2_fund_request,\n        transfer_to_delegator_1,\n        transfer_to_delegator_2,\n        validator_1_add_bid_request,\n        validator_2_add_bid_request,\n        delegator_1_validator_1_delegate_request,\n        delegator_1_validator_2_delegate_request,\n    ];\n\n    let chainspec = ChainspecConfig::default().with_max_delegators_per_validator(1u32);\n\n    let data_dir = TempDir::new().expect(\"should create temp dir\");\n    let mut builder = LmdbWasmTestBuilder::new_with_config(data_dir.path(), chainspec);\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    for request in post_genesis_requests {\n        builder.exec(request).expect_success().commit();\n    }\n\n    builder.advance_eras_by_default_auction_delay();\n\n    let delegator_1_main_purse = builder\n        .get_entity_by_account_hash(*BID_ACCOUNT_1_ADDR)\n        .expect(\"should have default account\")\n        .main_purse();\n\n    let delegator_1_redelegate_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_REDELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(UNDELEGATE_AMOUNT_1 + DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n            ARG_NEW_VALIDATOR => NON_FOUNDER_VALIDATOR_2_PK.clone()\n        },\n    )\n    .build();\n\n    builder\n        .exec(delegator_1_redelegate_request)\n        .commit()\n        .expect_success();\n\n    let unbond_kind = UnbondKind::DelegatedPublicKey(BID_ACCOUNT_1_PK.clone());\n    let after_redelegation = builder\n        .get_unbonds()\n        .get(&unbond_kind)\n        .expect(\"must have unbond\")\n        .first()\n        .expect(\"must have at least one entry\")\n        .eras()\n        .len();\n\n    assert_eq!(1, after_redelegation);\n\n    let delegator_1_purse_balance_before = builder.get_purse_balance(delegator_1_main_purse);\n\n    for _ in 0..=DEFAULT_UNBONDING_DELAY {\n        let delegator_1_redelegate_purse_balance =\n            builder.get_purse_balance(delegator_1_main_purse);\n        assert_eq!(\n            delegator_1_purse_balance_before,\n            delegator_1_redelegate_purse_balance\n        );\n\n        builder.advance_era();\n    }\n}\n\n#[ignore]\n#[test]\nfn should_delegate_and_redelegate_with_eviction_regression_test() {\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(SYSTEM_TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *BID_ACCOUNT_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    let validator_2_add_bid_request = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_2_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_2_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_2),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        delegator_1_fund_request,\n        validator_1_fund_request,\n        validator_2_fund_request,\n        validator_1_add_bid_request,\n        validator_2_add_bid_request,\n        delegator_1_validator_1_delegate_request,\n    ];\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    let delegator_1_redelegate_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_REDELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n            ARG_NEW_VALIDATOR => NON_FOUNDER_VALIDATOR_2_PK.clone()\n        },\n    )\n    .build();\n\n    builder\n        .exec(delegator_1_redelegate_request)\n        .commit()\n        .expect_success();\n\n    builder.advance_eras_by(DEFAULT_UNBONDING_DELAY);\n\n    // Advance one more era, this is the point where the redelegate request is processed (era >=\n    // unbonding_delay + 1)\n    builder.advance_era();\n\n    let bids = builder.get_bids();\n    assert!(bids\n        .delegator_by_kind(\n            &NON_FOUNDER_VALIDATOR_1_PK,\n            &DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()),\n        )\n        .is_none());\n    assert!(bids\n        .delegator_by_kind(\n            &NON_FOUNDER_VALIDATOR_2_PK,\n            &DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()),\n        )\n        .is_some());\n}\n\n#[ignore]\n#[test]\nfn 
should_increase_existing_delegation_when_limit_exceeded() {\n    let chainspec = ChainspecConfig::default().with_max_delegators_per_validator(2);\n\n    let data_dir = TempDir::new().expect(\"should create temp dir\");\n    let mut builder = LmdbWasmTestBuilder::new_with_config(data_dir.path(), chainspec);\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let transfer_to_validator_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let transfer_to_delegator_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *BID_ACCOUNT_1_ADDR,\n            ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE)\n        },\n    )\n    .build();\n\n    let transfer_to_delegator_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *BID_ACCOUNT_2_ADDR,\n            ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE)\n        },\n    )\n    .build();\n\n    let transfer_to_delegator_3 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE)\n        },\n    )\n    .build();\n\n    let post_genesis_request = vec![\n        transfer_to_validator_1,\n        transfer_to_delegator_1,\n        transfer_to_delegator_2,\n        transfer_to_delegator_3,\n    ];\n\n    for request in post_genesis_request {\n        builder.exec(request).expect_success().commit();\n    }\n\n    let add_bid_request_1 = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    builder.exec(add_bid_request_1).expect_success().commit();\n\n    for _ in 0..=builder.get_auction_delay() {\n        let step_request = StepRequestBuilder::new()\n            .with_parent_state_hash(builder.get_post_state_hash())\n            .with_protocol_version(ProtocolVersion::V1_0_0)\n            .with_next_era_id(builder.get_era().successor())\n            .with_run_auction(true)\n            .build();\n\n        assert!(\n            builder.step(step_request).is_success(),\n            \"must execute step request\"\n        );\n    }\n\n    let delegation_request_1 = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n        },\n    )\n    .build();\n\n    let delegation_request_2 = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(),\n        },\n    )\n    .build();\n\n    let delegation_requests = [delegation_request_1, delegation_request_2];\n\n    for request in delegation_requests {\n        builder.exec(request).expect_success().commit();\n    }\n\n    let delegation_request_3 = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    builder.exec(delegation_request_3).expect_failure();\n\n    let error = builder.get_error().expect(\"must get error\");\n\n    assert!(matches!(\n        error,\n        Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error)))\n        if auction_error == AuctionError::ExceededDelegatorSizeLimit as u8));\n\n    // The validator already has the maximum number of delegators allowed. However, this is a\n    // delegator that already delegated, so their bid should just be increased.\n    let delegation_request_2_repeat = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(),\n        },\n    )\n    .build();\n\n    builder\n        .exec(delegation_request_2_repeat)\n        .expect_success()\n        .commit();\n}\n\n#[ignore]\n#[test]\nfn should_fail_bid_public_key_change_if_conflicting_validator_bid_exists() {\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(SYSTEM_TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *BID_ACCOUNT_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! 
{\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    let validator_2_add_bid_request = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_2_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_2_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_2),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        delegator_1_fund_request,\n        validator_1_fund_request,\n        validator_2_fund_request,\n        validator_1_add_bid_request,\n        validator_2_add_bid_request,\n        delegator_1_validator_1_delegate_request,\n    ];\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    builder.advance_eras_by_default_auction_delay();\n\n    let change_bid_public_key_request = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_1_ADDR,\n        CONTRACT_CHANGE_BID_PUBLIC_KEY,\n        runtime_args! 
{\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_NEW_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_2_PK.clone()\n        },\n    )\n    .build();\n\n    builder.exec(change_bid_public_key_request).expect_failure();\n\n    let error = builder.get_error().expect(\"must get error\");\n    assert!(matches!(\n        error,\n        Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error)))\n        if auction_error == AuctionError::ValidatorBidExistsAlready as u8));\n}\n\n#[ignore]\n#[test]\nfn should_change_validator_bid_public_key() {\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(SYSTEM_TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_3_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_3_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *BID_ACCOUNT_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *BID_ACCOUNT_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    let validator_3_add_bid_request = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_3_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_3_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_3),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_3,\n        },\n    )\n    .build();\n\n    let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n        },\n    )\n    .build();\n\n    let delegator_2_validator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_2),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        delegator_1_fund_request,\n        delegator_2_fund_request,\n        validator_1_fund_request,\n        validator_2_fund_request,\n        validator_3_fund_request,\n        validator_1_add_bid_request,\n        validator_3_add_bid_request,\n        delegator_1_validator_1_delegate_request,\n        delegator_2_validator_1_delegate_request,\n    ];\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    builder.advance_eras_by_default_auction_delay();\n\n    // redelegate funds to validator 3\n    // NOTE: previously, this would leave a remaining delegation to the original delegator behind\n    // with less than that min delegation bid. 
Under the new logic if the remaining del bid amount\n    // would be less than the min, it gets converted to a full unbond instead of a partial unbond\n    let attempted_partial_unbond_redlegate_amount =\n        U512::from(UNDELEGATE_AMOUNT_1 + DEFAULT_MINIMUM_DELEGATION_AMOUNT);\n    let actual_delegated_amount = U512::from(DELEGATE_AMOUNT_1);\n    assert!(\n        attempted_partial_unbond_redlegate_amount < actual_delegated_amount,\n        \"attempted partial amount should be less than actual delegated amount\"\n    );\n    let attempted_remaining_delegation_amount =\n        actual_delegated_amount - attempted_partial_unbond_redlegate_amount;\n    assert!(\n        attempted_remaining_delegation_amount < DEFAULT_MINIMUM_DELEGATION_AMOUNT.into(),\n        \"attempted remainder should be less than minimum in this case\"\n    );\n    let delegator_1_redelegate_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_REDELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => attempted_partial_unbond_redlegate_amount,\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n            ARG_NEW_VALIDATOR => NON_FOUNDER_VALIDATOR_3_PK.clone()\n        },\n    )\n    .build();\n\n    builder\n        .exec(delegator_1_redelegate_request)\n        .commit()\n        .expect_success();\n\n    let bids: Vec<BidKind> = builder\n        .get_bids()\n        .into_iter()\n        .filter(|bid| !bid.is_unbond())\n        .collect();\n\n    assert_eq!(\n        bids.len(),\n        3,\n        \"with unbonds filtered out there should be 3 bids\"\n    );\n    assert!(bids\n        .validator_bid(&NON_FOUNDER_VALIDATOR_2_PK.clone())\n        .is_none());\n\n    // change validator 1 bid public key to validator 2 public key\n    let change_bid_public_key_request = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_1_ADDR,\n        CONTRACT_CHANGE_BID_PUBLIC_KEY,\n        runtime_args! 
{\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_NEW_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_2_PK.clone()\n        },\n    )\n    .build();\n\n    let era_id = builder.get_era();\n\n    builder\n        .exec(change_bid_public_key_request)\n        .commit()\n        .expect_success();\n\n    let bids: Vec<BidKind> = builder\n        .get_bids()\n        .into_iter()\n        .filter(|bid| !bid.is_unbond())\n        .collect();\n    assert_eq!(\n        bids.len(),\n        4,\n        \"with unbonds filtered out, there should be 4 bids\"\n    );\n    let new_validator_bid = bids\n        .validator_bid(&NON_FOUNDER_VALIDATOR_2_PK.clone())\n        .unwrap_or_else(|| {\n            panic!(\n                \"should have validator bid {:?}\",\n                NON_FOUNDER_VALIDATOR_2_PK.clone()\n            )\n        });\n\n    assert_eq!(\n        builder.get_purse_balance(*new_validator_bid.bonding_purse()),\n        U512::from(ADD_BID_AMOUNT_1)\n    );\n\n    let bridge = bids\n        .bridge(\n            &NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            &NON_FOUNDER_VALIDATOR_2_PK.clone(),\n            &era_id,\n        )\n        .unwrap();\n    assert_eq!(\n        bridge.old_validator_public_key(),\n        &NON_FOUNDER_VALIDATOR_1_PK.clone()\n    );\n    assert_eq!(\n        bridge.new_validator_public_key(),\n        &NON_FOUNDER_VALIDATOR_2_PK.clone()\n    );\n    assert_eq!(*bridge.era_id(), era_id);\n\n    assert!(bids\n        .delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_1_PK)\n        .is_none());\n    let delegators = bids\n        .delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_2_PK)\n        .expect(\"should have delegators\");\n    // NOTE: previously in this test the partial redelegate would have been allowed, and thus this\n    // delegator would have had delegations to the original validator (below min)\n    // and the redelegated target (the redelegated amount)\n    // The new logic 
converted it into a full unbond to avoid the remainder below min being\n    // left behind.\n    assert_eq!(\n        delegators.len(),\n        1,\n        \"the remaining delegator should have bridged over\"\n    );\n    assert!(\n        bids.delegator_by_kind(\n            &NON_FOUNDER_VALIDATOR_2_PK,\n            &DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()),\n        )\n        .is_none(),\n        \"the redelegated unbond should not have bridged over\"\n    );\n\n    let delegator = bids\n        .delegator_by_kind(\n            &NON_FOUNDER_VALIDATOR_2_PK,\n            &DelegatorKind::PublicKey(BID_ACCOUNT_2_PK.clone()),\n        )\n        .expect(\"should have account2 delegation\");\n    assert_eq!(delegator.staked_amount(), U512::from(DELEGATE_AMOUNT_2));\n\n    // distribute rewards\n    let protocol_version = DEFAULT_PROTOCOL_VERSION;\n    let total_payout = builder.base_round_reward(None, protocol_version);\n    let mut rewards = BTreeMap::new();\n    rewards.insert(NON_FOUNDER_VALIDATOR_1_PK.clone(), vec![total_payout]);\n    let distribute_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *SYSTEM_ADDR,\n        builder.get_auction_contract_hash(),\n        METHOD_DISTRIBUTE,\n        runtime_args! 
{\n            ARG_ENTRY_POINT => METHOD_DISTRIBUTE,\n            ARG_REWARDS_MAP => rewards\n        },\n    )\n    .build();\n\n    builder.exec(distribute_request).commit().expect_success();\n    let bids: Vec<BidKind> = builder\n        .get_bids()\n        .into_iter()\n        .filter(|bid| !bid.is_unbond())\n        .collect();\n    assert_eq!(\n        bids.len(),\n        4,\n        \"excluding unbonds there should now be 4 bids\"\n    );\n\n    assert!(\n        bids.delegator_by_kind(\n            &NON_FOUNDER_VALIDATOR_2_PK,\n            &DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()),\n        )\n        .is_none(),\n        \"should not have undelegated delegator\"\n    );\n    let delegator = bids\n        .delegator_by_kind(\n            &NON_FOUNDER_VALIDATOR_2_PK,\n            &DelegatorKind::PublicKey(BID_ACCOUNT_2_PK.clone()),\n        )\n        .expect(\"should have account2 delegation\");\n    assert!(delegator.staked_amount() > U512::from(DELEGATE_AMOUNT_2));\n    let expected_reward = 12;\n    assert_eq!(\n        delegator.staked_amount(),\n        U512::from(DELEGATE_AMOUNT_2 + expected_reward)\n    );\n\n    // advance eras until unbonds are processed\n    builder.advance_eras_by(DEFAULT_UNBONDING_DELAY + 1);\n\n    let bids = builder.get_bids();\n    assert_eq!(bids.len(), 5, \"with unbonds filtered there should be 5\");\n    let delegator = bids\n        .delegator_by_kind(\n            &NON_FOUNDER_VALIDATOR_3_PK,\n            &DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()),\n        )\n        .expect(\"should have account1 delegation\");\n\n    assert_eq!(\n        delegator.staked_amount(),\n        U512::from(DELEGATE_AMOUNT_1 + expected_reward),\n        \"the fully redelegated amount plus the earned rewards\"\n    );\n}\n\n#[ignore]\n#[test]\nfn should_handle_excessively_long_bridge_record_chains() {\n    let mut validators = Vec::new();\n    for index in 0..21 {\n        let secret_key =\n            
SecretKey::ed25519_from_bytes([10 + index; SecretKey::ED25519_LENGTH]).unwrap();\n        let pubkey = PublicKey::from(&secret_key);\n        let addr = AccountHash::from(&pubkey);\n        validators.push((pubkey, addr));\n    }\n\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(SYSTEM_TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *BID_ACCOUNT_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *BID_ACCOUNT_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! 
{\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    let validator_2_add_bid_request = ExecuteRequestBuilder::standard(\n        *NON_FOUNDER_VALIDATOR_2_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_2_PK.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_2),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_2,\n        },\n    )\n    .build();\n\n    let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(),\n        },\n    )\n    .build();\n\n    let delegator_2_validator_2_delegate_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_2),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_2_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(),\n        },\n    )\n    .build();\n\n    let mut post_genesis_requests = vec![\n        system_fund_request,\n        delegator_1_fund_request,\n        delegator_2_fund_request,\n        validator_1_fund_request,\n        validator_2_fund_request,\n        validator_1_add_bid_request,\n        validator_2_add_bid_request,\n        delegator_1_validator_1_delegate_request,\n        delegator_2_validator_2_delegate_request,\n    ];\n\n    // add extra validators fund requests\n    for (_pubkey, addr) in validators.iter() {\n        let validator_fund_request = ExecuteRequestBuilder::standard(\n            *DEFAULT_ACCOUNT_ADDR,\n            CONTRACT_TRANSFER_TO_ACCOUNT,\n            runtime_args! {\n                ARG_TARGET => *addr,\n                ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n            },\n        )\n        .build();\n        post_genesis_requests.push(validator_fund_request)\n    }\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    builder.advance_eras_by_default_auction_delay();\n\n    // verify delegator 2 main purse balance after delegation\n    let delegator_2_main_purse = builder\n        .get_entity_by_account_hash(*BID_ACCOUNT_2_ADDR)\n        .expect(\"should have default account\")\n        .main_purse();\n    assert_eq!(\n        builder.get_purse_balance(delegator_2_main_purse),\n        U512::from(TRANSFER_AMOUNT - DELEGATE_AMOUNT_2)\n    );\n\n    // redelegate funds to validator 1\n    let delegator_2_redelegate_request = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_2_ADDR,\n        CONTRACT_REDELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(UNDELEGATE_AMOUNT_2),\n            ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_2_PK.clone(),\n            ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(),\n            ARG_NEW_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone()\n        },\n    )\n    .build();\n\n    builder\n        .exec(delegator_2_redelegate_request)\n        .commit()\n        .expect_success();\n\n    // change validator bid public key\n    let mut current_bid_public_key = NON_FOUNDER_VALIDATOR_1_PK.clone();\n    let mut current_bid_addr = *NON_FOUNDER_VALIDATOR_1_ADDR;\n    for (pubkey, addr) in validators.iter() {\n        let change_bid_public_key_request = ExecuteRequestBuilder::standard(\n            current_bid_addr,\n            CONTRACT_CHANGE_BID_PUBLIC_KEY,\n            runtime_args! {\n                ARG_PUBLIC_KEY => current_bid_public_key.clone(),\n                ARG_NEW_PUBLIC_KEY => pubkey.clone()\n            },\n        )\n        .build();\n\n        builder\n            .exec(change_bid_public_key_request)\n            .commit()\n            .expect_success();\n\n        current_bid_public_key = pubkey.clone();\n        current_bid_addr = *addr;\n    }\n\n    let era_id = builder.get_era();\n\n    let bids: Vec<_> = builder\n        .get_bids()\n        .into_iter()\n        .filter(|bid| !bid.is_unbond())\n        .collect();\n    assert_eq!(bids.len(), 25);\n    let new_validator_bid = bids.validator_bid(&current_bid_public_key).unwrap();\n    assert_eq!(\n        builder.get_purse_balance(*new_validator_bid.bonding_purse()),\n        U512::from(ADD_BID_AMOUNT_1)\n    );\n\n    // check if bridge records exist\n    let mut old_public_key = NON_FOUNDER_VALIDATOR_1_PK.clone();\n    for (pubkey, _addr) in validators.iter() {\n        let bridge = bids\n            .bridge(&old_public_key.clone(), &pubkey.clone(), &era_id)\n            .unwrap();\n        assert_eq!(bridge.old_validator_public_key(), &old_public_key.clone());\n        
assert_eq!(bridge.new_validator_public_key(), &pubkey.clone());\n        assert_eq!(*bridge.era_id(), era_id);\n\n        old_public_key = pubkey.clone();\n    }\n\n    // verify delegator bids for current validator bid\n    let current_public_key = old_public_key;\n    let delegators = bids\n        .delegators_by_validator_public_key(&current_public_key)\n        .expect(\"should have delegators\");\n    assert_eq!(delegators.len(), 1);\n    let delegator = bids\n        .delegator_by_kind(\n            &current_public_key,\n            &DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()),\n        )\n        .expect(\"should have account1 delegation\");\n    assert_eq!(delegator.staked_amount(), U512::from(DELEGATE_AMOUNT_1));\n    let delegators = bids\n        .delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_2_PK)\n        .expect(\"should have delegators\");\n    assert_eq!(delegators.len(), 1);\n    let delegator = bids\n        .delegator_by_kind(\n            &NON_FOUNDER_VALIDATOR_2_PK,\n            &DelegatorKind::PublicKey(BID_ACCOUNT_2_PK.clone()),\n        )\n        .expect(\"should have account2 delegation\");\n    assert_eq!(\n        delegator.staked_amount(),\n        U512::from(DELEGATE_AMOUNT_2 - UNDELEGATE_AMOUNT_2)\n    );\n\n    // distribute rewards\n    let protocol_version = DEFAULT_PROTOCOL_VERSION;\n    let total_payout = builder.base_round_reward(None, protocol_version);\n    let mut rewards = BTreeMap::new();\n    rewards.insert(NON_FOUNDER_VALIDATOR_1_PK.clone(), vec![total_payout]);\n    let distribute_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *SYSTEM_ADDR,\n        builder.get_auction_contract_hash(),\n        METHOD_DISTRIBUTE,\n        runtime_args! 
{\n            ARG_ENTRY_POINT => METHOD_DISTRIBUTE,\n            ARG_REWARDS_MAP => rewards\n        },\n    )\n    .build();\n\n    builder.exec(distribute_request).commit().expect_success();\n\n    let bids: Vec<_> = builder\n        .get_bids()\n        .into_iter()\n        .filter(|bid| !bid.is_unbond())\n        .collect();\n    assert_eq!(bids.len(), 25);\n\n    let delegator = bids\n        .delegator_by_kind(\n            &current_public_key,\n            &DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()),\n        )\n        .expect(\"should have account1 delegation\");\n    assert_eq!(delegator.staked_amount(), U512::from(DELEGATE_AMOUNT_1));\n\n    // advance eras until unbonds are processed\n    builder.advance_eras_by(DEFAULT_UNBONDING_DELAY + 1);\n\n    let bids: Vec<_> = builder\n        .get_bids()\n        .into_iter()\n        .filter(|bid| !bid.is_unbond())\n        .collect();\n    assert_eq!(bids.len(), 25);\n    let delegator = bids.delegator_by_kind(\n        &current_public_key,\n        &DelegatorKind::PublicKey(BID_ACCOUNT_2_PK.clone()),\n    );\n    assert!(delegator.is_none());\n\n    // verify that unbond was returned to main purse instead of being redelegated\n    assert_eq!(\n        builder.get_purse_balance(delegator_2_main_purse),\n        U512::from(TRANSFER_AMOUNT - DELEGATE_AMOUNT_2 + UNDELEGATE_AMOUNT_2)\n    );\n}\n\n#[ignore]\n#[test]\nfn credits_are_considered_when_determining_validators() {\n    // In this test we have 2 genesis nodes that are validators: Node 1 and Node 2; 1 has less stake\n    // than 2. We have only 2 validator slots so later we'll bid in another node with a stake\n    // slightly higher than the one of node 1.\n    // Under normal circumstances, since node 3 put in a higher bid, it should win the slot and kick\n    // out node 1. 
But since we add some credits for node 1 (because it was a validator and\n    // proposed blocks) it should maintain its slot.\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account_1 = GenesisAccount::account(\n            ACCOUNT_1_PK.clone(),\n            Motes::new(ACCOUNT_1_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_1_BOND),\n                DelegationRate::zero(),\n            )),\n        );\n        let account_2 = GenesisAccount::account(\n            ACCOUNT_2_PK.clone(),\n            Motes::new(ACCOUNT_2_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(ACCOUNT_2_BOND),\n                DelegationRate::zero(),\n            )),\n        );\n        let account_3 = GenesisAccount::account(\n            BID_ACCOUNT_1_PK.clone(),\n            Motes::new(BID_ACCOUNT_1_BALANCE),\n            None,\n        );\n        tmp.push(account_1);\n        tmp.push(account_2);\n        tmp.push(account_3);\n        tmp\n    };\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    let config = GenesisConfigBuilder::default()\n        .with_accounts(accounts)\n        .with_validator_slots(2) // set up only 2 validators\n        .with_auction_delay(DEFAULT_AUCTION_DELAY)\n        .with_locked_funds_period_millis(DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS)\n        .with_round_seigniorage_rate(DEFAULT_ROUND_SEIGNIORAGE_RATE)\n        .with_unbonding_delay(DEFAULT_UNBONDING_DELAY)\n        .with_genesis_timestamp_millis(DEFAULT_GENESIS_TIMESTAMP_MILLIS)\n        .build();\n    let run_genesis_request = GenesisRequest::new(\n        DEFAULT_GENESIS_CONFIG_HASH,\n        DEFAULT_PROTOCOL_VERSION,\n        config,\n        DEFAULT_CHAINSPEC_REGISTRY.clone(),\n    );\n    builder.run_genesis(run_genesis_request);\n\n    let genesis_validator_weights = builder\n        .get_validator_weights(INITIAL_ERA_ID)\n        .expect(\"should have genesis 
validators for initial era\");\n    let auction_delay = builder.get_auction_delay();\n\n    // new_era is the first era in the future where new era validator weights will be calculated\n    let new_era = INITIAL_ERA_ID + auction_delay + 1;\n    assert!(builder.get_validator_weights(new_era).is_none());\n    assert_eq!(\n        builder.get_validator_weights(new_era - 1).unwrap(),\n        builder.get_validator_weights(INITIAL_ERA_ID).unwrap()\n    );\n    // in the genesis era both node 1 and 2 are validators.\n    assert_eq!(\n        genesis_validator_weights\n            .keys()\n            .cloned()\n            .collect::<BTreeSet<_>>(),\n        BTreeSet::from_iter(vec![ACCOUNT_1_PK.clone(), ACCOUNT_2_PK.clone()])\n    );\n\n    // bid in the 3rd node with an amount just a bit more than node 1.\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *BID_ACCOUNT_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => BID_ACCOUNT_1_PK.clone(),\n            ARG_AMOUNT => U512::from(ACCOUNT_1_BOND + 1),\n            ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1,\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    // Add a credit for node 1 artificially (assume it has proposed a block with a transaction and\n    // received credit).\n    let credit_amount = U512::from(2001);\n    let add_credit = HandleFeeMode::credit(\n        Box::new(ACCOUNT_1_PK.clone()),\n        credit_amount,\n        INITIAL_ERA_ID,\n    );\n    builder.handle_fee(\n        None,\n        DEFAULT_PROTOCOL_VERSION,\n        TransactionHash::from_raw([1; 32]),\n        add_credit,\n    );\n\n    // run auction and compute validators for new era\n    builder.run_auction(\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS,\n        Vec::new(),\n    );\n\n    let new_validator_weights: ValidatorWeights = builder\n        .get_validator_weights(new_era)\n        
.expect(\"should have first era validator weights\");\n\n    // We have only 2 slots. Node 2 should be in the set because it's the highest bidder. Node 1\n    // should keep its validator slot even though it's bid is now lower than node 3. This should\n    // have happened because there was a credit for node 1 added.\n    assert_eq!(\n        new_validator_weights.get(&ACCOUNT_2_PK),\n        Some(&U512::from(ACCOUNT_2_BOND))\n    );\n    assert!(!new_validator_weights.contains_key(&BID_ACCOUNT_1_PK));\n    let expected_amount = credit_amount.saturating_add(U512::from(ACCOUNT_1_BOND));\n    assert_eq!(\n        new_validator_weights.get(&ACCOUNT_1_PK),\n        Some(&expected_amount)\n    );\n}\n\n#[ignore]\n#[test]\nfn should_mark_bids_with_less_than_minimum_bid_amount_as_inactive_via_upgrade() {\n    const VALIDATOR_MIN_BID_FIXTURE: &str = \"validator_minimum_bid\";\n\n    const FIXTURE_MIN_BID_AMOUNT: u64 = 10_000 * 1_000_000_000;\n\n    let (mut builder, lmdb_fixture_state, _temp_dir) =\n        lmdb_fixture::builder_from_global_state_fixture(VALIDATOR_MIN_BID_FIXTURE);\n\n    let current_protocol_version = lmdb_fixture_state.genesis_protocol_version();\n\n    let new_protocol_version = ProtocolVersion::from_parts(\n        current_protocol_version.value().major,\n        current_protocol_version.value().minor + 1,\n        0,\n    );\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(current_protocol_version)\n            .with_new_protocol_version(new_protocol_version)\n            .with_activation_point(EraId::new(1))\n            .with_new_gas_hold_handling(HoldBalanceHandling::Accrued)\n            .with_new_gas_hold_interval(1200u64)\n            .with_validator_minimum_bid_amount(FIXTURE_MIN_BID_AMOUNT + 1)\n            .build()\n    };\n\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let public_key = {\n        let secret_key = 
SecretKey::ed25519_from_bytes([204; SecretKey::ED25519_LENGTH]).unwrap();\n        PublicKey::from(&secret_key)\n    };\n    let bid = builder\n        .get_validator_bid(public_key)\n        .expect(\"must have the validator bid record\");\n\n    assert!(bid.inactive())\n}\n\n#[ignore]\n#[test]\nfn should_correctly_allow_validator_to_change_delegator_min_max_limits() {\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder\n        .exec(validator_1_fund_request)\n        .expect_success()\n        .commit();\n\n    let result = builder.bidding(\n        None,\n        DEFAULT_PROTOCOL_VERSION,\n        (*NON_FOUNDER_VALIDATOR_1_ADDR).into(),\n        AuctionMethod::AddBid {\n            public_key: NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            delegation_rate: 10,\n            amount: U512::from(ADD_BID_AMOUNT_1),\n            minimum_delegation_amount: Some(DEFAULT_MINIMUM_DELEGATION_AMOUNT + 10),\n            maximum_delegation_amount: Some(DEFAULT_MAXIMUM_DELEGATION_AMOUNT - 10),\n            minimum_bid_amount: DEFAULT_MINIMUM_BID_AMOUNT,\n            reserved_slots: 0,\n        },\n    );\n\n    assert!(result.is_success());\n    builder.commit_transforms(builder.get_post_state_hash(), result.effects());\n    let validator_public_key = NON_FOUNDER_VALIDATOR_1_PK.clone();\n\n    let bid = builder\n        .get_bids()\n        .into_iter()\n        .find(|bid| bid.validator_public_key() == validator_public_key)\n        .expect(\"must have bid for the validator\")\n        .as_validator_bid()\n        .expect(\"must get validator bid\");\n\n    assert_eq!(\n        
bid.minimum_delegation_amount(),\n        DEFAULT_MINIMUM_DELEGATION_AMOUNT + 10\n    );\n\n    let result = builder.bidding(\n        None,\n        DEFAULT_PROTOCOL_VERSION,\n        (*NON_FOUNDER_VALIDATOR_1_ADDR).into(),\n        AuctionMethod::AddBid {\n            public_key: NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            delegation_rate: 10,\n            amount: U512::from(ADD_BID_AMOUNT_1),\n            minimum_delegation_amount: Some(DEFAULT_MINIMUM_DELEGATION_AMOUNT + 5),\n            maximum_delegation_amount: None,\n            minimum_bid_amount: DEFAULT_MINIMUM_BID_AMOUNT,\n            reserved_slots: 0,\n        },\n    );\n\n    assert!(result.is_success());\n    builder.commit_transforms(builder.get_post_state_hash(), result.effects());\n\n    let validator_public_key = NON_FOUNDER_VALIDATOR_1_PK.clone();\n\n    let bid = builder\n        .get_bids()\n        .into_iter()\n        .find(|bid| bid.validator_public_key() == validator_public_key)\n        .expect(\"must have bid for the validator\")\n        .as_validator_bid()\n        .expect(\"must get validator bid\");\n\n    assert_eq!(\n        bid.minimum_delegation_amount(),\n        DEFAULT_MINIMUM_DELEGATION_AMOUNT + 5\n    );\n\n    assert_eq!(\n        bid.maximum_delegation_amount(),\n        DEFAULT_MAXIMUM_DELEGATION_AMOUNT - 10\n    );\n\n    let result = builder.bidding(\n        None,\n        DEFAULT_PROTOCOL_VERSION,\n        (*NON_FOUNDER_VALIDATOR_1_ADDR).into(),\n        AuctionMethod::AddBid {\n            public_key: NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            delegation_rate: 10,\n            amount: U512::from(ADD_BID_AMOUNT_1),\n            minimum_delegation_amount: None,\n            maximum_delegation_amount: Some(DEFAULT_MINIMUM_DELEGATION_AMOUNT + 10),\n            minimum_bid_amount: DEFAULT_MINIMUM_BID_AMOUNT,\n            reserved_slots: 0,\n        },\n    );\n\n    assert!(result.is_success());\n    builder.commit_transforms(builder.get_post_state_hash(), 
result.effects());\n\n    let validator_public_key = NON_FOUNDER_VALIDATOR_1_PK.clone();\n\n    let bid = builder\n        .get_bids()\n        .into_iter()\n        .find(|bid| bid.validator_public_key() == validator_public_key)\n        .expect(\"must have bid for the validator\")\n        .as_validator_bid()\n        .expect(\"must get validator bid\");\n\n    assert_eq!(\n        bid.minimum_delegation_amount(),\n        DEFAULT_MINIMUM_DELEGATION_AMOUNT + 5\n    );\n\n    assert_eq!(\n        bid.maximum_delegation_amount(),\n        DEFAULT_MINIMUM_DELEGATION_AMOUNT + 10\n    );\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *BID_ACCOUNT_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    builder\n        .exec(delegator_1_fund_request)\n        .expect_success()\n        .commit();\n\n    let result = builder.bidding(\n        None,\n        DEFAULT_PROTOCOL_VERSION,\n        (*BID_ACCOUNT_1_ADDR).into(),\n        AuctionMethod::Delegate {\n            delegator: DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()),\n            validator: NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            amount: U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT + 9),\n            max_delegators_per_validator: builder.engine_config().max_delegators_per_validator(),\n        },\n    );\n\n    assert!(result.is_success());\n    builder.commit_transforms(builder.get_post_state_hash(), result.effects());\n\n    let result = builder.bidding(\n        None,\n        DEFAULT_PROTOCOL_VERSION,\n        (*BID_ACCOUNT_1_ADDR).into(),\n        AuctionMethod::Delegate {\n            delegator: DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()),\n            validator: NON_FOUNDER_VALIDATOR_1_PK.clone(),\n            amount: U512::from(10),\n            max_delegators_per_validator: 0,\n        },\n  
  );\n\n    assert!(!result.is_success());\n    builder.commit_transforms(builder.get_post_state_hash(), result.effects());\n}\n\n#[ignore]\n#[test]\nfn protocol_upgrade_corrects_out_of_bound_delegations_for_validators() {\n    const DELEGATOR_AMOUNT_FIXTURE: &str = \"delegator_amount\";\n\n    let (mut builder, lmdb_fixture_state, _temp_dir) =\n        lmdb_fixture::builder_from_global_state_fixture(DELEGATOR_AMOUNT_FIXTURE);\n\n    let pre_upgrade_delegator_bid = builder\n        .query(\n            None,\n            Key::BidAddr(BidAddr::DelegatedAccount {\n                validator: *NON_FOUNDER_VALIDATOR_1_ADDR,\n                delegator: *BID_ACCOUNT_1_ADDR,\n            }),\n            &[],\n        )\n        .expect(\"must have stored value\")\n        .as_bid_kind()\n        .expect(\"must be bid kind\")\n        .staked_amount()\n        .expect(\"must have staked amount\");\n\n    let validator_maximum_delegation_amount = builder\n        .query(\n            None,\n            Key::BidAddr(BidAddr::Validator(*NON_FOUNDER_VALIDATOR_1_ADDR)),\n            &[],\n        )\n        .expect(\"must have stored value\")\n        .as_bid_kind()\n        .expect(\"must be bid kind\")\n        .as_validator_bid()\n        .expect(\"must be validator_bid\")\n        .maximum_delegation_amount();\n\n    assert!(pre_upgrade_delegator_bid > U512::from(validator_maximum_delegation_amount));\n\n    let current_protocol_version = lmdb_fixture_state.genesis_protocol_version();\n\n    let new_protocol_version = ProtocolVersion::from_parts(\n        current_protocol_version.value().major,\n        current_protocol_version.value().minor + 1,\n        0,\n    );\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(current_protocol_version)\n            .with_new_protocol_version(new_protocol_version)\n            .with_activation_point(EraId::new(1))\n            
.with_new_gas_hold_handling(HoldBalanceHandling::Accrued)\n            .with_new_gas_hold_interval(1200u64)\n            .with_validator_minimum_bid_amount(0)\n            .build()\n    };\n\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let pre_upgrade_delegator_bid = builder\n        .query(\n            None,\n            Key::BidAddr(BidAddr::DelegatedAccount {\n                validator: *NON_FOUNDER_VALIDATOR_1_ADDR,\n                delegator: *BID_ACCOUNT_1_ADDR,\n            }),\n            &[],\n        )\n        .expect(\"must have stored value\")\n        .as_bid_kind()\n        .expect(\"must be bid kind\")\n        .staked_amount()\n        .expect(\"must have staked amount\");\n\n    let validator_maximum_delegation_amount = builder\n        .query(\n            None,\n            Key::BidAddr(BidAddr::Validator(*NON_FOUNDER_VALIDATOR_1_ADDR)),\n            &[],\n        )\n        .expect(\"must have stored value\")\n        .as_bid_kind()\n        .expect(\"must be bid kind\")\n        .as_validator_bid()\n        .expect(\"must be validator_bid\")\n        .maximum_delegation_amount();\n\n    assert!(pre_upgrade_delegator_bid <= U512::from(validator_maximum_delegation_amount));\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/system_contracts/auction/distribute.rs",
    "content": "use std::collections::BTreeMap;\n\nuse num_rational::Ratio;\nuse num_traits::{CheckedMul, CheckedSub};\nuse once_cell::sync::Lazy;\n\nuse crate::test::system_contracts::auction::{\n    get_delegator_staked_amount, get_era_info, get_validator_bid,\n};\nuse casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, StepRequestBuilder, UpgradeRequestBuilder,\n    DEFAULT_ACCOUNT_ADDR, DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS,\n    DEFAULT_MINIMUM_DELEGATION_AMOUNT, DEFAULT_PROTOCOL_VERSION, DEFAULT_ROUND_SEIGNIORAGE_RATE,\n    DEFAULT_SUSTAIN_PUBLIC_KEY, LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE,\n    PRODUCTION_ROUND_SEIGNIORAGE_RATE, SYSTEM_ADDR, TIMESTAMP_MILLIS_INCREMENT,\n};\nuse casper_storage::data_access_layer::AuctionMethod;\nuse casper_types::{\n    self,\n    account::AccountHash,\n    runtime_args,\n    system::auction::{\n        self, BidsExt, DelegationRate, DelegatorBid, DelegatorKind, SeigniorageAllocation,\n        SeigniorageRecipientsSnapshotV2, UnbondKind, ARG_AMOUNT, ARG_DELEGATION_RATE,\n        ARG_DELEGATOR, ARG_PUBLIC_KEY, ARG_REWARDS_MAP, ARG_VALIDATOR, DELEGATION_RATE_DENOMINATOR,\n        METHOD_DISTRIBUTE, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY,\n    },\n    EntityAddr, EraId, GenesisAccount, ProtocolVersion, PublicKey, RewardsHandling, SecretKey,\n    Timestamp, DEFAULT_MINIMUM_BID_AMOUNT, U512,\n};\n\nconst ARG_ENTRY_POINT: &str = \"entry_point\";\nconst ARG_TARGET: &str = \"target\";\n\nconst CONTRACT_TRANSFER_TO_ACCOUNT: &str = \"transfer_to_account_u512.wasm\";\nconst CONTRACT_ADD_BID: &str = \"add_bid.wasm\";\nconst CONTRACT_DELEGATE: &str = \"delegate.wasm\";\nconst TRANSFER_AMOUNT: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE;\n\nstatic VALIDATOR_1: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic VALIDATOR_2: Lazy<PublicKey> = Lazy::new(|| 
{\n    let secret_key = SecretKey::ed25519_from_bytes([5; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic VALIDATOR_3: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([7; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic DELEGATOR_1: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([204; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic DELEGATOR_2: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([206; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic DELEGATOR_3: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([208; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\n\nstatic VALIDATOR_1_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*VALIDATOR_1));\nstatic VALIDATOR_2_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*VALIDATOR_2));\nstatic VALIDATOR_3_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*VALIDATOR_3));\nstatic DELEGATOR_1_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*DELEGATOR_1));\nstatic DELEGATOR_2_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*DELEGATOR_2));\nstatic DELEGATOR_3_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*DELEGATOR_3));\nstatic GENESIS_ROUND_SEIGNIORAGE_RATE: Lazy<Ratio<U512>> = Lazy::new(|| {\n    Ratio::new(\n        U512::from(*PRODUCTION_ROUND_SEIGNIORAGE_RATE.numer()),\n        U512::from(*PRODUCTION_ROUND_SEIGNIORAGE_RATE.denom()),\n    )\n});\n\nfn get_delegator_bid(\n    builder: &mut LmdbWasmTestBuilder,\n    validator: PublicKey,\n    delegator: PublicKey,\n) -> Option<DelegatorBid> {\n    let bids = builder.get_bids();\n    bids.delegator_by_kind(&validator, &DelegatorKind::PublicKey(delegator.clone()))\n}\n\nfn withdraw_bid(\n    builder: 
&mut LmdbWasmTestBuilder,\n    sender: AccountHash,\n    validator: PublicKey,\n    amount: U512,\n) {\n    let auction = builder.get_auction_contract_hash();\n    let withdraw_bid_args = runtime_args! {\n        ARG_PUBLIC_KEY => validator,\n        ARG_AMOUNT => amount,\n    };\n    let withdraw_bid_request = ExecuteRequestBuilder::contract_call_by_hash(\n        sender,\n        auction,\n        auction::METHOD_WITHDRAW_BID,\n        withdraw_bid_args,\n    )\n    .build();\n    builder.exec(withdraw_bid_request).expect_success().commit();\n}\n\nfn undelegate(\n    builder: &mut LmdbWasmTestBuilder,\n    sender: AccountHash,\n    delegator: PublicKey,\n    validator: PublicKey,\n    amount: U512,\n) {\n    let auction = builder.get_auction_contract_hash();\n    let undelegate_args = runtime_args! {\n        ARG_DELEGATOR => delegator,\n        ARG_VALIDATOR => validator,\n        ARG_AMOUNT => amount,\n    };\n    let undelegate_request = ExecuteRequestBuilder::contract_call_by_hash(\n        sender,\n        auction,\n        auction::METHOD_UNDELEGATE,\n        undelegate_args,\n    )\n    .build();\n    builder.exec(undelegate_request).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_distribute_delegation_rate_zero() {\n    const VALIDATOR_1_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT;\n    const DELEGATOR_1_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT;\n    const DELEGATOR_2_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT;\n    const TOTAL_DELEGATOR_STAKE: u64 = DELEGATOR_1_STAKE + DELEGATOR_2_STAKE;\n    const TOTAL_STAKE: u64 = VALIDATOR_1_STAKE + TOTAL_DELEGATOR_STAKE;\n\n    const VALIDATOR_1_DELEGATION_RATE: DelegationRate = 0;\n\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE),\n            ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_2_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATOR_2_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_2.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        validator_1_fund_request,\n        delegator_1_fund_request,\n        delegator_2_fund_request,\n        validator_1_add_bid_request,\n        delegator_1_delegate_request,\n        delegator_2_delegate_request,\n    ];\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let protocol_version = DEFAULT_PROTOCOL_VERSION;\n    // initial token supply\n    let initial_supply = builder.total_supply(protocol_version, None);\n    let total_payout = builder.base_round_reward(None, protocol_version);\n    let expected_total_reward = *GENESIS_ROUND_SEIGNIORAGE_RATE * initial_supply;\n    let expected_total_reward_integer = expected_total_reward.to_integer();\n    assert_eq!(total_payout, expected_total_reward_integer);\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    for _ in 0..=builder.get_auction_delay() {\n        let step_request = StepRequestBuilder::new()\n            .with_parent_state_hash(builder.get_post_state_hash())\n            .with_protocol_version(ProtocolVersion::V1_0_0)\n            .with_next_era_id(builder.get_era().successor())\n            .with_run_auction(true)\n            .build();\n        assert!(\n            builder.step(step_request).is_success(),\n            \"must execute step successfully\"\n        );\n    }\n\n    let mut rewards = BTreeMap::new();\n    rewards.insert(VALIDATOR_1.clone(), vec![total_payout]);\n\n    let distribute_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *SYSTEM_ADDR,\n        builder.get_auction_contract_hash(),\n        METHOD_DISTRIBUTE,\n        runtime_args! 
{\n            ARG_ENTRY_POINT => METHOD_DISTRIBUTE,\n            ARG_REWARDS_MAP => rewards\n        },\n    )\n    .build();\n\n    builder.exec(distribute_request).commit().expect_success();\n\n    let delegators_share = {\n        let commission_rate = Ratio::new(\n            U512::from(VALIDATOR_1_DELEGATION_RATE),\n            U512::from(DELEGATION_RATE_DENOMINATOR),\n        );\n        let reward_multiplier =\n            Ratio::new(U512::from(TOTAL_DELEGATOR_STAKE), U512::from(TOTAL_STAKE));\n        let delegator_reward = expected_total_reward\n            .checked_mul(&reward_multiplier)\n            .expect(\"must get delegator reward\");\n        let commission = delegator_reward\n            .checked_mul(&commission_rate)\n            .expect(\"must get commission\");\n        delegator_reward.checked_sub(&commission).unwrap()\n    };\n\n    let delegator_1_expected_payout = {\n        let reward_multiplier = Ratio::new(\n            U512::from(DELEGATOR_1_STAKE),\n            U512::from(TOTAL_DELEGATOR_STAKE),\n        );\n        delegators_share\n            .checked_mul(&reward_multiplier)\n            .map(|ratio| ratio.to_integer())\n            .expect(\"must get delegator 1 payout\")\n    };\n\n    let delegator_2_expected_payout = {\n        let reward_multiplier = Ratio::new(\n            U512::from(DELEGATOR_2_STAKE),\n            U512::from(TOTAL_DELEGATOR_STAKE),\n        );\n        delegators_share\n            .checked_mul(&reward_multiplier)\n            .map(|ratio| ratio.to_integer())\n            .expect(\"must get delegator 2 payout\")\n    };\n\n    let validator_1_expected_payout = {\n        let total_delegator_payout = delegator_1_expected_payout + delegator_2_expected_payout;\n        let validator_share = expected_total_reward - Ratio::from(total_delegator_payout);\n        validator_share.to_integer()\n    };\n\n    let validator_1_actual_payout = {\n        let vaildator_stake_before = U512::from(VALIDATOR_1_STAKE);\n     
   let validator_stake_after = get_validator_bid(&mut builder, VALIDATOR_1.clone())\n            .expect(\"should have validator bid\")\n            .staked_amount();\n\n        validator_stake_after - vaildator_stake_before\n    };\n\n    assert_eq!(\n        validator_1_actual_payout, validator_1_expected_payout,\n        \"rhs {}\",\n        validator_1_expected_payout\n    );\n\n    let delegator_1_actual_payout = {\n        let delegator_stake_before = U512::from(DELEGATOR_1_STAKE);\n        let delegator_stake_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone());\n        delegator_stake_after - delegator_stake_before\n    };\n    assert_eq!(delegator_1_actual_payout, delegator_1_expected_payout);\n\n    let delegator_2_actual_payout = {\n        let delegator_stake_before = U512::from(DELEGATOR_2_STAKE);\n        let delegator_stake_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone());\n        delegator_stake_after - delegator_stake_before\n    };\n    assert_eq!(delegator_2_actual_payout, delegator_2_expected_payout);\n\n    // Subsequently, there should be no more rewards\n    let validator_1_balance = {\n        withdraw_bid(\n            &mut builder,\n            *VALIDATOR_1_ADDR,\n            VALIDATOR_1.clone(),\n            validator_1_actual_payout + U512::from(VALIDATOR_1_STAKE),\n        );\n        assert!(get_validator_bid(&mut builder, VALIDATOR_1.clone()).is_none());\n        U512::zero()\n    };\n    assert_eq!(validator_1_balance, U512::zero());\n\n    let delegator_1_balance = {\n        assert!(\n            get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()).is_none(),\n            \"validator withdrawing full stake also removes delegator 1 reinvested funds\"\n        );\n        U512::zero()\n    };\n    assert_eq!(delegator_1_balance, U512::zero());\n\n    let delegator_2_balance = {\n        assert!(\n         
   get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone()).is_none(),\n            \"validator withdrawing full stake also removes delegator 2 reinvested funds\"\n        );\n        U512::zero()\n    };\n    assert!(delegator_2_balance.is_zero());\n\n    let era_info = get_era_info(&mut builder);\n\n    assert!(matches!(\n        era_info.select(VALIDATOR_1.clone()).next(),\n        Some(SeigniorageAllocation::Validator { validator_public_key, amount })\n        if *validator_public_key == *VALIDATOR_1 && *amount == validator_1_expected_payout\n    ));\n\n    assert!(matches!(\n        era_info.select(DELEGATOR_1.clone()).next(),\n        Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key) , amount, .. })\n        if *delegator_public_key == *DELEGATOR_1 && *amount == delegator_1_expected_payout\n    ));\n\n    assert!(matches!(\n        era_info.select(DELEGATOR_2.clone()).next(),\n        Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. })\n        if *delegator_public_key == *DELEGATOR_2 && *amount == delegator_2_expected_payout\n    ));\n}\n\n#[ignore]\n#[test]\nfn should_withdraw_bids_after_distribute() {\n    const VALIDATOR_1_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT;\n    const DELEGATOR_1_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT;\n    const DELEGATOR_2_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT;\n    const TOTAL_DELEGATOR_STAKE: u64 = DELEGATOR_1_STAKE + DELEGATOR_2_STAKE;\n    const TOTAL_STAKE: u64 = VALIDATOR_1_STAKE + TOTAL_DELEGATOR_STAKE;\n\n    const VALIDATOR_1_DELEGATION_RATE: DelegationRate = 0;\n\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE),\n            ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_2_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATOR_2_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_2.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        validator_1_fund_request,\n        delegator_1_fund_request,\n        delegator_2_fund_request,\n        validator_1_add_bid_request,\n        delegator_1_delegate_request,\n        delegator_2_delegate_request,\n    ];\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let protocol_version = DEFAULT_PROTOCOL_VERSION;\n    let total_payout = builder.base_round_reward(None, protocol_version);\n\n    // initial token supply\n    let initial_supply = builder.total_supply(protocol_version, None);\n    let rate = builder.round_seigniorage_rate(None, protocol_version);\n\n    let expected_total_reward = rate * initial_supply;\n    let expected_total_reward_integer = expected_total_reward.to_integer();\n\n    assert_eq!(total_payout, expected_total_reward_integer);\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    for _ in 0..=builder.get_auction_delay() {\n        let step_request = StepRequestBuilder::new()\n            .with_parent_state_hash(builder.get_post_state_hash())\n            .with_protocol_version(protocol_version)\n            .with_next_era_id(builder.get_era().successor())\n            .with_run_auction(true)\n            .build();\n        assert!(\n            builder.step(step_request).is_success(),\n            \"must execute step successfully\"\n        );\n    }\n\n    let mut rewards = BTreeMap::new();\n    rewards.insert(VALIDATOR_1.clone(), vec![total_payout]);\n\n    let distribute_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *SYSTEM_ADDR,\n        builder.get_auction_contract_hash(),\n        METHOD_DISTRIBUTE,\n     
   runtime_args! {\n            ARG_ENTRY_POINT => METHOD_DISTRIBUTE,\n            ARG_REWARDS_MAP => rewards\n        },\n    )\n    .build();\n\n    builder.exec(distribute_request).commit().expect_success();\n\n    let validator_1_actual_payout = {\n        let validator_stake_before = U512::from(VALIDATOR_1_STAKE);\n        let validator_stake_after = get_validator_bid(&mut builder, VALIDATOR_1.clone())\n            .expect(\"should have validator bid\")\n            .staked_amount();\n\n        validator_stake_after - validator_stake_before\n    };\n\n    let delegators_share = {\n        let commission_rate = Ratio::new(\n            U512::from(VALIDATOR_1_DELEGATION_RATE),\n            U512::from(DELEGATION_RATE_DENOMINATOR),\n        );\n        let reward_multiplier =\n            Ratio::new(U512::from(TOTAL_DELEGATOR_STAKE), U512::from(TOTAL_STAKE));\n        let delegator_reward = expected_total_reward\n            .checked_mul(&reward_multiplier)\n            .expect(\"must get delegator reward\");\n        let commission = delegator_reward\n            .checked_mul(&commission_rate)\n            .expect(\"must get commission\");\n        delegator_reward.checked_sub(&commission).unwrap()\n    };\n\n    let delegator_1_expected_payout = {\n        let reward_multiplier = Ratio::new(\n            U512::from(DELEGATOR_1_STAKE),\n            U512::from(TOTAL_DELEGATOR_STAKE),\n        );\n        reward_multiplier\n            .checked_mul(&delegators_share)\n            .map(|ratio| ratio.to_integer())\n            .unwrap()\n    };\n\n    let delegator_2_expected_payout = {\n        let reward_multiplier = Ratio::new(\n            U512::from(DELEGATOR_2_STAKE),\n            U512::from(TOTAL_DELEGATOR_STAKE),\n        );\n        reward_multiplier\n            .checked_mul(&delegators_share)\n            .map(|ratio| ratio.to_integer())\n            .unwrap()\n    };\n\n    let validator_1_expected_payout = {\n        let total_delegator_payout = 
delegator_1_expected_payout + delegator_2_expected_payout;\n        let validator_share = expected_total_reward - Ratio::from(total_delegator_payout);\n        validator_share.to_integer()\n    };\n    assert_eq!(\n        validator_1_actual_payout, validator_1_expected_payout,\n        \"rhs {}\",\n        validator_1_expected_payout\n    );\n\n    let delegator_1_actual_payout = {\n        let delegator_stake_before = U512::from(DELEGATOR_1_STAKE);\n        let delegator_stake_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone());\n        delegator_stake_after - delegator_stake_before\n    };\n\n    assert_eq!(delegator_1_actual_payout, delegator_1_expected_payout);\n\n    let delegator_2_actual_payout = {\n        let delegator_stake_before = U512::from(DELEGATOR_2_STAKE);\n        let delegator_stake_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone());\n        delegator_stake_after - delegator_stake_before\n    };\n\n    assert_eq!(delegator_2_actual_payout, delegator_2_expected_payout);\n\n    let delegator_1_unstaked_amount = {\n        assert!(\n            get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()).is_some(),\n            \"delegator 1 should have a stake\"\n        );\n        let undelegate_amount = U512::from(DELEGATOR_1_STAKE) + delegator_1_actual_payout;\n\n        undelegate(\n            &mut builder,\n            *DELEGATOR_1_ADDR,\n            DELEGATOR_1.clone(),\n            VALIDATOR_1.clone(),\n            undelegate_amount,\n        );\n        assert!(\n            get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()).is_none(),\n            \"delegator 1 did not unstake full expected amount\"\n        );\n        delegator_1_actual_payout\n    };\n    assert!(\n        !delegator_1_unstaked_amount.is_zero(),\n        \"should have unstaked more than zero\"\n    );\n\n    let 
delegator_2_unstaked_amount = {\n        assert!(\n            get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone()).is_some(),\n            \"delegator 2 should have a stake\"\n        );\n        let undelegate_amount = U512::from(DELEGATOR_2_STAKE) + delegator_2_actual_payout;\n        undelegate(\n            &mut builder,\n            *DELEGATOR_2_ADDR,\n            DELEGATOR_2.clone(),\n            VALIDATOR_1.clone(),\n            undelegate_amount,\n        );\n        assert!(\n            get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone()).is_none(),\n            \"delegator 2 did not unstake full expected amount\"\n        );\n        delegator_2_actual_payout\n    };\n    assert!(\n        !delegator_2_unstaked_amount.is_zero(),\n        \"should have unstaked more than zero\"\n    );\n\n    let validator_1_balance = {\n        assert!(\n            get_validator_bid(&mut builder, VALIDATOR_1.clone()).is_some(),\n            \"validator 1 should have a stake\"\n        );\n        let withdraw_bid_amount = validator_1_actual_payout + U512::from(VALIDATOR_1_STAKE);\n        withdraw_bid(\n            &mut builder,\n            *VALIDATOR_1_ADDR,\n            VALIDATOR_1.clone(),\n            withdraw_bid_amount,\n        );\n\n        assert!(get_validator_bid(&mut builder, VALIDATOR_1.clone()).is_none());\n\n        withdraw_bid_amount\n    };\n    assert!(!validator_1_balance.is_zero());\n\n    let era_info = get_era_info(&mut builder);\n\n    assert!(matches!(\n        era_info.select(VALIDATOR_1.clone()).next(),\n        Some(SeigniorageAllocation::Validator { validator_public_key, amount })\n        if *validator_public_key == *VALIDATOR_1 && *amount == validator_1_expected_payout\n    ));\n\n    assert!(matches!(\n        era_info.select(DELEGATOR_1.clone()).next(),\n        Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. 
})\n        if *delegator_public_key == *DELEGATOR_1 && *amount == delegator_1_expected_payout\n    ));\n\n    assert!(matches!(\n        era_info.select(DELEGATOR_2.clone()).next(),\n        Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. })\n        if *delegator_public_key == *DELEGATOR_2 && *amount == delegator_2_expected_payout\n    ));\n}\n\n#[ignore]\n#[allow(unused)]\n// #[test]\nfn should_distribute_rewards_after_restaking_delegated_funds() {\n    const VALIDATOR_1_STAKE: u64 = 7_000_000_000_000_000;\n    const DELEGATOR_1_STAKE: u64 = 5_000_000_000_000_000;\n    const DELEGATOR_2_STAKE: u64 = 5_000_000_000_000_000;\n    const TOTAL_DELEGATOR_STAKE: u64 = DELEGATOR_1_STAKE + DELEGATOR_2_STAKE;\n\n    const VALIDATOR_1_DELEGATION_RATE: DelegationRate = 0;\n\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *DELEGATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE),\n            ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_2_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATOR_2_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_2.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        validator_1_fund_request,\n        delegator_1_fund_request,\n        delegator_2_fund_request,\n        validator_1_add_bid_request,\n        delegator_1_delegate_request,\n        delegator_2_delegate_request,\n    ];\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let protocol_version = DEFAULT_PROTOCOL_VERSION;\n    // initial token supply\n    let initial_supply = builder.total_supply(protocol_version, None);\n    let initial_rate = builder.round_seigniorage_rate(None, protocol_version);\n    let initial_round_reward = builder.base_round_reward(None, protocol_version);\n\n    let initial_expected_reward_rate = initial_rate * initial_supply;\n    assert_eq!(\n        initial_round_reward,\n        initial_expected_reward_rate.to_integer()\n    );\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    // we need to crank forward because our validator is not a genesis validator\n    builder.advance_eras_by_default_auction_delay();\n\n    let mut era = builder.get_era();\n    let mut round_reward = initial_round_reward;\n    let mut total_supply = initial_supply;\n    let mut expected_reward_rate = initial_expected_reward_rate;\n    let mut validator_stake = U512::from(VALIDATOR_1_STAKE);\n    let mut delegator_1_stake = U512::from(DELEGATOR_1_STAKE);\n    let mut delegator_2_stake = U512::from(DELEGATOR_2_STAKE);\n    let mut total_delegator_stake = U512::from(TOTAL_DELEGATOR_STAKE);\n    let mut total_stake = U512::from(VALIDATOR_1_STAKE + TOTAL_DELEGATOR_STAKE);\n    for idx in 0..10 {\n        let rewards = {\n            let mut rewards = 
BTreeMap::new();\n            rewards.insert(VALIDATOR_1.clone(), vec![round_reward]);\n            rewards\n        };\n\n        let result = builder.distribute(None, protocol_version, rewards, Timestamp::now().millis());\n        assert!(result.is_success(), \"failed to distribute {:?}\", result);\n        builder.advance_era();\n        let current_era = builder.get_era();\n        assert_eq!(\n            era.successor(),\n            current_era,\n            \"unexpected era {:?}\",\n            current_era\n        );\n        era = current_era;\n\n        let updated_round_reward = builder.base_round_reward(None, protocol_version);\n        round_reward = updated_round_reward;\n\n        let updated_validator_stake = get_validator_bid(&mut builder, VALIDATOR_1.clone())\n            .expect(\"should have validator bid\")\n            .staked_amount();\n        assert!(\n            updated_validator_stake > validator_stake,\n            \"validator stake should go up\"\n        );\n        let updated_delegator_1_stake =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone());\n        assert!(\n            updated_delegator_1_stake > delegator_1_stake,\n            \"delegator 1 stake should go up\"\n        );\n        let updated_delegator_2_stake =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone());\n        assert!(\n            updated_delegator_2_stake > delegator_2_stake,\n            \"delegator 2 stake should go up was: {:?} is: {:?}\",\n            delegator_2_stake,\n            updated_delegator_2_stake,\n        );\n        let updated_total_delegator_stake = updated_delegator_1_stake + updated_delegator_2_stake;\n        assert!(\n            updated_total_delegator_stake > total_delegator_stake,\n            \"total delegator stake should go up\"\n        );\n        total_delegator_stake = updated_total_delegator_stake;\n        let updated_total_stake = 
updated_validator_stake + updated_total_delegator_stake;\n        assert!(\n            updated_total_stake > total_stake,\n            \"total stake should go up\"\n        );\n\n        let delegators_share = {\n            let commission_rate = Ratio::new(\n                U512::from(VALIDATOR_1_DELEGATION_RATE),\n                U512::from(DELEGATION_RATE_DENOMINATOR),\n            );\n            let reward_multiplier = Ratio::new(updated_total_delegator_stake, updated_total_stake);\n            let delegator_reward = expected_reward_rate\n                .checked_mul(&reward_multiplier)\n                .expect(\"should get delegator reward ratio\");\n            let commission = delegator_reward\n                .checked_mul(&commission_rate)\n                .expect(\"must get delegator reward\");\n            delegator_reward.checked_sub(&commission).unwrap()\n        };\n\n        let delegator_1_expected_payout = {\n            let reward_multiplier =\n                Ratio::new(updated_delegator_1_stake, updated_total_delegator_stake);\n            delegators_share\n                .checked_mul(&reward_multiplier)\n                .map(|ratio| ratio.to_integer())\n                .expect(\"must get delegator 1 reward\")\n        };\n\n        let delegator_2_expected_payout = {\n            let reward_multiplier =\n                Ratio::new(updated_delegator_2_stake, updated_total_delegator_stake);\n            delegators_share\n                .checked_mul(&reward_multiplier)\n                .map(|ratio| ratio.to_integer())\n                .expect(\"must get delegator 2 reward\")\n        };\n\n        let validator_1_actual_payout = updated_validator_stake - validator_stake;\n\n        let validator_1_expected_payout = (expected_reward_rate\n            - Ratio::from(delegator_1_expected_payout + delegator_2_expected_payout))\n        .to_integer();\n        assert_eq!(validator_1_actual_payout, validator_1_expected_payout);\n\n        let 
delegator_1_actual_payout = updated_delegator_1_stake - delegator_1_stake;\n        assert_eq!(delegator_1_actual_payout, delegator_1_expected_payout);\n\n        let delegator_2_actual_payout = updated_delegator_2_stake - delegator_2_stake;\n        assert_eq!(delegator_2_actual_payout, delegator_2_expected_payout);\n\n        let updated_era_info = get_era_info(&mut builder);\n\n        assert!(matches!(\n            updated_era_info.select(VALIDATOR_1.clone()).next(),\n            Some(SeigniorageAllocation::Validator { validator_public_key, amount })\n            if *validator_public_key == *VALIDATOR_1 && *amount == validator_1_expected_payout\n        ));\n\n        assert!(matches!(\n            updated_era_info.select(DELEGATOR_1.clone()).next(),\n            Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. })\n            if *delegator_public_key == *DELEGATOR_1 && *amount == delegator_1_expected_payout\n        ));\n\n        assert!(matches!(\n            updated_era_info.select(DELEGATOR_2.clone()).next(),\n            Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. 
})\n            if *delegator_public_key == *DELEGATOR_2 && *amount == delegator_2_expected_payout\n        ));\n\n        // Next round of rewards\n        let updated_supply = builder.total_supply(protocol_version, None);\n        assert!(updated_supply > total_supply);\n        total_supply = updated_supply;\n\n        let updated_rate = builder.round_seigniorage_rate(None, protocol_version);\n        expected_reward_rate = updated_rate * total_supply;\n\n        // lets churn the bids just to have some fun\n        let undelegate_amount = delegator_1_expected_payout - 1;\n        let undelegate_result = builder.bidding(\n            None,\n            protocol_version,\n            (*DELEGATOR_1_ADDR).into(),\n            AuctionMethod::Undelegate {\n                validator: VALIDATOR_1.clone(),\n                delegator: DelegatorKind::PublicKey(DELEGATOR_1.clone()),\n                amount: undelegate_amount,\n            },\n        );\n        assert!(undelegate_result.is_success(), \"{:?}\", undelegate_result);\n        builder.commit_transforms(builder.get_post_state_hash(), undelegate_result.effects());\n        delegator_1_stake =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone());\n\n        let undelegate_amount = U512::from(1_000_000);\n        let undelegate_result = builder.bidding(\n            None,\n            protocol_version,\n            (*DELEGATOR_2_ADDR).into(),\n            AuctionMethod::Delegate {\n                max_delegators_per_validator: u32::MAX,\n                validator: VALIDATOR_1.clone(),\n                delegator: DelegatorKind::PublicKey(DELEGATOR_2.clone()),\n                amount: undelegate_amount,\n            },\n        );\n        assert!(undelegate_result.is_success(), \"{:?}\", undelegate_result);\n        builder.commit_transforms(builder.get_post_state_hash(), undelegate_result.effects());\n        delegator_2_stake =\n            
get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone());\n\n        let auction_method = {\n            let amount = U512::from(10_000_000);\n            if idx % 2 == 0 {\n                AuctionMethod::AddBid {\n                    public_key: VALIDATOR_1.clone(),\n                    amount,\n                    delegation_rate: 0,\n                    minimum_delegation_amount: Some(undelegate_amount.as_u64()),\n                    maximum_delegation_amount: Some(undelegate_amount.as_u64()),\n                    minimum_bid_amount: DEFAULT_MINIMUM_BID_AMOUNT,\n                    reserved_slots: 0,\n                }\n            } else {\n                AuctionMethod::WithdrawBid {\n                    public_key: VALIDATOR_1.clone(),\n                    amount,\n                    minimum_bid_amount: DEFAULT_MINIMUM_BID_AMOUNT,\n                }\n            }\n        };\n        let bid_flip_result = builder.bidding(\n            None,\n            protocol_version,\n            (*VALIDATOR_1_ADDR).into(),\n            auction_method,\n        );\n        assert!(bid_flip_result.is_success(), \"{:?}\", bid_flip_result);\n        builder.commit_transforms(builder.get_post_state_hash(), undelegate_result.effects());\n        validator_stake = get_validator_bid(&mut builder, VALIDATOR_1.clone())\n            .expect(\"should have validator bid\")\n            .staked_amount();\n\n        total_stake = validator_stake + delegator_1_stake + delegator_2_stake;\n    }\n}\n\n#[ignore]\n#[test]\nfn should_distribute_delegation_rate_half() {\n    const VALIDATOR_1_STAKE: u64 = 1_000_000_000_000;\n    const DELEGATOR_1_STAKE: u64 = 1_000_000_000_000;\n    const DELEGATOR_2_STAKE: u64 = 1_000_000_000_000;\n    const TOTAL_DELEGATOR_STAKE: u64 = DELEGATOR_1_STAKE + DELEGATOR_2_STAKE;\n    const TOTAL_STAKE: u64 = VALIDATOR_1_STAKE + TOTAL_DELEGATOR_STAKE;\n\n    const VALIDATOR_1_DELEGATION_RATE: DelegationRate = 
DELEGATION_RATE_DENOMINATOR / 2;\n\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE),\n            ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_2_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_2_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_2.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        validator_1_fund_request,\n        delegator_1_fund_request,\n        delegator_2_fund_request,\n        validator_1_add_bid_request,\n        delegator_1_delegate_request,\n        delegator_2_delegate_request,\n    ];\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let protocol_version = DEFAULT_PROTOCOL_VERSION;\n    // initial token supply\n    let initial_supply = builder.total_supply(protocol_version, None);\n    let total_payout = builder.base_round_reward(None, protocol_version);\n    let expected_total_reward = *GENESIS_ROUND_SEIGNIORAGE_RATE * initial_supply;\n    let expected_total_reward_integer = expected_total_reward.to_integer();\n    assert_eq!(total_payout, expected_total_reward_integer);\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    for _ in 0..=builder.get_auction_delay() {\n        let step_request = StepRequestBuilder::new()\n            .with_parent_state_hash(builder.get_post_state_hash())\n            .with_protocol_version(ProtocolVersion::V1_0_0)\n            .with_next_era_id(builder.get_era().successor())\n            .with_run_auction(true)\n            .build();\n\n        assert!(\n            builder.step(step_request).is_success(),\n            \"must execute step 
successfully\"\n        );\n    }\n\n    let mut rewards = BTreeMap::new();\n    rewards.insert(VALIDATOR_1.clone(), vec![total_payout]);\n\n    let distribute_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *SYSTEM_ADDR,\n        builder.get_auction_contract_hash(),\n        METHOD_DISTRIBUTE,\n        runtime_args! {\n            ARG_ENTRY_POINT => METHOD_DISTRIBUTE,\n            ARG_REWARDS_MAP => rewards\n        },\n    )\n    .build();\n\n    builder.exec(distribute_request).commit().expect_success();\n\n    let delegators_share = {\n        let commission_rate = Ratio::new(\n            U512::from(VALIDATOR_1_DELEGATION_RATE),\n            U512::from(DELEGATION_RATE_DENOMINATOR),\n        );\n        let reward_multiplier =\n            Ratio::new(U512::from(TOTAL_DELEGATOR_STAKE), U512::from(TOTAL_STAKE));\n        let delegator_reward = expected_total_reward\n            .checked_mul(&reward_multiplier)\n            .expect(\"must get delegator reward\");\n        let commission = delegator_reward\n            .checked_mul(&commission_rate)\n            .expect(\"must get commission\");\n        delegator_reward.checked_sub(&commission).unwrap()\n    };\n\n    let delegator_1_expected_payout = {\n        let reward_multiplier = Ratio::new(\n            U512::from(DELEGATOR_1_STAKE),\n            U512::from(TOTAL_DELEGATOR_STAKE),\n        );\n        delegators_share\n            .checked_mul(&reward_multiplier)\n            .map(|ratio| ratio.to_integer())\n            .unwrap()\n    };\n\n    let delegator_2_expected_payout = {\n        let reward_multiplier = Ratio::new(\n            U512::from(DELEGATOR_2_STAKE),\n            U512::from(TOTAL_DELEGATOR_STAKE),\n        );\n        delegators_share\n            .checked_mul(&reward_multiplier)\n            .map(|ratio| ratio.to_integer())\n            .unwrap()\n    };\n\n    let validator_1_expected_payout = {\n        let total_delegator_payout = delegator_1_expected_payout + 
delegator_2_expected_payout;\n        let validators_part = expected_total_reward - Ratio::from(total_delegator_payout);\n        validators_part.to_integer()\n    };\n\n    let validator_1_actual_payout = {\n        let validator_stake_before = U512::from(VALIDATOR_1_STAKE);\n        let validator_stake_after = get_validator_bid(&mut builder, VALIDATOR_1.clone())\n            .expect(\"should have validator bid\")\n            .staked_amount();\n        validator_stake_after - validator_stake_before\n    };\n\n    assert_eq!(validator_1_actual_payout, validator_1_expected_payout);\n\n    let delegator_1_actual_payout = {\n        let delegator_stake_before = U512::from(DELEGATOR_1_STAKE);\n        let delegator_stake_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone());\n        delegator_stake_after - delegator_stake_before\n    };\n    assert_eq!(delegator_1_actual_payout, delegator_1_expected_payout);\n\n    let delegator_2_actual_payout = {\n        let delegator_stake_before = U512::from(DELEGATOR_2_STAKE);\n        let delegator_stake_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone());\n        delegator_stake_after - delegator_stake_before\n    };\n    assert_eq!(delegator_2_actual_payout, delegator_2_expected_payout);\n\n    let era_info = get_era_info(&mut builder);\n\n    assert!(matches!(\n        era_info.select(VALIDATOR_1.clone()).next(),\n        Some(SeigniorageAllocation::Validator { validator_public_key, amount })\n        if *validator_public_key == *VALIDATOR_1 && *amount == validator_1_expected_payout\n    ));\n\n    assert!(matches!(\n        era_info.select(DELEGATOR_1.clone()).next(),\n        Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. 
})\n        if *delegator_public_key == *DELEGATOR_1 && *amount == delegator_1_expected_payout\n    ));\n\n    assert!(matches!(\n        era_info.select(DELEGATOR_2.clone()).next(),\n        Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. })\n        if *delegator_public_key == *DELEGATOR_2 && *amount == delegator_2_expected_payout\n    ));\n}\n\n#[ignore]\n#[test]\nfn should_distribute_delegation_rate_full() {\n    const VALIDATOR_1_STAKE: u64 = 1_000_000_000_000;\n    const DELEGATOR_1_STAKE: u64 = 1_000_000_000_000;\n    const DELEGATOR_2_STAKE: u64 = 1_000_000_000_000;\n\n    const VALIDATOR_1_DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR;\n\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *DELEGATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE),\n            ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n            auction::ARG_MINIMUM_DELEGATION_AMOUNT => 10,\n            auction::ARG_MAXIMUM_DELEGATION_AMOUNT => DELEGATOR_2_STAKE + 1,\n        },\n    )\n    .build();\n\n    let delegator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_2_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATOR_2_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_2.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        validator_1_fund_request,\n        delegator_1_fund_request,\n        delegator_2_fund_request,\n        validator_1_add_bid_request,\n        delegator_1_delegate_request,\n        delegator_2_delegate_request,\n    ];\n\n    let mut timestamp_millis =\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let protocol_version = DEFAULT_PROTOCOL_VERSION;\n    // initial token supply\n    let initial_supply = builder.total_supply(protocol_version, None);\n    let expected_total_reward = *GENESIS_ROUND_SEIGNIORAGE_RATE * initial_supply;\n    let expected_total_reward_integer = expected_total_reward.to_integer();\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    for _ in 0..5 {\n        builder.run_auction(timestamp_millis, Vec::new());\n        timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n    }\n\n    let mut rewards = BTreeMap::new();\n    rewards.insert(VALIDATOR_1.clone(), vec![expected_total_reward_integer]);\n\n    let distribute_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *SYSTEM_ADDR,\n        builder.get_auction_contract_hash(),\n        METHOD_DISTRIBUTE,\n        runtime_args! 
{\n            ARG_ENTRY_POINT => METHOD_DISTRIBUTE,\n            ARG_REWARDS_MAP => rewards\n        },\n    )\n    .build();\n\n    builder.exec(distribute_request).commit().expect_success();\n\n    let validator_1_updated_stake = {\n        let validator_stake_before = U512::from(VALIDATOR_1_STAKE);\n        let validator_stake_after = get_validator_bid(&mut builder, VALIDATOR_1.clone())\n            .expect(\"should have validator bid\")\n            .staked_amount();\n        validator_stake_after - validator_stake_before\n    };\n    let expected_validator_1_balance =\n        (expected_total_reward * Ratio::from(U512::one())).to_integer();\n    assert_eq!(validator_1_updated_stake, expected_validator_1_balance);\n\n    let delegator_1_updated_stake = {\n        let validator_stake_before = U512::from(DELEGATOR_1_STAKE);\n        let validator_stake_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone());\n        validator_stake_after - validator_stake_before\n    };\n    let expected_delegator_1_balance = U512::zero();\n    assert_eq!(delegator_1_updated_stake, expected_delegator_1_balance);\n\n    let delegator_2_balance = {\n        let validator_stake_before = U512::from(DELEGATOR_2_STAKE);\n        let validator_stake_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone());\n        validator_stake_after - validator_stake_before\n    };\n    let expected_delegator_2_balance = U512::zero();\n    assert_eq!(delegator_2_balance, expected_delegator_2_balance);\n\n    let total_payout = validator_1_updated_stake + delegator_1_updated_stake + delegator_2_balance;\n    assert_eq!(total_payout, expected_total_reward_integer);\n\n    let era_info = get_era_info(&mut builder);\n\n    assert!(matches!(\n        era_info.select(VALIDATOR_1.clone()).next(),\n        Some(SeigniorageAllocation::Validator { validator_public_key, amount })\n        if *validator_public_key 
== *VALIDATOR_1 && *amount == expected_validator_1_balance\n    ));\n\n    assert!(matches!(\n        era_info.select(DELEGATOR_1.clone()).next(),\n        Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. })\n        if *delegator_public_key == *DELEGATOR_1 && *amount == expected_delegator_1_balance\n    ));\n\n    assert!(matches!(\n        era_info.select(DELEGATOR_2.clone()).next(),\n        Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. })\n        if *delegator_public_key == *DELEGATOR_2 && *amount == expected_delegator_1_balance\n    ));\n}\n\n#[ignore]\n#[test]\nfn should_distribute_uneven_delegation_rate_zero() {\n    const VALIDATOR_1_STAKE: u64 = 200_000_000_000;\n    const DELEGATOR_1_STAKE: u64 = 600_000_000_000;\n    const DELEGATOR_2_STAKE: u64 = 800_000_000_000;\n    const TOTAL_DELEGATOR_STAKE: u64 = DELEGATOR_1_STAKE + DELEGATOR_2_STAKE;\n    const TOTAL_STAKE: u64 = VALIDATOR_1_STAKE + TOTAL_DELEGATOR_STAKE;\n\n    const VALIDATOR_1_DELEGATION_RATE: DelegationRate = 0;\n\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE),\n            ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_2_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATOR_2_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_2.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        validator_1_fund_request,\n        delegator_1_fund_request,\n        delegator_2_fund_request,\n        validator_1_add_bid_request,\n        delegator_1_delegate_request,\n        delegator_2_delegate_request,\n    ];\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let protocol_version = DEFAULT_PROTOCOL_VERSION;\n    // initial token supply\n    let initial_supply = builder.total_supply(protocol_version, None);\n    let total_payout = builder.base_round_reward(None, protocol_version);\n    let expected_total_reward = *GENESIS_ROUND_SEIGNIORAGE_RATE * initial_supply;\n    let expected_total_reward_integer = expected_total_reward.to_integer();\n    assert_eq!(total_payout, expected_total_reward_integer);\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    for _ in 0..=builder.get_auction_delay() {\n        let step_request = StepRequestBuilder::new()\n            .with_parent_state_hash(builder.get_post_state_hash())\n            .with_protocol_version(ProtocolVersion::V1_0_0)\n            .with_next_era_id(builder.get_era().successor())\n            .with_run_auction(true)\n            .build();\n\n        assert!(\n            builder.step(step_request).is_success(),\n            \"must execute step successfully\"\n        );\n    }\n\n    let mut rewards = BTreeMap::new();\n    rewards.insert(VALIDATOR_1.clone(), vec![total_payout]);\n\n    let distribute_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *SYSTEM_ADDR,\n        builder.get_auction_contract_hash(),\n        METHOD_DISTRIBUTE,\n        runtime_args! 
{\n            ARG_ENTRY_POINT => METHOD_DISTRIBUTE,\n            ARG_REWARDS_MAP => rewards\n        },\n    )\n    .build();\n\n    builder.exec(distribute_request).commit().expect_success();\n\n    let delegators_share = {\n        let commission_rate = Ratio::new(\n            U512::from(VALIDATOR_1_DELEGATION_RATE),\n            U512::from(DELEGATION_RATE_DENOMINATOR),\n        );\n        let reward_multiplier =\n            Ratio::new(U512::from(TOTAL_DELEGATOR_STAKE), U512::from(TOTAL_STAKE));\n        let delegator_reward = expected_total_reward\n            .checked_mul(&reward_multiplier)\n            .expect(\"must get delegator reward\");\n        let commission = delegator_reward\n            .checked_mul(&commission_rate)\n            .expect(\"must get commission\");\n        delegator_reward.checked_sub(&commission).unwrap()\n    };\n\n    let delegator_1_expected_payout = {\n        let reward_multiplier = Ratio::new(\n            U512::from(DELEGATOR_1_STAKE),\n            U512::from(TOTAL_DELEGATOR_STAKE),\n        );\n        delegators_share\n            .checked_mul(&reward_multiplier)\n            .map(|ratio| ratio.to_integer())\n            .unwrap()\n    };\n\n    let delegator_2_expected_payout = {\n        let reward_multiplier = Ratio::new(\n            U512::from(DELEGATOR_2_STAKE),\n            U512::from(TOTAL_DELEGATOR_STAKE),\n        );\n        delegators_share\n            .checked_mul(&reward_multiplier)\n            .map(|ratio| ratio.to_integer())\n            .unwrap()\n    };\n\n    let validator_1_expected_payout = {\n        let total_delegator_payout = delegator_1_expected_payout + delegator_2_expected_payout;\n        let validators_part = expected_total_reward - Ratio::from(total_delegator_payout);\n        validators_part.to_integer()\n    };\n\n    let validator_1_updated_stake = {\n        let validator_stake_before = U512::from(VALIDATOR_1_STAKE);\n        let validator_stake_after = get_validator_bid(&mut 
builder, VALIDATOR_1.clone())\n            .expect(\"should have validator bid\")\n            .staked_amount();\n        validator_stake_after - validator_stake_before\n    };\n    assert_eq!(validator_1_updated_stake, validator_1_expected_payout);\n\n    let delegator_1_updated_stake = {\n        let delegator_stake_before = U512::from(DELEGATOR_1_STAKE);\n        let delegator_stake_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone());\n        delegator_stake_after - delegator_stake_before\n    };\n    assert_eq!(delegator_1_updated_stake, delegator_1_expected_payout);\n\n    let delegator_2_updated_stake = {\n        let delegator_stake_before = U512::from(DELEGATOR_2_STAKE);\n        let delegator_stake_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone());\n        delegator_stake_after - delegator_stake_before\n    };\n    assert_eq!(delegator_2_updated_stake, delegator_2_expected_payout);\n\n    let era_info = get_era_info(&mut builder);\n\n    assert!(matches!(\n        era_info.select(VALIDATOR_1.clone()).next(),\n        Some(SeigniorageAllocation::Validator { validator_public_key, amount })\n        if *validator_public_key == *VALIDATOR_1 && *amount == validator_1_expected_payout\n    ));\n\n    assert!(matches!(\n        era_info.select(DELEGATOR_1.clone()).next(),\n        Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. })\n        if *delegator_public_key == *DELEGATOR_1 && *amount == delegator_1_expected_payout\n    ));\n\n    assert!(matches!(\n        era_info.select(DELEGATOR_2.clone()).next(),\n        Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. 
})\n        if *delegator_public_key == *DELEGATOR_2 && *amount == delegator_2_expected_payout\n    ));\n}\n\n#[ignore]\n#[test]\nfn should_distribute_uneven_delegation_rate_zero_with_sustain_turned_on() {\n    const VALIDATOR_1_STAKE: u64 = 200_000_000_000;\n    const DELEGATOR_1_STAKE: u64 = 600_000_000_000;\n    const DELEGATOR_2_STAKE: u64 = 10_000_000_000_000;\n    const TOTAL_DELEGATOR_STAKE: u64 = DELEGATOR_1_STAKE + DELEGATOR_2_STAKE;\n    const TOTAL_STAKE: u64 = VALIDATOR_1_STAKE + TOTAL_DELEGATOR_STAKE;\n\n    const VALIDATOR_1_DELEGATION_RATE: DelegationRate = 0;\n\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE),\n            ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_2_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_2_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_2.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        validator_1_fund_request,\n        delegator_1_fund_request,\n        delegator_2_fund_request,\n        validator_1_add_bid_request,\n        delegator_1_delegate_request,\n        delegator_2_delegate_request,\n    ];\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    let mut default_request = LOCAL_GENESIS_REQUEST.clone();\n    default_request.push_genesis_account(GenesisAccount::SustainAccount {\n        public_key: DEFAULT_SUSTAIN_PUBLIC_KEY.clone(),\n    });\n    default_request.push_rewards_ratio(Ratio::new(1, 4));\n    builder.run_genesis(default_request);\n\n    let protocol_version = DEFAULT_PROTOCOL_VERSION;\n    // initial token supply\n    let initial_supply = builder.total_supply(protocol_version, None);\n    let total_payout = builder.base_round_reward(None, protocol_version);\n    let expected_total_reward = *GENESIS_ROUND_SEIGNIORAGE_RATE * initial_supply;\n    let expected_total_reward_integer = expected_total_reward.to_integer();\n    assert_eq!(total_payout, 
expected_total_reward_integer);\n\n    let sustain_ratio_as_u512 = Ratio::new(U512::from(1), U512::from(4));\n\n    let expected_total_reward =\n        expected_total_reward * { Ratio::new(U512::one(), U512::one()) - sustain_ratio_as_u512 };\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    for _ in 0..=builder.get_auction_delay() {\n        let step_request = StepRequestBuilder::new()\n            .with_parent_state_hash(builder.get_post_state_hash())\n            .with_protocol_version(ProtocolVersion::V1_0_0)\n            .with_next_era_id(builder.get_era().successor())\n            .with_run_auction(true)\n            .build();\n\n        assert!(\n            builder.step(step_request).is_success(),\n            \"must execute step successfully\"\n        );\n    }\n\n    let mut rewards = BTreeMap::new();\n    rewards.insert(VALIDATOR_1.clone(), vec![total_payout]);\n\n    let block_rewards = {\n        let mut ret = BTreeMap::new();\n        ret.insert(VALIDATOR_1.clone(), vec![total_payout]);\n        ret\n    };\n\n    let sustain_purse = builder\n        .get_account(DEFAULT_SUSTAIN_PUBLIC_KEY.to_account_hash())\n        .expect(\"must have sustain account as part of genesis setup\")\n        .main_purse();\n\n    let block_rewards_result = builder.distribute_with_rewards_handling(\n        None,\n        ProtocolVersion::V2_0_0,\n        block_rewards.clone(),\n        0,\n        RewardsHandling::Sustain {\n            ratio: Ratio::new(1, 4),\n            purse_address: sustain_purse.to_formatted_string(),\n        },\n    );\n    assert!(block_rewards_result.is_success());\n\n    let sustain_purse_expected_balance = {\n        let total = {\n            let mut ret = U512::zero();\n            for rewards_vec in block_rewards.values() {\n                for reward in rewards_vec {\n                    ret += *reward\n                }\n            }\n\n            ret\n        };\n\n 
       Ratio::new(total, U512::one()) * sustain_ratio_as_u512\n    }\n    .to_integer();\n\n    let actual_sustain_balance = builder.get_purse_balance(sustain_purse);\n    assert_eq!(actual_sustain_balance, sustain_purse_expected_balance);\n\n    let delegators_share = {\n        let commission_rate = Ratio::new(\n            U512::from(VALIDATOR_1_DELEGATION_RATE),\n            U512::from(DELEGATION_RATE_DENOMINATOR),\n        );\n        let reward_multiplier =\n            Ratio::new(U512::from(TOTAL_DELEGATOR_STAKE), U512::from(TOTAL_STAKE));\n        let delegator_reward = expected_total_reward\n            .checked_mul(&reward_multiplier)\n            .expect(\"must get delegator reward\");\n        let commission = delegator_reward\n            .checked_mul(&commission_rate)\n            .expect(\"must get commission\");\n        delegator_reward.checked_sub(&commission).unwrap()\n    };\n\n    let delegator_1_expected_payout = {\n        let reward_multiplier = Ratio::new(\n            U512::from(DELEGATOR_1_STAKE),\n            U512::from(TOTAL_DELEGATOR_STAKE),\n        );\n        delegators_share\n            .checked_mul(&reward_multiplier)\n            .map(|ratio| ratio.to_integer())\n            .unwrap()\n    };\n\n    let delegator_2_expected_payout = {\n        let reward_multiplier = Ratio::new(\n            U512::from(DELEGATOR_2_STAKE),\n            U512::from(TOTAL_DELEGATOR_STAKE),\n        );\n        delegators_share\n            .checked_mul(&reward_multiplier)\n            .map(|ratio| ratio.to_integer())\n            .unwrap()\n    };\n\n    let validator_1_expected_payout = {\n        let total_delegator_payout = delegator_1_expected_payout + delegator_2_expected_payout;\n\n        let validators_part = expected_total_reward - total_delegator_payout;\n        validators_part.to_integer()\n    };\n\n    let validator_1_updated_stake = {\n        let validator_stake_before = U512::from(VALIDATOR_1_STAKE);\n        let 
validator_stake_after = get_validator_bid(&mut builder, VALIDATOR_1.clone())\n            .expect(\"should have validator bid\")\n            .staked_amount();\n        validator_stake_after - validator_stake_before\n    };\n    assert_eq!(validator_1_updated_stake, validator_1_expected_payout);\n\n    let delegator_1_updated_stake = {\n        let delegator_stake_before = U512::from(DELEGATOR_1_STAKE);\n        let delegator_stake_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone());\n        delegator_stake_after - delegator_stake_before\n    };\n    assert_eq!(delegator_1_updated_stake, delegator_1_expected_payout);\n\n    let delegator_2_updated_stake = {\n        let delegator_stake_before = U512::from(DELEGATOR_2_STAKE);\n        let delegator_stake_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone());\n        delegator_stake_after - delegator_stake_before\n    };\n    assert_eq!(delegator_2_updated_stake, delegator_2_expected_payout);\n\n    let era_info = get_era_info(&mut builder);\n\n    assert!(matches!(\n        era_info.select(VALIDATOR_1.clone()).next(),\n        Some(SeigniorageAllocation::Validator { validator_public_key, amount })\n        if *validator_public_key == *VALIDATOR_1 && *amount == validator_1_updated_stake\n    ));\n\n    assert!(matches!(\n        era_info.select(DELEGATOR_1.clone()).next(),\n        Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. })\n        if *delegator_public_key == *DELEGATOR_1 && *amount == delegator_1_updated_stake\n    ));\n\n    assert!(matches!(\n        era_info.select(DELEGATOR_2.clone()).next(),\n        Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. 
})\n        if *delegator_public_key == *DELEGATOR_2 && *amount == delegator_2_updated_stake\n    ));\n}\n\n#[ignore]\n#[test]\nfn should_distribute_with_multiple_validators_and_delegators() {\n    const VALIDATOR_1_STAKE: u64 = 1_000_000;\n    const VALIDATOR_2_STAKE: u64 = 1_000_000;\n    const VALIDATOR_3_STAKE: u64 = 1_000_000;\n\n    const VALIDATOR_1_DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR / 2;\n    const VALIDATOR_2_DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR / 4;\n    const VALIDATOR_3_DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR;\n\n    const DELEGATOR_1_STAKE: u64 = 6_000_000_000_000;\n    const DELEGATOR_2_STAKE: u64 = 8_000_000_000_000;\n    const DELEGATOR_3_STAKE: u64 = 2_000_000_000_000;\n\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *VALIDATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_3_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *VALIDATOR_3_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_3_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_3_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE),\n            ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let validator_2_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_2_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(VALIDATOR_2_STAKE),\n            ARG_DELEGATION_RATE => VALIDATOR_2_DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_2.clone(),\n        },\n    )\n    .build();\n\n    let validator_3_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_3_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(VALIDATOR_3_STAKE),\n            ARG_DELEGATION_RATE => VALIDATOR_3_DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_3.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_2_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_2_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_2.clone(),\n        },\n    )\n    .build();\n\n    let delegator_3_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_3_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATOR_3_STAKE),\n            ARG_VALIDATOR => VALIDATOR_2.clone(),\n            ARG_DELEGATOR => DELEGATOR_3.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        validator_1_fund_request,\n        validator_2_fund_request,\n        validator_3_fund_request,\n        delegator_1_fund_request,\n        delegator_2_fund_request,\n        delegator_3_fund_request,\n        validator_1_add_bid_request,\n        validator_2_add_bid_request,\n        validator_3_add_bid_request,\n        delegator_1_delegate_request,\n        delegator_2_delegate_request,\n        delegator_3_delegate_request,\n    ];\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let protocol_version = DEFAULT_PROTOCOL_VERSION;\n    // initial token supply\n    let initial_supply = builder.total_supply(protocol_version, None);\n    let total_payout = builder.base_round_reward(None, protocol_version);\n    let expected_total_reward = *GENESIS_ROUND_SEIGNIORAGE_RATE * initial_supply;\n    let expected_total_reward_integer = expected_total_reward.to_integer();\n    assert_eq!(total_payout, expected_total_reward_integer);\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    for _ in 0..=builder.get_auction_delay() {\n        let step_request = StepRequestBuilder::new()\n            .with_parent_state_hash(builder.get_post_state_hash())\n            .with_protocol_version(ProtocolVersion::V1_0_0)\n            .with_next_era_id(builder.get_era().successor())\n            .with_run_auction(true)\n            .build();\n\n        assert!(\n            builder.step(step_request).is_success(),\n            \"must execute step successfully\"\n        );\n    }\n\n    let mut rewards = BTreeMap::new();\n    rewards.insert(VALIDATOR_1.clone(), 
vec![total_payout]);\n\n    // Validator 1 distribution\n    let distribute_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *SYSTEM_ADDR,\n        builder.get_auction_contract_hash(),\n        METHOD_DISTRIBUTE,\n        runtime_args! {\n            ARG_ENTRY_POINT => METHOD_DISTRIBUTE,\n            ARG_REWARDS_MAP => rewards\n        },\n    )\n    .build();\n\n    builder.exec(distribute_request).commit().expect_success();\n\n    let validator_1_actual_payout = {\n        let validator_stake_before = U512::from(VALIDATOR_1_STAKE);\n        let validator_stake_after = get_validator_bid(&mut builder, VALIDATOR_1.clone())\n            .expect(\"should have validator bid\")\n            .staked_amount();\n        validator_stake_after - validator_stake_before\n    };\n\n    let delegator_1_actual_payout = {\n        let delegator_stake_before = U512::from(DELEGATOR_1_STAKE);\n        let delegator_stake_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone());\n        delegator_stake_after - delegator_stake_before\n    };\n\n    let delegator_2_actual_payout = {\n        let delegator_stake_before = U512::from(DELEGATOR_2_STAKE);\n        let delegator_stake_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone());\n        delegator_stake_after - delegator_stake_before\n    };\n\n    let era_info = get_era_info(&mut builder);\n\n    assert!(matches!(\n        era_info.select(VALIDATOR_1.clone()).next(),\n        Some(SeigniorageAllocation::Validator { validator_public_key, amount })\n        if *validator_public_key == *VALIDATOR_1 && *amount == validator_1_actual_payout\n    ));\n\n    assert!(matches!(\n        era_info.select(DELEGATOR_1.clone()).next(),\n        Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. 
})\n        if *delegator_public_key == *DELEGATOR_1 && *amount == delegator_1_actual_payout\n    ));\n\n    assert!(matches!(\n        era_info.select(DELEGATOR_2.clone()).next(),\n        Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. })\n        if *delegator_public_key == *DELEGATOR_2 && *amount == delegator_2_actual_payout\n    ));\n\n    let mut rewards = BTreeMap::new();\n    rewards.insert(VALIDATOR_2.clone(), vec![total_payout]);\n\n    // Validator 2 distribution\n    let distribute_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *SYSTEM_ADDR,\n        builder.get_auction_contract_hash(),\n        METHOD_DISTRIBUTE,\n        runtime_args! {\n            ARG_ENTRY_POINT => METHOD_DISTRIBUTE,\n            ARG_REWARDS_MAP => rewards\n        },\n    )\n    .build();\n\n    builder.exec(distribute_request).commit().expect_success();\n\n    let validator_2_actual_payout = {\n        let validator_stake_before = U512::from(VALIDATOR_2_STAKE);\n        let validator_stake_after = get_validator_bid(&mut builder, VALIDATOR_2.clone())\n            .expect(\"should have validator bid\")\n            .staked_amount();\n        validator_stake_after - validator_stake_before\n    };\n\n    let delegator_3_actual_payout = {\n        let delegator_stake_before = U512::from(DELEGATOR_3_STAKE);\n        let delegator_stake_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_2.clone(), DELEGATOR_3.clone());\n        delegator_stake_after - delegator_stake_before\n    };\n\n    let era_info = get_era_info(&mut builder);\n\n    assert!(matches!(\n        era_info.select(VALIDATOR_2.clone()).next(),\n        Some(SeigniorageAllocation::Validator { validator_public_key, amount })\n        if *validator_public_key == *VALIDATOR_2 && *amount == validator_2_actual_payout\n    ));\n\n    assert!(matches!(\n        era_info.select(DELEGATOR_3.clone()).next(),\n        
Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. })\n        if *delegator_public_key == *DELEGATOR_3 && *amount == delegator_3_actual_payout\n    ));\n\n    let mut rewards = BTreeMap::new();\n    rewards.insert(VALIDATOR_3.clone(), vec![total_payout]);\n\n    // Validator 3 distribution\n    let distribute_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *SYSTEM_ADDR,\n        builder.get_auction_contract_hash(),\n        METHOD_DISTRIBUTE,\n        runtime_args! {\n            ARG_ENTRY_POINT => METHOD_DISTRIBUTE,\n            ARG_REWARDS_MAP => rewards\n        },\n    )\n    .build();\n\n    builder.exec(distribute_request).commit().expect_success();\n\n    let validator_3_actual_payout = {\n        let validator_stake_before = U512::from(VALIDATOR_3_STAKE);\n        let validator_stake_after = get_validator_bid(&mut builder, VALIDATOR_3.clone())\n            .expect(\"should have validator bid\")\n            .staked_amount();\n        validator_stake_after - validator_stake_before\n    };\n\n    let era_info = get_era_info(&mut builder);\n\n    assert!(matches!(\n        era_info.select(VALIDATOR_3.clone()).next(),\n        Some(SeigniorageAllocation::Validator { validator_public_key, amount })\n        if *validator_public_key == *VALIDATOR_3 && *amount == validator_3_actual_payout\n    ));\n}\n\n#[ignore]\n#[test]\nfn should_distribute_with_multiple_validators_and_shared_delegator_with_sustain_on() {\n    const VALIDATOR_1_STAKE: u64 = 1_000_000_000_000;\n    const VALIDATOR_2_STAKE: u64 = 1_000_000_000_000;\n    const VALIDATOR_3_STAKE: u64 = 1_000_000_000_000;\n\n    const DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR / 2;\n\n    const DELEGATOR_1_STAKE: u64 = 1_000_000_000_000;\n\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *VALIDATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_3_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *VALIDATOR_3_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_3_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *DELEGATOR_3_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE),\n            ARG_DELEGATION_RATE => DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let validator_2_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_2_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(VALIDATOR_2_STAKE),\n            ARG_DELEGATION_RATE => DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_2.clone(),\n        },\n    )\n    .build();\n\n    let validator_3_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_3_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(VALIDATOR_3_STAKE),\n            ARG_DELEGATION_RATE => DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_3.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_validator_2_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_2.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_validator_3_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_3.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        validator_1_fund_request,\n        validator_2_fund_request,\n        validator_3_fund_request,\n        delegator_1_fund_request,\n        delegator_2_fund_request,\n        delegator_3_fund_request,\n        validator_1_add_bid_request,\n        validator_2_add_bid_request,\n        validator_3_add_bid_request,\n        delegator_1_validator_1_delegate_request,\n        delegator_1_validator_2_delegate_request,\n        delegator_1_validator_3_delegate_request,\n    ];\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    let mut default_request = LOCAL_GENESIS_REQUEST.clone();\n    default_request.push_genesis_account(GenesisAccount::SustainAccount {\n        public_key: DEFAULT_SUSTAIN_PUBLIC_KEY.clone(),\n    });\n    default_request.push_rewards_ratio(Ratio::new(1, 4));\n    builder.run_genesis(default_request);\n\n    let protocol_version = DEFAULT_PROTOCOL_VERSION;\n    // initial token supply\n    let initial_supply = builder.total_supply(protocol_version, None);\n    let total_payout = builder.base_round_reward(None, protocol_version);\n    let expected_total_reward = *GENESIS_ROUND_SEIGNIORAGE_RATE * initial_supply;\n    let expected_total_reward_integer = expected_total_reward.to_integer();\n    assert_eq!(total_payout, expected_total_reward_integer);\n\n    let sustain_ratio_as_u512 = Ratio::new(U512::from(1), 
U512::from(4));\n\n    let expected_total_reward =\n        expected_total_reward * { Ratio::new(U512::one(), U512::one()) - sustain_ratio_as_u512 };\n    let expected_total_reward_integer = expected_total_reward.to_integer();\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    for _ in 0..=builder.get_auction_delay() {\n        let step_request = StepRequestBuilder::new()\n            .with_parent_state_hash(builder.get_post_state_hash())\n            .with_protocol_version(ProtocolVersion::V1_0_0)\n            .with_next_era_id(builder.get_era().successor())\n            .with_run_auction(true)\n            .build();\n        assert!(\n            builder.step(step_request).is_success(),\n            \"must execute step successfully\"\n        );\n    }\n\n    let mut rewards = BTreeMap::new();\n    rewards.insert(VALIDATOR_1.clone(), vec![total_payout]);\n    rewards.insert(VALIDATOR_2.clone(), vec![total_payout]);\n    rewards.insert(VALIDATOR_3.clone(), vec![total_payout]);\n\n    let sustain_purse = builder\n        .get_account(DEFAULT_SUSTAIN_PUBLIC_KEY.to_account_hash())\n        .expect(\"must have sustain account as part of genesis setup\")\n        .main_purse();\n\n    let block_rewards_result = builder.distribute_with_rewards_handling(\n        None,\n        ProtocolVersion::V2_0_0,\n        rewards.clone(),\n        0,\n        RewardsHandling::Sustain {\n            ratio: Ratio::new(1, 4),\n            purse_address: sustain_purse.to_formatted_string(),\n        },\n    );\n    assert!(block_rewards_result.is_success());\n\n    let validator_1_delegator_1_share = {\n        let total_reward = &Ratio::from(expected_total_reward_integer);\n\n        let validator_1_total_stake = VALIDATOR_1_STAKE + DELEGATOR_1_STAKE;\n\n        let delegator_total_stake = U512::from(DELEGATOR_1_STAKE);\n        let commission_rate = Ratio::new(\n            U512::from(DELEGATION_RATE),\n            
U512::from(DELEGATION_RATE_DENOMINATOR),\n        );\n        let reward_multiplier =\n            Ratio::new(delegator_total_stake, U512::from(validator_1_total_stake));\n        let delegator_reward = total_reward\n            .checked_mul(&reward_multiplier)\n            .expect(\"must get delegator reward\");\n        let commission = delegator_reward\n            .checked_mul(&commission_rate)\n            .expect(\"must get commission\");\n        delegator_reward.checked_sub(&commission).unwrap()\n    }\n    .to_integer();\n\n    let validator_1_actual_payout = {\n        let validator_balance_before = U512::from(VALIDATOR_1_STAKE);\n        let validator_balance_after = get_validator_bid(&mut builder, VALIDATOR_1.clone())\n            .expect(\"should have validator bid\")\n            .staked_amount();\n        validator_balance_after - validator_balance_before\n    };\n\n    let validator_1_expected_payout = {\n        let validator_share = expected_total_reward;\n        let validator_portion = validator_share - Ratio::from(validator_1_delegator_1_share);\n        validator_portion.to_integer()\n    };\n    assert_eq!(validator_1_actual_payout, validator_1_expected_payout);\n\n    let validator_2_delegator_1_share = {\n        let validator_2_total_stake = VALIDATOR_2_STAKE + DELEGATOR_1_STAKE;\n\n        let total_reward = &Ratio::from(expected_total_reward.to_integer());\n\n        let delegator_total_stake = U512::from(DELEGATOR_1_STAKE);\n        let commission_rate = Ratio::new(\n            U512::from(DELEGATION_RATE),\n            U512::from(DELEGATION_RATE_DENOMINATOR),\n        );\n        let reward_multiplier =\n            Ratio::new(delegator_total_stake, U512::from(validator_2_total_stake));\n        let delegator_reward = total_reward\n            .checked_mul(&reward_multiplier)\n            .expect(\"must get delegator reward\");\n        let commission = delegator_reward\n            .checked_mul(&commission_rate)\n            
.expect(\"must get commission\");\n        delegator_reward.checked_sub(&commission).unwrap()\n    }\n    .to_integer();\n\n    let validator_2_actual_payout = {\n        let validator_balance_before = U512::from(VALIDATOR_2_STAKE);\n        let validator_balance_after = get_validator_bid(&mut builder, VALIDATOR_2.clone())\n            .expect(\"should have validator bid\")\n            .staked_amount();\n        validator_balance_after - validator_balance_before\n    };\n    let validator_2_expected_payout = {\n        let validator_share = expected_total_reward;\n        let validator_portion = validator_share - Ratio::from(validator_2_delegator_1_share);\n        validator_portion.to_integer()\n    };\n    assert_eq!(validator_2_actual_payout, validator_2_expected_payout);\n\n    let validator_3_delegator_1_share = {\n        let validator_3_total_stake = VALIDATOR_3_STAKE + DELEGATOR_1_STAKE;\n\n        let total_reward = &Ratio::from(expected_total_reward.to_integer());\n\n        let delegator_total_stake = U512::from(DELEGATOR_1_STAKE);\n        let commission_rate = Ratio::new(\n            U512::from(DELEGATION_RATE),\n            U512::from(DELEGATION_RATE_DENOMINATOR),\n        );\n        let reward_multiplier =\n            Ratio::new(delegator_total_stake, U512::from(validator_3_total_stake));\n        let delegator_reward = total_reward\n            .checked_mul(&reward_multiplier)\n            .expect(\"must get delegator reward\");\n        let commission = delegator_reward\n            .checked_mul(&commission_rate)\n            .expect(\"must get commission\");\n        delegator_reward.checked_sub(&commission).unwrap()\n    }\n    .to_integer();\n\n    let validator_3_actual_payout = {\n        let validator_balance_before = U512::from(VALIDATOR_3_STAKE);\n        let validator_balance_after = get_validator_bid(&mut builder, VALIDATOR_3.clone())\n            .expect(\"should have validator bid\")\n            .staked_amount();\n        
validator_balance_after - validator_balance_before\n    };\n    let validator_3_expected_payout = {\n        let validator_share = expected_total_reward;\n        let validator_portion = validator_share - Ratio::from(validator_3_delegator_1_share);\n        validator_portion.to_integer()\n    };\n    assert_eq!(validator_3_actual_payout, validator_3_expected_payout);\n\n    let delegator_1_validator_1_updated_stake = {\n        let delegator_balance_before = U512::from(DELEGATOR_1_STAKE);\n        let delegator_balance_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone());\n        delegator_balance_after - delegator_balance_before\n    };\n\n    assert_eq!(\n        delegator_1_validator_1_updated_stake,\n        validator_1_delegator_1_share\n    );\n\n    let delegator_1_validator_2_updated_stake = {\n        let delegator_stake_before = U512::from(DELEGATOR_1_STAKE);\n        let delegator_stake_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_2.clone(), DELEGATOR_1.clone());\n        delegator_stake_after - delegator_stake_before\n    };\n    assert_eq!(\n        delegator_1_validator_2_updated_stake,\n        validator_2_delegator_1_share\n    );\n\n    let delegator_1_validator_3_updated_stake = {\n        let delegator_stake_before = U512::from(DELEGATOR_1_STAKE);\n        let delegator_stake_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_3.clone(), DELEGATOR_1.clone());\n        delegator_stake_after - delegator_stake_before\n    };\n    assert_eq!(\n        delegator_1_validator_3_updated_stake,\n        validator_3_delegator_1_share\n    );\n}\n\n#[ignore]\n#[test]\nfn should_distribute_with_multiple_validators_and_shared_delegator() {\n    const VALIDATOR_1_STAKE: u64 = 1_000_000_000_000;\n    const VALIDATOR_2_STAKE: u64 = 1_000_000_000_000;\n    const VALIDATOR_3_STAKE: u64 = 1_000_000_000_000;\n\n    const DELEGATION_RATE: DelegationRate = 
DELEGATION_RATE_DENOMINATOR / 2;\n\n    const DELEGATOR_1_STAKE: u64 = 1_000_000_000_000;\n\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *VALIDATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_3_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *VALIDATOR_3_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *DELEGATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_3_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_3_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE),\n            ARG_DELEGATION_RATE => DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let validator_2_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_2_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(VALIDATOR_2_STAKE),\n            ARG_DELEGATION_RATE => DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_2.clone(),\n        },\n    )\n    .build();\n\n    let validator_3_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_3_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(VALIDATOR_3_STAKE),\n            ARG_DELEGATION_RATE => DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_3.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_validator_2_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_2.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_validator_3_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_3.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        validator_1_fund_request,\n        validator_2_fund_request,\n        validator_3_fund_request,\n        delegator_1_fund_request,\n        delegator_2_fund_request,\n        delegator_3_fund_request,\n        validator_1_add_bid_request,\n        validator_2_add_bid_request,\n        validator_3_add_bid_request,\n        delegator_1_validator_1_delegate_request,\n        delegator_1_validator_2_delegate_request,\n        delegator_1_validator_3_delegate_request,\n    ];\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let protocol_version = DEFAULT_PROTOCOL_VERSION;\n    // initial token supply\n    let initial_supply = builder.total_supply(protocol_version, None);\n    let total_payout = builder.base_round_reward(None, protocol_version);\n    let expected_total_reward = *GENESIS_ROUND_SEIGNIORAGE_RATE * initial_supply;\n    let expected_total_reward_integer = expected_total_reward.to_integer();\n    
assert_eq!(total_payout, expected_total_reward_integer);\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    for _ in 0..=builder.get_auction_delay() {\n        let step_request = StepRequestBuilder::new()\n            .with_parent_state_hash(builder.get_post_state_hash())\n            .with_protocol_version(ProtocolVersion::V1_0_0)\n            .with_next_era_id(builder.get_era().successor())\n            .with_run_auction(true)\n            .build();\n        assert!(\n            builder.step(step_request).is_success(),\n            \"must execute step successfully\"\n        );\n    }\n\n    let mut rewards = BTreeMap::new();\n    rewards.insert(VALIDATOR_1.clone(), vec![total_payout]);\n    rewards.insert(VALIDATOR_2.clone(), vec![total_payout]);\n    rewards.insert(VALIDATOR_3.clone(), vec![total_payout]);\n\n    let distribute_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *SYSTEM_ADDR,\n        builder.get_auction_contract_hash(),\n        METHOD_DISTRIBUTE,\n        runtime_args! 
{\n            ARG_ENTRY_POINT => METHOD_DISTRIBUTE,\n            ARG_REWARDS_MAP => rewards\n        },\n    )\n    .build();\n\n    builder.exec(distribute_request).commit().expect_success();\n\n    let validator_1_delegator_1_share = {\n        let total_reward = &Ratio::from(expected_total_reward_integer);\n\n        let validator_1_total_stake = VALIDATOR_1_STAKE + DELEGATOR_1_STAKE;\n\n        let delegator_total_stake = U512::from(DELEGATOR_1_STAKE);\n        let commission_rate = Ratio::new(\n            U512::from(DELEGATION_RATE),\n            U512::from(DELEGATION_RATE_DENOMINATOR),\n        );\n        let reward_multiplier =\n            Ratio::new(delegator_total_stake, U512::from(validator_1_total_stake));\n        let delegator_reward = total_reward\n            .checked_mul(&reward_multiplier)\n            .expect(\"must get delegator reward\");\n        let commission = delegator_reward\n            .checked_mul(&commission_rate)\n            .expect(\"must get commission\");\n        delegator_reward.checked_sub(&commission).unwrap()\n    }\n    .to_integer();\n\n    let validator_1_actual_payout = {\n        let validator_balance_before = U512::from(VALIDATOR_1_STAKE);\n        let validator_balance_after = get_validator_bid(&mut builder, VALIDATOR_1.clone())\n            .expect(\"should have validator bid\")\n            .staked_amount();\n        validator_balance_after - validator_balance_before\n    };\n\n    let validator_1_expected_payout = {\n        let validator_share = expected_total_reward;\n        let validator_portion = validator_share - Ratio::from(validator_1_delegator_1_share);\n        validator_portion.to_integer()\n    };\n    assert_eq!(validator_1_actual_payout, validator_1_expected_payout);\n\n    let validator_2_delegator_1_share = {\n        let validator_2_total_stake = VALIDATOR_2_STAKE + DELEGATOR_1_STAKE;\n\n        let total_reward = &Ratio::from(expected_total_reward.to_integer());\n\n        let 
delegator_total_stake = U512::from(DELEGATOR_1_STAKE);\n        let commission_rate = Ratio::new(\n            U512::from(DELEGATION_RATE),\n            U512::from(DELEGATION_RATE_DENOMINATOR),\n        );\n        let reward_multiplier =\n            Ratio::new(delegator_total_stake, U512::from(validator_2_total_stake));\n        let delegator_reward = total_reward\n            .checked_mul(&reward_multiplier)\n            .expect(\"must get delegator reward\");\n        let commission = delegator_reward\n            .checked_mul(&commission_rate)\n            .expect(\"must get commission\");\n        delegator_reward.checked_sub(&commission).unwrap()\n    }\n    .to_integer();\n\n    let validator_2_actual_payout = {\n        let validator_balance_before = U512::from(VALIDATOR_2_STAKE);\n        let validator_balance_after = get_validator_bid(&mut builder, VALIDATOR_2.clone())\n            .expect(\"should have validator bid\")\n            .staked_amount();\n        validator_balance_after - validator_balance_before\n    };\n    let validator_2_expected_payout = {\n        let validator_share = expected_total_reward;\n        let validator_portion = validator_share - Ratio::from(validator_2_delegator_1_share);\n        validator_portion.to_integer()\n    };\n    assert_eq!(validator_2_actual_payout, validator_2_expected_payout);\n\n    let validator_3_delegator_1_share = {\n        let validator_3_total_stake = VALIDATOR_3_STAKE + DELEGATOR_1_STAKE;\n\n        let total_reward = &Ratio::from(expected_total_reward.to_integer());\n\n        let delegator_total_stake = U512::from(DELEGATOR_1_STAKE);\n        let commission_rate = Ratio::new(\n            U512::from(DELEGATION_RATE),\n            U512::from(DELEGATION_RATE_DENOMINATOR),\n        );\n        let reward_multiplier =\n            Ratio::new(delegator_total_stake, U512::from(validator_3_total_stake));\n        let delegator_reward = total_reward\n            .checked_mul(&reward_multiplier)\n           
 .expect(\"must get delegator reward\");\n        let commission = delegator_reward\n            .checked_mul(&commission_rate)\n            .expect(\"must get commission\");\n        delegator_reward.checked_sub(&commission).unwrap()\n    }\n    .to_integer();\n\n    let validator_3_actual_payout = {\n        let validator_balance_before = U512::from(VALIDATOR_3_STAKE);\n        let validator_balance_after = get_validator_bid(&mut builder, VALIDATOR_3.clone())\n            .expect(\"should have validator bid\")\n            .staked_amount();\n        validator_balance_after - validator_balance_before\n    };\n    let validator_3_expected_payout = {\n        let validator_share = expected_total_reward;\n        let validator_portion = validator_share - Ratio::from(validator_3_delegator_1_share);\n        validator_portion.to_integer()\n    };\n    assert_eq!(validator_3_actual_payout, validator_3_expected_payout);\n\n    let delegator_1_validator_1_updated_stake = {\n        let delegator_balance_before = U512::from(DELEGATOR_1_STAKE);\n        let delegator_balance_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone());\n        delegator_balance_after - delegator_balance_before\n    };\n\n    assert_eq!(\n        delegator_1_validator_1_updated_stake,\n        validator_1_delegator_1_share\n    );\n\n    let delegator_1_validator_2_updated_stake = {\n        let delegator_stake_before = U512::from(DELEGATOR_1_STAKE);\n        let delegator_stake_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_2.clone(), DELEGATOR_1.clone());\n        delegator_stake_after - delegator_stake_before\n    };\n    assert_eq!(\n        delegator_1_validator_2_updated_stake,\n        validator_2_delegator_1_share\n    );\n\n    let delegator_1_validator_3_updated_stake = {\n        let delegator_stake_before = U512::from(DELEGATOR_1_STAKE);\n        let delegator_stake_after =\n            
get_delegator_staked_amount(&mut builder, VALIDATOR_3.clone(), DELEGATOR_1.clone());\n        delegator_stake_after - delegator_stake_before\n    };\n    assert_eq!(\n        delegator_1_validator_3_updated_stake,\n        validator_3_delegator_1_share\n    );\n}\n\n#[ignore]\n#[test]\nfn should_increase_total_supply_after_distribute() {\n    const VALIDATOR_1_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT;\n    const VALIDATOR_2_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT;\n    const VALIDATOR_3_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT;\n\n    const DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR / 2;\n\n    const DELEGATOR_1_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT;\n\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *VALIDATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_3_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *VALIDATOR_3_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_3_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_3_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE),\n            ARG_DELEGATION_RATE => DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let validator_2_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_2_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(VALIDATOR_2_STAKE),\n            ARG_DELEGATION_RATE => DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_2.clone(),\n        },\n    )\n    .build();\n\n    let validator_3_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_3_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(VALIDATOR_3_STAKE),\n            ARG_DELEGATION_RATE => DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_3.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_validator_2_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_2.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_validator_3_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_3.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        validator_1_fund_request,\n        validator_2_fund_request,\n        validator_3_fund_request,\n        delegator_1_fund_request,\n        delegator_2_fund_request,\n        delegator_3_fund_request,\n        validator_1_add_bid_request,\n        validator_2_add_bid_request,\n        validator_3_add_bid_request,\n        delegator_1_validator_1_delegate_request,\n        delegator_1_validator_2_delegate_request,\n        delegator_1_validator_3_delegate_request,\n    ];\n\n    let mut timestamp_millis =\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let protocol_version = DEFAULT_PROTOCOL_VERSION;\n    // initial token supply\n    let initial_supply = builder.total_supply(protocol_version, None);\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    let post_genesis_supply = builder.total_supply(protocol_version, None);\n\n    assert_eq!(\n        initial_supply, post_genesis_supply,\n        \"total supply should remain unchanged prior to first distribution\"\n    );\n\n    // run auction\n    for _ in 0..5 {\n        builder.run_auction(timestamp_millis, Vec::new());\n        timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n    }\n\n    let post_auction_supply = builder.total_supply(protocol_version, None);\n    assert_eq!(\n        initial_supply, post_auction_supply,\n        \"total supply should remain unchanged regardless of auction\"\n    );\n\n    let total_payout = U512::from(1_000_000_000_000_u64);\n\n    let mut rewards = BTreeMap::new();\n    
rewards.insert(VALIDATOR_1.clone(), vec![total_payout]);\n    rewards.insert(VALIDATOR_2.clone(), vec![total_payout]);\n    rewards.insert(VALIDATOR_3.clone(), vec![total_payout]);\n\n    for _ in 0..5 {\n        let distribute_request = ExecuteRequestBuilder::contract_call_by_hash(\n            *SYSTEM_ADDR,\n            builder.get_auction_contract_hash(),\n            METHOD_DISTRIBUTE,\n            runtime_args! {\n                ARG_ENTRY_POINT => METHOD_DISTRIBUTE,\n                ARG_REWARDS_MAP => rewards.clone()\n            },\n        )\n        .build();\n\n        builder.exec(distribute_request).expect_success().commit();\n\n        let post_distribute_supply = builder.total_supply(protocol_version, None);\n        assert!(\n            initial_supply < post_distribute_supply,\n            \"total supply should increase after distribute ({} >= {})\",\n            initial_supply,\n            post_distribute_supply\n        );\n    }\n}\n\n#[ignore]\n#[test]\nfn should_not_create_purses_during_distribute() {\n    const VALIDATOR_1_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT;\n\n    const DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR / 2;\n\n    const DELEGATOR_1_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT;\n\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_3_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_3_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE),\n            ARG_DELEGATION_RATE => DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_2_validator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_2.clone(),\n        },\n    )\n    .build();\n\n    let delegator_3_validator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_3_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_3.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        validator_1_fund_request,\n        delegator_1_fund_request,\n        delegator_2_fund_request,\n        delegator_3_fund_request,\n        validator_1_add_bid_request,\n        delegator_1_validator_1_delegate_request,\n        delegator_2_validator_1_delegate_request,\n        delegator_3_validator_1_delegate_request,\n    ];\n\n    let mut timestamp_millis =\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let protocol_version = DEFAULT_PROTOCOL_VERSION;\n    // initial token supply\n    let initial_supply = builder.total_supply(protocol_version, None);\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    let post_genesis_supply = builder.total_supply(protocol_version, None);\n\n    assert_eq!(\n        initial_supply, post_genesis_supply,\n        \"total supply should remain unchanged prior to first distribution\"\n    );\n\n    // run auction\n    for _ in 0..5 {\n        builder.run_auction(timestamp_millis, Vec::new());\n        timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n    }\n\n    let post_auction_supply = builder.total_supply(protocol_version, None);\n    assert_eq!(\n        initial_supply, 
post_auction_supply,\n        \"total supply should remain unchanged regardless of auction\"\n    );\n\n    let total_payout = U512::from(1_000_000_000_000_u64);\n\n    let mut rewards = BTreeMap::new();\n    rewards.insert(VALIDATOR_1.clone(), vec![total_payout]);\n\n    let distribute_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *SYSTEM_ADDR,\n        builder.get_auction_contract_hash(),\n        METHOD_DISTRIBUTE,\n        runtime_args! {\n            ARG_ENTRY_POINT => METHOD_DISTRIBUTE,\n            ARG_REWARDS_MAP => rewards\n        },\n    )\n    .build();\n\n    let number_of_purses_before_distribute = builder.get_balance_keys().len();\n\n    builder.exec(distribute_request).commit().expect_success();\n\n    let number_of_purses_after_distribute = builder.get_balance_keys().len();\n\n    assert_eq!(\n        number_of_purses_after_distribute,\n        number_of_purses_before_distribute\n    );\n\n    let post_distribute_supply = builder.total_supply(protocol_version, None);\n    assert!(\n        initial_supply < post_distribute_supply,\n        \"total supply should increase after distribute ({} >= {})\",\n        initial_supply,\n        post_distribute_supply\n    );\n}\n\n#[ignore]\n#[test]\nfn should_distribute_delegation_rate_full_after_upgrading() {\n    const VALIDATOR_1_STAKE: u64 = 1_000_000_000_000;\n    const DELEGATOR_1_STAKE: u64 = 1_000_000_000_000;\n    const DELEGATOR_2_STAKE: u64 = 1_000_000_000_000;\n\n    const VALIDATOR_1_DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR;\n\n    let system_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let delegator_2_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE),\n            ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegator_2_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DELEGATOR_2_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_2.clone(),\n        },\n    )\n    .build();\n\n    let post_genesis_requests = vec![\n        system_fund_request,\n        validator_1_fund_request,\n        delegator_1_fund_request,\n        delegator_2_fund_request,\n        validator_1_add_bid_request,\n        delegator_1_delegate_request,\n        delegator_2_delegate_request,\n    ];\n\n    let mut timestamp_millis =\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let protocol_version = DEFAULT_PROTOCOL_VERSION;\n    // initial token supply\n    let initial_supply = builder.total_supply(protocol_version, None);\n    let expected_total_reward_before = *GENESIS_ROUND_SEIGNIORAGE_RATE * initial_supply;\n    let expected_total_reward_integer = expected_total_reward_before.to_integer();\n\n    for request in post_genesis_requests {\n        builder.exec(request).commit().expect_success();\n    }\n\n    for _ in 0..5 {\n        builder.advance_era();\n        timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n    }\n\n    let mut rewards = BTreeMap::new();\n    rewards.insert(VALIDATOR_1.clone(), vec![expected_total_reward_integer]);\n\n    let distribute_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *SYSTEM_ADDR,\n        builder.get_auction_contract_hash(),\n        METHOD_DISTRIBUTE,\n        runtime_args! 
{\n            ARG_ENTRY_POINT => METHOD_DISTRIBUTE,\n            ARG_REWARDS_MAP => rewards\n        },\n    )\n    .build();\n\n    builder.exec(distribute_request).commit().expect_success();\n\n    let validator_1_stake_before = {\n        let validator_stake_before = U512::from(VALIDATOR_1_STAKE);\n        let validator_stake_after = get_validator_bid(&mut builder, VALIDATOR_1.clone())\n            .expect(\"should have validator bid\")\n            .staked_amount();\n        validator_stake_after - validator_stake_before\n    };\n\n    let expected_validator_1_payout_before =\n        (expected_total_reward_before * Ratio::from(U512::one())).to_integer();\n    assert_eq!(validator_1_stake_before, expected_validator_1_payout_before);\n\n    let delegator_1_stake_before = {\n        let delegator_stake_before = U512::from(DELEGATOR_1_STAKE);\n        let delegator_stake_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone());\n        delegator_stake_after - delegator_stake_before\n    };\n    let expected_delegator_1_payout_before = U512::zero();\n    assert_eq!(delegator_1_stake_before, expected_delegator_1_payout_before);\n\n    let delegator_2_stake_before = {\n        let delegator_stake_before = U512::from(DELEGATOR_2_STAKE);\n        let delegator_stake_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone());\n        delegator_stake_after - delegator_stake_before\n    };\n    let expected_delegator_2_balance = U512::zero();\n    assert_eq!(delegator_2_stake_before, expected_delegator_2_balance);\n\n    let total_payout_before =\n        validator_1_stake_before + delegator_1_stake_before + delegator_2_stake_before;\n    assert_eq!(total_payout_before, expected_total_reward_integer);\n\n    //\n    // Update round seigniorage rate into 50% of default value\n    //\n    let new_seigniorage_multiplier = Ratio::new_raw(1, 2);\n    let new_round_seigniorage_rate 
= DEFAULT_ROUND_SEIGNIORAGE_RATE * new_seigniorage_multiplier;\n\n    let old_protocol_version = DEFAULT_PROTOCOL_VERSION;\n    let sem_ver = old_protocol_version.value();\n    let new_protocol_version =\n        ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1);\n\n    let mut upgrade_request = {\n        const DEFAULT_ACTIVATION_POINT: EraId = EraId::new(1);\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(old_protocol_version)\n            .with_new_protocol_version(new_protocol_version)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .with_validator_minimum_bid_amount(1u64)\n            .with_new_round_seigniorage_rate(new_round_seigniorage_rate)\n            .build()\n    };\n\n    builder.upgrade(&mut upgrade_request);\n\n    let initial_supply = builder.total_supply(protocol_version, None);\n\n    for _ in 0..5 {\n        builder.advance_era();\n        timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n    }\n\n    let new_round_seigniorage_rate = {\n        let (numer, denom) = new_round_seigniorage_rate.into();\n        Ratio::new(numer.into(), denom.into())\n    };\n\n    let expected_total_reward_after = new_round_seigniorage_rate * initial_supply;\n\n    let mut rewards = BTreeMap::new();\n    rewards.insert(\n        VALIDATOR_1.clone(),\n        vec![expected_total_reward_after.to_integer()],\n    );\n    assert!(\n        builder\n            .distribute(None, new_protocol_version, rewards, timestamp_millis)\n            .is_success(),\n        \"must distribute\"\n    );\n\n    let mut rewards = BTreeMap::new();\n    rewards.insert(VALIDATOR_1.clone(), expected_total_reward_integer);\n\n    let validator_1_balance_after = {\n        let validator_staked_amount = get_validator_bid(&mut builder, VALIDATOR_1.clone())\n            .expect(\"should have validator bid\")\n            .staked_amount();\n        validator_staked_amount - validator_1_stake_before - 
U512::from(VALIDATOR_1_STAKE)\n    };\n    let expected_validator_1_balance_after =\n        (expected_total_reward_after * Ratio::from(U512::one())).to_integer();\n    assert_eq!(\n        validator_1_balance_after,\n        expected_validator_1_balance_after\n    );\n\n    let delegator_1_balance_after = {\n        let delegator_staked_amount =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone());\n        delegator_staked_amount - delegator_1_stake_before - U512::from(DELEGATOR_1_STAKE)\n    };\n    let expected_delegator_1_balance_after = U512::zero();\n    assert_eq!(\n        delegator_1_balance_after,\n        expected_delegator_1_balance_after\n    );\n\n    let delegator_2_balance_after = {\n        let delegator_staked_amount =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone());\n        delegator_staked_amount - delegator_2_stake_before - U512::from(DELEGATOR_2_STAKE)\n    };\n    let expected_delegator_2_balance_after = U512::zero();\n    assert_eq!(\n        delegator_2_balance_after,\n        expected_delegator_2_balance_after\n    );\n\n    let expected_total_reward_after = expected_total_reward_after.to_integer();\n\n    let total_payout_after =\n        validator_1_balance_after + delegator_1_balance_after + delegator_2_balance_after;\n    assert_eq!(total_payout_after, expected_total_reward_after);\n\n    // expected amount after reducing the seigniorage rate is lower than the first amount\n    assert!(expected_validator_1_payout_before > expected_validator_1_balance_after);\n    assert!(total_payout_before > total_payout_after);\n}\n\n// In this test, we set up a validator and a delegator, then the delegator delegates to the\n// validator. We step forward one era (auction delay is 3 eras) and then fully undelegate. We expect\n// that there is no bonding purse for this delegator / validator pair. 
This test should prove that\n// if you undelegate before your delegation would receive rewards from a validator, you will no\n// longer be delegated, as expected.\n#[ignore]\n#[test]\nfn should_not_restake_after_full_unbond() {\n    const DELEGATOR_1_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT;\n    const VALIDATOR_1_STAKE: u64 = 1_000_000;\n    const VALIDATOR_1_DELEGATION_RATE: DelegationRate = 0;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    // advance past the initial auction delay due to special condition of post-genesis behavior.\n\n    builder.advance_eras_by_default_auction_delay();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    builder\n        .exec(validator_1_fund_request)\n        .expect_success()\n        .commit();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    builder\n        .exec(delegator_1_fund_request)\n        .expect_success()\n        .commit();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE),\n            ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n        },\n    )\n    .build();\n\n    builder\n        .exec(validator_1_add_bid_request)\n        .expect_success()\n        .commit();\n\n    let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    builder\n        .exec(delegator_1_validator_1_delegate_request)\n        .expect_success()\n        .commit();\n\n    builder.advance_era();\n\n    let delegator = get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone());\n\n    assert!(delegator.is_some());\n    assert_eq!(\n        delegator.unwrap().staked_amount(),\n        U512::from(DELEGATOR_1_STAKE)\n    );\n\n    builder.advance_era();\n\n    // undelegate in the era right after we delegated.\n    undelegate(\n        &mut builder,\n        *DELEGATOR_1_ADDR,\n        DELEGATOR_1.clone(),\n        VALIDATOR_1.clone(),\n        U512::from(DELEGATOR_1_STAKE),\n    );\n    let delegator = get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone());\n    assert!(delegator.is_none());\n\n    let withdraws = builder.get_unbonds();\n    let unbond_kind = UnbondKind::DelegatedPublicKey(DELEGATOR_1.clone());\n    let unbond = withdraws.get(&unbond_kind).expect(\"should have entry\");\n    let delegator_unbond_amount = unbond[0].eras().first().expect(\"should be era\").amount();\n\n    assert_eq!(\n        *delegator_unbond_amount,\n        U512::from(DELEGATOR_1_STAKE),\n        \"unbond purse amount should match staked amount\"\n    );\n\n    // step until validator receives rewards.\n    
builder.advance_eras_by(2);\n\n    // validator receives rewards after this step.\n\n    builder.advance_era();\n\n    // Delegator should not remain delegated even though they were eligible for rewards in the\n    // second era.\n    let delegator = get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone());\n    assert!(delegator.is_none());\n}\n\n// In this test, we set up a delegator and a validator, the delegator delegates to the validator.\n// We then undelegate during the first era where the delegator would be eligible to receive rewards\n// for their delegation and expect that there is no bonding purse for the delegator / validator pair\n// and that the delegator does not remain delegated to the validator as expected.\n#[ignore]\n#[test]\nfn delegator_full_unbond_during_first_reward_era() {\n    const DELEGATOR_1_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT;\n    const VALIDATOR_1_STAKE: u64 = 1_000_000;\n    const VALIDATOR_1_DELEGATION_RATE: DelegationRate = 0;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    // advance past the initial auction delay due to special condition of post-genesis behavior.\n    builder.advance_eras_by_default_auction_delay();\n\n    let validator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    builder\n        .exec(validator_1_fund_request)\n        .expect_success()\n        .commit();\n\n    let delegator_1_fund_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    builder\n        .exec(delegator_1_fund_request)\n        .expect_success()\n        .commit();\n\n    let validator_1_add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE),\n            ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE,\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n        },\n    )\n    .build();\n\n    builder\n        .exec(validator_1_add_bid_request)\n        .expect_success()\n        .commit();\n\n    let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    builder\n        .exec(delegator_1_validator_1_delegate_request)\n        .expect_success()\n        .commit();\n\n    // first step after funding, adding bid and delegating.\n    builder.advance_era();\n\n    let delegator = get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone())\n        .expect(\"should be delegator\");\n\n    assert_eq!(delegator.staked_amount(), U512::from(DELEGATOR_1_STAKE));\n\n    // step until validator receives rewards.\n    builder.advance_eras_by(3);\n\n    // assert that the validator should indeed receive rewards and that\n    // the delegator is scheduled to receive rewards this era.\n\n    let auction_hash = builder.get_auction_contract_hash();\n    let seigniorage_snapshot: SeigniorageRecipientsSnapshotV2 = builder.get_value(\n        EntityAddr::System(auction_hash.value()),\n        SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY,\n    );\n\n    let 
validator_seigniorage = seigniorage_snapshot\n        .get(&builder.get_era())\n        .expect(\"should be seigniorage for era\")\n        .get(&VALIDATOR_1)\n        .expect(\"should be validator seigniorage for era\");\n\n    let delegator_kind = DelegatorKind::PublicKey(DELEGATOR_1.clone());\n    let delegator_seigniorage = validator_seigniorage\n        .delegator_stake()\n        .get(&delegator_kind)\n        .expect(\"should be delegator seigniorage\");\n    assert_eq!(*delegator_seigniorage, U512::from(DELEGATOR_1_STAKE));\n\n    // undelegate in the first era that the delegator will receive rewards.\n    undelegate(\n        &mut builder,\n        *DELEGATOR_1_ADDR,\n        DELEGATOR_1.clone(),\n        VALIDATOR_1.clone(),\n        U512::from(DELEGATOR_1_STAKE),\n    );\n    let delegator = get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone());\n    assert!(delegator.is_none());\n\n    let unbond_kind = UnbondKind::DelegatedPublicKey(DELEGATOR_1.clone());\n    let withdraws = builder.get_unbonds();\n    let unbond = withdraws\n        .get(&unbond_kind)\n        .expect(\"should have validator entry\");\n    let delegator_unbond_amount = unbond[0].eras().first().expect(\"should have era\").amount();\n\n    assert_eq!(\n        *delegator_unbond_amount,\n        U512::from(DELEGATOR_1_STAKE),\n        \"unbond purse amount should match staked amount\"\n    );\n\n    // validator receives rewards after this step.\n    builder.advance_era();\n\n    // Delegator's stake should remain at zero delegated even though they were eligible for rewards\n    // in the second era.\n    let delegator = get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone());\n    assert!(delegator.is_none());\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/system_contracts/auction/mod.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_PROPOSER_PUBLIC_KEY, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{\n    runtime_args,\n    system::auction::{\n        BidAddr, BidKind, BidsExt, DelegationRate, DelegatorBid, DelegatorKind, EraInfo,\n        ValidatorBid, ARG_AMOUNT, ARG_NEW_VALIDATOR, ARG_VALIDATOR,\n    },\n    GenesisAccount, GenesisValidator, Key, Motes, PublicKey, SecretKey, StoredValue, U512,\n};\nuse num_traits::Zero;\n\nconst STORED_STAKING_CONTRACT_NAME: &str = \"staking_stored.wasm\";\n\nmod bids;\nmod distribute;\nmod reservations;\n\nfn get_validator_bid(\n    builder: &mut LmdbWasmTestBuilder,\n    validator_public_key: PublicKey,\n) -> Option<ValidatorBid> {\n    let bids = builder.get_bids();\n    bids.validator_bid(&validator_public_key)\n}\n\npub fn get_delegator_staked_amount(\n    builder: &mut LmdbWasmTestBuilder,\n    validator_public_key: PublicKey,\n    delegator_public_key: PublicKey,\n) -> U512 {\n    let bids = builder.get_bids();\n\n    let delegator = bids\n        .delegator_by_kind(&validator_public_key, &DelegatorKind::PublicKey(delegator_public_key.clone()))\n        .expect(\"bid should exist for validator-{validator_public_key}, delegator-{delegator_public_key}\");\n\n    delegator.staked_amount()\n}\n\npub fn get_era_info(builder: &mut LmdbWasmTestBuilder) -> EraInfo {\n    let era_info_value = builder\n        .query(None, Key::EraSummary, &[])\n        .expect(\"should have value\");\n\n    era_info_value\n        .as_era_info()\n        .cloned()\n        .expect(\"should be era info\")\n}\n\n#[ignore]\n#[test]\nfn should_support_contract_staking() {\n    const ARG_ACTION: &str = \"action\";\n    let timestamp_millis = DEFAULT_GENESIS_TIMESTAMP_MILLIS;\n    let purse_name = \"staking_purse\".to_string();\n    let contract_name = \"staking\".to_string();\n    let entry_point_name = 
\"run\".to_string();\n    let stake = \"STAKE\".to_string();\n    let unstake = \"UNSTAKE\".to_string();\n    let restake = \"RESTAKE\".to_string();\n    let get_staked_amount = \"STAKED_AMOUNT\".to_string();\n    let account = *DEFAULT_ACCOUNT_ADDR;\n    let seed_amount = U512::from(10_000_000_000_000_000_u64);\n    let delegate_amount = U512::from(5_000_000_000_000_000_u64);\n    let validator_pk = &*DEFAULT_PROPOSER_PUBLIC_KEY;\n    let other_validator_pk = {\n        let secret_key = SecretKey::ed25519_from_bytes([199; SecretKey::ED25519_LENGTH]).unwrap();\n        PublicKey::from(&secret_key)\n    };\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    let mut genesis_request = LOCAL_GENESIS_REQUEST.clone();\n    genesis_request.set_enable_entity(false);\n\n    genesis_request.push_genesis_validator(\n        validator_pk,\n        GenesisValidator::new(\n            Motes::new(10_000_000_000_000_000_u64),\n            DelegationRate::zero(),\n        ),\n    );\n    genesis_request.push_genesis_account(GenesisAccount::Account {\n        public_key: other_validator_pk.clone(),\n        validator: Some(GenesisValidator::new(\n            Motes::new(1_000_000_000_000_000_u64),\n            DelegationRate::zero(),\n        )),\n        balance: Motes::new(10_000_000_000_000_000_u64),\n    });\n    builder.run_genesis(genesis_request);\n\n    let auction_delay = builder.get_unbonding_delay();\n    let unbond_delay = builder.get_unbonding_delay();\n\n    for _ in 0..=auction_delay {\n        // crank era\n        builder.run_auction(timestamp_millis, vec![]);\n    }\n\n    let account_main_purse = builder\n        .get_entity_with_named_keys_by_account_hash(account)\n        .expect(\"should have account\")\n        .main_purse();\n    let starting_account_balance = builder.get_purse_balance(account_main_purse);\n\n    builder\n        .exec(\n            ExecuteRequestBuilder::standard(\n                account,\n                
STORED_STAKING_CONTRACT_NAME,\n                runtime_args! {\n                    ARG_AMOUNT => seed_amount\n                },\n            )\n            .build(),\n        )\n        .commit()\n        .expect_success();\n\n    let default_account = builder.get_account(account).expect(\"should have account\");\n    let named_keys = default_account.named_keys();\n\n    let contract_key = named_keys\n        .get(&contract_name)\n        .expect(\"contract_name key should exist\");\n\n    let stored_contract = builder\n        .query(None, *contract_key, &[])\n        .expect(\"should have stored value at contract key\");\n\n    let contract = stored_contract\n        .as_contract()\n        .expect(\"stored value should be contract\");\n\n    let contract_named_keys = contract.named_keys();\n\n    let contract_purse = contract_named_keys\n        .get(&purse_name)\n        .expect(\"purse_name key should exist\")\n        .into_uref()\n        .expect(\"should be a uref\");\n\n    let post_install_account_balance = builder.get_purse_balance(account_main_purse);\n    assert_eq!(\n        post_install_account_balance,\n        starting_account_balance.saturating_sub(seed_amount),\n        \"post install should be reduced due to seeding contract purse\"\n    );\n\n    let pre_delegation_balance = builder.get_purse_balance(contract_purse);\n    assert_eq!(pre_delegation_balance, seed_amount);\n\n    // check delegated amount from contract\n    builder\n        .exec(\n            ExecuteRequestBuilder::contract_call_by_name(\n                account,\n                &contract_name,\n                &entry_point_name,\n                runtime_args! 
{\n                    ARG_ACTION => get_staked_amount.clone(),\n                    ARG_VALIDATOR => validator_pk.clone(),\n                },\n            )\n            .build(),\n        )\n        .commit()\n        .expect_success();\n\n    let result = builder.get_last_exec_result().unwrap();\n    let staked_amount: U512 = result.ret().unwrap().to_owned().into_t().unwrap();\n    assert_eq!(\n        staked_amount,\n        U512::zero(),\n        \"staked amount should be zero prior to staking\"\n    );\n\n    // stake from contract\n    builder\n        .exec(\n            ExecuteRequestBuilder::contract_call_by_name(\n                account,\n                &contract_name,\n                &entry_point_name,\n                runtime_args! {\n                    ARG_ACTION => stake,\n                    ARG_AMOUNT => delegate_amount,\n                    ARG_VALIDATOR => validator_pk.clone(),\n                },\n            )\n            .build(),\n        )\n        .commit()\n        .expect_success();\n\n    let post_delegation_balance = builder.get_purse_balance(contract_purse);\n    assert_eq!(\n        post_delegation_balance,\n        pre_delegation_balance.saturating_sub(delegate_amount),\n        \"contract purse balance should be reduced by staked amount\"\n    );\n\n    let delegation_key = Key::BidAddr(BidAddr::DelegatedPurse {\n        validator: validator_pk.to_account_hash(),\n        delegator: contract_purse.addr(),\n    });\n\n    let stored_value = builder\n        .query(None, delegation_key, &[])\n        .expect(\"should have delegation bid\");\n\n    assert!(\n        matches!(stored_value, StoredValue::BidKind(BidKind::Delegator(_))),\n        \"expected delegator bid\"\n    );\n\n    if let StoredValue::BidKind(BidKind::Delegator(delegator)) = stored_value {\n        assert_eq!(\n            delegator.staked_amount(),\n            delegate_amount,\n            \"staked amount should match delegation amount\"\n        );\n    
}\n\n    // check delegated amount from contract\n    builder\n        .exec(\n            ExecuteRequestBuilder::contract_call_by_name(\n                account,\n                &contract_name,\n                &entry_point_name,\n                runtime_args! {\n                    ARG_ACTION => get_staked_amount.clone(),\n                    ARG_VALIDATOR => validator_pk.clone(),\n                },\n            )\n            .build(),\n        )\n        .commit()\n        .expect_success();\n\n    let result = builder.get_last_exec_result().unwrap();\n    let staked_amount: U512 = result.ret().unwrap().to_owned().into_t().unwrap();\n    assert_eq!(\n        staked_amount, delegate_amount,\n        \"staked amount should match delegation amount\"\n    );\n\n    for _ in 0..=auction_delay {\n        // crank era\n        builder.run_auction(timestamp_millis, vec![]);\n    }\n\n    let increased_delegate_amount = if let StoredValue::BidKind(BidKind::Delegator(delegator)) =\n        builder\n            .query(None, delegation_key, &[])\n            .expect(\"should have delegation bid\")\n    {\n        delegator.staked_amount()\n    } else {\n        U512::zero()\n    };\n\n    // restake from contract\n    builder\n        .exec(\n            ExecuteRequestBuilder::contract_call_by_name(\n                account,\n                &contract_name,\n                &entry_point_name,\n                runtime_args! 
{\n                    ARG_ACTION => restake,\n                    ARG_AMOUNT => increased_delegate_amount,\n                    ARG_VALIDATOR => validator_pk.clone(),\n                    ARG_NEW_VALIDATOR => other_validator_pk.clone()\n                },\n            )\n            .build(),\n        )\n        .commit()\n        .expect_success();\n\n    assert!(\n        builder.query(None, delegation_key, &[]).is_err(),\n        \"delegation record should be removed\"\n    );\n\n    assert_eq!(\n        post_delegation_balance,\n        builder.get_purse_balance(contract_purse),\n        \"at this point, unstaked token has not been returned\"\n    );\n\n    for _ in 0..=unbond_delay {\n        // crank era\n        builder.run_auction(timestamp_millis, vec![]);\n    }\n\n    let delegation_key = Key::BidAddr(BidAddr::DelegatedPurse {\n        validator: other_validator_pk.to_account_hash(),\n        delegator: contract_purse.addr(),\n    });\n\n    let stored_value = builder\n        .query(None, delegation_key, &[])\n        .expect(\"should have delegation bid\");\n\n    assert!(\n        matches!(stored_value, StoredValue::BidKind(BidKind::Delegator(_))),\n        \"expected delegator bid\"\n    );\n\n    if let StoredValue::BidKind(BidKind::Delegator(delegator)) = stored_value {\n        assert_eq!(\n            delegator.staked_amount(),\n            delegate_amount,\n            \"staked amount should match delegation amount\"\n        );\n    }\n\n    // unstake from contract\n    builder\n        .exec(\n            ExecuteRequestBuilder::contract_call_by_name(\n                account,\n                &contract_name,\n                &entry_point_name,\n                runtime_args! 
{\n                    ARG_ACTION => unstake,\n                    ARG_AMOUNT => increased_delegate_amount,\n                    ARG_VALIDATOR => other_validator_pk.clone(),\n                },\n            )\n            .build(),\n        )\n        .commit()\n        .expect_success();\n\n    assert!(\n        builder.query(None, delegation_key, &[]).is_err(),\n        \"delegation record should be removed\"\n    );\n\n    assert_eq!(\n        post_delegation_balance,\n        builder.get_purse_balance(contract_purse),\n        \"at this point, unstaked token has not been returned\"\n    );\n\n    let unbond_key = Key::BidAddr(BidAddr::UnbondPurse {\n        validator: other_validator_pk.to_account_hash(),\n        unbonder: contract_purse.addr(),\n    });\n    let unbonded_amount = if let StoredValue::BidKind(BidKind::Unbond(unbond)) = builder\n        .query(None, unbond_key, &[])\n        .expect(\"should have unbond\")\n    {\n        let unbond_era = unbond.eras().first().expect(\"should have an era entry\");\n        assert_eq!(\n            *unbond_era.amount(),\n            increased_delegate_amount,\n            \"unbonded amount should match expectations\"\n        );\n        *unbond_era.amount()\n    } else {\n        U512::zero()\n    };\n\n    for _ in 0..=unbond_delay {\n        // crank era\n        builder.run_auction(timestamp_millis, vec![]);\n    }\n\n    assert_eq!(\n        delegate_amount.saturating_add(unbonded_amount),\n        builder.get_purse_balance(contract_purse),\n        \"unbonded amount should be available to contract staking purse\"\n    );\n}\n\n#[ignore]\n#[test]\nfn should_not_enforce_max_spending_when_main_purse_not_in_use() {\n    const ARG_ACTION: &str = \"action\";\n    let timestamp_millis = DEFAULT_GENESIS_TIMESTAMP_MILLIS;\n    let purse_name = \"staking_purse\".to_string();\n    let contract_name = \"staking\".to_string();\n    let entry_point_name = \"run\".to_string();\n    let stake_all = 
\"STAKE_ALL\".to_string();\n    let account = *DEFAULT_ACCOUNT_ADDR;\n    let seed_amount = U512::from(10_000_000_000_000_000_u64);\n    let validator_pk = &*DEFAULT_PROPOSER_PUBLIC_KEY;\n    let other_validator_pk = {\n        let secret_key = SecretKey::ed25519_from_bytes([199; SecretKey::ED25519_LENGTH]).unwrap();\n        PublicKey::from(&secret_key)\n    };\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    let mut genesis_request = LOCAL_GENESIS_REQUEST.clone();\n    genesis_request.set_enable_entity(false);\n\n    genesis_request.push_genesis_validator(\n        validator_pk,\n        GenesisValidator::new(\n            Motes::new(10_000_000_000_000_000_u64),\n            DelegationRate::zero(),\n        ),\n    );\n    genesis_request.push_genesis_account(GenesisAccount::Account {\n        public_key: other_validator_pk.clone(),\n        validator: Some(GenesisValidator::new(\n            Motes::new(1_000_000_000_000_000_u64),\n            DelegationRate::zero(),\n        )),\n        balance: Motes::new(10_000_000_000_000_000_u64),\n    });\n    builder.run_genesis(genesis_request);\n\n    let auction_delay = builder.get_unbonding_delay();\n\n    for _ in 0..=auction_delay {\n        // crank era\n        builder.run_auction(timestamp_millis, vec![]);\n    }\n\n    let account_main_purse = builder\n        .get_entity_with_named_keys_by_account_hash(account)\n        .expect(\"should have account\")\n        .main_purse();\n    let starting_account_balance = builder.get_purse_balance(account_main_purse);\n\n    builder\n        .exec(\n            ExecuteRequestBuilder::standard(\n                account,\n                STORED_STAKING_CONTRACT_NAME,\n                runtime_args! 
{\n                    ARG_AMOUNT => seed_amount\n                },\n            )\n            .build(),\n        )\n        .commit()\n        .expect_success();\n\n    let default_account = builder.get_account(account).expect(\"should have account\");\n    let named_keys = default_account.named_keys();\n\n    let contract_key = named_keys\n        .get(&contract_name)\n        .expect(\"contract_name key should exist\");\n\n    let stored_contract = builder\n        .query(None, *contract_key, &[])\n        .expect(\"should have stored value at contract key\");\n\n    let contract = stored_contract\n        .as_contract()\n        .expect(\"stored value should be contract\");\n\n    let contract_named_keys = contract.named_keys();\n\n    let contract_purse = contract_named_keys\n        .get(&purse_name)\n        .expect(\"purse_name key should exist\")\n        .into_uref()\n        .expect(\"should be a uref\");\n\n    let post_install_account_balance = builder.get_purse_balance(account_main_purse);\n    assert_eq!(\n        post_install_account_balance,\n        starting_account_balance.saturating_sub(seed_amount),\n        \"post install should be reduced due to seeding contract purse\"\n    );\n\n    let pre_delegation_balance = builder.get_purse_balance(contract_purse);\n    assert_eq!(pre_delegation_balance, seed_amount);\n\n    // stake from contract\n    builder\n        .exec(\n            ExecuteRequestBuilder::contract_call_by_name(\n                account,\n                &contract_name,\n                &entry_point_name,\n                runtime_args! 
{\n                    ARG_ACTION => stake_all,\n                    ARG_VALIDATOR => validator_pk.clone(),\n                },\n            )\n            .build(),\n        )\n        .commit()\n        .expect_success();\n\n    let post_delegation_balance = builder.get_purse_balance(contract_purse);\n    assert_eq!(\n        post_delegation_balance,\n        U512::zero(),\n        \"contract purse balance should be reduced by staked amount\"\n    );\n\n    let delegation_key = Key::BidAddr(BidAddr::DelegatedPurse {\n        validator: validator_pk.to_account_hash(),\n        delegator: contract_purse.addr(),\n    });\n\n    let stored_value = builder\n        .query(None, delegation_key, &[])\n        .expect(\"should have delegation bid\");\n\n    assert!(\n        matches!(stored_value, StoredValue::BidKind(BidKind::Delegator(_))),\n        \"expected delegator bid\"\n    );\n\n    if let StoredValue::BidKind(BidKind::Delegator(delegator)) = stored_value {\n        assert_eq!(\n            delegator.staked_amount(),\n            pre_delegation_balance,\n            \"staked amount should match delegation amount\"\n        );\n    }\n\n    for _ in 0..=auction_delay {\n        // crank era\n        builder.run_auction(timestamp_millis, vec![]);\n    }\n\n    builder\n        .query(None, delegation_key, &[])\n        .expect(\"should have delegation bid\");\n}\n\n#[ignore]\n#[test]\nfn should_read_bid_with_vesting_schedule_populated() {\n    const ARG_ACTION: &str = \"action\";\n    let purse_name = \"staking_purse\".to_string();\n    let contract_name = \"staking\".to_string();\n    let entry_point_name = \"run\".to_string();\n    let get_staked_amount = \"STAKED_AMOUNT\".to_string();\n    let account = *DEFAULT_ACCOUNT_ADDR;\n    let seed_amount = U512::from(10_000_000_000_000_000_u64);\n    let validator_pk = &*DEFAULT_PROPOSER_PUBLIC_KEY;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    let mut genesis_request = LOCAL_GENESIS_REQUEST.clone();\n 
   genesis_request.set_enable_entity(false);\n    genesis_request.push_genesis_validator(\n        validator_pk,\n        GenesisValidator::new(\n            Motes::new(10_000_000_000_000_000_u64),\n            DelegationRate::zero(),\n        ),\n    );\n    builder.run_genesis(genesis_request);\n\n    builder\n        .exec(\n            ExecuteRequestBuilder::standard(\n                account,\n                STORED_STAKING_CONTRACT_NAME,\n                runtime_args! {\n                    ARG_AMOUNT => seed_amount\n                },\n            )\n            .build(),\n        )\n        .commit()\n        .expect_success();\n\n    let default_account = builder.get_account(account).expect(\"should have account\");\n    let named_keys = default_account.named_keys();\n\n    let contract_key = named_keys\n        .get(&contract_name)\n        .expect(\"contract_name key should exist\");\n\n    let stored_contract = builder\n        .query(None, *contract_key, &[])\n        .expect(\"should have stored value at contract key\");\n\n    let contract = stored_contract\n        .as_contract()\n        .expect(\"stored value should be contract\");\n\n    let contract_named_keys = contract.named_keys();\n\n    let contract_purse = contract_named_keys\n        .get(&purse_name)\n        .expect(\"purse_name key should exist\")\n        .into_uref()\n        .expect(\"should be a uref\");\n\n    // Create a mock bid with a vesting schedule initialized.\n    // This is only there to make sure size constraints are not a problem\n    // when trying to read this relatively large structure as a guest.\n    let mut mock_bid = DelegatorBid::locked(\n        DelegatorKind::Purse(contract_purse.addr()),\n        U512::from(100_000_000),\n        contract_purse,\n        validator_pk.clone(),\n        0,\n    );\n\n    mock_bid\n        .vesting_schedule_mut()\n        .unwrap()\n        .initialize_with_schedule(U512::from(100_000_000), 0);\n\n    let delegation_key = 
Key::BidAddr(BidAddr::DelegatedPurse {\n        validator: validator_pk.to_account_hash(),\n        delegator: contract_purse.addr(),\n    });\n\n    builder.write_data_and_commit(\n        [(\n            delegation_key,\n            StoredValue::BidKind(BidKind::Delegator(Box::new(mock_bid))),\n        )]\n        .iter()\n        .cloned(),\n    );\n\n    builder\n        .query(None, delegation_key, &[])\n        .expect(\"should have delegation bid\")\n        .as_bid_kind()\n        .expect(\"should be bidkind\")\n        .vesting_schedule()\n        .expect(\"should have vesting schedule\")\n        .locked_amounts()\n        .expect(\"should have locked amounts\");\n\n    builder\n        .exec(\n            ExecuteRequestBuilder::contract_call_by_name(\n                account,\n                &contract_name,\n                &entry_point_name,\n                runtime_args! {\n                    ARG_ACTION => get_staked_amount.clone(),\n                    ARG_VALIDATOR => validator_pk.clone(),\n                },\n            )\n            .build(),\n        )\n        .commit()\n        .expect_success();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/system_contracts/auction/reservations.rs",
    "content": "use num_rational::Ratio;\nuse num_traits::{CheckedMul, CheckedSub};\nuse once_cell::sync::Lazy;\nuse std::collections::BTreeMap;\nuse tempfile::TempDir;\n\nuse casper_engine_test_support::{\n    ChainspecConfig, ExecuteRequestBuilder, LmdbWasmTestBuilder, StepRequestBuilder,\n    DEFAULT_ACCOUNT_ADDR, DEFAULT_PROTOCOL_VERSION, LOCAL_GENESIS_REQUEST,\n    MINIMUM_ACCOUNT_CREATION_BALANCE, SYSTEM_ADDR,\n};\nuse casper_execution_engine::{\n    engine_state::{engine_config::DEFAULT_MINIMUM_DELEGATION_AMOUNT, Error},\n    execution::ExecError,\n};\n\nuse crate::test::system_contracts::auction::{\n    get_delegator_staked_amount, get_era_info, get_validator_bid,\n};\nuse casper_types::{\n    self,\n    account::AccountHash,\n    api_error::ApiError,\n    runtime_args,\n    system::auction::{\n        BidsExt, DelegationRate, DelegatorKind, Error as AuctionError, Reservation,\n        SeigniorageAllocation, ARG_AMOUNT, ARG_DELEGATION_RATE, ARG_DELEGATOR, ARG_DELEGATORS,\n        ARG_ENTRY_POINT, ARG_PUBLIC_KEY, ARG_RESERVATIONS, ARG_RESERVED_SLOTS, ARG_REWARDS_MAP,\n        ARG_VALIDATOR, DELEGATION_RATE_DENOMINATOR, METHOD_DISTRIBUTE,\n    },\n    ProtocolVersion, PublicKey, SecretKey, U512,\n};\n\nconst ARG_TARGET: &str = \"target\";\n\nconst CONTRACT_TRANSFER_TO_ACCOUNT: &str = \"transfer_to_account_u512.wasm\";\nconst CONTRACT_ADD_BID: &str = \"add_bid.wasm\";\nconst CONTRACT_DELEGATE: &str = \"delegate.wasm\";\nconst CONTRACT_UNDELEGATE: &str = \"undelegate.wasm\";\nconst CONTRACT_ADD_RESERVATIONS: &str = \"add_reservations.wasm\";\nconst CONTRACT_CANCEL_RESERVATIONS: &str = \"cancel_reservations.wasm\";\n\nconst ADD_BID_AMOUNT_1: u64 = 1_000_000_000_000;\nconst ADD_BID_RESERVED_SLOTS: u32 = 1;\n\nstatic VALIDATOR_1: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic DELEGATOR_1: Lazy<PublicKey> = Lazy::new(|| {\n    let 
secret_key = SecretKey::ed25519_from_bytes([205; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic DELEGATOR_2: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([207; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic DELEGATOR_3: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([209; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic DELEGATOR_4: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([211; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\n\nstatic VALIDATOR_1_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*VALIDATOR_1));\nstatic DELEGATOR_1_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*DELEGATOR_1));\nstatic DELEGATOR_2_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*DELEGATOR_2));\nstatic DELEGATOR_3_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*DELEGATOR_3));\nstatic DELEGATOR_4_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*DELEGATOR_4));\n\nconst VALIDATOR_1_DELEGATION_RATE: DelegationRate = 10;\nconst VALIDATOR_1_RESERVATION_DELEGATION_RATE: DelegationRate = 20;\n\n/// Fund validator and delegators accounts.\nfn setup_accounts(max_delegators_per_validator: u32) -> LmdbWasmTestBuilder {\n    let chainspec =\n        ChainspecConfig::default().with_max_delegators_per_validator(max_delegators_per_validator);\n\n    let data_dir = TempDir::new().expect(\"should create temp dir\");\n    let mut builder = LmdbWasmTestBuilder::new_with_config(data_dir.path(), chainspec);\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let transfer_to_validator_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *VALIDATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE)\n        },\n    )\n    .build();\n\n    let transfer_to_delegator_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_1_ADDR,\n            ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE)\n        },\n    )\n    .build();\n\n    let transfer_to_delegator_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_2_ADDR,\n            ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE)\n        },\n    )\n    .build();\n\n    let transfer_to_delegator_3 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *DELEGATOR_3_ADDR,\n            ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE)\n        },\n    )\n    .build();\n\n    let transfer_to_delegator_4 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            ARG_TARGET => *DELEGATOR_4_ADDR,\n            ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE)\n        },\n    )\n    .build();\n\n    let post_genesis_request = vec![\n        transfer_to_validator_1,\n        transfer_to_delegator_1,\n        transfer_to_delegator_2,\n        transfer_to_delegator_3,\n        transfer_to_delegator_4,\n    ];\n\n    for request in post_genesis_request {\n        builder.exec(request).expect_success().commit();\n    }\n\n    builder\n}\n\n/// Submit validator bid for `VALIDATOR_1_ADDR` and advance eras\n/// until they are elected as active validator.\nfn setup_validator_bid(builder: &mut LmdbWasmTestBuilder, reserved_slots: u32) {\n    let add_validator_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE,\n            ARG_RESERVED_SLOTS => reserved_slots,\n        },\n    )\n    .build();\n\n    builder\n        .exec(add_validator_request)\n        .expect_success()\n        .commit();\n\n    for _ in 0..=builder.get_auction_delay() {\n        let step_request = StepRequestBuilder::new()\n            .with_parent_state_hash(builder.get_post_state_hash())\n            .with_protocol_version(ProtocolVersion::V1_0_0)\n            .with_next_era_id(builder.get_era().successor())\n            .with_run_auction(true)\n            .build();\n\n        assert!(\n            builder.step(step_request).is_success(),\n            \"must execute step request\"\n        );\n    }\n}\n\n#[ignore]\n#[test]\nfn should_enforce_max_delegators_per_validator_with_reserved_slots() {\n    let mut builder = setup_accounts(3);\n\n    setup_validator_bid(&mut builder, ADD_BID_RESERVED_SLOTS);\n\n    let delegation_request_1 = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n    
    CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    let delegation_request_2 = ExecuteRequestBuilder::standard(\n        *DELEGATOR_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_2.clone(),\n        },\n    )\n    .build();\n\n    let delegation_requests = [delegation_request_1, delegation_request_2];\n\n    for request in delegation_requests {\n        builder.exec(request).expect_success().commit();\n    }\n\n    // Delegator 3 is not on reservation list and validator is at delegator limit\n    // therefore delegation request should fail\n    let delegation_request_3 = ExecuteRequestBuilder::standard(\n        *DELEGATOR_3_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_3.clone(),\n        },\n    )\n    .build();\n\n    builder.exec(delegation_request_3).expect_failure();\n    let error = builder.get_error().expect(\"should get error\");\n\n    assert!(matches!(\n        error,\n        Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error)))\n        if auction_error == AuctionError::ExceededDelegatorSizeLimit as u8));\n\n    // Once we put Delegator 3 on reserved list the delegation request should succeed\n    let reservation = Reservation::new(\n        VALIDATOR_1.clone(),\n        DelegatorKind::PublicKey(DELEGATOR_3.clone()),\n        0,\n    );\n    let reservation_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_RESERVATIONS,\n        runtime_args! {\n            ARG_RESERVATIONS => vec![reservation],\n        },\n    )\n    .build();\n    builder.exec(reservation_request).expect_success().commit();\n\n    let delegation_request_4 = ExecuteRequestBuilder::standard(\n        *DELEGATOR_3_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_3.clone(),\n        },\n    )\n    .build();\n    builder.exec(delegation_request_4).expect_success().commit();\n\n    // Delegator 4 not on reserved list and validator at capacity\n    // therefore delegation request should fail\n    let delegation_request_5 = ExecuteRequestBuilder::standard(\n        *DELEGATOR_4_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_4.clone(),\n        },\n    )\n    .build();\n    builder.exec(delegation_request_5).expect_failure();\n\n    // Now we undelegate Delegator 3 and cancel his reservation,\n    // then add reservation for Delegator 4. Then delegation request for\n    // Delegator 4 should succeed\n    let undelegation_request = ExecuteRequestBuilder::standard(\n        *DELEGATOR_3_ADDR,\n        CONTRACT_UNDELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::MAX,\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_3.clone(),\n        },\n    )\n    .build();\n    builder.exec(undelegation_request).expect_success().commit();\n\n    let cancellation_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_CANCEL_RESERVATIONS,\n        runtime_args! {\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATORS => vec![DelegatorKind::PublicKey(DELEGATOR_3.clone())],\n        },\n    )\n    .build();\n    builder.exec(cancellation_request).expect_success().commit();\n\n    let reservation = Reservation::new(\n        VALIDATOR_1.clone(),\n        DelegatorKind::PublicKey(DELEGATOR_4.clone()),\n        0,\n    );\n    let reservation_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_RESERVATIONS,\n        runtime_args! {\n            ARG_RESERVATIONS => vec![reservation],\n        },\n    )\n    .build();\n    builder.exec(reservation_request).expect_success().commit();\n\n    let delegation_request_6 = ExecuteRequestBuilder::standard(\n        *DELEGATOR_4_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_4.clone(),\n        },\n    )\n    .build();\n    builder.exec(delegation_request_6).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_allow_validator_to_reserve_all_delegator_slots() {\n    let max_delegators_per_validator = 2;\n\n    let mut builder = setup_accounts(max_delegators_per_validator);\n\n    setup_validator_bid(&mut builder, 0);\n\n    // cannot reserve more slots than maximum delegator number\n    let add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE,\n            ARG_RESERVED_SLOTS => max_delegators_per_validator + 1,\n        },\n    )\n    .build();\n\n    builder.exec(add_bid_request).expect_failure();\n    let error = builder.get_error().expect(\"should get error\");\n\n    assert!(matches!(\n        error,\n        Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error)))\n        if auction_error == AuctionError::ExceededReservationSlotsLimit as u8));\n\n    // can reserve all slots\n    let add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! 
{\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE,\n            ARG_RESERVED_SLOTS => max_delegators_per_validator,\n        },\n    )\n    .build();\n\n    builder.exec(add_bid_request).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_not_allow_validator_to_reserve_more_slots_than_free_delegator_slots() {\n    let max_delegators_per_validator = 2;\n\n    let mut builder = setup_accounts(max_delegators_per_validator);\n\n    setup_validator_bid(&mut builder, 0);\n\n    let delegation_request_1 = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    builder.exec(delegation_request_1).expect_success().commit();\n\n    // cannot reserve more slots than number of free delegator slots\n    let add_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! 
{\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE,\n            ARG_RESERVED_SLOTS =>  max_delegators_per_validator,\n        },\n    )\n    .build();\n\n    builder.exec(add_bid_request).expect_failure();\n    let error = builder.get_error().expect(\"should get error\");\n\n    assert!(matches!(\n        error,\n        Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error)))\n        if auction_error == AuctionError::ExceededReservationSlotsLimit as u8));\n}\n\n#[ignore]\n#[test]\nfn should_not_allow_validator_to_reduce_number_of_reserved_spots_if_they_are_occupied() {\n    let mut builder = setup_accounts(3);\n\n    let reserved_slots = 2;\n    setup_validator_bid(&mut builder, reserved_slots);\n\n    // add reservations for Delegators 1 and 2\n    let reservation_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_RESERVATIONS,\n        runtime_args! {\n            ARG_RESERVATIONS => vec![\n                Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_1.clone()) , 0),\n                Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_2.clone()) , 0),\n            ],\n        },\n    )\n    .build();\n    builder.exec(reservation_request).expect_success().commit();\n    let reservations = builder\n        .get_bids()\n        .reservations_by_validator_public_key(&VALIDATOR_1)\n        .expect(\"should have reservations\");\n    assert_eq!(reservations.len(), 2);\n\n    // cannot reduce number of reserved slots because\n    // there are reservations for all of them\n    let add_validator_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! 
{\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE,\n            ARG_RESERVED_SLOTS => reserved_slots - 1,\n        },\n    )\n    .build();\n\n    builder.exec(add_validator_bid_request).expect_failure();\n    let error = builder.get_error().expect(\"should get error\");\n    assert!(matches!(\n        error,\n        Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error)))\n        if auction_error == AuctionError::ReservationSlotsCountTooSmall as u8));\n\n    // remove a reservation for Delegator 2 and\n    // reduce number of reserved spots\n    let cancellation_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_CANCEL_RESERVATIONS,\n        runtime_args! {\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATORS => vec![DelegatorKind::PublicKey(DELEGATOR_2.clone())],\n        },\n    )\n    .build();\n    builder.exec(cancellation_request).expect_success().commit();\n\n    let reservations = builder\n        .get_bids()\n        .reservations_by_validator_public_key(&VALIDATOR_1)\n        .expect(\"should have reservations\");\n    assert_eq!(reservations.len(), 1);\n\n    let add_validator_bid_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! 
{\n            ARG_PUBLIC_KEY => VALIDATOR_1.clone(),\n            ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1),\n            ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE,\n            ARG_RESERVED_SLOTS => reserved_slots - 1,\n        },\n    )\n    .build();\n\n    builder\n        .exec(add_validator_bid_request)\n        .expect_success()\n        .commit();\n\n    // cannot add a reservation for Delegator 2 back\n    // because number of slots is reduced\n    let reservation_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_RESERVATIONS,\n        runtime_args! {\n            ARG_RESERVATIONS => vec![\n                Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_2.clone()), 0),\n            ],\n        },\n    )\n    .build();\n    builder.exec(reservation_request).expect_failure();\n    let error = builder.get_error().expect(\"should get error\");\n\n    assert!(matches!(\n        error,\n        Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error)))\n        if auction_error == AuctionError::ExceededReservationsLimit as u8));\n}\n\n#[ignore]\n#[test]\nfn should_not_allow_validator_to_remove_active_reservation_if_there_are_no_free_delegator_slots() {\n    let mut builder = setup_accounts(2);\n\n    let reserved_slots = 1;\n    setup_validator_bid(&mut builder, reserved_slots);\n\n    // add delegation for Delegator 1\n    let delegation_request_1 = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n\n    builder.exec(delegation_request_1).expect_success().commit();\n\n    // cannot add delegation for Delegator 2\n    let delegation_request_2 = ExecuteRequestBuilder::standard(\n        *DELEGATOR_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_2.clone(),\n        },\n    )\n    .build();\n\n    builder.exec(delegation_request_2).expect_failure();\n    let error = builder.get_error().expect(\"should get error\");\n    assert!(matches!(\n        error,\n        Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error)))\n        if auction_error == AuctionError::ExceededDelegatorSizeLimit as u8));\n\n    // add reservation for Delegator 2\n    let reservation_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_RESERVATIONS,\n        runtime_args! {\n            ARG_RESERVATIONS => vec![\n                Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_2.clone()), 0),\n            ],\n        },\n    )\n    .build();\n    builder.exec(reservation_request).expect_success().commit();\n\n    // add delegation for Delegator 2\n    let delegation_request_2 = ExecuteRequestBuilder::standard(\n        *DELEGATOR_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_2.clone(),\n        },\n    )\n    .build();\n\n    builder.exec(delegation_request_2).expect_success().commit();\n\n    // cannot cancel reservation for Delegator 2\n    // because there are no free public slots for delegators\n    let cancellation_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_CANCEL_RESERVATIONS,\n        runtime_args! {\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATORS => vec![DelegatorKind::PublicKey(DELEGATOR_2.clone())],\n        },\n    )\n    .build();\n    builder.exec(cancellation_request).expect_failure();\n    let error = builder.get_error().expect(\"should get error\");\n    assert!(matches!(\n        error,\n        Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error)))\n        if auction_error == AuctionError::ExceededDelegatorSizeLimit as u8));\n}\n\n#[ignore]\n#[test]\nfn should_handle_reserved_slots() {\n    let mut builder = setup_accounts(4);\n\n    let reserved_slots = 3;\n    setup_validator_bid(&mut builder, reserved_slots);\n\n    let reservations = builder\n        .get_bids()\n        .reservations_by_validator_public_key(&VALIDATOR_1);\n    assert!(reservations.is_none());\n\n    // add reservations for Delegators 1 and 2\n    let reservation_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_RESERVATIONS,\n        runtime_args! 
{\n            ARG_RESERVATIONS => vec![\n                Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_1.clone()), 0),\n                Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_2.clone()), 0),\n            ],\n        },\n    )\n    .build();\n    builder.exec(reservation_request).expect_success().commit();\n    let reservations = builder\n        .get_bids()\n        .reservations_by_validator_public_key(&VALIDATOR_1)\n        .expect(\"should have reservations\");\n    assert_eq!(reservations.len(), 2);\n\n    // try to cancel reservation for Delegators 1,2 and 3\n    // this fails because reservation for Delegator 3 doesn't exist yet\n    let cancellation_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_CANCEL_RESERVATIONS,\n        runtime_args! {\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATORS => vec![DELEGATOR_1.clone(), DELEGATOR_2.clone(), DELEGATOR_3.clone()],\n        },\n    )\n    .build();\n    builder.exec(cancellation_request).expect_failure();\n    let error = builder.get_error().expect(\"should get error\");\n    assert!(matches!(\n        error,\n        Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error)))\n        if auction_error == AuctionError::ReservationNotFound as u8));\n\n    // add reservation for Delegator 2 and 3\n    // reservation for Delegator 2 already exists, but it shouldn't cause an error\n    let reservation_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_RESERVATIONS,\n        runtime_args! 
{\n            ARG_RESERVATIONS => vec![\n                Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_2.clone()), 0),\n                Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_3.clone()), 0),\n            ],\n        },\n    )\n    .build();\n    builder.exec(reservation_request).expect_success().commit();\n    let reservations = builder\n        .get_bids()\n        .reservations_by_validator_public_key(&VALIDATOR_1)\n        .expect(\"should have reservations\");\n    assert_eq!(reservations.len(), 3);\n\n    // try to add reservation for Delegator 4\n    // this fails because the reservation list is already full\n    let reservation_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_RESERVATIONS,\n        runtime_args! {\n            ARG_RESERVATIONS => vec![\n                Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_4.clone()), 0),\n            ],\n        },\n    )\n    .build();\n    builder.exec(reservation_request).expect_failure();\n    let error = builder.get_error().expect(\"should get error\");\n    assert!(matches!(\n        error,\n        Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error)))\n        if auction_error == AuctionError::ExceededReservationsLimit as u8));\n\n    // cancel all reservations\n    let cancellation_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_CANCEL_RESERVATIONS,\n        runtime_args! 
{\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATORS => vec![DelegatorKind::PublicKey(DELEGATOR_1.clone()), DelegatorKind::PublicKey(DELEGATOR_2.clone()), DelegatorKind::PublicKey(DELEGATOR_3.clone())],\n        },\n    )\n    .build();\n    builder.exec(cancellation_request).expect_success().commit();\n    let reservations = builder\n        .get_bids()\n        .reservations_by_validator_public_key(&VALIDATOR_1);\n    assert!(reservations.is_none());\n}\n\n#[ignore]\n#[test]\nfn should_update_reservation_delegation_rate() {\n    let mut builder = setup_accounts(4);\n\n    let reserved_slots = 3;\n    setup_validator_bid(&mut builder, reserved_slots);\n\n    let reservations = builder\n        .get_bids()\n        .reservations_by_validator_public_key(&VALIDATOR_1);\n    assert!(reservations.is_none());\n\n    // add reservations for Delegators 1 and 2\n    let reservation_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_RESERVATIONS,\n        runtime_args! {\n            ARG_RESERVATIONS => vec![\n                Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_1.clone()), 0),\n                Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_2.clone()), 0),\n            ],\n        },\n    )\n    .build();\n    builder.exec(reservation_request).expect_success().commit();\n    let reservations = builder\n        .get_bids()\n        .reservations_by_validator_public_key(&VALIDATOR_1)\n        .expect(\"should have reservations\");\n    assert_eq!(reservations.len(), 2);\n\n    // try to change delegation rate for Delegator 1\n    // this fails because delegation rate value is invalid\n    let reservation_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_RESERVATIONS,\n        runtime_args! 
{\n            ARG_RESERVATIONS => vec![\n                Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_1.clone()), DELEGATION_RATE_DENOMINATOR + 1),\n            ],\n        },\n    )\n    .build();\n    builder.exec(reservation_request).expect_failure();\n    let error = builder.get_error().expect(\"should get error\");\n    assert!(matches!(\n        error,\n        Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error)))\n        if auction_error == AuctionError::DelegationRateTooLarge as u8));\n\n    // change delegation rate for Delegator 1\n    let reservation_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_RESERVATIONS,\n        runtime_args! {\n            ARG_RESERVATIONS => vec![\n                Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_1.clone()), 10),\n            ],\n        },\n    )\n    .build();\n    builder.exec(reservation_request).expect_success().commit();\n    let reservations = builder\n        .get_bids()\n        .reservations_by_validator_public_key(&VALIDATOR_1)\n        .expect(\"should have reservations\");\n    assert_eq!(reservations.len(), 2);\n\n    let delegator_1_reservation = reservations\n        .iter()\n        .find(|r| *r.delegator_kind() == DelegatorKind::PublicKey(DELEGATOR_1.clone()))\n        .unwrap();\n    assert_eq!(*delegator_1_reservation.delegation_rate(), 10);\n}\n\n#[ignore]\n#[test]\nfn should_distribute_rewards_with_reserved_slots() {\n    let validator_stake = U512::from(ADD_BID_AMOUNT_1);\n    let delegator_1_stake = U512::from(1_000_000_000_000u64);\n    let delegator_2_stake = U512::from(1_000_000_000_000u64);\n    let total_delegator_stake = delegator_1_stake + delegator_2_stake;\n    let total_stake = validator_stake + total_delegator_stake;\n\n    let mut builder = setup_accounts(3);\n\n    setup_validator_bid(&mut builder, ADD_BID_RESERVED_SLOTS);\n\n    // add reservation for Delegator 
1\n    let reservation_request = ExecuteRequestBuilder::standard(\n        *VALIDATOR_1_ADDR,\n        CONTRACT_ADD_RESERVATIONS,\n        runtime_args! {\n            ARG_RESERVATIONS => vec![\n                Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_1.clone()), VALIDATOR_1_RESERVATION_DELEGATION_RATE),\n            ],\n        },\n    )\n    .build();\n    builder.exec(reservation_request).expect_success().commit();\n\n    // add delegator bids for Delegator 1 and 2\n    let delegation_request_1 = ExecuteRequestBuilder::standard(\n        *DELEGATOR_1_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => delegator_1_stake,\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_1.clone(),\n        },\n    )\n    .build();\n    let delegation_request_2 = ExecuteRequestBuilder::standard(\n        *DELEGATOR_2_ADDR,\n        CONTRACT_DELEGATE,\n        runtime_args! {\n            ARG_AMOUNT => delegator_2_stake,\n            ARG_VALIDATOR => VALIDATOR_1.clone(),\n            ARG_DELEGATOR => DELEGATOR_2.clone(),\n        },\n    )\n    .build();\n\n    let delegation_requests = [delegation_request_1, delegation_request_2];\n\n    for request in delegation_requests {\n        builder.exec(request).expect_success().commit();\n    }\n\n    // calculate expected rewards\n    let protocol_version = DEFAULT_PROTOCOL_VERSION;\n    let initial_supply = builder.total_supply(protocol_version, None);\n    let total_payout = builder.base_round_reward(None, protocol_version);\n    let rate = builder.round_seigniorage_rate(None, protocol_version);\n    let expected_total_reward = rate * initial_supply;\n    let expected_total_reward_integer = expected_total_reward.to_integer();\n    assert_eq!(total_payout, expected_total_reward_integer);\n\n    // advance eras\n    for _ in 0..=builder.get_auction_delay() {\n        let step_request = StepRequestBuilder::new()\n            
.with_parent_state_hash(builder.get_post_state_hash())\n            .with_protocol_version(ProtocolVersion::V1_0_0)\n            .with_next_era_id(builder.get_era().successor())\n            .with_run_auction(true)\n            .build();\n\n        assert!(\n            builder.step(step_request).is_success(),\n            \"must execute step successfully\"\n        );\n    }\n\n    let mut rewards = BTreeMap::new();\n    rewards.insert(VALIDATOR_1.clone(), vec![total_payout]);\n\n    let distribute_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *SYSTEM_ADDR,\n        builder.get_auction_contract_hash(),\n        METHOD_DISTRIBUTE,\n        runtime_args! {\n            ARG_ENTRY_POINT => METHOD_DISTRIBUTE,\n            ARG_REWARDS_MAP => rewards\n        },\n    )\n    .build();\n\n    builder.exec(distribute_request).commit().expect_success();\n\n    let default_commission_rate = Ratio::new(\n        U512::from(VALIDATOR_1_DELEGATION_RATE),\n        U512::from(DELEGATION_RATE_DENOMINATOR),\n    );\n    let reservation_commission_rate = Ratio::new(\n        U512::from(VALIDATOR_1_RESERVATION_DELEGATION_RATE),\n        U512::from(DELEGATION_RATE_DENOMINATOR),\n    );\n    let reward_multiplier = Ratio::new(total_delegator_stake, total_stake);\n    let base_delegator_reward = expected_total_reward\n        .checked_mul(&reward_multiplier)\n        .expect(\"must get delegator reward\");\n\n    let delegator_1_expected_payout = {\n        let reward_multiplier = Ratio::new(delegator_1_stake, total_delegator_stake);\n        let delegator_1_reward = base_delegator_reward\n            .checked_mul(&reward_multiplier)\n            .unwrap();\n        let commission = delegator_1_reward\n            .checked_mul(&reservation_commission_rate)\n            .unwrap();\n        delegator_1_reward\n            .checked_sub(&commission)\n            .unwrap()\n            .to_integer()\n    };\n    let delegator_2_expected_payout = {\n        let 
reward_multiplier = Ratio::new(delegator_2_stake, total_delegator_stake);\n        let delegator_2_reward = base_delegator_reward\n            .checked_mul(&reward_multiplier)\n            .unwrap();\n        let commission = delegator_2_reward\n            .checked_mul(&default_commission_rate)\n            .unwrap();\n        delegator_2_reward\n            .checked_sub(&commission)\n            .unwrap()\n            .to_integer()\n    };\n\n    let delegator_1_actual_payout = {\n        let delegator_stake_before = delegator_1_stake;\n        let delegator_stake_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone());\n        delegator_stake_after - delegator_stake_before\n    };\n    assert_eq!(delegator_1_actual_payout, delegator_1_expected_payout);\n\n    let delegator_2_actual_payout = {\n        let delegator_stake_before = delegator_2_stake;\n        let delegator_stake_after =\n            get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone());\n        delegator_stake_after - delegator_stake_before\n    };\n    assert_eq!(delegator_2_actual_payout, delegator_2_expected_payout);\n\n    let validator_1_expected_payout = {\n        let total_delegator_payout = delegator_1_expected_payout + delegator_2_expected_payout;\n        let validators_part = expected_total_reward - Ratio::from(total_delegator_payout);\n        validators_part.to_integer()\n    };\n\n    let validator_1_actual_payout = {\n        let validator_stake_before = validator_stake;\n        let validator_stake_after = get_validator_bid(&mut builder, VALIDATOR_1.clone())\n            .expect(\"should have validator bid\")\n            .staked_amount();\n        validator_stake_after - validator_stake_before\n    };\n\n    assert_eq!(validator_1_actual_payout, validator_1_expected_payout);\n\n    let era_info = get_era_info(&mut builder);\n\n    assert!(matches!(\n        
era_info.select(DELEGATOR_1.clone()).next(),\n        Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. })\n        if *delegator_public_key == *DELEGATOR_1 && *amount == delegator_1_expected_payout\n    ));\n\n    assert!(matches!(\n        era_info.select(DELEGATOR_2.clone()).next(),\n        Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. })\n        if *delegator_public_key == *DELEGATOR_2 && *amount == delegator_2_expected_payout\n    ));\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/system_contracts/auction_bidding.rs",
    "content": "use num_traits::Zero;\n\nuse casper_engine_test_support::{\n    utils, ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder,\n    UpgradeRequestBuilder, DEFAULT_ACCOUNTS, DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_PUBLIC_KEY,\n    DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS,\n    DEFAULT_PROPOSER_PUBLIC_KEY, DEFAULT_PROTOCOL_VERSION, DEFAULT_UNBONDING_DELAY,\n    LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE, SYSTEM_ADDR,\n    TIMESTAMP_MILLIS_INCREMENT,\n};\nuse casper_execution_engine::{engine_state::Error as EngineError, execution::ExecError};\n\nuse casper_types::{\n    account::AccountHash,\n    runtime_args,\n    system::{\n        auction::{\n            self, BidsExt, DelegationRate, UnbondKind, ARG_VALIDATOR_PUBLIC_KEYS, INITIAL_ERA_ID,\n            METHOD_SLASH,\n        },\n        mint,\n    },\n    ApiError, EraId, GenesisAccount, GenesisValidator, Motes, ProtocolVersion, PublicKey,\n    SecretKey, DEFAULT_MINIMUM_BID_AMOUNT, U512,\n};\n\nconst CONTRACT_TRANSFER_TO_ACCOUNT: &str = \"transfer_to_account_u512.wasm\";\nconst CONTRACT_ADD_BID: &str = \"add_bid.wasm\";\nconst CONTRACT_WITHDRAW_BID: &str = \"withdraw_bid.wasm\";\n\nconst GENESIS_VALIDATOR_STAKE: u64 = 50_000;\nconst GENESIS_ACCOUNT_STAKE: u64 = 100_000;\nconst TRANSFER_AMOUNT: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE;\n\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_PUBLIC_KEY: &str = \"public_key\";\nconst ARG_DELEGATION_RATE: &str = \"delegation_rate\";\n\nconst DELEGATION_RATE: DelegationRate = 42;\n\n#[ignore]\n#[test]\nfn should_run_successful_bond_and_unbond_and_slashing() {\n    let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone();\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            \"target\" => *SYSTEM_ADDR,\n            \"amount\" => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let _default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should get account 1\");\n\n    let auction = builder.get_auction_contract_hash();\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(GENESIS_ACCOUNT_STAKE),\n            ARG_PUBLIC_KEY => default_public_key_arg.clone(),\n            ARG_DELEGATION_RATE => DELEGATION_RATE,\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let bids = builder.get_bids();\n    let default_account_bid = bids\n        .validator_bid(&DEFAULT_ACCOUNT_PUBLIC_KEY)\n        .expect(\"should have bid\");\n    let bid_purse = *default_account_bid.bonding_purse();\n    assert_eq!(\n        builder.get_purse_balance(bid_purse),\n        GENESIS_ACCOUNT_STAKE.into()\n    );\n\n    let unbond_purses = builder.get_unbonds();\n    assert_eq!(unbond_purses.len(), 0);\n\n    //\n    // Partial unbond\n    //\n\n    let unbond_amount = U512::from(GENESIS_ACCOUNT_STAKE - DEFAULT_MINIMUM_BID_AMOUNT);\n\n    let unbonding_purse = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have default account\")\n        .main_purse();\n    let exec_request_3 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_WITHDRAW_BID,\n        runtime_args! 
{\n            ARG_AMOUNT => unbond_amount,\n            ARG_PUBLIC_KEY => default_public_key_arg.clone(),\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_3).expect_success().commit();\n\n    let account_balance_before = builder.get_purse_balance(unbonding_purse);\n\n    let unbonds = builder.get_unbonds();\n    let unbond = {\n        assert_eq!(unbonds.len(), 1);\n        let unbond_kind = UnbondKind::Validator((*DEFAULT_ACCOUNT_PUBLIC_KEY).clone());\n        let unbond = unbonds\n            .get(&unbond_kind)\n            .expect(\"should have unbond\")\n            .first()\n            .expect(\"must have one unbond entry\");\n        assert_eq!(unbond.eras().len(), 1, \"unexpected era count\");\n        assert_eq!(unbond.validator_public_key(), &default_public_key_arg,);\n        assert!(unbond.is_validator());\n        unbond\n    };\n\n    let unbond_era_1 = unbond.eras().first().expect(\"should have era\");\n    assert_eq!(unbond_era_1.era_of_creation(), INITIAL_ERA_ID,);\n\n    builder.run_auction(\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS,\n        Vec::new(),\n    );\n\n    let unbonds = builder.get_unbonds();\n    let unbond = {\n        assert_eq!(unbonds.len(), 1);\n\n        let unbond = unbonds\n            .get(&UnbondKind::Validator(\n                (*DEFAULT_ACCOUNT_PUBLIC_KEY).clone(),\n            ))\n            .expect(\"should have unbond\")\n            .first()\n            .expect(\"must have one unbond entry\");\n        assert_eq!(unbond.eras().len(), 1);\n        assert_eq!(unbond.validator_public_key(), &default_public_key_arg,);\n        assert!(unbond.is_validator());\n        unbond\n    };\n\n    let account_balance = builder.get_purse_balance(unbonding_purse);\n    assert_eq!(account_balance_before, account_balance);\n\n    let unbond_era_2 = unbond.eras().first().expect(\"should have eras\");\n    assert_eq!(unbond_era_2.amount(), &unbond_amount,);\n\n    
assert_eq!(unbond_era_2, unbond_era_1);\n\n    let exec_request_5 = ExecuteRequestBuilder::contract_call_by_hash(\n        *SYSTEM_ADDR,\n        auction,\n        METHOD_SLASH,\n        runtime_args! {\n            ARG_VALIDATOR_PUBLIC_KEYS => vec![\n               default_public_key_arg,\n            ]\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_5).expect_success().commit();\n\n    let unbonds = builder.get_unbonds();\n    let unbond_kind = UnbondKind::Validator((*DEFAULT_ACCOUNT_PUBLIC_KEY).clone());\n    assert!(!unbonds.contains_key(&unbond_kind));\n\n    let bids = builder.get_bids();\n    assert!(bids.validator_bid(&DEFAULT_ACCOUNT_PUBLIC_KEY).is_none());\n\n    let account_balance_after_slashing = builder.get_purse_balance(unbonding_purse);\n    assert_eq!(account_balance_after_slashing, account_balance_before);\n}\n\n#[ignore]\n#[test]\nfn should_fail_bonding_with_insufficient_funds_directly() {\n    let new_validator_sk = SecretKey::ed25519_from_bytes([123; SecretKey::ED25519_LENGTH]).unwrap();\n    let new_validator_pk: PublicKey = (&new_validator_sk).into();\n    let new_validator_hash = AccountHash::from(&new_validator_pk);\n    assert_ne!(&DEFAULT_PROPOSER_PUBLIC_KEY.clone(), &new_validator_pk);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    let transfer_amount = U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE);\n    let delegation_rate: DelegationRate = 10;\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let exec_request = TransferRequestBuilder::new(transfer_amount, new_validator_hash)\n        .with_transfer_id(1)\n        .build();\n\n    builder.transfer_and_commit(exec_request).expect_success();\n\n    let new_validator_account = builder\n        .get_entity_by_account_hash(new_validator_hash)\n        .expect(\"should work\");\n\n    let new_validator_balance = builder.get_purse_balance(new_validator_account.main_purse());\n\n    assert_eq!(new_validator_balance, transfer_amount,);\n\n    
let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash(\n        new_validator_hash,\n        builder.get_auction_contract_hash(),\n        auction::METHOD_ADD_BID,\n        runtime_args! {\n            auction::ARG_PUBLIC_KEY => new_validator_pk,\n            auction::ARG_AMOUNT => new_validator_balance + U512::one(),\n            auction::ARG_DELEGATION_RATE => delegation_rate,\n        },\n    )\n    .build();\n    builder.exec(add_bid_request);\n\n    let error = builder.get_error().expect(\"should be error\");\n    assert!(\n        matches!(\n            error,\n            EngineError::Exec(ExecError::Revert(ApiError::Mint(mint_error))\n        )\n        if mint_error == mint::Error::InsufficientFunds as u8),\n        \"{:?}\",\n        error\n    );\n}\n\n#[ignore]\n#[test]\nfn should_fail_unbonding_validator_with_locked_funds() {\n    let account_1_secret_key =\n        SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap();\n    let account_1_public_key = PublicKey::from(&account_1_secret_key);\n    let account_1_hash = AccountHash::from(&account_1_public_key);\n    let account_1_balance = U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE);\n\n    let accounts = {\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        let account = GenesisAccount::account(\n            account_1_public_key.clone(),\n            Motes::new(account_1_balance),\n            Some(GenesisValidator::new(\n                Motes::new(GENESIS_VALIDATOR_STAKE),\n                DelegationRate::zero(),\n            )),\n        );\n        tmp.push(account);\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        account_1_hash,\n        CONTRACT_WITHDRAW_BID,\n        runtime_args! 
{\n            ARG_AMOUNT => U512::from(42),\n            ARG_PUBLIC_KEY => account_1_public_key,\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_2).commit();\n\n    let error_message = builder.get_error_message().expect(\"should have a result\");\n\n    // handle_payment::Error::NotBonded => 0\n    assert!(\n        error_message.contains(&format!(\n            \"{:?}\",\n            ApiError::from(auction::Error::ValidatorFundsLocked)\n        )),\n        \"error {:?}\",\n        error_message\n    );\n}\n\n#[ignore]\n#[test]\nfn should_fail_unbonding_validator_without_bonding_first() {\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_WITHDRAW_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(42),\n            ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n        },\n    )\n    .build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request).commit();\n\n    let error_message = builder.get_error_message().expect(\"should have a result\");\n\n    assert!(\n        error_message.contains(&format!(\n            \"{:?}\",\n            ApiError::from(auction::Error::ValidatorNotFound)\n        )),\n        \"error {:?}\",\n        error_message\n    );\n}\n\n#[ignore]\n#[test]\nfn should_run_successful_bond_and_unbond_with_release() {\n    let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone();\n\n    let mut timestamp_millis =\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have default account\");\n\n    let unbonding_purse = default_account.main_purse();\n\n    let exec_request = 
ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! {\n            \"target\" => *SYSTEM_ADDR,\n            \"amount\" => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let _default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should get account 1\");\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(GENESIS_ACCOUNT_STAKE),\n            ARG_PUBLIC_KEY => default_public_key_arg.clone(),\n            ARG_DELEGATION_RATE => DELEGATION_RATE,\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let bids = builder.get_bids();\n    let bid = bids\n        .validator_bid(&default_public_key_arg)\n        .expect(\"should have bid\");\n    let bid_purse = *bid.bonding_purse();\n    assert_eq!(\n        builder.get_purse_balance(bid_purse),\n        GENESIS_ACCOUNT_STAKE.into()\n    );\n\n    let unbond_purses = builder.get_unbonds();\n    assert_eq!(unbond_purses.len(), 0);\n\n    //\n    // Advance era by calling run_auction\n    //\n    builder.run_auction(timestamp_millis, Vec::new());\n    timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n    //\n    // Partial unbond\n    //\n\n    let unbond_amount = U512::from(GENESIS_ACCOUNT_STAKE) - DEFAULT_MINIMUM_BID_AMOUNT;\n\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_WITHDRAW_BID,\n        runtime_args! 
{\n            ARG_AMOUNT => unbond_amount,\n            ARG_PUBLIC_KEY => default_public_key_arg.clone(),\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let unbonds = builder.get_unbonds();\n    assert_eq!(unbonds.len(), 1);\n\n    let unbond_kind = UnbondKind::Validator((*DEFAULT_ACCOUNT_PUBLIC_KEY).clone());\n    let unbond = unbonds\n        .get(&unbond_kind)\n        .expect(\"should have unbond\")\n        .first()\n        .expect(\"must have one unbond entry\");\n    assert_eq!(unbond.eras().len(), 1);\n    assert_eq!(unbond.validator_public_key(), &default_public_key_arg,);\n    assert!(unbond.is_validator());\n\n    let era = unbond.eras().first().expect(\"should have era\");\n    assert_eq!(*era.amount(), unbond_amount);\n    let unbond_era_1 = era.era_of_creation();\n    assert_eq!(unbond_era_1, INITIAL_ERA_ID + 1);\n\n    let account_balance_before_auction = builder.get_purse_balance(unbonding_purse);\n\n    builder.run_auction(timestamp_millis, Vec::new());\n    timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n\n    assert_eq!(\n        builder.get_purse_balance(unbonding_purse),\n        account_balance_before_auction, // Not paid yet\n    );\n\n    let unbond_era_2 = unbond\n        .eras()\n        .first()\n        .expect(\"should have eras\")\n        .era_of_creation();\n\n    assert_eq!(unbond_era_2, unbond_era_1); // era of withdrawal didn't change since first run\n\n    //\n    // Advance state to hit the unbonding period\n    //\n    for _ in 0..DEFAULT_UNBONDING_DELAY {\n        builder.run_auction(timestamp_millis, Vec::new());\n        timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n    }\n\n    // Should pay out\n    builder.run_auction(timestamp_millis, Vec::new());\n    assert_eq!(\n        builder.get_purse_balance(unbonding_purse),\n        account_balance_before_auction + unbond_amount\n    );\n\n    let unbonds = builder.get_unbonds();\n    let unbond_kind = 
UnbondKind::Validator((*DEFAULT_ACCOUNT_PUBLIC_KEY).clone());\n    assert!(!unbonds.contains_key(&unbond_kind));\n\n    let bids = builder.get_bids();\n    assert!(!bids.is_empty());\n\n    let bid = bids\n        .validator_bid(&default_public_key_arg)\n        .expect(\"should have bid\");\n    let bid_purse = *bid.bonding_purse();\n    assert_eq!(\n        builder.get_purse_balance(bid_purse),\n        U512::from(GENESIS_ACCOUNT_STAKE) - unbond_amount, // remaining funds\n    );\n}\n\n#[ignore]\n#[test]\nfn should_run_successful_unbond_funds_after_changing_unbonding_delay() {\n    let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone();\n\n    let mut timestamp_millis =\n        DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let new_unbonding_delay = DEFAULT_UNBONDING_DELAY + 5;\n\n    let old_protocol_version = DEFAULT_PROTOCOL_VERSION;\n    let sem_ver = old_protocol_version.value();\n    let new_protocol_version =\n        ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1);\n    let default_activation_point = EraId::from(0);\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(old_protocol_version)\n            .with_new_protocol_version(new_protocol_version)\n            .with_activation_point(default_activation_point)\n            .with_new_unbonding_delay(new_unbonding_delay)\n            .build()\n    };\n\n    builder.upgrade(&mut upgrade_request);\n\n    let default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have default account\");\n\n    let unbonding_purse = default_account.main_purse();\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_ACCOUNT,\n        runtime_args! 
{\n            \"target\" => *SYSTEM_ADDR,\n            \"amount\" => U512::from(TRANSFER_AMOUNT)\n        },\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let _default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should get account 1\");\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            ARG_AMOUNT => U512::from(GENESIS_ACCOUNT_STAKE),\n            ARG_PUBLIC_KEY => default_public_key_arg.clone(),\n            ARG_DELEGATION_RATE => DELEGATION_RATE,\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    let bids = builder.get_bids();\n    let bid = bids\n        .validator_bid(&default_public_key_arg)\n        .expect(\"should have bid\");\n    let bid_purse = *bid.bonding_purse();\n    assert_eq!(\n        builder.get_purse_balance(bid_purse),\n        GENESIS_ACCOUNT_STAKE.into()\n    );\n\n    let unbond_purses = builder.get_unbonds();\n    assert_eq!(unbond_purses.len(), 0);\n\n    //\n    // Advance era by calling run_auction\n    //\n    builder.run_auction(timestamp_millis, Vec::new());\n\n    //\n    // Partial unbond\n    //\n\n    let unbond_amount = U512::from(GENESIS_ACCOUNT_STAKE) - DEFAULT_MINIMUM_BID_AMOUNT;\n\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_WITHDRAW_BID,\n        runtime_args! 
{\n            ARG_AMOUNT => unbond_amount,\n            ARG_PUBLIC_KEY => default_public_key_arg.clone(),\n        },\n    )\n    .build();\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    let account_balance_before_auction = builder.get_purse_balance(unbonding_purse);\n\n    let unbonds = builder.get_unbonds();\n    assert_eq!(unbonds.len(), 1);\n\n    let unbond_kind = UnbondKind::Validator((*DEFAULT_ACCOUNT_PUBLIC_KEY).clone());\n    let unbond = unbonds\n        .get(&unbond_kind)\n        .expect(\"should have unbond\")\n        .first()\n        .expect(\"must have one unbond entry\");\n    assert_eq!(unbond.eras().len(), 1);\n    assert_eq!(unbond.validator_public_key(), &default_public_key_arg,);\n    assert!(unbond.is_validator());\n\n    let era = unbond.eras().first().expect(\"should have eras\");\n    assert_eq!(era.era_of_creation(), INITIAL_ERA_ID + 1);\n\n    let unbond_era_1 = era.era_of_creation();\n\n    builder.run_auction(timestamp_millis, Vec::new());\n\n    let unbond_purses = builder.get_unbonds();\n    assert_eq!(unbond_purses.len(), 1);\n\n    let unbond_kind = UnbondKind::Validator((*DEFAULT_ACCOUNT_PUBLIC_KEY).clone());\n    let unbond = unbond_purses\n        .get(&unbond_kind)\n        .expect(\"should have unbond\")\n        .first()\n        .expect(\"must have one unbond entry\");\n    assert_eq!(unbond.validator_public_key(), &default_public_key_arg,);\n    assert!(unbond.is_validator());\n\n    assert_eq!(\n        builder.get_purse_balance(unbonding_purse),\n        account_balance_before_auction, // Not paid yet\n    );\n\n    let unbond_era_2 = unbond\n        .eras()\n        .first()\n        .expect(\"should have era\")\n        .era_of_creation();\n\n    assert_eq!(unbond_era_2, unbond_era_1); // era of withdrawal didn't change since first run\n\n    //\n    // Advance state to hit the unbonding period\n    //\n\n    for _ in 0..DEFAULT_UNBONDING_DELAY {\n        builder.run_auction(timestamp_millis, 
Vec::new());\n        timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n    }\n\n    // Won't pay out (yet) as we increased unbonding period\n    builder.run_auction(timestamp_millis, Vec::new());\n    timestamp_millis += TIMESTAMP_MILLIS_INCREMENT;\n\n    // Not paid yet\n    assert_eq!(\n        builder.get_purse_balance(unbonding_purse),\n        account_balance_before_auction,\n        \"should not pay after reaching default unbond delay era\"\n    );\n\n    // -1 below is the extra run auction above in `run_auction_request_1`\n    for _ in 0..new_unbonding_delay - DEFAULT_UNBONDING_DELAY - 1 {\n        builder.run_auction(timestamp_millis, Vec::new());\n    }\n\n    assert_eq!(\n        builder.get_purse_balance(unbonding_purse),\n        account_balance_before_auction + unbond_amount\n    );\n\n    let unbonds = builder.get_unbonds();\n    let unbond_kind = UnbondKind::Validator((*DEFAULT_ACCOUNT_PUBLIC_KEY).clone());\n    assert!(!unbonds.contains_key(&unbond_kind));\n\n    let bids = builder.get_bids();\n    assert!(!bids.is_empty());\n\n    let bid = bids\n        .validator_bid(&default_public_key_arg)\n        .expect(\"should have bid\");\n    let bid_purse = *bid.bonding_purse();\n    assert_eq!(\n        builder.get_purse_balance(bid_purse),\n        U512::from(GENESIS_ACCOUNT_STAKE) - unbond_amount, // remaining funds\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/system_contracts/genesis.rs",
    "content": "use num_traits::Zero;\nuse once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    genesis_config_builder::GenesisConfigBuilder, ChainspecConfig, LmdbWasmTestBuilder,\n    DEFAULT_AUCTION_DELAY, DEFAULT_CHAINSPEC_REGISTRY, DEFAULT_GENESIS_TIMESTAMP_MILLIS,\n    DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_PROTOCOL_VERSION, DEFAULT_ROUND_SEIGNIORAGE_RATE,\n    DEFAULT_STORAGE_COSTS, DEFAULT_SYSTEM_CONFIG, DEFAULT_UNBONDING_DELAY, DEFAULT_VALIDATOR_SLOTS,\n    DEFAULT_WASM_CONFIG,\n};\nuse casper_storage::data_access_layer::GenesisRequest;\nuse casper_types::{\n    account::AccountHash, system::auction::DelegationRate, GenesisAccount, GenesisValidator, Key,\n    Motes, ProtocolVersion, PublicKey, SecretKey, StoredValue, U512,\n};\n\nconst GENESIS_CONFIG_HASH: [u8; 32] = [127; 32];\nconst ACCOUNT_1_BONDED_AMOUNT: u64 = 1_000_000;\nconst ACCOUNT_2_BONDED_AMOUNT: u64 = 2_000_000;\nconst ACCOUNT_1_BALANCE: u64 = 1_000_000_000;\nconst ACCOUNT_2_BALANCE: u64 = 2_000_000_000;\n\nconst ACCOUNT_3_BONDED_AMOUNT: u64 = 3_000_000;\n\nconst ACCOUNT_3_BALANCE: u64 = 3_000_000_000;\n\nstatic ACCOUNT_1_PUBLIC_KEY: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic ACCOUNT_1_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*ACCOUNT_1_PUBLIC_KEY));\nstatic ACCOUNT_2_PUBLIC_KEY: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\nstatic ACCOUNT_2_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*ACCOUNT_2_PUBLIC_KEY));\n\nstatic ACCOUNT_3_PUBLIC_KEY: Lazy<PublicKey> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([45; SecretKey::ED25519_LENGTH]).unwrap();\n    PublicKey::from(&secret_key)\n});\n\nstatic GENESIS_CUSTOM_ACCOUNTS: Lazy<Vec<GenesisAccount>> = Lazy::new(|| {\n    let account_1 
= {\n        let account_1_balance = Motes::new(ACCOUNT_1_BALANCE);\n        let account_1_bonded_amount = Motes::new(ACCOUNT_1_BONDED_AMOUNT);\n        GenesisAccount::account(\n            ACCOUNT_1_PUBLIC_KEY.clone(),\n            account_1_balance,\n            Some(GenesisValidator::new(\n                account_1_bonded_amount,\n                DelegationRate::zero(),\n            )),\n        )\n    };\n    let account_2 = {\n        let account_2_balance = Motes::new(ACCOUNT_2_BALANCE);\n        let account_2_bonded_amount = Motes::new(ACCOUNT_2_BONDED_AMOUNT);\n        GenesisAccount::account(\n            ACCOUNT_2_PUBLIC_KEY.clone(),\n            account_2_balance,\n            Some(GenesisValidator::new(\n                account_2_bonded_amount,\n                DelegationRate::zero(),\n            )),\n        )\n    };\n    let account_3 = {\n        let account_3_balance = Motes::new(ACCOUNT_3_BALANCE);\n        let account_3_bonded_amount = Motes::new(ACCOUNT_3_BONDED_AMOUNT);\n        GenesisAccount::Delegator {\n            validator_public_key: ACCOUNT_1_PUBLIC_KEY.clone(),\n            delegator_public_key: ACCOUNT_3_PUBLIC_KEY.clone(),\n            balance: account_3_balance,\n            delegated_amount: account_3_bonded_amount,\n        }\n    };\n\n    vec![account_1, account_2, account_3]\n});\n\n#[ignore]\n#[test]\nfn should_run_genesis() {\n    let protocol_version = ProtocolVersion::V1_0_0;\n\n    let run_genesis_request = ChainspecConfig::create_genesis_request_from_local_chainspec(\n        GENESIS_CUSTOM_ACCOUNTS.clone(),\n        protocol_version,\n    )\n    .expect(\"must create genesis request\");\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(run_genesis_request);\n\n    let _system_account = builder\n        .get_entity_by_account_hash(PublicKey::System.to_account_hash())\n        .expect(\"system account should exist\");\n\n    let account_1_addr = builder\n        
.get_entity_hash_by_account_hash(*ACCOUNT_1_ADDR)\n        .expect(\"must get addr for entity account 1\");\n\n    assert_eq!(account_1_addr.value(), ACCOUNT_1_ADDR.value());\n\n    let account_1 = builder\n        .get_entity_by_account_hash(*ACCOUNT_1_ADDR)\n        .expect(\"account 1 should exist\");\n\n    let account_2 = builder\n        .get_entity_by_account_hash(*ACCOUNT_2_ADDR)\n        .expect(\"account 2 should exist\");\n\n    let account_1_balance_actual = builder.get_purse_balance(account_1.main_purse());\n    let account_2_balance_actual = builder.get_purse_balance(account_2.main_purse());\n\n    assert_eq!(account_1_balance_actual, U512::from(ACCOUNT_1_BALANCE));\n    assert_eq!(account_2_balance_actual, U512::from(ACCOUNT_2_BALANCE));\n\n    let mint_contract_key = Key::Hash(builder.get_mint_contract_hash().value());\n    let handle_payment_contract_key = Key::Hash(builder.get_handle_payment_contract_hash().value());\n\n    let result = builder.query(None, mint_contract_key, &[]);\n    if let Ok(StoredValue::Contract(_)) = result {\n        // Contract exists at mint contract hash\n    } else {\n        panic!(\"contract not found at mint hash\");\n    }\n\n    if let Ok(StoredValue::Contract(_)) = builder.query(None, handle_payment_contract_key, &[]) {\n        // Contract exists at handle payment contract hash\n    } else {\n        panic!(\"contract not found at handle payment hash\");\n    }\n}\n\n#[ignore]\n#[test]\nfn should_track_total_token_supply_in_mint() {\n    should_track_total_token(false)\n}\n\n#[ignore]\n#[test]\nfn should_track_total_token_supply_in_mint_with_enable_addressable_entity() {\n    should_track_total_token(true)\n}\n\nfn should_track_total_token(enable_ae: bool) {\n    let accounts = GENESIS_CUSTOM_ACCOUNTS.clone();\n    let wasm_config = *DEFAULT_WASM_CONFIG;\n    let system_config = *DEFAULT_SYSTEM_CONFIG;\n    let protocol_version = DEFAULT_PROTOCOL_VERSION;\n    let validator_slots = DEFAULT_VALIDATOR_SLOTS;\n    
let auction_delay = DEFAULT_AUCTION_DELAY;\n    let locked_funds_period = DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS;\n    let round_seigniorage_rate = DEFAULT_ROUND_SEIGNIORAGE_RATE;\n    let unbonding_delay = DEFAULT_UNBONDING_DELAY;\n    let genesis_timestamp = DEFAULT_GENESIS_TIMESTAMP_MILLIS;\n    let config = GenesisConfigBuilder::default()\n        .with_accounts(accounts.clone())\n        .with_wasm_config(wasm_config)\n        .with_system_config(system_config)\n        .with_validator_slots(validator_slots)\n        .with_auction_delay(auction_delay)\n        .with_locked_funds_period_millis(locked_funds_period)\n        .with_round_seigniorage_rate(round_seigniorage_rate)\n        .with_unbonding_delay(unbonding_delay)\n        .with_genesis_timestamp_millis(genesis_timestamp)\n        .with_storage_costs(*DEFAULT_STORAGE_COSTS)\n        .with_enable_addressable_entity(enable_ae)\n        .build();\n\n    let genesis_request = GenesisRequest::new(\n        GENESIS_CONFIG_HASH.into(),\n        protocol_version,\n        config,\n        DEFAULT_CHAINSPEC_REGISTRY.clone(),\n    );\n\n    let chainspec_config = ChainspecConfig::default().with_enable_addressable_entity(enable_ae);\n\n    let mut builder = LmdbWasmTestBuilder::new_temporary_with_config(chainspec_config);\n\n    builder.run_genesis(genesis_request);\n\n    let total_supply = builder.total_supply(protocol_version, None);\n\n    let expected_balance: U512 = accounts.iter().map(|item| item.balance().value()).sum();\n    let expected_staked_amount: U512 = accounts\n        .iter()\n        .map(|item| item.staked_amount().value())\n        .sum();\n\n    // check total supply against expected\n    assert_eq!(\n        total_supply,\n        expected_balance + expected_staked_amount,\n        \"unexpected total supply\"\n    )\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/system_contracts/handle_payment/finalize_payment.rs",
    "content": "use casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE, SYSTEM_ADDR,\n};\nuse casper_types::{\n    account::AccountHash, runtime_args, system::handle_payment, Key, RuntimeArgs, URef, U512,\n};\n\nconst CONTRACT_FINALIZE_PAYMENT: &str = \"finalize_payment.wasm\";\nconst CONTRACT_TRANSFER_PURSE_TO_ACCOUNT: &str = \"transfer_purse_to_account.wasm\";\nconst FINALIZE_PAYMENT: &str = \"finalize_payment.wasm\";\nconst LOCAL_REFUND_PURSE: &str = \"local_refund_purse\";\n\nconst CREATE_PURSE_01: &str = \"create_purse_01.wasm\";\nconst ARG_PURSE_NAME: &str = \"purse_name\";\n\nconst ACCOUNT_ADDR: AccountHash = AccountHash::new([1u8; 32]);\npub const ARG_AMOUNT: &str = \"amount\";\npub const ARG_AMOUNT_SPENT: &str = \"amount_spent\";\npub const ARG_REFUND_FLAG: &str = \"refund\";\npub const ARG_ACCOUNT_KEY: &str = \"account\";\npub const ARG_TARGET: &str = \"target\";\n\nfn initialize() -> LmdbWasmTestBuilder {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_PURSE_TO_ACCOUNT,\n        runtime_args! {\n            ARG_TARGET => *SYSTEM_ADDR,\n            ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE)\n        },\n    )\n    .build();\n\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_PURSE_TO_ACCOUNT,\n        runtime_args! 
{ ARG_TARGET => ACCOUNT_ADDR, ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) },\n    )\n    .build();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request_1).expect_success().commit();\n\n    builder.exec(exec_request_2).expect_success().commit();\n\n    builder\n}\n\n#[ignore]\n#[test]\nfn finalize_payment_should_not_be_run_by_non_system_accounts() {\n    let mut builder = initialize();\n    let payment_amount = U512::from(300);\n    let spent_amount = U512::from(75);\n    let refund_purse: Option<URef> = None;\n    let args = runtime_args! {\n        ARG_AMOUNT => payment_amount,\n        ARG_REFUND_FLAG => refund_purse,\n        ARG_AMOUNT_SPENT => Some(spent_amount),\n        ARG_ACCOUNT_KEY => Some(ACCOUNT_ADDR),\n    };\n\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_FINALIZE_PAYMENT,\n        args.clone(),\n    )\n    .build();\n    let exec_request_2 =\n        ExecuteRequestBuilder::standard(ACCOUNT_ADDR, CONTRACT_FINALIZE_PAYMENT, args).build();\n\n    assert!(builder.exec(exec_request_1).is_error());\n\n    assert!(builder.exec(exec_request_2).is_error());\n}\n\n#[ignore]\n#[allow(unused)]\n// #[test]\nfn finalize_payment_should_refund_to_specified_purse() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    let payment_amount = *DEFAULT_PAYMENT;\n    let refund_purse_flag: u8 = 1;\n    // Don't need to run finalize_payment manually, it happens during\n    // the deploy because payment code is enabled.\n    let args = runtime_args! 
{\n        ARG_AMOUNT => payment_amount,\n        ARG_REFUND_FLAG => refund_purse_flag,\n        ARG_AMOUNT_SPENT => Option::<U512>::None,\n        ARG_ACCOUNT_KEY => Option::<AccountHash>::None,\n        ARG_PURSE_NAME => LOCAL_REFUND_PURSE,\n    };\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let create_purse_request = {\n        ExecuteRequestBuilder::standard(\n            *DEFAULT_ACCOUNT_ADDR,\n            CREATE_PURSE_01,\n            runtime_args! {\n                ARG_PURSE_NAME => LOCAL_REFUND_PURSE,\n            },\n        )\n        .build()\n    };\n\n    builder.exec(create_purse_request).expect_success().commit();\n\n    let rewards_pre_balance = builder.get_proposer_purse_balance();\n\n    let payment_pre_balance = get_handle_payment_payment_purse_balance(&builder);\n    let refund_pre_balance =\n        get_named_account_balance(&builder, *DEFAULT_ACCOUNT_ADDR, LOCAL_REFUND_PURSE)\n            .unwrap_or_else(U512::zero);\n\n    assert!(\n        get_handle_payment_refund_purse(&builder).is_none(),\n        \"refund_purse should start unset\"\n    );\n    assert!(\n        payment_pre_balance.is_zero(),\n        \"payment purse should start with zero balance\"\n    );\n\n    let genesis_account_hash = *DEFAULT_ACCOUNT_ADDR;\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_deploy_hash([1; 32])\n        .with_session_code(\"do_nothing.wasm\", RuntimeArgs::default())\n        .with_payment_code(FINALIZE_PAYMENT, args)\n        .with_authorization_keys(&[genesis_account_hash])\n        .build();\n\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let proposer_reward_starting_balance = builder.get_proposer_purse_balance();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance;\n\n    let payment_post_balance = 
get_handle_payment_payment_purse_balance(&builder);\n    let rewards_post_balance = builder.get_proposer_purse_balance();\n    let refund_post_balance =\n        get_named_account_balance(&builder, *DEFAULT_ACCOUNT_ADDR, LOCAL_REFUND_PURSE)\n            .expect(\"should have refund balance\");\n    let expected_amount = rewards_pre_balance + transaction_fee;\n    assert_eq!(\n        expected_amount, rewards_post_balance,\n        \"validators should get paid; expected: {}, actual: {}\",\n        expected_amount, rewards_post_balance\n    );\n\n    // user gets refund\n    assert_eq!(\n        refund_pre_balance + payment_amount - transaction_fee,\n        refund_post_balance,\n        \"user should get refund\"\n    );\n\n    assert!(\n        get_handle_payment_refund_purse(&builder).is_none(),\n        \"refund_purse always ends unset\"\n    );\n    assert!(\n        payment_post_balance.is_zero(),\n        \"payment purse should ends with zero balance\"\n    );\n}\n\n// ------------- utility functions -------------------- //\n\nfn get_handle_payment_payment_purse_balance(builder: &LmdbWasmTestBuilder) -> U512 {\n    let purse = get_payment_purse_by_name(builder, handle_payment::PAYMENT_PURSE_KEY)\n        .expect(\"should find handle payment payment purse\");\n    builder.get_purse_balance(purse)\n}\n\nfn get_handle_payment_refund_purse(builder: &LmdbWasmTestBuilder) -> Option<Key> {\n    let handle_payment_contract = builder.get_handle_payment_contract();\n    handle_payment_contract\n        .named_keys()\n        .get(handle_payment::REFUND_PURSE_KEY)\n        .cloned()\n}\n\nfn get_payment_purse_by_name(builder: &LmdbWasmTestBuilder, purse_name: &str) -> Option<URef> {\n    let handle_payment_contract = builder.get_handle_payment_contract();\n    handle_payment_contract\n        .named_keys()\n        .get(purse_name)\n        .and_then(Key::as_uref)\n        .cloned()\n}\n\nfn get_named_account_balance(\n    builder: &LmdbWasmTestBuilder,\n    
account_address: AccountHash,\n    name: &str,\n) -> Option<U512> {\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(account_address)\n        .expect(\"should have account\");\n\n    let purse = account\n        .named_keys()\n        .get(name)\n        .and_then(Key::as_uref)\n        .cloned();\n\n    purse.map(|uref| builder.get_purse_balance(uref))\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/system_contracts/handle_payment/get_payment_purse.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT,\n    LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse casper_types::{account::AccountHash, runtime_args, U512};\n\nconst CONTRACT_GET_PAYMENT_PURSE: &str = \"get_payment_purse.wasm\";\nconst CONTRACT_TRANSFER_PURSE_TO_ACCOUNT: &str = \"transfer_purse_to_account.wasm\";\nconst ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]);\nconst ACCOUNT_1_INITIAL_BALANCE: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE;\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_TARGET: &str = \"target\";\n\n#[ignore]\n#[allow(unused)]\n//#[test]\nfn should_run_get_payment_purse_contract_default_account() {\n    let exec_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_GET_PAYMENT_PURSE,\n        runtime_args! {\n            ARG_AMOUNT => *DEFAULT_PAYMENT,\n        },\n    )\n    .build();\n    LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request)\n        .expect_success()\n        .commit();\n}\n\n#[ignore]\n#[allow(unused)]\n//#[test]\nfn should_run_get_payment_purse_contract_account_1() {\n    let exec_request_1 = ExecuteRequestBuilder::standard(\n       *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_PURSE_TO_ACCOUNT,\n        runtime_args! { ARG_TARGET => ACCOUNT_1_ADDR, ARG_AMOUNT => U512::from(ACCOUNT_1_INITIAL_BALANCE) },\n    )\n        .build();\n    let exec_request_2 = ExecuteRequestBuilder::standard(\n        ACCOUNT_1_ADDR,\n        CONTRACT_GET_PAYMENT_PURSE,\n        runtime_args! {\n            ARG_AMOUNT => *DEFAULT_PAYMENT,\n        },\n    )\n    .build();\n    LmdbWasmTestBuilder::default()\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(exec_request_1)\n        .expect_success()\n        .commit()\n        .exec(exec_request_2)\n        .expect_success()\n        .commit();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/system_contracts/handle_payment/mod.rs",
    "content": "mod finalize_payment;\nmod get_payment_purse;\nmod refund_purse;\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/system_contracts/handle_payment/refund_purse.rs",
    "content": "use casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse casper_types::{account::AccountHash, runtime_args, system::mint, RuntimeArgs, U512};\n\nconst CONTRACT_TRANSFER_PURSE_TO_ACCOUNT: &str = \"transfer_purse_to_account.wasm\";\nconst ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]);\nconst ARG_PAYMENT_AMOUNT: &str = \"payment_amount\";\nconst CREATE_PURSE_01: &str = \"create_purse_01.wasm\";\nconst ARG_PURSE_NAME: &str = \"purse_name\";\nconst ARG_PURSE_NAME_1: &str = \"purse_name_1\";\nconst ARG_PURSE_NAME_2: &str = \"purse_name_2\";\nconst LOCAL_REFUND_PURSE_1: &str = \"local_refund_purse_1\";\nconst LOCAL_REFUND_PURSE_2: &str = \"local_refund_purse_2\";\n\n#[ignore]\n#[test]\nfn should_run_refund_purse_contract_default_account() {\n    let mut builder = initialize();\n    refund_tests(&mut builder, *DEFAULT_ACCOUNT_ADDR);\n}\n\n#[ignore]\n#[test]\nfn should_run_refund_purse_contract_account_1() {\n    let mut builder = initialize();\n    transfer(\n        &mut builder,\n        ACCOUNT_1_ADDR,\n        U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE),\n    );\n    refund_tests(&mut builder, ACCOUNT_1_ADDR);\n}\n\nfn initialize() -> LmdbWasmTestBuilder {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder\n}\n\nfn transfer(builder: &mut LmdbWasmTestBuilder, account_hash: AccountHash, amount: U512) {\n    let exec_request = {\n        ExecuteRequestBuilder::standard(\n            *DEFAULT_ACCOUNT_ADDR,\n            CONTRACT_TRANSFER_PURSE_TO_ACCOUNT,\n            runtime_args! 
{\n                \"target\" => account_hash,\n                \"amount\" => amount,\n            },\n        )\n        .build()\n    };\n\n    builder.exec(exec_request).expect_success().commit();\n}\n\nfn refund_tests(builder: &mut LmdbWasmTestBuilder, account_hash: AccountHash) {\n    let create_purse_request_1 = {\n        ExecuteRequestBuilder::standard(\n            account_hash,\n            CREATE_PURSE_01,\n            runtime_args! {\n                ARG_PURSE_NAME => LOCAL_REFUND_PURSE_1,\n            },\n        )\n        .build()\n    };\n\n    let create_purse_request_2 = {\n        ExecuteRequestBuilder::standard(\n            account_hash,\n            CREATE_PURSE_01,\n            runtime_args! {\n                ARG_PURSE_NAME => LOCAL_REFUND_PURSE_2,\n            },\n        )\n        .build()\n    };\n\n    builder\n        .exec(create_purse_request_1)\n        .expect_success()\n        .commit();\n    builder\n        .exec(create_purse_request_2)\n        .expect_success()\n        .commit();\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(account_hash)\n        .with_deploy_hash([2; 32])\n        .with_session_code(\"do_nothing.wasm\", RuntimeArgs::default())\n        .with_payment_code(\n            \"refund_purse.wasm\",\n            runtime_args! {\n                ARG_PAYMENT_AMOUNT => *DEFAULT_PAYMENT,\n                mint::ARG_AMOUNT => *DEFAULT_PAYMENT,\n                ARG_PURSE_NAME_1 => LOCAL_REFUND_PURSE_1,\n                ARG_PURSE_NAME_2 => LOCAL_REFUND_PURSE_2,\n            },\n        )\n        .with_authorization_keys(&[account_hash])\n        .build();\n\n    let refund_purse_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    builder.exec(refund_purse_request).expect_success().commit();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/system_contracts/mint.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{runtime_args, ProtocolVersion, URef, U512};\n\nuse casper_storage::data_access_layer::BalanceIdentifier;\nuse tempfile::TempDir;\n\n// const TEST_DELEGATOR_INITIAL_ACCOUNT_BALANCE: u64 = 1_000_000 * 1_000_000_000;\n\nconst CONTRACT_BURN: &str = \"burn.wasm\";\nconst CONTRACT_TRANSFER_TO_NAMED_PURSE: &str = \"transfer_to_named_purse.wasm\";\n\nconst ARG_AMOUNT: &str = \"amount\";\n\nconst ARG_PURSE_NAME: &str = \"purse_name\";\n\n#[ignore]\n#[test]\nfn should_empty_purse_when_burning_above_balance() {\n    let data_dir = TempDir::new().expect(\"should create temp dir\");\n    let mut builder = LmdbWasmTestBuilder::new(data_dir.as_ref());\n    let source = *DEFAULT_ACCOUNT_ADDR;\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    // let delegator_keys = auction::generate_public_keys(1);\n    // let validator_keys = auction::generate_public_keys(1);\n\n    // run_genesis_and_create_initial_accounts(\n    //     &mut builder,\n    //     &validator_keys,\n    //     delegator_keys\n    //         .iter()\n    //         .map(|public_key| public_key.to_account_hash())\n    //         .collect::<Vec<_>>(),\n    //     U512::from(TEST_DELEGATOR_INITIAL_ACCOUNT_BALANCE),\n    // );\n\n    let initial_supply = builder.total_supply(ProtocolVersion::V2_0_0, None);\n    let purse_name = \"purse\";\n    let purse_amount = U512::from(10_000_000_000u64);\n\n    // Create purse and transfer tokens to it\n    let exec_request = ExecuteRequestBuilder::standard(\n        source,\n        CONTRACT_TRANSFER_TO_NAMED_PURSE,\n        runtime_args! 
{\n            ARG_PURSE_NAME => purse_name,\n            ARG_AMOUNT => purse_amount,\n        },\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(source)\n        .expect(\"should have account\");\n\n    let purse_uref: URef = account\n        .named_keys()\n        .get(purse_name)\n        .unwrap()\n        .into_uref()\n        .expect(\"should be uref\");\n\n    assert_eq!(\n        builder\n            .get_purse_balance_result_with_proofs(\n                ProtocolVersion::V2_0_0,\n                BalanceIdentifier::Purse(purse_uref)\n            )\n            .total_balance()\n            .cloned()\n            .unwrap(),\n        purse_amount\n    );\n\n    // Burn part of tokens in a purse\n    let num_of_tokens_to_burn = U512::from(2_000_000_000u64);\n    let num_of_tokens_after_burn = U512::from(8_000_000_000u64);\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        source,\n        CONTRACT_BURN,\n        runtime_args! {\n            ARG_PURSE_NAME => purse_name,\n            ARG_AMOUNT => num_of_tokens_to_burn,\n        },\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    assert_eq!(\n        builder\n            .get_purse_balance_result_with_proofs(\n                ProtocolVersion::V2_0_0,\n                BalanceIdentifier::Purse(purse_uref)\n            )\n            .total_balance()\n            .cloned()\n            .unwrap(),\n        num_of_tokens_after_burn\n    );\n\n    // Burn rest of tokens in a purse\n    let num_of_tokens_to_burn = U512::from(8_000_000_000u64);\n    let num_of_tokens_after_burn = U512::zero();\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        source,\n        CONTRACT_BURN,\n        runtime_args! 
{\n            ARG_PURSE_NAME => purse_name,\n            ARG_AMOUNT => num_of_tokens_to_burn,\n        },\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    assert_eq!(\n        builder\n            .get_purse_balance_result_with_proofs(\n                ProtocolVersion::V2_0_0,\n                BalanceIdentifier::Purse(purse_uref)\n            )\n            .total_balance()\n            .cloned()\n            .unwrap(),\n        num_of_tokens_after_burn\n    );\n\n    let supply_after_burns = builder.total_supply(ProtocolVersion::V2_0_0, None);\n    let expected_supply_after_burns = initial_supply - U512::from(10_000_000_000u64);\n\n    assert_eq!(supply_after_burns, expected_supply_after_burns);\n}\n\n#[ignore]\n#[test]\nfn should_not_burn_excess_tokens() {\n    let data_dir = TempDir::new().expect(\"should create temp dir\");\n    let mut builder = LmdbWasmTestBuilder::new(data_dir.as_ref());\n    let source = *DEFAULT_ACCOUNT_ADDR;\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n    // let delegator_keys = auction::generate_public_keys(1);\n    // let validator_keys = auction::generate_public_keys(1);\n    //\n    // run_genesis_and_create_initial_accounts(\n    //     &mut builder,\n    //     &validator_keys,\n    //     delegator_keys\n    //         .iter()\n    //         .map(|public_key| public_key.to_account_hash())\n    //         .collect::<Vec<_>>(),\n    //     U512::from(TEST_DELEGATOR_INITIAL_ACCOUNT_BALANCE),\n    // );\n\n    let initial_supply = builder.total_supply(ProtocolVersion::V2_0_0, None);\n    let purse_name = \"purse\";\n    let purse_amount = U512::from(10_000_000_000u64);\n\n    // Create purse and transfer tokens to it\n    let exec_request = ExecuteRequestBuilder::standard(\n        source,\n        CONTRACT_TRANSFER_TO_NAMED_PURSE,\n        runtime_args! 
{\n            ARG_PURSE_NAME => purse_name,\n            ARG_AMOUNT => purse_amount,\n        },\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(source)\n        .expect(\"should have account\");\n\n    let purse_uref: URef = account\n        .named_keys()\n        .get(purse_name)\n        .unwrap()\n        .into_uref()\n        .expect(\"should be uref\");\n\n    assert_eq!(\n        builder\n            .get_purse_balance_result_with_proofs(\n                ProtocolVersion::V2_0_0,\n                BalanceIdentifier::Purse(purse_uref)\n            )\n            .total_balance()\n            .cloned()\n            .unwrap(),\n        purse_amount\n    );\n\n    // Try to burn more then in a purse\n    let num_of_tokens_to_burn = U512::MAX;\n    let num_of_tokens_after_burn = U512::zero();\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        source,\n        CONTRACT_BURN,\n        runtime_args! {\n            ARG_PURSE_NAME => purse_name,\n            ARG_AMOUNT => num_of_tokens_to_burn,\n        },\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    assert_eq!(\n        builder\n            .get_purse_balance_result_with_proofs(\n                ProtocolVersion::V2_0_0,\n                BalanceIdentifier::Purse(purse_uref)\n            )\n            .total_balance()\n            .cloned()\n            .unwrap(),\n        num_of_tokens_after_burn,\n    );\n\n    let supply_after_burns = builder.total_supply(ProtocolVersion::V2_0_0, None);\n    let expected_supply_after_burns = initial_supply - U512::from(10_000_000_000u64);\n\n    assert_eq!(supply_after_burns, expected_supply_after_burns);\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/system_contracts/mod.rs",
    "content": "mod auction;\nmod auction_bidding;\nmod genesis;\nmod handle_payment;\nmod mint;\nmod standard_payment;\nmod upgrade;\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/system_contracts/standard_payment.rs",
    "content": "use std::collections::HashMap;\n\nuse assert_matches::assert_matches;\n\nuse casper_engine_test_support::{\n    DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_ACCOUNT_KEY, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse casper_execution_engine::{engine_state::Error, execution::ExecError};\nuse casper_types::{\n    account::AccountHash, execution::TransformKindV2, runtime_args, system::handle_payment,\n    ApiError, Key, RuntimeArgs, U512,\n};\n\nconst ACCOUNT_1_ADDR: AccountHash = AccountHash::new([42u8; 32]);\nconst DO_NOTHING_WASM: &str = \"do_nothing.wasm\";\nconst TRANSFER_PURSE_TO_ACCOUNT_WASM: &str = \"transfer_purse_to_account.wasm\";\nconst REVERT_WASM: &str = \"revert.wasm\";\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_TARGET: &str = \"target\";\n\n#[ignore]\n#[test]\nfn should_forward_payment_execution_runtime_error() {\n    let account_1_account_hash = ACCOUNT_1_ADDR;\n    let transferred_amount = U512::from(1);\n\n    let deploy_item = DeployItemBuilder::new()\n            .with_address(*DEFAULT_ACCOUNT_ADDR)\n            .with_deploy_hash([1; 32])\n            .with_payment_code(REVERT_WASM, RuntimeArgs::default())\n            .with_session_code(\n                TRANSFER_PURSE_TO_ACCOUNT_WASM,\n                runtime_args! 
{ ARG_TARGET => account_1_account_hash, ARG_AMOUNT => transferred_amount },\n            )\n            .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY])\n            .build();\n\n    let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    builder.exec(exec_request).commit();\n\n    let exec_result = builder\n        .get_exec_result_owned(0)\n        .expect(\"there should be a response\");\n\n    let error = exec_result.error().expect(\"should have error\");\n    assert_matches!(error, Error::Exec(ExecError::Revert(ApiError::User(100))));\n}\n\n#[ignore]\n#[test]\nfn independent_standard_payments_should_not_write_the_same_keys() {\n    let account_1_account_hash = ACCOUNT_1_ADDR;\n    let payment_purse_amount = *DEFAULT_PAYMENT;\n    let transfer_amount = MINIMUM_ACCOUNT_CREATION_BALANCE;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    let deploy_item = DeployItemBuilder::new()\n            .with_address(*DEFAULT_ACCOUNT_ADDR)\n            .with_session_code(\n                TRANSFER_PURSE_TO_ACCOUNT_WASM,\n                runtime_args! { ARG_TARGET => account_1_account_hash, ARG_AMOUNT => U512::from(transfer_amount) },\n            )\n            .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => payment_purse_amount })\n            .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY])\n            .with_deploy_hash([1; 32])\n            .build();\n\n    let setup_exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    // create another account via transfer\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(setup_exec_request)\n        .expect_success()\n        .commit();\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_session_code(DO_NOTHING_WASM, RuntimeArgs::default())\n        .with_standard_payment(runtime_args! { ARG_AMOUNT => payment_purse_amount })\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY])\n        .with_deploy_hash([2; 32])\n        .build();\n\n    let exec_request_from_genesis = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(ACCOUNT_1_ADDR)\n        .with_session_code(DO_NOTHING_WASM, RuntimeArgs::default())\n        .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => payment_purse_amount })\n        .with_authorization_keys(&[account_1_account_hash])\n        .with_deploy_hash([1; 32])\n        .build();\n\n    let exec_request_from_account_1 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    // run two independent deploys\n    builder\n        .exec(exec_request_from_genesis)\n        .expect_success()\n        .commit()\n        .exec(exec_request_from_account_1)\n        .expect_success()\n        .commit();\n\n    let effects = builder.get_effects();\n    let effects_from_genesis = &effects[1];\n    let effects_from_account_1 = &effects[2];\n\n    // Retrieve the payment purse.\n    let payment_purse = builder\n        .get_handle_payment_contract()\n        .named_keys()\n        .get(handle_payment::PAYMENT_PURSE_KEY)\n        .unwrap()\n        .into_uref()\n        .unwrap();\n\n    let transforms_from_genesis_map: HashMap<Key, TransformKindV2> = effects_from_genesis\n        .transforms()\n        .iter()\n        .map(|transform| (*transform.key(), transform.kind().clone()))\n        .collect();\n    let transforms_from_account_1_map: HashMap<Key, TransformKindV2> = effects_from_account_1\n        .transforms()\n        .iter()\n        .map(|transform| (*transform.key(), transform.kind().clone()))\n        .collect();\n\n    // Confirm the two deploys have no overlapping writes except for the payment purse balance.\n    let common_write_keys = effects_from_genesis\n        .transforms()\n        .iter()\n        .filter_map(|transform| {\n            if transform.key() != &Key::Balance(payment_purse.addr())\n                && matches!(\n                    (\n                        transforms_from_genesis_map.get(transform.key()),\n                        transforms_from_account_1_map.get(transform.key()),\n                    ),\n                    (\n                        Some(TransformKindV2::Write(_)),\n                        Some(TransformKindV2::Write(_))\n             
       )\n                )\n            {\n                Some(*transform.key())\n            } else {\n                None\n            }\n        });\n\n    assert_eq!(common_write_keys.count(), 0);\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/system_contracts/upgrade.rs",
    "content": "use std::collections::BTreeMap;\n\nuse num_rational::Ratio;\n\nuse casper_engine_test_support::{\n    ChainspecConfig, ExecuteRequestBuilder, LmdbWasmTestBuilder, UpgradeRequestBuilder,\n    DEFAULT_ACCOUNT_ADDR, DEFAULT_MAX_ASSOCIATED_KEYS, DEFAULT_UNBONDING_DELAY,\n    LOCAL_GENESIS_REQUEST,\n};\n\nuse crate::{lmdb_fixture, lmdb_fixture::ENTRY_REGISTRY_SPECIAL_ADDRESS};\nuse casper_types::{\n    account::{AccountHash, ACCOUNT_HASH_LENGTH},\n    contracts::NamedKeys,\n    runtime_args,\n    system::{\n        self,\n        auction::{\n            DelegatorKind, SeigniorageRecipientsSnapshotV1, SeigniorageRecipientsSnapshotV2,\n            AUCTION_DELAY_KEY, DEFAULT_SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION,\n            LOCKED_FUNDS_PERIOD_KEY, MINIMUM_DELEGATION_RATE_KEY,\n            SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY,\n            UNBONDING_DELAY_KEY, VALIDATOR_SLOTS_KEY,\n        },\n        mint::ROUND_SEIGNIORAGE_RATE_KEY,\n    },\n    Account, AddressableEntityHash, CLValue, CoreConfig, EntityAddr, EraId, Key, ProtocolVersion,\n    StorageCosts, StoredValue, SystemHashRegistry, U256, U512,\n};\nuse rand::Rng;\n\nconst PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::V1_0_0;\nconst DEFAULT_ACTIVATION_POINT: EraId = EraId::new(1);\nconst ARG_ACCOUNT: &str = \"account\";\n\n#[ignore]\n#[test]\nfn should_upgrade_only_protocol_version() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    // let old_wasm_config = *builder.get_engine_state().config().wasm_config();\n\n    let sem_ver = PROTOCOL_VERSION.value();\n    let new_protocol_version =\n        ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1);\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(PROTOCOL_VERSION)\n            .with_new_protocol_version(new_protocol_version)\n         
   .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .build()\n    };\n\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    // let upgraded_engine_config = builder.get_engine_state().config();\n    //\n    // assert_eq!(\n    //     old_wasm_config,\n    //     *upgraded_engine_config.wasm_config(),\n    //     \"upgraded costs should equal original costs\"\n    // );\n}\n\n#[ignore]\n#[test]\nfn should_allow_only_wasm_costs_patch_version() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let sem_ver = PROTOCOL_VERSION.value();\n    let new_protocol_version =\n        ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 2);\n\n    // let new_wasm_config = get_upgraded_wasm_config();\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(PROTOCOL_VERSION)\n            .with_new_protocol_version(new_protocol_version)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .build()\n    };\n\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    // let upgraded_engine_config = builder.get_engine_state().config();\n\n    // assert_eq!(\n    //     new_wasm_config,\n    //     *upgraded_engine_config.wasm_config(),\n    //     \"upgraded costs should equal new costs\"\n    // );\n}\n\n#[ignore]\n#[test]\nfn should_allow_only_wasm_costs_minor_version() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let sem_ver = PROTOCOL_VERSION.value();\n    let new_protocol_version =\n        ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor + 1, sem_ver.patch);\n\n    // let new_wasm_config = get_upgraded_wasm_config();\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            
.with_current_protocol_version(PROTOCOL_VERSION)\n            .with_new_protocol_version(new_protocol_version)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .build()\n    };\n\n    // let engine_config = EngineConfigBuilder::default()\n    //     .with_wasm_config(new_wasm_config)\n    //     .build();\n\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n    //\n    // let upgraded_engine_config = builder.get_engine_state().config();\n    //\n    // assert_eq!(\n    //     new_wasm_config,\n    //     *upgraded_engine_config.wasm_config(),\n    //     \"upgraded costs should equal new costs\"\n    // );\n}\n\n#[ignore]\n#[test]\nfn should_not_downgrade() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    // let old_wasm_config = *builder.get_engine_state().config().wasm_config();\n\n    let new_protocol_version = ProtocolVersion::from_parts(2, 0, 0);\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(PROTOCOL_VERSION)\n            .with_new_protocol_version(new_protocol_version)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .build()\n    };\n\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let mut downgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(new_protocol_version)\n            .with_new_protocol_version(PROTOCOL_VERSION)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .build()\n    };\n\n    builder.upgrade(&mut downgrade_request);\n\n    let upgrade_result = builder.get_upgrade_result(1).expect(\"should have response\");\n\n    assert!(\n        !upgrade_result.is_success(),\n        \"expected failure got {:?}\",\n        upgrade_result\n    );\n}\n\n#[ignore]\n#[test]\nfn 
should_not_skip_major_versions() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let sem_ver = PROTOCOL_VERSION.value();\n\n    let invalid_version =\n        ProtocolVersion::from_parts(sem_ver.major + 2, sem_ver.minor, sem_ver.patch);\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(PROTOCOL_VERSION)\n            .with_new_protocol_version(invalid_version)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .build()\n    };\n\n    builder.upgrade(&mut upgrade_request);\n\n    let upgrade_result = builder.get_upgrade_result(0).expect(\"should have response\");\n\n    assert!(upgrade_result.is_err(), \"expected failure\");\n}\n\n#[ignore]\n#[test]\nfn should_allow_skip_minor_versions() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let sem_ver = PROTOCOL_VERSION.value();\n\n    // can skip minor versions as long as they are higher than current version\n    let valid_new_version =\n        ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor + 2, sem_ver.patch);\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(PROTOCOL_VERSION)\n            .with_new_protocol_version(valid_new_version)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .build()\n    };\n\n    builder.upgrade(&mut upgrade_request);\n\n    let upgrade_result = builder.get_upgrade_result(0).expect(\"should have response\");\n\n    assert!(upgrade_result.is_success(), \"expected success\");\n}\n\n#[ignore]\n#[test]\nfn should_upgrade_only_validator_slots() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let sem_ver = PROTOCOL_VERSION.value();\n    let new_protocol_version =\n        
ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1);\n\n    let validator_slot_key = *builder\n        .get_named_keys(EntityAddr::System(\n            builder.get_auction_contract_hash().value(),\n        ))\n        .get(VALIDATOR_SLOTS_KEY)\n        .unwrap();\n\n    let before_validator_slots: u32 = builder\n        .query(None, validator_slot_key, &[])\n        .expect(\"should have validator slots\")\n        .as_cl_value()\n        .expect(\"should be CLValue\")\n        .clone()\n        .into_t()\n        .expect(\"should be u32\");\n\n    let new_validator_slots = before_validator_slots + 1;\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(PROTOCOL_VERSION)\n            .with_new_protocol_version(new_protocol_version)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .with_new_validator_slots(new_validator_slots)\n            .build()\n    };\n\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let after_validator_slots: u32 = builder\n        .query(None, validator_slot_key, &[])\n        .expect(\"should have validator slots\")\n        .as_cl_value()\n        .expect(\"should be CLValue\")\n        .clone()\n        .into_t()\n        .expect(\"should be u32\");\n\n    assert_eq!(\n        new_validator_slots, after_validator_slots,\n        \"should have upgraded validator slots to expected value\"\n    )\n}\n\n#[ignore]\n#[test]\nfn should_upgrade_only_auction_delay() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let sem_ver = PROTOCOL_VERSION.value();\n    let new_protocol_version =\n        ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1);\n\n    let auction_delay_key = *builder\n        .get_named_keys(EntityAddr::System(\n            builder.get_auction_contract_hash().value(),\n     
   ))\n        .get(AUCTION_DELAY_KEY)\n        .unwrap();\n\n    let before_auction_delay: u64 = builder\n        .query(None, auction_delay_key, &[])\n        .expect(\"should have auction delay\")\n        .as_cl_value()\n        .expect(\"should be a CLValue\")\n        .clone()\n        .into_t()\n        .expect(\"should be u64\");\n\n    let new_auction_delay = before_auction_delay + 1;\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(PROTOCOL_VERSION)\n            .with_new_protocol_version(new_protocol_version)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .with_new_auction_delay(new_auction_delay)\n            .build()\n    };\n\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let after_auction_delay: u64 = builder\n        .query(None, auction_delay_key, &[])\n        .expect(\"should have auction delay\")\n        .as_cl_value()\n        .expect(\"should be a CLValue\")\n        .clone()\n        .into_t()\n        .expect(\"should be u64\");\n\n    assert_eq!(\n        new_auction_delay, after_auction_delay,\n        \"should hae upgrade version auction delay\"\n    )\n}\n\n#[ignore]\n#[test]\nfn should_upgrade_only_locked_funds_period() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let sem_ver = PROTOCOL_VERSION.value();\n    let new_protocol_version =\n        ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1);\n\n    let locked_funds_period_key = *builder\n        .get_named_keys(EntityAddr::System(\n            builder.get_auction_contract_hash().value(),\n        ))\n        .get(LOCKED_FUNDS_PERIOD_KEY)\n        .unwrap();\n\n    let before_locked_funds_period_millis: u64 = builder\n        .query(None, locked_funds_period_key, &[])\n        .expect(\"should have locked funds period\")\n        
.as_cl_value()\n        .expect(\"should be a CLValue\")\n        .clone()\n        .into_t()\n        .expect(\"should be u64\");\n\n    let new_locked_funds_period_millis = before_locked_funds_period_millis + 1;\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(PROTOCOL_VERSION)\n            .with_new_protocol_version(new_protocol_version)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .with_new_locked_funds_period_millis(new_locked_funds_period_millis)\n            .build()\n    };\n\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let after_locked_funds_period_millis: u64 = builder\n        .query(None, locked_funds_period_key, &[])\n        .expect(\"should have locked funds period\")\n        .as_cl_value()\n        .expect(\"should be a CLValue\")\n        .clone()\n        .into_t()\n        .expect(\"should be u64\");\n\n    assert_eq!(\n        new_locked_funds_period_millis, after_locked_funds_period_millis,\n        \"Should have upgraded locked funds period\"\n    )\n}\n\n#[ignore]\n#[test]\nfn should_upgrade_only_round_seigniorage_rate() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let sem_ver = PROTOCOL_VERSION.value();\n    let new_protocol_version =\n        ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1);\n\n    let keys = builder.get_named_keys(EntityAddr::System(builder.get_mint_contract_hash().value()));\n\n    let round_seigniorage_rate_key = *keys.get(ROUND_SEIGNIORAGE_RATE_KEY).unwrap();\n\n    let before_round_seigniorage_rate: Ratio<U512> = builder\n        .query(None, round_seigniorage_rate_key, &[])\n        .expect(\"should have locked funds period\")\n        .as_cl_value()\n        .expect(\"should be a CLValue\")\n        .clone()\n        .into_t()\n        .expect(\"should be 
u64\");\n\n    let new_round_seigniorage_rate = Ratio::new(1, 1_000_000_000);\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(PROTOCOL_VERSION)\n            .with_new_protocol_version(new_protocol_version)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .with_new_round_seigniorage_rate(new_round_seigniorage_rate)\n            .build()\n    };\n\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let after_round_seigniorage_rate: Ratio<U512> = builder\n        .query(None, round_seigniorage_rate_key, &[])\n        .expect(\"should have locked funds period\")\n        .as_cl_value()\n        .expect(\"should be a CLValue\")\n        .clone()\n        .into_t()\n        .expect(\"should be u64\");\n\n    assert_ne!(before_round_seigniorage_rate, after_round_seigniorage_rate);\n\n    let expected_round_seigniorage_rate = Ratio::new(\n        U512::from(*new_round_seigniorage_rate.numer()),\n        U512::from(*new_round_seigniorage_rate.denom()),\n    );\n\n    assert_eq!(\n        expected_round_seigniorage_rate, after_round_seigniorage_rate,\n        \"Should have upgraded locked funds period\"\n    );\n}\n\n#[ignore]\n#[test]\nfn should_upgrade_only_unbonding_delay() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let sem_ver = PROTOCOL_VERSION.value();\n    let new_protocol_version =\n        ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1);\n\n    let entity_addr = EntityAddr::System(builder.get_auction_contract_hash().value());\n\n    let unbonding_delay_key = *builder\n        .get_named_keys(entity_addr)\n        .get(UNBONDING_DELAY_KEY)\n        .unwrap();\n\n    let before_unbonding_delay: u64 = builder\n        .query(None, unbonding_delay_key, &[])\n        .expect(\"should have locked funds period\")\n        
.as_cl_value()\n        .expect(\"should be a CLValue\")\n        .clone()\n        .into_t()\n        .expect(\"should be u64\");\n\n    let new_unbonding_delay = DEFAULT_UNBONDING_DELAY + 5;\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(PROTOCOL_VERSION)\n            .with_new_protocol_version(new_protocol_version)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .with_new_unbonding_delay(new_unbonding_delay)\n            .build()\n    };\n\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let after_unbonding_delay: u64 = builder\n        .query(None, unbonding_delay_key, &[])\n        .expect(\"should have locked funds period\")\n        .as_cl_value()\n        .expect(\"should be a CLValue\")\n        .clone()\n        .into_t()\n        .expect(\"should be u64\");\n\n    assert_ne!(before_unbonding_delay, new_unbonding_delay);\n\n    assert_eq!(\n        new_unbonding_delay, after_unbonding_delay,\n        \"Should have upgraded locked funds period\"\n    );\n}\n\n#[ignore]\n#[test]\nfn should_apply_global_state_upgrade() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let sem_ver = PROTOCOL_VERSION.value();\n    let new_protocol_version =\n        ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1);\n\n    // We'll try writing directly to this key.\n    let unbonding_delay_key = *builder\n        .get_named_keys(EntityAddr::System(\n            builder.get_auction_contract_hash().value(),\n        ))\n        .get(UNBONDING_DELAY_KEY)\n        .unwrap();\n\n    let before_unbonding_delay: u64 = builder\n        .query(None, unbonding_delay_key, &[])\n        .expect(\"should have locked funds period\")\n        .as_cl_value()\n        .expect(\"should be a CLValue\")\n        .clone()\n        .into_t()\n        
.expect(\"should be u64\");\n\n    let new_unbonding_delay = DEFAULT_UNBONDING_DELAY + 5;\n\n    let mut update_map = BTreeMap::new();\n    update_map.insert(\n        unbonding_delay_key,\n        StoredValue::from(CLValue::from_t(new_unbonding_delay).expect(\"should create a CLValue\")),\n    );\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(PROTOCOL_VERSION)\n            .with_new_protocol_version(new_protocol_version)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .with_global_state_update(update_map)\n            .build()\n    };\n\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let after_unbonding_delay: u64 = builder\n        .query(None, unbonding_delay_key, &[])\n        .expect(\"should have locked funds period\")\n        .as_cl_value()\n        .expect(\"should be a CLValue\")\n        .clone()\n        .into_t()\n        .expect(\"should be u64\");\n\n    assert_ne!(before_unbonding_delay, new_unbonding_delay);\n\n    assert_eq!(\n        new_unbonding_delay, after_unbonding_delay,\n        \"Should have modified locked funds period\"\n    );\n}\n\n#[ignore]\n#[test]\nfn should_increase_max_associated_keys_after_upgrade() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let sem_ver = PROTOCOL_VERSION.value();\n    let new_protocol_version =\n        ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1);\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(PROTOCOL_VERSION)\n            .with_new_protocol_version(new_protocol_version)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .build()\n    };\n\n    let enable_entity = false;\n    let max_associated_keys = DEFAULT_MAX_ASSOCIATED_KEYS + 1;\n    let core_config = 
CoreConfig {\n        max_associated_keys,\n        enable_addressable_entity: enable_entity,\n        ..Default::default()\n    };\n\n    let chainspec = ChainspecConfig {\n        core_config,\n        wasm_config: Default::default(),\n        system_costs_config: Default::default(),\n        storage_costs: StorageCosts::default(),\n    };\n    builder.with_chainspec(chainspec);\n\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    for n in (0..DEFAULT_MAX_ASSOCIATED_KEYS).map(U256::from) {\n        let account_hash = {\n            let mut addr = [0; ACCOUNT_HASH_LENGTH];\n            n.to_big_endian(&mut addr);\n            AccountHash::new(addr)\n        };\n\n        let add_request = ExecuteRequestBuilder::standard(\n            *DEFAULT_ACCOUNT_ADDR,\n            \"add_update_associated_key.wasm\",\n            runtime_args! {\n                ARG_ACCOUNT => account_hash,\n            },\n        )\n        .build();\n\n        builder.exec(add_request).expect_success().commit();\n    }\n\n    let account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should get account\");\n\n    assert!(account.associated_keys().len() > DEFAULT_MAX_ASSOCIATED_KEYS as usize);\n    assert_eq!(\n        account.associated_keys().len(),\n        max_associated_keys as usize\n    );\n}\n\n#[ignore]\n#[test]\nfn should_correctly_migrate_and_prune_system_contract_records() {\n    let (mut builder, lmdb_fixture_state, _temp_dir) =\n        lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_3_1);\n\n    let legacy_system_entity_registry = {\n        let stored_value: StoredValue = builder\n            .query(None, ENTRY_REGISTRY_SPECIAL_ADDRESS, &[])\n            .expect(\"should query system entity registry\");\n        let cl_value = stored_value\n            .as_cl_value()\n            .cloned()\n            .expect(\"should have cl value\");\n        let registry: 
SystemHashRegistry = cl_value.into_t().expect(\"should have system registry\");\n        registry\n    };\n\n    let old_protocol_version = lmdb_fixture_state.genesis_protocol_version();\n\n    let mut global_state_update = BTreeMap::<Key, StoredValue>::new();\n\n    let registry = CLValue::from_t(legacy_system_entity_registry.clone())\n        .expect(\"must convert to StoredValue\")\n        .into();\n\n    global_state_update.insert(Key::SystemEntityRegistry, registry);\n\n    let mut upgrade_request = UpgradeRequestBuilder::new()\n        .with_current_protocol_version(old_protocol_version)\n        .with_new_protocol_version(ProtocolVersion::from_parts(2, 0, 0))\n        .with_activation_point(DEFAULT_ACTIVATION_POINT)\n        .with_global_state_update(global_state_update)\n        .build();\n\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let system_names = vec![system::MINT, system::AUCTION, system::HANDLE_PAYMENT];\n\n    for name in system_names {\n        let legacy_hash = *legacy_system_entity_registry\n            .get(name)\n            .expect(\"must have hash\");\n        let legacy_contract_key = Key::Hash(legacy_hash);\n        let _legacy_query = builder.query(None, legacy_contract_key, &[]);\n\n        builder\n            .get_addressable_entity(AddressableEntityHash::new(legacy_hash))\n            .expect(\"must have system entity\");\n    }\n}\n\n#[test]\nfn should_not_migrate_bids_with_invalid_min_max_delegation_amounts() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let sem_ver = PROTOCOL_VERSION.value();\n    let new_protocol_version =\n        ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1);\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(PROTOCOL_VERSION)\n            .with_new_protocol_version(new_protocol_version)\n 
           .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .with_maximum_delegation_amount(250_000_000_000)\n            .with_minimum_delegation_amount(500_000_000_000)\n            .build()\n    };\n\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_failure();\n}\n\n#[test]\nfn should_upgrade_legacy_accounts() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let mut rng = rand::thread_rng();\n    let account_data = (0..10000).map(|_| {\n        let account_hash = rng.gen();\n        let main_purse_uref = rng.gen();\n\n        let account_key = Key::Account(account_hash);\n        let account_value = StoredValue::Account(Account::create(\n            account_hash,\n            NamedKeys::new(),\n            main_purse_uref,\n        ));\n\n        (account_key, account_value)\n    });\n\n    builder.write_data_and_commit(account_data);\n\n    let sem_ver = PROTOCOL_VERSION.value();\n    let new_protocol_version =\n        ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1);\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(PROTOCOL_VERSION)\n            .with_new_protocol_version(new_protocol_version)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .with_minimum_delegation_amount(250_000_000_000)\n            .with_maximum_delegation_amount(500_000_000_000)\n            .build()\n    };\n\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n}\n\n#[ignore]\n#[test]\nfn should_migrate_seigniorage_snapshot_to_new_version() {\n    let (mut builder, lmdb_fixture_state, _temp_dir) =\n        lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_4_3);\n\n    let auction_contract_hash = builder.get_auction_contract_hash();\n\n    // get legacy auction contract\n    let auction_contract = 
builder\n        .query(None, Key::Hash(auction_contract_hash.value()), &[])\n        .expect(\"should have auction contract\")\n        .into_contract()\n        .expect(\"should have legacy Contract under the Key::Contract variant\");\n\n    // check that snapshot version key does not exist yet\n    let auction_named_keys = auction_contract.named_keys();\n    let maybe_snapshot_version_named_key =\n        auction_named_keys.get(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY);\n    assert!(maybe_snapshot_version_named_key.is_none());\n\n    // fetch legacy snapshot\n    let legacy_seigniorage_snapshot: SeigniorageRecipientsSnapshotV1 = {\n        let snapshot_key = auction_named_keys\n            .get(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY)\n            .expect(\"snapshot named key should exist\");\n        builder\n            .query(None, *snapshot_key, &[])\n            .expect(\"should have seigniorage snapshot\")\n            .as_cl_value()\n            .expect(\"should be a CLValue\")\n            .clone()\n            .into_t()\n            .expect(\"should be SeigniorageRecipientsSnapshotV1\")\n    };\n\n    // prepare upgrade request\n    let old_protocol_version = lmdb_fixture_state.genesis_protocol_version();\n    let mut upgrade_request = UpgradeRequestBuilder::new()\n        .with_current_protocol_version(old_protocol_version)\n        .with_new_protocol_version(ProtocolVersion::from_parts(2, 0, 0))\n        .with_activation_point(DEFAULT_ACTIVATION_POINT)\n        .build();\n\n    // execute upgrade\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    // fetch updated named keys\n    let auction_named_keys =\n        builder.get_named_keys(EntityAddr::System(auction_contract_hash.value()));\n\n    // check that snapshot version named key was populated\n    let snapshot_version_key = auction_named_keys\n        .get(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY)\n        .expect(\"auction should have snapshot 
version named key\");\n    let snapshot_version: u8 = builder\n        .query(None, *snapshot_version_key, &[])\n        .expect(\"should have seigniorage snapshot version\")\n        .as_cl_value()\n        .expect(\"should be a CLValue\")\n        .clone()\n        .into_t()\n        .expect(\"should be u8\");\n    assert_eq!(\n        snapshot_version,\n        DEFAULT_SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION\n    );\n\n    // fetch new snapshot\n    let seigniorage_snapshot: SeigniorageRecipientsSnapshotV2 = {\n        let snapshot_key = auction_named_keys\n            .get(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY)\n            .expect(\"snapshot named key should exist\");\n        builder\n            .query(None, *snapshot_key, &[])\n            .expect(\"should have seigniorage snapshot\")\n            .as_cl_value()\n            .expect(\"should be a CLValue\")\n            .clone()\n            .into_t()\n            .expect(\"should be SeigniorageRecipientsSnapshotV2\")\n    };\n\n    // compare snapshots\n    for era_id in legacy_seigniorage_snapshot.keys() {\n        let legacy_seigniorage_recipients = legacy_seigniorage_snapshot.get(era_id).unwrap();\n        let new_seigniorage_recipient = seigniorage_snapshot.get(era_id).unwrap();\n\n        for pubkey in legacy_seigniorage_recipients.keys() {\n            let legacy_recipient = legacy_seigniorage_recipients.get(pubkey).unwrap();\n            let new_recipient = new_seigniorage_recipient.get(pubkey).unwrap();\n\n            assert_eq!(legacy_recipient.stake(), new_recipient.stake());\n            assert_eq!(\n                legacy_recipient.delegation_rate(),\n                new_recipient.delegation_rate()\n            );\n            for pk in legacy_recipient.delegator_stake().keys() {\n                assert!(new_recipient\n                    .delegator_stake()\n                    .contains_key(&DelegatorKind::PublicKey(pk.clone())))\n            }\n        }\n    }\n}\n\n#[test]\nfn 
should_store_and_upgrade_minimum_delegation_rate_named_key() {\n    const UPGRADED_MINIMUM_DELEGATION_RATE: u8 = 20;\n\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let auction_contract_hash = builder.get_auction_contract_hash();\n    let auction_named_keys =\n        builder.get_named_keys(EntityAddr::System(auction_contract_hash.value()));\n    let minimum_delegation_rate_key = *auction_named_keys\n        .get(MINIMUM_DELEGATION_RATE_KEY)\n        .expect(\"minimum delegation rate key should exist at genesis\");\n\n    let minimum_delegation_rate: u8 = builder\n        .query(None, minimum_delegation_rate_key, &[])\n        .expect(\"should have minimum delegation rate\")\n        .as_cl_value()\n        .expect(\"minimum delegation rate should be a CLValue\")\n        .clone()\n        .into_t()\n        .expect(\"minimum delegation rate should be u8\");\n\n    assert_eq!(\n        minimum_delegation_rate, 0,\n        \"genesis should have set minimum delegation rate to 0!\"\n    );\n\n    let sem_ver = PROTOCOL_VERSION.value();\n    let new_protocol_version =\n        ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1);\n\n    let mut upgrade_request = UpgradeRequestBuilder::new()\n        .with_current_protocol_version(PROTOCOL_VERSION)\n        .with_new_protocol_version(new_protocol_version)\n        .with_activation_point(DEFAULT_ACTIVATION_POINT)\n        .with_new_minimum_delegation_rate(UPGRADED_MINIMUM_DELEGATION_RATE)\n        .build();\n\n    builder\n        .upgrade(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let upgraded_auction_contract_hash = builder.get_auction_contract_hash();\n    let upgraded_named_keys =\n        builder.get_named_keys(EntityAddr::System(upgraded_auction_contract_hash.value()));\n    let upgraded_minimum_delegation_rate_key = *upgraded_named_keys\n        .get(MINIMUM_DELEGATION_RATE_KEY)\n        
.expect(\"minimum delegation rate key should exist after upgrade\");\n    let upgraded_minimum_delegation_rate: u8 = builder\n        .query(None, upgraded_minimum_delegation_rate_key, &[])\n        .expect(\"should have upgraded minimum delegation rate\")\n        .as_cl_value()\n        .expect(\"minimum delegation rate should be a CLValue\")\n        .clone()\n        .into_t()\n        .expect(\"minimum delegation rate should be u8\");\n\n    assert_eq!(\n        upgraded_minimum_delegation_rate,\n        UPGRADED_MINIMUM_DELEGATION_RATE\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/system_costs.rs",
    "content": "use num_traits::Zero;\nuse once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    utils, ChainspecConfig, DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder,\n    UpgradeRequestBuilder, DEFAULT_ACCOUNTS, DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE,\n    DEFAULT_ACCOUNT_PUBLIC_KEY, DEFAULT_MAX_ASSOCIATED_KEYS, DEFAULT_MINIMUM_DELEGATION_AMOUNT,\n    DEFAULT_PAYMENT, DEFAULT_PROTOCOL_VERSION, LOCAL_GENESIS_REQUEST,\n    MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse casper_types::{\n    runtime_args,\n    system::{\n        auction::{self, DelegationRate},\n        handle_payment, mint, AUCTION,\n    },\n    AuctionCosts, BrTableCost, ControlFlowCosts, CoreConfig, EraId, Gas, GenesisAccount,\n    GenesisValidator, HandlePaymentCosts, HostFunction, HostFunctionCost, HostFunctionCostsV1,\n    HostFunctionCostsV2, MessageLimits, MintCosts, Motes, OpcodeCosts, ProtocolVersion, PublicKey,\n    RuntimeArgs, SecretKey, StandardPaymentCosts, StorageCosts, SystemConfig, WasmConfig,\n    WasmV1Config, WasmV2Config, DEFAULT_ADD_BID_COST, DEFAULT_MAX_STACK_HEIGHT,\n    DEFAULT_MINIMUM_BID_AMOUNT, DEFAULT_WASM_MAX_MEMORY, U512,\n};\n\nuse crate::wasm_utils;\n\nconst SYSTEM_CONTRACT_HASHES_NAME: &str = \"system_contract_hashes.wasm\";\nconst CONTRACT_ADD_BID: &str = \"add_bid.wasm\";\nconst CONTRACT_TRANSFER_TO_NAMED_PURSE: &str = \"transfer_to_named_purse.wasm\";\n\nstatic VALIDATOR_1_SECRET_KEY: Lazy<SecretKey> =\n    Lazy::new(|| SecretKey::ed25519_from_bytes([123; SecretKey::ED25519_LENGTH]).unwrap());\nstatic VALIDATOR_1: Lazy<PublicKey> = Lazy::new(|| PublicKey::from(&*VALIDATOR_1_SECRET_KEY));\nconst VALIDATOR_1_STAKE: u64 = 250_000;\nstatic VALIDATOR_2_SECRET_KEY: Lazy<SecretKey> =\n    Lazy::new(|| SecretKey::ed25519_from_bytes([124; SecretKey::ED25519_LENGTH]).unwrap());\nstatic VALIDATOR_2: Lazy<PublicKey> = Lazy::new(|| PublicKey::from(&*VALIDATOR_2_SECRET_KEY));\nconst BOND_AMOUNT: u64 = DEFAULT_MINIMUM_BID_AMOUNT + 
42;\nconst BID_AMOUNT: u64 = 99 + DEFAULT_MINIMUM_DELEGATION_AMOUNT;\nconst TRANSFER_AMOUNT: u64 = 123;\nconst BID_DELEGATION_RATE: DelegationRate = auction::DELEGATION_RATE_DENOMINATOR;\nconst UPDATED_CALL_CONTRACT_COST: HostFunctionCost = 12_345;\nconst NEW_ADD_BID_COST: u64 = 2_500_000_000;\nconst NEW_WITHDRAW_BID_COST: u32 = 2_500_000_000;\nconst NEW_DELEGATE_COST: u32 = 2_500_000_000;\nconst NEW_UNDELEGATE_COST: u32 = NEW_DELEGATE_COST;\nconst NEW_REDELEGATE_COST: u32 = NEW_DELEGATE_COST;\nconst DEFAULT_ACTIVATION_POINT: EraId = EraId::new(1);\n\nconst OLD_PROTOCOL_VERSION: ProtocolVersion = DEFAULT_PROTOCOL_VERSION;\nconst NEW_PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::from_parts(\n    OLD_PROTOCOL_VERSION.value().major,\n    OLD_PROTOCOL_VERSION.value().minor,\n    OLD_PROTOCOL_VERSION.value().patch + 1,\n);\n\nconst ARG_PURSE_NAME: &str = \"purse_name\";\nconst NAMED_PURSE_NAME: &str = \"purse_1\";\nconst ARG_AMOUNT: &str = \"amount\";\n\n#[ignore]\n#[test]\nfn add_bid_and_withdraw_bid_have_expected_costs() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let system_contract_hashes_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        SYSTEM_CONTRACT_HASHES_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n    builder\n        .exec(system_contract_hashes_request)\n        .expect_success()\n        .commit();\n\n    let entity = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        entity\n            .named_keys()\n            .get(AUCTION)\n            .unwrap()\n            .into_entity_hash_addr()\n            .unwrap()\n            .into(),\n        auction::METHOD_ADD_BID,\n        runtime_args! 
{\n            auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            auction::ARG_AMOUNT => U512::from(BOND_AMOUNT),\n            auction::ARG_DELEGATION_RATE => BID_DELEGATION_RATE,\n        },\n    )\n    .build();\n\n    let balance_before = builder.get_purse_balance(entity.main_purse());\n\n    let proposer_reward_starting_balance_1 = builder.get_proposer_purse_balance();\n\n    builder.exec(add_bid_request).expect_success().commit();\n    let balance_after = builder.get_purse_balance(entity.main_purse());\n\n    let transaction_fee_1 =\n        builder.get_proposer_purse_balance() - proposer_reward_starting_balance_1;\n\n    let system_config = builder.chainspec().system_costs_config;\n    let expected_call_cost = U512::from(system_config.auction_costs().add_bid);\n    assert_eq!(\n        balance_after,\n        balance_before - U512::from(BOND_AMOUNT) - transaction_fee_1\n    );\n    assert_eq!(builder.last_exec_gas_consumed().value(), expected_call_cost);\n\n    // Withdraw bid\n    let withdraw_bid_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        entity\n            .named_keys()\n            .get(AUCTION)\n            .unwrap()\n            .into_entity_hash_addr()\n            .unwrap()\n            .into(),\n        auction::METHOD_WITHDRAW_BID,\n        runtime_args! 
{\n            auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            auction::ARG_AMOUNT => U512::from(BOND_AMOUNT),\n        },\n    )\n    .build();\n\n    let balance_before = builder.get_purse_balance(entity.main_purse());\n\n    let proposer_reward_starting_balance_2 = builder.get_proposer_purse_balance();\n\n    builder.exec(withdraw_bid_request).expect_success().commit();\n\n    let balance_after = builder.get_purse_balance(entity.main_purse());\n\n    let transaction_fee_2 =\n        builder.get_proposer_purse_balance() - proposer_reward_starting_balance_2;\n\n    let system_config = builder.chainspec().system_costs_config;\n    let expected_call_cost = U512::from(system_config.auction_costs().withdraw_bid);\n    assert_eq!(balance_after, balance_before - transaction_fee_2);\n    assert_eq!(builder.last_exec_gas_consumed().value(), expected_call_cost);\n}\n\n#[ignore]\n#[test]\nfn upgraded_add_bid_and_withdraw_bid_have_expected_costs() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(OLD_PROTOCOL_VERSION)\n            .with_new_protocol_version(NEW_PROTOCOL_VERSION)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .build()\n    };\n\n    builder.upgrade(&mut upgrade_request);\n\n    let system_contract_hashes_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        SYSTEM_CONTRACT_HASHES_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n    builder\n        .exec(system_contract_hashes_request)\n        .expect_success()\n        .commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n  
      account\n            .named_keys()\n            .get(AUCTION)\n            .unwrap()\n            .into_entity_hash_addr()\n            .unwrap()\n            .into(),\n        auction::METHOD_ADD_BID,\n        runtime_args! {\n            auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            auction::ARG_AMOUNT => U512::from(BOND_AMOUNT),\n            auction::ARG_DELEGATION_RATE => BID_DELEGATION_RATE,\n        },\n    )\n    .build();\n\n    let balance_before = builder.get_purse_balance(account.main_purse());\n\n    let proposer_reward_starting_balance_1 = builder.get_proposer_purse_balance();\n\n    builder.exec(add_bid_request).expect_success().commit();\n\n    let balance_after = builder.get_purse_balance(account.main_purse());\n\n    let transaction_fee_1 =\n        builder.get_proposer_purse_balance() - proposer_reward_starting_balance_1;\n\n    let expected_call_cost = U512::from(NEW_ADD_BID_COST);\n    assert_eq!(\n        balance_after,\n        balance_before - U512::from(BOND_AMOUNT) - transaction_fee_1\n    );\n    assert_eq!(builder.last_exec_gas_consumed().value(), expected_call_cost);\n\n    // Withdraw bid\n    let withdraw_bid_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        account\n            .named_keys()\n            .get(AUCTION)\n            .unwrap()\n            .into_entity_hash_addr()\n            .unwrap()\n            .into(),\n        auction::METHOD_WITHDRAW_BID,\n        runtime_args! 
{\n            auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            auction::ARG_AMOUNT => U512::from(BOND_AMOUNT),\n        },\n    )\n    .build();\n\n    let balance_before = builder.get_purse_balance(account.main_purse());\n\n    let proposer_reward_starting_balance_2 = builder.get_proposer_purse_balance();\n\n    builder.exec(withdraw_bid_request).expect_success().commit();\n    let balance_after = builder.get_purse_balance(account.main_purse());\n\n    let transaction_fee_2 =\n        builder.get_proposer_purse_balance() - proposer_reward_starting_balance_2;\n\n    let call_cost = U512::from(NEW_WITHDRAW_BID_COST);\n    assert_eq!(balance_after, balance_before - transaction_fee_2);\n    assert_eq!(builder.last_exec_gas_consumed().value(), call_cost);\n}\n\n#[ignore]\n#[test]\nfn delegate_and_undelegate_have_expected_costs() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    let accounts = {\n        let validator_1 = GenesisAccount::account(\n            VALIDATOR_1.clone(),\n            Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(VALIDATOR_1_STAKE),\n                DelegationRate::zero(),\n            )),\n        );\n        let validator_2 = GenesisAccount::account(\n            VALIDATOR_2.clone(),\n            Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(VALIDATOR_1_STAKE),\n                DelegationRate::zero(),\n            )),\n        );\n\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        tmp.push(validator_1);\n        tmp.push(validator_2);\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    builder.run_genesis(run_genesis_request);\n\n    let system_contract_hashes_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        SYSTEM_CONTRACT_HASHES_NAME,\n        
RuntimeArgs::default(),\n    )\n    .build();\n    builder\n        .exec(system_contract_hashes_request)\n        .expect_success()\n        .commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let delegate_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        account\n            .named_keys()\n            .get(AUCTION)\n            .unwrap()\n            .into_entity_hash_addr()\n            .unwrap()\n            .into(),\n        auction::METHOD_DELEGATE,\n        runtime_args! {\n            auction::ARG_DELEGATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            auction::ARG_VALIDATOR => VALIDATOR_1.clone(),\n            auction::ARG_AMOUNT => U512::from(BID_AMOUNT),\n        },\n    )\n    .build();\n\n    let balance_before = builder.get_purse_balance(account.main_purse());\n\n    let proposer_reward_starting_balance_1 = builder.get_proposer_purse_balance();\n\n    builder.exec(delegate_request).expect_success().commit();\n\n    let balance_after = builder.get_purse_balance(account.main_purse());\n\n    let transaction_fee_1 =\n        builder.get_proposer_purse_balance() - proposer_reward_starting_balance_1;\n\n    let system_config = builder.chainspec().system_costs_config;\n    let expected_call_cost = U512::from(system_config.auction_costs().delegate);\n    assert_eq!(\n        balance_after,\n        balance_before - U512::from(BID_AMOUNT) - transaction_fee_1,\n    );\n    assert_eq!(builder.last_exec_gas_consumed().value(), expected_call_cost);\n\n    let redelegate_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        account\n            .named_keys()\n            .get(AUCTION)\n            .unwrap()\n            .into_entity_hash_addr()\n            .unwrap()\n            .into(),\n        auction::METHOD_REDELEGATE,\n        runtime_args! 
{\n            auction::ARG_DELEGATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            auction::ARG_VALIDATOR => VALIDATOR_1.clone(),\n            auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            auction::ARG_NEW_VALIDATOR => VALIDATOR_2.clone()\n        },\n    )\n    .build();\n\n    builder.exec(redelegate_request).expect_success().commit();\n\n    let system_config = builder.chainspec().system_costs_config;\n    let expected_call_cost = U512::from(system_config.auction_costs().redelegate);\n    assert_eq!(builder.last_exec_gas_consumed().value(), expected_call_cost);\n\n    // Withdraw bid\n    let undelegate_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        account\n            .named_keys()\n            .get(AUCTION)\n            .unwrap()\n            .into_entity_hash_addr()\n            .unwrap()\n            .into(),\n        auction::METHOD_UNDELEGATE,\n        runtime_args! {\n            auction::ARG_DELEGATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            auction::ARG_VALIDATOR => VALIDATOR_1.clone(),\n            auction::ARG_AMOUNT => U512::from(BID_AMOUNT - DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n        },\n    )\n    .build();\n\n    let balance_before = builder.get_purse_balance(account.main_purse());\n\n    let proposer_reward_starting_balance_2 = builder.get_proposer_purse_balance();\n\n    builder.exec(undelegate_request).expect_success().commit();\n    let balance_after = builder.get_purse_balance(account.main_purse());\n\n    let transaction_fee_2 =\n        builder.get_proposer_purse_balance() - proposer_reward_starting_balance_2;\n\n    let system_config = builder.chainspec().system_costs_config;\n    let expected_call_cost = U512::from(system_config.auction_costs().undelegate);\n    assert_eq!(balance_after, balance_before - transaction_fee_2);\n    assert_eq!(builder.last_exec_gas_consumed().value(), expected_call_cost);\n}\n\n#[ignore]\n#[test]\nfn 
upgraded_delegate_and_undelegate_have_expected_costs() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    let accounts = {\n        let validator_1 = GenesisAccount::account(\n            VALIDATOR_1.clone(),\n            Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(VALIDATOR_1_STAKE),\n                DelegationRate::zero(),\n            )),\n        );\n        let validator_2 = GenesisAccount::account(\n            VALIDATOR_2.clone(),\n            Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE),\n            Some(GenesisValidator::new(\n                Motes::new(VALIDATOR_1_STAKE),\n                DelegationRate::zero(),\n            )),\n        );\n\n        let mut tmp: Vec<GenesisAccount> = DEFAULT_ACCOUNTS.clone();\n        tmp.push(validator_1);\n        tmp.push(validator_2);\n        tmp\n    };\n\n    let run_genesis_request = utils::create_run_genesis_request(accounts);\n\n    builder.run_genesis(run_genesis_request);\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(OLD_PROTOCOL_VERSION)\n            .with_new_protocol_version(NEW_PROTOCOL_VERSION)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n            .build()\n    };\n\n    builder.upgrade(&mut upgrade_request);\n\n    let system_contract_hashes_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        SYSTEM_CONTRACT_HASHES_NAME,\n        RuntimeArgs::default(),\n    )\n    .build();\n    builder\n        .exec(system_contract_hashes_request)\n        .expect_success()\n        .commit();\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let delegate_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        account\n            .named_keys()\n            
.get(AUCTION)\n            .unwrap()\n            .into_entity_hash_addr()\n            .unwrap()\n            .into(),\n        auction::METHOD_DELEGATE,\n        runtime_args! {\n            auction::ARG_DELEGATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            auction::ARG_VALIDATOR => VALIDATOR_1.clone(),\n            auction::ARG_AMOUNT => U512::from(BID_AMOUNT),\n        },\n    )\n    .build();\n\n    let proposer_reward_starting_balance_1 = builder.get_proposer_purse_balance();\n\n    let balance_before = builder.get_purse_balance(account.main_purse());\n    builder.exec(delegate_request).expect_success().commit();\n    let balance_after = builder.get_purse_balance(account.main_purse());\n\n    let transaction_fee_1 =\n        builder.get_proposer_purse_balance() - proposer_reward_starting_balance_1;\n\n    let call_cost = U512::from(NEW_DELEGATE_COST);\n    assert_eq!(\n        balance_after,\n        balance_before - U512::from(BID_AMOUNT) - transaction_fee_1,\n    );\n\n    assert_eq!(builder.last_exec_gas_consumed().value(), call_cost);\n\n    // Redelegate bid\n    let redelegate_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        account\n            .named_keys()\n            .get(AUCTION)\n            .unwrap()\n            .into_entity_hash_addr()\n            .unwrap()\n            .into(),\n        auction::METHOD_REDELEGATE,\n        runtime_args! 
{\n            auction::ARG_DELEGATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            auction::ARG_VALIDATOR => VALIDATOR_1.clone(),\n            auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n            auction::ARG_NEW_VALIDATOR => VALIDATOR_2.clone()\n        },\n    )\n    .build();\n\n    builder.exec(redelegate_request).expect_success().commit();\n\n    let expected_call_cost = U512::from(NEW_REDELEGATE_COST);\n    assert_eq!(builder.last_exec_gas_consumed().value(), expected_call_cost);\n\n    // Withdraw bid (undelegate)\n    let undelegate_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        account\n            .named_keys()\n            .get(AUCTION)\n            .unwrap()\n            .into_entity_hash_addr()\n            .unwrap()\n            .into(),\n        auction::METHOD_UNDELEGATE,\n        runtime_args! {\n            auction::ARG_DELEGATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            auction::ARG_VALIDATOR => VALIDATOR_1.clone(),\n            auction::ARG_AMOUNT => U512::from(BID_AMOUNT - DEFAULT_MINIMUM_DELEGATION_AMOUNT),\n        },\n    )\n    .build();\n\n    let balance_before = builder.get_purse_balance(account.main_purse());\n\n    let proposer_reward_starting_balance_2 = builder.get_proposer_purse_balance();\n\n    builder.exec(undelegate_request).expect_success().commit();\n    let balance_after = builder.get_purse_balance(account.main_purse());\n\n    let transaction_fee_2 =\n        builder.get_proposer_purse_balance() - proposer_reward_starting_balance_2;\n\n    let call_cost = U512::from(NEW_UNDELEGATE_COST);\n    assert_eq!(balance_after, balance_before - transaction_fee_2);\n    assert_eq!(builder.last_exec_gas_consumed().value(), call_cost);\n}\n\n#[ignore]\n#[test]\nfn mint_transfer_has_expected_costs() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let transfer_request_1 
= ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_TO_NAMED_PURSE,\n        runtime_args! {\n            ARG_PURSE_NAME => NAMED_PURSE_NAME,\n            ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE),\n        },\n    )\n    .build();\n\n    builder.exec(transfer_request_1).expect_success().commit();\n\n    let default_account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let purse_1 = default_account\n        .named_keys()\n        .get(NAMED_PURSE_NAME)\n        .unwrap()\n        .into_uref()\n        .expect(\"should have purse\");\n\n    let mint_hash = builder.get_mint_contract_hash();\n\n    let source = default_account.main_purse();\n    let target = purse_1;\n\n    let id = Some(0u64);\n\n    let transfer_amount = U512::from(TRANSFER_AMOUNT);\n\n    let transfer_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        mint_hash,\n        mint::METHOD_TRANSFER,\n        runtime_args! 
{\n            mint::ARG_TO => Some(*DEFAULT_ACCOUNT_ADDR),\n            mint::ARG_SOURCE => source,\n            mint::ARG_TARGET => target,\n            mint::ARG_AMOUNT => U512::from(TRANSFER_AMOUNT),\n            mint::ARG_ID => id,\n        },\n    )\n    .build();\n\n    let balance_before = builder.get_purse_balance(source);\n\n    let proposer_reward_starting_balance = builder.get_proposer_purse_balance();\n\n    builder.exec(transfer_request).expect_success().commit();\n    let balance_after = builder.get_purse_balance(source);\n\n    let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance;\n\n    assert_eq!(\n        balance_after,\n        balance_before - transfer_amount - transaction_fee,\n    );\n}\n\n#[ignore]\n#[test]\nfn should_charge_for_erroneous_system_contract_calls() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let auction_hash = builder.get_auction_contract_hash();\n    let mint_hash = builder.get_mint_contract_hash();\n    let handle_payment_hash = builder.get_handle_payment_contract_hash();\n\n    let system_config = builder.chainspec().system_costs_config;\n\n    // Entrypoints that could fail early due to missing arguments\n    let entrypoint_calls = vec![\n        (\n            auction_hash,\n            auction::METHOD_ADD_BID,\n            system_config.auction_costs().add_bid,\n        ),\n        (\n            auction_hash,\n            auction::METHOD_WITHDRAW_BID,\n            system_config.auction_costs().withdraw_bid,\n        ),\n        (\n            auction_hash,\n            auction::METHOD_DELEGATE,\n            system_config.auction_costs().delegate,\n        ),\n        (\n            auction_hash,\n            auction::METHOD_UNDELEGATE,\n            system_config.auction_costs().undelegate,\n        ),\n        (\n            auction_hash,\n            auction::METHOD_REDELEGATE,\n            
system_config.auction_costs().redelegate,\n        ),\n        (\n            auction_hash,\n            auction::METHOD_RUN_AUCTION,\n            system_config.auction_costs().run_auction,\n        ),\n        (\n            auction_hash,\n            auction::METHOD_SLASH,\n            system_config.auction_costs().slash,\n        ),\n        (\n            auction_hash,\n            auction::METHOD_DISTRIBUTE,\n            system_config.auction_costs().distribute,\n        ),\n        (\n            mint_hash,\n            mint::METHOD_MINT,\n            system_config.mint_costs().mint.into(),\n        ),\n        (\n            mint_hash,\n            mint::METHOD_REDUCE_TOTAL_SUPPLY,\n            system_config.mint_costs().reduce_total_supply.into(),\n        ),\n        (\n            mint_hash,\n            mint::METHOD_BALANCE,\n            system_config.mint_costs().balance.into(),\n        ),\n        (\n            mint_hash,\n            mint::METHOD_TRANSFER,\n            system_config.mint_costs().transfer.into(),\n        ),\n        (\n            handle_payment_hash,\n            handle_payment::METHOD_SET_REFUND_PURSE,\n            system_config.handle_payment_costs().set_refund_purse.into(),\n        ),\n        // (\n        //     handle_payment_hash,\n        //     handle_payment::METHOD_FINALIZE_PAYMENT,\n        //     system_config.handle_payment_costs().finalize_payment,\n        // ),\n        (\n            auction_hash,\n            \"this_entrypoint_does_not_exists_1\",\n            system_config.no_such_entrypoint(),\n        ),\n        (\n            mint_hash,\n            \"this_entrypoint_does_not_exists_2\",\n            system_config.no_such_entrypoint(),\n        ),\n        (\n            handle_payment_hash,\n            \"this_entrypoint_does_not_exists_3\",\n            system_config.no_such_entrypoint(),\n        ),\n    ];\n\n    for (contract_hash, entrypoint, expected_cost) in entrypoint_calls {\n        let 
exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n            *DEFAULT_ACCOUNT_ADDR,\n            contract_hash,\n            entrypoint,\n            RuntimeArgs::default(),\n        )\n        .build();\n\n        builder.exec(exec_request).commit();\n\n        let _error = builder\n            .get_last_exec_result()\n            .expect(\"should have results\")\n            .error()\n            .cloned()\n            .unwrap_or_else(|| panic!(\"should have error while executing {}\", entrypoint));\n\n        // assert!(matches!(\n        //     error,\n        //     Error::Exec(ExecError::NoSuchMethod(ref no_such_method)) if no_such_method ==\n        // entrypoint), \"{:?}\",  error);\n\n        let call_cost = U512::from(expected_cost);\n\n        assert_eq!(\n            builder.last_exec_gas_consumed().value(),\n            call_cost,\n            \"{:?}\",\n            entrypoint\n        );\n    }\n}\n\n#[ignore]\n#[test]\nfn should_verify_do_nothing_charges_only_for_standard_payment() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have default account\");\n\n    let deploy_item = DeployItemBuilder::new()\n        .with_address(*DEFAULT_ACCOUNT_ADDR)\n        .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::default())\n        .with_standard_payment(runtime_args! 
{\n            ARG_AMOUNT => *DEFAULT_PAYMENT\n        })\n        .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR])\n        .with_deploy_hash([42; 32])\n        .build();\n\n    let do_nothing_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build();\n\n    let user_funds_before = builder.get_purse_balance(default_account.main_purse());\n\n    let proposer_reward_starting_balance = builder.get_proposer_purse_balance();\n\n    builder.exec(do_nothing_request).commit().expect_success();\n\n    let user_funds_after = builder.get_purse_balance(default_account.main_purse());\n\n    let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance;\n\n    assert_eq!(user_funds_after, user_funds_before - transaction_fee,);\n\n    assert_eq!(builder.last_exec_gas_consumed(), Gas::new(U512::zero()));\n}\n\n#[ignore]\n#[test]\nfn should_verify_wasm_add_bid_wasm_cost_is_not_recursive() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let new_opcode_costs = OpcodeCosts {\n        bit: 0,\n        add: 0,\n        mul: 0,\n        div: 0,\n        load: 0,\n        store: 0,\n        op_const: 0,\n        local: 0,\n        global: 0,\n        control_flow: ControlFlowCosts {\n            block: 0,\n            op_loop: 0,\n            op_if: 0,\n            op_else: 0,\n            end: 0,\n            br: 0,\n            br_if: 0,\n            br_table: BrTableCost {\n                cost: 0,\n                size_multiplier: 0,\n            },\n            op_return: 0,\n            call: 0,\n            call_indirect: 0,\n            drop: 0,\n            select: 0,\n        },\n        integer_comparison: 0,\n        conversion: 0,\n        unreachable: 0,\n        nop: 0,\n        current_memory: 0,\n        grow_memory: 0,\n        sign: 0,\n    };\n    let new_storage_costs = StorageCosts::new(0);\n\n    // We're elevating cost of 
`transfer_from_purse_to_purse` while zeroing others.\n    // This will verify that user pays for the transfer host function _only_ while host does not\n    // additionally charge for calling mint's \"transfer\" entrypoint under the hood.\n    let new_host_function_costs = HostFunctionCostsV1 {\n        call_contract: HostFunction::fixed(UPDATED_CALL_CONTRACT_COST),\n        ..Zero::zero()\n    };\n    let wasm_v1_config = WasmV1Config::new(\n        DEFAULT_WASM_MAX_MEMORY,\n        DEFAULT_MAX_STACK_HEIGHT,\n        new_opcode_costs,\n        new_host_function_costs,\n    );\n    let wasm_v2_config = WasmV2Config::new(\n        DEFAULT_WASM_MAX_MEMORY,\n        OpcodeCosts::default(),\n        HostFunctionCostsV2::default(),\n    );\n    let wasm_config = WasmConfig::new(MessageLimits::default(), wasm_v1_config, wasm_v2_config);\n\n    let new_max_associated_keys = DEFAULT_MAX_ASSOCIATED_KEYS;\n    let new_auction_costs = AuctionCosts::default();\n    let new_mint_costs = MintCosts {\n        transfer: 0,\n        ..Default::default()\n    };\n    let new_standard_payment_costs = StandardPaymentCosts::default();\n    let new_handle_payment_costs = HandlePaymentCosts::default();\n\n    let system_costs_config = SystemConfig::new(\n        1,\n        new_auction_costs,\n        new_mint_costs,\n        new_handle_payment_costs,\n        new_standard_payment_costs,\n    );\n\n    let core_config = CoreConfig {\n        max_associated_keys: new_max_associated_keys,\n        ..Default::default()\n    };\n\n    let chainspec = ChainspecConfig {\n        system_costs_config,\n        wasm_config,\n        core_config,\n        storage_costs: new_storage_costs,\n    };\n    builder.with_chainspec(chainspec);\n\n    let mut upgrade_request = {\n        UpgradeRequestBuilder::new()\n            .with_current_protocol_version(OLD_PROTOCOL_VERSION)\n            .with_new_protocol_version(NEW_PROTOCOL_VERSION)\n            .with_activation_point(DEFAULT_ACTIVATION_POINT)\n    
        .build()\n    };\n\n    builder.upgrade(&mut upgrade_request);\n\n    let default_account = builder\n        .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have default account\");\n\n    let add_bid_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_ADD_BID,\n        runtime_args! {\n            auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n            auction::ARG_AMOUNT => U512::from(BOND_AMOUNT),\n            auction::ARG_DELEGATION_RATE => BID_DELEGATION_RATE,\n        },\n    )\n    .build();\n\n    // Verify that user is called and deploy raises runtime error\n    let user_funds_before = builder.get_purse_balance(default_account.main_purse());\n\n    let proposer_reward_starting_balance_1 = builder.get_proposer_purse_balance();\n\n    builder.exec(add_bid_request).commit().expect_success();\n\n    let user_funds_after = builder.get_purse_balance(default_account.main_purse());\n\n    let transaction_fee_1 =\n        builder.get_proposer_purse_balance() - proposer_reward_starting_balance_1;\n\n    assert_eq!(\n        user_funds_after,\n        user_funds_before - transaction_fee_1 - U512::from(BOND_AMOUNT)\n    );\n\n    let expected_call_cost =\n        U512::from(DEFAULT_ADD_BID_COST) + U512::from(UPDATED_CALL_CONTRACT_COST);\n\n    assert_eq!(\n        builder.last_exec_gas_consumed(),\n        Gas::new(expected_call_cost)\n    );\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/tutorial/counter.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{Key, RuntimeArgs, StoredValue};\n\nconst COUNT_KEY: &str = \"count\";\nconst COUNTER_INSTALLER_WASM: &str = \"counter_installer.wasm\";\nconst INCREMENT_COUNTER_WASM: &str = \"increment_counter.wasm\";\nconst COUNTER_KEY: &str = \"counter\";\n\n#[ignore]\n#[test]\nfn should_run_counter_example() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let install_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        COUNTER_INSTALLER_WASM,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let inc_request_1 = ExecuteRequestBuilder::contract_call_by_name(\n        *DEFAULT_ACCOUNT_ADDR,\n        COUNTER_KEY,\n        \"counter_inc\",\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    let call_request_1 = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        INCREMENT_COUNTER_WASM,\n        RuntimeArgs::default(),\n    )\n    .build();\n\n    builder.exec(install_request_1).expect_success().commit();\n\n    let binding = builder\n        .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[])\n        .expect(\"must have value\");\n    let result = binding.as_account().unwrap().named_keys();\n\n    println!(\"Named keys, {:?}\", result);\n\n    let query_result = builder\n        .query(\n            None,\n            Key::Account(*DEFAULT_ACCOUNT_ADDR),\n            &[COUNTER_KEY.into(), COUNT_KEY.into()],\n        )\n        .expect(\"should query\");\n\n    let counter_before: i32 = if let StoredValue::CLValue(cl_value) = query_result {\n        cl_value.into_t().unwrap()\n    } else {\n        panic!(\"Stored value is not an i32: {:?}\", query_result);\n    };\n\n    builder.exec(inc_request_1).expect_success().commit();\n\n    let query_result = builder\n        
.query(\n            None,\n            Key::from(*DEFAULT_ACCOUNT_ADDR),\n            &[COUNTER_KEY.into(), COUNT_KEY.into()],\n        )\n        .expect(\"should query\");\n\n    let counter_after: i32 = if let StoredValue::CLValue(cl_value) = query_result {\n        cl_value.into_t().unwrap()\n    } else {\n        panic!(\"Stored value is not an i32: {:?}\", query_result);\n    };\n\n    let counter_diff = counter_after - counter_before;\n    assert_eq!(counter_diff, 1);\n\n    builder.exec(call_request_1).expect_success().commit();\n}\n\n// #[test]\n// fn gen_fixture() {\n//     lmdb_fixture::generate_fixture(\n//         \"counter_contract\",\n//         LOCAL_GENESIS_REQUEST.clone(),\n//         |builder| {\n//             let install_request_1 = ExecuteRequestBuilder::standard(\n//                 *DEFAULT_ACCOUNT_ADDR,\n//                 COUNTER_INSTALLER_WASM,\n//                 RuntimeArgs::default(),\n//             )\n//             .build();\n//             builder.exec(install_request_1).expect_success().commit();\n//         },\n//     )\n//     .expect(\"should gen fixture\");\n// }\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/tutorial/hello_world.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST,\n};\nuse casper_types::{runtime_args, Key, StoredValue};\n\nconst HELLO_WORLD_CONTRACT: &str = \"hello_world.wasm\";\nconst KEY: &str = \"special_value\";\nconst ARG_MESSAGE: &str = \"message\";\nconst MESSAGE_VALUE: &str = \"Hello, world!\";\n\n#[ignore]\n#[test]\nfn should_run_hello_world() {\n    let mut builder = LmdbWasmTestBuilder::default();\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    let exec_request = {\n        let session_args = runtime_args! {\n            ARG_MESSAGE => MESSAGE_VALUE,\n        };\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, HELLO_WORLD_CONTRACT, session_args)\n            .build()\n    };\n    builder.exec(exec_request).expect_success().commit();\n\n    let stored_message = builder\n        .query(None, Key::from(*DEFAULT_ACCOUNT_ADDR), &[KEY.into()])\n        .expect(\"should query\");\n\n    let message: String = if let StoredValue::CLValue(cl_value) = stored_message {\n        cl_value.into_t().unwrap()\n    } else {\n        panic!(\"Stored message is not a clvalue: {:?}\", stored_message);\n    };\n    assert_eq!(message, MESSAGE_VALUE);\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/tutorial.rs",
    "content": "mod counter;\nmod hello_world;\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/upgrade.rs",
    "content": "use casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder, UpgradeRequestBuilder,\n    DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE,\n};\nuse num_rational::Ratio;\nuse std::collections::BTreeMap;\n\nuse crate::lmdb_fixture;\nuse casper_execution_engine::{\n    engine_state,\n    engine_state::{EngineConfigBuilder, Error},\n    execution::ExecError,\n};\nuse casper_types::{\n    account::AccountHash,\n    addressable_entity::{AssociatedKeys, Weight},\n    bytesrepr::{Bytes, FromBytes},\n    contracts::ContractPackageHash,\n    runtime_args,\n    system::{auction::MINIMUM_DELEGATION_RATE_KEY, mint::MINT_SUSTAIN_PURSE_KEY},\n    AccessRights, AddressableEntityHash, CLValue, EntityAddr, EntityVersion, EraId,\n    HoldBalanceHandling, Key, PackageHash, ProtocolVersion, RewardsHandling, RuntimeArgs,\n    StoredValue, Timestamp, URef, ENTITY_INITIAL_VERSION, REWARDS_HANDLING_RATIO_TAG,\n};\n\nconst DO_NOTHING_STORED_CONTRACT_NAME: &str = \"do_nothing_stored\";\nconst DO_NOTHING_STORED_UPGRADER_CONTRACT_NAME: &str = \"do_nothing_stored_upgrader\";\nconst DO_NOTHING_STORED_CALLER_CONTRACT_NAME: &str = \"do_nothing_stored_caller\";\nconst PURSE_HOLDER_STORED_CALLER_CONTRACT_NAME: &str = \"purse_holder_stored_caller\";\nconst PURSE_HOLDER_STORED_CONTRACT_NAME: &str = \"purse_holder_stored\";\nconst PURSE_HOLDER_STORED_UPGRADER_CONTRACT_NAME: &str = \"purse_holder_stored_upgrader\";\nconst UPGRADE_THRESHOLD_CONTRACT_NAME: &str = \"upgrade_threshold.wasm\";\nconst UPGRADE_THRESHOLD_UPGRADER: &str = \"upgrade_threshold_upgrader.wasm\";\n\nconst ENTRY_FUNCTION_NAME: &str = \"delegate\";\nconst DO_NOTHING_CONTRACT_NAME: &str = \"do_nothing_package_hash\";\nconst DO_NOTHING_HASH_KEY_NAME: &str = \"do_nothing_hash\";\n\nconst INITIAL_VERSION: EntityVersion = ENTITY_INITIAL_VERSION;\nconst UPGRADED_VERSION: EntityVersion = INITIAL_VERSION + 1;\nconst PURSE_NAME_ARG_NAME: &str = 
\"purse_name\";\nconst PURSE_1: &str = \"purse_1\";\nconst METHOD_REMOVE: &str = \"remove\";\nconst VERSION: &str = \"version\";\n\nconst HASH_KEY_NAME: &str = \"purse_holder\";\n\nconst TOTAL_PURSES: usize = 3;\nconst PURSE_NAME: &str = \"purse_name\";\nconst ENTRY_POINT_NAME: &str = \"entry_point\";\nconst ENTRY_POINT_ADD: &str = \"add_named_purse\";\nconst ARG_CONTRACT_PACKAGE: &str = \"contract_package\";\nconst ARG_MAJOR_VERSION: &str = \"major_version\";\nconst ARG_VERSION: &str = \"version\";\nconst ARG_NEW_PURSE_NAME: &str = \"new_purse_name\";\nconst ARG_IS_LOCKED: &str = \"is_locked\";\n\n/// Performs define and execution of versioned contracts, calling them directly from hash\n#[ignore]\n#[test]\nfn should_upgrade_do_nothing_to_do_something_version_hash_call() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    // Create contract package and store contract ver: 1.0.0 with \"delegate\" entry function\n    {\n        let exec_request = {\n            let contract_name = format!(\"{}.wasm\", DO_NOTHING_STORED_CONTRACT_NAME);\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                &contract_name,\n                RuntimeArgs::default(),\n            )\n            .build()\n        };\n\n        builder.exec(exec_request).expect_success().commit();\n    }\n\n    // Calling initial version from contract package hash, should have no effects\n    {\n        let exec_request = {\n            ExecuteRequestBuilder::versioned_contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                DO_NOTHING_CONTRACT_NAME,\n                Some(INITIAL_VERSION),\n                ENTRY_FUNCTION_NAME,\n                RuntimeArgs::new(),\n            )\n            .build()\n        };\n\n        builder.exec(exec_request).expect_success().commit();\n    }\n\n    let account_1 = builder\n        
.get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should get account 1\");\n\n    let entity_hash = account_1\n        .named_keys()\n        .get(DO_NOTHING_HASH_KEY_NAME)\n        .expect(\"must have do-nothing-hash\")\n        .into_entity_hash()\n        .unwrap();\n\n    let entity = builder\n        .get_entity_with_named_keys_by_entity_hash(entity_hash)\n        .expect(\"must have entity\");\n\n    assert!(\n        entity.named_keys().get(PURSE_1).is_none(),\n        \"purse should not exist\",\n    );\n\n    // Upgrade version having call to create_purse_01\n    {\n        let exec_request = {\n            let contract_name = format!(\"{}.wasm\", DO_NOTHING_STORED_UPGRADER_CONTRACT_NAME);\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                &contract_name,\n                RuntimeArgs::default(),\n            )\n            .build()\n        };\n\n        builder.exec(exec_request).expect_success().commit();\n    }\n\n    // Calling upgraded version, expecting purse creation\n    {\n        let args = runtime_args! 
{\n            PURSE_NAME_ARG_NAME => PURSE_1,\n        };\n        let exec_request = {\n            ExecuteRequestBuilder::versioned_contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                DO_NOTHING_CONTRACT_NAME,\n                Some(UPGRADED_VERSION),\n                ENTRY_FUNCTION_NAME,\n                args,\n            )\n            .build()\n        };\n\n        builder.exec(exec_request).expect_success().commit();\n    }\n\n    let account_1 = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should get account 1\");\n\n    let entity_hash = account_1\n        .named_keys()\n        .get(\"end of upgrade\")\n        .expect(\"must have do-nothing-hash\")\n        .into_entity_hash()\n        .unwrap();\n\n    let entity = builder\n        .get_entity_with_named_keys_by_entity_hash(entity_hash)\n        .expect(\"must have entity\");\n\n    assert!(\n        entity.named_keys().get(PURSE_1).is_some(),\n        \"purse should exist\",\n    );\n}\n\n/// Performs define and execution of versioned contracts, calling them from a contract\n#[ignore]\n#[test]\nfn should_upgrade_do_nothing_to_do_something_contract_call() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    // Create contract package and store contract ver: 1.0.0\n    {\n        let exec_request = {\n            let contract_name = format!(\"{}.wasm\", DO_NOTHING_STORED_CONTRACT_NAME);\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                &contract_name,\n                RuntimeArgs::default(),\n            )\n            .build()\n        };\n\n        builder.exec(exec_request).expect_success().commit();\n    }\n\n    let account_1 = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should get account 1\");\n\n    account_1\n        
.named_keys()\n        .get(DO_NOTHING_HASH_KEY_NAME)\n        .expect(\"should have key of do_nothing_hash\")\n        .into_entity_hash_addr()\n        .expect(\"should have into hash\");\n\n    let stored_contract_package_hash = account_1\n        .named_keys()\n        .get(DO_NOTHING_CONTRACT_NAME)\n        .expect(\"should have key of do_nothing_hash\")\n        .into_hash_addr()\n        .map(PackageHash::new)\n        .expect(\"should have hash\");\n\n    // Calling initial stored version from contract package hash, should have no effects\n    {\n        let contract_name = format!(\"{}.wasm\", DO_NOTHING_STORED_CALLER_CONTRACT_NAME);\n        let args = runtime_args! {\n            ARG_CONTRACT_PACKAGE => stored_contract_package_hash,\n            ARG_MAJOR_VERSION => 2u32,\n            ARG_VERSION => INITIAL_VERSION,\n            ARG_NEW_PURSE_NAME => PURSE_1,\n        };\n        let exec_request = {\n            ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, args).build()\n        };\n\n        builder.exec(exec_request).expect_success().commit();\n    }\n\n    let account_1 = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should get account 1\");\n\n    let entity_hash = account_1\n        .named_keys()\n        .get(DO_NOTHING_HASH_KEY_NAME)\n        .expect(\"must have do-nothing-hash\")\n        .into_entity_hash()\n        .unwrap();\n\n    let entity = builder\n        .get_entity_with_named_keys_by_entity_hash(entity_hash)\n        .expect(\"must have entity\");\n\n    assert!(\n        entity.named_keys().get(PURSE_1).is_none(),\n        \"purse should not exist\",\n    );\n\n    // Upgrade stored contract to version: 2.0.0, having call to create_purse_01\n    {\n        let exec_request = {\n            let contract_name = format!(\"{}.wasm\", DO_NOTHING_STORED_UPGRADER_CONTRACT_NAME);\n            ExecuteRequestBuilder::standard(\n                
*DEFAULT_ACCOUNT_ADDR,\n                &contract_name,\n                RuntimeArgs::default(),\n            )\n            .build()\n        };\n\n        builder.exec(exec_request).expect_success().commit();\n    }\n\n    let stored_contract_package_hash = account_1\n        .named_keys()\n        .get(DO_NOTHING_CONTRACT_NAME)\n        .expect(\"should have key of do_nothing_hash\")\n        .into_hash_addr()\n        .map(PackageHash::new)\n        .expect(\"should have hash\");\n\n    // Calling upgraded stored version, expecting purse creation\n    {\n        let contract_name = format!(\"{}.wasm\", DO_NOTHING_STORED_CALLER_CONTRACT_NAME);\n        let args = runtime_args! {\n            ARG_CONTRACT_PACKAGE => stored_contract_package_hash,\n            ARG_MAJOR_VERSION => 2,\n            ARG_VERSION => UPGRADED_VERSION,\n            ARG_NEW_PURSE_NAME => PURSE_1,\n        };\n\n        let exec_request = {\n            ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, args).build()\n        };\n\n        builder.exec(exec_request).expect_success().commit();\n    }\n\n    let account_1 = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should get account 1\");\n\n    let entity_hash = account_1\n        .named_keys()\n        .get(\"end of upgrade\")\n        .expect(\"must have do-nothing-hash\")\n        .into_entity_hash()\n        .unwrap();\n\n    let entity = builder\n        .get_entity_with_named_keys_by_entity_hash(entity_hash)\n        .expect(\"must have entity\");\n\n    assert!(\n        entity.named_keys().get(PURSE_1).is_some(),\n        \"purse should exist\",\n    );\n}\n\n#[ignore]\n#[test]\nfn should_be_able_to_observe_state_transition_across_upgrade() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    // store do-nothing-stored\n    {\n        let exec_request = {\n            let 
contract_name = format!(\"{}.wasm\", PURSE_HOLDER_STORED_CONTRACT_NAME);\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                &contract_name,\n                runtime_args! {\n                    ARG_IS_LOCKED => false,\n                },\n            )\n            .build()\n        };\n\n        builder.exec(exec_request).expect_success().commit();\n    }\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    assert!(\n        account.named_keys().contains(VERSION),\n        \"version uref should exist on install\"\n    );\n\n    let stored_package_hash = account\n        .named_keys()\n        .get(HASH_KEY_NAME)\n        .expect(\"should have stored uref\")\n        .into_hash_addr()\n        .map(PackageHash::new)\n        .expect(\"should have hash\");\n\n    // verify version before upgrade\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let version = *account\n        .named_keys()\n        .get(VERSION)\n        .expect(\"version uref should exist\");\n\n    let original_version = builder\n        .query(None, version, &[])\n        .expect(\"version should exist\");\n\n    assert_eq!(\n        original_version,\n        StoredValue::CLValue(CLValue::from_t(\"1.0.0\".to_string()).unwrap()),\n        \"should be original version\"\n    );\n\n    // upgrade contract\n    {\n        let exec_request = {\n            let contract_name = format!(\"{}.wasm\", PURSE_HOLDER_STORED_UPGRADER_CONTRACT_NAME);\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                &contract_name,\n                runtime_args! 
{\n                    ARG_CONTRACT_PACKAGE => stored_package_hash,\n                },\n            )\n            .build()\n        };\n\n        builder.exec(exec_request).expect_success().commit();\n    }\n\n    // version should change after upgrade\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let version = *account\n        .named_keys()\n        .get(VERSION)\n        .expect(\"version key should exist\");\n\n    let upgraded_version = builder\n        .query(None, version, &[])\n        .expect(\"version should exist\");\n\n    assert_eq!(\n        upgraded_version,\n        StoredValue::CLValue(CLValue::from_t(\"1.0.1\".to_string()).unwrap()),\n        \"should be original version\"\n    );\n}\n\n#[ignore]\n#[test]\nfn should_support_extending_functionality() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    // store do-nothing-stored\n    {\n        let exec_request = {\n            let contract_name = format!(\"{}.wasm\", PURSE_HOLDER_STORED_CONTRACT_NAME);\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                &contract_name,\n                runtime_args! 
{\n                    ARG_IS_LOCKED => false\n                },\n            )\n            .build()\n        };\n\n        builder.exec(exec_request).expect_success().commit();\n    }\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let stored_package_hash = account\n        .named_keys()\n        .get(HASH_KEY_NAME)\n        .expect(\"should have stored uref\")\n        .into_hash_addr()\n        .expect(\"should have hash\");\n\n    let stored_hash = account\n        .named_keys()\n        .get(PURSE_HOLDER_STORED_CONTRACT_NAME)\n        .expect(\"should have stored uref\")\n        .into_entity_hash_addr()\n        .expect(\"should have hash\")\n        .into();\n\n    // call stored contract and persist a known uref before upgrade\n    {\n        let exec_request = {\n            let contract_name = format!(\"{}.wasm\", PURSE_HOLDER_STORED_CALLER_CONTRACT_NAME);\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                &contract_name,\n                runtime_args! 
{\n                    HASH_KEY_NAME => stored_hash,\n                    ENTRY_POINT_NAME => ENTRY_POINT_ADD,\n                    PURSE_NAME => PURSE_1,\n                },\n            )\n            .build()\n        };\n\n        builder.exec(exec_request).expect_success().commit();\n    }\n\n    // verify known uref actually exists prior to upgrade\n    let contract = builder\n        .get_entity_with_named_keys_by_entity_hash(stored_hash)\n        .expect(\"should have contract\");\n    assert!(\n        contract.named_keys().contains(PURSE_1),\n        \"purse uref should exist in contract's named_keys before upgrade\"\n    );\n\n    // upgrade contract\n    {\n        let exec_request = {\n            let contract_name = format!(\"{}.wasm\", PURSE_HOLDER_STORED_UPGRADER_CONTRACT_NAME);\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                &contract_name,\n                runtime_args! {\n                    ARG_CONTRACT_PACKAGE => PackageHash::new(stored_package_hash),\n                },\n            )\n            .build()\n        };\n\n        builder.exec(exec_request).expect_success().commit();\n    }\n\n    // verify uref still exists in named_keys after upgrade:\n    let contract = builder\n        .get_entity_with_named_keys_by_entity_hash(stored_hash)\n        .expect(\"should have contract\");\n\n    assert!(\n        contract.named_keys().contains(PURSE_1),\n        \"PURSE_1 uref should still exist in contract's named_keys after upgrade\"\n    );\n\n    // Get account again after upgrade to refresh named keys\n    let account_2 = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n    // Get contract again after upgrade\n\n    let stored_hash_2 = account_2\n        .named_keys()\n        .get(PURSE_HOLDER_STORED_CONTRACT_NAME)\n        .expect(\"should have stored uref\")\n        .into_entity_hash_addr()\n        
.expect(\"should have hash\")\n        .into();\n    assert_ne!(stored_hash, stored_hash_2);\n\n    // call new remove function\n    {\n        let exec_request = {\n            let contract_name = format!(\"{}.wasm\", PURSE_HOLDER_STORED_CALLER_CONTRACT_NAME);\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                &contract_name,\n                runtime_args! {\n                    HASH_KEY_NAME => stored_hash_2,\n                    ENTRY_POINT_NAME => METHOD_REMOVE,\n                    PURSE_NAME => PURSE_1,\n                },\n            )\n            .build()\n        };\n\n        builder.exec(exec_request).expect_success().commit();\n    }\n\n    // verify known urefs no longer include removed purse\n    let contract = builder\n        .get_entity_with_named_keys_by_entity_hash(stored_hash_2)\n        .expect(\"should have contract\");\n\n    assert!(\n        !contract.named_keys().contains(PURSE_1),\n        \"PURSE_1 uref should no longer exist in contract's named_keys after remove\"\n    );\n}\n\n#[ignore]\n#[test]\nfn should_maintain_named_keys_across_upgrade() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    // store contract\n    {\n        let exec_request = {\n            let contract_name = format!(\"{}.wasm\", PURSE_HOLDER_STORED_CONTRACT_NAME);\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                &contract_name,\n                runtime_args! 
{\n                    ARG_IS_LOCKED => false\n                },\n            )\n            .build()\n        };\n\n        builder.exec(exec_request).expect_success().commit();\n    }\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let stored_hash = account\n        .named_keys()\n        .get(PURSE_HOLDER_STORED_CONTRACT_NAME)\n        .expect(\"should have stored hash\")\n        .into_entity_hash_addr()\n        .expect(\"should have hash\");\n\n    let stored_package_hash = account\n        .named_keys()\n        .get(HASH_KEY_NAME)\n        .expect(\"should have stored package hash\")\n        .into_hash_addr()\n        .map(PackageHash::new)\n        .expect(\"should have hash\");\n\n    // add several purse urefs to named_keys\n    for index in 0..TOTAL_PURSES {\n        let purse_name: &str = &format!(\"purse_{}\", index);\n\n        let exec_request = {\n            let contract_name = format!(\"{}.wasm\", PURSE_HOLDER_STORED_CALLER_CONTRACT_NAME);\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                &contract_name,\n                runtime_args! 
{\n                    HASH_KEY_NAME => stored_hash,\n                    ENTRY_POINT_NAME => ENTRY_POINT_ADD,\n                    PURSE_NAME => purse_name,\n                },\n            )\n            .build()\n        };\n\n        builder.exec(exec_request).expect_success().commit();\n\n        // verify known uref actually exists prior to upgrade\n        let contract = builder\n            .get_entity_with_named_keys_by_entity_hash(stored_hash.into())\n            .expect(\"should have contract\");\n        assert!(\n            contract.named_keys().contains(purse_name),\n            \"purse uref should exist in contract's named_keys before upgrade\"\n        );\n    }\n\n    // upgrade contract\n    {\n        let exec_request = {\n            let contract_name = format!(\"{}.wasm\", PURSE_HOLDER_STORED_UPGRADER_CONTRACT_NAME);\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                &contract_name,\n                runtime_args! {\n                    ARG_CONTRACT_PACKAGE => stored_package_hash,\n                },\n            )\n            .build()\n        };\n\n        builder.exec(exec_request).expect_success().commit();\n    }\n\n    // verify all urefs still exist in named_keys after upgrade\n    let contract = builder\n        .get_entity_with_named_keys_by_entity_hash(stored_hash.into())\n        .expect(\"should have contract\");\n\n    for index in 0..TOTAL_PURSES {\n        let purse_name: &str = &format!(\"purse_{}\", index);\n        assert!(\n            contract.named_keys().contains(purse_name),\n            \"{} uref should still exist in contract's named_keys after upgrade\",\n            index\n        );\n    }\n}\n\n#[ignore]\n#[test]\nfn should_fail_upgrade_for_locked_contract() {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    // store contract\n    {\n        let exec_request = {\n            let contract_name 
= format!(\"{}.wasm\", PURSE_HOLDER_STORED_CONTRACT_NAME);\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                &contract_name,\n                runtime_args! {\n                    ARG_IS_LOCKED => true,\n                },\n            )\n            .build()\n        };\n\n        builder.exec(exec_request).expect_success().commit();\n    }\n\n    let account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"should have account\");\n\n    let stored_package_hash: PackageHash = account\n        .named_keys()\n        .get(HASH_KEY_NAME)\n        .expect(\"should have stored package hash\")\n        .into_hash_addr()\n        .map(PackageHash::new)\n        .expect(\"should have hash\");\n\n    let contract_package = builder\n        .get_package(stored_package_hash)\n        .expect(\"should get package hash\");\n\n    // Ensure that our current package is indeed locked.\n    assert!(contract_package.is_locked());\n\n    {\n        let exec_request = {\n            let contract_name = format!(\"{}.wasm\", PURSE_HOLDER_STORED_UPGRADER_CONTRACT_NAME);\n            ExecuteRequestBuilder::standard(\n                *DEFAULT_ACCOUNT_ADDR,\n                &contract_name,\n                runtime_args! 
{\n                    ARG_CONTRACT_PACKAGE => stored_package_hash,\n                },\n            )\n            .build()\n        };\n\n        assert!(builder.exec(exec_request).is_error());\n    }\n}\n\n#[ignore]\n#[test]\nfn should_only_upgrade_if_threshold_is_met() {\n    const CONTRACT_HASH_NAME: &str = \"contract_hash_name\";\n    const PACKAGE_HASH_KEY_NAME: &str = \"contract_package_hash\";\n\n    const ENTRYPOINT_ADD_ASSOCIATED_KEY: &str = \"add_associated_key\";\n    const ENTRYPOINT_MANAGE_ACTION_THRESHOLD: &str = \"manage_action_threshold\";\n\n    const ARG_ENTITY_ACCOUNT_HASH: &str = \"entity_account_hash\";\n    const ARG_KEY_WEIGHT: &str = \"key_weight\";\n    const ARG_NEW_UPGRADE_THRESHOLD: &str = \"new_threshold\";\n    const ARG_CONTRACT_PACKAGE: &str = \"contract_package_hash\";\n\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    builder.run_genesis(LOCAL_GENESIS_REQUEST.clone());\n\n    if !builder.chainspec().core_config.enable_addressable_entity {\n        return;\n    }\n\n    let install_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        UPGRADE_THRESHOLD_CONTRACT_NAME,\n        runtime_args! 
{},\n    )\n    .build();\n\n    builder.exec(install_request).expect_success().commit();\n\n    let entity = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have default addressable entity\");\n\n    let upgrade_threshold_contract_hash = entity\n        .named_keys()\n        .get(CONTRACT_HASH_NAME)\n        .expect(\"must have named key entry for contract hash\")\n        .into_entity_hash_addr()\n        .map(AddressableEntityHash::new)\n        .expect(\"must get contract hash\");\n\n    let upgrade_threshold_package_hash = entity\n        .named_keys()\n        .get(PACKAGE_HASH_KEY_NAME)\n        .expect(\"must have named key entry for package hash\")\n        .into_package_addr()\n        .map(PackageHash::new)\n        .expect(\"must get package hash\");\n\n    let upgrade_threshold_contract_entity = builder\n        .get_entity_with_named_keys_by_entity_hash(upgrade_threshold_contract_hash)\n        .expect(\"must have upgrade threshold entity\");\n\n    let entity = upgrade_threshold_contract_entity.entity();\n    let actual_associated_keys = entity.associated_keys();\n    let mut expected_associated_keys = AssociatedKeys::new(*DEFAULT_ACCOUNT_ADDR, Weight::new(1));\n    assert_eq!(&expected_associated_keys, actual_associated_keys);\n\n    let mut entity_account_hashes =\n        vec![AccountHash::new([10u8; 32]), AccountHash::new([11u8; 32])];\n\n    for entity_account_hash in &entity_account_hashes {\n        expected_associated_keys\n            .add_key(*entity_account_hash, Weight::new(1))\n            .expect(\"must add associated key\");\n\n        let execute_request = ExecuteRequestBuilder::contract_call_by_hash(\n            *DEFAULT_ACCOUNT_ADDR,\n            upgrade_threshold_contract_hash,\n            ENTRYPOINT_ADD_ASSOCIATED_KEY,\n            runtime_args! 
{\n                ARG_ENTITY_ACCOUNT_HASH => *entity_account_hash,\n                ARG_KEY_WEIGHT => 1u8\n            },\n        )\n        .build();\n\n        builder.exec(execute_request).expect_success().commit();\n    }\n\n    let update_upgrade_threshold_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        upgrade_threshold_contract_hash,\n        ENTRYPOINT_MANAGE_ACTION_THRESHOLD,\n        runtime_args! {\n            ARG_NEW_UPGRADE_THRESHOLD => 3u8\n        },\n    )\n    .build();\n\n    builder\n        .exec(update_upgrade_threshold_request)\n        .expect_success()\n        .commit();\n\n    let upgrade_threshold_contract_entity = builder\n        .get_addressable_entity(upgrade_threshold_contract_hash)\n        .expect(\"must have upgrade threshold entity\");\n\n    let updated_associated_keys = upgrade_threshold_contract_entity.associated_keys();\n    assert_eq!(&expected_associated_keys, updated_associated_keys);\n\n    let updated_action_threshold = upgrade_threshold_contract_entity.action_thresholds();\n    assert_eq!(\n        updated_action_threshold.upgrade_management(),\n        &Weight::new(3u8)\n    );\n\n    let invalid_upgrade_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        UPGRADE_THRESHOLD_UPGRADER,\n        runtime_args! {\n            ARG_CONTRACT_PACKAGE => upgrade_threshold_package_hash\n        },\n    )\n    .build();\n\n    builder.exec(invalid_upgrade_request).expect_failure();\n\n    builder.assert_error(engine_state::Error::Exec(\n        ExecError::UpgradeAuthorizationFailure,\n    ));\n\n    let authorization_keys = {\n        entity_account_hashes.push(*DEFAULT_ACCOUNT_ADDR);\n        entity_account_hashes\n    };\n\n    let valid_upgrade_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        UPGRADE_THRESHOLD_UPGRADER,\n        runtime_args! 
{\n            ARG_CONTRACT_PACKAGE => upgrade_threshold_package_hash\n        },\n    )\n    .with_authorization_keys(authorization_keys.into_iter().collect())\n    .build();\n\n    builder\n        .exec(valid_upgrade_request)\n        .expect_success()\n        .commit();\n}\n\nfn setup_upgrade_threshold_state() -> (LmdbWasmTestBuilder, AccountHash) {\n    const ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]);\n    const UPGRADE_THRESHOLDS_FIXTURE: &str = \"upgrade_thresholds\";\n\n    let (mut builder, lmdb_fixture_state, _temp_dir) =\n        crate::lmdb_fixture::builder_from_global_state_fixture_with_enable_ae(\n            UPGRADE_THRESHOLDS_FIXTURE,\n            true,\n        );\n\n    let current_protocol_version = lmdb_fixture_state.genesis_protocol_version();\n\n    let new_protocol_version =\n        ProtocolVersion::from_parts(current_protocol_version.value().major + 1, 0, 0);\n\n    let activation_point = EraId::new(0u64);\n\n    let mut upgrade_request = UpgradeRequestBuilder::new()\n        .with_current_protocol_version(current_protocol_version)\n        .with_new_protocol_version(new_protocol_version)\n        .with_activation_point(activation_point)\n        .with_new_gas_hold_handling(HoldBalanceHandling::Accrued)\n        .with_new_gas_hold_interval(24 * 60 * 60 * 60)\n        .with_enable_addressable_entity(true)\n        .build();\n\n    builder\n        .with_block_time(Timestamp::now().into())\n        .upgrade_using_scratch(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let transfer = TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, ACCOUNT_1_ADDR)\n        .with_transfer_id(42)\n        .build();\n    builder.transfer_and_commit(transfer).expect_success();\n\n    (builder, ACCOUNT_1_ADDR)\n}\n\n#[ignore]\n#[test]\nfn should_correctly_set_upgrade_threshold_on_entity_upgrade() {\n    let (mut builder, entity_1) = setup_upgrade_threshold_state();\n\n    if 
!builder.chainspec().core_config.enable_addressable_entity {\n        return;\n    }\n\n    let default_addressable_entity = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have default entity\");\n\n    let entity_hash = default_addressable_entity\n        .named_keys()\n        .get(PURSE_HOLDER_STORED_CONTRACT_NAME)\n        // We use hash addr as the migration hasn't occurred.\n        .map(|holder_key| holder_key.into_hash_addr().map(AddressableEntityHash::new))\n        .unwrap()\n        .expect(\"must convert to hash\");\n\n    let stored_package_hash = default_addressable_entity\n        .named_keys()\n        .get(HASH_KEY_NAME)\n        .expect(\"should have stored package hash\")\n        .into_hash_addr()\n        .map(PackageHash::new)\n        .expect(\"should have hash\");\n\n    let exec_request = ExecuteRequestBuilder::standard(\n        entity_1,\n        &format!(\"{}.wasm\", PURSE_HOLDER_STORED_CALLER_CONTRACT_NAME),\n        runtime_args! {\n            ENTRY_POINT_NAME => VERSION,\n            HASH_KEY_NAME => entity_hash\n        },\n    )\n    .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let purse_holder_as_entity = builder\n        .get_addressable_entity(entity_hash)\n        .expect(\"must have purse holder entity hash\");\n\n    let purse_holder_main_purse_before = purse_holder_as_entity.main_purse();\n\n    let actual_associated_keys = purse_holder_as_entity.associated_keys();\n\n    assert!(actual_associated_keys.is_empty());\n\n    let upgrade_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        &format!(\"{}.wasm\", PURSE_HOLDER_STORED_UPGRADER_CONTRACT_NAME),\n        runtime_args! 
{\n            ARG_CONTRACT_PACKAGE => stored_package_hash\n        },\n    )\n    .build();\n\n    builder.exec(upgrade_request).expect_success().commit();\n\n    let new_entity_hash = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have entity\")\n        .named_keys()\n        .get(PURSE_HOLDER_STORED_CONTRACT_NAME)\n        .map(|key| key.into_entity_hash_addr().map(AddressableEntityHash::new))\n        .unwrap()\n        .expect(\"must get contract hash\");\n\n    let updated_purse_entity = builder\n        .get_addressable_entity(new_entity_hash)\n        .expect(\"must have purse holder entity hash\");\n\n    let updated_entity_main_purse = updated_purse_entity.main_purse();\n    let actual_associated_keys = updated_purse_entity.associated_keys();\n\n    let expect_associated_keys = AssociatedKeys::new(*DEFAULT_ACCOUNT_ADDR, Weight::new(1));\n\n    assert_eq!(purse_holder_main_purse_before, updated_entity_main_purse);\n    assert_eq!(actual_associated_keys, &expect_associated_keys);\n}\n\n#[allow(clippy::enum_variant_names)]\nenum MigrationScenario {\n    ByContractHash,\n    ByContractName,\n    ByPackageHash(Option<EntityVersion>),\n    ByPackageName(Option<EntityVersion>),\n    ByUpgrader,\n}\n\nfn call_and_migrate_purse_holder_contract(migration_scenario: MigrationScenario) {\n    let (mut builder, _) = setup_upgrade_threshold_state();\n\n    if !builder.chainspec().core_config.enable_addressable_entity {\n        return;\n    }\n\n    let runtime_args = runtime_args! 
{\n        PURSE_NAME_ARG_NAME => PURSE_1\n    };\n\n    let default_addressable_entity = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have default entity\");\n\n    let entity_hash = default_addressable_entity\n        .named_keys()\n        .get(PURSE_HOLDER_STORED_CONTRACT_NAME)\n        .map(|holder_key| holder_key.into_hash_addr().map(AddressableEntityHash::new))\n        .unwrap()\n        .expect(\"must convert to hash\");\n\n    let package_hash = default_addressable_entity\n        .named_keys()\n        .get(HASH_KEY_NAME)\n        .expect(\"must have package named key entry\")\n        .into_hash_addr()\n        .map(PackageHash::new)\n        .unwrap();\n\n    let execute_request = match migration_scenario {\n        MigrationScenario::ByPackageName(maybe_contract_version) => {\n            ExecuteRequestBuilder::versioned_contract_call_by_name(\n                *DEFAULT_ACCOUNT_ADDR,\n                HASH_KEY_NAME,\n                maybe_contract_version,\n                ENTRY_POINT_ADD,\n                runtime_args,\n            )\n            .build()\n        }\n        MigrationScenario::ByPackageHash(maybe_contract_version) => {\n            ExecuteRequestBuilder::versioned_contract_call_by_hash(\n                *DEFAULT_ACCOUNT_ADDR,\n                package_hash,\n                maybe_contract_version,\n                ENTRY_POINT_ADD,\n                runtime_args,\n            )\n            .build()\n        }\n        MigrationScenario::ByContractHash => ExecuteRequestBuilder::contract_call_by_hash(\n            *DEFAULT_ACCOUNT_ADDR,\n            entity_hash,\n            ENTRY_POINT_ADD,\n            runtime_args,\n        )\n        .build(),\n        MigrationScenario::ByContractName => ExecuteRequestBuilder::contract_call_by_name(\n            *DEFAULT_ACCOUNT_ADDR,\n            PURSE_HOLDER_STORED_CONTRACT_NAME,\n            ENTRY_POINT_ADD,\n            runtime_args,\n 
       )\n        .build(),\n        MigrationScenario::ByUpgrader => ExecuteRequestBuilder::standard(\n            *DEFAULT_ACCOUNT_ADDR,\n            &format!(\"{}.wasm\", PURSE_HOLDER_STORED_UPGRADER_CONTRACT_NAME),\n            runtime_args! {\n                ARG_CONTRACT_PACKAGE => package_hash\n            },\n        )\n        .build(),\n    };\n\n    builder.exec(execute_request).expect_success().commit();\n\n    let updated_entity = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"must have default entity\");\n\n    let updated_key = updated_entity\n        .named_keys()\n        .get(PURSE_HOLDER_STORED_CONTRACT_NAME)\n        .expect(\"must have updated entity\");\n\n    let updated_hash = if let MigrationScenario::ByUpgrader = migration_scenario {\n        updated_key.into_entity_hash()\n    } else {\n        updated_key.into_hash_addr().map(AddressableEntityHash::new)\n    }\n    .expect(\"must get entity hash\");\n\n    let updated_purse_entity = builder\n        .get_addressable_entity(updated_hash)\n        .expect(\"must have purse holder entity hash\");\n\n    let actual_associated_keys = updated_purse_entity.associated_keys();\n    if let MigrationScenario::ByUpgrader = migration_scenario {\n        let expect_associated_keys = AssociatedKeys::new(*DEFAULT_ACCOUNT_ADDR, Weight::new(1));\n        assert_eq!(actual_associated_keys, &expect_associated_keys);\n        // Post migration by upgrade there should be previous + 1 versions\n        // present in the package. 
(previous = 1)\n        let version_count = builder\n            .get_package(package_hash)\n            .expect(\"must have package\")\n            .versions()\n            .version_count();\n\n        assert_eq!(version_count, 2usize);\n    } else {\n        assert_eq!(actual_associated_keys, &AssociatedKeys::default());\n    }\n}\n\n#[ignore]\n#[test]\nfn should_correct_migrate_contract_when_invoked_by_package_name() {\n    call_and_migrate_purse_holder_contract(MigrationScenario::ByPackageName(None))\n}\n\n#[ignore]\n#[test]\nfn should_correctly_migrate_contract_when_invoked_by_name_and_version() {\n    call_and_migrate_purse_holder_contract(MigrationScenario::ByPackageName(Some(INITIAL_VERSION)))\n}\n\n#[ignore]\n#[test]\nfn should_correct_migrate_contract_when_invoked_by_package_hash() {\n    call_and_migrate_purse_holder_contract(MigrationScenario::ByPackageHash(None))\n}\n\n#[ignore]\n#[test]\nfn should_correct_migrate_contract_when_invoked_by_package_hash_and_specific_version() {\n    call_and_migrate_purse_holder_contract(MigrationScenario::ByPackageHash(Some(INITIAL_VERSION)))\n}\n\n#[ignore]\n#[test]\nfn should_correctly_migrate_contract_when_invoked_by_contract_hash() {\n    call_and_migrate_purse_holder_contract(MigrationScenario::ByContractHash)\n}\n\n#[ignore]\n#[test]\nfn should_correctly_migrate_contract_when_invoked_by_contract_name() {\n    call_and_migrate_purse_holder_contract(MigrationScenario::ByContractName)\n}\n\n#[ignore]\n#[test]\nfn should_correctly_migrate_and_upgrade_with_upgrader() {\n    call_and_migrate_purse_holder_contract(MigrationScenario::ByUpgrader)\n}\n\n#[ignore]\n#[test]\nfn should_correctly_retain_disabled_contract_version() {\n    const DISABLED_VERSIONS_FIX: &str = \"disabled_versions\";\n\n    let (mut builder, lmdb_fixture_state, _temp_dir) =\n        lmdb_fixture::builder_from_global_state_fixture(DISABLED_VERSIONS_FIX);\n\n    let previous_protocol_version = lmdb_fixture_state.genesis_protocol_version();\n\n    let 
new_protocol_version =\n        ProtocolVersion::from_parts(previous_protocol_version.value().major + 1, 0, 0);\n\n    let activation_point = EraId::new(0u64);\n\n    let mut upgrade_request = UpgradeRequestBuilder::new()\n        .with_current_protocol_version(previous_protocol_version)\n        .with_new_protocol_version(new_protocol_version)\n        .with_activation_point(activation_point)\n        .with_new_gas_hold_handling(HoldBalanceHandling::Accrued)\n        .with_new_gas_hold_interval(24 * 60 * 60 * 60)\n        .with_enable_addressable_entity(true)\n        .build();\n\n    builder\n        .with_block_time(Timestamp::now().into())\n        .upgrade_using_scratch(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let exec_request = {\n        let contract_name = format!(\"{}.wasm\", \"do_nothing_stored_upgrader\");\n        ExecuteRequestBuilder::standard(\n            *DEFAULT_ACCOUNT_ADDR,\n            &contract_name,\n            RuntimeArgs::default(),\n        )\n        .build()\n    };\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let contract_package = builder\n        .query(\n            None,\n            Key::Account(*DEFAULT_ACCOUNT_ADDR),\n            &[\"do_nothing_package_hash\".to_string()],\n        )\n        .expect(\"must have stored value\")\n        .as_contract_package()\n        .expect(\"must have contract_package\")\n        .clone();\n\n    assert_eq!(contract_package.versions().len(), 3);\n\n    let disabled_version_key = contract_package\n        .disabled_versions()\n        .first()\n        .expect(\"must have disabled version  key\");\n\n    let disabled_contract_hash = contract_package\n        .versions()\n        .get(disabled_version_key)\n        .expect(\"package must contain one disabled hash\");\n\n    let exec_request = ExecuteRequestBuilder::contract_call_by_hash(\n        *DEFAULT_ACCOUNT_ADDR,\n        AddressableEntityHash::new(disabled_contract_hash.value()),\n        
\"delegate\",\n        runtime_args! {\n            \"purse_name\" => \"purse_2\"\n        },\n    )\n    .build();\n\n    builder.exec(exec_request).expect_failure();\n}\n\n#[ignore]\n#[test]\nfn should_correctly_attach_minimum_delegation_rate_on_upgrade() {\n    const DISABLED_VERSIONS_FIX: &str = \"disabled_versions\";\n\n    let (mut builder, lmdb_fixture_state, _temp_dir) =\n        lmdb_fixture::builder_from_global_state_fixture(DISABLED_VERSIONS_FIX);\n\n    let previous_protocol_version = lmdb_fixture_state.genesis_protocol_version();\n\n    let new_protocol_version =\n        ProtocolVersion::from_parts(previous_protocol_version.value().major + 1, 0, 0);\n\n    let activation_point = EraId::new(0u64);\n\n    let auction_contract_hash = builder.get_auction_contract_hash();\n    let auction_named_keys =\n        builder.get_named_keys(EntityAddr::System(auction_contract_hash.value()));\n    let minimum_delegation_rate_key = auction_named_keys.get(MINIMUM_DELEGATION_RATE_KEY);\n    // The stored lmdb state shouldn't have this key - it's an invariant of this test.\n    assert!(minimum_delegation_rate_key.is_none());\n\n    let mut upgrade_request = UpgradeRequestBuilder::new()\n        .with_current_protocol_version(previous_protocol_version)\n        .with_new_protocol_version(new_protocol_version)\n        .with_activation_point(activation_point)\n        .with_new_minimum_delegation_rate(30)\n        .build();\n\n    builder\n        .with_block_time(Timestamp::now().into())\n        .upgrade_using_scratch(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let exec_request = {\n        let contract_name = format!(\"{}.wasm\", \"do_nothing_stored_upgrader\");\n        ExecuteRequestBuilder::standard(\n            *DEFAULT_ACCOUNT_ADDR,\n            &contract_name,\n            RuntimeArgs::default(),\n        )\n        .build()\n    };\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let auction_contract_hash = 
builder.get_auction_contract_hash();\n    let auction_named_keys =\n        builder.get_named_keys(EntityAddr::System(auction_contract_hash.value()));\n    let minimum_delegation_rate_key = *auction_named_keys\n        .get(MINIMUM_DELEGATION_RATE_KEY)\n        .expect(\"minimum delegation rate key should exist at genesis\");\n\n    let minimum_delegation_rate: u8 = builder\n        .query(None, minimum_delegation_rate_key, &[])\n        .expect(\"should have minimum delegation rate\")\n        .as_cl_value()\n        .expect(\"minimum delegation rate should be a CLValue\")\n        .clone()\n        .into_t()\n        .expect(\"minimum delegation rate should be u8\");\n\n    assert_eq!(\n        minimum_delegation_rate, 30,\n        \"this upgrade should have set minimum delegation rate to 30!\"\n    );\n}\n\nfn setup_state_for_version_tests(\n    should_trap_on_ambiguous_entity_version: bool,\n) -> (LmdbWasmTestBuilder, ContractPackageHash) {\n    const THREE_VERSION_FIXTURE: &str = \"three_version_fixture\";\n\n    let (mut builder, lmdb_fixture_state, _temp_dir) =\n        lmdb_fixture::builder_from_global_state_fixture(THREE_VERSION_FIXTURE);\n\n    let previous_protocol_version = lmdb_fixture_state.genesis_protocol_version();\n\n    let new_protocol_version =\n        ProtocolVersion::from_parts(previous_protocol_version.value().major + 1, 0, 0);\n\n    let activation_point = EraId::new(0u64);\n\n    let mut upgrade_request = UpgradeRequestBuilder::new()\n        .with_current_protocol_version(previous_protocol_version)\n        .with_new_protocol_version(new_protocol_version)\n        .with_activation_point(activation_point)\n        .with_new_gas_hold_handling(HoldBalanceHandling::Accrued)\n        .with_new_gas_hold_interval(24 * 60 * 60 * 60)\n        .with_enable_addressable_entity(false)\n        .build();\n\n    let config = EngineConfigBuilder::new()\n        .with_trap_on_ambiguous_entity_version(should_trap_on_ambiguous_entity_version)\n        
.build();\n\n    builder\n        .with_block_time(Timestamp::now().into())\n        .upgrade_using_scratch(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    builder.with_engine_config(config);\n\n    let account = builder\n        .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[])\n        .expect(\"must have account as stored value\")\n        .as_account()\n        .expect(\"have account\")\n        .to_owned();\n\n    let contract_package_hash = account\n        .named_keys()\n        .get(\"purse_holder\")\n        .expect(\"must have key\")\n        .into_hash_addr()\n        .map(ContractPackageHash::new)\n        .expect(\"must have package hash\");\n\n    (builder, contract_package_hash)\n}\n\nfn execute_no_major_some_entity_version_calls(trap_on_ambiguous_entity_version: bool) {\n    let (mut builder, contract_package_hash) =\n        setup_state_for_version_tests(trap_on_ambiguous_entity_version);\n\n    let config = builder.engine_config();\n\n    let actual_trap_on_ambiguous_entity_version = config.trap_on_ambiguous_entity_version();\n    assert_eq!(\n        trap_on_ambiguous_entity_version,\n        actual_trap_on_ambiguous_entity_version\n    );\n\n    let runtime_args = runtime_args! {\n        \"contract_package_hash\" => contract_package_hash,\n        \"version\" => Some(1),\n        \"major_version\" => None::<u32>,\n        \"entry_point\" => \"add_named_purse\".to_string(),\n        \"purse_name\" => \"v_1_1_purse\",\n    };\n\n    let contract_name = format!(\"{}.wasm\", \"call_package_version_by_hash\");\n    let exec_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args)\n            .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let runtime_args = runtime_args! 
{\n        \"contract_package_hash\" => contract_package_hash,\n        \"version\" => Some(2),\n        \"major_version\" => None::<u32>,\n        \"entry_point\" => \"add\".to_string(),\n        \"purse_name\" => \"v_1_2_purse\",\n    };\n\n    let contract_name = format!(\"{}.wasm\", \"call_package_version_by_hash\");\n    let exec_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args)\n            .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let runtime_args = runtime_args! {\n        \"contract_package_hash\" => contract_package_hash,\n        \"version\" => Some(3),\n        \"major_version\" => None::<u32>,\n        \"entry_point\" => \"add\".to_string(),\n        \"purse_name\" => \"v_1_3_purse\",\n    };\n\n    let contract_name = format!(\"{}.wasm\", \"call_package_version_by_hash\");\n    let exec_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args)\n            .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let runtime_args = runtime_args! {\n        \"contract_package\" => contract_package_hash\n    };\n    let exec_request = {\n        let contract_name = format!(\"{}.wasm\", \"purse_holder_stored_upgrader\");\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args).build()\n    };\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let runtime_args = runtime_args! 
{\n        \"contract_package_hash\" => contract_package_hash,\n        \"version\" => Some(1),\n        \"major_version\" => None::<u32>,\n        \"entry_point\" => \"add\".to_string(),\n        \"purse_name\" => \"v_1_1_purse\",\n    };\n\n    let contract_name = format!(\"{}.wasm\", \"call_package_version_by_hash\");\n    let exec_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args)\n            .build();\n\n    if actual_trap_on_ambiguous_entity_version {\n        builder.exec(exec_request).expect_failure();\n        let expected_error = Error::Exec(ExecError::AmbiguousEntityVersion);\n        builder.assert_error(expected_error);\n        return;\n    }\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let runtime_args = runtime_args! {\n        \"contract_package_hash\" => contract_package_hash,\n        \"version\" => Some(1),\n        \"major_version\" => Some(1),\n        \"entry_point\" => \"add_named_purse\".to_string(),\n        \"purse_name\" => \"v_1_1_purse\",\n    };\n\n    let contract_name = format!(\"{}.wasm\", \"call_package_version_by_hash\");\n    let exec_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args)\n            .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let runtime_args = runtime_args! 
{\n        \"contract_package_hash\" => contract_package_hash,\n        \"version\" => None::<u32>,\n        \"major_version\" => None::<u32>,\n        \"entry_point\" => \"add\".to_string(),\n        \"purse_name\" => \"v_1_1_purse\",\n    };\n\n    let contract_name = format!(\"{}.wasm\", \"call_package_version_by_hash\");\n    let exec_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args)\n            .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let contract_package = builder\n        .query(None, Key::Hash(contract_package_hash.value()), &[])\n        .expect(\"must have contract package as stored value\")\n        .into_contract_package()\n        .expect(\"must get contract package\");\n\n    let disable_hash = contract_package\n        .current_contract_hash()\n        .expect(\"must get hash\");\n\n    let runtime_args = runtime_args! {\n        \"contract_package_hash\" => contract_package_hash,\n        \"contract_hash\" => disable_hash,\n    };\n\n    let contract_name = format!(\"{}.wasm\", \"disable_contract_by_contract_hash\");\n    let exec_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args)\n            .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let runtime_args = runtime_args! 
{\n        \"contract_package_hash\" => contract_package_hash,\n        \"version\" => Some(1),\n        \"major_version\" => None::<u32>,\n        \"entry_point\" => \"add_named_purse\".to_string(),\n        \"purse_name\" => \"v_1_1_purse\",\n    };\n\n    let contract_name = format!(\"{}.wasm\", \"call_package_version_by_hash\");\n    let exec_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args)\n            .build();\n\n    builder.exec(exec_request).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_correctly_manage_entity_version_calls_with_error_flag_off() {\n    execute_no_major_some_entity_version_calls(false)\n}\n\n#[ignore]\n#[test]\nfn should_correctly_return_error_for_multiple_entity_versions() {\n    execute_no_major_some_entity_version_calls(true)\n}\n\n#[ignore]\n#[test]\nfn should_call_correct_version_when_specifying_only_major_version() {\n    let (mut builder, contract_package_hash) = setup_state_for_version_tests(false);\n\n    // There are three 1.x versions in the package.\n    // The 1.1 version has an entry point `add_named_purse` while the 1.2 and 1.3\n    // rename the entry point to `add`\n    // Thus a call specifying 1.1 should work, however as per the rules, if 1.*\n    // is specified, then 1.3 should be invoked and the call should fail with\n    // the 1.1 entry point name.\n    let runtime_args = runtime_args! 
{\n        \"contract_package_hash\" => contract_package_hash,\n        \"version\" => Some(1),\n        \"major_version\" => Some(1),\n        \"entry_point\" => \"add_named_purse\".to_string(),\n        \"purse_name\" => \"v_1_1_purse\",\n    };\n\n    let contract_name = format!(\"{}.wasm\", \"call_package_version_by_hash\");\n    let exec_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args)\n            .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let runtime_args = runtime_args! {\n        \"contract_package_hash\" => contract_package_hash,\n        \"version\" => None::<u32>,\n        \"major_version\" => Some(1),\n        \"entry_point\" => \"add_named_purse\".to_string(),\n        \"purse_name\" => \"v_1_1_purse\",\n    };\n\n    let contract_name = format!(\"{}.wasm\", \"call_package_version_by_hash\");\n    let exec_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args)\n            .build();\n\n    builder.exec(exec_request).expect_failure();\n\n    let expected_error = Error::Exec(ExecError::NoSuchMethod(\"add_named_purse\".to_string()));\n\n    builder.assert_error(expected_error);\n\n    let runtime_args = runtime_args! 
{\n        \"contract_package_hash\" => contract_package_hash,\n        \"version\" => None::<u32>,\n        \"major_version\" => Some(1),\n        \"entry_point\" => \"add\".to_string(),\n        \"purse_name\" => \"v_1_1_purse\",\n    };\n\n    let contract_name = format!(\"{}.wasm\", \"call_package_version_by_hash\");\n    let exec_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args)\n            .build();\n\n    builder.exec(exec_request).expect_success().commit();\n}\n\n#[ignore]\n#[test]\nfn should_correctly_invoke_version_in_package_when_no_versions_are_specified() {\n    let (mut builder, contract_package_hash) = setup_state_for_version_tests(false);\n\n    let runtime_args = runtime_args! {\n        \"contract_package_hash\" => contract_package_hash,\n        \"version\" => None::<u32>,\n        \"major_version\" => None::<u32>,\n        \"entry_point\" => \"add\".to_string(),\n        \"purse_name\" => \"v_1_1_purse\",\n    };\n\n    let contract_name = format!(\"{}.wasm\", \"call_package_version_by_hash\");\n    let exec_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args)\n            .build();\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let runtime_args = runtime_args! {\n        \"contract_package\" => contract_package_hash\n    };\n    let exec_request = {\n        let contract_name = format!(\"{}.wasm\", \"purse_holder_stored_upgrader_v2_2\");\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args).build()\n    };\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let runtime_args = runtime_args! 
{\n        \"contract_package_hash\" => contract_package_hash,\n        \"version\" => None::<u32>,\n        \"major_version\" => None::<u32>,\n        \"entry_point\" => \"add\".to_string(),\n        \"purse_name\" => \"v_1_1_purse\",\n    };\n\n    let contract_name = format!(\"{}.wasm\", \"call_package_version_by_hash\");\n    let exec_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args)\n            .build();\n\n    builder.exec(exec_request).expect_failure();\n\n    let expected_error = Error::Exec(ExecError::NoSuchMethod(\"add\".to_string()));\n\n    builder.assert_error(expected_error);\n\n    let runtime_args = runtime_args! {\n        \"contract_package_hash\" => contract_package_hash,\n        \"version\" => None::<u32>,\n        \"major_version\" => None::<u32>,\n        \"entry_point\" => \"delegate\".to_string(),\n        \"purse_name\" => \"v_1_1_purse\",\n    };\n\n    let contract_name = format!(\"{}.wasm\", \"call_package_version_by_hash\");\n    let exec_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args)\n            .build();\n\n    builder.exec(exec_request).expect_success().commit();\n}\n\nfn should_not_require_subsequent_cases(trap: bool) {\n    let (mut builder, contract_package_hash) = setup_state_for_version_tests(trap);\n\n    let previous_protocol_version = builder.engine_config().protocol_version();\n\n    let new_protocol_version =\n        ProtocolVersion::from_parts(previous_protocol_version.value().major + 1, 0, 0);\n\n    let activation_point = EraId::new(0u64);\n\n    let sustain_uref = URef::new([6u8; 32], AccessRights::all());\n\n    let sustain_ratio = Ratio::new(2, 8);\n    let rewards_handling = RewardsHandling::Sustain {\n        ratio: sustain_ratio,\n        purse_address: sustain_uref.to_formatted_string(),\n    };\n\n    let mut upgrade_request = UpgradeRequestBuilder::new()\n        
.with_current_protocol_version(previous_protocol_version)\n        .with_new_protocol_version(new_protocol_version)\n        .with_activation_point(activation_point)\n        .with_new_gas_hold_handling(HoldBalanceHandling::Accrued)\n        .with_new_gas_hold_interval(24 * 60 * 60 * 60)\n        .with_enable_addressable_entity(false)\n        .with_rewards_handling(rewards_handling)\n        .build();\n\n    builder\n        .with_block_time(Timestamp::now().into())\n        .upgrade_using_scratch(&mut upgrade_request)\n        .expect_upgrade_success();\n\n    let actual_ratio = builder\n        .query(None, Key::RewardsHandling, &[])\n        .expect(\"must have stored value as part of the upgrade\")\n        .as_cl_value()\n        .expect(\"must get cl value\")\n        .to_t::<BTreeMap<u8, Bytes>>()\n        .expect(\"must get btree map\")\n        .get(&REWARDS_HANDLING_RATIO_TAG)\n        .map(|bytes| Ratio::<u64>::from_bytes(bytes).expect(\"failed to deserialize rewards ratio\"))\n        .map(|(ratio, _)| ratio)\n        .expect(\"must get ratio\");\n\n    assert_eq!(sustain_ratio, actual_ratio);\n\n    let actual_sustain_purse = *builder\n        .get_entity_with_named_keys_by_entity_hash(builder.get_mint_contract_hash())\n        .expect(\"must get mint entity\")\n        .named_keys()\n        .get(MINT_SUSTAIN_PURSE_KEY)\n        .expect(\"must have key entry\")\n        .as_uref()\n        .expect(\"must be able to convert to uref\");\n\n    assert_eq!(actual_sustain_purse, sustain_uref);\n\n    let config = EngineConfigBuilder::new()\n        .with_protocol_version(new_protocol_version)\n        .with_trap_on_ambiguous_entity_version(trap)\n        .build();\n\n    builder.with_engine_config(config);\n\n    let config = builder.engine_config();\n    let protocol_version = config.protocol_version();\n\n    let runtime_args = runtime_args! 
{\n        \"contract_package\" => contract_package_hash\n    };\n    let exec_request = {\n        let contract_name = format!(\"{}.wasm\", \"purse_holder_stored_upgrader_v2_2\");\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args)\n            .with_protocol_version(protocol_version)\n            .build()\n    };\n\n    builder.exec(exec_request).expect_success().commit();\n\n    let contract_package = builder\n        .query(None, Key::Hash(contract_package_hash.value()), &[])\n        .expect(\"must get package as stored value\")\n        .into_contract_package()\n        .expect(\"must get package\");\n    let current_version = contract_package\n        .current_contract_version()\n        .expect(\"must have the latest current version\");\n\n    assert_eq!(current_version.protocol_version_major(), 3);\n\n    let runtime_args = runtime_args! {\n        \"contract_package_hash\" => contract_package_hash,\n        \"version\" => Some(1),\n        \"major_version\" => None::<u32>,\n        \"entry_point\" => \"delegate\".to_string(),\n        \"purse_name\" => \"v_1_1_purse\",\n    };\n\n    let contract_name = format!(\"{}.wasm\", \"call_package_version_by_hash\");\n    let exec_request =\n        ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args)\n            .with_protocol_version(protocol_version)\n            .build();\n\n    if trap {\n        builder.exec(exec_request).expect_failure();\n        let expected_error = Error::Exec(ExecError::AmbiguousEntityVersion);\n        builder.assert_error(expected_error);\n    } else {\n        builder.exec(exec_request).expect_success().commit();\n    }\n}\n\n#[ignore]\n#[test]\nfn should_not_require_subsequent_increasing_versions_to_correctly_identify_version_key_with_trap_set(\n) {\n    should_not_require_subsequent_cases(true)\n}\n\n#[ignore]\n#[test]\nfn 
should_not_require_subsequent_increasing_versions_to_correctly_identify_version_key_with_trap_unset(\n) {\n    should_not_require_subsequent_cases(false)\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/test/vm2_tests.rs",
    "content": ""
  },
  {
    "path": "execution_engine_testing/tests/src/test/wasmless_transfer.rs",
    "content": "use once_cell::sync::Lazy;\n\nuse casper_engine_test_support::{\n    ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder, DEFAULT_ACCOUNT_ADDR,\n    DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST,\n};\nuse casper_execution_engine::engine_state::{\n    Error as CoreError, WASMLESS_TRANSFER_FIXED_GAS_PRICE,\n};\nuse casper_storage::system::transfer::TransferError;\nuse casper_types::{\n    account::AccountHash,\n    runtime_args,\n    system::{handle_payment, mint},\n    AccessRights, Gas, Key, MintCosts, Motes, PublicKey, SecretKey, URef, U512,\n};\n\nconst CONTRACT_TRANSFER_PURSE_TO_ACCOUNT: &str = \"transfer_purse_to_account.wasm\";\nconst CONTRACT_NEW_NAMED_UREF: &str = \"new_named_uref.wasm\";\nconst CONTRACT_CREATE_PURSE_01: &str = \"create_purse_01.wasm\";\nconst NON_UREF_NAMED_KEY: &str = \"transfer_result\";\nconst TEST_PURSE_NAME: &str = \"test_purse\";\nconst ARG_PURSE_NAME: &str = \"purse_name\";\nconst ARG_UREF_NAME: &str = \"uref_name\";\n\nstatic ACCOUNT_1_SECRET_KEY: Lazy<SecretKey> =\n    Lazy::new(|| SecretKey::secp256k1_from_bytes([234u8; 32]).unwrap());\nstatic ACCOUNT_1_PUBLIC_KEY: Lazy<PublicKey> =\n    Lazy::new(|| PublicKey::from(&*ACCOUNT_1_SECRET_KEY));\nstatic ACCOUNT_1_ADDR: Lazy<AccountHash> = Lazy::new(|| ACCOUNT_1_PUBLIC_KEY.to_account_hash());\n\nstatic ACCOUNT_2_SECRET_KEY: Lazy<SecretKey> =\n    Lazy::new(|| SecretKey::secp256k1_from_bytes([210u8; 32]).unwrap());\nstatic ACCOUNT_2_PUBLIC_KEY: Lazy<PublicKey> =\n    Lazy::new(|| PublicKey::from(&*ACCOUNT_2_SECRET_KEY));\nstatic ACCOUNT_2_ADDR: Lazy<AccountHash> = Lazy::new(|| ACCOUNT_2_PUBLIC_KEY.to_account_hash());\n\n#[ignore]\n#[test]\nfn should_transfer_wasmless_account_to_purse() {\n    transfer_wasmless(WasmlessTransfer::AccountMainPurseToPurse);\n}\n\n#[ignore]\n#[test]\nfn should_transfer_wasmless_account_to_public_key() {\n    transfer_wasmless(WasmlessTransfer::AccountMainPurseToPublicKeyMainPurse);\n}\n\n#[ignore]\n#[test]\nfn 
should_transfer_wasmless_account_to_account() {\n    transfer_wasmless(WasmlessTransfer::AccountMainPurseToAccountMainPurse);\n}\n\n#[ignore]\n#[test]\nfn should_transfer_wasmless_account_to_account_by_key() {\n    transfer_wasmless(WasmlessTransfer::AccountToAccountByKey);\n}\n\n#[ignore]\n#[test]\nfn should_transfer_wasmless_purse_to_purse() {\n    transfer_wasmless(WasmlessTransfer::PurseToPurse);\n}\n\n#[ignore]\n#[test]\nfn should_transfer_wasmless_purse_to_public_key() {\n    transfer_wasmless(WasmlessTransfer::PurseToPublicKey);\n}\n\n#[ignore]\n#[test]\nfn should_transfer_wasmless_amount_as_u64() {\n    transfer_wasmless(WasmlessTransfer::AmountAsU64);\n}\n\nenum WasmlessTransfer {\n    AccountMainPurseToPurse,\n    AccountMainPurseToAccountMainPurse,\n    AccountMainPurseToPublicKeyMainPurse,\n    PurseToPurse,\n    PurseToPublicKey,\n    AccountToAccountByKey,\n    AmountAsU64,\n}\n\nfn transfer_wasmless(wasmless_transfer: WasmlessTransfer) {\n    let create_account_2: bool = true;\n    let mut builder = init_wasmless_transform_builder(create_account_2);\n    let transfer_amount: U512 = U512::from(1000);\n    let id: Option<u64> = None;\n\n    let account_1_purse = builder\n        .get_entity_by_account_hash(*ACCOUNT_1_ADDR)\n        .expect(\"should get account 1\")\n        .main_purse();\n\n    let account_2_purse = builder\n        .get_entity_by_account_hash(*ACCOUNT_2_ADDR)\n        .expect(\"should get account 2\")\n        .main_purse();\n\n    let account_1_starting_balance = builder.get_purse_balance(account_1_purse);\n    let account_2_starting_balance = builder.get_purse_balance(account_2_purse);\n\n    let runtime_args = match wasmless_transfer {\n        WasmlessTransfer::AccountMainPurseToPurse => {\n            runtime_args! 
{\n                mint::ARG_TARGET => account_2_purse,\n                mint::ARG_AMOUNT => transfer_amount,\n                mint::ARG_ID => id\n            }\n        }\n        WasmlessTransfer::AccountMainPurseToAccountMainPurse => {\n            runtime_args! {\n                mint::ARG_TARGET => *ACCOUNT_2_ADDR,\n                mint::ARG_AMOUNT => transfer_amount,\n                mint::ARG_ID => id\n            }\n        }\n        WasmlessTransfer::AccountMainPurseToPublicKeyMainPurse => {\n            runtime_args! {\n                mint::ARG_TARGET => ACCOUNT_2_PUBLIC_KEY.clone(),\n                mint::ARG_AMOUNT => transfer_amount,\n                mint::ARG_ID => id\n            }\n        }\n        WasmlessTransfer::AccountToAccountByKey => {\n            runtime_args! {\n                mint::ARG_TARGET => Key::Account(*ACCOUNT_2_ADDR),\n                mint::ARG_AMOUNT => transfer_amount,\n                mint::ARG_ID => id\n            }\n        }\n        WasmlessTransfer::PurseToPurse => {\n            runtime_args! {\n                mint::ARG_SOURCE => account_1_purse,\n                mint::ARG_TARGET => account_2_purse,\n                mint::ARG_AMOUNT => transfer_amount,\n                mint::ARG_ID => id\n            }\n        }\n        WasmlessTransfer::PurseToPublicKey => {\n            runtime_args! {\n                mint::ARG_SOURCE => account_1_purse,\n                mint::ARG_TARGET => ACCOUNT_2_PUBLIC_KEY.clone(),\n                mint::ARG_AMOUNT => transfer_amount,\n                mint::ARG_ID => id\n            }\n        }\n        WasmlessTransfer::AmountAsU64 => {\n            runtime_args! 
{\n                mint::ARG_SOURCE => account_1_purse,\n                mint::ARG_TARGET => account_2_purse,\n                mint::ARG_AMOUNT => 1000u64,\n                mint::ARG_ID => id\n            }\n        }\n    };\n\n    let no_wasm_transfer_request = TransferRequestBuilder::new(0, AccountHash::default())\n        .with_args(runtime_args)\n        .with_initiator(*ACCOUNT_1_ADDR)\n        .build();\n\n    builder\n        .transfer_and_commit(no_wasm_transfer_request)\n        .expect_success();\n\n    assert_eq!(\n        account_1_starting_balance - transfer_amount,\n        builder.get_purse_balance(account_1_purse),\n        \"account 1 ending balance incorrect\"\n    );\n    assert_eq!(\n        account_2_starting_balance + transfer_amount,\n        builder.get_purse_balance(account_2_purse),\n        \"account 2 ending balance incorrect\"\n    );\n\n    // Make sure postconditions are met: payment purse has to be empty after finalization\n\n    let handle_payment_entity = builder.get_handle_payment_contract();\n\n    let key = handle_payment_entity\n        .named_keys()\n        .get(handle_payment::PAYMENT_PURSE_KEY)\n        .cloned()\n        .expect(\"should have named key\");\n\n    assert_eq!(\n        builder.get_purse_balance(key.into_uref().unwrap()),\n        U512::zero()\n    );\n}\n\n#[ignore]\n#[test]\nfn should_not_transfer_wasmless_to_self_by_addr() {\n    invalid_transfer_wasmless(InvalidWasmlessTransfer::TransferToSelfByAddr);\n}\n\n#[ignore]\n#[test]\nfn should_not_transfer_wasmless_to_self_by_key() {\n    invalid_transfer_wasmless(InvalidWasmlessTransfer::TransferToSelfByKey);\n}\n\n#[ignore]\n#[test]\nfn should_not_transfer_wasmless_to_self_by_uref() {\n    invalid_transfer_wasmless(InvalidWasmlessTransfer::TransferToSelfByURef);\n}\n\n#[ignore]\n#[test]\nfn should_not_transfer_wasmless_other_account_by_addr() {\n    invalid_transfer_wasmless(InvalidWasmlessTransfer::OtherSourceAccountByAddr);\n}\n\n#[ignore]\n#[test]\nfn 
should_not_transfer_wasmless_other_account_by_key() {\n    invalid_transfer_wasmless(InvalidWasmlessTransfer::OtherSourceAccountByKey);\n}\n\n#[ignore]\n#[test]\nfn should_not_transfer_wasmless_other_account_by_uref() {\n    invalid_transfer_wasmless(InvalidWasmlessTransfer::OtherSourceAccountByURef);\n}\n\n#[ignore]\n#[test]\nfn should_not_transfer_wasmless_missing_target() {\n    invalid_transfer_wasmless(InvalidWasmlessTransfer::MissingTarget);\n}\n\n#[ignore]\n#[test]\nfn should_not_transfer_wasmless_missing_amount() {\n    invalid_transfer_wasmless(InvalidWasmlessTransfer::MissingAmount);\n}\n\n#[ignore]\n#[test]\nfn should_not_transfer_wasmless_source_uref_nonexistent() {\n    invalid_transfer_wasmless(InvalidWasmlessTransfer::SourceURefNonexistent);\n}\n\n#[ignore]\n#[test]\nfn should_not_transfer_wasmless_target_uref_nonexistent() {\n    invalid_transfer_wasmless(InvalidWasmlessTransfer::TargetURefNonexistent);\n}\n\n#[ignore]\n#[test]\nfn should_not_transfer_wasmless_invalid_source_uref() {\n    invalid_transfer_wasmless(InvalidWasmlessTransfer::SourceURefNotPurse);\n}\n\n#[ignore]\n#[test]\nfn should_not_transfer_wasmless_invalid_target_uref() {\n    invalid_transfer_wasmless(InvalidWasmlessTransfer::TargetURefNotPurse);\n}\n\n#[ignore]\n#[test]\nfn should_not_transfer_wasmless_other_purse_to_self_purse() {\n    invalid_transfer_wasmless(InvalidWasmlessTransfer::OtherPurseToSelfPurse);\n}\n\nenum InvalidWasmlessTransfer {\n    TransferToSelfByAddr,\n    TransferToSelfByKey,\n    TransferToSelfByURef,\n    OtherSourceAccountByAddr,\n    OtherSourceAccountByKey,\n    OtherSourceAccountByURef,\n    MissingTarget,\n    MissingAmount,\n    SourceURefNotPurse,\n    TargetURefNotPurse,\n    SourceURefNonexistent,\n    TargetURefNonexistent,\n    OtherPurseToSelfPurse,\n}\n\nfn invalid_transfer_wasmless(invalid_wasmless_transfer: InvalidWasmlessTransfer) {\n    let create_account_2: bool = true;\n    let mut builder = 
init_wasmless_transform_builder(create_account_2);\n    let transfer_amount: U512 = U512::from(1000);\n    let id: Option<u64> = None;\n\n    let (addr, runtime_args, expected_error) = match invalid_wasmless_transfer {\n        InvalidWasmlessTransfer::TransferToSelfByAddr => {\n            // same source and target purse is invalid\n            (\n                *ACCOUNT_1_ADDR,\n                runtime_args! {\n                    mint::ARG_TARGET => *ACCOUNT_1_ADDR,\n                    mint::ARG_AMOUNT => transfer_amount,\n                    mint::ARG_ID => id,\n                },\n                CoreError::Transfer(TransferError::InvalidPurse),\n            )\n        }\n        InvalidWasmlessTransfer::TransferToSelfByKey => {\n            // same source and target purse is invalid\n            (\n                *ACCOUNT_1_ADDR,\n                runtime_args! {\n                    mint::ARG_TARGET => Key::Account(*ACCOUNT_1_ADDR),\n                    mint::ARG_AMOUNT => transfer_amount,\n                    mint::ARG_ID => id\n                },\n                CoreError::Transfer(TransferError::InvalidPurse),\n            )\n        }\n        InvalidWasmlessTransfer::TransferToSelfByURef => {\n            let account_1_purse = builder\n                .get_entity_by_account_hash(*ACCOUNT_1_ADDR)\n                .expect(\"should get account 1\")\n                .main_purse();\n            // same source and target purse is invalid\n            (\n                *ACCOUNT_1_ADDR,\n                runtime_args! 
{\n                    mint::ARG_TARGET => account_1_purse,\n                    mint::ARG_AMOUNT => transfer_amount,\n                    mint::ARG_ID => id\n                },\n                CoreError::Transfer(TransferError::InvalidPurse),\n            )\n        }\n        InvalidWasmlessTransfer::OtherSourceAccountByAddr => {\n            // passes another account's addr as source\n            (\n                *ACCOUNT_1_ADDR,\n                runtime_args! {\n                    mint::ARG_SOURCE => *ACCOUNT_2_ADDR,\n                    mint::ARG_TARGET => *ACCOUNT_1_ADDR,\n                    mint::ARG_AMOUNT => transfer_amount,\n                    mint::ARG_ID => id\n                },\n                CoreError::Transfer(TransferError::InvalidArgument),\n            )\n        }\n        InvalidWasmlessTransfer::OtherSourceAccountByKey => {\n            // passes another account's Key::Account as source\n            (\n                *ACCOUNT_1_ADDR,\n                runtime_args! {\n                    mint::ARG_SOURCE => Key::Account(*ACCOUNT_2_ADDR),\n                    mint::ARG_TARGET => *ACCOUNT_1_ADDR,\n                    mint::ARG_AMOUNT => transfer_amount,\n                    mint::ARG_ID => id\n                },\n                CoreError::Transfer(TransferError::InvalidArgument),\n            )\n        }\n        InvalidWasmlessTransfer::OtherSourceAccountByURef => {\n            let account_2_purse = builder\n                .get_entity_by_account_hash(*ACCOUNT_2_ADDR)\n                .expect(\"should get account 1\")\n                .main_purse();\n            // passes another account's purse as source\n            (\n                *ACCOUNT_1_ADDR,\n                runtime_args! 
{\n                    mint::ARG_SOURCE => account_2_purse,\n                    mint::ARG_TARGET => *ACCOUNT_1_ADDR,\n                    mint::ARG_AMOUNT => transfer_amount,\n                    mint::ARG_ID => id\n                },\n                CoreError::Transfer(TransferError::ForgedReference(account_2_purse)),\n            )\n        }\n        InvalidWasmlessTransfer::MissingTarget => {\n            // does not pass target\n            (\n                *ACCOUNT_1_ADDR,\n                runtime_args! {\n                    mint::ARG_AMOUNT => transfer_amount,\n                    mint::ARG_ID => id\n                },\n                CoreError::Transfer(TransferError::MissingArgument),\n            )\n        }\n        InvalidWasmlessTransfer::MissingAmount => {\n            // does not pass amount\n            (\n                *ACCOUNT_1_ADDR,\n                runtime_args! {\n                    mint::ARG_TARGET => *ACCOUNT_2_ADDR,\n                    mint::ARG_ID => id\n                },\n                CoreError::Transfer(TransferError::MissingArgument),\n            )\n        }\n        InvalidWasmlessTransfer::SourceURefNotPurse => {\n            let not_purse_uref = get_default_account_named_uref(&mut builder, NON_UREF_NAMED_KEY);\n            // passes an invalid uref as source (an existing uref that is not a purse uref)\n            (\n                *DEFAULT_ACCOUNT_ADDR,\n                runtime_args! 
{\n                    mint::ARG_SOURCE => not_purse_uref,\n                    mint::ARG_TARGET => *ACCOUNT_1_ADDR,\n                    mint::ARG_AMOUNT => transfer_amount,\n                    mint::ARG_ID => id\n                },\n                CoreError::Transfer(TransferError::InvalidPurse),\n            )\n        }\n        InvalidWasmlessTransfer::TargetURefNotPurse => {\n            let not_purse_uref = get_default_account_named_uref(&mut builder, NON_UREF_NAMED_KEY);\n            // passes an invalid uref as target (an existing uref that is not a purse uref)\n            (\n                *DEFAULT_ACCOUNT_ADDR,\n                runtime_args! {\n                    mint::ARG_TARGET => not_purse_uref,\n                    mint::ARG_AMOUNT => transfer_amount,\n                    mint::ARG_ID => id\n                },\n                CoreError::Transfer(TransferError::InvalidPurse),\n            )\n        }\n        InvalidWasmlessTransfer::SourceURefNonexistent => {\n            let nonexistent_purse = URef::new([255; 32], AccessRights::READ_ADD_WRITE);\n            // passes a nonexistent uref as source; considered to be a forged reference as when\n            // a caller passes a uref as source they are claiming it is a purse and that they have\n            // write access to it / are allowed to take funds from it.\n            (\n                *ACCOUNT_1_ADDR,\n                runtime_args! 
{\n                    mint::ARG_SOURCE => nonexistent_purse,\n                    mint::ARG_TARGET => *ACCOUNT_1_ADDR,\n                    mint::ARG_AMOUNT => transfer_amount,\n                    mint::ARG_ID => id\n                },\n                CoreError::Transfer(TransferError::ForgedReference(nonexistent_purse)),\n            )\n        }\n        InvalidWasmlessTransfer::TargetURefNonexistent => {\n            let nonexistent_purse = URef::new([255; 32], AccessRights::READ_ADD_WRITE);\n            // passes a nonexistent uref as target\n            (\n                *ACCOUNT_1_ADDR,\n                runtime_args! {\n                    mint::ARG_TARGET => nonexistent_purse,\n                    mint::ARG_AMOUNT => transfer_amount,\n                    mint::ARG_ID => id\n                },\n                CoreError::Transfer(TransferError::InvalidPurse),\n            )\n        }\n        InvalidWasmlessTransfer::OtherPurseToSelfPurse => {\n            let account_1_purse = builder\n                .get_entity_by_account_hash(*ACCOUNT_1_ADDR)\n                .expect(\"should get account 1\")\n                .main_purse();\n            let account_2_purse = builder\n                .get_entity_by_account_hash(*ACCOUNT_2_ADDR)\n                .expect(\"should get account 1\")\n                .main_purse();\n\n            // attempts to take from an unowned purse\n            (\n                *ACCOUNT_1_ADDR,\n                runtime_args! 
{\n                    mint::ARG_SOURCE => account_2_purse,\n                    mint::ARG_TARGET => account_1_purse,\n                    mint::ARG_AMOUNT => transfer_amount,\n                    mint::ARG_ID => id\n                },\n                CoreError::Transfer(TransferError::ForgedReference(account_2_purse)),\n            )\n        }\n    };\n\n    let no_wasm_transfer_request = TransferRequestBuilder::new(0, AccountHash::default())\n        .with_args(runtime_args)\n        .with_initiator(addr)\n        .build();\n\n    let account_1_purse = builder\n        .get_entity_by_account_hash(*ACCOUNT_1_ADDR)\n        .expect(\"should get account 1\")\n        .main_purse();\n\n    let account_1_starting_balance = builder.get_purse_balance(account_1_purse);\n\n    builder.transfer_and_commit(no_wasm_transfer_request);\n\n    let result = builder\n        .get_last_exec_result()\n        .expect(\"Expected to be called after run()\");\n\n    let error = result.error().expect(\"should have error\");\n\n    let account_1_closing_balance = builder.get_purse_balance(account_1_purse);\n\n    assert_eq!(\n        format!(\"{}\", &expected_error),\n        format!(\"{}\", error),\n        \"expected_error: {} actual error: {}\",\n        expected_error,\n        error\n    );\n\n    // No balance change expected in invalid transfer tests\n    assert_eq!(account_1_starting_balance, account_1_closing_balance);\n\n    // Make sure postconditions are met: payment purse has to be empty after finalization\n    let handle_payment_entity = builder.get_handle_payment_contract();\n    let key = handle_payment_entity\n        .named_keys()\n        .get(handle_payment::PAYMENT_PURSE_KEY)\n        .cloned()\n        .expect(\"should have named key\");\n\n    assert_eq!(\n        builder.get_purse_balance(key.into_uref().unwrap()),\n        U512::zero()\n    );\n}\n\n#[ignore]\n#[test]\nfn transfer_wasmless_should_create_target_if_it_doesnt_exist() {\n    let create_account_2: 
bool = false;\n    let mut builder = init_wasmless_transform_builder(create_account_2);\n    let transfer_amount: U512 = U512::from(1000);\n\n    let account_1_purse = builder\n        .get_entity_by_account_hash(*ACCOUNT_1_ADDR)\n        .expect(\"should get account 1\")\n        .main_purse();\n\n    assert_eq!(\n        builder.get_entity_by_account_hash(*ACCOUNT_2_ADDR),\n        None,\n        \"account 2 should not exist\"\n    );\n\n    let account_1_starting_balance = builder.get_purse_balance(account_1_purse);\n\n    let no_wasm_transfer_request = TransferRequestBuilder::new(transfer_amount, *ACCOUNT_2_ADDR)\n        .with_initiator(*ACCOUNT_1_ADDR)\n        .build();\n\n    builder\n        .transfer_and_commit(no_wasm_transfer_request)\n        .expect_success();\n\n    let account_2 = builder\n        .get_entity_by_account_hash(*ACCOUNT_2_ADDR)\n        .expect(\"account 2 should exist\");\n\n    let account_2_starting_balance = builder.get_purse_balance(account_2.main_purse());\n\n    assert_eq!(\n        account_1_starting_balance - transfer_amount,\n        builder.get_purse_balance(account_1_purse),\n        \"account 1 ending balance incorrect\"\n    );\n    assert_eq!(\n        account_2_starting_balance, transfer_amount,\n        \"account 2 ending balance incorrect\"\n    );\n}\n\nfn get_default_account_named_uref(builder: &mut LmdbWasmTestBuilder, name: &str) -> URef {\n    let default_account = builder\n        .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR)\n        .expect(\"default account should exist\");\n    default_account\n        .named_keys()\n        .get(name)\n        .expect(\"default account should have named key\")\n        .as_uref()\n        .expect(\"should be a uref\")\n        .to_owned()\n}\n\nfn init_wasmless_transform_builder(create_account_2: bool) -> LmdbWasmTestBuilder {\n    let mut builder = LmdbWasmTestBuilder::default();\n\n    let id: Option<u64> = None;\n\n    let create_account_1_request = 
ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_PURSE_TO_ACCOUNT,\n        runtime_args! {\n            mint::ARG_TARGET => *ACCOUNT_1_ADDR,\n            mint::ARG_AMOUNT => *DEFAULT_PAYMENT,\n            mint::ARG_ID => id\n        },\n    )\n    .build();\n\n    builder\n        .run_genesis(LOCAL_GENESIS_REQUEST.clone())\n        .exec(create_account_1_request)\n        .expect_success()\n        .commit();\n\n    if !create_account_2 {\n        return builder;\n    }\n\n    let create_account_2_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_TRANSFER_PURSE_TO_ACCOUNT,\n        runtime_args! {\n            mint::ARG_TARGET => *ACCOUNT_2_ADDR,\n            mint::ARG_AMOUNT => *DEFAULT_PAYMENT,\n            mint::ARG_ID => id\n        },\n    )\n    .build();\n\n    builder\n        .exec(create_account_2_request)\n        .commit()\n        .expect_success();\n\n    let new_named_uref_request = ExecuteRequestBuilder::standard(\n        *DEFAULT_ACCOUNT_ADDR,\n        CONTRACT_NEW_NAMED_UREF,\n        runtime_args! 
{\n            ARG_UREF_NAME => NON_UREF_NAMED_KEY,\n        },\n    )\n    .build();\n\n    builder\n        .exec(new_named_uref_request)\n        .commit()\n        .expect_success();\n\n    builder\n}\n\n#[ignore]\n#[test]\nfn transfer_wasmless_onward() {\n    let create_account_2: bool = false;\n    let mut builder = init_wasmless_transform_builder(create_account_2);\n    let account_1_to_account_2_amount: U512 = U512::one();\n    let account_2_to_account_1_amount: U512 = U512::one();\n\n    let account_1_purse = builder\n        .get_entity_by_account_hash(*ACCOUNT_1_ADDR)\n        .expect(\"should get account 1\")\n        .main_purse();\n\n    assert_eq!(\n        builder.get_entity_by_account_hash(*ACCOUNT_2_ADDR),\n        None,\n        \"account 2 should not exist\"\n    );\n\n    let account_1_starting_balance = builder.get_purse_balance(account_1_purse);\n\n    let no_wasm_transfer_request_1 =\n        TransferRequestBuilder::new(account_1_to_account_2_amount, *ACCOUNT_2_ADDR)\n            .with_initiator(*ACCOUNT_1_ADDR)\n            .build();\n\n    builder\n        .transfer_and_commit(no_wasm_transfer_request_1)\n        .expect_success();\n\n    let account_2 = builder\n        .get_entity_by_account_hash(*ACCOUNT_2_ADDR)\n        .expect(\"account 2 should exist\");\n\n    let account_2_starting_balance = builder.get_purse_balance(account_2.main_purse());\n\n    assert_eq!(\n        account_1_starting_balance - account_1_to_account_2_amount,\n        builder.get_purse_balance(account_1_purse),\n        \"account 1 ending balance incorrect\"\n    );\n    assert_eq!(\n        account_2_starting_balance, account_1_to_account_2_amount,\n        \"account 2 ending balance incorrect\"\n    );\n\n    // Another transfer but this time created account tries to do a transfer\n    let no_wasm_transfer_request_2 =\n        TransferRequestBuilder::new(account_2_to_account_1_amount, *ACCOUNT_1_ADDR)\n            .with_initiator(*ACCOUNT_2_ADDR)\n            
.build();\n\n    builder\n        .transfer_and_commit(no_wasm_transfer_request_2)\n        .expect_success();\n}\n\n#[ignore]\n#[test]\nfn transfer_wasmless_should_transfer_funds_after_paying_for_transfer() {\n    let wasmless_transfer_gas_cost = Gas::from(MintCosts::default().transfer);\n    let wasmless_transfer_cost = Motes::from_gas(\n        wasmless_transfer_gas_cost,\n        WASMLESS_TRANSFER_FIXED_GAS_PRICE,\n    )\n    .expect(\"gas overflow\");\n\n    let create_account_2: bool = false;\n    let mut builder = init_wasmless_transform_builder(create_account_2);\n    let account_1_to_account_2_amount: U512 = wasmless_transfer_cost.value() + U512::one();\n    // This transfer should succeed as after paying for execution of wasmless transfer account_2's\n    // main purse would contain exactly 1 token.\n    let account_2_to_account_1_amount: U512 = U512::one();\n\n    let account_1_purse = builder\n        .get_entity_by_account_hash(*ACCOUNT_1_ADDR)\n        .expect(\"should get account 1\")\n        .main_purse();\n\n    assert_eq!(\n        builder.get_entity_by_account_hash(*ACCOUNT_2_ADDR),\n        None,\n        \"account 2 should not exist\"\n    );\n\n    let account_1_starting_balance = builder.get_purse_balance(account_1_purse);\n\n    let no_wasm_transfer_request_1 =\n        TransferRequestBuilder::new(account_1_to_account_2_amount, *ACCOUNT_2_ADDR)\n            .with_initiator(*ACCOUNT_1_ADDR)\n            .build();\n\n    builder\n        .transfer_and_commit(no_wasm_transfer_request_1)\n        .expect_success();\n\n    let account_2 = builder\n        .get_entity_by_account_hash(*ACCOUNT_2_ADDR)\n        .expect(\"account 2 should exist\");\n\n    let account_2_starting_balance = builder.get_purse_balance(account_2.main_purse());\n\n    assert_eq!(\n        account_1_starting_balance - account_1_to_account_2_amount,\n        builder.get_purse_balance(account_1_purse),\n        \"account 1 ending balance incorrect\"\n    );\n    assert_eq!(\n 
       account_2_starting_balance, account_1_to_account_2_amount,\n        \"account 2 ending balance incorrect\"\n    );\n\n    // Another transfer but this time created account tries to do a transfer\n    let no_wasm_transfer_request_2 =\n        TransferRequestBuilder::new(account_2_to_account_1_amount, *ACCOUNT_1_ADDR)\n            .with_initiator(*ACCOUNT_2_ADDR)\n            .build();\n\n    builder\n        .transfer_and_commit(no_wasm_transfer_request_2)\n        .expect_success();\n}\n\n#[ignore]\n#[test]\nfn transfer_wasmless_should_fail_with_secondary_purse_insufficient_funds() {\n    let create_account_2: bool = false;\n    let mut builder = init_wasmless_transform_builder(create_account_2);\n    let account_1_to_account_2_amount: U512 = U512::from(1000);\n\n    let create_purse_request = ExecuteRequestBuilder::standard(\n        *ACCOUNT_1_ADDR,\n        CONTRACT_CREATE_PURSE_01,\n        runtime_args! { ARG_PURSE_NAME => TEST_PURSE_NAME },\n    )\n    .build();\n    builder.exec(create_purse_request).commit().expect_success();\n\n    let account_1 = builder\n        .get_entity_with_named_keys_by_account_hash(*ACCOUNT_1_ADDR)\n        .expect(\"should get account 1\");\n\n    let account_1_purse = account_1\n        .named_keys()\n        .get(TEST_PURSE_NAME)\n        .expect(\"should have purse\")\n        .into_uref()\n        .expect(\"should have purse uref\");\n\n    assert_eq!(builder.get_purse_balance(account_1_purse), U512::zero());\n\n    let account_1_starting_balance = builder.get_purse_balance(account_1_purse);\n    assert_eq!(account_1_starting_balance, U512::zero());\n\n    let no_wasm_transfer_request_1 =\n        TransferRequestBuilder::new(account_1_to_account_2_amount, *ACCOUNT_2_ADDR)\n            .with_source(account_1_purse)\n            .with_initiator(*ACCOUNT_1_ADDR)\n            .build();\n\n    builder\n        .transfer_and_commit(no_wasm_transfer_request_1)\n        .expect_failure();\n}\n"
  },
  {
    "path": "execution_engine_testing/tests/src/wasm_utils.rs",
    "content": "//! Wasm helpers.\nuse std::{collections::BTreeSet, fmt::Write};\n\nuse casper_wasm::{\n    builder,\n    elements::{Instruction, Instructions},\n};\nuse walrus::Module;\n\nuse casper_types::addressable_entity::DEFAULT_ENTRY_POINT_NAME;\n\n/// Creates minimal session code that does nothing\npub fn do_nothing_bytes() -> Vec<u8> {\n    let module = builder::module()\n        .function()\n        // A signature with 0 params and no return type\n        .signature()\n        .build()\n        .body()\n        .build()\n        .build()\n        // Export above function\n        .export()\n        .field(DEFAULT_ENTRY_POINT_NAME)\n        .build()\n        // Memory section is mandatory\n        .memory()\n        .build()\n        .build();\n    casper_wasm::serialize(module).expect(\"should serialize\")\n}\n\n/// Creates minimal session code that does only one \"nop\" opcode\npub fn do_minimum_bytes() -> Vec<u8> {\n    let module = builder::module()\n        .function()\n        // A signature with 0 params and no return type\n        .signature()\n        .build()\n        .body()\n        .with_instructions(Instructions::new(vec![Instruction::Nop, Instruction::End]))\n        .build()\n        .build()\n        // Export above function\n        .export()\n        .field(DEFAULT_ENTRY_POINT_NAME)\n        .build()\n        // Memory section is mandatory\n        .memory()\n        .build()\n        .build();\n    casper_wasm::serialize(module).expect(\"should serialize\")\n}\n\n/// Creates minimal session code that contains a function with arbitrary number of parameters.\npub fn make_n_arg_call_bytes(\n    arity: usize,\n    arg_type: &str,\n) -> Result<Vec<u8>, Box<dyn std::error::Error>> {\n    let mut call_args = String::new();\n    for i in 0..arity {\n        write!(call_args, \"({}.const {}) \", arg_type, i)?;\n    }\n\n    let mut func_params = String::new();\n    for i in 0..arity {\n        write!(func_params, \"(param $arg{} {}) \", i, 
arg_type)?;\n    }\n\n    // This wasm module contains a function with a specified amount of arguments in it.\n    let wat = format!(\n        r#\"(module\n        (func $call (call $func {call_args}) (return))\n        (func $func {func_params} (return))\n        (export \"func\" (func $func))\n        (export \"call\" (func $call))\n        (memory $memory 1)\n      )\"#\n    );\n    let module_bytes = wat::parse_str(wat)?;\n    Ok(module_bytes)\n}\n\n/// Returns a set of exports for a given wasm module bytes\npub fn get_wasm_exports(module_bytes: &[u8]) -> BTreeSet<String> {\n    let module = Module::from_buffer(module_bytes).expect(\"should have walid wasm bytes\");\n    module\n        .exports\n        .iter()\n        .map(|export| export.name.clone())\n        .collect()\n}\n"
  },
  {
    "path": "executor/wasm/Cargo.toml",
    "content": "[package]\nname = \"casper-executor-wasm\"\nversion = \"0.1.3\"\nedition = \"2021\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\ndescription = \"Casper executor wasm package\"\nhomepage = \"https://casper.network\"\nrepository = \"https://github.com/casper-network/casper-node/tree/dev/executor/wasm\"\nlicense = \"Apache-2.0\"\n\n[dependencies]\nblake2 = \"0.10\"\nborsh = { version = \"1.5\", features = [\"derive\"] }\nbytes = \"1.10\"\ncasper-executor-wasm-common = { version = \"0.1.3\", path = \"../wasm_common\" }\ncasper-executor-wasm-host = { version = \"0.1.3\", path = \"../wasm_host\" }\ncasper-executor-wasm-interface = { version = \"0.1.3\", path = \"../wasm_interface\" }\ncasper-executor-wasmer-backend = { version = \"0.1.3\", path = \"../wasmer_backend\" }\ncasper-storage = { version = \"5.0.0\", path = \"../../storage\" }\ncasper-types = { version = \"7.0.0\", path = \"../../types\", features = [\"std\"] }\ncasper-execution-engine = { version = \"9.0.0\", path = \"../../execution_engine\", features = [\n    \"test-support\",\n] }\ndigest = \"0.10.7\"\nparking_lot = \"0.12.1\"\nthiserror = \"2.0\"\ntracing = \"0.1.40\"\nbase16 = \"0.2.1\"\n\n[dev-dependencies]\ntempfile = \"3.10.1\"\nonce_cell = \"1.19.0\"\nfs_extra = \"1.3.0\"\nserde_json = \"1.0.127\"\nitertools = \"0.14.0\"\n"
  },
  {
    "path": "executor/wasm/src/install.rs",
    "content": "use std::sync::Arc;\n\nuse bytes::Bytes;\nuse casper_executor_wasm_common::error::CallError;\nuse casper_executor_wasm_interface::{executor::ExecuteError, GasUsage};\nuse casper_storage::{global_state::error::Error as GlobalStateError, AddressGenerator};\nuse casper_types::{\n    account::AccountHash, execution::Effects, BlockHash, BlockTime, Digest, TransactionHash,\n};\nuse parking_lot::RwLock;\nuse thiserror::Error;\n\n// NOTE: One struct that represents both InstallContractRequest and ExecuteRequest.\n\n/// Store contract request.\npub struct InstallContractRequest {\n    /// Initiator's address.\n    pub(crate) initiator: AccountHash,\n    /// Gas limit.\n    pub(crate) gas_limit: u64,\n    /// Wasm bytes of the contract to be stored.\n    pub(crate) wasm_bytes: Bytes,\n    /// Constructor entry point name.\n    pub(crate) entry_point: Option<String>,\n    /// Input data for the constructor.\n    pub(crate) input: Option<Bytes>,\n    /// Attached tokens value that to be transferred into the constructor.\n    pub(crate) transferred_value: u64,\n    /// Transaction hash.\n    pub(crate) transaction_hash: TransactionHash,\n    /// Address generator.\n    pub(crate) address_generator: Arc<RwLock<AddressGenerator>>,\n    /// Chain name.\n    pub(crate) chain_name: Arc<str>,\n    /// Block time.\n    pub(crate) block_time: BlockTime,\n    /// State hash.\n    pub(crate) state_hash: Digest,\n    /// Parent block hash.\n    pub(crate) parent_block_hash: BlockHash,\n    /// Block height.\n    pub(crate) block_height: u64,\n    /// Seed used for smart contract hash computation.\n    pub(crate) seed: Option<[u8; 32]>,\n}\n\n#[derive(Default)]\npub struct InstallContractRequestBuilder {\n    initiator: Option<AccountHash>,\n    gas_limit: Option<u64>,\n    wasm_bytes: Option<Bytes>,\n    entry_point: Option<String>,\n    input: Option<Bytes>,\n    transferred_value: Option<u64>,\n    transaction_hash: Option<TransactionHash>,\n    address_generator: 
Option<Arc<RwLock<AddressGenerator>>>,\n    chain_name: Option<Arc<str>>,\n    block_time: Option<BlockTime>,\n    state_hash: Option<Digest>,\n    parent_block_hash: Option<BlockHash>,\n    block_height: Option<u64>,\n    seed: Option<[u8; 32]>,\n}\n\nimpl InstallContractRequestBuilder {\n    pub fn with_initiator(mut self, initiator: AccountHash) -> Self {\n        self.initiator = Some(initiator);\n        self\n    }\n\n    pub fn with_gas_limit(mut self, gas_limit: u64) -> Self {\n        self.gas_limit = Some(gas_limit);\n        self\n    }\n\n    pub fn with_wasm_bytes(mut self, wasm_bytes: Bytes) -> Self {\n        self.wasm_bytes = Some(wasm_bytes);\n        self\n    }\n\n    pub fn with_entry_point(mut self, entry_point: String) -> Self {\n        self.entry_point = Some(entry_point);\n        self\n    }\n\n    pub fn with_input(mut self, input: Bytes) -> Self {\n        self.input = Some(input);\n        self\n    }\n\n    pub fn with_transferred_value(mut self, transferred_value: u64) -> Self {\n        self.transferred_value = Some(transferred_value);\n        self\n    }\n\n    pub fn with_address_generator(mut self, address_generator: AddressGenerator) -> Self {\n        self.address_generator = Some(Arc::new(RwLock::new(address_generator)));\n        self\n    }\n\n    pub fn with_shared_address_generator(\n        mut self,\n        address_generator: Arc<RwLock<AddressGenerator>>,\n    ) -> Self {\n        self.address_generator = Some(address_generator);\n        self\n    }\n\n    pub fn with_transaction_hash(mut self, transaction_hash: TransactionHash) -> Self {\n        self.transaction_hash = Some(transaction_hash);\n        self\n    }\n\n    pub fn with_chain_name<T: Into<Arc<str>>>(mut self, chain_name: T) -> Self {\n        self.chain_name = Some(chain_name.into());\n        self\n    }\n\n    pub fn with_block_time(mut self, block_time: BlockTime) -> Self {\n        self.block_time = Some(block_time);\n        self\n    }\n\n    pub 
fn with_seed(mut self, seed: [u8; 32]) -> Self {\n        self.seed = Some(seed);\n        self\n    }\n\n    pub fn with_state_hash(mut self, state_hash: Digest) -> Self {\n        self.state_hash = Some(state_hash);\n        self\n    }\n\n    pub fn with_parent_block_hash(mut self, parent_block_hash: BlockHash) -> Self {\n        self.parent_block_hash = Some(parent_block_hash);\n        self\n    }\n\n    pub fn with_block_height(mut self, block_height: u64) -> Self {\n        self.block_height = Some(block_height);\n        self\n    }\n\n    pub fn build(self) -> Result<InstallContractRequest, &'static str> {\n        let initiator = self.initiator.ok_or(\"Initiator not set\")?;\n        let gas_limit = self.gas_limit.ok_or(\"Gas limit not set\")?;\n        let wasm_bytes = self.wasm_bytes.ok_or(\"Wasm bytes not set\")?;\n        let entry_point = self.entry_point;\n        let input = self.input;\n        let transferred_value = self.transferred_value.ok_or(\"Value not set\")?;\n        let address_generator = self.address_generator.ok_or(\"Address generator not set\")?;\n        let transaction_hash = self.transaction_hash.ok_or(\"Transaction hash not set\")?;\n        let chain_name = self.chain_name.ok_or(\"Chain name not set\")?;\n        let block_time = self.block_time.ok_or(\"Block time not set\")?;\n        let seed = self.seed;\n        let state_hash = self.state_hash.ok_or(\"State hash not set\")?;\n        let parent_block_hash = self.parent_block_hash.ok_or(\"Parent block hash not set\")?;\n        let block_height = self.block_height.ok_or(\"Block height not set\")?;\n        Ok(InstallContractRequest {\n            initiator,\n            gas_limit,\n            wasm_bytes,\n            entry_point,\n            input,\n            transferred_value,\n            address_generator,\n            transaction_hash,\n            chain_name,\n            block_time,\n            seed,\n            state_hash,\n            parent_block_hash,\n       
     block_height,\n        })\n    }\n}\n\n/// Result of executing a Wasm contract.\n#[derive(Debug)]\npub struct InstallContractResult {\n    /// Smart contract address.\n    pub(crate) smart_contract_addr: [u8; 32],\n    /// Gas usage.\n    pub(crate) gas_usage: GasUsage,\n    /// Effects produced by the execution.\n    pub(crate) effects: Effects,\n    /// Post state hash after installation.\n    pub(crate) post_state_hash: Digest,\n}\nimpl InstallContractResult {\n    pub fn effects(&self) -> &Effects {\n        &self.effects\n    }\n\n    pub fn gas_usage(&self) -> &GasUsage {\n        &self.gas_usage\n    }\n\n    pub fn post_state_hash(&self) -> Digest {\n        self.post_state_hash\n    }\n\n    pub fn smart_contract_addr(&self) -> &[u8; 32] {\n        &self.smart_contract_addr\n    }\n}\n\n#[derive(Debug, Error)]\npub enum InstallContractError {\n    #[error(\"system contract error: {0}\")]\n    SystemContract(CallError),\n\n    #[error(\"execute: {0}\")]\n    Execute(ExecuteError),\n\n    #[error(\"Global state error: {0}\")]\n    GlobalState(#[from] GlobalStateError),\n\n    #[error(\"constructor error: {host_error}\")]\n    Constructor { host_error: CallError },\n}\n"
  },
  {
    "path": "executor/wasm/src/lib.rs",
    "content": "pub mod install;\npub(crate) mod system;\n\nuse std::{\n    collections::{BTreeSet, VecDeque},\n    sync::Arc,\n};\n\nuse bytes::Bytes;\nuse casper_execution_engine::{\n    engine_state::{BlockInfo, Error as EngineError, ExecutableItem, ExecutionEngineV1},\n    execution::ExecError,\n};\nuse casper_executor_wasm_common::{\n    chain_utils,\n    error::{CallError, TrapCode},\n    flags::ReturnFlags,\n};\nuse casper_executor_wasm_host::context::Context;\nuse casper_executor_wasm_interface::{\n    executor::{\n        ExecuteError, ExecuteRequest, ExecuteRequestBuilder, ExecuteResult,\n        ExecuteWithProviderError, ExecuteWithProviderResult, ExecutionKind, Executor,\n    },\n    ConfigBuilder, GasUsage, VMError, WasmInstance,\n};\nuse casper_executor_wasmer_backend::WasmerEngine;\nuse casper_storage::{\n    global_state::{\n        error::Error as GlobalStateError,\n        state::{CommitProvider, StateProvider},\n        GlobalStateReader,\n    },\n    TrackingCopy,\n};\nuse casper_types::{\n    account::AccountHash,\n    addressable_entity::{ActionThresholds, AssociatedKeys},\n    bytesrepr, AddressableEntity, ByteCode, ByteCodeAddr, ByteCodeHash, ByteCodeKind,\n    ContractRuntimeTag, Digest, EntityAddr, EntityKind, Gas, Groups, InitiatorAddr, Key,\n    MessageLimits, Package, PackageHash, PackageStatus, Phase, ProtocolVersion, StorageCosts,\n    StoredValue, TransactionInvocationTarget, URef, WasmV2Config, U512,\n};\nuse install::{InstallContractError, InstallContractRequest, InstallContractResult};\nuse parking_lot::RwLock;\nuse system::{MintArgs, MintTransferArgs};\nuse tracing::{error, warn};\n\nconst DEFAULT_WASM_ENTRY_POINT: &str = \"call\";\n\nconst DEFAULT_MINT_TRANSFER_GAS_COST: u64 = 1; // NOTE: Require gas while executing and set this to at least 100_000_000 (or use chainspec)\n\n#[derive(Copy, Clone, Debug)]\npub enum ExecutorKind {\n    /// Ahead of time compiled Wasm.\n    ///\n    /// This is the default executor kind.\n    
Compiled,\n}\n\n#[derive(Copy, Clone, Debug)]\npub struct ExecutorConfig {\n    memory_limit: u32,\n    executor_kind: ExecutorKind,\n    wasm_config: WasmV2Config,\n    storage_costs: StorageCosts,\n    message_limits: MessageLimits,\n}\n\nimpl ExecutorConfigBuilder {\n    pub fn new() -> ExecutorConfigBuilder {\n        ExecutorConfigBuilder::default()\n    }\n}\n\n#[derive(Default)]\npub struct ExecutorConfigBuilder {\n    memory_limit: Option<u32>,\n    executor_kind: Option<ExecutorKind>,\n    wasm_config: Option<WasmV2Config>,\n    storage_costs: Option<StorageCosts>,\n    message_limits: Option<MessageLimits>,\n}\n\nimpl ExecutorConfigBuilder {\n    /// Set the memory limit.\n    pub fn with_memory_limit(mut self, memory_limit: u32) -> Self {\n        self.memory_limit = Some(memory_limit);\n        self\n    }\n\n    /// Set the executor kind.\n    pub fn with_executor_kind(mut self, executor_kind: ExecutorKind) -> Self {\n        self.executor_kind = Some(executor_kind);\n        self\n    }\n\n    /// Set the wasm config.\n    pub fn with_wasm_config(mut self, wasm_config: WasmV2Config) -> Self {\n        self.wasm_config = Some(wasm_config);\n        self\n    }\n\n    /// Set the wasm config.\n    pub fn with_storage_costs(mut self, storage_costs: StorageCosts) -> Self {\n        self.storage_costs = Some(storage_costs);\n        self\n    }\n\n    /// Set the message limits.\n    pub fn with_message_limits(mut self, message_limits: MessageLimits) -> Self {\n        self.message_limits = Some(message_limits);\n        self\n    }\n\n    /// Build the `ExecutorConfig`.\n    pub fn build(self) -> Result<ExecutorConfig, &'static str> {\n        let memory_limit = self.memory_limit.ok_or(\"Memory limit is not set\")?;\n        let executor_kind = self.executor_kind.ok_or(\"Executor kind is not set\")?;\n        let wasm_config = self.wasm_config.ok_or(\"Wasm config is not set\")?;\n        let storage_costs = self.storage_costs.ok_or(\"Storage costs are not 
set\")?;\n        let message_limits = self.message_limits.ok_or(\"Message limits are not set\")?;\n\n        Ok(ExecutorConfig {\n            memory_limit,\n            executor_kind,\n            wasm_config,\n            storage_costs,\n            message_limits,\n        })\n    }\n}\n\n#[derive(Clone)]\npub struct ExecutorV2 {\n    config: ExecutorConfig,\n    compiled_wasm_engine: Arc<WasmerEngine>,\n    execution_stack: Arc<RwLock<VecDeque<ExecutionKind>>>,\n    execution_engine_v1: Arc<ExecutionEngineV1>,\n}\n\nimpl ExecutorV2 {\n    pub fn install_contract<R>(\n        &self,\n        state_root_hash: Digest,\n        state_provider: &R,\n        install_request: InstallContractRequest,\n    ) -> Result<InstallContractResult, InstallContractError>\n    where\n        R: StateProvider + CommitProvider,\n        <R as StateProvider>::Reader: 'static,\n    {\n        let mut tracking_copy = match state_provider.checkout(state_root_hash) {\n            Ok(Some(tracking_copy)) => {\n                TrackingCopy::new(tracking_copy, 1, state_provider.enable_entity())\n            }\n            Ok(None) => {\n                return Err(InstallContractError::GlobalState(\n                    GlobalStateError::RootNotFound,\n                ))\n            }\n            Err(error) => return Err(error.into()),\n        };\n\n        let InstallContractRequest {\n            initiator,\n            gas_limit,\n            wasm_bytes,\n            entry_point,\n            input,\n            transferred_value,\n            address_generator,\n            transaction_hash,\n            chain_name,\n            block_time,\n            seed,\n            state_hash,\n            parent_block_hash,\n            block_height,\n        } = install_request;\n\n        let bytecode_hash = chain_utils::compute_wasm_bytecode_hash(&wasm_bytes);\n\n        let caller_key = Key::Account(initiator);\n        let _source_purse = get_purse_for_entity(&mut tracking_copy, 
caller_key);\n\n        // 1. Store package hash\n        let smart_contract_addr: [u8; 32] = chain_utils::compute_predictable_address(\n            chain_name.as_bytes(),\n            initiator.value(),\n            bytecode_hash,\n            seed,\n        );\n\n        let mut smart_contract = Package::new(\n            Default::default(),\n            Default::default(),\n            Groups::default(),\n            PackageStatus::Unlocked,\n        );\n\n        let protocol_version = ProtocolVersion::V2_0_0;\n        let protocol_version_major = protocol_version.value().major;\n\n        let next_version = smart_contract.next_entity_version_for(protocol_version_major);\n\n        let entity_version_key = smart_contract.insert_entity_version(\n            protocol_version_major,\n            EntityAddr::SmartContract(smart_contract_addr),\n        );\n        debug_assert_eq!(entity_version_key.entity_version(), next_version);\n\n        let smart_contract_addr = chain_utils::compute_predictable_address(\n            chain_name.as_bytes(),\n            initiator.value(),\n            bytecode_hash,\n            seed,\n        );\n\n        tracking_copy.write(\n            Key::SmartContract(smart_contract_addr),\n            StoredValue::SmartContract(smart_contract),\n        );\n\n        // 2. Store wasm\n\n        let bytecode = ByteCode::new(ByteCodeKind::V2CasperWasm, wasm_bytes.clone().into());\n        let bytecode_addr = ByteCodeAddr::V2CasperWasm(bytecode_hash);\n\n        tracking_copy.write(\n            Key::ByteCode(bytecode_addr),\n            StoredValue::ByteCode(bytecode),\n        );\n\n        // 3. 
Store addressable entity\n        let addressable_entity_key =\n            Key::AddressableEntity(EntityAddr::SmartContract(smart_contract_addr));\n\n        // TODO: abort(str) as an alternative to trap\n        let main_purse: URef = match system::mint_mint(\n            &mut tracking_copy,\n            transaction_hash,\n            Arc::clone(&address_generator),\n            MintArgs {\n                initial_balance: U512::zero(),\n            },\n        ) {\n            Ok(uref) => uref,\n            Err(mint_error) => {\n                error!(?mint_error, \"Failed to create a purse\");\n                return Err(InstallContractError::SystemContract(\n                    CallError::CalleeTrapped(TrapCode::UnreachableCodeReached),\n                ));\n            }\n        };\n\n        let addressable_entity = AddressableEntity::new(\n            PackageHash::new(smart_contract_addr),\n            ByteCodeHash::new(bytecode_hash),\n            ProtocolVersion::V2_0_0,\n            main_purse,\n            AssociatedKeys::default(),\n            ActionThresholds::default(),\n            EntityKind::SmartContract(ContractRuntimeTag::VmCasperV2),\n        );\n\n        tracking_copy.write(\n            addressable_entity_key,\n            StoredValue::AddressableEntity(addressable_entity),\n        );\n\n        let ctor_gas_usage = match entry_point {\n            Some(entry_point_name) => {\n                let input = input.unwrap_or_default();\n                let execute_request = ExecuteRequestBuilder::default()\n                    .with_initiator(initiator)\n                    .with_caller_key(caller_key)\n                    .with_target(ExecutionKind::Stored {\n                        address: smart_contract_addr,\n                        entry_point: entry_point_name,\n                    })\n                    .with_gas_limit(gas_limit)\n                    .with_input(input)\n                    .with_transferred_value(transferred_value)\n 
                   .with_transaction_hash(transaction_hash)\n                    .with_shared_address_generator(address_generator)\n                    .with_chain_name(chain_name)\n                    .with_block_time(block_time)\n                    .with_state_hash(state_hash)\n                    .with_parent_block_hash(parent_block_hash)\n                    .with_block_height(block_height)\n                    .build()\n                    .expect(\"should build\");\n\n                let forked_tc = tracking_copy.fork2();\n\n                match Self::execute_with_tracking_copy(self, forked_tc, execute_request) {\n                    Ok(ExecuteResult {\n                        host_error,\n                        output,\n                        gas_usage,\n                        effects,\n                        cache,\n                        messages,\n                    }) => {\n                        if let Some(host_error) = host_error {\n                            return Err(InstallContractError::Constructor { host_error });\n                        }\n\n                        tracking_copy.apply_changes(effects, cache, messages);\n\n                        if let Some(output) = output {\n                            warn!(?output, \"unexpected output from constructor\");\n                        }\n\n                        gas_usage\n                    }\n                    Err(execute_error) => {\n                        error!(%execute_error, \"unable to execute constructor\");\n                        return Err(InstallContractError::Execute(execute_error));\n                    }\n                }\n            }\n            None => {\n                // TODO: Calculate storage gas cost etc. 
and make it the base cost, then add\n                // constructor gas cost\n                GasUsage::new(gas_limit, gas_limit)\n            }\n        };\n\n        let effects = tracking_copy.effects();\n\n        match state_provider.commit_effects(state_root_hash, effects.clone()) {\n            Ok(post_state_hash) => Ok(InstallContractResult {\n                smart_contract_addr,\n                gas_usage: ctor_gas_usage,\n                effects,\n                post_state_hash,\n            }),\n            Err(error) => Err(InstallContractError::GlobalState(error)),\n        }\n    }\n\n    fn execute_with_tracking_copy<R: GlobalStateReader + 'static>(\n        &self,\n        mut tracking_copy: TrackingCopy<R>,\n        execute_request: ExecuteRequest,\n    ) -> Result<ExecuteResult, ExecuteError> {\n        let ExecuteRequest {\n            initiator,\n            caller_key,\n            gas_limit,\n            execution_kind,\n            input,\n            transferred_value,\n            transaction_hash,\n            address_generator,\n            chain_name,\n            block_time,\n            state_hash,\n            parent_block_hash,\n            block_height,\n        } = execute_request;\n\n        // TODO: Purse uref does not need to be optional once value transfers to WasmBytes are\n        // supported. 
let caller_entity_addr = EntityAddr::new_account(caller);\n        let source_purse = get_purse_for_entity(&mut tracking_copy, caller_key);\n\n        let (wasm_bytes, export_name) = match &execution_kind {\n            ExecutionKind::SessionBytes(wasm_bytes) => {\n                // self.execute_wasm(tracking_copy, address, gas_limit, wasm_bytes, input)\n                (wasm_bytes.clone(), DEFAULT_WASM_ENTRY_POINT)\n            }\n            ExecutionKind::Stored {\n                address: smart_contract_addr,\n                entry_point,\n            } => {\n                let smart_contract_key = Key::SmartContract(*smart_contract_addr);\n                let legacy_key = Key::Hash(*smart_contract_addr);\n\n                let mut contract = tracking_copy\n                    .read_first(&[&legacy_key, &smart_contract_key])\n                    .expect(\"should read contract\");\n\n                if let Some(StoredValue::SmartContract(smart_contract_package)) = &contract {\n                    let contract_hash = smart_contract_package\n                        .versions()\n                        .latest()\n                        .expect(\"should have last entry\");\n                    let entity_addr = EntityAddr::SmartContract(contract_hash.value());\n                    let latest_version_key = Key::AddressableEntity(entity_addr);\n                    assert_eq!(&entity_addr.value(), smart_contract_addr);\n                    let new_contract = tracking_copy\n                        .read(&latest_version_key)\n                        .expect(\"should read latest version\");\n                    contract = new_contract;\n                };\n\n                match contract {\n                    Some(StoredValue::AddressableEntity(addressable_entity)) => {\n                        let wasm_key = match addressable_entity.kind() {\n                            EntityKind::System(_) => todo!(),\n                            EntityKind::Account(_) => 
todo!(),\n                            EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1) => {\n                                // We need to short circuit here to execute v1 contracts with legacy\n                                // execut\n\n                                let block_info = BlockInfo::new(\n                                    state_hash,\n                                    block_time,\n                                    parent_block_hash,\n                                    block_height,\n                                    self.execution_engine_v1.config().protocol_version(),\n                                );\n\n                                let entity_addr = EntityAddr::SmartContract(*smart_contract_addr);\n\n                                return self.execute_legacy_wasm_byte_code(\n                                    initiator,\n                                    &entity_addr,\n                                    entry_point.clone(),\n                                    &input,\n                                    &mut tracking_copy,\n                                    block_info,\n                                    transaction_hash,\n                                    gas_limit,\n                                );\n                            }\n                            EntityKind::SmartContract(ContractRuntimeTag::VmCasperV2) => {\n                                Key::ByteCode(ByteCodeAddr::V2CasperWasm(\n                                    addressable_entity.byte_code_addr(),\n                                ))\n                            }\n                        };\n\n                        // Note: Bytecode stored in the GlobalStateReader has a \"kind\" option -\n                        // currently we know we have a v2 bytecode as the stored contract is of \"V2\"\n                        // variant.\n                        let wasm_bytes = tracking_copy\n                            .read(&wasm_key)\n               
             .expect(\"should read wasm\")\n                            .expect(\"should have wasm bytes\")\n                            .into_byte_code()\n                            .expect(\"should be byte code\")\n                            .take_bytes();\n\n                        if transferred_value != 0 {\n                            let args = {\n                                let maybe_to = None;\n                                let source = source_purse;\n                                let target = addressable_entity.main_purse();\n                                let amount = transferred_value;\n                                let id = None;\n                                MintTransferArgs {\n                                    maybe_to,\n                                    source,\n                                    target,\n                                    amount: amount.into(),\n                                    id,\n                                }\n                            };\n\n                            match system::mint_transfer(\n                                &mut tracking_copy,\n                                transaction_hash,\n                                Arc::clone(&address_generator),\n                                args,\n                            ) {\n                                Ok(()) => {\n                                    // Transfer succeed, go on\n                                }\n                                Err(error) => {\n                                    return Ok(ExecuteResult {\n                                        host_error: Some(error),\n                                        output: None,\n                                        gas_usage: GasUsage::new(\n                                            gas_limit,\n                                            gas_limit - DEFAULT_MINT_TRANSFER_GAS_COST,\n                                        ),\n                                        
effects: tracking_copy.effects(),\n                                        cache: tracking_copy.cache(),\n                                        messages: tracking_copy.messages(),\n                                    });\n                                }\n                            }\n                        }\n\n                        (Bytes::from(wasm_bytes), entry_point.as_str())\n                    }\n                    Some(StoredValue::Contract(_legacy_contract)) => {\n                        let block_info = BlockInfo::new(\n                            state_hash,\n                            block_time,\n                            parent_block_hash,\n                            block_height,\n                            self.execution_engine_v1.config().protocol_version(),\n                        );\n\n                        let entity_addr = EntityAddr::SmartContract(*smart_contract_addr);\n\n                        return self.execute_legacy_wasm_byte_code(\n                            initiator,\n                            &entity_addr,\n                            entry_point.clone(),\n                            &input,\n                            &mut tracking_copy,\n                            block_info,\n                            transaction_hash,\n                            gas_limit,\n                        );\n                    }\n                    Some(stored_value) => {\n                        todo!(\n                            \"Unexpected {stored_value:?} under key {:?}\",\n                            &execution_kind\n                        );\n                    }\n                    None => {\n                        error!(\n                            smart_contract_addr = base16::encode_lower(&smart_contract_addr),\n                            ?execution_kind,\n                            \"No contract code found\",\n                        );\n                        return 
Err(ExecuteError::CodeNotFound(*smart_contract_addr));\n                    }\n                }\n            }\n        };\n\n        let vm = Arc::clone(&self.compiled_wasm_engine);\n\n        let mut initial_tracking_copy = tracking_copy.fork2();\n\n        // Derive callee key from the execution target.\n        let callee_key = match &execution_kind {\n            ExecutionKind::Stored {\n                address: smart_contract_addr,\n                ..\n            } => Key::SmartContract(*smart_contract_addr),\n            ExecutionKind::SessionBytes(_wasm_bytes) => Key::Account(initiator),\n        };\n\n        let context = Context {\n            initiator,\n            config: self.config.wasm_config,\n            storage_costs: self.config.storage_costs,\n            caller: caller_key,\n            callee: callee_key,\n            transferred_value,\n            tracking_copy,\n            executor: self.clone(),\n            address_generator: Arc::clone(&address_generator),\n            transaction_hash,\n            chain_name,\n            input,\n            block_time,\n            message_limits: self.config.message_limits,\n        };\n\n        let wasm_instance_config = ConfigBuilder::new()\n            .with_gas_limit(gas_limit)\n            .with_memory_limit(self.config.memory_limit)\n            .build();\n\n        let mut instance = vm.instantiate(wasm_bytes, context, wasm_instance_config)?;\n\n        self.push_execution_stack(execution_kind.clone());\n        let (vm_result, gas_usage) = instance.call_export(export_name);\n\n        let top_execution_kind = self\n            .pop_execution_stack()\n            .expect(\"should have execution kind\"); // SAFETY: We just pushed\n        debug_assert_eq!(&top_execution_kind, &execution_kind);\n\n        let context = instance.teardown();\n\n        let Context {\n            tracking_copy: final_tracking_copy,\n            ..\n        } = context;\n\n        match vm_result {\n           
 Ok(()) => Ok(ExecuteResult {\n                host_error: None,\n                output: None,\n                gas_usage,\n                effects: final_tracking_copy.effects(),\n                cache: final_tracking_copy.cache(),\n                messages: final_tracking_copy.messages(),\n            }),\n            Err(VMError::Return { flags, data }) => {\n                let host_error = if flags.contains(ReturnFlags::REVERT) {\n                    // The contract has reverted.\n                    Some(CallError::CalleeReverted)\n                } else {\n                    // Merge the tracking copy parts since the execution has succeeded.\n                    initial_tracking_copy.apply_changes(\n                        final_tracking_copy.effects(),\n                        final_tracking_copy.cache(),\n                        final_tracking_copy.messages(),\n                    );\n\n                    None\n                };\n\n                Ok(ExecuteResult {\n                    host_error,\n                    output: data,\n                    gas_usage,\n                    effects: initial_tracking_copy.effects(),\n                    cache: initial_tracking_copy.cache(),\n                    messages: initial_tracking_copy.messages(),\n                })\n            }\n            Err(VMError::OutOfGas) => Ok(ExecuteResult {\n                host_error: Some(CallError::CalleeGasDepleted),\n                output: None,\n                gas_usage,\n                effects: final_tracking_copy.effects(),\n                cache: final_tracking_copy.cache(),\n                messages: final_tracking_copy.messages(),\n            }),\n            Err(VMError::Trap(trap_code)) => Ok(ExecuteResult {\n                host_error: Some(CallError::CalleeTrapped(trap_code)),\n                output: None,\n                gas_usage,\n                effects: initial_tracking_copy.effects(),\n                cache: initial_tracking_copy.cache(),\n     
           messages: initial_tracking_copy.messages(),\n            }),\n            Err(VMError::Export(export_error)) => {\n                error!(?export_error, \"export error\");\n                Ok(ExecuteResult {\n                    host_error: Some(CallError::NotCallable),\n                    output: None,\n                    gas_usage,\n                    effects: initial_tracking_copy.effects(),\n                    cache: initial_tracking_copy.cache(),\n                    messages: initial_tracking_copy.messages(),\n                })\n            }\n            Err(VMError::Execute(execute_error)) => {\n                let effects = initial_tracking_copy.effects();\n                let cache = initial_tracking_copy.cache();\n                let messages = initial_tracking_copy.messages();\n                error!(\n                    ?execute_error,\n                    ?gas_usage,\n                    ?effects,\n                    ?cache,\n                    ?messages,\n                    \"host error\"\n                );\n                Err(execute_error)\n            }\n            Err(VMError::Internal(internal_error)) => {\n                error!(?internal_error, \"internal host error\");\n                Err(ExecuteError::InternalHost(internal_error))\n            }\n        }\n    }\n\n    #[allow(clippy::too_many_arguments)]\n    fn execute_legacy_wasm_byte_code<R>(\n        &self,\n        initiator: AccountHash,\n        entity_addr: &EntityAddr,\n        entry_point: String,\n        input: &Bytes,\n        tracking_copy: &mut TrackingCopy<R>,\n        block_info: BlockInfo,\n        transaction_hash: casper_types::TransactionHash,\n        gas_limit: u64,\n    ) -> Result<ExecuteResult, ExecuteError>\n    where\n        R: GlobalStateReader + 'static,\n    {\n        let authorization_keys = BTreeSet::from_iter([initiator]);\n        let initiator_addr = InitiatorAddr::AccountHash(initiator);\n        let executable_item =\n         
   ExecutableItem::Invocation(TransactionInvocationTarget::ByHash(entity_addr.value()));\n        let entry_point = entry_point.clone();\n        let args = bytesrepr::deserialize_from_slice(input).expect(\"should deserialize\");\n        let phase = Phase::Session;\n\n        let wasm_v1_result = {\n            let forked_tc = tracking_copy.fork2();\n            self.execution_engine_v1.execute_with_tracking_copy(\n                forked_tc,\n                block_info,\n                transaction_hash,\n                Gas::from(gas_limit),\n                initiator_addr,\n                executable_item,\n                entry_point,\n                args,\n                authorization_keys,\n                phase,\n            )\n        };\n\n        let effects = wasm_v1_result.effects();\n        let messages = wasm_v1_result.messages();\n\n        match wasm_v1_result.cache() {\n            Some(cache) => {\n                tracking_copy.apply_changes(effects.clone(), cache.clone(), messages.clone());\n            }\n            None => {\n                debug_assert!(\n                    effects.is_empty(),\n                    \"effects should be empty if there is no cache\"\n                );\n            }\n        }\n\n        let gas_consumed = wasm_v1_result\n            .consumed()\n            .value()\n            .try_into()\n            .expect(\"Should convert consumed gas to u64\");\n\n        let mut output = wasm_v1_result\n            .ret()\n            .map(|ret| bytesrepr::serialize(ret).unwrap())\n            .map(Bytes::from);\n\n        let host_error = match wasm_v1_result.error() {\n            Some(EngineError::Exec(ExecError::GasLimit)) => Some(CallError::CalleeGasDepleted),\n            Some(EngineError::Exec(ExecError::Revert(revert_code))) => {\n                assert!(output.is_none(), \"output should be None\"); // ExecutionEngineV1 sets output to None when error occurred.\n                let revert_code: u32 = 
(*revert_code).into();\n                output = Some(revert_code.to_le_bytes().to_vec().into()); // Pass serialized revert code as output.\n                Some(CallError::CalleeReverted)\n            }\n            Some(_) => Some(CallError::CalleeTrapped(TrapCode::UnreachableCodeReached)),\n            None => None,\n        };\n\n        // TODO: Support multisig\n\n        // TODO: Convert this to a host error as if it was executed.\n\n        // SAFETY: Gas limit is first promoted from u64 to u512, and we know\n        // consumed gas under v1 would not exceed the imposed limit therefore an\n        // unwrap here is safe.\n\n        let remaining_points = gas_limit.checked_sub(gas_consumed).unwrap();\n\n        let fork2 = tracking_copy.fork2();\n        Ok(ExecuteResult {\n            host_error,\n            output,\n            gas_usage: GasUsage::new(gas_limit, remaining_points),\n            effects: fork2.effects(),\n            cache: fork2.cache(),\n            messages: fork2.messages(),\n        })\n    }\n\n    pub fn execute_with_provider<R>(\n        &self,\n        state_root_hash: Digest,\n        state_provider: &R,\n        execute_request: ExecuteRequest,\n    ) -> Result<ExecuteWithProviderResult, ExecuteWithProviderError>\n    where\n        R: StateProvider + CommitProvider,\n        <R as StateProvider>::Reader: 'static,\n    {\n        let tracking_copy = match state_provider.checkout(state_root_hash) {\n            Ok(Some(tracking_copy)) => tracking_copy,\n            Ok(None) => {\n                return Err(ExecuteWithProviderError::GlobalState(\n                    GlobalStateError::RootNotFound,\n                ))\n            }\n            Err(global_state_error) => return Err(global_state_error.into()),\n        };\n\n        let tracking_copy = TrackingCopy::new(tracking_copy, 1, state_provider.enable_entity());\n\n        match self.execute_with_tracking_copy(tracking_copy, execute_request) {\n            Ok(ExecuteResult 
{\n                host_error,\n                output,\n                gas_usage,\n                effects,\n                cache: _,\n                messages,\n            }) => match state_provider.commit_effects(state_root_hash, effects.clone()) {\n                Ok(post_state_hash) => Ok(ExecuteWithProviderResult::new(\n                    host_error,\n                    output,\n                    gas_usage,\n                    effects,\n                    post_state_hash,\n                    messages,\n                )),\n                Err(error) => Err(error.into()),\n            },\n            Err(error) => Err(ExecuteWithProviderError::Execute(error)),\n        }\n    }\n}\n\nimpl ExecutorV2 {\n    /// Create a new `ExecutorV2` instance.\n    pub fn new(config: ExecutorConfig, execution_engine_v1: Arc<ExecutionEngineV1>) -> Self {\n        let wasm_engine = match config.executor_kind {\n            ExecutorKind::Compiled => WasmerEngine::new(),\n        };\n        ExecutorV2 {\n            config,\n            compiled_wasm_engine: Arc::new(wasm_engine),\n            execution_stack: Default::default(),\n            execution_engine_v1,\n        }\n    }\n\n    /// Push the execution stack.\n    pub(crate) fn push_execution_stack(&self, execution_kind: ExecutionKind) {\n        let mut execution_stack = self.execution_stack.write();\n        execution_stack.push_back(execution_kind);\n    }\n\n    /// Pop the execution stack.\n    pub(crate) fn pop_execution_stack(&self) -> Option<ExecutionKind> {\n        let mut execution_stack = self.execution_stack.write();\n        execution_stack.pop_back()\n    }\n}\n\nimpl Executor for ExecutorV2 {\n    /// Execute a Wasm contract.\n    ///\n    /// # Errors\n    /// Returns an error if the execution fails. This can happen if the Wasm instance cannot be\n    /// prepared. 
Otherwise, returns the result of the execution with a gas usage attached which\n    /// means a successful execution (that may or may not have produced an error such as a trap,\n    /// return, or out of gas).\n    fn execute<R: GlobalStateReader + 'static>(\n        &self,\n        tracking_copy: TrackingCopy<R>,\n        execute_request: ExecuteRequest,\n    ) -> Result<ExecuteResult, ExecuteError> {\n        self.execute_with_tracking_copy(tracking_copy, execute_request)\n    }\n}\n\nfn get_purse_for_entity<R: GlobalStateReader>(\n    tracking_copy: &mut TrackingCopy<R>,\n    entity_key: Key,\n) -> casper_types::URef {\n    let stored_value = tracking_copy\n        .read(&entity_key)\n        .expect(\"should read account\")\n        .expect(\"should have account\");\n    match stored_value {\n        StoredValue::CLValue(addressable_entity_key) => {\n            let key = addressable_entity_key\n                .into_t::<Key>()\n                .expect(\"should be key\");\n            let stored_value = tracking_copy\n                .read(&key)\n                .expect(\"should read account\")\n                .expect(\"should have account\");\n\n            let addressable_entity = stored_value\n                .into_addressable_entity()\n                .expect(\"should be addressable entity\");\n\n            addressable_entity.main_purse()\n        }\n        StoredValue::Account(account) => account.main_purse(),\n        StoredValue::SmartContract(smart_contract_package) => {\n            let contract_hash = smart_contract_package\n                .versions()\n                .latest()\n                .expect(\"should have last entry\");\n            let entity_addr = EntityAddr::SmartContract(contract_hash.value());\n            let latest_version_key = Key::AddressableEntity(entity_addr);\n            let new_contract = tracking_copy\n                .read(&latest_version_key)\n                .expect(\"should read latest version\");\n            let 
addressable_entity = new_contract\n                .expect(\"should have addressable entity\")\n                .into_addressable_entity()\n                .expect(\"should be addressable entity\");\n            addressable_entity.main_purse()\n        }\n        other => panic!(\"should be account or contract received {other:?}\"),\n    }\n}\n"
  },
  {
    "path": "executor/wasm/src/system.rs",
    "content": "//! System contract wire up for the new engine.\n//!\n//! This module wraps system contract logic into a dispatcher that can be used by the new engine\n//! hiding the complexity of the underlying implementation.\nuse std::{cell::RefCell, rc::Rc, sync::Arc};\n\nuse casper_executor_wasm_common::error::{CallError, TrapCode};\nuse casper_executor_wasm_interface::HostResult;\nuse casper_storage::{\n    global_state::GlobalStateReader,\n    system::{\n        mint::Mint,\n        runtime_native::{Config, Id, RuntimeNative},\n    },\n    tracking_copy::{TrackingCopyEntityExt, TrackingCopyError},\n    AddressGenerator, TrackingCopy,\n};\nuse casper_types::{\n    account::AccountHash, CLValueError, ContextAccessRights, EntityAddr, Key, Phase,\n    ProtocolVersion, PublicKey, SystemHashRegistry, TransactionHash, URef, U512,\n};\nuse parking_lot::RwLock;\nuse thiserror::Error;\nuse tracing::{debug, error};\n\n#[derive(Debug, Error)]\nenum DispatchError {\n    #[error(\"Tracking copy error: {0}\")]\n    Storage(TrackingCopyError),\n    #[error(\"CLValue error: {0}\")]\n    CLValue(CLValueError),\n    #[error(\"Registry not found\")]\n    RegistryNotFound,\n    #[error(\"Missing system contract: {0}\")]\n    MissingSystemContract(&'static str),\n    #[error(\"Runtime footprint\")]\n    RuntimeFootprint(TrackingCopyError),\n}\n\nfn dispatch_system_contract<R: GlobalStateReader, Ret: PartialEq>(\n    tracking_copy: &mut TrackingCopy<R>,\n    transaction_hash: TransactionHash,\n    address_generator: Arc<RwLock<AddressGenerator>>,\n    system_contract: &'static str,\n    func: impl FnOnce(RuntimeNative<R>) -> Ret,\n) -> Result<Ret, DispatchError> {\n    let system_entity_registry = {\n        let stored_value = tracking_copy\n            .read(&Key::SystemEntityRegistry)\n            .map_err(DispatchError::Storage)?\n            .ok_or(DispatchError::RegistryNotFound)?;\n        stored_value\n            .into_cl_value()\n            .expect(\"should convert 
stored value into CLValue\")\n            .into_t::<SystemHashRegistry>()\n            .map_err(DispatchError::CLValue)?\n    };\n    let system_entity_addr = system_entity_registry\n        .get(system_contract)\n        .ok_or(DispatchError::MissingSystemContract(system_contract))?;\n    let entity_addr = EntityAddr::new_system(*system_entity_addr);\n\n    // let addressable_entity_stored_value =\n\n    let runtime_footprint = tracking_copy\n        .runtime_footprint_by_entity_addr(entity_addr)\n        .map_err(DispatchError::RuntimeFootprint)?;\n\n    let config = Config::default();\n    let protocol_version = ProtocolVersion::V1_0_0;\n\n    let access_rights = ContextAccessRights::new(*system_entity_addr, []);\n    let address = PublicKey::System.to_account_hash();\n\n    let forked_tracking_copy = Rc::new(RefCell::new(tracking_copy.fork2()));\n\n    let remaining_spending_limit = U512::MAX; // NOTE: Since there's no custom payment, there's no need to track the remaining spending limit.\n    let phase = Phase::System; // NOTE: Since this is a system contract, the phase is always `System`.\n\n    let ret = {\n        let runtime = RuntimeNative::new(\n            config,\n            protocol_version,\n            Id::Transaction(transaction_hash),\n            address_generator,\n            Rc::clone(&forked_tracking_copy),\n            address,\n            Key::AddressableEntity(entity_addr),\n            runtime_footprint,\n            access_rights,\n            remaining_spending_limit,\n            phase,\n        );\n\n        func(runtime)\n    };\n\n    // SAFETY: `RuntimeNative` is dropped in the block above, we can extract the tracking copy the\n    // effects.\n    let modified_tracking_copy = Rc::try_unwrap(forked_tracking_copy)\n        .ok()\n        .expect(\"No other references\");\n\n    let modified_tracking_copy = modified_tracking_copy.into_inner();\n\n    tracking_copy.apply_changes(\n        modified_tracking_copy.effects(),\n        
modified_tracking_copy.cache(),\n        modified_tracking_copy.messages(),\n    );\n\n    Ok(ret)\n}\n\n#[derive(Debug, Clone, Copy)]\npub(crate) struct MintArgs {\n    pub(crate) initial_balance: U512,\n}\n\npub(crate) fn mint_mint<R: GlobalStateReader>(\n    tracking_copy: &mut TrackingCopy<R>,\n    transaction_hash: TransactionHash,\n    address_generator: Arc<RwLock<AddressGenerator>>,\n    args: MintArgs,\n) -> Result<URef, CallError> {\n    let mint_result = match dispatch_system_contract(\n        tracking_copy,\n        transaction_hash,\n        address_generator,\n        \"mint\",\n        |mut runtime| runtime.mint(args.initial_balance),\n    ) {\n        Ok(mint_result) => mint_result,\n        Err(error) => {\n            error!(%error, ?args, \"mint failed\");\n            panic!(\"Mint failed with error {error:?}; aborting\");\n        }\n    };\n\n    match mint_result {\n        Ok(uref) => Ok(uref),\n        Err(casper_types::system::mint::Error::InsufficientFunds) => Err(CallError::CalleeReverted),\n        Err(casper_types::system::mint::Error::GasLimit) => Err(CallError::CalleeGasDepleted),\n        Err(mint_error) => {\n            error!(%mint_error, ?args, \"mint transfer failed\");\n            Err(CallError::CalleeTrapped(TrapCode::UnreachableCodeReached))\n        }\n    }\n}\n\n#[derive(Debug, Copy, Clone)]\npub(crate) struct MintTransferArgs {\n    pub(crate) maybe_to: Option<AccountHash>,\n    pub(crate) source: URef,\n    pub(crate) target: URef,\n    pub(crate) amount: U512,\n    pub(crate) id: Option<u64>,\n}\n\npub(crate) fn mint_transfer<R: GlobalStateReader>(\n    tracking_copy: &mut TrackingCopy<R>,\n    id: TransactionHash,\n    address_generator: Arc<RwLock<AddressGenerator>>,\n    args: MintTransferArgs,\n) -> HostResult {\n    let transfer_result: Result<(), casper_types::system::mint::Error> =\n        match dispatch_system_contract(\n            tracking_copy,\n            id,\n            address_generator,\n            
\"mint\",\n            |mut runtime| {\n                runtime.transfer(\n                    args.maybe_to,\n                    args.source,\n                    args.target,\n                    args.amount,\n                    args.id,\n                )\n            },\n        ) {\n            Ok(result) => result,\n            Err(error) => {\n                error!(%error, \"mint transfer failed\");\n                return Err(CallError::CalleeTrapped(TrapCode::UnreachableCodeReached));\n            }\n        };\n\n    debug!(?args, ?transfer_result, \"transfer\");\n\n    match transfer_result {\n        Ok(()) => Ok(()),\n        Err(casper_types::system::mint::Error::InsufficientFunds) => Err(CallError::CalleeReverted),\n        Err(casper_types::system::mint::Error::GasLimit) => Err(CallError::CalleeGasDepleted),\n        Err(mint_error) => {\n            error!(%mint_error, ?args, \"mint transfer failed\");\n            Err(CallError::CalleeTrapped(TrapCode::UnreachableCodeReached))\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::sync::Arc;\n\n    use casper_storage::{\n        data_access_layer::{GenesisRequest, GenesisResult},\n        global_state::{\n            self,\n            state::{CommitProvider, StateProvider},\n        },\n        system::{\n            mint::{storage_provider::StorageProvider, Mint},\n            runtime_native::Id,\n        },\n        AddressGenerator,\n    };\n    use casper_types::{\n        ChainspecRegistry, Digest, GenesisConfig, Phase, ProtocolVersion, TransactionHash,\n        TransactionV1Hash, U512,\n    };\n    use parking_lot::RwLock;\n\n    use crate::system::dispatch_system_contract;\n\n    #[test]\n    fn test_system_dispatcher() {\n        let (global_state, mut root_hash, _tempdir) =\n            global_state::state::lmdb::make_temporary_global_state([]);\n\n        let genesis_config = GenesisConfig::default();\n\n        let genesis_request: GenesisRequest = GenesisRequest::new(\n    
        Digest::hash(\"foo\"),\n            ProtocolVersion::V2_0_0,\n            genesis_config,\n            ChainspecRegistry::new_with_genesis(b\"\", b\"\"),\n        );\n\n        match global_state.genesis(genesis_request) {\n            GenesisResult::Failure(failure) => panic!(\"Failed to run genesis: {:?}\", failure),\n            GenesisResult::Fatal(fatal) => panic!(\"Fatal error while running genesis: {}\", fatal),\n            GenesisResult::Success {\n                post_state_hash,\n                effects: _,\n            } => {\n                root_hash = post_state_hash;\n            }\n        }\n\n        let mut tracking_copy = global_state\n            .tracking_copy(root_hash)\n            .expect(\"Obtaining root hash succeed\")\n            .expect(\"Root hash exists\");\n\n        let transaction_hash_bytes: [u8; 32] = [1; 32];\n        let transaction_hash: TransactionHash =\n            TransactionHash::V1(TransactionV1Hash::from_raw(transaction_hash_bytes));\n        let id = Id::Transaction(transaction_hash);\n        let address_generator = Arc::new(RwLock::new(AddressGenerator::new(\n            &id.seed(),\n            Phase::Session,\n        )));\n\n        let ret = dispatch_system_contract(\n            &mut tracking_copy,\n            transaction_hash,\n            Arc::clone(&address_generator),\n            \"mint\",\n            |mut runtime| runtime.mint(U512::from(1000u64)),\n        );\n\n        let uref = ret.expect(\"dispatch mint\").expect(\"uref\");\n\n        let ret: Result<Result<U512, _>, _> = dispatch_system_contract(\n            &mut tracking_copy,\n            transaction_hash,\n            Arc::clone(&address_generator),\n            \"mint\",\n            |mut runtime| runtime.total_balance(uref),\n        );\n\n        // let ret = ret.expect(\"dispatch total balance\");\n\n        assert_eq!(ret.unwrap(), Ok(U512::from(1000u64)));\n\n        let post_root_hash = global_state\n            
.commit(root_hash, tracking_copy.effects())\n            .expect(\"Should apply effect\");\n\n        assert_ne!(post_root_hash, root_hash);\n    }\n}\n"
  },
  {
    "path": "executor/wasm/tests/integration.rs",
    "content": "use std::{\n    env,\n    fs::{self, File},\n    path::{Path, PathBuf},\n    sync::Arc,\n};\n\nuse bytes::Bytes;\nuse casper_execution_engine::engine_state::ExecutionEngineV1;\nuse casper_executor_wasm::{\n    install::{\n        InstallContractError, InstallContractRequest, InstallContractRequestBuilder,\n        InstallContractResult,\n    },\n    ExecutorConfigBuilder, ExecutorKind, ExecutorV2,\n};\nuse casper_executor_wasm_common::error::CallError;\nuse casper_executor_wasm_interface::executor::{\n    ExecuteError, ExecuteRequest, ExecuteRequestBuilder, ExecuteWithProviderError,\n    ExecuteWithProviderResult, ExecutionKind,\n};\nuse casper_storage::{\n    data_access_layer::{\n        prefixed_values::{PrefixedValuesRequest, PrefixedValuesResult},\n        GenesisRequest, GenesisResult, MessageTopicsRequest, MessageTopicsResult, QueryRequest,\n        QueryResult,\n    },\n    global_state::{\n        self,\n        state::{lmdb::LmdbGlobalState, CommitProvider, StateProvider},\n        transaction_source::lmdb::LmdbEnvironment,\n        trie_store::lmdb::LmdbTrieStore,\n    },\n    system::runtime_native::Id,\n    AddressGenerator, KeyPrefix,\n};\nuse casper_types::{\n    account::AccountHash, BlockHash, ChainspecRegistry, Digest, EntityAddr, GenesisAccount,\n    GenesisConfig, HostFunctionCostsV2, HostFunctionV2, Key, MessageLimits, Motes, Phase,\n    ProtocolVersion, PublicKey, SecretKey, StorageCosts, StoredValue, SystemConfig, Timestamp,\n    TransactionHash, TransactionV1Hash, WasmConfig, WasmV2Config, U512,\n};\nuse fs_extra::dir;\nuse itertools::Itertools;\nuse once_cell::sync::Lazy;\nuse parking_lot::RwLock;\nuse tempfile::TempDir;\n\nstatic DEFAULT_ACCOUNT_SECRET_KEY: Lazy<SecretKey> =\n    Lazy::new(|| SecretKey::ed25519_from_bytes([199; SecretKey::ED25519_LENGTH]).unwrap());\nstatic DEFAULT_ACCOUNT_PUBLIC_KEY: Lazy<casper_types::PublicKey> =\n    Lazy::new(|| PublicKey::from(&*DEFAULT_ACCOUNT_SECRET_KEY));\nstatic 
DEFAULT_ACCOUNT_HASH: Lazy<AccountHash> =\n    Lazy::new(|| DEFAULT_ACCOUNT_PUBLIC_KEY.to_account_hash());\n\nconst CSPR: u64 = 10u64.pow(9);\n\nstatic RUST_WORKSPACE_PATH: Lazy<PathBuf> = Lazy::new(|| {\n    let path = Path::new(env!(\"CARGO_MANIFEST_DIR\"))\n        .parent()\n        .and_then(Path::parent)\n        .expect(\"CARGO_MANIFEST_DIR should have parent\");\n    assert!(\n        path.exists(),\n        \"Workspace path {} does not exists\",\n        path.display()\n    );\n    path.to_path_buf()\n});\n\nstatic RUST_WORKSPACE_WASM_PATH: Lazy<PathBuf> = Lazy::new(|| {\n    let path = RUST_WORKSPACE_PATH\n        .join(\"target\")\n        .join(\"wasm32-unknown-unknown\")\n        .join(\"release\");\n    assert!(\n        path.exists() || RUST_TOOL_WASM_PATH.exists(),\n        \"Rust Wasm path {} does not exists\",\n        path.display()\n    );\n    path\n});\n// The location of compiled Wasm files if running from within the 'tests' crate generated by the\n// cargo_casper tool, i.e. 
'wasm/'.\nstatic RUST_TOOL_WASM_PATH: Lazy<PathBuf> = Lazy::new(|| {\n    env::current_dir()\n        .expect(\"should get current working dir\")\n        .join(\"wasm\")\n});\n\n#[track_caller]\nfn read_wasm<P: AsRef<Path>>(filename: P) -> Bytes {\n    let paths = vec![\n        RUST_WORKSPACE_WASM_PATH.clone(),\n        RUST_TOOL_WASM_PATH.clone(),\n    ];\n\n    for path in &paths {\n        let wasm_path = path.join(&filename);\n        match fs::read(wasm_path) {\n            Ok(bytes) => return Bytes::from(bytes),\n            Err(err) => {\n                if err.kind() == std::io::ErrorKind::NotFound {\n                    continue;\n                } else {\n                    panic!(\n                        \"Failed to read Wasm file at {}: {}\",\n                        filename.as_ref().display(),\n                        err\n                    );\n                }\n            }\n        }\n    }\n\n    panic!(\n        \"Failed to find Wasm file at {} in any of the paths: {:?}\",\n        filename.as_ref().display(),\n        paths\n    );\n}\n\nconst TRANSACTION_HASH_BYTES: [u8; 32] = [55; 32];\nconst TRANSACTION_HASH: TransactionHash =\n    TransactionHash::V1(TransactionV1Hash::from_raw(TRANSACTION_HASH_BYTES));\nconst DEFAULT_GAS_LIMIT: u64 = 1_000_000 * CSPR;\nconst DEFAULT_CHAIN_NAME: &str = \"casper-test\";\n\n// TODO: This is a temporary value, it should be set in the config. Default value from V1 engine\n// does not apply to V2 engine due to different cost structure. Rather than hardcoding it here, we\n// should probably reflect gas costs in a dynamic costs in host function charge. 
Proper value is\n// pending calculation.\nconst DEFAULT_GAS_PER_BYTE_COST: u32 = 1_117_587;\n\nfn make_address_generator() -> Arc<RwLock<AddressGenerator>> {\n    let id = Id::Transaction(TRANSACTION_HASH);\n    Arc::new(RwLock::new(AddressGenerator::new(\n        &id.seed(),\n        Phase::Session,\n    )))\n}\n\nfn base_execute_builder() -> ExecuteRequestBuilder {\n    ExecuteRequestBuilder::default()\n        .with_initiator(*DEFAULT_ACCOUNT_HASH)\n        .with_caller_key(Key::Account(*DEFAULT_ACCOUNT_HASH))\n        .with_gas_limit(DEFAULT_GAS_LIMIT)\n        .with_transferred_value(1000)\n        .with_transaction_hash(TRANSACTION_HASH)\n        .with_chain_name(DEFAULT_CHAIN_NAME)\n        .with_block_time(Timestamp::now().into())\n        .with_state_hash(Digest::hash(b\"state\"))\n        .with_block_height(1)\n        .with_parent_block_hash(BlockHash::new(Digest::hash(b\"block1\")))\n}\n\nfn base_install_request_builder() -> InstallContractRequestBuilder {\n    InstallContractRequestBuilder::default()\n        .with_initiator(*DEFAULT_ACCOUNT_HASH)\n        .with_gas_limit(DEFAULT_GAS_LIMIT)\n        .with_transaction_hash(TRANSACTION_HASH)\n        .with_chain_name(DEFAULT_CHAIN_NAME)\n        .with_block_time(Timestamp::now().into())\n        .with_state_hash(Digest::hash(b\"state\"))\n        .with_block_height(1)\n        .with_parent_block_hash(BlockHash::new(Digest::hash(b\"block1\")))\n}\n\n#[test]\nfn harness() {\n    let mut executor = make_executor();\n\n    let (mut global_state, mut state_root_hash, _tempdir) = make_global_state_with_genesis();\n\n    let address_generator = make_address_generator();\n\n    let flipper_address;\n\n    state_root_hash = {\n        let input_data = borsh::to_vec(&(\"Foo Token\".to_string(),))\n            .map(Bytes::from)\n            .unwrap();\n\n        let install_request = base_install_request_builder()\n            .with_wasm_bytes(read_wasm(\"vm2_cep18.wasm\"))\n            
.with_shared_address_generator(Arc::clone(&address_generator))\n            .with_transferred_value(0)\n            .with_entry_point(\"new\".to_string())\n            .with_input(input_data)\n            .build()\n            .expect(\"should build\");\n\n        let create_result = run_create_contract(\n            &mut executor,\n            &mut global_state,\n            state_root_hash,\n            install_request,\n        );\n\n        flipper_address = *create_result.smart_contract_addr();\n\n        global_state\n            .commit_effects(state_root_hash, create_result.effects().clone())\n            .expect(\"Should commit\")\n    };\n\n    let execute_request = ExecuteRequestBuilder::default()\n        .with_initiator(*DEFAULT_ACCOUNT_HASH)\n        .with_caller_key(Key::Account(*DEFAULT_ACCOUNT_HASH))\n        .with_gas_limit(DEFAULT_GAS_LIMIT)\n        .with_transferred_value(1000)\n        .with_transaction_hash(TRANSACTION_HASH)\n        .with_target(ExecutionKind::SessionBytes(read_wasm(\"vm2-harness.wasm\")))\n        .with_serialized_input((flipper_address,))\n        .with_shared_address_generator(address_generator)\n        .with_chain_name(DEFAULT_CHAIN_NAME)\n        .with_block_time(Timestamp::now().into())\n        .with_state_hash(state_root_hash)\n        .with_block_height(1)\n        .with_parent_block_hash(BlockHash::new(Digest::hash(b\"bl0ck\")))\n        .build()\n        .expect(\"should build\");\n\n    run_wasm_session(\n        &mut executor,\n        &mut global_state,\n        state_root_hash,\n        execute_request,\n    );\n}\n\npub(crate) fn make_executor() -> ExecutorV2 {\n    let storage_costs = StorageCosts::new(DEFAULT_GAS_PER_BYTE_COST);\n    let execution_engine_v1 = ExecutionEngineV1::default();\n    let executor_config = ExecutorConfigBuilder::default()\n        .with_memory_limit(17)\n        .with_executor_kind(ExecutorKind::Compiled)\n        .with_wasm_config(WasmV2Config::default())\n        
.with_storage_costs(storage_costs)\n        .with_message_limits(MessageLimits::default())\n        .build()\n        .expect(\"Should build\");\n    ExecutorV2::new(executor_config, Arc::new(execution_engine_v1))\n}\n\n#[test]\n\nfn cep18() {\n    let mut executor = make_executor();\n\n    let (mut global_state, mut state_root_hash, _tempdir) = make_global_state_with_genesis();\n\n    let address_generator = make_address_generator();\n\n    let input_data = borsh::to_vec(&(\"Foo Token\".to_string(),))\n        .map(Bytes::from)\n        .unwrap();\n\n    let block_time_1 = Timestamp::now().into();\n\n    let create_request = base_install_request_builder()\n        .with_initiator(*DEFAULT_ACCOUNT_HASH)\n        .with_transaction_hash(TRANSACTION_HASH)\n        .with_wasm_bytes(read_wasm(\"vm2_cep18.wasm\").clone())\n        .with_shared_address_generator(Arc::clone(&address_generator))\n        .with_transferred_value(0)\n        .with_entry_point(\"new\".to_string())\n        .with_input(input_data)\n        .with_block_time(block_time_1)\n        .with_state_hash(Digest::from_raw([0; 32])) // TODO: Carry on state root hash\n        .with_block_height(1) // TODO: Carry on block height\n        .with_parent_block_hash(BlockHash::new(Digest::from_raw([0; 32]))) // TODO: Carry on parent block hash\n        .build()\n        .expect(\"should build\");\n\n    let create_result = run_create_contract(\n        &mut executor,\n        &mut global_state,\n        state_root_hash,\n        create_request,\n    );\n\n    dbg!(create_result.gas_usage().gas_spent());\n\n    let contract_hash = EntityAddr::SmartContract(*create_result.smart_contract_addr());\n\n    state_root_hash = global_state\n        .commit_effects(state_root_hash, create_result.effects().clone())\n        .expect(\"Should commit\");\n\n    let msgs = global_state.prefixed_values(PrefixedValuesRequest::new(\n        state_root_hash,\n        KeyPrefix::MessageEntriesByEntity(contract_hash),\n    ));\n    
let PrefixedValuesResult::Success {\n        key_prefix: _,\n        values,\n    } = msgs\n    else {\n        panic!(\"Expected success\")\n    };\n\n    {\n        let mut topics_1 = values\n            .iter()\n            .filter_map(|stored_value| stored_value.as_message_topic_summary())\n            .collect_vec();\n        topics_1\n            .sort_by_key(|topic| (topic.topic_name(), topic.blocktime(), topic.message_count()));\n\n        assert_eq!(topics_1[0].topic_name(), \"Transfer\");\n        assert_eq!(topics_1[0].message_count(), 1);\n        assert_eq!(topics_1[0].blocktime(), block_time_1);\n    }\n\n    let block_time_2 = (block_time_1.value() + 1).into();\n    assert_ne!(block_time_1, block_time_2);\n\n    let execute_request = ExecuteRequestBuilder::default()\n        .with_initiator(*DEFAULT_ACCOUNT_HASH)\n        .with_caller_key(Key::Account(*DEFAULT_ACCOUNT_HASH))\n        .with_gas_limit(DEFAULT_GAS_LIMIT)\n        .with_transferred_value(1000)\n        .with_transaction_hash(TRANSACTION_HASH)\n        .with_target(ExecutionKind::SessionBytes(read_wasm(\n            \"vm2_cep18_caller.wasm\",\n        )))\n        .with_serialized_input((create_result.smart_contract_addr(),))\n        .with_transferred_value(0)\n        .with_shared_address_generator(Arc::clone(&address_generator))\n        .with_chain_name(DEFAULT_CHAIN_NAME)\n        .with_block_time(block_time_2)\n        .with_state_hash(Digest::from_raw([0; 32])) // TODO: Carry on state root hash\n        .with_block_height(2) // TODO: Carry on block height\n        .with_parent_block_hash(BlockHash::new(Digest::from_raw([0; 32]))) // TODO: Carry on parent block hash\n        .build()\n        .expect(\"should build\");\n\n    let result_2 = run_wasm_session(\n        &mut executor,\n        &mut global_state,\n        state_root_hash,\n        execute_request,\n    );\n    dbg!(result_2.gas_usage().gas_spent());\n\n    state_root_hash = global_state\n        
.commit_effects(state_root_hash, result_2.effects().clone())\n        .expect(\"Should commit\");\n\n    let MessageTopicsResult::Success { message_topics } =\n        global_state.message_topics(MessageTopicsRequest::new(state_root_hash, contract_hash))\n    else {\n        panic!(\"Expected success\")\n    };\n\n    assert!(matches!(message_topics.get(\"Transfer\"), Some(_)));\n    assert_ne!(\n        message_topics.get(\"Mint\"),\n        message_topics.get(\"Transfer\"),\n        \"Mint and Transfer topics should have different hashes\"\n    );\n\n    {\n        let msgs = global_state.prefixed_values(PrefixedValuesRequest::new(\n            state_root_hash,\n            KeyPrefix::MessageEntriesByEntity(contract_hash),\n        ));\n        let PrefixedValuesResult::Success {\n            key_prefix: _,\n            values,\n        } = msgs\n        else {\n            panic!(\"Expected success\")\n        };\n\n        let mut topics_2 = values\n            .iter()\n            .filter_map(|stored_value| stored_value.as_message_topic_summary())\n            .collect_vec();\n        topics_2\n            .sort_by_key(|topic| (topic.topic_name(), topic.blocktime(), topic.message_count()));\n\n        assert_eq!(topics_2.len(), 1);\n        assert_eq!(topics_2[0].topic_name(), \"Transfer\");\n        assert_eq!(topics_2[0].message_count(), 2);\n        assert_eq!(topics_2[0].blocktime(), block_time_2); // NOTE: Session called mint; the topic\n                                                           // summary blocktime is refreshed\n    }\n\n    let mut messages = result_2.messages().iter().collect_vec();\n    messages.sort_by_key(|message| {\n        (\n            message.topic_name(),\n            message.topic_index(),\n            message.block_index(),\n        )\n    });\n    assert_eq!(messages.len(), 2);\n    assert_eq!(messages[0].topic_name(), \"Transfer\");\n    assert_eq!(messages[0].topic_index(), 0);\n    assert_eq!(messages[0].block_index(), 
0);\n\n    assert_eq!(messages[1].topic_name(), \"Transfer\");\n    assert_eq!(messages[1].topic_index(), 1);\n    assert_eq!(messages[1].block_index(), 1);\n}\n\nfn make_global_state_with_genesis() -> (LmdbGlobalState, Digest, TempDir) {\n    let default_accounts = vec![GenesisAccount::Account {\n        public_key: DEFAULT_ACCOUNT_PUBLIC_KEY.clone(),\n        balance: Motes::new(U512::from(100 * CSPR)),\n        validator: None,\n    }];\n\n    let (global_state, _state_root_hash, _tempdir) =\n        global_state::state::lmdb::make_temporary_global_state([]);\n\n    let genesis_config = GenesisConfig::new(\n        default_accounts,\n        WasmConfig::default(),\n        SystemConfig::default(),\n        10,\n        10,\n        0,\n        Default::default(),\n        14,\n        Timestamp::now().millis(),\n        casper_types::HoldBalanceHandling::Accrued,\n        0,\n        true,\n        StorageCosts::default(),\n    );\n    let genesis_request: GenesisRequest = GenesisRequest::new(\n        Digest::hash(\"foo\"),\n        ProtocolVersion::V2_0_0,\n        genesis_config,\n        ChainspecRegistry::new_with_genesis(b\"\", b\"\"),\n    );\n    match global_state.genesis(genesis_request) {\n        GenesisResult::Failure(failure) => panic!(\"Failed to run genesis: {:?}\", failure),\n        GenesisResult::Fatal(fatal) => panic!(\"Fatal error while running genesis: {}\", fatal),\n        GenesisResult::Success {\n            post_state_hash,\n            effects: _,\n        } => (global_state, post_state_hash, _tempdir),\n    }\n}\n\n#[test]\nfn traits() {\n    let mut executor = make_executor();\n    let (mut global_state, state_root_hash, _tempdir) = make_global_state_with_genesis();\n\n    let execute_request = base_execute_builder()\n        .with_target(ExecutionKind::SessionBytes(read_wasm(\"vm2_trait.wasm\")))\n        .with_serialized_input(())\n        .with_shared_address_generator(make_address_generator())\n        .build()\n        
.expect(\"should build\");\n\n    run_wasm_session(\n        &mut executor,\n        &mut global_state,\n        state_root_hash,\n        execute_request,\n    );\n}\n\n#[test]\nfn upgradable() {\n    let mut executor = make_executor();\n\n    let (mut global_state, mut state_root_hash, _tempdir) = make_global_state_with_genesis();\n\n    let address_generator = make_address_generator();\n\n    let upgradable_address;\n\n    state_root_hash = {\n        let input_data = borsh::to_vec(&(0u8,)).map(Bytes::from).unwrap();\n\n        let create_request = base_install_request_builder()\n            .with_wasm_bytes(read_wasm(\"vm2_upgradable.wasm\"))\n            .with_shared_address_generator(Arc::clone(&address_generator))\n            .with_gas_limit(DEFAULT_GAS_LIMIT)\n            .with_transferred_value(0)\n            .with_entry_point(\"new\".to_string())\n            .with_input(input_data)\n            .build()\n            .expect(\"should build\");\n\n        let create_result = run_create_contract(\n            &mut executor,\n            &mut global_state,\n            state_root_hash,\n            create_request,\n        );\n\n        upgradable_address = *create_result.smart_contract_addr();\n\n        global_state\n            .commit_effects(state_root_hash, create_result.effects().clone())\n            .expect(\"Should commit\")\n    };\n\n    let version_before_upgrade = {\n        let execute_request = base_execute_builder()\n            .with_target(ExecutionKind::Stored {\n                address: upgradable_address,\n                entry_point: \"version\".to_string(),\n            })\n            .with_input(Bytes::new())\n            .with_gas_limit(DEFAULT_GAS_LIMIT)\n            .with_transferred_value(0)\n            .with_shared_address_generator(Arc::clone(&address_generator))\n            .build()\n            .expect(\"should build\");\n        let res = run_wasm_session(\n            &mut executor,\n            &mut global_state,\n    
        state_root_hash,\n            execute_request,\n        );\n        let output = res.output().expect(\"should have output\");\n        let version: String = borsh::from_slice(output).expect(\"should deserialize\");\n        version\n    };\n    assert_eq!(version_before_upgrade, \"v1\");\n\n    {\n        // Increment the value\n        let execute_request = base_execute_builder()\n            .with_target(ExecutionKind::Stored {\n                address: upgradable_address,\n                entry_point: \"increment\".to_string(),\n            })\n            .with_input(Bytes::new())\n            .with_gas_limit(DEFAULT_GAS_LIMIT)\n            .with_transferred_value(0)\n            .with_shared_address_generator(Arc::clone(&address_generator))\n            .build()\n            .expect(\"should build\");\n        let res = run_wasm_session(\n            &mut executor,\n            &mut global_state,\n            state_root_hash,\n            execute_request,\n        );\n        state_root_hash = global_state\n            .commit_effects(state_root_hash, res.effects().clone())\n            .expect(\"Should commit\");\n    };\n\n    let binding = read_wasm(\"vm2_upgradable_v2.wasm\");\n    let new_code = binding.as_ref();\n\n    let execute_request = base_execute_builder()\n        .with_transferred_value(0)\n        .with_target(ExecutionKind::Stored {\n            address: upgradable_address,\n            entry_point: \"perform_upgrade\".to_string(),\n        })\n        .with_gas_limit(DEFAULT_GAS_LIMIT * 10)\n        .with_serialized_input((new_code,))\n        .with_shared_address_generator(Arc::clone(&address_generator))\n        .build()\n        .expect(\"should build\");\n    let res = run_wasm_session(\n        &mut executor,\n        &mut global_state,\n        state_root_hash,\n        execute_request,\n    );\n    state_root_hash = global_state\n        .commit_effects(state_root_hash, res.effects().clone())\n        .expect(\"Should 
commit\");\n\n    let version_after_upgrade = {\n        let execute_request = base_execute_builder()\n            .with_target(ExecutionKind::Stored {\n                address: upgradable_address,\n                entry_point: \"version\".to_string(),\n            })\n            .with_input(Bytes::new())\n            .with_gas_limit(DEFAULT_GAS_LIMIT)\n            .with_transferred_value(0)\n            .with_shared_address_generator(Arc::clone(&address_generator))\n            .build()\n            .expect(\"should build\");\n        let res = run_wasm_session(\n            &mut executor,\n            &mut global_state,\n            state_root_hash,\n            execute_request,\n        );\n        let output = res.output().expect(\"should have output\");\n        let version: String = borsh::from_slice(output).expect(\"should deserialize\");\n        version\n    };\n    assert_eq!(version_after_upgrade, \"v2\");\n\n    {\n        // Increment the value\n        let execute_request = base_execute_builder()\n            .with_target(ExecutionKind::Stored {\n                address: upgradable_address,\n                entry_point: \"increment_by\".to_string(),\n            })\n            .with_serialized_input((10u64,))\n            .with_gas_limit(DEFAULT_GAS_LIMIT)\n            .with_transferred_value(0)\n            .with_shared_address_generator(Arc::clone(&address_generator))\n            .build()\n            .expect(\"should build\");\n        let res = run_wasm_session(\n            &mut executor,\n            &mut global_state,\n            state_root_hash,\n            execute_request,\n        );\n        state_root_hash = global_state\n            .commit_effects(state_root_hash, res.effects().clone())\n            .expect(\"Should commit\");\n    };\n\n    let _ = state_root_hash;\n}\n\nfn run_create_contract(\n    executor: &mut ExecutorV2,\n    global_state: &LmdbGlobalState,\n    pre_state_hash: Digest,\n    install_contract_request: 
InstallContractRequest,\n) -> InstallContractResult {\n    executor\n        .install_contract(pre_state_hash, global_state, install_contract_request)\n        .expect(\"Succeed\")\n}\n\nfn run_wasm_session(\n    executor: &mut ExecutorV2,\n    global_state: &LmdbGlobalState,\n    pre_state_hash: Digest,\n    execute_request: ExecuteRequest,\n) -> ExecuteWithProviderResult {\n    let result = executor\n        .execute_with_provider(pre_state_hash, global_state, execute_request)\n        .expect(\"Succeed\");\n\n    if let Some(host_error) = result.host_error {\n        panic!(\"Host error: {host_error:?}\")\n    }\n\n    result\n}\n\n#[test]\nfn backwards_compatibility() {\n    let (mut global_state, post_state_hash, _temp) = {\n        let fixture_name = \"counter_contract\";\n        // /Users/michal/Dev/casper-node/execution_engine_testing/tests/fixtures/counter_contract/\n        // global_state/data.lmdb\n        let lmdb_fixtures_base_dir = Path::new(env!(\"CARGO_MANIFEST_DIR\"))\n            .join(\"../\")\n            .join(\"../\")\n            .join(\"execution_engine_testing\")\n            .join(\"tests\")\n            .join(\"fixtures\");\n        assert!(lmdb_fixtures_base_dir.exists());\n\n        let source = lmdb_fixtures_base_dir.join(\"counter_contract\");\n        let to = tempfile::tempdir().expect(\"should create temp dir\");\n        fs_extra::copy_items(&[source], &to, &dir::CopyOptions::default())\n            .expect(\"should copy global state fixture\");\n\n        let path_to_state = to.path().join(fixture_name).join(\"state.json\");\n\n        let lmdb_fixture_state: serde_json::Value =\n            serde_json::from_reader(File::open(path_to_state).unwrap()).unwrap();\n        let post_state_hash =\n            Digest::from_hex(lmdb_fixture_state[\"post_state_hash\"].as_str().unwrap()).unwrap();\n\n        let path_to_gs = to.path().join(fixture_name).join(\"global_state\");\n\n        const DEFAULT_LMDB_PAGES: usize = 256_000_000;\n   
     const DEFAULT_MAX_READERS: u32 = 512;\n\n        let environment = LmdbEnvironment::new(\n            &path_to_gs,\n            16384 * DEFAULT_LMDB_PAGES,\n            DEFAULT_MAX_READERS,\n            true,\n        )\n        .expect(\"should create LmdbEnvironment\");\n\n        let trie_store =\n            LmdbTrieStore::open(&environment, None).expect(\"should open LmdbTrieStore\");\n        (\n            LmdbGlobalState::new(\n                Arc::new(environment),\n                Arc::new(trie_store),\n                post_state_hash,\n                100,\n                false,\n            ),\n            post_state_hash,\n            to,\n        )\n    };\n\n    let result = global_state.query(QueryRequest::new(\n        post_state_hash,\n        Key::Account(*DEFAULT_ACCOUNT_HASH),\n        Vec::new(),\n    ));\n    let value = match result {\n        QueryResult::RootNotFound => todo!(),\n        QueryResult::ValueNotFound(value) => panic!(\"Value not found: {:?}\", value),\n        QueryResult::Success { value, .. } => value,\n        QueryResult::Failure(failure) => panic!(\"Failed to query: {:?}\", failure),\n    };\n\n    //\n    // Calling legacy contract directly by it's address\n    //\n\n    let mut state_root_hash = post_state_hash;\n\n    let value = match *value {\n        StoredValue::Account(account) => account,\n        _ => panic!(\"Expected CLValue\"),\n    };\n\n    let counter_hash = match value.named_keys().get(\"counter\") {\n        Some(Key::Hash(hash_address)) => hash_address,\n        _ => panic!(\"Expected counter URef\"),\n    };\n\n    let mut executor = make_executor();\n    let address_generator = make_address_generator();\n\n    // Calling v1 vm directly by hash is not currently supported (i.e. 
disabling vm1 runtime, and\n    // allowing vm1 direct calls may circumvent chainspec setting) let execute_request =\n    // base_execute_builder()     .with_target(ExecutionKind::Stored {\n    //         address: *counter_hash,\n    //         entry_point: \"counter_get\".to_string(),\n    //     })\n    //     .with_input(runtime_args.into())\n    //     .with_gas_limit(DEFAULT_GAS_LIMIT)\n    //     .with_transferred_value(0)\n    //     .with_shared_address_generator(Arc::clone(&address_generator))\n    //     .with_state_hash(state_root_hash)\n    //     .with_block_height(1)\n    //     .with_parent_block_hash(BlockHash::new(Digest::hash(b\"block1\")))\n    //     .build()\n    //     .expect(\"should build\");\n    // let res = run_wasm_session(\n    //     &mut executor,\n    //     &mut global_state,\n    //     state_root_hash,\n    //     execute_request,\n    // );\n    // state_root_hash = global_state\n    //     .commit_effects(state_root_hash, res.effects().clone())\n    //     .expect(\"Should commit\");\n\n    //\n    // Instantiate v2 runtime proxy contract\n    //\n    let input_data = counter_hash.to_vec();\n    let install_request: InstallContractRequest = base_install_request_builder()\n        .with_wasm_bytes(read_wasm(\"vm2_legacy_counter_proxy.wasm\"))\n        .with_shared_address_generator(Arc::clone(&address_generator))\n        .with_transferred_value(0)\n        .with_entry_point(\"new\".to_string())\n        .with_input(input_data.into())\n        .with_state_hash(state_root_hash)\n        .with_block_height(2)\n        .with_parent_block_hash(BlockHash::new(Digest::hash(b\"block2\")))\n        .build()\n        .expect(\"should build\");\n\n    let create_result = run_create_contract(\n        &mut executor,\n        &mut global_state,\n        state_root_hash,\n        install_request,\n    );\n\n    state_root_hash = create_result.post_state_hash();\n\n    let proxy_address = *create_result.smart_contract_addr();\n\n    // Call 
v2 contract\n\n    let call_request = base_execute_builder()\n        .with_target(ExecutionKind::Stored {\n            address: proxy_address,\n            entry_point: \"perform_test\".to_string(),\n        })\n        .with_input(Bytes::new())\n        .with_gas_limit(DEFAULT_GAS_LIMIT)\n        .with_transferred_value(0)\n        .with_shared_address_generator(Arc::clone(&address_generator))\n        .with_state_hash(state_root_hash)\n        .with_block_height(3)\n        .with_parent_block_hash(BlockHash::new(Digest::hash(b\"block3\")))\n        .build()\n        .expect(\"should build\");\n\n    run_wasm_session(\n        &mut executor,\n        &mut global_state,\n        state_root_hash,\n        call_request,\n    );\n}\n\n// host function tests\n\nfn call_dummy_host_fn_by_name(\n    host_function_name: &str,\n    gas_limit: u64,\n) -> Result<InstallContractResult, InstallContractError> {\n    let executor = {\n        let execution_engine_v1 = ExecutionEngineV1::default();\n        let default_wasm_config = WasmV2Config::default();\n        let wasm_config = WasmV2Config::new(\n            default_wasm_config.max_memory(),\n            default_wasm_config.opcode_costs(),\n            HostFunctionCostsV2 {\n                read: HostFunctionV2::fixed(1),\n                write: HostFunctionV2::fixed(1),\n                remove: HostFunctionV2::fixed(1),\n                copy_input: HostFunctionV2::fixed(1),\n                ret: HostFunctionV2::fixed(1),\n                create: HostFunctionV2::fixed(1),\n                transfer: HostFunctionV2::fixed(1),\n                env_balance: HostFunctionV2::fixed(1),\n                upgrade: HostFunctionV2::fixed(1),\n                call: HostFunctionV2::fixed(1),\n                print: HostFunctionV2::fixed(1),\n                emit: HostFunctionV2::fixed(1),\n                env_info: HostFunctionV2::fixed(1),\n            },\n        );\n        let executor_config = ExecutorConfigBuilder::default()\n     
       .with_memory_limit(17)\n            .with_executor_kind(ExecutorKind::Compiled)\n            .with_wasm_config(wasm_config)\n            .with_storage_costs(StorageCosts::default())\n            .with_message_limits(MessageLimits::default())\n            .build()\n            .expect(\"Should build\");\n        ExecutorV2::new(executor_config, Arc::new(execution_engine_v1))\n    };\n\n    let (mut global_state, state_root_hash, _tempdir) = make_global_state_with_genesis();\n\n    let address_generator = make_address_generator();\n\n    let input_data = borsh::to_vec(&(host_function_name.to_owned(),))\n        .map(Bytes::from)\n        .unwrap();\n\n    let create_request = InstallContractRequestBuilder::default()\n        .with_initiator(*DEFAULT_ACCOUNT_HASH)\n        .with_gas_limit(gas_limit)\n        .with_transaction_hash(TRANSACTION_HASH)\n        .with_wasm_bytes(read_wasm(\"vm2_host.wasm\"))\n        .with_shared_address_generator(Arc::clone(&address_generator))\n        .with_transferred_value(0)\n        .with_entry_point(\"new\".to_string())\n        .with_input(input_data)\n        .with_chain_name(DEFAULT_CHAIN_NAME)\n        .with_block_time(Timestamp::now().into())\n        .with_state_hash(Digest::from_raw([0; 32]))\n        .with_block_height(1)\n        .with_parent_block_hash(BlockHash::new(Digest::from_raw([0; 32])))\n        .build()\n        .expect(\"should build\");\n\n    executor.install_contract(state_root_hash, &mut global_state, create_request)\n}\n\nfn assert_consumes_gas(host_function_name: &str) {\n    let result = call_dummy_host_fn_by_name(host_function_name, 1);\n    assert!(result.is_err_and(|e| match e {\n        InstallContractError::Constructor {\n            host_error: CallError::CalleeGasDepleted,\n        } => true,\n        _ => false,\n    }));\n}\n\n#[test]\nfn host_functions_consume_gas() {\n    assert_consumes_gas(\"get_caller\");\n    assert_consumes_gas(\"get_block_time\");\n    
assert_consumes_gas(\"get_transferred_value\");\n    assert_consumes_gas(\"get_balance_of\");\n    assert_consumes_gas(\"call\");\n    assert_consumes_gas(\"input\");\n    assert_consumes_gas(\"create\");\n    assert_consumes_gas(\"print\");\n    assert_consumes_gas(\"read\");\n    assert_consumes_gas(\"ret\");\n    assert_consumes_gas(\"transfer\");\n    assert_consumes_gas(\"upgrade\");\n    assert_consumes_gas(\"write\");\n}\n\n#[allow(dead_code)]\nfn write_n_bytes_at_limit(\n    bytes_len: u64,\n    gas_limit: u64,\n) -> Result<InstallContractResult, InstallContractError> {\n    let executor = {\n        let execution_engine_v1 = ExecutionEngineV1::default();\n        let default_wasm_config = WasmV2Config::default();\n        let wasm_config = WasmV2Config::new(\n            default_wasm_config.max_memory(),\n            default_wasm_config.opcode_costs(),\n            HostFunctionCostsV2 {\n                read: HostFunctionV2::fixed(0),\n                write: HostFunctionV2::fixed(0),\n                remove: HostFunctionV2::fixed(0),\n                copy_input: HostFunctionV2::fixed(0),\n                ret: HostFunctionV2::fixed(0),\n                create: HostFunctionV2::fixed(0),\n                transfer: HostFunctionV2::fixed(0),\n                env_balance: HostFunctionV2::fixed(0),\n                upgrade: HostFunctionV2::fixed(0),\n                call: HostFunctionV2::fixed(0),\n                print: HostFunctionV2::fixed(0),\n                emit: HostFunctionV2::fixed(0),\n                env_info: HostFunctionV2::fixed(0),\n            },\n        );\n        let executor_config = ExecutorConfigBuilder::default()\n            .with_memory_limit(17)\n            .with_executor_kind(ExecutorKind::Compiled)\n            .with_wasm_config(wasm_config)\n            .with_storage_costs(StorageCosts::new(1))\n            .with_message_limits(MessageLimits::default())\n            .build()\n            .expect(\"Should build\");\n        
ExecutorV2::new(executor_config, Arc::new(execution_engine_v1))\n    };\n\n    let (mut global_state, state_root_hash, _tempdir) = make_global_state_with_genesis();\n\n    let address_generator = make_address_generator();\n\n    let input_data = borsh::to_vec(&(bytes_len,)).map(Bytes::from).unwrap();\n\n    let create_request = InstallContractRequestBuilder::default()\n        .with_initiator(*DEFAULT_ACCOUNT_HASH)\n        .with_gas_limit(gas_limit)\n        .with_transaction_hash(TRANSACTION_HASH)\n        .with_wasm_bytes(read_wasm(\"vm2_host.wasm\"))\n        .with_shared_address_generator(Arc::clone(&address_generator))\n        .with_transferred_value(0)\n        .with_entry_point(\"new_with_write\".to_string())\n        .with_input(input_data)\n        .with_chain_name(DEFAULT_CHAIN_NAME)\n        .with_block_time(Timestamp::now().into())\n        .with_state_hash(Digest::from_raw([0; 32]))\n        .with_block_height(1)\n        .with_parent_block_hash(BlockHash::new(Digest::from_raw([0; 32])))\n        .build()\n        .expect(\"should build\");\n\n    executor.install_contract(state_root_hash, &mut global_state, create_request)\n}\n\n// #[test]\n// fn consume_gas_on_write() {\n//     let successful_write = write_n_bytes_at_limit(50, 10_000);\n//     assert!(successful_write.is_ok());\n\n//     let out_of_gas_write_exceeded_gas_limit = write_n_bytes_at_limit(50, 10);\n//     assert!(out_of_gas_write_exceeded_gas_limit.is_err_and(|e| match e {\n//         InstallContractError::Constructor {\n//             host_error: HostError::CalleeGasDepleted,\n//         } => true,\n//         _ => false,\n//     }));\n// }\n\n#[test]\nfn non_existing_smart_contract_does_not_panic() {\n    let address_generator = make_address_generator();\n    let executor = make_executor();\n    let (mut global_state, state_root_hash, _tempdir) = make_global_state_with_genesis();\n\n    let non_existing_address = [255; 32];\n    let execute_request = base_execute_builder()\n        
.with_target(ExecutionKind::Stored {\n            address: non_existing_address,\n            entry_point: \"non_existing\".to_string(),\n        })\n        .with_input(Bytes::new())\n        .with_gas_limit(DEFAULT_GAS_LIMIT)\n        .with_transferred_value(0)\n        .with_shared_address_generator(Arc::clone(&address_generator))\n        .build()\n        .expect(\"should build\");\n\n    let result = executor\n        .execute_with_provider(state_root_hash, &mut global_state, execute_request)\n        .expect_err(\"Failure\");\n\n    assert!(matches!(\n        result,\n        ExecuteWithProviderError::Execute(execute_error) if matches!(execute_error, ExecuteError::CodeNotFound(address) if address == non_existing_address)));\n}\n"
  },
  {
    "path": "executor/wasm_common/Cargo.toml",
    "content": "[package]\nname = \"casper-executor-wasm-common\"\nversion = \"0.1.3\"\nedition = \"2021\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\ndescription = \"Casper executor common package\"\nhomepage = \"https://casper.network\"\nrepository = \"https://github.com/casper-network/casper-node/tree/dev/executor/wasm_common\"\nlicense = \"Apache-2.0\"\n\n[dependencies]\nbitflags = \"2.9\"\nblake2 = \"0.10\"\nborsh = { version = \"1.5\", features = [\"derive\"] }\ncasper-contract-sdk-sys = { version = \"0.1.3\", path = \"../../smart_contracts/sdk_sys\" }\nnum-derive = { workspace = true }\nnum-traits = { workspace = true }\nthiserror = \"2\"\nsafe-transmute = \"0.11\"\n\n[target.'cfg(not(target_arch = \"wasm32\"))'.dependencies]\nserde = { version = \"1\", features = [\"derive\"] }\n\n[dev-dependencies]\nhex = \"0.4\"\n"
  },
  {
    "path": "executor/wasm_common/src/chain_utils.rs",
    "content": "use blake2::{digest::consts::U32, Blake2b, Digest};\n\n/// Compute a predictable address for a contract.\n///\n/// The address is computed as the hash of the chain name, initiator account, and the hash of the\n/// Wasm code.\npub fn compute_predictable_address<T: AsRef<[u8]>>(\n    chain_name: T,\n    initiator_address: [u8; 32],\n    bytecode_hash: [u8; 32],\n    seed: Option<[u8; 32]>,\n) -> [u8; 32] {\n    let mut hasher = Blake2b::<U32>::new();\n\n    hasher.update(chain_name);\n    hasher.update(initiator_address);\n    hasher.update(bytecode_hash);\n\n    if let Some(seed) = seed {\n        hasher.update(seed);\n    }\n\n    hasher.finalize().into()\n}\n\npub fn compute_wasm_bytecode_hash<T: AsRef<[u8]>>(wasm_bytes: T) -> [u8; 32] {\n    let mut hasher = Blake2b::<U32>::new();\n    hasher.update(wasm_bytes);\n    let hash = hasher.finalize();\n    hash.into()\n}\n\n#[cfg(test)]\nmod tests {\n    const SEED: [u8; 32] = [1u8; 32];\n\n    #[test]\n    fn test_compute_predictable_address() {\n        let initiator = [1u8; 32];\n        let bytecode_hash = [2u8; 32];\n\n        let predictable_address_1 =\n            super::compute_predictable_address(\"testnet\", initiator, bytecode_hash, Some(SEED));\n        let predictable_address_2 =\n            super::compute_predictable_address(\"mainnet\", initiator, bytecode_hash, Some(SEED));\n        assert_ne!(predictable_address_1, predictable_address_2);\n    }\n}\n"
  },
  {
    "path": "executor/wasm_common/src/entry_point.rs",
    "content": "/// The caller must cover cost.\n///\n/// This is the default mode in VM2 runtime.\npub const ENTRY_POINT_PAYMENT_CALLER: u8 = 0;\n/// Will cover cost to execute self but not cost of any subsequent invoked contracts\npub const ENTRY_POINT_PAYMENT_DIRECT_INVOCATION_ONLY: u8 = 1;\n/// will cover cost to execute self and the cost of any subsequent invoked contracts\npub const ENTRY_POINT_PAYMENT_SELF_ONWARD: u8 = 2;\n"
  },
  {
    "path": "executor/wasm_common/src/env_info.rs",
    "content": "use safe_transmute::TriviallyTransmutable;\n\n#[derive(Clone, Copy)]\n#[repr(C)]\npub struct EnvInfo {\n    pub block_time: u64,\n    pub transferred_value: u64,\n    pub caller_addr: [u8; 32],\n    pub caller_kind: u32,\n    pub callee_addr: [u8; 32],\n    pub callee_kind: u32,\n}\n\nunsafe impl TriviallyTransmutable for EnvInfo {}\n"
  },
  {
    "path": "executor/wasm_common/src/error.rs",
    "content": "//! Error code for signaling error while processing a host function.\n//!\n//! API inspired by `std::io::Error` and `std::io::ErrorKind` but somewhat more memory efficient.\n\nuse thiserror::Error;\n\n#[derive(Debug, Default, PartialEq)]\n#[non_exhaustive]\n#[repr(u32)]\npub enum CommonResult {\n    #[default]\n    Success = 0,\n    /// An entity was not found, often a missing key in the global state.\n    NotFound = 1,\n    /// Data not valid for the operation were encountered.\n    ///\n    /// As an example this could be a malformed parameter that does not contain a valid UTF-8.\n    InvalidData = 2,\n    /// The input to the host function was invalid.\n    InvalidInput = 3,\n    /// The topic is too long.\n    TopicTooLong = 4,\n    /// Too many topics.\n    TooManyTopics = 5,\n    /// The payload is too long.\n    PayloadTooLong = 6,\n    /// The message topic is full and cannot accept new messages.\n    MessageTopicFull = 7,\n    /// The maximum number of messages emitted per block was exceeded when trying to emit a\n    /// message.\n    MaxMessagesPerBlockExceeded = 8,\n    /// Internal error (for example, failed to acquire a lock)\n    Internal = 9,\n    /// An error code not covered by the other variants.\n    Other(u32),\n}\n\npub const HOST_ERROR_SUCCESS: u32 = 0;\npub const HOST_ERROR_NOT_FOUND: u32 = 1;\npub const HOST_ERROR_INVALID_DATA: u32 = 2;\npub const HOST_ERROR_INVALID_INPUT: u32 = 3;\npub const HOST_ERROR_TOPIC_TOO_LONG: u32 = 4;\npub const HOST_ERROR_TOO_MANY_TOPICS: u32 = 5;\npub const HOST_ERROR_PAYLOAD_TOO_LONG: u32 = 6;\npub const HOST_ERROR_MESSAGE_TOPIC_FULL: u32 = 7;\npub const HOST_ERROR_MAX_MESSAGES_PER_BLOCK_EXCEEDED: u32 = 8;\npub const HOST_ERROR_INTERNAL: u32 = 9;\n\nimpl From<u32> for CommonResult {\n    fn from(value: u32) -> Self {\n        match value {\n            HOST_ERROR_SUCCESS => Self::Success,\n            HOST_ERROR_NOT_FOUND => Self::NotFound,\n            HOST_ERROR_INVALID_DATA => 
Self::InvalidData,\n            HOST_ERROR_INVALID_INPUT => Self::InvalidInput,\n            HOST_ERROR_TOPIC_TOO_LONG => Self::TopicTooLong,\n            HOST_ERROR_TOO_MANY_TOPICS => Self::TooManyTopics,\n            HOST_ERROR_PAYLOAD_TOO_LONG => Self::PayloadTooLong,\n            HOST_ERROR_MESSAGE_TOPIC_FULL => Self::MessageTopicFull,\n            HOST_ERROR_MAX_MESSAGES_PER_BLOCK_EXCEEDED => Self::MaxMessagesPerBlockExceeded,\n            HOST_ERROR_INTERNAL => Self::Internal,\n            other => Self::Other(other),\n        }\n    }\n}\n\npub fn result_from_code(code: u32) -> Result<(), CommonResult> {\n    match code {\n        HOST_ERROR_SUCCESS => Ok(()),\n        other => Err(CommonResult::from(other)),\n    }\n}\n\n/// Wasm trap code.\n#[derive(Debug, Error)]\npub enum TrapCode {\n    /// Trap code for out of bounds memory access.\n    #[error(\"call stack exhausted\")]\n    StackOverflow,\n    /// Trap code for out of bounds memory access.\n    #[error(\"out of bounds memory access\")]\n    MemoryOutOfBounds,\n    /// Trap code for out of bounds table access.\n    #[error(\"undefined element: out of bounds table access\")]\n    TableAccessOutOfBounds,\n    /// Trap code for indirect call to null.\n    #[error(\"uninitialized element\")]\n    IndirectCallToNull,\n    /// Trap code for indirect call type mismatch.\n    #[error(\"indirect call type mismatch\")]\n    BadSignature,\n    /// Trap code for integer overflow.\n    #[error(\"integer overflow\")]\n    IntegerOverflow,\n    /// Trap code for division by zero.\n    #[error(\"integer divide by zero\")]\n    IntegerDivisionByZero,\n    /// Trap code for invalid conversion to integer.\n    #[error(\"invalid conversion to integer\")]\n    BadConversionToInteger,\n    /// Trap code for unreachable code reached triggered by unreachable instruction.\n    #[error(\"unreachable\")]\n    UnreachableCodeReached,\n}\n\npub const CALLEE_SUCCEEDED: u32 = 0;\npub const CALLEE_REVERTED: u32 = 1;\npub const 
CALLEE_TRAPPED: u32 = 2;\npub const CALLEE_GAS_DEPLETED: u32 = 3;\npub const CALLEE_NOT_CALLABLE: u32 = 4;\npub const CALLEE_HOST_ERROR: u32 = 5;\n\n/// Represents the result of a host function call.\n///\n/// 0 is used as a success.\n#[derive(Debug, Error)]\npub enum CallError {\n    /// Callee contract reverted.\n    #[error(\"callee reverted\")]\n    CalleeReverted,\n    /// Called contract trapped.\n    #[error(\"callee trapped: {0}\")]\n    CalleeTrapped(TrapCode),\n    /// Called contract reached gas limit.\n    #[error(\"callee gas depleted\")]\n    CalleeGasDepleted,\n    /// Called contract is not callable.\n    #[error(\"not callable\")]\n    NotCallable,\n}\n\nimpl CallError {\n    /// Converts the host error into a u32.\n    #[must_use]\n    pub fn into_u32(self) -> u32 {\n        match self {\n            Self::CalleeReverted => CALLEE_REVERTED,\n            Self::CalleeTrapped(_) => CALLEE_TRAPPED,\n            Self::CalleeGasDepleted => CALLEE_GAS_DEPLETED,\n            Self::NotCallable => CALLEE_NOT_CALLABLE,\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_from_u32_not_found() {\n        let error = CommonResult::from(HOST_ERROR_NOT_FOUND);\n        assert_eq!(error, CommonResult::NotFound);\n    }\n\n    #[test]\n    fn test_from_u32_invalid_data() {\n        let error = CommonResult::from(HOST_ERROR_INVALID_DATA);\n        assert_eq!(error, CommonResult::InvalidData);\n    }\n\n    #[test]\n    fn test_from_u32_invalid_input() {\n        let error = CommonResult::from(HOST_ERROR_INVALID_INPUT);\n        assert_eq!(error, CommonResult::InvalidInput);\n    }\n\n    #[test]\n    fn test_from_u32_other() {\n        let error = CommonResult::from(10);\n        assert_eq!(error, CommonResult::Other(10));\n    }\n}\n"
  },
  {
    "path": "executor/wasm_common/src/flags.rs",
    "content": "//! Types that can be safely shared between host and the wasm sdk.\nuse bitflags::bitflags;\n\nbitflags! {\n    /// Flags that can be passed as part of returning values.\n    #[repr(transparent)]\n    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]\n    pub struct ReturnFlags: u32 {\n        /// If this bit is set, the host should return the value to the caller and all the execution effects are reverted.\n        const REVERT = 0x0000_0001;\n\n        // The source may set any bits.\n        const _ = !0;\n    }\n\n    #[repr(transparent)]\n    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]\n    pub struct EntryPointFlags: u32 {\n        const CONSTRUCTOR = 0x0000_0001;\n        const FALLBACK = 0x0000_0002;\n    }\n\n    /// Flags that can be passed as part of calling contracts.\n    #[repr(transparent)]\n    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]\n    pub struct CallFlags: u32 {\n        // TODO: This is a placeholder\n    }\n}\n\nimpl Default for EntryPointFlags {\n    fn default() -> Self {\n        Self::empty()\n    }\n}\n\nimpl Default for CallFlags {\n    fn default() -> Self {\n        Self::empty()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_return_flags() {\n        assert_eq!(ReturnFlags::empty().bits(), 0x0000_0000);\n        assert_eq!(ReturnFlags::REVERT.bits(), 0x0000_0001);\n    }\n\n    #[test]\n    fn creating_from_invalid_bit_flags_does_not_fail() {\n        let _return_flags = ReturnFlags::from_bits(u32::MAX).unwrap();\n        let _revert = ReturnFlags::from_bits(0x0000_0001).unwrap();\n        let _empty = ReturnFlags::from_bits(0x0000_0000).unwrap();\n    }\n}\n"
  },
  {
    "path": "executor/wasm_common/src/keyspace.rs",
    "content": "use num_derive::{FromPrimitive, ToPrimitive};\n\n#[repr(u64)]\n#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)]\npub enum KeyspaceTag {\n    /// Used for a state based storage which usually involves single dimensional data i.e.\n    /// key-value pairs, etc.\n    ///\n    /// See also [`Keyspace::State`].\n    State = 0,\n    /// Used for a context based storage which usually involves multi dimensional data i.e. maps,\n    /// efficient vectors, etc.\n    Context = 1,\n    /// Used for a named key based storage which usually involves named keys.\n    NamedKey = 2,\n    /// Used for a payment info based storage which usually involves payment information.\n    PaymentInfo = 3,\n}\n\n#[repr(u64)]\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub enum Keyspace<'a> {\n    /// Stores contract's context.\n    ///\n    /// There's no additional payload for this variant as the host implies the contract's address.\n    State,\n    /// Stores contract's context data. 
Bytes can be any value as long as it uniquely identifies a\n    /// value.\n    Context(&'a [u8]),\n    /// Stores contract's named keys.\n    NamedKey(&'a str),\n    /// Entry point payment info.\n    PaymentInfo(&'a str),\n}\n\nimpl Keyspace<'_> {\n    #[must_use]\n    pub fn as_tag(&self) -> KeyspaceTag {\n        match self {\n            Keyspace::State => KeyspaceTag::State,\n            Keyspace::Context(_) => KeyspaceTag::Context,\n            Keyspace::NamedKey(_) => KeyspaceTag::NamedKey,\n            Keyspace::PaymentInfo(_) => KeyspaceTag::PaymentInfo,\n        }\n    }\n\n    #[must_use]\n    pub fn as_u64(&self) -> u64 {\n        self.as_tag() as u64\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_as_tag_state() {\n        let keyspace = Keyspace::State;\n        assert_eq!(keyspace.as_tag(), KeyspaceTag::State);\n    }\n\n    #[test]\n    fn test_as_tag_context() {\n        let data = [1, 2, 3];\n        let keyspace = Keyspace::Context(&data);\n        assert_eq!(keyspace.as_tag(), KeyspaceTag::Context);\n    }\n\n    #[test]\n    fn test_as_tag_named_key() {\n        let name = \"my_key\";\n        let keyspace = Keyspace::NamedKey(name);\n        assert_eq!(keyspace.as_tag(), KeyspaceTag::NamedKey);\n    }\n\n    #[test]\n    fn test_as_u64_state() {\n        let keyspace = Keyspace::State;\n        assert_eq!(keyspace.as_u64(), 0);\n    }\n\n    #[test]\n    fn test_as_u64_context() {\n        let data = [1, 2, 3];\n        let keyspace = Keyspace::Context(&data);\n        assert_eq!(keyspace.as_u64(), 1);\n    }\n\n    #[test]\n    fn test_as_u64_named_key() {\n        let name = \"my_key\";\n        let keyspace = Keyspace::NamedKey(name);\n        assert_eq!(keyspace.as_u64(), 2);\n    }\n\n    #[test]\n    fn test_as_u64_payment_info() {\n        let name = \"entry_point\";\n        let keyspace: Keyspace = Keyspace::PaymentInfo(name);\n        assert_eq!(keyspace.as_u64(), 3);\n    }\n}\n"
  },
  {
    "path": "executor/wasm_common/src/lib.rs",
    "content": "//! A crate that shares common types and utilities between the Wasm executor and the Wasm interface.\npub mod chain_utils;\npub mod entry_point;\npub mod env_info;\npub mod error;\npub mod flags;\npub mod keyspace;\n"
  },
  {
    "path": "executor/wasm_host/Cargo.toml",
    "content": "[package]\nname = \"casper-executor-wasm-host\"\nversion = \"0.1.3\"\nedition = \"2021\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\ndescription = \"Casper executor host package\"\nhomepage = \"https://casper.network\"\nrepository = \"https://github.com/casper-network/casper-node/tree/dev/executor/wasm_host\"\nlicense = \"Apache-2.0\"\n\n[dependencies]\nbase16 = \"0.2\"\nbytes = \"1.10\"\ncasper-executor-wasm-common = { version = \"0.1.3\", path = \"../wasm_common\" }\ncasper-executor-wasm-interface = { version = \"0.1.3\", path = \"../wasm_interface\" }\ncasper-storage = { version = \"5.0.0\", path = \"../../storage\" }\ncasper-types = { version = \"7.0.0\", path = \"../../types\" }\neither = \"1.15\"\nnum-derive = { workspace = true }\nnum-traits = { workspace = true }\nparking_lot = \"0.12\"\nsafe-transmute = \"0.11\"\nthiserror = \"2\"\ntracing = \"0.1\"\n"
  },
  {
    "path": "executor/wasm_host/src/abi.rs",
    "content": "use safe_transmute::TriviallyTransmutable;\n\n#[repr(C)]\n#[derive(Copy, Clone, Debug, PartialEq)]\npub(crate) struct ReadInfo {\n    /// Allocated pointer.\n    pub(crate) data: u32,\n    /// Size in bytes.\n    pub(crate) data_size: u32,\n}\n\nunsafe impl TriviallyTransmutable for ReadInfo {}\n\n#[repr(C)]\n#[derive(Copy, Clone, Debug, PartialEq)]\n\npub(crate) struct CreateResult {\n    pub(crate) package_address: [u8; 32],\n}\n\nunsafe impl TriviallyTransmutable for CreateResult {}\n"
  },
  {
    "path": "executor/wasm_host/src/context.rs",
    "content": "use std::sync::Arc;\n\nuse bytes::Bytes;\nuse casper_executor_wasm_interface::executor::Executor;\nuse casper_storage::{global_state::GlobalStateReader, AddressGenerator, TrackingCopy};\nuse casper_types::{\n    account::AccountHash, BlockTime, Key, MessageLimits, StorageCosts, TransactionHash,\n    WasmV2Config,\n};\nuse parking_lot::RwLock;\n\n/// Container that holds all relevant modules necessary to process an execution request.\npub struct Context<S: GlobalStateReader, E: Executor> {\n    /// The address of the account that initiated the contract or session code.\n    pub initiator: AccountHash,\n    /// The address of the addressable entity that is currently executing the contract or session\n    /// code.\n    pub caller: Key,\n    /// The address of the addressable entity that is being called.\n    pub callee: Key,\n    /// The state of the global state at the time of the call based on the currently executing\n    /// contract or session address.\n    // pub state_address: Address,\n    /// The amount of tokens that were send to the contract's purse at the time of the call.\n    pub transferred_value: u64,\n    pub config: WasmV2Config,\n    pub storage_costs: StorageCosts,\n    pub message_limits: MessageLimits,\n    pub tracking_copy: TrackingCopy<S>,\n    pub executor: E, // TODO: This could be part of the caller\n    pub transaction_hash: TransactionHash,\n    pub address_generator: Arc<RwLock<AddressGenerator>>,\n    pub chain_name: Arc<str>,\n    pub input: Bytes,\n    pub block_time: BlockTime,\n}\n"
  },
  {
    "path": "executor/wasm_host/src/host.rs",
    "content": "use std::{borrow::Cow, num::NonZeroU32, sync::Arc};\n\nuse bytes::Bytes;\nuse casper_executor_wasm_common::{\n    chain_utils,\n    entry_point::{\n        ENTRY_POINT_PAYMENT_CALLER, ENTRY_POINT_PAYMENT_DIRECT_INVOCATION_ONLY,\n        ENTRY_POINT_PAYMENT_SELF_ONWARD,\n    },\n    env_info::EnvInfo,\n    error::{\n        CallError, CALLEE_NOT_CALLABLE, CALLEE_SUCCEEDED, CALLEE_TRAPPED, HOST_ERROR_INVALID_DATA,\n        HOST_ERROR_INVALID_INPUT, HOST_ERROR_MAX_MESSAGES_PER_BLOCK_EXCEEDED,\n        HOST_ERROR_MESSAGE_TOPIC_FULL, HOST_ERROR_NOT_FOUND, HOST_ERROR_PAYLOAD_TOO_LONG,\n        HOST_ERROR_SUCCESS, HOST_ERROR_TOO_MANY_TOPICS, HOST_ERROR_TOPIC_TOO_LONG,\n    },\n    flags::ReturnFlags,\n    keyspace::{Keyspace, KeyspaceTag},\n};\nuse casper_executor_wasm_interface::{\n    executor::{ExecuteRequestBuilder, ExecuteResult, ExecutionKind, Executor},\n    u32_from_host_result, Caller, InternalHostError, VMError, VMResult,\n};\nuse casper_storage::{\n    global_state::GlobalStateReader,\n    tracking_copy::{TrackingCopyEntityExt, TrackingCopyError, TrackingCopyExt},\n};\nuse casper_types::{\n    account::AccountHash,\n    addressable_entity::{ActionThresholds, AssociatedKeys, MessageTopicError, NamedKeyAddr},\n    bytesrepr::ToBytes,\n    contract_messages::{Message, MessageAddr, MessagePayload, MessageTopicSummary},\n    AddressableEntity, BlockGlobalAddr, BlockHash, BlockTime, ByteCode, ByteCodeAddr, ByteCodeHash,\n    ByteCodeKind, CLType, CLValue, ContractRuntimeTag, Digest, EntityAddr, EntityEntryPoint,\n    EntityKind, EntryPointAccess, EntryPointAddr, EntryPointPayment, EntryPointType,\n    EntryPointValue, HashAddr, HostFunctionV2, Key, Package, PackageHash, ProtocolVersion,\n    StoredValue, URef, U512,\n};\nuse either::Either;\nuse num_derive::FromPrimitive;\nuse num_traits::FromPrimitive;\nuse tracing::{error, info, warn};\n\nuse crate::{\n    abi::{CreateResult, ReadInfo},\n    context::Context,\n    system::{self, MintArgs, 
MintTransferArgs},\n};\n\n#[derive(Debug, Copy, Clone, FromPrimitive, PartialEq)]\nenum EntityKindTag {\n    Account = 0,\n    Contract = 1,\n}\n\npub trait FallibleInto<T> {\n    fn try_into_wrapped(self) -> VMResult<T>;\n}\n\nimpl<From, To> FallibleInto<To> for From\nwhere\n    To: TryFrom<From>,\n{\n    fn try_into_wrapped(self) -> VMResult<To> {\n        To::try_from(self).map_err(|_| VMError::Internal(InternalHostError::TypeConversion))\n    }\n}\n\n/// Consumes a set amount of gas for the specified storage value.\nfn charge_gas_storage<S: GlobalStateReader, E: Executor>(\n    caller: &mut impl Caller<Context = Context<S, E>>,\n    size_bytes: usize,\n) -> VMResult<()> {\n    let storage_costs = &caller.context().storage_costs;\n    let gas_cost = storage_costs.calculate_gas_cost(size_bytes);\n    let value: u64 = gas_cost.value().try_into().map_err(|_| VMError::OutOfGas)?;\n    caller.consume_gas(value)?;\n    Ok(())\n}\n\n/// Consumes a set amount of gas for the specified host function and weights\nfn charge_host_function_call<S, E, const N: usize>(\n    caller: &mut impl Caller<Context = Context<S, E>>,\n    host_function: &HostFunctionV2<[u64; N]>,\n    weights: [u64; N],\n) -> VMResult<()>\nwhere\n    S: GlobalStateReader,\n    E: Executor,\n{\n    let Some(cost) = host_function.calculate_gas_cost(weights) else {\n        // Overflowing gas calculation means gas limit was exceeded\n        return Err(VMError::OutOfGas);\n    };\n\n    caller.consume_gas(cost.value().as_u64())?;\n    Ok(())\n}\n\n/// Writes a message to the global state and charges for storage used.\nfn metered_write<S: GlobalStateReader, E: Executor>(\n    caller: &mut impl Caller<Context = Context<S, E>>,\n    key: Key,\n    value: StoredValue,\n) -> VMResult<()> {\n    charge_gas_storage(caller, value.serialized_length())?;\n    caller.context_mut().tracking_copy.write(key, value);\n    Ok(())\n}\n\n/// Write value under a key.\npub fn casper_write<S: GlobalStateReader, E: Executor>(\n  
  mut caller: impl Caller<Context = Context<S, E>>,\n    key_space: u64,\n    key_ptr: u32,\n    key_size: u32,\n    value_ptr: u32,\n    value_size: u32,\n) -> VMResult<u32> {\n    let write_cost = caller.context().config.host_function_costs().write;\n    charge_host_function_call(\n        &mut caller,\n        &write_cost,\n        [\n            key_space,\n            u64::from(key_ptr),\n            u64::from(key_size),\n            u64::from(value_ptr),\n            u64::from(value_size),\n        ],\n    )?;\n\n    let keyspace_tag = match KeyspaceTag::from_u64(key_space) {\n        Some(keyspace_tag) => keyspace_tag,\n        None => {\n            // Unknown keyspace received, return error\n            return Ok(HOST_ERROR_NOT_FOUND);\n        }\n    };\n\n    let key_payload_bytes = caller.memory_read(key_ptr, key_size.try_into_wrapped()?)?;\n\n    let keyspace = match keyspace_tag {\n        KeyspaceTag::State => Keyspace::State,\n        KeyspaceTag::Context => Keyspace::Context(&key_payload_bytes),\n        KeyspaceTag::NamedKey => {\n            let key_name = match std::str::from_utf8(&key_payload_bytes) {\n                Ok(key_name) => key_name,\n                Err(_) => {\n                    // TODO: Invalid key name encoding\n                    return Ok(HOST_ERROR_INVALID_DATA);\n                }\n            };\n\n            Keyspace::NamedKey(key_name)\n        }\n        KeyspaceTag::PaymentInfo => {\n            let key_name = match std::str::from_utf8(&key_payload_bytes) {\n                Ok(key_name) => key_name,\n                Err(_) => {\n                    return Ok(HOST_ERROR_INVALID_DATA);\n                }\n            };\n\n            if !caller.has_export(key_name) {\n                // Missing wasm export, unable to perform global state write\n                return Ok(HOST_ERROR_NOT_FOUND);\n            }\n\n            Keyspace::PaymentInfo(key_name)\n        }\n    };\n\n    let global_state_key = match 
keyspace_to_global_state_key(caller.context(), keyspace) {\n        Some(global_state_key) => global_state_key,\n        None => {\n            // Unknown keyspace received, return error\n            return Ok(HOST_ERROR_NOT_FOUND);\n        }\n    };\n\n    let value = caller.memory_read(value_ptr, value_size.try_into_wrapped()?)?;\n\n    let stored_value = match keyspace {\n        Keyspace::State | Keyspace::Context(_) | Keyspace::NamedKey(_) => {\n            StoredValue::RawBytes(value)\n        }\n        Keyspace::PaymentInfo(_) => {\n            let entry_point_payment = match value.as_slice() {\n                [ENTRY_POINT_PAYMENT_CALLER] => EntryPointPayment::Caller,\n                [ENTRY_POINT_PAYMENT_DIRECT_INVOCATION_ONLY] => {\n                    EntryPointPayment::DirectInvocationOnly\n                }\n                [ENTRY_POINT_PAYMENT_SELF_ONWARD] => EntryPointPayment::SelfOnward,\n                _ => {\n                    // Invalid entry point payment variant\n                    return Ok(HOST_ERROR_INVALID_INPUT);\n                }\n            };\n\n            let entry_point = EntityEntryPoint::new(\n                \"_\",\n                Vec::new(),\n                CLType::Unit,\n                EntryPointAccess::Public,\n                EntryPointType::Called,\n                entry_point_payment,\n            );\n            let entry_point_value = EntryPointValue::V1CasperVm(entry_point);\n            StoredValue::EntryPoint(entry_point_value)\n        }\n    };\n\n    metered_write(&mut caller, global_state_key, stored_value)?;\n\n    Ok(HOST_ERROR_SUCCESS)\n}\n\n/// Remove value under a key.\n///\n/// This produces a transformation of Prune to the global state. 
Keep in mind that technically the\n/// data is not removed from the global state as it still there, it's just not reachable anymore\n/// from the newly created tip.\n///\n/// The name for this host function is `remove` to keep it simple and consistent with read/write\n/// verbs, and also consistent with the rust stdlib vocabulary i.e. `V`\npub fn casper_remove<S: GlobalStateReader, E: Executor>(\n    mut caller: impl Caller<Context = Context<S, E>>,\n    key_space: u64,\n    key_ptr: u32,\n    key_size: u32,\n) -> VMResult<u32> {\n    let write_cost = caller.context().config.host_function_costs().remove;\n    charge_host_function_call(\n        &mut caller,\n        &write_cost,\n        [key_space, u64::from(key_ptr), u64::from(key_size)],\n    )?;\n\n    let keyspace_tag = match KeyspaceTag::from_u64(key_space) {\n        Some(keyspace_tag) => keyspace_tag,\n        None => {\n            // Unknown keyspace received, return error\n            return Ok(HOST_ERROR_NOT_FOUND);\n        }\n    };\n\n    let key_payload_bytes = caller.memory_read(key_ptr, key_size.try_into_wrapped()?)?;\n\n    let keyspace = match keyspace_tag {\n        KeyspaceTag::State => Keyspace::State,\n        KeyspaceTag::Context => Keyspace::Context(&key_payload_bytes),\n        KeyspaceTag::NamedKey => {\n            let key_name = match std::str::from_utf8(&key_payload_bytes) {\n                Ok(key_name) => key_name,\n                Err(_) => {\n                    // TODO: Invalid key name encoding\n                    return Ok(HOST_ERROR_INVALID_DATA);\n                }\n            };\n\n            Keyspace::NamedKey(key_name)\n        }\n        KeyspaceTag::PaymentInfo => {\n            let key_name = match std::str::from_utf8(&key_payload_bytes) {\n                Ok(key_name) => key_name,\n                Err(_) => {\n                    return Ok(HOST_ERROR_INVALID_DATA);\n                }\n            };\n\n            if !caller.has_export(key_name) {\n                
// Missing wasm export, unable to perform global state write\n                return Ok(HOST_ERROR_NOT_FOUND);\n            }\n\n            Keyspace::PaymentInfo(key_name)\n        }\n    };\n\n    let global_state_key = match keyspace_to_global_state_key(caller.context(), keyspace) {\n        Some(global_state_key) => global_state_key,\n        None => {\n            // Unknown keyspace received, return error\n            return Ok(HOST_ERROR_NOT_FOUND);\n        }\n    };\n\n    let global_state_read_result = caller.context_mut().tracking_copy.read(&global_state_key);\n    match global_state_read_result {\n        Ok(Some(_stored_value)) => {\n            // Produce a prune transform only if value under a given key exists in the global state\n            caller.context_mut().tracking_copy.prune(global_state_key);\n        }\n        Ok(None) => {\n            // Entry does not exists, and we can't proceed with the prune operation\n            return Ok(HOST_ERROR_NOT_FOUND);\n        }\n        Err(error) => {\n            // To protect the network against potential non-determinism (i.e. one validator runs out\n            // of space or just faces I/O issues that other validators may not have) we're simply\n            // aborting the process, hoping that once the node goes back online issues are resolved\n            // on the validator side. 
TODO: We should signal this to the contract\n            // runtime somehow, and let validator nodes skip execution.\n            error!(\n                ?error,\n                ?global_state_key,\n                \"Error while attempting a read before removing value; aborting\"\n            );\n            panic!(\"Error while attempting a read before removing value; aborting key={global_state_key:?} error={error:?}\")\n        }\n    }\n\n    Ok(HOST_ERROR_SUCCESS)\n}\n\npub fn casper_print<S: GlobalStateReader, E: Executor>(\n    mut caller: impl Caller<Context = Context<S, E>>,\n    message_ptr: u32,\n    message_size: u32,\n) -> VMResult<()> {\n    let print_cost = caller.context().config.host_function_costs().print;\n    charge_host_function_call(\n        &mut caller,\n        &print_cost,\n        [u64::from(message_ptr), u64::from(message_size)],\n    )?;\n\n    let vec = caller.memory_read(message_ptr, message_size.try_into_wrapped()?)?;\n    let msg = String::from_utf8_lossy(&vec);\n    eprintln!(\"⛓️ {msg}\");\n    Ok(())\n}\n\n/// Write value under a key.\npub fn casper_read<S: GlobalStateReader, E: Executor>(\n    mut caller: impl Caller<Context = Context<S, E>>,\n    key_tag: u64,\n    key_ptr: u32,\n    key_size: u32,\n    info_ptr: u32,\n    cb_alloc: u32,\n    alloc_ctx: u32,\n) -> VMResult<u32> {\n    let read_cost = caller.context().config.host_function_costs().read;\n    charge_host_function_call(\n        &mut caller,\n        &read_cost,\n        [\n            key_tag,\n            u64::from(key_ptr),\n            u64::from(key_size),\n            u64::from(info_ptr),\n            u64::from(cb_alloc),\n            u64::from(alloc_ctx),\n        ],\n    )?;\n\n    let keyspace_tag = match KeyspaceTag::from_u64(key_tag) {\n        Some(keyspace_tag) => keyspace_tag,\n        None => {\n            // Unknown keyspace received, return error\n            return Ok(HOST_ERROR_INVALID_INPUT);\n        }\n    };\n\n    // TODO: Opportunity for 
optimization: don't read data under key_ptr if given key space does not\n    // require it.\n    let key_payload_bytes = caller.memory_read(key_ptr, key_size.try_into_wrapped()?)?;\n\n    let keyspace = match keyspace_tag {\n        KeyspaceTag::State => Keyspace::State,\n        KeyspaceTag::Context => Keyspace::Context(&key_payload_bytes),\n        KeyspaceTag::NamedKey => {\n            let key_name = match std::str::from_utf8(&key_payload_bytes) {\n                Ok(key_name) => key_name,\n                Err(_) => {\n                    return Ok(HOST_ERROR_INVALID_DATA);\n                }\n            };\n\n            Keyspace::NamedKey(key_name)\n        }\n        KeyspaceTag::PaymentInfo => {\n            let key_name = match std::str::from_utf8(&key_payload_bytes) {\n                Ok(key_name) => key_name,\n                Err(_) => {\n                    return Ok(HOST_ERROR_INVALID_DATA);\n                }\n            };\n            if !caller.has_export(key_name) {\n                // Missing wasm export, unable to perform global state read\n                return Ok(HOST_ERROR_NOT_FOUND);\n            }\n            Keyspace::PaymentInfo(key_name)\n        }\n    };\n\n    let global_state_key = match keyspace_to_global_state_key(caller.context(), keyspace) {\n        Some(global_state_key) => global_state_key,\n        None => {\n            // Unknown keyspace received, return error\n            return Ok(HOST_ERROR_NOT_FOUND);\n        }\n    };\n    let global_state_read_result = caller.context_mut().tracking_copy.read(&global_state_key);\n\n    let global_state_raw_bytes: Cow<[u8]> = match global_state_read_result {\n        Ok(Some(StoredValue::RawBytes(raw_bytes))) => Cow::Owned(raw_bytes),\n        Ok(Some(StoredValue::EntryPoint(EntryPointValue::V1CasperVm(entry_point)))) => {\n            match entry_point.entry_point_payment() {\n                EntryPointPayment::Caller => Cow::Borrowed(&[ENTRY_POINT_PAYMENT_CALLER]),\n             
   EntryPointPayment::DirectInvocationOnly => {\n                    Cow::Borrowed(&[ENTRY_POINT_PAYMENT_DIRECT_INVOCATION_ONLY])\n                }\n                EntryPointPayment::SelfOnward => Cow::Borrowed(&[ENTRY_POINT_PAYMENT_SELF_ONWARD]),\n            }\n        }\n        Ok(Some(stored_value)) => {\n            // TODO: Backwards compatibility with old EE, although it's not clear if we should do it\n            // at the storage level. Since new VM has storage isolated from the Wasm\n            // (i.e. we have Keyspace on the wasm which gets converted to a global state `Key`).\n            // I think if we were to pursue this we'd add a new `Keyspace` enum variant for each old\n            // VM supported Key types (i.e. URef, Dictionary perhaps) for some period of time, then\n            // deprecate this.\n            todo!(\"Unsupported {stored_value:?}\")\n        }\n        Ok(None) => return Ok(HOST_ERROR_NOT_FOUND), // Entry does not exists\n        Err(error) => {\n            // To protect the network against potential non-determinism (i.e. one validator runs out\n            // of space or just faces I/O issues that other validators may not have) we're simply\n            // aborting the process, hoping that once the node goes back online issues are resolved\n            // on the validator side. 
TODO: We should signal this to the contract\n            // runtime somehow, and let validator nodes skip execution.\n            error!(?error, \"Error while reading from storage; aborting\");\n            panic!(\"Error while reading from storage; aborting key={global_state_key:?} error={error:?}\")\n        }\n    };\n\n    let out_ptr: u32 = if cb_alloc != 0 {\n        caller.alloc(cb_alloc, global_state_raw_bytes.len(), alloc_ctx)?\n    } else {\n        // treats alloc_ctx as data\n        alloc_ctx\n    };\n\n    let read_info = ReadInfo {\n        data: out_ptr,\n        data_size: global_state_raw_bytes.len().try_into_wrapped()?,\n    };\n\n    let read_info_bytes = safe_transmute::transmute_one_to_bytes(&read_info);\n    caller.memory_write(info_ptr, read_info_bytes)?;\n    if out_ptr != 0 {\n        caller.memory_write(out_ptr, &global_state_raw_bytes)?;\n    }\n    Ok(HOST_ERROR_SUCCESS)\n}\n\nfn keyspace_to_global_state_key<S: GlobalStateReader, E: Executor>(\n    context: &Context<S, E>,\n    keyspace: Keyspace<'_>,\n) -> Option<Key> {\n    let entity_addr = context_to_entity_addr(context);\n\n    match keyspace {\n        Keyspace::State => Some(Key::State(entity_addr)),\n        Keyspace::Context(bytes) => {\n            let digest = Digest::hash(bytes);\n            Some(casper_types::Key::NamedKey(\n                NamedKeyAddr::new_named_key_entry(entity_addr, digest.value()),\n            ))\n        }\n        Keyspace::NamedKey(payload) => {\n            let digest = Digest::hash(payload.as_bytes());\n            Some(casper_types::Key::NamedKey(\n                NamedKeyAddr::new_named_key_entry(entity_addr, digest.value()),\n            ))\n        }\n        Keyspace::PaymentInfo(payload) => {\n            let entry_point_addr =\n                EntryPointAddr::new_v1_entry_point_addr(entity_addr, payload).ok()?;\n            Some(Key::EntryPoint(entry_point_addr))\n        }\n    }\n}\n\nfn context_to_entity_addr<S: GlobalStateReader, E: 
Executor>(\n    context: &Context<S, E>,\n) -> EntityAddr {\n    match context.callee {\n        Key::Account(account_hash) => EntityAddr::new_account(account_hash.value()),\n        Key::SmartContract(smart_contract_addr) => {\n            EntityAddr::new_smart_contract(smart_contract_addr)\n        }\n        _ => {\n            // This should never happen, as the caller is always an account or a smart contract.\n            panic!(\"Unexpected callee variant: {:?}\", context.callee)\n        }\n    }\n}\n\npub fn casper_copy_input<S: GlobalStateReader, E: Executor>(\n    mut caller: impl Caller<Context = Context<S, E>>,\n    cb_alloc: u32,\n    alloc_ctx: u32,\n) -> VMResult<u32> {\n    let input = caller.context().input.clone();\n\n    let out_ptr: u32 = if cb_alloc != 0 {\n        caller.alloc(cb_alloc, input.len(), alloc_ctx)?\n    } else {\n        // treats alloc_ctx as data\n        alloc_ctx\n    };\n\n    let copy_input_cost = caller.context().config.host_function_costs().copy_input;\n    charge_host_function_call(\n        &mut caller,\n        &copy_input_cost,\n        [\n            u64::from(out_ptr),\n            input\n                .len()\n                .try_into()\n                .expect(\"usize is at least the same size as u64\"),\n        ],\n    )?;\n\n    if out_ptr == 0 {\n        Ok(out_ptr)\n    } else {\n        caller.memory_write(out_ptr, &input)?;\n        Ok(out_ptr + (input.len() as u32))\n    }\n}\n\n/// Returns from the execution of a smart contract with an optional flags.\npub fn casper_return<S: GlobalStateReader, E: Executor>(\n    mut caller: impl Caller<Context = Context<S, E>>,\n    flags: u32,\n    data_ptr: u32,\n    data_len: u32,\n) -> VMResult<()> {\n    let ret_cost = caller.context().config.host_function_costs().ret;\n    charge_host_function_call(\n        &mut caller,\n        &ret_cost,\n        [u64::from(data_ptr), u64::from(data_len)],\n    )?;\n\n    let flags = ReturnFlags::from_bits_retain(flags);\n    
let data = if data_ptr == 0 {\n        None\n    } else {\n        let data = caller\n            .memory_read(data_ptr, data_len.try_into_wrapped()?)\n            .map(Bytes::from)?;\n        Some(data)\n    };\n    Err(VMError::Return { flags, data })\n}\n\n#[allow(clippy::too_many_arguments)]\npub fn casper_create<S: GlobalStateReader + 'static, E: Executor + 'static>(\n    mut caller: impl Caller<Context = Context<S, E>>,\n    code_ptr: u32,\n    code_len: u32,\n    transferred_value: u64,\n    entry_point_ptr: u32,\n    entry_point_len: u32,\n    input_ptr: u32,\n    input_len: u32,\n    seed_ptr: u32,\n    seed_len: u32,\n    result_ptr: u32,\n) -> VMResult<u32> {\n    let create_cost = caller.context().config.host_function_costs().create;\n    charge_host_function_call(\n        &mut caller,\n        &create_cost,\n        [\n            u64::from(code_ptr),\n            u64::from(code_len),\n            transferred_value,\n            u64::from(entry_point_ptr),\n            u64::from(entry_point_len),\n            u64::from(input_ptr),\n            u64::from(input_len),\n            u64::from(seed_ptr),\n            u64::from(seed_len),\n            u64::from(result_ptr),\n        ],\n    )?;\n\n    let code = if code_ptr != 0 {\n        caller\n            .memory_read(code_ptr, code_len as usize)\n            .map(Bytes::from)?\n    } else {\n        caller.bytecode()\n    };\n\n    let seed = if seed_ptr != 0 {\n        if seed_len != 32 {\n            return Ok(CALLEE_NOT_CALLABLE);\n        }\n        let seed_bytes = caller.memory_read(seed_ptr, seed_len as usize)?;\n        let seed_bytes: [u8; 32] = seed_bytes.try_into().unwrap(); // SAFETY: We checked for length.\n        Some(seed_bytes)\n    } else {\n        None\n    };\n\n    // For calling a constructor\n    let constructor_entry_point = {\n        let entry_point_ptr = NonZeroU32::new(entry_point_ptr);\n        match entry_point_ptr {\n            Some(entry_point_ptr) => {\n                
let entry_point_bytes =\n                    caller.memory_read(entry_point_ptr.get(), entry_point_len as _)?;\n                match String::from_utf8(entry_point_bytes) {\n                    Ok(entry_point) => Some(entry_point),\n                    Err(utf8_error) => {\n                        error!(%utf8_error, \"entry point name is not a valid utf-8 string; unable to call\");\n                        return Ok(CALLEE_NOT_CALLABLE);\n                    }\n                }\n            }\n            None => {\n                // No constructor to be called\n                None\n            }\n        }\n    };\n\n    // Pass input data when calling a constructor. It's optional, as constructors aren't required\n    let input_data: Option<Bytes> = if input_ptr == 0 {\n        None\n    } else {\n        let input_data = caller.memory_read(input_ptr, input_len as _)?.into();\n        Some(input_data)\n    };\n\n    let bytecode_hash = chain_utils::compute_wasm_bytecode_hash(&code);\n\n    let bytecode = ByteCode::new(ByteCodeKind::V2CasperWasm, code.clone().into());\n    let bytecode_addr = ByteCodeAddr::V2CasperWasm(bytecode_hash);\n\n    // 1. 
Store package hash\n    let mut smart_contract_package = Package::default();\n\n    let protocol_version = ProtocolVersion::V2_0_0;\n    let protocol_version_major = protocol_version.value().major;\n\n    let callee_addr = context_to_entity_addr(caller.context()).value();\n\n    let smart_contract_addr: HashAddr = chain_utils::compute_predictable_address(\n        caller.context().chain_name.as_bytes(),\n        callee_addr,\n        bytecode_hash,\n        seed,\n    );\n\n    smart_contract_package.insert_entity_version(\n        protocol_version_major,\n        EntityAddr::SmartContract(smart_contract_addr),\n    );\n\n    if caller\n        .context_mut()\n        .tracking_copy\n        .read(&Key::SmartContract(smart_contract_addr))\n        .map_err(|_| VMError::Internal(InternalHostError::TrackingCopy))?\n        .is_some()\n    {\n        return VMResult::Err(VMError::Internal(InternalHostError::ContractAlreadyExists));\n    }\n\n    metered_write(\n        &mut caller,\n        Key::SmartContract(smart_contract_addr),\n        StoredValue::SmartContract(smart_contract_package),\n    )?;\n\n    // 2. Store wasm\n    metered_write(\n        &mut caller,\n        Key::ByteCode(bytecode_addr),\n        StoredValue::ByteCode(bytecode),\n    )?;\n\n    // 3. 
Store addressable entity\n\n    let entity_addr = EntityAddr::SmartContract(smart_contract_addr);\n    let addressable_entity_key = Key::AddressableEntity(entity_addr);\n\n    // TODO: abort(str) as an alternative to trap\n    let address_generator = Arc::clone(&caller.context().address_generator);\n    let transaction_hash = caller.context().transaction_hash;\n    let main_purse: URef = match system::mint_mint(\n        &mut caller.context_mut().tracking_copy,\n        transaction_hash,\n        address_generator,\n        MintArgs {\n            initial_balance: U512::zero(),\n        },\n    ) {\n        Ok(uref) => uref,\n        Err(mint_error) => {\n            error!(?mint_error, \"Failed to create a purse\");\n            return Ok(CALLEE_TRAPPED);\n        }\n    };\n\n    let addressable_entity = AddressableEntity::new(\n        PackageHash::new(smart_contract_addr),\n        ByteCodeHash::new(bytecode_hash),\n        ProtocolVersion::V2_0_0,\n        main_purse,\n        AssociatedKeys::default(),\n        ActionThresholds::default(),\n        EntityKind::SmartContract(ContractRuntimeTag::VmCasperV2),\n    );\n\n    metered_write(\n        &mut caller,\n        addressable_entity_key,\n        StoredValue::AddressableEntity(addressable_entity),\n    )?;\n\n    let _initial_state = match constructor_entry_point {\n        Some(entry_point_name) => {\n            // Take the gas spent so far and use it as a limit for the new VM.\n            let gas_limit = caller\n                .gas_consumed()\n                .try_into_remaining()\n                .map_err(|_| InternalHostError::TypeConversion)?;\n\n            let execute_request = ExecuteRequestBuilder::default()\n                .with_initiator(caller.context().initiator)\n                .with_caller_key(caller.context().callee)\n                .with_gas_limit(gas_limit)\n                .with_target(ExecutionKind::Stored {\n                    address: smart_contract_addr,\n                    
entry_point: entry_point_name.clone(),\n                })\n                .with_input(input_data.unwrap_or_default())\n                .with_transferred_value(transferred_value)\n                .with_transaction_hash(caller.context().transaction_hash)\n                // We're using shared address generator there as we need to preserve and advance the\n                // state of deterministic address generator across chain of calls.\n                .with_shared_address_generator(Arc::clone(&caller.context().address_generator))\n                .with_chain_name(caller.context().chain_name.clone())\n                .with_block_time(caller.context().block_time)\n                .with_state_hash(Digest::from_raw([0; 32])) // TODO: Carry on state root hash\n                .with_block_height(1) // TODO: Carry on block height\n                .with_parent_block_hash(BlockHash::new(Digest::from_raw([0; 32]))) // TODO: Carry on parent block hash\n                .build()\n                .map_err(|_| InternalHostError::ExecuteRequestBuildFailure)?;\n\n            let tracking_copy_for_ctor = caller.context().tracking_copy.fork2();\n\n            match caller\n                .context()\n                .executor\n                .execute(tracking_copy_for_ctor, execute_request)\n            {\n                Ok(ExecuteResult {\n                    host_error,\n                    output,\n                    gas_usage,\n                    effects,\n                    cache,\n                    messages,\n                }) => {\n                    // output\n                    caller.consume_gas(gas_usage.gas_spent())?;\n\n                    if let Some(host_error) = host_error {\n                        return Ok(host_error.into_u32());\n                    }\n\n                    caller\n                        .context_mut()\n                        .tracking_copy\n                        .apply_changes(effects, cache, messages);\n\n                    
output\n                }\n                Err(execute_error) => {\n                    // This is a bug in the EE, as it should have been caught during the preparation\n                    // phase when the contract was stored in the global state.\n                    error!(?execute_error, \"Failed to execute constructor entry point\");\n                    return Err(VMError::Execute(execute_error));\n                }\n            }\n        }\n        None => None,\n    };\n\n    let create_result = CreateResult {\n        package_address: smart_contract_addr,\n    };\n\n    let create_result_bytes = safe_transmute::transmute_one_to_bytes(&create_result);\n\n    debug_assert_eq!(\n        safe_transmute::transmute_one(create_result_bytes),\n        Ok(create_result),\n        \"Sanity check\", // NOTE: Remove these guards with sufficient test coverage\n    );\n\n    caller.memory_write(result_ptr, create_result_bytes)?;\n\n    Ok(CALLEE_SUCCEEDED)\n}\n\n#[allow(clippy::too_many_arguments)]\npub fn casper_call<S: GlobalStateReader + 'static, E: Executor + 'static>(\n    mut caller: impl Caller<Context = Context<S, E>>,\n    address_ptr: u32,\n    address_len: u32,\n    transferred_value: u64,\n    entry_point_ptr: u32,\n    entry_point_len: u32,\n    input_ptr: u32,\n    input_len: u32,\n    cb_alloc: u32,\n    cb_ctx: u32,\n) -> VMResult<u32> {\n    let call_cost = caller.context().config.host_function_costs().call;\n    charge_host_function_call(\n        &mut caller,\n        &call_cost,\n        [\n            u64::from(address_ptr),\n            u64::from(address_len),\n            transferred_value,\n            u64::from(entry_point_ptr),\n            u64::from(entry_point_len),\n            u64::from(input_ptr),\n            u64::from(input_len),\n            u64::from(cb_alloc),\n            u64::from(cb_ctx),\n        ],\n    )?;\n\n    // 1. Look up address in the storage\n    // 1a. if it's legacy contract, wire up old EE, pretend you're 1.x. 
Input data would be\n    // \"RuntimeArgs\". Serialized output of the call has to be passed as output. Value is ignored as\n    // you can't pass value (tokens) to called contracts. 1b. if it's new contract, wire up\n    // another VM as according to the bytecode format. 2. Depends on the VM used (old or new) at\n    // this point either entry point is validated (i.e. EE returned error) or will be validated as\n    // for now. 3. If entry point is valid, call it, transfer the value, pass the input data. If\n    // it's invalid, return error. 4. Output data is captured by calling `cb_alloc`.\n    // let vm = VM::new();\n    // vm.\n    let address = caller.memory_read(address_ptr, address_len as _)?;\n    let smart_contract_addr: HashAddr = address.try_into_wrapped()?;\n\n    let input_data: Bytes = caller.memory_read(input_ptr, input_len as _)?.into();\n\n    let entry_point = {\n        let entry_point_bytes = caller.memory_read(entry_point_ptr, entry_point_len as _)?;\n        match String::from_utf8(entry_point_bytes) {\n            Ok(entry_point) => entry_point,\n            Err(utf8_error) => {\n                error!(%utf8_error, \"entry point name is not a valid utf-8 string; unable to call\");\n                return Ok(CALLEE_NOT_CALLABLE);\n            }\n        }\n    };\n\n    let tracking_copy = caller.context().tracking_copy.fork2();\n\n    // Take the gas spent so far and use it as a limit for the new VM.\n    let gas_limit = caller\n        .gas_consumed()\n        .try_into_remaining()\n        .map_err(|_| InternalHostError::TypeConversion)?;\n\n    let execute_request = ExecuteRequestBuilder::default()\n        .with_initiator(caller.context().initiator)\n        .with_caller_key(caller.context().callee)\n        .with_gas_limit(gas_limit)\n        .with_target(ExecutionKind::Stored {\n            address: smart_contract_addr,\n            entry_point: entry_point.clone(),\n        })\n        .with_transferred_value(transferred_value)\n        
.with_input(input_data)\n        .with_transaction_hash(caller.context().transaction_hash)\n        // We're using shared address generator there as we need to preserve and advance the state\n        // of deterministic address generator across chain of calls.\n        .with_shared_address_generator(Arc::clone(&caller.context().address_generator))\n        .with_chain_name(caller.context().chain_name.clone())\n        .with_block_time(caller.context().block_time)\n        .with_state_hash(Digest::from_raw([0; 32])) // TODO: Carry on state root hash\n        .with_block_height(1) // TODO: Carry on block height\n        .with_parent_block_hash(BlockHash::new(Digest::from_raw([0; 32]))) // TODO: Carry on parent block hash\n        .build()\n        .map_err(|_| InternalHostError::ExecuteRequestBuildFailure)?;\n\n    let (gas_usage, host_result) = match caller\n        .context()\n        .executor\n        .execute(tracking_copy, execute_request)\n    {\n        Ok(ExecuteResult {\n            host_error,\n            output,\n            gas_usage,\n            effects,\n            cache,\n            messages,\n        }) => {\n            if let Some(output) = output {\n                let out_ptr: u32 = if cb_alloc != 0 {\n                    caller.alloc(cb_alloc, output.len(), cb_ctx)?\n                } else {\n                    // treats alloc_ctx as data\n                    cb_ctx\n                };\n\n                if out_ptr != 0 {\n                    caller.memory_write(out_ptr, &output)?;\n                }\n            }\n\n            let host_result = match host_error {\n                Some(host_error) => Err(host_error),\n                None => {\n                    caller\n                        .context_mut()\n                        .tracking_copy\n                        .apply_changes(effects, cache, messages);\n                    Ok(())\n                }\n            };\n\n            (gas_usage, host_result)\n        }\n        
Err(execute_error) => {\n            error!(\n                ?execute_error,\n                ?smart_contract_addr,\n                ?entry_point,\n                \"Failed to execute entry point\"\n            );\n            return Err(VMError::Execute(execute_error));\n        }\n    };\n\n    let gas_spent = gas_usage\n        .gas_limit()\n        .checked_sub(gas_usage.remaining_points())\n        .ok_or(InternalHostError::RemainingGasExceedsGasLimit)?;\n\n    caller.consume_gas(gas_spent)?;\n\n    Ok(u32_from_host_result(host_result))\n}\n\npub fn casper_env_balance<S: GlobalStateReader, E: Executor>(\n    mut caller: impl Caller<Context = Context<S, E>>,\n    entity_kind: u32,\n    entity_addr_ptr: u32,\n    entity_addr_len: u32,\n    output_ptr: u32,\n) -> VMResult<u32> {\n    let balance_cost = caller.context().config.host_function_costs().env_balance;\n    charge_host_function_call(\n        &mut caller,\n        &balance_cost,\n        [\n            u64::from(entity_kind),\n            u64::from(entity_addr_ptr),\n            u64::from(entity_addr_len),\n            u64::from(output_ptr),\n        ],\n    )?;\n\n    let entity_key = match EntityKindTag::from_u32(entity_kind) {\n        Some(EntityKindTag::Account) => {\n            if entity_addr_len != 32 {\n                return Ok(HOST_ERROR_SUCCESS);\n            }\n            let entity_addr = caller.memory_read(entity_addr_ptr, entity_addr_len as usize)?;\n            let account_hash: AccountHash = AccountHash::new(entity_addr.try_into_wrapped()?);\n\n            let account_key = Key::Account(account_hash);\n            match caller.context_mut().tracking_copy.read(&account_key) {\n                Ok(Some(StoredValue::CLValue(clvalue))) => {\n                    let addressible_entity_key = clvalue\n                        .into_t::<Key>()\n                        .map_err(|_| InternalHostError::TypeConversion)?;\n                    Either::Right(addressible_entity_key)\n                }\n 
               Ok(Some(StoredValue::Account(account))) => Either::Left(account.main_purse()),\n                Ok(Some(other_entity)) => {\n                    error!(\"Unexpected entity type: {other_entity:?}\");\n                    return Err(InternalHostError::UnexpectedEntityKind.into());\n                }\n                Ok(None) => return Ok(HOST_ERROR_SUCCESS),\n                Err(error) => {\n                    error!(\"Error while reading from storage; aborting key={account_key:?} error={error:?}\");\n                    return Err(InternalHostError::TrackingCopy.into());\n                }\n            }\n        }\n        Some(EntityKindTag::Contract) => {\n            if entity_addr_len != 32 {\n                return Ok(HOST_ERROR_SUCCESS);\n            }\n            let hash_bytes = caller.memory_read(entity_addr_ptr, entity_addr_len as usize)?;\n            let hash_bytes: [u8; 32] = hash_bytes.try_into().unwrap(); // SAFETY: We checked for length.\n\n            let smart_contract_key = Key::SmartContract(hash_bytes);\n            match caller.context_mut().tracking_copy.read(&smart_contract_key) {\n                Ok(Some(StoredValue::SmartContract(smart_contract_package))) => {\n                    match smart_contract_package.versions().latest() {\n                        Some(addressible_entity_hash) => {\n                            let key = Key::AddressableEntity(EntityAddr::SmartContract(\n                                addressible_entity_hash.value(),\n                            ));\n                            Either::Right(key)\n                        }\n                        None => {\n                            warn!(\n                                ?smart_contract_key,\n                                \"Unable to find latest addressible entity hash for contract\"\n                            );\n                            return Ok(HOST_ERROR_SUCCESS);\n                        }\n                    }\n                
}\n                Ok(Some(_)) => {\n                    return Ok(HOST_ERROR_SUCCESS);\n                }\n                Ok(None) => {\n                    // Not found, balance is 0\n                    return Ok(HOST_ERROR_SUCCESS);\n                }\n                Err(error) => {\n                    error!(\n                        hash_bytes = base16::encode_lower(&hash_bytes),\n                        ?error,\n                        \"Error while reading from storage; aborting\"\n                    );\n                    panic!(\"Error while reading from storage\")\n                }\n            }\n        }\n        None => return Ok(HOST_ERROR_SUCCESS),\n    };\n\n    let purse = match entity_key {\n        Either::Left(main_purse) => main_purse,\n        Either::Right(indirect_entity_key) => {\n            match caller\n                .context_mut()\n                .tracking_copy\n                .read(&indirect_entity_key)\n            {\n                Ok(Some(StoredValue::AddressableEntity(addressable_entity))) => {\n                    addressable_entity.main_purse()\n                }\n                Ok(Some(other_entity)) => {\n                    panic!(\"Unexpected entity type: {other_entity:?}\")\n                }\n                Ok(None) => panic!(\"Key not found while checking balance\"), //return Ok(0),\n                Err(error) => {\n                    panic!(\"Error while reading from storage; aborting key={entity_key:?} error={error:?}\")\n                }\n            }\n        }\n    };\n\n    let total_balance = caller\n        .context_mut()\n        .tracking_copy\n        .get_total_balance(Key::URef(purse))\n        .map_err(|_| InternalHostError::TotalBalanceReadFailure)?;\n\n    let total_balance: u64 = total_balance\n        .value()\n        .try_into()\n        .map_err(|_| InternalHostError::TotalBalanceOverflow)?;\n\n    caller.memory_write(output_ptr, &total_balance.to_le_bytes())?;\n    
Ok(HOST_ERROR_NOT_FOUND)\n}\n\npub fn casper_transfer<S: GlobalStateReader + 'static, E: Executor>(\n    mut caller: impl Caller<Context = Context<S, E>>,\n    entity_addr_ptr: u32,\n    entity_addr_len: u32,\n    amount_ptr: u32,\n) -> VMResult<u32> {\n    let transfer_cost = caller.context().config.host_function_costs().transfer;\n    charge_host_function_call(\n        &mut caller,\n        &transfer_cost,\n        [\n            u64::from(entity_addr_ptr),\n            u64::from(entity_addr_len),\n            u64::from(amount_ptr),\n        ],\n    )?;\n\n    if entity_addr_len != 32 {\n        // Invalid entity address; failing to proceed with the transfer\n        return Ok(u32_from_host_result(Err(CallError::NotCallable)));\n    }\n\n    let amount = {\n        let mut amount_bytes = [0u8; 8];\n        caller.memory_read_into(amount_ptr, &mut amount_bytes)?;\n        u64::from_le_bytes(amount_bytes)\n    };\n\n    let (target_entity_addr, _runtime_footprint) = {\n        let entity_addr = caller.memory_read(entity_addr_ptr, entity_addr_len as usize)?;\n        debug_assert_eq!(entity_addr.len(), 32);\n\n        // SAFETY: entity_addr is 32 bytes long\n        let account_hash: AccountHash = AccountHash::new(entity_addr.try_into().unwrap());\n\n        let protocol_version = ProtocolVersion::V2_0_0;\n        let (entity_addr, runtime_footprint) = match caller\n            .context_mut()\n            .tracking_copy\n            .runtime_footprint_by_account_hash(protocol_version, account_hash)\n        {\n            Ok((entity_addr, runtime_footprint)) => (entity_addr, runtime_footprint),\n            Err(TrackingCopyError::KeyNotFound(key)) => {\n                warn!(?key, \"Account not found\");\n                return Ok(u32_from_host_result(Err(CallError::NotCallable)));\n            }\n            Err(error) => {\n                error!(?error, \"Error while reading from storage; aborting\");\n                panic!(\"Error while reading from 
storage\")\n            }\n        };\n        (entity_addr, runtime_footprint)\n    };\n\n    let callee_addressable_entity_key = match caller.context().callee {\n        callee_account_key @ Key::Account(_account_hash) => {\n            match caller.context_mut().tracking_copy.read(&callee_account_key) {\n                Ok(Some(StoredValue::CLValue(indirect))) => {\n                    // is it an account?\n                    indirect\n                        .into_t::<Key>()\n                        .map_err(|_| InternalHostError::TypeConversion)?\n                }\n                Ok(Some(other)) => panic!(\"should be cl value but got {other:?}\"),\n                Ok(None) => return Ok(u32_from_host_result(Err(CallError::NotCallable))),\n                Err(error) => {\n                    error!(\n                        ?error,\n                        ?callee_account_key,\n                        \"Error while reading from storage; aborting\"\n                    );\n                    panic!(\"Error while reading from storage\")\n                }\n            }\n        }\n        smart_contract_key @ Key::SmartContract(_) => {\n            match caller.context_mut().tracking_copy.read(&smart_contract_key) {\n                Ok(Some(StoredValue::SmartContract(smart_contract_package))) => {\n                    match smart_contract_package.versions().latest() {\n                        Some(addressible_entity_hash) => Key::AddressableEntity(\n                            EntityAddr::SmartContract(addressible_entity_hash.value()),\n                        ),\n                        None => {\n                            warn!(\n                                ?smart_contract_key,\n                                \"Unable to find latest addressible entity hash for contract\"\n                            );\n                            return Ok(u32_from_host_result(Err(CallError::NotCallable)));\n                        }\n                    }\n         
       }\n                Ok(Some(other)) => panic!(\"should be smart contract but got {other:?}\"),\n                Ok(None) => return Ok(u32_from_host_result(Err(CallError::NotCallable))),\n                Err(error) => {\n                    error!(\n                        ?error,\n                        ?smart_contract_key,\n                        \"Error while reading from storage; aborting\"\n                    );\n                    panic!(\"Error while reading from storage\")\n                }\n            }\n        }\n        other => panic!(\"should be account or smart contract but got {other:?}\"),\n    };\n\n    let callee_stored_value = caller\n        .context_mut()\n        .tracking_copy\n        .read(&callee_addressable_entity_key)\n        .map_err(|_| InternalHostError::TrackingCopy)?\n        .ok_or(InternalHostError::AccountRecordNotFound)?;\n    let callee_addressable_entity = callee_stored_value\n        .into_addressable_entity()\n        .ok_or(InternalHostError::TypeConversion)?;\n    let callee_purse = callee_addressable_entity.main_purse();\n\n    let target_purse = match caller\n        .context_mut()\n        .tracking_copy\n        .runtime_footprint_by_entity_addr(target_entity_addr)\n    {\n        Ok(runtime_footprint) => match runtime_footprint.main_purse() {\n            Some(target_purse) => target_purse,\n            None => todo!(\"create a main purse for a contract\"),\n        },\n        Err(TrackingCopyError::KeyNotFound(key)) => {\n            warn!(?key, \"Transfer recipient not found\");\n            return Ok(u32_from_host_result(Err(CallError::NotCallable)));\n        }\n        Err(error) => {\n            error!(?error, \"Error while reading from storage; aborting\");\n            return Err(InternalHostError::TrackingCopy)?;\n        }\n    };\n    // We don't execute anything as it does not make sense to execute an account as there\n    // are no entry points.\n    let transaction_hash = 
caller.context().transaction_hash;\n    let address_generator = Arc::clone(&caller.context().address_generator);\n    let args = MintTransferArgs {\n        source: callee_purse,\n        target: target_purse,\n        amount: U512::from(amount),\n        maybe_to: None,\n        id: None,\n    };\n\n    let result = system::mint_transfer(\n        &mut caller.context_mut().tracking_copy,\n        transaction_hash,\n        address_generator,\n        args,\n    );\n\n    Ok(u32_from_host_result(result))\n}\n\npub fn casper_upgrade<S: GlobalStateReader + 'static, E: Executor>(\n    mut caller: impl Caller<Context = Context<S, E>>,\n    code_ptr: u32,\n    code_size: u32,\n    entry_point_ptr: u32,\n    entry_point_size: u32,\n    input_ptr: u32,\n    input_size: u32,\n) -> VMResult<u32> {\n    let upgrade_cost = caller.context().config.host_function_costs().upgrade;\n    charge_host_function_call(\n        &mut caller,\n        &upgrade_cost,\n        [\n            u64::from(code_ptr),\n            u64::from(code_size),\n            u64::from(entry_point_ptr),\n            u64::from(entry_point_size),\n            u64::from(input_ptr),\n            u64::from(input_size),\n        ],\n    )?;\n\n    let code = caller\n        .memory_read(code_ptr, code_size as usize)\n        .map(Bytes::from)?;\n\n    let entry_point = match NonZeroU32::new(entry_point_ptr) {\n        Some(entry_point_ptr) => {\n            // There's upgrade entry point to be called\n            let entry_point_bytes =\n                caller.memory_read(entry_point_ptr.get(), entry_point_size as usize)?;\n            match String::from_utf8(entry_point_bytes) {\n                Ok(entry_point) => Some(entry_point),\n                Err(utf8_error) => {\n                    error!(%utf8_error, \"entry point name is not a valid utf-8 string; unable to call\");\n                    return Ok(CALLEE_NOT_CALLABLE);\n                }\n            }\n        }\n        None => {\n            // No 
constructor to be called\n            None\n        }\n    };\n\n    // Pass input data when calling a constructor. It's optional, as constructors aren't required\n    let input_data: Option<Bytes> = if input_ptr == 0 {\n        None\n    } else {\n        let input_data = caller.memory_read(input_ptr, input_size as _)?.into();\n        Some(input_data)\n    };\n\n    let (smart_contract_addr, callee_addressable_entity_key) = match caller.context().callee {\n        Key::Account(_account_hash) => {\n            error!(\"Account upgrade is not possible\");\n            return Ok(CALLEE_NOT_CALLABLE);\n        }\n        addressable_entity_key @ Key::SmartContract(smart_contract_addr) => {\n            let smart_contract_key = addressable_entity_key;\n            match caller.context_mut().tracking_copy.read(&smart_contract_key) {\n                Ok(Some(StoredValue::SmartContract(smart_contract_package))) => {\n                    match smart_contract_package.versions().latest() {\n                        Some(addressible_entity_hash) => {\n                            let key = Key::AddressableEntity(EntityAddr::SmartContract(\n                                addressible_entity_hash.value(),\n                            ));\n                            (smart_contract_addr, key)\n                        }\n                        None => {\n                            warn!(\n                                ?smart_contract_key,\n                                \"Unable to find latest addressible entity hash for contract\"\n                            );\n                            return Ok(CALLEE_NOT_CALLABLE);\n                        }\n                    }\n                }\n                Ok(Some(other)) => panic!(\"should be smart contract but got {other:?}\"),\n                Ok(None) => return Ok(CALLEE_NOT_CALLABLE),\n                Err(error) => {\n                    error!(\n                        ?error,\n                        
?smart_contract_key,\n                        \"Error while reading from storage; aborting\"\n                    );\n                    panic!(\"Error while reading from storage\")\n                }\n            }\n        }\n        other => panic!(\"should be account or addressable entity but got {other:?}\"),\n    };\n\n    let callee_addressable_entity = match caller\n        .context_mut()\n        .tracking_copy\n        .read(&callee_addressable_entity_key)\n    {\n        Ok(Some(StoredValue::AddressableEntity(addressable_entity))) => addressable_entity,\n        Ok(Some(other_entity)) => {\n            panic!(\"Unexpected entity type: {other_entity:?}\")\n        }\n        Ok(None) => return Ok(CALLEE_NOT_CALLABLE),\n        Err(error) => {\n            panic!(\"Error while reading from storage; aborting key={callee_addressable_entity_key:?} error={error:?}\")\n        }\n    };\n\n    // 1. Ensure that the new code is valid (maybe?)\n    // TODO: Is validating new code worth it if the user pays for the storage anyway? Should we\n    // protect users against invalid code?\n\n    // 2. Update the code therefore making hash(new_code) != addressable_entity.bytecode_addr (aka\n    //    hash(old_code))\n    let bytecode_key = Key::ByteCode(ByteCodeAddr::V2CasperWasm(\n        callee_addressable_entity.byte_code_addr(),\n    ));\n    metered_write(\n        &mut caller,\n        bytecode_key,\n        StoredValue::ByteCode(ByteCode::new(\n            ByteCodeKind::V2CasperWasm,\n            code.clone().into(),\n        )),\n    )?;\n\n    // 3. 
Execute upgrade routine (if specified)\n    // this code should handle reading old state, and saving new state\n\n    if let Some(entry_point_name) = entry_point {\n        // Take the gas spent so far and use it as a limit for the new VM.\n        let gas_limit = caller\n            .gas_consumed()\n            .try_into_remaining()\n            .map_err(|_| InternalHostError::TypeConversion)?;\n\n        let execute_request = ExecuteRequestBuilder::default()\n            .with_initiator(caller.context().initiator)\n            .with_caller_key(caller.context().callee)\n            .with_gas_limit(gas_limit)\n            .with_target(ExecutionKind::Stored {\n                address: smart_contract_addr,\n                entry_point: entry_point_name.clone(),\n            })\n            .with_input(input_data.unwrap_or_default())\n            // Upgrade entry point is executed with zero value as it does not seem to make sense to\n            // be able to transfer anything.\n            .with_transferred_value(0)\n            .with_transaction_hash(caller.context().transaction_hash)\n            // We're using shared address generator there as we need to preserve and advance the\n            // state of deterministic address generator across chain of calls.\n            .with_shared_address_generator(Arc::clone(&caller.context().address_generator))\n            .with_chain_name(caller.context().chain_name.clone())\n            .with_block_time(caller.context().block_time)\n            .with_state_hash(Digest::from_raw([0; 32])) // TODO: Carry on state root hash\n            .with_block_height(1) // TODO: Carry on block height\n            .with_parent_block_hash(BlockHash::new(Digest::from_raw([0; 32]))) // TODO: Carry on parent block hash\n            .build()\n            .map_err(|_| InternalHostError::ExecuteRequestBuildFailure)?;\n\n        let tracking_copy_for_ctor = caller.context().tracking_copy.fork2();\n\n        match caller\n            .context()\n   
         .executor\n            .execute(tracking_copy_for_ctor, execute_request)\n        {\n            Ok(ExecuteResult {\n                host_error,\n                output,\n                gas_usage,\n                effects,\n                cache,\n                messages,\n            }) => {\n                // output\n                caller.consume_gas(gas_usage.gas_spent())?;\n\n                if let Some(host_error) = host_error {\n                    return Ok(host_error.into_u32());\n                }\n\n                caller\n                    .context_mut()\n                    .tracking_copy\n                    .apply_changes(effects, cache, messages);\n\n                if let Some(output) = output {\n                    info!(\n                        ?entry_point_name,\n                        ?output,\n                        \"unexpected output from migration entry point\"\n                    );\n                }\n            }\n            Err(execute_error) => {\n                // Unable to call contract because of execution error or internal host error.\n                // This usually means an internal error that should not happen and has to be handled\n                // by the contract runtime.\n                error!(\n                    ?execute_error,\n                    ?entry_point_name,\n                    smart_contract_addr = base16::encode_lower(&smart_contract_addr),\n                    \"Failed to execute upgrade entry point\"\n                );\n                return Err(VMError::Execute(execute_error));\n            }\n        }\n    }\n\n    Ok(CALLEE_SUCCEEDED)\n}\n\npub fn casper_env_info<S: GlobalStateReader, E: Executor>(\n    mut caller: impl Caller<Context = Context<S, E>>,\n    info_ptr: u32,\n    info_size: u32,\n) -> VMResult<u32> {\n    let block_time_cost = caller.context().config.host_function_costs().env_info;\n    charge_host_function_call(\n        &mut caller,\n        &block_time_cost,\n    
    [u64::from(info_ptr), u64::from(info_size)],\n    )?;\n\n    let (caller_kind, caller_addr) = match &caller.context().caller {\n        Key::Account(account_hash) => (EntityKindTag::Account as u32, account_hash.value()),\n        Key::SmartContract(smart_contract_addr) => {\n            (EntityKindTag::Contract as u32, *smart_contract_addr)\n        }\n        other => panic!(\"Unexpected caller: {other:?}\"),\n    };\n\n    let (callee_kind, callee_addr) = match &caller.context().callee {\n        Key::Account(initiator_addr) => (EntityKindTag::Account as u32, initiator_addr.value()),\n        Key::SmartContract(smart_contract_addr) => {\n            (EntityKindTag::Contract as u32, *smart_contract_addr)\n        }\n        other => panic!(\"Unexpected callee: {other:?}\"),\n    };\n\n    let transferred_value = caller.context().transferred_value;\n\n    let block_time = caller.context().block_time.value();\n\n    // `EnvInfo` in little-endian representation.\n    let env_info_le = EnvInfo {\n        caller_addr,\n        caller_kind: caller_kind.to_le(),\n        callee_addr,\n        callee_kind: callee_kind.to_le(),\n        transferred_value: transferred_value.to_le(),\n        block_time: block_time.to_le(),\n    };\n\n    let env_info_bytes = safe_transmute::transmute_one_to_bytes(&env_info_le);\n    let write_len = env_info_bytes.len().min(info_size as usize);\n    caller.memory_write(info_ptr, &env_info_bytes[..write_len])?;\n\n    Ok(HOST_ERROR_SUCCESS)\n}\n\npub fn casper_emit<S: GlobalStateReader, E: Executor>(\n    mut caller: impl Caller<Context = Context<S, E>>,\n    topic_name_ptr: u32,\n    topic_name_size: u32,\n    payload_ptr: u32,\n    payload_size: u32,\n) -> VMResult<u32> {\n    // Charge for parameter weights.\n    let emit_host_function = caller.context().config.host_function_costs().emit;\n\n    charge_host_function_call(\n        &mut caller,\n        &emit_host_function,\n        [\n            u64::from(topic_name_ptr),\n            
u64::from(topic_name_size),\n            u64::from(payload_ptr),\n            u64::from(payload_size),\n        ],\n    )?;\n\n    if topic_name_size > caller.context().message_limits.max_topic_name_size {\n        return Ok(HOST_ERROR_TOPIC_TOO_LONG);\n    }\n\n    if payload_size > caller.context().message_limits.max_message_size {\n        return Ok(HOST_ERROR_PAYLOAD_TOO_LONG);\n    }\n\n    let topic_name = {\n        let topic: Vec<u8> = caller.memory_read(topic_name_ptr, topic_name_size as usize)?;\n        let Ok(topic) = String::from_utf8(topic) else {\n            // Not a valid UTF-8 string\n            return Ok(HOST_ERROR_INVALID_DATA);\n        };\n        topic\n    };\n\n    let payload = caller.memory_read(payload_ptr, payload_size as usize)?;\n\n    let entity_addr = context_to_entity_addr(caller.context());\n\n    let mut message_topics = caller\n        .context_mut()\n        .tracking_copy\n        .get_message_topics(entity_addr)\n        .unwrap_or_else(|error| {\n            panic!(\"Error while reading from storage; aborting error={error:?}\")\n        });\n\n    if message_topics.len() >= caller.context().message_limits.max_topics_per_contract as usize {\n        return Ok(HOST_ERROR_TOO_MANY_TOPICS);\n    }\n\n    let topic_name_hash = Digest::hash(&topic_name).value().into();\n\n    match message_topics.add_topic(&topic_name, topic_name_hash) {\n        Ok(()) => {\n            // New topic is created\n        }\n        Err(MessageTopicError::DuplicateTopic) => {\n            // We're lazily creating message topics and this operation is idempotent. 
Therefore\n            // already existing topic is not an issue.\n        }\n        Err(MessageTopicError::MaxTopicsExceeded) => {\n            // We're validating the size of topics before adding them\n            return Ok(HOST_ERROR_TOO_MANY_TOPICS);\n        }\n        Err(MessageTopicError::TopicNameSizeExceeded) => {\n            // We're validating the length of topic before adding it\n            return Ok(HOST_ERROR_TOPIC_TOO_LONG);\n        }\n        Err(error) => {\n            // These error variants are non_exhaustive, and we should handle them explicitly.\n            unreachable!(\"Unexpected error while adding a topic: {:?}\", error);\n        }\n    };\n\n    let current_block_time = caller.context().block_time;\n    eprintln!(\"📩 {topic_name}: {payload:?} (at {current_block_time:?})\");\n\n    let topic_key = Key::Message(MessageAddr::new_topic_addr(entity_addr, topic_name_hash));\n    let prev_topic_summary = match caller.context_mut().tracking_copy.read(&topic_key) {\n        Ok(Some(StoredValue::MessageTopic(message_topic_summary))) => message_topic_summary,\n        Ok(Some(stored_value)) => {\n            panic!(\"Unexpected stored value: {stored_value:?}\");\n        }\n        Ok(None) => {\n            let message_topic_summary =\n                MessageTopicSummary::new(0, current_block_time, topic_name.clone());\n            let summary = StoredValue::MessageTopic(message_topic_summary.clone());\n            caller.context_mut().tracking_copy.write(topic_key, summary);\n            message_topic_summary\n        }\n        Err(error) => panic!(\"Error while reading from storage; aborting error={error:?}\"),\n    };\n\n    let topic_message_index = if prev_topic_summary.blocktime() != current_block_time {\n        for index in 1..prev_topic_summary.message_count() {\n            let message_key = Key::message(entity_addr, topic_name_hash, index);\n            debug_assert!(\n                {\n                    // NOTE: This 
assertion is to ensure that the message index is continuous, and\n                    // the previous messages are pruned properly.\n                    caller\n                        .context_mut()\n                        .tracking_copy\n                        .read(&message_key)\n                        .map_err(|_| VMError::Internal(InternalHostError::TrackingCopy))?\n                        .is_some()\n                },\n                \"Message index is not continuous\"\n            );\n\n            // Prune the previous messages\n            caller.context_mut().tracking_copy.prune(message_key);\n        }\n        0\n    } else {\n        prev_topic_summary.message_count()\n    };\n\n    // Data stored in the global state associated with the message block.\n    type MessageCountPair = (BlockTime, u64);\n\n    let block_message_index: u64 = match caller\n        .context_mut()\n        .tracking_copy\n        .read(&Key::BlockGlobal(BlockGlobalAddr::MessageCount))\n    {\n        Ok(Some(StoredValue::CLValue(value_pair))) => {\n            let (prev_block_time, prev_count): MessageCountPair =\n                CLValue::into_t(value_pair).map_err(|_| InternalHostError::TypeConversion)?;\n            if prev_block_time == current_block_time {\n                prev_count\n            } else {\n                0\n            }\n        }\n        Ok(Some(other)) => panic!(\"Unexpected stored value: {other:?}\"),\n        Ok(None) => {\n            // No messages in current block yet\n            0\n        }\n        Err(error) => {\n            panic!(\"Error while reading from storage; aborting error={error:?}\")\n        }\n    };\n\n    let Some(topic_message_count) = topic_message_index.checked_add(1) else {\n        return Ok(HOST_ERROR_MESSAGE_TOPIC_FULL);\n    };\n\n    let Some(block_message_count) = block_message_index.checked_add(1) else {\n        return Ok(HOST_ERROR_MAX_MESSAGES_PER_BLOCK_EXCEEDED);\n    };\n\n    // Under v2 runtime messages 
are only limited to bytes.\n    let message_payload = MessagePayload::Bytes(payload.into());\n\n    let message = Message::new(\n        entity_addr,\n        message_payload,\n        topic_name,\n        topic_name_hash,\n        topic_message_index,\n        block_message_index,\n    );\n    let topic_value = StoredValue::MessageTopic(MessageTopicSummary::new(\n        topic_message_count,\n        current_block_time,\n        message.topic_name().to_owned(),\n    ));\n\n    let message_key = message.message_key();\n    let message_value = StoredValue::Message(\n        message\n            .checksum()\n            .map_err(|_| InternalHostError::MessageChecksumMissing)?,\n    );\n    let message_count_pair: MessageCountPair = (current_block_time, block_message_count);\n    let block_message_count_value = StoredValue::CLValue(\n        CLValue::from_t(message_count_pair).map_err(|_| InternalHostError::TypeConversion)?,\n    );\n\n    // Charge for amount as measured by serialized length\n    let bytes_count = topic_value.serialized_length()\n        + message_value.serialized_length()\n        + block_message_count_value.serialized_length();\n    charge_gas_storage(&mut caller, bytes_count)?;\n\n    caller.context_mut().tracking_copy.emit_message(\n        topic_key,\n        topic_value,\n        message_key,\n        message_value,\n        block_message_count_value,\n        message,\n    );\n\n    Ok(HOST_ERROR_SUCCESS)\n}\n"
  },
  {
    "path": "executor/wasm_host/src/lib.rs",
    "content": "//! Implementation of all host functions.\npub(crate) mod abi;\npub mod context;\npub mod host;\npub(crate) mod system;\n"
  },
  {
    "path": "executor/wasm_host/src/system.rs",
    "content": "//! System contract dispatch.\n//!\n//! System contracts are special contracts that are always available to the system.\n//! They are used to implement core system functionality, such as minting and transferring tokens.\n//! This module provides a way to dispatch calls to system contracts that are implemented under\n//! storage crate.\n//!\n//! The dispatcher provides the necessary information to properly execute system contract's code\n//! within the context of the current execution of the new Wasm host logic.\nuse std::{cell::RefCell, rc::Rc, sync::Arc};\n\nuse casper_executor_wasm_common::error::{CallError, TrapCode};\nuse casper_executor_wasm_interface::HostResult;\nuse casper_storage::{\n    global_state::GlobalStateReader,\n    system::{\n        mint::Mint,\n        runtime_native::{Config, Id, RuntimeNative},\n    },\n    tracking_copy::{TrackingCopyEntityExt, TrackingCopyError},\n    AddressGenerator, TrackingCopy,\n};\nuse casper_types::{\n    account::AccountHash, CLValueError, ContextAccessRights, EntityAddr, Key, Phase,\n    ProtocolVersion, PublicKey, SystemHashRegistry, TransactionHash, URef, U512,\n};\nuse parking_lot::RwLock;\nuse thiserror::Error;\nuse tracing::{debug, error};\n\n#[derive(Debug, Error)]\nenum DispatchError {\n    #[error(\"Tracking copy error: {0}\")]\n    Storage(#[from] TrackingCopyError),\n    #[error(\"CLValue error: {0}\")]\n    CLValue(CLValueError),\n    #[error(\"Registry not found\")]\n    RegistryNotFound,\n    #[error(\"Missing addressable entity\")]\n    MissingRuntimeFootprint(TrackingCopyError),\n    #[error(\"Missing system contract: {0}\")]\n    MissingSystemContract(&'static str),\n}\n\nfn dispatch_system_contract<R: GlobalStateReader, Ret>(\n    tracking_copy: &mut TrackingCopy<R>,\n    transaction_hash: TransactionHash,\n    address_generator: Arc<RwLock<AddressGenerator>>,\n    system_contract: &'static str,\n    func: impl FnOnce(RuntimeNative<R>) -> Ret,\n) -> Result<Ret, DispatchError> {\n    
let system_entity_registry = {\n        let stored_value = tracking_copy\n            .read(&Key::SystemEntityRegistry)?\n            .ok_or(DispatchError::RegistryNotFound)?;\n        stored_value\n            .into_cl_value()\n            .expect(\"should convert stored value into CLValue\")\n            .into_t::<SystemHashRegistry>()\n            .map_err(DispatchError::CLValue)?\n    };\n    let system_entity_addr = system_entity_registry\n        .get(system_contract)\n        .ok_or(DispatchError::MissingSystemContract(system_contract))?;\n    let entity_addr = EntityAddr::new_system(*system_entity_addr);\n\n    let runtime_footprint = tracking_copy\n        .runtime_footprint_by_entity_addr(entity_addr)\n        .map_err(DispatchError::MissingRuntimeFootprint)?;\n\n    let config = Config::default();\n    let protocol_version = ProtocolVersion::V1_0_0;\n\n    let access_rights = ContextAccessRights::new(*system_entity_addr, []);\n    let address = PublicKey::System.to_account_hash();\n\n    let forked_tracking_copy = Rc::new(RefCell::new(tracking_copy.fork2()));\n\n    let remaining_spending_limit = U512::MAX; // NOTE: Since there's no custom payment, there's no need to track the remaining spending limit.\n    let phase = Phase::System; // NOTE: Since this is a system contract, the phase is always `System`.\n\n    let ret = {\n        let runtime = RuntimeNative::new(\n            config,\n            protocol_version,\n            Id::Transaction(transaction_hash),\n            address_generator,\n            Rc::clone(&forked_tracking_copy),\n            address,\n            Key::AddressableEntity(entity_addr),\n            runtime_footprint,\n            access_rights,\n            remaining_spending_limit,\n            phase,\n        );\n\n        func(runtime)\n    };\n\n    // SAFETY: `RuntimeNative` is dropped in the block above, we can extract the tracking copy and\n    // the effects.\n    let modified_tracking_copy = 
Rc::try_unwrap(forked_tracking_copy)\n        .ok()\n        .expect(\"No other references\");\n\n    let modified_tracking_copy = modified_tracking_copy.into_inner();\n\n    tracking_copy.apply_changes(\n        modified_tracking_copy.effects(),\n        modified_tracking_copy.cache(),\n        modified_tracking_copy.messages(),\n    );\n\n    Ok(ret)\n}\n\n#[derive(Debug, Clone, Copy)]\npub(crate) struct MintArgs {\n    pub(crate) initial_balance: U512,\n}\n\npub(crate) fn mint_mint<R: GlobalStateReader>(\n    tracking_copy: &mut TrackingCopy<R>,\n    transaction_hash: TransactionHash,\n    address_generator: Arc<RwLock<AddressGenerator>>,\n    args: MintArgs,\n) -> Result<URef, CallError> {\n    let mint_result = match dispatch_system_contract(\n        tracking_copy,\n        transaction_hash,\n        address_generator,\n        \"mint\",\n        |mut runtime| runtime.mint(args.initial_balance),\n    ) {\n        Ok(mint_result) => mint_result,\n        Err(error) => {\n            error!(%error, ?args, \"mint failed\");\n            panic!(\"Mint failed; aborting\");\n        }\n    };\n\n    match mint_result {\n        Ok(uref) => Ok(uref),\n        Err(casper_types::system::mint::Error::InsufficientFunds) => Err(CallError::CalleeReverted),\n        Err(casper_types::system::mint::Error::GasLimit) => Err(CallError::CalleeGasDepleted),\n        Err(mint_error) => {\n            error!(%mint_error, ?args, \"mint transfer failed\");\n            Err(CallError::CalleeTrapped(TrapCode::UnreachableCodeReached))\n        }\n    }\n}\n\n#[derive(Debug, Copy, Clone)]\npub(crate) struct MintTransferArgs {\n    pub(crate) maybe_to: Option<AccountHash>,\n    pub(crate) source: URef,\n    pub(crate) target: URef,\n    pub(crate) amount: U512,\n    pub(crate) id: Option<u64>,\n}\n\npub(crate) fn mint_transfer<R: GlobalStateReader>(\n    tracking_copy: &mut TrackingCopy<R>,\n    id: TransactionHash,\n    address_generator: Arc<RwLock<AddressGenerator>>,\n    args: 
MintTransferArgs,\n) -> HostResult {\n    let transfer_result: Result<(), casper_types::system::mint::Error> =\n        match dispatch_system_contract(\n            tracking_copy,\n            id,\n            address_generator,\n            \"mint\",\n            |mut runtime| {\n                runtime.transfer(\n                    args.maybe_to,\n                    args.source,\n                    args.target,\n                    args.amount,\n                    args.id,\n                )\n            },\n        ) {\n            Ok(result) => result,\n            Err(error) => {\n                error!(%error, \"mint transfer failed\");\n                return Err(CallError::CalleeTrapped(TrapCode::UnreachableCodeReached));\n            }\n        };\n\n    debug!(?args, ?transfer_result, \"transfer\");\n\n    match transfer_result {\n        Ok(()) => Ok(()),\n        Err(casper_types::system::mint::Error::InsufficientFunds) => Err(CallError::CalleeReverted),\n        Err(casper_types::system::mint::Error::GasLimit) => Err(CallError::CalleeGasDepleted),\n        Err(mint_error) => {\n            error!(%mint_error, ?args, \"mint transfer failed\");\n            Err(CallError::CalleeTrapped(TrapCode::UnreachableCodeReached))\n        }\n    }\n}\n"
  },
  {
    "path": "executor/wasm_interface/Cargo.toml",
    "content": "[package]\nname = \"casper-executor-wasm-interface\"\nversion = \"0.1.3\"\nedition = \"2021\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\ndescription = \"Casper executor interface package\"\nhomepage = \"https://casper.network\"\nrepository = \"https://github.com/casper-network/casper-node/tree/dev/executor/wasm_interface\"\nlicense = \"Apache-2.0\"\n\n[dependencies]\nbytes = \"1.10\"\nborsh = { version = \"1.5\", features = [\"derive\"] }\ncasper-executor-wasm-common = { version = \"0.1.3\", path = \"../wasm_common\" }\ncasper-storage = { version = \"5.0.0\", path = \"../../storage\" }\ncasper-types = { version = \"7.0.0\", path = \"../../types\" }\nparking_lot = \"0.12\"\nthiserror = \"2\"\n"
  },
  {
    "path": "executor/wasm_interface/src/executor.rs",
    "content": "use std::sync::Arc;\n\nuse borsh::BorshSerialize;\nuse bytes::Bytes;\nuse casper_storage::{\n    global_state::{error::Error as GlobalStateError, GlobalStateReader},\n    tracking_copy::TrackingCopyCache,\n    AddressGenerator, TrackingCopy,\n};\nuse casper_types::{\n    account::AccountHash, contract_messages::Messages, execution::Effects, BlockHash, BlockTime,\n    Digest, HashAddr, Key, TransactionHash,\n};\nuse parking_lot::RwLock;\nuse thiserror::Error;\n\nuse crate::{CallError, GasUsage, InternalHostError, WasmPreparationError};\n\n/// Request to execute a Wasm contract.\npub struct ExecuteRequest {\n    /// Initiator's address.\n    pub initiator: AccountHash,\n    /// Caller's address key.\n    ///\n    /// Either a `[`Key::Account`]` or a `[`Key::AddressableEntity`].\n    pub caller_key: Key,\n    /// Gas limit.\n    pub gas_limit: u64,\n    /// Target for execution.\n    pub execution_kind: ExecutionKind,\n    /// Input data.\n    pub input: Bytes,\n    /// Value transferred to the contract.\n    pub transferred_value: u64,\n    /// Transaction hash.\n    pub transaction_hash: TransactionHash,\n    /// Address generator.\n    ///\n    /// This can be either seeded and created as part of the builder or shared across chain of\n    /// execution requests.\n    pub address_generator: Arc<RwLock<AddressGenerator>>,\n    /// Chain name.\n    ///\n    /// This is very important ingredient for deriving contract hashes on the network.\n    pub chain_name: Arc<str>,\n    /// Block time represented as a unix timestamp.\n    pub block_time: BlockTime,\n    /// State root hash of the global state in which the transaction will be executed.\n    pub state_hash: Digest,\n    /// Parent block hash.\n    pub parent_block_hash: BlockHash,\n    /// Block height.\n    pub block_height: u64,\n}\n\n/// Builder for `ExecuteRequest`.\n#[derive(Default)]\npub struct ExecuteRequestBuilder {\n    initiator: Option<AccountHash>,\n    caller_key: Option<Key>,\n    
gas_limit: Option<u64>,\n    target: Option<ExecutionKind>,\n    input: Option<Bytes>,\n    value: Option<u64>,\n    transaction_hash: Option<TransactionHash>,\n    address_generator: Option<Arc<RwLock<AddressGenerator>>>,\n    chain_name: Option<Arc<str>>,\n    block_time: Option<BlockTime>,\n    state_hash: Option<Digest>,\n    parent_block_hash: Option<BlockHash>,\n    block_height: Option<u64>,\n}\n\nimpl ExecuteRequestBuilder {\n    /// Set the initiator's address.\n    #[must_use]\n    pub fn with_initiator(mut self, initiator: AccountHash) -> Self {\n        self.initiator = Some(initiator);\n        self\n    }\n\n    /// Set the caller's key.\n    #[must_use]\n    pub fn with_caller_key(mut self, caller_key: Key) -> Self {\n        self.caller_key = Some(caller_key);\n        self\n    }\n\n    /// Set the gas limit.\n    #[must_use]\n    pub fn with_gas_limit(mut self, gas_limit: u64) -> Self {\n        self.gas_limit = Some(gas_limit);\n        self\n    }\n\n    /// Set the target for execution.\n    #[must_use]\n    pub fn with_target(mut self, target: ExecutionKind) -> Self {\n        self.target = Some(target);\n        self\n    }\n\n    /// Pass input data.\n    #[must_use]\n    pub fn with_input(mut self, input: Bytes) -> Self {\n        self.input = Some(input);\n        self\n    }\n\n    /// Pass input data that can be serialized.\n    #[must_use]\n    pub fn with_serialized_input<T: BorshSerialize>(self, input: T) -> Self {\n        let input = borsh::to_vec(&input)\n            .map(Bytes::from)\n            .expect(\"should serialize input\");\n        self.with_input(input)\n    }\n\n    /// Pass value to be sent to the contract.\n    #[must_use]\n    pub fn with_transferred_value(mut self, value: u64) -> Self {\n        self.value = Some(value);\n        self\n    }\n\n    /// Set the transaction hash.\n    #[must_use]\n    pub fn with_transaction_hash(mut self, transaction_hash: TransactionHash) -> Self {\n        self.transaction_hash = 
Some(transaction_hash);\n        self\n    }\n\n    /// Set the address generator.\n    ///\n    /// This can be either seeded and created as part of the builder or shared across chain of\n    /// execution requests.\n    #[must_use]\n    pub fn with_address_generator(mut self, address_generator: AddressGenerator) -> Self {\n        self.address_generator = Some(Arc::new(RwLock::new(address_generator)));\n        self\n    }\n\n    /// Set the shared address generator.\n    ///\n    /// This is useful when the address generator is shared across a chain of multiple execution\n    /// requests.\n    #[must_use]\n    pub fn with_shared_address_generator(\n        mut self,\n        address_generator: Arc<RwLock<AddressGenerator>>,\n    ) -> Self {\n        self.address_generator = Some(address_generator);\n        self\n    }\n\n    /// Set the chain name.\n    #[must_use]\n    pub fn with_chain_name<T: Into<Arc<str>>>(mut self, chain_name: T) -> Self {\n        self.chain_name = Some(chain_name.into());\n        self\n    }\n\n    /// Set the block time.\n    #[must_use]\n    pub fn with_block_time(mut self, block_time: BlockTime) -> Self {\n        self.block_time = Some(block_time);\n        self\n    }\n\n    /// Set the state hash.\n    #[must_use]\n    pub fn with_state_hash(mut self, state_hash: Digest) -> Self {\n        self.state_hash = Some(state_hash);\n        self\n    }\n\n    /// Set the parent block hash.\n    #[must_use]\n    pub fn with_parent_block_hash(mut self, parent_block_hash: BlockHash) -> Self {\n        self.parent_block_hash = Some(parent_block_hash);\n        self\n    }\n\n    /// Set the block height.\n    #[must_use]\n    pub fn with_block_height(mut self, block_height: u64) -> Self {\n        self.block_height = Some(block_height);\n        self\n    }\n\n    /// Build the `ExecuteRequest`.\n    pub fn build(self) -> Result<ExecuteRequest, &'static str> {\n        let initiator = self.initiator.ok_or(\"Initiator is not set\")?;\n      
  let caller_key = self.caller_key.ok_or(\"Caller is not set\")?;\n        let gas_limit = self.gas_limit.ok_or(\"Gas limit is not set\")?;\n        let execution_kind = self.target.ok_or(\"Target is not set\")?;\n        let input = self.input.ok_or(\"Input is not set\")?;\n        let transferred_value = self.value.ok_or(\"Value is not set\")?;\n        let transaction_hash = self.transaction_hash.ok_or(\"Transaction hash is not set\")?;\n        let address_generator = self\n            .address_generator\n            .ok_or(\"Address generator is not set\")?;\n        let chain_name = self.chain_name.ok_or(\"Chain name is not set\")?;\n        let block_time = self.block_time.ok_or(\"Block time is not set\")?;\n        let state_hash = self.state_hash.ok_or(\"State hash is not set\")?;\n        let parent_block_hash = self\n            .parent_block_hash\n            .ok_or(\"Parent block hash is not set\")?;\n        let block_height = self.block_height.ok_or(\"Block height is not set\")?;\n        Ok(ExecuteRequest {\n            initiator,\n            caller_key,\n            gas_limit,\n            execution_kind,\n            input,\n            transferred_value,\n            transaction_hash,\n            address_generator,\n            chain_name,\n            block_time,\n            state_hash,\n            parent_block_hash,\n            block_height,\n        })\n    }\n}\n\n/// Result of executing a Wasm contract.\n#[derive(Debug)]\npub struct ExecuteResult {\n    /// Error while executing Wasm: traps, memory access errors, etc.\n    pub host_error: Option<CallError>,\n    /// Output produced by the Wasm contract.\n    pub output: Option<Bytes>,\n    /// Gas usage.\n    pub gas_usage: GasUsage,\n    /// Effects produced by the execution.\n    pub effects: Effects,\n    /// Cache of tracking copy effects produced by the execution.\n    pub cache: TrackingCopyCache,\n    /// Messages produced by the execution.\n    pub messages: Messages,\n}\n\nimpl 
ExecuteResult {\n    /// Returns the host error.\n    pub fn effects(&self) -> &Effects {\n        &self.effects\n    }\n\n    pub fn into_effects(self) -> Effects {\n        self.effects\n    }\n\n    pub fn host_error(&self) -> Option<&CallError> {\n        self.host_error.as_ref()\n    }\n\n    pub fn output(&self) -> Option<&Bytes> {\n        self.output.as_ref()\n    }\n\n    pub fn gas_usage(&self) -> &GasUsage {\n        &self.gas_usage\n    }\n}\n\n/// Result of executing a Wasm contract on a state provider.\n#[derive(Debug)]\npub struct ExecuteWithProviderResult {\n    /// Error while executing Wasm: traps, memory access errors, etc.\n    pub host_error: Option<CallError>,\n    /// Output produced by the Wasm contract.\n    output: Option<Bytes>,\n    /// Gas usage.\n    gas_usage: GasUsage,\n    /// Effects produced by the execution.\n    effects: Effects,\n    /// Post state hash.\n    post_state_hash: Digest,\n    /// Messages produced by the execution.\n    messages: Messages,\n}\n\nimpl ExecuteWithProviderResult {\n    #[must_use]\n    pub fn new(\n        host_error: Option<CallError>,\n        output: Option<Bytes>,\n        gas_usage: GasUsage,\n        effects: Effects,\n        post_state_hash: Digest,\n        messages: Messages,\n    ) -> Self {\n        Self {\n            host_error,\n            output,\n            gas_usage,\n            effects,\n            post_state_hash,\n            messages,\n        }\n    }\n\n    pub fn output(&self) -> Option<&Bytes> {\n        self.output.as_ref()\n    }\n\n    pub fn gas_usage(&self) -> &GasUsage {\n        &self.gas_usage\n    }\n\n    pub fn effects(&self) -> &Effects {\n        &self.effects\n    }\n\n    #[must_use]\n    pub fn post_state_hash(&self) -> Digest {\n        self.post_state_hash\n    }\n\n    pub fn messages(&self) -> &Messages {\n        &self.messages\n    }\n}\n\n/// Target for Wasm execution.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub enum ExecutionKind {\n    /// 
Execute Wasm bytes directly.\n    SessionBytes(Bytes),\n    /// Execute a stored contract by its address.\n    Stored {\n        /// Address of the contract.\n        address: HashAddr,\n        /// Entry point to call.\n        entry_point: String,\n    },\n}\n\n/// Error that can occur during execution, before the Wasm virtual machine is involved.\n///\n/// This error is returned by the `execute` function. It contains information about the error that\n/// occurred.\n#[derive(Debug, Error)]\npub enum ExecuteError {\n    /// Error while preparing Wasm instance: export not found, validation, compilation errors, etc.\n    ///\n    /// No wasm was executed at this point.\n    #[error(\"Wasm error error: {0}\")]\n    WasmPreparation(#[from] WasmPreparationError),\n    /// Error while executing Wasm: traps, memory access errors, etc.\n    #[error(\"Internal host error: {0}\")]\n    InternalHost(#[from] InternalHostError),\n    #[error(\"Code not found\")]\n    CodeNotFound(HashAddr),\n}\n\n#[derive(Debug, Error)]\npub enum ExecuteWithProviderError {\n    /// Error while accessing global state.\n    #[error(\"Global state error: {0}\")]\n    GlobalState(#[from] GlobalStateError),\n    #[error(transparent)]\n    Execute(#[from] ExecuteError),\n}\n\n/// Executor trait.\n///\n/// An executor is responsible for executing Wasm contracts. This implies that the executor is able\n/// to prepare Wasm instances, execute them, and handle errors that occur during execution.\n///\n/// Trait bounds also implying that the executor has to support interior mutability, as it may need\n/// to update its internal state during execution of a single or a chain of multiple contracts.\npub trait Executor: Clone + Send {\n    fn execute<R: GlobalStateReader + 'static>(\n        &self,\n        tracking_copy: TrackingCopy<R>,\n        execute_request: ExecuteRequest,\n    ) -> Result<ExecuteResult, ExecuteError>;\n}\n"
  },
  {
    "path": "executor/wasm_interface/src/lib.rs",
    "content": "pub mod executor;\n\nuse bytes::Bytes;\nuse executor::ExecuteError;\nuse thiserror::Error;\n\nuse casper_executor_wasm_common::{\n    error::{CallError, TrapCode, CALLEE_SUCCEEDED},\n    flags::ReturnFlags,\n};\n\n/// Interface version for the Wasm host functions.\n///\n/// This defines behavior of the Wasm execution environment i.e. the host behavior, serialiation,\n/// etc.\n///\n/// Only the highest `interface_version_X` is taken from the imports table which means Wasm has to\n/// support X-1, X-2 versions as well.\n#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]\npub struct InterfaceVersion(u32);\n\nimpl From<u32> for InterfaceVersion {\n    fn from(value: u32) -> Self {\n        InterfaceVersion(value)\n    }\n}\n\npub type HostResult = Result<(), CallError>;\n\n/// Converts a host result into a u32.\n#[must_use]\npub fn u32_from_host_result(result: HostResult) -> u32 {\n    match result {\n        Ok(()) => CALLEE_SUCCEEDED,\n        Err(host_error) => host_error.into_u32(),\n    }\n}\n\n/// Errors that can occur when resolving imports.\n#[derive(Debug, Error)]\npub enum Resolver {\n    #[error(\"export {name} not found.\")]\n    Export { name: String },\n    /// Trying to call a function pointer by index.\n    #[error(\"function pointer {index} not found.\")]\n    Table { index: u32 },\n}\n\n#[derive(Error, Debug)]\npub enum ExportError {\n    /// An error than occurs when the exported type and the expected type\n    /// are incompatible.\n    #[error(\"incompatible type\")]\n    IncompatibleType,\n    /// This error arises when an export is missing\n    #[error(\"missing export {0}\")]\n    Missing(String),\n}\n\n#[derive(Error, Debug)]\n#[non_exhaustive]\npub enum MemoryError {\n    /// Memory access is outside heap bounds.\n    #[error(\"memory access out of bounds\")]\n    HeapOutOfBounds,\n    /// Address calculation overflow.\n    #[error(\"address calculation overflow\")]\n    Overflow,\n    /// String is not valid 
UTF-8.\n    #[error(\"string is not valid utf-8\")]\n    NonUtf8String,\n}\n\n#[derive(Error, Debug)]\n/// Represents a catastrophic internal host error.\npub enum InternalHostError {\n    #[error(\"type conversion failure\")]\n    TypeConversion,\n    #[error(\"contract already exists\")]\n    ContractAlreadyExists,\n    #[error(\"tracking copy error\")]\n    TrackingCopy,\n    #[error(\"failed building execution request\")]\n    ExecuteRequestBuildFailure,\n    #[error(\"unexpected entity kind\")]\n    UnexpectedEntityKind,\n    #[error(\"failed reading total balance\")]\n    TotalBalanceReadFailure,\n    #[error(\"total balance exceeded u64::MAX\")]\n    TotalBalanceOverflow,\n    #[error(\"remaining gas exceeded the gas limit\")]\n    RemainingGasExceedsGasLimit,\n    #[error(\"account not found under key\")]\n    AccountRecordNotFound,\n    #[error(\"message did not have a checksum\")]\n    MessageChecksumMissing,\n}\n\n/// The outcome of a call.\n/// We can fold all errors into this type and return it from the host functions and remove Outcome\n/// type.\n#[derive(Debug, Error)]\npub enum VMError {\n    #[error(\"Return 0x{flags:?} {data:?}\")]\n    Return {\n        flags: ReturnFlags,\n        data: Option<Bytes>,\n    },\n    #[error(\"export: {0}\")]\n    Export(ExportError),\n    #[error(\"Out of gas\")]\n    OutOfGas,\n    /// Error while executing Wasm: traps, memory access errors, etc.\n    ///\n    /// NOTE: for supporting multiple different backends we may want to abstract this a bit and\n    /// extract memory access errors, trap codes, and unify error reporting.\n    #[error(\"Trap: {0}\")]\n    Trap(TrapCode),\n    #[error(\"Internal host error\")]\n    Internal(#[from] InternalHostError),\n    #[error(\"Execute error: {0}\")]\n    Execute(#[from] ExecuteError),\n}\n\nimpl VMError {\n    /// Returns the output data if the error is a `Return` error.\n    pub fn into_output_data(self) -> Option<Bytes> {\n        match self {\n            
VMError::Return { data, .. } => data,\n            _ => None,\n        }\n    }\n}\n\n/// Result of a VM operation.\npub type VMResult<T> = Result<T, VMError>;\n\n/// Configuration for the Wasm engine.\n#[derive(Clone, Debug)]\npub struct Config {\n    gas_limit: u64,\n    memory_limit: u32,\n}\n\nimpl Config {\n    #[must_use]\n    pub fn gas_limit(&self) -> u64 {\n        self.gas_limit\n    }\n\n    #[must_use]\n    pub fn memory_limit(&self) -> u32 {\n        self.memory_limit\n    }\n}\n\n/// Configuration for the Wasm engine.\n#[derive(Clone, Debug, Default)]\npub struct ConfigBuilder {\n    gas_limit: Option<u64>,\n    /// Memory limit in pages.\n    memory_limit: Option<u32>,\n}\n\nimpl ConfigBuilder {\n    /// Create a new configuration builder.\n    #[must_use]\n    pub fn new() -> Self {\n        Self::default()\n    }\n\n    /// Gas limit in units.\n    #[must_use]\n    pub fn with_gas_limit(mut self, gas_limit: u64) -> Self {\n        self.gas_limit = Some(gas_limit);\n        self\n    }\n\n    /// Memory limit denominated in pages.\n    #[must_use]\n    pub fn with_memory_limit(mut self, memory_limit: u32) -> Self {\n        self.memory_limit = Some(memory_limit);\n        self\n    }\n\n    /// Build the configuration.\n    #[must_use]\n    pub fn build(self) -> Config {\n        let gas_limit = self.gas_limit.expect(\"Required field missing: gas_limit\");\n        let memory_limit = self\n            .memory_limit\n            .expect(\"Required field missing: memory_limit\");\n        Config {\n            gas_limit,\n            memory_limit,\n        }\n    }\n}\n\n#[derive(Debug, Copy, Clone, PartialEq, Eq)]\npub enum MeteringPoints {\n    Remaining(u64),\n    Exhausted,\n}\n\nimpl MeteringPoints {\n    pub fn try_into_remaining(self) -> Result<u64, Self> {\n        if let Self::Remaining(v) = self {\n            Ok(v)\n        } else {\n            Err(self)\n        }\n    }\n}\n\n/// An abstraction over the 'caller' object of a host function 
that works for any Wasm VM.\n///\n/// This allows access for important instances such as the context object that was passed to the\n/// instance, wasm linear memory access, etc.\npub trait Caller {\n    type Context;\n\n    fn context(&self) -> &Self::Context;\n    fn context_mut(&mut self) -> &mut Self::Context;\n    /// Returns currently running *unmodified* bytecode.\n    fn bytecode(&self) -> Bytes;\n\n    /// Check if an export is present in the module.\n    fn has_export(&self, name: &str) -> bool;\n\n    fn memory_read(&self, offset: u32, size: usize) -> VMResult<Vec<u8>> {\n        let mut vec = vec![0; size];\n        self.memory_read_into(offset, &mut vec)?;\n        Ok(vec)\n    }\n    fn memory_read_into(&self, offset: u32, output: &mut [u8]) -> VMResult<()>;\n    fn memory_write(&self, offset: u32, data: &[u8]) -> VMResult<()>;\n    /// Allocates memory inside the Wasm VM by calling an export.\n    ///\n    /// Error is a type-erased error coming from the VM itself.\n    fn alloc(&mut self, idx: u32, size: usize, ctx: u32) -> VMResult<u32>;\n    /// Returns the amount of gas used.\n    fn gas_consumed(&mut self) -> MeteringPoints;\n    /// Set the amount of gas used.\n    fn consume_gas(&mut self, value: u64) -> VMResult<()>;\n}\n\n#[derive(Debug, Error)]\npub enum WasmPreparationError {\n    #[error(\"Missing export {0}\")]\n    MissingExport(String),\n    #[error(\"Compile error: {0}\")]\n    Compile(String),\n    #[error(\"Memory instantiation error: {0}\")]\n    Memory(String),\n    #[error(\"Instantiation error: {0}\")]\n    Instantiation(String),\n}\n\n#[derive(Debug)]\npub struct GasUsage {\n    /// The amount of gas used by the execution.\n    gas_limit: u64,\n    /// The amount of gas remaining after the execution.\n    remaining_points: u64,\n}\n\nimpl GasUsage {\n    #[must_use]\n    pub fn new(gas_limit: u64, remaining_points: u64) -> Self {\n        GasUsage {\n            gas_limit,\n            remaining_points,\n        }\n    }\n\n    
#[must_use]\n    pub fn gas_spent(&self) -> u64 {\n        debug_assert!(self.remaining_points <= self.gas_limit);\n        self.gas_limit - self.remaining_points\n    }\n\n    #[must_use]\n    pub fn gas_limit(&self) -> u64 {\n        self.gas_limit\n    }\n\n    #[must_use]\n    pub fn remaining_points(&self) -> u64 {\n        self.remaining_points\n    }\n}\n\n/// A trait that represents a Wasm instance.\npub trait WasmInstance {\n    type Context;\n\n    fn call_export(&mut self, name: &str) -> (Result<(), VMError>, GasUsage);\n    fn teardown(self) -> Self::Context;\n}\n"
  },
  {
    "path": "executor/wasmer_backend/Cargo.toml",
    "content": "[package]\nname = \"casper-executor-wasmer-backend\"\nversion = \"0.1.3\"\nedition = \"2021\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\ndescription = \"Casper executor interface package\"\nhomepage = \"https://casper.network\"\nrepository = \"https://github.com/casper-network/casper-node/tree/dev/executor/wasm_interface\"\nlicense = \"Apache-2.0\"\n\n[dependencies]\nbytes = \"1.10\"\ncasper-executor-wasm-common = { version = \"0.1.3\", path = \"../wasm_common\" }\ncasper-executor-wasm-interface = { version = \"0.1.3\", path = \"../wasm_interface\" }\ncasper-executor-wasm-host = { version = \"0.1.0\", path = \"../wasm_host\" }\ncasper-storage = { version = \"5.0.0\", path = \"../../storage\" }\ncasper-contract-sdk-sys = { version = \"0.1.3\", path = \"../../smart_contracts/sdk_sys\" }\ncasper-types = { version = \"7.0.0\", path = \"../../types\" }\nregex = \"1.11\"\nwasmer = { version = \"5.0.4\", default-features = false, features = [\n    \"singlepass\",\n] }\nwasmer-compiler-singlepass = \"5.0.4\"\nwasmer-middlewares = \"5.0.4\"\nwasmer-types = \"5.0.4\"\ntracing = \"0.1.41\"\n\n[dev-dependencies]\nwat = \"1.227.1\"\n"
  },
  {
    "path": "executor/wasmer_backend/src/imports.rs",
    "content": "use casper_executor_wasm_interface::{executor::Executor, VMError, VMResult};\nuse casper_storage::global_state::GlobalStateReader;\nuse tracing::warn;\nuse wasmer::{FunctionEnv, FunctionEnvMut, Imports, Store};\n\nuse casper_contract_sdk_sys::for_each_host_function;\n\nuse crate::WasmerEnv;\n\n/// A trait for converting a C ABI type declaration to a type that is understandable by wasm32\n/// target (and wasmer, by a consequence).\n#[allow(dead_code)]\npub(crate) trait WasmerConvert: Sized {\n    type Output;\n}\n\nimpl WasmerConvert for i32 {\n    type Output = i32;\n}\n\nimpl WasmerConvert for u32 {\n    type Output = u32;\n}\nimpl WasmerConvert for u64 {\n    type Output = u64;\n}\n\nimpl WasmerConvert for usize {\n    type Output = u32;\n}\n\nimpl<T> WasmerConvert for *const T {\n    type Output = u32; // Pointers are 32-bit addressable\n}\n\nimpl<T> WasmerConvert for *mut T {\n    type Output = u32; // Pointers are 32-bit addressable\n}\n\nimpl<Arg1: WasmerConvert, Arg2: WasmerConvert, Ret: WasmerConvert> WasmerConvert\n    for extern \"C\" fn(Arg1, Arg2) -> Ret\n{\n    type Output = u32; // Function pointers are 32-bit addressable\n}\n\nconst DEFAULT_ENV_NAME: &str = \"env\";\n\n/// This function will populate imports object with all host functions that are defined.\npub(crate) fn generate_casper_imports<S: GlobalStateReader + 'static, E: Executor + 'static>(\n    store: &mut Store,\n    env: &FunctionEnv<WasmerEnv<S, E>>,\n) -> Imports {\n    let mut imports = Imports::new();\n\n    macro_rules! visit_host_function {\n        (@convert_ret $ret:ty) => {\n            <$ret as $crate::imports::WasmerConvert>::Output\n        };\n        (@convert_ret) => { () };\n        ( $( $(#[$cfg:meta])? $vis:vis fn $name:ident $(( $($arg:ident: $argty:ty,)* ))? 
$(-> $ret:ty)?;)+) => {\n            $(\n                imports.define($crate::imports::DEFAULT_ENV_NAME, stringify!($name), wasmer::Function::new_typed_with_env(\n                    store,\n                    env,\n                    |\n                        env: FunctionEnvMut<WasmerEnv<S, E>>,\n                        // List all types and statically mapped C types into wasm types\n                        $($($arg: <$argty as $crate::imports::WasmerConvert>::Output,)*)?\n                    | -> VMResult<visit_host_function!(@convert_ret $($ret)?)> {\n                        let wasmer_caller = $crate::WasmerCaller { env };\n\n                        // Dispatch to the actual host function. This also ensures that the return type of host function impl has expected type.\n                        let result: VMResult< visit_host_function!(@convert_ret $($ret)?) > = casper_executor_wasm_host::host::$name(wasmer_caller, $($($arg,)*)?);\n\n                        match result {\n                            Ok(ret) => Ok(ret),\n                            Err(error) => {\n                                warn!(\n                                    \"Host function {} failed with error: {error:?}\",\n                                    stringify!($name),\n                                );\n\n                                if let VMError::Internal(internal) = error {\n                                    panic!(\"InternalHostError {internal:?}; aborting\");\n                                }\n\n                                Err(error)\n                            }\n                        }\n                    }\n                ));\n            )*\n        }\n    }\n    for_each_host_function!(visit_host_function);\n\n    imports\n}\n"
  },
  {
    "path": "executor/wasmer_backend/src/lib.rs",
    "content": "pub(crate) mod imports;\npub(crate) mod middleware;\n\nuse std::{\n    collections::BinaryHeap,\n    sync::{Arc, LazyLock, Weak},\n};\n\nuse bytes::Bytes;\nuse casper_executor_wasm_common::error::TrapCode;\nuse casper_executor_wasm_host::context::Context;\nuse casper_executor_wasm_interface::{\n    executor::Executor, Caller, Config, ExportError, GasUsage, InterfaceVersion, MeteringPoints,\n    VMError, VMResult, WasmInstance, WasmPreparationError,\n};\nuse casper_storage::global_state::GlobalStateReader;\nuse middleware::{\n    gas_metering,\n    gatekeeper::{Gatekeeper, GatekeeperConfig},\n};\nuse regex::Regex;\nuse wasmer::{\n    AsStoreMut, AsStoreRef, CompilerConfig, Engine, Function, FunctionEnv, FunctionEnvMut,\n    Instance, Memory, MemoryView, Module, RuntimeError, Store, StoreMut, Table, TypedFunction,\n};\nuse wasmer_compiler_singlepass::Singlepass;\nuse wasmer_middlewares::metering;\n\nfn from_wasmer_memory_access_error(error: wasmer::MemoryAccessError) -> VMError {\n    let trap_code = match error {\n        wasmer::MemoryAccessError::HeapOutOfBounds | wasmer::MemoryAccessError::Overflow => {\n            // As according to Wasm spec section `Memory Instructions` any access to memory that\n            // is out of bounds of the memory's current size is a trap. 
Reference: https://webassembly.github.io/spec/core/syntax/instructions.html#memory-instructions\n            TrapCode::MemoryOutOfBounds\n        }\n        wasmer::MemoryAccessError::NonUtf8String => {\n            // This can happen only when using wasmer's utf8 reading routines which we don't\n            // need.\n            unreachable!(\"NonUtf8String\")\n        }\n        _ => {\n            // All errors are handled and converted to a trap code, but we have to add this as\n            // wasmer's errors are #[non_exhaustive]\n            unreachable!(\"Unexpected error: {error:?}\")\n        }\n    };\n    VMError::Trap(trap_code)\n}\n\nfn from_wasmer_trap_code(value: wasmer_types::TrapCode) -> TrapCode {\n    match value {\n        wasmer_types::TrapCode::StackOverflow => TrapCode::StackOverflow,\n        wasmer_types::TrapCode::HeapAccessOutOfBounds => TrapCode::MemoryOutOfBounds,\n        wasmer_types::TrapCode::HeapMisaligned => {\n            unreachable!(\"Atomic operations are not supported\")\n        }\n        wasmer_types::TrapCode::TableAccessOutOfBounds => TrapCode::TableAccessOutOfBounds,\n        wasmer_types::TrapCode::IndirectCallToNull => TrapCode::IndirectCallToNull,\n        wasmer_types::TrapCode::BadSignature => TrapCode::BadSignature,\n        wasmer_types::TrapCode::IntegerOverflow => TrapCode::IntegerOverflow,\n        wasmer_types::TrapCode::IntegerDivisionByZero => TrapCode::IntegerDivisionByZero,\n        wasmer_types::TrapCode::BadConversionToInteger => TrapCode::BadConversionToInteger,\n        wasmer_types::TrapCode::UnreachableCodeReached => TrapCode::UnreachableCodeReached,\n        wasmer_types::TrapCode::UnalignedAtomic => {\n            todo!(\"Atomic memory extension is not supported\")\n        }\n    }\n}\n\nfn from_wasmer_export_error(error: wasmer::ExportError) -> VMError {\n    let export_error = match error {\n        wasmer::ExportError::IncompatibleType => ExportError::IncompatibleType,\n        
wasmer::ExportError::Missing(export_name) => ExportError::Missing(export_name),\n    };\n    VMError::Export(export_error)\n}\n\n#[derive(Default)]\npub struct WasmerEngine(());\n\nimpl WasmerEngine {\n    pub fn new() -> Self {\n        Self::default()\n    }\n\n    pub fn instantiate<T: Into<Bytes>, S: GlobalStateReader + 'static, E: Executor + 'static>(\n        &self,\n        wasm_bytes: T,\n        context: Context<S, E>,\n        config: Config,\n    ) -> Result<impl WasmInstance<Context = Context<S, E>>, WasmPreparationError> {\n        WasmerInstance::from_wasm_bytes(wasm_bytes, context, config)\n    }\n}\n\nstruct WasmerEnv<S: GlobalStateReader, E: Executor> {\n    context: Context<S, E>,\n    instance: Weak<Instance>,\n    bytecode: Bytes,\n    exported_runtime: Option<ExportedRuntime>,\n    interface_version: InterfaceVersion,\n}\n\npub(crate) struct WasmerCaller<'a, S: GlobalStateReader, E: Executor> {\n    env: FunctionEnvMut<'a, WasmerEnv<S, E>>,\n}\n\nimpl<S: GlobalStateReader + 'static, E: Executor + 'static> WasmerCaller<'_, S, E> {\n    fn with_memory<T>(&self, f: impl FnOnce(MemoryView<'_>) -> T) -> T {\n        let mem = &self.env.data().exported_runtime().memory;\n        let binding = self.env.as_store_ref();\n        let view = mem.view(&binding);\n        f(view)\n    }\n\n    fn with_instance<Ret>(&self, f: impl FnOnce(&Instance) -> Ret) -> Ret {\n        let instance = self.env.data().instance.upgrade().expect(\"Valid instance\");\n        f(&instance)\n    }\n\n    fn with_store_and_instance<Ret>(&mut self, f: impl FnOnce(StoreMut, &Instance) -> Ret) -> Ret {\n        let (data, store) = self.env.data_and_store_mut();\n        let instance = data.instance.upgrade().expect(\"Valid instance\");\n        f(store, &instance)\n    }\n\n    /// Returns the amount of gas used.\n    fn get_remaining_points(&mut self) -> MeteringPoints {\n        self.with_store_and_instance(|mut store, instance| {\n            let metering_points = 
metering::get_remaining_points(&mut store, instance);\n            match metering_points {\n                metering::MeteringPoints::Remaining(points) => MeteringPoints::Remaining(points),\n                metering::MeteringPoints::Exhausted => MeteringPoints::Exhausted,\n            }\n        })\n    }\n    /// Set the amount of gas used.\n    fn set_remaining_points(&mut self, new_value: u64) {\n        self.with_store_and_instance(|mut store, instance| {\n            metering::set_remaining_points(&mut store, instance, new_value);\n        })\n    }\n}\n\nimpl<S: GlobalStateReader + 'static, E: Executor + 'static> Caller for WasmerCaller<'_, S, E> {\n    type Context = Context<S, E>;\n\n    fn memory_write(&self, offset: u32, data: &[u8]) -> Result<(), VMError> {\n        self.with_memory(|mem| mem.write(offset.into(), data))\n            .map_err(from_wasmer_memory_access_error)\n    }\n\n    fn context(&self) -> &Context<S, E> {\n        &self.env.data().context\n    }\n\n    fn context_mut(&mut self) -> &mut Context<S, E> {\n        &mut self.env.data_mut().context\n    }\n\n    fn memory_read_into(&self, offset: u32, output: &mut [u8]) -> Result<(), VMError> {\n        self.with_memory(|mem| mem.read(offset.into(), output))\n            .map_err(from_wasmer_memory_access_error)\n    }\n\n    fn alloc(&mut self, idx: u32, size: usize, ctx: u32) -> VMResult<u32> {\n        let _interface_version = self.env.data().interface_version;\n\n        let (data, mut store) = self.env.data_and_store_mut();\n        let value = data\n            .exported_runtime()\n            .exported_table\n            .as_ref()\n            .expect(\"should have table exported\") // TODO: if theres no table then no function pointer is stored in the wasm blob -\n            // probably safe\n            .get(&mut store.as_store_mut(), idx)\n            .expect(\"has entry in the table\"); // TODO: better error handling - pass 0 as nullptr?\n        let funcref = 
value.funcref().expect(\"is funcref\");\n        let valid_funcref = funcref.as_ref().expect(\"valid funcref\");\n        let alloc_callback: TypedFunction<(u32, u32), u32> = valid_funcref\n            .typed(&store)\n            .unwrap_or_else(|error| panic!(\"{error:?}\"));\n        let ptr = alloc_callback\n            .call(&mut store.as_store_mut(), size.try_into().unwrap(), ctx)\n            .map_err(handle_wasmer_runtime_error)?;\n        Ok(ptr)\n    }\n\n    fn bytecode(&self) -> Bytes {\n        self.env.data().bytecode.clone()\n    }\n\n    /// Returns the amount of gas used.\n    #[inline]\n    fn gas_consumed(&mut self) -> MeteringPoints {\n        self.get_remaining_points()\n    }\n\n    /// Set the amount of gas used.\n    ///\n    /// This method will cause the VM engine to stop in case remaining gas points are depleted.\n    fn consume_gas(&mut self, amount: u64) -> VMResult<()> {\n        let gas_consumed = self.gas_consumed();\n        match gas_consumed {\n            MeteringPoints::Remaining(remaining_points) => {\n                let remaining_points = remaining_points\n                    .checked_sub(amount)\n                    .ok_or(VMError::OutOfGas)?;\n                self.set_remaining_points(remaining_points);\n                Ok(())\n            }\n            MeteringPoints::Exhausted => Err(VMError::OutOfGas),\n        }\n    }\n\n    #[inline]\n    fn has_export(&self, name: &str) -> bool {\n        self.with_instance(|instance| instance.exports.contains(name))\n    }\n}\n\nimpl<S: GlobalStateReader, E: Executor> WasmerEnv<S, E> {\n    fn new(context: Context<S, E>, code: Bytes, interface_version: InterfaceVersion) -> Self {\n        Self {\n            context,\n            instance: Weak::new(),\n            exported_runtime: None,\n            bytecode: code,\n            interface_version,\n        }\n    }\n    pub(crate) fn exported_runtime(&self) -> &ExportedRuntime {\n        self.exported_runtime\n            
.as_ref()\n            .expect(\"Valid instance of exported runtime\")\n    }\n}\n\n/// Container for Wasm-provided exports such as alloc, dealloc, etc.\n///\n/// Let's call it a \"minimal runtime\" that is expected to exist inside a Wasm.\n#[derive(Clone)]\npub(crate) struct ExportedRuntime {\n    pub(crate) memory: Memory,\n    pub(crate) exported_table: Option<Table>,\n}\n\npub(crate) struct WasmerInstance<S: GlobalStateReader, E: Executor + 'static> {\n    instance: Arc<Instance>,\n    env: FunctionEnv<WasmerEnv<S, E>>,\n    store: Store,\n    config: Config,\n}\n\nfn handle_wasmer_runtime_error(error: RuntimeError) -> VMError {\n    match error.downcast::<VMError>() {\n        Ok(vm_error) => vm_error,\n        Err(wasmer_runtime_error) => {\n            // NOTE: Can this be other variant than VMError and trap? This may indicate a bug in\n            // our code.\n            let wasmer_trap_code = wasmer_runtime_error.to_trap().expect(\"Trap code\");\n            VMError::Trap(from_wasmer_trap_code(wasmer_trap_code))\n        }\n    }\n}\n\nimpl<S, E> WasmerInstance<S, E>\nwhere\n    S: GlobalStateReader + 'static,\n    E: Executor + 'static,\n{\n    pub(crate) fn call_export(&mut self, name: &str) -> Result<(), VMError> {\n        let exported_call_func: TypedFunction<(), ()> = self\n            .instance\n            .exports\n            .get_typed_function(&self.store, name)\n            .map_err(from_wasmer_export_error)?;\n\n        exported_call_func\n            .call(&mut self.store.as_store_mut())\n            .map_err(handle_wasmer_runtime_error)?;\n        Ok(())\n    }\n\n    pub(crate) fn from_wasm_bytes<C: Into<Bytes>>(\n        wasm_bytes: C,\n        context: Context<S, E>,\n        config: Config,\n    ) -> Result<Self, WasmPreparationError> {\n        let engine = {\n            let mut singlepass_compiler = Singlepass::new();\n            let gatekeeper_config = GatekeeperConfig::default();\n            
singlepass_compiler.push_middleware(Arc::new(Gatekeeper::new(gatekeeper_config)));\n            singlepass_compiler\n                .push_middleware(gas_metering::gas_metering_middleware(config.gas_limit()));\n            singlepass_compiler\n        };\n\n        let engine = Engine::from(engine);\n\n        let wasm_bytes: Bytes = wasm_bytes.into();\n\n        let module = Module::new(&engine, &wasm_bytes)\n            .map_err(|error| WasmPreparationError::Compile(error.to_string()))?;\n\n        let mut store = Store::new(engine);\n\n        let wasmer_env = WasmerEnv::new(context, wasm_bytes, InterfaceVersion::from(1u32));\n        let function_env = FunctionEnv::new(&mut store, wasmer_env);\n\n        let memory = Memory::new(\n            &mut store,\n            wasmer_types::MemoryType {\n                minimum: wasmer_types::Pages(17),\n                maximum: None,\n                shared: false,\n            },\n        )\n        .map_err(|error| WasmPreparationError::Memory(error.to_string()))?;\n\n        let imports = {\n            let mut imports = imports::generate_casper_imports(&mut store, &function_env);\n\n            imports.define(\"env\", \"memory\", memory.clone());\n\n            imports.define(\n                \"env\",\n                \"interface_version_1\",\n                Function::new_typed(&mut store, || {}),\n            );\n\n            imports\n        };\n\n        // TODO: Deal with \"start\" section that executes actual Wasm - test, measure gas, etc. ->\n        // Instance::new may fail with RuntimError\n\n        let instance = {\n            let instance = Instance::new(&mut store, &module, &imports)\n                .map_err(|error| WasmPreparationError::Instantiation(error.to_string()))?;\n\n            // We don't necessarily need atomic counter. 
Arc's purpose is to be able to retrieve a\n            // Weak reference to the instance to be able to invoke recursive calls to the wasm\n            // itself from within a host function implementation.\n\n            // instance.exports.get_table(name)\n            Arc::new(instance)\n        };\n\n        let interface_version = {\n            static RE: LazyLock<Regex> =\n                LazyLock::new(|| Regex::new(r\"^interface_version_(?P<version>\\d+)$\").unwrap());\n\n            let mut interface_versions = BinaryHeap::new();\n            for import in module.imports() {\n                if import.module() == \"env\" {\n                    if let Some(caps) = RE.captures(import.name()) {\n                        let version = &caps[\"version\"];\n                        let version: u32 = version.parse().expect(\"valid number\"); // SAFETY: regex guarantees this is a number, and imports table guarantees\n                                                                                   // limited set of values.\n                        interface_versions.push(InterfaceVersion::from(version));\n                    }\n                }\n            }\n\n            // Get the highest one assuming given Wasm can support all previous interface versions.\n            interface_versions.pop()\n        };\n\n        // TODO: get first export of type table as some compilers generate different names (i.e.\n        // rust __indirect_function_table, assemblyscript `table` etc). 
There's only one table\n        // allowed in a valid module.\n        let table = match instance.exports.get_table(\"__indirect_function_table\") {\n            Ok(table) => Some(table.clone()),\n            Err(error @ wasmer::ExportError::IncompatibleType) => {\n                return Err(WasmPreparationError::MissingExport(error.to_string()))\n            }\n            Err(wasmer::ExportError::Missing(_)) => None,\n        };\n\n        {\n            let function_env_mut = function_env.as_mut(&mut store);\n            function_env_mut.instance = Arc::downgrade(&instance);\n            function_env_mut.exported_runtime = Some(ExportedRuntime {\n                memory,\n                exported_table: table,\n            });\n            if let Some(interface_version) = interface_version {\n                function_env_mut.interface_version = interface_version;\n            }\n        }\n\n        Ok(Self {\n            instance,\n            env: function_env,\n            store,\n            config,\n        })\n    }\n}\n\nimpl<S, E> WasmInstance for WasmerInstance<S, E>\nwhere\n    S: GlobalStateReader + 'static,\n    E: Executor + 'static,\n{\n    type Context = Context<S, E>;\n    fn call_export(&mut self, name: &str) -> (Result<(), VMError>, GasUsage) {\n        let vm_result = self.call_export(name);\n        let remaining_points = metering::get_remaining_points(&mut self.store, &self.instance);\n        match remaining_points {\n            metering::MeteringPoints::Remaining(remaining_points) => {\n                let gas_usage = GasUsage::new(self.config.gas_limit(), remaining_points);\n                (vm_result, gas_usage)\n            }\n            metering::MeteringPoints::Exhausted => {\n                let gas_usage = GasUsage::new(self.config.gas_limit(), 0);\n                (Err(VMError::OutOfGas), gas_usage)\n            }\n        }\n    }\n\n    /// Consume instance object and retrieve the [`Context`] object.\n    fn teardown(self) -> 
Context<S, E> {\n        let WasmerInstance { env, mut store, .. } = self;\n\n        let mut env_mut = env.into_mut(&mut store);\n\n        let data = env_mut.data_mut();\n\n        // NOTE: There must be a better way than re-creating the object based on consumed fields.\n\n        Context {\n            initiator: data.context.initiator,\n            caller: data.context.caller,\n            callee: data.context.callee,\n            config: data.context.config,\n            storage_costs: data.context.storage_costs,\n            transferred_value: data.context.transferred_value,\n            tracking_copy: data.context.tracking_copy.fork2(),\n            executor: data.context.executor.clone(),\n            transaction_hash: data.context.transaction_hash,\n            address_generator: Arc::clone(&data.context.address_generator),\n            chain_name: data.context.chain_name.clone(),\n            input: data.context.input.clone(),\n            block_time: data.context.block_time,\n            message_limits: data.context.message_limits,\n        }\n    }\n}\n"
  },
  {
    "path": "executor/wasmer_backend/src/middleware/gas_metering.rs",
    "content": "use std::sync::Arc;\n\nuse wasmer::{wasmparser::Operator, ModuleMiddleware};\nuse wasmer_middlewares::Metering;\n\n/// Calculated based on the benchmark results and fitted for approx ~1000 CSPR of computation and\n/// 16s maximum computation time.\nconst MULTIPLIER: u64 = 16;\n\n/// The scaling factor for the cost function is used to saturate the computation time for the\n/// maximum limits allocated. Multiplier derived from benchmarks itself is not accurate enough due\n/// to non-linear overhead of a gas metering on real world code. Fixed scaling factor is used\n/// to adjust the multiplier to counter the effects of metering overhead. This is validated with\n/// real world compute-intensive Wasm.\nconst SCALING_FACTOR: u64 = 2;\n\nfn cycles(operator: &Operator) -> u64 {\n    match operator {\n        Operator::I32Const { .. } => 1,\n        Operator::I64Const { .. } => 1,\n        Operator::F32Const { .. } => 1,\n        Operator::F64Const { .. } => 1,\n        Operator::I32Clz => 1,\n        Operator::I32Ctz => 1,\n        Operator::I32Popcnt => 1,\n        Operator::I64Clz => 1,\n        Operator::I64Ctz => 1,\n        Operator::I64Popcnt => 1,\n        Operator::F32Abs => 1,\n        Operator::F32Neg => 1,\n        Operator::F64Abs => 2,\n        Operator::F64Neg => 1,\n        Operator::F32Ceil => 4,\n        Operator::F32Floor => 4,\n        Operator::F32Trunc => 3,\n        Operator::F32Nearest => 3,\n        Operator::F64Ceil => 4,\n        Operator::F64Floor => 4,\n        Operator::F64Trunc => 4,\n        Operator::F64Nearest => 4,\n        Operator::F32Sqrt => 4,\n        Operator::F64Sqrt => 8,\n        Operator::I32Add => 1,\n        Operator::I32Sub => 1,\n        Operator::I32Mul => 1,\n        Operator::I32And => 1,\n        Operator::I32Or => 1,\n        Operator::I32Xor => 1,\n        Operator::I32Shl => 1,\n        Operator::I32ShrS => 1,\n        Operator::I32ShrU => 1,\n        Operator::I32Rotl => 1,\n        Operator::I32Rotr 
=> 1,\n        Operator::I64Add => 1,\n        Operator::I64Sub => 1,\n        Operator::I64Mul => 1,\n        Operator::I64And => 1,\n        Operator::I64Or => 1,\n        Operator::I64Xor => 1,\n        Operator::I64Shl => 1,\n        Operator::I64ShrS => 1,\n        Operator::I64ShrU => 1,\n        Operator::I64Rotl => 1,\n        Operator::I64Rotr => 1,\n        Operator::I32DivS => 18,\n        Operator::I32DivU => 18,\n        Operator::I32RemS => 19,\n        Operator::I32RemU => 19,\n        Operator::I64DivS => 19,\n        Operator::I64DivU => 18,\n        Operator::I64RemS => 18,\n        Operator::I64RemU => 18,\n        Operator::F32Add => 3,\n        Operator::F32Sub => 4,\n        Operator::F32Mul => 3,\n        Operator::F64Add => 4,\n        Operator::F64Sub => 4,\n        Operator::F64Mul => 4,\n        Operator::F32Div => 5,\n        Operator::F64Div => 4,\n        Operator::F32Min => 24,\n        Operator::F32Max => 21,\n        Operator::F64Min => 24,\n        Operator::F64Max => 23,\n        Operator::F32Copysign => 2,\n        Operator::F64Copysign => 4,\n        Operator::I32Eqz => 1,\n        Operator::I64Eqz => 2,\n        Operator::I32Eq => 1,\n        Operator::I32Ne => 1,\n        Operator::I32LtS => 1,\n        Operator::I32LtU => 2,\n        Operator::I32GtS => 1,\n        Operator::I32GtU => 2,\n        Operator::I32LeS => 2,\n        Operator::I32LeU => 1,\n        Operator::I32GeS => 1,\n        Operator::I32GeU => 1,\n        Operator::I64Eq => 1,\n        Operator::I64Ne => 2,\n        Operator::I64LtS => 1,\n        Operator::I64LtU => 1,\n        Operator::I64GtS => 1,\n        Operator::I64GtU => 2,\n        Operator::I64LeS => 1,\n        Operator::I64LeU => 1,\n        Operator::I64GeS => 2,\n        Operator::I64GeU => 1,\n        Operator::F32Eq => 2,\n        Operator::F32Ne => 2,\n        Operator::F64Eq => 2,\n        Operator::F64Ne => 2,\n        Operator::F32Lt => 2,\n        Operator::F32Gt => 2,\n        
Operator::F32Le => 2,\n        Operator::F32Ge => 2,\n        Operator::F64Lt => 2,\n        Operator::F64Gt => 2,\n        Operator::F64Le => 2,\n        Operator::F64Ge => 2,\n        Operator::I32Extend8S => 1,\n        Operator::I32Extend16S => 1,\n        Operator::I64Extend8S => 1,\n        Operator::I64Extend16S => 1,\n        Operator::F32ConvertI32S => 2,\n        Operator::F32ConvertI64S => 2,\n        Operator::F64ConvertI32S => 2,\n        Operator::F64ConvertI64S => 2,\n        Operator::I64Extend32S => 1,\n        Operator::I32WrapI64 => 1,\n        Operator::I64ExtendI32S => 1,\n        Operator::I64ExtendI32U => 1,\n        Operator::F32DemoteF64 => 1,\n        Operator::F64PromoteF32 => 2,\n        Operator::F32ReinterpretI32 => 1,\n        Operator::F64ReinterpretI64 => 1,\n        Operator::F32ConvertI32U => 2,\n        Operator::F64ConvertI32U => 2,\n        Operator::I32ReinterpretF32 => 1,\n        Operator::I64ReinterpretF64 => 1,\n        Operator::I32TruncF32S => 19,\n        Operator::I32TruncF32U => 17,\n        Operator::I32TruncF64S => 19,\n        Operator::I32TruncF64U => 18,\n        Operator::I64TruncF32S => 19,\n        Operator::I64TruncF32U => 21,\n        Operator::I64TruncF64S => 19,\n        Operator::I64TruncF64U => 23,\n        Operator::I64TruncSatF32S => 19,\n        Operator::I64TruncSatF64S => 19,\n        Operator::I32TruncSatF32U => 19,\n        Operator::I32TruncSatF64U => 18,\n        Operator::I64TruncSatF32U => 20,\n        Operator::I64TruncSatF64U => 22,\n        Operator::I32TruncSatF32S => 18,\n        Operator::I32TruncSatF64S => 19,\n        Operator::F32ConvertI64U => 14,\n        Operator::F64ConvertI64U => 13,\n        Operator::RefFunc { .. } => 29,\n        Operator::RefTestNullable { .. } => 34,\n        Operator::LocalGet { .. } => 1,\n        Operator::GlobalGet { .. } => 5,\n        Operator::GlobalSet { .. } => 1,\n        Operator::LocalTee { .. } => 1,\n        Operator::TableGet { .. 
} => 29,\n        Operator::TableSize { .. } => 25,\n        Operator::I32Load { .. } => 2,\n        Operator::I64Load { .. } => 2,\n        Operator::F32Load { .. } => 2,\n        Operator::F64Load { .. } => 2,\n        Operator::I32Store { .. } => 1,\n        Operator::I64Store { .. } => 1,\n        Operator::F32Store { .. } => 1,\n        Operator::F64Store { .. } => 1,\n        Operator::I32Load8S { .. } => 2,\n        Operator::I32Load8U { .. } => 2,\n        Operator::I32Load16S { .. } => 2,\n        Operator::I32Load16U { .. } => 2,\n        Operator::I64Load8S { .. } => 2,\n        Operator::I64Load8U { .. } => 2,\n        Operator::I64Load16S { .. } => 2,\n        Operator::I64Load16U { .. } => 2,\n        Operator::I64Load32S { .. } => 2,\n        Operator::I64Load32U { .. } => 2,\n        Operator::I32Store8 { .. } => 1,\n        Operator::I32Store16 { .. } => 1,\n        Operator::I64Store8 { .. } => 1,\n        Operator::I64Store16 { .. } => 1,\n        Operator::I64Store32 { .. } => 1,\n        Operator::MemorySize { .. } => 31,\n        Operator::MemoryGrow { .. } => 67,\n\n\n        Operator::MemoryInit { .. }\n        | Operator::DataDrop { .. }\n        | Operator::MemoryCopy { ..}\n        | Operator::MemoryFill { .. }\n        | Operator::TableInit { .. }\n        | Operator::ElemDrop { .. }\n        | Operator::TableCopy { .. } => 31, // memory.copy has cycle count of 31, rest needs benchmark validation (bulk memory extension)\n\n\n        Operator::Select => 14,\n        Operator::If { .. } => 1,\n        Operator::Call { .. } => 17,\n        Operator::Br { .. } => 12,\n        Operator::BrIf { .. } => 14,\n        Operator::BrTable { .. } => 34,\n        Operator::CallIndirect { .. } => 23,\n        Operator::Unreachable => 1,\n        Operator::Nop => 1,\n        Operator::Block { .. } | Operator::Loop { .. } | Operator::Else => 1,\n        Operator::TryTable { .. }\n        | Operator::Throw { .. 
}\n        | Operator::ThrowRef\n        | Operator::Try { .. }\n        | Operator::Catch { .. }\n        | Operator::Rethrow { .. }\n        | Operator::Delegate { .. }\n        | Operator::CatchAll => todo!(\"try/catch operators are not metered yet; gatekeeper config should not enable this extension\"),\n        Operator::End\n        | Operator::Return\n        | Operator::ReturnCall { .. }\n        | Operator::ReturnCallIndirect { .. } => 1,\n        Operator::Drop => 1,\n        Operator::TypedSelect { .. } => unreachable!(),\n        Operator::LocalSet { .. } => 1,\n        Operator::RefNull { .. }\n        | Operator::RefIsNull\n        | Operator::RefEq\n        | Operator::StructNew { .. }\n        | Operator::StructNewDefault { .. }\n        | Operator::StructGet { .. }\n        | Operator::StructGetS { .. }\n        | Operator::StructGetU { .. }\n        | Operator::StructSet { .. }\n        | Operator::ArrayNew { .. }\n        | Operator::ArrayNewDefault { .. }\n        | Operator::ArrayNewFixed { .. }\n        | Operator::ArrayNewData { .. }\n        | Operator::ArrayNewElem { .. }\n        | Operator::ArrayGet { .. }\n        | Operator::ArrayGetS { .. }\n        | Operator::ArrayGetU { .. }\n        | Operator::ArraySet { .. }\n        | Operator::ArrayLen\n        | Operator::ArrayFill { .. }\n        | Operator::ArrayCopy { .. }\n        | Operator::ArrayInitData { .. }\n        | Operator::ArrayInitElem { .. }\n        | Operator::RefTestNonNull { .. }\n        | Operator::RefCastNonNull { .. }\n        | Operator::RefCastNullable { .. }\n        | Operator::BrOnCast { .. }\n        | Operator::BrOnCastFail { .. }\n        | Operator::AnyConvertExtern\n        | Operator::ExternConvertAny\n        | Operator::RefI31\n        | Operator::I31GetS\n        | Operator::I31GetU\n        | Operator::TableFill { .. }\n        | Operator::TableSet { .. }\n        | Operator::TableGrow { .. }\n        | Operator::MemoryDiscard { .. 
}\n        | Operator::MemoryAtomicNotify { .. }\n        | Operator::MemoryAtomicWait32 { .. }\n        | Operator::MemoryAtomicWait64 { .. }\n        | Operator::AtomicFence\n        | Operator::I32AtomicLoad { .. }\n        | Operator::I64AtomicLoad { .. }\n        | Operator::I32AtomicLoad8U { .. }\n        | Operator::I32AtomicLoad16U { .. }\n        | Operator::I64AtomicLoad8U { .. }\n        | Operator::I64AtomicLoad16U { .. }\n        | Operator::I64AtomicLoad32U { .. }\n        | Operator::I32AtomicStore { .. }\n        | Operator::I64AtomicStore { .. }\n        | Operator::I32AtomicStore8 { .. }\n        | Operator::I32AtomicStore16 { .. }\n        | Operator::I64AtomicStore8 { .. }\n        | Operator::I64AtomicStore16 { .. }\n        | Operator::I64AtomicStore32 { .. }\n        | Operator::I32AtomicRmwAdd { .. }\n        | Operator::I64AtomicRmwAdd { .. }\n        | Operator::I32AtomicRmw8AddU { .. }\n        | Operator::I32AtomicRmw16AddU { .. }\n        | Operator::I64AtomicRmw8AddU { .. }\n        | Operator::I64AtomicRmw16AddU { .. }\n        | Operator::I64AtomicRmw32AddU { .. }\n        | Operator::I32AtomicRmwSub { .. }\n        | Operator::I64AtomicRmwSub { .. }\n        | Operator::I32AtomicRmw8SubU { .. }\n        | Operator::I32AtomicRmw16SubU { .. }\n        | Operator::I64AtomicRmw8SubU { .. }\n        | Operator::I64AtomicRmw16SubU { .. }\n        | Operator::I64AtomicRmw32SubU { .. }\n        | Operator::I32AtomicRmwAnd { .. }\n        | Operator::I64AtomicRmwAnd { .. }\n        | Operator::I32AtomicRmw8AndU { .. }\n        | Operator::I32AtomicRmw16AndU { .. }\n        | Operator::I64AtomicRmw8AndU { .. }\n        | Operator::I64AtomicRmw16AndU { .. }\n        | Operator::I64AtomicRmw32AndU { .. }\n        | Operator::I32AtomicRmwOr { .. }\n        | Operator::I64AtomicRmwOr { .. }\n        | Operator::I32AtomicRmw8OrU { .. }\n        | Operator::I32AtomicRmw16OrU { .. }\n        | Operator::I64AtomicRmw8OrU { .. 
}\n        | Operator::I64AtomicRmw16OrU { .. }\n        | Operator::I64AtomicRmw32OrU { .. }\n        | Operator::I32AtomicRmwXor { .. }\n        | Operator::I64AtomicRmwXor { .. }\n        | Operator::I32AtomicRmw8XorU { .. }\n        | Operator::I32AtomicRmw16XorU { .. }\n        | Operator::I64AtomicRmw8XorU { .. }\n        | Operator::I64AtomicRmw16XorU { .. }\n        | Operator::I64AtomicRmw32XorU { .. }\n        | Operator::I32AtomicRmwXchg { .. }\n        | Operator::I64AtomicRmwXchg { .. }\n        | Operator::I32AtomicRmw8XchgU { .. }\n        | Operator::I32AtomicRmw16XchgU { .. }\n        | Operator::I64AtomicRmw8XchgU { .. }\n        | Operator::I64AtomicRmw16XchgU { .. }\n        | Operator::I64AtomicRmw32XchgU { .. }\n        | Operator::I32AtomicRmwCmpxchg { .. }\n        | Operator::I64AtomicRmwCmpxchg { .. }\n        | Operator::I32AtomicRmw8CmpxchgU { .. }\n        | Operator::I32AtomicRmw16CmpxchgU { .. }\n        | Operator::I64AtomicRmw8CmpxchgU { .. }\n        | Operator::I64AtomicRmw16CmpxchgU { .. }\n        | Operator::I64AtomicRmw32CmpxchgU { .. }\n        | Operator::V128Load { .. }\n        | Operator::V128Load8x8S { .. }\n        | Operator::V128Load8x8U { .. }\n        | Operator::V128Load16x4S { .. }\n        | Operator::V128Load16x4U { .. }\n        | Operator::V128Load32x2S { .. }\n        | Operator::V128Load32x2U { .. }\n        | Operator::V128Load8Splat { .. }\n        | Operator::V128Load16Splat { .. }\n        | Operator::V128Load32Splat { .. }\n        | Operator::V128Load64Splat { .. }\n        | Operator::V128Load32Zero { .. }\n        | Operator::V128Load64Zero { .. }\n        | Operator::V128Store { .. }\n        | Operator::V128Load8Lane { .. }\n        | Operator::V128Load16Lane { .. }\n        | Operator::V128Load32Lane { .. }\n        | Operator::V128Load64Lane { .. }\n        | Operator::V128Store8Lane { .. }\n        | Operator::V128Store16Lane { .. }\n        | Operator::V128Store32Lane { .. 
}\n        | Operator::V128Store64Lane { .. }\n        | Operator::V128Const { .. }\n        | Operator::I8x16Shuffle { .. }\n        | Operator::I8x16ExtractLaneS { .. }\n        | Operator::I8x16ExtractLaneU { .. }\n        | Operator::I8x16ReplaceLane { .. }\n        | Operator::I16x8ExtractLaneS { .. }\n        | Operator::I16x8ExtractLaneU { .. }\n        | Operator::I16x8ReplaceLane { .. }\n        | Operator::I32x4ExtractLane { .. }\n        | Operator::I32x4ReplaceLane { .. }\n        | Operator::I64x2ExtractLane { .. }\n        | Operator::I64x2ReplaceLane { .. }\n        | Operator::F32x4ExtractLane { .. }\n        | Operator::F32x4ReplaceLane { .. }\n        | Operator::F64x2ExtractLane { .. }\n        | Operator::F64x2ReplaceLane { .. }\n        | Operator::I8x16Swizzle\n        | Operator::I8x16Splat\n        | Operator::I16x8Splat\n        | Operator::I32x4Splat\n        | Operator::I64x2Splat\n        | Operator::F32x4Splat\n        | Operator::F64x2Splat\n        | Operator::I8x16Eq\n        | Operator::I8x16Ne\n        | Operator::I8x16LtS\n        | Operator::I8x16LtU\n        | Operator::I8x16GtS\n        | Operator::I8x16GtU\n        | Operator::I8x16LeS\n        | Operator::I8x16LeU\n        | Operator::I8x16GeS\n        | Operator::I8x16GeU\n        | Operator::I16x8Eq\n        | Operator::I16x8Ne\n        | Operator::I16x8LtS\n        | Operator::I16x8LtU\n        | Operator::I16x8GtS\n        | Operator::I16x8GtU\n        | Operator::I16x8LeS\n        | Operator::I16x8LeU\n        | Operator::I16x8GeS\n        | Operator::I16x8GeU\n        | Operator::I32x4Eq\n        | Operator::I32x4Ne\n        | Operator::I32x4LtS\n        | Operator::I32x4LtU\n        | Operator::I32x4GtS\n        | Operator::I32x4GtU\n        | Operator::I32x4LeS\n        | Operator::I32x4LeU\n        | Operator::I32x4GeS\n        | Operator::I32x4GeU\n        | Operator::I64x2Eq\n        | Operator::I64x2Ne\n        | Operator::I64x2LtS\n        | Operator::I64x2GtS\n  
      | Operator::I64x2LeS\n        | Operator::I64x2GeS\n        | Operator::F32x4Eq\n        | Operator::F32x4Ne\n        | Operator::F32x4Lt\n        | Operator::F32x4Gt\n        | Operator::F32x4Le\n        | Operator::F32x4Ge\n        | Operator::F64x2Eq\n        | Operator::F64x2Ne\n        | Operator::F64x2Lt\n        | Operator::F64x2Gt\n        | Operator::F64x2Le\n        | Operator::F64x2Ge\n        | Operator::V128Not\n        | Operator::V128And\n        | Operator::V128AndNot\n        | Operator::V128Or\n        | Operator::V128Xor\n        | Operator::V128Bitselect\n        | Operator::V128AnyTrue\n        | Operator::I8x16Abs\n        | Operator::I8x16Neg\n        | Operator::I8x16Popcnt\n        | Operator::I8x16AllTrue\n        | Operator::I8x16Bitmask\n        | Operator::I8x16NarrowI16x8S\n        | Operator::I8x16NarrowI16x8U\n        | Operator::I8x16Shl\n        | Operator::I8x16ShrS\n        | Operator::I8x16ShrU\n        | Operator::I8x16Add\n        | Operator::I8x16AddSatS\n        | Operator::I8x16AddSatU\n        | Operator::I8x16Sub\n        | Operator::I8x16SubSatS\n        | Operator::I8x16SubSatU\n        | Operator::I8x16MinS\n        | Operator::I8x16MinU\n        | Operator::I8x16MaxS\n        | Operator::I8x16MaxU\n        | Operator::I8x16AvgrU\n        | Operator::I16x8ExtAddPairwiseI8x16S\n        | Operator::I16x8ExtAddPairwiseI8x16U\n        | Operator::I16x8Abs\n        | Operator::I16x8Neg\n        | Operator::I16x8Q15MulrSatS\n        | Operator::I16x8AllTrue\n        | Operator::I16x8Bitmask\n        | Operator::I16x8NarrowI32x4S\n        | Operator::I16x8NarrowI32x4U\n        | Operator::I16x8ExtendLowI8x16S\n        | Operator::I16x8ExtendHighI8x16S\n        | Operator::I16x8ExtendLowI8x16U\n        | Operator::I16x8ExtendHighI8x16U\n        | Operator::I16x8Shl\n        | Operator::I16x8ShrS\n        | Operator::I16x8ShrU\n        | Operator::I16x8Add\n        | Operator::I16x8AddSatS\n        | 
Operator::I16x8AddSatU\n        | Operator::I16x8Sub\n        | Operator::I16x8SubSatS\n        | Operator::I16x8SubSatU\n        | Operator::I16x8Mul\n        | Operator::I16x8MinS\n        | Operator::I16x8MinU\n        | Operator::I16x8MaxS\n        | Operator::I16x8MaxU\n        | Operator::I16x8AvgrU\n        | Operator::I16x8ExtMulLowI8x16S\n        | Operator::I16x8ExtMulHighI8x16S\n        | Operator::I16x8ExtMulLowI8x16U\n        | Operator::I16x8ExtMulHighI8x16U\n        | Operator::I32x4ExtAddPairwiseI16x8S\n        | Operator::I32x4ExtAddPairwiseI16x8U\n        | Operator::I32x4Abs\n        | Operator::I32x4Neg\n        | Operator::I32x4AllTrue\n        | Operator::I32x4Bitmask\n        | Operator::I32x4ExtendLowI16x8S\n        | Operator::I32x4ExtendHighI16x8S\n        | Operator::I32x4ExtendLowI16x8U\n        | Operator::I32x4ExtendHighI16x8U\n        | Operator::I32x4Shl\n        | Operator::I32x4ShrS\n        | Operator::I32x4ShrU\n        | Operator::I32x4Add\n        | Operator::I32x4Sub\n        | Operator::I32x4Mul\n        | Operator::I32x4MinS\n        | Operator::I32x4MinU\n        | Operator::I32x4MaxS\n        | Operator::I32x4MaxU\n        | Operator::I32x4DotI16x8S\n        | Operator::I32x4ExtMulLowI16x8S\n        | Operator::I32x4ExtMulHighI16x8S\n        | Operator::I32x4ExtMulLowI16x8U\n        | Operator::I32x4ExtMulHighI16x8U\n        | Operator::I64x2Abs\n        | Operator::I64x2Neg\n        | Operator::I64x2AllTrue\n        | Operator::I64x2Bitmask\n        | Operator::I64x2ExtendLowI32x4S\n        | Operator::I64x2ExtendHighI32x4S\n        | Operator::I64x2ExtendLowI32x4U\n        | Operator::I64x2ExtendHighI32x4U\n        | Operator::I64x2Shl\n        | Operator::I64x2ShrS\n        | Operator::I64x2ShrU\n        | Operator::I64x2Add\n        | Operator::I64x2Sub\n        | Operator::I64x2Mul\n        | Operator::I64x2ExtMulLowI32x4S\n        | Operator::I64x2ExtMulHighI32x4S\n        | Operator::I64x2ExtMulLowI32x4U\n        | 
Operator::I64x2ExtMulHighI32x4U\n        | Operator::F32x4Ceil\n        | Operator::F32x4Floor\n        | Operator::F32x4Trunc\n        | Operator::F32x4Nearest\n        | Operator::F32x4Abs\n        | Operator::F32x4Neg\n        | Operator::F32x4Sqrt\n        | Operator::F32x4Add\n        | Operator::F32x4Sub\n        | Operator::F32x4Mul\n        | Operator::F32x4Div\n        | Operator::F32x4Min\n        | Operator::F32x4Max\n        | Operator::F32x4PMin\n        | Operator::F32x4PMax\n        | Operator::F64x2Ceil\n        | Operator::F64x2Floor\n        | Operator::F64x2Trunc\n        | Operator::F64x2Nearest\n        | Operator::F64x2Abs\n        | Operator::F64x2Neg\n        | Operator::F64x2Sqrt\n        | Operator::F64x2Add\n        | Operator::F64x2Sub\n        | Operator::F64x2Mul\n        | Operator::F64x2Div\n        | Operator::F64x2Min\n        | Operator::F64x2Max\n        | Operator::F64x2PMin\n        | Operator::F64x2PMax\n        | Operator::I32x4TruncSatF32x4S\n        | Operator::I32x4TruncSatF32x4U\n        | Operator::F32x4ConvertI32x4S\n        | Operator::F32x4ConvertI32x4U\n        | Operator::I32x4TruncSatF64x2SZero\n        | Operator::I32x4TruncSatF64x2UZero\n        | Operator::F64x2ConvertLowI32x4S\n        | Operator::F64x2ConvertLowI32x4U\n        | Operator::F32x4DemoteF64x2Zero\n        | Operator::F64x2PromoteLowF32x4\n        | Operator::I8x16RelaxedSwizzle\n        | Operator::I32x4RelaxedTruncF32x4S\n        | Operator::I32x4RelaxedTruncF32x4U\n        | Operator::I32x4RelaxedTruncF64x2SZero\n        | Operator::I32x4RelaxedTruncF64x2UZero\n        | Operator::F32x4RelaxedMadd\n        | Operator::F32x4RelaxedNmadd\n        | Operator::F64x2RelaxedMadd\n        | Operator::F64x2RelaxedNmadd\n        | Operator::I8x16RelaxedLaneselect\n        | Operator::I16x8RelaxedLaneselect\n        | Operator::I32x4RelaxedLaneselect\n        | Operator::I64x2RelaxedLaneselect\n        | Operator::F32x4RelaxedMin\n        | 
Operator::F32x4RelaxedMax\n        | Operator::F64x2RelaxedMin\n        | Operator::F64x2RelaxedMax\n        | Operator::I16x8RelaxedQ15mulrS\n        | Operator::I16x8RelaxedDotI8x16I7x16S\n        | Operator::I32x4RelaxedDotI8x16I7x16AddS\n        | Operator::CallRef { .. }\n        | Operator::ReturnCallRef { .. }\n        | Operator::RefAsNonNull\n        | Operator::BrOnNull { .. }\n        | Operator::BrOnNonNull { .. }\n        | Operator::GlobalAtomicGet { .. }\n        | Operator::GlobalAtomicSet { .. }\n        | Operator::GlobalAtomicRmwAdd { .. }\n        | Operator::GlobalAtomicRmwSub { .. }\n        | Operator::GlobalAtomicRmwAnd { .. }\n        | Operator::GlobalAtomicRmwOr { .. }\n        | Operator::GlobalAtomicRmwXor { .. }\n        | Operator::GlobalAtomicRmwXchg { .. }\n        | Operator::GlobalAtomicRmwCmpxchg { .. }\n        | Operator::TableAtomicGet { .. }\n        | Operator::TableAtomicSet { .. }\n        | Operator::TableAtomicRmwXchg { .. }\n        | Operator::TableAtomicRmwCmpxchg { .. }\n        | Operator::StructAtomicGet { .. }\n        | Operator::StructAtomicGetS { .. }\n        | Operator::StructAtomicGetU { .. }\n        | Operator::StructAtomicSet { .. }\n        | Operator::StructAtomicRmwAdd { .. }\n        | Operator::StructAtomicRmwSub { .. }\n        | Operator::StructAtomicRmwAnd { .. }\n        | Operator::StructAtomicRmwOr { .. }\n        | Operator::StructAtomicRmwXor { .. }\n        | Operator::StructAtomicRmwXchg { .. }\n        | Operator::StructAtomicRmwCmpxchg { .. }\n        | Operator::ArrayAtomicGet { .. }\n        | Operator::ArrayAtomicGetS { .. }\n        | Operator::ArrayAtomicGetU { .. }\n        | Operator::ArrayAtomicSet { .. }\n        | Operator::ArrayAtomicRmwAdd { .. }\n        | Operator::ArrayAtomicRmwSub { .. }\n        | Operator::ArrayAtomicRmwAnd { .. }\n        | Operator::ArrayAtomicRmwOr { .. }\n        | Operator::ArrayAtomicRmwXor { .. }\n        | Operator::ArrayAtomicRmwXchg { .. 
}\n        | Operator::ArrayAtomicRmwCmpxchg { .. }\n        | Operator::RefI31Shared => todo!(\"{operator:?}\"),\n    }\n}\n\npub(crate) fn gas_metering_middleware(initial_limit: u64) -> Arc<dyn ModuleMiddleware> {\n    Arc::new(Metering::new(initial_limit, |operator| {\n        cycles(operator) * MULTIPLIER / SCALING_FACTOR\n    }))\n}\n"
  },
  {
    "path": "executor/wasmer_backend/src/middleware/gatekeeper.rs",
    "content": "use wasmer::{wasmparser::Operator, FunctionMiddleware, MiddlewareError, ModuleMiddleware};\n\nconst MIDDLEWARE_NAME: &str = \"Gatekeeper\";\nconst FLOATING_POINTS_NOT_ALLOWED: &str = \"Floating point opcodes are not allowed\";\n\n#[inline]\nfn extension_not_allowed_error(extension: &str) -> MiddlewareError {\n    MiddlewareError::new(\n        MIDDLEWARE_NAME,\n        format!(\"Wasm `{extension}` extension is not allowed\"),\n    )\n}\n\n#[derive(Copy, Clone, Debug)]\npub(crate) struct GatekeeperConfig {\n    /// Allow the `bulk_memory` proposal.\n    bulk_memory: bool,\n    /// Allow the `exceptions` proposal.\n    exceptions: bool,\n    /// Allow the `function_references` proposal.\n    function_references: bool,\n    /// Allow the `gc` proposal.\n    gc: bool,\n    /// Allow the `legacy_exceptions` proposal.\n    #[allow(dead_code)]\n    legacy_exceptions: bool,\n    /// Allow the `memory_control` proposal.\n    memory_control: bool,\n    /// Allow the `mvp` proposal.\n    mvp: bool,\n    /// Allow the `reference_types` proposal.\n    reference_types: bool,\n    /// Allow the `relaxed_simd` proposal.\n    relaxed_simd: bool,\n    /// Allow the `saturating_float_to_int` proposal.\n    ///\n    /// This *requires* canonicalized NaNs enabled in the compiler config.\n    saturating_float_to_int: bool,\n    /// Allow the `shared_everything_threads` proposal.\n    #[allow(dead_code)]\n    shared_everything_threads: bool,\n    /// Allow the `sign_extension` proposal.\n    sign_extension: bool,\n    /// Allow the `simd` proposal.\n    simd: bool,\n    /// Allow the `stack_switching` proposal.\n    #[allow(dead_code)]\n    stack_switching: bool,\n    /// Allow the `tail_call` proposal.\n    tail_call: bool,\n    /// Allow the `threads` proposal.\n    threads: bool,\n    /// Allow the `wide_arithmetic` proposal.\n    #[allow(dead_code)]\n    wide_arithmetic: bool,\n    /// Allow floating point opcodes from `mvp` extension.\n    ///\n    /// This 
*requires* canonicalized NaNs enabled in the compiler config.\n    allow_floating_points: bool,\n}\n\n/// Check if the operator is a floating point operator.\n#[inline]\nconst fn is_floating_point(operator: &wasmer::wasmparser::Operator<'_>) -> bool {\n    match operator {\n        // mvp\n        Operator::F32Load {..} |\n        Operator::F64Load {..} |\n        Operator::F32Store {..} |\n        Operator::F64Store {..} |\n        Operator::F32Const {..} |\n        Operator::F64Const {..} |\n        Operator::F32Abs |\n        Operator::F32Neg |\n        Operator::F32Ceil |\n        Operator::F32Floor |\n        Operator::F32Trunc |\n        Operator::F32Nearest |\n        Operator::F32Sqrt |\n        Operator::F32Add |\n        Operator::F32Sub |\n        Operator::F32Mul |\n        Operator::F32Div |\n        Operator::F32Min |\n        Operator::F32Max |\n        Operator::F32Copysign |\n        Operator::F64Abs |\n        Operator::F64Neg |\n        Operator::F64Ceil |\n        Operator::F64Floor |\n        Operator::F64Trunc |\n        Operator::F64Nearest |\n        Operator::F64Sqrt |\n        Operator::F64Add |\n        Operator::F64Sub |\n        Operator::F64Mul |\n        Operator::F64Div |\n        Operator::F64Min |\n        Operator::F64Max |\n        Operator::F64Copysign |\n        Operator::F32Eq |\n        Operator::F32Ne |\n        Operator::F32Lt |\n        Operator::F32Gt |\n        Operator::F32Le |\n        Operator::F32Ge |\n        Operator::F64Eq |\n        Operator::F64Ne |\n        Operator::F64Lt |\n        Operator::F64Gt |\n        Operator::F64Le |\n        Operator::F64Ge |\n        Operator::I32TruncF32S |\n        Operator::I32TruncF32U |\n        Operator::I32TruncF64S |\n        Operator::I32TruncF64U |\n        Operator::I64TruncF32S |\n        Operator::I64TruncF32U |\n        Operator::I64TruncF64S |\n        Operator::I64TruncF64U |\n        Operator::F32ConvertI32S |\n        Operator::F32ConvertI32U |\n        
Operator::F32ConvertI64S |\n        Operator::F32ConvertI64U |\n        Operator::F32DemoteF64 |\n        Operator::F64ConvertI32S |\n        Operator::F64ConvertI32U |\n        Operator::F64ConvertI64S |\n        Operator::F64ConvertI64U |\n        Operator::F64PromoteF32 |\n        Operator::I32ReinterpretF32 |\n        Operator::I64ReinterpretF64 |\n        Operator::F32ReinterpretI32 |\n        Operator::F64ReinterpretI64 |\n        // saturating_float_to_int\n        Operator::I32TruncSatF32S |\n        Operator::I32TruncSatF32U |\n        Operator::I32TruncSatF64S |\n        Operator::I32TruncSatF64U |\n        Operator::I64TruncSatF32S |\n        Operator::I64TruncSatF32U |\n        Operator::I64TruncSatF64S |\n        Operator::I64TruncSatF64U |\n        // simd\n        Operator::F32x4ExtractLane{..} |\n        Operator::F32x4ReplaceLane{..} |\n        Operator::F64x2ExtractLane{..} |\n        Operator::F64x2ReplaceLane{..} |\n        Operator::F32x4Splat |\n        Operator::F64x2Splat |\n        Operator::F32x4Eq |\n        Operator::F32x4Ne |\n        Operator::F32x4Lt |\n        Operator::F32x4Gt |\n        Operator::F32x4Le |\n        Operator::F32x4Ge |\n        Operator::F64x2Eq |\n        Operator::F64x2Ne |\n        Operator::F64x2Lt |\n        Operator::F64x2Gt |\n        Operator::F64x2Le |\n        Operator::F64x2Ge |\n        Operator::F32x4Ceil |\n        Operator::F32x4Floor |\n        Operator::F32x4Trunc |\n        Operator::F32x4Nearest |\n        Operator::F32x4Abs |\n        Operator::F32x4Neg |\n        Operator::F32x4Sqrt |\n        Operator::F32x4Add |\n        Operator::F32x4Sub |\n        Operator::F32x4Mul |\n        Operator::F32x4Div |\n        Operator::F32x4Min |\n        Operator::F32x4Max |\n        Operator::F32x4PMin |\n        Operator::F32x4PMax |\n        Operator::F64x2Ceil |\n        Operator::F64x2Floor |\n        Operator::F64x2Trunc |\n        Operator::F64x2Nearest |\n        Operator::F64x2Abs |\n        
Operator::F64x2Neg |\n        Operator::F64x2Sqrt |\n        Operator::F64x2Add |\n        Operator::F64x2Sub |\n        Operator::F64x2Mul |\n        Operator::F64x2Div |\n        Operator::F64x2Min |\n        Operator::F64x2Max |\n        Operator::F64x2PMin |\n        Operator::F64x2PMax |\n        Operator::I32x4TruncSatF32x4S |\n        Operator::I32x4TruncSatF32x4U |\n        Operator::F32x4ConvertI32x4S |\n        Operator::F32x4ConvertI32x4U |\n        Operator::I32x4TruncSatF64x2SZero |\n        Operator::I32x4TruncSatF64x2UZero |\n        Operator::F64x2ConvertLowI32x4S |\n        Operator::F64x2ConvertLowI32x4U |\n        Operator::F32x4DemoteF64x2Zero |\n        Operator::F64x2PromoteLowF32x4 |\n        // relaxed_simd extension\n        Operator::I32x4RelaxedTruncF32x4S |\n        Operator::I32x4RelaxedTruncF32x4U |\n        Operator::I32x4RelaxedTruncF64x2SZero |\n        Operator::I32x4RelaxedTruncF64x2UZero |\n        Operator::F32x4RelaxedMadd |\n        Operator::F32x4RelaxedNmadd |\n        Operator::F64x2RelaxedMadd |\n        Operator::F64x2RelaxedNmadd |\n        Operator::F32x4RelaxedMin |\n        Operator::F32x4RelaxedMax |\n        Operator::F64x2RelaxedMin |\n        Operator::F64x2RelaxedMax => true,\n        _ => false,\n    }\n}\n\nimpl Default for GatekeeperConfig {\n    fn default() -> Self {\n        Self {\n            bulk_memory: true,\n            exceptions: false,\n            function_references: false,\n            gc: false,\n            legacy_exceptions: false,\n            memory_control: false,\n            mvp: true,\n            reference_types: false,\n            relaxed_simd: false,\n            saturating_float_to_int: false,\n            shared_everything_threads: false,\n            sign_extension: true,\n            simd: false,\n            stack_switching: false,\n            tail_call: false,\n            threads: false,\n            wide_arithmetic: false,\n            // Not yet ready to enable this; needs 
updated benchmark to accomodate overhead of\n            // canonicalized NaNs and manual validation.\n            allow_floating_points: false,\n        }\n    }\n}\n\n#[derive(Debug, Default)]\npub(crate) struct Gatekeeper {\n    config: GatekeeperConfig,\n}\n\nimpl Gatekeeper {\n    pub(crate) fn new(config: GatekeeperConfig) -> Self {\n        Self { config }\n    }\n}\n\nimpl ModuleMiddleware for Gatekeeper {\n    fn generate_function_middleware(\n        &self,\n        _local_function_index: wasmer::LocalFunctionIndex,\n    ) -> Box<dyn wasmer::FunctionMiddleware> {\n        Box::new(FunctionGatekeeper::new(self.config))\n    }\n}\n\n#[derive(Debug)]\nstruct FunctionGatekeeper {\n    config: GatekeeperConfig,\n}\n\nimpl FunctionGatekeeper {\n    fn new(config: GatekeeperConfig) -> Self {\n        Self { config }\n    }\n\n    /// Ensure that floating point opcodes are allowed.\n    fn ensure_floating_point_allowed(\n        &self,\n        operator: &wasmer::wasmparser::Operator<'_>,\n    ) -> Result<(), wasmer::MiddlewareError> {\n        if !self.config.allow_floating_points && is_floating_point(operator) {\n            return Err(MiddlewareError::new(\n                MIDDLEWARE_NAME,\n                FLOATING_POINTS_NOT_ALLOWED,\n            ));\n        }\n        Ok(())\n    }\n\n    fn validated_push_operator<'b, 'a: 'b>(\n        &self,\n        operator: wasmer::wasmparser::Operator<'a>,\n        state: &mut wasmer::MiddlewareReaderState<'b>,\n    ) -> Result<(), wasmer::MiddlewareError> {\n        // This is a late check as we first check if given extension is allowed and then check if\n        // floating point opcodes are allowed. 
This is because different Wasm extensions do\n        // contain floating point opcodes and this approach makes all the gatekeeping more robust.\n        self.ensure_floating_point_allowed(&operator)?;\n        // Push the operator to the state.\n        state.push_operator(operator);\n        Ok(())\n    }\n\n    fn bulk_memory<'b, 'a: 'b>(\n        &mut self,\n        operator: wasmer::wasmparser::Operator<'a>,\n        state: &mut wasmer::MiddlewareReaderState<'b>,\n    ) -> Result<(), wasmer::MiddlewareError> {\n        if self.config.bulk_memory {\n            self.validated_push_operator(operator, state)?;\n            Ok(())\n        } else {\n            Err(extension_not_allowed_error(\"bulk_memory\"))\n        }\n    }\n\n    fn exceptions<'b, 'a: 'b>(\n        &mut self,\n        operator: wasmer::wasmparser::Operator<'a>,\n        state: &mut wasmer::MiddlewareReaderState<'b>,\n    ) -> Result<(), wasmer::MiddlewareError> {\n        if self.config.exceptions {\n            self.validated_push_operator(operator, state)?;\n            Ok(())\n        } else {\n            Err(extension_not_allowed_error(\"exceptions\"))\n        }\n    }\n\n    fn function_references<'b, 'a: 'b>(\n        &mut self,\n        operator: wasmer::wasmparser::Operator<'a>,\n        state: &mut wasmer::MiddlewareReaderState<'b>,\n    ) -> Result<(), wasmer::MiddlewareError> {\n        if self.config.function_references {\n            self.validated_push_operator(operator, state)?;\n            Ok(())\n        } else {\n            Err(extension_not_allowed_error(\"function_references\"))\n        }\n    }\n\n    fn gc<'b, 'a: 'b>(\n        &mut self,\n        operator: wasmer::wasmparser::Operator<'a>,\n        state: &mut wasmer::MiddlewareReaderState<'b>,\n    ) -> Result<(), wasmer::MiddlewareError> {\n        if self.config.gc {\n            self.validated_push_operator(operator, state)?;\n            Ok(())\n        } else {\n            
Err(extension_not_allowed_error(\"gc\"))\n        }\n    }\n\n    #[allow(dead_code)]\n    fn legacy_exceptions<'b, 'a: 'b>(\n        &mut self,\n        operator: wasmer::wasmparser::Operator<'a>,\n        state: &mut wasmer::MiddlewareReaderState<'b>,\n    ) -> Result<(), wasmer::MiddlewareError> {\n        if self.config.legacy_exceptions {\n            self.validated_push_operator(operator, state)?;\n            Ok(())\n        } else {\n            Err(extension_not_allowed_error(\"legacy_exceptions\"))\n        }\n    }\n\n    fn memory_control<'b, 'a: 'b>(\n        &mut self,\n        operator: wasmer::wasmparser::Operator<'a>,\n        state: &mut wasmer::MiddlewareReaderState<'b>,\n    ) -> Result<(), wasmer::MiddlewareError> {\n        if self.config.memory_control {\n            self.validated_push_operator(operator, state)?;\n            Ok(())\n        } else {\n            Err(extension_not_allowed_error(\"memory_control\"))\n        }\n    }\n\n    fn mvp<'b, 'a: 'b>(\n        &mut self,\n        operator: wasmer::wasmparser::Operator<'a>,\n        state: &mut wasmer::MiddlewareReaderState<'b>,\n    ) -> Result<(), wasmer::MiddlewareError> {\n        if self.config.mvp {\n            self.validated_push_operator(operator, state)?;\n            Ok(())\n        } else {\n            Err(extension_not_allowed_error(\"mvp\"))\n        }\n    }\n\n    fn reference_types<'b, 'a: 'b>(\n        &mut self,\n        operator: wasmer::wasmparser::Operator<'a>,\n        state: &mut wasmer::MiddlewareReaderState<'b>,\n    ) -> Result<(), wasmer::MiddlewareError> {\n        if self.config.reference_types {\n            self.validated_push_operator(operator, state)?;\n            Ok(())\n        } else {\n            Err(extension_not_allowed_error(\"reference_types\"))\n        }\n    }\n\n    fn relaxed_simd<'b, 'a: 'b>(\n        &mut self,\n        operator: wasmer::wasmparser::Operator<'a>,\n        state: &mut wasmer::MiddlewareReaderState<'b>,\n    ) -> 
Result<(), wasmer::MiddlewareError> {\n        if self.config.relaxed_simd {\n            self.validated_push_operator(operator, state)?;\n            Ok(())\n        } else {\n            Err(extension_not_allowed_error(\"relaxed_simd\"))\n        }\n    }\n\n    fn saturating_float_to_int<'b, 'a: 'b>(\n        &mut self,\n        operator: wasmer::wasmparser::Operator<'a>,\n        state: &mut wasmer::MiddlewareReaderState<'b>,\n    ) -> Result<(), wasmer::MiddlewareError> {\n        if self.config.saturating_float_to_int {\n            self.validated_push_operator(operator, state)?;\n            Ok(())\n        } else {\n            Err(extension_not_allowed_error(\"saturating_float_to_int\"))\n        }\n    }\n\n    #[allow(dead_code)]\n    fn shared_everything_threads<'b, 'a: 'b>(\n        &mut self,\n        operator: wasmer::wasmparser::Operator<'a>,\n        state: &mut wasmer::MiddlewareReaderState<'b>,\n    ) -> Result<(), wasmer::MiddlewareError> {\n        if self.config.shared_everything_threads {\n            self.validated_push_operator(operator, state)?;\n            Ok(())\n        } else {\n            Err(extension_not_allowed_error(\"shared_everything_threads\"))\n        }\n    }\n\n    fn sign_extension<'b, 'a: 'b>(\n        &mut self,\n        operator: wasmer::wasmparser::Operator<'a>,\n        state: &mut wasmer::MiddlewareReaderState<'b>,\n    ) -> Result<(), wasmer::MiddlewareError> {\n        if self.config.sign_extension {\n            self.validated_push_operator(operator, state)?;\n            Ok(())\n        } else {\n            Err(extension_not_allowed_error(\"sign_extension\"))\n        }\n    }\n\n    fn simd<'b, 'a: 'b>(\n        &mut self,\n        operator: wasmer::wasmparser::Operator<'a>,\n        state: &mut wasmer::MiddlewareReaderState<'b>,\n    ) -> Result<(), wasmer::MiddlewareError> {\n        if self.config.simd {\n            self.validated_push_operator(operator, state)?;\n            Ok(())\n        } else {\n    
        Err(extension_not_allowed_error(\"simd\"))\n        }\n    }\n\n    #[allow(dead_code)]\n    fn stack_switching<'b, 'a: 'b>(\n        &mut self,\n        operator: wasmer::wasmparser::Operator<'a>,\n        state: &mut wasmer::MiddlewareReaderState<'b>,\n    ) -> Result<(), wasmer::MiddlewareError> {\n        if self.config.stack_switching {\n            self.validated_push_operator(operator, state)?;\n            Ok(())\n        } else {\n            Err(extension_not_allowed_error(\"stack_switching\"))\n        }\n    }\n\n    fn tail_call<'b, 'a: 'b>(\n        &mut self,\n        operator: wasmer::wasmparser::Operator<'a>,\n        state: &mut wasmer::MiddlewareReaderState<'b>,\n    ) -> Result<(), wasmer::MiddlewareError> {\n        if self.config.tail_call {\n            self.validated_push_operator(operator, state)?;\n            Ok(())\n        } else {\n            Err(extension_not_allowed_error(\"tail_call\"))\n        }\n    }\n    fn threads<'b, 'a: 'b>(\n        &mut self,\n        operator: wasmer::wasmparser::Operator<'a>,\n        state: &mut wasmer::MiddlewareReaderState<'b>,\n    ) -> Result<(), wasmer::MiddlewareError> {\n        if self.config.threads {\n            self.validated_push_operator(operator, state)?;\n            Ok(())\n        } else {\n            Err(extension_not_allowed_error(\"threads\"))\n        }\n    }\n\n    #[allow(dead_code)]\n    fn wide_arithmetic<'b, 'a: 'b>(\n        &mut self,\n        operator: wasmer::wasmparser::Operator<'a>,\n        state: &mut wasmer::MiddlewareReaderState<'b>,\n    ) -> Result<(), wasmer::MiddlewareError> {\n        if self.config.wide_arithmetic {\n            self.validated_push_operator(operator, state)?;\n            Ok(())\n        } else {\n            Err(extension_not_allowed_error(\"wide_arithmetic\"))\n        }\n    }\n}\n\nimpl FunctionMiddleware for FunctionGatekeeper {\n    fn feed<'a>(\n        &mut self,\n        operator: wasmer::wasmparser::Operator<'a>,\n        
state: &mut wasmer::MiddlewareReaderState<'a>,\n    ) -> Result<(), wasmer::MiddlewareError> {\n        macro_rules! match_op {\n            ($op:ident { $($payload:tt)* }) => {\n                $op { .. }\n            };\n            ($op:ident) => {\n                $op\n            };\n        }\n\n        macro_rules! gatekeep {\n          ($( @$proposal:ident $op:ident $({ $($payload:tt)* })? => $visit:ident)*) => {{\n                use wasmer::wasmparser::Operator::*;\n                match operator {\n                    $(\n                        match_op!($op $({ $($payload)* })?) => self.$proposal(operator, state),\n                    )*\n                }\n            }}\n        }\n\n        wasmer::wasmparser::for_each_operator!(gatekeep)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use std::sync::Arc;\n    use wasmer::{sys::EngineBuilder, CompilerConfig, Module, Singlepass, Store, WasmError};\n\n    #[test]\n    fn mvp_opcodes_allowed() {\n        let bytecode = wat::parse_str(\n            r#\"\n            (module\n                (func (export \"add\") (param i32 i32) (result i32)\n                    local.get 0\n                    local.get 1\n                    i32.add)\n\n            )\n            \"#,\n        )\n        .unwrap();\n        let mut gatekeeper = Gatekeeper::default();\n        gatekeeper.config.mvp = true;\n        let gatekeeper = Arc::new(gatekeeper);\n        let mut compiler_config = Singlepass::default();\n        compiler_config.push_middleware(gatekeeper);\n        let store = Store::new(EngineBuilder::new(compiler_config));\n        let _module = Module::new(&store, &bytecode).unwrap();\n    }\n    #[test]\n    fn mvp_opcodes_allowed_without_floating_points() {\n        let bytecode = wat::parse_str(\n            r#\"\n            (module\n                (func (export \"add\") (param f32 f32) (result f32)\n                    local.get 0\n                    local.get 1\n                    
f32.add)\n            )\n            \"#,\n        )\n        .unwrap();\n        let mut gatekeeper = Gatekeeper::default();\n        gatekeeper.config.mvp = true;\n        gatekeeper.config.allow_floating_points = false;\n        let gatekeeper = Arc::new(gatekeeper);\n        let mut compiler_config = Singlepass::default();\n        compiler_config.push_middleware(gatekeeper);\n        let store = Store::new(EngineBuilder::new(compiler_config));\n        let error = Module::new(&store, &bytecode).unwrap_err();\n        let middleware = match error {\n            wasmer::CompileError::Wasm(WasmError::Middleware(middleware)) => middleware,\n            _ => panic!(\"Expected a middleware error\"),\n        };\n        assert_eq!(middleware.message, FLOATING_POINTS_NOT_ALLOWED);\n    }\n\n    #[test]\n    fn mvp_opcodes_allowed_with_floating_points() {\n        let bytecode = wat::parse_str(\n            r#\"\n            (module\n                (func (export \"add\") (param f32 f32) (result f32)\n                    local.get 0\n                    local.get 1\n                    f32.add)\n            )\n            \"#,\n        )\n        .unwrap();\n        let mut gatekeeper = Gatekeeper::default();\n        gatekeeper.config.mvp = true;\n        gatekeeper.config.allow_floating_points = true;\n        let gatekeeper = Arc::new(gatekeeper);\n        let mut compiler_config = Singlepass::default();\n        compiler_config.push_middleware(gatekeeper);\n        let store = Store::new(EngineBuilder::new(compiler_config));\n        let _module = Module::new(&store, &bytecode).unwrap();\n    }\n    #[test]\n    fn mvp_opcodes_not_allowed() {\n        let bytecode = wat::parse_str(\n            r#\"\n            (module\n                (func (export \"add\") (param i32 i32) (result i32)\n                    local.get 0\n                    local.get 1\n                    i32.add)\n            )\n            \"#,\n        )\n        .unwrap();\n        let mut 
gatekeeper = Gatekeeper::default();\n        gatekeeper.config.mvp = false;\n        let gatekeeper = Arc::new(gatekeeper);\n        let mut compiler_config = Singlepass::default();\n        compiler_config.push_middleware(gatekeeper);\n        let store = Store::new(EngineBuilder::new(compiler_config));\n        let error = Module::new(&store, &bytecode).unwrap_err();\n        assert_eq!(error.to_string(), \"WebAssembly translation error: Error in middleware Gatekeeper: Wasm `mvp` extension is not allowed\");\n    }\n}\n"
  },
  {
    "path": "executor/wasmer_backend/src/middleware.rs",
    "content": "pub(crate) mod gas_metering;\npub(crate) mod gatekeeper;\n"
  },
  {
    "path": "generate-chainspec.sh",
    "content": "#!/usr/bin/env bash\n\nset -o errexit\nset -o nounset\nset -o pipefail\n\ngenerate_timestamp() {\n    local DELAY=${1}\n\n    local SCRIPT=(\n        \"from datetime import datetime, timedelta, timezone;\"\n        \"print((datetime.now(timezone.utc).replace(tzinfo=None) + timedelta(seconds=${DELAY})).isoformat('T') + 'Z')\"\n    )\n\n    python3 -c \"${SCRIPT[*]}\"\n}\n\ngenerate_chainspec() {\n    local BASEDIR=${1}\n    local TIMESTAMP=${2}\n    local SOURCE=\"${BASEDIR}/resources/local/chainspec.toml.in\"\n    local TARGET=\"${BASEDIR}/resources/local/chainspec.toml\"\n\n    export BASEDIR\n    export TIMESTAMP\n\n    echo \"Generating chainspec...\"\n    envsubst < ${SOURCE} > ${TARGET}\n}\n\nmain() {\n    local DELAY=${1:-40}\n    local BASEDIR=\"$(readlink -f $(dirname ${0}))\"\n    local TIMESTAMP=\"$(generate_timestamp ${DELAY})\"\n\n    generate_chainspec ${BASEDIR} ${TIMESTAMP}\n}\n\nmain $@\n"
  },
  {
    "path": "node/BINARY_PORT_PROTOCOL.md",
    "content": "# The Binary Port Protocol\n\nThis page specifies the protocol of casper nodes Binary Port.\n\n## Synopsis\n\nThe protocol consists of one party (the client) sending requests to another party (the server) and the server sending responses back to the client.\nThe Binary Port communication protocol is binary and supports a long lived tcp connection. Once the tcp connection is open the binary port assumes a series of request-response messages. It is not supported to send a second request before receiving the entirety of the response to the first one via one tcp connection. Both requests and responses are have envelopes containing some metadata.\n\n### Request format\n\n| Size in bytes | Field    | Description                                                                                                                                                                                                                      |\n| ------------- | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |\n| 4             | length   | A LE encoded number of bytes of all the subsequent fields (excluding the length itself). Based on this number the server \"knows\" where the binary request ends.                                                                  |\n| 2             | version  | Version of the binary port header serialized as a single u16 number. The server handles only strictly specified versions and can deny service if the version doesn't meet it's expectation. The current supported version is `1` |\n| 1             | type_tag | Tag identifying the request.                                                                                                                                                                                                     
|\n| 2             | id       | An identifier that should dbe understandable to the client and should facilitate correlating requests with responses                                                                                                             |\n| variable      | payload  | Payload to be interpreted according to the `type_tag`.                                                                                                                                                                           |\n\n### Response format\n\n| Size in bytes | Field          | Description                                                                                                                                                                                 |\n| ------------- | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |\n| 4             | length         | A LE encoded number of bytes of all the subsequent fields (excluding the length itself). Based on this number the client should \"know\" where the binary response ends.                      |\n| 4             | Request length | number of bytes of the `request` field.                                                                                                                                                     |\n| variable      | request        | The raw binary request that was provided by the client (including the requests `length` field).                                                                                             |\n| 2             | version        | Version of the binary port response structure. Currently supported version is `1`                                                                                                           |\n| 2             | error_code     | Error code, where 0 indicates success.                           
                                                                                                                           |\n| 1-2           | response_type  | Optional payload type tag (first byte being 1 indicates that it exists).                                                                                                                    |\n| 4             | payload_length | Number of bytes of the var-length `payload` field.                                                                                                                                          |\n| Variable      | payload        | Payload to be interpreted according to the `response_type`. If there is no response, or the response was erroneous this field will have 0 bytes and `payload_length` will be the number `0` |\n\n**Notes:** `variable` means that the payload size is variable and depends on the tag.\n\n## Request model details\n\nCurrently, there are 3 supported types of requests, but the request model can be extended. The request types are:\n\n- A `Get` request, which is one of:\n  - A `Record` request asking for a record with an extensible `RecordId` tag and a key\n  - An `Information` request asking for a piece of information with an extensible `InformationRequestTag` tag and a key\n  - A `State` request asking for some data from global state. This can be:\n    - An `Item` request asking for a single item given a `Key`\n    - An `AllItems` request asking for all items given a `KeyTag`\n    - A `Trie` request asking for a trie given a `Digest`\n- A `TryAcceptTransaction` request for a transaction to be accepted and executed\n- A `TrySpeculativeExec` request for a transaction to be executed speculatively, without saving the transaction effects in global state\n"
  },
  {
    "path": "node/CHANGELOG.md",
    "content": "# Changelog\n\nAll notable changes to this project will be documented in this file.  The format is based on [Keep a Changelog].\n\n[comment]: <> (Added:      new features)\n[comment]: <> (Changed:    changes in existing functionality)\n[comment]: <> (Deprecated: soon-to-be removed features)\n[comment]: <> (Removed:    now removed features)\n[comment]: <> (Fixed:      any bug fixes)\n[comment]: <> (Security:   in case of vulnerabilities)\n\n## 2.1.2\n### Fixed\n* Fixed an issue in the storage create which allowed delegators to exceed the maximum limit set by the validator for the validator's bid\n\n### Changed\n* Changed the behavior of withdraw bid to return an UnbondAmountTooLarge error instead of triggering a unbonding of the validator and their delegators.\n\n## 2.1.1\n\n### Fixed\n* Fixed an issue in the storage crate regarding incorrect setting of maximum delegator amount on validator bids\n\n## 2.1.0\n### Added\n* `TransactionInvocationTarget::ByPackageHash` has a new field `version_key`\n* `TransactionInvocationTarget::ByPackageName` has a new field `version_key`\n\n### Changed\n* Transaction::Deploy no longer supports using (in `payment` or `session`) when the `ExecutableDeployItem::StoredVersionedContractByHash` with field `version` (the field is retained for retro compatiblity but new transactions will be rejected by TransactionAcceptor). To execute a stored contract in a specific version please use Transaction::V1.\n* Transaction::Deploy no longer supports using (in `payment` or `session`) when the `ExecutableDeployItem::StoredVersionedContractByName` with field `version` (the field is retained for retro compatiblity but new transactions will be rejected by TransactionAcceptor). 
To execute a stored contract in a specific version please use Transaction::V1.\n* Transaction::V1 no longer supports using `TransactionInvocationTarget::ByPackageHash` variant with `version` defined (the field is retained for retro compatiblity but new transactions will be rejected by TransactionAcceptor). Please use `version_key` instead.\n* Transaction::V1 no longer supports using `TransactionInvocationTarget::ByPackageName` variant with `version` defined (the field is retained for retro compatiblity but new transactions will be rejected by TransactionAcceptor). Please use `version_key` instead.\n\n## 2.0.0\n\n### Added\n* Add `BinaryPort` interface along with the relevant config entries.\n* Added chainspec settings `finders_fee`, `finality_signature_proportion` and `signature_rewards_max_delay` to control behavior of the new seigniorage model.\n* Isolated sync handling, which comes online with only local data and rejects peers. Useful for testing, auditing, and similar scenarios.\n\n### Changed\n* All SSE events are emitted via the `<IP:Port>/events` endpoint. 
None of the previous ones (`/events/main`, `/events/deploys`, and `/events/sigs`) is available any longer.\n* `DeployBuffer` was renamed to `TransactionBuffer` along with the related metrics.\n* Switch blocks and the creation and propagation of signatures on them are now rewarded.\n* Era end reports now record rewards as motes rather than scores.\n* Seigniorage model is now independent of the details of consensus (and compatible with both Highway and Zug) and based solely upon block proposals, signature generation and signature distribution by validators.\n\n### Removed\n* Remove the JSON-RPC and speculative execution interfaces.\n* Remove chainspec setting `highway.performance_meter.blocks_to_consider` and the entire `highway.performance_meter` section.\n* Remove chainspec setting `highway.reduced_reward_multiplier`\n\n## 1.5.6\n\n### Changed\n* The node will recognise if a pending upgrade is unstaged and will avoid shutting down for upgrade in this case.\n* If an upgrade with the same activation point as the current one is detected on startup, the node will immediately shut down for upgrade.\n* Reduce chainspec setting `deploys.max_ttl` from 18 hours to 2 hours.\n\n## 1.5.5\n\n### Added\n* New chainspec setting `highway.performance_meter.blocks_to_consider` with a value of 10, meaning that nodes will take 10 most recent blocks into account when determining their performance in Highway for the purpose of choosing their round lengths.\n\n## 1.5.4\n\n### Added\n* New environment variable `CL_EVENT_QUEUE_DUMP_THRESHOLD` to enable dumping of queue event counts to log when a certain threshold is exceeded.\n* Add initial support for private chains.\n* Add support for CA signed client certificates for private chains.\n* Add a Highway Analysis tool for checking the state of the consensus.\n\n### Changed\n* Minimum block time reduced from 32.768s to 16.384s, with corresponding changes to related chainspec settings:\n  * `core.minimum_block_time` reduced to `16384 ms`.\n  * 
`core.round_seigniorage_rate` reduced to `[7, 175070816]`.\n  * `highway.block_gas_limit` reduced to `4_000_000_000_000`.\n* The `state_identifier` parameter of the `query_global_state` JSON-RPC method is now optional. If no `state_identifier` is specified, the highest complete block known to the node will be used to fulfill the request.\n* `state_get_account_info` RPC handler can now handle an `AccountIdentifier` as a parameter.\n* Replace the `sync_to_genesis` node config field with `sync_handling`.\n  * The new `sync_handling` field accepts three values:\n    - `genesis` - node will attempt to acquire all block data back to genesis\n    - `ttl` - node will attempt to acquire all block data to comply with time to live enforcement\n    - `nosync` - node will only acquire blocks moving forward\n* Make the `network.estimator_weights` section of the node config more fine-grained to provide more precise throttling of non-validator traffic.\n\n### Removed\n* The section `consensus.highway.round_success_meter` has been removed from the config file as no longer relevant with the introduction of a new method of determining the round exponent in Highway.\n\n### Fixed\n* Now possible to build outside a git repository context (e.g. from a source tarball). 
In such cases, the node's build version (as reported vie status endpoints) will not contain a trailing git short hash.\n* Remove an error that would unnecessarily be raised when a node includes its highest orphaned block within the current era.\n* Short-circuit initialization of block and deploy metadata DB to resolve delays after an upgrade.\n\n### Security\n* Update `openssl` to version 0.10.55 as mitigation for [RUSTSEC-2023-0044](https://rustsec.org/advisories/RUSTSEC-2023-0044).\n\n\n\n## 1.5.3\n\n### Added\n* Add `deploy_acceptor` section to config with a single option `timestamp_leeway` to allow a small leeway when deciding if a deploy is future-dated.\n* Add `deploys.max_timestamp_leeway` chainspec option to define the upper limit for the new config option `deploy_acceptor.timestamp_leeway`.\n* Add `block_validator.max_completed_entries` config option to control the number of recently validated proposed blocks to retain.\n\n### Changed\n* Change the limit of the `core_config.simultaneous_peer_requests` chainspec parameter to 255.\n* Optimize the `BlockValidator` component to reduce the number of simultaneous fetch events created for a given proposed block.\n\n### Fixed\n* Fix issue in `chain_get_block_transfers` JSON-RPC where blocks with no deploys could be reported as having `null` transfers rather than `[]`.\n* Fix issue in `chain_get_block_transfers` JSON-RPC where blocks containing successful transfers could erroneously be reported as having none.\n\n### Removed\n* Remove the `block_synchronizer.stall_limit` node config parameter since it is no longer needed.\n\n\n\n## 1.5.2\n\n### Added\n* Added the `cors_origin` config option under the `[rest_server]`, `[rpc_server]`, `[event_stream_server]` and `[speculative_exec_server]` sections to allow configuration of the CORS Origin.\n\n\n\n## 1.5.1\n\n### Added\n* Added the `upgrade_timeout` config option under the `[node]` section.\n\n### Changed\n* `speculative_exec` server now routes deploys to 
`DeployAcceptor` for more comprehensive validation, including cryptographic verification of signatures.\n\n\n\n## 1.5.0-rc.1\n\n### Added\n* Introduce fast-syncing to join the network, avoiding the need to execute every block to catch up.\n* Add config sections for new components to support fast-sync: `[block_accumulator]`, `[block_synchronizer]`, `[deploy_buffer]` and `[upgrade_watcher]`.\n* Add new Zug consensus protocol, disabled by default, along with a new `[consensus.zug]` config section.\n* Add a `consensus_protocol` option to the chainspec to choose a consensus protocol, and a `minimum_block_time` setting for the minimum difference between a block's timestamp and its child's.\n* Add a `vesting_schedule_period` option to the chainspec to define the period in which genesis validators' bids are released over time after they are unlocked.\n* Add a `simultaneous_peer_requests` option to the chainspec to define the maximum number of simultaneous block-sync and sync-leap requests.\n* Add following config options under `[node]` section to support fast-sync:\n  * `sync_to_genesis` which if set to `true` will cause the node to retrieve all blocks, deploys and global state back to genesis.\n  * `idle_tolerance` which defines the time after which the syncing process is considered stalled.\n  * `max_attempts` which defines the maximum number of attempts to sync before exiting the node process after the syncing process is considered stalled.\n  * `control_logic_default_delay` which defines the default delay for the control events that have no dedicated delay requirements.\n  * `force_resync` which if set to `true` will cause the node to resync all of the blocks.\n* Add following config options under `[network]` section:\n  * `min_peers_for_initialization` which defines the minimum number of fully-connected peers to consider network component initialized.\n  * `handshake_timeout` which defines connection handshake timeouts (they were hardcoded at 20 seconds previously).\n 
 * `max_incoming_peer_connections` which defines the maximum number of incoming connections per unique peer allowed.\n  * `max_in_flight_demands` which defines the maximum number of in-flight requests for data from a single peer.\n  * `tarpit_version_threshold`, `tarpit_duration` and `tarpit_chance` to configure the tarpitting feature, designed to reduce the impact of old node versions making repeated, rapid reconnection attempts.\n  * `blocklist_retain_duration` which defines how long peers remain blocked after they get blocklisted.\n  * optional `[network.identity]` section to support loading existing network identity certificates signed by a certificate authority.\n  * In addition to `consensus` and `deploy_requests`, the following values can now be controlled via the `[network.estimator_weights]` section in config: `gossip`, `finality_signatures`, `deploy_responses`, `block_requests`, `block_responses`, `trie_requests` and `trie_responses`.\n* The network handshake now contains the hash of the chainspec used and will be successful only if they match.\n* Checksums for execution results and deploy approvals are written to global state after each block execution.\n* Add a new config option `[rpc_server.max_body_bytes]` to allow a configurable value for the maximum size of the body of a JSON-RPC request.\n* Add `enable_server` option to all HTTP server configuration sections (`rpc_server`, `rest_server`, `event_stream_server`) which allow users to enable/disable each server independently (enabled by default).\n* Add `enable_server`, `address`, `qps_limit` and `max_body_bytes` to new `speculative_exec_server` section to `config.toml` to configure speculative execution JSON-RPC server (disabled by default).\n* Add new event to the main SSE server stream across all endpoints `<IP:PORT>/events/*` which emits a shutdown event when the node shuts down.\n* Add following fields to the `/status` endpoint and the `info_get_status` JSON-RPC:\n  * `reactor_state` indicating 
the node's current operating mode.\n  * `last_progress` indicating the time the node last made progress.\n  * `available_block_range` indicating the highest contiguous sequence of the block chain for which the node has complete data.\n  * `block_sync` indicating the state of the block synchronizer component.\n* Add new REST `/chainspec` and JSON-RPC `info_get_chainspec` endpoints that return the raw bytes of the `chainspec.toml`, `accounts.toml` and `global_state.toml` files as read at node startup.\n* Add a new JSON-RPC endpoint `query_balance` which queries for balances under a given `PurseIdentifier`.\n* Add new JSON-RPC endpoint `/speculative_exec` that accepts a deploy and a block hash and executes that deploy, returning the execution effects.\n* Add `strict_argument_checking` to the chainspec to enable strict args checking when executing a contract; i.e. that all non-optional args are provided and of the correct `CLType`.\n* A diagnostics port can now be enabled via the `[diagnostics_port]` config section. 
See the `README.md` for details.\n* Add `SIGUSR2` signal handling to dump the queue in JSON format (see \"Changed\" section for `SIGUSR1`).\n* Add `validate_and_store_timeout` config option under `[gossip]` section to control the time the gossiper waits for another component to validate and store an item received via gossip.\n* Add metrics:\n  * `block_accumulator_block_acceptors`, `block_accumulator_known_child_blocks` to report status of the block accumulator component\n  * `(forward|historical)_block_sync_duration_seconds` to report the progress of block synchronization\n  * `deploy_buffer_total_deploys`, `deploy_buffer_held_deploys`, `deploy_buffer_dead_deploys` to report status of the deploy buffer component\n  * `(lowest|highest)_available_block_height` to report the low/high values of the complete block range (the highest contiguous chain of blocks for which the node has complete data)\n  * `sync_leap_duration_seconds`, `sync_leap_fetched_from_peer_total`, `sync_leap_rejected_by_peer_total`, `sync_leap_cant_fetch_total` to report progress of the sync leaper component\n  * `execution_queue_size` to report the number of blocks enqueued pending execution\n  * `accumulated_(outgoing|incoming)_limiter_delay` to report how much time was spent throttling other peers.\n* Add `testing` feature to casper-node crate to support test-only functionality (random constructors) on blocks and deploys.\n* Connections to unresponsive nodes will be terminated, based on a watchdog feature.\n\n### Changed\n* The `starting_state_root_hash` field from the REST and JSON-RPC status endpoints now represents the state root hash of the lowest block in the available block range.\n* Detection of a crash no longer triggers DB integrity checks to run on node start; the checks can be triggered manually instead.\n* Nodes no longer connect to nodes that do not speak the same protocol version by default.\n* Incoming connections from peers are rejected if they are exceeding the default incoming 
connections per peer limit of 3.\n* Chain automatically creates a switch block immediately after genesis or an upgrade, known as \"immediate switch blocks\".\n* Requests for data from a peer are now de-prioritized over networking messages necessary for consensus and chain advancement.\n* Replace network message format with a more efficient encoding while keeping the initial handshake intact.\n* Flush outgoing messages immediately, trading bandwidth for latency and hence optimizing feedback loops of various components in the system.\n* Move `finality_threshold_fraction` from the `[highway]` to the `[core]` section in the chainspec.\n* Move `max_execution_delay` config option from `[consensus.highway]` to `[consensus]` section.\n* Add CORS behavior to allow any route on the JSON-RPC, REST and SSE servers.\n* The JSON-RPC server now returns more useful responses in many error cases.\n* Add a new parameter to `info_get_deploys` JSON-RPC, `finalized_approvals` - controlling whether the approvals returned with the deploy should be the ones originally received by the node, or overridden by the approvals that were finalized along with the deploy.\n* Support using block height as the `state_identifier` parameter of JSON-RPC `query_global_state` requests.\n* Add new `block_hash` and `block_height` optional fields to JSON-RPC `info_get_deploy` response which will be present when execution results aren't available.\n* JSON-RPC responses which fail to provide requested data will now also include an indication of that node's available block range, i.e. the block heights for which it holds all global state.  
See [#2789](https://github.com/casper-network/casper-node/pull/2789) for an example of the new error response.\n* Add a `lock_status` field to the JSON representation of the `ContractPackage` values.\n* `Key::SystemContractRegistry` is now readable and can be queried via the `query_global_state` JSON-RPC.\n* Unify log messages for blocked nodes and provide more detailed reasons for blocking peers.\n* Rename `current_era` metric to `consensus_current_era`.\n\n### Deprecated\n* `null` should no longer be used as a value for `params` in JSON-RPC requests.  Prefer an empty Array or Object.\n* Deprecate the `chain_height` metric in favor of `highest_available_block_height`.\n\n### Removed\n* Remove legacy synchronization from genesis in favor of fast-sync.\n* Remove config options no longer required due to fast-sync: `[linear_chain_sync]`, `[block_proposer]` and `[consensus.highway.standstill_timeout]`.\n* Remove chainspec setting `[protocol.last_emergency_restart]` as fast sync will use the global state directly for recognizing such restarts instead.\n* Remove a temporary chainspec setting `[core.max_stored_value_size]` which was used to limit the size of individual values stored in global state.\n* Remove config section `[deploy_acceptor]` which only has one option `verify_accounts`, meaning deploys received from clients always undergo account balance checks to assess suitability for execution or not.\n* Remove storage integrity check.\n* Remove `SIGUSR1`/`SIGUSR2` queue dumps in favor of the diagnostics port.\n* Remove `casper-mainnet` feature flag.\n\n### Fixed\n* Limiters for incoming requests and outgoing bandwidth will no longer inadvertently delay some validator traffic when maxed out due to joining nodes.\n* Dropped connections no longer cause the outstanding messages metric to become incorrect.\n* JSON-RPC server is now mostly compliant with the standard. 
Specifically, correct error values are now returned in responses in many failure cases.\n\n### Security\n* Bump `openssl` crate to version 0.10.48, if compiling with vendored OpenSSL to address latest RUSTSEC advisories.\n\n\n\n## 1.4.15-alt\n\n### Changed\n* Update dependencies (in particular `casper-types` to v2.0.0 due to additional `Key` variant).  Note that publishing `1.4.15-alt` is only to rectify the issue where `casper-types` was published as v1.6.0 despite having a breaking change.  It is expected to only be consumed as a crate; there will be no upgrade of Casper Mainnet, Testnet, etc to protocol version `1.4.15-alt`.\n\n\n\n## 1.4.15\n\n### Changed\n* Modified JSON-RPCs `chain_get_era_info_by_switch_block` and `chain_get_era_summary` to use either `Key::EraInfo` or `Key::EraSummary` as appropriate in order to provide useful responses.\n\n\n\n## 1.4.14\n\n### Added\n* Node executes new prune process after executing each block, whereby entries under `Key::EraInfo` are removed in batches of size defined by the new chainspec option `[core.prune_batch_size]`.\n* After executing a switch block, information about that era is stored to global state under a new static key `Key::EraSummary`.\n* Add a new JSON-RPC endpoint `chain_get_era_summary` to retrieve the information stored under `Key::EraSummary`.\n\n### Changed\n* Rather than storing an ever-increasing collection of era information after executing a switch block under `Key::EraInfo`, the node now stores only the information relevant to that era under `Key::EraSummary`.\n* Update `openssl` and `openssl-sys` to latest versions.\n\n### Removed\n* Remove asymmetric key functionality (move to `casper-types` crate behind feature `std`).\n* Remove time types (move to `casper-types` with some functionality behind feature `std`).\n\n### Fixed\n* Fix issue in BlockValidator inhibiting the use of fallback peers to fetch missing deploys.\n\n\n\n## 1.4.13\n\n### Changed\n* Update `casper-execution-engine`.\n\n\n\n## 
1.4.8\n\n### Added\n* Add an `identity` option to load existing network identity certificates signed by a CA.\n\n\n\n### Changed\n* Update `casper-execution-engine`.\n\n\n\n## 1.4.7\n\n### Changed\n* Update `casper-execution-engine` and three `openssl` crates to latest versions.\n\n\n\n## 1.4.6\n\n### Changed\n* Update dependencies to make use of scratch global state in the contract runtime.\n\n\n\n## 1.4.5\n\n### Added\n* Add a temporary chainspec setting `max_stored_value_size` to limit the size of individual values stored in global state.\n* Add a chainspec setting `minimum_delegation_amount` to limit the minimal amount of motes that can be delegated by a first time delegator.\n* Add a chainspec setting `block_max_approval_count` to limit the maximum number of approvals across all deploys in a single block.\n* Add a `finalized_approvals` field to the GetDeploy RPC, which if `true` causes the response to include finalized approvals substituted for the originally-received ones.\n\n### Fixed\n* Include deploy approvals in block payloads upon which consensus operates.\n* Fixes a bug where historical auction data was unavailable via `get-auction-info` RPC.\n\n\n\n## 1.4.4 - 2021-12-29\n\n### Added\n* Add `contract_runtime_latest_commit_step` gauge metric indicating the execution duration of the latest `commit_step` call.\n\n### Changed\n* No longer checksum-hex encode various types.\n\n\n\n## 1.4.3 - 2021-12-06\n\n### Added\n* Add new event to the main SSE server stream accessed via `<IP:Port>/events/main` which emits hashes of expired deploys.\n\n### Changed\n* `enable_manual_sync` configuration parameter defaults to `true`.\n* Default behavior of LMDB changed to use [`NO_READAHEAD`](https://docs.rs/lmdb/0.8.0/lmdb/struct.EnvironmentFlags.html#associatedconstant.NO_READAHEAD).\n\n\n\n## [1.4.2] - 2021-11-11\n\n### Changed\n* There are now less false warnings/errors regarding dropped responders or closed channels during a shutdown, where they are expected and 
harmless.\n* Execution transforms are ordered by insertion order.\n\n### Removed\n* The config option `consensus.highway.unit_hashes_folder` has been removed.\n\n### Fixed\n* The block proposer component now retains pending deploys and transfers across a restart.\n\n\n\n## [1.4.0] - 2021-10-04\n\n### Added\n* Add `enable_manual_sync` boolean option to `[contract_runtime]` in the config.toml which enables manual LMDB sync.\n* Add `contract_runtime_execute_block` histogram tracking execution time of a whole block.\n* Long-running events now log their event type.\n* Individual weights for traffic throttling can now be set through the configuration value `network.estimator_weights`.\n* Add `consensus.highway.max_request_batch_size` configuration parameter. Defaults to 20.\n* New histogram metrics `deploy_acceptor_accepted_deploy` and `deploy_acceptor_rejected_deploy` that track how long the initial verification took.\n* Add gzip content negotiation (using accept-encoding header) to rpc endpoints.\n* Add `state_get_trie` JSON-RPC endpoint.\n* Add `info_get_validator_changes` JSON-RPC endpoint and REST endpoint `validator-changes` that return the status changes of active validators.\n\n### Changed\n* The following Highway timers are now separate, configurable, and optional (if the entry is not in the config, the timer is never called):\n  * `standstill_timeout` causes the node to restart if no progress is made.\n  * `request_state_interval` makes the node periodically request the latest state from a peer.\n  * `log_synchronizer_interval` periodically logs the number of entries in the synchronizer queues.\n* Add support for providing node uptime via the addition of an `uptime` parameter in the response to the `/status` endpoint and the `info_get_status` JSON-RPC.\n* Support building and testing using stable Rust.\n* Log chattiness in `debug` or lower levels has been reduced and performance at `info` or higher slightly improved.\n* The following parameters in the 
`[gossip]` section of the config has been renamed:\n  * `[finished_entry_duration_secs]` => `[finished_entry_duration]`\n  * `[gossip_request_timeout_secs]` => `[gossip_request_timeout]`\n  * `[get_remainder_timeout_secs]` => `[get_remainder_timeout]`\n* The following parameters in config now follow the humantime convention ('30sec', '120min', etc.):\n  * `[network][gossip_interval]`\n  * `[gossip][finished_entry_duration]`\n  * `[gossip][gossip_request_timeout]`\n  * `[gossip][get_remainder_timeout]`\n  * `[fetcher][get_from_peer_timeout]`\n\n### Removed\n* The unofficial support for nix-related derivations and support tooling has been removed.\n* Experimental, nix-based kubernetes testing support has been removed.\n* Experimental support for libp2p has been removed.\n* The `isolation_reconnect_delay` configuration, which has been ignored since 1.3, has been removed.\n* The libp2p-exclusive metrics of `read_futures_in_flight`, `read_futures_total`, `write_futures_in_flight`, `write_futures_total` have been removed.\n\n### Fixed\n* Resolve an issue where `Deploys` with payment amounts exceeding the block gas limit would not be rejected.\n* Resolve issue of duplicated config option `max_associated_keys`.\n\n\n\n## [1.3.2] - 2021-08-02\n\n### Fixed\n* Resolve an issue in the `state_get_dictionary_item` JSON-RPC when a `ContractHash` is used.\n* Corrected network state engine to hold in blocked state for full 10 minutes when encountering out of order race condition.\n\n\n\n## [1.3.1] - 2021-07-26\n\n### Fixed\n* Parametrized sync_timeout and increased value to stop possible post upgrade restart loop.\n\n\n\n## [1.3.0] - 2021-07-19\n\n### Added\n* Add support for providing historical auction information via the addition of an optional block ID in the `state_get_auction_info` JSON-RPC.\n* Exclude inactive validators from proposing blocks.\n* Add validation of the `[protocol]` configuration on startup, to ensure the contained values make sense.\n* Add optional outgoing 
bandwidth limiter to the networking component, controllable via new `[network][max_outgoing_byte_rate_non_validators]` config option.\n* Add optional incoming message limiter to the networking component, controllable via new `[network][max_incoming_message_rate_non_validators]` config option.\n* Add optional in-memory deduplication of deploys, controllable via new `[storage]` config options `[enable_mem_deduplication]` and `[mem_pool_prune_interval]`.\n* Add a new event stream to SSE server accessed via `<IP:Port>/events/deploys` which emits deploys in full as they are accepted.\n* Events now log their ancestors, so detailed tracing of events is possible.\n\n### Changed\n* Major rewrite of the network component, covering connection negotiation and management, periodic housekeeping and logging.\n* Exchange and authenticate Validator public keys in network handshake between peers.\n* Remove needless copying of outgoing network messages.\n* Move finality signatures to separate event stream and change stream endpoints to `/events/main` and `/events/sigs`.\n* Avoid truncating the state root hash when reporting node's status via JSON-RPC or REST servers.\n* The JSON-RPC server waits until an incoming deploy has been sent to storage before responding to the client.\n* Persist event stream event index across node restarts.\n* Separate transfers from other deploys in the block proposer.\n* Enable getting validators for future eras in `EffectBuilder::get_era_validators()`.\n* Improve logging around stalled consensus detection.\n* Skip storage integrity checks if the node didn't previously crash.\n* Update pinned version of Rust to `nightly-2021-06-17`.\n* Changed LMDB flags to reduce flushing and optimize IO performance in the Contract Runtime.\n* Don't shut down by default anymore if stalled. 
To enable set config option `shutdown_on_standstill = true` in `[consensus.highway]`.\n* Major rewrite of the contract runtime component.\n* Ports used for local testing are now determined in a manner that hopefully leads to less accidental conflicts.\n* At log level `DEBUG`, single events are no longer logged (use `TRACE` instead).\n* More node modules are now `pub(crate)`.\n\n### Removed\n* Remove systemd notify support, including removal of `[network][systemd_support]` config option.\n* Removed dead code revealed by making modules `pub(crate)`.\n* The networking layer no longer gives preferences to validators from the previous era.\n\n### Fixed\n* Avoid redundant requests caused by the Highway synchronizer.\n* Update \"current era\" metric also for initial era.\n* Keep syncing until the node is in the current era, rather than allowing an acceptable drift.\n* Update the list of peers with newly-learned ones in linear chain sync.\n* Drain the joiner reactor queue on exit, to eliminate stale connections whose handshake has completed, but which live on the queue.\n* Shut down SSE event streams gracefully.\n* Limit the maximum number of clients connected to the event stream server via the `[event_stream_server][max_concurrent_subscribers]` config option.\n* Avoid emitting duplicate events in the event stream.\n* Change `BlockIdentifier` params in the Open-RPC schema to be optional.\n* Asymmetric connections are now swept regularly again.\n\n\n\n## [1.2.0] - 2021-05-27\n\n### Added\n* Add configuration options for `[consensus][highway][round_success_meter]`.\n* Add `[protocol][last_emergency_restart]` field to the chainspec for use by fast sync.\n* Add an endpoint at `/rpc-schema` to the REST server which returns the OpenRPC-compatible schema of the JSON-RPC API.\n* Have consensus component shut down the node on an absence of messages for the last era for a given period.\n* Add a new `Step` event to the event stream which displays the contract runtime `Step` execution 
results.\n* Add a configurable delay before proposing dependencies, to give deploys time to be gossiped before inclusion in a new block.\n* Add instrumentation to the network component.\n* Add fetchers for block headers.\n* Add joiner test.\n\n### Changed\n* Change to Apache 2.0 license.\n* Provide an efficient way of finding the block to which a given deploy belongs.\n* On hard-reset upgrades, only remove stored blocks with old protocol versions, and remove all data associated with a removed block.\n* Restrict expensive startup integrity checks to sessions following unclean shutdowns.\n* Improve node joining process.\n* Improve linear chain component, including cleanups and optimized handling of finality signatures.\n* Make the syncing process, linear chain component and joiner reactor not depend on the Era Supervisor.\n* Improve logging of banned peers.\n* Change trigger for upgrade checks to timed interval rather than announcement of new block.\n* Use the same JSON representation for a block in the event stream as for the JSON-RPC server.\n* Avoid creating a new era when shutting down for an upgrade.\n* Allow consensus to disconnect from faulty peers.\n* Use own most recent round exponent instead of the median when initializing a new era.\n* Request protocol state from peers only for the latest era.\n* Add an instance ID to consensus pings, so that they are only handled in the era and the network they were meant for.\n* Avoid requesting a consensus dependency that is already in the synchronizer queue.\n* Change replay detection to not use execution results.\n* Initialize consensus round success meter with current timestamp.\n* Era Supervisor now accounts for the last emergency restart.\n* Upgrade dependencies, in particular tokio.\n* Use `minimum_block_time` and `maximum_round_length` in Highway, instead of `minimum_round_exponent` and `maximum_round_exponent`. 
The minimum round length doesn't have to be a power of two in milliseconds anymore.\n\n### Removed\n* Remove `impl Sub<Timestamp> for Timestamp` to help avoid panicking in non-obvious edge cases.\n* Remove `impl Sub<TimeDiff> for Timestamp` from production code to help avoid panicking in non-obvious edge cases.\n* Remove `[event_stream_server][broadcast_channel_size]` from config.toml, and make it a factor of the event stream buffer size.\n\n### Fixed\n* Have casper-node process exit with the exit code returned by the validator reactor.\n* Restore cached block proposer state correctly.\n* Runtime memory estimator now registered in the joiner reactor.\n* Avoid potential arithmetic overflow in consensus component.\n* Avoid potential index out of bounds error in consensus component.\n* Avoid panic on dropping an event responder.\n* Validate each block size in the block validator component.\n* Prevent deploy replays.\n* Ensure finality signatures received after storing a block are gossiped and stored.\n* Ensure isolated bootstrap nodes attempt to reconnect properly.\n* Ensure the reactor doesn't skip fatal errors before successfully exiting.\n* Collect only verified signatures from bonded validators.\n* Fix a race condition where new metrics were replaced before the networking component had shut down completely, resulting in a panic.\n* Ensure an era is not activated twice.\n* Avoid redundant requests caused by the Highway synchronizer.\n* Reduce duplication in block validation requests made by the Highway synchronizer.\n* Request latest consensus state only if consensus has stalled locally.\n\n\n\n## [1.1.1] - 2021-04-19\n\n### Changed\n* Ensure consistent validation when adding deploys and transfers while proposing and validating blocks.\n\n\n\n## [1.1.0] - 2021-04-13 [YANKED]\n\n### Changed\n* Ensure that global state queries will only be permitted to recurse to a fixed maximum depth.\n\n\n\n## [1.0.1] - 2021-04-08\n\n### Added\n* Add `[deploys][max_deploy_size]` to 
chainspec to limit the size of valid deploys.\n* Add `[network][maximum_net_message_size]` to chainspec to limit the size of peer-to-peer messages.\n\n### Changed\n* Check deploy size does not exceed maximum permitted as part of deploy validation.\n* Include protocol version and maximum message size in network handshake of nodes.\n* Change accounts.toml to only be included in v1.0.0 configurations.\n\n\n\n## [1.0.0] - 2021-03-30\n\n### Added\n* Initial release of node for Casper mainnet.\n\n\n\n[Keep a Changelog]: https://keepachangelog.com/en/1.0.0\n[unreleased]: https://github.com/casper-network/casper-node/compare/37d561634adf73dab40fffa7f1f1ee47e80bf8a1...dev\n[1.4.2]: https://github.com/casper-network/casper-node/compare/v1.4.0...37d561634adf73dab40fffa7f1f1ee47e80bf8a1\n[1.4.0]: https://github.com/casper-network/casper-node/compare/v1.3.0...v1.4.0\n[1.3.0]: https://github.com/casper-network/casper-node/compare/v1.2.0...v1.3.0\n[1.2.0]: https://github.com/casper-network/casper-node/compare/v1.1.1...v1.2.0\n[1.1.1]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1\n[1.1.0]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1\n[1.0.1]: https://github.com/casper-network/casper-node/compare/v1.0.0...v1.0.1\n[1.0.0]: https://github.com/casper-network/casper-node/releases/tag/v1.0.0\n"
  },
  {
    "path": "node/Cargo.toml",
    "content": "[package]\nname = \"casper-node\"\nversion = \"2.2.0\" # when updating, also update 'html_root_url' in lib.rs\nauthors = [\"Ed Hastings <ed@casper.network>\", \"Karan Dhareshwar <karan@casper.network>\"]\nedition = \"2021\"\ndescription = \"The Casper blockchain node\"\ndocumentation = \"https://docs.rs/casper-node\"\nreadme = \"README.md\"\nhomepage = \"https://casper.network\"\nrepository = \"https://github.com/casper-network/casper-node/tree/dev/node\"\nlicense = \"Apache-2.0\"\ndefault-run = \"casper-node\"\nexclude = [\"proptest-regressions\"]\n\n[dependencies]\nansi_term = \"0.12.1\"\nanyhow = \"1\"\naquamarine = \"0.1.12\"\nasync-trait = \"0.1.50\"\nbacktrace = \"0.3.50\"\nbase16 = \"0.2.1\"\nbase64 = \"0.13.0\"\nbincode = \"1\"\nbytes = \"1.11.0\"\ncasper-binary-port = { version = \"1.1.1\", path = \"../binary_port\" }\ncasper-storage = { version = \"5.0.0\", path = \"../storage\" }\ncasper-types = { version = \"7.0.0\", path = \"../types\", features = [\"datasize\", \"json-schema\", \"std-fs-io\"] }\ncasper-execution-engine = { version = \"9.0.0\", path = \"../execution_engine\" }\ndatasize = { version = \"0.2.11\", features = [\"detailed\", \"fake_clock-types\", \"futures-types\", \"smallvec-types\"] }\nderive_more = \"0.99.7\"\neither = { version = \"1\", features = [\"serde\"] }\nenum-iterator = \"0.6.0\"\nerased-serde = \"0.3.18\"\nfs2 = \"0.4.3\"\nfutures = \"0.3.31\"\nfutures-io = \"0.3.5\"\nhex-buffer-serde = \"0.3.0\"\nhex_fmt = \"0.3.0\"\nhostname = \"0.3.0\"\nhttp = \"0.2.1\"\nhumantime = \"2.1.0\"\nhyper = \"0.14.27\"\nitertools = \"0.10.3\"\nlibc = \"0.2.66\"\nlinked-hash-map = \"0.5.3\"\nlmdb-rkv = \"0.14\"\nlog = { version = \"0.4.8\", features = [\"std\", \"serde\", \"kv_unstable\"] }\nnum = { version = \"0.4.0\", default-features = false }\nnum-derive = { workspace = true }\nnum-rational = { version = \"0.4.0\", features = [\"serde\"] }\nnum-traits = { workspace = true }\nnum_cpus = \"1\"\nonce_cell = \"1\"\nopenssl = 
\"0.10.70\"\npin-project = \"1.0.6\"\nprometheus = { version = \"0.13.4\", default-features = false }\nquanta = \"0.9.2\"\nrand = \"0.8.3\"\nrand_chacha = \"0.3.0\"\nregex = \"1\"\nrmp-serde = \"0.14.4\"\nrmp = \"=0.8.14\"\nschemars = { version = \"0.8.16\", features = [\"preserve_order\", \"impl_json_schema\"] }\nserde = { version = \"1\", features = [\"derive\", \"rc\"] }\nserde-big-array = \"0.3.0\"\nserde-map-to-array = \"1.1.0\"\nserde_bytes = \"0.11.5\"\nserde_json = { version = \"1\", features = [\"preserve_order\"] }\nserde_repr = \"0.1.6\"\nshlex = \"1.3.0\"\nsignal-hook = \"0.3.4\"\nsignature = \"1\"\nsmallvec = { version = \"1\", features = [\"serde\"] }\nstatic_assertions = \"1\"\nstats_alloc = \"0.1.8\"\nstructopt = \"0.3.14\"\nstrum = { version = \"0.24.1\", features = [\"strum_macros\", \"derive\"] }\nsys-info = \"0.8.0\"\ntempfile = \"3.4.0\"\nthiserror = \"1\"\ntokio = { version = \"1\", features = [\"macros\", \"net\", \"rt-multi-thread\", \"sync\", \"time\"] }\ntokio-openssl = \"0.6.3\"\ntokio-serde = { version = \"0.8.0\", features = [\"bincode\"] }\ntokio-stream = { version = \"0.1.4\", features = [\"sync\"] }\ntokio-util = { version = \"0.6.4\", features = [\"codec\"] }\nmio = \"0.8.11\"\ntoml = { version = \"0.8.19\", features = [\"preserve_order\"] }\ntower = { version = \"0.4.6\", features = [\"limit\"] }\ntracing = \"0.1.18\"\ntracing-futures = \"0.2.5\"\ntracing-subscriber = { version = \"0.3.20\", features = [\"env-filter\", \"fmt\", \"json\"] }\nuint = \"0.9.0\"\nuuid = { version = \"0.8.1\", features = [\"serde\", \"v4\"] }\nwarp = { version = \"0.3.6\", features = [\"compression\"] }\nwheelbuf = \"0.2.0\"\n\ncasper-executor-wasm = { version = \"0.1.3\", path = \"../executor/wasm\" }\ncasper-executor-wasm-interface = { version = \"0.1.3\", path = \"../executor/wasm_interface\" }\nfs_extra = \"1.3.0\"\n\n[dev-dependencies]\ncasper-binary-port = { version = \"1.1.1\", path = \"../binary_port\", features = [\"testing\"] 
}\nassert-json-diff = \"2.0.1\"\nassert_matches = \"1.5.0\"\ncasper-types = { path = \"../types\", features = [\"datasize\", \"json-schema\", \"std-fs-io\", \"testing\"] }\nfake_instant = \"0.4.0\"\npnet = \"0.28.0\"\npretty_assertions = \"0.7.2\"\nproptest = \"1.0.0\"\nproptest-derive = \"0.5.1\"\nrand_core = \"0.6.2\"\nreqwest = { version = \"0.11.27\", features = [\"stream\"] }\ntokio = { version = \"1\", features = [\"test-util\"] }\n\n[features]\nfailpoints = []\ntesting = [\"casper-types/testing\"]\nvendored-openssl = [\"openssl/vendored\"]\ndatasize = [\"casper-types/datasize\"]\n\n[[bin]]\nname = \"casper-node\"\npath = \"src/app/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[package.metadata.deb]\nfeatures = [\"vendored-openssl\"]\nrevision = \"0\"\ndepends = \"curl\"\nassets = [\n    [\"../target/release/casper-node\", \"/usr/bin/casper-node\", \"755\"],\n    [\"../resources/maintainer_scripts/logrotate.d/casper-node\", \"/etc/logrotate.d/casper-node\", \"644\"],\n    [\"../resources/maintainer_scripts/pull_genesis.sh\", \"/etc/casper/pull_genesis.sh\", \"755\"],\n    [\"../resources/maintainer_scripts/delete_local_db.sh\", \"/etc/casper/delete_local_db.sh\", \"755\"],\n    [\"../resources/maintainer_scripts/config_from_example.sh\", \"/etc/casper/config_from_example.sh\", \"755\"],\n    [\"../resources/maintainer_scripts/systemd_pre_start.sh\", \"/etc/casper/systemd_pre_start.sh\", \"755\"],\n    [\"../resources/production/README.md\", \"/etc/casper/README.md\", \"644\"],\n    [\"../resources/production/CHANGE_LOG.md\", \"/etc/casper/CHANGE_LOG.md\", \"644\"],\n    [\"../resources/production/config-example.toml\", \"/etc/casper/config-example.toml\", \"644\"],\n    [\"../resources/production/validator_keys/README.md\", \"/etc/casper/validator_keys/README.md\", \"644\"]\n]\nmaintainer-scripts = \"../resources/maintainer_scripts/debian\"\nextended-description = \"\"\"\nPackage for Casper Node.\n\nFor information on using package, see 
https://github.com/casper-network/casper-node\n\"\"\"\n\n[package.metadata.deb.systemd-units]\nunit-scripts = \"../resources/maintainer_scripts/casper_node\"\nrestart-after-upgrade = false\n"
  },
  {
    "path": "node/README.md",
    "content": "# `casper-node`\n\n[![LOGO](https://raw.githubusercontent.com/casper-network/casper-node/master/images/casper-association-logo-primary.svg)](https://casper.network/)\n\n[![Crates.io](https://img.shields.io/crates/v/casper-node)](https://crates.io/crates/casper-node)\n[![Documentation](https://docs.rs/casper-node/badge.svg)](https://docs.rs/casper-node)\n[![License](https://img.shields.io/badge/license-Apache-blue)](https://github.com/casper-network/casper-node/blob/master/LICENSE)\n\nThe component for running a node on the casper network.\n\n[Node Operator Guide](https://docs.casper.network/operators/)\n\n## License\n\nLicensed under the [Apache License Version 2.0](https://github.com/casper-network/casper-node/blob/master/LICENSE)."
  },
  {
    "path": "node/build.rs",
    "content": "use std::{env, process::Command};\n\nconst NODE_BUILD_PROFILE_ENV_VAR: &str = \"NODE_BUILD_PROFILE\";\nconst NODE_GIT_HASH_ENV_VAR: &str = \"NODE_GIT_SHA\";\n\nconst CARGO_BUILD_PROFILE_ENV_VAR: &str = \"PROFILE\";\n\n///\n/// `casper-node` build script to capture the git revision hash and cargo build profile and export\n/// them to cargo to include them in the version information.\n///\n/// Notes: This script exports information to cargo via println! with the old invocation prefix of\n/// `cargo:`, if/when the node uses a Rust version `1.77` or above, this should be changed to\n/// `cargo::` as the prefix changed in that version of rust\nfn main() {\n    match Command::new(\"git\")\n        .arg(\"rev-parse\")\n        .arg(\"--short\")\n        .arg(\"HEAD\")\n        .output()\n    {\n        Ok(output) => {\n            // In the event the git command is successful, export the properly formatted git hash to\n            // cargo at compile time.\n            let git_hash_raw =\n                String::from_utf8(output.stdout).expect(\"Failed to obtain commit hash to string\");\n            let git_hash = git_hash_raw.trim_end_matches('\\n');\n\n            println!(\"cargo:rustc-env={NODE_GIT_HASH_ENV_VAR}={git_hash}\");\n        }\n\n        Err(error) => {\n            println!(\"cargo:warning={error}\");\n            println!(\"cargo:warning=casper-node build version will not include git short hash\");\n        }\n    }\n\n    println!(\n        \"cargo:rustc-env={NODE_BUILD_PROFILE_ENV_VAR}={}\",\n        env::var(CARGO_BUILD_PROFILE_ENV_VAR).unwrap()\n    );\n}\n"
  },
  {
    "path": "node/proptest-regressions/components/diagnostics_port/stop_at.txt",
    "content": "# Seeds for failure cases proptest has generated in the past. It is\n# automatically read and these particular cases re-run before any\n# novel cases are generated.\n#\n# It is recommended to check this file in to source control so that\n# everyone who runs the test benefits from these saved cases.\ncc a95b33d3196ca47b38fb6d16346318cbfbcd6494087384852a2d4bdb585f1edf # shrinks to stop_at = NextBlock\ncc 5d4cf22796e91f3ca192f4b42ff7738143ba06e6ad7ea088abc9e63784be78a6 # shrinks to stop_at = EraId(EraId(0))\n"
  },
  {
    "path": "node/src/app/main.rs",
    "content": "//! # Casper blockchain node\n//!\n//! This is the core application for the Casper blockchain. Run with `--help` to see available\n//! command-line arguments.\n\nuse std::{\n    panic::{self, PanicHookInfo},\n    process,\n};\n\nuse backtrace::Backtrace;\nuse structopt::StructOpt;\nuse tokio::runtime::Builder;\nuse tracing::info;\n\nuse casper_node::{cli::Cli, MAX_THREAD_COUNT};\n\n/// Aborting panic hook.\n///\n/// Will exit the application using `abort` when an error occurs. Always shows a backtrace.\nfn panic_hook(info: &PanicHookInfo<'_>) {\n    let backtrace = Backtrace::new();\n\n    eprintln!(\"{:?}\", backtrace);\n\n    // Print panic info\n    if let Some(s) = info.payload().downcast_ref::<&str>() {\n        eprintln!(\"node panicked: {s}\");\n    } else {\n        eprintln!(\"{info}\");\n    }\n\n    // Abort after a panic, even if only a worker thread panicked.\n    process::abort()\n}\n\n/// Main function.\nfn main() -> anyhow::Result<()> {\n    // The exit code is determined in a block to ensure that all acquired resources are dropped\n    // before exiting with the given exit code.\n    let exit_code = {\n        let num_cpus = num_cpus::get();\n        let runtime = Builder::new_multi_thread()\n            .enable_all()\n            .worker_threads(num_cpus)\n            .max_blocking_threads(MAX_THREAD_COUNT - num_cpus)\n            .build()\n            .unwrap();\n\n        panic::set_hook(Box::new(panic_hook));\n\n        // Parse CLI args and run selected subcommand.\n        let opts = Cli::from_args();\n\n        runtime.block_on(opts.run())?\n    };\n\n    info!(%exit_code, \"exiting casper-node\");\n    process::exit(exit_code)\n}\n"
  },
  {
    "path": "node/src/cli/arglang.rs",
    "content": "//! TOML-inspired command-line argument language.\n//!\n//! Supports strings, booleans, integers and arrays (lists).\n//!\n//! * Booleans are expressed as `true` or `false`.\n//! * Any integer must fit into `i64`, otherwise will be parsed as strings.\n//! * Strings can be quoted using double quotes. A backslash `\\\\` can be used to escape quotes\n//!   inside.\n//! * Unquoted strings are terminated on whitespace.\n//! * Arrays are written using brackets and commas: `[1, 2, 3]`.\n//!\n//! ## Examples\n//!\n//! * `[127.0.0.1, 1.2.3.4, 6.7.8.9]` list of three strings\n//! * `\"hello world\"` string `hello world`\n//! * `[\"no\\\"de\\\"-1\", node-2]` list of two strings (`no\"de\"-1` and `node-2`).\n\nuse std::{iter::Peekable, str::FromStr};\n\nuse thiserror::Error;\nuse toml::Value;\n\n/// A Token to be parsed.\n#[derive(Clone, Debug, Eq, PartialEq)]\npub(crate) enum Token {\n    String(String),\n    I64(i64),\n    Boolean(bool),\n    Comma,\n    OpenBracket,\n    CloseBracket,\n}\n\n#[derive(Debug, Error, Eq, PartialEq)]\npub(crate) enum Error {\n    #[error(\"unterminated string in input\")]\n    UnterminatedString,\n    #[error(\"unexpected token {0:?}\")]\n    UnexpectedToken(Token),\n    #[error(\"unexpected end of input\")]\n    UnexpectedEndOfInput,\n    #[error(\"trailing input {0:?}...\")]\n    TrailingInput(Token),\n}\n\nimpl Token {\n    /// Constructs a token from a string.\n    #[cfg(test)]\n    fn string(value: &str) -> Token {\n        Token::String(value.to_string())\n    }\n}\n\n/// Tokenizes a stream of characters.\nfn tokenize(input: &str) -> Result<Vec<Token>, Error> {\n    let mut chars = input.chars();\n    let mut tokens = Vec::new();\n\n    let mut buffer = String::new();\n\n    loop {\n        let ch = chars.next();\n\n        // Check if we need to complete a token.\n        if !buffer.is_empty() {\n            match ch {\n                Some(' ' | '\"' | '[' | ']' | ',') | None => {\n                    // Try to parse as 
number or bool first.\n                    if let Ok(value) = i64::from_str(&buffer) {\n                        tokens.push(Token::I64(value));\n                    } else if let Ok(value) = bool::from_str(&buffer) {\n                        tokens.push(Token::Boolean(value));\n                    } else {\n                        tokens.push(Token::String(buffer.clone()));\n                    }\n\n                    buffer.clear();\n                }\n                _ => {\n                    // Handled in second match below.\n                }\n            }\n        }\n\n        match ch {\n            None => {\n                // On EOF, we break.\n                break;\n            }\n            Some(' ') => {\n                // Ignore whitespace.\n            }\n            Some('\"') => {\n                // Quoted string.\n                let mut escaped = false;\n                let mut string = String::new();\n                loop {\n                    let c = chars.next();\n                    match c {\n                        Some(character) if escaped => {\n                            string.push(character);\n                            escaped = false;\n                        }\n                        Some('\\\\') => {\n                            escaped = true;\n                        }\n                        Some('\"') => {\n                            break;\n                        }\n                        Some(character) => string.push(character),\n                        None => {\n                            return Err(Error::UnterminatedString);\n                        }\n                    }\n                }\n                tokens.push(Token::String(string));\n            }\n            Some('[') => tokens.push(Token::OpenBracket),\n            Some(']') => tokens.push(Token::CloseBracket),\n            Some(',') => tokens.push(Token::Comma),\n            Some(character) => buffer.push(character),\n        }\n    }\n\n  
  Ok(tokens)\n}\n\n/// Parse a stream of tokens of arglang.\nfn parse_stream<I>(tokens: &mut Peekable<I>) -> Result<Value, Error>\nwhere\n    I: Iterator<Item = Token>,\n{\n    match tokens.next() {\n        Some(Token::String(value)) => Ok(Value::String(value)),\n        Some(Token::I64(value)) => Ok(Value::Integer(value)),\n        Some(Token::Boolean(value)) => Ok(Value::Boolean(value)),\n        Some(Token::OpenBracket) => {\n            // Special case for empty list.\n            if tokens.peek() == Some(&Token::CloseBracket) {\n                tokens.next();\n                return Ok(Value::Array(Vec::new()));\n            }\n\n            let mut items = Vec::new();\n            loop {\n                items.push(parse_stream(tokens)?);\n\n                match tokens.next() {\n                    Some(Token::CloseBracket) => {\n                        return Ok(Value::Array(items));\n                    }\n                    Some(Token::Comma) => {\n                        // Continue parsing next time.\n                    }\n                    Some(t) => {\n                        return Err(Error::UnexpectedToken(t));\n                    }\n                    None => {\n                        return Err(Error::UnexpectedEndOfInput);\n                    }\n                }\n            }\n        }\n        Some(t @ (Token::CloseBracket | Token::Comma)) => Err(Error::UnexpectedToken(t)),\n        None => Err(Error::UnexpectedEndOfInput),\n    }\n}\n\n/// Parse string using arglang.\npub(crate) fn parse(input: &str) -> Result<Value, Error> {\n    let mut tokens = tokenize(input)?.into_iter().peekable();\n    let value = parse_stream(&mut tokens)?;\n\n    // Check if there is trailing input.\n    if let Some(trailing) = tokens.next() {\n        return Err(Error::TrailingInput(trailing));\n    }\n\n    Ok(value)\n}\n\n#[cfg(test)]\nmod tests {\n    use toml::Value;\n\n    use super::{parse, tokenize, Error, Token};\n\n    #[test]\n    fn 
tokenize_single() {\n        assert_eq!(tokenize(\"asdf\").unwrap(), vec![Token::string(\"asdf\")]);\n        assert_eq!(tokenize(\"  \").unwrap(), vec![]);\n        assert_eq!(tokenize(\"-123\").unwrap(), vec![Token::I64(-123)]);\n        assert_eq!(tokenize(\"123\").unwrap(), vec![Token::I64(123)]);\n        assert_eq!(tokenize(\"true\").unwrap(), vec![Token::Boolean(true)]);\n        assert_eq!(tokenize(\"false\").unwrap(), vec![Token::Boolean(false)]);\n        assert_eq!(tokenize(\"[\").unwrap(), vec![Token::OpenBracket]);\n        assert_eq!(tokenize(\"]\").unwrap(), vec![Token::CloseBracket]);\n        assert_eq!(tokenize(\",\").unwrap(), vec![Token::Comma]);\n\n        assert_eq!(tokenize(\" asdf\").unwrap(), vec![Token::string(\"asdf\")]);\n        assert_eq!(tokenize(\"  \").unwrap(), vec![]);\n        assert_eq!(tokenize(\" -123\").unwrap(), vec![Token::I64(-123)]);\n        assert_eq!(tokenize(\" 123\").unwrap(), vec![Token::I64(123)]);\n        assert_eq!(tokenize(\" true\").unwrap(), vec![Token::Boolean(true)]);\n        assert_eq!(tokenize(\" false\").unwrap(), vec![Token::Boolean(false)]);\n        assert_eq!(tokenize(\" [\").unwrap(), vec![Token::OpenBracket]);\n        assert_eq!(tokenize(\" ]\").unwrap(), vec![Token::CloseBracket]);\n        assert_eq!(tokenize(\" ,\").unwrap(), vec![Token::Comma]);\n\n        assert_eq!(tokenize(\" asdf \").unwrap(), vec![Token::string(\"asdf\")]);\n        assert_eq!(tokenize(\"  \").unwrap(), vec![]);\n        assert_eq!(tokenize(\" -123 \").unwrap(), vec![Token::I64(-123)]);\n        assert_eq!(tokenize(\" 123 \").unwrap(), vec![Token::I64(123)]);\n        assert_eq!(tokenize(\" true \").unwrap(), vec![Token::Boolean(true)]);\n        assert_eq!(tokenize(\" false \").unwrap(), vec![Token::Boolean(false)]);\n        assert_eq!(tokenize(\" [ \").unwrap(), vec![Token::OpenBracket]);\n        assert_eq!(tokenize(\" ] \").unwrap(), vec![Token::CloseBracket]);\n        assert_eq!(tokenize(\" , \").unwrap(), 
vec![Token::Comma]);\n    }\n\n    #[test]\n    fn tokenize_strings() {\n        assert_eq!(\n            tokenize(\" a1 b2 c3 \").unwrap(),\n            vec![\n                Token::string(\"a1\"),\n                Token::string(\"b2\"),\n                Token::string(\"c3\")\n            ]\n        );\n\n        assert_eq!(\n            tokenize(\"hello \\\"world\\\"!\").unwrap(),\n            vec![\n                Token::string(\"hello\"),\n                Token::string(\"world\"),\n                Token::string(\"!\")\n            ]\n        );\n\n        assert_eq!(\n            tokenize(\"\\\"inner\\\\\\\"quote\\\"\").unwrap(),\n            vec![Token::string(\"inner\\\"quote\"),]\n        );\n\n        assert_eq!(tokenize(\"\\\"asdf\"), Err(Error::UnterminatedString))\n    }\n\n    #[test]\n    fn tokenize_list() {\n        assert_eq!(\n            tokenize(\"[a, 1, 2]\").unwrap(),\n            vec![\n                Token::OpenBracket,\n                Token::String(\"a\".to_owned()),\n                Token::Comma,\n                Token::I64(1),\n                Token::Comma,\n                Token::I64(2),\n                Token::CloseBracket\n            ]\n        );\n    }\n\n    #[test]\n    fn parse_simple() {\n        assert_eq!(\n            parse(\"\\\"hello\\\"\").unwrap(),\n            Value::String(\"hello\".to_owned())\n        );\n        assert_eq!(\n            parse(\"\\\"127.0.0.1\\\"\").unwrap(),\n            Value::String(\"127.0.0.1\".to_owned())\n        );\n        assert_eq!(\n            parse(\"127.0.0.1\").unwrap(),\n            Value::String(\"127.0.0.1\".to_owned())\n        );\n\n        assert_eq!(parse(\"true\").unwrap(), Value::Boolean(true));\n        assert_eq!(parse(\"false\").unwrap(), Value::Boolean(false));\n\n        assert_eq!(parse(\"123\").unwrap(), Value::Integer(123));\n        assert_eq!(parse(\"-123\").unwrap(), Value::Integer(-123));\n\n        assert_eq!(\n            
parse(\"123456789012345678901234567890\").unwrap(),\n            Value::String(\"123456789012345678901234567890\".to_string())\n        );\n    }\n\n    #[test]\n    fn parse_arrays() {\n        assert_eq!(parse(\" [ ] \").unwrap(), Value::Array(Vec::new()));\n        assert_eq!(parse(\"[]\").unwrap(), Value::Array(Vec::new()));\n\n        assert_eq!(\n            parse(\"[a, 1, 2]\").unwrap(),\n            Value::Array(vec![\n                Value::String(\"a\".to_string()),\n                Value::Integer(1),\n                Value::Integer(2),\n            ])\n        );\n\n        assert_eq!(\n            parse(\"[a, [1, 2], 3]\").unwrap(),\n            Value::Array(vec![\n                Value::String(\"a\".to_string()),\n                Value::Array(vec![Value::Integer(1), Value::Integer(2)]),\n                Value::Integer(3),\n            ])\n        );\n    }\n\n    #[test]\n    fn doc_examples() {\n        assert_eq!(\n            parse(\"[127.0.0.1, 1.2.3.4, 6.7.8.9]\").unwrap(),\n            Value::Array(vec![\n                Value::String(\"127.0.0.1\".to_owned()),\n                Value::String(\"1.2.3.4\".to_owned()),\n                Value::String(\"6.7.8.9\".to_owned())\n            ])\n        );\n\n        assert_eq!(\n            parse(\"\\\"hello world\\\"\").unwrap(),\n            Value::String(\"hello world\".to_owned())\n        );\n\n        assert_eq!(\n            parse(\"[\\\"no\\\\\\\"de\\\\\\\"-1\\\", node-2]\").unwrap(),\n            Value::Array(vec![\n                Value::String(\"no\\\"de\\\"-1\".to_owned()),\n                Value::String(\"node-2\".to_owned()),\n            ])\n        );\n    }\n}\n"
  },
  {
    "path": "node/src/cli.rs",
    "content": "//! Command-line option parsing.\n//!\n//! Most configuration is done via config files (see [`config`](../config/index.html) for details).\n\npub mod arglang;\n\nuse std::{\n    alloc::System,\n    fs,\n    path::{Path, PathBuf},\n    str::FromStr,\n    sync::Arc,\n};\n\nuse anyhow::{self, bail, Context};\nuse prometheus::Registry;\nuse regex::Regex;\nuse stats_alloc::{StatsAlloc, INSTRUMENTED_SYSTEM};\nuse structopt::StructOpt;\nuse toml::{value::Table, Value};\nuse tracing::{error, info};\n\nuse casper_types::{Chainspec, ChainspecRawBytes};\n\nuse crate::{\n    components::network::Identity as NetworkIdentity,\n    logging,\n    reactor::{main_reactor, Runner},\n    setup_signal_hooks,\n    types::ExitCode,\n    utils::{\n        chain_specification::validate_chainspec, config_specification::validate_config, Loadable,\n        WithDir,\n    },\n};\n\n// We override the standard allocator to gather metrics and tune the allocator via the MALLOC_CONF\n// env var.\n#[global_allocator]\nstatic ALLOC: &StatsAlloc<System> = &INSTRUMENTED_SYSTEM;\n\n// Note: The docstring on `Cli` is the help shown when calling the binary with `--help`.\n#[derive(Debug, StructOpt)]\n#[structopt(version = crate::VERSION_STRING_COLOR.as_str())]\n#[allow(rustdoc::invalid_html_tags)]\n/// Casper blockchain node.\npub enum Cli {\n    /// Run the node in standard mode.\n    ///\n    /// Loads the configuration values from the given configuration file or uses defaults if not\n    /// given, then runs the reactor.\n    #[structopt(alias = \"validator\")]\n    Standard {\n        /// Path to configuration file.\n        config: PathBuf,\n\n        #[structopt(\n            short = \"C\",\n            long,\n            env = \"NODE_CONFIG\",\n            use_delimiter(true),\n            value_delimiter(\";\")\n        )]\n        /// Overrides and extensions for configuration file entries in the form\n        /// <SECTION>.<KEY>=<VALUE>.  
For example, '-C=node.chainspec_config_path=chainspec.toml'\n        config_ext: Vec<ConfigExt>,\n    },\n    /// Migrate modified values from the old config as required after an upgrade.\n    MigrateConfig {\n        /// Path to configuration file of previous version of node.\n        #[structopt(long)]\n        old_config: PathBuf,\n        /// Path to configuration file of this version of node.\n        #[structopt(long)]\n        new_config: PathBuf,\n    },\n    /// Migrate any stored data as required after an upgrade.\n    MigrateData {\n        /// Path to configuration file of previous version of node.\n        #[structopt(long)]\n        old_config: PathBuf,\n        /// Path to configuration file of this version of node.\n        #[structopt(long)]\n        new_config: PathBuf,\n    },\n    /// Verify that a given config file can be parsed.\n    ValidateConfig {\n        /// Path to configuration file.\n        config: PathBuf,\n    },\n}\n\n#[derive(Debug)]\n/// Command line extension to be applied to TOML-based config file values.\npub struct ConfigExt {\n    section: String,\n    key: String,\n    value: String,\n}\n\nimpl ConfigExt {\n    /// Updates TOML table with updated or extended key value pairs.\n    ///\n    /// Returns errors if the respective sections to be updated are not TOML tables or if parsing\n    /// the command line options failed.\n    fn update_toml_table(&self, toml_value: &mut Value) -> anyhow::Result<()> {\n        let table = toml_value\n            .as_table_mut()\n            .ok_or_else(|| anyhow::anyhow!(\"configuration table is not a table\"))?;\n\n        if !table.contains_key(&self.section) {\n            table.insert(self.section.clone(), Value::Table(Table::new()));\n        }\n        let val = arglang::parse(&self.value)?;\n        table[&self.section]\n            .as_table_mut()\n            .ok_or_else(|| {\n                anyhow::anyhow!(\"configuration section {} is not a table\", self.section)\n            
})?\n            .insert(self.key.clone(), val);\n        Ok(())\n    }\n}\n\nimpl FromStr for ConfigExt {\n    type Err = anyhow::Error;\n\n    /// Attempts to create a ConfigExt from a str patterned as `section.key=value`\n    fn from_str(input: &str) -> Result<Self, Self::Err> {\n        let re = Regex::new(r\"^([^.]+)\\.([^=]+)=(.+)$\").unwrap();\n        let captures = re\n            .captures(input)\n            .context(\"could not parse config_ext (see README.md)\")?;\n        Ok(ConfigExt {\n            section: captures\n                .get(1)\n                .context(\"failed to find section\")?\n                .as_str()\n                .to_owned(),\n            key: captures\n                .get(2)\n                .context(\"failed to find key\")?\n                .as_str()\n                .to_owned(),\n            value: captures\n                .get(3)\n                .context(\"failed to find value\")?\n                .as_str()\n                .to_owned(),\n        })\n    }\n}\n\nimpl Cli {\n    /// Executes selected CLI command.\n    pub async fn run(self) -> anyhow::Result<i32> {\n        match self {\n            Cli::Standard { config, config_ext } => {\n                // Setup UNIX signal hooks.\n                setup_signal_hooks();\n\n                let mut reactor_config = Self::init(&config, config_ext)?;\n\n                // We use a `ChaCha20Rng` for the production node. For one, we want to completely\n                // eliminate any chance of runtime failures, regardless of how small (these\n                // exist with `OsRng`). 
Additionally, we want to limit the number of syscalls for\n                // performance reasons.\n                let mut rng = crate::new_rng();\n\n                let registry = Registry::new();\n\n                let (chainspec, chainspec_raw_bytes) =\n                    <(Chainspec, ChainspecRawBytes)>::from_path(reactor_config.dir())?;\n\n                info!(\n                    protocol_version = %chainspec.protocol_version(),\n                    build_version = %crate::VERSION_STRING.as_str(),\n                    \"node starting up\"\n                );\n\n                if !validate_chainspec(&chainspec) {\n                    bail!(\"invalid chainspec\");\n                }\n\n                if !validate_config(reactor_config.value()) {\n                    bail!(\"invalid config\");\n                }\n\n                reactor_config.value_mut().ensure_valid(&chainspec);\n\n                let network_identity = NetworkIdentity::from_config(WithDir::new(\n                    reactor_config.dir(),\n                    reactor_config.value().network.clone(),\n                ))\n                .context(\"failed to create a network identity\")?;\n\n                let mut main_runner = Runner::<main_reactor::MainReactor>::with_metrics(\n                    reactor_config,\n                    Arc::new(chainspec),\n                    Arc::new(chainspec_raw_bytes),\n                    network_identity,\n                    &mut rng,\n                    &registry,\n                )\n                .await?;\n\n                let exit_code = main_runner.run(&mut rng).await;\n                Ok(exit_code as i32)\n            }\n            Cli::MigrateConfig {\n                old_config,\n                new_config,\n            } => {\n                let new_config = Self::init(&new_config, vec![])?;\n\n                let old_root = old_config\n                    .parent()\n                    .map_or_else(|| \"/\".into(), 
Path::to_path_buf);\n                let encoded_old_config = fs::read_to_string(&old_config)\n                    .context(\"could not read old configuration file\")\n                    .with_context(|| old_config.display().to_string())?;\n                let old_config = toml::from_str(&encoded_old_config)?;\n\n                info!(build_version = %crate::VERSION_STRING.as_str(), \"migrating config\");\n                crate::config_migration::migrate_config(\n                    WithDir::new(old_root, old_config),\n                    new_config,\n                )?;\n                Ok(ExitCode::Success as i32)\n            }\n            Cli::MigrateData {\n                old_config,\n                new_config,\n            } => {\n                let new_config = Self::init(&new_config, vec![])?;\n\n                let old_root = old_config\n                    .parent()\n                    .map_or_else(|| \"/\".into(), Path::to_path_buf);\n                let encoded_old_config = fs::read_to_string(&old_config)\n                    .context(\"could not read old configuration file\")\n                    .with_context(|| old_config.display().to_string())?;\n                let old_config = toml::from_str(&encoded_old_config)?;\n\n                info!(build_version = %crate::VERSION_STRING.as_str(), \"migrating data\");\n                crate::data_migration::migrate_data(\n                    WithDir::new(old_root, old_config),\n                    new_config,\n                )?;\n                Ok(ExitCode::Success as i32)\n            }\n            Cli::ValidateConfig { config } => {\n                info!(build_version = %crate::VERSION_STRING.as_str(), config_file = ?config, \"validating config file\");\n                match Self::init(&config, vec![]) {\n                    Ok(_config) => {\n                        info!(build_version = %crate::VERSION_STRING.as_str(), config_file = ?config, \"config file is valid\");\n                        
Ok(ExitCode::Success as i32)\n                    }\n                    Err(err) => {\n                        // initialize manually in case of error to avoid double initialization\n                        logging::init_with_config(&Default::default())?;\n                        error!(build_version = %crate::VERSION_STRING.as_str(), config_file = ?config, \"config file is not valid\");\n                        Err(err)\n                    }\n                }\n            }\n        }\n    }\n\n    /// Parses the config file for the current version of casper-node, and initializes logging.\n    fn init(\n        config: &Path,\n        config_ext: Vec<ConfigExt>,\n    ) -> anyhow::Result<WithDir<main_reactor::Config>> {\n        // Determine the parent directory of the configuration file, if any.\n        // Otherwise, we default to `/`.\n        let root = config\n            .parent()\n            .map_or_else(|| \"/\".into(), Path::to_path_buf);\n\n        // The app supports running without a config file, using default values.\n        let encoded_config = fs::read_to_string(config)\n            .context(\"could not read configuration file\")\n            .with_context(|| config.display().to_string())?;\n\n        // Get the TOML table version of the config indicated from CLI args, or from a new\n        // defaulted config instance if one is not provided.\n        let mut config_table: Value = toml::from_str(&encoded_config)?;\n\n        // If any command line overrides to the config values are passed, apply them.\n        for item in config_ext {\n            item.update_toml_table(&mut config_table)?;\n        }\n\n        // Create main config, including any overridden values.\n        let main_config: main_reactor::Config = config_table.try_into()?;\n        logging::init_with_config(&main_config.logging)?;\n\n        Ok(WithDir::new(root, main_config))\n    }\n}\n"
  },
  {
    "path": "node/src/components/binary_port/config.rs",
    "content": "use std::str::FromStr;\n\nuse casper_types::TimeDiff;\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\n/// Uses a fixed port per node, but binds on any interface.\nconst DEFAULT_ADDRESS: &str = \"0.0.0.0:0\";\n/// Default maximum message size.\nconst DEFAULT_MAX_MESSAGE_SIZE: u32 = 4 * 1024 * 1024;\n/// Default maximum number of connections.\nconst DEFAULT_MAX_CONNECTIONS: usize = 5;\n/// Default maximum number of requests per second.\nconst DEFAULT_QPS_LIMIT: usize = 110;\n// Initial time given to a connection before it expires\nconst DEFAULT_INITIAL_CONNECTION_LIFETIME: &str = \"10 seconds\";\n// Default amount of time which is given to a connection to extend it's lifetime when a valid\n// [`Command::Get(GetRequest::Record)`] is sent to the node\nconst DEFAULT_GET_RECORD_REQUEST_TERMINATION_DELAY: &str = \"0 seconds\";\n// Default amount of time which is given to a connection to extend it's lifetime when a valid\n// [`Command::Get(GetRequest::Information)`] is sent to the node\nconst DEFAULT_GET_INFORMATION_REQUEST_TERMINATION_DELAY: &str = \"5 seconds\";\n// Default amount of time which is given to a connection to extend it's lifetime when a valid\n// [`Command::Get(GetRequest::State)`] is sent to the node\nconst DEFAULT_GET_STATE_REQUEST_TERMINATION_DELAY: &str = \"0 seconds\";\n// Default amount of time which is given to a connection to extend it's lifetime when a valid\n// [`Command::Get(GetRequest::Trie)`] is sent to the node\nconst DEFAULT_GET_TRIE_REQUEST_TERMINATION_DELAY: &str = \"0 seconds\";\n// Default amount of time which is given to a connection to extend it's lifetime when a valid\n// [`Command::TryAcceptTransaction`] is sent to the node\nconst DEFAULT_ACCEPT_TRANSACTION_REQUEST_TERMINATION_DELAY: &str = \"24 seconds\";\n// Default amount of time which is given to a connection to extend it's lifetime when a valid\n// [`Command::TrySpeculativeExec`] is sent to the node\nconst 
DEFAULT_SPECULATIVE_EXEC_REQUEST_TERMINATION_DELAY: &str = \"0 seconds\";\n\n/// Binary port server configuration.\n#[derive(Clone, DataSize, Debug, Deserialize, Serialize)]\n// Disallow unknown fields to ensure config files and command-line overrides contain valid keys.\n#[serde(deny_unknown_fields)]\npub struct Config {\n    /// Setting to enable the BinaryPort server.\n    pub enable_server: bool,\n    /// Address to bind BinaryPort server to.\n    pub address: String,\n    /// Flag used to enable/disable the [`AllValues`] request\n    // In case we need \"enabled\" flag for more than 2 requests we should introduce generic\n    // \"function disabled/enabled\" mechanism. For now, we can stick to these two booleans.\n    pub allow_request_get_all_values: bool,\n    /// Flag used to enable/disable the [`Trie`] request\n    pub allow_request_get_trie: bool,\n    /// Flag used to enable/disable the [`TrySpeculativeExec`] request.\n    pub allow_request_speculative_exec: bool,\n    /// Maximum size of the binary port message.\n    pub max_message_size_bytes: u32,\n    /// Maximum number of connections to the server.\n    pub max_connections: usize,\n    /// Maximum number of requests per second.\n    pub qps_limit: usize,\n    // Initial time given to a connection before it expires\n    pub initial_connection_lifetime: TimeDiff,\n    // The amount of time which is given to a connection to extend it's lifetime when a valid\n    // [`Command::Get(GetRequest::Record)`] is sent to the node\n    pub get_record_request_termination_delay: TimeDiff,\n    // The amount of time which is given to a connection to extend it's lifetime when a valid\n    // [`Command::Get(GetRequest::Information)`] is sent to the node\n    pub get_information_request_termination_delay: TimeDiff,\n    // The amount of time which is given to a connection to extend it's lifetime when a valid\n    // [`Command::Get(GetRequest::State)`] is sent to the node\n    pub get_state_request_termination_delay: 
TimeDiff,\n    // The amount of time which is given to a connection to extend it's lifetime when a valid\n    // [`Command::Get(GetRequest::Trie)`] is sent to the node\n    pub get_trie_request_termination_delay: TimeDiff,\n    // The amount of time which is given to a connection to extend it's lifetime when a valid\n    // [`Command::TryAcceptTransaction`] is sent to the node\n    pub accept_transaction_request_termination_delay: TimeDiff,\n    // The amount of time which is given to a connection to extend it's lifetime when a valid\n    // [`Command::TrySpeculativeExec`] is sent to the node\n    pub speculative_exec_request_termination_delay: TimeDiff,\n}\n\nimpl Config {\n    /// Creates a default instance for `BinaryPort`.\n    pub fn new() -> Self {\n        Config {\n            enable_server: true,\n            address: DEFAULT_ADDRESS.to_string(),\n            allow_request_get_all_values: false,\n            allow_request_get_trie: false,\n            allow_request_speculative_exec: false,\n            max_message_size_bytes: DEFAULT_MAX_MESSAGE_SIZE,\n            max_connections: DEFAULT_MAX_CONNECTIONS,\n            qps_limit: DEFAULT_QPS_LIMIT,\n            initial_connection_lifetime: TimeDiff::from_str(DEFAULT_INITIAL_CONNECTION_LIFETIME)\n                .unwrap(),\n            get_record_request_termination_delay: TimeDiff::from_str(\n                DEFAULT_GET_RECORD_REQUEST_TERMINATION_DELAY,\n            )\n            .unwrap(),\n            get_information_request_termination_delay: TimeDiff::from_str(\n                DEFAULT_GET_INFORMATION_REQUEST_TERMINATION_DELAY,\n            )\n            .unwrap(),\n            get_state_request_termination_delay: TimeDiff::from_str(\n                DEFAULT_GET_STATE_REQUEST_TERMINATION_DELAY,\n            )\n            .unwrap(),\n            get_trie_request_termination_delay: TimeDiff::from_str(\n                DEFAULT_GET_TRIE_REQUEST_TERMINATION_DELAY,\n            )\n            .unwrap(),\n  
          accept_transaction_request_termination_delay: TimeDiff::from_str(\n                DEFAULT_ACCEPT_TRANSACTION_REQUEST_TERMINATION_DELAY,\n            )\n            .unwrap(),\n            speculative_exec_request_termination_delay: TimeDiff::from_str(\n                DEFAULT_SPECULATIVE_EXEC_REQUEST_TERMINATION_DELAY,\n            )\n            .unwrap(),\n        }\n    }\n}\n\nimpl Default for Config {\n    fn default() -> Self {\n        Config::new()\n    }\n}\n"
  },
  {
    "path": "node/src/components/binary_port/connection_terminator.rs",
    "content": "use casper_types::{TimeDiff, Timestamp};\nuse std::time::Duration;\nuse tokio::{select, sync::Mutex, time};\nuse tokio_util::sync::CancellationToken;\n\nstruct TerminationData {\n    /// Moment in time at which the termination will happen. The\n    /// actual termination can happen some time after this\n    /// timestamp within reasonable timeframe of waking up\n    /// threads and rusts internal polling mechanisms\n    terminate_at: Timestamp,\n    /// A cancellation token which can stop (by calling\n    /// `.cancel()` on it) the countdown in case an extended\n    /// lifetime needs to be placed\n    stop_countdown: CancellationToken,\n}\n\n/// Terminator which causes a cancellation_token to get canceled if a given timeout occurs.\n/// Allows to extend the timeout period by resetting the termination dealine (using `terminate_at`)\n/// or with a helper function `delay_by`. Both functions won't reset the termination deadline if\n/// the new termination would happen before the existing one (we only allow to extend the\n/// termination period)\npub(super) struct ConnectionTerminator {\n    /// This token will get canceled if the timeout passes\n    cancellation_token: CancellationToken,\n    //Data steering the internal countdown\n    countdown_data: Mutex<Option<TerminationData>>,\n}\n\nimpl ConnectionTerminator {\n    /// Updates or sets the termination deadline.\n    /// There will be no update if the termination already happened.\n    /// Both set and update won't happen if the `in_terminate_at` is in the past.\n    /// Updating an already running termination countdown happens only if the incoming\n    /// `in_terminate_at` is > then the existing one. 
Returns true if the update was in effect.\n    /// False otherwise\n    pub(super) async fn terminate_at(&self, in_terminate_at: Timestamp) -> bool {\n        let now = Timestamp::now();\n        if in_terminate_at <= now {\n            //Do nothing if termiantion is in the past\n            return false;\n        }\n        let terminate_in = Duration::from_millis(in_terminate_at.millis() - now.millis());\n        let mut countdown_data_guard = self.countdown_data.lock().await;\n        if let Some(TerminationData {\n            terminate_at,\n            stop_countdown,\n        }) = countdown_data_guard.as_ref()\n        {\n            if in_terminate_at < *terminate_at {\n                //Don't update termination time if the proposed one is more restrictive than\n                // the existing one.\n                return false;\n            } else {\n                stop_countdown.cancel();\n            }\n        }\n        if self.cancellation_token.is_cancelled() {\n            //Don't proceed if the outbound token was already cancelled\n            return false;\n        }\n        let stop_countdown = self\n            .spawn_termination_countdown(terminate_in, self.cancellation_token.clone())\n            .await;\n        let data = TerminationData {\n            terminate_at: in_terminate_at,\n            stop_countdown,\n        };\n        *countdown_data_guard = Some(data);\n        true\n    }\n\n    /// Delays the termination by `delay_by` amount. If the terminations `terminate_at` is\n    /// further in the future than `now() + delay_by`, this function will have no effect\n    /// and will return false. Returns true otherwise.\n    pub(crate) async fn delay_termination(&self, delay_by: TimeDiff) -> bool {\n        let temrinate_at = Timestamp::now() + delay_by;\n        self.terminate_at(temrinate_at).await\n    }\n\n    //Ctor. 
To start the countdown mechanism you need to call `terminate_at`\n    pub(super) fn new() -> Self {\n        let cancellation_token = CancellationToken::new();\n        ConnectionTerminator {\n            cancellation_token,\n            countdown_data: Mutex::new(None),\n        }\n    }\n\n    pub(super) fn get_cancellation_token(&self) -> CancellationToken {\n        self.cancellation_token.clone()\n    }\n\n    // Spawns a thread that will cancel `cancellation_token` in a given `terminate_in` duration.\n    // This function doesn't check if the cancellation_token wasn't already cancelled - it needs to\n    // be checked beforehand Return a different CancellationToken which can be used to kill the\n    // running thread\n    async fn spawn_termination_countdown(\n        &self,\n        terminate_in: Duration,\n        cancellation_token: CancellationToken,\n    ) -> CancellationToken {\n        let cancel_countdown = CancellationToken::new();\n        let cancel_countdown_to_move = cancel_countdown.clone();\n        tokio::task::spawn(async move {\n            select! 
{\n                _ = time::sleep(terminate_in) => {\n                    cancellation_token.cancel()\n                },\n                _ = cancel_countdown_to_move.cancelled() => {\n                },\n\n            }\n        });\n        cancel_countdown\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::ConnectionTerminator;\n    use casper_types::{TimeDiff, Timestamp};\n    use std::time::Duration;\n    use tokio::{select, time::sleep};\n\n    #[tokio::test(flavor = \"multi_thread\", worker_threads = 4)]\n    async fn should_fail_setting_expiration_in_past() {\n        let terminator = ConnectionTerminator::new();\n        let in_past = Timestamp::from(1);\n        assert!(!terminator.terminate_at(in_past).await);\n\n        let initial_inactivity = TimeDiff::from_seconds(1);\n        let terminator = ConnectionTerminator::new();\n        let now = Timestamp::now();\n        assert!(terminator.terminate_at(now + initial_inactivity).await);\n        assert!(!terminator.terminate_at(now).await);\n    }\n\n    #[tokio::test(flavor = \"multi_thread\", worker_threads = 4)]\n    async fn should_fail_setting_expiration_when_already_cancelled() {\n        let initial_inactivity = TimeDiff::from_seconds(1);\n        let terminator = ConnectionTerminator::new();\n        let now = Timestamp::now();\n        assert!(terminator.terminate_at(now + initial_inactivity).await);\n        let cancellation_token = terminator.get_cancellation_token();\n        select! 
{\n            _ = cancellation_token.cancelled() => {\n                let elapsed = now.elapsed();\n                assert!(elapsed >= TimeDiff::from_seconds(1));\n                assert!(elapsed <= TimeDiff::from_millis(1500));\n            },\n            _ = sleep(Duration::from_secs(10)) => {\n                unreachable!()\n            },\n        }\n\n        let initial_inactivity = TimeDiff::from_seconds(10);\n        let now = Timestamp::now();\n        assert!(!terminator.terminate_at(now + initial_inactivity).await);\n    }\n\n    #[tokio::test(flavor = \"multi_thread\", worker_threads = 4)]\n    async fn should_cancel_after_enough_inactivity() {\n        let initial_inactivity = TimeDiff::from_seconds(1);\n        let terminator = ConnectionTerminator::new();\n        let now = Timestamp::now();\n        assert!(terminator.terminate_at(now + initial_inactivity).await);\n        let cancellation_token = terminator.get_cancellation_token();\n        select! {\n            _ = cancellation_token.cancelled() => {\n                let elapsed = now.elapsed();\n                assert!(elapsed >= TimeDiff::from_seconds(1));\n                assert!(elapsed <= TimeDiff::from_millis(1500));\n            },\n            _ = sleep(Duration::from_secs(10)) => {\n                unreachable!()\n            },\n        }\n    }\n\n    #[tokio::test(flavor = \"multi_thread\", worker_threads = 4)]\n    async fn should_cancel_after_extended_time() {\n        let initial_inactivity = TimeDiff::from_seconds(1);\n        let terminator = ConnectionTerminator::new();\n        let now = Timestamp::now();\n        assert!(terminator.terminate_at(now + initial_inactivity).await);\n        sleep(Duration::from_millis(100)).await;\n        terminator\n            .delay_termination(TimeDiff::from_seconds(2))\n            .await;\n        let cancellation_token = terminator.get_cancellation_token();\n        select! 
{\n            _ = cancellation_token.cancelled() => {\n                let elapsed = now.elapsed();\n                assert!(elapsed >= TimeDiff::from_seconds(2));\n                assert!(elapsed <= TimeDiff::from_millis(2500));\n            },\n            _ = sleep(Duration::from_secs(10)) => {\n                unreachable!()\n            },\n        }\n    }\n\n    #[tokio::test(flavor = \"multi_thread\", worker_threads = 4)]\n    async fn should_cancel_after_multiple_time_extensions() {\n        let initial_inactivity = TimeDiff::from_seconds(1);\n        let terminator = ConnectionTerminator::new();\n        let now = Timestamp::now();\n        assert!(terminator.terminate_at(now + initial_inactivity).await);\n        sleep(Duration::from_millis(100)).await;\n        terminator\n            .delay_termination(TimeDiff::from_seconds(2))\n            .await;\n        sleep(Duration::from_millis(100)).await;\n        terminator\n            .delay_termination(TimeDiff::from_seconds(3))\n            .await;\n        let cancellation_token = terminator.get_cancellation_token();\n        select! 
{\n            _ = cancellation_token.cancelled() => {\n                let elapsed = now.elapsed();\n                assert!(elapsed >= TimeDiff::from_seconds(3));\n                assert!(elapsed <= TimeDiff::from_millis(4000));\n            },\n            _ = sleep(Duration::from_secs(10)) => {\n                unreachable!()\n            },\n        }\n    }\n\n    #[tokio::test(flavor = \"multi_thread\", worker_threads = 4)]\n    async fn should_not_shorten_termination_time() {\n        let initial_inactivity = TimeDiff::from_seconds(1);\n        let terminator = ConnectionTerminator::new();\n        let now = Timestamp::now();\n        assert!(terminator.terminate_at(now + initial_inactivity).await);\n        sleep(Duration::from_millis(100)).await;\n        terminator\n            .delay_termination(TimeDiff::from_seconds(2))\n            .await;\n        sleep(Duration::from_millis(100)).await;\n        terminator\n            .delay_termination(TimeDiff::from_seconds(1))\n            .await;\n        let cancellation_token = terminator.get_cancellation_token();\n        select! {\n            _ = cancellation_token.cancelled() => {\n                let elapsed = now.elapsed();\n                assert!(elapsed >= TimeDiff::from_seconds(2));\n                assert!(elapsed <= TimeDiff::from_millis(2500));\n            },\n            _ = sleep(Duration::from_secs(10)) => {\n                unreachable!()\n            },\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/binary_port/error.rs",
    "content": "use casper_types::bytesrepr;\nuse thiserror::Error;\n\n#[derive(Debug, Error)]\npub(crate) enum Error {\n    #[error(transparent)]\n    BytesRepr(#[from] bytesrepr::Error),\n    #[error(\"received request without payload\")]\n    NoPayload,\n    #[error(transparent)]\n    BinaryPort(#[from] casper_binary_port::Error),\n}\n"
  },
  {
    "path": "node/src/components/binary_port/event.rs",
    "content": "use std::{\n    fmt::{Display, Formatter},\n    net::SocketAddr,\n};\n\nuse casper_binary_port::{BinaryResponse, Command, GetRequest};\nuse tokio::net::TcpStream;\n\nuse crate::effect::Responder;\n\n#[derive(Debug)]\npub(crate) enum Event {\n    Initialize,\n    AcceptConnection {\n        stream: TcpStream,\n        peer: SocketAddr,\n        responder: Responder<()>,\n    },\n    HandleRequest {\n        request: Command,\n        responder: Responder<BinaryResponse>,\n    },\n}\n\nimpl Display for Event {\n    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {\n        match self {\n            Event::Initialize => write!(f, \"initialize\"),\n            Event::AcceptConnection { peer, .. } => write!(f, \"accept connection from {}\", peer),\n            Event::HandleRequest { request, .. } => match request {\n                Command::Get(request) => match request {\n                    GetRequest::Record {\n                        record_type_tag,\n                        key,\n                    } => {\n                        write!(f, \"get record with tag {} ({})\", record_type_tag, key.len())\n                    }\n                    GetRequest::Information { info_type_tag, key } => {\n                        write!(f, \"get info with tag {} ({})\", info_type_tag, key.len())\n                    }\n                    GetRequest::State(state_request) => state_request.as_ref().fmt(f),\n                    GetRequest::Trie { trie_key } => write!(f, \"get trie ({})\", trie_key),\n                },\n                Command::TryAcceptTransaction { transaction, .. } => {\n                    write!(f, \"try accept transaction ({})\", transaction.hash())\n                }\n                Command::TrySpeculativeExec { transaction, .. } => {\n                    write!(f, \"try speculative exec ({})\", transaction.hash())\n                }\n            },\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/binary_port/metrics.rs",
    "content": "use prometheus::{IntCounter, Registry};\n\nuse crate::unregister_metric;\n\nconst BINARY_PORT_TRY_ACCEPT_TRANSACTION_COUNT_NAME: &str =\n    \"binary_port_try_accept_transaction_count\";\nconst BINARY_PORT_TRY_ACCEPT_TRANSACTION_COUNT_HELP: &str =\n    \"number of TryAcceptTransaction queries received\";\n\nconst BINARY_PORT_TRY_SPECULATIVE_EXEC_COUNT_NAME: &str = \"binary_port_try_speculative_exec_count\";\nconst BINARY_PORT_TRY_SPECULATIVE_EXEC_COUNT_HELP: &str =\n    \"number of TrySpeculativeExec queries received\";\n\nconst BINARY_PORT_GET_RECORD_COUNT_NAME: &str = \"binary_port_get_record_count\";\nconst BINARY_PORT_GET_RECORD_COUNT_HELP: &str = \"number of received Get queries for records\";\n\nconst BINARY_PORT_GET_INFORMATION_NAME: &str = \"binary_port_get_info_count\";\nconst BINARY_PORT_GET_INFORMATION_HELP: &str =\n    \"number of received Get queries for information from the node\";\n\nconst BINARY_PORT_GET_STATE_COUNT_NAME: &str = \"binary_port_get_state_count\";\nconst BINARY_PORT_GET_STATE_COUNT_HELP: &str =\n    \"number of Get queries received for the global state\";\n\nconst BINARY_PORT_CONNECTIONS_COUNT_NAME: &str = \"binary_port_connections_count\";\nconst BINARY_PORT_CONNECTIONS_COUNT_HELP: &str =\n    \"total number of external connections established to binary port\";\n\nconst BINARY_PORT_TRIE_COUNT_NAME: &str = \"binary_port_get_trie_count\";\nconst BINARY_PORT_TRIE_COUNT_HELP: &str = \"number of Get queries received for the trie state\";\n\n/// Metrics.\n#[derive(Debug)]\npub(crate) struct Metrics {\n    /// Number of `TryAcceptTransaction` queries received.\n    pub(super) binary_port_try_accept_transaction_count: IntCounter,\n    /// Number of `TrySpeculativeExec` queries received.\n    pub(super) binary_port_try_speculative_exec_count: IntCounter,\n    /// Number of `Get::Record` queries received.\n    pub(super) binary_port_get_record_count: IntCounter,\n    /// Number of `Get::Information` queries received.\n    
pub(super) binary_port_get_info_count: IntCounter,\n    /// Number of `Get::State` queries received.\n    pub(super) binary_port_get_state_count: IntCounter,\n    /// Number of distinct connections to binary port.\n    pub(super) binary_port_connections_count: IntCounter,\n    /// Number of `Get::Trie` queries received.\n    pub(super) binary_port_get_trie_count: IntCounter,\n\n    registry: Registry,\n}\n\nimpl Metrics {\n    /// Creates a new instance of the metrics.\n    pub fn new(registry: &Registry) -> Result<Self, prometheus::Error> {\n        let binary_port_try_accept_transaction_count = IntCounter::new(\n            BINARY_PORT_TRY_ACCEPT_TRANSACTION_COUNT_NAME.to_string(),\n            BINARY_PORT_TRY_ACCEPT_TRANSACTION_COUNT_HELP.to_string(),\n        )?;\n\n        let binary_port_try_speculative_exec_count = IntCounter::new(\n            BINARY_PORT_TRY_SPECULATIVE_EXEC_COUNT_NAME.to_string(),\n            BINARY_PORT_TRY_SPECULATIVE_EXEC_COUNT_HELP.to_string(),\n        )?;\n\n        let binary_port_get_record_count = IntCounter::new(\n            BINARY_PORT_GET_RECORD_COUNT_NAME.to_string(),\n            BINARY_PORT_GET_RECORD_COUNT_HELP.to_string(),\n        )?;\n\n        let binary_port_get_info_count = IntCounter::new(\n            BINARY_PORT_GET_INFORMATION_NAME.to_string(),\n            BINARY_PORT_GET_INFORMATION_HELP.to_string(),\n        )?;\n\n        let binary_port_get_state_count = IntCounter::new(\n            BINARY_PORT_GET_STATE_COUNT_NAME.to_string(),\n            BINARY_PORT_GET_STATE_COUNT_HELP.to_string(),\n        )?;\n\n        let binary_port_connections_count = IntCounter::new(\n            BINARY_PORT_CONNECTIONS_COUNT_NAME.to_string(),\n            BINARY_PORT_CONNECTIONS_COUNT_HELP.to_string(),\n        )?;\n\n        let binary_port_get_trie_count = IntCounter::new(\n            BINARY_PORT_TRIE_COUNT_NAME.to_string(),\n            BINARY_PORT_TRIE_COUNT_HELP.to_string(),\n        )?;\n\n        
registry.register(Box::new(binary_port_try_accept_transaction_count.clone()))?;\n        registry.register(Box::new(binary_port_try_speculative_exec_count.clone()))?;\n        registry.register(Box::new(binary_port_get_record_count.clone()))?;\n        registry.register(Box::new(binary_port_get_info_count.clone()))?;\n        registry.register(Box::new(binary_port_get_state_count.clone()))?;\n        registry.register(Box::new(binary_port_connections_count.clone()))?;\n        registry.register(Box::new(binary_port_get_trie_count.clone()))?;\n\n        Ok(Metrics {\n            binary_port_try_accept_transaction_count,\n            binary_port_try_speculative_exec_count,\n            binary_port_get_record_count,\n            binary_port_get_info_count,\n            binary_port_get_state_count,\n            binary_port_connections_count,\n            binary_port_get_trie_count,\n            registry: registry.clone(),\n        })\n    }\n}\n\nimpl Drop for Metrics {\n    fn drop(&mut self) {\n        unregister_metric!(self.registry, self.binary_port_try_accept_transaction_count);\n        unregister_metric!(self.registry, self.binary_port_try_speculative_exec_count);\n        unregister_metric!(self.registry, self.binary_port_get_record_count);\n        unregister_metric!(self.registry, self.binary_port_get_info_count);\n        unregister_metric!(self.registry, self.binary_port_get_state_count);\n        unregister_metric!(self.registry, self.binary_port_connections_count);\n        unregister_metric!(self.registry, self.binary_port_get_trie_count);\n    }\n}\n"
  },
  {
    "path": "node/src/components/binary_port/rate_limiter.rs",
    "content": "use casper_types::{TimeDiff, Timestamp};\nuse thiserror::Error as ThisError;\n\n#[derive(Debug, ThisError)]\npub(crate) enum RateLimiterError {\n    #[error(\"Cannot create Rate limiter with 0 max_requests\")]\n    EmptyWindowNotAllowed,\n    #[error(\"Maximum window duration is too large\")]\n    WindowDurationTooLarge,\n    #[error(\"Maximum window duration is too small\")]\n    WindowDurationTooSmall,\n}\n\nconst MAX_WINDOW_DURATION_MS: u64 = 1000 * 60 * 60; // 1 hour\n\n#[derive(PartialEq, Eq, Debug)]\n/// Response from the rate limiter.\npub(crate) enum LimiterResponse {\n    /// when limiter allowed the request\n    Allowed,\n    /// when limiter throttled the request\n    Throttled,\n}\n\n/// A buffer to store timestamps of requests. The assumption is that the buffer will keep the\n/// monotonical order of timestamps as they are pushed.\n#[derive(Debug)]\nstruct Buffer {\n    buffer: Vec<u64>,\n    in_index: usize,\n    out_index: usize,\n    capacity: usize,\n}\n\nimpl Buffer {\n    fn new(size: usize) -> Self {\n        Buffer {\n            buffer: vec![0; size + 1],\n            in_index: 0,\n            out_index: 0,\n            capacity: size + 1,\n        }\n    }\n\n    fn is_full(&self) -> bool {\n        self.in_index == (self.out_index + self.capacity - 1) % self.capacity\n    }\n\n    fn is_empty(&self) -> bool {\n        self.in_index == self.out_index\n    }\n\n    //This should only be used from `push`\n    fn push_and_slide(&mut self, value: u64) -> bool {\n        let out_index = self.out_index as i32;\n        let capacity = self.capacity as i32;\n        let mut to_index = self.in_index as i32;\n        let mut from_index = (self.in_index as i32 + capacity - 1) % capacity;\n\n        while to_index != out_index && self.buffer[from_index as usize] > value {\n            self.buffer[to_index as usize] = self.buffer[from_index as usize];\n            to_index = (to_index + capacity - 1) % capacity;\n            from_index = 
(from_index + capacity - 1) % capacity;\n        }\n        self.buffer[to_index as usize] = value;\n        self.in_index = (self.in_index + 1) % self.capacity;\n        true\n    }\n\n    fn push(&mut self, value: u64) -> bool {\n        if self.is_full() {\n            return false;\n        }\n        if !self.is_empty() {\n            let last_stored_index = (self.in_index + self.capacity - 1) % self.capacity;\n            let last_stored = self.buffer[last_stored_index];\n            // We are expecting values to be monotonically increasing. But there is a scenario in\n            // which the system time might be changed to a previous time.\n            // We handle that by wiggling it inside the buffer\n            if last_stored > value {\n                return self.push_and_slide(value);\n            }\n        }\n        self.buffer[self.in_index] = value;\n        self.in_index = (self.in_index + 1) % self.capacity;\n        true\n    }\n\n    fn prune_lt(&mut self, value: u64) -> usize {\n        if self.is_empty() {\n            return 0;\n        }\n        let mut number_of_pruned = 0;\n        while self.in_index != self.out_index {\n            if self.buffer[self.out_index] >= value {\n                break;\n            }\n            self.out_index = (self.out_index + 1) % self.capacity;\n            number_of_pruned += 1;\n        }\n        number_of_pruned\n    }\n\n    #[cfg(test)]\n    fn to_vec(&self) -> Vec<u64> {\n        let mut vec = Vec::new();\n        let mut local_out = self.out_index;\n        while self.in_index != local_out {\n            vec.push(self.buffer[local_out]);\n            local_out = (local_out + 1) % self.capacity;\n        }\n        vec\n    }\n}\n\n#[derive(Debug)]\npub(crate) struct RateLimiter {\n    /// window duration.\n    window_ms: u64,\n    /// Log of unix epoch time in ms when requests were made.\n    buffer: Buffer,\n}\n\nimpl RateLimiter {\n    //ctor\n    pub(crate) fn new(\n        max_requests: 
usize,\n        window_duration: TimeDiff,\n    ) -> Result<Self, RateLimiterError> {\n        if max_requests == 0 {\n            // We consider 0-max_requests as a misconfiguration\n            return Err(RateLimiterError::EmptyWindowNotAllowed);\n        }\n        let window_duration_in_ms = window_duration.millis();\n        if window_duration_in_ms >= MAX_WINDOW_DURATION_MS {\n            return Err(RateLimiterError::WindowDurationTooLarge);\n        }\n        if window_duration_in_ms == 0 {\n            return Err(RateLimiterError::WindowDurationTooSmall);\n        }\n        Ok(RateLimiter {\n            window_ms: window_duration_in_ms,\n            buffer: Buffer::new(max_requests),\n        })\n    }\n\n    pub(crate) fn throttle(&mut self) -> LimiterResponse {\n        self.internal_throttle(Timestamp::now().millis())\n    }\n\n    fn internal_throttle(&mut self, now: u64) -> LimiterResponse {\n        let is_full = self.buffer.is_full();\n        if !is_full {\n            self.buffer.push(now);\n            return LimiterResponse::Allowed;\n        } else {\n            //The following subtraction could theoretically not fit in unsigned, but in real-life\n            // cases we limit the window duration to 1 hour (it's checked in ctor). 
So unless\n            // someone calls it from the perspective of 1970, it should be fine.\n            let no_of_pruned = self.buffer.prune_lt(now - self.window_ms);\n            if no_of_pruned == 0 {\n                //No pruning was done, so we are still at max_requests\n                return LimiterResponse::Throttled;\n            }\n        }\n        self.buffer.push(now);\n        LimiterResponse::Allowed\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use casper_types::TimeDiff;\n\n    use super::*;\n\n    #[test]\n    fn sliding_window_should_validate_ctor_inputs() {\n        assert!(RateLimiter::new(0, TimeDiff::from_millis(1000)).is_err());\n        assert!(RateLimiter::new(10, TimeDiff::from_millis(MAX_WINDOW_DURATION_MS + 1)).is_err());\n        assert!(RateLimiter::new(10, TimeDiff::from_millis(0)).is_err());\n    }\n\n    #[test]\n    fn sliding_window_throttle_should_limit_requests() {\n        let mut rate_limiter = rate_limiter();\n        let t_1 = 10000_u64;\n        let t_2 = 10002_u64;\n        let t_3 = 10003_u64;\n\n        assert_eq!(\n            rate_limiter.internal_throttle(t_1),\n            LimiterResponse::Allowed\n        );\n        assert_eq!(\n            rate_limiter.internal_throttle(t_2),\n            LimiterResponse::Allowed\n        );\n        assert_eq!(\n            rate_limiter.internal_throttle(t_3),\n            LimiterResponse::Throttled\n        );\n    }\n\n    #[test]\n    fn sliding_window_throttle_should_not_count_throttled_requests() {\n        let mut rate_limiter = rate_limiter();\n        let t_1 = 1_u64;\n        let t_2 = 500_u64;\n        let t_3 = 1000_u64;\n        let t_4 = 1400_u64;\n\n        assert_eq!(\n            rate_limiter.internal_throttle(t_1),\n            LimiterResponse::Allowed\n        );\n        assert_eq!(\n            rate_limiter.internal_throttle(t_2),\n            LimiterResponse::Allowed\n        );\n        assert_eq!(\n            rate_limiter.internal_throttle(t_3),\n          
  LimiterResponse::Throttled\n        );\n        assert_eq!(\n            rate_limiter.internal_throttle(t_4),\n            LimiterResponse::Allowed\n        );\n    }\n\n    #[test]\n    fn sliding_window_throttle_should_limit_requests_on_burst() {\n        let mut rate_limiter = rate_limiter();\n        let t_1 = 10000;\n        assert_eq!(\n            rate_limiter.internal_throttle(t_1),\n            LimiterResponse::Allowed\n        );\n        assert_eq!(\n            rate_limiter.internal_throttle(t_1),\n            LimiterResponse::Allowed\n        );\n        assert_eq!(\n            rate_limiter.internal_throttle(t_1),\n            LimiterResponse::Throttled\n        );\n    }\n\n    #[test]\n    fn sliding_window_should_slide_away_from_old_checks() {\n        let mut rate_limiter = rate_limiter();\n        let t_1 = 10000_u64;\n        let t_2 = 10002_u64;\n        let t_3 = 11002_u64;\n        assert_eq!(\n            rate_limiter.internal_throttle(t_1),\n            LimiterResponse::Allowed\n        );\n        assert_eq!(\n            rate_limiter.internal_throttle(t_2),\n            LimiterResponse::Allowed\n        );\n        assert_eq!(\n            rate_limiter.internal_throttle(t_3),\n            LimiterResponse::Allowed\n        );\n        assert_eq!(\n            rate_limiter.internal_throttle(t_3),\n            LimiterResponse::Throttled\n        );\n    }\n\n    #[test]\n    fn sliding_window_should_take_past_timestamp() {\n        let mut rate_limiter = rate_limiter();\n        let t_1 = 10000_u64;\n        let t_2 = 9999_u64;\n        let t_3 = 10001_u64;\n        assert_eq!(\n            rate_limiter.internal_throttle(t_1),\n            LimiterResponse::Allowed\n        );\n        assert_eq!(\n            rate_limiter.internal_throttle(t_2),\n            LimiterResponse::Allowed\n        );\n        assert_eq!(\n            rate_limiter.internal_throttle(t_3),\n            LimiterResponse::Throttled\n        );\n    }\n\n    #[test]\n  
  fn sliding_window_should_anneal_timestamp_from_past_() {\n        let mut rate_limiter = rate_limiter();\n        let t_1 = 10000_u64;\n        let t_2 = 9999_u64;\n        let t_3 = 12001_u64;\n        let t_4 = 12002_u64;\n        assert_eq!(\n            rate_limiter.internal_throttle(t_1),\n            LimiterResponse::Allowed\n        );\n        assert_eq!(\n            rate_limiter.internal_throttle(t_2),\n            LimiterResponse::Allowed\n        );\n        assert_eq!(\n            rate_limiter.internal_throttle(t_3),\n            LimiterResponse::Allowed\n        );\n        assert_eq!(\n            rate_limiter.internal_throttle(t_4),\n            LimiterResponse::Allowed\n        );\n    }\n\n    #[test]\n    fn buffer_should_saturate_with_values() {\n        let mut buffer = Buffer::new(3);\n        assert!(buffer.push(1));\n        assert!(buffer.push(2));\n        assert!(buffer.push(3));\n        assert!(!buffer.push(4));\n        assert_eq!(buffer.to_vec(), vec![1_u64, 2_u64, 3_u64]);\n    }\n\n    #[test]\n    fn buffer_should_prune() {\n        let mut buffer = Buffer::new(3);\n        assert!(buffer.push(1));\n        assert!(buffer.push(2));\n        assert!(buffer.push(3));\n        assert_eq!(buffer.prune_lt(3), 2);\n        assert!(buffer.push(4));\n        assert_eq!(buffer.to_vec(), vec![3_u64, 4_u64]);\n        assert_eq!(buffer.prune_lt(5), 2);\n\n        assert!(buffer.push(1));\n        assert!(buffer.push(2));\n        assert!(buffer.push(3));\n        assert_eq!(buffer.prune_lt(5), 3);\n        assert!(buffer.to_vec().is_empty());\n\n        assert!(buffer.push(5));\n        assert!(buffer.push(6));\n        assert!(buffer.push(7));\n        assert_eq!(buffer.to_vec(), vec![5, 6, 7]);\n    }\n\n    #[test]\n    fn push_and_slide_should_keep_order() {\n        let mut buffer = Buffer::new(5);\n        assert!(buffer.push(1));\n        assert!(buffer.push(2));\n        assert!(buffer.push(7));\n        assert!(buffer.push(6));\n  
      assert_eq!(buffer.to_vec(), vec![1, 2, 6, 7]);\n        assert_eq!(buffer.prune_lt(7), 3);\n        assert_eq!(buffer.to_vec(), vec![7]);\n\n        let mut buffer = Buffer::new(4);\n        assert!(buffer.push(2));\n        assert!(buffer.push(8));\n        assert!(buffer.push(5));\n        assert!(buffer.push(1));\n        assert_eq!(buffer.to_vec(), vec![1, 2, 5, 8]);\n        assert_eq!(buffer.prune_lt(5), 2);\n        assert_eq!(buffer.to_vec(), vec![5, 8]);\n\n        let mut buffer = Buffer::new(4);\n        assert!(buffer.push(2));\n        assert!(buffer.push(8));\n        assert!(buffer.push(2));\n        assert!(buffer.push(1));\n        assert_eq!(buffer.to_vec(), vec![1, 2, 2, 8]);\n\n        let mut buffer = Buffer::new(4);\n        assert!(buffer.push(2));\n        assert!(buffer.push(8));\n        assert!(buffer.push(3));\n        assert!(buffer.push(1));\n        assert_eq!(buffer.prune_lt(2), 1);\n        assert!(buffer.push(0));\n        assert_eq!(buffer.to_vec(), vec![0, 2, 3, 8]);\n\n        let mut buffer = Buffer::new(4);\n        assert!(buffer.push(8));\n        assert!(buffer.push(7));\n        assert!(buffer.push(6));\n        assert!(buffer.push(5));\n        assert_eq!(buffer.prune_lt(7), 2);\n        assert!(buffer.push(9));\n        assert!(buffer.push(10));\n        assert_eq!(buffer.prune_lt(9), 2);\n        assert!(buffer.push(11));\n        assert!(buffer.push(1));\n        assert_eq!(buffer.to_vec(), vec![1, 9, 10, 11]);\n    }\n\n    fn rate_limiter() -> RateLimiter {\n        RateLimiter::new(2, TimeDiff::from_millis(1000)).unwrap()\n    }\n}\n"
  },
  {
    "path": "node/src/components/binary_port/tests.rs",
    "content": "use std::fmt::{self, Display, Formatter};\n\nuse derive_more::From;\nuse either::Either;\nuse rand::Rng;\nuse serde::Serialize;\n\nuse casper_binary_port::{\n    BinaryResponse, Command, GetRequest, GlobalStateEntityQualifier, GlobalStateRequest, RecordId,\n};\n\nuse casper_types::{\n    BlockHeader, Digest, GlobalStateIdentifier, KeyTag, PublicKey, Timestamp, Transaction,\n    TransactionV1,\n};\n\nuse crate::{\n    components::binary_port::event::Event as BinaryPortEvent,\n    effect::{\n        announcements::ControlAnnouncement,\n        requests::{\n            AcceptTransactionRequest, BlockSynchronizerRequest, ChainspecRawBytesRequest,\n            ConsensusRequest, ContractRuntimeRequest, NetworkInfoRequest, ReactorInfoRequest,\n            StorageRequest, UpgradeWatcherRequest,\n        },\n    },\n    reactor::ReactorEvent,\n};\nuse std::{sync::Arc, time::Duration};\n\nuse futures::channel::oneshot::{self, Receiver};\nuse prometheus::Registry;\nuse thiserror::Error as ThisError;\n\nuse casper_binary_port::ErrorCode;\nuse casper_types::{testing::TestRng, Chainspec, ChainspecRawBytes};\n\nuse crate::{\n    components::{\n        binary_port::config::Config as BinaryPortConfig, network::Identity as NetworkIdentity,\n        Component, InitializedComponent,\n    },\n    effect::{EffectBuilder, EffectExt, Effects, Responder},\n    reactor::{self, EventQueueHandle, QueueKind, Reactor, Runner},\n    testing::{network::NetworkedReactor, ConditionCheckReactor},\n    types::NodeRng,\n    utils::Loadable,\n};\n\nuse super::{BinaryPort, Metrics as BinaryPortMetrics};\n\nconst ENABLED: bool = true;\nconst DISABLED: bool = false;\n\nstruct TestCase {\n    allow_request_get_all_values: bool,\n    allow_request_get_trie: bool,\n    allow_request_speculative_exec: bool,\n    request_generator: Either<fn(&mut TestRng) -> Command, Command>,\n}\n\n#[tokio::test]\nasync fn should_enqueue_requests_for_enabled_functions() {\n    let mut rng = TestRng::new();\n\n 
   let get_all_values_enabled = TestCase {\n        allow_request_get_all_values: ENABLED,\n        allow_request_get_trie: rng.gen(),\n        allow_request_speculative_exec: rng.gen(),\n        request_generator: Either::Left(|_| all_values_request()),\n    };\n\n    let get_trie_enabled = TestCase {\n        allow_request_get_all_values: rng.gen(),\n        allow_request_get_trie: ENABLED,\n        allow_request_speculative_exec: rng.gen(),\n        request_generator: Either::Left(|_| trie_request()),\n    };\n\n    let try_speculative_exec_enabled = TestCase {\n        allow_request_get_all_values: rng.gen(),\n        allow_request_get_trie: rng.gen(),\n        allow_request_speculative_exec: ENABLED,\n        request_generator: Either::Left(try_speculative_exec_request),\n    };\n\n    for test_case in [\n        get_all_values_enabled,\n        get_trie_enabled,\n        try_speculative_exec_enabled,\n    ] {\n        let (_, mut runner) = run_test_case(test_case, &mut rng).await;\n\n        runner\n            .crank_until(\n                &mut rng,\n                got_contract_runtime_request,\n                Duration::from_secs(10),\n            )\n            .await;\n    }\n}\n\n#[tokio::test]\nasync fn should_return_error_for_disabled_functions() {\n    let mut rng = TestRng::new();\n\n    const EXPECTED_ERROR_CODE: ErrorCode = ErrorCode::FunctionDisabled;\n\n    let get_all_values_disabled = TestCase {\n        allow_request_get_all_values: DISABLED,\n        allow_request_get_trie: rng.gen(),\n        allow_request_speculative_exec: rng.gen(),\n        request_generator: Either::Left(|_| all_values_request()),\n    };\n\n    let get_trie_disabled = TestCase {\n        allow_request_get_all_values: rng.gen(),\n        allow_request_get_trie: DISABLED,\n        allow_request_speculative_exec: rng.gen(),\n        request_generator: Either::Left(|_| trie_request()),\n    };\n\n    let try_speculative_exec_disabled = TestCase {\n        
allow_request_get_all_values: rng.gen(),\n        allow_request_get_trie: rng.gen(),\n        allow_request_speculative_exec: DISABLED,\n        request_generator: Either::Left(try_speculative_exec_request),\n    };\n\n    for test_case in [\n        get_all_values_disabled,\n        get_trie_disabled,\n        try_speculative_exec_disabled,\n    ] {\n        let (receiver, mut runner) = run_test_case(test_case, &mut rng).await;\n\n        let result = tokio::select! {\n            result = receiver => result.expect(\"expected successful response\"),\n            _ = runner.crank_until(\n                &mut rng,\n                got_contract_runtime_request,\n                Duration::from_secs(10),\n            ) => {\n                panic!(\"expected receiver to complete first\")\n            }\n        };\n        assert_eq!(result.error_code(), EXPECTED_ERROR_CODE as u16)\n    }\n}\n\n#[tokio::test]\nasync fn should_return_empty_response_when_fetching_empty_key() {\n    let mut rng = TestRng::new();\n\n    let test_cases: Vec<TestCase> = record_requests_with_empty_keys()\n        .into_iter()\n        .map(|request| TestCase {\n            allow_request_get_all_values: DISABLED,\n            allow_request_get_trie: DISABLED,\n            allow_request_speculative_exec: DISABLED,\n            request_generator: Either::Right(request),\n        })\n        .collect();\n\n    for test_case in test_cases {\n        let (receiver, mut runner) = run_test_case(test_case, &mut rng).await;\n\n        let result = tokio::select! 
{\n            result = receiver => result.expect(\"expected successful response\"),\n            _ = runner.crank_until(\n                &mut rng,\n                got_contract_runtime_request,\n                Duration::from_secs(10),\n            ) => {\n                panic!(\"expected receiver to complete first\")\n            }\n        };\n        assert_eq!(result.error_code(), 0);\n        assert!(result.payload().is_empty());\n    }\n}\n\nasync fn run_test_case(\n    TestCase {\n        allow_request_get_all_values,\n        allow_request_get_trie,\n        allow_request_speculative_exec,\n        request_generator,\n    }: TestCase,\n    rng: &mut TestRng,\n) -> (\n    Receiver<BinaryResponse>,\n    Runner<ConditionCheckReactor<MockReactor>>,\n) {\n    let config = BinaryPortConfig {\n        enable_server: true,\n        allow_request_get_all_values,\n        allow_request_get_trie,\n        allow_request_speculative_exec,\n        max_message_size_bytes: 1024,\n        max_connections: 2,\n        ..Default::default()\n    };\n\n    let (chainspec, chainspec_raw_bytes) =\n        <(Chainspec, ChainspecRawBytes)>::from_resources(\"local\");\n    let mut runner: Runner<ConditionCheckReactor<MockReactor>> = Runner::new(\n        config.clone(),\n        Arc::new(chainspec),\n        Arc::new(chainspec_raw_bytes),\n        rng,\n    )\n    .await\n    .unwrap();\n\n    // Initialize component.\n    runner\n        .process_injected_effects(|effect_builder| {\n            effect_builder\n                .into_inner()\n                .schedule(BinaryPortEvent::Initialize, QueueKind::Api)\n                .ignore()\n        })\n        .await;\n\n    let (sender, receiver) = oneshot::channel();\n    let request = match request_generator {\n        Either::Left(f) => f(rng),\n        Either::Right(v) => v,\n    };\n    let event = BinaryPortEvent::HandleRequest {\n        request,\n        responder: Responder::without_shutdown(sender),\n    };\n\n    
runner\n        .process_injected_effects(|effect_builder| {\n            effect_builder\n                .into_inner()\n                .schedule(event, QueueKind::Api)\n                .ignore()\n        })\n        .await;\n\n    (receiver, runner)\n}\n\nstruct MockReactor {\n    binary_port: BinaryPort,\n}\n\nimpl NetworkedReactor for MockReactor {}\n\nimpl Reactor for MockReactor {\n    type Event = Event;\n    type Config = BinaryPortConfig;\n    type Error = ReactorError;\n\n    fn new(\n        config: Self::Config,\n        chainspec: Arc<Chainspec>,\n        _chainspec_raw_bytes: Arc<ChainspecRawBytes>,\n        _network_identity: NetworkIdentity,\n        registry: &Registry,\n        _event_queue: EventQueueHandle<Self::Event>,\n        _rng: &mut NodeRng,\n    ) -> Result<(Self, Effects<Self::Event>), Self::Error> {\n        let binary_port_metrics = BinaryPortMetrics::new(registry).unwrap();\n        let mut binary_port = BinaryPort::new(config, chainspec, binary_port_metrics);\n        <BinaryPort as InitializedComponent<Event>>::start_initialization(&mut binary_port);\n\n        let reactor = MockReactor { binary_port };\n\n        let effects = Effects::new();\n\n        Ok((reactor, effects))\n    }\n\n    fn dispatch_event(\n        &mut self,\n        effect_builder: EffectBuilder<Self::Event>,\n        rng: &mut NodeRng,\n        event: Event,\n    ) -> Effects<Self::Event> {\n        match event {\n            Event::BinaryPort(event) => reactor::wrap_effects(\n                Event::BinaryPort,\n                self.binary_port.handle_event(effect_builder, rng, event),\n            ),\n            Event::ControlAnnouncement(_) => panic!(\"unexpected control announcement\"),\n            Event::ContractRuntimeRequest(_) | Event::ReactorInfoRequest(_) => {\n                // We're only interested if the binary port actually created a request to Contract\n                // Runtime component, but we're not interested in the result.\n            
    Effects::new()\n            }\n            Event::AcceptTransactionRequest(req) => req.responder.respond(Ok(())).ignore(),\n            Event::StorageRequest(StorageRequest::GetHighestCompleteBlockHeader { responder }) => {\n                let proposer = PublicKey::random(rng);\n                let block_header_v2 = casper_types::BlockHeaderV2::new(\n                    Default::default(),\n                    Default::default(),\n                    Default::default(),\n                    Default::default(),\n                    Default::default(),\n                    Default::default(),\n                    Timestamp::now(),\n                    Default::default(),\n                    Default::default(),\n                    Default::default(),\n                    proposer,\n                    Default::default(),\n                    Default::default(),\n                    Default::default(),\n                );\n                responder\n                    .respond(Some(BlockHeader::V2(block_header_v2)))\n                    .ignore()\n            }\n            Event::StorageRequest(req) => panic!(\"unexpected storage req {}\", req),\n        }\n    }\n}\n\n/// Error type returned by the test reactor.\n#[derive(Debug, ThisError)]\nenum ReactorError {\n    #[error(\"prometheus (metrics) error: {0}\")]\n    Metrics(#[from] prometheus::Error),\n}\n\n/// Top-level event for the test reactors.\n#[derive(Debug, From, Serialize)]\n#[must_use]\nenum Event {\n    #[from]\n    BinaryPort(#[serde(skip_serializing)] BinaryPortEvent),\n    #[from]\n    ControlAnnouncement(ControlAnnouncement),\n    #[from]\n    ContractRuntimeRequest(ContractRuntimeRequest),\n    #[from]\n    ReactorInfoRequest(ReactorInfoRequest),\n    #[from]\n    AcceptTransactionRequest(AcceptTransactionRequest),\n    StorageRequest(StorageRequest),\n}\n\nimpl From<ChainspecRawBytesRequest> for Event {\n    fn from(_request: ChainspecRawBytesRequest) -> Self {\n        unreachable!()\n    
}\n}\n\nimpl From<UpgradeWatcherRequest> for Event {\n    fn from(_request: UpgradeWatcherRequest) -> Self {\n        unreachable!()\n    }\n}\n\nimpl From<BlockSynchronizerRequest> for Event {\n    fn from(_request: BlockSynchronizerRequest) -> Self {\n        unreachable!()\n    }\n}\n\nimpl From<ConsensusRequest> for Event {\n    fn from(_request: ConsensusRequest) -> Self {\n        unreachable!()\n    }\n}\n\nimpl From<NetworkInfoRequest> for Event {\n    fn from(_request: NetworkInfoRequest) -> Self {\n        unreachable!()\n    }\n}\n\nimpl From<StorageRequest> for Event {\n    fn from(request: StorageRequest) -> Self {\n        Event::StorageRequest(request)\n    }\n}\n\nimpl Display for Event {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            Event::ControlAnnouncement(ctrl_ann) => write!(formatter, \"control: {}\", ctrl_ann),\n            Event::BinaryPort(request) => write!(formatter, \"binary port request: {:?}\", request),\n            Event::ContractRuntimeRequest(request) => {\n                write!(formatter, \"contract runtime request: {:?}\", request)\n            }\n            Event::ReactorInfoRequest(request) => {\n                write!(formatter, \"reactor info request: {:?}\", request)\n            }\n            Event::AcceptTransactionRequest(request) => {\n                write!(formatter, \"accept transaction request: {:?}\", request)\n            }\n            Event::StorageRequest(request) => {\n                write!(formatter, \"storage request: {:?}\", request)\n            }\n        }\n    }\n}\n\nimpl ReactorEvent for Event {\n    fn is_control(&self) -> bool {\n        matches!(self, Event::ControlAnnouncement(_))\n    }\n\n    fn try_into_control(self) -> Option<ControlAnnouncement> {\n        if let Self::ControlAnnouncement(ctrl_ann) = self {\n            Some(ctrl_ann)\n        } else {\n            None\n        }\n    }\n}\n\nfn all_values_request() -> Command {\n    
let state_identifier = GlobalStateIdentifier::StateRootHash(Digest::hash([1u8; 32]));\n    Command::Get(GetRequest::State(Box::new(GlobalStateRequest::new(\n        Some(state_identifier),\n        GlobalStateEntityQualifier::AllItems {\n            key_tag: KeyTag::Account,\n        },\n    ))))\n}\n\n#[cfg(test)]\nfn record_requests_with_empty_keys() -> Vec<Command> {\n    let mut data = Vec::new();\n    for record_id in RecordId::all() {\n        data.push(Command::Get(GetRequest::Record {\n            record_type_tag: record_id.into(),\n            key: vec![],\n        }))\n    }\n    data\n}\n\nfn trie_request() -> Command {\n    Command::Get(GetRequest::Trie {\n        trie_key: Digest::hash([1u8; 32]),\n    })\n}\n\nfn try_speculative_exec_request(rng: &mut TestRng) -> Command {\n    Command::TrySpeculativeExec {\n        transaction: Transaction::V1(TransactionV1::random(rng)),\n    }\n}\n\nfn got_contract_runtime_request(event: &Event) -> bool {\n    matches!(event, Event::ContractRuntimeRequest(_))\n}\n"
  },
  {
    "path": "node/src/components/binary_port.rs",
    "content": "//! The Binary Port\nmod config;\nmod connection_terminator;\nmod error;\nmod event;\nmod metrics;\nmod rate_limiter;\n#[cfg(test)]\nmod tests;\n\nuse std::{convert::TryFrom, net::SocketAddr, sync::Arc};\n\nuse casper_binary_port::{\n    AccountInformation, AddressableEntityInformation, BalanceResponse, BinaryMessage,\n    BinaryMessageCodec, BinaryResponse, BinaryResponseAndRequest, Command, CommandHeader,\n    CommandTag, ContractInformation, DictionaryItemIdentifier, DictionaryQueryResult,\n    EntityIdentifier, EraIdentifier, ErrorCode, GetRequest, GetTrieFullResult,\n    GlobalStateEntityQualifier, GlobalStateQueryResult, GlobalStateRequest, InformationRequest,\n    InformationRequestTag, KeyPrefix, NodeStatus, PackageIdentifier, PurseIdentifier,\n    ReactorStateName, RecordId, ResponseType, RewardResponse, TransactionWithExecutionInfo,\n    ValueWithProof,\n};\nuse casper_storage::{\n    data_access_layer::{\n        balance::BalanceHandling,\n        prefixed_values::{PrefixedValuesRequest, PrefixedValuesResult},\n        tagged_values::{TaggedValuesRequest, TaggedValuesResult, TaggedValuesSelection},\n        BalanceIdentifier, BalanceRequest, BalanceResult, ProofHandling, ProofsResult,\n        QueryRequest, QueryResult, SeigniorageRecipientsRequest, SeigniorageRecipientsResult,\n        TrieRequest,\n    },\n    global_state::trie::TrieRaw,\n    system::auction,\n    tracking_copy::TrackingCopyError,\n    KeyPrefix as StorageKeyPrefix,\n};\nuse casper_types::{\n    account::AccountHash,\n    addressable_entity::NamedKeyAddr,\n    bytesrepr::{self, Bytes, FromBytes, ToBytes},\n    contracts::{ContractHash, ContractPackage, ContractPackageHash},\n    BlockHeader, BlockIdentifier, BlockWithSignatures, ByteCode, ByteCodeAddr, ByteCodeHash,\n    Chainspec, ContractWasm, ContractWasmHash, Digest, EntityAddr, GlobalStateIdentifier, Key,\n    Package, PackageAddr, Peers, ProtocolVersion, Rewards, StoredValue, TimeDiff, Timestamp,\n    
Transaction, URef,\n};\nuse connection_terminator::ConnectionTerminator;\nuse thiserror::Error as ThisError;\n\nuse datasize::DataSize;\nuse either::Either;\nuse futures::{SinkExt, StreamExt};\nuse once_cell::sync::OnceCell;\nuse rate_limiter::{LimiterResponse, RateLimiter, RateLimiterError};\nuse tokio::{\n    join,\n    net::{TcpListener, TcpStream},\n    select,\n    sync::{Mutex, Notify, OwnedSemaphorePermit, Semaphore},\n};\nuse tokio_util::codec::{Encoder, Framed};\nuse tracing::{debug, error, info, trace, warn};\n\n#[cfg(test)]\nuse futures::{future::BoxFuture, FutureExt};\n\nuse self::error::Error;\nuse crate::{\n    contract_runtime::SpeculativeExecutionResult,\n    effect::{\n        requests::{\n            AcceptTransactionRequest, BlockSynchronizerRequest, ChainspecRawBytesRequest,\n            ConsensusRequest, ContractRuntimeRequest, NetworkInfoRequest, ReactorInfoRequest,\n            StorageRequest, UpgradeWatcherRequest,\n        },\n        EffectBuilder, EffectExt, Effects,\n    },\n    reactor::{main_reactor::MainEvent, QueueKind},\n    types::NodeRng,\n    utils::{display_error, ListeningError},\n};\npub(crate) use metrics::Metrics;\n\nuse super::{Component, ComponentState, InitializedComponent, PortBoundComponent};\n\npub(crate) use config::Config;\npub(crate) use event::Event;\n\nconst COMPONENT_NAME: &str = \"binary_port\";\n\n#[derive(Debug, ThisError)]\npub(crate) enum BinaryPortInitializationError {\n    #[error(\"could not initialize rate limiter: {0}\")]\n    CannotInitializeRateLimiter(String),\n    #[error(\"could not initialize metrics: {0}\")]\n    CannotInitializeMetrics(prometheus::Error),\n}\n\nimpl From<RateLimiterError> for BinaryPortInitializationError {\n    fn from(value: RateLimiterError) -> Self {\n        BinaryPortInitializationError::CannotInitializeRateLimiter(value.to_string())\n    }\n}\n\nimpl From<prometheus::Error> for BinaryPortInitializationError {\n    fn from(value: prometheus::Error) -> Self {\n        
BinaryPortInitializationError::CannotInitializeMetrics(value)\n    }\n}\n\n#[derive(Debug, DataSize)]\npub(crate) struct BinaryPort {\n    #[data_size(skip)]\n    state: ComponentState,\n    #[data_size(skip)]\n    config: Arc<Config>,\n    #[data_size(skip)]\n    chainspec: Arc<Chainspec>,\n    #[data_size(skip)]\n    connection_limit: Arc<Semaphore>,\n    #[data_size(skip)]\n    metrics: Arc<Metrics>,\n    #[data_size(skip)]\n    local_addr: Arc<OnceCell<SocketAddr>>,\n    #[data_size(skip)]\n    shutdown_trigger: Arc<Notify>,\n    #[data_size(skip)]\n    server_join_handle: OnceCell<tokio::task::JoinHandle<()>>,\n    #[data_size(skip)]\n    rate_limiter: OnceCell<Arc<Mutex<RateLimiter>>>,\n}\n\nimpl BinaryPort {\n    pub(crate) fn new(config: Config, chainspec: Arc<Chainspec>, metrics: Metrics) -> Self {\n        Self {\n            state: ComponentState::Uninitialized,\n            connection_limit: Arc::new(Semaphore::new(config.max_connections)),\n            config: Arc::new(config),\n            chainspec,\n            metrics: Arc::new(metrics),\n            local_addr: Arc::new(OnceCell::new()),\n            shutdown_trigger: Arc::new(Notify::new()),\n            server_join_handle: OnceCell::new(),\n            rate_limiter: OnceCell::new(),\n        }\n    }\n\n    /// Returns the binding address.\n    ///\n    /// Only used in testing.\n    #[cfg(test)]\n    pub(crate) fn bind_address(&self) -> Option<SocketAddr> {\n        self.local_addr.get().cloned()\n    }\n}\n\nstruct BinaryRequestTerminationDelayValues {\n    get_record: TimeDiff,\n    get_information: TimeDiff,\n    get_state: TimeDiff,\n    get_trie: TimeDiff,\n    accept_transaction: TimeDiff,\n    speculative_exec: TimeDiff,\n}\n\nimpl BinaryRequestTerminationDelayValues {\n    fn from_config(config: &Config) -> Self {\n        BinaryRequestTerminationDelayValues {\n            get_record: config.get_record_request_termination_delay,\n            get_information: 
config.get_information_request_termination_delay,\n            get_state: config.get_state_request_termination_delay,\n            get_trie: config.get_trie_request_termination_delay,\n            accept_transaction: config.accept_transaction_request_termination_delay,\n            speculative_exec: config.speculative_exec_request_termination_delay,\n        }\n    }\n    fn get_life_termination_delay(&self, request: &Command) -> TimeDiff {\n        match request {\n            Command::Get(GetRequest::Record { .. }) => self.get_record,\n            Command::Get(GetRequest::Information { .. }) => self.get_information,\n            Command::Get(GetRequest::State(_)) => self.get_state,\n            Command::Get(GetRequest::Trie { .. }) => self.get_trie,\n            Command::TryAcceptTransaction { .. } => self.accept_transaction,\n            Command::TrySpeculativeExec { .. } => self.speculative_exec,\n        }\n    }\n}\n\nasync fn handle_request<REv>(\n    req: Command,\n    effect_builder: EffectBuilder<REv>,\n    config: &Config,\n    metrics: &Metrics,\n    protocol_version: ProtocolVersion,\n) -> BinaryResponse\nwhere\n    REv: From<Event>\n        + From<StorageRequest>\n        + From<ContractRuntimeRequest>\n        + From<AcceptTransactionRequest>\n        + From<NetworkInfoRequest>\n        + From<ReactorInfoRequest>\n        + From<ConsensusRequest>\n        + From<BlockSynchronizerRequest>\n        + From<UpgradeWatcherRequest>\n        + From<ChainspecRawBytesRequest>\n        + Send,\n{\n    match req {\n        Command::TryAcceptTransaction { transaction } => {\n            metrics.binary_port_try_accept_transaction_count.inc();\n            try_accept_transaction(effect_builder, transaction, false).await\n        }\n        Command::TrySpeculativeExec { transaction } => {\n            metrics.binary_port_try_speculative_exec_count.inc();\n            if !config.allow_request_speculative_exec {\n                debug!(\n                    hash = 
%transaction.hash(),\n                    \"received a request for speculative execution while the feature is disabled\"\n                );\n                return BinaryResponse::new_error(ErrorCode::FunctionDisabled);\n            }\n            let response = try_accept_transaction(effect_builder, transaction.clone(), true).await;\n            if !response.is_success() {\n                return response;\n            }\n            try_speculative_execution(effect_builder, transaction).await\n        }\n        Command::Get(get_req) => {\n            handle_get_request(get_req, effect_builder, config, metrics, protocol_version).await\n        }\n    }\n}\n\nasync fn handle_get_request<REv>(\n    get_req: GetRequest,\n    effect_builder: EffectBuilder<REv>,\n    config: &Config,\n    metrics: &Metrics,\n    protocol_version: ProtocolVersion,\n) -> BinaryResponse\nwhere\n    REv: From<Event>\n        + From<StorageRequest>\n        + From<NetworkInfoRequest>\n        + From<ReactorInfoRequest>\n        + From<ConsensusRequest>\n        + From<BlockSynchronizerRequest>\n        + From<UpgradeWatcherRequest>\n        + From<ChainspecRawBytesRequest>\n        + From<ContractRuntimeRequest>\n        + Send,\n{\n    match get_req {\n        // this workaround is in place because get_block_transfers performs a lazy migration\n        GetRequest::Record {\n            record_type_tag,\n            key,\n        } if RecordId::try_from(record_type_tag) == Ok(RecordId::Transfer) => {\n            metrics.binary_port_get_record_count.inc();\n            if key.is_empty() {\n                return BinaryResponse::new_empty();\n            }\n            let Ok(block_hash) = bytesrepr::deserialize_from_slice(&key) else {\n                debug!(\"received an incorrectly serialized key for a transfer record\");\n                return BinaryResponse::new_error(ErrorCode::TransferRecordMalformedKey);\n            };\n            let Some(transfers) = effect_builder\n           
     .get_block_transfers_from_storage(block_hash)\n                .await\n            else {\n                return BinaryResponse::new_empty();\n            };\n            let Ok(serialized) = bincode::serialize(&transfers) else {\n                return BinaryResponse::new_error(ErrorCode::InternalError);\n            };\n            BinaryResponse::from_raw_bytes(ResponseType::Transfers, serialized)\n        }\n        GetRequest::Record {\n            record_type_tag,\n            key,\n        } => {\n            metrics.binary_port_get_record_count.inc();\n            if key.is_empty() {\n                return BinaryResponse::new_empty();\n            }\n            match RecordId::try_from(record_type_tag) {\n                Ok(record_id) => {\n                    let Some(db_bytes) = effect_builder.get_raw_data(record_id, key).await else {\n                        return BinaryResponse::new_empty();\n                    };\n                    let payload_type =\n                        ResponseType::from_record_id(record_id, db_bytes.is_legacy());\n                    BinaryResponse::from_raw_bytes(payload_type, db_bytes.into_raw_bytes())\n                }\n                Err(_) => BinaryResponse::new_error(ErrorCode::UnsupportedRequest),\n            }\n        }\n        GetRequest::Information { info_type_tag, key } => {\n            metrics.binary_port_get_info_count.inc();\n            let Ok(tag) = InformationRequestTag::try_from(info_type_tag) else {\n                debug!(\n                    tag = info_type_tag,\n                    \"received an unknown information request tag\"\n                );\n                return BinaryResponse::new_error(ErrorCode::UnsupportedRequest);\n            };\n            match InformationRequest::try_from((tag, &key[..])) {\n                Ok(req) => handle_info_request(req, effect_builder, protocol_version).await,\n                Err(error) => {\n                    debug!(?tag, %error, \"failed to 
parse an information request\");\n                    BinaryResponse::new_error(ErrorCode::MalformedInformationRequest)\n                }\n            }\n        }\n        GetRequest::State(req) => {\n            metrics.binary_port_get_state_count.inc();\n            handle_state_request(effect_builder, *req, protocol_version, config).await\n        }\n        GetRequest::Trie { trie_key } => {\n            metrics.binary_port_get_trie_count.inc();\n            handle_trie_request(effect_builder, trie_key, config).await\n        }\n    }\n}\n\nasync fn handle_get_items_by_prefix<REv>(\n    state_identifier: Option<GlobalStateIdentifier>,\n    key_prefix: KeyPrefix,\n    effect_builder: EffectBuilder<REv>,\n) -> BinaryResponse\nwhere\n    REv: From<Event> + From<ContractRuntimeRequest> + From<StorageRequest>,\n{\n    let Some(state_root_hash) = resolve_state_root_hash(effect_builder, state_identifier).await\n    else {\n        return BinaryResponse::new_error(ErrorCode::RootNotFound);\n    };\n    let storage_key_prefix = match key_prefix {\n        KeyPrefix::DelegatorBidAddrsByValidator(hash) => {\n            StorageKeyPrefix::DelegatorBidAddrsByValidator(hash)\n        }\n        KeyPrefix::MessagesByEntity(addr) => StorageKeyPrefix::MessageEntriesByEntity(addr),\n        KeyPrefix::MessagesByEntityAndTopic(addr, topic) => {\n            StorageKeyPrefix::MessagesByEntityAndTopic(addr, topic)\n        }\n        KeyPrefix::NamedKeysByEntity(addr) => StorageKeyPrefix::NamedKeysByEntity(addr),\n        KeyPrefix::GasBalanceHoldsByPurse(purse) => StorageKeyPrefix::GasBalanceHoldsByPurse(purse),\n        KeyPrefix::ProcessingBalanceHoldsByPurse(purse) => {\n            StorageKeyPrefix::ProcessingBalanceHoldsByPurse(purse)\n        }\n        KeyPrefix::EntryPointsV1ByEntity(addr) => StorageKeyPrefix::EntryPointsV1ByEntity(addr),\n        KeyPrefix::EntryPointsV2ByEntity(addr) => StorageKeyPrefix::EntryPointsV2ByEntity(addr),\n    };\n    let request = 
PrefixedValuesRequest::new(state_root_hash, storage_key_prefix);\n    match effect_builder.get_prefixed_values(request).await {\n        PrefixedValuesResult::Success { values, .. } => BinaryResponse::from_value(values),\n        PrefixedValuesResult::RootNotFound => BinaryResponse::new_error(ErrorCode::RootNotFound),\n        PrefixedValuesResult::Failure(error) => {\n            debug!(%error, \"failed when querying for values by prefix\");\n            BinaryResponse::new_error(ErrorCode::InternalError)\n        }\n    }\n}\n\nasync fn handle_get_all_items<REv>(\n    state_identifier: Option<GlobalStateIdentifier>,\n    key_tag: casper_types::KeyTag,\n    effect_builder: EffectBuilder<REv>,\n) -> BinaryResponse\nwhere\n    REv: From<Event> + From<ContractRuntimeRequest> + From<StorageRequest>,\n{\n    let Some(state_root_hash) = resolve_state_root_hash(effect_builder, state_identifier).await\n    else {\n        return BinaryResponse::new_error(ErrorCode::RootNotFound);\n    };\n    let request = TaggedValuesRequest::new(state_root_hash, TaggedValuesSelection::All(key_tag));\n    match effect_builder.get_tagged_values(request).await {\n        TaggedValuesResult::Success { values, .. 
} => BinaryResponse::from_value(values),\n        TaggedValuesResult::RootNotFound => BinaryResponse::new_error(ErrorCode::RootNotFound),\n        TaggedValuesResult::Failure(error) => {\n            debug!(%error, \"failed when querying for all values by tag\");\n            BinaryResponse::new_error(ErrorCode::InternalError)\n        }\n    }\n}\n\nasync fn handle_state_request<REv>(\n    effect_builder: EffectBuilder<REv>,\n    request: GlobalStateRequest,\n    protocol_version: ProtocolVersion,\n    config: &Config,\n) -> BinaryResponse\nwhere\n    REv: From<Event>\n        + From<ContractRuntimeRequest>\n        + From<StorageRequest>\n        + From<ReactorInfoRequest>,\n{\n    let (state_identifier, qualifier) = request.destructure();\n    match qualifier {\n        GlobalStateEntityQualifier::Item { base_key, path } => {\n            let Some(state_root_hash) =\n                resolve_state_root_hash(effect_builder, state_identifier).await\n            else {\n                return BinaryResponse::new_error(ErrorCode::RootNotFound);\n            };\n            match get_global_state_item(effect_builder, state_root_hash, base_key, path).await {\n                Ok(Some(result)) => BinaryResponse::from_value(result),\n                Ok(None) => BinaryResponse::new_empty(),\n                Err(err) => BinaryResponse::new_error(err),\n            }\n        }\n        GlobalStateEntityQualifier::AllItems { key_tag } => {\n            if !config.allow_request_get_all_values {\n                debug!(%key_tag, \"received a request for items by key tag while the feature is disabled\");\n                BinaryResponse::new_error(ErrorCode::FunctionDisabled)\n            } else {\n                handle_get_all_items(state_identifier, key_tag, effect_builder).await\n            }\n        }\n        GlobalStateEntityQualifier::DictionaryItem { identifier } => {\n            let Some(state_root_hash) =\n                resolve_state_root_hash(effect_builder, 
state_identifier).await\n            else {\n                return BinaryResponse::new_error(ErrorCode::RootNotFound);\n            };\n            let result = match identifier {\n                DictionaryItemIdentifier::AccountNamedKey {\n                    hash,\n                    dictionary_name,\n                    dictionary_item_key,\n                } => {\n                    get_dictionary_item_by_legacy_named_key(\n                        effect_builder,\n                        state_root_hash,\n                        Key::Account(hash),\n                        dictionary_name,\n                        dictionary_item_key,\n                    )\n                    .await\n                }\n                DictionaryItemIdentifier::ContractNamedKey {\n                    hash,\n                    dictionary_name,\n                    dictionary_item_key,\n                } => {\n                    get_dictionary_item_by_legacy_named_key(\n                        effect_builder,\n                        state_root_hash,\n                        Key::Hash(hash),\n                        dictionary_name,\n                        dictionary_item_key,\n                    )\n                    .await\n                }\n                DictionaryItemIdentifier::EntityNamedKey {\n                    addr,\n                    dictionary_name,\n                    dictionary_item_key,\n                } => {\n                    get_dictionary_item_by_named_key(\n                        effect_builder,\n                        state_root_hash,\n                        addr,\n                        dictionary_name,\n                        dictionary_item_key,\n                    )\n                    .await\n                }\n                DictionaryItemIdentifier::URef {\n                    seed_uref,\n                    dictionary_item_key,\n                } => {\n                    let key = Key::dictionary(seed_uref, 
dictionary_item_key.as_bytes());\n                    get_global_state_item(effect_builder, state_root_hash, key, vec![])\n                        .await\n                        .map(|maybe_res| maybe_res.map(|res| DictionaryQueryResult::new(key, res)))\n                }\n                DictionaryItemIdentifier::DictionaryItem(addr) => {\n                    let key = Key::Dictionary(addr);\n                    get_global_state_item(effect_builder, state_root_hash, key, vec![])\n                        .await\n                        .map(|maybe_res| maybe_res.map(|res| DictionaryQueryResult::new(key, res)))\n                }\n            };\n            match result {\n                Ok(Some(result)) => BinaryResponse::from_value(result),\n                Ok(None) => BinaryResponse::new_empty(),\n                Err(err) => BinaryResponse::new_error(err),\n            }\n        }\n        GlobalStateEntityQualifier::Balance { purse_identifier } => {\n            let Some(state_root_hash) =\n                resolve_state_root_hash(effect_builder, state_identifier).await\n            else {\n                return BinaryResponse::new_empty();\n            };\n            get_balance(\n                effect_builder,\n                state_root_hash,\n                purse_identifier,\n                protocol_version,\n            )\n            .await\n        }\n        GlobalStateEntityQualifier::ItemsByPrefix { key_prefix } => {\n            handle_get_items_by_prefix(state_identifier, key_prefix, effect_builder).await\n        }\n    }\n}\n\nasync fn handle_trie_request<REv>(\n    effect_builder: EffectBuilder<REv>,\n    trie_key: Digest,\n    config: &Config,\n) -> BinaryResponse\nwhere\n    REv: From<Event>\n        + From<ContractRuntimeRequest>\n        + From<StorageRequest>\n        + From<ReactorInfoRequest>,\n{\n    if !config.allow_request_get_trie {\n        debug!(%trie_key, \"received a trie request while the feature is disabled\");\n        
BinaryResponse::new_error(ErrorCode::FunctionDisabled)\n    } else {\n        let req = TrieRequest::new(trie_key, None);\n        match effect_builder.get_trie(req).await.into_raw() {\n            Ok(result) => {\n                BinaryResponse::from_value(GetTrieFullResult::new(result.map(TrieRaw::into_inner)))\n            }\n            Err(error) => {\n                debug!(%error, \"failed when querying for a trie\");\n                BinaryResponse::new_error(ErrorCode::InternalError)\n            }\n        }\n    }\n}\n\nasync fn get_dictionary_item_by_legacy_named_key<REv>(\n    effect_builder: EffectBuilder<REv>,\n    state_root_hash: Digest,\n    entity_key: Key,\n    dictionary_name: String,\n    dictionary_item_key: String,\n) -> Result<Option<DictionaryQueryResult>, ErrorCode>\nwhere\n    REv: From<Event> + From<ContractRuntimeRequest> + From<StorageRequest>,\n{\n    match effect_builder\n        .query_global_state(QueryRequest::new(state_root_hash, entity_key, vec![]))\n        .await\n    {\n        QueryResult::Success { value, .. 
} => {\n            let named_keys = match &*value {\n                StoredValue::Account(account) => account.named_keys(),\n                StoredValue::Contract(contract) => contract.named_keys(),\n                value => {\n                    debug!(\n                        value_type = value.type_name(),\n                        \"unexpected stored value found when querying for a dictionary\"\n                    );\n                    return Err(ErrorCode::DictionaryURefNotFound);\n                }\n            };\n            let Some(uref) = named_keys.get(&dictionary_name).and_then(Key::as_uref) else {\n                debug!(\n                    dictionary_name,\n                    \"dictionary seed URef not found in named keys\"\n                );\n                return Err(ErrorCode::DictionaryURefNotFound);\n            };\n            let key = Key::dictionary(*uref, dictionary_item_key.as_bytes());\n            let Some(query_result) =\n                get_global_state_item(effect_builder, state_root_hash, key, vec![]).await?\n            else {\n                return Ok(None);\n            };\n\n            Ok(Some(DictionaryQueryResult::new(key, query_result)))\n        }\n        QueryResult::RootNotFound => {\n            debug!(\"root not found when querying for a dictionary seed URef\");\n            Err(ErrorCode::DictionaryURefNotFound)\n        }\n        QueryResult::ValueNotFound(error) => {\n            debug!(%error, \"value not found when querying for a dictionary seed URef\");\n            Err(ErrorCode::DictionaryURefNotFound)\n        }\n        QueryResult::Failure(error) => {\n            debug!(%error, \"failed when querying for a dictionary seed URef\");\n            Err(ErrorCode::FailedQuery)\n        }\n    }\n}\n\nasync fn get_dictionary_item_by_named_key<REv>(\n    effect_builder: EffectBuilder<REv>,\n    state_root_hash: Digest,\n    entity_addr: EntityAddr,\n    dictionary_name: String,\n    dictionary_item_key: 
String,\n) -> Result<Option<DictionaryQueryResult>, ErrorCode>\nwhere\n    REv: From<Event> + From<ContractRuntimeRequest> + From<StorageRequest>,\n{\n    let Ok(key_addr) = NamedKeyAddr::new_from_string(entity_addr, dictionary_name) else {\n        return Err(ErrorCode::InternalError);\n    };\n    let req = QueryRequest::new(state_root_hash, Key::NamedKey(key_addr), vec![]);\n    match effect_builder.query_global_state(req).await {\n        QueryResult::Success { value, .. } => {\n            let key_val = match &*value {\n                StoredValue::NamedKey(key_val) => key_val,\n                value => {\n                    debug!(\n                        value_type = value.type_name(),\n                        \"unexpected stored value found when querying for a dictionary\"\n                    );\n                    return Err(ErrorCode::DictionaryURefNotFound);\n                }\n            };\n            let uref = match key_val.get_key() {\n                Ok(Key::URef(uref)) => uref,\n                result => {\n                    debug!(\n                        ?result,\n                        \"unexpected named key result when querying for a dictionary\"\n                    );\n                    return Err(ErrorCode::DictionaryURefNotFound);\n                }\n            };\n            let key = Key::dictionary(uref, dictionary_item_key.as_bytes());\n            let Some(query_result) =\n                get_global_state_item(effect_builder, state_root_hash, key, vec![]).await?\n            else {\n                return Ok(None);\n            };\n            Ok(Some(DictionaryQueryResult::new(key, query_result)))\n        }\n        QueryResult::RootNotFound => {\n            debug!(\"root not found when querying for a dictionary seed URef\");\n            Err(ErrorCode::DictionaryURefNotFound)\n        }\n        QueryResult::ValueNotFound(error) => {\n            debug!(%error, \"value not found when querying for a dictionary seed 
URef\");\n            Err(ErrorCode::DictionaryURefNotFound)\n        }\n        QueryResult::Failure(error) => {\n            debug!(%error, \"failed when querying for a dictionary seed URef\");\n            Err(ErrorCode::FailedQuery)\n        }\n    }\n}\n\nasync fn get_balance<REv>(\n    effect_builder: EffectBuilder<REv>,\n    state_root_hash: Digest,\n    purse_identifier: PurseIdentifier,\n    protocol_version: ProtocolVersion,\n) -> BinaryResponse\nwhere\n    REv: From<Event>\n        + From<StorageRequest>\n        + From<ContractRuntimeRequest>\n        + From<ReactorInfoRequest>,\n{\n    let balance_id = match purse_identifier {\n        PurseIdentifier::Payment => BalanceIdentifier::Payment,\n        PurseIdentifier::Accumulate => BalanceIdentifier::Accumulate,\n        PurseIdentifier::Purse(uref) => BalanceIdentifier::Purse(uref),\n        PurseIdentifier::PublicKey(pub_key) => BalanceIdentifier::Public(pub_key),\n        PurseIdentifier::Account(account) => BalanceIdentifier::Account(account),\n        PurseIdentifier::Entity(entity) => BalanceIdentifier::Entity(entity),\n    };\n    let balance_handling = BalanceHandling::Available;\n\n    let balance_req = BalanceRequest::new(\n        state_root_hash,\n        protocol_version,\n        balance_id,\n        balance_handling,\n        ProofHandling::Proofs,\n    );\n    match effect_builder.get_balance(balance_req).await {\n        BalanceResult::RootNotFound => BinaryResponse::new_error(ErrorCode::RootNotFound),\n        BalanceResult::Success {\n            total_balance,\n            available_balance,\n            proofs_result,\n            ..\n        } => {\n            let ProofsResult::Proofs {\n                total_balance_proof,\n                balance_holds,\n            } = proofs_result\n            else {\n                warn!(\"binary port received no proofs for a balance request with proofs\");\n                return BinaryResponse::new_error(ErrorCode::InternalError);\n        
    };\n            let response = BalanceResponse {\n                total_balance,\n                available_balance,\n                total_balance_proof,\n                balance_holds,\n            };\n            BinaryResponse::from_value(response)\n        }\n        BalanceResult::Failure(TrackingCopyError::KeyNotFound(_)) => {\n            BinaryResponse::new_error(ErrorCode::PurseNotFound)\n        }\n        BalanceResult::Failure(error) => {\n            debug!(%error, \"failed when querying for a balance\");\n            BinaryResponse::new_error(ErrorCode::FailedQuery)\n        }\n    }\n}\n\nasync fn get_global_state_item<REv>(\n    effect_builder: EffectBuilder<REv>,\n    state_root_hash: Digest,\n    base_key: Key,\n    path: Vec<String>,\n) -> Result<Option<GlobalStateQueryResult>, ErrorCode>\nwhere\n    REv: From<Event> + From<ContractRuntimeRequest> + From<StorageRequest>,\n{\n    match effect_builder\n        .query_global_state(QueryRequest::new(state_root_hash, base_key, path))\n        .await\n    {\n        QueryResult::Success { value, proofs } => {\n            Ok(Some(GlobalStateQueryResult::new(*value, proofs)))\n        }\n        QueryResult::RootNotFound => Err(ErrorCode::RootNotFound),\n        QueryResult::ValueNotFound(error) => {\n            debug!(%error, \"value not found when querying for a global state item\");\n            Err(ErrorCode::NotFound)\n        }\n        QueryResult::Failure(error) => {\n            debug!(%error, \"failed when querying for a global state item\");\n            Err(ErrorCode::FailedQuery)\n        }\n    }\n}\n\nasync fn get_contract_package<REv>(\n    effect_builder: EffectBuilder<REv>,\n    state_root_hash: Digest,\n    hash: ContractPackageHash,\n) -> Result<Option<Either<ValueWithProof<ContractPackage>, ValueWithProof<Package>>>, ErrorCode>\nwhere\n    REv: From<Event>\n        + From<StorageRequest>\n        + From<ContractRuntimeRequest>\n        + From<ReactorInfoRequest>,\n{\n    let 
key = Key::Hash(hash.value());\n    let Some(result) = get_global_state_item(effect_builder, state_root_hash, key, vec![]).await?\n    else {\n        return Ok(None);\n    };\n    match result.into_inner() {\n        (StoredValue::ContractPackage(contract), proof) => {\n            Ok(Some(Either::Left(ValueWithProof::new(contract, proof))))\n        }\n        (other, _) => {\n            let Some((Key::SmartContract(addr), _)) = other\n                .as_cl_value()\n                .and_then(|cl_val| cl_val.to_t::<(Key, URef)>().ok())\n            else {\n                debug!(\n                    ?other,\n                    \"unexpected stored value found when querying for a contract package\"\n                );\n                return Err(ErrorCode::InternalError);\n            };\n            let package = get_package(effect_builder, state_root_hash, addr).await?;\n            Ok(package.map(Either::Right))\n        }\n    }\n}\n\nasync fn get_package<REv>(\n    effect_builder: EffectBuilder<REv>,\n    state_root_hash: Digest,\n    package_addr: PackageAddr,\n) -> Result<Option<ValueWithProof<Package>>, ErrorCode>\nwhere\n    REv: From<Event>\n        + From<StorageRequest>\n        + From<ContractRuntimeRequest>\n        + From<ReactorInfoRequest>,\n{\n    let key = Key::SmartContract(package_addr);\n    let Some(result) = get_global_state_item(effect_builder, state_root_hash, key, vec![]).await?\n    else {\n        return Ok(None);\n    };\n    match result.into_inner() {\n        (StoredValue::SmartContract(contract), proof) => {\n            Ok(Some(ValueWithProof::new(contract, proof)))\n        }\n        other => {\n            debug!(\n                ?other,\n                \"unexpected stored value found when querying for a package\"\n            );\n            Err(ErrorCode::InternalError)\n        }\n    }\n}\n\nasync fn get_contract<REv>(\n    effect_builder: EffectBuilder<REv>,\n    state_root_hash: Digest,\n    hash: ContractHash,\n    
include_wasm: bool,\n) -> Result<Option<Either<ContractInformation, AddressableEntityInformation>>, ErrorCode>\nwhere\n    REv: From<Event>\n        + From<StorageRequest>\n        + From<ContractRuntimeRequest>\n        + From<ReactorInfoRequest>,\n{\n    let key = Key::Hash(hash.value());\n    let Some(result) = get_global_state_item(effect_builder, state_root_hash, key, vec![]).await?\n    else {\n        return Ok(None);\n    };\n    match result.into_inner() {\n        (StoredValue::Contract(contract), proof)\n            if include_wasm && contract.contract_wasm_hash() != ContractWasmHash::default() =>\n        {\n            let wasm_hash = contract.contract_wasm_hash();\n            let Some(wasm) = get_contract_wasm(effect_builder, state_root_hash, wasm_hash).await?\n            else {\n                return Ok(None);\n            };\n            Ok(Some(Either::Left(ContractInformation::new(\n                hash,\n                ValueWithProof::new(contract, proof),\n                Some(wasm),\n            ))))\n        }\n        (StoredValue::Contract(contract), proof) => Ok(Some(Either::Left(\n            ContractInformation::new(hash, ValueWithProof::new(contract, proof), None),\n        ))),\n        (other, _) => {\n            let Some(Key::AddressableEntity(addr)) = other\n                .as_cl_value()\n                .and_then(|cl_val| cl_val.to_t::<Key>().ok())\n            else {\n                debug!(\n                    ?other,\n                    \"unexpected stored value found when querying for a contract\"\n                );\n                return Err(ErrorCode::InternalError);\n            };\n            let entity = get_entity(effect_builder, state_root_hash, addr, include_wasm).await?;\n            Ok(entity.map(Either::Right))\n        }\n    }\n}\n\nasync fn get_account<REv>(\n    effect_builder: EffectBuilder<REv>,\n    state_root_hash: Digest,\n    hash: AccountHash,\n    include_bytecode: bool,\n) -> 
Result<Option<Either<AccountInformation, AddressableEntityInformation>>, ErrorCode>\nwhere\n    REv: From<Event>\n        + From<StorageRequest>\n        + From<ContractRuntimeRequest>\n        + From<ReactorInfoRequest>,\n{\n    let key = Key::Account(hash);\n    let Some(result) = get_global_state_item(effect_builder, state_root_hash, key, vec![]).await?\n    else {\n        return Ok(None);\n    };\n    match result.into_inner() {\n        (StoredValue::Account(account), proof) => {\n            Ok(Some(Either::Left(AccountInformation::new(account, proof))))\n        }\n        (other, _) => {\n            let Some(Key::AddressableEntity(addr)) = other\n                .as_cl_value()\n                .and_then(|cl_val| cl_val.to_t::<Key>().ok())\n            else {\n                debug!(\n                    ?other,\n                    \"unexpected stored value found when querying for an account\"\n                );\n                return Err(ErrorCode::InternalError);\n            };\n            let entity =\n                get_entity(effect_builder, state_root_hash, addr, include_bytecode).await?;\n            Ok(entity.map(Either::Right))\n        }\n    }\n}\n\nasync fn get_entity<REv>(\n    effect_builder: EffectBuilder<REv>,\n    state_root_hash: Digest,\n    addr: EntityAddr,\n    include_bytecode: bool,\n) -> Result<Option<AddressableEntityInformation>, ErrorCode>\nwhere\n    REv: From<Event>\n        + From<StorageRequest>\n        + From<ContractRuntimeRequest>\n        + From<ReactorInfoRequest>,\n{\n    let key = Key::from(addr);\n    let Some(result) = get_global_state_item(effect_builder, state_root_hash, key, vec![]).await?\n    else {\n        return Ok(None);\n    };\n    match result.into_inner() {\n        (StoredValue::AddressableEntity(entity), proof)\n            if include_bytecode && entity.byte_code_hash() != ByteCodeHash::default() =>\n        {\n            let Some(bytecode) =\n                
get_contract_bytecode(effect_builder, state_root_hash, entity.byte_code_hash())\n                    .await?\n            else {\n                return Ok(None);\n            };\n            Ok(Some(AddressableEntityInformation::new(\n                addr,\n                ValueWithProof::new(entity, proof),\n                Some(bytecode),\n            )))\n        }\n        (StoredValue::AddressableEntity(entity), proof) => Ok(Some(\n            AddressableEntityInformation::new(addr, ValueWithProof::new(entity, proof), None),\n        )),\n        (other, _) => {\n            debug!(\n                ?other,\n                \"unexpected stored value found when querying for an entity\"\n            );\n            Err(ErrorCode::InternalError)\n        }\n    }\n}\n\nasync fn get_contract_wasm<REv>(\n    effect_builder: EffectBuilder<REv>,\n    state_root_hash: Digest,\n    hash: ContractWasmHash,\n) -> Result<Option<ValueWithProof<ContractWasm>>, ErrorCode>\nwhere\n    REv: From<Event>\n        + From<StorageRequest>\n        + From<ContractRuntimeRequest>\n        + From<ReactorInfoRequest>,\n{\n    let key = Key::from(hash);\n    let Some(value) = get_global_state_item(effect_builder, state_root_hash, key, vec![]).await?\n    else {\n        return Ok(None);\n    };\n    match value.into_inner() {\n        (StoredValue::ContractWasm(wasm), proof) => Ok(Some(ValueWithProof::new(wasm, proof))),\n        other => {\n            debug!(\n                ?other,\n                \"unexpected stored value found when querying for Wasm\"\n            );\n            Err(ErrorCode::InternalError)\n        }\n    }\n}\n\nasync fn get_contract_bytecode<REv>(\n    effect_builder: EffectBuilder<REv>,\n    state_root_hash: Digest,\n    addr: ByteCodeHash,\n) -> Result<Option<ValueWithProof<ByteCode>>, ErrorCode>\nwhere\n    REv: From<Event>\n        + From<StorageRequest>\n        + From<ContractRuntimeRequest>\n        + From<ReactorInfoRequest>,\n{\n    let key = 
Key::ByteCode(ByteCodeAddr::new_wasm_addr(addr.value()));\n    let Some(value) = get_global_state_item(effect_builder, state_root_hash, key, vec![]).await?\n    else {\n        return Ok(None);\n    };\n    match value.into_inner() {\n        (StoredValue::ByteCode(bytecode), proof) => Ok(Some(ValueWithProof::new(bytecode, proof))),\n        other => {\n            debug!(\n                ?other,\n                \"unexpected stored value found when querying for bytecode\"\n            );\n            Err(ErrorCode::InternalError)\n        }\n    }\n}\n\nasync fn handle_info_request<REv>(\n    req: InformationRequest,\n    effect_builder: EffectBuilder<REv>,\n    protocol_version: ProtocolVersion,\n) -> BinaryResponse\nwhere\n    REv: From<Event>\n        + From<StorageRequest>\n        + From<NetworkInfoRequest>\n        + From<ReactorInfoRequest>\n        + From<ConsensusRequest>\n        + From<BlockSynchronizerRequest>\n        + From<UpgradeWatcherRequest>\n        + From<ChainspecRawBytesRequest>\n        + From<ContractRuntimeRequest>\n        + Send,\n{\n    match req {\n        InformationRequest::BlockHeader(identifier) => {\n            let maybe_header = resolve_block_header(effect_builder, identifier).await;\n            BinaryResponse::from_option(maybe_header)\n        }\n        InformationRequest::BlockWithSignatures(identifier) => {\n            let Some(height) = resolve_block_height(effect_builder, identifier).await else {\n                return BinaryResponse::new_empty();\n            };\n            let Some(block) = effect_builder\n                .get_block_at_height_with_metadata_from_storage(height, true)\n                .await\n            else {\n                return BinaryResponse::new_empty();\n            };\n            BinaryResponse::from_value(BlockWithSignatures::new(\n                block.block,\n                block.block_signatures,\n            ))\n        }\n        InformationRequest::Transaction {\n            
hash,\n            with_finalized_approvals,\n        } => {\n            let Some((transaction, execution_info)) = effect_builder\n                .get_transaction_and_exec_info_from_storage(hash, with_finalized_approvals)\n                .await\n            else {\n                return BinaryResponse::new_empty();\n            };\n            BinaryResponse::from_value(TransactionWithExecutionInfo::new(\n                transaction,\n                execution_info,\n            ))\n        }\n        InformationRequest::Peers => {\n            BinaryResponse::from_value(Peers::from(effect_builder.network_peers().await))\n        }\n        InformationRequest::Uptime => BinaryResponse::from_value(effect_builder.get_uptime().await),\n        InformationRequest::LastProgress => {\n            BinaryResponse::from_value(effect_builder.get_last_progress().await)\n        }\n        InformationRequest::ReactorState => {\n            let state = effect_builder.get_reactor_state().await;\n            BinaryResponse::from_value(ReactorStateName::new(state))\n        }\n        InformationRequest::NetworkName => {\n            BinaryResponse::from_value(effect_builder.get_network_name().await)\n        }\n        InformationRequest::ConsensusValidatorChanges => {\n            BinaryResponse::from_value(effect_builder.get_consensus_validator_changes().await)\n        }\n        InformationRequest::BlockSynchronizerStatus => {\n            BinaryResponse::from_value(effect_builder.get_block_synchronizer_status().await)\n        }\n        InformationRequest::AvailableBlockRange => BinaryResponse::from_value(\n            effect_builder\n                .get_available_block_range_from_storage()\n                .await,\n        ),\n        InformationRequest::NextUpgrade => {\n            BinaryResponse::from_option(effect_builder.get_next_upgrade().await)\n        }\n        InformationRequest::ConsensusStatus => {\n            
BinaryResponse::from_option(effect_builder.consensus_status().await)\n        }\n        InformationRequest::ChainspecRawBytes => {\n            BinaryResponse::from_value((*effect_builder.get_chainspec_raw_bytes().await).clone())\n        }\n        InformationRequest::LatestSwitchBlockHeader => BinaryResponse::from_option(\n            effect_builder\n                .get_latest_switch_block_header_from_storage()\n                .await,\n        ),\n        InformationRequest::NodeStatus => {\n            let (\n                node_uptime,\n                network_name,\n                last_added_block,\n                peers,\n                next_upgrade,\n                consensus_status,\n                reactor_state,\n                last_progress,\n                available_block_range,\n                block_sync,\n                latest_switch_block_header,\n            ) = join!(\n                effect_builder.get_uptime(),\n                effect_builder.get_network_name(),\n                effect_builder.get_highest_complete_block_from_storage(),\n                effect_builder.network_peers(),\n                effect_builder.get_next_upgrade(),\n                effect_builder.consensus_status(),\n                effect_builder.get_reactor_state(),\n                effect_builder.get_last_progress(),\n                effect_builder.get_available_block_range_from_storage(),\n                effect_builder.get_block_synchronizer_status(),\n                effect_builder.get_latest_switch_block_header_from_storage(),\n            );\n            let starting_state_root_hash = effect_builder\n                .get_block_header_at_height_from_storage(available_block_range.low(), true)\n                .await\n                .map(|header| *header.state_root_hash())\n                .unwrap_or_default();\n            let (our_public_signing_key, round_length) =\n                consensus_status.map_or((None, None), |consensus_status| {\n                  
  (\n                        Some(consensus_status.validator_public_key().clone()),\n                        consensus_status.round_length(),\n                    )\n                });\n            let reactor_state = ReactorStateName::new(reactor_state);\n\n            let Ok(uptime) = TimeDiff::try_from(node_uptime) else {\n                return BinaryResponse::new_error(ErrorCode::InternalError);\n            };\n\n            let status = NodeStatus {\n                protocol_version,\n                peers: Peers::from(peers),\n                build_version: crate::VERSION_STRING.clone(),\n                chainspec_name: network_name.into(),\n                starting_state_root_hash,\n                last_added_block_info: last_added_block.map(Into::into),\n                our_public_signing_key,\n                round_length,\n                next_upgrade,\n                uptime,\n                reactor_state,\n                last_progress: last_progress.into(),\n                available_block_range,\n                block_sync,\n                latest_switch_block_hash: latest_switch_block_header\n                    .map(|header| header.block_hash()),\n            };\n            BinaryResponse::from_value(status)\n        }\n        InformationRequest::Reward {\n            era_identifier,\n            validator,\n            delegator,\n        } => {\n            let Some(header) =\n                resolve_era_switch_block_header(effect_builder, era_identifier).await\n            else {\n                return BinaryResponse::new_error(ErrorCode::SwitchBlockNotFound);\n            };\n            let Some(previous_height) = header.height().checked_sub(1) else {\n                // there's not going to be any rewards for the genesis block\n                debug!(\"received a request for rewards in the genesis block\");\n                return BinaryResponse::new_empty();\n            };\n            let Some(parent_header) = effect_builder\n        
        .get_block_header_at_height_from_storage(previous_height, true)\n                .await\n            else {\n                return BinaryResponse::new_error(ErrorCode::SwitchBlockParentNotFound);\n            };\n            let snapshot_request =\n                SeigniorageRecipientsRequest::new(*parent_header.state_root_hash());\n\n            let (snapshot, rewards_ratio) = match effect_builder\n                .get_seigniorage_recipients_snapshot_from_contract_runtime(snapshot_request)\n                .await\n            {\n                SeigniorageRecipientsResult::Success {\n                    seigniorage_recipients,\n                    rewards_ratio,\n                } => (seigniorage_recipients, rewards_ratio),\n                SeigniorageRecipientsResult::RootNotFound => {\n                    return BinaryResponse::new_error(ErrorCode::RootNotFound)\n                }\n                SeigniorageRecipientsResult::Failure(error) => {\n                    warn!(%error, \"failed when querying for seigniorage recipients\");\n                    return BinaryResponse::new_error(ErrorCode::FailedQuery);\n                }\n                SeigniorageRecipientsResult::AuctionNotFound => {\n                    warn!(\"auction not found when querying for seigniorage recipients\");\n                    return BinaryResponse::new_error(ErrorCode::InternalError);\n                }\n                SeigniorageRecipientsResult::ValueNotFound(error) => {\n                    warn!(%error, \"value not found when querying for seigniorage recipients\");\n                    return BinaryResponse::new_error(ErrorCode::InternalError);\n                }\n            };\n            let Some(era_end) = header.clone_era_end() else {\n                // switch block should have an era end\n                error!(\n                    hash = %header.block_hash(),\n                    \"switch block missing era end (undefined behavior)\"\n                );\n      
          return BinaryResponse::new_error(ErrorCode::InternalError);\n            };\n            let block_rewards = match era_end.rewards() {\n                Rewards::V2(rewards) => rewards,\n                Rewards::V1(_) => {\n                    //It is possible to calculate V1 rewards, but previously we didn't support an\n                    // endpoint to report it in that way. We could implement it\n                    // in a future release if there is interest in it - it's not trivial though.\n                    return BinaryResponse::new_error(ErrorCode::UnsupportedRewardsV1Request);\n                }\n            };\n            let Some(validator_rewards) = block_rewards.get(&validator) else {\n                return BinaryResponse::new_empty();\n            };\n\n            let seigniorage_recipient =\n                snapshot.get_seignorage_recipient(&header.era_id(), &validator);\n\n            let reward = auction::detail::reward(\n                &validator,\n                delegator.as_deref(),\n                header.era_id(),\n                validator_rewards,\n                &snapshot,\n                rewards_ratio,\n            );\n            match (reward, seigniorage_recipient) {\n                (Ok(Some(reward)), Some(seigniorage_recipient)) => {\n                    let response = RewardResponse::new(\n                        reward,\n                        header.era_id(),\n                        *seigniorage_recipient.delegation_rate(),\n                        header.block_hash(),\n                    );\n                    BinaryResponse::from_value(response)\n                }\n                (Err(error), _) => {\n                    warn!(%error, \"failed when calculating rewards\");\n                    BinaryResponse::new_error(ErrorCode::InternalError)\n                }\n                _ => BinaryResponse::new_empty(),\n            }\n        }\n        InformationRequest::ProtocolVersion => 
BinaryResponse::from_value(protocol_version),\n        InformationRequest::Package {\n            state_identifier,\n            identifier,\n        } => {\n            let Some(state_root_hash) =\n                resolve_state_root_hash(effect_builder, state_identifier).await\n            else {\n                return BinaryResponse::new_error(ErrorCode::RootNotFound);\n            };\n            let either = match identifier {\n                PackageIdentifier::ContractPackageHash(hash) => {\n                    get_contract_package(effect_builder, state_root_hash, hash).await\n                }\n                PackageIdentifier::PackageAddr(addr) => {\n                    get_package(effect_builder, state_root_hash, addr)\n                        .await\n                        .map(|opt| opt.map(Either::Right))\n                }\n            };\n            match either {\n                Ok(Some(Either::Left(contract_package))) => {\n                    BinaryResponse::from_value(contract_package)\n                }\n                Ok(Some(Either::Right(package))) => BinaryResponse::from_value(package),\n                Ok(None) => BinaryResponse::new_empty(),\n                Err(err) => BinaryResponse::new_error(err),\n            }\n        }\n        InformationRequest::Entity {\n            state_identifier,\n            identifier,\n            include_bytecode,\n        } => {\n            let Some(state_root_hash) =\n                resolve_state_root_hash(effect_builder, state_identifier).await\n            else {\n                return BinaryResponse::new_error(ErrorCode::RootNotFound);\n            };\n            match identifier {\n                EntityIdentifier::ContractHash(hash) => {\n                    match get_contract(effect_builder, state_root_hash, hash, include_bytecode)\n                        .await\n                    {\n                        Ok(Some(Either::Left(contract))) => BinaryResponse::from_value(contract),\n    
                    Ok(Some(Either::Right(entity))) => BinaryResponse::from_value(entity),\n                        Ok(None) => BinaryResponse::new_empty(),\n                        Err(err) => BinaryResponse::new_error(err),\n                    }\n                }\n                EntityIdentifier::AccountHash(hash) => {\n                    match get_account(effect_builder, state_root_hash, hash, include_bytecode).await\n                    {\n                        Ok(Some(Either::Left(account))) => BinaryResponse::from_value(account),\n                        Ok(Some(Either::Right(entity))) => BinaryResponse::from_value(entity),\n                        Ok(None) => BinaryResponse::new_empty(),\n                        Err(err) => BinaryResponse::new_error(err),\n                    }\n                }\n                EntityIdentifier::PublicKey(pub_key) => {\n                    let hash = pub_key.to_account_hash();\n                    match get_account(effect_builder, state_root_hash, hash, include_bytecode).await\n                    {\n                        Ok(Some(Either::Left(account))) => BinaryResponse::from_value(account),\n                        Ok(Some(Either::Right(entity))) => BinaryResponse::from_value(entity),\n                        Ok(None) => BinaryResponse::new_empty(),\n                        Err(err) => BinaryResponse::new_error(err),\n                    }\n                }\n                EntityIdentifier::EntityAddr(addr) => {\n                    match get_entity(effect_builder, state_root_hash, addr, include_bytecode).await\n                    {\n                        Ok(Some(entity)) => BinaryResponse::from_value(entity),\n                        Ok(None) => BinaryResponse::new_empty(),\n                        Err(err) => BinaryResponse::new_error(err),\n                    }\n                }\n            }\n        }\n    }\n}\n\nasync fn try_accept_transaction<REv>(\n    effect_builder: EffectBuilder<REv>,\n    
transaction: Transaction,\n    is_speculative: bool,\n) -> BinaryResponse\nwhere\n    REv: From<AcceptTransactionRequest>,\n{\n    effect_builder\n        .try_accept_transaction(transaction, is_speculative)\n        .await\n        .map_or_else(\n            |err| BinaryResponse::new_error(err.into()),\n            |()| BinaryResponse::new_empty(),\n        )\n}\n\nasync fn try_speculative_execution<REv>(\n    effect_builder: EffectBuilder<REv>,\n    transaction: Transaction,\n) -> BinaryResponse\nwhere\n    REv: From<Event> + From<ContractRuntimeRequest> + From<StorageRequest>,\n{\n    let tip = match effect_builder\n        .get_highest_complete_block_header_from_storage()\n        .await\n    {\n        Some(tip) => tip,\n        None => return BinaryResponse::new_error(ErrorCode::NoCompleteBlocks),\n    };\n\n    let result = effect_builder\n        .speculatively_execute(Box::new(tip), Box::new(transaction))\n        .await;\n\n    match result {\n        SpeculativeExecutionResult::InvalidTransaction(error) => {\n            debug!(%error, \"invalid transaction submitted for speculative execution\");\n            BinaryResponse::new_error(error.into())\n        }\n        SpeculativeExecutionResult::WasmV1(spec_exec_result) => {\n            BinaryResponse::from_value(spec_exec_result)\n        }\n        SpeculativeExecutionResult::ReceivedV1Transaction => {\n            BinaryResponse::new_error(ErrorCode::ReceivedV1Transaction)\n        }\n    }\n}\n\nasync fn handle_client_loop<REv>(\n    stream: TcpStream,\n    effect_builder: EffectBuilder<REv>,\n    config: Arc<Config>,\n    rate_limiter: Arc<Mutex<RateLimiter>>,\n    monitor: ConnectionTerminator,\n    life_extensions_config: BinaryRequestTerminationDelayValues,\n) -> Result<(), Error>\nwhere\n    REv: From<Event>\n        + From<StorageRequest>\n        + From<ContractRuntimeRequest>\n        + From<AcceptTransactionRequest>\n        + From<NetworkInfoRequest>\n        + From<ReactorInfoRequest>\n   
     + From<ConsensusRequest>\n        + From<BlockSynchronizerRequest>\n        + From<UpgradeWatcherRequest>\n        + From<ChainspecRawBytesRequest>\n        + Send,\n{\n    let codec = BinaryMessageCodec::new(config.max_message_size_bytes);\n    let mut framed = Framed::new(stream, codec);\n    monitor\n        .terminate_at(Timestamp::now() + config.initial_connection_lifetime)\n        .await;\n    let cancellation_token = monitor.get_cancellation_token();\n    loop {\n        select! {\n            maybe_bytes = framed.next() => {\n                let Some(result) = maybe_bytes else {\n                    debug!(\"remote party closed the connection\");\n                    return Ok(());\n                };\n                let limiter_response = rate_limiter.lock().await.throttle();\n                let binary_message = result?;\n                let payload = binary_message.payload();\n                if payload.is_empty() {\n                    // This should be unreachable, we reject 0-length messages earlier\n                    warn!(\"Empty payload detected late.\");\n                    return Err(Error::NoPayload);\n                }\n                let mut bytes_buf = bytes::BytesMut::with_capacity(payload.len() + 4);\n                let response =\n                    handle_payload(effect_builder, payload, limiter_response, &monitor, &life_extensions_config).await;\n                codec.clone().encode(binary_message, &mut bytes_buf)?;\n                framed\n                    .send(BinaryMessage::new(\n                        BinaryResponseAndRequest::new(response, Bytes::from(bytes_buf.freeze().to_vec())).to_bytes()?,\n                    ))\n                    .await?\n            }\n            _ = cancellation_token.cancelled() => {\n                debug!(\"Binary port connection stale - closing.\");\n                return Ok(());\n            }\n        }\n    }\n}\n\nfn extract_header(payload: &[u8]) -> Result<(CommandHeader, 
&[u8]), ErrorCode> {\n    const BINARY_VERSION_LENGTH_BYTES: usize = size_of::<u16>();\n\n    if payload.len() < BINARY_VERSION_LENGTH_BYTES {\n        return Err(ErrorCode::TooLittleBytesForRequestHeaderVersion);\n    }\n\n    let binary_protocol_version = match u16::from_bytes(payload) {\n        Ok((binary_protocol_version, _)) => binary_protocol_version,\n        Err(_) => return Err(ErrorCode::MalformedCommandHeaderVersion),\n    };\n\n    if binary_protocol_version != CommandHeader::HEADER_VERSION {\n        return Err(ErrorCode::CommandHeaderVersionMismatch);\n    }\n\n    match CommandHeader::from_bytes(payload) {\n        Ok((header, remainder)) => Ok((header, remainder)),\n        Err(error) => {\n            debug!(%error, \"failed to parse binary request header\");\n            Err(ErrorCode::MalformedCommandHeader)\n        }\n    }\n}\n\nasync fn handle_payload<REv>(\n    effect_builder: EffectBuilder<REv>,\n    payload: &[u8],\n    limiter_response: LimiterResponse,\n    connection_terminator: &ConnectionTerminator,\n    life_extensions_config: &BinaryRequestTerminationDelayValues,\n) -> BinaryResponse\nwhere\n    REv: From<Event>,\n{\n    let (header, remainder) = match extract_header(payload) {\n        Ok(header) => header,\n        Err(error_code) => return BinaryResponse::new_error(error_code),\n    };\n\n    if let LimiterResponse::Throttled = limiter_response {\n        return BinaryResponse::new_error(ErrorCode::RequestThrottled);\n    }\n\n    // we might receive a request added in a minor version if we're behind\n    let Ok(tag) = CommandTag::try_from(header.type_tag()) else {\n        return BinaryResponse::new_error(ErrorCode::UnsupportedRequest);\n    };\n\n    let request = match Command::try_from((tag, remainder)) {\n        Ok(request) => request,\n        Err(error) => {\n            debug!(%error, \"failed to parse binary request body\");\n            return BinaryResponse::new_error(ErrorCode::MalformedCommand);\n        }\n    
};\n    connection_terminator\n        .delay_termination(life_extensions_config.get_life_termination_delay(&request))\n        .await;\n\n    effect_builder\n        .make_request(\n            |responder| Event::HandleRequest { request, responder },\n            QueueKind::Regular,\n        )\n        .await\n}\n\nasync fn handle_client<REv>(\n    addr: SocketAddr,\n    stream: TcpStream,\n    effect_builder: EffectBuilder<REv>,\n    config: Arc<Config>,\n    _permit: OwnedSemaphorePermit,\n    rate_limiter: Arc<Mutex<RateLimiter>>,\n) where\n    REv: From<Event>\n        + From<StorageRequest>\n        + From<ContractRuntimeRequest>\n        + From<AcceptTransactionRequest>\n        + From<NetworkInfoRequest>\n        + From<ReactorInfoRequest>\n        + From<ConsensusRequest>\n        + From<BlockSynchronizerRequest>\n        + From<UpgradeWatcherRequest>\n        + From<ChainspecRawBytesRequest>\n        + Send,\n{\n    let keep_alive_monitor = ConnectionTerminator::new();\n    let life_extensions_config = BinaryRequestTerminationDelayValues::from_config(&config);\n    if let Err(err) = handle_client_loop(\n        stream,\n        effect_builder,\n        config,\n        rate_limiter,\n        keep_alive_monitor,\n        life_extensions_config,\n    )\n    .await\n    {\n        // Low severity is used to prevent malicious clients from causing log floods.\n        trace!(%addr, err=display_error(&err), \"binary port client handler error\");\n    }\n}\n\nasync fn run_server<REv>(\n    local_addr: Arc<OnceCell<SocketAddr>>,\n    effect_builder: EffectBuilder<REv>,\n    config: Arc<Config>,\n    shutdown_trigger: Arc<Notify>,\n) where\n    REv: From<Event>\n        + From<StorageRequest>\n        + From<ContractRuntimeRequest>\n        + From<AcceptTransactionRequest>\n        + From<NetworkInfoRequest>\n        + From<ReactorInfoRequest>\n        + From<ConsensusRequest>\n        + From<BlockSynchronizerRequest>\n        + From<UpgradeWatcherRequest>\n       
 + From<ChainspecRawBytesRequest>\n        + Send,\n{\n    let listener = match TcpListener::bind(&config.address).await {\n        Ok(listener) => listener,\n        Err(err) => {\n            error!(%err, \"unable to bind binary port listener\");\n            return;\n        }\n    };\n\n    let bind_address = match listener.local_addr() {\n        Ok(bind_address) => bind_address,\n        Err(err) => {\n            error!(%err, \"unable to get local addr of binary port\");\n            return;\n        }\n    };\n\n    local_addr.set(bind_address).unwrap();\n\n    loop {\n        select! {\n            _ = shutdown_trigger.notified() => {\n                break;\n            }\n            result = listener.accept() => match result {\n                Ok((stream, peer)) => {\n                    effect_builder\n                        .make_request(\n                            |responder| Event::AcceptConnection {\n                                stream,\n                                peer,\n                                responder,\n                            },\n                            QueueKind::Regular,\n                        )\n                        .await;\n                }\n                Err(io_err) => {\n                    info!(%io_err, \"problem accepting binary port connection\");\n                }\n            }\n        }\n    }\n}\n\n#[cfg(test)]\nimpl crate::reactor::Finalize for BinaryPort {\n    fn finalize(mut self) -> BoxFuture<'static, ()> {\n        self.shutdown_trigger.notify_one();\n        async move {\n            if let Some(handle) = self.server_join_handle.take() {\n                handle.await.ok();\n            }\n        }\n        .boxed()\n    }\n}\n\nasync fn resolve_block_header<REv>(\n    effect_builder: EffectBuilder<REv>,\n    block_identifier: Option<BlockIdentifier>,\n) -> Option<BlockHeader>\nwhere\n    REv: From<Event> + From<ContractRuntimeRequest> + From<StorageRequest>,\n{\n    match 
block_identifier {\n        Some(BlockIdentifier::Hash(block_hash)) => {\n            effect_builder\n                .get_block_header_from_storage(block_hash, true)\n                .await\n        }\n        Some(BlockIdentifier::Height(block_height)) => {\n            effect_builder\n                .get_block_header_at_height_from_storage(block_height, true)\n                .await\n        }\n        None => {\n            effect_builder\n                .get_highest_complete_block_header_from_storage()\n                .await\n        }\n    }\n}\n\nasync fn resolve_block_height<REv>(\n    effect_builder: EffectBuilder<REv>,\n    block_identifier: Option<BlockIdentifier>,\n) -> Option<u64>\nwhere\n    REv: From<Event> + From<ContractRuntimeRequest> + From<StorageRequest>,\n{\n    match block_identifier {\n        Some(BlockIdentifier::Hash(block_hash)) => effect_builder\n            .get_block_header_from_storage(block_hash, true)\n            .await\n            .map(|header| header.height()),\n        Some(BlockIdentifier::Height(block_height)) => Some(block_height),\n        None => effect_builder\n            .get_highest_complete_block_from_storage()\n            .await\n            .map(|header| header.height()),\n    }\n}\n\nasync fn resolve_state_root_hash<REv>(\n    effect_builder: EffectBuilder<REv>,\n    state_identifier: Option<GlobalStateIdentifier>,\n) -> Option<Digest>\nwhere\n    REv: From<Event> + From<ContractRuntimeRequest> + From<StorageRequest>,\n{\n    match state_identifier {\n        Some(GlobalStateIdentifier::BlockHash(block_hash)) => effect_builder\n            .get_block_header_from_storage(block_hash, true)\n            .await\n            .map(|header| *header.state_root_hash()),\n        Some(GlobalStateIdentifier::BlockHeight(block_height)) => effect_builder\n            .get_block_header_at_height_from_storage(block_height, true)\n            .await\n            .map(|header| *header.state_root_hash()),\n        
Some(GlobalStateIdentifier::StateRootHash(state_root_hash)) => Some(state_root_hash),\n        None => effect_builder\n            .get_highest_complete_block_header_from_storage()\n            .await\n            .map(|header| *header.state_root_hash()),\n    }\n}\n\nasync fn resolve_era_switch_block_header<REv>(\n    effect_builder: EffectBuilder<REv>,\n    era_identifier: Option<EraIdentifier>,\n) -> Option<BlockHeader>\nwhere\n    REv: From<Event> + From<ContractRuntimeRequest> + From<StorageRequest>,\n{\n    match era_identifier {\n        Some(EraIdentifier::Era(era_id)) => {\n            effect_builder\n                .get_switch_block_header_by_era_id_from_storage(era_id)\n                .await\n        }\n        Some(EraIdentifier::Block(block_identifier)) => {\n            let header = resolve_block_header(effect_builder, Some(block_identifier)).await?;\n            if header.is_switch_block() {\n                Some(header)\n            } else {\n                effect_builder\n                    .get_switch_block_header_by_era_id_from_storage(header.era_id())\n                    .await\n            }\n        }\n        None => {\n            effect_builder\n                .get_latest_switch_block_header_from_storage()\n                .await\n        }\n    }\n}\n\nimpl<REv> Component<REv> for BinaryPort\nwhere\n    REv: From<Event>\n        + From<StorageRequest>\n        + From<ContractRuntimeRequest>\n        + From<AcceptTransactionRequest>\n        + From<NetworkInfoRequest>\n        + From<ReactorInfoRequest>\n        + From<ConsensusRequest>\n        + From<BlockSynchronizerRequest>\n        + From<UpgradeWatcherRequest>\n        + From<ChainspecRawBytesRequest>\n        + Send,\n{\n    type Event = Event;\n\n    fn name(&self) -> &str {\n        COMPONENT_NAME\n    }\n\n    fn handle_event(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        _rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> 
Effects<Self::Event> {\n        match &self.state {\n            ComponentState::Uninitialized => {\n                warn!(\n                    ?event,\n                    name = <Self as Component<MainEvent>>::name(self),\n                    \"should not handle this event when component is uninitialized\"\n                );\n                Effects::new()\n            }\n            ComponentState::Initializing => match event {\n                Event::Initialize => {\n                    let rate_limiter_res =\n                        RateLimiter::new(self.config.qps_limit, TimeDiff::from_seconds(1));\n                    match rate_limiter_res {\n                        Ok(rate_limiter) => {\n                            match self.rate_limiter.set(Arc::new(Mutex::new(rate_limiter))) {\n                                Ok(_) => {}\n                                Err(_) => {\n                                    error!(\"failed to initialize binary port, rate limiter already initialized\");\n                                    <Self as InitializedComponent<REv>>::set_state(\n                                        self,\n                                        ComponentState::Fatal(\"failed to initialize binary port, rate limiter already initialized\".to_string()),\n                                    );\n                                    return Effects::new();\n                                }\n                            };\n                        }\n                        Err(error) => {\n                            error!(%error, \"failed to initialize binary port\");\n                            <Self as InitializedComponent<REv>>::set_state(\n                                self,\n                                ComponentState::Fatal(error.to_string()),\n                            );\n                            return Effects::new();\n                        }\n                    };\n                    let (effects, state) = 
self.bind(self.config.enable_server, effect_builder);\n                    <Self as InitializedComponent<MainEvent>>::set_state(self, state);\n                    effects\n                }\n                _ => {\n                    warn!(\n                        ?event,\n                        name = <Self as Component<MainEvent>>::name(self),\n                        \"binary port is initializing, ignoring event\"\n                    );\n                    Effects::new()\n                }\n            },\n            ComponentState::Initialized => match event {\n                Event::Initialize => {\n                    error!(\n                        ?event,\n                        name = <Self as Component<MainEvent>>::name(self),\n                        \"component already initialized\"\n                    );\n                    Effects::new()\n                }\n                Event::AcceptConnection {\n                    stream,\n                    peer,\n                    responder,\n                } => {\n                    if let Ok(permit) = Arc::clone(&self.connection_limit).try_acquire_owned() {\n                        self.metrics.binary_port_connections_count.inc();\n                        let config = Arc::clone(&self.config);\n                        let rate_limiter = Arc::clone(\n                            self.rate_limiter\n                                .get()\n                                .expect(\"This should have been set during initialization\"),\n                        );\n                        tokio::spawn(handle_client(\n                            peer,\n                            stream,\n                            effect_builder,\n                            config,\n                            permit,\n                            rate_limiter,\n                        ));\n                    } else {\n                        warn!(\n                            \"connection limit reached, dropping 
connection from {}\",\n                            peer\n                        );\n                    }\n                    responder.respond(()).ignore()\n                }\n                Event::HandleRequest { request, responder } => {\n                    let config = Arc::clone(&self.config);\n                    let metrics = Arc::clone(&self.metrics);\n                    let protocol_version = self.chainspec.protocol_version();\n                    async move {\n                        let response = handle_request(\n                            request,\n                            effect_builder,\n                            &config,\n                            &metrics,\n                            protocol_version,\n                        )\n                        .await;\n                        responder.respond(response).await;\n                    }\n                    .ignore()\n                }\n            },\n            ComponentState::Fatal(msg) => {\n                error!(\n                    msg,\n                    ?event,\n                    name = <Self as Component<MainEvent>>::name(self),\n                    \"should not handle this event when this component has fatal error\"\n                );\n                Effects::new()\n            }\n        }\n    }\n}\n\nimpl<REv> InitializedComponent<REv> for BinaryPort\nwhere\n    REv: From<Event>\n        + From<StorageRequest>\n        + From<ContractRuntimeRequest>\n        + From<AcceptTransactionRequest>\n        + From<NetworkInfoRequest>\n        + From<ReactorInfoRequest>\n        + From<ConsensusRequest>\n        + From<BlockSynchronizerRequest>\n        + From<UpgradeWatcherRequest>\n        + From<ChainspecRawBytesRequest>\n        + Send,\n{\n    fn state(&self) -> &ComponentState {\n        &self.state\n    }\n\n    fn set_state(&mut self, new_state: ComponentState) {\n        info!(\n            ?new_state,\n            name = <Self as 
Component<MainEvent>>::name(self),\n            \"component state changed\"\n        );\n\n        self.state = new_state;\n    }\n}\n\nimpl<REv> PortBoundComponent<REv> for BinaryPort\nwhere\n    REv: From<Event>\n        + From<StorageRequest>\n        + From<ContractRuntimeRequest>\n        + From<AcceptTransactionRequest>\n        + From<NetworkInfoRequest>\n        + From<ReactorInfoRequest>\n        + From<ConsensusRequest>\n        + From<BlockSynchronizerRequest>\n        + From<UpgradeWatcherRequest>\n        + From<ChainspecRawBytesRequest>\n        + Send,\n{\n    type Error = ListeningError;\n    type ComponentEvent = Event;\n\n    fn listen(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n    ) -> Result<Effects<Self::ComponentEvent>, Self::Error> {\n        let local_addr = Arc::clone(&self.local_addr);\n        let server_join_handle = tokio::spawn(run_server(\n            local_addr,\n            effect_builder,\n            Arc::clone(&self.config),\n            Arc::clone(&self.shutdown_trigger),\n        ));\n        self.server_join_handle\n            .set(server_join_handle)\n            .expect(\"server join handle should not be set elsewhere\");\n\n        Ok(Effects::new())\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_accumulator/block_acceptor.rs",
    "content": "use std::collections::{BTreeMap, BTreeSet};\n\nuse datasize::DataSize;\nuse itertools::Itertools;\nuse tracing::{debug, error, warn};\n\nuse casper_types::{\n    ActivationPoint, BlockHash, BlockSignaturesV2, ChainNameDigest, EraId, FinalitySignatureV2,\n    PublicKey, Timestamp,\n};\n\nuse crate::{\n    components::block_accumulator::error::{Bogusness, Error as AcceptorError, InvalidGossipError},\n    types::{EraValidatorWeights, ForwardMetaBlock, NodeId, SignatureWeight},\n};\n\n#[derive(DataSize, Debug)]\npub(super) struct BlockAcceptor {\n    block_hash: BlockHash,\n    meta_block: Option<ForwardMetaBlock>,\n    signatures: BTreeMap<PublicKey, (FinalitySignatureV2, BTreeSet<NodeId>)>,\n    peers: BTreeSet<NodeId>,\n    last_progress: Timestamp,\n    our_signature: Option<FinalitySignatureV2>,\n}\n\n#[derive(Debug, PartialEq)]\n#[allow(clippy::large_enum_variant)]\npub(super) enum ShouldStore {\n    SufficientlySignedBlock {\n        meta_block: ForwardMetaBlock,\n        block_signatures: BlockSignaturesV2,\n    },\n    CompletedBlock {\n        meta_block: ForwardMetaBlock,\n        block_signatures: BlockSignaturesV2,\n    },\n    MarkComplete(ForwardMetaBlock),\n    SingleSignature(FinalitySignatureV2),\n    Nothing,\n}\n\nimpl BlockAcceptor {\n    pub(super) fn new<I>(block_hash: BlockHash, peers: I) -> Self\n    where\n        I: IntoIterator<Item = NodeId>,\n    {\n        Self {\n            block_hash,\n            meta_block: None,\n            signatures: BTreeMap::new(),\n            peers: peers.into_iter().collect(),\n            last_progress: Timestamp::now(),\n            our_signature: None,\n        }\n    }\n\n    pub(super) fn peers(&self) -> &BTreeSet<NodeId> {\n        &self.peers\n    }\n\n    pub(super) fn register_peer(&mut self, peer: NodeId) {\n        self.peers.insert(peer);\n    }\n\n    pub(super) fn register_block(\n        &mut self,\n        meta_block: ForwardMetaBlock,\n        peer: Option<NodeId>,\n    ) -> 
Result<(), AcceptorError> {\n        if self.block_hash() != *meta_block.block.hash() {\n            return Err(AcceptorError::BlockHashMismatch {\n                expected: self.block_hash(),\n                actual: *meta_block.block.hash(),\n            });\n        }\n\n        // Verify is needed for the cases when the block comes from the gossiper. It it came here\n        // from the fetcher it'll already be verified.\n        if let Err(error) = meta_block.block.verify() {\n            warn!(%error, \"received invalid block\");\n            return match peer {\n                Some(node_id) => Err(AcceptorError::InvalidGossip(Box::new(\n                    InvalidGossipError::Block {\n                        block_hash: *meta_block.block.hash(),\n                        peer: node_id,\n                        validation_error: error,\n                    },\n                ))),\n                None => Err(AcceptorError::InvalidConfiguration),\n            };\n        }\n\n        if let Some(node_id) = peer {\n            self.register_peer(node_id);\n        }\n\n        match self.meta_block.take() {\n            Some(existing_meta_block) => {\n                let merged_meta_block = existing_meta_block.merge(meta_block)?;\n                self.meta_block = Some(merged_meta_block);\n            }\n            None => {\n                self.meta_block = Some(meta_block);\n            }\n        }\n\n        Ok(())\n    }\n\n    pub(super) fn register_finality_signature(\n        &mut self,\n        finality_signature: FinalitySignatureV2,\n        peer: Option<NodeId>,\n        validator_slots: u32,\n    ) -> Result<Option<FinalitySignatureV2>, AcceptorError> {\n        if self.block_hash != *finality_signature.block_hash() {\n            return Err(AcceptorError::BlockHashMismatch {\n                expected: self.block_hash,\n                actual: *finality_signature.block_hash(),\n            });\n        }\n        if let Some(node_id) = peer {\n  
          // We multiply the number of validators by 2 to get the maximum of signatures, because\n            // of the theoretically possible scenario when we're collecting sigs but are\n            // not yet able to validate them (no validator weights). We should allow to\n            // absorb more than theoretical limit (but not much more) so we don't fill\n            // all slots with invalid sigs:\n            check_signatures_from_peer_bound(validator_slots * 2, node_id, &self.signatures)?;\n        }\n        if let Err(error) = finality_signature.is_verified() {\n            warn!(%error, \"received invalid finality signature\");\n            return match peer {\n                Some(node_id) => Err(AcceptorError::InvalidGossip(Box::new(\n                    InvalidGossipError::FinalitySignature {\n                        block_hash: *finality_signature.block_hash(),\n                        peer: node_id,\n                        validation_error: error,\n                    },\n                ))),\n                None => Err(AcceptorError::InvalidConfiguration),\n            };\n        }\n\n        let had_sufficient_finality = self.has_sufficient_finality();\n        // if we don't have finality yet, collect the signature and return\n        // while we could store the finality signature, we currently prefer\n        // to store block and signatures when sufficient weight is attained\n        if false == had_sufficient_finality {\n            if let Some(node_id) = peer {\n                self.register_peer(node_id);\n            }\n            self.signatures\n                .entry(finality_signature.public_key().clone())\n                .and_modify(|(_, senders)| senders.extend(peer))\n                .or_insert_with(|| (finality_signature, peer.into_iter().collect()));\n            return Ok(None);\n        }\n\n        if let Some(meta_block) = &self.meta_block {\n            // if the signature's era does not match the block's era\n          
  // it's malicious / bogus / invalid.\n            if meta_block.block.era_id() != finality_signature.era_id() {\n                return match peer {\n                    Some(node_id) => Err(AcceptorError::EraMismatch {\n                        block_hash: *finality_signature.block_hash(),\n                        expected: meta_block.block.era_id(),\n                        actual: finality_signature.era_id(),\n                        peer: node_id,\n                    }),\n                    None => Err(AcceptorError::InvalidConfiguration),\n                };\n            }\n        } else {\n            // should have block if self.has_sufficient_finality()\n            return Err(AcceptorError::SufficientFinalityWithoutBlock {\n                block_hash: *finality_signature.block_hash(),\n            });\n        }\n\n        if let Some(node_id) = peer {\n            self.register_peer(node_id);\n        }\n        let is_new = !self\n            .signatures\n            .contains_key(finality_signature.public_key());\n\n        self.signatures\n            .entry(finality_signature.public_key().clone())\n            .and_modify(|(_, senders)| senders.extend(peer))\n            .or_insert_with(|| (finality_signature.clone(), peer.into_iter().collect()));\n\n        if had_sufficient_finality && is_new {\n            // we received this finality signature after putting the block & earlier signatures\n            // to storage\n            self.touch();\n            return Ok(Some(finality_signature));\n        };\n\n        // either we've seen this signature already or we're still waiting for sufficient finality\n        Ok(None)\n    }\n\n    /// Returns instructions to write the block and/or finality signatures to storage.\n    /// Also returns a set of peers that sent us invalid data and should be banned.\n    pub(super) fn should_store_block(\n        &mut self,\n        era_validator_weights: &EraValidatorWeights,\n        chain_name_hash: 
ChainNameDigest,\n    ) -> (ShouldStore, Vec<(NodeId, AcceptorError)>) {\n        let block_hash = self.block_hash;\n        let no_block = self.meta_block.is_none();\n        let no_sigs = self.signatures.is_empty();\n        if self.has_sufficient_finality() {\n            if let Some(meta_block) = self.meta_block.as_mut() {\n                if meta_block.state.is_executed()\n                    && meta_block.state.register_as_marked_complete().was_updated()\n                {\n                    debug!(\n                        %block_hash, no_block, no_sigs,\n                        \"already have sufficient finality signatures, but marking block complete\"\n                    );\n                    return (ShouldStore::MarkComplete(meta_block.clone()), Vec::new());\n                }\n            }\n\n            debug!(\n                %block_hash, no_block, no_sigs,\n                \"not storing anything - already have sufficient finality signatures\"\n            );\n            return (ShouldStore::Nothing, Vec::new());\n        }\n\n        if no_block || no_sigs {\n            debug!(%block_hash, no_block, no_sigs, \"not storing block\");\n            return (ShouldStore::Nothing, Vec::new());\n        }\n\n        let faulty_senders = self.remove_bogus_validators(era_validator_weights);\n        let signature_weight = era_validator_weights.signature_weight(self.signatures.keys());\n        if SignatureWeight::Strict == signature_weight {\n            self.touch();\n            if let Some(meta_block) = self.meta_block.as_mut() {\n                let mut block_signatures = BlockSignaturesV2::new(\n                    *meta_block.block.hash(),\n                    meta_block.block.height(),\n                    meta_block.block.era_id(),\n                    chain_name_hash,\n                );\n                self.signatures.values().for_each(|(signature, _)| {\n                    block_signatures\n                        
.insert_signature(signature.public_key().clone(), *signature.signature());\n                });\n                if meta_block\n                    .state\n                    .register_has_sufficient_finality()\n                    .was_already_registered()\n                {\n                    error!(\n                        %block_hash,\n                        block_height = meta_block.block.height(),\n                        meta_block_state = ?meta_block.state,\n                        \"should not register having sufficient finality for the same block more \\\n                        than once\"\n                    );\n                }\n                if meta_block.state.is_executed() {\n                    if meta_block\n                        .state\n                        .register_as_marked_complete()\n                        .was_already_registered()\n                    {\n                        error!(\n                            %block_hash,\n                            block_height = meta_block.block.height(),\n                            meta_block_state = ?meta_block.state,\n                            \"should not mark the same block complete more than once\"\n                        );\n                    }\n\n                    return (\n                        ShouldStore::CompletedBlock {\n                            meta_block: meta_block.clone(),\n                            block_signatures,\n                        },\n                        faulty_senders,\n                    );\n                }\n                if meta_block\n                    .state\n                    .register_as_stored()\n                    .was_already_registered()\n                {\n                    error!(\n                        %block_hash,\n                        block_height = meta_block.block.height(),\n                        meta_block_state = ?meta_block.state,\n                        \"should not store the same block more than 
once\"\n                    );\n                }\n                return (\n                    ShouldStore::SufficientlySignedBlock {\n                        meta_block: meta_block.clone(),\n                        block_signatures,\n                    },\n                    faulty_senders,\n                );\n            }\n        }\n\n        let signed_weight = era_validator_weights.signed_weight(self.signatures.keys());\n        let total_era_weight = era_validator_weights.get_total_weight();\n        let satisfaction_percent = signed_weight * 100 / total_era_weight;\n        debug!(\n            %block_hash,\n            %signed_weight,\n            %total_era_weight,\n            %satisfaction_percent,\n            no_block, no_sigs,\n            \"not storing anything - insufficient finality signatures\"\n        );\n        (ShouldStore::Nothing, faulty_senders)\n    }\n\n    pub(super) fn has_sufficient_finality(&self) -> bool {\n        self.meta_block\n            .as_ref()\n            .map(|meta_block| meta_block.state.has_sufficient_finality())\n            .unwrap_or(false)\n    }\n\n    pub(super) fn era_id(&self) -> Option<EraId> {\n        if let Some(meta_block) = &self.meta_block {\n            return Some(meta_block.block.era_id());\n        }\n        if let Some((finality_signature, _)) = self.signatures.values().next() {\n            return Some(finality_signature.era_id());\n        }\n        None\n    }\n\n    pub(super) fn block_height(&self) -> Option<u64> {\n        self.meta_block\n            .as_ref()\n            .map(|meta_block| meta_block.block.height())\n    }\n\n    pub(super) fn block_hash(&self) -> BlockHash {\n        self.block_hash\n    }\n\n    pub(super) fn is_upgrade_boundary(\n        &self,\n        activation_point: Option<ActivationPoint>,\n    ) -> Option<bool> {\n        match (&self.meta_block, activation_point) {\n            (None, _) => None,\n            (Some(_), None) => Some(false),\n            
(Some(meta_block), Some(activation_point)) => {\n                Some(meta_block.is_upgrade_boundary(activation_point))\n            }\n        }\n    }\n\n    pub(super) fn last_progress(&self) -> Timestamp {\n        self.last_progress\n    }\n\n    pub(super) fn our_signature(&self) -> Option<&FinalitySignatureV2> {\n        self.our_signature.as_ref()\n    }\n\n    pub(super) fn set_our_signature(&mut self, signature: FinalitySignatureV2) {\n        self.our_signature = Some(signature);\n    }\n\n    /// Removes finality signatures that have the wrong era ID or are signed by non-validators.\n    /// Returns the set of peers that sent us these signatures.\n    fn remove_bogus_validators(\n        &mut self,\n        era_validator_weights: &EraValidatorWeights,\n    ) -> Vec<(NodeId, AcceptorError)> {\n        let bogus_validators = era_validator_weights.bogus_validators(self.signatures.keys());\n\n        let mut faulty_senders = Vec::new();\n        bogus_validators.iter().for_each(|bogus_validator| {\n            debug!(%bogus_validator, \"bogus validator\");\n            if let Some((_, senders)) = self.signatures.remove(bogus_validator) {\n                faulty_senders.extend(senders.iter().map(|sender| {\n                    (\n                        *sender,\n                        AcceptorError::BogusValidator(Bogusness::NotAValidator),\n                    )\n                }));\n            }\n        });\n\n        if let Some(meta_block) = &self.meta_block {\n            let bogus_validators = self\n                .signatures\n                .iter()\n                .filter(|(_, (v, _))| v.era_id() != meta_block.block.era_id())\n                .map(|(k, _)| k.clone())\n                .collect_vec();\n\n            bogus_validators.iter().for_each(|bogus_validator| {\n                debug!(%bogus_validator, \"bogus validator\");\n                if let Some((_, senders)) = self.signatures.remove(bogus_validator) {\n                    
faulty_senders.extend(senders.iter().map(|sender| {\n                        (\n                            *sender,\n                            AcceptorError::BogusValidator(Bogusness::SignatureEraIdMismatch),\n                        )\n                    }));\n                }\n            });\n        }\n\n        for (node_id, _) in &faulty_senders {\n            self.peers.remove(node_id);\n        }\n\n        faulty_senders\n    }\n\n    fn touch(&mut self) {\n        self.last_progress = Timestamp::now();\n    }\n}\n\n/// Returns an error if the peer has sent too many finality signatures.\nfn check_signatures_from_peer_bound(\n    limit: u32,\n    peer: NodeId,\n    signatures: &BTreeMap<PublicKey, (FinalitySignatureV2, BTreeSet<NodeId>)>,\n) -> Result<(), AcceptorError> {\n    let signatures_for_peer = signatures\n        .values()\n        .filter(|(_fin_sig, nodes)| nodes.contains(&peer))\n        .count();\n\n    if signatures_for_peer < limit as usize {\n        Ok(())\n    } else {\n        Err(AcceptorError::TooManySignatures { peer, limit })\n    }\n}\n\n#[cfg(test)]\nimpl BlockAcceptor {\n    pub(super) fn executed(&self) -> bool {\n        self.meta_block\n            .as_ref()\n            .is_some_and(|meta_block| meta_block.state.is_executed())\n    }\n\n    pub(super) fn meta_block(&self) -> Option<ForwardMetaBlock> {\n        self.meta_block.clone()\n    }\n\n    pub(super) fn set_last_progress(&mut self, last_progress: Timestamp) {\n        self.last_progress = last_progress;\n    }\n\n    pub(super) fn set_meta_block(&mut self, meta_block: Option<ForwardMetaBlock>) {\n        self.meta_block = meta_block;\n    }\n\n    pub(super) fn set_sufficient_finality(&mut self, has_sufficient_finality: bool) {\n        if let Some(meta_block) = self.meta_block.as_mut() {\n            meta_block\n                .state\n                .set_sufficient_finality(has_sufficient_finality);\n        }\n    }\n\n    pub(super) fn signatures(\n        
&self,\n    ) -> &BTreeMap<PublicKey, (FinalitySignatureV2, BTreeSet<NodeId>)> {\n        &self.signatures\n    }\n\n    pub(super) fn signatures_mut(\n        &mut self,\n    ) -> &mut BTreeMap<PublicKey, (FinalitySignatureV2, BTreeSet<NodeId>)> {\n        &mut self.signatures\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::testing::TestRng;\n    //use crate::types::NodeId;\n    //use std::collections::{BTreeMap, BTreeSet};\n\n    #[test]\n    fn check_signatures_from_peer_bound_works() {\n        let rng = &mut TestRng::new();\n        let max_signatures = 3;\n        let peer_to_check = NodeId::random(rng);\n\n        let mut signatures = BTreeMap::new();\n        // Insert only the peer to check:\n        signatures.insert(\n            PublicKey::random(rng),\n            (FinalitySignatureV2::random(rng), {\n                let mut nodes = BTreeSet::new();\n                nodes.insert(peer_to_check);\n                nodes\n            }),\n        );\n        // Insert an unrelated peer:\n        signatures.insert(\n            PublicKey::random(rng),\n            (FinalitySignatureV2::random(rng), {\n                let mut nodes = BTreeSet::new();\n                nodes.insert(NodeId::random(rng));\n                nodes\n            }),\n        );\n        // Insert both the peer to check and an unrelated one:\n        signatures.insert(\n            PublicKey::random(rng),\n            (FinalitySignatureV2::random(rng), {\n                let mut nodes = BTreeSet::new();\n                nodes.insert(NodeId::random(rng));\n                nodes.insert(peer_to_check);\n                nodes\n            }),\n        );\n\n        // The peer has send only 2 signatures, so adding a new signature should pass:\n        assert!(matches!(\n            check_signatures_from_peer_bound(max_signatures, peer_to_check, &signatures),\n            Ok(())\n        ));\n\n        // Let's insert once again both the peer to check and 
an unrelated one:\n        signatures.insert(\n            PublicKey::random(rng),\n            (FinalitySignatureV2::random(rng), {\n                let mut nodes = BTreeSet::new();\n                nodes.insert(NodeId::random(rng));\n                nodes.insert(peer_to_check);\n                nodes\n            }),\n        );\n\n        // Now this should fail:\n        assert!(matches!(\n            check_signatures_from_peer_bound(max_signatures, peer_to_check, &signatures),\n            Err(AcceptorError::TooManySignatures { peer, limit })\n                if peer == peer_to_check && limit == max_signatures\n        ));\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_accumulator/config.rs",
    "content": "use datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse casper_types::TimeDiff;\n\nconst DEFAULT_ATTEMPT_EXECUTION_THRESHOLD: u64 = 3;\nconst DEFAULT_DEAD_AIR_INTERVAL_SECS: u32 = 180;\n#[cfg(test)]\nconst DEFAULT_PURGE_INTERVAL_SECS: u32 = 5; // 5 seconds.\n\n#[cfg(not(test))]\nconst DEFAULT_PURGE_INTERVAL_SECS: u32 = 5 * 60; // 5 minutes.\n\n/// Configuration options for the block accumulator.\n#[derive(Copy, Clone, DataSize, Debug, Deserialize, Serialize)]\npub struct Config {\n    /// Attempt execution threshold.\n    pub attempt_execution_threshold: u64,\n    /// Dead air interval.\n    pub dead_air_interval: TimeDiff,\n    /// Purge interval.\n    pub purge_interval: TimeDiff,\n}\n\nimpl Default for Config {\n    fn default() -> Self {\n        Config {\n            attempt_execution_threshold: DEFAULT_ATTEMPT_EXECUTION_THRESHOLD,\n            dead_air_interval: TimeDiff::from_seconds(DEFAULT_DEAD_AIR_INTERVAL_SECS),\n            purge_interval: TimeDiff::from_seconds(DEFAULT_PURGE_INTERVAL_SECS),\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_accumulator/error.rs",
    "content": "use thiserror::Error;\nuse tracing::error;\n\nuse casper_types::{crypto, BlockHash, BlockValidationError, EraId};\n\nuse crate::types::{MetaBlockMergeError, NodeId};\n\n#[derive(Error, Debug)]\npub(crate) enum InvalidGossipError {\n    #[error(\"received cryptographically invalid block for: {block_hash} from: {peer} with error: {validation_error}\")]\n    Block {\n        block_hash: BlockHash,\n        peer: NodeId,\n        validation_error: BlockValidationError,\n    },\n    #[error(\"received cryptographically invalid finality_signature for: {block_hash} from: {peer} with error: {validation_error}\")]\n    FinalitySignature {\n        block_hash: BlockHash,\n        peer: NodeId,\n        validation_error: crypto::Error,\n    },\n}\n\nimpl InvalidGossipError {\n    pub(super) fn peer(&self) -> NodeId {\n        match self {\n            InvalidGossipError::FinalitySignature { peer, .. }\n            | InvalidGossipError::Block { peer, .. } => *peer,\n        }\n    }\n}\n\n#[derive(Error, Debug)]\npub(crate) enum Bogusness {\n    #[error(\"peer is not a validator in current era\")]\n    NotAValidator,\n    #[error(\"peer provided finality signatures from incorrect era\")]\n    SignatureEraIdMismatch,\n}\n\n#[derive(Error, Debug)]\npub(crate) enum Error {\n    #[error(transparent)]\n    InvalidGossip(Box<InvalidGossipError>),\n    #[error(\"invalid configuration\")]\n    InvalidConfiguration,\n    #[error(\"mismatched eras detected\")]\n    EraMismatch {\n        block_hash: BlockHash,\n        expected: EraId,\n        actual: EraId,\n        peer: NodeId,\n    },\n    #[error(\"mismatched block hash: expected={expected}, actual={actual}\")]\n    BlockHashMismatch {\n        expected: BlockHash,\n        actual: BlockHash,\n    },\n    #[error(\"should not be possible to have sufficient finality without block: {block_hash}\")]\n    SufficientFinalityWithoutBlock { block_hash: BlockHash },\n    #[error(\"bogus validator detected\")]\n    
BogusValidator(Bogusness),\n    #[error(transparent)]\n    MetaBlockMerge(#[from] MetaBlockMergeError),\n    #[error(\"tried to insert a signature past the bounds\")]\n    TooManySignatures { peer: NodeId, limit: u32 },\n}\n"
  },
  {
    "path": "node/src/components/block_accumulator/event.rs",
    "content": "use std::{\n    fmt::{self, Display, Formatter},\n    sync::Arc,\n};\n\nuse derive_more::From;\n\nuse casper_types::{BlockHash, BlockSignaturesV2, BlockV2, EraId, FinalitySignatureV2};\n\nuse crate::{\n    effect::requests::BlockAccumulatorRequest,\n    types::{ForwardMetaBlock, NodeId},\n};\n\n#[derive(Debug, From)]\npub(crate) enum Event {\n    #[from]\n    Request(BlockAccumulatorRequest),\n    RegisterPeer {\n        block_hash: BlockHash,\n        era_id: Option<EraId>,\n        sender: NodeId,\n    },\n    ReceivedBlock {\n        block: Arc<BlockV2>,\n        sender: NodeId,\n    },\n    CreatedFinalitySignature {\n        finality_signature: Box<FinalitySignatureV2>,\n    },\n    ReceivedFinalitySignature {\n        finality_signature: Box<FinalitySignatureV2>,\n        sender: NodeId,\n    },\n    ExecutedBlock {\n        meta_block: ForwardMetaBlock,\n    },\n    Stored {\n        maybe_meta_block: Option<ForwardMetaBlock>,\n        maybe_block_signatures: Option<BlockSignaturesV2>,\n    },\n}\n\nimpl Display for Event {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            Event::Request(BlockAccumulatorRequest::GetPeersForBlock { block_hash, .. 
}) => {\n                write!(\n                    f,\n                    \"block accumulator peers request for block: {}\",\n                    block_hash\n                )\n            }\n            Event::RegisterPeer {\n                block_hash, sender, ..\n            } => {\n                write!(\n                    f,\n                    \"registering peer {} after gossip: {}\",\n                    sender, block_hash\n                )\n            }\n            Event::ReceivedBlock { block, sender } => {\n                write!(f, \"received {} from {}\", block, sender)\n            }\n            Event::CreatedFinalitySignature { finality_signature } => {\n                write!(f, \"created {}\", finality_signature)\n            }\n            Event::ReceivedFinalitySignature {\n                finality_signature,\n                sender,\n            } => {\n                write!(f, \"received {} from {}\", finality_signature, sender)\n            }\n            Event::ExecutedBlock { meta_block } => {\n                write!(f, \"executed block {}\", meta_block.block.hash())\n            }\n            Event::Stored {\n                maybe_meta_block: Some(meta_block),\n                maybe_block_signatures,\n            } => {\n                write!(\n                    f,\n                    \"stored {} and {} finality signatures\",\n                    meta_block.block.hash(),\n                    maybe_block_signatures\n                        .as_ref()\n                        .map(|sigs| sigs.len())\n                        .unwrap_or_default()\n                )\n            }\n            Event::Stored {\n                maybe_meta_block: None,\n                maybe_block_signatures,\n            } => {\n                write!(\n                    f,\n                    \"stored {} finality signatures\",\n                    maybe_block_signatures\n                        .as_ref()\n                        .map(|sigs| 
sigs.len())\n                        .unwrap_or_default()\n                )\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_accumulator/leap_instruction.rs",
    "content": "use std::fmt::{Display, Formatter};\n\n#[derive(Debug, PartialEq)]\npub(super) enum LeapInstruction {\n    // should not leap\n    AtHighestKnownBlock,\n    WithinAttemptExecutionThreshold(u64),\n    TooCloseToUpgradeBoundary(u64),\n    NoUsableBlockAcceptors,\n\n    // should leap\n    UnsetLocalTip,\n    UnknownBlockHeight,\n    OutsideAttemptExecutionThreshold(u64),\n}\n\nimpl LeapInstruction {\n    pub(super) fn from_execution_threshold(\n        attempt_execution_threshold: u64,\n        distance_from_highest_known_block: u64,\n        is_upgrade_boundary: bool,\n    ) -> Self {\n        if distance_from_highest_known_block == 0 {\n            return LeapInstruction::AtHighestKnownBlock;\n        }\n        // allow double the execution threshold back off as a safety margin\n        if is_upgrade_boundary\n            && distance_from_highest_known_block <= attempt_execution_threshold * 2\n        {\n            return LeapInstruction::TooCloseToUpgradeBoundary(distance_from_highest_known_block);\n        }\n        if distance_from_highest_known_block > attempt_execution_threshold {\n            return LeapInstruction::OutsideAttemptExecutionThreshold(\n                distance_from_highest_known_block,\n            );\n        }\n        LeapInstruction::WithinAttemptExecutionThreshold(distance_from_highest_known_block)\n    }\n\n    pub(super) fn should_leap(&self) -> bool {\n        match self {\n            LeapInstruction::AtHighestKnownBlock\n            | LeapInstruction::WithinAttemptExecutionThreshold(_)\n            | LeapInstruction::TooCloseToUpgradeBoundary(_)\n            | LeapInstruction::NoUsableBlockAcceptors => false,\n            LeapInstruction::UnsetLocalTip\n            | LeapInstruction::UnknownBlockHeight\n            | LeapInstruction::OutsideAttemptExecutionThreshold(_) => true,\n        }\n    }\n}\n\nimpl Display for LeapInstruction {\n    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {\n        match 
self {\n            LeapInstruction::AtHighestKnownBlock => {\n                write!(f, \"at highest known block\")\n            }\n            LeapInstruction::TooCloseToUpgradeBoundary(diff) => {\n                write!(f, \"{} blocks away from protocol upgrade\", diff)\n            }\n            LeapInstruction::WithinAttemptExecutionThreshold(diff) => {\n                write!(\n                    f,\n                    \"within attempt_execution_threshold, {} blocks behind highest known block\",\n                    diff\n                )\n            }\n            LeapInstruction::OutsideAttemptExecutionThreshold(diff) => {\n                write!(\n                    f,\n                    \"outside attempt_execution_threshold, {} blocks behind highest known block\",\n                    diff\n                )\n            }\n            LeapInstruction::UnsetLocalTip => {\n                write!(f, \"block accumulator local tip is unset\")\n            }\n            LeapInstruction::UnknownBlockHeight => {\n                write!(f, \"unknown block height\")\n            }\n            LeapInstruction::NoUsableBlockAcceptors => {\n                write!(\n                    f,\n                    \"currently have no block acceptor instances with sufficient finality\"\n                )\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_accumulator/local_tip_identifier.rs",
    "content": "use std::cmp::Ordering;\n\nuse casper_types::EraId;\nuse datasize::DataSize;\n\n#[derive(Clone, Copy, DataSize, Debug, Eq, PartialEq)]\npub(super) struct LocalTipIdentifier {\n    pub(super) height: u64,\n    pub(super) era_id: EraId,\n}\n\nimpl LocalTipIdentifier {\n    pub(super) fn new(height: u64, era_id: EraId) -> Self {\n        Self { height, era_id }\n    }\n}\n\nimpl PartialOrd for LocalTipIdentifier {\n    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {\n        Some(self.cmp(other))\n    }\n}\n\nimpl Ord for LocalTipIdentifier {\n    fn cmp(&self, other: &Self) -> Ordering {\n        self.height.cmp(&other.height)\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_accumulator/metrics.rs",
    "content": "use prometheus::{IntGauge, Registry};\n\nuse crate::unregister_metric;\n\n/// Metrics for the block accumulator component.\n#[derive(Debug)]\npub(super) struct Metrics {\n    /// Total number of BlockAcceptors contained in the BlockAccumulator.\n    pub(super) block_acceptors: IntGauge,\n    /// Number of child block hashes that we know of and that will be used in order to request next\n    /// blocks.\n    pub(super) known_child_blocks: IntGauge,\n    registry: Registry,\n}\n\nimpl Metrics {\n    /// Creates a new instance of the block accumulator metrics, using the given prefix.\n    pub fn new(registry: &Registry) -> Result<Self, prometheus::Error> {\n        let block_acceptors = IntGauge::new(\n            \"block_accumulator_block_acceptors\".to_string(),\n            \"number of block acceptors in the Block Accumulator\".to_string(),\n        )?;\n        let known_child_blocks = IntGauge::new(\n            \"block_accumulator_known_child_blocks\".to_string(),\n            \"number of blocks received by the Block Accumulator for which we know the hash of the child block\".to_string(),\n        )?;\n\n        registry.register(Box::new(block_acceptors.clone()))?;\n        registry.register(Box::new(known_child_blocks.clone()))?;\n\n        Ok(Metrics {\n            block_acceptors,\n            known_child_blocks,\n            registry: registry.clone(),\n        })\n    }\n}\n\nimpl Drop for Metrics {\n    fn drop(&mut self) {\n        unregister_metric!(self.registry, self.block_acceptors);\n        unregister_metric!(self.registry, self.known_child_blocks);\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_accumulator/sync_identifier.rs",
    "content": "use std::fmt::{Display, Formatter};\n\nuse casper_types::{BlockHash, EraId};\n\n#[derive(Clone, Debug)]\npub(crate) enum SyncIdentifier {\n    // all we know about the block is its hash;\n    // this is usually a trusted hash from config\n    BlockHash(BlockHash),\n    // we know both the hash and the height of the block\n    BlockIdentifier(BlockHash, u64),\n    // we have just acquired the necessary data for the block\n    // including sufficient finality; this may be a historical\n    // block and / or potentially the new highest block\n    SyncedBlockIdentifier(BlockHash, u64, EraId),\n    // we acquired the necessary data for the block, including\n    // sufficient finality and it has been enqueued for\n    // execution; this state is valid for forward blocks only\n    ExecutingBlockIdentifier(BlockHash, u64, EraId),\n    // we read this block from disk, and have all the parts\n    // we need to discover its descendent (if any) to continue.\n    LocalTip(BlockHash, u64, EraId),\n}\n\nimpl SyncIdentifier {\n    pub(crate) fn block_hash(&self) -> BlockHash {\n        match self {\n            SyncIdentifier::BlockIdentifier(hash, _)\n            | SyncIdentifier::SyncedBlockIdentifier(hash, _, _)\n            | SyncIdentifier::ExecutingBlockIdentifier(hash, _, _)\n            | SyncIdentifier::LocalTip(hash, _, _)\n            | SyncIdentifier::BlockHash(hash) => *hash,\n        }\n    }\n\n    pub(crate) fn block_height(&self) -> Option<u64> {\n        match self {\n            SyncIdentifier::BlockIdentifier(_, height)\n            | SyncIdentifier::SyncedBlockIdentifier(_, height, _)\n            | SyncIdentifier::ExecutingBlockIdentifier(_, height, _)\n            | SyncIdentifier::LocalTip(_, height, _) => Some(*height),\n            SyncIdentifier::BlockHash(_) => None,\n        }\n    }\n\n    pub(crate) fn era_id(&self) -> Option<EraId> {\n        match self {\n            SyncIdentifier::BlockHash(_) | SyncIdentifier::BlockIdentifier(_, 
_) => None,\n            SyncIdentifier::SyncedBlockIdentifier(_, _, era_id)\n            | SyncIdentifier::ExecutingBlockIdentifier(_, _, era_id)\n            | SyncIdentifier::LocalTip(_, _, era_id) => Some(*era_id),\n        }\n    }\n\n    pub(crate) fn block_height_and_era(&self) -> Option<(u64, EraId)> {\n        if let (Some(block_height), Some(era_id)) = (self.block_height(), self.era_id()) {\n            return Some((block_height, era_id));\n        }\n        None\n    }\n\n    pub(crate) fn is_held_locally(&self) -> bool {\n        match self {\n            SyncIdentifier::BlockHash(_) | SyncIdentifier::BlockIdentifier(_, _) => false,\n\n            SyncIdentifier::SyncedBlockIdentifier(_, _, _)\n            | SyncIdentifier::ExecutingBlockIdentifier(_, _, _)\n            | SyncIdentifier::LocalTip(_, _, _) => true,\n        }\n    }\n\n    pub(crate) fn block_hash_to_sync(&self, child_hash: Option<BlockHash>) -> Option<BlockHash> {\n        if self.is_held_locally() {\n            child_hash\n        } else {\n            Some(self.block_hash())\n        }\n    }\n}\n\nimpl Display for SyncIdentifier {\n    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {\n        match self {\n            SyncIdentifier::BlockHash(block_hash) => block_hash.fmt(f),\n            SyncIdentifier::BlockIdentifier(block_hash, block_height) => {\n                write!(\n                    f,\n                    \"block_hash: {} block_height: {}\",\n                    block_hash, block_height\n                )\n            }\n            SyncIdentifier::SyncedBlockIdentifier(block_hash, block_height, era_id)\n            | SyncIdentifier::ExecutingBlockIdentifier(block_hash, block_height, era_id)\n            | SyncIdentifier::LocalTip(block_hash, block_height, era_id) => {\n                write!(\n                    f,\n                    \"block_hash: {} block_height: {} era_id: {}\",\n                    block_hash, block_height, era_id\n                
)\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_accumulator/sync_instruction.rs",
    "content": "use casper_types::BlockHash;\n\n#[derive(Debug)]\npub(crate) enum SyncInstruction {\n    Leap { block_hash: BlockHash },\n    BlockSync { block_hash: BlockHash },\n    CaughtUp { block_hash: BlockHash },\n    LeapIntervalElapsed { block_hash: BlockHash },\n}\n\nimpl SyncInstruction {\n    pub(crate) fn block_hash(&self) -> BlockHash {\n        match self {\n            SyncInstruction::Leap { block_hash }\n            | SyncInstruction::BlockSync { block_hash }\n            | SyncInstruction::CaughtUp { block_hash }\n            | SyncInstruction::LeapIntervalElapsed { block_hash } => *block_hash,\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_accumulator/tests.rs",
    "content": "use std::{\n    collections::BTreeSet,\n    fmt::{self, Debug, Display, Formatter},\n    sync::Arc,\n    time::Duration,\n};\n\nuse derive_more::From;\nuse num_rational::Ratio;\nuse prometheus::Registry;\nuse rand::Rng;\nuse serde::Serialize;\nuse tempfile::TempDir;\nuse thiserror::Error as ThisError;\nuse tokio::time;\n\nuse casper_types::{\n    generate_ed25519_keypair, testing::TestRng, ActivationPoint, BlockV2, ChainNameDigest,\n    Chainspec, ChainspecRawBytes, FinalitySignature, FinalitySignatureV2, ProtocolVersion,\n    PublicKey, SecretKey, Signature, TestBlockBuilder, TransactionConfig, U512,\n};\nuse reactor::ReactorEvent;\n\nuse crate::{\n    components::{\n        consensus::tests::utils::{\n            ALICE_NODE_ID, ALICE_PUBLIC_KEY, ALICE_SECRET_KEY, BOB_NODE_ID, BOB_PUBLIC_KEY,\n            BOB_SECRET_KEY, CAROL_PUBLIC_KEY, CAROL_SECRET_KEY,\n        },\n        network::Identity as NetworkIdentity,\n        storage::{self, Storage},\n    },\n    effect::{\n        announcements::ControlAnnouncement,\n        requests::{ContractRuntimeRequest, MarkBlockCompletedRequest, NetworkRequest},\n    },\n    protocol::Message,\n    reactor::{self, EventQueueHandle, QueueKind, Reactor, Runner, TryCrankOutcome},\n    types::EraValidatorWeights,\n    utils::{Loadable, WithDir},\n    NodeRng,\n};\n\nuse super::*;\n\nconst POLL_INTERVAL: Duration = Duration::from_millis(10);\nconst RECENT_ERA_INTERVAL: u64 = 1;\nconst VALIDATOR_SLOTS: u32 = 100;\n\nfn meta_block_with_default_state(block: Arc<BlockV2>) -> ForwardMetaBlock {\n    MetaBlock::new_forward(block, vec![], MetaBlockState::new())\n        .try_into()\n        .unwrap()\n}\n\nfn signatures_for_block(\n    block: &BlockV2,\n    signatures: &[FinalitySignatureV2],\n    chain_name_hash: ChainNameDigest,\n) -> BlockSignaturesV2 {\n    let mut block_signatures = BlockSignaturesV2::new(\n        *block.hash(),\n        block.height(),\n        block.era_id(),\n        chain_name_hash,\n    );\n   
 for signature in signatures {\n        block_signatures.insert_signature(signature.public_key().clone(), *signature.signature());\n    }\n    block_signatures\n}\n\n/// Top-level event for the reactor.\n#[derive(Debug, From, Serialize)]\n#[allow(clippy::large_enum_variant)]\n#[must_use]\nenum Event {\n    #[from]\n    Storage(#[serde(skip_serializing)] storage::Event),\n    #[from]\n    BlockAccumulator(#[serde(skip_serializing)] super::Event),\n    #[from]\n    ControlAnnouncement(ControlAnnouncement),\n    #[from]\n    FatalAnnouncement(FatalAnnouncement),\n    #[from]\n    BlockAccumulatorAnnouncement(#[serde(skip_serializing)] BlockAccumulatorAnnouncement),\n    #[from]\n    MetaBlockAnnouncement(#[serde(skip_serializing)] MetaBlockAnnouncement),\n    #[from]\n    ContractRuntime(#[serde(skip_serializing)] ContractRuntimeRequest),\n    #[from]\n    StorageRequest(StorageRequest),\n    #[from]\n    NetworkRequest(NetworkRequest<Message>),\n    #[from]\n    NetworkPeerBehaviorAnnouncement(PeerBehaviorAnnouncement),\n}\n\nimpl From<MarkBlockCompletedRequest> for Event {\n    fn from(request: MarkBlockCompletedRequest) -> Self {\n        Event::Storage(storage::Event::MarkBlockCompletedRequest(request))\n    }\n}\n\nimpl ReactorEvent for Event {\n    fn is_control(&self) -> bool {\n        matches!(self, Event::ControlAnnouncement(_))\n    }\n\n    fn try_into_control(self) -> Option<ControlAnnouncement> {\n        if let Self::ControlAnnouncement(ctrl_ann) = self {\n            Some(ctrl_ann)\n        } else {\n            None\n        }\n    }\n}\n\nimpl Display for Event {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            Event::Storage(event) => write!(formatter, \"storage: {}\", event),\n            Event::BlockAccumulator(event) => write!(formatter, \"block accumulator: {}\", event),\n            Event::ControlAnnouncement(ctrl_ann) => write!(formatter, \"control: {}\", ctrl_ann),\n            
Event::FatalAnnouncement(fatal_ann) => write!(formatter, \"fatal: {}\", fatal_ann),\n            Event::BlockAccumulatorAnnouncement(ann) => {\n                write!(formatter, \"block-accumulator announcement: {}\", ann)\n            }\n            Event::MetaBlockAnnouncement(meta_block_ann) => {\n                write!(formatter, \"meta block announcement: {}\", meta_block_ann)\n            }\n            Event::ContractRuntime(event) => {\n                write!(formatter, \"contract-runtime event: {:?}\", event)\n            }\n            Event::StorageRequest(request) => write!(formatter, \"storage request: {:?}\", request),\n            Event::NetworkRequest(request) => write!(formatter, \"network request: {:?}\", request),\n            Event::NetworkPeerBehaviorAnnouncement(peer_behavior) => {\n                write!(formatter, \"peer behavior announcement: {:?}\", peer_behavior)\n            }\n        }\n    }\n}\n\n/// Error type returned by the test reactor.\n#[derive(Debug, ThisError)]\nenum ReactorError {\n    #[error(\"prometheus (metrics) error: {0}\")]\n    Metrics(#[from] prometheus::Error),\n}\n\nstruct MockReactor {\n    storage: Storage,\n    block_accumulator: BlockAccumulator,\n    blocked_peers: Vec<PeerBehaviorAnnouncement>,\n    validator_matrix: ValidatorMatrix,\n    _storage_tempdir: TempDir,\n}\n\nimpl Reactor for MockReactor {\n    type Event = Event;\n    type Config = ();\n    type Error = ReactorError;\n\n    fn new(\n        _config: Self::Config,\n        chainspec: Arc<Chainspec>,\n        _chainspec_raw_bytes: Arc<ChainspecRawBytes>,\n        _network_identity: NetworkIdentity,\n        registry: &Registry,\n        _event_queue: EventQueueHandle<Self::Event>,\n        _rng: &mut NodeRng,\n    ) -> Result<(Self, Effects<Self::Event>), Self::Error> {\n        let (storage_config, storage_tempdir) = storage::Config::new_for_tests(1);\n        let storage_withdir = WithDir::new(storage_tempdir.path(), storage_config);\n        
let validator_matrix = ValidatorMatrix::new_with_validator(ALICE_SECRET_KEY.clone());
        let block_accumulator_config = Config::default();
        // Same convention as the tests below: block time is half the purge interval.
        let block_time = block_accumulator_config.purge_interval / 2;

        let block_accumulator = BlockAccumulator::new(
            block_accumulator_config,
            validator_matrix.clone(),
            RECENT_ERA_INTERVAL,
            block_time,
            VALIDATOR_SLOTS,
            registry,
        )
        .unwrap();

        let storage = Storage::new(
            &storage_withdir,
            None,
            ProtocolVersion::from_parts(1, 0, 0),
            EraId::default(),
            "test",
            chainspec.transaction_config.max_ttl.into(),
            chainspec.core_config.recent_era_count(),
            Some(registry),
            false,
            TransactionConfig::default(),
        )
        .unwrap();

        let reactor = MockReactor {
            storage,
            block_accumulator,
            blocked_peers: vec![],
            validator_matrix,
            _storage_tempdir: storage_tempdir,
        };

        let effects = Effects::new();

        Ok((reactor, effects))
    }

    // Routes events to the owned components; the meta block announcement arm
    // also walks an announced meta block through its post-storage lifecycle.
    fn dispatch_event(
        &mut self,
        effect_builder: EffectBuilder<Self::Event>,
        rng: &mut NodeRng,
        event: Event,
    ) -> Effects<Self::Event> {
        match event {
            Event::Storage(event) => reactor::wrap_effects(
                Event::Storage,
                self.storage.handle_event(effect_builder, rng, event),
            ),
            Event::StorageRequest(req) => reactor::wrap_effects(
                Event::Storage,
                self.storage.handle_event(effect_builder, rng, req.into()),
            ),
            Event::BlockAccumulator(event) => reactor::wrap_effects(
                Event::BlockAccumulator,
                self.block_accumulator
                    .handle_event(effect_builder, rng, event),
            ),
            Event::MetaBlockAnnouncement(MetaBlockAnnouncement(mut meta_block)) => {
                let effects = Effects::new();
                let state = meta_block.mut_state();
                // Announced blocks must already be stored; only executed ones
                // proceed through the rest of the lifecycle below.
                assert!(state.is_stored());
                state.register_as_sent_to_transaction_buffer();
                if !state.is_executed() {
                    return effects;
                }

                state.register_we_have_tried_to_sign();
                state.register_as_consensus_notified();

                // First notification only: feed the executed block back into
                // the accumulator.
                if state.register_as_accumulator_notified().was_updated() {
                    return reactor::wrap_effects(
                        Event::BlockAccumulator,
                        self.block_accumulator.handle_event(
                            effect_builder,
                            rng,
                            super::Event::ExecutedBlock {
                                meta_block: meta_block.try_into().unwrap(),
                            },
                        ),
                    );
                }

                assert!(state.is_marked_complete());
                state.register_as_gossiped();
                assert!(state.verify_complete());
                effects
            }
            Event::ControlAnnouncement(ctrl_ann) => {
                panic!("unhandled control announcement: {}", ctrl_ann)
            }
            Event::FatalAnnouncement(fatal_ann) => {
                panic!("unhandled fatal announcement: {}", fatal_ann)
            }
            Event::BlockAccumulatorAnnouncement(_) => {
                // We do not care about block accumulator announcements in these tests.
                Effects::new()
            }
            Event::ContractRuntime(_event) => {
                panic!("test does not handle contract runtime events")
            }
            Event::NetworkRequest(_) => panic!("test does not handle network requests"),
            Event::NetworkPeerBehaviorAnnouncement(peer_behavior) => {
                self.blocked_peers.push(peer_behavior);
                Effects::new()
            }
        }
    }
}

/// Exercises `BlockAccumulator::upsert_acceptor`: acceptor creation, the
/// per-peer rate limit, and purging of outdated peer block timestamps.
#[test]
fn upsert_acceptor() {
    let mut rng = TestRng::new();
    let config = Config::default();
    let era0 = EraId::from(0);
    let validator_matrix = ValidatorMatrix::new_with_validator(ALICE_SECRET_KEY.clone());
    let recent_era_interval = 1;
    let block_time = config.purge_interval / 2;
    let metrics_registry = Registry::new();
    let mut accumulator = BlockAccumulator::new(
        config,
        validator_matrix,
        recent_era_interval,
        block_time,
        VALIDATOR_SLOTS,
        &metrics_registry,
    )
    .unwrap();

    let random_block_hash = BlockHash::random(&mut rng);
    accumulator.upsert_acceptor(random_block_hash, Some(era0), Some(*ALICE_NODE_ID));
    assert!(accumulator
        .block_acceptors
        .remove(&random_block_hash)
        .is_some());
    assert!(accumulator
        .peer_block_timestamps
        .remove(&ALICE_NODE_ID)
        .is_some());

    accumulator.register_local_tip(0, EraId::new(0));

    let max_block_count =
        PEER_RATE_LIMIT_MULTIPLIER * ((config.purge_interval / block_time) as usize);

    for _ in 0..max_block_count {
        accumulator.upsert_acceptor(
            BlockHash::random(&mut rng),
            Some(era0),
            Some(*ALICE_NODE_ID),
        );
    }

    assert_eq!(accumulator.block_acceptors.len(), max_block_count);

    let block_hash = BlockHash::random(&mut rng);

    // Alice has sent us too many blocks; we don't register this one.
    accumulator.upsert_acceptor(block_hash, Some(era0), Some(*ALICE_NODE_ID));
    assert_eq!(accumulator.block_acceptors.len(), max_block_count);
    assert!(!accumulator.block_acceptors.contains_key(&block_hash));

    // Bob hasn't sent us 
anything yet. But we don't insert without an era ID.
    accumulator.upsert_acceptor(block_hash, None, Some(*BOB_NODE_ID));
    assert_eq!(accumulator.block_acceptors.len(), max_block_count);
    assert!(!accumulator.block_acceptors.contains_key(&block_hash));

    // With an era ID he's allowed to tell us about this one.
    accumulator.upsert_acceptor(block_hash, Some(era0), Some(*BOB_NODE_ID));
    assert_eq!(accumulator.block_acceptors.len(), max_block_count + 1);
    assert!(accumulator.block_acceptors.contains_key(&block_hash));

    // And if Alice tells us about it _now_, we'll register her as a peer.
    accumulator.upsert_acceptor(block_hash, None, Some(*ALICE_NODE_ID));
    assert!(accumulator.block_acceptors[&block_hash]
        .peers()
        .contains(&ALICE_NODE_ID));

    // Modify the timestamp of the acceptor we just added to be too old.
    let purge_interval = config.purge_interval * 2;
    let purged_hash = {
        let (hash, timestamp) = accumulator
            .peer_block_timestamps
            .get_mut(&ALICE_NODE_ID)
            .unwrap()
            .front_mut()
            .unwrap();
        *timestamp = Timestamp::now().saturating_sub(purge_interval);
        *hash
    };
    // This should lead to a purge of said acceptor, therefore enabling us to
    // add another one for Alice.
    assert_eq!(accumulator.block_acceptors.len(), max_block_count + 1);
    accumulator.upsert_acceptor(
        BlockHash::random(&mut rng),
        Some(era0),
        Some(*ALICE_NODE_ID),
    );
    // Acceptor was added.
    assert_eq!(accumulator.block_acceptors.len(), max_block_count + 2);
    // The timestamp was purged.
    assert_ne!(
        accumulator
            .peer_block_timestamps
            .get(&ALICE_NODE_ID)
            .unwrap()
            .front()
            .unwrap()
            .0,
        purged_hash
    );
}

/// Registering peers on a `BlockAcceptor` accumulates them in its peer set.
#[test]
fn acceptor_get_peers() {
    let mut rng = TestRng::new();
    let block = TestBlockBuilder::new().build(&mut rng);
    let mut acceptor = BlockAcceptor::new(*block.hash(), vec![]);
    assert!(acceptor.peers().is_empty());
    let first_peer = NodeId::random(&mut rng);
    let second_peer = NodeId::random(&mut rng);
    acceptor.register_peer(first_peer);
    assert_eq!(acceptor.peers(), &BTreeSet::from([first_peer]));
    acceptor.register_peer(second_peer);
    assert_eq!(acceptor.peers(), &BTreeSet::from([first_peer, second_peer]));
}

/// Error and success paths of `BlockAcceptor::register_finality_signature`:
/// wrong block hash, invalid signatures (own vs. gossiped), duplicate
/// registrations, era mismatches, and per-signature sender bookkeeping.
#[test]
fn acceptor_register_finality_signature() {
    let rng = &mut TestRng::new();
    // Create a block and an acceptor for it.
    let block = Arc::new(TestBlockBuilder::new().build(rng));
    let chain_name_hash = ChainNameDigest::random(rng);
    let mut meta_block: ForwardMetaBlock =
        MetaBlock::new_forward(block.clone(), vec![], MetaBlockState::new())
            .try_into()
            .unwrap();
    let mut acceptor = BlockAcceptor::new(*block.hash(), vec![]);

    // Create a finality signature with the wrong block hash.
    let wrong_fin_sig = FinalitySignatureV2::random_for_block(
        BlockHash::random(rng),
        rng.gen(),
        EraId::new(0),
        ChainNameDigest::random(rng),
        rng,
    );
    assert!(matches!(
        acceptor
            .register_finality_signature(wrong_fin_sig, None, VALIDATOR_SLOTS)
            .unwrap_err(),
        Error::BlockHashMismatch {
            expected: _,
            actual: _
        }
    ));

    // Create an invalid finality signature.
    let invalid_fin_sig = FinalitySignatureV2::new(
        *block.hash(),
        block.height(),
        EraId::random(rng),
        chain_name_hash,
        Signature::System,
        PublicKey::random(rng),
    );
    // We shouldn't be able to create invalid signatures ourselves, so we've
    // reached an invalid state.
    assert!(matches!(
        acceptor
            .register_finality_signature(invalid_fin_sig.clone(), None, VALIDATOR_SLOTS)
            .unwrap_err(),
        Error::InvalidConfiguration
    ));
    // Peers shouldn't send us invalid signatures.
    let first_peer = NodeId::random(rng);
    assert!(matches!(
        acceptor
            .register_finality_signature(invalid_fin_sig, Some(first_peer), VALIDATOR_SLOTS)
            .unwrap_err(),
        Error::InvalidGossip(_)
    ));
    // Create a valid finality signature and register it.
    let fin_sig = FinalitySignatureV2::random_for_block(
        *block.hash(),
        block.height(),
        block.era_id(),
        chain_name_hash,
        rng,
    );
    assert!(acceptor
        .register_finality_signature(fin_sig.clone(), Some(first_peer), VALIDATOR_SLOTS)
        .unwrap()
        .is_none());
    // Register it from the second peer as well.
    let second_peer = NodeId::random(rng);
    assert!(acceptor
        .register_finality_signature(fin_sig.clone(), Some(second_peer), VALIDATOR_SLOTS)
        .unwrap()
        .is_none());
    // Make sure the peer list is updated accordingly.
    let (sig, senders) = acceptor.signatures().get(fin_sig.public_key()).unwrap();
    assert_eq!(*sig, fin_sig);
    assert_eq!(*senders, BTreeSet::from([first_peer, second_peer]));
    // Create a second finality signature and register it.
    let second_fin_sig = FinalitySignatureV2::random_for_block(
        *block.hash(),
        block.height(),
        block.era_id(),
        chain_name_hash,
        rng,
    );
    assert!(acceptor
        .register_finality_signature(second_fin_sig.clone(), Some(first_peer), VALIDATOR_SLOTS)
        .unwrap()
        .is_none());
    // Make sure the peer list for the first signature is unchanged.
    let (first_sig, first_sig_senders) = acceptor.signatures().get(fin_sig.public_key()).unwrap();
    assert_eq!(*first_sig, fin_sig);
    assert_eq!(
        *first_sig_senders,
     
   BTreeSet::from([first_peer, second_peer])
    );
    // Make sure the peer list for the second signature is correct.
    let (sig, senders) = acceptor
        .signatures()
        .get(second_fin_sig.public_key())
        .unwrap();
    assert_eq!(*sig, second_fin_sig);
    assert_eq!(*senders, BTreeSet::from([first_peer]));
    assert!(!acceptor.has_sufficient_finality());
    // Register the block with the sufficient finality flag set.
    meta_block.state.register_has_sufficient_finality();
    acceptor
        .register_block(meta_block.clone(), Some(first_peer))
        .unwrap();
    // Registering invalid signatures should still yield an error.
    // XOR with `u64::MAX` guarantees an era ID different from the block's.
    let wrong_era = EraId::from(u64::MAX ^ u64::from(block.era_id()));
    let invalid_fin_sig = FinalitySignatureV2::random_for_block(
        *block.hash(),
        block.height(),
        wrong_era,
        chain_name_hash,
        rng,
    );
    assert!(matches!(
        acceptor
            .register_finality_signature(invalid_fin_sig.clone(), Some(first_peer), VALIDATOR_SLOTS)
            .unwrap_err(),
        Error::EraMismatch {
            block_hash: _,
            expected: _,
            actual: _,
            peer: _
        }
    ));
    // Registering an invalid signature that we created means we're in an
    // invalid state.
    assert!(matches!(
        acceptor
            .register_finality_signature(invalid_fin_sig, None, VALIDATOR_SLOTS)
            .unwrap_err(),
        Error::InvalidConfiguration
    ));
    // Registering valid signatures still works, but we already had the second
    // signature.
    assert!(acceptor
        .register_finality_signature(second_fin_sig.clone(), Some(second_peer), VALIDATOR_SLOTS)
        .unwrap()
        .is_none());
    assert!(acceptor
        .signatures()
        .get(second_fin_sig.public_key())
        .unwrap()
        .1
        .contains(&second_peer));
    // Register a new valid signature which should be yielded by the function.
    let third_fin_sig = FinalitySignatureV2::random_for_block(
        *block.hash(),
        block.height(),
        block.era_id(),
        chain_name_hash,
        rng,
    );
    assert_eq!(
        acceptor
            .register_finality_signature(third_fin_sig.clone(), Some(first_peer), VALIDATOR_SLOTS)
            .unwrap()
            .unwrap(),
        third_fin_sig
    );
    // Additional registrations of the third signature with and without a peer
    // should still work.
    assert!(acceptor
        .register_finality_signature(third_fin_sig.clone(), Some(second_peer), VALIDATOR_SLOTS)
        .unwrap()
        .is_none());
    assert!(acceptor
        .register_finality_signature(third_fin_sig, None, VALIDATOR_SLOTS)
        .unwrap()
        .is_none());
}

/// `BlockAcceptor::register_block`: hash mismatch, invalid blocks (own vs.
/// gossiped), peer bookkeeping, and the `executed` flag.
#[test]
fn acceptor_register_block() {
    let mut rng = TestRng::new();
    // Create a block and an acceptor for it.
    let block = Arc::new(TestBlockBuilder::new().build(&mut rng));
    let mut meta_block = meta_block_with_default_state(block.clone());
    let mut acceptor = BlockAcceptor::new(*block.hash(), vec![]);

    // Create a finality signature with the wrong block hash.
    let wrong_block =
        meta_block_with_default_state(Arc::new(TestBlockBuilder::new().build(&mut rng)));
    assert!(matches!(
        acceptor.register_block(wrong_block, None).unwrap_err(),
        Error::BlockHashMismatch {
            expected: _,
            actual: _
        }
    ));

    {
        // Invalid block case.
        let invalid_block: Arc<BlockV2> = Arc::new(TestBlockBuilder::new().build_invalid(&mut rng));

        let mut invalid_block_acceptor = BlockAcceptor::new(*invalid_block.hash(), vec![]);
        let invalid_meta_block = meta_block_with_default_state(invalid_block);
        let malicious_peer = NodeId::random(&mut rng);
        // Peers shouldn't send us invalid blocks.
        assert!(matches!(
            invalid_block_acceptor
                .register_block(invalid_meta_block.clone(), Some(malicious_peer))
                .unwrap_err(),
            Error::InvalidGossip(_)
        ));
        // We shouldn't be able to create invalid blocks ourselves, so we've
        // reached an invalid state.
        assert!(matches!(
            invalid_block_acceptor
                .register_block(invalid_meta_block, None)
                .unwrap_err(),
            Error::InvalidConfiguration
        ));
    }

    // At this point, we know only the hash of the block.
    assert!(acceptor.block_height().is_none());
    assert!(acceptor.peers().is_empty());

    // Register the block with ourselves as source.
    acceptor.register_block(meta_block.clone(), None).unwrap();
    assert_eq!(acceptor.block_height().unwrap(), block.height());
    assert!(acceptor.peers().is_empty());

    // Register the block from a peer.
    let first_peer = NodeId::random(&mut rng);
    acceptor
        .register_block(meta_block.clone(), Some(first_peer))
        .unwrap();
    // Peer list should be updated.
    assert_eq!(*acceptor.peers(), BTreeSet::from([first_peer]));

    // The `executed` flag should not be set yet.
    assert!(!acceptor.executed());
    // Register the block from a second peer with the executed flag set.
    let second_peer = NodeId::random(&mut rng);
    assert!(meta_block.state.register_as_executed().was_updated());
    acceptor
        .register_block(meta_block.clone(), Some(second_peer))
        .unwrap();
    // Peer list should contain both peers.
    assert_eq!(*acceptor.peers(), BTreeSet::from([first_peer, second_peer]));
    // `executed` flag should now be set.
    assert!(acceptor.executed());

    // Re-registering with the `executed` flag set should not change anything.
    acceptor.register_block(meta_block, None).unwrap();
    assert_eq!(*acceptor.peers(), 
BTreeSet::from([first_peer, second_peer]));
    assert!(acceptor.executed());
}

/// `BlockAcceptor::should_store_block` with validator weights 40/40/10/10 and
/// a 1/3 finality threshold: nothing is stored until both the block and
/// sufficient signatures are present, and bogus signers are reported as
/// offenders.
#[test]
fn acceptor_should_store_block() {
    let mut rng = TestRng::new();
    // Create a block and an acceptor for it.
    let chain_name_hash = ChainNameDigest::random(&mut rng);
    let block = Arc::new(TestBlockBuilder::new().build(&mut rng));
    let mut meta_block = meta_block_with_default_state(block.clone());
    let mut acceptor = BlockAcceptor::new(*block.hash(), vec![]);

    // Create 4 pairs of keys so we can later create 4 signatures.
    let keys: Vec<(SecretKey, PublicKey)> = (0..4).map(|_| generate_ed25519_keypair()).collect();
    // Register the keys into the era validator weights, front loaded on the
    // first 2 with 80% weight.
    let era_validator_weights = EraValidatorWeights::new(
        block.era_id(),
        BTreeMap::from([
            (keys[0].1.clone(), U512::from(40)),
            (keys[1].1.clone(), U512::from(40)),
            (keys[2].1.clone(), U512::from(10)),
            (keys[3].1.clone(), U512::from(10)),
        ]),
        Ratio::new(1, 3),
    );

    // We should have nothing at this point.
    assert!(
        !acceptor.has_sufficient_finality()
            && acceptor.block_height().is_none()
            && acceptor.signatures().is_empty()
    );

    // With the sufficient finality flag set, nothing else should matter and we
    // should not store anything.
    acceptor.set_sufficient_finality(true);
    let (should_store, _offenders) =
        acceptor.should_store_block(&era_validator_weights, chain_name_hash);
    assert_eq!(should_store, ShouldStore::Nothing);
    // Reset the flag.
    acceptor.set_sufficient_finality(false);

    let (should_store, offenders) =
        acceptor.should_store_block(&era_validator_weights, chain_name_hash);
    assert_eq!(should_store, ShouldStore::Nothing);
    assert!(offenders.is_empty());

    let mut signatures = vec![];

    // Create the first validator's signature.
    let fin_sig = FinalitySignatureV2::create(
        *block.hash(),
        block.height(),
        block.era_id(),
        chain_name_hash,
        &keys[0].0,
    );
    signatures.push(fin_sig.clone());
    // First signature with 40% weight brings the block to weak finality.
    acceptor
        .register_finality_signature(fin_sig, None, VALIDATOR_SLOTS)
        .unwrap();
    let (should_store, _offenders) =
        acceptor.should_store_block(&era_validator_weights, chain_name_hash);
    assert_eq!(should_store, ShouldStore::Nothing);

    // Registering the block now.
    acceptor.register_block(meta_block.clone(), None).unwrap();
    let (should_store, _offenders) =
        acceptor.should_store_block(&era_validator_weights, chain_name_hash);
    assert_eq!(should_store, ShouldStore::Nothing);

    // Create the third validator's signature.
    let fin_sig = FinalitySignatureV2::create(
        *block.hash(),
        block.height(),
        block.era_id(),
        chain_name_hash,
        &keys[2].0,
    );
    // The third signature with weight 10% doesn't make the block go to
    // strict finality.
    signatures.push(fin_sig.clone());
    acceptor
        .register_finality_signature(fin_sig, None, VALIDATOR_SLOTS)
        .unwrap();
    let (should_store, _offenders) =
        acceptor.should_store_block(&era_validator_weights, chain_name_hash);
    assert_eq!(should_store, ShouldStore::Nothing);

    // Create a bogus signature from a non-validator for this era.
    let non_validator_keys = generate_ed25519_keypair();
    let faulty_peer = NodeId::random(&mut rng);
    let bogus_sig = FinalitySignatureV2::create(
        *block.hash(),
        block.height(),
        block.era_id(),
        chain_name_hash,
        &non_validator_keys.0,
    );
    acceptor
        .register_finality_signature(bogus_sig, Some(faulty_peer), VALIDATOR_SLOTS)
        .unwrap();
    let (should_store, offenders) =
        acceptor.should_store_block(&era_validator_weights, chain_name_hash);
    assert_eq!(should_store, ShouldStore::Nothing);
    // Make sure the peer who sent us this bogus signature is marked as an
    // offender.
    assert_eq!(offenders[0].0, faulty_peer);

    // Create the second validator's signature.
    let fin_sig = FinalitySignatureV2::create(
        *block.hash(),
        block.height(),
        block.era_id(),
        chain_name_hash,
        &keys[1].0,
    );
    signatures.push(fin_sig.clone());
    // Second signature with 40% weight brings the block to strict finality.
    acceptor
        .register_finality_signature(fin_sig, None, VALIDATOR_SLOTS)
        .unwrap();
    let (should_store, _offenders) =
        acceptor.should_store_block(&era_validator_weights, chain_name_hash);
    let block_signatures = signatures_for_block(&block, &signatures, chain_name_hash);
    let mut meta_block_with_expected_state = meta_block.clone();
    meta_block_with_expected_state.state.register_as_stored();
    meta_block_with_expected_state
        .state
        .register_has_sufficient_finality();
    assert_eq!(
        should_store,
        ShouldStore::SufficientlySignedBlock {
            meta_block: meta_block_with_expected_state,
            block_signatures,
        }
    );

    // Create the fourth validator's signature.
    let fin_sig = FinalitySignatureV2::create(
        *block.hash(),
        block.height(),
        block.era_id(),
        chain_name_hash,
        &keys[3].0,
    );
    // Already have sufficient finality signatures, so we're not supposed to
    // store anything else.
    acceptor
        .register_finality_signature(fin_sig, None, VALIDATOR_SLOTS)
        .unwrap();
    let (should_store, _offenders) =
        acceptor.should_store_block(&era_validator_weights, chain_name_hash);
    assert_eq!(should_store, ShouldStore::Nothing);

    // 
Without the block, even with sufficient signatures we should not store anything.
    acceptor.set_meta_block(None);
    acceptor.set_sufficient_finality(false);
    let (should_store, _offenders) =
        acceptor.should_store_block(&era_validator_weights, chain_name_hash);
    assert_eq!(should_store, ShouldStore::Nothing);

    // Without any signatures, we should not store anything.
    meta_block.state.register_has_sufficient_finality();
    acceptor.set_meta_block(Some(meta_block));
    acceptor.signatures_mut().retain(|_, _| false);
    let (should_store, _offenders) =
        acceptor.should_store_block(&era_validator_weights, chain_name_hash);
    assert_eq!(should_store, ShouldStore::Nothing);
}

/// One peer may only provide up to `validator_slots * 2` signatures for a
/// block; the next one from the same peer is rejected.
#[test]
fn acceptor_should_correctly_bound_the_signatures() {
    let mut rng = TestRng::new();
    let validator_slots = 2;

    // Create a block and an acceptor for it.
    let block = Arc::new(TestBlockBuilder::new().build(&mut rng));
    let chain_name_hash = ChainNameDigest::random(&mut rng);
    let mut acceptor = BlockAcceptor::new(*block.hash(), vec![]);
    let first_peer = NodeId::random(&mut rng);

    // Fill the signatures map:
    for fin_sig in (0..validator_slots * 2).map(|_| {
        FinalitySignatureV2::random_for_block(
            *block.hash(),
            block.height(),
            block.era_id(),
            chain_name_hash,
            &mut rng,
        )
    }) {
        assert!(acceptor
            .register_finality_signature(fin_sig, Some(first_peer), validator_slots)
            .unwrap()
            .is_none());
    }

    let fin_sig = FinalitySignatureV2::random_for_block(
        *block.hash(),
        block.height(),
        block.era_id(),
        chain_name_hash,
        &mut rng,
    );
    assert!(matches!(
        acceptor.register_finality_signature(fin_sig, Some(first_peer), validator_slots),
        Err(Error::TooManySignatures { .. }),
    ));
}

/// The signature bound is tracked per peer: a different peer can still add a
/// signature after another peer has used up its quota.
#[test]
fn acceptor_signatures_bound_should_not_be_triggered_if_peers_are_different() {
    let mut rng = TestRng::new();
    let validator_slots = 3;

    // Create a block and an acceptor for it.
    let block = Arc::new(TestBlockBuilder::new().build(&mut rng));
    let chain_name_hash = ChainNameDigest::random(&mut rng);
    let mut acceptor = BlockAcceptor::new(*block.hash(), vec![]);
    let first_peer = NodeId::random(&mut rng);
    let second_peer = NodeId::random(&mut rng);

    // Fill the signatures map:
    for fin_sig in (0..validator_slots).map(|_| {
        FinalitySignatureV2::random_for_block(
            *block.hash(),
            block.height(),
            block.era_id(),
            chain_name_hash,
            &mut rng,
        )
    }) {
        assert!(acceptor
            .register_finality_signature(fin_sig, Some(first_peer), validator_slots)
            .unwrap()
            .is_none());
    }

    // This should pass, because it is another peer:
    let fin_sig = FinalitySignatureV2::random_for_block(
        *block.hash(),
        block.height(),
        block.era_id(),
        chain_name_hash,
        &mut rng,
    );
    assert!(acceptor
        .register_finality_signature(fin_sig, Some(second_peer), validator_slots)
        .unwrap()
        .is_none());
}

/// Walks `BlockAccumulator::leap_instruction` through its cases: unset local
/// tip, unknown height, no usable acceptors, within/outside the attempt
/// execution threshold, and proximity to an upgrade boundary.
#[test]
fn accumulator_should_leap() {
    let mut rng = TestRng::new();
    let mut validator_matrix = ValidatorMatrix::new_with_validator(ALICE_SECRET_KEY.clone());
    let block_accumulator_config = Config::default();
    let recent_era_interval = 1;
    let block_time = block_accumulator_config.purge_interval / 2;
    let attempt_execution_threshold = block_accumulator_config.attempt_execution_threshold;
    let mut block_accumulator = BlockAccumulator::new(
        block_accumulator_config,
        validator_matrix.clone(),
        recent_era_interval,
        block_time,
        VALIDATOR_SLOTS,
        &Registry::default(),
    )
    .unwrap();

    let era_id = EraId::from(0);
    let chain_name_hash = ChainNameDigest::random(&mut rng);

    // Register the era in the validator matrix so the block is valid.
    register_evw_for_era(&mut validator_matrix, era_id);

    assert!(
        block_accumulator.local_tip.is_none(),
        "block_accumulator local tip should init null"
    );

    expected_leap_instruction(
        LeapInstruction::UnsetLocalTip,
        block_accumulator.leap_instruction(&SyncIdentifier::BlockIdentifier(
            BlockHash::random(&mut rng),
            0,
        )),
    );

    block_accumulator.local_tip = Some(LocalTipIdentifier::new(1, era_id));

    let synced = SyncIdentifier::BlockHash(BlockHash::random(&mut rng));
    expected_leap_instruction(
        LeapInstruction::UnknownBlockHeight,
        block_accumulator.leap_instruction(&synced),
    );

    let synced = SyncIdentifier::SyncedBlockIdentifier(BlockHash::random(&mut rng), 1, era_id);
    expected_leap_instruction(
        LeapInstruction::NoUsableBlockAcceptors,
        block_accumulator.leap_instruction(&synced),
    );

    // Create an acceptor to change the highest usable block height.
    {
        let block = TestBlockBuilder::new()
            .era(era_id)
            .height(1)
            .switch_block(false)
            .build(&mut rng);

        block_accumulator
            .block_acceptors
            .insert(*block.hash(), block_acceptor(block, chain_name_hash));
    }

    expected_leap_instruction(
        LeapInstruction::AtHighestKnownBlock,
        block_accumulator.leap_instruction(&synced),
    );

    let block_height = attempt_execution_threshold;
    // Insert an acceptor within execution range
    {
        let block = TestBlockBuilder::new()
            .era(era_id)
            .height(block_height)
            .switch_block(false)
            .build(&mut rng);

        block_accumulator
            .block_acceptors
            .insert(*block.hash(), block_acceptor(block, chain_name_hash));
    }

    expected_leap_instruction(
        LeapInstruction::WithinAttemptExecutionThreshold(
            attempt_execution_threshold.saturating_sub(1),
        ),
        block_accumulator.leap_instruction(&synced),
    );

    let centurion = 100;
    // Insert an upgrade boundary
    {
        let block = TestBlockBuilder::new()
            .era(era_id)
            .height(centurion)
            .switch_block(true)
            .build(&mut rng);

        block_accumulator
            .block_acceptors
            .insert(*block.hash(), block_acceptor(block, chain_name_hash));
    }

    expected_leap_instruction(
        LeapInstruction::AtHighestKnownBlock,
        block_accumulator.leap_instruction(&SyncIdentifier::SyncedBlockIdentifier(
            BlockHash::random(&mut rng),
            centurion,
            era_id,
        )),
    );
    expected_leap_instruction(
        LeapInstruction::OutsideAttemptExecutionThreshold(attempt_execution_threshold + 1),
        block_accumulator.leap_instruction(&SyncIdentifier::SyncedBlockIdentifier(
            BlockHash::random(&mut rng),
            centurion - attempt_execution_threshold - 1,
            era_id,
        )),
    );

    let offset = centurion.saturating_sub(attempt_execution_threshold);
    for height in offset..centurion {
        expected_leap_instruction(
            LeapInstruction::WithinAttemptExecutionThreshold(centurion.saturating_sub(height)),
            block_accumulator.leap_instruction(&SyncIdentifier::SyncedBlockIdentifier(
                BlockHash::random(&mut rng),
                height,
                era_id,
            )),
        );
    }

    let upgrade_attempt_execution_threshold = attempt_execution_threshold * 2;
    
block_accumulator.register_activation_point(Some(ActivationPoint::EraId(era_id.successor())));
    let offset = centurion.saturating_sub(upgrade_attempt_execution_threshold);
    for height in offset..centurion {
        expected_leap_instruction(
            LeapInstruction::TooCloseToUpgradeBoundary(centurion.saturating_sub(height)),
            block_accumulator.leap_instruction(&SyncIdentifier::SyncedBlockIdentifier(
                BlockHash::random(&mut rng),
                height,
                era_id,
            )),
        );
    }

    expected_leap_instruction(
        LeapInstruction::AtHighestKnownBlock,
        block_accumulator.leap_instruction(&SyncIdentifier::SyncedBlockIdentifier(
            BlockHash::random(&mut rng),
            centurion,
            era_id,
        )),
    );
    expected_leap_instruction(
        LeapInstruction::OutsideAttemptExecutionThreshold(upgrade_attempt_execution_threshold + 1),
        block_accumulator.leap_instruction(&SyncIdentifier::SyncedBlockIdentifier(
            BlockHash::random(&mut rng),
            centurion - upgrade_attempt_execution_threshold - 1,
            era_id,
        )),
    );
}

/// Test helper: asserts that `actual` equals `expected`, rendering both
/// instructions in the panic message on mismatch.
fn expected_leap_instruction(expected: LeapInstruction, actual: LeapInstruction) {
    assert!(
        expected.eq(&actual),
        "{}",
        format!("expected: {} actual: {}", expected, actual)
    );
}

/// Test helper: builds a `BlockAcceptor` for `block` that has registered both
/// a finality signature from ALICE (the only validator in these tests) and the
/// block itself, with its meta-block state marked as having sufficient
/// finality.
fn block_acceptor(block: BlockV2, chain_name_hash: ChainNameDigest) -> BlockAcceptor {
    let mut acceptor = BlockAcceptor::new(*block.hash(), vec![]);
    // One finality signature from our only validator for block 1.
    acceptor
        .register_finality_signature(
            FinalitySignatureV2::create(
                *block.hash(),
                block.height(),
                block.era_id(),
                chain_name_hash,
                &ALICE_SECRET_KEY,
            ),
            None,
            VALIDATOR_SLOTS,
        )
        .unwrap();

    let meta_block = {
        let mut state = MetaBlockState::new();
        state.register_has_sufficient_finality();
        MetaBlock::new_forward(Arc::new(block), vec![], state)
            .try_into()
            .unwrap()
    };
    acceptor.register_block(meta_block, None).unwrap();

    acceptor
}

/// Exercises `BlockAccumulator::purge`: acceptors with sufficient finality
/// survive a purge even when stale; once their sufficient-finality flag is
/// cleared they are purged; the local tip and blocks within
/// `attempt_execution_threshold` of it are immune to purging; and stale
/// per-peer block timestamps are dropped.
#[test]
fn accumulator_purge() {
    let mut rng = TestRng::new();
    let mut validator_matrix = ValidatorMatrix::new_with_validator(ALICE_SECRET_KEY.clone());
    let block_accumulator_config = Config::default();
    let recent_era_interval = 1;
    let block_time = block_accumulator_config.purge_interval / 2;
    let purge_interval = block_accumulator_config.purge_interval;
    let time_before_insertion = Timestamp::now();
    let mut block_accumulator = BlockAccumulator::new(
        block_accumulator_config,
        validator_matrix.clone(),
        recent_era_interval,
        block_time,
        VALIDATOR_SLOTS,
        &Registry::default(),
    )
    .unwrap();
    block_accumulator.register_local_tip(0, 0.into());

    // Create 3 parent-child blocks.
    let block_1 = Arc::new(generate_non_genesis_block(&mut rng));
    let block_2 = Arc::new(generate_next_block(&mut rng, &block_1));
    let block_3 = Arc::new(generate_next_block(&mut rng, &block_2));

    // Also create 2 peers.
    let peer_1 = NodeId::random(&mut rng);
    let peer_2 = NodeId::random(&mut rng);

    let chain_name_hash = ChainNameDigest::random(&mut rng);

    // One finality signature from our only validator for block 1.
    let fin_sig_1 = FinalitySignatureV2::create(
        *block_1.hash(),
        block_1.height(),
        block_1.era_id(),
        chain_name_hash,
        &ALICE_SECRET_KEY,
    );
    // One finality signature from our only validator for block 2.
    let fin_sig_2 = FinalitySignatureV2::create(
        *block_2.hash(),
        block_2.height(),
        block_2.era_id(),
        chain_name_hash,
        &ALICE_SECRET_KEY,
    );
    // One finality signature from our only validator for block 3.
    let fin_sig_3 = FinalitySignatureV2::create(
        *block_3.hash(),
        block_3.height(),
        block_3.era_id(),
        chain_name_hash,
        &ALICE_SECRET_KEY,
    );

    // Register the eras in the validator matrix so the blocks are valid.
    {
        register_evw_for_era(&mut validator_matrix, block_1.era_id());
        register_evw_for_era(&mut validator_matrix, block_2.era_id());
        register_evw_for_era(&mut validator_matrix, block_3.era_id());
    }

    // We will manually call `upsert_acceptor` in order to have
    // `peer_block_timestamps` populated.
    {
        // Insert the first block with sufficient finality from the first peer.
        block_accumulator.upsert_acceptor(*block_1.hash(), Some(block_1.era_id()), Some(peer_1));
        let acceptor = block_accumulator
            .block_acceptors
            .get_mut(block_1.hash())
            .unwrap();
        let mut state = MetaBlockState::new();
        state.register_has_sufficient_finality();
        let meta_block = MetaBlock::new_forward(block_1.clone(), vec![], state)
            .try_into()
            .unwrap();
        acceptor
            .register_finality_signature(fin_sig_1, Some(peer_1), VALIDATOR_SLOTS)
            .unwrap();
        acceptor.register_block(meta_block, None).unwrap();
    }

    {
        // Insert the second block with sufficient finality from the second
        // peer.
        block_accumulator.upsert_acceptor(*block_2.hash(), Some(block_2.era_id()), Some(peer_2));
        let acceptor = block_accumulator
            .block_acceptors
            .get_mut(block_2.hash())
            .unwrap();
        let mut state = MetaBlockState::new();
        state.register_has_sufficient_finality();
        let meta_block = MetaBlock::new_forward(block_2.clone(), vec![], state)
            .try_into()
            .unwrap();
        acceptor
            .register_finality_signature(fin_sig_2, Some(peer_2), VALIDATOR_SLOTS)
            .unwrap();
        acceptor.register_block(meta_block, None).unwrap();
    }

    {
        // Insert the third block with sufficient finality from the third peer.
        block_accumulator.upsert_acceptor(*block_3.hash(), Some(block_3.era_id()), Some(peer_1));
        block_accumulator.upsert_acceptor(*block_3.hash(), Some(block_3.era_id()), Some(peer_2));
        let acceptor = block_accumulator
            .block_acceptors
            .get_mut(block_3.hash())
            .unwrap();
        let mut state = MetaBlockState::new();
        state.register_has_sufficient_finality();
        let meta_block = MetaBlock::new_forward(block_3.clone(), vec![], state)
            .try_into()
            .unwrap();
        acceptor
            .register_finality_signature(fin_sig_3, Some(peer_1), VALIDATOR_SLOTS)
            .unwrap();
        acceptor.register_block(meta_block, Some(peer_2)).unwrap();
    }

    {
        // Modify the times in the acceptors for blocks 1 and 2 as well as in
        // `peer_block_timestamps` for the second peer to become outdated.
        let last_progress = time_before_insertion.saturating_sub(purge_interval * 10);
        block_accumulator
            .block_acceptors
            .get_mut(block_1.hash())
            .unwrap()
            .set_last_progress(last_progress);
        block_accumulator
            .block_acceptors
            .get_mut(block_2.hash())
            .unwrap()
            .set_last_progress(last_progress);
        for (_block_hash, timestamp) in block_accumulator
            .peer_block_timestamps
            .get_mut(&peer_2)
            .unwrap()
        {
            *timestamp = last_progress;
        }
    }

    // Entries we modified earlier should be purged.
    block_accumulator.purge();
    // Acceptors for blocks 1 and 2 should not have been purged because they
    // have strict finality.
    assert!(block_accumulator
        .block_acceptors
        .contains_key(block_1.hash()));
    assert!(block_accumulator
        .block_acceptors
        .contains_key(block_2.hash()));
    assert!(block_accumulator
        .block_acceptors
        .contains_key(block_3.hash()));
    // We should have kept only the timestamps for the first peer.
    assert!(block_accumulator
        .peer_block_timestamps
        .contains_key(&peer_1));
    assert!(!block_accumulator
        .peer_block_timestamps
        .contains_key(&peer_2));

    {
        // Modify the `strict_finality` flag in the acceptors for blocks 1 and
        // 2.
        block_accumulator
            .block_acceptors
            .get_mut(block_1.hash())
            .unwrap()
            .set_sufficient_finality(false);
        block_accumulator
            .block_acceptors
            .get_mut(block_2.hash())
            .unwrap()
            .set_sufficient_finality(false);
    }

    // Entries we modified earlier should be purged.
    block_accumulator.purge();
    // Acceptors for blocks 1 and 2 should have been purged.
    assert!(!block_accumulator
        .block_acceptors
        .contains_key(block_1.hash()));
    assert!(!block_accumulator
        .block_acceptors
        .contains_key(block_2.hash()));
    assert!(block_accumulator
        .block_acceptors
        .contains_key(block_3.hash()));
    // The third block acceptor is all that is left and it has no known
    // children, so `block_children` should be empty.
    assert!(block_accumulator.block_children.is_empty());
    // We should have kept only the timestamps for the first peer.
    assert!(block_accumulator
        .peer_block_timestamps
        .contains_key(&peer_1));
    assert!(!block_accumulator
        .peer_block_timestamps
        .contains_key(&peer_2));

    // Create a block just in range of block 3 to not qualify for a purge.
    let in_range_block = Arc::new(
        TestBlockBuilder::new()
            .era(block_3.era_id())
            .height(block_3.height() - block_accumulator.attempt_execution_threshold)
            .protocol_version(block_3.protocol_version())
            .switch_block(false)
            .build(&mut rng),
    );

    let in_range_block_sig = FinalitySignatureV2::create(
        *in_range_block.hash(),
        in_range_block.height(),
        in_range_block.era_id(),
        chain_name_hash,
        &ALICE_SECRET_KEY,
    );

    {
        // Insert the in range block with sufficient finality.
        block_accumulator.upsert_acceptor(
            *in_range_block.hash(),
            Some(in_range_block.era_id()),
            Some(peer_1),
        );
        let acceptor = block_accumulator
            .block_acceptors
            .get_mut(in_range_block.hash())
            .unwrap();
        let mut state = MetaBlockState::new();
        state.register_has_sufficient_finality();
        let meta_block = MetaBlock::new_forward(in_range_block.clone(), vec![], state)
            .try_into()
            .unwrap();
        acceptor
            .register_finality_signature(in_range_block_sig, Some(peer_1), VALIDATOR_SLOTS)
            .unwrap();
        acceptor.register_block(meta_block, Some(peer_2)).unwrap();
    }

    // Create a block just out of range of block 3 to qualify for a purge.
    let out_of_range_block = Arc::new(
        TestBlockBuilder::new()
            .era(block_3.era_id())
            .height(block_3.height() - block_accumulator.attempt_execution_threshold - 1)
            .protocol_version(block_3.protocol_version())
            .switch_block(false)
            .build(&mut rng),
    );
    let out_of_range_block_sig = FinalitySignatureV2::create(
        *out_of_range_block.hash(),
        out_of_range_block.height(),
        out_of_range_block.era_id(),
        chain_name_hash,
        &ALICE_SECRET_KEY,
    );

    {
        // Insert the out of range block with sufficient finality.
        block_accumulator.upsert_acceptor(
            *out_of_range_block.hash(),
            Some(out_of_range_block.era_id()),
            Some(peer_1),
        );
        let acceptor = block_accumulator
            .block_acceptors
            .get_mut(out_of_range_block.hash())
            .unwrap();
        let mut state = MetaBlockState::new();
        state.register_has_sufficient_finality();
        let meta_block = MetaBlock::new_forward(out_of_range_block.clone(), vec![], state)
            .try_into()
            .unwrap();
        acceptor
            .register_finality_signature(out_of_range_block_sig, Some(peer_1), VALIDATOR_SLOTS)
            .unwrap();
        acceptor.register_block(meta_block, Some(peer_2)).unwrap();
    }

    // Make sure the local tip along with its recent parents never get purged.
    {
        assert!(block_accumulator
            .block_acceptors
            .contains_key(block_3.hash()));
        // Make block 3 the local tip.
        block_accumulator.local_tip =
            Some(LocalTipIdentifier::new(block_3.height(), block_3.era_id()));
        // Change the timestamps to old ones so that all blocks would normally
        // get purged.
        let last_progress = time_before_insertion.saturating_sub(purge_interval * 10);
        for (_, acceptor) in block_accumulator.block_acceptors.iter_mut() {
            acceptor.set_last_progress(last_progress);
        }
        for (_, timestamps) in block_accumulator.peer_block_timestamps.iter_mut() {
            for (_, timestamp) in timestamps.iter_mut() {
                *timestamp = last_progress;
            }
        }
        // Do the purge.
        block_accumulator.purge();
        // As block 3 is the local tip, it should not have been purged.
        assert!(block_accumulator
            .block_acceptors
            .contains_key(block_3.hash()));
        // Neither should the block in `attempt_execution_threshold` range.
        assert!(block_accumulator
            .block_acceptors
            .contains_key(in_range_block.hash()));
        // But the block out of `attempt_execution_threshold` range should
        // have been purged.
        assert!(!block_accumulator
            .block_acceptors
            .contains_key(out_of_range_block.hash()));

        // Now replace the local tip with something else (in this case we'll
        // have no local tip) so that previously created blocks no longer have
        // purge immunity.
        block_accumulator.local_tip.take();
        // Do the purge.
        block_accumulator.purge();
        // Block 3 is no longer the local tip, and given that it's old, the
        // blocks should have been purged.
        assert!(block_accumulator.block_acceptors.is_empty());
    }

    // Create a future block after block 3.
    let future_block = Arc::new(
        TestBlockBuilder::new()
            .era(block_3.era_id())
            .height(block_3.height() + block_accumulator.attempt_execution_threshold)
            .protocol_version(block_3.protocol_version())
            .switch_block(false)
            .build(&mut rng),
    );
    let future_block_sig = FinalitySignatureV2::create(
        *future_block.hash(),
        future_block.height(),
        future_block.era_id(),
        chain_name_hash,
        &ALICE_SECRET_KEY,
    );

    {
        // Insert the future block with sufficient finality.
        block_accumulator.upsert_acceptor(
            *future_block.hash(),
            Some(future_block.era_id()),
            Some(peer_1),
        );
        let acceptor = block_accumulator
            .block_acceptors
            .get_mut(future_block.hash())
            .unwrap();
        let mut state = MetaBlockState::new();
        state.register_has_sufficient_finality();
        let meta_block = MetaBlock::new_forward(future_block.clone(), vec![], state)
            .try_into()
            .unwrap();
        acceptor
            .register_finality_signature(future_block_sig, Some(peer_1), VALIDATOR_SLOTS)
            .unwrap();
        acceptor.register_block(meta_block, Some(peer_2)).unwrap();
    }

    // Create a future block after block 3, but which will not have strict
    // finality.
    let future_unsigned_block = Arc::new(
        TestBlockBuilder::new()
            .era(block_3.era_id())
            .height(block_3.height() + block_accumulator.attempt_execution_threshold * 2)
            .protocol_version(block_3.protocol_version())
            .switch_block(false)
            .build(&mut rng),
    );
    let future_unsigned_block_sig = FinalitySignatureV2::create(
        *future_unsigned_block.hash(),
        future_unsigned_block.height(),
        future_unsigned_block.era_id(),
        chain_name_hash,
        &ALICE_SECRET_KEY,
    );

    {
        // Insert the future unsigned block without sufficient finality.
        block_accumulator.upsert_acceptor(
            *future_unsigned_block.hash(),
            Some(future_unsigned_block.era_id()),
            Some(peer_1),
        );
        let acceptor = block_accumulator
            .block_acceptors
            .get_mut(future_unsigned_block.hash())
            .unwrap();
        // NOTE: unlike the other insertions, sufficient finality is
        // deliberately NOT registered on this state.
        let state = MetaBlockState::new();
        let meta_block = MetaBlock::new_forward(future_unsigned_block.clone(), vec![], state)
            .try_into()
            .unwrap();
        acceptor
            .register_finality_signature(future_unsigned_block_sig, Some(peer_1), VALIDATOR_SLOTS)
            .unwrap();
        acceptor.register_block(meta_block, Some(peer_2)).unwrap();
    }

    // Make sure block with sufficient finality doesn't get purged.
    {
        // Make block 3 the local tip again.
        block_accumulator.local_tip =
            Some(LocalTipIdentifier::new(block_3.height(), block_3.era_id()));
        assert!(block_accumulator
            .block_acceptors
            .contains_key(future_block.hash()));
        assert!(block_accumulator
            .block_acceptors
            .contains_key(future_unsigned_block.hash()));

        // Change the timestamps to old ones so that all blocks would normally
        // get purged.
        let last_progress = time_before_insertion.saturating_sub(purge_interval * 10);
        for (_, acceptor) in block_accumulator.block_acceptors.iter_mut() {
            acceptor.set_last_progress(last_progress);
        }
        for (_, timestamps) in block_accumulator.peer_block_timestamps.iter_mut() {
            for (_, timestamp) in timestamps.iter_mut() {
                *timestamp = last_progress;
            }
        }
        // Do the purge.
        block_accumulator.purge();
        // The future block with sufficient finality should not have been
        // purged.
        assert!(block_accumulator
            .block_acceptors
            .contains_key(future_block.hash()));
        // But the future block without sufficient finality should have been
        // purged.
        assert!(!block_accumulator
            .block_acceptors
            .contains_key(future_unsigned_block.hash()));

        // Now replace the local tip with something else (in this case we'll
        // have no local tip) so that previously created blocks no longer have
        // purge immunity.
        block_accumulator.local_tip.take();
        // Do the purge.
        block_accumulator.purge();
        // Block 3 is no longer the local tip, and given that it's old, the
        // blocks should have been purged.
        assert!(block_accumulator.block_acceptors.is_empty());
    }
}

/// Test helper: registers validator weights for `era_id` in the matrix, with
/// ALICE as the only validator (weight 100) and a finality threshold ratio of
/// 1/3.
fn register_evw_for_era(validator_matrix: &mut ValidatorMatrix, era_id: EraId) {
let weights = EraValidatorWeights::new(
        era_id,
        BTreeMap::from([(ALICE_PUBLIC_KEY.clone(), 100.into())]),
        Ratio::new(1, 3),
    );
    validator_matrix.register_era_validator_weights(weights);
}

/// Test helper: builds the child of `block` — height + 1, same protocol
/// version, never a switch block. The era advances only if `block` itself is a
/// switch block.
fn generate_next_block(rng: &mut TestRng, block: &BlockV2) -> BlockV2 {
    let era_id = if block.is_switch_block() {
        block.era_id().successor()
    } else {
        block.era_id()
    };

    TestBlockBuilder::new()
        .era(era_id)
        .height(block.height() + 1)
        .protocol_version(block.protocol_version())
        .switch_block(false)
        .build(rng)
}

/// Test helper: builds a random block with era in `10..20`, a height
/// consistent with that era (era * 10 plus `0..10`), and a 10% chance of being
/// a switch block.
fn generate_non_genesis_block(rng: &mut TestRng) -> BlockV2 {
    let era = rng.gen_range(10..20);
    let height = era * 10 + rng.gen_range(0..10);
    let is_switch = rng.gen_bool(0.1);

    TestBlockBuilder::new()
        .era(era)
        .height(height)
        .switch_block(is_switch)
        .build(rng)
}

/// Test helper: builds a block `height_difference` below `block`, in the
/// predecessor era (or era 0 if there is none).
fn generate_older_block(rng: &mut TestRng, block: &BlockV2, height_difference: u64) -> BlockV2 {
    TestBlockBuilder::new()
        .era(block.era_id().predecessor().unwrap_or_default())
        .height(block.height() - height_difference)
        .protocol_version(block.protocol_version())
        .switch_block(false)
        .build(rng)
}

/// End-to-end flow through a mock reactor: blocks and finality signatures
/// arriving in either order reach sufficient finality and are stored; an
/// executed block becomes the local tip and is marked complete; blocks older
/// than the local tip are ignored, while a lone signature for an older block
/// still creates an acceptor unless its era is older than the recent-era
/// window.
#[tokio::test]
async fn block_accumulator_reactor_flow() {
    let mut rng = TestRng::new();
    let (chainspec, chainspec_raw_bytes) =
        <(Chainspec, ChainspecRawBytes)>::from_resources("local");
    let chain_name_hash = chainspec.name_hash();
    let mut runner: Runner<MockReactor> = Runner::new(
        (),
        Arc::new(chainspec),
        Arc::new(chainspec_raw_bytes),
        &mut rng,
    )
    .await
    .unwrap();

    // Create 2 blocks, one parent one child.
    let block_1 = generate_non_genesis_block(&mut rng);
    let block_2 = generate_next_block(&mut rng, &block_1);

    // Also create 2 peers.
    let peer_1 = NodeId::random(&mut rng);
    let peer_2 = NodeId::random(&mut rng);

    // One finality signature from our only validator for block 1.
    let fin_sig_1 = FinalitySignatureV2::create(
        *block_1.hash(),
        block_1.height(),
        block_1.era_id(),
        chain_name_hash,
        &ALICE_SECRET_KEY,
    );
    // One finality signature from our only validator for block 2.
    let fin_sig_2 = FinalitySignatureV2::create(
        *block_2.hash(),
        block_2.height(),
        block_2.era_id(),
        chain_name_hash,
        &ALICE_SECRET_KEY,
    );

    // Register the eras in the validator matrix so the blocks are valid.
    {
        let mut validator_matrix = runner.reactor_mut().validator_matrix.clone();
        register_evw_for_era(&mut validator_matrix, block_1.era_id());
        register_evw_for_era(&mut validator_matrix, block_2.era_id());
    }

    // Register a signature for block 1.
    {
        let effect_builder = runner.effect_builder();
        let reactor = runner.reactor_mut();

        let block_accumulator = &mut reactor.block_accumulator;
        block_accumulator.register_local_tip(0, 0.into());

        let event = super::Event::ReceivedFinalitySignature {
            finality_signature: Box::new(fin_sig_1.clone()),
            sender: peer_1,
        };
        let effects = block_accumulator.handle_event(effect_builder, &mut rng, event);
        assert!(effects.is_empty());
    }

    // Register block 1.
    {
        runner
            .process_injected_effects(|effect_builder| {
                let event = super::Event::ReceivedBlock {
                    block: Arc::new(block_1.clone()),
                    sender: peer_2,
                };
                effect_builder
                    .into_inner()
                    .schedule(event, QueueKind::Validation)
                    .ignore()
            })
            .await;
        for _ in 0..6 {
            while runner.try_crank(&mut rng).await == TryCrankOutcome::NoEventsToProcess {
                time::sleep(POLL_INTERVAL).await;
            }
        }
        let expected_block = runner
            .reactor()
            .storage
            .read_block_by_hash(*block_1.hash())
            .unwrap();
        assert_eq!(expected_block, block_1.clone().into());
        let expected_block_signatures = runner
            .reactor()
            .storage
            .get_finality_signatures_for_block(*block_1.hash());
        assert_eq!(
            expected_block_signatures
                .and_then(|sigs| sigs.finality_signature(fin_sig_1.public_key()))
                .unwrap(),
            FinalitySignature::from(fin_sig_1)
        );
    }

    // Register block 2 before the signature.
    {
        let effect_builder = runner.effect_builder();
        let reactor = runner.reactor_mut();

        let block_accumulator = &mut reactor.block_accumulator;
        let event = super::Event::ReceivedBlock {
            block: Arc::new(block_2.clone()),
            sender: peer_2,
        };
        let effects = block_accumulator.handle_event(effect_builder, &mut rng, event);
        assert!(effects.is_empty());
    }

    // Register the signature for block 2.
    {
        runner
            .process_injected_effects(|effect_builder| {
                let event = super::Event::CreatedFinalitySignature {
                    finality_signature: Box::new(fin_sig_2.clone()),
                };
                effect_builder
                    .into_inner()
                    .schedule(event, QueueKind::Validation)
                    .ignore()
            })
            .await;
        for _ in 0..6 {
            while runner.try_crank(&mut rng).await == TryCrankOutcome::NoEventsToProcess {
                time::sleep(POLL_INTERVAL).await;
            }
        }

        let expected_block = runner
            .reactor()
            .storage
            .read_block_by_hash(*block_2.hash())
            .unwrap();
        assert_eq!(expected_block, block_2.clone().into());
        let expected_block_signatures = runner
            .reactor()
            .storage
            .get_finality_signatures_for_block(*block_2.hash());
        assert_eq!(
            expected_block_signatures
                .and_then(|sigs| sigs.finality_signature(fin_sig_2.public_key()))
                .unwrap(),
            FinalitySignature::from(fin_sig_2)
        );
    }

    // Verify the state of the accumulator is correct.
    {
        let reactor = runner.reactor_mut();
        let block_accumulator = &mut reactor.block_accumulator;
        // Local tip should not have changed since no blocks were executed.
        assert_eq!(
            block_accumulator.local_tip,
            Some(LocalTipIdentifier::new(0, 0.into()))
        );

        assert!(!block_accumulator
            .block_acceptors
            .get(block_1.hash())
            .unwrap()
            .executed());
        assert!(block_accumulator
            .block_acceptors
            .get(block_1.hash())
            .unwrap()
            .has_sufficient_finality());
        assert_eq!(
            *block_accumulator
                .block_acceptors
                .get(block_1.hash())
                .unwrap()
                .peers(),
            BTreeSet::from([peer_1, peer_2])
        );

        assert!(!block_accumulator
            .block_acceptors
            .get(block_2.hash())
            .unwrap()
            .executed());
        assert!(block_accumulator
            .block_acceptors
            .get(block_2.hash())
            .unwrap()
            .has_sufficient_finality());
        assert_eq!(
            *block_accumulator
                .block_acceptors
                .get(block_2.hash())
                .unwrap()
                .peers(),
            BTreeSet::from([peer_2])
        );

        // Shouldn't have any complete blocks.
        assert!(runner
            .reactor()
            .storage
            .get_highest_complete_block()
            .unwrap()
            .is_none());
    }

    // Get the meta block along with the state, then register it as executed to
    // later notify the accumulator of its execution.
    let meta_block_1 = {
        let block_accumulator = &runner.reactor().block_accumulator;
        let mut meta_block = block_accumulator
            .block_acceptors
            .get(block_1.hash())
            .unwrap()
            .meta_block()
            .unwrap();
        assert!(meta_block.state.register_as_executed().was_updated());
        meta_block
    };

    // Let the accumulator know block 1 has been executed.
    {
        runner
            .process_injected_effects(|effect_builder| {
                let event = super::Event::ExecutedBlock {
                    meta_block: meta_block_1.clone(),
                };
                effect_builder
                    .into_inner()
                    .schedule(event, QueueKind::Validation)
                    .ignore()
            })
            .await;
        for _ in 0..4 {
            while runner.try_crank(&mut rng).await == TryCrankOutcome::NoEventsToProcess {
                time::sleep(POLL_INTERVAL).await;
            }
        }
    }

    // Verify the state of the accumulator is correct.
    {
        let reactor = runner.reactor_mut();
        let block_accumulator = &mut reactor.block_accumulator;
        // Local tip should now be block 1.
        let expected_local_tip = LocalTipIdentifier::new(block_1.height(), block_1.era_id());
        assert_eq!(block_accumulator.local_tip, Some(expected_local_tip));

        assert!(block_accumulator
            .block_acceptors
            .get(block_1.hash())
            .unwrap()
            .executed());
        assert!(block_accumulator
            .block_acceptors
            .get(block_1.hash())
            .unwrap()
            .has_sufficient_finality());
        assert_eq!(
            *block_accumulator
                .block_acceptors
                .get(block_1.hash())
                .unwrap()
                .peers(),
            BTreeSet::from([peer_1, peer_2])
        );
        // The block should be marked complete in storage by now.
        assert_eq!(
            runner
                .reactor()
                .storage
                .get_highest_complete_block()
                .unwrap()
                .unwrap()
                .height(),
            meta_block_1.block.height()
        );
    }

    // Retrigger the event so the accumulator can update its meta block state.
    {
        runner
            .process_injected_effects(|effect_builder| {
                let event = super::Event::ExecutedBlock {
                    meta_block: meta_block_1.clone(),
                };
                effect_builder
                    .into_inner()
                    .schedule(event, QueueKind::Validation)
                    .ignore()
            })
            .await;
        while runner.try_crank(&mut rng).await == TryCrankOutcome::NoEventsToProcess {
            time::sleep(POLL_INTERVAL).await;
        }
    }

    let older_block = generate_older_block(&mut rng, &block_1, 1);
    // Register an older block.
    {
        let effect_builder = runner.effect_builder();
        let reactor = runner.reactor_mut();

        let block_accumulator = &mut reactor.block_accumulator;
        let event = super::Event::ReceivedBlock {
            block: Arc::new(older_block.clone()),
            sender: peer_1,
        };
        let effects = block_accumulator.handle_event(effect_builder, &mut rng, event);
        assert!(effects.is_empty());
        // This should have no effect on the accumulator since the block is
        // older than the local tip.
        assert!(!block_accumulator
            .block_acceptors
            .contains_key(older_block.hash()));
    }

    let older_block_signature = FinalitySignatureV2::create(
        *older_block.hash(),
        older_block.height(),
        older_block.era_id(),
        chain_name_hash,
        &ALICE_SECRET_KEY,
    );
    // Register a signature for an older block.
    {
        let effect_builder = runner.effect_builder();
        let reactor = runner.reactor_mut();

        let block_accumulator = &mut reactor.block_accumulator;
        let event = super::Event::ReceivedFinalitySignature {
            finality_signature: Box::new(older_block_signature),
            sender: peer_2,
        };
        let effects = block_accumulator.handle_event(effect_builder, &mut rng, event);
        assert!(effects.is_empty());
        // The block is older than the local tip, but the accumulator doesn't
        // know that because it was only provided with the signature, so it
        // creates the acceptor if it's in the same era or newer than the
        // local tip era, which, in this case, it is.
        assert!(block_accumulator
            .block_acceptors
            .contains_key(older_block.hash()));
    }

    let old_era_block = TestBlockBuilder::new()
        .era(block_1.era_id() - RECENT_ERA_INTERVAL - 1)
        .height(1)
        .switch_block(false)
        .build(&mut rng);

    let old_era_signature = FinalitySignatureV2::create(
        *old_era_block.hash(),
        old_era_block.height(),
        old_era_block.era_id(),
        chain_name_hash,
        &ALICE_SECRET_KEY,
    );
    // Register a signature for a block in an old era.
    {
        let effect_builder = runner.effect_builder();
        let reactor = runner.reactor_mut();

        let block_accumulator = &mut reactor.block_accumulator;
        let event = super::Event::ReceivedFinalitySignature {
            finality_signature: Box::new(old_era_signature),
            sender: peer_2,
        };
        let effects = block_accumulator.handle_event(effect_builder, &mut rng, event);
        assert!(effects.is_empty());
        // This signature is from an older era and shouldn't lead to the
        // creation of an acceptor.
        assert!(!block_accumulator
            .block_acceptors
            .contains_key(old_era_block.hash()));
    }
}

#[tokio::test]
async fn block_accumulator_doesnt_purge_with_delayed_block_execution() {
    let mut rng = TestRng::new();
    let (chainspec, chainspec_raw_bytes) =
        <(Chainspec, ChainspecRawBytes)>::from_resources("local");
    let chain_name_hash = chainspec.name_hash();
    let mut runner: Runner<MockReactor> = Runner::new(
        (),
        Arc::new(chainspec),
        Arc::new(chainspec_raw_bytes),
        &mut rng,
    )
    .await
    .unwrap();

    // Create 1 block.
    let block_1 = generate_non_genesis_block(&mut rng);

    // Also create 2 peers.
    let peer_1 = NodeId::random(&mut rng);
    let peer_2 = NodeId::random(&mut rng);

    let fin_sig_bob = FinalitySignatureV2::create(
        *block_1.hash(),
        block_1.height(),
        block_1.era_id(),
        chain_name_hash,
        &BOB_SECRET_KEY,
    );

    let fin_sig_carol = FinalitySignatureV2::create(
        *block_1.hash(),
        block_1.height(),
        block_1.era_id(),
        chain_name_hash,
        &CAROL_SECRET_KEY,
    );

    let fin_sig_alice = FinalitySignatureV2::create(
        *block_1.hash(),
        block_1.height(),
        block_1.era_id(),
        chain_name_hash,
        &ALICE_SECRET_KEY,
    );

    // Register the era in the validator matrix so the block is valid.
    {
        let mut validator_matrix = runner.reactor_mut().validator_matrix.clone();
        let weights = EraValidatorWeights::new(
           block_1.era_id(),\n            BTreeMap::from([\n                (ALICE_PUBLIC_KEY.clone(), 10.into()), /* Less weight so that the sig from Alice\n                                                        * would not have sufficient finality */\n                (BOB_PUBLIC_KEY.clone(), 100.into()),\n                (CAROL_PUBLIC_KEY.clone(), 100.into()),\n            ]),\n            Ratio::new(1, 3),\n        );\n        validator_matrix.register_era_validator_weights(weights);\n    }\n\n    // Register signatures for block 1.\n    {\n        let effect_builder = runner.effect_builder();\n        let reactor = runner.reactor_mut();\n\n        let block_accumulator = &mut reactor.block_accumulator;\n        block_accumulator.register_local_tip(0, 0.into());\n\n        let event = super::Event::ReceivedFinalitySignature {\n            finality_signature: Box::new(fin_sig_bob.clone()),\n            sender: peer_1,\n        };\n        let effects = block_accumulator.handle_event(effect_builder, &mut rng, event);\n        assert!(effects.is_empty());\n\n        let event = super::Event::ReceivedFinalitySignature {\n            finality_signature: Box::new(fin_sig_carol.clone()),\n            sender: peer_1,\n        };\n        let effects = block_accumulator.handle_event(effect_builder, &mut rng, event);\n        assert!(effects.is_empty());\n\n        // Register the finality signature created by Alice (this validator) after executing the\n        // block.\n        let event = super::Event::CreatedFinalitySignature {\n            finality_signature: Box::new(fin_sig_alice.clone()),\n        };\n        let effects = block_accumulator.handle_event(effect_builder, &mut rng, event);\n        assert!(effects.is_empty());\n    }\n\n    // Register block 1 as received from peer.\n    {\n        runner\n            .process_injected_effects(|effect_builder| {\n                let event = super::Event::ReceivedBlock {\n                    block: 
Arc::new(block_1.clone()),\n                    sender: peer_2,\n                };\n                effect_builder\n                    .into_inner()\n                    .schedule(event, QueueKind::Validation)\n                    .ignore()\n            })\n            .await;\n        for _ in 0..6 {\n            while runner.try_crank(&mut rng).await == TryCrankOutcome::NoEventsToProcess {\n                time::sleep(POLL_INTERVAL).await;\n            }\n        }\n        let expected_block = runner\n            .reactor()\n            .storage\n            .read_block_by_hash(*block_1.hash())\n            .unwrap();\n        assert_eq!(expected_block, block_1.clone().into());\n        let expected_block_signatures = runner\n            .reactor()\n            .storage\n            .get_finality_signatures_for_block(*block_1.hash());\n        assert_eq!(\n            expected_block_signatures\n                .and_then(|sigs| sigs.finality_signature(fin_sig_alice.public_key()))\n                .unwrap(),\n            FinalitySignature::from(fin_sig_alice)\n        );\n    }\n\n    // Now add a delay between when the finality signature is created and registered in the\n    // accumulator. Usually registering the created finality signature and the executed block\n    // happen immediately but if the event queue is backed up the event to register the executed\n    // block can be delayed. 
Since we would purge an acceptor if the purge interval has passed,\n    // we want to simulate a situation in which the purge interval was exceeded in order to test\n    // the special case that if an acceptor that had sufficient finality, it is not purged.\n    time::sleep(\n        Duration::from(runner.reactor().block_accumulator.purge_interval) + Duration::from_secs(1),\n    )\n    .await;\n\n    // Register block 1 as having been executed by Alice (this node).\n    {\n        runner\n            .process_injected_effects(|effect_builder| {\n                let mut meta_block_state = MetaBlockState::new_already_stored();\n                meta_block_state.register_as_executed();\n                let event = super::Event::ExecutedBlock {\n                    meta_block: MetaBlock::new_forward(\n                        Arc::new(block_1.clone()),\n                        Vec::new(),\n                        meta_block_state,\n                    )\n                    .try_into()\n                    .unwrap(),\n                };\n                effect_builder\n                    .into_inner()\n                    .schedule(event, QueueKind::Regular)\n                    .ignore()\n            })\n            .await;\n        let mut finished = false;\n        while !finished {\n            let mut retry_count = 5;\n            while runner.try_crank(&mut rng).await == TryCrankOutcome::NoEventsToProcess {\n                retry_count -= 1;\n                if retry_count == 0 {\n                    finished = true;\n                    break;\n                }\n                time::sleep(POLL_INTERVAL).await;\n            }\n        }\n\n        // Expect that the block was marked complete by the event generated by the accumulator.\n        let expected_block = runner\n            .reactor()\n            .storage\n            .get_highest_complete_block()\n            .unwrap()\n            .unwrap();\n        assert_eq!(expected_block.height(), 
block_1.height());\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_accumulator.rs",
    "content": "mod block_acceptor;\nmod config;\nmod error;\nmod event;\nmod leap_instruction;\nmod local_tip_identifier;\nmod metrics;\nmod sync_identifier;\nmod sync_instruction;\n#[cfg(test)]\nmod tests;\n\nuse std::{\n    collections::{btree_map, BTreeMap, VecDeque},\n    convert::TryInto,\n    sync::Arc,\n};\n\nuse datasize::DataSize;\nuse futures::FutureExt;\nuse itertools::Itertools;\nuse prometheus::Registry;\nuse tracing::{debug, error, info, warn};\n\nuse casper_types::{\n    ActivationPoint, Block, BlockHash, BlockSignaturesV2, EraId, FinalitySignatureV2, TimeDiff,\n    Timestamp,\n};\n\nuse crate::{\n    components::{\n        block_accumulator::{\n            block_acceptor::{BlockAcceptor, ShouldStore},\n            leap_instruction::LeapInstruction,\n            local_tip_identifier::LocalTipIdentifier,\n            metrics::Metrics,\n        },\n        network::blocklist::BlocklistJustification,\n        Component, ValidatorBoundComponent,\n    },\n    effect::{\n        announcements::{\n            BlockAccumulatorAnnouncement, FatalAnnouncement, MetaBlockAnnouncement,\n            PeerBehaviorAnnouncement,\n        },\n        requests::{BlockAccumulatorRequest, MarkBlockCompletedRequest, StorageRequest},\n        EffectBuilder, EffectExt, Effects,\n    },\n    fatal,\n    types::{ForwardMetaBlock, MetaBlock, MetaBlockState, NodeId, ValidatorMatrix},\n    NodeRng,\n};\n\npub(crate) use config::Config;\npub(crate) use error::Error;\npub(crate) use event::Event;\npub(crate) use sync_identifier::SyncIdentifier;\npub(crate) use sync_instruction::SyncInstruction;\n\nconst COMPONENT_NAME: &str = \"block_accumulator\";\n\n/// If a peer \"informs\" us about more than the expected number of new blocks times this factor,\n/// they are probably spamming, and we refuse to create new block acceptors for them.\nconst PEER_RATE_LIMIT_MULTIPLIER: usize = 2;\n\n/// A cache of pending blocks and finality signatures that are gossiped to this node.\n///\n/// 
Announces new blocks and finality signatures once they become valid.\n#[derive(DataSize, Debug)]\npub(crate) struct BlockAccumulator {\n    /// This component requires the era validator weights for every era\n    /// it receives blocks and / or finality signatures for to verify that\n    /// the received signatures are legitimate to the era and to calculate\n    /// sufficient finality from collected finality signatures.\n    validator_matrix: ValidatorMatrix,\n    /// Each block_acceptor instance is responsible for combining\n    /// potential blocks and their finality signatures. When we have\n    /// collected sufficient finality weight's worth of signatures\n    /// for a potential block, we accept the block and store it.\n    block_acceptors: BTreeMap<BlockHash, BlockAcceptor>,\n    /// Key is the parent block hash, value is the child block hash.\n    /// Used to determine if we have awareness of the next block to be\n    /// sync'd or executed.\n    block_children: BTreeMap<BlockHash, BlockHash>,\n    /// The height of the subjective local tip of the chain. This is used to\n    /// keep track of whether blocks received from the network are relevant or not,\n    /// and to determine if this node is close enough to the perceived tip of the\n    /// network to transition to executing block for itself.\n    local_tip: Option<LocalTipIdentifier>,\n    /// Chainspec activation point.\n    activation_point: Option<ActivationPoint>,\n    /// Configured setting for how close to perceived tip local tip must be for\n    /// this node to attempt block execution for itself.\n    attempt_execution_threshold: u64,\n    /// Configured setting for tolerating a lack of newly received block\n    /// and / or finality signature data. 
If we last saw progress longer\n    /// ago than this interval, we will poll the network to determine\n    /// if we are caught up or have become isolated.\n    dead_air_interval: TimeDiff,\n    /// Configured setting for how often to purge dead state.\n    purge_interval: TimeDiff,\n    /// Configured setting for how many eras are considered to be recent.\n    recent_era_interval: u64,\n    /// Tracks activity and assists with perceived tip determination.\n    last_progress: Timestamp,\n    /// For each peer, a list of block hashes we first heard from them, and the timestamp when we\n    /// created the block acceptor, from oldest to newest.\n    peer_block_timestamps: BTreeMap<NodeId, VecDeque<(BlockHash, Timestamp)>>,\n    /// The minimum time between a block and its child.\n    min_block_time: TimeDiff,\n    /// The number of validator slots.\n    validator_slots: u32,\n    /// Metrics.\n    #[data_size(skip)]\n    metrics: Metrics,\n}\n\nimpl BlockAccumulator {\n    pub(crate) fn new(\n        config: Config,\n        validator_matrix: ValidatorMatrix,\n        recent_era_interval: u64,\n        min_block_time: TimeDiff,\n        validator_slots: u32,\n        registry: &Registry,\n    ) -> Result<Self, prometheus::Error> {\n        Ok(Self {\n            validator_matrix,\n            attempt_execution_threshold: config.attempt_execution_threshold,\n            dead_air_interval: config.dead_air_interval,\n            block_acceptors: Default::default(),\n            block_children: Default::default(),\n            last_progress: Timestamp::now(),\n            purge_interval: config.purge_interval,\n            local_tip: None,\n            activation_point: None,\n            recent_era_interval,\n            peer_block_timestamps: Default::default(),\n            min_block_time,\n            validator_slots,\n            metrics: Metrics::new(registry)?,\n        })\n    }\n\n    pub(crate) fn sync_instruction(&mut self, sync_identifier: SyncIdentifier) -> 
SyncInstruction {\n        let block_hash = sync_identifier.block_hash();\n        let leap_instruction = self.leap_instruction(&sync_identifier);\n        debug!(?leap_instruction, \"BlockAccumulator\");\n        if let Some((block_height, era_id)) = sync_identifier.block_height_and_era() {\n            self.register_local_tip(block_height, era_id);\n        }\n        if leap_instruction.should_leap() {\n            return SyncInstruction::Leap { block_hash };\n        }\n        match sync_identifier.block_hash_to_sync(self.next_syncable_block_hash(block_hash)) {\n            Some(block_hash_to_sync) => {\n                self.reset_last_progress();\n                SyncInstruction::BlockSync {\n                    block_hash: block_hash_to_sync,\n                }\n            }\n            None => {\n                if self.is_stale() {\n                    debug!(%block_hash, \"BlockAccumulator: when not in Validate leap because stale gossip\");\n                    SyncInstruction::LeapIntervalElapsed { block_hash }\n                } else {\n                    SyncInstruction::CaughtUp { block_hash }\n                }\n            }\n        }\n    }\n\n    /// Register activation point from next protocol version chainspec, if any.\n    pub(crate) fn register_activation_point(\n        &mut self,\n        maybe_activation_point: Option<ActivationPoint>,\n    ) {\n        self.activation_point = maybe_activation_point;\n    }\n\n    /// Drops all old block acceptors and tracks new local block height;\n    /// subsequent attempts to register a block lower than tip will be rejected.\n    fn register_local_tip(&mut self, height: u64, era_id: EraId) {\n        let new_local_tip = match self.local_tip {\n            Some(current) => current.height < height && current.era_id <= era_id,\n            None => true,\n        };\n        if new_local_tip {\n            self.purge();\n            self.local_tip = Some(LocalTipIdentifier::new(height, era_id));\n       
     self.reset_last_progress();\n            info!(local_tip=?self.local_tip, \"new local tip detected\");\n        }\n    }\n\n    /// Registers a peer with an existing acceptor, or creates a new one.\n    ///\n    /// If the era is outdated or the peer has already caused us to create more acceptors than\n    /// expected, no new acceptor will be created.\n    fn upsert_acceptor(\n        &mut self,\n        block_hash: BlockHash,\n        maybe_era_id: Option<EraId>,\n        maybe_sender: Option<NodeId>,\n    ) {\n        // If the acceptor already exists, just register the peer, if applicable.\n        let entry = match self.block_acceptors.entry(block_hash) {\n            btree_map::Entry::Occupied(entry) => {\n                if let Some(sender) = maybe_sender {\n                    entry.into_mut().register_peer(sender);\n                }\n                return;\n            }\n            btree_map::Entry::Vacant(entry) => entry,\n        };\n\n        // The acceptor doesn't exist. 
Don't create it if the item's era is not\n        // provided or the item's era is older than the local tip era by more\n        // than `recent_era_interval`.\n        match (maybe_era_id, self.local_tip) {\n            (Some(era_id), Some(local_tip))\n                if era_id >= local_tip.era_id.saturating_sub(self.recent_era_interval) => {}\n            (Some(_), None) => {}\n            _ => {\n                // If we created the event, it's safe to create the acceptor.\n                if maybe_sender.is_some() {\n                    debug!(?maybe_era_id, local_tip=?self.local_tip, \"not creating acceptor\");\n                    return;\n                }\n            }\n        }\n\n        // Check that the sender isn't telling us about more blocks than expected.\n        if let Some(sender) = maybe_sender {\n            let block_timestamps = self.peer_block_timestamps.entry(sender).or_default();\n\n            // Prune the timestamps, so the count reflects only the most recently added acceptors.\n            let purge_interval = self.purge_interval;\n            while block_timestamps\n                .front()\n                .is_some_and(|(_, timestamp)| timestamp.elapsed() > purge_interval)\n            {\n                block_timestamps.pop_front();\n            }\n\n            // Assume a block time of at least 1 millisecond, so we don't divide by zero.\n            let min_block_time = self.min_block_time.max(TimeDiff::from_millis(1));\n            let expected_blocks = (purge_interval / min_block_time) as usize;\n            let max_block_count = PEER_RATE_LIMIT_MULTIPLIER.saturating_mul(expected_blocks);\n            if block_timestamps.len() >= max_block_count {\n                warn!(\n                    ?sender, %block_hash,\n                    \"rejecting block hash from peer who sent us more than {} within {}\",\n                    max_block_count, self.purge_interval,\n                );\n                return;\n            }\n       
     block_timestamps.push_back((block_hash, Timestamp::now()));\n        }\n\n        entry.insert(BlockAcceptor::new(block_hash, maybe_sender));\n        self.metrics.block_acceptors.inc();\n    }\n\n    fn register_block<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        meta_block: ForwardMetaBlock,\n        sender: Option<NodeId>,\n    ) -> Effects<Event>\n    where\n        REv: From<StorageRequest>\n            + From<PeerBehaviorAnnouncement>\n            + From<MarkBlockCompletedRequest>\n            + From<FatalAnnouncement>\n            + Send,\n    {\n        let block_hash = meta_block.block.hash();\n        debug!(%block_hash, \"registering block\");\n        let era_id = meta_block.block.era_id();\n        let block_height = meta_block.block.height();\n        if self\n            .local_tip\n            .as_ref()\n            .is_some_and(|local_tip| block_height < local_tip.height)\n        {\n            debug!(%block_hash, \"ignoring outdated block\");\n            return Effects::new();\n        }\n        self.upsert_acceptor(*block_hash, Some(era_id), sender);\n\n        let acceptor = match self.block_acceptors.get_mut(block_hash) {\n            None => return Effects::new(),\n            Some(acceptor) => acceptor,\n        };\n\n        match acceptor.register_block(meta_block, sender) {\n            Ok(_) => match self.validator_matrix.validator_weights(era_id) {\n                Some(evw) => {\n                    let (should_store, faulty_senders) =\n                        acceptor.should_store_block(&evw, self.validator_matrix.chain_name_hash());\n                    self.store_block_and_finality_signatures(\n                        effect_builder,\n                        should_store,\n                        faulty_senders,\n                    )\n                }\n                None => Effects::new(),\n            },\n            Err(error) => match error {\n                Error::InvalidGossip(ref 
gossip_error) => {\n                    warn!(%gossip_error, \"received invalid block\");\n                    effect_builder\n                        .announce_block_peer_with_justification(\n                            gossip_error.peer(),\n                            BlocklistJustification::SentBadBlock { error },\n                        )\n                        .ignore()\n                }\n                Error::EraMismatch {\n                    peer,\n                    block_hash,\n                    expected,\n                    actual,\n                } => {\n                    warn!(\n                        \"era mismatch from {} for {}; expected: {} and actual: {}\",\n                        peer, block_hash, expected, actual\n                    );\n                    effect_builder\n                        .announce_block_peer_with_justification(\n                            peer,\n                            BlocklistJustification::SentBadBlock { error },\n                        )\n                        .ignore()\n                }\n                ref error @ Error::BlockHashMismatch { .. } => {\n                    error!(%error, \"finality signature has mismatched block_hash; this is a bug\");\n                    Effects::new()\n                }\n                ref error @ Error::SufficientFinalityWithoutBlock { .. 
} => {\n                    error!(%error, \"should not have sufficient finality without block\");\n                    Effects::new()\n                }\n                Error::InvalidConfiguration => fatal!(\n                    effect_builder,\n                    \"node has an invalid configuration, shutting down\"\n                )\n                .ignore(),\n                Error::BogusValidator(_) => {\n                    error!(%error, \"unexpected detection of bogus validator, this is a bug\");\n                    Effects::new()\n                }\n                Error::MetaBlockMerge(error) => {\n                    error!(%error, \"failed to merge meta blocks, this is a bug\");\n                    Effects::new()\n                }\n                Error::TooManySignatures { peer, limit } => effect_builder\n                    .announce_block_peer_with_justification(\n                        peer,\n                        BlocklistJustification::SentTooManyFinalitySignatures {\n                            max_allowed: limit,\n                        },\n                    )\n                    .ignore(),\n            },\n        }\n    }\n\n    fn register_finality_signature<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        finality_signature: FinalitySignatureV2,\n        sender: Option<NodeId>,\n    ) -> Effects<Event>\n    where\n        REv: From<StorageRequest>\n            + From<PeerBehaviorAnnouncement>\n            + From<MarkBlockCompletedRequest>\n            + From<FatalAnnouncement>\n            + Send,\n    {\n        let block_hash = finality_signature.block_hash();\n        let era_id = finality_signature.era_id();\n        self.upsert_acceptor(*block_hash, Some(era_id), sender);\n\n        let acceptor = match self.block_acceptors.get_mut(block_hash) {\n            Some(acceptor) => acceptor,\n            // When there is no acceptor for it, this function returns\n            // early, ignoring the 
signature.\n            None => {\n                debug!(%finality_signature, \"no acceptor to receive finality_signature\");\n                return Effects::new();\n            }\n        };\n\n        if sender.is_none() {\n            acceptor.set_our_signature(finality_signature.clone());\n        }\n\n        debug!(%finality_signature, \"registering finality signature\");\n        match acceptor.register_finality_signature(finality_signature, sender, self.validator_slots)\n        {\n            Ok(Some(finality_signature)) => self.store_block_and_finality_signatures(\n                effect_builder,\n                ShouldStore::SingleSignature(finality_signature),\n                None,\n            ),\n            Ok(None) => match self.validator_matrix.validator_weights(era_id) {\n                Some(evw) => {\n                    let (should_store, faulty_senders) =\n                        acceptor.should_store_block(&evw, self.validator_matrix.chain_name_hash());\n                    self.store_block_and_finality_signatures(\n                        effect_builder,\n                        should_store,\n                        faulty_senders,\n                    )\n                }\n                None => Effects::new(),\n            },\n            Err(error) => match error {\n                Error::InvalidGossip(ref gossip_error) => {\n                    warn!(%gossip_error, \"received invalid finality_signature\");\n                    effect_builder\n                        .announce_block_peer_with_justification(\n                            gossip_error.peer(),\n                            BlocklistJustification::SentBadFinalitySignature { error },\n                        )\n                        .ignore()\n                }\n                Error::EraMismatch {\n                    peer,\n                    block_hash,\n                    expected,\n                    actual,\n                } => {\n                    // the 
acceptor logic purges finality signatures that don't match\n                    // the era validators, so in this case we can continue to\n                    // use the acceptor\n                    warn!(\n                        \"era mismatch from {} for {}; expected: {} and actual: {}\",\n                        peer, block_hash, expected, actual\n                    );\n                    effect_builder\n                        .announce_block_peer_with_justification(\n                            peer,\n                            BlocklistJustification::SentBadFinalitySignature { error },\n                        )\n                        .ignore()\n                }\n                ref error @ Error::BlockHashMismatch { .. } => {\n                    error!(%error, \"finality signature has mismatched block_hash; this is a bug\");\n                    Effects::new()\n                }\n                ref error @ Error::SufficientFinalityWithoutBlock { .. } => {\n                    error!(%error, \"should not have sufficient finality without block\");\n                    Effects::new()\n                }\n                Error::InvalidConfiguration => fatal!(\n                    effect_builder,\n                    \"node has an invalid configuration, shutting down\"\n                )\n                .ignore(),\n                Error::BogusValidator(_) => {\n                    error!(%error, \"unexpected detection of bogus validator, this is a bug\");\n                    Effects::new()\n                }\n                Error::MetaBlockMerge(error) => {\n                    error!(%error, \"failed to merge meta blocks, this is a bug\");\n                    Effects::new()\n                }\n                Error::TooManySignatures { peer, limit } => effect_builder\n                    .announce_block_peer_with_justification(\n                        peer,\n                        BlocklistJustification::SentTooManyFinalitySignatures {\n           
                 max_allowed: limit,\n                        },\n                    )\n                    .ignore(),\n            },\n        }\n    }\n\n    fn register_stored<REv>(\n        &self,\n        effect_builder: EffectBuilder<REv>,\n        maybe_meta_block: Option<ForwardMetaBlock>,\n        maybe_block_signatures: Option<BlockSignaturesV2>,\n    ) -> Effects<Event>\n    where\n        REv: From<BlockAccumulatorAnnouncement>\n            + From<MarkBlockCompletedRequest>\n            + From<MetaBlockAnnouncement>\n            + Send,\n    {\n        let mut effects = Effects::new();\n        if let Some(meta_block) = maybe_meta_block {\n            effects.extend(\n                effect_builder\n                    .announce_meta_block(meta_block.into())\n                    .ignore(),\n            );\n        };\n        if let Some(block_signatures) = maybe_block_signatures {\n            for finality_signature in block_signatures.finality_signatures() {\n                effects.extend(\n                    effect_builder\n                        .announce_finality_signature_accepted(Box::new(finality_signature))\n                        .ignore(),\n                );\n            }\n        }\n        effects\n    }\n\n    fn get_peers(&self, block_hash: BlockHash) -> Option<Vec<NodeId>> {\n        self.block_acceptors\n            .get(&block_hash)\n            .map(|acceptor| acceptor.peers().iter().cloned().collect())\n    }\n\n    fn is_stale(&mut self) -> bool {\n        // we expect to be receiving gossiped blocks from other nodes\n        // if we haven't received any messages describing higher blocks\n        // for more than the self.dead_air_interval config allows\n        // we leap again to poll the network\n        self.last_progress.elapsed() >= self.dead_air_interval\n    }\n\n    pub(crate) fn reset_last_progress(&mut self) {\n        self.last_progress = Timestamp::now();\n    }\n\n    fn leap_instruction(&self, sync_identifier: 
&SyncIdentifier) -> LeapInstruction {\n        let local_tip_height = match self.local_tip {\n            Some(local_tip) => local_tip.height,\n            None => {\n                // if the accumulator is unaware of local tip,\n                // leap to learn more about the network state\n                return LeapInstruction::UnsetLocalTip;\n            }\n        };\n\n        let sync_identifier_height = match sync_identifier.block_height() {\n            Some(block_height) => block_height,\n            None => {\n                if let Some(height) = self\n                    .block_acceptors\n                    .get(&sync_identifier.block_hash())\n                    .filter(|x| x.block_height().is_some())\n                    .map(|x| x.block_height().unwrap_or_default())\n                {\n                    height\n                } else {\n                    return LeapInstruction::UnknownBlockHeight;\n                }\n            }\n        };\n\n        match self\n            .block_acceptors\n            .iter()\n            .filter(|(_, acceptor)| {\n                acceptor.has_sufficient_finality() && acceptor.block_height().is_some()\n            })\n            .max_by(|x, y| x.1.block_height().cmp(&y.1.block_height()))\n            .map(|(_, acceptor)| {\n                (\n                    acceptor.block_height().unwrap_or_default(),\n                    acceptor.is_upgrade_boundary(self.activation_point),\n                )\n            }) {\n            None => LeapInstruction::NoUsableBlockAcceptors,\n            Some((acceptor_height, is_upgrade_boundary)) => {\n                // the accumulator has heard about at least one usable block via gossiping\n                // if we've see chatter about a usable higher block, we can determine\n                // if we have local state at or near that highest usable block.\n                // if we have reason to believe we have fallen too far behind the network,\n                // 
we should switch to catchup mode and start the leap process\n                // otherwise, we should attempt to keep up with the network by\n                // executing our own blocks.\n\n                // This is a special case; if we have heard chatter about the last block\n                // before a protocol upgrade and have enough finality signatures to believe\n                // it, we want to be cautious about leaping, because other nodes on the\n                // network are starting to go down and come back up on the new protocol\n                // version and may or may not respond. Thus, it is best for the node to\n                // continue executing its own blocks to get to the upgrade point on its\n                // own (if able).\n                let is_upgrade_boundary = is_upgrade_boundary == Some(true);\n\n                let height = local_tip_height.max(sync_identifier_height);\n                let distance_from_highest_known_block = acceptor_height.saturating_sub(height);\n\n                LeapInstruction::from_execution_threshold(\n                    self.attempt_execution_threshold,\n                    distance_from_highest_known_block,\n                    is_upgrade_boundary,\n                )\n            }\n        }\n    }\n\n    fn next_syncable_block_hash(&self, parent_block_hash: BlockHash) -> Option<BlockHash> {\n        let child_hash = self.block_children.get(&parent_block_hash)?;\n        let block_acceptor = self.block_acceptors.get(child_hash)?;\n        if block_acceptor.has_sufficient_finality() {\n            Some(block_acceptor.block_hash())\n        } else {\n            None\n        }\n    }\n\n    fn purge(&mut self) {\n        let now = Timestamp::now();\n        let mut purged = vec![];\n        let purge_interval = self.purge_interval;\n        let maybe_local_tip_height = self.local_tip.map(|local_tip| local_tip.height);\n        let attempt_execution_threshold = self.attempt_execution_threshold;\n        
self.block_acceptors.retain(|k, v| {\n            if let (Some(acceptor_height), Some(local_tip_height)) =\n                (v.block_height(), maybe_local_tip_height)\n            {\n                // With `attempt_execution_threshold` being 3 as of this\n                // comment, we keep blocks in the range\n                // [(local_tip_height - 3), local_tip_height].\n                if acceptor_height >= local_tip_height.saturating_sub(attempt_execution_threshold)\n                    && acceptor_height <= local_tip_height\n                {\n                    return true;\n                }\n                // Keep future blocks that we signed or are sufficiently signed.\n                if acceptor_height > local_tip_height\n                    && (v.our_signature().is_some() || v.has_sufficient_finality())\n                {\n                    return true;\n                }\n            }\n            let expired = now.saturating_diff(v.last_progress()) > purge_interval;\n            if expired {\n                purged.push(*k)\n            }\n            !expired\n        });\n        self.block_children\n            .retain(|_parent, child| false == purged.contains(child));\n        self.peer_block_timestamps.retain(|_, block_timestamps| {\n            while block_timestamps\n                .front()\n                .is_some_and(|(_, timestamp)| timestamp.elapsed() > purge_interval)\n            {\n                block_timestamps.pop_front();\n            }\n            !block_timestamps.is_empty()\n        });\n\n        self.metrics\n            .block_acceptors\n            .set(self.block_acceptors.len().try_into().unwrap_or(i64::MIN));\n        self.metrics\n            .known_child_blocks\n            .set(self.block_children.len().try_into().unwrap_or(i64::MIN));\n    }\n\n    fn update_block_children(&mut self, meta_block: &ForwardMetaBlock) {\n        if meta_block.block.is_genesis() {\n            return;\n        }\n        let 
parent_hash = meta_block.block.parent_hash();\n        if self\n            .block_children\n            .insert(*parent_hash, *meta_block.block.hash())\n            .is_none()\n        {\n            self.metrics.known_child_blocks.inc();\n        }\n    }\n\n    fn store_block_and_finality_signatures<REv, I>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        should_store: ShouldStore,\n        faulty_senders: I,\n    ) -> Effects<Event>\n    where\n        REv: From<PeerBehaviorAnnouncement>\n            + From<StorageRequest>\n            + From<MarkBlockCompletedRequest>\n            + Send,\n        I: IntoIterator<Item = (NodeId, Error)>,\n    {\n        let mut effects = match should_store {\n            ShouldStore::SufficientlySignedBlock {\n                meta_block,\n                block_signatures,\n            } => {\n                let block_hash = meta_block.block.hash();\n                debug!(%block_hash, \"storing block and finality signatures\");\n                self.update_block_children(&meta_block);\n                // The block wasn't executed yet, so we just put it to storage. 
An `ExecutedBlock`\n                // event will then re-trigger this flow and eventually mark it complete.\n                let cloned_signatures = block_signatures.clone();\n                let block: Block = (*meta_block.block).clone().into();\n                effect_builder\n                    .put_block_to_storage(Arc::new(block))\n                    .then(move |_| {\n                        effect_builder.put_signatures_to_storage(cloned_signatures.into())\n                    })\n                    .event(move |_| Event::Stored {\n                        maybe_meta_block: Some(meta_block),\n                        maybe_block_signatures: Some(block_signatures),\n                    })\n            }\n            ShouldStore::CompletedBlock {\n                meta_block,\n                block_signatures,\n            } => {\n                let block_hash = meta_block.block.hash();\n                debug!(%block_hash, \"storing finality signatures and marking block complete\");\n                self.update_block_children(&meta_block);\n                // The block was already executed, which means it is stored and we have the global\n                // state for it. 
As on this code path we also know it is sufficiently signed,\n                // we mark it as complete.\n                let block_height = meta_block.block.height();\n                effect_builder\n                    .put_signatures_to_storage(block_signatures.clone().into())\n                    .then(move |_| effect_builder.mark_block_completed(block_height))\n                    .event(move |_| Event::Stored {\n                        maybe_meta_block: Some(meta_block),\n                        maybe_block_signatures: Some(block_signatures),\n                    })\n            }\n            ShouldStore::MarkComplete(meta_block) => {\n                let block_hash = meta_block.block.hash();\n                debug!(%block_hash, \"marking block complete\");\n                let block_height = meta_block.block.height();\n                effect_builder\n                    .mark_block_completed(block_height)\n                    .event(move |_| Event::Stored {\n                        maybe_meta_block: Some(meta_block),\n                        maybe_block_signatures: None,\n                    })\n            }\n            ShouldStore::SingleSignature(signature) => {\n                debug!(%signature, \"storing finality signature\");\n                let mut block_signatures = BlockSignaturesV2::new(\n                    *signature.block_hash(),\n                    signature.block_height(),\n                    signature.era_id(),\n                    signature.chain_name_hash(),\n                );\n                block_signatures\n                    .insert_signature(signature.public_key().clone(), *signature.signature());\n                effect_builder\n                    .put_finality_signature_to_storage(signature.into())\n                    .event(move |_| Event::Stored {\n                        maybe_meta_block: None,\n                        maybe_block_signatures: Some(block_signatures),\n                    })\n            }\n            
ShouldStore::Nothing => {\n                debug!(\"not storing block or finality signatures\");\n                Effects::new()\n            }\n        };\n        effects.extend(faulty_senders.into_iter().flat_map(|(node_id, error)| {\n            effect_builder\n                .announce_block_peer_with_justification(\n                    node_id,\n                    BlocklistJustification::SentBadFinalitySignature { error },\n                )\n                .ignore()\n        }));\n        effects\n    }\n}\n\npub(crate) trait ReactorEvent:\n    From<StorageRequest>\n    + From<PeerBehaviorAnnouncement>\n    + From<BlockAccumulatorAnnouncement>\n    + From<MarkBlockCompletedRequest>\n    + From<MetaBlockAnnouncement>\n    + From<FatalAnnouncement>\n    + Send\n    + 'static\n{\n}\n\nimpl<REv> ReactorEvent for REv where\n    REv: From<StorageRequest>\n        + From<PeerBehaviorAnnouncement>\n        + From<BlockAccumulatorAnnouncement>\n        + From<MarkBlockCompletedRequest>\n        + From<MetaBlockAnnouncement>\n        + From<FatalAnnouncement>\n        + Send\n        + 'static\n{\n}\n\nimpl<REv: ReactorEvent> Component<REv> for BlockAccumulator {\n    type Event = Event;\n\n    fn handle_event(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        _rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event> {\n        match event {\n            Event::Request(BlockAccumulatorRequest::GetPeersForBlock {\n                block_hash,\n                responder,\n            }) => responder.respond(self.get_peers(block_hash)).ignore(),\n            Event::RegisterPeer {\n                block_hash,\n                era_id,\n                sender,\n            } => {\n                self.upsert_acceptor(block_hash, era_id, Some(sender));\n                Effects::new()\n            }\n            Event::ReceivedBlock { block, sender } => {\n                let meta_block: ForwardMetaBlock =\n                    
MetaBlock::new_forward(block, vec![], MetaBlockState::new())\n                        .try_into()\n                        .unwrap();\n                self.register_block(effect_builder, meta_block, Some(sender))\n            }\n            Event::CreatedFinalitySignature { finality_signature } => {\n                debug!(%finality_signature, \"BlockAccumulator: CreatedFinalitySignature\");\n                self.register_finality_signature(effect_builder, *finality_signature, None)\n            }\n            Event::ReceivedFinalitySignature {\n                finality_signature,\n                sender,\n            } => {\n                self.register_finality_signature(effect_builder, *finality_signature, Some(sender))\n            }\n            Event::ExecutedBlock { meta_block } => {\n                let height = meta_block.block.height();\n                let era_id = meta_block.block.era_id();\n                let effects = self.register_block(effect_builder, meta_block, None);\n                self.register_local_tip(height, era_id);\n                effects\n            }\n            Event::Stored {\n                maybe_meta_block,\n                maybe_block_signatures,\n            } => self.register_stored(effect_builder, maybe_meta_block, maybe_block_signatures),\n        }\n    }\n\n    fn name(&self) -> &str {\n        COMPONENT_NAME\n    }\n}\n\nimpl<REv: ReactorEvent> ValidatorBoundComponent<REv> for BlockAccumulator {\n    fn handle_validators(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        _: &mut NodeRng,\n    ) -> Effects<Self::Event> {\n        info!(\"BlockAccumulator: handling updated validator matrix\");\n        let validator_matrix = &self.validator_matrix; // Closure can't borrow all of self.\n        let should_stores = self\n            .block_acceptors\n            .values_mut()\n            .filter(|acceptor| false == acceptor.has_sufficient_finality())\n            .filter_map(|acceptor| {\n         
       let era_id = acceptor.era_id()?;\n                let evw = validator_matrix.validator_weights(era_id)?;\n                Some(acceptor.should_store_block(&evw, validator_matrix.chain_name_hash()))\n            })\n            .collect_vec();\n        should_stores\n            .into_iter()\n            .flat_map(|(should_store, faulty_senders)| {\n                self.store_block_and_finality_signatures(\n                    effect_builder,\n                    should_store,\n                    faulty_senders,\n                )\n            })\n            .collect()\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/block_acquisition.rs",
    "content": "use std::{\n    collections::{HashMap, HashSet},\n    fmt::{self, Display, Formatter},\n};\n\nuse datasize::DataSize;\nuse derive_more::Display;\nuse tracing::{debug, error, info, trace, warn};\n\nuse casper_storage::block_store::types::ApprovalsHashes;\nuse casper_types::{\n    execution::ExecutionResult, Block, BlockHash, BlockHeader, Digest, EraId, FinalitySignature,\n    ProtocolVersion, PublicKey, TransactionHash, TransactionId,\n};\n\nuse crate::{\n    components::block_synchronizer::{\n        block_acquisition_action::BlockAcquisitionAction, peer_list::PeerList,\n        signature_acquisition::SignatureAcquisition,\n        transaction_acquisition::TransactionAcquisition, BlockAcquisitionError,\n        ExecutionResultsAcquisition, ExecutionResultsChecksum,\n    },\n    types::{BlockExecutionResultsOrChunk, EraValidatorWeights, ExecutableBlock, SignatureWeight},\n    NodeRng,\n};\n\nuse super::transaction_acquisition::TransactionIdentifier;\n\n// BlockAcquisitionState is a milestone oriented state machine; it is always in a resting state\n// indicating the last completed step, while attempting to acquire the necessary data to transition\n// to the next resting state milestone. the start and end of the workflow is linear, but the\n// middle steps conditionally branch depending upon if this is a historical block (needs execution\n// state) or a block we intend to execute, and if the block body has one or more transactions.\n//\n// blocks always require a header & body and sufficient finality signatures; blocks may contain\n// one or more transactions. if a block has any transactions, we must also acquire execution effects\n// for the transactions in the block (we do this as a chunked aggregate), and for post 1.5 blocks\n// we must also acquire approvals hashes (which correlate to which authorized account holders\n// signed the transactions).\n//\n// there are two levels of finality, weak and strict. 
we first get the block header (which is\n// the minimum amount of necessary information we need to function), and then attempt to acquire\n// at least weak finality before doing further work acquiring data for a block, to avoid being\n// tricked into wasting resources downloading bogus blocks. with at least weak finality, we can\n// go about acquiring the rest of the block's required records relatively safely. if we have not\n// acquired strong finality by the time we've downloaded everything else, we do another round\n// of asking for remaining signatures before accepting the sync'd block.\n//\n// when acquiring data for a historical block, we want global state (always) and execution\n// effects (if any). when acquiring sufficient data to execute a block, we do not acquire\n// global state or execution effects. however, we still check for existence of an execution\n// effect _checksum_ leaf in global state at the block's root hash as an expedient way to\n// determine if a block was created post-1.5\n//\n// note that fetchers are used to acquire the required records, which by default check local\n// storage for existence and only ask peers if we don't already have the record being fetched\n// similarly, we collect finality signatures during each state between HaveBlockHeader and\n// HaveStrictFinalitySignatures inclusive, and therefore may have already acquired strict\n// finality before we check for it at the very end. finally due to the trie store structure\n// of global state, other than the first downloaded historical block we likely already have\n// the vast majority of global state data locally. for these reasons, it is common for most\n// blocks to transition thru the various states very quickly...particularly blocks without\n// transactions. 
however, the first block downloaded or blocks with a lot of transactions\n// and / or execution state delta can take arbitrarily longer on their relevant steps.\n//\n// similarly, it is possible that the peer set available to us to acquire this data can become\n// partitioned. the block synchronizer will periodically attempt to refresh its peer list to\n// mitigate this, but this strategy is less effective on small networks. we periodically\n// reattempt until we succeed or the node shuts down, in which case: ¯\\_(ツ)_/¯\n#[cfg_attr(doc, aquamarine::aquamarine)]\n/// ```mermaid\n/// flowchart TD\n///     Initialized --> HaveBlockHeader\n///     HaveBlockHeader --> HaveWeakFinalitySignatures\n///     HaveWeakFinalitySignatures --> HaveBlock\n///     HaveBlock --> B{is historical?}\n///     B -->|Yes| HaveGlobalState\n///     B -->|No| C\n///     HaveGlobalState --> HaveAllExecutionResults\n///     HaveAllExecutionResults --> A{is legacy block?}\n///     A -->|Yes| C\n///     A -->|No| HaveApprovalsHashes\n///     HaveApprovalsHashes --> C{is block empty?}\n///     C -->|Yes| HaveStrictFinalitySignatures\n///     C -->|No| HaveAllTransactions\n///     HaveAllTransactions --> HaveStrictFinalitySignatures\n///     HaveStrictFinalitySignatures --> D{is historical?}\n///     D -->|Yes| Complete\n///     D -->|No| HaveFinalizedBlock\n///     HaveFinalizedBlock --> Complete\n/// ```\n#[derive(Clone, DataSize, Debug)]\npub(super) enum BlockAcquisitionState {\n    Initialized(BlockHash, SignatureAcquisition),\n    HaveBlockHeader(Box<BlockHeader>, SignatureAcquisition),\n    HaveWeakFinalitySignatures(Box<BlockHeader>, SignatureAcquisition),\n    HaveBlock(Box<Block>, SignatureAcquisition, TransactionAcquisition),\n    HaveGlobalState(\n        Box<Block>,\n        SignatureAcquisition,\n        TransactionAcquisition,\n        ExecutionResultsAcquisition,\n    ),\n    HaveAllExecutionResults(\n        Box<Block>,\n        SignatureAcquisition,\n        
TransactionAcquisition,\n        ExecutionResultsChecksum,\n    ),\n    HaveApprovalsHashes(Box<Block>, SignatureAcquisition, TransactionAcquisition),\n    HaveAllTransactions(Box<Block>, SignatureAcquisition),\n    HaveStrictFinalitySignatures(Box<Block>, SignatureAcquisition),\n    // We keep the `Block` as well as the `FinalizedBlock` because the\n    // block is necessary to reach the `Complete` state and the finalized\n    // block is used to enqueue for execution. While the block would surely\n    // be stored by the time we get to this state, it would be inefficient\n    // to fetch it from storage again to transition to the `Complete` state,\n    // so it is retained. The downside is that the block is useful in its\n    // entirety only in the historical sync, and `HaveFinalizedBlock` along\n    // with execution are strictly forward sync states. Until a refactor splits\n    // the `Complete` states for the historical and forward cases, we need to\n    // keep the block around.\n    HaveExecutableBlock(Box<Block>, Box<ExecutableBlock>, bool),\n    // The `Complete` state needs the block itself in order to produce a meta\n    // block announcement in the historical sync flow. In the forward sync,\n    // only the block hash and height are necessary. 
Therefore, we retain the\n    // block fully in this state.\n    Complete(Box<Block>),\n    Failed(BlockHash, Option<u64>),\n}\n\nimpl Display for BlockAcquisitionState {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            BlockAcquisitionState::Initialized(block_hash, _) => {\n                write!(f, \"initialized for: {}\", block_hash)\n            }\n            BlockAcquisitionState::HaveBlockHeader(block_header, _) => write!(\n                f,\n                \"have block header({}) for: {}\",\n                block_header.height(),\n                block_header.block_hash()\n            ),\n            BlockAcquisitionState::HaveWeakFinalitySignatures(block_header, _) => write!(\n                f,\n                \"have weak finality({}) for: {}\",\n                block_header.height(),\n                block_header.block_hash()\n            ),\n            BlockAcquisitionState::HaveBlock(block, _, _) => write!(\n                f,\n                \"have block body({}) for: {}\",\n                block.height(),\n                block.hash()\n            ),\n            BlockAcquisitionState::HaveGlobalState(block, _, _, _) => write!(\n                f,\n                \"have global state({}) for: {}\",\n                block.height(),\n                block.hash()\n            ),\n            BlockAcquisitionState::HaveAllExecutionResults(block, _, _, _) => write!(\n                f,\n                \"have execution results({}) for: {}\",\n                block.height(),\n                block.hash()\n            ),\n            BlockAcquisitionState::HaveApprovalsHashes(block, _, _) => write!(\n                f,\n                \"have approvals hashes({}) for: {}\",\n                block.height(),\n                block.hash()\n            ),\n            BlockAcquisitionState::HaveAllTransactions(block, _) => {\n                write!(\n                    f,\n                    \"have 
transactions({}) for: {}\",\n                    block.height(),\n                    block.hash()\n                )\n            }\n            BlockAcquisitionState::HaveStrictFinalitySignatures(block, _) => write!(\n                f,\n                \"have strict finality({}) for: {}\",\n                block.height(),\n                block.hash()\n            ),\n            BlockAcquisitionState::HaveExecutableBlock(block, _, _) => write!(\n                f,\n                \"have finalized block({}) for: {}\",\n                block.height(),\n                *block.hash()\n            ),\n            BlockAcquisitionState::Complete(block) => {\n                write!(\n                    f,\n                    \"have complete block({}) for: {}\",\n                    block.height(),\n                    *block.hash()\n                )\n            }\n            BlockAcquisitionState::Failed(block_hash, maybe_block_height) => {\n                write!(f, \"fatal({:?}) for: {}\", maybe_block_height, block_hash)\n            }\n        }\n    }\n}\n\nimpl BlockAcquisitionState {\n    pub(crate) fn block_hash(&self) -> BlockHash {\n        match self {\n            BlockAcquisitionState::Initialized(block_hash, _)\n            | BlockAcquisitionState::Failed(block_hash, _) => *block_hash,\n            BlockAcquisitionState::HaveBlockHeader(block_header, _)\n            | BlockAcquisitionState::HaveWeakFinalitySignatures(block_header, _) => {\n                block_header.block_hash()\n            }\n            BlockAcquisitionState::HaveBlock(block, _, _)\n            | BlockAcquisitionState::HaveGlobalState(block, _, _, _)\n            | BlockAcquisitionState::HaveAllExecutionResults(block, _, _, _)\n            | BlockAcquisitionState::HaveApprovalsHashes(block, _, _)\n            | BlockAcquisitionState::HaveAllTransactions(block, _)\n            | BlockAcquisitionState::HaveStrictFinalitySignatures(block, _)\n            | 
BlockAcquisitionState::HaveExecutableBlock(block, ..)\n            | BlockAcquisitionState::Complete(block) => *block.hash(),\n        }\n    }\n\n    pub(crate) fn maybe_block(&self) -> Option<Box<Block>> {\n        match self {\n            BlockAcquisitionState::Initialized(..)\n            | BlockAcquisitionState::Failed(..)\n            | BlockAcquisitionState::HaveBlockHeader(..)\n            | BlockAcquisitionState::HaveWeakFinalitySignatures(..) => None,\n            BlockAcquisitionState::HaveAllTransactions(block, _)\n            | BlockAcquisitionState::HaveStrictFinalitySignatures(block, _)\n            | BlockAcquisitionState::HaveBlock(block, _, _)\n            | BlockAcquisitionState::HaveGlobalState(block, _, _, _)\n            | BlockAcquisitionState::HaveAllExecutionResults(block, _, _, _)\n            | BlockAcquisitionState::HaveApprovalsHashes(block, _, _)\n            | BlockAcquisitionState::HaveExecutableBlock(block, _, _)\n            | BlockAcquisitionState::Complete(block) => Some(block.clone()),\n        }\n    }\n}\n\n#[derive(Clone, Copy, Debug, Display, PartialEq)]\n#[must_use]\npub(super) enum Acceptance {\n    #[display(fmt = \"had it\")]\n    HadIt,\n    #[display(fmt = \"needed it\")]\n    NeededIt,\n}\n\npub(super) struct RegisterExecResultsOutcome {\n    pub(super) exec_results: Option<HashMap<TransactionHash, ExecutionResult>>,\n    pub(super) acceptance: Option<Acceptance>,\n}\n\n#[cfg_attr(doc, aquamarine::aquamarine)]\n/// ```mermaid\n/// sequenceDiagram\n///     Note right of Initialized: need next\n///     Initialized ->> BlockHeader: get header\n///     BlockHeader ->> WeakFinalitySignatures: get at least weak finality\n///     WeakFinalitySignatures ->> Block: get block\n///     Block -->> GlobalState: is historical?\n///     GlobalState ->> AllExecutionResults: get execution results\n///     AllExecutionResults -->> ApprovalsHashes: is not legacy?\n///     AllExecutionResults -->> AllTransactions: is legacy?\n///     
ApprovalsHashes ->> AllTransactions: get transactions\n///     GlobalState -->> StrictFinalitySignatures: is block empty?\n///     Block -->> AllTransactions: is not historical and is not empty?\n///     Block -->> StrictFinalitySignatures: is not historical and is empty?\n///     AllTransactions ->> StrictFinalitySignatures: get strict finality\n///     StrictFinalitySignatures ->> FinalizedBlock: is forward and finalized block created\n///     StrictFinalitySignatures -->> Complete: is historical and block marked complete\n///     FinalizedBlock ->> Complete: is forward and block executed\n/// ```\nimpl BlockAcquisitionState {\n    // the BlockAcquisitionState states and their valid transitions follow:\n    //\n    //   Initialized -> need block header\n    //\n    //   HaveBlockHeader -> if no era validators -> need era validator weights\n    //                    else need weak finality\n    //\n    //   HaveWeakFinalitySignatures -> need block\n    //\n    //   HaveBlock -> if should_fetch_execution_state -> need global state\n    //              else if block has transactions need approvals hashes\n    //              else if no transactions need strict finality\n    //\n    //   HaveGlobalState -> if should_fetch_execution_state\n    //                      if block has transactions ->\n    //                       if have execution effects -> need approvals hashes\n    //                       else -> need execution effects\n    //                      else -> need strict finality\n    //                     else -> error\n    //\n    //   HaveAllExecutionResults -> if should_fetch_execution_state\n    //                                if approvals checkable -> need approvals hashes\n    //                                else -> need transactions\n    //                               else error\n    //\n    //   HaveApprovalsHashes -> need transactions\n    //\n    //   HaveTransactions -> need strict finality\n    //\n    //   HaveStrictFinalitySignatures 
-> if should_fetch_execution_state -> need to mark block\n    // complete                                else need to convert block to FinalizedBlock\n    //\n    //   HaveFinalizedBlock -> need enqueue block for execution\n    //\n    //   Complete -> Complete (success / terminal)\n    //\n    //   Failed -> Failed (terminal)\n    //\n    /// Determines what action should be taken to acquire the next needed block related data.\n    pub(super) fn next_action(\n        &mut self,\n        peer_list: &PeerList,\n        validator_weights: &EraValidatorWeights,\n        rng: &mut NodeRng,\n        is_historical: bool,\n        max_simultaneous_peers: u8,\n    ) -> Result<BlockAcquisitionAction, BlockAcquisitionError> {\n        // self is the resting state we are in, ret is the next action that should be taken\n        // to acquire the necessary data to get us to the next step (if any), or an error\n        let ret = match self {\n            BlockAcquisitionState::Initialized(block_hash, ..) 
=> Ok(\n                BlockAcquisitionAction::block_header(peer_list, rng, *block_hash),\n            ),\n            BlockAcquisitionState::HaveBlockHeader(block_header, signatures) => {\n                Ok(signatures_from_missing_validators(\n                    validator_weights,\n                    signatures,\n                    max_simultaneous_peers,\n                    peer_list,\n                    rng,\n                    block_header.era_id(),\n                    block_header.block_hash(),\n                ))\n            }\n            BlockAcquisitionState::HaveWeakFinalitySignatures(header, _) => Ok(\n                BlockAcquisitionAction::block_body(peer_list, rng, header.block_hash()),\n            ),\n            BlockAcquisitionState::HaveBlock(block, signatures, transactions) => {\n                if is_historical {\n                    Ok(BlockAcquisitionAction::global_state(\n                        peer_list,\n                        rng,\n                        *block.hash(),\n                        *block.state_root_hash(),\n                    ))\n                } else if transactions.needs_transaction() {\n                    Ok(BlockAcquisitionAction::approvals_hashes(\n                        block, peer_list, rng,\n                    ))\n                } else if signatures.has_sufficient_finality(is_historical, true) {\n                    Ok(BlockAcquisitionAction::switch_to_have_sufficient_finality(\n                        *block.hash(),\n                        block.height(),\n                    ))\n                } else {\n                    Ok(signatures_from_missing_validators(\n                        validator_weights,\n                        signatures,\n                        max_simultaneous_peers,\n                        peer_list,\n                        rng,\n                        block.era_id(),\n                        *block.hash(),\n                    ))\n                }\n            }\n     
       BlockAcquisitionState::HaveGlobalState(\n                block,\n                signatures,\n                transaction_state,\n                exec_results,\n            ) => {\n                if false == is_historical {\n                    Err(BlockAcquisitionError::InvalidStateTransition)\n                } else if transaction_state.needs_transaction() {\n                    BlockAcquisitionAction::maybe_execution_results(\n                        block,\n                        peer_list,\n                        rng,\n                        exec_results,\n                    )\n                } else if signatures.has_sufficient_finality(is_historical, true) {\n                    Ok(BlockAcquisitionAction::switch_to_have_sufficient_finality(\n                        *block.hash(),\n                        block.height(),\n                    ))\n                } else {\n                    Ok(signatures_from_missing_validators(\n                        validator_weights,\n                        signatures,\n                        max_simultaneous_peers,\n                        peer_list,\n                        rng,\n                        block.era_id(),\n                        *block.hash(),\n                    ))\n                }\n            }\n            BlockAcquisitionState::HaveAllExecutionResults(\n                block,\n                signatures,\n                transactions,\n                checksum,\n            ) if is_historical => {\n                let is_checkable = checksum.is_checkable();\n                signatures.set_is_legacy(!is_checkable);\n                if is_checkable {\n                    Ok(BlockAcquisitionAction::approvals_hashes(\n                        block, peer_list, rng,\n                    ))\n                } else if let Some(needed_transaction) = transactions.next_needed_transaction() {\n                    // If the checksum is not checkable, it means that we are dealing with a legacy\n  
                  // deploys. If the required transactions are not deploys for\n                    // this block it means that something went wrong.\n                    let deploy_hash = match needed_transaction {\n                        TransactionIdentifier::ByHash(TransactionHash::Deploy(deploy_hash)) => {\n                            deploy_hash\n                        }\n                        _ => return Err(BlockAcquisitionError::InvalidTransactionType),\n                    };\n                    debug!(\"BlockAcquisition: requesting missing deploy by hash\");\n                    Ok(BlockAcquisitionAction::legacy_deploy_by_hash(\n                        *block.hash(),\n                        deploy_hash,\n                        peer_list,\n                        rng,\n                    ))\n                } else {\n                    Ok(\n                        BlockAcquisitionAction::next_action_after_transaction_acquisition(\n                            *block.hash(),\n                            block.height(),\n                            block.era_id(),\n                            peer_list,\n                            rng,\n                            validator_weights,\n                            signatures,\n                            is_historical,\n                            max_simultaneous_peers,\n                        ),\n                    )\n                }\n            }\n            BlockAcquisitionState::HaveAllExecutionResults(_, _, _, _) => {\n                Err(BlockAcquisitionError::InvalidStateTransition)\n            }\n            BlockAcquisitionState::HaveApprovalsHashes(block, signatures, transactions) => {\n                if let Some(needed_txn_id) = transactions.next_needed_transaction() {\n                    let txn_id = match needed_txn_id {\n                        TransactionIdentifier::ByHash(txn_hash) => {\n                            
Err(BlockAcquisitionError::MissingApprovalsHashes(txn_hash))\n                        }\n                        TransactionIdentifier::ById(txn_id) => Ok(txn_id),\n                    }?;\n                    debug!(\"BlockAcquisition: requesting missing transaction by ID\");\n                    Ok(BlockAcquisitionAction::transaction_by_id(\n                        *block.hash(),\n                        txn_id,\n                        peer_list,\n                        rng,\n                    ))\n                } else {\n                    Ok(\n                        BlockAcquisitionAction::next_action_after_transaction_acquisition(\n                            *block.hash(),\n                            block.height(),\n                            block.era_id(),\n                            peer_list,\n                            rng,\n                            validator_weights,\n                            signatures,\n                            is_historical,\n                            max_simultaneous_peers,\n                        ),\n                    )\n                }\n            }\n            BlockAcquisitionState::HaveAllTransactions(block, signatures) => {\n                if signatures.has_sufficient_finality(is_historical, true) {\n                    Ok(BlockAcquisitionAction::switch_to_have_sufficient_finality(\n                        *block.hash(),\n                        block.height(),\n                    ))\n                } else {\n                    Ok(signatures_from_missing_validators(\n                        validator_weights,\n                        signatures,\n                        max_simultaneous_peers,\n                        peer_list,\n                        rng,\n                        block.era_id(),\n                        *block.hash(),\n                    ))\n                }\n            }\n            BlockAcquisitionState::HaveStrictFinalitySignatures(block, ..) 
=> {
                if is_historical {
                    // we have enough signatures; need to make sure we've stored the necessary bits
                    Ok(BlockAcquisitionAction::block_marked_complete(
                        *block.hash(),
                        block.height(),
                    ))
                } else {
                    // forward (non-historical) blocks get handed off for execution instead
                    Ok(BlockAcquisitionAction::make_executable_block(
                        *block.hash(),
                        block.height(),
                    ))
                }
            }
            BlockAcquisitionState::HaveExecutableBlock(block, executable_block, enqueued) => {
                if is_historical {
                    // an executable block only exists on the forward (non-historical) path
                    Err(BlockAcquisitionError::InvalidStateTransition)
                } else if *enqueued == false {
                    // NOTE(review): `!*enqueued` would be the idiomatic spelling (clippy
                    // `bool_comparison`); behavior is identical.
                    Ok(BlockAcquisitionAction::enqueue_block_for_execution(
                        block.hash(),
                        executable_block.clone(),
                    ))
                } else {
                    // if the block was already enqueued for execution just wait, there's
                    // nothing else to do
                    Ok(BlockAcquisitionAction::need_nothing(*block.hash()))
                }
            }
            BlockAcquisitionState::Complete(block) => {
                Ok(BlockAcquisitionAction::need_nothing(*block.hash()))
            }
            BlockAcquisitionState::Failed(block_hash, ..) => {
                Ok(BlockAcquisitionAction::need_nothing(*block_hash))
            }
        };
        ret
    }

    /// The block height of the current block, if available.
    ///
    /// Returns `None` in `Initialized` / `Failed` (only a block hash is known there);
    /// otherwise reads the height from whichever header, block, or executable block
    /// the state variant carries.
    pub(super) fn block_height(&self) -> Option<u64> {
        match self {
            // only the hash is known in these states; no height available
            BlockAcquisitionState::Initialized(..) | BlockAcquisitionState::Failed(..) 
=> None,
            // header-bearing states: take the height from the header
            BlockAcquisitionState::HaveBlockHeader(header, _)
            | BlockAcquisitionState::HaveWeakFinalitySignatures(header, _) => Some(header.height()),
            BlockAcquisitionState::HaveExecutableBlock(_, executable_block, _) => {
                Some(executable_block.height)
            }
            // block-bearing states: take the height from the block itself
            BlockAcquisitionState::HaveBlock(block, _, _)
            | BlockAcquisitionState::HaveGlobalState(block, ..)
            | BlockAcquisitionState::HaveAllExecutionResults(block, _, _, _)
            | BlockAcquisitionState::HaveApprovalsHashes(block, _, _)
            | BlockAcquisitionState::HaveAllTransactions(block, ..)
            | BlockAcquisitionState::HaveStrictFinalitySignatures(block, _)
            | BlockAcquisitionState::Complete(block) => Some(block.height()),
        }
    }

    /// Register the block header for this block.
    ///
    /// Only meaningful in `Initialized`; on a matching hash this transitions to
    /// `HaveBlockHeader` and returns `Ok(Some(Acceptance::NeededIt))`. A mismatched
    /// hash is an error; any other state returns `Ok(None)` (header not asked for).
    pub(super) fn register_block_header(
        &mut self,
        header: BlockHeader,
        strict_finality_protocol_version: ProtocolVersion,
        is_historical: bool,
    ) -> Result<Option<Acceptance>, BlockAcquisitionError> {
        let new_state = match self {
            BlockAcquisitionState::Initialized(block_hash, signatures) => {
                if header.block_hash() == *block_hash {
                    info!(
                        "BlockAcquisition: registering header for: {:?}, height: {}",
                        block_hash,
                        header.height()
                    );
                    // blocks from before the strict-finality protocol version are
                    // tracked as "legacy" for signature-acquisition purposes
                    let is_legacy_block = is_historical
                        && header.protocol_version() < strict_finality_protocol_version;
                    signatures.set_is_legacy(is_legacy_block);
                    BlockAcquisitionState::HaveBlockHeader(Box::new(header), signatures.clone())
                } else {
                    return Err(BlockAcquisitionError::BlockHashMismatch {
                        expected: *block_hash,
                
        actual: header.block_hash(),
                    });
                }
            }
            // we never ask for a block_header while in the following states,
            // and thus it is erroneous to attempt to apply one
            BlockAcquisitionState::HaveBlockHeader(..)
            | BlockAcquisitionState::HaveWeakFinalitySignatures(..)
            | BlockAcquisitionState::HaveBlock(..)
            | BlockAcquisitionState::HaveGlobalState(..)
            | BlockAcquisitionState::HaveAllExecutionResults(..)
            | BlockAcquisitionState::HaveAllTransactions(..)
            | BlockAcquisitionState::HaveStrictFinalitySignatures(..)
            | BlockAcquisitionState::HaveApprovalsHashes(..)
            | BlockAcquisitionState::HaveExecutableBlock(..)
            | BlockAcquisitionState::Failed(..)
            | BlockAcquisitionState::Complete(..) => return Ok(None),
        };
        self.set_state(new_state);
        Ok(Some(Acceptance::NeededIt))
    }

    /// Register the block body for this block.
    ///
    /// Only meaningful in `HaveWeakFinalitySignatures`; verifies the body's hash
    /// against the already-held header, seeds transaction acquisition from the
    /// block's transaction hashes, and transitions to `HaveBlock`. Any other state
    /// returns `Ok(None)` (body not asked for).
    pub(super) fn register_block(
        &mut self,
        block: Block,
        need_execution_state: bool,
    ) -> Result<Option<Acceptance>, BlockAcquisitionError> {
        let new_state = match self {
            BlockAcquisitionState::HaveWeakFinalitySignatures(header, signatures) => {
                // the received body must hash to the header we already validated
                let expected_block_hash = header.block_hash();
                let actual_block_hash = block.hash();
                if *actual_block_hash != expected_block_hash {
                    return Err(BlockAcquisitionError::BlockHashMismatch {
                        expected: expected_block_hash,
                        actual: *actual_block_hash,
                    });
                }
                info!(
                    "BlockAcquisition: registering block for: {}",
                    header.block_hash()
                );
                // V1 blocks only carry deploy/transfer hashes (wrapped as
                // TransactionHash); V2 blocks list all transactions directly
                let transaction_hashes = match &block {
                    Block::V1(v1) => v1
                        .deploy_and_transfer_hashes()
                        .copied()
                        .map(TransactionHash::from)
                        .collect(),
                    Block::V2(v2) => v2.all_transactions().copied().collect(),
                };
                let transaction_acquisition =
                    TransactionAcquisition::new_by_hash(transaction_hashes, need_execution_state);

                BlockAcquisitionState::HaveBlock(
                    Box::new(block),
                    signatures.clone(),
                    transaction_acquisition,
                )
            }
            // we never ask for a block body while in the following states
            BlockAcquisitionState::Initialized(..)
            | BlockAcquisitionState::HaveBlockHeader(..)
            | BlockAcquisitionState::HaveBlock(..)
            | BlockAcquisitionState::HaveGlobalState(..)
            | BlockAcquisitionState::HaveAllExecutionResults(..)
            | BlockAcquisitionState::HaveAllTransactions(..)
            | BlockAcquisitionState::HaveStrictFinalitySignatures(..)
            | BlockAcquisitionState::HaveApprovalsHashes(..)
            | BlockAcquisitionState::HaveExecutableBlock(..)
            | BlockAcquisitionState::Failed(..)
            | BlockAcquisitionState::Complete(..) 
=> {
                return Ok(None);
            }
        };
        self.set_state(new_state);
        Ok(Some(Acceptance::NeededIt))
    }

    /// Advance acquisition state to HaveStrictFinality.
    ///
    /// Valid from HaveBlock / HaveGlobalState / HaveAllTransactions /
    /// HaveApprovalsHashes, and only if sufficient (strict) finality has already
    /// been accumulated; otherwise `InvalidStateTransition`. A hash mismatch with
    /// the current state is an error; all remaining states are a silent no-op.
    pub(super) fn switch_to_have_strict_finality(
        &mut self,
        block_hash: BlockHash,
        is_historical: bool,
    ) -> Result<(), BlockAcquisitionError> {
        // the caller must be talking about the block this state machine tracks
        if block_hash != self.block_hash() {
            return Err(BlockAcquisitionError::BlockHashMismatch {
                expected: self.block_hash(),
                actual: block_hash,
            });
        }
        let maybe_new_state = match self {
            BlockAcquisitionState::HaveBlock(block, acquired_signatures, ..)
            | BlockAcquisitionState::HaveGlobalState(block, acquired_signatures, ..)
            | BlockAcquisitionState::HaveAllTransactions(block, acquired_signatures)
            | BlockAcquisitionState::HaveApprovalsHashes(block, acquired_signatures, ..) => {
                // `true` requests the strict (not weak) finality threshold
                if acquired_signatures.has_sufficient_finality(is_historical, true) {
                    Some(BlockAcquisitionState::HaveStrictFinalitySignatures(
                        block.clone(),
                        acquired_signatures.clone(),
                    ))
                } else {
                    return Err(BlockAcquisitionError::InvalidStateTransition);
                }
            }
            // no-op in every other state
            BlockAcquisitionState::HaveBlockHeader(..)
            | BlockAcquisitionState::HaveAllExecutionResults(..)
            | BlockAcquisitionState::HaveStrictFinalitySignatures(..)
            | BlockAcquisitionState::HaveWeakFinalitySignatures(..)
            | BlockAcquisitionState::Initialized(..)
            | BlockAcquisitionState::Failed(..)
            | BlockAcquisitionState::HaveExecutableBlock(..)
            | BlockAcquisitionState::Complete(..) 
=> None,
        };
        if let Some(new_state) = maybe_new_state {
            self.set_state(new_state);
        };
        Ok(())
    }

    /// Register a finality signature as pending for this block.
    ///
    /// Forwards to the acquired-signatures tracker in every state that carries
    /// one; silently ignored in Initialized / HaveExecutableBlock / Failed /
    /// Complete, which do not track signatures.
    pub(super) fn register_finality_signature_pending(&mut self, validator: PublicKey) {
        match self {
            BlockAcquisitionState::HaveBlockHeader(_, acquired_signatures)
            | BlockAcquisitionState::HaveBlock(_, acquired_signatures, ..)
            | BlockAcquisitionState::HaveGlobalState(_, acquired_signatures, ..)
            | BlockAcquisitionState::HaveApprovalsHashes(_, acquired_signatures, ..)
            | BlockAcquisitionState::HaveAllExecutionResults(_, acquired_signatures, ..)
            | BlockAcquisitionState::HaveAllTransactions(_, acquired_signatures)
            | BlockAcquisitionState::HaveStrictFinalitySignatures(_, acquired_signatures)
            | BlockAcquisitionState::HaveWeakFinalitySignatures(_, acquired_signatures) => {
                acquired_signatures.register_pending(validator);
            }
            BlockAcquisitionState::Initialized(..)
            | BlockAcquisitionState::HaveExecutableBlock(..)
            | BlockAcquisitionState::Failed(..)
            | BlockAcquisitionState::Complete(..) => {}
        };
    }

    /// Returns `true` if this state is currently and actively collecting finality
    /// signatures (as opposed to merely accepting late arrivals). In HaveBlockHeader
    /// signature collection is the primary activity; in the mid-acquisition states it
    /// depends on whether transactions are still outstanding and whether strict
    /// signature weight has been reached.
    pub(super) fn actively_acquiring_signatures(&self, is_historical: bool) -> bool {
        match self {
            BlockAcquisitionState::HaveBlockHeader(..) => true,
            BlockAcquisitionState::Initialized(..)
            | BlockAcquisitionState::HaveWeakFinalitySignatures(..)
            | BlockAcquisitionState::HaveStrictFinalitySignatures(..)
            | BlockAcquisitionState::HaveExecutableBlock(..)
            | BlockAcquisitionState::Failed(..)
            | BlockAcquisitionState::Complete(..) 
=> false,
            BlockAcquisitionState::HaveBlock(_, acquired_signatures, acquired_transactions) => {
                // NOTE(review): `== false` below would read better as `!…`
                // (clippy `bool_comparison`); behavior is identical.
                !is_historical
                    && acquired_transactions.needs_transaction() == false
                    && acquired_signatures.signature_weight() != SignatureWeight::Strict
            }
            BlockAcquisitionState::HaveGlobalState(
                _,
                acquired_signatures,
                acquired_transactions,
                ..,
            ) => {
                acquired_transactions.needs_transaction() == false
                    && acquired_signatures.signature_weight() != SignatureWeight::Strict
            }
            BlockAcquisitionState::HaveApprovalsHashes(
                _,
                acquired_signatures,
                acquired_transactions,
            ) => {
                acquired_transactions.needs_transaction() == false
                    && acquired_signatures.signature_weight() != SignatureWeight::Strict
            }
            BlockAcquisitionState::HaveAllExecutionResults(
                _,
                acquired_signatures,
                acquired_transactions,
                ..,
            ) => {
                // only legacy blocks actively collect signatures in this state
                acquired_signatures.is_legacy()
                    && acquired_transactions.needs_transaction() == false
                    && acquired_signatures.signature_weight() != SignatureWeight::Strict
            }
            BlockAcquisitionState::HaveAllTransactions(_, acquired_signatures) => {
                acquired_signatures.signature_weight() != SignatureWeight::Strict
            }
        }
    }

    /// Register a finality signature for this block.
    ///
    /// Applies the signature to the state's signature tracker and, where enough
    /// weight has been accumulated, advances the state (HaveBlockHeader →
    /// HaveWeakFinalitySignatures; several mid states → HaveStrictFinalitySignatures).
    /// Returns the tracker's acceptance only while actively acquiring signatures,
    /// `Ok(None)` otherwise.
    pub(super) fn register_finality_signature(
        &mut self,
        signature: FinalitySignature,
        validator_weights: &EraValidatorWeights,
        is_historical: bool,
    ) -> Result<Option<Acceptance>, BlockAcquisitionError> {
        // we will accept finality signatures we don't yet have while in every state other than
        // Initialized and Failed. However, it can only cause a state transition when we
        // are in a resting state that needs weak finality or strict finality.
        let cloned_sig = signature.clone();
        let signer = signature.public_key().clone();
        let acceptance: Acceptance;
        let maybe_block_hash: Option<BlockHash>;
        // capture this BEFORE applying the signature, so the return value reflects
        // whether we were soliciting signatures at the time the signature arrived
        let currently_acquiring_sigs = self.actively_acquiring_signatures(is_historical);
        let maybe_new_state: Option<BlockAcquisitionState> = match self {
            BlockAcquisitionState::HaveBlockHeader(header, acquired_signatures) => {
                // we are attempting to acquire at least ~1/3 signature weight before
                // committing to doing non-trivial work to acquire this block
                // thus the primary thing we are doing in this state is accumulating sigs.
                // We also want to ensure we've tried at least once to fetch every potential
                // signature.
                maybe_block_hash = Some(header.block_hash());
                acceptance = acquired_signatures.apply_signature(signature, validator_weights);
                // `false` => weak finality threshold is enough to advance here
                if acquired_signatures.has_sufficient_finality(is_historical, false) {
                    Some(BlockAcquisitionState::HaveWeakFinalitySignatures(
                        header.clone(),
                        acquired_signatures.clone(),
                    ))
                } else {
                    None
                }
            }
            BlockAcquisitionState::HaveBlock(block, acquired_signatures, acquired_transactions) => {
                maybe_block_hash = Some(*block.hash());
                acceptance = acquired_signatures.apply_signature(signature, validator_weights);
                if !is_historical
                    && acquired_transactions.needs_transaction() == false
                    && acquired_signatures.has_sufficient_finality(is_historical, true)
                {
                    // When syncing a forward block, if we don't need transactions and have all
                    // required signatures, advance the state
                    Some(BlockAcquisitionState::HaveStrictFinalitySignatures(
                        block.clone(),
                        acquired_signatures.clone(),
                    ))
                } else {
                    // Otherwise stay in HaveBlock to allow fetching for the next bit of data
                    None
                }
            }
            BlockAcquisitionState::HaveGlobalState(
                block,
                acquired_signatures,
                acquired_transactions,
                ..,
            ) => {
                maybe_block_hash = Some(*block.hash());
                acceptance = acquired_signatures.apply_signature(signature, validator_weights);
                if !acquired_transactions.needs_transaction()
                    && acquired_signatures.has_sufficient_finality(is_historical, true)
                {
                    Some(BlockAcquisitionState::HaveStrictFinalitySignatures(
                        block.clone(),
                        acquired_signatures.clone(),
                    ))
                } else {
                    None
                }
            }
            // record the signature but never transition out of this state from here
            BlockAcquisitionState::HaveApprovalsHashes(block, acquired_signatures, ..) 
=> {
                maybe_block_hash = Some(*block.hash());
                acceptance = acquired_signatures.apply_signature(signature, validator_weights);
                None
            }
            BlockAcquisitionState::HaveAllExecutionResults(
                block,
                acquired_signatures,
                acquired_transactions,
                ..,
            ) => {
                maybe_block_hash = Some(*block.hash());
                acceptance = acquired_signatures.apply_signature(signature, validator_weights);
                // only legacy blocks can short-circuit to strict finality from here
                if acquired_signatures.is_legacy()
                    && acquired_transactions.needs_transaction() == false
                    && acquired_signatures.has_sufficient_finality(is_historical, true)
                {
                    Some(BlockAcquisitionState::HaveStrictFinalitySignatures(
                        block.clone(),
                        acquired_signatures.clone(),
                    ))
                } else {
                    None
                }
            }
            BlockAcquisitionState::HaveAllTransactions(block, acquired_signatures) => {
                maybe_block_hash = Some(*block.hash());
                acceptance = acquired_signatures.apply_signature(signature, validator_weights);
                if acquired_signatures.has_sufficient_finality(is_historical, true) {
                    Some(BlockAcquisitionState::HaveStrictFinalitySignatures(
                        block.clone(),
                        acquired_signatures.clone(),
                    ))
                } else {
                    None
                }
            }
            // already have strict finality; accept late signatures, no transition
            BlockAcquisitionState::HaveStrictFinalitySignatures(block, acquired_signatures) => {
                maybe_block_hash = Some(*block.hash());
                acceptance = acquired_signatures.apply_signature(signature, validator_weights);
                None
            }
            
BlockAcquisitionState::HaveWeakFinalitySignatures(header, acquired_signatures) => {
                // 1: In HaveWeakFinalitySignatures we are waiting to acquire the block body
                // 2: In HaveStrictFinalitySignatures we are in the happy path resting state
                // and have enough signatures, but not necessarily all signatures and
                // will accept late comers while resting in this state
                maybe_block_hash = Some(header.block_hash());
                acceptance = acquired_signatures.apply_signature(signature, validator_weights);
                None
            }
            // these states do not track signatures at all
            BlockAcquisitionState::Initialized(..)
            | BlockAcquisitionState::HaveExecutableBlock(..)
            | BlockAcquisitionState::Failed(..)
            | BlockAcquisitionState::Complete(..) => return Ok(None),
        };
        // only report an acceptance when we were actively soliciting signatures
        let ret = currently_acquiring_sigs.then_some(acceptance);
        info!(
            signature=%cloned_sig,
            ?ret,
            "BlockAcquisition: registering finality signature for: {}",
            if let Some(block_hash) = maybe_block_hash {
                block_hash.to_string()
            } else {
                "unknown block".to_string()
            }
        );
        self.log_finality_signature_acceptance(&maybe_block_hash, &signer, ret);
        if let Some(new_state) = maybe_new_state {
            self.set_state(new_state);
        }
        Ok(ret)
    }

    /// Register the approvals hashes for this block.
    ///
    /// Applies the approvals hashes to the transaction acquisition tracker and
    /// transitions to `HaveApprovalsHashes` — from `HaveBlock` on the forward path
    /// (`!need_execution_state`) or from `HaveAllExecutionResults` on the historical
    /// path (`need_execution_state`). Any other state returns `Ok(None)`.
    pub(super) fn register_approvals_hashes(
        &mut self,
        approvals_hashes: &ApprovalsHashes,
        need_execution_state: bool,
    ) -> Result<Option<Acceptance>, BlockAcquisitionError> {
        let new_state = match self {
            BlockAcquisitionState::HaveBlock(block, signatures, acquired)
                if !need_execution_state =>
            {
                info!(
                    "BlockAcquisition: registering approvals hashes for: {}",
                    block.hash()
                );
                acquired.apply_approvals_hashes(approvals_hashes)?;
                BlockAcquisitionState::HaveApprovalsHashes(
                    block.clone(),
                    signatures.clone(),
                    acquired.clone(),
                )
            }

            BlockAcquisitionState::HaveAllExecutionResults(block, signatures, transactions, _)
                if need_execution_state =>
            {
                transactions.apply_approvals_hashes(approvals_hashes)?;
                info!(
                    "BlockAcquisition: registering approvals hashes for: {}",
                    block.hash()
                );
                BlockAcquisitionState::HaveApprovalsHashes(
                    block.clone(),
                    signatures.clone(),
                    transactions.clone(),
                )
            }
            // we never ask for transactions in the following states, and thus it is erroneous to
            // attempt to apply any
            BlockAcquisitionState::HaveBlock(..)
            | BlockAcquisitionState::HaveGlobalState(..)
            | BlockAcquisitionState::Initialized(..)
            | BlockAcquisitionState::HaveBlockHeader(..)
            | BlockAcquisitionState::HaveWeakFinalitySignatures(..)
            | BlockAcquisitionState::HaveAllExecutionResults(..)
            | BlockAcquisitionState::HaveAllTransactions(..)
            | BlockAcquisitionState::HaveStrictFinalitySignatures(..)
            | BlockAcquisitionState::HaveApprovalsHashes(_, _, _)
            | BlockAcquisitionState::HaveExecutableBlock(..)
            | BlockAcquisitionState::Failed(..)
            | BlockAcquisitionState::Complete(..) 
=> {
                return Ok(None);
            }
        };
        self.set_state(new_state);
        Ok(Some(Acceptance::NeededIt))
    }

    /// Register global state for this block.
    ///
    /// Only meaningful in `HaveBlock` when the execution state is needed (historical
    /// sync); verifies `root_hash` against the block's state root and transitions to
    /// `HaveGlobalState` with execution-results acquisition marked `Needed`. All
    /// other states are a silent no-op.
    pub(super) fn register_global_state(
        &mut self,
        root_hash: Digest,
        need_execution_state: bool,
    ) -> Result<(), BlockAcquisitionError> {
        let new_state = match self {
            BlockAcquisitionState::HaveBlock(block, signatures, transactions)
                if need_execution_state =>
            {
                info!(
                    "BlockAcquisition: registering global state for: {}",
                    block.hash()
                );
                // the acquired global state root must match the block's own record
                if block.state_root_hash() == &root_hash {
                    let block_hash = *block.hash();
                    BlockAcquisitionState::HaveGlobalState(
                        block.clone(),
                        signatures.clone(),
                        transactions.clone(),
                        ExecutionResultsAcquisition::Needed { block_hash },
                    )
                } else {
                    return Err(BlockAcquisitionError::RootHashMismatch {
                        expected: *block.state_root_hash(),
                        actual: root_hash,
                    });
                }
            }
            // we never ask for global state in the following states, and thus it is erroneous to
            // attempt to apply any
            BlockAcquisitionState::HaveBlock(..)
            | BlockAcquisitionState::Initialized(..)
            | BlockAcquisitionState::HaveBlockHeader(..)
            | BlockAcquisitionState::HaveWeakFinalitySignatures(..)
            | BlockAcquisitionState::HaveGlobalState(..)
            | BlockAcquisitionState::HaveAllExecutionResults(..)
            | BlockAcquisitionState::HaveAllTransactions(..)
            | BlockAcquisitionState::HaveStrictFinalitySignatures(..)
            | BlockAcquisitionState::HaveApprovalsHashes(..)
            | BlockAcquisitionState::HaveExecutableBlock(..)
            | BlockAcquisitionState::Failed(..)
            | BlockAcquisitionState::Complete(..) => {
                return Ok(());
            }
        };
        self.set_state(new_state);
        Ok(())
    }

    /// Register execution results checksum for this block.
    ///
    /// Only applies in `HaveGlobalState` while execution-results acquisition is
    /// still `Needed` (historical sync); mutates the acquisition in place via
    /// `apply_checksum`. Every other state is a silent no-op.
    pub(super) fn register_execution_results_checksum(
        &mut self,
        execution_results_checksum: ExecutionResultsChecksum,
        need_execution_state: bool,
    ) -> Result<(), BlockAcquisitionError> {
        debug!(state=%self, need_execution_state, "BlockAcquisitionState: register_execution_results_checksum");
        match self {
            // `acq @ …::Needed` binds the acquisition so it can be replaced in place
            BlockAcquisitionState::HaveGlobalState(
                block,
                _,
                _,
                acq @ ExecutionResultsAcquisition::Needed { .. },
            ) if need_execution_state => {
                info!(
                    "BlockAcquisition: registering execution results hash for: {}",
                    block.hash()
                );
                *acq = acq
                    .clone()
                    .apply_checksum(execution_results_checksum)
                    .map_err(BlockAcquisitionError::ExecutionResults)?;
            }
            BlockAcquisitionState::HaveAllTransactions(..)
            | BlockAcquisitionState::HaveGlobalState(..)
            | BlockAcquisitionState::HaveBlock(..)
            | BlockAcquisitionState::Initialized(..)
            | BlockAcquisitionState::HaveBlockHeader(..)
            | BlockAcquisitionState::HaveWeakFinalitySignatures(..)
            | BlockAcquisitionState::HaveAllExecutionResults(..)
            | BlockAcquisitionState::HaveStrictFinalitySignatures(..)
            | BlockAcquisitionState::HaveApprovalsHashes(..)
            | BlockAcquisitionState::HaveExecutableBlock(..)
            | 
BlockAcquisitionState::Failed(..)
            | BlockAcquisitionState::Complete(..) => {
                return Ok(());
            }
        };
        Ok(())
    }

    /// Register execution results or chunk for this block.
    ///
    /// Only meaningful in `HaveGlobalState` during historical sync. Feeds the
    /// (possibly chunked) execution results into the acquisition; on `Complete`
    /// the full results are returned in the outcome for storage, on `Acquiring`
    /// the updated acquisition is kept, and `Needed`/`Pending` return early with
    /// no results. All other states return an empty outcome.
    pub(super) fn register_execution_results_or_chunk(
        &mut self,
        block_execution_results_or_chunk: BlockExecutionResultsOrChunk,
        need_execution_state: bool,
    ) -> Result<RegisterExecResultsOutcome, BlockAcquisitionError> {
        debug!(state=%self, need_execution_state,
            block_execution_results_or_chunk=%block_execution_results_or_chunk,
            "register_execution_results_or_chunk");
        let (new_state, maybe_exec_results, acceptance) = match self {
            BlockAcquisitionState::HaveGlobalState(
                block,
                signatures,
                transactions,
                exec_results_acq,
            ) if need_execution_state => {
                info!(
                    "BlockAcquisition: registering execution result or chunk for: {}",
                    block.hash()
                );
                // same V1/V2 hash extraction as in `register_block`
                let transaction_hashes = match block.as_ref() {
                    Block::V1(v1) => v1
                        .deploy_and_transfer_hashes()
                        .copied()
                        .map(TransactionHash::from)
                        .collect(),
                    Block::V2(v2) => v2.all_transactions().copied().collect(),
                };
                match exec_results_acq
                    .clone()
                    .apply_block_execution_results_or_chunk(
                        block_execution_results_or_chunk,
                        transaction_hashes,
                    ) {
                    Ok((new_acquisition, acceptance)) => match new_acquisition {
                        // nothing usable yet; keep the current state untouched
                        ExecutionResultsAcquisition::Needed { .. }
                        | ExecutionResultsAcquisition::Pending { .. } => {
                            debug!("apply_block_execution_results_or_chunk: Needed | Pending");
                            return Ok(RegisterExecResultsOutcome {
                                exec_results: None,
                                acceptance: Some(acceptance),
                            });
                        }
                        // all chunks received; surface the full results to the caller
                        ExecutionResultsAcquisition::Complete { ref results, .. } => {
                            debug!("apply_block_execution_results_or_chunk: Complete");
                            let new_state = BlockAcquisitionState::HaveGlobalState(
                                block.clone(),
                                signatures.clone(),
                                transactions.clone(),
                                new_acquisition.clone(),
                            );
                            let maybe_exec_results = Some(results.clone());
                            (new_state, maybe_exec_results, acceptance)
                        }
                        // partially received; persist the in-progress acquisition
                        ExecutionResultsAcquisition::Acquiring { .. } => {
                            debug!("apply_block_execution_results_or_chunk: Acquiring");
                            let new_state = BlockAcquisitionState::HaveGlobalState(
                                block.clone(),
                                signatures.clone(),
                                transactions.clone(),
                                new_acquisition,
                            );
                            let maybe_exec_results = None;
                            (new_state, maybe_exec_results, acceptance)
                        }
                    },
                    Err(error) => {
                        warn!(%error, "failed to apply execution results");
                        return Err(BlockAcquisitionError::ExecutionResults(error));
                    }
                }
            }
            // execution results are never requested in these states
            BlockAcquisitionState::HaveAllTransactions(..)
            | BlockAcquisitionState::HaveGlobalState(..)
            | BlockAcquisitionState::HaveBlock(..)
            | BlockAcquisitionState::Initialized(..)
            | BlockAcquisitionState::HaveBlockHeader(..)
            | BlockAcquisitionState::HaveWeakFinalitySignatures(..)
            | BlockAcquisitionState::HaveAllExecutionResults(..)
            | BlockAcquisitionState::HaveStrictFinalitySignatures(..)
            | BlockAcquisitionState::HaveApprovalsHashes(..)
            | BlockAcquisitionState::HaveExecutableBlock(..)
            | BlockAcquisitionState::Failed(..)
            | BlockAcquisitionState::Complete(..) 
=> {
                return Ok(RegisterExecResultsOutcome {
                    exec_results: None,
                    acceptance: None,
                });
            }
        };
        self.set_state(new_state);
        Ok(RegisterExecResultsOutcome {
            exec_results: maybe_exec_results,
            acceptance: Some(acceptance),
        })
    }

    /// Register execution results stored notification for this block.
    ///
    /// Only meaningful in `HaveGlobalState` with a `Complete` execution-results
    /// acquisition during historical sync; transitions to
    /// `HaveAllExecutionResults`, carrying the checksum forward. All other states
    /// are a silent no-op.
    pub(super) fn register_execution_results_stored_notification(
        &mut self,
        need_execution_state: bool,
    ) -> Result<(), BlockAcquisitionError> {
        let new_state = match self {
            BlockAcquisitionState::HaveGlobalState(
                block,
                signatures,
                transactions,
                ExecutionResultsAcquisition::Complete { checksum, .. },
            ) if need_execution_state => {
                info!(
                    "BlockAcquisition: registering execution results stored notification for: {}",
                    block.hash()
                );
                BlockAcquisitionState::HaveAllExecutionResults(
                    block.clone(),
                    signatures.clone(),
                    transactions.clone(),
                    *checksum,
                )
            }
            // no-op in every other state (including HaveGlobalState whose
            // acquisition is not yet Complete)
            BlockAcquisitionState::HaveGlobalState(..)
            | BlockAcquisitionState::HaveAllTransactions(..)
            | BlockAcquisitionState::HaveBlock(..)
            | BlockAcquisitionState::Initialized(..)
            | BlockAcquisitionState::HaveBlockHeader(..)
            | BlockAcquisitionState::HaveWeakFinalitySignatures(..)
            | BlockAcquisitionState::HaveAllExecutionResults(..)
            | BlockAcquisitionState::HaveStrictFinalitySignatures(..)
            | BlockAcquisitionState::HaveApprovalsHashes(..)
            | BlockAcquisitionState::HaveExecutableBlock(..)
            | BlockAcquisitionState::Failed(..)
            | BlockAcquisitionState::Complete(..) => {
                return Ok(());
            }
        };
        self.set_state(new_state);
        Ok(())
    }

    /// Register a transactions for this block.
    ///
    /// Accepts a transaction in `HaveBlock` (forward sync only), in
    /// `HaveApprovalsHashes`, or in `HaveAllExecutionResults` (historical sync,
    /// but only when the checksum is `Uncheckable` — a `Checkable` checksum means
    /// transactions must not be applied this way). When the last outstanding
    /// transaction arrives, transitions to `HaveAllTransactions`.
    pub(super) fn register_transaction(
        &mut self,
        txn_id: TransactionId,
        is_historical: bool,
    ) -> Result<Option<Acceptance>, BlockAcquisitionError> {
        let (block, signatures, transactions) = match self {
            // NOTE(review): `false == is_historical` would read better as
            // `!is_historical`; behavior is identical.
            BlockAcquisitionState::HaveBlock(block, signatures, transactions)
                if false == is_historical =>
            {
                (block, signatures, transactions)
            }
            BlockAcquisitionState::HaveApprovalsHashes(block, signatures, transactions) => {
                (block, signatures, transactions)
            }
            BlockAcquisitionState::HaveAllExecutionResults(
                block,
                signatures,
                transactions,
                checksum,
            ) if is_historical => match checksum {
                ExecutionResultsChecksum::Uncheckable => (block, signatures, transactions),
                ExecutionResultsChecksum::Checkable(_) => {
                    return Err(BlockAcquisitionError::InvalidAttemptToApplyTransaction { txn_id });
                }
            },
            BlockAcquisitionState::Initialized(_, _)
            | BlockAcquisitionState::HaveBlockHeader(_, _)
            | BlockAcquisitionState::HaveWeakFinalitySignatures(_, _)
            | BlockAcquisitionState::HaveBlock(_, _, _)
            | BlockAcquisitionState::HaveGlobalState(_, _, _, _)
            | BlockAcquisitionState::HaveAllExecutionResults(_, _, _, _)
            | BlockAcquisitionState::HaveAllTransactions(_, _)
            | BlockAcquisitionState::HaveExecutableBlock(..)
            | BlockAcquisitionState::HaveStrictFinalitySignatures(_, _)
            | BlockAcquisitionState::Failed(_, _)
            | BlockAcquisitionState::Complete(..) => {
                debug!(
                    ?txn_id,
                    "BlockAcquisition: invalid attempt to register transaction for: {}",
                    self.block_hash()
                );
                return Ok(None);
            }
        };
        info!(
            "BlockAcquisition: registering transaction for: {}",
            block.hash()
        );
        let maybe_acceptance = transactions.apply_transaction(txn_id);
        // once no transaction is outstanding, advance to HaveAllTransactions
        if !transactions.needs_transaction() {
            let new_state =
                BlockAcquisitionState::HaveAllTransactions(block.clone(), signatures.clone());
            self.set_state(new_state);
        }
        Ok(maybe_acceptance)
    }

    pub(super) fn register_made_finalized_block(
        &mut self,
        need_execution_state: bool,
        executable_block: ExecutableBlock,
    ) -> Result<(), BlockAcquisitionError> {
        // enqueueing for execution is a forward-sync concept only
        if need_execution_state {
            return Err(BlockAcquisitionError::InvalidAttemptToEnqueueBlockForExecution);
        }

        let new_state = match self {
            BlockAcquisitionState::HaveStrictFinalitySignatures(block, _) => {
                BlockAcquisitionState::HaveExecutableBlock(
                    block.clone(),
                    Box::new(executable_block),
                    false,
                )
            }
            BlockAcquisitionState::Initialized(..)
            | BlockAcquisitionState::HaveWeakFinalitySignatures(..)
            | BlockAcquisitionState::HaveBlockHeader(..)
            | BlockAcquisitionState::HaveBlock(..)
            | BlockAcquisitionState::HaveGlobalState(..)
            | BlockAcquisitionState::HaveAllExecutionResults(..)
            | BlockAcquisitionState::HaveApprovalsHashes(..)
            | BlockAcquisitionState::HaveAllTransactions(..)
            | 
BlockAcquisitionState::HaveExecutableBlock(..)\n            | BlockAcquisitionState::Failed(..)\n            | BlockAcquisitionState::Complete(..) => {\n                return Ok(());\n            }\n        };\n        self.set_state(new_state);\n        Ok(())\n    }\n\n    /// Register block is enqueued for execution with the contract runtime.\n    pub(super) fn register_block_execution_enqueued(\n        &mut self,\n    ) -> Result<(), BlockAcquisitionError> {\n        match self {\n            BlockAcquisitionState::HaveExecutableBlock(block, _, enqueued) => {\n                info!(\n                    \"BlockAcquisition: registering block enqueued for execution for: {}\",\n                    block\n                );\n                *enqueued = true;\n            }\n            BlockAcquisitionState::Initialized(..)\n            | BlockAcquisitionState::HaveWeakFinalitySignatures(..)\n            | BlockAcquisitionState::HaveBlockHeader(..)\n            | BlockAcquisitionState::HaveBlock(..)\n            | BlockAcquisitionState::HaveGlobalState(..)\n            | BlockAcquisitionState::HaveAllExecutionResults(..)\n            | BlockAcquisitionState::HaveApprovalsHashes(..)\n            | BlockAcquisitionState::HaveAllTransactions(..)\n            | BlockAcquisitionState::HaveStrictFinalitySignatures(..)\n            | BlockAcquisitionState::Failed(..)\n            | BlockAcquisitionState::Complete(..) 
=> {}\n        };\n        Ok(())\n    }\n\n    /// Register block executed for this block.\n    pub(super) fn register_block_executed(\n        &mut self,\n        need_execution_state: bool,\n    ) -> Result<(), BlockAcquisitionError> {\n        if need_execution_state {\n            return Err(BlockAcquisitionError::InvalidAttemptToEnqueueBlockForExecution);\n        }\n\n        let new_state = match self {\n            BlockAcquisitionState::HaveExecutableBlock(block, _, _) => {\n                info!(\n                    \"BlockAcquisition: registering block executed for: {}\",\n                    *block.hash()\n                );\n                BlockAcquisitionState::Complete(block.clone())\n            }\n            BlockAcquisitionState::Initialized(..)\n            | BlockAcquisitionState::HaveWeakFinalitySignatures(..)\n            | BlockAcquisitionState::HaveBlockHeader(..)\n            | BlockAcquisitionState::HaveBlock(..)\n            | BlockAcquisitionState::HaveGlobalState(..)\n            | BlockAcquisitionState::HaveAllExecutionResults(..)\n            | BlockAcquisitionState::HaveApprovalsHashes(..)\n            | BlockAcquisitionState::HaveAllTransactions(..)\n            | BlockAcquisitionState::HaveStrictFinalitySignatures(..)\n            | BlockAcquisitionState::Failed(..)\n            | BlockAcquisitionState::Complete(..) 
=> {\n                return Ok(());\n            }\n        };\n        self.set_state(new_state);\n        Ok(())\n    }\n\n    /// Register marked complete (all required data stored locally) for this block.\n    pub(super) fn register_marked_complete(\n        &mut self,\n        need_execution_state: bool,\n    ) -> Result<(), BlockAcquisitionError> {\n        if !need_execution_state {\n            return Err(BlockAcquisitionError::InvalidAttemptToMarkComplete);\n        }\n\n        let new_state = match self {\n            BlockAcquisitionState::HaveStrictFinalitySignatures(block, _) => {\n                info!(\n                    \"BlockAcquisition: registering marked complete for: {}\",\n                    *block.hash()\n                );\n                BlockAcquisitionState::Complete(block.clone())\n            }\n            BlockAcquisitionState::Initialized(..)\n            | BlockAcquisitionState::HaveWeakFinalitySignatures(..)\n            | BlockAcquisitionState::HaveBlockHeader(..)\n            | BlockAcquisitionState::HaveBlock(..)\n            | BlockAcquisitionState::HaveGlobalState(..)\n            | BlockAcquisitionState::HaveAllExecutionResults(..)\n            | BlockAcquisitionState::HaveApprovalsHashes(..)\n            | BlockAcquisitionState::HaveAllTransactions(..)\n            | BlockAcquisitionState::HaveExecutableBlock(..)\n            | BlockAcquisitionState::Failed(..)\n            | BlockAcquisitionState::Complete(..) 
=> {\n                return Ok(());\n            }\n        };\n        self.set_state(new_state);\n        Ok(())\n    }\n\n    fn log_finality_signature_acceptance(\n        &self,\n        maybe_block_hash: &Option<BlockHash>,\n        signer: &PublicKey,\n        acceptance: Option<Acceptance>,\n    ) {\n        match maybe_block_hash {\n            None => {\n                error!(\n                    \"BlockAcquisition: unknown block_hash for finality signature from {}\",\n                    signer\n                );\n            }\n            Some(block_hash) => match acceptance {\n                Some(Acceptance::HadIt) => {\n                    trace!(\n                        \"BlockAcquisition: existing finality signature for {:?} from {}\",\n                        block_hash,\n                        signer\n                    );\n                }\n                Some(Acceptance::NeededIt) => {\n                    debug!(\n                        \"BlockAcquisition: new finality signature for {:?} from {}\",\n                        block_hash, signer\n                    );\n                }\n                None => {\n                    debug!(\n                        \"BlockAcquisition: finality signature for {:?} from {} while not actively \\\n                        trying to acquire finality signatures\",\n                        block_hash, signer\n                    );\n                }\n            },\n        }\n    }\n\n    fn set_state(&mut self, new_state: BlockAcquisitionState) {\n        debug!(\n            \"BlockAcquisition: {} (transitioned from: {})\",\n            new_state, self\n        );\n        *self = new_state;\n    }\n}\n\n// Collect signatures with Vacant state or which are currently missing from\n// the SignatureAcquisition.\npub(super) fn signatures_from_missing_validators(\n    validator_weights: &EraValidatorWeights,\n    signatures: &mut SignatureAcquisition,\n    max_simultaneous_peers: u8,\n    
peer_list: &PeerList,\n    rng: &mut NodeRng,\n    era_id: EraId,\n    block_hash: BlockHash,\n) -> BlockAcquisitionAction {\n    let mut missing_signatures_in_random_order: HashSet<PublicKey> = validator_weights\n        .missing_validators(signatures.not_vacant())\n        .cloned()\n        .collect();\n    // If there are too few, retry any in Pending state.\n    if (missing_signatures_in_random_order.len() as u8) < max_simultaneous_peers {\n        missing_signatures_in_random_order.extend(\n            validator_weights\n                .missing_validators(signatures.not_pending())\n                .cloned(),\n        );\n    }\n    BlockAcquisitionAction::finality_signatures(\n        peer_list,\n        rng,\n        era_id,\n        block_hash,\n        missing_signatures_in_random_order.into_iter().collect(),\n    )\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/block_acquisition_action.rs",
    "content": "use std::fmt::{self, Display, Formatter};\nuse tracing::{debug, warn};\n\nuse casper_types::{Block, BlockHash, DeployHash, Digest, EraId, PublicKey, TransactionId};\n\nuse crate::{\n    components::block_synchronizer::{\n        need_next::NeedNext, peer_list::PeerList, signature_acquisition::SignatureAcquisition,\n        BlockAcquisitionError, ExecutionResultsAcquisition, ExecutionResultsChecksum,\n    },\n    types::{BlockExecutionResultsOrChunkId, EraValidatorWeights, ExecutableBlock, NodeId},\n    NodeRng,\n};\n\nuse super::block_acquisition::signatures_from_missing_validators;\n\n#[derive(Debug, PartialEq)]\npub(crate) struct BlockAcquisitionAction {\n    peers_to_ask: Vec<NodeId>,\n    need_next: NeedNext,\n}\n\nimpl Display for BlockAcquisitionAction {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            f,\n            \"{} from {} peers\",\n            self.need_next,\n            self.peers_to_ask.len()\n        )\n    }\n}\n\nimpl BlockAcquisitionAction {\n    pub(super) fn need_next(&self) -> NeedNext {\n        self.need_next.clone()\n    }\n\n    pub(super) fn peers_to_ask(&self) -> Vec<NodeId> {\n        self.peers_to_ask.to_vec()\n    }\n\n    pub(super) fn need_nothing(block_hash: BlockHash) -> Self {\n        BlockAcquisitionAction {\n            peers_to_ask: vec![],\n            need_next: NeedNext::Nothing(block_hash),\n        }\n    }\n\n    pub(super) fn peers(block_hash: BlockHash) -> Self {\n        BlockAcquisitionAction {\n            peers_to_ask: vec![],\n            need_next: NeedNext::Peers(block_hash),\n        }\n    }\n\n    pub(super) fn execution_results_checksum(\n        block_hash: BlockHash,\n        global_state_root_hash: Digest,\n    ) -> Self {\n        BlockAcquisitionAction {\n            peers_to_ask: vec![],\n            need_next: NeedNext::ExecutionResultsChecksum(block_hash, global_state_root_hash),\n        }\n    }\n\n    pub(super) fn execution_results(\n    
    block_hash: BlockHash,\n        peer_list: &PeerList,\n        rng: &mut NodeRng,\n        next: BlockExecutionResultsOrChunkId,\n        checksum: ExecutionResultsChecksum,\n    ) -> Self {\n        let peers_to_ask = peer_list.qualified_peers(rng);\n        BlockAcquisitionAction {\n            peers_to_ask,\n            need_next: NeedNext::ExecutionResults(block_hash, next, checksum),\n        }\n    }\n\n    pub(super) fn approvals_hashes(block: &Block, peer_list: &PeerList, rng: &mut NodeRng) -> Self {\n        let peers_to_ask = peer_list.qualified_peers(rng);\n        BlockAcquisitionAction {\n            peers_to_ask,\n            need_next: NeedNext::ApprovalsHashes(*block.hash(), Box::new(block.clone())),\n        }\n    }\n\n    pub(super) fn legacy_deploy_by_hash(\n        block_hash: BlockHash,\n        deploy_hash: DeployHash,\n        peer_list: &PeerList,\n        rng: &mut NodeRng,\n    ) -> Self {\n        let peers_to_ask = peer_list.qualified_peers(rng);\n        BlockAcquisitionAction {\n            peers_to_ask,\n            need_next: NeedNext::DeployByHash(block_hash, deploy_hash),\n        }\n    }\n\n    pub(super) fn transaction_by_id(\n        block_hash: BlockHash,\n        txn_id: TransactionId,\n        peer_list: &PeerList,\n        rng: &mut NodeRng,\n    ) -> Self {\n        let peers_to_ask = peer_list.qualified_peers(rng);\n        BlockAcquisitionAction {\n            peers_to_ask,\n            need_next: NeedNext::TransactionById(block_hash, txn_id),\n        }\n    }\n\n    pub(super) fn global_state(\n        peer_list: &PeerList,\n        rng: &mut NodeRng,\n        block_hash: BlockHash,\n        root_hash: Digest,\n    ) -> Self {\n        let peers_to_ask = peer_list.qualified_peers(rng);\n        BlockAcquisitionAction {\n            peers_to_ask,\n            need_next: NeedNext::GlobalState(block_hash, root_hash),\n        }\n    }\n\n    pub(super) fn finality_signatures(\n        peer_list: &PeerList,\n        
rng: &mut NodeRng,\n        era_id: EraId,\n        block_hash: BlockHash,\n        missing_signatures: Vec<PublicKey>,\n    ) -> Self {\n        let peers_to_ask = peer_list.qualified_peers(rng);\n\n        debug!(\n            %era_id,\n            missing_signatures = missing_signatures.len(),\n            peers_to_ask = peers_to_ask.len(),\n            \"BlockSynchronizer: requesting finality signatures\");\n\n        BlockAcquisitionAction {\n            peers_to_ask,\n            need_next: NeedNext::FinalitySignatures(block_hash, era_id, missing_signatures),\n        }\n    }\n\n    pub(super) fn block_body(\n        peer_list: &PeerList,\n        rng: &mut NodeRng,\n        block_hash: BlockHash,\n    ) -> Self {\n        let peers_to_ask = peer_list.qualified_peers(rng);\n        BlockAcquisitionAction {\n            peers_to_ask,\n            need_next: NeedNext::BlockBody(block_hash),\n        }\n    }\n\n    pub(super) fn switch_to_have_sufficient_finality(\n        block_hash: BlockHash,\n        block_height: u64,\n    ) -> Self {\n        BlockAcquisitionAction {\n            peers_to_ask: vec![],\n            need_next: NeedNext::SwitchToHaveStrictFinality(block_hash, block_height),\n        }\n    }\n\n    pub(super) fn block_marked_complete(block_hash: BlockHash, block_height: u64) -> Self {\n        BlockAcquisitionAction {\n            peers_to_ask: vec![],\n            need_next: NeedNext::BlockMarkedComplete(block_hash, block_height),\n        }\n    }\n\n    pub(super) fn make_executable_block(block_hash: BlockHash, block_height: u64) -> Self {\n        BlockAcquisitionAction {\n            peers_to_ask: vec![],\n            need_next: NeedNext::MakeExecutableBlock(block_hash, block_height),\n        }\n    }\n\n    pub(super) fn enqueue_block_for_execution(\n        block_hash: &BlockHash,\n        executable_block: Box<ExecutableBlock>,\n    ) -> Self {\n        BlockAcquisitionAction {\n            peers_to_ask: vec![],\n            
need_next: NeedNext::EnqueueForExecution(\n                *block_hash,\n                executable_block.height,\n                executable_block,\n            ),\n        }\n    }\n\n    pub(super) fn block_header(\n        peer_list: &PeerList,\n        rng: &mut NodeRng,\n        block_hash: BlockHash,\n    ) -> Self {\n        let peers_to_ask = peer_list.qualified_peers(rng);\n        BlockAcquisitionAction {\n            peers_to_ask,\n            need_next: NeedNext::BlockHeader(block_hash),\n        }\n    }\n\n    pub(super) fn era_validators(peer_list: &PeerList, rng: &mut NodeRng, era_id: EraId) -> Self {\n        let peers_to_ask = peer_list.qualified_peers(rng);\n        BlockAcquisitionAction {\n            peers_to_ask,\n            need_next: NeedNext::EraValidators(era_id),\n        }\n    }\n\n    pub(super) fn maybe_execution_results(\n        block: &Block,\n        peer_list: &PeerList,\n        rng: &mut NodeRng,\n        exec_results: &mut ExecutionResultsAcquisition,\n    ) -> Result<Self, BlockAcquisitionError> {\n        match exec_results {\n            ExecutionResultsAcquisition::Needed { .. } => {\n                Ok(BlockAcquisitionAction::execution_results_checksum(\n                    *block.hash(),\n                    *block.state_root_hash(),\n                ))\n            }\n            acq @ ExecutionResultsAcquisition::Pending { .. }\n            | acq @ ExecutionResultsAcquisition::Acquiring { .. 
} => {\n                match acq.needs_value_or_chunk() {\n                    None => {\n                        warn!(\n                            block_hash = %block.hash(),\n                            \"execution_results_acquisition.needs_value_or_chunk() should never be \\\n                            None for these variants\"\n                        );\n                        Err(BlockAcquisitionError::InvalidAttemptToAcquireExecutionResults)\n                    }\n                    Some((next, checksum)) => Ok(BlockAcquisitionAction::execution_results(\n                        *block.hash(),\n                        peer_list,\n                        rng,\n                        next,\n                        checksum,\n                    )),\n                }\n            }\n            ExecutionResultsAcquisition::Complete { .. } => Ok(\n                BlockAcquisitionAction::approvals_hashes(block, peer_list, rng),\n            ),\n        }\n    }\n\n    #[allow(clippy::too_many_arguments)]\n    pub(super) fn next_action_after_transaction_acquisition(\n        block_hash: BlockHash,\n        block_height: u64,\n        era_id: EraId,\n        peer_list: &PeerList,\n        rng: &mut NodeRng,\n        validator_weights: &EraValidatorWeights,\n        signatures: &mut SignatureAcquisition,\n        is_historical: bool,\n        max_simultaneous_peers: u8,\n    ) -> Self {\n        if signatures.has_sufficient_finality(is_historical, true) {\n            BlockAcquisitionAction::switch_to_have_sufficient_finality(block_hash, block_height)\n        } else {\n            signatures_from_missing_validators(\n                validator_weights,\n                signatures,\n                max_simultaneous_peers,\n                peer_list,\n                rng,\n                era_id,\n                block_hash,\n            )\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/block_builder/latch.rs",
    "content": "use datasize::DataSize;\n\nuse tracing::warn;\n\nuse casper_types::{TimeDiff, Timestamp};\n\n#[derive(Debug, Default, DataSize)]\npub(super) struct Latch {\n    #[data_size(skip)]\n    latch: u8,\n    timestamp: Option<Timestamp>,\n}\n\nimpl Latch {\n    pub(super) fn increment(&mut self, increment_by: u8) {\n        match self.latch.checked_add(increment_by) {\n            Some(val) => {\n                self.latch = val;\n                self.touch();\n            }\n            None => {\n                warn!(\"latch increment overflowed.\");\n            }\n        }\n    }\n\n    pub(super) fn decrement(&mut self, decrement_by: u8) {\n        match self.latch.checked_sub(decrement_by) {\n            Some(val) => {\n                self.latch = val;\n            }\n            None => {\n                self.latch = 0;\n            }\n        }\n        self.touch();\n    }\n\n    pub(super) fn unlatch(&mut self) {\n        self.latch = 0;\n        self.timestamp = None;\n    }\n\n    pub(super) fn check_latch(&mut self, interval: TimeDiff, checked: Timestamp) -> bool {\n        match self.timestamp {\n            None => false,\n            Some(timestamp) => {\n                if checked > timestamp + interval {\n                    self.unlatch()\n                }\n                self.count() > 0\n            }\n        }\n    }\n\n    pub(super) fn count(&self) -> u8 {\n        self.latch\n    }\n\n    pub(super) fn touch(&mut self) {\n        self.timestamp = Some(Timestamp::now());\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/block_builder/tests.rs",
    "content": "use std::{collections::BTreeMap, thread, time::Duration};\n\nuse num_rational::Ratio;\n\nuse casper_types::{\n    testing::TestRng, ChainNameDigest, FinalitySignatureV2, TestBlockBuilder, Transaction,\n};\n\nuse crate::components::consensus::tests::utils::{ALICE_PUBLIC_KEY, ALICE_SECRET_KEY};\n\nuse super::*;\n\n#[test]\nfn handle_acceptance_promotes_and_disqualifies_peers() {\n    let mut rng = TestRng::new();\n    let block = TestBlockBuilder::new().build(&mut rng);\n    let mut builder = BlockBuilder::new(\n        *block.hash(),\n        false,\n        1,\n        TimeDiff::from_seconds(1),\n        LegacyRequiredFinality::Strict,\n        ProtocolVersion::V1_0_0,\n    );\n\n    let honest_peer = NodeId::random(&mut rng);\n    let dishonest_peer = NodeId::random(&mut rng);\n\n    // Builder acceptance for needed signature from ourselves.\n    assert!(builder\n        .handle_acceptance(None, Ok(Some(Acceptance::NeededIt)), true)\n        .is_ok());\n    assert!(builder.peer_list().qualified_peers(&mut rng).is_empty());\n    assert!(builder.peer_list().dishonest_peers().is_empty());\n    // Builder acceptance for existent signature from ourselves.\n    assert!(builder\n        .handle_acceptance(None, Ok(Some(Acceptance::HadIt)), true)\n        .is_ok());\n    assert!(builder.peer_list().qualified_peers(&mut rng).is_empty());\n    assert!(builder.peer_list().dishonest_peers().is_empty());\n    // Builder acceptance for no signature from ourselves.\n    assert!(builder.handle_acceptance(None, Ok(None), true).is_ok());\n    assert!(builder.peer_list().qualified_peers(&mut rng).is_empty());\n    assert!(builder.peer_list().dishonest_peers().is_empty());\n    // Builder acceptance for no signature from a peer.\n    // Peer shouldn't be registered.\n    assert!(builder\n        .handle_acceptance(Some(honest_peer), Ok(None), true)\n        .is_ok());\n    assert!(builder.peer_list().qualified_peers(&mut rng).is_empty());\n    
assert!(builder.peer_list().dishonest_peers().is_empty());\n    // Builder acceptance for existent signature from a peer.\n    // Peer shouldn't be registered.\n    assert!(builder\n        .handle_acceptance(Some(honest_peer), Ok(Some(Acceptance::HadIt)), true)\n        .is_ok());\n    assert!(builder.peer_list().qualified_peers(&mut rng).is_empty());\n    assert!(builder.peer_list().dishonest_peers().is_empty());\n    // Builder acceptance for needed signature from a peer.\n    // Peer should be registered as honest.\n    assert!(builder\n        .handle_acceptance(Some(honest_peer), Ok(Some(Acceptance::NeededIt)), true)\n        .is_ok());\n    assert!(builder\n        .peer_list()\n        .qualified_peers(&mut rng)\n        .contains(&honest_peer));\n    assert!(builder.peer_list().dishonest_peers().is_empty());\n    // Builder acceptance for error on signature handling from ourselves.\n    assert!(builder\n        .handle_acceptance(\n            None,\n            Err(BlockAcquisitionError::InvalidStateTransition),\n            true\n        )\n        .is_err());\n    assert!(builder\n        .peer_list()\n        .qualified_peers(&mut rng)\n        .contains(&honest_peer));\n    assert!(builder.peer_list().dishonest_peers().is_empty());\n    // Builder acceptance for error on signature handling from a peer.\n    // Peer should be registered as dishonest.\n    assert!(builder\n        .handle_acceptance(\n            Some(dishonest_peer),\n            Err(BlockAcquisitionError::InvalidStateTransition),\n            true\n        )\n        .is_err());\n    assert!(builder\n        .peer_list()\n        .qualified_peers(&mut rng)\n        .contains(&honest_peer));\n    assert!(builder\n        .peer_list()\n        .dishonest_peers()\n        .contains(&dishonest_peer));\n}\n\n#[test]\nfn handle_acceptance_unlatches_builder() {\n    let mut rng = TestRng::new();\n    let block = TestBlockBuilder::new().build(&mut rng);\n    let mut builder = 
BlockBuilder::new(\n        block.header().block_hash(),\n        false,\n        1,\n        TimeDiff::from_seconds(1),\n        LegacyRequiredFinality::Strict,\n        ProtocolVersion::V1_0_0,\n    );\n\n    // Check that if a valid element was received, the latch is reset\n    builder.latch_by(2);\n    assert!(builder\n        .handle_acceptance(None, Ok(Some(Acceptance::NeededIt)), true)\n        .is_ok());\n    assert_eq!(builder.latch.count(), 0);\n    builder.latch_by(2);\n    assert!(builder\n        .handle_acceptance(None, Ok(Some(Acceptance::NeededIt)), false)\n        .is_ok());\n    assert_eq!(builder.latch.count(), 0);\n\n    // Check that if a element that was previously received,\n    // the latch is not decremented since this is a late response\n    builder.latch_by(2);\n    assert!(builder\n        .handle_acceptance(None, Ok(Some(Acceptance::HadIt)), true)\n        .is_ok());\n    assert_eq!(builder.latch.count(), 2);\n    assert!(builder\n        .handle_acceptance(None, Ok(Some(Acceptance::HadIt)), false)\n        .is_ok());\n    assert_eq!(builder.latch.count(), 2);\n\n    // Check that the latch is decremented if a response lead to an error,\n    // but only if the builder was waiting for that element in its current state\n    assert!(builder\n        .handle_acceptance(\n            None,\n            Err(BlockAcquisitionError::InvalidStateTransition),\n            true\n        )\n        .is_err());\n    assert_eq!(builder.latch.count(), 1);\n    assert!(builder\n        .handle_acceptance(\n            None,\n            Err(BlockAcquisitionError::InvalidStateTransition),\n            false\n        )\n        .is_err());\n    assert_eq!(builder.latch.count(), 1);\n\n    // Check that the latch is decremented if a valid response was received that did not produce any\n    // side effect, but only if the builder was waiting for that element in its current state\n    builder.latch_by(1);\n    assert!(builder.handle_acceptance(None, 
Ok(None), false).is_ok());\n    assert_eq!(builder.latch.count(), 2);\n    assert!(builder.handle_acceptance(None, Ok(None), true).is_ok());\n    assert_eq!(builder.latch.count(), 1);\n}\n\n#[test]\nfn register_era_validator_weights() {\n    let mut rng = TestRng::new();\n    let block = TestBlockBuilder::new().build(&mut rng);\n    let mut builder = BlockBuilder::new(\n        *block.hash(),\n        false,\n        1,\n        TimeDiff::from_seconds(1),\n        LegacyRequiredFinality::Strict,\n        ProtocolVersion::V1_0_0,\n    );\n    let latest_timestamp = builder.last_progress;\n\n    let mut validator_matrix = ValidatorMatrix::new_with_validator(ALICE_SECRET_KEY.clone());\n\n    thread::sleep(Duration::from_millis(5));\n    // Register default era (0). We have no information in the builder to\n    // determine if these weights are relevant, so they shouldn't be stored.\n    builder.register_era_validator_weights(&validator_matrix);\n    assert!(builder.validator_weights.is_none());\n    assert_eq!(latest_timestamp, builder.last_progress);\n    // Set the era of the builder to 1000.\n    builder.era_id = Some(EraId::from(1000));\n    thread::sleep(Duration::from_millis(5));\n    // Register the default era again. 
The builder is interested in weights\n    // for era 1000, but the matrix has weights only for era 0, so they\n    // shouldn't be registered.\n    builder.register_era_validator_weights(&validator_matrix);\n    assert!(builder.validator_weights.is_none());\n    assert_eq!(latest_timestamp, builder.last_progress);\n    // Set the era of the builder to the random block's era.\n    builder.era_id = Some(block.era_id());\n    // Add weights for that era to the validator matrix.\n    let weights = EraValidatorWeights::new(\n        block.era_id(),\n        BTreeMap::from([(ALICE_PUBLIC_KEY.clone(), 100.into())]),\n        Ratio::new(1, 3),\n    );\n    validator_matrix.register_era_validator_weights(weights.clone());\n    thread::sleep(Duration::from_millis(5));\n    // Register the random block's era weights. This should store the weights.\n    builder.register_era_validator_weights(&validator_matrix);\n    assert_eq!(builder.validator_weights.unwrap(), weights);\n    assert_ne!(latest_timestamp, builder.last_progress);\n}\n\n#[test]\nfn register_executable_block() {\n    let mut rng = TestRng::new();\n    let chain_name_hash = ChainNameDigest::random(&mut rng);\n    // Create a random block.\n    let block = TestBlockBuilder::new().build(&mut rng);\n    // Create a builder for the block.\n    let mut builder = BlockBuilder::new(\n        *block.hash(),\n        false,\n        1,\n        TimeDiff::from_seconds(1),\n        LegacyRequiredFinality::Strict,\n        ProtocolVersion::V1_0_0,\n    );\n    let mut latest_timestamp = builder.last_progress;\n    // Create mock era weights for the block's era.\n    let weights = EraValidatorWeights::new(\n        block.era_id(),\n        BTreeMap::from([(ALICE_PUBLIC_KEY.clone(), 100.into())]),\n        Ratio::new(1, 3),\n    );\n    // Create a signature acquisition to fill.\n    let mut signature_acquisition = SignatureAcquisition::new(\n        vec![ALICE_PUBLIC_KEY.clone()],\n        LegacyRequiredFinality::Strict,\n    
);\n    let sig = FinalitySignatureV2::create(\n        *block.hash(),\n        block.height(),\n        block.era_id(),\n        chain_name_hash,\n        &ALICE_SECRET_KEY,\n    );\n    assert_eq!(\n        signature_acquisition.apply_signature(sig.into(), &weights),\n        Acceptance::NeededIt\n    );\n    // Set the builder's state to `HaveStrictFinalitySignatures`.\n    let expected_txns = vec![Transaction::random(&mut rng)];\n    let executable_block =\n        ExecutableBlock::from_block_and_transactions(block.clone(), expected_txns.clone());\n    builder.acquisition_state = BlockAcquisitionState::HaveStrictFinalitySignatures(\n        Box::new(block.clone().into()),\n        signature_acquisition.clone(),\n    );\n\n    // Register the finalized block.\n    thread::sleep(Duration::from_millis(5));\n    builder.register_made_executable_block(executable_block.clone());\n    match &builder.acquisition_state {\n        BlockAcquisitionState::HaveExecutableBlock(actual_block, executable_block, enqueued) => {\n            assert_eq!(actual_block.hash(), block.hash());\n            assert_eq!(expected_txns, *executable_block.transactions);\n            assert!(!enqueued);\n        }\n        _ => panic!(\"Unexpected outcome in registering finalized block\"),\n    }\n    assert!(!builder.is_failed());\n    assert_ne!(latest_timestamp, builder.last_progress);\n    latest_timestamp = builder.last_progress;\n\n    // Make the builder historical.\n    builder.should_fetch_execution_state = true;\n    // Reset the state to `HaveStrictFinalitySignatures`.\n    builder.acquisition_state = BlockAcquisitionState::HaveStrictFinalitySignatures(\n        Box::new(block.into()),\n        signature_acquisition.clone(),\n    );\n    // Register the finalized block. 
This should fail on historical builders.\n    thread::sleep(Duration::from_millis(5));\n    builder.register_made_executable_block(executable_block);\n    assert!(builder.is_failed());\n    assert_ne!(latest_timestamp, builder.last_progress);\n}\n\n#[test]\nfn register_block_execution() {\n    let mut rng = TestRng::new();\n    let chain_name_hash = ChainNameDigest::random(&mut rng);\n    // Create a random block.\n    let block = TestBlockBuilder::new().build(&mut rng);\n    // Create a builder for the block.\n    let mut builder = BlockBuilder::new(\n        *block.hash(),\n        false,\n        1,\n        TimeDiff::from_seconds(1),\n        LegacyRequiredFinality::Strict,\n        ProtocolVersion::V1_0_0,\n    );\n    let mut latest_timestamp = builder.last_progress;\n    // Create mock era weights for the block's era.\n    let weights = EraValidatorWeights::new(\n        block.era_id(),\n        BTreeMap::from([(ALICE_PUBLIC_KEY.clone(), 100.into())]),\n        Ratio::new(1, 3),\n    );\n    // Create a signature acquisition to fill.\n    let mut signature_acquisition = SignatureAcquisition::new(\n        vec![ALICE_PUBLIC_KEY.clone()],\n        LegacyRequiredFinality::Strict,\n    );\n    let sig = FinalitySignatureV2::create(\n        *block.hash(),\n        block.height(),\n        block.era_id(),\n        chain_name_hash,\n        &ALICE_SECRET_KEY,\n    );\n    assert_eq!(\n        signature_acquisition.apply_signature(sig.into(), &weights),\n        Acceptance::NeededIt\n    );\n\n    let executable_block = Box::new(ExecutableBlock::from_block_and_transactions(\n        block.clone(),\n        vec![Transaction::random(&mut rng)],\n    ));\n    builder.acquisition_state =\n        BlockAcquisitionState::HaveExecutableBlock(Box::new(block.into()), executable_block, false);\n\n    assert_eq!(builder.execution_progress, ExecutionProgress::Idle);\n    // Register the block execution enquement as successful. 
This should\n    // advance the execution progress.\n    thread::sleep(Duration::from_millis(5));\n    builder.register_block_execution_enqueued();\n    assert_eq!(builder.execution_progress, ExecutionProgress::Started);\n    assert!(matches!(\n        builder.acquisition_state,\n        BlockAcquisitionState::HaveExecutableBlock(_, _, true)\n    ));\n    assert!(!builder.is_failed());\n    assert_ne!(latest_timestamp, builder.last_progress);\n    latest_timestamp = builder.last_progress;\n\n    // Attempt to register the block for execution again. The state shouldn't\n    // change and the builder shouldn't fail.\n    thread::sleep(Duration::from_millis(5));\n    builder.register_block_execution_enqueued();\n    assert_eq!(builder.execution_progress, ExecutionProgress::Started);\n    assert!(matches!(\n        builder.acquisition_state,\n        BlockAcquisitionState::HaveExecutableBlock(_, _, true)\n    ));\n    assert!(!builder.is_failed());\n    assert_ne!(latest_timestamp, builder.last_progress);\n    latest_timestamp = builder.last_progress;\n\n    // Make the builder historical.\n    builder.should_fetch_execution_state = true;\n    // Register the block execution enquement as successful. 
This should put\n    // the builder in a failed state as we shouldn't execute historical blocks.\n    thread::sleep(Duration::from_millis(5));\n    builder.register_block_execution_enqueued();\n    assert!(builder.is_failed());\n    assert_ne!(latest_timestamp, builder.last_progress);\n}\n\n#[test]\nfn register_block_executed() {\n    let mut rng = TestRng::new();\n    let chain_name_hash = ChainNameDigest::random(&mut rng);\n    // Create a random block.\n    let block = TestBlockBuilder::new().build(&mut rng);\n    // Create a builder for the block.\n    let mut builder = BlockBuilder::new(\n        *block.hash(),\n        false,\n        1,\n        TimeDiff::from_seconds(1),\n        LegacyRequiredFinality::Strict,\n        ProtocolVersion::V1_0_0,\n    );\n    let mut latest_timestamp = builder.last_progress;\n    // Create mock era weights for the block's era.\n    let weights = EraValidatorWeights::new(\n        block.era_id(),\n        BTreeMap::from([(ALICE_PUBLIC_KEY.clone(), 100.into())]),\n        Ratio::new(1, 3),\n    );\n    // Create a signature acquisition to fill.\n    let mut signature_acquisition = SignatureAcquisition::new(\n        vec![ALICE_PUBLIC_KEY.clone()],\n        LegacyRequiredFinality::Strict,\n    );\n    let sig = FinalitySignatureV2::create(\n        *block.hash(),\n        block.height(),\n        block.era_id(),\n        chain_name_hash,\n        &ALICE_SECRET_KEY,\n    );\n    assert_eq!(\n        signature_acquisition.apply_signature(sig.into(), &weights),\n        Acceptance::NeededIt\n    );\n    // Set the builder state to `HaveStrictFinalitySignatures`.\n    builder.acquisition_state = BlockAcquisitionState::HaveStrictFinalitySignatures(\n        Box::new(block.into()),\n        signature_acquisition,\n    );\n    // Mark execution as started.\n    builder.execution_progress = ExecutionProgress::Started;\n\n    thread::sleep(Duration::from_millis(5));\n    // Register the block as executed. 
This should advance the execution\n    // progress to `Done`.\n    builder.register_block_executed();\n    assert_eq!(builder.execution_progress, ExecutionProgress::Done);\n    assert!(!builder.is_failed());\n    assert_ne!(latest_timestamp, builder.last_progress);\n    latest_timestamp = builder.last_progress;\n\n    thread::sleep(Duration::from_millis(5));\n    // Register the block as executed again. This should not change the\n    // builder's state.\n    builder.register_block_executed();\n    assert_eq!(builder.execution_progress, ExecutionProgress::Done);\n    assert!(!builder.is_failed());\n    assert_eq!(latest_timestamp, builder.last_progress);\n\n    // Set the builder to be historical and register the block as executed\n    // again. This should put the builder in the failed state.\n    builder.should_fetch_execution_state = true;\n    thread::sleep(Duration::from_millis(5));\n    builder.register_block_executed();\n    assert!(builder.is_failed());\n    assert_ne!(latest_timestamp, builder.last_progress);\n}\n\n#[test]\nfn register_block_marked_complete() {\n    let mut rng = TestRng::new();\n    let chain_name_hash = ChainNameDigest::random(&mut rng);\n    // Create a random block.\n    let block = TestBlockBuilder::new().build(&mut rng);\n    // Create a builder for the block.\n    let mut builder = BlockBuilder::new(\n        *block.hash(),\n        false,\n        1,\n        TimeDiff::from_seconds(1),\n        LegacyRequiredFinality::Strict,\n        ProtocolVersion::V1_0_0,\n    );\n    // Make the builder historical.\n    builder.should_fetch_execution_state = true;\n    let mut latest_timestamp = builder.last_progress;\n    // Create mock era weights for the block's era.\n    let weights = EraValidatorWeights::new(\n        block.era_id(),\n        BTreeMap::from([(ALICE_PUBLIC_KEY.clone(), 100.into())]),\n        Ratio::new(1, 3),\n    );\n    // Create a signature acquisition to fill.\n    let mut signature_acquisition = 
SignatureAcquisition::new(\n        vec![ALICE_PUBLIC_KEY.clone()],\n        LegacyRequiredFinality::Strict,\n    );\n    let sig = FinalitySignatureV2::create(\n        *block.hash(),\n        block.height(),\n        block.era_id(),\n        chain_name_hash,\n        &ALICE_SECRET_KEY,\n    );\n    assert_eq!(\n        signature_acquisition.apply_signature(sig.into(), &weights),\n        Acceptance::NeededIt\n    );\n\n    // Set the builder state to `HaveStrictFinalitySignatures`.\n    builder.acquisition_state = BlockAcquisitionState::HaveStrictFinalitySignatures(\n        Box::new(block.clone().into()),\n        signature_acquisition.clone(),\n    );\n    // Register the block as marked complete. Since there are no missing\n    // deploys, this should transition the builder state to\n    // `HaveStrictFinalitySignatures`.\n    thread::sleep(Duration::from_millis(5));\n    builder.register_marked_complete();\n    assert!(matches!(\n        builder.acquisition_state,\n        BlockAcquisitionState::Complete(..)\n    ));\n    assert!(!builder.is_failed());\n    assert_ne!(latest_timestamp, builder.last_progress);\n    latest_timestamp = builder.last_progress;\n\n    // Make this a forward builder.\n    builder.should_fetch_execution_state = false;\n    // Set the builder state to `HaveStrictFinalitySignatures`.\n    builder.acquisition_state = BlockAcquisitionState::HaveStrictFinalitySignatures(\n        Box::new(block.into()),\n        signature_acquisition.clone(),\n    );\n    // Register the block as marked complete. In the forward flow we should\n    // abort the builder as an attempt to mark the block complete is invalid.\n    thread::sleep(Duration::from_millis(5));\n    builder.register_marked_complete();\n    assert!(builder.is_failed());\n    assert_ne!(latest_timestamp, builder.last_progress);\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/block_builder.rs",
    "content": "mod latch;\n#[cfg(test)]\nmod tests;\n\nuse std::{\n    collections::HashMap,\n    fmt::{Display, Formatter},\n    time::Instant,\n};\n\nuse datasize::DataSize;\nuse tracing::{debug, error, trace, warn};\n\nuse casper_storage::block_store::types::ApprovalsHashes;\nuse casper_types::{\n    execution::ExecutionResult, Block, BlockHash, BlockHeader, BlockSignatures, Digest, EraId,\n    FinalitySignature, LegacyRequiredFinality, ProtocolVersion, PublicKey, TimeDiff, Timestamp,\n    TransactionHash, TransactionId,\n};\n\nuse super::{\n    block_acquisition::{Acceptance, BlockAcquisitionState, RegisterExecResultsOutcome},\n    block_acquisition_action::BlockAcquisitionAction,\n    execution_results_acquisition::{self, ExecutionResultsChecksum},\n    peer_list::{PeerList, PeersStatus},\n    signature_acquisition::SignatureAcquisition,\n    BlockAcquisitionError,\n};\nuse crate::{\n    components::block_synchronizer::block_builder::latch::Latch,\n    types::{\n        BlockExecutionResultsOrChunk, EraValidatorWeights, ExecutableBlock, NodeId, ValidatorMatrix,\n    },\n    NodeRng,\n};\n\n#[derive(Clone, Copy, PartialEq, Eq, DataSize, Debug)]\npub(super) enum Error {\n    BlockAcquisition(BlockAcquisitionError),\n    MissingValidatorWeights(BlockHash),\n}\n\nimpl Display for Error {\n    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {\n        match self {\n            Error::BlockAcquisition(err) => write!(f, \"block acquisition error: {}\", err),\n            Error::MissingValidatorWeights(block_hash) => {\n                write!(f, \"missing validator weights for: {}\", block_hash)\n            }\n        }\n    }\n}\n\n#[derive(Clone, Copy, PartialEq, Eq, Debug, DataSize)]\nenum ExecutionProgress {\n    Idle,\n    Started,\n    Done,\n}\n\nimpl ExecutionProgress {\n    fn start(self) -> Option<Self> {\n        match self {\n            Self::Idle => Some(Self::Started),\n            _ => None,\n        }\n    }\n\n    fn finish(self) -> 
Option<Self> {\n        match self {\n            Self::Started => Some(Self::Done),\n            _ => None,\n        }\n    }\n}\n\n#[derive(DataSize, Debug)]\npub(super) struct BlockBuilder {\n    // imputed\n    block_hash: BlockHash,\n    should_fetch_execution_state: bool,\n    strict_finality_protocol_version: ProtocolVersion,\n    peer_list: PeerList,\n\n    // progress tracking\n    sync_start: Instant,\n    execution_progress: ExecutionProgress,\n    last_progress: Timestamp,\n    latch: Latch,\n\n    // acquired state\n    acquisition_state: BlockAcquisitionState,\n    era_id: Option<EraId>,\n    validator_weights: Option<EraValidatorWeights>,\n}\n\nimpl Display for BlockBuilder {\n    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {\n        write!(\n            f,\n            \"is_historical: {:?}, has_validators: {:?}, block builder: {}\",\n            self.should_fetch_execution_state,\n            self.validator_weights.is_some(),\n            self.acquisition_state\n        )\n    }\n}\n\nimpl BlockBuilder {\n    pub(super) fn new(\n        block_hash: BlockHash,\n        should_fetch_execution_state: bool,\n        max_simultaneous_peers: u8,\n        peer_refresh_interval: TimeDiff,\n        legacy_required_finality: LegacyRequiredFinality,\n        strict_finality_protocol_version: ProtocolVersion,\n    ) -> Self {\n        BlockBuilder {\n            block_hash,\n            era_id: None,\n            validator_weights: None,\n            acquisition_state: BlockAcquisitionState::Initialized(\n                block_hash,\n                SignatureAcquisition::new(vec![], legacy_required_finality),\n            ),\n            peer_list: PeerList::new(max_simultaneous_peers, peer_refresh_interval),\n            should_fetch_execution_state,\n            strict_finality_protocol_version,\n            sync_start: Instant::now(),\n            execution_progress: ExecutionProgress::Idle,\n            last_progress: Timestamp::now(),\n      
      latch: Latch::default(),\n        }\n    }\n\n    #[allow(clippy::too_many_arguments)]\n    pub(super) fn new_from_sync_leap(\n        block_header: BlockHeader,\n        maybe_sigs: Option<&BlockSignatures>,\n        validator_weights: EraValidatorWeights,\n        peers: Vec<NodeId>,\n        should_fetch_execution_state: bool,\n        max_simultaneous_peers: u8,\n        peer_refresh_interval: TimeDiff,\n        legacy_required_finality: LegacyRequiredFinality,\n        strict_finality_protocol_version: ProtocolVersion,\n    ) -> Self {\n        let block_hash = block_header.block_hash();\n        let era_id = Some(block_header.era_id());\n        let mut signature_acquisition = SignatureAcquisition::new(\n            validator_weights.validator_public_keys().cloned().collect(),\n            legacy_required_finality,\n        );\n        if let Some(signatures) = maybe_sigs {\n            for finality_signature in signatures.finality_signatures() {\n                let _ =\n                    signature_acquisition.apply_signature(finality_signature, &validator_weights);\n            }\n        }\n        let acquisition_state = BlockAcquisitionState::HaveWeakFinalitySignatures(\n            Box::new(block_header),\n            signature_acquisition,\n        );\n        let mut peer_list = PeerList::new(max_simultaneous_peers, peer_refresh_interval);\n        peers.iter().for_each(|p| peer_list.register_peer(*p));\n\n        BlockBuilder {\n            block_hash,\n            era_id,\n            validator_weights: Some(validator_weights),\n            acquisition_state,\n            peer_list,\n            should_fetch_execution_state,\n            strict_finality_protocol_version,\n            sync_start: Instant::now(),\n            execution_progress: ExecutionProgress::Idle,\n            last_progress: Timestamp::now(),\n            latch: Latch::default(),\n        }\n    }\n\n    pub(super) fn abort(&mut self) {\n        self.acquisition_state 
=\n            BlockAcquisitionState::Failed(self.block_hash, self.block_height());\n        self.flush_peers();\n        self.touch();\n    }\n\n    pub(crate) fn block_acquisition_state(&self) -> &BlockAcquisitionState {\n        &self.acquisition_state\n    }\n\n    #[cfg(test)]\n    pub(crate) fn set_block_acquisition_state(&mut self, state: BlockAcquisitionState) {\n        self.acquisition_state = state\n    }\n\n    pub(super) fn block_hash(&self) -> BlockHash {\n        self.block_hash\n    }\n\n    pub(super) fn maybe_block(&self) -> Option<Box<Block>> {\n        self.acquisition_state.maybe_block()\n    }\n\n    pub(super) fn block_height(&self) -> Option<u64> {\n        self.acquisition_state.block_height()\n    }\n\n    pub(super) fn block_height_and_era(&self) -> Option<(u64, EraId)> {\n        if let Some(block_height) = self.acquisition_state.block_height() {\n            if let Some(evw) = &self.validator_weights {\n                return Some((block_height, evw.era_id()));\n            }\n        }\n        None\n    }\n\n    pub(super) fn should_fetch_execution_state(&self) -> bool {\n        self.should_fetch_execution_state\n    }\n\n    pub(super) fn sync_start_time(&self) -> Instant {\n        self.sync_start\n    }\n\n    pub(super) fn last_progress_time(&self) -> Timestamp {\n        self.last_progress\n    }\n\n    #[cfg(test)]\n    pub fn latched(&self) -> bool {\n        self.latch.count() > 0\n    }\n\n    #[cfg(test)]\n    pub fn latch_count(&self) -> u8 {\n        self.latch.count()\n    }\n\n    pub(super) fn check_latch(&mut self, interval: TimeDiff) -> bool {\n        self.latch.check_latch(interval, Timestamp::now())\n    }\n\n    /// Increments the latch counter by 1.\n    pub(super) fn latch(&mut self) {\n        self.latch.increment(1);\n    }\n\n    pub(super) fn latch_by(&mut self, count: usize) {\n        self.latch.increment(count as u8);\n    }\n\n    /// Decrements the latch counter.\n    pub(super) fn latch_decrement(&mut 
self) {\n        self.latch.decrement(1);\n    }\n\n    pub(super) fn is_failed(&self) -> bool {\n        matches!(self.acquisition_state, BlockAcquisitionState::Failed(_, _))\n    }\n\n    pub(super) fn is_finished(&self) -> bool {\n        match self.acquisition_state {\n            BlockAcquisitionState::Initialized(_, _)\n            | BlockAcquisitionState::HaveBlockHeader(_, _)\n            | BlockAcquisitionState::HaveWeakFinalitySignatures(_, _)\n            | BlockAcquisitionState::HaveBlock(_, _, _)\n            | BlockAcquisitionState::HaveGlobalState(_, _, _, _)\n            | BlockAcquisitionState::HaveAllExecutionResults(_, _, _, _)\n            | BlockAcquisitionState::HaveApprovalsHashes(_, _, _)\n            | BlockAcquisitionState::HaveAllTransactions(_, _)\n            | BlockAcquisitionState::HaveStrictFinalitySignatures(_, _)\n            | BlockAcquisitionState::HaveExecutableBlock(_, _, _)\n            | BlockAcquisitionState::Failed(_, _) => false,\n            BlockAcquisitionState::Complete(_) => true,\n        }\n    }\n\n    pub(super) fn is_executing(&self) -> bool {\n        matches!(self.execution_progress, ExecutionProgress::Started)\n    }\n\n    pub(super) fn execution_unattempted(&self) -> bool {\n        matches!(self.execution_progress, ExecutionProgress::Idle)\n    }\n\n    pub(super) fn register_block_execution_enqueued(&mut self) {\n        if self.should_fetch_execution_state {\n            let block_hash = self.block_hash();\n            error!(%block_hash, \"invalid attempt to enqueue historical block for execution\");\n            self.abort();\n            return;\n        }\n\n        if let Err(error) = self.acquisition_state.register_block_execution_enqueued() {\n            error!(%error, \"register block execution enqueued failed\");\n            self.abort()\n        } else {\n            self.touch();\n        }\n\n        match self.execution_progress.start() {\n            None => {\n                let 
block_hash = self.block_hash();\n                warn!(%block_hash, \"invalid attempt to start block execution\");\n            }\n            Some(executing_progress) => {\n                self.touch();\n                self.execution_progress = executing_progress;\n            }\n        }\n    }\n\n    pub(super) fn register_made_executable_block(&mut self, executable_block: ExecutableBlock) {\n        if let Err(error) = self\n            .acquisition_state\n            .register_made_finalized_block(self.should_fetch_execution_state, executable_block)\n        {\n            error!(%error, \"register finalized block failed\");\n            self.abort()\n        } else {\n            self.touch();\n        }\n    }\n\n    pub(super) fn register_block_executed(&mut self) {\n        if let Err(error) = self\n            .acquisition_state\n            .register_block_executed(self.should_fetch_execution_state)\n        {\n            error!(%error, \"register block executed failed\");\n            self.abort()\n        } else {\n            if self.should_fetch_execution_state {\n                let block_hash = self.block_hash();\n                error!(%block_hash, \"invalid attempt to finish block execution on historical block\");\n                self.abort();\n            }\n\n            match self.execution_progress.finish() {\n                None => {\n                    let block_hash = self.block_hash();\n                    warn!(%block_hash, \"invalid attempt to finish block execution\");\n                }\n                Some(executing_progress) => {\n                    self.touch();\n                    self.execution_progress = executing_progress;\n                }\n            }\n        }\n    }\n\n    pub(super) fn register_marked_complete(&mut self) {\n        if let Err(error) = self\n            .acquisition_state\n            .register_marked_complete(self.should_fetch_execution_state)\n        {\n            error!(%error, \"register 
marked complete failed\");\n            self.abort()\n        } else {\n            self.touch();\n        }\n    }\n\n    pub(super) fn dishonest_peers(&self) -> Vec<NodeId> {\n        self.peer_list.dishonest_peers()\n    }\n\n    pub(super) fn disqualify_peer(&mut self, peer: NodeId) {\n        debug!(?peer, \"disqualify_peer\");\n        self.peer_list.disqualify_peer(peer);\n    }\n\n    pub(super) fn promote_peer(&mut self, peer: NodeId) {\n        self.peer_list.promote_peer(peer);\n    }\n\n    pub(super) fn demote_peer(&mut self, peer: NodeId) {\n        self.peer_list.demote_peer(peer);\n    }\n\n    pub(super) fn flush_dishonest_peers(&mut self) {\n        self.peer_list.flush_dishonest_peers();\n    }\n\n    pub(super) fn block_acquisition_action(\n        &mut self,\n        rng: &mut NodeRng,\n        max_simultaneous_peers: u8,\n    ) -> BlockAcquisitionAction {\n        match self.peer_list.need_peers() {\n            PeersStatus::Sufficient => {\n                trace!(\n                    \"BlockBuilder: sufficient peers for block_hash {}\",\n                    self.block_hash\n                );\n            }\n            PeersStatus::Insufficient => {\n                debug!(\n                    \"BlockBuilder: insufficient peers for block_hash {}\",\n                    self.block_hash\n                );\n                return BlockAcquisitionAction::peers(self.block_hash);\n            }\n            PeersStatus::Stale => {\n                debug!(\"BlockBuilder: refreshing peers for {}\", self.block_hash);\n                return BlockAcquisitionAction::peers(self.block_hash);\n            }\n        }\n        let era_id = match self.era_id {\n            None => {\n                // if we don't have the era_id, we only have block_hash, thus get block_header\n                return BlockAcquisitionAction::block_header(&self.peer_list, rng, self.block_hash);\n            }\n            Some(era_id) => era_id,\n        };\n        let 
validator_weights = match &self.validator_weights {\n            None => {\n                return BlockAcquisitionAction::era_validators(&self.peer_list, rng, era_id);\n            }\n            Some(validator_weights) => {\n                if validator_weights.is_empty() {\n                    return BlockAcquisitionAction::era_validators(&self.peer_list, rng, era_id);\n                }\n                validator_weights\n            }\n        };\n        match self.acquisition_state.next_action(\n            &self.peer_list,\n            validator_weights,\n            rng,\n            self.should_fetch_execution_state,\n            max_simultaneous_peers,\n        ) {\n            Ok(ret) => ret,\n            Err(err) => {\n                error!(%err, \"BlockBuilder: attempt to determine next action resulted in error.\");\n                self.abort();\n                BlockAcquisitionAction::need_nothing(self.block_hash)\n            }\n        }\n    }\n\n    pub(super) fn register_era_validator_weights(&mut self, validator_matrix: &ValidatorMatrix) {\n        if self.validator_weights.is_some() || self.era_id.is_none() {\n            return;\n        }\n\n        if let Some(era_id) = self.era_id {\n            if let Some(evw) = validator_matrix.validator_weights(era_id) {\n                self.validator_weights = Some(evw);\n                self.touch();\n            }\n        }\n    }\n\n    pub(super) fn waiting_for_block_header(&self) -> bool {\n        match &self.acquisition_state {\n            BlockAcquisitionState::Initialized(..) 
=> true,\n            BlockAcquisitionState::HaveBlockHeader(..)\n            | BlockAcquisitionState::HaveWeakFinalitySignatures(..)\n            | BlockAcquisitionState::HaveBlock(..)\n            | BlockAcquisitionState::HaveGlobalState(..)\n            | BlockAcquisitionState::HaveAllExecutionResults(..)\n            | BlockAcquisitionState::HaveAllTransactions(..)\n            | BlockAcquisitionState::HaveStrictFinalitySignatures(..)\n            | BlockAcquisitionState::HaveApprovalsHashes(..)\n            | BlockAcquisitionState::HaveExecutableBlock(..)\n            | BlockAcquisitionState::Failed(..)\n            | BlockAcquisitionState::Complete(..) => false,\n        }\n    }\n\n    pub(super) fn register_block_header(\n        &mut self,\n        block_header: BlockHeader,\n        maybe_peer: Option<NodeId>,\n    ) -> Result<(), Error> {\n        let was_waiting_for_block_header = self.waiting_for_block_header();\n\n        let era_id = block_header.era_id();\n        let acceptance = self.acquisition_state.register_block_header(\n            block_header,\n            self.strict_finality_protocol_version,\n            self.should_fetch_execution_state,\n        );\n        self.handle_acceptance(maybe_peer, acceptance, was_waiting_for_block_header)?;\n        self.era_id = Some(era_id);\n        Ok(())\n    }\n\n    pub(super) fn waiting_for_block(&self) -> bool {\n        match &self.acquisition_state {\n            BlockAcquisitionState::HaveWeakFinalitySignatures(..) 
=> true,\n            BlockAcquisitionState::Initialized(..)\n            | BlockAcquisitionState::HaveBlockHeader(..)\n            | BlockAcquisitionState::HaveBlock(..)\n            | BlockAcquisitionState::HaveGlobalState(..)\n            | BlockAcquisitionState::HaveAllExecutionResults(..)\n            | BlockAcquisitionState::HaveAllTransactions(..)\n            | BlockAcquisitionState::HaveStrictFinalitySignatures(..)\n            | BlockAcquisitionState::HaveApprovalsHashes(..)\n            | BlockAcquisitionState::HaveExecutableBlock(..)\n            | BlockAcquisitionState::Failed(..)\n            | BlockAcquisitionState::Complete(..) => false,\n        }\n    }\n\n    pub(super) fn register_block(\n        &mut self,\n        block: Block,\n        maybe_peer: Option<NodeId>,\n    ) -> Result<(), Error> {\n        let was_waiting_for_block = self.waiting_for_block();\n        let acceptance = self\n            .acquisition_state\n            .register_block(block, self.should_fetch_execution_state);\n        self.handle_acceptance(maybe_peer, acceptance, was_waiting_for_block)\n    }\n\n    pub(super) fn waiting_for_approvals_hashes(&self) -> bool {\n        match &self.acquisition_state {\n            BlockAcquisitionState::HaveBlock(..) 
if !self.should_fetch_execution_state => true,\n            BlockAcquisitionState::HaveAllExecutionResults(..)\n                if self.should_fetch_execution_state =>\n            {\n                true\n            }\n            BlockAcquisitionState::Initialized(..)\n            | BlockAcquisitionState::HaveBlockHeader(..)\n            | BlockAcquisitionState::HaveWeakFinalitySignatures(..)\n            | BlockAcquisitionState::HaveBlock(..)\n            | BlockAcquisitionState::HaveGlobalState(..)\n            | BlockAcquisitionState::HaveAllExecutionResults(..)\n            | BlockAcquisitionState::HaveAllTransactions(..)\n            | BlockAcquisitionState::HaveStrictFinalitySignatures(..)\n            | BlockAcquisitionState::HaveApprovalsHashes(..)\n            | BlockAcquisitionState::HaveExecutableBlock(..)\n            | BlockAcquisitionState::Failed(..)\n            | BlockAcquisitionState::Complete(..) => false,\n        }\n    }\n\n    pub(super) fn register_approvals_hashes(\n        &mut self,\n        approvals_hashes: &ApprovalsHashes,\n        maybe_peer: Option<NodeId>,\n    ) -> Result<(), Error> {\n        let was_waiting_for_approvals_hashes = self.waiting_for_approvals_hashes();\n        let acceptance = self\n            .acquisition_state\n            .register_approvals_hashes(approvals_hashes, self.should_fetch_execution_state);\n        self.handle_acceptance(maybe_peer, acceptance, was_waiting_for_approvals_hashes)\n    }\n\n    pub(super) fn register_finality_signature_pending(&mut self, validator: PublicKey) {\n        self.acquisition_state\n            .register_finality_signature_pending(validator);\n    }\n\n    pub(super) fn switch_to_have_strict_finality(\n        &mut self,\n        block_hash: BlockHash,\n    ) -> Result<(), Error> {\n        match self\n            .acquisition_state\n            .switch_to_have_strict_finality(block_hash, self.should_fetch_execution_state)\n        {\n            Ok(()) => {\n            
    self.touch();\n                Ok(())\n            }\n            Err(error) => {\n                self.abort();\n                Err(Error::BlockAcquisition(error))\n            }\n        }\n    }\n\n    pub(super) fn waiting_for_signatures(&self) -> bool {\n        self.acquisition_state\n            .actively_acquiring_signatures(self.should_fetch_execution_state)\n    }\n\n    pub(super) fn register_finality_signature(\n        &mut self,\n        finality_signature: FinalitySignature,\n        maybe_peer: Option<NodeId>,\n    ) -> Result<(), Error> {\n        let was_waiting_for_sigs = self.waiting_for_signatures();\n        let validator_weights = self\n            .validator_weights\n            .as_ref()\n            .ok_or(Error::MissingValidatorWeights(self.block_hash))?;\n        let acceptance = self.acquisition_state.register_finality_signature(\n            finality_signature,\n            validator_weights,\n            self.should_fetch_execution_state,\n        );\n        self.handle_acceptance(maybe_peer, acceptance, was_waiting_for_sigs)\n    }\n\n    pub(super) fn register_global_state(&mut self, global_state: Digest) -> Result<(), Error> {\n        if let Err(error) = self\n            .acquisition_state\n            .register_global_state(global_state, self.should_fetch_execution_state)\n        {\n            return Err(Error::BlockAcquisition(error));\n        }\n        self.touch();\n        Ok(())\n    }\n\n    pub(super) fn register_execution_results_checksum(\n        &mut self,\n        execution_results_checksum: ExecutionResultsChecksum,\n    ) -> Result<(), Error> {\n        debug!(block_hash=%self.block_hash, \"register_execution_results_checksum\");\n        if let Err(err) = self.acquisition_state.register_execution_results_checksum(\n            execution_results_checksum,\n            self.should_fetch_execution_state,\n        ) {\n            debug!(block_hash=%self.block_hash, %err, 
\"register_execution_results_checksum: Error::BlockAcquisition\");\n            return Err(Error::BlockAcquisition(err));\n        }\n        self.touch();\n        Ok(())\n    }\n\n    pub(super) fn waiting_for_execution_results(&self) -> bool {\n        match &self.acquisition_state {\n            BlockAcquisitionState::HaveGlobalState(..) if self.should_fetch_execution_state => true,\n            BlockAcquisitionState::HaveAllTransactions(..)\n            | BlockAcquisitionState::HaveGlobalState(..)\n            | BlockAcquisitionState::HaveBlock(..)\n            | BlockAcquisitionState::Initialized(..)\n            | BlockAcquisitionState::HaveBlockHeader(..)\n            | BlockAcquisitionState::HaveWeakFinalitySignatures(..)\n            | BlockAcquisitionState::HaveAllExecutionResults(..)\n            | BlockAcquisitionState::HaveStrictFinalitySignatures(..)\n            | BlockAcquisitionState::HaveApprovalsHashes(..)\n            | BlockAcquisitionState::HaveExecutableBlock(..)\n            | BlockAcquisitionState::Failed(..)\n            | BlockAcquisitionState::Complete(..) 
=> false,\n        }\n    }\n\n    pub(super) fn register_fetched_execution_results(\n        &mut self,\n        maybe_peer: Option<NodeId>,\n        block_execution_results_or_chunk: BlockExecutionResultsOrChunk,\n    ) -> Result<Option<HashMap<TransactionHash, ExecutionResult>>, Error> {\n        debug!(block_hash=%self.block_hash, \"register_fetched_execution_results\");\n        let was_waiting_for_execution_results = self.waiting_for_execution_results();\n        match self.acquisition_state.register_execution_results_or_chunk(\n            block_execution_results_or_chunk,\n            self.should_fetch_execution_state,\n        ) {\n            Ok(RegisterExecResultsOutcome {\n                exec_results,\n                acceptance,\n            }) => {\n                debug!(\n                    ?acceptance,\n                    \"register_fetched_execution_results: Ok(RegisterExecResultsOutcome)\"\n                );\n                self.handle_acceptance(\n                    maybe_peer,\n                    Ok(acceptance),\n                    was_waiting_for_execution_results,\n                )?;\n                Ok(exec_results)\n            }\n            Err(BlockAcquisitionError::ExecutionResults(error)) => {\n                match error {\n                    // late response - not considered an error\n                    execution_results_acquisition::Error::AttemptToApplyDataAfterCompleted { .. } => {\n                        debug!(%error, \"late block_execution_results_or_chunk response\");\n                        return Ok(None);\n                    }\n                    // programmer error\n                    execution_results_acquisition::Error::BlockHashMismatch { .. }\n                    | execution_results_acquisition::Error::InvalidAttemptToApplyChecksum { .. }\n                    | execution_results_acquisition::Error::AttemptToApplyDataWhenMissingChecksum { .. 
}\n                    | execution_results_acquisition::Error::InvalidOutcomeFromApplyingChunk { .. }\n                    => {\n                        if was_waiting_for_execution_results {\n                            self.latch_decrement();\n                        }\n                        debug!(\n                            \"register_fetched_execution_results: BlockHashMismatch | \\\n                            InvalidAttemptToApplyChecksum | AttemptToApplyDataWhenMissingChecksum \\\n                            | InvalidOutcomeFromApplyingChunk\"\n                        );\n                    },\n                    // malicious peer if checksum is available.\n                    execution_results_acquisition::Error::ChunkCountMismatch { .. } => {\n                        let is_checkable = match &self.acquisition_state {\n                            BlockAcquisitionState::HaveGlobalState(\n                                _,\n                                _,\n                                _,\n                                execution_results_acquisition,\n                            ) => execution_results_acquisition.is_checkable(),\n                            _ => false,\n                        };\n                        debug!(is_checkable, \"register_fetched_execution_results: ChunkCountMismatch\");\n                        if is_checkable {\n                            if let Some(peer) = maybe_peer {\n                                self.disqualify_peer(peer);\n                            }\n                        }\n                        if was_waiting_for_execution_results {\n                            self.latch_decrement();\n                        }\n                    }\n                    // malicious peer\n                    execution_results_acquisition::Error::InvalidChunkCount { .. }\n                    | execution_results_acquisition::Error::ChecksumMismatch { .. 
}\n                    | execution_results_acquisition::Error::FailedToDeserialize { .. }\n                    | execution_results_acquisition::Error::ExecutionResultToTransactionHashLengthDiscrepancy { .. } => {\n                        debug!(\"register_fetched_execution_results: InvalidChunkCount | ChecksumMismatch | FailedToDeserialize | ExecutionResultToTransactionHashLengthDiscrepancy\");\n                        if let Some(peer) = maybe_peer {\n                            self.disqualify_peer(peer);\n                        }\n                        if was_waiting_for_execution_results {\n                            self.latch_decrement();\n                        }\n                    }\n                    // checksum unavailable, so unknown if this peer is malicious\n                    execution_results_acquisition::Error::ChunksWithDifferentChecksum { .. } => {\n                        debug!(\"register_fetched_execution_results: ChunksWithDifferentChecksum\");\n                        if was_waiting_for_execution_results {\n                            self.latch_decrement();\n                        }\n                    }\n                }\n                Err(Error::BlockAcquisition(\n                    BlockAcquisitionError::ExecutionResults(error),\n                ))\n            }\n            Err(error) => {\n                error!(%error, \"unexpected error\");\n                Ok(None)\n            }\n        }\n    }\n\n    pub(super) fn register_execution_results_stored_notification(&mut self) -> Result<(), Error> {\n        debug!(block_hash=%self.block_hash, \"register_execution_results_stored_notification\");\n        if let Err(err) = self\n            .acquisition_state\n            .register_execution_results_stored_notification(self.should_fetch_execution_state)\n        {\n            debug!(block_hash=%self.block_hash, \"register_execution_results_stored_notification: abort\");\n            self.abort();\n            return 
Err(Error::BlockAcquisition(err));\n        }\n        self.touch();\n        Ok(())\n    }\n\n    pub(super) fn waiting_for_transactions(&self) -> bool {\n        match &self.acquisition_state {\n            BlockAcquisitionState::HaveApprovalsHashes(_, _, transactions) => {\n                transactions.needs_transaction()\n            }\n            BlockAcquisitionState::HaveAllExecutionResults(_, _, transactions, checksum)\n                if self.should_fetch_execution_state =>\n            {\n                if !checksum.is_checkable() {\n                    transactions.needs_transaction()\n                } else {\n                    false\n                }\n            }\n            BlockAcquisitionState::Initialized(..)\n            | BlockAcquisitionState::HaveBlockHeader(..)\n            | BlockAcquisitionState::HaveWeakFinalitySignatures(..)\n            | BlockAcquisitionState::HaveBlock(..)\n            | BlockAcquisitionState::HaveGlobalState(..)\n            | BlockAcquisitionState::HaveAllExecutionResults(..)\n            | BlockAcquisitionState::HaveAllTransactions(..)\n            | BlockAcquisitionState::HaveStrictFinalitySignatures(..)\n            | BlockAcquisitionState::HaveExecutableBlock(..)\n            | BlockAcquisitionState::Failed(..)\n            | BlockAcquisitionState::Complete(..) 
=> false,\n        }\n    }\n\n    pub(super) fn register_transaction(\n        &mut self,\n        txn_id: TransactionId,\n        maybe_peer: Option<NodeId>,\n    ) -> Result<(), Error> {\n        let was_waiting_for_transactions = self.waiting_for_transactions();\n        let acceptance = self\n            .acquisition_state\n            .register_transaction(txn_id, self.should_fetch_execution_state);\n        self.handle_acceptance(maybe_peer, acceptance, was_waiting_for_transactions)\n    }\n\n    pub(super) fn register_peers(&mut self, peers: Vec<NodeId>) {\n        if peers.is_empty() {\n            // We asked for peers but none were provided. Exit early without\n            // clearing the latch so that we don't ask again needlessly.\n            trace!(\"BlockSynchronizer: no peers available\");\n            return;\n        }\n        if !(self.is_finished() || self.is_failed()) {\n            peers\n                .into_iter()\n                .for_each(|peer| self.peer_list.register_peer(peer));\n        }\n        self.touch();\n    }\n\n    fn handle_acceptance(\n        &mut self,\n        maybe_peer: Option<NodeId>,\n        acceptance: Result<Option<Acceptance>, BlockAcquisitionError>,\n        should_unlatch: bool,\n    ) -> Result<(), Error> {\n        match acceptance {\n            Ok(Some(Acceptance::NeededIt)) => {\n                // Got a useful response. Unlatch in all cases since we want to get the next item.\n                self.touch();\n                if let Some(peer) = maybe_peer {\n                    self.promote_peer(peer);\n                }\n            }\n            Ok(Some(Acceptance::HadIt)) => {\n                // Already had this item, which means that this was a late response for a previous\n                // fetch. 
We don't unlatch in this case and wait for a valid response.\n            }\n            Ok(None) => {\n                if should_unlatch {\n                    self.latch_decrement();\n                }\n            }\n            Err(error) => {\n                if let Some(peer) = maybe_peer {\n                    self.disqualify_peer(peer);\n                }\n\n                // If we were waiting for a response and the item was not good,\n                // decrement latch. Fetch will be retried when unlatched.\n                if should_unlatch {\n                    self.latch_decrement();\n                }\n\n                return Err(Error::BlockAcquisition(error));\n            }\n        }\n        Ok(())\n    }\n\n    fn flush_peers(&mut self) {\n        self.peer_list.flush();\n    }\n\n    fn touch(&mut self) {\n        self.last_progress = Timestamp::now();\n        self.latch.unlatch();\n    }\n\n    pub(crate) fn peer_list(&self) -> &PeerList {\n        &self.peer_list\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/block_synchronizer_progress.rs",
    "content": "use std::fmt::{Display, Formatter};\n\nuse casper_types::{BlockHash, EraId, Timestamp};\n\n#[derive(Debug)]\npub(crate) enum BlockSynchronizerProgress {\n    Idle,\n    Syncing(BlockHash, Option<u64>, Timestamp),\n    Executing(BlockHash, u64, EraId),\n    Synced(BlockHash, u64, EraId),\n}\n\nimpl BlockSynchronizerProgress {\n    pub(crate) fn is_active(&self) -> bool {\n        match self {\n            BlockSynchronizerProgress::Idle | BlockSynchronizerProgress::Synced(_, _, _) => false,\n            BlockSynchronizerProgress::Syncing(_, _, _)\n            | BlockSynchronizerProgress::Executing(_, _, _) => true,\n        }\n    }\n}\n\nimpl Display for BlockSynchronizerProgress {\n    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {\n        let display_height = |f: &mut Formatter<'_>, maybe_height: &Option<u64>| match maybe_height\n        {\n            Some(height) => write!(f, \"block {height}\"),\n            None => write!(f, \"unknown block height\"),\n        };\n        match self {\n            BlockSynchronizerProgress::Idle => write!(f, \"block synchronizer idle\"),\n            BlockSynchronizerProgress::Syncing(block_hash, block_height, timestamp) => {\n                write!(f, \"block synchronizer syncing \")?;\n                display_height(f, block_height)?;\n                write!(f, \"{}, {}\", timestamp, block_hash)\n            }\n            BlockSynchronizerProgress::Executing(block_hash, block_height, era_id) => {\n                write!(\n                    f,\n                    \"block synchronizer executing block {}, {}, {}\",\n                    block_height, block_hash, era_id\n                )\n            }\n            BlockSynchronizerProgress::Synced(block_hash, block_height, era_id) => {\n                write!(\n                    f,\n                    \"block synchronizer synced block {}, {}, {}\",\n                    block_height, block_hash, era_id\n                )\n            }\n      
  }\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/config.rs",
    "content": "use std::str::FromStr;\n\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse casper_types::TimeDiff;\n\nconst DEFAULT_MAX_PARALLEL_TRIE_FETCHES: u32 = 5000;\nconst DEFAULT_PEER_REFRESH_INTERVAL: &str = \"90sec\";\nconst DEFAULT_NEED_NEXT_INTERVAL: &str = \"1sec\";\nconst DEFAULT_DISCONNECT_DISHONEST_PEERS_INTERVAL: &str = \"10sec\";\nconst DEFAULT_LATCH_RESET_INTERVAL: &str = \"5sec\";\n\n/// Configuration options for fetching.\n#[derive(Copy, Clone, DataSize, Debug, Deserialize, Serialize)]\npub struct Config {\n    /// Maximum number of trie nodes to fetch in parallel.\n    pub max_parallel_trie_fetches: u32,\n    /// Time interval for the node to ask for refreshed peers.\n    pub peer_refresh_interval: TimeDiff,\n    /// Time interval for the node to check what the block synchronizer needs to acquire next.\n    pub need_next_interval: TimeDiff,\n    /// Time interval for recurring disconnection of dishonest peers.\n    pub disconnect_dishonest_peers_interval: TimeDiff,\n    /// Time interval for resetting the latch in block builders.\n    pub latch_reset_interval: TimeDiff,\n}\n\nimpl Default for Config {\n    fn default() -> Self {\n        Config {\n            max_parallel_trie_fetches: DEFAULT_MAX_PARALLEL_TRIE_FETCHES,\n            peer_refresh_interval: TimeDiff::from_str(DEFAULT_PEER_REFRESH_INTERVAL).unwrap(),\n            need_next_interval: TimeDiff::from_str(DEFAULT_NEED_NEXT_INTERVAL).unwrap(),\n            disconnect_dishonest_peers_interval: TimeDiff::from_str(\n                DEFAULT_DISCONNECT_DISHONEST_PEERS_INTERVAL,\n            )\n            .unwrap(),\n            latch_reset_interval: TimeDiff::from_str(DEFAULT_LATCH_RESET_INTERVAL).unwrap(),\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/error.rs",
    "content": "use std::fmt::{Display, Formatter};\n\nuse datasize::DataSize;\nuse derive_more::From;\n\nuse casper_types::{Digest, TransactionHash, TransactionId};\n\nuse super::transaction_acquisition;\n\nuse casper_types::BlockHash;\n\n#[derive(Clone, Copy, From, PartialEq, Eq, DataSize, Debug)]\npub(crate) enum BlockAcquisitionError {\n    InvalidStateTransition,\n    BlockHashMismatch {\n        expected: BlockHash,\n        actual: BlockHash,\n    },\n    RootHashMismatch {\n        expected: Digest,\n        actual: Digest,\n    },\n    InvalidAttemptToAcquireExecutionResults,\n    #[from]\n    InvalidAttemptToApplyApprovalsHashes(transaction_acquisition::Error),\n    InvalidAttemptToApplyTransaction {\n        txn_id: TransactionId,\n    },\n    MissingApprovalsHashes(TransactionHash),\n    InvalidAttemptToMarkComplete,\n    InvalidAttemptToEnqueueBlockForExecution,\n    ExecutionResults(super::execution_results_acquisition::Error),\n    InvalidTransactionType,\n}\n\nimpl Display for BlockAcquisitionError {\n    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {\n        match self {\n            BlockAcquisitionError::InvalidStateTransition => write!(f, \"invalid state transition\"),\n            BlockAcquisitionError::InvalidAttemptToMarkComplete => {\n                write!(f, \"invalid attempt to mark complete\")\n            }\n            BlockAcquisitionError::InvalidAttemptToAcquireExecutionResults => {\n                write!(\n                    f,\n                    \"invalid attempt to acquire execution results while in a terminal state\"\n                )\n            }\n            BlockAcquisitionError::BlockHashMismatch { expected, actual } => {\n                write!(\n                    f,\n                    \"block hash mismatch: expected {} actual: {}\",\n                    expected, actual\n                )\n            }\n            BlockAcquisitionError::RootHashMismatch { expected, actual } => write!(\n            
    f,\n                \"root hash mismatch: expected {} actual: {}\",\n                expected, actual\n            ),\n            BlockAcquisitionError::ExecutionResults(error) => {\n                write!(f, \"execution results error: {}\", error)\n            }\n            BlockAcquisitionError::InvalidAttemptToApplyApprovalsHashes(error) => write!(\n                f,\n                \"invalid attempt to apply approvals hashes results: {}\",\n                error\n            ),\n            BlockAcquisitionError::InvalidAttemptToEnqueueBlockForExecution => {\n                write!(f, \"invalid attempt to enqueue block for execution\")\n            }\n            BlockAcquisitionError::InvalidAttemptToApplyTransaction { txn_id } => {\n                write!(f, \"invalid attempt to apply transaction: {}\", txn_id)\n            }\n            BlockAcquisitionError::MissingApprovalsHashes(missing_txn_hash) => {\n                write!(\n                    f,\n                    \"missing approvals hashes for transaction {}\",\n                    missing_txn_hash\n                )\n            }\n            BlockAcquisitionError::InvalidTransactionType => {\n                write!(f, \"invalid transaction identifier\",)\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/event.rs",
    "content": "use std::fmt::{self, Display, Formatter};\n\nuse derive_more::From;\nuse either::Either;\nuse serde::Serialize;\n\nuse casper_storage::{\n    block_store::types::ApprovalsHashes, data_access_layer::ExecutionResultsChecksumResult,\n};\nuse casper_types::{Block, BlockHash, BlockHeader, FinalitySignature, Transaction};\n\nuse super::GlobalStateSynchronizerEvent;\nuse crate::{\n    components::{\n        block_synchronizer::{GlobalStateSynchronizerError, GlobalStateSynchronizerResponse},\n        fetcher::FetchResult,\n    },\n    effect::requests::BlockSynchronizerRequest,\n    types::{BlockExecutionResultsOrChunk, ExecutableBlock, LegacyDeploy, NodeId, SyncLeap},\n};\n\n#[derive(From, Debug, Serialize)]\npub(crate) enum Event {\n    Initialize,\n    #[from]\n    Request(BlockSynchronizerRequest),\n    DisconnectFromPeer(NodeId),\n    #[from]\n    MadeFinalizedBlock {\n        block_hash: BlockHash,\n        result: Option<ExecutableBlock>,\n    },\n    MarkBlockExecutionEnqueued(BlockHash),\n    MarkBlockExecuted(BlockHash),\n    MarkBlockCompleted {\n        block_hash: BlockHash,\n        is_new: bool,\n    },\n    #[from]\n    BlockHeaderFetched(FetchResult<BlockHeader>),\n    #[from]\n    BlockFetched(FetchResult<Block>),\n    #[from]\n    ApprovalsHashesFetched(FetchResult<ApprovalsHashes>),\n    #[from]\n    FinalitySignatureFetched(FetchResult<FinalitySignature>),\n    #[from]\n    SyncLeapFetched(FetchResult<SyncLeap>),\n    GlobalStateSynced {\n        block_hash: BlockHash,\n        #[serde(skip_serializing)]\n        result: Result<GlobalStateSynchronizerResponse, GlobalStateSynchronizerError>,\n    },\n    GotExecutionResultsChecksum {\n        block_hash: BlockHash,\n        #[serde(skip_serializing)]\n        result: ExecutionResultsChecksumResult,\n    },\n    TransactionFetched {\n        block_hash: BlockHash,\n        result: Either<FetchResult<LegacyDeploy>, FetchResult<Transaction>>,\n    },\n    ExecutionResultsFetched {\n        
block_hash: BlockHash,\n        result: FetchResult<BlockExecutionResultsOrChunk>,\n    },\n    ExecutionResultsStored(BlockHash),\n    AccumulatedPeers(BlockHash, Option<Vec<NodeId>>),\n    NetworkPeers(BlockHash, Vec<NodeId>),\n    #[from]\n    GlobalStateSynchronizer(GlobalStateSynchronizerEvent),\n}\n\nimpl Display for Event {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            Event::Request(BlockSynchronizerRequest::NeedNext { .. }) => {\n                write!(f, \"block synchronizer need next request\")\n            }\n            Event::Request(BlockSynchronizerRequest::SyncGlobalStates(global_states)) => {\n                write!(f, \"global states to be synced: [\")?;\n                for (block_hash, global_state_hash) in global_states {\n                    write!(\n                        f,\n                        \"(block {}, global state {}), \",\n                        block_hash, global_state_hash\n                    )?;\n                }\n                write!(f, \"]\")\n            }\n            Event::Request(_) => {\n                write!(f, \"block synchronizer request from effect builder\")\n            }\n            Event::Initialize => {\n                write!(f, \"initialize this component\")\n            }\n            Event::DisconnectFromPeer(peer) => {\n                write!(f, \"disconnected from peer {}\", peer)\n            }\n            Event::BlockHeaderFetched(Ok(fetched_item)) => {\n                write!(f, \"{}\", fetched_item)\n            }\n            Event::BlockHeaderFetched(Err(fetcher_error)) => {\n                write!(f, \"{}\", fetcher_error)\n            }\n            Event::BlockFetched(Ok(fetched_item)) => {\n                write!(f, \"{}\", fetched_item)\n            }\n            Event::BlockFetched(Err(fetcher_error)) => {\n                write!(f, \"{}\", fetcher_error)\n            }\n            Event::ApprovalsHashesFetched(Ok(fetched_item)) => 
{\n                write!(f, \"{}\", fetched_item)\n            }\n            Event::ApprovalsHashesFetched(Err(fetcher_error)) => {\n                write!(f, \"{}\", fetcher_error)\n            }\n            Event::FinalitySignatureFetched(Ok(fetched_item)) => {\n                write!(f, \"{}\", fetched_item)\n            }\n            Event::FinalitySignatureFetched(Err(fetcher_error)) => {\n                write!(f, \"{}\", fetcher_error)\n            }\n            Event::SyncLeapFetched(Ok(fetched_item)) => {\n                write!(f, \"{}\", fetched_item)\n            }\n            Event::SyncLeapFetched(Err(fetcher_error)) => {\n                write!(f, \"{}\", fetcher_error)\n            }\n            Event::GlobalStateSynced {\n                block_hash: _,\n                result,\n            } => match result {\n                Ok(response) => write!(f, \"synced global state under root {}\", response.hash()),\n                Err(error) => write!(f, \"failed to sync global state: {}\", error),\n            },\n            Event::GotExecutionResultsChecksum {\n                block_hash: _,\n                result,\n            } => match result.as_legacy() {\n                Ok(Some(digest)) => write!(f, \"got exec results checksum {}\", digest),\n                Ok(None) => write!(f, \"got no exec results checksum\"),\n                Err(error) => write!(f, \"failed to get exec results checksum: {}\", error),\n            },\n            Event::TransactionFetched {\n                block_hash: _,\n                result,\n            } => match result {\n                Either::Left(Ok(fetched_item)) => write!(f, \"{}\", fetched_item),\n                Either::Left(Err(fetcher_error)) => write!(f, \"{}\", fetcher_error),\n                Either::Right(Ok(fetched_item)) => write!(f, \"{}\", fetched_item),\n                Either::Right(Err(fetcher_error)) => write!(f, \"{}\", fetcher_error),\n            },\n            
Event::ExecutionResultsFetched {\n                block_hash: _,\n                result,\n            } => match result {\n                Ok(fetched_item) => write!(f, \"{}\", fetched_item),\n                Err(fetcher_error) => write!(f, \"{}\", fetcher_error),\n            },\n            Event::ExecutionResultsStored { .. } => write!(f, \"stored execution results\"),\n            Event::GlobalStateSynchronizer(event) => {\n                write!(f, \"{:?}\", event)\n            }\n            Event::NetworkPeers(..) => {\n                write!(f, \"network peers\")\n            }\n            Event::AccumulatedPeers(..) => {\n                write!(f, \"accumulated peers\")\n            }\n            Event::MadeFinalizedBlock { .. } => {\n                write!(f, \"made finalized block\")\n            }\n            Event::MarkBlockExecutionEnqueued(..) => {\n                write!(f, \"mark block enqueued for execution\")\n            }\n            Event::MarkBlockExecuted(..) => {\n                write!(f, \"block execution complete\")\n            }\n            Event::MarkBlockCompleted { .. } => {\n                write!(f, \"mark block completed\")\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/execution_results_acquisition/tests.rs",
    "content": "use assert_matches::assert_matches;\n\nuse casper_types::{\n    bytesrepr::ToBytes, execution::ExecutionResultV2, testing::TestRng, DeployHash,\n    TestBlockBuilder,\n};\n\nuse super::*;\nuse crate::{\n    components::block_synchronizer::tests::test_utils::chunks_with_proof_from_data,\n    types::BlockExecutionResultsOrChunkId,\n};\n\nconst NUM_TEST_EXECUTION_RESULTS: u64 = 100000;\n\n#[test]\nfn execution_results_chunks_apply_correctly() {\n    let rng = &mut TestRng::new();\n    let block = TestBlockBuilder::new().build(rng);\n\n    // Create chunkable execution results\n    let exec_results: Vec<ExecutionResult> = (0..NUM_TEST_EXECUTION_RESULTS)\n        .map(|_| ExecutionResult::from(ExecutionResultV2::random(rng)))\n        .collect();\n    let test_chunks = chunks_with_proof_from_data(&exec_results.to_bytes().unwrap());\n    assert!(test_chunks.len() >= 3);\n\n    // Start off with only one chunk applied\n    let mut chunks: HashMap<u64, ChunkWithProof> = HashMap::new();\n    let first_chunk = test_chunks.first_key_value().unwrap();\n    let last_chunk = test_chunks.last_key_value().unwrap();\n    chunks.insert(*first_chunk.0, first_chunk.1.clone());\n\n    // Insert all the other chunks except the last; skip the first one since it should have been\n    // added already\n    for (index, chunk) in test_chunks.iter().take(test_chunks.len() - 1).skip(1) {\n        let apply_result = apply_chunk(\n            *block.hash(),\n            ExecutionResultsChecksum::Uncheckable,\n            chunks,\n            chunk.clone(),\n            None,\n        );\n\n        // Check the index of the next chunk that should be applied\n        chunks = assert_matches!(apply_result, Ok(ApplyChunkOutcome::NeedNext{chunks, chunk_count, next}) => {\n            assert_eq!(next, index + 1);\n            assert_eq!(chunk_count as usize, test_chunks.len());\n            chunks\n        });\n    }\n\n    // Apply the last chunk, and expect to get back the execution 
results\n    let apply_result = apply_chunk(\n        *block.hash(),\n        ExecutionResultsChecksum::Uncheckable,\n        chunks,\n        last_chunk.1.clone(),\n        None,\n    );\n    assert_matches!(apply_result, Ok(ApplyChunkOutcome::Complete{execution_results}) => {\n        assert_eq!(execution_results, exec_results);\n    });\n}\n\n#[test]\nfn single_chunk_execution_results_dont_apply_other_chunks() {\n    let rng = &mut TestRng::new();\n    let test_chunks = chunks_with_proof_from_data(&[0; ChunkWithProof::CHUNK_SIZE_BYTES - 1]);\n    assert_eq!(test_chunks.len(), 1);\n\n    // We can't apply a chunk if the execution results are not chunked (only 1 chunk exists)\n    // Expect an error in this case.\n    let first_chunk = test_chunks.first_key_value().unwrap();\n\n    let apply_result = apply_chunk(\n        *TestBlockBuilder::new().build(rng).hash(),\n        ExecutionResultsChecksum::Uncheckable,\n        test_chunks.clone().into_iter().collect(),\n        first_chunk.1.clone(),\n        None,\n    );\n\n    assert_matches!(apply_result, Err(Error::InvalidChunkCount { .. 
}));\n}\n\n#[test]\nfn execution_results_chunks_from_block_with_different_hash_are_not_applied() {\n    let rng = &mut TestRng::new();\n    let block = TestBlockBuilder::new().build(rng);\n    let test_chunks = chunks_with_proof_from_data(&[0; ChunkWithProof::CHUNK_SIZE_BYTES * 3]);\n\n    // Start acquiring chunks\n    let mut acquisition = ExecutionResultsAcquisition::new_acquiring(\n        *block.hash(),\n        ExecutionResultsChecksum::Uncheckable,\n        test_chunks.clone().into_iter().take(1).collect(),\n        3,\n        1,\n    );\n    let exec_result = BlockExecutionResultsOrChunk::new_from_value(\n        *block.hash(),\n        ValueOrChunk::ChunkWithProof(test_chunks.last_key_value().unwrap().1.clone()),\n    );\n    acquisition = assert_matches!(\n        acquisition.apply_block_execution_results_or_chunk(exec_result, vec![]),\n        Ok((acq, Acceptance::NeededIt)) => acq\n    );\n    assert_matches!(acquisition, ExecutionResultsAcquisition::Acquiring { .. });\n\n    // Applying execution results from other block should return an error\n    let exec_result = BlockExecutionResultsOrChunk::new_from_value(\n        *TestBlockBuilder::new().build(rng).hash(),\n        ValueOrChunk::ChunkWithProof(test_chunks.first_key_value().unwrap().1.clone()),\n    );\n    assert_matches!(\n        acquisition.apply_block_execution_results_or_chunk(exec_result, vec![]),\n        Err(Error::BlockHashMismatch {expected, .. 
}) => assert_eq!(expected, *block.hash())\n    );\n}\n\n#[test]\nfn execution_results_chunks_from_trie_with_different_chunk_count_are_not_applied() {\n    let rng = &mut TestRng::new();\n    let test_chunks_1 = chunks_with_proof_from_data(&[0; ChunkWithProof::CHUNK_SIZE_BYTES * 3]);\n    assert_eq!(test_chunks_1.len(), 3);\n\n    let test_chunks_2 = chunks_with_proof_from_data(&[1; ChunkWithProof::CHUNK_SIZE_BYTES * 2]);\n    assert_eq!(test_chunks_2.len(), 2);\n\n    // If chunk tries have different number of chunks we shouldn't attempt to apply the incoming\n    // chunk and exit early\n    let bad_chunk = test_chunks_2.first_key_value().unwrap();\n\n    let apply_result = apply_chunk(\n        *TestBlockBuilder::new().build(rng).hash(),\n        ExecutionResultsChecksum::Uncheckable,\n        test_chunks_1.into_iter().take(2).collect(),\n        bad_chunk.1.clone(),\n        Some(3),\n    );\n\n    assert_matches!(apply_result, Err(Error::ChunkCountMismatch {expected, actual, ..}) if expected == 3 && actual == 2);\n}\n\n#[test]\nfn invalid_execution_results_from_applied_chunks_dont_deserialize() {\n    let rng = &mut TestRng::new();\n    let block = TestBlockBuilder::new().build(rng);\n\n    // Create some chunk data that cannot pe serialized into execution results\n    let test_chunks = chunks_with_proof_from_data(&[0; ChunkWithProof::CHUNK_SIZE_BYTES * 2]);\n    assert_eq!(test_chunks.len(), 2);\n    let last_chunk = test_chunks.last_key_value().unwrap();\n\n    // Expect that this data cannot be deserialized\n    let apply_result = apply_chunk(\n        *block.hash(),\n        ExecutionResultsChecksum::Uncheckable,\n        test_chunks.clone().into_iter().take(1).collect(),\n        last_chunk.1.clone(),\n        None,\n    );\n    assert_matches!(apply_result, Err(Error::FailedToDeserialize { .. 
}));\n}\n\n#[test]\nfn cant_apply_chunk_from_different_exec_results_or_invalid_checksum() {\n    let rng = &mut TestRng::new();\n    let block = TestBlockBuilder::new().build(rng);\n\n    // Create valid execution results\n    let valid_exec_results: Vec<ExecutionResult> = (0..NUM_TEST_EXECUTION_RESULTS)\n        .map(|_| ExecutionResult::from(ExecutionResultV2::random(rng)))\n        .collect();\n    let valid_test_chunks = chunks_with_proof_from_data(&valid_exec_results.to_bytes().unwrap());\n    assert!(valid_test_chunks.len() >= 3);\n\n    // Create some invalid chunks that are not part of the execution results we are building\n    let invalid_test_chunks =\n        chunks_with_proof_from_data(&[0; ChunkWithProof::CHUNK_SIZE_BYTES * 2]);\n    assert_eq!(invalid_test_chunks.len(), 2);\n\n    // Try to apply the invalid test chunks to the valid chunks and expect to fail since the\n    // checksums for the proofs are different between the chunks.\n    let apply_result = apply_chunk(\n        *block.hash(),\n        ExecutionResultsChecksum::Uncheckable,\n        valid_test_chunks.clone().into_iter().take(2).collect(),\n        invalid_test_chunks.first_key_value().unwrap().1.clone(),\n        None,\n    );\n    assert_matches!(apply_result, Err(Error::ChunksWithDifferentChecksum{block_hash: _, expected, actual}) => {\n        assert_eq!(expected, valid_test_chunks.first_key_value().unwrap().1.proof().root_hash());\n        assert_eq!(actual, invalid_test_chunks.first_key_value().unwrap().1.proof().root_hash());\n    });\n\n    // Same test but here we are explicitly specifying the execution results checksum that\n    // should be checked.\n    let apply_result = apply_chunk(\n        *block.hash(),\n        ExecutionResultsChecksum::Checkable(\n            valid_test_chunks\n                .first_key_value()\n                .unwrap()\n                .1\n                .proof()\n                .root_hash(),\n        ),\n        
valid_test_chunks.clone().into_iter().take(2).collect(),\n        invalid_test_chunks.first_key_value().unwrap().1.clone(),\n        None,\n    );\n    assert_matches!(apply_result, Err(Error::ChecksumMismatch{block_hash: _, expected, actual}) => {\n        assert_eq!(expected, valid_test_chunks.first_key_value().unwrap().1.proof().root_hash());\n        assert_eq!(actual, invalid_test_chunks.first_key_value().unwrap().1.proof().root_hash());\n    });\n}\n\n// Constructors for acquisition states used for testing and verifying generic properties of\n// these states\nimpl ExecutionResultsAcquisition {\n    fn new_needed(block_hash: BlockHash) -> Self {\n        let acq = Self::Needed { block_hash };\n        assert_eq!(acq.block_hash(), block_hash);\n        assert!(!acq.is_checkable());\n        assert_eq!(acq.needs_value_or_chunk(), None);\n        acq\n    }\n\n    fn new_pending(block_hash: BlockHash, checksum: ExecutionResultsChecksum) -> Self {\n        let acq = Self::Pending {\n            block_hash,\n            checksum,\n        };\n        assert_eq!(acq.block_hash(), block_hash);\n        assert_eq!(acq.is_checkable(), checksum.is_checkable());\n        assert_eq!(\n            acq.needs_value_or_chunk(),\n            Some((BlockExecutionResultsOrChunkId::new(block_hash), checksum))\n        );\n        acq\n    }\n\n    fn new_acquiring(\n        block_hash: BlockHash,\n        checksum: ExecutionResultsChecksum,\n        chunks: HashMap<u64, ChunkWithProof>,\n        chunk_count: u64,\n        next: u64,\n    ) -> Self {\n        let acq = Self::Acquiring {\n            block_hash,\n            checksum,\n            chunks,\n            chunk_count,\n            next,\n        };\n        assert_eq!(acq.block_hash(), block_hash);\n        assert_eq!(acq.is_checkable(), checksum.is_checkable());\n        assert_eq!(\n            acq.needs_value_or_chunk(),\n            Some((\n                
BlockExecutionResultsOrChunkId::new(block_hash).next_chunk(next),\n                checksum\n            ))\n        );\n        acq\n    }\n\n    fn new_complete(\n        block_hash: BlockHash,\n        checksum: ExecutionResultsChecksum,\n        results: HashMap<TransactionHash, ExecutionResult>,\n    ) -> Self {\n        let acq = Self::Complete {\n            block_hash,\n            checksum,\n            results,\n        };\n        assert_eq!(acq.block_hash(), block_hash);\n        assert_eq!(acq.is_checkable(), checksum.is_checkable());\n        assert_eq!(acq.needs_value_or_chunk(), None);\n        acq\n    }\n}\n\n#[test]\nfn acquisition_needed_state_has_correct_transitions() {\n    let rng = &mut TestRng::new();\n    let block = TestBlockBuilder::new().build(rng);\n\n    let acquisition = ExecutionResultsAcquisition::new_needed(*block.hash());\n\n    let exec_results_checksum = ExecutionResultsChecksum::Checkable(Digest::hash([0; 32]));\n    assert_matches!(\n        acquisition.clone().apply_checksum(exec_results_checksum),\n        Ok(ExecutionResultsAcquisition::Pending{block_hash, checksum}) if block_hash == *block.hash() && checksum == exec_results_checksum\n    );\n\n    assert_matches!(\n        acquisition.clone().apply_checksum(ExecutionResultsChecksum::Uncheckable),\n        Ok(ExecutionResultsAcquisition::Pending{block_hash, checksum}) if block_hash == *block.hash() && checksum == ExecutionResultsChecksum::Uncheckable\n    );\n\n    let mut test_chunks = chunks_with_proof_from_data(&[0; ChunkWithProof::CHUNK_SIZE_BYTES]);\n    assert_eq!(test_chunks.len(), 1);\n\n    let exec_result = BlockExecutionResultsOrChunk::new_from_value(\n        *block.hash(),\n        ValueOrChunk::ChunkWithProof(test_chunks.remove(&0).unwrap()),\n    );\n    assert_matches!(\n        acquisition.apply_block_execution_results_or_chunk(exec_result, vec![]),\n        Err(Error::AttemptToApplyDataWhenMissingChecksum { .. 
})\n    );\n}\n\n#[test]\nfn acquisition_pending_state_has_correct_transitions() {\n    let rng = &mut TestRng::new();\n    let block = TestBlockBuilder::new().build(rng);\n\n    let acquisition = ExecutionResultsAcquisition::new_pending(\n        *block.hash(),\n        ExecutionResultsChecksum::Uncheckable,\n    );\n    assert_matches!(\n        acquisition\n            .clone()\n            .apply_checksum(ExecutionResultsChecksum::Uncheckable),\n        Err(Error::InvalidAttemptToApplyChecksum { .. })\n    );\n\n    // Acquisition can transition from `Pending` to `Complete` if a value and deploy hashes are\n    // applied\n    let execution_results = vec![ExecutionResult::from(ExecutionResultV2::random(rng))];\n    let exec_result = BlockExecutionResultsOrChunk::new_from_value(\n        *block.hash(),\n        ValueOrChunk::new(execution_results, 0).unwrap(),\n    );\n    assert_matches!(\n        acquisition\n            .clone()\n            .apply_block_execution_results_or_chunk(exec_result.clone(), vec![]),\n        Err(Error::ExecutionResultToTransactionHashLengthDiscrepancy { .. })\n    );\n    assert_matches!(\n        acquisition.clone().apply_block_execution_results_or_chunk(\n            exec_result,\n            vec![DeployHash::new(Digest::hash([0; 32])).into()]\n        ),\n        Ok((\n            ExecutionResultsAcquisition::Complete { .. 
},\n            Acceptance::NeededIt\n        ))\n    );\n\n    // Acquisition can transition from `Pending` to `Acquiring` if a single chunk is applied\n    let exec_results: Vec<ExecutionResult> = (0..NUM_TEST_EXECUTION_RESULTS)\n        .map(|_| ExecutionResult::from(ExecutionResultV2::random(rng)))\n        .collect();\n    let test_chunks = chunks_with_proof_from_data(&exec_results.to_bytes().unwrap());\n    assert!(test_chunks.len() >= 3);\n\n    let first_chunk = test_chunks.first_key_value().unwrap().1;\n    let exec_result = BlockExecutionResultsOrChunk::new_from_value(\n        *block.hash(),\n        ValueOrChunk::ChunkWithProof(first_chunk.clone()),\n    );\n    let transaction_hashes: Vec<TransactionHash> = (0..NUM_TEST_EXECUTION_RESULTS)\n        .map(|index| DeployHash::new(Digest::hash(index.to_bytes().unwrap())).into())\n        .collect();\n    assert_matches!(\n        acquisition.apply_block_execution_results_or_chunk(exec_result, transaction_hashes),\n        Ok((\n            ExecutionResultsAcquisition::Acquiring { .. 
},\n            Acceptance::NeededIt\n        ))\n    );\n}\n\n#[test]\nfn acquisition_acquiring_state_has_correct_transitions() {\n    let rng = &mut TestRng::new();\n    let block = TestBlockBuilder::new().build(rng);\n\n    // Generate valid execution results that are chunkable\n    let exec_results: Vec<ExecutionResult> = (0..NUM_TEST_EXECUTION_RESULTS)\n        .map(|_| ExecutionResult::from(ExecutionResultV2::random(rng)))\n        .collect();\n    let test_chunks = chunks_with_proof_from_data(&exec_results.to_bytes().unwrap());\n    assert!(test_chunks.len() >= 3);\n\n    let mut acquisition = ExecutionResultsAcquisition::new_acquiring(\n        *block.hash(),\n        ExecutionResultsChecksum::Uncheckable,\n        test_chunks.clone().into_iter().take(1).collect(),\n        test_chunks.len() as u64,\n        1,\n    );\n    assert_matches!(\n        acquisition\n            .clone()\n            .apply_checksum(ExecutionResultsChecksum::Uncheckable),\n        Err(Error::InvalidAttemptToApplyChecksum { .. })\n    );\n\n    // Apply all chunks except the last and check if the acquisition state remains `Acquiring`\n    for (_, chunk) in test_chunks.iter().take(test_chunks.len() - 1).skip(1) {\n        let exec_result = BlockExecutionResultsOrChunk::new_from_value(\n            *block.hash(),\n            ValueOrChunk::ChunkWithProof(chunk.clone()),\n        );\n        acquisition = assert_matches!(\n            acquisition.apply_block_execution_results_or_chunk(exec_result, vec![]),\n            Ok((acq, Acceptance::NeededIt)) => acq\n        );\n        assert_matches!(acquisition, ExecutionResultsAcquisition::Acquiring { .. 
});\n    }\n\n    // Now apply the last chunk and check if the acquisition completes\n    let last_chunk = test_chunks.last_key_value().unwrap().1;\n    let exec_result = BlockExecutionResultsOrChunk::new_from_value(\n        *block.hash(),\n        ValueOrChunk::ChunkWithProof(last_chunk.clone()),\n    );\n    let transaction_hashes: Vec<TransactionHash> = (0..NUM_TEST_EXECUTION_RESULTS)\n        .map(|index| DeployHash::new(Digest::hash(index.to_bytes().unwrap())).into())\n        .collect();\n    acquisition = assert_matches!(\n        acquisition.apply_block_execution_results_or_chunk(exec_result, transaction_hashes),\n        Ok((acq, Acceptance::NeededIt)) => acq\n    );\n    assert_matches!(acquisition, ExecutionResultsAcquisition::Complete { .. });\n}\n\n#[test]\nfn acquisition_acquiring_state_gets_overridden_by_value() {\n    let rng = &mut TestRng::new();\n    let block = TestBlockBuilder::new().build(rng);\n    let test_chunks = chunks_with_proof_from_data(&[0; ChunkWithProof::CHUNK_SIZE_BYTES * 3]);\n\n    // Start acquiring chunks\n    let mut acquisition = ExecutionResultsAcquisition::new_acquiring(\n        *block.hash(),\n        ExecutionResultsChecksum::Uncheckable,\n        test_chunks.clone().into_iter().take(1).collect(),\n        3,\n        1,\n    );\n    let exec_result = BlockExecutionResultsOrChunk::new_from_value(\n        *block.hash(),\n        ValueOrChunk::ChunkWithProof(test_chunks.last_key_value().unwrap().1.clone()),\n    );\n    acquisition = assert_matches!(\n        acquisition.apply_block_execution_results_or_chunk(exec_result, vec![]),\n        Ok((acq, Acceptance::NeededIt)) => acq\n    );\n    assert_matches!(acquisition, ExecutionResultsAcquisition::Acquiring { .. });\n\n    // Assume we got a full execution result for this block.\n    // Since we don't have a checksum for the execution results, we can't really determine which\n    // data is the better one. 
We expect to overwrite the execution results chunks that\n    // we previously acquired with this complete result.\n    let execution_results = vec![ExecutionResult::from(ExecutionResultV2::random(rng))];\n    let exec_result = BlockExecutionResultsOrChunk::new_from_value(\n        *block.hash(),\n        ValueOrChunk::new(execution_results, 0).unwrap(),\n    );\n    assert_matches!(\n        acquisition\n            .clone()\n            .apply_block_execution_results_or_chunk(exec_result.clone(), vec![]),\n        Err(Error::ExecutionResultToTransactionHashLengthDiscrepancy { .. })\n    );\n\n    assert_matches!(\n        acquisition.apply_block_execution_results_or_chunk(\n            exec_result,\n            vec![DeployHash::new(Digest::hash([0; 32])).into()]\n        ),\n        Ok((\n            ExecutionResultsAcquisition::Complete { .. },\n            Acceptance::NeededIt\n        ))\n    );\n}\n\n#[test]\nfn acquisition_complete_state_has_correct_transitions() {\n    let rng = &mut TestRng::new();\n    let block = TestBlockBuilder::new().build(rng);\n\n    let acquisition = ExecutionResultsAcquisition::new_complete(\n        *block.hash(),\n        ExecutionResultsChecksum::Uncheckable,\n        HashMap::new(),\n    );\n\n    let exec_results_checksum = ExecutionResultsChecksum::Checkable(Digest::hash([0; 32]));\n    assert_matches!(\n        acquisition.clone().apply_checksum(exec_results_checksum),\n        Err(Error::InvalidAttemptToApplyChecksum { .. })\n    );\n\n    assert_matches!(\n        acquisition\n            .clone()\n            .apply_checksum(ExecutionResultsChecksum::Uncheckable),\n        Err(Error::InvalidAttemptToApplyChecksum { .. 
})\n    );\n\n    let mut test_chunks = chunks_with_proof_from_data(&[0; ChunkWithProof::CHUNK_SIZE_BYTES]);\n    assert_eq!(test_chunks.len(), 1);\n\n    let exec_result = BlockExecutionResultsOrChunk::new_from_value(\n        *block.hash(),\n        ValueOrChunk::ChunkWithProof(test_chunks.remove(&0).unwrap()),\n    );\n    assert_matches!(\n        acquisition.apply_block_execution_results_or_chunk(exec_result, vec![]),\n        Err(Error::AttemptToApplyDataAfterCompleted { .. })\n    );\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/execution_results_acquisition.rs",
    "content": "#[cfg(test)]\nmod tests;\n\nuse std::{\n    collections::HashMap,\n    fmt::{self, Display, Formatter},\n};\n\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\nuse tracing::{debug, error};\n\nuse casper_types::{\n    bytesrepr, execution::ExecutionResult, BlockHash, ChunkWithProof, Digest, TransactionHash,\n};\n\nuse super::block_acquisition::Acceptance;\nuse crate::types::{BlockExecutionResultsOrChunk, BlockExecutionResultsOrChunkId, ValueOrChunk};\n\n#[derive(Clone, Copy, PartialEq, Eq, DataSize, Debug, Serialize, Deserialize)]\npub(crate) enum ExecutionResultsChecksum {\n    // due to historical reasons, pre-1.5 chunks do not support Merkle proof checking\n    Uncheckable,\n    // can be Merkle proof checked\n    Checkable(Digest),\n}\n\nimpl Display for ExecutionResultsChecksum {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            Self::Uncheckable => write!(f, \"uncheckable execution results\"),\n            Self::Checkable(digest) => write!(f, \"execution results checksum {}\", digest),\n        }\n    }\n}\n\nimpl ExecutionResultsChecksum {\n    pub(super) fn is_checkable(&self) -> bool {\n        matches!(self, ExecutionResultsChecksum::Checkable(_))\n    }\n}\n\n#[derive(Clone, Copy, PartialEq, Eq, DataSize, Debug)]\npub(crate) enum Error {\n    BlockHashMismatch {\n        expected: BlockHash,\n        actual: BlockHash,\n    },\n    ChunkCountMismatch {\n        block_hash: BlockHash,\n        expected: u64,\n        actual: u64,\n    },\n    InvalidChunkCount {\n        block_hash: BlockHash,\n    },\n    InvalidAttemptToApplyChecksum {\n        block_hash: BlockHash,\n    },\n    AttemptToApplyDataAfterCompleted {\n        block_hash: BlockHash,\n    },\n    AttemptToApplyDataWhenMissingChecksum {\n        block_hash: BlockHash,\n    },\n    ChecksumMismatch {\n        block_hash: BlockHash,\n        expected: Digest,\n        actual: Digest,\n    },\n    
ChunksWithDifferentChecksum {\n        block_hash: BlockHash,\n        expected: Digest,\n        actual: Digest,\n    },\n    FailedToDeserialize {\n        block_hash: BlockHash,\n    },\n    ExecutionResultToTransactionHashLengthDiscrepancy {\n        block_hash: BlockHash,\n        expected: usize,\n        actual: usize,\n    },\n    InvalidOutcomeFromApplyingChunk {\n        block_hash: BlockHash,\n    },\n}\n\nimpl Display for Error {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            Error::BlockHashMismatch { expected, actual } => {\n                write!(\n                    f,\n                    \"block hash mismatch: expected {} actual: {}\",\n                    expected, actual\n                )\n            }\n            Error::ExecutionResultToTransactionHashLengthDiscrepancy {\n                block_hash,\n                expected,\n                actual,\n            } => {\n                write!(\n                    f,\n                    \"discrepancy between the number of transactions and corresponding execution results for block_hash: {}; expected {} actual: {}\",\n                    block_hash, expected, actual\n                )\n            }\n            Error::ChunkCountMismatch {\n                block_hash,\n                expected,\n                actual,\n            } => {\n                write!(\n                    f,\n                    \"chunk count mismatch for block_hash: {}; expected {} actual: {}\",\n                    block_hash, expected, actual\n                )\n            }\n            Error::InvalidChunkCount { block_hash } => {\n                write!(\n                    f,\n                    \"invalid chunk count for block_hash: {}; execution results should either be a complete single value or come in 2 or more chunks\",\n                    block_hash\n                )\n            }\n            Error::InvalidAttemptToApplyChecksum { block_hash } => 
{\n                write!(\n                    f,\n                    \"attempt to apply checksum to a non-pending item, block_hash: {}\",\n                    block_hash\n                )\n            }\n            Error::AttemptToApplyDataAfterCompleted { block_hash } => {\n                write!(\n                    f,\n                    \"attempt to apply execution results for already completed block_hash: {}\",\n                    block_hash\n                )\n            }\n            Error::AttemptToApplyDataWhenMissingChecksum { block_hash } => {\n                write!(\n                    f,\n                    \"attempt to apply execution results before check sum for block_hash: {}\",\n                    block_hash\n                )\n            }\n            Error::ChecksumMismatch {\n                block_hash,\n                expected,\n                actual,\n            } => {\n                write!(\n                    f,\n                    \"root hash mismatch for block_hash: {}; expected {} actual: {}\",\n                    block_hash, expected, actual\n                )\n            }\n            Error::FailedToDeserialize { block_hash } => {\n                write!(\n                    f,\n                    \"failed to deserialize execution effects for block_hash: {}\",\n                    block_hash,\n                )\n            }\n            Error::ChunksWithDifferentChecksum {\n                block_hash,\n                expected,\n                actual,\n            } => write!(\n                f,\n                \"chunks with different checksum for block_hash: {}; expected {} actual: {}\",\n                block_hash, expected, actual\n            ),\n            Error::InvalidOutcomeFromApplyingChunk { block_hash } => write!(\n                f,\n                \"cannot have already had chunk if in pending mode for block hash: {}\",\n                block_hash\n            ),\n        }\n    
}\n}\n\n#[derive(Clone, PartialEq, Eq, DataSize, Debug)]\npub(super) enum ExecutionResultsAcquisition {\n    Needed {\n        block_hash: BlockHash,\n    },\n    Pending {\n        block_hash: BlockHash,\n        checksum: ExecutionResultsChecksum,\n    },\n    Acquiring {\n        block_hash: BlockHash,\n        checksum: ExecutionResultsChecksum,\n        chunks: HashMap<u64, ChunkWithProof>,\n        chunk_count: u64,\n        next: u64,\n    },\n    Complete {\n        block_hash: BlockHash,\n        checksum: ExecutionResultsChecksum,\n        results: HashMap<TransactionHash, ExecutionResult>,\n    },\n}\n\nimpl Display for ExecutionResultsAcquisition {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            ExecutionResultsAcquisition::Needed { block_hash } => {\n                write!(f, \"Needed: {}\", block_hash)\n            }\n            ExecutionResultsAcquisition::Pending {\n                block_hash,\n                checksum: _,\n            } => write!(f, \"Pending: {}\", block_hash),\n            ExecutionResultsAcquisition::Acquiring {\n                block_hash,\n                checksum: _,\n                chunks: _,\n                chunk_count,\n                next,\n            } => write!(\n                f,\n                \"Acquiring: {}, chunk_count={}, next={}\",\n                block_hash, chunk_count, next\n            ),\n            ExecutionResultsAcquisition::Complete {\n                block_hash,\n                checksum: _,\n                results: _,\n            } => write!(f, \"Complete: {}\", block_hash),\n        }\n    }\n}\n\nimpl ExecutionResultsAcquisition {\n    pub(super) fn needs_value_or_chunk(\n        &self,\n    ) -> Option<(BlockExecutionResultsOrChunkId, ExecutionResultsChecksum)> {\n        match self {\n            ExecutionResultsAcquisition::Needed { .. }\n            | ExecutionResultsAcquisition::Complete { .. 
} => None,\n            ExecutionResultsAcquisition::Pending {\n                block_hash,\n                checksum,\n            } => Some((BlockExecutionResultsOrChunkId::new(*block_hash), *checksum)),\n            ExecutionResultsAcquisition::Acquiring {\n                block_hash,\n                checksum,\n                next,\n                ..\n            } => Some((\n                BlockExecutionResultsOrChunkId::new(*block_hash).next_chunk(*next),\n                *checksum,\n            )),\n        }\n    }\n\n    pub(super) fn apply_checksum(self, checksum: ExecutionResultsChecksum) -> Result<Self, Error> {\n        match self {\n            ExecutionResultsAcquisition::Needed { block_hash } => {\n                debug!(\"apply_checksum - Needed\");\n                Ok(ExecutionResultsAcquisition::Pending {\n                    block_hash,\n                    checksum,\n                })\n            }\n            ExecutionResultsAcquisition::Pending { block_hash, .. }\n            | ExecutionResultsAcquisition::Acquiring { block_hash, .. }\n            | ExecutionResultsAcquisition::Complete { block_hash, .. 
} => {\n                debug!(\"apply_checksum - Pending | Acquiring | Complete\");\n                Err(Error::InvalidAttemptToApplyChecksum { block_hash })\n            }\n        }\n    }\n\n    pub(super) fn apply_block_execution_results_or_chunk(\n        self,\n        block_execution_results_or_chunk: BlockExecutionResultsOrChunk,\n        transaction_hashes: Vec<TransactionHash>,\n    ) -> Result<(Self, Acceptance), Error> {\n        let block_hash = *block_execution_results_or_chunk.block_hash();\n        let value = block_execution_results_or_chunk.into_value();\n\n        debug!(%block_hash, state=%self, \"apply_block_execution_results_or_chunk\");\n\n        let expected_block_hash = self.block_hash();\n        if expected_block_hash != block_hash {\n            debug!(\n                %block_hash,\n                \"apply_block_execution_results_or_chunk: Error::BlockHashMismatch\"\n            );\n            return Err(Error::BlockHashMismatch {\n                expected: expected_block_hash,\n                actual: block_hash,\n            });\n        }\n\n        let (checksum, execution_results) = match (self, value) {\n            (\n                ExecutionResultsAcquisition::Pending { checksum, .. },\n                ValueOrChunk::Value(execution_results),\n            )\n            | (\n                ExecutionResultsAcquisition::Acquiring { checksum, .. },\n                ValueOrChunk::Value(execution_results),\n            ) => {\n                debug!(\n                    \"apply_block_execution_results_or_chunk: (Pending, Value) | (Acquiring, Value)\"\n                );\n                (checksum, execution_results)\n            }\n            (\n                ExecutionResultsAcquisition::Pending { checksum, .. 
},\n                ValueOrChunk::ChunkWithProof(chunk),\n            ) => {\n                debug!(\"apply_block_execution_results_or_chunk: (Pending, ChunkWithProof)\");\n                match apply_chunk(block_hash, checksum, HashMap::new(), chunk, None) {\n                    Ok(ApplyChunkOutcome::HadIt { .. }) => {\n                        error!(\"cannot have already had chunk if in pending mode\");\n                        return Err(Error::InvalidOutcomeFromApplyingChunk { block_hash });\n                    }\n                    Ok(ApplyChunkOutcome::NeedNext {\n                        chunks,\n                        chunk_count,\n                        next,\n                    }) => {\n                        let acquisition = ExecutionResultsAcquisition::Acquiring {\n                            block_hash,\n                            checksum,\n                            chunks,\n                            chunk_count,\n                            next,\n                        };\n                        let acceptance = Acceptance::NeededIt;\n                        return Ok((acquisition, acceptance));\n                    }\n                    Ok(ApplyChunkOutcome::Complete { execution_results }) => {\n                        (checksum, execution_results)\n                    }\n                    Err(err) => {\n                        return Err(err);\n                    }\n                }\n            }\n            (\n                ExecutionResultsAcquisition::Acquiring {\n                    checksum,\n                    chunks,\n                    chunk_count,\n                    next,\n                    ..\n                },\n                ValueOrChunk::ChunkWithProof(chunk),\n            ) => {\n                debug!(\"apply_block_execution_results_or_chunk: (Acquiring, ChunkWithProof)\");\n                match apply_chunk(block_hash, checksum, chunks, chunk, Some(chunk_count)) {\n                    
Ok(ApplyChunkOutcome::HadIt { chunks }) => {\n                        let acquisition = ExecutionResultsAcquisition::Acquiring {\n                            block_hash,\n                            checksum,\n                            chunks,\n                            chunk_count,\n                            next,\n                        };\n                        let acceptance = Acceptance::HadIt;\n                        return Ok((acquisition, acceptance));\n                    }\n                    Ok(ApplyChunkOutcome::NeedNext {\n                        chunks,\n                        chunk_count,\n                        next,\n                    }) => {\n                        let acquisition = ExecutionResultsAcquisition::Acquiring {\n                            block_hash,\n                            checksum,\n                            chunks,\n                            chunk_count,\n                            next,\n                        };\n                        let acceptance = Acceptance::NeededIt;\n                        return Ok((acquisition, acceptance));\n                    }\n                    Ok(ApplyChunkOutcome::Complete { execution_results }) => {\n                        (checksum, execution_results)\n                    }\n                    Err(err) => {\n                        return Err(err);\n                    }\n                }\n            }\n            (ExecutionResultsAcquisition::Needed { block_hash }, _) => {\n                debug!(\"apply_block_execution_results_or_chunk: (Needed, _)\");\n                return Err(Error::AttemptToApplyDataWhenMissingChecksum { block_hash });\n            }\n            (ExecutionResultsAcquisition::Complete { .. 
}, _) => {\n                debug!(\"apply_block_execution_results_or_chunk: (Complete, _)\");\n                return Err(Error::AttemptToApplyDataAfterCompleted { block_hash });\n            }\n        };\n\n        if transaction_hashes.len() != execution_results.len() {\n            debug!(\n                %block_hash,\n                \"apply_block_execution_results_or_chunk: Error::ExecutionResultToTransactionHashLengthDiscrepancy\"\n            );\n            return Err(Error::ExecutionResultToTransactionHashLengthDiscrepancy {\n                block_hash,\n                expected: transaction_hashes.len(),\n                actual: execution_results.len(),\n            });\n        }\n        let results = transaction_hashes\n            .into_iter()\n            .zip(execution_results)\n            .collect();\n        debug!(\n            %block_hash,\n            \"apply_block_execution_results_or_chunk: returning ExecutionResultsAcquisition::Complete\"\n        );\n        let acceptance = Acceptance::NeededIt;\n        let acquisition = ExecutionResultsAcquisition::Complete {\n            block_hash,\n            results,\n            checksum,\n        };\n        Ok((acquisition, acceptance))\n    }\n\n    pub(super) fn is_checkable(&self) -> bool {\n        match self {\n            ExecutionResultsAcquisition::Needed { .. } => false,\n            ExecutionResultsAcquisition::Pending { checksum, .. }\n            | ExecutionResultsAcquisition::Acquiring { checksum, .. }\n            | ExecutionResultsAcquisition::Complete { checksum, .. } => checksum.is_checkable(),\n        }\n    }\n\n    fn block_hash(&self) -> BlockHash {\n        match self {\n            ExecutionResultsAcquisition::Needed { block_hash }\n            | ExecutionResultsAcquisition::Pending { block_hash, .. }\n            | ExecutionResultsAcquisition::Acquiring { block_hash, .. }\n            | ExecutionResultsAcquisition::Complete { block_hash, .. 
} => *block_hash,\n        }\n    }\n}\n\n#[derive(Debug)]\nenum ApplyChunkOutcome {\n    HadIt {\n        chunks: HashMap<u64, ChunkWithProof>,\n    },\n    NeedNext {\n        chunks: HashMap<u64, ChunkWithProof>,\n        chunk_count: u64,\n        next: u64,\n    },\n    Complete {\n        execution_results: Vec<ExecutionResult>,\n    },\n}\n\nimpl ApplyChunkOutcome {\n    fn need_next(chunks: HashMap<u64, ChunkWithProof>, chunk_count: u64, next: u64) -> Self {\n        ApplyChunkOutcome::NeedNext {\n            chunks,\n            chunk_count,\n            next,\n        }\n    }\n\n    fn execution_results(execution_results: Vec<ExecutionResult>) -> Self {\n        ApplyChunkOutcome::Complete { execution_results }\n    }\n}\n\nfn apply_chunk(\n    block_hash: BlockHash,\n    checksum: ExecutionResultsChecksum,\n    mut chunks: HashMap<u64, ChunkWithProof>,\n    chunk: ChunkWithProof,\n    expected_count: Option<u64>,\n) -> Result<ApplyChunkOutcome, Error> {\n    let digest = chunk.proof().root_hash();\n    let index = chunk.proof().index();\n    let chunk_count = chunk.proof().count();\n    if chunk_count == 1 {\n        debug!(%block_hash, \"apply_chunk: Error::InvalidChunkCount\");\n        return Err(Error::InvalidChunkCount { block_hash });\n    }\n\n    if let Some(expected) = expected_count {\n        if expected != chunk_count {\n            debug!(%block_hash, \"apply_chunk: Error::ChunkCountMismatch\");\n            return Err(Error::ChunkCountMismatch {\n                block_hash,\n                expected,\n                actual: chunk_count,\n            });\n        }\n    }\n\n    // ExecutionResultsChecksum::Uncheckable has no checksum, otherwise check it\n    if let ExecutionResultsChecksum::Checkable(expected) = checksum {\n        if expected != digest {\n            debug!(%block_hash, \"apply_chunk: Error::ChecksumMismatch\");\n            return Err(Error::ChecksumMismatch {\n                block_hash,\n                expected,\n    
            actual: digest,\n            });\n        }\n    } else if let Some(other_chunk) = chunks.values().next() {\n        let existing_chunk_digest = other_chunk.proof().root_hash();\n        if existing_chunk_digest != digest {\n            debug!(%block_hash, \"apply_chunk: Error::ChunksWithDifferentChecksum\");\n            return Err(Error::ChunksWithDifferentChecksum {\n                block_hash,\n                expected: existing_chunk_digest,\n                actual: digest,\n            });\n        }\n    }\n\n    if chunks.insert(index, chunk).is_some() {\n        debug!(%block_hash, index, \"apply_chunk: already had it\");\n        return Ok(ApplyChunkOutcome::HadIt { chunks });\n    };\n\n    match (0..chunk_count).find(|idx| !chunks.contains_key(idx)) {\n        Some(next) => Ok(ApplyChunkOutcome::need_next(chunks, chunk_count, next)),\n        None => {\n            let serialized: Vec<u8> = (0..chunk_count)\n                .filter_map(|index| chunks.get(&index))\n                .flat_map(|c| c.chunk())\n                .copied()\n                .collect();\n            match bytesrepr::deserialize(serialized) {\n                Ok(results) => {\n                    debug!(%block_hash, \"apply_chunk: ApplyChunkOutcome::execution_results\");\n                    Ok(ApplyChunkOutcome::execution_results(results))\n                }\n                Err(error) => {\n                    error!(%error, \"failed to deserialize execution results\");\n                    Err(Error::FailedToDeserialize { block_hash })\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/global_state_synchronizer/tests.rs",
    "content": "use std::time::Duration;\n\nuse futures::channel::oneshot;\nuse rand::Rng;\n\nuse casper_storage::global_state::error::Error as GlobalStateError;\nuse casper_types::{bytesrepr::Bytes, testing::TestRng, TestBlockBuilder};\n\nuse super::*;\nuse crate::{\n    reactor::{EventQueueHandle, QueueKind, Scheduler},\n    utils,\n};\n\n/// Event for the mock reactor.\n#[derive(Debug)]\nenum ReactorEvent {\n    TrieAccumulatorRequest(TrieAccumulatorRequest),\n    ContractRuntimeRequest(ContractRuntimeRequest),\n}\n\nimpl From<ContractRuntimeRequest> for ReactorEvent {\n    fn from(req: ContractRuntimeRequest) -> ReactorEvent {\n        ReactorEvent::ContractRuntimeRequest(req)\n    }\n}\n\nimpl From<TrieAccumulatorRequest> for ReactorEvent {\n    fn from(req: TrieAccumulatorRequest) -> ReactorEvent {\n        ReactorEvent::TrieAccumulatorRequest(req)\n    }\n}\n\nstruct MockReactor {\n    scheduler: &'static Scheduler<ReactorEvent>,\n    effect_builder: EffectBuilder<ReactorEvent>,\n}\n\nimpl MockReactor {\n    fn new() -> Self {\n        let scheduler = utils::leak(Scheduler::new(QueueKind::weights(), None));\n        let event_queue_handle = EventQueueHandle::without_shutdown(scheduler);\n        let effect_builder = EffectBuilder::new(event_queue_handle);\n        MockReactor {\n            scheduler,\n            effect_builder,\n        }\n    }\n\n    fn effect_builder(&self) -> EffectBuilder<ReactorEvent> {\n        self.effect_builder\n    }\n\n    async fn expect_trie_accumulator_request(&self, hash: &Digest) {\n        let ((_ancestor, reactor_event), _) = self.scheduler.pop().await;\n        match reactor_event {\n            ReactorEvent::TrieAccumulatorRequest(request) => {\n                assert_eq!(request.hash, *hash);\n            }\n            _ => {\n                unreachable!();\n            }\n        };\n    }\n\n    async fn expect_put_trie_request(&self, trie: &TrieRaw) {\n        let ((_ancestor, reactor_event), _) = 
self.scheduler.pop().await;\n        match reactor_event {\n            ReactorEvent::ContractRuntimeRequest(ContractRuntimeRequest::PutTrie {\n                request,\n                responder: _,\n            }) => {\n                assert_eq!(request.raw(), trie);\n            }\n            _ => {\n                unreachable!();\n            }\n        };\n    }\n}\n\nfn random_test_trie(rng: &mut TestRng) -> TrieRaw {\n    let data: Vec<u8> = (0..64).map(|_| rng.gen()).collect();\n    TrieRaw::new(Bytes::from(data))\n}\n\nfn random_sync_global_state_request(\n    rng: &mut TestRng,\n    responder: Responder<Result<Response, Error>>,\n) -> (SyncGlobalStateRequest, TrieRaw) {\n    let block = TestBlockBuilder::new().build(rng);\n    let trie = random_test_trie(rng);\n\n    // Create a request\n    (\n        SyncGlobalStateRequest {\n            block_hash: *block.hash(),\n            state_root_hash: Digest::hash(trie.inner()),\n            responder,\n        },\n        trie,\n    )\n}\n\n#[tokio::test]\nasync fn fetch_request_without_peers_is_canceled() {\n    let mut rng = TestRng::new();\n    let reactor = MockReactor::new();\n    let mut global_state_synchronizer = GlobalStateSynchronizer::new(rng.gen_range(2..10));\n\n    // Create a responder to allow assertion of the error\n    let (sender, receiver) = oneshot::channel();\n    // Create a request without peers\n    let (request, _) =\n        random_sync_global_state_request(&mut rng, Responder::without_shutdown(sender));\n\n    // Check how the request is handled by the block synchronizer.\n    let mut effects = global_state_synchronizer.handle_request(request, reactor.effect_builder());\n    assert_eq!(effects.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_some());\n    assert_eq!(global_state_synchronizer.fetch_queue.queue.len(), 1);\n    assert_eq!(global_state_synchronizer.in_flight.len(), 0);\n    assert!(global_state_synchronizer.last_progress.is_some());\n\n    let 
events = tokio::spawn(effects.remove(0)).await.unwrap();\n    assert_eq!(events.len(), 1);\n    assert!(matches!(events[0], Event::GetPeers(_)));\n\n    let mut effects =\n        global_state_synchronizer.parallel_fetch_with_peers(vec![], reactor.effect_builder());\n\n    // Since the request does not have any peers, it should be canceled.\n    assert_eq!(effects.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_none());\n    // Fetch should be always 0 as long as we're below parallel_fetch_limit\n    assert_eq!(global_state_synchronizer.fetch_queue.queue.len(), 0);\n    assert_eq!(global_state_synchronizer.in_flight.len(), 0);\n\n    // Check if the error is propagated on the channel\n    tokio::spawn(effects.remove(0));\n    let result = receiver.await.unwrap();\n    assert!(result.is_err());\n}\n\n#[tokio::test]\nasync fn sync_global_state_request_starts_maximum_trie_fetches() {\n    let mut rng = TestRng::new();\n    let reactor = MockReactor::new();\n    let parallel_fetch_limit = rng.gen_range(2..10);\n    let mut global_state_synchronizer = GlobalStateSynchronizer::new(parallel_fetch_limit);\n\n    let mut progress = Timestamp::now();\n\n    let (request, trie_raw) = random_sync_global_state_request(\n        &mut rng,\n        Responder::without_shutdown(oneshot::channel().0),\n    );\n    let trie_hash = request.state_root_hash;\n    tokio::time::sleep(Duration::from_millis(5)).await;\n    let mut effects = global_state_synchronizer.handle_request(request, reactor.effect_builder());\n    assert_eq!(effects.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_some());\n    assert_eq!(global_state_synchronizer.fetch_queue.queue.len(), 1);\n    // At first the synchronizer only fetches the root node.\n    assert_eq!(global_state_synchronizer.in_flight.len(), 0);\n    assert!(global_state_synchronizer.last_progress().unwrap() > progress);\n    progress = global_state_synchronizer.last_progress().unwrap();\n\n    let events = 
tokio::spawn(effects.remove(0)).await.unwrap();\n    assert_eq!(events.len(), 1);\n    assert!(matches!(events[0], Event::GetPeers(_)));\n\n    let mut effects = global_state_synchronizer.parallel_fetch_with_peers(\n        std::iter::repeat_with(|| NodeId::random(&mut rng))\n            .take(2)\n            .collect(),\n        reactor.effect_builder(),\n    );\n    assert_eq!(effects.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_some());\n    // Fetch should be always 0 as long as we're below parallel_fetch_limit\n    assert_eq!(global_state_synchronizer.fetch_queue.queue.len(), 0);\n    assert_eq!(global_state_synchronizer.in_flight.len(), 1);\n\n    // Check if trie_accumulator requests were generated for all tries.\n    tokio::spawn(effects.remove(0));\n    reactor.expect_trie_accumulator_request(&trie_hash).await;\n\n    // sleep a bit so that the next progress timestamp is different\n    tokio::time::sleep(Duration::from_millis(2)).await;\n    // simulate the fetch returning a trie\n    let effects = global_state_synchronizer.handle_fetched_trie(\n        trie_hash.into(),\n        Ok(TrieAccumulatorResponse::new(trie_raw.clone(), vec![])),\n        reactor.effect_builder(),\n    );\n\n    assert_eq!(effects.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_some());\n    assert_eq!(global_state_synchronizer.fetch_queue.queue.len(), 0);\n    // the fetch request is no longer in flight\n    assert_eq!(global_state_synchronizer.in_flight.len(), 0);\n    assert!(global_state_synchronizer.last_progress().unwrap() > progress);\n    progress = global_state_synchronizer.last_progress().unwrap();\n\n    // sleep a bit so that the next progress timestamp is different\n    tokio::time::sleep(Duration::from_millis(2)).await;\n\n    // root node would have some children that we haven't yet downloaded\n    let missing_children = (0u8..255)\n        // TODO: generate random hashes when `rng.gen` works\n        .map(|i| 
Digest::hash([i; 32]))\n        .collect();\n\n    let trie_hash = trie_raw.hash();\n\n    // simulate synchronizer processing the fetched trie\n    let effects = global_state_synchronizer.handle_put_trie_result(\n        trie_hash,\n        PutTrieResult::Failure(GlobalStateError::MissingTrieNodeChildren(\n            trie_hash,\n            trie_raw,\n            missing_children,\n        )),\n        reactor.effect_builder(),\n    );\n\n    assert_eq!(effects.len(), 2);\n    for effect in effects {\n        let events = tokio::spawn(effect).await.unwrap();\n        assert_eq!(events.len(), 1);\n        assert!(matches!(events[0], Event::GetPeers(_)));\n    }\n\n    let effects = global_state_synchronizer.parallel_fetch_with_peers(\n        std::iter::repeat_with(|| NodeId::random(&mut rng))\n            .take(2)\n            .collect(),\n        reactor.effect_builder(),\n    );\n    // The global state synchronizer should now start to get the missing tries and create a\n    // trie_accumulator fetch request for each of the missing children.\n    assert_eq!(effects.len(), parallel_fetch_limit);\n    assert!(global_state_synchronizer.request_state.is_some());\n    assert_eq!(\n        global_state_synchronizer.fetch_queue.queue.len(),\n        255 - parallel_fetch_limit\n    );\n    assert_eq!(\n        global_state_synchronizer.in_flight.len(),\n        parallel_fetch_limit\n    );\n    assert!(global_state_synchronizer.last_progress().unwrap() > progress);\n}\n\n#[tokio::test]\nasync fn trie_accumulator_error_cancels_request() {\n    let mut rng = TestRng::new();\n    let reactor = MockReactor::new();\n    // Set the parallel fetch limit to allow only 1 fetch\n    let mut global_state_synchronizer = GlobalStateSynchronizer::new(1);\n\n    // Create and register one request\n    let (sender, receiver1) = oneshot::channel();\n    let (request1, _) =\n        random_sync_global_state_request(&mut rng, Responder::without_shutdown(sender));\n    let trie_hash1 = 
request1.state_root_hash;\n    let mut effects = global_state_synchronizer.handle_request(request1, reactor.effect_builder());\n    assert_eq!(effects.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_some());\n    assert_eq!(global_state_synchronizer.fetch_queue.queue.len(), 1);\n    assert_eq!(global_state_synchronizer.in_flight.len(), 0);\n\n    let events = tokio::spawn(effects.remove(0)).await.unwrap();\n    assert_eq!(events.len(), 1);\n    assert!(matches!(events[0], Event::GetPeers(_)));\n\n    let mut effects = global_state_synchronizer.parallel_fetch_with_peers(\n        std::iter::repeat_with(|| NodeId::random(&mut rng))\n            .take(2)\n            .collect(),\n        reactor.effect_builder(),\n    );\n    assert_eq!(effects.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_some());\n    assert_eq!(global_state_synchronizer.fetch_queue.queue.len(), 0);\n    assert_eq!(global_state_synchronizer.in_flight.len(), 1);\n\n    // Validate that a trie accumulator request was created\n    tokio::spawn(effects.remove(0));\n    reactor.expect_trie_accumulator_request(&trie_hash1).await;\n\n    // Create and register a second request\n    let (sender, receiver2) = oneshot::channel();\n    let (request2, _) =\n        random_sync_global_state_request(&mut rng, Responder::without_shutdown(sender));\n    let trie_hash2 = request2.state_root_hash;\n    let mut effects = global_state_synchronizer.handle_request(request2, reactor.effect_builder());\n    // This request should generate an error response\n    assert_eq!(effects.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_some());\n    assert_eq!(global_state_synchronizer.fetch_queue.queue.len(), 0);\n    // First request is in flight\n    assert_eq!(global_state_synchronizer.in_flight.len(), 1);\n\n    tokio::spawn(effects.remove(0));\n    match receiver2.await.unwrap() {\n        // the synchronizer should say that it's already processing a different 
request\n        Err(Error::ProcessingAnotherRequest {\n            hash_being_synced,\n            hash_requested,\n        }) => {\n            assert_eq!(hash_being_synced, trie_hash1);\n            assert_eq!(hash_requested, trie_hash2);\n        }\n        res => panic!(\"unexpected result: {:?}\", res),\n    }\n\n    // Simulate a trie_accumulator error for the first trie\n    let trie_accumulator_result = Err(TrieAccumulatorError::Absent(trie_hash1, 0, vec![]));\n    let mut effects = global_state_synchronizer.handle_fetched_trie(\n        trie_hash1.into(),\n        trie_accumulator_result,\n        reactor.effect_builder(),\n    );\n    assert_eq!(effects.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_none());\n    assert_eq!(global_state_synchronizer.in_flight.len(), 0);\n\n    let cancel_effect = effects.pop().unwrap();\n\n    // Check if we got the error for the first trie on the channel\n    tokio::spawn(cancel_effect);\n    let result = receiver1.await.unwrap();\n    assert!(result.is_err());\n}\n\n#[tokio::test]\nasync fn successful_trie_fetch_puts_trie_to_store() {\n    let mut rng = TestRng::new();\n    let reactor = MockReactor::new();\n    let mut global_state_synchronizer = GlobalStateSynchronizer::new(rng.gen_range(2..10));\n\n    // Create a request\n    let (request, trie) = random_sync_global_state_request(\n        &mut rng,\n        Responder::without_shutdown(oneshot::channel().0),\n    );\n    let state_root_hash = request.state_root_hash;\n\n    let mut effects = global_state_synchronizer.handle_request(request, reactor.effect_builder());\n    assert_eq!(effects.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_some());\n    assert_eq!(global_state_synchronizer.in_flight.len(), 0);\n\n    let events = tokio::spawn(effects.remove(0)).await.unwrap();\n    assert_eq!(events.len(), 1);\n    assert!(matches!(events[0], Event::GetPeers(_)));\n\n    let mut effects = 
global_state_synchronizer.parallel_fetch_with_peers(\n        std::iter::repeat_with(|| NodeId::random(&mut rng))\n            .take(2)\n            .collect(),\n        reactor.effect_builder(),\n    );\n    assert_eq!(effects.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_some());\n    assert_eq!(global_state_synchronizer.fetch_queue.queue.len(), 0);\n    assert_eq!(global_state_synchronizer.in_flight.len(), 1);\n    // Validate that we got a trie_accumulator request\n    tokio::spawn(effects.remove(0));\n    reactor\n        .expect_trie_accumulator_request(&state_root_hash)\n        .await;\n\n    // Simulate a successful trie fetch\n    let trie_accumulator_result = Ok(TrieAccumulatorResponse::new(trie.clone(), Vec::new()));\n    let mut effects = global_state_synchronizer.handle_fetched_trie(\n        state_root_hash.into(),\n        trie_accumulator_result,\n        reactor.effect_builder(),\n    );\n    assert_eq!(effects.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_some());\n    assert_eq!(global_state_synchronizer.in_flight.len(), 0);\n\n    // Should attempt to put the trie to the trie store\n    tokio::spawn(effects.remove(0));\n    reactor.expect_put_trie_request(&trie).await;\n}\n\n#[tokio::test]\nasync fn trie_store_error_cancels_request() {\n    let mut rng = TestRng::new();\n    let reactor = MockReactor::new();\n    let mut global_state_synchronizer = GlobalStateSynchronizer::new(rng.gen_range(2..10));\n\n    // Create a request\n    let (sender, receiver) = oneshot::channel();\n    let (request, trie) =\n        random_sync_global_state_request(&mut rng, Responder::without_shutdown(sender));\n    let state_root_hash = request.state_root_hash;\n\n    let mut effects = global_state_synchronizer.handle_request(request, reactor.effect_builder());\n    assert_eq!(effects.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_some());\n    assert_eq!(global_state_synchronizer.in_flight.len(), 
0);\n\n    let events = tokio::spawn(effects.remove(0)).await.unwrap();\n    assert_eq!(events.len(), 1);\n    assert!(matches!(events[0], Event::GetPeers(_)));\n\n    let mut effects = global_state_synchronizer.parallel_fetch_with_peers(\n        std::iter::repeat_with(|| NodeId::random(&mut rng))\n            .take(2)\n            .collect(),\n        reactor.effect_builder(),\n    );\n    assert_eq!(effects.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_some());\n    assert_eq!(global_state_synchronizer.fetch_queue.queue.len(), 0);\n    assert_eq!(global_state_synchronizer.in_flight.len(), 1);\n\n    // Validate that we got a trie_accumulator request\n    tokio::spawn(effects.remove(0));\n    reactor\n        .expect_trie_accumulator_request(&state_root_hash)\n        .await;\n\n    // Assuming we received the trie from the accumulator, check the behavior when we an error\n    // is returned when trying to put the trie to the store.\n    let mut effects = global_state_synchronizer.handle_put_trie_result(\n        trie.hash(),\n        PutTrieResult::Failure(GlobalStateError::RootNotFound),\n        reactor.effect_builder(),\n    );\n    assert_eq!(effects.len(), 1);\n    // Request should be canceled.\n    assert!(global_state_synchronizer.request_state.is_none());\n    tokio::spawn(effects.remove(0));\n    let result = receiver.await.unwrap();\n    assert!(result.is_err());\n}\n\n#[tokio::test]\nasync fn missing_trie_node_children_triggers_fetch() {\n    let mut rng = TestRng::new();\n    let reactor = MockReactor::new();\n    let parallel_fetch_limit = rng.gen_range(2..10);\n    let mut global_state_synchronizer = GlobalStateSynchronizer::new(parallel_fetch_limit);\n\n    // Create a request\n    let (request, request_trie) = random_sync_global_state_request(\n        &mut rng,\n        Responder::without_shutdown(oneshot::channel().0),\n    );\n    let trie_hash = Digest::hash(request_trie.clone().inner());\n    let state_root_hash = 
request.state_root_hash;\n\n    let mut effects = global_state_synchronizer.handle_request(request, reactor.effect_builder());\n    assert_eq!(effects.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_some());\n    assert_eq!(global_state_synchronizer.in_flight.len(), 0);\n\n    let events = tokio::spawn(effects.remove(0)).await.unwrap();\n    assert_eq!(events.len(), 1);\n    assert!(matches!(events[0], Event::GetPeers(_)));\n\n    let mut effects = global_state_synchronizer.parallel_fetch_with_peers(\n        std::iter::repeat_with(|| NodeId::random(&mut rng))\n            .take(2)\n            .collect(),\n        reactor.effect_builder(),\n    );\n    assert_eq!(effects.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_some());\n    assert_eq!(global_state_synchronizer.in_flight.len(), 1);\n\n    // Validate that we got a trie_accumulator request\n    tokio::spawn(effects.remove(0));\n    reactor\n        .expect_trie_accumulator_request(&state_root_hash)\n        .await;\n\n    // Simulate a successful trie fetch from the accumulator\n    let trie_accumulator_result = Ok(TrieAccumulatorResponse::new(\n        request_trie.clone(),\n        Vec::new(),\n    ));\n    let mut effects = global_state_synchronizer.handle_fetched_trie(\n        state_root_hash.into(),\n        trie_accumulator_result,\n        reactor.effect_builder(),\n    );\n    assert_eq!(effects.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_some());\n    assert_eq!(global_state_synchronizer.in_flight.len(), 0);\n\n    // Should try to put the trie in the store.\n    tokio::spawn(effects.remove(0));\n    reactor.expect_put_trie_request(&request_trie).await;\n\n    // Simulate an error from the trie store where the trie is missing children.\n    // We generate more than the parallel_fetch_limit.\n    let num_missing_trie_nodes = rng.gen_range(12..20);\n    let missing_tries: Vec<TrieRaw> = (0..num_missing_trie_nodes)\n        .map(|_| 
random_test_trie(&mut rng))\n        .collect();\n    let missing_trie_nodes_hashes: Vec<Digest> = missing_tries\n        .iter()\n        .map(|missing_trie| Digest::hash(missing_trie.inner()))\n        .collect();\n\n    let effects = global_state_synchronizer.handle_put_trie_result(\n        trie_hash,\n        PutTrieResult::Failure(GlobalStateError::MissingTrieNodeChildren(\n            trie_hash,\n            request_trie.clone(),\n            missing_trie_nodes_hashes.clone(),\n        )),\n        reactor.effect_builder(),\n    );\n\n    assert_eq!(effects.len(), 2);\n    for effect in effects {\n        let events = tokio::spawn(effect).await.unwrap();\n        assert_eq!(events.len(), 1);\n        assert!(matches!(events[0], Event::GetPeers(_)));\n    }\n\n    let mut effects = global_state_synchronizer.parallel_fetch_with_peers(\n        std::iter::repeat_with(|| NodeId::random(&mut rng))\n            .take(2)\n            .collect(),\n        reactor.effect_builder(),\n    );\n    // The global state synchronizer should now start to get the missing tries and create a\n    // trie_accumulator fetch request for each of the missing children.\n    assert_eq!(effects.len(), parallel_fetch_limit);\n    assert_eq!(global_state_synchronizer.tries_awaiting_children.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_some());\n    assert_eq!(\n        global_state_synchronizer.in_flight.len(),\n        parallel_fetch_limit\n    );\n    // There are still tries that were not issued a fetch since it would exceed the limit.\n    assert_eq!(\n        global_state_synchronizer.fetch_queue.queue.len(),\n        num_missing_trie_nodes - parallel_fetch_limit\n    );\n\n    // Check the requests that were issued.\n    for (idx, effect) in effects.drain(0..).rev().enumerate() {\n        tokio::spawn(effect);\n        reactor\n            .expect_trie_accumulator_request(\n                &missing_trie_nodes_hashes[num_missing_trie_nodes - idx - 1],\n         
   )\n            .await;\n    }\n\n    // Now handle a successful fetch from the trie_accumulator for one of the missing children.\n    let trie_hash = missing_trie_nodes_hashes[num_missing_trie_nodes - 1];\n    let trie_accumulator_result = Ok(TrieAccumulatorResponse::new(\n        missing_tries[num_missing_trie_nodes - 1].clone(),\n        Vec::new(),\n    ));\n    let mut effects = global_state_synchronizer.handle_fetched_trie(\n        trie_hash.into(),\n        trie_accumulator_result,\n        reactor.effect_builder(),\n    );\n    assert_eq!(effects.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_some());\n    assert_eq!(\n        global_state_synchronizer.in_flight.len(),\n        parallel_fetch_limit - 1\n    );\n    assert_eq!(\n        global_state_synchronizer.fetch_queue.queue.len(),\n        num_missing_trie_nodes - parallel_fetch_limit\n    );\n    tokio::spawn(effects.remove(0));\n    reactor\n        .expect_put_trie_request(&missing_tries[num_missing_trie_nodes - 1])\n        .await;\n\n    let trie_hash =\n        Digest::hash_into_chunks_if_necessary(missing_tries[num_missing_trie_nodes - 1].inner());\n\n    // Handle put trie to store for the missing child\n    let mut effects = global_state_synchronizer.handle_put_trie_result(\n        trie_hash,\n        PutTrieResult::Success { hash: trie_hash },\n        reactor.effect_builder(),\n    );\n\n    assert_eq!(effects.len(), 1);\n    assert_eq!(global_state_synchronizer.tries_awaiting_children.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_some());\n    // The in flight value should still be 1 below the limit - the effects should contain a request\n    // for peers.\n    assert_eq!(\n        global_state_synchronizer.in_flight.len(),\n        parallel_fetch_limit - 1\n    );\n\n    let events = tokio::spawn(effects.remove(0)).await.unwrap();\n    assert_eq!(events.len(), 1);\n    assert!(matches!(events[0], Event::GetPeers(_)));\n\n    let mut effects = 
global_state_synchronizer.parallel_fetch_with_peers(\n        std::iter::repeat_with(|| NodeId::random(&mut rng))\n            .take(2)\n            .collect(),\n        reactor.effect_builder(),\n    );\n    assert_eq!(effects.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_some());\n    // Check if one of the pending fetches for the missing children was picked up.\n    assert_eq!(\n        global_state_synchronizer.in_flight.len(),\n        parallel_fetch_limit\n    );\n\n    // Should have one less missing child than before.\n    assert_eq!(\n        global_state_synchronizer\n            .tries_awaiting_children\n            .get(&Digest::hash(request_trie.inner()).into())\n            .unwrap()\n            .missing_children\n            .len(),\n        num_missing_trie_nodes - 1\n    );\n\n    // Check that a fetch was created for the next missing child.\n    tokio::spawn(effects.remove(0));\n    reactor\n        .expect_trie_accumulator_request(\n            &missing_trie_nodes_hashes[num_missing_trie_nodes - parallel_fetch_limit - 1],\n        )\n        .await;\n}\n\n#[tokio::test]\nasync fn stored_trie_finalizes_request() {\n    let mut rng = TestRng::new();\n    let reactor = MockReactor::new();\n    let parallel_fetch_limit = rng.gen_range(2..10);\n    let mut global_state_synchronizer = GlobalStateSynchronizer::new(parallel_fetch_limit);\n\n    // Create a request\n    let (sender, receiver) = oneshot::channel();\n    let (request, trie) =\n        random_sync_global_state_request(&mut rng, Responder::without_shutdown(sender));\n    let state_root_hash = request.state_root_hash;\n\n    let mut effects = global_state_synchronizer.handle_request(request, reactor.effect_builder());\n    assert_eq!(effects.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_some());\n    assert_eq!(global_state_synchronizer.in_flight.len(), 0);\n\n    let events = tokio::spawn(effects.remove(0)).await.unwrap();\n    
assert_eq!(events.len(), 1);\n    assert!(matches!(events[0], Event::GetPeers(_)));\n\n    let mut effects = global_state_synchronizer.parallel_fetch_with_peers(\n        std::iter::repeat_with(|| NodeId::random(&mut rng))\n            .take(2)\n            .collect(),\n        reactor.effect_builder(),\n    );\n    assert_eq!(effects.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_some());\n    assert_eq!(global_state_synchronizer.in_flight.len(), 1);\n\n    // Validate that we got a trie_accumulator request\n    tokio::spawn(effects.remove(0));\n    reactor\n        .expect_trie_accumulator_request(&state_root_hash)\n        .await;\n\n    // Handle a successful fetch from the trie_accumulator for one of the missing children.\n    let trie_hash = Digest::hash(trie.inner());\n    let trie_accumulator_result = Ok(TrieAccumulatorResponse::new(trie.clone(), Vec::new()));\n    let mut effects = global_state_synchronizer.handle_fetched_trie(\n        trie_hash.into(),\n        trie_accumulator_result,\n        reactor.effect_builder(),\n    );\n    assert_eq!(effects.len(), 1);\n    assert!(global_state_synchronizer.request_state.is_some());\n    assert_eq!(global_state_synchronizer.in_flight.len(), 0);\n    tokio::spawn(effects.remove(0));\n    reactor.expect_put_trie_request(&trie).await;\n\n    // Generate a successful trie store\n    let mut effects = global_state_synchronizer.handle_put_trie_result(\n        trie_hash,\n        PutTrieResult::Success { hash: trie_hash },\n        reactor.effect_builder(),\n    );\n    // Assert request was successful and global synchronizer is finished.\n    assert_eq!(effects.len(), 1);\n    assert_eq!(global_state_synchronizer.tries_awaiting_children.len(), 0);\n    assert!(global_state_synchronizer.request_state.is_none());\n    assert_eq!(global_state_synchronizer.in_flight.len(), 0);\n    assert_eq!(global_state_synchronizer.fetch_queue.queue.len(), 0);\n    tokio::spawn(effects.remove(0));\n    let result 
= receiver.await.unwrap();\n    assert!(result.is_ok());\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/global_state_synchronizer.rs",
    "content": "#[cfg(test)]\nmod tests;\n\nuse std::{\n    collections::{BTreeMap, HashSet},\n    fmt, mem,\n};\n\nuse datasize::DataSize;\nuse derive_more::From;\nuse serde::Serialize;\nuse thiserror::Error;\nuse tracing::{debug, error, warn};\n\nuse casper_storage::{\n    data_access_layer::{PutTrieRequest, PutTrieResult},\n    global_state::{error::Error as GlobalStateError, trie::TrieRaw},\n};\nuse casper_types::{BlockHash, Digest, DisplayIter, Timestamp};\n\nuse super::{TrieAccumulator, TrieAccumulatorError, TrieAccumulatorEvent, TrieAccumulatorResponse};\nuse crate::{\n    components::Component,\n    effect::{\n        announcements::PeerBehaviorAnnouncement,\n        requests::{\n            ContractRuntimeRequest, FetcherRequest, SyncGlobalStateRequest, TrieAccumulatorRequest,\n        },\n        EffectBuilder, EffectExt, Effects, Responder,\n    },\n    reactor,\n    types::{NodeId, TrieOrChunk},\n    NodeRng,\n};\n\nconst COMPONENT_NAME: &str = \"global_state_synchronizer\";\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, DataSize, From)]\npub(crate) struct RootHash(Digest);\n\nimpl RootHash {\n    #[cfg(test)]\n    pub(crate) fn new(digest: Digest) -> Self {\n        Self(digest)\n    }\n\n    pub(crate) fn into_inner(self) -> Digest {\n        self.0\n    }\n}\n\nimpl fmt::Display for RootHash {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        self.0.fmt(f)\n    }\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, DataSize, From)]\npub(crate) struct TrieHash(Digest);\n\nimpl fmt::Display for TrieHash {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        self.0.fmt(f)\n    }\n}\n\n#[derive(Debug, Clone, Error)]\npub(crate) enum Error {\n    #[error(\"trie accumulator encountered an error while fetching a trie; unreliable peers {}\", DisplayIter::new(.0))]\n    TrieAccumulator(Vec<NodeId>),\n    #[error(\"Failed to persist trie element in global state: 
{0}; unreliable peers {}\", DisplayIter::new(.1))]\n    PutTrie(GlobalStateError, Vec<NodeId>),\n    #[error(\"no peers available to ask for a trie\")]\n    NoPeersAvailable,\n    #[error(\"received request for {hash_requested} while syncing another root hash: {hash_being_synced}\")]\n    ProcessingAnotherRequest {\n        hash_being_synced: Digest,\n        hash_requested: Digest,\n    },\n}\n\n#[derive(Debug, Clone)]\npub(crate) struct Response {\n    hash: RootHash,\n    unreliable_peers: Vec<NodeId>,\n}\n\nimpl Response {\n    pub(crate) fn new(hash: RootHash, unreliable_peers: Vec<NodeId>) -> Self {\n        Self {\n            hash,\n            unreliable_peers,\n        }\n    }\n\n    pub(crate) fn hash(&self) -> &RootHash {\n        &self.hash\n    }\n\n    pub(crate) fn unreliable_peers(self) -> Vec<NodeId> {\n        self.unreliable_peers\n    }\n}\n\n#[derive(Debug, From, Serialize)]\npub(crate) enum Event {\n    #[from]\n    Request(SyncGlobalStateRequest),\n    GetPeers(Vec<NodeId>),\n    FetchedTrie {\n        trie_hash: TrieHash,\n        trie_accumulator_result: Result<TrieAccumulatorResponse, TrieAccumulatorError>,\n    },\n    PutTrieResult {\n        #[serde(skip)]\n        raw: TrieRaw,\n        #[serde(skip)]\n        result: PutTrieResult,\n    },\n    #[from]\n    TrieAccumulator(TrieAccumulatorEvent),\n}\n\n#[derive(Debug, DataSize)]\nstruct RequestState {\n    root_hash: RootHash,\n    block_hashes: HashSet<BlockHash>,\n    responders: Vec<Responder<Result<Response, Error>>>,\n    unreliable_peers: HashSet<NodeId>,\n}\n\nimpl RequestState {\n    fn new(request: SyncGlobalStateRequest) -> Self {\n        let mut block_hashes = HashSet::new();\n        block_hashes.insert(request.block_hash);\n        Self {\n            root_hash: RootHash(request.state_root_hash),\n            block_hashes,\n            responders: vec![request.responder],\n            unreliable_peers: HashSet::new(),\n        }\n    }\n\n    /// Extends the responders 
based on an additional request.\n    fn add_request(&mut self, request: SyncGlobalStateRequest) {\n        self.block_hashes.insert(request.block_hash);\n        self.responders.push(request.responder);\n    }\n\n    /// Consumes this request state and sends the response on all responders.\n    fn respond(self, response: Result<Response, Error>) -> Effects<Event> {\n        self.responders\n            .into_iter()\n            .flat_map(|responder| responder.respond(response.clone()).ignore())\n            .collect()\n    }\n}\n\n#[derive(Debug, DataSize)]\nstruct TrieAwaitingChildren {\n    trie_raw: TrieRaw,\n    missing_children: HashSet<TrieHash>,\n}\n\nimpl TrieAwaitingChildren {\n    fn new(trie_raw: TrieRaw, missing_children: Vec<TrieHash>) -> Self {\n        Self {\n            trie_raw,\n            missing_children: missing_children.into_iter().collect(),\n        }\n    }\n\n    /// Handles `written_trie` being written to the database - removes the trie as a dependency and\n    /// returns the next trie to be downloaded.\n    fn trie_written(&mut self, written_trie: TrieHash) {\n        self.missing_children.remove(&written_trie);\n    }\n\n    fn ready_to_be_written(&self) -> bool {\n        self.missing_children.is_empty()\n    }\n\n    fn into_trie_raw(self) -> TrieRaw {\n        self.trie_raw\n    }\n}\n\n#[derive(Debug, Default, DataSize)]\nstruct FetchQueue {\n    queue: Vec<TrieHash>,\n    /// set of the same values that are in the queue - so that we can quickly check that we do not\n    /// duplicate the same entry in the queue\n    hashes_set: HashSet<TrieHash>,\n}\n\nimpl FetchQueue {\n    fn insert(&mut self, trie_hash: TrieHash) {\n        if self.hashes_set.insert(trie_hash) {\n            self.queue.push(trie_hash);\n        }\n    }\n\n    fn take(&mut self, num_to_take: usize) -> Vec<TrieHash> {\n        // `to_return` will contain `num_to_take` elements from the end of the queue (or all of\n        // them if `num_to_take` is greater 
than queue length).\n        // Taking elements from the end will essentially make our traversal depth-first instead of\n        // breadth-first.\n        let to_return = self\n            .queue\n            .split_off(self.queue.len().saturating_sub(num_to_take));\n        // remove the returned hashes from the \"duplication prevention\" set\n        for returned_hash in &to_return {\n            self.hashes_set.remove(returned_hash);\n        }\n        to_return\n    }\n\n    fn handle_request_cancelled(&mut self) {\n        self.queue = vec![];\n        self.hashes_set = HashSet::new();\n    }\n}\n\n#[derive(Debug, DataSize)]\npub(super) struct GlobalStateSynchronizer {\n    max_parallel_trie_fetches: usize,\n    trie_accumulator: TrieAccumulator,\n    request_state: Option<RequestState>,\n    tries_awaiting_children: BTreeMap<TrieHash, TrieAwaitingChildren>,\n    fetch_queue: FetchQueue,\n    in_flight: HashSet<TrieHash>,\n    last_progress: Option<Timestamp>,\n}\n\nimpl GlobalStateSynchronizer {\n    pub(super) fn new(max_parallel_trie_fetches: usize) -> Self {\n        Self {\n            max_parallel_trie_fetches,\n            trie_accumulator: TrieAccumulator::new(),\n            request_state: None,\n            tries_awaiting_children: Default::default(),\n            fetch_queue: Default::default(),\n            in_flight: Default::default(),\n            last_progress: None,\n        }\n    }\n\n    fn touch(&mut self) {\n        self.last_progress = Some(Timestamp::now());\n    }\n\n    pub(super) fn last_progress(&self) -> Option<Timestamp> {\n        self.last_progress\n    }\n\n    fn handle_request<REv>(\n        &mut self,\n        request: SyncGlobalStateRequest,\n        effect_builder: EffectBuilder<REv>,\n    ) -> Effects<Event>\n    where\n        REv: From<TrieAccumulatorRequest> + From<ContractRuntimeRequest> + Send,\n    {\n        let state_root_hash = request.state_root_hash;\n\n        let mut effects = match &mut self.request_state 
{\n            None => {\n                self.request_state = Some(RequestState::new(request));\n                self.touch();\n                self.enqueue_trie_for_fetching(effect_builder, TrieHash(state_root_hash))\n            }\n            Some(state) => {\n                if state.root_hash.0 != state_root_hash {\n                    return request\n                        .responder\n                        .respond(Err(Error::ProcessingAnotherRequest {\n                            hash_being_synced: state.root_hash.0,\n                            hash_requested: state_root_hash,\n                        }))\n                        .ignore();\n                } else {\n                    state.add_request(request);\n                    self.touch();\n                }\n                Effects::new()\n            }\n        };\n\n        debug!(\n            %state_root_hash,\n            fetch_queue_length = self.fetch_queue.queue.len(),\n            tries_awaiting_children_length = self.tries_awaiting_children.len(),\n            \"handle_request\"\n        );\n\n        effects.extend(self.parallel_fetch(effect_builder));\n\n        effects\n    }\n\n    fn parallel_fetch<REv>(&mut self, effect_builder: EffectBuilder<REv>) -> Effects<Event> {\n        effect_builder\n            .immediately()\n            .event(|()| Event::GetPeers(vec![]))\n    }\n\n    fn parallel_fetch_with_peers<REv>(\n        &mut self,\n        peers: Vec<NodeId>,\n        effect_builder: EffectBuilder<REv>,\n    ) -> Effects<Event>\n    where\n        REv: From<TrieAccumulatorRequest> + Send,\n    {\n        let mut effects = Effects::new();\n\n        if self.request_state.is_none() {\n            debug!(\"called parallel_fetch while not processing any requests\");\n            return effects;\n        }\n\n        // Just to not overdo parallel trie fetches in small networks. 
5000 parallel trie fetches\n        // seemed to be fine in networks of 100 peers, so we set the limit at 50 * number of peers.\n        let max_parallel_trie_fetches = self.max_parallel_trie_fetches.min(peers.len() * 50);\n\n        // if we're not finished, figure out how many new fetching tasks we can start\n        let num_fetches_to_start = max_parallel_trie_fetches.saturating_sub(self.in_flight.len());\n\n        debug!(\n            max_parallel_trie_fetches,\n            in_flight_length = self.in_flight.len(),\n            fetch_queue_length = self.fetch_queue.queue.len(),\n            num_fetches_to_start,\n            \"parallel_fetch\"\n        );\n\n        let to_fetch = self.fetch_queue.take(num_fetches_to_start);\n\n        if peers.is_empty() {\n            // if we have no peers, fail - trie accumulator would return an error, anyway\n            debug!(\"no peers available, cancelling request\");\n            return self.cancel_request(Error::NoPeersAvailable);\n        }\n\n        for trie_hash in to_fetch {\n            if self.in_flight.insert(trie_hash) {\n                effects.extend(effect_builder.fetch_trie(trie_hash.0, peers.clone()).event(\n                    move |trie_accumulator_result| Event::FetchedTrie {\n                        trie_hash,\n                        trie_accumulator_result,\n                    },\n                ));\n            }\n        }\n\n        effects\n    }\n\n    fn handle_fetched_trie<REv>(\n        &mut self,\n        trie_hash: TrieHash,\n        trie_accumulator_result: Result<TrieAccumulatorResponse, TrieAccumulatorError>,\n        effect_builder: EffectBuilder<REv>,\n    ) -> Effects<Event>\n    where\n        REv: From<TrieAccumulatorRequest> + From<ContractRuntimeRequest> + Send,\n    {\n        // A result of `false` probably indicates that this is a stale fetch from a previously\n        // cancelled request - we shouldn't cancel the current request if the result is an error in\n        // 
such a case.\n        let in_flight_was_present = self.in_flight.remove(&trie_hash);\n\n        debug!(\n            %trie_hash,\n            in_flight_length = self.in_flight.len(),\n            fetch_queue_length = self.fetch_queue.queue.len(),\n            processing_request = self.request_state.is_some(),\n            \"handle_fetched_trie\"\n        );\n\n        let trie_raw = match trie_accumulator_result {\n            Ok(response) => {\n                if let Some(request_state) = &mut self.request_state {\n                    request_state\n                        .unreliable_peers\n                        .extend(response.unreliable_peers());\n                }\n                response.trie()\n            }\n            Err(error) => {\n                debug!(%error, \"error fetching a trie\");\n                let new_unreliable_peers = match error {\n                    TrieAccumulatorError::Absent(_, _, unreliable_peers)\n                    | TrieAccumulatorError::PeersExhausted(_, unreliable_peers) => unreliable_peers,\n                    TrieAccumulatorError::NoPeers(_) => {\n                        // Trie accumulator did not have any peers to download from\n                        // so the request will be canceled with no peers to report\n                        vec![]\n                    }\n                };\n                let unreliable_peers = self.request_state.as_mut().map_or_else(Vec::new, |state| {\n                    state.unreliable_peers.extend(new_unreliable_peers);\n                    state.unreliable_peers.iter().copied().collect()\n                });\n                debug!(%trie_hash, \"unreliable peers for requesting trie, cancelling request\");\n                let mut effects = if in_flight_was_present {\n                    self.cancel_request(Error::TrieAccumulator(unreliable_peers))\n                } else {\n                    Effects::new()\n                };\n\n                // continue fetching other 
requests if any\n                // request_state might be `None` if we are processing fetch responses that were in\n                // flight when we cancelled a request\n                if self.request_state.is_some() {\n                    effects.extend(self.parallel_fetch(effect_builder));\n                }\n                return effects;\n            }\n        };\n\n        self.touch();\n\n        let request = PutTrieRequest::new((*trie_raw).clone());\n        effect_builder\n            .put_trie_if_all_children_present(request)\n            .event(move |put_trie_result| Event::PutTrieResult {\n                raw: *trie_raw,\n                result: put_trie_result,\n            })\n    }\n\n    pub(super) fn cancel_request(&mut self, error: Error) -> Effects<Event> {\n        match self.request_state.take() {\n            Some(request_state) => {\n                debug!(root_hash=%request_state.root_hash, \"cancelling request\");\n                self.fetch_queue.handle_request_cancelled();\n                self.in_flight = HashSet::new();\n                request_state.respond(Err(error))\n            }\n            None => {\n                debug!(\"not cancelling request - none being processed\");\n                Effects::new()\n            }\n        }\n    }\n\n    fn finish_request(&mut self) -> Effects<Event> {\n        match self.request_state.take() {\n            Some(request_state) => {\n                let root_hash = request_state.root_hash;\n                debug!(%root_hash, \"finishing request\");\n                let unreliable_peers = request_state.unreliable_peers.iter().copied().collect();\n                request_state.respond(Ok(Response::new(root_hash, unreliable_peers)))\n            }\n            None => {\n                // We only call this function after checking that we are processing a request - if\n                // the request is None, this is a bug\n                error!(\"not finishing request - none being 
processed\");\n                Effects::new()\n            }\n        }\n    }\n\n    fn handle_put_trie_result<REv>(\n        &mut self,\n        requested_hash: Digest,\n        put_trie_result: PutTrieResult,\n        effect_builder: EffectBuilder<REv>,\n    ) -> Effects<Event>\n    where\n        REv: From<TrieAccumulatorRequest> + From<ContractRuntimeRequest> + Send,\n    {\n        let mut effects = Effects::new();\n\n        match put_trie_result {\n            PutTrieResult::Success { hash } if hash == requested_hash => {\n                effects.extend(self.handle_trie_written(effect_builder, TrieHash(hash)))\n            }\n            PutTrieResult::Success { hash } => {\n                error!(\n                    %hash,\n                    %requested_hash,\n                    \"trie was stored under a different hash than was used to request it - \\\n                    it's a bug\"\n                );\n            }\n            PutTrieResult::Failure(GlobalStateError::MissingTrieNodeChildren(\n                trie_hash,\n                trie_raw,\n                missing_children,\n            )) => effects.extend(self.handle_trie_missing_children(\n                effect_builder,\n                TrieHash(trie_hash),\n                trie_raw,\n                missing_children.into_iter().map(TrieHash).collect(),\n            )),\n            PutTrieResult::Failure(gse) => {\n                warn!(%requested_hash, %gse, \"couldn't put trie into global state\");\n                if let Some(request_state) = &mut self.request_state {\n                    let unreliable_peers = request_state.unreliable_peers.iter().copied().collect();\n                    effects.extend(self.cancel_request(Error::PutTrie(gse, unreliable_peers)));\n                }\n            }\n        }\n\n        // request_state can be none if we're processing a result of a fetch that was in flight\n        // when a request got cancelled\n        if 
self.request_state.is_some() {\n            effects.extend(self.parallel_fetch(effect_builder));\n        }\n\n        effects\n    }\n\n    fn handle_trie_written<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        written_trie: TrieHash,\n    ) -> Effects<Event>\n    where\n        REv: From<TrieAccumulatorRequest> + From<ContractRuntimeRequest> + Send,\n    {\n        self.touch();\n\n        // Remove the written trie from dependencies of the tries that are waiting.\n        for trie_awaiting in self.tries_awaiting_children.values_mut() {\n            trie_awaiting.trie_written(written_trie);\n        }\n\n        let (ready_tries, still_incomplete): (BTreeMap<_, _>, BTreeMap<_, _>) =\n            mem::take(&mut self.tries_awaiting_children)\n                .into_iter()\n                .partition(|(_, trie_awaiting)| trie_awaiting.ready_to_be_written());\n        debug!(\n            ready_tries = ready_tries.len(),\n            still_incomplete = still_incomplete.len(),\n            \"handle_trie_written\"\n        );\n        self.tries_awaiting_children = still_incomplete;\n\n        let mut effects: Effects<Event> = ready_tries\n            .into_iter()\n            .flat_map(|(_, trie_awaiting)| {\n                let trie_raw = trie_awaiting.into_trie_raw();\n                let request = PutTrieRequest::new(trie_raw.clone());\n                effect_builder\n                    .put_trie_if_all_children_present(request)\n                    .event(move |result| Event::PutTrieResult {\n                        raw: trie_raw,\n                        result,\n                    })\n            })\n            .collect();\n\n        // If there is a request state associated with the trie we just wrote, it means that it was\n        // a root trie and we can report fetching to be finished.\n        if let Some(request_state) = &mut self.request_state {\n            if TrieHash(request_state.root_hash.0) == written_trie {\n       
         effects.extend(self.finish_request());\n            }\n        }\n\n        effects\n    }\n\n    fn enqueue_trie_for_fetching<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        trie_hash: TrieHash,\n    ) -> Effects<Event>\n    where\n        REv: From<ContractRuntimeRequest> + Send,\n    {\n        // we might have fetched it already!\n        if let Some(trie_awaiting) = self.tries_awaiting_children.get_mut(&trie_hash) {\n            // simulate fetching having been completed in order to start fetching any children that\n            // might be still missing\n            let trie_raw = trie_awaiting.trie_raw.clone();\n            let request = PutTrieRequest::new(trie_raw.clone());\n            effect_builder\n                .put_trie_if_all_children_present(request)\n                .event(move |result| Event::PutTrieResult {\n                    raw: trie_raw,\n                    result,\n                })\n        } else {\n            // otherwise, add to the queue\n            self.fetch_queue.insert(trie_hash);\n            Effects::new()\n        }\n    }\n\n    fn handle_trie_missing_children<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        trie_hash: TrieHash,\n        trie_raw: TrieRaw,\n        missing_children: Vec<TrieHash>,\n    ) -> Effects<Event>\n    where\n        REv: From<TrieAccumulatorRequest> + From<ContractRuntimeRequest> + Send,\n    {\n        if self.request_state.is_none() {\n            // this can be valid if we're processing a fetch result that was in flight while we\n            // were cancelling a request - but we don't want to continue queueing further tries for\n            // fetching\n            return Effects::new();\n        }\n\n        self.touch();\n\n        let mut effects: Effects<Event> = missing_children\n            .iter()\n            .flat_map(|child| self.enqueue_trie_for_fetching(effect_builder, *child))\n            .collect();\n        
self.tries_awaiting_children.insert(\n            trie_hash,\n            TrieAwaitingChildren::new(trie_raw, missing_children),\n        );\n        effects.extend(self.parallel_fetch(effect_builder));\n        effects\n    }\n}\n\nimpl<REv> Component<REv> for GlobalStateSynchronizer\nwhere\n    REv: From<TrieAccumulatorRequest>\n        + From<ContractRuntimeRequest>\n        + From<FetcherRequest<TrieOrChunk>>\n        + From<PeerBehaviorAnnouncement>\n        + Send,\n{\n    type Event = Event;\n\n    fn handle_event(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event> {\n        match event {\n            Event::Request(request) => self.handle_request(request, effect_builder),\n            Event::GetPeers(peers) => self.parallel_fetch_with_peers(peers, effect_builder),\n            Event::FetchedTrie {\n                trie_hash,\n                trie_accumulator_result,\n            } => self.handle_fetched_trie(trie_hash, trie_accumulator_result, effect_builder),\n            Event::PutTrieResult {\n                raw: trie_raw,\n                result: put_trie_result,\n            } => self.handle_put_trie_result(trie_raw.hash(), put_trie_result, effect_builder),\n            Event::TrieAccumulator(event) => reactor::wrap_effects(\n                Event::TrieAccumulator,\n                self.trie_accumulator\n                    .handle_event(effect_builder, rng, event),\n            ),\n        }\n    }\n\n    fn name(&self) -> &str {\n        COMPONENT_NAME\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/metrics.rs",
    "content": "use prometheus::{Histogram, Registry};\n\nuse crate::{unregister_metric, utils};\n\nconst HIST_SYNC_DURATION_NAME: &str = \"historical_block_sync_duration_seconds\";\nconst HIST_SYNC_DURATION_HELP: &str = \"duration (in sec) to synchronize a historical block\";\nconst FWD_SYNC_DURATION_NAME: &str = \"forward_block_sync_duration_seconds\";\nconst FWD_SYNC_DURATION_HELP: &str = \"duration (in sec) to synchronize a forward block\";\n\n// We use exponential buckets to observe the time it takes to synchronize blocks.\n// Coverage is ~7.7s with higher resolution in the first buckets.\nconst EXPONENTIAL_BUCKET_START: f64 = 0.2;\nconst EXPONENTIAL_BUCKET_FACTOR: f64 = 2.0;\nconst EXPONENTIAL_BUCKET_COUNT: usize = 10;\n\n/// Metrics for the block synchronizer component.\n#[derive(Debug)]\npub(super) struct Metrics {\n    /// Time duration for the historical synchronizer to get a block.\n    pub(super) historical_block_sync_duration: Histogram,\n    /// Time duration for the forward synchronizer to get a block.\n    pub(super) forward_block_sync_duration: Histogram,\n    registry: Registry,\n}\n\nimpl Metrics {\n    /// Creates a new instance of the block synchronizer metrics.\n    pub fn new(registry: &Registry) -> Result<Self, prometheus::Error> {\n        let buckets = prometheus::exponential_buckets(\n            EXPONENTIAL_BUCKET_START,\n            EXPONENTIAL_BUCKET_FACTOR,\n            EXPONENTIAL_BUCKET_COUNT,\n        )?;\n\n        Ok(Metrics {\n            historical_block_sync_duration: utils::register_histogram_metric(\n                registry,\n                HIST_SYNC_DURATION_NAME,\n                HIST_SYNC_DURATION_HELP,\n                buckets.clone(),\n            )?,\n            forward_block_sync_duration: utils::register_histogram_metric(\n                registry,\n                FWD_SYNC_DURATION_NAME,\n                FWD_SYNC_DURATION_HELP,\n                buckets,\n            )?,\n            registry: registry.clone(),\n  
      })\n    }\n}\n\nimpl Drop for Metrics {\n    fn drop(&mut self) {\n        unregister_metric!(self.registry, self.historical_block_sync_duration);\n        unregister_metric!(self.registry, self.forward_block_sync_duration);\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/need_next.rs",
    "content": "use datasize::DataSize;\nuse derive_more::Display;\n\nuse casper_types::{Block, BlockHash, DeployHash, Digest, EraId, PublicKey, TransactionId};\n\nuse crate::types::{BlockExecutionResultsOrChunkId, ExecutableBlock};\n\nuse super::execution_results_acquisition::ExecutionResultsChecksum;\n\n#[derive(DataSize, Debug, Clone, Display, PartialEq)]\npub(crate) enum NeedNext {\n    #[display(fmt = \"need next for {}: nothing\", _0)]\n    Nothing(BlockHash),\n    #[display(fmt = \"need next for {}: peers\", _0)]\n    Peers(BlockHash),\n    #[display(fmt = \"need next for {}: era validators\", _0)]\n    EraValidators(EraId),\n    #[display(fmt = \"need next for {}: block header\", _0)]\n    BlockHeader(BlockHash),\n    #[display(fmt = \"need next for {}: block body\", _0)]\n    BlockBody(BlockHash),\n    #[display(fmt = \"need next for {}: approvals hashes ({})\", _0, _1)]\n    ApprovalsHashes(BlockHash, Box<Block>),\n    #[display(\n        fmt = \"need next for {}: finality signatures at {} ({} validators)\",\n        _0,\n        _1,\n        \"_2.len()\"\n    )]\n    FinalitySignatures(BlockHash, EraId, Vec<PublicKey>),\n    #[display(fmt = \"need next for {}: global state (state root hash {})\", _0, _1)]\n    GlobalState(BlockHash, Digest),\n    #[display(fmt = \"need next for {}: deploy {}\", _0, _1)]\n    DeployByHash(BlockHash, DeployHash),\n    #[display(fmt = \"need next for {}: transaction {}\", _0, _1)]\n    TransactionById(BlockHash, TransactionId),\n    #[display(fmt = \"need next for {}: make block executable (height {})\", _0, _1)]\n    MakeExecutableBlock(BlockHash, u64),\n    #[display(\n        fmt = \"need next for {}: enqueue this block (height {}) for execution\",\n        _0,\n        _1\n    )]\n    EnqueueForExecution(BlockHash, u64, Box<ExecutableBlock>),\n    /// We want the Merkle root hash stored in global state under the ChecksumRegistry key for the\n    /// execution results.\n    #[display(\n        fmt = \"need next for {}: 
execution results checksum (state root hash {})\",\n        _0,\n        _1\n    )]\n    ExecutionResultsChecksum(BlockHash, Digest),\n    #[display(fmt = \"need next for {}: {} (checksum {})\", _0, _1, _2)]\n    ExecutionResults(\n        BlockHash,\n        BlockExecutionResultsOrChunkId,\n        ExecutionResultsChecksum,\n    ),\n    #[display(fmt = \"need next for {}: mark complete (height {})\", _0, _1)]\n    BlockMarkedComplete(BlockHash, u64),\n    #[display(\n        fmt = \"need next for {}: transition acquisition state to HaveStrictFinality (height {})\",\n        _0,\n        _1\n    )]\n    SwitchToHaveStrictFinality(BlockHash, u64),\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/peer_list/tests.rs",
    "content": "use std::collections::HashSet;\n\nuse super::*;\nuse casper_types::testing::TestRng;\n\nimpl PeerList {\n    pub(crate) fn is_peer_unreliable(&self, peer_id: &NodeId) -> bool {\n        *self.peer_list.get(peer_id).unwrap() == PeerQuality::Unreliable\n    }\n\n    pub(crate) fn is_peer_reliable(&self, peer_id: &NodeId) -> bool {\n        *self.peer_list.get(peer_id).unwrap() == PeerQuality::Reliable\n    }\n\n    pub(crate) fn is_peer_unknown(&self, peer_id: &NodeId) -> bool {\n        *self.peer_list.get(peer_id).unwrap() == PeerQuality::Unknown\n    }\n}\n\n// Create multiple random peers\nfn random_peers(rng: &mut TestRng, num_random_peers: usize) -> HashSet<NodeId> {\n    (0..num_random_peers).map(|_| NodeId::random(rng)).collect()\n}\n\n#[test]\nfn number_of_qualified_peers_is_correct() {\n    let mut rng = TestRng::new();\n    let mut peer_list = PeerList::new(5, TimeDiff::from_seconds(1));\n\n    let test_peers: Vec<NodeId> = random_peers(&mut rng, 10).into_iter().collect();\n\n    // Add test peers to the peer list and check the internal size\n    for peer in test_peers.iter() {\n        peer_list.register_peer(*peer);\n    }\n    assert_eq!(peer_list.peer_list.len(), 10);\n\n    // All peers should be `Unknown`; check that the number of qualified peers is within the\n    // `max_simultaneous_peers`\n    let qualified_peers = peer_list.qualified_peers(&mut rng);\n    assert_eq!(qualified_peers.len(), 5);\n\n    // Promote some peers to make them `Reliable`; check the count again\n    for peer in &test_peers[..3] {\n        peer_list.promote_peer(*peer);\n    }\n    let qualified_peers = peer_list.qualified_peers(&mut rng);\n    assert_eq!(qualified_peers.len(), 5);\n\n    // Demote some peers to make them `Unreliable`; check the count again\n    for peer in &test_peers[5..] 
{\n        peer_list.demote_peer(*peer);\n    }\n    let qualified_peers = peer_list.qualified_peers(&mut rng);\n    assert_eq!(qualified_peers.len(), 5);\n\n    // Disqualify 7 peers; only 3 peers should remain valid for proposal\n    for peer in &test_peers[..7] {\n        peer_list.disqualify_peer(*peer);\n    }\n    let qualified_peers = peer_list.qualified_peers(&mut rng);\n    assert_eq!(qualified_peers.len(), 3);\n}\n\n#[test]\nfn unknown_peer_becomes_reliable_when_promoted() {\n    let mut rng = TestRng::new();\n    let mut peer_list = PeerList::new(5, TimeDiff::from_seconds(1));\n    let test_peer = NodeId::random(&mut rng);\n\n    peer_list.register_peer(test_peer);\n    assert!(peer_list.is_peer_unknown(&test_peer));\n    peer_list.promote_peer(test_peer);\n    assert!(peer_list.is_peer_reliable(&test_peer));\n}\n\n#[test]\nfn unknown_peer_becomes_unreliable_when_demoted() {\n    let mut rng = TestRng::new();\n    let mut peer_list = PeerList::new(5, TimeDiff::from_seconds(1));\n    let test_peer = NodeId::random(&mut rng);\n\n    peer_list.register_peer(test_peer);\n    assert!(peer_list.is_peer_unknown(&test_peer));\n    peer_list.demote_peer(test_peer);\n    assert!(peer_list.is_peer_unreliable(&test_peer));\n}\n\n#[test]\nfn reliable_peer_becomes_unreliable_when_demoted() {\n    let mut rng = TestRng::new();\n    let mut peer_list = PeerList::new(5, TimeDiff::from_seconds(1));\n    let test_peer = NodeId::random(&mut rng);\n\n    peer_list.register_peer(test_peer);\n    assert!(peer_list.is_peer_unknown(&test_peer));\n    peer_list.promote_peer(test_peer);\n    assert!(peer_list.is_peer_reliable(&test_peer));\n    peer_list.demote_peer(test_peer);\n    assert!(peer_list.is_peer_unreliable(&test_peer));\n}\n\n#[test]\nfn unreliable_peer_becomes_reliable_when_promoted() {\n    let mut rng = TestRng::new();\n    let mut peer_list = PeerList::new(5, TimeDiff::from_seconds(1));\n    let test_peer = NodeId::random(&mut rng);\n\n    
peer_list.register_peer(test_peer);\n    assert!(peer_list.is_peer_unknown(&test_peer));\n    peer_list.demote_peer(test_peer);\n    assert!(peer_list.is_peer_unreliable(&test_peer));\n    peer_list.promote_peer(test_peer);\n    assert!(peer_list.is_peer_reliable(&test_peer));\n}\n\n#[test]\nfn unreliable_peer_remains_unreliable_if_demoted() {\n    let mut rng = TestRng::new();\n    let mut peer_list = PeerList::new(5, TimeDiff::from_seconds(1));\n    let test_peer = NodeId::random(&mut rng);\n\n    peer_list.register_peer(test_peer);\n    assert!(peer_list.is_peer_unknown(&test_peer));\n    peer_list.demote_peer(test_peer);\n    assert!(peer_list.is_peer_unreliable(&test_peer));\n    peer_list.demote_peer(test_peer);\n    assert!(peer_list.is_peer_unreliable(&test_peer));\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/peer_list.rs",
    "content": "#[cfg(test)]\nmod tests;\n\nuse std::collections::{btree_map::Entry, BTreeMap};\n\nuse datasize::DataSize;\nuse itertools::Itertools;\nuse rand::seq::IteratorRandom;\nuse tracing::debug;\n\nuse crate::{types::NodeId, NodeRng};\nuse casper_types::{TimeDiff, Timestamp};\n\n#[derive(Copy, Clone, PartialEq, Eq, DataSize, Debug, Default)]\nenum PeerQuality {\n    #[default]\n    Unknown,\n    Unreliable,\n    Reliable,\n    Dishonest,\n}\n\npub(super) enum PeersStatus {\n    Sufficient,\n    Insufficient,\n    Stale,\n}\n\n#[derive(Clone, PartialEq, Eq, DataSize, Debug)]\npub(super) struct PeerList {\n    peer_list: BTreeMap<NodeId, PeerQuality>,\n    keep_fresh: Timestamp,\n    max_simultaneous_peers: u8,\n    peer_refresh_interval: TimeDiff,\n}\n\nimpl PeerList {\n    pub(super) fn new(max_simultaneous_peers: u8, peer_refresh_interval: TimeDiff) -> Self {\n        PeerList {\n            peer_list: BTreeMap::new(),\n            keep_fresh: Timestamp::now(),\n            max_simultaneous_peers,\n            peer_refresh_interval,\n        }\n    }\n    pub(super) fn register_peer(&mut self, peer: NodeId) {\n        if self.peer_list.contains_key(&peer) {\n            return;\n        }\n        self.peer_list.insert(peer, PeerQuality::Unknown);\n        self.keep_fresh = Timestamp::now();\n    }\n\n    pub(super) fn dishonest_peers(&self) -> Vec<NodeId> {\n        self.peer_list\n            .iter()\n            .filter_map(|(node_id, pq)| {\n                if *pq == PeerQuality::Dishonest {\n                    Some(*node_id)\n                } else {\n                    None\n                }\n            })\n            .collect_vec()\n    }\n\n    pub(super) fn flush(&mut self) {\n        self.peer_list.clear();\n    }\n\n    pub(super) fn flush_dishonest_peers(&mut self) {\n        self.peer_list.retain(|_, v| *v != PeerQuality::Dishonest);\n    }\n\n    pub(super) fn disqualify_peer(&mut self, peer: NodeId) {\n        
self.peer_list.insert(peer, PeerQuality::Dishonest);\n    }\n\n    pub(super) fn promote_peer(&mut self, peer: NodeId) {\n        debug!(\"BlockSynchronizer: promoting peer {:?}\", peer);\n        // vacant should be unreachable\n        match self.peer_list.entry(peer) {\n            Entry::Vacant(_) => {\n                self.peer_list.insert(peer, PeerQuality::Unknown);\n            }\n            Entry::Occupied(entry) => match entry.get() {\n                PeerQuality::Dishonest => {\n                    // no change -- this is terminal\n                }\n                PeerQuality::Unreliable | PeerQuality::Unknown => {\n                    self.peer_list.insert(peer, PeerQuality::Reliable);\n                }\n                PeerQuality::Reliable => {\n                    // no change -- this is the best\n                }\n            },\n        }\n    }\n\n    pub(super) fn demote_peer(&mut self, peer: NodeId) {\n        debug!(\"BlockSynchronizer: demoting peer {:?}\", peer);\n        // vacant should be unreachable\n        match self.peer_list.entry(peer) {\n            Entry::Vacant(_) => {\n                // no change\n            }\n            Entry::Occupied(entry) => match entry.get() {\n                PeerQuality::Dishonest | PeerQuality::Unreliable => {\n                    // no change\n                }\n                PeerQuality::Reliable | PeerQuality::Unknown => {\n                    self.peer_list.insert(peer, PeerQuality::Unreliable);\n                }\n            },\n        }\n    }\n\n    pub(super) fn need_peers(&mut self) -> PeersStatus {\n        if !self\n            .peer_list\n            .iter()\n            .any(|(_, pq)| *pq != PeerQuality::Dishonest)\n        {\n            debug!(\"PeerList: no honest peers\");\n            return PeersStatus::Insufficient;\n        }\n\n        // periodically ask for refreshed peers\n        if Timestamp::now().saturating_diff(self.keep_fresh) > self.peer_refresh_interval {\n   
         self.keep_fresh = Timestamp::now();\n            let count = self\n                .peer_list\n                .iter()\n                .filter(|(_, pq)| **pq == PeerQuality::Reliable || **pq == PeerQuality::Unknown)\n                .count();\n            let reliability_goal = self.max_simultaneous_peers as usize;\n            if count < reliability_goal {\n                debug!(\"PeerList: is stale\");\n                return PeersStatus::Stale;\n            }\n        }\n\n        PeersStatus::Sufficient\n    }\n\n    fn get_random_peers_by_quality(\n        &self,\n        rng: &mut NodeRng,\n        up_to: usize,\n        peer_quality: PeerQuality,\n    ) -> Vec<NodeId> {\n        self.peer_list\n            .iter()\n            .filter(|(_peer, quality)| **quality == peer_quality)\n            .choose_multiple(rng, up_to)\n            .into_iter()\n            .map(|(peer, _)| *peer)\n            .collect()\n    }\n\n    pub(super) fn qualified_peers(&self, rng: &mut NodeRng) -> Vec<NodeId> {\n        self.qualified_peers_up_to(rng, self.max_simultaneous_peers as usize)\n    }\n\n    pub(super) fn qualified_peers_up_to(&self, rng: &mut NodeRng, up_to: usize) -> Vec<NodeId> {\n        // get most useful up to limit\n        let mut peers = self.get_random_peers_by_quality(rng, up_to, PeerQuality::Reliable);\n\n        // if below limit get unknown peers which may or may not be useful\n        let missing = up_to.saturating_sub(peers.len());\n        if missing > 0 {\n            peers.extend(self.get_random_peers_by_quality(rng, missing, PeerQuality::Unknown));\n        }\n\n        // if still below limit try unreliable peers again until we have the chance to refresh the\n        // peer list\n        let missing = up_to.saturating_sub(peers.len());\n        if missing > 0 {\n            peers.extend(self.get_random_peers_by_quality(rng, missing, PeerQuality::Unreliable));\n        }\n\n        peers\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/signature_acquisition.rs",
    "content": "use std::collections::{btree_map::Entry, BTreeMap};\n\nuse datasize::DataSize;\n\nuse casper_types::{FinalitySignature, LegacyRequiredFinality, PublicKey};\n\nuse super::block_acquisition::Acceptance;\nuse crate::types::{EraValidatorWeights, SignatureWeight};\n\n#[derive(Clone, PartialEq, Eq, DataSize, Debug)]\nenum SignatureState {\n    Vacant,\n    Pending,\n    Signature(Box<FinalitySignature>),\n}\n\n#[derive(Clone, PartialEq, Eq, DataSize, Debug)]\npub(super) struct SignatureAcquisition {\n    inner: BTreeMap<PublicKey, SignatureState>,\n    maybe_is_legacy: Option<bool>,\n    signature_weight: SignatureWeight,\n    legacy_required_finality: LegacyRequiredFinality,\n}\n\nimpl SignatureAcquisition {\n    pub(super) fn new(\n        validators: Vec<PublicKey>,\n        legacy_required_finality: LegacyRequiredFinality,\n    ) -> Self {\n        let inner = validators\n            .into_iter()\n            .map(|validator| (validator, SignatureState::Vacant))\n            .collect();\n        let maybe_is_legacy = None;\n        SignatureAcquisition {\n            inner,\n            maybe_is_legacy,\n            signature_weight: SignatureWeight::Insufficient,\n            legacy_required_finality,\n        }\n    }\n\n    pub(super) fn register_pending(&mut self, public_key: PublicKey) {\n        match self.inner.entry(public_key) {\n            Entry::Vacant(vacant_entry) => {\n                vacant_entry.insert(SignatureState::Pending);\n            }\n            Entry::Occupied(mut occupied_entry) => {\n                if *occupied_entry.get() == SignatureState::Vacant {\n                    occupied_entry.insert(SignatureState::Pending);\n                }\n            }\n        }\n    }\n\n    pub(super) fn apply_signature(\n        &mut self,\n        finality_signature: FinalitySignature,\n        validator_weights: &EraValidatorWeights,\n    ) -> Acceptance {\n        let acceptance = match 
self.inner.entry(finality_signature.public_key().clone()) {\n            Entry::Vacant(vacant_entry) => {\n                vacant_entry.insert(SignatureState::Signature(Box::new(finality_signature)));\n                Acceptance::NeededIt\n            }\n            Entry::Occupied(mut occupied_entry) => match *occupied_entry.get() {\n                SignatureState::Vacant | SignatureState::Pending => {\n                    occupied_entry.insert(SignatureState::Signature(Box::new(finality_signature)));\n                    Acceptance::NeededIt\n                }\n                SignatureState::Signature(_) => Acceptance::HadIt,\n            },\n        };\n        if self.signature_weight != SignatureWeight::Strict {\n            self.signature_weight = validator_weights.signature_weight(self.have_signatures());\n        }\n        acceptance\n    }\n\n    pub(super) fn have_signatures(&self) -> impl Iterator<Item = &PublicKey> {\n        self.inner.iter().filter_map(|(k, v)| match v {\n            SignatureState::Vacant | SignatureState::Pending => None,\n            SignatureState::Signature(_finality_signature) => Some(k),\n        })\n    }\n\n    pub(super) fn not_vacant(&self) -> impl Iterator<Item = &PublicKey> {\n        self.inner.iter().filter_map(|(k, v)| match v {\n            SignatureState::Vacant => None,\n            SignatureState::Pending | SignatureState::Signature(_) => Some(k),\n        })\n    }\n\n    pub(super) fn not_pending(&self) -> impl Iterator<Item = &PublicKey> {\n        self.inner.iter().filter_map(|(k, v)| match v {\n            SignatureState::Pending => None,\n            SignatureState::Vacant | SignatureState::Signature(_) => Some(k),\n        })\n    }\n\n    pub(super) fn set_is_legacy(&mut self, is_legacy: bool) {\n        self.maybe_is_legacy = Some(is_legacy);\n    }\n\n    pub(super) fn is_legacy(&self) -> bool {\n        self.maybe_is_legacy.unwrap_or(false)\n    }\n\n    pub(super) fn signature_weight(&self) -> 
SignatureWeight {\n        self.signature_weight\n    }\n\n    // Determines signature weight sufficiency based on the type of sync (forward or historical) and\n    // the protocol version that the block was created with (pre-1.5 or post-1.5)\n    // `requires_strict_finality` determines what the caller requires with regards to signature\n    // sufficiency:\n    //      * false means that the caller considers `Weak` finality as sufficient\n    //      * true means that the caller considers `Strict` finality as sufficient\n    pub(super) fn has_sufficient_finality(\n        &self,\n        is_historical: bool,\n        requires_strict_finality: bool,\n    ) -> bool {\n        if is_historical && self.is_legacy() {\n            match self.legacy_required_finality {\n                LegacyRequiredFinality::Strict => self\n                    .signature_weight\n                    .is_sufficient(requires_strict_finality),\n                LegacyRequiredFinality::Weak => {\n                    self.signature_weight == SignatureWeight::Strict\n                        || self.signature_weight == SignatureWeight::Weak\n                }\n                LegacyRequiredFinality::Any => true,\n            }\n        } else {\n            self.signature_weight\n                .is_sufficient(requires_strict_finality)\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::{collections::BTreeSet, fmt::Debug, iter};\n\n    use assert_matches::assert_matches;\n    use itertools::Itertools;\n    use num_rational::Ratio;\n    use rand::Rng;\n\n    use casper_types::{\n        testing::TestRng, BlockHash, ChainNameDigest, EraId, FinalitySignatureV2, SecretKey, U512,\n    };\n\n    use super::*;\n\n    impl SignatureAcquisition {\n        pub(super) fn have_no_vacant(&self) -> bool {\n            self.inner.iter().all(|(_, v)| *v != SignatureState::Vacant)\n        }\n    }\n\n    fn keypair(rng: &mut TestRng) -> (PublicKey, SecretKey) {\n        let secret = 
SecretKey::random(rng);\n        let public = PublicKey::from(&secret);\n\n        (public, secret)\n    }\n\n    /// Asserts that 2 iterators iterate over the same set of items.\n    macro_rules! assert_iter_equal {\n        ( $left:expr, $right:expr $(,)? ) => {{\n            fn to_btreeset<T: Ord + Debug>(\n                left: impl IntoIterator<Item = T>,\n                right: impl IntoIterator<Item = T>,\n            ) -> (BTreeSet<T>, BTreeSet<T>) {\n                (left.into_iter().collect(), right.into_iter().collect())\n            }\n\n            let (left, right) = to_btreeset($left, $right);\n            assert_eq!(left, right);\n        }};\n    }\n\n    fn test_finality_with_ratio(finality_threshold: Ratio<u64>, first_weight: SignatureWeight) {\n        let rng = &mut TestRng::new();\n        let validators = iter::repeat_with(|| keypair(rng)).take(4).collect_vec();\n        let block_hash = BlockHash::random(rng);\n        let block_height = rng.gen();\n        let era_id = EraId::new(rng.gen());\n        let chain_name_hash = ChainNameDigest::random(rng);\n        let weights = EraValidatorWeights::new(\n            era_id,\n            validators\n                .iter()\n                .enumerate()\n                .map(|(i, (public, _))| (public.clone(), (i + 1).into()))\n                .collect(),\n            finality_threshold,\n        );\n        assert_eq!(U512::from(10), weights.get_total_weight());\n        let mut signature_acquisition = SignatureAcquisition::new(\n            validators.iter().map(|(p, _)| p.clone()).collect(),\n            LegacyRequiredFinality::Strict,\n        );\n\n        // Signature for the validator #0 weighting 1:\n        let (public_0, secret_0) = validators.first().unwrap();\n        let finality_signature = FinalitySignatureV2::create(\n            block_hash,\n            block_height,\n            era_id,\n            chain_name_hash,\n            secret_0,\n        );\n        assert_matches!(\n  
          signature_acquisition.apply_signature(finality_signature.into(), &weights),\n            Acceptance::NeededIt\n        );\n        assert_iter_equal!(signature_acquisition.have_signatures(), [public_0]);\n        assert_iter_equal!(signature_acquisition.not_vacant(), [public_0]);\n        assert!(signature_acquisition.have_no_vacant() == false);\n        assert_iter_equal!(\n            signature_acquisition.not_pending(),\n            validators.iter().map(|(p, _)| p),\n        );\n\n        assert_eq!(signature_acquisition.signature_weight(), first_weight);\n\n        // Signature for the validator #2 weighting 3:\n        let (public_2, secret_2) = validators.get(2).unwrap();\n        let finality_signature = FinalitySignatureV2::create(\n            block_hash,\n            block_height,\n            era_id,\n            chain_name_hash,\n            secret_2,\n        );\n        assert_matches!(\n            signature_acquisition.apply_signature(finality_signature.into(), &weights),\n            Acceptance::NeededIt\n        );\n        assert_iter_equal!(\n            signature_acquisition.have_signatures(),\n            [public_0, public_2],\n        );\n        assert_iter_equal!(signature_acquisition.not_vacant(), [public_0, public_2]);\n        assert!(signature_acquisition.have_no_vacant() == false);\n        assert_iter_equal!(\n            signature_acquisition.not_pending(),\n            validators.iter().map(|(p, _)| p),\n        );\n        // The total signed weight is 4/10, which is higher than 1/3:\n        assert_eq!(\n            signature_acquisition.signature_weight(),\n            SignatureWeight::Weak\n        );\n\n        // Signature for the validator #3 weighting 4:\n        let (public_3, secret_3) = validators.get(3).unwrap();\n        let finality_signature = FinalitySignatureV2::create(\n            block_hash,\n            block_height,\n            era_id,\n            chain_name_hash,\n            secret_3,\n        
);\n        assert_matches!(\n            signature_acquisition.apply_signature(finality_signature.into(), &weights),\n            Acceptance::NeededIt\n        );\n        assert_iter_equal!(\n            signature_acquisition.have_signatures(),\n            [public_0, public_2, public_3],\n        );\n        assert_iter_equal!(\n            signature_acquisition.not_vacant(),\n            [public_0, public_2, public_3],\n        );\n        assert!(signature_acquisition.have_no_vacant() == false);\n        assert_iter_equal!(\n            signature_acquisition.not_pending(),\n            validators.iter().map(|(p, _)| p),\n        );\n        // The total signed weight is 8/10, which is higher than 2/3:\n        assert_eq!(\n            signature_acquisition.signature_weight(),\n            SignatureWeight::Strict\n        );\n    }\n\n    #[test]\n    fn should_return_insufficient_when_weight_1_and_1_3_is_required() {\n        test_finality_with_ratio(Ratio::new(1, 3), SignatureWeight::Insufficient)\n    }\n\n    #[test]\n    fn should_return_weak_when_weight_1_and_1_10_is_required() {\n        test_finality_with_ratio(Ratio::new(1, 10), SignatureWeight::Insufficient)\n    }\n\n    #[test]\n    fn should_return_weak_when_weight_1_and_1_11_is_required() {\n        test_finality_with_ratio(Ratio::new(1, 11), SignatureWeight::Weak)\n    }\n\n    #[test]\n    fn adding_a_not_already_stored_validator_signature_works() {\n        let rng = &mut TestRng::new();\n        let validators = iter::repeat_with(|| keypair(rng)).take(4).collect_vec();\n        let block_hash = BlockHash::random(rng);\n        let block_height = rng.gen();\n        let chain_name_hash = ChainNameDigest::random(rng);\n        let era_id = EraId::new(rng.gen());\n        let weights = EraValidatorWeights::new(\n            era_id,\n            validators\n                .iter()\n                .enumerate()\n                .map(|(i, (public, _))| (public.clone(), (i + 1).into()))\n             
   .collect(),\n            Ratio::new(1, 3), // Highway finality\n        );\n        assert_eq!(U512::from(10), weights.get_total_weight());\n        let mut signature_acquisition = SignatureAcquisition::new(\n            validators.iter().map(|(p, _)| p.clone()).collect(),\n            LegacyRequiredFinality::Strict,\n        );\n\n        // Signature for an already stored validator:\n        let (_public_0, secret_0) = validators.first().unwrap();\n        let finality_signature = FinalitySignatureV2::create(\n            block_hash,\n            block_height,\n            era_id,\n            chain_name_hash,\n            secret_0,\n        );\n        assert_matches!(\n            signature_acquisition.apply_signature(finality_signature.into(), &weights),\n            Acceptance::NeededIt\n        );\n\n        // Signature for an unknown validator:\n        let (_public, secret) = keypair(rng);\n        let finality_signature =\n            FinalitySignatureV2::create(block_hash, block_height, era_id, chain_name_hash, &secret);\n        assert_matches!(\n            signature_acquisition.apply_signature(finality_signature.into(), &weights),\n            Acceptance::NeededIt\n        );\n    }\n\n    #[test]\n    fn signing_twice_does_nothing() {\n        let rng = &mut TestRng::new();\n        let validators = iter::repeat_with(|| keypair(rng)).take(4).collect_vec();\n        let block_hash = BlockHash::random(rng);\n        let block_height = rng.gen();\n        let chain_name_hash = ChainNameDigest::random(rng);\n        let era_id = EraId::new(rng.gen());\n        let weights = EraValidatorWeights::new(\n            era_id,\n            validators\n                .iter()\n                .enumerate()\n                .map(|(i, (public, _))| (public.clone(), (i + 1).into()))\n                .collect(),\n            Ratio::new(1, 3), // Highway finality\n        );\n        assert_eq!(U512::from(10), weights.get_total_weight());\n        let mut 
signature_acquisition = SignatureAcquisition::new(\n            validators.iter().map(|(p, _)| p.clone()).collect(),\n            LegacyRequiredFinality::Strict,\n        );\n\n        let (_public_0, secret_0) = validators.first().unwrap();\n\n        // Signature for an already stored validator:\n        let finality_signature = FinalitySignatureV2::create(\n            block_hash,\n            block_height,\n            era_id,\n            chain_name_hash,\n            secret_0,\n        );\n        assert_matches!(\n            signature_acquisition.apply_signature(finality_signature.into(), &weights),\n            Acceptance::NeededIt\n        );\n\n        // Signing again returns `HadIt`:\n        let finality_signature = FinalitySignatureV2::create(\n            block_hash,\n            block_height,\n            era_id,\n            chain_name_hash,\n            secret_0,\n        );\n        assert_matches!(\n            signature_acquisition.apply_signature(finality_signature.into(), &weights),\n            Acceptance::HadIt\n        );\n    }\n\n    #[test]\n    fn register_pending_has_the_expected_behavior() {\n        let rng = &mut TestRng::new();\n        let validators = iter::repeat_with(|| keypair(rng)).take(4).collect_vec();\n        let block_hash = BlockHash::random(rng);\n        let block_height = rng.gen();\n        let era_id = EraId::new(rng.gen());\n        let chain_name_hash = ChainNameDigest::random(rng);\n        let weights = EraValidatorWeights::new(\n            era_id,\n            validators\n                .iter()\n                .enumerate()\n                .map(|(i, (public, _))| (public.clone(), (i + 1).into()))\n                .collect(),\n            Ratio::new(1, 11), // Low finality threshold\n        );\n        assert_eq!(U512::from(10), weights.get_total_weight());\n        let mut signature_acquisition = SignatureAcquisition::new(\n            validators.iter().map(|(p, _)| p.clone()).collect(),\n            
LegacyRequiredFinality::Strict,\n        );\n\n        // Set the validator #0 weighting 1 as pending:\n        let (public_0, secret_0) = validators.first().unwrap();\n        signature_acquisition.register_pending(public_0.clone());\n        assert_iter_equal!(signature_acquisition.have_signatures(), []);\n        assert_iter_equal!(signature_acquisition.not_vacant(), [public_0]);\n        assert_iter_equal!(\n            signature_acquisition.not_pending(),\n            validators.iter().skip(1).map(|(p, _s)| p).collect_vec(),\n        );\n        assert!(signature_acquisition.have_no_vacant() == false);\n        assert_eq!(\n            signature_acquisition.signature_weight(),\n            SignatureWeight::Insufficient\n        );\n\n        // Sign it:\n        let finality_signature = FinalitySignatureV2::create(\n            block_hash,\n            block_height,\n            era_id,\n            chain_name_hash,\n            secret_0,\n        );\n        assert_matches!(\n            signature_acquisition.apply_signature(finality_signature.into(), &weights),\n            Acceptance::NeededIt\n        );\n        assert_iter_equal!(signature_acquisition.have_signatures(), [public_0]);\n        assert_iter_equal!(signature_acquisition.not_vacant(), [public_0]);\n        assert!(signature_acquisition.have_no_vacant() == false);\n        assert_iter_equal!(\n            signature_acquisition.not_pending(),\n            validators.iter().map(|(p, _)| p),\n        );\n        assert_eq!(\n            signature_acquisition.signature_weight(),\n            SignatureWeight::Weak\n        );\n    }\n\n    #[test]\n    fn register_pending_an_unknown_validator_works() {\n        let rng = &mut TestRng::new();\n        let validators = iter::repeat_with(|| keypair(rng)).take(4).collect_vec();\n        let mut signature_acquisition = SignatureAcquisition::new(\n            validators.iter().map(|(p, _)| p.clone()).collect(),\n            
LegacyRequiredFinality::Strict,\n        );\n\n        // Set a new validator as pending:\n        let (public, _secret) = keypair(rng);\n        signature_acquisition.register_pending(public.clone());\n        assert_iter_equal!(signature_acquisition.have_signatures(), []);\n        assert_iter_equal!(signature_acquisition.not_vacant(), [&public]);\n        assert_iter_equal!(\n            signature_acquisition.not_pending(),\n            validators.iter().map(|(p, _s)| p),\n        );\n        assert!(signature_acquisition.have_no_vacant() == false);\n    }\n\n    #[test]\n    fn missing_legacy_flag_means_not_legacy() {\n        let signature_weight = SignatureWeight::Insufficient;\n        let legacy_required_finality = LegacyRequiredFinality::Any;\n\n        let sa = SignatureAcquisition {\n            inner: Default::default(),\n            maybe_is_legacy: None,\n            signature_weight,\n            legacy_required_finality,\n        };\n\n        assert!(!sa.is_legacy())\n    }\n\n    #[test]\n    fn not_historical_and_not_legacy_and_is_insufficient() {\n        let signature_weight = SignatureWeight::Insufficient;\n\n        // This parameter should not affect calculation for not historical and not legacy blocks.\n        let legacy_required_finality = [\n            LegacyRequiredFinality::Any,\n            LegacyRequiredFinality::Weak,\n            LegacyRequiredFinality::Strict,\n        ];\n\n        legacy_required_finality\n            .iter()\n            .for_each(|legacy_required_finality| {\n                let is_legacy = false;\n                let sa = SignatureAcquisition {\n                    inner: Default::default(),\n                    maybe_is_legacy: Some(is_legacy),\n                    signature_weight,\n                    legacy_required_finality: *legacy_required_finality,\n                };\n\n                let is_historical = false;\n                let requires_strict_finality = false;\n                let result = 
sa.has_sufficient_finality(is_historical, requires_strict_finality);\n                assert!(!result);\n\n                let requires_strict_finality = true;\n                let result = sa.has_sufficient_finality(is_historical, requires_strict_finality);\n                assert!(!result);\n            })\n    }\n\n    #[test]\n    fn not_historical_and_not_legacy_and_is_weak() {\n        let signature_weight = SignatureWeight::Weak;\n\n        // This parameter should not affect calculation for not historical and not legacy blocks.\n        let legacy_required_finality = [\n            LegacyRequiredFinality::Any,\n            LegacyRequiredFinality::Weak,\n            LegacyRequiredFinality::Strict,\n        ];\n\n        legacy_required_finality\n            .iter()\n            .for_each(|legacy_required_finality| {\n                let is_legacy = false;\n                let sa = SignatureAcquisition {\n                    inner: Default::default(),\n                    maybe_is_legacy: Some(is_legacy),\n                    signature_weight,\n                    legacy_required_finality: *legacy_required_finality,\n                };\n\n                let is_historical = false;\n                let requires_strict_finality = false;\n                let result = sa.has_sufficient_finality(is_historical, requires_strict_finality);\n                assert!(result);\n\n                let requires_strict_finality = true;\n                let result = sa.has_sufficient_finality(is_historical, requires_strict_finality);\n                assert!(!result);\n            })\n    }\n\n    #[test]\n    fn not_historical_and_not_legacy_and_is_strict() {\n        let signature_weight = SignatureWeight::Strict;\n\n        // This parameter should not affect calculation for not historical and not legacy blocks.\n        let legacy_required_finality = [\n            LegacyRequiredFinality::Any,\n            LegacyRequiredFinality::Weak,\n            
LegacyRequiredFinality::Strict,\n        ];\n\n        legacy_required_finality\n            .iter()\n            .for_each(|legacy_required_finality| {\n                let is_legacy = false;\n                let sa = SignatureAcquisition {\n                    inner: Default::default(),\n                    maybe_is_legacy: Some(is_legacy),\n                    signature_weight,\n                    legacy_required_finality: *legacy_required_finality,\n                };\n\n                let is_historical = false;\n                let requires_strict_finality = false;\n                let result = sa.has_sufficient_finality(is_historical, requires_strict_finality);\n                assert!(result);\n\n                let requires_strict_finality = true;\n                let result = sa.has_sufficient_finality(is_historical, requires_strict_finality);\n                assert!(result);\n            })\n    }\n\n    #[test]\n    fn historical_and_legacy_requires_any_and_is_insufficient() {\n        let signature_weight = SignatureWeight::Insufficient;\n        let legacy_required_finality = LegacyRequiredFinality::Any;\n\n        let is_legacy = true;\n        let sa = SignatureAcquisition {\n            inner: Default::default(),\n            maybe_is_legacy: Some(is_legacy),\n            signature_weight,\n            legacy_required_finality,\n        };\n\n        let is_historical = true;\n        let requires_strict_finality = false;\n        let result = sa.has_sufficient_finality(is_historical, requires_strict_finality);\n        assert!(result);\n\n        let requires_strict_finality = true;\n        let result = sa.has_sufficient_finality(is_historical, requires_strict_finality);\n        assert!(result);\n    }\n\n    #[test]\n    fn historical_and_legacy_requires_any_and_is_weak() {\n        let signature_weight = SignatureWeight::Weak;\n        let legacy_required_finality = LegacyRequiredFinality::Any;\n\n        let is_legacy = true;\n        let 
sa = SignatureAcquisition {\n            inner: Default::default(),\n            maybe_is_legacy: Some(is_legacy),\n            signature_weight,\n            legacy_required_finality,\n        };\n\n        let is_historical = true;\n        let requires_strict_finality = false;\n        let result = sa.has_sufficient_finality(is_historical, requires_strict_finality);\n        assert!(result);\n\n        let requires_strict_finality = true;\n        let result = sa.has_sufficient_finality(is_historical, requires_strict_finality);\n        assert!(result);\n    }\n\n    #[test]\n    fn historical_and_legacy_requires_any_and_is_strict() {\n        let signature_weight = SignatureWeight::Strict;\n        let legacy_required_finality = LegacyRequiredFinality::Any;\n\n        let is_legacy = true;\n        let sa = SignatureAcquisition {\n            inner: Default::default(),\n            maybe_is_legacy: Some(is_legacy),\n            signature_weight,\n            legacy_required_finality,\n        };\n\n        let is_historical = true;\n        let requires_strict_finality = false;\n        let result = sa.has_sufficient_finality(is_historical, requires_strict_finality);\n        assert!(result);\n\n        let requires_strict_finality = true;\n        let result = sa.has_sufficient_finality(is_historical, requires_strict_finality);\n        assert!(result);\n    }\n\n    #[test]\n    fn historical_and_legacy_requires_weak_and_is_insufficient() {\n        let signature_weight = SignatureWeight::Insufficient;\n        let legacy_required_finality = LegacyRequiredFinality::Weak;\n\n        let is_legacy = true;\n        let sa = SignatureAcquisition {\n            inner: Default::default(),\n            maybe_is_legacy: Some(is_legacy),\n            signature_weight,\n            legacy_required_finality,\n        };\n\n        let is_historical = true;\n        let requires_strict_finality = false;\n        let result = sa.has_sufficient_finality(is_historical, 
requires_strict_finality);\n        assert!(!result);\n\n        let requires_strict_finality = true;\n        let result = sa.has_sufficient_finality(is_historical, requires_strict_finality);\n        assert!(!result);\n    }\n\n    #[test]\n    fn historical_and_legacy_requires_weak_and_is_weak() {\n        let signature_weight = SignatureWeight::Weak;\n        let legacy_required_finality = LegacyRequiredFinality::Weak;\n\n        let is_legacy = true;\n        let sa = SignatureAcquisition {\n            inner: Default::default(),\n            maybe_is_legacy: Some(is_legacy),\n            signature_weight,\n            legacy_required_finality,\n        };\n\n        let is_historical = true;\n        let requires_strict_finality = false;\n        let result = sa.has_sufficient_finality(is_historical, requires_strict_finality);\n        assert!(result);\n\n        let requires_strict_finality = true;\n        let result = sa.has_sufficient_finality(is_historical, requires_strict_finality);\n        assert!(result);\n    }\n\n    #[test]\n    fn historical_and_legacy_requires_weak_and_is_strict() {\n        let signature_weight = SignatureWeight::Strict;\n        let legacy_required_finality = LegacyRequiredFinality::Weak;\n\n        let is_legacy = true;\n        let sa = SignatureAcquisition {\n            inner: Default::default(),\n            maybe_is_legacy: Some(is_legacy),\n            signature_weight,\n            legacy_required_finality,\n        };\n\n        let is_historical = true;\n        let requires_strict_finality = false;\n        let result = sa.has_sufficient_finality(is_historical, requires_strict_finality);\n        assert!(result);\n\n        let requires_strict_finality = true;\n        let result = sa.has_sufficient_finality(is_historical, requires_strict_finality);\n        assert!(result);\n    }\n\n    #[test]\n    fn historical_and_legacy_requires_strict_and_is_insufficient() {\n        let signature_weight = 
SignatureWeight::Insufficient;\n        let legacy_required_finality = LegacyRequiredFinality::Strict;\n\n        let is_legacy = true;\n        let sa = SignatureAcquisition {\n            inner: Default::default(),\n            maybe_is_legacy: Some(is_legacy),\n            signature_weight,\n            legacy_required_finality,\n        };\n\n        let is_historical = true;\n        let requires_strict_finality = false;\n        let result = sa.has_sufficient_finality(is_historical, requires_strict_finality);\n        assert!(!result);\n\n        let requires_strict_finality = true;\n        let result = sa.has_sufficient_finality(is_historical, requires_strict_finality);\n        assert!(!result);\n    }\n\n    #[test]\n    fn historical_and_legacy_requires_strict_and_is_weak() {\n        let signature_weight = SignatureWeight::Weak;\n        let legacy_required_finality = LegacyRequiredFinality::Strict;\n\n        let is_legacy = true;\n        let sa = SignatureAcquisition {\n            inner: Default::default(),\n            maybe_is_legacy: Some(is_legacy),\n            signature_weight,\n            legacy_required_finality,\n        };\n\n        let is_historical = true;\n        let requires_strict_finality = false;\n        let result = sa.has_sufficient_finality(is_historical, requires_strict_finality);\n        assert!(result);\n\n        let requires_strict_finality = true;\n        let result = sa.has_sufficient_finality(is_historical, requires_strict_finality);\n        assert!(!result);\n    }\n\n    #[test]\n    fn historical_and_legacy_requires_strict_and_is_strict() {\n        let signature_weight = SignatureWeight::Strict;\n        let legacy_required_finality = LegacyRequiredFinality::Strict;\n\n        let is_legacy = true;\n        let sa = SignatureAcquisition {\n            inner: Default::default(),\n            maybe_is_legacy: Some(is_legacy),\n            signature_weight,\n            legacy_required_finality,\n        };\n\n    
    let is_historical = true;\n        let requires_strict_finality = false;\n        let result = sa.has_sufficient_finality(is_historical, requires_strict_finality);\n        assert!(result);\n\n        let requires_strict_finality = true;\n        let result = sa.has_sufficient_finality(is_historical, requires_strict_finality);\n        assert!(result);\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/tests/test_utils.rs",
    "content": "use std::{collections::BTreeMap, convert::TryInto};\n\nuse crate::types::TrieOrChunkId;\n#[cfg(test)]\nuse casper_types::ChunkWithProof;\nuse rand::Rng;\n\npub(crate) fn chunks_with_proof_from_data(data: &[u8]) -> BTreeMap<u64, ChunkWithProof> {\n    (0..data.chunks(ChunkWithProof::CHUNK_SIZE_BYTES).count())\n        .map(|index| {\n            (\n                index as u64,\n                ChunkWithProof::new(data, index.try_into().unwrap()).unwrap(),\n            )\n        })\n        .collect()\n}\n\npub(crate) fn test_chunks_with_proof(\n    num_chunks: u64,\n) -> (Vec<ChunkWithProof>, Vec<TrieOrChunkId>, Vec<u8>) {\n    let mut rng = rand::thread_rng();\n    let data: Vec<u8> = (0..ChunkWithProof::CHUNK_SIZE_BYTES * num_chunks as usize)\n        .map(|_| rng.gen())\n        .collect();\n\n    let chunks = chunks_with_proof_from_data(&data);\n\n    let chunk_ids: Vec<TrieOrChunkId> = chunks\n        .iter()\n        .map(|(index, chunk)| TrieOrChunkId(*index, chunk.proof().root_hash()))\n        .collect();\n\n    (chunks.values().cloned().collect(), chunk_ids, data)\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/tests.rs",
    "content": "pub(crate) mod test_utils;\n\nuse std::{\n    cmp::min,\n    collections::{BTreeMap, VecDeque},\n    convert::TryInto,\n    iter,\n    time::Duration,\n};\n\nuse assert_matches::assert_matches;\nuse derive_more::From;\nuse num_rational::Ratio;\nuse rand::{seq::IteratorRandom, Rng};\n\nuse casper_storage::data_access_layer::ExecutionResultsChecksumResult;\nuse casper_types::{\n    global_state::TrieMerkleProof, testing::TestRng, AccessRights, BlockV2, CLValue,\n    ChainNameDigest, Chainspec, Deploy, Digest, EraId, FinalitySignatureV2, Key,\n    LegacyRequiredFinality, ProtocolVersion, PublicKey, SecretKey, StoredValue, TestBlockBuilder,\n    TestBlockV1Builder, TimeDiff, URef, U512,\n};\n\nuse super::*;\nuse crate::{\n    components::{\n        block_synchronizer::block_acquisition::BlockAcquisitionState,\n        consensus::tests::utils::{ALICE_PUBLIC_KEY, ALICE_SECRET_KEY},\n    },\n    effect::Effect,\n    reactor::{EventQueueHandle, QueueKind, Scheduler},\n    tls::KeyFingerprint,\n    types::{BlockExecutionResultsOrChunkId, ValueOrChunk},\n    utils,\n};\n\nconst MAX_SIMULTANEOUS_PEERS: u8 = 5;\nconst TEST_LATCH_RESET_INTERVAL_MILLIS: u64 = 5;\nconst SHOULD_FETCH_EXECUTION_STATE: bool = true;\nconst STRICT_FINALITY_REQUIRED_VERSION: ProtocolVersion = ProtocolVersion::from_parts(1, 5, 0);\n\n/// Event for the mock reactor.\n#[derive(Debug, From)]\nenum MockReactorEvent {\n    MarkBlockCompletedRequest(#[allow(dead_code)] MarkBlockCompletedRequest),\n    BlockFetcherRequest(FetcherRequest<Block>),\n    BlockHeaderFetcherRequest(FetcherRequest<BlockHeader>),\n    LegacyDeployFetcherRequest(FetcherRequest<LegacyDeploy>),\n    TransactionFetcherRequest(FetcherRequest<Transaction>),\n    FinalitySignatureFetcherRequest(FetcherRequest<FinalitySignature>),\n    TrieOrChunkFetcherRequest(#[allow(dead_code)] FetcherRequest<TrieOrChunk>),\n    BlockExecutionResultsOrChunkFetcherRequest(FetcherRequest<BlockExecutionResultsOrChunk>),\n    
SyncLeapFetcherRequest(#[allow(dead_code)] FetcherRequest<SyncLeap>),\n    ApprovalsHashesFetcherRequest(FetcherRequest<ApprovalsHashes>),\n    NetworkInfoRequest(NetworkInfoRequest),\n    BlockAccumulatorRequest(BlockAccumulatorRequest),\n    PeerBehaviorAnnouncement(#[allow(dead_code)] PeerBehaviorAnnouncement),\n    StorageRequest(StorageRequest),\n    TrieAccumulatorRequest(#[allow(dead_code)] TrieAccumulatorRequest),\n    ContractRuntimeRequest(ContractRuntimeRequest),\n    SyncGlobalStateRequest(SyncGlobalStateRequest),\n    MakeBlockExecutableRequest(MakeBlockExecutableRequest),\n    MetaBlockAnnouncement(MetaBlockAnnouncement),\n}\n\nstruct MockReactor {\n    scheduler: &'static Scheduler<MockReactorEvent>,\n    effect_builder: EffectBuilder<MockReactorEvent>,\n}\n\nimpl MockReactor {\n    fn new() -> Self {\n        let scheduler = utils::leak(Scheduler::new(QueueKind::weights(), None));\n        let event_queue_handle = EventQueueHandle::without_shutdown(scheduler);\n        let effect_builder = EffectBuilder::new(event_queue_handle);\n        MockReactor {\n            scheduler,\n            effect_builder,\n        }\n    }\n\n    fn effect_builder(&self) -> EffectBuilder<MockReactorEvent> {\n        self.effect_builder\n    }\n\n    async fn crank(&self) -> MockReactorEvent {\n        let ((_ancestor, reactor_event), _) = self.scheduler.pop().await;\n        reactor_event\n    }\n\n    async fn process_effects(\n        &self,\n        effects: impl IntoIterator<Item = Effect<Event>>,\n    ) -> Vec<MockReactorEvent> {\n        let mut events = Vec::new();\n        for effect in effects {\n            tokio::spawn(effect);\n            let event = self.crank().await;\n            events.push(event);\n        }\n        events\n    }\n}\n\nstruct TestEnv {\n    block: Block,\n    validator_keys: Vec<Arc<SecretKey>>,\n    peers: Vec<NodeId>,\n}\n\n// Utility struct used to generate common test artifacts\nimpl TestEnv {\n    // Replaces the test block 
with the one provided as parameter\n    fn with_block(self, block: Block) -> Self {\n        Self {\n            block,\n            validator_keys: self.validator_keys,\n            peers: self.peers,\n        }\n    }\n\n    fn block(&self) -> &Block {\n        &self.block\n    }\n\n    fn validator_keys(&self) -> &Vec<Arc<SecretKey>> {\n        &self.validator_keys\n    }\n\n    fn peers(&self) -> &Vec<NodeId> {\n        &self.peers\n    }\n\n    // Generates a `ValidatorMatrix` that has the validators for the era of the test block\n    // All validators have equal weights\n    fn gen_validator_matrix(&self) -> ValidatorMatrix {\n        let validator_weights: BTreeMap<PublicKey, U512> = self\n            .validator_keys\n            .iter()\n            .map(|key| (PublicKey::from(key.as_ref()), 100.into())) // we give each validator equal weight\n            .collect();\n\n        assert_eq!(validator_weights.len(), self.validator_keys.len());\n\n        // Set up a validator matrix for the era in which our test block was created\n        let mut validator_matrix = ValidatorMatrix::new(\n            Ratio::new(1, 3),\n            ChainNameDigest::from_chain_name(\"casper-example\"),\n            None,\n            EraId::from(0),\n            self.validator_keys[0].clone(),\n            PublicKey::from(self.validator_keys[0].as_ref()),\n            1,\n            3,\n        );\n        validator_matrix.register_validator_weights(self.block.era_id(), validator_weights);\n\n        validator_matrix\n    }\n\n    fn random(rng: &mut TestRng) -> TestEnv {\n        let num_validators: usize = rng.gen_range(10..100);\n        let validator_keys: Vec<_> = iter::repeat_with(|| Arc::new(SecretKey::random(rng)))\n            .take(num_validators)\n            .collect();\n\n        let num_peers = rng.gen_range(10..20);\n\n        TestEnv {\n            block: TestBlockBuilder::new().build(rng).into(),\n            validator_keys,\n            peers: 
iter::repeat(())\n                .take(num_peers)\n                .map(|_| NodeId::from(rng.gen::<KeyFingerprint>()))\n                .collect(),\n        }\n    }\n}\n\nfn check_sync_global_state_event(event: MockReactorEvent, block: &Block) {\n    assert!(matches!(\n        event,\n        MockReactorEvent::SyncGlobalStateRequest { .. }\n    ));\n    let global_sync_request = match event {\n        MockReactorEvent::SyncGlobalStateRequest(req) => req,\n        _ => unreachable!(),\n    };\n    assert_eq!(global_sync_request.block_hash, *block.hash());\n    assert_eq!(\n        global_sync_request.state_root_hash,\n        *block.state_root_hash()\n    );\n}\n\n// Calls need_next for the block_synchronizer and processes the effects resulted returning a list of\n// the new events that were generated\nasync fn need_next(\n    rng: &mut TestRng,\n    reactor: &MockReactor,\n    block_synchronizer: &mut BlockSynchronizer,\n    num_expected_events: u8,\n) -> Vec<MockReactorEvent> {\n    let effects = block_synchronizer.need_next(reactor.effect_builder(), rng);\n    assert_eq!(effects.len() as u8, num_expected_events);\n    reactor.process_effects(effects).await\n}\n\nfn register_multiple_signatures<'a, I: IntoIterator<Item = &'a Arc<SecretKey>>>(\n    builder: &mut BlockBuilder,\n    block: &Block,\n    validator_keys_iter: I,\n    chain_name_hash: ChainNameDigest,\n) {\n    for secret_key in validator_keys_iter {\n        // Register a finality signature\n        let signature = FinalitySignatureV2::create(\n            *block.hash(),\n            block.height(),\n            block.era_id(),\n            chain_name_hash,\n            secret_key.as_ref(),\n        );\n        assert!(signature.is_verified().is_ok());\n        assert!(builder\n            .register_finality_signature(signature.into(), None)\n            .is_ok());\n    }\n}\n\nfn dummy_merkle_proof() -> TrieMerkleProof<Key, StoredValue> {\n    TrieMerkleProof::new(\n        URef::new([255; 32], 
AccessRights::NONE).into(),\n        StoredValue::CLValue(CLValue::from_t(()).unwrap()),\n        VecDeque::new(),\n    )\n}\n\ntrait OneExt: IntoIterator {\n    fn try_one(self) -> Option<Self::Item>;\n    fn one(self) -> Self::Item;\n}\n\nimpl<I: IntoIterator> OneExt for I {\n    fn try_one(self) -> Option<Self::Item> {\n        let mut it = self.into_iter();\n        let first = it.next()?;\n\n        it.next().is_none().then_some(first)\n    }\n\n    #[track_caller]\n    fn one(self) -> Self::Item {\n        let mut it = self.into_iter();\n        let first = it\n            .next()\n            .expect(\"no element in the iterator, but 1 was expected\");\n\n        if it.next().is_some() {\n            panic!(\"more that 1 element in the iterator, but 1 was expected\")\n        }\n\n        first\n    }\n}\n\n#[cfg(test)]\nimpl BlockSynchronizer {\n    fn new_initialized(\n        rng: &mut TestRng,\n        validator_matrix: ValidatorMatrix,\n        config: Config,\n    ) -> BlockSynchronizer {\n        let mut block_synchronizer = BlockSynchronizer::new(\n            config,\n            Arc::new(Chainspec::random(rng)),\n            MAX_SIMULTANEOUS_PEERS,\n            validator_matrix,\n            &Registry::new(),\n        )\n        .expect(\"Failed to create BlockSynchronizer\");\n\n        <BlockSynchronizer as InitializedComponent<MainEvent>>::set_state(\n            &mut block_synchronizer,\n            ComponentState::Initialized,\n        );\n\n        block_synchronizer\n    }\n\n    fn with_legacy_finality(mut self, legacy_required_finality: LegacyRequiredFinality) -> Self {\n        let core_config = &mut Arc::get_mut(&mut self.chainspec).unwrap().core_config;\n        core_config.start_protocol_version_with_strict_finality_signatures_required =\n            STRICT_FINALITY_REQUIRED_VERSION;\n        core_config.legacy_required_finality = legacy_required_finality;\n\n        self\n    }\n\n    fn forward_builder(&self) -> &BlockBuilder {\n     
   self.forward.as_ref().expect(\"Forward builder missing\")\n    }\n}\n\n/// Returns the number of validators that need a signature for a weak finality of 1/3.\nfn weak_finality_threshold(n: usize) -> usize {\n    n / 3 + 1\n}\n\n/// Returns the number of validators that need a signature for a strict finality of 2/3.\nfn strict_finality_threshold(n: usize) -> usize {\n    n * 2 / 3 + 1\n}\n\nfn latch_inner_check(builder: Option<&BlockBuilder>, expected: bool, msg: &str) {\n    assert_eq!(\n        builder.expect(\"builder should exist\").latched(),\n        expected,\n        \"{}\",\n        msg\n    );\n}\n\nfn latch_count_check(builder: Option<&BlockBuilder>, expected: u8, msg: &str) {\n    assert_eq!(\n        builder.expect(\"builder should exist\").latch_count(),\n        expected,\n        \"{}\",\n        msg\n    );\n}\n\nfn need_next_inner_check(\n    builder: Option<&mut BlockBuilder>,\n    rng: &mut TestRng,\n    expected: NeedNext,\n    msg: &str,\n) {\n    let need_next = builder\n        .expect(\"should exist\")\n        .block_acquisition_action(rng, MAX_SIMULTANEOUS_PEERS)\n        .need_next();\n    assert_eq!(need_next, expected, \"{}\", msg);\n}\n\n#[tokio::test]\nasync fn global_state_sync_wont_stall_with_bad_peers() {\n    let mut rng = TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let test_env = TestEnv::random(&mut rng).with_block(\n        TestBlockBuilder::new()\n            .era(1)\n            .random_transactions(1, &mut rng)\n            .build(&mut rng)\n            .into(),\n    );\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let chain_name_hash = validator_matrix.chain_name_hash();\n    let validators_secret_keys = test_env.validator_keys();\n    let cfg = Config {\n        latch_reset_interval: TimeDiff::from_millis(TEST_LATCH_RESET_INTERVAL_MILLIS),\n        ..Default::default()\n    };\n    let mut block_synchronizer 
=\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix, cfg);\n\n    // Set up the synchronizer for the test block such that the next step is getting global state\n    block_synchronizer.register_block_by_hash(*block.hash(), true);\n    assert!(\n        block_synchronizer.historical.is_some(),\n        \"we only get global state on historical sync\"\n    );\n    block_synchronizer.register_peers(*block.hash(), peers.clone());\n    let historical_builder = block_synchronizer.historical.as_mut().unwrap();\n    assert!(\n        historical_builder\n            .register_block_header(block.clone_header(), None)\n            .is_ok(),\n        \"historical builder should register header\"\n    );\n    historical_builder.register_era_validator_weights(&block_synchronizer.validator_matrix);\n\n    // Register finality signatures to reach weak finality\n    register_multiple_signatures(\n        historical_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            .take(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n    assert!(\n        historical_builder\n            .register_block(block.clone(), None)\n            .is_ok(),\n        \"should register block\"\n    );\n    // Register the remaining signatures to reach strict finality\n    register_multiple_signatures(\n        historical_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            .skip(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n\n    // At this point, the next step the synchronizer takes should be to get global state\n    let mut effects = block_synchronizer.need_next(mock_reactor.effect_builder(), &mut rng);\n    assert_eq!(\n        effects.len(),\n        1,\n        \"need next should have 1 effect at this step, not {}\",\n        effects.len()\n    );\n    tokio::spawn(async move { effects.remove(0).await });\n    let event = 
mock_reactor.crank().await;\n\n    // Expect a `SyncGlobalStateRequest` for the `GlobalStateSynchronizer`\n    // The peer list that the GlobalStateSynchronizer will use to fetch the tries\n    let first_peer_set = peers.iter().copied().choose_multiple(&mut rng, 4);\n    check_sync_global_state_event(event, block);\n\n    // Wait for the latch to reset\n    tokio::time::sleep(Duration::from_millis(TEST_LATCH_RESET_INTERVAL_MILLIS * 2)).await;\n\n    // Simulate an error form the global_state_synchronizer;\n    // make it seem that the `TrieAccumulator` did not find the required tries on any of the peers\n    block_synchronizer.global_state_synced(\n        *block.hash(),\n        Err(GlobalStateSynchronizerError::TrieAccumulator(\n            first_peer_set.to_vec(),\n        )),\n    );\n\n    // At this point we expect that another request for the global state would be made,\n    // this time with other peers\n    let mut effects = block_synchronizer.need_next(mock_reactor.effect_builder(), &mut rng);\n    assert_eq!(\n        effects.len(),\n        1,\n        \"need next should still have 1 effect at this step, not {}\",\n        effects.len()\n    );\n    tokio::spawn(async move { effects.remove(0).await });\n    let event = mock_reactor.crank().await;\n\n    let second_peer_set = peers.iter().copied().choose_multiple(&mut rng, 4);\n    check_sync_global_state_event(event, block);\n\n    // Wait for the latch to reset\n    tokio::time::sleep(Duration::from_millis(TEST_LATCH_RESET_INTERVAL_MILLIS * 2)).await;\n\n    // Simulate a successful global state sync;\n    // Although the request was successful, some peers did not have the data.\n    let unreliable_peers = second_peer_set.into_iter().choose_multiple(&mut rng, 2);\n    block_synchronizer.global_state_synced(\n        *block.hash(),\n        Ok(GlobalStateSynchronizerResponse::new(\n            (*block.state_root_hash()).into(),\n            unreliable_peers.clone(),\n        )),\n    );\n    let mut 
effects = block_synchronizer.need_next(mock_reactor.effect_builder(), &mut rng);\n    assert_eq!(\n        effects.len(),\n        1,\n        \"need next should still have 1 effect after global state sync'd, not {}\",\n        effects.len()\n    );\n    tokio::spawn(async move { effects.remove(0).await });\n    let event = mock_reactor.crank().await;\n\n    assert!(\n        false == matches!(event, MockReactorEvent::SyncGlobalStateRequest { .. }),\n        \"synchronizer should have progressed\"\n    );\n\n    // Check if the peers returned by the `GlobalStateSynchronizer` in the response were marked\n    // unreliable.\n    for peer in unreliable_peers.iter() {\n        assert!(\n            block_synchronizer\n                .historical\n                .as_ref()\n                .unwrap()\n                .peer_list()\n                .is_peer_unreliable(peer),\n            \"{} should be marked unreliable\",\n            peer\n        );\n    }\n}\n\n#[tokio::test]\nasync fn synchronizer_doesnt_busy_loop_without_peers() {\n    fn check_need_peer_events(expected_block_hash: BlockHash, events: Vec<MockReactorEvent>) {\n        // Explicitly verify the two effects are indeed asking networking and accumulator for peers.\n        assert_matches!(\n            events[0],\n            MockReactorEvent::NetworkInfoRequest(NetworkInfoRequest::FullyConnectedPeers {\n                count,\n                ..\n            }) if count == MAX_SIMULTANEOUS_PEERS as usize\n        );\n        assert_matches!(\n            events[1],\n            MockReactorEvent::BlockAccumulatorRequest(BlockAccumulatorRequest::GetPeersForBlock {\n                block_hash,\n                ..\n            }) if block_hash == expected_block_hash\n        );\n    }\n\n    let mut rng = TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let test_env = TestEnv::random(&mut rng).with_block(\n        TestBlockBuilder::new()\n            .era(1)\n            
.random_transactions(1, &mut rng)\n            .build(&mut rng)\n            .into(),\n    );\n    let block = test_env.block();\n    let block_hash = *block.hash();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let cfg = Config {\n        latch_reset_interval: TimeDiff::from_millis(TEST_LATCH_RESET_INTERVAL_MILLIS),\n        ..Default::default()\n    };\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix, cfg);\n\n    block_synchronizer.register_block_by_hash(block_hash, true);\n\n    latch_inner_check(\n        block_synchronizer.historical.as_ref(),\n        false,\n        \"initial set up, should not be latched\",\n    );\n\n    {\n        // We registered no peers, so we need peers\n        need_next_inner_check(\n            block_synchronizer.historical.as_mut(),\n            &mut rng,\n            NeedNext::Peers(block_hash),\n            \"should need peers\",\n        );\n\n        // We registered no peers, so the synchronizer should ask for peers.\n        let effects = block_synchronizer.handle_event(\n            mock_reactor.effect_builder(),\n            &mut rng,\n            Event::Request(BlockSynchronizerRequest::NeedNext),\n        );\n        assert_eq!(effects.len(), 2, \"we should ask for peers from both networking and accumulator, thus two effects are expected\");\n\n        latch_inner_check(\n            block_synchronizer.historical.as_ref(),\n            true,\n            \"should be latched waiting for peers\",\n        );\n\n        check_need_peer_events(block_hash, mock_reactor.process_effects(effects).await);\n    }\n\n    {\n        // Inject an empty response from the network, simulating no available peers.\n        let effects = block_synchronizer.handle_event(\n            mock_reactor.effect_builder(),\n            &mut rng,\n            Event::NetworkPeers(*block.hash(), vec![]),\n        );\n\n        latch_inner_check(\n            
block_synchronizer.historical.as_ref(),\n            true,\n            \"should still be latched because only one response was received and it \\\n             did not have what we needed.\",\n        );\n\n        assert!(effects.is_empty(), \"effects should be empty\");\n    }\n\n    {\n        // Inject an empty response from the accumulator, simulating no available peers.\n        // as this is the second of two responses, the latch clears. the logic then\n        // calls need next again, we still need peers, so we generate the same two effects again.\n        let effects = block_synchronizer.handle_event(\n            mock_reactor.effect_builder(),\n            &mut rng,\n            Event::AccumulatedPeers(*block.hash(), None),\n        );\n        assert!(!effects.is_empty(), \"we should still need peers...\");\n\n        latch_inner_check(\n            block_synchronizer.historical.as_ref(),\n            true,\n            \"we need peers, ask again\",\n        );\n\n        // We registered no peers, so we still need peers\n        need_next_inner_check(\n            block_synchronizer.historical.as_mut(),\n            &mut rng,\n            NeedNext::Peers(block_hash),\n            \"should need peers\",\n        );\n\n        check_need_peer_events(block_hash, mock_reactor.process_effects(effects).await);\n    }\n}\n\n#[tokio::test]\nasync fn should_not_stall_after_registering_new_era_validator_weights() {\n    let mut rng = TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let test_env = TestEnv::random(&mut rng);\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let block_hash = *block.hash();\n    let era_id = block.era_id();\n\n    // Set up a validator matrix.\n    let mut validator_matrix = ValidatorMatrix::new_with_validator(ALICE_SECRET_KEY.clone());\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix.clone(), Config::default());\n\n    // Set up the 
synchronizer for the test block such that the next step is getting era validators.\n    block_synchronizer.register_block_by_hash(block_hash, true);\n    block_synchronizer.register_peers(block_hash, peers.clone());\n    block_synchronizer\n        .historical\n        .as_mut()\n        .expect(\"should have historical builder\")\n        .register_block_header(block.clone_header(), None)\n        .expect(\"should register block header\");\n\n    latch_inner_check(\n        block_synchronizer.historical.as_ref(),\n        false,\n        \"initial set up, should not be latched\",\n    );\n    need_next_inner_check(\n        block_synchronizer.historical.as_mut(),\n        &mut rng,\n        NeedNext::EraValidators(era_id),\n        \"should need era validators for era block is in\",\n    );\n\n    let effects = block_synchronizer.need_next(mock_reactor.effect_builder(), &mut rng);\n    assert_eq!(\n        effects.len(),\n        MAX_SIMULTANEOUS_PEERS as usize,\n        \"need next should have an effect per peer when needing sync leap\"\n    );\n    latch_inner_check(\n        block_synchronizer.historical.as_ref(),\n        true,\n        \"after determination that we need validators, should be latched\",\n    );\n\n    // `need_next` should return no effects while latched.\n    assert!(\n        block_synchronizer\n            .need_next(mock_reactor.effect_builder(), &mut rng)\n            .is_empty(),\n        \"should return no effects while latched\"\n    );\n\n    // bleed off the event q, checking the expected event kind\n    for effect in effects {\n        tokio::spawn(effect);\n        let event = mock_reactor.crank().await;\n        match event {\n            MockReactorEvent::SyncLeapFetcherRequest(_) => (),\n            _ => panic!(\"unexpected event: {:?}\", event),\n        };\n    }\n\n    // Update the validator matrix to now have an entry for the era of our random block.\n    validator_matrix.register_validator_weights(\n        era_id,\n       
 iter::once((ALICE_PUBLIC_KEY.clone(), 100.into())).collect(),\n    );\n\n    // register validator_matrix\n    block_synchronizer\n        .historical\n        .as_mut()\n        .expect(\"should have historical builder\")\n        .register_era_validator_weights(&validator_matrix);\n\n    latch_inner_check(\n        block_synchronizer.historical.as_ref(),\n        false,\n        \"after registering validators, should not be latched\",\n    );\n\n    need_next_inner_check(\n        block_synchronizer.historical.as_mut(),\n        &mut rng,\n        NeedNext::FinalitySignatures(block_hash, era_id, validator_matrix.public_keys(&era_id)),\n        \"should need finality sigs\",\n    );\n\n    // Ensure the in-flight latch has been released, i.e. that `need_next` returns something.\n    let mut effects = block_synchronizer.need_next(mock_reactor.effect_builder(), &mut rng);\n    assert_eq!(\n        effects.len(),\n        1,\n        \"need next should produce 1 effect because we currently need exactly 1 signature \\\n         NOTE: finality signatures are a special case; we currently we fan out 1 peer per signature \\\n          but do multiple rounds of this against increasingly strict weight thresholds. 
\\\n         All other fetchers fan out by asking each of MAX_SIMULTANEOUS_PEERS for the _same_ item.\"\n    );\n\n    tokio::spawn(async move { effects.remove(0).await });\n    let event = mock_reactor.crank().await;\n    assert_matches!(\n        event,\n        MockReactorEvent::FinalitySignatureFetcherRequest(FetcherRequest {\n            id,\n            peer,\n            ..\n        }) if peers.contains(&peer) && id.block_hash() == block.hash()\n    );\n}\n\n#[test]\nfn duplicate_register_block_not_allowed_if_builder_is_not_failed() {\n    let mut rng = TestRng::new();\n    let test_env = TestEnv::random(&mut rng);\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default());\n\n    // Register block for forward sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), false));\n    assert!(block_synchronizer.forward.is_some()); // we only get global state on historical sync\n\n    // Registering the block again should not be allowed until the sync finishes\n    assert!(!block_synchronizer.register_block_by_hash(*block.hash(), false));\n\n    // Trying to register a different block should replace the old one\n    let new_block: Block = TestBlockBuilder::new().build(&mut rng).into();\n    assert!(block_synchronizer.register_block_by_hash(*new_block.hash(), false));\n    assert_eq!(\n        block_synchronizer.forward.unwrap().block_hash(),\n        *new_block.hash()\n    );\n}\n\n#[tokio::test]\nasync fn historical_sync_gets_peers_form_both_connected_peers_and_accumulator() {\n    let mut rng = TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let test_env = TestEnv::random(&mut rng);\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, 
validator_matrix, Config::default());\n\n    // Register block for historical sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), SHOULD_FETCH_EXECUTION_STATE));\n    assert!(block_synchronizer.historical.is_some());\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        &mut rng,\n        Event::Request(BlockSynchronizerRequest::NeedNext),\n    );\n    assert_eq!(effects.len(), 2);\n    let events = mock_reactor.process_effects(effects).await;\n\n    // The first thing the synchronizer should do is get peers.\n    // For the historical flow, the synchronizer will get a random sampling of the connected\n    // peers and also ask the accumulator to provide peers from which it has received information\n    // for the block that is being synchronized.\n    assert_matches!(\n        events[0],\n        MockReactorEvent::NetworkInfoRequest(NetworkInfoRequest::FullyConnectedPeers {\n            count,\n            ..\n        }) if count == MAX_SIMULTANEOUS_PEERS as usize\n    );\n\n    assert_matches!(\n        events[1],\n        MockReactorEvent::BlockAccumulatorRequest(BlockAccumulatorRequest::GetPeersForBlock {\n            block_hash,\n            ..\n        }) if block_hash == *block.hash()\n    )\n}\n\n#[tokio::test]\nasync fn fwd_sync_gets_peers_only_from_accumulator() {\n    let mut rng = TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let test_env = TestEnv::random(&mut rng);\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default());\n\n    // Register block for forward sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), false));\n    assert!(block_synchronizer.forward.is_some());\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        &mut 
rng,\n        Event::Request(BlockSynchronizerRequest::NeedNext),\n    );\n    assert_eq!(effects.len(), 1);\n    let events = mock_reactor.process_effects(effects).await;\n\n    // The first thing the synchronizer should do is get peers.\n    // For the forward flow, the synchronizer will ask the accumulator to provide peers\n    // from which it has received information for the block that is being synchronized.\n    assert_matches!(\n        events[0],\n        MockReactorEvent::BlockAccumulatorRequest(BlockAccumulatorRequest::GetPeersForBlock {\n            block_hash,\n            ..\n        }) if block_hash == *block.hash()\n    )\n}\n\n#[tokio::test]\nasync fn sync_starts_with_header_fetch() {\n    let mut rng = TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let test_env = TestEnv::random(&mut rng);\n    let block = test_env.block();\n    let peers = test_env.peers();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default());\n\n    // Register block for fwd sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), false));\n    assert!(block_synchronizer.forward.is_some());\n    block_synchronizer.register_peers(*block.hash(), peers.clone());\n\n    let events = need_next(\n        &mut rng,\n        &mock_reactor,\n        &mut block_synchronizer,\n        MAX_SIMULTANEOUS_PEERS,\n    )\n    .await;\n\n    // The first thing needed after the synchronizer has peers is\n    // to fetch the block header from peers.\n    for event in events {\n        assert_matches!(\n            event,\n            MockReactorEvent::BlockHeaderFetcherRequest(FetcherRequest {\n                id,\n                peer,\n                ..\n            }) if peers.contains(&peer) && id == *block.hash()\n        );\n    }\n}\n\n#[tokio::test]\nasync fn 
fwd_sync_is_not_blocked_by_failed_header_fetch_within_latch_interval() {\n    let mut rng = TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let test_env = TestEnv::random(&mut rng);\n    let block = test_env.block();\n    let block_hash = *block.hash();\n    let peers = test_env.peers();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let cfg = Config {\n        ..Default::default()\n    };\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix, cfg);\n\n    // Register block for fwd sync\n    assert!(\n        block_synchronizer.register_block_by_hash(block_hash, false),\n        \"should register block by hash\"\n    );\n    assert!(\n        block_synchronizer.forward.is_some(),\n        \"should have forward sync\"\n    );\n    block_synchronizer.register_peers(block_hash, peers.clone());\n\n    let events = need_next(\n        &mut rng,\n        &mock_reactor,\n        &mut block_synchronizer,\n        MAX_SIMULTANEOUS_PEERS,\n    )\n    .await;\n\n    let initial_progress = block_synchronizer\n        .forward\n        .as_ref()\n        .expect(\"should exist\")\n        .last_progress_time();\n\n    latch_inner_check(\n        block_synchronizer.forward.as_ref(),\n        true,\n        \"forward builder should be latched after need next call\",\n    );\n\n    let mut peers_asked = Vec::new();\n    for event in events {\n        assert_matches!(\n            event,\n            MockReactorEvent::BlockHeaderFetcherRequest(FetcherRequest {\n                id,\n                peer,\n                ..\n            }) if peers.contains(&peer) && id == block_hash => {\n                peers_asked.push(peer);\n            },\n            \"should be block header fetch\"\n        );\n    }\n\n    // Simulate fetch errors for the header\n    let mut generated_effects = Effects::new();\n    for peer in peers_asked {\n        latch_inner_check(\n            
block_synchronizer.forward.as_ref(),\n            true,\n            &format!(\"response from peer: {:?}, but should still be latched until after final response received\", peer),\n        );\n        assert!(\n            generated_effects.is_empty(),\n            \"effects should remain empty until last response\"\n        );\n        let effects = block_synchronizer.handle_event(\n            mock_reactor.effect_builder(),\n            &mut rng,\n            Event::BlockHeaderFetched(Err(FetcherError::Absent {\n                id: Box::new(*block.hash()),\n                peer,\n            })),\n        );\n\n        // the effects array should be empty while the latch is active\n        // once the latch is reset, we should get some effects\n        generated_effects.extend(effects);\n    }\n\n    need_next_inner_check(\n        block_synchronizer.forward.as_mut(),\n        &mut rng,\n        NeedNext::BlockHeader(block_hash),\n        \"should need block header\",\n    );\n    assert!(\n        !generated_effects.is_empty(),\n        \"should have gotten effects after the final response tail called into need next\"\n    );\n\n    latch_inner_check(\n        block_synchronizer.forward.as_ref(),\n        true,\n        \"all requests have been responded to, and the last event response should have \\\n        resulted in a fresh need next being reported and thus a new latch\",\n    );\n\n    assert_matches!(\n        block_synchronizer.forward_progress(),\n        BlockSynchronizerProgress::Syncing(block_hash, _, _) if block_hash == block_hash,\n        \"should be syncing\"\n    );\n\n    tokio::time::sleep(Duration::from(cfg.need_next_interval)).await;\n\n    assert_matches!(\n        block_synchronizer.forward_progress(),\n        BlockSynchronizerProgress::Syncing(block_hash, _, _) if block_hash == *block.hash()\n    );\n\n    let current_progress = block_synchronizer\n        .forward\n        .as_ref()\n        .expect(\"should exist\")\n        
.last_progress_time();\n\n    assert_eq!(\n        initial_progress, current_progress,\n        \"we have not gotten the record we need, so progress should remain the same\"\n    )\n}\n\n#[tokio::test]\nasync fn registering_header_successfully_triggers_signatures_fetch_for_weak_finality() {\n    let mut rng = TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let test_env = TestEnv::random(&mut rng);\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default());\n\n    // Register block for fwd sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), false));\n    assert!(block_synchronizer.forward.is_some());\n    block_synchronizer.register_peers(*block.hash(), peers.clone());\n\n    let events = need_next(\n        &mut rng,\n        &mock_reactor,\n        &mut block_synchronizer,\n        MAX_SIMULTANEOUS_PEERS,\n    )\n    .await;\n\n    let mut peers_asked = Vec::new();\n    for event in events {\n        assert_matches!(\n            event,\n            MockReactorEvent::BlockHeaderFetcherRequest(FetcherRequest {\n                id,\n                peer,\n                ..\n            }) if peers.contains(&peer) && id == *block.hash() => {\n                peers_asked.push(peer);\n            }\n        );\n    }\n\n    // Simulate successful fetch of the block header\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        &mut rng,\n        Event::BlockHeaderFetched(Ok(FetchedData::FromPeer {\n            item: Box::new(block.clone_header()),\n            peer: peers_asked[0],\n        })),\n    );\n\n    // Check the block acquisition state\n    let fwd_builder = block_synchronizer.forward_builder();\n    assert_matches!(\n        fwd_builder.block_acquisition_state(),\n        
BlockAcquisitionState::HaveBlockHeader(header, _) if header.block_hash() == *block.hash()\n    );\n\n    // Check if the peer that provided the successful response was promoted\n    assert!(fwd_builder.peer_list().is_peer_reliable(&peers_asked[0]));\n\n    // Next the synchronizer should fetch finality signatures to reach weak finality.\n    // The number of requests should be limited to the number of peers even if we\n    // need to get more signatures to reach weak finality.\n    assert_eq!(\n        effects.len(),\n        min(\n            test_env.validator_keys().len(),\n            MAX_SIMULTANEOUS_PEERS as usize,\n        )\n    );\n    for event in mock_reactor.process_effects(effects).await {\n        assert_matches!(\n            event,\n            MockReactorEvent::FinalitySignatureFetcherRequest(FetcherRequest {\n                id,\n                peer,\n                ..\n            }) if peers.contains(&peer) && id.block_hash() == block.hash() && id.era_id() == block.era_id()\n        );\n    }\n}\n\n#[tokio::test]\nasync fn fwd_more_signatures_are_requested_if_weak_finality_is_not_reached() {\n    let mut rng = TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let test_env = TestEnv::random(&mut rng);\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let chain_name_hash = validator_matrix.chain_name_hash();\n    let validators_secret_keys = test_env.validator_keys();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default());\n\n    // Register block for fwd sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), false));\n    assert!(block_synchronizer.forward.is_some());\n    block_synchronizer.register_peers(*block.hash(), peers.clone());\n\n    let fwd_builder = block_synchronizer\n        .forward\n        .as_mut()\n        .expect(\"Forward builder should 
have been initialized\");\n    assert!(fwd_builder\n        .register_block_header(block.clone_header(), None)\n        .is_ok());\n    fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix);\n\n    // Check the block acquisition state\n    assert_matches!(\n        fwd_builder.block_acquisition_state(),\n        BlockAcquisitionState::HaveBlockHeader(header, _) if header.block_hash() == *block.hash()\n    );\n\n    // Simulate a successful fetch of a single signature\n    let signature = FinalitySignatureV2::create(\n        *block.hash(),\n        block.height(),\n        block.era_id(),\n        chain_name_hash,\n        validators_secret_keys[0].as_ref(),\n    );\n    assert!(signature.is_verified().is_ok());\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        &mut rng,\n        Event::FinalitySignatureFetched(Ok(FetchedData::FromPeer {\n            item: Box::new(signature.into()),\n            peer: peers[0],\n        })),\n    );\n\n    // A single signature isn't enough to reach weak finality.\n    // The synchronizer should ask for the remaining signatures.\n    // The peer limit should still be in place.\n    assert_eq!(\n        effects.len(),\n        min(\n            validators_secret_keys.len() - 1,\n            MAX_SIMULTANEOUS_PEERS as usize,\n        )\n    );\n    for event in mock_reactor.process_effects(effects).await {\n        assert_matches!(\n            event,\n            MockReactorEvent::FinalitySignatureFetcherRequest(FetcherRequest {\n                id,\n                peer,\n                ..\n            }) => {\n                assert!(peers.contains(&peer));\n                assert_eq!(id.block_hash(), block.hash());\n                assert_eq!(id.era_id(), block.era_id());\n                assert_ne!(*id.public_key(), PublicKey::from(validators_secret_keys[0].as_ref()));\n            }\n        );\n    }\n\n    // Register finality signatures to reach 
weak finality\n    let mut generated_effects = Effects::new();\n    for secret_key in validators_secret_keys\n        .iter()\n        .skip(1)\n        .take(weak_finality_threshold(validators_secret_keys.len()))\n    {\n        // Register a finality signature\n        let signature = FinalitySignatureV2::create(\n            *block.hash(),\n            block.height(),\n            block.era_id(),\n            chain_name_hash,\n            secret_key.as_ref(),\n        );\n        assert!(signature.is_verified().is_ok());\n        let effects = block_synchronizer.handle_event(\n            mock_reactor.effect_builder(),\n            &mut rng,\n            Event::FinalitySignatureFetched(Ok(FetchedData::FromPeer {\n                item: Box::new(signature.into()),\n                peer: peers[2],\n            })),\n        );\n        generated_effects.extend(effects);\n    }\n\n    // Now the block should have weak finality.\n    // We are only interested in the last effects generated since as soon as the block has weak\n    // finality it should start to fetch the block body.\n    let events = mock_reactor\n        .process_effects(\n            generated_effects\n                .into_iter()\n                .rev()\n                .take(MAX_SIMULTANEOUS_PEERS as usize),\n        )\n        .await;\n    for event in events {\n        assert_matches!(\n            event,\n            MockReactorEvent::BlockFetcherRequest(FetcherRequest {\n                id,\n                peer,\n                ..\n            }) => {\n                assert!(peers.contains(&peer));\n                assert_eq!(id, *block.hash());\n            }\n        );\n    }\n}\n\n#[tokio::test]\nasync fn fwd_sync_is_not_blocked_by_failed_signatures_fetch_within_latch_interval() {\n    let mut rng = TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let test_env = TestEnv::random(&mut rng);\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let 
expected_block_hash = *block.hash();\n    let era_id = block.era_id();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let num_validators = test_env.validator_keys().len() as u8;\n    let cfg = Config {\n        ..Default::default()\n    };\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix, cfg);\n\n    // Register block for fwd sync\n    assert!(block_synchronizer.register_block_by_hash(expected_block_hash, false));\n    assert!(block_synchronizer.forward.is_some());\n    block_synchronizer.register_peers(expected_block_hash, peers.clone());\n    let fwd_builder = block_synchronizer\n        .forward\n        .as_mut()\n        .expect(\"Forward builder should have been initialized\");\n    assert!(fwd_builder\n        .register_block_header(block.clone_header(), None)\n        .is_ok());\n    fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix);\n\n    // Check the block acquisition state\n    assert_matches!(\n        fwd_builder.block_acquisition_state(),\n        BlockAcquisitionState::HaveBlockHeader(header, _) if header.block_hash() == expected_block_hash\n    );\n\n    // Synchronizer should fetch finality signatures\n    let events = need_next(\n        &mut rng,\n        &mock_reactor,\n        &mut block_synchronizer,\n        min(num_validators, MAX_SIMULTANEOUS_PEERS),\n        /* We have num_validators\n         * validators so we\n         * require the num_validators\n         * signatures */\n    )\n    .await;\n\n    // Check what signatures were requested\n    let mut sigs_requested = Vec::new();\n    for event in events {\n        assert_matches!(\n            event,\n            MockReactorEvent::FinalitySignatureFetcherRequest(FetcherRequest {\n                id,\n                peer,\n                ..\n            }) => {\n                assert!(peers.contains(&peer));\n                assert_eq!(*id.block_hash(), expected_block_hash);\n  
              assert_eq!(id.era_id(), era_id);\n                sigs_requested.push((peer, id.public_key().clone()));\n            }\n        );\n    }\n\n    // Simulate failed fetch of finality signatures\n    let mut generated_effects = Effects::new();\n    for (peer, public_key) in sigs_requested {\n        latch_inner_check(\n            block_synchronizer.forward.as_ref(),\n            true,\n            &format!(\"response from peer: {:?}, but should still be latched until after final response received\", peer),\n        );\n        assert!(\n            generated_effects.is_empty(),\n            \"effects should remain empty until last response\"\n        );\n        let effects = block_synchronizer.handle_event(\n            mock_reactor.effect_builder(),\n            &mut rng,\n            Event::FinalitySignatureFetched(Err(FetcherError::Absent {\n                id: Box::new(Box::new(FinalitySignatureId::new(\n                    expected_block_hash,\n                    era_id,\n                    public_key,\n                ))),\n                peer,\n            })),\n        );\n        // the effects array should be empty while the latch is active\n        // once the latch is reset, we should get some effects\n        generated_effects.extend(effects);\n    }\n\n    assert_matches!(\n        block_synchronizer.forward_progress(),\n        BlockSynchronizerProgress::Syncing(block_hash, _, _) if block_hash == expected_block_hash,\n        \"should be syncing\"\n    );\n\n    // The effects are empty at this point and the synchronizer is stuck\n    assert!(\n        !generated_effects.is_empty(),\n        \"should have gotten effects after the final response tail called into need next\"\n    );\n\n    latch_inner_check(\n        block_synchronizer.forward.as_ref(),\n        true,\n        \"all requests have been responded to, and the last event response should have \\\n        resulted in a fresh need next being reported and thus a new latch\",\n 
   );\n\n    for event in mock_reactor.process_effects(generated_effects).await {\n        assert_matches!(\n            event,\n            MockReactorEvent::FinalitySignatureFetcherRequest(FetcherRequest {\n                id,\n                peer,\n                ..\n            }) if peers.contains(&peer) && *id.block_hash() == expected_block_hash && id.era_id() == block.era_id()\n        );\n    }\n\n    // Check if the forward builder is reported as stalled so that the control logic can recover\n    assert_matches!(\n        block_synchronizer.forward_progress(),\n        BlockSynchronizerProgress::Syncing(block_hash, _, _) if block_hash == expected_block_hash\n    );\n}\n\n#[tokio::test]\nasync fn next_action_for_have_weak_finality_is_fetching_block_body() {\n    let mut rng = TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let test_env = TestEnv::random(&mut rng);\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let chain_name_hash = validator_matrix.chain_name_hash();\n    let validators_secret_keys = test_env.validator_keys();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default());\n\n    // Register block for fwd sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), false));\n    assert!(block_synchronizer.forward.is_some());\n    block_synchronizer.register_peers(*block.hash(), peers.clone());\n\n    let fwd_builder = block_synchronizer\n        .forward\n        .as_mut()\n        .expect(\"Forward builder should have been initialized\");\n    assert!(fwd_builder\n        .register_block_header(block.clone_header(), None)\n        .is_ok());\n    fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix);\n\n    // Register finality signatures to reach weak finality\n    register_multiple_signatures(\n        fwd_builder,\n        
block,\n        validators_secret_keys\n            .iter()\n            .take(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n\n    // Check the block acquisition state\n    assert_matches!(\n        fwd_builder.block_acquisition_state(),\n        BlockAcquisitionState::HaveWeakFinalitySignatures(header, _) if header.block_hash() == *block.hash()\n    );\n\n    // Now the block should have weak finality.\n    // Next step is to get the block body.\n    let events = need_next(\n        &mut rng,\n        &mock_reactor,\n        &mut block_synchronizer,\n        MAX_SIMULTANEOUS_PEERS,\n    )\n    .await;\n\n    for event in events {\n        assert_matches!(\n            event,\n            MockReactorEvent::BlockFetcherRequest(FetcherRequest {\n                id,\n                peer,\n                ..\n            }) => {\n                assert!(peers.contains(&peer));\n                assert_eq!(id, *block.hash());\n            }\n        );\n    }\n}\n\n#[tokio::test]\nasync fn registering_block_body_transitions_builder_to_have_block_state() {\n    let mut rng = TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let test_env = TestEnv::random(&mut rng);\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let chain_name_hash = validator_matrix.chain_name_hash();\n    let validators_secret_keys = test_env.validator_keys();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default());\n\n    // Register block for fwd sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), false));\n    assert!(block_synchronizer.forward.is_some());\n    block_synchronizer.register_peers(*block.hash(), peers.clone());\n\n    let fwd_builder = block_synchronizer\n        .forward\n        .as_mut()\n        .expect(\"Forward builder should have been 
initialized\");\n    assert!(fwd_builder\n        .register_block_header(block.clone_header(), None)\n        .is_ok());\n    fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix);\n\n    // Register finality signatures to reach weak finality\n    register_multiple_signatures(\n        fwd_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            .take(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n\n    // Check the block acquisition state\n    assert_matches!(\n        fwd_builder.block_acquisition_state(),\n        BlockAcquisitionState::HaveWeakFinalitySignatures(header, _) if header.block_hash() == *block.hash()\n    );\n\n    // Now the block should have weak finality.\n    // Next step is to get the block body.\n    let events = need_next(\n        &mut rng,\n        &mock_reactor,\n        &mut block_synchronizer,\n        MAX_SIMULTANEOUS_PEERS,\n    )\n    .await;\n\n    for event in events {\n        assert_matches!(\n            event,\n            MockReactorEvent::BlockFetcherRequest(FetcherRequest {\n                id,\n                peer,\n                ..\n            }) => {\n                assert!(peers.contains(&peer));\n                assert_eq!(id, *block.hash());\n            }\n        );\n    }\n\n    block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        &mut rng,\n        Event::BlockFetched(Ok(FetchedData::FromPeer {\n            item: Box::new(block.clone()),\n            peer: peers[0],\n        })),\n    );\n\n    assert_matches!(\n        block_synchronizer.forward_builder().block_acquisition_state(),\n        BlockAcquisitionState::HaveBlock(acquired_block, _, _) if acquired_block.hash() == block.hash()\n    );\n}\n\n#[tokio::test]\nasync fn fwd_having_block_body_for_block_without_deploys_requires_only_signatures() {\n    let mut rng = TestRng::new();\n    let mock_reactor = MockReactor::new();\n  
  let test_env = TestEnv::random(&mut rng);\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let chain_name_hash = validator_matrix.chain_name_hash();\n    let validators_secret_keys = test_env.validator_keys();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default());\n\n    // Register block for fwd sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), false));\n    assert!(block_synchronizer.forward.is_some());\n    block_synchronizer.register_peers(*block.hash(), peers.clone());\n\n    let fwd_builder = block_synchronizer\n        .forward\n        .as_mut()\n        .expect(\"Forward builder should have been initialized\");\n    assert!(fwd_builder\n        .register_block_header(block.clone_header(), None)\n        .is_ok());\n    fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix);\n\n    // Register finality signatures to reach weak finality\n    register_multiple_signatures(\n        fwd_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            .take(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n\n    assert!(fwd_builder.register_block(block.clone(), None).is_ok());\n\n    // Check the block acquisition state\n    assert_matches!(\n        fwd_builder.block_acquisition_state(),\n        BlockAcquisitionState::HaveBlock(acquired_block, _, _) if acquired_block.hash() == block.hash()\n    );\n\n    // Since the block doesn't have any deploys,\n    // the next step should be to fetch the finality signatures for strict finality.\n    let effects = block_synchronizer.need_next(mock_reactor.effect_builder(), &mut rng);\n    for event in mock_reactor.process_effects(effects).await {\n        assert_matches!(\n            event,\n            
MockReactorEvent::FinalitySignatureFetcherRequest(FetcherRequest {\n                id,\n                peer,\n                ..\n            }) if peers.contains(&peer) && id.block_hash() == block.hash() && id.era_id() == block.era_id()\n        );\n    }\n}\n\n#[tokio::test]\nasync fn fwd_having_block_body_for_block_with_deploys_requires_approvals_hashes() {\n    let mut rng = TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let test_env = TestEnv::random(&mut rng).with_block(\n        TestBlockBuilder::new()\n            .era(1)\n            .random_transactions(1, &mut rng)\n            .build(&mut rng)\n            .into(),\n    );\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let chain_name_hash = validator_matrix.chain_name_hash();\n    let validators_secret_keys = test_env.validator_keys();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default());\n\n    // Register block for fwd sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), false));\n    assert!(block_synchronizer.forward.is_some());\n    block_synchronizer.register_peers(*block.hash(), peers.clone());\n\n    let fwd_builder = block_synchronizer\n        .forward\n        .as_mut()\n        .expect(\"Forward builder should have been initialized\");\n    assert!(fwd_builder\n        .register_block_header(block.clone_header(), None)\n        .is_ok());\n    fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix);\n\n    // Register finality signatures to reach weak finality\n    register_multiple_signatures(\n        fwd_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            .take(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n\n    assert!(fwd_builder.register_block(block.clone(), None).is_ok());\n\n   
 // Check the block acquisition state\n    assert_matches!(\n        fwd_builder.block_acquisition_state(),\n        BlockAcquisitionState::HaveBlock(acquired_block, _, _) if acquired_block.hash() == block.hash()\n    );\n\n    // Since the block has deploys,\n    // the next step should be to fetch the approvals hashes.\n    let events = need_next(\n        &mut rng,\n        &mock_reactor,\n        &mut block_synchronizer,\n        MAX_SIMULTANEOUS_PEERS,\n    )\n    .await;\n\n    for event in events {\n        if !matches!(\n            event,\n            MockReactorEvent::ApprovalsHashesFetcherRequest(FetcherRequest {\n                id,\n                peer,\n                ..\n            }) if peers.contains(&peer) && id == *block.hash()\n        ) {\n            println!(\"peers: {:?}\", peers);\n            println!(\"{}\", block.hash());\n            println!(\"event: {:?}\", event);\n        }\n        assert_matches!(\n            event,\n            MockReactorEvent::ApprovalsHashesFetcherRequest(FetcherRequest {\n                id,\n                peer,\n                ..\n            }) if peers.contains(&peer) && id == *block.hash()\n        );\n    }\n}\n\n#[tokio::test]\nasync fn fwd_registering_approvals_hashes_triggers_fetch_for_deploys() {\n    let mut rng = TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let txns = [Transaction::random(&mut rng)];\n    let test_env = TestEnv::random(&mut rng).with_block(\n        TestBlockBuilder::new()\n            .era(1)\n            .transactions(txns.iter())\n            .build(&mut rng)\n            .into(),\n    );\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let chain_name_hash = validator_matrix.chain_name_hash();\n    let validators_secret_keys = test_env.validator_keys();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix, 
Config::default());\n\n    // Register block for fwd sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), false));\n    assert!(block_synchronizer.forward.is_some());\n    block_synchronizer.register_peers(*block.hash(), peers.clone());\n\n    let fwd_builder = block_synchronizer\n        .forward\n        .as_mut()\n        .expect(\"Forward builder should have been initialized\");\n    assert!(fwd_builder\n        .register_block_header(block.clone_header(), None)\n        .is_ok());\n    fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix);\n\n    // Register finality signatures to reach weak finality\n    register_multiple_signatures(\n        fwd_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            .take(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n\n    assert!(fwd_builder.register_block(block.clone(), None).is_ok());\n\n    // Check the block acquisition state\n    assert_matches!(\n        fwd_builder.block_acquisition_state(),\n        BlockAcquisitionState::HaveBlock(acquired_block, _, _) if acquired_block.hash() == block.hash()\n    );\n\n    let approvals_hashes = ApprovalsHashes::new(\n        *block.hash(),\n        txns.iter()\n            .map(|txn| txn.compute_approvals_hash().unwrap())\n            .collect(),\n        dummy_merkle_proof(),\n    );\n\n    // Since the block has approvals hashes,\n    // the next step should be to fetch the deploys.\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        &mut rng,\n        Event::ApprovalsHashesFetched(Ok(FetchedData::FromPeer {\n            item: Box::new(approvals_hashes.clone()),\n            peer: peers[0],\n        })),\n    );\n    assert_eq!(effects.len(), MAX_SIMULTANEOUS_PEERS as usize);\n    for event in mock_reactor.process_effects(effects).await {\n        assert_matches!(\n            event,\n            
MockReactorEvent::TransactionFetcherRequest(FetcherRequest {\n                id,\n                peer,\n                ..\n            }) => {\n                assert!(peers.contains(&peer));\n                assert_eq!(id, txns[0].compute_id());\n            }\n        );\n    }\n}\n\n#[tokio::test]\nasync fn fwd_have_block_body_without_deploys_and_strict_finality_transitions_state_machine() {\n    let mut rng = TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let test_env = TestEnv::random(&mut rng);\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let chain_name_hash = validator_matrix.chain_name_hash();\n    let validators_secret_keys = test_env.validator_keys();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default());\n\n    // Register block for fwd sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), false));\n    assert!(block_synchronizer.forward.is_some());\n    block_synchronizer.register_peers(*block.hash(), peers.clone());\n\n    let fwd_builder = block_synchronizer\n        .forward\n        .as_mut()\n        .expect(\"Forward builder should have been initialized\");\n    assert!(fwd_builder\n        .register_block_header(block.clone_header(), None)\n        .is_ok());\n    fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix);\n\n    // Register finality signatures to reach strict finality\n    register_multiple_signatures(\n        fwd_builder,\n        block,\n        validators_secret_keys.iter(),\n        chain_name_hash,\n    );\n\n    assert!(fwd_builder.register_block(block.clone(), None).is_ok());\n\n    // Check the block acquisition state\n    assert_matches!(\n        fwd_builder.block_acquisition_state(),\n        BlockAcquisitionState::HaveBlock(acquired_block, _, _) if acquired_block.hash() == block.hash()\n    
);\n\n    // Since the block doesn't have any deploys and already has achieved strict finality, we expect\n    // it to transition directly to HaveStrictFinality and ask for the next piece of work\n    // immediately\n    let mut effects = block_synchronizer.need_next(mock_reactor.effect_builder(), &mut rng);\n    assert_eq!(effects.len(), 1);\n\n    let fwd_builder = block_synchronizer\n        .forward\n        .as_ref()\n        .expect(\"Forward builder should have been initialized\");\n    assert_matches!(\n        fwd_builder.block_acquisition_state(),\n        BlockAcquisitionState::HaveStrictFinalitySignatures(acquired_block, ..) if acquired_block.hash() == block.hash()\n    );\n\n    // Expect a single NeedNext event\n    let events = effects.remove(0).await;\n    assert_eq!(events.len(), 1);\n    assert_matches!(\n        events[0],\n        Event::Request(BlockSynchronizerRequest::NeedNext)\n    );\n}\n\n#[tokio::test]\nasync fn fwd_have_block_with_strict_finality_requires_creation_of_finalized_block() {\n    let mut rng = TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let test_env = TestEnv::random(&mut rng);\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let chain_name_hash = validator_matrix.chain_name_hash();\n    let validators_secret_keys = test_env.validator_keys();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default());\n\n    // Register block for fwd sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), false));\n    assert!(block_synchronizer.forward.is_some());\n    block_synchronizer.register_peers(*block.hash(), peers.clone());\n\n    let fwd_builder = block_synchronizer\n        .forward\n        .as_mut()\n        .expect(\"Forward builder should have been initialized\");\n    assert!(fwd_builder\n        
.register_block_header(block.clone_header(), None)\n        .is_ok());\n    fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix);\n\n    // Register signatures for weak finality\n    register_multiple_signatures(\n        fwd_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            .take(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n    assert!(fwd_builder.register_block(block.clone(), None).is_ok());\n\n    // Check the block acquisition state\n    assert_matches!(\n        fwd_builder.block_acquisition_state(),\n        BlockAcquisitionState::HaveBlock(acquired_block, _, _) if acquired_block.hash() == block.hash()\n    );\n\n    // Register the remaining signatures to reach strict finality\n    register_multiple_signatures(\n        fwd_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            .skip(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n\n    // Check the block acquisition state\n    assert_matches!(\n        fwd_builder.block_acquisition_state(),\n        BlockAcquisitionState::HaveStrictFinalitySignatures(acquired_block, ..) 
if acquired_block.hash() == block.hash()\n    );\n\n    // Block should have strict finality and will require to be executed\n    let events = need_next(&mut rng, &mock_reactor, &mut block_synchronizer, 1).await;\n\n    for event in events {\n        assert_matches!(\n            event,\n            MockReactorEvent::MakeBlockExecutableRequest(MakeBlockExecutableRequest {\n                block_hash,\n                ..\n            }) if block_hash == *block.hash()\n        );\n    }\n}\n\n#[tokio::test]\nasync fn fwd_have_strict_finality_requests_enqueue_when_finalized_block_is_created() {\n    let mut rng = TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let test_env = TestEnv::random(&mut rng);\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let chain_name_hash = validator_matrix.chain_name_hash();\n    let validators_secret_keys = test_env.validator_keys();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default());\n\n    // Register block for fwd sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), false));\n    assert!(block_synchronizer.forward.is_some());\n    block_synchronizer.register_peers(*block.hash(), peers.clone());\n\n    let fwd_builder = block_synchronizer\n        .forward\n        .as_mut()\n        .expect(\"Forward builder should have been initialized\");\n    assert!(fwd_builder\n        .register_block_header(block.clone_header(), None)\n        .is_ok());\n    fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix);\n\n    // Register finality signatures to reach weak finality\n    register_multiple_signatures(\n        fwd_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            .take(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n    
assert!(fwd_builder.register_block(block.clone(), None).is_ok());\n    // Register the remaining signatures to reach strict finality\n    register_multiple_signatures(\n        fwd_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            .skip(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n\n    // Check the block acquisition state\n    assert_matches!(\n        fwd_builder.block_acquisition_state(),\n        BlockAcquisitionState::HaveStrictFinalitySignatures(acquired_block, ..) if acquired_block.hash() == block.hash()\n    );\n\n    assert_matches!(\n        block_synchronizer.forward_progress(),\n        BlockSynchronizerProgress::Syncing(block_hash, _, _) if block_hash == *block.hash()\n    );\n\n    // After the FinalizedBlock is created, the block synchronizer will request for it to be\n    // enqueued for execution\n    let event = Event::MadeFinalizedBlock {\n        block_hash: *block.hash(),\n        result: Some(ExecutableBlock::from_block_and_transactions(\n            block.clone().try_into().expect(\"Expected a V2 block.\"),\n            Vec::new(),\n        )),\n    };\n    let effects = block_synchronizer.handle_event(mock_reactor.effect_builder(), &mut rng, event);\n    assert_eq!(effects.len(), 1);\n    let events = mock_reactor.process_effects(effects).await;\n\n    // Check the block acquisition state\n    let fwd_builder = block_synchronizer\n        .forward\n        .as_ref()\n        .expect(\"Forward builder should have been initialized\");\n    assert_matches!(\n        fwd_builder.block_acquisition_state(),\n        BlockAcquisitionState::HaveExecutableBlock(actual_block, _, _) if *actual_block.hash() == *block.hash()\n    );\n\n    // This is the first of two events created when `EffectBuilder::enqueue_block_for_execution` is\n    // called.\n    assert_matches!(\n        &events[0],\n        MockReactorEvent::StorageRequest(\n            
StorageRequest::GetKeyBlockHeightForActivationPoint { .. }\n        )\n    );\n\n    // Progress is syncing until we get a confirmation that the block was enqueued for execution\n    assert_matches!(\n        block_synchronizer.forward_progress(),\n        BlockSynchronizerProgress::Syncing(block_hash, _, _) if block_hash == *block.hash()\n    );\n}\n\n#[tokio::test]\nasync fn fwd_builder_status_is_executing_when_block_is_enqueued_for_execution() {\n    let mut rng = TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let test_env = TestEnv::random(&mut rng);\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let chain_name_hash = validator_matrix.chain_name_hash();\n    let validators_secret_keys = test_env.validator_keys();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default());\n\n    // Register block for fwd sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), false));\n    assert!(block_synchronizer.forward.is_some());\n    block_synchronizer.register_peers(*block.hash(), peers.clone());\n\n    let fwd_builder = block_synchronizer\n        .forward\n        .as_mut()\n        .expect(\"Forward builder should have been initialized\");\n    assert!(fwd_builder\n        .register_block_header(block.clone_header(), None)\n        .is_ok());\n    fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix);\n\n    // Register finality signatures to reach weak finality\n    register_multiple_signatures(\n        fwd_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            .take(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n    assert!(fwd_builder.register_block(block.clone(), None).is_ok());\n    // Register the remaining signatures to reach strict finality\n    
register_multiple_signatures(\n        fwd_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            .skip(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n\n    // Check the block acquisition state\n    assert_matches!(\n        fwd_builder.block_acquisition_state(),\n        BlockAcquisitionState::HaveStrictFinalitySignatures(acquired_block, ..) if acquired_block.hash() == block.hash()\n    );\n\n    // Register finalized block\n    fwd_builder.register_made_executable_block(ExecutableBlock::from_block_and_transactions(\n        block.clone().try_into().expect(\"Expected a V2 block.\"),\n        Vec::new(),\n    ));\n    assert_matches!(\n        fwd_builder.block_acquisition_state(),\n        BlockAcquisitionState::HaveExecutableBlock(actual_block, _, _) if *actual_block.hash() == *block.hash()\n    );\n\n    // Simulate that enqueuing the block for execution was successful\n    let event = Event::MarkBlockExecutionEnqueued(*block.hash());\n\n    // There is nothing for the synchronizer to do at this point.\n    // It will wait for the block to be executed\n    let effects = block_synchronizer.handle_event(mock_reactor.effect_builder(), &mut rng, event);\n    assert_eq!(effects.len(), 0);\n\n    // Progress should now indicate that the block is executing\n    assert_matches!(\n        block_synchronizer.forward_progress(),\n        BlockSynchronizerProgress::Executing(block_hash, _, _) if block_hash == *block.hash()\n    );\n}\n\n#[tokio::test]\nasync fn fwd_sync_is_finished_when_block_is_marked_as_executed() {\n    let mut rng = TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let test_env = TestEnv::random(&mut rng);\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let chain_name_hash = validator_matrix.chain_name_hash();\n    let validators_secret_keys = test_env.validator_keys();\n   
 let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default());\n\n    // Register block for fwd sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), false));\n    assert!(block_synchronizer.forward.is_some());\n    block_synchronizer.register_peers(*block.hash(), peers.clone());\n\n    let fwd_builder = block_synchronizer\n        .forward\n        .as_mut()\n        .expect(\"Forward builder should have been initialized\");\n    assert!(fwd_builder\n        .register_block_header(block.clone_header(), None)\n        .is_ok());\n    fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix);\n\n    // Register finality signatures to reach weak finality\n    register_multiple_signatures(\n        fwd_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            .take(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n    assert!(fwd_builder.register_block(block.clone(), None).is_ok());\n    // Register the remaining signatures to reach strict finality\n    register_multiple_signatures(\n        fwd_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            .skip(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n\n    // Register finalized block\n    fwd_builder.register_made_executable_block(ExecutableBlock::from_block_and_transactions(\n        block.clone().try_into().expect(\"Expected a V2 block.\"),\n        Vec::new(),\n    ));\n    fwd_builder.register_block_execution_enqueued();\n\n    // Progress should now indicate that the block is executing\n    assert_matches!(\n        block_synchronizer.forward_progress(),\n        BlockSynchronizerProgress::Executing(block_hash, _, _) if block_hash == *block.hash()\n    );\n\n    // Simulate a MarkBlockExecuted event\n    let event = Event::MarkBlockExecuted(*block.hash());\n\n 
   // There is nothing for the synchronizer to do at this point, the sync is finished.\n    let effects = block_synchronizer.handle_event(mock_reactor.effect_builder(), &mut rng, event);\n    assert_eq!(effects.len(), 0);\n\n    // Progress should now indicate that the block is executing\n    assert_matches!(\n        block_synchronizer.forward_progress(),\n        BlockSynchronizerProgress::Synced(block_hash, _, _) if block_hash == *block.hash()\n    );\n}\n\n#[tokio::test]\nasync fn historical_sync_announces_meta_block() {\n    let mut rng = TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let test_env = TestEnv::random(&mut rng);\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let chain_name_hash = validator_matrix.chain_name_hash();\n    let validators_secret_keys = test_env.validator_keys();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default());\n\n    // Register block for historical sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), SHOULD_FETCH_EXECUTION_STATE));\n    assert!(block_synchronizer.historical.is_some());\n    block_synchronizer.register_peers(*block.hash(), peers.clone());\n\n    let historical_builder = block_synchronizer\n        .historical\n        .as_mut()\n        .expect(\"Historical builder should have been initialized\");\n    assert!(historical_builder\n        .register_block_header(block.clone_header(), None)\n        .is_ok());\n    historical_builder.register_era_validator_weights(&block_synchronizer.validator_matrix);\n\n    // Register finality signatures to reach weak finality\n    register_multiple_signatures(\n        historical_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            .take(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n    
assert!(historical_builder\n        .register_block(block.clone(), None)\n        .is_ok());\n    // Register the remaining signatures to reach strict finality\n    register_multiple_signatures(\n        historical_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            .skip(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n\n    // Set the builder state to `HaveStrictFinalitySignatures`\n    match historical_builder.block_acquisition_state() {\n        BlockAcquisitionState::HaveBlock(state_block, state_signatures, _) => historical_builder\n            .set_block_acquisition_state(BlockAcquisitionState::HaveStrictFinalitySignatures(\n                state_block.clone(),\n                state_signatures.clone(),\n            )),\n        other => panic!(\"Unexpected state: {:?}\", other),\n    }\n    // Make sure the historical builder is syncing\n    assert_matches!(\n        block_synchronizer.historical_progress(),\n        BlockSynchronizerProgress::Syncing(block_hash, _, _) if block_hash == *block.hash()\n    );\n\n    // Simulate a MarkBlockCompleted event\n    let event = Event::MarkBlockCompleted {\n        block_hash: *block.hash(),\n        is_new: true,\n    };\n    // Put it through to the synchronizer\n    let effects = block_synchronizer.handle_event(mock_reactor.effect_builder(), &mut rng, event);\n    assert_eq!(effects.len(), 1);\n    let mut events = mock_reactor.process_effects(effects).await;\n\n    // We should have a request to get the execution results\n    match events.pop().unwrap() {\n        MockReactorEvent::StorageRequest(StorageRequest::GetExecutionResults {\n            block_hash: actual_block_hash,\n            responder,\n        }) => {\n            assert_eq!(actual_block_hash, *block.hash());\n            // We'll just send empty execution results for this case.\n            responder.respond(Some(vec![])).await;\n        }\n        other => 
panic!(\"Unexpected event: {:?}\", other),\n    }\n    // Crank one more time because the meta block event is chained onto the\n    // execution results fetching\n    let event = mock_reactor.crank().await;\n    match event {\n        MockReactorEvent::MetaBlockAnnouncement(MetaBlockAnnouncement(mut meta_block)) => {\n            assert_eq!(meta_block.hash(), *block.hash());\n            // The transaction buffer is supposed to get notified\n            assert!(meta_block\n                .mut_state()\n                .register_as_sent_to_transaction_buffer()\n                .was_updated());\n        }\n        other => panic!(\"Unexpected event: {:?}\", other),\n    }\n    // The historical sync for this block should now be complete\n    assert_matches!(\n        block_synchronizer.historical_progress(),\n        BlockSynchronizerProgress::Synced(block_hash, _, _) if block_hash == *block.hash()\n    );\n}\n\n#[test]\nfn builders_are_purged_when_requested() {\n    let mut rng = TestRng::new();\n    let test_env = TestEnv::random(&mut rng);\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default());\n\n    // Register block for forward sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), false));\n\n    // Registering block for historical sync\n    assert!(block_synchronizer\n        .register_block_by_hash(*TestBlockBuilder::new().build(&mut rng).hash(), true));\n\n    assert!(block_synchronizer.forward.is_some());\n    assert!(block_synchronizer.historical.is_some());\n\n    block_synchronizer.purge_historical();\n    assert!(block_synchronizer.forward.is_some());\n    assert!(block_synchronizer.historical.is_none());\n\n    assert!(block_synchronizer\n        .register_block_by_hash(*TestBlockBuilder::new().build(&mut rng).hash(), true));\n    
assert!(block_synchronizer.forward.is_some());\n    assert!(block_synchronizer.historical.is_some());\n\n    block_synchronizer.purge_forward();\n    assert!(block_synchronizer.forward.is_none());\n    assert!(block_synchronizer.historical.is_some());\n\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), false));\n    assert!(block_synchronizer.forward.is_some());\n    assert!(block_synchronizer.historical.is_some());\n\n    block_synchronizer.purge();\n    assert!(block_synchronizer.forward.is_none());\n    assert!(block_synchronizer.historical.is_none());\n}\n\n#[tokio::test]\nasync fn synchronizer_halts_if_block_cannot_be_made_executable() {\n    let mut rng = TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let test_env = TestEnv::random(&mut rng);\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let chain_name_hash = validator_matrix.chain_name_hash();\n    let validators_secret_keys = test_env.validator_keys();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default());\n\n    // Register block for fwd sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), false));\n    assert!(block_synchronizer.forward.is_some());\n    block_synchronizer.register_peers(*block.hash(), peers.clone());\n\n    let fwd_builder = block_synchronizer\n        .forward\n        .as_mut()\n        .expect(\"Forward builder should have been initialized\");\n    assert!(fwd_builder\n        .register_block_header(block.clone_header(), None)\n        .is_ok());\n    fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix);\n    // Register finality signatures to reach weak finality\n    register_multiple_signatures(\n        fwd_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            
.take(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n    assert!(fwd_builder.register_block(block.clone(), None).is_ok());\n    // Register the remaining signatures to reach strict finality\n    register_multiple_signatures(\n        fwd_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            .skip(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n\n    // Block should have strict finality and will require to be executed\n    let events = need_next(&mut rng, &mock_reactor, &mut block_synchronizer, 1).await;\n\n    for event in events {\n        assert_matches!(\n            event,\n            MockReactorEvent::MakeBlockExecutableRequest(MakeBlockExecutableRequest {\n                block_hash,\n                ..\n            }) if block_hash == *block.hash()\n        );\n    }\n\n    // Simulate an error (the block couldn't be converted for execution).\n    // This can happen if the synchronizer didn't fetch the right approvals hashes.\n    // Don't expect to progress any further here. 
The control logic should\n    // leap and backfill this block during a historical sync.\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        &mut rng,\n        Event::MadeFinalizedBlock {\n            block_hash: *block.hash(),\n            result: None,\n        },\n    );\n    assert_eq!(effects.len(), 0);\n\n    // Check the block acquisition state\n    let fwd_builder = block_synchronizer\n        .forward\n        .as_ref()\n        .expect(\"Forward builder should have been initialized\");\n    assert_matches!(\n        fwd_builder.block_acquisition_state(),\n        BlockAcquisitionState::Failed(block_hash, _) if block_hash == block.hash()\n    );\n\n    // Progress should now indicate that the block is syncing\n    assert_matches!(\n        block_synchronizer.forward_progress(),\n        BlockSynchronizerProgress::Syncing(block_hash, _, _) if block_hash == *block.hash()\n    );\n}\n\nfn historical_state(block_synchronizer: &BlockSynchronizer) -> &BlockAcquisitionState {\n    block_synchronizer\n        .historical\n        .as_ref()\n        .unwrap()\n        .block_acquisition_state()\n}\n\n/// When there is no deploy, the state goes from `HaveGlobalState` to `HaveStrictFinalitySignature`\n/// directly, skipping `HaveAllExecutionResults`, `HaveApprovalsHashes` and `HaveAllTransactions`.\n#[tokio::test]\nasync fn historical_sync_skips_exec_results_and_deploys_if_block_empty() {\n    let rng = &mut TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let test_env = TestEnv::random(rng);\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let chain_name_hash = validator_matrix.chain_name_hash();\n    let validators_secret_keys = test_env.validator_keys();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(rng, validator_matrix, Default::default())\n            
.with_legacy_finality(LegacyRequiredFinality::Strict);\n\n    // Register block for historical sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), SHOULD_FETCH_EXECUTION_STATE));\n    assert!(block_synchronizer.forward.is_none());\n    block_synchronizer.register_peers(*block.hash(), peers.clone());\n\n    // Skip steps HaveBlockHeader, HaveWeakFinalitySignature, HaveBlock\n\n    let historical_builder = block_synchronizer\n        .historical\n        .as_mut()\n        .expect(\"Historical builder should have been initialized\");\n    historical_builder\n        .register_block_header(block.clone_header(), None)\n        .expect(\"header registration works\");\n    historical_builder.register_era_validator_weights(&block_synchronizer.validator_matrix);\n    register_multiple_signatures(\n        historical_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            .take(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n    assert!(historical_builder\n        .register_block(block.clone(), None)\n        .is_ok());\n\n    let events = need_next(rng, &mock_reactor, &mut block_synchronizer, 1).await;\n\n    let request = match events.try_one() {\n        Some(MockReactorEvent::SyncGlobalStateRequest(\n            request @ SyncGlobalStateRequest {\n                block_hash,\n                state_root_hash,\n                ..\n            },\n        )) if block_hash == *block.hash() && &state_root_hash == block.state_root_hash() => request,\n        _ => panic!(\"there should be a unique event of type SyncGlobalStateRequest\"),\n    };\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::GlobalStateSynchronizer(global_state_synchronizer::Event::Request(request)),\n    );\n\n    // ----- HaveBlock -----\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        
BlockAcquisitionState::HaveBlock { .. }\n    );\n\n    // Those effects are handled directly and not through the reactor:\n    let events = effects\n        .try_one()\n        .expect(\"there should be only one effect\")\n        .await;\n    assert_matches!(\n        events.try_one(),\n        Some(Event::GlobalStateSynchronizer(\n            GlobalStateSynchronizerEvent::GetPeers(_)\n        ))\n    );\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::GlobalStateSynced {\n            block_hash: *block.hash(),\n            result: Ok(GlobalStateSynchronizerResponse::new(\n                global_state_synchronizer::RootHash::new(*block.state_root_hash()),\n                vec![],\n            )),\n        },\n    );\n\n    // ----- HaveGlobalState -----\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveGlobalState { .. }\n    );\n    let events = mock_reactor.process_effects(effects).await;\n\n    for event in events {\n        assert_matches!(event, MockReactorEvent::FinalitySignatureFetcherRequest(..));\n    }\n}\n\n#[tokio::test]\nasync fn historical_sync_no_legacy_block() {\n    let rng = &mut TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let txn = Transaction::random(rng);\n    let test_env = TestEnv::random(rng).with_block(\n        TestBlockBuilder::new()\n            .era(1)\n            .transactions(iter::once(&txn))\n            .build(rng)\n            .into(),\n    );\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let chain_name_hash = validator_matrix.chain_name_hash();\n    let validators_secret_keys = test_env.validator_keys();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(rng, validator_matrix, Default::default())\n            
.with_legacy_finality(LegacyRequiredFinality::Strict);\n\n    // Register block for historical sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), SHOULD_FETCH_EXECUTION_STATE));\n    assert!(block_synchronizer.forward.is_none());\n    block_synchronizer.register_peers(*block.hash(), peers.clone());\n\n    let historical_builder = block_synchronizer\n        .historical\n        .as_mut()\n        .expect(\"Historical builder should have been initialized\");\n    historical_builder\n        .register_block_header(block.clone_header(), None)\n        .expect(\"header registration works\");\n    historical_builder.register_era_validator_weights(&block_synchronizer.validator_matrix);\n    register_multiple_signatures(\n        historical_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            .take(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n    assert!(historical_builder\n        .register_block(block.clone(), None)\n        .is_ok());\n\n    let events = need_next(rng, &mock_reactor, &mut block_synchronizer, 1).await;\n\n    let request = match events.try_one() {\n        Some(MockReactorEvent::SyncGlobalStateRequest(\n            request @ SyncGlobalStateRequest {\n                block_hash,\n                state_root_hash,\n                ..\n            },\n        )) if block_hash == *block.hash() && &state_root_hash == block.state_root_hash() => request,\n        _ => panic!(\"there should be a unique event of type SyncGlobalStateRequest\"),\n    };\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::GlobalStateSynchronizer(global_state_synchronizer::Event::Request(request)),\n    );\n\n    // Those effects are handled directly and not through the reactor:\n    let events = effects.one().await;\n    assert_matches!(\n        events.try_one(),\n        
Some(Event::GlobalStateSynchronizer(\n            GlobalStateSynchronizerEvent::GetPeers(_)\n        ))\n    );\n\n    // ----- HaveBlock -----\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveBlock { .. }\n    );\n\n    // Let's not test the detail of the global synchronization event,\n    // since it is already tested in its unit tests.\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::GlobalStateSynced {\n            block_hash: *block.hash(),\n            result: Ok(GlobalStateSynchronizerResponse::new(\n                global_state_synchronizer::RootHash::new(*block.state_root_hash()),\n                vec![],\n            )),\n        },\n    );\n\n    // ----- HaveGlobalState -----\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveGlobalState { .. }\n    );\n\n    let events = mock_reactor.process_effects(effects).await;\n\n    match events.try_one() {\n        Some(MockReactorEvent::ContractRuntimeRequest(\n                 ContractRuntimeRequest::GetExecutionResultsChecksum {\n                     state_root_hash,\n                     responder,\n                 },\n             )) => responder.respond(ExecutionResultsChecksumResult::Success { checksum: state_root_hash }).await,\n        other => panic!(\"Event should be of type `ContractRuntimeRequest(ContractRuntimeRequest::GetExecutionResultsChecksum) but it is {:?}\", other),\n    }\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::GotExecutionResultsChecksum {\n            block_hash: *block.hash(),\n            result: ExecutionResultsChecksumResult::Success {\n                checksum: Digest::SENTINEL_NONE,\n            },\n        },\n    );\n    let events = mock_reactor.process_effects(effects).await;\n\n    for event in events {\n       
 assert_matches!(\n            event,\n            MockReactorEvent::BlockExecutionResultsOrChunkFetcherRequest(FetcherRequest { .. })\n        );\n    }\n\n    let execution_results = BlockExecutionResultsOrChunk::new_mock_value(rng, *block.hash());\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::ExecutionResultsFetched {\n            block_hash: *block.hash(),\n            result: Ok(FetchedData::from_storage(Box::new(execution_results))),\n        },\n    );\n\n    let mut events = mock_reactor.process_effects(effects).await;\n\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveGlobalState { .. }\n    );\n\n    assert_matches!(\n        events.remove(0),\n        MockReactorEvent::StorageRequest(StorageRequest::PutExecutionResults { .. })\n    );\n    for event in events {\n        assert_matches!(\n            event,\n            MockReactorEvent::ApprovalsHashesFetcherRequest(FetcherRequest { .. })\n        );\n    }\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::ExecutionResultsStored(*block.hash()),\n    );\n    // ----- HaveAllExecutionResults -----\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveAllExecutionResults(_, _, _, checksum) if checksum.is_checkable()\n    );\n\n    let events = mock_reactor.process_effects(effects).await;\n\n    for event in events {\n        assert_matches!(\n            event,\n            MockReactorEvent::ApprovalsHashesFetcherRequest(FetcherRequest { .. 
})\n        );\n    }\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::ApprovalsHashesFetched(Ok(FetchedData::from_storage(Box::new(\n            ApprovalsHashes::new(\n                *block.hash(),\n                vec![txn.compute_approvals_hash().unwrap()],\n                dummy_merkle_proof(),\n            ),\n        )))),\n    );\n    // ----- HaveApprovalsHashes -----\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveApprovalsHashes(_, _, _)\n    );\n\n    let events = mock_reactor.process_effects(effects).await;\n    for event in events {\n        assert_matches!(\n            event,\n            MockReactorEvent::TransactionFetcherRequest(FetcherRequest { .. })\n        );\n    }\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::TransactionFetched {\n            block_hash: *block.hash(),\n            result: Either::Right(Ok(FetchedData::from_storage(Box::new(txn)))),\n        },\n    );\n    // ----- HaveAllTransactions -----\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveAllTransactions(_, _)\n    );\n\n    let events = mock_reactor.process_effects(effects).await;\n\n    for event in events {\n        assert_matches!(event, MockReactorEvent::FinalitySignatureFetcherRequest(_));\n    }\n\n    // Then we get back to the strict finality signature part, which is already tested.\n}\n\n#[tokio::test]\nasync fn historical_sync_legacy_block_strict_finality() {\n    let rng = &mut TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let deploy = Deploy::random(rng);\n    let test_env = TestEnv::random(rng).with_block(\n        TestBlockV1Builder::new()\n            .era(1)\n            .deploys(iter::once(&deploy.clone()))\n            .build(rng)\n            .into(),\n    );\n    let 
peers = test_env.peers();\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let chain_name_hash = validator_matrix.chain_name_hash();\n    let validators_secret_keys = test_env.validator_keys();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(rng, validator_matrix, Default::default())\n            .with_legacy_finality(LegacyRequiredFinality::Strict);\n\n    // Register block for historical sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), SHOULD_FETCH_EXECUTION_STATE));\n    assert!(block_synchronizer.forward.is_none());\n    block_synchronizer.register_peers(*block.hash(), peers.clone());\n\n    let historical_builder = block_synchronizer\n        .historical\n        .as_mut()\n        .expect(\"Historical builder should have been initialized\");\n    historical_builder\n        .register_block_header(block.clone_header(), None)\n        .expect(\"header registration works\");\n    historical_builder.register_era_validator_weights(&block_synchronizer.validator_matrix);\n    register_multiple_signatures(\n        historical_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            .take(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n    assert!(historical_builder\n        .register_block(block.clone(), None)\n        .is_ok());\n\n    let events = need_next(rng, &mock_reactor, &mut block_synchronizer, 1).await;\n\n    let request = match events.try_one() {\n        Some(MockReactorEvent::SyncGlobalStateRequest(\n            request @ SyncGlobalStateRequest {\n                block_hash,\n                state_root_hash,\n                ..\n            },\n        )) if block_hash == *block.hash() && &state_root_hash == block.state_root_hash() => request,\n        _ => panic!(\"there should be a unique event of type SyncGlobalStateRequest\"),\n    };\n\n    let effects = 
block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::GlobalStateSynchronizer(global_state_synchronizer::Event::Request(request)),\n    );\n\n    // Those effects are handled directly and not through the reactor:\n    let events = effects.one().await;\n    assert_matches!(\n        events.try_one(),\n        Some(Event::GlobalStateSynchronizer(\n            GlobalStateSynchronizerEvent::GetPeers(_)\n        ))\n    );\n\n    // ----- HaveBlock -----\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveBlock { .. }\n    );\n\n    // Let's not test the detail of the global synchronization event,\n    // since it is already tested in its unit tests.\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::GlobalStateSynced {\n            block_hash: *block.hash(),\n            result: Ok(GlobalStateSynchronizerResponse::new(\n                global_state_synchronizer::RootHash::new(*block.state_root_hash()),\n                vec![],\n            )),\n        },\n    );\n\n    // ----- HaveGlobalState -----\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveGlobalState { .. 
}\n    );\n\n    let events = mock_reactor.process_effects(effects).await;\n\n    match events.try_one() {\n        Some(MockReactorEvent::ContractRuntimeRequest(\n                 ContractRuntimeRequest::GetExecutionResultsChecksum {\n                     state_root_hash,\n                     responder,\n                 },\n             )) => responder.respond(ExecutionResultsChecksumResult::Success { checksum: state_root_hash }).await,\n        other => panic!(\"Event should be of type `ContractRuntimeRequest(ContractRuntimeRequest::GetExecutionResultsChecksum) but it is {:?}\", other),\n    }\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::GotExecutionResultsChecksum {\n            block_hash: *block.hash(),\n            result: ExecutionResultsChecksumResult::RegistryNotFound, // test a legacy block\n        },\n    );\n    let events = mock_reactor.process_effects(effects).await;\n\n    for event in events {\n        assert_matches!(\n            event,\n            MockReactorEvent::BlockExecutionResultsOrChunkFetcherRequest(FetcherRequest { .. })\n        );\n    }\n\n    let execution_results = BlockExecutionResultsOrChunk::new_mock_value(rng, *block.hash());\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::ExecutionResultsFetched {\n            block_hash: *block.hash(),\n            result: Ok(FetchedData::from_storage(Box::new(execution_results))),\n        },\n    );\n\n    let mut events = mock_reactor.process_effects(effects).await;\n\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveGlobalState { .. }\n    );\n\n    assert_matches!(\n        events.remove(0),\n        MockReactorEvent::StorageRequest(StorageRequest::PutExecutionResults { .. 
})\n    );\n    for event in events {\n        assert_matches!(\n            event,\n            MockReactorEvent::ApprovalsHashesFetcherRequest(FetcherRequest { .. })\n        );\n    }\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::ExecutionResultsStored(*block.hash()),\n    );\n    // ----- HaveAllExecutionResults -----\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveAllExecutionResults(_, _, _, checksum)\n            if checksum.is_checkable() == false\n    );\n\n    let events = mock_reactor.process_effects(effects).await;\n\n    for event in events {\n        assert_matches!(\n            event,\n            MockReactorEvent::LegacyDeployFetcherRequest(FetcherRequest { .. })\n        );\n    }\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::TransactionFetched {\n            block_hash: *block.hash(),\n            result: Either::Left(Ok(FetchedData::from_storage(Box::new(deploy.into())))),\n        },\n    );\n    // ----- HaveAllTransactions -----\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveAllTransactions(_, _)\n    );\n\n    let events = mock_reactor.process_effects(effects).await;\n\n    for event in events {\n        assert_matches!(event, MockReactorEvent::FinalitySignatureFetcherRequest(_));\n    }\n\n    // Then we get back to the strict finality signature part, which is already tested.\n}\n\n#[tokio::test]\nasync fn historical_sync_legacy_block_weak_finality() {\n    let rng = &mut TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let deploy = Deploy::random(rng);\n    let test_env = TestEnv::random(rng).with_block(\n        TestBlockV1Builder::new()\n            .era(1)\n            .deploys(iter::once(&deploy.clone()))\n            .build(rng)\n            
.into(),\n    );\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let chain_name_hash = validator_matrix.chain_name_hash();\n    let validators_secret_keys = test_env.validator_keys();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(rng, validator_matrix, Default::default())\n            .with_legacy_finality(LegacyRequiredFinality::Weak);\n\n    // Register block for historical sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), SHOULD_FETCH_EXECUTION_STATE));\n    assert!(block_synchronizer.forward.is_none());\n    block_synchronizer.register_peers(*block.hash(), peers.clone());\n\n    let historical_builder = block_synchronizer\n        .historical\n        .as_mut()\n        .expect(\"Historical builder should have been initialized\");\n    historical_builder\n        .register_block_header(block.clone_header(), None)\n        .expect(\"header registration works\");\n    historical_builder.register_era_validator_weights(&block_synchronizer.validator_matrix);\n    register_multiple_signatures(\n        historical_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            .take(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n    assert!(historical_builder\n        .register_block(block.clone(), None)\n        .is_ok());\n\n    let events = need_next(rng, &mock_reactor, &mut block_synchronizer, 1).await;\n\n    let request = match events.try_one() {\n        Some(MockReactorEvent::SyncGlobalStateRequest(\n            request @ SyncGlobalStateRequest {\n                block_hash,\n                state_root_hash,\n                ..\n            },\n        )) if block_hash == *block.hash() && &state_root_hash == block.state_root_hash() => request,\n        _ => panic!(\"there should be a unique event of type SyncGlobalStateRequest\"),\n    };\n\n    
let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::GlobalStateSynchronizer(global_state_synchronizer::Event::Request(request)),\n    );\n\n    // Those effects are handled directly and not through the reactor:\n    let events = effects.one().await;\n    assert_matches!(\n        events.try_one(),\n        Some(Event::GlobalStateSynchronizer(\n            GlobalStateSynchronizerEvent::GetPeers(_)\n        ))\n    );\n\n    // ----- HaveBlock -----\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveBlock { .. }\n    );\n\n    // Let's not test the detail of the global synchronization event,\n    // since it is already tested in its unit tests.\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::GlobalStateSynced {\n            block_hash: *block.hash(),\n            result: Ok(GlobalStateSynchronizerResponse::new(\n                global_state_synchronizer::RootHash::new(*block.state_root_hash()),\n                vec![],\n            )),\n        },\n    );\n\n    // ----- HaveGlobalState -----\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveGlobalState { .. 
}\n    );\n\n    let events = mock_reactor.process_effects(effects).await;\n\n    match events.try_one() {\n        Some(MockReactorEvent::ContractRuntimeRequest(\n                 ContractRuntimeRequest::GetExecutionResultsChecksum {\n                     state_root_hash,\n                     responder,\n                 },\n             )) => responder.respond(ExecutionResultsChecksumResult::Success { checksum: state_root_hash }).await,\n        other => panic!(\"Event should be of type `ContractRuntimeRequest(ContractRuntimeRequest::GetExecutionResultsChecksum) but it is {:?}\", other),\n    }\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::GotExecutionResultsChecksum {\n            block_hash: *block.hash(),\n            result: ExecutionResultsChecksumResult::RegistryNotFound, // test a legacy block\n        },\n    );\n    let events = mock_reactor.process_effects(effects).await;\n\n    for event in events {\n        assert_matches!(\n            event,\n            MockReactorEvent::BlockExecutionResultsOrChunkFetcherRequest(FetcherRequest { .. })\n        );\n    }\n\n    let execution_results = BlockExecutionResultsOrChunk::new_mock_value(rng, *block.hash());\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::ExecutionResultsFetched {\n            block_hash: *block.hash(),\n            result: Ok(FetchedData::from_storage(Box::new(execution_results))),\n        },\n    );\n\n    let mut events = mock_reactor.process_effects(effects).await;\n\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveGlobalState { .. }\n    );\n\n    assert_matches!(\n        events.remove(0),\n        MockReactorEvent::StorageRequest(StorageRequest::PutExecutionResults { .. 
})\n    );\n    for event in events {\n        assert_matches!(\n            event,\n            MockReactorEvent::ApprovalsHashesFetcherRequest(FetcherRequest { .. })\n        );\n    }\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::ExecutionResultsStored(*block.hash()),\n    );\n    // ----- HaveAllExecutionResults -----\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveAllExecutionResults(_, _, _, checksum)\n            if checksum.is_checkable() == false\n    );\n\n    let events = mock_reactor.process_effects(effects).await;\n\n    for event in events {\n        assert_matches!(\n            event,\n            MockReactorEvent::LegacyDeployFetcherRequest(FetcherRequest { .. })\n        );\n    }\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::TransactionFetched {\n            block_hash: *block.hash(),\n            result: Either::Left(Ok(FetchedData::from_storage(Box::new(deploy.into())))),\n        },\n    );\n\n    // ----- HaveStrictFinalitySignatures -----\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveStrictFinalitySignatures(_, _)\n    );\n\n    let events = effects.one().await;\n\n    let event = match events.try_one() {\n        Some(event @ Event::Request(BlockSynchronizerRequest::NeedNext)) => event,\n        _ => panic!(\"Expected a NeedNext request here\"),\n    };\n\n    let effects = block_synchronizer.handle_event(mock_reactor.effect_builder(), rng, event);\n\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveStrictFinalitySignatures(_, _)\n    );\n\n    let events = mock_reactor.process_effects(effects).await;\n\n    for event in events {\n        assert_matches!(event, MockReactorEvent::MarkBlockCompletedRequest(_));\n    
}\n}\n\n#[tokio::test]\nasync fn historical_sync_legacy_block_any_finality() {\n    let rng = &mut TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let deploy = Deploy::random(rng);\n    let test_env = TestEnv::random(rng).with_block(\n        TestBlockV1Builder::new()\n            .era(1)\n            .deploys(iter::once(&deploy.clone()))\n            .build(rng)\n            .into(),\n    );\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let chain_name_hash = validator_matrix.chain_name_hash();\n    let validators_secret_keys = test_env.validator_keys();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(rng, validator_matrix, Default::default())\n            .with_legacy_finality(LegacyRequiredFinality::Any);\n\n    // Register block for historical sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), SHOULD_FETCH_EXECUTION_STATE));\n    assert!(block_synchronizer.forward.is_none());\n    block_synchronizer.register_peers(*block.hash(), peers.clone());\n\n    let historical_builder = block_synchronizer\n        .historical\n        .as_mut()\n        .expect(\"Historical builder should have been initialized\");\n    historical_builder\n        .register_block_header(block.clone_header(), None)\n        .expect(\"header registration works\");\n    historical_builder.register_era_validator_weights(&block_synchronizer.validator_matrix);\n    register_multiple_signatures(\n        historical_builder,\n        block,\n        validators_secret_keys.iter().take(1),\n        chain_name_hash,\n    );\n    assert!(historical_builder\n        .register_block(block.clone(), None)\n        .is_ok());\n\n    let events = need_next(rng, &mock_reactor, &mut block_synchronizer, 1).await;\n\n    let request = match events.try_one() {\n        Some(MockReactorEvent::SyncGlobalStateRequest(\n            request @ 
SyncGlobalStateRequest {\n                block_hash,\n                state_root_hash,\n                ..\n            },\n        )) if block_hash == *block.hash() && &state_root_hash == block.state_root_hash() => request,\n        _ => panic!(\"there should be a unique event of type SyncGlobalStateRequest\"),\n    };\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::GlobalStateSynchronizer(global_state_synchronizer::Event::Request(request)),\n    );\n\n    // Those effects are handled directly and not through the reactor:\n    let events = effects.one().await;\n    assert_matches!(\n        events.try_one(),\n        Some(Event::GlobalStateSynchronizer(\n            GlobalStateSynchronizerEvent::GetPeers(_)\n        ))\n    );\n\n    // ----- HaveBlock -----\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveBlock { .. }\n    );\n\n    // Let's not test the detail of the global synchronization event,\n    // since it is already tested in its unit tests.\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::GlobalStateSynced {\n            block_hash: *block.hash(),\n            result: Ok(GlobalStateSynchronizerResponse::new(\n                global_state_synchronizer::RootHash::new(*block.state_root_hash()),\n                vec![],\n            )),\n        },\n    );\n\n    // ----- HaveGlobalState -----\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveGlobalState { .. 
}\n    );\n\n    let events = mock_reactor.process_effects(effects).await;\n\n    match events.try_one() {\n        Some(MockReactorEvent::ContractRuntimeRequest(\n                 ContractRuntimeRequest::GetExecutionResultsChecksum {\n                     state_root_hash,\n                     responder,\n                 },\n             )) => responder.respond(ExecutionResultsChecksumResult::Success { checksum: state_root_hash }).await,\n        other => panic!(\"Event should be of type `ContractRuntimeRequest(ContractRuntimeRequest::GetExecutionResultsChecksum) but it is {:?}\", other),\n    }\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::GotExecutionResultsChecksum {\n            block_hash: *block.hash(),\n            result: ExecutionResultsChecksumResult::RegistryNotFound, // test a legacy block\n        },\n    );\n    let events = mock_reactor.process_effects(effects).await;\n\n    for event in events {\n        assert_matches!(\n            event,\n            MockReactorEvent::BlockExecutionResultsOrChunkFetcherRequest(FetcherRequest { .. })\n        );\n    }\n\n    let execution_results = BlockExecutionResultsOrChunk::new_mock_value(rng, *block.hash());\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::ExecutionResultsFetched {\n            block_hash: *block.hash(),\n            result: Ok(FetchedData::from_storage(Box::new(execution_results))),\n        },\n    );\n\n    let mut events = mock_reactor.process_effects(effects).await;\n\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveGlobalState { .. }\n    );\n\n    assert_matches!(\n        events.remove(0),\n        MockReactorEvent::StorageRequest(StorageRequest::PutExecutionResults { .. 
})\n    );\n    for event in events {\n        assert_matches!(\n            event,\n            MockReactorEvent::ApprovalsHashesFetcherRequest(FetcherRequest { .. })\n        );\n    }\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::ExecutionResultsStored(*block.hash()),\n    );\n    // ----- HaveAllExecutionResults -----\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveAllExecutionResults(_, _, _, checksum)\n            if checksum.is_checkable() == false\n    );\n\n    let events = mock_reactor.process_effects(effects).await;\n\n    for event in events {\n        assert_matches!(\n            event,\n            MockReactorEvent::LegacyDeployFetcherRequest(FetcherRequest { .. })\n        );\n    }\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::TransactionFetched {\n            block_hash: *block.hash(),\n            result: Either::Left(Ok(FetchedData::from_storage(Box::new(deploy.into())))),\n        },\n    );\n\n    // ----- HaveStrictFinalitySignatures -----\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveStrictFinalitySignatures(_, _)\n    );\n\n    let events = effects.one().await;\n\n    let event = match events.try_one() {\n        Some(event @ Event::Request(BlockSynchronizerRequest::NeedNext)) => event,\n        _ => panic!(\"Expected a NeedNext request here\"),\n    };\n\n    let effects = block_synchronizer.handle_event(mock_reactor.effect_builder(), rng, event);\n\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveStrictFinalitySignatures(_, _)\n    );\n\n    let events = mock_reactor.process_effects(effects).await;\n\n    for event in events {\n        assert_matches!(event, MockReactorEvent::MarkBlockCompletedRequest(_));\n    
}\n}\n\n#[tokio::test]\nasync fn fwd_sync_latch_should_not_decrement_for_old_responses() {\n    let mut rng = TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let txn = Transaction::random(&mut rng);\n    let test_env = TestEnv::random(&mut rng).with_block(\n        TestBlockBuilder::new()\n            .era(1)\n            .transactions(iter::once(&txn))\n            .build(&mut rng)\n            .into(),\n    );\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let chain_name_hash = validator_matrix.chain_name_hash();\n    let validators_secret_keys = test_env.validator_keys();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default());\n\n    // Register block for fwd sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), false));\n    assert!(block_synchronizer.forward.is_some());\n\n    // Start syncing.\n    {\n        let effects = block_synchronizer.handle_event(\n            mock_reactor.effect_builder(),\n            &mut rng,\n            Event::Request(BlockSynchronizerRequest::NeedNext),\n        );\n        assert_eq!(effects.len(), 1);\n\n        // First, the synchronizer should get peers.\n        let events = mock_reactor.process_effects(effects).await;\n        assert_matches!(\n            events[0],\n            MockReactorEvent::BlockAccumulatorRequest(BlockAccumulatorRequest::GetPeersForBlock {\n                block_hash,\n                ..\n            }) if block_hash == *block.hash()\n        );\n\n        latch_inner_check(\n            block_synchronizer.forward.as_ref(),\n            true,\n            \"should be latched waiting for peers\",\n        );\n    }\n\n    // Register peers. 
This would make the synchronizer ask for the block header.\n    {\n        let effects = block_synchronizer.handle_event(\n            mock_reactor.effect_builder(),\n            &mut rng,\n            Event::AccumulatedPeers(*block.hash(), Some(peers.clone())),\n        );\n        let events = mock_reactor.process_effects(effects).await;\n\n        let mut peers_asked = Vec::new();\n        for event in events {\n            assert_matches!(\n                event,\n                MockReactorEvent::BlockHeaderFetcherRequest(FetcherRequest {\n                    id,\n                    peer,\n                    ..\n                }) if peers.contains(&peer) && id == *block.hash() => {\n                    peers_asked.push(peer);\n                }\n            );\n        }\n\n        latch_count_check(\n            block_synchronizer.forward.as_ref(),\n            MAX_SIMULTANEOUS_PEERS,\n            format!(\n                \"Latch count should be {} since no block header was received.\",\n                MAX_SIMULTANEOUS_PEERS\n            )\n            .as_str(),\n        );\n\n        // Simulate successful fetch of the block header.\n        let effects = block_synchronizer.handle_event(\n            mock_reactor.effect_builder(),\n            &mut rng,\n            Event::BlockHeaderFetched(Ok(FetchedData::FromPeer {\n                item: Box::new(block.clone_header()),\n                peer: peers_asked[0],\n            })),\n        );\n        let events = mock_reactor.process_effects(effects).await;\n\n        let expected_latch_count = events.len() as u8; // number of finality sig fetches.\n\n        // Check what signatures were requested\n        let mut sigs_requested = Vec::new();\n        for event in events {\n            assert_matches!(\n                event,\n                MockReactorEvent::FinalitySignatureFetcherRequest(FetcherRequest {\n                    id,\n                    peer,\n                    ..\n                }) 
=> {\n                    assert_eq!(id.block_hash(), block.hash());\n                    assert_eq!(id.era_id(), block.era_id());\n                    sigs_requested.push((peer, id.public_key().clone()));\n                }\n            );\n        }\n\n        latch_count_check(\n            block_synchronizer.forward.as_ref(),\n            expected_latch_count,\n            format!(\n                \"Latch count should be {} since no finality sigs were received.\",\n                expected_latch_count\n            )\n            .as_str(),\n        );\n\n        // Receive a late response with the block header.\n        let effects = block_synchronizer.handle_event(\n            mock_reactor.effect_builder(),\n            &mut rng,\n            Event::BlockHeaderFetched(Ok(FetchedData::FromPeer {\n                item: Box::new(block.clone_header()),\n                peer: peers_asked[1],\n            })),\n        );\n\n        assert_eq!(effects.len(), 0);\n        latch_count_check(\n            block_synchronizer.forward.as_ref(),\n            expected_latch_count,\n            format!(\n                \"Latch count should be {} since no finality sigs were received.\",\n                expected_latch_count\n            )\n            .as_str(),\n        );\n    }\n\n    // Register finality sigs. 
This would make the synchronizer switch to have weak finality and\n    // continue asking for the block body.\n    {\n        let mut generated_effects = Effects::new();\n        for secret_key in validators_secret_keys\n            .iter()\n            .take(weak_finality_threshold(validators_secret_keys.len()))\n        {\n            // Register a finality signature\n            let signature = FinalitySignatureV2::create(\n                *block.hash(),\n                block.height(),\n                block.era_id(),\n                chain_name_hash,\n                secret_key.as_ref(),\n            );\n            assert!(signature.is_verified().is_ok());\n            let effects = block_synchronizer.handle_event(\n                mock_reactor.effect_builder(),\n                &mut rng,\n                Event::FinalitySignatureFetched(Ok(FetchedData::FromPeer {\n                    item: Box::new(signature.into()),\n                    peer: peers[2],\n                })),\n            );\n            generated_effects.extend(effects);\n        }\n\n        let events = mock_reactor\n            .process_effects(\n                generated_effects\n                    .into_iter()\n                    .rev()\n                    .take(MAX_SIMULTANEOUS_PEERS as usize),\n            )\n            .await;\n\n        for event in events {\n            assert_matches!(\n                event,\n                MockReactorEvent::BlockFetcherRequest(FetcherRequest {\n                    id,\n                    peer,\n                    ..\n                }) => {\n                    assert!(peers.contains(&peer));\n                    assert_eq!(id, *block.hash());\n                }\n            );\n        }\n\n        latch_count_check(\n            block_synchronizer.forward.as_ref(),\n            MAX_SIMULTANEOUS_PEERS,\n            format!(\n                \"Latch count should be {} since no block was received.\",\n                
MAX_SIMULTANEOUS_PEERS\n            )\n            .as_str(),\n        );\n\n        // Receive some more finality signatures to check if the latch decrements.\n        let mut generated_effects = Effects::new();\n        for secret_key in validators_secret_keys\n            .iter()\n            .skip(weak_finality_threshold(validators_secret_keys.len()))\n            .take(2)\n        {\n            // Register a finality signature\n            let signature = FinalitySignatureV2::create(\n                *block.hash(),\n                block.height(),\n                block.era_id(),\n                chain_name_hash,\n                secret_key.as_ref(),\n            );\n            assert!(signature.is_verified().is_ok());\n            let effects = block_synchronizer.handle_event(\n                mock_reactor.effect_builder(),\n                &mut rng,\n                Event::FinalitySignatureFetched(Ok(FetchedData::FromPeer {\n                    item: Box::new(signature.into()),\n                    peer: peers[2],\n                })),\n            );\n            generated_effects.extend(effects);\n        }\n\n        assert_eq!(generated_effects.len(), 0);\n\n        latch_count_check(\n            block_synchronizer.forward.as_ref(),\n            MAX_SIMULTANEOUS_PEERS,\n            format!(\n                \"Latch count should be {} since no block was received.\",\n                MAX_SIMULTANEOUS_PEERS\n            )\n            .as_str(),\n        );\n    }\n\n    // Register a block response. 
This would make the synchronizer switch to HaveBlock and continue\n    // asking for the approvals hashes.\n    {\n        let effects = block_synchronizer.handle_event(\n            mock_reactor.effect_builder(),\n            &mut rng,\n            Event::BlockFetched(Ok(FetchedData::FromPeer {\n                item: Box::new(block.clone()),\n                peer: peers[0],\n            })),\n        );\n        let events = mock_reactor.process_effects(effects).await;\n\n        for event in events {\n            assert_matches!(\n                event,\n                MockReactorEvent::ApprovalsHashesFetcherRequest(FetcherRequest {\n                    id,\n                    peer,\n                    ..\n                }) if peers.contains(&peer) && id == *block.hash()\n            );\n        }\n\n        latch_count_check(\n            block_synchronizer.forward.as_ref(),\n            MAX_SIMULTANEOUS_PEERS,\n            format!(\n                \"Latch count should be {} since no approval hashes were received.\",\n                MAX_SIMULTANEOUS_PEERS\n            )\n            .as_str(),\n        );\n\n        // Receive another response with the block. This is the second response out of the 5 we sent\n        // out earlier.\n        let effects = block_synchronizer.handle_event(\n            mock_reactor.effect_builder(),\n            &mut rng,\n            Event::BlockFetched(Ok(FetchedData::FromPeer {\n                item: Box::new(block.clone()),\n                peer: peers[1],\n            })),\n        );\n        assert_eq!(effects.len(), 0);\n\n        latch_count_check(\n            block_synchronizer.forward.as_ref(),\n            MAX_SIMULTANEOUS_PEERS,\n            format!(\n                \"Latch count should be {} since no approval hashes were received.\",\n                MAX_SIMULTANEOUS_PEERS\n            )\n            .as_str(),\n        );\n    }\n\n    // Register approvals hashes. 
This would make the synchronizer switch to HaveApprovalsHashes and\n    // continue asking for the deploys.\n    {\n        let approvals_hashes = ApprovalsHashes::new(\n            *block.hash(),\n            vec![txn.compute_approvals_hash().unwrap()],\n            dummy_merkle_proof(),\n        );\n        let effects = block_synchronizer.handle_event(\n            mock_reactor.effect_builder(),\n            &mut rng,\n            Event::ApprovalsHashesFetched(Ok(FetchedData::FromPeer {\n                item: Box::new(approvals_hashes.clone()),\n                peer: peers[0],\n            })),\n        );\n        assert_eq!(effects.len(), MAX_SIMULTANEOUS_PEERS as usize);\n        for event in mock_reactor.process_effects(effects).await {\n            assert_matches!(\n                event,\n                MockReactorEvent::TransactionFetcherRequest(FetcherRequest {\n                    id,\n                    peer,\n                    ..\n                }) => {\n                    assert!(peers.contains(&peer));\n                    assert_eq!(id, txn.compute_id());\n                }\n            );\n        }\n\n        latch_count_check(\n            block_synchronizer.forward.as_ref(),\n            MAX_SIMULTANEOUS_PEERS,\n            format!(\n                \"Latch count should be {} since no deploys were received.\",\n                MAX_SIMULTANEOUS_PEERS\n            )\n            .as_str(),\n        );\n\n        // Receive a late response with the approvals hashes.\n        let effects = block_synchronizer.handle_event(\n            mock_reactor.effect_builder(),\n            &mut rng,\n            Event::ApprovalsHashesFetched(Ok(FetchedData::FromPeer {\n                item: Box::new(approvals_hashes.clone()),\n                peer: peers[1],\n            })),\n        );\n\n        assert_eq!(effects.len(), 0);\n        latch_count_check(\n            block_synchronizer.forward.as_ref(),\n            MAX_SIMULTANEOUS_PEERS,\n            
format!(\n                \"Latch count should be {} since no deploys were received.\",\n                MAX_SIMULTANEOUS_PEERS\n            )\n            .as_str(),\n        );\n    }\n\n    // Receive a deploy. This would make the synchronizer switch to HaveAllTransactions and continue\n    // asking for more finality signatures in order to reach strict finality.\n    {\n        let effects = block_synchronizer.handle_event(\n            mock_reactor.effect_builder(),\n            &mut rng,\n            Event::TransactionFetched {\n                block_hash: *block.hash(),\n                result: Either::Right(Ok(FetchedData::from_storage(Box::new(txn.clone())))),\n            },\n        );\n        let events = mock_reactor.process_effects(effects).await;\n        let expected_latch_count = events.len() as u8;\n\n        latch_count_check(\n            block_synchronizer.forward.as_ref(),\n            expected_latch_count,\n            format!(\n                \"Latch count should be {} since no new signatures were received.\",\n                expected_latch_count\n            )\n            .as_str(),\n        );\n\n        // Since it's the single deploy in the block, the next step is to get the rest of the\n        // finality signatures to get strict finality.\n        for event in events {\n            assert_matches!(\n                event,\n                MockReactorEvent::FinalitySignatureFetcherRequest(FetcherRequest {\n                    id,\n                    ..\n                }) => {\n                    assert_eq!(id.block_hash(), block.hash());\n                    assert_eq!(id.era_id(), block.era_id());\n                }\n            );\n        }\n\n        // Receive a late deploy response.\n        let effects = block_synchronizer.handle_event(\n            mock_reactor.effect_builder(),\n            &mut rng,\n            Event::TransactionFetched {\n                block_hash: *block.hash(),\n                result: 
Either::Right(Ok(FetchedData::from_storage(Box::new(txn.clone())))),\n            },\n        );\n\n        assert_eq!(effects.len(), 0);\n        latch_count_check(\n            block_synchronizer.forward.as_ref(),\n            expected_latch_count,\n            \"Latch should not have changed since we did not receive a new signature yet.\",\n        );\n    }\n\n    // Receive the rest of the missing signatures to get strict finality. This would switch the\n    // state to HaveStrictFinality and continue to request to make the block executable.\n    {\n        let mut generated_effects = Effects::new();\n        for secret_key in validators_secret_keys.iter().rev().take(\n            strict_finality_threshold(validators_secret_keys.len())\n                - weak_finality_threshold(validators_secret_keys.len()),\n        ) {\n            // Register a finality signature\n            let signature = FinalitySignatureV2::create(\n                *block.hash(),\n                block.height(),\n                block.era_id(),\n                chain_name_hash,\n                secret_key.as_ref(),\n            );\n            assert!(signature.is_verified().is_ok());\n            let effects = block_synchronizer.handle_event(\n                mock_reactor.effect_builder(),\n                &mut rng,\n                Event::FinalitySignatureFetched(Ok(FetchedData::FromPeer {\n                    item: Box::new(signature.into()),\n                    peer: peers[2],\n                })),\n            );\n            generated_effects.extend(effects);\n        }\n\n        // Once strict finality is achieved, the synchronizer will try to make the block executable.\n        let events = mock_reactor\n            .process_effects(generated_effects.into_iter().rev().take(1))\n            .await;\n\n        for event in events {\n            assert_matches!(\n                event,\n                MockReactorEvent::MakeBlockExecutableRequest(MakeBlockExecutableRequest {\n   
                 block_hash,\n                    ..\n                }) if block_hash == *block.hash()\n            );\n        }\n\n        latch_count_check(\n            block_synchronizer.forward.as_ref(),\n            1,\n            \"Latch count should still be 1 since no FinalizedBlock was received.\",\n        );\n    }\n}\n\n#[tokio::test]\nasync fn historical_sync_latch_should_not_decrement_for_old_deploy_fetch_responses() {\n    let rng = &mut TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let transactions: BTreeMap<_, _> = iter::repeat_with(|| {\n        let txn = Transaction::random(rng);\n        let hash = txn.hash();\n        (hash, txn)\n    })\n    .take(3)\n    .collect();\n    let test_env = TestEnv::random(rng).with_block(\n        TestBlockBuilder::new()\n            .era(1)\n            .transactions(transactions.values())\n            .build(rng)\n            .into(),\n    );\n\n    let block = test_env.block();\n    let block_v2: BlockV2 = block.clone().try_into().unwrap();\n    let first_txn = transactions\n        .get(block_v2.all_transactions().next().unwrap())\n        .unwrap();\n    let second_txn = transactions\n        .get(block_v2.all_transactions().nth(1).unwrap())\n        .unwrap();\n    let third_txn = transactions\n        .get(block_v2.all_transactions().nth(2).unwrap())\n        .unwrap();\n\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let chain_name_hash = validator_matrix.chain_name_hash();\n    let validators_secret_keys = test_env.validator_keys();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(rng, validator_matrix, Default::default())\n            .with_legacy_finality(LegacyRequiredFinality::Strict);\n\n    // Register block for historical sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), SHOULD_FETCH_EXECUTION_STATE));\n    
block_synchronizer.register_peers(*block.hash(), peers.clone());\n\n    let historical_builder = block_synchronizer\n        .historical\n        .as_mut()\n        .expect(\"Historical builder should have been initialized\");\n    historical_builder\n        .register_block_header(block.clone_header(), None)\n        .expect(\"header registration works\");\n    historical_builder.register_era_validator_weights(&block_synchronizer.validator_matrix);\n    register_multiple_signatures(\n        historical_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            .take(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n    assert!(historical_builder\n        .register_block(block.clone(), None)\n        .is_ok());\n\n    let _effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::GlobalStateSynced {\n            block_hash: *block.hash(),\n            result: Ok(GlobalStateSynchronizerResponse::new(\n                global_state_synchronizer::RootHash::new(*block.state_root_hash()),\n                vec![],\n            )),\n        },\n    );\n\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveGlobalState { .. 
}\n    );\n\n    let _effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::GotExecutionResultsChecksum {\n            block_hash: *block.hash(),\n            result: ExecutionResultsChecksumResult::Success {\n                checksum: Digest::SENTINEL_NONE,\n            },\n        },\n    );\n\n    let execution_results =\n        BlockExecutionResultsOrChunk::new_mock_value_with_multiple_random_results(\n            rng,\n            *block.hash(),\n            3,\n        );\n    let _effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::ExecutionResultsFetched {\n            block_hash: *block.hash(),\n            result: Ok(FetchedData::from_storage(Box::new(execution_results))),\n        },\n    );\n\n    let _effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::ExecutionResultsStored(*block.hash()),\n    );\n\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveAllExecutionResults(_, _, _, checksum)\n            if checksum.is_checkable() == true\n    );\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::ApprovalsHashesFetched(Ok(FetchedData::from_storage(Box::new(\n            ApprovalsHashes::new(\n                *block.hash(),\n                vec![\n                    first_txn.compute_approvals_hash().unwrap(),\n                    second_txn.compute_approvals_hash().unwrap(),\n                    third_txn.compute_approvals_hash().unwrap(),\n                ],\n                dummy_merkle_proof(),\n            ),\n        )))),\n    );\n\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveApprovalsHashes(_, _, _)\n    );\n\n    let events = mock_reactor.process_effects(effects).await;\n    for 
event in events {\n        assert_matches!(\n            event,\n            MockReactorEvent::TransactionFetcherRequest(FetcherRequest { .. })\n        );\n    }\n\n    latch_count_check(\n        block_synchronizer.historical.as_ref(),\n        MAX_SIMULTANEOUS_PEERS,\n        format!(\n            \"Latch count should be {} since no deploys were received.\",\n            MAX_SIMULTANEOUS_PEERS\n        )\n        .as_str(),\n    );\n\n    // Receive 1 out of MAX_SIMULTANEOUS_PEERS requests for the first deploy.\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::TransactionFetched {\n            block_hash: *block.hash(),\n            result: Either::Right(Ok(FetchedData::from_storage(Box::new(first_txn.clone())))),\n        },\n    );\n\n    // The first deploy was registered. The synchronizer will create MAX_SIMULTANEOUS_PEERS fetch\n    // requests for another deploy.\n    for event in mock_reactor.process_effects(effects).await {\n        assert_matches!(\n            event,\n            MockReactorEvent::TransactionFetcherRequest(FetcherRequest { .. })\n        );\n    }\n    latch_count_check(\n        block_synchronizer.historical.as_ref(),\n        MAX_SIMULTANEOUS_PEERS,\n        format!(\n            \"Latch count should be {} since the node should ask for the second deploy.\",\n            MAX_SIMULTANEOUS_PEERS\n        )\n        .as_str(),\n    );\n\n    // Receive 1 out of MAX_SIMULTANEOUS_PEERS requests for the second deploy.\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::TransactionFetched {\n            block_hash: *block.hash(),\n            result: Either::Right(Ok(FetchedData::from_storage(Box::new(second_txn.clone())))),\n        },\n    );\n\n    // The second deploy was registered. 
The synchronizer will create MAX_SIMULTANEOUS_PEERS fetch\n    // requests for another deploy.\n    for event in mock_reactor.process_effects(effects).await {\n        assert_matches!(\n            event,\n            MockReactorEvent::TransactionFetcherRequest(FetcherRequest { .. })\n        );\n    }\n    latch_count_check(\n        block_synchronizer.historical.as_ref(),\n        MAX_SIMULTANEOUS_PEERS,\n        format!(\n            \"Latch count should be {} since the node should ask for the third deploy.\",\n            MAX_SIMULTANEOUS_PEERS\n        )\n        .as_str(),\n    );\n\n    // The current state is:\n    // * Sent out MAX_SIMULTANEOUS_PEERS requests for the first deploy and received 1 response.\n    // * Sent out MAX_SIMULTANEOUS_PEERS requests for the second deploy and received 1 response.\n    // * Sent out MAX_SIMULTANEOUS_PEERS requests for the third deploy and haven't received anything\n    //   yet.\n    //\n    // So we can receive at this point MAX_SIMULTANEOUS_PEERS - 2 \"late\" responses for the first and\n    // second deploys and MAX_SIMULTANEOUS_PEERS responses for the third deploy.\n    //\n    // Simulate that we receive the \"late\" responses first. The synchronizer shouldn't unlatch and\n    // try to send out more requests for the third deploy. 
It should hold off until the right\n    // response comes through.\n\n    // Receive the late responses for the first deploy\n    for _ in 1..MAX_SIMULTANEOUS_PEERS {\n        let effects = block_synchronizer.handle_event(\n            mock_reactor.effect_builder(),\n            rng,\n            Event::TransactionFetched {\n                block_hash: *block.hash(),\n                result: Either::Right(Ok(FetchedData::from_storage(Box::new(first_txn.clone())))),\n            },\n        );\n\n        assert_eq!(effects.len(), 0);\n\n        latch_count_check(\n            block_synchronizer.historical.as_ref(),\n            MAX_SIMULTANEOUS_PEERS,\n            \"Shouldn't decrement the latch since this was a late response\",\n        );\n    }\n\n    // Receive the late responses for the second deploy\n    for _ in 1..MAX_SIMULTANEOUS_PEERS {\n        let effects = block_synchronizer.handle_event(\n            mock_reactor.effect_builder(),\n            rng,\n            Event::TransactionFetched {\n                block_hash: *block.hash(),\n                result: Either::Right(Ok(FetchedData::from_storage(Box::new(second_txn.clone())))),\n            },\n        );\n\n        assert_eq!(effects.len(), 0);\n\n        latch_count_check(\n            block_synchronizer.historical.as_ref(),\n            MAX_SIMULTANEOUS_PEERS,\n            \"Shouldn't decrement the latch since this was a late response\",\n        );\n    }\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::TransactionFetched {\n            block_hash: *block.hash(),\n            result: Either::Right(Ok(FetchedData::from_storage(Box::new(third_txn.clone())))),\n        },\n    );\n\n    // ----- HaveAllTransactions -----\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveAllTransactions(_, _)\n    );\n\n    let events = mock_reactor.process_effects(effects).await;\n    
for event in events {\n        assert_matches!(event, MockReactorEvent::FinalitySignatureFetcherRequest(_));\n    }\n}\n\n#[tokio::test]\nasync fn historical_sync_latch_should_not_decrement_for_old_execution_results() {\n    let rng = &mut TestRng::new();\n    let mock_reactor = MockReactor::new();\n    let first_txn = Transaction::random(rng);\n    let second_txn = Transaction::random(rng);\n    let third_txn = Transaction::random(rng);\n    let test_env = TestEnv::random(rng).with_block(\n        TestBlockBuilder::new()\n            .era(1)\n            .transactions([first_txn, second_txn, third_txn].iter())\n            .build(rng)\n            .into(),\n    );\n    let peers = test_env.peers();\n    let block = test_env.block();\n    let validator_matrix = test_env.gen_validator_matrix();\n    let chain_name_hash = validator_matrix.chain_name_hash();\n    let validators_secret_keys = test_env.validator_keys();\n    let mut block_synchronizer =\n        BlockSynchronizer::new_initialized(rng, validator_matrix, Default::default())\n            .with_legacy_finality(LegacyRequiredFinality::Strict);\n\n    // Register block for historical sync\n    assert!(block_synchronizer.register_block_by_hash(*block.hash(), SHOULD_FETCH_EXECUTION_STATE));\n    block_synchronizer.register_peers(*block.hash(), peers.clone());\n\n    let historical_builder = block_synchronizer\n        .historical\n        .as_mut()\n        .expect(\"Historical builder should have been initialized\");\n    historical_builder\n        .register_block_header(block.clone_header(), None)\n        .expect(\"header registration works\");\n    historical_builder.register_era_validator_weights(&block_synchronizer.validator_matrix);\n    register_multiple_signatures(\n        historical_builder,\n        block,\n        validators_secret_keys\n            .iter()\n            .take(weak_finality_threshold(validators_secret_keys.len())),\n        chain_name_hash,\n    );\n    assert!(historical_builder\n 
       .register_block(block.clone(), None)\n        .is_ok());\n\n    let _effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::GlobalStateSynced {\n            block_hash: *block.hash(),\n            result: Ok(GlobalStateSynchronizerResponse::new(\n                global_state_synchronizer::RootHash::new(*block.state_root_hash()),\n                vec![],\n            )),\n        },\n    );\n\n    assert_matches!(\n        historical_state(&block_synchronizer),\n        BlockAcquisitionState::HaveGlobalState { .. }\n    );\n\n    latch_count_check(\n        block_synchronizer.historical.as_ref(),\n        1,\n        \"Latch count should be 1 since we're waiting for execution results checksum.\",\n    );\n\n    // Create chunked execution results.\n    let execution_results =\n        BlockExecutionResultsOrChunk::new_mock_value_with_multiple_random_results(\n            rng,\n            *block.hash(),\n            100000, // Lots of results to achieve chunking.\n        );\n    let checksum = assert_matches!(\n        execution_results.value(),\n        ValueOrChunk::ChunkWithProof(chunk) => chunk.proof().root_hash()\n    );\n\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::GotExecutionResultsChecksum {\n            block_hash: *block.hash(),\n            result: ExecutionResultsChecksumResult::Success { checksum },\n        },\n    );\n\n    for event in mock_reactor.process_effects(effects).await {\n        assert_matches!(\n            event,\n            MockReactorEvent::BlockExecutionResultsOrChunkFetcherRequest(FetcherRequest { .. 
})\n        );\n    }\n\n    latch_count_check(\n        block_synchronizer.historical.as_ref(),\n        MAX_SIMULTANEOUS_PEERS,\n        format!(\n            \"Latch count should be {} since no chunks of execution results were received.\",\n            MAX_SIMULTANEOUS_PEERS\n        )\n        .as_str(),\n    );\n\n    // Receive the first chunk of execution results.\n    let effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::ExecutionResultsFetched {\n            block_hash: *block.hash(),\n            result: Ok(FetchedData::from_storage(Box::new(\n                execution_results.clone(),\n            ))),\n        },\n    );\n\n    // It's expected that the synchronizer will ask for the next chunks of execution results.\n    for event in mock_reactor.process_effects(effects).await {\n        assert_matches!(\n            event,\n            MockReactorEvent::BlockExecutionResultsOrChunkFetcherRequest(FetcherRequest { id, .. 
}) if id.chunk_index() != 0\n        );\n    }\n\n    latch_count_check(\n        block_synchronizer.historical.as_ref(),\n        MAX_SIMULTANEOUS_PEERS,\n        format!(\n            \"Latch count should be {} since no responses with chunks != 0 were received.\",\n            MAX_SIMULTANEOUS_PEERS\n        )\n        .as_str(),\n    );\n\n    // Receive the first chunk of execution results again (late response).\n    let _effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::ExecutionResultsFetched {\n            block_hash: *block.hash(),\n            result: Ok(FetchedData::from_storage(Box::new(execution_results))),\n        },\n    );\n\n    latch_count_check(\n        block_synchronizer.historical.as_ref(),\n        MAX_SIMULTANEOUS_PEERS,\n        format!(\n            \"Latch count should be {} since we already had the first chunk and no responses with chunks != 0 were received.\",\n            MAX_SIMULTANEOUS_PEERS\n        )\n            .as_str(),\n    );\n\n    // Receive a fetch error.\n    let _effects = block_synchronizer.handle_event(\n        mock_reactor.effect_builder(),\n        rng,\n        Event::ExecutionResultsFetched {\n            block_hash: *block.hash(),\n            result: Err(FetcherError::Absent {\n                id: Box::new(BlockExecutionResultsOrChunkId::new(*block.hash())),\n                peer: peers[0],\n            }),\n        },\n    );\n\n    latch_count_check(\n        block_synchronizer.historical.as_ref(),\n        MAX_SIMULTANEOUS_PEERS - 1,\n        format!(\n            \"Latch count should be {} since we received an `Absent` response.\",\n            MAX_SIMULTANEOUS_PEERS - 1\n        )\n        .as_str(),\n    );\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/transaction_acquisition/tests.rs",
    "content": "use std::collections::{BTreeMap, VecDeque};\n\nuse assert_matches::assert_matches;\nuse rand::Rng;\n\nuse casper_storage::block_store::types::ApprovalsHashes;\nuse casper_types::{\n    global_state::TrieMerkleProof, testing::TestRng, AccessRights, CLValue, StoredValue,\n    TestBlockBuilder, Transaction, URef,\n};\n\nuse super::*;\n\nfn gen_test_transactions(rng: &mut TestRng) -> BTreeMap<TransactionHash, Transaction> {\n    let num_txns = rng.gen_range(2..15);\n    (0..num_txns)\n        .map(|_| {\n            let transaction = Transaction::random(rng);\n            (transaction.hash(), transaction)\n        })\n        .collect()\n}\n\nfn gen_approvals_hashes<'a, I: Iterator<Item = &'a Transaction> + Clone>(\n    rng: &mut TestRng,\n    transactions_iter: I,\n) -> ApprovalsHashes {\n    let era = rng.gen_range(0..6);\n    let block = TestBlockBuilder::new()\n        .era(era)\n        .height(era * 10 + rng.gen_range(0..10))\n        .transactions(transactions_iter.clone())\n        .build(rng);\n\n    ApprovalsHashes::new(\n        *block.hash(),\n        transactions_iter\n            .map(|txn| txn.compute_approvals_hash().unwrap())\n            .collect(),\n        TrieMerkleProof::new(\n            URef::new([255; 32], AccessRights::NONE).into(),\n            StoredValue::CLValue(CLValue::from_t(()).unwrap()),\n            VecDeque::new(),\n        ),\n    )\n}\n\nfn get_transaction_id(transaction: &Transaction) -> TransactionId {\n    match transaction {\n        Transaction::Deploy(deploy) => TransactionId::new(\n            TransactionHash::Deploy(*deploy.hash()),\n            deploy.compute_approvals_hash().unwrap(),\n        ),\n        Transaction::V1(transaction_v1) => TransactionId::new(\n            TransactionHash::V1(*transaction_v1.hash()),\n            transaction_v1.compute_approvals_hash().unwrap(),\n        ),\n    }\n}\n\n#[test]\nfn dont_apply_approvals_hashes_when_acquiring_by_id() {\n    let mut rng = TestRng::new();\n    
let test_transactions = gen_test_transactions(&mut rng);\n    let approvals_hashes = gen_approvals_hashes(&mut rng, test_transactions.values());\n\n    let mut txn_acquisition = TransactionAcquisition::ById(Acquisition::new(\n        test_transactions.values().map(get_transaction_id).collect(),\n        false,\n    ));\n\n    assert_matches!(\n        txn_acquisition.apply_approvals_hashes(&approvals_hashes),\n        Err(Error::AcquisitionByIdNotPossible)\n    );\n    assert_matches!(\n        txn_acquisition.next_needed_transaction().unwrap(),\n        TransactionIdentifier::ById(id) if test_transactions.contains_key(&id.transaction_hash())\n    );\n}\n\n#[test]\nfn apply_approvals_on_acquisition_by_hash_creates_correct_ids() {\n    let mut rng = TestRng::new();\n    let test_transactions = gen_test_transactions(&mut rng);\n    let mut txn_acquisition =\n        TransactionAcquisition::new_by_hash(test_transactions.keys().copied().collect(), false);\n\n    // Generate the ApprovalsHashes for all test transactions except the last one\n    let approvals_hashes = gen_approvals_hashes(\n        &mut rng,\n        test_transactions.values().take(test_transactions.len() - 1),\n    );\n\n    assert_matches!(\n        txn_acquisition.next_needed_transaction().unwrap(),\n        TransactionIdentifier::ByHash(hash) if test_transactions.contains_key(&hash)\n    );\n    assert!(txn_acquisition\n        .apply_approvals_hashes(&approvals_hashes)\n        .is_ok());\n\n    // Now acquisition is done by id\n    assert_matches!(\n        txn_acquisition.next_needed_transaction().unwrap(),\n        TransactionIdentifier::ById(id) if test_transactions.contains_key(&id.transaction_hash())\n    );\n\n    // Apply the transactions\n    for transaction in test_transactions.values().take(test_transactions.len() - 1) {\n        let acceptance = txn_acquisition.apply_transaction(get_transaction_id(transaction));\n        assert_matches!(acceptance, Some(Acceptance::NeededIt));\n    }\n\n 
   // The last transaction was excluded from acquisition when we applied the approvals hashes so it\n    // should not be needed\n    assert!(!txn_acquisition.needs_transaction());\n\n    // Try to apply the last transaction; it should not be accepted\n    let last_transaction = test_transactions.values().last().unwrap();\n    let last_txn_acceptance =\n        txn_acquisition.apply_transaction(get_transaction_id(last_transaction));\n    assert_matches!(last_txn_acceptance, None);\n}\n\n#[test]\nfn apply_approvals_hashes_after_having_already_applied_transactions() {\n    let mut rng = TestRng::new();\n    let test_transactions = gen_test_transactions(&mut rng);\n    let mut txn_acquisition =\n        TransactionAcquisition::new_by_hash(test_transactions.keys().copied().collect(), false);\n    let (_, first_txn) = test_transactions.first_key_value().unwrap();\n\n    let approvals_hashes = gen_approvals_hashes(&mut rng, test_transactions.values());\n\n    // Apply a valid transaction that was not applied before. This should succeed.\n    let acceptance = txn_acquisition.apply_transaction(get_transaction_id(first_txn));\n    assert_matches!(acceptance, Some(Acceptance::NeededIt));\n\n    // Apply approvals hashes. 
This should fail since we have already acquired transactions by hash.\n    assert_matches!(\n        txn_acquisition.apply_approvals_hashes(&approvals_hashes),\n        Err(Error::EncounteredNonVacantTransactionState)\n    );\n}\n\n#[test]\nfn partially_applied_txns_on_acquisition_by_hash_should_need_missing_txns() {\n    let mut rng = TestRng::new();\n    let test_transactions = gen_test_transactions(&mut rng);\n    let mut txn_acquisition =\n        TransactionAcquisition::new_by_hash(test_transactions.keys().copied().collect(), false);\n\n    assert_matches!(\n        txn_acquisition.next_needed_transaction().unwrap(),\n        TransactionIdentifier::ByHash(hash) if test_transactions.contains_key(&hash)\n    );\n\n    // Apply all the transactions except for the last one\n    for transaction in test_transactions.values().take(test_transactions.len() - 1) {\n        let acceptance = txn_acquisition.apply_transaction(get_transaction_id(transaction));\n        assert_matches!(acceptance, Some(Acceptance::NeededIt));\n    }\n\n    // Last transaction should be needed now\n    let last_txn = test_transactions.iter().last().unwrap().1;\n    assert_matches!(\n        txn_acquisition.next_needed_transaction().unwrap(),\n        TransactionIdentifier::ByHash(hash) if last_txn.hash() == hash\n    );\n\n    // Apply the last transaction and check the acceptance\n    let last_txn_acceptance = txn_acquisition.apply_transaction(get_transaction_id(last_txn));\n    assert_matches!(last_txn_acceptance, Some(Acceptance::NeededIt));\n\n    // Try to add the last transaction again to check the acceptance\n    let already_registered_acceptance =\n        txn_acquisition.apply_transaction(get_transaction_id(last_txn));\n    assert_matches!(already_registered_acceptance, Some(Acceptance::HadIt));\n}\n\n#[test]\nfn apply_unregistered_transaction_returns_no_acceptance() {\n    let mut rng = TestRng::new();\n    let test_transactions = gen_test_transactions(&mut rng);\n    let mut 
txn_acquisition =\n        TransactionAcquisition::new_by_hash(test_transactions.keys().copied().collect(), false);\n\n    let unregistered_transaction = Transaction::random(&mut rng);\n    let unregistered_txn_acceptance =\n        txn_acquisition.apply_transaction(get_transaction_id(&unregistered_transaction));\n\n    // An unregistered transaction should not be accepted\n    assert!(unregistered_txn_acceptance.is_none());\n    let first_transaction = test_transactions.iter().next().unwrap().1;\n    assert_matches!(\n        txn_acquisition.next_needed_transaction().unwrap(),\n        TransactionIdentifier::ByHash(hash) if first_transaction.hash() == hash\n    );\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/transaction_acquisition.rs",
    "content": "#[cfg(test)]\nmod tests;\n\nuse std::{\n    cmp::Ord,\n    fmt::{Display, Formatter},\n};\n\nuse datasize::DataSize;\nuse tracing::debug;\n\nuse casper_storage::block_store::types::ApprovalsHashes;\nuse casper_types::{TransactionHash, TransactionId};\n\nuse super::block_acquisition::Acceptance;\n\n#[derive(Clone, Copy, PartialEq, Eq, DataSize, Debug)]\npub(crate) enum Error {\n    AcquisitionByIdNotPossible,\n    EncounteredNonVacantTransactionState,\n}\n\nimpl Display for Error {\n    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {\n        match self {\n            Error::AcquisitionByIdNotPossible => write!(f, \"acquisition by id is not possible\"),\n            Error::EncounteredNonVacantTransactionState => {\n                write!(f, \"encountered non vacant transaction state\")\n            }\n        }\n    }\n}\n\n#[derive(Clone, Copy, PartialEq, Eq, DataSize, Debug)]\npub(super) enum TransactionIdentifier {\n    ByHash(TransactionHash),\n    ById(TransactionId),\n}\n\n#[derive(Clone, PartialEq, Eq, DataSize, Debug)]\npub(super) enum TransactionAcquisition {\n    ByHash(Acquisition<TransactionHash>),\n    ById(Acquisition<TransactionId>),\n}\n\nimpl TransactionAcquisition {\n    pub(super) fn new_by_hash(\n        transaction_hashes: Vec<TransactionHash>,\n        need_execution_result: bool,\n    ) -> Self {\n        TransactionAcquisition::ByHash(Acquisition::new(transaction_hashes, need_execution_result))\n    }\n\n    pub(super) fn apply_transaction(\n        &mut self,\n        transaction_id: TransactionId,\n    ) -> Option<Acceptance> {\n        match self {\n            TransactionAcquisition::ByHash(acquisition) => {\n                acquisition.apply_transaction(transaction_id.transaction_hash())\n            }\n            TransactionAcquisition::ById(acquisition) => {\n                acquisition.apply_transaction(transaction_id)\n            }\n        }\n    }\n\n    pub(super) fn apply_approvals_hashes(\n        
&mut self,\n        approvals_hashes: &ApprovalsHashes,\n    ) -> Result<(), Error> {\n        let new_acquisition = match self {\n            TransactionAcquisition::ByHash(acquisition) => {\n                let mut new_txn_ids = vec![];\n                for ((transaction_hash, txn_state), approvals_hash) in acquisition\n                    .inner\n                    .drain(..)\n                    .zip(approvals_hashes.approvals_hashes())\n                {\n                    if !matches!(txn_state, TransactionState::Vacant) {\n                        return Err(Error::EncounteredNonVacantTransactionState);\n                    };\n                    let txn_id = match (transaction_hash, approvals_hash) {\n                        (TransactionHash::Deploy(deploy_hash), deploy_approvals_hash) => {\n                            TransactionId::new(deploy_hash.into(), deploy_approvals_hash)\n                        }\n                        (TransactionHash::V1(transaction_v1_hash), txn_v1_approvals_hash) => {\n                            TransactionId::new(transaction_v1_hash.into(), txn_v1_approvals_hash)\n                        }\n                    };\n                    new_txn_ids.push((txn_id, TransactionState::Vacant));\n                }\n\n                TransactionAcquisition::ById(Acquisition {\n                    inner: new_txn_ids,\n                    need_execution_result: acquisition.need_execution_result,\n                })\n            }\n            TransactionAcquisition::ById(_) => {\n                debug!(\"TransactionAcquisition: attempt to apply approvals hashes on a transaction acquired by ID\");\n                return Err(Error::AcquisitionByIdNotPossible);\n            }\n        };\n\n        *self = new_acquisition;\n        Ok(())\n    }\n\n    pub(super) fn needs_transaction(&self) -> bool {\n        match self {\n            TransactionAcquisition::ByHash(acq) => acq.needs_transaction().is_some(),\n            
TransactionAcquisition::ById(acq) => acq.needs_transaction().is_some(),\n        }\n    }\n\n    pub(super) fn next_needed_transaction(&self) -> Option<TransactionIdentifier> {\n        match self {\n            TransactionAcquisition::ByHash(acq) => {\n                acq.needs_transaction().map(TransactionIdentifier::ByHash)\n            }\n            TransactionAcquisition::ById(acq) => {\n                acq.needs_transaction().map(TransactionIdentifier::ById)\n            }\n        }\n    }\n}\n\n#[derive(Clone, Copy, PartialEq, Eq, DataSize, Debug, Default)]\npub(super) enum TransactionState {\n    #[default]\n    Vacant,\n    HaveTransactionBody,\n}\n\n#[derive(Clone, PartialEq, Eq, DataSize, Debug)]\npub(super) struct Acquisition<T> {\n    inner: Vec<(T, TransactionState)>,\n    need_execution_result: bool,\n}\n\nimpl<T: Copy + Ord> Acquisition<T> {\n    fn new(txn_identifiers: Vec<T>, need_execution_result: bool) -> Self {\n        let inner = txn_identifiers\n            .into_iter()\n            .map(|txn_identifier| (txn_identifier, TransactionState::Vacant))\n            .collect();\n        Acquisition {\n            inner,\n            need_execution_result,\n        }\n    }\n\n    fn apply_transaction(&mut self, transaction_identifier: T) -> Option<Acceptance> {\n        for item in self.inner.iter_mut() {\n            if item.0 == transaction_identifier {\n                match item.1 {\n                    TransactionState::Vacant => {\n                        item.1 = TransactionState::HaveTransactionBody;\n                        return Some(Acceptance::NeededIt);\n                    }\n                    TransactionState::HaveTransactionBody => return Some(Acceptance::HadIt),\n                }\n            }\n        }\n        None\n    }\n\n    fn needs_transaction(&self) -> Option<T> {\n        self.inner\n            .iter()\n            .find_map(|(txn_identifier, state)| match state {\n                TransactionState::Vacant => 
Some(*txn_identifier),\n                TransactionState::HaveTransactionBody => None,\n            })\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/trie_accumulator/tests.rs",
    "content": "use super::*;\nuse crate::{\n    components::block_synchronizer::tests::test_utils::test_chunks_with_proof,\n    reactor::{EventQueueHandle, QueueKind, Scheduler},\n    types::ValueOrChunk,\n    utils,\n};\nuse casper_types::testing::TestRng;\nuse futures::channel::oneshot;\n\n/// Event for the mock reactor.\n#[derive(Debug)]\nenum ReactorEvent {\n    FetcherRequest(FetcherRequest<TrieOrChunk>),\n    PeerBehaviorAnnouncement(#[allow(dead_code)] PeerBehaviorAnnouncement),\n}\n\nimpl From<PeerBehaviorAnnouncement> for ReactorEvent {\n    fn from(req: PeerBehaviorAnnouncement) -> ReactorEvent {\n        ReactorEvent::PeerBehaviorAnnouncement(req)\n    }\n}\n\nimpl From<FetcherRequest<TrieOrChunk>> for ReactorEvent {\n    fn from(req: FetcherRequest<TrieOrChunk>) -> ReactorEvent {\n        ReactorEvent::FetcherRequest(req)\n    }\n}\n\nstruct MockReactor {\n    scheduler: &'static Scheduler<ReactorEvent>,\n    effect_builder: EffectBuilder<ReactorEvent>,\n}\n\nimpl MockReactor {\n    fn new() -> Self {\n        let scheduler = utils::leak(Scheduler::new(QueueKind::weights(), None));\n        let event_queue_handle = EventQueueHandle::without_shutdown(scheduler);\n        let effect_builder = EffectBuilder::new(event_queue_handle);\n        MockReactor {\n            scheduler,\n            effect_builder,\n        }\n    }\n\n    fn effect_builder(&self) -> EffectBuilder<ReactorEvent> {\n        self.effect_builder\n    }\n\n    async fn expect_fetch_event(&self, chunk_id: &TrieOrChunkId, peer: &NodeId) {\n        let ((_ancestor, reactor_event), _) = self.scheduler.pop().await;\n        match reactor_event {\n            ReactorEvent::FetcherRequest(request) => {\n                assert_eq!(request.id, *chunk_id);\n                assert_eq!(request.peer, *peer);\n            }\n            _ => {\n                unreachable!();\n            }\n        };\n    }\n}\n\nasync fn download_chunk_and_check(\n    reactor: &MockReactor,\n    
trie_accumulator: &mut TrieAccumulator,\n    chunk_to_download: &TrieOrChunkId,\n    peer: &NodeId,\n    partial_chunks: PartialChunks,\n) {\n    // Try to download a chunk from a peer\n    let mut effects = trie_accumulator.try_download_chunk(\n        reactor.effect_builder(),\n        *chunk_to_download,\n        *peer,\n        partial_chunks,\n    );\n    // A fetch effect should be generated\n    assert_eq!(effects.len(), 1);\n\n    // Run the effects and check if the correct fetch was requested\n    tokio::spawn(async move { effects.remove(0).await });\n    reactor.expect_fetch_event(chunk_to_download, peer).await;\n}\n\n#[test]\nfn unsolicited_chunk_produces_no_effects() {\n    let reactor = MockReactor::new();\n\n    // Empty accumulator. Does not expect any chunks.\n    let mut trie_accumulator = TrieAccumulator::new();\n    let (test_chunks, _, _) = test_chunks_with_proof(1);\n\n    let effects = trie_accumulator.consume_chunk(reactor.effect_builder(), test_chunks[0].clone());\n    assert!(effects.is_empty());\n}\n\n#[tokio::test]\nasync fn try_download_chunk_generates_fetch_effect() {\n    let mut rng = TestRng::new();\n    let reactor = MockReactor::new();\n    let mut trie_accumulator = TrieAccumulator::new();\n\n    // Create a test chunk\n    let (_, chunk_ids, _) = test_chunks_with_proof(1);\n    let peer = NodeId::random(&mut rng);\n    let chunks = PartialChunks {\n        peers: vec![peer],\n        responders: Default::default(),\n        chunks: Default::default(),\n        unreliable_peers: Default::default(),\n    };\n\n    download_chunk_and_check(\n        &reactor,\n        &mut trie_accumulator,\n        &chunk_ids[0],\n        &peer,\n        chunks,\n    )\n    .await;\n}\n\n#[tokio::test]\nasync fn failed_fetch_retriggers_download_with_different_peer() {\n    let mut rng = TestRng::new();\n    let reactor = MockReactor::new();\n    let mut trie_accumulator = TrieAccumulator::new();\n\n    // Create a test chunk\n    let (_, chunk_ids, 
_) = test_chunks_with_proof(1);\n\n    // Create multiple peers\n    let peers: Vec<NodeId> = (0..2).map(|_| NodeId::random(&mut rng)).collect();\n\n    let chunks = PartialChunks {\n        peers: peers.clone(),\n        responders: Default::default(),\n        chunks: Default::default(),\n        unreliable_peers: Default::default(),\n    };\n\n    download_chunk_and_check(\n        &reactor,\n        &mut trie_accumulator,\n        &chunk_ids[0],\n        &peers[1],\n        chunks,\n    )\n    .await;\n\n    // Simulate a fetch error\n    let fetch_result: FetchResult<TrieOrChunk> = Err(FetcherError::TimedOut {\n        id: Box::new(chunk_ids[0]),\n        peer: peers[1],\n    });\n    let event = Event::TrieOrChunkFetched {\n        id: chunk_ids[0],\n        fetch_result,\n    };\n\n    // Handling the fetch error should make the trie accumulator generate another fetch for the\n    // same chunk but with a different peer\n    let mut effects = trie_accumulator.handle_event(reactor.effect_builder(), &mut rng, event);\n    assert_eq!(effects.len(), 1);\n\n    // Run the effects and check if the fetch was re-triggered\n    tokio::spawn(async move { effects.remove(0).await });\n    reactor.expect_fetch_event(&chunk_ids[0], &peers[0]).await;\n}\n\n#[tokio::test]\nasync fn fetched_chunk_triggers_download_of_missing_chunk() {\n    let mut rng = TestRng::new();\n    let reactor = MockReactor::new();\n    let mut trie_accumulator = TrieAccumulator::new();\n\n    // Create test chunks\n    let (test_chunks, chunk_ids, _) = test_chunks_with_proof(2);\n    let peer = NodeId::random(&mut rng);\n\n    let chunks = PartialChunks {\n        peers: vec![peer],\n        responders: Default::default(),\n        chunks: Default::default(),\n        unreliable_peers: Default::default(),\n    };\n\n    download_chunk_and_check(\n        &reactor,\n        &mut trie_accumulator,\n        &chunk_ids[1],\n        &peer,\n        chunks,\n    )\n    .await;\n\n    // Simulate a 
successful fetch\n    let chunk = Box::new(ValueOrChunk::ChunkWithProof(test_chunks[1].clone()));\n    let fetch_result: FetchResult<TrieOrChunk> = Ok(FetchedData::FromPeer { peer, item: chunk });\n    let event = Event::TrieOrChunkFetched {\n        id: chunk_ids[1],\n        fetch_result,\n    };\n\n    // Process the downloaded chunk\n    let mut effects = trie_accumulator.handle_event(reactor.effect_builder(), &mut rng, event);\n    assert_eq!(effects.len(), 1);\n\n    // Check if a new fetch was issued for the missing chunk\n    tokio::spawn(async move { effects.remove(0).await });\n    reactor.expect_fetch_event(&chunk_ids[0], &peer).await;\n}\n\n#[tokio::test]\nasync fn trie_returned_when_all_chunks_fetched() {\n    let mut rng = TestRng::new();\n    let reactor = MockReactor::new();\n    let mut trie_accumulator = TrieAccumulator::new();\n\n    // Create test chunks\n    let (test_chunks, chunk_ids, data) = test_chunks_with_proof(3);\n    let peer = NodeId::random(&mut rng);\n\n    // Create a responder to assert the validity of the assembled trie\n    let (sender, receiver) = oneshot::channel();\n    let responder = Responder::without_shutdown(sender);\n\n    let chunks = PartialChunks {\n        peers: vec![peer],\n        responders: vec![responder],\n        chunks: Default::default(),\n        unreliable_peers: Default::default(),\n    };\n\n    download_chunk_and_check(\n        &reactor,\n        &mut trie_accumulator,\n        &chunk_ids[0],\n        &peer,\n        chunks,\n    )\n    .await;\n\n    let mut effects = Effects::new();\n\n    for i in 0..3 {\n        // Simulate a successful fetch\n        let fetch_result: FetchResult<TrieOrChunk> = Ok(FetchedData::FromPeer {\n            peer,\n            item: Box::new(ValueOrChunk::ChunkWithProof(test_chunks[i].clone())),\n        });\n        let event = Event::TrieOrChunkFetched {\n            id: chunk_ids[i],\n            fetch_result,\n        };\n\n        // Expect to get one effect for 
each call. First 2 will be requests to download missing\n        // chunks. Last one will be the returned trie since all chunks are available.\n        effects = trie_accumulator.handle_event(reactor.effect_builder(), &mut rng, event);\n        assert_eq!(effects.len(), 1);\n    }\n\n    // Validate the returned trie\n    tokio::spawn(async move { effects.remove(0).await });\n    let result_trie = receiver.await.unwrap().expect(\"Expected trie\").trie;\n    assert_eq!(*result_trie, TrieRaw::new(Bytes::from(data)));\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer/trie_accumulator.rs",
    "content": "#[cfg(test)]\nmod tests;\n\nuse std::{\n    collections::{HashMap, HashSet},\n    fmt::{self, Debug},\n};\n\nuse datasize::DataSize;\nuse derive_more::From;\nuse rand::seq::SliceRandom;\nuse serde::Serialize;\nuse thiserror::Error;\nuse tracing::{debug, error, trace, warn};\n\nuse casper_storage::global_state::trie::TrieRaw;\nuse casper_types::{bytesrepr::Bytes, ChunkWithProof, Digest, DisplayIter};\n\nuse crate::{\n    components::{\n        fetcher::{\n            EmptyValidationMetadata, Error as FetcherError, FetchItem, FetchResult, FetchedData,\n        },\n        Component,\n    },\n    effect::{\n        announcements::PeerBehaviorAnnouncement,\n        requests::{FetcherRequest, TrieAccumulatorRequest},\n        EffectBuilder, EffectExt, Effects, Responder,\n    },\n    types::{NodeId, TrieOrChunk, TrieOrChunkId},\n    NodeRng,\n};\n\nconst COMPONENT_NAME: &str = \"trie_accumulator\";\n\n#[derive(Debug, From, Error, Clone, Serialize)]\npub(crate) enum Error {\n    #[error(\"trie accumulator ran out of peers trying to fetch item with error: {0}; unreliable peers: {}\", DisplayIter::new(.1))]\n    // Note: Due to being a thrice nested component, this error type tighter size constraints. 
For\n    //       this reason, we have little choice but to box the `FetcherError`.\n    PeersExhausted(Box<FetcherError<TrieOrChunk>>, Vec<NodeId>),\n    #[error(\"trie accumulator couldn't fetch trie chunk ({0}, {1}); unreliable peers: {}\", DisplayIter::new(.2))]\n    Absent(Digest, u64, Vec<NodeId>),\n    #[error(\"request contained no peers; trie = {0}\")]\n    NoPeers(Digest),\n}\n\n#[derive(Debug, Clone, Serialize)]\npub(crate) struct Response {\n    trie: Box<TrieRaw>,\n    unreliable_peers: Vec<NodeId>,\n}\n\nimpl Response {\n    pub(crate) fn new(trie: TrieRaw, unreliable_peers: Vec<NodeId>) -> Self {\n        Response {\n            trie: Box::new(trie),\n            unreliable_peers,\n        }\n    }\n\n    pub(crate) fn trie(self) -> Box<TrieRaw> {\n        self.trie\n    }\n\n    pub(crate) fn unreliable_peers(&self) -> &Vec<NodeId> {\n        &self.unreliable_peers\n    }\n}\n\n#[derive(DataSize, Debug)]\nstruct PartialChunks {\n    peers: Vec<NodeId>,\n    responders: Vec<Responder<Result<Response, Error>>>,\n    chunks: HashMap<u64, ChunkWithProof>,\n    unreliable_peers: Vec<NodeId>,\n}\n\nimpl PartialChunks {\n    fn missing_chunk(&self, count: u64) -> Option<u64> {\n        (0..count).find(|idx| !self.chunks.contains_key(idx))\n    }\n\n    fn assemble_chunks(&self, count: u64) -> TrieRaw {\n        let data: Bytes = (0..count)\n            .filter_map(|index| self.chunks.get(&index))\n            .flat_map(|chunk| chunk.chunk())\n            .copied()\n            .collect();\n        TrieRaw::new(data)\n    }\n\n    fn next_peer(&mut self) -> Option<&NodeId> {\n        // remove the last used peer from the queue\n        self.peers.pop();\n        self.peers.last()\n    }\n\n    fn merge(&mut self, other: PartialChunks) {\n        self.chunks.extend(other.chunks);\n        self.responders.extend(other.responders);\n        // set used for filtering out duplicates\n        let mut filter_peers: HashSet<NodeId> = 
self.peers.iter().cloned().collect();\n        for peer in other.peers {\n            if filter_peers.insert(peer) {\n                self.peers.push(peer);\n            }\n        }\n    }\n\n    fn respond(self, value: Result<Response, Error>) -> Effects<Event> {\n        self.responders\n            .into_iter()\n            .flat_map(|responder| responder.respond(value.clone()).ignore())\n            .collect()\n    }\n\n    fn mark_peer_unreliable(&mut self, peer: &NodeId) {\n        self.unreliable_peers.push(*peer);\n    }\n}\n\n#[derive(DataSize, Debug)]\npub(super) struct TrieAccumulator {\n    partial_chunks: HashMap<Digest, PartialChunks>,\n}\n\n#[derive(DataSize, Debug, From, Serialize)]\npub(crate) enum Event {\n    #[from]\n    Request(TrieAccumulatorRequest),\n    TrieOrChunkFetched {\n        id: TrieOrChunkId,\n        fetch_result: FetchResult<TrieOrChunk>,\n    },\n}\n\nimpl fmt::Display for Event {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        match self {\n            Event::Request(_) => write!(f, \"trie fetcher request\"),\n            Event::TrieOrChunkFetched { id, .. 
} => {\n                write!(f, \"got a result for trie or chunk {}\", id)\n            }\n        }\n    }\n}\n\nimpl TrieAccumulator {\n    pub(crate) fn new() -> Self {\n        TrieAccumulator {\n            partial_chunks: Default::default(),\n        }\n    }\n\n    fn consume_trie_or_chunk<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        trie_or_chunk: TrieOrChunk,\n    ) -> Effects<Event>\n    where\n        REv: From<FetcherRequest<TrieOrChunk>> + From<PeerBehaviorAnnouncement> + Send,\n    {\n        let TrieOrChunkId(_index, hash) = trie_or_chunk.fetch_id();\n        match trie_or_chunk {\n            TrieOrChunk::Value(trie) => match self.partial_chunks.remove(&hash) {\n                None => {\n                    error!(%hash, \"fetched a trie we didn't request!\");\n                    Effects::new()\n                }\n                Some(partial_chunks) => {\n                    trace!(%hash, \"got a full trie\");\n                    let unreliable_peers = partial_chunks.unreliable_peers.clone();\n                    partial_chunks.respond(Ok(Response::new(trie.into_inner(), unreliable_peers)))\n                }\n            },\n            TrieOrChunk::ChunkWithProof(chunk) => self.consume_chunk(effect_builder, chunk),\n        }\n    }\n\n    fn consume_chunk<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        chunk: ChunkWithProof,\n    ) -> Effects<Event>\n    where\n        REv: From<FetcherRequest<TrieOrChunk>> + From<PeerBehaviorAnnouncement> + Send,\n    {\n        let digest = chunk.proof().root_hash();\n        let index = chunk.proof().index();\n        let count = chunk.proof().count();\n        let mut partial_chunks = match self.partial_chunks.remove(&digest) {\n            None => {\n                error!(%digest, %index, \"got a chunk that wasn't requested\");\n                return Effects::new();\n            }\n            Some(partial_chunks) => partial_chunks,\n 
       };\n\n        // Add the downloaded chunk to cache.\n        let _ = partial_chunks.chunks.insert(index, chunk);\n\n        // Check if we can now return a complete trie.\n        match partial_chunks.missing_chunk(count) {\n            Some(missing_index) => {\n                let peer = match partial_chunks.peers.last() {\n                    Some(peer) => *peer,\n                    None => {\n                        debug!(\n                            %digest, %missing_index,\n                            \"no peers to download the next chunk from, giving up\",\n                        );\n                        let unreliable_peers = partial_chunks.unreliable_peers.clone();\n                        return partial_chunks.respond(Err(Error::Absent(\n                            digest,\n                            index,\n                            unreliable_peers,\n                        )));\n                    }\n                };\n                let next_id = TrieOrChunkId(missing_index, digest);\n                self.try_download_chunk(effect_builder, next_id, peer, partial_chunks)\n            }\n            None => {\n                let trie = partial_chunks.assemble_chunks(count);\n                let unreliable_peers = partial_chunks.unreliable_peers.clone();\n                partial_chunks.respond(Ok(Response::new(trie, unreliable_peers)))\n            }\n        }\n    }\n\n    fn try_download_chunk<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        id: TrieOrChunkId,\n        peer: NodeId,\n        partial_chunks: PartialChunks,\n    ) -> Effects<Event>\n    where\n        REv: From<FetcherRequest<TrieOrChunk>> + Send,\n    {\n        let hash = id.digest();\n        let maybe_old_partial_chunks = self.partial_chunks.insert(*hash, partial_chunks);\n        if let Some(old_partial_chunks) = maybe_old_partial_chunks {\n            // unwrap is safe as we just inserted a value at this key\n            
self.partial_chunks\n                .get_mut(hash)\n                .unwrap()\n                .merge(old_partial_chunks);\n        }\n        effect_builder\n            .fetch::<TrieOrChunk>(id, peer, Box::new(EmptyValidationMetadata))\n            .event(move |fetch_result| Event::TrieOrChunkFetched { id, fetch_result })\n    }\n}\n\nimpl<REv> Component<REv> for TrieAccumulator\nwhere\n    REv: From<FetcherRequest<TrieOrChunk>> + From<PeerBehaviorAnnouncement> + Send,\n{\n    type Event = Event;\n\n    fn handle_event(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event> {\n        trace!(?event, \"TrieAccumulator: handling event\");\n        match event {\n            Event::Request(TrieAccumulatorRequest {\n                hash,\n                responder,\n                mut peers,\n            }) => {\n                peers.shuffle(rng);\n                let trie_id = TrieOrChunkId(0, hash);\n                let peer = match peers.last() {\n                    Some(peer) => *peer,\n                    None => {\n                        error!(%hash, \"tried to fetch trie with no peers available\");\n                        return responder.respond(Err(Error::NoPeers(hash))).ignore();\n                    }\n                };\n                let partial_chunks = PartialChunks {\n                    responders: vec![responder],\n                    peers,\n                    chunks: Default::default(),\n                    unreliable_peers: Vec::new(),\n                };\n                self.try_download_chunk(effect_builder, trie_id, peer, partial_chunks)\n            }\n            Event::TrieOrChunkFetched { id, fetch_result } => {\n                let hash = id.digest();\n                match fetch_result {\n                    Err(error) => match self.partial_chunks.remove(hash) {\n                        None => {\n                            
error!(%id,\n                                \"got a fetch result for a chunk we weren't trying to fetch\",\n                            );\n                            Effects::new()\n                        }\n                        Some(mut partial_chunks) => {\n                            debug!(%error, %id, \"error fetching trie chunk\");\n                            partial_chunks.mark_peer_unreliable(error.peer());\n                            // try with the next peer, if possible\n                            match partial_chunks.next_peer().cloned() {\n                                Some(next_peer) => self.try_download_chunk(\n                                    effect_builder,\n                                    id,\n                                    next_peer,\n                                    partial_chunks,\n                                ),\n                                None => {\n                                    warn!(%id, \"couldn't fetch chunk\");\n                                    let faulty_peers = partial_chunks.unreliable_peers.clone();\n                                    partial_chunks.respond(Err(Error::PeersExhausted(\n                                        Box::new(error),\n                                        faulty_peers,\n                                    )))\n                                }\n                            }\n                        }\n                    },\n                    Ok(FetchedData::FromStorage {\n                        item: trie_or_chunk,\n                    }) => {\n                        debug!(%trie_or_chunk, \"got trie or chunk from storage\");\n                        self.consume_trie_or_chunk(effect_builder, *trie_or_chunk)\n                    }\n                    Ok(FetchedData::FromPeer {\n                        item: trie_or_chunk,\n                        peer,\n                    }) => {\n                        debug!(%peer, %trie_or_chunk, \"got trie or chunk 
from peer\");\n                        self.consume_trie_or_chunk(effect_builder, *trie_or_chunk)\n                    }\n                }\n            }\n        }\n    }\n\n    fn name(&self) -> &str {\n        COMPONENT_NAME\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_synchronizer.rs",
    "content": "mod block_acquisition;\nmod block_acquisition_action;\nmod block_builder;\nmod block_synchronizer_progress;\nmod config;\nmod error;\nmod event;\nmod execution_results_acquisition;\nmod global_state_synchronizer;\nmod metrics;\nmod need_next;\nmod peer_list;\nmod signature_acquisition;\nmod transaction_acquisition;\nmod trie_accumulator;\n\n#[cfg(test)]\nmod tests;\n\nuse std::sync::Arc;\n\nuse datasize::DataSize;\nuse either::Either;\nuse futures::FutureExt;\nuse prometheus::Registry;\nuse tracing::{debug, error, info, trace, warn};\n\nuse casper_storage::{\n    block_store::types::ApprovalsHashes, data_access_layer::ExecutionResultsChecksumResult,\n};\nuse casper_types::{\n    Block, BlockHash, BlockHeader, BlockSignatures, BlockSyncStatus, BlockSynchronizerStatus,\n    Chainspec, FinalitySignature, FinalitySignatureId, Timestamp, Transaction,\n};\n\nuse super::network::blocklist::BlocklistJustification;\nuse crate::{\n    components::{\n        fetcher::{\n            EmptyValidationMetadata, Error as FetcherError, FetchItem, FetchResult, FetchedData,\n        },\n        Component, ComponentState, InitializedComponent, ValidatorBoundComponent,\n    },\n    effect::{\n        announcements::{MetaBlockAnnouncement, PeerBehaviorAnnouncement},\n        requests::{\n            BlockAccumulatorRequest, BlockSynchronizerRequest, ContractRuntimeRequest,\n            FetcherRequest, MakeBlockExecutableRequest, MarkBlockCompletedRequest,\n            NetworkInfoRequest, StorageRequest, SyncGlobalStateRequest, TrieAccumulatorRequest,\n        },\n        EffectBuilder, EffectExt, EffectResultExt, Effects,\n    },\n    reactor::{self, main_reactor::MainEvent},\n    types::{\n        sync_leap_validation_metadata::SyncLeapValidationMetaData, BlockExecutionResultsOrChunk,\n        ExecutableBlock, LegacyDeploy, MetaBlock, MetaBlockState, NodeId, SyncLeap,\n        SyncLeapIdentifier, TrieOrChunk, ValidatorMatrix,\n    },\n    NodeRng,\n};\n\nuse 
block_builder::BlockBuilder;\npub(crate) use block_synchronizer_progress::BlockSynchronizerProgress;\npub(crate) use config::Config;\npub(crate) use error::BlockAcquisitionError;\npub(crate) use event::Event;\nuse execution_results_acquisition::ExecutionResultsAcquisition;\npub(crate) use execution_results_acquisition::ExecutionResultsChecksum;\nuse global_state_synchronizer::GlobalStateSynchronizer;\npub(crate) use global_state_synchronizer::{\n    Error as GlobalStateSynchronizerError, Event as GlobalStateSynchronizerEvent,\n    Response as GlobalStateSynchronizerResponse,\n};\nuse metrics::Metrics;\npub(crate) use need_next::NeedNext;\nuse trie_accumulator::TrieAccumulator;\npub(crate) use trie_accumulator::{\n    Error as TrieAccumulatorError, Event as TrieAccumulatorEvent,\n    Response as TrieAccumulatorResponse,\n};\n\nconst COMPONENT_NAME: &str = \"block_synchronizer\";\n\npub(crate) trait ReactorEvent:\n    From<FetcherRequest<ApprovalsHashes>>\n    + From<NetworkInfoRequest>\n    + From<FetcherRequest<Block>>\n    + From<FetcherRequest<BlockHeader>>\n    + From<FetcherRequest<LegacyDeploy>>\n    + From<FetcherRequest<Transaction>>\n    + From<FetcherRequest<FinalitySignature>>\n    + From<FetcherRequest<TrieOrChunk>>\n    + From<FetcherRequest<BlockExecutionResultsOrChunk>>\n    + From<FetcherRequest<SyncLeap>>\n    + From<BlockAccumulatorRequest>\n    + From<PeerBehaviorAnnouncement>\n    + From<StorageRequest>\n    + From<TrieAccumulatorRequest>\n    + From<ContractRuntimeRequest>\n    + From<SyncGlobalStateRequest>\n    + From<MarkBlockCompletedRequest>\n    + From<MakeBlockExecutableRequest>\n    + From<MetaBlockAnnouncement>\n    + Send\n    + 'static\n{\n}\n\nimpl<REv> ReactorEvent for REv where\n    REv: From<FetcherRequest<ApprovalsHashes>>\n        + From<NetworkInfoRequest>\n        + From<FetcherRequest<Block>>\n        + From<FetcherRequest<BlockHeader>>\n        + From<FetcherRequest<LegacyDeploy>>\n        + 
From<FetcherRequest<Transaction>>\n        + From<FetcherRequest<FinalitySignature>>\n        + From<FetcherRequest<TrieOrChunk>>\n        + From<FetcherRequest<BlockExecutionResultsOrChunk>>\n        + From<FetcherRequest<SyncLeap>>\n        + From<BlockAccumulatorRequest>\n        + From<PeerBehaviorAnnouncement>\n        + From<StorageRequest>\n        + From<TrieAccumulatorRequest>\n        + From<ContractRuntimeRequest>\n        + From<SyncGlobalStateRequest>\n        + From<MarkBlockCompletedRequest>\n        + From<MakeBlockExecutableRequest>\n        + From<MetaBlockAnnouncement>\n        + Send\n        + 'static\n{\n}\n\n#[derive(DataSize, Debug)]\npub(crate) struct BlockSynchronizer {\n    state: ComponentState,\n    config: Config,\n    chainspec: Arc<Chainspec>,\n    max_simultaneous_peers: u8,\n    validator_matrix: ValidatorMatrix,\n\n    // execute forward block (do not get global state or execution effects)\n    forward: Option<BlockBuilder>,\n    // either sync-to-genesis or sync-leaped block (get global state and execution effects)\n    historical: Option<BlockBuilder>,\n    // deals with global state acquisition for historical blocks\n    global_sync: GlobalStateSynchronizer,\n    #[data_size(skip)]\n    metrics: Metrics,\n}\n\nimpl BlockSynchronizer {\n    pub(crate) fn new(\n        config: Config,\n        chainspec: Arc<Chainspec>,\n        max_simultaneous_peers: u8,\n        validator_matrix: ValidatorMatrix,\n        registry: &Registry,\n    ) -> Result<Self, prometheus::Error> {\n        Ok(BlockSynchronizer {\n            state: ComponentState::Uninitialized,\n            config,\n            chainspec,\n            max_simultaneous_peers,\n            validator_matrix,\n            forward: None,\n            historical: None,\n            global_sync: GlobalStateSynchronizer::new(config.max_parallel_trie_fetches as usize),\n            metrics: Metrics::new(registry)?,\n        })\n    }\n\n    /// Returns the progress being made on 
the historical syncing.\n    pub(crate) fn historical_progress(&mut self) -> BlockSynchronizerProgress {\n        match &self.historical {\n            None => BlockSynchronizerProgress::Idle,\n            Some(builder) => self.progress(builder),\n        }\n    }\n\n    /// Returns the progress being made on the forward syncing.\n    pub(crate) fn forward_progress(&mut self) -> BlockSynchronizerProgress {\n        match &self.forward {\n            None => BlockSynchronizerProgress::Idle,\n            Some(builder) => self.progress(builder),\n        }\n    }\n\n    pub(crate) fn purge(&mut self) {\n        self.purge_historical();\n        self.purge_forward();\n    }\n\n    pub(crate) fn purge_historical(&mut self) {\n        if let Some(builder) = &self.historical {\n            debug!(%builder, \"BlockSynchronizer: purging block builder\");\n        }\n        self.historical = None;\n    }\n\n    pub(crate) fn purge_forward(&mut self) {\n        if let Some(builder) = &self.forward {\n            debug!(%builder, \"BlockSynchronizer: purging block builder\");\n        }\n        self.forward = None;\n    }\n\n    /// Registers a block for synchronization.\n    ///\n    /// Returns `true` if a block was registered for synchronization successfully.\n    /// Will return `false` if there was an attempt to register the same block hash\n    /// again while the synchronizer was working on the same block. 
The synchronizer\n    /// will continue work on the block in that case.\n    pub(crate) fn register_block_by_hash(\n        &mut self,\n        block_hash: BlockHash,\n        should_fetch_execution_state: bool,\n    ) -> bool {\n        if let (true, Some(builder), _) | (false, _, Some(builder)) = (\n            should_fetch_execution_state,\n            &self.historical,\n            &self.forward,\n        ) {\n            if builder.block_hash() == block_hash && !builder.is_failed() {\n                return false;\n            }\n        }\n        let builder = BlockBuilder::new(\n            block_hash,\n            should_fetch_execution_state,\n            self.max_simultaneous_peers,\n            self.config.peer_refresh_interval,\n            self.chainspec.core_config.legacy_required_finality,\n            self.chainspec\n                .core_config\n                .start_protocol_version_with_strict_finality_signatures_required,\n        );\n        if should_fetch_execution_state {\n            self.historical.replace(builder);\n        } else {\n            self.forward.replace(builder);\n        }\n        true\n    }\n\n    /// Registers a sync leap result, if able.\n    pub(crate) fn register_sync_leap(\n        &mut self,\n        sync_leap: &SyncLeap,\n        peers: Vec<NodeId>,\n        should_fetch_execution_state: bool,\n    ) {\n        fn apply_sigs(builder: &mut BlockBuilder, maybe_sigs: Option<&BlockSignatures>) {\n            if let Some(signatures) = maybe_sigs {\n                for finality_signature in signatures.finality_signatures() {\n                    if let Err(error) =\n                        builder.register_finality_signature(finality_signature, None)\n                    {\n                        debug!(%error, \"BlockSynchronizer: failed to register finality signature\");\n                    }\n                }\n            }\n        }\n\n        let (block_header, maybe_sigs) = 
sync_leap.highest_block_header_and_signatures();\n        if let Some(builder) = self.get_builder(block_header.block_hash(), true) {\n            debug!(%builder, \"BlockSynchronizer: register_sync_leap update builder\");\n            apply_sigs(builder, maybe_sigs);\n            builder.register_peers(peers);\n        } else {\n            let era_id = block_header.era_id();\n            if let Some(validator_weights) = self.validator_matrix.validator_weights(era_id) {\n                let mut builder = BlockBuilder::new_from_sync_leap(\n                    block_header.clone(),\n                    maybe_sigs,\n                    validator_weights,\n                    peers,\n                    should_fetch_execution_state,\n                    self.max_simultaneous_peers,\n                    self.config.peer_refresh_interval,\n                    self.chainspec.core_config.legacy_required_finality,\n                    self.chainspec\n                        .core_config\n                        .start_protocol_version_with_strict_finality_signatures_required,\n                );\n                apply_sigs(&mut builder, maybe_sigs);\n                if should_fetch_execution_state {\n                    self.historical = Some(builder);\n                } else {\n                    self.forward = Some(builder);\n                }\n            } else {\n                warn!(\n                    block_hash = %block_header.block_hash(),\n                    \"BlockSynchronizer: register_sync_leap unable to create block builder\",\n                );\n            }\n        }\n    }\n\n    /// Registers peers to a block builder by `BlockHash`.\n    pub(crate) fn register_peers(&mut self, block_hash: BlockHash, peers: Vec<NodeId>) {\n        if let Some(builder) = self.get_builder(block_hash, false) {\n            builder.register_peers(peers);\n        }\n    }\n\n    /* EVENT LOGIC */\n\n    fn register_made_finalized_block(\n        &mut self,\n        
block_hash: &BlockHash,\n        result: Option<ExecutableBlock>,\n    ) {\n        if let Some(builder) = &self.historical {\n            if builder.block_hash() == *block_hash {\n                error!(%block_hash, \"historical block should not have been converted for execution\");\n            }\n        }\n\n        match &mut self.forward {\n            Some(builder) if builder.block_hash() == *block_hash => {\n                if let Some(executable_block) = result {\n                    builder.register_made_executable_block(executable_block);\n                } else {\n                    // Could not create finalized block, abort\n                    builder.abort();\n                }\n            }\n            _ => {\n                trace!(%block_hash, \"BlockSynchronizer: not currently synchronizing forward block\");\n            }\n        }\n    }\n\n    fn register_block_execution_enqueued(&mut self, block_hash: &BlockHash) {\n        if let Some(builder) = &self.historical {\n            if builder.block_hash() == *block_hash {\n                error!(%block_hash, \"historical block should not be enqueued for execution\");\n            }\n        }\n\n        match &mut self.forward {\n            Some(builder) if builder.block_hash() == *block_hash => {\n                builder.register_block_execution_enqueued();\n                self.metrics\n                    .forward_block_sync_duration\n                    .observe(builder.sync_start_time().elapsed().as_secs_f64());\n            }\n            _ => {\n                trace!(%block_hash, \"BlockSynchronizer: not currently synchronizing forward block\");\n            }\n        }\n    }\n\n    fn register_block_executed(&mut self, block_hash: &BlockHash) {\n        if let Some(builder) = &self.historical {\n            if builder.block_hash() == *block_hash {\n                error!(%block_hash, \"historical block should not be executed\");\n            }\n        }\n\n        match &mut 
self.forward {\n            Some(builder) if builder.block_hash() == *block_hash => {\n                builder.register_block_executed();\n                self.metrics\n                    .forward_block_sync_duration\n                    .observe(builder.sync_start_time().elapsed().as_secs_f64());\n            }\n            _ => {\n                trace!(%block_hash, \"BlockSynchronizer: not currently synchronizing forward block\");\n            }\n        }\n    }\n\n    fn register_marked_complete<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        block_hash: &BlockHash,\n        is_new: bool,\n    ) -> Effects<Event>\n    where\n        REv: From<StorageRequest>\n            + From<MetaBlockAnnouncement>\n            + From<MarkBlockCompletedRequest>\n            + Send,\n    {\n        if let Some(builder) = &self.forward {\n            if builder.block_hash() == *block_hash {\n                error!(\n                    %block_hash,\n                    \"forward block should not be marked complete in block synchronizer\"\n                );\n            }\n        }\n\n        let mut effects = Effects::new();\n        match &mut self.historical {\n            Some(builder) if builder.block_hash() == *block_hash => {\n                builder.register_marked_complete();\n                if !is_new {\n                    warn!(%block_hash, \"marked complete an already-complete block\");\n                    return effects;\n                }\n                // other components need to know that we've added an historical block\n                // that they may be interested in\n                if let Some(block) = builder.maybe_block() {\n                    effects.extend(\n                        effect_builder\n                            .get_execution_results_from_storage(*block.hash())\n                            .then(move |maybe_execution_results| async move {\n                                match 
maybe_execution_results {\n                                    Some(execution_results) => {\n                                        let meta_block = MetaBlock::new_historical(\n                                            Arc::new(*block),\n                                            execution_results,\n                                            MetaBlockState::new_after_historical_sync(),\n                                        );\n                                        effect_builder.announce_meta_block(meta_block).await;\n                                    }\n                                    None => {\n                                        error!(\n                                            \"should have execution results for {}\",\n                                            block.hash()\n                                        );\n                                    }\n                                }\n                            })\n                            .ignore(),\n                    );\n                }\n                self.metrics\n                    .historical_block_sync_duration\n                    .observe(builder.sync_start_time().elapsed().as_secs_f64());\n            }\n            _ => {\n                trace!(%block_hash, \"BlockSynchronizer: not currently synchronizing historical block\");\n            }\n        }\n        effects\n    }\n\n    fn dishonest_peers(&self) -> Vec<NodeId> {\n        let mut ret = vec![];\n        if let Some(builder) = &self.forward {\n            ret.extend(builder.dishonest_peers());\n        }\n        if let Some(builder) = &self.historical {\n            ret.extend(builder.dishonest_peers());\n        }\n        ret\n    }\n\n    fn flush_dishonest_peers(&mut self) {\n        if let Some(builder) = &mut self.forward {\n            builder.flush_dishonest_peers();\n        }\n        if let Some(builder) = &mut self.historical {\n            builder.flush_dishonest_peers();\n        }\n    
}\n\n    fn need_next<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        rng: &mut NodeRng,\n    ) -> Effects<Event>\n    where\n        REv: ReactorEvent + From<FetcherRequest<Block>> + From<MarkBlockCompletedRequest>,\n    {\n        let latch_reset_interval = self.config.latch_reset_interval;\n        let need_next_interval = self.config.need_next_interval.into();\n        let mut results = Effects::new();\n        let max_simultaneous_peers = self.max_simultaneous_peers;\n        let mut builder_needs_next = |builder: &mut BlockBuilder, chainspec: Arc<Chainspec>| {\n            if builder.check_latch(latch_reset_interval)\n                || builder.is_finished()\n                || builder.is_failed()\n            {\n                return;\n            }\n            let action = builder.block_acquisition_action(rng, max_simultaneous_peers);\n            let peers = action.peers_to_ask();\n            let need_next = action.need_next();\n            info!(\n                \"BlockSynchronizer: {} with {} peers\",\n                need_next,\n                peers.len()\n            );\n            match need_next {\n                NeedNext::Nothing(_) => {\n                    // currently idle or waiting, check back later\n                    results.extend(\n                        effect_builder\n                            .set_timeout(need_next_interval)\n                            .event(|_| Event::Request(BlockSynchronizerRequest::NeedNext)),\n                    );\n                }\n                NeedNext::BlockHeader(block_hash) => {\n                    builder.latch_by(peers.len());\n                    results.extend(peers.into_iter().flat_map(|node_id| {\n                        effect_builder\n                            .fetch::<BlockHeader>(\n                                block_hash,\n                                node_id,\n                                Box::new(EmptyValidationMetadata),\n               
             )\n                            .event(Event::BlockHeaderFetched)\n                    }))\n                }\n                NeedNext::BlockBody(block_hash) => {\n                    builder.latch_by(peers.len());\n                    results.extend(peers.into_iter().flat_map(|node_id| {\n                        effect_builder\n                            .fetch::<Block>(block_hash, node_id, Box::new(EmptyValidationMetadata))\n                            .event(Event::BlockFetched)\n                    }))\n                }\n                NeedNext::FinalitySignatures(block_hash, era_id, validators) => {\n                    builder.latch_by(std::cmp::min(\n                        validators.len(),\n                        max_simultaneous_peers as usize,\n                    ));\n                    for (validator, peer) in validators\n                        .into_iter()\n                        .take(max_simultaneous_peers as usize)\n                        .zip(peers.into_iter().cycle())\n                    {\n                        debug!(%validator, %peer, \"attempting to fetch FinalitySignature\");\n                        builder.register_finality_signature_pending(validator.clone());\n                        let id = Box::new(FinalitySignatureId::new(block_hash, era_id, validator));\n                        results.extend(\n                            effect_builder\n                                .fetch::<FinalitySignature>(\n                                    id,\n                                    peer,\n                                    Box::new(EmptyValidationMetadata),\n                                )\n                                .event(Event::FinalitySignatureFetched),\n                        );\n                    }\n                }\n                NeedNext::GlobalState(block_hash, global_state_root_hash) => {\n                    builder.latch();\n                    results.extend(\n                        
effect_builder\n                            .sync_global_state(block_hash, global_state_root_hash)\n                            .event(move |result| Event::GlobalStateSynced { block_hash, result }),\n                    );\n                }\n                NeedNext::ExecutionResultsChecksum(block_hash, global_state_root_hash) => {\n                    builder.latch();\n                    results.extend(\n                        effect_builder\n                            .get_execution_results_checksum(global_state_root_hash)\n                            .event(move |result| Event::GotExecutionResultsChecksum {\n                                block_hash,\n                                result,\n                            }),\n                    );\n                }\n                NeedNext::ExecutionResults(block_hash, id, checksum) => {\n                    builder.latch_by(peers.len());\n                    results.extend(peers.into_iter().flat_map(|node_id| {\n                        debug!(\"attempting to fetch BlockExecutionResultsOrChunk\");\n                        effect_builder\n                            .fetch::<BlockExecutionResultsOrChunk>(id, node_id, Box::new(checksum))\n                            .event(move |result| Event::ExecutionResultsFetched {\n                                block_hash,\n                                result,\n                            })\n                    }))\n                }\n                NeedNext::ApprovalsHashes(block_hash, block) => {\n                    builder.latch_by(peers.len());\n                    results.extend(peers.into_iter().flat_map(|node_id| {\n                        effect_builder\n                            .fetch::<ApprovalsHashes>(block_hash, node_id, block.clone())\n                            .event(Event::ApprovalsHashesFetched)\n                    }))\n                }\n                NeedNext::DeployByHash(block_hash, deploy_hash) => {\n                    
builder.latch_by(peers.len());\n                    results.extend(peers.into_iter().flat_map(|node_id| {\n                        effect_builder\n                            .fetch::<LegacyDeploy>(\n                                deploy_hash,\n                                node_id,\n                                Box::new(EmptyValidationMetadata),\n                            )\n                            .event(move |result| Event::TransactionFetched {\n                                block_hash,\n                                result: Either::Left(result),\n                            })\n                    }))\n                }\n                NeedNext::TransactionById(block_hash, txn_id) => {\n                    builder.latch_by(peers.len());\n                    results.extend(peers.into_iter().flat_map(|node_id| {\n                        effect_builder\n                            .fetch::<Transaction>(\n                                txn_id,\n                                node_id,\n                                Box::new(EmptyValidationMetadata),\n                            )\n                            .event(move |result| Event::TransactionFetched {\n                                block_hash,\n                                result: Either::Right(result),\n                            })\n                    }))\n                }\n                NeedNext::MakeExecutableBlock(block_hash, _) => {\n                    let need_to_execute = false == builder.should_fetch_execution_state()\n                        && builder.execution_unattempted();\n                    if need_to_execute {\n                        builder.latch();\n                        results.extend(\n                            effect_builder\n                                .make_block_executable(block_hash)\n                                .event(move |result| Event::MadeFinalizedBlock {\n                                    block_hash,\n                                
    result,\n                                }),\n                        )\n                    }\n                }\n                NeedNext::EnqueueForExecution(block_hash, _, executable_block) => {\n                    builder.latch();\n                    results.extend(\n                        effect_builder\n                            .enqueue_block_for_execution(\n                                *executable_block,\n                                MetaBlockState::new_already_stored(),\n                            )\n                            .event(move |_| Event::MarkBlockExecutionEnqueued(block_hash)),\n                    )\n                }\n                NeedNext::BlockMarkedComplete(block_hash, block_height) => {\n                    // Only mark the block complete if we're syncing historical\n                    // because we have global state and execution effects (if\n                    // any).\n                    if builder.should_fetch_execution_state() {\n                        builder.latch();\n                        results.extend(\n                            effect_builder.mark_block_completed(block_height).event(\n                                move |is_new| Event::MarkBlockCompleted { block_hash, is_new },\n                            ),\n                        )\n                    }\n                }\n                NeedNext::Peers(block_hash) => {\n                    if builder.should_fetch_execution_state() {\n                        builder.latch();\n                        // the accumulator may or may not have peers for an older block,\n                        // so we're going to also get a random sampling from networking\n                        results.extend(\n                            effect_builder\n                                .get_fully_connected_peers(max_simultaneous_peers as usize)\n                                .event(move |peers| Event::NetworkPeers(block_hash, peers)),\n                        
)\n                    }\n                    builder.latch();\n                    results.extend(\n                        effect_builder\n                            .get_block_accumulated_peers(block_hash)\n                            .event(move |maybe_peers| {\n                                Event::AccumulatedPeers(block_hash, maybe_peers)\n                            }),\n                    )\n                }\n                NeedNext::EraValidators(era_id) => {\n                    warn!(\n                        \"BlockSynchronizer: does not have era_validators for era_id: {}\",\n                        era_id\n                    );\n                    builder.latch_by(peers.len());\n                    results.extend(peers.into_iter().flat_map(|node_id| {\n                        effect_builder\n                            .fetch::<SyncLeap>(\n                                SyncLeapIdentifier::sync_to_historical(builder.block_hash()),\n                                node_id,\n                                Box::new(SyncLeapValidationMetaData::from_chainspec(\n                                    chainspec.as_ref(),\n                                )),\n                            )\n                            .event(Event::SyncLeapFetched)\n                    }))\n                }\n                NeedNext::SwitchToHaveStrictFinality(block_hash, _) => {\n                    // Don't set the latch since this is an internal state transition\n                    if builder.block_hash() != block_hash {\n                        debug!(%block_hash, \"BlockSynchronizer: not currently synchronizing block\");\n                    } else if let Err(error) = builder.switch_to_have_strict_finality(block_hash) {\n                        error!(%error, \"BlockSynchronizer: failed to advance acquisition state\");\n                    } else {\n                        results.extend(\n                            effect_builder\n                                
.set_timeout(need_next_interval)\n                                .event(|_| Event::Request(BlockSynchronizerRequest::NeedNext)),\n                        );\n                    }\n                }\n            }\n        };\n\n        if let Some(builder) = &mut self.forward {\n            builder_needs_next(builder, Arc::clone(&self.chainspec));\n        }\n        if let Some(builder) = &mut self.historical {\n            builder_needs_next(builder, Arc::clone(&self.chainspec));\n        }\n        results\n    }\n\n    fn peers_accumulated(&mut self, block_hash: BlockHash, peers: Vec<NodeId>) {\n        if let Some(builder) = self.get_builder(block_hash, true) {\n            builder.register_peers(peers);\n        }\n    }\n\n    fn block_header_fetched(\n        &mut self,\n        result: Result<FetchedData<BlockHeader>, FetcherError<BlockHeader>>,\n    ) {\n        let (block_hash, maybe_block_header, maybe_peer_id): (\n            BlockHash,\n            Option<Box<BlockHeader>>,\n            Option<NodeId>,\n        ) = match result {\n            Ok(FetchedData::FromPeer { item, peer }) => (item.fetch_id(), Some(item), Some(peer)),\n            Ok(FetchedData::FromStorage { item }) => (item.fetch_id(), Some(item), None),\n            Err(err) => {\n                debug!(%err, \"BlockSynchronizer: failed to fetch block header\");\n                if err.is_peer_fault() {\n                    (*err.id(), None, Some(*err.peer()))\n                } else {\n                    (*err.id(), None, None)\n                }\n            }\n        };\n\n        let validator_matrix = &self.validator_matrix.clone();\n        if let Some(builder) = self.get_builder(block_hash, false) {\n            match maybe_block_header {\n                None => {\n                    if let Some(peer_id) = maybe_peer_id {\n                        builder.demote_peer(peer_id);\n                    }\n\n                    if builder.waiting_for_block_header() {\n              
          builder.latch_decrement();\n                    }\n                }\n                Some(block_header) => {\n                    if let Err(error) = builder.register_block_header(*block_header, maybe_peer_id)\n                    {\n                        error!(%error, \"BlockSynchronizer: failed to apply block header\");\n                    } else {\n                        builder.register_era_validator_weights(validator_matrix);\n                    }\n                }\n            }\n        }\n    }\n\n    fn block_fetched(&mut self, result: Result<FetchedData<Block>, FetcherError<Block>>) {\n        let (block_hash, maybe_block, maybe_peer_id): (\n            BlockHash,\n            Option<Box<Block>>,\n            Option<NodeId>,\n        ) = match result {\n            Ok(FetchedData::FromPeer { item, peer }) => {\n                debug!(\n                    \"BlockSynchronizer: fetched body {:?} from peer {}\",\n                    item.hash(),\n                    peer\n                );\n                (*item.hash(), Some(item), Some(peer))\n            }\n            Ok(FetchedData::FromStorage { item }) => (*item.hash(), Some(item), None),\n            Err(err) => {\n                debug!(%err, \"BlockSynchronizer: failed to fetch block\");\n                if err.is_peer_fault() {\n                    (*err.id(), None, Some(*err.peer()))\n                } else {\n                    (*err.id(), None, None)\n                }\n            }\n        };\n\n        if let Some(builder) = self.get_builder(block_hash, false) {\n            match maybe_block {\n                None => {\n                    if let Some(peer_id) = maybe_peer_id {\n                        builder.demote_peer(peer_id);\n                    }\n\n                    if builder.waiting_for_block() {\n                        builder.latch_decrement();\n                    }\n                }\n                Some(block) => {\n                    if let 
Err(error) = builder.register_block(*block, maybe_peer_id) {\n                        error!(%error, \"BlockSynchronizer: failed to apply block\");\n                    }\n                }\n            }\n        }\n    }\n\n    fn approvals_hashes_fetched(\n        &mut self,\n        result: Result<FetchedData<ApprovalsHashes>, FetcherError<ApprovalsHashes>>,\n    ) {\n        let (block_hash, maybe_approvals_hashes, maybe_peer_id): (\n            BlockHash,\n            Option<Box<ApprovalsHashes>>,\n            Option<NodeId>,\n        ) = match result {\n            Ok(FetchedData::FromPeer { item, peer }) => {\n                debug!(\n                    \"BlockSynchronizer: fetched approvals hashes {:?} from peer {}\",\n                    item.block_hash(),\n                    peer\n                );\n                (*item.block_hash(), Some(item), Some(peer))\n            }\n            Ok(FetchedData::FromStorage { item }) => (*item.block_hash(), Some(item), None),\n            Err(err) => {\n                debug!(%err, \"BlockSynchronizer: failed to fetch approvals hashes\");\n                if err.is_peer_fault() {\n                    (*err.id(), None, Some(*err.peer()))\n                } else {\n                    (*err.id(), None, None)\n                }\n            }\n        };\n\n        if let Some(builder) = self.get_builder(block_hash, false) {\n            match maybe_approvals_hashes {\n                None => {\n                    if let Some(peer_id) = maybe_peer_id {\n                        builder.demote_peer(peer_id);\n                    }\n\n                    if builder.waiting_for_approvals_hashes() {\n                        builder.latch_decrement();\n                    }\n                }\n                Some(approvals_hashes) => {\n                    if let Err(error) =\n                        builder.register_approvals_hashes(&approvals_hashes, maybe_peer_id)\n                    {\n                        
error!(%error, \"BlockSynchronizer: failed to apply approvals hashes\");\n                    }\n                }\n            }\n        }\n    }\n\n    fn finality_signature_fetched(\n        &mut self,\n        result: Result<FetchedData<FinalitySignature>, FetcherError<FinalitySignature>>,\n    ) {\n        let (id, maybe_finality_signature, maybe_peer_id) = match result {\n            Ok(FetchedData::FromPeer { item, peer }) => {\n                debug!(\n                    \"BlockSynchronizer: fetched finality signature {} from peer {}\",\n                    item, peer\n                );\n                (item.fetch_id(), Some(item), Some(peer))\n            }\n            Ok(FetchedData::FromStorage { item }) => (item.fetch_id(), Some(item), None),\n            Err(err) => {\n                debug!(%err, \"BlockSynchronizer: failed to fetch finality signature\");\n                if err.is_peer_fault() {\n                    (err.id().clone(), None, Some(*err.peer()))\n                } else {\n                    (err.id().clone(), None, None)\n                }\n            }\n        };\n\n        if let Some(builder) = self.get_builder(*id.block_hash(), false) {\n            match maybe_finality_signature {\n                None => {\n                    if let Some(peer_id) = maybe_peer_id {\n                        builder.demote_peer(peer_id);\n                    }\n\n                    // Failed to fetch a finality sig. 
Decrement the latch if we were actually\n                    // waiting for signatures.\n                    if builder.waiting_for_signatures() {\n                        builder.latch_decrement();\n                    }\n                }\n                Some(finality_signature) => {\n                    if let Err(error) =\n                        builder.register_finality_signature(*finality_signature, maybe_peer_id)\n                    {\n                        warn!(%error, \"BlockSynchronizer: failed to apply finality signature\");\n                    }\n                }\n            }\n        }\n    }\n\n    fn sync_leap_fetched(&mut self, result: Result<FetchedData<SyncLeap>, FetcherError<SyncLeap>>) {\n        let (block_hash, maybe_sync_leap, maybe_peer_id): (\n            BlockHash,\n            Option<Box<SyncLeap>>,\n            Option<NodeId>,\n        ) = match result {\n            Ok(FetchedData::FromPeer { item, peer }) => {\n                debug!(\n                    \"BlockSynchronizer: fetched sync leap {:?} from peer {}\",\n                    item.fetch_id().block_hash(),\n                    peer\n                );\n\n                (item.fetch_id().block_hash(), Some(item), Some(peer))\n            }\n            Ok(FetchedData::FromStorage { item }) => {\n                error!(%item, \"BlockSynchronizer: sync leap should never come from storage\");\n                (item.fetch_id().block_hash(), None, None) // maybe_sync_leap None will demote peer\n            }\n            Err(err) => {\n                debug!(%err, \"BlockSynchronizer: failed to fetch sync leap\");\n                if err.is_peer_fault() {\n                    (err.id().block_hash(), None, Some(*err.peer()))\n                } else {\n                    (err.id().block_hash(), None, None)\n                }\n            }\n        };\n        let demote_peer = maybe_sync_leap.is_none();\n        if let Some(sync_leap) = maybe_sync_leap {\n            let 
era_validator_weights = sync_leap.era_validator_weights(\n                self.validator_matrix.fault_tolerance_threshold(),\n                &self.chainspec.protocol_config,\n            );\n            for evw in era_validator_weights {\n                self.validator_matrix.register_era_validator_weights(evw);\n            }\n        }\n        let validator_matrix = &self.validator_matrix.clone();\n        if let Some(builder) = self.get_builder(block_hash, true) {\n            if demote_peer {\n                if let Some(peer_id) = maybe_peer_id {\n                    builder.demote_peer(peer_id);\n                }\n            } else {\n                if let Some(peer_id) = maybe_peer_id {\n                    builder.promote_peer(peer_id);\n                }\n                builder.register_era_validator_weights(validator_matrix);\n            }\n        }\n    }\n\n    fn global_state_synced(\n        &mut self,\n        block_hash: BlockHash,\n        result: Result<GlobalStateSynchronizerResponse, GlobalStateSynchronizerError>,\n    ) {\n        let (maybe_root_hash, unreliable_peers) = match result {\n            Ok(response) => (Some(*response.hash()), response.unreliable_peers()),\n            Err(error) => {\n                debug!(%error, \"BlockSynchronizer: failed to sync global state\");\n                match error {\n                    GlobalStateSynchronizerError::TrieAccumulator(unreliable_peers)\n                    | GlobalStateSynchronizerError::PutTrie(_, unreliable_peers) => {\n                        (None, unreliable_peers)\n                    }\n                    GlobalStateSynchronizerError::NoPeersAvailable => {\n                        // This should never happen. 
Before creating a sync request,\n                        // the block synchronizer will request another set of peers\n                        // (both random and from the accumulator).\n                        debug!(\n                            \"BlockSynchronizer: global state sync request was issued with no peers\"\n                        );\n                        (None, Vec::new())\n                    }\n                    GlobalStateSynchronizerError::ProcessingAnotherRequest {\n                        hash_being_synced,\n                        hash_requested,\n                    } => {\n                        warn!(%hash_being_synced, %hash_requested,\n                        \"BlockSynchronizer: global state sync is processing another request\");\n                        (None, Vec::new())\n                    }\n                }\n            }\n        };\n\n        if let Some(builder) = &mut self.historical {\n            if builder.block_hash() != block_hash {\n                debug!(%block_hash, \"BlockSynchronizer: not currently synchronizing block\");\n            } else {\n                builder.latch_decrement();\n                if let Some(root_hash) = maybe_root_hash {\n                    if let Err(error) = builder.register_global_state(root_hash.into_inner()) {\n                        error!(%block_hash, %error, \"BlockSynchronizer: failed to apply global state\");\n                    }\n                }\n                // Demote all the peers where we didn't find the required global state tries\n                for peer in unreliable_peers.iter() {\n                    builder.demote_peer(*peer);\n                }\n            }\n        }\n    }\n\n    fn got_execution_results_checksum(\n        &mut self,\n        block_hash: BlockHash,\n        result: ExecutionResultsChecksumResult,\n    ) {\n        let builder = match &mut self.historical {\n            None => {\n                // execution results checksums are only 
relevant to historical blocks\n                debug!(%block_hash, \"BlockSynchronizer: not currently synchronising block\");\n                return;\n            }\n            Some(builder) => {\n                let current_block_hash = builder.block_hash();\n                if current_block_hash != block_hash {\n                    debug!(%block_hash, %current_block_hash, \"BlockSynchronizer: currently synchronising different block\");\n                    return;\n                }\n                builder\n            }\n        };\n\n        let execution_results_checksum = match result {\n            ExecutionResultsChecksumResult::Failure(error) => {\n                error!(%block_hash, %error, \"BlockSynchronizer: unexpected error getting checksum registry\");\n                ExecutionResultsChecksum::Uncheckable\n            }\n            ExecutionResultsChecksumResult::RootNotFound => {\n                error!(%block_hash, \"BlockSynchronizer: unexpected error getting checksum registry (root not found)\");\n                ExecutionResultsChecksum::Uncheckable\n            }\n            ExecutionResultsChecksumResult::ChecksumNotFound => {\n                error!(%block_hash, \"BlockSynchronizer: checksum not found (should exist)\");\n                ExecutionResultsChecksum::Uncheckable\n            }\n            ExecutionResultsChecksumResult::RegistryNotFound => {\n                // we didn't track this checksum pre-1.5\n                debug!(%block_hash, \"BlockSynchronizer: checksum registry not found (legacy record)\");\n                ExecutionResultsChecksum::Uncheckable\n            }\n            ExecutionResultsChecksumResult::Success { checksum } => {\n                debug!(\n                    %block_hash, \"BlockSynchronizer: got execution_results_checksum {}\",\n                    checksum\n                );\n                ExecutionResultsChecksum::Checkable(checksum)\n            }\n        };\n\n        
builder.latch_decrement();\n        if let Err(error) = builder.register_execution_results_checksum(execution_results_checksum)\n        {\n            error!(%block_hash, %error, \"BlockSynchronizer: failed to apply execution results checksum\");\n        }\n    }\n\n    fn execution_results_fetched<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        block_hash: BlockHash,\n        result: FetchResult<BlockExecutionResultsOrChunk>,\n    ) -> Effects<Event>\n    where\n        REv: From<StorageRequest> + Send,\n    {\n        debug!(%block_hash, \"execution_results_fetched\");\n        let (maybe_value_or_chunk, maybe_peer_id) = match result {\n            Ok(FetchedData::FromPeer { item, peer }) => {\n                debug!(\n                    \"BlockSynchronizer: fetched execution results {} from peer {}\",\n                    item.block_hash(),\n                    peer\n                );\n                (Some(item), Some(peer))\n            }\n            Ok(FetchedData::FromStorage { item }) => (Some(item), None),\n            Err(err) => {\n                debug!(%err, \"BlockSynchronizer: failed to fetch execution results or chunk\");\n                if err.is_peer_fault() {\n                    (None, Some(*err.peer()))\n                } else {\n                    (None, None)\n                }\n            }\n        };\n        debug!(\n            has_value_or_chunk = maybe_value_or_chunk.is_some(),\n            ?maybe_peer_id,\n            \"execution_results_fetched\"\n        );\n\n        if let Some(builder) = &mut self.historical {\n            if builder.block_hash() != block_hash {\n                debug!(%block_hash, \"BlockSynchronizer: not currently synchronizing block\");\n                return Effects::new();\n            }\n            match maybe_value_or_chunk {\n                None => {\n                    debug!(%block_hash, \"execution_results_fetched: No maybe_value_or_chunk\");\n                
    if let Some(peer_id) = maybe_peer_id {\n                        builder.demote_peer(peer_id);\n                    }\n                    if builder.waiting_for_execution_results() {\n                        builder.latch_decrement();\n                    }\n                }\n                Some(value_or_chunk) => {\n                    // due to reasons, the stitched back together execution effects need to be saved\n                    // to disk here, when the last chunk is collected.\n                    // we expect a response back, which will crank the block builder for this block\n                    // to the next state.\n                    debug!(\n                        %value_or_chunk,\n                        \"execution_results_fetched\"\n                    );\n                    match builder.register_fetched_execution_results(maybe_peer_id, *value_or_chunk)\n                    {\n                        Ok(Some(execution_results)) => {\n                            debug!(%block_hash, \"execution_results_fetched: putting execution results to storage\");\n                            let (block_height, era_id) = match builder.block_height_and_era() {\n                                Some(value) => value,\n                                None => {\n                                    error!(\n                                        %block_hash,\n                                        \"BlockSynchronizer: failed to apply execution results or \\\n                                        chunk due to missing block height and era id\"\n                                    );\n                                    return Effects::new();\n                                }\n                            };\n                            return effect_builder\n                                .put_execution_artifacts_to_storage(\n                                    block_hash,\n                                    block_height,\n                                 
   era_id,\n                                    execution_results,\n                                )\n                                .event(move |()| Event::ExecutionResultsStored(block_hash));\n                        }\n                        Ok(None) => {\n                            debug!(%block_hash, \"execution_results_fetched: Ok(None)\");\n                        }\n                        Err(error) => {\n                            error!(%block_hash, %error, \"BlockSynchronizer: failed to apply execution results or chunk\");\n                        }\n                    }\n                }\n            }\n        }\n        Effects::new()\n    }\n\n    fn execution_results_stored(&mut self, block_hash: BlockHash) {\n        if let Some(builder) = &mut self.historical {\n            if builder.block_hash() != block_hash {\n                debug!(%block_hash, \"BlockSynchronizer: register_execution_results_stored: not currently synchronizing block\");\n            } else {\n                builder.latch_decrement();\n                if let Err(error) = builder.register_execution_results_stored_notification() {\n                    error!(%block_hash, %error, \"BlockSynchronizer: register_execution_results_stored: failed to apply stored execution results\");\n                }\n            }\n        }\n    }\n\n    fn transaction_fetched(\n        &mut self,\n        block_hash: BlockHash,\n        fetched_txn: FetchedData<Transaction>,\n    ) {\n        let (txn, maybe_peer) = match fetched_txn {\n            FetchedData::FromPeer { item, peer } => (item, Some(peer)),\n            FetchedData::FromStorage { item } => (item, None),\n        };\n\n        if let Some(builder) = self.get_builder(block_hash, false) {\n            if let Err(error) = builder.register_transaction(txn.fetch_id(), maybe_peer) {\n                error!(%block_hash, %error, \"BlockSynchronizer: failed to apply transaction\");\n            }\n        }\n    }\n\n    fn 
disqualify_peer(&mut self, node_id: NodeId) {\n        if let Some(builder) = &mut self.forward {\n            builder.disqualify_peer(node_id);\n        }\n        if let Some(builder) = &mut self.historical {\n            builder.disqualify_peer(node_id);\n        }\n    }\n\n    fn progress(&self, builder: &BlockBuilder) -> BlockSynchronizerProgress {\n        if builder.is_finished() {\n            match builder.block_height_and_era() {\n                None => {\n                    error!(\"BlockSynchronizer: finished builder should have block height and era\")\n                }\n                Some((block_height, era_id)) => {\n                    return BlockSynchronizerProgress::Synced(\n                        builder.block_hash(),\n                        block_height,\n                        era_id,\n                    );\n                }\n            }\n        }\n\n        if builder.is_executing() {\n            match builder.block_height_and_era() {\n                None => {\n                    error!(\"BlockSynchronizer: finished builder should have block height and era\")\n                }\n                Some((block_height, era_id)) => {\n                    // If the block is currently being executed, we will not\n                    // purge the builder and instead wait for it to be\n                    // executed and marked complete.\n                    if builder.is_executing() {\n                        return BlockSynchronizerProgress::Executing(\n                            builder.block_hash(),\n                            block_height,\n                            era_id,\n                        );\n                    }\n                }\n            }\n        }\n\n        let last_progress_time = builder.last_progress_time().max(\n            self.global_sync\n                .last_progress()\n                .unwrap_or_else(Timestamp::zero),\n        );\n\n        BlockSynchronizerProgress::Syncing(\n            
builder.block_hash(),\n            builder.block_height(),\n            last_progress_time,\n        )\n    }\n\n    fn status(&self) -> BlockSynchronizerStatus {\n        BlockSynchronizerStatus::new(\n            self.historical.as_ref().map(|builder| {\n                BlockSyncStatus::new(\n                    builder.block_hash(),\n                    builder.block_height(),\n                    builder.block_acquisition_state().to_string(),\n                )\n            }),\n            self.forward.as_ref().map(|builder| {\n                BlockSyncStatus::new(\n                    builder.block_hash(),\n                    builder.block_height(),\n                    builder.block_acquisition_state().to_string(),\n                )\n            }),\n        )\n    }\n\n    fn get_builder(\n        &mut self,\n        block_hash: BlockHash,\n        decrement_latch: bool,\n    ) -> Option<&mut BlockBuilder> {\n        match (&mut self.forward, &mut self.historical) {\n            (Some(builder), _) | (_, Some(builder)) if builder.block_hash() == block_hash => {\n                if decrement_latch {\n                    builder.latch_decrement();\n                }\n                Some(builder)\n            }\n            _ => {\n                trace!(%block_hash, \"BlockSynchronizer: not currently synchronizing block\");\n                None\n            }\n        }\n    }\n}\n\nimpl<REv> InitializedComponent<REv> for BlockSynchronizer\nwhere\n    REv: ReactorEvent + From<FetcherRequest<Block>>,\n{\n    fn state(&self) -> &ComponentState {\n        &self.state\n    }\n\n    fn set_state(&mut self, new_state: ComponentState) {\n        info!(\n            ?new_state,\n            name = <Self as Component<MainEvent>>::name(self),\n            \"component state changed\"\n        );\n\n        self.state = new_state;\n    }\n}\n\nimpl<REv: ReactorEvent> Component<REv> for BlockSynchronizer {\n    type Event = Event;\n\n    fn handle_event(\n        &mut 
self,\n        effect_builder: EffectBuilder<REv>,\n        rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event> {\n        match &self.state {\n            ComponentState::Fatal(msg) => {\n                error!(\n                    msg,\n                    ?event,\n                    name = <Self as Component<MainEvent>>::name(self),\n                    \"should not handle this event when this component has fatal error\"\n                );\n                Effects::new()\n            }\n            ComponentState::Uninitialized => {\n                warn!(\n                    ?event,\n                    name = <Self as Component<MainEvent>>::name(self),\n                    \"should not handle this event when component is uninitialized\"\n                );\n                Effects::new()\n            }\n            ComponentState::Initializing => {\n                match event {\n                    Event::Initialize => {\n                        <Self as InitializedComponent<MainEvent>>::set_state(\n                            self,\n                            ComponentState::Initialized,\n                        );\n                        // start dishonest peer management on initialization\n                        effect_builder\n                            .set_timeout(self.config.disconnect_dishonest_peers_interval.into())\n                            .event(move |_| {\n                                Event::Request(BlockSynchronizerRequest::DishonestPeers)\n                            })\n                    }\n                    Event::Request(_)\n                    | Event::DisconnectFromPeer(_)\n                    | Event::MadeFinalizedBlock { .. }\n                    | Event::MarkBlockExecutionEnqueued(_)\n                    | Event::MarkBlockExecuted(_)\n                    | Event::MarkBlockCompleted { .. 
}\n                    | Event::BlockHeaderFetched(_)\n                    | Event::BlockFetched(_)\n                    | Event::ApprovalsHashesFetched(_)\n                    | Event::FinalitySignatureFetched(_)\n                    | Event::SyncLeapFetched(_)\n                    | Event::GlobalStateSynced { .. }\n                    | Event::GotExecutionResultsChecksum { .. }\n                    | Event::TransactionFetched { .. }\n                    | Event::ExecutionResultsFetched { .. }\n                    | Event::ExecutionResultsStored(_)\n                    | Event::AccumulatedPeers(_, _)\n                    | Event::NetworkPeers(_, _)\n                    | Event::GlobalStateSynchronizer(_) => {\n                        warn!(\n                            ?event,\n                            name = <Self as Component<MainEvent>>::name(self),\n                            \"should not handle this event when component is pending initialization\"\n                        );\n                        Effects::new()\n                    }\n                }\n            }\n            ComponentState::Initialized => match event {\n                Event::Initialize => {\n                    error!(\n                        ?event,\n                        name = <Self as Component<MainEvent>>::name(self),\n                        \"component already initialized\"\n                    );\n                    Effects::new()\n                }\n                Event::Request(request) => match request {\n                    // the rpc and rest servers include block sync data on their status responses\n                    BlockSynchronizerRequest::Status { responder } => {\n                        responder.respond(self.status()).ignore()\n                    }\n                    // prompts for what data (if any) is needed next to acquire block(s) being\n                    // sync'd\n                    BlockSynchronizerRequest::NeedNext => 
self.need_next(effect_builder, rng),\n                    // this component is periodically asked for any peers that have provided false\n                    // data (if any) which are then disconnected from\n                    BlockSynchronizerRequest::DishonestPeers => {\n                        let mut effects: Effects<Self::Event> = self\n                            .dishonest_peers()\n                            .into_iter()\n                            .flat_map(|node_id| {\n                                effect_builder\n                                    .announce_block_peer_with_justification(\n                                        node_id,\n                                        BlocklistJustification::DishonestPeer,\n                                    )\n                                    .ignore()\n                            })\n                            .collect();\n                        self.flush_dishonest_peers();\n                        effects.extend(\n                            effect_builder\n                                .set_timeout(self.config.disconnect_dishonest_peers_interval.into())\n                                .event(move |_| {\n                                    Event::Request(BlockSynchronizerRequest::DishonestPeers)\n                                }),\n                        );\n                        effects\n                    }\n\n                    // this is a request that's separate from a typical block synchronizer flow;\n                    // it's sent when we need to sync global states of block after an upgrade\n                    // and its parent in order to check whether the validators have been\n                    // changed by the upgrade\n                    BlockSynchronizerRequest::SyncGlobalStates(mut global_states) => {\n                        if let Some((block_hash, global_state_hash)) = global_states.pop() {\n                            let global_states_clone = 
global_states.clone();\n                            effect_builder\n                                .sync_global_state(block_hash, global_state_hash)\n                                .result(\n                                    move |_| {\n                                        Event::Request(BlockSynchronizerRequest::SyncGlobalStates(\n                                            global_states_clone,\n                                        ))\n                                    },\n                                    move |_| {\n                                        global_states.push((block_hash, global_state_hash));\n                                        Event::Request(BlockSynchronizerRequest::SyncGlobalStates(\n                                            global_states,\n                                        ))\n                                    },\n                                )\n                        } else {\n                            Effects::new()\n                        }\n                    }\n                },\n                // tunnel event to global state synchronizer\n                // global_state_sync is a black box; we do not hook need next here\n                // global_state_sync signals the historical sync builder at the end of its process,\n                // and need next is then re-hooked to get the rest of the block\n                Event::GlobalStateSynchronizer(event) => {\n                    let processed_event = match event {\n                        GlobalStateSynchronizerEvent::GetPeers(_) => {\n                            let peers = self.historical.as_ref().map_or_else(Vec::new, |builder| {\n                                builder.peer_list().qualified_peers_up_to(\n                                    rng,\n                                    self.config.max_parallel_trie_fetches as usize,\n                                )\n                            });\n                            
GlobalStateSynchronizerEvent::GetPeers(peers)\n                        }\n                        event => event,\n                    };\n                    reactor::wrap_effects(\n                        Event::GlobalStateSynchronizer,\n                        self.global_sync\n                            .handle_event(effect_builder, rng, processed_event),\n                    )\n                }\n                // when a peer is disconnected from for any reason, disqualify peer\n                Event::DisconnectFromPeer(node_id) => {\n                    self.disqualify_peer(node_id);\n                    Effects::new()\n                }\n                Event::MarkBlockExecutionEnqueued(block_hash) => {\n                    // when syncing a forward block the synchronizer considers it\n                    // finished after it has been successfully enqueued for execution\n                    self.register_block_execution_enqueued(&block_hash);\n                    Effects::new()\n                }\n                Event::MarkBlockExecuted(block_hash) => {\n                    // when syncing a forward block the synchronizer considers it\n                    // synced after it has been successfully executed and marked\n                    // complete in storage.\n                    self.register_block_executed(&block_hash);\n                    Effects::new()\n                }\n                Event::MarkBlockCompleted { block_hash, is_new } => {\n                    // when syncing an historical block, the synchronizer considers it\n                    // finished after receiving confirmation that the complete block\n                    // has been stored.\n                    self.register_marked_complete(effect_builder, &block_hash, is_new)\n                }\n\n                // --- each of the following events MUST return need next  ---\n\n                // for both historical and forward sync, the block header has been fetched\n                
Event::BlockHeaderFetched(result) => {\n                    self.block_header_fetched(result);\n                    self.need_next(effect_builder, rng)\n                }\n                // for both historical and forward sync, the block body has been fetched\n                Event::BlockFetched(result) => {\n                    self.block_fetched(result);\n                    self.need_next(effect_builder, rng)\n                }\n                // for both historical and forward sync, a finality signature has been fetched\n                Event::FinalitySignatureFetched(result) => {\n                    self.finality_signature_fetched(result);\n                    self.need_next(effect_builder, rng)\n                }\n                // for both historical and forward sync, post-1.4 blocks track approvals hashes\n                // for the transactions they contain\n                Event::ApprovalsHashesFetched(result) => {\n                    self.approvals_hashes_fetched(result);\n                    self.need_next(effect_builder, rng)\n                }\n                Event::SyncLeapFetched(result) => {\n                    self.sync_leap_fetched(result);\n                    self.need_next(effect_builder, rng)\n                }\n                // we use the existence of n execution results checksum as an expedient way to\n                // determine if a block is post-1.4\n                Event::GotExecutionResultsChecksum { block_hash, result } => {\n                    self.got_execution_results_checksum(block_hash, result);\n                    self.need_next(effect_builder, rng)\n                }\n                // historical sync needs to know that global state has been sync'd\n                Event::GlobalStateSynced { block_hash, result } => {\n                    self.global_state_synced(block_hash, result);\n                    self.need_next(effect_builder, rng)\n                }\n                // historical sync needs to know that 
execution results have been fetched\n                Event::ExecutionResultsFetched { block_hash, result } => {\n                    let mut effects =\n                        self.execution_results_fetched(effect_builder, block_hash, result);\n                    effects.extend(self.need_next(effect_builder, rng));\n                    effects\n                }\n                // historical sync needs to know that execution effects have been stored\n                Event::ExecutionResultsStored(block_hash) => {\n                    self.execution_results_stored(block_hash);\n                    self.need_next(effect_builder, rng)\n                }\n                // for pre-1.5 blocks we use the legacy deploy fetcher, otherwise we use the\n                // transaction fetcher but the results of both are forwarded to this\n                // handler\n                Event::TransactionFetched { block_hash, result } => {\n                    match result {\n                        Either::Left(Ok(fetched_legacy_deploy)) => {\n                            let deploy_id = fetched_legacy_deploy.id();\n                            debug!(%block_hash, ?deploy_id, \"BlockSynchronizer: fetched legacy deploy\");\n                            self.transaction_fetched(block_hash, fetched_legacy_deploy.convert())\n                        }\n                        Either::Right(Ok(fetched_txn)) => {\n                            let txn_id = fetched_txn.id();\n                            debug!(%block_hash, %txn_id, \"BlockSynchronizer: fetched transaction\");\n                            self.transaction_fetched(block_hash, fetched_txn)\n                        }\n                        Either::Left(Err(error)) => {\n                            if let Some(builder) = self.get_builder(block_hash, false) {\n                                if builder.waiting_for_transactions() {\n                                    builder.latch_decrement();\n                                
}\n                            }\n\n                            debug!(%error, \"BlockSynchronizer: failed to fetch legacy deploy\");\n                        }\n                        Either::Right(Err(error)) => {\n                            if let Some(builder) = self.get_builder(block_hash, false) {\n                                if builder.waiting_for_transactions() {\n                                    builder.latch_decrement();\n                                }\n                            }\n\n                            debug!(%error, \"BlockSynchronizer: failed to fetch transaction\");\n                        }\n                    };\n                    self.need_next(effect_builder, rng)\n                }\n                // fresh peers to apply (random sample from network)\n                Event::NetworkPeers(block_hash, peers) => {\n                    debug!(%block_hash, \"BlockSynchronizer: got {} peers from network\", peers.len());\n                    self.peers_accumulated(block_hash, peers);\n                    self.need_next(effect_builder, rng)\n                }\n                // fresh peers to apply (qualified peers from accumulator)\n                Event::AccumulatedPeers(block_hash, Some(peers)) => {\n                    debug!(%block_hash, \"BlockSynchronizer: got {} peers from accumulator\", peers.len());\n                    self.peers_accumulated(block_hash, peers);\n                    self.need_next(effect_builder, rng)\n                }\n                // no more peers available; periodically retry via need next...\n                // the node will likely get more peers over time and resume\n                Event::AccumulatedPeers(block_hash, None) => {\n                    debug!(%block_hash, \"BlockSynchronizer: got 0 peers from accumulator\");\n                    self.peers_accumulated(block_hash, vec![]);\n                    self.need_next(effect_builder, rng)\n                }\n                
Event::MadeFinalizedBlock { block_hash, result } => {\n                    // when syncing a forward block the node does not acquire\n                    // global state and execution results from peers; instead\n                    // the node attempts to execute the block to produce the\n                    // global state and execution results and check the results\n                    // first, the block it must be turned into a finalized block\n                    // and then enqueued for execution.\n                    self.register_made_finalized_block(&block_hash, result);\n                    self.need_next(effect_builder, rng)\n                }\n            },\n        }\n    }\n\n    fn name(&self) -> &str {\n        COMPONENT_NAME\n    }\n}\n\nimpl<REv: ReactorEvent> ValidatorBoundComponent<REv> for BlockSynchronizer {\n    fn handle_validators(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        rng: &mut NodeRng,\n    ) -> Effects<Self::Event> {\n        info!(\"BlockSynchronizer: handling updated validator matrix\");\n        if let Some(block_builder) = &mut self.forward {\n            block_builder.register_era_validator_weights(&self.validator_matrix);\n        }\n        if let Some(block_builder) = &mut self.historical {\n            block_builder.register_era_validator_weights(&self.validator_matrix);\n        }\n        self.need_next(effect_builder, rng)\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_validator/config.rs",
    "content": "use datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\n/// Configuration options for block validation.\n#[derive(Copy, Clone, DataSize, Debug, Deserialize, Serialize)]\npub struct Config {\n    pub max_completed_entries: u32,\n}\n\nimpl Default for Config {\n    fn default() -> Self {\n        Config {\n            max_completed_entries: 3,\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_validator/event.rs",
    "content": "use derive_more::{Display, From};\n\nuse casper_types::{EraId, FinalitySignature, FinalitySignatureId, Transaction, TransactionHash};\n\nuse crate::{\n    components::fetcher::FetchResult, effect::requests::BlockValidationRequest,\n    types::BlockWithMetadata,\n};\n\n#[derive(Debug, From, Display)]\npub(crate) enum Event {\n    #[from]\n    Request(BlockValidationRequest),\n\n    #[display(fmt = \"past blocks read from storage\")]\n    GotPastBlocksWithMetadata {\n        past_blocks_with_metadata: Vec<Option<BlockWithMetadata>>,\n        request: BlockValidationRequest,\n    },\n\n    #[display(fmt = \"block {} has been stored\", _0)]\n    BlockStored(u64),\n\n    #[display(fmt = \"{} fetched\", transaction_hash)]\n    TransactionFetched {\n        transaction_hash: TransactionHash,\n        result: FetchResult<Transaction>,\n    },\n\n    #[display(fmt = \"{} fetched\", finality_signature_id)]\n    FinalitySignatureFetched {\n        finality_signature_id: Box<FinalitySignatureId>,\n        result: FetchResult<FinalitySignature>,\n    },\n\n    #[display(fmt = \"{} price for era {}\", _1, _0)]\n    UpdateEraGasPrice(EraId, u8),\n}\n"
  },
  {
    "path": "node/src/components/block_validator/state.rs",
    "content": "use std::{\n    collections::{hash_map::Entry, BTreeSet, HashMap, HashSet},\n    fmt::{self, Debug, Display, Formatter},\n    iter, mem,\n};\n\nuse datasize::DataSize;\nuse tracing::{debug, error, warn};\n\nuse casper_types::{\n    Approval, ApprovalsHash, Chainspec, FinalitySignatureId, Timestamp, TransactionConfig,\n    TransactionHash,\n};\n\nuse crate::{\n    components::consensus::{ClContext, ProposedBlock},\n    effect::Responder,\n    types::{\n        appendable_block::AppendableBlock, InvalidProposalError, NodeId, TransactionFootprint,\n    },\n};\n\n/// The state of a peer which claims to be a holder of the transactions.\n#[derive(Clone, Copy, Eq, PartialEq, DataSize, Debug)]\npub(super) enum HolderState {\n    /// No fetch attempt has been made using this peer.\n    Unasked,\n    /// At least one fetch attempt has been made and no fetch attempts have failed when using this\n    /// peer.\n    Asked,\n    /// At least one fetch attempt has failed when using this peer.\n    Failed,\n}\n\n/// The return type of `BlockValidationState::add_responder`.\npub(super) enum AddResponderResult {\n    /// The responder was added, meaning validation is still ongoing.\n    Added,\n    /// Validation is completed, so the responder should be called with the provided value.\n    ValidationCompleted {\n        responder: Responder<Result<(), Box<InvalidProposalError>>>,\n        response_to_send: Result<(), Box<InvalidProposalError>>,\n    },\n}\n\n/// The return type of `BlockValidationState::start_fetching`.\n#[derive(Eq, PartialEq, Debug)]\npub(super) enum MaybeStartFetching {\n    /// Should start a new round of fetches.\n    Start {\n        holder: NodeId,\n        missing_transactions: HashMap<TransactionHash, ApprovalsHash>,\n        missing_signatures: HashSet<FinalitySignatureId>,\n    },\n    /// No new round of fetches should be started as one is already in progress.\n    Ongoing,\n    /// We still have missing transactions, but all holders have 
failed.\n    Unable,\n    /// Validation has succeeded already.\n    ValidationSucceeded,\n    /// Validation has failed already.\n    ValidationFailed,\n}\n\n#[derive(Clone, Eq, PartialEq, DataSize, Debug)]\npub(super) struct ApprovalInfo {\n    approvals: BTreeSet<Approval>,\n    approvals_hash: ApprovalsHash,\n}\n\nimpl ApprovalInfo {\n    fn new(approvals: BTreeSet<Approval>, approvals_hash: ApprovalsHash) -> Self {\n        ApprovalInfo {\n            approvals,\n            approvals_hash,\n        }\n    }\n}\n\n/// State of the current process of block validation.\n///\n/// Tracks whether there are transactions still missing and who is interested in the final\n/// result.\n#[derive(DataSize, Debug)]\npub(super) enum BlockValidationState {\n    /// The validity is not yet decided.\n    InProgress {\n        /// Appendable block ensuring that the transactions satisfy the validity conditions.\n        appendable_block: AppendableBlock,\n        /// The set of approvals contains approvals from transactions that would be finalized with\n        /// the block.\n        missing_transactions: HashMap<TransactionHash, ApprovalInfo>,\n        /// The set of finality signatures for past blocks cited in this block.\n        missing_signatures: HashSet<FinalitySignatureId>,\n        /// The set of peers which each claim to hold all the transactions.\n        holders: HashMap<NodeId, HolderState>,\n        /// A list of responders that are awaiting an answer.\n        responders: Vec<Responder<Result<(), Box<InvalidProposalError>>>>,\n    },\n    /// The proposed block with the given timestamp is valid.\n    Valid(Timestamp),\n    /// The proposed block with the given timestamp is invalid, and the validation error.\n    ///\n    /// Note that only hard failures in validation will result in this state.  
For soft failures,\n    /// like failing to fetch from a peer, the state will remain `Unknown`, even if there are no\n    /// more peers to ask, since more peers could be provided before this `BlockValidationState` is\n    /// purged.\n    Invalid {\n        timestamp: Timestamp,\n        error: Box<InvalidProposalError>,\n    },\n}\n\npub(super) type MaybeBlockValidationStateResponder =\n    Option<Responder<Result<(), Box<InvalidProposalError>>>>;\n\nimpl BlockValidationState {\n    /// Returns a new `BlockValidationState`.\n    ///\n    /// If the new state is `Valid` or `Invalid`, the provided responder is also returned so it can\n    /// be actioned.\n    pub(super) fn new(\n        proposed_block: &ProposedBlock<ClContext>,\n        missing_signatures: HashSet<FinalitySignatureId>,\n        sender: NodeId,\n        responder: Responder<Result<(), Box<InvalidProposalError>>>,\n        current_gas_price: u8,\n        chainspec: &Chainspec,\n    ) -> (Self, MaybeBlockValidationStateResponder) {\n        let transaction_count = proposed_block.transaction_count();\n        if transaction_count == 0 && missing_signatures.is_empty() {\n            let state = BlockValidationState::Valid(proposed_block.timestamp());\n            return (state, Some(responder));\n        }\n\n        // this is an optimization, rejects proposal that exceeds lane limits OR\n        // proposes a transaction in an unsupported lane\n        if let Err(err) =\n            Self::validate_transaction_lane_counts(proposed_block, &chainspec.transaction_config)\n        {\n            let state = BlockValidationState::Invalid {\n                timestamp: proposed_block.timestamp(),\n                error: err,\n            };\n            return (state, Some(responder));\n        }\n\n        let proposed_gas_price = proposed_block.value().current_gas_price();\n        if current_gas_price != proposed_gas_price {\n            let state = BlockValidationState::Invalid {\n                
timestamp: proposed_block.timestamp(),\n                error: Box::new(InvalidProposalError::InvalidGasPrice {\n                    proposed_gas_price,\n                    current_gas_price,\n                }),\n            };\n            return (state, Some(responder));\n        }\n\n        let mut missing_transactions = HashMap::new();\n\n        for (transaction_hash, approvals) in proposed_block.all_transactions() {\n            let approval_info: ApprovalInfo = match ApprovalsHash::compute(approvals) {\n                Ok(approvals_hash) => ApprovalInfo::new(approvals.clone(), approvals_hash),\n                Err(error) => {\n                    warn!(%transaction_hash, %error, \"could not compute approvals hash\");\n                    let state = BlockValidationState::Invalid {\n                        timestamp: proposed_block.timestamp(),\n                        error: Box::new(InvalidProposalError::InvalidApprovalsHash(format!(\n                            \"{}\",\n                            error\n                        ))),\n                    };\n                    return (state, Some(responder));\n                }\n            };\n\n            // this checks to see if the same transaction has been included multiple\n            // times with different approvals, which is invalid\n            if missing_transactions\n                .insert(*transaction_hash, approval_info)\n                .is_some()\n            {\n                warn!(%transaction_hash, \"duplicated transaction in proposed block\");\n                let state = BlockValidationState::Invalid {\n                    timestamp: proposed_block.timestamp(),\n                    error: Box::new(InvalidProposalError::CompetingApprovals {\n                        transaction_hash: *transaction_hash,\n                    }),\n                };\n                return (state, Some(responder));\n            }\n        }\n\n        let state = BlockValidationState::InProgress {\n  
          appendable_block: AppendableBlock::new(\n                chainspec.transaction_config.clone(),\n                current_gas_price,\n                proposed_block.timestamp(),\n            ),\n            missing_transactions,\n            missing_signatures,\n            holders: iter::once((sender, HolderState::Unasked)).collect(),\n            responders: vec![responder],\n        };\n\n        (state, None)\n    }\n\n    fn validate_transaction_lane_counts(\n        block: &ProposedBlock<ClContext>,\n        config: &TransactionConfig,\n    ) -> Result<(), Box<InvalidProposalError>> {\n        let lanes = config.transaction_v1_config.get_supported_lanes();\n        if block.value().has_transaction_in_unsupported_lane(&lanes) {\n            return Err(Box::new(InvalidProposalError::UnsupportedLane));\n        }\n        for supported_lane in lanes {\n            let transactions = block.value().count(Some(supported_lane));\n            let lane_count_limit = config\n                .transaction_v1_config\n                .get_max_transaction_count(supported_lane);\n            if lane_count_limit < transactions as u64 {\n                warn!(\n                    supported_lane,\n                    lane_count_limit, transactions, \"too many transactions in lane\"\n                );\n                return Err(Box::new(InvalidProposalError::ExceedsLaneLimit {\n                    lane_id: supported_lane,\n                }));\n            }\n        }\n\n        Ok(())\n    }\n\n    /// Adds the given responder to the collection if the current state is `InProgress` and returns\n    /// `Added`.\n    ///\n    /// If the state is not `InProgress`, `ValidationCompleted` is returned with the responder and\n    /// the value which should be provided to the responder.\n    pub(super) fn add_responder(\n        &mut self,\n        responder: Responder<Result<(), Box<InvalidProposalError>>>,\n    ) -> AddResponderResult {\n        match self {\n            
BlockValidationState::InProgress { responders, .. } => {\n                responders.push(responder);\n                AddResponderResult::Added\n            }\n            BlockValidationState::Valid(_) => AddResponderResult::ValidationCompleted {\n                responder,\n                response_to_send: Ok(()),\n            },\n            BlockValidationState::Invalid { error, .. } => {\n                AddResponderResult::ValidationCompleted {\n                    responder,\n                    response_to_send: Err(error.clone()),\n                }\n            }\n        }\n    }\n\n    /// If the current state is `InProgress` and the peer isn't already known, adds the peer.\n    /// Otherwise, any existing entry is not updated and `false` is returned.\n    pub(super) fn add_holder(&mut self, holder: NodeId) {\n        match self {\n            BlockValidationState::InProgress {\n                appendable_block,\n                holders,\n                ..\n            } => match holders.entry(holder) {\n                Entry::Occupied(entry) => {\n                    debug!(\n                        block_timestamp = %appendable_block.timestamp(),\n                        peer = %entry.key(),\n                        \"already registered peer as holder for block validation\"\n                    );\n                }\n                Entry::Vacant(entry) => {\n                    entry.insert(HolderState::Unasked);\n                }\n            },\n            BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. } => {\n                warn!(state = %self, \"unexpected state when adding holder\");\n            }\n        }\n    }\n\n    /// If the current state is `InProgress` and the holder is present, sets the holder's state to\n    /// `Failed`.\n    pub(super) fn try_mark_holder_failed(&mut self, holder: &NodeId) {\n        if let BlockValidationState::InProgress { holders, .. 
} = self {\n            if let Some(holder_state) = holders.get_mut(holder) {\n                debug_assert!(*holder_state != HolderState::Unasked);\n                *holder_state = HolderState::Failed;\n            }\n        }\n    }\n\n    /// Returns fetch info based on the current state:\n    ///   * if `InProgress` and there are no holders `Asked` (i.e. no ongoing fetches) and at least\n    ///     one `Unasked` holder, returns `Start`\n    ///   * if `InProgress` and any holder `Asked`, returns `Ongoing`\n    ///   * if `InProgress` and all holders `Failed`, returns `Unable`\n    ///   * if `Valid` or `Invalid`, returns `ValidationSucceeded` or `ValidationFailed`\n    ///     respectively\n    pub(super) fn start_fetching(&mut self) -> MaybeStartFetching {\n        match self {\n            BlockValidationState::InProgress {\n                missing_transactions,\n                missing_signatures,\n                holders,\n                ..\n            } => {\n                if missing_transactions.is_empty() && missing_signatures.is_empty() {\n                    error!(\n                        \"should always have missing transactions or signatures while in state \\\n                        `InProgress`\"\n                    );\n                    debug_assert!(false, \"invalid state\");\n                    return MaybeStartFetching::ValidationFailed;\n                }\n                let mut unasked = None;\n                for (peer_id, holder_state) in holders.iter() {\n                    match holder_state {\n                        HolderState::Unasked => {\n                            unasked = Some(*peer_id);\n                        }\n                        HolderState::Asked => return MaybeStartFetching::Ongoing,\n                        HolderState::Failed => {}\n                    }\n                }\n\n                let holder = match unasked {\n                    Some(peer) => peer,\n                    None => return 
MaybeStartFetching::Unable,\n                };\n                // Mark the holder as `Asked`.  Safe to `expect` as we just found the entry above.\n                *holders.get_mut(&holder).expect(\"must be in set\") = HolderState::Asked;\n                let missing_transactions = missing_transactions\n                    .iter()\n                    .map(|(dt_hash, infos)| (*dt_hash, infos.approvals_hash))\n                    .collect();\n                let missing_signatures = missing_signatures.clone();\n                MaybeStartFetching::Start {\n                    holder,\n                    missing_transactions,\n                    missing_signatures,\n                }\n            }\n            BlockValidationState::Valid(_) => MaybeStartFetching::ValidationSucceeded,\n            BlockValidationState::Invalid { .. } => MaybeStartFetching::ValidationFailed,\n        }\n    }\n\n    pub(super) fn take_responders(\n        &mut self,\n    ) -> Vec<Responder<Result<(), Box<InvalidProposalError>>>> {\n        match self {\n            BlockValidationState::InProgress { responders, .. } => mem::take(responders),\n            BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. 
} => vec![],\n        }\n    }\n\n    /// If the current state is `InProgress` and `dt_hash` is present, tries to add the footprint to\n    /// the appendable block to continue validation of the proposed block.\n    pub(super) fn try_add_transaction_footprint(\n        &mut self,\n        transaction_hash: &TransactionHash,\n        footprint: &TransactionFootprint,\n    ) -> Vec<Responder<Result<(), Box<InvalidProposalError>>>> {\n        let (new_state, responders) = match self {\n            BlockValidationState::InProgress {\n                appendable_block,\n                missing_transactions,\n                missing_signatures,\n                responders,\n                ..\n            } => {\n                let approvals_info = match missing_transactions.remove(transaction_hash) {\n                    Some(info) => info,\n                    None => {\n                        // If this transaction is not present, just return.\n                        return vec![];\n                    }\n                };\n                // Try adding the footprint to the appendable block to see if the block remains\n                // valid.\n                let approvals = approvals_info.approvals;\n                let footprint = footprint.clone().with_approvals(approvals);\n                match appendable_block.add_transaction(&footprint) {\n                    Ok(_) => {\n                        if !missing_transactions.is_empty() || !missing_signatures.is_empty() {\n                            // The appendable block is still valid, but we still have missing\n                            // transactions - nothing further to do here.\n                            debug!(\n                                block_timestamp = %appendable_block.timestamp(),\n                                missing_transactions_len = missing_transactions.len(),\n                                \"still missing transactions - block validation incomplete\"\n                            
);\n                            return vec![];\n                        }\n                        debug!(\n                            block_timestamp = %appendable_block.timestamp(),\n                            \"no further missing transactions - block validation complete\"\n                        );\n                        let new_state = BlockValidationState::Valid(appendable_block.timestamp());\n                        (new_state, mem::take(responders))\n                    }\n                    Err(error) => {\n                        warn!(%transaction_hash, ?footprint, %error, \"block invalid\");\n                        let new_state = BlockValidationState::Invalid {\n                            timestamp: appendable_block.timestamp(),\n                            error: error.into(),\n                        };\n                        (new_state, mem::take(responders))\n                    }\n                }\n            }\n            BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. 
} => return vec![],\n        };\n        *self = new_state;\n        responders\n    }\n\n    /// If the current state is `InProgress` and `dt_hash` is present, tries to add the footprint to\n    /// the appendable block to continue validation of the proposed block.\n    pub(super) fn try_add_signature(\n        &mut self,\n        finality_signature_id: &FinalitySignatureId,\n    ) -> Vec<Responder<Result<(), Box<InvalidProposalError>>>> {\n        let (new_state, responders) = match self {\n            BlockValidationState::InProgress {\n                appendable_block,\n                missing_transactions,\n                missing_signatures,\n                responders,\n                ..\n            } => {\n                missing_signatures.remove(finality_signature_id);\n                if missing_signatures.is_empty() && missing_transactions.is_empty() {\n                    debug!(\n                        block_timestamp = %appendable_block.timestamp(),\n                        \"no further missing transactions or signatures - block validation complete\"\n                    );\n                    let new_state = BlockValidationState::Valid(appendable_block.timestamp());\n                    (new_state, mem::take(responders))\n                } else {\n                    debug!(\n                        block_timestamp = %appendable_block.timestamp(),\n                        missing_transactions_len = missing_transactions.len(),\n                        missing_signatures_len = missing_signatures.len(),\n                        \"still missing transactions or signatures - block validation incomplete\"\n                    );\n                    return vec![];\n                }\n            }\n            BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. 
} => return vec![],\n        };\n        *self = new_state;\n        responders\n    }\n\n    /// If the current state is `InProgress` and `dt_hash` is present, sets the state to `Invalid`\n    /// and returns the responders.\n    pub(super) fn try_mark_invalid(\n        &mut self,\n        transaction_hash: &TransactionHash,\n    ) -> Vec<Responder<Result<(), Box<InvalidProposalError>>>> {\n        let (timestamp, responders) = match self {\n            BlockValidationState::InProgress {\n                appendable_block,\n                missing_transactions,\n                responders,\n                ..\n            } => {\n                if !missing_transactions.contains_key(transaction_hash) {\n                    return vec![];\n                }\n                (appendable_block.timestamp(), mem::take(responders))\n            }\n            BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. } => return vec![],\n        };\n        *self = BlockValidationState::Invalid {\n            timestamp,\n            error: Box::new(InvalidProposalError::UnfetchedTransaction {\n                transaction_hash: *transaction_hash,\n            }),\n        };\n        responders\n    }\n\n    /// If the current state is `InProgress` and `finality_signature_id` is present, sets the state\n    /// to `Invalid` and returns the responders.\n    pub(super) fn try_mark_invalid_signature(\n        &mut self,\n        finality_signature_id: &FinalitySignatureId,\n    ) -> Vec<Responder<Result<(), Box<InvalidProposalError>>>> {\n        let (timestamp, responders) = match self {\n            BlockValidationState::InProgress {\n                appendable_block,\n                missing_signatures,\n                responders,\n                ..\n            } => {\n                if !missing_signatures.contains(finality_signature_id) {\n                    return vec![];\n                }\n                (appendable_block.timestamp(), 
mem::take(responders))\n            }\n            BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. } => return vec![],\n        };\n        *self = BlockValidationState::Invalid {\n            timestamp,\n            error: Box::new(InvalidProposalError::InvalidFinalitySignature(\n                finality_signature_id.clone(),\n            )),\n        };\n        responders\n    }\n\n    pub(super) fn block_timestamp_if_completed(&self) -> Option<Timestamp> {\n        match self {\n            BlockValidationState::InProgress { .. } => None,\n            BlockValidationState::Valid(timestamp)\n            | BlockValidationState::Invalid { timestamp, .. } => Some(*timestamp),\n        }\n    }\n\n    #[cfg(test)]\n    pub(super) fn missing_hashes(&self) -> Vec<TransactionHash> {\n        match self {\n            BlockValidationState::InProgress {\n                missing_transactions,\n                ..\n            } => missing_transactions.keys().copied().collect(),\n            BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. } => vec![],\n        }\n    }\n\n    #[cfg(test)]\n    pub(super) fn holders_mut(&mut self) -> Option<&mut HashMap<NodeId, HolderState>> {\n        match self {\n            BlockValidationState::InProgress { holders, .. } => Some(holders),\n            BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. } => None,\n        }\n    }\n\n    #[cfg(test)]\n    pub(super) fn responder_count(&self) -> usize {\n        match self {\n            BlockValidationState::InProgress { responders, .. } => responders.len(),\n            BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. } => 0,\n        }\n    }\n\n    #[cfg(test)]\n    pub(super) fn completed(&self) -> bool {\n        !matches!(self, BlockValidationState::InProgress { .. 
})\n    }\n}\n\nimpl Display for BlockValidationState {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            BlockValidationState::InProgress {\n                appendable_block,\n                missing_transactions,\n                missing_signatures,\n                holders,\n                responders,\n            } => {\n                write!(\n                    formatter,\n                    \"BlockValidationState::InProgress({}, {} missing transactions, \\\n                    {} missing signatures, {} holders, {} responders)\",\n                    appendable_block,\n                    missing_transactions.len(),\n                    missing_signatures.len(),\n                    holders.len(),\n                    responders.len()\n                )\n            }\n            BlockValidationState::Valid(timestamp) => {\n                write!(formatter, \"BlockValidationState::Valid({timestamp})\")\n            }\n            BlockValidationState::Invalid { timestamp, error } => {\n                write!(\n                    formatter,\n                    \"BlockValidationState::Invalid({timestamp} {:?})\",\n                    error\n                )\n            }\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use futures::channel::oneshot;\n    use rand::Rng;\n\n    use casper_types::{\n        testing::TestRng, ChainspecRawBytes, TimeDiff, Transaction, TransactionHash, TransactionV1,\n    };\n\n    use super::{super::tests::*, *};\n    use crate::utils::Loadable;\n\n    struct Fixture<'a> {\n        rng: &'a mut TestRng,\n        transactions: Vec<Transaction>,\n        chainspec: Chainspec,\n    }\n\n    impl<'a> Fixture<'a> {\n        fn new(rng: &'a mut TestRng) -> Self {\n            let (chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources(\"local\");\n            Fixture {\n                rng,\n                transactions: vec![],\n                chainspec,\n      
      }\n        }\n\n        fn new_with_block_gas_limit(rng: &'a mut TestRng, block_limit: u64) -> Self {\n            let (mut chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources(\"local\");\n            chainspec.transaction_config.block_gas_limit = block_limit;\n            Fixture {\n                rng,\n                transactions: vec![],\n                chainspec,\n            }\n        }\n\n        fn footprints(&self) -> Vec<(TransactionHash, TransactionFootprint)> {\n            self.transactions\n                .iter()\n                .map(|transaction| {\n                    (\n                        transaction.hash(),\n                        TransactionFootprint::new(&self.chainspec, transaction)\n                            .expect(\"must create footprint\"),\n                    )\n                })\n                .collect()\n        }\n\n        fn new_state(\n            &mut self,\n            mint_count: u64,\n            auction_count: u64,\n            install_upgrade_count: u64,\n            standard_count: u64,\n        ) -> (BlockValidationState, MaybeBlockValidationStateResponder) {\n            let total_non_transfer_count = standard_count + auction_count + install_upgrade_count;\n            let ttl = TimeDiff::from_seconds(10);\n            let timestamp = Timestamp::from(1000 + total_non_transfer_count + mint_count);\n\n            let mint_for_block = {\n                let mut ret = vec![];\n                for _ in 0..mint_count {\n                    let txn = new_mint(self.rng, timestamp, ttl);\n                    ret.push((txn.hash(), txn.approvals().clone()));\n                    self.transactions.push(txn);\n                }\n\n                ret\n            };\n\n            let auction_for_block = {\n                let mut ret = vec![];\n                for _ in 0..auction_count {\n                    let txn = new_auction(self.rng, timestamp, ttl);\n                    ret.push((txn.hash(), 
txn.approvals().clone()));\n                    self.transactions.push(txn);\n                }\n                ret\n            };\n\n            let install_upgrade_for_block = {\n                let mut ret = vec![];\n                for _ in 0..install_upgrade_count {\n                    let txn: Transaction =\n                        TransactionV1::random_install_upgrade(self.rng, Some(timestamp), Some(ttl))\n                            .into();\n                    ret.push((txn.hash(), txn.approvals().clone()));\n                    self.transactions.push(txn);\n                }\n                ret\n            };\n\n            let standard_for_block = {\n                let mut ret = vec![];\n                for _ in 0..standard_count {\n                    let txn = new_standard(self.rng, timestamp, ttl);\n                    ret.push((txn.hash(), txn.approvals().clone()));\n                    self.transactions.push(txn);\n                }\n                ret\n            };\n\n            let proposed_block = new_proposed_block(\n                timestamp,\n                mint_for_block,\n                auction_for_block,\n                install_upgrade_for_block,\n                standard_for_block,\n            );\n\n            BlockValidationState::new(\n                &proposed_block,\n                HashSet::new(),\n                NodeId::random(self.rng),\n                new_responder(),\n                1u8,\n                &self.chainspec,\n            )\n        }\n    }\n\n    fn new_responder() -> Responder<Result<(), Box<InvalidProposalError>>> {\n        let (sender, _receiver) = oneshot::channel();\n        Responder::without_shutdown(sender)\n    }\n\n    // Please note: values in the following test cases must match the production chainspec.\n    const MAX_LARGE_COUNT: u64 = 1;\n    const MAX_AUCTION_COUNT: u64 = 650;\n    const MAX_INSTALL_UPGRADE_COUNT: u64 = 1;\n    const MAX_MINT_COUNT: u64 = 650;\n\n    
#[derive(Debug)]\n    struct TestCase {\n        mint_count: u64,\n        auction_count: u64,\n        install_upgrade_count: u64,\n        standard_count: u64,\n        state_validator: fn((BlockValidationState, MaybeBlockValidationStateResponder)) -> bool,\n    }\n\n    const NO_TRANSACTIONS: TestCase = TestCase {\n        mint_count: 0,\n        auction_count: 0,\n        install_upgrade_count: 0,\n        standard_count: 0,\n        state_validator: |(state, responder)| {\n            responder.is_some() && matches!(state, BlockValidationState::Valid(_))\n        },\n    };\n\n    const FULL_AUCTION: TestCase = TestCase {\n        mint_count: 0,\n        auction_count: MAX_AUCTION_COUNT,\n        install_upgrade_count: 0,\n        standard_count: 0,\n        state_validator: |(state, responder)| {\n            responder.is_none() && matches!(state, BlockValidationState::InProgress { .. })\n        },\n    };\n\n    const LESS_THAN_MAX_AUCTION: TestCase = TestCase {\n        auction_count: FULL_AUCTION.auction_count - 1,\n        state_validator: |(state, responder)| {\n            responder.is_none() && matches!(state, BlockValidationState::InProgress { .. })\n        },\n        ..FULL_AUCTION\n    };\n\n    const TOO_MANY_AUCTION: TestCase = TestCase {\n        auction_count: FULL_AUCTION.auction_count + 1,\n        state_validator: |(state, responder)| {\n            responder.is_some() && matches!(state, BlockValidationState::Invalid { .. })\n        },\n        ..FULL_AUCTION\n    };\n\n    const FULL_INSTALL_UPGRADE: TestCase = TestCase {\n        mint_count: 0,\n        auction_count: 0,\n        install_upgrade_count: MAX_INSTALL_UPGRADE_COUNT,\n        standard_count: 0,\n        state_validator: |(state, responder)| {\n            responder.is_none() && matches!(state, BlockValidationState::InProgress { .. 
})\n        },\n    };\n\n    #[allow(dead_code)]\n    const LESS_THAN_MAX_INSTALL_UPGRADE: TestCase = TestCase {\n        install_upgrade_count: FULL_INSTALL_UPGRADE.install_upgrade_count - 1,\n        state_validator: |(state, responder)| {\n            responder.is_none() && matches!(state, BlockValidationState::InProgress { .. })\n        },\n        ..FULL_INSTALL_UPGRADE\n    };\n\n    const TOO_MANY_INSTALL_UPGRADE: TestCase = TestCase {\n        install_upgrade_count: FULL_INSTALL_UPGRADE.install_upgrade_count + 1,\n        state_validator: |(state, responder)| {\n            responder.is_some() && matches!(state, BlockValidationState::Invalid { .. })\n        },\n        ..FULL_INSTALL_UPGRADE\n    };\n\n    const FULL_STANDARD: TestCase = TestCase {\n        mint_count: 0,\n        auction_count: 0,\n        install_upgrade_count: 0,\n        standard_count: MAX_LARGE_COUNT,\n        state_validator: |(state, responder)| {\n            responder.is_none() && matches!(state, BlockValidationState::InProgress { .. })\n        },\n    };\n\n    // const LESS_THAN_MAX_STANDARD: TestCase = TestCase {\n    //     standard_count: FULL_STANDARD.standard_count - 1,\n    //     state_validator: |(state, responder)| {\n    //         responder.is_none() && matches!(state, BlockValidationState::InProgress { .. })\n    //     },\n    //     ..FULL_STANDARD\n    // };\n\n    const TOO_MANY_STANDARD: TestCase = TestCase {\n        standard_count: FULL_STANDARD.standard_count + 1,\n        state_validator: |(state, responder)| {\n            responder.is_some() && matches!(state, BlockValidationState::Invalid { .. })\n        },\n        ..FULL_STANDARD\n    };\n\n    const FULL_MINT: TestCase = TestCase {\n        mint_count: MAX_MINT_COUNT,\n        auction_count: 0,\n        install_upgrade_count: 0,\n        standard_count: 0,\n        state_validator: |(state, responder)| {\n            responder.is_none() && matches!(state, BlockValidationState::InProgress { .. 
})\n        },\n    };\n\n    const LESS_THAN_MAX_MINT: TestCase = TestCase {\n        mint_count: FULL_MINT.mint_count - 1,\n        state_validator: |(state, responder)| {\n            responder.is_none() && matches!(state, BlockValidationState::InProgress { .. })\n        },\n        ..FULL_MINT\n    };\n\n    const TOO_MANY_MINT: TestCase = TestCase {\n        mint_count: FULL_MINT.mint_count + 1,\n        state_validator: |(state, responder)| {\n            responder.is_some() && matches!(state, BlockValidationState::Invalid { .. })\n        },\n        ..FULL_MINT\n    };\n\n    fn run_test_case(\n        TestCase {\n            mint_count,\n            auction_count,\n            install_upgrade_count,\n            standard_count,\n            state_validator,\n        }: TestCase,\n        rng: &mut TestRng,\n    ) {\n        let mut fixture = Fixture::new(rng);\n        let state = fixture.new_state(\n            mint_count,\n            auction_count,\n            install_upgrade_count,\n            standard_count,\n        );\n        assert!(state_validator(state));\n    }\n\n    #[test]\n    fn new_state_should_be_valid_with_no_transactions() {\n        let mut rng = TestRng::new();\n        run_test_case(NO_TRANSACTIONS, &mut rng);\n    }\n\n    #[test]\n    fn new_state_should_respect_auction_limits() {\n        let mut rng = TestRng::new();\n        run_test_case(TOO_MANY_AUCTION, &mut rng);\n        run_test_case(FULL_AUCTION, &mut rng);\n        run_test_case(LESS_THAN_MAX_AUCTION, &mut rng);\n    }\n\n    #[test]\n    fn new_state_should_respect_install_upgrade_limits() {\n        let mut rng = TestRng::new();\n        run_test_case(TOO_MANY_INSTALL_UPGRADE, &mut rng);\n        run_test_case(FULL_INSTALL_UPGRADE, &mut rng);\n        //TODO: Fix test setup so this isn't identical to the no transactions case\n        //run_test_case(LESS_THAN_MAX_INSTALL_UPGRADE, &mut rng);\n    }\n\n    #[test]\n    fn new_state_should_respect_standard_limits() 
{\n        let mut rng = TestRng::new();\n        run_test_case(TOO_MANY_STANDARD, &mut rng);\n        run_test_case(FULL_STANDARD, &mut rng);\n        // NOTE: current prod chainspec has a limit of 1 large transaction, so one less is 0 which\n        // makes the test invalid run_test_case(LESS_THAN_MAX_STANDARD, &mut rng);\n    }\n\n    #[test]\n    fn new_state_should_respect_mint_limits() {\n        let mut rng = TestRng::new();\n        run_test_case(TOO_MANY_MINT, &mut rng);\n        run_test_case(FULL_MINT, &mut rng);\n        run_test_case(LESS_THAN_MAX_MINT, &mut rng);\n    }\n\n    #[test]\n    fn new_state_should_be_invalid_with_duplicated_transaction() {\n        let mut rng = TestRng::new();\n        let fixture = Fixture::new(&mut rng);\n\n        let timestamp = Timestamp::from(1000);\n        let mint = vec![new_mint(fixture.rng, timestamp, TimeDiff::from_millis(200)); 2];\n\n        let mint_for_block: Vec<(TransactionHash, BTreeSet<Approval>)> = mint\n            .iter()\n            .map(|transaction| (transaction.hash(), transaction.approvals()))\n            .collect();\n\n        let proposed_block = new_proposed_block(timestamp, mint_for_block, vec![], vec![], vec![]);\n\n        let (state, maybe_responder) = BlockValidationState::new(\n            &proposed_block,\n            HashSet::new(),\n            NodeId::random(fixture.rng),\n            new_responder(),\n            1u8,\n            &fixture.chainspec,\n        );\n\n        assert!(matches!(state, BlockValidationState::Invalid { .. 
}));\n        assert!(maybe_responder.is_some());\n    }\n\n    #[test]\n    fn new_state_should_be_in_progress_with_some_transactions() {\n        let mut rng = TestRng::new();\n        let mut fixture = Fixture::new(&mut rng);\n\n        // This test must generate number of transactions within the limits as per the chainspec.\n        let (transfer_count, auction_count, install_upgrade_count, standard_count) = loop {\n            let transfer_count = fixture.rng.gen_range(0..10);\n            let auction_count = fixture.rng.gen_range(0..20);\n            let install_upgrade_count = fixture.rng.gen_range(0..2);\n            let standard_count = fixture.rng.gen_range(0..2);\n            // Ensure at least one transaction is generated. Otherwise, the state will be Valid.\n            if transfer_count + auction_count + install_upgrade_count + standard_count > 0 {\n                break (\n                    transfer_count,\n                    auction_count,\n                    install_upgrade_count,\n                    standard_count,\n                );\n            }\n        };\n        let (state, maybe_responder) = fixture.new_state(\n            transfer_count,\n            auction_count,\n            install_upgrade_count,\n            standard_count,\n        );\n\n        match state {\n            BlockValidationState::InProgress {\n                missing_transactions,\n                holders,\n                responders,\n                ..\n            } => {\n                assert_eq!(\n                    missing_transactions.len() as u64,\n                    standard_count + transfer_count + install_upgrade_count + auction_count\n                );\n                assert_eq!(holders.len(), 1);\n                assert_eq!(holders.values().next().unwrap(), &HolderState::Unasked);\n                assert_eq!(responders.len(), 1);\n            }\n            BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. 
} => {\n                panic!(\"unexpected state\")\n            }\n        }\n        assert!(maybe_responder.is_none());\n    }\n\n    #[test]\n    fn should_add_responder_if_in_progress() {\n        let mut rng = TestRng::new();\n        let mut fixture = Fixture::new(&mut rng);\n        let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 1);\n        assert!(matches!(state, BlockValidationState::InProgress { .. }));\n        assert_eq!(state.responder_count(), 1);\n\n        let add_responder_result = state.add_responder(new_responder());\n        assert!(matches!(add_responder_result, AddResponderResult::Added));\n        assert_eq!(state.responder_count(), 2);\n    }\n\n    #[test]\n    fn should_not_add_responder_if_valid() {\n        let mut state = BlockValidationState::Valid(Timestamp::from(1000));\n        let add_responder_result = state.add_responder(new_responder());\n        assert!(matches!(\n            add_responder_result,\n            AddResponderResult::ValidationCompleted {\n                response_to_send: Ok(()),\n                ..\n            }\n        ));\n        assert_eq!(state.responder_count(), 0);\n    }\n\n    #[test]\n    fn should_not_add_responder_if_invalid() {\n        let err = InvalidProposalError::InvalidTransaction(\n            \"should_not_add_responder_if_invalid\".to_string(),\n        );\n        let mut state = BlockValidationState::Invalid {\n            timestamp: Timestamp::from(1000),\n            error: Box::new(err),\n        };\n        let add_responder_result = state.add_responder(new_responder());\n        assert!(matches!(\n            add_responder_result,\n            AddResponderResult::ValidationCompleted {\n                response_to_send: Err(_err),\n                ..\n            }\n        ));\n        assert_eq!(state.responder_count(), 0);\n    }\n\n    #[test]\n    fn should_add_new_holder_if_in_progress() {\n        let mut rng = TestRng::new();\n        let mut fixture = 
Fixture::new(&mut rng);\n        let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 1);\n        assert!(matches!(state, BlockValidationState::InProgress { .. }));\n        assert_eq!(state.holders_mut().unwrap().len(), 1);\n\n        let new_holder = NodeId::random(fixture.rng);\n        state.add_holder(new_holder);\n        assert_eq!(state.holders_mut().unwrap().len(), 2);\n        assert_eq!(\n            state.holders_mut().unwrap().get(&new_holder),\n            Some(&HolderState::Unasked)\n        );\n    }\n\n    #[test]\n    fn should_not_change_holder_state() {\n        let mut rng = TestRng::new();\n        let mut fixture = Fixture::new(&mut rng);\n        let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 1);\n        assert!(matches!(state, BlockValidationState::InProgress { .. }));\n        let (holder, holder_state) = state\n            .holders_mut()\n            .expect(\"should have holders\")\n            .iter_mut()\n            .next()\n            .expect(\"should have one entry\");\n        *holder_state = HolderState::Asked;\n        let holder = *holder;\n\n        state.add_holder(holder);\n        assert_eq!(state.holders_mut().unwrap().len(), 1);\n        assert_eq!(\n            state.holders_mut().unwrap().get(&holder),\n            Some(&HolderState::Asked)\n        );\n    }\n\n    #[test]\n    fn should_start_fetching() {\n        let mut rng = TestRng::new();\n        let mut fixture = Fixture::new(&mut rng);\n        let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 1);\n        assert!(matches!(state, BlockValidationState::InProgress { .. 
}));\n        let (holder, holder_state) = state\n            .holders_mut()\n            .expect(\"should have holders\")\n            .iter_mut()\n            .next()\n            .expect(\"should have one entry\");\n        assert_eq!(*holder_state, HolderState::Unasked);\n        let original_holder = *holder;\n\n        // We currently have one unasked holder.  Add some failed holders - should still return\n        // `MaybeStartFetching::Start` containing the original holder.\n        for _ in 0..3 {\n            state\n                .holders_mut()\n                .unwrap()\n                .insert(NodeId::random(fixture.rng), HolderState::Failed);\n        }\n\n        let maybe_start_fetching = state.start_fetching();\n        match maybe_start_fetching {\n            MaybeStartFetching::Start {\n                holder,\n                missing_transactions,\n                ..\n            } => {\n                assert_eq!(holder, original_holder);\n                assert_eq!(missing_transactions.len(), 6);\n            }\n            _ => panic!(\"unexpected return value\"),\n        }\n\n        // The original holder should now be marked as `Asked`.\n        let holder_state = state.holders_mut().unwrap().get(&original_holder);\n        assert_eq!(holder_state, Some(&HolderState::Asked));\n    }\n\n    #[test]\n    fn start_fetching_should_return_ongoing_if_any_holder_in_asked_state() {\n        let mut rng = TestRng::new();\n        let mut fixture = Fixture::new(&mut rng);\n        let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 1);\n        assert!(matches!(state, BlockValidationState::InProgress { .. }));\n\n        // Change the current (only) holder's state to `Asked`.\n        let maybe_start_fetching = state.start_fetching();\n        assert!(matches!(\n            maybe_start_fetching,\n            MaybeStartFetching::Start { .. 
}\n        ));\n        let holder_state = state.holders_mut().unwrap().values().next();\n        assert_eq!(holder_state, Some(&HolderState::Asked));\n\n        // Add some unasked holders and some failed - should still return\n        // `MaybeStartFetching::Ongoing`.\n        let unasked_count = fixture.rng.gen_range(0..3);\n        for _ in 0..unasked_count {\n            state\n                .holders_mut()\n                .unwrap()\n                .insert(NodeId::random(fixture.rng), HolderState::Unasked);\n        }\n        let failed_count = fixture.rng.gen_range(0..3);\n        for _ in 0..failed_count {\n            state\n                .holders_mut()\n                .unwrap()\n                .insert(NodeId::random(fixture.rng), HolderState::Failed);\n        }\n\n        // Clone the holders collection before calling `start_fetching` as it should be unmodified\n        // by the call.\n        let holders_before = state.holders_mut().unwrap().clone();\n\n        // `start_fetching` should return `Ongoing` due to the single `Asked` holder.\n        let maybe_start_fetching = state.start_fetching();\n        assert_eq!(maybe_start_fetching, MaybeStartFetching::Ongoing);\n\n        // The holders should be unchanged.\n        assert_eq!(state.holders_mut().unwrap(), &holders_before);\n    }\n\n    #[test]\n    fn start_fetching_should_return_unable_if_all_holders_in_failed_state() {\n        let mut rng = TestRng::new();\n        let mut fixture = Fixture::new(&mut rng);\n        let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 1);\n        assert!(matches!(state, BlockValidationState::InProgress { .. 
}));\n\n        // Set the original holder's state to `Failed` and add some more failed.\n        *state\n            .holders_mut()\n            .expect(\"should have holders\")\n            .values_mut()\n            .next()\n            .expect(\"should have one entry\") = HolderState::Failed;\n\n        let failed_count = fixture.rng.gen_range(0..3);\n        for _ in 0..failed_count {\n            state\n                .holders_mut()\n                .unwrap()\n                .insert(NodeId::random(fixture.rng), HolderState::Failed);\n        }\n\n        // Clone the holders collection before calling `start_fetching` as it should be unmodified\n        // by the call.\n        let holders_before = state.holders_mut().unwrap().clone();\n\n        // `start_fetching` should return `Unable` due to no un-failed holders.\n        let maybe_start_fetching = state.start_fetching();\n        assert_eq!(maybe_start_fetching, MaybeStartFetching::Unable);\n\n        // The holders should be unchanged.\n        assert_eq!(state.holders_mut().unwrap(), &holders_before);\n    }\n\n    #[test]\n    fn start_fetching_should_return_validation_succeeded_if_valid() {\n        let mut state = BlockValidationState::Valid(Timestamp::from(1000));\n        let maybe_start_fetching = state.start_fetching();\n        assert_eq!(\n            maybe_start_fetching,\n            MaybeStartFetching::ValidationSucceeded\n        );\n    }\n\n    #[test]\n    fn start_fetching_should_return_validation_failed_if_invalid() {\n        let mut state = BlockValidationState::Invalid {\n            timestamp: Timestamp::from(1000),\n            error: Box::new(InvalidProposalError::InvalidTransaction(\n                \"start_fetching_should_return_validation_failed_if_invalid\".to_string(),\n            )),\n        };\n        let maybe_start_fetching = state.start_fetching();\n        assert_eq!(maybe_start_fetching, MaybeStartFetching::ValidationFailed);\n    }\n\n    #[test]\n    fn 
state_should_change_to_validation_succeeded() {\n        let mut rng = TestRng::new();\n        let mut fixture = Fixture::new_with_block_gas_limit(&mut rng, 50_000_000_000_000);\n        let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 1);\n        assert!(matches!(state, BlockValidationState::InProgress { .. }));\n\n        // While there is still at least one missing transaction, `try_add_transaction_footprint`\n        // should keep the state `InProgress` and never return responders.\n        let mut footprints = fixture.footprints();\n        while footprints.len() > 1 {\n            let (transaction_hash, footprint) = footprints.pop().unwrap();\n            let responders = state.try_add_transaction_footprint(&transaction_hash, &footprint);\n            assert!(responders.is_empty());\n            assert!(matches!(\n                state,\n                BlockValidationState::InProgress { ref responders, .. }\n                if !responders.is_empty()\n            ));\n        }\n\n        // The final transaction should cause the state to go to `Valid` and the responders to be\n        // returned.\n        let (dt_hash, footprint) = footprints.pop().unwrap();\n        let responders = state.try_add_transaction_footprint(&dt_hash, &footprint);\n        assert_eq!(responders.len(), 1);\n        assert!(matches!(state, BlockValidationState::Valid(_)));\n    }\n\n    #[test]\n    fn unrelated_transaction_added_should_not_change_state() {\n        let mut rng = TestRng::new();\n        let mut fixture = Fixture::new(&mut rng);\n        let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 1);\n        let (appendable_block_before, missing_transactions_before, holders_before) = match &state {\n            BlockValidationState::InProgress {\n                appendable_block,\n                missing_transactions,\n                holders,\n                ..\n            } => (\n                appendable_block.clone(),\n                
missing_transactions.clone(),\n                holders.clone(),\n            ),\n            BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. } => {\n                panic!(\"unexpected state\")\n            }\n        };\n\n        // Create a new, random transaction.\n        let transaction = new_standard(fixture.rng, 1500.into(), TimeDiff::from_seconds(1));\n        let transaction_hash = match &transaction {\n            Transaction::Deploy(deploy) => TransactionHash::Deploy(*deploy.hash()),\n            Transaction::V1(v1) => TransactionHash::V1(*v1.hash()),\n        };\n        let chainspec = Chainspec::default();\n        let footprint = TransactionFootprint::new(&chainspec, &transaction).unwrap();\n\n        // Ensure trying to add it doesn't change the state.\n        let responders = state.try_add_transaction_footprint(&transaction_hash, &footprint);\n        assert!(responders.is_empty());\n        match &state {\n            BlockValidationState::InProgress {\n                appendable_block,\n                missing_transactions: missing_deploys,\n                holders,\n                ..\n            } => {\n                assert_eq!(&appendable_block_before, appendable_block);\n                assert_eq!(&missing_transactions_before, missing_deploys);\n                assert_eq!(&holders_before, holders);\n            }\n            BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. 
} => {\n                panic!(\"unexpected state\")\n            }\n        };\n    }\n\n    #[test]\n    fn state_should_change_to_validation_failed() {\n        let mut rng = TestRng::new();\n        let mut fixture = Fixture::new(&mut rng);\n        // Add an invalid (future-dated) transaction to the fixture.\n        let invalid_transaction =\n            new_standard(fixture.rng, Timestamp::MAX, TimeDiff::from_seconds(1));\n        let invalid_transaction_hash = invalid_transaction.hash();\n        fixture.transactions.push(invalid_transaction.clone());\n        let (mut state, _maybe_responder) = fixture.new_state(1, 1, 1, 1);\n        assert!(matches!(state, BlockValidationState::InProgress { .. }));\n        if let BlockValidationState::InProgress {\n            ref mut missing_transactions,\n            ..\n        } = state\n        {\n            let approvals = invalid_transaction.approvals();\n            let approvals_hash =\n                ApprovalsHash::compute(&approvals).expect(\"must get approvals hash\");\n            let info = ApprovalInfo::new(approvals, approvals_hash);\n            missing_transactions.insert(invalid_transaction_hash, info);\n        };\n\n        // Add some valid deploys, should keep the state `InProgress` and never return responders.\n        let mut footprints = fixture.footprints();\n        while footprints.len() > 3 {\n            let (dt_hash, footprint) = footprints.pop().unwrap();\n            if dt_hash == invalid_transaction_hash {\n                continue;\n            }\n            let responders = state.try_add_transaction_footprint(&dt_hash, &footprint);\n            assert!(responders.is_empty());\n        }\n\n        let transaction_hash = invalid_transaction.hash();\n        // The invalid transaction should cause the state to go to `Invalid` and the responders to\n        // be returned.\n        let chainspec = Chainspec::default();\n        let footprint = TransactionFootprint::new(&chainspec, 
&invalid_transaction).unwrap();\n        let responders = state.try_add_transaction_footprint(&transaction_hash, &footprint);\n        assert_eq!(responders.len(), 1);\n        assert!(matches!(state, BlockValidationState::Invalid { .. }));\n    }\n}\n"
  },
  {
    "path": "node/src/components/block_validator/tests.rs",
    "content": "use std::{collections::VecDeque, sync::Arc, time::Duration};\n\nuse derive_more::From;\nuse itertools::Itertools;\nuse rand::Rng;\n\nuse casper_types::{\n    bytesrepr::Bytes, runtime_args, system::standard_payment::ARG_AMOUNT, testing::TestRng, Block,\n    BlockSignatures, BlockSignaturesV2, Chainspec, ChainspecRawBytes, Deploy, ExecutableDeployItem,\n    FinalitySignatureV2, RuntimeArgs, SecretKey, TestBlockBuilder, TimeDiff, Transaction,\n    TransactionHash, TransactionId, TransactionV1, TransactionV1Config, AUCTION_LANE_ID,\n    INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID, U512,\n};\n\nuse crate::{\n    components::{\n        consensus::BlockContext,\n        fetcher::{self, FetchItem},\n    },\n    effect::requests::StorageRequest,\n    reactor::{EventQueueHandle, QueueKind, Scheduler},\n    testing::LARGE_WASM_LANE_ID,\n    types::{BlockPayload, ValidatorMatrix},\n    utils::{self, Loadable},\n};\n\nuse super::*;\n\n#[derive(Debug, From)]\nenum ReactorEvent {\n    #[from]\n    BlockValidator(Event),\n    #[from]\n    TransactionFetcher(FetcherRequest<Transaction>),\n    #[from]\n    FinalitySigFetcher(FetcherRequest<FinalitySignature>),\n    #[from]\n    Storage(StorageRequest),\n    #[from]\n    FatalAnnouncement(#[allow(dead_code)] FatalAnnouncement),\n}\n\nimpl From<BlockValidationRequest> for ReactorEvent {\n    fn from(req: BlockValidationRequest) -> ReactorEvent {\n        ReactorEvent::BlockValidator(req.into())\n    }\n}\n\nstruct MockReactor {\n    scheduler: &'static Scheduler<ReactorEvent>,\n    validator_matrix: ValidatorMatrix,\n}\n\nimpl MockReactor {\n    fn new<I: IntoIterator<Item = PublicKey>>(\n        our_secret_key: Arc<SecretKey>,\n        public_keys: I,\n    ) -> Self {\n        MockReactor {\n            scheduler: utils::leak(Scheduler::new(QueueKind::weights(), None)),\n            validator_matrix: ValidatorMatrix::new_with_validators(our_secret_key, public_keys),\n        }\n    }\n\n    async fn 
expect_block_validator_event(&self) -> Event {\n        let ((_ancestor, reactor_event), _) = self.scheduler.pop().await;\n        if let ReactorEvent::BlockValidator(event) = reactor_event {\n            event\n        } else {\n            panic!(\"unexpected event: {:?}\", reactor_event);\n        }\n    }\n\n    async fn handle_requests(&self, context: &ValidationContext) {\n        while let Ok(((_ancestor, event), _)) =\n            tokio::time::timeout(Duration::from_millis(100), self.scheduler.pop()).await\n        {\n            match event {\n                ReactorEvent::TransactionFetcher(FetcherRequest {\n                    id,\n                    peer,\n                    validation_metadata: _,\n                    responder,\n                }) => {\n                    if let Some(transaction) = context.get_transaction(id) {\n                        let response = FetchedData::FromPeer {\n                            item: Box::new(transaction),\n                            peer,\n                        };\n                        responder.respond(Ok(response)).await;\n                    } else {\n                        responder\n                            .respond(Err(fetcher::Error::Absent {\n                                id: Box::new(id),\n                                peer,\n                            }))\n                            .await;\n                    }\n                }\n                ReactorEvent::Storage(StorageRequest::GetBlockAndMetadataByHeight {\n                    block_height,\n                    only_from_available_block_range: _,\n                    responder,\n                }) => {\n                    let maybe_block = context.get_block_with_metadata(block_height);\n                    responder.respond(maybe_block).await;\n                }\n                ReactorEvent::FinalitySigFetcher(FetcherRequest {\n                    id,\n                    peer,\n                    validation_metadata: 
_,\n                    responder,\n                }) => {\n                    if let Some(signature) = context.get_signature(&id) {\n                        let response = FetchedData::FromPeer {\n                            item: Box::new(signature),\n                            peer,\n                        };\n                        responder.respond(Ok(response)).await;\n                    } else {\n                        responder\n                            .respond(Err(fetcher::Error::Absent {\n                                id: Box::new(id),\n                                peer,\n                            }))\n                            .await;\n                    }\n                }\n                reactor_event => {\n                    panic!(\"unexpected event: {:?}\", reactor_event);\n                }\n            };\n        }\n    }\n}\n\npub(super) fn new_proposed_block_with_cited_signatures(\n    timestamp: Timestamp,\n    transfer: Vec<(TransactionHash, BTreeSet<Approval>)>,\n    staking: Vec<(TransactionHash, BTreeSet<Approval>)>,\n    install_upgrade: Vec<(TransactionHash, BTreeSet<Approval>)>,\n    standard: Vec<(TransactionHash, BTreeSet<Approval>)>,\n    cited_signatures: RewardedSignatures,\n) -> ProposedBlock<ClContext> {\n    // Accusations and ancestors are empty, and the random bit is always true:\n    // These values are not checked by the block validator.\n    let block_context = BlockContext::new(timestamp, vec![]);\n    let transactions = {\n        let mut ret = BTreeMap::new();\n        ret.insert(MINT_LANE_ID, transfer.into_iter().collect());\n        ret.insert(AUCTION_LANE_ID, staking.into_iter().collect());\n        ret.insert(\n            INSTALL_UPGRADE_LANE_ID,\n            install_upgrade.into_iter().collect(),\n        );\n        ret.insert(LARGE_WASM_LANE_ID, standard.into_iter().collect());\n        ret\n    };\n    let block_payload = BlockPayload::new(transactions, vec![], cited_signatures, true, 
1u8);\n    ProposedBlock::new(Arc::new(block_payload), block_context)\n}\n\npub(super) fn new_proposed_block(\n    timestamp: Timestamp,\n    transfer: Vec<(TransactionHash, BTreeSet<Approval>)>,\n    staking: Vec<(TransactionHash, BTreeSet<Approval>)>,\n    install_upgrade: Vec<(TransactionHash, BTreeSet<Approval>)>,\n    standard: Vec<(TransactionHash, BTreeSet<Approval>)>,\n) -> ProposedBlock<ClContext> {\n    new_proposed_block_with_cited_signatures(\n        timestamp,\n        transfer,\n        staking,\n        install_upgrade,\n        standard,\n        Default::default(),\n    )\n}\n\npub(super) fn new_v1_standard(\n    rng: &mut TestRng,\n    timestamp: Timestamp,\n    ttl: TimeDiff,\n) -> Transaction {\n    let transaction_v1 = TransactionV1::random_wasm(rng, Some(timestamp), Some(ttl));\n    Transaction::V1(transaction_v1)\n}\n\npub(super) fn new_auction(rng: &mut TestRng, timestamp: Timestamp, ttl: TimeDiff) -> Transaction {\n    let transaction_v1 = TransactionV1::random_auction(rng, Some(timestamp), Some(ttl));\n    Transaction::V1(transaction_v1)\n}\n\npub(super) fn new_install_upgrade(\n    rng: &mut TestRng,\n    timestamp: Timestamp,\n    ttl: TimeDiff,\n) -> Transaction {\n    TransactionV1::random_install_upgrade(rng, Some(timestamp), Some(ttl)).into()\n}\n\npub(super) fn new_deploy(rng: &mut TestRng, timestamp: Timestamp, ttl: TimeDiff) -> Transaction {\n    let secret_key = SecretKey::random(rng);\n    let chain_name = \"chain\".to_string();\n    let payment = ExecutableDeployItem::ModuleBytes {\n        module_bytes: Bytes::new(),\n        args: runtime_args! 
{ ARG_AMOUNT => U512::from(1) },\n    };\n    let session = ExecutableDeployItem::ModuleBytes {\n        module_bytes: Bytes::new(),\n        args: RuntimeArgs::new(),\n    };\n    let dependencies = vec![];\n    let gas_price = 1;\n\n    Deploy::new_signed(\n        timestamp,\n        ttl,\n        gas_price,\n        dependencies,\n        chain_name,\n        payment,\n        session,\n        &secret_key,\n        None,\n    )\n    .into()\n}\n\npub(super) fn new_v1_transfer(\n    rng: &mut TestRng,\n    timestamp: Timestamp,\n    ttl: TimeDiff,\n) -> Transaction {\n    TransactionV1::random_transfer(rng, Some(timestamp), Some(ttl)).into()\n}\n\npub(super) fn new_transfer(rng: &mut TestRng, timestamp: Timestamp, ttl: TimeDiff) -> Transaction {\n    let secret_key = SecretKey::random(rng);\n    let chain_name = \"chain\".to_string();\n    let payment = ExecutableDeployItem::ModuleBytes {\n        module_bytes: Bytes::new(),\n        args: runtime_args! { ARG_AMOUNT => U512::from(1) },\n    };\n    let session = ExecutableDeployItem::Transfer {\n        args: RuntimeArgs::new(),\n    };\n    let dependencies = vec![];\n    let gas_price = 1;\n\n    Deploy::new_signed(\n        timestamp,\n        ttl,\n        gas_price,\n        dependencies,\n        chain_name,\n        payment,\n        session,\n        &secret_key,\n        None,\n    )\n    .into()\n}\n\npub(super) fn new_mint(rng: &mut TestRng, timestamp: Timestamp, ttl: TimeDiff) -> Transaction {\n    if rng.gen() {\n        new_v1_transfer(rng, timestamp, ttl)\n    } else {\n        new_transfer(rng, timestamp, ttl)\n    }\n}\n\npub(super) fn new_standard(rng: &mut TestRng, timestamp: Timestamp, ttl: TimeDiff) -> Transaction {\n    if rng.gen() {\n        new_v1_standard(rng, timestamp, ttl)\n    } else {\n        new_deploy(rng, timestamp, ttl)\n    }\n}\n\npub(super) fn new_non_transfer(\n    rng: &mut TestRng,\n    timestamp: Timestamp,\n    ttl: TimeDiff,\n) -> Transaction {\n    match 
rng.gen_range(0..3) {\n        0 => new_standard(rng, timestamp, ttl),\n        1 => new_install_upgrade(rng, timestamp, ttl),\n        2 => new_auction(rng, timestamp, ttl),\n        _ => unreachable!(),\n    }\n}\n\ntype SecretKeys = BTreeMap<PublicKey, Arc<SecretKey>>;\n\nstruct ValidationContext {\n    chainspec: Chainspec,\n    // Validators\n    secret_keys: SecretKeys,\n    // map of height → block\n    past_blocks: HashMap<u64, Block>,\n    // blocks that will be \"stored\" during validation\n    delayed_blocks: HashMap<u64, Block>,\n    transactions: HashMap<TransactionId, Transaction>,\n    transfers: HashMap<TransactionId, Transaction>,\n    // map of block height → signatures for the block\n    signatures: HashMap<u64, HashMap<PublicKey, FinalitySignatureV2>>,\n    // map of signatures that aren't stored, but are fetchable\n    fetchable_signatures: HashMap<FinalitySignatureId, FinalitySignature>,\n\n    // fields defining the proposed block that will be validated\n    transactions_to_include: Vec<(TransactionHash, BTreeSet<Approval>)>,\n    transfers_to_include: Vec<(TransactionHash, BTreeSet<Approval>)>,\n    signatures_to_include: HashMap<u64, BTreeSet<PublicKey>>,\n    proposed_block_height: Option<u64>,\n}\n\nimpl ValidationContext {\n    fn new() -> Self {\n        let (chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources(\"local\");\n        Self {\n            chainspec,\n            secret_keys: BTreeMap::new(),\n            past_blocks: HashMap::new(),\n            delayed_blocks: HashMap::new(),\n            transactions: HashMap::new(),\n            transfers: HashMap::new(),\n            fetchable_signatures: HashMap::new(),\n            signatures: HashMap::new(),\n            transactions_to_include: vec![],\n            transfers_to_include: vec![],\n            signatures_to_include: HashMap::new(),\n            proposed_block_height: None,\n        }\n    }\n\n    fn with_num_validators(mut self, rng: &mut TestRng, 
num_validators: usize) -> Self {\n        for _ in 0..num_validators {\n            let validator_key = Arc::new(SecretKey::random(rng));\n            self.secret_keys\n                .insert(PublicKey::from(&*validator_key), validator_key.clone());\n        }\n        self\n    }\n\n    fn with_count_limits(\n        mut self,\n        mint_count: Option<u64>,\n        auction: Option<u64>,\n        install: Option<u64>,\n        large_limit: Option<u64>,\n    ) -> Self {\n        let transaction_v1_config = TransactionV1Config::default().with_count_limits(\n            mint_count,\n            auction,\n            install,\n            large_limit,\n        );\n        self.chainspec.transaction_config.transaction_v1_config = transaction_v1_config;\n        self\n    }\n\n    fn with_block_gas_limit(mut self, block_gas_limit: u64) -> Self {\n        self.chainspec.transaction_config.block_gas_limit = block_gas_limit;\n        self\n    }\n\n    fn get_validators(&self) -> Vec<PublicKey> {\n        self.secret_keys.keys().cloned().collect()\n    }\n\n    fn with_past_blocks(\n        mut self,\n        rng: &mut TestRng,\n        min_height: u64,\n        max_height: u64,\n        era: EraId,\n    ) -> Self {\n        self.past_blocks\n            .extend((min_height..=max_height).map(|height| {\n                let block = TestBlockBuilder::new().height(height).era(era).build(rng);\n                (height, block.into())\n            }));\n        self.proposed_block_height = self\n            .proposed_block_height\n            .map(|height| height.max(max_height + 1))\n            .or(Some(max_height + 1));\n        self\n    }\n\n    fn with_delayed_blocks(\n        mut self,\n        rng: &mut TestRng,\n        min_height: u64,\n        max_height: u64,\n        era: EraId,\n    ) -> Self {\n        self.delayed_blocks\n            .extend((min_height..=max_height).map(|height| {\n                let block = 
TestBlockBuilder::new().height(height).era(era).build(rng);\n                (height, block.into())\n            }));\n        self.proposed_block_height = self\n            .proposed_block_height\n            .map(|height| height.max(max_height + 1))\n            .or(Some(max_height + 1));\n        self\n    }\n\n    fn get_delayed_blocks(&mut self) -> Vec<u64> {\n        let heights = self.delayed_blocks.keys().cloned().collect();\n        self.past_blocks\n            .extend(std::mem::take(&mut self.delayed_blocks));\n        heights\n    }\n\n    fn with_signatures_for_block<'a, I: IntoIterator<Item = &'a PublicKey>>(\n        mut self,\n        min_height: u64,\n        max_height: u64,\n        validators: I,\n    ) -> Self {\n        for validator in validators {\n            for height in min_height..=max_height {\n                let block = self\n                    .past_blocks\n                    .get(&height)\n                    .or_else(|| self.delayed_blocks.get(&height))\n                    .expect(\"should have block\");\n                let secret_key = self\n                    .secret_keys\n                    .get(validator)\n                    .expect(\"should have validator\");\n                let signature = FinalitySignatureV2::create(\n                    *block.hash(),\n                    block.height(),\n                    block.era_id(),\n                    self.chainspec.name_hash(),\n                    secret_key,\n                );\n                self.signatures\n                    .entry(height)\n                    .or_default()\n                    .insert(validator.clone(), signature);\n            }\n        }\n        self\n    }\n\n    fn with_fetchable_signatures<'a, I: IntoIterator<Item = &'a PublicKey>>(\n        mut self,\n        min_height: u64,\n        max_height: u64,\n        validators: I,\n    ) -> Self {\n        for validator in validators {\n            for height in min_height..=max_height {\n     
           let block = self.past_blocks.get(&height).expect(\"should have block\");\n                let secret_key = self\n                    .secret_keys\n                    .get(validator)\n                    .expect(\"should have validator\");\n                let signature = FinalitySignature::V2(FinalitySignatureV2::create(\n                    *block.hash(),\n                    block.height(),\n                    block.era_id(),\n                    self.chainspec.name_hash(),\n                    secret_key,\n                ));\n                self.fetchable_signatures\n                    .insert(*signature.fetch_id(), signature);\n            }\n        }\n        self\n    }\n\n    fn include_signatures<'a, I: IntoIterator<Item = &'a PublicKey>>(\n        mut self,\n        min_height: u64,\n        max_height: u64,\n        validators: I,\n    ) -> Self {\n        for validator in validators {\n            for height in min_height..=max_height {\n                self.signatures_to_include\n                    .entry(height)\n                    .or_default()\n                    .insert(validator.clone());\n            }\n        }\n        self\n    }\n\n    fn with_transactions(mut self, transactions: Vec<Transaction>) -> Self {\n        self.transactions.extend(\n            transactions\n                .into_iter()\n                .map(|transaction| (transaction.clone().fetch_id(), transaction)),\n        );\n        self\n    }\n\n    fn with_transfers(mut self, transfers: Vec<Transaction>) -> Self {\n        self.transfers.extend(\n            transfers\n                .into_iter()\n                .map(|transaction| (transaction.clone().fetch_id(), transaction)),\n        );\n        self\n    }\n\n    fn include_all_transactions(mut self) -> Self {\n        self.transactions_to_include.extend(\n            self.transactions\n                .values()\n                .map(|transaction| (transaction.hash(), transaction.approvals())),\n  
      );\n        self\n    }\n\n    fn include_all_transfers(mut self) -> Self {\n        self.transfers_to_include.extend(\n            self.transfers\n                .values()\n                .map(|transaction| (transaction.hash(), transaction.approvals())),\n        );\n        self\n    }\n\n    fn include_transactions<I: IntoIterator<Item = (TransactionHash, BTreeSet<Approval>)>>(\n        mut self,\n        transactions: I,\n    ) -> Self {\n        self.transactions_to_include.extend(transactions);\n        self\n    }\n\n    fn include_transfers<I: IntoIterator<Item = (TransactionHash, BTreeSet<Approval>)>>(\n        mut self,\n        transfers: I,\n    ) -> Self {\n        self.transfers_to_include.extend(transfers);\n        self\n    }\n\n    fn get_transaction(&self, id: TransactionId) -> Option<Transaction> {\n        self.transactions\n            .get(&id)\n            .cloned()\n            .or_else(|| self.transfers.get(&id).cloned())\n    }\n\n    fn get_signature(&self, id: &FinalitySignatureId) -> Option<FinalitySignature> {\n        self.fetchable_signatures.get(id).cloned()\n    }\n\n    fn get_block_with_metadata(&self, block_height: u64) -> Option<BlockWithMetadata> {\n        self.past_blocks.get(&block_height).map(|block| {\n            let empty_hashmap = HashMap::new();\n            let signatures = self.signatures.get(&block_height).unwrap_or(&empty_hashmap);\n            let mut block_signatures = BlockSignaturesV2::new(\n                *block.hash(),\n                block.height(),\n                block.era_id(),\n                self.chainspec.name_hash(),\n            );\n            for signature in signatures.values() {\n                block_signatures\n                    .insert_signature(signature.public_key().clone(), *signature.signature());\n            }\n            BlockWithMetadata {\n                block: block.clone(),\n                block_signatures: BlockSignatures::V2(block_signatures),\n            }\n   
     })\n    }\n\n    fn proposed_block(&self, timestamp: Timestamp) -> ProposedBlock<ClContext> {\n        let rewards_window = self.chainspec.core_config.signature_rewards_max_delay;\n        let rewarded_signatures = self\n            .proposed_block_height\n            .map(|proposed_block_height| {\n                RewardedSignatures::new(\n                    (1..=rewards_window)\n                        .filter_map(|height_diff| proposed_block_height.checked_sub(height_diff))\n                        .map(|height| {\n                            let signing_validators = self\n                                .signatures_to_include\n                                .get(&height)\n                                .cloned()\n                                .unwrap_or_default();\n                            SingleBlockRewardedSignatures::from_validator_set(\n                                &signing_validators,\n                                self.secret_keys.keys(),\n                            )\n                        }),\n                )\n            })\n            .unwrap_or_default();\n        new_proposed_block_with_cited_signatures(\n            timestamp,\n            self.transfers_to_include.to_vec(),\n            vec![],\n            vec![],\n            self.transactions_to_include.to_vec(),\n            rewarded_signatures,\n        )\n    }\n\n    async fn proposal_is_valid(&mut self, rng: &mut TestRng, timestamp: Timestamp) -> bool {\n        self.validate_proposed_block(rng, timestamp).await.is_ok()\n    }\n\n    /// Validates a block using a `BlockValidator` component, and returns the result.\n    async fn validate_proposed_block(\n        &mut self,\n        rng: &mut TestRng,\n        timestamp: Timestamp,\n    ) -> Result<(), Box<InvalidProposalError>> {\n        let proposed_block = self.proposed_block(timestamp);\n\n        // Create the reactor and component.\n        let our_secret_key = self\n            .secret_keys\n            
.values()\n            .next()\n            .expect(\"should have a secret key\")\n            .clone();\n        let reactor = MockReactor::new(our_secret_key, self.secret_keys.keys().cloned());\n        let effect_builder =\n            EffectBuilder::new(EventQueueHandle::without_shutdown(reactor.scheduler));\n        let mut block_validator = BlockValidator::new(\n            Arc::new(self.chainspec.clone()),\n            reactor.validator_matrix.clone(),\n            Config::default(),\n            1u8,\n        );\n\n        // Pass the block to the component. This future will eventually resolve to the result, i.e.\n        // whether the block is valid or not.\n        let bob_node_id = NodeId::random(rng);\n        let block_height = rng.gen_range(0..1000);\n        let validation_result = tokio::spawn(effect_builder.validate_block(\n            bob_node_id,\n            self.proposed_block_height.unwrap_or(block_height),\n            proposed_block.clone(),\n        ));\n        let event = reactor.expect_block_validator_event().await;\n        let effects = block_validator.handle_event(effect_builder, rng, event);\n\n        // If validity could already be determined, the effect will be the validation response.\n        if !block_validator.validation_states.is_empty()\n            && block_validator\n                .validation_states\n                .values()\n                .all(BlockValidationState::completed)\n        {\n            assert_eq!(1, effects.len());\n            for effect in effects {\n                tokio::spawn(effect).await.unwrap(); // Response.\n            }\n            return validation_result.await.unwrap();\n        }\n\n        // Otherwise the effects are either requests to fetch the block's transactions, or to fetch\n        // past blocks for the purpose of signature validation.\n        let event_futures: Vec<_> = effects.into_iter().map(tokio::spawn).collect();\n\n        // We make our mock reactor answer with the 
expected blocks and/or transactions and\n        // transfers:\n        reactor.handle_requests(self).await;\n\n        // At this point we either responded with requested transactions, or the past blocks. This\n        // should generate other events (`GotPastBlocksWithMetadata` in the case of past blocks, or\n        // a bunch of `TransactionFetched` in the case of transactions). We have to handle them.\n        let mut effects = Effects::new();\n        for future in event_futures {\n            let events = future.await.unwrap();\n            effects.extend(\n                events\n                    .into_iter()\n                    .flat_map(|event| block_validator.handle_event(effect_builder, rng, event)),\n            );\n        }\n\n        // If there are no effects - some blocks have been missing from storage. Announce the\n        // finalization of the blocks we have in the context.\n        if effects.is_empty() {\n            for block_height in self.get_delayed_blocks() {\n                effects.extend(block_validator.handle_event(\n                    effect_builder,\n                    rng,\n                    Event::BlockStored(block_height),\n                ));\n            }\n        }\n\n        // If there are still no effects, something went wrong.\n        assert!(!effects.is_empty());\n\n        // If there were no signatures in the block, the validity of the block should be determined\n        // at this point. In such a case, return the result.\n        if !block_validator.validation_states.is_empty()\n            && block_validator\n                .validation_states\n                .values()\n                .all(BlockValidationState::completed)\n        {\n            assert_eq!(1, effects.len());\n            for effect in effects {\n                tokio::spawn(effect).await.unwrap();\n            }\n            return validation_result.await.unwrap();\n        }\n\n        // Otherwise, we have more effects to handle. 
After the blocks have been returned, the\n        // validator should now ask for the transactions and signatures.\n        // If some blocks have been delayed, this can be another request for past blocks.\n        // Let's handle those requests.\n        let event_futures: Vec<_> = effects.into_iter().map(tokio::spawn).collect();\n\n        // We make our mock reactor answer with the expected items.\n        reactor.handle_requests(self).await;\n\n        // Again, we'll have a bunch of events to handle, so we handle them.\n        let mut effects = Effects::new();\n        for future in event_futures {\n            let events = future.await.unwrap();\n            effects.extend(\n                events\n                    .into_iter()\n                    .flat_map(|event| block_validator.handle_event(effect_builder, rng, event)),\n            );\n        }\n\n        // If there are no effects at this point, something went wrong.\n        assert!(!effects.is_empty());\n\n        // If no blocks were delayed, we just returned all the fetched items, so now the validity\n        // should have been resolved. Return the result if it is so.\n        if !block_validator.validation_states.is_empty()\n            && block_validator\n                .validation_states\n                .values()\n                .all(BlockValidationState::completed)\n        {\n            assert_eq!(1, effects.len());\n            for effect in effects {\n                tokio::spawn(effect).await.unwrap();\n            }\n            return validation_result.await.unwrap();\n        }\n\n        // Otherwise, we have more effects to handle. 
At this point, all the delayed blocks should\n        // have been stored and returned, so we just have a bunch of fetch requests to handle.\n        let event_futures: Vec<_> = effects.into_iter().map(tokio::spawn).collect();\n\n        // We make our mock reactor answer with the expected items.\n        reactor.handle_requests(self).await;\n\n        // Again, we'll have a bunch of events to handle. At this point we should have a bunch of\n        // `TransactionFetched` or `FinalitySignatureFetched` events. We handle them.\n        let mut effects = Effects::new();\n        for future in event_futures {\n            let events = future.await.unwrap();\n            effects.extend(\n                events\n                    .into_iter()\n                    .flat_map(|event| block_validator.handle_event(effect_builder, rng, event)),\n            );\n        }\n\n        // Nothing more should be requested, so we expect at most one effect: the validation\n        // response. Zero effects is possible if block validator responded with false before, but\n        // hasn't marked the state invalid (it can happen when peers are exhausted). In any case,\n        // the result should be resolved now.\n        assert!(effects.len() < 2);\n        for effect in effects {\n            tokio::spawn(effect).await.unwrap(); // Response.\n        }\n        validation_result.await.unwrap()\n    }\n}\n\n/// Verifies that a block without any transactions or transfers is valid.\n#[tokio::test]\nasync fn empty_block() {\n    let mut rng = TestRng::new();\n    let mut empty_context = ValidationContext::new().with_num_validators(&mut rng, 1);\n    assert!(empty_context.proposal_is_valid(&mut rng, 1000.into()).await);\n}\n\n/// Verifies that the block validator checks transaction and transfer timestamps and ttl.\n#[tokio::test]\nasync fn ttl() {\n    // The ttl is 200 ms, and our transactions and transfers have timestamps 900 and 1000. 
So the\n    // block timestamp must be at least 1000 and at most 1100.\n    let mut rng = TestRng::new();\n    let ttl = TimeDiff::from_millis(200);\n    let transactions = vec![\n        new_non_transfer(&mut rng, 1000.into(), ttl),\n        new_non_transfer(&mut rng, 900.into(), ttl),\n    ];\n    let transfers: Vec<Transaction> = vec![\n        new_v1_transfer(&mut rng, 1000.into(), ttl),\n        new_v1_transfer(&mut rng, 900.into(), ttl),\n    ];\n\n    let mut transactions_context = ValidationContext::new()\n        .with_num_validators(&mut rng, 1)\n        .with_transactions(transactions.clone())\n        .with_count_limits(Some(3000), Some(3000), Some(3000), Some(3000))\n        .with_block_gas_limit(15_300_000_000_000)\n        .include_all_transactions();\n    let mut transfers_context = ValidationContext::new()\n        .with_num_validators(&mut rng, 1)\n        .with_transfers(transfers.clone())\n        .with_count_limits(Some(3000), Some(3000), Some(3000), Some(3000))\n        .with_block_gas_limit(15_300_000_000_000)\n        .include_all_transfers();\n    let mut both_context = ValidationContext::new()\n        .with_num_validators(&mut rng, 1)\n        .with_transactions(transactions)\n        .with_transfers(transfers)\n        .with_count_limits(Some(3000), Some(3000), Some(3000), Some(3000))\n        .with_block_gas_limit(15_300_000_000_000)\n        .include_all_transactions()\n        .include_all_transfers();\n\n    // Both 1000 and 1100 are timestamps compatible with the transactions and transfers.\n    assert!(both_context.proposal_is_valid(&mut rng, 1000.into()).await);\n    assert!(both_context.proposal_is_valid(&mut rng, 1100.into()).await);\n\n    // A block with timestamp 999 can't contain a transfer or transactions with timestamp 1000.\n    assert!(\n        !transactions_context\n            .proposal_is_valid(&mut rng, 999.into())\n            .await\n    );\n    assert!(\n        !transfers_context\n            
.proposal_is_valid(&mut rng, 999.into())\n            .await\n    );\n    assert!(!both_context.proposal_is_valid(&mut rng, 999.into()).await);\n\n    // At time 1101, the transactions and transfer from time 900 have expired.\n    assert!(\n        !transactions_context\n            .proposal_is_valid(&mut rng, 1101.into())\n            .await\n    );\n    assert!(\n        !transfers_context\n            .proposal_is_valid(&mut rng, 1101.into())\n            .await\n    );\n    assert!(!both_context.proposal_is_valid(&mut rng, 1101.into()).await);\n}\n\n/// Verifies that a block is invalid if it contains a transfer in the transactions section\n/// or vice versa.\n#[tokio::test]\nasync fn transfer_transaction_mixup_and_replay() {\n    let mut rng = TestRng::new();\n    let ttl = TimeDiff::from_millis(200);\n    let timestamp = Timestamp::from(1000);\n    let deploy = new_deploy(&mut rng, timestamp, ttl);\n    let transaction_v1 = new_v1_standard(&mut rng, timestamp, ttl);\n    let transfer_orig = new_transfer(&mut rng, timestamp, ttl);\n    let transfer_v1 = new_v1_transfer(&mut rng, timestamp, ttl);\n\n    // First we make sure that our transfers and transactions would normally be valid.\n    let transactions = vec![transaction_v1.clone()];\n    let transfers = vec![transfer_orig.clone(), transfer_v1.clone()];\n    let mut context = ValidationContext::new()\n        .with_num_validators(&mut rng, 1)\n        .with_transactions(transactions)\n        .with_transfers(transfers)\n        .include_all_transactions()\n        .include_all_transfers();\n    assert!(context.proposal_is_valid(&mut rng, timestamp).await);\n\n    // Now we test for different invalid combinations of transactions and transfers:\n    // 1. 
Original style transfer in the transactions section.\n    let transactions = vec![\n        transfer_orig.clone(),\n        transaction_v1.clone(),\n        deploy.clone(),\n    ];\n    let transfers = vec![transfer_orig.clone(), transfer_v1.clone()];\n    let mut context = ValidationContext::new()\n        .with_num_validators(&mut rng, 1)\n        .with_transactions(transactions)\n        .with_transfers(transfers)\n        .include_all_transactions()\n        .include_all_transfers();\n    assert!(!context.proposal_is_valid(&mut rng, timestamp).await);\n    // 2. V1 transfer in the transactions section.\n    let transactions = vec![transfer_v1.clone(), transaction_v1.clone(), deploy.clone()];\n    let transfers = vec![transfer_orig.clone(), transfer_v1.clone()];\n    let mut context = ValidationContext::new()\n        .with_num_validators(&mut rng, 1)\n        .with_transactions(transactions)\n        .with_transfers(transfers)\n        .include_all_transactions()\n        .include_all_transfers();\n    assert!(!context.proposal_is_valid(&mut rng, timestamp).await);\n    // 3. Legacy deploy in the transfers section.\n    let transactions = vec![transaction_v1.clone(), deploy.clone()];\n    let transfers = vec![transfer_orig.clone(), transfer_v1.clone(), deploy.clone()];\n    let mut context = ValidationContext::new()\n        .with_num_validators(&mut rng, 1)\n        .with_transactions(transactions)\n        .with_transfers(transfers)\n        .include_all_transactions()\n        .include_all_transfers();\n    assert!(!context.proposal_is_valid(&mut rng, timestamp).await);\n    // 4. 
V1 transaction in the transfers section.\n    let transactions = vec![transaction_v1.clone(), deploy.clone()];\n    let transfers = vec![\n        transfer_orig.clone(),\n        transfer_v1.clone(),\n        transaction_v1.clone(),\n    ];\n    let mut context = ValidationContext::new()\n        .with_num_validators(&mut rng, 1)\n        .with_transactions(transactions)\n        .with_transfers(transfers)\n        .include_all_transactions()\n        .include_all_transfers();\n    assert!(!context.proposal_is_valid(&mut rng, timestamp).await);\n\n    // Each transaction must be unique\n    let transactions = vec![deploy.clone(), transaction_v1.clone()];\n    let transfers = vec![transfer_orig.clone(), transfer_v1.clone()];\n    let mut context = ValidationContext::new()\n        .with_num_validators(&mut rng, 1)\n        .with_transactions(transactions)\n        .with_transfers(transfers)\n        .include_all_transactions()\n        .include_all_transfers()\n        .include_transactions(vec![(deploy.hash(), deploy.approvals())]);\n    assert!(!context.proposal_is_valid(&mut rng, timestamp).await);\n    let transactions = vec![deploy.clone(), transaction_v1.clone()];\n    let transfers = vec![transfer_orig.clone(), transfer_v1.clone()];\n    let mut context = ValidationContext::new()\n        .with_num_validators(&mut rng, 1)\n        .with_transactions(transactions)\n        .with_transfers(transfers)\n        .include_all_transactions()\n        .include_all_transfers()\n        .include_transactions(vec![(transaction_v1.hash(), transaction_v1.approvals())]);\n    assert!(!context.proposal_is_valid(&mut rng, timestamp).await);\n\n    // And each transfer must be unique, too.\n    let transactions = vec![deploy.clone(), transaction_v1.clone()];\n    let transfers = vec![transfer_v1.clone(), transfer_orig.clone()];\n    let mut context = ValidationContext::new()\n        .with_num_validators(&mut rng, 1)\n        .with_transactions(transactions)\n        
.with_transfers(transfers)\n        .include_all_transactions()\n        .include_all_transfers()\n        .include_transfers(vec![(transfer_v1.hash(), transfer_v1.approvals())]);\n    assert!(!context.proposal_is_valid(&mut rng, timestamp).await);\n    let transactions = vec![deploy.clone(), transaction_v1.clone()];\n    let transfers = vec![transfer_orig.clone(), transfer_v1.clone()];\n    let mut context = ValidationContext::new()\n        .with_num_validators(&mut rng, 1)\n        .with_transactions(transactions)\n        .with_transfers(transfers)\n        .include_all_transactions()\n        .include_all_transfers()\n        .include_transactions(vec![(transfer_orig.hash(), transfer_orig.approvals())]);\n    assert!(!context.proposal_is_valid(&mut rng, timestamp).await);\n}\n\n/// Verifies that the block validator fetches from multiple peers.\n#[tokio::test]\nasync fn should_fetch_from_multiple_peers() {\n    let _ = crate::logging::init();\n    tokio::time::timeout(Duration::from_secs(5), async move {\n        let peer_count = 3;\n        let mut rng = TestRng::new();\n        let ttl = TimeDiff::from_seconds(200);\n        let transactions = (0..peer_count)\n            .map(|i| new_non_transfer(&mut rng, (900 + i).into(), ttl))\n            .collect_vec();\n        let transfers = (0..peer_count)\n            .map(|i| new_v1_transfer(&mut rng, (1000 + i).into(), ttl))\n            .collect_vec();\n\n        // Assemble the block to be validated.\n        let transfers_for_block = transfers\n            .iter()\n            .map(|transfer| (transfer.hash(), transfer.approvals()))\n            .collect_vec();\n        let standard_for_block = transactions\n            .iter()\n            .map(|transaction| (transaction.hash(), transaction.approvals()))\n            .collect_vec();\n        let proposed_block = new_proposed_block(\n            1100.into(),\n            transfers_for_block,\n            vec![],\n            vec![],\n            
standard_for_block,\n        );\n\n        // Create the reactor and component.\n        let secret_key = Arc::new(SecretKey::random(&mut rng));\n        let public_key = PublicKey::from(&*secret_key);\n        let reactor = MockReactor::new(secret_key, vec![public_key]);\n        let effect_builder =\n            EffectBuilder::new(EventQueueHandle::without_shutdown(reactor.scheduler));\n        let (mut chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources(\"local\");\n\n        chainspec.transaction_config.block_gas_limit = 100_000_000_000_000;\n        let transaction_v1_config = TransactionV1Config::default().with_count_limits(\n            Some(3000),\n            Some(3000),\n            Some(3000),\n            Some(3000),\n        );\n        chainspec.transaction_config.transaction_v1_config = transaction_v1_config;\n\n        let mut block_validator = BlockValidator::new(\n            Arc::new(chainspec),\n            reactor.validator_matrix.clone(),\n            Config::default(),\n            1u8,\n        );\n\n        // Have a validation request for each one of the peers. These futures will eventually all\n        // resolve to the same result, i.e. 
whether the block is valid or not.\n        let validation_results = (0..peer_count)\n            .map(|_| {\n                let node_id = NodeId::random(&mut rng);\n                let block_height = rng.gen_range(0..1000);\n                tokio::spawn(effect_builder.validate_block(\n                    node_id,\n                    block_height,\n                    proposed_block.clone(),\n                ))\n            })\n            .collect_vec();\n\n        let mut fetch_effects = VecDeque::new();\n        for index in 0..peer_count {\n            let event = reactor.expect_block_validator_event().await;\n            let effects = block_validator.handle_event(effect_builder, &mut rng, event);\n            if index == 0 {\n                assert_eq!(effects.len(), 6);\n                fetch_effects.extend(effects);\n            } else {\n                assert!(effects.is_empty());\n            }\n        }\n\n        // The effects are requests to fetch the block's transactions.  
There are six fetch\n        // requests, all using the first peer.\n        let fetch_results = fetch_effects.drain(..).map(tokio::spawn).collect_vec();\n\n        // Provide the first deploy and transfer on first asking.\n        let context = ValidationContext::new()\n            .with_num_validators(&mut rng, 1)\n            .with_transactions(vec![transactions[0].clone()])\n            .with_transfers(vec![transfers[0].clone()]);\n        reactor.handle_requests(&context).await;\n\n        let mut missing = vec![];\n        for fetch_result in fetch_results {\n            let mut events = fetch_result.await.unwrap();\n            assert_eq!(1, events.len());\n            // The event should be `TransactionFetched`.\n            let event = events.pop().unwrap();\n            // New fetch requests will be made using a different peer for all transactions not\n            // already registered as fetched.\n            let effects = block_validator.handle_event(effect_builder, &mut rng, event);\n            if !effects.is_empty() {\n                assert!(missing.is_empty());\n                missing = block_validator\n                    .validation_states\n                    .values()\n                    .next()\n                    .unwrap()\n                    .missing_hashes();\n            }\n            fetch_effects.extend(effects);\n        }\n\n        // Handle the second set of fetch requests now.\n        let fetch_results = fetch_effects.drain(..).map(tokio::spawn).collect_vec();\n\n        // Provide the first and second deploys and transfers which haven't already been fetched on\n        // second asking.\n        let context = context\n            .with_transactions(vec![transactions[1].clone()])\n            .with_transfers(vec![transfers[1].clone()]);\n        reactor.handle_requests(&context).await;\n\n        missing.clear();\n        for fetch_result in fetch_results {\n            let mut events = fetch_result.await.unwrap();\n           
 assert_eq!(1, events.len());\n            // The event should be `TransactionFetched`.\n            let event = events.pop().unwrap();\n            // New fetch requests will be made using a different peer for all transactions not\n            // already registered as fetched.\n            let effects = block_validator.handle_event(effect_builder, &mut rng, event);\n            if !effects.is_empty() {\n                assert!(missing.is_empty());\n                missing = block_validator\n                    .validation_states\n                    .values()\n                    .next()\n                    .unwrap()\n                    .missing_hashes();\n            }\n            fetch_effects.extend(effects);\n        }\n\n        // Handle the final set of fetch requests now.\n        let fetch_results = fetch_effects.into_iter().map(tokio::spawn).collect_vec();\n\n        // Provide all deploys and transfers not already fetched on third asking.\n        let context = context\n            .with_transactions(vec![transactions[2].clone()])\n            .with_transfers(vec![transfers[2].clone()]);\n        reactor.handle_requests(&context).await;\n\n        let mut effects = Effects::new();\n        for fetch_result in fetch_results {\n            let mut events = fetch_result.await.unwrap();\n            assert_eq!(1, events.len());\n            // The event should be `TransactionFetched`.\n            let event = events.pop().unwrap();\n            // Once the block is deemed valid (i.e. 
when the final missing transaction is\n            // successfully fetched) the effects will be three validation responses.\n            effects.extend(block_validator.handle_event(effect_builder, &mut rng, event));\n            assert!(effects.is_empty() || effects.len() == peer_count as usize);\n        }\n\n        for effect in effects {\n            tokio::spawn(effect).await.unwrap();\n        }\n\n        for validation_result in validation_results {\n            assert!(validation_result.await.unwrap().is_ok());\n        }\n    })\n    .await\n    .expect(\"should not hang\");\n}\n\n#[tokio::test]\nasync fn should_validate_block_with_signatures() {\n    let mut rng = TestRng::new();\n    let ttl = TimeDiff::from_millis(200);\n    let timestamp = Timestamp::from(1000);\n    let transaction_v1 = new_v1_standard(&mut rng, timestamp, ttl);\n    let transfer = new_transfer(&mut rng, timestamp, ttl);\n    let transfer_v1 = new_v1_transfer(&mut rng, timestamp, ttl);\n\n    let context = ValidationContext::new()\n        .with_num_validators(&mut rng, 3)\n        .with_past_blocks(&mut rng, 0, 5, 0.into())\n        .with_transactions(vec![transaction_v1])\n        .with_transfers(vec![transfer, transfer_v1])\n        .include_all_transactions()\n        .include_all_transfers();\n\n    let validators = context.get_validators();\n\n    let mut context = context\n        .with_signatures_for_block(3, 5, &validators)\n        .include_signatures(3, 5, &validators);\n\n    assert!(context.proposal_is_valid(&mut rng, timestamp).await);\n}\n\n#[tokio::test]\nasync fn should_fetch_missing_signature() {\n    let mut rng = TestRng::new();\n    let ttl = TimeDiff::from_millis(200);\n    let timestamp = Timestamp::from(1000);\n    let transaction_v1 = new_v1_standard(&mut rng, timestamp, ttl);\n    let transfer = new_transfer(&mut rng, timestamp, ttl);\n    let transfer_v1 = new_v1_transfer(&mut rng, timestamp, ttl);\n\n    let context = ValidationContext::new()\n        
.with_num_validators(&mut rng, 3)\n        .with_past_blocks(&mut rng, 0, 5, 0.into())\n        .with_transactions(vec![transaction_v1])\n        .with_transfers(vec![transfer, transfer_v1])\n        .include_all_transactions()\n        .include_all_transfers();\n\n    let validators = context.get_validators();\n    let mut signing_validators = context.get_validators();\n    let leftover = signing_validators.pop().unwrap(); // one validator will be missing from the set that signed\n\n    let mut context = context\n        .with_signatures_for_block(3, 5, &signing_validators)\n        .with_fetchable_signatures(3, 5, &[leftover])\n        .include_signatures(3, 5, &validators);\n\n    assert!(context.proposal_is_valid(&mut rng, timestamp).await);\n}\n\n#[tokio::test]\nasync fn should_fail_if_unable_to_fetch_signature() {\n    let mut rng = TestRng::new();\n    let ttl = TimeDiff::from_millis(200);\n    let timestamp = Timestamp::from(1000);\n    let deploy = new_deploy(&mut rng, timestamp, ttl);\n    let transaction_v1 = new_v1_standard(&mut rng, timestamp, ttl);\n    let transfer = new_transfer(&mut rng, timestamp, ttl);\n    let transfer_v1 = new_v1_transfer(&mut rng, timestamp, ttl);\n\n    let context = ValidationContext::new()\n        .with_num_validators(&mut rng, 3)\n        .with_past_blocks(&mut rng, 0, 5, 0.into())\n        .with_transactions(vec![deploy, transaction_v1])\n        .with_transfers(vec![transfer, transfer_v1])\n        .include_all_transactions()\n        .include_all_transfers();\n\n    let validators = context.get_validators();\n    let mut signing_validators = context.get_validators();\n    let _ = signing_validators.pop().expect(\"must pop\"); // one validator will be missing from the set that signed\n\n    let mut context = context\n        .with_signatures_for_block(3, 5, &signing_validators)\n        .include_signatures(3, 5, &validators);\n\n    assert!(!context.proposal_is_valid(&mut rng, 
timestamp).await);\n}\n\n#[tokio::test]\nasync fn should_fail_if_unable_to_fetch_signature_for_block_without_transactions() {\n    let mut rng = TestRng::new();\n    let timestamp = Timestamp::from(1000);\n\n    // No transactions in the block.\n    let context = ValidationContext::new()\n        .with_num_validators(&mut rng, 3)\n        .with_past_blocks(&mut rng, 0, 5, 0.into());\n\n    let validators = context.get_validators();\n    let mut signing_validators = context.get_validators();\n    let _ = signing_validators.pop(); // one validator will be missing from the set that signed\n\n    let mut context = context\n        .with_signatures_for_block(3, 5, &signing_validators)\n        .include_signatures(3, 5, &validators);\n\n    assert!(!context.proposal_is_valid(&mut rng, timestamp).await);\n}\n\n#[tokio::test]\nasync fn should_validate_with_delayed_block() {\n    let mut rng = TestRng::new();\n    let ttl = TimeDiff::from_millis(200);\n    let timestamp = Timestamp::from(1000);\n    let transaction_v1 = new_v1_standard(&mut rng, timestamp, ttl);\n    let transfer = new_transfer(&mut rng, timestamp, ttl);\n    let transfer_v1 = new_v1_transfer(&mut rng, timestamp, ttl);\n\n    let context = ValidationContext::new()\n        .with_num_validators(&mut rng, 3)\n        .with_past_blocks(&mut rng, 0, 4, 0.into())\n        .with_delayed_blocks(&mut rng, 5, 5, 0.into())\n        .with_transactions(vec![transaction_v1])\n        .with_transfers(vec![transfer, transfer_v1])\n        .include_all_transactions()\n        .include_all_transfers();\n\n    let validators = context.get_validators();\n\n    let mut context = context\n        .with_signatures_for_block(3, 5, &validators)\n        .include_signatures(3, 5, &validators);\n\n    assert!(context.proposal_is_valid(&mut rng, timestamp).await);\n}\n"
  },
  {
    "path": "node/src/components/block_validator.rs",
    "content": "//! Block validator\n//!\n//! The block validator checks whether all the transactions included in the block payload exist,\n//! either locally or on the network.\n//!\n//! When multiple requests are made to validate the same block payload, they will eagerly return\n//! true if valid, but only fail if all sources have been exhausted. This is only relevant when\n//! calling for validation of the same proposed block multiple times at the same time.\n\nmod config;\nmod event;\nmod state;\n#[cfg(test)]\nmod tests;\n\nuse std::{\n    collections::{BTreeMap, BTreeSet, HashMap, HashSet},\n    sync::Arc,\n};\n\nuse datasize::DataSize;\nuse tracing::{debug, error, trace, warn};\n\nuse casper_types::{\n    Approval, ApprovalsHash, Chainspec, EraId, FinalitySignature, FinalitySignatureId, PublicKey,\n    RewardedSignatures, SingleBlockRewardedSignatures, Timestamp, Transaction, TransactionHash,\n    TransactionId,\n};\n\nuse crate::{\n    components::{\n        consensus::{ClContext, ProposedBlock},\n        fetcher::{self, EmptyValidationMetadata, FetchResult, FetchedData},\n        Component,\n    },\n    effect::{\n        announcements::FatalAnnouncement,\n        requests::{BlockValidationRequest, FetcherRequest, StorageRequest},\n        EffectBuilder, EffectExt, Effects, Responder,\n    },\n    fatal,\n    types::{\n        BlockWithMetadata, InvalidProposalError, NodeId, TransactionFootprint, ValidatorMatrix,\n    },\n    NodeRng,\n};\npub use config::Config;\npub(crate) use event::Event;\nuse state::{AddResponderResult, BlockValidationState, MaybeStartFetching};\n\nconst COMPONENT_NAME: &str = \"block_validator\";\n\nimpl ProposedBlock<ClContext> {\n    fn timestamp(&self) -> Timestamp {\n        self.context().timestamp()\n    }\n\n    /// How many transactions are being tracked?\n    pub(crate) fn transaction_count(&self) -> usize {\n        self.value().count(None)\n    }\n\n    pub(crate) fn all_transactions(\n        &self,\n    ) -> impl 
Iterator<Item = &(TransactionHash, BTreeSet<Approval>)> {\n        self.value().all_transactions()\n    }\n}\n\n/// The return type of trying to handle a validation request as an already-existing request.\nenum MaybeHandled {\n    /// The request is already being handled - return the wrapped effects and finish.\n    Handled(Effects<Event>),\n    /// The request is new - it still needs to be handled.\n    NotHandled(BlockValidationRequest),\n}\n\n#[derive(DataSize, Debug)]\npub(crate) struct BlockValidator {\n    /// Component configuration.\n    config: Config,\n    /// Chainspec loaded for transaction validation.\n    #[data_size(skip)]\n    chainspec: Arc<Chainspec>,\n    /// Validator matrix.\n    #[data_size(skip)]\n    validator_matrix: ValidatorMatrix,\n    /// State of validation of a specific block.\n    validation_states: HashMap<ProposedBlock<ClContext>, BlockValidationState>,\n    /// Requests awaiting storing of a block, keyed by the height of the block being awaited.\n    requests_on_hold: BTreeMap<u64, Vec<BlockValidationRequest>>,\n    /// The gas price for validation of proposed blocks.\n    current_gas_price: u8,\n}\n\nimpl BlockValidator {\n    /// Creates a new block validator instance.\n    pub(crate) fn new(\n        chainspec: Arc<Chainspec>,\n        validator_matrix: ValidatorMatrix,\n        config: Config,\n        current_gas_price: u8,\n    ) -> Self {\n        BlockValidator {\n            chainspec,\n            validator_matrix,\n            config,\n            validation_states: HashMap::new(),\n            requests_on_hold: BTreeMap::new(),\n            current_gas_price,\n        }\n    }\n\n    /// If the request is already being handled, we record the new info and return effects.  
If not,\n    /// the request is returned for processing as a new request.\n    fn try_handle_as_existing_request<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        request: BlockValidationRequest,\n    ) -> MaybeHandled\n    where\n        REv: From<Event>\n            + From<FetcherRequest<Transaction>>\n            + From<FetcherRequest<FinalitySignature>>\n            + Send,\n    {\n        if let Some(state) = self.validation_states.get_mut(&request.block) {\n            let BlockValidationRequest {\n                block,\n                sender,\n                responder,\n                ..\n            } = request;\n            debug!(%sender, %block, \"already validating proposed block\");\n            match state.add_responder(responder) {\n                AddResponderResult::Added => {}\n                AddResponderResult::ValidationCompleted {\n                    responder,\n                    response_to_send,\n                } => {\n                    debug!(\n                        ?response_to_send,\n                        \"proposed block validation already completed\"\n                    );\n                    return MaybeHandled::Handled(responder.respond(response_to_send).ignore());\n                }\n            }\n            state.add_holder(sender);\n\n            let effects = match state.start_fetching() {\n                MaybeStartFetching::Start {\n                    holder,\n                    missing_transactions,\n                    missing_signatures,\n                } => fetch_transactions_and_signatures(\n                    effect_builder,\n                    holder,\n                    missing_transactions,\n                    missing_signatures,\n                ),\n                MaybeStartFetching::Ongoing => {\n                    debug!(\"ongoing fetches while validating proposed block - noop\");\n                    Effects::new()\n                }\n                
MaybeStartFetching::Unable => {\n                    debug!(\"no new info while validating proposed block - responding `false`\");\n                    respond_invalid(\n                        Box::new(InvalidProposalError::UnableToFetch),\n                        state.take_responders(),\n                    )\n                }\n                MaybeStartFetching::ValidationSucceeded | MaybeStartFetching::ValidationFailed => {\n                    // If validation is already completed, we should have exited in the\n                    // `AddResponderResult::ValidationCompleted` branch above.\n                    error!(\"proposed block validation already completed - noop\");\n                    Effects::new()\n                }\n            };\n            MaybeHandled::Handled(effects)\n        } else {\n            MaybeHandled::NotHandled(request)\n        }\n    }\n\n    fn handle_new_request<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        request: BlockValidationRequest,\n    ) -> Effects<Event>\n    where\n        REv: From<Event>\n            + From<FetcherRequest<Transaction>>\n            + From<FetcherRequest<FinalitySignature>>\n            + From<StorageRequest>\n            + From<FatalAnnouncement>\n            + Send,\n    {\n        debug!(sender = %request.sender, block = %request.block, \"validating new proposed block\");\n        debug_assert!(!self.validation_states.contains_key(&request.block));\n\n        if request.block.value().rewarded_signatures().has_some() {\n            // The block contains cited signatures - we have to read the relevant blocks and find\n            // out who the validators are in order to decode the signature IDs\n            let signature_rewards_max_delay =\n                self.chainspec.core_config.signature_rewards_max_delay;\n            let minimum_block_height = request\n                .proposed_block_height\n                
.saturating_sub(signature_rewards_max_delay);\n\n            debug!(\n                proposed_block=?request.block,\n                %minimum_block_height,\n                proposed_block_height=%request.proposed_block_height,\n                \"block cites signatures, validation required - requesting past blocks from storage\"\n            );\n\n            effect_builder\n                .collect_past_blocks_with_metadata(\n                    minimum_block_height..request.proposed_block_height,\n                    false,\n                )\n                .event(\n                    move |past_blocks_with_metadata| Event::GotPastBlocksWithMetadata {\n                        past_blocks_with_metadata,\n                        request,\n                    },\n                )\n        } else {\n            self.handle_new_request_with_signatures(effect_builder, request, HashSet::new())\n        }\n    }\n\n    /// This function pairs the `SingleBlockRewardedSignatures` entries from `rewarded_signatures`\n    /// with the relevant past blocks and their metadata. 
If a block for which some signatures are\n    /// cited is missing, or if some signatures are double-cited, it will return `None`.\n    fn relevant_blocks_and_cited_signatures<'b, 'c>(\n        past_blocks_with_metadata: &'b [Option<BlockWithMetadata>],\n        proposed_block_height: u64,\n        rewarded_signatures: &'c RewardedSignatures,\n    ) -> Result<\n        Vec<(&'b BlockWithMetadata, &'c SingleBlockRewardedSignatures)>,\n        Box<InvalidProposalError>,\n    > {\n        let mut result = Vec::new();\n        // Check whether we know all the blocks for which the proposed block cites some signatures,\n        // and if no signatures are doubly cited.\n        for ((past_block_height, signatures), maybe_block) in rewarded_signatures\n            .iter_with_height(proposed_block_height)\n            .zip(past_blocks_with_metadata.iter().rev())\n        {\n            match maybe_block {\n                None if signatures.has_some() => {\n                    trace!(%past_block_height, \"maybe_block = None if signatures.has_some() - returning\");\n                    return Err(Box::new(\n                        InvalidProposalError::RewardSignaturesMissingCitedBlock {\n                            cited_block_height: past_block_height,\n                        },\n                    ));\n                }\n                None => {\n                    // we have no block, but there are also no signatures cited for this block, so\n                    // we can continue\n                    trace!(%past_block_height, \"maybe_block = None\");\n                }\n                Some(block) => {\n                    let padded_signatures = block.block.rewarded_signatures().clone().left_padded(\n                        proposed_block_height.saturating_sub(past_block_height) as usize,\n                    );\n                    trace!(\n                        ?padded_signatures,\n                        ?rewarded_signatures,\n                        
intersection = ?rewarded_signatures.intersection(&padded_signatures),\n                        \"maybe_block is Some\"\n                    );\n                    if rewarded_signatures\n                        .intersection(&padded_signatures)\n                        .has_some()\n                    {\n                        // block cited a signature that has been cited before - it is invalid!\n                        debug!(\n                            %past_block_height,\n                            \"maybe_block is Some, nonzero intersection with previous\"\n                        );\n                        return Err(Box::new(InvalidProposalError::RewardSignatureReplay {\n                            cited_block_height: past_block_height,\n                        }));\n                    }\n                    // everything is OK - save the block in the result\n                    result.push((block, signatures));\n                }\n            }\n        }\n        Ok(result)\n    }\n\n    fn era_ids_vec(past_blocks_with_metadata: &[Option<BlockWithMetadata>]) -> Vec<Option<EraId>> {\n        // This will create a vector of era ids for the past blocks corresponding to cited\n        // signatures. The index of the entry in the vector will be the number of blocks in the\n        // past relative to the current block, minus 1 (i.e., 0 is the previous block, 1 is the one\n        // before that, etc.) 
- these indices will correspond directly to the indices in\n        // RewardedSignatures.\n        past_blocks_with_metadata\n            .iter()\n            .rev()\n            .map(|maybe_metadata| {\n                maybe_metadata\n                    .as_ref()\n                    .map(|metadata| metadata.block.era_id())\n            })\n            .collect()\n    }\n\n    fn get_relevant_validators(\n        &mut self,\n        past_blocks_with_metadata: &[Option<BlockWithMetadata>],\n    ) -> HashMap<EraId, BTreeSet<PublicKey>> {\n        let era_ids_vec = Self::era_ids_vec(past_blocks_with_metadata);\n        // get the set of unique era ids that are present in the cited blocks\n        let era_ids: HashSet<_> = era_ids_vec.iter().flatten().copied().collect();\n        let validator_matrix = &self.validator_matrix;\n\n        era_ids\n            .into_iter()\n            .filter_map(move |era_id| {\n                validator_matrix\n                    .validator_weights(era_id)\n                    .map(|weights| (era_id, weights.into_validator_public_keys().collect()))\n            })\n            .collect()\n    }\n\n    fn handle_got_past_blocks_with_metadata<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        past_blocks_with_metadata: Vec<Option<BlockWithMetadata>>,\n        request: BlockValidationRequest,\n    ) -> Effects<Event>\n    where\n        REv: From<Event>\n            + From<FetcherRequest<Transaction>>\n            + From<FetcherRequest<FinalitySignature>>\n            + From<FatalAnnouncement>\n            + Send,\n    {\n        let rewarded_signatures = request.block.value().rewarded_signatures();\n\n        match Self::relevant_blocks_and_cited_signatures(\n            &past_blocks_with_metadata,\n            request.proposed_block_height,\n            rewarded_signatures,\n        ) {\n            Ok(blocks_and_signatures) => {\n                let validators = 
self.get_relevant_validators(&past_blocks_with_metadata);\n\n                // This will be a set of signature IDs of the signatures included in the block, but\n                // not found in metadata in storage.\n                let mut missing_sigs = HashSet::new();\n\n                for (block_with_metadata, single_block_rewarded_sigs) in blocks_and_signatures {\n                    let era_id = block_with_metadata.block.era_id();\n                    let Some(all_validators) = validators.get(&era_id) else {\n                        return fatal!(effect_builder, \"couldn't get validators for {}\", era_id)\n                            .ignore();\n                    };\n                    let public_keys = single_block_rewarded_sigs\n                        .clone()\n                        .to_validator_set(all_validators.iter().cloned());\n                    let block_hash = *block_with_metadata.block.hash();\n                    missing_sigs.extend(\n                        public_keys\n                            .into_iter()\n                            .filter(move |public_key| {\n                                !block_with_metadata\n                                    .block_signatures\n                                    .has_finality_signature(public_key)\n                            })\n                            .map(move |public_key| {\n                                FinalitySignatureId::new(block_hash, era_id, public_key)\n                            }),\n                    );\n                }\n\n                trace!(\n                    ?missing_sigs,\n                    \"handle_got_past_blocks_with_metadata missing_sigs\"\n                );\n\n                self.handle_new_request_with_signatures(effect_builder, request, missing_sigs)\n            }\n            Err(error) => {\n                if let InvalidProposalError::RewardSignaturesMissingCitedBlock {\n                    cited_block_height,\n                } = *error\n    
            {\n                    // We are missing some blocks necessary for unpacking signatures from storage -\n                    // put the request on hold for now.\n                    self.requests_on_hold\n                        .entry(cited_block_height)\n                        .or_default()\n                        .push(request);\n                    Effects::new()\n                } else {\n                    // Rewarded signatures pre-validation failed\n                    respond_invalid(error, Some(request.responder))\n                }\n            }\n        }\n    }\n\n    fn handle_block_stored<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        stored_block_height: u64,\n    ) -> Effects<Event>\n    where\n        REv: From<Event>\n            + From<StorageRequest>\n            + From<FetcherRequest<Transaction>>\n            + From<FetcherRequest<FinalitySignature>>\n            + From<FatalAnnouncement>\n            + Send,\n    {\n        let mut pending_requests = vec![];\n\n        while self\n            .requests_on_hold\n            .first_key_value()\n            .is_some_and(|(height, _)| *height <= stored_block_height)\n        {\n            // unwrap is safe - we'd break the loop if there were no elements\n            pending_requests.extend(self.requests_on_hold.pop_first().unwrap().1);\n        }\n\n        pending_requests\n            .into_iter()\n            .flat_map(|request| self.handle_new_request(effect_builder, request))\n            .collect()\n    }\n\n    fn handle_new_request_with_signatures<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        BlockValidationRequest {\n            block,\n            sender,\n            responder,\n            ..\n        }: BlockValidationRequest,\n        missing_signatures: HashSet<FinalitySignatureId>,\n    ) -> Effects<Event>\n    where\n        REv: From<Event>\n            + From<FetcherRequest<Transaction>>\n       
     + From<FetcherRequest<FinalitySignature>>\n            + From<FatalAnnouncement>\n            + Send,\n    {\n        if let Some(old_state) = self.validation_states.get_mut(&block) {\n            // if we got two requests for the same block in quick succession, it is possible that\n            // a state has been created and inserted for one of them while the other one was\n            // awaiting the past blocks from storage; in such a case just save the holder and\n            // responders, and return no effects, as all the fetching will have already been\n            // started\n            match old_state.add_responder(responder) {\n                AddResponderResult::Added => {}\n                AddResponderResult::ValidationCompleted {\n                    responder,\n                    response_to_send,\n                } => {\n                    debug!(\n                        ?response_to_send,\n                        \"proposed block validation already completed\"\n                    );\n                    return responder.respond(response_to_send).ignore();\n                }\n            }\n            old_state.add_holder(sender);\n            return Effects::new();\n        }\n\n        let (mut state, maybe_responder) = BlockValidationState::new(\n            &block,\n            missing_signatures,\n            sender,\n            responder,\n            self.current_gas_price,\n            self.chainspec.as_ref(),\n        );\n        let effects = match state.start_fetching() {\n            MaybeStartFetching::Start {\n                holder,\n                missing_transactions,\n                missing_signatures,\n            } => fetch_transactions_and_signatures(\n                effect_builder,\n                holder,\n                missing_transactions,\n                missing_signatures,\n            ),\n            MaybeStartFetching::ValidationSucceeded => {\n                debug!(\"no transactions - block validation 
complete\");\n                debug_assert!(maybe_responder.is_some());\n                respond_valid(maybe_responder)\n            }\n            MaybeStartFetching::ValidationFailed => {\n                debug_assert!(maybe_responder.is_some());\n                respond_invalid(\n                    Box::new(InvalidProposalError::FailedFetcherValidation),\n                    maybe_responder,\n                )\n            }\n            MaybeStartFetching::Ongoing | MaybeStartFetching::Unable => {\n                // This `MaybeStartFetching` variant should never be returned here.\n                error!(%state, \"invalid state while handling new block validation\");\n                debug_assert!(false, \"invalid state {}\", state);\n                respond_invalid(\n                    Box::new(InvalidProposalError::UnexpectedFetchStatus),\n                    state.take_responders(),\n                )\n            }\n        };\n        self.validation_states.insert(block, state);\n        self.purge_oldest_complete();\n        effects\n    }\n\n    fn purge_oldest_complete(&mut self) {\n        let mut completed_times: Vec<_> = self\n            .validation_states\n            .values()\n            .filter_map(BlockValidationState::block_timestamp_if_completed)\n            .collect();\n        // Sort from newest (highest timestamp) to oldest.\n        completed_times.sort_unstable_by(|lhs, rhs| rhs.cmp(lhs));\n\n        // Normally we'll only need to remove a maximum of a single entry, but loop until we don't\n        // exceed the completed limit to cover any edge cases.\n        let max_completed_entries = self.config.max_completed_entries as usize;\n        while completed_times.len() > max_completed_entries {\n            self.validation_states.retain(|_block, state| {\n                if completed_times.len() <= max_completed_entries {\n                    return true;\n                }\n                if 
state.block_timestamp_if_completed().as_ref() == completed_times.last() {\n                    debug!(\n                        %state,\n                        num_completed_remaining = (completed_times.len() - 1),\n                        \"purging completed block validation state\"\n                    );\n                    let _ = completed_times.pop();\n                    return false;\n                }\n                true\n            });\n        }\n    }\n\n    fn update_era_price(&mut self, current_price: u8) {\n        self.current_gas_price = current_price;\n    }\n\n    fn handle_transaction_fetched<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        transaction_hash: TransactionHash,\n        result: FetchResult<Transaction>,\n    ) -> Effects<Event>\n    where\n        REv: From<Event>\n            + From<FetcherRequest<Transaction>>\n            + From<FetcherRequest<FinalitySignature>>\n            + Send,\n    {\n        match &result {\n            Ok(FetchedData::FromPeer { peer, .. }) => {\n                debug!(%transaction_hash, %peer, \"fetched transaction from peer\")\n            }\n            Ok(FetchedData::FromStorage { .. }) => {\n                debug!(%transaction_hash, \"fetched transaction locally\")\n            }\n            Err(error) => warn!(%transaction_hash, %error, \"could not fetch transaction\"),\n        }\n        match result {\n            Ok(FetchedData::FromStorage { item } | FetchedData::FromPeer { item, .. 
}) => {\n                let item_hash = item.hash();\n                if item_hash != transaction_hash {\n                    // Hard failure - change state to Invalid.\n                    // this should not be reachable\n                    let responders = self\n                        .validation_states\n                        .values_mut()\n                        .flat_map(|state| state.try_mark_invalid(&transaction_hash));\n                    return respond_invalid(\n                        Box::new(InvalidProposalError::FetchedIncorrectTransactionById {\n                            expected_transaction_hash: transaction_hash,\n                            actual_transaction_hash: item_hash,\n                        }),\n                        responders,\n                    );\n                }\n                let transaction_footprint = match TransactionFootprint::new(&self.chainspec, &item)\n                {\n                    Ok(footprint) => footprint,\n                    Err(invalid_transaction_error) => {\n                        warn!(\n                            %transaction_hash, ?invalid_transaction_error,\n                            \"could not convert transaction\",\n                        );\n                        // Hard failure - change state to Invalid.\n                        let responders = self\n                            .validation_states\n                            .values_mut()\n                            .flat_map(|state| state.try_mark_invalid(&transaction_hash));\n                        return respond_invalid(invalid_transaction_error.into(), responders);\n                    }\n                };\n\n                let mut effects = Effects::new();\n                for state in self.validation_states.values_mut() {\n                    let responders = state\n                        .try_add_transaction_footprint(&transaction_hash, &transaction_footprint);\n                    if !responders.is_empty() {\n     
                   let ret = match &state {\n                            BlockValidationState::InProgress { .. } => {\n                                // this seems to be unreachable as currently written\n                                respond_invalid(\n                                    Box::new(InvalidProposalError::TransactionFetchingAborted),\n                                    responders,\n                                )\n                            }\n                            BlockValidationState::Invalid { error, .. } => {\n                                respond_invalid(error.clone(), responders)\n                            }\n                            BlockValidationState::Valid(_) => respond_valid(responders),\n                        };\n                        effects.extend(ret);\n                    }\n                }\n                effects\n            }\n            Err(error) => {\n                match error {\n                    fetcher::Error::Absent { peer, .. }\n                    | fetcher::Error::Rejected { peer, .. }\n                    | fetcher::Error::TimedOut { peer, .. 
} => {\n                        // Soft failure - just mark the holder as failed and see if we can start\n                        // fetching using a different holder.\n                        let mut effects = Effects::new();\n                        self.validation_states.values_mut().for_each(|state| {\n                            state.try_mark_holder_failed(&peer);\n                            match state.start_fetching() {\n                                MaybeStartFetching::Start {\n                                    holder,\n                                    missing_transactions,\n                                    missing_signatures,\n                                } => {\n                                    debug!(\n                                        %holder,\n                                        missing_transactions_len = missing_transactions.len(),\n                                        \"fetching missing transactions from different peer\"\n                                    );\n                                    effects.extend(fetch_transactions_and_signatures(\n                                        effect_builder,\n                                        holder,\n                                        missing_transactions,\n                                        missing_signatures,\n                                    ));\n                                }\n                                MaybeStartFetching::Unable => {\n                                    debug!(\n                                        \"exhausted peers while validating proposed block - \\\n                                        responding `false`\"\n                                    );\n                                    effects.extend(respond_invalid(\n                                        Box::new(InvalidProposalError::FetcherError(format!(\n                                            \"{:?}\",\n                                            error\n        
                                ))),\n                                        state.take_responders(),\n                                    ));\n                                }\n                                MaybeStartFetching::Ongoing\n                                | MaybeStartFetching::ValidationSucceeded\n                                | MaybeStartFetching::ValidationFailed => {}\n                            }\n                        });\n                        effects\n                    }\n                    fetcher::Error::CouldNotConstructGetRequest { .. }\n                    | fetcher::Error::ValidationMetadataMismatch { .. } => {\n                        // Hard failure - change state to Invalid.\n                        let responders = self\n                            .validation_states\n                            .values_mut()\n                            .flat_map(|state| state.try_mark_invalid(&transaction_hash));\n                        respond_invalid(\n                            Box::new(InvalidProposalError::FetcherError(format!(\"{:?}\", error))),\n                            responders,\n                        )\n                    }\n                }\n            }\n        }\n    }\n\n    fn handle_finality_signature_fetched<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        finality_signature_id: FinalitySignatureId,\n        result: FetchResult<FinalitySignature>,\n    ) -> Effects<Event>\n    where\n        REv: From<Event>\n            + From<FetcherRequest<Transaction>>\n            + From<FetcherRequest<FinalitySignature>>\n            + Send,\n    {\n        match &result {\n            Ok(FetchedData::FromPeer { peer, .. }) => {\n                debug!(%finality_signature_id, %peer, \"fetched finality signature from peer\")\n            }\n            Ok(FetchedData::FromStorage { .. 
}) => {\n                debug!(%finality_signature_id, \"fetched finality signature locally\")\n            }\n            Err(error) => {\n                warn!(%finality_signature_id, %error, \"could not fetch finality signature\")\n            }\n        }\n        match result {\n            Ok(FetchedData::FromStorage { .. } | FetchedData::FromPeer { .. }) => {\n                let mut effects = Effects::new();\n                for state in self.validation_states.values_mut() {\n                    let responders = state.try_add_signature(&finality_signature_id);\n                    if !responders.is_empty() {\n                        let ret = match &state {\n                            BlockValidationState::InProgress { .. } => {\n                                // this seems to be unreachable as currently written\n                                respond_invalid(\n                                    Box::new(\n                                        InvalidProposalError::FinalitySignatureFetchingAborted,\n                                    ),\n                                    responders,\n                                )\n                            }\n                            BlockValidationState::Invalid { error, .. } => {\n                                respond_invalid(error.clone(), responders)\n                            }\n                            BlockValidationState::Valid(_) => respond_valid(responders),\n                        };\n                        effects.extend(ret);\n                    }\n                }\n                effects\n            }\n            Err(error) => {\n                match error {\n                    fetcher::Error::Absent { peer, .. }\n                    | fetcher::Error::Rejected { peer, .. }\n                    | fetcher::Error::TimedOut { peer, .. 
} => {\n                        // Soft failure - just mark the holder as failed and see if we can start\n                        // fetching using a different holder.\n                        let mut effects = Effects::new();\n                        self.validation_states.values_mut().for_each(|state| {\n                            state.try_mark_holder_failed(&peer);\n                            match state.start_fetching() {\n                                MaybeStartFetching::Start {\n                                    holder,\n                                    missing_transactions,\n                                    missing_signatures,\n                                } => {\n                                    debug!(\n                                        %holder,\n                                        missing_transactions_len = missing_transactions.len(),\n                                        \"fetching missing transactions and signatures from different \\\n                                        peer\"\n                                    );\n                                    effects.extend(fetch_transactions_and_signatures(\n                                        effect_builder,\n                                        holder,\n                                        missing_transactions,\n                                        missing_signatures,\n                                    ));\n                                }\n                                MaybeStartFetching::Unable => {\n                                    debug!(\n                                        \"exhausted peers while validating proposed block - \\\n                                        responding `false`\"\n                                    );\n                                    effects.extend(respond_invalid(\n                                        Box::new(InvalidProposalError::FetcherError(format!(\"{:?}\", error))),\n                                    
    state.take_responders()));\n                                }\n                                MaybeStartFetching::Ongoing\n                                | MaybeStartFetching::ValidationSucceeded\n                                | MaybeStartFetching::ValidationFailed => {}\n                            }\n                        });\n                        effects\n                    }\n                    fetcher::Error::CouldNotConstructGetRequest { .. }\n                    | fetcher::Error::ValidationMetadataMismatch { .. } => {\n                        // Hard failure - change state to Invalid.\n                        let responders = self.validation_states.values_mut().flat_map(|state| {\n                            state.try_mark_invalid_signature(&finality_signature_id)\n                        });\n                        respond_invalid(\n                            Box::new(InvalidProposalError::FetcherError(format!(\"{:?}\", error))),\n                            responders,\n                        )\n                    }\n                }\n            }\n        }\n    }\n}\n\nfn fetch_transactions_and_signatures<REv>(\n    effect_builder: EffectBuilder<REv>,\n    holder: NodeId,\n    missing_transactions: HashMap<TransactionHash, ApprovalsHash>,\n    missing_signatures: HashSet<FinalitySignatureId>,\n) -> Effects<Event>\nwhere\n    REv: From<Event>\n        + From<FetcherRequest<Transaction>>\n        + From<FetcherRequest<FinalitySignature>>\n        + Send,\n{\n    let mut effects: Effects<Event> = Effects::new();\n    for (transaction_hash, approvals_hash) in missing_transactions {\n        let transaction_id = match transaction_hash {\n            TransactionHash::Deploy(deploy_hash) => {\n                TransactionId::new(deploy_hash.into(), approvals_hash)\n            }\n            TransactionHash::V1(v1_hash) => TransactionId::new(v1_hash.into(), approvals_hash),\n        };\n        effects.extend(\n            effect_builder\n   
             .fetch::<Transaction>(transaction_id, holder, Box::new(EmptyValidationMetadata))\n                .event(move |result| Event::TransactionFetched {\n                    transaction_hash,\n                    result,\n                }),\n        );\n    }\n\n    for missing_signature in missing_signatures {\n        effects.extend(\n            effect_builder\n                .fetch::<FinalitySignature>(\n                    Box::new(missing_signature.clone()),\n                    holder,\n                    Box::new(EmptyValidationMetadata),\n                )\n                .event(move |result| Event::FinalitySignatureFetched {\n                    finality_signature_id: Box::new(missing_signature),\n                    result,\n                }),\n        )\n    }\n\n    effects\n}\n\nfn respond_valid(\n    responders: impl IntoIterator<Item = Responder<Result<(), Box<InvalidProposalError>>>>,\n) -> Effects<Event> {\n    responders\n        .into_iter()\n        .flat_map(|responder| responder.respond(Ok(())).ignore())\n        .collect()\n}\n\nfn respond_invalid(\n    error: Box<InvalidProposalError>,\n    responders: impl IntoIterator<Item = Responder<Result<(), Box<InvalidProposalError>>>>,\n) -> Effects<Event> {\n    responders\n        .into_iter()\n        .flat_map(|responder| responder.respond(Err(error.clone())).ignore())\n        .collect()\n}\n\nimpl<REv> Component<REv> for BlockValidator\nwhere\n    REv: From<Event>\n        + From<BlockValidationRequest>\n        + From<FetcherRequest<Transaction>>\n        + From<FetcherRequest<FinalitySignature>>\n        + From<StorageRequest>\n        + From<FatalAnnouncement>\n        + Send,\n{\n    type Event = Event;\n\n    fn name(&self) -> &str {\n        COMPONENT_NAME\n    }\n\n    fn handle_event(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        _rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event> {\n        match event {\n            
Event::Request(request) => {\n                debug!(block = ?request.block, \"validating proposed block\");\n                match self.try_handle_as_existing_request(effect_builder, request) {\n                    MaybeHandled::Handled(effects) => effects,\n                    MaybeHandled::NotHandled(request) => {\n                        self.handle_new_request(effect_builder, request)\n                    }\n                }\n            }\n            Event::GotPastBlocksWithMetadata {\n                past_blocks_with_metadata,\n                request,\n            } => self.handle_got_past_blocks_with_metadata(\n                effect_builder,\n                past_blocks_with_metadata,\n                request,\n            ),\n            Event::BlockStored(stored_block_height) => {\n                self.handle_block_stored(effect_builder, stored_block_height)\n            }\n            Event::TransactionFetched {\n                transaction_hash,\n                result,\n            } => self.handle_transaction_fetched(effect_builder, transaction_hash, result),\n            Event::FinalitySignatureFetched {\n                finality_signature_id,\n                result,\n            } => self.handle_finality_signature_fetched(\n                effect_builder,\n                *finality_signature_id,\n                result,\n            ),\n            Event::UpdateEraGasPrice(_, current_price) => {\n                self.update_era_price(current_price);\n                Effects::new()\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/cl_context.rs",
    "content": "use std::sync::Arc;\n\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\nuse tracing::info;\n\nuse casper_types::{crypto, Digest, PublicKey, SecretKey, Signature};\n\nuse crate::{\n    components::consensus::traits::{ConsensusValueT, Context, ValidatorSecret},\n    types::BlockPayload,\n};\n\n#[derive(DataSize)]\npub struct Keypair {\n    secret_key: Arc<SecretKey>,\n    public_key: PublicKey,\n}\n\nimpl Keypair {\n    pub(crate) fn new(secret_key: Arc<SecretKey>, public_key: PublicKey) -> Self {\n        Self {\n            secret_key,\n            public_key,\n        }\n    }\n\n    #[cfg(test)]\n    pub(crate) fn public_key(&self) -> &PublicKey {\n        &self.public_key\n    }\n}\n\nimpl From<Arc<SecretKey>> for Keypair {\n    fn from(secret_key: Arc<SecretKey>) -> Self {\n        let public_key: PublicKey = secret_key.as_ref().into();\n        Self::new(secret_key, public_key)\n    }\n}\n\nimpl ValidatorSecret for Keypair {\n    type Hash = Digest;\n    type Signature = Signature;\n\n    fn sign(&self, hash: &Digest) -> Signature {\n        crypto::sign(hash, self.secret_key.as_ref(), &self.public_key)\n    }\n}\n\nimpl ConsensusValueT for Arc<BlockPayload> {\n    fn needs_validation(&self) -> bool {\n        self.all_transactions().next().is_some()\n            || !self.accusations().is_empty()\n            || self.rewarded_signatures().has_some()\n    }\n}\n\n/// The collection of types used for cryptography, IDs and blocks in the Casper node.\n#[derive(Clone, DataSize, Debug, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)]\npub struct ClContext;\n\nimpl Context for ClContext {\n    type ConsensusValue = Arc<BlockPayload>;\n    type ValidatorId = PublicKey;\n    type ValidatorSecret = Keypair;\n    type Signature = Signature;\n    type Hash = Digest;\n    type InstanceId = Digest;\n\n    fn hash(data: &[u8]) -> Digest {\n        Digest::hash(data)\n    }\n\n    fn verify_signature(hash: &Digest, public_key: 
&PublicKey, signature: &Signature) -> bool {\n        if let Err(error) = crypto::verify(hash, signature, public_key) {\n            info!(%error, %signature, %public_key, %hash, \"failed to validate signature\");\n            return false;\n        }\n        true\n    }\n}\n\nmod specimen_support {\n    use super::Keypair;\n    use crate::utils::specimen::{Cache, LargestSpecimen, SizeEstimator};\n    use casper_types::{PublicKey, SecretKey};\n    use std::sync::Arc;\n\n    impl LargestSpecimen for Keypair {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            let secret_key = SecretKey::largest_specimen(estimator, cache);\n            let public_key = PublicKey::from(&secret_key);\n            Keypair {\n                secret_key: Arc::new(secret_key),\n                public_key,\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/config.rs",
    "content": "use std::{path::Path, sync::Arc};\n\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse casper_types::{Chainspec, PublicKey, SecretKey};\n\nuse crate::{\n    components::consensus::{\n        era_supervisor::PAST_EVIDENCE_ERAS,\n        protocols::{highway::config::Config as HighwayConfig, zug::config::Config as ZugConfig},\n        EraId,\n    },\n    utils::{External, LoadError, Loadable},\n};\n\nconst DEFAULT_MAX_EXECUTION_DELAY: u64 = 3;\n\n/// Consensus configuration.\n#[derive(DataSize, Debug, Serialize, Deserialize, Clone)]\n// Disallow unknown fields to ensure config files and command-line overrides contain valid keys.\n#[serde(deny_unknown_fields)]\npub struct Config {\n    /// Path to secret key file.\n    pub secret_key_path: External,\n    /// The maximum number of blocks by which execution is allowed to lag behind finalization.\n    /// If it is more than that, consensus will pause, and resume once the executor has caught up.\n    pub max_execution_delay: u64,\n    /// Highway-specific node configuration.\n    #[serde(default)]\n    pub highway: HighwayConfig,\n    /// Zug-specific node configuration.\n    #[serde(default)]\n    pub zug: ZugConfig,\n}\n\nimpl Default for Config {\n    fn default() -> Self {\n        Config {\n            secret_key_path: External::Missing,\n            max_execution_delay: DEFAULT_MAX_EXECUTION_DELAY,\n            highway: HighwayConfig::default(),\n            zug: ZugConfig::default(),\n        }\n    }\n}\n\ntype LoadKeyError = LoadError<<Arc<SecretKey> as Loadable>::Error>;\n\nimpl Config {\n    /// Loads the secret key from the configuration file and derives the public key.\n    pub(crate) fn load_keys<P: AsRef<Path>>(\n        &self,\n        root: P,\n    ) -> Result<(Arc<SecretKey>, PublicKey), LoadKeyError> {\n        let secret_signing_key: Arc<SecretKey> = self.secret_key_path.clone().load(root)?;\n        let public_key: PublicKey = 
PublicKey::from(secret_signing_key.as_ref());\n        Ok((secret_signing_key, public_key))\n    }\n}\n\npub trait ChainspecConsensusExt {\n    /// Returns the ID of the last activation era, i.e. the era immediately after the most recent\n    /// upgrade or restart.\n    fn activation_era(&self) -> EraId;\n\n    /// Returns the earliest era whose evidence is still relevant to the current era. If the current\n    /// era is N, that is usually N - 1, except that it's never at or before the most recent\n    /// activation point.\n    fn earliest_relevant_era(&self, current_era: EraId) -> EraId;\n\n    /// Returns the earliest era whose switch block is needed to initialize the given era. For era\n    /// N that will usually be N - A - 1, where A is the auction delay, except that switch blocks\n    /// from before the most recent activation point are never used.\n    fn earliest_switch_block_needed(&self, era_id: EraId) -> EraId;\n\n    /// Returns the number of switch blocks needed for initializing an era.\n    fn number_of_past_switch_blocks_needed(&self) -> u64;\n}\n\nimpl ChainspecConsensusExt for Chainspec {\n    fn activation_era(&self) -> EraId {\n        self.protocol_config.activation_point.era_id()\n    }\n\n    fn earliest_relevant_era(&self, current_era: EraId) -> EraId {\n        self.activation_era()\n            .successor()\n            .max(current_era.saturating_sub(PAST_EVIDENCE_ERAS))\n    }\n\n    fn earliest_switch_block_needed(&self, era_id: EraId) -> EraId {\n        self.activation_era().max(\n            era_id\n                .saturating_sub(1)\n                .saturating_sub(self.core_config.auction_delay),\n        )\n    }\n\n    fn number_of_past_switch_blocks_needed(&self) -> u64 {\n        self.core_config\n            .auction_delay\n            .saturating_add(PAST_EVIDENCE_ERAS)\n            .saturating_add(1)\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/consensus_protocol.rs",
    "content": "use std::{\n    any::Any,\n    fmt::{self, Debug, Display, Formatter},\n    path::PathBuf,\n};\n\nuse datasize::DataSize;\n\nuse casper_types::{TimeDiff, Timestamp};\n\nuse crate::{\n    components::consensus::{traits::Context, ActionId, TimerId},\n    types::NodeId,\n    NodeRng,\n};\n\nuse super::era_supervisor::SerializedMessage;\n\n/// Information about the context in which a new block is created.\n#[derive(Clone, DataSize, Eq, PartialEq, Debug, Ord, PartialOrd, Hash)]\npub struct BlockContext<C>\nwhere\n    C: Context,\n{\n    timestamp: Timestamp,\n    /// The ancestors of the new block, in reverse chronological order, i.e. the first entry is the\n    /// new block's parent.\n    ancestor_values: Vec<C::ConsensusValue>,\n}\n\nimpl<C: Context> BlockContext<C> {\n    /// Constructs a new `BlockContext`.\n    pub(crate) fn new(timestamp: Timestamp, ancestor_values: Vec<C::ConsensusValue>) -> Self {\n        BlockContext {\n            timestamp,\n            ancestor_values,\n        }\n    }\n\n    /// The block's timestamp.\n    pub(crate) fn timestamp(&self) -> Timestamp {\n        self.timestamp\n    }\n\n    /// The block's relative height, i.e. 
the number of ancestors in the current era.\n    #[cfg(test)]\n    pub(crate) fn height(&self) -> u64 {\n        self.ancestor_values.len() as u64\n    }\n\n    /// The values of the block's ancestors.\n    pub(crate) fn ancestor_values(&self) -> &[C::ConsensusValue] {\n        &self.ancestor_values\n    }\n}\n\n/// A proposed block, with context.\n#[derive(Clone, DataSize, Eq, PartialEq, Debug, Ord, PartialOrd, Hash)]\npub struct ProposedBlock<C>\nwhere\n    C: Context,\n{\n    value: C::ConsensusValue,\n    context: BlockContext<C>,\n}\n\nimpl<C: Context> ProposedBlock<C> {\n    pub(crate) fn new(value: C::ConsensusValue, context: BlockContext<C>) -> Self {\n        ProposedBlock { value, context }\n    }\n\n    pub(crate) fn value(&self) -> &C::ConsensusValue {\n        &self.value\n    }\n\n    pub(crate) fn context(&self) -> &BlockContext<C> {\n        &self.context\n    }\n\n    pub(crate) fn destructure(self) -> (C::ConsensusValue, BlockContext<C>) {\n        (self.value, self.context)\n    }\n}\n\nimpl<C: Context> Display for ProposedBlock<C> {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            formatter,\n            \"proposed block at {}: {}\",\n            self.context.timestamp(),\n            self.value\n        )\n    }\n}\n\n#[derive(Clone, Debug, Eq, PartialEq, Hash)]\npub(crate) struct TerminalBlockData<C: Context> {\n    /// The list of validators that haven't produced any units.\n    pub(crate) inactive_validators: Vec<C::ValidatorId>,\n}\n\n/// A finalized block. 
All nodes are guaranteed to see the same sequence of blocks, and to agree\n/// about all the information contained in this type, as long as the total weight of faulty\n/// validators remains below the threshold.\n#[derive(Clone, Debug, Eq, PartialEq, Hash)]\npub(crate) struct FinalizedBlock<C: Context> {\n    /// The finalized value.\n    pub(crate) value: C::ConsensusValue,\n    /// The timestamp at which this value was proposed.\n    pub(crate) timestamp: Timestamp,\n    /// The relative height in this instance of the protocol.\n    pub(crate) relative_height: u64,\n    /// The validators known to be faulty as seen by this block.\n    pub(crate) equivocators: Vec<C::ValidatorId>,\n    /// If this is a terminal block, i.e. the last one to be finalized, this contains additional\n    /// data like rewards and inactive validators.\n    pub(crate) terminal_block_data: Option<TerminalBlockData<C>>,\n    /// Proposer of this value\n    pub(crate) proposer: C::ValidatorId,\n}\n\npub(crate) type ProtocolOutcomes<C> = Vec<ProtocolOutcome<C>>;\n\n#[derive(Clone, Debug, Eq, PartialEq)]\npub(crate) enum ProtocolOutcome<C: Context> {\n    CreatedGossipMessage(SerializedMessage),\n    CreatedTargetedMessage(SerializedMessage, NodeId),\n    CreatedMessageToRandomPeer(SerializedMessage),\n    CreatedRequestToRandomValidator(SerializedMessage),\n    ScheduleTimer(Timestamp, TimerId),\n    QueueAction(ActionId),\n    /// Request transactions for a new block, providing the necessary context.\n    CreateNewBlock(BlockContext<C>, Timestamp),\n    /// A block was finalized.\n    FinalizedBlock(FinalizedBlock<C>),\n    /// Request validation of the consensus value, contained in a message received from the given\n    /// node.\n    ///\n    /// The domain logic should verify any intrinsic validity conditions of consensus values, e.g.\n    /// that it has the expected structure, or that transactions that are mentioned by hash\n    /// actually exist, and then call 
`ConsensusProtocol::resolve_validity`.\n    ValidateConsensusValue {\n        sender: NodeId,\n        proposed_block: ProposedBlock<C>,\n    },\n    /// New direct evidence was added against the given validator.\n    NewEvidence(C::ValidatorId),\n    /// Send evidence about the validator from an earlier era to the peer.\n    SendEvidence(NodeId, C::ValidatorId),\n    /// We've detected an equivocation our own node has made.\n    WeAreFaulty,\n    /// We've received a unit from a doppelganger.\n    DoppelgangerDetected,\n    /// Too many faulty validators. The protocol's fault tolerance threshold has been exceeded and\n    /// consensus cannot continue.\n    FttExceeded,\n    /// We want to disconnect from a sender of invalid data.\n    Disconnect(NodeId),\n    /// We added a proposed block to the protocol state.\n    ///\n    /// This is used to inform the transaction buffer, so we don't propose the same transactions\n    /// again. Does not need to be raised for proposals this node created itself.\n    HandledProposedBlock(ProposedBlock<C>),\n}\n\n/// An API for a single instance of the consensus.\npub(crate) trait ConsensusProtocol<C: Context>: Send {\n    /// Upcasts consensus protocol into `dyn Any`.\n    ///\n    /// Typically called on a boxed trait object for downcasting afterwards.\n    fn as_any(&self) -> &dyn Any;\n\n    /// Handles an incoming message (like NewUnit, RequestDependency).\n    fn handle_message(\n        &mut self,\n        rng: &mut NodeRng,\n        sender: NodeId,\n        msg: SerializedMessage,\n        now: Timestamp,\n    ) -> ProtocolOutcomes<C>;\n\n    /// Handles an incoming request message and returns an optional response.\n    fn handle_request_message(\n        &mut self,\n        rng: &mut NodeRng,\n        sender: NodeId,\n        msg: SerializedMessage,\n        now: Timestamp,\n    ) -> (ProtocolOutcomes<C>, Option<SerializedMessage>);\n\n    /// Current instance of consensus protocol is latest era.\n    fn 
handle_is_current(&self, now: Timestamp) -> ProtocolOutcomes<C>;\n\n    /// Triggers consensus' timer.\n    fn handle_timer(\n        &mut self,\n        timestamp: Timestamp,\n        now: Timestamp,\n        timer_id: TimerId,\n        rng: &mut NodeRng,\n    ) -> ProtocolOutcomes<C>;\n\n    /// Triggers a queued action.\n    fn handle_action(&mut self, action_id: ActionId, now: Timestamp) -> ProtocolOutcomes<C>;\n\n    /// Proposes a new value for consensus.\n    fn propose(&mut self, proposed_block: ProposedBlock<C>, now: Timestamp) -> ProtocolOutcomes<C>;\n\n    /// Marks the `value` as valid or invalid, based on validation requested via\n    /// `ProtocolOutcome::ValidateConsensusValue`.\n    fn resolve_validity(\n        &mut self,\n        proposed_block: ProposedBlock<C>,\n        valid: bool,\n        now: Timestamp,\n    ) -> ProtocolOutcomes<C>;\n\n    /// Turns this instance into an active validator, that participates in the consensus protocol.\n    fn activate_validator(\n        &mut self,\n        our_id: C::ValidatorId,\n        secret: C::ValidatorSecret,\n        timestamp: Timestamp,\n        unit_hash_file: Option<PathBuf>,\n    ) -> ProtocolOutcomes<C>;\n\n    /// Turns this instance into a passive observer, that does not create any new vertices.\n    fn deactivate_validator(&mut self);\n\n    /// Clears this instance and keeps only the information necessary to validate evidence.\n    fn set_evidence_only(&mut self);\n\n    /// Returns whether the validator `vid` is known to be faulty.\n    fn has_evidence(&self, vid: &C::ValidatorId) -> bool;\n\n    /// Marks the validator `vid` as faulty, based on evidence from a different instance.\n    fn mark_faulty(&mut self, vid: &C::ValidatorId);\n\n    /// Sends evidence for a faulty validator `vid` to the `sender` of the request.\n    fn send_evidence(&self, sender: NodeId, vid: &C::ValidatorId) -> ProtocolOutcomes<C>;\n\n    /// Sets the pause status: While paused we don't create consensus 
messages other than pings.\n    fn set_paused(&mut self, paused: bool, now: Timestamp) -> ProtocolOutcomes<C>;\n\n    /// Returns the list of all validators that were observed as faulty in this consensus instance.\n    fn validators_with_evidence(&self) -> Vec<&C::ValidatorId>;\n\n    /// Returns whether this instance of a protocol is an active validator.\n    fn is_active(&self) -> bool;\n\n    /// Returns the instance ID of this instance.\n    fn instance_id(&self) -> &C::InstanceId;\n\n    // TODO: Make this less Highway-specific.\n    fn next_round_length(&self) -> Option<TimeDiff>;\n}\n"
  },
  {
    "path": "node/src/components/consensus/era_supervisor/debug.rs",
    "content": "//! Data types used solely for dumping of consensus data via the diagnostics port.\n\nuse std::{\n    borrow::Cow,\n    collections::{BTreeMap, HashSet},\n    fmt::{self, Display, Formatter},\n};\n\nuse casper_types::{EraId, PublicKey, Timestamp, U512};\nuse serde::Serialize;\n\nuse crate::components::consensus::{highway_core::State, ClContext, HighwayProtocol};\n\nuse super::Era;\n\n/// Debug dump of era used for serialization.\n#[derive(Debug, Serialize)]\npub(crate) struct EraDump<'a> {\n    /// The era that is being dumped.\n    pub(crate) id: EraId,\n\n    /// The scheduled starting time of this era.\n    pub(crate) start_time: Timestamp,\n    /// The height of this era's first block.\n    pub(crate) start_height: u64,\n\n    // omitted: pending blocks\n    /// Validators that have been faulty in any of the recent BONDED_ERAS switch blocks. This\n    /// includes `new_faulty`.\n    pub(crate) faulty: &'a HashSet<PublicKey>,\n    /// Validators that are excluded from proposing new blocks.\n    pub(crate) cannot_propose: &'a HashSet<PublicKey>,\n    /// Accusations collected in this era so far.\n    pub(crate) accusations: &'a HashSet<PublicKey>,\n    /// The validator weights.\n    pub(crate) validators: &'a BTreeMap<PublicKey, U512>,\n\n    /// The state of the highway instance associated with the era.\n    pub(crate) highway_state: &'a State<ClContext>,\n}\n\nimpl Display for EraDump<'_> {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"era {}: TBD\", self.id)\n    }\n}\n\nimpl<'a> EraDump<'a> {\n    /// Creates a new `EraDump` from a given era.\n    pub(crate) fn dump_era(era: &'a Era, era_id: EraId) -> Result<Self, Cow<'static, str>> {\n        let highway = era\n            .consensus\n            .as_any()\n            .downcast_ref::<HighwayProtocol<ClContext>>()\n            .ok_or(Cow::Borrowed(\n                \"could not downcast `ConsensusProtocol` into `HighwayProtocol<ClContext>`\",\n            
))?;\n\n        Ok(EraDump {\n            id: era_id,\n            start_time: era.start_time,\n            start_height: era.start_height,\n            faulty: &era.faulty,\n            cannot_propose: &era.cannot_propose,\n            accusations: &era.accusations,\n            validators: &era.validators,\n            highway_state: highway.highway().state(),\n        })\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/era_supervisor/era.rs",
    "content": "use std::{\n    collections::{BTreeMap, HashMap, HashSet},\n    env,\n};\n\nuse datasize::DataSize;\nuse itertools::Itertools;\nuse once_cell::sync::Lazy;\nuse tracing::{debug, info, warn};\n\nuse casper_types::{PublicKey, Timestamp, U512};\n\nuse crate::components::consensus::{\n    cl_context::ClContext,\n    consensus_protocol::{ConsensusProtocol, ProposedBlock},\n    protocols::{highway::HighwayProtocol, zug::Zug},\n};\n\nconst CASPER_ENABLE_DETAILED_CONSENSUS_METRICS_ENV_VAR: &str =\n    \"CASPER_ENABLE_DETAILED_CONSENSUS_METRICS\";\nstatic CASPER_ENABLE_DETAILED_CONSENSUS_METRICS: Lazy<bool> =\n    Lazy::new(|| env::var(CASPER_ENABLE_DETAILED_CONSENSUS_METRICS_ENV_VAR).is_ok());\n\n/// A proposed block waiting for validation and dependencies.\n#[derive(DataSize)]\npub struct ValidationState {\n    /// Whether the block has been validated yet.\n    validated: bool,\n    /// A list of IDs of accused validators for which we are still missing evidence.\n    missing_evidence: Vec<PublicKey>,\n}\n\nimpl ValidationState {\n    fn new(missing_evidence: Vec<PublicKey>) -> Self {\n        ValidationState {\n            validated: false,\n            missing_evidence,\n        }\n    }\n\n    fn is_complete(&self) -> bool {\n        self.validated && self.missing_evidence.is_empty()\n    }\n}\n\npub struct Era {\n    /// The consensus protocol instance.\n    pub(crate) consensus: Box<dyn ConsensusProtocol<ClContext>>,\n    /// The scheduled starting time of this era.\n    pub(crate) start_time: Timestamp,\n    /// The height of this era's first block.\n    pub(crate) start_height: u64,\n    /// Pending blocks, waiting for validation and dependencies.\n    pub(crate) validation_states: HashMap<ProposedBlock<ClContext>, ValidationState>,\n    /// Validators banned in this and the next BONDED_ERAS eras, because they were faulty in the\n    /// previous switch block.\n    pub(crate) faulty: HashSet<PublicKey>,\n    /// Validators that are excluded from 
proposing new blocks.\n    pub(crate) cannot_propose: HashSet<PublicKey>,\n    /// Accusations collected in this era so far.\n    pub(crate) accusations: HashSet<PublicKey>,\n    /// The validator weights.\n    pub(crate) validators: BTreeMap<PublicKey, U512>,\n}\n\nimpl Era {\n    pub(crate) fn new(\n        consensus: Box<dyn ConsensusProtocol<ClContext>>,\n        start_time: Timestamp,\n        start_height: u64,\n        faulty: HashSet<PublicKey>,\n        cannot_propose: HashSet<PublicKey>,\n        validators: BTreeMap<PublicKey, U512>,\n    ) -> Self {\n        Era {\n            consensus,\n            start_time,\n            start_height,\n            validation_states: HashMap::new(),\n            faulty,\n            cannot_propose,\n            accusations: HashSet::new(),\n            validators,\n        }\n    }\n\n    /// Adds a new block, together with the accusations for which we don't have evidence yet.\n    pub(crate) fn add_block(\n        &mut self,\n        proposed_block: ProposedBlock<ClContext>,\n        missing_evidence: Vec<PublicKey>,\n    ) {\n        self.validation_states\n            .insert(proposed_block, ValidationState::new(missing_evidence));\n    }\n\n    /// Marks the dependencies of blocks on evidence against validator `pub_key` as resolved and\n    /// returns all valid blocks that have no missing dependencies left.\n    pub(crate) fn resolve_evidence_and_mark_faulty(\n        &mut self,\n        pub_key: &PublicKey,\n    ) -> Vec<ProposedBlock<ClContext>> {\n        for pc in self.validation_states.values_mut() {\n            pc.missing_evidence.retain(|pk| pk != pub_key);\n        }\n        self.consensus.mark_faulty(pub_key);\n        let (complete, incomplete): (HashMap<_, _>, HashMap<_, _>) = self\n            .validation_states\n            .drain()\n            .partition(|(_, validation_state)| validation_state.is_complete());\n        self.validation_states = incomplete;\n        
complete.into_keys().collect()\n    }\n\n    /// Marks the block payload as valid or invalid. Returns `false` if the block was not present\n    /// or is still missing evidence. Otherwise, it returns `true`: The block can now be processed\n    /// by the consensus protocol.\n    pub(crate) fn resolve_validity(\n        &mut self,\n        proposed_block: &ProposedBlock<ClContext>,\n        valid: bool,\n    ) -> bool {\n        if valid {\n            if let Some(vs) = self.validation_states.get_mut(proposed_block) {\n                if !vs.missing_evidence.is_empty() {\n                    info!(\"Cannot resolve validity of proposed block (timestamp {}) due to missing_evidence still present.\", proposed_block.context().timestamp());\n                    vs.validated = true;\n                    return false;\n                }\n            }\n        }\n        self.validation_states.remove(proposed_block).is_some()\n    }\n\n    /// Adds new accusations from a finalized block.\n    pub(crate) fn add_accusations(&mut self, accusations: &[PublicKey]) {\n        for pub_key in accusations {\n            if !self.faulty.contains(pub_key) {\n                self.accusations.insert(pub_key.clone());\n            }\n        }\n    }\n\n    /// Returns all accusations from finalized blocks so far.\n    pub(crate) fn accusations(&self) -> Vec<PublicKey> {\n        self.accusations.iter().cloned().sorted().collect()\n    }\n\n    /// Returns the map of validator weights.\n    pub(crate) fn validators(&self) -> &BTreeMap<PublicKey, U512> {\n        &self.validators\n    }\n}\n\nimpl DataSize for Era {\n    const IS_DYNAMIC: bool = true;\n\n    const STATIC_HEAP_SIZE: usize = 0;\n\n    #[inline]\n    fn estimate_heap_size(&self) -> usize {\n        // Destructure self, so we can't miss any fields.\n        let Era {\n            consensus,\n            start_time,\n            start_height,\n            validation_states,\n            faulty,\n            cannot_propose,\n   
         accusations,\n            validators,\n        } = self;\n\n        // `DataSize` cannot be made object safe due its use of associated constants. We implement\n        // it manually here, downcasting the consensus protocol as a workaround.\n\n        let consensus_heap_size = {\n            let any_ref = consensus.as_any();\n\n            if let Some(highway) = any_ref.downcast_ref::<HighwayProtocol<ClContext>>() {\n                if *CASPER_ENABLE_DETAILED_CONSENSUS_METRICS {\n                    let detailed = (*highway).estimate_detailed_heap_size();\n                    match serde_json::to_string(&detailed) {\n                        Ok(encoded) => debug!(%encoded, \"consensus memory metrics\"),\n                        Err(err) => warn!(%err, \"error encoding consensus memory metrics\"),\n                    }\n                    detailed.total()\n                } else {\n                    (*highway).estimate_heap_size()\n                }\n            } else if let Some(zug) = any_ref.downcast_ref::<Zug<ClContext>>() {\n                if *CASPER_ENABLE_DETAILED_CONSENSUS_METRICS {\n                    let detailed = (*zug).estimate_detailed_heap_size();\n                    match serde_json::to_string(&detailed) {\n                        Ok(encoded) => debug!(%encoded, \"consensus memory metrics\"),\n                        Err(err) => warn!(%err, \"error encoding consensus memory metrics\"),\n                    }\n                    detailed.total()\n                } else {\n                    (*zug).estimate_heap_size()\n                }\n            } else {\n                warn!(\"could not downcast consensus protocol to determine heap allocation size\");\n                0\n            }\n        };\n\n        consensus_heap_size\n            .saturating_add(start_time.estimate_heap_size())\n            .saturating_add(start_height.estimate_heap_size())\n            .saturating_add(validation_states.estimate_heap_size())\n         
   .saturating_add(faulty.estimate_heap_size())\n            .saturating_add(cannot_propose.estimate_heap_size())\n            .saturating_add(accusations.estimate_heap_size())\n            .saturating_add(validators.estimate_heap_size())\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/era_supervisor.rs",
    "content": "#![allow(clippy::boxed_local)] // We use boxed locals to pass on event data unchanged.\n\n//! Consensus service is a component that will be communicating with the reactor.\n//! It will receive events (like incoming message event or create new message event)\n//! and propagate them to the underlying consensus protocol.\n//! It tries to know as little as possible about the underlying consensus. The only thing\n//! it assumes is the concept of era/epoch and that each era runs separate consensus instance.\n//! Most importantly, it doesn't care about what messages it's forwarding.\n\npub(super) mod debug;\nmod era;\n\nuse std::{\n    cmp,\n    collections::{BTreeMap, BTreeSet, HashMap},\n    convert::TryInto,\n    fmt::{self, Debug, Formatter},\n    fs, io,\n    path::{Path, PathBuf},\n    sync::Arc,\n    time::Duration,\n};\n\nuse anyhow::Error;\nuse datasize::DataSize;\nuse futures::{Future, FutureExt};\nuse itertools::Itertools;\nuse prometheus::Registry;\nuse rand::Rng;\nuse serde::{de::DeserializeOwned, Deserialize, Serialize};\nuse tracing::{debug, error, info, trace, warn};\n\nuse casper_binary_port::{ConsensusStatus, ConsensusValidatorChanges};\n\nuse casper_types::{\n    Approval, AsymmetricType, BlockHash, BlockHeader, Chainspec, ConsensusProtocolName, Digest,\n    DisplayIter, EraId, PublicKey, RewardedSignatures, Timestamp, Transaction, TransactionHash,\n    ValidatorChange,\n};\n\nuse crate::{\n    components::{\n        consensus::{\n            cl_context::{ClContext, Keypair},\n            consensus_protocol::{\n                ConsensusProtocol, FinalizedBlock as CpFinalizedBlock, ProposedBlock,\n                ProtocolOutcome,\n            },\n            metrics::Metrics,\n            validator_change::ValidatorChanges,\n            ActionId, ChainspecConsensusExt, Config, ConsensusMessage, ConsensusRequestMessage,\n            Event, HighwayProtocol, NewBlockPayload, ReactorEventT, ResolveValidity, TimerId, Zug,\n        },\n        
network::blocklist::BlocklistJustification,\n    },\n    effect::{\n        announcements::FatalAnnouncement,\n        requests::{BlockValidationRequest, ContractRuntimeRequest, StorageRequest},\n        AutoClosingResponder, EffectBuilder, EffectExt, Effects, Responder,\n    },\n    failpoints::Failpoint,\n    fatal, protocol,\n    types::{\n        create_single_block_rewarded_signatures, BlockWithMetadata, ExecutableBlock,\n        FinalizedBlock, InternalEraReport, MetaBlockState, NodeId, ValidatorMatrix,\n    },\n    NodeRng,\n};\n\npub use self::era::Era;\nuse super::{traits::ConsensusNetworkMessage, BlockContext};\nuse crate::{components::consensus::error::CreateNewEraError, types::InvalidProposalError};\n\n/// The delay in milliseconds before we shut down after the number of faulty validators exceeded the\n/// fault tolerance threshold.\nconst FTT_EXCEEDED_SHUTDOWN_DELAY_MILLIS: u64 = 60 * 1000;\n/// A warning is printed if a timer is delayed by more than this.\nconst TIMER_DELAY_WARNING_MILLIS: u64 = 1000;\n\n/// The number of eras across which evidence can be cited.\n/// If this is 1, you can cite evidence from the previous era, but not the one before that.\n/// To be able to detect that evidence, we also keep that number of active past eras in memory.\npub(super) const PAST_EVIDENCE_ERAS: u64 = 1;\n/// The total number of past eras that are kept in memory in addition to the current one.\n/// The more recent half of these is active: it contains units and can still accept further units.\n/// The older half is in evidence-only state, and only used to validate cited evidence.\npub(super) const PAST_OPEN_ERAS: u64 = 2 * PAST_EVIDENCE_ERAS;\n\n#[derive(DataSize)]\npub struct EraSupervisor {\n    /// A map of consensus protocol instances.\n    /// A value is a trait so that we can run different consensus protocols per era.\n    ///\n    /// This map contains three consecutive entries, with the last one being the current era N. 
Era\n    /// N - 1 is also kept in memory so that we would still detect any equivocations there and use\n    /// them in era N to get the equivocator banned. And era N - 2 one is in an \"evidence-only\"\n    /// state: It doesn't accept any new Highway units anymore, but we keep the instance in memory\n    /// so we can evaluate evidence that units in era N - 1 might cite.\n    ///\n    /// Since eras at or before the most recent activation point are never instantiated, shortly\n    /// after that there can temporarily be fewer than three entries in the map.\n    open_eras: BTreeMap<EraId, Era>,\n    validator_matrix: ValidatorMatrix,\n    chainspec: Arc<Chainspec>,\n    config: Config,\n    /// The height of the next block to be finalized.\n    /// We keep that in order to be able to signal to the Block Proposer how many blocks have been\n    /// finalized when we request a new block. This way the Block Proposer can know whether it's up\n    /// to date, or whether it has to wait for more finalized blocks before responding.\n    /// This value could be obtained from the consensus instance in a relevant era, but caching it\n    /// here is the easiest way of achieving the desired effect.\n    next_block_height: u64,\n    /// The height of the next block to be executed. If this falls too far behind, we pause.\n    next_executed_height: u64,\n    #[data_size(skip)]\n    metrics: Metrics,\n    /// The path to the folder where unit files will be stored.\n    unit_files_folder: PathBuf,\n    last_progress: Timestamp,\n\n    /// Failpoints\n    pub(super) message_delay_failpoint: Failpoint<u64>,\n    pub(super) proposal_delay_failpoint: Failpoint<u64>,\n}\n\nimpl Debug for EraSupervisor {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        let ae: Vec<_> = self.open_eras.keys().collect();\n        write!(formatter, \"EraSupervisor {{ open_eras: {:?}, .. 
}}\", ae)\n    }\n}\n\nimpl EraSupervisor {\n    /// Creates a new `EraSupervisor`, starting in the indicated current era.\n    #[allow(clippy::too_many_arguments)]\n    pub(crate) fn new(\n        storage_dir: &Path,\n        validator_matrix: ValidatorMatrix,\n        config: Config,\n        chainspec: Arc<Chainspec>,\n        registry: &Registry,\n    ) -> Result<Self, Error> {\n        let unit_files_folder = storage_dir.join(\"unit_files\");\n        fs::create_dir_all(&unit_files_folder)?;\n        info!(our_id = %validator_matrix.public_signing_key(), \"EraSupervisor pubkey\",);\n        let metrics = Metrics::new(registry)?;\n\n        let era_supervisor = Self {\n            open_eras: Default::default(),\n            validator_matrix,\n            chainspec,\n            config,\n            next_block_height: 0,\n            metrics,\n            unit_files_folder,\n            next_executed_height: 0,\n            last_progress: Timestamp::now(),\n            message_delay_failpoint: Failpoint::new(\"consensus.message_delay\"),\n            proposal_delay_failpoint: Failpoint::new(\"consensus.proposal_delay\"),\n        };\n\n        Ok(era_supervisor)\n    }\n\n    /// Returns whether we are a validator in the current era.\n    pub(crate) fn is_active_validator(&self) -> bool {\n        if let Some(era_id) = self.current_era() {\n            return self.open_eras[&era_id]\n                .validators()\n                .contains_key(self.validator_matrix.public_signing_key());\n        }\n        false\n    }\n\n    /// Returns the most recent era.\n    pub(crate) fn current_era(&self) -> Option<EraId> {\n        self.open_eras.keys().last().copied()\n    }\n\n    pub(crate) fn create_required_eras<REv: ReactorEventT>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        rng: &mut NodeRng,\n        recent_switch_block_headers: &[BlockHeader],\n    ) -> Option<Effects<Event>> {\n        if !recent_switch_block_headers\n            
.iter()\n            .tuple_windows()\n            .all(|(b0, b1)| b0.next_block_era_id() == b1.era_id())\n        {\n            error!(\"switch block headers are not consecutive; this is a bug\");\n            return None;\n        }\n\n        let highest_switch_block_header = recent_switch_block_headers.last()?;\n\n        let new_era_id = highest_switch_block_header.next_block_era_id();\n\n        // We need to initialize current_era and (evidence-only) current_era - 1.\n        // To initialize an era, all switch blocks between its booking block and its key block are\n        // required. The booking block for era N is in N - auction_delay - 1, and the key block in\n        // N - 1. So we need all switch blocks between:\n        // (including) current_era - 1 - auction_delay - 1 and (excluding) current_era.\n        // However, we never use any block from before the last activation point.\n        //\n        // Example: If auction_delay is 1, to initialize era N we need the switch blocks from era N\n        // and N - 1. If current_era is 10, we will initialize eras 10 and 9. So we need the switch\n        // blocks from eras 9, 8, and 7.\n        let earliest_open_era = self.chainspec.earliest_relevant_era(new_era_id);\n        let earliest_era = self\n            .chainspec\n            .earliest_switch_block_needed(earliest_open_era);\n        debug_assert!(earliest_era <= new_era_id);\n\n        let earliest_index = recent_switch_block_headers\n            .iter()\n            .position(|block_header| block_header.era_id() == earliest_era)?;\n        let relevant_switch_block_headers = &recent_switch_block_headers[earliest_index..];\n\n        // We initialize the era that `relevant_switch_block_headers` last block is the key\n        // block for. We want to initialize the two latest eras, so we have to pass in the whole\n        // slice for the current era, and omit one element for the other one. 
We never initialize\n        // the activation era or an earlier era, however.\n        //\n        // In the example above, we would call create_new_era with the switch blocks from eras\n        // 8 and 9 (to initialize 10) and then 7 and 8 (for era 9).\n        // (We don't truncate the slice at the start since unneeded blocks are ignored.)\n        let mut effects = Effects::new();\n        let from = relevant_switch_block_headers\n            .len()\n            .saturating_sub(PAST_EVIDENCE_ERAS as usize)\n            .max(1);\n        let old_current_era = self.current_era();\n        let now = Timestamp::now();\n        for i in (from..=relevant_switch_block_headers.len()).rev() {\n            effects.extend(self.create_new_era_effects(\n                effect_builder,\n                rng,\n                &relevant_switch_block_headers[..i],\n                now,\n            ));\n        }\n        if self.current_era() != old_current_era {\n            effects.extend(self.make_latest_era_current(effect_builder, rng, now));\n        }\n        effects.extend(self.activate_latest_era_if_needed(effect_builder, rng, now));\n        Some(effects)\n    }\n\n    /// Returns a list of status changes of active validators.\n    pub(super) fn get_validator_changes(&self) -> ConsensusValidatorChanges {\n        let mut result: BTreeMap<PublicKey, Vec<(EraId, ValidatorChange)>> = BTreeMap::new();\n        for ((_, era0), (era_id, era1)) in self.open_eras.iter().tuple_windows() {\n            for (pub_key, change) in ValidatorChanges::new(era0, era1).0 {\n                result.entry(pub_key).or_default().push((*era_id, change));\n            }\n        }\n        ConsensusValidatorChanges::new(result)\n    }\n\n    fn era_seed(booking_block_hash: BlockHash, key_block_seed: Digest) -> u64 {\n        let result = Digest::hash_pair(booking_block_hash, key_block_seed).value();\n        u64::from_le_bytes(result[0..size_of::<u64>()].try_into().unwrap())\n    }\n\n    /// 
Returns an iterator over era IDs of `num_eras` past eras, plus the provided one.\n    ///\n    /// Note: Excludes the activation point era and earlier eras. The activation point era itself\n    /// contains only the single switch block we created after the upgrade. There is no consensus\n    /// instance for it.\n    pub(crate) fn iter_past(&self, era_id: EraId, num_eras: u64) -> impl Iterator<Item = EraId> {\n        (self\n            .chainspec\n            .activation_era()\n            .successor()\n            .max(era_id.saturating_sub(num_eras))\n            .value()..=era_id.value())\n            .map(EraId::from)\n    }\n\n    /// Returns an iterator over era IDs of `num_eras` past eras, excluding the provided one.\n    ///\n    /// Note: Excludes the activation point era and earlier eras. The activation point era itself\n    /// contains only the single switch block we created after the upgrade. There is no consensus\n    /// instance for it.\n    pub(crate) fn iter_past_other(\n        &self,\n        era_id: EraId,\n        num_eras: u64,\n    ) -> impl Iterator<Item = EraId> {\n        (self\n            .chainspec\n            .activation_era()\n            .successor()\n            .max(era_id.saturating_sub(num_eras))\n            .value()..era_id.value())\n            .map(EraId::from)\n    }\n\n    /// Returns an iterator over era IDs of `num_eras` future eras, plus the provided one.\n    fn iter_future(&self, era_id: EraId, num_eras: u64) -> impl Iterator<Item = EraId> {\n        (era_id.value()..=era_id.value().saturating_add(num_eras)).map(EraId::from)\n    }\n\n    /// Pauses or unpauses consensus: Whenever the last executed block is too far behind the last\n    /// finalized block, we suspend consensus.\n    fn update_consensus_pause<REv: ReactorEventT>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        rng: &mut NodeRng,\n        era_id: EraId,\n    ) -> Effects<Event> {\n        let paused = self\n            
.next_block_height\n            .saturating_sub(self.next_executed_height)\n            > self.config.max_execution_delay;\n        self.delegate_to_era(effect_builder, rng, era_id, |consensus, _| {\n            consensus.set_paused(paused, Timestamp::now())\n        })\n    }\n\n    /// Initializes a new era. The switch blocks must contain the most recent `auction_delay + 1`\n    /// ones, in order, but at most as far back as to the last activation point.\n    pub(super) fn create_new_era_effects<REv: ReactorEventT>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        rng: &mut NodeRng,\n        switch_blocks: &[BlockHeader],\n        now: Timestamp,\n    ) -> Effects<Event> {\n        match self.create_new_era(switch_blocks, now) {\n            Ok((era_id, outcomes)) => {\n                self.handle_consensus_outcomes(effect_builder, rng, era_id, outcomes)\n            }\n            Err(err) => fatal!(\n                effect_builder,\n                \"failed to create era; this is a bug: {:?}\",\n                err,\n            )\n            .ignore(),\n        }\n    }\n\n    fn make_latest_era_current<REv: ReactorEventT>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        rng: &mut NodeRng,\n        now: Timestamp,\n    ) -> Effects<Event> {\n        let era_id = match self.current_era() {\n            Some(era_id) => era_id,\n            None => {\n                return Effects::new();\n            }\n        };\n        self.metrics\n            .consensus_current_era\n            .set(era_id.value() as i64);\n        let start_height = self.era(era_id).start_height;\n        self.next_block_height = self.next_block_height.max(start_height);\n        let outcomes = self.era_mut(era_id).consensus.handle_is_current(now);\n        self.handle_consensus_outcomes(effect_builder, rng, era_id, outcomes)\n    }\n\n    fn activate_latest_era_if_needed<REv: ReactorEventT>(\n        &mut self,\n        effect_builder: 
EffectBuilder<REv>,\n        rng: &mut NodeRng,\n        now: Timestamp,\n    ) -> Effects<Event> {\n        let era_id = match self.current_era() {\n            Some(era_id) => era_id,\n            None => {\n                return Effects::new();\n            }\n        };\n        if self.era(era_id).consensus.is_active() {\n            return Effects::new();\n        }\n        let our_id = self.validator_matrix.public_signing_key().clone();\n        let outcomes = if !self.era(era_id).validators().contains_key(&our_id) {\n            info!(era = era_id.value(), %our_id, \"not voting; not a validator\");\n            vec![]\n        } else {\n            info!(era = era_id.value(), %our_id, \"start voting\");\n            let secret = Keypair::new(\n                self.validator_matrix.secret_signing_key().clone(),\n                our_id.clone(),\n            );\n            let instance_id = self.era(era_id).consensus.instance_id();\n            let unit_hash_file = self.protocol_state_file(instance_id);\n            self.era_mut(era_id).consensus.activate_validator(\n                our_id,\n                secret,\n                now,\n                Some(unit_hash_file),\n            )\n        };\n        self.handle_consensus_outcomes(effect_builder, rng, era_id, outcomes)\n    }\n\n    /// Initializes a new era. 
The switch blocks must contain the most recent `auction_delay + 1`\n    /// ones, in order, but at most as far back as to the last activation point.\n    fn create_new_era(\n        &mut self,\n        switch_blocks: &[BlockHeader],\n        now: Timestamp,\n    ) -> Result<(EraId, Vec<ProtocolOutcome<ClContext>>), CreateNewEraError> {\n        let key_block = switch_blocks\n            .last()\n            .ok_or(CreateNewEraError::AttemptedToCreateEraWithNoSwitchBlocks)?;\n        let era_id = key_block.era_id().successor();\n\n        let chainspec_hash = self.chainspec.hash();\n        let key_block_hash = key_block.block_hash();\n        let instance_id = instance_id(chainspec_hash, era_id, key_block_hash);\n\n        if self.open_eras.contains_key(&era_id) {\n            debug!(era = era_id.value(), \"era already exists\");\n            return Ok((era_id, vec![]));\n        }\n\n        let era_end = key_block.clone_era_end().ok_or_else(|| {\n            CreateNewEraError::LastBlockHeaderNotASwitchBlock {\n                era_id,\n                last_block_header: Box::new(key_block.clone()),\n            }\n        })?;\n\n        let earliest_era = self.chainspec.earliest_switch_block_needed(era_id);\n        let switch_blocks_needed = era_id.value().saturating_sub(earliest_era.value()) as usize;\n        let first_idx = switch_blocks\n            .len()\n            .checked_sub(switch_blocks_needed)\n            .ok_or_else(|| CreateNewEraError::InsufficientSwitchBlocks {\n                era_id,\n                switch_blocks: switch_blocks.to_vec(),\n            })?;\n        for (i, switch_block) in switch_blocks[first_idx..].iter().enumerate() {\n            if switch_block.era_id() != earliest_era.saturating_add(i as u64) {\n                return Err(CreateNewEraError::WrongSwitchBlockEra {\n                    era_id,\n                    switch_blocks: switch_blocks.to_vec(),\n                });\n            }\n        }\n\n        let 
validators = era_end.next_era_validator_weights();\n\n        if let Some(current_era) = self.current_era() {\n            if current_era > era_id.saturating_add(PAST_EVIDENCE_ERAS) {\n                warn!(era = era_id.value(), \"trying to create obsolete era\");\n                return Ok((era_id, vec![]));\n            }\n        }\n\n        // Compute the seed for the PRNG from the booking block hash and the accumulated seed.\n        let auction_delay = self.chainspec.core_config.auction_delay as usize;\n        let booking_block_hash =\n            if let Some(booking_block) = switch_blocks.iter().rev().nth(auction_delay) {\n                booking_block.block_hash()\n            } else {\n                // If there's no booking block for the `era_id`\n                // (b/c it would have been from before Genesis, upgrade or emergency restart),\n                // use a \"zero\" block hash. This should not hurt the security of the leader\n                // selection algorithm.\n                BlockHash::default()\n            };\n        let seed = Self::era_seed(booking_block_hash, *key_block.accumulated_seed());\n\n        // The beginning of the new era is marked by the key block.\n        #[allow(clippy::arithmetic_side_effects)] // Block height should never reach u64::MAX.\n        let start_height = key_block.height() + 1;\n        let start_time = key_block.timestamp();\n\n        // Validators that were inactive in the previous era will be excluded from leader selection\n        // in the new era.\n        let inactive = era_end.inactive_validators().iter().cloned().collect();\n\n        // Validators that were only exposed as faulty after the booking block are still in the new\n        // era's validator set but get banned.\n        let blocks_after_booking_block = switch_blocks.iter().rev().take(auction_delay);\n        let faulty = blocks_after_booking_block\n            .filter_map(|switch_block| switch_block.maybe_equivocators())\n           
 .flat_map(|equivocators| equivocators.iter())\n            .cloned()\n            .collect();\n\n        info!(\n            ?validators,\n            %start_time,\n            %now,\n            %start_height,\n            %chainspec_hash,\n            %key_block_hash,\n            %instance_id,\n            %seed,\n            era = era_id.value(),\n            \"starting era\",\n        );\n\n        let maybe_prev_era = era_id\n            .checked_sub(1)\n            .and_then(|last_era_id| self.open_eras.get(&last_era_id));\n        let validators_with_evidence: Vec<PublicKey> = maybe_prev_era\n            .into_iter()\n            .flat_map(|prev_era| prev_era.consensus.validators_with_evidence())\n            .cloned()\n            .collect();\n\n        // Create and insert the new era instance.\n        let protocol_state_file = self.protocol_state_file(&instance_id);\n        let (consensus, mut outcomes) = match self.chainspec.core_config.consensus_protocol {\n            ConsensusProtocolName::Highway => HighwayProtocol::new_boxed(\n                instance_id,\n                validators.clone(),\n                &faulty,\n                &inactive,\n                self.chainspec.as_ref(),\n                &self.config,\n                maybe_prev_era.map(|era| &*era.consensus),\n                start_time,\n                seed,\n                now,\n                Some(protocol_state_file),\n            ),\n            ConsensusProtocolName::Zug => Zug::new_boxed(\n                instance_id,\n                validators.clone(),\n                &faulty,\n                &inactive,\n                self.chainspec.as_ref(),\n                &self.config,\n                maybe_prev_era.map(|era| &*era.consensus),\n                start_time,\n                seed,\n                now,\n                protocol_state_file,\n            ),\n        };\n\n        let era = Era::new(\n            consensus,\n            start_time,\n            
start_height,\n            faulty,\n            inactive,\n            validators.clone(),\n        );\n        let _ = self.open_eras.insert(era_id, era);\n\n        // Activate the era if this node was already running when the era began, it is still\n        // ongoing based on its minimum duration, and we are one of the validators.\n        let our_id = self.validator_matrix.public_signing_key().clone();\n        if self\n            .current_era()\n            .is_some_and(|current_era| current_era > era_id)\n        {\n            trace!(\n                era = era_id.value(),\n                current_era = ?self.current_era(),\n                \"not voting; initializing past era\"\n            );\n            // We're creating an era that's not the current era - which means we're currently\n            // initializing consensus, and we want to set all the older eras to be evidence only.\n            if let Some(era) = self.open_eras.get_mut(&era_id) {\n                era.consensus.set_evidence_only();\n            }\n        } else {\n            self.metrics\n                .consensus_current_era\n                .set(era_id.value() as i64);\n            self.next_block_height = self.next_block_height.max(start_height);\n            outcomes.extend(self.era_mut(era_id).consensus.handle_is_current(now));\n            if !self.era(era_id).validators().contains_key(&our_id) {\n                info!(era = era_id.value(), %our_id, \"not voting; not a validator\");\n            } else {\n                info!(era = era_id.value(), %our_id, \"start voting\");\n                let secret = Keypair::new(\n                    self.validator_matrix.secret_signing_key().clone(),\n                    our_id.clone(),\n                );\n                let unit_hash_file = self.protocol_state_file(&instance_id);\n                outcomes.extend(self.era_mut(era_id).consensus.activate_validator(\n                    our_id,\n                    secret,\n                 
   now,\n                    Some(unit_hash_file),\n                ))\n            };\n        }\n\n        // Mark validators as faulty for which we have evidence in the previous era.\n        for pub_key in validators_with_evidence {\n            let proposed_blocks = self\n                .era_mut(era_id)\n                .resolve_evidence_and_mark_faulty(&pub_key);\n            if !proposed_blocks.is_empty() {\n                error!(\n                    ?proposed_blocks,\n                    era = era_id.value(),\n                    \"unexpected block in new era\"\n                );\n            }\n        }\n\n        // Clear the obsolete data from the era before the previous one. We only retain the\n        // information necessary to validate evidence that units in the two most recent eras may\n        // refer to for cross-era fault tracking.\n        if let Some(current_era) = self.current_era() {\n            let mut removed_instance_ids = vec![];\n            let earliest_open_era = current_era.saturating_sub(PAST_OPEN_ERAS);\n            let earliest_active_era = current_era.saturating_sub(PAST_EVIDENCE_ERAS);\n            self.open_eras.retain(|era_id, era| {\n                if earliest_open_era > *era_id {\n                    trace!(era = era_id.value(), \"removing obsolete era\");\n                    removed_instance_ids.push(*era.consensus.instance_id());\n                    false\n                } else if earliest_active_era > *era_id {\n                    trace!(era = era_id.value(), \"setting old era to evidence only\");\n                    era.consensus.set_evidence_only();\n                    true\n                } else {\n                    true\n                }\n            });\n            for instance_id in removed_instance_ids {\n                if let Err(err) = fs::remove_file(self.protocol_state_file(&instance_id)) {\n                    match err.kind() {\n                        io::ErrorKind::NotFound => {}\n        
                err => warn!(?err, \"could not delete unit hash file\"),\n                    }\n                }\n            }\n        }\n\n        Ok((era_id, outcomes))\n    }\n\n    /// Returns the path to the era's unit file.\n    fn protocol_state_file(&self, instance_id: &Digest) -> PathBuf {\n        self.unit_files_folder.join(format!(\n            \"unit_{:?}_{}.dat\",\n            instance_id,\n            self.validator_matrix.public_signing_key().to_hex()\n        ))\n    }\n\n    /// Applies `f` to the consensus protocol of the specified era.\n    fn delegate_to_era<REv: ReactorEventT, F>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        rng: &mut NodeRng,\n        era_id: EraId,\n        f: F,\n    ) -> Effects<Event>\n    where\n        F: FnOnce(\n            &mut dyn ConsensusProtocol<ClContext>,\n            &mut NodeRng,\n        ) -> Vec<ProtocolOutcome<ClContext>>,\n    {\n        match self.open_eras.get_mut(&era_id) {\n            None => {\n                self.log_missing_era(era_id);\n                Effects::new()\n            }\n            Some(era) => {\n                let outcomes = f(&mut *era.consensus, rng);\n                self.handle_consensus_outcomes(effect_builder, rng, era_id, outcomes)\n            }\n        }\n    }\n\n    fn log_missing_era(&self, era_id: EraId) {\n        let era = era_id.value();\n        if let Some(current_era_id) = self.current_era() {\n            match era_id.cmp(&current_era_id) {\n                cmp::Ordering::Greater => trace!(era, \"received message for future era\"),\n                cmp::Ordering::Equal => error!(era, \"missing current era\"),\n                cmp::Ordering::Less => info!(era, \"received message for obsolete era\"),\n            }\n        } else {\n            info!(era, \"received message, but no era initialized\");\n        }\n    }\n\n    pub(super) fn handle_timer<REv: ReactorEventT>(\n        &mut self,\n        effect_builder: 
EffectBuilder<REv>,\n        rng: &mut NodeRng,\n        era_id: EraId,\n        timestamp: Timestamp,\n        timer_id: TimerId,\n    ) -> Effects<Event> {\n        let now = Timestamp::now();\n        let delay = now.saturating_diff(timestamp).millis();\n        if delay > TIMER_DELAY_WARNING_MILLIS {\n            warn!(\n                era = era_id.value(), timer_id = timer_id.0, %delay,\n                \"timer called with long delay\"\n            );\n        }\n        self.delegate_to_era(effect_builder, rng, era_id, move |consensus, rng| {\n            consensus.handle_timer(timestamp, now, timer_id, rng)\n        })\n    }\n\n    pub(super) fn handle_action<REv: ReactorEventT>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        rng: &mut NodeRng,\n        era_id: EraId,\n        action_id: ActionId,\n    ) -> Effects<Event> {\n        self.delegate_to_era(effect_builder, rng, era_id, move |consensus, _| {\n            consensus.handle_action(action_id, Timestamp::now())\n        })\n    }\n\n    pub(super) fn handle_message<REv: ReactorEventT>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        rng: &mut NodeRng,\n        sender: NodeId,\n        msg: ConsensusMessage,\n    ) -> Effects<Event> {\n        match msg {\n            ConsensusMessage::Protocol { era_id, payload } => {\n                trace!(era = era_id.value(), \"received a consensus message\");\n\n                self.delegate_to_era(effect_builder, rng, era_id, move |consensus, rng| {\n                    consensus.handle_message(rng, sender, payload, Timestamp::now())\n                })\n            }\n            ConsensusMessage::EvidenceRequest { era_id, pub_key } => match self.current_era() {\n                None => Effects::new(),\n                Some(current_era) => {\n                    if era_id.saturating_add(PAST_EVIDENCE_ERAS) < current_era\n                        || !self.open_eras.contains_key(&era_id)\n                    
{\n                        trace!(era = era_id.value(), \"not handling message; era too old\");\n                        return Effects::new();\n                    }\n                    self.iter_past(era_id, PAST_EVIDENCE_ERAS)\n                        .flat_map(|e_id| {\n                            self.delegate_to_era(effect_builder, rng, e_id, |consensus, _| {\n                                consensus.send_evidence(sender, &pub_key)\n                            })\n                        })\n                        .collect()\n                }\n            },\n        }\n    }\n\n    pub(super) fn handle_demand<REv: ReactorEventT>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        rng: &mut NodeRng,\n        sender: NodeId,\n        request: Box<ConsensusRequestMessage>,\n        auto_closing_responder: AutoClosingResponder<protocol::Message>,\n    ) -> Effects<Event> {\n        let ConsensusRequestMessage { era_id, payload } = *request;\n\n        trace!(era = era_id.value(), \"received a consensus request\");\n        match self.open_eras.get_mut(&era_id) {\n            None => {\n                self.log_missing_era(era_id);\n                auto_closing_responder.respond_none().ignore()\n            }\n            Some(era) => {\n                let (outcomes, response) =\n                    era.consensus\n                        .handle_request_message(rng, sender, payload, Timestamp::now());\n                let mut effects =\n                    self.handle_consensus_outcomes(effect_builder, rng, era_id, outcomes);\n                if let Some(payload) = response {\n                    effects.extend(\n                        auto_closing_responder\n                            .respond(ConsensusMessage::Protocol { era_id, payload }.into())\n                            .ignore(),\n                    );\n                } else {\n                    effects.extend(auto_closing_responder.respond_none().ignore());\n             
   }\n                effects\n            }\n        }\n    }\n\n    pub(super) fn handle_new_block_payload<REv: ReactorEventT>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        rng: &mut NodeRng,\n        new_block_payload: NewBlockPayload,\n    ) -> Effects<Event> {\n        let NewBlockPayload {\n            era_id,\n            block_payload,\n            block_context,\n        } = new_block_payload;\n        match self.current_era() {\n            None => {\n                warn!(\"new block payload but no initialized era\");\n                Effects::new()\n            }\n            Some(current_era) => {\n                if era_id.saturating_add(PAST_EVIDENCE_ERAS) < current_era\n                    || !self.open_eras.contains_key(&era_id)\n                {\n                    warn!(era = era_id.value(), \"new block payload in outdated era\");\n                    return Effects::new();\n                }\n                let proposed_block = ProposedBlock::new(block_payload, block_context);\n                self.delegate_to_era(effect_builder, rng, era_id, move |consensus, _| {\n                    consensus.propose(proposed_block, Timestamp::now())\n                })\n            }\n        }\n    }\n\n    pub(super) fn handle_block_added<REv: ReactorEventT>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        rng: &mut NodeRng,\n        block_header: BlockHeader,\n    ) -> Effects<Event> {\n        self.last_progress = Timestamp::now();\n        self.next_executed_height = self\n            .next_executed_height\n            .max(block_header.height().saturating_add(1));\n        let era_id = block_header.era_id();\n        let mut effects = self.update_consensus_pause(effect_builder, rng, era_id);\n\n        if self\n            .current_era()\n            .is_none_or(|current_era| era_id < current_era)\n        {\n            trace!(era = era_id.value(), \"executed block in old era\");\n            
return effects;\n        }\n        if block_header.next_era_validator_weights().is_some() {\n            if let Some(era) = self.open_eras.get_mut(&era_id) {\n                // This was the era's last block. Schedule deactivating this era.\n                let delay = Timestamp::now()\n                    .saturating_diff(block_header.timestamp())\n                    .into();\n                let faulty_num = era.consensus.validators_with_evidence().len();\n                let deactivate_era = move |_| Event::DeactivateEra {\n                    era_id,\n                    faulty_num,\n                    delay,\n                };\n                effects.extend(effect_builder.set_timeout(delay).event(deactivate_era));\n            }\n        }\n        effects\n    }\n\n    pub(super) fn handle_deactivate_era<REv: ReactorEventT>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        era_id: EraId,\n        old_faulty_num: usize,\n        delay: Duration,\n    ) -> Effects<Event> {\n        let era = if let Some(era) = self.open_eras.get_mut(&era_id) {\n            era\n        } else {\n            warn!(era = era_id.value(), \"trying to deactivate obsolete era\");\n            return Effects::new();\n        };\n        let faulty_num = era.consensus.validators_with_evidence().len();\n        if faulty_num == old_faulty_num {\n            info!(era = era_id.value(), \"stop voting in era\");\n            era.consensus.deactivate_validator();\n            Effects::new()\n        } else {\n            let deactivate_era = move |_| Event::DeactivateEra {\n                era_id,\n                faulty_num,\n                delay,\n            };\n            effect_builder.set_timeout(delay).event(deactivate_era)\n        }\n    }\n\n    /// Will deactivate voting for the current era.\n    /// Does nothing if the current era doesn't exist or is inactive already.\n    pub(crate) fn deactivate_current_era(&mut self) -> Result<EraId, String> 
{\n        let which_era = self\n            .current_era()\n            .ok_or_else(|| \"attempt to deactivate an era with no eras instantiated!\".to_string())?;\n        let era = self.era_mut(which_era);\n        if false == era.consensus.is_active() {\n            debug!(era_id=%which_era, \"attempt to deactivate inactive era\");\n            return Ok(which_era);\n        }\n        era.consensus.deactivate_validator();\n        Ok(which_era)\n    }\n\n    pub(super) fn resolve_validity<REv: ReactorEventT>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        rng: &mut NodeRng,\n        resolve_validity: ResolveValidity,\n    ) -> Effects<Event> {\n        let ResolveValidity {\n            era_id,\n            sender,\n            proposed_block,\n            maybe_error,\n        } = resolve_validity;\n        self.metrics.proposed_block();\n        let mut effects = Effects::new();\n        let valid = maybe_error.is_none();\n        if let Some(error) = maybe_error {\n            debug!(%era_id, %sender, ?error, \"announcing block peer due to invalid proposal\");\n            effects.extend({\n                effect_builder\n                    .announce_block_peer_with_justification(\n                        sender,\n                        BlocklistJustification::SentInvalidProposal { era: era_id, error },\n                    )\n                    .ignore()\n            });\n        }\n        if self\n            .open_eras\n            .get_mut(&era_id)\n            .is_some_and(|era| era.resolve_validity(&proposed_block, valid))\n        {\n            effects.extend(\n                self.delegate_to_era(effect_builder, rng, era_id, |consensus, _| {\n                    consensus.resolve_validity(proposed_block.clone(), valid, Timestamp::now())\n                }),\n            );\n        }\n        effects\n    }\n\n    pub(crate) fn last_progress(&self) -> Timestamp {\n        self.last_progress\n    }\n\n    fn 
handle_consensus_outcomes<REv: ReactorEventT, T>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        rng: &mut NodeRng,\n        era_id: EraId,\n        outcomes: T,\n    ) -> Effects<Event>\n    where\n        T: IntoIterator<Item = ProtocolOutcome<ClContext>>,\n    {\n        outcomes\n            .into_iter()\n            .flat_map(|result| self.handle_consensus_outcome(effect_builder, rng, era_id, result))\n            .collect()\n    }\n\n    /// Returns `true` if any of the most recent eras has evidence against the validator with key\n    /// `pub_key`.\n    fn has_evidence(&self, era_id: EraId, pub_key: PublicKey) -> bool {\n        self.iter_past(era_id, PAST_EVIDENCE_ERAS)\n            .any(|eid| self.era(eid).consensus.has_evidence(&pub_key))\n    }\n\n    /// Returns the era with the specified ID. Panics if it does not exist.\n    fn era(&self, era_id: EraId) -> &Era {\n        &self.open_eras[&era_id]\n    }\n\n    /// Returns the era with the specified ID mutably. 
Panics if it does not exist.\n    fn era_mut(&mut self, era_id: EraId) -> &mut Era {\n        self.open_eras.get_mut(&era_id).unwrap()\n    }\n\n    #[allow(clippy::arithmetic_side_effects)] // Block height should never reach u64::MAX.\n    fn handle_consensus_outcome<REv: ReactorEventT>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        rng: &mut NodeRng,\n        era_id: EraId,\n        consensus_result: ProtocolOutcome<ClContext>,\n    ) -> Effects<Event> {\n        let current_era = match self.current_era() {\n            Some(current_era) => current_era,\n            None => {\n                error!(\"no current era\");\n                return Effects::new();\n            }\n        };\n        match consensus_result {\n            ProtocolOutcome::Disconnect(sender) => {\n                warn!(\n                    %sender,\n                    \"disconnecting from the sender of invalid data\"\n                );\n                {\n                    effect_builder\n                        .announce_block_peer_with_justification(\n                            sender,\n                            BlocklistJustification::BadConsensusBehavior,\n                        )\n                        .ignore()\n                }\n            }\n            ProtocolOutcome::CreatedGossipMessage(payload) => {\n                let message = ConsensusMessage::Protocol { era_id, payload };\n                let delay_by = self.message_delay_failpoint.fire(rng).cloned();\n                async move {\n                    if let Some(delay) = delay_by {\n                        effect_builder\n                            .set_timeout(Duration::from_millis(delay))\n                            .await;\n                    }\n                    effect_builder\n                        .broadcast_message_to_validators(message.into(), era_id)\n                        .await\n                }\n                .ignore()\n            }\n            
ProtocolOutcome::CreatedTargetedMessage(payload, to) => {\n                let message = ConsensusMessage::Protocol { era_id, payload };\n                effect_builder.enqueue_message(to, message.into()).ignore()\n            }\n            ProtocolOutcome::CreatedMessageToRandomPeer(payload) => {\n                let message = ConsensusMessage::Protocol { era_id, payload };\n\n                async move {\n                    let peers = effect_builder.get_fully_connected_peers(1).await;\n                    if let Some(to) = peers.into_iter().next() {\n                        effect_builder.enqueue_message(to, message.into()).await;\n                    }\n                }\n                .ignore()\n            }\n            ProtocolOutcome::CreatedRequestToRandomValidator(payload) => {\n                let message = ConsensusRequestMessage { era_id, payload };\n\n                async move {\n                    let peers = effect_builder\n                        .get_fully_connected_validators(1, era_id)\n                        .await;\n                    if let Some(to) = peers.into_iter().next() {\n                        effect_builder.enqueue_message(to, message.into()).await;\n                    }\n                }\n                .ignore()\n            }\n            ProtocolOutcome::ScheduleTimer(timestamp, timer_id) => {\n                let timediff = timestamp.saturating_diff(Timestamp::now());\n                effect_builder\n                    .set_timeout(timediff.into())\n                    .event(move |_| Event::Timer {\n                        era_id,\n                        timestamp,\n                        timer_id,\n                    })\n            }\n            ProtocolOutcome::QueueAction(action_id) => effect_builder\n                .immediately()\n                .event(move |()| Event::Action { era_id, action_id }),\n            ProtocolOutcome::CreateNewBlock(block_context, proposal_expiry) => {\n                let 
signature_rewards_max_delay =\n                    self.chainspec.core_config.signature_rewards_max_delay;\n                let current_block_height = self.proposed_block_height(&block_context, era_id);\n                let minimum_block_height =\n                    current_block_height.saturating_sub(signature_rewards_max_delay);\n\n                let awaitable_appendable_block = effect_builder.request_appendable_block(\n                    block_context.timestamp(),\n                    era_id,\n                    proposal_expiry,\n                );\n                let awaitable_blocks_with_metadata = async move {\n                    effect_builder\n                        .collect_past_blocks_with_metadata(\n                            minimum_block_height..current_block_height,\n                            false,\n                        )\n                        .await\n                };\n                let accusations = self\n                    .iter_past(era_id, PAST_EVIDENCE_ERAS)\n                    .flat_map(|e_id| self.era(e_id).consensus.validators_with_evidence())\n                    .unique()\n                    .filter(|pub_key| !self.era(era_id).faulty.contains(pub_key))\n                    .cloned()\n                    .collect();\n                let random_bit = rng.gen();\n\n                let validator_matrix = self.validator_matrix.clone();\n\n                let delay_by = self.proposal_delay_failpoint.fire(rng).cloned();\n                async move {\n                    if let Some(delay) = delay_by {\n                        effect_builder\n                            .set_timeout(Duration::from_millis(delay))\n                            .await;\n                    }\n                    join_2(awaitable_appendable_block, awaitable_blocks_with_metadata).await\n                }\n                .event(\n                    move |(appendable_block, maybe_past_blocks_with_metadata)| {\n                        let 
rewarded_signatures = create_rewarded_signatures(\n                            &maybe_past_blocks_with_metadata,\n                            validator_matrix,\n                            &block_context,\n                            signature_rewards_max_delay,\n                        );\n\n                        let block_payload = Arc::new(appendable_block.into_block_payload(\n                            accusations,\n                            rewarded_signatures,\n                            random_bit,\n                        ));\n\n                        Event::NewBlockPayload(NewBlockPayload {\n                            era_id,\n                            block_payload,\n                            block_context,\n                        })\n                    },\n                )\n            }\n            ProtocolOutcome::FinalizedBlock(CpFinalizedBlock {\n                value,\n                timestamp,\n                relative_height,\n                terminal_block_data,\n                equivocators,\n                proposer,\n            }) => {\n                if era_id != current_era {\n                    debug!(era = era_id.value(), \"finalized block in old era\");\n                    return Effects::new();\n                }\n                let era = self.open_eras.get_mut(&era_id).unwrap();\n                era.add_accusations(&equivocators);\n                era.add_accusations(value.accusations());\n                // If this is the era's last block, it contains rewards. 
Everyone who is accused in\n                // the block or seen as equivocating via the consensus protocol gets faulty.\n\n                // TODO - add support for the `compute_rewards` chainspec parameter coming from\n                // private chain implementation in the 2.0 rewards scheme.\n                let _compute_rewards = self.chainspec.core_config.compute_rewards;\n                let report = terminal_block_data.map(|tbd| {\n                    // If block rewards are disabled, zero them.\n                    // if !compute_rewards {\n                    //     for reward in tbd.rewards.values_mut() {\n                    //         *reward = 0;\n                    //     }\n                    // }\n\n                    InternalEraReport {\n                        equivocators: era.accusations(),\n                        inactive_validators: tbd.inactive_validators,\n                    }\n                });\n                let proposed_block = Arc::try_unwrap(value).unwrap_or_else(|arc| (*arc).clone());\n                let finalized_approvals: HashMap<_, _> =\n                    proposed_block.all_transactions().cloned().collect();\n                if let Some(era_report) = report.as_ref() {\n                    info!(\n                        inactive = %DisplayIter::new(&era_report.inactive_validators),\n                        faulty = %DisplayIter::new(&era_report.equivocators),\n                        era_id = era_id.value(),\n                        \"era end: inactive and faulty validators\"\n                    );\n                }\n                let finalized_block = FinalizedBlock::new(\n                    proposed_block,\n                    report,\n                    timestamp,\n                    era_id,\n                    era.start_height + relative_height,\n                    proposer,\n                );\n                info!(\n                    era_id = finalized_block.era_id.value(),\n                    height 
= finalized_block.height,\n                    timestamp = %finalized_block.timestamp,\n                    \"finalized block\"\n                );\n                self.metrics.finalized_block(&finalized_block);\n                // Announce the finalized block.\n                let mut effects = effect_builder\n                    .announce_finalized_block(finalized_block.clone())\n                    .ignore();\n                self.next_block_height = self.next_block_height.max(finalized_block.height + 1);\n                // Request execution of the finalized block.\n                effects.extend(\n                    execute_finalized_block(effect_builder, finalized_approvals, finalized_block)\n                        .ignore(),\n                );\n                let effects_from_updating_pause =\n                    self.update_consensus_pause(effect_builder, rng, era_id);\n                effects.extend(effects_from_updating_pause);\n                effects\n            }\n            ProtocolOutcome::ValidateConsensusValue {\n                sender,\n                proposed_block,\n            } => {\n                if era_id.saturating_add(PAST_EVIDENCE_ERAS) < current_era\n                    || !self.open_eras.contains_key(&era_id)\n                {\n                    debug!(%sender, %era_id, \"validate_consensus_value: skipping outdated era\");\n                    return Effects::new(); // Outdated era; we don't need the value anymore.\n                }\n                let missing_evidence: Vec<PublicKey> = proposed_block\n                    .value()\n                    .accusations()\n                    .iter()\n                    .filter(|pub_key| !self.has_evidence(era_id, (*pub_key).clone()))\n                    .cloned()\n                    .collect();\n                self.era_mut(era_id)\n                    .add_block(proposed_block.clone(), missing_evidence.clone());\n                if let Some(transaction_hash) = 
proposed_block.contains_replay() {\n                    warn!(%sender, %transaction_hash, \"block contains a replayed transaction\");\n                    return self.resolve_validity(\n                        effect_builder,\n                        rng,\n                        ResolveValidity {\n                            era_id,\n                            sender,\n                            proposed_block,\n                            maybe_error: Some(Box::new(\n                                InvalidProposalError::AncestorTransactionReplay {\n                                    replayed_transaction_hash: transaction_hash,\n                                },\n                            )),\n                        },\n                    );\n                }\n                let mut effects = Effects::new();\n                for pub_key in missing_evidence {\n                    let msg = ConsensusMessage::EvidenceRequest { era_id, pub_key };\n                    effects.extend(effect_builder.send_message(sender, msg.into()).ignore());\n                }\n                let proposed_block_height =\n                    self.proposed_block_height(proposed_block.context(), era_id);\n                effects.extend(\n                    async move {\n                        check_txns_for_replay_in_previous_eras_and_validate_block(\n                            effect_builder,\n                            era_id,\n                            proposed_block_height,\n                            sender,\n                            proposed_block,\n                        )\n                        .await\n                    }\n                    .event(std::convert::identity),\n                );\n                effects\n            }\n            ProtocolOutcome::HandledProposedBlock(proposed_block) => effect_builder\n                .announce_proposed_block(proposed_block)\n                .ignore(),\n            ProtocolOutcome::NewEvidence(pub_key) => 
{\n                info!(%pub_key, era = era_id.value(), \"validator equivocated\");\n                let mut effects = effect_builder\n                    .announce_fault_event(era_id, pub_key.clone(), Timestamp::now())\n                    .ignore();\n                for e_id in self.iter_future(era_id, PAST_EVIDENCE_ERAS) {\n                    let proposed_blocks = if let Some(era) = self.open_eras.get_mut(&e_id) {\n                        era.resolve_evidence_and_mark_faulty(&pub_key)\n                    } else {\n                        continue;\n                    };\n                    for proposed_block in proposed_blocks {\n                        effects.extend(self.delegate_to_era(\n                            effect_builder,\n                            rng,\n                            e_id,\n                            |consensus, _| {\n                                consensus.resolve_validity(proposed_block, true, Timestamp::now())\n                            },\n                        ));\n                    }\n                }\n                effects\n            }\n            ProtocolOutcome::SendEvidence(sender, pub_key) => self\n                .iter_past_other(era_id, PAST_EVIDENCE_ERAS)\n                .flat_map(|e_id| {\n                    self.delegate_to_era(effect_builder, rng, e_id, |consensus, _| {\n                        consensus.send_evidence(sender, &pub_key)\n                    })\n                })\n                .collect(),\n            ProtocolOutcome::WeAreFaulty => Default::default(),\n            ProtocolOutcome::DoppelgangerDetected => Default::default(),\n            ProtocolOutcome::FttExceeded => effect_builder\n                .set_timeout(Duration::from_millis(FTT_EXCEEDED_SHUTDOWN_DELAY_MILLIS))\n                .then(move |_| fatal!(effect_builder, \"too many faulty validators\"))\n                .ignore(),\n        }\n    }\n\n    pub(super) fn status(&self, responder: 
Responder<Option<ConsensusStatus>>) -> Effects<Event> {\n        let public_key = self.validator_matrix.public_signing_key().clone();\n        let round_length = self\n            .open_eras\n            .values()\n            .last()\n            .and_then(|era| era.consensus.next_round_length());\n        responder\n            .respond(Some(ConsensusStatus::new(public_key, round_length)))\n            .ignore()\n    }\n\n    /// Get a reference to the era supervisor's open eras.\n    pub(crate) fn open_eras(&self) -> &BTreeMap<EraId, Era> {\n        &self.open_eras\n    }\n\n    /// This node's public signing key.\n    pub(crate) fn public_key(&self) -> &PublicKey {\n        self.validator_matrix.public_signing_key()\n    }\n\n    fn proposed_block_height(&self, block_context: &BlockContext<ClContext>, era_id: EraId) -> u64 {\n        let initial_era_height = self.era(era_id).start_height;\n        initial_era_height.saturating_add(block_context.ancestor_values().len() as u64)\n    }\n}\n\n/// A serialized consensus network message.\n///\n/// An entirely transparent newtype around raw bytes. 
Exists solely to avoid accidental\n/// double-serialization of network messages, or serialization of unsuitable types.\n///\n/// Note that this type fixates the encoding for all consensus implementations to one scheme.\n#[derive(Clone, DataSize, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]\n#[serde(transparent)]\n#[repr(transparent)]\npub(crate) struct SerializedMessage(Vec<u8>);\n\nimpl SerializedMessage {\n    /// Serialize the given message from a consensus protocol into bytes.\n    ///\n    /// # Panics\n    ///\n    /// Will panic if serialization fails (which must never happen -- ensure types are\n    /// serializable!).\n    pub(crate) fn from_message<T>(msg: &T) -> Self\n    where\n        T: ConsensusNetworkMessage + Serialize,\n    {\n        SerializedMessage(bincode::serialize(msg).expect(\"should serialize message\"))\n    }\n\n    /// Attempt to deserialize a given type from incoming raw bytes.\n    pub(crate) fn deserialize_incoming<T>(&self) -> Result<T, bincode::Error>\n    where\n        T: ConsensusNetworkMessage + DeserializeOwned,\n    {\n        bincode::deserialize(&self.0)\n    }\n\n    /// Returns the inner raw bytes.\n    pub(crate) fn into_raw(self) -> Vec<u8> {\n        self.0\n    }\n\n    /// Returns a reference to the inner raw bytes.\n    pub(crate) fn as_raw(&self) -> &[u8] {\n        &self.0\n    }\n}\n\n#[cfg(test)]\nimpl SerializedMessage {\n    /// Deserializes a message into the given value.\n    ///\n    /// # Panics\n    ///\n    /// Will panic if deserialization fails.\n    #[track_caller]\n    pub(crate) fn deserialize_expect<T>(&self) -> T\n    where\n        T: ConsensusNetworkMessage + DeserializeOwned,\n    {\n        self.deserialize_incoming()\n            .expect(\"could not deserialize valid zug message from serialized message\")\n    }\n}\n\nasync fn get_transactions<REv>(\n    effect_builder: EffectBuilder<REv>,\n    hashes: Vec<TransactionHash>,\n) -> Vec<Transaction>\nwhere\n    REv: 
From<StorageRequest>,\n{\n    let from_storage = effect_builder.get_transactions_from_storage(hashes).await;\n\n    let mut ret = vec![];\n    for item in from_storage {\n        match item {\n            Some((transaction, Some(approvals))) => {\n                ret.push(transaction.with_approvals(approvals));\n            }\n            Some((transaction, None)) => {\n                ret.push(transaction);\n            }\n            None => continue,\n        }\n    }\n\n    ret\n}\n\nasync fn execute_finalized_block<REv>(\n    effect_builder: EffectBuilder<REv>,\n    finalized_approvals: HashMap<TransactionHash, BTreeSet<Approval>>,\n    finalized_block: FinalizedBlock,\n) where\n    REv: From<StorageRequest> + From<FatalAnnouncement> + From<ContractRuntimeRequest>,\n{\n    for (txn_hash, finalized_approvals) in finalized_approvals {\n        effect_builder\n            .store_finalized_approvals(txn_hash, finalized_approvals)\n            .await;\n    }\n    // Get all transactions in order they appear in the finalized block.\n    let transactions = get_transactions(\n        effect_builder,\n        finalized_block.all_transactions().copied().collect(),\n    )\n    .await;\n\n    let executable_block =\n        ExecutableBlock::from_finalized_block_and_transactions(finalized_block, transactions);\n    effect_builder\n        .enqueue_block_for_execution(executable_block, MetaBlockState::new())\n        .await\n}\n\n/// Computes the instance ID for an era, given the era ID and the chainspec hash.\nfn instance_id(chainspec_hash: Digest, era_id: EraId, key_block_hash: BlockHash) -> Digest {\n    Digest::hash_pair(\n        key_block_hash.inner().value(),\n        Digest::hash_pair(chainspec_hash, era_id.to_le_bytes()).value(),\n    )\n}\n\n/// Checks that a `BlockPayload` does not have transactions we have already included in blocks in\n/// previous eras. This is done by repeatedly querying storage for transaction metadata. 
When\n/// metadata is found storage is queried again to get the era id for the included transaction. That\n/// era id must *not* be less than the current era, otherwise the transaction is a replay attack.\nasync fn check_txns_for_replay_in_previous_eras_and_validate_block<REv>(\n    effect_builder: EffectBuilder<REv>,\n    proposed_block_era_id: EraId,\n    proposed_block_height: u64,\n    sender: NodeId,\n    proposed_block: ProposedBlock<ClContext>,\n) -> Event\nwhere\n    REv: From<BlockValidationRequest> + From<StorageRequest>,\n{\n    let txns_era_ids = effect_builder\n        .get_transactions_era_ids(\n            proposed_block\n                .value()\n                .all_transactions()\n                .map(|(x, _)| *x)\n                .collect(),\n        )\n        .await;\n\n    for txn_era_id in txns_era_ids {\n        // If the stored transaction was executed in a previous era, it is a replay attack.\n        //\n        // If not, then it might be this is a transaction for a block on which we are currently\n        // coming to consensus, and we will rely on the immediate ancestors of the\n        // block_payload within the current era to determine if we are facing a replay\n        // attack.\n        if txn_era_id < proposed_block_era_id {\n            debug!(%sender, %txn_era_id, %proposed_block_era_id, \"consensus replay detection: transaction from previous era\");\n            return Event::ResolveValidity(ResolveValidity {\n                era_id: proposed_block_era_id,\n                sender,\n                proposed_block: proposed_block.clone(),\n                maybe_error: Some(Box::new(\n                    InvalidProposalError::TransactionReplayPreviousEra {\n                        transaction_era_id: txn_era_id.value(),\n                        proposed_block_era_id: proposed_block_era_id.value(),\n                    },\n                )),\n            });\n        }\n    }\n\n    let sender_for_validate_block: NodeId = 
sender;\n    let maybe_error = effect_builder\n        .validate_block(\n            sender_for_validate_block,\n            proposed_block_height,\n            proposed_block.clone(),\n        )\n        .await\n        .err();\n\n    Event::ResolveValidity(ResolveValidity {\n        era_id: proposed_block_era_id,\n        sender,\n        proposed_block,\n        maybe_error,\n    })\n}\n\nimpl ProposedBlock<ClContext> {\n    /// If this block contains a transaction that's also present in an ancestor, this returns the\n    /// transaction hash, otherwise `None`.\n    fn contains_replay(&self) -> Option<TransactionHash> {\n        let block_txns_set: BTreeSet<TransactionHash> =\n            self.value().all_transaction_hashes().collect();\n        self.context()\n            .ancestor_values()\n            .iter()\n            .flat_map(|ancestor| ancestor.all_transaction_hashes())\n            .find(|typed_txn_hash| block_txns_set.contains(typed_txn_hash))\n    }\n}\n\n/// When `async move { join!(…) }` is used inline, it prevents rustfmt\n/// to run on the chained `event` block.\nasync fn join_2<T: Future, U: Future>(\n    t: T,\n    u: U,\n) -> (<T as Future>::Output, <U as Future>::Output) {\n    futures::join!(t, u)\n}\n\n// The created RewardedSignatures should contain bit vectors for each of the block for which\n// signatures are being cited. If we are eligible to cite 3 blocks, RewardsSignature will contain an\n// at-most 3 vectors of bit vectors (Vec<Vec<u8>>). 
With `signature_rewards_max_delay = 3` The logic\n// is - \"we can cite signatures for the blocks parent, parents parent and parents parent parent\".\n// If we are close to genesis, the outer vector will obviously not have 3 entries.\n// (At height 0 there is no parent, at height 1 there is no grandparent etc.)\n// The `rewarded_signatures` vector will look something like:\n//    [[255, 64],[128, 0],[0, 0]]\n// Entries in the outer vec are interpreted as:\n//   - on index 0 - the last finalized block\n//   - on index 1 - the penultimate finalized block\n//   - on index 2 - the penpenultimate finalized block\n//  There are at most `signature_rewards_max_delay` entries in this vector. if we are \"close\" to\n//  genesis there can be less (at height 0 there is no history, so there will be no cited blocks, at\n// height 1 we can only cite signatures from one block etc.)  Each entry in this vector is also a\n// vector of u8 numbers. To interpret them we need to realize that if we concatenate all the bytes\n// of the numbers, the  nth bit will say that the nth validators signature was either cited (if the\n// bit is 1) or not (if the bit is 0).  To figure out which validator is on position n, we need to\n// take all the validators relevant to the era of the  particular block, fetch their public keys and\n// sort them ascending. 
In the quoted example we see that:  For the parent on the proposed block we\n// cite signatures of validators on position 0, 1, 2, 3, 4, 5, 6, 7 and 9  For the grandparent on\n// the proposed block we cite signatures of validators on position 0  For the grandgrandparent on\n// the proposed block we cite no signatures  Please note that due to using u8 as the \"packing\"\n// mechanism it is possible that the byte vector will have more bits than there are validators - we\n// round  it up to 8 (ceiling(number_of_valuidators/8)), the remaining bits are only used as padding\n// to full bytes.\nfn create_rewarded_signatures(\n    maybe_past_blocks_with_metadata: &[Option<BlockWithMetadata>],\n    validator_matrix: ValidatorMatrix,\n    block_context: &BlockContext<ClContext>,\n    signature_rewards_max_delay: u64,\n) -> RewardedSignatures {\n    let num_ancestor_values = block_context.ancestor_values().len();\n    let mut rewarded_signatures =\n        RewardedSignatures::new(maybe_past_blocks_with_metadata.iter().rev().map(\n            |maybe_past_block_with_metadata| {\n                maybe_past_block_with_metadata\n                    .as_ref()\n                    .and_then(|past_block_with_metadata| {\n                        create_single_block_rewarded_signatures(\n                            &validator_matrix,\n                            past_block_with_metadata,\n                        )\n                    })\n                    .unwrap_or_default()\n            },\n        ));\n\n    // exclude the signatures that were already included in ancestor blocks\n    for (past_index, ancestor_rewarded_signatures) in block_context\n        .ancestor_values()\n        .iter()\n        .map(|value| value.rewarded_signatures().clone())\n        // the above will only cover the signatures from the same era - chain\n        // with signatures from the blocks read from storage\n        .chain(\n            maybe_past_blocks_with_metadata\n                .iter()\n       
         .rev()\n                // skip the blocks corresponding to heights covered by\n                // ancestor_values\n                .skip(num_ancestor_values)\n                .map(|maybe_past_block| {\n                    maybe_past_block.as_ref().map_or_else(\n                        // if we're missing a block, this could cause us to include duplicate\n                        // signatures and make our proposal invalid - but this is covered by the\n                        // requirement for a validator to have blocks spanning the max deploy TTL\n                        // in the past\n                        Default::default,\n                        |past_block| past_block.block.rewarded_signatures().clone(),\n                    )\n                }),\n        )\n        .enumerate()\n        .take(signature_rewards_max_delay as usize)\n    {\n        rewarded_signatures = rewarded_signatures\n            .difference(&ancestor_rewarded_signatures.left_padded(past_index.saturating_add(1)));\n    }\n\n    rewarded_signatures\n}\n\n#[cfg(test)]\nmod tests {\n    use std::collections::{BTreeMap, BTreeSet};\n\n    use crate::{\n        consensus::{\n            era_supervisor::create_rewarded_signatures,\n            tests::utils::{ALICE_PUBLIC_KEY, ALICE_SECRET_KEY, BOB_PUBLIC_KEY, CAROL_PUBLIC_KEY},\n            BlockContext, ClContext,\n        },\n        types::{BlockWithMetadata, ValidatorMatrix},\n    };\n    use casper_types::{\n        bytesrepr::{Bytes, ToBytes},\n        testing::TestRng,\n        Block, BlockHash, BlockSignatures, BlockSignaturesV2, BlockV2, Digest, EraId,\n        ProtocolVersion, PublicKey, RewardedSignatures, Signature, SingleBlockRewardedSignatures,\n        Timestamp, U512,\n    };\n\n    #[test]\n    fn should_set_first_bit_if_earliest_key_cited() {\n        // The first bit in the bit list should be set to 1 if the \"lowest\" (in the sense of public\n        // key comaparison) public key signature was cited.\n        
let mut rng = TestRng::new();\n\n        let mut bs_v2 = BlockSignaturesV2::random(&mut rng);\n        bs_v2.insert_signature(\n            ALICE_PUBLIC_KEY.clone(),\n            Signature::ed25519([44; Signature::ED25519_LENGTH]).unwrap(),\n        );\n        let signatures = build_rewarded_signatures_without_historical_blocks(&mut rng, bs_v2);\n        assert_eq!(\n            signatures.to_bytes().unwrap(),\n            vec![Bytes::from(vec![128_u8])].to_bytes().unwrap()\n        );\n    }\n\n    #[test]\n    fn should_set_third_bit_if_the_first_validator_signature_cited() {\n        // Given there are three validators, if the first (by public key copmparison) validator\n        // signature was cited - the third bit should be set to 1\n        let mut rng = TestRng::new();\n\n        let mut bs_v2 = BlockSignaturesV2::random(&mut rng);\n        bs_v2.insert_signature(\n            BOB_PUBLIC_KEY.clone(),\n            Signature::ed25519([44; Signature::ED25519_LENGTH]).unwrap(),\n        );\n        let signatures = build_rewarded_signatures_without_historical_blocks(&mut rng, bs_v2);\n        assert_eq!(\n            signatures.to_bytes().unwrap(),\n            vec![Bytes::from(vec![32_u8])].to_bytes().unwrap()\n        );\n    }\n\n    #[test]\n    fn should_set_second_bit_if_the_second_validator_signature_cited() {\n        // Given there are three validators, if the second (by public key copmparison) validator\n        // signature was cited - the second bit should be set to 1\n        let mut rng = TestRng::new();\n\n        let mut bs_v2 = BlockSignaturesV2::random(&mut rng);\n        bs_v2.insert_signature(\n            CAROL_PUBLIC_KEY.clone(),\n            Signature::ed25519([44; Signature::ED25519_LENGTH]).unwrap(),\n        );\n        let signatures = build_rewarded_signatures_without_historical_blocks(&mut rng, bs_v2);\n        assert_eq!(\n            signatures.to_bytes().unwrap(),\n            vec![Bytes::from(vec![64_u8])].to_bytes().unwrap()\n 
       );\n    }\n\n    fn build_rewarded_signatures_without_historical_blocks(\n        rng: &mut TestRng,\n        bs_v2: BlockSignaturesV2,\n    ) -> RewardedSignatures {\n        assert!(*BOB_PUBLIC_KEY > *CAROL_PUBLIC_KEY && *CAROL_PUBLIC_KEY > *ALICE_PUBLIC_KEY);\n        let signatures_1 = BTreeSet::new();\n        let mut validator_public_keys: BTreeMap<PublicKey, U512> = BTreeMap::new();\n        // Making sure that Alice, Bob and Carols keys by stake have different ordering than\n        // by PublicKey\n        validator_public_keys.insert(\n            ALICE_PUBLIC_KEY.clone(),\n            U512::MAX.saturating_sub(100.into()),\n        );\n        validator_public_keys.insert(BOB_PUBLIC_KEY.clone(), 1_u64.into());\n        validator_public_keys.insert(CAROL_PUBLIC_KEY.clone(), U512::MAX);\n\n        let past_rewarded_signatures =\n            RewardedSignatures::new(vec![SingleBlockRewardedSignatures::from_validator_set(\n                &signatures_1,\n                validator_public_keys.keys(),\n            )]);\n\n        let block_v2 = BlockV2::new(\n            BlockHash::random(rng),\n            Digest::random(rng),\n            Digest::random(rng),\n            false,\n            None,\n            Timestamp::now(),\n            EraId::new(1),\n            1010,\n            ProtocolVersion::V2_0_0,\n            PublicKey::random(rng),\n            BTreeMap::new(),\n            past_rewarded_signatures,\n            1,\n            None,\n        );\n        let block = Block::V2(block_v2);\n\n        let block_1 = BlockWithMetadata {\n            block,\n            block_signatures: BlockSignatures::V2(bs_v2),\n        };\n        let maybe_past_blocks_with_metadata = vec![Some(block_1)];\n        let mut validator_matrix = ValidatorMatrix::new_with_validator(ALICE_SECRET_KEY.clone());\n        validator_matrix.register_validator_weights(EraId::new(1), validator_public_keys);\n        let timestamp = Timestamp::now();\n        let 
ancestor_values = vec![];\n        let block_context = BlockContext::<ClContext>::new(timestamp, ancestor_values);\n        create_rewarded_signatures(\n            &maybe_past_blocks_with_metadata,\n            validator_matrix,\n            &block_context,\n            1,\n        )\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/error.rs",
    "content": "use thiserror::Error;\n\nuse casper_types::{BlockHeader, EraId};\n\n#[derive(Error, Debug)]\npub enum CreateNewEraError {\n    #[error(\"Attempted to create era with no switch blocks.\")]\n    AttemptedToCreateEraWithNoSwitchBlocks,\n    #[error(\"Attempted to create {era_id} with non-switch block {last_block_header:?}.\")]\n    LastBlockHeaderNotASwitchBlock {\n        era_id: EraId,\n        last_block_header: Box<BlockHeader>,\n    },\n    #[error(\"Attempted to create {era_id} with too few switch blocks {switch_blocks:?}.\")]\n    InsufficientSwitchBlocks {\n        era_id: EraId,\n        switch_blocks: Vec<BlockHeader>,\n    },\n    #[error(\n        \"Attempted to create {era_id} with switch blocks from unexpected eras: {switch_blocks:?}.\"\n    )]\n    WrongSwitchBlockEra {\n        era_id: EraId,\n        switch_blocks: Vec<BlockHeader>,\n    },\n}\n"
  },
  {
    "path": "node/src/components/consensus/highway_core/active_validator.rs",
    "content": "use std::{\n    fmt::{self, Debug},\n    iter,\n};\n\nuse datasize::DataSize;\nuse tracing::{error, info, trace, warn};\n\nuse casper_types::{TimeDiff, Timestamp};\n\nuse super::{\n    endorsement::{Endorsement, SignedEndorsement},\n    evidence::Evidence,\n    highway::{Ping, ValidVertex, Vertex, WireUnit},\n    state::{self, Panorama, State, Unit},\n    ENABLE_ENDORSEMENTS,\n};\n\nuse crate::components::consensus::{\n    consensus_protocol::BlockContext,\n    highway_core::{highway::SignedWireUnit, state::Fault},\n    traits::{Context, ValidatorSecret},\n    utils::{ValidatorIndex, Weight},\n};\n\n/// An action taken by a validator.\n#[derive(Clone, Eq, PartialEq, Debug)]\npub(crate) enum Effect<C: Context> {\n    /// Newly vertex that should be gossiped to peers and added to the protocol state.\n    NewVertex(ValidVertex<C>),\n    /// `handle_timer` needs to be called at the specified time.\n    ScheduleTimer(Timestamp),\n    /// `propose` needs to be called with a value for a new block with the specified block context\n    /// and parent value.\n    /// The timestamp is the time at which the witness unit will be sent, which will invalidate the\n    /// proposal - so any response to this request has to be received before that time.\n    RequestNewBlock(BlockContext<C>, Timestamp),\n    /// This validator is faulty.\n    ///\n    /// When this is returned, the validator automatically deactivates.\n    WeAreFaulty(Fault<C>),\n}\n\n/// A validator that actively participates in consensus by creating new vertices.\n///\n/// It implements the Highway schedule. The protocol proceeds in rounds, and in each round one\n/// validator is the _leader_.\n/// * In the beginning of the round, the leader sends a _proposal_ unit, containing a consensus\n///   value (i.e. 
a block).\n/// * Upon receiving the proposal, all the other validators send a _confirmation_ unit, citing only\n///   the proposal, their own previous message, and resulting transitive justifications.\n/// * At a fixed point in time later in the round, everyone unconditionally sends a _witness_ unit,\n///   citing every unit they have received so far.\n///\n/// If the rounds are long enough (i.e. message delivery is fast enough) and there are enough\n/// honest validators, there will be a lot of confirmations for the proposal, and enough witness\n/// units citing all those confirmations, to create a summit and finalize the proposal.\n#[derive(DataSize)]\npub(crate) struct ActiveValidator<C>\nwhere\n    C: Context,\n{\n    /// Our own validator index.\n    vidx: ValidatorIndex,\n    /// The validator's secret signing key.\n    secret: C::ValidatorSecret,\n    /// The next round length.\n    next_round_len: TimeDiff,\n    /// The latest timer we scheduled.\n    next_timer: Timestamp,\n    /// Panorama and context for a block we are about to propose when we get a consensus value.\n    next_proposal: Option<(BlockContext<C>, Panorama<C>)>,\n    /// The target fault tolerance threshold. The validator pauses (i.e. 
doesn't create new units)\n    /// if not enough validators are online to finalize values at this FTT.\n    target_ftt: Weight,\n    /// If this flag is set we don't create new units and just send pings instead.\n    paused: bool,\n}\n\nimpl<C: Context> Debug for ActiveValidator<C> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        f.debug_struct(\"ActiveValidator\")\n            .field(\"vidx\", &self.vidx)\n            .field(\"next_round_len\", &self.next_round_len)\n            .field(\"next_timer\", &self.next_timer)\n            .field(\"paused\", &self.paused)\n            .finish()\n    }\n}\n\nimpl<C: Context> ActiveValidator<C> {\n    /// Creates a new `ActiveValidator` and the timer effect for the first call.\n    #[allow(clippy::too_many_arguments)]\n    pub(crate) fn new(\n        vidx: ValidatorIndex,\n        secret: C::ValidatorSecret,\n        current_time: Timestamp,\n        start_time: Timestamp,\n        state: &State<C>,\n        target_ftt: Weight,\n        instance_id: C::InstanceId,\n    ) -> (Self, Vec<Effect<C>>) {\n        let mut av = ActiveValidator {\n            vidx,\n            secret,\n            next_round_len: state.params().init_round_len(),\n            next_timer: state.params().start_timestamp(),\n            next_proposal: None,\n            target_ftt,\n            paused: false,\n        };\n        let mut effects = av.schedule_timer(start_time, state);\n        effects.push(av.send_ping(current_time, instance_id));\n        (av, effects)\n    }\n\n    /// Sets the next round length to the new value.\n    pub(crate) fn set_round_len(&mut self, new_round_len: TimeDiff) {\n        self.next_round_len = new_round_len;\n    }\n\n    /// Sets the pause status: While paused we don't create any new units, just pings.\n    pub(crate) fn set_paused(&mut self, paused: bool) {\n        self.paused = paused\n    }\n\n    /// Returns actions a validator needs to take at the specified `timestamp`, with the 
given\n    /// protocol `state`.\n    pub(crate) fn handle_timer(\n        &mut self,\n        timestamp: Timestamp,\n        state: &State<C>,\n        instance_id: C::InstanceId,\n    ) -> Vec<Effect<C>> {\n        if self.is_faulty(state) {\n            warn!(\"Creator knows it's faulty. Won't create a message.\");\n            return vec![];\n        }\n        let mut effects = self.schedule_timer(timestamp, state);\n        if self.earliest_unit_time(state) > timestamp {\n            warn!(%timestamp, \"skipping outdated timer event\");\n            return effects;\n        }\n        let r_len = self.round_len(state, timestamp);\n        let r_id = state::round_id(timestamp, r_len);\n        // Only create new units if enough validators are online.\n        if !self.paused && self.enough_validators_online(state, timestamp) {\n            if timestamp == r_id && state.leader(r_id) == self.vidx {\n                let expiry = r_id.saturating_add(self.proposal_request_expiry(r_len));\n                effects.extend(self.request_new_block(state, instance_id, timestamp, expiry));\n                return effects;\n            } else if timestamp == r_id.saturating_add(self.witness_offset(r_len)) {\n                let panorama = self.panorama_at(state, timestamp);\n                if let Some(witness_unit) =\n                    self.new_unit(panorama, timestamp, None, state, instance_id)\n                {\n                    if self\n                        .latest_unit(state)\n                        .is_none_or(|latest_unit| latest_unit.round_id() != r_id)\n                    {\n                        info!(round_id = %r_id, \"sending witness in round with no proposal\");\n                    }\n                    effects.push(Effect::NewVertex(ValidVertex(Vertex::Unit(witness_unit))));\n                    return effects;\n                }\n            }\n        }\n        // We are not creating a new unit. 
Send a ping once per maximum-length round, to show that\n        // we're online.\n        let one_max_round_ago = timestamp.saturating_sub(state.params().max_round_length());\n        if !state.has_ping(\n            self.vidx,\n            one_max_round_ago.saturating_add(TimeDiff::from_millis(1)),\n        ) {\n            warn!(%timestamp, \"too many validators offline, sending ping\");\n            effects.push(self.send_ping(timestamp, instance_id));\n        }\n        effects\n    }\n\n    /// Creates a Ping vertex.\n    pub(crate) fn send_ping(&self, timestamp: Timestamp, instance_id: C::InstanceId) -> Effect<C> {\n        let ping = Ping::new(self.vidx, timestamp, instance_id, &self.secret);\n        Effect::NewVertex(ValidVertex(Vertex::Ping(ping)))\n    }\n\n    /// Returns whether enough validators are online to finalize values with the target fault\n    /// tolerance threshold, always counting this validator as online.\n    fn enough_validators_online(&self, state: &State<C>, now: Timestamp) -> bool {\n        // We divide before adding, because  total_weight + target_fft  could overflow u64.\n        #[allow(clippy::arithmetic_side_effects)]\n        let target_quorum = state.total_weight() / 2 + self.target_ftt / 2;\n        let online_weight: Weight = state\n            .weights()\n            .enumerate()\n            .filter(|(vidx, _)| {\n                self.vidx == *vidx || (!state.is_faulty(*vidx) && state.is_online(*vidx, now))\n            })\n            .map(|(_, w)| *w)\n            .sum();\n        online_weight > target_quorum\n    }\n\n    /// Returns actions a validator needs to take upon receiving a new unit.\n    pub(crate) fn on_new_unit(\n        &mut self,\n        uhash: &C::Hash,\n        now: Timestamp,\n        state: &State<C>,\n        instance_id: C::InstanceId,\n    ) -> Vec<Effect<C>> {\n        if let Some(fault) = state.maybe_fault(self.vidx) {\n            return vec![Effect::WeAreFaulty(fault.clone())];\n        }\n 
       let mut effects = vec![];\n        if self.should_send_confirmation(uhash, now, state) {\n            let panorama = state.confirmation_panorama(self.vidx, uhash);\n            if panorama.has_correct() {\n                if let Some(confirmation_unit) =\n                    self.new_unit(panorama, now, None, state, instance_id)\n                {\n                    let vv = ValidVertex(Vertex::Unit(confirmation_unit));\n                    effects.push(Effect::NewVertex(vv));\n                }\n            }\n        };\n        if self.should_endorse(uhash, state) {\n            let endorsement = self.endorse(uhash);\n            effects.extend(vec![Effect::NewVertex(ValidVertex(endorsement))]);\n        }\n        effects\n    }\n\n    /// Returns actions validator needs to take upon receiving a new evidence.\n    /// Endorses all latest units by honest validators that do not mark new perpetrator as faulty\n    /// and cite some new message by that validator.\n    pub(crate) fn on_new_evidence(\n        &mut self,\n        evidence: &Evidence<C>,\n        state: &State<C>,\n    ) -> Vec<Effect<C>> {\n        if !ENABLE_ENDORSEMENTS {\n            return Vec::new();\n        }\n        let vidx = evidence.perpetrator();\n        state\n            .iter_correct_hashes()\n            .filter(|&v| {\n                let unit = state.unit(v);\n                unit.new_hash_obs(state, vidx)\n            })\n            .map(|v| self.endorse(v))\n            .map(|endorsement| Effect::NewVertex(ValidVertex(endorsement)))\n            .collect()\n    }\n\n    /// Returns an effect to request a consensus value for a block to propose.\n    ///\n    /// If we are already waiting for a consensus value, `None` is returned instead.\n    /// If the new value would come after a terminal block, the proposal is made immediately, and\n    /// without a value.\n    fn request_new_block(\n        &mut self,\n        state: &State<C>,\n        instance_id: C::InstanceId,\n 
       timestamp: Timestamp,\n        expiry: Timestamp,\n    ) -> Option<Effect<C>> {\n        if let Some((prop_context, _)) = self.next_proposal.take() {\n            warn!(?prop_context, \"no proposal received; requesting new one\");\n        }\n        let panorama = self.panorama_at(state, timestamp);\n        let maybe_parent_hash = state.fork_choice(&panorama);\n        // If the parent is a terminal block, just create a unit without a new block.\n        if maybe_parent_hash.is_some_and(|hash| state.is_terminal_block(hash)) {\n            return self\n                .new_unit(panorama, timestamp, None, state, instance_id)\n                .map(|proposal_unit| Effect::NewVertex(ValidVertex(Vertex::Unit(proposal_unit))));\n        }\n        // Otherwise we need to request a new consensus value to propose.\n        let ancestor_values = match maybe_parent_hash {\n            None => vec![],\n            Some(parent_hash) => iter::once(parent_hash)\n                .chain(state.ancestor_hashes(parent_hash))\n                .map(|bhash| state.block(bhash).value.clone())\n                .collect(),\n        };\n        let block_context = BlockContext::new(timestamp, ancestor_values);\n        self.next_proposal = Some((block_context.clone(), panorama));\n        Some(Effect::RequestNewBlock(block_context, expiry))\n    }\n\n    /// Proposes a new block with the given consensus value.\n    pub(crate) fn propose(\n        &mut self,\n        value: C::ConsensusValue,\n        block_context: BlockContext<C>,\n        state: &State<C>,\n        instance_id: C::InstanceId,\n    ) -> Vec<Effect<C>> {\n        let timestamp = block_context.timestamp();\n        let panorama = if let Some((expected_context, panorama)) = self.next_proposal.take() {\n            if expected_context != block_context {\n                warn!(?expected_context, ?block_context, \"unexpected proposal\");\n                return vec![];\n            }\n            panorama\n        } else 
{\n            warn!(\"unexpected proposal value\");\n            return vec![];\n        };\n        if self.earliest_unit_time(state) > timestamp {\n            warn!(?block_context, \"skipping outdated proposal\");\n            return vec![];\n        }\n        if self.is_faulty(state) {\n            warn!(\"Creator knows it's faulty. Won't create a message.\");\n            return vec![];\n        }\n\n        self.new_unit(panorama, timestamp, Some(value), state, instance_id)\n            .map(|proposal_unit| Effect::NewVertex(ValidVertex(Vertex::Unit(proposal_unit))))\n            .into_iter()\n            .collect()\n    }\n\n    /// Returns whether the incoming message is a proposal that we need to send a confirmation for.\n    fn should_send_confirmation(\n        &self,\n        vhash: &C::Hash,\n        timestamp: Timestamp,\n        state: &State<C>,\n    ) -> bool {\n        let unit = state.unit(vhash);\n        // If it's not a proposal, the sender is faulty, or we are, don't send a confirmation.\n        if unit.creator == self.vidx || self.is_faulty(state) || !state.is_correct_proposal(unit) {\n            return false;\n        }\n        let r_id = state::round_id(timestamp, self.round_len(state, timestamp));\n        if unit.timestamp != r_id {\n            trace!(\n                %unit.timestamp, %r_id,\n                \"not confirming proposal: wrong round\",\n            );\n            return false;\n        }\n        if unit.timestamp > timestamp {\n            error!(\n                %unit.timestamp, %timestamp,\n                \"added a unit with a future timestamp, should never happen\"\n            );\n            return false;\n        }\n        if let Some(unit) = self.latest_unit(state) {\n            if unit.panorama.sees_correct(state, vhash) {\n                error!(%vhash, \"called on_new_unit with already confirmed proposal\");\n                return false; // We already sent a confirmation.\n            }\n        }\n  
      let earliest_unit_time = self.earliest_unit_time(state);\n        if timestamp < earliest_unit_time {\n            warn!(\n                %earliest_unit_time, %timestamp,\n                \"earliest_unit_time is greater than current time stamp\"\n            );\n            return false;\n        }\n        true\n    }\n\n    /// Returns a new unit with the given data, and the correct sequence number.\n    ///\n    /// Returns `None` if it's not possible to create a valid unit with the given panorama.\n    fn new_unit(\n        &mut self,\n        panorama: Panorama<C>,\n        timestamp: Timestamp,\n        value: Option<C::ConsensusValue>,\n        state: &State<C>,\n        instance_id: C::InstanceId,\n    ) -> Option<SignedWireUnit<C>> {\n        if value.is_none() && !panorama.has_correct() {\n            return None; // Wait for the first proposal before creating a unit without a value.\n        }\n        if let Some((prop_context, _)) = self.next_proposal.take() {\n            warn!(?prop_context, \"canceling proposal due to unit\");\n        }\n        for hash in panorama.iter_correct_hashes() {\n            if timestamp < state.unit(hash).timestamp {\n                error!(\n                    %timestamp, justification_timestamp = %state.unit(hash).timestamp,\n                    \"canceling unit creation because of outdated timestamp\"\n                );\n                return None;\n            }\n        }\n        if panorama[self.vidx] != state.panorama()[self.vidx] {\n            error!(\n                ?panorama,\n                \"panorama for new unit would be equivocation; canceling unit creation\"\n            );\n            return None;\n        }\n        let seq_number = panorama.next_seq_num(state, self.vidx);\n        let endorsed = state.seen_endorsed(&panorama);\n        #[allow(clippy::arithmetic_side_effects)] // min_round_length is guaranteed to be > 0.\n        let round_exp = (self.round_len(state, timestamp) / 
state.params().min_round_length())\n            .trailing_zeros() as u8;\n        let hwunit = WireUnit {\n            panorama,\n            creator: self.vidx,\n            instance_id,\n            value,\n            seq_number,\n            timestamp,\n            round_exp,\n            endorsed,\n        }\n        .into_hashed();\n        let swunit = SignedWireUnit::new(hwunit, &self.secret);\n        Some(swunit)\n    }\n\n    /// Returns a `ScheduleTimer` effect for the next time we need to be called.\n    ///\n    /// If the time is before the current round's witness unit, schedule the witness unit.\n    /// Otherwise, if we are the next round's leader, schedule the proposal unit.\n    /// Otherwise schedule the next round's witness unit.\n    fn schedule_timer(&mut self, timestamp: Timestamp, state: &State<C>) -> Vec<Effect<C>> {\n        if self.next_timer > timestamp {\n            return Vec::new(); // We already scheduled the next call; nothing to do.\n        }\n        let r_len = self.round_len(state, timestamp);\n        let r_id = state::round_id(timestamp, r_len);\n        self.next_timer = if timestamp < r_id.saturating_add(self.witness_offset(r_len)) {\n            r_id.saturating_add(self.witness_offset(r_len))\n        } else {\n            let next_r_id = r_id.saturating_add(r_len);\n            if state.leader(next_r_id) == self.vidx {\n                next_r_id\n            } else {\n                let next_r_len = self.round_len(state, next_r_id);\n                next_r_id.saturating_add(self.witness_offset(next_r_len))\n            }\n        };\n        vec![Effect::ScheduleTimer(self.next_timer)]\n    }\n\n    /// Returns the earliest timestamp where we can cast our next unit: It can't be earlier than\n    /// our previous unit, and it can't be the third unit in a single round.\n    fn earliest_unit_time(&self, state: &State<C>) -> Timestamp {\n        self.latest_unit(state)\n            .map_or(state.params().start_timestamp(), 
|unit| {\n                unit.previous().map_or(unit.timestamp, |vh2| {\n                    let unit2 = state.unit(vh2);\n                    unit.timestamp\n                        .max(unit2.round_id().saturating_add(unit2.round_len()))\n                })\n            })\n    }\n\n    /// Returns the most recent unit by this validator.\n    pub(crate) fn latest_unit<'a>(&self, state: &'a State<C>) -> Option<&'a Unit<C>> {\n        state\n            .panorama()\n            .get(self.vidx)?\n            .correct()\n            .map(|vh| state.unit(vh))\n    }\n\n    /// Checks if validator knows it's faulty.\n    fn is_faulty(&self, state: &State<C>) -> bool {\n        state\n            .panorama()\n            .get(self.vidx)\n            .is_some_and(|obs| obs.is_faulty())\n    }\n\n    /// Returns the duration after the beginning of a round when the witness units are sent.\n    #[allow(clippy::arithmetic_side_effects)] // Round length will never be large enough to overflow.\n    fn witness_offset(&self, round_len: TimeDiff) -> TimeDiff {\n        round_len * 2 / 3\n    }\n\n    /// Returns the duration after the beginning of a round during which a response to a proposal\n    /// request has to be returned.\n    #[allow(clippy::arithmetic_side_effects)] // Round length will never be large enough to overflow.\n    fn proposal_request_expiry(&self, round_len: TimeDiff) -> TimeDiff {\n        // The time window is 1/6 of the round length - but no shorter than 500 ms, unless that's\n        // longer than the witness offset, in which case it's just the witness offset.\n        (round_len / 6)\n            .max(TimeDiff::from_millis(500))\n            .min(self.witness_offset(round_len))\n    }\n\n    /// The round length of the round containing `timestamp`.\n    ///\n    /// This returns `self.next_round_len`, if that is a valid round length for a unit cast at\n    /// `timestamp`. 
Otherwise it returns the round length of our latest unit.\n    fn round_len(&self, state: &State<C>, timestamp: Timestamp) -> TimeDiff {\n        self.latest_unit(state).map_or(self.next_round_len, |unit| {\n            let max_rl = self.next_round_len.max(unit.round_len);\n            if unit.timestamp < state::round_id(timestamp, max_rl) {\n                self.next_round_len\n            } else {\n                unit.round_len\n            }\n        })\n    }\n\n    /// Returns whether we should endorse the `vhash`.\n    ///\n    /// We should endorse unit from honest validator that cites _an_ equivocator\n    /// as honest and it cites some new message by that validator.\n    fn should_endorse(&self, vhash: &C::Hash, state: &State<C>) -> bool {\n        if !ENABLE_ENDORSEMENTS {\n            return false;\n        }\n        let unit = state.unit(vhash);\n        !state.is_faulty(unit.creator)\n            && unit\n                .panorama\n                .enumerate()\n                .any(|(vidx, _)| state.is_faulty(vidx) && unit.new_hash_obs(state, vidx))\n    }\n\n    /// Creates endorsement of the `vhash`.\n    fn endorse(&self, vhash: &C::Hash) -> Vertex<C> {\n        let endorsement = Endorsement::new(*vhash, self.vidx);\n        let signature = self.secret.sign(&endorsement.hash());\n        Vertex::Endorsements(SignedEndorsement::new(endorsement, signature).into())\n    }\n\n    /// Returns a panorama that is valid to use in our own unit at the given timestamp.\n    fn panorama_at(&self, state: &State<C>, timestamp: Timestamp) -> Panorama<C> {\n        // Take the panorama of all units at or before the given timestamp, because it's invalid to\n        // cite units newer than that. 
This is only relevant if we added units to the state whose\n        // timestamp is newer than the one of the unit we are creating, but it can happen due to\n        // delayed timer events.\n        let past_panorama = state.panorama().cutoff(state, timestamp);\n        state.valid_panorama(self.vidx, past_panorama)\n    }\n\n    /// Returns whether the unit was created by us.\n    pub(crate) fn is_our_unit(&self, wunit: &WireUnit<C>) -> bool {\n        self.vidx == wunit.creator\n    }\n\n    /// Returns whether the incoming vertex was signed by our key even though we don't have it yet.\n    /// This can only happen if another node is running with the same signing key.\n    pub(crate) fn is_doppelganger_vertex(&self, vertex: &Vertex<C>, state: &State<C>) -> bool {\n        match vertex {\n            Vertex::Unit(swunit) => {\n                // If we already have the unit in our local state,\n                // we must have had created it ourselves earlier and it is now gossiped back to us.\n                self.is_our_unit(swunit.wire_unit()) && !state.has_unit(&swunit.hash())\n            }\n            Vertex::Endorsements(endorsements) => {\n                if state::TODO_ENDORSEMENT_EVIDENCE_DISABLED {\n                    return false;\n                }\n                // Check whether the list of endorsements includes one created by a doppelganger.\n                // An endorsement created by a doppelganger cannot be found in the local protocol\n                // state (since we haven't created it ourselves).\n                let is_ours = |(vidx, _): &(ValidatorIndex, _)| vidx == &self.vidx;\n                endorsements.endorsers.iter().any(is_ours)\n                    && !state.has_endorsement(endorsements.unit(), self.vidx)\n            }\n            Vertex::Ping(ping) => {\n                // If we get a ping from ourselves with a later timestamp than the latest one we\n                // know of, another node must be signing with our key.\n    
            ping.creator() == self.vidx && !state.has_ping(self.vidx, ping.timestamp())\n            }\n            Vertex::Evidence(_) => false,\n        }\n    }\n\n    pub(crate) fn next_round_length(&self) -> TimeDiff {\n        self.next_round_len\n    }\n}\n\n#[cfg(test)]\n#[allow(clippy::arithmetic_side_effects)] // Overflows in tests panic anyway.\nmod tests {\n    use std::{collections::BTreeSet, fmt::Debug};\n\n    use crate::components::consensus::{\n        highway_core::highway_testing::TEST_INSTANCE_ID,\n        utils::{ValidatorMap, Weight},\n    };\n\n    use super::{\n        super::{\n            finality_detector::FinalityDetector,\n            state::{tests::*, State},\n        },\n        Vertex, *,\n    };\n\n    type Eff = Effect<TestContext>;\n\n    impl Eff {\n        fn unwrap_unit(self) -> SignedWireUnit<TestContext> {\n            if let Eff::NewVertex(ValidVertex(Vertex::Unit(swunit))) = self {\n                swunit\n            } else {\n                panic!(\"Unexpected effect: {:?}\", self);\n            }\n        }\n    }\n\n    struct TestState {\n        state: State<TestContext>,\n        instance_id: u64,\n        fd: FinalityDetector<TestContext>,\n        active_validators: ValidatorMap<ActiveValidator<TestContext>>,\n        timers: BTreeSet<(Timestamp, ValidatorIndex)>,\n    }\n\n    impl TestState {\n        fn new(\n            mut state: State<TestContext>,\n            start_time: Timestamp,\n            instance_id: u64,\n            fd: FinalityDetector<TestContext>,\n            validators: Vec<ValidatorIndex>,\n        ) -> Self {\n            let mut timers = BTreeSet::new();\n            let current_round_id = state::round_id(start_time, state.params().init_round_len());\n            let earliest_round_start = if start_time == current_round_id {\n                start_time\n            } else {\n                current_round_id + state.params().init_round_len()\n            };\n            let target_ftt = 
state.total_weight() / 3;\n            let mut active_validators = Vec::with_capacity(validators.len());\n            for vidx in validators {\n                let secret = TestSecret(vidx.0);\n                let (av, effects) = ActiveValidator::new(\n                    vidx,\n                    secret,\n                    start_time,\n                    start_time,\n                    &state,\n                    target_ftt,\n                    TEST_INSTANCE_ID,\n                );\n\n                let (timestamp, ping) = match &*effects {\n                    [Effect::ScheduleTimer(timestamp), Effect::NewVertex(ValidVertex(Vertex::Ping(ping)))] => {\n                        (*timestamp, ping)\n                    }\n                    other => panic!(\"expected timer and ping effects, got={:?}\", other),\n                };\n\n                state.add_ping(ping.creator(), ping.timestamp());\n\n                if state.leader(earliest_round_start) == vidx {\n                    assert_eq!(\n                        timestamp, earliest_round_start,\n                        \"Invalid initial timer scheduled for {:?}.\",\n                        vidx,\n                    )\n                } else {\n                    let witness_offset = av.witness_offset(state.params().init_round_len());\n                    let witness_timestamp = earliest_round_start + witness_offset;\n                    assert_eq!(\n                        timestamp, witness_timestamp,\n                        \"Invalid initial timer scheduled for {:?}.\",\n                        vidx,\n                    )\n                }\n                timers.insert((timestamp, vidx));\n                active_validators.push(av);\n            }\n\n            TestState {\n                state,\n                instance_id,\n                fd,\n                active_validators: active_validators.into_iter().collect(),\n                timers,\n            }\n        }\n\n        /// Force 
the validator to handle timer that may not have been scheduled by it.\n        /// Useful for testing.\n        /// Returns effects created when handling the timer.\n        fn handle_timer(\n            &mut self,\n            vidx: ValidatorIndex,\n            timestamp: Timestamp,\n        ) -> Vec<Effect<TestContext>> {\n            // Remove the timer from the queue if it has been scheduled.\n            let _ = self.timers.remove(&(timestamp, vidx));\n            let validator = &mut self.active_validators[vidx];\n            let effects = validator.handle_timer(timestamp, &self.state, self.instance_id);\n            self.schedule_timer(vidx, &effects);\n            self.add_new_unit(&effects);\n            effects\n        }\n\n        /// Propose new consensus value as validator `vidx`.\n        /// Returns effects created when proposing and newly proposed wire unit.\n        fn propose(\n            &mut self,\n            vidx: ValidatorIndex,\n            cv: <TestContext as Context>::ConsensusValue,\n            block_context: BlockContext<TestContext>,\n        ) -> (Vec<Effect<TestContext>>, SignedWireUnit<TestContext>) {\n            let validator = &mut self.active_validators[vidx];\n            let proposal_timestamp = block_context.timestamp();\n            let effects = validator.propose(cv, block_context, &self.state, self.instance_id);\n\n            // Add the new unit to the state.\n            let proposal_wunit = unwrap_single(&effects).unwrap_unit();\n            let prop_hash = proposal_wunit.hash();\n            self.state.add_unit(proposal_wunit.clone()).unwrap();\n            let effects = validator.on_new_unit(\n                &prop_hash,\n                proposal_timestamp + TimeDiff::from_millis(1),\n                &self.state,\n                self.instance_id,\n            );\n            self.schedule_timer(vidx, &effects);\n            (effects, proposal_wunit)\n        }\n\n        /// Handle new unit by validator `vidx`.\n   
     /// Since all validators use the same state, that unit should be added already. Panics if\n        /// not. Returns effect created when handling new unit.\n        fn handle_new_unit(\n            &mut self,\n            vidx: ValidatorIndex,\n            uhash: &<TestContext as Context>::Hash,\n        ) -> Vec<Effect<TestContext>> {\n            let validator = &mut self.active_validators[vidx];\n            let delivery_timestamp = self.state.unit(uhash).timestamp + TimeDiff::from_millis(1);\n            let effects =\n                validator.on_new_unit(uhash, delivery_timestamp, &self.state, self.instance_id);\n            self.schedule_timer(vidx, &effects);\n            self.add_new_unit(&effects);\n            effects\n        }\n\n        /// Schedules new timers, if any was returned as an effect.\n        fn schedule_timer(&mut self, vidx: ValidatorIndex, effects: &[Effect<TestContext>]) {\n            let new_timestamps: Vec<Timestamp> = effects\n                .iter()\n                .filter_map(|eff| {\n                    if let Effect::ScheduleTimer(timestamp) = eff {\n                        Some(*timestamp)\n                    } else {\n                        None\n                    }\n                })\n                .collect();\n            match *new_timestamps {\n                [] => (),\n                [timestamp] => {\n                    let _ = self.timers.insert((timestamp, vidx));\n                }\n                _ => panic!(\n                    \"Expected at most one timer to be scheduled: {:?}\",\n                    new_timestamps\n                ),\n            }\n        }\n\n        /// Adds new unit, if any, to the state.\n        fn add_new_unit(&mut self, effects: &[Effect<TestContext>]) {\n            let new_units: Vec<_> = effects\n                .iter()\n                .filter_map(|eff| {\n                    if let Effect::NewVertex(ValidVertex(Vertex::Unit(swunit))) = eff {\n                        
Some(swunit)\n                    } else {\n                        None\n                    }\n                })\n                .collect();\n            match *new_units {\n                [] => (),\n                [unit] => {\n                    self.state.add_unit(unit.clone()).unwrap();\n                }\n                _ => panic!(\n                    \"Expected at most one timer to be scheduled: {:?}\",\n                    new_units\n                ),\n            }\n        }\n\n        /// Returns hash of the newly finalized unit.\n        fn next_finalized(&mut self) -> Option<&<TestContext as Context>::Hash> {\n            self.fd.next_finalized(&self.state)\n        }\n    }\n\n    fn unwrap_single<T: Debug + Clone>(vec: &[T]) -> T {\n        let mut iter = vec.iter();\n        match (iter.next(), iter.next()) {\n            (None, _) => panic!(\"Unexpected empty vec\"),\n            (Some(t), None) => t.clone(),\n            (Some(t0), Some(t1)) => panic!(\"Expected only one element: {:?}, {:?}\", t0, t1),\n        }\n    }\n\n    #[test]\n    #[allow(clippy::unreadable_literal)] // 0xC0FFEE is more readable than 0x00C0_FFEE.\n    fn active_validator() {\n        let mut test = TestState::new(\n            State::new_test(&[Weight(3), Weight(4)], 0),\n            410.into(),\n            1u64,\n            FinalityDetector::new(Weight(2)),\n            vec![ALICE, BOB],\n        );\n\n        assert!(test.handle_timer(ALICE, 415.into()).is_empty()); // Too early: No new effects.\n\n        // We start at time 410, with round length 16, so the first leader tick is\n        // 416, and the first witness tick 426.\n        // Alice wants to propose a block, and also make her witness unit at 426.\n        let bctx = match &*test.handle_timer(ALICE, 416.into()) {\n            [Eff::ScheduleTimer(timestamp), Eff::RequestNewBlock(bctx, expiry)]\n                if *timestamp == 426.into() && *expiry == 426.into() =>\n            {\n                
bctx.clone()\n            }\n            effects => panic!(\"unexpected effects {:?}\", effects),\n        };\n        assert_eq!(\n            Timestamp::from(416),\n            bctx.timestamp(),\n            \"Proposal should be scheduled for the expected timestamp.\"\n        );\n\n        // She has a pending deploy from Colin who wants to pay for a hot beverage.\n        let (effects, new_unit) = test.propose(ALICE, 0xC0FFEE, bctx);\n        assert!(\n            effects.is_empty(),\n            \"No effects by creator after proposing a unit.\"\n        );\n\n        // Bob creates a confirmation unit for Alice's proposal.\n        let effects = test.handle_new_unit(BOB, &new_unit.hash());\n        // Validate that `effects` contain only one new unit – that is Bob's confirmation of Alice's\n        // vote.\n        let _ = unwrap_single(&effects).unwrap_unit();\n\n        // Bob creates his witness message 2/3 through the round.\n        let mut effects = test.handle_timer(BOB, 426.into()).into_iter();\n        assert_eq!(Some(Eff::ScheduleTimer(432.into())), effects.next()); // Bob is the next leader.\n        let _ = effects.next().unwrap().unwrap_unit();\n        assert_eq!(None, effects.next());\n\n        // Alice has not witnessed Bob's unit yet.\n        assert_eq!(None, test.next_finalized());\n\n        // Alice also sends her own witness message, completing the summit for her proposal.\n        let mut effects = test.handle_timer(ALICE, 426.into()).into_iter();\n        assert_eq!(Some(Eff::ScheduleTimer(442.into())), effects.next()); // Timer for witness unit.\n        let _ = effects.next().unwrap().unwrap_unit();\n        assert_eq!(None, effects.next());\n\n        // Payment finalized! 
\"One Pumpkin Spice Mochaccino for Corbyn!\"\n        assert_eq!(Some(&new_unit.hash()), test.next_finalized());\n    }\n\n    #[test]\n    fn ping_on_startup() {\n        let state = State::new_test(&[Weight(3)], 0);\n        let (_alice, init_effects) = ActiveValidator::new(\n            ALICE,\n            TestSecret(ALICE.0),\n            410.into(),\n            410.into(),\n            &state,\n            Weight(2),\n            TEST_INSTANCE_ID,\n        );\n\n        match &*init_effects {\n            &[Effect::ScheduleTimer(_), Effect::NewVertex(ValidVertex(Vertex::Ping(_)))] => {}\n            other => panic!(\n                \"expected two effects on startup: timer and ping. Got {:?}\",\n                other\n            ),\n        }\n    }\n\n    #[test]\n    fn detects_doppelganger_ping() {\n        let mut state = State::new_test(&[Weight(3)], 0);\n        let (active_validator, _init_effects) = ActiveValidator::new(\n            ALICE,\n            ALICE_SEC.clone(),\n            410.into(),\n            410.into(),\n            &state,\n            Weight(2),\n            TEST_INSTANCE_ID,\n        );\n\n        let ping = Vertex::Ping(Ping::new(ALICE, 500.into(), TEST_INSTANCE_ID, &ALICE_SEC));\n\n        // The ping is suspicious if it is newer than the latest ping (or unit) that has been added\n        // to the state.\n        assert!(active_validator.is_doppelganger_vertex(&ping, &state));\n        state.add_ping(ALICE, 499.into());\n        assert!(active_validator.is_doppelganger_vertex(&ping, &state));\n        state.add_ping(ALICE, 500.into());\n        assert!(!active_validator.is_doppelganger_vertex(&ping, &state));\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/highway_core/endorsement.rs",
    "content": "use datasize::DataSize;\nuse serde::{Deserialize, Serialize};\nuse thiserror::Error;\n\nuse crate::components::consensus::{traits::Context, utils::ValidatorIndex};\n\n/// An error due to an invalid endorsement.\n#[derive(Debug, Error, Eq, PartialEq)]\npub(crate) enum EndorsementError {\n    #[error(\"The creator is not a validator.\")]\n    Creator,\n    #[error(\"The creator is banned.\")]\n    Banned,\n    #[error(\"The signature is invalid.\")]\n    Signature,\n    #[error(\"The list of endorsements is empty.\")]\n    Empty,\n}\n\n/// Testimony that creator of `unit` was seen honest\n/// by `endorser` at the moment of creating this endorsement.\n#[derive(Clone, DataSize, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]\n#[serde(bound(\n    serialize = \"C::Hash: Serialize\",\n    deserialize = \"C::Hash: Deserialize<'de>\",\n))]\npub struct Endorsement<C>\nwhere\n    C: Context,\n{\n    /// Unit being endorsed.\n    unit: C::Hash,\n    /// The validator who created and sent this endorsement.\n    creator: ValidatorIndex,\n}\n\nimpl<C: Context> Endorsement<C> {\n    pub(crate) fn new(vhash: C::Hash, creator: ValidatorIndex) -> Self {\n        Endorsement {\n            unit: vhash,\n            creator,\n        }\n    }\n\n    /// Returns the hash of the endorsement.\n    pub fn hash(&self) -> C::Hash {\n        <C as Context>::hash(\n            &bincode::serialize(&(self.unit, self.creator)).expect(\"serialize endorsement\"),\n        )\n    }\n}\n\nmod specimen_support {\n    use crate::{\n        components::consensus::ClContext,\n        utils::specimen::{Cache, LargestSpecimen, SizeEstimator},\n    };\n\n    use super::Endorsement;\n\n    impl LargestSpecimen for Endorsement<ClContext> {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            Endorsement {\n                unit: LargestSpecimen::largest_specimen(estimator, cache),\n                creator: 
LargestSpecimen::largest_specimen(estimator, cache),\n            }\n        }\n    }\n}\n\n/// Testimony that creator of `unit` was seen honest\n/// by `endorser` at the moment of creating this endorsement.\n#[derive(Clone, DataSize, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]\n#[serde(bound(\n    serialize = \"C::Signature: Serialize\",\n    deserialize = \"C::Signature: Deserialize<'de>\",\n))]\npub struct SignedEndorsement<C>\nwhere\n    C: Context,\n{\n    /// Original endorsement,\n    endorsement: Endorsement<C>,\n    /// Original signature.\n    signature: C::Signature,\n}\n\nimpl<C: Context> SignedEndorsement<C> {\n    pub fn new(endorsement: Endorsement<C>, signature: C::Signature) -> Self {\n        SignedEndorsement {\n            endorsement,\n            signature,\n        }\n    }\n\n    /// Returns the unit being endorsed.\n    pub fn unit(&self) -> &C::Hash {\n        &self.endorsement.unit\n    }\n\n    /// Returns the creator of the endorsement.\n    pub fn validator_idx(&self) -> ValidatorIndex {\n        self.endorsement.creator\n    }\n\n    /// Returns the signature of the endorsement.\n    pub fn signature(&self) -> &C::Signature {\n        &self.signature\n    }\n\n    /// Returns the hash of the endorsement.\n    pub fn hash(&self) -> C::Hash {\n        self.endorsement.hash()\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/highway_core/evidence.rs",
    "content": "use std::iter;\n\nuse itertools::Itertools;\nuse thiserror::Error;\n\nuse crate::components::consensus::{\n    highway_core::{highway::SignedWireUnit, state::Params},\n    traits::Context,\n    utils::{ValidatorIndex, Validators},\n};\n\n/// An error due to invalid evidence.\n#[derive(Debug, Error, Eq, PartialEq)]\npub enum EvidenceError {\n    #[error(\"The sequence numbers in the equivocating units are different.\")]\n    EquivocationDifferentSeqNumbers,\n    #[error(\"The creators in the equivocating units are different.\")]\n    EquivocationDifferentCreators,\n    #[error(\"The units were created for a different instance ID.\")]\n    EquivocationInstanceId,\n    #[error(\"The two units are equal.\")]\n    EquivocationSameUnit,\n    #[error(\"The endorsements don't match the unit hashes.\")]\n    EndorsementWrongHash,\n    #[error(\"The creators of the conflicting endorsements are different.\")]\n    EndorsementDifferentCreators,\n    #[error(\"The swimlane is not a contiguous sequence of units.\")]\n    EndorsementInvalidSwimlane,\n    #[error(\"Includes more units than allowed.\")]\n    EndorsementTooManyUnits,\n    #[error(\"The perpetrator is not a validator.\")]\n    UnknownPerpetrator,\n    #[error(\"The signature is invalid.\")]\n    Signature,\n}\n\n#[allow(clippy::arithmetic_side_effects)]\npub mod relaxed {\n    // This module exists solely to exempt the `EnumDiscriminants` macro generated code from the\n    // module-wide `clippy::arithmetic_side_effects` lint.\n\n    use datasize::DataSize;\n    use serde::{Deserialize, Serialize};\n    use strum::EnumDiscriminants;\n\n    use crate::components::consensus::{\n        highway_core::{endorsement::SignedEndorsement, highway::SignedWireUnit},\n        traits::Context,\n    };\n\n    /// Evidence that a validator is faulty.\n    #[derive(\n        Clone, DataSize, Debug, Eq, PartialEq, Serialize, Deserialize, Hash, EnumDiscriminants,\n    )]\n    #[serde(bound(\n        serialize = 
\"C::Hash: Serialize\",\n        deserialize = \"C::Hash: Deserialize<'de>\",\n    ))]\n    #[strum_discriminants(derive(strum::EnumIter))]\n    pub enum Evidence<C>\n    where\n        C: Context,\n    {\n        /// The validator produced two units with the same sequence number.\n        Equivocation(SignedWireUnit<C>, SignedWireUnit<C>),\n        /// The validator endorsed two conflicting units.\n        Endorsements {\n            /// The endorsement for `unit1`.\n            endorsement1: SignedEndorsement<C>,\n            /// The unit with the lower (or equal) sequence number.\n            unit1: SignedWireUnit<C>,\n            /// The endorsement for `unit2`, by the same creator as endorsement1.\n            endorsement2: SignedEndorsement<C>,\n            /// The unit with the higher (or equal) sequence number, on a conflicting fork of the\n            /// same creator as `unit1`.\n            unit2: SignedWireUnit<C>,\n            /// The predecessors of `unit2`, back to the same sequence number as `unit1`, in\n            /// reverse chronological order.\n            swimlane2: Vec<SignedWireUnit<C>>,\n        },\n    }\n}\npub use relaxed::{Evidence, EvidenceDiscriminants};\n\nimpl<C: Context> Evidence<C> {\n    /// Returns the ID of the faulty validator.\n    pub fn perpetrator(&self) -> ValidatorIndex {\n        match self {\n            Evidence::Equivocation(unit1, _) => unit1.wire_unit().creator,\n            Evidence::Endorsements { endorsement1, .. 
} => endorsement1.validator_idx(),\n        }\n    }\n\n    /// Validates the evidence and returns `Ok(())` if it is valid.\n    /// \"Validation\" can mean different things for different type of evidence.\n    ///\n    /// - For an equivocation, it checks whether the creators, sequence numbers and instance IDs of\n    ///   the two units are the same.\n    pub fn validate(\n        &self,\n        validators: &Validators<C::ValidatorId>,\n        instance_id: &C::InstanceId,\n        params: &Params,\n    ) -> Result<(), EvidenceError> {\n        match self {\n            Evidence::Equivocation(unit1, unit2) => {\n                Self::validate_equivocation(unit1, unit2, instance_id, validators)\n            }\n            Evidence::Endorsements {\n                endorsement1,\n                unit1,\n                endorsement2,\n                unit2,\n                swimlane2,\n            } => {\n                if swimlane2.len() as u64 > params.endorsement_evidence_limit() {\n                    return Err(EvidenceError::EndorsementTooManyUnits);\n                }\n                let v_id = validators\n                    .id(endorsement1.validator_idx())\n                    .ok_or(EvidenceError::UnknownPerpetrator)?;\n                if *endorsement1.unit() != unit1.hash() || *endorsement2.unit() != unit2.hash() {\n                    return Err(EvidenceError::EndorsementWrongHash);\n                }\n                if endorsement1.validator_idx() != endorsement2.validator_idx() {\n                    return Err(EvidenceError::EndorsementDifferentCreators);\n                }\n                for (unit, pred) in iter::once(unit2).chain(swimlane2).tuple_windows() {\n                    if unit.wire_unit().previous() != Some(&pred.hash()) {\n                        return Err(EvidenceError::EndorsementInvalidSwimlane);\n                    }\n                }\n                Self::validate_equivocation(\n                    unit1,\n                  
  swimlane2.last().unwrap_or(unit2),\n                    instance_id,\n                    validators,\n                )?;\n                if !C::verify_signature(&endorsement1.hash(), v_id, endorsement1.signature())\n                    || !C::verify_signature(&endorsement2.hash(), v_id, endorsement2.signature())\n                {\n                    return Err(EvidenceError::Signature);\n                }\n                Ok(())\n            }\n        }\n    }\n\n    fn validate_equivocation(\n        unit1: &SignedWireUnit<C>,\n        unit2: &SignedWireUnit<C>,\n        instance_id: &C::InstanceId,\n        validators: &Validators<C::ValidatorId>,\n    ) -> Result<(), EvidenceError> {\n        let wunit1 = unit1.wire_unit();\n        let wunit2 = unit2.wire_unit();\n        let v_id = validators\n            .id(wunit1.creator)\n            .ok_or(EvidenceError::UnknownPerpetrator)?;\n        if wunit1.creator != wunit2.creator {\n            return Err(EvidenceError::EquivocationDifferentCreators);\n        }\n        if wunit1.seq_number != wunit2.seq_number {\n            return Err(EvidenceError::EquivocationDifferentSeqNumbers);\n        }\n        if wunit1.instance_id != *instance_id || wunit2.instance_id != *instance_id {\n            return Err(EvidenceError::EquivocationInstanceId);\n        }\n        if unit1 == unit2 {\n            return Err(EvidenceError::EquivocationSameUnit);\n        }\n        if !C::verify_signature(&unit1.hash(), v_id, &unit1.signature)\n            || !C::verify_signature(&unit2.hash(), v_id, &unit2.signature)\n        {\n            return Err(EvidenceError::Signature);\n        }\n        Ok(())\n    }\n}\n\nmod specimen_support {\n\n    use crate::{\n        components::consensus::ClContext,\n        utils::specimen::{\n            estimator_max_rounds_per_era, largest_variant, vec_of_largest_specimen, Cache,\n            LargestSpecimen, SizeEstimator,\n        },\n    };\n\n    use super::{Evidence, 
EvidenceDiscriminants};\n\n    impl LargestSpecimen for Evidence<ClContext> {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            largest_variant::<Self, EvidenceDiscriminants, _, _>(estimator, |variant| match variant\n            {\n                EvidenceDiscriminants::Equivocation => Evidence::Equivocation(\n                    LargestSpecimen::largest_specimen(estimator, cache),\n                    LargestSpecimen::largest_specimen(estimator, cache),\n                ),\n                EvidenceDiscriminants::Endorsements => {\n                    if estimator.parameter_bool(\"endorsements_enabled\") {\n                        Evidence::Endorsements {\n                            endorsement1: LargestSpecimen::largest_specimen(estimator, cache),\n                            unit1: LargestSpecimen::largest_specimen(estimator, cache),\n                            endorsement2: LargestSpecimen::largest_specimen(estimator, cache),\n                            unit2: LargestSpecimen::largest_specimen(estimator, cache),\n                            swimlane2: vec_of_largest_specimen(\n                                estimator,\n                                estimator_max_rounds_per_era(estimator),\n                                cache,\n                            ),\n                        }\n                    } else {\n                        Evidence::Equivocation(\n                            LargestSpecimen::largest_specimen(estimator, cache),\n                            LargestSpecimen::largest_specimen(estimator, cache),\n                        )\n                    }\n                }\n            })\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/highway_core/finality_detector/horizon.rs",
    "content": "use std::collections::BTreeSet;\n\nuse crate::components::consensus::{\n    highway_core::state::{State, Unit},\n    traits::Context,\n    utils::{ValidatorIndex, ValidatorMap, Weight},\n};\n\ntype Committee = Vec<ValidatorIndex>;\n\n/// A list containing the earliest level-n messages of each member of some committee, for some n.\n///\n/// A summit is a sequence of committees of validators, where each member of the level-n committee\n/// has produced a unit that can see level-(n-1) units by a quorum of the level-n committee.\n///\n/// The level-n horizon maps each validator of a level-n committee to their earliest level-n unit.\n/// From a level-n horizon, the level-(n+1) committee and horizon can be computed.\n#[derive(Debug)]\npub(super) struct Horizon<'a, C: Context> {\n    /// Assigns to each member of a committee the sequence number of the earliest message that\n    /// qualifies them for that committee.\n    sequence_numbers: ValidatorMap<Option<u64>>,\n    /// A reference to the protocol state this horizon belongs to.\n    state: &'a State<C>,\n    // The latest units that are eligible for the summit.\n    latest: &'a ValidatorMap<Option<&'a C::Hash>>,\n}\n\nimpl<'a, C: Context> Horizon<'a, C> {\n    /// Creates a horizon assigning to each validator their level-0 unit, i.e. 
the oldest unit in\n    /// their current streak of units for `candidate` (and descendants), or `None` if their latest\n    /// unit is not for `candidate`.\n    pub(super) fn level0(\n        candidate: &'a C::Hash,\n        state: &'a State<C>,\n        latest: &'a ValidatorMap<Option<&'a C::Hash>>,\n    ) -> Self {\n        let height = state.block(candidate).height;\n        let to_lvl0unit = |&maybe_vhash: &Option<&'a C::Hash>| {\n            state\n                .swimlane(maybe_vhash?)\n                .take_while(|(_, unit)| {\n                    state.find_ancestor_proposal(&unit.block, height) == Some(candidate)\n                })\n                .last()\n                .map(|(_, unit)| unit.seq_number)\n        };\n        Horizon {\n            sequence_numbers: latest.iter().map(to_lvl0unit).collect(),\n            state,\n            latest,\n        }\n    }\n\n    /// Returns a horizon `s` of units each of which can see a quorum of units in `self` by\n    /// validators that are part of `s`.\n    pub(super) fn next(&self, quorum: Weight) -> Option<Self> {\n        let (committee, _pruned) =\n            self.prune_committee(quorum, self.sequence_numbers.keys_some().collect());\n        if committee.is_empty() {\n            None\n        } else {\n            Some(self.next_from_committee(quorum, &committee))\n        }\n    }\n\n    /// Returns the greatest subset of the `committee` of validators whose latest units can see a\n    /// quorum of units by the subset in `self`.\n    ///\n    /// The first returned value is the pruned committee, the second one are the validators that\n    /// were pruned.\n    ///\n    /// Panics if a member of the committee is not in `self.latest`. 
This can never happen if the\n    /// committee was computed from a `Horizon` that originated from the same `level0` as this one.\n    pub(super) fn prune_committee(\n        &self,\n        quorum: Weight,\n        mut committee: Committee,\n    ) -> (Committee, BTreeSet<ValidatorIndex>) {\n        let mut pruned = BTreeSet::new();\n        loop {\n            let sees_quorum = |idx: &ValidatorIndex| {\n                self.seen_weight(self.state.unit(self.latest[*idx].unwrap()), &committee) >= quorum\n            };\n            let (new_committee, new_pruned): (Vec<_>, Vec<_>) =\n                committee.iter().cloned().partition(sees_quorum);\n            if new_pruned.is_empty() {\n                return (new_committee, pruned);\n            }\n            pruned.extend(new_pruned);\n            committee = new_committee;\n        }\n    }\n\n    /// The maximal quorum for which this is a committee, i.e. the minimum seen weight of the\n    /// members.\n    ///\n    /// Panics if a member of the committee is not in `self.latest`. 
This can never happen if the\n    /// committee was computed from a `Horizon` that originated from the same `level0` as this one.\n    #[allow(dead_code)]\n    pub(super) fn committee_quorum(&self, committee: &[ValidatorIndex]) -> Option<Weight> {\n        let seen_weight = |idx: &ValidatorIndex| {\n            self.seen_weight(self.state.unit(self.latest[*idx].unwrap()), committee)\n        };\n        committee.iter().map(seen_weight).min()\n    }\n\n    /// Returns the horizon containing the earliest unit of each of the `committee` members that\n    /// can see a quorum of units by `committee` members in `self`.\n    fn next_from_committee(&self, quorum: Weight, committee: &[ValidatorIndex]) -> Self {\n        let find_first_lvl_n = |idx: &ValidatorIndex| {\n            self.state\n                .swimlane(self.latest[*idx]?)\n                .take_while(|(_, unit)| self.seen_weight(unit, committee) >= quorum)\n                .last()\n                .map(|(_, unit)| (*idx, unit.seq_number))\n        };\n        let mut sequence_numbers = ValidatorMap::from(vec![None; self.latest.len()]);\n        for (vidx, sn) in committee.iter().flat_map(find_first_lvl_n) {\n            sequence_numbers[vidx] = Some(sn);\n        }\n        Horizon {\n            sequence_numbers,\n            state: self.state,\n            latest: self.latest,\n        }\n    }\n\n    /// Returns the total weight of the `committee`'s members whose message in this horizon is seen\n    /// by `unit`.\n    fn seen_weight(&self, unit: &Unit<C>, committee: &[ValidatorIndex]) -> Weight {\n        let to_weight = |&idx: &ValidatorIndex| self.state.weight(idx);\n        let is_seen = |&&idx: &&ValidatorIndex| self.can_see(unit, idx);\n        committee.iter().filter(is_seen).map(to_weight).sum()\n    }\n\n    /// Returns whether `unit` can see `idx`'s unit in `self`, where `unit` is considered to see\n    /// itself.\n    fn can_see(&self, unit: &Unit<C>, idx: ValidatorIndex) -> bool {\n        
self.sequence_numbers[idx].is_some_and(|self_sn| {\n            if unit.creator == idx {\n                unit.seq_number >= self_sn\n            } else {\n                let sees_self_sn = |vhash| self.state.unit(vhash).seq_number >= self_sn;\n                unit.panorama[idx].correct().is_some_and(sees_self_sn)\n            }\n        })\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/highway_core/finality_detector.rs",
    "content": "//! Functions for detecting finality of proposed blocks and calculating rewards.\n\nmod horizon;\n\nuse std::iter;\n\nuse datasize::DataSize;\nuse tracing::{trace, warn};\n\nuse casper_types::Timestamp;\n\nuse crate::components::consensus::{\n    consensus_protocol::{FinalizedBlock, TerminalBlockData},\n    highway_core::{\n        highway::Highway,\n        state::{Observation, State, Unit},\n    },\n    traits::Context,\n    utils::{ValidatorIndex, Weight},\n};\nuse horizon::Horizon;\n\n/// An error returned if the configured fault tolerance has been exceeded.\n#[derive(Debug)]\npub(crate) struct FttExceeded(pub Weight);\n\n/// An incremental finality detector.\n///\n/// It reuses information between subsequent calls, so it must always be applied to the same\n/// `State` instance: Later calls of `run` must see the same or a superset of the previous state.\n#[derive(Debug, DataSize)]\npub(crate) struct FinalityDetector<C>\nwhere\n    C: Context,\n{\n    /// The most recent known finalized block.\n    last_finalized: Option<C::Hash>,\n    /// The fault tolerance threshold.\n    ftt: Weight,\n}\n\nimpl<C: Context> FinalityDetector<C> {\n    pub(crate) fn new(ftt: Weight) -> Self {\n        assert!(ftt > Weight(0), \"finality threshold must not be zero\");\n        FinalityDetector {\n            last_finalized: None,\n            ftt,\n        }\n    }\n\n    /// Returns all blocks that have been finalized since the last call.\n    pub(crate) fn run<'a>(\n        &'a mut self,\n        highway: &'a Highway<C>,\n    ) -> Result<impl Iterator<Item = FinalizedBlock<C>> + 'a, FttExceeded> {\n        let state = highway.state();\n        let fault_w = state.faulty_weight();\n        // TODO - remove `allow` once false positive ceases.\n        #[allow(clippy::arithmetic_side_effects)] // False positive on `/ 2`.\n        if fault_w >= self.ftt || fault_w > (state.total_weight().saturating_sub(Weight(1))) / 2 {\n            warn!(panorama = 
?state.panorama(), \"fault tolerance threshold exceeded\");\n            return Err(FttExceeded(fault_w));\n        }\n        Ok(iter::from_fn(move || {\n            let bhash = self.next_finalized(state)?;\n            // Safe to unwrap: Index exists, since we have units from them.\n            let to_id = |vidx: ValidatorIndex| highway.validators().id(vidx).unwrap().clone();\n            let block = state.block(bhash);\n            let unit = state.unit(bhash);\n            let terminal_block_data = state\n                .is_terminal_block(bhash)\n                .then(|| Self::create_terminal_block_data(bhash, unit, highway));\n            let finalized_block = FinalizedBlock {\n                value: block.value.clone(),\n                timestamp: unit.timestamp,\n                relative_height: block.height,\n                terminal_block_data,\n                equivocators: unit.panorama.iter_faulty().map(to_id).collect(),\n                proposer: to_id(unit.creator),\n            };\n            trace!(panorama = ?state.panorama(), ?finalized_block, \"finality detected\");\n            Some(finalized_block)\n        }))\n    }\n\n    /// Returns the next block, if any has been finalized since the last call.\n    pub(super) fn next_finalized<'a>(&mut self, state: &'a State<C>) -> Option<&'a C::Hash> {\n        let start_time = Timestamp::now();\n        let candidate = self.next_candidate(state)?;\n        // For `lvl` → ∞, the quorum converges to a fixed value. After level 63, it is closer\n        // to that limit than 1/2^-63. 
This won't make a difference in practice, so there is no\n        // point looking for higher summits.\n        let mut target_lvl = 63;\n        while target_lvl > 0 {\n            trace!(%target_lvl, \"looking for summit\");\n            let lvl = self.find_summit(target_lvl, candidate, state);\n            if lvl == target_lvl {\n                self.last_finalized = Some(*candidate);\n                let elapsed = start_time.elapsed();\n                trace!(%elapsed, \"found finalized block\");\n                return Some(candidate);\n            }\n            // The required quorum increases with decreasing level, so choosing `target_lvl`\n            // greater than `lvl` would always yield a summit of level `lvl` or lower.\n            target_lvl = lvl;\n        }\n        let elapsed = start_time.elapsed();\n        trace!(%elapsed, \"found no finalized block\");\n        None\n    }\n\n    /// Returns the number of levels of the highest summit with a quorum that a `target_lvl` summit\n    /// would need for the desired FTT. If the returned number is `target_lvl` that means the\n    /// `candidate` is finalized. 
If not, we need to retry with a lower `target_lvl`.\n    pub(crate) fn find_summit(\n        &self,\n        target_lvl: usize,\n        candidate: &C::Hash,\n        state: &State<C>,\n    ) -> usize {\n        let total_w = state.total_weight();\n        let quorum = self.quorum_for_lvl(target_lvl, total_w);\n        let latest = state.panorama().iter().map(Observation::correct).collect();\n        let sec0 = Horizon::level0(candidate, state, &latest);\n        let horizons_iter = iter::successors(Some(sec0), |sec| sec.next(quorum));\n        horizons_iter.skip(1).take(target_lvl).count()\n    }\n\n    /// Returns the quorum required by a summit with the specified level and the required FTT.\n    #[allow(clippy::arithmetic_side_effects)] // See comments.\n    fn quorum_for_lvl(&self, lvl: usize, total_w: Weight) -> Weight {\n        // A level-lvl summit with quorum  total_w/2 + t  has relative FTT  2t(1 − 1/2^lvl). So:\n        // quorum = total_w / 2 + ftt / 2 / (1 - 1/2^lvl)\n        //        = total_w / 2 + 2^lvl * ftt / 2 / (2^lvl - 1)\n        //        = ((2^lvl - 1) total_w + 2^lvl ftt) / (2 * 2^lvl - 2))\n        // Levels higher than 63 have negligible effect. With 63, this can't overflow.\n        let pow_lvl = 1u128 << lvl.min(63);\n        // Since  pow_lvl <= 2^63,  we have  numerator < (2^64 - 1) * 2^64.\n        // It is safe to subtract because  pow_lvl > 0.\n        let numerator = (pow_lvl - 1) * u128::from(total_w) + pow_lvl * u128::from(self.ftt);\n        // And  denominator < 2^64,  so  numerator + denominator < 2^128.\n        let denominator = 2 * pow_lvl - 2;\n        // The numerator is positive because  ftt > 0.\n        // Since this is a lower bound for the quorum, we round up when dividing.\n        Weight(\n            numerator\n                .div_ceil(denominator)\n                .try_into()\n                .expect(\"quorum overflow\"),\n        )\n    }\n\n    /// Returns the next candidate for finalization, i.e. 
the lowest block in the fork choice that\n    /// has not been finalized yet.\n    fn next_candidate<'a>(&self, state: &'a State<C>) -> Option<&'a C::Hash> {\n        let fork_choice = state.fork_choice(state.panorama())?;\n        state.find_ancestor_proposal(fork_choice, self.next_height(state))\n    }\n\n    /// Returns the height of the next block that will be finalized.\n    fn next_height(&self, state: &State<C>) -> u64 {\n        // In a trillion years, we need to make block height u128.\n        #[allow(clippy::arithmetic_side_effects)]\n        let height_plus_1 = |bhash| state.block(bhash).height + 1;\n        self.last_finalized.as_ref().map_or(0, height_plus_1)\n    }\n\n    /// Returns the hash of the last finalized block (if any).\n    pub(crate) fn last_finalized(&self) -> Option<&C::Hash> {\n        self.last_finalized.as_ref()\n    }\n\n    /// Returns the configured fault tolerance threshold of this detector.\n    pub(crate) fn fault_tolerance_threshold(&self) -> Weight {\n        self.ftt\n    }\n\n    /// Creates the information for the terminal block: which validators were inactive, and how\n    /// rewards should be distributed.\n    fn create_terminal_block_data(\n        bhash: &C::Hash,\n        unit: &Unit<C>,\n        highway: &Highway<C>,\n    ) -> TerminalBlockData<C> {\n        // Safe to unwrap: Index exists, since we have units from them.\n        let to_id = |vidx: ValidatorIndex| highway.validators().id(vidx).unwrap().clone();\n        let state = highway.state();\n\n        // Report inactive validators, but only if they had sufficient time to create a unit, i.e.\n        // if at least one maximum-length round passed between the first and last block.\n        // Safe to unwrap: Ancestor at height 0 always exists.\n        let first_bhash = state.find_ancestor_proposal(bhash, 0).unwrap();\n        let sufficient_time_for_activity = unit.timestamp\n            >= state\n                .unit(first_bhash)\n                
.timestamp\n                .saturating_add(state.params().max_round_length());\n        let inactive_validators = if sufficient_time_for_activity {\n            unit.panorama.iter_none().map(to_id).collect()\n        } else {\n            Vec::new()\n        };\n\n        TerminalBlockData {\n            inactive_validators,\n        }\n    }\n}\n\n#[allow(unused_qualifications, clippy::arithmetic_side_effects)] // This is to suppress warnings originating in the test macros.\n#[cfg(test)]\nmod tests {\n    use super::{\n        super::state::{tests::*, State},\n        *,\n    };\n\n    #[test]\n    fn finality_detector() -> Result<(), AddUnitError<TestContext>> {\n        let mut state = State::new_test(&[Weight(5), Weight(4), Weight(1)], 0);\n\n        // Create blocks with scores as follows:\n        //\n        //          a0: 9 — a1: 5\n        //        /       \\\n        // b0: 10           b1: 4\n        //        \\\n        //          c0: 1 — c1: 1\n        let b0 = add_unit!(state, BOB, 0xB0; N, N, N)?;\n        let c0 = add_unit!(state, CAROL, 0xC0; N, b0, N)?;\n        let c1 = add_unit!(state, CAROL, 0xC1; N, b0, c0)?;\n        let a0 = add_unit!(state, ALICE, 0xA0; N, b0, N)?;\n        let a1 = add_unit!(state, ALICE, 0xA1; a0, b0, c1)?;\n        let b1 = add_unit!(state, BOB, 0xB1; a0, b0, N)?;\n\n        let mut fd4 = FinalityDetector::new(Weight(4)); // Fault tolerance 4.\n        let mut fd6 = FinalityDetector::new(Weight(6)); // Fault tolerance 6.\n\n        // The total weight of our validators is 10.\n        // A level-k summit with quorum q has relative FTT  2 (q - 10/2) (1 − 1/2^k).\n        //\n        // `b0`, `a0` are level 0 for `B0`. 
`a0`, `b1` are level 1.\n        // So the fault tolerance of `B0` is 2 * (9 - 10/2) * (1 - 1/2) = 4.\n        assert_eq!(None, fd6.next_finalized(&state));\n        assert_eq!(Some(&b0), fd4.next_finalized(&state));\n        assert_eq!(None, fd4.next_finalized(&state));\n\n        // Adding another level to the summit increases `B0`'s fault tolerance to 6.\n        let _a2 = add_unit!(state, ALICE, None; a1, b1, c1)?;\n        let _b2 = add_unit!(state, BOB, None; a1, b1, c1)?;\n        assert_eq!(Some(&b0), fd6.next_finalized(&state));\n        assert_eq!(None, fd6.next_finalized(&state));\n        Ok(())\n    }\n\n    #[test]\n    fn equivocators() -> Result<(), AddUnitError<TestContext>> {\n        let mut state = State::new_test(&[Weight(5), Weight(4), Weight(1)], 0);\n        let mut fd4 = FinalityDetector::new(Weight(4)); // Fault tolerance 4.\n\n        // Create blocks with scores as follows:\n        //\n        //          a0: 9 — a1: 9 - a2: 5 - a3: 5\n        //        /       \\      \\       \\\n        // b0: 10           b1: 4  b2: 4  b3: 4\n        //        \\\n        //          c0: 1 — c1: 1\n        //               \\ c1': 1\n        let b0 = add_unit!(state, BOB, 0xB0; N, N, N)?;\n        let a0 = add_unit!(state, ALICE, 0xA0; N, b0, N)?;\n        let c0 = add_unit!(state, CAROL, 0xC0; N, b0, N)?;\n        let _c1 = add_unit!(state, CAROL, 0xC1; N, b0, c0)?;\n        assert_eq!(Weight(0), state.faulty_weight());\n        let _c1_prime = add_unit!(state, CAROL, None; N, b0, c0)?;\n        assert_eq!(Weight(1), state.faulty_weight());\n        let b1 = add_unit!(state, BOB, 0xB1; a0, b0, N)?;\n        assert_eq!(Some(&b0), fd4.next_finalized(&state));\n        let a1 = add_unit!(state, ALICE, 0xA1; a0, b0, F)?;\n        let b2 = add_unit!(state, BOB, None; a1, b1, F)?;\n        let a2 = add_unit!(state, ALICE, 0xA2; a1, b2, F)?;\n        assert_eq!(Some(&a0), fd4.next_finalized(&state));\n        assert_eq!(Some(&a1), 
fd4.next_finalized(&state));\n        // Finalize A2.\n        let b3 = add_unit!(state, BOB, None; a2, b2, F)?;\n        let _a3 = add_unit!(state, ALICE, None; a2, b3, F)?;\n        assert_eq!(Some(&a2), fd4.next_finalized(&state));\n\n        // Test that an initial block reports equivocators as well.\n        let mut bstate: State<TestContext> = State::new_test(&[Weight(5), Weight(4), Weight(1)], 0);\n        let mut fde4 = FinalityDetector::new(Weight(4)); // Fault tolerance 4.\n        let _c0 = add_unit!(bstate, CAROL, 0xC0; N, N, N)?;\n        let _c0_prime = add_unit!(bstate, CAROL, 0xCC0; N, N, N)?;\n        let a0 = add_unit!(bstate, ALICE, 0xA0; N, N, F)?;\n        let b0 = add_unit!(bstate, BOB, None; a0, N, F)?;\n        let _a1 = add_unit!(bstate, ALICE, None; a0, b0, F)?;\n        assert_eq!(Some(&a0), fde4.next_finalized(&bstate));\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/highway_core/highway/vertex.rs",
    "content": "use std::{collections::BTreeSet, fmt::Debug};\n\nuse datasize::DataSize;\nuse serde::{Deserialize, Deserializer, Serialize, Serializer};\n\nuse casper_types::Timestamp;\n\nuse crate::components::consensus::{\n    highway_core::{\n        endorsement::SignedEndorsement,\n        highway::{PingError, VertexError},\n        state::Panorama,\n    },\n    traits::{Context, ValidatorSecret},\n    utils::{ValidatorIndex, Validators},\n};\n\n#[allow(clippy::arithmetic_side_effects)]\nmod relaxed {\n    // This module exists solely to exempt the `EnumDiscriminants` macro generated code from the\n    // module-wide `clippy::arithmetic_side_effects` lint.\n\n    use casper_types::Timestamp;\n    use datasize::DataSize;\n    use serde::{Deserialize, Serialize};\n    use strum::EnumDiscriminants;\n\n    use crate::components::consensus::{\n        highway_core::evidence::Evidence, traits::Context, utils::ValidatorIndex,\n    };\n\n    use super::{Endorsements, Ping, SignedWireUnit};\n\n    /// A dependency of a `Vertex` that can be satisfied by one or more other vertices.\n    #[derive(\n        DataSize,\n        Clone,\n        Debug,\n        Eq,\n        PartialEq,\n        PartialOrd,\n        Ord,\n        Hash,\n        Serialize,\n        Deserialize,\n        EnumDiscriminants,\n    )]\n    #[serde(bound(\n        serialize = \"C::Hash: Serialize\",\n        deserialize = \"C::Hash: Deserialize<'de>\",\n    ))]\n    #[strum_discriminants(derive(strum::EnumIter))]\n    pub enum Dependency<C>\n    where\n        C: Context,\n    {\n        /// The hash of a unit.\n        Unit(C::Hash),\n        /// The index of the validator against which evidence is needed.\n        Evidence(ValidatorIndex),\n        /// The hash of the unit to be endorsed.\n        Endorsement(C::Hash),\n        /// The ping by a particular validator for a particular timestamp.\n        Ping(ValidatorIndex, Timestamp),\n    }\n\n    /// An element of the protocol state, that might 
depend on other elements.\n    ///\n    /// It is the vertex in a directed acyclic graph, whose edges are dependencies.\n    #[derive(\n        DataSize, Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Hash, EnumDiscriminants,\n    )]\n    #[serde(bound(\n        serialize = \"C::Hash: Serialize\",\n        deserialize = \"C::Hash: Deserialize<'de>\",\n    ))]\n    #[strum_discriminants(derive(strum::EnumIter))]\n    pub enum Vertex<C>\n    where\n        C: Context,\n    {\n        /// A signed unit of the consensus DAG.\n        Unit(SignedWireUnit<C>),\n        /// Evidence of a validator's transgression.\n        Evidence(Evidence<C>),\n        /// Endorsements for a unit.\n        Endorsements(Endorsements<C>),\n        /// A ping conveying the activity of its creator.\n        Ping(Ping<C>),\n    }\n}\npub use relaxed::{Dependency, DependencyDiscriminants, Vertex, VertexDiscriminants};\n\nimpl<C: Context> Dependency<C> {\n    /// Returns whether this identifies a unit, as opposed to other types of vertices.\n    pub fn is_unit(&self) -> bool {\n        matches!(self, Dependency::Unit(_))\n    }\n}\n\nimpl<C: Context> Vertex<C> {\n    /// Returns the consensus value mentioned in this vertex, if any.\n    ///\n    /// These need to be validated before passing the vertex into the protocol state. E.g. if\n    /// `C::ConsensusValue` is a transaction, it should be validated first (correct signature,\n    /// structure, gas limit, etc.). If it is a hash of a transaction, the transaction should be\n    /// obtained _and_ validated. 
Only after that, the vertex can be considered valid.\n    pub fn value(&self) -> Option<&C::ConsensusValue> {\n        match self {\n            Vertex::Unit(swunit) => swunit.wire_unit().value.as_ref(),\n            Vertex::Evidence(_) | Vertex::Endorsements(_) | Vertex::Ping(_) => None,\n        }\n    }\n\n    /// Returns the unit hash of this vertex (if it is a unit).\n    pub fn unit_hash(&self) -> Option<C::Hash> {\n        match self {\n            Vertex::Unit(swunit) => Some(swunit.hash()),\n            Vertex::Evidence(_) | Vertex::Endorsements(_) | Vertex::Ping(_) => None,\n        }\n    }\n\n    /// Returns the seq number of this vertex (if it is a unit).\n    pub fn unit_seq_number(&self) -> Option<u64> {\n        match self {\n            Vertex::Unit(swunit) => Some(swunit.wire_unit().seq_number),\n            _ => None,\n        }\n    }\n\n    /// Returns whether this is evidence, as opposed to other types of vertices.\n    pub fn is_evidence(&self) -> bool {\n        matches!(self, Vertex::Evidence(_))\n    }\n\n    /// Returns a `Timestamp` provided the vertex is a `Vertex::Unit` or `Vertex::Ping`.\n    pub fn timestamp(&self) -> Option<Timestamp> {\n        match self {\n            Vertex::Unit(signed_wire_unit) => Some(signed_wire_unit.wire_unit().timestamp),\n            Vertex::Ping(ping) => Some(ping.timestamp()),\n            Vertex::Evidence(_) | Vertex::Endorsements(_) => None,\n        }\n    }\n\n    /// Returns the creator of this vertex, if one is defined.\n    pub fn creator(&self) -> Option<ValidatorIndex> {\n        match self {\n            Vertex::Unit(signed_wire_unit) => Some(signed_wire_unit.wire_unit().creator),\n            Vertex::Ping(ping) => Some(ping.creator),\n            Vertex::Evidence(_) | Vertex::Endorsements(_) => None,\n        }\n    }\n\n    /// Returns the ID of this vertex.\n    pub fn id(&self) -> Dependency<C> {\n        match self {\n            Vertex::Unit(signed_wire_unit) => 
Dependency::Unit(signed_wire_unit.hash()),\n            Vertex::Evidence(evidence) => Dependency::Evidence(evidence.perpetrator()),\n            Vertex::Endorsements(endorsement) => Dependency::Endorsement(endorsement.unit),\n            Vertex::Ping(ping) => Dependency::Ping(ping.creator(), ping.timestamp()),\n        }\n    }\n\n    /// Returns a reference to the unit, or `None` if this is not a unit.\n    pub fn unit(&self) -> Option<&SignedWireUnit<C>> {\n        match self {\n            Vertex::Unit(signed_wire_unit) => Some(signed_wire_unit),\n            _ => None,\n        }\n    }\n\n    /// Returns true whether unit is a proposal.\n    pub fn is_proposal(&self) -> bool {\n        self.value().is_some()\n    }\n}\n\nmod specimen_support {\n    use super::{\n        Dependency, DependencyDiscriminants, Endorsements, HashedWireUnit, Ping, SignedEndorsement,\n        SignedWireUnit, Vertex, VertexDiscriminants, WireUnit,\n    };\n    use crate::{\n        components::consensus::ClContext,\n        utils::specimen::{\n            btree_set_distinct_from_prop, largest_variant, vec_prop_specimen, Cache,\n            LargestSpecimen, SizeEstimator,\n        },\n    };\n\n    impl LargestSpecimen for Vertex<ClContext> {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            largest_variant::<Self, VertexDiscriminants, _, _>(estimator, |variant| match variant {\n                VertexDiscriminants::Unit => {\n                    Vertex::Unit(LargestSpecimen::largest_specimen(estimator, cache))\n                }\n                VertexDiscriminants::Evidence => {\n                    Vertex::Evidence(LargestSpecimen::largest_specimen(estimator, cache))\n                }\n                VertexDiscriminants::Endorsements => {\n                    if estimator.parameter_bool(\"endorsements_enabled\") {\n                        Vertex::Endorsements(LargestSpecimen::largest_specimen(estimator, cache))\n                 
   } else {\n                        Vertex::Ping(LargestSpecimen::largest_specimen(estimator, cache))\n                    }\n                }\n                VertexDiscriminants::Ping => {\n                    Vertex::Ping(LargestSpecimen::largest_specimen(estimator, cache))\n                }\n            })\n        }\n    }\n\n    impl LargestSpecimen for Dependency<ClContext> {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            largest_variant::<Self, DependencyDiscriminants, _, _>(estimator, |variant| {\n                match variant {\n                    DependencyDiscriminants::Unit => {\n                        Dependency::Unit(LargestSpecimen::largest_specimen(estimator, cache))\n                    }\n                    DependencyDiscriminants::Evidence => {\n                        Dependency::Evidence(LargestSpecimen::largest_specimen(estimator, cache))\n                    }\n                    DependencyDiscriminants::Endorsement => {\n                        Dependency::Endorsement(LargestSpecimen::largest_specimen(estimator, cache))\n                    }\n                    DependencyDiscriminants::Ping => Dependency::Ping(\n                        LargestSpecimen::largest_specimen(estimator, cache),\n                        LargestSpecimen::largest_specimen(estimator, cache),\n                    ),\n                }\n            })\n        }\n    }\n\n    impl LargestSpecimen for SignedWireUnit<ClContext> {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            SignedWireUnit {\n                hashed_wire_unit: LargestSpecimen::largest_specimen(estimator, cache),\n                signature: LargestSpecimen::largest_specimen(estimator, cache),\n            }\n        }\n    }\n\n    impl LargestSpecimen for Endorsements<ClContext> {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            
Endorsements {\n                unit: LargestSpecimen::largest_specimen(estimator, cache),\n                endorsers: if estimator.parameter_bool(\"endorsements_enabled\") {\n                    vec_prop_specimen(estimator, \"validator_count\", cache)\n                } else {\n                    Vec::new()\n                },\n            }\n        }\n    }\n\n    impl LargestSpecimen for SignedEndorsement<ClContext> {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            SignedEndorsement::new(\n                LargestSpecimen::largest_specimen(estimator, cache),\n                LargestSpecimen::largest_specimen(estimator, cache),\n            )\n        }\n    }\n\n    impl LargestSpecimen for Ping<ClContext> {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            Ping {\n                creator: LargestSpecimen::largest_specimen(estimator, cache),\n                timestamp: LargestSpecimen::largest_specimen(estimator, cache),\n                instance_id: LargestSpecimen::largest_specimen(estimator, cache),\n                signature: LargestSpecimen::largest_specimen(estimator, cache),\n            }\n        }\n    }\n\n    impl LargestSpecimen for HashedWireUnit<ClContext> {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            if let Some(item) = cache.get::<Self>() {\n                return item.clone();\n            }\n\n            let hash = LargestSpecimen::largest_specimen(estimator, cache);\n            let wire_unit = LargestSpecimen::largest_specimen(estimator, cache);\n            cache.set(HashedWireUnit { hash, wire_unit }).clone()\n        }\n    }\n\n    impl LargestSpecimen for WireUnit<ClContext> {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            WireUnit {\n                panorama: LargestSpecimen::largest_specimen(estimator, 
cache),\n                creator: LargestSpecimen::largest_specimen(estimator, cache),\n                instance_id: LargestSpecimen::largest_specimen(estimator, cache),\n                value: LargestSpecimen::largest_specimen(estimator, cache),\n                seq_number: LargestSpecimen::largest_specimen(estimator, cache),\n                timestamp: LargestSpecimen::largest_specimen(estimator, cache),\n                round_exp: LargestSpecimen::largest_specimen(estimator, cache),\n                endorsed: btree_set_distinct_from_prop(estimator, \"validator_count\", cache),\n            }\n        }\n    }\n}\n\n/// A `WireUnit` together with its hash and a cryptographic signature by its creator.\n#[derive(DataSize, Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)]\n#[serde(bound(\n    serialize = \"C::Hash: Serialize\",\n    deserialize = \"C::Hash: Deserialize<'de>\",\n))]\npub struct SignedWireUnit<C>\nwhere\n    C: Context,\n{\n    pub(crate) hashed_wire_unit: HashedWireUnit<C>,\n    pub(crate) signature: C::Signature,\n}\n\nimpl<C: Context> SignedWireUnit<C> {\n    pub(crate) fn new(\n        hashed_wire_unit: HashedWireUnit<C>,\n        secret_key: &C::ValidatorSecret,\n    ) -> Self {\n        let signature = secret_key.sign(&hashed_wire_unit.hash);\n        SignedWireUnit {\n            hashed_wire_unit,\n            signature,\n        }\n    }\n\n    /// Returns the inner `WireUnit`.\n    pub fn wire_unit(&self) -> &WireUnit<C> {\n        self.hashed_wire_unit.wire_unit()\n    }\n\n    /// Returns this unit's hash.\n    pub fn hash(&self) -> C::Hash {\n        self.hashed_wire_unit.hash()\n    }\n}\n\n/// A `WireUnit` together with its hash.\n#[derive(Clone, DataSize, Debug, Eq, PartialEq, Hash)]\npub struct HashedWireUnit<C>\nwhere\n    C: Context,\n{\n    hash: C::Hash,\n    wire_unit: WireUnit<C>,\n}\n\nimpl<C> HashedWireUnit<C>\nwhere\n    C: Context,\n{\n    /// Computes the unit's hash and creates a new `HashedWireUnit`.\n    
pub(crate) fn new(wire_unit: WireUnit<C>) -> Self {\n        let hash = wire_unit.compute_hash();\n        Self::new_with_hash(wire_unit, hash)\n    }\n\n    /// Returns the inner `WireUnit`.\n    pub fn into_inner(self) -> WireUnit<C> {\n        self.wire_unit\n    }\n\n    /// Returns a reference to the inner `WireUnit`.\n    pub fn wire_unit(&self) -> &WireUnit<C> {\n        &self.wire_unit\n    }\n\n    /// Returns this unit's hash.\n    pub fn hash(&self) -> C::Hash {\n        self.hash\n    }\n\n    /// Creates a new `HashedWireUnit`. Make sure the `hash` is correct, and identical with the\n    /// result of `wire_unit.compute_hash`.\n    pub(crate) fn new_with_hash(wire_unit: WireUnit<C>, hash: C::Hash) -> Self {\n        HashedWireUnit { hash, wire_unit }\n    }\n}\n\nimpl<C: Context> Serialize for HashedWireUnit<C> {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        self.wire_unit.serialize(serializer)\n    }\n}\n\nimpl<'de, C: Context> Deserialize<'de> for HashedWireUnit<C> {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        Ok(HashedWireUnit::new(<_>::deserialize(deserializer)?))\n    }\n}\n\n/// A unit as it is sent over the wire, possibly containing a new block.\n#[derive(DataSize, Clone, Eq, PartialEq, Serialize, Deserialize, Hash)]\n#[serde(bound(\n    serialize = \"C::Hash: Serialize\",\n    deserialize = \"C::Hash: Deserialize<'de>\",\n))]\npub struct WireUnit<C>\nwhere\n    C: Context,\n{\n    /// The panorama of cited units.\n    pub panorama: Panorama<C>,\n    /// The index of the creator of this unit.\n    pub creator: ValidatorIndex,\n    /// The consensus instance ID for which this unit was created.\n    pub instance_id: C::InstanceId,\n    /// The consensus value included in the unit, if any.\n    pub value: Option<C::ConsensusValue>,\n    /// The sequence number of this unit in the creator's swimlane.\n    pub seq_number: u64,\n    /// Timestamp of when 
the unit was created.\n    pub timestamp: Timestamp,\n    /// The current round exponent of the unit's creator.\n    pub round_exp: u8,\n    /// The units this unit endorses.\n    pub endorsed: BTreeSet<C::Hash>,\n}\n\nimpl<C: Context> Debug for WireUnit<C> {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        /// A type whose debug implementation prints \"..\" (without the quotes).\n        struct Ellipsis;\n\n        impl Debug for Ellipsis {\n            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n                write!(f, \"..\")\n            }\n        }\n\n        f.debug_struct(\"WireUnit\")\n            .field(\"value\", &self.value.as_ref().map(|_| Ellipsis))\n            .field(\"creator.0\", &self.creator.0)\n            .field(\"instance_id\", &self.instance_id)\n            .field(\"seq_number\", &self.seq_number)\n            .field(\"timestamp\", &self.timestamp.millis())\n            .field(\"panorama\", self.panorama.as_ref())\n            .field(\"round_exp\", &self.round_exp)\n            .field(\"endorsed\", &self.endorsed)\n            .finish()\n    }\n}\n\nimpl<C: Context> WireUnit<C> {\n    pub(crate) fn into_hashed(self) -> HashedWireUnit<C> {\n        HashedWireUnit::new(self)\n    }\n\n    /// Returns the creator's previous unit.\n    pub fn previous(&self) -> Option<&C::Hash> {\n        self.panorama[self.creator].correct()\n    }\n\n    /// Returns the unit's hash, which is used as a unit identifier.\n    fn compute_hash(&self) -> C::Hash {\n        // TODO: Use serialize_into to avoid allocation?\n        <C as Context>::hash(&bincode::serialize(self).expect(\"serialize WireUnit\"))\n    }\n}\n\n/// A set of endorsements for a unit.\n#[derive(Clone, DataSize, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)]\n#[serde(bound(\n    serialize = \"C::Hash: Serialize\",\n    deserialize = \"C::Hash: Deserialize<'de>\",\n))]\npub struct Endorsements<C>\nwhere\n    C: Context,\n{\n    
/// The endorsed unit.\n    pub unit: C::Hash,\n    /// The endorsements for the unit.\n    pub endorsers: Vec<(ValidatorIndex, C::Signature)>,\n}\n\nimpl<C: Context> Endorsements<C> {\n    /// Returns hash of the endorsed vote.\n    pub fn unit(&self) -> &C::Hash {\n        &self.unit\n    }\n\n    /// Returns an iterator over validator indexes that endorsed the `unit`.\n    pub fn validator_ids(&self) -> impl Iterator<Item = ValidatorIndex> + '_ {\n        self.endorsers.iter().map(|(v, _)| *v)\n    }\n}\n\nimpl<C: Context> From<SignedEndorsement<C>> for Endorsements<C> {\n    fn from(signed_e: SignedEndorsement<C>) -> Self {\n        Endorsements {\n            unit: *signed_e.unit(),\n            endorsers: vec![(signed_e.validator_idx(), *signed_e.signature())],\n        }\n    }\n}\n\n/// A ping sent by a validator to signal that it is online but has not created new units in a\n/// while.\n#[derive(Clone, DataSize, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)]\n#[serde(bound(\n    serialize = \"C::Hash: Serialize\",\n    deserialize = \"C::Hash: Deserialize<'de>\",\n))]\npub struct Ping<C>\nwhere\n    C: Context,\n{\n    creator: ValidatorIndex,\n    timestamp: Timestamp,\n    instance_id: C::InstanceId,\n    signature: C::Signature,\n}\n\nimpl<C: Context> Ping<C> {\n    /// Creates a new signed ping.\n    pub(crate) fn new(\n        creator: ValidatorIndex,\n        timestamp: Timestamp,\n        instance_id: C::InstanceId,\n        sk: &C::ValidatorSecret,\n    ) -> Self {\n        let signature = sk.sign(&Self::hash(creator, timestamp, instance_id));\n        Ping {\n            creator,\n            timestamp,\n            instance_id,\n            signature,\n        }\n    }\n\n    /// The creator who signals that it is online.\n    pub fn creator(&self) -> ValidatorIndex {\n        self.creator\n    }\n\n    /// The timestamp when the ping was created.\n    pub fn timestamp(&self) -> Timestamp {\n        self.timestamp\n    }\n\n    /// 
Validates the ping and returns an error if it is not signed by the creator.\n    pub(crate) fn validate(\n        &self,\n        validators: &Validators<C::ValidatorId>,\n        our_instance_id: &C::InstanceId,\n    ) -> Result<(), VertexError> {\n        let Ping {\n            creator,\n            timestamp,\n            instance_id,\n            signature,\n        } = self;\n        if instance_id != our_instance_id {\n            return Err(PingError::InstanceId.into());\n        }\n        let v_id = validators.id(self.creator).ok_or(PingError::Creator)?;\n        let hash = Self::hash(*creator, *timestamp, *instance_id);\n        if !C::verify_signature(&hash, v_id, signature) {\n            return Err(PingError::Signature.into());\n        }\n        Ok(())\n    }\n\n    /// Computes the hash of a ping, i.e. of the creator and timestamp.\n    fn hash(creator: ValidatorIndex, timestamp: Timestamp, instance_id: C::InstanceId) -> C::Hash {\n        let bytes = bincode::serialize(&(creator, timestamp, instance_id)).expect(\"serialize Ping\");\n        <C as Context>::hash(&bytes)\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/highway_core/highway.rs",
    "content": "//! The implementation of the Highway consensus protocol.\n\nmod vertex;\n\npub(crate) use crate::components::consensus::highway_core::state::Params;\npub use vertex::{\n    Dependency, Endorsements, HashedWireUnit, Ping, SignedWireUnit, Vertex, WireUnit,\n};\n\nuse std::path::PathBuf;\n\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\nuse thiserror::Error;\nuse tracing::{debug, error, info, trace, warn};\n\nuse casper_types::{TimeDiff, Timestamp};\n\nuse crate::components::consensus::{\n    consensus_protocol::BlockContext,\n    highway_core::{\n        active_validator::{ActiveValidator, Effect},\n        endorsement::{Endorsement, EndorsementError},\n        evidence::{Evidence, EvidenceError},\n        state::{Fault, Observation, State, UnitError},\n    },\n    traits::Context,\n    utils::{\n        wal::{ReadWal, WalEntry, WriteWal},\n        Validator, ValidatorIndex, Validators, Weight,\n    },\n};\n\n/// If a lot of rounds were skipped between two blocks, log at most this many.\nconst MAX_SKIPPED_PROPOSAL_LOGS: u64 = 10;\n\n/// An error due to an invalid vertex.\n#[derive(Debug, Error, PartialEq)]\npub(crate) enum VertexError {\n    #[error(\"The vertex contains an invalid unit: `{0}`\")]\n    Unit(#[from] UnitError),\n    #[error(\"The vertex contains invalid evidence: `{0}`\")]\n    Evidence(#[from] EvidenceError),\n    #[error(\"The endorsements contains invalid entry: `{0}`\")]\n    Endorsement(#[from] EndorsementError),\n    #[error(\"Invalid ping: `{0}`\")]\n    Ping(#[from] PingError),\n}\n\n/// An error due to an invalid ping.\n#[derive(Debug, Error, Eq, PartialEq)]\npub(crate) enum PingError {\n    #[error(\"The creator is not a validator.\")]\n    Creator,\n    #[error(\"The signature is invalid.\")]\n    Signature,\n    #[error(\"The ping is for a different consensus protocol instance.\")]\n    InstanceId,\n}\n\n/// A vertex that has passed initial validation.\n///\n/// The vertex could not be determined to be 
invalid based on its contents alone. The remaining\n/// checks will be applied once all of its dependencies have been added to `Highway`. (See\n/// `ValidVertex`.)\n#[derive(Clone, DataSize, Debug, Eq, PartialEq, Hash)]\npub(crate) struct PreValidatedVertex<C>(Vertex<C>)\nwhere\n    C: Context;\n\nimpl<C: Context> PreValidatedVertex<C> {\n    pub(crate) fn inner(&self) -> &Vertex<C> {\n        &self.0\n    }\n\n    pub(crate) fn timestamp(&self) -> Option<Timestamp> {\n        self.0.timestamp()\n    }\n\n    #[cfg(test)]\n    pub(crate) fn into_vertex(self) -> Vertex<C> {\n        self.0\n    }\n}\n\nimpl<C: Context> From<ValidVertex<C>> for PreValidatedVertex<C> {\n    fn from(vv: ValidVertex<C>) -> PreValidatedVertex<C> {\n        PreValidatedVertex(vv.0)\n    }\n}\n\nimpl<C: Context> From<ValidVertex<C>> for Vertex<C> {\n    fn from(vv: ValidVertex<C>) -> Vertex<C> {\n        vv.0\n    }\n}\n\nimpl<C: Context> From<PreValidatedVertex<C>> for Vertex<C> {\n    fn from(pvv: PreValidatedVertex<C>) -> Vertex<C> {\n        pvv.0\n    }\n}\n\n/// A vertex that has been validated: `Highway` has all its dependencies and can add it to its\n/// protocol state.\n///\n/// Note that this must only be added to the `Highway` instance that created it. 
Can cause a panic\n/// or inconsistent state otherwise.\n#[derive(Clone, DataSize, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]\n#[serde(bound(\n    serialize = \"C::Hash: Serialize\",\n    deserialize = \"C::Hash: Deserialize<'de>\",\n))]\npub(crate) struct ValidVertex<C>(pub(crate) Vertex<C>)\nwhere\n    C: Context;\n\nimpl<C: Context> ValidVertex<C> {\n    pub(crate) fn inner(&self) -> &Vertex<C> {\n        &self.0\n    }\n\n    pub(crate) fn is_proposal(&self) -> bool {\n        self.0.value().is_some()\n    }\n    pub(crate) fn endorsements(&self) -> Option<&Endorsements<C>> {\n        match &self.0 {\n            Vertex::Endorsements(endorsements) => Some(endorsements),\n            Vertex::Evidence(_) | Vertex::Unit(_) | Vertex::Ping(_) => None,\n        }\n    }\n}\n\n/// A result indicating whether and how a requested dependency is satisfied.\npub(crate) enum GetDepOutcome<C: Context> {\n    /// We don't have this dependency.\n    None,\n    /// This vertex satisfies the dependency.\n    Vertex(ValidVertex<C>),\n    /// The dependency must be satisfied by providing evidence against this faulty validator, but\n    /// this `Highway` instance does not have direct evidence.\n    Evidence(C::ValidatorId),\n}\n\n#[derive(Serialize, Deserialize, Debug, PartialEq)]\n#[serde(bound(\n    serialize = \"C::Hash: Serialize\",\n    deserialize = \"C::Hash: Deserialize<'de>\",\n))]\nstruct HighwayWalEntry<C: Context> {\n    vertex: ValidVertex<C>,\n    timestamp: Timestamp,\n}\n\nimpl<C: Context> WalEntry for HighwayWalEntry<C> {}\n\n/// An instance of the Highway protocol, containing its local state.\n///\n/// Both observers and active validators must instantiate this, pass in all incoming vertices from\n/// peers, and use a [FinalityDetector](../finality_detector/struct.FinalityDetector.html) to\n/// determine the outcome of the consensus process.\n#[derive(Debug, DataSize)]\npub(crate) struct Highway<C>\nwhere\n    C: Context,\n{\n    /// The protocol 
instance ID. This needs to be unique, to prevent replay attacks.\n    instance_id: C::InstanceId,\n    /// The validator IDs and weight map.\n    validators: Validators<C::ValidatorId>,\n    /// The abstract protocol state.\n    state: State<C>,\n    /// The state of an active validator, who is participating and creating new vertices.\n    active_validator: Option<ActiveValidator<C>>,\n    /// The path to the protocol state file.\n    write_wal: Option<WriteWal<HighwayWalEntry<C>>>,\n}\n\nimpl<C: Context> Highway<C> {\n    /// Creates a new `Highway` instance. All participants must agree on the protocol parameters.\n    ///\n    /// Arguments:\n    ///\n    /// * `instance_id`: A unique identifier for every execution of the protocol (e.g. for every\n    ///   era) to prevent replay attacks.\n    /// * `validators`: The set of validators and their weights.\n    /// * `params`: The Highway protocol parameters.\n    pub(crate) fn new(\n        instance_id: C::InstanceId,\n        validators: Validators<C::ValidatorId>,\n        params: Params,\n        protocol_state_file: Option<PathBuf>,\n    ) -> Highway<C> {\n        info!(%validators, instance=%instance_id, \"creating Highway instance\");\n        let weights = validators.iter().map(Validator::weight);\n        let banned = validators.iter_banned_idx();\n        let cannot_propose = validators.iter_cannot_propose_idx();\n        let state = State::new(weights, params, banned, cannot_propose);\n        let (write_wal, entries) = if let Some(protocol_state_file) = protocol_state_file.as_ref() {\n            let entries = Self::read_stored_vertices(protocol_state_file);\n            let write_wal = match WriteWal::<HighwayWalEntry<C>>::new(protocol_state_file) {\n                Ok(wal) => Some(wal),\n                Err(err) => {\n                    panic!(\"couldn't open WriteWal: {}\", err);\n                }\n            };\n            (write_wal, entries)\n        } else {\n            (None, vec![])\n       
 };\n        let mut result = Highway {\n            instance_id,\n            validators,\n            state,\n            active_validator: None,\n            write_wal,\n        };\n        result.restore_state(entries);\n        result\n    }\n\n    fn read_stored_vertices(protocol_state_file: &PathBuf) -> Vec<HighwayWalEntry<C>> {\n        let mut read_wal = match ReadWal::<HighwayWalEntry<C>>::new(protocol_state_file) {\n            Ok(wal) => wal,\n            Err(err) => {\n                panic!(\"couldn't open ReadWal: {}\", err);\n            }\n        };\n        let mut entries = vec![];\n        loop {\n            match read_wal.read_next_entry() {\n                Ok(Some(entry)) => {\n                    entries.push(entry);\n                }\n                Ok(None) => {\n                    break;\n                }\n                Err(err) => {\n                    panic!(\"error while reading ReadWal: {}\", err);\n                }\n            }\n        }\n        entries\n    }\n\n    fn restore_state(&mut self, entries: Vec<HighwayWalEntry<C>>) {\n        for entry in entries {\n            // we can safely ignore the effects - they were properly processed when persisting the\n            // vertex\n            self.add_valid_vertex(entry.vertex, entry.timestamp);\n        }\n    }\n\n    /// Turns this instance from a passive observer into an active validator that proposes new\n    /// blocks and creates and signs new vertices.\n    ///\n    /// Panics if `id` is not the ID of a validator with a weight in this Highway instance.\n    pub(crate) fn activate_validator(\n        &mut self,\n        id: C::ValidatorId,\n        secret: C::ValidatorSecret,\n        current_time: Timestamp,\n        _unit_hash_file: Option<PathBuf>,\n        target_ftt: Weight,\n    ) -> Vec<Effect<C>> {\n        if self.active_validator.is_some() {\n            error!(?id, \"activate_validator called twice\");\n            return vec![];\n        }\n        
let idx = match self.validators.get_index(&id) {\n            Some(idx) => idx,\n            None => {\n                error!(?id, \"missing own validator ID\");\n                return vec![];\n            }\n        };\n        let start_time = current_time.max(self.state.params().start_timestamp());\n        let (av, effects) = ActiveValidator::new(\n            idx,\n            secret,\n            current_time,\n            start_time,\n            &self.state,\n            target_ftt,\n            self.instance_id,\n        );\n        self.active_validator = Some(av);\n        self.add_new_own_vertices(effects, current_time)\n    }\n\n    /// Turns this instance into a passive observer, that does not create any new vertices.\n    pub(crate) fn deactivate_validator(&mut self) {\n        self.active_validator = None;\n    }\n\n    /// Switches the active validator to a new round length.\n    pub(crate) fn set_round_len(&mut self, new_round_len: TimeDiff) {\n        if let Some(ref mut av) = self.active_validator {\n            av.set_round_len(new_round_len);\n        }\n    }\n\n    /// Does initial validation. 
Returns an error if the vertex is invalid.\n    pub(crate) fn pre_validate_vertex(\n        &self,\n        vertex: Vertex<C>,\n    ) -> Result<PreValidatedVertex<C>, (Vertex<C>, VertexError)> {\n        match self.do_pre_validate_vertex(&vertex) {\n            Err(err) => Err((vertex, err)),\n            Ok(()) => Ok(PreValidatedVertex(vertex)),\n        }\n    }\n\n    /// Returns the next missing dependency, or `None` if all dependencies of `pvv` are satisfied.\n    ///\n    /// If this returns `None`, `validate_vertex` can be called.\n    pub(super) fn missing_dependency(&self, pvv: &PreValidatedVertex<C>) -> Option<Dependency<C>> {\n        match pvv.inner() {\n            Vertex::Evidence(_) | Vertex::Ping(_) => None,\n            Vertex::Endorsements(endorsements) => {\n                let unit = *endorsements.unit();\n                if !self.state.has_unit(&unit) {\n                    Some(Dependency::Unit(unit))\n                } else {\n                    None\n                }\n            }\n            Vertex::Unit(unit) => unit\n                .wire_unit()\n                .panorama\n                .missing_dependency(&self.state)\n                .or_else(|| {\n                    self.state\n                        .needs_endorsements(unit)\n                        .map(Dependency::Endorsement)\n                }),\n        }\n    }\n\n    /// Does full validation. 
Returns an error if the vertex is invalid.\n    ///\n    /// All dependencies must be added to the state before this validation step.\n    pub(crate) fn validate_vertex(\n        &self,\n        pvv: PreValidatedVertex<C>,\n    ) -> Result<ValidVertex<C>, (PreValidatedVertex<C>, VertexError)> {\n        match self.do_validate_vertex(pvv.inner()) {\n            Err(err) => Err((pvv, err)),\n            Ok(()) => Ok(ValidVertex(pvv.0)),\n        }\n    }\n\n    /// Add a validated vertex to the protocol state.\n    ///\n    /// The validation must have been performed by _this_ `Highway` instance.\n    /// More precisely: The instance on which `add_valid_vertex` is called must contain everything\n    /// (and possibly more) that the instance on which `validate_vertex` was called contained.\n    pub(crate) fn add_valid_vertex(\n        &mut self,\n        ValidVertex(vertex): ValidVertex<C>,\n        now: Timestamp,\n    ) -> Vec<Effect<C>> {\n        if !self.has_vertex(&vertex) {\n            if let Some(ref mut wal) = self.write_wal {\n                let entry = HighwayWalEntry {\n                    vertex: ValidVertex(vertex.clone()),\n                    timestamp: now,\n                };\n                if let Err(err) = wal.record_entry(&entry) {\n                    error!(\"error recording entry: {}\", err);\n                }\n            }\n            match vertex {\n                Vertex::Unit(unit) => self.add_valid_unit(unit, now),\n                Vertex::Evidence(evidence) => self.add_evidence(evidence),\n                Vertex::Endorsements(endorsements) => self.add_endorsements(endorsements),\n                Vertex::Ping(ping) => self.add_ping(ping),\n            }\n        } else {\n            vec![]\n        }\n    }\n\n    /// Returns whether the vertex is already part of this protocol state.\n    pub(crate) fn has_vertex(&self, vertex: &Vertex<C>) -> bool {\n        match vertex {\n            Vertex::Unit(unit) => 
self.state.has_unit(&unit.hash()),\n            Vertex::Evidence(evidence) => self.state.has_evidence(evidence.perpetrator()),\n            Vertex::Endorsements(endorsements) => {\n                let unit = endorsements.unit();\n                self.state\n                    .has_all_endorsements(unit, endorsements.validator_ids())\n            }\n            Vertex::Ping(ping) => self.state.has_ping(ping.creator(), ping.timestamp()),\n        }\n    }\n\n    /// Returns whether the validator is known to be faulty and we have evidence.\n    pub(crate) fn has_evidence(&self, vid: &C::ValidatorId) -> bool {\n        self.validators\n            .get_index(vid)\n            .is_some_and(|vidx| self.state.has_evidence(vidx))\n    }\n\n    /// Marks the given validator as faulty, if it exists.\n    pub(crate) fn mark_faulty(&mut self, vid: &C::ValidatorId) {\n        if let Some(vidx) = self.validators.get_index(vid) {\n            self.state.mark_faulty(vidx);\n        }\n    }\n\n    /// Returns whether we have a vertex that satisfies the dependency.\n    pub(crate) fn has_dependency(&self, dependency: &Dependency<C>) -> bool {\n        match dependency {\n            Dependency::Unit(hash) => self.state.has_unit(hash),\n            Dependency::Evidence(idx) => self.state.is_faulty(*idx),\n            Dependency::Endorsement(hash) => self.state.is_endorsed(hash),\n            Dependency::Ping(_, _) => false, // We don't store signatures; nothing depends on pings.\n        }\n    }\n\n    /// Returns a vertex that satisfies the dependency, if available.\n    ///\n    /// If we send a vertex to a peer who is missing a dependency, they will ask us for it. 
In that\n    /// case, `get_dependency` will never return `None`, unless the peer is faulty.\n    pub(crate) fn get_dependency(&self, dependency: &Dependency<C>) -> GetDepOutcome<C> {\n        match dependency {\n            Dependency::Unit(hash) => match self.state.wire_unit(hash, self.instance_id) {\n                None => GetDepOutcome::None,\n                Some(unit) => GetDepOutcome::Vertex(ValidVertex(Vertex::Unit(unit))),\n            },\n            Dependency::Evidence(idx) => match self.state.maybe_fault(*idx) {\n                None | Some(Fault::Banned) => GetDepOutcome::None,\n                Some(Fault::Direct(ev)) => {\n                    GetDepOutcome::Vertex(ValidVertex(Vertex::Evidence(ev.clone())))\n                }\n                Some(Fault::Indirect) => {\n                    let vid = self.validators.id(*idx).expect(\"missing validator\").clone();\n                    GetDepOutcome::Evidence(vid)\n                }\n            },\n            Dependency::Endorsement(hash) => match self.state.maybe_endorsements(hash) {\n                None => GetDepOutcome::None,\n                Some(e) => GetDepOutcome::Vertex(ValidVertex(Vertex::Endorsements(e))),\n            },\n            Dependency::Ping(_, _) => GetDepOutcome::None, // We don't store ping signatures.\n        }\n    }\n\n    /// Returns a vertex by a validator with the requested sequence number.\n    pub(crate) fn get_dependency_by_index(\n        &self,\n        vid: ValidatorIndex,\n        unit_seq: u64,\n    ) -> GetDepOutcome<C> {\n        let obs = match self.state.panorama().get(vid) {\n            Some(obs) => obs,\n            None => return GetDepOutcome::None,\n        };\n        match obs {\n            Observation::None => GetDepOutcome::None,\n            Observation::Faulty => match self.state.maybe_fault(vid) {\n                None | Some(Fault::Banned) => GetDepOutcome::None,\n                Some(Fault::Direct(ev)) => {\n                    
GetDepOutcome::Vertex(ValidVertex(Vertex::Evidence(ev.clone())))\n                }\n                Some(Fault::Indirect) => match self.validators.id(vid) {\n                    Some(vid) => GetDepOutcome::Evidence(vid.clone()),\n                    None => GetDepOutcome::None,\n                },\n            },\n            Observation::Correct(last_seen) => self\n                .state\n                .find_in_swimlane(last_seen, unit_seq)\n                .and_then(|req_hash| self.state.wire_unit(req_hash, self.instance_id))\n                .map(|swunit| GetDepOutcome::Vertex(ValidVertex(Vertex::Unit(swunit))))\n                .unwrap_or_else(|| GetDepOutcome::None),\n        }\n    }\n\n    pub(crate) fn handle_timer(&mut self, timestamp: Timestamp) -> Vec<Effect<C>> {\n        let instance_id = self.instance_id;\n\n        // Here we just use the timer's timestamp, and assume it's ~ Timestamp::now()\n        //\n        // This is because proposal units, i.e. new blocks, are\n        // supposed to have the exact timestamp that matches the\n        // beginning of the round (which we use as the \"round ID\").\n        //\n        // But at least any discrepancy here can only come from event\n        // handling delays in our own node, and not from timestamps\n        // set by other nodes.\n\n        self.map_active_validator(\n            |av, state| av.handle_timer(timestamp, state, instance_id),\n            timestamp,\n        )\n        .unwrap_or_else(|| {\n            debug!(%timestamp, \"Ignoring `handle_timer` event: only an observer node.\");\n            vec![]\n        })\n    }\n\n    pub(crate) fn propose(\n        &mut self,\n        value: C::ConsensusValue,\n        block_context: BlockContext<C>,\n    ) -> Vec<Effect<C>> {\n        let instance_id = self.instance_id;\n\n        // We just use the block context's timestamp, which is\n        // hopefully not much older than `Timestamp::now()`\n        //\n        // We do this because 
essentially what happens is this:\n        //\n        // 1. We realize it's our turn to propose a block in\n        // millisecond 64, so we set a timer.\n        //\n        // 2. The timer for timestamp 64 fires, and we request deploys\n        // for the new block from the block proposer (with 64 in the\n        // block context).\n        //\n        // 3. The block proposer responds and we finally end up here,\n        // and can propose the new block. But we still have to use\n        // timestamp 64.\n\n        let timestamp = block_context.timestamp();\n        self.map_active_validator(\n            |av, state| av.propose(value, block_context, state, instance_id),\n            timestamp,\n        )\n        .unwrap_or_else(|| {\n            warn!(\"ignoring `propose` event: validator has been deactivated\");\n            vec![]\n        })\n    }\n\n    pub(crate) fn validators(&self) -> &Validators<C::ValidatorId> {\n        &self.validators\n    }\n\n    /// Returns an iterator over all validators against which we have direct evidence.\n    pub(crate) fn validators_with_evidence(&self) -> impl Iterator<Item = &C::ValidatorId> {\n        self.validators\n            .enumerate_ids()\n            .filter(move |(idx, _)| self.state.has_evidence(*idx))\n            .map(|(_, v_id)| v_id)\n    }\n\n    pub(crate) fn state(&self) -> &State<C> {\n        &self.state\n    }\n\n    /// Sets the pause status: While paused we don't create any new units, just pings.\n    pub(crate) fn set_paused(&mut self, paused: bool) {\n        if let Some(av) = &mut self.active_validator {\n            av.set_paused(paused);\n        }\n    }\n\n    /// Drops all state other than evidence.\n    pub(crate) fn retain_evidence_only(&mut self) {\n        self.deactivate_validator();\n        self.state.retain_evidence_only();\n    }\n\n    fn on_new_unit(&mut self, uhash: &C::Hash, timestamp: Timestamp) -> Vec<Effect<C>> {\n        let instance_id = self.instance_id;\n        
self.map_active_validator(\n            |av, state| av.on_new_unit(uhash, timestamp, state, instance_id),\n            timestamp,\n        )\n        .unwrap_or_default()\n    }\n\n    /// Takes action on a new evidence.\n    fn on_new_evidence(&mut self, evidence: Evidence<C>) -> Vec<Effect<C>> {\n        let state = &self.state;\n        let mut effects = self\n            .active_validator\n            .as_mut()\n            .map(|av| av.on_new_evidence(&evidence, state))\n            .unwrap_or_default();\n        // Add newly created endorsements to the local state. These can only be our own ones, so we\n        // don't need to look for conflicts and call State::add_endorsements directly.\n        for effect in effects.iter() {\n            if let Effect::NewVertex(vv) = effect {\n                if let Some(e) = vv.endorsements() {\n                    self.state.add_endorsements(e.clone());\n                }\n            }\n        }\n        // Gossip `Evidence` only if we just learned about faults by the validator.\n        effects.extend(vec![Effect::NewVertex(ValidVertex(Vertex::Evidence(\n            evidence,\n        )))]);\n        effects\n    }\n\n    /// Applies `f` if this is an active validator, otherwise returns `None`.\n    ///\n    /// Newly created vertices are added to the state. 
If an equivocation of this validator is\n    /// detected, it gets deactivated.\n    fn map_active_validator<F>(&mut self, f: F, timestamp: Timestamp) -> Option<Vec<Effect<C>>>\n    where\n        F: FnOnce(&mut ActiveValidator<C>, &State<C>) -> Vec<Effect<C>>,\n    {\n        let effects = f(self.active_validator.as_mut()?, &self.state);\n        Some(self.add_new_own_vertices(effects, timestamp))\n    }\n\n    /// Handles all `NewVertex` effects and adds the vertices to the protocol state.\n    ///\n    /// This needs to be applied to all effects created by `ActiveValidator`, so that new vertices\n    /// are not interpreted as coming from a doppelgänger.\n    fn add_new_own_vertices(\n        &mut self,\n        effects: Vec<Effect<C>>,\n        timestamp: Timestamp,\n    ) -> Vec<Effect<C>> {\n        let mut result = Vec::with_capacity(effects.len());\n        for effect in &effects {\n            match effect {\n                Effect::NewVertex(vv) => {\n                    result.extend(self.add_valid_vertex(vv.clone(), timestamp))\n                }\n                Effect::WeAreFaulty(_) => self.deactivate_validator(),\n                Effect::ScheduleTimer(_) | Effect::RequestNewBlock(_, _) => (),\n            }\n        }\n        result.extend(effects);\n        result\n    }\n\n    /// Performs initial validation and returns an error if `vertex` is invalid. 
(See\n    /// `PreValidatedVertex` and `validate_vertex`.)\n    fn do_pre_validate_vertex(&self, vertex: &Vertex<C>) -> Result<(), VertexError> {\n        match vertex {\n            Vertex::Unit(unit) => {\n                let creator = unit.wire_unit().creator;\n                let v_id = self.validators.id(creator).ok_or(UnitError::Creator)?;\n                if unit.wire_unit().instance_id != self.instance_id {\n                    return Err(UnitError::InstanceId.into());\n                }\n                if !C::verify_signature(&unit.hash(), v_id, &unit.signature) {\n                    return Err(UnitError::Signature.into());\n                }\n                Ok(self.state.pre_validate_unit(unit)?)\n            }\n            Vertex::Evidence(evidence) => {\n                Ok(evidence.validate(&self.validators, &self.instance_id, self.state.params())?)\n            }\n            Vertex::Endorsements(endorsements) => {\n                let unit = *endorsements.unit();\n                if endorsements.endorsers.is_empty() {\n                    return Err(EndorsementError::Empty.into());\n                }\n                for (creator, signature) in endorsements.endorsers.iter() {\n                    let v_id = self\n                        .validators\n                        .id(*creator)\n                        .ok_or(EndorsementError::Creator)?;\n                    if self.state.maybe_fault(*creator) == Some(&Fault::Banned) {\n                        return Err(EndorsementError::Banned.into());\n                    }\n                    let endorsement: Endorsement<C> = Endorsement::new(unit, *creator);\n                    if !C::verify_signature(&endorsement.hash(), v_id, signature) {\n                        return Err(EndorsementError::Signature.into());\n                    }\n                }\n                Ok(())\n            }\n            Vertex::Ping(ping) => ping.validate(&self.validators, &self.instance_id),\n        }\n    }\n\n  
  /// Validates `vertex` and returns an error if it is invalid.\n    /// This requires all dependencies to be present.\n    fn do_validate_vertex(&self, vertex: &Vertex<C>) -> Result<(), VertexError> {\n        match vertex {\n            Vertex::Unit(unit) => Ok(self.state.validate_unit(unit)?),\n            Vertex::Evidence(_) | Vertex::Endorsements(_) | Vertex::Ping(_) => Ok(()),\n        }\n    }\n\n    /// Adds evidence to the protocol state.\n    /// Gossip the evidence if it's the first equivocation from the creator.\n    fn add_evidence(&mut self, evidence: Evidence<C>) -> Vec<Effect<C>> {\n        if self.state.add_evidence(evidence.clone()) {\n            self.on_new_evidence(evidence)\n        } else {\n            vec![]\n        }\n    }\n\n    /// Adds a valid unit to the protocol state.\n    ///\n    /// Validity must be checked before calling this! Adding an invalid unit will result in a panic\n    /// or an inconsistent state.\n    fn add_valid_unit(&mut self, swunit: SignedWireUnit<C>, now: Timestamp) -> Vec<Effect<C>> {\n        let unit_hash = swunit.hash();\n        let creator = swunit.wire_unit().creator;\n        let was_honest = !self.state.is_faulty(creator);\n        self.state.add_valid_unit(swunit);\n        self.log_if_missing_proposal(&unit_hash);\n        let mut evidence_effects = self\n            .state\n            .maybe_evidence(creator)\n            .cloned()\n            .map(|ev| {\n                if was_honest {\n                    self.on_new_evidence(ev)\n                } else {\n                    vec![]\n                }\n            })\n            .unwrap_or_default();\n        evidence_effects.extend(self.on_new_unit(&unit_hash, now));\n        evidence_effects\n    }\n\n    /// Adds endorsements to the state. 
If there are conflicting endorsements, `NewVertex` effects\n    /// are returned containing evidence to prove them faulty.\n    fn add_endorsements(&mut self, endorsements: Endorsements<C>) -> Vec<Effect<C>> {\n        let evidence = self\n            .state\n            .find_conflicting_endorsements(&endorsements, &self.instance_id);\n        self.state.add_endorsements(endorsements);\n        let add_and_create_effect = |ev: Evidence<C>| {\n            self.state.add_evidence(ev.clone());\n            Effect::NewVertex(ValidVertex(Vertex::Evidence(ev)))\n        };\n        evidence.into_iter().map(add_and_create_effect).collect()\n    }\n\n    /// Adds a ping to the state.\n    fn add_ping(&mut self, ping: Ping<C>) -> Vec<Effect<C>> {\n        self.state.add_ping(ping.creator(), ping.timestamp());\n        vec![]\n    }\n\n    /// Checks whether the unit was created by a doppelganger.\n    pub(crate) fn is_doppelganger_vertex(&self, vertex: &Vertex<C>) -> bool {\n        self.active_validator\n            .as_ref()\n            .is_some_and(|av| av.is_doppelganger_vertex(vertex, &self.state))\n    }\n\n    /// Returns whether this instance of protocol is an active validator.\n    pub(crate) fn is_active(&self) -> bool {\n        self.active_validator.is_some()\n    }\n\n    /// Returns the instance ID of this Highway instance.\n    pub(crate) fn instance_id(&self) -> &C::InstanceId {\n        &self.instance_id\n    }\n\n    pub(crate) fn next_round_length(&self) -> Option<TimeDiff> {\n        self.active_validator\n            .as_ref()\n            .map(|av| av.next_round_length())\n    }\n\n    /// Logs a message if this is a block and any previous blocks were skipped.\n    fn log_if_missing_proposal(&self, unit_hash: &C::Hash) {\n        let state = &self.state;\n        let unit = state.unit(unit_hash);\n        let r_id = unit.round_id();\n        if unit.timestamp != r_id\n            || unit.block != *unit_hash\n            || state.leader(r_id) != 
unit.creator\n            || state.is_faulty(unit.creator)\n        {\n            return; // Not a block by an honest validator. (Don't let faulty validators spam logs.)\n        }\n\n        // Iterate over all rounds since the parent — or since the start time, if there is none.\n        let parent_timestamp = if let Some(parent_hash) = state.block(unit_hash).parent() {\n            state.unit(parent_hash).timestamp\n        } else {\n            state.params().start_timestamp()\n        };\n        for skipped_r_id in (1..=MAX_SKIPPED_PROPOSAL_LOGS)\n            .filter_map(|i| r_id.checked_sub(state.params().min_round_length().checked_mul(i)?))\n            .take_while(|skipped_r_id| *skipped_r_id > parent_timestamp)\n        {\n            let leader_index = state.leader(skipped_r_id);\n            let leader_id = match self.validators.id(leader_index) {\n                None => {\n                    error!(?leader_index, \"missing leader validator ID\");\n                    return;\n                }\n                Some(leader_id) => leader_id,\n            };\n            if state.is_faulty(leader_index) {\n                trace!(\n                    ?leader_index, %leader_id, round_id = %skipped_r_id,\n                    \"missing proposal: faulty leader was skipped\",\n                );\n            } else {\n                let reason = state.panorama()[leader_index]\n                    .correct()\n                    .and_then(|leader_hash| {\n                        state\n                            .swimlane(leader_hash)\n                            .find(|(_, unit)| unit.timestamp <= skipped_r_id)\n                            .filter(|(_, unit)| unit.timestamp == skipped_r_id)\n                    })\n                    .map_or(\"the leader missed their turn\", |_| {\n                        \"the leader's proposal got orphaned\"\n                    });\n                info!(\n                    ?leader_index, %leader_id, round_id = 
%skipped_r_id,\n                    \"missing proposal: {}\", reason,\n                );\n            }\n        }\n    }\n}\n\n#[cfg(test)]\n#[allow(clippy::arithmetic_side_effects)]\npub(crate) mod tests {\n    use std::{collections::BTreeSet, iter::FromIterator};\n\n    use casper_types::Timestamp;\n\n    use crate::components::consensus::{\n        highway_core::{\n            evidence::{Evidence, EvidenceError},\n            highway::{\n                vertex::Ping, Dependency, Highway, SignedWireUnit, UnitError, Vertex, VertexError,\n                WireUnit,\n            },\n            highway_testing::TEST_INSTANCE_ID,\n            state::{tests::*, Panorama, State},\n        },\n        traits::ValidatorSecret,\n        utils::Validators,\n    };\n\n    pub(crate) fn test_validators() -> Validators<u32> {\n        let vid_weights: Vec<(u32, u64)> =\n            vec![(ALICE_SEC, ALICE), (BOB_SEC, BOB), (CAROL_SEC, CAROL)]\n                .into_iter()\n                .map(|(sk, vid)| {\n                    assert_eq!(sk.0, vid.0);\n                    (sk.0, WEIGHTS[vid.0 as usize].0)\n                })\n                .collect();\n        Validators::from_iter(vid_weights)\n    }\n\n    #[test]\n    fn invalid_signature_error() {\n        let now: Timestamp = 500.into();\n\n        let state: State<TestContext> = State::new_test(WEIGHTS, 0);\n        let mut highway = Highway {\n            instance_id: TEST_INSTANCE_ID,\n            validators: test_validators(),\n            state,\n            active_validator: None,\n            write_wal: None,\n        };\n        let wunit = WireUnit {\n            panorama: Panorama::new(WEIGHTS.len()),\n            creator: CAROL,\n            instance_id: highway.instance_id,\n            value: Some(0),\n            seq_number: 0,\n            timestamp: Timestamp::zero(),\n            round_exp: 4,\n            endorsed: BTreeSet::new(),\n        };\n        let invalid_signature = 1u64;\n        let 
invalid_signature_unit = SignedWireUnit {\n            hashed_wire_unit: wunit.clone().into_hashed(),\n            signature: invalid_signature,\n        };\n        let invalid_vertex = Vertex::Unit(invalid_signature_unit);\n        let err = VertexError::Unit(UnitError::Signature);\n        let expected = (invalid_vertex.clone(), err);\n        assert_eq!(Err(expected), highway.pre_validate_vertex(invalid_vertex));\n\n        let hwunit = wunit.into_hashed();\n        let valid_signature = CAROL_SEC.sign(&hwunit.hash());\n        let correct_signature_unit = SignedWireUnit {\n            hashed_wire_unit: hwunit,\n            signature: valid_signature,\n        };\n        let valid_vertex = Vertex::Unit(correct_signature_unit);\n        let pvv = highway.pre_validate_vertex(valid_vertex).unwrap();\n        assert_eq!(None, highway.missing_dependency(&pvv));\n        let vv = highway.validate_vertex(pvv).unwrap();\n        assert!(highway.add_valid_vertex(vv, now).is_empty());\n    }\n\n    #[test]\n    fn missing_dependency() -> Result<(), AddUnitError<TestContext>> {\n        let mut state = State::new_test(WEIGHTS, 0);\n        let now: Timestamp = 500.into();\n\n        add_unit!(state, CAROL, 0xC0; N, N, N)?;\n        add_unit!(state, CAROL, 0xC1; N, N, N)?;\n        let a = add_unit!(state, ALICE, 0xA; N, N, N)?;\n        endorse!(state, a; ALICE, BOB, CAROL);\n        // Bob's unit depends on Alice's unit, an endorsement of Alice's unit, and evidence against\n        // Carol.\n        let b = add_unit!(state, BOB, 0xB; a, N, F; a)?;\n\n        let end_a = state.maybe_endorsements(&a).expect(\"unit a is endorsed\");\n        let ev_c = state.maybe_evidence(CAROL).unwrap().clone();\n        let wunit_a = state.wire_unit(&a, TEST_INSTANCE_ID).unwrap();\n        let wunit_b = state.wire_unit(&b, TEST_INSTANCE_ID).unwrap();\n\n        let mut highway = Highway {\n            instance_id: TEST_INSTANCE_ID,\n            validators: test_validators(),\n          
  state: State::new_test(WEIGHTS, 0),\n            active_validator: None,\n            write_wal: None,\n        };\n\n        let vertex_end_a = Vertex::Endorsements(end_a);\n        let pvv_a = highway.pre_validate_vertex(Vertex::Unit(wunit_a)).unwrap();\n        let pvv_end_a = highway.pre_validate_vertex(vertex_end_a).unwrap();\n        let pvv_ev_c = highway.pre_validate_vertex(Vertex::Evidence(ev_c)).unwrap();\n        let pvv_b = highway.pre_validate_vertex(Vertex::Unit(wunit_b)).unwrap();\n\n        assert_eq!(\n            Some(Dependency::Unit(a)),\n            highway.missing_dependency(&pvv_b)\n        );\n        assert_eq!(\n            Some(Dependency::Unit(a)),\n            highway.missing_dependency(&pvv_end_a)\n        );\n        assert_eq!(None, highway.missing_dependency(&pvv_a));\n        let vv_a = highway.validate_vertex(pvv_a).unwrap();\n        highway.add_valid_vertex(vv_a, now);\n\n        assert_eq!(None, highway.missing_dependency(&pvv_end_a));\n        assert_eq!(\n            Some(Dependency::Evidence(CAROL)),\n            highway.missing_dependency(&pvv_b)\n        );\n        assert_eq!(None, highway.missing_dependency(&pvv_ev_c));\n        let vv_ev_c = highway.validate_vertex(pvv_ev_c).unwrap();\n        highway.add_valid_vertex(vv_ev_c, now);\n\n        assert_eq!(\n            Some(Dependency::Endorsement(a)),\n            highway.missing_dependency(&pvv_b)\n        );\n        assert_eq!(None, highway.missing_dependency(&pvv_end_a));\n        let vv_end_a = highway.validate_vertex(pvv_end_a).unwrap();\n        highway.add_valid_vertex(vv_end_a, now);\n\n        assert_eq!(None, highway.missing_dependency(&pvv_b));\n        let vv_b = highway.validate_vertex(pvv_b).unwrap();\n        highway.add_valid_vertex(vv_b, now);\n\n        Ok(())\n    }\n\n    #[test]\n    fn invalid_evidence() {\n        let state: State<TestContext> = State::new_test(WEIGHTS, 0);\n        let highway = Highway {\n            instance_id: 
TEST_INSTANCE_ID,\n            validators: test_validators(),\n            state,\n            active_validator: None,\n            write_wal: None,\n        };\n\n        let validate = |wunit0: &WireUnit<TestContext>,\n                        signer0: &TestSecret,\n                        wunit1: &WireUnit<TestContext>,\n                        signer1: &TestSecret| {\n            let hwunit0 = wunit0.clone().into_hashed();\n            let swunit0 = SignedWireUnit::new(hwunit0, signer0);\n            let hwunit1 = wunit1.clone().into_hashed();\n            let swunit1 = SignedWireUnit::new(hwunit1, signer1);\n            let evidence = Evidence::Equivocation(swunit0, swunit1);\n            let vertex = Vertex::Evidence(evidence);\n            highway\n                .pre_validate_vertex(vertex.clone())\n                .map_err(|(v, err)| {\n                    assert_eq!(v, vertex);\n                    err\n                })\n        };\n\n        // Two units with different values and the same sequence number. 
Carol equivocated!\n        let mut wunit0 = WireUnit {\n            panorama: Panorama::new(WEIGHTS.len()),\n            creator: CAROL,\n            instance_id: highway.instance_id,\n            value: Some(0),\n            seq_number: 0,\n            timestamp: Timestamp::zero(),\n            round_exp: 4,\n            endorsed: BTreeSet::new(),\n        };\n        let wunit1 = WireUnit {\n            panorama: Panorama::new(WEIGHTS.len()),\n            creator: CAROL,\n            instance_id: highway.instance_id,\n            value: Some(1),\n            seq_number: 0,\n            timestamp: Timestamp::zero(),\n            round_exp: 4,\n            endorsed: BTreeSet::new(),\n        };\n\n        assert!(validate(&wunit0, &CAROL_SEC, &wunit1, &CAROL_SEC,).is_ok());\n\n        // It's only an equivocation if the two units are different.\n        assert_eq!(\n            Err(VertexError::Evidence(EvidenceError::EquivocationSameUnit)),\n            validate(&wunit0, &CAROL_SEC, &wunit0, &CAROL_SEC)\n        );\n\n        // Both units have Carol as their creator; Bob's signature would be invalid.\n        assert_eq!(\n            Err(VertexError::Evidence(EvidenceError::Signature)),\n            validate(&wunit0, &CAROL_SEC, &wunit1, &BOB_SEC)\n        );\n        assert_eq!(\n            Err(VertexError::Evidence(EvidenceError::Signature)),\n            validate(&wunit0, &BOB_SEC, &wunit1, &CAROL_SEC)\n        );\n\n        // If the first unit was actually Bob's and the second Carol's, nobody equivocated.\n        wunit0.creator = BOB;\n        assert_eq!(\n            Err(VertexError::Evidence(\n                EvidenceError::EquivocationDifferentCreators\n            )),\n            validate(&wunit0, &BOB_SEC, &wunit1, &CAROL_SEC)\n        );\n        wunit0.creator = CAROL;\n\n        // If the units have different sequence numbers they might belong to the same fork.\n        wunit0.seq_number = 1;\n        assert_eq!(\n            
Err(VertexError::Evidence(\n                EvidenceError::EquivocationDifferentSeqNumbers\n            )),\n            validate(&wunit0, &CAROL_SEC, &wunit1, &CAROL_SEC)\n        );\n        wunit0.seq_number = 0;\n\n        // If the units are from a different network or era we don't accept the evidence.\n        wunit0.instance_id = TEST_INSTANCE_ID + 1;\n        assert_eq!(\n            Err(VertexError::Evidence(EvidenceError::EquivocationInstanceId)),\n            validate(&wunit0, &CAROL_SEC, &wunit1, &CAROL_SEC)\n        );\n    }\n\n    #[test]\n    fn invalid_ping_ndrs1077_regression() {\n        let now: Timestamp = 500.into();\n\n        let state: State<TestContext> = State::new_test(WEIGHTS, 0);\n        let highway = Highway {\n            instance_id: TEST_INSTANCE_ID,\n            validators: test_validators(),\n            state,\n            active_validator: None,\n            write_wal: None,\n        };\n\n        // Ping by validator that is not bonded, with an index that is outside of boundaries of the\n        // state.\n        let ping: Vertex<TestContext> =\n            Vertex::Ping(Ping::new(DAN, now, TEST_INSTANCE_ID, &DAN_SEC));\n        assert!(\n            DAN.0 >= WEIGHTS.len() as u32,\n            \"should use validator that is not bonded\"\n        );\n        // Verify that sending a Ping from a non-existing validator does not panic.\n        assert!(!highway.has_vertex(&ping));\n    }\n\n    #[test]\n    fn own_initial_ping_is_not_from_doppelganger() {\n        let now: Timestamp = 500.into();\n        let later = 501.into();\n\n        let state: State<TestContext> = State::new_test(WEIGHTS, 0);\n        let target_ftt = state.total_weight() / 3;\n        let mut highway = Highway {\n            instance_id: TEST_INSTANCE_ID,\n            validators: test_validators(),\n            state,\n            active_validator: None,\n            write_wal: None,\n        };\n\n        let _effects =\n            
highway.activate_validator(ALICE.0, ALICE_SEC.clone(), now, None, target_ftt);\n\n        let ping = Vertex::Ping(Ping::new(ALICE, now, TEST_INSTANCE_ID, &ALICE_SEC));\n        assert!(!highway.is_doppelganger_vertex(&ping));\n        let ping = Vertex::Ping(Ping::new(ALICE, later, TEST_INSTANCE_ID, &ALICE_SEC));\n        assert!(highway.is_doppelganger_vertex(&ping));\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/highway_core/highway_testing.rs",
    "content": "#![allow(clippy::arithmetic_side_effects)] // In tests, overflows panic anyway.\n\nuse std::{\n    collections::{hash_map::DefaultHasher, HashMap, VecDeque},\n    fmt::{self, Debug, Display, Formatter},\n    hash::{Hash, Hasher},\n};\n\nuse datasize::DataSize;\nuse hex_fmt::HexFmt;\nuse itertools::Itertools;\nuse rand::Rng;\nuse serde::{Deserialize, Serialize};\nuse tracing::{trace, warn};\n\nuse casper_types::{TimeDiff, Timestamp};\n\nuse super::{\n    active_validator::Effect,\n    finality_detector::{FinalityDetector, FttExceeded},\n    highway::{\n        Dependency, GetDepOutcome, Highway, Params, PreValidatedVertex, SignedWireUnit,\n        ValidVertex, Vertex, VertexError,\n    },\n    state::Fault,\n};\nuse crate::{\n    components::consensus::{\n        consensus_protocol::FinalizedBlock,\n        tests::{\n            consensus_des_testing::{\n                DeliverySchedule, Fault as DesFault, Message, Node, Target, TargetedMessage,\n                ValidatorId, VirtualNet,\n            },\n            queue::QueueEntry,\n        },\n        traits::{ConsensusValueT, Context, ValidatorSecret},\n        utils::{Validators, Weight},\n        BlockContext,\n    },\n    NodeRng,\n};\n\n#[derive(Eq, PartialEq, Clone, Debug, Hash, Serialize, Deserialize, DataSize, Default)]\npub(crate) struct ConsensusValue(Vec<u8>);\n\nimpl ConsensusValueT for ConsensusValue {\n    fn needs_validation(&self) -> bool {\n        !self.0.is_empty()\n    }\n}\n\nimpl Display for ConsensusValue {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"{:10}\", HexFmt(&self.0))\n    }\n}\n\nconst TEST_MIN_ROUND_LEN: TimeDiff = TimeDiff::from_millis(1 << 12);\nconst TEST_MAX_ROUND_LEN: TimeDiff = TimeDiff::from_millis(1 << 19);\nconst TEST_END_HEIGHT: u64 = 100000;\npub(crate) const TEST_INSTANCE_ID: u64 = 42;\npub(crate) const TEST_ENDORSEMENT_EVIDENCE_LIMIT: u64 = 20;\n\n#[derive(Clone, Eq, PartialEq, Hash)]\nenum HighwayMessage {\n    
Timer(Timestamp),\n    NewVertex(Box<Vertex<TestContext>>),\n    RequestBlock(BlockContext<TestContext>),\n    WeAreFaulty(Box<Fault<TestContext>>),\n}\n\nimpl Debug for HighwayMessage {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            HighwayMessage::Timer(t) => f.debug_tuple(\"Timer\").field(&t.millis()).finish(),\n            HighwayMessage::RequestBlock(bc) => f\n                .debug_struct(\"RequestBlock\")\n                .field(\"timestamp\", &bc.timestamp().millis())\n                .finish(),\n            HighwayMessage::NewVertex(v) => {\n                f.debug_struct(\"NewVertex\").field(\"vertex\", &v).finish()\n            }\n            HighwayMessage::WeAreFaulty(ft) => f.debug_tuple(\"WeAreFaulty\").field(&ft).finish(),\n        }\n    }\n}\n\nimpl HighwayMessage {\n    fn into_targeted(self, creator: ValidatorId) -> TargetedMessage<HighwayMessage> {\n        let create_msg = |hwm: HighwayMessage| Message::new(creator, hwm);\n\n        match self {\n            HighwayMessage::NewVertex(_) => {\n                TargetedMessage::new(create_msg(self), Target::AllExcept(creator))\n            }\n            HighwayMessage::Timer(_)\n            | HighwayMessage::RequestBlock(_)\n            | HighwayMessage::WeAreFaulty(_) => {\n                TargetedMessage::new(create_msg(self), Target::SingleValidator(creator))\n            }\n        }\n    }\n\n    fn is_new_unit(&self) -> bool {\n        if let HighwayMessage::NewVertex(vertex) = self {\n            matches!(**vertex, Vertex::Unit(_))\n        } else {\n            false\n        }\n    }\n}\n\nimpl From<Effect<TestContext>> for HighwayMessage {\n    fn from(eff: Effect<TestContext>) -> Self {\n        match eff {\n            // The effect is `ValidVertex` but we want to gossip it to other\n            // validators so for them it's just `Vertex` that needs to be validated.\n            Effect::NewVertex(ValidVertex(v)) => 
HighwayMessage::NewVertex(Box::new(v)),\n            Effect::ScheduleTimer(t) => HighwayMessage::Timer(t),\n            Effect::RequestNewBlock(block_context, _expiry) => {\n                HighwayMessage::RequestBlock(block_context)\n            }\n            Effect::WeAreFaulty(fault) => HighwayMessage::WeAreFaulty(Box::new(fault)),\n        }\n    }\n}\n\nimpl PartialOrd for HighwayMessage {\n    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {\n        Some(self.cmp(other))\n    }\n}\n\nimpl Ord for HighwayMessage {\n    fn cmp(&self, other: &Self) -> std::cmp::Ordering {\n        let mut hasher0 = DefaultHasher::new();\n        let mut hasher1 = DefaultHasher::new();\n        self.hash(&mut hasher0);\n        other.hash(&mut hasher1);\n        hasher0.finish().cmp(&hasher1.finish())\n    }\n}\n\n#[derive(Debug, Eq, PartialEq)]\npub(crate) enum TestRunError {\n    /// VirtualNet was missing a validator when it was expected to exist.\n    MissingValidator(ValidatorId),\n    /// Sender sent a vertex for which it didn't have all dependencies.\n    SenderMissingDependency(ValidatorId, Dependency<TestContext>),\n    /// No more messages in the message queue.\n    NoMessages,\n}\n\nimpl Display for TestRunError {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            TestRunError::NoMessages => write!(\n                f,\n                \"Test finished prematurely due to lack of messages in the queue\"\n            ),\n            TestRunError::SenderMissingDependency(validator_id, dependency) => write!(\n                f,\n                \"{:?} was missing a dependency {:?} of a vertex it created.\",\n                validator_id, dependency\n            ),\n            TestRunError::MissingValidator(id) => {\n                write!(f, \"Virtual net is missing validator {:?}.\", id)\n            }\n        }\n    }\n}\n\nenum Distribution {\n    Uniform,\n    // TODO: Poisson(f64)\n}\n\nimpl Distribution 
{\n    /// Returns vector of `count` elements of random values between `lower` and `upper`.\n    fn gen_range_vec(&self, rng: &mut NodeRng, lower: u64, upper: u64, count: u8) -> Vec<u64> {\n        match self {\n            Distribution::Uniform => (0..count).map(|_| rng.gen_range(lower..upper)).collect(),\n        }\n    }\n}\n\ntrait DeliveryStrategy {\n    fn gen_delay(\n        &mut self,\n        rng: &mut NodeRng,\n        message: &HighwayMessage,\n        distribution: &Distribution,\n        base_delivery_timestamp: Timestamp,\n    ) -> DeliverySchedule;\n}\n\nstruct HighwayValidator {\n    highway: Highway<TestContext>,\n    finality_detector: FinalityDetector<TestContext>,\n    fault: Option<DesFault>,\n}\n\nimpl HighwayValidator {\n    fn new(\n        highway: Highway<TestContext>,\n        finality_detector: FinalityDetector<TestContext>,\n        fault: Option<DesFault>,\n    ) -> Self {\n        HighwayValidator {\n            highway,\n            finality_detector,\n            fault,\n        }\n    }\n\n    fn highway_mut(&mut self) -> &mut Highway<TestContext> {\n        &mut self.highway\n    }\n\n    fn highway(&self) -> &Highway<TestContext> {\n        &self.highway\n    }\n\n    fn run_finality(&mut self) -> Result<Vec<FinalizedBlock<TestContext>>, FttExceeded> {\n        Ok(self.finality_detector.run(&self.highway)?.collect())\n    }\n\n    fn post_hook(&mut self, delivery_time: Timestamp, msg: HighwayMessage) -> Vec<HighwayMessage> {\n        match self.fault.as_ref() {\n            Some(DesFault::TemporarilyMute { from, till })\n                if *from <= delivery_time && delivery_time <= *till =>\n            {\n                // For mute validators we add it to the state but not gossip, if the delivery time\n                // is in the interval in which they are muted.\n                match msg {\n                    HighwayMessage::NewVertex(_) => {\n                        warn!(\"Validator is mute – won't gossip vertices in 
response\");\n                        vec![]\n                    }\n                    HighwayMessage::Timer(_) | HighwayMessage::RequestBlock(_) => vec![msg],\n                    HighwayMessage::WeAreFaulty(ev) => {\n                        panic!(\"validator equivocated unexpectedly: {:?}\", ev);\n                    }\n                }\n            }\n            Some(DesFault::PermanentlyMute) => {\n                // For mute validators we add it to the state but not gossip.\n                match msg {\n                    HighwayMessage::NewVertex(_) => {\n                        warn!(\"Validator is mute – won't gossip vertices in response\");\n                        vec![]\n                    }\n                    HighwayMessage::Timer(_) | HighwayMessage::RequestBlock(_) => vec![msg],\n                    HighwayMessage::WeAreFaulty(ev) => {\n                        panic!(\"validator equivocated unexpectedly: {:?}\", ev);\n                    }\n                }\n            }\n            None | Some(DesFault::TemporarilyMute { .. }) => {\n                // Honest validator.\n                match &msg {\n                    HighwayMessage::NewVertex(_)\n                    | HighwayMessage::Timer(_)\n                    | HighwayMessage::RequestBlock(_) => vec![msg],\n                    HighwayMessage::WeAreFaulty(ev) => {\n                        panic!(\"validator equivocated unexpectedly: {:?}\", ev);\n                    }\n                }\n            }\n            Some(DesFault::Equivocate) => {\n                match msg {\n                    HighwayMessage::NewVertex(ref vertex) => {\n                        match **vertex {\n                            Vertex::Unit(ref swunit) => {\n                                // Create an equivocating message, with a different timestamp.\n                                // TODO: Don't send both messages to every peer. 
Add different\n                                // strategies.\n                                let mut wunit2 = swunit.wire_unit().clone();\n                                match wunit2.value.as_mut() {\n                                    None => wunit2.timestamp += TimeDiff::from_millis(1),\n                                    Some(v) => v.0.push(0),\n                                }\n                                let secret = TestSecret(wunit2.creator.0.into());\n                                let hwunit2 = wunit2.into_hashed();\n                                let swunit2 = SignedWireUnit::new(hwunit2, &secret);\n                                let vertex2 = Box::new(Vertex::Unit(swunit2));\n                                vec![msg, HighwayMessage::NewVertex(vertex2)]\n                            }\n                            _ => vec![msg],\n                        }\n                    }\n                    HighwayMessage::RequestBlock(_)\n                    | HighwayMessage::WeAreFaulty(_)\n                    | HighwayMessage::Timer(_) => vec![msg],\n                }\n            }\n        }\n    }\n}\n\ntype HighwayNode = Node<ConsensusValue, HighwayMessage, HighwayValidator>;\n\ntype HighwayNet = VirtualNet<ConsensusValue, HighwayMessage, HighwayValidator>;\n\nimpl HighwayNode {\n    fn unit_count(&self) -> usize {\n        self.validator().highway.state().unit_count()\n    }\n}\n\nstruct HighwayTestHarness<DS>\nwhere\n    DS: DeliveryStrategy,\n{\n    virtual_net: HighwayNet,\n    /// Consensus values to be proposed.\n    /// Order of values in the vector defines the order in which they will be proposed.\n    consensus_values: VecDeque<ConsensusValue>,\n    /// A strategy to pseudo randomly change the message delivery times.\n    delivery_time_strategy: DS,\n    /// Distribution of delivery times.\n    delivery_time_distribution: Distribution,\n}\n\ntype TestResult<T> = Result<T, TestRunError>;\n\n// Outer `Err` (from `TestResult`) represents 
// (comment continued from the previous chunk) ...an unexpected error in the
// test framework — a global error.
// Inner `Result` is a local result, its error is also local.
type TestRunResult<T> = TestResult<Result<T, (Vertex<TestContext>, VertexError)>>;

impl<DS> HighwayTestHarness<DS>
where
    DS: DeliveryStrategy,
{
    /// Advance the test by one message.
    ///
    /// Pops one message from the message queue (if there are any)
    /// and passes it to the recipient validator for execution.
    /// Messages returned from the execution are scheduled for later delivery.
    pub(crate) fn crank(&mut self, rng: &mut NodeRng) -> TestResult<()> {
        // An empty queue stops the run with `NoMessages`.
        let QueueEntry {
            delivery_time,
            recipient,
            message,
        } = self
            .virtual_net
            .pop_message()
            .ok_or(TestRunError::NoMessages)?;

        let span = tracing::trace_span!("crank", validator = %recipient);
        let _enter = span.enter();
        trace!(
            "Processing: tick {}, sender validator={}, payload {:?}",
            delivery_time,
            message.sender,
            message.payload(),
        );

        let messages = self.process_message(rng, recipient, message, delivery_time)?;

        // Ask the delivery strategy when (or whether) each produced message
        // should arrive; dropped messages are filtered out here.
        let targeted_messages = messages
            .into_iter()
            .filter_map(|hwm| {
                let delivery = self.delivery_time_strategy.gen_delay(
                    rng,
                    &hwm,
                    &self.delivery_time_distribution,
                    delivery_time,
                );
                match delivery {
                    DeliverySchedule::Drop => {
                        trace!("{:?} message is dropped.", hwm);
                        None
                    }
                    DeliverySchedule::AtInstant(timestamp) => {
                        trace!("{:?} scheduled for {:?}", hwm, timestamp);
                        let targeted = hwm.into_targeted(recipient);
                        Some((targeted, timestamp))
                    }
                }
            })
            .collect();

        self.virtual_net.dispatch_messages(targeted_messages);
        Ok(())
    }

    /// Returns the consensus value to be proposed at `height`.
    /// Falls back to the default value when `height` is out of range.
    fn next_consensus_value(&mut self, height: u64) -> ConsensusValue {
        self.consensus_values
            .get(height as usize)
            .cloned()
            .unwrap_or_default()
    }

    /// Helper for getting validator from the underlying virtual net.
    fn node_mut(&mut self, validator_id: &ValidatorId) -> TestResult<&mut HighwayNode> {
        self.virtual_net
            .node_mut(validator_id)
            .ok_or(TestRunError::MissingValidator(*validator_id))
    }

    /// Runs `f` against the consensus instance of `validator_id` and converts
    /// the resulting effects into `HighwayMessage`s via the validator's
    /// `post_hook` (which may also inject faulty behavior).
    fn call_validator<F>(
        &mut self,
        delivery_time: Timestamp,
        validator_id: &ValidatorId,
        f: F,
    ) -> TestResult<Vec<HighwayMessage>>
    where
        F: FnOnce(&mut HighwayValidator) -> Vec<Effect<TestContext>>,
    {
        let validator_node = self.node_mut(validator_id)?;
        let res = f(validator_node.validator_mut());
        let messages = res
            .into_iter()
            .flat_map(|eff| {
                validator_node
                    .validator_mut()
                    .post_hook(delivery_time, HighwayMessage::from(eff))
            })
            .collect();
        Ok(messages)
    }
}
impl<DS> HighwayTestHarness<DS>
where
    DS: DeliveryStrategy,
{
    /// Processes a message sent to `validator_id`.
    /// Returns a vector of messages produced by the `validator` in reaction to processing a
    /// message.
    fn process_message(
        &mut self,
        rng: &mut NodeRng,
        validator_id: ValidatorId,
        message: Message<HighwayMessage>,
        delivery_time: Timestamp,
    ) -> TestResult<Vec<HighwayMessage>> {
        // Record the incoming message on the recipient node before handling it.
        self.node_mut(&validator_id)?
            .push_messages_received(vec![message.clone()]);

        let messages = {
            let sender_id = message.sender;

            let hwm = message.payload().clone();

            match hwm {
                // A timer fired for this validator.
                HighwayMessage::Timer(timestamp) => {
                    self.call_validator(delivery_time, &validator_id, |consensus| {
                        consensus.highway_mut().handle_timer(timestamp)
                    })?
                }
                // A new protocol vertex arrived from `sender_id`; a *local*
                // vertex error only produces a warning, not a test failure.
                HighwayMessage::NewVertex(v) => {
                    match self.add_vertex(
                        rng,
                        validator_id,
                        sender_id,
                        *v.clone(),
                        delivery_time,
                    )? {
                        Ok(msgs) => {
                            trace!("{:?} successfully added to the state.", v);
                            msgs
                        }
                        Err((v, error)) => {
                            // TODO: this seems to get output from passing tests
                            warn!(
                                "{:?} sent an invalid vertex {:?} to {:?} \
                                that resulted in {:?} error",
                                sender_id, v, validator_id, error
                            );
                            vec![]
                        }
                    }
                }
                // The validator was asked to propose a block at the given height.
                HighwayMessage::RequestBlock(block_context) => {
                    let consensus_value = self.next_consensus_value(block_context.height());

                    self.call_validator(delivery_time, &validator_id, |consensus| {
                        consensus
                            .highway_mut()
                            .propose(consensus_value, block_context)
                    })?
                }
                // Evidence of our own fault produces no further traffic.
                HighwayMessage::WeAreFaulty(_evidence) => vec![],
            }
        };

        let recipient = self.node_mut(&validator_id)?;
        recipient.push_messages_produced(messages.clone());

        self.run_finality_detector(&validator_id)?;

        Ok(messages)
    }

    /// Runs finality detector on `validator_id`'s state and records every
    /// newly finalized consensus value on that node.
    fn run_finality_detector(&mut self, validator_id: &ValidatorId) -> TestResult<()> {
        let recipient = self.node_mut(validator_id)?;

        let finalized_values = recipient
            .validator_mut()
            .run_finality()
            // TODO: https://casperlabs.atlassian.net/browse/HWY-119
            .expect("FTT exceeded but not handled");
        for FinalizedBlock {
            value,
            timestamp: _,
            relative_height,
            terminal_block_data,
            equivocators: _,
            proposer: _,
        } in finalized_values
        {
            trace!(
                "{}consensus value finalized: {:?}, height: {:?}",
                if terminal_block_data.is_some() {
                    "last "
                } else {
                    ""
                },
                value,
                relative_height,
            );

            recipient.push_finalized(value);
        }

        Ok(())
    }
}
add_valid_vertex\n\n        let sync_result = {\n            let validator = self.node_mut(&recipient)?;\n\n            match validator\n                .validator_mut()\n                .highway_mut()\n                .pre_validate_vertex(vertex)\n            {\n                Err((v, error)) => Ok(Err((v, error))),\n                Ok(pvv) => self.synchronize_validator(rng, recipient, sender, pvv, delivery_time),\n            }\n        }?;\n\n        match sync_result {\n            Err(vertex_error) => Ok(Err(vertex_error)),\n            Ok((prevalidated_vertex, mut sync_effects)) => {\n                let add_vertex_effects: Vec<HighwayMessage> = {\n                    match self\n                        .node_mut(&recipient)?\n                        .validator_mut()\n                        .highway_mut()\n                        .validate_vertex(prevalidated_vertex)\n                    {\n                        Err((pvv, error)) => return Ok(Err((pvv.into_vertex(), error))),\n                        Ok(valid_vertex) => {\n                            self.call_validator(delivery_time, &recipient, |v| {\n                                v.highway_mut()\n                                    .add_valid_vertex(valid_vertex, delivery_time)\n                            })?\n                        }\n                    }\n                };\n\n                sync_effects.extend(add_vertex_effects);\n\n                Ok(Ok(sync_effects))\n            }\n        }\n    }\n\n    /// Synchronizes all missing dependencies of `pvv` that `recipient` is missing.\n    /// If an error occurs during synchronization of one of `pvv`'s dependencies\n    /// it's returned and the original vertex mustn't be added to the state.\n    fn synchronize_validator(\n        &mut self,\n        rng: &mut NodeRng,\n        recipient: ValidatorId,\n        sender: ValidatorId,\n        pvv: PreValidatedVertex<TestContext>,\n        delivery_time: Timestamp,\n    ) -> 
TestRunResult<(PreValidatedVertex<TestContext>, Vec<HighwayMessage>)> {\n        // There may be more than one dependency missing and we want to sync all of them.\n        loop {\n            let validator = self\n                .virtual_net\n                .validator(&recipient)\n                .ok_or(TestRunError::MissingValidator(recipient))?\n                .validator();\n\n            let mut messages = vec![];\n\n            match validator.highway().missing_dependency(&pvv) {\n                None => return Ok(Ok((pvv, messages))),\n                Some(d) => {\n                    match self.synchronize_dependency(rng, d, recipient, sender, delivery_time)? {\n                        Ok(sync_messages) => {\n                            // `hwm` represent messages produced while synchronizing `d`.\n                            messages.extend(sync_messages)\n                        }\n                        Err(vertex_error) => {\n                            // An error occurred when trying to synchronize a missing dependency.\n                            // We must stop the synchronization process and return it to the caller.\n                            return Ok(Err(vertex_error));\n                        }\n                    }\n                }\n            }\n        }\n    }\n\n    // Synchronizes `validator` in case of missing dependencies.\n    //\n    // If validator has missing dependencies then we have to add them first.\n    // We don't want to test synchronization, and the Highway theory assumes\n    // that when units are added then all their dependencies are satisfied.\n    fn synchronize_dependency(\n        &mut self,\n        rng: &mut NodeRng,\n        missing_dependency: Dependency<TestContext>,\n        recipient: ValidatorId,\n        sender: ValidatorId,\n        delivery_time: Timestamp,\n    ) -> TestRunResult<Vec<HighwayMessage>> {\n        match self\n            .node_mut(&sender)?\n            .validator_mut()\n            
.highway()\n            .get_dependency(&missing_dependency)\n        {\n            GetDepOutcome::Vertex(vv) => {\n                self.add_vertex(rng, recipient, sender, vv.0, delivery_time)\n            }\n            GetDepOutcome::Evidence(_) | GetDepOutcome::None => Err(\n                TestRunError::SenderMissingDependency(sender, missing_dependency),\n            ),\n        }\n    }\n\n    /// Returns a `MutableHandle` on the `HighwayTestHarness` object\n    /// that allows for manipulating internal state of the test state.\n    fn mutable_handle(&mut self) -> MutableHandle<DS> {\n        MutableHandle(self)\n    }\n}\n\nfn crank_until<F, DS: DeliveryStrategy>(\n    hth: &mut HighwayTestHarness<DS>,\n    rng: &mut NodeRng,\n    f: F,\n) -> TestResult<()>\nwhere\n    F: Fn(&HighwayTestHarness<DS>) -> bool,\n{\n    while !f(hth) {\n        hth.crank(rng)?;\n    }\n    Ok(())\n}\n\nfn crank_until_finalized<DS: DeliveryStrategy>(\n    hth: &mut HighwayTestHarness<DS>,\n    rng: &mut NodeRng,\n    cv_count: usize,\n) -> TestResult<()> {\n    crank_until(hth, rng, |hth| {\n        let has_all_finalized = |v: &HighwayNode| v.finalized_count() == cv_count;\n        hth.virtual_net.validators().all(has_all_finalized)\n    })\n}\n\nfn crank_until_time<DS: DeliveryStrategy>(\n    hth: &mut HighwayTestHarness<DS>,\n    rng: &mut NodeRng,\n    timestamp: Timestamp,\n) -> TestResult<()> {\n    crank_until(hth, rng, |hth| {\n        hth.virtual_net\n            .peek_message()\n            .is_none_or(|qe| qe.delivery_time > timestamp)\n    })\n}\n\nstruct MutableHandle<'a, DS: DeliveryStrategy>(&'a mut HighwayTestHarness<DS>);\n\nimpl<DS: DeliveryStrategy> MutableHandle<'_, DS> {\n    /// Drops all messages from the queue.\n    fn clear_message_queue(&mut self) {\n        self.0.virtual_net.empty_queue();\n    }\n\n    fn validators(&self) -> impl Iterator<Item = &HighwayNode> {\n        self.0.virtual_net.validators()\n    }\n\n    fn correct_validators(&self) -> 
/// Returns the default Highway `Params` shared by the tests in this module.
fn test_params() -> Params {
    Params::new(
        0, // random seed
        TEST_MIN_ROUND_LEN,
        TEST_MAX_ROUND_LEN,
        TEST_MIN_ROUND_LEN,
        TEST_END_HEIGHT,
        Timestamp::zero(),
        Timestamp::zero(), // Length depends only on block number.
        TEST_ENDORSEMENT_EVIDENCE_LIMIT,
    )
}

/// Errors that `HighwayTestHarnessBuilder::build` can report.
#[derive(Debug)]
enum BuilderError {
    // Lower weight limit was not strictly below the upper limit.
    WeightLimits,
}

struct HighwayTestHarnessBuilder<DS: DeliveryStrategy> {
    /// Maximum number of faulty validators in the network.
    /// Defaults to 10.
    max_faulty_validators: u8,
    /// Percentage of faulty validators' (i.e. equivocators) weight.
    /// Defaults to 0 (network is perfectly secure).
    faulty_percent: u64,
    // Kind of fault assigned to the faulty validators, if any.
    fault_type: Option<DesFault>,
    /// FTT value for the finality detector.
    /// If not given, defaults to 1/3 of total validators' weight.
    ftt: Option<u64>,
    /// Number of consensus values to be proposed by the nodes in the network.
    /// Those will be generated by the test framework.
    /// Defaults to 10.
    consensus_values_count: u8,
    /// Distribution of message delivery (delaying, dropping) delays.
    delivery_distribution: Distribution,
    // Strategy that decides when (or whether) each message is delivered.
    delivery_strategy: DS,
    /// Upper and lower limits for validators' weights.
    weight_limits: (u64, u64),
    /// Time when the test era starts at.
    /// Defaults to 0.
    start_time: Timestamp,
    /// Type of discrete distribution of validators' weights.
    /// Defaults to uniform.
    weight_distribution: Distribution,
    /// Highway parameters.
    params: Params,
}

// Default strategy for message delivery.
struct InstantDeliveryNoDropping;
message: &HighwayMessage,\n        _distribution: &Distribution,\n        base_delivery_timestamp: Timestamp,\n    ) -> DeliverySchedule {\n        match message {\n            HighwayMessage::RequestBlock(bc) => DeliverySchedule::AtInstant(bc.timestamp()),\n            HighwayMessage::Timer(t) => DeliverySchedule::AtInstant(*t),\n            HighwayMessage::NewVertex(_) => {\n                DeliverySchedule::AtInstant(base_delivery_timestamp + TimeDiff::from_millis(1))\n            }\n            HighwayMessage::WeAreFaulty(_) => {\n                DeliverySchedule::AtInstant(base_delivery_timestamp + TimeDiff::from_millis(1))\n            }\n        }\n    }\n}\n\nimpl HighwayTestHarnessBuilder<InstantDeliveryNoDropping> {\n    fn new() -> Self {\n        HighwayTestHarnessBuilder {\n            max_faulty_validators: 10,\n            faulty_percent: 0,\n            fault_type: None,\n            ftt: None,\n            consensus_values_count: 10,\n            delivery_distribution: Distribution::Uniform,\n            delivery_strategy: InstantDeliveryNoDropping,\n            weight_limits: (1, 100),\n            start_time: Timestamp::zero(),\n            weight_distribution: Distribution::Uniform,\n            params: test_params(),\n        }\n    }\n}\n\nimpl<DS: DeliveryStrategy> HighwayTestHarnessBuilder<DS> {\n    /// Sets a percentage of weight that will be assigned to malicious nodes.\n    /// `faulty_weight` must be a value between 0 (inclusive) and 33 (inclusive).\n    pub(crate) fn faulty_weight_perc(mut self, faulty_weight: u64) -> Self {\n        self.faulty_percent = faulty_weight;\n        self\n    }\n\n    fn fault_type(mut self, fault_type: DesFault) -> Self {\n        self.fault_type = Some(fault_type);\n        self\n    }\n\n    pub(crate) fn consensus_values_count(mut self, count: u8) -> Self {\n        assert!(count > 0);\n        self.consensus_values_count = count;\n        self\n    }\n\n    pub(crate) fn weight_limits(mut self, lower: 
impl<DS: DeliveryStrategy> HighwayTestHarnessBuilder<DS> {
    /// Sets the lower and upper weight limits for the validators.
    // NOTE(review): the assert accepts `lower == 100` although the message
    // says "higher than 100" — confirm which is intended.
    pub(crate) fn weight_limits(mut self, lower: u64, upper: u64) -> Self {
        assert!(
            lower >= 100,
            "Lower limit has to be higher than 100 to avoid rounding problems."
        );
        self.weight_limits = (lower, upper);
        self
    }

    /// Sets the maximum number of faulty validators in the network.
    fn max_faulty_validators(mut self, max_faulty_count: u8) -> Self {
        self.max_faulty_validators = max_faulty_count;
        self
    }

    /// Overrides the Highway parameters used by the harness.
    fn params(mut self, params: Params) -> Self {
        self.params = params;
        self
    }

    /// Builds the harness: generates validator weights according to the
    /// configured distribution and faulty-weight percentage, creates one
    /// Highway instance per validator and queues their start-up messages.
    fn build(self, rng: &mut NodeRng) -> Result<HighwayTestHarness<DS>, BuilderError> {
        let consensus_values = (0..self.consensus_values_count)
            .map(|el| ConsensusValue(vec![el]))
            .collect::<VecDeque<ConsensusValue>>();

        let instance_id = 0;
        let start_time = self.start_time;

        // `lower` must be strictly below `upper`.
        let (lower, upper) = {
            let (l, u) = self.weight_limits;
            if l >= u {
                return Err(BuilderError::WeightLimits);
            }
            (l, u)
        };

        let (faulty_weights, honest_weights): (Vec<Weight>, Vec<Weight>) = {
            if self.faulty_percent == 0 {
                // All validators are honest.
                // NOTE(review): `gen_range` panics on an empty range, so this
                // assumes `max_faulty_validators >= 2` here — confirm callers.
                let validators_num = rng.gen_range(2..self.max_faulty_validators + 1);
                let honest_validators: Vec<Weight> = self
                    .weight_distribution
                    .gen_range_vec(rng, lower, upper, validators_num)
                    .into_iter()
                    .map(Weight)
                    .collect();

                (vec![], honest_validators)
            } else {
                // At least 2 validators total and at least one faulty.
                let faulty_num = rng.gen_range(1..self.max_faulty_validators + 1);

                // Randomly (but within chosen range) assign weights to faulty nodes.
                let faulty_weights = self
                    .weight_distribution
                    .gen_range_vec(rng, lower, upper, faulty_num);

                // Assign enough weights to honest nodes so that we reach expected
                // `faulty_percentage` ratio.
                let honest_weights = {
                    let faulty_sum = faulty_weights.iter().sum::<u64>();
                    let mut weights_to_distribute: u64 =
                        (faulty_sum * 100).div_ceil(self.faulty_percent) - faulty_sum;
                    let mut weights = vec![];
                    while weights_to_distribute > 0 {
                        // The last weight takes whatever remainder is left so
                        // the loop always terminates exactly at zero.
                        let weight = if weights_to_distribute < upper {
                            weights_to_distribute
                        } else {
                            rng.gen_range(lower..upper)
                        };
                        weights.push(weight);
                        weights_to_distribute -= weight
                    }
                    weights
                };

                (
                    faulty_weights.into_iter().map(Weight).collect(),
                    honest_weights.into_iter().map(Weight).collect(),
                )
            }
        };

        let weights_sum = faulty_weights
            .iter()
            .chain(honest_weights.iter())
            .sum::<Weight>();

        // Faulty validators come first, so they get the lowest ids — that is
        // relied upon below when assigning faults by `vid.0 < faulty_num`.
        let validators: Validators<ValidatorId> = faulty_weights
            .iter()
            .chain(honest_weights.iter())
            .enumerate()
            .map(|(i, weight)| (ValidatorId(i as u64), *weight))
            .collect();

        trace!("Weights: {:?}", validators.iter().collect::<Vec<_>>());

        // Each validator's "secret key" is simply its own id.
        let mut secrets = validators
            .iter()
            .map(|validator| (*validator.id(), TestSecret(validator.id().0)))
            .collect();

        // FTT: either the configured percentage of total weight, or just
        // under one third of total weight by default.
        let ftt = self
            .ftt
            .map(|p| p * weights_sum.0 / 100)
            .unwrap_or_else(|| (weights_sum.0 - 1) / 3);
        let params = self.params;

        // Local function creating an instance of `HighwayConsensus` for a single validator.
        let highway_consensus =
            |(vid, secrets): (ValidatorId, &mut HashMap<ValidatorId, TestSecret>)| {
                let v_sec = secrets.remove(&vid).expect("Secret key should exist.");

                let mut highway =
                    Highway::new(instance_id, validators.clone(), params.clone(), None);
                let effects = highway.activate_validator(vid, v_sec, start_time, None, Weight(ftt));

                let finality_detector = FinalityDetector::new(Weight(ftt));

                (
                    highway,
                    finality_detector,
                    effects.into_iter().map(HighwayMessage::from).collect_vec(),
                )
            };

        let faulty_num = faulty_weights.len();

        let (validators, init_messages) = {
            let mut validators_loc = vec![];
            let mut init_messages = vec![];

            for validator in validators.iter() {
                let vid = *validator.id();
                // The first `faulty_num` ids are the faulty validators.
                let fault = if vid.0 < faulty_num as u64 {
                    self.fault_type
                } else {
                    None
                };
                let (highway, finality_detector, msgs) = highway_consensus((vid, &mut secrets));
                let highway_consensus = HighwayValidator::new(highway, finality_detector, fault);
                let validator = Node::new(vid, highway_consensus);
                let qm: Vec<QueueEntry<HighwayMessage>> = msgs
                    .into_iter()
                    .map(|hwm| {
                        // These are messages created on the start of the network.
                        // They are sent from a validator to itself.
                        QueueEntry::new(start_time, vid, Message::new(vid, hwm))
                    })
                    .collect();
                init_messages.extend(qm);
                validators_loc.push(validator);
            }

            (validators_loc, init_messages)
        };

        let delivery_time_strategy = self.delivery_strategy;

        let delivery_time_distribution = self.delivery_distribution;

        let virtual_net = VirtualNet::new(validators, init_messages);

        let hwth = HighwayTestHarness {
            virtual_net,
            consensus_values,
            delivery_time_strategy,
            delivery_time_distribution,
        };

        Ok(hwth)
    }
}
/// Minimal consensus context used by the Highway tests.
#[derive(Clone, DataSize, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub(crate) struct TestContext;

/// Test "secret key": just the validator's own id (see the builder, which
/// constructs `TestSecret(validator.id().0)`).
#[derive(Clone, DataSize, Debug, Eq, PartialEq)]
pub(crate) struct TestSecret(pub(crate) u64);

// Newtype wrapper for test signature.
// Added so that we can use custom Debug impl.
#[derive(Clone, DataSize, Copy, Hash, PartialOrd, Ord, Eq, PartialEq, Serialize, Deserialize)]
pub(crate) struct SignatureWrapper(u64);

impl Debug for SignatureWrapper {
    // Prints the little-endian bytes of the value as (width-10) hex.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "{:10}", HexFmt(&self.0.to_le_bytes()))
    }
}

// Newtype wrapper for test hash.
// Added so that we can use custom Debug impl.
#[derive(Clone, Copy, DataSize, Hash, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize)]
pub(crate) struct HashWrapper(u64);

impl Debug for HashWrapper {
    // Prints the little-endian bytes of the value as (width-10) hex.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "{:10}", HexFmt(&self.0.to_le_bytes()))
    }
}

impl Display for HashWrapper {
    // Display delegates to the hex `Debug` representation.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        Debug::fmt(self, f)
    }
}

impl ValidatorSecret for TestSecret {
    type Hash = HashWrapper;
    type Signature = SignatureWrapper;

    // Toy additive "signature": hash + secret. Matches
    // `TestContext::verify_signature`, which recomputes hash + validator id.
    fn sign(&self, data: &Self::Hash) -> Self::Signature {
        SignatureWrapper(data.0 + self.0)
    }
}
type ValidatorSecret = TestSecret;\n    type Signature = SignatureWrapper;\n    type Hash = HashWrapper;\n    type InstanceId = u64;\n\n    fn hash(data: &[u8]) -> Self::Hash {\n        let mut hasher = DefaultHasher::new();\n        hasher.write(data);\n        HashWrapper(hasher.finish())\n    }\n\n    fn verify_signature(\n        hash: &Self::Hash,\n        public_key: &Self::ValidatorId,\n        signature: &<Self::ValidatorSecret as ValidatorSecret>::Signature,\n    ) -> bool {\n        let computed_signature = hash.0 + public_key.0;\n        computed_signature == signature.0\n    }\n}\n\nmod test_harness {\n    use std::{collections::HashSet, fmt::Debug};\n\n    use itertools::Itertools;\n\n    use casper_types::Timestamp;\n\n    use super::{\n        crank_until, crank_until_finalized, crank_until_time, test_params, ConsensusValue,\n        HighwayTestHarness, HighwayTestHarnessBuilder, InstantDeliveryNoDropping, TestRunError,\n        TEST_MIN_ROUND_LEN,\n    };\n    use crate::{\n        components::consensus::tests::consensus_des_testing::{Fault as DesFault, ValidatorId},\n        logging,\n    };\n    use logging::{LoggingConfig, LoggingFormat};\n\n    #[test]\n    fn on_empty_queue_error() {\n        let mut rng = crate::new_rng();\n        let mut highway_test_harness: HighwayTestHarness<InstantDeliveryNoDropping> =\n            HighwayTestHarnessBuilder::new()\n                .consensus_values_count(1)\n                .weight_limits(100, 120)\n                .build(&mut rng)\n                .expect(\"Construction was successful\");\n\n        highway_test_harness.mutable_handle().clear_message_queue();\n\n        assert_eq!(\n            highway_test_harness.crank(&mut rng),\n            Err(TestRunError::NoMessages),\n            \"Expected the test run to stop.\"\n        );\n    }\n\n    // Test that all elements of the vector all equal.\n    fn assert_eq_vectors<I: Eq + Debug>(coll: Vec<I>, error_msg: &str) {\n        let mut iter = 
coll.into_iter();\n        let reference = iter.next().unwrap();\n\n        iter.for_each(|v| assert_eq!(v, reference, \"{}\", error_msg));\n    }\n\n    #[test]\n    fn liveness_test_no_faults() {\n        let _ = logging::init_with_config(&LoggingConfig::new(LoggingFormat::Text, true, true));\n\n        let mut rng = crate::new_rng();\n        let cv_count = 10;\n\n        let mut highway_test_harness = HighwayTestHarnessBuilder::new()\n            .max_faulty_validators(3)\n            .consensus_values_count(cv_count)\n            .weight_limits(100, 120)\n            .build(&mut rng)\n            .expect(\"Construction was successful\");\n\n        crank_until(&mut highway_test_harness, &mut rng, |hth| {\n            // Stop the test when each node finalized expected number of consensus values.\n            // Note that we're not testing the order of finalization here.\n            // It will be tested later – it's not the condition for stopping the test run.\n            hth.virtual_net\n                .validators()\n                .all(|v| v.finalized_count() == cv_count as usize)\n        })\n        .unwrap();\n\n        let handle = highway_test_harness.mutable_handle();\n        let validators = handle.validators();\n\n        let (finalized_values, units_produced): (Vec<Vec<ConsensusValue>>, Vec<usize>) = validators\n            .map(|v| {\n                (\n                    v.finalized_values().cloned().collect::<Vec<_>>(),\n                    v.messages_produced()\n                        .filter(|&hwm| hwm.is_new_unit())\n                        .cloned()\n                        .count(),\n                )\n            })\n            .unzip();\n\n        units_produced\n            .into_iter()\n            .enumerate()\n            .for_each(|(v_idx, units_count)| {\n                // NOTE: Works only when all validators are honest and correct (no \"mute\"\n                // validators). Validator produces two units per round. 
It may\n                // produce just one before lambda message is finalized. Add one in case it's just\n                // one round (one consensus value) – 1 message. 1/2=0 but 3/2=1 b/c of the rounding.\n                let rounds_participated_in = (units_count as u8 + 1) / 2;\n\n                assert_eq!(\n                    rounds_participated_in, cv_count,\n                    \"Expected that validator={} participated in {} rounds.\",\n                    v_idx, cv_count\n                )\n            });\n\n        assert_eq_vectors(\n            finalized_values,\n            \"Nodes finalized different consensus values.\",\n        );\n    }\n\n    #[test]\n    fn liveness_test_some_mute() {\n        let _ = logging::init_with_config(&LoggingConfig::new(LoggingFormat::Text, true, true));\n\n        let mut rng = crate::new_rng();\n        let cv_count = 10;\n        let fault_perc = 30;\n\n        let mut highway_test_harness = HighwayTestHarnessBuilder::new()\n            .max_faulty_validators(3)\n            .faulty_weight_perc(fault_perc)\n            .fault_type(DesFault::PermanentlyMute)\n            .consensus_values_count(cv_count)\n            .weight_limits(100, 120)\n            .build(&mut rng)\n            .expect(\"Construction was successful\");\n\n        crank_until(&mut highway_test_harness, &mut rng, |hth| {\n            // Stop the test when each node finalized expected number of consensus values.\n            // Note that we're not testing the order of finalization here.\n            // It will be tested later – it's not the condition for stopping the test run.\n            hth.virtual_net\n                .validators()\n                .all(|v| v.finalized_count() == cv_count as usize)\n        })\n        .unwrap();\n\n        let handle = highway_test_harness.mutable_handle();\n        let validators = handle.validators();\n\n        let finalized_values: Vec<Vec<ConsensusValue>> = validators\n            .map(|v| 
v.finalized_values().cloned().collect::<Vec<_>>())\n            .collect();\n\n        assert_eq_vectors(\n            finalized_values,\n            \"Nodes finalized different consensus values.\",\n        );\n    }\n\n    #[test]\n    fn liveness_test_some_equivocate() {\n        let _ = logging::init_with_config(&LoggingConfig::new(LoggingFormat::Text, true, true));\n\n        let mut rng = crate::new_rng();\n        let cv_count = 10;\n        let fault_perc = 10;\n\n        let mut highway_test_harness = HighwayTestHarnessBuilder::new()\n            .max_faulty_validators(3)\n            .faulty_weight_perc(fault_perc)\n            .fault_type(DesFault::Equivocate)\n            .consensus_values_count(cv_count)\n            .weight_limits(100, 150)\n            .build(&mut rng)\n            .expect(\"Construction was successful\");\n\n        crank_until(&mut highway_test_harness, &mut rng, |hth| {\n            // Stop the test when each node finalized expected number of consensus values.\n            // Note that we're not testing the order of finalization here.\n            // It will be tested later – it's not the condition for stopping the test run.\n            hth.virtual_net\n                .validators()\n                .all(|v| v.finalized_count() == cv_count as usize)\n        })\n        .unwrap();\n\n        let handle = highway_test_harness.mutable_handle();\n        let validators = handle.validators();\n\n        let (finalized_values, equivocators_seen): (\n            Vec<Vec<ConsensusValue>>,\n            Vec<HashSet<ValidatorId>>,\n        ) = validators\n            .map(|v| {\n                (\n                    v.finalized_values().cloned().collect::<Vec<_>>(),\n                    v.validator()\n                        .highway()\n                        .validators_with_evidence()\n                        .cloned()\n                        .collect::<HashSet<_>>(),\n                )\n            })\n            .unzip();\n\n       
 assert_eq_vectors(\n            finalized_values,\n            \"Nodes finalized different consensus values.\",\n        );\n        assert_eq_vectors(\n            equivocators_seen,\n            \"Nodes saw different set of equivocators.\",\n        );\n    }\n\n    #[test]\n    fn pause_if_too_many_are_offline() {\n        let _ = logging::init_with_config(&LoggingConfig::new(LoggingFormat::Text, true, true));\n\n        let mut rng = crate::new_rng();\n        let cv_count = 10u8;\n        let max_round_len = TEST_MIN_ROUND_LEN * 2;\n\n        let start_mute = Timestamp::zero() + max_round_len * 2;\n        let should_start_pause = start_mute + max_round_len * 4;\n        let stop_mute = should_start_pause + max_round_len * 3;\n\n        let params = test_params()\n            .with_max_round_len(max_round_len)\n            .with_end_height(cv_count as u64);\n        let mut test_harness = HighwayTestHarnessBuilder::new()\n            .max_faulty_validators(3)\n            .faulty_weight_perc(40) // Too many mute validators to be live...\n            .fault_type(DesFault::TemporarilyMute {\n                from: start_mute,\n                till: stop_mute,\n            }) // ...but just temporarily mute.\n            .consensus_values_count(cv_count)\n            .weight_limits(100, 120)\n            .params(params)\n            .build(&mut rng)\n            .expect(\"Construction was successful\");\n\n        // Three max-length rounds after 40% went silent, the honest validators should stop voting.\n        crank_until_time(&mut test_harness, &mut rng, should_start_pause).unwrap();\n\n        // They should all see the same number of finalized blocks.\n        let handle = test_harness.mutable_handle();\n        let first_validator = handle.correct_validators().next().unwrap();\n        let finalized_before_pause = first_validator.finalized_count();\n        let unit_count_before_pause = first_validator.unit_count();\n        
assert_ne!(finalized_before_pause, 0);\n        assert!(finalized_before_pause < cv_count as usize);\n        for v in handle.correct_validators() {\n            assert_eq!(finalized_before_pause, v.finalized_count());\n            assert_eq!(unit_count_before_pause, v.unit_count());\n        }\n\n        // Much later, just before the missing 40% come back online...\n        crank_until_time(&mut test_harness, &mut rng, stop_mute).unwrap();\n\n        // ...there should still be no new unit yet.\n        for v in test_harness.mutable_handle().correct_validators() {\n            assert_eq!(finalized_before_pause, v.finalized_count());\n            assert_eq!(unit_count_before_pause, v.unit_count());\n        }\n\n        // After that, however, the network should resume...\n        crank_until_finalized(&mut test_harness, &mut rng, cv_count as usize).unwrap();\n\n        // ...and finalize the remaining blocks.\n        let finalized_values = test_harness\n            .mutable_handle()\n            .validators()\n            .map(|v| v.finalized_values().cloned().collect_vec())\n            .collect_vec();\n\n        assert_eq_vectors(\n            finalized_values,\n            \"Nodes finalized different consensus values.\",\n        );\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/highway_core/state/block.rs",
    "content": "use datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse super::State;\nuse crate::components::consensus::traits::Context;\n\n/// A block: Chains of blocks are the consensus values in the CBC Casper sense.\n#[derive(Clone, DataSize, Debug, Eq, PartialEq, Serialize, Deserialize)]\npub struct Block<C>\nwhere\n    C: Context,\n{\n    /// The total number of ancestors, i.e. the height in the blockchain.\n    pub height: u64,\n    /// The payload, e.g. a list of transactions.\n    pub value: C::ConsensusValue,\n    /// A skip list index of the block's ancestors.\n    ///\n    /// For every `p = 1 << i` that divides `height`, this contains an `i`-th entry pointing to the\n    /// ancestor with `height - p`.\n    pub skip_idx: Vec<C::Hash>,\n}\n\nimpl<C: Context> Block<C> {\n    /// Creates a new block with the given parent and values. Panics if parent does not exist.\n    pub(crate) fn new(\n        parent_hash: Option<C::Hash>,\n        value: C::ConsensusValue,\n        state: &State<C>,\n    ) -> Block<C> {\n        let (parent, mut skip_idx) = match parent_hash {\n            None => return Block::initial(value),\n            Some(hash) => (state.block(&hash), vec![hash]),\n        };\n        // In a trillion years, we need to make block height u128.\n        #[allow(clippy::arithmetic_side_effects)]\n        let height = parent.height + 1;\n        for i in 0..height.trailing_zeros() as usize {\n            let ancestor = state.block(&skip_idx[i]);\n            skip_idx.push(ancestor.skip_idx[i]);\n        }\n        Block {\n            height,\n            value,\n            skip_idx,\n        }\n    }\n\n    /// Returns the block's parent, or `None` if it has height 0.\n    pub fn parent(&self) -> Option<&C::Hash> {\n        self.skip_idx.first()\n    }\n\n    fn initial(value: C::ConsensusValue) -> Block<C> {\n        Block {\n            height: 0,\n            value,\n            skip_idx: vec![],\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/highway_core/state/index_panorama.rs",
    "content": "use std::fmt::Debug;\n\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse crate::components::consensus::{\n    highway_core::state::{Observation, Panorama, State},\n    traits::Context,\n    utils::ValidatorMap,\n};\n\npub(crate) type IndexPanorama = ValidatorMap<IndexObservation>;\n\n/// The observed behavior of a validator at some point in time.\n#[derive(Clone, Copy, DataSize, Eq, PartialEq, Serialize, Deserialize, Hash)]\npub(crate) enum IndexObservation {\n    /// We have evidence that the validator is faulty.\n    Faulty,\n    /// The next sequence number we need, i.e. the lowest one that is missing from our protocol\n    /// state. This is equal to the total number of units we have from that validator, and one more\n    /// than the highest sequence number we have.\n    NextSeq(u64),\n}\n\nimpl Debug for IndexObservation {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        match self {\n            IndexObservation::Faulty => write!(f, \"F\"),\n            IndexObservation::NextSeq(next_seq) => write!(f, \"{:?}\", next_seq),\n        }\n    }\n}\n\nimpl IndexPanorama {\n    /// Creates an instance of `IndexPanorama` out of a panorama.\n    pub(crate) fn from_panorama<'a, C: Context>(\n        panorama: &'a Panorama<C>,\n        state: &'a State<C>,\n    ) -> Self {\n        let mut validator_map: ValidatorMap<IndexObservation> =\n            ValidatorMap::from(vec![IndexObservation::NextSeq(0); panorama.len()]);\n        for (vid, obs) in panorama.enumerate() {\n            let index_obs = match obs {\n                Observation::None => IndexObservation::NextSeq(0),\n                Observation::Correct(hash) => IndexObservation::NextSeq(\n                    state\n                        .maybe_unit(hash)\n                        .map_or(0, |unit| unit.seq_number.saturating_add(1)),\n                ),\n                Observation::Faulty => IndexObservation::Faulty,\n            };\n     
       validator_map[vid] = index_obs;\n        }\n        validator_map\n    }\n}\n\nmod specimen_support {\n    use crate::utils::specimen::{Cache, LargestSpecimen, SizeEstimator};\n\n    use super::IndexObservation;\n\n    impl LargestSpecimen for IndexObservation {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            // This is the largest variant since the other one is empty:\n            IndexObservation::NextSeq(LargestSpecimen::largest_specimen(estimator, cache))\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/highway_core/state/panorama.rs",
    "content": "use std::{collections::HashSet, fmt::Debug};\n\nuse itertools::Itertools;\n\nuse casper_types::Timestamp;\n\nuse crate::components::consensus::{\n    highway_core::{\n        highway::Dependency,\n        state::{State, Unit, UnitError},\n    },\n    traits::Context,\n    utils::{ValidatorIndex, ValidatorMap},\n};\n\n#[allow(clippy::arithmetic_side_effects)]\nmod relaxed {\n    // This module exists solely to exempt the `EnumDiscriminants` macro generated code from the\n    // module-wide `clippy::arithmetic_side_effects` lint.\n\n    use datasize::DataSize;\n    use serde::{Deserialize, Serialize};\n    use strum::EnumDiscriminants;\n\n    use crate::components::consensus::traits::Context;\n\n    /// The observed behavior of a validator at some point in time.\n    #[derive(Clone, DataSize, Eq, PartialEq, Serialize, Deserialize, Hash, EnumDiscriminants)]\n    #[serde(bound(\n        serialize = \"C::Hash: Serialize\",\n        deserialize = \"C::Hash: Deserialize<'de>\",\n    ))]\n    #[strum_discriminants(derive(strum::EnumIter))]\n    pub enum Observation<C>\n    where\n        C: Context,\n    {\n        /// No unit by that validator was observed yet.\n        None,\n        /// The validator's latest unit.\n        Correct(C::Hash),\n        /// The validator has been seen\n        Faulty,\n    }\n}\npub use relaxed::{Observation, ObservationDiscriminants};\n\nimpl<C: Context> Debug for Observation<C>\nwhere\n    C::Hash: Debug,\n{\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        match self {\n            Observation::None => write!(f, \"N\"),\n            Observation::Faulty => write!(f, \"F\"),\n            Observation::Correct(hash) => write!(f, \"{:?}\", hash),\n        }\n    }\n}\n\nimpl<C: Context> Observation<C> {\n    /// Returns the unit hash, if this is a correct observation.\n    pub fn correct(&self) -> Option<&C::Hash> {\n        match self {\n            Self::None | Self::Faulty => None,\n        
    Self::Correct(hash) => Some(hash),\n        }\n    }\n\n    /// Returns `true` if this `Observation` is an `Observation::Correct`.\n    pub fn is_correct(&self) -> bool {\n        match self {\n            Self::None | Self::Faulty => false,\n            Self::Correct(_) => true,\n        }\n    }\n\n    /// Returns `true` if this `Observation` is an `Observation::Faulty`.\n    pub fn is_faulty(&self) -> bool {\n        match self {\n            Self::Faulty => true,\n            Self::None | Self::Correct(_) => false,\n        }\n    }\n\n    /// Returns `true` if this `Observation` is an `Observation::None`.\n    pub fn is_none(&self) -> bool {\n        match self {\n            Self::None => true,\n            Self::Faulty | Self::Correct(_) => false,\n        }\n    }\n\n    /// Returns whether `self` can come later in time than `other`.\n    fn geq(&self, state: &State<C>, other: &Observation<C>) -> bool {\n        match (self, other) {\n            (Observation::Faulty, _) | (_, Observation::None) => true,\n            (Observation::Correct(hash0), Observation::Correct(hash1)) => {\n                hash0 == hash1 || state.unit(hash0).panorama.sees_correct(state, hash1)\n            }\n            (_, _) => false,\n        }\n    }\n\n    /// Returns the missing dependency if `self` is referring to a vertex we don't know yet.\n    fn missing_dep(&self, state: &State<C>, idx: ValidatorIndex) -> Option<Dependency<C>> {\n        match self {\n            Observation::Faulty if !state.is_faulty(idx) => Some(Dependency::Evidence(idx)),\n            Observation::Correct(hash) if !state.has_unit(hash) => Some(Dependency::Unit(*hash)),\n            _ => None,\n        }\n    }\n}\n\n/// The observed behavior of all validators at some point in time.\npub type Panorama<C> = ValidatorMap<Observation<C>>;\n\nimpl<C: Context> Panorama<C> {\n    /// Creates a new, empty panorama.\n    pub(crate) fn new(num_validators: usize) -> Panorama<C> {\n        
Panorama::from(vec![Observation::None; num_validators])\n    }\n\n    /// Returns `true` if there is at least one correct observation.\n    pub fn has_correct(&self) -> bool {\n        self.iter().any(Observation::is_correct)\n    }\n\n    /// Returns an iterator over all honest validators' latest units.\n    pub fn iter_correct<'a>(&'a self, state: &'a State<C>) -> impl Iterator<Item = &'a Unit<C>> {\n        let to_unit = move |vh: &C::Hash| state.unit(vh);\n        self.iter_correct_hashes().map(to_unit)\n    }\n\n    /// Returns an iterator over all honest validators' latest units' hashes.\n    pub fn iter_correct_hashes(&self) -> impl Iterator<Item = &C::Hash> {\n        self.iter().filter_map(Observation::correct)\n    }\n\n    /// Returns an iterator over all faulty validators' indices.\n    pub fn iter_faulty(&self) -> impl Iterator<Item = ValidatorIndex> + '_ {\n        self.enumerate()\n            .filter(|(_, obs)| obs.is_faulty())\n            .map(|(i, _)| i)\n    }\n\n    /// Returns an iterator over all faulty validators' indices.\n    pub fn iter_none(&self) -> impl Iterator<Item = ValidatorIndex> + '_ {\n        self.enumerate()\n            .filter(|(_, obs)| obs.is_none())\n            .map(|(i, _)| i)\n    }\n\n    /// Returns the correct sequence number for a new unit by `vidx` with this panorama.\n    pub(crate) fn next_seq_num(&self, state: &State<C>, vidx: ValidatorIndex) -> u64 {\n        // In a trillion years, we need to make seq number u128.\n        #[allow(clippy::arithmetic_side_effects)]\n        let add1 = |vh: &C::Hash| state.unit(vh).seq_number + 1;\n        self[vidx].correct().map_or(0, add1)\n    }\n\n    /// Returns `true` if `self` sees the creator of `hash` as correct, and sees that unit.\n    pub fn sees_correct(&self, state: &State<C>, hash: &C::Hash) -> bool {\n        let unit = state.unit(hash);\n        let can_see = |latest_hash: &C::Hash| {\n            Some(hash) == state.find_in_swimlane(latest_hash, 
unit.seq_number)\n        };\n        self.get(unit.creator)\n            .and_then(Observation::correct)\n            .is_some_and(can_see)\n    }\n\n    /// Returns `true` if `self` sees the unit with the specified `hash`.\n    pub fn sees(&self, state: &State<C>, hash_to_be_found: &C::Hash) -> bool {\n        let unit_to_be_found = state.unit(hash_to_be_found);\n        let mut visited = HashSet::new();\n        let mut to_visit: Vec<_> = self.iter_correct_hashes().collect();\n        while let Some(hash) = to_visit.pop() {\n            if visited.insert(hash) {\n                if hash == hash_to_be_found {\n                    return true;\n                }\n                let unit = state.unit(hash);\n                // If the creator is seen as faulty, we need to continue traversing the whole DAG.\n                // If it is correct, we only need to follow their own units.\n                match &unit.panorama[unit_to_be_found.creator] {\n                    Observation::Faulty => to_visit.extend(unit.panorama.iter_correct_hashes()),\n                    Observation::Correct(prev_hash) => to_visit.push(prev_hash),\n                    Observation::None => (),\n                }\n            }\n        }\n        false\n    }\n\n    /// Merges two panoramas into a new one.\n    pub(crate) fn merge(&self, state: &State<C>, other: &Panorama<C>) -> Panorama<C> {\n        let merge_obs = |observations: (&Observation<C>, &Observation<C>)| match observations {\n            (Observation::Faulty, _) | (_, Observation::Faulty) => Observation::Faulty,\n            (Observation::None, obs) | (obs, Observation::None) => obs.clone(),\n            (obs0, Observation::Correct(vh1)) if self.sees_correct(state, vh1) => obs0.clone(),\n            (Observation::Correct(vh0), obs1) if other.sees_correct(state, vh0) => obs1.clone(),\n            (Observation::Correct(_), Observation::Correct(_)) => Observation::Faulty,\n        };\n        let observations = 
self.iter().zip(other).map(merge_obs).collect_vec();\n        Panorama::from(observations)\n    }\n\n    /// Returns the panorama seeing all units seen by `self` with a timestamp no later than\n    /// `timestamp`. Accusations are preserved regardless of the evidence's timestamp.\n    pub fn cutoff(&self, state: &State<C>, timestamp: Timestamp) -> Panorama<C> {\n        let obs_cutoff = |obs: &Observation<C>| match obs {\n            Observation::Correct(vhash) => state\n                .swimlane(vhash)\n                .find(|(_, unit)| unit.timestamp <= timestamp)\n                .map(|(vh, _)| *vh)\n                .map_or(Observation::None, Observation::Correct),\n            obs @ (Observation::None | Observation::Faulty) => obs.clone(),\n        };\n        Panorama::from(self.iter().map(obs_cutoff).collect_vec())\n    }\n\n    /// Returns the first missing dependency, or `None` if all are satisfied.\n    pub(crate) fn missing_dependency(&self, state: &State<C>) -> Option<Dependency<C>> {\n        let missing_dep = |(idx, obs): (_, &Observation<C>)| obs.missing_dep(state, idx);\n        self.enumerate().filter_map(missing_dep).next()\n    }\n\n    /// Returns whether `self` can possibly come later in time than `other`, i.e. it can see\n    /// every honest message and every fault seen by `other`.\n    pub fn geq(&self, state: &State<C>, other: &Panorama<C>) -> bool {\n        let mut pairs_iter = self.iter().zip(other);\n        pairs_iter.all(|(obs_self, obs_other)| obs_self.geq(state, obs_other))\n    }\n\n    /// Returns `Ok(())` if `self` is valid, i.e. 
it contains the latest units of some substate.\n    ///\n    /// Panics if the unit has missing dependencies.\n    pub(super) fn validate(&self, state: &State<C>) -> Result<(), UnitError> {\n        for (idx, observation) in self.enumerate() {\n            if let Some(hash) = observation.correct() {\n                let unit = state.unit(hash);\n                if unit.creator != idx {\n                    return Err(UnitError::PanoramaIndex(unit.creator, idx));\n                }\n                if !self.geq(state, &unit.panorama) {\n                    return Err(UnitError::InconsistentPanorama(idx));\n                }\n            }\n        }\n        Ok(())\n    }\n}\n\nmod specimen_support {\n    use crate::{\n        components::consensus::ClContext,\n        utils::specimen::{largest_variant, Cache, LargestSpecimen, SizeEstimator},\n    };\n\n    use super::{Observation, ObservationDiscriminants};\n\n    impl LargestSpecimen for Observation<ClContext> {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            if let Some(item) = cache.get::<Self>() {\n                return item.clone();\n            }\n\n            let correct = LargestSpecimen::largest_specimen(estimator, cache);\n            cache\n                .set(largest_variant(estimator, |variant| match variant {\n                    ObservationDiscriminants::None => Observation::None,\n                    ObservationDiscriminants::Correct => Observation::Correct(correct),\n                    ObservationDiscriminants::Faulty => Observation::Faulty,\n                }))\n                .clone()\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/highway_core/state/params.rs",
    "content": "use datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse super::{TimeDiff, Timestamp};\n\n/// Protocol parameters for Highway.\n#[derive(Debug, DataSize, Clone, Serialize, Deserialize)]\npub struct Params {\n    seed: u64,\n    min_round_len: TimeDiff,\n    max_round_len: TimeDiff,\n    init_round_len: TimeDiff,\n    end_height: u64,\n    start_timestamp: Timestamp,\n    end_timestamp: Timestamp,\n    endorsement_evidence_limit: u64,\n}\n\nimpl Params {\n    /// Creates a new set of Highway protocol parameters.\n    ///\n    /// Arguments:\n    ///\n    /// * `seed`: The random seed.\n    /// * `min_round_len`: The minimum round length.\n    /// * `max_round_len`: The maximum round length.\n    /// * `end_height`, `end_timestamp`: The last block will be the first one that has at least the\n    ///   specified height _and_ is no earlier than the specified timestamp. No children of this\n    ///   block can be proposed.\n    #[allow(clippy::too_many_arguments)] // FIXME\n    pub(crate) fn new(\n        seed: u64,\n        min_round_len: TimeDiff,\n        max_round_len: TimeDiff,\n        init_round_len: TimeDiff,\n        end_height: u64,\n        start_timestamp: Timestamp,\n        end_timestamp: Timestamp,\n        endorsement_evidence_limit: u64,\n    ) -> Params {\n        assert_ne!(min_round_len.millis(), 0); // Highway::new_boxed uses at least 1ms.\n        Params {\n            seed,\n            min_round_len,\n            max_round_len,\n            init_round_len,\n            end_height,\n            start_timestamp,\n            end_timestamp,\n            endorsement_evidence_limit,\n        }\n    }\n\n    /// Returns the random seed.\n    pub fn seed(&self) -> u64 {\n        self.seed\n    }\n\n    /// Returns the minimum round length. 
This is always greater than 0.\n    pub fn min_round_length(&self) -> TimeDiff {\n        self.min_round_len\n    }\n\n    /// Returns the maximum round length.\n    pub fn max_round_length(&self) -> TimeDiff {\n        self.max_round_len\n    }\n\n    /// Returns the initial round length.\n    pub fn init_round_len(&self) -> TimeDiff {\n        self.init_round_len\n    }\n\n    /// Returns the minimum height of the last block.\n    pub fn end_height(&self) -> u64 {\n        self.end_height\n    }\n\n    /// Returns the start timestamp of the era.\n    pub fn start_timestamp(&self) -> Timestamp {\n        self.start_timestamp\n    }\n\n    /// Returns the minimum timestamp of the last block.\n    pub fn end_timestamp(&self) -> Timestamp {\n        self.end_timestamp\n    }\n\n    /// Returns the maximum number of additional units included in evidence for conflicting\n    /// endorsements. If you endorse two conflicting forks at sequence numbers that differ by more\n    /// than this, you get away with it and are not marked faulty.\n    pub fn endorsement_evidence_limit(&self) -> u64 {\n        self.endorsement_evidence_limit\n    }\n}\n\n#[cfg(test)]\nimpl Params {\n    pub(crate) fn with_endorsement_evidence_limit(mut self, new_limit: u64) -> Params {\n        self.endorsement_evidence_limit = new_limit;\n        self\n    }\n\n    pub(crate) fn with_max_round_len(mut self, new_max_round_len: TimeDiff) -> Params {\n        self.max_round_len = new_max_round_len;\n        self\n    }\n\n    pub(crate) fn with_end_height(mut self, new_end_height: u64) -> Params {\n        self.end_height = new_end_height;\n        self\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/highway_core/state/tallies.rs",
    "content": "use std::{\n    collections::BTreeMap,\n    iter::{self, Extend, FromIterator},\n    ops::Index,\n};\n\nuse crate::components::consensus::{highway_core::state::State, traits::Context, utils::Weight};\n\n/// A tally of votes at a specific height. This is never empty: It contains at least one vote.\n///\n/// It must always contain at most one vote from each validator. In particular, the sum of the\n/// weights must be at most the total of all validators' weights.\n#[derive(Clone)]\npub(crate) struct Tally<'a, C: Context> {\n    /// The block with the highest weight, and the highest hash if there's a tie.\n    max: (Weight, &'a C::Hash),\n    /// The total vote weight for each block.\n    votes: BTreeMap<&'a C::Hash, Weight>,\n}\n\nimpl<'a, C: Context> Extend<(&'a C::Hash, Weight)> for Tally<'a, C> {\n    fn extend<T: IntoIterator<Item = (&'a C::Hash, Weight)>>(&mut self, iter: T) {\n        for (bhash, w) in iter {\n            self.add(bhash, w);\n        }\n    }\n}\n\nimpl<'a, 'b, C: Context> IntoIterator for &'b Tally<'a, C> {\n    type Item = (&'a C::Hash, Weight);\n    type IntoIter = Box<dyn Iterator<Item = Self::Item> + 'b>;\n\n    fn into_iter(self) -> Self::IntoIter {\n        Box::new(self.votes.iter().map(|(b, w)| (*b, *w)))\n    }\n}\n\nimpl<'a, C: Context> Tally<'a, C> {\n    /// Returns a new tally with a single entry.\n    fn new(bhash: &'a C::Hash, w: Weight) -> Self {\n        Tally {\n            max: (w, bhash),\n            votes: iter::once((bhash, w)).collect(),\n        }\n    }\n\n    /// Creates a tally from a list of units. 
Returns `None` if the iterator is empty.\n    fn try_from_iter<T: IntoIterator<Item = (&'a C::Hash, Weight)>>(iter: T) -> Option<Self> {\n        let mut iter = iter.into_iter();\n        let (bhash, w) = iter.next()?;\n        let mut tally = Tally::new(bhash, w);\n        tally.extend(iter);\n        Some(tally)\n    }\n\n    /// Returns a new tally with the same votes, but one level lower: a vote for a block counts as\n    /// a vote for that block's parent. Panics if called on level 0.\n    ///\n    /// This preserves the total weight, and the set of validators who contribute to that weight.\n    fn parents(&self, state: &'a State<C>) -> Self {\n        let to_parent = |(h, w): (&&'a C::Hash, &Weight)| (state.block(*h).parent().unwrap(), *w);\n        Self::try_from_iter(self.votes.iter().map(to_parent)).unwrap() // Tally is never empty.\n    }\n\n    /// Adds a vote for a block to the tally, possibly updating the current maximum.\n    fn add(&mut self, bhash: &'a C::Hash, weight: Weight) {\n        let w = self.votes.entry(bhash).or_default();\n        *w = (*w).saturating_add(weight);\n        self.max = (*w, bhash).max(self.max);\n    }\n\n    /// Returns the total weight of the votes included in this tally.\n    fn weight(&self) -> Weight {\n        self.votes.values().cloned().sum()\n    }\n\n    /// Returns the maximum voting weight a single block received.\n    fn max_w(&self) -> Weight {\n        self.max.0\n    }\n\n    /// Returns the block hash that received the most votes; the highest hash in case of a tie.\n    fn max_bhash(&self) -> &'a C::Hash {\n        self.max.1\n    }\n\n    /// Returns a tally containing only the votes for descendants of `bhash`.\n    ///\n    /// The total weight of the result is less than or equal to the total weight of `self`, and the\n    /// set of validators contributing to it is a subset of the ones contributing to `self`.\n    fn filter_descendants(\n        self,\n        height: u64,\n        bhash: &'a C::Hash,\n  
      state: &'a State<C>,\n    ) -> Option<Self> {\n        let iter = self.votes.into_iter();\n        Self::try_from_iter(\n            iter.filter(|&(b, _)| state.find_ancestor_proposal(b, height) == Some(bhash)),\n        )\n    }\n}\n\n/// A list of tallies by block height. The tally at each height contains only the units that point\n/// directly to a block at that height, not at a descendant.\n///\n/// Each validator must contribute their weight to at most one entry: The height of the block that\n/// they most recently voted for.\npub(crate) struct Tallies<'a, C: Context>(BTreeMap<u64, Tally<'a, C>>);\n\nimpl<C: Context> Default for Tallies<'_, C> {\n    fn default() -> Self {\n        Tallies(BTreeMap::new())\n    }\n}\n\nimpl<'a, C: Context> Index<u64> for Tallies<'a, C> {\n    type Output = Tally<'a, C>;\n\n    fn index(&self, index: u64) -> &Self::Output {\n        &self.0[&index]\n    }\n}\n\nimpl<'a, C: Context> FromIterator<(u64, &'a C::Hash, Weight)> for Tallies<'a, C> {\n    fn from_iter<T: IntoIterator<Item = (u64, &'a C::Hash, Weight)>>(iter: T) -> Self {\n        let mut tallies = Self::default();\n        for (height, bhash, weight) in iter {\n            tallies.add(height, bhash, weight);\n        }\n        tallies\n    }\n}\n\nimpl<'a, C: Context> Tallies<'a, C> {\n    /// Returns the height and hash of a block that is an ancestor of the fork choice, and _not_ an\n    /// ancestor of all entries in `self`. 
Returns `None` if `self` is empty.\n    pub(crate) fn find_decided(&self, state: &'a State<C>) -> Option<(u64, &'a C::Hash)> {\n        let max_height = *self.0.keys().next_back()?;\n        let total_weight: Weight = self.0.values().map(Tally::weight).sum();\n        // In the loop, this will be the tally of all votes from higher than the current height.\n        let mut prev_tally = self[max_height].clone();\n        // Start from `max_height - 1` and find the greatest height where a decision can be made.\n        for height in (0..max_height).rev() {\n            // The tally at `height` is the sum of the parents of `height + 1` and the units that\n            // point directly to blocks at `height`.\n            let mut h_tally = prev_tally.parents(state);\n            if let Some(tally) = self.0.get(&height) {\n                h_tally.extend(tally);\n            }\n            // If any block received more than 50%, a decision can be made: Either that block is\n            // the fork choice, or we can pick its highest scoring child from `prev_tally`.\n            #[allow(clippy::arithmetic_side_effects)]\n            if h_tally.max_w() > total_weight / 2 {\n                // height < max_height, so height < u64::MAX\n                return Some(\n                    match prev_tally.filter_descendants(height, h_tally.max_bhash(), state) {\n                        Some(filtered) => (height + 1, filtered.max_bhash()),\n                        None => (height, h_tally.max_bhash()),\n                    },\n                );\n            }\n            prev_tally = h_tally;\n        }\n        // Even at level 0 no block received a majority. 
Pick the one with the highest weight.\n        Some((0, prev_tally.max_bhash()))\n    }\n\n    /// Removes all votes for blocks that are not descendants of `bhash`.\n    pub(crate) fn filter_descendants(\n        self,\n        height: u64,\n        bhash: &'a C::Hash,\n        state: &'a State<C>,\n    ) -> Self {\n        // Each tally will be filtered to remove blocks incompatible with `bhash`.\n        let map_compatible = |(h, t): (u64, Tally<'a, C>)| {\n            t.filter_descendants(height, bhash, state).map(|t| (h, t))\n        };\n        // All tallies at `height` and lower can be removed, too.\n        let relevant_heights = self.0.into_iter().rev().take_while(|(h, _)| *h > height);\n        Tallies(relevant_heights.filter_map(map_compatible).collect())\n    }\n\n    /// Adds an entry to the tally at the specified `height`.\n    fn add(&mut self, height: u64, bhash: &'a C::Hash, weight: Weight) {\n        self.0\n            .entry(height)\n            .and_modify(|tally| tally.add(bhash, weight))\n            .or_insert_with(|| Tally::new(bhash, weight));\n    }\n\n    /// Returns `true` if there are no tallies in this map.\n    pub(crate) fn is_empty(&self) -> bool {\n        self.0.is_empty()\n    }\n}\n\n#[cfg(test)]\n#[allow(clippy::arithmetic_side_effects)]\nmod tests {\n    use super::{\n        super::{tests::*, State},\n        *,\n    };\n\n    impl Tallies<'_, TestContext> {\n        /// Returns the number of tallies.\n        pub(crate) fn len(&self) -> usize {\n            self.0.len()\n        }\n    }\n\n    #[test]\n    fn tallies() -> Result<(), AddUnitError<TestContext>> {\n        let mut state = State::new_test(WEIGHTS, 0);\n\n        // Create blocks with scores as follows:\n        //\n        //          a0: 7 — a1: 3\n        //        /       \\\n        // b0: 12           b2: 4\n        //        \\\n        //          c0: 5 — c1: 5\n        let b0 = add_unit!(state, BOB, 0xB0; N, N, N)?;\n        let c0 = add_unit!(state, 
CAROL, 0xC0; N, b0, N)?;\n        let c1 = add_unit!(state, CAROL, 0xC1; N, b0, c0)?;\n        let a0 = add_unit!(state, ALICE, 0xA0; N, b0, N)?;\n        let b1 = add_unit!(state, BOB, None; a0, b0, N)?; // Just a ballot; not shown above.\n        let a1 = add_unit!(state, ALICE, 0xA1; a0, b1, c1)?;\n        let b2 = add_unit!(state, BOB, 0xB2; a0, b1, N)?;\n\n        // These are the entries of a panorama seeing `a1`, `b2` and `c0`.\n        let vote_entries = vec![\n            (1, &c0, Weight(5)),\n            (2, &a1, Weight(3)),\n            (2, &b2, Weight(4)),\n        ];\n        let tallies: Tallies<TestContext> = vote_entries.into_iter().collect();\n        assert_eq!(2, tallies.len());\n        assert_eq!(Weight(5), tallies[1].weight()); // Carol's unit is on height 1.\n        assert_eq!(Weight(7), tallies[2].weight()); // Alice's and Bob's units are on height 2.\n\n        // Compute the tally at height 1: Take the parents of the blocks Alice and Bob vote for...\n        let mut h1_tally = tallies[2].parents(&state);\n        // (Their units have the same parent: `a0`.)\n        assert_eq!(1, h1_tally.votes.len());\n        assert_eq!(Weight(7), h1_tally.votes[&a0]);\n        // ...and adding Carol's vote.\n        h1_tally.extend(&tallies[1]);\n        assert_eq!(2, h1_tally.votes.len());\n        assert_eq!(Weight(5), h1_tally.votes[&c0]);\n\n        // `find_decided` finds the fork choice in one step: On height 1, `a0` has the majority. 
On\n        // height 2, the child of `a0` with the highest score is `b2`.\n        assert_eq!(Some((2, &b2)), tallies.find_decided(&state));\n\n        // But let's filter at level 1, and keep only the children of `a0`:\n        let tallies = tallies.filter_descendants(1, &a0, &state);\n        assert_eq!(1, tallies.len());\n        assert_eq!(2, tallies[2].votes.len());\n        assert_eq!(Weight(3), tallies[2].votes[&a1]);\n        assert_eq!(Weight(4), tallies[2].votes[&b2]);\n        Ok(())\n    }\n\n    #[test]\n    fn tally_try_from_iter() {\n        let tally: Option<Tally<TestContext>> = Tally::try_from_iter(vec![]);\n        assert!(tally.is_none());\n        let votes = vec![\n            (&10, Weight(2)),\n            (&20, Weight(3)),\n            (&10, Weight(4)),\n            (&30, Weight(5)),\n            (&20, Weight(6)),\n        ];\n        let tally: Tally<TestContext> = Tally::try_from_iter(votes).unwrap();\n        assert_eq!(Weight(9), tally.max_w());\n        assert_eq!(&20, tally.max_bhash());\n        assert_eq!(Weight(20), tally.weight());\n        assert_eq!(Weight(6), tally.votes[&10]);\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/highway_core/state/tests.rs",
    "content": "#![allow(unused_qualifications)] // This is to suppress warnings originating in the test macros.\n#![allow(clippy::arithmetic_side_effects)] // Overflows in tests would panic anyway.\n\nuse std::{\n    collections::{hash_map::DefaultHasher, BTreeSet},\n    hash::Hasher,\n};\n\nuse datasize::DataSize;\n\nuse super::*;\nuse crate::components::consensus::{\n    highway_core::{\n        evidence::EvidenceError,\n        highway::Dependency,\n        highway_testing::{TEST_ENDORSEMENT_EVIDENCE_LIMIT, TEST_INSTANCE_ID},\n    },\n    traits::{ConsensusValueT, ValidatorSecret},\n};\n\npub(crate) const WEIGHTS: &[Weight] = &[Weight(3), Weight(4), Weight(5)];\n\npub(crate) const ALICE: ValidatorIndex = ValidatorIndex(0);\npub(crate) const BOB: ValidatorIndex = ValidatorIndex(1);\npub(crate) const CAROL: ValidatorIndex = ValidatorIndex(2);\npub(crate) const DAN: ValidatorIndex = ValidatorIndex(3);\npub(crate) const ERIC: ValidatorIndex = ValidatorIndex(4);\npub(crate) const FRANK: ValidatorIndex = ValidatorIndex(5);\npub(crate) const GINA: ValidatorIndex = ValidatorIndex(6);\npub(crate) const HANNA: ValidatorIndex = ValidatorIndex(7);\n\npub(crate) const N: Observation<TestContext> = Observation::None;\npub(crate) const F: Observation<TestContext> = Observation::Faulty;\n\nconst TEST_MIN_ROUND_LEN: TimeDiff = TimeDiff::from_millis(1 << 4);\nconst TEST_MAX_ROUND_LEN: TimeDiff = TimeDiff::from_millis(1 << 19);\nconst TEST_INIT_ROUND_LEN: TimeDiff = TimeDiff::from_millis(1 << 4);\nconst TEST_ERA_HEIGHT: u64 = 5;\n\n#[derive(Clone, DataSize, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]\npub(crate) struct TestContext;\n\n#[derive(Clone, DataSize, Debug, Eq, PartialEq)]\npub(crate) struct TestSecret(pub(crate) u32);\n\nimpl ValidatorSecret for TestSecret {\n    type Hash = u64;\n    type Signature = u64;\n\n    fn sign(&self, data: &Self::Hash) -> Self::Signature {\n        data + u64::from(self.0)\n    }\n}\n\npub(crate) const ALICE_SEC: TestSecret = 
TestSecret(0);\npub(crate) const BOB_SEC: TestSecret = TestSecret(1);\npub(crate) const CAROL_SEC: TestSecret = TestSecret(2);\npub(crate) const DAN_SEC: TestSecret = TestSecret(3);\n\nimpl ConsensusValueT for u32 {\n    fn needs_validation(&self) -> bool {\n        false\n    }\n}\n\nimpl Context for TestContext {\n    type ConsensusValue = u32;\n    type ValidatorId = u32;\n    type ValidatorSecret = TestSecret;\n    type Signature = u64;\n    type Hash = u64;\n    type InstanceId = u64;\n\n    fn hash(data: &[u8]) -> Self::Hash {\n        let mut hasher = DefaultHasher::new();\n        hasher.write(data);\n        hasher.finish()\n    }\n\n    fn verify_signature(\n        hash: &Self::Hash,\n        public_key: &Self::ValidatorId,\n        signature: &<Self::ValidatorSecret as ValidatorSecret>::Signature,\n    ) -> bool {\n        let computed_signature = hash + u64::from(*public_key);\n        computed_signature == *signature\n    }\n}\n\nimpl From<<TestContext as Context>::Hash> for Observation<TestContext> {\n    fn from(vhash: <TestContext as Context>::Hash) -> Self {\n        Observation::Correct(vhash)\n    }\n}\n\n/// Returns the cause of the error, dropping the `WireUnit`.\nfn unit_err(err: AddUnitError<TestContext>) -> UnitError {\n    err.cause\n}\n\n/// An error that occurred when trying to add a unit.\n#[derive(Debug, Error)]\n#[error(\"{:?}\", .cause)]\npub(crate) struct AddUnitError<C: Context> {\n    /// The invalid unit that was not added to the protocol state.\n    pub(crate) swunit: SignedWireUnit<C>,\n    /// The reason the unit is invalid.\n    #[source]\n    pub(crate) cause: UnitError,\n}\n\nimpl<C: Context> SignedWireUnit<C> {\n    fn with_error(self, cause: UnitError) -> AddUnitError<C> {\n        AddUnitError {\n            swunit: self,\n            cause,\n        }\n    }\n}\n\npub(crate) fn test_params(seed: u64) -> Params {\n    Params::new(\n        seed,\n        TEST_MIN_ROUND_LEN,\n        TEST_MAX_ROUND_LEN,\n        
TEST_INIT_ROUND_LEN,\n        TEST_ERA_HEIGHT,\n        Timestamp::from(0),\n        Timestamp::from(0),\n        TEST_ENDORSEMENT_EVIDENCE_LIMIT,\n    )\n}\n\nimpl State<TestContext> {\n    /// Returns a new `State` with `TestContext` parameters suitable for tests.\n    pub(crate) fn new_test(weights: &[Weight], seed: u64) -> Self {\n        State::new(weights, test_params(seed), vec![], vec![])\n    }\n\n    /// Adds the unit to the protocol state, or returns an error if it is invalid.\n    /// Panics if dependencies are not satisfied.\n    pub(crate) fn add_unit(\n        &mut self,\n        swunit: SignedWireUnit<TestContext>,\n    ) -> Result<(), AddUnitError<TestContext>> {\n        if let Err(err) = self\n            .pre_validate_unit(&swunit)\n            .and_then(|()| self.validate_unit(&swunit))\n        {\n            return Err(swunit.with_error(err));\n        }\n        assert_eq!(None, swunit.wire_unit().panorama.missing_dependency(self));\n        assert_eq!(None, self.needs_endorsements(&swunit));\n        self.add_valid_unit(swunit);\n        Ok(())\n    }\n}\n\n#[test]\nfn add_unit() -> Result<(), AddUnitError<TestContext>> {\n    let mut state = State::new_test(WEIGHTS, 0);\n\n    // Create units as follows; a0, b0 are blocks:\n    //\n    // Alice: a0 ————— a1\n    //                /\n    // Bob:   b0 —— b1\n    //          \\  /\n    // Carol:    c0\n    let a0 = add_unit!(state, ALICE, 0xA; N, N, N)?;\n    let b0 = add_unit!(state, BOB, 48, 0u8, 0xB; N, N, N)?;\n    let c0 = add_unit!(state, CAROL, 49, 0u8, None; N, b0, N)?;\n    let b1 = add_unit!(state, BOB, 49, 0u8, None; N, b0, c0)?;\n    let _a1 = add_unit!(state, ALICE, None; a0, b1, c0)?;\n\n    // Wrong sequence number: Bob hasn't produced b2 yet.\n    let mut wunit = WireUnit {\n        panorama: panorama!(N, b1, c0),\n        creator: BOB,\n        instance_id: TEST_INSTANCE_ID,\n        value: None,\n        seq_number: 3,\n        timestamp: 51.into(),\n        round_exp: 
0u8,\n        endorsed: BTreeSet::new(),\n    };\n    let unit = SignedWireUnit::new(wunit.clone().into_hashed(), &BOB_SEC);\n    let maybe_err = state.add_unit(unit).err().map(unit_err);\n    assert_eq!(Some(UnitError::SequenceNumber), maybe_err);\n    // Still not valid: This would be the third unit in the first round.\n    wunit.seq_number = 2;\n    let unit = SignedWireUnit::new(wunit.into_hashed(), &BOB_SEC);\n    let maybe_err = state.add_unit(unit).err().map(unit_err);\n    assert_eq!(Some(UnitError::ThreeUnitsInRound), maybe_err);\n\n    // Inconsistent panorama: If you see b1, you have to see c0, too.\n    let maybe_err = add_unit!(state, CAROL, None; N, b1, N).err().map(unit_err);\n    assert_eq!(Some(UnitError::InconsistentPanorama(BOB)), maybe_err);\n    // You can't change the round length within a round.\n    let maybe_err = add_unit!(state, CAROL, 50, 1u8, None; N, b1, c0)\n        .err()\n        .map(unit_err);\n    assert_eq!(Some(UnitError::RoundLengthChangedWithinRound), maybe_err);\n    // And you can't make the round length too big\n    let maybe_err = add_unit!(state, CAROL, 50, 36u8, None; N, b1, c0)\n        .err()\n        .map(unit_err);\n    assert_eq!(Some(UnitError::RoundLengthGreaterThanMaximum), maybe_err);\n    // After the round from 48 to 64 has ended, the exponent can change.\n    let c1 = add_unit!(state, CAROL, 65, 1u8, None; N, b1, c0)?;\n\n    // Alice has not equivocated yet, and not produced message A1.\n    let missing = panorama!(F, b1, c0).missing_dependency(&state);\n    assert_eq!(Some(Dependency::Evidence(ALICE)), missing);\n    let missing = panorama!(42, b1, c0).missing_dependency(&state);\n    assert_eq!(Some(Dependency::Unit(42)), missing);\n\n    // Alice equivocates: ae1 doesn't see a1.\n    let ae1 = add_unit!(state, ALICE, 0xAE1; a0, b1, c0)?;\n    assert!(state.has_evidence(ALICE));\n    assert_eq!(panorama![F, b1, c1], *state.panorama());\n\n    let missing = panorama!(F, b1, 
c0).missing_dependency(&state);\n    assert_eq!(None, missing);\n    let missing = panorama!(ae1, b1, c0).missing_dependency(&state);\n    assert_eq!(None, missing);\n\n    // Bob can see the equivocation.\n    let b2 = add_unit!(state, BOB, None; F, b1, c0)?;\n\n    // The state's own panorama has been updated correctly.\n    assert_eq!(*state.panorama(), panorama!(F, b2, c1));\n    Ok(())\n}\n\n#[test]\nfn ban_and_mark_faulty() -> Result<(), AddUnitError<TestContext>> {\n    let params = Params::new(\n        0,\n        TimeDiff::from_millis(1 << 4),\n        TimeDiff::from_millis(1 << 19),\n        TimeDiff::from_millis(1 << 4),\n        u64::MAX,\n        Timestamp::zero(),\n        Timestamp::from(u64::MAX),\n        TEST_ENDORSEMENT_EVIDENCE_LIMIT,\n    );\n    // Everyone already knows Alice is faulty, so she is banned.\n    let mut state = State::new(WEIGHTS, params, vec![ALICE], vec![]);\n\n    assert_eq!(panorama![F, N, N], *state.panorama());\n    assert_eq!(Some(&Fault::Banned), state.maybe_fault(ALICE));\n    let err = unit_err(add_unit!(state, ALICE, 0xA; N, N, N).err().unwrap());\n    assert_eq!(UnitError::Banned, err);\n\n    state.mark_faulty(ALICE); // No change: Banned state is permanent.\n    assert_eq!(panorama![F, N, N], *state.panorama());\n    assert_eq!(Some(&Fault::Banned), state.maybe_fault(ALICE));\n    let err = unit_err(add_unit!(state, ALICE, 0xA; N, N, N).err().unwrap());\n    assert_eq!(UnitError::Banned, err);\n\n    // Now we also received external evidence (i.e. 
not in this instance) that Bob is faulty.\n    state.mark_faulty(BOB);\n    assert_eq!(panorama![F, F, N], *state.panorama());\n    assert_eq!(Some(&Fault::Indirect), state.maybe_fault(BOB));\n\n    // However, we still accept messages from Bob, since he is not banned.\n    add_unit!(state, BOB, 0xB; F, N, N)?;\n    Ok(())\n}\n\n#[test]\nfn find_in_swimlane() -> Result<(), AddUnitError<TestContext>> {\n    let mut state = State::new_test(WEIGHTS, 0);\n    let a0 = add_unit!(state, ALICE, 0xA; N, N, N)?;\n    let mut a = vec![a0];\n    for i in 1..10 {\n        let ai = add_unit!(state, ALICE, None; a[i - 1], N, N)?;\n        a.push(ai);\n    }\n\n    // The predecessor with sequence number i should always equal a[i].\n    for j in (a.len() - 2)..a.len() {\n        for i in 0..j {\n            assert_eq!(Some(&a[i]), state.find_in_swimlane(&a[j], i as u64));\n        }\n    }\n\n    // The skip list index of a[k] includes a[k - 2^i] for each i such that 2^i divides k.\n    assert_eq!(&[a[8]], &state.unit(&a[9]).skip_idx.as_ref());\n    assert_eq!(\n        &[a[7], a[6], a[4], a[0]],\n        &state.unit(&a[8]).skip_idx.as_ref()\n    );\n    Ok(())\n}\n\n#[test]\nfn fork_choice() -> Result<(), AddUnitError<TestContext>> {\n    let mut state = State::new_test(WEIGHTS, 0);\n\n    // Create blocks with scores as follows:\n    //\n    //          a0: 7 — a1: 3\n    //        /       \\\n    // b0: 12           b2: 4\n    //        \\\n    //          c0: 5 — c1: 5\n    let b0 = add_unit!(state, BOB, 0xB0; N, N, N)?;\n    let c0 = add_unit!(state, CAROL, 0xC0; N, b0, N)?;\n    let c1 = add_unit!(state, CAROL, 0xC1; N, b0, c0)?;\n    let a0 = add_unit!(state, ALICE, 0xA0; N, b0, N)?;\n    let b1 = add_unit!(state, BOB, None; a0, b0, N)?; // Just a ballot; not shown above.\n    let a1 = add_unit!(state, ALICE, 0xA1; a0, b1, c1)?;\n    let b2 = add_unit!(state, BOB, 0xB2; a0, b1, N)?;\n\n    // Alice built `a1` on top of `a0`, which had already 7 points.\n    
assert_eq!(Some(&a0), state.block(&state.unit(&a1).block).parent());\n    // The fork choice is now `b2`: At height 1, `a0` wins against `c0`.\n    // At height 2, `b2` wins against `a1`. `c1` has most points but is not a child of `a0`.\n    assert_eq!(Some(&b2), state.fork_choice(state.panorama()));\n    Ok(())\n}\n\n#[test]\nfn validate_lnc_no_equivocation() -> Result<(), AddUnitError<TestContext>> {\n    if !ENABLE_ENDORSEMENTS {\n        return Ok(());\n    }\n    let mut state = State::new_test(WEIGHTS, 0);\n\n    // No equivocations – incoming vote doesn't violate LNC.\n    // Create votes as follows; a0, b0 are blocks:\n    //\n    // Alice: a0 — a1\n    //           /\n    // Bob:   b0\n    let a0 = add_unit!(state, ALICE, 0xA; N, N, N)?;\n    let b0 = add_unit!(state, BOB, 0xB; N, N, N)?;\n\n    // a1 does not violate LNC\n    add_unit!(state, ALICE, None; a0, b0, N)?;\n    Ok(())\n}\n\n#[test]\nfn validate_lnc_fault_seen_directly() -> Result<(), AddUnitError<TestContext>> {\n    if !ENABLE_ENDORSEMENTS {\n        return Ok(());\n    }\n    // Equivocation cited by one honest validator in the vote's panorama.\n    // Does NOT violate LNC.\n    //\n    // Bob:      b0\n    //          / |\n    // Alice: a0  |\n    //            |\n    //        a0' |\n    //           \\|\n    // Carol:    c0\n    let mut state = State::new_test(WEIGHTS, 0);\n\n    let a0 = add_unit!(state, ALICE, 0xA; N, N, N)?;\n    let b0 = add_unit!(state, BOB, 0xB; a0, N, N)?;\n    let _a0_prime = add_unit!(state, ALICE, 0xA2; N, N, N)?;\n    // c0 does not violate LNC b/c it sees Alice as faulty.\n    add_unit!(state, CAROL, None; F, b0, N)?;\n    Ok(())\n}\n\n#[test]\nfn validate_lnc_one_equivocator() -> Result<(), AddUnitError<TestContext>> {\n    if !ENABLE_ENDORSEMENTS {\n        return Ok(());\n    }\n    // Equivocation cited by two honest validators in the vote's panorama – their votes need to\n    // be endorsed.\n    //\n    // Bob:      b0\n    //          / \\\n    // 
Alice: a0   \\\n    //              \\\n    //        a0'    \\\n    //           \\   |\n    // Carol:    c0  |\n    //             \\ |\n    // Dan:         d0\n\n    let weights4 = &[Weight(3), Weight(4), Weight(5), Weight(5)];\n    let mut state = State::new_test(weights4, 0);\n\n    let a0 = add_unit!(state, ALICE, 0xA; N, N, N, N)?;\n    let a0_prime = add_unit!(state, ALICE, 0xA2; N, N, N, N)?;\n    let b0 = add_unit!(state, BOB, 0xB; a0, N, N, N)?;\n    let c0 = add_unit!(state, CAROL, 0xB2; a0_prime, N, N, N)?;\n    // d0 violates LNC b/c it naively cites Alice's equivocation.\n    // None of the votes is marked as being endorsed – violates LNC.\n    assert_eq!(\n        add_unit!(state, DAN, None; F, b0, c0, N).unwrap_err().cause,\n        UnitError::LncNaiveCitation(ALICE)\n    );\n    endorse!(state, CAROL, c0);\n    endorse!(state, c0; BOB, DAN);\n    // Now d0 cites non-naively b/c c0 is endorsed.\n    add_unit!(state, DAN, None; F, b0, c0, N; c0)?;\n    Ok(())\n}\n\n#[test]\nfn validate_lnc_two_equivocators() -> Result<(), AddUnitError<TestContext>> {\n    if !ENABLE_ENDORSEMENTS {\n        return Ok(());\n    }\n    // Multiple equivocators and indirect equivocations.\n    // Votes are seen as endorsed by `state` – does not violate LNC.\n    //\n    // Alice   a0<---------+\n    //                     |\n    //         a0'<--+     |\n    //               |     |\n    // Bob          b0<-----------+\n    //               |     |      |\n    // Carol   c0<---+     |      |\n    //                     |      |\n    //         c0'<--------+      |\n    //                     |      |\n    // Dan                 d0<----+\n    //                            |\n    // Eric                       e0\n\n    let weights5 = &[Weight(3), Weight(4), Weight(5), Weight(5), Weight(6)];\n    let mut state = State::new_test(weights5, 0);\n\n    let a0 = add_unit!(state, ALICE, 0xA; N, N, N, N, N)?;\n    let a0_prime = add_unit!(state, ALICE, 0xA2; N, N, N, N, N)?;\n    
let c0 = add_unit!(state, CAROL, 0xC; N, N, N, N, N)?;\n    let b0 = add_unit!(state, BOB, 0xB; a0_prime, N, c0, N, N)?;\n    let c0_prime = add_unit!(state, CAROL, 0xC2; N, N, N, N, N)?;\n    let d0 = add_unit!(state, DAN, 0xD; a0, N, c0_prime, N, N)?;\n    // e0 violates LNC b/c it naively cites Alice's & Carol's equivocations.\n    assert_eq!(\n        add_unit!(state, ERIC, None; F, b0, F, d0, N)\n            .unwrap_err()\n            .cause,\n        UnitError::LncNaiveCitation(ALICE)\n    );\n    // Endorse b0.\n    endorse!(state, b0; BOB, DAN, ERIC);\n    add_unit!(state,ERIC, None; F, b0, F, d0, N; b0)?;\n    Ok(())\n}\n\n#[test]\nfn validate_lnc_own_naive_citation() -> Result<(), AddUnitError<TestContext>> {\n    if !ENABLE_ENDORSEMENTS {\n        return Ok(());\n    }\n    //           a0'<-----+\n    // Alice              |\n    //           a0 <--+  |\n    //                 |  |\n    // Bob             |  +--b0<--+--b1\n    //                 |  |       |\n    // Carol           |  +--c0<--+\n    //                 |          |\n    // Dan             +-----d0<--+\n    let weights4 = &[Weight(3), Weight(4), Weight(5), Weight(5)];\n    let mut state = State::new_test(weights4, 0);\n\n    let a0 = add_unit!(state, ALICE, 0xA; N, N, N, N)?;\n    let a0_prime = add_unit!(state, ALICE, 0xA2; N, N, N, N)?;\n\n    // Bob and Carol don't see a0 yet, so they cite a0_prime naively. Dan cites a0 naively.\n    let b0 = add_unit!(state, BOB, None; a0_prime, N, N, N)?;\n    let c0 = add_unit!(state, CAROL, None; a0_prime, N, N, N)?;\n    let d0 = add_unit!(state, DAN, None; a0, N, N, N)?;\n    endorse!(state, c0; ALICE, BOB, CAROL, DAN); // Everyone endorses c0.\n    endorse!(state, d0; ALICE, BOB, CAROL, DAN); // Everyone endorses d0.\n\n    // The fact that c0 is endorsed is not enough. 
Bob would violate the LNC because his new unit\n    // cites a0 naively, and his previous unit b0 cited a0_prime naively.\n    assert_eq!(\n        add_unit!(state, BOB, None; F, b0, c0, d0; c0)\n            .unwrap_err()\n            .cause,\n        UnitError::LncNaiveCitation(ALICE)\n    );\n    // The fact that d0 is endorsed makes both of Bob's units cite only one of Alice's forks\n    // naively (namely a0_prime), which is fine.\n    add_unit!(state, BOB, None; F, b0, c0, d0; d0)?;\n    Ok(())\n}\n\n#[test]\nfn validate_lnc_mixed_citations() -> Result<(), AddUnitError<TestContext>> {\n    if !ENABLE_ENDORSEMENTS {\n        return Ok(());\n    }\n    // Eric's vote should not require an endorsement as his unit e0 cites equivocator Carol before\n    // the fork.\n    //\n    // Alice                              +a0+\n    //                                    ++ |\n    //                                    || |\n    //                                    || |\n    // Bob                     b0<---------+ |\n    //                          + | |\n    //                          |          | |\n    //                    +c1<--+          | |\n    // Carol         c0<--+                | |\n    //                ^   +c1'<-+          | |\n    //                |         +          | |\n    // Dan            |        d0<---------+ |\n    //                |                      |\n    // Eric           +--+e0<----------------+\n    //\n    let weights5 = &[Weight(3), Weight(4), Weight(5), Weight(5), Weight(6)];\n    let mut state = State::new_test(weights5, 0);\n\n    let c0 = add_unit!(state, CAROL, 0xC; N, N, N, N, N)?;\n    let c1 = add_unit!(state, CAROL, 0xC1; N, N, c0, N, N)?;\n    let c1_prime = add_unit!(state, CAROL, 0xC1B; N, N, c0, N, N)?;\n    let b0 = add_unit!(state, BOB, 0xB; N, N, c1, N, N)?;\n    let d0 = add_unit!(state, DAN, 0xD; N, N, c1_prime, N, N)?;\n    // Should not require endorsements b/c e0 sees Carol as correct.\n    let e0 = 
add_unit!(state, ERIC, 0xE; N, N, c0, N, N)?;\n    assert_eq!(\n        add_unit!(state, ALICE, None; N, b0, F, d0, e0)\n            .unwrap_err()\n            .cause,\n        UnitError::LncNaiveCitation(CAROL)\n    );\n    // We pick b0 to be endorsed.\n    endorse!(state, b0; ALICE, BOB, ERIC);\n    add_unit!(state, ALICE, None; N, b0, F, d0, e0; b0)?;\n    Ok(())\n}\n\n#[test]\nfn validate_lnc_transitive_endorsement() -> Result<(), AddUnitError<TestContext>> {\n    if !ENABLE_ENDORSEMENTS {\n        return Ok(());\n    }\n    // Endorsements should be transitive to descendants.\n    // c1 doesn't have to be endorsed, it is enough that c0 is.\n    //\n    // Alice           a0<-----------+\n    //                 + |\n    //          b0<----+             |\n    // Bob                           |\n    //                               |\n    //          b0'<---+             |\n    //                 + |\n    // Carol           c0<---+c1<----+\n    //                               |\n    //                               |\n    // Dan                          d0\n\n    let weights_dan = &[Weight(3), Weight(4), Weight(5), Weight(5)];\n    let mut state = State::new_test(weights_dan, 0);\n\n    let b0 = add_unit!(state, BOB, 0xB; N, N, N, N)?;\n    let b0_prime = add_unit!(state, BOB, 0xB1; N, N, N, N)?;\n    let a0 = add_unit!(state, ALICE, 0xA; N, b0, N, N)?;\n    let c0 = add_unit!(state, CAROL, 0xC; N, b0_prime, N, N)?;\n    let c1 = add_unit!(state, CAROL, 0xC1; N, b0_prime, c0, N)?;\n    assert_eq!(\n        add_unit!(state, DAN, None; a0, F, c1, N).unwrap_err().cause,\n        UnitError::LncNaiveCitation(BOB)\n    );\n    endorse!(state, c0; CAROL, DAN, ALICE);\n    add_unit!(state, DAN, None; a0, F, c1, N; c0)?;\n    Ok(())\n}\n\n#[test]\nfn validate_lnc_cite_descendant_of_equivocation() -> Result<(), AddUnitError<TestContext>> {\n    if !ENABLE_ENDORSEMENTS {\n        return Ok(());\n    }\n    // a0 cites a descendant b1 of an equivocation vote (b0 and 
b0').\n    // This is still detected as violation of the LNC.\n    //\n    // Alice                  a0<----+\n    //                        + |\n    //          b0<---+b1<----+      |\n    // Bob                           |\n    //                               |\n    //          b0'<---+             |\n    //                 + |\n    // Carol           c0            |\n    //                  ^            +\n    // Dan              +----------+d0\n    let weights_dan = &[Weight(3), Weight(4), Weight(5), Weight(5)];\n    let mut state = State::new_test(weights_dan, 0);\n\n    let b0 = add_unit!(state, BOB, 0xB; N, N, N, N)?;\n    let b0_prime = add_unit!(state, BOB, 0xBA; N, N, N, N)?;\n    let b1 = add_unit!(state, BOB, 0xB1; N, b0, N, N)?;\n    let a0 = add_unit!(state, ALICE, 0xA; N, b1, N, N)?;\n    let c0 = add_unit!(state, CAROL, 0xC; N, b0_prime, N, N)?;\n    assert_eq!(\n        add_unit!(state, DAN, None; a0, F, c0, N).unwrap_err().cause,\n        UnitError::LncNaiveCitation(BOB)\n    );\n    endorse!(state, c0; ALICE, CAROL, DAN);\n    add_unit!(state, DAN, None; a0, F, c0, N; c0)?;\n    Ok(())\n}\n\n#[test]\nfn validate_lnc_endorse_mix_pairs() -> Result<(), AddUnitError<TestContext>> {\n    if !ENABLE_ENDORSEMENTS {\n        return Ok(());\n    }\n    // Diagram of the DAG can be found under\n    // /resources/test/dags/validate_lnc_endorse_mix_pairs.png\n    //\n    // Both c0 and g0 need only one of their descendants votes endorsed to not validate LNC.\n    // Since endorsements are monotonic (c0's and g0' endorsements are also endorsed by h0),\n    // h0 does not violate LNC b/c it cites at most one fork naively.\n    let weights = &[\n        Weight(3),\n        Weight(4),\n        Weight(5),\n        Weight(5),\n        Weight(5),\n        Weight(5),\n        Weight(5),\n        Weight(5),\n    ];\n    let mut state = State::new_test(weights, 0);\n\n    let d0 = add_unit!(state, DAN, 0xBA; N, N, N, N, N, N, N, N)?;\n    let d0_prime = 
add_unit!(state, DAN, 0xBB; N, N, N, N, N, N, N, N)?;\n    let a0 = add_unit!(state, ALICE, None; N, N, N, d0, N, N, N, N)?;\n    let b0 = add_unit!(state, BOB, None; N, N, N, d0_prime, N, N, N, N)?;\n    endorse!(state, a0; ALICE, BOB, CAROL, ERIC, FRANK, GINA);\n    let c0 = add_unit!(state, CAROL, None; a0, b0, N, F, N, N, N, N; a0)?;\n    let e0 = add_unit!(state, ERIC, None; N, N, N, d0, N, N, N, N)?;\n    let f0 = add_unit!(state, FRANK, None; N, N, N, d0_prime, N, N, N, N)?;\n    endorse!(state, f0; ALICE, BOB, CAROL, ERIC, FRANK, GINA);\n    let g0 = add_unit!(state, GINA, None; N, N, N, F, e0, f0, N, N; f0)?;\n    // Should pass the LNC validation test.\n    add_unit!(state, HANNA, None; a0, b0, c0, F, e0, f0, g0, N; a0, f0)?;\n    Ok(())\n}\n\n#[test]\nfn validate_lnc_shared_equiv_unit() -> Result<(), AddUnitError<TestContext>> {\n    if !ENABLE_ENDORSEMENTS {\n        return Ok(());\n    }\n    // Diagram of the DAG can be found under\n    // /resources/test/dags/validate_lnc_shared_equiv_unit.png\n    let weights = &[\n        Weight(3),\n        Weight(4),\n        Weight(5),\n        Weight(5),\n        Weight(5),\n        Weight(5),\n        Weight(5),\n        Weight(5),\n    ];\n    let mut state = State::new_test(weights, 0);\n\n    let d0 = add_unit!(state, DAN, 0xDA; N, N, N, N, N, N, N, N)?;\n    let d0_prime = add_unit!(state, DAN, 0xDB; N, N, N, N, N, N, N, N)?;\n    let d0_bis = add_unit!(state, DAN, 0xDC; N, N, N, N, N, N, N, N)?;\n    let b0 = add_unit!(state, BOB, None; N, N, N, d0, N, N, N, N)?;\n    let c0 = add_unit!(state, CAROL, None; N, N, N, d0_prime, N, N, N, N)?;\n    let e0 = add_unit!(state, ERIC, None; N, N, N, d0_prime, N, N, N, N)?;\n    let f0 = add_unit!(state, FRANK, None; N, N, N, d0_bis, N, N, N, N)?;\n    endorse!(state, c0; ALICE, BOB, CAROL, ERIC, FRANK);\n    let a0 = add_unit!(state, ALICE, None; N, b0, c0, F, N, N, N, N; c0)?;\n    endorse!(state, e0; ALICE, BOB, CAROL, ERIC, FRANK);\n    let g0 = add_unit!(state, 
GINA, None; N, N, N, F, e0, f0, N, N; e0)?;\n    assert_eq!(\n        add_unit!(state, HANNA, None; a0, b0, c0, F, e0, f0, g0, N; c0, e0)\n            .unwrap_err()\n            .cause,\n        UnitError::LncNaiveCitation(DAN)\n    );\n    // Even though both a0 and g0 cite DAN non-naively, for h0 to be valid\n    // we need to endorse either b0 or f0.\n    let mut pre_endorse_state = state.clone();\n    // Endorse b0 first.\n    endorse!(state, b0; ALICE, BOB, CAROL, ERIC, FRANK);\n    add_unit!(state, HANNA, None; a0, b0, c0, F, e0, f0, g0, N; c0, e0, b0)?;\n    // Should also pass if e0 is endorsed.\n    endorse!(pre_endorse_state, f0; ALICE, BOB, CAROL, ERIC, FRANK);\n    add_unit!(pre_endorse_state, HANNA, None; a0, b0, c0, F, e0, f0, g0, N; c0, e0, f0)?;\n    Ok(())\n}\n\n#[test]\nfn validate_lnc_four_forks() -> Result<(), AddUnitError<TestContext>> {\n    if !ENABLE_ENDORSEMENTS {\n        return Ok(());\n    }\n    // Diagram of the DAG can be found under\n    // /resources/test/dags/validate_lnc_four_forks.png\n    let weights = &[\n        Weight(3),\n        Weight(4),\n        Weight(5),\n        Weight(5),\n        Weight(5),\n        Weight(5),\n        Weight(5),\n        Weight(5),\n    ];\n    let mut state = State::new_test(weights, 0);\n\n    let e0 = add_unit!(state, ERIC, 0xEA; N, N, N, N, N, N, N, N)?;\n    let e0_prime = add_unit!(state, ERIC, 0xEB; N, N, N, N, N, N, N, N)?;\n    let e0_bis = add_unit!(state, ERIC, 0xEC; N, N, N, N, N, N, N, N)?;\n    let e0_cis = add_unit!(state, ERIC, 0xED; N, N, N, N, N, N, N, N)?;\n    let a0 = add_unit!(state, ALICE, None; N, N, N, N, e0, N, N, N)?;\n    let b0 = add_unit!(state, BOB, None; N, N, N, N, e0_prime, N, N, N)?;\n    let g0 = add_unit!(state, GINA, None; N, N, N, N, e0_bis, N, N, N)?;\n    let h0 = add_unit!(state, HANNA, None; N, N, N, N, e0_cis, N, N, N)?;\n    endorse!(state, a0; ALICE, BOB, CAROL, DAN, GINA, HANNA);\n    let c0 = add_unit!(state, CAROL, None; a0, b0, N, N, F, N, N, N; 
a0)?;\n    endorse!(state, g0; ALICE, BOB, CAROL, DAN, GINA, HANNA);\n    let f0 = add_unit!(state, FRANK, None; N, N, N, N, F, N, g0, h0; g0)?;\n    let d0 = add_unit!(state, DAN, None; N, N, N, N, F, f0, g0, h0; g0)?;\n    assert_eq!(\n        add_unit!(state, DAN, None; a0, b0, c0, d0, F, f0, g0, h0; a0, g0)\n            .unwrap_err()\n            .cause,\n        UnitError::LncNaiveCitation(ERIC)\n    );\n    let mut pre_endorse_state = state.clone();\n    // If we endorse h0, then d1 still violates the LNC: d0 cited e0_cis naively and d1 cites\n    // e0_prime naively.\n    endorse!(state, h0; ALICE, BOB, CAROL, DAN, GINA, HANNA);\n    let result = add_unit!(state, DAN, None; a0, b0, c0, d0, F, f0, g0, h0; a0, g0, h0);\n    assert_eq!(result.unwrap_err().cause, UnitError::LncNaiveCitation(ERIC));\n    // It should work if we had endorsed b0 instead.\n    endorse!(pre_endorse_state, b0; ALICE, BOB, CAROL, DAN, GINA, HANNA);\n    add_unit!(pre_endorse_state, DAN, None; a0, b0, c0, d0, F, f0, g0, h0; a0, g0, b0)?;\n    // And it should still work if both were endorsed.\n    endorse!(pre_endorse_state, h0; ALICE, BOB, CAROL, DAN, GINA, HANNA);\n    add_unit!(pre_endorse_state, DAN, None; a0, b0, c0, d0, F, f0, g0, h0; a0, g0, b0, h0)?;\n    Ok(())\n}\n\n#[test]\nfn is_terminal_block() -> Result<(), AddUnitError<TestContext>> {\n    let mut state = State::new_test(WEIGHTS, 0);\n\n    let a0 = add_unit!(state, ALICE, 0x00; N, N, N)?;\n    assert!(!state.is_terminal_block(&a0)); // height 0\n    let b0 = add_unit!(state, BOB, 0x01; a0, N, N)?;\n    assert!(!state.is_terminal_block(&b0)); // height 1\n    let c0 = add_unit!(state, CAROL, 0x02; a0, b0, N)?;\n    assert!(!state.is_terminal_block(&c0)); // height 2\n    let a1 = add_unit!(state, ALICE, 0x03; a0, b0, c0)?;\n    assert!(!state.is_terminal_block(&a1)); // height 3\n    let a2 = add_unit!(state, ALICE, None; a1, b0, c0)?;\n    assert!(!state.is_terminal_block(&a2)); // not a block\n    let a3 = 
add_unit!(state, ALICE, 0x04; a2, b0, c0)?;\n    assert!(state.is_terminal_block(&a3)); // height 4, i.e. the fifth block and thus the last one\n    assert_eq!(TEST_ERA_HEIGHT - 1, state.block(&a3).height);\n    let a4 = add_unit!(state, ALICE, None; a3, b0, c0)?;\n    assert!(!state.is_terminal_block(&a4)); // not a block\n    Ok(())\n}\n\n#[test]\nfn conflicting_endorsements() -> Result<(), AddUnitError<TestContext>> {\n    if TODO_ENDORSEMENT_EVIDENCE_DISABLED {\n        return Ok(()); // Endorsement evidence is disabled, so don't test it.\n    }\n    let validators = vec![(ALICE_SEC, ALICE), (BOB_SEC, BOB), (CAROL_SEC, CAROL)]\n        .into_iter()\n        .map(|(sk, vid)| {\n            assert_eq!(sk.0, vid.0);\n            (sk.0, WEIGHTS[vid.0 as usize].0)\n        })\n        .collect();\n    let mut state = State::new_test(WEIGHTS, 0);\n\n    // Alice equivocates and creates two forks:\n    // * a0_prime and\n    // * a0 < a1 < a2\n    let a0 = add_unit!(state, ALICE, 0x00; N, N, N)?;\n    let a0_prime = add_unit!(state, ALICE, 0x01; N, N, N)?;\n    let a1 = add_unit!(state, ALICE, None; a0, N, N)?;\n    let a2 = add_unit!(state, ALICE, None; a1, N, N)?;\n\n    // Bob endorses a0_prime.\n    endorse!(state, BOB, a0_prime);\n    assert!(!state.is_faulty(BOB));\n\n    // Now he also endorses a2, even though it is on a different fork. 
That's a fault!\n    endorse!(state, BOB, a2);\n    assert!(state.is_faulty(BOB));\n\n    let evidence = state\n        .maybe_evidence(BOB)\n        .expect(\"Bob should be considered faulty\")\n        .clone();\n    assert_eq!(\n        Ok(()),\n        evidence.validate(&validators, &TEST_INSTANCE_ID, state.params())\n    );\n\n    let limit = TEST_ENDORSEMENT_EVIDENCE_LIMIT as usize;\n\n    let mut a = vec![a0, a1, a2];\n    while a.len() <= limit + 1 {\n        let prev_a = *a.last().unwrap();\n        a.push(add_unit!(state, ALICE, None; prev_a, N, N)?);\n    }\n\n    // Carol endorses a0_prime.\n    endorse!(state, CAROL, a0_prime);\n\n    // She also endorses a[limit + 1], and gets away with it because the evidence would be too big.\n    endorse!(state, CAROL, a[limit + 1]);\n    assert!(!state.is_faulty(CAROL));\n\n    // But if she endorses a[limit], the units fit into an evidence vertex.\n    endorse!(state, CAROL, a[limit]);\n    assert!(state.is_faulty(CAROL));\n\n    let evidence = state\n        .maybe_evidence(CAROL)\n        .expect(\"Carol is faulty\")\n        .clone();\n    assert_eq!(\n        Ok(()),\n        evidence.validate(&validators, &TEST_INSTANCE_ID, state.params())\n    );\n\n    // If the limit were less, that evidence would be considered invalid.\n    let params2 = test_params(0).with_endorsement_evidence_limit(limit as u64 - 1);\n    assert_eq!(\n        Err(EvidenceError::EndorsementTooManyUnits),\n        evidence.validate(&validators, &TEST_INSTANCE_ID, &params2)\n    );\n\n    Ok(())\n}\n\n#[test]\nfn retain_evidence_only() -> Result<(), AddUnitError<TestContext>> {\n    let mut state = State::new_test(WEIGHTS, 0);\n\n    let a0 = add_unit!(state, ALICE, 0xA; N, N, N)?;\n    let b0 = add_unit!(state, BOB, 0xB; a0, N, N)?;\n    let _a0_prime = add_unit!(state, ALICE, 0xA2; N, N, N)?;\n    assert_eq!(&panorama!(F, b0, N), state.panorama());\n    state.retain_evidence_only();\n    assert_eq!(&panorama!(F, N, N), 
state.panorama());\n    assert!(!state.has_unit(&a0));\n    assert!(state.has_evidence(ALICE));\n    Ok(())\n}\n\n#[test]\nfn test_log2() {\n    assert_eq!(0, log2(0));\n    assert_eq!(2, log2(0b100));\n    assert_eq!(2, log2(0b101));\n    assert_eq!(2, log2(0b111));\n    assert_eq!(3, log2(0b1000));\n    assert_eq!(63, log2(u64::MAX));\n    assert_eq!(63, log2(1 << 63));\n    assert_eq!(62, log2((1 << 63) - 1));\n}\n\n#[test]\nfn test_leader() {\n    let weights = &[Weight(3), Weight(4), Weight(5), Weight(4), Weight(5)];\n\n    // All five validators get slots in the leader sequence. If 1, 2 and 4 are excluded, their slots\n    // get reassigned, but 0 and 3 keep their old slots.\n    let before = vec![0, 2, 4, 3, 3, 1, 2, 1, 0, 0, 0, 2, 0, 2, 3, 2, 3, 3, 1, 2];\n    let after = vec![0, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3];\n    let excluded = vec![ValidatorIndex(1), ValidatorIndex(2), ValidatorIndex(4)];\n    let state = State::<TestContext>::new(weights, test_params(0), vec![], vec![]);\n    assert_eq!(\n        before,\n        (0..20u64)\n            .map(|r_id| state.leader(r_id.into()).0)\n            .collect_vec()\n    );\n    let state = State::<TestContext>::new(weights, test_params(0), vec![], excluded);\n    assert_eq!(\n        after,\n        (0..20u64)\n            .map(|r_id| state.leader(r_id.into()).0)\n            .collect_vec()\n    );\n}\n"
  },
  {
    "path": "node/src/components/consensus/highway_core/state/unit.rs",
    "content": "use std::collections::BTreeSet;\n\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse casper_types::{TimeDiff, Timestamp};\n\nuse crate::components::consensus::{\n    highway_core::{\n        highway::SignedWireUnit,\n        state::{self, Panorama, State},\n    },\n    traits::Context,\n    utils::ValidatorIndex,\n};\n\n/// A unit sent to or received from the network.\n///\n/// This is only instantiated when it gets added to a `State`, and only once it has been validated.\n#[derive(Clone, DataSize, Debug, Eq, PartialEq, Serialize, Deserialize)]\npub struct Unit<C>\nwhere\n    C: Context,\n{\n    /// The list of latest units and faults observed by the creator of this message.\n    /// The panorama must be valid, and this unit's creator must not be marked as faulty.\n    pub panorama: Panorama<C>,\n    /// The number of earlier messages by the same creator.\n    /// This must be `0` if the creator's entry in the panorama is `None`. Otherwise it must be\n    /// the previous unit's sequence number plus one.\n    pub seq_number: u64,\n    /// The validator who created and sent this unit.\n    pub creator: ValidatorIndex,\n    /// The block this unit votes for. Either it or its parent must be the fork choice.\n    pub block: C::Hash,\n    /// A skip list index of the creator's swimlane, i.e. the previous unit by the same creator.\n    ///\n    /// For every `p = 1 << i` that divides `seq_number`, this contains an `i`-th entry pointing to\n    /// the older unit with `seq_number - p`.\n    pub skip_idx: Vec<C::Hash>,\n    /// This unit's timestamp, in milliseconds since the epoch. 
This must not be earlier than the\n    /// timestamp of any unit cited in the panorama.\n    pub timestamp: Timestamp,\n    /// Original signature of the `SignedWireUnit`.\n    pub signature: C::Signature,\n    /// The length of the current round, that this message belongs to.\n    ///\n    /// All cited units by `creator` in the same round must have the same round length.\n    pub round_len: TimeDiff,\n    /// Units that this one claims are endorsed.\n    /// All of these must be cited (directly or indirectly) by the panorama.\n    pub endorsed: BTreeSet<C::Hash>,\n}\n\nimpl<C: Context> Unit<C> {\n    /// Creates a new `Unit` from the `WireUnit`, and returns the value if it contained any.\n    /// Values must be stored as a block, with the same hash.\n    pub(super) fn new(\n        swunit: SignedWireUnit<C>,\n        fork_choice: Option<&C::Hash>,\n        state: &State<C>,\n    ) -> (Unit<C>, Option<C::ConsensusValue>) {\n        let SignedWireUnit {\n            hashed_wire_unit,\n            signature,\n        } = swunit;\n        let hash = hashed_wire_unit.hash();\n        let wunit = hashed_wire_unit.into_inner();\n        let block = if wunit.value.is_some() {\n            hash // A unit with a new block votes for itself.\n        } else {\n            // If the unit didn't introduce a new block, it votes for the fork choice itself.\n            // `pre_validate_unit` checks that the panorama has a `Correct` entry.\n            fork_choice\n                .cloned()\n                .expect(\"nonempty panorama has nonempty fork choice\")\n        };\n        let mut skip_idx = Vec::new();\n        if let Some(hash) = wunit.panorama[wunit.creator].correct() {\n            skip_idx.push(*hash);\n            for i in 0..wunit.seq_number.trailing_zeros() as usize {\n                let old_unit = state.unit(&skip_idx[i]);\n                skip_idx.push(old_unit.skip_idx[i]);\n            }\n        }\n        #[allow(clippy::arithmetic_side_effects)] // Only 
called with valid units.\n        let round_len =\n            TimeDiff::from_millis(state.params().min_round_length().millis() << wunit.round_exp);\n        let unit = Unit {\n            panorama: wunit.panorama,\n            seq_number: wunit.seq_number,\n            creator: wunit.creator,\n            block,\n            skip_idx,\n            timestamp: wunit.timestamp,\n            signature,\n            round_len,\n            endorsed: wunit.endorsed,\n        };\n        (unit, wunit.value)\n    }\n\n    /// Returns the creator's previous unit.\n    pub fn previous(&self) -> Option<&C::Hash> {\n        self.skip_idx.first()\n    }\n\n    /// Returns the time at which the round containing this unit began.\n    pub fn round_id(&self) -> Timestamp {\n        state::round_id(self.timestamp, self.round_len)\n    }\n\n    /// Returns the length of the round containing this unit.\n    pub fn round_len(&self) -> TimeDiff {\n        self.round_len\n    }\n\n    /// Returns whether `unit` cites a new unit from `vidx` in the last panorama.\n    /// i.e. whether previous unit from creator of `vhash` cites different unit by `vidx`.\n    ///\n    /// NOTE: Returns `false` if `vidx` is faulty or hasn't produced any units according to the\n    /// creator of `vhash`.\n    pub fn new_hash_obs(&self, state: &State<C>, vidx: ValidatorIndex) -> bool {\n        let latest_obs = self.panorama[vidx].correct();\n        let penultimate_obs = self\n            .previous()\n            .and_then(|v| state.unit(v).panorama[vidx].correct());\n        match (latest_obs, penultimate_obs) {\n            (Some(latest_hash), Some(penultimate_hash)) => latest_hash != penultimate_hash,\n            _ => false,\n        }\n    }\n\n    /// Returns an iterator over units this one claims are endorsed.\n    pub fn claims_endorsed(&self) -> impl Iterator<Item = &C::Hash> {\n        self.endorsed.iter()\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/highway_core/state.rs",
    "content": "mod block;\nmod index_panorama;\nmod panorama;\nmod params;\nmod tallies;\nmod unit;\n\n#[cfg(test)]\npub(crate) mod tests;\n\npub(crate) use params::Params;\nuse quanta::Clock;\nuse serde::{Deserialize, Serialize};\n\npub(crate) use index_panorama::{IndexObservation, IndexPanorama};\npub use panorama::{Observation, Panorama};\npub(super) use unit::Unit;\n\nuse std::{\n    borrow::Borrow,\n    collections::{BTreeMap, BTreeSet, HashMap, HashSet},\n    iter,\n};\n\nuse datasize::DataSize;\nuse itertools::Itertools;\nuse thiserror::Error;\nuse tracing::{error, info, trace, warn};\n\nuse casper_types::{TimeDiff, Timestamp};\n\nuse crate::{\n    components::consensus::{\n        highway_core::{\n            endorsement::{Endorsement, SignedEndorsement},\n            evidence::Evidence,\n            highway::{Endorsements, HashedWireUnit, SignedWireUnit, WireUnit},\n            ENABLE_ENDORSEMENTS,\n        },\n        traits::Context,\n        utils::{ValidatorIndex, ValidatorMap, Weight},\n        LeaderSequence,\n    },\n    utils::ds,\n};\nuse block::Block;\nuse tallies::Tallies;\n\n// TODO: The restart mechanism only persists and loads our own latest unit, so that we don't\n// equivocate after a restart. It doesn't yet persist our latest endorsed units, so we could\n// accidentally endorse conflicting votes. 
Fix this and enable detecting conflicting\n// endorsements again.\npub(super) const TODO_ENDORSEMENT_EVIDENCE_DISABLED: bool = true;\n\n/// Number of maximum-length rounds after which a validator counts as offline, if we haven't heard\n/// from them.\nconst PING_TIMEOUT: u64 = 3;\n\n#[derive(Debug, Error, Eq, PartialEq, Clone)]\npub(crate) enum UnitError {\n    #[error(\"The unit is a ballot but doesn't cite any block.\")]\n    MissingBlock,\n    #[error(\"The panorama's length {} doesn't match the number of validators.\", _0)]\n    PanoramaLength(usize),\n    #[error(\"The unit accuses its own creator as faulty.\")]\n    FaultyCreator,\n    #[error(\"The panorama has a unit from {:?} in the slot for {:?}.\", _0, _1)]\n    PanoramaIndex(ValidatorIndex, ValidatorIndex),\n    #[error(\"The panorama is missing units indirectly cited via {:?}.\", _0)]\n    InconsistentPanorama(ValidatorIndex),\n    #[error(\"The unit contains the wrong sequence number.\")]\n    SequenceNumber,\n    #[error(\"The unit's timestamp is older than a justification's.\")]\n    Timestamps,\n    #[error(\"The creator is not a validator.\")]\n    Creator,\n    #[error(\"The unit was created for a wrong instance ID.\")]\n    InstanceId,\n    #[error(\"The signature is invalid.\")]\n    Signature,\n    #[error(\"The round length has somehow changed within a round.\")]\n    RoundLengthChangedWithinRound,\n    #[error(\"The round length is greater than the maximum allowed by the chainspec.\")]\n    RoundLengthGreaterThanMaximum,\n    #[error(\"This would be the third unit in that round. 
Only two are allowed.\")]\n    ThreeUnitsInRound,\n    #[error(\n        \"A block must be the leader's ({:?}) first unit, at the beginning of the round.\",\n        _0\n    )]\n    NonLeaderBlock(ValidatorIndex),\n    #[error(\"The unit is a block, but its parent is already a terminal block.\")]\n    ValueAfterTerminalBlock,\n    #[error(\"The unit's creator is banned.\")]\n    Banned,\n    #[error(\"The unit's endorsed units were not a superset of its justifications.\")]\n    EndorsementsNotMonotonic,\n    #[error(\"The LNC rule was violated. Vote cited ({:?}) naively.\", _0)]\n    LncNaiveCitation(ValidatorIndex),\n    #[error(\n        \"Wire unit endorses hash but does not see it. Hash: {:?}; Wire unit: {:?}\",\n        hash,\n        wire_unit\n    )]\n    EndorsedButUnseen { hash: String, wire_unit: String },\n}\n\n/// A reason for a validator to be marked as faulty.\n///\n/// The `Banned` state is fixed from the beginning and can't be replaced. However, `Indirect` can\n/// be replaced with `Direct` evidence, which has the same effect but doesn't rely on information\n/// from other consensus protocol instances.\n#[derive(Clone, DataSize, Debug, Deserialize, Eq, PartialEq, Hash, Serialize)]\npub enum Fault<C>\nwhere\n    C: Context,\n{\n    /// The validator was known to be malicious from the beginning. 
All their messages are\n    /// considered invalid in this Highway instance.\n    Banned,\n    /// We have direct evidence of the validator's fault.\n    Direct(Evidence<C>),\n    /// The validator is known to be faulty, but the evidence is not in this Highway instance.\n    Indirect,\n}\n\nimpl<C: Context> Fault<C> {\n    /// Returns the evidence included in this `Fault`.\n    pub fn evidence(&self) -> Option<&Evidence<C>> {\n        match self {\n            Fault::Banned | Fault::Indirect => None,\n            Fault::Direct(ev) => Some(ev),\n        }\n    }\n}\n\n/// A passive instance of the Highway protocol, containing its local state.\n///\n/// Both observers and active validators must instantiate this, pass in all incoming vertices from\n/// peers, and use a [FinalityDetector](../finality_detector/struct.FinalityDetector.html) to\n/// determine the outcome of the consensus process.\n#[derive(Debug, Clone, DataSize, Serialize, Deserialize)]\npub struct State<C>\nwhere\n    C: Context,\n{\n    /// The fixed parameters.\n    params: Params,\n    /// The validator's voting weights.\n    weights: ValidatorMap<Weight>,\n    /// The pseudorandom sequence of round leaders.\n    leader_sequence: LeaderSequence,\n    /// All units imported so far, by hash.\n    /// This is a downward closed set: A unit must only be added here once all of its dependencies\n    /// have been added as well, and it has been fully validated.\n    #[data_size(with = ds::hashmap_sample)]\n    units: HashMap<C::Hash, Unit<C>>,\n    /// All blocks, by hash. 
All block hashes are also unit hashes, but units that did not\n    /// introduce a new block don't have their own entry here.\n    #[data_size(with = ds::hashmap_sample)]\n    blocks: HashMap<C::Hash, Block<C>>,\n    /// List of faulty validators and their type of fault.\n    /// Every validator that has an equivocation in `units` must have an entry here, but there can\n    /// be additional entries for other kinds of faults.\n    faults: HashMap<ValidatorIndex, Fault<C>>,\n    /// The full panorama, corresponding to the complete protocol state.\n    /// This points to the latest unit of every honest validator.\n    panorama: Panorama<C>,\n    /// All currently endorsed units, by hash: units that have enough endorsements to be cited even\n    /// if they naively cite an equivocator.\n    #[data_size(with = ds::hashmap_sample)]\n    endorsements: HashMap<C::Hash, ValidatorMap<Option<C::Signature>>>,\n    /// Units that don't yet have 1/2 of stake endorsing them.\n    /// Signatures are stored in a map so that a single validator sending lots of signatures for\n    /// different units doesn't cause us to allocate a lot of memory.\n    #[data_size(with = ds::hashmap_sample)]\n    incomplete_endorsements: HashMap<C::Hash, BTreeMap<ValidatorIndex, C::Signature>>,\n    /// Timestamp of the last ping or unit we received from each validator.\n    pings: ValidatorMap<Timestamp>,\n    /// Clock to measure time spent in fork choice computation.\n    #[data_size(skip)] // Not implemented for Clock; probably negligible.\n    #[serde(skip, default)]\n    // Serialization is used by external tools only, which cannot make sense of `Clock`.\n    clock: Clock,\n}\n\nimpl<C: Context> State<C> {\n    pub(crate) fn new<I, IB, IB2>(\n        weights: I,\n        params: Params,\n        banned: IB,\n        cannot_propose: IB2,\n    ) -> State<C>\n    where\n        I: IntoIterator,\n        I::Item: Borrow<Weight>,\n        IB: IntoIterator<Item = ValidatorIndex>,\n        IB2: 
IntoIterator<Item = ValidatorIndex>,\n    {\n        let weights = ValidatorMap::from(weights.into_iter().map(|w| *w.borrow()).collect_vec());\n        assert!(\n            !weights.is_empty(),\n            \"cannot initialize Highway with no validators\"\n        );\n        let mut panorama = Panorama::new(weights.len());\n        let faults: HashMap<_, _> = banned.into_iter().map(|idx| (idx, Fault::Banned)).collect();\n        for idx in faults.keys() {\n            assert!(\n                idx.0 < weights.len() as u32,\n                \"invalid banned validator index\"\n            );\n            panorama[*idx] = Observation::Faulty;\n        }\n        let mut can_propose: ValidatorMap<bool> = weights.iter().map(|_| true).collect();\n        for idx in cannot_propose {\n            assert!(\n                idx.0 < weights.len() as u32,\n                \"invalid validator index for exclusion from leader sequence\"\n            );\n            can_propose[idx] = false;\n        }\n        let leader_sequence = LeaderSequence::new(params.seed(), &weights, can_propose);\n        let pings = iter::repeat(params.start_timestamp())\n            .take(weights.len())\n            .collect();\n        State {\n            params,\n            weights,\n            leader_sequence,\n            units: HashMap::new(),\n            blocks: HashMap::new(),\n            faults,\n            panorama,\n            endorsements: HashMap::new(),\n            incomplete_endorsements: HashMap::new(),\n            pings,\n            clock: Clock::new(),\n        }\n    }\n\n    /// Returns the fixed parameters.\n    pub fn params(&self) -> &Params {\n        &self.params\n    }\n\n    /// Returns the number of validators.\n    pub fn validator_count(&self) -> usize {\n        self.weights.len()\n    }\n\n    /// Returns the `idx`th validator's voting weight.\n    pub fn weight(&self, idx: ValidatorIndex) -> Weight {\n        self.weights[idx]\n    }\n\n    /// Returns the 
map of validator weights.\n    pub fn weights(&self) -> &ValidatorMap<Weight> {\n        &self.weights\n    }\n\n    /// Returns the total weight of all validators marked faulty in this panorama.\n    pub fn faulty_weight_in(&self, panorama: &Panorama<C>) -> Weight {\n        panorama\n            .iter()\n            .zip(&self.weights)\n            .filter(|(obs, _)| **obs == Observation::Faulty)\n            .map(|(_, w)| *w)\n            .sum()\n    }\n\n    /// Returns the total weight of all known-faulty validators.\n    pub fn faulty_weight(&self) -> Weight {\n        self.faulty_weight_in(self.panorama())\n    }\n\n    /// Returns the sum of all validators' voting weights.\n    pub fn total_weight(&self) -> Weight {\n        self.leader_sequence.total_weight()\n    }\n\n    /// Returns evidence against validator nr. `idx`, if present.\n    pub fn maybe_evidence(&self, idx: ValidatorIndex) -> Option<&Evidence<C>> {\n        self.maybe_fault(idx).and_then(Fault::evidence)\n    }\n\n    /// Returns endorsements for `unit`, if any.\n    pub fn maybe_endorsements(&self, unit: &C::Hash) -> Option<Endorsements<C>> {\n        self.endorsements.get(unit).map(|signatures| Endorsements {\n            unit: *unit,\n            endorsers: signatures.iter_some().map(|(i, sig)| (i, *sig)).collect(),\n        })\n    }\n\n    /// Returns whether evidence against validator nr. 
`idx` is known.\n    pub fn has_evidence(&self, idx: ValidatorIndex) -> bool {\n        self.maybe_evidence(idx).is_some()\n    }\n\n    /// Returns whether we have all endorsements for `unit`.\n    pub fn has_all_endorsements<I: IntoIterator<Item = ValidatorIndex>>(\n        &self,\n        unit: &C::Hash,\n        v_ids: I,\n    ) -> bool {\n        if self.endorsements.contains_key(unit) {\n            true // We have enough endorsements for this unit.\n        } else if let Some(sigs) = self.incomplete_endorsements.get(unit) {\n            v_ids.into_iter().all(|v_id| sigs.contains_key(&v_id))\n        } else {\n            v_ids.into_iter().next().is_none()\n        }\n    }\n\n    /// Returns whether we have seen enough endorsements for the unit.\n    /// Unit is endorsed when it has endorsements from more than 50% of the validators (by weight).\n    pub fn is_endorsed(&self, hash: &C::Hash) -> bool {\n        self.endorsements.contains_key(hash)\n    }\n\n    /// Returns hash of unit that needs to be endorsed.\n    pub fn needs_endorsements(&self, unit: &SignedWireUnit<C>) -> Option<C::Hash> {\n        unit.wire_unit()\n            .endorsed\n            .iter()\n            .find(|hash| !self.endorsements.contains_key(hash))\n            .cloned()\n    }\n\n    /// Returns the timestamp of the last ping or unit received from the validator, or the start\n    /// timestamp if we haven't received anything yet.\n    pub fn last_seen(&self, idx: ValidatorIndex) -> Timestamp {\n        self.pings[idx]\n    }\n\n    /// Marks the given validator as faulty, unless it is already banned or we have direct evidence.\n    pub fn mark_faulty(&mut self, idx: ValidatorIndex) {\n        self.panorama[idx] = Observation::Faulty;\n        self.faults.entry(idx).or_insert(Fault::Indirect);\n    }\n\n    /// Returns the fault type of validator nr. 
`idx`, if it is known to be faulty.\n    pub fn maybe_fault(&self, idx: ValidatorIndex) -> Option<&Fault<C>> {\n        self.faults.get(&idx)\n    }\n\n    /// Returns whether validator nr. `idx` is known to be faulty.\n    pub fn is_faulty(&self, idx: ValidatorIndex) -> bool {\n        self.faults.contains_key(&idx)\n    }\n\n    /// Returns an iterator over all faulty validators.\n    pub fn faulty_validators(&self) -> impl Iterator<Item = ValidatorIndex> + '_ {\n        self.faults.keys().cloned()\n    }\n\n    /// Returns an iterator over latest unit hashes from honest validators.\n    pub fn iter_correct_hashes(&self) -> impl Iterator<Item = &C::Hash> {\n        self.panorama.iter_correct_hashes()\n    }\n\n    /// Returns the unit with the given hash, if present.\n    pub fn maybe_unit(&self, hash: &C::Hash) -> Option<&Unit<C>> {\n        self.units.get(hash)\n    }\n\n    /// Returns whether the unit with the given hash is known.\n    pub fn has_unit(&self, hash: &C::Hash) -> bool {\n        self.units.contains_key(hash)\n    }\n\n    /// Returns the unit with the given hash. Panics if not found.\n    pub fn unit(&self, hash: &C::Hash) -> &Unit<C> {\n        self.maybe_unit(hash).expect(\"unit hash must exist\")\n    }\n\n    /// Returns the block contained in the unit with the given hash, if present.\n    pub fn maybe_block(&self, hash: &C::Hash) -> Option<&Block<C>> {\n        self.blocks.get(hash)\n    }\n\n    /// Returns the block contained in the unit with the given hash. Panics if not found.\n    pub fn block(&self, hash: &C::Hash) -> &Block<C> {\n        self.maybe_block(hash).expect(\"block hash must exist\")\n    }\n\n    /// Returns the complete protocol state's latest panorama.\n    pub fn panorama(&self) -> &Panorama<C> {\n        &self.panorama\n    }\n\n    /// Returns the leader in the specified time slot.\n    ///\n    /// First the assignment is computed ignoring the `can_propose` flags. 
Only if the selected\n    /// leader's entry is `false`, the computation is repeated, this time with the flagged\n    /// validators excluded. This ensures that once the validator set has been decided, correct\n    /// validators' slots never get reassigned to someone else, even if after the fact someone is\n    /// excluded as a leader.\n    pub fn leader(&self, timestamp: Timestamp) -> ValidatorIndex {\n        self.leader_sequence.leader(timestamp.millis())\n    }\n\n    /// Adds the unit to the protocol state.\n    ///\n    /// The unit must be valid (see `validate_unit`), and its dependencies satisfied.\n    pub(crate) fn add_valid_unit(&mut self, swunit: SignedWireUnit<C>) {\n        let wunit = swunit.wire_unit();\n        let hash = swunit.hash();\n        if self.has_unit(&hash) {\n            warn!(%hash, \"called add_valid_unit twice\");\n            return;\n        }\n        let instance_id = wunit.instance_id;\n        let fork_choice = self.fork_choice(&wunit.panorama).cloned();\n        let (unit, maybe_value) = Unit::new(swunit, fork_choice.as_ref(), self);\n        if let Some(value) = maybe_value {\n            let block = Block::new(fork_choice, value, self);\n            self.blocks.insert(hash, block);\n        }\n        self.add_ping(unit.creator, unit.timestamp);\n        self.units.insert(hash, unit);\n\n        // Update the panorama.\n        let unit = self.unit(&hash);\n        let creator = unit.creator;\n        let new_obs = match (&self.panorama[creator], &unit.panorama[creator]) {\n            (Observation::Faulty, _) => Observation::Faulty,\n            (obs0, obs1) if obs0 == obs1 => Observation::Correct(hash),\n            (Observation::None, _) => panic!(\"missing creator's previous unit\"),\n            (Observation::Correct(hash0), _) => {\n                // We have all dependencies of unit, but it does not cite hash0 as its predecessor,\n                // which is our latest known unit by the creator. 
It must therefore cite an older\n                // unit and so its sequence number must be at most the same as hash0. Hence it is\n                // an equivocation, and to prove that, we only need to provide the other unit with\n                // the same sequence number.\n                let prev0 = self.find_in_swimlane(hash0, unit.seq_number).unwrap();\n                let wunit0 = self.wire_unit(prev0, instance_id).unwrap();\n                let wunit1 = self.wire_unit(&hash, instance_id).unwrap();\n                self.add_evidence(Evidence::Equivocation(wunit0, wunit1));\n                Observation::Faulty\n            }\n        };\n        self.panorama[creator] = new_obs;\n    }\n\n    /// Adds direct evidence proving a validator to be faulty, unless that validators is already\n    /// banned or we already have other direct evidence. This must only be called with valid\n    /// evidence (see `Evidence::validate`). Returns `false` if the evidence was not added because\n    /// the perpetrator is banned or we already have evidence against them.\n    pub(crate) fn add_evidence(&mut self, evidence: Evidence<C>) -> bool {\n        if TODO_ENDORSEMENT_EVIDENCE_DISABLED && matches!(evidence, Evidence::Endorsements { .. 
}) {\n            return false;\n        }\n        let idx = evidence.perpetrator();\n        match self.faults.get(&idx) {\n            Some(&Fault::Banned | &Fault::Direct(_)) => return false,\n            None | Some(&Fault::Indirect) => (),\n        }\n        // TODO: Should use Display, not Debug!\n        trace!(?evidence, \"marking validator #{} as faulty\", idx.0);\n        self.faults.insert(idx, Fault::Direct(evidence));\n        self.panorama[idx] = Observation::Faulty;\n        true\n    }\n\n    /// Adds a set of endorsements to the state.\n    /// If, after adding, we have collected enough endorsements to consider unit _endorsed_,\n    /// it will be *upgraded* to fully endorsed.\n    ///\n    /// Endorsements must be validated before calling this: The endorsers must exist, the\n    /// signatures must be valid and the endorsed unit must be present in `self.units`.\n    pub(crate) fn add_endorsements(&mut self, endorsements: Endorsements<C>) {\n        let uhash = *endorsements.unit();\n        if self.endorsements.contains_key(&uhash) {\n            return; // We already have a sufficient number of endorsements.\n        }\n        info!(\"Received endorsements of {:?}\", uhash);\n        self.incomplete_endorsements\n            .entry(uhash)\n            .or_default()\n            .extend(endorsements.endorsers);\n        let endorsed: Weight = self.incomplete_endorsements[&uhash]\n            .keys()\n            .map(|vidx| self.weight(*vidx))\n            .sum();\n        // Stake required to consider unit to be endorsed.\n        // TODO - remove `allow` once false positive ceases.\n        #[allow(clippy::arithmetic_side_effects)] // False positive on `/ 2`.\n        let threshold = self.total_weight() / 2;\n        if endorsed > threshold {\n            info!(%uhash, \"Unit endorsed by at least 1/2 of validators.\");\n            // Unwrap is safe: We created the map entry above.\n            let mut fully_endorsed = 
self.incomplete_endorsements.remove(&uhash).unwrap();\n            let endorsed_map = self\n                .weights()\n                .keys()\n                .map(|vidx| fully_endorsed.remove(&vidx))\n                .collect();\n            self.endorsements.insert(uhash, endorsed_map);\n        }\n    }\n\n    /// Returns whether this state already includes an endorsement of `uhash` by `vidx`.\n    pub fn has_endorsement(&self, uhash: &C::Hash, vidx: ValidatorIndex) -> bool {\n        self.endorsements\n            .get(uhash)\n            .map(|vmap| vmap[vidx].is_some())\n            .unwrap_or(false)\n            || self\n                .incomplete_endorsements\n                .get(uhash)\n                .map(|ends| ends.contains_key(&vidx))\n                .unwrap_or(false)\n    }\n\n    /// Updates `self.pings` with the given timestamp.\n    pub(crate) fn add_ping(&mut self, creator: ValidatorIndex, timestamp: Timestamp) {\n        self.pings[creator] = self.pings[creator].max(timestamp);\n    }\n\n    /// Returns `true` if the latest timestamp we have is older than the given timestamp.\n    pub fn has_ping(&self, creator: ValidatorIndex, timestamp: Timestamp) -> bool {\n        self.pings\n            .get(creator)\n            .is_some_and(|ping_time| *ping_time >= timestamp)\n    }\n\n    /// Returns whether the validator's latest unit or ping is at most `PING_TIMEOUT` maximum round\n    /// lengths old.\n    pub(crate) fn is_online(&self, vidx: ValidatorIndex, now: Timestamp) -> bool {\n        self.pings.has(vidx)\n            && self.pings[vidx]\n                .saturating_add(self.params.max_round_length().saturating_mul(PING_TIMEOUT))\n                >= now\n    }\n\n    /// Creates new `Evidence` if the new endorsements contain any that conflict with existing\n    /// ones.\n    ///\n    /// Endorsements must be validated before calling this: The endorsers must exist, the\n    /// signatures must be valid and the endorsed unit must be 
present in `self.units`.\n    pub fn find_conflicting_endorsements(\n        &self,\n        endorsements: &Endorsements<C>,\n        instance_id: &C::InstanceId,\n    ) -> Vec<Evidence<C>> {\n        if TODO_ENDORSEMENT_EVIDENCE_DISABLED {\n            return Vec::new();\n        }\n        let uhash = endorsements.unit();\n        let unit = self.unit(uhash);\n        if !self.has_evidence(unit.creator) {\n            return vec![]; // There are no equivocations, so endorsements cannot conflict.\n        }\n\n        // We are only interested in endorsements that we didn't know before and whose validators\n        // we don't already have evidence against.\n        let is_new_endorsement = |&&(vidx, _): &&(ValidatorIndex, _)| {\n            if self.has_evidence(vidx) {\n                false\n            } else if let Some(known_endorsements) = self.endorsements.get(uhash) {\n                known_endorsements[vidx].is_none()\n            } else if let Some(known_endorsements) = self.incomplete_endorsements.get(uhash) {\n                !known_endorsements.contains_key(&vidx)\n            } else {\n                true\n            }\n        };\n        let new_endorsements = endorsements.endorsers.iter().filter(is_new_endorsement);\n\n        // For each new endorser, find a pair of conflicting endorsements, if it exists.\n        // Order the data so that the first unit has a lower or equal sequence number.\n        let conflicting_endorsements = new_endorsements.filter_map(|&(vidx, ref sig)| {\n            // Iterate over all existing endorsements by vidx.\n            let known_endorsements = self.endorsements.iter();\n            let known_incomplete_endorsements = self.incomplete_endorsements.iter();\n            let known_vidx_endorsements = known_endorsements\n                .filter_map(|(uhash2, sigs2)| sigs2[vidx].as_ref().map(|sig2| (uhash2, sig2)));\n            let known_vidx_incomplete_endorsements = known_incomplete_endorsements\n                
.filter_map(|(uhash2, sigs2)| sigs2.get(&vidx).map(|sig2| (uhash2, sig2)));\n            // Find a conflicting one, i.e. one that endorses a unit with the same creator as uhash\n            // but incompatible with uhash. Put the data into a tuple, with the earlier unit first.\n            known_vidx_endorsements\n                .chain(known_vidx_incomplete_endorsements)\n                .find(|(uhash2, _)| {\n                    let unit2 = self.unit(uhash2);\n                    let ee_limit = self.params().endorsement_evidence_limit();\n                    self.unit(uhash2).creator == unit.creator\n                        && !self.is_compatible(uhash, uhash2)\n                        && unit.seq_number.saturating_add(ee_limit) >= unit2.seq_number\n                        && unit2.seq_number.saturating_add(ee_limit) >= unit.seq_number\n                })\n                .map(|(uhash2, sig2)| {\n                    if unit.seq_number <= self.unit(uhash2).seq_number {\n                        (vidx, uhash, sig, uhash2, sig2)\n                    } else {\n                        (vidx, uhash2, sig2, uhash, sig)\n                    }\n                })\n        });\n\n        // Create an Evidence instance for each conflict we found.\n        // The unwraps are safe, because we know that there are units with these hashes.\n        conflicting_endorsements\n            .map(|(vidx, uhash1, sig1, uhash2, sig2)| {\n                let unit1 = self.unit(uhash1);\n                let swimlane2 = self\n                    .swimlane(uhash2)\n                    .skip(1)\n                    .take_while(|(_, pred2)| pred2.seq_number >= unit1.seq_number)\n                    .map(|(pred2_hash, _)| self.wire_unit(pred2_hash, *instance_id).unwrap())\n                    .collect();\n                Evidence::Endorsements {\n                    endorsement1: SignedEndorsement::new(Endorsement::new(*uhash1, vidx), *sig1),\n                    unit1: self.wire_unit(uhash1, 
*instance_id).unwrap(),\n                    endorsement2: SignedEndorsement::new(Endorsement::new(*uhash2, vidx), *sig2),\n                    unit2: self.wire_unit(uhash2, *instance_id).unwrap(),\n                    swimlane2,\n                }\n            })\n            .collect()\n    }\n\n    /// Returns the `SignedWireUnit` with the given hash, if it is present in the state.\n    pub fn wire_unit(\n        &self,\n        hash: &C::Hash,\n        instance_id: C::InstanceId,\n    ) -> Option<SignedWireUnit<C>> {\n        let unit = self.maybe_unit(hash)?.clone();\n        let maybe_block = self.maybe_block(hash);\n        let value = maybe_block.map(|block| block.value.clone());\n        let endorsed = unit.claims_endorsed().cloned().collect();\n        #[allow(clippy::arithmetic_side_effects)] // min_round_length is guaranteed to be > 0.\n        let round_exp =\n            (unit.round_len() / self.params().min_round_length()).trailing_zeros() as u8;\n        let wunit = WireUnit {\n            panorama: unit.panorama.clone(),\n            creator: unit.creator,\n            instance_id,\n            value,\n            seq_number: unit.seq_number,\n            timestamp: unit.timestamp,\n            round_exp,\n            endorsed,\n        };\n        Some(SignedWireUnit {\n            hashed_wire_unit: HashedWireUnit::new_with_hash(wunit, *hash),\n            signature: unit.signature,\n        })\n    }\n\n    /// Returns the fork choice from `pan`'s view, or `None` if there are no blocks yet.\n    ///\n    /// The correct validators' latest units count as votes for the block they point to, as well as\n    /// all of its ancestors. 
At each level the block with the highest score is selected from the\n    /// children of the previously selected block (or from all blocks at height 0), until a block\n    /// is reached that has no children with any votes.\n    pub fn fork_choice<'a>(&'a self, pan: &Panorama<C>) -> Option<&'a C::Hash> {\n        let start = self.clock.start();\n        // Collect all correct votes in a `Tallies` map, sorted by height.\n        let to_entry = |(obs, w): (&Observation<C>, &Weight)| {\n            let bhash = &self.unit(obs.correct()?).block;\n            Some((self.block(bhash).height, bhash, *w))\n        };\n        let mut tallies: Tallies<C> = pan.iter().zip(&self.weights).filter_map(to_entry).collect();\n        loop {\n            // Find the highest block that we know is an ancestor of the fork choice.\n            let (height, bhash) = tallies.find_decided(self)?;\n            // Drop all votes that are not descendants of `bhash`.\n            tallies = tallies.filter_descendants(height, bhash, self);\n            // If there are no blocks left, `bhash` itself is the fork choice. 
Otherwise repeat.\n            if tallies.is_empty() {\n                let end = self.clock.end();\n                let delta = self.clock.delta(start, end).as_nanos();\n                trace!(%delta,\"Time taken for fork-choice to run\");\n                return Some(bhash);\n            }\n        }\n    }\n\n    /// Returns the ancestor of the block with the given `hash`, on the specified `height`, or\n    /// `None` if the block's height is lower than that.\n    /// NOTE: Panics if used on non-proposal hashes.\n    pub fn find_ancestor_proposal<'a>(\n        &'a self,\n        hash: &'a C::Hash,\n        height: u64,\n    ) -> Option<&'a C::Hash> {\n        let block = self.block(hash);\n        if block.height < height {\n            return None;\n        }\n        if block.height == height {\n            return Some(hash);\n        }\n        #[allow(clippy::arithmetic_side_effects)] // block.height > height, otherwise we returned.\n        let diff = block.height - height;\n        // We want to make the greatest step 2^i such that 2^i <= diff.\n        let max_i = log2(diff) as usize;\n        // A block at height > 0 always has at least its parent entry in skip_idx.\n        #[allow(clippy::arithmetic_side_effects)]\n        let i = max_i.min(block.skip_idx.len() - 1);\n        self.find_ancestor_proposal(&block.skip_idx[i], height)\n    }\n\n    /// Returns an error if `swunit` is invalid. 
This can be called even if the dependencies are\n    /// not present yet.\n    pub(crate) fn pre_validate_unit(&self, swunit: &SignedWireUnit<C>) -> Result<(), UnitError> {\n        let wunit = swunit.wire_unit();\n        let creator = wunit.creator;\n        if creator.0 as usize >= self.validator_count() {\n            error!(\"Nonexistent validator should be rejected in Highway::pre_validate_unit.\");\n            return Err(UnitError::Creator); // Should be unreachable.\n        }\n        if Some(&Fault::Banned) == self.faults.get(&creator) {\n            return Err(UnitError::Banned);\n        }\n        let rl_millis = self.params.min_round_length().millis();\n        #[allow(clippy::arithmetic_side_effects)] // We check for overflow before the left shift.\n        if wunit.round_exp as u32 > rl_millis.leading_zeros()\n            || rl_millis << wunit.round_exp > self.params.max_round_length().millis()\n        {\n            return Err(UnitError::RoundLengthGreaterThanMaximum);\n        }\n        if wunit.value.is_none() && !wunit.panorama.has_correct() {\n            return Err(UnitError::MissingBlock);\n        }\n        if wunit.panorama.len() != self.validator_count() {\n            return Err(UnitError::PanoramaLength(wunit.panorama.len()));\n        }\n        if wunit.panorama[creator].is_faulty() {\n            return Err(UnitError::FaultyCreator);\n        }\n        Ok(())\n    }\n\n    /// Returns an error if `swunit` is invalid. 
Must only be called once `pre_validate_unit`\n    /// returned `Ok` and all dependencies have been added to the state.\n    pub(crate) fn validate_unit(&self, swunit: &SignedWireUnit<C>) -> Result<(), UnitError> {\n        let wunit = swunit.wire_unit();\n        let creator = wunit.creator;\n        let panorama = &wunit.panorama;\n        let timestamp = wunit.timestamp;\n        panorama.validate(self)?;\n        if panorama.iter_correct(self).any(|v| v.timestamp > timestamp)\n            || wunit.timestamp < self.params.start_timestamp()\n        {\n            return Err(UnitError::Timestamps);\n        }\n        if wunit.seq_number != panorama.next_seq_num(self, creator) {\n            return Err(UnitError::SequenceNumber);\n        }\n        #[allow(clippy::arithmetic_side_effects)] // We checked for overflow in pre_validate_unit.\n        let round_len =\n            TimeDiff::from_millis(self.params.min_round_length().millis() << wunit.round_exp);\n        let r_id = round_id(timestamp, round_len);\n        let maybe_prev_unit = wunit.previous().map(|vh| self.unit(vh));\n        if let Some(prev_unit) = maybe_prev_unit {\n            if prev_unit.round_len() != round_len {\n                // The round length must not change within a round: Even with respect to the\n                // greater of the two lengths, a round boundary must be between the units.\n                let max_rl = prev_unit.round_len().max(round_len);\n                #[allow(clippy::arithmetic_side_effects)] // max_rl is always greater than 0.\n                if prev_unit.timestamp.millis() / max_rl.millis()\n                    == timestamp.millis() / max_rl.millis()\n                {\n                    return Err(UnitError::RoundLengthChangedWithinRound);\n                }\n            }\n            // There can be at most two units per round: proposal/confirmation and witness.\n            if let Some(prev2_unit) = prev_unit.previous().map(|h2| self.unit(h2)) {\n            
    if prev2_unit.round_id() == r_id {\n                    return Err(UnitError::ThreeUnitsInRound);\n                }\n            }\n        }\n        if ENABLE_ENDORSEMENTS {\n            // All endorsed units from the panorama of this wunit.\n            let endorsements_in_panorama = panorama\n                .iter_correct_hashes()\n                .flat_map(|hash| self.unit(hash).claims_endorsed())\n                .collect::<HashSet<_>>();\n            if endorsements_in_panorama\n                .iter()\n                .any(|&e| !wunit.endorsed.iter().any(|h| h == e))\n            {\n                return Err(UnitError::EndorsementsNotMonotonic);\n            }\n            for hash in &wunit.endorsed {\n                if !wunit.panorama.sees(self, hash) {\n                    return Err(UnitError::EndorsedButUnseen {\n                        hash: format!(\"{:?}\", hash),\n                        wire_unit: format!(\"{:?}\", wunit),\n                    });\n                }\n            }\n        }\n        if wunit.value.is_some() {\n            // If this unit is a block, it must be the first unit in this round, its timestamp must\n            // match the round ID, and the creator must be the round leader.\n            if maybe_prev_unit.is_some_and(|pv| pv.round_id() == r_id)\n                || timestamp != r_id\n                || self.leader(r_id) != creator\n            {\n                return Err(UnitError::NonLeaderBlock(self.leader(r_id)));\n            }\n            // It's not allowed to create a child block of a terminal block.\n            let is_terminal = |hash: &C::Hash| self.is_terminal_block(hash);\n            if self.fork_choice(panorama).is_some_and(is_terminal) {\n                return Err(UnitError::ValueAfterTerminalBlock);\n            }\n        }\n        match self.validate_lnc(creator, panorama, &wunit.endorsed) {\n            None => Ok(()),\n            Some(vidx) => Err(UnitError::LncNaiveCitation(vidx)),\n    
    }\n    }\n\n    /// Returns `true` if the `bhash` is a block that can have no children.\n    pub(crate) fn is_terminal_block(&self, bhash: &C::Hash) -> bool {\n        self.blocks.get(bhash).is_some_and(|block| {\n            block.height.saturating_add(1) >= self.params.end_height()\n                && self.unit(bhash).timestamp >= self.params.end_timestamp()\n        })\n    }\n\n    /// Returns `true` if this is a proposal and the creator is not faulty.\n    pub(super) fn is_correct_proposal(&self, unit: &Unit<C>) -> bool {\n        !self.is_faulty(unit.creator)\n            && self.leader(unit.timestamp) == unit.creator\n            && unit.timestamp == round_id(unit.timestamp, unit.round_len)\n    }\n\n    /// Returns the hash of the message with the given sequence number from the creator of `hash`,\n    /// or `None` if the sequence number is higher than that of the unit with `hash`.\n    pub fn find_in_swimlane<'a>(\n        &'a self,\n        hash: &'a C::Hash,\n        seq_number: u64,\n    ) -> Option<&'a C::Hash> {\n        let unit = self.unit(hash);\n        match unit.seq_number.checked_sub(seq_number) {\n            None => None,          // There is no unit with seq_number in our swimlane.\n            Some(0) => Some(hash), // The sequence number is the one we're looking for.\n            Some(diff) => {\n                // We want to make the greatest step 2^i such that 2^i <= diff.\n                let max_i = log2(diff) as usize; // Log is safe because diff is not zero.\n\n                // Diff is not zero, so the unit has a predecessor and skip_idx is not empty.\n                #[allow(clippy::arithmetic_side_effects)]\n                let i = max_i.min(unit.skip_idx.len() - 1);\n                self.find_in_swimlane(&unit.skip_idx[i], seq_number)\n            }\n        }\n    }\n\n    /// Returns an iterator over units (with hashes) by the same creator, in reverse chronological\n    /// order, starting with the specified unit. 
Panics if no unit with `uhash` exists.\n    pub fn swimlane<'a>(\n        &'a self,\n        uhash: &'a C::Hash,\n    ) -> impl Iterator<Item = (&'a C::Hash, &'a Unit<C>)> {\n        let mut next = Some(uhash);\n        iter::from_fn(move || {\n            let current = next?;\n            let unit = self.unit(current);\n            next = unit.previous();\n            Some((current, unit))\n        })\n    }\n\n    /// Returns an iterator over all hashes of ancestors of the block `bhash`, excluding `bhash`\n    /// itself. Panics if `bhash` is not the hash of a known block.\n    pub fn ancestor_hashes<'a>(&'a self, bhash: &'a C::Hash) -> impl Iterator<Item = &'a C::Hash> {\n        let mut next = self.block(bhash).parent();\n        iter::from_fn(move || {\n            let current = next?;\n            next = self.block(current).parent();\n            Some(current)\n        })\n    }\n\n    /// Returns the number of units received.\n    #[cfg(test)]\n    pub(crate) fn unit_count(&self) -> usize {\n        self.units.len()\n    }\n\n    /// Returns the set of units (by hash) that are endorsed and seen from the panorama.\n    pub fn seen_endorsed(&self, pan: &Panorama<C>) -> BTreeSet<C::Hash> {\n        if !ENABLE_ENDORSEMENTS {\n            return Default::default();\n        };\n        // First we collect all units that were already seen as endorsed by earlier units.\n        let mut result: BTreeSet<C::Hash> = pan\n            .iter_correct_hashes()\n            .flat_map(|hash| self.unit(hash).endorsed.iter().cloned())\n            .collect();\n        // Now add all remaining endorsed units. 
Since the pan.sees check is expensive, do it only\n        // for the ones that are actually new.\n        for hash in self.endorsements.keys() {\n            if !result.contains(hash) && pan.sees(self, hash) {\n                result.insert(*hash);\n            }\n        }\n        result\n    }\n\n    /// Drops all state other than evidence.\n    pub(crate) fn retain_evidence_only(&mut self) {\n        self.units.clear();\n        self.blocks.clear();\n        for obs in self.panorama.iter_mut() {\n            if obs.is_correct() {\n                *obs = Observation::None;\n            }\n        }\n        self.endorsements.clear();\n        self.incomplete_endorsements.clear();\n    }\n\n    /// Validates whether a unit with the given panorama and `endorsed` set satisfies the\n    /// Limited Naivety Criterion (LNC).\n    /// Returns index of the first equivocator that was cited naively in violation of the LNC, or\n    /// `None` if the LNC is satisfied.\n    fn validate_lnc(\n        &self,\n        creator: ValidatorIndex,\n        panorama: &Panorama<C>,\n        endorsed: &BTreeSet<C::Hash>,\n    ) -> Option<ValidatorIndex> {\n        if !ENABLE_ENDORSEMENTS {\n            return None;\n        }\n        let violates_lnc =\n            |eq_idx: &ValidatorIndex| !self.satisfies_lnc_for(creator, panorama, endorsed, *eq_idx);\n        panorama.iter_faulty().find(violates_lnc)\n    }\n\n    /// Returns `true` if there is at most one fork by the validator `eq_idx` that is cited naively\n    /// by a unit with the given panorama and `endorsed` set, or earlier units by the same creator.\n    fn satisfies_lnc_for(\n        &self,\n        creator: ValidatorIndex,\n        panorama: &Panorama<C>,\n        endorsed: &BTreeSet<C::Hash>,\n        eq_idx: ValidatorIndex,\n    ) -> bool {\n        // Find all forks by eq_idx that are cited naively by the panorama itself.\n        // * If it's more than one, return false: the LNC is violated.\n        // * If it's 
none, return true: If the LNC were violated, it would be because of two naive\n        //   citations by creator's earlier units. So the latest of those earlier units would\n        //   already be violating the LNC itself, and thus would not have been added to the state.\n        // * Otherwise store the unique naively cited fork in naive_fork.\n        let mut maybe_naive_fork = None;\n        {\n            // Returns true if any endorsed unit cites the given unit.\n            let seen_by_endorsed = |hash| endorsed.iter().any(|e_hash| self.sees(e_hash, hash));\n\n            // Iterate over all units cited by the panorama.\n            let mut to_visit: Vec<_> = panorama.iter_correct_hashes().collect();\n            // This set is a filter so that units don't get added to to_visit twice.\n            let mut added_to_to_visit: HashSet<_> = to_visit.iter().cloned().collect();\n            while let Some(hash) = to_visit.pop() {\n                if seen_by_endorsed(hash) {\n                    continue; // This unit and everything below is not cited naively.\n                }\n                let unit = self.unit(hash);\n                match &unit.panorama[eq_idx] {\n                    Observation::Correct(eq_hash) => {\n                        // The unit (and everything it cites) can only see a single fork.\n                        // No need to traverse further downward.\n                        if !seen_by_endorsed(eq_hash) {\n                            // The fork is cited naively!\n                            match maybe_naive_fork {\n                                // It's the first naively cited fork we found.\n                                None => maybe_naive_fork = Some(eq_hash),\n                                Some(other_hash) => {\n                                    // If eq_hash is later than other_hash, it is the tip of the\n                                    // same fork. 
If it is earlier, then other_hash is the tip.\n                                    if self.sees_correct(eq_hash, other_hash) {\n                                        maybe_naive_fork = Some(eq_hash);\n                                    } else if !self.sees_correct(other_hash, eq_hash) {\n                                        return false; // We found two incompatible forks!\n                                    }\n                                }\n                            }\n                        }\n                    }\n                    // No forks are cited by this unit. No need to traverse further.\n                    Observation::None => (),\n                    // The unit still sees the equivocator as faulty: We need to traverse further\n                    // down the graph to find all cited forks.\n                    Observation::Faulty => to_visit.extend(\n                        unit.panorama\n                            .iter_correct_hashes()\n                            .filter(|hash| added_to_to_visit.insert(hash)),\n                    ),\n                }\n            }\n        }\n        let naive_fork = match maybe_naive_fork {\n            None => return true, // No forks are cited naively.\n            Some(naive_fork) => naive_fork,\n        };\n\n        // Iterate over all earlier units by creator, and find all forks by eq_idx they\n        // naively cite. 
If any of those forks are incompatible with naive_fork, the LNC is\n        // violated.\n        let mut maybe_pred_hash = panorama[creator].correct();\n        while let Some(pred_hash) = maybe_pred_hash {\n            let pred_unit = self.unit(pred_hash);\n            // Returns true if any endorsed (according to pred_unit) unit cites the given unit.\n            let seen_by_endorsed = |hash| {\n                pred_unit\n                    .endorsed\n                    .iter()\n                    .any(|e_hash| self.sees(e_hash, hash))\n            };\n            // Iterate over all units seen by pred_unit.\n            let mut to_visit = vec![pred_hash];\n            // This set is a filter so that units don't get added to to_visit twice.\n            let mut added_to_to_visit: HashSet<_> = to_visit.iter().cloned().collect();\n            while let Some(hash) = to_visit.pop() {\n                if seen_by_endorsed(hash) {\n                    continue; // This unit and everything below is not cited naively.\n                }\n                let unit = self.unit(hash);\n                match &unit.panorama[eq_idx] {\n                    Observation::Correct(eq_hash) => {\n                        if !seen_by_endorsed(eq_hash) && !self.is_compatible(eq_hash, naive_fork) {\n                            return false;\n                        }\n                    }\n                    // No forks are cited by this unit. 
No need to traverse further.\n                    Observation::None => (),\n                    // The unit still sees the equivocator as faulty: We need to traverse further\n                    // down the graph to find all cited forks.\n                    Observation::Faulty => to_visit.extend(\n                        unit.panorama\n                            .iter_correct_hashes()\n                            .filter(|hash| added_to_to_visit.insert(hash)),\n                    ),\n                }\n            }\n            if !pred_unit.panorama[eq_idx].is_faulty() {\n                // This unit and everything below sees only a single fork of the equivocator. If we\n                // haven't found conflicting naively cited forks yet, there are none.\n                return true;\n            }\n            maybe_pred_hash = pred_unit.previous();\n        }\n        true // No earlier messages, so no conflicting naively cited forks.\n    }\n\n    /// Returns whether the unit with `hash0` sees the one with `hash1` (i.e. `hash0 ≥ hash1`),\n    /// and sees `hash1`'s creator as correct.\n    pub fn sees_correct(&self, hash0: &C::Hash, hash1: &C::Hash) -> bool {\n        hash0 == hash1 || self.unit(hash0).panorama.sees_correct(self, hash1)\n    }\n\n    /// Returns whether the unit with `hash0` sees the one with `hash1` (i.e. 
`hash0 ≥ hash1`).\n    pub fn sees(&self, hash0: &C::Hash, hash1: &C::Hash) -> bool {\n        hash0 == hash1 || self.unit(hash0).panorama.sees(self, hash1)\n    }\n\n    // Returns whether the units with `hash0` and `hash1` see each other or are equal.\n    fn is_compatible(&self, hash0: &C::Hash, hash1: &C::Hash) -> bool {\n        hash0 == hash1\n            || self.unit(hash0).panorama.sees(self, hash1)\n            || self.unit(hash1).panorama.sees(self, hash0)\n    }\n\n    /// Returns the panorama of the confirmation for the leader unit `uhash`.\n    pub fn confirmation_panorama(&self, creator: ValidatorIndex, uhash: &C::Hash) -> Panorama<C> {\n        self.valid_panorama(creator, self.inclusive_panorama(uhash))\n    }\n\n    /// Creates a panorama that is valid for use in `creator`'s next unit, and as close as possible\n    /// to the given one. It is only modified if necessary for validity:\n    /// * Cite `creator`'s previous unit, i.e. don't equivocate.\n    /// * Satisfy the LNC, i.e. 
don't add new naively cited forks.\n    pub fn valid_panorama(&self, creator: ValidatorIndex, mut pan: Panorama<C>) -> Panorama<C> {\n        // Make sure the panorama sees the creator's own previous unit.\n        let maybe_prev_uhash = self.panorama()[creator].correct();\n        if let Some(prev_uhash) = maybe_prev_uhash {\n            if pan[creator].correct() != Some(prev_uhash) {\n                pan = pan.merge(self, &self.inclusive_panorama(prev_uhash));\n            }\n        }\n        let endorsed = self.seen_endorsed(&pan);\n        if self.validate_lnc(creator, &pan, &endorsed).is_none() {\n            return pan;\n        }\n        // `pan` violates the LNC.\n        // Start from the creator's previous unit, mark all faulty\n        // validators as faulty, and add only endorsed units from correct validators.\n        pan = maybe_prev_uhash.map_or_else(\n            || Panorama::new(self.validator_count()),\n            |prev_uhash| self.inclusive_panorama(prev_uhash),\n        );\n        for faulty_v in self.faulty_validators() {\n            pan[faulty_v] = Observation::Faulty;\n        }\n        for endorsed_hash in &endorsed {\n            if !pan.sees_correct(self, endorsed_hash) {\n                pan = pan.merge(self, &self.inclusive_panorama(endorsed_hash));\n            }\n        }\n        pan\n    }\n\n    /// Returns panorama of a unit where latest entry of the creator is that unit's hash.\n    pub fn inclusive_panorama(&self, uhash: &C::Hash) -> Panorama<C> {\n        let unit = self.unit(uhash);\n        let mut pan = unit.panorama.clone();\n        pan[unit.creator] = Observation::Correct(*uhash);\n        pan\n    }\n}\n\n/// Returns the time at which the round with the given timestamp and round length began.\n///\n/// The boundaries of rounds with length `l` are multiples of that length, in\n/// milliseconds since the epoch. 
So the beginning of the current round is the greatest multiple\n/// of `l` that is less or equal to `timestamp`.\npub(crate) fn round_id(timestamp: Timestamp, round_len: TimeDiff) -> Timestamp {\n    if round_len.millis() == 0 {\n        error!(\"called round_id with round_len 0.\");\n        return timestamp;\n    }\n    #[allow(clippy::arithmetic_side_effects)] // Checked for division by 0 above.\n    Timestamp::from((timestamp.millis() / round_len.millis()) * round_len.millis())\n}\n\n/// Returns the base-2 logarithm of `x`, rounded down, i.e. the greatest `i` such that\n/// `2.pow(i) <= x`. If `x == 0`, it returns `0`.\nfn log2(x: u64) -> u32 {\n    // Find the least power of two strictly greater than x and count its trailing zeros.\n    // Then subtract 1 to get the zeros of the greatest power of two less or equal than x.\n    x.saturating_add(1)\n        .checked_next_power_of_two()\n        .unwrap_or(0)\n        .trailing_zeros()\n        .saturating_sub(1)\n}\n"
  },
  {
    "path": "node/src/components/consensus/highway_core/synchronizer/tests.rs",
    "content": "#![allow(clippy::arithmetic_side_effects)]\n\nuse std::collections::BTreeSet;\n\nuse itertools::Itertools;\n\nuse crate::{\n    components::consensus::{\n        highway_core::{\n            highway::{tests::test_validators, ValidVertex},\n            highway_testing::TEST_INSTANCE_ID,\n            state::{tests::*, State},\n        },\n        BlockContext,\n    },\n    types::NodeId,\n};\n\nuse super::*;\n\n#[test]\nfn purge_vertices() {\n    let params = test_params(0);\n    let mut state = State::new(WEIGHTS, params.clone(), vec![], vec![]);\n\n    // We use round exponent 0u8, so a round is 0x10 ms. With seed 0, Carol is the first leader.\n    //\n    // time:  0x00 0x0A 0x1A 0x2A 0x3A\n    //\n    // Carol   c0 — c1 — c2\n    //            \\\n    // Bob          ————————— b0 — b1\n    let c0 = add_unit!(state, CAROL, 0x00, 0u8, 0xA; N, N, N).unwrap();\n    let c1 = add_unit!(state, CAROL, 0x0A, 0u8, None; N, N, c0).unwrap();\n    let c2 = add_unit!(state, CAROL, 0x1A, 0u8, None; N, N, c1).unwrap();\n    let b0 = add_unit!(state, BOB, 0x2A, 0u8, None; N, N, c0).unwrap();\n    let b1 = add_unit!(state, BOB, 0x3A, 0u8, None; N, b0, c0).unwrap();\n\n    // A Highway instance that's just used to create PreValidatedVertex instances below.\n    let util_highway =\n        Highway::<TestContext>::new(TEST_INSTANCE_ID, test_validators(), params.clone(), None);\n\n    // Returns the WireUnit with the specified hash.\n    let unit = |hash: u64| Vertex::Unit(state.wire_unit(&hash, TEST_INSTANCE_ID).unwrap());\n    // Returns the PreValidatedVertex with the specified hash.\n    let pvv = |hash: u64| util_highway.pre_validate_vertex(unit(hash)).unwrap();\n\n    let peer0 = NodeId::from([0; 64]);\n\n    // Create a synchronizer with a 0x20 ms timeout, and a Highway instance.\n    let max_requests_for_vertex = 5;\n    let mut sync = Synchronizer::<TestContext>::new(WEIGHTS.len(), TEST_INSTANCE_ID);\n    let mut highway =\n        
Highway::<TestContext>::new(TEST_INSTANCE_ID, test_validators(), params, None);\n\n    // At time 0x20, we receive c2, b0 and b1 — the latter ahead of their timestamp.\n    // Since c2 is the first entry in the main queue, processing is scheduled.\n    let now = 0x20.into();\n    assert!(matches!(\n        *sync.schedule_add_vertex(peer0, pvv(c2), now),\n        [ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)]\n    ));\n    sync.store_vertex_for_addition_later(unit(b1).timestamp().unwrap(), now, peer0, pvv(b1));\n    sync.store_vertex_for_addition_later(unit(b0).timestamp().unwrap(), now, peer0, pvv(b0));\n\n    // At time 0x21, we receive c1.\n    let now = 0x21.into();\n    assert!(sync.schedule_add_vertex(peer0, pvv(c1), now).is_empty());\n\n    // No new vertices can be added yet, because all are missing dependencies.\n    // The missing dependencies of c1 and c2 are requested.\n    let (maybe_pv, outcomes) =\n        sync.pop_vertex_to_add(&highway, &Default::default(), max_requests_for_vertex);\n    assert!(maybe_pv.is_none());\n    assert_targeted_message(&unwrap_single(outcomes), &peer0, Dependency::Unit(c0));\n\n    // At 0x23, c0 gets enqueued and added.\n    // That puts c1 back into the main queue, since its dependency is satisfied.\n    let now = 0x23.into();\n    let outcomes = sync.schedule_add_vertex(peer0, pvv(c0), now);\n    assert!(\n        matches!(*outcomes, [ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)]),\n        \"unexpected outcomes: {:?}\",\n        outcomes\n    );\n    let (maybe_pv, outcomes) =\n        sync.pop_vertex_to_add(&highway, &Default::default(), max_requests_for_vertex);\n    assert_eq!(Dependency::Unit(c0), maybe_pv.unwrap().vertex().id());\n    assert!(outcomes.is_empty());\n    let vv_c0 = highway.validate_vertex(pvv(c0)).expect(\"c0 is valid\");\n    highway.add_valid_vertex(vv_c0, now);\n    let outcomes = sync.remove_satisfied_deps(&highway);\n    assert!(\n        matches!(*outcomes, 
[ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)]),\n        \"unexpected outcomes: {:?}\",\n        outcomes\n    );\n\n    // At time 0x2A, the vertex b0 moves into the main queue.\n    let now = 0x2A.into();\n    assert!(sync.add_past_due_stored_vertices(now).is_empty());\n\n    // At 0x41, all vertices received at 0x20 are expired, but c1 (received at 0x21) isn't.\n    // This will remove:\n    // * b1: still postponed due to future timestamp\n    // * b0: in the main queue\n    // * c2: waiting for dependency c1 to be added\n    let purge_vertex_timeout = 0x20;\n    #[allow(clippy::arithmetic_side_effects)]\n    sync.purge_vertices((0x41 - purge_vertex_timeout).into());\n\n    // The main queue should now contain only c1. If we remove it, the synchronizer is empty.\n    let (maybe_pv, outcomes) =\n        sync.pop_vertex_to_add(&highway, &Default::default(), max_requests_for_vertex);\n    assert_eq!(Dependency::Unit(c1), maybe_pv.unwrap().vertex().id());\n    assert!(outcomes.is_empty());\n    assert!(sync.is_empty());\n}\n\n#[test]\n/// Test that when a vertex depends on a dependency that has already been synchronized, and is\n/// waiting in the synchronizer queue state, but is not yet added to the protocol state – that we\n/// don't request it again.\nfn do_not_download_synchronized_dependencies() {\n    let params = test_params(0);\n    // A Highway and state instances that are used to create PreValidatedVertex instances below.\n\n    let mut state = State::new(WEIGHTS, params.clone(), vec![], vec![]);\n    let util_highway =\n        Highway::<TestContext>::new(TEST_INSTANCE_ID, test_validators(), params.clone(), None);\n\n    // We use round exponent 0u8, so a round is 0x40 ms. 
With seed 0, Carol is the first leader.\n    //\n    // time:  0x00 0x0A 0x1A 0x2A 0x3A\n    //\n    // Carol   c0 — c1 — c2\n    //                \\\n    // Bob             — b0\n\n    let c0 = add_unit!(state, CAROL, 0x00, 0u8, 0xA; N, N, N).unwrap();\n    let c1 = add_unit!(state, CAROL, 0x0A, 0u8, None; N, N, c0).unwrap();\n    let c2 = add_unit!(state, CAROL, 0x1A, 0u8, None; N, N, c1).unwrap();\n    let b0 = add_unit!(state, BOB, 0x2A, 0u8, None; N, N, c1).unwrap();\n\n    // Returns the WireUnit with the specified hash.\n    let unit = |hash: u64| Vertex::Unit(state.wire_unit(&hash, TEST_INSTANCE_ID).unwrap());\n    // Returns the PreValidatedVertex with the specified hash.\n    let pvv = |hash: u64| util_highway.pre_validate_vertex(unit(hash)).unwrap();\n\n    let peer0 = NodeId::from([0; 64]);\n    let peer1 = NodeId::from([1; 64]);\n\n    // Create a synchronizer with a 0x20 ms timeout, and a Highway instance.\n    let max_requests_for_vertex = 5;\n    let mut sync = Synchronizer::<TestContext>::new(WEIGHTS.len(), TEST_INSTANCE_ID);\n\n    let mut highway =\n        Highway::<TestContext>::new(TEST_INSTANCE_ID, test_validators(), params, None);\n    let now = 0x20.into();\n\n    assert!(matches!(\n        *sync.schedule_add_vertex(peer0, pvv(c2), now),\n        [ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)]\n    ));\n    // `c2` can't be added to the protocol state yet b/c it's missing its `c1` dependency.\n    let (pv, outcomes) =\n        sync.pop_vertex_to_add(&highway, &Default::default(), max_requests_for_vertex);\n    assert!(pv.is_none());\n    assert_targeted_message(&unwrap_single(outcomes), &peer0, Dependency::Unit(c1));\n    // Simulate `c1` being downloaded…\n    let c1_outcomes = sync.schedule_add_vertex(peer0, pvv(c1), now);\n    assert!(matches!(\n        *c1_outcomes,\n        [ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)]\n    ));\n    // `b0` can't be added to the protocol state b/c it's missing its `c1` dependency,\n    // but `c1` 
has already been downloaded so we should not request it again. We will only request\n    // `c0` as that's what `c1` depends on.\n    let (pv, outcomes) =\n        sync.pop_vertex_to_add(&highway, &Default::default(), max_requests_for_vertex);\n    assert!(pv.is_none());\n    assert_targeted_message(&unwrap_single(outcomes), &peer0, Dependency::Unit(c0));\n    // `c1` is now part of the synchronizer's state, we should not try requesting it if other\n    // vertices depend on it.\n    assert!(matches!(\n        *sync.schedule_add_vertex(peer1, pvv(b0), now),\n        [ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)]\n    ));\n    let (pv, outcomes) =\n        sync.pop_vertex_to_add(&highway, &Default::default(), max_requests_for_vertex);\n    assert!(pv.is_none());\n    // `b0` depends on `c1`, that is already in the synchronizer's state, but it also depends on\n    // `c0` transitively that is not yet known. We should request it, even if we had already\n    // done that for `c1`.\n    assert_targeted_message(&unwrap_single(outcomes), &peer1, Dependency::Unit(c0));\n    // \"Download\" the last dependency.\n    let _ = sync.schedule_add_vertex(peer0, pvv(c0), now);\n    // Now, the whole chain can be added to the protocol state.\n    let mut units: BTreeSet<Dependency<TestContext>> = vec![c0, c1, b0, c2]\n        .into_iter()\n        .map(Dependency::Unit)\n        .collect();\n    while let (Some(pv), outcomes) =\n        sync.pop_vertex_to_add(&highway, &Default::default(), max_requests_for_vertex)\n    {\n        // Verify that we don't request any dependency now.\n        assert!(\n            !outcomes\n                .iter()\n                .any(|outcome| matches!(outcome, ProtocolOutcome::CreatedTargetedMessage(_, _))),\n            \"unexpected dependency request {:?}\",\n            outcomes\n        );\n        let pv_dep = pv.vertex().id();\n        assert!(units.remove(&pv_dep), \"unexpected dependency\");\n        match pv_dep {\n            
Dependency::Unit(hash) => {\n                let vv = highway\n                    .validate_vertex(pvv(hash))\n                    .unwrap_or_else(|_| panic!(\"{:?} unit is valid\", hash));\n                highway.add_valid_vertex(vv, now);\n                let _ = sync.remove_satisfied_deps(&highway);\n            }\n            _ => panic!(\"expected unit\"),\n        }\n    }\n    assert!(sync.is_empty());\n}\n\n#[test]\nfn transitive_proposal_dependency() {\n    let params = test_params(0);\n    // A Highway and state instances that are used to create PreValidatedVertex instances below.\n\n    let mut state = State::new(WEIGHTS, params.clone(), vec![], vec![]);\n    let util_highway =\n        Highway::<TestContext>::new(TEST_INSTANCE_ID, test_validators(), params.clone(), None);\n\n    // Alice   a0 — a1\n    //             /  \\\n    // Bob        /    b0\n    //           /\n    // Carol   c0\n\n    let a0 = add_unit!(state, ALICE, 0xA; N, N, N).unwrap();\n    let c0 = add_unit!(state, CAROL, 0xC; N, N, N).unwrap();\n    let a1 = add_unit!(state, ALICE, None; a0, N, c0).unwrap();\n    let b0 = add_unit!(state, BOB, None; a1, N, c0).unwrap();\n\n    // Returns the WireUnit with the specified hash.\n    let unit = |hash: u64| Vertex::Unit(state.wire_unit(&hash, TEST_INSTANCE_ID).unwrap());\n    // Returns the PreValidatedVertex with the specified hash.\n    let pvv = |hash: u64| util_highway.pre_validate_vertex(unit(hash)).unwrap();\n\n    let peer0 = NodeId::from([0; 64]);\n    let peer1 = NodeId::from([1; 64]);\n\n    // Create a synchronizer with a 0x200 ms timeout, and a Highway instance.\n    let max_requests_for_vertex = 5;\n    let mut sync = Synchronizer::<TestContext>::new(WEIGHTS.len(), TEST_INSTANCE_ID);\n\n    let mut highway =\n        Highway::<TestContext>::new(TEST_INSTANCE_ID, test_validators(), params, None);\n    let now = 0x100.into();\n\n    assert!(matches!(\n        *sync.schedule_add_vertex(peer0, pvv(a1), now),\n        
[ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)]\n    ));\n    // `a1` can't be added to the protocol state yet b/c it's missing its `a0` dependency.\n    let (pv, outcomes) =\n        sync.pop_vertex_to_add(&highway, &Default::default(), max_requests_for_vertex);\n    assert!(pv.is_none());\n    assert_targeted_message(&unwrap_single(outcomes), &peer0, Dependency::Unit(a0));\n\n    // \"Download\" and schedule addition of a0.\n    let a0_outcomes = sync.schedule_add_vertex(peer0, pvv(a0), now);\n    assert!(matches!(\n        *a0_outcomes,\n        [ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)]\n    ));\n    // `a0` has no dependencies so we can try adding it to the protocol state.\n    let (maybe_pv, outcomes) =\n        sync.pop_vertex_to_add(&highway, &Default::default(), max_requests_for_vertex);\n    let pv = maybe_pv.expect(\"expected a0 vertex\");\n    assert_eq!(pv.vertex(), &unit(a0));\n    assert!(outcomes.is_empty());\n\n    // `b0` can't be added either b/c it's relying on `a1` and `c0`.\n    assert!(matches!(\n        *sync.schedule_add_vertex(peer1, pvv(b0), now),\n        [ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)]\n    ));\n    let a0_pending_values = {\n        let mut tmp = HashMap::new();\n        let vv = ValidVertex(unit(a0));\n        let proposed_block = ProposedBlock::new(1u32, BlockContext::new(now, Vec::new()));\n        let mut set = HashSet::new();\n        set.insert((vv, peer0));\n        tmp.insert(proposed_block, set);\n        tmp\n    };\n    let (maybe_pv, outcomes) =\n        sync.pop_vertex_to_add(&highway, &a0_pending_values, max_requests_for_vertex);\n    // `peer1` is added as a holder of `a0`'s deploys due to the indirect dependency.\n    let pv = maybe_pv.unwrap();\n    assert!(pv.sender() == &peer1);\n    assert!(pv.vertex() == &unit(a0));\n    // `b0` depends on `a0` transitively but `a0`'s deploys are being downloaded,\n    // so we don't re-request it.\n    assert!(outcomes.is_empty());\n\n    // If we add `a0` 
to the protocol state, `a1`'s dependency is satisfied.\n    // `a1`'s other dependency is `c0`. Since both peers have it we request it from both.\n    let vv = highway.validate_vertex(pvv(a0)).expect(\"a0 is valid\");\n    highway.add_valid_vertex(vv, now);\n    assert!(matches!(\n        *sync.remove_satisfied_deps(&highway),\n        [ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)]\n    ));\n    let (maybe_pv, outcomes) =\n        sync.pop_vertex_to_add(&highway, &Default::default(), max_requests_for_vertex);\n    assert!(maybe_pv.is_none());\n    match &*outcomes {\n        [ProtocolOutcome::CreatedTargetedMessage(msg0_serialized, p0), ProtocolOutcome::CreatedTargetedMessage(msg1_serialized, p1)] =>\n        {\n            let msg0: HighwayMessage<TestContext> = msg0_serialized.deserialize_expect();\n            let msg1: HighwayMessage<TestContext> = msg1_serialized.deserialize_expect();\n            assert_eq!(\n                vec![&peer0, &peer1],\n                vec![p0, p1].into_iter().sorted().collect_vec(),\n                \"expected to request dependency from exactly two different peers\",\n            );\n\n            match (msg0, msg1) {\n                (\n                    HighwayMessage::RequestDependency(_, dep0),\n                    HighwayMessage::RequestDependency(_, dep1),\n                ) => {\n                    assert_eq!(\n                        dep0,\n                        Dependency::Unit(c0),\n                        \"unexpected dependency requested\"\n                    );\n                    assert_eq!(\n                        dep0, dep1,\n                        \"we should have requested the same dependency from two different peers\"\n                    );\n                }\n                other => panic!(\"unexpected HighwayMessage variant {:?}\", other),\n            }\n        }\n        outcomes => panic!(\"unexpected outcomes: {:?}\", outcomes),\n    }\n}\n\nfn unwrap_single<T: Debug>(vec: Vec<T>) -> T {\n    
assert_eq!(\n        vec.len(),\n        1,\n        \"expected single element in the vector {:?}\",\n        vec\n    );\n    vec.into_iter().next().unwrap()\n}\n\nfn assert_targeted_message(\n    outcome: &ProtocolOutcome<TestContext>,\n    peer: &NodeId,\n    expected: Dependency<TestContext>,\n) {\n    match outcome {\n        ProtocolOutcome::CreatedTargetedMessage(raw_msg, peer0) => {\n            assert_eq!(peer, peer0);\n            let msg = raw_msg.deserialize_expect();\n            match msg {\n                HighwayMessage::RequestDependency(_, got) => assert_eq!(got, expected),\n                other => panic!(\"unexpected variant: {:?}\", other),\n            }\n        }\n        _ => panic!(\"unexpected outcome: {:?}\", outcome),\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/highway_core/synchronizer.rs",
    "content": "use std::{\n    collections::{BTreeMap, HashMap, HashSet},\n    fmt::Debug,\n    iter,\n};\n\nuse datasize::DataSize;\nuse itertools::Itertools;\nuse rand::{thread_rng, RngCore};\nuse tracing::{debug, info, trace};\n\nuse casper_types::Timestamp;\n\nuse crate::{\n    components::consensus::{\n        consensus_protocol::{ProposedBlock, ProtocolOutcome, ProtocolOutcomes},\n        era_supervisor::SerializedMessage,\n        protocols::highway::{HighwayMessage, ACTION_ID_VERTEX},\n        traits::Context,\n        utils::ValidatorMap,\n    },\n    types::NodeId,\n};\n\nuse super::highway::{Dependency, Highway, PreValidatedVertex, ValidVertex, Vertex};\n\n#[cfg(test)]\nmod tests;\n\n/// Incoming pre-validated vertices that we haven't added to the protocol state yet, and the\n/// timestamp when we received them.\n#[derive(DataSize, Debug)]\npub(crate) struct PendingVertices<C>(HashMap<PreValidatedVertex<C>, HashMap<NodeId, Timestamp>>)\nwhere\n    C: Context;\n\nimpl<C: Context> Default for PendingVertices<C> {\n    fn default() -> Self {\n        PendingVertices(Default::default())\n    }\n}\n\nimpl<C: Context> PendingVertices<C> {\n    /// Removes expired vertices.\n    fn remove_expired(&mut self, oldest: Timestamp) -> Vec<C::Hash> {\n        let mut removed = vec![];\n        for time_by_sender in self.0.values_mut() {\n            time_by_sender.retain(|_, time_received| *time_received >= oldest);\n        }\n        self.0.retain(|pvv, time_by_peer| {\n            if time_by_peer.is_empty() {\n                removed.extend(pvv.inner().unit_hash());\n                false\n            } else {\n                true\n            }\n        });\n        removed\n    }\n\n    /// Adds a vertex, or updates its timestamp.\n    fn add(&mut self, sender: NodeId, pvv: PreValidatedVertex<C>, time_received: Timestamp) {\n        self.0\n            .entry(pvv)\n            .or_default()\n            .entry(sender)\n            .and_modify(|timestamp| 
*timestamp = (*timestamp).max(time_received))\n            .or_insert(time_received);\n    }\n\n    /// Adds a holder to the vertex that satisfies `dep`.\n    fn add_holder(&mut self, dep: &Dependency<C>, sender: NodeId, time_received: Timestamp) {\n        if let Some((_, holders)) = self.0.iter_mut().find(|(pvv, _)| pvv.inner().id() == *dep) {\n            holders.entry(sender).or_insert(time_received);\n        }\n    }\n\n    /// Adds a vertex, or updates its timestamp.\n    fn push(&mut self, pv: PendingVertex<C>) {\n        self.add(pv.sender, pv.pvv, pv.time_received)\n    }\n\n    fn pop(&mut self) -> Option<PendingVertex<C>> {\n        let pvv = self.0.keys().next()?.clone();\n        let (sender, timestamp, is_empty) = {\n            let time_by_sender = self.0.get_mut(&pvv)?;\n            let sender = *time_by_sender.keys().next()?;\n            let timestamp = time_by_sender.remove(&sender)?;\n            (sender, timestamp, time_by_sender.is_empty())\n        };\n        if is_empty {\n            self.0.remove(&pvv);\n        }\n        Some(PendingVertex::new(sender, pvv, timestamp))\n    }\n\n    /// Returns whether dependency exists in the pending vertices collection.\n    fn contains_dependency(&self, d: &Dependency<C>) -> bool {\n        self.0.keys().any(|pvv| &pvv.inner().id() == d)\n    }\n\n    /// Drops all pending vertices other than evidence.\n    pub(crate) fn retain_evidence_only(&mut self) {\n        self.0.retain(|pvv, _| pvv.inner().is_evidence());\n    }\n\n    /// Returns number of unique vertices pending in the queue.\n    pub(crate) fn len(&self) -> u64 {\n        self.0.len() as u64\n    }\n\n    fn is_empty(&self) -> bool {\n        self.0.is_empty()\n    }\n}\n\nimpl<C: Context> Iterator for PendingVertices<C> {\n    type Item = PendingVertex<C>;\n\n    fn next(&mut self) -> Option<Self::Item> {\n        self.pop()\n    }\n}\n\n/// An incoming pre-validated vertex that we haven't added to the protocol state 
yet.\n#[derive(DataSize, Debug)]\npub(crate) struct PendingVertex<C>\nwhere\n    C: Context,\n{\n    /// The peer who sent it to us.\n    sender: NodeId,\n    /// The pre-validated vertex.\n    pvv: PreValidatedVertex<C>,\n    /// The time when we received it.\n    time_received: Timestamp,\n}\n\nimpl<C: Context> PendingVertex<C> {\n    /// Returns a new pending vertex with the current timestamp.\n    pub(crate) fn new(\n        sender: NodeId,\n        pvv: PreValidatedVertex<C>,\n        time_received: Timestamp,\n    ) -> Self {\n        Self {\n            sender,\n            pvv,\n            time_received,\n        }\n    }\n\n    /// Returns the peer from which we received this vertex.\n    pub(crate) fn sender(&self) -> &NodeId {\n        &self.sender\n    }\n\n    /// Returns the vertex waiting to be added.\n    pub(crate) fn vertex(&self) -> &Vertex<C> {\n        self.pvv.inner()\n    }\n\n    /// Returns the pre-validated vertex.\n    pub(crate) fn pvv(&self) -> &PreValidatedVertex<C> {\n        &self.pvv\n    }\n}\n\nimpl<C: Context> From<PendingVertex<C>> for PreValidatedVertex<C> {\n    fn from(vertex: PendingVertex<C>) -> Self {\n        vertex.pvv\n    }\n}\n\n#[derive(DataSize, Debug)]\npub(crate) struct Synchronizer<C>\nwhere\n    C: Context,\n{\n    /// Incoming vertices we can't add yet because they are still missing a dependency.\n    vertices_awaiting_deps: BTreeMap<Dependency<C>, PendingVertices<C>>,\n    /// The vertices that are scheduled to be processed at a later time.  
The keys of this\n    /// `BTreeMap` are timestamps when the corresponding vector of vertices will be added.\n    vertices_to_be_added_later: BTreeMap<Timestamp, PendingVertices<C>>,\n    /// Vertices that might be ready to add to the protocol state: We are not currently waiting for\n    /// a requested dependency.\n    vertices_no_deps: PendingVertices<C>,\n    /// Instance ID of an era for which this synchronizer is constructed.\n    instance_id: C::InstanceId,\n    /// Keeps track of the lowest/oldest seen unit per validator when syncing.\n    /// Used only for logging.\n    oldest_seen_panorama: ValidatorMap<Option<u64>>,\n    /// Keeps track of the requests we've sent so far and the recipients.\n    /// Used to decide whether we should ask more nodes for a particular dependency.\n    requests_sent: BTreeMap<Dependency<C>, HashSet<NodeId>>,\n    /// Boolean flag indicating whether we're synchronizing current era.\n    pub(crate) current_era: bool,\n}\n\nimpl<C: Context + 'static> Synchronizer<C> {\n    /// Creates a new synchronizer with the specified timeout for pending vertices.\n    pub(crate) fn new(validator_len: usize, instance_id: C::InstanceId) -> Self {\n        Synchronizer {\n            vertices_awaiting_deps: BTreeMap::new(),\n            vertices_to_be_added_later: BTreeMap::new(),\n            vertices_no_deps: Default::default(),\n            oldest_seen_panorama: iter::repeat(None).take(validator_len).collect(),\n            instance_id,\n            requests_sent: BTreeMap::new(),\n            current_era: true,\n        }\n    }\n\n    /// Removes expired pending vertices from the queues, and schedules the next purge.\n    pub(crate) fn purge_vertices(&mut self, oldest: Timestamp) {\n        info!(\"purging synchronizer queues\");\n        let no_deps_expired = self.vertices_no_deps.remove_expired(oldest);\n        trace!(?no_deps_expired, \"expired no dependencies\");\n        self.requests_sent.clear();\n        let 
to_be_added_later_expired =\n            Self::remove_expired(&mut self.vertices_to_be_added_later, oldest);\n        trace!(\n            ?to_be_added_later_expired,\n            \"expired to be added later dependencies\"\n        );\n        let awaiting_deps_expired = Self::remove_expired(&mut self.vertices_awaiting_deps, oldest);\n        trace!(?awaiting_deps_expired, \"expired awaiting dependencies\");\n    }\n\n    // Returns number of elements in the `vertices_to_be_added_later` queue.\n    // Every pending vertex is counted once, even if it has multiple senders.\n    fn vertices_to_be_added_later_len(&self) -> u64 {\n        self.vertices_to_be_added_later\n            .values()\n            .map(|pv| pv.len())\n            .sum()\n    }\n\n    // Returns number of elements in `vertex_deps` queue.\n    fn vertices_awaiting_deps_len(&self) -> u64 {\n        self.vertices_awaiting_deps\n            .values()\n            .map(|pv| pv.len())\n            .sum()\n    }\n\n    // Returns number of elements in `vertices_to_be_added` queue.\n    fn vertices_no_deps_len(&self) -> u64 {\n        self.vertices_no_deps.len()\n    }\n\n    pub(crate) fn log_len(&self) {\n        debug!(\n            era_id = ?self.instance_id,\n            vertices_to_be_added_later = self.vertices_to_be_added_later_len(),\n            vertices_no_deps = self.vertices_no_deps_len(),\n            vertices_awaiting_deps = self.vertices_awaiting_deps_len(),\n            \"synchronizer queue lengths\"\n        );\n        // All units seen have seq_number == 0.\n        let all_lowest = self\n            .oldest_seen_panorama\n            .iter()\n            .all(|entry| entry.map(|seq_num| seq_num == 0).unwrap_or(false));\n        if all_lowest {\n            debug!(\"all seen units while synchronization with seq_num=0\");\n        } else {\n            debug!(oldest_panorama=%self.oldest_seen_panorama, \"oldest seen unit per validator\");\n        }\n    }\n\n    /// Store a 
(pre-validated) vertex which will be added later.  This creates a timer to be sent\n    /// to the reactor. The vertex be added using `Self::add_vertices` when that timer goes off.\n    pub(crate) fn store_vertex_for_addition_later(\n        &mut self,\n        future_timestamp: Timestamp,\n        now: Timestamp,\n        sender: NodeId,\n        pvv: PreValidatedVertex<C>,\n    ) {\n        self.vertices_to_be_added_later\n            .entry(future_timestamp)\n            .or_default()\n            .add(sender, pvv, now);\n    }\n\n    /// Schedules calls to `add_vertex` on any vertices in `vertices_to_be_added_later` which are\n    /// scheduled for after the given `transpired_timestamp`.  In general the specified `timestamp`\n    /// is approximately `Timestamp::now()`.  Vertices keyed by timestamps chronologically before\n    /// `transpired_timestamp` should all be added.\n    pub(crate) fn add_past_due_stored_vertices(\n        &mut self,\n        timestamp: Timestamp,\n    ) -> ProtocolOutcomes<C> {\n        let mut results = vec![];\n        let past_due_timestamps: Vec<Timestamp> = self\n            .vertices_to_be_added_later\n            .range(..=timestamp) // Inclusive range\n            .map(|(past_due_timestamp, _)| past_due_timestamp.to_owned())\n            .collect();\n        for past_due_timestamp in past_due_timestamps {\n            if let Some(vertices_to_add) =\n                self.vertices_to_be_added_later.remove(&past_due_timestamp)\n            {\n                results.extend(self.schedule_add_vertices(vertices_to_add))\n            }\n        }\n        results\n    }\n\n    /// Schedules a vertex to be added to the protocol state.\n    pub(crate) fn schedule_add_vertex(\n        &mut self,\n        sender: NodeId,\n        pvv: PreValidatedVertex<C>,\n        now: Timestamp,\n    ) -> ProtocolOutcomes<C> {\n        self.update_last_seen(&pvv);\n        let pv = PendingVertex::new(sender, pvv, now);\n        
self.schedule_add_vertices(iter::once(pv))\n    }\n\n    fn update_last_seen(&mut self, pvv: &PreValidatedVertex<C>) {\n        let v = pvv.inner();\n        if let (Some(v_id), Some(seq_num)) = (v.creator(), v.unit_seq_number()) {\n            let prev_seq_num = self.oldest_seen_panorama[v_id].unwrap_or(u64::MAX);\n            self.oldest_seen_panorama[v_id] = Some(prev_seq_num.min(seq_num));\n        }\n    }\n\n    /// Moves all vertices whose known missing dependency is now satisfied into the\n    /// `vertices_to_be_added` queue.\n    pub(crate) fn remove_satisfied_deps(&mut self, highway: &Highway<C>) -> ProtocolOutcomes<C> {\n        let satisfied_deps = self\n            .vertices_awaiting_deps\n            .keys()\n            .filter(|dep| highway.has_dependency(dep))\n            .cloned()\n            .collect_vec();\n        // Safe to unwrap: We know the keys exist.\n        // TODO: Replace with BTreeMap::drain_filter once stable.\n        let pvs = satisfied_deps\n            .into_iter()\n            .flat_map(|dep| {\n                self.requests_sent.remove(&dep);\n                self.vertices_awaiting_deps.remove(&dep).unwrap()\n            })\n            .collect_vec();\n        self.schedule_add_vertices(pvs)\n    }\n\n    /// Pops and returns the next entry from `vertices_to_be_added` that is not yet in the protocol\n    /// state. 
Also returns a `ProtocolOutcome` that schedules the next action to add a vertex,\n    /// unless the queue is empty, and `ProtocolOutcome`s to request missing dependencies.\n    pub(crate) fn pop_vertex_to_add(\n        &mut self,\n        highway: &Highway<C>,\n        pending_values: &HashMap<ProposedBlock<C>, HashSet<(ValidVertex<C>, NodeId)>>,\n        max_requests_for_vertex: usize,\n    ) -> (Option<PendingVertex<C>>, ProtocolOutcomes<C>) {\n        let mut outcomes = Vec::new();\n        // Get the next vertex to be added; skip the ones that are already in the protocol state,\n        // and the ones that are still missing dependencies.\n        loop {\n            let pv = match self.vertices_no_deps.pop() {\n                None => return (None, outcomes),\n                Some(pv) if highway.has_vertex(pv.vertex()) => continue,\n                Some(pv) => pv,\n            };\n            if let Some(dep) = highway.missing_dependency(pv.pvv()) {\n                let sender = *pv.sender();\n                let time_received = pv.time_received;\n                // Find the first dependency that `pv` needs that we haven't synchronized yet\n                // and request it from the sender of `pv`. Since it relies on it, it should have\n                // it as well.\n                let transitive_dependency =\n                    self.find_transitive_dependency(dep.clone(), &sender, time_received);\n                if self\n                    .vertices_no_deps\n                    .contains_dependency(&transitive_dependency)\n                {\n                    // `dep` is already downloaded and waiting in the synchronizer queue to be\n                    // added, we don't have to request it again. Add the `pv`\n                    // back to the queue so that it can be retried later. 
`dep` does not wait for\n                    // any of the dependencies currently so it should be retried soon.\n                    self.add_missing_dependency(dep.clone(), pv);\n                    continue;\n                }\n                // We are still missing a dependency. Store the vertex in the map and request\n                // the dependency from the sender.\n                // Make `pv` depend on the direct dependency `dep` and not `transitive_dependency`\n                // since there's a higher chance of adding `pv` to the protocol\n                // state after `dep` is added, rather than `transitive_dependency`.\n                self.add_missing_dependency(dep.clone(), pv);\n                // If we already have the dependency and it is a proposal that is currently being\n                // handled by the block validator, and this sender is already known as a source,\n                // do nothing.\n                if pending_values\n                    .values()\n                    .flatten()\n                    .any(|(vv, s)| vv.inner().id() == transitive_dependency && s == &sender)\n                {\n                    continue;\n                }\n                // If we already have the dependency and it is a proposal that is currently being\n                // handled by the block validator, and this sender is not yet known as a source,\n                // we return the proposal as if this sender had sent it to us, so they get added.\n                if let Some((vv, _)) = pending_values\n                    .values()\n                    .flatten()\n                    .find(|(vv, _)| vv.inner().id() == transitive_dependency)\n                {\n                    debug!(\n                        dependency = ?transitive_dependency, %sender,\n                        \"adding sender as a source for proposal\"\n                    );\n                    let dep_pv = PendingVertex::new(sender, vv.clone().into(), time_received);\n    
                // We found the next vertex to add.\n                    if !self.vertices_no_deps.is_empty() {\n                        // There are still vertices in the queue: schedule next call.\n                        outcomes.push(ProtocolOutcome::QueueAction(ACTION_ID_VERTEX));\n                    }\n                    return (Some(dep_pv), outcomes);\n                }\n                // If we have already requested the dependency from this peer, or from the maximum\n                // number of peers, do nothing.\n                let entry = self\n                    .requests_sent\n                    .entry(transitive_dependency.clone())\n                    .or_default();\n                if entry.len() >= max_requests_for_vertex || !entry.insert(sender) {\n                    continue;\n                }\n                // Otherwise request the missing dependency from the sender.\n                let uuid = thread_rng().next_u64();\n                debug!(?uuid, dependency = ?transitive_dependency, %sender, \"requesting dependency\");\n                let msg = HighwayMessage::RequestDependency(uuid, transitive_dependency);\n                outcomes.push(ProtocolOutcome::CreatedTargetedMessage(\n                    SerializedMessage::from_message(&msg),\n                    sender,\n                ));\n                continue;\n            }\n            // We found the next vertex to add.\n            if !self.vertices_no_deps.is_empty() {\n                // There are still vertices in the queue: schedule next call.\n                outcomes.push(ProtocolOutcome::QueueAction(ACTION_ID_VERTEX));\n            }\n            return (Some(pv), outcomes);\n        }\n    }\n\n    // Finds the highest missing dependency (i.e. 
one that we are waiting to be downloaded) and\n    // returns it, if any.\n    fn find_transitive_dependency(\n        &mut self,\n        mut missing_dependency: Dependency<C>,\n        sender: &NodeId,\n        time_received: Timestamp,\n    ) -> Dependency<C> {\n        // If `missing_dependency` is already downloaded and waiting for its dependency to be\n        // resolved, we will follow that dependency until we find \"the bottom\" of the\n        // chain – when there are no more known dependency requests scheduled,\n        // and we request the last one in the chain.\n        while let Some((next_missing, pvs)) = self\n            .vertices_awaiting_deps\n            .iter_mut()\n            .find(|(_, pvs)| pvs.contains_dependency(&missing_dependency))\n        {\n            pvs.add_holder(&missing_dependency, *sender, time_received);\n            missing_dependency = next_missing.clone();\n        }\n        missing_dependency\n    }\n\n    /// Adds a vertex with a known missing dependency to the queue.\n    fn add_missing_dependency(&mut self, dep: Dependency<C>, pv: PendingVertex<C>) {\n        self.vertices_awaiting_deps.entry(dep).or_default().push(pv)\n    }\n\n    #[cfg(test)]\n    /// Returns `true` if no vertices are in the queues.\n    pub(crate) fn is_empty(&self) -> bool {\n        self.vertices_awaiting_deps.is_empty()\n            && self.vertices_no_deps.is_empty()\n            && self.vertices_to_be_added_later.is_empty()\n    }\n\n    /// Returns `true` if there are any vertices waiting for the specified dependency.\n    pub(crate) fn is_dependency(&self, dep: &Dependency<C>) -> bool {\n        self.vertices_awaiting_deps.contains_key(dep)\n    }\n\n    /// Drops all vertices that (directly or indirectly) have the specified dependencies, and\n    /// returns the set of their senders. 
If the specified dependencies are known to be invalid,\n    /// those senders must be faulty.\n    pub(crate) fn invalid_vertices(&mut self, mut vertices: Vec<Dependency<C>>) -> HashSet<NodeId> {\n        let mut senders = HashSet::new();\n        while !vertices.is_empty() {\n            let (new_vertices, new_senders) = self.do_drop_dependent_vertices(vertices);\n            vertices = new_vertices;\n            senders.extend(new_senders);\n        }\n        senders\n    }\n\n    /// Drops all pending vertices other than evidence.\n    pub(crate) fn retain_evidence_only(&mut self) {\n        self.vertices_awaiting_deps.clear();\n        self.vertices_to_be_added_later.clear();\n        self.vertices_no_deps.retain_evidence_only();\n        self.requests_sent.clear();\n    }\n\n    /// Schedules vertices to be added to the protocol state.\n    fn schedule_add_vertices<T>(&mut self, pending_vertices: T) -> ProtocolOutcomes<C>\n    where\n        T: IntoIterator<Item = PendingVertex<C>>,\n    {\n        let was_empty = self.vertices_no_deps.is_empty();\n        for pv in pending_vertices {\n            self.vertices_no_deps.push(pv);\n        }\n        if was_empty && !self.vertices_no_deps.is_empty() {\n            vec![ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)]\n        } else {\n            Vec::new()\n        }\n    }\n\n    /// Drops all vertices that have the specified direct dependencies, and returns their IDs and\n    /// senders.\n    fn do_drop_dependent_vertices(\n        &mut self,\n        vertices: Vec<Dependency<C>>,\n    ) -> (Vec<Dependency<C>>, HashSet<NodeId>) {\n        // collect the vertices that depend on the ones we got in the argument and their senders\n        vertices\n            .into_iter()\n            // filtering by is_unit, so that we don't drop vertices depending on invalid evidence\n            // or endorsements - we can still get valid ones from someone else and eventually\n            // satisfy the dependency\n         
   .filter(|dep| dep.is_unit())\n            .flat_map(|vertex| self.vertices_awaiting_deps.remove(&vertex))\n            .flatten()\n            .map(|pv| (pv.pvv.inner().id(), pv.sender))\n            .unzip()\n    }\n\n    /// Removes all expired entries from a `BTreeMap` of `Vec`s.\n    fn remove_expired<T: Ord + Clone>(\n        map: &mut BTreeMap<T, PendingVertices<C>>,\n        oldest: Timestamp,\n    ) -> Vec<C::Hash> {\n        let mut expired = vec![];\n        for pvs in map.values_mut() {\n            expired.extend(pvs.remove_expired(oldest));\n        }\n        let keys = map\n            .iter()\n            .filter(|(_, pvs)| pvs.is_empty())\n            .map(|(key, _)| key.clone())\n            .collect_vec();\n        for key in keys {\n            map.remove(&key);\n        }\n        expired\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/highway_core/test_macros.rs",
    "content": "//! Macros for concise test setup.\n\n/// Creates a panorama from a list of either observations or unit hashes. Unit hashes are converted\n/// to `Correct` observations.\nmacro_rules! panorama {\n    ($($obs:expr),*) => {{\n        use crate::components::consensus::highway_core::state::Panorama;\n\n        Panorama::from(vec![$($obs.into()),*])\n    }};\n}\n\n/// Creates a unit, adds it to `$state` and returns its hash.\n/// Returns an error if unit addition fails.\n///\n/// The short variant is for tests that don't care about timestamps and round lengths: It\n/// automatically picks reasonable values for those.\nmacro_rules! add_unit {\n    ($state: ident, $creator: expr, $val: expr; $($obs:expr),*) => {{\n        add_unit!($state, $creator, $val; $($obs),*;)\n    }};\n    ($state: ident, $creator: expr, $val: expr; $($obs:expr),*; $($ends:expr),*) => {{\n        #[allow(unused_imports)] // These might be already imported at the call site.\n        use crate::{\n            components::consensus::highway_core::{\n                state::{self, tests::TestSecret},\n                highway::{SignedWireUnit, WireUnit},\n                highway_testing::TEST_INSTANCE_ID,\n            },\n        };\n\n        #[allow(unused_imports)] // These might be already imported at the call site.\n        use casper_types::{TimeDiff, Timestamp};\n\n        let creator = $creator;\n        let panorama = panorama!($($obs),*);\n        let seq_number = panorama.next_seq_num(&$state, creator);\n        let maybe_parent_hash = panorama[creator].correct();\n        // Use our most recent round length, or the configured initial one.\n        let r_len = maybe_parent_hash.map_or_else(\n            || $state.params().init_round_len(),\n            |vh| $state.unit(vh).round_len(),\n        );\n        let value = Option::from($val);\n        // At most two units per round are allowed.\n        let two_units_limit = maybe_parent_hash\n            .and_then(|ph| 
$state.unit(ph).previous())\n            .map(|pph| $state.unit(pph))\n            .map(|unit| unit.round_id() + unit.round_len());\n        // And our timestamp must not be less than any justification's.\n        let mut timestamp = panorama\n            .iter_correct(&$state)\n            .map(|unit| unit.timestamp + TimeDiff::from_millis(1))\n            .chain(two_units_limit)\n            .max()\n            .unwrap_or($state.params().start_timestamp());\n        // If this is a block: Find the next time we're a leader.\n        if value.is_some() {\n            timestamp = state::round_id(timestamp + r_len - TimeDiff::from_millis(1), r_len);\n            while $state.leader(timestamp) != creator {\n                timestamp += r_len;\n            }\n        }\n        let round_exp = (r_len / $state.params().min_round_length()).trailing_zeros() as u8;\n        let wunit = WireUnit {\n            panorama,\n            creator,\n            instance_id: TEST_INSTANCE_ID,\n            value,\n            seq_number,\n            timestamp,\n            round_exp,\n            endorsed: vec![$($ends),*].into_iter().collect(),\n        };\n        let hwunit = wunit.into_hashed();\n        let hash = hwunit.hash();\n        let swunit = SignedWireUnit::new(hwunit, &TestSecret(($creator).0));\n        $state.add_unit(swunit).map(|()| hash)\n    }};\n    ($state: ident, $creator: expr, $time: expr, $round_exp: expr, $val: expr; $($obs:expr),*) => {{\n        add_unit!($state, $creator, $time, $round_exp, $val; $($obs),*; std::collections::BTreeSet::new())\n    }};\n    ($state: ident, $creator: expr, $time: expr, $round_exp: expr, $val: expr; $($obs:expr),*; $($ends:expr),*) => {{\n        use crate::components::consensus::highway_core::{\n            state::tests::TestSecret,\n            highway::{SignedWireUnit, WireUnit},\n            highway_testing::TEST_INSTANCE_ID,\n        };\n\n        let creator = $creator;\n        let panorama = 
panorama!($($obs),*);\n        let seq_number = panorama.next_seq_num(&$state, creator);\n        let wunit = WireUnit {\n            panorama,\n            creator,\n            instance_id: TEST_INSTANCE_ID,\n            value: ($val).into(),\n            seq_number,\n            timestamp: ($time).into(),\n            round_exp: $round_exp,\n            endorsed: $($ends.into()),*\n        };\n        let hwunit = wunit.into_hashed();\n        let hash = hwunit.hash();\n        let swunit = SignedWireUnit::new(hwunit, &TestSecret(($creator).0));\n        $state.add_unit(swunit).map(|()| hash)\n    }};\n}\n\n/// Creates an endorsement of `vote` by `creator` and adds it to the state.\nmacro_rules! endorse {\n    ($state: ident, $vote: expr; $($creators: expr),*) => {\n        let creators = vec![$($creators.into()),*];\n        for creator in creators.into_iter() {\n            endorse!($state, creator, $vote);\n        }\n    };\n    ($state: ident, $creator: expr, $vote: expr) => {{\n        use crate::components::consensus::highway_core::endorsement::{\n            Endorsement, SignedEndorsement,\n        };\n\n        let endorsement: Endorsement<TestContext> = Endorsement::new($vote, ($creator));\n        let signature = TestSecret(($creator).0).sign(&endorsement.hash());\n        let endorsements = SignedEndorsement::new(endorsement, signature).into();\n        let evidence = $state.find_conflicting_endorsements(&endorsements, &TEST_INSTANCE_ID);\n        $state.add_endorsements(endorsements);\n        for ev in evidence {\n            $state.add_evidence(ev);\n        }\n    }};\n}\n"
  },
  {
    "path": "node/src/components/consensus/highway_core.rs",
    "content": "//! # Highway\n//!\n//! The core logic of Casper' Highway consensus protocol.\n//!\n//! At the center of Highway are:\n//! * the _protocol state_, a grow-only data structure which can be considered a directed acyclic\n//!   graph (DAG), and needs to be continually synchronized among the participating nodes,\n//! * rules for the active participants — the _validators_ — to create and add new vertices, and\n//! * a finality detector that provides a criterion to consider a block \"finalized\". Finalized\n//!   blocks are guaranteed to remain finalized as the DAG grows further, unless too many validators\n//!   are malicious.\n//!\n//! It's not a complete protocol. To implement permissioned consensus, several components must be\n//! added:\n//! * Networking, serialization and cryptographic primitives for signing and hashing.\n//! * A _synchronizer_ that exchanges messages with other participating nodes to exchange their DAG\n//!   vertices and ensure that each vertex becomes eventually known to every node.\n//! * Semantics for the consensus values, which can e.g. represent token transfers, or programs to\n//!   be executed in a virtual machine for a smart contract platform.\n//! * Signing of finalized blocks, as a finality proof to third parties/clients.\n//!\n//! Note that consensus values should be small. If they represent a lot of data, e.g. lists of\n//! complex transactions, they should not be passed into `highway_core` directly. Instead, the\n//! consensus value should be the list's hash.\n//!\n//! Permissioned consensus protocols can also be used in a _permissionless_ Proof-of-Stake context,\n//! or with some other governance system that can add and remove validators, by starting a new\n//! 
protocol instance whenever the set of validators changes.\n\n// This needs to come before the other modules, so the macros are available everywhere.\n#[cfg(test)]\n#[macro_use]\nmod test_macros;\n\npub(crate) mod active_validator;\npub mod finality_detector;\npub mod highway;\npub(crate) mod state;\npub(super) mod synchronizer;\n\nmod endorsement;\nmod evidence;\n#[cfg(test)]\npub(crate) mod highway_testing;\n\npub use state::{Observation, Panorama, State};\n\n// Enables the endorsement mechanism.\nconst ENABLE_ENDORSEMENTS: bool = false;\n"
  },
  {
    "path": "node/src/components/consensus/leader_sequence.rs",
    "content": "use datasize::DataSize;\nuse rand::{Rng, SeedableRng};\nuse rand_chacha::ChaCha8Rng;\nuse serde::{Deserialize, Serialize};\nuse tracing::error;\n\nuse crate::components::consensus::utils::{ValidatorIndex, ValidatorMap, Weight};\n\n/// A pseudorandom sequence of validator indices, distributed by weight.\n#[derive(Debug, Clone, DataSize, Serialize, Deserialize)]\npub(crate) struct LeaderSequence {\n    /// Cumulative validator weights: Entry `i` contains the sum of the weights of validators `0`\n    /// through `i`.\n    cumulative_w: ValidatorMap<Weight>,\n    /// Cumulative validator weights, but with the weight of banned validators set to `0`.\n    cumulative_w_leaders: ValidatorMap<Weight>,\n    /// This is `false` for validators who have been excluded from the sequence.\n    leaders: ValidatorMap<bool>,\n    /// The PRNG seed.\n    seed: u64,\n}\n\nimpl LeaderSequence {\n    pub(crate) fn new(\n        seed: u64,\n        weights: &ValidatorMap<Weight>,\n        leaders: ValidatorMap<bool>,\n    ) -> LeaderSequence {\n        let sums = |mut sums: Vec<Weight>, w: Weight| {\n            let sum = sums.last().copied().unwrap_or(Weight(0));\n            sums.push(sum.checked_add(w).expect(\"total weight must be < 2^64\"));\n            sums\n        };\n        let cumulative_w = ValidatorMap::from(weights.iter().copied().fold(vec![], sums));\n        assert!(\n            *cumulative_w.as_ref().last().unwrap() > Weight(0),\n            \"total weight must not be zero\"\n        );\n        let cumulative_w_leaders = weights\n            .enumerate()\n            .map(|(idx, weight)| if leaders[idx] { *weight } else { Weight(0) })\n            .fold(vec![], sums)\n            .into();\n        LeaderSequence {\n            cumulative_w,\n            cumulative_w_leaders,\n            leaders,\n            seed,\n        }\n    }\n\n    /// Returns the leader in the specified slot.\n    ///\n    /// First the assignment is computed ignoring the 
`leaders` flags. Only if the selected\n    /// leader's entry is `false`, the computation is repeated, this time with the flagged\n    /// validators excluded. This ensures that once the validator set has been decided, correct\n    /// validators' slots never get reassigned to someone else, even if after the fact someone is\n    /// excluded as a leader.\n    pub(crate) fn leader(&self, slot: u64) -> ValidatorIndex {\n        // The binary search cannot return None; if it does, it's a programming error. In that case,\n        // we want the tests to panic but production to pick a default.\n        let panic_or_0 = || {\n            if cfg!(test) {\n                panic!(\"random number out of range\");\n            } else {\n                error!(\"random number out of range\");\n                ValidatorIndex(0)\n            }\n        };\n        let seed = self.seed.wrapping_add(slot);\n        // We select a random one out of the `total_weight` weight units, starting numbering at 1.\n        let r = Weight(leader_prng(self.total_weight().0, seed));\n        // The weight units are subdivided into intervals that belong to some validator.\n        // `cumulative_w[i]` denotes the last weight unit that belongs to validator `i`.\n        // `binary_search` returns the first `i` with `cumulative_w[i] >= r`, i.e. the validator\n        // who owns the randomly selected weight unit.\n        let leader_index = self\n            .cumulative_w\n            .binary_search(&r)\n            .unwrap_or_else(panic_or_0);\n        if self.leaders[leader_index] {\n            return leader_index;\n        }\n        // If the selected leader is excluded, we reassign the slot to someone else. 
This time we\n        // consider only the non-banned validators.\n        let total_w_leaders = *self.cumulative_w_leaders.as_ref().last().unwrap();\n        let r = Weight(leader_prng(total_w_leaders.0, seed.wrapping_add(1)));\n        self.cumulative_w_leaders\n            .binary_search(&r)\n            .unwrap_or_else(panic_or_0)\n    }\n\n    /// Returns the sum of all validators' voting weights.\n    pub(crate) fn total_weight(&self) -> Weight {\n        *self\n            .cumulative_w\n            .as_ref()\n            .last()\n            .expect(\"weight list cannot be empty\")\n    }\n}\n\n/// Returns a pseudorandom `u64` between `1` and `upper` (inclusive).\nfn leader_prng(upper: u64, seed: u64) -> u64 {\n    ChaCha8Rng::seed_from_u64(seed)\n        .gen_range(0..upper)\n        .saturating_add(1)\n}\n\n/// Returns a seed that with the given weights results in the desired leader sequence.\n#[cfg(test)]\npub(crate) fn find_seed(\n    seq: &[ValidatorIndex],\n    weights: &ValidatorMap<Weight>,\n    leaders: &ValidatorMap<bool>,\n) -> u64 {\n    for seed in 0..1000 {\n        let ls = LeaderSequence::new(seed, weights, leaders.clone());\n        if seq\n            .iter()\n            .enumerate()\n            .all(|(slot, &v_idx)| ls.leader(slot as u64) == v_idx)\n        {\n            return seed;\n        }\n    }\n    panic!(\"No suitable seed for leader sequence found\");\n}\n\n#[test]\nfn test_leader_prng() {\n    use rand::RngCore;\n\n    let mut rng = crate::new_rng();\n\n    // Repeat a few times to make it likely that the inner loop runs more than once.\n    for _ in 0..10 {\n        let upper = rng.gen_range(1..u64::MAX);\n        let seed = rng.next_u64();\n\n        // This tests that the rand crate's gen_range implementation, which is used in\n        // leader_prng, doesn't change, and uses this algorithm:\n        // https://github.com/rust-random/rand/blob/73befa480c58dd0461da5f4469d5e04c564d4de3/src/distributions/uniform.rs#L515\n    
    let mut prng = ChaCha8Rng::seed_from_u64(seed);\n        let zone = upper << upper.leading_zeros(); // A multiple of upper that fits into a u64.\n        let expected = loop {\n            // Multiply a random u64 by upper. This is between 0 and u64::MAX * upper.\n            let prod = (prng.next_u64() as u128) * (upper as u128);\n            // So prod >> 64 is between 0 and upper - 1. Each interval from (N << 64) to\n            // (N << 64) + zone contains the same number of such values.\n            // If the value is in such an interval, return N + 1; otherwise retry.\n            if (prod as u64) < zone {\n                break (prod >> 64) as u64 + 1;\n            }\n        };\n\n        assert_eq!(expected, leader_prng(upper, seed));\n    }\n}\n\n#[test]\nfn test_leader_prng_values() {\n    // Test a few concrete values, to detect if the ChaCha8Rng impl changes.\n    assert_eq!(12578764544318200737, leader_prng(u64::MAX, 42));\n    assert_eq!(12358540700710939054, leader_prng(u64::MAX, 1337));\n    assert_eq!(4134160578770126600, leader_prng(u64::MAX, 0x1020304050607));\n}\n"
  },
  {
    "path": "node/src/components/consensus/metrics.rs",
    "content": "use prometheus::{Gauge, IntGauge, Registry};\n\nuse casper_types::Timestamp;\n\nuse crate::{types::FinalizedBlock, unregister_metric};\n\n/// Network metrics to track Consensus\n#[derive(Debug)]\npub(super) struct Metrics {\n    /// Gauge to track time between proposal and finalization.\n    finalization_time: Gauge,\n    /// Amount of finalized blocks.\n    finalized_block_count: IntGauge,\n    /// Timestamp of the most recently accepted block payload.\n    time_of_last_proposed_block: IntGauge,\n    /// Timestamp of the most recently finalized block.\n    time_of_last_finalized_block: IntGauge,\n    /// The current era.\n    pub(super) consensus_current_era: IntGauge,\n    /// Registry component.\n    registry: Registry,\n}\n\nimpl Metrics {\n    pub(super) fn new(registry: &Registry) -> Result<Self, prometheus::Error> {\n        let finalization_time = Gauge::new(\n            \"finalization_time\",\n            \"the amount of time, in milliseconds, between proposal and finalization of the latest finalized block\",\n        )?;\n        let finalized_block_count =\n            IntGauge::new(\"amount_of_blocks\", \"the number of blocks finalized so far\")?;\n        let time_of_last_proposed_block = IntGauge::new(\n            \"time_of_last_block_payload\",\n            \"timestamp of the most recently accepted block payload\",\n        )?;\n        let time_of_last_finalized_block = IntGauge::new(\n            \"time_of_last_finalized_block\",\n            \"timestamp of the most recently finalized block\",\n        )?;\n        let consensus_current_era =\n            IntGauge::new(\"consensus_current_era\", \"the current era in consensus\")?;\n        registry.register(Box::new(finalization_time.clone()))?;\n        registry.register(Box::new(finalized_block_count.clone()))?;\n        registry.register(Box::new(consensus_current_era.clone()))?;\n        registry.register(Box::new(time_of_last_proposed_block.clone()))?;\n        
registry.register(Box::new(time_of_last_finalized_block.clone()))?;\n        Ok(Metrics {\n            finalization_time,\n            finalized_block_count,\n            time_of_last_proposed_block,\n            time_of_last_finalized_block,\n            consensus_current_era,\n            registry: registry.clone(),\n        })\n    }\n\n    /// Updates the metrics based on a newly finalized block.\n    pub(super) fn finalized_block(&mut self, finalized_block: &FinalizedBlock) {\n        let time_since_block_payload = finalized_block.timestamp.elapsed().millis() as f64;\n        self.finalization_time.set(time_since_block_payload);\n        self.time_of_last_finalized_block\n            .set(finalized_block.timestamp.millis() as i64);\n        self.finalized_block_count\n            .set(finalized_block.height as i64);\n    }\n\n    /// Updates the metrics and records a newly proposed block.\n    pub(super) fn proposed_block(&mut self) {\n        self.time_of_last_proposed_block\n            .set(Timestamp::now().millis() as i64);\n    }\n}\n\nimpl Drop for Metrics {\n    fn drop(&mut self) {\n        unregister_metric!(self.registry, self.finalization_time);\n        unregister_metric!(self.registry, self.finalized_block_count);\n        unregister_metric!(self.registry, self.consensus_current_era);\n        unregister_metric!(self.registry, self.time_of_last_finalized_block);\n        unregister_metric!(self.registry, self.time_of_last_proposed_block);\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/protocols/common.rs",
    "content": "//! Utilities common to different consensus algorithms.\n\nuse itertools::Itertools;\nuse num_rational::Ratio;\nuse std::collections::{BTreeMap, HashSet};\n\nuse num_traits::AsPrimitive;\n\nuse crate::components::consensus::{\n    traits::Context,\n    utils::{ValidatorMap, Validators, Weight},\n};\nuse casper_types::U512;\n\n/// Computes the validator set given the stakes and the faulty and inactive\n/// reports from the previous eras.\npub fn validators<C: Context>(\n    faulty: &HashSet<C::ValidatorId>,\n    inactive: &HashSet<C::ValidatorId>,\n    validator_stakes: BTreeMap<C::ValidatorId, U512>,\n) -> Validators<C::ValidatorId> {\n    let sum_stakes = safe_sum(validator_stakes.values().copied()).expect(\"should not overflow\");\n    // We use u64 weights. Scale down by floor(sum / u64::MAX) + 1.\n    // This guarantees that the resulting sum is greater than 0 and less than u64::MAX.\n    #[allow(clippy::arithmetic_side_effects)] // Divisor isn't 0 and addition can't overflow.\n    let scaling_factor: U512 = sum_stakes / U512::from(u64::MAX) + 1;\n\n    // TODO sort validators by descending weight\n    #[allow(clippy::arithmetic_side_effects)] // Divisor isn't 0.\n    let mut validators: Validators<C::ValidatorId> = validator_stakes\n        .into_iter()\n        .map(|(key, stake)| (key, AsPrimitive::<u64>::as_(stake / scaling_factor)))\n        .collect();\n\n    for vid in faulty {\n        validators.ban(vid);\n    }\n\n    for vid in inactive {\n        validators.set_cannot_propose(vid);\n    }\n\n    assert!(\n        validators.ensure_nonzero_proposing_stake(),\n        \"cannot start era with total weight 0\"\n    );\n\n    validators\n}\n\n/// Compute the validator weight map from the set of validators.\npub(crate) fn validator_weights<C: Context>(\n    validators: &Validators<C::ValidatorId>,\n) -> ValidatorMap<Weight> {\n    ValidatorMap::from(validators.iter().map(|v| v.weight()).collect_vec())\n}\n\n/// Computes the fault tolerance 
threshold for the protocol instance\npub(crate) fn ftt<C: Context>(\n    finality_threshold_fraction: Ratio<u64>,\n    validators: &Validators<C::ValidatorId>,\n) -> Weight {\n    let total_weight = u128::from(validators.total_weight());\n    assert!(\n        finality_threshold_fraction < 1.into(),\n        \"finality threshold must be less than 100%\"\n    );\n    #[allow(clippy::arithmetic_side_effects)] // FTT is less than 1, so this can't overflow\n    let ftt = total_weight * *finality_threshold_fraction.numer() as u128\n        / *finality_threshold_fraction.denom() as u128;\n    (ftt as u64).into()\n}\n\n/// A U512 sum implementation that check for overflow.\nfn safe_sum<I>(mut iterator: I) -> Option<U512>\nwhere\n    I: Iterator<Item = U512>,\n{\n    iterator.try_fold(U512::zero(), |acc, n| acc.checked_add(n))\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::components::consensus::ClContext;\n    use casper_types::{testing::TestRng, PublicKey};\n    use rand::Rng;\n\n    #[test]\n    #[should_panic]\n    fn ftt_panics_during_overflow() {\n        let rng = &mut TestRng::new();\n        let mut validator_stakes = BTreeMap::new();\n        validator_stakes.insert(PublicKey::random(rng), U512::MAX);\n        validator_stakes.insert(PublicKey::random(rng), U512::from(1_u32));\n\n        validators::<ClContext>(&Default::default(), &Default::default(), validator_stakes);\n    }\n\n    #[test]\n    fn total_weights_less_than_u64_max() {\n        let mut rng = TestRng::new();\n\n        let (test_stake_1, test_stake_2) = (rng.gen(), rng.gen());\n\n        let mut test_stakes = |a: u64, b: u64| -> BTreeMap<PublicKey, U512> {\n            let mut result = BTreeMap::new();\n            result.insert(\n                PublicKey::random(&mut rng),\n                U512::from(a) * U512::from(u128::MAX),\n            );\n            result.insert(\n                PublicKey::random(&mut rng),\n                U512::from(b) * U512::from(u128::MAX),\n    
        );\n            result\n        };\n\n        // First, we test with random values.\n        let stakes = test_stakes(test_stake_1, test_stake_2);\n        let weights = validators::<ClContext>(&Default::default(), &Default::default(), stakes);\n        assert!(weights.total_weight().0 < u64::MAX);\n\n        // Then, we test with values that were known to cause issues before.\n        let stakes = test_stakes(514, 771);\n        let weights = validators::<ClContext>(&Default::default(), &Default::default(), stakes);\n        assert!(weights.total_weight().0 < u64::MAX);\n\n        let stakes = test_stakes(668, 614);\n        let weights = validators::<ClContext>(&Default::default(), &Default::default(), stakes);\n        assert!(weights.total_weight().0 < u64::MAX);\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/protocols/highway/config.rs",
    "content": "use serde::{Deserialize, Serialize};\n\nuse datasize::DataSize;\n\nuse casper_types::{serde_option_time_diff, TimeDiff};\n\nuse super::round_success_meter::config::Config as RSMConfig;\n\n/// Highway-specific configuration.\n/// NOTE: This is *NOT* protocol configuration that has to be the same on all nodes.\n#[derive(DataSize, Debug, Clone, Serialize, Deserialize)]\n#[serde(deny_unknown_fields)]\npub struct Config {\n    /// The duration for which incoming vertices with missing dependencies are kept in a queue.\n    pub pending_vertex_timeout: TimeDiff,\n    /// Request the latest protocol state from a random peer periodically, with this interval.\n    #[serde(with = \"serde_option_time_diff\")]\n    pub request_state_interval: Option<TimeDiff>,\n    /// Log inactive or faulty validators periodically, with this interval.\n    #[serde(with = \"serde_option_time_diff\")]\n    pub log_participation_interval: Option<TimeDiff>,\n    /// Log synchronizer state periodically, with this interval.\n    #[serde(with = \"serde_option_time_diff\")]\n    pub log_synchronizer_interval: Option<TimeDiff>,\n    /// Log the size of every incoming and outgoing serialized unit.\n    pub log_unit_sizes: bool,\n    /// The maximum number of peers we request the same vertex from in parallel.\n    pub max_requests_for_vertex: usize,\n    /// The maximum number of dependencies we request per validator in a batch.\n    /// Limits requests per validator in panorama - in order to get a total number of\n    /// requests, multiply by # of validators.\n    pub max_request_batch_size: usize,\n    pub round_success_meter: RSMConfig,\n}\n\nimpl Default for Config {\n    fn default() -> Self {\n        Config {\n            pending_vertex_timeout: \"10sec\".parse().unwrap(),\n            request_state_interval: Some(\"10sec\".parse().unwrap()),\n            log_participation_interval: Some(\"10sec\".parse().unwrap()),\n            log_synchronizer_interval: 
Some(\"5sec\".parse().unwrap()),\n            log_unit_sizes: false,\n            max_requests_for_vertex: 5,\n            max_request_batch_size: 20,\n            round_success_meter: RSMConfig::default(),\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/protocols/highway/participation.rs",
    "content": "use std::cmp::Reverse;\n\nuse casper_types::Timestamp;\n\nuse crate::{\n    components::consensus::{\n        highway_core::{\n            highway::Highway,\n            state::{Fault, State},\n        },\n        traits::Context,\n        utils::ValidatorIndex,\n    },\n    utils::div_round,\n};\n\n/// A validator's participation status: whether they are faulty or inactive.\n#[derive(Copy, Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]\nenum Status {\n    LastSeenSecondsAgo(u64),\n    Inactive,\n    EquivocatedInOtherEra,\n    Equivocated,\n}\n\nimpl Status {\n    /// Returns a `Status` for a validator unless they are honest and online.\n    fn for_index<C: Context>(\n        idx: ValidatorIndex,\n        state: &State<C>,\n        now: Timestamp,\n    ) -> Option<Status> {\n        if let Some(fault) = state.maybe_fault(idx) {\n            return Some(match fault {\n                Fault::Banned | Fault::Indirect => Status::EquivocatedInOtherEra,\n                Fault::Direct(_) => Status::Equivocated,\n            });\n        }\n        if state.panorama()[idx].is_none() {\n            return Some(Status::Inactive);\n        }\n        if state\n            .last_seen(idx)\n            .saturating_add(state.params().max_round_length())\n            < now\n        {\n            let seconds = now.saturating_diff(state.last_seen(idx)).millis() / 1000;\n            return Some(Status::LastSeenSecondsAgo(seconds));\n        }\n        None\n    }\n}\n\n/// A map of status (faulty, inactive) by validator ID.\n#[derive(Debug)]\n// False positive, as the fields of this struct are all used in logging validator participation.\n#[allow(dead_code)]\npub(crate) struct Participation<C>\nwhere\n    C: Context,\n{\n    instance_id: C::InstanceId,\n    faulty_stake_percent: u8,\n    inactive_stake_percent: u8,\n    inactive_validators: Vec<(ValidatorIndex, C::ValidatorId, Status)>,\n    faulty_validators: Vec<(ValidatorIndex, C::ValidatorId, 
Status)>,\n}\n\nimpl<C: Context> Participation<C> {\n    /// Creates a new `Participation` map, showing validators seen as faulty or inactive by the\n    /// Highway instance.\n    #[allow(clippy::arithmetic_side_effects)] // We use u128 to prevent overflows in weight calculation.\n    pub(crate) fn new(highway: &Highway<C>) -> Self {\n        let now = Timestamp::now();\n        let state = highway.state();\n        let mut inactive_w = 0;\n        let mut faulty_w = 0;\n        let total_w = u128::from(state.total_weight().0);\n        let mut inactive_validators = Vec::new();\n        let mut faulty_validators = Vec::new();\n        for (idx, v_id) in highway.validators().enumerate_ids() {\n            if let Some(status) = Status::for_index(idx, state, now) {\n                match status {\n                    Status::Equivocated | Status::EquivocatedInOtherEra => {\n                        faulty_w += u128::from(state.weight(idx).0);\n                        faulty_validators.push((idx, v_id.clone(), status));\n                    }\n                    Status::Inactive | Status::LastSeenSecondsAgo(_) => {\n                        inactive_w += u128::from(state.weight(idx).0);\n                        inactive_validators.push((idx, v_id.clone(), status));\n                    }\n                }\n            }\n        }\n        inactive_validators.sort_by_key(|(idx, _, status)| (Reverse(*status), *idx));\n        faulty_validators.sort_by_key(|(idx, _, status)| (Reverse(*status), *idx));\n        Participation {\n            instance_id: *highway.instance_id(),\n            inactive_stake_percent: div_round(inactive_w * 100, total_w) as u8,\n            faulty_stake_percent: div_round(faulty_w * 100, total_w) as u8,\n            inactive_validators,\n            faulty_validators,\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/protocols/highway/round_success_meter/config.rs",
    "content": "use crate::components::consensus::config::Config as ConsensusConfig;\nuse datasize::DataSize;\nuse num_rational::Ratio;\n\nuse serde::{Deserialize, Serialize};\n\n/// The number of most recent rounds we will be keeping track of.\npub(crate) const NUM_ROUNDS_TO_CONSIDER: usize = 40;\n/// The number of successful rounds that triggers us to slow down: With this many or fewer\n/// successes per `NUM_ROUNDS_TO_CONSIDER`, we increase our round length.\npub(crate) const NUM_ROUNDS_SLOWDOWN: usize = 10;\n/// The number of successful rounds that triggers us to speed up: With this many or more successes\n/// per `NUM_ROUNDS_TO_CONSIDER`, we decrease our round length.\npub(crate) const NUM_ROUNDS_SPEEDUP: usize = 32;\n/// We will try to accelerate (decrease our round length) every `ACCELERATION_PARAMETER` rounds if\n/// we have few enough failures.\npub(crate) const ACCELERATION_PARAMETER: u64 = 40;\n/// The FTT, as a percentage (i.e. `THRESHOLD = 1` means 1% of the validators' total weight), which\n/// we will use for looking for a summit in order to determine a proposal's finality.\n/// The required quorum in a summit we will look for to check if a round was successful is\n/// determined by this FTT.\npub(crate) const THRESHOLD: u64 = 1;\n\n#[cfg(test)]\npub(crate) const MAX_FAILED_ROUNDS: usize = NUM_ROUNDS_TO_CONSIDER - NUM_ROUNDS_SLOWDOWN - 1;\n\n#[derive(DataSize, Debug, Clone, Copy, Serialize, Deserialize)]\npub struct Config {\n    pub num_rounds_to_consider: u64,\n    pub num_rounds_slowdown: u64,\n    pub num_rounds_speedup: u64,\n    pub acceleration_parameter: u64,\n    #[data_size(skip)]\n    pub acceleration_ftt: Ratio<u64>,\n}\n\nimpl Default for Config {\n    fn default() -> Self {\n        Self {\n            num_rounds_to_consider: NUM_ROUNDS_TO_CONSIDER as u64,\n            num_rounds_slowdown: NUM_ROUNDS_SLOWDOWN as u64,\n            num_rounds_speedup: NUM_ROUNDS_SPEEDUP as u64,\n            acceleration_parameter: 
ACCELERATION_PARAMETER,\n            acceleration_ftt: Ratio::new(THRESHOLD, 100),\n        }\n    }\n}\n\nimpl Config {\n    /// The maximum number of failures allowed among `num_rounds_to_consider` latest rounds, with\n    /// which we won't increase our round length. Exceeding this threshold will mean that we\n    /// should slow down.\n    pub(crate) fn max_failed_rounds(&self) -> u64 {\n        self.num_rounds_to_consider\n            .saturating_sub(self.num_rounds_slowdown)\n            .saturating_sub(1)\n    }\n\n    /// The maximum number of failures with which we will attempt to accelerate (decrease the round\n    /// exponent).\n    pub(crate) fn max_failures_for_acceleration(&self) -> u64 {\n        self.num_rounds_to_consider\n            .saturating_sub(self.num_rounds_speedup)\n    }\n}\n\nimpl From<&ConsensusConfig> for Config {\n    fn from(config: &ConsensusConfig) -> Self {\n        config.highway.round_success_meter\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/protocols/highway/round_success_meter/tests.rs",
    "content": "use config::{Config, ACCELERATION_PARAMETER, MAX_FAILED_ROUNDS, NUM_ROUNDS_TO_CONSIDER};\n\nuse casper_types::{TimeDiff, Timestamp};\n\nuse crate::components::consensus::{\n    cl_context::ClContext,\n    protocols::highway::round_success_meter::{config, round_index},\n};\n\nconst TEST_ROUND_LEN: TimeDiff = TimeDiff::from_millis(1 << 13);\nconst TEST_MIN_ROUND_LEN: TimeDiff = TimeDiff::from_millis(1 << 8);\nconst TEST_MAX_ROUND_LEN: TimeDiff = TimeDiff::from_millis(1 << 19);\n\n#[test]\nfn new_length_steady() {\n    let round_success_meter: super::RoundSuccessMeter<ClContext> = super::RoundSuccessMeter::new(\n        TEST_ROUND_LEN,\n        TEST_MIN_ROUND_LEN,\n        TEST_MAX_ROUND_LEN,\n        Timestamp::now(),\n        Config::default(),\n    );\n    assert_eq!(round_success_meter.new_length(), TEST_ROUND_LEN);\n}\n\n#[test]\nfn new_length_slow_down() {\n    let mut round_success_meter: super::RoundSuccessMeter<ClContext> =\n        super::RoundSuccessMeter::new(\n            TEST_ROUND_LEN,\n            TEST_MIN_ROUND_LEN,\n            TEST_MAX_ROUND_LEN,\n            Timestamp::now(),\n            Config::default(),\n        );\n    // If there have been more rounds of failure than MAX_FAILED_ROUNDS, slow down\n    round_success_meter.rounds = vec![false; MAX_FAILED_ROUNDS + 1].into();\n    assert_eq!(round_success_meter.new_length(), TEST_ROUND_LEN * 2);\n}\n\n#[test]\nfn new_length_can_not_slow_down_because_max_round_len() {\n    // If the round length is the same as the maximum round length, can't go up\n    let mut round_success_meter: super::RoundSuccessMeter<ClContext> =\n        super::RoundSuccessMeter::new(\n            TEST_MAX_ROUND_LEN,\n            TEST_MIN_ROUND_LEN,\n            TEST_MAX_ROUND_LEN,\n            Timestamp::now(),\n            Config::default(),\n        );\n    // If there have been more rounds of failure than MAX_FAILED_ROUNDS, slow down -- but can't\n    // slow down because of ceiling\n    
round_success_meter.rounds = vec![false; MAX_FAILED_ROUNDS + 1].into();\n    assert_eq!(round_success_meter.new_length(), TEST_MAX_ROUND_LEN);\n}\n\n#[test]\nfn new_length_speed_up() {\n    // If there's been enough successful rounds and it's an acceleration round, speed up\n    let mut round_success_meter: super::RoundSuccessMeter<ClContext> =\n        super::RoundSuccessMeter::new(\n            TEST_ROUND_LEN,\n            TEST_MIN_ROUND_LEN,\n            TEST_MAX_ROUND_LEN,\n            Timestamp::now(),\n            Config::default(),\n        );\n    round_success_meter.rounds = vec![true; NUM_ROUNDS_TO_CONSIDER].into();\n    // Increase our round index until we are at an acceleration round\n    loop {\n        let current_round_index = round_index(\n            round_success_meter.current_round_id,\n            round_success_meter.current_round_len,\n        );\n        if current_round_index % ACCELERATION_PARAMETER == 0 {\n            break;\n        };\n        round_success_meter.current_round_id += TimeDiff::from_millis(1);\n    }\n    assert_eq!(round_success_meter.new_length(), TEST_ROUND_LEN / 2);\n}\n\n#[test]\nfn new_length_can_not_speed_up_because_min_round_len() {\n    // If there's been enough successful rounds and it's an acceleration round, but we are\n    // already at the smallest round length possible, stay at the current round length\n    let mut round_success_meter: super::RoundSuccessMeter<ClContext> =\n        super::RoundSuccessMeter::new(\n            TEST_MIN_ROUND_LEN,\n            TEST_MIN_ROUND_LEN,\n            TEST_MAX_ROUND_LEN,\n            Timestamp::now(),\n            Config::default(),\n        );\n    round_success_meter.rounds = vec![true; NUM_ROUNDS_TO_CONSIDER].into();\n    // Increase our round index until we are at an acceleration round\n    loop {\n        let current_round_index = round_index(\n            round_success_meter.current_round_id,\n            round_success_meter.current_round_len,\n        );\n        
if current_round_index % ACCELERATION_PARAMETER == 0 {\n            break;\n        };\n        round_success_meter.current_round_id += TimeDiff::from_millis(1);\n    }\n    assert_eq!(round_success_meter.new_length(), TEST_MIN_ROUND_LEN);\n}\n"
  },
  {
    "path": "node/src/components/consensus/protocols/highway/round_success_meter.rs",
    "content": "#[cfg(test)]\nmod tests;\n\nuse std::{cmp::max, collections::VecDeque, mem};\n\nuse datasize::DataSize;\nuse tracing::{error, trace};\n\nuse casper_types::{TimeDiff, Timestamp};\n\nuse crate::components::consensus::{\n    highway_core::{finality_detector::FinalityDetector, state, State},\n    traits::Context,\n    utils::Weight,\n};\n\npub(crate) mod config;\nuse config::*;\n\n#[derive(DataSize, Debug, Clone)]\npub(crate) struct RoundSuccessMeter<C>\nwhere\n    C: Context,\n{\n    // store whether a particular round was successful\n    // index 0 is the last handled round, 1 is the second-to-last etc.\n    rounds: VecDeque<bool>,\n    current_round_id: Timestamp,\n    proposals: Vec<C::Hash>,\n    min_round_len: TimeDiff,\n    max_round_len: TimeDiff,\n    current_round_len: TimeDiff,\n    config: Config,\n}\n\nimpl<C: Context> RoundSuccessMeter<C> {\n    pub fn new(\n        round_len: TimeDiff,\n        min_round_len: TimeDiff,\n        max_round_len: TimeDiff,\n        timestamp: Timestamp,\n        config: Config,\n    ) -> Self {\n        let current_round_id = state::round_id(timestamp, round_len);\n        Self {\n            rounds: VecDeque::with_capacity(config.num_rounds_to_consider as usize),\n            current_round_id,\n            proposals: Vec::new(),\n            min_round_len,\n            max_round_len,\n            current_round_len: round_len,\n            config,\n        }\n    }\n\n    fn change_length(&mut self, new_len: TimeDiff, timestamp: Timestamp) {\n        self.rounds = VecDeque::with_capacity(self.config.num_rounds_to_consider as usize);\n        self.current_round_len = new_len;\n        self.current_round_id = state::round_id(timestamp, new_len);\n        self.proposals = Vec::new();\n    }\n\n    fn check_proposals_success(&self, state: &State<C>, proposal_h: &C::Hash) -> bool {\n        let total_w = state.total_weight();\n\n        #[allow(clippy::arithmetic_side_effects)] // FTT is less than 100%, so this 
can't overflow.\n        let finality_detector = FinalityDetector::<C>::new(max(\n            Weight(\n                (u128::from(total_w) * *self.config.acceleration_ftt.numer() as u128\n                    / *self.config.acceleration_ftt.denom() as u128) as u64,\n            ),\n            Weight(1),\n        ));\n\n        // check for the existence of a level-1 summit\n        finality_detector.find_summit(1, proposal_h, state) == 1\n    }\n\n    /// Registers a proposal within this round - if it's finalized within the round, the round will\n    /// be successful.\n    pub fn new_proposal(&mut self, proposal_h: C::Hash, timestamp: Timestamp) {\n        // only add proposals from within the current round\n        if state::round_id(timestamp, self.current_round_len) == self.current_round_id {\n            trace!(\n                %self.current_round_id,\n                timestamp = timestamp.millis(),\n                \"adding a proposal\"\n            );\n            self.proposals.push(proposal_h);\n        } else {\n            trace!(\n                %self.current_round_id,\n                timestamp = timestamp.millis(),\n                %self.current_round_len,\n                \"trying to add proposal for a different round!\"\n            );\n        }\n    }\n\n    /// If the current timestamp indicates that the round has ended, checks the known proposals for\n    /// a level-1 summit.\n    /// If there is a summit, the round is considered successful. 
Otherwise, it is considered\n    /// failed.\n    /// Next, a number of last rounds are being checked for success and if not enough of them are\n    /// successful, we return a higher round length for the future.\n    /// If the length shouldn't grow, and the round ID is divisible by a certain number, a lower\n    /// round length is returned.\n    pub fn calculate_new_length(&mut self, state: &State<C>) -> TimeDiff {\n        let now = Timestamp::now();\n        // if the round hasn't finished, just return whatever we have now\n        if state::round_id(now, self.current_round_len) <= self.current_round_id {\n            return self.new_length();\n        }\n\n        trace!(%self.current_round_id, \"calculating length\");\n        let current_round_index = round_index(self.current_round_id, self.current_round_len);\n        let new_round_index = round_index(now, self.current_round_len);\n\n        if mem::take(&mut self.proposals)\n            .into_iter()\n            .any(|proposal| self.check_proposals_success(state, &proposal))\n        {\n            trace!(\"round succeeded\");\n            self.rounds.push_front(true);\n        } else {\n            trace!(\"round failed\");\n            self.rounds.push_front(false);\n        }\n\n        // if we're just switching rounds and more than a single round has passed, all the\n        // rounds since the last registered round have failed\n        let failed_round_count = new_round_index\n            .saturating_sub(current_round_index)\n            .saturating_sub(1);\n        for _ in 0..failed_round_count {\n            trace!(\"round failed\");\n            self.rounds.push_front(false);\n        }\n\n        self.current_round_id = Timestamp::zero()\n            .saturating_add(self.current_round_len.saturating_mul(new_round_index));\n\n        self.clean_old_rounds();\n\n        trace!(\n            %self.current_round_len,\n            \"{} failures among the last {} rounds.\",\n            
self.count_failures(),\n            self.rounds.len()\n        );\n\n        let new_len = self.new_length();\n\n        trace!(%new_len, \"new length calculated\");\n\n        if new_len != self.current_round_len {\n            self.change_length(new_len, now);\n        }\n\n        new_len\n    }\n\n    /// Returns an instance of `Self` for the new era: resetting the counters where appropriate.\n    pub fn next_era(&self, timestamp: Timestamp) -> Self {\n        Self {\n            rounds: self.rounds.clone(),\n            current_round_id: state::round_id(timestamp, self.current_round_len),\n            proposals: Default::default(),\n            min_round_len: self.min_round_len,\n            max_round_len: self.max_round_len,\n            current_round_len: self.current_round_len,\n            config: self.config,\n        }\n    }\n\n    fn clean_old_rounds(&mut self) {\n        while self.rounds.len() as u64 > self.config.num_rounds_to_consider {\n            self.rounds.pop_back();\n        }\n    }\n\n    fn count_failures(&self) -> usize {\n        self.rounds.iter().filter(|&success| !success).count()\n    }\n\n    /// Returns the round length to be used in the next round, based on the previously used round\n    /// length and the current counts of successes and failures.\n    pub(super) fn new_length(&self) -> TimeDiff {\n        let current_round_index = round_index(self.current_round_id, self.current_round_len);\n        let num_failures = self.count_failures() as u64;\n        #[allow(clippy::arithmetic_side_effects)] // The acceleration_parameter is not zero.\n        if num_failures > self.config.max_failed_rounds()\n            && self.current_round_len * 2 <= self.max_round_len\n        {\n            self.current_round_len * 2\n        } else if current_round_index % self.config.acceleration_parameter == 0\n            && self.current_round_len > self.min_round_len\n            // we will only accelerate if we collected data about enough 
rounds\n            && self.rounds.len() as u64 == self.config.num_rounds_to_consider\n            && num_failures < self.config.max_failures_for_acceleration()\n        {\n            self.current_round_len / 2\n        } else {\n            self.current_round_len\n        }\n    }\n}\n\n/// Returns the round index `i`, if `r_id` is the ID of the `i`-th round after the epoch.\n#[allow(clippy::arithmetic_side_effects)] // Checking for division by 0.\nfn round_index(r_id: Timestamp, round_len: TimeDiff) -> u64 {\n    if round_len.millis() == 0 {\n        error!(\"called round_index with round_len 0.\");\n        return r_id.millis();\n    }\n    r_id.millis() / round_len.millis()\n}\n"
  },
  {
    "path": "node/src/components/consensus/protocols/highway/tests.rs",
    "content": "use std::{\n    collections::{BTreeMap, BTreeSet},\n    sync::Arc,\n};\n\nuse casper_types::{testing::TestRng, PublicKey, TimeDiff, Timestamp, U512};\n\nuse crate::{\n    components::consensus::{\n        cl_context::{ClContext, Keypair},\n        config::Config,\n        consensus_protocol::{ConsensusProtocol, ProtocolOutcome},\n        highway_core::{\n            highway::{SignedWireUnit, Vertex, WireUnit},\n            highway_testing,\n            state::{self, tests::ALICE, Observation, Panorama},\n            State,\n        },\n        max_rounds_per_era,\n        protocols::highway::{\n            config::Config as HighwayConfig, HighwayMessage, HighwayProtocol, ACTION_ID_VERTEX,\n        },\n        tests::utils::{\n            new_test_chainspec, ALICE_NODE_ID, ALICE_PUBLIC_KEY, ALICE_SECRET_KEY, BOB_PUBLIC_KEY,\n        },\n        traits::Context,\n        utils::{ValidatorIndex, Weight},\n        SerializedMessage,\n    },\n    types::BlockPayload,\n};\n\n/// Returns a new `State` with `ClContext` parameters suitable for tests.\npub(crate) fn new_test_state<I, T>(weights: I, seed: u64) -> State<ClContext>\nwhere\n    I: IntoIterator<Item = T>,\n    T: Into<Weight>,\n{\n    #[allow(clippy::arithmetic_side_effects)] // Left shift with small enough constants.\n    let params = state::Params::new(\n        seed,\n        TimeDiff::from_millis(1 << 14),\n        TimeDiff::from_millis(1 << 19),\n        TimeDiff::from_millis(1 << 14),\n        u64::MAX,\n        0.into(),\n        Timestamp::MAX,\n        highway_testing::TEST_ENDORSEMENT_EVIDENCE_LIMIT,\n    );\n    let weights = weights.into_iter().map(|w| w.into()).collect::<Vec<_>>();\n    State::new(weights, params, vec![], vec![])\n}\n\nconst INSTANCE_ID_DATA: &[u8; 1] = &[123u8; 1];\n\npub(crate) fn new_test_highway_protocol<I1, I2, T>(\n    weights: I1,\n    init_faulty: I2,\n) -> Box<dyn ConsensusProtocol<ClContext>>\nwhere\n    I1: IntoIterator<Item = (PublicKey, T)>,\n    I2: 
IntoIterator<Item = PublicKey>,\n    T: Into<U512>,\n{\n    let weights = weights\n        .into_iter()\n        .map(|(pk, w)| (pk, w.into()))\n        .collect::<Vec<_>>();\n    let chainspec = new_test_chainspec(weights.clone());\n    let config = Config {\n        max_execution_delay: 3,\n        highway: HighwayConfig {\n            pending_vertex_timeout: \"1min\".parse().unwrap(),\n            log_participation_interval: Some(\"10sec\".parse().unwrap()),\n            ..HighwayConfig::default()\n        },\n        ..Default::default()\n    };\n    // Timestamp of the genesis era start and test start.\n    let start_timestamp: Timestamp = 0.into();\n    let (hw_proto, outcomes) = HighwayProtocol::<ClContext>::new_boxed(\n        ClContext::hash(INSTANCE_ID_DATA),\n        weights.into_iter().collect(),\n        &init_faulty.into_iter().collect(),\n        &None.into_iter().collect(),\n        &chainspec,\n        &config,\n        None,\n        start_timestamp,\n        0,\n        start_timestamp,\n        None,\n    );\n    // We expect three messages:\n    // * log participation timer,\n    // * log synchronizer queue length timer,\n    // * purge synchronizer queue timer\n    // If there are more, the tests might need to handle them.\n    assert_eq!(3, outcomes.len());\n    hw_proto\n}\n\npub(crate) const N: Observation<ClContext> = Observation::None;\n\n#[test]\nfn send_a_wire_unit_with_too_small_a_round_exp() {\n    let mut rng = TestRng::new();\n    let creator: ValidatorIndex = ValidatorIndex(0);\n    let validators = vec![(ALICE_PUBLIC_KEY.clone(), 100)];\n    let state: State<ClContext> = new_test_state(validators.iter().map(|(_pk, w)| *w), 0);\n    let panorama: Panorama<ClContext> = Panorama::from(vec![N]);\n    let seq_number = panorama.next_seq_num(&state, creator);\n    let now = Timestamp::zero();\n    let wunit: WireUnit<ClContext> = WireUnit {\n        panorama,\n        creator,\n        instance_id: ClContext::hash(INSTANCE_ID_DATA),\n    
    value: None,\n        seq_number,\n        timestamp: now,\n        round_exp: 0,\n        endorsed: BTreeSet::new(),\n    };\n    let alice_keypair: Keypair = Keypair::from(Arc::clone(&*ALICE_SECRET_KEY));\n    let highway_message: HighwayMessage<ClContext> = HighwayMessage::NewVertex(Vertex::Unit(\n        SignedWireUnit::new(wunit.into_hashed(), &alice_keypair),\n    ));\n    let mut highway_protocol = new_test_highway_protocol(validators, vec![]);\n    let sender = *ALICE_NODE_ID;\n    let msg = SerializedMessage::from_message(&highway_message);\n    let outcomes = highway_protocol.handle_message(&mut rng, sender.to_owned(), msg, now);\n    assert_eq!(&*outcomes, [ProtocolOutcome::Disconnect(sender)]);\n}\n\n#[test]\nfn send_a_valid_wire_unit() {\n    let mut rng = TestRng::new();\n    let creator: ValidatorIndex = ValidatorIndex(0);\n    let validators = vec![(ALICE_PUBLIC_KEY.clone(), 100)];\n    let state: State<ClContext> = new_test_state(validators.iter().map(|(_pk, w)| *w), 0);\n    let panorama: Panorama<ClContext> = Panorama::from(vec![N]);\n    let seq_number = panorama.next_seq_num(&state, creator);\n    let now = Timestamp::zero();\n    let wunit: WireUnit<ClContext> = WireUnit {\n        panorama,\n        creator,\n        instance_id: ClContext::hash(INSTANCE_ID_DATA),\n        value: Some(Arc::new(BlockPayload::new(\n            BTreeMap::new(),\n            vec![],\n            Default::default(),\n            false,\n            1u8,\n        ))),\n        seq_number,\n        timestamp: now,\n        round_exp: 0,\n        endorsed: BTreeSet::new(),\n    };\n    let alice_keypair: Keypair = Keypair::from(Arc::clone(&*ALICE_SECRET_KEY));\n    let highway_message: HighwayMessage<ClContext> = HighwayMessage::NewVertex(Vertex::Unit(\n        SignedWireUnit::new(wunit.into_hashed(), &alice_keypair),\n    ));\n\n    let mut highway_protocol = new_test_highway_protocol(validators, vec![]);\n    let sender = *ALICE_NODE_ID;\n    let msg = 
SerializedMessage::from_message(&highway_message);\n\n    let mut outcomes = highway_protocol.handle_message(&mut rng, sender, msg, now);\n    while let Some(outcome) = outcomes.pop() {\n        match outcome {\n            ProtocolOutcome::CreatedGossipMessage(_)\n            | ProtocolOutcome::FinalizedBlock(_)\n            | ProtocolOutcome::HandledProposedBlock(_) => (),\n            ProtocolOutcome::QueueAction(ACTION_ID_VERTEX) => {\n                outcomes.extend(highway_protocol.handle_action(ACTION_ID_VERTEX, now))\n            }\n            outcome => panic!(\"Unexpected outcome: {:?}\", outcome),\n        }\n    }\n}\n\n#[test]\nfn detect_doppelganger() {\n    let mut rng = TestRng::new();\n    let creator: ValidatorIndex = ALICE;\n    let validators = vec![\n        (ALICE_PUBLIC_KEY.clone(), 100),\n        (BOB_PUBLIC_KEY.clone(), 100),\n    ];\n    let state: State<ClContext> = new_test_state(validators.iter().map(|(_pk, w)| *w), 0);\n    let panorama: Panorama<ClContext> = Panorama::from(vec![N, N]);\n    let seq_number = panorama.next_seq_num(&state, creator);\n    let instance_id = ClContext::hash(INSTANCE_ID_DATA);\n    let round_exp = 0;\n    let now = Timestamp::zero();\n    let value = Arc::new(BlockPayload::new(\n        BTreeMap::new(),\n        vec![],\n        Default::default(),\n        false,\n        1u8,\n    ));\n    let wunit: WireUnit<ClContext> = WireUnit {\n        panorama,\n        creator,\n        instance_id,\n        value: Some(value),\n        seq_number,\n        timestamp: now,\n        round_exp,\n        endorsed: BTreeSet::new(),\n    };\n    let alice_keypair: Keypair = Keypair::from(Arc::clone(&*ALICE_SECRET_KEY));\n    let highway_message: HighwayMessage<ClContext> = HighwayMessage::NewVertex(Vertex::Unit(\n        SignedWireUnit::new(wunit.into_hashed(), &alice_keypair),\n    ));\n    let mut highway_protocol = new_test_highway_protocol(validators, vec![]);\n    // Activate ALICE as validator.\n    let _ = 
highway_protocol.activate_validator(ALICE_PUBLIC_KEY.clone(), alice_keypair, now, None);\n    assert!(highway_protocol.is_active());\n    let sender = *ALICE_NODE_ID;\n    let msg = SerializedMessage::from_message(&highway_message);\n    // \"Send\" a message created by ALICE to an instance of Highway where she's an active validator.\n    // An incoming unit, created by the same validator, should be properly detected as a\n    // doppelganger.\n    let mut outcomes = highway_protocol.handle_message(&mut rng, sender, msg, now);\n    while let Some(outcome) = outcomes.pop() {\n        match outcome {\n            ProtocolOutcome::DoppelgangerDetected => return,\n            ProtocolOutcome::QueueAction(ACTION_ID_VERTEX) => {\n                outcomes.extend(highway_protocol.handle_action(ACTION_ID_VERTEX, now))\n            }\n            _ => (),\n        }\n    }\n    panic!(\"failed to return DoppelgangerDetected effect\");\n}\n\n#[test]\nfn max_rounds_per_era_returns_the_correct_value_for_prod_chainspec_value() {\n    let max_rounds_per_era = max_rounds_per_era(\n        20,\n        TimeDiff::from_seconds(120 * 60),\n        TimeDiff::from_millis(32768),\n    );\n\n    assert_eq!(219, max_rounds_per_era);\n}\n"
  },
  {
    "path": "node/src/components/consensus/protocols/highway.rs",
    "content": "pub(crate) mod config;\nmod participation;\nmod round_success_meter;\n#[cfg(test)]\nmod tests;\n\nuse std::{\n    any::Any,\n    collections::{BTreeMap, HashMap, HashSet},\n    fmt::Debug,\n    iter,\n    path::PathBuf,\n};\n\nuse casper_types::{Chainspec, TimeDiff, Timestamp, U512};\nuse datasize::DataSize;\nuse itertools::Itertools;\nuse rand::RngCore;\nuse tracing::{debug, error, info, trace, warn};\n\nuse crate::{\n    components::consensus::{\n        config::Config,\n        consensus_protocol::{\n            BlockContext, ConsensusProtocol, ProposedBlock, ProtocolOutcome, ProtocolOutcomes,\n        },\n        era_supervisor::SerializedMessage,\n        highway_core::{\n            active_validator::Effect as AvEffect,\n            finality_detector::{FinalityDetector, FttExceeded},\n            highway::{\n                Dependency, GetDepOutcome, Highway, Params, PreValidatedVertex, ValidVertex,\n                Vertex, VertexError,\n            },\n            state::{IndexObservation, IndexPanorama, Observation},\n            synchronizer::Synchronizer,\n        },\n        protocols,\n        traits::{ConsensusValueT, Context},\n        utils::ValidatorIndex,\n        ActionId, TimerId,\n    },\n    types::NodeId,\n    NodeRng,\n};\n\nuse self::round_success_meter::RoundSuccessMeter;\n\n/// Never allow more than this many units in a piece of evidence for conflicting endorsements,\n/// even if eras are longer than this.\nconst MAX_ENDORSEMENT_EVIDENCE_LIMIT: u64 = 10_000;\n\n/// The timer for creating new units, as a validator actively participating in consensus.\nconst TIMER_ID_ACTIVE_VALIDATOR: TimerId = TimerId(0);\n/// The timer for adding a vertex with a future timestamp.\nconst TIMER_ID_VERTEX_WITH_FUTURE_TIMESTAMP: TimerId = TimerId(1);\n/// The timer for purging expired pending vertices from the queues.\nconst TIMER_ID_PURGE_VERTICES: TimerId = TimerId(2);\n/// The timer for logging inactive validators.\nconst 
TIMER_ID_LOG_PARTICIPATION: TimerId = TimerId(3);\n/// The timer for logging synchronizer queue size.\nconst TIMER_ID_SYNCHRONIZER_LOG: TimerId = TimerId(4);\n/// The timer to request the latest state from a random peer.\nconst TIMER_ID_REQUEST_STATE: TimerId = TimerId(5);\n\n/// The action of adding a vertex from the `vertices_to_be_added` queue.\npub(crate) const ACTION_ID_VERTEX: ActionId = ActionId(0);\n\n#[derive(DataSize, Debug)]\npub(crate) struct HighwayProtocol<C>\nwhere\n    C: Context,\n{\n    /// Incoming blocks we can't add yet because we are waiting for validation.\n    pending_values: HashMap<ProposedBlock<C>, HashSet<(ValidVertex<C>, NodeId)>>,\n    finality_detector: FinalityDetector<C>,\n    highway: Highway<C>,\n    /// A tracker for whether we are keeping up with the current round length or not.\n    round_success_meter: RoundSuccessMeter<C>,\n    synchronizer: Synchronizer<C>,\n    pvv_cache: HashMap<Dependency<C>, PreValidatedVertex<C>>,\n    evidence_only: bool,\n    config: config::Config,\n}\n\nimpl<C: Context + 'static> HighwayProtocol<C> {\n    /// Creates a new boxed `HighwayProtocol` instance.\n    #[allow(clippy::too_many_arguments, clippy::type_complexity)]\n    pub(crate) fn new_boxed(\n        instance_id: C::InstanceId,\n        validator_stakes: BTreeMap<C::ValidatorId, U512>,\n        faulty: &HashSet<C::ValidatorId>,\n        inactive: &HashSet<C::ValidatorId>,\n        chainspec: &Chainspec,\n        config: &Config,\n        prev_cp: Option<&dyn ConsensusProtocol<C>>,\n        era_start_time: Timestamp,\n        seed: u64,\n        now: Timestamp,\n        protocol_state_file: Option<PathBuf>,\n    ) -> (Box<dyn ConsensusProtocol<C>>, ProtocolOutcomes<C>) {\n        let validators_count = validator_stakes.len();\n        let validators = protocols::common::validators::<C>(faulty, inactive, validator_stakes);\n        let highway_config = &chainspec.highway_config;\n        let ftt = protocols::common::ftt::<C>(\n            
chainspec.core_config.finality_threshold_fraction,\n            &validators,\n        );\n\n        let minimum_round_length = chainspec\n            .core_config\n            .minimum_block_time\n            .max(TimeDiff::from_millis(1));\n        // The maximum round exponent x is such that 2^x * m is at most M, where m and M are min\n        // and max round length. So x is the floor of log_2(M / m). Thus the ceiling of\n        // log_2(M / m + 1) is always x + 1.\n        #[allow(clippy::arithmetic_side_effects)] // minimum_round_length is guaranteed to be > 0.\n        let maximum_round_exponent = (highway_config.maximum_round_length / minimum_round_length)\n            .saturating_add(1)\n            .next_power_of_two()\n            .trailing_zeros()\n            .saturating_sub(1) as u8;\n        // Doesn't overflow since it's at most highway_config.maximum_round_length.\n        #[allow(clippy::arithmetic_side_effects)]\n        let maximum_round_length =\n            TimeDiff::from_millis(minimum_round_length.millis() << maximum_round_exponent);\n\n        let round_success_meter = prev_cp\n            .and_then(|cp| cp.as_any().downcast_ref::<HighwayProtocol<C>>())\n            .map(|highway_proto| highway_proto.next_era_round_succ_meter(era_start_time.max(now)))\n            .unwrap_or_else(|| {\n                RoundSuccessMeter::new(\n                    minimum_round_length,\n                    minimum_round_length,\n                    maximum_round_length,\n                    era_start_time.max(now),\n                    config.into(),\n                )\n            });\n        // This will return the minimum round length if we just initialized the meter, i.e. 
if\n        // there was no previous consensus instance or it had no round success meter.\n        let init_round_len = round_success_meter.new_length();\n\n        info!(\n            %init_round_len,\n            \"initializing Highway instance\",\n        );\n\n        // Allow about as many units as part of evidence for conflicting endorsements as we expect\n        // a validator to create during an era. After that, they can endorse two conflicting forks\n        // without getting faulty.\n        let max_rounds_per_era = max_rounds_per_era(\n            chainspec.core_config.minimum_era_height,\n            chainspec.core_config.era_duration,\n            minimum_round_length,\n        );\n        let endorsement_evidence_limit = max_rounds_per_era\n            .saturating_mul(2)\n            .min(MAX_ENDORSEMENT_EVIDENCE_LIMIT);\n\n        let params = Params::new(\n            seed,\n            minimum_round_length,\n            maximum_round_length,\n            init_round_len,\n            chainspec.core_config.minimum_era_height,\n            era_start_time,\n            era_start_time.saturating_add(chainspec.core_config.era_duration),\n            endorsement_evidence_limit,\n        );\n\n        let outcomes = Self::initialize_timers(now, era_start_time, &config.highway);\n\n        let highway = Highway::new(instance_id, validators, params, protocol_state_file);\n        let hw_proto = Box::new(HighwayProtocol {\n            pending_values: HashMap::new(),\n            finality_detector: FinalityDetector::new(ftt),\n            highway,\n            round_success_meter,\n            synchronizer: Synchronizer::new(validators_count, instance_id),\n            pvv_cache: Default::default(),\n            evidence_only: false,\n            config: config.highway.clone(),\n        });\n\n        (hw_proto, outcomes)\n    }\n\n    fn initialize_timers(\n        now: Timestamp,\n        era_start_time: Timestamp,\n        config: &config::Config,\n    ) 
-> ProtocolOutcomes<C> {\n        let mut outcomes = vec![ProtocolOutcome::ScheduleTimer(\n            now.saturating_add(config.pending_vertex_timeout),\n            TIMER_ID_PURGE_VERTICES,\n        )];\n        if let Some(interval) = config.log_participation_interval {\n            outcomes.push(ProtocolOutcome::ScheduleTimer(\n                now.max(era_start_time).saturating_add(interval),\n                TIMER_ID_LOG_PARTICIPATION,\n            ));\n        }\n        if let Some(interval) = config.log_synchronizer_interval {\n            outcomes.push(ProtocolOutcome::ScheduleTimer(\n                now.saturating_add(interval),\n                TIMER_ID_SYNCHRONIZER_LOG,\n            ));\n        }\n        outcomes\n    }\n\n    fn process_av_effects<E>(&mut self, av_effects: E, now: Timestamp) -> ProtocolOutcomes<C>\n    where\n        E: IntoIterator<Item = AvEffect<C>>,\n    {\n        av_effects\n            .into_iter()\n            .flat_map(|effect| self.process_av_effect(effect, now))\n            .collect()\n    }\n\n    fn process_av_effect(&mut self, effect: AvEffect<C>, now: Timestamp) -> ProtocolOutcomes<C> {\n        match effect {\n            AvEffect::NewVertex(vv) => {\n                self.log_unit_size(vv.inner(), \"sending new unit\");\n                self.calculate_round_length(&vv, now);\n                self.process_new_vertex(vv)\n            }\n            AvEffect::ScheduleTimer(timestamp) => {\n                vec![ProtocolOutcome::ScheduleTimer(\n                    timestamp,\n                    TIMER_ID_ACTIVE_VALIDATOR,\n                )]\n            }\n            AvEffect::RequestNewBlock(block_context, expiry) => {\n                vec![ProtocolOutcome::CreateNewBlock(block_context, expiry)]\n            }\n            AvEffect::WeAreFaulty(fault) => {\n                error!(\"this validator is faulty: {:?}\", fault);\n                vec![ProtocolOutcome::WeAreFaulty]\n            }\n        }\n    }\n\n    fn 
process_new_vertex(&mut self, vv: ValidVertex<C>) -> ProtocolOutcomes<C> {\n        let mut outcomes = Vec::new();\n        if let Vertex::Evidence(ev) = vv.inner() {\n            let v_id = self\n                .highway\n                .validators()\n                .id(ev.perpetrator())\n                .expect(\"validator not found\") // We already validated this vertex.\n                .clone();\n            outcomes.push(ProtocolOutcome::NewEvidence(v_id));\n        }\n        let msg = HighwayMessage::NewVertex(vv.into());\n        outcomes.push(ProtocolOutcome::CreatedGossipMessage(\n            SerializedMessage::from_message(&msg),\n        ));\n        outcomes.extend(self.detect_finality());\n        outcomes\n    }\n\n    fn detect_finality(&mut self) -> ProtocolOutcomes<C> {\n        let faulty_weight = match self.finality_detector.run(&self.highway) {\n            Ok(iter) => return iter.map(ProtocolOutcome::FinalizedBlock).collect(),\n            Err(FttExceeded(weight)) => weight.0,\n        };\n        error!(\n            %faulty_weight,\n            total_weight = %self.highway.state().total_weight().0,\n            \"too many faulty validators\"\n        );\n        self.log_participation();\n        vec![ProtocolOutcome::FttExceeded]\n    }\n\n    /// Adds the given vertices to the protocol state, if possible, or requests missing\n    /// dependencies or validation. 
Recursively schedules events to add everything that is\n    /// unblocked now.\n    fn add_vertex(&mut self, now: Timestamp) -> ProtocolOutcomes<C> {\n        let (maybe_pending_vertex, mut outcomes) = self.synchronizer.pop_vertex_to_add(\n            &self.highway,\n            &self.pending_values,\n            self.config.max_requests_for_vertex,\n        );\n        let pending_vertex = match maybe_pending_vertex {\n            None => return outcomes,\n            Some(pending_vertex) => pending_vertex,\n        };\n\n        // If unit is sent by a doppelganger, deactivate this instance of an active\n        // validator. Continue processing the unit so that it can be added to the state.\n        if self.highway.is_doppelganger_vertex(pending_vertex.vertex()) {\n            error!(\n                vertex = ?pending_vertex.vertex(),\n                \"received vertex from a doppelganger. \\\n                 Are you running multiple nodes with the same validator key?\",\n            );\n            self.deactivate_validator();\n            outcomes.push(ProtocolOutcome::DoppelgangerDetected);\n        }\n\n        // If the vertex is invalid, drop all vertices that depend on this one, and disconnect from\n        // the faulty senders.\n        let sender = *pending_vertex.sender();\n        let vv = match self.highway.validate_vertex(pending_vertex.into()) {\n            Ok(vv) => vv,\n            Err((pvv, err)) => {\n                info!(?pvv, ?err, \"invalid vertex\");\n                let vertices = vec![pvv.inner().id()];\n                let faulty_senders = self.synchronizer.invalid_vertices(vertices);\n                outcomes.extend(faulty_senders.into_iter().map(ProtocolOutcome::Disconnect));\n                return outcomes;\n            }\n        };\n\n        // If the vertex contains a consensus value, i.e. 
it is a proposal, request validation.\n        let vertex = vv.inner();\n        if let (Some(value), Some(timestamp), Some(swunit)) =\n            (vertex.value(), vertex.timestamp(), vertex.unit())\n        {\n            let panorama = &swunit.wire_unit().panorama;\n            let fork_choice = self.highway.state().fork_choice(panorama);\n            if value.needs_validation() {\n                self.log_proposal(vertex, \"requesting proposal validation\");\n                let ancestor_values = self.ancestors(fork_choice).cloned().collect();\n                let block_context = BlockContext::new(timestamp, ancestor_values);\n                let proposed_block = ProposedBlock::new(value.clone(), block_context);\n                if self\n                    .pending_values\n                    .entry(proposed_block.clone())\n                    .or_default()\n                    .insert((vv, sender))\n                {\n                    outcomes.push(ProtocolOutcome::ValidateConsensusValue {\n                        sender,\n                        proposed_block,\n                    });\n                }\n                return outcomes;\n            }\n            self.log_proposal(vertex, \"proposal does not need validation\");\n        }\n\n        // Either consensus value doesn't need validation or it's not a proposal.\n        // We can add it to the state.\n        outcomes.extend(self.add_valid_vertex(vv, now));\n        // If we added new vertices to the state, check whether any dependencies we were\n        // waiting for are now satisfied, and try adding the pending vertices as well.\n        outcomes.extend(self.synchronizer.remove_satisfied_deps(&self.highway));\n        // Check whether any new blocks were finalized.\n        outcomes.extend(self.detect_finality());\n        outcomes\n    }\n\n    fn calculate_round_length(&mut self, vv: &ValidVertex<C>, now: Timestamp) {\n        let new_round_len = self\n            .round_success_meter\n  
          .calculate_new_length(self.highway.state());\n        // If the vertex contains a proposal, register it in the success meter.\n        // It's important to do this _after_ the calculation above - otherwise we might try to\n        // register the proposal before the meter is aware that a new round has started, and it\n        // will reject the proposal.\n        if vv.is_proposal() {\n            let vertex = vv.inner();\n            if let (Some(hash), Some(timestamp)) = (vertex.unit_hash(), vertex.timestamp()) {\n                trace!(%now, timestamp = timestamp.millis(), \"adding proposal to protocol state\");\n                self.round_success_meter.new_proposal(hash, timestamp);\n            } else {\n                error!(?vertex, \"proposal without unit hash and timestamp\");\n            }\n        }\n        self.highway.set_round_len(new_round_len);\n    }\n\n    fn add_valid_vertex(&mut self, vv: ValidVertex<C>, now: Timestamp) -> ProtocolOutcomes<C> {\n        if self.evidence_only && !vv.inner().is_evidence() {\n            error!(vertex = ?vv.inner(), \"unexpected vertex in evidence-only mode\");\n            return vec![];\n        }\n        if self.highway.has_vertex(vv.inner()) {\n            return vec![];\n        }\n        let mut outcomes = ProtocolOutcomes::new();\n        if let (Some(value), Some(unit)) = (vv.inner().value(), vv.inner().unit()) {\n            // We are adding a proposed block to the protocol state, so we might use it as an\n            // ancestor in the future. 
Notify the reactor so we don't re-propose those deploys.\n            let panorama = &unit.wire_unit().panorama;\n            let fork_choice = self.highway.state().fork_choice(panorama);\n            let ancestor_values = self.ancestors(fork_choice).cloned().collect();\n            let block_context = BlockContext::new(unit.wire_unit().timestamp, ancestor_values);\n            let proposed_block = ProposedBlock::new(value.clone(), block_context);\n            outcomes.push(ProtocolOutcome::HandledProposedBlock(proposed_block));\n        } else if let Some(hash) = vv.inner().unit_hash() {\n            trace!(?hash, \"adding unit to the protocol state\");\n        } else {\n            trace!(vertex=?vv.inner(), \"adding vertex to the protocol state\");\n        }\n        self.log_unit_size(vv.inner(), \"adding new unit to the protocol state\");\n        self.log_proposal(vv.inner(), \"adding valid proposal to the protocol state\");\n        let vertex_id = vv.inner().id();\n        // Check whether we should change the round length.\n        // It's important to do it before the vertex is added to the state - this way if the last\n        // round has finished, we now have all the vertices from that round in the state, and no\n        // newer ones.\n        self.calculate_round_length(&vv, now);\n        let av_effects = self.highway.add_valid_vertex(vv, now);\n        // Once vertex is added to the state, we can remove it from the cache.\n        self.pvv_cache.remove(&vertex_id);\n        outcomes.extend(self.process_av_effects(av_effects, now));\n        outcomes\n    }\n\n    /// Returns an instance of `RoundSuccessMeter` for the new era: resetting the counters where\n    /// appropriate.\n    fn next_era_round_succ_meter(&self, timestamp: Timestamp) -> RoundSuccessMeter<C> {\n        self.round_success_meter.next_era(timestamp)\n    }\n\n    /// Returns an iterator over all the values that are in parents of the given block.\n    fn ancestors<'a>(\n        
&'a self,\n        mut maybe_hash: Option<&'a C::Hash>,\n    ) -> impl Iterator<Item = &'a C::ConsensusValue> {\n        iter::from_fn(move || {\n            let hash = maybe_hash.take()?;\n            let block = self.highway.state().block(hash);\n            let value = Some(&block.value);\n            maybe_hash = block.parent();\n            value\n        })\n    }\n\n    /// Prints a log statement listing the inactive and faulty validators.\n    fn log_participation(&self) {\n        let participation = participation::Participation::new(&self.highway);\n        info!(?participation, \"validator participation\");\n    }\n\n    /// Logs the vertex' (network) serialized size.\n    fn log_unit_size(&self, vertex: &Vertex<C>, log_msg: &str) {\n        if self.config.log_unit_sizes {\n            if let Some(hash) = vertex.unit_hash() {\n                let size =\n                    SerializedMessage::from_message(&HighwayMessage::NewVertex(vertex.clone()))\n                        .into_raw()\n                        .len();\n                info!(size, %hash, \"{}\", log_msg);\n            }\n        }\n    }\n\n    /// Returns whether the switch block has already been finalized.\n    fn finalized_switch_block(&self) -> bool {\n        let is_switch = |block_hash: &C::Hash| self.highway.state().is_terminal_block(block_hash);\n        self.finality_detector\n            .last_finalized()\n            .is_some_and(is_switch)\n    }\n\n    /// Request the latest state from a random peer.\n    fn handle_request_state_timer(&mut self, now: Timestamp) -> ProtocolOutcomes<C> {\n        if self.evidence_only || self.finalized_switch_block() {\n            return vec![]; // Era has ended. 
No further progress is expected.\n        }\n        debug!(\n            instance_id = ?self.highway.instance_id(),\n            \"requesting latest state from random peer\",\n        );\n        // Request latest state from a peer and schedule the next request.\n        let mut outcomes = self.latest_state_request();\n        if let Some(interval) = self.config.request_state_interval {\n            outcomes.push(ProtocolOutcome::ScheduleTimer(\n                now.saturating_add(interval),\n                TIMER_ID_REQUEST_STATE,\n            ));\n        }\n        outcomes\n    }\n\n    /// Prints a log message if the vertex is a proposal unit. Otherwise returns `false`.\n    fn log_proposal(&self, vertex: &Vertex<C>, msg: &str) -> bool {\n        let (wire_unit, hash) = match vertex.unit() {\n            Some(swu) if swu.wire_unit().value.is_some() => (swu.wire_unit(), swu.hash()),\n            _ => return false, // Not a proposal.\n        };\n        let creator = if let Some(creator) = self.highway.validators().id(wire_unit.creator) {\n            creator\n        } else {\n            error!(?wire_unit, \"{}: invalid creator\", msg);\n            return true;\n        };\n        info!(\n            ?hash,\n            ?creator,\n            creator_index = wire_unit.creator.0,\n            timestamp = %wire_unit.timestamp,\n            round_exp = wire_unit.round_exp,\n            seq_number = wire_unit.seq_number,\n            \"{}\", msg\n        );\n        true\n    }\n\n    // Logs the details about the received vertex.\n    fn log_received_vertex(&self, vertex: &Vertex<C>) {\n        match vertex {\n            Vertex::Unit(swu) => {\n                let creator = if let Some(creator) = vertex\n                    .creator()\n                    .and_then(|vid| self.highway.validators().id(vid))\n                {\n                    creator\n                } else {\n                    error!(?vertex, \"invalid creator\");\n                    
return;\n                };\n\n                let wire_unit = swu.wire_unit();\n                let hash = swu.hash();\n\n                if vertex.is_proposal() {\n                    info!(\n                        ?hash,\n                        ?creator,\n                        creator_index = wire_unit.creator.0,\n                        timestamp = %wire_unit.timestamp,\n                        round_exp = wire_unit.round_exp,\n                        seq_number = wire_unit.seq_number,\n                        \"received a proposal\"\n                    );\n                } else {\n                    trace!(\n                        ?hash,\n                        ?creator,\n                        creator_index = wire_unit.creator.0,\n                        timestamp = %wire_unit.timestamp,\n                        round_exp = wire_unit.round_exp,\n                        seq_number = wire_unit.seq_number,\n                        \"received a non-proposal unit\"\n                    );\n                };\n            }\n            Vertex::Evidence(evidence) => trace!(?evidence, \"received an evidence\"),\n            Vertex::Endorsements(endorsement) => trace!(?endorsement, \"received an endorsement\"),\n            Vertex::Ping(ping) => trace!(?ping, \"received ping\"),\n        }\n    }\n\n    /// Prevalidates the vertex but checks the cache for previously validated vertices.\n    /// Avoids multiple validation of the same vertex.\n    fn pre_validate_vertex(\n        &mut self,\n        v: Vertex<C>,\n    ) -> Result<PreValidatedVertex<C>, (Vertex<C>, VertexError)> {\n        let id = v.id();\n        if let Some(prev_pvv) = self.pvv_cache.get(&id) {\n            return Ok(prev_pvv.clone());\n        }\n        let pvv = self.highway.pre_validate_vertex(v)?;\n        self.pvv_cache.insert(id, pvv.clone());\n        Ok(pvv)\n    }\n\n    /// Creates a message to send our panorama to a random peer.\n    fn latest_state_request(&self) -> 
ProtocolOutcomes<C> {\n        let request: HighwayMessage<C> = HighwayMessage::LatestStateRequest(\n            IndexPanorama::from_panorama(self.highway.state().panorama(), self.highway.state()),\n        );\n        vec![ProtocolOutcome::CreatedMessageToRandomPeer(\n            SerializedMessage::from_message(&request),\n        )]\n    }\n\n    /// Creates a batch of dependency requests if the peer has more units by the validator `vidx`\n    /// than we do; otherwise sends a batch of missing units to the peer.\n    fn batch_request(\n        &self,\n        rng: &mut NodeRng,\n        vid: ValidatorIndex,\n        our_next_seq: u64,\n        their_next_seq: u64,\n    ) -> Vec<HighwayMessage<C>> {\n        let state = self.highway.state();\n        if our_next_seq == their_next_seq {\n            return vec![];\n        }\n        if our_next_seq < their_next_seq {\n            // We're behind. Request missing vertices.\n            (our_next_seq..their_next_seq)\n                .take(self.config.max_request_batch_size)\n                .map(|unit_seq_number| {\n                    let uuid = rng.next_u64();\n                    debug!(?uuid, ?vid, ?unit_seq_number, \"requesting dependency\");\n                    HighwayMessage::RequestDependencyByHeight {\n                        uuid,\n                        vid,\n                        unit_seq_number,\n                    }\n                })\n                .collect()\n        } else {\n            // We're ahead.\n            match state.panorama().get(vid) {\n                None => {\n                    warn!(?vid, \"received a request for non-existing validator\");\n                    vec![]\n                }\n                Some(observation) => match observation {\n                    Observation::None => {\n                        warn!(\n                            ?vid,\n                            our_next_seq,\n                            ?observation,\n                            
\"expected unit for validator but found none\"\n                        );\n                        vec![]\n                    }\n                    Observation::Faulty => {\n                        let ev = match state.maybe_evidence(vid) {\n                            Some(ev) => ev.clone(),\n                            None => {\n                                warn!(\n                                    ?vid, instance_id=?self.highway.instance_id(),\n                                    \"panorama marked validator as faulty but no evidence was found\"\n                                );\n                                return vec![];\n                            }\n                        };\n                        vec![HighwayMessage::NewVertex(Vertex::Evidence(ev))]\n                    }\n                    Observation::Correct(hash) => (their_next_seq..our_next_seq)\n                        .take(self.config.max_request_batch_size)\n                        .filter_map(|seq_num| {\n                            let unit = state.find_in_swimlane(hash, seq_num).unwrap();\n                            state\n                                .wire_unit(unit, *self.highway.instance_id())\n                                .map(|swu| HighwayMessage::NewVertex(Vertex::Unit(swu)))\n                        })\n                        .collect(),\n                },\n            }\n        }\n    }\n\n    /// Grant read-only access to the internal `Highway` instance.\n    #[inline]\n    pub(crate) fn highway(&self) -> &Highway<C> {\n        &self.highway\n    }\n}\n\n#[allow(clippy::arithmetic_side_effects)]\nmod relaxed {\n    // This module exists solely to exempt the `EnumDiscriminants` macro generated code from the\n    // module-wide `clippy::arithmetic_side_effects` lint.\n\n    use datasize::DataSize;\n    use serde::{Deserialize, Serialize};\n    use strum::EnumDiscriminants;\n\n    use crate::components::consensus::{\n        highway_core::{\n            
highway::{Dependency, Vertex},\n            state::IndexPanorama,\n        },\n        traits::{ConsensusNetworkMessage, Context},\n        utils::ValidatorIndex,\n    };\n\n    #[derive(\n        DataSize, Clone, Serialize, Deserialize, Debug, PartialEq, Eq, Hash, EnumDiscriminants,\n    )]\n    #[serde(bound(\n        serialize = \"C::Hash: Serialize\",\n        deserialize = \"C::Hash: Deserialize<'de>\",\n    ))]\n    #[strum_discriminants(derive(strum::EnumIter))]\n    pub(crate) enum HighwayMessage<C>\n    where\n        C: Context,\n    {\n        NewVertex(Vertex<C>),\n        // A dependency request. u64 is a random UUID identifying the request.\n        RequestDependency(u64, Dependency<C>),\n        RequestDependencyByHeight {\n            uuid: u64,\n            vid: ValidatorIndex,\n            unit_seq_number: u64,\n        },\n        LatestStateRequest(IndexPanorama),\n    }\n\n    impl<C: Context> ConsensusNetworkMessage for HighwayMessage<C> {}\n}\npub(crate) use relaxed::{HighwayMessage, HighwayMessageDiscriminants};\n\nmod specimen_support {\n    use crate::{\n        components::consensus::ClContext,\n        utils::specimen::{largest_variant, Cache, LargestSpecimen, SizeEstimator},\n    };\n\n    use super::{HighwayMessage, HighwayMessageDiscriminants};\n\n    impl LargestSpecimen for HighwayMessage<ClContext> {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            largest_variant::<Self, HighwayMessageDiscriminants, _, _>(estimator, |variant| {\n                match variant {\n                    HighwayMessageDiscriminants::NewVertex => HighwayMessage::NewVertex(\n                        LargestSpecimen::largest_specimen(estimator, cache),\n                    ),\n                    HighwayMessageDiscriminants::RequestDependency => {\n                        HighwayMessage::RequestDependency(\n                            LargestSpecimen::largest_specimen(estimator, cache),\n                 
           LargestSpecimen::largest_specimen(estimator, cache),\n                        )\n                    }\n                    HighwayMessageDiscriminants::RequestDependencyByHeight => {\n                        HighwayMessage::RequestDependencyByHeight {\n                            uuid: LargestSpecimen::largest_specimen(estimator, cache),\n                            vid: LargestSpecimen::largest_specimen(estimator, cache),\n                            unit_seq_number: LargestSpecimen::largest_specimen(estimator, cache),\n                        }\n                    }\n                    HighwayMessageDiscriminants::LatestStateRequest => {\n                        HighwayMessage::LatestStateRequest(LargestSpecimen::largest_specimen(\n                            estimator, cache,\n                        ))\n                    }\n                }\n            })\n        }\n    }\n}\n\nimpl<C> ConsensusProtocol<C> for HighwayProtocol<C>\nwhere\n    C: Context + 'static,\n{\n    fn handle_message(\n        &mut self,\n        rng: &mut NodeRng,\n        sender: NodeId,\n        msg: SerializedMessage,\n        now: Timestamp,\n    ) -> ProtocolOutcomes<C> {\n        match msg.deserialize_incoming() {\n            Err(err) => {\n                warn!(?err, \"could not deserialize highway message\");\n                vec![ProtocolOutcome::Disconnect(sender)]\n            }\n            Ok(HighwayMessage::NewVertex(v))\n                if self.highway.has_vertex(&v) || (self.evidence_only && !v.is_evidence()) =>\n            {\n                trace!(\n                    has_vertex = self.highway.has_vertex(&v),\n                    is_evidence = v.is_evidence(),\n                    evidence_only = %self.evidence_only,\n                    \"received an irrelevant vertex\"\n                );\n                vec![]\n            }\n            Ok(HighwayMessage::NewVertex(v)) => {\n                let v_id = v.id();\n                // If we already 
have that vertex, do not process it.\n                if self.highway.has_dependency(&v_id) {\n                    return vec![];\n                }\n                let pvv = match self.pre_validate_vertex(v) {\n                    Ok(pvv) => pvv,\n                    Err((_, err)) => {\n                        // drop the vertices that might have depended on this one\n                        let faulty_senders = self.synchronizer.invalid_vertices(vec![v_id]);\n                        warn!(?err, ?sender, ?faulty_senders, \"invalid incoming message\");\n                        return iter::once(ProtocolOutcome::Disconnect(sender))\n                            .chain(faulty_senders.into_iter().map(ProtocolOutcome::Disconnect))\n                            .collect();\n                    }\n                };\n                // Keep track of whether the prevalidated vertex was from an equivocator\n                let is_faulty = match pvv.inner().creator() {\n                    Some(creator) => self.highway.state().is_faulty(creator),\n                    None => false,\n                };\n\n                if is_faulty && !self.synchronizer.is_dependency(&pvv.inner().id()) {\n                    trace!(\"received a vertex from a faulty validator; dropping\");\n                    return vec![];\n                }\n\n                match pvv.timestamp() {\n                    Some(timestamp)\n                        if timestamp > now.saturating_add(self.config.pending_vertex_timeout) =>\n                    {\n                        trace!(\"received a vertex with a timestamp far in the future; dropping\");\n                        vec![]\n                    }\n                    Some(timestamp) if timestamp > now => {\n                        // If it's not from an equivocator and from the future, add to queue\n                        trace!(\"received a vertex from the future; storing for later\");\n                        self.synchronizer\n              
              .store_vertex_for_addition_later(timestamp, now, sender, pvv);\n                        let timer_id = TIMER_ID_VERTEX_WITH_FUTURE_TIMESTAMP;\n                        vec![ProtocolOutcome::ScheduleTimer(timestamp, timer_id)]\n                    }\n                    _ => {\n                        // If it's not from an equivocator or it is a transitive dependency, add the\n                        // vertex\n                        self.log_received_vertex(pvv.inner());\n                        self.synchronizer.schedule_add_vertex(sender, pvv, now)\n                    }\n                }\n            }\n            Ok(HighwayMessage::RequestDependency(uuid, dep)) => {\n                trace!(?uuid, dependency=?dep, \"received a request for a dependency\");\n                match self.highway.get_dependency(&dep) {\n                    GetDepOutcome::None => {\n                        info!(?dep, peer_id=?sender, \"requested dependency doesn't exist\");\n                        vec![]\n                    }\n                    GetDepOutcome::Evidence(vid) => {\n                        vec![ProtocolOutcome::SendEvidence(sender, vid)]\n                    }\n                    GetDepOutcome::Vertex(vv) => vec![ProtocolOutcome::CreatedTargetedMessage(\n                        SerializedMessage::from_message(&HighwayMessage::NewVertex(vv.into())),\n                        sender,\n                    )],\n                }\n            }\n            Ok(HighwayMessage::RequestDependencyByHeight {\n                uuid,\n                vid,\n                unit_seq_number,\n            }) => {\n                debug!(\n                    ?uuid,\n                    ?vid,\n                    ?unit_seq_number,\n                    \"received a request for a dependency\"\n                );\n                match self.highway.get_dependency_by_index(vid, unit_seq_number) {\n                    GetDepOutcome::None => {\n                        
info!(\n                            ?vid,\n                            ?unit_seq_number,\n                            ?sender,\n                            \"requested dependency doesn't exist\"\n                        );\n                        vec![]\n                    }\n                    GetDepOutcome::Evidence(vid) => {\n                        vec![ProtocolOutcome::SendEvidence(sender, vid)]\n                    }\n                    GetDepOutcome::Vertex(vv) => {\n                        vec![ProtocolOutcome::CreatedTargetedMessage(\n                            SerializedMessage::from_message(&HighwayMessage::NewVertex(vv.into())),\n                            sender,\n                        )]\n                    }\n                }\n            }\n            Ok(HighwayMessage::LatestStateRequest(their_index_panorama)) => {\n                trace!(\"received a request for the latest state\");\n                let state = self.highway.state();\n\n                let create_message = |((vid, our_obs), their_obs): (\n                    (ValidatorIndex, &IndexObservation),\n                    &IndexObservation,\n                )| {\n                    match (*our_obs, *their_obs) {\n                        (our_obs, their_obs) if our_obs == their_obs => vec![],\n\n                        (IndexObservation::Faulty, _) => state\n                            .maybe_evidence(vid)\n                            .map(|evidence| {\n                                HighwayMessage::NewVertex(Vertex::Evidence(evidence.clone()))\n                            })\n                            .into_iter()\n                            .collect(),\n\n                        (_, IndexObservation::Faulty) => {\n                            let dependency = Dependency::Evidence(vid);\n                            let uuid = rng.next_u64();\n                            debug!(?uuid, \"requesting evidence\");\n                            
vec![HighwayMessage::RequestDependency(uuid, dependency)]\n                        }\n\n                        (\n                            IndexObservation::NextSeq(our_next_seq),\n                            IndexObservation::NextSeq(their_next_seq),\n                        ) => self.batch_request(rng, vid, our_next_seq, their_next_seq),\n                    }\n                };\n\n                IndexPanorama::from_panorama(state.panorama(), state)\n                    .enumerate()\n                    .zip(&their_index_panorama)\n                    .map(create_message)\n                    .flat_map(|msgs| {\n                        msgs.into_iter().map(|msg| {\n                            ProtocolOutcome::CreatedTargetedMessage(\n                                SerializedMessage::from_message(&msg),\n                                sender,\n                            )\n                        })\n                    })\n                    .collect()\n            }\n        }\n    }\n\n    fn handle_request_message(\n        &mut self,\n        _rng: &mut NodeRng,\n        sender: NodeId,\n        _msg: SerializedMessage,\n        _now: Timestamp,\n    ) -> (ProtocolOutcomes<C>, Option<SerializedMessage>) {\n        info!(?sender, \"invalid incoming request\");\n        (vec![ProtocolOutcome::Disconnect(sender)], None)\n    }\n\n    fn handle_timer(\n        &mut self,\n        timestamp: Timestamp,\n        _now: Timestamp,\n        timer_id: TimerId,\n        _rng: &mut NodeRng,\n    ) -> ProtocolOutcomes<C> {\n        match timer_id {\n            TIMER_ID_ACTIVE_VALIDATOR => {\n                let effects = self.highway.handle_timer(timestamp);\n                self.process_av_effects(effects, timestamp)\n            }\n            TIMER_ID_VERTEX_WITH_FUTURE_TIMESTAMP => {\n                self.synchronizer.add_past_due_stored_vertices(timestamp)\n            }\n            TIMER_ID_PURGE_VERTICES => {\n                let oldest = 
timestamp.saturating_sub(self.config.pending_vertex_timeout);\n                self.synchronizer.purge_vertices(oldest);\n                self.pvv_cache.clear();\n                let next_time = timestamp.saturating_add(self.config.pending_vertex_timeout);\n                vec![ProtocolOutcome::ScheduleTimer(next_time, timer_id)]\n            }\n            TIMER_ID_LOG_PARTICIPATION => match self.config.log_participation_interval {\n                Some(interval) if !self.evidence_only && !self.finalized_switch_block() => {\n                    self.log_participation();\n                    vec![ProtocolOutcome::ScheduleTimer(\n                        timestamp.saturating_add(interval),\n                        timer_id,\n                    )]\n                }\n                _ => vec![],\n            },\n            TIMER_ID_REQUEST_STATE => self.handle_request_state_timer(timestamp),\n            TIMER_ID_SYNCHRONIZER_LOG => {\n                self.synchronizer.log_len();\n                match self.config.log_synchronizer_interval {\n                    Some(interval) if !self.finalized_switch_block() => {\n                        vec![ProtocolOutcome::ScheduleTimer(\n                            timestamp.saturating_add(interval),\n                            timer_id,\n                        )]\n                    }\n                    _ => vec![],\n                }\n            }\n            _ => unreachable!(\"unexpected timer ID\"),\n        }\n    }\n\n    fn handle_is_current(&self, now: Timestamp) -> ProtocolOutcomes<C> {\n        // Request latest protocol state of the current era.\n        let mut outcomes = self.latest_state_request();\n        // If configured, schedule periodic latest state requests.\n        if let Some(interval) = self.config.request_state_interval {\n            outcomes.push(ProtocolOutcome::ScheduleTimer(\n                now.saturating_add(interval),\n                TIMER_ID_REQUEST_STATE,\n            ));\n        
}\n        outcomes\n    }\n\n    fn handle_action(&mut self, action_id: ActionId, now: Timestamp) -> ProtocolOutcomes<C> {\n        match action_id {\n            ACTION_ID_VERTEX => self.add_vertex(now),\n            _ => unreachable!(\"unexpected action ID\"),\n        }\n    }\n\n    fn propose(&mut self, proposed_block: ProposedBlock<C>, now: Timestamp) -> ProtocolOutcomes<C> {\n        let (value, block_context) = proposed_block.destructure();\n        let effects = self.highway.propose(value, block_context);\n        self.process_av_effects(effects, now)\n    }\n\n    fn resolve_validity(\n        &mut self,\n        proposed_block: ProposedBlock<C>,\n        valid: bool,\n        now: Timestamp,\n    ) -> ProtocolOutcomes<C> {\n        if valid {\n            let mut outcomes = self\n                .pending_values\n                .remove(&proposed_block)\n                .into_iter()\n                .flatten()\n                .flat_map(|(vv, _)| self.add_valid_vertex(vv, now))\n                .collect_vec();\n            outcomes.extend(self.synchronizer.remove_satisfied_deps(&self.highway));\n            outcomes.extend(self.detect_finality());\n            outcomes\n        } else {\n            // TODO: Report proposer as faulty?\n            // Drop vertices dependent on the invalid value.\n            let dropped_vertices = self.pending_values.remove(&proposed_block);\n            warn!(?proposed_block, ?dropped_vertices, \"proposal is invalid\");\n            let dropped_vertex_ids = dropped_vertices\n                .into_iter()\n                .flatten()\n                .map(|(vv, _)| {\n                    self.log_proposal(vv.inner(), \"dropping invalid proposal\");\n                    vv.inner().id()\n                })\n                .collect();\n            // recursively remove vertices depending on the dropped ones\n            let _faulty_senders = self.synchronizer.invalid_vertices(dropped_vertex_ids);\n            // We don't 
disconnect from the faulty senders here: The block validator considers the\n            // value \"invalid\" even if it just couldn't download the deploys, which could just be\n            // because the original sender went offline.\n            vec![]\n        }\n    }\n\n    fn activate_validator(\n        &mut self,\n        our_id: C::ValidatorId,\n        secret: C::ValidatorSecret,\n        now: Timestamp,\n        unit_hash_file: Option<PathBuf>,\n    ) -> ProtocolOutcomes<C> {\n        let ftt = self.finality_detector.fault_tolerance_threshold();\n        let av_effects = self\n            .highway\n            .activate_validator(our_id, secret, now, unit_hash_file, ftt);\n        self.process_av_effects(av_effects, now)\n    }\n\n    fn deactivate_validator(&mut self) {\n        self.highway.deactivate_validator()\n    }\n\n    fn set_evidence_only(&mut self) {\n        // TODO: We could also drop the finality detector and round success meter here. Maybe make\n        // HighwayProtocol an enum with an EvidenceOnly variant?\n        self.pending_values.clear();\n        self.synchronizer.retain_evidence_only();\n        self.highway.retain_evidence_only();\n        self.evidence_only = true;\n    }\n\n    fn has_evidence(&self, vid: &C::ValidatorId) -> bool {\n        self.highway.has_evidence(vid)\n    }\n\n    fn mark_faulty(&mut self, vid: &C::ValidatorId) {\n        self.highway.mark_faulty(vid);\n    }\n\n    fn send_evidence(&self, sender: NodeId, vid: &C::ValidatorId) -> ProtocolOutcomes<C> {\n        self.highway\n            .validators()\n            .get_index(vid)\n            .and_then(\n                move |vidx| match self.highway.get_dependency(&Dependency::Evidence(vidx)) {\n                    GetDepOutcome::None | GetDepOutcome::Evidence(_) => None,\n                    GetDepOutcome::Vertex(vv) => {\n                        let msg = HighwayMessage::NewVertex(vv.into());\n                        
Some(ProtocolOutcome::CreatedTargetedMessage(\n                            SerializedMessage::from_message(&msg),\n                            sender,\n                        ))\n                    }\n                },\n            )\n            .into_iter()\n            .collect()\n    }\n\n    /// Sets the pause status: While paused we don't create any new units, just pings.\n    fn set_paused(&mut self, paused: bool, _now: Timestamp) -> ProtocolOutcomes<C> {\n        self.highway.set_paused(paused);\n        vec![]\n    }\n\n    fn validators_with_evidence(&self) -> Vec<&C::ValidatorId> {\n        self.highway.validators_with_evidence().collect()\n    }\n\n    fn as_any(&self) -> &dyn Any {\n        self\n    }\n\n    fn is_active(&self) -> bool {\n        self.highway.is_active()\n    }\n\n    fn instance_id(&self) -> &C::InstanceId {\n        self.highway.instance_id()\n    }\n\n    fn next_round_length(&self) -> Option<TimeDiff> {\n        self.highway.next_round_length()\n    }\n}\n\n/// Maximum possible rounds in one era.\n///\n/// It is the maximum of:\n/// - The era duration divided by the minimum round length, that is the maximum number of blocks\n///   that can fit within the duration of one era,\n/// - The minimum era height, which is the minimum number of blocks for an era to be considered\n///   complete.\npub fn max_rounds_per_era(\n    minimum_era_height: u64,\n    era_duration: TimeDiff,\n    minimum_round_length: TimeDiff,\n) -> u64 {\n    #[allow(clippy::arithmetic_side_effects)] // minimum_round_length is guaranteed to be > 0.\n    minimum_era_height.max((era_duration.saturating_add(1)) / minimum_round_length)\n}\n"
  },
  {
    "path": "node/src/components/consensus/protocols/zug/config.rs",
    "content": "use serde::{Deserialize, Serialize};\n\nuse datasize::DataSize;\n\nuse casper_types::{serde_option_time_diff, TimeDiff};\n\n/// `Zug`-specific configuration.\n/// *Note*: This is *not* protocol configuration that has to be the same on all nodes.\n#[derive(DataSize, Debug, Clone, Serialize, Deserialize)]\n#[serde(deny_unknown_fields)]\npub struct Config {\n    /// Request the latest protocol state from a random peer periodically, with this interval. 0\n    /// means disabled.\n    #[serde(with = \"serde_option_time_diff\")]\n    pub sync_state_interval: Option<TimeDiff>,\n    /// Log inactive or faulty validators periodically, with this interval. 0 means disabled.\n    #[serde(with = \"serde_option_time_diff\")]\n    pub log_participation_interval: Option<TimeDiff>,\n    /// The minimal and initial timeout for a proposal.\n    pub proposal_timeout: TimeDiff,\n    /// The additional proposal delay that is still considered fast enough, in percent. This should\n    /// take into account variables like empty vs. full blocks, network traffic etc.\n    /// E.g. 
if proposing a full block while under heavy load takes 50% longer than an empty one\n    /// while idle this should be at least 50, meaning that the timeout is 50% longer than\n    /// necessary for a quorum of recent proposals, approximately.\n    pub proposal_grace_period: u16,\n    /// The average number of rounds after which the proposal timeout adapts by a factor of 2.\n    /// Note: It goes up faster than it goes down: it takes fewer rounds to double than to halve.\n    pub proposal_timeout_inertia: u16,\n    /// Incoming proposals whose timestamps lie further in the future are rejected.\n    pub clock_tolerance: TimeDiff,\n}\n\nimpl Default for Config {\n    fn default() -> Self {\n        Config {\n            sync_state_interval: Some(\"1sec\".parse().unwrap()),\n            log_participation_interval: Some(\"10sec\".parse().unwrap()),\n            proposal_timeout: \"1sec\".parse().unwrap(),\n            clock_tolerance: \"1sec\".parse().unwrap(),\n            proposal_grace_period: 200,\n            proposal_timeout_inertia: 10,\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/protocols/zug/des_testing.rs",
    "content": "#![allow(clippy::arithmetic_side_effects)] // In tests, overflows panic anyway.\n\nuse std::{\n    collections::{hash_map::DefaultHasher, HashMap, VecDeque},\n    fmt::{self, Debug, Display, Formatter},\n    hash::{Hash, Hasher},\n};\n\nuse datasize::DataSize;\nuse hex_fmt::HexFmt;\nuse itertools::Itertools;\nuse rand::{prelude::IteratorRandom, Rng};\nuse serde::{Deserialize, Serialize};\nuse tracing::{trace, warn};\n\nuse casper_types::{TimeDiff, Timestamp};\n\nuse super::{\n    config::Config,\n    message::{Content, Message as ZugProtocolMessage, SignedMessage},\n    Params, Zug,\n};\nuse crate::{\n    components::consensus::{\n        consensus_protocol::{\n            ConsensusProtocol, FinalizedBlock, ProposedBlock, ProtocolOutcome, ProtocolOutcomes,\n        },\n        tests::{\n            consensus_des_testing::{\n                DeliverySchedule, Fault as DesFault, Message, Node, Target, TargetedMessage,\n                ValidatorId, VirtualNet,\n            },\n            queue::QueueEntry,\n        },\n        traits::{ConsensusValueT, Context, ValidatorSecret},\n        utils::{Validators, Weight},\n        ActionId, BlockContext, SerializedMessage, TimerId,\n    },\n    types::NodeId,\n    NodeRng,\n};\n\n#[derive(Eq, PartialEq, Clone, Debug, Hash, Serialize, Deserialize, DataSize, Default)]\npub(crate) struct ConsensusValue(Vec<u8>);\n\nimpl ConsensusValueT for ConsensusValue {\n    fn needs_validation(&self) -> bool {\n        !self.0.is_empty()\n    }\n}\n\nimpl Display for ConsensusValue {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"{:10}\", HexFmt(&self.0))\n    }\n}\n\nconst TEST_MIN_ROUND_LEN: TimeDiff = TimeDiff::from_millis(1 << 12);\nconst TEST_END_HEIGHT: u64 = 100000;\npub(crate) const TEST_INSTANCE_ID: u64 = 42;\n\n#[derive(Debug, Clone, Eq, PartialEq, Hash)]\nenum ZugMessage {\n    GossipMessage(SerializedMessage),\n    TargetedMessage(SerializedMessage, NodeId),\n    
MessageToRandomPeer(SerializedMessage),\n    RequestToRandomPeer(SerializedMessage),\n    Timer(Timestamp, TimerId),\n    QueueAction(ActionId),\n    RequestNewBlock(BlockContext<TestContext>),\n    FinalizedBlock(FinalizedBlock<TestContext>),\n    ValidateConsensusValue(NodeId, ProposedBlock<TestContext>),\n    NewEvidence(ValidatorId),\n    SendEvidence(NodeId, ValidatorId),\n    WeAreFaulty,\n    DoppelgangerDetected,\n    FttExceeded,\n    Disconnect(NodeId),\n    HandledProposedBlock(ProposedBlock<TestContext>),\n}\n\nimpl ZugMessage {\n    fn is_signed_gossip_message(&self) -> bool {\n        if let ZugMessage::GossipMessage(raw) = self {\n            let deserialized: super::Message<TestContext> =\n                raw.deserialize_incoming().expect(\"message not valid\");\n            matches!(deserialized, ZugProtocolMessage::Signed(_))\n        } else {\n            false\n        }\n    }\n\n    fn is_proposal(&self) -> bool {\n        if let ZugMessage::GossipMessage(raw) = self {\n            let deserialized: super::Message<TestContext> =\n                raw.deserialize_incoming().expect(\"message not valid\");\n            matches!(deserialized, ZugProtocolMessage::Proposal { .. 
})\n        } else {\n            false\n        }\n    }\n}\n\nimpl PartialOrd for ZugMessage {\n    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {\n        Some(self.cmp(other))\n    }\n}\n\nimpl Ord for ZugMessage {\n    fn cmp(&self, other: &Self) -> std::cmp::Ordering {\n        let mut hasher0 = DefaultHasher::new();\n        let mut hasher1 = DefaultHasher::new();\n        self.hash(&mut hasher0);\n        other.hash(&mut hasher1);\n        hasher0.finish().cmp(&hasher1.finish())\n    }\n}\n\nimpl From<ProtocolOutcome<TestContext>> for ZugMessage {\n    fn from(outcome: ProtocolOutcome<TestContext>) -> ZugMessage {\n        match outcome {\n            ProtocolOutcome::CreatedGossipMessage(msg) => ZugMessage::GossipMessage(msg),\n            ProtocolOutcome::CreatedTargetedMessage(msg, target) => {\n                ZugMessage::TargetedMessage(msg, target)\n            }\n            ProtocolOutcome::CreatedMessageToRandomPeer(msg) => {\n                ZugMessage::MessageToRandomPeer(msg)\n            }\n            ProtocolOutcome::CreatedRequestToRandomValidator(request) => {\n                ZugMessage::RequestToRandomPeer(request)\n            }\n            ProtocolOutcome::ScheduleTimer(timestamp, timer_id) => {\n                ZugMessage::Timer(timestamp, timer_id)\n            }\n            ProtocolOutcome::QueueAction(action_id) => ZugMessage::QueueAction(action_id),\n            ProtocolOutcome::CreateNewBlock(block_ctx, _expiry) => {\n                ZugMessage::RequestNewBlock(block_ctx)\n            }\n            ProtocolOutcome::FinalizedBlock(finalized_block) => {\n                ZugMessage::FinalizedBlock(finalized_block)\n            }\n            ProtocolOutcome::ValidateConsensusValue {\n                sender,\n                proposed_block,\n            } => ZugMessage::ValidateConsensusValue(sender, proposed_block),\n            ProtocolOutcome::NewEvidence(vid) => ZugMessage::NewEvidence(vid),\n            
ProtocolOutcome::SendEvidence(target, vid) => ZugMessage::SendEvidence(target, vid),\n            ProtocolOutcome::WeAreFaulty => ZugMessage::WeAreFaulty,\n            ProtocolOutcome::DoppelgangerDetected => ZugMessage::DoppelgangerDetected,\n            ProtocolOutcome::FttExceeded => ZugMessage::FttExceeded,\n            ProtocolOutcome::Disconnect(sender) => ZugMessage::Disconnect(sender),\n            ProtocolOutcome::HandledProposedBlock(proposed_block) => {\n                ZugMessage::HandledProposedBlock(proposed_block)\n            }\n        }\n    }\n}\n\n#[derive(Debug, Eq, PartialEq)]\npub(crate) enum TestRunError {\n    /// VirtualNet was missing a validator when it was expected to exist.\n    MissingValidator(ValidatorId),\n    /// No more messages in the message queue.\n    NoMessages,\n}\n\nimpl Display for TestRunError {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            TestRunError::NoMessages => write!(\n                f,\n                \"Test finished prematurely due to lack of messages in the queue\"\n            ),\n            TestRunError::MissingValidator(id) => {\n                write!(f, \"Virtual net is missing validator {:?}.\", id)\n            }\n        }\n    }\n}\n\nenum Distribution {\n    Uniform,\n}\n\nimpl Distribution {\n    /// Returns vector of `count` elements of random values between `lower` and `upper`.\n    fn gen_range_vec(&self, rng: &mut NodeRng, lower: u64, upper: u64, count: u8) -> Vec<u64> {\n        match self {\n            Distribution::Uniform => (0..count).map(|_| rng.gen_range(lower..upper)).collect(),\n        }\n    }\n}\n\ntrait DeliveryStrategy {\n    fn gen_delay(\n        &mut self,\n        rng: &mut NodeRng,\n        message: &ZugMessage,\n        distribution: &Distribution,\n        base_delivery_timestamp: Timestamp,\n    ) -> DeliverySchedule;\n}\n\nstruct ZugValidator {\n    zug: Zug<TestContext>,\n    fault: Option<DesFault>,\n}\n\nimpl 
ZugValidator {\n    fn new(zug: Zug<TestContext>, fault: Option<DesFault>) -> Self {\n        ZugValidator { zug, fault }\n    }\n\n    fn zug_mut(&mut self) -> &mut Zug<TestContext> {\n        &mut self.zug\n    }\n\n    fn zug(&self) -> &Zug<TestContext> {\n        &self.zug\n    }\n\n    fn post_hook(&mut self, delivery_time: Timestamp, msg: ZugMessage) -> Vec<ZugMessage> {\n        match self.fault.as_ref() {\n            Some(DesFault::TemporarilyMute { from, till })\n                if *from <= delivery_time && delivery_time <= *till =>\n            {\n                // For mute validators we drop the generated messages to be sent, if the delivery\n                // time is in the interval in which they are muted.\n                match msg {\n                    ZugMessage::GossipMessage(_)\n                    | ZugMessage::TargetedMessage(_, _)\n                    | ZugMessage::MessageToRandomPeer(_)\n                    | ZugMessage::RequestToRandomPeer(_)\n                    | ZugMessage::SendEvidence(_, _) => {\n                        warn!(\"Validator is mute – won't send messages in response\");\n                        vec![]\n                    }\n                    ZugMessage::Timer(_, _)\n                    | ZugMessage::QueueAction(_)\n                    | ZugMessage::RequestNewBlock(_)\n                    | ZugMessage::FinalizedBlock(_)\n                    | ZugMessage::ValidateConsensusValue(_, _)\n                    | ZugMessage::NewEvidence(_)\n                    | ZugMessage::Disconnect(_)\n                    | ZugMessage::HandledProposedBlock(_) => vec![msg],\n                    ZugMessage::WeAreFaulty => {\n                        panic!(\"validator equivocated unexpectedly\");\n                    }\n                    ZugMessage::DoppelgangerDetected => {\n                        panic!(\"unexpected doppelganger detected\");\n                    }\n                    ZugMessage::FttExceeded => {\n                        
panic!(\"unexpected FTT exceeded\");\n                    }\n                }\n            }\n            Some(DesFault::PermanentlyMute) => {\n                // For permanently mute validators we drop the generated messages to be sent\n                match msg {\n                    ZugMessage::GossipMessage(_)\n                    | ZugMessage::TargetedMessage(_, _)\n                    | ZugMessage::MessageToRandomPeer(_)\n                    | ZugMessage::RequestToRandomPeer(_)\n                    | ZugMessage::SendEvidence(_, _) => {\n                        warn!(\"Validator is mute – won't send messages in response\");\n                        vec![]\n                    }\n                    ZugMessage::Timer(_, _)\n                    | ZugMessage::QueueAction(_)\n                    | ZugMessage::RequestNewBlock(_)\n                    | ZugMessage::FinalizedBlock(_)\n                    | ZugMessage::ValidateConsensusValue(_, _)\n                    | ZugMessage::NewEvidence(_)\n                    | ZugMessage::Disconnect(_)\n                    | ZugMessage::HandledProposedBlock(_) => vec![msg],\n                    ZugMessage::WeAreFaulty => {\n                        panic!(\"validator equivocated unexpectedly\");\n                    }\n                    ZugMessage::DoppelgangerDetected => {\n                        panic!(\"unexpected doppelganger detected\");\n                    }\n                    ZugMessage::FttExceeded => {\n                        panic!(\"unexpected FTT exceeded\");\n                    }\n                }\n            }\n            None | Some(DesFault::TemporarilyMute { .. 
}) => {\n                // Honest validator.\n                match &msg {\n                    ZugMessage::WeAreFaulty => {\n                        panic!(\"validator equivocated unexpectedly\");\n                    }\n                    ZugMessage::DoppelgangerDetected => {\n                        panic!(\"unexpected doppelganger detected\");\n                    }\n                    ZugMessage::FttExceeded => {\n                        panic!(\"unexpected FTT exceeded\");\n                    }\n                    _ => vec![msg],\n                }\n            }\n            Some(DesFault::Equivocate) => match msg {\n                ZugMessage::GossipMessage(ref serialized_msg) => {\n                    match serialized_msg.deserialize_incoming::<ZugProtocolMessage<TestContext>>() {\n                        Ok(ZugProtocolMessage::Signed(\n                            signed_msg @ SignedMessage { content, .. },\n                        )) => match content {\n                            Content::Echo(hash) => {\n                                let conflicting_message = SignedMessage::sign_new(\n                                    signed_msg.round_id,\n                                    signed_msg.instance_id,\n                                    Content::<TestContext>::Echo(HashWrapper(\n                                        hash.0.wrapping_add(1),\n                                    )),\n                                    signed_msg.validator_idx,\n                                    &TestSecret(signed_msg.validator_idx.0.into()),\n                                );\n                                vec![\n                                    ZugMessage::GossipMessage(SerializedMessage::from_message(\n                                        &ZugProtocolMessage::Signed(conflicting_message),\n                                    )),\n                                    msg,\n                                ]\n                            }\n                
            Content::Vote(vote) => {\n                                let conflicting_message = SignedMessage::sign_new(\n                                    signed_msg.round_id,\n                                    signed_msg.instance_id,\n                                    Content::<TestContext>::Vote(!vote),\n                                    signed_msg.validator_idx,\n                                    &TestSecret(signed_msg.validator_idx.0.into()),\n                                );\n                                vec![\n                                    ZugMessage::GossipMessage(SerializedMessage::from_message(\n                                        &ZugProtocolMessage::Signed(conflicting_message),\n                                    )),\n                                    msg,\n                                ]\n                            }\n                        },\n                        _ => vec![msg],\n                    }\n                }\n                _ => vec![msg],\n            },\n        }\n    }\n}\n\ntype ZugNode = Node<ConsensusValue, ZugMessage, ZugValidator>;\n\ntype ZugNet = VirtualNet<ConsensusValue, ZugMessage, ZugValidator>;\n\nstruct ZugTestHarness<DS>\nwhere\n    DS: DeliveryStrategy,\n{\n    virtual_net: ZugNet,\n    /// Consensus values to be proposed.\n    /// Order of values in the vector defines the order in which they will be proposed.\n    consensus_values: VecDeque<ConsensusValue>,\n    /// A strategy to pseudo randomly change the message delivery times.\n    delivery_time_strategy: DS,\n    /// Distribution of delivery times.\n    delivery_time_distribution: Distribution,\n    /// Mapping of validator IDs to node IDs\n    vid_to_node_id: HashMap<ValidatorId, NodeId>,\n    /// Mapping of node IDs to validator IDs\n    node_id_to_vid: HashMap<NodeId, ValidatorId>,\n}\n\ntype TestResult<T> = Result<T, TestRunError>;\n\nimpl<DS> ZugTestHarness<DS>\nwhere\n    DS: DeliveryStrategy,\n{\n    /// Advance the test 
by one message.\n    ///\n    /// Pops one message from the message queue (if there are any)\n    /// and pass it to the recipient validator for execution.\n    /// Messages returned from the execution are scheduled for later delivery.\n    pub(crate) fn crank(&mut self, rng: &mut NodeRng) -> TestResult<()> {\n        let QueueEntry {\n            delivery_time,\n            recipient,\n            message,\n        } = self\n            .virtual_net\n            .pop_message()\n            .ok_or(TestRunError::NoMessages)?;\n\n        let span = tracing::trace_span!(\"crank\", validator = %recipient);\n        let _enter = span.enter();\n        trace!(\n            \"Processing: tick {}, sender validator={}, payload {:?}\",\n            delivery_time,\n            message.sender,\n            message.payload(),\n        );\n\n        let messages = self.process_message(rng, recipient, message, delivery_time)?;\n\n        let targeted_messages = messages\n            .into_iter()\n            .filter_map(|zm| {\n                let delivery = self.delivery_time_strategy.gen_delay(\n                    rng,\n                    &zm,\n                    &self.delivery_time_distribution,\n                    delivery_time,\n                );\n                match delivery {\n                    DeliverySchedule::Drop => {\n                        trace!(\"{:?} message is dropped.\", zm);\n                        None\n                    }\n                    DeliverySchedule::AtInstant(timestamp) => {\n                        trace!(\"{:?} scheduled for {:?}\", zm, timestamp);\n                        self.convert_into_targeted(zm, recipient, rng)\n                            .map(|targeted| (targeted, timestamp))\n                    }\n                }\n            })\n            .collect();\n\n        self.virtual_net.dispatch_messages(targeted_messages);\n        Ok(())\n    }\n\n    fn convert_into_targeted(\n        &self,\n        zm: ZugMessage,\n      
  creator: ValidatorId,\n        rng: &mut NodeRng,\n    ) -> Option<TargetedMessage<ZugMessage>> {\n        let create_msg = |zm: ZugMessage| Message::new(creator, zm);\n\n        match zm {\n            ZugMessage::GossipMessage(_) => Some(TargetedMessage::new(\n                create_msg(zm),\n                Target::AllExcept(creator),\n            )),\n            ZugMessage::TargetedMessage(_, target) => self\n                .node_id_to_vid\n                .get(&target)\n                .map(|vid| TargetedMessage::new(create_msg(zm), Target::SingleValidator(*vid))),\n            ZugMessage::MessageToRandomPeer(_) | ZugMessage::RequestToRandomPeer(_) => self\n                .virtual_net\n                .validators_ids()\n                .choose(rng)\n                .map(|random_vid| {\n                    TargetedMessage::new(create_msg(zm), Target::SingleValidator(*random_vid))\n                }),\n            ZugMessage::Timer(_, _)\n            | ZugMessage::QueueAction(_)\n            | ZugMessage::RequestNewBlock(_)\n            | ZugMessage::FinalizedBlock(_)\n            | ZugMessage::ValidateConsensusValue(_, _)\n            | ZugMessage::NewEvidence(_)\n            | ZugMessage::Disconnect(_)\n            | ZugMessage::HandledProposedBlock(_)\n            | ZugMessage::SendEvidence(_, _)\n            | ZugMessage::WeAreFaulty\n            | ZugMessage::DoppelgangerDetected\n            | ZugMessage::FttExceeded => Some(TargetedMessage::new(\n                create_msg(zm),\n                Target::SingleValidator(creator),\n            )),\n        }\n    }\n\n    fn next_consensus_value(&mut self, height: u64) -> ConsensusValue {\n        self.consensus_values\n            .get(height as usize)\n            .cloned()\n            .unwrap_or_default()\n    }\n\n    /// Helper for getting validator from the underlying virtual net.\n    fn node_mut(&mut self, validator_id: &ValidatorId) -> TestResult<&mut ZugNode> {\n        self.virtual_net\n     
       .node_mut(validator_id)\n            .ok_or(TestRunError::MissingValidator(*validator_id))\n    }\n\n    fn call_validator<F>(\n        &mut self,\n        delivery_time: Timestamp,\n        validator_id: &ValidatorId,\n        f: F,\n    ) -> TestResult<Vec<ZugMessage>>\n    where\n        F: FnOnce(&mut ZugValidator) -> ProtocolOutcomes<TestContext>,\n    {\n        let validator_node = self.node_mut(validator_id)?;\n        let res = f(validator_node.validator_mut());\n        let messages = res\n            .into_iter()\n            .flat_map(|outcome| {\n                validator_node\n                    .validator_mut()\n                    .post_hook(delivery_time, ZugMessage::from(outcome))\n            })\n            .collect();\n        Ok(messages)\n    }\n\n    /// Processes a message sent to `validator_id`.\n    /// Returns a vector of messages produced by the `validator` in reaction to processing a\n    /// message.\n    fn process_message(\n        &mut self,\n        rng: &mut NodeRng,\n        validator_id: ValidatorId,\n        message: Message<ZugMessage>,\n        delivery_time: Timestamp,\n    ) -> TestResult<Vec<ZugMessage>> {\n        self.node_mut(&validator_id)?\n            .push_messages_received(vec![message.clone()]);\n\n        let messages = {\n            let sender_id = message.sender;\n\n            let zm = message.payload().clone();\n\n            match zm {\n                ZugMessage::GossipMessage(msg)\n                | ZugMessage::TargetedMessage(msg, _)\n                | ZugMessage::MessageToRandomPeer(msg) => {\n                    let sender = *self\n                        .vid_to_node_id\n                        .get(&sender_id)\n                        .ok_or(TestRunError::MissingValidator(sender_id))?;\n                    self.call_validator(delivery_time, &validator_id, |consensus| {\n                        consensus\n                            .zug_mut()\n                            .handle_message(rng, 
sender, msg, delivery_time)\n                    })?\n                }\n                ZugMessage::RequestToRandomPeer(req) => {\n                    let sender = *self\n                        .vid_to_node_id\n                        .get(&sender_id)\n                        .ok_or(TestRunError::MissingValidator(sender_id))?;\n                    self.call_validator(delivery_time, &validator_id, |consensus| {\n                        let (mut outcomes, maybe_msg) = consensus.zug_mut().handle_request_message(\n                            rng,\n                            sender,\n                            req,\n                            delivery_time,\n                        );\n                        outcomes.extend(\n                            maybe_msg\n                                .into_iter()\n                                .map(|msg| ProtocolOutcome::CreatedTargetedMessage(msg, sender)),\n                        );\n                        outcomes\n                    })?\n                }\n                ZugMessage::Timer(timestamp, timer_id) => {\n                    self.call_validator(delivery_time, &validator_id, |consensus| {\n                        consensus\n                            .zug_mut()\n                            .handle_timer(timestamp, delivery_time, timer_id, rng)\n                    })?\n                }\n                ZugMessage::QueueAction(_) => vec![], // not used in Zug\n                ZugMessage::RequestNewBlock(block_context) => {\n                    let consensus_value = self.next_consensus_value(block_context.height());\n                    let proposed_block = ProposedBlock::new(consensus_value, block_context);\n\n                    self.call_validator(delivery_time, &validator_id, |consensus| {\n                        consensus.zug_mut().propose(proposed_block, delivery_time)\n                    })?\n                }\n                ZugMessage::FinalizedBlock(FinalizedBlock {\n                    
value,\n                    timestamp: _,\n                    relative_height,\n                    terminal_block_data,\n                    equivocators: _,\n                    proposer: _,\n                }) => {\n                    trace!(\n                        \"{}consensus value finalized: {:?}, height: {:?}\",\n                        if terminal_block_data.is_some() {\n                            \"last \"\n                        } else {\n                            \"\"\n                        },\n                        value,\n                        relative_height,\n                    );\n                    self.node_mut(&validator_id)?.push_finalized(value);\n                    vec![]\n                }\n                ZugMessage::ValidateConsensusValue(_, proposed_block) => {\n                    self.call_validator(delivery_time, &validator_id, |consensus| {\n                        consensus\n                            .zug_mut()\n                            .resolve_validity(proposed_block, true, delivery_time)\n                    })?\n                }\n                ZugMessage::NewEvidence(_) => vec![], // irrelevant to consensus\n                ZugMessage::Disconnect(target) => {\n                    if let Some(vid) = self.node_id_to_vid.get(&target) {\n                        warn!(\"{} wants to disconnect from {}\", validator_id, vid);\n                    }\n                    vec![] // TODO: register the disconnect attempt somehow?\n                }\n                ZugMessage::HandledProposedBlock(_) => vec![], // irrelevant to consensus\n                ZugMessage::WeAreFaulty => {\n                    warn!(\"{} detected that it is faulty\", validator_id);\n                    vec![] // TODO: stop the node or something?\n                }\n                ZugMessage::DoppelgangerDetected => {\n                    warn!(\"{} detected a doppelganger\", validator_id);\n                    vec![] // TODO: stop the node 
or something?\n                }\n                ZugMessage::FttExceeded => {\n                    warn!(\"{} detected FTT exceeded\", validator_id);\n                    vec![] // TODO: stop the node or something?\n                }\n                ZugMessage::SendEvidence(node_id, vid) => {\n                    self.call_validator(delivery_time, &validator_id, |consensus| {\n                        consensus.zug_mut().send_evidence(node_id, &vid)\n                    })?\n                }\n            }\n        };\n\n        let recipient = self.node_mut(&validator_id)?;\n        recipient.push_messages_produced(messages.clone());\n\n        Ok(messages)\n    }\n\n    /// Returns a `MutableHandle` on the `ZugTestHarness` object\n    /// that allows for manipulating internal state of the test state.\n    fn mutable_handle(&mut self) -> MutableHandle<DS> {\n        MutableHandle(self)\n    }\n}\n\nfn crank_until<F, DS: DeliveryStrategy>(\n    zth: &mut ZugTestHarness<DS>,\n    rng: &mut NodeRng,\n    f: F,\n) -> TestResult<()>\nwhere\n    F: Fn(&ZugTestHarness<DS>) -> bool,\n{\n    while !f(zth) {\n        zth.crank(rng)?;\n    }\n    Ok(())\n}\n\nstruct MutableHandle<'a, DS: DeliveryStrategy>(&'a mut ZugTestHarness<DS>);\n\nimpl<DS: DeliveryStrategy> MutableHandle<'_, DS> {\n    /// Drops all messages from the queue.\n    fn clear_message_queue(&mut self) {\n        self.0.virtual_net.empty_queue();\n    }\n\n    fn validators(&self) -> impl Iterator<Item = &ZugNode> {\n        self.0.virtual_net.validators()\n    }\n}\n\n#[derive(Debug)]\nenum BuilderError {\n    WeightLimits,\n}\n\nstruct ZugTestHarnessBuilder<DS: DeliveryStrategy> {\n    /// Maximum number of faulty validators in the network.\n    /// Defaults to 10.\n    max_faulty_validators: u8,\n    /// Percentage of faulty validators' (i.e. 
equivocators) weight.\n    /// Defaults to 0 (network is perfectly secure).\n    faulty_percent: u64,\n    fault_type: Option<DesFault>,\n    /// FTT value for the finality detector.\n    /// If not given, defaults to 1/3 of total validators' weight.\n    ftt: Option<u64>,\n    /// Number of consensus values to be proposed by the nodes in the network.\n    /// Those will be generated by the test framework.\n    /// Defaults to 10.\n    consensus_values_count: u8,\n    /// Distribution of message delivery (delaying, dropping) delays..\n    delivery_distribution: Distribution,\n    delivery_strategy: DS,\n    /// Upper and lower limits for validators' weights.\n    weight_limits: (u64, u64),\n    /// Time when the test era starts at.\n    /// Defaults to 0.\n    start_time: Timestamp,\n    /// Era end height.\n    end_height: u64,\n    /// Type of discrete distribution of validators' weights.\n    /// Defaults to uniform.\n    weight_distribution: Distribution,\n    /// Zug protocol config\n    config: Config,\n}\n\n// Default strategy for message delivery.\nstruct InstantDeliveryNoDropping;\n\nimpl DeliveryStrategy for InstantDeliveryNoDropping {\n    fn gen_delay(\n        &mut self,\n        _rng: &mut NodeRng,\n        message: &ZugMessage,\n        _distribution: &Distribution,\n        base_delivery_timestamp: Timestamp,\n    ) -> DeliverySchedule {\n        match message {\n            ZugMessage::RequestNewBlock(bc) => DeliverySchedule::AtInstant(bc.timestamp()),\n            ZugMessage::Timer(t, _) => DeliverySchedule::AtInstant(*t),\n            ZugMessage::GossipMessage(_)\n            | ZugMessage::TargetedMessage(_, _)\n            | ZugMessage::MessageToRandomPeer(_)\n            | ZugMessage::RequestToRandomPeer(_)\n            | ZugMessage::QueueAction(_)\n            | ZugMessage::FinalizedBlock(_)\n            | ZugMessage::ValidateConsensusValue(_, _)\n            | ZugMessage::NewEvidence(_)\n            | ZugMessage::Disconnect(_)\n            | 
ZugMessage::HandledProposedBlock(_)\n            | ZugMessage::WeAreFaulty\n            | ZugMessage::DoppelgangerDetected\n            | ZugMessage::FttExceeded\n            | ZugMessage::SendEvidence(_, _) => {\n                DeliverySchedule::AtInstant(base_delivery_timestamp + TimeDiff::from_millis(1))\n            }\n        }\n    }\n}\n\nimpl ZugTestHarnessBuilder<InstantDeliveryNoDropping> {\n    fn new() -> Self {\n        ZugTestHarnessBuilder {\n            max_faulty_validators: 10,\n            faulty_percent: 0,\n            fault_type: None,\n            ftt: None,\n            consensus_values_count: 10,\n            delivery_distribution: Distribution::Uniform,\n            delivery_strategy: InstantDeliveryNoDropping,\n            weight_limits: (1, 100),\n            start_time: Timestamp::zero(),\n            end_height: TEST_END_HEIGHT,\n            weight_distribution: Distribution::Uniform,\n            config: Default::default(),\n        }\n    }\n}\n\nimpl<DS: DeliveryStrategy> ZugTestHarnessBuilder<DS> {\n    /// Sets a percentage of weight that will be assigned to malicious nodes.\n    /// `faulty_weight` must be a value between 0 (inclusive) and 33 (inclusive).\n    pub(crate) fn faulty_weight_perc(mut self, faulty_weight: u64) -> Self {\n        self.faulty_percent = faulty_weight;\n        self\n    }\n\n    fn fault_type(mut self, fault_type: DesFault) -> Self {\n        self.fault_type = Some(fault_type);\n        self\n    }\n\n    pub(crate) fn consensus_values_count(mut self, count: u8) -> Self {\n        assert!(count > 0);\n        self.consensus_values_count = count;\n        self\n    }\n\n    pub(crate) fn weight_limits(mut self, lower: u64, upper: u64) -> Self {\n        assert!(\n            lower >= 100,\n            \"Lower limit has to be higher than 100 to avoid rounding problems.\"\n        );\n        self.weight_limits = (lower, upper);\n        self\n    }\n\n    fn max_faulty_validators(mut self, 
max_faulty_count: u8) -> Self {\n        self.max_faulty_validators = max_faulty_count;\n        self\n    }\n\n    fn build(self, rng: &mut NodeRng) -> Result<ZugTestHarness<DS>, BuilderError> {\n        let consensus_values = (0..self.consensus_values_count)\n            .map(|el| ConsensusValue(vec![el]))\n            .collect::<VecDeque<ConsensusValue>>();\n\n        let instance_id = TEST_INSTANCE_ID;\n        let start_time = self.start_time;\n\n        let (lower, upper) = {\n            let (l, u) = self.weight_limits;\n            if l >= u {\n                return Err(BuilderError::WeightLimits);\n            }\n            (l, u)\n        };\n\n        let (faulty_weights, honest_weights): (Vec<Weight>, Vec<Weight>) = {\n            if self.faulty_percent == 0 {\n                // All validators are honest.\n                let validators_num = rng.gen_range(2..self.max_faulty_validators + 1);\n                let honest_validators: Vec<Weight> = self\n                    .weight_distribution\n                    .gen_range_vec(rng, lower, upper, validators_num)\n                    .into_iter()\n                    .map(Weight)\n                    .collect();\n\n                (vec![], honest_validators)\n            } else {\n                // At least 2 validators total and at least one faulty.\n                let faulty_num = rng.gen_range(1..self.max_faulty_validators + 1);\n\n                // Randomly (but within chosen range) assign weights to faulty nodes.\n                let faulty_weights = self\n                    .weight_distribution\n                    .gen_range_vec(rng, lower, upper, faulty_num);\n\n                // Assign enough weights to honest nodes so that we reach expected\n                // `faulty_percentage` ratio.\n                let honest_weights = {\n                    let faulty_sum = faulty_weights.iter().sum::<u64>();\n                    let mut weights_to_distribute: u64 =\n                        
(faulty_sum * 100).div_ceil(self.faulty_percent) - faulty_sum;\n                    let mut weights = vec![];\n                    while weights_to_distribute > 0 {\n                        let weight = if weights_to_distribute < upper {\n                            weights_to_distribute\n                        } else {\n                            rng.gen_range(lower..upper)\n                        };\n                        weights.push(weight);\n                        weights_to_distribute -= weight\n                    }\n                    weights\n                };\n\n                (\n                    faulty_weights.into_iter().map(Weight).collect(),\n                    honest_weights.into_iter().map(Weight).collect(),\n                )\n            }\n        };\n\n        let weights_sum = faulty_weights\n            .iter()\n            .chain(honest_weights.iter())\n            .sum::<Weight>();\n\n        let validators: Validators<ValidatorId> = faulty_weights\n            .iter()\n            .chain(honest_weights.iter())\n            .enumerate()\n            .map(|(i, weight)| (ValidatorId(i as u64), *weight))\n            .collect();\n\n        trace!(\"Weights: {:?}\", validators.iter().collect::<Vec<_>>());\n\n        let mut secrets = validators\n            .iter()\n            .map(|validator| (*validator.id(), TestSecret(validator.id().0)))\n            .collect();\n\n        let ftt = self\n            .ftt\n            .map(|p| p * weights_sum.0 / 100)\n            .unwrap_or_else(|| (weights_sum.0 - 1) / 3);\n\n        let params = Params::new(\n            instance_id,\n            TEST_MIN_ROUND_LEN,\n            start_time,\n            self.end_height,\n            start_time, // Length depends only on block number.\n            ftt.into(),\n        );\n\n        // Local function creating an instance of `ZugConsensus` for a single validator.\n        let zug_consensus =\n            |(vid, secrets): (ValidatorId, &mut 
HashMap<ValidatorId, TestSecret>)| {\n                let v_sec = secrets.remove(&vid).expect(\"Secret key should exist.\");\n\n                let mut zug = Zug::new_with_params(\n                    validators.clone(),\n                    params.clone(),\n                    &self.config,\n                    None,\n                    0, // random seed\n                );\n                let tmpdir = tempfile::tempdir().expect(\"could not create tempdir\");\n                let wal_file = tmpdir.path().join(\"wal_file.dat\");\n                let effects = zug.activate_validator(vid, v_sec, start_time, Some(wal_file));\n\n                (zug, effects.into_iter().map(ZugMessage::from).collect_vec())\n            };\n\n        let faulty_num = faulty_weights.len();\n\n        let (validators, init_messages) = {\n            let mut validators_loc = vec![];\n            let mut init_messages = vec![];\n\n            for validator in validators.iter() {\n                let vid = *validator.id();\n                let fault = if vid.0 < faulty_num as u64 {\n                    self.fault_type\n                } else {\n                    None\n                };\n                let (zug, msgs) = zug_consensus((vid, &mut secrets));\n                let zug_consensus = ZugValidator::new(zug, fault);\n                let validator = Node::new(vid, zug_consensus);\n                let qm: Vec<QueueEntry<ZugMessage>> = msgs\n                    .into_iter()\n                    .map(|zm| {\n                        // These are messages crated on the start of the network.\n                        // They are sent from validator to himself.\n                        QueueEntry::new(start_time, vid, Message::new(vid, zm))\n                    })\n                    .collect();\n                init_messages.extend(qm);\n                validators_loc.push(validator);\n            }\n\n            (validators_loc, init_messages)\n        };\n\n        let 
delivery_time_strategy = self.delivery_strategy;\n\n        let delivery_time_distribution = self.delivery_distribution;\n\n        let vid_to_node_id: HashMap<_, _> = validators\n            .iter()\n            .map(|validator| (validator.id, NodeId::random(rng)))\n            .collect();\n\n        let node_id_to_vid: HashMap<_, _> = vid_to_node_id\n            .iter()\n            .map(|(vid, node_id)| (*node_id, *vid))\n            .collect();\n\n        let virtual_net = VirtualNet::new(validators, init_messages);\n\n        let zth = ZugTestHarness {\n            virtual_net,\n            consensus_values,\n            delivery_time_strategy,\n            delivery_time_distribution,\n            vid_to_node_id,\n            node_id_to_vid,\n        };\n\n        Ok(zth)\n    }\n}\n\n#[derive(Clone, DataSize, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]\npub(crate) struct TestContext;\n\n#[derive(Clone, DataSize, Debug, Eq, PartialEq)]\npub(crate) struct TestSecret(pub(crate) u64);\n\n// Newtype wrapper for test signature.\n// Added so that we can use custom Debug impl.\n#[derive(Clone, DataSize, Copy, Hash, PartialOrd, Ord, Eq, PartialEq, Serialize, Deserialize)]\npub(crate) struct SignatureWrapper(u64);\n\nimpl Debug for SignatureWrapper {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"{:10}\", HexFmt(&self.0.to_le_bytes()))\n    }\n}\n\n// Newtype wrapper for test hash.\n// Added so that we can use custom Debug impl.\n#[derive(Clone, Copy, DataSize, Hash, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize)]\npub(crate) struct HashWrapper(u64);\n\nimpl Debug for HashWrapper {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"{:10}\", HexFmt(&self.0.to_le_bytes()))\n    }\n}\n\nimpl Display for HashWrapper {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        Debug::fmt(self, f)\n    }\n}\n\nimpl ValidatorSecret for TestSecret {\n    type Hash = HashWrapper;\n    type 
Signature = SignatureWrapper;\n\n    fn sign(&self, data: &Self::Hash) -> Self::Signature {\n        SignatureWrapper(data.0 + self.0)\n    }\n}\n\nimpl Context for TestContext {\n    type ConsensusValue = ConsensusValue;\n    type ValidatorId = ValidatorId;\n    type ValidatorSecret = TestSecret;\n    type Signature = SignatureWrapper;\n    type Hash = HashWrapper;\n    type InstanceId = u64;\n\n    fn hash(data: &[u8]) -> Self::Hash {\n        let mut hasher = DefaultHasher::new();\n        hasher.write(data);\n        HashWrapper(hasher.finish())\n    }\n\n    fn verify_signature(\n        hash: &Self::Hash,\n        public_key: &Self::ValidatorId,\n        signature: &<Self::ValidatorSecret as ValidatorSecret>::Signature,\n    ) -> bool {\n        let computed_signature = hash.0 + public_key.0;\n        computed_signature == signature.0\n    }\n}\n\nmod test_harness {\n    use std::{collections::HashSet, fmt::Debug};\n\n    use super::{\n        crank_until, ConsensusValue, InstantDeliveryNoDropping, TestRunError, ZugTestHarness,\n        ZugTestHarnessBuilder,\n    };\n    use crate::{\n        components::consensus::{\n            consensus_protocol::ConsensusProtocol,\n            tests::consensus_des_testing::{Fault as DesFault, ValidatorId},\n        },\n        logging,\n    };\n    use logging::{LoggingConfig, LoggingFormat};\n\n    #[test]\n    fn on_empty_queue_error() {\n        let mut rng = crate::new_rng();\n        let mut zug_test_harness: ZugTestHarness<InstantDeliveryNoDropping> =\n            ZugTestHarnessBuilder::new()\n                .consensus_values_count(1)\n                .weight_limits(100, 120)\n                .build(&mut rng)\n                .expect(\"Construction was successful\");\n\n        zug_test_harness.mutable_handle().clear_message_queue();\n\n        assert_eq!(\n            zug_test_harness.crank(&mut rng),\n            Err(TestRunError::NoMessages),\n            \"Expected the test run to stop.\"\n        );\n    
}\n\n    // Test that all elements of the vector all equal.\n    fn assert_eq_vectors<I: Eq + Debug>(coll: Vec<I>, error_msg: &str) {\n        let mut iter = coll.into_iter();\n        let reference = iter.next().unwrap();\n\n        iter.for_each(|v| assert_eq!(v, reference, \"{}\", error_msg));\n    }\n\n    #[test]\n    fn liveness_test_no_faults() {\n        let _ = logging::init_with_config(&LoggingConfig::new(LoggingFormat::Text, true, true));\n\n        let mut rng = crate::new_rng();\n        let cv_count = 10;\n\n        let mut zug_test_harness = ZugTestHarnessBuilder::new()\n            .max_faulty_validators(3)\n            .consensus_values_count(cv_count)\n            .weight_limits(100, 120)\n            .build(&mut rng)\n            .expect(\"Construction was successful\");\n\n        crank_until(&mut zug_test_harness, &mut rng, |zth| {\n            // Stop the test when each node finalized expected number of consensus values.\n            // Note that we're not testing the order of finalization here.\n            // It will be tested later – it's not the condition for stopping the test run.\n            zth.virtual_net\n                .validators()\n                .all(|v| v.finalized_count() == cv_count as usize)\n        })\n        .unwrap();\n\n        let handle = zug_test_harness.mutable_handle();\n        let validators = handle.validators();\n\n        let (finalized_values, msgs_produced): (Vec<Vec<ConsensusValue>>, Vec<usize>) = validators\n            .map(|v| {\n                (\n                    v.finalized_values().cloned().collect::<Vec<_>>(),\n                    v.messages_produced()\n                        .filter(|&zm| zm.is_signed_gossip_message() || zm.is_proposal())\n                        .cloned()\n                        .count(),\n                )\n            })\n            .unzip();\n\n        msgs_produced\n            .into_iter()\n            .enumerate()\n            .for_each(|(v_idx, units_count)| {\n     
           // NOTE: Works only when all validators are honest and correct (no \"mute\"\n                // validators). Validator produces two units per round. It may\n                // produce just one before lambda message is finalized. Add one in case it's just\n                // one round (one consensus value) – 1 message. 1/2=0 but 3/2=1 b/c of the rounding.\n                let expected_msgs = cv_count as usize * 2;\n\n                assert_eq!(\n                    units_count, expected_msgs,\n                    \"Expected that validator={} produced {} messages.\",\n                    v_idx, expected_msgs\n                )\n            });\n\n        assert_eq_vectors(\n            finalized_values,\n            \"Nodes finalized different consensus values.\",\n        );\n    }\n\n    #[test]\n    fn liveness_test_some_mute() {\n        let _ = logging::init_with_config(&LoggingConfig::new(LoggingFormat::Text, true, true));\n\n        let mut rng = crate::new_rng();\n        let cv_count = 10;\n        let fault_perc = 30;\n\n        let mut zug_test_harness = ZugTestHarnessBuilder::new()\n            .max_faulty_validators(3)\n            .faulty_weight_perc(fault_perc)\n            .fault_type(DesFault::PermanentlyMute)\n            .consensus_values_count(cv_count)\n            .weight_limits(100, 120)\n            .build(&mut rng)\n            .expect(\"Construction was successful\");\n\n        crank_until(&mut zug_test_harness, &mut rng, |zth| {\n            // Stop the test when each node finalized expected number of consensus values.\n            // Note that we're not testing the order of finalization here.\n            // It will be tested later – it's not the condition for stopping the test run.\n            zth.virtual_net\n                .validators()\n                .all(|v| v.finalized_count() == cv_count as usize)\n        })\n        .unwrap();\n\n        let handle = zug_test_harness.mutable_handle();\n        let validators = 
handle.validators();\n\n        let finalized_values: Vec<Vec<ConsensusValue>> = validators\n            .map(|v| v.finalized_values().cloned().collect::<Vec<_>>())\n            .collect();\n\n        assert_eq_vectors(\n            finalized_values,\n            \"Nodes finalized different consensus values.\",\n        );\n    }\n\n    #[test]\n    fn liveness_test_some_equivocate() {\n        let _ = logging::init_with_config(&LoggingConfig::new(LoggingFormat::Text, true, true));\n\n        let mut rng = crate::new_rng();\n        let cv_count = 10;\n        let fault_perc = 10;\n\n        let mut zug_test_harness = ZugTestHarnessBuilder::new()\n            .max_faulty_validators(3)\n            .faulty_weight_perc(fault_perc)\n            .fault_type(DesFault::Equivocate)\n            .consensus_values_count(cv_count)\n            .weight_limits(100, 150)\n            .build(&mut rng)\n            .expect(\"Construction was successful\");\n\n        crank_until(&mut zug_test_harness, &mut rng, |zth| {\n            // Stop the test when each node finalized expected number of consensus values.\n            // Note that we're not testing the order of finalization here.\n            // It will be tested later – it's not the condition for stopping the test run.\n            zth.virtual_net\n                .validators()\n                .all(|v| v.finalized_count() == cv_count as usize)\n        })\n        .unwrap();\n\n        let handle = zug_test_harness.mutable_handle();\n        let validators = handle.validators();\n\n        let (finalized_values, equivocators_seen): (\n            Vec<Vec<ConsensusValue>>,\n            Vec<HashSet<ValidatorId>>,\n        ) = validators\n            .map(|v| {\n                (\n                    v.finalized_values().cloned().collect::<Vec<_>>(),\n                    v.validator()\n                        .zug()\n                        .validators_with_evidence()\n                        .into_iter()\n                     
   .cloned()\n                        .collect::<HashSet<_>>(),\n                )\n            })\n            .unzip();\n\n        assert_eq_vectors(\n            finalized_values,\n            \"Nodes finalized different consensus values.\",\n        );\n        assert_eq_vectors(\n            equivocators_seen,\n            \"Nodes saw different set of equivocators.\",\n        );\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/protocols/zug/fault.rs",
    "content": "use datasize::DataSize;\n\nuse crate::components::consensus::{\n    protocols::zug::{Content, SignedMessage},\n    traits::Context,\n};\n\n/// A reason for a validator to be marked as faulty.\n///\n/// The `Banned` state is fixed from the beginning and can't be replaced. However, `Indirect` can\n/// be replaced with `Direct` evidence, which has the same effect but doesn't rely on information\n/// from other consensus protocol instances.\n#[derive(DataSize, Clone, Debug, PartialEq)]\npub(crate) enum Fault<C>\nwhere\n    C: Context,\n{\n    /// The validator was known to be malicious from the beginning. All their messages are\n    /// considered invalid in this `Zug` instance.\n    Banned,\n    /// We have direct evidence of the validator's fault: two conflicting signatures.\n    Direct(SignedMessage<C>, Content<C>, C::Signature),\n    /// The validator is known to be faulty, but the evidence is not in this era.\n    Indirect,\n}\n\nimpl<C: Context> Fault<C> {\n    pub(super) fn is_direct(&self) -> bool {\n        matches!(self, Fault::Direct(..))\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/protocols/zug/message.rs",
    "content": "use std::{collections::BTreeMap, fmt::Debug};\n\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse either::Either;\n\nuse crate::{\n    components::consensus::{\n        protocols::zug::{Proposal, RoundId},\n        traits::{ConsensusNetworkMessage, Context, ValidatorSecret},\n        utils::ValidatorIndex,\n    },\n    utils::ds,\n};\n\n#[allow(clippy::arithmetic_side_effects)]\nmod relaxed {\n    // This module exists solely to exempt the `EnumDiscriminants` macro generated code from the\n    // module-wide `clippy::arithmetic_side_effects` lint.\n\n    use datasize::DataSize;\n    use serde::{Deserialize, Serialize};\n    use strum::EnumDiscriminants;\n\n    use crate::components::consensus::{\n        protocols::zug::{proposal::Proposal, RoundId},\n        traits::{ConsensusNetworkMessage, Context},\n    };\n\n    use super::{SignedMessage, SyncResponse};\n\n    /// The content of a message in the main protocol, as opposed to the proposal, and to sync\n    /// messages, which are somewhat decoupled from the rest of the protocol. These messages,\n    /// along with the instance and round ID, are signed by the active validators.\n    #[derive(\n        Clone, Serialize, Deserialize, Debug, PartialEq, Eq, Hash, DataSize, EnumDiscriminants,\n    )]\n    #[serde(bound(\n        serialize = \"C::Hash: Serialize\",\n        deserialize = \"C::Hash: Deserialize<'de>\",\n    ))]\n    #[strum_discriminants(derive(strum::EnumIter))]\n    pub(crate) enum Content<C>\n    where\n        C: Context,\n    {\n        /// By signing the echo of a proposal hash a validator affirms that this is the first (and\n        /// usually only) proposal by the round leader that they have received. A quorum of echoes\n        /// is a requirement for a proposal to become accepted.\n        Echo(C::Hash),\n        /// By signing a `true` vote a validator confirms that they have accepted a proposal in\n        /// this round before the timeout. 
If there is a quorum of `true` votes, the\n        /// proposal becomes finalized, together with its ancestors.\n        ///\n        /// A `false` vote means they timed out waiting for a proposal to get accepted. A quorum of\n        /// `false` votes allows the next round's leader to make a proposal without waiting for\n        /// this round's.\n        Vote(bool),\n    }\n\n    /// All messages of the protocol.\n    #[derive(\n        DataSize, Clone, Serialize, Deserialize, Debug, PartialEq, Eq, Hash, EnumDiscriminants,\n    )]\n    #[serde(bound(\n        serialize = \"C::Hash: Serialize\",\n        deserialize = \"C::Hash: Deserialize<'de>\",\n    ))]\n    #[strum_discriminants(derive(strum::EnumIter))]\n    pub(crate) enum Message<C>\n    where\n        C: Context,\n    {\n        /// Signatures, proposals and evidence the requester was missing.\n        SyncResponse(SyncResponse<C>),\n        /// A proposal for a new block. This does not contain any signature; instead, the proposer\n        /// is expected to sign an echo with the proposal hash. Validators will drop any\n        /// proposal they receive unless they either have a signed echo by the proposer and\n        /// the proposer has not double-signed, or they have a quorum of echoes.\n        Proposal {\n            round_id: RoundId,\n            instance_id: C::InstanceId,\n            proposal: Proposal<C>,\n            echo: SignedMessage<C>,\n        },\n        /// An echo or vote signed by an active validator.\n        Signed(SignedMessage<C>),\n        /// Two conflicting signatures by the same validator.\n        Evidence(SignedMessage<C>, Content<C>, C::Signature),\n    }\n\n    impl<C: Context> ConsensusNetworkMessage for Message<C> {}\n}\npub(crate) use relaxed::{Content, ContentDiscriminants, Message, MessageDiscriminants};\n\nuse super::registered_sync::RandomId;\n\nimpl<C: Context> Content<C> {\n    /// Returns whether the two contents contradict each other. 
A correct validator is expected to\n    /// never sign two contradictory contents in the same round.\n    pub(crate) fn contradicts(&self, other: &Content<C>) -> bool {\n        match (self, other) {\n            (Content::Vote(vote0), Content::Vote(vote1)) => vote0 != vote1,\n            (Content::Echo(hash0), Content::Echo(hash1)) => hash0 != hash1,\n            _ => false,\n        }\n    }\n}\n\n// This has to be implemented manually because of the <C> generic parameter, which isn't\n// necessarily `Copy` and that breaks the derive.\nimpl<C: Context> Copy for Content<C> {}\n\n/// A vote or echo with a signature.\n#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq, Hash, DataSize)]\n#[serde(bound(\n    serialize = \"C::Hash: Serialize\",\n    deserialize = \"C::Hash: Deserialize<'de>\",\n))]\npub(crate) struct SignedMessage<C>\nwhere\n    C: Context,\n{\n    pub(super) round_id: RoundId,\n    pub(super) instance_id: C::InstanceId,\n    pub(super) content: Content<C>,\n    pub(super) validator_idx: ValidatorIndex,\n    pub(super) signature: C::Signature,\n}\n\nimpl<C: Context> SignedMessage<C> {\n    /// Creates a new signed message with a valid signature.\n    pub(crate) fn sign_new(\n        round_id: RoundId,\n        instance_id: C::InstanceId,\n        content: Content<C>,\n        validator_idx: ValidatorIndex,\n        secret: &C::ValidatorSecret,\n    ) -> SignedMessage<C> {\n        let hash = Self::hash_fields(round_id, &instance_id, &content, validator_idx);\n        SignedMessage {\n            round_id,\n            instance_id,\n            content,\n            validator_idx,\n            signature: secret.sign(&hash),\n        }\n    }\n\n    /// Creates a new signed message with the alternative content and signature.\n    pub(crate) fn with(&self, content: Content<C>, signature: C::Signature) -> SignedMessage<C> {\n        SignedMessage {\n            content,\n            signature,\n            ..*self\n        }\n    }\n\n    /// 
Returns whether the signature is valid.\n    pub(crate) fn verify_signature(&self, validator_id: &C::ValidatorId) -> bool {\n        let hash = Self::hash_fields(\n            self.round_id,\n            &self.instance_id,\n            &self.content,\n            self.validator_idx,\n        );\n        C::verify_signature(&hash, validator_id, &self.signature)\n    }\n\n    /// Returns the hash of all fields except the signature.\n    fn hash_fields(\n        round_id: RoundId,\n        instance_id: &C::InstanceId,\n        content: &Content<C>,\n        validator_idx: ValidatorIndex,\n    ) -> C::Hash {\n        let serialized_fields =\n            bincode::serialize(&(round_id, instance_id, content, validator_idx))\n                .expect(\"failed to serialize fields\");\n        <C as Context>::hash(&serialized_fields)\n    }\n}\n\n/// Partial information about the sender's protocol state. The receiver should send missing data.\n///\n/// The sender chooses a random peer and a random era, and includes in its `SyncRequest` message\n/// information about received proposals, echoes and votes. 
The idea is to set the `i`-th bit\n/// in the `u128` fields to `1` if we have a signature from the `i`-th validator.\n///\n/// To keep the size of these messages constant even if there are more than 128 validators, a\n/// random interval is selected and only information about validators in that interval is\n/// included: The bit with the lowest significance corresponds to validator number\n/// `first_validator_idx`, and the one with the highest to\n/// `(first_validator_idx + 127) % validator_count`.\n///\n/// For example if there are 500 validators and `first_validator_idx` is 450, the `u128`'s bits\n/// refer to validators 450, 451, ..., 499, 0, 1, ..., 77.\n#[derive(DataSize, Clone, Serialize, Deserialize, Debug, PartialEq, Eq, Hash)]\n#[serde(bound(\n    serialize = \"C::Hash: Serialize\",\n    deserialize = \"C::Hash: Deserialize<'de>\",\n))]\npub(crate) struct SyncRequest<C>\nwhere\n    C: Context,\n{\n    /// The round the information refers to.\n    pub(crate) round_id: RoundId,\n    /// The proposal hash with the most echoes (by weight).\n    pub(crate) proposal_hash: Option<C::Hash>,\n    /// Whether the sender has the proposal with that hash.\n    pub(crate) has_proposal: bool,\n    /// The index of the first validator covered by the bit fields below.\n    pub(crate) first_validator_idx: ValidatorIndex,\n    /// A bit field with 1 for every validator the sender has an echo from.\n    pub(crate) echoes: u128,\n    /// A bit field with 1 for every validator the sender has a `true` vote from.\n    pub(crate) true_votes: u128,\n    /// A bit field with 1 for every validator the sender has a `false` vote from.\n    pub(crate) false_votes: u128,\n    /// A bit field with 1 for every validator the sender has any signed message from.\n    pub(crate) active: u128,\n    /// A bit field with 1 for every validator the sender has evidence against.\n    pub(crate) faulty: u128,\n    pub(crate) instance_id: C::InstanceId,\n    pub(crate) sync_id: 
RandomId,\n}\n\nimpl<C: Context> ConsensusNetworkMessage for SyncRequest<C> {}\n\nimpl<C: Context> SyncRequest<C> {\n    /// Creates a `SyncRequest` for a round in which we haven't received any messages yet.\n    pub(super) fn new_empty_round(\n        round_id: RoundId,\n        first_validator_idx: ValidatorIndex,\n        faulty: u128,\n        active: u128,\n        instance_id: C::InstanceId,\n        sync_id: RandomId,\n    ) -> Self {\n        SyncRequest {\n            round_id,\n            proposal_hash: None,\n            has_proposal: false,\n            first_validator_idx,\n            echoes: 0,\n            true_votes: 0,\n            false_votes: 0,\n            active,\n            faulty,\n            instance_id,\n            sync_id,\n        }\n    }\n}\n\n/// The response to a `SyncRequest`, containing proposals, signatures and evidence the requester is\n/// missing.\n#[derive(DataSize, Clone, Serialize, Deserialize, Debug, PartialEq, Eq, Hash)]\n#[serde(bound(\n    serialize = \"C::Hash: Serialize\",\n    deserialize = \"C::Hash: Deserialize<'de>\",\n))]\npub(crate) struct SyncResponse<C>\nwhere\n    C: Context,\n{\n    /// The round the information refers to.\n    pub(crate) round_id: RoundId,\n    /// The proposal in this round, or its hash.\n    #[data_size(with = ds::maybe_either)]\n    pub(crate) proposal_or_hash: Option<Either<Proposal<C>, C::Hash>>,\n    /// Echo signatures the requester is missing.\n    pub(crate) echo_sigs: BTreeMap<ValidatorIndex, C::Signature>,\n    /// Vote signatures for `true` the requester is missing.\n    pub(crate) true_vote_sigs: BTreeMap<ValidatorIndex, C::Signature>,\n    /// Vote signatures for `false` the requester is missing.\n    pub(crate) false_vote_sigs: BTreeMap<ValidatorIndex, C::Signature>,\n    /// Signed messages that prove that a validator was active.\n    pub(crate) signed_messages: Vec<SignedMessage<C>>,\n    /// Evidence against faulty validators.\n    pub(crate) evidence: 
Vec<(SignedMessage<C>, Content<C>, C::Signature)>,\n    pub(crate) instance_id: C::InstanceId,\n    pub(crate) sync_id: RandomId,\n}\n\nimpl<C: Context> Message<C> {\n    pub(super) fn instance_id(&self) -> &C::InstanceId {\n        match self {\n            Message::SyncResponse(SyncResponse { instance_id, .. })\n            | Message::Signed(SignedMessage { instance_id, .. })\n            | Message::Proposal { instance_id, .. }\n            | Message::Evidence(SignedMessage { instance_id, .. }, ..) => instance_id,\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/protocols/zug/params.rs",
    "content": "use datasize::DataSize;\nuse serde::Serialize;\n\nuse casper_types::{TimeDiff, Timestamp};\n\nuse crate::components::consensus::{traits::Context, utils::Weight};\n\n/// Protocol parameters for `Zug`.\n#[derive(Debug, DataSize, Clone, Serialize)]\npub(crate) struct Params<C>\nwhere\n    C: Context,\n{\n    instance_id: C::InstanceId,\n    min_block_time: TimeDiff,\n    start_timestamp: Timestamp,\n    end_height: u64,\n    end_timestamp: Timestamp,\n    ftt: Weight,\n}\n\nimpl<C: Context> Params<C> {\n    /// Creates a new set of `Zug` protocol parameters.\n    pub(crate) fn new(\n        instance_id: C::InstanceId,\n        min_block_time: TimeDiff,\n        start_timestamp: Timestamp,\n        end_height: u64,\n        end_timestamp: Timestamp,\n        ftt: Weight,\n    ) -> Params<C> {\n        Params {\n            instance_id,\n            min_block_time,\n            start_timestamp,\n            end_height,\n            end_timestamp,\n            ftt,\n        }\n    }\n\n    /// Returns the unique identifier for this protocol instance.\n    pub(crate) fn instance_id(&self) -> &C::InstanceId {\n        &self.instance_id\n    }\n\n    /// Returns the minimum difference between a block's and its child's timestamp.\n    pub(crate) fn min_block_time(&self) -> TimeDiff {\n        self.min_block_time\n    }\n\n    /// Returns the start timestamp of the era.\n    pub(crate) fn start_timestamp(&self) -> Timestamp {\n        self.start_timestamp\n    }\n\n    /// Returns the minimum height of the last block.\n    pub(crate) fn end_height(&self) -> u64 {\n        self.end_height\n    }\n\n    /// Returns the minimum timestamp of the last block.\n    pub(crate) fn end_timestamp(&self) -> Timestamp {\n        self.end_timestamp\n    }\n\n    /// The threshold weight above which we are not fault tolerant any longer.\n    pub(crate) fn ftt(&self) -> Weight {\n        self.ftt\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/protocols/zug/participation.rs",
    "content": "use std::fmt::Debug;\n\nuse crate::components::consensus::{\n    protocols::zug::{Fault, RoundId, Zug},\n    traits::Context,\n    utils::ValidatorIndex,\n};\n\n/// A map of status (faulty, inactive) by validator ID.\n#[derive(Debug)]\n// False positive, as the fields of this struct are all used in logging validator participation.\n#[allow(dead_code)]\npub(super) struct Participation<C>\nwhere\n    C: Context,\n{\n    pub(super) instance_id: C::InstanceId,\n    pub(super) faulty_stake_percent: u8,\n    pub(super) inactive_stake_percent: u8,\n    pub(super) inactive_validators: Vec<(ValidatorIndex, C::ValidatorId, ParticipationStatus)>,\n    pub(super) faulty_validators: Vec<(ValidatorIndex, C::ValidatorId, ParticipationStatus)>,\n}\n\n/// A validator's participation status: whether they are faulty or inactive.\n#[derive(Copy, Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]\npub(super) enum ParticipationStatus {\n    LastSeenInRound(RoundId),\n    Inactive,\n    EquivocatedInOtherEra,\n    Equivocated,\n}\n\nimpl ParticipationStatus {\n    /// Returns a `Status` for a validator unless they are honest and online.\n    pub(super) fn for_index<C: Context + 'static>(\n        idx: ValidatorIndex,\n        zug: &Zug<C>,\n    ) -> Option<ParticipationStatus> {\n        if let Some(fault) = zug.faults.get(&idx) {\n            return Some(match fault {\n                Fault::Banned | Fault::Indirect => ParticipationStatus::EquivocatedInOtherEra,\n                Fault::Direct(..) 
=> ParticipationStatus::Equivocated,\n            });\n        }\n\n        let last_seen_round = zug\n            .active\n            .get(idx)\n            .and_then(Option::as_ref)\n            .map(|signed_msg| signed_msg.round_id);\n        match last_seen_round {\n            // not seen at all\n            None => Some(ParticipationStatus::Inactive),\n            // seen, but not within last 2 rounds\n            Some(r_id) if r_id.saturating_add(2) < zug.current_round => {\n                Some(ParticipationStatus::LastSeenInRound(r_id))\n            }\n            // seen recently\n            _ => None,\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/protocols/zug/proposal.rs",
    "content": "use std::{collections::BTreeSet, fmt};\n\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse casper_types::Timestamp;\n\nuse crate::components::consensus::{\n    consensus_protocol::ProposedBlock, protocols::zug::RoundId, traits::Context,\n    utils::ValidatorIndex,\n};\n\n/// A proposal in the consensus protocol.\n#[derive(Clone, Hash, Serialize, Deserialize, Debug, PartialEq, Eq, DataSize)]\n#[serde(bound(\n    serialize = \"C::Hash: Serialize\",\n    deserialize = \"C::Hash: Deserialize<'de>\",\n))]\npub(crate) struct Proposal<C>\nwhere\n    C: Context,\n{\n    /// The timestamp when the proposal was created. If finalized, this will be the block's\n    /// timestamp.\n    pub(super) timestamp: Timestamp,\n    /// The proposed block. This must be `None` after the switch block.\n    pub(super) maybe_block: Option<C::ConsensusValue>,\n    /// The parent round. This is `None` if the proposed block has no parent in this era.\n    pub(super) maybe_parent_round_id: Option<RoundId>,\n    /// The set of validators that appear to be inactive in this era.\n    /// This is `None` in round 0 and in dummy blocks.\n    pub(super) inactive: Option<BTreeSet<ValidatorIndex>>,\n}\n\nimpl<C: Context> Proposal<C> {\n    /// Creates a new proposal with no block. This must be used if an ancestor would be the\n    /// switch block, since no blocks can come after the switch block.\n    pub(super) fn dummy(timestamp: Timestamp, parent_round_id: RoundId) -> Self {\n        Proposal {\n            timestamp,\n            maybe_block: None,\n            maybe_parent_round_id: Some(parent_round_id),\n            inactive: None,\n        }\n    }\n\n    /// Creates a new proposal with the given block and parent round. 
If the parent round is none\n    /// it is proposed as the first block in this era.\n    pub(super) fn with_block(\n        proposed_block: &ProposedBlock<C>,\n        maybe_parent_round_id: Option<RoundId>,\n        inactive: impl Iterator<Item = ValidatorIndex>,\n    ) -> Self {\n        Proposal {\n            maybe_block: Some(proposed_block.value().clone()),\n            timestamp: proposed_block.context().timestamp(),\n            maybe_parent_round_id,\n            inactive: maybe_parent_round_id.map(|_| inactive.collect()),\n        }\n    }\n\n    /// Returns the proposal hash.\n    #[cfg(test)] // Only used in tests; in production use HashedProposal below.\n    pub(super) fn hash(&self) -> C::Hash {\n        let serialized = bincode::serialize(&self).expect(\"failed to serialize fields\");\n        <C as Context>::hash(&serialized)\n    }\n}\n\n/// A proposal with its memoized hash.\n#[derive(Clone, Hash, Debug, PartialEq, Eq, DataSize)]\npub(crate) struct HashedProposal<C>\nwhere\n    C: Context,\n{\n    hash: C::Hash,\n    proposal: Proposal<C>,\n}\n\nimpl<C: Context> HashedProposal<C> {\n    pub(crate) fn new(proposal: Proposal<C>) -> Self {\n        let serialized = bincode::serialize(&proposal).expect(\"failed to serialize fields\");\n        let hash = <C as Context>::hash(&serialized);\n        HashedProposal { hash, proposal }\n    }\n\n    pub(crate) fn hash(&self) -> &C::Hash {\n        &self.hash\n    }\n\n    pub(crate) fn inner(&self) -> &Proposal<C> {\n        &self.proposal\n    }\n\n    pub(crate) fn into_inner(self) -> Proposal<C> {\n        self.proposal\n    }\n\n    pub(crate) fn maybe_block(&self) -> Option<&C::ConsensusValue> {\n        self.proposal.maybe_block.as_ref()\n    }\n\n    pub(crate) fn timestamp(&self) -> Timestamp {\n        self.proposal.timestamp\n    }\n\n    pub(crate) fn inactive(&self) -> Option<&BTreeSet<ValidatorIndex>> {\n        self.proposal.inactive.as_ref()\n    }\n\n    pub(crate) fn 
maybe_parent_round_id(&self) -> Option<RoundId> {\n        self.proposal.maybe_parent_round_id\n    }\n}\n\nimpl<C: Context> fmt::Display for Proposal<C> {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        match &self.maybe_block {\n            None => write!(f, \"dummy proposal at {}\", self.timestamp),\n            Some(block) => write!(f, \"proposal at {}: {}\", self.timestamp, block),\n        }\n    }\n}\n\nimpl<C: Context> fmt::Display for HashedProposal<C> {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        write!(f, \"{}, hash {}\", self.proposal, self.hash)\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/protocols/zug/round.rs",
    "content": "use std::{\n    collections::{BTreeMap, HashMap},\n    fmt::Debug,\n};\n\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    components::consensus::{\n        protocols::zug::{Content, HashedProposal},\n        traits::Context,\n        utils::{ValidatorIndex, ValidatorMap},\n    },\n    utils::ds,\n};\n\n/// The protocol proceeds in rounds, for each of which we must\n/// keep track of proposals, echoes, votes, and the current outcome\n/// of the round.\n#[derive(Debug, DataSize, PartialEq)]\npub(crate) struct Round<C>\nwhere\n    C: Context,\n{\n    /// The leader, who is allowed to create a proposal in this round.\n    leader_idx: ValidatorIndex,\n    /// The unique proposal signed by the leader, or the unique proposal with a quorum of echoes.\n    proposal: Option<HashedProposal<C>>,\n    /// The echoes we've received for each proposal so far.\n    #[data_size(with = ds::hashmap_sample)]\n    echoes: HashMap<C::Hash, BTreeMap<ValidatorIndex, C::Signature>>,\n    /// The votes we've received for this round so far.\n    votes: BTreeMap<bool, ValidatorMap<Option<C::Signature>>>,\n    /// The memoized results in this round.\n    outcome: RoundOutcome<C>,\n}\n\nimpl<C: Context> Round<C> {\n    /// Creates a new [`Round`] with no proposals, echoes, votes, and empty\n    /// round outcome.\n    pub(super) fn new(validator_count: usize, leader_idx: ValidatorIndex) -> Round<C> {\n        let mut votes = BTreeMap::new();\n        votes.insert(false, vec![None; validator_count].into());\n        votes.insert(true, vec![None; validator_count].into());\n        Round {\n            leader_idx,\n            proposal: None,\n            echoes: HashMap::new(),\n            votes,\n            outcome: RoundOutcome::default(),\n        }\n    }\n\n    /// Returns the map of all proposals sent to us this round from the leader\n    pub(super) fn proposal(&self) -> Option<&HashedProposal<C>> {\n        self.proposal.as_ref()\n    
}\n\n    /// Returns whether we have received at least one proposal.\n    pub(super) fn has_proposal(&self) -> bool {\n        self.proposal.is_some()\n    }\n\n    /// Returns whether this proposal is justified by an echo signature from the round leader or by\n    /// a quorum of echoes.\n    pub(super) fn has_echoes_for_proposal(&self, hash: &C::Hash) -> bool {\n        match (self.quorum_echoes(), self.echoes.get(hash)) {\n            (Some(quorum_hash), _) => quorum_hash == *hash,\n            (None, Some(echo_map)) => echo_map.contains_key(&self.leader_idx),\n            (None, None) => false,\n        }\n    }\n\n    /// Inserts a `Proposal` and returns `false` if we already had it or it cannot be added due to\n    /// missing echoes.\n    pub(super) fn insert_proposal(&mut self, proposal: HashedProposal<C>) -> bool {\n        let hash = proposal.hash();\n        if self.has_echoes_for_proposal(hash) && self.proposal.as_ref() != Some(&proposal) {\n            self.proposal = Some(proposal);\n            true\n        } else {\n            false\n        }\n    }\n\n    /// Returns the echoes we've received for each proposal so far.\n    pub(super) fn echoes(&self) -> &HashMap<C::Hash, BTreeMap<ValidatorIndex, C::Signature>> {\n        &self.echoes\n    }\n\n    /// Inserts an `Echo`; returns `false` if we already had it.\n    pub(super) fn insert_echo(\n        &mut self,\n        hash: C::Hash,\n        validator_idx: ValidatorIndex,\n        signature: C::Signature,\n    ) -> bool {\n        self.echoes\n            .entry(hash)\n            .or_default()\n            .insert(validator_idx, signature)\n            .is_none()\n    }\n\n    /// Returns whether the validator has already sent an `Echo` in this round.\n    pub(super) fn has_echoed(&self, validator_idx: ValidatorIndex) -> bool {\n        self.echoes\n            .values()\n            .any(|echo_map| echo_map.contains_key(&validator_idx))\n    }\n\n    /// Stores in the outcome that we have a 
quorum of echoes for this hash.\n    pub(super) fn set_quorum_echoes(&mut self, hash: C::Hash) {\n        self.outcome.quorum_echoes = Some(hash);\n        if self\n            .proposal\n            .as_ref()\n            .is_some_and(|proposal| *proposal.hash() != hash)\n        {\n            self.proposal = None;\n        }\n    }\n\n    /// Returns the hash for which we have a quorum of echoes, if any.\n    pub(super) fn quorum_echoes(&self) -> Option<C::Hash> {\n        self.outcome.quorum_echoes\n    }\n\n    /// Returns the votes we've received for this round so far.\n    pub(super) fn votes(&self, vote: bool) -> &ValidatorMap<Option<C::Signature>> {\n        &self.votes[&vote]\n    }\n\n    /// Inserts a `Vote`; returns `false` if we already had it.\n    pub(super) fn insert_vote(\n        &mut self,\n        vote: bool,\n        validator_idx: ValidatorIndex,\n        signature: C::Signature,\n    ) -> bool {\n        // Safe to unwrap: Both `true` and `false` entries were created in `new`.\n        let votes_map = self.votes.get_mut(&vote).unwrap();\n        if votes_map[validator_idx].is_none() {\n            votes_map[validator_idx] = Some(signature);\n            true\n        } else {\n            false\n        }\n    }\n\n    /// Returns whether the validator has already cast a `true` or `false` vote.\n    pub(super) fn has_voted(&self, validator_idx: ValidatorIndex) -> bool {\n        self.votes(true)[validator_idx].is_some() || self.votes(false)[validator_idx].is_some()\n    }\n\n    /// Stores in the outcome that we have a quorum of votes for this value.\n    pub(super) fn set_quorum_votes(&mut self, vote: bool) {\n        self.outcome.quorum_votes = Some(vote);\n    }\n\n    /// Returns the value for which we have a quorum of votes, if any.\n    pub(super) fn quorum_votes(&self) -> Option<bool> {\n        self.outcome.quorum_votes\n    }\n\n    /// Removes all votes and echoes from the given validator.\n    pub(super) fn 
remove_votes_and_echoes(&mut self, validator_idx: ValidatorIndex) {\n        self.votes.get_mut(&false).unwrap()[validator_idx] = None;\n        self.votes.get_mut(&true).unwrap()[validator_idx] = None;\n        self.echoes.retain(|_, echo_map| {\n            echo_map.remove(&validator_idx);\n            !echo_map.is_empty()\n        });\n    }\n\n    /// Updates the outcome and marks the proposal that has a quorum of echoes as accepted. It also\n    /// stores the proposal's block height.\n    pub(super) fn set_accepted_proposal_height(&mut self, height: u64) {\n        self.outcome.accepted_proposal_height = Some(height);\n    }\n\n    /// Returns the accepted proposal, if any, together with its height.\n    pub(super) fn accepted_proposal(&self) -> Option<(u64, &HashedProposal<C>)> {\n        let height = self.outcome.accepted_proposal_height?;\n        let proposal = self.proposal.as_ref()?;\n        Some((height, proposal))\n    }\n\n    /// Check if the round has already received this message.\n    pub(super) fn contains(&self, content: &Content<C>, validator_idx: ValidatorIndex) -> bool {\n        match content {\n            Content::Echo(hash) => self\n                .echoes\n                .get(hash)\n                .is_some_and(|echo_map| echo_map.contains_key(&validator_idx)),\n            Content::Vote(vote) => self.votes[vote][validator_idx].is_some(),\n        }\n    }\n\n    /// Removes the proposal: This round was skipped and will never become finalized.\n    pub(super) fn prune_skipped(&mut self) {\n        self.proposal = None;\n        self.outcome.accepted_proposal_height = None;\n    }\n\n    /// Returns the validator index of this round's leader.\n    pub(super) fn leader(&self) -> ValidatorIndex {\n        self.leader_idx\n    }\n}\n\n/// Indicates the outcome of a given round.\n#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq, DataSize)]\n#[serde(bound(\n    serialize = \"C::Hash: Serialize\",\n    deserialize = \"C::Hash: 
Deserialize<'de>\",\n))]\npub(crate) struct RoundOutcome<C>\nwhere\n    C: Context,\n{\n    /// This is `Some(h)` if there is an accepted proposal with relative height `h`, i.e. there is\n    /// a quorum of echoes, `h` accepted ancestors, and all rounds since the parent's are\n    /// skippable.\n    accepted_proposal_height: Option<u64>,\n    quorum_echoes: Option<C::Hash>,\n    quorum_votes: Option<bool>,\n}\n\nimpl<C: Context> Default for RoundOutcome<C> {\n    fn default() -> RoundOutcome<C> {\n        RoundOutcome {\n            accepted_proposal_height: None,\n            quorum_echoes: None,\n            quorum_votes: None,\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/protocols/zug/tests.rs",
    "content": "use super::{registered_sync::RandomId, *};\n\nuse std::{collections::BTreeSet, sync::Arc};\n\nuse casper_types::{PublicKey, SecretKey, Timestamp, U512};\nuse tempfile::tempdir;\nuse tracing::info;\n\nuse crate::{\n    components::consensus::{\n        cl_context::{ClContext, Keypair},\n        config::Config,\n        consensus_protocol::{ConsensusProtocol, ProtocolOutcome},\n        leader_sequence,\n        protocols::common,\n        tests::utils::{\n            new_test_chainspec, ALICE_NODE_ID, ALICE_PUBLIC_KEY, ALICE_SECRET_KEY, BOB_PUBLIC_KEY,\n            BOB_SECRET_KEY, CAROL_PUBLIC_KEY, CAROL_SECRET_KEY,\n        },\n        traits::Context,\n    },\n    testing,\n    types::BlockPayload,\n};\n\nconst INSTANCE_ID_DATA: &[u8; 1] = &[123u8; 1];\n\n/// Creates a new `Zug` instance.\n///\n/// The random seed is selected so that the leader sequence starts with `seq`.\npub(crate) fn new_test_zug<I1, I2, T>(\n    weights: I1,\n    init_faulty: I2,\n    seq: &[ValidatorIndex],\n) -> Zug<ClContext>\nwhere\n    I1: IntoIterator<Item = (PublicKey, T)>,\n    I2: IntoIterator<Item = PublicKey>,\n    T: Into<U512>,\n{\n    let weights = weights\n        .into_iter()\n        .map(|(pk, w)| (pk, w.into()))\n        .collect::<Vec<_>>();\n    let mut chainspec = new_test_chainspec(weights.clone());\n    chainspec.core_config.minimum_era_height = 3;\n    let config = Config::default();\n    let validators = common::validators::<ClContext>(\n        &Default::default(),\n        &Default::default(),\n        weights.iter().cloned().collect(),\n    );\n    let weights_vmap = common::validator_weights::<ClContext>(&validators);\n    let leaders = weights.iter().map(|_| true).collect();\n    let seed = leader_sequence::find_seed(seq, &weights_vmap, &leaders);\n    // Timestamp of the genesis era start and test start.\n    let start_timestamp: Timestamp = 0.into();\n    Zug::<ClContext>::new(\n        ClContext::hash(INSTANCE_ID_DATA),\n        
weights.into_iter().collect(),\n        &init_faulty.into_iter().collect(),\n        &None.into_iter().collect(),\n        &chainspec,\n        &config,\n        None,\n        start_timestamp,\n        seed,\n    )\n}\n\n/// Creates a `signed_message`\nfn create_signed_message(\n    validators: &Validators<PublicKey>,\n    round_id: RoundId,\n    content: Content<ClContext>,\n    keypair: &Keypair,\n) -> SignedMessage<ClContext> {\n    let validator_idx = validators.get_index(keypair.public_key()).unwrap();\n    let instance_id = ClContext::hash(INSTANCE_ID_DATA);\n    SignedMessage::sign_new(round_id, instance_id, content, validator_idx, keypair)\n}\n\n/// Creates a `Message::Signed`.\nfn create_message(\n    validators: &Validators<PublicKey>,\n    round_id: RoundId,\n    content: Content<ClContext>,\n    keypair: &Keypair,\n) -> SerializedMessage {\n    let signed_msg = create_signed_message(validators, round_id, content, keypair);\n    SerializedMessage::from_message(&Message::Signed(signed_msg))\n}\n\n/// Creates a `Message::Proposal`\nfn create_proposal_message(\n    round_id: RoundId,\n    proposal: &Proposal<ClContext>,\n    validators: &Validators<PublicKey>,\n    keypair: &Keypair,\n) -> SerializedMessage {\n    let hashed_proposal = HashedProposal::new(proposal.clone());\n    let echo_content = Content::Echo(*hashed_proposal.hash());\n    let echo = create_signed_message(validators, round_id, echo_content, keypair);\n    SerializedMessage::from_message(&Message::Proposal {\n        round_id,\n        instance_id: ClContext::hash(INSTANCE_ID_DATA),\n        proposal: proposal.clone(),\n        echo,\n    })\n}\n\n/// Removes all `CreatedGossipMessage`s from `outcomes` and returns the messages, after\n/// verifying the signatures and instance ID.\nfn remove_gossip(\n    validators: &Validators<PublicKey>,\n    outcomes: &mut ProtocolOutcomes<ClContext>,\n) -> Vec<Message<ClContext>> {\n    let mut result = Vec::new();\n    let expected_instance_id = 
ClContext::hash(INSTANCE_ID_DATA);\n    outcomes.retain(|outcome| {\n        let msg = match outcome {\n            ProtocolOutcome::CreatedGossipMessage(serialized_msg) => {\n                serialized_msg.deserialize_expect::<Message<ClContext>>()\n            }\n            _ => return true,\n        };\n        assert_eq!(*msg.instance_id(), expected_instance_id);\n        if let Message::Signed(ref signed_msg) = msg {\n            let public_key = validators\n                .id(signed_msg.validator_idx)\n                .expect(\"validator ID\")\n                .clone();\n            assert!(signed_msg.verify_signature(&public_key));\n        }\n        result.push(msg);\n        false\n    });\n    result\n}\n\n/// Removes the expected signed message; returns `true` if found.\nfn remove_signed(\n    gossip: &mut Vec<Message<ClContext>>,\n    expected_round_id: RoundId,\n    expected_validator_idx: ValidatorIndex,\n    expected_content: Content<ClContext>,\n) -> bool {\n    let maybe_pos = gossip.iter().position(|message| {\n        if let Message::Signed(SignedMessage {\n            round_id,\n            instance_id: _,\n            content,\n            validator_idx,\n            signature: _,\n        }) = &message\n        {\n            *round_id == expected_round_id\n                && *validator_idx == expected_validator_idx\n                && *content == expected_content\n        } else {\n            false\n        }\n    });\n    if let Some(pos) = maybe_pos {\n        gossip.remove(pos);\n        true\n    } else {\n        false\n    }\n}\n\n/// Removes the expected proposal message; returns `true` if found.\nfn remove_proposal(\n    gossip: &mut Vec<Message<ClContext>>,\n    expected_round_id: RoundId,\n    expected_proposal: &Proposal<ClContext>,\n) -> bool {\n    let maybe_pos = gossip.iter().position(|message| {\n        if let Message::Proposal {\n            round_id,\n            instance_id: _,\n            proposal,\n            echo: 
_,\n        } = &message\n        {\n            *round_id == expected_round_id && proposal == expected_proposal\n        } else {\n            false\n        }\n    });\n    if let Some(pos) = maybe_pos {\n        gossip.remove(pos);\n        true\n    } else {\n        false\n    }\n}\n\n/// Removes all `CreatedRequestToRandomValidator`s from `outcomes` and returns the deserialized\n/// messages.\nfn remove_requests_to_random(\n    outcomes: &mut ProtocolOutcomes<ClContext>,\n) -> Vec<SyncRequest<ClContext>> {\n    let mut result = Vec::new();\n    let expected_instance_id = ClContext::hash(INSTANCE_ID_DATA);\n    outcomes.retain(|outcome| {\n        let msg: SyncRequest<ClContext> = match outcome {\n            ProtocolOutcome::CreatedRequestToRandomValidator(msg) => msg.deserialize_expect(),\n            _ => return true,\n        };\n        assert_eq!(msg.instance_id, expected_instance_id);\n        result.push(msg);\n        false\n    });\n    result\n}\n\n/// Removes all `CreatedTargetedMessage`s from `outcomes` and returns the content of\n/// all `Message::Signed`, after verifying the signatures.\nfn remove_targeted_messages(\n    validators: &Validators<PublicKey>,\n    expected_peer: NodeId,\n    outcomes: &mut ProtocolOutcomes<ClContext>,\n) -> Vec<Message<ClContext>> {\n    let mut result = Vec::new();\n    let expected_instance_id = ClContext::hash(INSTANCE_ID_DATA);\n    outcomes.retain(|outcome| {\n        let (msg, peer) = match outcome {\n            ProtocolOutcome::CreatedTargetedMessage(serialized_message, peer) => (\n                serialized_message.deserialize_expect::<Message<ClContext>>(),\n                *peer,\n            ),\n            _ => return true,\n        };\n        if peer != expected_peer {\n            return true;\n        }\n        assert_eq!(*msg.instance_id(), expected_instance_id);\n        if let Message::Signed(ref signed_msg) = msg {\n            let public_key = validators\n                
.id(signed_msg.validator_idx)\n                .expect(\"validator ID\")\n                .clone();\n            assert!(signed_msg.verify_signature(&public_key));\n        }\n        result.push(msg);\n        false\n    });\n    result\n}\n\n/// Expects exactly one `CreateNewBlock` in `outcomes`, removes and returns it.\nfn remove_create_new_block(outcomes: &mut ProtocolOutcomes<ClContext>) -> BlockContext<ClContext> {\n    let mut result = None;\n    outcomes.retain(|outcome| match outcome {\n        ProtocolOutcome::CreateNewBlock(block_context, _) => {\n            if let Some(other_context) = result.replace(block_context.clone()) {\n                panic!(\n                    \"got multiple CreateNewBlock outcomes: {:?}, {:?}\",\n                    other_context, block_context\n                );\n            }\n            false\n        }\n        _ => true,\n    });\n    result.expect(\"missing CreateNewBlock outcome\")\n}\n\n/// Checks that the `proposals` match the `FinalizedBlock` outcomes.\nfn expect_finalized(\n    outcomes: &ProtocolOutcomes<ClContext>,\n    proposals: &[(&Proposal<ClContext>, u64)],\n) {\n    let mut proposals_iter = proposals.iter();\n    for outcome in outcomes {\n        if let ProtocolOutcome::FinalizedBlock(fb) = outcome {\n            if let Some(&(proposal, rel_height)) = proposals_iter.next() {\n                assert_eq!(fb.relative_height, rel_height);\n                assert_eq!(fb.timestamp, proposal.timestamp);\n                assert_eq!(Some(&fb.value), proposal.maybe_block.as_ref());\n            } else {\n                panic!(\"unexpected finalized block {:?}\", fb);\n            }\n        }\n    }\n    assert_eq!(None, proposals_iter.next(), \"missing finalized proposal\");\n}\n\n/// Checks that `outcomes` contains no `FinalizedBlock`, `CreateNewBlock` or `CreatedGossipMessage`.\nfn expect_no_gossip_block_finalized(outcomes: ProtocolOutcomes<ClContext>) {\n    for outcome in outcomes {\n        match outcome 
{\n            ProtocolOutcome::FinalizedBlock(fb) => panic!(\"unexpected finalized block: {:?}\", fb),\n            ProtocolOutcome::CreatedGossipMessage(msg) => {\n                panic!(\"unexpected gossip message {:?}\", msg);\n            }\n            ProtocolOutcome::CreateNewBlock(block_context, expiry) => {\n                panic!(\n                    \"unexpected CreateNewBlock: {:?} exp. {}\",\n                    block_context, expiry\n                );\n            }\n            _ => {}\n        }\n    }\n}\n\n/// Checks that the expected timer was requested by the protocol.\nfn expect_timer(outcomes: &ProtocolOutcomes<ClContext>, timestamp: Timestamp, timer_id: TimerId) {\n    assert!(\n        outcomes.contains(&ProtocolOutcome::ScheduleTimer(timestamp, timer_id)),\n        \"missing timer {} for {:?} from {:?}\",\n        timer_id.0,\n        timestamp,\n        outcomes\n    );\n}\n\n/// Creates a new payload with the given random bit and no deploys or transfers.\nfn new_payload(random_bit: bool) -> Arc<BlockPayload> {\n    Arc::new(BlockPayload::new(\n        BTreeMap::new(),\n        vec![],\n        Default::default(),\n        random_bit,\n        1u8,\n    ))\n}\n\nfn vote(v: bool) -> Content<ClContext> {\n    Content::Vote(v)\n}\n\nfn echo(hash: <ClContext as Context>::Hash) -> Content<ClContext> {\n    Content::Echo(hash)\n}\n\nfn abc_weights(\n    alice_w: u64,\n    bob_w: u64,\n    carol_w: u64,\n) -> (Vec<(PublicKey, U512)>, Validators<PublicKey>) {\n    let weights: Vec<(PublicKey, U512)> = vec![\n        (ALICE_PUBLIC_KEY.clone(), U512::from(alice_w)),\n        (BOB_PUBLIC_KEY.clone(), U512::from(bob_w)),\n        (CAROL_PUBLIC_KEY.clone(), U512::from(carol_w)),\n    ];\n    let validators = common::validators::<ClContext>(\n        &Default::default(),\n        &Default::default(),\n        weights.iter().cloned().collect(),\n    );\n    (weights, validators)\n}\n\n/// Tests the core logic of the consensus protocol, i.e. 
the criteria for sending votes and echoes\n/// and finalizing blocks.\n///\n/// In this scenario Alice has 60%, Bob 30% and Carol 10% of the weight, and we create Carol's\n/// consensus instance. Bob makes a proposal in round 0. Alice doesn't see it and makes a proposal\n/// without a parent (skipping round 0) in round 1, and proposes a child of that one in round 2.\n///\n/// The fork is resolved in Alice's favor: Round 0 becomes skippable and round 2 committed, so\n/// Alice's two blocks become finalized.\n#[test]\nfn zug_no_fault() {\n    testing::init_logging();\n    let mut rng = crate::new_rng();\n    let (weights, validators) = abc_weights(60, 30, 10);\n    let alice_idx = validators.get_index(&*ALICE_PUBLIC_KEY).unwrap();\n    let bob_idx = validators.get_index(&*BOB_PUBLIC_KEY).unwrap();\n    let carol_idx = validators.get_index(&*CAROL_PUBLIC_KEY).unwrap();\n    let sender = *ALICE_NODE_ID;\n\n    let mut timestamp = Timestamp::from(100000);\n\n    // The first round leaders are Bob, Alice, Alice, Carol, Carol.\n    let leader_seq = &[bob_idx, alice_idx, alice_idx, carol_idx, carol_idx];\n    let mut sc_c = new_test_zug(weights.clone(), vec![], leader_seq);\n    let dir = tempdir().unwrap();\n    sc_c.open_wal(dir.path().join(\"wal\"), timestamp);\n\n    let alice_kp = Keypair::from(ALICE_SECRET_KEY.clone());\n    let bob_kp = Keypair::from(BOB_SECRET_KEY.clone());\n    let carol_kp = Keypair::from(CAROL_SECRET_KEY.clone());\n\n    sc_c.activate_validator(CAROL_PUBLIC_KEY.clone(), carol_kp, Timestamp::now(), None);\n\n    let block_time = sc_c.params.min_block_time();\n    let proposal_timeout = sc_c.proposal_timeout();\n\n    let proposal0 = Proposal::<ClContext> {\n        timestamp,\n        maybe_block: Some(new_payload(false)),\n        maybe_parent_round_id: None,\n        inactive: None,\n    };\n    let hash0 = proposal0.hash();\n\n    let proposal1 = Proposal {\n        timestamp: proposal0.timestamp + block_time,\n        maybe_block: 
Some(new_payload(true)),\n        maybe_parent_round_id: None,\n        inactive: None,\n    };\n    let hash1 = proposal1.hash();\n\n    let proposal2 = Proposal {\n        timestamp: proposal1.timestamp + block_time,\n        maybe_block: Some(new_payload(true)),\n        maybe_parent_round_id: Some(1),\n        inactive: Some(Default::default()),\n    };\n    let hash2 = proposal2.hash();\n\n    let proposal3 = Proposal {\n        timestamp: proposal2.timestamp + block_time,\n        maybe_block: Some(new_payload(false)),\n        maybe_parent_round_id: Some(2),\n        inactive: Some(Default::default()),\n    };\n    let hash3 = proposal3.hash();\n\n    let proposal4 = Proposal::<ClContext> {\n        timestamp: proposal3.timestamp + block_time,\n        maybe_block: None,\n        maybe_parent_round_id: Some(3),\n        inactive: None,\n    };\n\n    // Carol's node joins a bit late, and gets some messages out of order.\n    timestamp += block_time;\n\n    // Alice makes a proposal in round 2 with parent in round 1. 
Alice and Bob echo it.\n    let msg = create_proposal_message(2, &proposal2, &validators, &alice_kp);\n    expect_no_gossip_block_finalized(sc_c.handle_message(&mut rng, sender, msg, timestamp));\n    let msg = create_message(&validators, 2, echo(hash2), &bob_kp);\n    expect_no_gossip_block_finalized(sc_c.handle_message(&mut rng, sender, msg, timestamp));\n\n    // Alice and Bob even vote for it, so the round is committed!\n    // But without an accepted parent it isn't finalized yet.\n    let msg = create_message(&validators, 2, vote(true), &alice_kp);\n    expect_no_gossip_block_finalized(sc_c.handle_message(&mut rng, sender, msg, timestamp));\n    let msg = create_message(&validators, 2, vote(true), &bob_kp);\n    expect_no_gossip_block_finalized(sc_c.handle_message(&mut rng, sender, msg, timestamp));\n\n    // Alice makes a proposal in round 1 with no parent, and echoes it.\n    let msg = create_proposal_message(1, &proposal1, &validators, &alice_kp);\n    expect_no_gossip_block_finalized(sc_c.handle_message(&mut rng, sender, msg, timestamp));\n\n    // Now Carol receives Bob's proposal in round 0. Carol echoes it.\n    let msg = create_proposal_message(0, &proposal0, &validators, &bob_kp);\n    let mut outcomes = sc_c.handle_message(&mut rng, sender, msg, timestamp);\n    let mut gossip = remove_gossip(&validators, &mut outcomes);\n    assert!(remove_signed(&mut gossip, 0, carol_idx, echo(hash0)));\n    assert!(gossip.is_empty(), \"unexpected gossip: {:?}\", gossip);\n    expect_no_gossip_block_finalized(outcomes);\n\n    timestamp += block_time;\n\n    // The first proposal message Carol received had a timestamp in the future, so she didn't store\n    // the proposal. 
Re-send it to her so that she has a chance to store it now.\n    let msg = create_proposal_message(2, &proposal2, &validators, &alice_kp);\n    expect_no_gossip_block_finalized(sc_c.handle_message(&mut rng, sender, msg, timestamp));\n\n    // On timeout, Carol votes to make round 0 skippable.\n    let mut outcomes = sc_c.handle_timer(timestamp, timestamp, TIMER_ID_UPDATE, &mut rng);\n    let mut gossip = remove_gossip(&validators, &mut outcomes);\n    assert!(remove_signed(&mut gossip, 0, carol_idx, vote(false)));\n    expect_no_gossip_block_finalized(outcomes);\n\n    // Alice also echoes Bob's round 0 proposal, so it has a quorum and is accepted. With that round\n    // 1 becomes current and Carol echoes Alice's proposal. That makes a quorum, but since round\n    // 0 is not skippable round 1 is not yet accepted and thus round 2 is not yet current.\n    let msg = create_message(&validators, 0, echo(hash0), &alice_kp);\n    let mut outcomes = sc_c.handle_message(&mut rng, sender, msg, timestamp);\n    let mut gossip = remove_gossip(&validators, &mut outcomes);\n    assert!(remove_signed(&mut gossip, 1, carol_idx, echo(hash1)));\n    assert!(gossip.is_empty(), \"unexpected gossip: {:?}\", gossip);\n    let timeout = timestamp + sc_c.proposal_timeout();\n    expect_timer(&outcomes, timeout, TIMER_ID_UPDATE);\n\n    // Bob votes false in round 0. 
That's not a quorum yet.\n    let msg = create_message(&validators, 0, vote(false), &bob_kp);\n    expect_no_gossip_block_finalized(sc_c.handle_message(&mut rng, sender, msg, timestamp));\n\n    // On timeout, Carol votes to make round 1 skippable.\n    // TODO: Come up with a better test scenario where timestamps are in order.\n    let mut outcomes = sc_c.handle_timer(\n        timestamp + proposal_timeout * 2,\n        timestamp + proposal_timeout * 2,\n        TIMER_ID_UPDATE,\n        &mut rng,\n    );\n    let mut gossip = remove_gossip(&validators, &mut outcomes);\n    assert!(remove_signed(&mut gossip, 1, carol_idx, vote(false)));\n    assert!(gossip.is_empty(), \"unexpected gossip: {:?}\", gossip);\n\n    // But with Alice's vote round 0 becomes skippable. That means rounds 1 and 2 are now accepted\n    // and Carol votes for them. Since round 2 is already committed, both 1 and 2 are finalized.\n    // Since round 2 became current, Carol echoes the proposal, too.\n    let msg = create_message(&validators, 0, vote(false), &alice_kp);\n    let mut outcomes = sc_c.handle_message(&mut rng, sender, msg, timestamp);\n    let mut gossip = remove_gossip(&validators, &mut outcomes);\n    assert!(remove_signed(&mut gossip, 2, carol_idx, echo(hash2)));\n    assert!(remove_signed(&mut gossip, 2, carol_idx, vote(true)));\n    assert!(gossip.is_empty(), \"unexpected gossip: {:?}\", gossip);\n    expect_finalized(&outcomes, &[(&proposal1, 0), (&proposal2, 1)]);\n    expect_timer(&outcomes, timestamp + block_time, TIMER_ID_UPDATE);\n\n    timestamp += block_time;\n\n    // In round 3 Carol is the leader, so she creates a new block to propose.\n    let mut outcomes = sc_c.handle_timer(timestamp, timestamp, TIMER_ID_UPDATE, &mut rng);\n    let block_context = remove_create_new_block(&mut outcomes);\n    expect_no_gossip_block_finalized(outcomes);\n    assert_eq!(block_context.timestamp(), timestamp);\n    assert_eq!(block_context.ancestor_values().len(), 2);\n\n    let 
proposed_block = ProposedBlock::new(new_payload(false), block_context);\n    let mut outcomes = sc_c.propose(proposed_block, timestamp);\n    let mut gossip = remove_gossip(&validators, &mut outcomes);\n    assert!(remove_proposal(&mut gossip, 3, &proposal3));\n    assert!(gossip.is_empty(), \"unexpected gossip: {:?}\", gossip);\n\n    timestamp += block_time;\n\n    // Once Alice echoes Carol's proposal, she can go on to propose in round 4, too.\n    // Since the round height is 3, the 4th proposal does not contain a block.\n    let msg = create_message(&validators, 3, echo(hash3), &alice_kp);\n    let mut outcomes = sc_c.handle_message(&mut rng, sender, msg, timestamp);\n    let mut gossip = remove_gossip(&validators, &mut outcomes);\n    assert!(remove_signed(&mut gossip, 3, carol_idx, vote(true)));\n    assert!(remove_proposal(&mut gossip, 4, &proposal4));\n    assert!(gossip.is_empty(), \"unexpected gossip: {:?}\", gossip);\n\n    // Only when Alice also votes for the switch block is it finalized.\n    assert!(!sc_c.finalized_switch_block());\n    let msg = create_message(&validators, 3, vote(true), &alice_kp);\n    let mut outcomes = sc_c.handle_message(&mut rng, sender, msg, timestamp);\n    let gossip = remove_gossip(&validators, &mut outcomes);\n    assert!(gossip.is_empty(), \"unexpected gossip: {:?}\", gossip);\n    expect_finalized(&outcomes, &[(&proposal3, 2)]);\n    assert!(sc_c.finalized_switch_block());\n\n    info!(\"restoring protocol now\");\n\n    let mut zug = new_test_zug(weights, vec![], leader_seq);\n    zug.open_wal(dir.path().join(\"wal\"), timestamp);\n    let outcomes = zug.handle_timer(timestamp, timestamp, TIMER_ID_UPDATE, &mut rng);\n    let proposals123 = [(&proposal1, 0), (&proposal2, 1), (&proposal3, 2)];\n    expect_finalized(&outcomes, &proposals123);\n    assert!(zug.finalized_switch_block());\n}\n\n/// Tests that a faulty validator counts towards every quorum.\n///\n/// In this scenario Alice has 60% of the weight, Bob 10% and 
Carol 30%. Carol is offline and Bob is\n/// faulty. Once Bob double-signs, he\n/// counts towards every quorum and Alice's messages suffice to finalize her blocks.\n#[test]\nfn zug_faults() {\n    let mut rng = crate::new_rng();\n    let (weights, validators) = abc_weights(60, 10, 30);\n    let alice_idx = validators.get_index(&*ALICE_PUBLIC_KEY).unwrap();\n    let carol_idx = validators.get_index(&*CAROL_PUBLIC_KEY).unwrap();\n\n    // The first round leaders are Carol, Alice, Alice.\n    let mut zug = new_test_zug(weights, vec![], &[carol_idx, alice_idx, alice_idx]);\n\n    let alice_kp = Keypair::from(ALICE_SECRET_KEY.clone());\n    let bob_kp = Keypair::from(BOB_SECRET_KEY.clone());\n    let carol_kp = Keypair::from(CAROL_SECRET_KEY.clone());\n\n    let sender = *ALICE_NODE_ID;\n    let mut timestamp = Timestamp::now();\n\n    let proposal1 = Proposal {\n        timestamp,\n        maybe_block: Some(new_payload(true)),\n        maybe_parent_round_id: None,\n        inactive: None,\n    };\n\n    let proposal2 = Proposal {\n        timestamp: timestamp + zug.params.min_block_time(),\n        maybe_block: Some(new_payload(true)),\n        maybe_parent_round_id: Some(1),\n        inactive: Some(iter::once(carol_idx).collect()),\n    };\n\n    timestamp += zug.params.min_block_time();\n\n    // Alice makes proposals in rounds 1 and 2, echoes and votes for them.\n    let msg = create_proposal_message(1, &proposal1, &validators, &alice_kp);\n    expect_no_gossip_block_finalized(zug.handle_message(&mut rng, sender, msg, timestamp));\n    let msg = create_message(&validators, 1, vote(true), &alice_kp);\n    expect_no_gossip_block_finalized(zug.handle_message(&mut rng, sender, msg, timestamp));\n    let msg = create_proposal_message(2, &proposal2, &validators, &alice_kp);\n    expect_no_gossip_block_finalized(zug.handle_message(&mut rng, sender, msg, timestamp));\n    let msg = create_message(&validators, 2, 
vote(true), &alice_kp);\n    expect_no_gossip_block_finalized(zug.handle_message(&mut rng, sender, msg, timestamp));\n\n    // Since Carol did not make a proposal Alice votes to make round 0 skippable.\n    let msg = create_message(&validators, 0, vote(false), &alice_kp);\n    expect_no_gossip_block_finalized(zug.handle_message(&mut rng, sender, msg, timestamp));\n\n    // Carol is offline and Alice alone does not have a quorum.\n    // But if Bob equivocates, he counts towards every quorum, so the blocks get finalized.\n    let msg = create_message(&validators, 3, vote(true), &bob_kp);\n    expect_no_gossip_block_finalized(zug.handle_message(&mut rng, sender, msg, timestamp));\n    let msg = create_message(&validators, 3, vote(false), &bob_kp);\n    let outcomes = zug.handle_message(&mut rng, sender, msg, timestamp);\n    expect_finalized(&outcomes, &[(&proposal1, 0), (&proposal2, 1)]);\n\n    // Now Carol starts two nodes by mistake, and equivocates. That crosses the FTT.\n    let msg = create_message(&validators, 3, vote(true), &carol_kp);\n    expect_no_gossip_block_finalized(zug.handle_message(&mut rng, sender, msg, timestamp));\n    let msg = create_message(&validators, 3, vote(false), &carol_kp);\n    let outcomes = zug.handle_message(&mut rng, sender, msg, timestamp);\n    assert!(outcomes.contains(&ProtocolOutcome::FttExceeded));\n}\n\n/// Tests that a `SyncRequest` message is periodically sent to a random peer.\n#[test]\nfn zug_sends_sync_request() {\n    let mut rng = crate::new_rng();\n    let (weights, validators) = abc_weights(50, 40, 10);\n    let alice_idx = validators.get_index(&*ALICE_PUBLIC_KEY).unwrap();\n    let bob_idx = validators.get_index(&*BOB_PUBLIC_KEY).unwrap();\n    let carol_idx = validators.get_index(&*CAROL_PUBLIC_KEY).unwrap();\n\n    // The first round leader is Alice.\n    let mut zug = new_test_zug(weights, vec![], &[alice_idx]);\n\n    let alice_kp = Keypair::from(ALICE_SECRET_KEY.clone());\n    let bob_kp = 
Keypair::from(BOB_SECRET_KEY.clone());\n    let carol_kp = Keypair::from(CAROL_SECRET_KEY.clone());\n\n    let timeout = zug.config.sync_state_interval.expect(\"request state timer\");\n    let sender = *ALICE_NODE_ID;\n    let mut timestamp = Timestamp::from(100000);\n\n    let proposal0 = Proposal::<ClContext> {\n        timestamp,\n        maybe_block: Some(new_payload(false)),\n        maybe_parent_round_id: None,\n        inactive: None,\n    };\n    let hash0 = proposal0.hash();\n\n    let outcomes = zug.handle_is_current(timestamp);\n    expect_timer(&outcomes, timestamp + timeout, TIMER_ID_SYNC_PEER);\n\n    timestamp += timeout;\n\n    // The protocol state is empty and the SyncRequest should reflect that.\n    let mut outcomes = zug.handle_timer(timestamp, timestamp, TIMER_ID_SYNC_PEER, &mut rng);\n    expect_timer(&outcomes, timestamp + timeout, TIMER_ID_SYNC_PEER);\n    let mut msg_iter = remove_requests_to_random(&mut outcomes).into_iter();\n    match (msg_iter.next(), msg_iter.next()) {\n        (\n            Some(SyncRequest {\n                round_id: 0,\n                proposal_hash: None,\n                has_proposal: false,\n                first_validator_idx: _,\n                echoes: 0,\n                true_votes: 0,\n                false_votes: 0,\n                active: 0,\n                faulty: 0,\n                instance_id: _,\n                sync_id: _,\n            }),\n            None,\n        ) => {}\n        (msg0, msg1) => panic!(\"unexpected messages: {:?}, {:?}\", msg0, msg1),\n    }\n\n    timestamp += timeout;\n\n    // Now we get a proposal and echo from Alice, one false vote from Bob, and Carol double-signs.\n    let msg = create_proposal_message(0, &proposal0, &validators, &alice_kp);\n    zug.handle_message(&mut rng, sender, msg, timestamp);\n    let msg = create_message(&validators, 0, vote(false), &bob_kp);\n    zug.handle_message(&mut rng, sender, msg, timestamp);\n    let msg = create_message(&validators, 
0, vote(true), &carol_kp);\n    zug.handle_message(&mut rng, sender, msg, timestamp);\n    let msg = create_message(&validators, 0, vote(false), &carol_kp);\n    zug.handle_message(&mut rng, sender, msg, timestamp);\n\n    // The next SyncRequest message must include all the new information.\n    let mut outcomes = zug.handle_timer(timestamp, timestamp, TIMER_ID_SYNC_PEER, &mut rng);\n    expect_timer(&outcomes, timestamp + timeout, TIMER_ID_SYNC_PEER);\n    let mut msg_iter = remove_requests_to_random(&mut outcomes).into_iter();\n    match (msg_iter.next(), msg_iter.next()) {\n        (\n            Some(SyncRequest {\n                round_id: 0,\n                proposal_hash: Some(hash),\n                has_proposal: true,\n                first_validator_idx,\n                echoes,\n                true_votes: 0,\n                false_votes,\n                active,\n                faulty,\n                instance_id: _,\n                sync_id: _,\n            }),\n            None,\n        ) => {\n            assert_eq!(hash0, hash);\n            let mut faulty_iter = zug.iter_validator_bit_field(first_validator_idx, faulty);\n            assert_eq!(Some(carol_idx), faulty_iter.next());\n            assert_eq!(None, faulty_iter.next());\n            let mut echoes_iter = zug.iter_validator_bit_field(first_validator_idx, echoes);\n            assert_eq!(Some(alice_idx), echoes_iter.next());\n            assert_eq!(None, echoes_iter.next());\n            let mut false_iter = zug.iter_validator_bit_field(first_validator_idx, false_votes);\n            assert_eq!(Some(bob_idx), false_iter.next());\n            assert_eq!(None, false_iter.next());\n            // When we marked Carol as faulty we removed her entry from the active list.\n            let expected_active =\n                zug.validator_bit_field(first_validator_idx, vec![alice_idx, bob_idx].into_iter());\n            assert_eq!(active, expected_active);\n        }\n        (msg0, msg1) => 
panic!(\"unexpected messages: {:?}, {:?}\", msg0, msg1),\n    }\n}\n\n/// Tests that we respond to a `SyncRequest` message with the missing signatures.\n#[test]\nfn zug_handles_sync_request() {\n    let mut rng = crate::new_rng();\n    let (weights, validators) = abc_weights(50, 40, 10);\n    let alice_idx = validators.get_index(&*ALICE_PUBLIC_KEY).unwrap();\n    let bob_idx = validators.get_index(&*BOB_PUBLIC_KEY).unwrap();\n    let carol_idx = validators.get_index(&*CAROL_PUBLIC_KEY).unwrap();\n\n    // The first round leader is Alice.\n    let mut zug = new_test_zug(weights.clone(), vec![], &[alice_idx]);\n\n    let alice_kp = Keypair::from(ALICE_SECRET_KEY.clone());\n    let bob_kp = Keypair::from(BOB_SECRET_KEY.clone());\n    let carol_kp = Keypair::from(CAROL_SECRET_KEY.clone());\n\n    let sender = *ALICE_NODE_ID;\n    let timestamp = Timestamp::from(100000);\n\n    let proposal0 = Proposal {\n        timestamp,\n        maybe_block: Some(new_payload(false)),\n        maybe_parent_round_id: None,\n        inactive: None,\n    };\n    let hash0 = proposal0.hash();\n\n    let proposal1 = Proposal::<ClContext> {\n        timestamp,\n        maybe_block: Some(new_payload(true)),\n        maybe_parent_round_id: None,\n        inactive: None,\n    };\n    let hash1 = proposal1.hash();\n\n    // We get a proposal, echo and true vote from Alice, one echo and false vote from Bob, and\n    // Carol double-signs.\n    let msg = create_proposal_message(0, &proposal0, &validators, &alice_kp);\n    zug.handle_message(&mut rng, sender, msg, timestamp);\n    let msg = create_message(&validators, 0, echo(hash0), &bob_kp);\n    zug.handle_message(&mut rng, sender, msg, timestamp);\n    let msg = create_message(&validators, 0, vote(false), &bob_kp);\n    zug.handle_message(&mut rng, sender, msg, timestamp);\n    let msg = create_message(&validators, 0, vote(true), &alice_kp);\n    zug.handle_message(&mut rng, sender, msg, timestamp);\n    let msg = create_message(&validators, 
0, vote(true), &carol_kp);\n    zug.handle_message(&mut rng, sender, msg, timestamp);\n    let msg = create_message(&validators, 0, vote(false), &carol_kp);\n    zug.handle_message(&mut rng, sender, msg, timestamp);\n\n    let first_validator_idx = ValidatorIndex(rng.gen_range(0..3));\n    let sync_id = RandomId::new(&mut rng);\n\n    // The sender has everything we have except the proposal itself.\n    let msg = SyncRequest::<ClContext> {\n        round_id: 0,\n        proposal_hash: Some(hash0),\n        has_proposal: false,\n        first_validator_idx,\n        echoes: zug.validator_bit_field(first_validator_idx, vec![alice_idx, bob_idx].into_iter()),\n        true_votes: zug\n            .validator_bit_field(first_validator_idx, vec![alice_idx, bob_idx].into_iter()),\n        false_votes: zug\n            .validator_bit_field(first_validator_idx, vec![alice_idx, bob_idx].into_iter()),\n        active: zug.validator_bit_field(\n            first_validator_idx,\n            vec![alice_idx, bob_idx, carol_idx].into_iter(),\n        ),\n        faulty: zug.validator_bit_field(first_validator_idx, vec![carol_idx].into_iter()),\n        instance_id: *zug.instance_id(),\n        sync_id,\n    };\n    let (outcomes, response) = zug.handle_request_message(\n        &mut rng,\n        sender,\n        SerializedMessage::from_message(&msg),\n        timestamp,\n    );\n    assert_eq!(\n        response\n            .expect(\"response\")\n            .deserialize_expect::<Message<_>>(),\n        Message::SyncResponse(SyncResponse {\n            round_id: 0,\n            proposal_or_hash: Some(Either::Left(proposal0)),\n            echo_sigs: BTreeMap::new(),\n            true_vote_sigs: BTreeMap::new(),\n            false_vote_sigs: BTreeMap::new(),\n            signed_messages: Vec::new(),\n            evidence: Vec::new(),\n            instance_id: *zug.instance_id(),\n            sync_id,\n        })\n    );\n    expect_no_gossip_block_finalized(outcomes);\n\n    // 
But if there are missing messages, these are sent back.\n    let sync_id = RandomId::new(&mut rng);\n    let msg = SyncRequest::<ClContext> {\n        round_id: 0,\n        proposal_hash: Some(hash1), // Wrong proposal!\n        has_proposal: true,\n        first_validator_idx,\n        echoes: zug.validator_bit_field(first_validator_idx, vec![alice_idx].into_iter()),\n        true_votes: zug\n            .validator_bit_field(first_validator_idx, vec![bob_idx, alice_idx].into_iter()),\n        false_votes: zug.validator_bit_field(first_validator_idx, vec![].into_iter()),\n        active: zug.validator_bit_field(first_validator_idx, vec![alice_idx, bob_idx].into_iter()),\n        faulty: zug.validator_bit_field(first_validator_idx, vec![].into_iter()),\n        instance_id: *zug.instance_id(),\n        sync_id,\n    };\n    let (mut outcomes, response) = zug.handle_request_message(\n        &mut rng,\n        sender,\n        SerializedMessage::from_message(&msg),\n        timestamp,\n    );\n    assert_eq!(\n        remove_targeted_messages(&validators, sender, &mut outcomes),\n        vec![]\n    );\n    expect_no_gossip_block_finalized(outcomes);\n\n    let sync_response = match response.expect(\"response\").deserialize_expect() {\n        Message::SyncResponse(sync_response) => sync_response,\n        result => panic!(\"unexpected message: {:?}\", result),\n    };\n\n    assert_eq!(sync_response.round_id, 0);\n    assert_eq!(sync_response.proposal_or_hash, Some(Either::Right(hash0)));\n    assert_eq!(\n        sync_response.echo_sigs,\n        zug.round(0).unwrap().echoes()[&hash0]\n    );\n    assert_eq!(sync_response.true_vote_sigs, BTreeMap::new());\n    assert_eq!(sync_response.false_vote_sigs.len(), 1);\n    assert_eq!(\n        Some(sync_response.false_vote_sigs[&bob_idx]),\n        zug.round(0).unwrap().votes(false)[bob_idx]\n    );\n    assert_eq!(sync_response.signed_messages, vec![]);\n    assert_eq!(sync_response.evidence.len(), 1);\n    
assert_eq!(sync_response.sync_id, sync_id);\n    match (&sync_response.evidence[0], &zug.faults[&carol_idx]) {\n        (\n            (signed_msg, content2, sig2),\n            Fault::Direct(expected_signed_msg, expected_content2, expected_sig2),\n        ) => {\n            assert_eq!(signed_msg, expected_signed_msg);\n            assert_eq!(content2, expected_content2);\n            assert_eq!(sig2, expected_sig2);\n        }\n        (evidence, fault) => panic!(\"unexpected evidence: {:?}, {:?}\", evidence, fault),\n    }\n\n    // Create a new instance that doesn't have any data yet, let it send two sync requests to Zug,\n    // and handle the responses.\n    let mut zug2 = new_test_zug(weights, vec![], &[alice_idx]);\n    for _ in 0..2 {\n        let mut outcomes = zug2.handle_timer(timestamp, timestamp, TIMER_ID_SYNC_PEER, &mut rng);\n        let msg = loop {\n            if let ProtocolOutcome::CreatedRequestToRandomValidator(payload) =\n                outcomes.pop().expect(\"expected request to random peer\")\n            {\n                break payload;\n            }\n        };\n        let (_outcomes, response) = zug.handle_request_message(&mut rng, sender, msg, timestamp);\n        if let Some(msg) = response {\n            let mut _outcomes = zug2.handle_message(&mut rng, sender, msg, timestamp);\n        }\n    }\n\n    // They should be synced up now:\n    assert_eq!(zug.rounds, zug2.rounds);\n    assert_eq!(zug.faults, zug2.faults);\n    assert_eq!(zug.active, zug2.active);\n}\n\n#[test]\nfn test_validator_bit_field() {\n    fn test_roundtrip(zug: &Zug<ClContext>, first: u32, indexes: Vec<u32>, expected: Vec<u32>) {\n        let field = zug.validator_bit_field(\n            ValidatorIndex(first),\n            indexes.iter().map(|i| ValidatorIndex(*i)),\n        );\n        let new_indexes: BTreeSet<u32> = zug\n            .iter_validator_bit_field(ValidatorIndex(first), field)\n            .map(|ValidatorIndex(i)| i)\n            .collect();\n   
     assert_eq!(expected.into_iter().collect::<BTreeSet<u32>>(), new_indexes);\n    }\n\n    let weights100: Vec<(PublicKey, U512)> = (0u8..100)\n        .map(|i| {\n            let sk = SecretKey::ed25519_from_bytes([i; SecretKey::ED25519_LENGTH]).unwrap();\n            (PublicKey::from(&sk), U512::from(100))\n        })\n        .collect();\n\n    let weights250: Vec<(PublicKey, U512)> = (0u8..250)\n        .map(|i| {\n            let sk = SecretKey::ed25519_from_bytes([i; SecretKey::ED25519_LENGTH]).unwrap();\n            (PublicKey::from(&sk), U512::from(100))\n        })\n        .collect();\n\n    let sc100 = new_test_zug(weights100, vec![], &[]);\n    let sc250 = new_test_zug(weights250, vec![], &[]);\n\n    test_roundtrip(&sc100, 50, vec![], vec![]);\n    test_roundtrip(&sc250, 50, vec![], vec![]);\n    test_roundtrip(&sc250, 200, vec![], vec![]);\n\n    test_roundtrip(&sc100, 50, vec![0, 1, 49, 50, 99], vec![50, 99, 0, 1, 49]);\n    test_roundtrip(&sc250, 50, vec![0, 49, 50, 177, 178, 249], vec![50, 177]);\n    test_roundtrip(\n        &sc250,\n        200,\n        vec![0, 77, 78, 200, 249],\n        vec![200, 249, 0, 77],\n    );\n}\n\n#[test]\nfn test_quorum() {\n    // Alice has almost 2/3 of the weight, Bob almost 1/3, and Carol 1.\n    let weights_without_overflow = (66, 33, 1);\n    // A similar distribution, but the quorum calculation would overflow if it naively added the\n    // total weight to the ftt.\n    let weights_with_overflow = (1 << 63, 1 << 62, 1);\n    for (a, b, c) in [weights_without_overflow, weights_with_overflow] {\n        let (weights, validators) = abc_weights(a, b, c);\n        let alice_idx = validators.get_index(&*ALICE_PUBLIC_KEY).unwrap();\n        let bob_idx = validators.get_index(&*BOB_PUBLIC_KEY).unwrap();\n        let carol_idx = validators.get_index(&*CAROL_PUBLIC_KEY).unwrap();\n\n        let mut zug = new_test_zug(weights, vec![], &[]);\n\n        // The threshold is the highest number that's below 2/3 of the 
weight.\n        assert_eq!(a, zug.quorum_threshold().0);\n\n        // Alice alone is not a quorum, but with Carol she is.\n        assert!(!zug.is_quorum(vec![].into_iter()));\n        assert!(!zug.is_quorum(vec![alice_idx].into_iter()));\n        assert!(zug.is_quorum(vec![alice_idx, carol_idx].into_iter()));\n        assert!(zug.is_quorum(vec![alice_idx, bob_idx, carol_idx].into_iter()));\n\n        // If Carol is known to be faulty, she counts towards every quorum.\n        zug.mark_faulty(&CAROL_PUBLIC_KEY);\n\n        // So now Alice's vote alone is sufficient.\n        assert!(!zug.is_quorum(vec![].into_iter()));\n        assert!(zug.is_quorum(vec![alice_idx].into_iter()));\n    }\n}\n\n#[test]\nfn update_proposal_timeout() {\n    macro_rules! assert_approx {\n        ($val0:expr, $val1:expr) => {\n            let v0: f64 = $val0;\n            let v1: f64 = $val1;\n            let diff = (v1 - v0).abs();\n            let min = v1.abs().min(v0.abs());\n            assert!(diff < min * 0.1, \"not approximately equal: {}, {}\", v0, v1);\n        };\n    }\n\n    let mut rng = crate::new_rng();\n\n    let (weights, _validators) = abc_weights(1, 2, 3);\n    let mut zug = new_test_zug(weights, vec![], &[]);\n    let _outcomes = zug.handle_timer(\n        Timestamp::from(100000),\n        Timestamp::from(100000),\n        TIMER_ID_UPDATE,\n        &mut rng,\n    );\n\n    let round_start = zug.current_round_start;\n    let grace_factor = zug.config.proposal_grace_period as f64 / 100.0 + 1.0;\n    let inertia = zug.config.proposal_timeout_inertia;\n    let initial_timeout = zug.config.proposal_timeout.millis() as f64 * grace_factor;\n\n    let timeout = zug.proposal_timeout().millis() as f64;\n\n    assert_approx!(initial_timeout, timeout);\n\n    // Within 2 * inertia blocks the timeout should double and go back down again, if rounds\n    // without proposals come before rounds with fast proposals and the fraction of rounds with\n    // fast proposals is (1 + ftt) 
/ 2, i.e. 2/3.\n    let fail_rounds = (inertia as f64 * 2.0 / 3.0).round() as u16;\n    let success_rounds = 2 * inertia - fail_rounds;\n    for _ in 0..fail_rounds {\n        zug.update_proposal_timeout(round_start + TimeDiff::from_seconds(10000));\n    }\n    assert_approx!(\n        2.0 * initial_timeout,\n        zug.proposal_timeout().millis() as f64\n    );\n    for _ in 0..success_rounds {\n        zug.update_proposal_timeout(round_start + TimeDiff::from_millis(1));\n    }\n    assert_approx!(initial_timeout, zug.proposal_timeout().millis() as f64);\n\n    // If the proposal delay is consistently t, the timeout will settle on t * grace_factor\n    // within 2 * inertia rounds.\n    let min_delay = (zug.proposal_timeout().millis() as f64 / grace_factor) as u64;\n    for _ in 0..10 {\n        let delay = TimeDiff::from_millis(rng.gen_range(min_delay..(min_delay * 2)));\n        for _ in 0..(2 * inertia) {\n            zug.update_proposal_timeout(round_start + delay);\n        }\n        assert_eq!(\n            delay.millis() as f64 * grace_factor,\n            zug.proposal_timeout().millis() as f64\n        );\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/protocols/zug.rs",
    "content": "//! # The Zug consensus protocol.\n//!\n//! This protocol requires that at most _f_ out of _n > 3 f_ validators (by weight) are faulty. It\n//! also assumes that there is an upper bound for the network delay: how long a message sent by a\n//! correct validator can take before it is delivered.\n//!\n//! Under these conditions all correct nodes will reach agreement on a chain of _finalized_ blocks.\n//!\n//! A _quorum_ is a set of validators whose total weight is greater than _(n + f) / 2_. Thus any two\n//! quorums always have a correct validator in common. Since _(n + f) / 2 < n - f_, the correct\n//! validators constitute a quorum.\n//!\n//!\n//! ## How it Works\n//!\n//! In every round the designated leader can sign a `Proposal` message to suggest a block. The\n//! proposal also points to an earlier round in which the parent block was proposed.\n//!\n//! Each validator then signs an `Echo` message with the proposal's hash. Correct validators only\n//! sign one `Echo` per round, so at most one proposal can get `Echo`s signed by a quorum. If there\n//! is a quorum and some other conditions are met (see below), the proposal is _accepted_. The next\n//! round's leader can now make a proposal that uses this one as a parent.\n//!\n//! Each validator that observes the proposal to be accepted in time signs a `Vote(true)` message.\n//! If they time out waiting they sign `Vote(false)` instead. If a quorum signs `true`, the round is\n//! _committed_ and the proposal and all its ancestors are finalized. If a quorum signs `false`, the\n//! round is _skippable_: The next round's leader can now make a proposal with a parent from an\n//! earlier round. Correct validators only sign either `true` or `false`, so a round can be either\n//! committed or skippable but not both.\n//!\n//! If there is no accepted proposal all correct validators will eventually vote `false`, so the\n//! round becomes skippable. 
This is what makes the protocol _live_: The next leader will eventually\n//! be allowed to make a proposal, because either there is an accepted proposal that can be the\n//! parent, or the round will eventually be skippable and an earlier round's proposal can be used as\n//! a parent. If the timeout is long enough correct proposers' blocks will usually get finalized.\n//!\n//! For a proposal to be _accepted_, the parent proposal needs to also be accepted, and all rounds\n//! between the parent and the current round must be skippable. This is what makes the protocol\n//! _safe_: If two rounds are committed, their proposals must be ancestors of each other,\n//! because they are not skippable. Thus no two conflicting blocks can become finalized.\n//!\n//! Of course there is also a first block: Whenever _all_ earlier rounds are skippable (in\n//! particular in the first round) the leader may propose a block with no parent.\n//!\n//!\n//! ## Syncing the State\n//!\n//! Every new signed message is optimistically sent directly to all peers. We want to guarantee that\n//! it is eventually seen by all validators, even if they are not fully connected. This is\n//! achieved via a pull-based randomized gossip mechanism:\n//!\n//! A `SyncRequest` message containing information about a random part of the local protocol state\n//! is periodically sent to a random peer. The peer compares that to its local state, and responds\n//! 
with all signed messages that it has and the other is missing.\n\npub(crate) mod config;\n#[cfg(test)]\nmod des_testing;\nmod fault;\nmod message;\nmod params;\nmod participation;\nmod proposal;\nmod round;\n#[cfg(test)]\nmod tests;\n\nuse std::{\n    any::Any,\n    cmp::Reverse,\n    collections::{btree_map, BTreeMap, HashMap, HashSet},\n    fmt::Debug,\n    iter,\n    path::PathBuf,\n};\n\nuse datasize::DataSize;\nuse either::Either;\nuse itertools::Itertools;\nuse rand::{seq::IteratorRandom, Rng};\nuse tracing::{debug, error, event, info, trace, warn, Level};\n\nuse casper_types::{Chainspec, TimeDiff, Timestamp, U512};\n\nuse crate::{\n    components::consensus::{\n        config::Config,\n        consensus_protocol::{\n            BlockContext, ConsensusProtocol, FinalizedBlock, ProposedBlock, ProtocolOutcome,\n            ProtocolOutcomes, TerminalBlockData,\n        },\n        era_supervisor::SerializedMessage,\n        protocols,\n        traits::{ConsensusValueT, Context},\n        utils::{\n            wal::{ReadWal, WalEntry, WriteWal},\n            ValidatorIndex, ValidatorMap, Validators, Weight,\n        },\n        ActionId, LeaderSequence, TimerId,\n    },\n    types::NodeId,\n    utils, NodeRng,\n};\nuse fault::Fault;\nuse message::{Content, SignedMessage, SyncResponse};\nuse params::Params;\nuse participation::{Participation, ParticipationStatus};\nuse proposal::{HashedProposal, Proposal};\nuse round::Round;\nuse serde::{Deserialize, Serialize};\n\npub(crate) use message::{Message, SyncRequest};\n\n/// The timer for syncing with a random peer.\nconst TIMER_ID_SYNC_PEER: TimerId = TimerId(0);\n/// The timer for calling `update`.\nconst TIMER_ID_UPDATE: TimerId = TimerId(1);\n/// The timer for logging inactive validators.\nconst TIMER_ID_LOG_PARTICIPATION: TimerId = TimerId(2);\n\n/// The maximum number of future rounds we instantiate if we get messages from rounds that we\n/// haven't started yet.\nconst MAX_FUTURE_ROUNDS: u32 = 7200; // Don't drop 
messages in 2-hour eras with 1-second rounds.\n\n/// Identifies a single [`Round`] in the protocol.\npub(crate) type RoundId = u32;\n\ntype ProposalsAwaitingParent = HashSet<(RoundId, NodeId)>;\ntype ProposalsAwaitingValidation<C> = HashSet<(RoundId, HashedProposal<C>, NodeId)>;\n\n/// An entry in the Write-Ahead Log, storing a message we had added to our protocol state.\n#[derive(Deserialize, Serialize, Debug, PartialEq)]\n#[serde(bound(\n    serialize = \"C::Hash: Serialize\",\n    deserialize = \"C::Hash: Deserialize<'de>\",\n))]\npub(crate) enum ZugWalEntry<C: Context> {\n    /// A signed echo or vote.\n    SignedMessage(SignedMessage<C>),\n    /// A proposal.\n    Proposal(Proposal<C>, RoundId),\n    /// Evidence of a validator double-signing.\n    Evidence(SignedMessage<C>, Content<C>, C::Signature),\n}\n\nimpl<C: Context> WalEntry for ZugWalEntry<C> {}\n\n/// Contains the portion of the state required for an active validator to participate in the\n/// protocol.\n#[derive(DataSize)]\npub(crate) struct ActiveValidator<C>\nwhere\n    C: Context,\n{\n    idx: ValidatorIndex,\n    secret: C::ValidatorSecret,\n}\n\nimpl<C: Context> Debug for ActiveValidator<C> {\n    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        formatter\n            .debug_struct(\"ActiveValidator\")\n            .field(\"idx\", &self.idx)\n            .field(\"secret\", &\"<REDACTED>\")\n            .finish()\n    }\n}\n\nstruct FaultySender(NodeId);\n\n/// Contains the state required for the protocol.\n#[derive(Debug, DataSize)]\npub(crate) struct Zug<C>\nwhere\n    C: Context,\n{\n    /// Contains numerical parameters for the protocol\n    params: Params<C>,\n    /// The timeout for the current round's proposal, in milliseconds\n    proposal_timeout_millis: f64,\n    /// The validators in this instantiation of the protocol\n    validators: Validators<C::ValidatorId>,\n    /// If we are a validator ourselves, we must know which index we\n    /// are in 
the [`Validators`] and have a private key for consensus.\n    active_validator: Option<ActiveValidator<C>>,\n    /// When an era has already completed, sometimes we still need to keep\n    /// it around to provide evidence for equivocation in previous eras.\n    evidence_only: bool,\n    /// Proposals which have not yet had their parent accepted, by parent round ID.\n    proposals_waiting_for_parent:\n        HashMap<RoundId, HashMap<HashedProposal<C>, ProposalsAwaitingParent>>,\n    /// Incoming blocks we can't add yet because we are waiting for validation.\n    proposals_waiting_for_validation: HashMap<ProposedBlock<C>, ProposalsAwaitingValidation<C>>,\n    /// If we requested a new block from the block proposer component this contains the proposal's\n    /// round ID and the parent's round ID, if there is a parent.\n    pending_proposal: Option<(BlockContext<C>, RoundId, Option<RoundId>)>,\n    leader_sequence: LeaderSequence,\n    /// The [`Round`]s of this protocol which we've instantiated.\n    rounds: BTreeMap<RoundId, Round<C>>,\n    /// List of faulty validators and their type of fault.\n    faults: HashMap<ValidatorIndex, Fault<C>>,\n    /// The configuration for the protocol\n    config: config::Config,\n    /// This is a signed message for every validator we have received a signature from.\n    active: ValidatorMap<Option<SignedMessage<C>>>,\n    /// The lowest round ID of a block that could still be finalized in the future.\n    first_non_finalized_round_id: RoundId,\n    /// The lowest round that needs to be considered in `update`.\n    maybe_dirty_round_id: Option<RoundId>,\n    /// The lowest non-skippable round without an accepted value.\n    current_round: RoundId,\n    /// The time when the current round started.\n    current_round_start: Timestamp,\n    /// Whether anything was recently added to the protocol state.\n    progress_detected: bool,\n    /// Whether or not the protocol is currently paused\n    paused: bool,\n    /// The next update 
we have set a timer for. This helps deduplicate redundant calls to\n    /// `update`.\n    next_scheduled_update: Timestamp,\n    /// The write-ahead log to prevent honest nodes from double-signing upon restart.\n    write_wal: Option<WriteWal<ZugWalEntry<C>>>,\n    /// A map of random IDs -> timestamp of when it has been created, allowing us to\n    /// verify that a response has been asked for.\n    sent_sync_requests: registered_sync::RegisteredSync,\n}\n\nimpl<C: Context + 'static> Zug<C> {\n    fn new_with_params(\n        validators: Validators<C::ValidatorId>,\n        params: Params<C>,\n        config: &config::Config,\n        prev_cp: Option<&dyn ConsensusProtocol<C>>,\n        seed: u64,\n    ) -> Zug<C> {\n        let weights = protocols::common::validator_weights::<C>(&validators);\n        let active: ValidatorMap<_> = weights.iter().map(|_| None).collect();\n\n        // Use the estimate from the previous era as the proposal timeout. Start with one minimum\n        // timeout times the grace period factor: This is what we would settle on if proposals\n        // always got accepted exactly after one minimum timeout.\n        let proposal_timeout_millis = prev_cp\n            .and_then(|cp| cp.as_any().downcast_ref::<Zug<C>>())\n            .map(|zug| zug.proposal_timeout_millis)\n            .unwrap_or_else(|| {\n                config.proposal_timeout.millis() as f64\n                    * (config.proposal_grace_period as f64 / 100.0 + 1.0)\n            });\n\n        let mut can_propose: ValidatorMap<bool> = weights.iter().map(|_| true).collect();\n        for vidx in validators.iter_cannot_propose_idx() {\n            can_propose[vidx] = false;\n        }\n        let faults: HashMap<_, _> = validators\n            .iter_banned_idx()\n            .map(|idx| (idx, Fault::Banned))\n            .collect();\n\n        let leader_sequence = LeaderSequence::new(seed, &weights, can_propose);\n\n        info!(\n            instance_id = 
%params.instance_id(),\n            era_start_time = %params.start_timestamp(),\n            %proposal_timeout_millis,\n            \"initializing Zug instance\",\n        );\n\n        Zug {\n            leader_sequence,\n            proposals_waiting_for_parent: HashMap::new(),\n            proposals_waiting_for_validation: HashMap::new(),\n            rounds: BTreeMap::new(),\n            first_non_finalized_round_id: 0,\n            maybe_dirty_round_id: None,\n            current_round: 0,\n            current_round_start: Timestamp::MAX,\n            evidence_only: false,\n            faults,\n            active,\n            config: config.clone(),\n            params,\n            proposal_timeout_millis,\n            validators,\n            active_validator: None,\n            pending_proposal: None,\n            progress_detected: false,\n            paused: false,\n            next_scheduled_update: Timestamp::MAX,\n            write_wal: None,\n            sent_sync_requests: Default::default(),\n        }\n    }\n\n    /// Creates a new [`Zug`] instance.\n    #[allow(clippy::too_many_arguments)]\n    fn new(\n        instance_id: C::InstanceId,\n        validator_stakes: BTreeMap<C::ValidatorId, U512>,\n        faulty: &HashSet<C::ValidatorId>,\n        inactive: &HashSet<C::ValidatorId>,\n        chainspec: &Chainspec,\n        config: &Config,\n        prev_cp: Option<&dyn ConsensusProtocol<C>>,\n        era_start_time: Timestamp,\n        seed: u64,\n    ) -> Zug<C> {\n        let validators = protocols::common::validators::<C>(faulty, inactive, validator_stakes);\n        let core_config = &chainspec.core_config;\n\n        let params = Params::new(\n            instance_id,\n            core_config.minimum_block_time,\n            era_start_time,\n            core_config.minimum_era_height,\n            era_start_time.saturating_add(core_config.era_duration),\n            protocols::common::ftt::<C>(core_config.finality_threshold_fraction, 
&validators),\n        );\n\n        Zug::new_with_params(validators, params, &config.zug, prev_cp, seed)\n    }\n\n    /// Creates a new boxed [`Zug`] instance.\n    #[allow(clippy::too_many_arguments)]\n    pub(crate) fn new_boxed(\n        instance_id: C::InstanceId,\n        validator_stakes: BTreeMap<C::ValidatorId, U512>,\n        faulty: &HashSet<C::ValidatorId>,\n        inactive: &HashSet<C::ValidatorId>,\n        chainspec: &Chainspec,\n        config: &Config,\n        prev_cp: Option<&dyn ConsensusProtocol<C>>,\n        era_start_time: Timestamp,\n        seed: u64,\n        now: Timestamp,\n        wal_file: PathBuf,\n    ) -> (Box<dyn ConsensusProtocol<C>>, ProtocolOutcomes<C>) {\n        let mut zug = Self::new(\n            instance_id,\n            validator_stakes,\n            faulty,\n            inactive,\n            chainspec,\n            config,\n            prev_cp,\n            era_start_time,\n            seed,\n        );\n\n        let outcomes = zug.open_wal(wal_file, now);\n\n        (Box::new(zug), outcomes)\n    }\n\n    /// Returns our validator index (if we are an active validator).\n    fn our_idx(&self) -> Option<u32> {\n        self.active_validator.as_ref().map(|av| av.idx.0)\n    }\n\n    /// Prints a log statement listing the inactive and faulty validators.\n    fn log_participation(&self) {\n        let mut inactive_w: u64 = 0;\n        let mut faulty_w: u64 = 0;\n        let total_w = self.validators.total_weight().0;\n        let mut inactive_validators = Vec::new();\n        let mut faulty_validators = Vec::new();\n        for (idx, v_id) in self.validators.enumerate_ids() {\n            if let Some(status) = ParticipationStatus::for_index(idx, self) {\n                match status {\n                    ParticipationStatus::Equivocated\n                    | ParticipationStatus::EquivocatedInOtherEra => {\n                        faulty_w = faulty_w.saturating_add(self.validators.weight(idx).0);\n                       
 faulty_validators.push((idx, v_id.clone(), status));\n                    }\n                    ParticipationStatus::Inactive | ParticipationStatus::LastSeenInRound(_) => {\n                        inactive_w = inactive_w.saturating_add(self.validators.weight(idx).0);\n                        inactive_validators.push((idx, v_id.clone(), status));\n                    }\n                }\n            }\n        }\n        inactive_validators.sort_by_key(|(idx, _, status)| (Reverse(*status), *idx));\n        faulty_validators.sort_by_key(|(idx, _, status)| (Reverse(*status), *idx));\n        let inactive_w_100 = u128::from(inactive_w).saturating_mul(100);\n        let faulty_w_100 = u128::from(faulty_w).saturating_mul(100);\n        let participation = Participation::<C> {\n            instance_id: *self.instance_id(),\n            inactive_stake_percent: utils::div_round(inactive_w_100, u128::from(total_w)) as u8,\n            faulty_stake_percent: utils::div_round(faulty_w_100, u128::from(total_w)) as u8,\n            inactive_validators,\n            faulty_validators,\n        };\n        info!(\n            our_idx = self.our_idx(),\n            ?participation,\n            \"validator participation\"\n        );\n    }\n\n    /// Returns whether the switch block has already been finalized.\n    fn finalized_switch_block(&self) -> bool {\n        if let Some(round_id) = self.first_non_finalized_round_id.checked_sub(1) {\n            self.accepted_switch_block(round_id) || self.accepted_dummy_proposal(round_id)\n        } else {\n            false\n        }\n    }\n\n    /// Returns whether a block was accepted that, if finalized, would be the last one.\n    fn accepted_switch_block(&self, round_id: RoundId) -> bool {\n        match self.round(round_id).and_then(Round::accepted_proposal) {\n            None => false,\n            Some((height, proposal)) => {\n                proposal.maybe_block().is_some() // not a dummy proposal\n                    && 
height.saturating_add(1) >= self.params.end_height() // reached era height\n                    && proposal.timestamp() >= self.params.end_timestamp() // minimum era duration\n            }\n        }\n    }\n\n    /// Returns whether a proposal without a block was accepted, i.e. whether some ancestor of the\n    /// accepted proposal is a switch block.\n    fn accepted_dummy_proposal(&self, round_id: RoundId) -> bool {\n        match self.round(round_id).and_then(Round::accepted_proposal) {\n            None => false,\n            Some((_, proposal)) => proposal.maybe_block().is_none(),\n        }\n    }\n\n    /// Returns whether the validator has already sent an `Echo` in this round.\n    fn has_echoed(&self, round_id: RoundId, validator_idx: ValidatorIndex) -> bool {\n        self.round(round_id)\n            .is_some_and(|round| round.has_echoed(validator_idx))\n    }\n\n    /// Returns whether the validator has already cast a `true` or `false` vote.\n    fn has_voted(&self, round_id: RoundId, validator_idx: ValidatorIndex) -> bool {\n        self.round(round_id)\n            .is_some_and(|round| round.has_voted(validator_idx))\n    }\n\n    /// Request the latest state from a random peer.\n    fn handle_sync_peer_timer(&mut self, now: Timestamp, rng: &mut NodeRng) -> ProtocolOutcomes<C> {\n        if self.evidence_only || self.finalized_switch_block() {\n            return vec![]; // Era has ended. 
No further progress is expected.\n        }\n        trace!(\n            our_idx = self.our_idx(),\n            instance_id = ?self.instance_id(),\n            \"syncing with random peer\",\n        );\n        // Inform a peer about our protocol state and schedule the next request.\n        let first_validator_idx = ValidatorIndex(rng.gen_range(0..self.validators.len() as u32));\n        let round_id = (self.first_non_finalized_round_id..=self.current_round)\n            .choose(rng)\n            .unwrap_or(self.current_round);\n        let payload = self.create_sync_request(rng, first_validator_idx, round_id);\n        let mut outcomes = vec![ProtocolOutcome::CreatedRequestToRandomValidator(\n            SerializedMessage::from_message(&payload),\n        )];\n        // Periodically sync the state with a random peer.\n        if let Some(interval) = self.config.sync_state_interval {\n            outcomes.push(ProtocolOutcome::ScheduleTimer(\n                now.saturating_add(interval),\n                TIMER_ID_SYNC_PEER,\n            ));\n        }\n        outcomes\n    }\n\n    /// Prints a log message if the message is a proposal.\n    fn log_proposal(&self, proposal: &HashedProposal<C>, round_id: RoundId, msg: &str) {\n        let creator_index = self.leader(round_id);\n        let creator = if let Some(creator) = self.validators.id(creator_index) {\n            creator\n        } else {\n            error!(\n                our_idx = self.our_idx(),\n                ?creator_index,\n                ?round_id,\n                \"{}: invalid creator\",\n                msg\n            );\n            return;\n        };\n        info!(\n            our_idx = self.our_idx(),\n            hash = %proposal.hash(),\n            %creator,\n            creator_index = creator_index.0,\n            round_id,\n            timestamp = %proposal.timestamp(),\n            \"{}\", msg,\n        );\n    }\n\n    /// Creates a `SyncRequest` message to inform a peer 
about our view of the given round, so that\n    /// the peer can send us any data we are missing.\n    ///\n    /// If there are more than 128 validators, the information only covers echoes and votes of\n    /// validators with index in `first_validator_idx..=(first_validator_idx + 127)`.\n    fn create_sync_request(\n        &mut self,\n        rng: &mut NodeRng,\n        first_validator_idx: ValidatorIndex,\n        round_id: RoundId,\n    ) -> SyncRequest<C> {\n        let faulty = self.validator_bit_field(first_validator_idx, self.faults.keys().cloned());\n        let active = self.validator_bit_field(first_validator_idx, self.active.keys_some());\n        let round = match self.round(round_id) {\n            Some(round) => round,\n            None => {\n                return SyncRequest::new_empty_round(\n                    round_id,\n                    first_validator_idx,\n                    faulty,\n                    active,\n                    *self.instance_id(),\n                    self.sent_sync_requests.create_and_register_new_id(rng),\n                );\n            }\n        };\n        let true_votes =\n            self.validator_bit_field(first_validator_idx, round.votes(true).keys_some());\n        let false_votes =\n            self.validator_bit_field(first_validator_idx, round.votes(false).keys_some());\n        // We only request information about the proposal with the most echoes, by weight.\n        // TODO: If there's no quorum, should we prefer the one for which we have the leader's echo?\n        let proposal_hash = round.quorum_echoes().or_else(|| {\n            round\n                .echoes()\n                .iter()\n                .max_by_key(|(_, echo_map)| self.sum_weights(echo_map.keys()))\n                .map(|(hash, _)| *hash)\n        });\n        let has_proposal = round.proposal().map(HashedProposal::hash) == proposal_hash.as_ref();\n        let mut echoes = 0;\n        if let Some(echo_map) = 
proposal_hash.and_then(|hash| round.echoes().get(&hash)) {\n            echoes = self.validator_bit_field(first_validator_idx, echo_map.keys().cloned());\n        }\n\n        // We create a new ID that the responder will use to show it's allowed to do so:\n        let sync_id = self.sent_sync_requests.create_and_register_new_id(rng);\n\n        SyncRequest {\n            round_id,\n            proposal_hash,\n            has_proposal,\n            first_validator_idx,\n            echoes,\n            true_votes,\n            false_votes,\n            active,\n            faulty,\n            instance_id: *self.instance_id(),\n            sync_id,\n        }\n    }\n\n    /// Returns a bit field where each bit stands for a validator: the least significant one for\n    /// `first_idx` and the most significant one for `fist_idx + 127`, wrapping around at the total\n    /// number of validators. The bits of the validators in `index_iter` that fall into that\n    /// range are set to `1`, the others are `0`.\n    fn validator_bit_field(\n        &self,\n        ValidatorIndex(first_idx): ValidatorIndex,\n        index_iter: impl Iterator<Item = ValidatorIndex>,\n    ) -> u128 {\n        let validator_count = self.validators.len() as u32;\n        if first_idx >= validator_count {\n            return 0;\n        }\n        let mut bit_field: u128 = 0;\n        for ValidatorIndex(v_idx) in index_iter {\n            // The validator's bit is v_idx - first_idx, but we wrap around.\n            let idx = match v_idx.overflowing_sub(first_idx) {\n                (idx, false) => idx,\n                // An underflow occurred. 
Add validator_count to wrap back around.\n                (idx, true) => idx.wrapping_add(validator_count),\n            };\n            if idx < u128::BITS {\n                bit_field |= 1_u128.wrapping_shl(idx); // Set bit number i to 1.\n            }\n        }\n        bit_field\n    }\n\n    /// Returns an iterator over all validator indexes whose bits in the `bit_field` are `1`, where\n    /// the least significant one stands for `first_idx` and the most significant one for\n    /// `first_idx + 127`, wrapping around.\n    fn iter_validator_bit_field(\n        &self,\n        ValidatorIndex(mut idx): ValidatorIndex,\n        mut bit_field: u128,\n    ) -> impl Iterator<Item = ValidatorIndex> {\n        let validator_count = self.validators.len() as u32;\n        iter::from_fn(move || {\n            if bit_field == 0 || idx >= validator_count {\n                return None; // No remaining bits with value 1.\n            }\n            let zeros = bit_field.trailing_zeros();\n            // The index of the validator whose bit is 1. 
We shift the bits to the right so that the\n            // least significant bit now corresponds to this one, then we output the index and set\n            // the bit to 0.\n            bit_field = bit_field.wrapping_shr(zeros);\n            bit_field &= !1;\n            idx = match idx.overflowing_add(zeros) {\n                (i, false) => i,\n                // If an overflow occurs, go back via an underflow, so the value modulo\n                // validator_count is correct again.\n                (i, true) => i\n                    .checked_rem(validator_count)?\n                    .wrapping_sub(validator_count),\n            }\n            .checked_rem(validator_count)?;\n            Some(ValidatorIndex(idx))\n        })\n    }\n\n    /// Returns whether `v_idx` is covered by a validator index that starts at `first_idx`.\n    fn validator_bit_field_includes(\n        &self,\n        ValidatorIndex(first_idx): ValidatorIndex,\n        ValidatorIndex(v_idx): ValidatorIndex,\n    ) -> bool {\n        let validator_count = self.validators.len() as u32;\n        if first_idx >= validator_count {\n            return false;\n        }\n        let high_bit = u128::BITS.saturating_sub(1);\n        // The overflow bit is the 33rd bit of the actual sum.\n        let (last_idx, last_idx_overflow) = first_idx.overflowing_add(high_bit);\n        if v_idx >= first_idx {\n            // v_idx is at least first_idx, so it's in the range unless it's higher than the last\n            // index, taking into account its 33rd bit.\n            last_idx_overflow || v_idx <= last_idx\n        } else {\n            // v_idx is less than first_idx. 
But if going from the first to the last index we wrap\n            // around, we might still arrive at v_idx:\n            let (v_idx2, v_idx2_overflow) = v_idx.overflowing_add(validator_count);\n            if v_idx2_overflow == last_idx_overflow {\n                v_idx2 <= last_idx\n            } else {\n                last_idx_overflow\n            }\n        }\n    }\n\n    /// Returns the leader in the specified round.\n    pub(crate) fn leader(&self, round_id: RoundId) -> ValidatorIndex {\n        if let Some(round) = self.round(round_id) {\n            return round.leader();\n        }\n        self.leader_sequence.leader(u64::from(round_id))\n    }\n\n    fn create_message(\n        &mut self,\n        round_id: RoundId,\n        content: Content<C>,\n    ) -> Option<SignedMessage<C>> {\n        let (validator_idx, secret_key) = if let Some(active_validator) = &self.active_validator {\n            (active_validator.idx, &active_validator.secret)\n        } else {\n            return None;\n        };\n        if self.paused {\n            return None;\n        }\n        let already_signed = match &content {\n            Content::Echo(_) => self.has_echoed(round_id, validator_idx),\n            Content::Vote(_) => self.has_voted(round_id, validator_idx),\n        };\n        if already_signed {\n            return None;\n        }\n        let signed_msg = SignedMessage::sign_new(\n            round_id,\n            *self.instance_id(),\n            content,\n            validator_idx,\n            secret_key,\n        );\n        // We only return the new message if we are able to record it. 
If that fails we\n        // wouldn't know about our own message after a restart and risk double-signing.\n        if self.record_entry(&ZugWalEntry::SignedMessage(signed_msg.clone()))\n            && self.add_content(signed_msg.clone())\n        {\n            Some(signed_msg)\n        } else {\n            debug!(\n                our_idx = self.our_idx(),\n                %round_id,\n                ?content,\n                \"couldn't record a signed message in the WAL or add it to the protocol state\"\n            );\n            None\n        }\n    }\n\n    /// If we are an active validator and it would be safe for us to sign this message and we\n    /// haven't signed it before, we sign it, add it to our state and gossip it to the network.\n    ///\n    /// Does not call `update`!\n    fn create_and_gossip_message(\n        &mut self,\n        round_id: RoundId,\n        content: Content<C>,\n    ) -> ProtocolOutcomes<C> {\n        let maybe_signed_msg = self.create_message(round_id, content);\n        maybe_signed_msg\n            .into_iter()\n            .map(|signed_msg| {\n                let message = Message::Signed(signed_msg);\n                ProtocolOutcome::CreatedGossipMessage(SerializedMessage::from_message(&message))\n            })\n            .collect()\n    }\n\n    /// When we receive evidence for a fault, we must notify the rest of the network of this\n    /// evidence. 
Beyond that, we can remove all of the faulty validator's previous information\n    /// from the protocol state.\n    fn handle_fault(\n        &mut self,\n        signed_msg: SignedMessage<C>,\n        validator_id: C::ValidatorId,\n        content2: Content<C>,\n        signature2: C::Signature,\n        now: Timestamp,\n    ) -> ProtocolOutcomes<C> {\n        self.record_entry(&ZugWalEntry::Evidence(\n            signed_msg.clone(),\n            content2,\n            signature2,\n        ));\n        self.handle_fault_no_wal(signed_msg, validator_id, content2, signature2, now)\n    }\n\n    /// Internal to handle_fault, documentation from that applies\n    fn handle_fault_no_wal(\n        &mut self,\n        signed_msg: SignedMessage<C>,\n        validator_id: C::ValidatorId,\n        content2: Content<C>,\n        signature2: C::Signature,\n        now: Timestamp,\n    ) -> ProtocolOutcomes<C> {\n        let validator_idx = signed_msg.validator_idx;\n        warn!(\n            our_idx = self.our_idx(),\n            ?signed_msg,\n            ?content2,\n            id = %validator_id,\n            \"validator double-signed\"\n        );\n        let fault = Fault::Direct(signed_msg, content2, signature2);\n        self.faults.insert(validator_idx, fault);\n        if Some(validator_idx) == self.active_validator.as_ref().map(|av| av.idx) {\n            error!(our_idx = validator_idx.0, \"we are faulty; deactivating\");\n            self.active_validator = None;\n        }\n        self.active[validator_idx] = None;\n        self.progress_detected = true;\n        let mut outcomes = vec![ProtocolOutcome::NewEvidence(validator_id)];\n        if self.faulty_weight() > self.params.ftt() {\n            outcomes.push(ProtocolOutcome::FttExceeded);\n            return outcomes;\n        }\n\n        // Remove all votes and echoes from the faulty validator: They count towards every quorum\n        // now so nobody has to store their messages.\n        for round in 
self.rounds.values_mut() {\n            round.remove_votes_and_echoes(validator_idx);\n        }\n\n        // Recompute quorums; if any new quorums are found, call `update`.\n        for round_id in\n            self.first_non_finalized_round_id..=self.rounds.keys().last().copied().unwrap_or(0)\n        {\n            if !self.rounds.contains_key(&round_id) {\n                continue;\n            }\n            if self.rounds[&round_id].quorum_echoes().is_none() {\n                let hashes = self.rounds[&round_id]\n                    .echoes()\n                    .keys()\n                    .copied()\n                    .collect_vec();\n                if hashes\n                    .into_iter()\n                    .any(|hash| self.check_new_echo_quorum(round_id, hash))\n                {\n                    self.mark_dirty(round_id);\n                }\n            }\n            if self.check_new_vote_quorum(round_id, true)\n                || self.check_new_vote_quorum(round_id, false)\n            {\n                self.mark_dirty(round_id);\n            }\n        }\n        debug!(round_id = ?self.current_round, \"Calling update after handle_fault_no_wal\");\n        outcomes.extend(self.update(now));\n        outcomes\n    }\n\n    /// When we receive a request to synchronize, we must take a careful diff of our state and the\n    /// state in the sync state to ensure we send them exactly what they need to get back up to\n    /// speed in the network.\n    fn handle_sync_request(\n        &self,\n        sync_request: SyncRequest<C>,\n        sender: NodeId,\n    ) -> (ProtocolOutcomes<C>, Option<SerializedMessage>) {\n        let SyncRequest {\n            round_id,\n            mut proposal_hash,\n            mut has_proposal,\n            first_validator_idx,\n            mut echoes,\n            true_votes,\n            false_votes,\n            active,\n            faulty,\n            instance_id,\n            sync_id,\n        } = 
sync_request;\n        if first_validator_idx.0 >= self.validators.len() as u32 {\n            info!(\n                our_idx = self.our_idx(),\n                first_validator_idx = first_validator_idx.0,\n                %sender,\n                \"invalid SyncRequest message\"\n            );\n            return (vec![ProtocolOutcome::Disconnect(sender)], None);\n        }\n\n        // If we don't have that round we have no information the requester is missing.\n        let round = match self.round(round_id) {\n            Some(round) => round,\n            None => return (vec![], None),\n        };\n\n        // If the peer has no or a wrong proposal we assume they don't have any echoes for the\n        // correct one. We don't send them the right proposal, though: they might already have it.\n        if round.quorum_echoes() != proposal_hash && round.quorum_echoes().is_some() {\n            has_proposal = true;\n            echoes = 0;\n            proposal_hash = round.quorum_echoes();\n        }\n\n        // The bit field of validators we know to be faulty.\n        let our_faulty = self.validator_bit_field(first_validator_idx, self.faults.keys().cloned());\n        // The echo signatures and proposal/hash we will send in the response.\n        let mut proposal_or_hash = None;\n        let mut echo_sigs = BTreeMap::new();\n        // The bit field of validators we have echoes from in this round.\n        let mut our_echoes: u128 = 0;\n\n        if let Some(hash) = proposal_hash {\n            if let Some(echo_map) = round.echoes().get(&hash) {\n                // Send them echoes they are missing, but exclude faulty validators.\n                our_echoes =\n                    self.validator_bit_field(first_validator_idx, echo_map.keys().cloned());\n                let missing_echoes = our_echoes & !(echoes | faulty | our_faulty);\n                for v_idx in self.iter_validator_bit_field(first_validator_idx, missing_echoes) {\n                    
echo_sigs.insert(v_idx, echo_map[&v_idx]);\n                }\n                if has_proposal {\n                    proposal_or_hash = Some(Either::Right(hash));\n                } else {\n                    // If they don't have the proposal make sure we include the leader's echo.\n                    let leader_idx = round.leader();\n                    if !self.validator_bit_field_includes(first_validator_idx, leader_idx) {\n                        if let Some(signature) = echo_map.get(&leader_idx) {\n                            echo_sigs.insert(leader_idx, *signature);\n                        }\n                    }\n                    if let Some(proposal) = round.proposal() {\n                        if *proposal.hash() == hash {\n                            proposal_or_hash = Some(Either::Left(proposal.inner().clone()));\n                        }\n                    }\n                }\n            }\n        }\n\n        // Send them votes they are missing, but exclude faulty validators. 
If there already is a\n        // quorum omit the votes that go against the quorum, since they are irrelevant.\n        let our_true_votes: u128 = if round.quorum_votes() == Some(false) {\n            0\n        } else {\n            self.validator_bit_field(first_validator_idx, round.votes(true).keys_some())\n        };\n        let missing_true_votes = our_true_votes & !(true_votes | faulty | our_faulty);\n        let true_vote_sigs = self\n            .iter_validator_bit_field(first_validator_idx, missing_true_votes)\n            .map(|v_idx| (v_idx, round.votes(true)[v_idx].unwrap()))\n            .collect();\n        let our_false_votes: u128 = if round.quorum_votes() == Some(true) {\n            0\n        } else {\n            self.validator_bit_field(first_validator_idx, round.votes(false).keys_some())\n        };\n        let missing_false_votes = our_false_votes & !(false_votes | faulty | our_faulty);\n        let false_vote_sigs = self\n            .iter_validator_bit_field(first_validator_idx, missing_false_votes)\n            .map(|v_idx| (v_idx, round.votes(false)[v_idx].unwrap()))\n            .collect();\n\n        let mut outcomes = vec![];\n\n        // Add evidence for validators they don't know are faulty.\n        let missing_faulty = our_faulty & !faulty;\n        let mut evidence = vec![];\n        for v_idx in self.iter_validator_bit_field(first_validator_idx, missing_faulty) {\n            match &self.faults[&v_idx] {\n                Fault::Banned => {\n                    info!(\n                        our_idx = self.our_idx(),\n                        validator_index = v_idx.0,\n                        %sender,\n                        \"peer disagrees about banned validator; disconnecting\"\n                    );\n                    return (vec![ProtocolOutcome::Disconnect(sender)], None);\n                }\n                Fault::Direct(signed_msg, content2, signature2) => {\n                    evidence.push((signed_msg.clone(), 
*content2, *signature2));\n                }\n                Fault::Indirect => {\n                    let vid = self.validators.id(v_idx).unwrap().clone();\n                    outcomes.push(ProtocolOutcome::SendEvidence(sender, vid));\n                }\n            }\n        }\n\n        // Send any signed messages that prove a validator is not completely inactive. We only\n        // need to do this for validators that the requester doesn't know are active, and that\n        // we haven't already included any signature from in our votes, echoes or evidence.\n        let our_active = self.validator_bit_field(first_validator_idx, self.active.keys_some());\n        let missing_active =\n            our_active & !(active | our_echoes | our_true_votes | our_false_votes | our_faulty);\n        let signed_messages = self\n            .iter_validator_bit_field(first_validator_idx, missing_active)\n            .filter_map(|v_idx| self.active[v_idx].clone())\n            .collect();\n\n        // Send the serialized sync response to the requester\n        let sync_response = SyncResponse {\n            round_id,\n            proposal_or_hash,\n            echo_sigs,\n            true_vote_sigs,\n            false_vote_sigs,\n            signed_messages,\n            evidence,\n            instance_id,\n            sync_id,\n        };\n        (\n            outcomes,\n            Some(SerializedMessage::from_message(&Message::SyncResponse(\n                sync_response,\n            ))),\n        )\n    }\n\n    /// The response containing the parts from the sender's protocol state that we were missing.\n    fn handle_sync_response(\n        &mut self,\n        sync_response: SyncResponse<C>,\n        sender: NodeId,\n        now: Timestamp,\n    ) -> ProtocolOutcomes<C> {\n        let SyncResponse {\n            round_id,\n            proposal_or_hash,\n            echo_sigs,\n            true_vote_sigs,\n            false_vote_sigs,\n            signed_messages,\n  
          evidence,\n            instance_id,\n            sync_id,\n        } = sync_response;\n\n        // We have not asked for any sync response:\n        if self.sent_sync_requests.try_remove_id(sync_id).is_none() {\n            debug!(\n                ?round_id,\n                ?sync_id,\n                \"Disconnecting from peer due to unwanted sync response\"\n            );\n            return vec![ProtocolOutcome::Disconnect(sender)];\n        }\n\n        // `echo_sigs`, `true_vote_sigs` and `false_vote_sigs` ought not to have more items than the\n        // amount of validators. In such a case, the sender is malicious.\n        if echo_sigs\n            .len()\n            .max(true_vote_sigs.len())\n            .max(false_vote_sigs.len())\n            > self.validators.len()\n        {\n            debug!(\n                ?round_id,\n                ?sync_id,\n                \"Disconnecting from peer due to mismatching echos number\"\n            );\n            return vec![ProtocolOutcome::Disconnect(sender)];\n        }\n\n        let local_round_id = self.current_round;\n        let (proposal_hash, proposal) = match proposal_or_hash {\n            Some(Either::Left(proposal)) => {\n                let hashed_prop = HashedProposal::new(proposal);\n                let hash = hashed_prop.hash();\n                debug!(?hash, ?round_id, ?local_round_id, \"Got proposal from peer\");\n                (Some(*hash), Some(hashed_prop.into_inner()))\n            }\n            Some(Either::Right(hash)) => {\n                debug!(\n                    ?hash,\n                    ?round_id,\n                    ?local_round_id,\n                    \"Got proposal hash from peer\"\n                );\n                (Some(hash), None)\n            }\n            None => {\n                debug!(\n                    ?round_id,\n                    ?local_round_id,\n                    \"Got no proposal or hash from peer\"\n                );\n          
      (None, None)\n            }\n        };\n\n        // `signed_messages` is now the previous `signed_messages` + all the messages from\n        // `echo_sigs`, `true_vote_sigs` and `false_vote_sigs`:\n        let signed_messages = {\n            let echo_sigs = proposal_hash\n                .map(move |hash| {\n                    echo_sigs\n                        .into_iter()\n                        .map(move |(validator_idx, signature)| {\n                            (validator_idx, Content::Echo(hash), signature)\n                        })\n                })\n                .into_iter()\n                .flatten();\n            let true_vote_sigs = true_vote_sigs\n                .into_iter()\n                .map(|(validator_idx, signature)| (validator_idx, Content::Vote(true), signature));\n            let false_vote_sigs = false_vote_sigs\n                .into_iter()\n                .map(|(validator_idx, signature)| (validator_idx, Content::Vote(false), signature));\n\n            let sigs = echo_sigs.chain(true_vote_sigs).chain(false_vote_sigs).map(\n                |(validator_idx, content, signature)| SignedMessage {\n                    round_id,\n                    instance_id,\n                    content,\n                    validator_idx,\n                    signature,\n                },\n            );\n\n            signed_messages.into_iter().chain(sigs)\n        };\n\n        let handle_outcomes = move || -> Result<_, FaultySender> {\n            let mut outcomes = vec![];\n            for signed_msg in signed_messages {\n                outcomes.extend(self.handle_signed_message(signed_msg, sender, now)?);\n            }\n            for (signed_msg, content2, signature2) in evidence {\n                outcomes\n                    .extend(self.handle_evidence(signed_msg, content2, signature2, sender, now)?);\n            }\n            if let Some(proposal) = proposal {\n                
outcomes.extend(self.handle_proposal(round_id, proposal, sender, now)?);\n            }\n            Ok(outcomes)\n        };\n\n        outcomes_or_disconnect(handle_outcomes())\n    }\n\n    /// The main entry point for signed echoes or votes. This function mostly authenticates\n    /// and authorizes the message, passing it to [`add_content`] if it passes snuff for the\n    /// main protocol logic.\n    fn handle_signed_message(\n        &mut self,\n        signed_msg: SignedMessage<C>,\n        sender: NodeId,\n        now: Timestamp,\n    ) -> Result<ProtocolOutcomes<C>, FaultySender> {\n        let our_idx = self.our_idx();\n        let validator_idx = signed_msg.validator_idx;\n        let validator_id = if let Some(validator_id) = self.validators.id(validator_idx) {\n            validator_id.clone()\n        } else {\n            warn!(\n                our_idx,\n                ?signed_msg,\n                %sender,\n                \"invalid incoming message: validator index out of range\",\n            );\n            return Err(FaultySender(sender));\n        };\n\n        if self.faults.contains_key(&validator_idx) {\n            debug!(\n                our_idx,\n                ?validator_id,\n                \"ignoring message from faulty validator\"\n            );\n            return Ok(vec![]);\n        }\n\n        if signed_msg.round_id > self.current_round.saturating_add(MAX_FUTURE_ROUNDS) {\n            debug!(our_idx, ?signed_msg, \"dropping message from future round\");\n            return Ok(vec![]);\n        }\n\n        if self.evidence_only {\n            debug!(our_idx, ?signed_msg, \"received an irrelevant message\");\n            return Ok(vec![]);\n        }\n\n        if let Some(round) = self.round(signed_msg.round_id) {\n            if round.contains(&signed_msg.content, validator_idx) {\n                debug!(our_idx, ?signed_msg, %sender, \"received a duplicated message\");\n                return Ok(vec![]);\n            }\n  
      }\n\n        if !signed_msg.verify_signature(&validator_id) {\n            warn!(our_idx, ?signed_msg, %sender, \"invalid signature\",);\n            return Err(FaultySender(sender));\n        }\n\n        if let Some((content2, signature2)) = self.detect_fault(&signed_msg) {\n            let evidence_msg = Message::Evidence(signed_msg.clone(), content2, signature2);\n            let mut outcomes =\n                self.handle_fault(signed_msg, validator_id, content2, signature2, now);\n            outcomes.push(ProtocolOutcome::CreatedGossipMessage(\n                SerializedMessage::from_message(&evidence_msg),\n            ));\n            return Ok(outcomes);\n        }\n\n        if self.faults.contains_key(&signed_msg.validator_idx) {\n            debug!(\n                our_idx,\n                ?signed_msg,\n                \"dropping message from faulty validator\"\n            );\n            return Ok(vec![]);\n        }\n\n        self.record_entry(&ZugWalEntry::SignedMessage(signed_msg.clone()));\n        if self.add_content(signed_msg) {\n            debug!(round_id = ?self.current_round, \"Calling update after add_content\");\n            Ok(self.update(now))\n        } else {\n            Ok(vec![])\n        }\n    }\n\n    /// Verifies an evidence message that is supposed to contain two conflicting sigantures by the\n    /// same validator, and then calls `handle_fault`.\n    fn handle_evidence(\n        &mut self,\n        signed_msg: SignedMessage<C>,\n        content2: Content<C>,\n        signature2: C::Signature,\n        sender: NodeId,\n        now: Timestamp,\n    ) -> Result<ProtocolOutcomes<C>, FaultySender> {\n        let our_idx = self.our_idx();\n        let validator_idx = signed_msg.validator_idx;\n        if let Some(Fault::Direct(..)) = self.faults.get(&validator_idx) {\n            return Ok(vec![]); // Validator is already known to be faulty.\n        }\n        let validator_id = if let Some(validator_id) = 
self.validators.id(validator_idx) {\n            validator_id.clone()\n        } else {\n            warn!(\n                our_idx,\n                ?signed_msg,\n                %sender,\n                \"invalid incoming evidence: validator index out of range\",\n            );\n            return Err(FaultySender(sender));\n        };\n        if !signed_msg.content.contradicts(&content2) {\n            warn!(\n                our_idx,\n                ?signed_msg,\n                ?content2,\n                %sender,\n                \"invalid evidence: contents don't conflict\",\n            );\n            return Err(FaultySender(sender));\n        }\n        if !signed_msg.verify_signature(&validator_id)\n            || !signed_msg\n                .with(content2, signature2)\n                .verify_signature(&validator_id)\n        {\n            warn!(\n                our_idx,\n                ?signed_msg,\n                ?content2,\n                %sender,\n                \"invalid signature in evidence\",\n            );\n            return Err(FaultySender(sender));\n        }\n        Ok(self.handle_fault(signed_msg, validator_id, content2, signature2, now))\n    }\n\n    /// Checks whether an incoming proposal should be added to the protocol state and starts\n    /// validation.\n    fn handle_proposal(\n        &mut self,\n        round_id: RoundId,\n        proposal: Proposal<C>,\n        sender: NodeId,\n        now: Timestamp,\n    ) -> Result<ProtocolOutcomes<C>, FaultySender> {\n        let leader_idx = self.leader(round_id);\n        let our_idx = self.our_idx();\n\n        macro_rules! log_proposal {\n            ($lvl:expr, $prop:expr, $msg:expr $(,)?) 
=> {\n                event!(\n                    $lvl,\n                    our_idx,\n                    round_id,\n                    parent = $prop.maybe_parent_round_id,\n                    timestamp = %$prop.timestamp,\n                    leader_idx = leader_idx.0,\n                    ?sender,\n                    \"{}\",\n                    $msg\n                );\n            }\n        }\n\n        if let Some(parent_round_id) = proposal.maybe_parent_round_id {\n            if parent_round_id >= round_id {\n                log_proposal!(\n                    Level::WARN,\n                    proposal,\n                    \"invalid proposal: parent is not from an earlier round\",\n                );\n                return Err(FaultySender(sender));\n            }\n        }\n\n        if proposal.timestamp > now.saturating_add(self.config.clock_tolerance) {\n            log_proposal!(\n                Level::TRACE,\n                proposal,\n                \"received a proposal with a timestamp far in the future; dropping\",\n            );\n            return Ok(vec![]);\n        }\n        if proposal.timestamp > now {\n            log_proposal!(\n                Level::TRACE,\n                proposal,\n                \"received a proposal with a timestamp slightly in the future\",\n            );\n        }\n        if (proposal.maybe_parent_round_id.is_none() || proposal.maybe_block.is_none())\n            != proposal.inactive.is_none()\n        {\n            log_proposal!(\n                Level::WARN,\n                proposal,\n                \"invalid proposal: inactive must be present in all except the first and dummy proposals\",\n            );\n            return Err(FaultySender(sender));\n        }\n        if let Some(inactive) = &proposal.inactive {\n            if inactive\n                .iter()\n                .any(|idx| *idx == leader_idx || self.validators.id(*idx).is_none())\n            {\n                
log_proposal!(\n                    Level::WARN,\n                    proposal,\n                    \"invalid proposal: invalid inactive validator index\",\n                );\n                return Err(FaultySender(sender));\n            }\n        }\n\n        let hashed_prop = HashedProposal::new(proposal);\n\n        if self\n            .round(round_id)\n            .is_none_or(|round| !round.has_echoes_for_proposal(hashed_prop.hash()))\n        {\n            log_proposal!(\n                Level::DEBUG,\n                hashed_prop.inner(),\n                \"dropping proposal: missing echoes\"\n            );\n            return Ok(vec![]);\n        }\n\n        if self.round(round_id).and_then(Round::proposal) == Some(&hashed_prop) {\n            log_proposal!(\n                Level::DEBUG,\n                hashed_prop.inner(),\n                \"dropping proposal: we already have it\"\n            );\n            return Ok(vec![]);\n        }\n\n        let ancestor_values = if let Some(parent_round_id) = hashed_prop.maybe_parent_round_id() {\n            if let Some(ancestor_values) = self.ancestor_values(parent_round_id) {\n                ancestor_values\n            } else {\n                log_proposal!(\n                    Level::DEBUG,\n                    hashed_prop.inner(),\n                    \"storing proposal for later; still missing ancestors\",\n                );\n                self.proposals_waiting_for_parent\n                    .entry(parent_round_id)\n                    .or_default()\n                    .entry(hashed_prop)\n                    .or_default()\n                    .insert((round_id, sender));\n                return Ok(vec![]);\n            }\n        } else {\n            vec![]\n        };\n\n        let mut outcomes = self.validate_proposal(round_id, hashed_prop, ancestor_values, sender);\n        debug!(round_id = ?self.current_round, \"Calling update after handle_proposal\");\n        
outcomes.extend(self.update(now));\n        Ok(outcomes)\n    }\n\n    /// Updates the round's outcome and returns `true` if there is a new quorum of echoes for the\n    /// given hash.\n    fn check_new_echo_quorum(&mut self, round_id: RoundId, hash: C::Hash) -> bool {\n        if self.rounds.contains_key(&round_id)\n            && self.rounds[&round_id].quorum_echoes().is_none()\n            && self.is_quorum(self.rounds[&round_id].echoes()[&hash].keys().copied())\n        {\n            self.round_mut(round_id).set_quorum_echoes(hash);\n            return true;\n        }\n        false\n    }\n\n    /// Updates the round's outcome and returns `true` if there is a new quorum of votes with the\n    /// given value.\n    fn check_new_vote_quorum(&mut self, round_id: RoundId, vote: bool) -> bool {\n        if self.rounds.contains_key(&round_id)\n            && self.rounds[&round_id].quorum_votes().is_none()\n            && self.is_quorum(self.rounds[&round_id].votes(vote).keys_some())\n        {\n            self.round_mut(round_id).set_quorum_votes(vote);\n            let our_idx = self.our_idx();\n            if !vote {\n                info!(our_idx, %round_id, \"round is now skippable\");\n            } else if self.rounds[&round_id].accepted_proposal().is_none() {\n                info!(our_idx, %round_id, \"round committed; no accepted proposal yet\");\n            }\n            return true;\n        }\n        false\n    }\n\n    /// Adds a signed message to the WAL such that we can avoid double signing upon recovery if the\n    /// node shuts down. 
Returns `true` if the message was added successfully.\n    fn record_entry(&mut self, entry: &ZugWalEntry<C>) -> bool {\n        match self.write_wal.as_mut().map(|ww| ww.record_entry(entry)) {\n            None => false,\n            Some(Ok(())) => true,\n            Some(Err(err)) => {\n                self.active_validator = None;\n                self.write_wal = None;\n                error!(\n                    our_idx = self.our_idx(),\n                    %err,\n                    \"could not record a signed message to the WAL; deactivating\"\n                );\n                false\n            }\n        }\n    }\n\n    /// Consumes all of the signed messages we've previously recorded in our write ahead log, and\n    /// sets up the log for appending future messages. If it fails it prints an error log and\n    /// the WAL remains `None`: That way we can still observe the protocol but not participate as\n    /// a validator.\n    pub(crate) fn open_wal(&mut self, wal_file: PathBuf, now: Timestamp) -> ProtocolOutcomes<C> {\n        let our_idx = self.our_idx();\n        // Open the file for reading.\n        let mut read_wal = match ReadWal::<ZugWalEntry<C>>::new(&wal_file) {\n            Ok(read_wal) => read_wal,\n            Err(err) => {\n                error!(our_idx, %err, \"could not create a ReadWal using this file\");\n                return vec![];\n            }\n        };\n\n        let mut outcomes = vec![];\n\n        // Read all messages recorded in the file.\n        loop {\n            match read_wal.read_next_entry() {\n                Ok(Some(next_entry)) => match next_entry {\n                    ZugWalEntry::SignedMessage(next_message) => {\n                        if !self.add_content(next_message) {\n                            error!(our_idx, \"Could not add content from WAL.\");\n                            return outcomes;\n                        }\n                    }\n                    
ZugWalEntry::Proposal(next_proposal, corresponding_round_id) => {\n                        if self\n                            .round(corresponding_round_id)\n                            .and_then(Round::proposal)\n                            .map(HashedProposal::inner)\n                            == Some(&next_proposal)\n                        {\n                            warn!(our_idx, \"Proposal from WAL is duplicated.\");\n                            continue;\n                        }\n                        let mut ancestor_values = vec![];\n                        if let Some(mut round_id) = next_proposal.maybe_parent_round_id {\n                            loop {\n                                let proposal = if let Some(proposal) =\n                                    self.round(round_id).and_then(Round::proposal)\n                                {\n                                    proposal\n                                } else {\n                                    error!(our_idx, \"Proposal from WAL is missing ancestors.\");\n                                    return outcomes;\n                                };\n                                if self.round(round_id).and_then(Round::quorum_echoes)\n                                    != Some(*proposal.hash())\n                                {\n                                    error!(our_idx, \"Proposal from WAL has unaccepted ancestor.\");\n                                    return outcomes;\n                                }\n                                ancestor_values.extend(proposal.maybe_block().cloned());\n                                match proposal.maybe_parent_round_id() {\n                                    None => break,\n                                    Some(parent_round_id) => round_id = parent_round_id,\n                                }\n                            }\n                        }\n                        if self\n                            
.round_mut(corresponding_round_id)\n                            .insert_proposal(HashedProposal::new(next_proposal.clone()))\n                        {\n                            self.mark_dirty(corresponding_round_id);\n                            if let Some(block) = next_proposal.maybe_block {\n                                let block_context =\n                                    BlockContext::new(next_proposal.timestamp, ancestor_values);\n                                let proposed_block = ProposedBlock::new(block, block_context);\n                                outcomes\n                                    .push(ProtocolOutcome::HandledProposedBlock(proposed_block));\n                            }\n                        }\n                    }\n                    ZugWalEntry::Evidence(\n                        conflicting_message,\n                        conflicting_message_content,\n                        conflicting_signature,\n                    ) => {\n                        let validator_id = {\n                            if let Some(validator_id) =\n                                self.validators.id(conflicting_message.validator_idx)\n                            {\n                                validator_id.clone()\n                            } else {\n                                warn!(\n                                    our_idx,\n                                    index = conflicting_message.validator_idx.0,\n                                    \"No validator present at this index, despite holding \\\n                                    conflicting messages for it in the WAL\"\n                                );\n                                continue;\n                            }\n                        };\n                        let new_outcomes = self.handle_fault_no_wal(\n                            conflicting_message,\n                            validator_id,\n                            
conflicting_message_content,\n                            conflicting_signature,\n                            now,\n                        );\n                        // Ignore most outcomes: These have been processed before the restart.\n                        outcomes.extend(new_outcomes.into_iter().filter(|outcome| match outcome {\n                            ProtocolOutcome::FttExceeded\n                            | ProtocolOutcome::WeAreFaulty\n                            | ProtocolOutcome::FinalizedBlock(_)\n                            | ProtocolOutcome::ValidateConsensusValue { .. }\n                            | ProtocolOutcome::HandledProposedBlock(..)\n                            | ProtocolOutcome::NewEvidence(_) => true,\n                            ProtocolOutcome::SendEvidence(_, _)\n                            | ProtocolOutcome::CreatedGossipMessage(_)\n                            | ProtocolOutcome::CreatedTargetedMessage(_, _)\n                            | ProtocolOutcome::CreatedMessageToRandomPeer(_)\n                            | ProtocolOutcome::CreatedRequestToRandomValidator(_)\n                            | ProtocolOutcome::ScheduleTimer(_, _)\n                            | ProtocolOutcome::QueueAction(_)\n                            | ProtocolOutcome::CreateNewBlock(_, _)\n                            | ProtocolOutcome::DoppelgangerDetected\n                            | ProtocolOutcome::Disconnect(_) => false,\n                        }));\n                    }\n                },\n                Ok(None) => {\n                    break;\n                }\n                Err(err) => {\n                    error!(\n                        our_idx,\n                        ?err,\n                        \"couldn't read a message from the WAL: was this node recently shut down?\"\n                    );\n                    return outcomes; // Not setting WAL file; won't actively participate.\n                }\n            }\n        
}\n\n        // Open the file for appending.\n        match WriteWal::new(&wal_file) {\n            Ok(write_wal) => self.write_wal = Some(write_wal),\n            Err(err) => error!(\n                our_idx,\n                ?err,\n                ?wal_file,\n                \"could not create a WAL using this file\"\n            ),\n        }\n        outcomes\n    }\n\n    /// Adds a signed message content to the state.\n    /// Does not call `update` and does not detect faults.\n    fn add_content(&mut self, signed_msg: SignedMessage<C>) -> bool {\n        if self.active[signed_msg.validator_idx]\n            .as_ref()\n            .is_none_or(|old_msg| old_msg.round_id < signed_msg.round_id)\n        {\n            if self.active[signed_msg.validator_idx].is_none() {\n                // We considered this validator inactive until now, and didn't accept proposals that\n                // didn't have them in the `inactive` field. Mark all relevant rounds as dirty so\n                // that the next `update` call checks all proposals again.\n                self.mark_dirty(self.first_non_finalized_round_id);\n            }\n            // Save the latest signed message for participation tracking purposes.\n            self.active[signed_msg.validator_idx] = Some(signed_msg.clone());\n        }\n        let SignedMessage {\n            round_id,\n            instance_id: _,\n            content,\n            validator_idx,\n            signature,\n        } = signed_msg;\n        let our_idx = self.our_idx();\n        match content {\n            Content::Echo(hash) => {\n                if self\n                    .round_mut(round_id)\n                    .insert_echo(hash, validator_idx, signature)\n                {\n                    debug!(our_idx, round_id, %hash, validator = validator_idx.0, \"inserted echo\");\n                    self.progress_detected = true;\n                    if self.check_new_echo_quorum(round_id, hash) {\n                      
  self.mark_dirty(round_id);\n                    }\n                    return true;\n                }\n            }\n            Content::Vote(vote) => {\n                if self\n                    .round_mut(round_id)\n                    .insert_vote(vote, validator_idx, signature)\n                {\n                    debug!(\n                        our_idx,\n                        round_id,\n                        vote,\n                        validator = validator_idx.0,\n                        \"inserted vote\"\n                    );\n                    self.progress_detected = true;\n                    if self.check_new_vote_quorum(round_id, vote) {\n                        self.mark_dirty(round_id);\n                    }\n                    return true;\n                }\n            }\n        }\n        false\n    }\n\n    /// If there is a signature for conflicting content, returns the content and signature.\n    fn detect_fault(&self, signed_msg: &SignedMessage<C>) -> Option<(Content<C>, C::Signature)> {\n        let round = self.round(signed_msg.round_id)?;\n        match &signed_msg.content {\n            Content::Echo(hash) => round.echoes().iter().find_map(|(hash2, echo_map)| {\n                if hash2 == hash {\n                    return None;\n                }\n                echo_map\n                    .get(&signed_msg.validator_idx)\n                    .map(|sig| (Content::Echo(*hash2), *sig))\n            }),\n            Content::Vote(vote) => {\n                round.votes(!vote)[signed_msg.validator_idx].map(|sig| (Content::Vote(!vote), sig))\n            }\n        }\n    }\n\n    /// Sets an update timer for the given timestamp, unless an earlier timer is already set.\n    fn schedule_update(&mut self, timestamp: Timestamp) -> ProtocolOutcomes<C> {\n        debug!(our_idx = self.our_idx(), %timestamp, \"schedule update\");\n        if self.next_scheduled_update > timestamp {\n            self.next_scheduled_update 
= timestamp;\n            vec![ProtocolOutcome::ScheduleTimer(timestamp, TIMER_ID_UPDATE)]\n        } else {\n            vec![]\n        }\n    }\n\n    /// Updates the state and sends appropriate messages after a signature has been added to a\n    /// round.\n    fn update(&mut self, now: Timestamp) -> ProtocolOutcomes<C> {\n        let mut outcomes = vec![];\n        if self.finalized_switch_block() || self.faulty_weight() > self.params.ftt() {\n            return outcomes; // This era has ended or the FTT was exceeded.\n        }\n        if let Some(dirty_round_id) = self.maybe_dirty_round_id {\n            for round_id in dirty_round_id.. {\n                outcomes.extend(self.update_round(round_id, now));\n                if round_id >= self.current_round {\n                    break;\n                }\n            }\n        }\n        self.maybe_dirty_round_id = None;\n        outcomes\n    }\n\n    /// Updates a round and sends appropriate messages.\n    fn update_round(&mut self, round_id: RoundId, now: Timestamp) -> ProtocolOutcomes<C> {\n        self.create_round(round_id);\n        let mut outcomes = vec![];\n        let mut voted_on_round_outcome = false;\n\n        // If we have a proposal, echo it.\n        if let Some(&hash) = self.rounds[&round_id].proposal().map(HashedProposal::hash) {\n            outcomes.extend(self.create_and_gossip_message(round_id, Content::Echo(hash)));\n        }\n\n        // Update the round outcome if there is a new accepted proposal.\n        if self.update_accepted_proposal(round_id) {\n            if round_id == self.current_round {\n                self.update_proposal_timeout(now);\n            }\n            // Vote for finalizing this proposal.\n            outcomes.extend(self.create_and_gossip_message(round_id, Content::Vote(true)));\n            voted_on_round_outcome = true;\n            // Proposed descendants of this proposal can now be validated.\n            if let Some(proposals) = 
self.proposals_waiting_for_parent.remove(&round_id) {\n                let ancestor_values = self\n                    .ancestor_values(round_id)\n                    .expect(\"missing ancestors of accepted proposal\");\n                for (proposal, rounds_and_senders) in proposals {\n                    for (proposal_round_id, sender) in rounds_and_senders {\n                        outcomes.extend(self.validate_proposal(\n                            proposal_round_id,\n                            proposal.clone(),\n                            ancestor_values.clone(),\n                            sender,\n                        ));\n                    }\n                }\n            }\n        }\n\n        if round_id == self.current_round {\n            let our_idx = self.our_idx();\n            let current_round_start = self.current_round_start;\n            let current_timeout = current_round_start.saturating_add(self.proposal_timeout());\n            if now >= current_timeout {\n                debug!(?round_id, \"Voting false due to timeout\");\n                let msg_outcomes = self.create_and_gossip_message(round_id, Content::Vote(false));\n                voted_on_round_outcome = true;\n                // Only update the proposal timeout if this is the first time we timed out in this\n                // round\n                if !msg_outcomes.is_empty() {\n                    self.update_proposal_timeout(now);\n                }\n                outcomes.extend(msg_outcomes);\n            } else if self.faults.contains_key(&self.leader(round_id)) {\n                debug!(?round_id, \"Voting false due to faults\");\n                outcomes.extend(self.create_and_gossip_message(round_id, Content::Vote(false)));\n                voted_on_round_outcome = true;\n            }\n            if self.is_skippable_round(round_id) || self.has_accepted_proposal(round_id) {\n                self.current_round_start = Timestamp::MAX;\n                
self.current_round = self.current_round.saturating_add(1);\n                info!(\n                    our_idx,\n                    round_id = self.current_round,\n                    leader = self.leader(self.current_round).0,\n                    \"started a new round\"\n                );\n            } else if let Some((maybe_parent_round_id, timestamp)) = self.suitable_parent_round(now)\n            {\n                if now < timestamp {\n                    // The first opportunity to make a proposal is in the future; check again at\n                    // that time.\n                    debug!(our_idx, %now, %timestamp, \"update_round - schedule update 1\");\n                    outcomes.extend(self.schedule_update(timestamp));\n                } else if self.current_round_start > now {\n                    // A proposal could be made now. Start the timer and propose if leader.\n                    self.current_round_start = now;\n                    outcomes.extend(self.propose_if_leader(maybe_parent_round_id, now));\n                    let current_timeout = self\n                        .current_round_start\n                        .saturating_add(self.proposal_timeout());\n                    if current_timeout > now {\n                        debug!(our_idx, %now, %current_timeout, \"update_round - schedule update 2\");\n                        outcomes.extend(self.schedule_update(current_timeout));\n                    }\n                } else if !voted_on_round_outcome {\n                    // If we weren't able to come to a voting conclusion we need to reschedule\n                    // the check in future.\n                    debug!(round_id, \"Scheduling proposal recheck\");\n                    let updated_timestamp = now.saturating_add(self.proposal_timeout());\n                    outcomes.extend(self.schedule_update(updated_timestamp));\n                }\n            } else {\n                error!(our_idx, \"No suitable parent for 
current round\");\n            }\n        }\n\n        // If the round has an accepted proposal and is committed, it is finalized.\n        if self.has_accepted_proposal(round_id) && self.is_committed_round(round_id) {\n            outcomes.extend(self.finalize_round(round_id));\n        }\n        outcomes\n    }\n\n    /// If a new proposal is accepted in that round, adds it to the round outcome and returns\n    /// `true`.\n    fn update_accepted_proposal(&mut self, round_id: RoundId) -> bool {\n        if self.has_accepted_proposal(round_id) {\n            return false; // We already have an accepted proposal.\n        }\n        let proposal = if let Some(proposal) = self.round(round_id).and_then(Round::proposal) {\n            proposal\n        } else {\n            return false; // We don't have a proposal.\n        };\n        if self.round(round_id).and_then(Round::quorum_echoes) != Some(*proposal.hash()) {\n            return false; // We don't have a quorum of echoes.\n        }\n        if let Some(inactive) = proposal.inactive() {\n            for (idx, _) in self.validators.enumerate_ids() {\n                if !inactive.contains(&idx)\n                    && self.active[idx].is_none()\n                    && !self.faults.contains_key(&idx)\n                {\n                    // The proposal claims validator idx is active but we haven't seen anything from\n                    // them yet.\n                    return false;\n                }\n            }\n        }\n        let (first_skipped_round_id, rel_height) =\n            if let Some(parent_round_id) = proposal.maybe_parent_round_id() {\n                if let Some((parent_height, _)) = self\n                    .round(parent_round_id)\n                    .and_then(Round::accepted_proposal)\n                {\n                    (\n                        parent_round_id.saturating_add(1),\n                        parent_height.saturating_add(1),\n                    )\n                
} else {\n                    return false; // Parent is not accepted yet.\n                }\n            } else {\n                (0, 0)\n            };\n        if (first_skipped_round_id..round_id)\n            .any(|skipped_round_id| !self.is_skippable_round(skipped_round_id))\n        {\n            return false; // A skipped round is not skippable yet.\n        }\n\n        // We have a proposal with accepted parent, a quorum of echoes, and all rounds since the\n        // parent are skippable. That means the proposal is now accepted.\n        self.round_mut(round_id)\n            .set_accepted_proposal_height(rel_height);\n        true\n    }\n\n    /// Sends a proposal to the `BlockValidator` component for validation. If no validation is\n    /// needed, immediately calls `insert_proposal`.\n    fn validate_proposal(\n        &mut self,\n        round_id: RoundId,\n        proposal: HashedProposal<C>,\n        ancestor_values: Vec<C::ConsensusValue>,\n        sender: NodeId,\n    ) -> ProtocolOutcomes<C> {\n        let our_idx = self.our_idx();\n        if proposal.timestamp() < self.params.start_timestamp() {\n            info!(\n                our_idx,\n                \"rejecting proposal with timestamp earlier than era start\"\n            );\n            return vec![];\n        }\n        if let Some((_, parent_proposal)) = proposal\n            .maybe_parent_round_id()\n            .and_then(|parent_round_id| self.accepted_proposal(parent_round_id))\n        {\n            let min_block_time = self.params.min_block_time();\n            if proposal.timestamp() < parent_proposal.timestamp().saturating_add(min_block_time) {\n                info!(\n                    our_idx,\n                    \"rejecting proposal with timestamp earlier than the parent\"\n                );\n                return vec![];\n            }\n            if let (Some(inactive), Some(parent_inactive)) =\n                (proposal.inactive(), 
parent_proposal.inactive())\n            {\n                if !inactive.is_subset(parent_inactive) {\n                    info!(\n                        our_idx,\n                        \"rejecting proposal with more inactive validators than parent\"\n                    );\n                    return vec![];\n                }\n            }\n        }\n        let block_context = BlockContext::new(proposal.timestamp(), ancestor_values);\n        if let Some(block) = proposal\n            .maybe_block()\n            .filter(|value| value.needs_validation())\n            .cloned()\n        {\n            self.log_proposal(&proposal, round_id, \"requesting proposal validation\");\n            let proposed_block = ProposedBlock::new(block, block_context);\n            if self\n                .proposals_waiting_for_validation\n                .entry(proposed_block.clone())\n                .or_default()\n                .insert((round_id, proposal, sender))\n            {\n                return vec![ProtocolOutcome::ValidateConsensusValue {\n                    sender,\n                    proposed_block,\n                }];\n            }\n        } else {\n            self.log_proposal(&proposal, round_id, \"proposal does not need validation\");\n            if self.round_mut(round_id).insert_proposal(proposal.clone()) {\n                self.record_entry(&ZugWalEntry::Proposal(proposal.inner().clone(), round_id));\n                self.progress_detected = true;\n                self.mark_dirty(round_id);\n                if let Some(block) = proposal.maybe_block().cloned() {\n                    let proposed_block = ProposedBlock::new(block, block_context);\n                    return vec![ProtocolOutcome::HandledProposedBlock(proposed_block)];\n                }\n            }\n        }\n        vec![] // Proposal was already known.\n    }\n\n    /// Finalizes the round, notifying the rest of the node of the finalized block\n    /// if it contained one.\n   
 fn finalize_round(&mut self, round_id: RoundId) -> ProtocolOutcomes<C> {\n        let mut outcomes = vec![];\n        if round_id < self.first_non_finalized_round_id {\n            return outcomes; // This round was already finalized.\n        }\n        let (relative_height, proposal) = if let Some((height, proposal)) =\n            self.round(round_id).and_then(Round::accepted_proposal)\n        {\n            (height, proposal.clone())\n        } else {\n            error!(\n                our_idx = self.our_idx(),\n                round_id, \"missing finalized proposal; this is a bug\"\n            );\n            return outcomes;\n        };\n        if let Some(parent_round_id) = proposal.maybe_parent_round_id() {\n            // Output the parent first if it isn't already finalized.\n            outcomes.extend(self.finalize_round(parent_round_id));\n        }\n        for prune_round_id in self.first_non_finalized_round_id..round_id {\n            info!(\n                our_idx = self.our_idx(),\n                round_id = prune_round_id,\n                \"skipped round\"\n            );\n            self.round_mut(prune_round_id).prune_skipped();\n        }\n        self.first_non_finalized_round_id = round_id.saturating_add(1);\n        let value = if let Some(block) = proposal.maybe_block() {\n            block.clone()\n        } else {\n            return outcomes; // This era's last block is already finalized.\n        };\n        let proposer = self\n            .validators\n            .id(self.leader(round_id))\n            .expect(\"validator not found\")\n            .clone();\n        let terminal_block_data = self.accepted_switch_block(round_id).then(|| {\n            let inactive_validators = proposal.inactive().map_or_else(Vec::new, |inactive| {\n                inactive\n                    .iter()\n                    .filter_map(|idx| self.validators.id(*idx))\n                    .cloned()\n                    .collect()\n            
});\n            TerminalBlockData {\n                inactive_validators,\n            }\n        });\n        let finalized_block = FinalizedBlock {\n            value,\n            timestamp: proposal.timestamp(),\n            relative_height,\n            // Faulty validators are already reported to the era supervisor via\n            // validators_with_evidence.\n            // TODO: Is this field entirely obsoleted by accusations?\n            equivocators: vec![],\n            terminal_block_data,\n            proposer,\n        };\n        outcomes.push(ProtocolOutcome::FinalizedBlock(finalized_block));\n        outcomes\n    }\n\n    /// Makes a new proposal if we are the current round leader.\n    fn propose_if_leader(\n        &mut self,\n        maybe_parent_round_id: Option<RoundId>,\n        now: Timestamp,\n    ) -> ProtocolOutcomes<C> {\n        match &self.active_validator {\n            Some(active_validator) if active_validator.idx == self.leader(self.current_round) => {}\n            _ => return vec![], // Not the current round leader.\n        }\n        match self.pending_proposal {\n            // We already requested a block to propose.\n            Some((_, round_id, _)) if round_id == self.current_round => return vec![],\n            _ => {}\n        }\n        if self.round_mut(self.current_round).has_proposal() {\n            return vec![]; // We already made a proposal.\n        }\n        let ancestor_values = match maybe_parent_round_id {\n            Some(parent_round_id)\n                if self.accepted_switch_block(parent_round_id)\n                    || self.accepted_dummy_proposal(parent_round_id) =>\n            {\n                // One of the ancestors is the switch block, so this proposal has no block.\n                return self.create_echo_and_proposal(Proposal::dummy(now, parent_round_id));\n            }\n            Some(parent_round_id) => self\n                .ancestor_values(parent_round_id)\n                
.expect(\"missing ancestor value\"),\n            None => vec![],\n        };\n        // Request a block payload to propose.\n        let block_context = BlockContext::new(now, ancestor_values);\n        self.pending_proposal = Some((\n            block_context.clone(),\n            self.current_round,\n            maybe_parent_round_id,\n        ));\n        vec![ProtocolOutcome::CreateNewBlock(\n            block_context,\n            now.saturating_add(TimeDiff::from_millis(self.proposal_timeout_millis as u64)),\n        )]\n    }\n\n    /// Creates a new proposal message in the current round, and a corresponding signed echo,\n    /// inserts them into our protocol state and gossips them.\n    fn create_echo_and_proposal(&mut self, proposal: Proposal<C>) -> ProtocolOutcomes<C> {\n        let round_id = self.current_round;\n        let hashed_prop = HashedProposal::new(proposal.clone());\n        let echo_content = Content::Echo(*hashed_prop.hash());\n        let echo = if let Some(echo) = self.create_message(round_id, echo_content) {\n            echo\n        } else {\n            return vec![];\n        };\n        let prop_msg = Message::Proposal {\n            round_id,\n            proposal,\n            instance_id: *self.instance_id(),\n            echo,\n        };\n        if !self.record_entry(&ZugWalEntry::Proposal(\n            hashed_prop.inner().clone(),\n            round_id,\n        )) {\n            error!(\n                our_idx = self.our_idx(),\n                \"could not record own proposal in WAL\"\n            );\n            vec![]\n        } else if self.round_mut(round_id).insert_proposal(hashed_prop) {\n            self.mark_dirty(round_id);\n            vec![ProtocolOutcome::CreatedGossipMessage(\n                SerializedMessage::from_message(&prop_msg),\n            )]\n        } else {\n            vec![]\n        }\n    }\n\n    /// Returns a parent if a block with that parent could be proposed in the current round, and 
the\n    /// earliest possible timestamp for a new proposal.\n    fn suitable_parent_round(&self, now: Timestamp) -> Option<(Option<RoundId>, Timestamp)> {\n        let min_block_time = self.params.min_block_time();\n        let mut maybe_parent = None;\n        // We iterate through the rounds before the current one, in reverse order.\n        for round_id in (0..self.current_round).rev() {\n            if let Some((_, parent)) = self.accepted_proposal(round_id) {\n                // All rounds higher than this one are skippable. When the accepted proposal's\n                // timestamp is old enough it can be used as a parent.\n                let timestamp = parent.timestamp().saturating_add(min_block_time);\n                if now >= timestamp {\n                    return Some((Some(round_id), timestamp));\n                }\n                if maybe_parent.is_none_or(|(_, timestamp2)| timestamp2 > timestamp) {\n                    maybe_parent = Some((Some(round_id), timestamp));\n                }\n            }\n            if !self.is_skippable_round(round_id) {\n                return maybe_parent;\n            }\n        }\n        // All rounds are skippable. 
When the era starts block 0 can be proposed.\n        Some((None, self.params.start_timestamp()))\n    }\n\n    /// Returns whether a quorum has voted for `false`.\n    fn is_skippable_round(&self, round_id: RoundId) -> bool {\n        self.rounds.get(&round_id).and_then(Round::quorum_votes) == Some(false)\n    }\n\n    /// Returns whether a quorum has voted for `true`.\n    fn is_committed_round(&self, round_id: RoundId) -> bool {\n        self.rounds.get(&round_id).and_then(Round::quorum_votes) == Some(true)\n    }\n\n    /// Returns whether a round has an accepted proposal.\n    fn has_accepted_proposal(&self, round_id: RoundId) -> bool {\n        self.round(round_id)\n            .and_then(Round::accepted_proposal)\n            .is_some()\n    }\n\n    /// Returns the accepted proposal, if any, together with its height.\n    fn accepted_proposal(&self, round_id: RoundId) -> Option<(u64, &HashedProposal<C>)> {\n        self.round(round_id)?.accepted_proposal()\n    }\n\n    /// Returns the current proposal timeout as a `TimeDiff`.\n    fn proposal_timeout(&self) -> TimeDiff {\n        TimeDiff::from_millis(self.proposal_timeout_millis as u64)\n    }\n\n    /// Updates our `proposal_timeout` based on the latest measured actual delay from the start of\n    /// the current round until a proposal was accepted or we voted to skip the round.\n    fn update_proposal_timeout(&mut self, now: Timestamp) {\n        let proposal_delay_millis = now.saturating_diff(self.current_round_start).millis() as f64;\n        let grace_period_factor = self.config.proposal_grace_period as f64 / 100.0 + 1.0;\n        let target_timeout = proposal_delay_millis * grace_period_factor;\n        let inertia = self.config.proposal_timeout_inertia as f64;\n        let ftt = self.params.ftt().0 as f64 / self.validators.total_weight().0 as f64;\n        if target_timeout > self.proposal_timeout_millis {\n            self.proposal_timeout_millis *= (1.0 / (inertia * (1.0 - ftt))).exp2();\n         
   self.proposal_timeout_millis = self.proposal_timeout_millis.min(target_timeout);\n        } else {\n            self.proposal_timeout_millis *= (-1.0 / (inertia * (1.0 + ftt))).exp2();\n            let min_timeout = (self.config.proposal_timeout.millis() as f64).max(target_timeout);\n            self.proposal_timeout_millis = self.proposal_timeout_millis.max(min_timeout);\n        }\n        debug!(our_idx = self.our_idx(), %self.proposal_timeout_millis, \"proposal timeout updated\");\n    }\n\n    /// Returns `true` if the given validators, together will all faulty validators, form a quorum.\n    fn is_quorum(&self, vidxs: impl Iterator<Item = ValidatorIndex>) -> bool {\n        let mut sum = self.faulty_weight();\n        let quorum_threshold = self.quorum_threshold();\n        if sum > quorum_threshold {\n            return true;\n        }\n        for vidx in vidxs {\n            if !self.faults.contains_key(&vidx) {\n                sum = sum.saturating_add(self.validators.weight(vidx));\n                if sum > quorum_threshold {\n                    return true;\n                }\n            }\n        }\n        false\n    }\n\n    /// Returns the accepted value from the given round and all its ancestors, or `None` if there is\n    /// no accepted value in any of those rounds.\n    fn ancestor_values(&self, mut round_id: RoundId) -> Option<Vec<C::ConsensusValue>> {\n        let mut ancestor_values = vec![];\n        loop {\n            let (_, proposal) = self.accepted_proposal(round_id)?;\n            ancestor_values.extend(proposal.maybe_block().cloned());\n            match proposal.maybe_parent_round_id() {\n                None => return Some(ancestor_values),\n                Some(parent_round_id) => round_id = parent_round_id,\n            }\n        }\n    }\n\n    /// Returns the greatest weight such that two sets of validators with this weight can\n    /// intersect in only faulty validators, i.e. have an intersection of weight `<= ftt`. 
That is\n    /// `(total_weight + ftt) / 2`, rounded down. A _quorum_ is any set with a weight strictly\n    /// greater than this, so any two quorums have at least one correct validator in common.\n    fn quorum_threshold(&self) -> Weight {\n        let total_weight = self.validators.total_weight().0;\n        let ftt = self.params.ftt().0;\n        // sum_overflow is the 33rd bit of the addition's actual result, representing 2^32.\n        let (sum, sum_overflow) = total_weight.overflowing_add(ftt);\n        if sum_overflow {\n            Weight((sum / 2) | 1u64.reverse_bits()) // Add 2^31.\n        } else {\n            Weight(sum / 2)\n        }\n    }\n\n    /// Returns the total weight of validators known to be faulty.\n    fn faulty_weight(&self) -> Weight {\n        self.sum_weights(self.faults.keys())\n    }\n\n    /// Returns the sum of the weights of the given validators.\n    fn sum_weights<'a>(&self, vidxs: impl Iterator<Item = &'a ValidatorIndex>) -> Weight {\n        vidxs.map(|vidx| self.validators.weight(*vidx)).sum()\n    }\n\n    /// Retrieves a shared reference to the round.\n    fn round(&self, round_id: RoundId) -> Option<&Round<C>> {\n        self.rounds.get(&round_id)\n    }\n\n    /// Retrieves a mutable reference to the round.\n    /// If the round doesn't exist yet, it creates an empty one.\n    fn round_mut(&mut self, round_id: RoundId) -> &mut Round<C> {\n        match self.rounds.entry(round_id) {\n            btree_map::Entry::Occupied(entry) => entry.into_mut(),\n            btree_map::Entry::Vacant(entry) => {\n                let leader_idx = self.leader_sequence.leader(u64::from(round_id));\n                entry.insert(Round::new(self.validators.len(), leader_idx))\n            }\n        }\n    }\n\n    /// Creates a round if it doesn't exist yet.\n    fn create_round(&mut self, round_id: RoundId) {\n        self.round_mut(round_id); // This creates a round as a side effect.\n    }\n\n    /// Marks a round as dirty so that the 
next `upgrade` call will reevaluate it.\n    fn mark_dirty(&mut self, round_id: RoundId) {\n        if round_id <= self.current_round\n            && self.maybe_dirty_round_id.is_none_or(|r_id| r_id > round_id)\n        {\n            self.maybe_dirty_round_id = Some(round_id);\n        }\n    }\n}\n\nimpl<C> ConsensusProtocol<C> for Zug<C>\nwhere\n    C: Context + 'static,\n{\n    fn handle_message(\n        &mut self,\n        _rng: &mut NodeRng,\n        sender: NodeId,\n        msg: SerializedMessage,\n        now: Timestamp,\n    ) -> ProtocolOutcomes<C> {\n        let our_idx = self.our_idx();\n        match msg.deserialize_incoming() {\n            Err(err) => {\n                warn!(%sender, %err, \"failed to deserialize Zug message\");\n                vec![ProtocolOutcome::Disconnect(sender)]\n            }\n            Ok(zug_msg) if zug_msg.instance_id() != self.instance_id() => {\n                let instance_id = zug_msg.instance_id();\n                warn!(our_idx, ?instance_id, %sender, \"wrong instance ID; disconnecting\");\n                vec![ProtocolOutcome::Disconnect(sender)]\n            }\n            Ok(Message::SyncResponse(sync_response)) => {\n                self.handle_sync_response(sync_response, sender, now)\n            }\n            Ok(Message::Proposal {\n                round_id,\n                instance_id: _,\n                proposal,\n                echo,\n            }) => {\n                // TODO: make sure that `echo` is indeed an echo\n                debug!(our_idx, %sender, %proposal, %round_id, \"handling proposal with echo\");\n\n                let outcomes = || {\n                    let mut outcomes = self.handle_signed_message(echo, sender, now)?;\n                    outcomes.extend(self.handle_proposal(round_id, proposal, sender, now)?);\n                    Ok(outcomes)\n                };\n\n                outcomes_or_disconnect(outcomes())\n            }\n            Ok(Message::Signed(signed_msg)) 
=> {\n                outcomes_or_disconnect(self.handle_signed_message(signed_msg, sender, now))\n            }\n            Ok(Message::Evidence(signed_msg, content2, signature2)) => outcomes_or_disconnect(\n                self.handle_evidence(signed_msg, content2, signature2, sender, now),\n            ),\n        }\n    }\n\n    /// Handles an incoming request message and returns an optional response.\n    fn handle_request_message(\n        &mut self,\n        _rng: &mut NodeRng,\n        sender: NodeId,\n        msg: SerializedMessage,\n        _now: Timestamp,\n    ) -> (ProtocolOutcomes<C>, Option<SerializedMessage>) {\n        let our_idx = self.our_idx();\n        match msg.deserialize_incoming::<SyncRequest<C>>() {\n            Err(err) => {\n                warn!(\n                    our_idx,\n                    %sender,\n                    %err,\n                    \"could not deserialize Zug message\"\n                );\n                (vec![ProtocolOutcome::Disconnect(sender)], None)\n            }\n            Ok(sync_request) if sync_request.instance_id != *self.instance_id() => {\n                let instance_id = sync_request.instance_id;\n                warn!(our_idx, ?instance_id, %sender, \"wrong instance ID; disconnecting\");\n                (vec![ProtocolOutcome::Disconnect(sender)], None)\n            }\n            Ok(sync_request) => self.handle_sync_request(sync_request, sender),\n        }\n    }\n\n    /// Handles the firing of various timers in the protocol.\n    fn handle_timer(\n        &mut self,\n        timestamp: Timestamp,\n        now: Timestamp,\n        timer_id: TimerId,\n        rng: &mut NodeRng,\n    ) -> ProtocolOutcomes<C> {\n        match timer_id {\n            TIMER_ID_SYNC_PEER => self.handle_sync_peer_timer(now, rng),\n            TIMER_ID_UPDATE => {\n                if timestamp >= self.next_scheduled_update {\n                    self.next_scheduled_update = Timestamp::MAX;\n                }\n         
       let current_round = self.current_round;\n                self.mark_dirty(current_round);\n                debug!(?current_round, \"TIMER_ID_UPDATE\");\n                self.update(now)\n            }\n            TIMER_ID_LOG_PARTICIPATION => {\n                self.log_participation();\n                match self.config.log_participation_interval {\n                    Some(interval) if !self.evidence_only && !self.finalized_switch_block() => {\n                        vec![ProtocolOutcome::ScheduleTimer(\n                            now.saturating_add(interval),\n                            timer_id,\n                        )]\n                    }\n                    _ => vec![],\n                }\n            }\n            // TIMER_ID_VERTEX_WITH_FUTURE_TIMESTAMP => {\n            //     self.synchronizer.add_past_due_stored_vertices(now)\n            // }\n            timer_id => {\n                error!(\n                    our_idx = self.our_idx(),\n                    timer_id = timer_id.0,\n                    \"unexpected timer ID\"\n                );\n                vec![]\n            }\n        }\n    }\n\n    fn handle_is_current(&self, now: Timestamp) -> ProtocolOutcomes<C> {\n        let mut outcomes = vec![];\n        if let Some(interval) = self.config.sync_state_interval {\n            outcomes.push(ProtocolOutcome::ScheduleTimer(\n                now.max(self.params.start_timestamp())\n                    .saturating_add(interval),\n                TIMER_ID_SYNC_PEER,\n            ));\n        }\n        if let Some(interval) = self.config.log_participation_interval {\n            outcomes.push(ProtocolOutcome::ScheduleTimer(\n                now.max(self.params.start_timestamp())\n                    .saturating_add(interval),\n                TIMER_ID_LOG_PARTICIPATION,\n            ));\n        }\n        outcomes\n    }\n\n    fn handle_action(&mut self, action_id: ActionId, now: Timestamp) -> ProtocolOutcomes<C> {\n        
error!(our_idx = self.our_idx(), ?action_id, %now, \"unexpected action\");\n        vec![]\n    }\n\n    fn propose(&mut self, proposed_block: ProposedBlock<C>, now: Timestamp) -> ProtocolOutcomes<C> {\n        let maybe_parent_round_id = if let Some((block_context, round_id, maybe_parent_round_id)) =\n            self.pending_proposal.take()\n        {\n            if block_context != *proposed_block.context() || round_id != self.current_round {\n                warn!(our_idx = self.our_idx(), %proposed_block, \"skipping outdated proposal\");\n                self.pending_proposal = Some((block_context, round_id, maybe_parent_round_id));\n                return vec![];\n            }\n            maybe_parent_round_id\n        } else {\n            error!(our_idx = self.our_idx(), \"unexpected call to propose\");\n            return vec![];\n        };\n        let inactive = self\n            .validators\n            .enumerate_ids()\n            .map(|(idx, _)| idx)\n            .filter(|idx| self.active[*idx].is_none() && !self.faults.contains_key(idx));\n        let proposal = Proposal::with_block(&proposed_block, maybe_parent_round_id, inactive);\n        let mut outcomes = self.create_echo_and_proposal(proposal);\n        let round_id = self.current_round;\n        warn!(?round_id, \"Calling update after proposal\");\n        outcomes.extend(self.update(now));\n        outcomes\n    }\n\n    fn resolve_validity(\n        &mut self,\n        proposed_block: ProposedBlock<C>,\n        valid: bool,\n        now: Timestamp,\n    ) -> ProtocolOutcomes<C> {\n        let rounds_and_node_ids = self\n            .proposals_waiting_for_validation\n            .remove(&proposed_block)\n            .into_iter()\n            .flatten();\n        let mut outcomes = vec![];\n        if valid {\n            for (round_id, proposal, _sender) in rounds_and_node_ids {\n                info!(our_idx = self.our_idx(), %round_id, %proposal, \"handling valid proposal\");\n         
       if self.round_mut(round_id).insert_proposal(proposal.clone()) {\n                    self.record_entry(&ZugWalEntry::Proposal(proposal.into_inner(), round_id));\n                    self.mark_dirty(round_id);\n                    self.progress_detected = true;\n                    outcomes.push(ProtocolOutcome::HandledProposedBlock(\n                        proposed_block.clone(),\n                    ));\n                }\n            }\n            outcomes.extend(self.update(now));\n        } else {\n            for (round_id, proposal, sender) in rounds_and_node_ids {\n                // We don't disconnect from the faulty sender here: The block validator considers\n                // the value \"invalid\" even if it just couldn't download the deploys, which could\n                // just be because the original sender went offline.\n                let validator_index = self.leader(round_id).0;\n                info!(\n                    our_idx = self.our_idx(),\n                    %validator_index,\n                    %round_id,\n                    %sender,\n                    %proposal,\n                    \"dropping invalid proposal\"\n                );\n            }\n        }\n        outcomes\n    }\n\n    fn activate_validator(\n        &mut self,\n        our_id: C::ValidatorId,\n        secret: C::ValidatorSecret,\n        now: Timestamp,\n        wal_file: Option<PathBuf>,\n    ) -> ProtocolOutcomes<C> {\n        let mut outcomes = vec![];\n        if self.write_wal.is_none() {\n            if let Some(wal_file) = wal_file {\n                outcomes.extend(self.open_wal(wal_file, now));\n            }\n            if self.write_wal.is_none() {\n                error!(?our_id, \"missing WAL file; not activating\");\n                return vec![];\n            }\n        }\n        if let Some(idx) = self.validators.get_index(&our_id) {\n            if self.faults.contains_key(&idx) {\n                error!(our_idx = idx.0, \"we are 
faulty; not activating\");\n                return outcomes;\n            }\n            info!(our_idx = idx.0, \"start voting\");\n            self.active_validator = Some(ActiveValidator { idx, secret });\n            debug!(\n                our_idx = idx.0,\n                %now,\n                start_timestamp=%self.params.start_timestamp(),\n                \"activate_validator - schedule update\"\n            );\n            outcomes.extend(self.schedule_update(self.params.start_timestamp().max(now)));\n        } else {\n            error!(\n                ?our_id,\n                \"we are not a validator in this era; not activating\"\n            );\n        }\n        outcomes\n    }\n\n    fn deactivate_validator(&mut self) {\n        self.active_validator = None;\n    }\n\n    fn set_evidence_only(&mut self) {\n        self.evidence_only = true;\n        self.rounds.clear();\n        self.proposals_waiting_for_parent.clear();\n        self.proposals_waiting_for_validation.clear();\n    }\n\n    fn has_evidence(&self, vid: &C::ValidatorId) -> bool {\n        self.validators\n            .get_index(vid)\n            .and_then(|idx| self.faults.get(&idx))\n            .is_some_and(Fault::is_direct)\n    }\n\n    fn mark_faulty(&mut self, vid: &C::ValidatorId) {\n        if let Some(idx) = self.validators.get_index(vid) {\n            self.faults.entry(idx).or_insert(Fault::Indirect);\n        }\n    }\n\n    fn send_evidence(&self, peer: NodeId, vid: &C::ValidatorId) -> ProtocolOutcomes<C> {\n        self.validators\n            .get_index(vid)\n            .and_then(|idx| self.faults.get(&idx))\n            .cloned()\n            .map(|fault| match fault {\n                Fault::Direct(msg, content, sign) => {\n                    vec![ProtocolOutcome::CreatedTargetedMessage(\n                        SerializedMessage::from_message(&Message::Evidence(msg, content, sign)),\n                        peer,\n                    )]\n                }\n       
         _ => vec![],\n            })\n            .unwrap_or_default()\n    }\n\n    fn set_paused(&mut self, paused: bool, now: Timestamp) -> ProtocolOutcomes<C> {\n        if self.paused && !paused {\n            info!(\n                our_idx = self.our_idx(),\n                current_round = self.current_round,\n                \"unpausing consensus\"\n            );\n            self.paused = paused;\n            // Reset the timeout to give the proposer another chance, after the pause.\n            self.current_round_start = Timestamp::MAX;\n            let round_id = self.current_round;\n            self.mark_dirty(round_id);\n            debug!(?round_id, \"Calling update after unpausing\");\n            self.update(now)\n        } else {\n            if self.paused != paused {\n                info!(\n                    our_idx = self.our_idx(),\n                    current_round = self.current_round,\n                    \"pausing consensus\"\n                );\n            }\n            self.paused = paused;\n            vec![]\n        }\n    }\n\n    fn validators_with_evidence(&self) -> Vec<&C::ValidatorId> {\n        self.faults\n            .iter()\n            .filter(|(_, fault)| fault.is_direct())\n            .filter_map(|(vidx, _)| self.validators.id(*vidx))\n            .collect()\n    }\n\n    fn as_any(&self) -> &dyn Any {\n        self\n    }\n\n    fn is_active(&self) -> bool {\n        self.active_validator.is_some()\n    }\n\n    fn instance_id(&self) -> &C::InstanceId {\n        self.params.instance_id()\n    }\n\n    fn next_round_length(&self) -> Option<TimeDiff> {\n        Some(self.params.min_block_time())\n    }\n}\n\nfn outcomes_or_disconnect<C: Context>(\n    result: Result<ProtocolOutcomes<C>, FaultySender>,\n) -> ProtocolOutcomes<C> {\n    result.unwrap_or_else(|sender| vec![ProtocolOutcome::Disconnect(sender.0)])\n}\n\nmod specimen_support {\n    use std::collections::BTreeSet;\n\n    use crate::{\n        
components::consensus::{utils::ValidatorIndex, ClContext},\n        utils::specimen::{\n            btree_map_distinct_from_prop, btree_set_distinct_from_prop, largest_variant,\n            vec_prop_specimen, Cache, LargeUniqueSequence, LargestSpecimen, SizeEstimator,\n        },\n    };\n\n    use super::{\n        message::{\n            Content, ContentDiscriminants, Message, MessageDiscriminants, SignedMessage,\n            SyncResponse,\n        },\n        proposal::Proposal,\n        SyncRequest,\n    };\n\n    impl LargestSpecimen for Message<ClContext> {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            largest_variant::<Self, MessageDiscriminants, _, _>(\n                estimator,\n                |variant| match variant {\n                    MessageDiscriminants::SyncResponse => {\n                        Message::SyncResponse(LargestSpecimen::largest_specimen(estimator, cache))\n                    }\n                    MessageDiscriminants::Proposal => Message::Proposal {\n                        round_id: LargestSpecimen::largest_specimen(estimator, cache),\n                        instance_id: LargestSpecimen::largest_specimen(estimator, cache),\n                        proposal: LargestSpecimen::largest_specimen(estimator, cache),\n                        echo: LargestSpecimen::largest_specimen(estimator, cache),\n                    },\n                    MessageDiscriminants::Signed => {\n                        Message::Signed(LargestSpecimen::largest_specimen(estimator, cache))\n                    }\n                    MessageDiscriminants::Evidence => Message::Evidence(\n                        LargestSpecimen::largest_specimen(estimator, cache),\n                        LargestSpecimen::largest_specimen(estimator, cache),\n                        LargestSpecimen::largest_specimen(estimator, cache),\n                    ),\n                },\n            )\n        }\n    }\n\n    
impl LargestSpecimen for SyncRequest<ClContext> {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            SyncRequest {\n                round_id: LargestSpecimen::largest_specimen(estimator, cache),\n                proposal_hash: LargestSpecimen::largest_specimen(estimator, cache),\n                has_proposal: LargestSpecimen::largest_specimen(estimator, cache),\n                first_validator_idx: LargestSpecimen::largest_specimen(estimator, cache),\n                echoes: LargestSpecimen::largest_specimen(estimator, cache),\n                true_votes: LargestSpecimen::largest_specimen(estimator, cache),\n                false_votes: LargestSpecimen::largest_specimen(estimator, cache),\n                active: LargestSpecimen::largest_specimen(estimator, cache),\n                faulty: LargestSpecimen::largest_specimen(estimator, cache),\n                instance_id: LargestSpecimen::largest_specimen(estimator, cache),\n                sync_id: LargestSpecimen::largest_specimen(estimator, cache),\n            }\n        }\n    }\n\n    impl<E> LargeUniqueSequence<E> for ValidatorIndex\n    where\n        E: SizeEstimator,\n    {\n        fn large_unique_sequence(\n            _estimator: &E,\n            count: usize,\n            _cache: &mut Cache,\n        ) -> BTreeSet<Self> {\n            Iterator::map((0..u32::MAX).rev(), ValidatorIndex::from)\n                .take(count)\n                .collect()\n        }\n    }\n\n    impl LargestSpecimen for SyncResponse<ClContext> {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            SyncResponse {\n                round_id: LargestSpecimen::largest_specimen(estimator, cache),\n                proposal_or_hash: LargestSpecimen::largest_specimen(estimator, cache),\n                echo_sigs: btree_map_distinct_from_prop(estimator, \"validator_count\", cache),\n                true_vote_sigs: 
btree_map_distinct_from_prop(estimator, \"validator_count\", cache),\n                false_vote_sigs: btree_map_distinct_from_prop(estimator, \"validator_count\", cache),\n                signed_messages: vec_prop_specimen(estimator, \"validator_count\", cache),\n                evidence: vec_prop_specimen(estimator, \"validator_count\", cache),\n                instance_id: LargestSpecimen::largest_specimen(estimator, cache),\n                sync_id: LargestSpecimen::largest_specimen(estimator, cache),\n            }\n        }\n    }\n\n    impl LargestSpecimen for Proposal<ClContext> {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            Proposal {\n                timestamp: LargestSpecimen::largest_specimen(estimator, cache),\n                maybe_block: LargestSpecimen::largest_specimen(estimator, cache),\n                maybe_parent_round_id: LargestSpecimen::largest_specimen(estimator, cache),\n                inactive: Some(btree_set_distinct_from_prop(\n                    estimator,\n                    \"validator_count\",\n                    cache,\n                )),\n            }\n        }\n    }\n\n    impl LargestSpecimen for ValidatorIndex {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            u32::largest_specimen(estimator, cache).into()\n        }\n    }\n\n    impl LargestSpecimen for SignedMessage<ClContext> {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            SignedMessage::sign_new(\n                LargestSpecimen::largest_specimen(estimator, cache),\n                LargestSpecimen::largest_specimen(estimator, cache),\n                LargestSpecimen::largest_specimen(estimator, cache),\n                LargestSpecimen::largest_specimen(estimator, cache),\n                &LargestSpecimen::largest_specimen(estimator, cache),\n            )\n        }\n    }\n\n    impl 
LargestSpecimen for Content<ClContext> {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            if let Some(item) = cache.get::<Self>() {\n                return *item;\n            }\n\n            let item = largest_variant::<Self, ContentDiscriminants, _, _>(estimator, |variant| {\n                match variant {\n                    ContentDiscriminants::Echo => {\n                        Content::Echo(LargestSpecimen::largest_specimen(estimator, cache))\n                    }\n                    ContentDiscriminants::Vote => {\n                        Content::Vote(LargestSpecimen::largest_specimen(estimator, cache))\n                    }\n                }\n            });\n            *cache.set(item)\n        }\n    }\n}\n\nmod registered_sync {\n    use crate::{\n        types::{DataSize, NodeRng},\n        utils::specimen::{Cache, LargestSpecimen, SizeEstimator},\n    };\n    use casper_types::{TimeDiff, Timestamp};\n    use rand::Rng as _;\n    use serde::{Deserialize, Serialize};\n    use std::collections::BTreeMap;\n\n    #[derive(Default, DataSize, Clone, Serialize, Deserialize, Debug, PartialEq, Eq, Hash)]\n    pub struct RegisteredSync(BTreeMap<RandomId, Timestamp>);\n\n    #[derive(\n        DataSize, Copy, Clone, Serialize, Deserialize, Debug, PartialEq, Eq, Hash, Ord, PartialOrd,\n    )]\n    pub struct RandomId(u64);\n\n    impl RegisteredSync {\n        /// Prunes entries older than one minute.\n        fn prune_old(&mut self) {\n            const ONE_MIN: TimeDiff = TimeDiff::from_seconds(60);\n\n            self.0.retain(|_, timestamp| timestamp.elapsed() < ONE_MIN);\n        }\n\n        pub fn create_and_register_new_id(&mut self, rng: &mut NodeRng) -> RandomId {\n            self.prune_old();\n\n            let id = loop {\n                let id = RandomId::new(rng);\n\n                if self.0.contains_key(&id) == false {\n                    break id;\n                }\n            
};\n\n            self.0.insert(id, Timestamp::now());\n\n            id\n        }\n\n        /// Tries and remove the random ID from the stored IDs and returns it if it was present.\n        pub fn try_remove_id(&mut self, id: RandomId) -> Option<RandomId> {\n            self.0.remove(&id)?;\n\n            Some(id)\n        }\n    }\n\n    impl RandomId {\n        pub fn new(rng: &mut NodeRng) -> Self {\n            RandomId(rng.gen())\n        }\n    }\n\n    impl LargestSpecimen for RandomId {\n        fn largest_specimen<E: SizeEstimator>(_estimator: &E, _cache: &mut Cache) -> Self {\n            RandomId(u64::MAX)\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/protocols.rs",
    "content": "//! Implementations of consensus protocols.\n\npub mod common;\npub(crate) mod highway;\npub(crate) mod zug;\n"
  },
  {
    "path": "node/src/components/consensus/tests/consensus_des_testing.rs",
    "content": "use std::{\n    collections::BTreeMap,\n    fmt::{Debug, Display, Formatter},\n    hash::Hash,\n};\n\nuse datasize::DataSize;\n\nuse casper_types::Timestamp;\n\nuse super::queue::{MessageT, Queue, QueueEntry};\n\n/// Enum defining recipients of the message.\n#[derive(Debug)]\npub(crate) enum Target {\n    SingleValidator(ValidatorId),\n    AllExcept(ValidatorId),\n}\n\n#[derive(Debug, PartialEq, Eq, Clone)]\npub(crate) struct Message<M: Clone + Debug> {\n    pub(crate) sender: ValidatorId,\n    pub(crate) payload: M,\n}\n\nimpl<M: Clone + Debug> Message<M> {\n    pub(crate) fn new(sender: ValidatorId, payload: M) -> Self {\n        Message { sender, payload }\n    }\n\n    pub(crate) fn payload(&self) -> &M {\n        &self.payload\n    }\n}\n\npub(crate) struct TargetedMessage<M: Clone + Debug> {\n    pub(crate) message: Message<M>,\n    pub(crate) target: Target,\n}\n\nimpl<M: Debug + Clone> Debug for TargetedMessage<M> {\n    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {\n        f.debug_struct(\"TargetedMessage\")\n            .field(\"from\", &self.message.sender)\n            .field(\"to\", &self.target)\n            .field(\"payload\", &self.message.payload)\n            .finish()\n    }\n}\n\nimpl<M: Clone + Debug> TargetedMessage<M> {\n    pub(crate) fn new(message: Message<M>, target: Target) -> Self {\n        TargetedMessage { message, target }\n    }\n}\n\n#[derive(Debug, Clone, DataSize, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)]\npub(crate) struct ValidatorId(pub(crate) u64);\n\nimpl Display for ValidatorId {\n    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"{}\", self.0)\n    }\n}\n\n#[derive(Debug, Clone, Copy)]\npub(crate) enum Fault {\n    /// The validator does not send any messages within the interval between the timestamps.\n    TemporarilyMute { from: Timestamp, till: Timestamp },\n    /// The validator does not send any messages ever.\n    PermanentlyMute,\n    /// The 
validator is actively malicious.\n    Equivocate,\n}\n\n/// A validator in the test network.\n#[derive(Debug)]\npub(crate) struct Node<C, M, V>\nwhere\n    M: Clone + Debug,\n{\n    pub(crate) id: ValidatorId,\n    /// Vector of consensus values finalized by the validator.\n    finalized_values: Vec<C>,\n    /// Messages received by the validator.\n    messages_received: Vec<Message<M>>,\n    /// Messages produced by the validator.\n    messages_produced: Vec<M>,\n    validator: V,\n}\n\nimpl<C, M, V> Node<C, M, V>\nwhere\n    M: Clone + Debug,\n{\n    pub(crate) fn new(id: ValidatorId, validator: V) -> Self {\n        Node {\n            id,\n            finalized_values: Vec::new(),\n            messages_received: Vec::new(),\n            messages_produced: Vec::new(),\n            validator,\n        }\n    }\n\n    /// Adds vector of finalized consensus values to validator's finalized set.\n    pub(crate) fn push_finalized(&mut self, finalized_value: C) {\n        self.finalized_values.push(finalized_value);\n    }\n\n    /// Adds messages to validator's collection of received messages.\n    pub(crate) fn push_messages_received(&mut self, messages: Vec<Message<M>>) {\n        self.messages_received.extend(messages);\n    }\n\n    /// Adds messages to validator's collection of produced messages.\n    pub(crate) fn push_messages_produced(&mut self, messages: Vec<M>) {\n        self.messages_produced.extend(messages);\n    }\n\n    /// Iterator over consensus values finalized by the validator.\n    pub(crate) fn finalized_values(&self) -> impl Iterator<Item = &C> {\n        self.finalized_values.iter()\n    }\n\n    pub(crate) fn messages_produced(&self) -> impl Iterator<Item = &M> {\n        self.messages_produced.iter()\n    }\n\n    pub(crate) fn finalized_count(&self) -> usize {\n        self.finalized_values.len()\n    }\n\n    pub(crate) fn validator(&self) -> &V {\n        &self.validator\n    }\n\n    pub(crate) fn validator_mut(&mut self) -> &mut V {\n    
    &mut self.validator\n    }\n}\n\npub(crate) enum DeliverySchedule {\n    AtInstant(Timestamp),\n    #[allow(dead_code)] // Drop variant used in tests.\n    Drop,\n}\n\nimpl DeliverySchedule {\n    fn at(instant: Timestamp) -> DeliverySchedule {\n        DeliverySchedule::AtInstant(instant)\n    }\n}\n\nimpl From<u64> for DeliverySchedule {\n    fn from(instant: u64) -> Self {\n        DeliverySchedule::at(instant.into())\n    }\n}\n\nimpl From<Timestamp> for DeliverySchedule {\n    fn from(timestamp: Timestamp) -> Self {\n        DeliverySchedule::at(timestamp)\n    }\n}\n\npub(crate) struct VirtualNet<C, M, V>\nwhere\n    M: MessageT,\n{\n    /// Maps validator IDs to actual validator instances.\n    validators_map: BTreeMap<ValidatorId, Node<C, M, V>>,\n    /// A collection of all network messages queued up for delivery.\n    msg_queue: Queue<M>,\n}\n\nimpl<C, M, V> VirtualNet<C, M, V>\nwhere\n    M: MessageT,\n{\n    pub(crate) fn new<I: IntoIterator<Item = Node<C, M, V>>>(\n        validators: I,\n        init_messages: Vec<QueueEntry<M>>,\n    ) -> Self {\n        let validators_map = validators\n            .into_iter()\n            .map(|validator| (validator.id, validator))\n            .collect();\n\n        let mut q = Queue::default();\n        for m in init_messages.into_iter() {\n            q.push(m);\n        }\n\n        VirtualNet {\n            validators_map,\n            msg_queue: q,\n        }\n    }\n\n    /// Dispatches messages to their recipients.\n    pub(crate) fn dispatch_messages(&mut self, messages: Vec<(TargetedMessage<M>, Timestamp)>) {\n        for (TargetedMessage { message, target }, delivery_time) in messages {\n            let recipients = match target {\n                Target::AllExcept(creator) => self\n                    .validators_ids()\n                    .filter(|id| **id != creator)\n                    .cloned()\n                    .collect(),\n                Target::SingleValidator(recipient_id) => 
vec![recipient_id],\n            };\n            self.send_messages(recipients, message, delivery_time)\n        }\n    }\n\n    /// Pop a message from the queue.\n    /// It's a message with the earliest delivery time.\n    pub(crate) fn pop_message(&mut self) -> Option<QueueEntry<M>> {\n        self.msg_queue.pop()\n    }\n\n    /// Returns a reference to the next message from the queue without removing it.\n    /// It's a message with the earliest delivery time.\n    pub(crate) fn peek_message(&self) -> Option<&QueueEntry<M>> {\n        self.msg_queue.peek()\n    }\n\n    pub(crate) fn validators_ids(&self) -> impl Iterator<Item = &ValidatorId> {\n        self.validators_map.keys()\n    }\n\n    pub(crate) fn node_mut(&mut self, validator_id: &ValidatorId) -> Option<&mut Node<C, M, V>> {\n        self.validators_map.get_mut(validator_id)\n    }\n\n    pub(crate) fn validator(&self, validator_id: &ValidatorId) -> Option<&Node<C, M, V>> {\n        self.validators_map.get(validator_id)\n    }\n\n    pub(crate) fn validators(&self) -> impl Iterator<Item = &Node<C, M, V>> {\n        self.validators_map.values()\n    }\n\n    // Utility function for dispatching message to multiple recipients.\n    fn send_messages<I: IntoIterator<Item = ValidatorId>>(\n        &mut self,\n        recipients: I,\n        message: Message<M>,\n        delivery_time: Timestamp,\n    ) {\n        for validator_id in recipients {\n            self.schedule_message(delivery_time, validator_id, message.clone())\n        }\n    }\n\n    /// Schedules a message `message` to be delivered at `delivery_time` to `recipient` validator.\n    fn schedule_message(\n        &mut self,\n        delivery_time: Timestamp,\n        recipient: ValidatorId,\n        message: Message<M>,\n    ) {\n        let qe = QueueEntry::new(delivery_time, recipient, message);\n        self.msg_queue.push(qe);\n    }\n\n    /// Drops all messages from the queue.\n    /// Should never be called during normal operation of 
the test.\n    pub(crate) fn empty_queue(&mut self) {\n        self.msg_queue.clear();\n    }\n}\n\nmod virtual_net_tests {\n    use super::{Message, Node, Target, TargetedMessage, Timestamp, ValidatorId, VirtualNet};\n\n    type M = u64;\n    type C = u64;\n\n    struct NoOpValidator;\n\n    #[test]\n    fn messages_are_enqueued_in_order() {\n        let validator_id = ValidatorId(1u64);\n        let single_validator: Node<C, u64, NoOpValidator> = Node::new(validator_id, NoOpValidator);\n        let mut virtual_net = VirtualNet::new(vec![single_validator], vec![]);\n\n        let messages_num = 10;\n        // We want to enqueue messages from the latest delivery time to the earliest.\n        let messages: Vec<(Timestamp, Message<u64>)> = (0..messages_num)\n            .map(|i| ((messages_num - i).into(), Message::new(validator_id, i)))\n            .collect();\n\n        messages.clone().into_iter().for_each(|(instant, message)| {\n            virtual_net.schedule_message(instant, validator_id, message)\n        });\n\n        let queued_messages =\n            std::iter::successors(virtual_net.pop_message(), |_| virtual_net.pop_message())\n                .map(|qe| qe.message);\n\n        // Since we enqueued in the order from the latest delivery time,\n        // we expect that the actual delivery will be a reverse.\n        let expected_order = messages.into_iter().map(|(_, msg)| msg).rev();\n\n        assert!(\n            queued_messages.eq(expected_order),\n            \"Messages were not delivered in the expected order.\"\n        );\n    }\n\n    #[test]\n    fn messages_are_dispatched() {\n        let validator_id = ValidatorId(1u64);\n        let a: Node<C, M, NoOpValidator> = Node::new(validator_id, NoOpValidator);\n        let b = Node::new(ValidatorId(2u64), NoOpValidator);\n        let c = Node::new(ValidatorId(3u64), NoOpValidator);\n\n        let mut virtual_net = VirtualNet::new(vec![a, b, c], vec![]);\n\n        let message = 
Message::new(validator_id, 1u64);\n        let targeted_message =\n            TargetedMessage::new(message.clone(), Target::AllExcept(validator_id));\n\n        virtual_net.dispatch_messages(vec![(targeted_message, 2.into())]);\n\n        let queued_msgs =\n            std::iter::successors(virtual_net.pop_message(), |_| virtual_net.pop_message())\n                .map(|qe| (qe.recipient, qe.message))\n                .collect::<Vec<_>>();\n\n        assert_eq!(\n            queued_msgs,\n            vec![(ValidatorId(3), message.clone()), (ValidatorId(2), message)],\n            \"A broadcast message should be delivered to every node but the creator.\"\n        );\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/tests/queue.rs",
    "content": "use std::{cmp::Ordering, collections::BinaryHeap, fmt::Debug};\n\nuse casper_types::Timestamp;\n\nuse super::consensus_des_testing::{Message, ValidatorId};\n\npub(crate) trait MessageT: PartialEq + Eq + Ord + Clone + Debug {}\nimpl<T> MessageT for T where T: PartialEq + Eq + Ord + Clone + Debug {}\n\n/// An entry in the message queue of the test network.\n#[derive(Debug, PartialEq, Eq, Clone)]\npub(crate) struct QueueEntry<M>\nwhere\n    M: MessageT,\n{\n    /// Scheduled delivery time of the message.\n    /// When a message has dependencies that recipient validator is missing,\n    /// those will be added to it in a loop (simulating synchronization)\n    /// and not influence the delivery time.\n    pub(crate) delivery_time: Timestamp,\n    /// Recipient of the message.\n    pub(crate) recipient: ValidatorId,\n    /// The message.\n    pub(crate) message: Message<M>,\n}\n\nimpl<M> QueueEntry<M>\nwhere\n    M: MessageT,\n{\n    pub(crate) fn new(\n        delivery_time: Timestamp,\n        recipient: ValidatorId,\n        message: Message<M>,\n    ) -> Self {\n        QueueEntry {\n            delivery_time,\n            recipient,\n            message,\n        }\n    }\n}\n\nimpl<M> Ord for QueueEntry<M>\nwhere\n    M: MessageT,\n{\n    fn cmp(&self, other: &Self) -> Ordering {\n        self.delivery_time\n            .cmp(&other.delivery_time)\n            .reverse()\n            .then_with(|| self.recipient.cmp(&other.recipient))\n            .then_with(|| self.message.payload.cmp(&other.message.payload))\n    }\n}\n\nimpl<M> PartialOrd for QueueEntry<M>\nwhere\n    M: MessageT,\n{\n    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {\n        Some(self.cmp(other))\n    }\n}\n\n#[cfg(test)]\nmod queue_entry_tests {\n    use super::{Message, QueueEntry, ValidatorId};\n    use std::cmp::Ordering;\n\n    #[test]\n    fn delivery_time_ord() {\n        let sender = ValidatorId(2);\n        let recipient1 = ValidatorId(1);\n        let 
recipient2 = ValidatorId(3);\n        let message = Message::new(sender, 1u8);\n        let m1 = QueueEntry::new(1.into(), recipient1, message.clone());\n        let m2 = QueueEntry::new(2.into(), recipient1, message.clone());\n        assert_eq!(m1.cmp(&m2), Ordering::Greater);\n        let m3 = QueueEntry::new(1.into(), recipient2, message);\n        assert_eq!(m1.cmp(&m3), Ordering::Less);\n    }\n}\n\n/// Priority queue of messages scheduled for delivery to validators.\n/// Ordered by the delivery time.\npub(crate) struct Queue<M>(BinaryHeap<QueueEntry<M>>)\nwhere\n    M: MessageT;\n\nimpl<M> Default for Queue<M>\nwhere\n    M: MessageT,\n{\n    fn default() -> Self {\n        Queue(Default::default())\n    }\n}\n\nimpl<M> Queue<M>\nwhere\n    M: MessageT,\n{\n    /// Gets next message.\n    /// Returns `None` if there aren't any.\n    pub(crate) fn pop(&mut self) -> Option<QueueEntry<M>> {\n        self.0.pop()\n    }\n\n    /// Returns a reference to next message without removing it.\n    /// Returns `None` if there aren't any.\n    pub(crate) fn peek(&self) -> Option<&QueueEntry<M>> {\n        self.0.peek()\n    }\n\n    /// Pushes new message to the queue.\n    pub(crate) fn push(&mut self, item: QueueEntry<M>) {\n        self.0.push(item)\n    }\n\n    pub(crate) fn clear(&mut self) {\n        self.0.clear();\n    }\n}\n\n#[cfg(test)]\nmod queue_tests {\n    use super::{Message, Queue, QueueEntry, ValidatorId};\n\n    #[test]\n    fn pop_earliest_delivery() {\n        let mut queue: Queue<u8> = Queue::default();\n        let recipient_a = ValidatorId(1);\n        let recipient_b = ValidatorId(3);\n        let sender = ValidatorId(2);\n        let message_a = Message::new(sender, 1u8);\n        let message_b = Message::new(sender, 2u8);\n\n        let first = QueueEntry::new(1.into(), recipient_a, message_b);\n        let second = QueueEntry::new(1.into(), recipient_a, message_a.clone());\n        let third = QueueEntry::new(3.into(), recipient_b, 
message_a);\n\n        queue.push(first.clone());\n        queue.push(third.clone());\n        queue.push(second.clone());\n\n        assert_eq!(queue.pop(), Some(first));\n        assert_eq!(queue.pop(), Some(second));\n        assert_eq!(queue.pop(), Some(third));\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/tests/utils.rs",
    "content": "use std::sync::Arc;\n\nuse num::Zero;\nuse once_cell::sync::Lazy;\n\nuse casper_types::{\n    system::auction::DelegationRate, AccountConfig, AccountsConfig, ActivationPoint, Chainspec,\n    ChainspecRawBytes, Motes, PublicKey, SecretKey, TimeDiff, Timestamp, ValidatorConfig, U512,\n};\n\nuse crate::{\n    tls::{KeyFingerprint, Sha512},\n    types::NodeId,\n    utils::Loadable,\n};\n\npub static ALICE_SECRET_KEY: Lazy<Arc<SecretKey>> =\n    Lazy::new(|| Arc::new(SecretKey::ed25519_from_bytes([0; SecretKey::ED25519_LENGTH]).unwrap()));\npub static ALICE_PUBLIC_KEY: Lazy<PublicKey> = Lazy::new(|| PublicKey::from(&**ALICE_SECRET_KEY));\npub static ALICE_NODE_ID: Lazy<NodeId> = Lazy::new(|| {\n    NodeId::from(KeyFingerprint::from(Sha512::new(match *ALICE_PUBLIC_KEY {\n        PublicKey::Ed25519(pub_key) => pub_key,\n        _ => panic!(\"ALICE_PUBLIC_KEY is Ed25519\"),\n    })))\n});\n\npub static BOB_SECRET_KEY: Lazy<Arc<SecretKey>> =\n    Lazy::new(|| Arc::new(SecretKey::ed25519_from_bytes([1; SecretKey::ED25519_LENGTH]).unwrap()));\npub static BOB_PUBLIC_KEY: Lazy<PublicKey> = Lazy::new(|| PublicKey::from(&**BOB_SECRET_KEY));\npub static BOB_NODE_ID: Lazy<NodeId> = Lazy::new(|| {\n    NodeId::from(KeyFingerprint::from(Sha512::new(match *BOB_PUBLIC_KEY {\n        PublicKey::Ed25519(pub_key) => pub_key,\n        _ => panic!(\"BOB_PUBLIC_KEY is Ed25519\"),\n    })))\n});\n\npub static CAROL_SECRET_KEY: Lazy<Arc<SecretKey>> =\n    Lazy::new(|| Arc::new(SecretKey::ed25519_from_bytes([2; SecretKey::ED25519_LENGTH]).unwrap()));\npub static CAROL_PUBLIC_KEY: Lazy<PublicKey> = Lazy::new(|| PublicKey::from(&**CAROL_SECRET_KEY));\n\n/// Loads the local chainspec and overrides timestamp and genesis account with the given stakes.\n/// The test `Chainspec` returned has eras with exactly two blocks.\npub fn new_test_chainspec<I, T>(stakes: I) -> Chainspec\nwhere\n    I: IntoIterator<Item = (PublicKey, T)>,\n    T: Into<U512>,\n{\n    let (mut chainspec, _) = 
<(Chainspec, ChainspecRawBytes)>::from_resources(\"local\");\n    let accounts = stakes\n        .into_iter()\n        .map(|(pk, stake)| {\n            let motes = Motes::new(stake);\n            let validator_config = ValidatorConfig::new(motes, DelegationRate::zero());\n            AccountConfig::new(pk, motes, Some(validator_config))\n        })\n        .collect();\n    let delegators = vec![];\n    let administrators = vec![];\n    chainspec.network_config.accounts_config =\n        AccountsConfig::new(accounts, delegators, administrators);\n    chainspec.protocol_config.activation_point = ActivationPoint::Genesis(Timestamp::now());\n\n    // Every era has exactly two blocks.\n    chainspec.core_config.minimum_era_height = 2;\n    chainspec.core_config.era_duration = TimeDiff::from_millis(0);\n    chainspec\n}\n"
  },
  {
    "path": "node/src/components/consensus/tests.rs",
    "content": "/// Basic building blocks for the Discrete Event Simulator (DES).\npub(crate) mod consensus_des_testing;\n/// Message queue.\npub(crate) mod queue;\n/// Miscellaneous code shared among consensus tests\npub(crate) mod utils;\n"
  },
  {
    "path": "node/src/components/consensus/traits.rs",
    "content": "use std::{\n    fmt::{Debug, Display},\n    hash::Hash,\n};\n\nuse datasize::DataSize;\nuse serde::{de::DeserializeOwned, Serialize};\n\n/// A validator identifier.\npub trait ValidatorIdT: Eq + Ord + Clone + Debug + Hash + Send + DataSize + Display {}\nimpl<VID> ValidatorIdT for VID where VID: Eq + Ord + Clone + Debug + Hash + Send + DataSize + Display\n{}\n\n/// The consensus value type, e.g. a list of transactions.\npub trait ConsensusValueT:\n    Eq + Clone + Debug + Display + Hash + Serialize + DeserializeOwned + Send + DataSize\n{\n    /// Returns whether the consensus value needs validation.\n    fn needs_validation(&self) -> bool;\n}\n\n/// A hash, as an identifier for a block or unit.\npub trait HashT:\n    Eq + Ord + Copy + Clone + DataSize + Debug + Display + Hash + Serialize + DeserializeOwned + Send\n{\n}\nimpl<H> HashT for H where\n    H: Eq\n        + Ord\n        + Copy\n        + Clone\n        + DataSize\n        + Debug\n        + Display\n        + Hash\n        + Serialize\n        + DeserializeOwned\n        + Send\n{\n}\n\n/// A validator's secret signing key.\npub trait ValidatorSecret: Send + DataSize {\n    type Hash: DataSize;\n\n    type Signature: Eq + PartialEq + Clone + Debug + Hash + Serialize + DeserializeOwned + DataSize;\n\n    fn sign(&self, hash: &Self::Hash) -> Self::Signature;\n}\n\n/// The collection of types the user can choose for cryptography, IDs, transactions, etc.\n// TODO: These trait bounds make `#[derive(...)]` work for types with a `C: Context` type\n// parameter. Split this up or replace the derives with explicit implementations.\npub trait Context: Clone + DataSize + Debug + Eq + Ord + Hash + Send {\n    /// The consensus value type, e.g. 
a list of transactions.\n    type ConsensusValue: ConsensusValueT;\n    /// Unique identifiers for validators.\n    type ValidatorId: ValidatorIdT;\n    /// A validator's secret signing key.\n    type ValidatorSecret: ValidatorSecret<Hash = Self::Hash, Signature = Self::Signature>;\n    /// A signature type.\n    type Signature: Copy\n        + Clone\n        + Debug\n        + Eq\n        + Hash\n        + Serialize\n        + DeserializeOwned\n        + Send\n        + DataSize;\n    /// Unique identifiers for units.\n    type Hash: HashT;\n    /// The ID of a consensus protocol instance.\n    type InstanceId: HashT;\n\n    fn hash(data: &[u8]) -> Self::Hash;\n\n    fn verify_signature(\n        hash: &Self::Hash,\n        public_key: &Self::ValidatorId,\n        signature: &<Self::ValidatorSecret as ValidatorSecret>::Signature,\n    ) -> bool;\n}\n\n/// A marker trait indicating that the given type is a valid consensus message to be sent across the\n/// network.\n///\n/// Only implement this for types that are native to the consensus module and never for `Vec<u8>`,\n/// as this would break accidental double-serialization protection.\npub trait ConsensusNetworkMessage {}\n"
  },
  {
    "path": "node/src/components/consensus/utils/validators.rs",
    "content": "use std::{\n    collections::HashMap,\n    fmt,\n    hash::Hash,\n    iter::FromIterator,\n    ops::{Add, Index, IndexMut},\n    slice, vec,\n};\n\nuse datasize::DataSize;\nuse derive_more::{AsRef, From};\nuse itertools::Itertools;\nuse serde::{Deserialize, Serialize};\nuse tracing::warn;\n\nuse super::Weight;\nuse crate::utils::ds;\n\n/// The index of a validator, in a list of all validators, ordered by ID.\n#[derive(\n    Copy, Clone, DataSize, Debug, Eq, PartialEq, Hash, Ord, PartialOrd, Serialize, Deserialize,\n)]\npub struct ValidatorIndex(pub u32);\n\nimpl From<u32> for ValidatorIndex {\n    fn from(idx: u32) -> Self {\n        ValidatorIndex(idx)\n    }\n}\n\n/// Information about a validator: their ID and weight.\n#[derive(Clone, DataSize, Debug, Eq, PartialEq)]\npub struct Validator<VID> {\n    weight: Weight,\n    id: VID,\n    banned: bool,\n    can_propose: bool,\n}\n\nimpl<VID, W: Into<Weight>> From<(VID, W)> for Validator<VID> {\n    fn from((id, weight): (VID, W)) -> Validator<VID> {\n        Validator {\n            id,\n            weight: weight.into(),\n            banned: false,\n            can_propose: true,\n        }\n    }\n}\n\nimpl<VID> Validator<VID> {\n    /// Returns the validator's ID.\n    pub fn id(&self) -> &VID {\n        &self.id\n    }\n\n    /// Returns the validator's weight.\n    pub fn weight(&self) -> Weight {\n        self.weight\n    }\n}\n\n/// The validator IDs and weight map.\n#[derive(Debug, DataSize, Clone)]\npub struct Validators<VID>\nwhere\n    VID: Eq + Hash,\n{\n    index_by_id: HashMap<VID, ValidatorIndex>,\n    validators: Vec<Validator<VID>>,\n    total_weight: Weight,\n}\n\nimpl<VID: Eq + Hash> Validators<VID> {\n    /// Returns the total weight of the set of validators.\n    pub fn total_weight(&self) -> Weight {\n        self.total_weight\n    }\n\n    /// Returns the weight of the validator with the given index.\n    ///\n    /// *Panics* if the validator index does not exist.\n    pub fn 
weight(&self, idx: ValidatorIndex) -> Weight {\n        self.validators[idx.0 as usize].weight\n    }\n\n    /// Returns `true` if the map is empty.\n    pub fn is_empty(&self) -> bool {\n        self.validators.is_empty()\n    }\n\n    /// Returns the number of validators.\n    pub fn len(&self) -> usize {\n        self.validators.len()\n    }\n\n    /// Gets the index of a validator with the given ID. Returns `None` if no such validator is in\n    /// the set.\n    pub fn get_index(&self, id: &VID) -> Option<ValidatorIndex> {\n        self.index_by_id.get(id).cloned()\n    }\n\n    /// Returns validator ID by index, or `None` if it doesn't exist.\n    pub fn id(&self, idx: ValidatorIndex) -> Option<&VID> {\n        self.validators.get(idx.0 as usize).map(Validator::id)\n    }\n\n    /// Returns an iterator over all validators, sorted by ID.\n    pub fn iter(&self) -> impl Iterator<Item = &Validator<VID>> {\n        self.validators.iter()\n    }\n\n    /// Marks the validator with that ID as banned, if it exists, and excludes it from the leader\n    /// sequence.\n    pub fn ban(&mut self, vid: &VID) {\n        if let Some(idx) = self.get_index(vid) {\n            self.validators[idx.0 as usize].banned = true;\n            self.validators[idx.0 as usize].can_propose = false;\n        }\n    }\n\n    /// Marks the validator as excluded from the leader sequence.\n    pub fn set_cannot_propose(&mut self, vid: &VID) {\n        if let Some(idx) = self.get_index(vid) {\n            self.validators[idx.0 as usize].can_propose = false;\n        }\n    }\n\n    /// Returns an iterator of all indices of banned validators.\n    pub fn iter_banned_idx(&self) -> impl Iterator<Item = ValidatorIndex> + '_ {\n        self.iter()\n            .enumerate()\n            .filter(|(_, v)| v.banned)\n            .map(|(idx, _)| ValidatorIndex::from(idx as u32))\n    }\n\n    /// Returns an iterator of all indices of validators that are not allowed to propose values.\n    pub fn 
iter_cannot_propose_idx(&self) -> impl Iterator<Item = ValidatorIndex> + '_ {\n        self.iter()\n            .enumerate()\n            .filter(|(_, v)| !v.can_propose)\n            .map(|(idx, _)| ValidatorIndex::from(idx as u32))\n    }\n\n    /// Returns an iterator of pairs (validator index, validator ID).\n    pub fn enumerate_ids<'a>(&'a self) -> impl Iterator<Item = (ValidatorIndex, &'a VID)> {\n        let to_idx =\n            |(idx, v): (usize, &'a Validator<VID>)| (ValidatorIndex::from(idx as u32), v.id());\n        self.iter().enumerate().map(to_idx)\n    }\n\n    pub(crate) fn ensure_nonzero_proposing_stake(&mut self) -> bool {\n        if self.total_weight.is_zero() {\n            return false;\n        }\n        if self.iter().all(|v| v.banned || v.weight.is_zero()) {\n            warn!(\"everyone is banned; admitting banned validators anyway\");\n            for validator in &mut self.validators {\n                validator.can_propose = true;\n                validator.banned = false;\n            }\n        } else if self.iter().all(|v| !v.can_propose || v.weight.is_zero()) {\n            warn!(\"everyone is excluded; allowing proposers who are currently inactive\");\n            for validator in &mut self.validators {\n                if !validator.banned {\n                    validator.can_propose = true;\n                }\n            }\n        }\n        true\n    }\n}\n\nimpl<VID: Ord + Hash + Clone, W: Into<Weight>> FromIterator<(VID, W)> for Validators<VID> {\n    fn from_iter<I: IntoIterator<Item = (VID, W)>>(ii: I) -> Validators<VID> {\n        let mut validators: Vec<_> = ii.into_iter().map(Validator::from).collect();\n        let total_weight = validators.iter().fold(Weight(0), |sum, v| {\n            sum.checked_add(v.weight())\n                .expect(\"total weight must be < 2^64\")\n        });\n        validators.sort_by_cached_key(|val| val.id.clone());\n        let index_by_id = validators\n            .iter()\n            
.enumerate()\n            .map(|(idx, val)| (val.id.clone(), ValidatorIndex(idx as u32)))\n            .collect();\n        Validators {\n            index_by_id,\n            validators,\n            total_weight,\n        }\n    }\n}\n\nimpl<VID: Ord + Hash + fmt::Debug> fmt::Display for Validators<VID> {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        writeln!(f, \"Validators: index, ID, weight\")?;\n        for (i, val) in self.validators.iter().enumerate() {\n            writeln!(f, \"{:3}, {:?}, {}\", i, val.id(), val.weight().0)?\n        }\n        Ok(())\n    }\n}\n\n/// A map from the set of validators to some values.\n#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, AsRef, From, Hash)]\npub struct ValidatorMap<T>(Vec<T>);\n\nimpl<T> fmt::Display for ValidatorMap<Option<T>>\nwhere\n    T: fmt::Display,\n{\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        let view = self\n            .0\n            .iter()\n            .map(|maybe_el| match maybe_el {\n                None => \"N\".to_string(),\n                Some(el) => format!(\"{}\", el),\n            })\n            .join(\", \");\n        write!(f, \"ValidatorMap({})\", view)?;\n        Ok(())\n    }\n}\n\nimpl<T> DataSize for ValidatorMap<T>\nwhere\n    T: DataSize,\n{\n    const IS_DYNAMIC: bool = Vec::<T>::IS_DYNAMIC;\n\n    const STATIC_HEAP_SIZE: usize = Vec::<T>::STATIC_HEAP_SIZE;\n\n    fn estimate_heap_size(&self) -> usize {\n        ds::vec_sample(&self.0)\n    }\n}\n\nimpl<T> ValidatorMap<T> {\n    /// Returns the value for the given validator, or `None` if the index is out of range.\n    pub fn get(&self, idx: ValidatorIndex) -> Option<&T> {\n        self.0.get(idx.0 as usize)\n    }\n\n    /// Returns the number of values. 
This must equal the number of validators.\n    pub fn len(&self) -> usize {\n        self.0.len()\n    }\n\n    /// Returns `true` if this ValidatorMap is empty.\n    pub fn is_empty(&self) -> bool {\n        self.0.is_empty()\n    }\n\n    /// Returns an iterator over all values.\n    pub fn iter(&self) -> impl Iterator<Item = &T> {\n        self.0.iter()\n    }\n\n    /// Returns an iterator over mutable references to all values.\n    pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut T> {\n        self.0.iter_mut()\n    }\n\n    /// Returns an iterator over all values, by validator index.\n    pub fn enumerate(&self) -> impl Iterator<Item = (ValidatorIndex, &T)> {\n        self.iter()\n            .enumerate()\n            .map(|(idx, value)| (ValidatorIndex(idx as u32), value))\n    }\n\n    /// Returns `true` if `self` has an entry for validator number `idx`.\n    pub fn has(&self, idx: ValidatorIndex) -> bool {\n        self.0.len() > idx.0 as usize\n    }\n\n    /// Returns an iterator over all validator indices.\n    pub fn keys(&self) -> impl Iterator<Item = ValidatorIndex> {\n        (0..self.len()).map(|idx| ValidatorIndex(idx as u32))\n    }\n\n    /// Binary searches this sorted `ValidatorMap` for `x`.\n    ///\n    /// Returns the lowest index of an entry `>= x`, or `None` if `x` is greater than all entries.\n    pub fn binary_search(&self, x: &T) -> Option<ValidatorIndex>\n    where\n        T: Ord,\n    {\n        match self.0.binary_search(x) {\n            // The standard library's binary search returns `Ok(i)` if it found `x` at index `i`,\n            // but `i` is not necessarily the lowest such index.\n            Ok(i) => Some(ValidatorIndex(\n                (0..i)\n                    .rev()\n                    .take_while(|j| self.0[*j] == *x)\n                    .last()\n                    .unwrap_or(i) as u32,\n            )),\n            // It returns `Err(i)` if `x` was not found but `i` is the index where `x` would have to\n  
          // be inserted to keep the list. This is either the lowest index of an entry `>= x`...\n            Err(i) if i < self.len() => Some(ValidatorIndex(i as u32)),\n            // ...or the end of the list if `x` is greater than all entries.\n            Err(_) => None,\n        }\n    }\n}\n\nimpl<T> IntoIterator for ValidatorMap<T> {\n    type Item = T;\n    type IntoIter = vec::IntoIter<T>;\n    fn into_iter(self) -> Self::IntoIter {\n        self.0.into_iter()\n    }\n}\n\nimpl<T> FromIterator<T> for ValidatorMap<T> {\n    fn from_iter<I: IntoIterator<Item = T>>(ii: I) -> ValidatorMap<T> {\n        ValidatorMap(ii.into_iter().collect())\n    }\n}\n\nimpl<T> Index<ValidatorIndex> for ValidatorMap<T> {\n    type Output = T;\n\n    fn index(&self, vidx: ValidatorIndex) -> &T {\n        &self.0[vidx.0 as usize]\n    }\n}\n\nimpl<T> IndexMut<ValidatorIndex> for ValidatorMap<T> {\n    fn index_mut(&mut self, vidx: ValidatorIndex) -> &mut T {\n        &mut self.0[vidx.0 as usize]\n    }\n}\n\nimpl<'a, T> IntoIterator for &'a ValidatorMap<T> {\n    type Item = &'a T;\n    type IntoIter = slice::Iter<'a, T>;\n\n    fn into_iter(self) -> Self::IntoIter {\n        self.0.iter()\n    }\n}\n\nimpl<Rhs, T: Copy + Add<Rhs, Output = T>> Add<ValidatorMap<Rhs>> for ValidatorMap<T> {\n    type Output = ValidatorMap<T>;\n    fn add(mut self, rhs: ValidatorMap<Rhs>) -> Self::Output {\n        #[allow(clippy::arithmetic_side_effects)]\n        self.0\n            .iter_mut()\n            .zip(rhs)\n            .for_each(|(lhs_val, rhs_val)| *lhs_val = *lhs_val + rhs_val);\n        self\n    }\n}\n\nimpl<T> ValidatorMap<Option<T>> {\n    /// Returns the keys of all validators whose value is `Some`.\n    pub fn keys_some(&self) -> impl Iterator<Item = ValidatorIndex> + '_ {\n        self.iter_some().map(|(vidx, _)| vidx)\n    }\n\n    /// Returns an iterator over all values that are present, together with their index.\n    pub fn iter_some(&self) -> impl Iterator<Item = 
(ValidatorIndex, &T)> + '_ {\n        self.enumerate()\n            .filter_map(|(vidx, opt)| opt.as_ref().map(|val| (vidx, val)))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn from_iter() {\n        let weights = vec![\n            (\"Bob\".to_string(), 5u64),\n            (\"Carol\".to_string(), 3),\n            (\"Alice\".to_string(), 4),\n        ];\n        let validators = Validators::from_iter(weights);\n        assert_eq!(ValidatorIndex(0), validators.index_by_id[\"Alice\"]);\n        assert_eq!(ValidatorIndex(1), validators.index_by_id[\"Bob\"]);\n        assert_eq!(ValidatorIndex(2), validators.index_by_id[\"Carol\"]);\n    }\n\n    #[test]\n    fn binary_search() {\n        let list = ValidatorMap::from(vec![2, 3, 5, 5, 5, 5, 5, 9]);\n        // Searching for 5 returns the first index, even if the standard library doesn't.\n        assert!(\n            list.0.binary_search(&5).expect(\"5 is in the list\") > 2,\n            \"test case where the std's search would return a higher index\"\n        );\n        assert_eq!(Some(ValidatorIndex(2)), list.binary_search(&5));\n        // Searching for 4 also returns 2, since that is the first index of a value >= 4.\n        assert_eq!(Some(ValidatorIndex(2)), list.binary_search(&4));\n        // 3 is found again, at index 1.\n        assert_eq!(Some(ValidatorIndex(1)), list.binary_search(&3));\n        // 10 is bigger than all entries.\n        assert_eq!(None, list.binary_search(&10));\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/utils/wal.rs",
    "content": "use std::{\n    fs::{File, OpenOptions},\n    io::{self, BufReader, BufWriter, Read, Seek, Write},\n    marker::PhantomData,\n    path::PathBuf,\n};\n\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\nuse thiserror::Error;\nuse tracing::warn;\n\npub(crate) trait WalEntry: Serialize + for<'de> Deserialize<'de> {}\n\n/// A Write-Ahead Log to store every message on disk when we add it to the protocol state.\n#[derive(Debug)]\npub(crate) struct WriteWal<E: WalEntry> {\n    writer: BufWriter<File>,\n    phantom_context: PhantomData<E>,\n}\n\nimpl<E: WalEntry> DataSize for WriteWal<E> {\n    const IS_DYNAMIC: bool = true;\n\n    const STATIC_HEAP_SIZE: usize = 0;\n\n    fn estimate_heap_size(&self) -> usize {\n        self.writer.capacity()\n    }\n}\n\n#[derive(Error, Debug)]\npub(crate) enum WriteWalError {\n    #[error(\"Could not get serialized message size: {0}\")]\n    CouldntGetSerializedSize(bincode::Error),\n    #[error(\"Could not serialize size: {0}\")]\n    CouldntSerializeSizeIntoWriter(io::Error),\n    #[error(\"Could not serialize message: {0}\")]\n    CouldntSerializeMessageIntoWriter(bincode::Error),\n    #[error(\"Could not flush message to disk: {0}\")]\n    CouldntFlushMessageToDisk(io::Error),\n    #[error(\"Could not open file: {0}\")]\n    FileCouldntBeOpened(io::Error),\n}\n\nimpl<E: WalEntry> WriteWal<E> {\n    pub(crate) fn new(wal_path: &PathBuf) -> Result<Self, WriteWalError> {\n        let file = OpenOptions::new()\n            .append(true)\n            .create(true)\n            .open(wal_path)\n            .map_err(WriteWalError::FileCouldntBeOpened)?;\n        Ok(WriteWal {\n            writer: BufWriter::new(file),\n            phantom_context: PhantomData,\n        })\n    }\n\n    pub(crate) fn record_entry(&mut self, entry: &E) -> Result<(), WriteWalError> {\n        // First write the size of the entry as a serialized u64.\n        let entry_size =\n            
bincode::serialized_size(entry).map_err(WriteWalError::CouldntGetSerializedSize)?;\n        self.writer\n            .write_all(&entry_size.to_le_bytes())\n            .map_err(WriteWalError::CouldntSerializeSizeIntoWriter)?;\n        // Write the serialized entry itself.\n        bincode::serialize_into(&mut self.writer, entry)\n            .map_err(WriteWalError::CouldntSerializeMessageIntoWriter)?;\n        self.writer\n            .flush()\n            .map_err(WriteWalError::CouldntFlushMessageToDisk)?;\n        Ok(())\n    }\n}\n\n/// A buffer to read a Write-Ahead Log from disk and deserialize its messages.\n#[derive(Debug)]\npub(crate) struct ReadWal<E: WalEntry> {\n    pub(crate) reader: BufReader<File>,\n    pub(crate) phantom_context: PhantomData<E>,\n}\n\n#[derive(Error, Debug)]\npub(crate) enum ReadWalError {\n    #[error(\"Could not create file at {0}: {1}\")]\n    FileCouldntBeCreated(PathBuf, io::Error),\n    #[error(transparent)]\n    OtherIOError(#[from] io::Error),\n    #[error(\"could not deserialize WAL entry: {0}\")]\n    CouldNotDeserialize(bincode::Error),\n}\n\nimpl<E: WalEntry> ReadWal<E> {\n    pub(crate) fn new(wal_path: &PathBuf) -> Result<Self, ReadWalError> {\n        let file = OpenOptions::new()\n            .create(true)\n            .truncate(false)\n            .read(true)\n            .write(true)\n            .open(wal_path)\n            .map_err(|err| ReadWalError::FileCouldntBeCreated(wal_path.clone(), err))?;\n        Ok(ReadWal {\n            reader: BufReader::new(file),\n            phantom_context: PhantomData,\n        })\n    }\n}\n\nimpl<E: WalEntry> ReadWal<E> {\n    /// Reads the next entry from the WAL, or returns an error.\n    /// If there are 0 bytes left it returns `Ok(None)`.\n    pub(crate) fn read_next_entry(&mut self) -> Result<Option<E>, ReadWalError> {\n        // Remember the current position: If we encounter an unreadable entry we trim the file at\n        // this point so we can continue appending 
entries after it.\n        let position = self.reader.stream_position()?;\n\n        // Deserialize the size of the entry, in bytes, as a u64.\n        let mut entry_size_buf = [0u8; size_of::<u64>()];\n        if let Err(err) = self.reader.read_exact(&mut entry_size_buf) {\n            if err.kind() == io::ErrorKind::UnexpectedEof {\n                self.trim_file(position)?;\n                return Ok(None);\n            }\n            return Err(ReadWalError::OtherIOError(err));\n        }\n        let entry_size = u64::from_le_bytes(entry_size_buf) as usize;\n\n        // Read the serialized entry itself.\n        let mut entry_buf = vec![0; entry_size];\n        if let Err(err) = self.reader.read_exact(&mut entry_buf) {\n            if err.kind() == io::ErrorKind::UnexpectedEof {\n                self.trim_file(position)?;\n                return Ok(None);\n            }\n            return Err(ReadWalError::OtherIOError(err));\n        }\n\n        // Deserialize and return the entry.\n        let entry = bincode::deserialize(&entry_buf).map_err(ReadWalError::CouldNotDeserialize)?;\n        Ok(Some(entry))\n    }\n\n    /// Trims the file to the given length and logs a warning if any bytes were removed.\n    ///\n    /// This should be called with the position where the last complete entry ended. Incomplete\n    /// entries can safely be removed because we only send messages after writing them and\n    /// flushing the buffer, so we won't remove any messages that we already sent.\n    fn trim_file(&mut self, position: u64) -> Result<(), ReadWalError> {\n        if self.reader.stream_position()? 
> position {\n            warn!(\"removing incomplete entry from WAL\");\n            self.reader.get_mut().set_len(position)?;\n        }\n        Ok(())\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::iter::from_fn;\n\n    use casper_types::Timestamp;\n    use serde::{Deserialize, Serialize};\n    use tempfile::tempdir;\n\n    use super::*;\n\n    #[derive(Serialize, Deserialize, Debug, PartialEq)]\n    enum TestWalEntry {\n        Variant1(u32),\n        Variant2(Timestamp),\n    }\n\n    impl WalEntry for TestWalEntry {}\n\n    #[test]\n    // Tests the functionality of the ReadWal and WriteWal by constructing one and manipulating it.\n    fn test_read_write_wal() {\n        // Create a bunch of test entries\n        let mut entries = vec![\n            TestWalEntry::Variant1(0),\n            TestWalEntry::Variant1(1),\n            TestWalEntry::Variant1(2),\n            TestWalEntry::Variant2(Timestamp::zero()),\n        ];\n\n        // Create a temporary directory which will be removed upon dropping the dir variable,\n        // using it to store the WAL file.\n        let dir = tempdir().unwrap();\n        let path = dir.path().join(\"wal\");\n\n        let read_entries = || {\n            let mut read_wal: ReadWal<TestWalEntry> = ReadWal::new(&path).unwrap();\n            from_fn(move || read_wal.read_next_entry().unwrap()).collect::<Vec<_>>()\n        };\n\n        assert_eq!(read_entries(), vec![]);\n\n        // Record all of the test entries into the WAL file\n        let mut write_wal: WriteWal<TestWalEntry> = WriteWal::new(&path).unwrap();\n\n        entries.iter().for_each(move |entry| {\n            write_wal.record_entry(entry).unwrap();\n        });\n\n        // Assure that the entries were properly written\n        assert_eq!(entries, read_entries());\n\n        // Now, we go through and corrupt each entry and ensure that its actually removed by the\n        // ReadWal when we fail to read it.\n        loop {\n            // If there are 
no more entries, we're done\n            if entries.is_empty() {\n                break;\n            }\n\n            // We create a File in order to drop the last byte from the file\n            let mut file = OpenOptions::new()\n                .append(true)\n                .create(true)\n                .open(&path)\n                .unwrap();\n\n            file.seek(io::SeekFrom::End(-1)).unwrap();\n            let position = file.stream_position().unwrap();\n            file.set_len(position).unwrap();\n\n            // We pop the entry off from our in-memory list of entries, then check if that equals\n            // the on-disk WAL\n            entries.pop().unwrap();\n\n            assert_eq!(entries, read_entries());\n        }\n\n        // Finally, we assure that there are no more entries at all in the WAL\n        assert_eq!(entries, read_entries());\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/utils/weight.rs",
    "content": "use std::{\n    iter::Sum,\n    ops::{Div, Mul},\n};\n\nuse datasize::DataSize;\nuse derive_more::{Add, AddAssign, From, Sub, SubAssign, Sum};\nuse serde::{Deserialize, Serialize};\n\n/// A vote weight.\n#[derive(\n    Copy,\n    Clone,\n    DataSize,\n    Default,\n    Debug,\n    PartialEq,\n    Eq,\n    PartialOrd,\n    Ord,\n    Add,\n    Serialize,\n    Deserialize,\n    Sub,\n    AddAssign,\n    SubAssign,\n    Sum,\n    From,\n)]\npub struct Weight(pub u64);\n\nimpl Weight {\n    /// Checked addition. Returns `None` if overflow occurred.\n    pub fn checked_add(self, rhs: Weight) -> Option<Weight> {\n        Some(Weight(self.0.checked_add(rhs.0)?))\n    }\n\n    /// Saturating addition. Returns `Weight(u64::MAX)` if overflow would occur.\n    #[allow(dead_code)]\n    pub fn saturating_add(self, rhs: Weight) -> Weight {\n        Weight(self.0.saturating_add(rhs.0))\n    }\n\n    /// Saturating subtraction. Returns `Weight(0)` if underflow would occur.\n    pub fn saturating_sub(self, rhs: Weight) -> Weight {\n        Weight(self.0.saturating_sub(rhs.0))\n    }\n\n    /// Returns `true` if this weight is zero.\n    pub fn is_zero(self) -> bool {\n        self.0 == 0\n    }\n}\n\nimpl<'a> Sum<&'a Weight> for Weight {\n    fn sum<I: Iterator<Item = &'a Weight>>(iter: I) -> Self {\n        Weight(iter.map(|w| w.0).sum())\n    }\n}\n\nimpl Mul<u64> for Weight {\n    type Output = Self;\n\n    #[allow(clippy::arithmetic_side_effects)] // The caller needs to prevent overflows.\n    fn mul(self, rhs: u64) -> Self {\n        Weight(self.0 * rhs)\n    }\n}\n\nimpl Div<u64> for Weight {\n    type Output = Self;\n\n    #[allow(clippy::arithmetic_side_effects)] // The caller needs to avoid dividing by zero.\n    fn div(self, rhs: u64) -> Self {\n        Weight(self.0 / rhs)\n    }\n}\n\nimpl From<Weight> for u128 {\n    fn from(Weight(w): Weight) -> u128 {\n        u128::from(w)\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus/utils.rs",
    "content": "//! Various utilities relevant to consensus.\n\nmod validators;\npub(crate) mod wal;\nmod weight;\n\npub use validators::{Validator, ValidatorIndex, ValidatorMap, Validators};\npub use weight::Weight;\n"
  },
  {
    "path": "node/src/components/consensus/validator_change.rs",
    "content": "use std::collections::HashSet;\n\nuse casper_types::{PublicKey, ValidatorChange};\n\nuse super::era_supervisor::Era;\n\npub(super) struct ValidatorChanges(pub(super) Vec<(PublicKey, ValidatorChange)>);\n\nimpl ValidatorChanges {\n    pub(super) fn new(era0: &Era, era1: &Era) -> Self {\n        let era0_metadata = EraMetadata::from(era0);\n        let era1_metadata = EraMetadata::from(era1);\n        Self::new_from_metadata(era0_metadata, era1_metadata)\n    }\n\n    fn new_from_metadata(era0_metadata: EraMetadata, era1_metadata: EraMetadata) -> Self {\n        // Validators in `era0` but not `era1` are labeled `Removed`.\n        let removed_iter = era0_metadata\n            .validators\n            .difference(&era1_metadata.validators)\n            .map(|&public_key| (public_key.clone(), ValidatorChange::Removed));\n\n        // Validators in `era1` but not `era0` are labeled `Added`.\n        let added_iter = era1_metadata\n            .validators\n            .difference(&era0_metadata.validators)\n            .map(|&public_key| (public_key.clone(), ValidatorChange::Added));\n\n        // Only those seen as faulty in `era1` are labeled `SeenAsFaulty`.\n        let faulty_iter = era1_metadata\n            .seen_as_faulty\n            .iter()\n            .map(|&public_key| (public_key.clone(), ValidatorChange::SeenAsFaulty));\n\n        // Faulty peers in `era1` but not `era0` which are also validators in `era1` are labeled\n        // `Banned`.\n        let banned_iter = era1_metadata\n            .faulty\n            .difference(era0_metadata.faulty)\n            .filter_map(|public_key| {\n                if era1_metadata.validators.contains(public_key) {\n                    Some((public_key.clone(), ValidatorChange::Banned))\n                } else {\n                    None\n                }\n            });\n\n        // Peers which cannot propose in `era1` but can in `era0` and which are also validators in\n        // `era1` are labeled 
`CannotPropose`.\n        let cannot_propose_iter = era1_metadata\n            .cannot_propose\n            .difference(era0_metadata.cannot_propose)\n            .filter_map(|public_key| {\n                if era1_metadata.validators.contains(public_key) {\n                    Some((public_key.clone(), ValidatorChange::CannotPropose))\n                } else {\n                    None\n                }\n            });\n\n        ValidatorChanges(\n            removed_iter\n                .chain(faulty_iter)\n                .chain(added_iter)\n                .chain(banned_iter)\n                .chain(cannot_propose_iter)\n                .collect(),\n        )\n    }\n}\n\n#[derive(Clone)]\nstruct EraMetadata<'a> {\n    validators: HashSet<&'a PublicKey>,\n    seen_as_faulty: Vec<&'a PublicKey>,\n    faulty: &'a HashSet<PublicKey>,\n    cannot_propose: &'a HashSet<PublicKey>,\n}\n\nimpl<'a> From<&'a Era> for EraMetadata<'a> {\n    fn from(era: &'a Era) -> Self {\n        let seen_as_faulty = era\n            .consensus\n            .validators_with_evidence()\n            .into_iter()\n            .collect();\n\n        let validators = era.validators().keys().collect();\n        let faulty = &era.faulty;\n        let cannot_propose = &era.cannot_propose;\n        Self {\n            validators,\n            seen_as_faulty,\n            faulty,\n            cannot_propose,\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::iter;\n\n    use casper_types::testing::TestRng;\n\n    use super::*;\n\n    fn preset_validators(rng: &mut TestRng) -> HashSet<PublicKey> {\n        iter::repeat_with(|| PublicKey::random(rng))\n            .take(5)\n            .collect()\n    }\n\n    #[test]\n    fn should_report_added() {\n        let mut rng = crate::new_rng();\n        let validators = preset_validators(&mut rng);\n\n        let era0_metadata = EraMetadata {\n            validators: validators.iter().collect(),\n            seen_as_faulty: vec![],\n     
       faulty: &Default::default(),\n            cannot_propose: &Default::default(),\n        };\n\n        let mut era1_metadata = era0_metadata.clone();\n        let added_validator = PublicKey::random(&mut rng);\n        let expected_change = vec![(added_validator.clone(), ValidatorChange::Added)];\n        era1_metadata.validators.insert(&added_validator);\n\n        let actual_change = ValidatorChanges::new_from_metadata(era0_metadata, era1_metadata);\n        assert_eq!(expected_change, actual_change.0);\n    }\n\n    #[test]\n    fn should_report_removed() {\n        let mut rng = crate::new_rng();\n        let validators = preset_validators(&mut rng);\n\n        let era1_metadata = EraMetadata {\n            validators: validators.iter().collect(),\n            seen_as_faulty: vec![],\n            faulty: &Default::default(),\n            cannot_propose: &Default::default(),\n        };\n\n        let mut era0_metadata = era1_metadata.clone();\n        let removed_validator = PublicKey::random(&mut rng);\n        let expected_change = vec![(removed_validator.clone(), ValidatorChange::Removed)];\n        era0_metadata.validators.insert(&removed_validator);\n\n        let actual_change = ValidatorChanges::new_from_metadata(era0_metadata, era1_metadata);\n        assert_eq!(expected_change, actual_change.0)\n    }\n\n    #[test]\n    fn should_report_seen_as_faulty_in_new_era() {\n        let mut rng = crate::new_rng();\n\n        let seen_as_faulty_in_old_era = PublicKey::random(&mut rng);\n        let era0_metadata = EraMetadata {\n            validators: Default::default(),\n            seen_as_faulty: vec![&seen_as_faulty_in_old_era],\n            faulty: &Default::default(),\n            cannot_propose: &Default::default(),\n        };\n        let seen_as_faulty_in_new_era = PublicKey::random(&mut rng);\n        let era1_metadata = EraMetadata {\n            validators: Default::default(),\n            seen_as_faulty: vec![&seen_as_faulty_in_new_era],\n 
           faulty: &Default::default(),\n            cannot_propose: &Default::default(),\n        };\n\n        let actual_change = ValidatorChanges::new_from_metadata(era0_metadata, era1_metadata);\n        let expected_change = vec![(seen_as_faulty_in_new_era, ValidatorChange::SeenAsFaulty)];\n        assert_eq!(expected_change, actual_change.0)\n    }\n\n    #[test]\n    fn should_report_banned() {\n        let mut rng = crate::new_rng();\n        let validators = preset_validators(&mut rng);\n\n        let faulty = validators.iter().next().unwrap();\n\n        let era0_metadata = EraMetadata {\n            validators: validators.iter().collect(),\n            seen_as_faulty: vec![],\n            faulty: &Default::default(),\n            cannot_propose: &Default::default(),\n        };\n\n        let mut era1_metadata = era0_metadata.clone();\n        let faulty_set = iter::once(faulty.clone()).collect();\n        era1_metadata.faulty = &faulty_set;\n\n        let expected_change = vec![(faulty.clone(), ValidatorChange::Banned)];\n        let actual_change = ValidatorChanges::new_from_metadata(era0_metadata, era1_metadata);\n        assert_eq!(expected_change, actual_change.0)\n    }\n\n    #[test]\n    fn should_not_report_banned_if_in_both_eras() {\n        let mut rng = crate::new_rng();\n        let validators = preset_validators(&mut rng);\n\n        let faulty = validators.iter().next().unwrap();\n\n        let era0_metadata = EraMetadata {\n            validators: validators.iter().collect(),\n            seen_as_faulty: vec![],\n            faulty: &iter::once(faulty.clone()).collect(),\n            cannot_propose: &Default::default(),\n        };\n        let era1_metadata = era0_metadata.clone();\n\n        let actual_change = ValidatorChanges::new_from_metadata(era0_metadata, era1_metadata);\n        assert!(actual_change.0.is_empty());\n    }\n\n    #[test]\n    fn should_not_report_banned_if_not_a_validator_in_new_era() {\n        let mut rng = 
crate::new_rng();\n        let validators = preset_validators(&mut rng);\n\n        let faulty = PublicKey::random(&mut rng);\n\n        let era0_metadata = EraMetadata {\n            validators: validators.iter().collect(),\n            seen_as_faulty: vec![],\n            faulty: &Default::default(),\n            cannot_propose: &Default::default(),\n        };\n\n        let mut era1_metadata = era0_metadata.clone();\n        let faulty_set = iter::once(faulty).collect();\n        era1_metadata.faulty = &faulty_set;\n\n        let actual_change = ValidatorChanges::new_from_metadata(era0_metadata, era1_metadata);\n        assert!(actual_change.0.is_empty());\n    }\n\n    #[test]\n    fn should_report_cannot_propose() {\n        let mut rng = crate::new_rng();\n        let validators = preset_validators(&mut rng);\n\n        let cannot_propose = validators.iter().next().unwrap();\n\n        let era0_metadata = EraMetadata {\n            validators: validators.iter().collect(),\n            seen_as_faulty: vec![],\n            faulty: &Default::default(),\n            cannot_propose: &Default::default(),\n        };\n\n        let mut era1_metadata = era0_metadata.clone();\n        let cannot_propose_set = iter::once(cannot_propose.clone()).collect();\n        era1_metadata.cannot_propose = &cannot_propose_set;\n\n        let expected_change = vec![(cannot_propose.clone(), ValidatorChange::CannotPropose)];\n        let actual_change = ValidatorChanges::new_from_metadata(era0_metadata, era1_metadata);\n        assert_eq!(expected_change, actual_change.0)\n    }\n\n    #[test]\n    fn should_not_report_cannot_propose_if_in_both_eras() {\n        let mut rng = crate::new_rng();\n        let validators = preset_validators(&mut rng);\n\n        let cannot_propose = validators.iter().next().unwrap();\n\n        let era0_metadata = EraMetadata {\n            validators: validators.iter().collect(),\n            seen_as_faulty: vec![],\n            faulty: 
&Default::default(),\n            cannot_propose: &iter::once(cannot_propose.clone()).collect(),\n        };\n        let era1_metadata = era0_metadata.clone();\n\n        let actual_change = ValidatorChanges::new_from_metadata(era0_metadata, era1_metadata);\n        assert!(actual_change.0.is_empty());\n    }\n\n    #[test]\n    fn should_not_report_cannot_propose_if_not_a_validator_in_new_era() {\n        let mut rng = crate::new_rng();\n        let validators = preset_validators(&mut rng);\n\n        let cannot_propose = PublicKey::random(&mut rng);\n\n        let era0_metadata = EraMetadata {\n            validators: validators.iter().collect(),\n            seen_as_faulty: vec![],\n            faulty: &Default::default(),\n            cannot_propose: &Default::default(),\n        };\n\n        let mut era1_metadata = era0_metadata.clone();\n        let cannot_propose_set = iter::once(cannot_propose).collect();\n        era1_metadata.cannot_propose = &cannot_propose_set;\n\n        let actual_change = ValidatorChanges::new_from_metadata(era0_metadata, era1_metadata);\n        assert!(actual_change.0.is_empty());\n    }\n\n    #[test]\n    fn should_report_no_status_change() {\n        let mut rng = crate::new_rng();\n        let validators = preset_validators(&mut rng);\n\n        let era0_metadata = EraMetadata {\n            validators: validators.iter().collect(),\n            seen_as_faulty: validators.iter().collect(),\n            faulty: &validators,\n            cannot_propose: &validators,\n        };\n        let era1_metadata = EraMetadata {\n            validators: validators.iter().collect(),\n            seen_as_faulty: vec![],\n            faulty: &validators,\n            cannot_propose: &validators,\n        };\n\n        let actual_change = ValidatorChanges::new_from_metadata(era0_metadata, era1_metadata);\n        assert!(actual_change.0.is_empty());\n    }\n}\n"
  },
  {
    "path": "node/src/components/consensus.rs",
    "content": "//! The consensus component. Provides distributed consensus among the nodes in the network.\n\n#![warn(clippy::arithmetic_side_effects)]\n\nmod cl_context;\nmod config;\nmod consensus_protocol;\nmod era_supervisor;\n#[macro_use]\npub mod highway_core;\npub(crate) mod error;\nmod leader_sequence;\nmod metrics;\npub mod protocols;\n#[cfg(test)]\npub(crate) mod tests;\nmod traits;\npub mod utils;\nmod validator_change;\n\nuse std::{\n    borrow::Cow,\n    fmt::{self, Debug, Display, Formatter},\n    sync::Arc,\n    time::Duration,\n};\n\nuse datasize::DataSize;\nuse derive_more::From;\nuse serde::{Deserialize, Serialize};\nuse tracing::{info, trace};\n\nuse casper_types::{BlockHash, BlockHeader, EraId, Timestamp};\n\nuse crate::{\n    components::Component,\n    effect::{\n        announcements::{\n            ConsensusAnnouncement, FatalAnnouncement, MetaBlockAnnouncement,\n            PeerBehaviorAnnouncement,\n        },\n        diagnostics_port::DumpConsensusStateRequest,\n        incoming::{ConsensusDemand, ConsensusMessageIncoming},\n        requests::{\n            BlockValidationRequest, ChainspecRawBytesRequest, ConsensusRequest,\n            ContractRuntimeRequest, NetworkInfoRequest, NetworkRequest, StorageRequest,\n            TransactionBufferRequest,\n        },\n        EffectBuilder, EffectExt, Effects,\n    },\n    failpoints::FailpointActivation,\n    protocol::Message,\n    reactor::ReactorEvent,\n    types::{BlockPayload, InvalidProposalError, NodeId},\n    NodeRng,\n};\nuse protocols::{highway::HighwayProtocol, zug::Zug};\nuse traits::Context;\n\npub use cl_context::ClContext;\npub(crate) use config::{ChainspecConsensusExt, Config};\npub(crate) use consensus_protocol::{BlockContext, ProposedBlock};\npub(crate) use era_supervisor::{debug::EraDump, EraSupervisor, SerializedMessage};\n#[cfg(test)]\npub(crate) use highway_core::highway::Vertex as HighwayVertex;\npub(crate) use leader_sequence::LeaderSequence;\npub(crate) use 
protocols::highway::max_rounds_per_era;\n#[cfg(test)]\npub(crate) use protocols::highway::HighwayMessage;\n\nconst COMPONENT_NAME: &str = \"consensus\";\n\n#[allow(clippy::arithmetic_side_effects)]\nmod relaxed {\n    // This module exists solely to exempt the `EnumDiscriminants` macro generated code from the\n    // module-wide `clippy::arithmetic_side_effects` lint.\n\n    use casper_types::{EraId, PublicKey};\n    use datasize::DataSize;\n    use serde::{Deserialize, Serialize};\n    use strum::EnumDiscriminants;\n\n    use super::era_supervisor::SerializedMessage;\n\n    #[derive(DataSize, Clone, Serialize, Deserialize, EnumDiscriminants)]\n    #[strum_discriminants(derive(strum::EnumIter))]\n    pub(crate) enum ConsensusMessage {\n        /// A protocol message, to be handled by the instance in the specified era.\n        Protocol {\n            era_id: EraId,\n            payload: SerializedMessage,\n        },\n        /// A request for evidence against the specified validator, from any era that is still\n        /// bonded in `era_id`.\n        EvidenceRequest { era_id: EraId, pub_key: PublicKey },\n    }\n}\npub(crate) use relaxed::{ConsensusMessage, ConsensusMessageDiscriminants};\n\n/// A request to be handled by the consensus protocol instance in a particular era.\n#[derive(DataSize, Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash, From)]\npub(crate) enum EraRequest<C>\nwhere\n    C: Context,\n{\n    Zug(protocols::zug::SyncRequest<C>),\n}\n\n/// A protocol request message, to be handled by the instance in the specified era.\n#[derive(DataSize, Clone, Serialize, Deserialize)]\npub(crate) struct ConsensusRequestMessage {\n    era_id: EraId,\n    payload: SerializedMessage,\n}\n\n/// An ID to distinguish different timers. What they are used for is specific to each consensus\n/// protocol implementation.\n#[derive(DataSize, Clone, Copy, Debug, Eq, PartialEq, Hash)]\npub struct TimerId(pub u8);\n\n/// An ID to distinguish queued actions. 
What they are used for is specific to each consensus\n/// protocol implementation.\n#[derive(DataSize, Clone, Copy, Debug, Eq, PartialEq, Hash)]\npub struct ActionId(pub u8);\n\n/// Payload for a block to be proposed.\n#[derive(DataSize, Debug, From)]\npub struct NewBlockPayload {\n    pub(crate) era_id: EraId,\n    pub(crate) block_payload: Arc<BlockPayload>,\n    pub(crate) block_context: BlockContext<ClContext>,\n}\n\n/// The result of validation of a ProposedBlock.\n#[derive(DataSize, Debug, From)]\npub struct ResolveValidity {\n    era_id: EraId,\n    sender: NodeId,\n    proposed_block: ProposedBlock<ClContext>,\n    maybe_error: Option<Box<InvalidProposalError>>,\n}\n\n/// Consensus component event.\n#[derive(DataSize, Debug, From)]\npub(crate) enum Event {\n    /// An incoming network message.\n    #[from]\n    Incoming(ConsensusMessageIncoming),\n    /// A variant used with failpoints - when a message arrives, we fire this event with a delay,\n    /// and it also causes the message to be handled.\n    DelayedIncoming(ConsensusMessageIncoming),\n    /// An incoming demand message.\n    #[from]\n    DemandIncoming(ConsensusDemand),\n    /// A scheduled event to be handled by a specified era.\n    Timer {\n        era_id: EraId,\n        timestamp: Timestamp,\n        timer_id: TimerId,\n    },\n    /// A queued action to be handled by a specific era.\n    Action { era_id: EraId, action_id: ActionId },\n    /// We are receiving the data we require to propose a new block.\n    NewBlockPayload(NewBlockPayload),\n    #[from]\n    ConsensusRequest(ConsensusRequest),\n    /// A new block has been added to the linear chain.\n    BlockAdded {\n        header: Box<BlockHeader>,\n        header_hash: BlockHash,\n    },\n    /// The proposed block has been validated.\n    ResolveValidity(ResolveValidity),\n    /// Deactivate the era with the given ID, unless the number of faulty validators increases.\n    DeactivateEra {\n        era_id: EraId,\n        faulty_num: 
usize,\n        delay: Duration,\n    },\n    /// Dump state for debugging purposes.\n    #[from]\n    DumpState(DumpConsensusStateRequest),\n}\n\nimpl Debug for ConsensusMessage {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            ConsensusMessage::Protocol { era_id, payload: _ } => {\n                write!(f, \"Protocol {{ era_id: {:?}, .. }}\", era_id)\n            }\n            ConsensusMessage::EvidenceRequest { era_id, pub_key } => f\n                .debug_struct(\"EvidenceRequest\")\n                .field(\"era_id\", era_id)\n                .field(\"pub_key\", pub_key)\n                .finish(),\n        }\n    }\n}\n\nimpl Display for ConsensusMessage {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            ConsensusMessage::Protocol { era_id, payload } => {\n                write!(\n                    f,\n                    \"protocol message ({} bytes) in {}\",\n                    payload.as_raw().len(),\n                    era_id\n                )\n            }\n            ConsensusMessage::EvidenceRequest { era_id, pub_key } => write!(\n                f,\n                \"request for evidence of fault by {} in {} or earlier\",\n                pub_key, era_id,\n            ),\n        }\n    }\n}\n\nimpl Debug for ConsensusRequestMessage {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            f,\n            \"ConsensusRequestMessage {{ era_id: {:?}, .. 
}}\",\n            self.era_id\n        )\n    }\n}\n\nimpl Display for ConsensusRequestMessage {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"protocol request {:?} in {}\", self.payload, self.era_id)\n    }\n}\n\nimpl Display for Event {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            Event::Incoming(ConsensusMessageIncoming { sender, message }) => {\n                write!(f, \"message from {:?}: {}\", sender, message)\n            }\n            Event::DelayedIncoming(ConsensusMessageIncoming { sender, message }) => {\n                write!(f, \"delayed message from {:?}: {}\", sender, message)\n            }\n            Event::DemandIncoming(demand) => {\n                write!(f, \"demand from {:?}: {}\", demand.sender, demand.request_msg)\n            }\n            Event::Timer {\n                era_id,\n                timestamp,\n                timer_id,\n            } => write!(\n                f,\n                \"timer (ID {}) for {} scheduled for timestamp {}\",\n                timer_id.0, era_id, timestamp,\n            ),\n            Event::Action { era_id, action_id } => {\n                write!(f, \"action (ID {}) for {}\", action_id.0, era_id)\n            }\n            Event::NewBlockPayload(NewBlockPayload {\n                era_id,\n                block_payload,\n                block_context,\n            }) => write!(\n                f,\n                \"New proposed block for era {:?}: {:?}, {:?}\",\n                era_id, block_payload, block_context\n            ),\n            Event::ConsensusRequest(request) => write!(\n                f,\n                \"A request for consensus component hash been received: {:?}\",\n                request\n            ),\n            Event::BlockAdded {\n                header: _,\n                header_hash,\n            } => write!(\n                f,\n                \"A block has been added to the 
linear chain: {}\",\n                header_hash,\n            ),\n            Event::ResolveValidity(ResolveValidity {\n                era_id,\n                sender,\n                proposed_block,\n                maybe_error,\n            }) => write!(\n                f,\n                \"Proposed block received from {:?} for {} is {}: {:?}\",\n                sender,\n                era_id,\n                if maybe_error.is_none() {\n                    \"valid\".to_string()\n                } else {\n                    format!(\"invalid ({:?})\", maybe_error).to_string()\n                },\n                proposed_block,\n            ),\n            Event::DeactivateEra {\n                era_id, faulty_num, ..\n            } => write!(\n                f,\n                \"Deactivate old {} unless additional faults are observed; faults so far: {}\",\n                era_id, faulty_num\n            ),\n            Event::DumpState(req) => Display::fmt(req, f),\n        }\n    }\n}\n\n/// A helper trait whose bounds represent the requirements for a reactor event that `EraSupervisor`\n/// can work with.\npub(crate) trait ReactorEventT:\n    ReactorEvent\n    + From<Event>\n    + Send\n    + From<NetworkRequest<Message>>\n    + From<ConsensusDemand>\n    + From<NetworkInfoRequest>\n    + From<TransactionBufferRequest>\n    + From<ConsensusAnnouncement>\n    + From<BlockValidationRequest>\n    + From<StorageRequest>\n    + From<ContractRuntimeRequest>\n    + From<ChainspecRawBytesRequest>\n    + From<PeerBehaviorAnnouncement>\n    + From<MetaBlockAnnouncement>\n    + From<FatalAnnouncement>\n{\n}\n\nimpl<REv> ReactorEventT for REv where\n    REv: ReactorEvent\n        + From<Event>\n        + Send\n        + From<ConsensusDemand>\n        + From<NetworkRequest<Message>>\n        + From<NetworkInfoRequest>\n        + From<TransactionBufferRequest>\n        + From<ConsensusAnnouncement>\n        + From<BlockValidationRequest>\n        + 
From<StorageRequest>\n        + From<ContractRuntimeRequest>\n        + From<ChainspecRawBytesRequest>\n        + From<PeerBehaviorAnnouncement>\n        + From<MetaBlockAnnouncement>\n        + From<FatalAnnouncement>\n{\n}\n\nmod specimen_support {\n    use crate::utils::specimen::{largest_variant, Cache, LargestSpecimen, SizeEstimator};\n\n    use super::{\n        protocols::{highway, zug},\n        ClContext, ConsensusMessage, ConsensusMessageDiscriminants, ConsensusRequestMessage,\n        EraRequest, SerializedMessage,\n    };\n\n    impl LargestSpecimen for ConsensusMessage {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            largest_variant::<Self, ConsensusMessageDiscriminants, _, _>(estimator, |variant| {\n                match variant {\n                    ConsensusMessageDiscriminants::Protocol => {\n                        let zug_payload = SerializedMessage::from_message(\n                            &zug::Message::<ClContext>::largest_specimen(estimator, cache),\n                        );\n                        let highway_payload = SerializedMessage::from_message(\n                            &highway::HighwayMessage::<ClContext>::largest_specimen(\n                                estimator, cache,\n                            ),\n                        );\n\n                        let payload = if zug_payload.as_raw().len() > highway_payload.as_raw().len()\n                        {\n                            zug_payload\n                        } else {\n                            highway_payload\n                        };\n\n                        ConsensusMessage::Protocol {\n                            era_id: LargestSpecimen::largest_specimen(estimator, cache),\n                            payload,\n                        }\n                    }\n                    ConsensusMessageDiscriminants::EvidenceRequest => {\n                        ConsensusMessage::EvidenceRequest 
{\n                            era_id: LargestSpecimen::largest_specimen(estimator, cache),\n                            pub_key: LargestSpecimen::largest_specimen(estimator, cache),\n                        }\n                    }\n                }\n            })\n        }\n    }\n\n    impl LargestSpecimen for ConsensusRequestMessage {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            let zug_sync_request = SerializedMessage::from_message(\n                &zug::SyncRequest::<ClContext>::largest_specimen(estimator, cache),\n            );\n\n            ConsensusRequestMessage {\n                era_id: LargestSpecimen::largest_specimen(estimator, cache),\n                payload: zug_sync_request,\n            }\n        }\n    }\n\n    impl LargestSpecimen for EraRequest<ClContext> {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            EraRequest::Zug(LargestSpecimen::largest_specimen(estimator, cache))\n        }\n    }\n}\n\nimpl<REv> Component<REv> for EraSupervisor\nwhere\n    REv: ReactorEventT,\n{\n    type Event = Event;\n\n    fn name(&self) -> &str {\n        COMPONENT_NAME\n    }\n\n    fn activate_failpoint(&mut self, activation: &FailpointActivation) {\n        self.message_delay_failpoint.update_from(activation);\n        self.proposal_delay_failpoint.update_from(activation);\n    }\n\n    fn handle_event(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event> {\n        trace!(\"{:?}\", event);\n        match event {\n            Event::Timer {\n                era_id,\n                timestamp,\n                timer_id,\n            } => self.handle_timer(effect_builder, rng, era_id, timestamp, timer_id),\n            Event::Action { era_id, action_id } => {\n                self.handle_action(effect_builder, rng, era_id, action_id)\n        
    }\n            Event::Incoming(ConsensusMessageIncoming { sender, message }) => {\n                let delay_by = self.message_delay_failpoint.fire(rng).cloned();\n                if let Some(delay) = delay_by {\n                    effect_builder\n                        .set_timeout(Duration::from_millis(delay))\n                        .event(move |_| {\n                            Event::DelayedIncoming(ConsensusMessageIncoming { sender, message })\n                        })\n                } else {\n                    self.handle_message(effect_builder, rng, sender, *message)\n                }\n            }\n            Event::DelayedIncoming(ConsensusMessageIncoming { sender, message }) => {\n                self.handle_message(effect_builder, rng, sender, *message)\n            }\n            Event::DemandIncoming(ConsensusDemand {\n                sender,\n                request_msg: demand,\n                auto_closing_responder,\n            }) => self.handle_demand(effect_builder, rng, sender, demand, auto_closing_responder),\n            Event::NewBlockPayload(new_block_payload) => {\n                self.handle_new_block_payload(effect_builder, rng, new_block_payload)\n            }\n            Event::BlockAdded {\n                header,\n                header_hash: _,\n            } => self.handle_block_added(effect_builder, rng, *header),\n            Event::ResolveValidity(resolve_validity) => {\n                self.resolve_validity(effect_builder, rng, resolve_validity)\n            }\n            Event::DeactivateEra {\n                era_id,\n                faulty_num,\n                delay,\n            } => self.handle_deactivate_era(effect_builder, era_id, faulty_num, delay),\n            Event::ConsensusRequest(ConsensusRequest::Status(responder)) => self.status(responder),\n            Event::ConsensusRequest(ConsensusRequest::ValidatorChanges(responder)) => {\n                let validator_changes = 
self.get_validator_changes();\n                responder.respond(validator_changes).ignore()\n            }\n            Event::DumpState(req @ DumpConsensusStateRequest { era_id, .. }) => {\n                let current_era = match self.current_era() {\n                    None => {\n                        return req\n                            .answer(Err(Cow::Owned(\"consensus not initialized\".to_string())))\n                            .ignore()\n                    }\n                    Some(era_id) => era_id,\n                };\n\n                let requested_era = era_id.unwrap_or(current_era);\n\n                // We emit some log message to get some performance information and give the\n                // operator a chance to find out why their node is busy.\n                info!(era_id=%requested_era.value(), was_latest=era_id.is_none(), \"dumping era via diagnostics port\");\n\n                let era_dump_result = self\n                    .open_eras()\n                    .get(&requested_era)\n                    .ok_or_else(|| {\n                        Cow::Owned(format!(\n                            \"could not dump consensus, {} not found\",\n                            requested_era\n                        ))\n                    })\n                    .and_then(|era| EraDump::dump_era(era, requested_era));\n\n                match era_dump_result {\n                    Ok(dump) => req.answer(Ok(&dump)).ignore(),\n                    Err(err) => req.answer(Err(err)).ignore(),\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/contract_runtime/config.rs",
    "content": "use datasize::DataSize;\nuse serde::{Deserialize, Serialize};\nuse tracing::warn;\n\nuse casper_types::OS_PAGE_SIZE;\n\nconst DEFAULT_MAX_GLOBAL_STATE_SIZE: usize = 805_306_368_000; // 750 GiB\nconst DEFAULT_MAX_READERS: u32 = 512;\nconst DEFAULT_MAX_QUERY_DEPTH: u64 = 5;\nconst DEFAULT_MANUAL_SYNC_ENABLED: bool = true;\n\n/// Contract runtime configuration.\n#[derive(Clone, Copy, DataSize, Debug, Deserialize, Serialize)]\n// Disallow unknown fields to ensure config files and command-line overrides contain valid keys.\n#[serde(deny_unknown_fields)]\npub struct Config {\n    /// The maximum size of the database to use for the global state store.\n    ///\n    /// Defaults to 805,306,368,000 == 750 GiB.\n    ///\n    /// The size should be a multiple of the OS page size.\n    pub max_global_state_size: Option<usize>,\n    /// The maximum number of readers to use for the global state store.\n    ///\n    /// Defaults to 512.\n    pub max_readers: Option<u32>,\n    /// The limit of depth of recursive global state queries.\n    ///\n    /// Defaults to 5.\n    pub max_query_depth: Option<u64>,\n    /// Enable synchronizing to disk only after each block is written.\n    ///\n    /// Defaults to `true`.\n    pub enable_manual_sync: Option<bool>,\n}\n\nimpl Config {\n    /// Max global state size in bytes.\n    pub fn max_global_state_size_or_default(&self) -> usize {\n        let value = self\n            .max_global_state_size\n            .unwrap_or(DEFAULT_MAX_GLOBAL_STATE_SIZE);\n        if value % *OS_PAGE_SIZE != 0 {\n            warn!(\n                \"maximum global state database size {} is not multiple of system page size {}\",\n                value, *OS_PAGE_SIZE\n            );\n        }\n        value\n    }\n\n    /// Max lmdb readers.\n    pub fn max_readers_or_default(&self) -> u32 {\n        self.max_readers.unwrap_or(DEFAULT_MAX_READERS)\n    }\n\n    /// Max query depth.\n    pub fn max_query_depth_or_default(&self) -> u64 {\n        
self.max_query_depth.unwrap_or(DEFAULT_MAX_QUERY_DEPTH)\n    }\n\n    /// Is manual sync enabled.\n    pub fn manual_sync_enabled_or_default(&self) -> bool {\n        self.enable_manual_sync\n            .unwrap_or(DEFAULT_MANUAL_SYNC_ENABLED)\n    }\n}\n\nimpl Default for Config {\n    fn default() -> Self {\n        Config {\n            max_global_state_size: Some(DEFAULT_MAX_GLOBAL_STATE_SIZE),\n            max_readers: Some(DEFAULT_MAX_READERS),\n            max_query_depth: Some(DEFAULT_MAX_QUERY_DEPTH),\n            enable_manual_sync: Some(DEFAULT_MANUAL_SYNC_ENABLED),\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/contract_runtime/error.rs",
    "content": "//! Errors that the contract runtime component may raise.\nuse derive_more::From;\nuse std::collections::BTreeMap;\n\nuse serde::Serialize;\nuse thiserror::Error;\n\nuse casper_execution_engine::engine_state::Error as EngineStateError;\nuse casper_storage::{\n    data_access_layer::{\n        forced_undelegate::ForcedUndelegateError, BlockRewardsError, FeeError, StepError,\n    },\n    global_state::error::Error as GlobalStateError,\n    tracking_copy::TrackingCopyError,\n};\nuse casper_types::{bytesrepr, CLValueError, Digest, EraId, PublicKey, U512};\n\nuse crate::{\n    components::contract_runtime::ExecutionPreState,\n    types::{ChunkingError, ExecutableBlock, InternalEraReport},\n};\n\n/// Common state result errors.\n#[derive(Debug, Error)]\npub(crate) enum StateResultError {\n    /// Invalid state root hash.\n    #[error(\"invalid state root hash\")]\n    RootNotFound,\n    /// Value not found.\n    #[error(\"{0}\")]\n    ValueNotFound(String),\n    /// Failure result.\n    #[error(\"{0}\")]\n    Failure(TrackingCopyError),\n}\n\n/// An error returned from mis-configuring the contract runtime component.\n#[derive(Debug, Error)]\npub(crate) enum ConfigError {\n    /// Error initializing the LMDB environment.\n    #[error(\"failed to initialize LMDB environment for contract runtime: {0}\")]\n    GlobalState(#[from] GlobalStateError),\n    /// Error initializing metrics.\n    #[error(\"failed to initialize metrics for contract runtime: {0}\")]\n    Prometheus(#[from] prometheus::Error),\n}\n\n/// An enum that represents all possible error conditions of a `contract_runtime` component.\n#[derive(Debug, Error, From)]\npub(crate) enum ContractRuntimeError {\n    /// The provided serialized id cannot be deserialized properly.\n    #[error(\"error deserializing id: {0}\")]\n    InvalidSerializedId(#[source] bincode::Error),\n    // It was not possible to get trie with the specified id\n    #[error(\"error retrieving trie by id: {0}\")]\n    
FailedToRetrieveTrieById(#[source] GlobalStateError),\n    /// Chunking error.\n    #[error(\"failed to chunk the data {0}\")]\n    ChunkingError(#[source] ChunkingError),\n}\n\n/// An error during block execution.\n#[derive(Debug, Error, Serialize)]\npub enum BlockExecutionError {\n    /// Currently the contract runtime can only execute one commit at a time, so we cannot handle\n    /// more than one execution result.\n    #[error(\"more than one execution result\")]\n    MoreThanOneExecutionResult,\n    /// Both the block to be executed and the execution pre-state specify the height of the next\n    /// block. These must agree and this error will be thrown if they do not.\n    #[error(\n        \"block's height does not agree with execution pre-state. \\\n         block: {executable_block:?}, \\\n         execution pre-state: {execution_pre_state:?}\"\n    )]\n    WrongBlockHeight {\n        /// The finalized block the system attempted to execute.\n        executable_block: Box<ExecutableBlock>,\n        /// The state of the block chain prior to block execution that was to be used.\n        execution_pre_state: Box<ExecutionPreState>,\n    },\n    /// A core error thrown by the execution engine.\n    #[error(transparent)]\n    EngineState(\n        #[from]\n        #[serde(skip_serializing)]\n        EngineStateError,\n    ),\n    /// An error that occurred when trying to run the auction contract.\n    #[error(transparent)]\n    Step(\n        #[from]\n        #[serde(skip_serializing)]\n        StepError,\n    ),\n    #[error(transparent)]\n    DistributeFees(\n        #[from]\n        #[serde(skip_serializing)]\n        FeeError,\n    ),\n    #[error(transparent)]\n    DistributeBlockRewards(\n        #[from]\n        #[serde(skip_serializing)]\n        BlockRewardsError,\n    ),\n    #[error(transparent)]\n    ForcedUndelegate(\n        #[from]\n        #[serde(skip_serializing)]\n        ForcedUndelegateError,\n    ),\n    /// Failed to compute the approvals 
checksum.\n    #[error(\"failed to compute approvals checksum: {0}\")]\n    FailedToComputeApprovalsChecksum(bytesrepr::Error),\n    /// Failed to compute the execution results checksum.\n    #[error(\"failed to compute execution results checksum: {0}\")]\n    FailedToComputeExecutionResultsChecksum(bytesrepr::Error),\n    /// Failed to convert the checksum registry to a `CLValue`.\n    #[error(\"failed to convert the checksum registry to a clvalue: {0}\")]\n    ChecksumRegistryToCLValue(CLValueError),\n    /// `EraEnd`s need both an `EraReport` present and a map of the next era validator weights.\n    /// If one of them is not present while trying to construct an `EraEnd`, this error is\n    /// produced.\n    #[error(\n        \"cannot create era end unless we have both an era report and next era validators. \\\n         era report: {maybe_era_report:?}, \\\n         next era validator weights: {maybe_next_era_validator_weights:?}\"\n    )]\n    FailedToCreateEraEnd {\n        /// An optional `EraReport` we tried to use to construct an `EraEnd`.\n        maybe_era_report: Option<InternalEraReport>,\n        /// An optional map of the next era validator weights used to construct an `EraEnd`.\n        maybe_next_era_validator_weights: Option<(BTreeMap<PublicKey, U512>, u8)>,\n    },\n    /// An error that occurred while interacting with lmdb.\n    #[error(transparent)]\n    Lmdb(\n        #[from]\n        #[serde(skip_serializing)]\n        GlobalStateError,\n    ),\n    /// An error that occurred while getting era validators.\n    #[error(transparent)]\n    GetEraValidators(\n        #[from]\n        #[serde(skip_serializing)]\n        TrackingCopyError,\n    ),\n    /// A root state hash was not found.\n    #[error(\"Root state hash not found in global state.\")]\n    RootNotFound(Digest),\n    /// Missing checksum registry.\n    #[error(\"Missing checksum registry\")]\n    MissingChecksumRegistry,\n    #[error(\"Failed to get new era gas price when executing 
switch block\")]\n    FailedToGetNewEraGasPrice { era_id: EraId },\n    // Payment error.\n    #[error(\"Error while trying to set up payment for transaction: {0}\")]\n    PaymentError(String),\n    // Error attempting to set block global data.\n    #[error(\"Error while attempting to store block global data: {0}\")]\n    BlockGlobal(String),\n    #[error(\"No switch block header available for era: {0}\")]\n    /// No switch block available\n    NoSwitchBlockHash(u64),\n    #[error(\"Unsupported execution kind: {0}\")]\n    /// Unsupported execution kind\n    UnsupportedTransactionKind(u8),\n    #[error(\"Error while converting transaction to internal representation: {0}\")]\n    TransactionConversion(String),\n    /// Invalid gas limit amount.\n    #[error(\"Invalid gas limit amount: {0}\")]\n    InvalidGasLimit(U512),\n    /// Invalid transaction variant.\n    #[error(\"Invalid transaction variant\")]\n    InvalidTransactionVariant,\n    /// Invalid transaction arguments.\n    #[error(\"Invalid transaction arguments\")]\n    InvalidTransactionArgs,\n    #[error(\"Data Access Layer conflicts with chainspec setting: {0}\")]\n    InvalidAESetting(bool),\n}\n"
  },
  {
    "path": "node/src/components/contract_runtime/event.rs",
    "content": "use std::{\n    fmt,\n    fmt::{Display, Formatter},\n};\n\nuse derive_more::From;\nuse serde::Serialize;\n\nuse crate::effect::{\n    incoming::{TrieDemand, TrieRequestIncoming},\n    requests::ContractRuntimeRequest,\n};\n\n#[derive(Debug, From, Serialize)]\npub(crate) enum Event {\n    #[from]\n    ContractRuntimeRequest(ContractRuntimeRequest),\n\n    #[from]\n    TrieRequestIncoming(TrieRequestIncoming),\n\n    #[from]\n    TrieDemand(TrieDemand),\n}\n\nimpl Display for Event {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            Event::ContractRuntimeRequest(req) => {\n                write!(f, \"contract runtime request: {}\", req)\n            }\n            Event::TrieRequestIncoming(req) => write!(f, \"trie request incoming: {}\", req),\n            Event::TrieDemand(demand) => write!(f, \"trie demand: {}\", demand),\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/contract_runtime/exec_queue.rs",
    "content": "use datasize::DataSize;\nuse std::{\n    collections::BTreeMap,\n    sync::{Arc, Mutex},\n};\n\nuse crate::types::{ExecutableBlock, MetaBlockState};\n\n#[derive(Default, Clone, DataSize)]\npub(super) struct ExecQueue(Arc<Mutex<BTreeMap<u64, QueueItem>>>);\n\nimpl ExecQueue {\n    /// How many blocks are backed up in the queue\n    pub fn len(&self) -> usize {\n        self.0\n            .lock()\n            .expect(\n                \"components::contract_runtime: couldn't get execution queue size; mutex poisoned\",\n            )\n            .len()\n    }\n\n    pub fn remove(&mut self, height: u64) -> Option<QueueItem> {\n        self.0\n            .lock()\n            .expect(\"components::contract_runtime: couldn't remove from the queue; mutex poisoned\")\n            .remove(&height)\n    }\n\n    pub fn insert(&mut self, height: u64, item: QueueItem) {\n        self.0\n            .lock()\n            .expect(\"components::contract_runtime: couldn't insert into the queue; mutex poisoned\")\n            .insert(height, item);\n    }\n\n    /// Remove every entry older than the given height, and return the new len.\n    pub fn remove_older_then(&mut self, height: u64) -> i64 {\n        let mut locked_queue = self.0\n            .lock()\n            .expect(\n                \"components::contract_runtime: couldn't initialize contract runtime block execution queue; mutex poisoned\"\n            );\n\n        *locked_queue = locked_queue.split_off(&height);\n\n        TryInto::try_into(locked_queue.len()).unwrap_or(i64::MIN)\n    }\n}\n\n// Should it be an enum?\npub(super) struct QueueItem {\n    pub executable_block: ExecutableBlock,\n    pub meta_block_state: MetaBlockState,\n}\n"
  },
  {
    "path": "node/src/components/contract_runtime/metrics.rs",
    "content": "use prometheus::{self, Gauge, Histogram, IntGauge, Registry};\n\nuse crate::{unregister_metric, utils};\n\n/// Value of upper bound of histogram.\nconst EXPONENTIAL_BUCKET_START: f64 = 0.2;\n\n/// Multiplier of previous upper bound for next bound.\nconst EXPONENTIAL_BUCKET_FACTOR: f64 = 2.0;\n\n/// Bucket count, with the last bucket going to +Inf which will not be included in the results.\n/// - start = 0.01, factor = 2.0, count = 10\n/// - start * factor ^ count = 0.01 * 2.0 ^ 10 = 10.24\n/// - Values above 10.24 (f64 seconds here) will not fall in a bucket that is kept.\nconst EXPONENTIAL_BUCKET_COUNT: usize = 10;\n\nconst EXEC_WASM_V1_NAME: &str = \"contract_runtime_exec_wasm_v1\";\nconst EXEC_WASM_V1_HELP: &str = \"time in seconds to execute wasm using the v1 exec engine\";\n\nconst EXEC_BLOCK_PRE_PROCESSING_NAME: &str = \"contract_runtime_exec_block_pre_proc\";\nconst EXEC_BLOCK_PRE_PROCESSING_HELP: &str =\n    \"processing time in seconds before any transactions have processed\";\n\nconst EXEC_BLOCK_POST_PROCESSING_NAME: &str = \"contract_runtime_exec_block_post_proc\";\nconst EXEC_BLOCK_POST_PROCESSING_HELP: &str =\n    \"processing time in seconds after all transactions have processed\";\n\nconst EXEC_BLOCK_STEP_PROCESSING_NAME: &str = \"contract_runtime_exec_block_step_proc\";\nconst EXEC_BLOCK_STEP_PROCESSING_HELP: &str = \"processing time in seconds of the end of era step\";\n\nconst EXEC_BLOCK_TOTAL_NAME: &str = \"contract_runtime_exec_block_total_proc\";\nconst EXEC_BLOCK_TOTAL_HELP: &str =\n    \"processing time in seconds for block execution (total elapsed)\";\n\nconst COMMIT_GENESIS_NAME: &str = \"contract_runtime_commit_genesis\";\nconst COMMIT_GENESIS_HELP: &str = \"time in seconds to commit an genesis\";\n\nconst COMMIT_UPGRADE_NAME: &str = \"contract_runtime_commit_upgrade\";\nconst COMMIT_UPGRADE_HELP: &str = \"time in seconds to commit an upgrade\";\n\nconst RUN_QUERY_NAME: &str = \"contract_runtime_run_query\";\nconst 
RUN_QUERY_HELP: &str = \"time in seconds to run a query in global state\";\n\nconst RUN_QUERY_BY_PREFIX_NAME: &str = \"contract_runtime_run_query_by_prefix\";\nconst RUN_QUERY_BY_PREFIX_HELP: &str = \"time in seconds to run a query by prefix in global state\";\n\nconst COMMIT_STEP_NAME: &str = \"contract_runtime_commit_step\";\nconst COMMIT_STEP_HELP: &str = \"time in seconds to commit the step at era end\";\n\nconst GET_BALANCE_NAME: &str = \"contract_runtime_get_balance\";\nconst GET_BALANCE_HELP: &str = \"time in seconds to get the balance of a purse from global state\";\n\nconst GET_TOTAL_SUPPLY_NAME: &str = \"contract_runtime_get_total_supply\";\nconst GET_TOTAL_SUPPLY_HELP: &str = \"time in seconds to get the total supply from global state\";\n\nconst GET_ROUND_SEIGNIORAGE_RATE_NAME: &str = \"contract_runtime_get_round_seigniorage_rate\";\nconst GET_ROUND_SEIGNIORAGE_RATE_HELP: &str =\n    \"time in seconds to get the round seigniorage rate from global state\";\n\nconst GET_ERA_VALIDATORS_NAME: &str = \"contract_runtime_get_era_validators\";\nconst GET_ERA_VALIDATORS_HELP: &str =\n    \"time in seconds to get validators for a given era from global state\";\n\nconst GET_SEIGNIORAGE_RECIPIENTS_NAME: &str = \"contract_runtime_get_seigniorage_recipients\";\nconst GET_SEIGNIORAGE_RECIPIENTS_HELP: &str =\n    \"time in seconds to get seigniorage recipients from global state\";\n\nconst GET_ALL_VALUES_NAME: &str = \"contract_runtime_get_all_values\";\nconst GET_ALL_VALUES_NAME_HELP: &str =\n    \"time in seconds to get all values under a give key from global state\";\n\nconst EXECUTION_RESULTS_CHECKSUM_NAME: &str = \"contract_runtime_execution_results_checksum\";\nconst EXECUTION_RESULTS_CHECKSUM_HELP: &str = \"contract_runtime_execution_results_checksum\";\n\nconst ADDRESSABLE_ENTITY_NAME: &str = \"contract_runtime_addressable_entity\";\nconst ADDRESSABLE_ENTITY_HELP: &str = \"contract_runtime_addressable_entity\";\n\nconst ENTRY_POINT_NAME: &str = 
\"contract_runtime_entry_point\";\nconst ENTRY_POINT_HELP: &str = \"contract_runtime_entry_point\";\n\nconst PUT_TRIE_NAME: &str = \"contract_runtime_put_trie\";\nconst PUT_TRIE_HELP: &str = \"time in seconds to put a trie\";\n\nconst GET_TRIE_NAME: &str = \"contract_runtime_get_trie\";\nconst GET_TRIE_HELP: &str = \"time in seconds to get a trie\";\n\nconst EXEC_BLOCK_TNX_PROCESSING_NAME: &str = \"contract_runtime_execute_block\";\nconst EXEC_BLOCK_TNX_PROCESSING_HELP: &str = \"time in seconds to execute all deploys in a block\";\n\nconst LATEST_COMMIT_STEP_NAME: &str = \"contract_runtime_latest_commit_step\";\nconst LATEST_COMMIT_STEP_HELP: &str = \"duration in seconds of latest commit step at era end\";\n\nconst EXEC_QUEUE_SIZE_NAME: &str = \"execution_queue_size\";\nconst EXEC_QUEUE_SIZE_HELP: &str =\n    \"number of blocks that are currently enqueued and waiting for execution\";\n\nconst TXN_APPROVALS_HASHES: &str = \"contract_runtime_txn_approvals_hashes_calculation\";\nconst TXN_APPROVALS_HASHES_HELP: &str =\n    \"time in seconds to get calculate approvals hashes for executed transactions\";\n\nconst BLOCK_REWARDS_PAYOUT: &str = \"contract_runtime_block_rewards_payout\";\nconst BLOCK_REWARDS_PAYOUT_HELP: &str = \"time in seconds to get process rewards payouts\";\n\nconst BATCH_PRUNING_TIME: &str = \"contract_runtime_batch_pruning_time\";\nconst BATCH_PRUNING_TIME_HELP: &str = \"time in seconds to perform batch pruning\";\n\nconst DB_FLUSH_TIME: &str = \"contract_runtime_db_flush_time\";\nconst DB_FLUSH_TIME_HELP: &str = \"time in seconds to flush changes to the database\";\n\nconst SCRATCH_LMDB_WRITE_TIME: &str = \"contract_runtime_scratch_lmdb_write_time\";\nconst SCRATCH_LMDB_WRITE_TIME_HELP: &str = \"time in seconds to write changes to the database\";\n\nconst SEIGNIORAGE_TARGET_FRACTION: &str = \"contract_runtime_seigniorage_target_fraction\";\nconst SEIGNIORAGE_TARGET_FRACTION_HELP: &str = \"fraction of target seigniorage minted in era\";\n\n/// 
Metrics for the contract runtime component.\n#[derive(Debug)]\npub struct Metrics {\n    pub(super) exec_block_pre_processing: Histogram,\n    // elapsed before tnx processing\n    pub(super) exec_block_tnx_processing: Histogram,\n    // tnx processing elapsed\n    pub(super) exec_wasm_v1: Histogram,\n    // ee_v1 execution elapsed\n    pub(super) exec_block_step_processing: Histogram,\n    // step processing elapsed\n    pub(super) exec_block_post_processing: Histogram,\n    // elapsed after tnx processing\n    pub(super) exec_block_total: Histogram,\n    // total elapsed\n    pub(super) commit_genesis: Histogram,\n    pub(super) commit_upgrade: Histogram,\n    pub(super) run_query: Histogram,\n    pub(super) run_query_by_prefix: Histogram,\n    pub(super) commit_step: Histogram,\n    pub(super) get_balance: Histogram,\n    pub(super) get_total_supply: Histogram,\n    pub(super) get_round_seigniorage_rate: Histogram,\n    pub(super) get_era_validators: Histogram,\n    pub(super) get_seigniorage_recipients: Histogram,\n    pub(super) get_all_values: Histogram,\n    pub(super) execution_results_checksum: Histogram,\n    pub(super) addressable_entity: Histogram,\n    pub(super) entry_points: Histogram,\n    pub(super) put_trie: Histogram,\n    pub(super) get_trie: Histogram,\n    pub(super) latest_commit_step: Gauge,\n    pub(super) exec_queue_size: IntGauge,\n    pub(super) txn_approvals_hashes_calculation: Histogram,\n    pub(super) block_rewards_payout: Histogram,\n    pub(super) pruning_time: Histogram,\n    pub(super) database_flush_time: Histogram,\n    pub(super) scratch_lmdb_write_time: Histogram,\n    pub(super) seigniorage_target_fraction: Gauge,\n    registry: Registry,\n}\n\nimpl Metrics {\n    /// Constructor of metrics which creates and registers metrics objects for use.\n    pub(super) fn new(registry: &Registry) -> Result<Self, prometheus::Error> {\n        let common_buckets = prometheus::exponential_buckets(\n            EXPONENTIAL_BUCKET_START,\n  
          EXPONENTIAL_BUCKET_FACTOR,\n            EXPONENTIAL_BUCKET_COUNT,\n        )?;\n\n        // make wider buckets for operations that might take longer\n        let wider_buckets = prometheus::exponential_buckets(\n            EXPONENTIAL_BUCKET_START * 8.0,\n            EXPONENTIAL_BUCKET_FACTOR,\n            EXPONENTIAL_BUCKET_COUNT,\n        )?;\n\n        // Start from 1 millisecond\n        // Factor by 2\n        // After 10 elements we get to 1s.\n        // Anything above that should be a warning signal.\n        let tiny_buckets = prometheus::exponential_buckets(0.001, 2.0, 10)?;\n\n        let latest_commit_step = Gauge::new(LATEST_COMMIT_STEP_NAME, LATEST_COMMIT_STEP_HELP)?;\n        registry.register(Box::new(latest_commit_step.clone()))?;\n\n        let exec_queue_size = IntGauge::new(EXEC_QUEUE_SIZE_NAME, EXEC_QUEUE_SIZE_HELP)?;\n        registry.register(Box::new(exec_queue_size.clone()))?;\n\n        let seigniorage_target_fraction = Gauge::new(\n            SEIGNIORAGE_TARGET_FRACTION,\n            SEIGNIORAGE_TARGET_FRACTION_HELP,\n        )?;\n        registry.register(Box::new(seigniorage_target_fraction.clone()))?;\n\n        Ok(Metrics {\n            exec_block_pre_processing: utils::register_histogram_metric(\n                registry,\n                EXEC_BLOCK_PRE_PROCESSING_NAME,\n                EXEC_BLOCK_PRE_PROCESSING_HELP,\n                common_buckets.clone(),\n            )?,\n            exec_block_tnx_processing: utils::register_histogram_metric(\n                registry,\n                EXEC_BLOCK_TNX_PROCESSING_NAME,\n                EXEC_BLOCK_TNX_PROCESSING_HELP,\n                common_buckets.clone(),\n            )?,\n            exec_wasm_v1: utils::register_histogram_metric(\n                registry,\n                EXEC_WASM_V1_NAME,\n                EXEC_WASM_V1_HELP,\n                common_buckets.clone(),\n            )?,\n            exec_block_post_processing: utils::register_histogram_metric(\n      
          registry,\n                EXEC_BLOCK_POST_PROCESSING_NAME,\n                EXEC_BLOCK_POST_PROCESSING_HELP,\n                common_buckets.clone(),\n            )?,\n            exec_block_step_processing: utils::register_histogram_metric(\n                registry,\n                EXEC_BLOCK_STEP_PROCESSING_NAME,\n                EXEC_BLOCK_STEP_PROCESSING_HELP,\n                common_buckets.clone(),\n            )?,\n            exec_block_total: utils::register_histogram_metric(\n                registry,\n                EXEC_BLOCK_TOTAL_NAME,\n                EXEC_BLOCK_TOTAL_HELP,\n                wider_buckets.clone(),\n            )?,\n            run_query: utils::register_histogram_metric(\n                registry,\n                RUN_QUERY_NAME,\n                RUN_QUERY_HELP,\n                common_buckets.clone(),\n            )?,\n            run_query_by_prefix: utils::register_histogram_metric(\n                registry,\n                RUN_QUERY_BY_PREFIX_NAME,\n                RUN_QUERY_BY_PREFIX_HELP,\n                common_buckets.clone(),\n            )?,\n            commit_step: utils::register_histogram_metric(\n                registry,\n                COMMIT_STEP_NAME,\n                COMMIT_STEP_HELP,\n                common_buckets.clone(),\n            )?,\n            commit_genesis: utils::register_histogram_metric(\n                registry,\n                COMMIT_GENESIS_NAME,\n                COMMIT_GENESIS_HELP,\n                common_buckets.clone(),\n            )?,\n            commit_upgrade: utils::register_histogram_metric(\n                registry,\n                COMMIT_UPGRADE_NAME,\n                COMMIT_UPGRADE_HELP,\n                common_buckets.clone(),\n            )?,\n            get_balance: utils::register_histogram_metric(\n                registry,\n                GET_BALANCE_NAME,\n                GET_BALANCE_HELP,\n                common_buckets.clone(),\n            )?,\n      
      get_total_supply: utils::register_histogram_metric(\n                registry,\n                GET_TOTAL_SUPPLY_NAME,\n                GET_TOTAL_SUPPLY_HELP,\n                common_buckets.clone(),\n            )?,\n            get_round_seigniorage_rate: utils::register_histogram_metric(\n                registry,\n                GET_ROUND_SEIGNIORAGE_RATE_NAME,\n                GET_ROUND_SEIGNIORAGE_RATE_HELP,\n                common_buckets.clone(),\n            )?,\n            get_era_validators: utils::register_histogram_metric(\n                registry,\n                GET_ERA_VALIDATORS_NAME,\n                GET_ERA_VALIDATORS_HELP,\n                common_buckets.clone(),\n            )?,\n            get_seigniorage_recipients: utils::register_histogram_metric(\n                registry,\n                GET_SEIGNIORAGE_RECIPIENTS_NAME,\n                GET_SEIGNIORAGE_RECIPIENTS_HELP,\n                common_buckets.clone(),\n            )?,\n            get_all_values: utils::register_histogram_metric(\n                registry,\n                GET_ALL_VALUES_NAME,\n                GET_ALL_VALUES_NAME_HELP,\n                common_buckets.clone(),\n            )?,\n            execution_results_checksum: utils::register_histogram_metric(\n                registry,\n                EXECUTION_RESULTS_CHECKSUM_NAME,\n                EXECUTION_RESULTS_CHECKSUM_HELP,\n                common_buckets.clone(),\n            )?,\n            addressable_entity: utils::register_histogram_metric(\n                registry,\n                ADDRESSABLE_ENTITY_NAME,\n                ADDRESSABLE_ENTITY_HELP,\n                common_buckets.clone(),\n            )?,\n            entry_points: utils::register_histogram_metric(\n                registry,\n                ENTRY_POINT_NAME,\n                ENTRY_POINT_HELP,\n                common_buckets.clone(),\n            )?,\n            get_trie: utils::register_histogram_metric(\n                
registry,\n                GET_TRIE_NAME,\n                GET_TRIE_HELP,\n                tiny_buckets.clone(),\n            )?,\n            put_trie: utils::register_histogram_metric(\n                registry,\n                PUT_TRIE_NAME,\n                PUT_TRIE_HELP,\n                tiny_buckets,\n            )?,\n            latest_commit_step,\n            exec_queue_size,\n            txn_approvals_hashes_calculation: utils::register_histogram_metric(\n                registry,\n                TXN_APPROVALS_HASHES,\n                TXN_APPROVALS_HASHES_HELP,\n                common_buckets.clone(),\n            )?,\n            block_rewards_payout: utils::register_histogram_metric(\n                registry,\n                BLOCK_REWARDS_PAYOUT,\n                BLOCK_REWARDS_PAYOUT_HELP,\n                wider_buckets.clone(),\n            )?,\n            pruning_time: utils::register_histogram_metric(\n                registry,\n                BATCH_PRUNING_TIME,\n                BATCH_PRUNING_TIME_HELP,\n                common_buckets.clone(),\n            )?,\n            database_flush_time: utils::register_histogram_metric(\n                registry,\n                DB_FLUSH_TIME,\n                DB_FLUSH_TIME_HELP,\n                wider_buckets.clone(),\n            )?,\n            scratch_lmdb_write_time: utils::register_histogram_metric(\n                registry,\n                SCRATCH_LMDB_WRITE_TIME,\n                SCRATCH_LMDB_WRITE_TIME_HELP,\n                wider_buckets.clone(),\n            )?,\n            seigniorage_target_fraction,\n            registry: registry.clone(),\n        })\n    }\n}\n\nimpl Drop for Metrics {\n    fn drop(&mut self) {\n        unregister_metric!(self.registry, self.exec_block_pre_processing);\n        unregister_metric!(self.registry, self.exec_block_tnx_processing);\n        unregister_metric!(self.registry, self.exec_wasm_v1);\n        unregister_metric!(self.registry, 
self.exec_block_post_processing);\n        unregister_metric!(self.registry, self.exec_block_step_processing);\n        unregister_metric!(self.registry, self.exec_block_total);\n        unregister_metric!(self.registry, self.commit_genesis);\n        unregister_metric!(self.registry, self.commit_upgrade);\n        unregister_metric!(self.registry, self.run_query);\n        unregister_metric!(self.registry, self.run_query_by_prefix);\n        unregister_metric!(self.registry, self.commit_step);\n        unregister_metric!(self.registry, self.get_balance);\n        unregister_metric!(self.registry, self.get_total_supply);\n        unregister_metric!(self.registry, self.get_round_seigniorage_rate);\n        unregister_metric!(self.registry, self.get_era_validators);\n        unregister_metric!(self.registry, self.get_seigniorage_recipients);\n        unregister_metric!(self.registry, self.get_all_values);\n        unregister_metric!(self.registry, self.execution_results_checksum);\n        unregister_metric!(self.registry, self.addressable_entity);\n        unregister_metric!(self.registry, self.put_trie);\n        unregister_metric!(self.registry, self.get_trie);\n        unregister_metric!(self.registry, self.latest_commit_step);\n        unregister_metric!(self.registry, self.exec_queue_size);\n        unregister_metric!(self.registry, self.entry_points);\n        unregister_metric!(self.registry, self.txn_approvals_hashes_calculation);\n        unregister_metric!(self.registry, self.block_rewards_payout);\n        unregister_metric!(self.registry, self.pruning_time);\n        unregister_metric!(self.registry, self.database_flush_time);\n        unregister_metric!(self.registry, self.scratch_lmdb_write_time);\n        unregister_metric!(self.registry, self.seigniorage_target_fraction);\n    }\n}\n"
  },
  {
    "path": "node/src/components/contract_runtime/operations/wasm_v2_request.rs",
    "content": "use std::sync::Arc;\n\nuse bytes::Bytes;\nuse casper_executor_wasm::{\n    install::{\n        InstallContractError, InstallContractRequest, InstallContractRequestBuilder,\n        InstallContractResult,\n    },\n    ExecutorV2,\n};\nuse casper_executor_wasm_interface::{\n    executor::{\n        ExecuteRequest, ExecuteRequestBuilder, ExecuteWithProviderError, ExecuteWithProviderResult,\n        ExecutionKind,\n    },\n    GasUsage,\n};\nuse casper_storage::{\n    global_state::state::{CommitProvider, StateProvider},\n    AddressGeneratorBuilder,\n};\nuse casper_types::{\n    execution::Effects, BlockHash, Digest, Gas, Key, TransactionEntryPoint,\n    TransactionInvocationTarget, TransactionRuntimeParams, TransactionTarget, U512,\n};\nuse thiserror::Error;\nuse tracing::info;\n\nuse super::MetaTransaction;\n\n/// The request to execute a Wasm contract.\npub(crate) enum WasmV2Request {\n    /// The request to install a Wasm contract.\n    Install(InstallContractRequest),\n    /// The request to execute a Wasm contract.\n    Execute(ExecuteRequest),\n}\n\n/// The result of executing a Wasm contract.\npub(crate) enum WasmV2Result {\n    /// The result of installing a Wasm contract.\n    Install(InstallContractResult),\n    /// The result of executing a Wasm contract.\n    Execute(ExecuteWithProviderResult),\n}\n\nimpl WasmV2Result {\n    /// Returns the gas usage of the contract execution.\n    pub(crate) fn gas_usage(&self) -> &GasUsage {\n        match self {\n            WasmV2Result::Install(result) => result.gas_usage(),\n            WasmV2Result::Execute(result) => result.gas_usage(),\n        }\n    }\n\n    /// Returns the effects of the contract execution.\n    pub(crate) fn effects(&self) -> &Effects {\n        match self {\n            WasmV2Result::Install(result) => result.effects(),\n            WasmV2Result::Execute(result) => result.effects(),\n        }\n    }\n\n    pub(crate) fn post_state_hash(&self) -> Digest {\n        match self 
{\n            WasmV2Result::Install(result) => result.post_state_hash(),\n            WasmV2Result::Execute(result) => result.post_state_hash(),\n        }\n    }\n}\n\n#[derive(Error, Debug)]\npub(crate) enum WasmV2Error {\n    #[error(transparent)]\n    Install(InstallContractError),\n    #[error(transparent)]\n    Execute(ExecuteWithProviderError),\n}\n\n#[derive(Clone, Eq, PartialEq, Error, Debug)]\npub(crate) enum InvalidRequest {\n    #[error(\"Expected bytes arguments\")]\n    ExpectedBytesArguments,\n    #[error(\"Expected target\")]\n    ExpectedTarget,\n    #[error(\"Invalid gas limit: {0}\")]\n    InvalidGasLimit(U512),\n    #[error(\"Expected transferred value\")]\n    ExpectedTransferredValue,\n    #[error(\"Expected V2 runtime\")]\n    ExpectedV2Runtime,\n}\n\nimpl WasmV2Request {\n    pub(crate) fn new(\n        gas_limit: Gas,\n        network_name: impl Into<Arc<str>>,\n        state_root_hash: Digest,\n        parent_block_hash: BlockHash,\n        block_height: u64,\n        transaction: &MetaTransaction,\n    ) -> Result<Self, InvalidRequest> {\n        let transaction_hash = transaction.hash();\n        let initiator_addr = transaction.initiator_addr();\n\n        let gas_limit: u64 = gas_limit\n            .value()\n            .try_into()\n            .map_err(|_| InvalidRequest::InvalidGasLimit(gas_limit.value()))?;\n\n        let address_generator = AddressGeneratorBuilder::default()\n            .seed_with(transaction_hash.as_ref())\n            .build();\n\n        let session_args = transaction.session_args();\n\n        let input_data = session_args\n            .as_bytesrepr()\n            .ok_or(InvalidRequest::ExpectedBytesArguments)?;\n\n        let value = transaction\n            .transferred_value()\n            .ok_or(InvalidRequest::ExpectedTransferredValue)?;\n\n        enum Target {\n            Install {\n                module_bytes: Bytes,\n                entry_point: String,\n                transferred_value: u64,\n    
            seed: Option<[u8; 32]>,\n            },\n            Session {\n                module_bytes: Bytes,\n            },\n            Stored {\n                id: TransactionInvocationTarget,\n                entry_point: String,\n            },\n        }\n\n        let transaction_target = transaction.target().ok_or(InvalidRequest::ExpectedTarget)?;\n        let target = match transaction_target {\n            TransactionTarget::Native => todo!(), //\n            TransactionTarget::Stored { id, runtime: _ } => match transaction.entry_point() {\n                TransactionEntryPoint::Custom(entry_point) => Target::Stored {\n                    id: id.clone(),\n                    entry_point: entry_point.clone(),\n                },\n                _ => todo!(),\n            },\n\n            TransactionTarget::Session {\n                module_bytes: _,\n                runtime: TransactionRuntimeParams::VmCasperV1,\n                is_install_upgrade: _, // TODO: Handle this\n            } => {\n                return Err(InvalidRequest::ExpectedV2Runtime);\n            }\n            TransactionTarget::Session {\n                module_bytes,\n                runtime:\n                    TransactionRuntimeParams::VmCasperV2 {\n                        transferred_value,\n                        seed,\n                    },\n                is_install_upgrade: _, // TODO: Handle this\n            } => match transaction.entry_point() {\n                TransactionEntryPoint::Call => Target::Session {\n                    module_bytes: module_bytes.clone().take_inner().into(),\n                },\n                TransactionEntryPoint::Custom(entry_point) => Target::Install {\n                    module_bytes: module_bytes.clone().take_inner().into(),\n                    entry_point: entry_point.to_string(),\n                    transferred_value,\n                    seed,\n                },\n                _ => todo!(),\n            },\n        
};\n\n        info!(%transaction_hash, \"executing v2 contract\");\n\n        match target {\n            Target::Install {\n                module_bytes,\n                entry_point,\n                transferred_value,\n                seed,\n            } => {\n                let mut builder = InstallContractRequestBuilder::default();\n\n                let entry_point = (!entry_point.is_empty()).then_some(entry_point);\n\n                match entry_point {\n                    Some(entry_point) => {\n                        builder = builder\n                            .with_entry_point(entry_point.clone())\n                            // Args only matter if there is a constructor to be called.\n                            .with_input(input_data.clone().take_inner().into());\n                    }\n                    None => {\n                        // No input data expected if there is no entry point. This should be\n                        // validated in transaction acceptor.\n                        assert!(input_data.is_empty());\n                    }\n                }\n\n                if let Some(seed) = seed {\n                    builder = builder.with_seed(seed);\n                }\n\n                // Value is expected to be the same as transferred value, it's just taken through\n                // different API.\n                debug_assert_eq!(transferred_value, value);\n\n                let install_request = builder\n                    .with_initiator(initiator_addr.account_hash())\n                    .with_gas_limit(gas_limit)\n                    .with_transaction_hash(transaction_hash)\n                    .with_wasm_bytes(module_bytes)\n                    .with_address_generator(address_generator)\n                    .with_transferred_value(value)\n                    .with_chain_name(network_name)\n                    .with_block_time(transaction.timestamp().into())\n                    .with_state_hash(state_root_hash)\n      
              .with_parent_block_hash(parent_block_hash)\n                    .with_block_height(block_height)\n                    .build()\n                    .expect(\"should build\");\n\n                Ok(Self::Install(install_request))\n            }\n            Target::Session { .. } | Target::Stored { .. } => {\n                let mut builder = ExecuteRequestBuilder::default();\n\n                let initiator_account_hash = &initiator_addr.account_hash();\n\n                let initiator_key = Key::Account(*initiator_account_hash);\n\n                builder = builder\n                    .with_address_generator(address_generator)\n                    .with_gas_limit(gas_limit)\n                    .with_transaction_hash(transaction_hash)\n                    .with_initiator(*initiator_account_hash)\n                    .with_caller_key(initiator_key)\n                    .with_chain_name(network_name)\n                    .with_transferred_value(value)\n                    .with_block_time(transaction.timestamp().into())\n                    .with_input(input_data.clone().take_inner().into())\n                    .with_state_hash(state_root_hash)\n                    .with_parent_block_hash(parent_block_hash)\n                    .with_block_height(block_height);\n                let execution_kind = match target {\n                    Target::Session { module_bytes } => ExecutionKind::SessionBytes(module_bytes),\n                    Target::Stored {\n                        id: TransactionInvocationTarget::ByHash(smart_contract_addr),\n                        entry_point,\n                    } => ExecutionKind::Stored {\n                        address: smart_contract_addr,\n                        entry_point: entry_point.clone(),\n                    },\n                    Target::Stored { id, entry_point } => {\n                        todo!(\"Unsupported target {entry_point} {id:?}\")\n                    }\n                    Target::Install { 
.. } => unreachable!(),\n                };\n\n                builder = builder.with_target(execution_kind);\n\n                let execute_request = builder.build().expect(\"should build\");\n\n                Ok(Self::Execute(execute_request))\n            }\n        }\n    }\n\n    pub(crate) fn execute<P>(\n        self,\n        engine: &ExecutorV2,\n        state_root_hash: Digest,\n        state_provider: &P,\n    ) -> Result<WasmV2Result, WasmV2Error>\n    where\n        P: StateProvider + CommitProvider,\n        <P as StateProvider>::Reader: 'static,\n    {\n        match self {\n            WasmV2Request::Install(install_request) => {\n                match engine.install_contract(state_root_hash, state_provider, install_request) {\n                    Ok(result) => Ok(WasmV2Result::Install(result)),\n                    Err(error) => Err(WasmV2Error::Install(error)),\n                }\n            }\n            WasmV2Request::Execute(execute_request) => {\n                match engine.execute_with_provider(state_root_hash, state_provider, execute_request)\n                {\n                    Ok(result) => Ok(WasmV2Result::Execute(result)),\n                    Err(error) => Err(WasmV2Error::Execute(error)),\n                }\n            }\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    #[test]\n    fn smoke_test() {}\n}\n"
  },
  {
    "path": "node/src/components/contract_runtime/operations.rs",
    "content": "pub(crate) mod wasm_v2_request;\n\nuse casper_executor_wasm::ExecutorV2;\nuse itertools::Itertools;\nuse std::{collections::BTreeMap, convert::TryInto, sync::Arc, time::Instant};\nuse tracing::{debug, error, info, trace, warn};\nuse wasm_v2_request::{WasmV2Request, WasmV2Result};\n\nuse casper_execution_engine::engine_state::{\n    BlockInfo, ExecutionEngineV1, WasmV1Request, WasmV1Result,\n};\nuse casper_storage::{\n    block_store::types::ApprovalsHashes,\n    data_access_layer::{\n        balance::BalanceHandling,\n        mint::{BalanceIdentifierTransferArgs, BurnRequest},\n        AuctionMethod, BalanceHoldKind, BalanceHoldRequest, BalanceIdentifier,\n        BalanceIdentifierPurseRequest, BalanceIdentifierPurseResult, BalanceRequest,\n        BiddingRequest, BlockGlobalRequest, BlockGlobalResult, BlockRewardsRequest,\n        BlockRewardsResult, DataAccessLayer, EntryPointRequest, EntryPointResult,\n        EraValidatorsRequest, EraValidatorsResult, EvictItem, FeeRequest, FeeResult, FlushRequest,\n        HandleFeeMode, HandleFeeRequest, HandleRefundMode, HandleRefundRequest,\n        InsufficientBalanceHandling, ProofHandling, PruneRequest, PruneResult, StepRequest,\n        StepResult, TransferRequest,\n    },\n    global_state::state::{\n        lmdb::LmdbGlobalState, scratch::ScratchGlobalState, CommitProvider, ScratchProvider,\n        StateProvider, StateReader,\n    },\n    system::runtime_native::Config as NativeRuntimeConfig,\n};\nuse casper_types::{\n    bytesrepr::{self, ToBytes, U32_SERIALIZED_LENGTH},\n    execution::{Effects, ExecutionResult, TransformKindV2, TransformV2},\n    system::handle_payment::ARG_AMOUNT,\n    BlockHash, BlockHeader, BlockTime, BlockV2, CLValue, Chainspec, ChecksumRegistry, Digest,\n    EntityAddr, EraEndV2, EraId, FeeHandling, Gas, InvalidTransaction, InvalidTransactionV1, Key,\n    ProtocolVersion, PublicKey, RefundHandling, Transaction, TransactionEntryPoint,\n    AUCTION_LANE_ID, MINT_LANE_ID, 
U512,\n};\n\nuse super::{\n    types::{SpeculativeExecutionResult, StepOutcome},\n    utils::{self, calculate_prune_eras},\n    BlockAndExecutionArtifacts, BlockExecutionError, ExecutionPreState, Metrics, StateResultError,\n    APPROVALS_CHECKSUM_NAME, EXECUTION_RESULTS_CHECKSUM_NAME,\n};\nuse crate::{\n    components::fetcher::FetchItem,\n    contract_runtime::types::ExecutionArtifactBuilder,\n    types::{self, Chunkable, ExecutableBlock, InternalEraReport, MetaTransaction},\n};\n\n/// Executes a finalized block.\n#[allow(clippy::too_many_arguments)]\npub fn execute_finalized_block(\n    data_access_layer: &DataAccessLayer<LmdbGlobalState>,\n    execution_engine_v1: &ExecutionEngineV1,\n    execution_engine_v2: ExecutorV2,\n    chainspec: &Chainspec,\n    metrics: Option<Arc<Metrics>>,\n    execution_pre_state: ExecutionPreState,\n    executable_block: ExecutableBlock,\n    key_block_height_for_activation_point: u64,\n    current_gas_price: u8,\n    next_era_gas_price: Option<u8>,\n    last_switch_block_hash: Option<BlockHash>,\n) -> Result<BlockAndExecutionArtifacts, BlockExecutionError> {\n    let block_height = executable_block.height;\n    if block_height != execution_pre_state.next_block_height() {\n        return Err(BlockExecutionError::WrongBlockHeight {\n            executable_block: Box::new(executable_block),\n            execution_pre_state: Box::new(execution_pre_state),\n        });\n    }\n    if executable_block.era_report.is_some() && next_era_gas_price.is_none() {\n        return Err(BlockExecutionError::FailedToGetNewEraGasPrice {\n            era_id: executable_block.era_id.successor(),\n        });\n    }\n    let start = Instant::now();\n    let protocol_version = chainspec.protocol_version();\n    let activation_point_era_id = chainspec.protocol_config.activation_point.era_id();\n    let prune_batch_size = chainspec.core_config.prune_batch_size;\n    let native_runtime_config = NativeRuntimeConfig::from_chainspec(chainspec);\n    let 
addressable_entity_enabled = chainspec.core_config.enable_addressable_entity();\n\n    if addressable_entity_enabled != data_access_layer.enable_addressable_entity {\n        return Err(BlockExecutionError::InvalidAESetting(\n            data_access_layer.enable_addressable_entity,\n        ));\n    }\n\n    // scrape variables from execution pre state\n    let parent_hash = execution_pre_state.parent_hash();\n    let parent_seed = execution_pre_state.parent_seed();\n    let parent_block_hash = execution_pre_state.parent_hash();\n    let pre_state_root_hash = execution_pre_state.pre_state_root_hash();\n    let mut state_root_hash = pre_state_root_hash; // initial state root is parent's state root\n\n    let payment_balance_addr =\n        match data_access_layer.balance_purse(BalanceIdentifierPurseRequest::new(\n            state_root_hash,\n            protocol_version,\n            BalanceIdentifier::Payment,\n        )) {\n            BalanceIdentifierPurseResult::RootNotFound => {\n                return Err(BlockExecutionError::RootNotFound(state_root_hash))\n            }\n            BalanceIdentifierPurseResult::Failure(tce) => {\n                return Err(BlockExecutionError::BlockGlobal(format!(\"{:?}\", tce)));\n            }\n            BalanceIdentifierPurseResult::Success { purse_addr } => purse_addr,\n        };\n\n    // scrape variables from executable block\n    let block_time = BlockTime::new(executable_block.timestamp.millis());\n\n    let proposer = executable_block.proposer.clone();\n    let era_id = executable_block.era_id;\n    let mut artifacts = Vec::with_capacity(executable_block.transactions.len());\n\n    // set up accounting variables / settings\n    let insufficient_balance_handling = InsufficientBalanceHandling::HoldRemaining;\n    let refund_handling = chainspec.core_config.refund_handling;\n    let fee_handling = chainspec.core_config.fee_handling;\n    let baseline_motes_amount = 
chainspec.core_config.baseline_motes_amount_u512();\n    let balance_handling = BalanceHandling::Available;\n\n    // get scratch state, which must be used for all processing and post-processing data\n    // requirements.\n    let scratch_state = data_access_layer.get_scratch_global_state();\n\n    // pre-processing is finished\n    if let Some(metrics) = metrics.as_ref() {\n        metrics\n            .exec_block_pre_processing\n            .observe(start.elapsed().as_secs_f64());\n    }\n\n    // grabbing transaction id's now to avoid cloning transactions\n    let transaction_ids = executable_block\n        .transactions\n        .iter()\n        .map(Transaction::fetch_id)\n        .collect_vec();\n\n    // transaction processing starts now\n    let txn_processing_start = Instant::now();\n\n    // put block_time to global state\n    // NOTE this must occur prior to any block processing as subsequent logic\n    // will refer to the block time value being written to GS now.\n    match scratch_state.block_global(BlockGlobalRequest::block_time(\n        state_root_hash,\n        protocol_version,\n        block_time,\n    )) {\n        BlockGlobalResult::RootNotFound => {\n            return Err(BlockExecutionError::RootNotFound(state_root_hash));\n        }\n        BlockGlobalResult::Failure(err) => {\n            return Err(BlockExecutionError::BlockGlobal(format!(\"{:?}\", err)));\n        }\n        BlockGlobalResult::Success {\n            post_state_hash, ..\n        } => {\n            state_root_hash = post_state_hash;\n        }\n    }\n\n    // put protocol version to global state\n    match scratch_state.block_global(BlockGlobalRequest::set_protocol_version(\n        state_root_hash,\n        protocol_version,\n    )) {\n        BlockGlobalResult::RootNotFound => {\n            return Err(BlockExecutionError::RootNotFound(state_root_hash));\n        }\n        BlockGlobalResult::Failure(err) => {\n            return 
Err(BlockExecutionError::BlockGlobal(format!(\"{:?}\", err)));\n        }\n        BlockGlobalResult::Success {\n            post_state_hash, ..\n        } => {\n            state_root_hash = post_state_hash;\n        }\n    }\n\n    // put enable addressable entity flag to global state\n    match scratch_state.block_global(BlockGlobalRequest::set_addressable_entity(\n        state_root_hash,\n        protocol_version,\n        addressable_entity_enabled,\n    )) {\n        BlockGlobalResult::RootNotFound => {\n            return Err(BlockExecutionError::RootNotFound(state_root_hash));\n        }\n        BlockGlobalResult::Failure(err) => {\n            return Err(BlockExecutionError::BlockGlobal(format!(\"{:?}\", err)));\n        }\n        BlockGlobalResult::Success {\n            post_state_hash, ..\n        } => {\n            state_root_hash = post_state_hash;\n        }\n    }\n\n    let transaction_config = &chainspec.transaction_config;\n\n    for stored_transaction in executable_block.transactions {\n        let transaction = MetaTransaction::from_transaction(\n            &stored_transaction,\n            chainspec.core_config.pricing_handling,\n            transaction_config,\n        )\n        .map_err(|err| BlockExecutionError::TransactionConversion(err.to_string()))?;\n\n        let initiator_addr = transaction.initiator_addr();\n        let transaction_hash = transaction.hash();\n        let transaction_args = transaction.session_args().clone();\n        let entry_point = transaction.entry_point();\n        let authorization_keys = transaction.signers();\n\n        /*\n        we solve for halting state using a `gas limit` which is the maximum amount of\n        computation we will allow a given transaction to consume. the transaction itself\n        provides a function to determine this if provided with the current cost tables\n        gas_limit is ALWAYS calculated with price == 1.\n\n        next there is the actual cost, i.e. 
how much we charge for that computation\n        this is calculated by multiplying the gas limit by the current `gas_price`\n        gas price has a floor of 1, and the ceiling is configured in the chainspec\n        NOTE: when the gas price is 1, the gas limit and the cost are coincidentally\n        equal because x == x * 1; thus it is recommended to run tests with\n        price >1 to avoid being confused by this.\n\n        the third important value is the amount of computation consumed by executing a\n        transaction  for native transactions there is no wasm and the consumed always\n        equals the limit  for bytecode / wasm based transactions the consumed is based on\n        what opcodes were executed and can range from >=0 to <=gas_limit.\n        consumed is determined after execution and is used for refund & fee post-processing.\n\n        we check these top level concerns early so that we can skip if there is an error\n        */\n\n        let mut artifact_builder = {\n            // NOTE: this is the allowed computation limit (gas limit)\n            let gas_limit = match transaction.gas_limit(chainspec) {\n                Ok(gas) => gas,\n                Err(ite) => {\n                    debug!(%transaction_hash, %ite, \"invalid transaction (gas limit)\");\n                    artifacts.push(\n                        ExecutionArtifactBuilder::pre_condition_failure(\n                            &stored_transaction,\n                            current_gas_price,\n                            ite,\n                        )\n                        .build(),\n                    );\n                    continue;\n                }\n            };\n\n            // NOTE: this is the actual adjusted cost that we charge for (gas limit * gas price)\n            let cost = match stored_transaction.gas_cost(\n                chainspec,\n                transaction.transaction_lane(),\n                current_gas_price,\n            ) {\n                
Ok(motes) => motes.value(),\n                Err(ite) => {\n                    debug!(%transaction_hash, \"invalid transaction (motes conversion)\");\n                    artifacts.push(\n                        ExecutionArtifactBuilder::pre_condition_failure(\n                            &stored_transaction,\n                            current_gas_price,\n                            ite,\n                        )\n                        .build(),\n                    );\n                    continue;\n                }\n            };\n\n            // this is the minimum we will charge, even if 0 is consumed\n            let min_cost = gas_limit.value().min(baseline_motes_amount);\n            ExecutionArtifactBuilder::new(\n                &stored_transaction,\n                gas_limit,\n                current_gas_price,\n                cost,\n                min_cost,\n            )\n        };\n\n        let is_standard_payment = transaction.is_standard_payment();\n        let is_custom_payment = !is_standard_payment && transaction.is_custom_payment();\n        let is_v1_wasm = transaction.is_v1_wasm();\n        let is_v2_wasm = transaction.is_v2_wasm();\n        let refund_purse_active = is_custom_payment;\n        if refund_purse_active {\n            // if custom payment before doing any processing, initialize the initiator's main purse\n            //  to be the refund purse for this transaction.\n            // NOTE: when executed, custom payment logic has the option to call set_refund_purse\n            //  on the handle payment contract to set up a different refund purse, if desired.\n            let handle_refund_request = HandleRefundRequest::new(\n                native_runtime_config.clone(),\n                state_root_hash,\n                protocol_version,\n                transaction_hash,\n                HandleRefundMode::SetRefundPurse {\n                    target: Box::new(initiator_addr.clone().into()),\n                },\n        
    );\n            let handle_refund_result = scratch_state.handle_refund(handle_refund_request);\n            if let Err(root_not_found) =\n                artifact_builder.with_set_refund_purse_result(&handle_refund_result)\n            {\n                if root_not_found {\n                    return Err(BlockExecutionError::RootNotFound(state_root_hash));\n                }\n                artifacts.push(artifact_builder.build());\n                continue; // don't commit effects, move on\n            }\n            state_root_hash = scratch_state\n                .commit_effects(state_root_hash, handle_refund_result.effects().clone())?;\n        }\n\n        {\n            // Ensure the initiator's main purse can cover the penalty payment before proceeding.\n            let initial_balance_result = scratch_state.balance(BalanceRequest::new(\n                state_root_hash,\n                protocol_version,\n                initiator_addr.clone().into(),\n                balance_handling,\n                ProofHandling::NoProofs,\n            ));\n\n            if let Err(root_not_found) = artifact_builder\n                .with_initial_balance_result(initial_balance_result.clone(), baseline_motes_amount)\n            {\n                if root_not_found {\n                    return Err(BlockExecutionError::RootNotFound(state_root_hash));\n                }\n                trace!(%transaction_hash, \"insufficient initial balance\");\n                debug!(%transaction_hash, ?initial_balance_result, %baseline_motes_amount, \"insufficient initial balance\");\n                artifacts.push(artifact_builder.build());\n                // only reads have happened so far, and we can't charge due\n                // to insufficient balance, so move on with no effects committed\n                continue;\n            }\n        }\n\n        let mut balance_identifier = {\n            if is_standard_payment {\n                let contract_might_pay =\n          
          addressable_entity_enabled && transaction.is_contract_by_hash_invocation();\n\n                if contract_might_pay {\n                    match invoked_contract_will_pay(&scratch_state, state_root_hash, &transaction) {\n                        Ok(Some(entity_addr)) => BalanceIdentifier::Entity(entity_addr),\n                        Ok(None) => {\n                            // the initiating account pays using its main purse\n                            trace!(%transaction_hash, \"direct invocation with account payment\");\n                            initiator_addr.clone().into()\n                        }\n                        Err(err) => {\n                            trace!(%transaction_hash, \"failed to resolve contract self payment\");\n                            artifact_builder\n                                .with_state_result_error(err)\n                                .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?;\n                            BalanceIdentifier::PenalizedAccount(\n                                initiator_addr.clone().account_hash(),\n                            )\n                        }\n                    }\n                } else {\n                    // the initiating account pays using its main purse\n                    trace!(%transaction_hash, \"account session with standard payment\");\n                    initiator_addr.clone().into()\n                }\n            } else if is_v2_wasm {\n                // vm2 does not support custom payment, so it MUST be standard payment\n                // if transaction runtime is v2 then the initiating account will pay using\n                // the refund purse\n                initiator_addr.clone().into()\n            } else if is_custom_payment {\n                // this is the custom payment flow\n                // the initiating account will pay, but wants to do so with a different purse or\n                // in a custom way. 
If anything goes wrong, penalize the sender, do not execute\n                let custom_payment_gas_limit =\n                    Gas::new(chainspec.transaction_config.native_transfer_minimum_motes * 5);\n                let pay_result = match WasmV1Request::new_custom_payment(\n                    BlockInfo::new(\n                        state_root_hash,\n                        block_time,\n                        parent_block_hash,\n                        block_height,\n                        protocol_version,\n                    ),\n                    custom_payment_gas_limit,\n                    &transaction.to_payment_input_data(),\n                ) {\n                    Ok(mut pay_request) => {\n                        pay_request\n                            .args\n                            .insert(ARG_AMOUNT, artifact_builder.cost_to_use())\n                            .map_err(|e| BlockExecutionError::PaymentError(e.to_string()))?;\n                        execution_engine_v1.execute(&scratch_state, pay_request)\n                    }\n                    Err(error) => {\n                        WasmV1Result::invalid_executable_item(custom_payment_gas_limit, error)\n                    }\n                };\n\n                let insufficient_payment_deposited = !pay_result.balance_increased_by_amount(\n                    payment_balance_addr,\n                    artifact_builder.cost_to_use(),\n                );\n\n                if insufficient_payment_deposited || pay_result.error().is_some() {\n                    // Charge initiator for the penalty payment amount\n                    // the most expedient way to do this that aligns with later code\n                    // is to transfer from the initiator's main purse to the payment purse\n                    let transfer_result = scratch_state.transfer(TransferRequest::new_indirect(\n                        native_runtime_config.clone(),\n                        state_root_hash,\n         
               protocol_version,\n                        transaction_hash,\n                        initiator_addr.clone(),\n                        authorization_keys.clone(),\n                        BalanceIdentifierTransferArgs::new(\n                            None,\n                            initiator_addr.clone().into(),\n                            BalanceIdentifier::Payment,\n                            baseline_motes_amount,\n                            None,\n                        ),\n                    ));\n\n                    let msg = match pay_result.error() {\n                        Some(err) => format!(\"{}\", err),\n                        None => {\n                            if insufficient_payment_deposited {\n                                \"Insufficient custom payment\".to_string()\n                            } else {\n                                // this should be unreachable due to guard condition above\n                                let unk = \"Unknown custom payment issue\";\n                                warn!(%transaction_hash, unk);\n                                debug_assert!(false, \"{}\", unk);\n                                unk.to_string()\n                            }\n                        }\n                    };\n                    // commit penalty payment effects\n                    state_root_hash = scratch_state\n                        .commit_effects(state_root_hash, transfer_result.effects().clone())?;\n                    artifact_builder\n                        .with_error_message(msg)\n                        .with_transfer_result(transfer_result)\n                        .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?;\n                    trace!(%transaction_hash, balance_identifier=?BalanceIdentifier::PenalizedPayment, \"account session with custom payment failed\");\n                    BalanceIdentifier::PenalizedPayment\n                } else {\n                  
  // commit successful effects\n                    state_root_hash = scratch_state\n                        .commit_effects(state_root_hash, pay_result.effects().clone())?;\n                    artifact_builder\n                        .with_wasm_v1_result(pay_result)\n                        .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?;\n                    trace!(%transaction_hash, balance_identifier=?BalanceIdentifier::Payment, \"account session with custom payment success\");\n                    BalanceIdentifier::Payment\n                }\n            } else {\n                BalanceIdentifier::PenalizedAccount(initiator_addr.clone().account_hash())\n            }\n        };\n\n        let post_payment_balance_result = scratch_state.balance(BalanceRequest::new(\n            state_root_hash,\n            protocol_version,\n            balance_identifier.clone(),\n            balance_handling,\n            ProofHandling::NoProofs,\n        ));\n\n        artifact_builder.with_available(post_payment_balance_result.available_balance().copied());\n        let lane_id = transaction.transaction_lane();\n\n        let allow_execution = {\n            let is_not_penalized = !balance_identifier.is_penalty();\n            // in the case of custom payment, we do all payment processing up front after checking\n            // if the initiator can cover the penalty payment, and then either charge the full\n            // amount in the happy path or the penalty amount in the sad path...in whichever case\n            // the sad path is handled by is_penalty and the balance in the payment purse is\n            // the penalty payment or the full amount but is 'sufficient' either way\n            let actual_cost = artifact_builder.actual_cost(); // use actual cost here\n            let is_sufficient_balance =\n                is_custom_payment || post_payment_balance_result.is_sufficient(actual_cost);\n            let is_allowed_by_chainspec = 
chainspec.is_supported(lane_id);\n            let allow = is_not_penalized && is_sufficient_balance && is_allowed_by_chainspec;\n            if !allow {\n                let err_msg = {\n                    if !is_sufficient_balance {\n                        \"Insufficient funds\".to_string()\n                    } else {\n                        format!(\n                            \"penalized: {}, sufficient balance: {}, allowed by chainspec: {}\",\n                            !is_not_penalized, is_sufficient_balance, is_allowed_by_chainspec\n                        )\n                    }\n                };\n                if artifact_builder.error_message().is_none() {\n                    artifact_builder.with_error_message(err_msg);\n                }\n                info!(%transaction_hash, ?balance_identifier, ?is_sufficient_balance, ?is_not_penalized, ?is_allowed_by_chainspec, \"payment preprocessing unsuccessful\");\n            } else {\n                debug!(%transaction_hash, ?balance_identifier, ?is_sufficient_balance, ?is_not_penalized, ?is_allowed_by_chainspec, \"payment preprocessing successful\");\n            }\n            allow\n        };\n\n        if allow_execution {\n            debug!(%transaction_hash, ?allow_execution, \"execution allowed\");\n            if is_standard_payment {\n                // place a processing hold on the paying account to prevent double spend.\n                let hold_amount = artifact_builder.cost_to_use();\n                let hold_request = BalanceHoldRequest::new_processing_hold(\n                    state_root_hash,\n                    protocol_version,\n                    balance_identifier.clone(),\n                    hold_amount,\n                    insufficient_balance_handling,\n                );\n                let hold_result = scratch_state.balance_hold(hold_request);\n                state_root_hash =\n                    scratch_state.commit_effects(state_root_hash, 
hold_result.effects().clone())?;\n                artifact_builder\n                    .with_balance_hold_result(&hold_result)\n                    .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?;\n            }\n\n            trace!(%transaction_hash, ?lane_id, \"eligible for execution\");\n            match lane_id {\n                lane_id if lane_id == MINT_LANE_ID => {\n                    let runtime_args = transaction_args\n                        .as_named()\n                        .ok_or(BlockExecutionError::InvalidTransactionArgs)?;\n                    let entry_point = transaction.entry_point();\n                    if let TransactionEntryPoint::Transfer = entry_point {\n                        let transfer_result =\n                            scratch_state.transfer(TransferRequest::with_runtime_args(\n                                native_runtime_config.clone(),\n                                state_root_hash,\n                                protocol_version,\n                                transaction_hash,\n                                initiator_addr.clone(),\n                                authorization_keys,\n                                runtime_args.clone(),\n                            ));\n                        state_root_hash = scratch_state\n                            .commit_effects(state_root_hash, transfer_result.effects().clone())?;\n                        artifact_builder\n                            .consume_limit()\n                            .with_transfer_result(transfer_result)\n                            .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?;\n                    } else if let TransactionEntryPoint::Burn = entry_point {\n                        let burn_result = scratch_state.burn(BurnRequest::with_runtime_args(\n                            native_runtime_config.clone(),\n                            state_root_hash,\n                            protocol_version,\n    
                        transaction_hash,\n                            initiator_addr.clone(),\n                            authorization_keys,\n                            runtime_args.clone(),\n                        ));\n                        state_root_hash = scratch_state\n                            .commit_effects(state_root_hash, burn_result.effects().clone())?;\n                        artifact_builder\n                            .consume_limit()\n                            .with_burn_result(burn_result)\n                            .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?;\n                    } else {\n                        artifact_builder.with_error_message(format!(\n                            \"Attempt to call unsupported native mint entrypoint: {}\",\n                            entry_point\n                        ));\n                    }\n                }\n                lane_id if lane_id == AUCTION_LANE_ID => {\n                    let runtime_args = transaction_args\n                        .as_named()\n                        .ok_or(BlockExecutionError::InvalidTransactionArgs)?;\n                    match AuctionMethod::from_parts(entry_point, runtime_args, chainspec) {\n                        Ok(auction_method) => {\n                            let bidding_result = scratch_state.bidding(BiddingRequest::new(\n                                native_runtime_config.clone(),\n                                state_root_hash,\n                                protocol_version,\n                                transaction_hash,\n                                initiator_addr.clone(),\n                                authorization_keys,\n                                auction_method,\n                            ));\n                            state_root_hash = scratch_state.commit_effects(\n                                state_root_hash,\n                                bidding_result.effects().clone(),\n        
                    )?;\n                            artifact_builder\n                                .consume_limit()\n                                .with_bidding_result(bidding_result)\n                                .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?;\n                        }\n                        Err(ame) => {\n                            error!(\n                                %transaction_hash,\n                                ?ame,\n                                \"failed to determine auction method\"\n                            );\n                            artifact_builder.with_auction_method_error(&ame);\n                        }\n                    };\n                }\n                _ if is_v1_wasm => {\n                    let wasm_v1_start = Instant::now();\n                    let session_input_data = transaction.to_session_input_data();\n                    match WasmV1Request::new_session(\n                        BlockInfo::new(\n                            state_root_hash,\n                            block_time,\n                            parent_block_hash,\n                            block_height,\n                            protocol_version,\n                        ),\n                        artifact_builder.gas_limit(),\n                        &session_input_data,\n                    ) {\n                        Ok(wasm_v1_request) => {\n                            trace!(%transaction_hash, ?lane_id, ?wasm_v1_request, \"able to get wasm v1 request\");\n                            let wasm_v1_result =\n                                execution_engine_v1.execute(&scratch_state, wasm_v1_request);\n                            trace!(%transaction_hash, ?lane_id, ?wasm_v1_result, \"able to get wasm v1 result\");\n                            state_root_hash = scratch_state.commit_effects(\n                                state_root_hash,\n                                
wasm_v1_result.effects().clone(),\n                            )?;\n                            // note: consumed is scraped from wasm_v1_result along w/ other fields\n                            artifact_builder\n                                .with_wasm_v1_result(wasm_v1_result)\n                                .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?;\n                        }\n                        Err(ire) => {\n                            debug!(%transaction_hash, ?lane_id, ?ire, \"unable to get wasm v1 request\");\n                            artifact_builder.with_invalid_wasm_v1_request(&ire);\n                        }\n                    };\n                    if let Some(metrics) = metrics.as_ref() {\n                        metrics\n                            .exec_wasm_v1\n                            .observe(wasm_v1_start.elapsed().as_secs_f64());\n                    }\n                }\n                _ if is_v2_wasm => match WasmV2Request::new(\n                    artifact_builder.gas_limit(),\n                    chainspec.network_config.name.clone(),\n                    state_root_hash,\n                    parent_block_hash,\n                    block_height,\n                    &transaction,\n                ) {\n                    Ok(wasm_v2_request) => {\n                        match wasm_v2_request.execute(\n                            &execution_engine_v2,\n                            state_root_hash,\n                            &scratch_state,\n                        ) {\n                            Ok(wasm_v2_result) => {\n                                match &wasm_v2_result {\n                                    WasmV2Result::Install(install_result) => {\n                                        info!(\n                                            contract_hash=base16::encode_lower(&install_result.smart_contract_addr()),\n                                            
pre_state_root_hash=%state_root_hash,\n                                            post_state_root_hash=%install_result.post_state_hash(),\n                                            \"install contract result\");\n                                    }\n\n                                    WasmV2Result::Execute(execute_result) => {\n                                        info!(\n                                            pre_state_root_hash=%state_root_hash,\n                                            post_state_root_hash=%execute_result.post_state_hash(),\n                                            host_error=?execute_result.host_error.as_ref(),\n                                            \"execute contract result\");\n                                    }\n                                }\n\n                                state_root_hash = wasm_v2_result.post_state_hash();\n                                artifact_builder.with_wasm_v2_result(wasm_v2_result);\n                            }\n                            Err(wasm_v2_error) => {\n                                artifact_builder.with_wasm_v2_error(wasm_v2_error);\n                            }\n                        }\n                    }\n                    Err(ire) => {\n                        debug!(%transaction_hash, ?lane_id, ?ire, \"unable to get wasm v2 request\");\n                        artifact_builder.with_invalid_wasm_v2_request(ire);\n                    }\n                },\n                _ => {\n                    // it is currently not possible to specify a vm other than v1 or v2 on the\n                    // transaction itself, so this should be unreachable\n                    unreachable!(\"Unknown VM target\")\n                }\n            }\n        }\n\n        // clear all holds on the balance_identifier purse before payment processing\n        {\n            let hold_request = BalanceHoldRequest::new_clear(\n                state_root_hash,\n                
protocol_version,\n                BalanceHoldKind::All,\n                balance_identifier.clone(),\n            );\n            let hold_result = scratch_state.balance_hold(hold_request);\n            state_root_hash =\n                scratch_state.commit_effects(state_root_hash, hold_result.effects().clone())?;\n            artifact_builder\n                .with_balance_hold_result(&hold_result)\n                .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?;\n        }\n\n        // handle refunds per the chainspec determined setting.\n        let refund_amount = {\n            let consumed =\n                if balance_identifier.is_penalty() || artifact_builder.error_message().is_some() {\n                    artifact_builder.cost_to_use() // no refund for penalty\n                } else {\n                    artifact_builder.consumed()\n                };\n\n            let available = artifact_builder.available().unwrap_or(U512::zero());\n\n            let refund_mode = match refund_handling {\n                RefundHandling::NoRefund => {\n                    if fee_handling.is_no_fee() && is_custom_payment {\n                        // in no fee mode, we need to return the motes to the refund purse,\n                        //  and then point the balance_identifier to the refund purse\n                        // this will result in the downstream no fee handling logic\n                        //  placing a hold on the correct purse.\n                        balance_identifier = BalanceIdentifier::Refund;\n                        Some(HandleRefundMode::RefundNoFeeCustomPayment {\n                            initiator_addr: Box::new(initiator_addr.clone()),\n                            limit: artifact_builder.limit(),\n                            gas_price: current_gas_price,\n                            cost: artifact_builder.cost_to_use(),\n                        })\n                    } else {\n                        None\n      
              }\n                }\n                RefundHandling::Burn { refund_ratio } => Some(HandleRefundMode::Burn {\n                    limit: artifact_builder.limit(),\n                    gas_price: current_gas_price,\n                    cost: artifact_builder.cost_to_use(),\n                    consumed,\n                    source: Box::new(balance_identifier.clone()),\n                    ratio: refund_ratio,\n                    available,\n                }),\n                RefundHandling::Refund { refund_ratio } => {\n                    let source = Box::new(balance_identifier.clone());\n                    if is_custom_payment {\n                        // in custom payment we have to do all payment handling up front.\n                        // therefore, if refunds are turned on we have to transfer the refunded\n                        // amount back to the specified refund purse.\n\n                        // the refund purse for a given transaction is set to the initiator's main\n                        // purse by default, but the custom payment provided by the initiator can\n                        // set a different purse when executed. thus, the handle payment system\n                        // contract tracks a refund purse and is handled internally at processing\n                        // time. Outer logic should never assume or refer to a specific purse for\n                        // purposes of refund. 
instead, `BalanceIdentifier::Refund` is used by outer\n                        // logic, which is interpreted by inner logic to use the currently set\n                        // refund purse.\n                        Some(HandleRefundMode::Refund {\n                            initiator_addr: Box::new(initiator_addr.clone()),\n                            limit: artifact_builder.limit(),\n                            gas_price: current_gas_price,\n                            consumed,\n                            cost: artifact_builder.cost_to_use(),\n                            ratio: refund_ratio,\n                            source,\n                            target: Box::new(BalanceIdentifier::Refund),\n                            available,\n                        })\n                    } else {\n                        // in normal payment handling we put a temporary processing hold\n                        // on the paying purse rather than take the token up front.\n                        // thus, here we only want to determine the refund amount rather than\n                        // attempt to process a refund on something we haven't actually taken yet.\n                        // later in the flow when the processing hold is released and payment is\n                        // finalized we reduce the amount taken by the refunded amount. 
This avoids\n                        // the churn of taking the token up front via transfer (which writes\n                        // multiple permanent records) and then transfer some of it back (which\n                        // writes more permanent records).\n                        Some(HandleRefundMode::CalculateAmount {\n                            limit: artifact_builder.limit(),\n                            gas_price: current_gas_price,\n                            consumed,\n                            cost: artifact_builder.cost_to_use(),\n                            ratio: refund_ratio,\n                            available,\n                        })\n                    }\n                }\n            };\n            match refund_mode {\n                Some(refund_mode) => {\n                    let handle_refund_request = HandleRefundRequest::new(\n                        native_runtime_config.clone(),\n                        state_root_hash,\n                        protocol_version,\n                        transaction_hash,\n                        refund_mode,\n                    );\n                    let handle_refund_result = scratch_state.handle_refund(handle_refund_request);\n                    let refunded_amount = handle_refund_result.refund_amount();\n                    state_root_hash = scratch_state\n                        .commit_effects(state_root_hash, handle_refund_result.effects().clone())?;\n                    artifact_builder\n                        .with_handle_refund_result(&handle_refund_result)\n                        .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?;\n\n                    refunded_amount\n                }\n                None => U512::zero(),\n            }\n        };\n        artifact_builder.with_refund_amount(refund_amount);\n\n        // take the lower of the difference between cost - refund OR available\n        let fee_amount = artifact_builder\n            
.cost_to_use()\n            .saturating_sub(refund_amount)\n            .min(artifact_builder.available().unwrap_or(U512::zero()));\n\n        // handle fees per the chainspec determined setting.\n        let handle_fee_result = match fee_handling {\n            FeeHandling::NoFee => {\n                // in this mode, a gas hold is placed on the payer's purse.\n                let hold_request = BalanceHoldRequest::new_gas_hold(\n                    state_root_hash,\n                    protocol_version,\n                    balance_identifier,\n                    fee_amount,\n                    insufficient_balance_handling,\n                );\n                let hold_result = scratch_state.balance_hold(hold_request);\n                state_root_hash =\n                    scratch_state.commit_effects(state_root_hash, hold_result.effects().clone())?;\n                artifact_builder\n                    .with_balance_hold_result(&hold_result)\n                    .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?;\n                let handle_fee_request = HandleFeeRequest::new(\n                    native_runtime_config.clone(),\n                    state_root_hash,\n                    protocol_version,\n                    transaction_hash,\n                    HandleFeeMode::credit(proposer.clone(), fee_amount, era_id),\n                );\n                scratch_state.handle_fee(handle_fee_request)\n            }\n            FeeHandling::Burn => {\n                // in this mode, the fee portion is burned.\n                let handle_fee_request = HandleFeeRequest::new(\n                    native_runtime_config.clone(),\n                    state_root_hash,\n                    protocol_version,\n                    transaction_hash,\n                    HandleFeeMode::burn(balance_identifier, Some(fee_amount)),\n                );\n                scratch_state.handle_fee(handle_fee_request)\n            }\n            
FeeHandling::PayToProposer => {\n                // in this mode, the consumed gas is paid as a fee to the block proposer\n                let handle_fee_request = HandleFeeRequest::new(\n                    native_runtime_config.clone(),\n                    state_root_hash,\n                    protocol_version,\n                    transaction_hash,\n                    HandleFeeMode::pay(\n                        Box::new(initiator_addr.clone()),\n                        balance_identifier,\n                        BalanceIdentifier::Public(*(proposer.clone())),\n                        fee_amount,\n                    ),\n                );\n                scratch_state.handle_fee(handle_fee_request)\n            }\n            FeeHandling::Accumulate => {\n                // in this mode, consumed gas is accumulated into a single purse\n                // for later distribution\n                let handle_fee_request = HandleFeeRequest::new(\n                    native_runtime_config.clone(),\n                    state_root_hash,\n                    protocol_version,\n                    transaction_hash,\n                    HandleFeeMode::pay(\n                        Box::new(initiator_addr.clone()),\n                        balance_identifier,\n                        BalanceIdentifier::Accumulate,\n                        fee_amount,\n                    ),\n                );\n                scratch_state.handle_fee(handle_fee_request)\n            }\n        };\n\n        state_root_hash =\n            scratch_state.commit_effects(state_root_hash, handle_fee_result.effects().clone())?;\n\n        artifact_builder\n            .with_handle_fee_result(&handle_fee_result)\n            .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?;\n\n        // clear refund purse if it was set\n        if refund_purse_active {\n            // if refunds are turned on we initialize the refund purse to the initiator's main\n            // purse before 
doing any processing. NOTE: when executed, custom payment logic\n            // has the option to call set_refund_purse on the handle payment contract to set\n            // up a different refund purse, if desired.\n            let handle_refund_request = HandleRefundRequest::new(\n                native_runtime_config.clone(),\n                state_root_hash,\n                protocol_version,\n                transaction_hash,\n                HandleRefundMode::ClearRefundPurse,\n            );\n            let handle_refund_result = scratch_state.handle_refund(handle_refund_request);\n            if let Err(root_not_found) =\n                artifact_builder.with_clear_refund_purse_result(&handle_refund_result)\n            {\n                if root_not_found {\n                    return Err(BlockExecutionError::RootNotFound(state_root_hash));\n                }\n                warn!(\n                    \"{}\",\n                    artifact_builder.error_message().unwrap_or(\n                        \"unknown error encountered when attempting to clear refund purse\"\n                            .to_string()\n                    )\n                );\n            }\n            state_root_hash = scratch_state\n                .commit_effects(state_root_hash, handle_refund_result.effects().clone())?;\n        }\n\n        artifacts.push(artifact_builder.build());\n    }\n\n    // transaction processing is finished\n    if let Some(metrics) = metrics.as_ref() {\n        metrics\n            .exec_block_tnx_processing\n            .observe(txn_processing_start.elapsed().as_secs_f64());\n    }\n\n    // post-processing starts now\n    let post_processing_start = Instant::now();\n\n    // calculate and store checksums for approvals and execution effects across the transactions in\n    // the block we do this so that the full set of approvals and the full set of effect metadata\n    // can be verified if necessary for a given block. 
the block synchronizer in particular\n    // depends on the existence of such checksums.\n    let transaction_approvals_hashes = {\n        let approvals_checksum = types::compute_approvals_checksum(transaction_ids.clone())\n            .map_err(BlockExecutionError::FailedToComputeApprovalsChecksum)?;\n        let execution_results_checksum = compute_execution_results_checksum(\n            artifacts.iter().map(|artifact| &artifact.execution_result),\n        )?;\n        let mut checksum_registry = ChecksumRegistry::new();\n        checksum_registry.insert(APPROVALS_CHECKSUM_NAME, approvals_checksum);\n        checksum_registry.insert(EXECUTION_RESULTS_CHECKSUM_NAME, execution_results_checksum);\n\n        let mut effects = Effects::new();\n        effects.push(TransformV2::new(\n            Key::ChecksumRegistry,\n            TransformKindV2::Write(\n                CLValue::from_t(checksum_registry)\n                    .map_err(BlockExecutionError::ChecksumRegistryToCLValue)?\n                    .into(),\n            ),\n        ));\n        scratch_state.commit_effects(state_root_hash, effects)?;\n        transaction_ids\n            .into_iter()\n            .map(|id| id.approvals_hash())\n            .collect()\n    };\n\n    if let Some(metrics) = metrics.as_ref() {\n        metrics\n            .txn_approvals_hashes_calculation\n            .observe(post_processing_start.elapsed().as_secs_f64());\n    }\n\n    // Pay out  ̶b̶l̶o̶c̶k̶ e͇r͇a͇ rewards\n    // NOTE: despite the name, these rewards are currently paid out per ERA not per BLOCK\n    // at one point, they were going to be paid out per block (and might be in the future)\n    // but it ended up settling on per era. 
the behavior is driven by Some / None\n    // thus if in future the calling logic passes rewards per block it should just work as is.\n    // This auto-commits.\n    if let Some(rewards) = &executable_block.rewards {\n        let block_rewards_payout_start = Instant::now();\n        // Pay out block fees, if relevant. This auto-commits\n        {\n            let fee_req = FeeRequest::new(\n                native_runtime_config.clone(),\n                state_root_hash,\n                protocol_version,\n                block_time,\n            );\n            debug!(?fee_req, \"distributing fees\");\n            match scratch_state.distribute_fees(fee_req) {\n                FeeResult::RootNotFound => {\n                    return Err(BlockExecutionError::RootNotFound(state_root_hash));\n                }\n                FeeResult::Failure(fer) => return Err(BlockExecutionError::DistributeFees(fer)),\n                FeeResult::Success {\n                    post_state_hash, ..\n                } => {\n                    debug!(\"fee distribution success\");\n                    state_root_hash = post_state_hash;\n                }\n            }\n        }\n\n        let rewards_req = BlockRewardsRequest::new(\n            native_runtime_config.clone(),\n            state_root_hash,\n            protocol_version,\n            block_time,\n            rewards.clone(),\n        );\n        debug!(?rewards_req, \"distributing rewards\");\n        match scratch_state.distribute_block_rewards(rewards_req) {\n            BlockRewardsResult::RootNotFound => {\n                return Err(BlockExecutionError::RootNotFound(state_root_hash));\n            }\n            BlockRewardsResult::Failure(bre) => {\n                return Err(BlockExecutionError::DistributeBlockRewards(bre));\n            }\n            BlockRewardsResult::Success {\n                post_state_hash, ..\n            } => {\n                debug!(\"rewards distribution success\");\n               
 state_root_hash = post_state_hash;\n            }\n        }\n        if let Some(metrics) = metrics.as_ref() {\n            metrics\n                .block_rewards_payout\n                .observe(block_rewards_payout_start.elapsed().as_secs_f64());\n        }\n    }\n\n    // if era report is some, this is a switch block. a series of end-of-era extra processing must\n    // transpire before this block is entirely finished.\n    let step_outcome = if let Some(era_report) = &executable_block.era_report {\n        // step processing starts now\n        let step_processing_start = Instant::now();\n\n        debug!(\"committing step\");\n        let step_effects = match commit_step(\n            native_runtime_config,\n            &scratch_state,\n            metrics.clone(),\n            protocol_version,\n            state_root_hash,\n            era_report.clone(),\n            block_time.value(),\n            executable_block.era_id.successor(),\n        ) {\n            StepResult::RootNotFound => {\n                return Err(BlockExecutionError::RootNotFound(state_root_hash));\n            }\n            StepResult::Failure(err) => return Err(BlockExecutionError::Step(err)),\n            StepResult::Success {\n                effects,\n                post_state_hash,\n                ..\n            } => {\n                state_root_hash = post_state_hash;\n                effects\n            }\n        };\n        debug!(\"step committed\");\n\n        let era_validators_req = EraValidatorsRequest::new(state_root_hash);\n        let era_validators_result = data_access_layer.era_validators(era_validators_req);\n\n        let upcoming_era_validators = match era_validators_result {\n            EraValidatorsResult::RootNotFound => {\n                panic!(\"root not found\");\n            }\n            EraValidatorsResult::AuctionNotFound => {\n                panic!(\"auction not found\");\n            }\n            EraValidatorsResult::ValueNotFound(msg) 
=> {\n                panic!(\"validator snapshot not found: {}\", msg);\n            }\n            EraValidatorsResult::Failure(tce) => {\n                return Err(BlockExecutionError::GetEraValidators(tce));\n            }\n            EraValidatorsResult::Success { era_validators } => era_validators,\n        };\n\n        // step processing is finished\n        if let Some(metrics) = metrics.as_ref() {\n            metrics\n                .exec_block_step_processing\n                .observe(step_processing_start.elapsed().as_secs_f64());\n        }\n        Some(StepOutcome {\n            step_effects,\n            upcoming_era_validators,\n        })\n    } else {\n        None\n    };\n\n    // Pruning -- this is orthogonal to the contents of the block, but we deliberately do it\n    // at the end to avoid a read ordering issue during block execution.\n    if let Some(previous_block_height) = block_height.checked_sub(1) {\n        if let Some(keys_to_prune) = calculate_prune_eras(\n            activation_point_era_id,\n            key_block_height_for_activation_point,\n            previous_block_height,\n            prune_batch_size,\n        ) {\n            let pruning_start = Instant::now();\n\n            let first_key = keys_to_prune.first().copied();\n            let last_key = keys_to_prune.last().copied();\n            info!(\n                previous_block_height,\n                %key_block_height_for_activation_point,\n                %state_root_hash,\n                first_key=?first_key,\n                last_key=?last_key,\n                \"commit prune: preparing prune config\"\n            );\n            let request = PruneRequest::new(state_root_hash, keys_to_prune);\n            match scratch_state.prune(request) {\n                PruneResult::RootNotFound => {\n                    error!(\n                        previous_block_height,\n                        %state_root_hash,\n                        \"commit prune: root not 
found\"\n                    );\n                    panic!(\n                        \"Root {} not found while performing a prune.\",\n                        state_root_hash\n                    );\n                }\n                PruneResult::MissingKey => {\n                    warn!(\n                        previous_block_height,\n                        %state_root_hash,\n                        \"commit prune: key does not exist\"\n                    );\n                }\n                PruneResult::Success {\n                    post_state_hash, ..\n                } => {\n                    info!(\n                        previous_block_height,\n                        %key_block_height_for_activation_point,\n                        %state_root_hash,\n                        %post_state_hash,\n                        first_key=?first_key,\n                        last_key=?last_key,\n                        \"commit prune: success\"\n                    );\n                    state_root_hash = post_state_hash;\n                }\n                PruneResult::Failure(tce) => {\n                    error!(?tce, \"commit prune: failure\");\n                    return Err(tce.into());\n                }\n            }\n            if let Some(metrics) = metrics.as_ref() {\n                metrics\n                    .pruning_time\n                    .observe(pruning_start.elapsed().as_secs_f64());\n            }\n        }\n    }\n\n    {\n        let database_write_start = Instant::now();\n        // Finally, the new state-root-hash from the cumulative changes to global state is\n        // returned when they are written to LMDB.\n        state_root_hash = data_access_layer.write_scratch_to_db(state_root_hash, scratch_state)?;\n        if let Some(metrics) = metrics.as_ref() {\n            metrics\n                .scratch_lmdb_write_time\n                .observe(database_write_start.elapsed().as_secs_f64());\n        }\n\n        // Flush once, 
after all data mutation.\n        let database_flush_start = Instant::now();\n        let flush_req = FlushRequest::new();\n        let flush_result = data_access_layer.flush(flush_req);\n        if let Err(gse) = flush_result.as_error() {\n            error!(\"failed to flush lmdb\");\n            return Err(BlockExecutionError::Lmdb(gse));\n        }\n        if let Some(metrics) = metrics.as_ref() {\n            metrics\n                .database_flush_time\n                .observe(database_flush_start.elapsed().as_secs_f64());\n        }\n    }\n\n    // the rest of this is post process, picking out data bits to return to caller\n    let next_era_id = executable_block.era_id.successor();\n    let maybe_next_era_validator_weights: Option<(BTreeMap<PublicKey, U512>, u8)> =\n        match step_outcome.as_ref() {\n            None => None,\n            Some(effects_and_validators) => {\n                match effects_and_validators\n                    .upcoming_era_validators\n                    .get(&next_era_id)\n                    .cloned()\n                {\n                    Some(validators) => next_era_gas_price.map(|gas_price| (validators, gas_price)),\n                    None => None,\n                }\n            }\n        };\n\n    let era_end = match (\n        executable_block.era_report,\n        maybe_next_era_validator_weights,\n    ) {\n        (None, None) => None,\n        (\n            Some(InternalEraReport {\n                equivocators,\n                inactive_validators,\n            }),\n            Some((next_era_validator_weights, next_era_gas_price)),\n        ) => Some(EraEndV2::new(\n            equivocators,\n            inactive_validators,\n            next_era_validator_weights,\n            executable_block.rewards.unwrap_or_default(),\n            next_era_gas_price,\n        )),\n        (maybe_era_report, maybe_next_era_validator_weights) => {\n            if maybe_era_report.is_none() {\n                error!(\n  
                  \"era_end {}: maybe_era_report is none\",\n                    executable_block.era_id\n                );\n            }\n            if maybe_next_era_validator_weights.is_none() {\n                error!(\n                    \"era_end {}: maybe_next_era_validator_weights is none\",\n                    executable_block.era_id\n                );\n            }\n            return Err(BlockExecutionError::FailedToCreateEraEnd {\n                maybe_era_report,\n                maybe_next_era_validator_weights,\n            });\n        }\n    };\n\n    let block = Arc::new(BlockV2::new(\n        parent_hash,\n        parent_seed,\n        state_root_hash,\n        executable_block.random_bit,\n        era_end,\n        executable_block.timestamp,\n        executable_block.era_id,\n        block_height,\n        protocol_version,\n        (*proposer).clone(),\n        executable_block.transaction_map,\n        executable_block.rewarded_signatures,\n        current_gas_price,\n        last_switch_block_hash,\n    ));\n\n    let proof_of_checksum_registry = match data_access_layer.tracking_copy(state_root_hash)? {\n        Some(tc) => match tc.reader().read_with_proof(&Key::ChecksumRegistry)? 
{\n            Some(proof) => proof,\n            None => return Err(BlockExecutionError::MissingChecksumRegistry),\n        },\n        None => return Err(BlockExecutionError::RootNotFound(state_root_hash)),\n    };\n\n    let approvals_hashes = Box::new(ApprovalsHashes::new(\n        *block.hash(),\n        transaction_approvals_hashes,\n        proof_of_checksum_registry,\n    ));\n\n    // processing is finished now\n    if let Some(metrics) = metrics.as_ref() {\n        metrics\n            .exec_block_post_processing\n            .observe(post_processing_start.elapsed().as_secs_f64());\n        metrics\n            .exec_block_total\n            .observe(start.elapsed().as_secs_f64());\n    }\n\n    Ok(BlockAndExecutionArtifacts {\n        block,\n        approvals_hashes,\n        execution_artifacts: artifacts,\n        step_outcome,\n    })\n}\n\n/// Execute the transaction without committing the effects.\n/// Intended to be used for discovery operations on read-only nodes.\n///\n/// Returns effects of the execution.\npub(super) fn speculatively_execute<S>(\n    state_provider: &S,\n    chainspec: &Chainspec,\n    execution_engine_v1: &ExecutionEngineV1,\n    block_header: BlockHeader,\n    input_transaction: Transaction,\n) -> SpeculativeExecutionResult\nwhere\n    S: StateProvider,\n{\n    let transaction_config = &chainspec.transaction_config;\n    let maybe_transaction = MetaTransaction::from_transaction(\n        &input_transaction,\n        chainspec.core_config.pricing_handling,\n        transaction_config,\n    );\n    if let Err(error) = maybe_transaction {\n        return SpeculativeExecutionResult::invalid_transaction(error);\n    }\n    let transaction = maybe_transaction.unwrap();\n    let state_root_hash = block_header.state_root_hash();\n    let parent_block_hash = block_header.block_hash();\n    let block_height = block_header.height();\n    let block_time = block_header\n        .timestamp()\n        
.saturating_add(chainspec.core_config.minimum_block_time);\n    let gas_limit = match input_transaction.gas_limit(chainspec, transaction.transaction_lane()) {\n        Ok(gas_limit) => gas_limit,\n        Err(_) => {\n            return SpeculativeExecutionResult::invalid_gas_limit(input_transaction);\n        }\n    };\n\n    if transaction.is_deploy_transaction() {\n        if transaction.is_native() {\n            let limit = Gas::from(chainspec.system_costs_config.mint_costs().transfer);\n            let protocol_version = chainspec.protocol_version();\n            let native_runtime_config = NativeRuntimeConfig::from_chainspec(chainspec);\n            let transaction_hash = transaction.hash();\n            let initiator_addr = transaction.initiator_addr();\n            let authorization_keys = transaction.authorization_keys();\n            let runtime_args = match transaction.session_args().as_named() {\n                Some(runtime_args) => runtime_args.clone(),\n                None => {\n                    return SpeculativeExecutionResult::InvalidTransaction(InvalidTransaction::V1(\n                        InvalidTransactionV1::ExpectedNamedArguments,\n                    ));\n                }\n            };\n\n            let result = state_provider.transfer(TransferRequest::with_runtime_args(\n                native_runtime_config.clone(),\n                *state_root_hash,\n                protocol_version,\n                transaction_hash,\n                initiator_addr.clone(),\n                authorization_keys,\n                runtime_args,\n            ));\n            SpeculativeExecutionResult::WasmV1(Box::new(utils::spec_exec_from_transfer_result(\n                limit,\n                result,\n                block_header.block_hash(),\n            )))\n        } else {\n            let block_info = BlockInfo::new(\n                *state_root_hash,\n                block_time.into(),\n                parent_block_hash,\n               
 block_height,\n                execution_engine_v1.config().protocol_version(),\n            );\n            let session_input_data = transaction.to_session_input_data();\n            let wasm_v1_result =\n                match WasmV1Request::new_session(block_info, gas_limit, &session_input_data) {\n                    Ok(wasm_v1_request) => {\n                        execution_engine_v1.execute(state_provider, wasm_v1_request)\n                    }\n                    Err(error) => WasmV1Result::invalid_executable_item(gas_limit, error),\n                };\n            SpeculativeExecutionResult::WasmV1(Box::new(utils::spec_exec_from_wasm_v1_result(\n                wasm_v1_result,\n                block_header.block_hash(),\n            )))\n        }\n    } else {\n        SpeculativeExecutionResult::ReceivedV1Transaction\n    }\n}\n\nfn invoked_contract_will_pay(\n    state_provider: &ScratchGlobalState,\n    state_root_hash: Digest,\n    transaction: &MetaTransaction,\n) -> Result<Option<EntityAddr>, StateResultError> {\n    let (hash_addr, entry_point_name) = match transaction.contract_direct_address() {\n        None => {\n            return Err(StateResultError::ValueNotFound(\n                \"contract direct address not found\".to_string(),\n            ))\n        }\n        Some((hash_addr, entry_point_name)) => (hash_addr, entry_point_name),\n    };\n    let entity_addr = EntityAddr::new_smart_contract(hash_addr);\n    let entry_point_request = EntryPointRequest::new(state_root_hash, entry_point_name, hash_addr);\n    let entry_point_response = state_provider.entry_point(entry_point_request);\n    match entry_point_response {\n        EntryPointResult::RootNotFound => Err(StateResultError::RootNotFound),\n        EntryPointResult::ValueNotFound(msg) => Err(StateResultError::ValueNotFound(msg)),\n        EntryPointResult::Failure(tce) => Err(StateResultError::Failure(tce)),\n        EntryPointResult::Success { entry_point } => {\n            if 
entry_point.will_pay_direct_invocation() {\n                Ok(Some(entity_addr))\n            } else {\n                Ok(None)\n            }\n        }\n    }\n}\n\n#[allow(clippy::too_many_arguments)]\nfn commit_step(\n    native_runtime_config: NativeRuntimeConfig,\n    scratch_state: &ScratchGlobalState,\n    maybe_metrics: Option<Arc<Metrics>>,\n    protocol_version: ProtocolVersion,\n    state_hash: Digest,\n    InternalEraReport {\n        equivocators,\n        inactive_validators,\n    }: InternalEraReport,\n    era_end_timestamp_millis: u64,\n    next_era_id: EraId,\n) -> StepResult {\n    // Both inactive validators and equivocators are evicted\n    let evict_items = inactive_validators\n        .into_iter()\n        .chain(equivocators)\n        .map(EvictItem::new)\n        .collect();\n\n    let step_request = StepRequest::new(\n        native_runtime_config,\n        state_hash,\n        protocol_version,\n        vec![], // <-- casper mainnet currently does not slash\n        evict_items,\n        next_era_id,\n        era_end_timestamp_millis,\n    );\n\n    // Commit the step.\n    let start = Instant::now();\n    let result = scratch_state.step(step_request);\n    debug_assert!(result.is_success(), \"{:?}\", result);\n    if let Some(metrics) = maybe_metrics {\n        let elapsed = start.elapsed().as_secs_f64();\n        metrics.commit_step.observe(elapsed);\n        metrics.latest_commit_step.set(elapsed);\n    }\n    trace!(?result, \"step response\");\n    result\n}\n\n/// Computes the checksum of the given set of execution results.\n///\n/// This will either be a simple hash of the bytesrepr-encoded results (in the case that the\n/// serialized results are not greater than `ChunkWithProof::CHUNK_SIZE_BYTES`), or otherwise will\n/// be a Merkle root hash of the chunks derived from the serialized results.\npub(crate) fn compute_execution_results_checksum<'a>(\n    execution_results_iter: impl Iterator<Item = &'a ExecutionResult> + Clone,\n) 
-> Result<Digest, BlockExecutionError> {\n    // Serialize the execution results as if they were `Vec<ExecutionResult>`.\n    let serialized_length = U32_SERIALIZED_LENGTH\n        + execution_results_iter\n            .clone()\n            .map(|exec_result| exec_result.serialized_length())\n            .sum::<usize>();\n    let mut serialized = vec![];\n    serialized\n        .try_reserve_exact(serialized_length)\n        .map_err(|_| {\n            BlockExecutionError::FailedToComputeApprovalsChecksum(bytesrepr::Error::OutOfMemory)\n        })?;\n    let item_count: u32 = execution_results_iter\n        .clone()\n        .count()\n        .try_into()\n        .map_err(|_| {\n            BlockExecutionError::FailedToComputeApprovalsChecksum(\n                bytesrepr::Error::NotRepresentable,\n            )\n        })?;\n    item_count\n        .write_bytes(&mut serialized)\n        .map_err(BlockExecutionError::FailedToComputeExecutionResultsChecksum)?;\n    for execution_result in execution_results_iter {\n        execution_result\n            .write_bytes(&mut serialized)\n            .map_err(BlockExecutionError::FailedToComputeExecutionResultsChecksum)?;\n    }\n\n    // Now hash the serialized execution results, using the `Chunkable` trait's `hash` method to\n    // chunk if required.\n    serialized.hash().map_err(|_| {\n        BlockExecutionError::FailedToComputeExecutionResultsChecksum(bytesrepr::Error::OutOfMemory)\n    })\n}\n"
  },
  {
    "path": "node/src/components/contract_runtime/rewards/tests.rs",
    "content": "use crate::testing::{map, set};\nuse casper_types::{\n    testing::TestRng, AsymmetricType as _, EraId, RewardedSignatures, TestBlockBuilder,\n};\nuse once_cell::sync::Lazy;\nuse std::{iter, ops::Deref};\n\nuse self::constructors::RewardsInfoConstructor;\n\nuse super::*;\nuse convert::ratio;\n\nfn val(n: u8) -> PublicKey {\n    let mut buf = [0; 32];\n    (0..22).for_each(|i| buf[i] = n);\n    PublicKey::ed25519_from_bytes(buf).unwrap()\n}\n\nstatic VALIDATOR_1: Lazy<PublicKey> = Lazy::new(|| val(1));\nstatic VALIDATOR_2: Lazy<PublicKey> = Lazy::new(|| val(2));\nstatic VALIDATOR_3: Lazy<PublicKey> = Lazy::new(|| val(3));\nstatic VALIDATOR_4: Lazy<PublicKey> = Lazy::new(|| val(4));\n\nfn core_config(\n    rng: &mut TestRng,\n    percent_signatures: u64,\n    percent_finders: u64,\n    minimum_era_height: u64,\n    signature_rewards_max_delay: u64,\n) -> CoreConfig {\n    CoreConfig {\n        finality_signature_proportion: Ratio::new(percent_signatures, 100),\n        finders_fee: Ratio::new(percent_finders, 100),\n        signature_rewards_max_delay,\n        minimum_era_height,\n        ..CoreConfig::random(rng)\n    }\n}\n\n#[test]\nfn production_payout_increases_with_the_supply() {\n    let rng = &mut TestRng::new();\n    let percent_signatures = 40;\n    let percent_finders = 20;\n    let blocks_per_era = 3;\n    let signature_rewards_max_delay = 6;\n    let core_config = core_config(\n        rng,\n        percent_signatures,\n        percent_finders,\n        blocks_per_era,\n        signature_rewards_max_delay,\n    );\n\n    // Eras info:\n\n    let era_1_reward_per_round = 300;\n    let era_2_reward_per_round = 400;\n\n    let weights = map! {\n        VALIDATOR_1.clone() => U512::from(100_u64),\n        VALIDATOR_2.clone() => U512::from(190_u64),\n        VALIDATOR_3.clone() => U512::from(30_u64),\n    };\n\n    let constructor = RewardsInfoConstructor::new(\n        &core_config,\n        map! 
{\n            EraId::new(0) => (weights.clone(), era_1_reward_per_round, vec![\n                (0, VALIDATOR_1.deref(), vec![])\n            ]),\n            EraId::new(1) => (weights.clone(), era_1_reward_per_round, vec![\n                (1, VALIDATOR_1.deref(), vec![]),\n                (2, VALIDATOR_2.deref(), vec![]),\n                (3, VALIDATOR_3.deref(), vec![]),\n            ]),\n            EraId::new(2) => (weights, era_2_reward_per_round, vec![\n                (4, VALIDATOR_3.deref(), vec![]),\n                (5, VALIDATOR_1.deref(), vec![]),\n                (6, VALIDATOR_2.deref(), vec![]),\n            ]),\n        },\n    );\n\n    // Era payouts:\n\n    let rewards_for_era_1 =\n        rewards_for_era(constructor.for_era(rng, 1), EraId::new(1), &core_config).unwrap();\n    let rewards_for_era_2 =\n        rewards_for_era(constructor.for_era(rng, 2), EraId::new(2), &core_config).unwrap();\n\n    // Checks:\n\n    for ((recipient_1, amounts_1), (recipient_2, amounts_2)) in\n        iter::zip(rewards_for_era_1, rewards_for_era_2)\n    {\n        let amount_1: U512 = amounts_1.into_iter().sum();\n        let amount_2: U512 = amounts_2.into_iter().sum();\n        assert_eq!(\n            ratio(amount_1),\n            ratio(era_1_reward_per_round) * ratio(core_config.production_rewards_proportion())\n        );\n        assert_eq!(\n            ratio(amount_2),\n            ratio(era_2_reward_per_round) * ratio(core_config.production_rewards_proportion())\n        );\n        assert_eq!(recipient_1, recipient_2);\n        assert_eq!(amount_1 * 4 / 3, amount_2);\n    }\n}\n\n#[test]\nfn production_payout_depends_on_the_blocks_produced() {\n    let rng = &mut TestRng::new();\n    let percent_signatures = 33;\n    let percent_finders = 20;\n    let blocks_per_era = 3;\n    let signature_rewards_max_delay = 4;\n    let core_config = core_config(\n        rng,\n        percent_signatures,\n        percent_finders,\n        blocks_per_era,\n        
signature_rewards_max_delay,\n    );\n\n    // Eras info:\n\n    let era_1_reward_per_round = 300;\n    let era_2_reward_per_round = 400;\n\n    let weights_1 = map! {\n        VALIDATOR_1.clone() => U512::from(100_u64),\n        VALIDATOR_2.clone() => U512::from(190_u64),\n        VALIDATOR_3.clone() => U512::from(30_u64),\n        VALIDATOR_4.clone() => U512::from(89_u64),\n    };\n\n    let weights_2 = map! {\n        VALIDATOR_1.clone() => U512::from(100_u64),\n        VALIDATOR_2.clone() => U512::from(190_u64),\n        VALIDATOR_3.clone() => U512::from(70_u64),\n        VALIDATOR_4.clone() => U512::from(89_u64),\n    };\n\n    let constructor = RewardsInfoConstructor::new(\n        &core_config,\n        map! {\n            EraId::new(0) => (weights_1.clone(), era_1_reward_per_round, vec![\n                (0, VALIDATOR_1.deref(), vec![])\n            ]),\n            EraId::new(1) => (weights_1, era_1_reward_per_round, vec![\n                (1, VALIDATOR_1.deref(), vec![]),\n                (2, VALIDATOR_1.deref(), vec![]),\n                (3, VALIDATOR_3.deref(), vec![]),\n            ]),\n            EraId::new(2) => (weights_2, era_2_reward_per_round, vec![\n                (4, VALIDATOR_2.deref(), vec![]),\n                (5, VALIDATOR_3.deref(), vec![]),\n                (6, VALIDATOR_4.deref(), vec![]),\n            ]),\n        },\n    );\n\n    // Era 1 payouts:\n\n    let rewards =\n        rewards_for_era(constructor.for_era(rng, 1), EraId::new(1), &core_config).unwrap();\n\n    assert_eq!(\n        rewards,\n        map! 
{\n            VALIDATOR_1.deref().clone() => vec![(ratio(2 * era_1_reward_per_round) * ratio(core_config.production_rewards_proportion())).to_integer()],\n            VALIDATOR_2.deref().clone() => vec![U512::zero()],\n            VALIDATOR_3.deref().clone() => vec![(ratio(era_1_reward_per_round) * ratio(core_config.production_rewards_proportion())).to_integer()],\n            VALIDATOR_4.deref().clone() => vec![U512::zero()],\n        }\n    );\n\n    // Era 2 payouts:\n\n    let rewards =\n        rewards_for_era(constructor.for_era(rng, 2), EraId::new(2), &core_config).unwrap();\n\n    assert_eq!(\n        rewards,\n        map! {\n            VALIDATOR_1.deref().clone() => vec![U512::zero()],\n            VALIDATOR_2.deref().clone() => vec![(ratio(era_2_reward_per_round) * ratio(core_config.production_rewards_proportion())).to_integer()],\n            VALIDATOR_3.deref().clone() => vec![(ratio(era_2_reward_per_round) * ratio(core_config.production_rewards_proportion())).to_integer()],\n            VALIDATOR_4.deref().clone() => vec![(ratio(era_2_reward_per_round) * ratio(core_config.production_rewards_proportion())).to_integer()],\n        }\n    );\n}\n\n/// Only production & collection fee.\n#[test]\nfn all_signatures_rewards_without_contribution_fee() {\n    let rng = &mut TestRng::new();\n    let percent_signatures = 40;\n    let percent_finders = 100;\n    let blocks_per_era = 3;\n    let signature_rewards_max_delay = 4;\n    let core_config = core_config(\n        rng,\n        percent_signatures,\n        percent_finders,\n        blocks_per_era,\n        signature_rewards_max_delay,\n    );\n\n    // Eras info:\n\n    let era_1_reward_per_round = 300;\n    let era_2_reward_per_round = 400;\n\n    let weights = map! 
{\n        VALIDATOR_1.clone() => U512::from(100_u64),\n        VALIDATOR_2.clone() => U512::from(190_u64),\n        VALIDATOR_3.clone() => U512::from(30_u64),\n    };\n\n    // Simple scenario: each validators sign the block finality directly (no \"lag\"):\n    let constructor = RewardsInfoConstructor::new(\n        &core_config,\n        map! {\n            EraId::new(0) => (weights.clone(), era_1_reward_per_round, vec![\n                (0, VALIDATOR_1.deref(), vec![]) // No reward for genesis\n            ]),\n            EraId::new(1) => (weights.clone(), era_1_reward_per_round, vec![\n                (1, VALIDATOR_1.deref(), vec![set!{}]), // Nobody signed the genesis finality\n                (2, VALIDATOR_2.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}]),\n                (3, VALIDATOR_3.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}]),\n            ]),\n            EraId::new(2) => (weights, era_2_reward_per_round, vec![\n                (4, VALIDATOR_1.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}, set!{}]),\n                (5, VALIDATOR_2.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}, set!{}]),\n                (6, VALIDATOR_3.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}, set!{}]),\n            ]),\n        },\n    );\n\n    // Era 1 payouts:\n\n    let rewards_for_era_1 =\n        rewards_for_era(constructor.for_era(rng, 1), EraId::new(1), &core_config).unwrap();\n\n    let validator_1_expected_payout = {\n        // 1 block produced:\n        ratio(1) * ratio(core_config.production_rewards_proportion())\n        // No finality signature collected:\n        + ratio(0) * ratio(core_config.collection_rewards_proportion())\n    } * ratio(era_1_reward_per_round);\n    let validator_2_expected_payout = {\n        // 1 
block produced:\n        ratio(1) * ratio(core_config.production_rewards_proportion())\n        // All finality signatures collected:\n        + ratio(core_config.collection_rewards_proportion())\n    } * ratio(era_1_reward_per_round);\n    let validator_3_expected_payout = {\n        // 1 block produced:\n        ratio(1) * ratio(core_config.production_rewards_proportion())\n        // All finality signatures collected:\n        + ratio(core_config.collection_rewards_proportion())\n    } * ratio(era_1_reward_per_round);\n\n    assert_eq!(\n        map! {\n            VALIDATOR_1.clone() => vec![validator_1_expected_payout.to_integer()],\n            VALIDATOR_2.clone() => vec![validator_2_expected_payout.to_integer()],\n            VALIDATOR_3.clone() => vec![validator_3_expected_payout.to_integer()],\n        },\n        rewards_for_era_1,\n    );\n\n    // Era 2 payouts:\n\n    let rewards_for_era_2 =\n        rewards_for_era(constructor.for_era(rng, 2), EraId::new(2), &core_config).unwrap();\n\n    let validator_1_expected_payout = {\n        // 1 block produced:\n        ratio(1)\n            * ratio(era_2_reward_per_round)\n            * ratio(core_config.production_rewards_proportion())\n        // All finality signature collected (paid out in era 2):\n        + ratio(era_1_reward_per_round) * ratio(core_config.collection_rewards_proportion())\n    };\n    let validator_2_expected_payout = {\n        // 1 block produced:\n        ratio(1) * ratio(core_config.production_rewards_proportion())\n        // All finality signatures collected:\n        + ratio(core_config.collection_rewards_proportion())\n    } * ratio(era_2_reward_per_round);\n    let validator_3_expected_payout = {\n        // 1 block produced:\n        ratio(1) * ratio(core_config.production_rewards_proportion())\n        // All finality signatures collected:\n        + ratio(core_config.collection_rewards_proportion())\n    } * ratio(era_2_reward_per_round);\n\n    assert_eq!(\n        map! 
{\n            VALIDATOR_1.clone() => vec![validator_1_expected_payout.to_integer()],\n            VALIDATOR_2.clone() => vec![validator_2_expected_payout.to_integer()],\n            VALIDATOR_3.clone() => vec![validator_3_expected_payout.to_integer()],\n        },\n        rewards_for_era_2,\n    );\n}\n\n/// Only production & contribution fee.\n#[test]\nfn all_signatures_rewards_without_finder_fee() {\n    let rng = &mut TestRng::new();\n    let percent_signatures = 40;\n    let percent_finders = 0;\n    let blocks_per_era = 3;\n    let signature_rewards_max_delay = 4;\n    let core_config = core_config(\n        rng,\n        percent_signatures,\n        percent_finders,\n        blocks_per_era,\n        signature_rewards_max_delay,\n    );\n\n    // Eras info:\n\n    let era_1_reward_per_round = 300;\n    let era_2_reward_per_round = 400;\n\n    let weights = map! {\n        VALIDATOR_1.clone() => U512::from(100_u64),\n        VALIDATOR_2.clone() => U512::from(190_u64),\n        VALIDATOR_3.clone() => U512::from(30_u64),\n    };\n\n    // Simple scenario: each validators sign the block finality directly (no \"lag\"):\n    let constructor = RewardsInfoConstructor::new(\n        &core_config,\n        map! 
{\n            EraId::new(0) => (weights.clone(), era_1_reward_per_round, vec![\n                (0, VALIDATOR_1.deref(), vec![]) // No reward for genesis\n            ]),\n            EraId::new(1) => (weights.clone(), era_1_reward_per_round, vec![\n                (1, VALIDATOR_1.deref(), vec![set!{}]), // Nobody signed the genesis finality\n                (2, VALIDATOR_2.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}]),\n                (3, VALIDATOR_3.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}]),\n            ]),\n            EraId::new(2) => (weights, era_2_reward_per_round, vec![\n                (4, VALIDATOR_1.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}, set!{}]),\n                (5, VALIDATOR_2.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}, set!{}]),\n                (6, VALIDATOR_3.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}, set!{}]),\n            ]),\n        },\n    );\n\n    // Era 1 payouts:\n\n    let rewards_for_era_1 =\n        rewards_for_era(constructor.for_era(rng, 1), EraId::new(1), &core_config).unwrap();\n\n    let validator_1_expected_payout = {\n        // 1 block produced:\n        ratio(1) * ratio(core_config.production_rewards_proportion())\n        // 2 finality signed:\n        + ratio(2) * ratio(core_config.contribution_rewards_proportion()) * constructor.weight(1, VALIDATOR_1.deref())\n    } * ratio(era_1_reward_per_round);\n    let validator_2_expected_payout = {\n        // 1 block produced:\n        ratio(1) * ratio(core_config.production_rewards_proportion())\n        // 2 finality signed:\n        + ratio(2) * ratio(core_config.contribution_rewards_proportion()) * constructor.weight(1, VALIDATOR_2.deref())\n    } * ratio(era_1_reward_per_round);\n    let validator_3_expected_payout 
= {\n        // 1 block produced:\n        ratio(1) * ratio(core_config.production_rewards_proportion())\n        // 2 finality signed:\n        + ratio(2) * ratio(core_config.contribution_rewards_proportion()) * constructor.weight(1, VALIDATOR_3.deref())\n    } * ratio(era_1_reward_per_round);\n\n    assert_eq!(\n        rewards_for_era_1,\n        map! {\n            VALIDATOR_1.clone() => vec![validator_1_expected_payout.to_integer()],\n            VALIDATOR_2.clone() => vec![validator_2_expected_payout.to_integer()],\n            VALIDATOR_3.clone() => vec![validator_3_expected_payout.to_integer()],\n        }\n    );\n}\n\n#[test]\nfn all_signatures_rewards() {\n    let rng = &mut TestRng::new();\n    let percent_signatures = 40;\n    let percent_finders = 15;\n    let blocks_per_era = 3;\n    let signature_rewards_max_delay = 4;\n    let core_config = core_config(\n        rng,\n        percent_signatures,\n        percent_finders,\n        blocks_per_era,\n        signature_rewards_max_delay,\n    );\n\n    // Eras info:\n\n    let era_1_reward_per_round = 300;\n    let era_2_reward_per_round = 400;\n\n    let weights = map! {\n        VALIDATOR_1.clone() => U512::from(100_u64),\n        VALIDATOR_2.clone() => U512::from(190_u64),\n        VALIDATOR_3.clone() => U512::from(30_u64),\n    };\n\n    // Simple scenario: each validators sign the block finality directly (no \"lag\"):\n    let constructor = RewardsInfoConstructor::new(\n        &core_config,\n        map! 
{\n            EraId::new(0) => (weights.clone(), era_1_reward_per_round, vec![\n                (0, VALIDATOR_1.deref(), vec![]) // No reward for genesis\n            ]),\n            EraId::new(1) => (weights.clone(), era_1_reward_per_round, vec![\n                (1, VALIDATOR_1.deref(), vec![set!{}]), // Nobody signed the genesis finality\n                (2, VALIDATOR_2.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}]),\n                (3, VALIDATOR_3.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}]),\n            ]),\n            EraId::new(2) => (weights, era_2_reward_per_round, vec![\n                (4, VALIDATOR_1.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}, set!{}]),\n                (5, VALIDATOR_2.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}, set!{}]),\n                (6, VALIDATOR_3.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}, set!{}]),\n            ]),\n        },\n    );\n\n    // Era 1 payouts:\n\n    let rewards_for_era_1 =\n        rewards_for_era(constructor.for_era(rng, 1), EraId::new(1), &core_config).unwrap();\n\n    let validator_1_expected_payout = {\n        // 1 block produced:\n        ratio(1) * ratio(core_config.production_rewards_proportion())\n        // No finality signature collected:\n        + ratio(0) * ratio(core_config.collection_rewards_proportion())\n        // 2 finality signed:\n        + ratio(2) * ratio(core_config.contribution_rewards_proportion()) * constructor.weight(1, VALIDATOR_1.deref())\n    } * ratio(era_1_reward_per_round);\n    let validator_2_expected_payout = {\n        // 1 block produced:\n        ratio(1) * ratio(core_config.production_rewards_proportion())\n        // All finality signatures collected:\n        + ratio(core_config.collection_rewards_proportion())\n 
       // 2 finality signed:\n        + ratio(2) * ratio(core_config.contribution_rewards_proportion()) * constructor.weight(1, VALIDATOR_2.deref())\n    } * ratio(era_1_reward_per_round);\n    let validator_3_expected_payout = {\n        // 1 block produced:\n        ratio(1) * ratio(core_config.production_rewards_proportion())\n        // All finality signatures collected:\n        + ratio(core_config.collection_rewards_proportion())\n        // 2 finality signed:\n        + ratio(2) * ratio(core_config.contribution_rewards_proportion()) * constructor.weight(1, VALIDATOR_3.deref())\n    } * ratio(era_1_reward_per_round);\n\n    assert_eq!(\n        rewards_for_era_1,\n        map! {\n            VALIDATOR_1.clone() => vec![validator_1_expected_payout.to_integer()],\n            VALIDATOR_2.clone() => vec![validator_2_expected_payout.to_integer()],\n            VALIDATOR_3.clone() => vec![validator_3_expected_payout.to_integer()],\n        }\n    );\n}\n\n#[test]\nfn mixed_signatures_pattern() {\n    let rng = &mut TestRng::new();\n    let percent_signatures = 30;\n    let percent_finders = 27;\n    let blocks_per_era = 4;\n    let signature_rewards_max_delay = 4;\n    let core_config = core_config(\n        rng,\n        percent_signatures,\n        percent_finders,\n        blocks_per_era,\n        signature_rewards_max_delay,\n    );\n\n    let production = ratio(core_config.production_rewards_proportion());\n    let collection = ratio(core_config.collection_rewards_proportion());\n    let contribution = ratio(core_config.contribution_rewards_proportion());\n\n    // Eras info:\n\n    let era_1_reward_per_round = 300;\n    let era_2_reward_per_round = 400;\n\n    let weights_1 = map! {\n        VALIDATOR_1.clone() => U512::from(100_u64),\n        VALIDATOR_2.clone() => U512::from(190_u64),\n        VALIDATOR_3.clone() => U512::from(30_u64),\n    };\n\n    let weights_2 = map! 
{\n        VALIDATOR_1.clone() => U512::from(93_u64),\n        VALIDATOR_2.clone() => U512::from(190_u64),\n        VALIDATOR_3.clone() => U512::from(69_u64),\n        VALIDATOR_4.clone() => U512::from(212_u64),\n    };\n\n    // Complex scenario:\n    // - not all validators sign\n    // - in era 2, signatures are reported from era 1\n    let constructor = RewardsInfoConstructor::new(\n        &core_config,\n        map! {\n            EraId::new(0) => (weights_1.clone(), era_1_reward_per_round, vec![\n                (0, VALIDATOR_1.deref(), vec![]) // No reward for genesis\n            ]),\n            EraId::new(1) => (weights_1, era_1_reward_per_round, vec![\n                (1, VALIDATOR_2.deref(), vec![set!{}]), // Nobody signed the genesis finality\n                (2, VALIDATOR_2.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_3.clone()}, set!{}]),\n                (3, VALIDATOR_1.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{VALIDATOR_2.clone()}, set!{}]), // the validator 2 signature is fetched later\n                (4, VALIDATOR_1.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone()}, set!{}, set!{}]), // validator 3 doesn't sign the block 3\n            ]),\n            EraId::new(2) => (weights_2, era_2_reward_per_round, vec![\n                (5, VALIDATOR_2.deref(), vec![set!{}, set!{}, set!{}, set!{}]),\n                (6, VALIDATOR_3.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone(),VALIDATOR_4.clone()}, set!{VALIDATOR_1.clone()}, set!{}, set!{}]),\n                (7, VALIDATOR_4.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{VALIDATOR_3.clone(),VALIDATOR_4.clone()}, set!{VALIDATOR_3.clone()}, set!{}]),\n                (8, VALIDATOR_1.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone()}, set!{}, set!{}, set!{}]),\n            ]),\n        },\n    );\n\n    // Era 1 payouts:\n    {\n        let era = EraId::new(1);\n        
let rewards_for_era_1 =\n            rewards_for_era(constructor.for_era(rng, era), era, &core_config).unwrap();\n\n        let validator_1_expected_payout = {\n            // 2 blocks produced:\n            ratio(2) * production\n            // 6 finality signatures collected:\n            + collection * (\n                ratio(2) * constructor.weight(era, VALIDATOR_1.deref())\n                + ratio(3) * constructor.weight(era, VALIDATOR_2.deref())\n                + ratio(1) * constructor.weight(era, VALIDATOR_3.deref())\n            )\n            // 3 finality signed:\n            + ratio(3) * contribution * constructor.weight(era, VALIDATOR_1.deref())\n        } * ratio(era_1_reward_per_round);\n        let validator_2_expected_payout = {\n            // 2 blocks produced:\n            ratio(2) * production\n            // 2 finality signatures collected:\n            + collection * (\n                ratio(1) * constructor.weight(era, VALIDATOR_1.deref())\n                + ratio(0) * constructor.weight(era, VALIDATOR_2.deref())\n                + ratio(1) * constructor.weight(era, VALIDATOR_3.deref())\n            )\n            // 3 finality signed:\n            + ratio(3) * contribution * constructor.weight(era, VALIDATOR_2.deref())\n        } * ratio(era_1_reward_per_round);\n        let validator_3_expected_payout = {\n            // No block produced:\n            ratio(0) * production\n            // No finality signatures collected:\n            + ratio(0) * collection\n            // 2 finality signed:\n            + ratio(2) * contribution * constructor.weight(era, VALIDATOR_3.deref())\n        } * ratio(era_1_reward_per_round);\n\n        assert_eq!(\n            rewards_for_era_1,\n            map! 
{\n                VALIDATOR_1.clone() => vec![validator_1_expected_payout.to_integer()],\n                VALIDATOR_2.clone() => vec![validator_2_expected_payout.to_integer()],\n                VALIDATOR_3.clone() => vec![validator_3_expected_payout.to_integer()],\n            }\n        );\n    }\n\n    // Era 2 payouts:\n    {\n        let era = EraId::new(2);\n        let rewards_for_era_2 =\n            rewards_for_era(constructor.for_era(rng, era), era, &core_config).unwrap();\n\n        let validator_1_expected_payout = vec![\n            // 1 block produced:\n            (production * ratio(1) * ratio(era_2_reward_per_round)\n            // 2 finality signatures collected:\n            + collection * {\n                ratio(1) * constructor.weight(era, VALIDATOR_1.deref())\n                + ratio(1) * constructor.weight(era, VALIDATOR_2.deref())\n            } * ratio(era_2_reward_per_round)\n            // Finality signed:\n            + contribution * {\n                // 3 in current era:\n                ratio(3) * ratio(era_2_reward_per_round) * constructor.weight(era, VALIDATOR_1.deref())\n            }).to_integer(),\n            // 1 contributed in previous era:\n            (contribution * {\n                ratio(1) * ratio(era_1_reward_per_round) * constructor.weight(1, VALIDATOR_1.deref())\n            }).to_integer()\n        ];\n\n        let validator_2_expected_payout = vec![\n            // 1 block produced:\n            (ratio(1) * production * ratio(era_2_reward_per_round)\n            // No finality signature collected:\n            // 3 finality signed:\n            + ratio(3) * contribution * ratio(era_2_reward_per_round) * constructor.weight(era, VALIDATOR_2.deref()))\n            .to_integer()\n        ];\n\n        let validator_3_expected_payout = vec![\n            // 1 block produced:\n            (ratio(1) * production * ratio(era_2_reward_per_round)\n            // 2 finality signatures collected:\n            + collection * 
{\n                (\n                    ratio(1) * constructor.weight(era, VALIDATOR_1.deref())\n                    + ratio(1) * constructor.weight(era, VALIDATOR_2.deref())\n                    + ratio(1) * constructor.weight(era, VALIDATOR_3.deref())\n                    + ratio(1) * constructor.weight(era, VALIDATOR_4.deref())\n                ) * ratio(era_2_reward_per_round)\n                // collected one signature from era 1\n                + (\n                    ratio(1) * constructor.weight(1, VALIDATOR_1.deref())\n                ) * ratio(era_1_reward_per_round)\n            }\n            // Finality signed:\n            + contribution * {\n                // 3 in current era:\n                ratio(3) * ratio(era_2_reward_per_round) * constructor.weight(era, VALIDATOR_3.deref())\n            }).to_integer(),\n            // for era 1\n            (contribution * {\n                // 1 in previous era:\n                ratio(1) * ratio(era_1_reward_per_round) * constructor.weight(1, VALIDATOR_3.deref())\n            }).to_integer()\n        ];\n\n        let validator_4_expected_payout = vec![\n            // 1 block produced:\n            (ratio(1) * production * ratio(era_2_reward_per_round)\n            // 6 finality signatures collected:\n            + collection * {\n                (\n                    ratio(1) * constructor.weight(era, VALIDATOR_1.deref())\n                    + ratio(1) * constructor.weight(era, VALIDATOR_2.deref())\n                    + ratio(2) * constructor.weight(era, VALIDATOR_3.deref())\n                    + ratio(1) * constructor.weight(era, VALIDATOR_4.deref())\n                ) * ratio(era_2_reward_per_round)\n                // collected one signature from era 1\n                + (\n                    ratio(1) * constructor.weight(1, VALIDATOR_3.deref())\n                ) * ratio(era_1_reward_per_round)\n            }\n            // 3 finality signed:\n            + ratio(2) * contribution * 
ratio(era_2_reward_per_round) * constructor.weight(era, VALIDATOR_4.deref()))\n            .to_integer(),\n        ];\n\n        assert_eq!(\n            rewards_for_era_2,\n            map! {\n                VALIDATOR_1.clone() => validator_1_expected_payout,\n                VALIDATOR_2.clone() => validator_2_expected_payout,\n                VALIDATOR_3.clone() => validator_3_expected_payout,\n                VALIDATOR_4.clone() => validator_4_expected_payout,\n            }\n        );\n    }\n}\n\nmod constructors {\n    use casper_types::SingleBlockRewardedSignatures;\n\n    use super::*;\n    use std::collections::BTreeSet;\n\n    type Weights = BTreeMap<PublicKey, U512>;\n    type RewardPerRound = u64;\n    type BlockInfo<'a> = (u64, &'a PublicKey, Vec<BTreeSet<PublicKey>>);\n\n    pub(super) struct RewardsInfoConstructor<'a> {\n        signature_rewards_max_delay: u64,\n        blocks: BTreeMap<EraId, (Weights, RewardPerRound, Vec<BlockInfo<'a>>)>,\n        /// A cache with the validators for each era\n        validators: BTreeMap<EraId, BTreeSet<PublicKey>>,\n    }\n\n    impl<'a> RewardsInfoConstructor<'a> {\n        pub(super) fn new(\n            core_config: &'a CoreConfig,\n            blocks: BTreeMap<EraId, (Weights, RewardPerRound, Vec<BlockInfo<'a>>)>,\n        ) -> Self {\n            let validators = blocks\n                .iter()\n                .map(|(era_id, (weights, _, _))| (*era_id, weights.keys().cloned().collect()))\n                .collect();\n\n            Self {\n                signature_rewards_max_delay: core_config.signature_rewards_max_delay,\n                blocks,\n                validators,\n            }\n        }\n\n        /// Returns the relative weight for a validator.\n        pub(super) fn weight(\n            &self,\n            era_id: impl Into<EraId>,\n            validator: &PublicKey,\n        ) -> Ratio<U512> {\n            let weights = &self.blocks[&era_id.into()].0;\n            let total = 
weights.values().copied().sum();\n            let weight = weights[validator];\n\n            Ratio::new(weight, total)\n        }\n\n        pub(super) fn for_era(&self, rng: &mut TestRng, era_id: impl Into<EraId>) -> RewardsInfo {\n            let era_id = era_id.into();\n            let number_blocks = {\n                let era_size = self.blocks[&era_id].2.len();\n                self.signature_rewards_max_delay as usize + era_size\n            };\n\n            let cited_blocks: Vec<_> = self\n                .blocks\n                .range(EraId::new(0)..=era_id)\n                .rev()\n                .flat_map(|(era_id, (_, _, blocks))| {\n                    let switch_height = blocks.iter().map(|b| b.0).max().unwrap();\n                    // Blocks are being read in reverse, era by era, so that we can build only the\n                    // latest needed:\n                    blocks.clone().into_iter().rev().map(\n                        move |(height, proposer, rewarded_signatures)| {\n                            let rewarded_signatures = RewardedSignatures::new(\n                                rewarded_signatures.into_iter().enumerate().map(\n                                    |(height_offset, signing_validators)| {\n                                        let height =\n                                            height.saturating_sub(height_offset as u64 + 1);\n                                        let era_id = self\n                                            .blocks\n                                            .iter()\n                                            .find_map(|(era_id, (_, _, blocks))| {\n                                                blocks\n                                                    .iter()\n                                                    .find(|(h, _, _)| h == &height)\n                                                    .map(|_| era_id)\n                                            })\n                              
              .unwrap_or_else(|| {\n                                                panic!(\"height {} must be provided\", height)\n                                            });\n                                        let era_validators = self\n                                            .validators\n                                            .get(era_id)\n                                            .expect(\"the info for the era to be provided\");\n                                        SingleBlockRewardedSignatures::from_validator_set(\n                                            &signing_validators,\n                                            era_validators,\n                                        )\n                                    },\n                                ),\n                            );\n                            TestBlockBuilder::new()\n                                .height(height)\n                                .era(*era_id)\n                                .proposer(proposer.clone())\n                                .rewarded_signatures(rewarded_signatures)\n                                .switch_block(height == switch_height)\n                        },\n                    )\n                })\n                .map(move |block_builder| CitedBlock::from(Block::from(block_builder.build(rng))))\n                .take(number_blocks)\n                .collect();\n            let cited_blocks: Vec<_> = cited_blocks.into_iter().rev().collect();\n\n            let first_block = cited_blocks.first().expect(\"at least one cited block\");\n            assert!(\n                cited_blocks.len() >= number_blocks || first_block.is_genesis,\n                \"Not enough blocks provided\"\n            );\n\n            let eras_info = self\n                .blocks\n                .range(first_block.era_id..=era_id)\n                .map(|(era_id, (weights, reward_per_round, _))| {\n                    (\n                        
*era_id,\n                        EraInfo::new_testing(weights.clone(), ratio(*reward_per_round)),\n                    )\n                })\n                .collect();\n\n            RewardsInfo::new_testing(eras_info, cited_blocks)\n        }\n    }\n}\n\nmod convert {\n    use super::*;\n    use std::convert::TryFrom;\n\n    pub(super) fn ratio(n: impl IntoRatioU512) -> Ratio<U512> {\n        n.into()\n    }\n\n    pub(super) trait IntoRatioU512 {\n        fn into(self) -> Ratio<U512>;\n    }\n\n    impl IntoRatioU512 for u64 {\n        fn into(self) -> Ratio<U512> {\n            Ratio::new(U512::from(self), U512::one())\n        }\n    }\n\n    impl IntoRatioU512 for usize {\n        fn into(self) -> Ratio<U512> {\n            Ratio::new(U512::from(self), U512::one())\n        }\n    }\n\n    impl IntoRatioU512 for U512 {\n        fn into(self) -> Ratio<U512> {\n            Ratio::new(self, U512::one())\n        }\n    }\n\n    impl IntoRatioU512 for i32 {\n        fn into(self) -> Ratio<U512> {\n            Ratio::new(U512::from(u32::try_from(self).unwrap()), U512::one())\n        }\n    }\n\n    impl IntoRatioU512 for Ratio<u64> {\n        fn into(self) -> Ratio<U512> {\n            Ratio::new(U512::from(*self.numer()), U512::from(*self.denom()))\n        }\n    }\n\n    impl IntoRatioU512 for Ratio<U512> {\n        fn into(self) -> Ratio<U512> {\n            self\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/contract_runtime/rewards.rs",
    "content": "#[cfg(test)]\nmod tests;\n\nuse std::{collections::BTreeMap, ops::Range, sync::Arc};\n\nuse casper_storage::{\n    data_access_layer::{\n        DataAccessLayer, EraValidatorsRequest, RoundSeigniorageRateRequest,\n        RoundSeigniorageRateResult, TotalSupplyRequest, TotalSupplyResult,\n    },\n    global_state::state::{lmdb::LmdbGlobalState, StateProvider},\n};\nuse futures::stream::{self, StreamExt as _, TryStreamExt as _};\n\nuse itertools::Itertools;\nuse num_rational::Ratio;\nuse num_traits::{CheckedAdd, CheckedMul, ToPrimitive};\nuse thiserror::Error;\nuse tracing::trace;\n\nuse crate::{\n    contract_runtime::metrics::Metrics,\n    effect::{\n        requests::{ContractRuntimeRequest, StorageRequest},\n        EffectBuilder,\n    },\n    types::ExecutableBlock,\n};\nuse casper_types::{\n    Block, Chainspec, CoreConfig, Digest, EraId, ProtocolVersion, PublicKey, RewardedSignatures,\n    U512,\n};\n\npub(crate) trait ReactorEventT:\n    Send + From<StorageRequest> + From<ContractRuntimeRequest>\n{\n}\n\nimpl<T> ReactorEventT for T where T: Send + From<StorageRequest> + From<ContractRuntimeRequest> {}\n\n#[derive(Debug)]\npub(crate) struct CitedBlock {\n    protocol_version: ProtocolVersion,\n    height: u64,\n    era_id: EraId,\n    proposer: PublicKey,\n    rewarded_signatures: RewardedSignatures,\n    state_root_hash: Digest,\n    is_switch_block: bool,\n    is_genesis: bool,\n}\n\nimpl CitedBlock {\n    fn from_executable_block(block: ExecutableBlock, protocol_version: ProtocolVersion) -> Self {\n        Self {\n            protocol_version,\n            era_id: block.era_id,\n            height: block.height,\n            proposer: *block.proposer,\n            rewarded_signatures: block.rewarded_signatures,\n            state_root_hash: Digest::default(),\n            is_switch_block: block.era_report.is_some(),\n            is_genesis: block.era_id.is_genesis(),\n        }\n    }\n}\n\n#[derive(Debug)]\npub(crate) struct RewardsInfo 
{\n    eras_info: BTreeMap<EraId, EraInfo>,\n    cited_blocks: Vec<CitedBlock>,\n    cited_block_height_start: u64,\n}\n\n/// The era information needed in the rewards computation:\n#[derive(Debug, Clone)]\npub(crate) struct EraInfo {\n    weights: BTreeMap<PublicKey, U512>,\n    total_weights: U512,\n    reward_per_round: Ratio<U512>,\n}\n\n#[derive(Error, Debug)]\npub enum RewardsError {\n    /// We got a block height which is not in the era range it should be in (should not happen).\n    #[error(\"block height {0} is not in the era range\")]\n    HeightNotInEraRange(u64),\n    /// The era is not in the range we have (should not happen).\n    #[error(\"era {0} is not in the era range\")]\n    EraIdNotInEraRange(EraId),\n    /// The validator public key is not in the era it should be in (should not happen).\n    #[error(\"validator key {0:?} is not in the era\")]\n    ValidatorKeyNotInEra(Box<PublicKey>),\n    /// We didn't have a required switch block.\n    #[error(\"missing switch block for era {0}\")]\n    MissingSwitchBlock(EraId),\n    /// We got an overflow while computing something.\n    #[error(\"arithmetic overflow\")]\n    ArithmeticOverflow,\n    #[error(\"failed to fetch block with height {0}\")]\n    FailedToFetchBlockWithHeight(u64),\n    #[error(\"failed to fetch era {0}\")]\n    FailedToFetchEra(String),\n    /// Fetching the era validators succedeed, but no info is present (should not happen).\n    /// The `Digest` is the one that was queried.\n    #[error(\"failed to fetch era validators for {0}\")]\n    FailedToFetchEraValidators(Digest),\n    #[error(\"failed to fetch total supply\")]\n    FailedToFetchTotalSupply,\n    #[error(\"failed to fetch seigniorage rate\")]\n    FailedToFetchSeigniorageRate,\n}\n\nimpl RewardsInfo {\n    pub async fn new<REv: ReactorEventT>(\n        effect_builder: EffectBuilder<REv>,\n        data_access_layer: Arc<DataAccessLayer<LmdbGlobalState>>,\n        protocol_version: ProtocolVersion,\n        
activation_era_id: EraId,\n        maybe_upgraded_validators: Option<&BTreeMap<PublicKey, U512>>,\n        signature_rewards_max_delay: u64,\n        executable_block: ExecutableBlock,\n    ) -> Result<Self, RewardsError> {\n        let current_era_id = executable_block.era_id;\n        // All the blocks that may appear as a signed block. They are collected upfront, so that we\n        // don't have to worry about doing it one by one later.\n        //\n        // They are sorted from the oldest to the newest:\n\n        let cited_block_height_start = {\n            let previous_era_id = current_era_id.saturating_sub(1);\n            let previous_era_switch_block_header = effect_builder\n                .get_switch_block_header_by_era_id_from_storage(previous_era_id)\n                .await\n                .ok_or(RewardsError::MissingSwitchBlock(previous_era_id))?;\n\n            if previous_era_id.is_genesis() || previous_era_id == activation_era_id {\n                // We do not attempt to reward blocks from before an upgrade!\n                previous_era_switch_block_header.height()\n            } else {\n                // Here we do not substract 1, because we want one block more:\n                previous_era_switch_block_header\n                    .height()\n                    .saturating_sub(signature_rewards_max_delay)\n            }\n        };\n\n        // We need just one block from before the upgrade to determine the validators in\n        // the following era.\n        let range_to_fetch = cited_block_height_start.saturating_sub(1)..executable_block.height;\n        let mut cited_blocks =\n            collect_past_blocks_batched(effect_builder, range_to_fetch.clone()).await?;\n\n        tracing::info!(\n            current_era_id = %current_era_id.value(),\n            range_requested = ?range_to_fetch,\n            num_fetched_blocks = %cited_blocks.len(),\n            \"blocks fetched\",\n        );\n\n        let eras_info = 
Self::create_eras_info(\n            data_access_layer,\n            activation_era_id,\n            current_era_id,\n            maybe_upgraded_validators,\n            cited_blocks.iter(),\n        )?;\n\n        cited_blocks.push(CitedBlock::from_executable_block(\n            executable_block,\n            protocol_version,\n        ));\n\n        Ok(RewardsInfo {\n            eras_info,\n            cited_blocks,\n            cited_block_height_start,\n        })\n    }\n\n    #[cfg(test)]\n    pub fn new_testing(eras_info: BTreeMap<EraId, EraInfo>, cited_blocks: Vec<CitedBlock>) -> Self {\n        let cited_block_height_start = cited_blocks.first().map(|block| block.height).unwrap_or(0);\n        Self {\n            eras_info,\n            cited_blocks,\n            cited_block_height_start,\n        }\n    }\n\n    /// `block_hashs` is an iterator over the era ID to get the information about + the block\n    /// hash to query to have such information (which may not be from the same era).\n    fn create_eras_info<'a>(\n        data_access_layer: Arc<DataAccessLayer<LmdbGlobalState>>,\n        activation_era_id: EraId,\n        current_era_id: EraId,\n        maybe_upgraded_validators: Option<&BTreeMap<PublicKey, U512>>,\n        mut cited_blocks: impl Iterator<Item = &'a CitedBlock>,\n    ) -> Result<BTreeMap<EraId, EraInfo>, RewardsError> {\n        let oldest_block = cited_blocks.next();\n\n        // If the oldest block is genesis, we add the validator information for genesis (era 0) from\n        // era 1, because it's the same:\n        let oldest_block_is_genesis = oldest_block.is_some_and(|block| block.is_genesis);\n\n        // Here, we gather a list of all of the era ID we need to fetch to calculate the rewards,\n        // as well as the state root hash allowing to query this information.\n        //\n        // To get all of the needed era IDs, we take the very first block, then every switch block\n        // We take the first block, because we 
need it for the first cited era, then every switch\n        // block for every subsequent eras.\n        // If the first block is itself a switch block, that's fine, because we fetch one block more\n        // in the first place to handle this case.\n        let eras_and_state_root_hashes: Vec<_> = oldest_block\n            .into_iter()\n            .chain(cited_blocks.filter(|&block| block.is_switch_block))\n            .map(|block| {\n                let state_root_hash = block.state_root_hash;\n                let protocol_version = block.protocol_version;\n                let era = if block.is_switch_block {\n                    block.era_id.successor()\n                } else {\n                    block.era_id\n                };\n                (era, protocol_version, state_root_hash)\n            })\n            .collect();\n\n        let num_eras_to_fetch =\n            eras_and_state_root_hashes.len() + usize::from(oldest_block_is_genesis);\n\n        let data_access_layer = &data_access_layer;\n\n        let mut eras_info: BTreeMap<_, _> = eras_and_state_root_hashes\n            .into_iter()\n            .map(|(era_id, protocol_version, state_root_hash)| {\n                let weights = if let (true, Some(upgraded_validators)) =\n                    (era_id == activation_era_id, maybe_upgraded_validators)\n                {\n                    upgraded_validators.clone()\n                } else {\n                    let request = EraValidatorsRequest::new(state_root_hash);\n                    let era_validators_result = data_access_layer.era_validators(request);\n                    let msg = format!(\"{}\", era_validators_result);\n                    era_validators_result\n                        .take_era_validators()\n                        .ok_or(msg)\n                        .map_err(RewardsError::FailedToFetchEra)?\n                        // We consume the map to not clone the value:\n                        .into_iter()\n                    
    .find(|(key, _)| key == &era_id)\n                        .ok_or(RewardsError::FailedToFetchEraValidators(state_root_hash))?\n                        .1\n                };\n\n                let total_supply_request =\n                    TotalSupplyRequest::new(state_root_hash, protocol_version);\n                let total_supply = match data_access_layer.total_supply(total_supply_request) {\n                    TotalSupplyResult::RootNotFound\n                    | TotalSupplyResult::MintNotFound\n                    | TotalSupplyResult::ValueNotFound(_)\n                    | TotalSupplyResult::Failure(_) => {\n                        return Err(RewardsError::FailedToFetchTotalSupply)\n                    }\n                    TotalSupplyResult::Success { total_supply } => total_supply,\n                };\n\n                let seigniorage_rate_request =\n                    RoundSeigniorageRateRequest::new(state_root_hash, protocol_version);\n                let seigniorage_rate =\n                    match data_access_layer.round_seigniorage_rate(seigniorage_rate_request) {\n                        RoundSeigniorageRateResult::RootNotFound\n                        | RoundSeigniorageRateResult::MintNotFound\n                        | RoundSeigniorageRateResult::ValueNotFound(_)\n                        | RoundSeigniorageRateResult::Failure(_) => {\n                            return Err(RewardsError::FailedToFetchSeigniorageRate);\n                        }\n                        RoundSeigniorageRateResult::Success { rate } => rate,\n                    };\n\n                let reward_per_round = seigniorage_rate * total_supply;\n                let total_weights = weights.values().copied().sum();\n\n                Ok::<_, RewardsError>((\n                    era_id,\n                    EraInfo {\n                        weights,\n                        total_weights,\n                        reward_per_round,\n                    },\n               
 ))\n            })\n            .try_collect()?;\n\n        // We cannot get the genesis info from a root hash, so we copy it from era 1 when needed.\n        if oldest_block_is_genesis {\n            let era_1 = EraId::from(1);\n            let era_1_info = eras_info\n                .get(&era_1)\n                .ok_or(RewardsError::EraIdNotInEraRange(era_1))?;\n            eras_info.insert(EraId::from(0), era_1_info.clone());\n        }\n\n        {\n            let era_ids: Vec<_> = eras_info.keys().map(|id| id.value()).collect();\n            tracing::info!(\n                current_era_id = %current_era_id.value(),\n                %num_eras_to_fetch,\n                eras_fetched = ?era_ids,\n            );\n        }\n\n        Ok(eras_info)\n    }\n\n    /// Returns the validators from a given era.\n    pub fn validator_keys(\n        &self,\n        era_id: EraId,\n    ) -> Result<impl Iterator<Item = PublicKey> + '_, RewardsError> {\n        let keys = self\n            .eras_info\n            .get(&era_id)\n            .ok_or(RewardsError::EraIdNotInEraRange(era_id))?\n            .weights\n            .keys()\n            .cloned();\n\n        Ok(keys)\n    }\n\n    /// Returns the total potential reward per block.\n    /// Since it is per block, we do not care about the expected number of blocks per era.\n    pub fn reward(&self, era_id: EraId) -> Result<Ratio<U512>, RewardsError> {\n        Ok(self\n            .eras_info\n            .get(&era_id)\n            .ok_or(RewardsError::EraIdNotInEraRange(era_id))?\n            .reward_per_round)\n    }\n\n    /// Returns the weight ratio for a given validator for a given era.\n    pub fn weight_ratio(\n        &self,\n        era_id: EraId,\n        validator: &PublicKey,\n    ) -> Result<Ratio<U512>, RewardsError> {\n        let era = self\n            .eras_info\n            .get(&era_id)\n            .ok_or(RewardsError::EraIdNotInEraRange(era_id))?;\n        let weight = era\n            .weights\n  
          .get(validator)\n            .ok_or_else(|| RewardsError::ValidatorKeyNotInEra(Box::new(validator.clone())))?;\n\n        Ok(Ratio::new(*weight, era.total_weights))\n    }\n\n    /// Returns the era in which is the given block height.\n    pub fn era_for_block_height(&self, height: u64) -> Result<EraId, RewardsError> {\n        self.cited_blocks\n            .iter()\n            .find_map(|block| (block.height == height).then_some(block.era_id))\n            .ok_or(RewardsError::HeightNotInEraRange(height))\n    }\n\n    /// Returns all the blocks belonging to an era.\n    pub fn blocks_from_era(&self, era_id: EraId) -> impl Iterator<Item = &CitedBlock> {\n        self.cited_blocks\n            .iter()\n            .filter(move |block| block.era_id == era_id)\n    }\n}\n\nimpl EraInfo {\n    #[cfg(test)]\n    pub fn new_testing(weights: BTreeMap<PublicKey, U512>, reward_per_round: Ratio<U512>) -> Self {\n        let total_weights = weights.values().copied().sum();\n        Self {\n            weights,\n            total_weights,\n            reward_per_round,\n        }\n    }\n}\n\n/// First create the `RewardsInfo` structure, then compute the rewards.\n/// It is done in 2 steps so that it is easier to unit test the rewards calculation.\npub(crate) async fn fetch_data_and_calculate_rewards_for_era<REv: ReactorEventT>(\n    effect_builder: EffectBuilder<REv>,\n    data_access_layer: Arc<DataAccessLayer<LmdbGlobalState>>,\n    chainspec: &Chainspec,\n    metrics: &Arc<Metrics>,\n    executable_block: ExecutableBlock,\n) -> Result<BTreeMap<PublicKey, Vec<U512>>, RewardsError> {\n    let current_era_id = executable_block.era_id;\n    tracing::info!(\n        current_era_id = %current_era_id.value(),\n        \"starting the rewards calculation\"\n    );\n\n    if current_era_id.is_genesis()\n        || current_era_id == chainspec.protocol_config.activation_point.era_id()\n    {\n        // Special case: genesis block and immediate switch blocks do not yield 
any reward, because\n        // there is no block producer, and no signatures from previous blocks to be rewarded:\n        Ok(BTreeMap::new())\n    } else {\n        let rewards_info = RewardsInfo::new(\n            effect_builder,\n            data_access_layer,\n            chainspec.protocol_version(),\n            chainspec.protocol_config.activation_point.era_id(),\n            chainspec\n                .protocol_config\n                .global_state_update\n                .as_ref()\n                .and_then(|gsu| gsu.validators.as_ref()),\n            chainspec.core_config.signature_rewards_max_delay,\n            executable_block,\n        )\n        .await?;\n\n        let cited_blocks_count_current_era = rewards_info.blocks_from_era(current_era_id).count();\n\n        let reward_per_round_current_era = rewards_info\n            .eras_info\n            .get(&current_era_id)\n            .expect(\"expected EraInfo\")\n            .reward_per_round;\n\n        let rewards = rewards_for_era(rewards_info, current_era_id, &chainspec.core_config);\n\n        // Calculate and push reward metric(s)\n        if let Ok(rewards_map) = &rewards {\n            let expected_total_seigniorage = reward_per_round_current_era\n                .to_integer()\n                .saturating_mul(U512::from(cited_blocks_count_current_era as u64));\n            let actual_total_seigniorage =\n                rewards_map\n                    .iter()\n                    .fold(U512::zero(), |acc, (_, rewards_vec)| {\n                        let current_era_reward = rewards_vec\n                            .first()\n                            .expect(\"expected current era reward amount\");\n                        acc.saturating_add(*current_era_reward)\n                    });\n            let seigniorage_target_fraction = Ratio::new(\n                actual_total_seigniorage.low_u128(),\n                expected_total_seigniorage.low_u128(),\n            );\n            let 
gauge_value = match Ratio::to_f64(&seigniorage_target_fraction) {\n                Some(v) => v,\n                None => f64::NAN,\n            };\n            metrics.seigniorage_target_fraction.set(gauge_value)\n        }\n\n        rewards\n    }\n}\n\npub(crate) fn rewards_for_era(\n    rewards_info: RewardsInfo,\n    current_era_id: EraId,\n    core_config: &CoreConfig,\n) -> Result<BTreeMap<PublicKey, Vec<U512>>, RewardsError> {\n    fn to_ratio_u512(ratio: Ratio<u64>) -> Ratio<U512> {\n        Ratio::new(U512::from(*ratio.numer()), U512::from(*ratio.denom()))\n    }\n\n    let ratio_u512_zero = Ratio::new(U512::zero(), U512::one());\n    let zero_for_current_era = {\n        let mut map = BTreeMap::new();\n        map.insert(current_era_id, ratio_u512_zero);\n        map\n    };\n    let mut full_reward_for_validators: BTreeMap<_, _> = rewards_info\n        .validator_keys(current_era_id)?\n        .map(|key| (key, zero_for_current_era.clone()))\n        .collect();\n\n    let mut increase_value_for_key_and_era =\n        |key: PublicKey, era: EraId, value: Ratio<U512>| -> Result<(), RewardsError> {\n            match full_reward_for_validators.entry(key) {\n                std::collections::btree_map::Entry::Vacant(entry) => {\n                    let mut map = BTreeMap::new();\n                    map.insert(era, value);\n                    entry.insert(map);\n                }\n                std::collections::btree_map::Entry::Occupied(mut entry) => {\n                    let old_value = entry.get().get(&era).unwrap_or(&ratio_u512_zero);\n                    let new_value = old_value\n                        .checked_add(&value)\n                        .ok_or(RewardsError::ArithmeticOverflow)?;\n                    entry.get_mut().insert(era, new_value);\n                }\n            }\n\n            Ok(())\n        };\n\n    // Rules out a special case: genesis block does not yield any reward,\n    // because there is no block producer, and no 
previous blocks whose\n    // signatures are to be rewarded:\n    debug_assert!(\n        current_era_id.is_genesis() == false,\n        \"the genesis block should be handled as a special case\"\n    );\n\n    let collection_proportion = to_ratio_u512(core_config.collection_rewards_proportion());\n    let contribution_proportion = to_ratio_u512(core_config.contribution_rewards_proportion());\n\n    // Reward for producing a block from this era:\n    let production_reward = to_ratio_u512(core_config.production_rewards_proportion())\n        .checked_mul(&rewards_info.reward(current_era_id)?)\n        .ok_or(RewardsError::ArithmeticOverflow)?;\n\n    // Collect all rewards as a ratio:\n    for block in rewards_info.blocks_from_era(current_era_id) {\n        // Transfer the block production reward for this block proposer:\n        trace!(\n            proposer=?block.proposer,\n            amount=%production_reward.to_integer(),\n            block=%block.height,\n            \"proposer reward\"\n        );\n        increase_value_for_key_and_era(block.proposer.clone(), current_era_id, production_reward)?;\n\n        // Now, let's compute the reward attached to each signed block reported by the block\n        // we examine:\n        for (signature_rewards, signed_block_height) in block\n            .rewarded_signatures\n            .iter()\n            .zip((rewards_info.cited_block_height_start..block.height).rev())\n        {\n            let signed_block_era = rewards_info.era_for_block_height(signed_block_height)?;\n            let validators_providing_signature =\n                signature_rewards.to_validator_set(rewards_info.validator_keys(signed_block_era)?);\n\n            for signing_validator in validators_providing_signature {\n                // Reward for contributing to the finality signature, ie signing this block:\n                let contribution_reward = rewards_info\n                    .weight_ratio(signed_block_era, &signing_validator)?\n          
          .checked_mul(&contribution_proportion)\n                    .ok_or(RewardsError::ArithmeticOverflow)?\n                    .checked_mul(&rewards_info.reward(signed_block_era)?)\n                    .ok_or(RewardsError::ArithmeticOverflow)?;\n                // Reward for gathering this signature. It is both weighted by the block\n                // producing/signature collecting validator, and the signing validator:\n                let collection_reward = rewards_info\n                    .weight_ratio(signed_block_era, &signing_validator)?\n                    .checked_mul(&collection_proportion)\n                    .ok_or(RewardsError::ArithmeticOverflow)?\n                    .checked_mul(&rewards_info.reward(signed_block_era)?)\n                    .ok_or(RewardsError::ArithmeticOverflow)?;\n\n                trace!(\n                    signer=?signing_validator,\n                    amount=%contribution_reward.to_integer(),\n                    block=%block.height,\n                    signed_block=%signed_block_height,\n                    \"signature contribution reward\"\n                );\n                trace!(\n                    collector=?block.proposer,\n                    signer=?signing_validator,\n                    amount=%collection_reward.to_integer(),\n                    block=%block.height,\n                    signed_block=%signed_block_height,\n                    \"signature collection reward\"\n                );\n                increase_value_for_key_and_era(\n                    signing_validator,\n                    signed_block_era,\n                    contribution_reward,\n                )?;\n                increase_value_for_key_and_era(\n                    block.proposer.clone(),\n                    current_era_id,\n                    collection_reward,\n                )?;\n            }\n        }\n    }\n\n    let rewards_map_to_vec = |rewards_map: BTreeMap<EraId, Ratio<U512>>| {\n        let min_era = 
rewards_map\n            .iter()\n            .find(|(_era, &amount)| !amount.numer().is_zero())\n            .map(|(era, _amount)| era)\n            .copied()\n            .unwrap_or(current_era_id);\n        EraId::iter_range_inclusive(min_era, current_era_id)\n            .rev()\n            .map(|era_id| {\n                rewards_map\n                    .get(&era_id)\n                    .copied()\n                    .unwrap_or(ratio_u512_zero)\n                    .to_integer()\n            })\n            .collect()\n    };\n\n    // Return the rewards as plain U512:\n    Ok(full_reward_for_validators\n        .into_iter()\n        .map(|(key, amounts)| (key, rewards_map_to_vec(amounts)))\n        .collect())\n}\n\n/// Query all the blocks from the given range with a batch mechanism.\nasync fn collect_past_blocks_batched<REv: From<StorageRequest>>(\n    effect_builder: EffectBuilder<REv>,\n    era_height_span: Range<u64>,\n) -> Result<Vec<CitedBlock>, RewardsError> {\n    const STEP: usize = 100;\n    let only_from_available_block_range = false;\n\n    let batches = {\n        let range_end = era_height_span.end;\n\n        era_height_span\n            .step_by(STEP)\n            .map(move |internal_start| internal_start..range_end.min(internal_start + STEP as u64))\n    };\n\n    stream::iter(batches)\n        .then(|range| async move {\n            stream::iter(\n                effect_builder\n                    .collect_past_blocks_with_metadata(\n                        range.clone(),\n                        only_from_available_block_range,\n                    )\n                    .await\n                    .into_iter()\n                    .zip(range)\n                    .map(|(maybe_block_with_metadata, height)| {\n                        maybe_block_with_metadata\n                            .ok_or(RewardsError::FailedToFetchBlockWithHeight(height))\n                            .map(|b| CitedBlock::from(b.block))\n                    }),\n   
         )\n        })\n        .flatten()\n        .try_collect()\n        .await\n}\n\nimpl From<Block> for CitedBlock {\n    fn from(block: Block) -> Self {\n        Self {\n            protocol_version: block.protocol_version(),\n            era_id: block.era_id(),\n            height: block.height(),\n            proposer: block.proposer().clone(),\n            rewarded_signatures: block.rewarded_signatures().clone(),\n            state_root_hash: *block.state_root_hash(),\n            is_switch_block: block.is_switch_block(),\n            is_genesis: block.is_genesis(),\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/contract_runtime/tests.rs",
    "content": "use std::{collections::BTreeMap, iter, path::PathBuf, sync::Arc, time::Duration};\n\nuse casper_storage::data_access_layer::{QueryRequest, QueryResult};\nuse derive_more::{Display, From};\nuse fs_extra::dir;\nuse prometheus::Registry;\nuse rand::RngCore;\nuse serde::Serialize;\nuse tempfile::TempDir;\n\nuse casper_types::{\n    bytesrepr::Bytes, contracts::ProtocolVersionMajor, runtime_args, BlockHash, Chainspec,\n    ChainspecRawBytes, Deploy, Digest, EntityVersion, EraId, ExecutableDeployItem, PackageHash,\n    PricingMode, PublicKey, RuntimeArgs, SecretKey, TimeDiff, Timestamp, Transaction,\n    TransactionConfig, TransactionRuntimeParams, MINT_LANE_ID, U512,\n};\n\nuse super::*;\nuse crate::{\n    components::{\n        network::Identity as NetworkIdentity,\n        storage::{self, Storage},\n    },\n    effect::announcements::{ContractRuntimeAnnouncement, ControlAnnouncement, FatalAnnouncement},\n    protocol::Message,\n    reactor::{self, EventQueueHandle, ReactorEvent, Runner},\n    testing::{self, network::NetworkedReactor, ConditionCheckReactor},\n    types::{\n        transaction::{\n            calculate_transaction_lane_for_transaction,\n            transaction_v1_builder::TransactionV1Builder,\n        },\n        BlockPayload, ExecutableBlock, FinalizedBlock, InternalEraReport, MetaBlockState,\n    },\n    utils::{Loadable, WithDir, RESOURCES_PATH},\n    NodeRng,\n};\n\nconst FIXTURES_DIRECTORY: &str = \"../execution_engine_testing/tests/fixtures\";\nfn path_to_lmdb_fixtures() -> PathBuf {\n    Path::new(env!(\"CARGO_MANIFEST_DIR\")).join(FIXTURES_DIRECTORY)\n}\n\nconst RECENT_ERA_COUNT: u64 = 5;\nconst MAX_TTL: TimeDiff = TimeDiff::from_seconds(86400);\nconst TEST_TIMEOUT: Duration = Duration::from_secs(10);\n\n/// Top-level event for the reactor.\n#[derive(Debug, From, Serialize, Display)]\n#[must_use]\nenum Event {\n    #[from]\n    ContractRuntime(super::Event),\n    #[from]\n    ContractRuntimeRequest(ContractRuntimeRequest),\n    
#[from]\n    ContractRuntimeAnnouncement(ContractRuntimeAnnouncement),\n    #[from]\n    Storage(storage::Event),\n    #[from]\n    StorageRequest(StorageRequest),\n    #[from]\n    MetaBlockAnnouncement(MetaBlockAnnouncement),\n}\n\nimpl ReactorEvent for Event {\n    fn is_control(&self) -> bool {\n        false\n    }\n\n    fn try_into_control(self) -> Option<ControlAnnouncement> {\n        None\n    }\n}\n\ntrait Unhandled {}\n\nimpl<T: Unhandled> From<T> for Event {\n    fn from(_: T) -> Self {\n        unimplemented!(\"not handled in contract runtime tests\")\n    }\n}\n\nimpl Unhandled for ControlAnnouncement {}\n\nimpl Unhandled for FatalAnnouncement {}\n\nimpl Unhandled for NetworkRequest<Message> {}\n\nimpl Unhandled for UnexecutedBlockAnnouncement {}\n\nstruct TestConfig {\n    config: Config,\n    fixture_name: Option<String>,\n}\n\nstruct Reactor {\n    storage: Storage,\n    contract_runtime: ContractRuntime,\n    _storage_tempdir: TempDir,\n}\n\nimpl reactor::Reactor for Reactor {\n    type Event = Event;\n    type Config = TestConfig;\n    type Error = ConfigError;\n\n    fn new(\n        config: Self::Config,\n        chainspec: Arc<Chainspec>,\n        _chainspec_raw_bytes: Arc<ChainspecRawBytes>,\n        _network_identity: NetworkIdentity,\n        registry: &Registry,\n        _event_queue: EventQueueHandle<Self::Event>,\n        _rng: &mut NodeRng,\n    ) -> Result<(Self, Effects<Self::Event>), Self::Error> {\n        let (storage_config, storage_tempdir) = storage::Config::new_for_tests(1);\n        if let Some(fixture_name) = config.fixture_name {\n            let source = path_to_lmdb_fixtures().join(&fixture_name);\n            fs_extra::copy_items(&[source], &storage_tempdir, &dir::CopyOptions::default())\n                .expect(\"should copy global state fixture\");\n        }\n\n        let storage_withdir = WithDir::new(storage_tempdir.path(), storage_config);\n        let storage = Storage::new(\n            &storage_withdir,\n       
     None,\n            chainspec.protocol_version(),\n            EraId::default(),\n            \"test\",\n            MAX_TTL.into(),\n            RECENT_ERA_COUNT,\n            Some(registry),\n            false,\n            TransactionConfig::default(),\n        )\n        .unwrap();\n\n        let contract_runtime =\n            ContractRuntime::new(storage.root_path(), &config.config, chainspec, registry)?;\n\n        let reactor = Reactor {\n            storage,\n            contract_runtime,\n            _storage_tempdir: storage_tempdir,\n        };\n\n        Ok((reactor, Effects::new()))\n    }\n\n    fn dispatch_event(\n        &mut self,\n        effect_builder: EffectBuilder<Self::Event>,\n        rng: &mut NodeRng,\n        event: Event,\n    ) -> Effects<Self::Event> {\n        trace!(?event);\n        match event {\n            Event::ContractRuntime(event) => reactor::wrap_effects(\n                Event::ContractRuntime,\n                self.contract_runtime\n                    .handle_event(effect_builder, rng, event),\n            ),\n            Event::ContractRuntimeRequest(req) => reactor::wrap_effects(\n                Event::ContractRuntime,\n                self.contract_runtime\n                    .handle_event(effect_builder, rng, req.into()),\n            ),\n            Event::ContractRuntimeAnnouncement(announcement) => {\n                info!(\"{announcement}\");\n                Effects::new()\n            }\n            Event::Storage(event) => reactor::wrap_effects(\n                Event::Storage,\n                self.storage.handle_event(effect_builder, rng, event),\n            ),\n            Event::StorageRequest(req) => reactor::wrap_effects(\n                Event::Storage,\n                self.storage.handle_event(effect_builder, rng, req.into()),\n            ),\n            Event::MetaBlockAnnouncement(announcement) => {\n                info!(\"{announcement}\");\n                Effects::new()\n            }\n 
       }\n    }\n}\n\nimpl NetworkedReactor for Reactor {}\n\n/// Schedule the given block and its deploys to be executed by the contract runtime.\nfn execute_block(\n    executable_block: ExecutableBlock,\n) -> impl FnOnce(EffectBuilder<Event>) -> Effects<Event> {\n    |effect_builder| {\n        effect_builder\n            .enqueue_block_for_execution(executable_block, MetaBlockState::new())\n            .ignore()\n    }\n}\n\n/// A function to be used a condition check, indicating that execution has started.\nfn execution_started(event: &Event) -> bool {\n    matches!(\n        event,\n        Event::ContractRuntimeRequest(ContractRuntimeRequest::EnqueueBlockForExecution { .. })\n    )\n}\n\n/// A function to be used a condition check, indicating that execution has completed.\nfn execution_completed(event: &Event) -> bool {\n    matches!(event, Event::MetaBlockAnnouncement(_))\n}\n\n#[tokio::test]\nasync fn should_not_set_shared_pre_state_to_lower_block_height() {\n    testing::init_logging();\n\n    let config = Config {\n        max_global_state_size: Some(100 * 1024 * 1024),\n        ..Config::default()\n    };\n    let config = TestConfig {\n        config,\n        fixture_name: None,\n    };\n    let (chainspec, chainspec_raw_bytes) =\n        <(Chainspec, ChainspecRawBytes)>::from_resources(\"local\");\n    let chainspec = Arc::new(chainspec);\n    let chainspec_raw_bytes = Arc::new(chainspec_raw_bytes);\n\n    let mut rng = crate::new_rng();\n    let rng = &mut rng;\n\n    let mut runner: Runner<ConditionCheckReactor<Reactor>> = Runner::new(\n        config,\n        Arc::clone(&chainspec),\n        Arc::clone(&chainspec_raw_bytes),\n        rng,\n    )\n    .await\n    .unwrap();\n\n    // Commit genesis to set up initial global state.\n    let post_commit_genesis_state_hash = runner\n        .reactor()\n        .inner()\n        .contract_runtime\n        .commit_genesis(chainspec.as_ref(), chainspec_raw_bytes.as_ref())\n        .as_legacy()\n        
.unwrap()\n        .0;\n\n    let initial_pre_state = ExecutionPreState::new(\n        0,\n        post_commit_genesis_state_hash,\n        BlockHash::default(),\n        Digest::default(),\n    );\n    runner\n        .reactor_mut()\n        .inner_mut()\n        .contract_runtime\n        .set_initial_state(initial_pre_state);\n\n    // Create the genesis immediate switch block.\n    let block_0 = ExecutableBlock::from_finalized_block_and_transactions(\n        FinalizedBlock::new(\n            BlockPayload::default(),\n            Some(InternalEraReport::default()),\n            Timestamp::now(),\n            EraId::new(0),\n            0,\n            PublicKey::System,\n        ),\n        vec![],\n    );\n\n    runner\n        .process_injected_effects(execute_block(block_0))\n        .await;\n    runner\n        .crank_until(rng, execution_completed, TEST_TIMEOUT)\n        .await;\n\n    // Create the first block of era 1.\n    let block_1 = ExecutableBlock::from_finalized_block_and_transactions(\n        FinalizedBlock::new(\n            BlockPayload::default(),\n            None,\n            Timestamp::now(),\n            EraId::new(1),\n            1,\n            PublicKey::System,\n        ),\n        vec![],\n    );\n    runner\n        .process_injected_effects(execute_block(block_1))\n        .await;\n    runner\n        .crank_until(rng, execution_completed, TEST_TIMEOUT)\n        .await;\n\n    // Check that the next block height expected by the contract runtime is 2.\n    assert_eq!(\n        runner\n            .reactor()\n            .inner()\n            .contract_runtime\n            .execution_pre_state\n            .lock()\n            .unwrap()\n            .next_block_height(),\n        2\n    );\n\n    // Prepare to create a block which will take a while to execute, i.e. 
loaded with many deploys\n    // transferring from node-1's main account to new random public keys.\n    let node_1_secret_key = SecretKey::from_file(\n        RESOURCES_PATH\n            .join(\"local\")\n            .join(\"secret_keys\")\n            .join(\"node-1.pem\"),\n    )\n    .unwrap();\n    let timestamp = Timestamp::now();\n    let ttl = TimeDiff::from_seconds(100);\n    let gas_price = 1;\n    let chain_name = chainspec.network_config.name.clone();\n    let payment = ExecutableDeployItem::ModuleBytes {\n        module_bytes: Bytes::new(),\n        args: runtime_args! {\n          \"amount\" => U512::from(chainspec.system_costs_config.mint_costs().transfer),\n        },\n    };\n\n    let txns: Vec<Transaction> = iter::repeat_with(|| {\n        let target_public_key = PublicKey::random(rng);\n        let session = ExecutableDeployItem::Transfer {\n            args: runtime_args! {\n              \"amount\" => U512::from(chainspec.transaction_config.native_transfer_minimum_motes),\n              \"target\" => target_public_key,\n              \"id\" => Some(9_u64),\n            },\n        };\n        Transaction::Deploy(Deploy::new_signed(\n            timestamp,\n            ttl,\n            gas_price,\n            vec![],\n            chain_name.clone(),\n            payment.clone(),\n            session,\n            &node_1_secret_key,\n            None,\n        ))\n    })\n    .take(200)\n    .collect();\n\n    let mut txn_set = BTreeMap::new();\n    let val = txns\n        .iter()\n        .map(|transaction| {\n            let hash = transaction.hash();\n            let approvals = transaction.approvals();\n            (hash, approvals)\n        })\n        .collect();\n    txn_set.insert(MINT_LANE_ID, val);\n    let block_payload = BlockPayload::new(txn_set, vec![], Default::default(), true, 1u8);\n    let block_2 = ExecutableBlock::from_finalized_block_and_transactions(\n        FinalizedBlock::new(\n            block_payload,\n            
None,\n            Timestamp::now(),\n            EraId::new(1),\n            2,\n            PublicKey::System,\n        ),\n        txns,\n    );\n    runner\n        .process_injected_effects(execute_block(block_2))\n        .await;\n\n    // Crank until execution is scheduled.\n    runner\n        .crank_until(rng, execution_started, TEST_TIMEOUT)\n        .await;\n\n    // While executing this block, set the execution pre-state to a later block (as if we had sync\n    // leaped and skipped ahead).\n    let next_block_height = 9;\n    tokio::time::sleep(Duration::from_millis(50)).await;\n    runner\n        .reactor_mut()\n        .inner_mut()\n        .contract_runtime\n        .set_initial_state(ExecutionPreState::new(\n            next_block_height,\n            Digest::hash(rng.next_u64().to_le_bytes()),\n            BlockHash::random(rng),\n            Digest::hash(rng.next_u64().to_le_bytes()),\n        ));\n\n    runner\n        .crank_until(rng, execution_completed, TEST_TIMEOUT)\n        .await;\n\n    let actual = runner\n        .reactor()\n        .inner()\n        .contract_runtime\n        .execution_pre_state\n        .lock()\n        .unwrap()\n        .next_block_height();\n\n    let expected = next_block_height;\n\n    // Check that the next block height expected by the contract runtime is `next_block_height` and\n    // not 3.\n    assert_eq!(actual, expected);\n}\n\nfn valid_wasm_txn(\n    initiator: &SecretKey,\n    chain_name: &str,\n    pricing_mode: PricingMode,\n    name: &str,\n    runtime_args: RuntimeArgs,\n) -> Transaction {\n    let contract_file = RESOURCES_PATH\n        .join(\"..\")\n        .join(\"target\")\n        .join(\"wasm32-unknown-unknown\")\n        .join(\"release\")\n        .join(format!(\"{name}.wasm\"));\n    let module_bytes = Bytes::from(std::fs::read(contract_file).expect(\"cannot read module bytes\"));\n    let mut txn = Transaction::from(\n        TransactionV1Builder::new_session_with_runtime_args(\n        
    true,\n            module_bytes,\n            TransactionRuntimeParams::VmCasperV1,\n            runtime_args,\n        )\n        .with_chain_name(chain_name)\n        .with_pricing_mode(pricing_mode)\n        .with_initiator_addr(PublicKey::from(initiator))\n        .build()\n        .unwrap(),\n    );\n    txn.sign(initiator);\n    txn\n}\n\n#[allow(clippy::too_many_arguments)]\nfn valid_versioned_call_txn(\n    initiator: &SecretKey,\n    chain_name: &str,\n    pricing_mode: PricingMode,\n    entry_point: &str,\n    package_hash: PackageHash,\n    runtime_args: RuntimeArgs,\n    version: Option<EntityVersion>,\n    protocol_version_major: Option<ProtocolVersionMajor>,\n) -> Transaction {\n    let mut txn = Transaction::from(\n        TransactionV1Builder::new_targeting_package_with_runtime_args(\n            package_hash,\n            version,\n            protocol_version_major,\n            entry_point,\n            TransactionRuntimeParams::VmCasperV1,\n            runtime_args,\n        )\n        .with_chain_name(chain_name)\n        .with_pricing_mode(pricing_mode)\n        .with_initiator_addr(PublicKey::from(initiator))\n        .build()\n        .unwrap(),\n    );\n    txn.sign(initiator);\n    txn\n}\n\n#[tokio::test]\nasync fn should_correctly_manage_entity_version_calls() {\n    testing::init_logging();\n\n    let config = Config {\n        max_global_state_size: Some(100 * 1024 * 1024),\n        ..Config::default()\n    };\n    let config = TestConfig {\n        config,\n        fixture_name: None,\n    };\n    let (chainspec, chainspec_raw_bytes) =\n        <(Chainspec, ChainspecRawBytes)>::from_resources(\"local\");\n    let chainspec = Arc::new(chainspec);\n    let chainspec_raw_bytes = Arc::new(chainspec_raw_bytes);\n\n    let mut rng = crate::new_rng();\n    let rng = &mut rng;\n\n    let mut runner: Runner<ConditionCheckReactor<Reactor>> = Runner::new(\n        config,\n        Arc::clone(&chainspec),\n        
Arc::clone(&chainspec_raw_bytes),\n        rng,\n    )\n    .await\n    .unwrap();\n\n    // Commit genesis to set up initial global state.\n    let post_commit_genesis_state_hash = runner\n        .reactor()\n        .inner()\n        .contract_runtime\n        .commit_genesis(chainspec.as_ref(), chainspec_raw_bytes.as_ref())\n        .as_legacy()\n        .unwrap()\n        .0;\n\n    let initial_pre_state = ExecutionPreState::new(\n        0,\n        post_commit_genesis_state_hash,\n        BlockHash::default(),\n        Digest::default(),\n    );\n    runner\n        .reactor_mut()\n        .inner_mut()\n        .contract_runtime\n        .set_initial_state(initial_pre_state);\n\n    // Create the genesis immediate switch block.\n    let block_0 = ExecutableBlock::from_finalized_block_and_transactions(\n        FinalizedBlock::new(\n            BlockPayload::default(),\n            Some(InternalEraReport::default()),\n            Timestamp::now(),\n            EraId::new(0),\n            0,\n            PublicKey::System,\n        ),\n        vec![],\n    );\n\n    runner\n        .process_injected_effects(execute_block(block_0))\n        .await;\n    runner\n        .crank_until(rng, execution_completed, TEST_TIMEOUT)\n        .await;\n\n    // Create the first block of era 1.\n    let block_1 = ExecutableBlock::from_finalized_block_and_transactions(\n        FinalizedBlock::new(\n            BlockPayload::default(),\n            None,\n            Timestamp::now(),\n            EraId::new(1),\n            1,\n            PublicKey::System,\n        ),\n        vec![],\n    );\n    runner\n        .process_injected_effects(execute_block(block_1))\n        .await;\n    runner\n        .crank_until(rng, execution_completed, TEST_TIMEOUT)\n        .await;\n\n    // Prepare to create a block which will take a while to execute, i.e. 
loaded with many deploys\n    // transferring from node-1's main account to new random public keys.\n    let node_1_secret_key = SecretKey::from_file(\n        RESOURCES_PATH\n            .join(\"local\")\n            .join(\"secret_keys\")\n            .join(\"node-1.pem\"),\n    )\n    .unwrap();\n\n    let node_1_public_key = PublicKey::from(&node_1_secret_key);\n    let chain_name = chainspec.network_config.name.clone();\n    let installer_transaction = valid_wasm_txn(\n        &node_1_secret_key,\n        &chain_name,\n        PricingMode::PaymentLimited {\n            payment_amount: 250_000_000_000,\n            gas_price_tolerance: 3,\n            standard_payment: true,\n        },\n        \"purse_holder_stored\",\n        runtime_args! {\n            \"is_locked\" => false\n        },\n    );\n\n    let lane_id =\n        calculate_transaction_lane_for_transaction(&installer_transaction, &chainspec).unwrap();\n\n    let mut txn_set = BTreeMap::new();\n    let txn_hash = installer_transaction.hash();\n    let approvals = installer_transaction.approvals();\n\n    txn_set.insert(lane_id, vec![(txn_hash, approvals)]);\n\n    let block_payload = BlockPayload::new(txn_set, vec![], Default::default(), true, 1u8);\n    let block_2 = ExecutableBlock::from_finalized_block_and_transactions(\n        FinalizedBlock::new(\n            block_payload,\n            None,\n            Timestamp::now(),\n            EraId::new(1),\n            2,\n            PublicKey::System,\n        ),\n        vec![installer_transaction],\n    );\n    runner\n        .process_injected_effects(execute_block(block_2))\n        .await;\n\n    // Crank until execution is scheduled.\n    runner\n        .crank_until(rng, execution_completed, TEST_TIMEOUT)\n        .await;\n\n    let pre_state_hash = {\n        let prestate = runner\n            .reactor()\n            .inner()\n            .contract_runtime\n            .execution_pre_state\n            .lock()\n            .expect(\"must 
get lock\");\n        prestate.pre_state_root_hash()\n    };\n\n    let key = Key::Account(node_1_public_key.to_account_hash());\n    let query_request = QueryRequest::new(pre_state_hash, key, vec![]);\n\n    let package_key = if let QueryResult::Success { value, .. } = runner\n        .reactor()\n        .inner()\n        .contract_runtime\n        .data_access_layer\n        .query(query_request)\n    {\n        *value\n            .as_account()\n            .expect(\"must get account\")\n            .named_keys()\n            .get(\"purse_holder\")\n            .expect(\"must get package key\")\n    } else {\n        panic!(\"query failed\");\n    };\n\n    let package_hash = package_key\n        .into_hash_addr()\n        .map(PackageHash::new)\n        .expect(\"must get package hash\");\n\n    let upgrader_transaction = valid_wasm_txn(\n        &node_1_secret_key,\n        &chain_name,\n        PricingMode::PaymentLimited {\n            payment_amount: 250_000_000_000,\n            gas_price_tolerance: 3,\n            standard_payment: true,\n        },\n        \"purse_holder_stored_upgrader\",\n        runtime_args! 
{\n            \"contract_package\" => package_hash\n        },\n    );\n\n    let lane_id =\n        calculate_transaction_lane_for_transaction(&upgrader_transaction, &chainspec).unwrap();\n\n    let mut txn_set = BTreeMap::new();\n    let txn_hash = upgrader_transaction.hash();\n    let approvals = upgrader_transaction.approvals();\n\n    txn_set.insert(lane_id, vec![(txn_hash, approvals)]);\n\n    let block_payload = BlockPayload::new(txn_set, vec![], Default::default(), true, 1u8);\n    let block_2 = ExecutableBlock::from_finalized_block_and_transactions(\n        FinalizedBlock::new(\n            block_payload,\n            None,\n            Timestamp::now(),\n            EraId::new(1),\n            3,\n            PublicKey::System,\n        ),\n        vec![upgrader_transaction],\n    );\n    runner\n        .process_injected_effects(execute_block(block_2))\n        .await;\n\n    // Crank until execution is scheduled.\n    runner\n        .crank_until(rng, execution_completed, TEST_TIMEOUT)\n        .await;\n\n    let pre_state_hash = {\n        let prestate = runner\n            .reactor()\n            .inner()\n            .contract_runtime\n            .execution_pre_state\n            .lock()\n            .expect(\"must get lock\");\n        prestate.pre_state_root_hash()\n    };\n\n    let query_request = QueryRequest::new(pre_state_hash, package_key, vec![]);\n    if let QueryResult::Success { value, .. 
} = runner\n        .reactor()\n        .inner()\n        .contract_runtime\n        .data_access_layer\n        .query(query_request)\n    {\n        let versions = value\n            .as_contract_package()\n            .expect(\"must get account\")\n            .versions();\n\n        assert_eq!(2, versions.len())\n    } else {\n        panic!(\"query failed\");\n    };\n\n    let call_by_entity_version_1 = valid_versioned_call_txn(\n        &node_1_secret_key,\n        &chain_name,\n        PricingMode::PaymentLimited {\n            payment_amount: 250_000_000_000,\n            gas_price_tolerance: 3,\n            standard_payment: true,\n        },\n        \"add_named_purse\",\n        package_hash,\n        runtime_args! {\n            \"purse_name\" => \"purse\"\n        },\n        Some(1),\n        None,\n    );\n\n    let call_by_major_version_and_entity_version = valid_versioned_call_txn(\n        &node_1_secret_key,\n        &chain_name,\n        PricingMode::PaymentLimited {\n            payment_amount: 250_000_000_000,\n            gas_price_tolerance: 3,\n            standard_payment: true,\n        },\n        \"add_named_purse\",\n        package_hash,\n        runtime_args! {\n            \"purse_name\" => \"purse\"\n        },\n        Some(1),\n        Some(2),\n    );\n\n    let call_by_major_version = valid_versioned_call_txn(\n        &node_1_secret_key,\n        &chain_name,\n        PricingMode::PaymentLimited {\n            payment_amount: 250_000_000_000,\n            gas_price_tolerance: 3,\n            standard_payment: true,\n        },\n        \"add\",\n        package_hash,\n        runtime_args! 
{\n            \"purse_name\" => \"purse\"\n        },\n        None,\n        Some(2),\n    );\n\n    let lane_id =\n        calculate_transaction_lane_for_transaction(&call_by_entity_version_1, &chainspec).unwrap();\n\n    let txns = vec![\n        call_by_entity_version_1,\n        call_by_major_version_and_entity_version,\n        call_by_major_version,\n    ];\n\n    let mut txn_set = BTreeMap::new();\n    let val = txns\n        .iter()\n        .map(|txn| {\n            let hash = txn.hash();\n            let approvals = txn.approvals();\n            (hash, approvals)\n        })\n        .collect();\n\n    txn_set.insert(lane_id, val);\n\n    let block_payload = BlockPayload::new(txn_set, vec![], Default::default(), true, 1u8);\n    let block_3 = ExecutableBlock::from_finalized_block_and_transactions(\n        FinalizedBlock::new(\n            block_payload,\n            None,\n            Timestamp::now(),\n            EraId::new(1),\n            4,\n            PublicKey::System,\n        ),\n        txns.clone(),\n    );\n    runner\n        .process_injected_effects(execute_block(block_3))\n        .await;\n\n    // Crank until execution is scheduled.\n    runner\n        .crank_until(rng, execution_completed, TEST_TIMEOUT)\n        .await;\n\n    for txn in txns.iter() {\n        let hash = txn.hash();\n        let results = runner\n            .reactor()\n            .inner()\n            .storage\n            .read_execution_result(&hash)\n            .unwrap();\n\n        assert!(results.error_message().is_none())\n    }\n}\n\n#[cfg(test)]\nmod test_mod {\n    use std::sync::Arc;\n\n    use prometheus::Registry;\n    use rand::Rng;\n    use tempfile::tempdir;\n\n    use casper_storage::{\n        data_access_layer::{EntryPointExistsRequest, EntryPointExistsResult},\n        global_state::{\n            state::{CommitProvider, StateProvider},\n            trie::Trie,\n        },\n    };\n    use casper_types::{\n        account::AccountHash,\n        
bytesrepr,\n        contracts::{ContractPackageHash, EntryPoint, EntryPoints},\n        execution::{TransformKindV2, TransformV2},\n        global_state::Pointer,\n        testing::TestRng,\n        ActivationPoint, CLType, CLValue, Chainspec, ChunkWithProof, Contract, ContractWasmHash,\n        CoreConfig, Digest, EntityAddr, EntryPointAccess, EntryPointAddr, EntryPointPayment,\n        EntryPointType, EntryPointValue, EraId, HashAddr, Key, NamedKeys, ProtocolConfig,\n        ProtocolVersion, StoredValue, TimeDiff, DEFAULT_FEE_HANDLING, DEFAULT_GAS_HOLD_INTERVAL,\n        DEFAULT_REFUND_HANDLING,\n    };\n\n    use super::{Config as ContractRuntimeConfig, ContractRuntime};\n    use crate::{\n        components::fetcher::FetchResponse,\n        contract_runtime::ContractRuntimeError,\n        types::{ChunkingError, TrieOrChunk, TrieOrChunkId, ValueOrChunk},\n    };\n\n    #[derive(Debug, Clone)]\n    struct TestPair(Key, StoredValue);\n\n    fn create_pre_condor_contract(\n        rng: &mut TestRng,\n        contract_hash: Key,\n        entry_point_name: &str,\n        protocol_version: ProtocolVersion,\n    ) -> Vec<TestPair> {\n        let mut entry_points = EntryPoints::new();\n        let entry_point = EntryPoint::new(\n            entry_point_name,\n            vec![],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n\n        let contract_package_hash = ContractPackageHash::new(rng.gen());\n        let contract_wasm_hash = ContractWasmHash::new(rng.gen());\n        let named_keys = NamedKeys::new();\n        let contract = Contract::new(\n            contract_package_hash,\n            contract_wasm_hash,\n            named_keys,\n            entry_points,\n            protocol_version,\n        );\n        vec![TestPair(contract_hash, StoredValue::Contract(contract))]\n    }\n\n    fn create_entry_point(entity_addr: EntityAddr, 
entry_point_name: &str) -> Vec<TestPair> {\n        let mut entry_points = EntryPoints::new();\n        let entry_point = EntryPoint::new(\n            entry_point_name,\n            vec![],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n        let key = Key::EntryPoint(\n            EntryPointAddr::new_v1_entry_point_addr(entity_addr, entry_point_name).unwrap(),\n        );\n        let entry_point = casper_types::EntityEntryPoint::new(\n            entry_point_name,\n            vec![],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Caller,\n            EntryPointPayment::Caller,\n        );\n        let entry_point_value = EntryPointValue::V1CasperVm(entry_point);\n        vec![TestPair(key, StoredValue::EntryPoint(entry_point_value))]\n    }\n\n    // Creates the test pairs that contain data of size\n    // greater than the chunk limit.\n    fn create_test_pairs_with_large_data() -> Vec<TestPair> {\n        let val = CLValue::from_t(\n            String::from_utf8(vec![b'a'; ChunkWithProof::CHUNK_SIZE_BYTES * 2]).unwrap(),\n        )\n        .unwrap();\n        vec![\n            TestPair(\n                Key::Account(AccountHash::new([1_u8; 32])),\n                StoredValue::CLValue(val.clone()),\n            ),\n            TestPair(\n                Key::Account(AccountHash::new([2_u8; 32])),\n                StoredValue::CLValue(val),\n            ),\n        ]\n    }\n\n    fn extract_next_hash_from_trie(trie_or_chunk: TrieOrChunk) -> Digest {\n        let next_hash = if let TrieOrChunk::Value(trie_bytes) = trie_or_chunk {\n            if let Trie::Node { pointer_block } = bytesrepr::deserialize::<Trie<Key, StoredValue>>(\n                trie_bytes.into_inner().into_inner().into(),\n            )\n            .expect(\"Could not parse trie bytes\")\n            {\n                if 
pointer_block.child_count() == 0 {\n                    panic!(\"expected children\");\n                }\n                let (_, ptr) = pointer_block.as_indexed_pointers().next().unwrap();\n                match ptr {\n                    Pointer::LeafPointer(ptr) | Pointer::NodePointer(ptr) => ptr,\n                }\n            } else {\n                panic!(\"expected `Node`\");\n            }\n        } else {\n            panic!(\"expected `Trie`\");\n        };\n        next_hash\n    }\n\n    // Creates a test ContractRuntime and feeds the underlying GlobalState with `test_pair`.\n    // Returns [`ContractRuntime`] instance and the new Merkle root after applying the `test_pair`.\n    fn create_test_state(rng: &mut TestRng, test_pair: Vec<TestPair>) -> (ContractRuntime, Digest) {\n        let temp_dir = tempdir().unwrap();\n        let chainspec = Chainspec {\n            protocol_config: ProtocolConfig {\n                activation_point: ActivationPoint::EraId(EraId::from(2)),\n                ..ProtocolConfig::random(rng)\n            },\n            core_config: CoreConfig {\n                max_associated_keys: 10,\n                max_runtime_call_stack_height: 10,\n                minimum_delegation_amount: 10,\n                prune_batch_size: 5,\n                strict_argument_checking: true,\n                vesting_schedule_period: TimeDiff::from_millis(1),\n                max_delegators_per_validator: 0,\n                allow_auction_bids: true,\n                allow_unrestricted_transfers: true,\n                fee_handling: DEFAULT_FEE_HANDLING,\n                refund_handling: DEFAULT_REFUND_HANDLING,\n                gas_hold_interval: DEFAULT_GAS_HOLD_INTERVAL,\n                ..CoreConfig::random(rng)\n            },\n            wasm_config: Default::default(),\n            system_costs_config: Default::default(),\n            ..Chainspec::random(rng)\n        };\n        let contract_runtime = ContractRuntime::new(\n           
 temp_dir.path(),\n            &ContractRuntimeConfig::default(),\n            Arc::new(chainspec),\n            &Registry::default(),\n        )\n        .unwrap();\n        let empty_state_root = contract_runtime.data_access_layer().empty_root();\n        let mut effects = casper_types::execution::Effects::new();\n        for TestPair(key, value) in test_pair {\n            effects.push(TransformV2::new(key, TransformKindV2::Write(value)));\n        }\n        let post_state_hash = &contract_runtime\n            .data_access_layer()\n            .as_ref()\n            .commit_effects(empty_state_root, effects)\n            .expect(\"applying effects to succeed\");\n        (contract_runtime, *post_state_hash)\n    }\n\n    fn read_trie(contract_runtime: &ContractRuntime, id: TrieOrChunkId) -> TrieOrChunk {\n        let serialized_id = bincode::serialize(&id).unwrap();\n        match contract_runtime\n            .fetch_trie_local(&serialized_id)\n            .expect(\"expected a successful read\")\n        {\n            FetchResponse::Fetched(found) => found,\n            FetchResponse::NotProvided(_) | FetchResponse::NotFound(_) => {\n                panic!(\"expected to find the trie\")\n            }\n        }\n    }\n\n    #[test]\n    fn fetching_enty_points_falls_back_to_contract() {\n        let rng = &mut TestRng::new();\n        let hash_addr: HashAddr = rng.gen();\n        let contract_hash = Key::Hash(hash_addr);\n        let entry_point_name = \"ep1\";\n        let initial_state = create_pre_condor_contract(\n            rng,\n            contract_hash,\n            entry_point_name,\n            ProtocolVersion::V2_0_0,\n        );\n        let (contract_runtime, state_hash) = create_test_state(rng, initial_state);\n        let request =\n            EntryPointExistsRequest::new(state_hash, entry_point_name.to_string(), hash_addr);\n        let res = contract_runtime\n            .data_access_layer()\n            .entry_point_exists(request);\n     
   assert!(matches!(res, EntryPointExistsResult::Success));\n    }\n\n    #[test]\n    fn fetching_enty_points_fetches_entry_point_from_v2() {\n        let rng = &mut TestRng::new();\n        let hash_addr: HashAddr = rng.gen();\n        let entity_addr = EntityAddr::new_smart_contract(hash_addr);\n        let entry_point_name = \"ep1\";\n        let initial_state = create_entry_point(entity_addr, entry_point_name);\n        let (contract_runtime, state_hash) = create_test_state(rng, initial_state);\n        let request =\n            EntryPointExistsRequest::new(state_hash, entry_point_name.to_string(), hash_addr);\n        let res = contract_runtime\n            .data_access_layer()\n            .entry_point_exists(request);\n        assert!(matches!(res, EntryPointExistsResult::Success));\n    }\n\n    #[test]\n    fn fetching_enty_points_fetches_fail_when_asking_for_non_existing() {\n        let rng = &mut TestRng::new();\n        let hash_addr: HashAddr = rng.gen();\n        let entity_addr = EntityAddr::new_smart_contract(hash_addr);\n        let initial_state = create_entry_point(entity_addr, \"ep1\");\n        let (contract_runtime, state_hash) = create_test_state(rng, initial_state);\n        let request = EntryPointExistsRequest::new(state_hash, \"ep2\".to_string(), hash_addr);\n        let res = contract_runtime\n            .data_access_layer()\n            .entry_point_exists(request);\n        assert!(matches!(res, EntryPointExistsResult::ValueNotFound { .. 
}));\n    }\n\n    #[test]\n    fn returns_trie_or_chunk() {\n        let rng = &mut TestRng::new();\n        let (contract_runtime, root_hash) =\n            create_test_state(rng, create_test_pairs_with_large_data());\n\n        // Expect `Trie` with NodePointer when asking with a root hash.\n        let trie = read_trie(&contract_runtime, TrieOrChunkId(0, root_hash));\n        assert!(matches!(trie, ValueOrChunk::Value(_)));\n\n        // Expect another `Trie` with two LeafPointers.\n        let trie = read_trie(\n            &contract_runtime,\n            TrieOrChunkId(0, extract_next_hash_from_trie(trie)),\n        );\n        assert!(matches!(trie, TrieOrChunk::Value(_)));\n\n        // Now, the next hash will point to the actual leaf, which as we expect\n        // contains large data, so we expect to get `ChunkWithProof`.\n        let hash = extract_next_hash_from_trie(trie);\n        let chunk = match read_trie(&contract_runtime, TrieOrChunkId(0, hash)) {\n            TrieOrChunk::ChunkWithProof(chunk) => chunk,\n            other => panic!(\"expected ChunkWithProof, got {:?}\", other),\n        };\n\n        assert_eq!(chunk.proof().root_hash(), hash);\n\n        // try to read all the chunks\n        let count = chunk.proof().count();\n        let mut chunks = vec![chunk];\n        for i in 1..count {\n            let chunk = match read_trie(&contract_runtime, TrieOrChunkId(i, hash)) {\n                TrieOrChunk::ChunkWithProof(chunk) => chunk,\n                other => panic!(\"expected ChunkWithProof, got {:?}\", other),\n            };\n            chunks.push(chunk);\n        }\n\n        // there should be no chunk with index `count`\n        let serialized_id = bincode::serialize(&TrieOrChunkId(count, hash)).unwrap();\n        assert!(matches!(\n            contract_runtime.fetch_trie_local(&serialized_id),\n            Err(ContractRuntimeError::ChunkingError(\n                ChunkingError::MerkleConstruction(_)\n            ))\n        ));\n\n 
       // all chunks should be valid\n        assert!(chunks.iter().all(|chunk| chunk.verify().is_ok()));\n\n        let data: Vec<u8> = chunks\n            .into_iter()\n            .flat_map(|chunk| chunk.into_chunk())\n            .collect();\n\n        let trie: Trie<Key, StoredValue> =\n            bytesrepr::deserialize(data).expect(\"trie should deserialize correctly\");\n\n        // should be deserialized to a leaf\n        assert!(matches!(trie, Trie::Leaf { .. }));\n    }\n}\n"
  },
  {
    "path": "node/src/components/contract_runtime/types.rs",
    "content": "use std::{collections::BTreeMap, sync::Arc};\n\nuse crate::{contract_runtime::StateResultError, types::TransactionHeader};\nuse casper_types::{InitiatorAddr, Transfer};\nuse datasize::DataSize;\nuse serde::Serialize;\n\nuse casper_execution_engine::engine_state::{\n    Error, InvalidRequest as InvalidWasmV1Request, WasmV1Result,\n};\nuse casper_storage::{\n    block_store::types::ApprovalsHashes,\n    data_access_layer::{\n        auction::AuctionMethodError, mint::BurnResult, BalanceHoldResult, BalanceResult,\n        BiddingResult, EraValidatorsRequest, HandleFeeResult, HandleRefundResult, TransferResult,\n    },\n};\nuse casper_types::{\n    contract_messages::Messages,\n    execution::{Effects, ExecutionResult, ExecutionResultV2},\n    BlockHash, BlockHeaderV2, BlockV2, Digest, EraId, Gas, InvalidDeploy, InvalidTransaction,\n    InvalidTransactionV1, ProtocolVersion, PublicKey, Transaction, TransactionHash, U512,\n};\n\nuse self::wasm_v2_request::{WasmV2Error, WasmV2Result};\n\nuse super::operations::wasm_v2_request;\n\n/// Request for validator weights for a specific era.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct ValidatorWeightsByEraIdRequest {\n    state_hash: Digest,\n    era_id: EraId,\n    protocol_version: ProtocolVersion,\n}\n\nimpl ValidatorWeightsByEraIdRequest {\n    /// Constructs a new ValidatorWeightsByEraIdRequest.\n    pub fn new(state_hash: Digest, era_id: EraId, protocol_version: ProtocolVersion) -> Self {\n        ValidatorWeightsByEraIdRequest {\n            state_hash,\n            era_id,\n            protocol_version,\n        }\n    }\n\n    /// Get the state hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Get the era id.\n    pub fn era_id(&self) -> EraId {\n        self.era_id\n    }\n\n    /// Get the protocol version.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n}\n\nimpl From<ValidatorWeightsByEraIdRequest> for 
EraValidatorsRequest {\n    fn from(input: ValidatorWeightsByEraIdRequest) -> Self {\n        EraValidatorsRequest::new(input.state_hash)\n    }\n}\n\n#[derive(Clone, Debug)]\npub(crate) struct ExecutionArtifactBuilder {\n    effects: Effects,\n    hash: TransactionHash,\n    header: TransactionHeader,\n    error_message: Option<String>,\n    messages: Messages,\n    transfers: Vec<Transfer>,\n    initiator: InitiatorAddr,\n    current_price: u8,\n    cost: U512,\n    limit: Gas,\n    consumed: Gas,\n    refund: U512,\n    size_estimate: u64,\n    min_cost: U512,\n    available: Option<U512>,\n}\n\nimpl ExecutionArtifactBuilder {\n    pub fn new(\n        transaction: &Transaction,\n        limit: Gas,\n        current_price: u8,\n        initial_cost: U512,\n        min_cost: U512,\n    ) -> Self {\n        ExecutionArtifactBuilder {\n            effects: Effects::new(),\n            hash: transaction.hash(),\n            header: transaction.into(),\n            error_message: None,\n            transfers: vec![],\n            messages: Default::default(),\n            initiator: transaction.initiator_addr(),\n            current_price,\n            cost: initial_cost,\n            limit,\n            consumed: Gas::zero(),\n            refund: U512::zero(),\n            size_estimate: transaction.size_estimate() as u64,\n            min_cost,\n            available: None,\n        }\n    }\n\n    pub fn pre_condition_failure(\n        transaction: &Transaction,\n        current_price: u8,\n        invalid_transaction: InvalidTransaction,\n    ) -> Self {\n        ExecutionArtifactBuilder {\n            effects: Effects::new(),\n            hash: transaction.hash(),\n            header: transaction.into(),\n            error_message: Some(format!(\"{}\", invalid_transaction)),\n            transfers: vec![],\n            messages: Default::default(),\n            initiator: transaction.initiator_addr(),\n            current_price,\n            cost: 
U512::zero(),\n            limit: Gas::zero(),\n            consumed: Gas::zero(),\n            refund: U512::zero(),\n            size_estimate: transaction.size_estimate() as u64,\n            min_cost: U512::zero(),\n            available: None,\n        }\n    }\n\n    pub fn error_message(&self) -> Option<String> {\n        self.error_message.clone()\n    }\n\n    pub fn gas_limit(&self) -> Gas {\n        self.limit\n    }\n\n    pub fn limit(&self) -> U512 {\n        self.limit.value()\n    }\n\n    pub fn consumed(&self) -> U512 {\n        self.consumed.value()\n    }\n\n    pub fn available(&self) -> Option<U512> {\n        self.available\n    }\n\n    pub fn actual_cost(&self) -> U512 {\n        self.cost\n    }\n\n    pub fn cost_to_use(&self) -> U512 {\n        // to prevent do-nothing exhaustion and other 0 cost scenarios,\n        // we raise cost to min_cost if less than that\n\n        let cost = {\n            let cost = self.cost;\n            if cost < self.min_cost {\n                self.min_cost\n            } else {\n                cost\n            }\n        };\n\n        match self.available {\n            Some(available) => {\n                if available < self.cost {\n                    available\n                } else {\n                    cost\n                }\n            }\n            None => cost,\n        }\n    }\n\n    pub fn consume_limit(&mut self) -> &mut Self {\n        self.consumed = self.consumed.saturating_add(self.limit);\n        self\n    }\n\n    pub fn with_added_consumed(&mut self, consumed: Gas) -> &mut Self {\n        self.consumed = self.consumed.saturating_add(consumed);\n        self\n    }\n\n    pub fn with_appended_transfers(&mut self, transfers: &mut Vec<Transfer>) -> &mut Self {\n        self.transfers.append(transfers);\n        self\n    }\n\n    pub fn with_appended_effects(&mut self, effects: Effects) -> &mut Self {\n        self.effects.append(effects);\n        self\n    }\n\n    pub fn 
with_appended_messages(&mut self, messages: &mut Messages) -> &mut Self {\n        self.messages.append(messages);\n        self\n    }\n\n    pub fn with_state_result_error(&mut self, error: StateResultError) -> Result<&mut Self, ()> {\n        if let StateResultError::RootNotFound = error {\n            return Err(());\n        }\n        if self.error_message.is_none() {\n            self.error_message = Some(format!(\"{:?}\", error));\n        }\n        Ok(self)\n    }\n\n    pub fn with_initial_balance_result(\n        &mut self,\n        balance_result: BalanceResult,\n        minimum_amount: U512,\n    ) -> Result<&mut Self, bool> {\n        if let BalanceResult::RootNotFound = balance_result {\n            return Err(true);\n        }\n        if let (None, Some(err)) = (&self.error_message, balance_result.error()) {\n            self.error_message = Some(format!(\"{}\", err));\n            return Err(false);\n        }\n        if let Some(purse) = balance_result.purse_addr() {\n            let is_sufficient = balance_result.is_sufficient(minimum_amount);\n            if !is_sufficient {\n                self.error_message = Some(format!(\n                    \"Purse {} has less than {}\",\n                    base16::encode_lower(&purse),\n                    minimum_amount\n                ));\n                return Ok(self);\n            }\n        }\n        Ok(self)\n    }\n\n    pub fn with_wasm_v1_result(&mut self, wasm_v1_result: WasmV1Result) -> Result<&mut Self, ()> {\n        if let Some(Error::RootNotFound(_)) = wasm_v1_result.error() {\n            return Err(());\n        }\n        self.with_added_consumed(wasm_v1_result.consumed());\n\n        if let Some(err) = wasm_v1_result.error() {\n            self.error_message = Some(format!(\"{}\", err));\n        } else if wasm_v1_result.consumed() == Gas::zero() {\n            self.error_message = Some(\"Wasm consumed 0 gas\".to_string());\n        }\n\n        if self.error_message.is_some() 
{\n            return Ok(self);\n        }\n\n        self.with_appended_transfers(&mut wasm_v1_result.transfers().clone())\n            .with_appended_messages(&mut wasm_v1_result.messages().clone())\n            .with_appended_effects(wasm_v1_result.effects().clone());\n        Ok(self)\n    }\n\n    pub fn with_error_message(&mut self, error_message: String) -> &mut Self {\n        self.error_message = Some(error_message);\n        self\n    }\n\n    pub fn with_set_refund_purse_result(\n        &mut self,\n        handle_refund_result: &HandleRefundResult,\n    ) -> Result<&mut Self, bool> {\n        if let HandleRefundResult::RootNotFound = handle_refund_result {\n            return Err(true);\n        }\n        if let HandleRefundResult::Success {\n            effects, transfers, ..\n        } = handle_refund_result\n        {\n            self.with_appended_transfers(&mut transfers.clone())\n                .with_appended_effects(effects.clone());\n        }\n        if let (None, HandleRefundResult::Failure(_)) = (&self.error_message, handle_refund_result)\n        {\n            self.error_message = handle_refund_result.error_message();\n            return Err(false);\n        }\n        Ok(self)\n    }\n\n    pub fn with_clear_refund_purse_result(\n        &mut self,\n        handle_refund_result: &HandleRefundResult,\n    ) -> Result<&mut Self, bool> {\n        if let HandleRefundResult::RootNotFound = handle_refund_result {\n            return Err(true);\n        }\n        if let HandleRefundResult::Success {\n            effects, transfers, ..\n        } = handle_refund_result\n        {\n            self.with_appended_transfers(&mut transfers.clone())\n                .with_appended_effects(effects.clone());\n        }\n        if let (None, HandleRefundResult::Failure(_)) = (&self.error_message, handle_refund_result)\n        {\n            self.error_message = handle_refund_result.error_message();\n            return Err(false);\n        }\n       
 Ok(self)\n    }\n\n    pub fn with_handle_refund_result(\n        &mut self,\n        handle_refund_result: &HandleRefundResult,\n    ) -> Result<&mut Self, ()> {\n        if let HandleRefundResult::RootNotFound = handle_refund_result {\n            return Err(());\n        }\n        if let HandleRefundResult::Success {\n            effects, transfers, ..\n        } = handle_refund_result\n        {\n            self.with_appended_transfers(&mut transfers.clone())\n                .with_appended_effects(effects.clone());\n        }\n        if let (None, HandleRefundResult::Failure(_)) = (&self.error_message, handle_refund_result)\n        {\n            self.error_message = handle_refund_result.error_message();\n            return Ok(self);\n        }\n        Ok(self)\n    }\n\n    pub fn with_handle_fee_result(\n        &mut self,\n        handle_fee_result: &HandleFeeResult,\n    ) -> Result<&mut Self, ()> {\n        if let HandleFeeResult::RootNotFound = handle_fee_result {\n            return Err(());\n        }\n        if let HandleFeeResult::Success {\n            effects, transfers, ..\n        } = handle_fee_result\n        {\n            self.with_appended_transfers(&mut transfers.clone())\n                .with_appended_effects(effects.clone());\n        }\n        if let (None, HandleFeeResult::Failure(_)) = (&self.error_message, handle_fee_result) {\n            self.error_message = handle_fee_result.error_message();\n            return Ok(self);\n        }\n        Ok(self)\n    }\n\n    pub fn with_balance_hold_result(\n        &mut self,\n        hold_result: &BalanceHoldResult,\n    ) -> Result<&mut Self, ()> {\n        if let BalanceHoldResult::RootNotFound = hold_result {\n            return Err(());\n        }\n        if let BalanceHoldResult::Success { effects, .. 
} = hold_result {\n            self.with_appended_effects(*effects.clone());\n        }\n        if let (None, BalanceHoldResult::Failure(_)) = (&self.error_message, hold_result) {\n            self.error_message = hold_result.error_message();\n            return Ok(self);\n        }\n        Ok(self)\n    }\n\n    pub fn with_refund_amount(&mut self, refund: U512) -> &mut Self {\n        self.refund = refund;\n        self\n    }\n\n    pub fn with_invalid_wasm_v1_request(\n        &mut self,\n        invalid_request: &InvalidWasmV1Request,\n    ) -> &mut Self {\n        if self.error_message.is_none() {\n            self.error_message = Some(format!(\"{}\", invalid_request));\n        }\n        self\n    }\n\n    pub fn with_auction_method_error(\n        &mut self,\n        auction_method_error: &AuctionMethodError,\n    ) -> &mut Self {\n        if self.error_message.is_none() {\n            self.error_message = Some(format!(\"{}\", auction_method_error));\n        }\n        self\n    }\n\n    pub fn with_transfer_result(\n        &mut self,\n        transfer_result: TransferResult,\n    ) -> Result<&mut Self, ()> {\n        if let TransferResult::RootNotFound = transfer_result {\n            return Err(());\n        }\n        if let (None, TransferResult::Failure(err)) = (&self.error_message, &transfer_result) {\n            self.error_message = Some(format!(\"{}\", err));\n        }\n        if let TransferResult::Success {\n            effects,\n            transfers,\n            cache: _,\n        } = transfer_result\n        {\n            self.with_appended_transfers(&mut transfers.clone())\n                .with_appended_effects(effects);\n        }\n        Ok(self)\n    }\n\n    pub fn with_burn_result(&mut self, burn_result: BurnResult) -> Result<&mut Self, ()> {\n        if let BurnResult::RootNotFound = burn_result {\n            return Err(());\n        }\n        if let (None, BurnResult::Failure(err)) = (&self.error_message, &burn_result) {\n 
           self.error_message = Some(format!(\"{}\", err));\n        }\n        if let BurnResult::Success { effects, cache: _ } = burn_result {\n            self.with_appended_effects(effects);\n        }\n        Ok(self)\n    }\n\n    pub fn with_bidding_result(&mut self, bidding_result: BiddingResult) -> Result<&mut Self, ()> {\n        if let BiddingResult::RootNotFound = bidding_result {\n            return Err(());\n        }\n        if let (None, BiddingResult::Failure(err)) = (&self.error_message, &bidding_result) {\n            self.error_message = Some(format!(\"{}\", err));\n        }\n        if let BiddingResult::Success {\n            effects, transfers, ..\n        } = bidding_result\n        {\n            self.with_appended_transfers(&mut transfers.clone())\n                .with_appended_effects(effects);\n        }\n        Ok(self)\n    }\n\n    #[allow(unused)]\n    pub fn with_initiator_addr(&mut self, initiator_addr: InitiatorAddr) -> &mut Self {\n        self.initiator = initiator_addr;\n        self\n    }\n\n    pub fn with_available(&mut self, available: Option<U512>) -> &mut Self {\n        self.available = available;\n        self\n    }\n\n    pub(crate) fn build(self) -> ExecutionArtifact {\n        let actual_cost = self.cost_to_use();\n        let result = ExecutionResultV2 {\n            effects: self.effects,\n            transfers: self.transfers,\n            initiator: self.initiator,\n            refund: self.refund,\n            limit: self.limit,\n            consumed: self.consumed,\n            cost: actual_cost,\n            current_price: self.current_price,\n            size_estimate: self.size_estimate,\n            error_message: self.error_message,\n        };\n        let execution_result = ExecutionResult::V2(Box::new(result));\n        ExecutionArtifact::new(self.hash, self.header, execution_result, self.messages)\n    }\n\n    /// Adds the error message from a `InvalidRequest` to the artifact.\n    pub(crate) 
fn with_invalid_wasm_v2_request(\n        &mut self,\n        ire: wasm_v2_request::InvalidRequest,\n    ) -> &mut Self {\n        if self.error_message.is_none() {\n            self.error_message = Some(format!(\"{}\", ire));\n        }\n        self\n    }\n\n    /// Adds the result from a `WasmV2Result` to the artifact.\n    pub(crate) fn with_wasm_v2_result(&mut self, result: WasmV2Result) -> &mut Self {\n        self.with_added_consumed(Gas::from(result.gas_usage().gas_spent()));\n\n        // TODO: Use system message to notify about contract hash\n\n        self.with_appended_effects(result.effects().clone());\n\n        self\n    }\n\n    /// Adds the error message from a `WasmV2Error` to the artifact.\n    #[inline]\n    pub(crate) fn with_wasm_v2_error(&mut self, error: WasmV2Error) -> &mut Self {\n        self.with_error_message(error.to_string());\n        self\n    }\n}\n\n/// Effects from running step and the next era validators that are gathered when an era ends.\n#[derive(Clone, Debug, DataSize)]\npub(crate) struct StepOutcome {\n    /// Validator sets for all upcoming eras that have already been determined.\n    pub(crate) upcoming_era_validators: BTreeMap<EraId, BTreeMap<PublicKey, U512>>,\n    /// An [`Effects`] created by an era ending.\n    pub(crate) step_effects: Effects,\n}\n\n#[derive(Clone, Debug, DataSize, PartialEq, Eq, Serialize)]\npub(crate) struct ExecutionArtifact {\n    pub(crate) transaction_hash: TransactionHash,\n    pub(crate) transaction_header: TransactionHeader,\n    pub(crate) execution_result: ExecutionResult,\n    pub(crate) messages: Messages,\n}\n\nimpl ExecutionArtifact {\n    pub(crate) fn new(\n        transaction_hash: TransactionHash,\n        transaction_header: TransactionHeader,\n        execution_result: ExecutionResult,\n        messages: Messages,\n    ) -> Self {\n        Self {\n            transaction_hash,\n            transaction_header,\n            execution_result,\n            messages,\n        }\n    
}\n}\n\n#[doc(hidden)]\n/// A [`Block`] that was the result of execution in the `ContractRuntime` along with any execution\n/// effects it may have.\n#[derive(Clone, Debug, DataSize)]\npub struct BlockAndExecutionArtifacts {\n    /// The [`Block`] the contract runtime executed.\n    pub(crate) block: Arc<BlockV2>,\n    /// The [`ApprovalsHashes`] for the transactions in this block.\n    pub(crate) approvals_hashes: Box<ApprovalsHashes>,\n    /// The results from executing the transactions in the block.\n    pub(crate) execution_artifacts: Vec<ExecutionArtifact>,\n    /// The [`Effects`] and the upcoming validator sets determined by the `step`\n    pub(crate) step_outcome: Option<StepOutcome>,\n}\n\n/// Type representing results of the speculative execution.\n#[derive(Debug)]\npub enum SpeculativeExecutionResult {\n    InvalidTransaction(InvalidTransaction),\n    WasmV1(Box<casper_binary_port::SpeculativeExecutionResult>),\n    ReceivedV1Transaction,\n}\n\nimpl SpeculativeExecutionResult {\n    pub fn invalid_gas_limit(transaction: Transaction) -> Self {\n        match transaction {\n            Transaction::Deploy(_) => SpeculativeExecutionResult::InvalidTransaction(\n                InvalidTransaction::Deploy(InvalidDeploy::UnableToCalculateGasLimit),\n            ),\n            Transaction::V1(_) => SpeculativeExecutionResult::InvalidTransaction(\n                InvalidTransaction::V1(InvalidTransactionV1::UnableToCalculateGasLimit),\n            ),\n        }\n    }\n\n    pub fn invalid_transaction(error: InvalidTransaction) -> Self {\n        SpeculativeExecutionResult::InvalidTransaction(error)\n    }\n}\n\n/// State to use to construct the next block in the blockchain. Includes the state root hash for the\n/// execution engine as well as certain values the next header will be based on.\n#[derive(DataSize, Default, Debug, Clone, Serialize)]\npub struct ExecutionPreState {\n    /// The height of the next `Block` to be constructed. 
Note that this must match the height of\n    /// the `FinalizedBlock` used to generate the block.\n    next_block_height: u64,\n    /// The state root to use when executing deploys.\n    pre_state_root_hash: Digest,\n    /// The parent hash of the next `Block`.\n    parent_hash: BlockHash,\n    /// The accumulated seed for the pseudo-random number generator to be incorporated into the\n    /// next `Block`, where additional entropy will be introduced.\n    parent_seed: Digest,\n}\n\nimpl ExecutionPreState {\n    pub(crate) fn new(\n        next_block_height: u64,\n        pre_state_root_hash: Digest,\n        parent_hash: BlockHash,\n        parent_seed: Digest,\n    ) -> Self {\n        ExecutionPreState {\n            next_block_height,\n            pre_state_root_hash,\n            parent_hash,\n            parent_seed,\n        }\n    }\n\n    /// Creates instance of `ExecutionPreState` from given block header nad Merkle tree hash\n    /// activation point.\n    pub fn from_block_header(block_header: &BlockHeaderV2) -> Self {\n        ExecutionPreState {\n            pre_state_root_hash: *block_header.state_root_hash(),\n            next_block_height: block_header.height() + 1,\n            parent_hash: block_header.block_hash(),\n            parent_seed: *block_header.accumulated_seed(),\n        }\n    }\n\n    // The height of the next `Block` to be constructed. 
Note that this must match the height of\n    /// the `FinalizedBlock` used to generate the block.\n    pub fn next_block_height(&self) -> u64 {\n        self.next_block_height\n    }\n    /// The state root to use when executing deploys.\n    pub fn pre_state_root_hash(&self) -> Digest {\n        self.pre_state_root_hash\n    }\n    /// The parent hash of the next `Block`.\n    pub fn parent_hash(&self) -> BlockHash {\n        self.parent_hash\n    }\n    /// The accumulated seed for the pseudo-random number generator to be incorporated into the\n    /// next `Block`, where additional entropy will be introduced.\n    pub fn parent_seed(&self) -> Digest {\n        self.parent_seed\n    }\n}\n\n#[derive(Clone, Copy, Ord, Eq, PartialOrd, PartialEq, DataSize, Debug)]\npub(crate) struct EraPrice {\n    era_id: EraId,\n    gas_price: u8,\n}\n\nimpl EraPrice {\n    pub(crate) fn new(era_id: EraId, gas_price: u8) -> Self {\n        Self { era_id, gas_price }\n    }\n\n    pub(crate) fn gas_price(&self) -> u8 {\n        self.gas_price\n    }\n\n    pub(crate) fn maybe_gas_price_for_era_id(&self, era_id: EraId) -> Option<u8> {\n        if self.era_id == era_id {\n            return Some(self.gas_price);\n        }\n\n        None\n    }\n}\n"
  },
  {
    "path": "node/src/components/contract_runtime/utils.rs",
    "content": "use casper_executor_wasm::ExecutorV2;\nuse num_rational::Ratio;\nuse once_cell::sync::Lazy;\nuse std::{\n    cmp,\n    collections::{BTreeMap, HashMap},\n    fmt::Debug,\n    ops::Range,\n    sync::{Arc, Mutex},\n    time::Instant,\n};\nuse tracing::{debug, error, info, warn};\n\nuse crate::{\n    contract_runtime::{\n        exec_queue::{ExecQueue, QueueItem},\n        execute_finalized_block,\n        metrics::Metrics,\n        rewards, BlockAndExecutionArtifacts, BlockExecutionError, ExecutionPreState, StepOutcome,\n    },\n    effect::{\n        announcements::{ContractRuntimeAnnouncement, FatalAnnouncement, MetaBlockAnnouncement},\n        requests::{ContractRuntimeRequest, StorageRequest},\n        EffectBuilder,\n    },\n    fatal,\n    types::{ExecutableBlock, MetaBlock, MetaBlockState},\n};\n\nuse casper_binary_port::SpeculativeExecutionResult;\nuse casper_execution_engine::engine_state::{ExecutionEngineV1, WasmV1Result};\nuse casper_storage::{\n    data_access_layer::{\n        DataAccessLayer, FlushRequest, FlushResult, ProtocolUpgradeRequest, ProtocolUpgradeResult,\n        TransferResult,\n    },\n    global_state::state::{lmdb::LmdbGlobalState, CommitProvider, StateProvider},\n};\nuse casper_types::{BlockHash, Chainspec, Digest, EraId, Gas, Key, ProtocolUpgradeConfig};\n\n/// Maximum number of resource intensive tasks that can be run in parallel.\n///\n/// TODO: Fine tune this constant to the machine executing the node.\nconst MAX_PARALLEL_INTENSIVE_TASKS: usize = 4;\n/// Semaphore enforcing maximum number of parallel resource intensive tasks.\nstatic INTENSIVE_TASKS_SEMAPHORE: Lazy<tokio::sync::Semaphore> =\n    Lazy::new(|| tokio::sync::Semaphore::new(MAX_PARALLEL_INTENSIVE_TASKS));\n\n/// Asynchronously runs a resource intensive task.\n/// At most `MAX_PARALLEL_INTENSIVE_TASKS` are being run in parallel at any time.\n///\n/// The task is a closure that takes no arguments and returns a value.\n/// This function returns a future for 
that value.\npub(super) async fn run_intensive_task<T, V>(task: T) -> V\nwhere\n    T: 'static + Send + FnOnce() -> V,\n    V: 'static + Send + Debug,\n{\n    // This will never panic since the semaphore is never closed.\n    let _permit = INTENSIVE_TASKS_SEMAPHORE.acquire().await.unwrap();\n    let result = tokio::task::spawn_blocking(task).await;\n    match result {\n        Ok(ret) => ret,\n        Err(err) => {\n            error!(\"{:?}\", err);\n            panic!(\"intensive contract runtime task errored: {:?}\", err);\n        }\n    }\n}\n\n#[allow(clippy::too_many_arguments)]\npub(super) async fn exec_or_requeue<REv>(\n    data_access_layer: Arc<DataAccessLayer<LmdbGlobalState>>,\n    execution_engine_v1: Arc<ExecutionEngineV1>,\n    execution_engine_v2: ExecutorV2,\n    chainspec: Arc<Chainspec>,\n    metrics: Arc<Metrics>,\n    mut exec_queue: ExecQueue,\n    shared_pre_state: Arc<Mutex<ExecutionPreState>>,\n    current_pre_state: ExecutionPreState,\n    effect_builder: EffectBuilder<REv>,\n    mut executable_block: ExecutableBlock,\n    key_block_height_for_activation_point: u64,\n    mut meta_block_state: MetaBlockState,\n) where\n    REv: From<ContractRuntimeRequest>\n        + From<ContractRuntimeAnnouncement>\n        + From<StorageRequest>\n        + From<MetaBlockAnnouncement>\n        + From<FatalAnnouncement>\n        + Send,\n{\n    debug!(\"ContractRuntime: execute_finalized_block_or_requeue\");\n    let contract_runtime_metrics = metrics.clone();\n    let is_era_end = executable_block.era_report.is_some();\n    let current_gas_price = executable_block.current_gas_price;\n    let era_id = executable_block.era_id;\n    let block_height = executable_block.height;\n\n    if is_era_end && executable_block.rewards.is_none() {\n        executable_block.rewards = Some(if chainspec.core_config.compute_rewards {\n            let rewards = match rewards::fetch_data_and_calculate_rewards_for_era(\n                effect_builder,\n                
data_access_layer.clone(),\n                chainspec.as_ref(),\n                &metrics,\n                executable_block.clone(),\n            )\n            .await\n            {\n                Ok(rewards) => rewards,\n                Err(e) => {\n                    return fatal!(effect_builder, \"Failed to compute the rewards: {e:?}\").await;\n                }\n            };\n\n            debug!(\"rewards successfully computed\");\n\n            rewards\n        } else {\n            BTreeMap::new()\n        });\n    }\n\n    let maybe_next_era_gas_price = if is_era_end && executable_block.next_era_gas_price.is_none() {\n        let max_block_size = chainspec.transaction_config.max_block_size as u64;\n        let block_gas_limit = chainspec.transaction_config.block_gas_limit;\n        let go_up = chainspec.vacancy_config.upper_threshold;\n        let go_down = chainspec.vacancy_config.lower_threshold;\n        let max = chainspec.vacancy_config.max_gas_price;\n        let min = chainspec.vacancy_config.min_gas_price;\n        info!(%era_id, %block_height, \"End of era calculating new gas price\");\n        let era_id = executable_block.era_id;\n        let block_height = executable_block.height;\n\n        let per_block_capacity = chainspec\n            .transaction_config\n            .transaction_v1_config\n            .get_max_block_count();\n\n        let switch_block_utilization_score = {\n            let mut has_hit_slot_limt = false;\n            let mut transaction_hash_to_lane_id = HashMap::new();\n\n            for (lane_id, transactions) in executable_block.transaction_map.iter() {\n                transaction_hash_to_lane_id.extend(\n                    transactions\n                        .iter()\n                        .map(|transaction| (transaction, *lane_id)),\n                );\n                let max_count = chainspec\n                    .transaction_config\n                    .transaction_v1_config\n                    
.get_max_transaction_count(*lane_id);\n                if max_count == transactions.len() as u64 {\n                    has_hit_slot_limt = true;\n                }\n            }\n\n            if has_hit_slot_limt {\n                100u64\n            } else if executable_block.transactions.is_empty() {\n                0u64\n            } else {\n                let size_utilization: u64 = {\n                    let total_size_of_transactions: u64 = executable_block\n                        .transactions\n                        .iter()\n                        .map(|transaction| transaction.size_estimate() as u64)\n                        .sum();\n\n                    Ratio::new(total_size_of_transactions * 100, max_block_size).to_integer()\n                };\n                let gas_utilization: u64 = {\n                    let total_gas_limit: u64 = executable_block\n                        .transactions\n                        .iter()\n                        .map(|transaction| {\n                            match transaction_hash_to_lane_id.get(&transaction.hash()) {\n                                Some(lane_id) => {\n                                    match &transaction.gas_limit(&chainspec, *lane_id) {\n                                        Ok(gas_limit) => gas_limit.value().as_u64(),\n                                        Err(_) => {\n                                            warn!(\"Unable to determine gas limit\");\n                                            0u64\n                                        }\n                                    }\n                                }\n                                None => {\n                                    warn!(\"Unable to determine gas limit\");\n                                    0u64\n                                }\n                            }\n                        })\n                        .sum();\n\n                    Ratio::new(total_gas_limit * 100, 
block_gas_limit).to_integer()\n                };\n\n                let slot_utilization = Ratio::new(\n                    executable_block.transactions.len() as u64 * 100,\n                    per_block_capacity,\n                )\n                .to_integer();\n\n                let utilization_scores = [slot_utilization, gas_utilization, size_utilization];\n\n                match utilization_scores.iter().max() {\n                    Some(max_score) => *max_score,\n                    None => {\n                        let error = BlockExecutionError::FailedToGetNewEraGasPrice { era_id };\n                        return fatal!(effect_builder, \"{}\", error).await;\n                    }\n                }\n            }\n        };\n\n        let maybe_utilization = effect_builder\n            .get_block_utilization(era_id, block_height, switch_block_utilization_score)\n            .await;\n        debug!(\n            %era_id,\n            %block_height,\n            ?maybe_utilization,\n            \"Calculated utilization for block\"\n        );\n\n        match maybe_utilization {\n            None => {\n                let error = BlockExecutionError::FailedToGetNewEraGasPrice { era_id };\n                return fatal!(effect_builder, \"{}\", error).await;\n            }\n            Some((utilization, block_count)) => {\n                let era_score = { Ratio::new(utilization, block_count).to_integer() };\n\n                let new_gas_price = if era_score >= go_up {\n                    let new_gas_price = current_gas_price.saturating_add(1);\n                    if new_gas_price > max {\n                        max\n                    } else {\n                        new_gas_price\n                    }\n                } else if era_score <= go_down {\n                    let new_gas_price = current_gas_price.saturating_sub(1);\n                    if new_gas_price <= min {\n                        min\n                    } else {\n             
           new_gas_price\n                    }\n                } else {\n                    current_gas_price\n                };\n                info!(%new_gas_price, \"Calculated new gas price\");\n                Some(new_gas_price)\n            }\n        }\n    } else if executable_block.next_era_gas_price.is_some() {\n        debug!(\n            %era_id,\n            %block_height,\n            next_era_gas_price = executable_block.next_era_gas_price,\n            \"New gas price obtained from block\"\n        );\n        executable_block.next_era_gas_price\n    } else {\n        None\n    };\n\n    let era_id = executable_block.era_id;\n\n    let last_switch_block_hash = if let Some(previous_era) = era_id.predecessor() {\n        let switch_block_header = effect_builder\n            .get_switch_block_header_by_era_id_from_storage(previous_era)\n            .await;\n        switch_block_header.map(|header| header.block_hash())\n    } else {\n        None\n    };\n\n    let task = move || {\n        debug!(\"ContractRuntime: execute_finalized_block\");\n        execute_finalized_block(\n            data_access_layer.as_ref(),\n            execution_engine_v1.as_ref(),\n            execution_engine_v2,\n            chainspec.as_ref(),\n            Some(contract_runtime_metrics),\n            current_pre_state,\n            executable_block,\n            key_block_height_for_activation_point,\n            current_gas_price,\n            maybe_next_era_gas_price,\n            last_switch_block_hash,\n        )\n    };\n    let BlockAndExecutionArtifacts {\n        block,\n        approvals_hashes,\n        execution_artifacts,\n        step_outcome: maybe_step_outcome,\n    } = match run_intensive_task(task).await {\n        Ok(ret) => ret,\n        Err(error) => {\n            error!(%error, \"failed to execute block\");\n            return fatal!(effect_builder, \"{}\", error).await;\n        }\n    };\n\n    let new_execution_pre_state = 
ExecutionPreState::from_block_header(block.header());\n    {\n        // The `shared_pre_state` could have been set to a block we just fully synced after\n        // doing a sync leap (via a call to `set_initial_state`).  We should not allow a block\n        // which completed execution just after this to set the `shared_pre_state` back to an\n        // earlier block height.\n        let mut shared_pre_state = shared_pre_state.lock().unwrap();\n        if shared_pre_state.next_block_height() < new_execution_pre_state.next_block_height() {\n            debug!(\n                next_block_height = new_execution_pre_state.next_block_height(),\n                \"ContractRuntime: updating shared pre-state\",\n            );\n            *shared_pre_state = new_execution_pre_state.clone();\n        } else {\n            debug!(\n                current_next_block_height = shared_pre_state.next_block_height(),\n                attempted_next_block_height = new_execution_pre_state.next_block_height(),\n                \"ContractRuntime: not updating shared pre-state to older state\"\n            );\n        }\n    }\n\n    let current_era_id = block.era_id();\n\n    if let Some(StepOutcome {\n        step_effects,\n        mut upcoming_era_validators,\n    }) = maybe_step_outcome\n    {\n        effect_builder\n            .announce_commit_step_success(current_era_id, step_effects)\n            .await;\n\n        if current_era_id.is_genesis() {\n            match upcoming_era_validators\n                .get(&current_era_id.successor())\n                .cloned()\n            {\n                Some(era_validators) => {\n                    upcoming_era_validators.insert(EraId::default(), era_validators);\n                }\n                None => {\n                    fatal!(effect_builder, \"Missing era 1 validators\").await;\n                }\n            }\n        }\n\n        effect_builder\n            .announce_upcoming_era_validators(current_era_id, 
upcoming_era_validators)\n            .await;\n    }\n\n    debug!(\n        block_hash = %block.hash(),\n        height = block.height(),\n        era = block.era_id().value(),\n        is_switch_block = block.is_switch_block(),\n        \"executed block\"\n    );\n\n    let artifacts_map: HashMap<_, _> = execution_artifacts\n        .iter()\n        .cloned()\n        .map(|artifact| (artifact.transaction_hash, artifact.execution_result))\n        .collect();\n\n    if meta_block_state.register_as_stored().was_updated() {\n        debug!(\n            %era_id,\n            %block_height,\n            \"Storing block after execution\"\n        );\n        effect_builder\n            .put_executed_block_to_storage(Arc::clone(&block), approvals_hashes, artifacts_map)\n            .await;\n    } else {\n        debug!(\n            %era_id,\n            %block_height,\n            \"Block was already stored before execution, storing approvals\"\n        );\n        effect_builder\n            .put_approvals_hashes_to_storage(approvals_hashes)\n            .await;\n        effect_builder\n            .put_execution_artifacts_to_storage(\n                *block.hash(),\n                block.height(),\n                block.era_id(),\n                artifacts_map,\n            )\n            .await;\n    }\n    if meta_block_state\n        .register_as_executed()\n        .was_already_registered()\n    {\n        error!(\n            block_hash = %block.hash(),\n            block_height = block.height(),\n            ?meta_block_state,\n            \"should not execute the same block more than once\"\n        );\n    }\n\n    let meta_block = MetaBlock::new_forward(block, execution_artifacts, meta_block_state);\n    effect_builder.announce_meta_block(meta_block).await;\n\n    // If the child is already finalized, start execution.\n    let next_block = exec_queue.remove(new_execution_pre_state.next_block_height());\n\n    if let Some(next_era_gas_price) = 
maybe_next_era_gas_price {\n        effect_builder\n            .announce_new_era_gas_price(current_era_id.successor(), next_era_gas_price)\n            .await;\n    }\n\n    // We schedule the next block from the queue to be executed:\n    if let Some(QueueItem {\n        executable_block,\n        meta_block_state,\n    }) = next_block\n    {\n        metrics.exec_queue_size.dec();\n        debug!(\"ContractRuntime: next block enqueue_block_for_execution\");\n        effect_builder\n            .enqueue_block_for_execution(executable_block, meta_block_state)\n            .await;\n    }\n}\n\npub(super) async fn handle_protocol_upgrade<REv>(\n    effect_builder: EffectBuilder<REv>,\n    data_access_layer: Arc<DataAccessLayer<LmdbGlobalState>>,\n    metrics: Arc<Metrics>,\n    upgrade_config: ProtocolUpgradeConfig,\n    next_block_height: u64,\n    parent_hash: BlockHash,\n    parent_seed: Digest,\n) where\n    REv: From<ContractRuntimeRequest>\n        + From<ContractRuntimeAnnouncement>\n        + From<StorageRequest>\n        + From<MetaBlockAnnouncement>\n        + From<FatalAnnouncement>\n        + Send,\n{\n    debug!(?upgrade_config, \"upgrade\");\n    let start = Instant::now();\n    let upgrade_request = ProtocolUpgradeRequest::new(upgrade_config);\n\n    let result = run_intensive_task(move || {\n        let result = data_access_layer.protocol_upgrade(upgrade_request);\n        if result.is_success() {\n            info!(\"committed upgrade\");\n            metrics\n                .commit_upgrade\n                .observe(start.elapsed().as_secs_f64());\n            let flush_req = FlushRequest::new();\n            if let FlushResult::Failure(err) = data_access_layer.flush(flush_req) {\n                return Err(format!(\"{:?}\", err));\n            }\n        }\n\n        Ok(result)\n    })\n    .await;\n\n    match result {\n        Err(error_msg) => {\n            // The only way this happens is if there is a problem in the flushing.\n            
error!(%error_msg, \":Error in post upgrade flush\");\n            fatal!(effect_builder, \"{}\", error_msg).await;\n        }\n        Ok(result) => match result {\n            ProtocolUpgradeResult::RootNotFound => {\n                let error_msg = \"Root not found for protocol upgrade\";\n                fatal!(effect_builder, \"{}\", error_msg).await;\n            }\n            ProtocolUpgradeResult::Failure(err) => {\n                fatal!(effect_builder, \"{:?}\", err).await;\n            }\n            ProtocolUpgradeResult::Success {\n                post_state_hash, ..\n            } => {\n                let post_upgrade_state = ExecutionPreState::new(\n                    next_block_height,\n                    post_state_hash,\n                    parent_hash,\n                    parent_seed,\n                );\n\n                effect_builder\n                    .update_contract_runtime_state(post_upgrade_state)\n                    .await\n            }\n        },\n    }\n}\n\nfn generate_range_by_index(\n    highest_era: u64,\n    batch_size: u64,\n    batch_index: u64,\n) -> Option<Range<u64>> {\n    let start = batch_index.checked_mul(batch_size)?;\n    let end = cmp::min(start.checked_add(batch_size)?, highest_era);\n    Some(start..end)\n}\n\n/// Calculates era keys to be pruned.\n///\n/// Outcomes:\n/// * Ok(Some(range)) -- these keys should be pruned\n/// * Ok(None) -- nothing to do, either done, or there is not enough eras to prune\npub(super) fn calculate_prune_eras(\n    activation_era_id: EraId,\n    activation_height: u64,\n    current_height: u64,\n    batch_size: u64,\n) -> Option<Vec<Key>> {\n    if batch_size == 0 {\n        // Nothing to do, the batch size is 0.\n        return None;\n    }\n\n    let nth_chunk: u64 = match current_height.checked_sub(activation_height) {\n        Some(nth_chunk) => nth_chunk,\n        None => {\n            // Time went backwards, programmer error, etc\n            error!(\n                
%activation_era_id,\n                activation_height,\n                current_height,\n                batch_size,\n                \"unable to calculate eras to prune (activation height higher than the block height)\"\n            );\n            panic!(\"activation height higher than the block height\");\n        }\n    };\n\n    let range = generate_range_by_index(activation_era_id.value(), batch_size, nth_chunk)?;\n\n    if range.is_empty() {\n        return None;\n    }\n\n    Some(range.map(EraId::new).map(Key::EraInfo).collect())\n}\n\npub(crate) fn spec_exec_from_transfer_result(\n    limit: Gas,\n    transfer_result: TransferResult,\n    block_hash: BlockHash,\n) -> SpeculativeExecutionResult {\n    let transfers = transfer_result.transfers().to_owned();\n    let consumed = limit;\n    let effects = transfer_result.effects().to_owned();\n    let messages = vec![];\n    let error_msg = transfer_result\n        .error()\n        .to_owned()\n        .map(|err| format!(\"{:?}\", err));\n\n    SpeculativeExecutionResult::new(\n        block_hash, transfers, limit, consumed, effects, messages, error_msg,\n    )\n}\n\npub(crate) fn spec_exec_from_wasm_v1_result(\n    wasm_v1_result: WasmV1Result,\n    block_hash: BlockHash,\n) -> SpeculativeExecutionResult {\n    let transfers = wasm_v1_result.transfers().to_owned();\n    let limit = wasm_v1_result.limit().to_owned();\n    let consumed = wasm_v1_result.consumed().to_owned();\n    let effects = wasm_v1_result.effects().to_owned();\n    let messages = wasm_v1_result.messages().to_owned();\n    let error_msg = wasm_v1_result\n        .error()\n        .to_owned()\n        .map(|err| format!(\"{:?}\", err));\n\n    SpeculativeExecutionResult::new(\n        block_hash, transfers, limit, consumed, effects, messages, error_msg,\n    )\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn calculation_is_safe_with_invalid_input() {\n        assert_eq!(calculate_prune_eras(EraId::new(0), 0, 0, 0), 
None);\n        assert_eq!(calculate_prune_eras(EraId::new(0), 0, 0, 5), None);\n        assert_eq!(calculate_prune_eras(EraId::new(u64::MAX), 0, 0, 0), None);\n        assert_eq!(\n            calculate_prune_eras(EraId::new(u64::MAX), 1, u64::MAX, u64::MAX),\n            None\n        );\n    }\n\n    #[test]\n    fn calculation_is_lazy() {\n        // NOTE: Range of EraInfos is lazy, so it does not consume memory, but getting the last\n        // batch out of u64::MAX of erainfos needs to iterate over all chunks.\n        assert!(calculate_prune_eras(EraId::new(u64::MAX), 0, u64::MAX, 100,).is_none(),);\n        assert_eq!(\n            calculate_prune_eras(EraId::new(u64::MAX), 1, 100, 100)\n                .unwrap()\n                .len(),\n            100\n        );\n    }\n\n    #[test]\n    fn should_calculate_prune_eras() {\n        let activation_height = 50;\n        let current_height = 50;\n        const ACTIVATION_POINT_ERA_ID: EraId = EraId::new(5);\n\n        // batch size 1\n\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height,\n                1,\n            ),\n            Some(vec![Key::EraInfo(EraId::new(0))])\n        );\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height + 1,\n                1,\n            ),\n            Some(vec![Key::EraInfo(EraId::new(1))])\n        );\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height + 2,\n                1,\n            ),\n            Some(vec![Key::EraInfo(EraId::new(2))])\n        );\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height + 3,\n      
          1,\n            ),\n            Some(vec![Key::EraInfo(EraId::new(3))])\n        );\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height + 4,\n                1,\n            ),\n            Some(vec![Key::EraInfo(EraId::new(4))])\n        );\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height + 5,\n                1\n            ),\n            None,\n        );\n        assert_eq!(\n            calculate_prune_eras(ACTIVATION_POINT_ERA_ID, activation_height, u64::MAX, 1),\n            None,\n        );\n\n        // batch size 2\n\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height,\n                2,\n            ),\n            Some(vec![\n                Key::EraInfo(EraId::new(0)),\n                Key::EraInfo(EraId::new(1)),\n            ])\n        );\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height + 1,\n                2,\n            ),\n            Some(vec![\n                Key::EraInfo(EraId::new(2)),\n                Key::EraInfo(EraId::new(3)),\n            ])\n        );\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height + 2,\n                2,\n            ),\n            Some(vec![Key::EraInfo(EraId::new(4))])\n        );\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height + 3,\n                2,\n            ),\n            None\n        );\n        
assert_eq!(\n            calculate_prune_eras(ACTIVATION_POINT_ERA_ID, activation_height, u64::MAX, 2),\n            None,\n        );\n\n        // batch size 3\n\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height,\n                3,\n            ),\n            Some(vec![\n                Key::EraInfo(EraId::new(0)),\n                Key::EraInfo(EraId::new(1)),\n                Key::EraInfo(EraId::new(2)),\n            ])\n        );\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height + 1,\n                3,\n            ),\n            Some(vec![\n                Key::EraInfo(EraId::new(3)),\n                Key::EraInfo(EraId::new(4)),\n            ])\n        );\n\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height + 2,\n                3,\n            ),\n            None\n        );\n        assert_eq!(\n            calculate_prune_eras(ACTIVATION_POINT_ERA_ID, activation_height, u64::MAX, 3),\n            None,\n        );\n\n        // batch size 4\n\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height,\n                4,\n            ),\n            Some(vec![\n                Key::EraInfo(EraId::new(0)),\n                Key::EraInfo(EraId::new(1)),\n                Key::EraInfo(EraId::new(2)),\n                Key::EraInfo(EraId::new(3)),\n            ])\n        );\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height + 1,\n                4,\n            ),\n            
Some(vec![Key::EraInfo(EraId::new(4))])\n        );\n\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height + 2,\n                4,\n            ),\n            None\n        );\n        assert_eq!(\n            calculate_prune_eras(ACTIVATION_POINT_ERA_ID, activation_height, u64::MAX, 4),\n            None,\n        );\n\n        // batch size 5\n\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height,\n                5,\n            ),\n            Some(vec![\n                Key::EraInfo(EraId::new(0)),\n                Key::EraInfo(EraId::new(1)),\n                Key::EraInfo(EraId::new(2)),\n                Key::EraInfo(EraId::new(3)),\n                Key::EraInfo(EraId::new(4)),\n            ])\n        );\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height + 1,\n                5\n            ),\n            None,\n        );\n\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height + 2,\n                5,\n            ),\n            None\n        );\n        assert_eq!(\n            calculate_prune_eras(ACTIVATION_POINT_ERA_ID, activation_height, u64::MAX, 5),\n            None,\n        );\n\n        // batch size 6\n\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height,\n                6,\n            ),\n            Some(vec![\n                Key::EraInfo(EraId::new(0)),\n                Key::EraInfo(EraId::new(1)),\n                Key::EraInfo(EraId::new(2)),\n                
Key::EraInfo(EraId::new(3)),\n                Key::EraInfo(EraId::new(4)),\n            ])\n        );\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height + 1,\n                6\n            ),\n            None,\n        );\n\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height + 2,\n                6,\n            ),\n            None\n        );\n        assert_eq!(\n            calculate_prune_eras(ACTIVATION_POINT_ERA_ID, activation_height, u64::MAX, 6),\n            None,\n        );\n\n        // batch size max\n\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height,\n                u64::MAX,\n            ),\n            Some(vec![\n                Key::EraInfo(EraId::new(0)),\n                Key::EraInfo(EraId::new(1)),\n                Key::EraInfo(EraId::new(2)),\n                Key::EraInfo(EraId::new(3)),\n                Key::EraInfo(EraId::new(4)),\n            ])\n        );\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height + 1,\n                u64::MAX,\n            ),\n            None,\n        );\n\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                current_height + 2,\n                u64::MAX,\n            ),\n            None\n        );\n        assert_eq!(\n            calculate_prune_eras(\n                ACTIVATION_POINT_ERA_ID,\n                activation_height,\n                u64::MAX,\n                u64::MAX,\n            ),\n            None,\n        );\n    }\n}\n"
  },
  {
    "path": "node/src/components/contract_runtime.rs",
    "content": "//! Contract Runtime component.\n\nmod config;\nmod error;\nmod event;\nmod exec_queue;\nmod metrics;\nmod operations;\nmod rewards;\n#[cfg(test)]\nmod tests;\nmod types;\nmod utils;\n\nuse std::{\n    cmp::Ordering,\n    collections::BTreeMap,\n    convert::TryInto,\n    fmt::{self, Debug, Formatter},\n    path::Path,\n    sync::{Arc, Mutex},\n    time::Instant,\n};\n\nuse casper_executor_wasm::{ExecutorConfigBuilder, ExecutorKind, ExecutorV2};\nuse datasize::DataSize;\nuse lmdb::DatabaseFlags;\nuse prometheus::Registry;\nuse tracing::{debug, error, info, trace};\n\nuse casper_execution_engine::engine_state::{EngineConfigBuilder, ExecutionEngineV1};\nuse casper_storage::{\n    data_access_layer::{\n        AddressableEntityRequest, AddressableEntityResult, BlockStore, DataAccessLayer,\n        EntryPointExistsRequest, ExecutionResultsChecksumRequest, FlushRequest, FlushResult,\n        GenesisRequest, GenesisResult, TrieRequest,\n    },\n    global_state::{\n        state::{lmdb::LmdbGlobalState, CommitProvider, StateProvider},\n        transaction_source::lmdb::LmdbEnvironment,\n        trie_store::lmdb::LmdbTrieStore,\n    },\n    system::genesis::GenesisError,\n    tracking_copy::TrackingCopyError,\n};\nuse casper_types::{\n    account::AccountHash, ActivationPoint, Chainspec, ChainspecRawBytes, ChainspecRegistry,\n    EntityAddr, EraId, Key, PublicKey,\n};\n\nuse crate::{\n    components::{fetcher::FetchResponse, Component, ComponentState},\n    contract_runtime::{types::EraPrice, utils::handle_protocol_upgrade},\n    effect::{\n        announcements::{\n            ContractRuntimeAnnouncement, FatalAnnouncement, MetaBlockAnnouncement,\n            UnexecutedBlockAnnouncement,\n        },\n        incoming::{TrieDemand, TrieRequest as TrieRequestMessage, TrieRequestIncoming},\n        requests::{ContractRuntimeRequest, NetworkRequest, StorageRequest},\n        EffectBuilder, EffectExt, Effects,\n    },\n    fatal,\n    protocol::Message,\n    
types::{\n        BlockPayload, ExecutableBlock, FinalizedBlock, InternalEraReport, MetaBlockState,\n        TrieOrChunk, TrieOrChunkId,\n    },\n    NodeRng,\n};\npub(crate) use config::Config;\npub(crate) use error::{BlockExecutionError, ConfigError, ContractRuntimeError, StateResultError};\npub(crate) use event::Event;\nuse exec_queue::{ExecQueue, QueueItem};\nuse metrics::Metrics;\n#[cfg(test)]\npub(crate) use operations::compute_execution_results_checksum;\npub use operations::execute_finalized_block;\nuse operations::speculatively_execute;\npub(crate) use types::{\n    BlockAndExecutionArtifacts, ExecutionArtifact, ExecutionPreState, SpeculativeExecutionResult,\n    StepOutcome,\n};\nuse utils::{exec_or_requeue, run_intensive_task};\n\nconst COMPONENT_NAME: &str = \"contract_runtime\";\n\npub(crate) const APPROVALS_CHECKSUM_NAME: &str = \"approvals_checksum\";\npub(crate) const EXECUTION_RESULTS_CHECKSUM_NAME: &str = \"execution_results_checksum\";\n\n/// The contract runtime components.\n#[derive(DataSize)]\npub(crate) struct ContractRuntime {\n    state: ComponentState,\n    execution_pre_state: Arc<Mutex<ExecutionPreState>>,\n    #[data_size(skip)]\n    execution_engine_v1: Arc<ExecutionEngineV1>,\n    #[data_size(skip)]\n    execution_engine_v2: ExecutorV2,\n    metrics: Arc<Metrics>,\n    /// Finalized blocks waiting for their pre-state hash to start executing.\n    exec_queue: ExecQueue,\n    /// The chainspec.\n    chainspec: Arc<Chainspec>,\n    #[data_size(skip)]\n    data_access_layer: Arc<DataAccessLayer<LmdbGlobalState>>,\n    current_gas_price: EraPrice,\n}\n\nimpl Debug for ContractRuntime {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        f.debug_struct(\"ContractRuntime\").finish()\n    }\n}\n\nimpl ContractRuntime {\n    pub(crate) fn new(\n        storage_dir: &Path,\n        contract_runtime_config: &Config,\n        chainspec: Arc<Chainspec>,\n        registry: &Registry,\n    ) -> Result<Self, ConfigError> {\n        
let execution_pre_state = Arc::new(Mutex::new(ExecutionPreState::default()));\n\n        let current_gas_price = match chainspec.protocol_config.activation_point {\n            ActivationPoint::EraId(era_id) => {\n                EraPrice::new(era_id, chainspec.vacancy_config.min_gas_price)\n            }\n            ActivationPoint::Genesis(_) => {\n                EraPrice::new(EraId::new(0), chainspec.vacancy_config.min_gas_price)\n            }\n        };\n        let enable_addressable_entity = chainspec.core_config.enable_addressable_entity;\n        let engine_config = EngineConfigBuilder::new()\n            .with_max_query_depth(contract_runtime_config.max_query_depth_or_default())\n            .with_max_associated_keys(chainspec.core_config.max_associated_keys)\n            .with_max_runtime_call_stack_height(chainspec.core_config.max_runtime_call_stack_height)\n            .with_minimum_delegation_amount(chainspec.core_config.minimum_delegation_amount)\n            .with_maximum_delegation_amount(chainspec.core_config.maximum_delegation_amount)\n            .with_strict_argument_checking(chainspec.core_config.strict_argument_checking)\n            .with_vesting_schedule_period_millis(\n                chainspec.core_config.vesting_schedule_period.millis(),\n            )\n            .with_max_delegators_per_validator(chainspec.core_config.max_delegators_per_validator)\n            .with_wasm_config(chainspec.wasm_config)\n            .with_system_config(chainspec.system_costs_config)\n            .with_administrative_accounts(chainspec.core_config.administrators.clone())\n            .with_allow_auction_bids(chainspec.core_config.allow_auction_bids)\n            .with_allow_unrestricted_transfers(chainspec.core_config.allow_unrestricted_transfers)\n            .with_refund_handling(chainspec.core_config.refund_handling)\n            .with_fee_handling(chainspec.core_config.fee_handling)\n            .with_enable_entity(enable_addressable_entity)\n      
      .with_trap_on_ambiguous_entity_version(\n                chainspec.core_config.trap_on_ambiguous_entity_version,\n            )\n            .with_protocol_version(chainspec.protocol_version())\n            .with_storage_costs(chainspec.storage_costs)\n            .with_minimum_bid_amount(chainspec.core_config.minimum_bid_amount)\n            .build();\n\n        let data_access_layer = Arc::new(\n            Self::new_data_access_layer(\n                storage_dir,\n                contract_runtime_config,\n                enable_addressable_entity,\n            )\n            .map_err(ConfigError::GlobalState)?,\n        );\n\n        let execution_engine_v1 = Arc::new(ExecutionEngineV1::new(engine_config));\n\n        let executor_v2 = {\n            let executor_config = ExecutorConfigBuilder::default()\n                .with_memory_limit(chainspec.wasm_config.v2().max_memory())\n                .with_executor_kind(ExecutorKind::Compiled)\n                .with_wasm_config(*chainspec.wasm_config.v2())\n                .with_storage_costs(chainspec.storage_costs)\n                .with_message_limits(chainspec.wasm_config.messages_limits())\n                .build()\n                .expect(\"Should build\");\n            ExecutorV2::new(executor_config, Arc::clone(&execution_engine_v1))\n        };\n\n        let metrics = Arc::new(Metrics::new(registry)?);\n\n        Ok(ContractRuntime {\n            state: ComponentState::Initialized,\n            execution_pre_state,\n            execution_engine_v1,\n            execution_engine_v2: executor_v2,\n            metrics,\n            exec_queue: Default::default(),\n            chainspec,\n            data_access_layer,\n            current_gas_price,\n        })\n    }\n\n    pub(crate) fn set_initial_state(&mut self, sequential_block_state: ExecutionPreState) {\n        let next_block_height = sequential_block_state.next_block_height();\n        let mut execution_pre_state = 
self.execution_pre_state.lock().unwrap();\n        *execution_pre_state = sequential_block_state;\n\n        let new_len = self\n            .exec_queue\n            .remove_older_then(execution_pre_state.next_block_height());\n        self.metrics.exec_queue_size.set(new_len);\n        debug!(next_block_height, \"ContractRuntime: set initial state\");\n    }\n\n    fn new_data_access_layer(\n        storage_dir: &Path,\n        contract_runtime_config: &Config,\n        enable_addressable_entity: bool,\n    ) -> Result<DataAccessLayer<LmdbGlobalState>, casper_storage::global_state::error::Error> {\n        let data_access_layer = {\n            let environment = Arc::new(LmdbEnvironment::new(\n                storage_dir,\n                contract_runtime_config.max_global_state_size_or_default(),\n                contract_runtime_config.max_readers_or_default(),\n                contract_runtime_config.manual_sync_enabled_or_default(),\n            )?);\n\n            let trie_store = Arc::new(LmdbTrieStore::new(\n                &environment,\n                None,\n                DatabaseFlags::empty(),\n            )?);\n\n            let block_store = BlockStore::new();\n\n            let max_query_depth = contract_runtime_config.max_query_depth_or_default();\n            let global_state = LmdbGlobalState::empty(\n                environment,\n                trie_store,\n                max_query_depth,\n                enable_addressable_entity,\n            )?;\n\n            DataAccessLayer {\n                state: global_state,\n                block_store,\n                max_query_depth,\n                enable_addressable_entity,\n            }\n        };\n        Ok(data_access_layer)\n    }\n\n    /// How many blocks are backed up in the queue\n    pub(crate) fn queue_depth(&self) -> usize {\n        self.exec_queue.len()\n    }\n\n    /// Commits a genesis request.\n    pub(crate) fn commit_genesis(\n        &self,\n        chainspec: 
&Chainspec,\n        chainspec_raw_bytes: &ChainspecRawBytes,\n    ) -> GenesisResult {\n        debug!(\"commit_genesis\");\n        let start = Instant::now();\n        let protocol_version = chainspec.protocol_config.version;\n        let chainspec_hash = chainspec.hash();\n        let genesis_config = chainspec.into();\n        let account_bytes = match chainspec_raw_bytes.maybe_genesis_accounts_bytes() {\n            Some(bytes) => bytes,\n            None => {\n                error!(\"failed to provide genesis account bytes in commit genesis\");\n                return GenesisResult::Failure(GenesisError::MissingGenesisAccounts);\n            }\n        };\n\n        let chainspec_registry = ChainspecRegistry::new_with_genesis(\n            chainspec_raw_bytes.chainspec_bytes(),\n            account_bytes,\n        );\n\n        let genesis_request = GenesisRequest::new(\n            chainspec_hash,\n            protocol_version,\n            genesis_config,\n            chainspec_registry,\n        );\n\n        let data_access_layer = Arc::clone(&self.data_access_layer);\n        let result = data_access_layer.genesis(genesis_request);\n        self.metrics\n            .commit_genesis\n            .observe(start.elapsed().as_secs_f64());\n        debug!(?result, \"upgrade result\");\n        if result.is_success() {\n            let flush_req = FlushRequest::new();\n            if let FlushResult::Failure(err) = data_access_layer.flush(flush_req) {\n                return GenesisResult::Failure(GenesisError::TrackingCopy(\n                    TrackingCopyError::Storage(err),\n                ));\n            }\n        }\n        result\n    }\n\n    /// Handles a contract runtime request.\n    fn handle_contract_runtime_request<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        _rng: &mut NodeRng,\n        request: ContractRuntimeRequest,\n    ) -> Effects<Event>\n    where\n        REv: From<ContractRuntimeRequest>\n         
   + From<ContractRuntimeAnnouncement>\n            + From<StorageRequest>\n            + From<MetaBlockAnnouncement>\n            + From<UnexecutedBlockAnnouncement>\n            + From<FatalAnnouncement>\n            + Send,\n    {\n        match request {\n            ContractRuntimeRequest::Query {\n                request: query_request,\n                responder,\n            } => {\n                trace!(?query_request, \"query\");\n                let metrics = Arc::clone(&self.metrics);\n                let data_access_layer = Arc::clone(&self.data_access_layer);\n                async move {\n                    let start = Instant::now();\n                    let result = data_access_layer.query(query_request);\n                    metrics.run_query.observe(start.elapsed().as_secs_f64());\n                    trace!(?result, \"query result\");\n                    responder.respond(result).await\n                }\n                .ignore()\n            }\n            ContractRuntimeRequest::QueryByPrefix {\n                request: query_request,\n                responder,\n            } => {\n                trace!(?query_request, \"query by prefix\");\n                let metrics = Arc::clone(&self.metrics);\n                let data_access_layer = Arc::clone(&self.data_access_layer);\n                async move {\n                    let start = Instant::now();\n\n                    let result = data_access_layer.prefixed_values(query_request);\n                    metrics.run_query.observe(start.elapsed().as_secs_f64());\n                    trace!(?result, \"query by prefix result\");\n                    responder.respond(result).await\n                }\n                .ignore()\n            }\n            ContractRuntimeRequest::GetBalance {\n                request: balance_request,\n                responder,\n            } => {\n                trace!(?balance_request, \"balance\");\n                let metrics = 
Arc::clone(&self.metrics);\n                let data_access_layer = Arc::clone(&self.data_access_layer);\n                async move {\n                    let start = Instant::now();\n                    let result = data_access_layer.balance(balance_request);\n                    metrics.get_balance.observe(start.elapsed().as_secs_f64());\n                    trace!(?result, \"balance result\");\n                    responder.respond(result).await\n                }\n                .ignore()\n            }\n            ContractRuntimeRequest::GetEraValidators {\n                request: era_validators_request,\n                responder,\n            } => {\n                trace!(?era_validators_request, \"get era validators request\");\n                let metrics = Arc::clone(&self.metrics);\n                let data_access_layer = Arc::clone(&self.data_access_layer);\n                async move {\n                    let start = Instant::now();\n                    let result = data_access_layer.era_validators(era_validators_request);\n                    metrics\n                        .get_era_validators\n                        .observe(start.elapsed().as_secs_f64());\n                    trace!(?result, \"era validators result\");\n                    responder.respond(result).await\n                }\n                .ignore()\n            }\n            ContractRuntimeRequest::GetSeigniorageRecipients { request, responder } => {\n                trace!(?request, \"get seigniorage recipients request\");\n                let metrics = Arc::clone(&self.metrics);\n                let data_access_layer = Arc::clone(&self.data_access_layer);\n                async move {\n                    let start = Instant::now();\n                    let result = data_access_layer.seigniorage_recipients(request);\n                    metrics\n                        .get_seigniorage_recipients\n                        .observe(start.elapsed().as_secs_f64());\n         
           trace!(?result, \"seigniorage recipients result\");\n                    responder.respond(result).await\n                }\n                .ignore()\n            }\n            ContractRuntimeRequest::GetExecutionResultsChecksum {\n                state_root_hash,\n                responder,\n            } => {\n                trace!(?state_root_hash, \"get execution results checksum request\");\n                let metrics = Arc::clone(&self.metrics);\n                let data_access_layer = Arc::clone(&self.data_access_layer);\n                async move {\n                    let start = Instant::now();\n                    let request = ExecutionResultsChecksumRequest::new(state_root_hash);\n                    let result = data_access_layer.execution_result_checksum(request);\n                    metrics\n                        .execution_results_checksum\n                        .observe(start.elapsed().as_secs_f64());\n                    trace!(?result, \"execution result checksum\");\n                    responder.respond(result).await\n                }\n                .ignore()\n            }\n            ContractRuntimeRequest::GetAddressableEntity {\n                state_root_hash,\n                entity_addr,\n                responder,\n            } => {\n                trace!(?state_root_hash, \"get addressable entity\");\n                let metrics = Arc::clone(&self.metrics);\n                let data_access_layer = Arc::clone(&self.data_access_layer);\n                async move {\n                    let start = Instant::now();\n                    let entity_key = match entity_addr {\n                        EntityAddr::SmartContract(_) | EntityAddr::System(_) => Key::AddressableEntity(entity_addr),\n                        EntityAddr::Account(account) => Key::Account(AccountHash::new(account)),\n                    };\n                    let request = AddressableEntityRequest::new(state_root_hash, entity_key);\n           
         let result = data_access_layer.addressable_entity(request);\n                    let result = match &result {\n                        AddressableEntityResult::ValueNotFound(msg) => {\n                            if entity_addr.is_contract() {\n                                trace!(%msg, \"can not read addressable entity by Key::AddressableEntity or Key::Account, will try by Key::Hash\");\n                                let entity_key = Key::Hash(entity_addr.value());\n                                let request = AddressableEntityRequest::new(state_root_hash, entity_key);\n                                data_access_layer.addressable_entity(request)\n                            }\n                            else {\n                                result\n                            }\n                        },\n                        AddressableEntityResult::RootNotFound |\n                        AddressableEntityResult::Success { .. } |\n                        AddressableEntityResult::Failure(_) => result,\n                    };\n\n                    metrics\n                        .addressable_entity\n                        .observe(start.elapsed().as_secs_f64());\n                    trace!(?result, \"get addressable entity\");\n                    responder.respond(result).await\n                }\n                .ignore()\n            }\n            ContractRuntimeRequest::GetEntryPointExists {\n                state_root_hash,\n                contract_hash,\n                entry_point_name,\n                responder,\n            } => {\n                trace!(?state_root_hash, \"get entry point\");\n                let metrics = Arc::clone(&self.metrics);\n                let data_access_layer = Arc::clone(&self.data_access_layer);\n                async move {\n                    let start = Instant::now();\n                    let request = EntryPointExistsRequest::new(\n                        state_root_hash,\n                   
     entry_point_name,\n                        contract_hash,\n                    );\n                    let result = data_access_layer.entry_point_exists(request);\n                    metrics.entry_points.observe(start.elapsed().as_secs_f64());\n                    trace!(?result, \"get addressable entity\");\n                    responder.respond(result).await\n                }\n                .ignore()\n            }\n            ContractRuntimeRequest::GetTaggedValues {\n                request: tagged_values_request,\n                responder,\n            } => {\n                trace!(?tagged_values_request, \"tagged values request\");\n                let metrics = Arc::clone(&self.metrics);\n                let data_access_layer = Arc::clone(&self.data_access_layer);\n                async move {\n                    let start = Instant::now();\n                    let result = data_access_layer.tagged_values(tagged_values_request);\n                    metrics\n                        .get_all_values\n                        .observe(start.elapsed().as_secs_f64());\n                    trace!(?result, \"get all values result\");\n                    responder.respond(result).await\n                }\n                .ignore()\n            }\n            // trie related events\n            ContractRuntimeRequest::GetTrie {\n                request: trie_request,\n                responder,\n            } => {\n                trace!(?trie_request, \"trie request\");\n                let metrics = Arc::clone(&self.metrics);\n                let data_access_layer = Arc::clone(&self.data_access_layer);\n                async move {\n                    let start = Instant::now();\n                    let result = data_access_layer.trie(trie_request);\n                    metrics.get_trie.observe(start.elapsed().as_secs_f64());\n                    trace!(?result, \"trie response\");\n                    responder.respond(result).await\n                
}\n                .ignore()\n            }\n            ContractRuntimeRequest::PutTrie {\n                request: put_trie_request,\n                responder,\n            } => {\n                trace!(?put_trie_request, \"put trie request\");\n                let metrics = Arc::clone(&self.metrics);\n                let data_access_layer = Arc::clone(&self.data_access_layer);\n                async move {\n                    let start = Instant::now();\n                    let result = data_access_layer.put_trie(put_trie_request);\n                    let flush_req = FlushRequest::new();\n                    // PERF: consider flushing periodically.\n                    if let FlushResult::Failure(gse) = data_access_layer.flush(flush_req) {\n                        fatal!(effect_builder, \"error flushing data environment {:?}\", gse).await;\n                    }\n                    metrics.put_trie.observe(start.elapsed().as_secs_f64());\n                    trace!(?result, \"put trie response\");\n                    responder.respond(result).await\n                }\n                .ignore()\n            }\n            ContractRuntimeRequest::UpdatePreState { new_pre_state } => {\n                let next_block_height = new_pre_state.next_block_height();\n                self.set_initial_state(new_pre_state);\n                let current_price = self.current_gas_price.gas_price();\n                async move {\n                    let block_header = match effect_builder\n                        .get_highest_complete_block_header_from_storage()\n                        .await\n                    {\n                        Some(header)\n                            if header.is_switch_block()\n                                && (header.height() + 1 == next_block_height) =>\n                        {\n                            header\n                        }\n                        Some(_) => {\n                            return fatal!(\n              
                  effect_builder,\n                                \"Latest complete block is not a switch block to update state\"\n                            )\n                            .await;\n                        }\n                        None => {\n                            return fatal!(\n                                effect_builder,\n                                \"No complete block header found to update post upgrade state\"\n                            )\n                            .await;\n                        }\n                    };\n\n                    let payload = BlockPayload::new(\n                        BTreeMap::new(),\n                        vec![],\n                        Default::default(),\n                        false,\n                        current_price,\n                    );\n\n                    let finalized_block = FinalizedBlock::new(\n                        payload,\n                        Some(InternalEraReport::default()),\n                        block_header.timestamp(),\n                        block_header.next_block_era_id(),\n                        next_block_height,\n                        PublicKey::System,\n                    );\n\n                    info!(\"Enqueuing block for execution post state refresh\");\n\n                    effect_builder\n                        .enqueue_block_for_execution(\n                            ExecutableBlock::from_finalized_block_and_transactions(\n                                finalized_block,\n                                vec![],\n                            ),\n                            MetaBlockState::new_not_to_be_gossiped(),\n                        )\n                        .await;\n                }\n                .ignore()\n            }\n            ContractRuntimeRequest::DoProtocolUpgrade {\n                protocol_upgrade_config,\n                next_block_height,\n                parent_hash,\n                parent_seed,\n  
          } => {\n                let mut effects = Effects::new();\n                let data_access_layer = Arc::clone(&self.data_access_layer);\n                let metrics = Arc::clone(&self.metrics);\n                effects.extend(\n                    handle_protocol_upgrade(\n                        effect_builder,\n                        data_access_layer,\n                        metrics,\n                        protocol_upgrade_config,\n                        next_block_height,\n                        parent_hash,\n                        parent_seed,\n                    )\n                    .ignore(),\n                );\n                effects\n            }\n            ContractRuntimeRequest::EnqueueBlockForExecution {\n                executable_block,\n                key_block_height_for_activation_point,\n                meta_block_state,\n            } => {\n                let mut effects = Effects::new();\n                let mut exec_queue = self.exec_queue.clone();\n                let finalized_block_height = executable_block.height;\n                let era_id = executable_block.era_id;\n                let current_pre_state = self.execution_pre_state.lock().unwrap();\n                let next_block_height = current_pre_state.next_block_height();\n                match finalized_block_height.cmp(&next_block_height) {\n                    // An old block: it won't be executed:\n                    Ordering::Less => {\n                        debug!(\n                            %era_id,\n                            \"ContractRuntime: finalized block({}) precedes expected next block({})\",\n                            finalized_block_height,\n                            next_block_height,\n                        );\n                        effects.extend(\n                            effect_builder\n                                .announce_unexecuted_block(finalized_block_height)\n                                .ignore(),\n         
               );\n                    }\n                    // This is a future block, we store it into exec_queue, to be executed later:\n                    Ordering::Greater => {\n                        debug!(\n                            %era_id,\n                            \"ContractRuntime: enqueuing({}) waiting for({})\",\n                            finalized_block_height, next_block_height\n                        );\n                        info!(\n                            \"ContractRuntime: enqueuing finalized block({}) with {} transactions \\\n                            for execution\",\n                            finalized_block_height,\n                            executable_block.transactions.len()\n                        );\n                        exec_queue.insert(\n                            finalized_block_height,\n                            QueueItem {\n                                executable_block,\n                                meta_block_state,\n                            },\n                        );\n                    }\n                    // This is the next block to be executed, we do it right away:\n                    Ordering::Equal => {\n                        info!(\n                            \"ContractRuntime: execute finalized block({}) with {} transactions\",\n                            finalized_block_height,\n                            executable_block.transactions.len()\n                        );\n                        let data_access_layer = Arc::clone(&self.data_access_layer);\n                        let execution_engine_v1 = Arc::clone(&self.execution_engine_v1);\n                        let execution_engine_v2 = self.execution_engine_v2.clone();\n                        let chainspec = Arc::clone(&self.chainspec);\n                        let metrics = Arc::clone(&self.metrics);\n                        let shared_pre_state = Arc::clone(&self.execution_pre_state);\n                        
effects.extend(\n                            exec_or_requeue(\n                                data_access_layer,\n                                execution_engine_v1,\n                                execution_engine_v2,\n                                chainspec,\n                                metrics,\n                                exec_queue,\n                                shared_pre_state,\n                                current_pre_state.clone(),\n                                effect_builder,\n                                executable_block,\n                                key_block_height_for_activation_point,\n                                meta_block_state,\n                            )\n                            .ignore(),\n                        )\n                    }\n                }\n                self.metrics\n                    .exec_queue_size\n                    .set(self.exec_queue.len().try_into().unwrap_or(i64::MIN));\n                effects\n            }\n            ContractRuntimeRequest::SpeculativelyExecute {\n                block_header,\n                transaction,\n                responder,\n            } => {\n                let chainspec = Arc::clone(&self.chainspec);\n                let data_access_layer = Arc::clone(&self.data_access_layer);\n                let execution_engine_v1 = Arc::clone(&self.execution_engine_v1);\n                async move {\n                    let result = run_intensive_task(move || {\n                        speculatively_execute(\n                            data_access_layer.as_ref(),\n                            chainspec.as_ref(),\n                            execution_engine_v1.as_ref(),\n                            *block_header,\n                            *transaction,\n                        )\n                    })\n                    .await;\n                    responder.respond(result).await\n                }\n                .ignore()\n            }\n     
       ContractRuntimeRequest::GetEraGasPrice { era_id, responder } => responder\n                .respond(self.current_gas_price.maybe_gas_price_for_era_id(era_id))\n                .ignore(),\n            ContractRuntimeRequest::UpdateRuntimePrice(era_id, new_gas_price) => {\n                self.current_gas_price = EraPrice::new(era_id, new_gas_price);\n                Effects::new()\n            }\n        }\n    }\n\n    /// Handles an incoming request to get a trie.\n    fn handle_trie_request<REv>(\n        &self,\n        effect_builder: EffectBuilder<REv>,\n        TrieRequestIncoming { sender, message }: TrieRequestIncoming,\n    ) -> Effects<Event>\n    where\n        REv: From<NetworkRequest<Message>> + Send,\n    {\n        let TrieRequestMessage(ref serialized_id) = *message;\n        let fetch_response = match self.fetch_trie_local(serialized_id) {\n            Ok(fetch_response) => fetch_response,\n            Err(error) => {\n                debug!(\"failed to get trie: {}\", error);\n                return Effects::new();\n            }\n        };\n\n        match Message::new_get_response(&fetch_response) {\n            Ok(message) => effect_builder.send_message(sender, message).ignore(),\n            Err(error) => {\n                error!(\"failed to create get-response: {}\", error);\n                Effects::new()\n            }\n        }\n    }\n\n    /// Handles an incoming demand for a trie.\n    fn handle_trie_demand(\n        &self,\n        TrieDemand {\n            request_msg,\n            auto_closing_responder,\n            ..\n        }: TrieDemand,\n    ) -> Effects<Event> {\n        let TrieRequestMessage(ref serialized_id) = *request_msg;\n        let fetch_response = match self.fetch_trie_local(serialized_id) {\n            Ok(fetch_response) => fetch_response,\n            Err(error) => {\n                // Something is wrong in our trie store, but be courteous and still send a reply.\n                debug!(\"failed to get 
trie: {}\", error);\n                return auto_closing_responder.respond_none().ignore();\n            }\n        };\n\n        match Message::new_get_response(&fetch_response) {\n            Ok(message) => auto_closing_responder.respond(message).ignore(),\n            Err(error) => {\n                // This should never happen, but if it does, we let the peer know we cannot help.\n                error!(\"failed to create get-response: {}\", error);\n                auto_closing_responder.respond_none().ignore()\n            }\n        }\n    }\n\n    /// Reads the trie (or chunk of a trie) under the given key and index.\n    fn fetch_trie_local(\n        &self,\n        serialized_id: &[u8],\n    ) -> Result<FetchResponse<TrieOrChunk, TrieOrChunkId>, ContractRuntimeError> {\n        trace!(?serialized_id, \"get_trie\");\n        let trie_or_chunk_id: TrieOrChunkId = bincode::deserialize(serialized_id)?;\n        let data_access_layer = Arc::clone(&self.data_access_layer);\n        let maybe_trie = {\n            let start = Instant::now();\n            let TrieOrChunkId(chunk_index, trie_key) = trie_or_chunk_id;\n            let req = TrieRequest::new(trie_key, Some(chunk_index));\n            let maybe_raw = data_access_layer\n                .trie(req)\n                .into_raw()\n                .map_err(ContractRuntimeError::FailedToRetrieveTrieById)?;\n            let ret = match maybe_raw {\n                Some(raw) => Some(TrieOrChunk::new(raw.into(), chunk_index)?),\n                None => None,\n            };\n            self.metrics.get_trie.observe(start.elapsed().as_secs_f64());\n            ret\n        };\n        Ok(FetchResponse::from_opt(trie_or_chunk_id, maybe_trie))\n    }\n\n    /// Returns data_access_layer, for testing only.\n    #[cfg(test)]\n    pub(crate) fn data_access_layer(&self) -> Arc<DataAccessLayer<LmdbGlobalState>> {\n        Arc::clone(&self.data_access_layer)\n    }\n\n    #[cfg(test)]\n    pub(crate) fn 
current_era_price(&self) -> EraPrice {\n        self.current_gas_price\n    }\n}\n\nimpl<REv> Component<REv> for ContractRuntime\nwhere\n    REv: From<ContractRuntimeRequest>\n        + From<ContractRuntimeAnnouncement>\n        + From<NetworkRequest<Message>>\n        + From<StorageRequest>\n        + From<MetaBlockAnnouncement>\n        + From<UnexecutedBlockAnnouncement>\n        + From<FatalAnnouncement>\n        + Send,\n{\n    type Event = Event;\n\n    fn name(&self) -> &str {\n        COMPONENT_NAME\n    }\n\n    fn handle_event(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        rng: &mut NodeRng,\n        event: Event,\n    ) -> Effects<Self::Event> {\n        match event {\n            Event::ContractRuntimeRequest(request) => {\n                self.handle_contract_runtime_request(effect_builder, rng, request)\n            }\n            Event::TrieRequestIncoming(request) => {\n                self.handle_trie_request(effect_builder, request)\n            }\n            Event::TrieDemand(demand) => self.handle_trie_demand(demand),\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/diagnostics_port/command.rs",
    "content": "use std::{\n    fmt::{self, Display, Formatter},\n    str::FromStr,\n};\n\nuse serde::Serialize;\nuse structopt::StructOpt;\nuse thiserror::Error;\n\nuse super::StopAtSpec;\n\n/// Command processing error.\n///\n/// Failures that occur when trying to parse an incoming client message.\n#[derive(Debug, Error)]\npub(super) enum Error {\n    /// Error processing a line using the shell-like lexer.\n    #[error(\"failed to split line using shell lexing rules\")]\n    ShlexFailure,\n    /// Not a valid command input.\n    #[error(transparent)]\n    Invalid(#[from] structopt::clap::Error),\n}\n\n/// Output format information is sent back to the client it.\n#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Default)]\npub(super) enum OutputFormat {\n    /// Human-readable interactive format.\n    ///\n    /// No string form, utilizes the `Display` implementation of types passed in.\n    #[default]\n    Interactive,\n    /// JSON, pretty-printed.\n    Json,\n    /// Binary using bincode.\n    Bincode,\n}\n\nimpl Display for OutputFormat {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            OutputFormat::Interactive => f.write_str(\"interactive\"),\n            OutputFormat::Json => f.write_str(\"json\"),\n            OutputFormat::Bincode => f.write_str(\"bincode\"),\n        }\n    }\n}\n\nimpl FromStr for OutputFormat {\n    type Err = &'static str;\n\n    fn from_str(s: &str) -> Result<Self, Self::Err> {\n        match s.to_lowercase().as_str() {\n            \"interactive\" | \"i\" => Ok(OutputFormat::Interactive),\n            \"json\" | \"j\" => Ok(OutputFormat::Json),\n            \"bincode\" | \"b\" => Ok(OutputFormat::Bincode),\n            _ => Err(\"invalid output format, must be one of 'interactive', 'json', 'bincode'\"),\n        }\n    }\n}\n\n/// Action to perform.\n#[derive(Debug, StructOpt)]\npub(super) enum Action {\n    /// Retrieve the active diagnostics port session information.\n    
Session,\n    /// Set options on active diagnostics port session.\n    Set {\n        /// Whether or not to omit command confirmation after every command sent. Defaults to off,\n        /// meaning commands WILL send confirmations.\n        #[structopt(short, long)]\n        quiet: Option<bool>,\n        /// Output format for any type of response, one of `interactive`, `json` or `bincode`.\n        /// Defaults to `interactive`.\n        #[structopt(short, long)]\n        output: Option<OutputFormat>,\n    },\n    /// Show the current log filter configuration.\n    GetLogFilter,\n    /// Change the current log filter configuration.\n    SetLogFilter { directive: String },\n    /// Dump the state of the consensus component.\n    ///\n    /// It is recommended to set the output format to `bincode` if the data is to be visualized\n    /// after.\n    DumpConsensus {\n        /// Era to dump. If omitted, dumps the latest era.\n        era: Option<u64>,\n    },\n    /// Dump the event queues.\n    DumpQueues,\n    /// Get detailed networking insights.\n    NetInfo,\n    /// Stop the node at a certain condition.\n    Stop {\n        /// When to stop the node.\n        ///\n        /// Supports `block:12345` for block height, `era:123` for eras, `block:next` / `era:end`\n        /// to stop on an upcoming block or switch block, or `now` to stop immediately. 
Defaults to\n        /// `block:next`.\"\n        ///\n        /// Returns the previously set stopping point, if any.\n        #[structopt(short, long, default_value)]\n        at: StopAtSpec,\n        /// Ignore all further options to stop and clear any currently scheduled stops.\n        #[structopt(short, long)]\n        clear: bool,\n    },\n    /// Activate or clear a failpoint.\n    ///\n    /// Failpoint syntax is as follows: `key(,meta:meta_value)*(=value)?`, with `key` being the\n    /// identifier of the failpoint, `meta` being additional settings, and `value` JSON encoded.\n    ///\n    /// If `value` is not set, the failpoint is cleared instead of being set.\n    ///\n    /// The following `meta` values are understood:\n    ///\n    /// * `sub` sets the subkey (example: `sub:e4c2a1f`)\n    /// * `p` sets the probability, must be between `0.0` and `1.0` (example: `p:0.1`)\n    /// * `once` has no value and indicates the failpoint should only be fired once.\n    ///\n    /// No colons or commas are allowed in `key`, `meta` or `meta_value`.\n    ///\n    /// Examples:\n    ///\n    /// * `foobar` clears the failpoint with key \"foobar\".\n    /// * `foobar,sub:example value,p:0.123,once={\"hello\": \"world\"}` sets the failpoint \"foobar\",\n    ///   with a subkey of \"example value\", a probability of 12.3%, to be fired only once, and a\n    ///   JSON encoded value of `{\"hello\": \"world\"}`.\n    SetFailpoint {\n        /// The failpoint activation/deactivation.\n        activation: String,\n    },\n    /// Close connection server-side.\n    Quit,\n}\n\n/// A command to be performed on the node's diagnostic port.\n#[derive(Debug, StructOpt)]\npub(super) struct Command {\n    #[structopt(subcommand)]\n    pub(super) action: Action,\n}\n\nimpl Command {\n    /// Parses a line of input into a `Command`.\n    pub(super) fn from_line(line: &str) -> Result<Self, Error> {\n        let mut parts = vec![\"casper-diagnostics-port\".to_owned()];\n        
parts.extend(shlex::split(line).ok_or(Error::ShlexFailure)?);\n        Ok(Self::from_iter_safe(parts.into_iter())?)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::components::diagnostics_port::command::{Action, Command};\n\n    #[test]\n    fn can_parse_simple_commands() {\n        let cmd = Command::from_line(\"dump-consensus 123\").expect(\"command parsing failed\");\n        assert!(matches!(cmd.action, Action::DumpConsensus { era } if era == Some(123)));\n\n        let cmd = Command::from_line(\"dump-queues\").expect(\"command parsing failed\");\n        assert!(matches!(cmd.action, Action::DumpQueues));\n    }\n}\n"
  },
  {
    "path": "node/src/components/diagnostics_port/stop_at.rs",
    "content": "use std::{\n    fmt::{self, Display, Formatter},\n    str::FromStr,\n};\n\nuse casper_types::EraId;\nuse datasize::DataSize;\nuse serde::Serialize;\n\n/// A specification for a stopping point.\n#[derive(Copy, Clone, DataSize, Debug, Eq, PartialEq, Serialize, Default)]\n#[cfg_attr(test, derive(proptest_derive::Arbitrary))]\npub(crate) enum StopAtSpec {\n    /// Stop after completion of the current block.\n    #[default]\n    NextBlock,\n    /// Stop after the completion of the next switch block.\n    EndOfCurrentEra,\n    /// Stop immediately.\n    Immediately,\n    /// Stop at a given block height.\n    BlockHeight(u64),\n    /// Stop at a given era id.\n    EraId(EraId),\n}\n\nimpl Display for StopAtSpec {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            StopAtSpec::NextBlock => f.write_str(\"block:next\"),\n            StopAtSpec::EndOfCurrentEra => f.write_str(\"era:end\"),\n            StopAtSpec::Immediately => f.write_str(\"now\"),\n            StopAtSpec::BlockHeight(height) => write!(f, \"block:{}\", height),\n            StopAtSpec::EraId(era_id) => write!(f, \"era:{}\", era_id.value()),\n        }\n    }\n}\n\nimpl FromStr for StopAtSpec {\n    type Err = String;\n\n    fn from_str(s: &str) -> Result<Self, Self::Err> {\n        match s {\n            \"block:next\" => Ok(StopAtSpec::NextBlock),\n            \"era:end\" => Ok(StopAtSpec::EndOfCurrentEra),\n            \"now\" => Ok(StopAtSpec::Immediately),\n            val if val.starts_with(\"block:\") => u64::from_str(&val[6..])\n                .map_err(|err| format!(\"could not parse block height: {}\", err))\n                .map(StopAtSpec::BlockHeight),\n            val if val.starts_with(\"era:\") => u64::from_str(&val[4..])\n                .map_err(|err| format!(\"could not parse era id: {}\", err))\n                .map(EraId::new)\n                .map(StopAtSpec::EraId),\n            _ => Err(\"invalid stop-at 
specification\".to_string()),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::str::FromStr;\n\n    use super::StopAtSpec;\n    use casper_types::EraId;\n    use proptest::proptest;\n\n    proptest! {\n        #[test]\n        fn roundtrip_stop_at_spec(stop_at: StopAtSpec) {\n            let rendered = stop_at.to_string();\n            let parsed = StopAtSpec::from_str(rendered.as_str()).expect(\"failed to roundtrip\");\n            assert_eq!(stop_at, parsed);\n        }\n\n        #[test]\n        fn string_fuzz_stop_at_spec(input in \".*\") {\n            let _outcome = StopAtSpec::from_str(&input);\n        }\n\n        #[test]\n        fn prefixed_examples(input in \"(era|block):.*\") {\n            let _outcome = StopAtSpec::from_str(&input);\n        }\n    }\n\n    #[test]\n    fn known_good_examples() {\n        assert_eq!(\n            Ok(StopAtSpec::NextBlock),\n            StopAtSpec::from_str(\"block:next\")\n        );\n        assert_eq!(\n            Ok(StopAtSpec::EndOfCurrentEra),\n            StopAtSpec::from_str(\"era:end\")\n        );\n        assert_eq!(Ok(StopAtSpec::Immediately), StopAtSpec::from_str(\"now\"));\n        assert_eq!(\n            Ok(StopAtSpec::BlockHeight(123)),\n            StopAtSpec::from_str(\"block:123\")\n        );\n        assert_eq!(\n            Ok(StopAtSpec::EraId(EraId::new(123))),\n            StopAtSpec::from_str(\"era:123\")\n        );\n    }\n}\n"
  },
  {
    "path": "node/src/components/diagnostics_port/tasks.rs",
    "content": "use std::{\n    borrow::Cow,\n    fmt::{self, Debug, Display, Formatter},\n    fs::{self, File},\n    io,\n    path::PathBuf,\n    str::FromStr,\n};\n\nuse bincode::{\n    config::{AllowTrailing, FixintEncoding, WithOtherIntEncoding, WithOtherTrailing},\n    DefaultOptions, Options,\n};\nuse erased_serde::Serializer as ErasedSerializer;\nuse futures::future::{self, Either};\nuse serde::Serialize;\nuse thiserror::Error;\nuse tokio::{\n    io::{AsyncBufReadExt, AsyncRead, AsyncWriteExt, BufReader},\n    net::{unix::OwnedWriteHalf, UnixListener, UnixStream},\n    sync::watch,\n};\nuse tracing::{debug, info, info_span, warn, Instrument};\n\nuse casper_types::EraId;\nuse tracing_subscriber::{filter::ParseError, EnvFilter};\n\nuse super::{\n    command::{Action, Command, OutputFormat},\n    util::ShowUnixAddr,\n};\nuse crate::{\n    components::consensus::EraDump,\n    effect::{\n        announcements::{ControlAnnouncement, QueueDumpFormat},\n        diagnostics_port::DumpConsensusStateRequest,\n        requests::{NetworkInfoRequest, SetNodeStopRequest},\n        EffectBuilder,\n    },\n    failpoints::FailpointActivation,\n    logging,\n    utils::{display_error, opt_display::OptDisplay},\n};\n\n/// Success or failure response.\n///\n/// This response is sent back to clients after every operation (unless suppressed in quiet mode),\n/// indicating the outcome of the operation.\n#[derive(Debug, Serialize)]\nenum Outcome {\n    /// Operation succeeded.\n    Success {\n        /// Human-readable message giving additional info and/or stating the effect.\n        msg: String,\n    },\n    /// Operation failed.\n    Failure {\n        /// Human-readable message describing the failure that occurred.\n        reason: String,\n    },\n}\n\nimpl Outcome {\n    /// Constructs a new successful outcome.\n    fn success<S: ToString>(msg: S) -> Self {\n        Outcome::Success {\n            msg: msg.to_string(),\n        }\n    }\n\n    /// Constructs a new failed 
outcome.\n    fn failed<S: ToString>(reason: S) -> Self {\n        Outcome::Failure {\n            reason: reason.to_string(),\n        }\n    }\n}\n\nimpl Display for Outcome {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            Outcome::Success { msg } => {\n                write!(f, \"OK   {}\", msg)\n            }\n            Outcome::Failure { reason } => {\n                write!(f, \"ERR  {}\", reason)\n            }\n        }\n    }\n}\n\n/// Configuration for a connection diagnostics port session.\n#[derive(Copy, Clone, Debug, Default, Serialize)]\nstruct Session {\n    /// Whether or not to suppress the operation outcome.\n    quiet: bool,\n    /// Output format to send to client.\n    output: OutputFormat,\n}\n\nimpl Display for Session {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        Debug::fmt(self, f)\n    }\n}\n\n/// A serializer supporting multiple format variants that writes into a file.\npub enum FileSerializer {\n    /// JSON-format serializer.\n    Json(serde_json::Serializer<File>),\n    /// Bincode-format serializer.\n    Bincode(\n        bincode::Serializer<\n            File,\n            WithOtherTrailing<WithOtherIntEncoding<DefaultOptions, FixintEncoding>, AllowTrailing>,\n        >,\n    ),\n}\n\nimpl FileSerializer {\n    /// Converts the temp file serializer into an actual erased serializer.\n    pub fn as_serializer<'a>(&'a mut self) -> Box<dyn ErasedSerializer + 'a> {\n        match self {\n            FileSerializer::Json(json) => Box::new(<dyn erased_serde::Serializer>::erase(json)),\n            FileSerializer::Bincode(bincode) => {\n                Box::new(<dyn erased_serde::Serializer>::erase(bincode))\n            }\n        }\n    }\n}\n\n/// Error obtaining a queue dump.\n#[derive(Debug, Error)]\nenum ObtainDumpError {\n    /// Error trying to create a temporary directory.\n    #[error(\"could not create temporary directory\")]\n    CreateTempDir(#[source] 
io::Error),\n    /// Error trying to create a file in the temporary directory.\n    #[error(\"could not create file in temporary directory\")]\n    CreateTempFile(#[source] io::Error),\n    /// Error trying to reopen the file in the temporary directory after writing.\n    #[error(\"could not reopen file in temporary directory\")]\n    ReopenTempFile(#[source] io::Error),\n}\n\nimpl Session {\n    /// Creates a serializer for an `EraDump`.\n    fn create_era_dump_serializer(&self) -> fn(&EraDump<'_>) -> Result<Vec<u8>, Cow<'static, str>> {\n        match self.output {\n            OutputFormat::Interactive => |data: &EraDump| {\n                let mut buf = data.to_string().into_bytes();\n                buf.push(b'\\n');\n                Ok(buf)\n            },\n            OutputFormat::Json => |data: &EraDump| {\n                let mut buf = serde_json::to_vec(&data).map_err(|err| {\n                    Cow::Owned(format!(\"failed to serialize era dump as JSON: {}\", err))\n                })?;\n                buf.push(b'\\n');\n                Ok(buf)\n            },\n            OutputFormat::Bincode => |data: &EraDump| {\n                bincode::serialize(&data).map_err(|err| {\n                    Cow::Owned(format!(\"failed to serialize era dump as bincode: {}\", err))\n                })\n            },\n        }\n    }\n\n    /// Creates a generic serializer that is writing to a temporary file.\n    ///\n    /// The resulting serializer will write to the given file.\n    fn create_queue_dump_format(&self, file: File) -> QueueDumpFormat {\n        match self.output {\n            OutputFormat::Interactive => QueueDumpFormat::debug(file),\n            OutputFormat::Json => {\n                QueueDumpFormat::serde(FileSerializer::Json(serde_json::Serializer::new(file)))\n            }\n            OutputFormat::Bincode => {\n                QueueDumpFormat::serde(FileSerializer::Bincode(bincode::Serializer::new(\n                    file,\n              
      // TODO: Do not use `bincode::serialize` above, but rather always instantiate\n                    // options across the file to ensure it is always the same.\n                    DefaultOptions::new()\n                        .with_fixint_encoding()\n                        .allow_trailing_bytes(),\n                )))\n            }\n        }\n    }\n\n    /// Processes a single command line sent from a client.\n    async fn process_line<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        writer: &mut OwnedWriteHalf,\n        line: &str,\n    ) -> io::Result<bool>\n    where\n        REv: From<DumpConsensusStateRequest>\n            + From<ControlAnnouncement>\n            + From<NetworkInfoRequest>\n            + From<SetNodeStopRequest>\n            + Send,\n    {\n        debug!(%line, \"line received\");\n        match Command::from_line(line) {\n            Ok(ref cmd) => {\n                info!(?cmd, \"processing command\");\n                match cmd.action {\n                    Action::Session => {\n                        self.send_outcome(writer, &Outcome::success(\"showing session info\"))\n                            .await?;\n                        self.send_to_client(writer, &self).await?;\n                    }\n                    Action::Set { quiet, output } => {\n                        let mut changed = false;\n\n                        if let Some(quiet) = quiet {\n                            changed |= self.quiet != quiet;\n                            self.quiet = quiet;\n                        }\n\n                        if let Some(output) = output {\n                            changed |= self.output != output;\n                            self.output = output;\n                        }\n\n                        if changed {\n                            self.send_outcome(writer, &Outcome::success(\"session updated\"))\n                                .await?;\n                        } else {\n    
                        self.send_outcome(writer, &Outcome::success(\"session unchanged\"))\n                                .await?;\n                        }\n                    }\n                    Action::GetLogFilter => match logging::display_global_env_filter() {\n                        Ok(formatted) => {\n                            self.send_outcome(writer, &Outcome::success(\"found log filter\"))\n                                .await?;\n                            self.send_to_client(writer, &formatted).await?;\n                        }\n                        Err(err) => {\n                            self.send_outcome(\n                                writer,\n                                &Outcome::failed(format!(\"failed to retrieve log filter: {}\", err)),\n                            )\n                            .await?;\n                        }\n                    },\n                    Action::SetLogFilter { ref directive } => match set_log_filter(directive) {\n                        Ok(()) => {\n                            self.send_outcome(\n                                writer,\n                                &Outcome::success(\"new logging directive set\"),\n                            )\n                            .await?;\n                        }\n                        Err(err) => {\n                            self.send_outcome(\n                                writer,\n                                &Outcome::failed(format!(\n                                    \"failed to set new logging directive: {}\",\n                                    err\n                                )),\n                            )\n                            .await?;\n                        }\n                    },\n                    Action::DumpConsensus { era } => {\n                        let output = effect_builder\n                            .diagnostics_port_dump_consensus_state(\n                                
era.map(EraId::new),\n                                self.create_era_dump_serializer(),\n                            )\n                            .await;\n\n                        match output {\n                            Ok(ref data) => {\n                                self.send_outcome(\n                                    writer,\n                                    &Outcome::success(\"dumping consensus state\"),\n                                )\n                                .await?;\n                                writer.write_all(data).await?;\n                            }\n                            Err(err) => {\n                                self.send_outcome(writer, &Outcome::failed(err)).await?;\n                            }\n                        }\n                    }\n                    Action::DumpQueues => {\n                        // Note: The preferable approach would be to use a tempfile instead of a\n                        //       named one in a temporary directory, and return it through the\n                        //       responder. 
This is currently hamstrung by `bincode` not allowing\n                        //       the retrieval of the inner writer from its serializer.\n\n                        match self.obtain_queue_dump(effect_builder).await {\n                            Ok(file) => {\n                                self.send_outcome(writer, &Outcome::success(\"dumping queues\"))\n                                    .await?;\n\n                                let mut tokio_file = tokio::fs::File::from_std(file);\n                                self.stream_to_client(writer, &mut tokio_file).await?;\n                            }\n                            Err(err) => {\n                                self.send_outcome(\n                                    writer,\n                                    &Outcome::failed(format!(\n                                        \"failed to obtain dump: {}\",\n                                        display_error(&err)\n                                    )),\n                                )\n                                .await?;\n                            }\n                        };\n                    }\n                    Action::NetInfo => {\n                        self.send_outcome(writer, &Outcome::success(\"collecting insights\"))\n                            .await?;\n                        let insights = effect_builder.get_network_insights().await;\n                        self.send_to_client(writer, &insights).await?;\n                    }\n                    Action::Stop { at, clear } => {\n                        let (msg, stop_at) = if clear {\n                            (\"clearing stopping point\", None)\n                        } else {\n                            (\"setting new stopping point\", Some(at))\n                        };\n                        let prev = effect_builder.set_node_stop_at(stop_at).await;\n                        self.send_outcome(writer, &Outcome::success(msg)).await?;\n               
         self.send_to_client(\n                            writer,\n                            &OptDisplay::new(prev, \"no previous stop-at spec\"),\n                        )\n                        .await?;\n                    }\n                    Action::SetFailpoint { ref activation } => {\n                        match FailpointActivation::from_str(activation) {\n                            Ok(fp_activation) => {\n                                effect_builder.activate_failpoint(fp_activation).await;\n\n                                self.send_outcome(\n                                    writer,\n                                    &Outcome::success(\"failpoint activation sent\".to_string()),\n                                )\n                                .await?;\n                            }\n                            Err(ref err) => {\n                                self.send_outcome(\n                                    writer,\n                                    &Outcome::failed(format!(\n                                        \"invalid failpoint activation: {}\",\n                                        display_error(err)\n                                    )),\n                                )\n                                .await?;\n                            }\n                        }\n                    }\n                    Action::Quit => {\n                        self.send_outcome(writer, &Outcome::success(\"goodbye!\"))\n                            .await?;\n                        return Ok(false);\n                    }\n                };\n            }\n            Err(err) => {\n                self.send_outcome(writer, &Outcome::failed(err.to_string().as_str()))\n                    .await?\n            }\n        }\n\n        Ok(true)\n    }\n\n    /// Obtains a queue dump from the reactor.\n    ///\n    /// Returns an open file that contains the entire dump.\n    async fn obtain_queue_dump<REv>(\n        
&self,\n        effect_builder: EffectBuilder<REv>,\n    ) -> Result<File, ObtainDumpError>\n    where\n        REv: From<ControlAnnouncement> + Send,\n    {\n        // Note: The preferable approach would be to use a tempfile instead of a\n        //       named one in a temporary directory, and return it through the\n        //       responder. This is currently hamstrung since `bincode` does not\n        //       allow retrieving the inner writer from its serializer.\n\n        let tempdir = tempfile::tempdir().map_err(ObtainDumpError::CreateTempDir)?;\n        let tempfile_path = tempdir.path().join(\"queue-dump\");\n\n        let tempfile = File::create(&tempfile_path).map_err(ObtainDumpError::CreateTempFile)?;\n\n        effect_builder\n            .diagnostics_port_dump_queue(self.create_queue_dump_format(tempfile))\n            .await;\n\n        // We can now reopen the file and return it.\n        let reopened_tempfile =\n            File::open(tempfile_path).map_err(ObtainDumpError::ReopenTempFile)?;\n        Ok(reopened_tempfile)\n    }\n\n    /// Sends an operation outcome.\n    ///\n    /// The outcome will be silently dropped if the session is in quiet mode.\n    async fn send_outcome(\n        &self,\n        writer: &mut OwnedWriteHalf,\n        response: &Outcome,\n    ) -> io::Result<()> {\n        if self.quiet {\n            return Ok(());\n        }\n\n        self.send_to_client(writer, response).await\n    }\n\n    /// Sends a message to the client.\n    ///\n    /// Any type of message can be sent to a client, as long as it has a `Display` (use for\n    /// `interactive` encoding) and `Serialize` (used for `bincode` and `json`) implementation.\n    async fn send_to_client<T>(&self, writer: &mut OwnedWriteHalf, response: &T) -> io::Result<()>\n    where\n        T: Display + Serialize,\n    {\n        match self.output {\n            OutputFormat::Interactive => {\n                writer.write_all(response.to_string().as_bytes()).await?;\n   
             writer.write_all(b\"\\n\").await?;\n            }\n            OutputFormat::Json => {\n                info!(\"sending json\");\n                let buf = serde_json::to_string_pretty(response).map_err(|err| {\n                    warn!(%err, \"error outputting JSON string\");\n                    io::Error::new(io::ErrorKind::Other, err)\n                })?;\n                writer.write_all(buf.as_bytes()).await?;\n                writer.write_all(b\"\\n\").await?;\n            }\n            OutputFormat::Bincode => {\n                let buf = bincode::serialize(response)\n                    .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;\n                writer.write_all(&buf).await?;\n            }\n        }\n\n        Ok(())\n    }\n\n    /// Streams data from a source to the client.\n    ///\n    /// Returns the number of bytes sent.\n    async fn stream_to_client<R: AsyncRead + Unpin + ?Sized>(\n        &self,\n        writer: &mut OwnedWriteHalf,\n        src: &mut R,\n    ) -> io::Result<u64> {\n        tokio::io::copy(src, writer).await\n    }\n}\n\n/// Error while trying to set the global log filter.\n#[derive(Debug, Error)]\nenum SetLogFilterError {\n    /// Failed to parse the given directive (the `RUST_LOG=...directive` string).\n    #[error(\"could not parse filter directive\")]\n    ParseError(ParseError),\n    /// Failure setting the correctly parsed filter.\n    #[error(\"failed to set global filter\")]\n    SetFailed(anyhow::Error),\n}\n\n/// Sets the global log using the given new directive.\nfn set_log_filter(filter_str: &str) -> Result<(), SetLogFilterError> {\n    let new_filter = EnvFilter::try_new(filter_str).map_err(SetLogFilterError::ParseError)?;\n\n    logging::reload_global_env_filter(new_filter).map_err(SetLogFilterError::SetFailed)\n}\n\n/// Handler for client connection.\n///\n/// The core loop for the diagnostics port; reads commands via unix socket and processes them.\n///\n/// # Security\n///\n/// 
The handler itself will buffer an unlimited amount of data if no newline is encountered in the\n/// input stream. For this reason ensure that only trusted clients connect to the socket producing\n/// the passed in `stream`.\nasync fn handler<REv>(\n    effect_builder: EffectBuilder<REv>,\n    stream: UnixStream,\n    mut shutdown_receiver: watch::Receiver<()>,\n) -> io::Result<()>\nwhere\n    REv: From<DumpConsensusStateRequest>\n        + From<ControlAnnouncement>\n        + From<NetworkInfoRequest>\n        + From<SetNodeStopRequest>\n        + Send,\n{\n    debug!(\"accepted new connection on diagnostics port\");\n\n    let (reader, mut writer) = stream.into_split();\n    let mut lines = BufReader::new(reader).lines();\n    let mut session = Session::default();\n\n    let mut keep_going = true;\n    while keep_going {\n        let shutdown_messages = async { while shutdown_receiver.changed().await.is_ok() {} };\n\n        match future::select(Box::pin(shutdown_messages), Box::pin(lines.next_line())).await {\n            Either::Left(_) => {\n                info!(\"shutting down diagnostics port connection to client\");\n                return Ok(());\n            }\n            Either::Right((line_result, _)) => {\n                if let Some(line) = line_result? 
{\n                    keep_going = session\n                        .process_line(effect_builder, &mut writer, line.as_str())\n                        .await?;\n                } else {\n                    info!(\"client closed diagnostics port connection\");\n                    return Ok(());\n                }\n            }\n        }\n    }\n\n    Ok(())\n}\n\n/// Server task for diagnostics port.\npub(super) async fn server<REv>(\n    effect_builder: EffectBuilder<REv>,\n    socket_path: PathBuf,\n    listener: UnixListener,\n    mut shutdown_receiver: watch::Receiver<()>,\n) where\n    REv: From<DumpConsensusStateRequest>\n        + From<ControlAnnouncement>\n        + From<NetworkInfoRequest>\n        + From<SetNodeStopRequest>\n        + Send,\n{\n    let handling_shutdown_receiver = shutdown_receiver.clone();\n    let mut next_client_id: u64 = 0;\n    let accept_connections = async move {\n        loop {\n            match listener.accept().await {\n                Ok((stream, client_addr)) => {\n                    let client_id = next_client_id;\n\n                    let span = info_span!(\"diagnostics_port\", client_id,);\n\n                    span.in_scope(|| {\n                        info!(client_addr = %ShowUnixAddr(&client_addr), \"accepted connection\");\n                    });\n\n                    next_client_id += 1;\n\n                    tokio::spawn(\n                        handler(effect_builder, stream, handling_shutdown_receiver.clone())\n                            .instrument(span),\n                    );\n                }\n                Err(err) => {\n                    info!(%err, \"failed to accept incoming connection on diagnostics port\");\n                }\n            }\n        }\n    };\n\n    let shutdown_messages = async move { while shutdown_receiver.changed().await.is_ok() {} };\n\n    // Now we can wait for either the `shutdown` channel's remote end to be dropped or the\n    // infinite loop to terminate, 
which never happens.\n    match future::select(Box::pin(shutdown_messages), Box::pin(accept_connections)).await {\n        Either::Left(_) => info!(\"shutting down diagnostics port\"),\n        Either::Right(_) => unreachable!(\"server accept returns `!`\"),\n    }\n\n    // When we're shutting down, we try to delete the socket, but only warn in case of failure.\n    match fs::remove_file(&socket_path) {\n        Ok(_) => {\n            debug!(socket_path=%socket_path.display(), \"removed socket file\");\n        }\n        Err(_) => {\n            warn!(socket_path=%socket_path.display(), \"could not remove socket file\");\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::{\n        fmt::{self, Debug, Display, Formatter},\n        path::{Path, PathBuf},\n        sync::Arc,\n        time::Duration,\n    };\n\n    use derive_more::From;\n    use prometheus::Registry;\n    use serde::Serialize;\n    use tokio::{\n        io::{AsyncReadExt, AsyncWriteExt},\n        net::UnixStream,\n        sync::Notify,\n    };\n\n    use casper_types::{testing::TestRng, Chainspec, ChainspecRawBytes};\n\n    use crate::{\n        components::{\n            diagnostics_port::{self, Config as DiagnosticsPortConfig, DiagnosticsPort},\n            network::{self, Identity as NetworkIdentity},\n            Component, InitializedComponent,\n        },\n        effect::{\n            announcements::ControlAnnouncement,\n            diagnostics_port::DumpConsensusStateRequest,\n            requests::{NetworkInfoRequest, SetNodeStopRequest},\n            EffectBuilder, EffectExt, Effects,\n        },\n        reactor::{\n            self, main_reactor::MainEvent, EventQueueHandle, QueueKind, Reactor as ReactorTrait,\n            ReactorEvent,\n        },\n        testing::{\n            self,\n            network::{NetworkedReactor, TestingNetwork},\n        },\n        utils::WeightedRoundRobin,\n        NodeRng, WithDir,\n    };\n\n    pub struct TestReactorConfig {\n        
base_dir: PathBuf,\n        diagnostics_port: DiagnosticsPortConfig,\n    }\n\n    impl TestReactorConfig {\n        /// Creates a new test reactor configuration with a given base dir and index.\n        fn new<P: AsRef<Path>>(base_dir: P, idx: usize) -> Self {\n            TestReactorConfig {\n                base_dir: base_dir.as_ref().to_owned(),\n                diagnostics_port: DiagnosticsPortConfig {\n                    enabled: true,\n                    socket_path: format!(\"node_{}.socket\", idx).into(),\n                    socket_umask: 0o022,\n                },\n            }\n        }\n\n        fn socket_path(&self) -> PathBuf {\n            self.base_dir.join(&self.diagnostics_port.socket_path)\n        }\n    }\n\n    #[derive(Debug)]\n    struct Error;\n\n    impl From<prometheus::Error> for Error {\n        fn from(_: prometheus::Error) -> Self {\n            Self\n        }\n    }\n\n    #[derive(Serialize, Debug, From)]\n    enum Event {\n        #[from]\n        DiagnosticsConsole(diagnostics_port::Event),\n        #[from]\n        DumpConsensusStateRequest(DumpConsensusStateRequest),\n        #[from]\n        ControlAnnouncement(ControlAnnouncement),\n        #[from]\n        NetworkInfoRequest(NetworkInfoRequest),\n        #[from]\n        SetNodeStopRequest(SetNodeStopRequest),\n    }\n\n    impl Display for Event {\n        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n            Debug::fmt(self, f)\n        }\n    }\n\n    impl ReactorEvent for Event {\n        fn is_control(&self) -> bool {\n            matches!(self, Event::ControlAnnouncement(_))\n        }\n\n        fn try_into_control(self) -> Option<ControlAnnouncement> {\n            match self {\n                Event::ControlAnnouncement(ctrl_ann) => Some(ctrl_ann),\n                _ => None,\n            }\n        }\n    }\n\n    #[derive(Debug)]\n    struct Reactor {\n        diagnostics_console: DiagnosticsPort,\n    }\n\n    impl ReactorTrait for Reactor {\n  
      type Event = Event;\n        type Error = Error;\n        type Config = TestReactorConfig;\n\n        fn dispatch_event(\n            &mut self,\n            effect_builder: EffectBuilder<Self::Event>,\n            rng: &mut NodeRng,\n            event: Event,\n        ) -> Effects<Event> {\n            match event {\n                Event::DiagnosticsConsole(event) => reactor::wrap_effects(\n                    Event::DiagnosticsConsole,\n                    self.diagnostics_console\n                        .handle_event(effect_builder, rng, event),\n                ),\n                Event::DumpConsensusStateRequest(_)\n                | Event::SetNodeStopRequest(_)\n                | Event::ControlAnnouncement(_)\n                | Event::NetworkInfoRequest(_) => {\n                    panic!(\"unexpected: {}\", event)\n                }\n            }\n        }\n\n        fn new(\n            cfg: TestReactorConfig,\n            _chainspec: Arc<Chainspec>,\n            _chainspec_raw_bytes: Arc<ChainspecRawBytes>,\n            _network_identity: NetworkIdentity,\n            _registry: &Registry,\n            _event_queue: EventQueueHandle<Event>,\n            _rng: &mut NodeRng,\n        ) -> Result<(Self, Effects<Event>), Error> {\n            let mut diagnostics_console =\n                DiagnosticsPort::new(WithDir::new(cfg.base_dir.clone(), cfg.diagnostics_port));\n            <DiagnosticsPort as InitializedComponent<Event>>::start_initialization(\n                &mut diagnostics_console,\n            );\n            let reactor = Reactor {\n                diagnostics_console,\n            };\n            let effects = reactor::wrap_effects(\n                Event::DiagnosticsConsole,\n                async {}.event(|()| diagnostics_port::Event::Initialize),\n            );\n\n            Ok((reactor, effects))\n        }\n    }\n\n    impl NetworkedReactor for Reactor {}\n\n    /// Runs a single mini-node with a diagnostics console and requests 
a dump of the (empty)\n    /// event queue, then returns it.\n    async fn run_single_node_console_and_dump_events(dump_format: &'static str) -> String {\n        let mut network = TestingNetwork::<Reactor>::new();\n        let mut rng = TestRng::new();\n\n        let base_dir = tempfile::tempdir().expect(\"could not create tempdir\");\n\n        // We just add a single node to the network.\n        let cfg = TestReactorConfig::new(base_dir.path(), 0);\n        let socket_path = cfg.socket_path();\n        let (_node_id, _runner) = network.add_node_with_config(cfg, &mut rng).await.unwrap();\n\n        // Wait for the listening socket to initialize.\n        network\n            .settle(&mut rng, Duration::from_millis(500), Duration::from_secs(5))\n            .await;\n\n        let ready = Arc::new(Notify::new());\n\n        // Start a background task that connects to the unix socket and sends a few requests down.\n        let client_ready = ready.clone();\n        let join_handle = tokio::spawn(async move {\n            let mut stream = UnixStream::connect(socket_path)\n                .await\n                .expect(\"could not connect to socket path of node\");\n\n            let commands = format!(\"set -o {} -q true\\ndump-queues\\nquit\\n\", dump_format);\n            stream\n                .write_all(commands.as_bytes())\n                .await\n                .expect(\"could not write to listener\");\n            stream.flush().await.expect(\"flushing failed\");\n\n            client_ready.notify_one();\n\n            let mut buffer = Vec::new();\n            stream\n                .read_to_end(&mut buffer)\n                .await\n                .expect(\"could not read console output to end\");\n\n            String::from_utf8(buffer).expect(\"could not parse output as UTF8\")\n        });\n\n        // Wait for all the commands to be buffered.\n        ready.notified().await;\n\n        // Give the node a chance to satisfy the dump.\n        
network\n            .settle(&mut rng, Duration::from_secs(1), Duration::from_secs(10))\n            .await;\n\n        join_handle.await.expect(\"error joining client task\")\n    }\n\n    #[tokio::test]\n    async fn ensure_diagnostics_port_can_dump_events_in_json_format() {\n        testing::init_logging();\n\n        let output = run_single_node_console_and_dump_events(\"json\").await;\n\n        // The output will be empty queues, albeit formatted as JSON. Just check if there is a\n        // proper JSON header present.\n        assert!(output.starts_with(r#\"{\"queues\":{\"\"#));\n    }\n\n    #[tokio::test]\n    async fn ensure_diagnostics_port_can_dump_events_in_interactive_format() {\n        testing::init_logging();\n\n        let output = run_single_node_console_and_dump_events(\"interactive\").await;\n\n        // The output will be empty queues in debug format. We only look at the start of the output,\n        // since some time-triggered output may have already been included.\n        assert!(output.starts_with(r#\"QueueDump { queues: {\"#));\n    }\n\n    #[tokio::test]\n    async fn can_dump_actual_events_from_scheduler() {\n        // Create a scheduler with a few synthetic events.\n        let scheduler = WeightedRoundRobin::new(QueueKind::weights(), None);\n        scheduler\n            .push(\n                MainEvent::Network(network::Event::SweepOutgoing),\n                QueueKind::Network,\n            )\n            .await;\n        scheduler\n            .push(\n                MainEvent::Network(network::Event::GossipOurAddress),\n                QueueKind::Gossip,\n            )\n            .await;\n\n        // Construct the debug representation and compare as strings to avoid issues with missing\n        // `PartialEq` implementations.\n        scheduler\n            .dump(|dump| {\n                let debug_repr = format!(\"{:?}\", dump);\n                assert!(debug_repr.starts_with(r#\"QueueDump { queues: {\"#));\n            
})\n            .await;\n    }\n}\n"
  },
  {
    "path": "node/src/components/diagnostics_port/util.rs",
    "content": "//! Renderer for unix socket addresses.\n\nuse std::fmt::{self, Display, Formatter};\n\nuse tokio::net::unix::SocketAddr;\n\n/// Unix socket address `Display` wrapper.\n///\n/// Allows displaying a unix socket address.\n#[derive(Debug)]\npub(super) struct ShowUnixAddr<'a>(pub &'a SocketAddr);\n\nimpl Display for ShowUnixAddr<'_> {\n    #[inline]\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        match self.0.as_pathname() {\n            Some(path) => path.display().fmt(f),\n            None => f.write_str(\"<unnamed unix socket>\"),\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/diagnostics_port.rs",
    "content": "//! Diagnostics port component.\n//!\n//! The diagnostics port listens on a configurable unix socket for incoming connections and allows\n//! deep debug access to a running node via special commands.\n\nmod command;\nmod stop_at;\nmod tasks;\nmod util;\n\nuse std::{\n    fmt::{self, Display, Formatter},\n    fs, io,\n    path::{Path, PathBuf},\n};\n\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\nuse thiserror::Error;\nuse tokio::{net::UnixListener, sync::watch};\nuse tracing::{debug, error, info, warn};\n\nuse crate::{\n    components::{Component, ComponentState, InitializedComponent, PortBoundComponent},\n    effect::{\n        announcements::ControlAnnouncement,\n        diagnostics_port::DumpConsensusStateRequest,\n        requests::{NetworkInfoRequest, SetNodeStopRequest},\n        EffectBuilder, EffectExt, Effects,\n    },\n    reactor::main_reactor::MainEvent,\n    types::NodeRng,\n    utils::umask,\n    WithDir,\n};\npub(crate) use stop_at::StopAtSpec;\npub use tasks::FileSerializer;\nuse util::ShowUnixAddr;\n\nconst COMPONENT_NAME: &str = \"diagnostics_port\";\n\n/// Diagnostics port configuration.\n#[derive(Clone, DataSize, Debug, Serialize, Deserialize)]\npub struct Config {\n    /// Whether or not the diagnostics port is enabled.\n    pub enabled: bool,\n    /// Path to listen on.\n    pub socket_path: PathBuf,\n    /// `umask` to apply before creating the socket.\n    pub socket_umask: u16,\n}\n\nimpl Default for Config {\n    fn default() -> Self {\n        Self {\n            enabled: false,\n            socket_path: \"debug.socket\".into(),\n            socket_umask: 0o077,\n        }\n    }\n}\n\n/// Diagnostics port component.\n#[derive(Debug, DataSize)]\npub(crate) struct DiagnosticsPort {\n    state: ComponentState,\n    /// Sender which will cause server and client connections to exit when dropped.\n    #[data_size(skip)]\n    _shutdown_sender: Option<watch::Sender<()>>, // only used for its `Drop` impl\n    
config: WithDir<Config>,\n}\n\nimpl DiagnosticsPort {\n    /// Creates a new diagnostics port component.\n    pub(crate) fn new(config: WithDir<Config>) -> Self {\n        DiagnosticsPort {\n            state: ComponentState::Uninitialized,\n            config,\n            _shutdown_sender: None,\n        }\n    }\n}\n\n/// Diagnostics port event.\n#[derive(Debug, Serialize)]\npub(crate) enum Event {\n    Initialize,\n}\n\nimpl Display for Event {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        f.write_str(\"diagnostics port event\")\n    }\n}\n\n/// A diagnostics port initialization error.\n#[derive(Debug, Error)]\npub(crate) enum Error {\n    /// Error setting up the diagnostics port's unix socket listener.\n    #[error(\"could not setup diagnostics port listener\")]\n    SetupListener(#[from] io::Error),\n}\n\nimpl<REv> Component<REv> for DiagnosticsPort\nwhere\n    REv: From<Event>\n        + From<DumpConsensusStateRequest>\n        + From<ControlAnnouncement>\n        + From<NetworkInfoRequest>\n        + From<SetNodeStopRequest>\n        + Send,\n{\n    type Event = Event;\n\n    fn handle_event(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        _rng: &mut NodeRng,\n        event: Event,\n    ) -> Effects<Event> {\n        match &self.state {\n            ComponentState::Fatal(msg) => {\n                error!(\n                    msg,\n                    ?event,\n                    name = <Self as Component<MainEvent>>::name(self),\n                    \"should not handle this event when this component has fatal error\"\n                );\n                Effects::new()\n            }\n            ComponentState::Uninitialized => {\n                warn!(\n                    ?event,\n                    name = <Self as Component<MainEvent>>::name(self),\n                    \"should not handle this event when component is uninitialized\"\n                );\n                Effects::new()\n            }\n     
       ComponentState::Initializing => match event {\n                Event::Initialize => {\n                    if self.state != ComponentState::Initializing {\n                        return Effects::new();\n                    }\n                    let (effects, state) = self.bind(self.config.value().enabled, effect_builder);\n                    <Self as InitializedComponent<MainEvent>>::set_state(self, state);\n                    effects\n                }\n            },\n            ComponentState::Initialized => Effects::new(),\n        }\n    }\n\n    fn name(&self) -> &str {\n        COMPONENT_NAME\n    }\n}\n\nimpl<REv> InitializedComponent<REv> for DiagnosticsPort\nwhere\n    REv: From<Event>\n        + From<DumpConsensusStateRequest>\n        + From<ControlAnnouncement>\n        + From<NetworkInfoRequest>\n        + From<SetNodeStopRequest>\n        + Send,\n{\n    fn state(&self) -> &ComponentState {\n        &self.state\n    }\n\n    fn set_state(&mut self, new_state: ComponentState) {\n        info!(\n            ?new_state,\n            name = <Self as Component<MainEvent>>::name(self),\n            \"component state changed\"\n        );\n\n        self.state = new_state;\n    }\n}\n\nimpl<REv> PortBoundComponent<REv> for DiagnosticsPort\nwhere\n    REv: From<Event>\n        + From<DumpConsensusStateRequest>\n        + From<ControlAnnouncement>\n        + From<NetworkInfoRequest>\n        + From<SetNodeStopRequest>\n        + Send,\n{\n    type Error = Error;\n    type ComponentEvent = Event;\n\n    fn listen(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n    ) -> Result<Effects<Event>, Self::Error> {\n        let (shutdown_sender, shutdown_receiver) = watch::channel(());\n\n        self._shutdown_sender = Some(shutdown_sender);\n\n        let cfg = self.config.value();\n\n        let socket_path = self.config.with_dir(cfg.socket_path.clone());\n        let listener = setup_listener(\n            &socket_path,\n            
// Mac OS X / Linux use different types for the mask, so we need to call .into() here.\n            #[allow(clippy::useless_conversion)]\n            cfg.socket_umask.into(),\n        )?;\n        let server = tasks::server(effect_builder, socket_path, listener, shutdown_receiver);\n        Ok(server.ignore())\n    }\n}\n\n/// Sets up a UNIX socket listener at the given path.\n///\n/// If the socket already exists, an attempt to delete it is made. Errors during deletion are\n/// ignored, but may cause the subsequent socket opening to fail.\nfn setup_listener<P: AsRef<Path>>(path: P, socket_umask: umask::Mode) -> io::Result<UnixListener> {\n    let socket_path = path.as_ref();\n\n    // This would be racy, but no one is racing us for the socket, so we'll just do a naive\n    // check-then-delete :).\n    if socket_path.exists() {\n        debug!(socket_path=%socket_path.display(), \"found stale socket file, trying to remove\");\n        match fs::remove_file(socket_path) {\n            Ok(_) => {\n                debug!(\"stale socket file removed\");\n            }\n            Err(err) => {\n                // This happens if a background program races us for the removal, as it usually\n                // means the file is already gone. 
We can ignore this, but make note of it in the\n                // log.\n                warn!(%err, \"could not remove stale socket file, assuming race with other process\");\n            }\n        }\n    }\n\n    // This is not thread-safe, as it will set the umask for the entire process, but we assume that\n    // initialization happens \"sufficiently single-threaded\".\n    let umask_guard = umask::temp_umask(socket_umask);\n    let listener = UnixListener::bind(socket_path)?;\n    drop(umask_guard);\n\n    debug!(local_addr=%ShowUnixAddr(&listener.local_addr()?), \"diagnostics port listening\");\n\n    Ok(listener)\n}\n\n#[cfg(test)]\nmod tests {\n    use std::{\n        fs,\n        os::unix::prelude::{FileTypeExt, PermissionsExt},\n    };\n\n    use tokio::{\n        io::{AsyncReadExt, AsyncWriteExt},\n        net::UnixStream,\n    };\n\n    use super::setup_listener;\n\n    #[tokio::test]\n    async fn setup_listener_creates_listener() {\n        const TEST_MESSAGE: &[u8] = b\"hello, world!\";\n\n        let tmpdir = tempfile::tempdir().expect(\"could not create tempdir\");\n        let socket_path = tmpdir.path().join(\"test.socket\");\n\n        // We give it a strict umask to check.\n        let listener = setup_listener(&socket_path, 0o077).expect(\"could not setup listener\");\n\n        let meta = fs::metadata(&socket_path).expect(\"could not get metadata\");\n        // With the given umask, world and group permissions should be 0.\n        assert_eq!(meta.permissions().mode() & 0o077, 0);\n\n        // Attempt to connect.\n        tokio::spawn(async move {\n            let mut stream = UnixStream::connect(socket_path)\n                .await\n                .expect(\"could not connect to listener\");\n            stream\n                .write_all(TEST_MESSAGE)\n                .await\n                .expect(\"could not write to listener\");\n        });\n\n        let (mut stream, _socket_addr) = listener\n            .accept()\n            
.await\n            .expect(\"could not accept connection\");\n\n        let mut buffer = Vec::new();\n        stream\n            .read_to_end(&mut buffer)\n            .await\n            .expect(\"failed to read to end\");\n        assert_eq!(TEST_MESSAGE, buffer.as_slice());\n    }\n\n    #[tokio::test]\n    async fn setup_listener_removes_previous_listener() {\n        let tmpdir = tempfile::tempdir().expect(\"could not create tempdir\");\n        let socket_path = tmpdir.path().join(\"overwrite-me.socket\");\n\n        fs::write(&socket_path, b\"this-file-should-be-deleted-soon\")\n            .expect(\"could not write to socket-blocking temporary file\");\n\n        let meta = fs::metadata(&socket_path).expect(\"could not get metadata\");\n        assert!(\n            !meta.file_type().is_socket(),\n            \"temporary file created should not be a socket\"\n        );\n\n        // Creating the listener should remove the underlying file.\n        let _listener = setup_listener(&socket_path, 0o022).expect(\"could not setup listener\");\n\n        let meta = fs::metadata(&socket_path).expect(\"could not get metadata\");\n        assert!(\n            meta.file_type().is_socket(),\n            \"did not overwrite previous file\"\n        );\n    }\n}\n"
  },
  {
    "path": "node/src/components/event_stream_server/config.rs",
    "content": "use datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\n/// Default binding address for the SSE HTTP server.\n///\n/// Uses a fixed port per node, but binds on any interface.\nconst DEFAULT_ADDRESS: &str = \"0.0.0.0:0\";\n\n/// Default number of SSEs to buffer.\nconst DEFAULT_EVENT_STREAM_BUFFER_LENGTH: u32 = 5000;\n\n/// Default maximum number of subscribers.\nconst DEFAULT_MAX_CONCURRENT_SUBSCRIBERS: u32 = 100;\n\n/// Default CORS origin.\nconst DEFAULT_CORS_ORIGIN: &str = \"\";\n\n/// SSE HTTP server configuration.\n#[derive(Clone, DataSize, Debug, Deserialize, Serialize)]\n// Disallow unknown fields to ensure config files and command-line overrides contain valid keys.\n#[serde(deny_unknown_fields)]\npub struct Config {\n    /// Setting to enable the HTTP server.\n    pub enable_server: bool,\n\n    /// Address to bind event stream SSE HTTP server to.\n    pub address: String,\n\n    /// Number of SSEs to buffer.\n    pub event_stream_buffer_length: u32,\n\n    /// Default maximum number of subscribers across all event streams permitted at any one time.\n    pub max_concurrent_subscribers: u32,\n\n    /// CORS origin.\n    pub cors_origin: String,\n}\n\nimpl Config {\n    /// Creates a default instance for `EventStreamServer`.\n    pub fn new() -> Self {\n        Config {\n            enable_server: true,\n            address: DEFAULT_ADDRESS.to_string(),\n            event_stream_buffer_length: DEFAULT_EVENT_STREAM_BUFFER_LENGTH,\n            max_concurrent_subscribers: DEFAULT_MAX_CONCURRENT_SUBSCRIBERS,\n            cors_origin: DEFAULT_CORS_ORIGIN.to_string(),\n        }\n    }\n}\n\nimpl Default for Config {\n    fn default() -> Self {\n        Config::new()\n    }\n}\n"
  },
  {
    "path": "node/src/components/event_stream_server/event.rs",
    "content": "use std::{\n    fmt::{self, Display, Formatter},\n    sync::Arc,\n};\n\nuse crate::types::TransactionHeader;\nuse itertools::Itertools;\n\nuse casper_types::{\n    contract_messages::Messages,\n    execution::{Effects, ExecutionResult},\n    Block, BlockHash, EraId, FinalitySignature, PublicKey, Timestamp, Transaction, TransactionHash,\n};\n\n#[derive(Debug)]\npub enum Event {\n    Initialize,\n    BlockAdded(Arc<Block>),\n    TransactionAccepted(Arc<Transaction>),\n    TransactionProcessed {\n        transaction_hash: TransactionHash,\n        transaction_header: Box<TransactionHeader>,\n        block_hash: BlockHash,\n        execution_result: Box<ExecutionResult>,\n        messages: Messages,\n    },\n    TransactionsExpired(Vec<TransactionHash>),\n    Fault {\n        era_id: EraId,\n        public_key: Box<PublicKey>,\n        timestamp: Timestamp,\n    },\n    FinalitySignature(Box<FinalitySignature>),\n    Step {\n        era_id: EraId,\n        execution_effects: Effects,\n    },\n}\n\nimpl Display for Event {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            Event::Initialize => write!(formatter, \"initialize\"),\n            Event::BlockAdded(block) => write!(formatter, \"block added {}\", block.hash()),\n            Event::TransactionAccepted(transaction_hash) => {\n                write!(formatter, \"transaction accepted {}\", transaction_hash)\n            }\n            Event::TransactionProcessed {\n                transaction_hash, ..\n            } => {\n                write!(formatter, \"transaction processed {}\", transaction_hash)\n            }\n            Event::TransactionsExpired(transaction_hashes) => {\n                write!(\n                    formatter,\n                    \"transactions expired: {}\",\n                    transaction_hashes.iter().join(\", \")\n                )\n            }\n            Event::Fault {\n                era_id,\n                
public_key,\n                timestamp,\n            } => write!(\n                formatter,\n                \"An equivocator with public key: {} has been identified at time: {} in era: {}\",\n                public_key, timestamp, era_id,\n            ),\n            Event::FinalitySignature(fs) => write!(formatter, \"finality signature {}\", fs),\n            Event::Step { era_id, .. } => write!(formatter, \"step committed for {}\", era_id),\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/event_stream_server/event_indexer.rs",
    "content": "use std::{fs, path::PathBuf};\n\nuse datasize::DataSize;\nuse tracing::{debug, warn};\n\nconst CACHE_FILENAME: &str = \"sse_index\";\n\npub(super) type EventIndex = u32;\n\n#[derive(Debug, DataSize)]\npub(super) struct EventIndexer {\n    index: EventIndex,\n    persistent_cache: PathBuf,\n}\n\nimpl EventIndexer {\n    pub(super) fn new(storage_path: PathBuf) -> Self {\n        let persistent_cache = storage_path.join(CACHE_FILENAME);\n        let mut bytes = EventIndex::default().to_le_bytes();\n        match fs::read(&persistent_cache) {\n            Err(error) => {\n                if persistent_cache.exists() {\n                    warn!(\n                        file = %persistent_cache.display(),\n                        %error,\n                        \"failed to read sse cache file\"\n                    );\n                }\n            }\n            Ok(cached_bytes) => {\n                if cached_bytes.len() == bytes.len() {\n                    bytes.copy_from_slice(cached_bytes.as_slice());\n                } else {\n                    warn!(\n                        file = %persistent_cache.display(),\n                        byte_count = %cached_bytes.len(),\n                        \"failed to parse sse cache file\"\n                    );\n                }\n            }\n        }\n\n        let index = EventIndex::from_le_bytes(bytes);\n        debug!(%index, \"initialized sse index\");\n\n        EventIndexer {\n            index,\n            persistent_cache,\n        }\n    }\n\n    pub(super) fn next_index(&mut self) -> EventIndex {\n        let index = self.index;\n        self.index = index.wrapping_add(1);\n        index\n    }\n\n    #[cfg(test)]\n    pub(super) fn current_index(&self) -> EventIndex {\n        self.index\n    }\n}\n\nimpl Drop for EventIndexer {\n    fn drop(&mut self) {\n        match fs::write(&self.persistent_cache, self.index.to_le_bytes()) {\n            Err(error) => warn!(\n                
file = %self.persistent_cache.display(),\n                %error,\n                \"failed to write sse cache file\"\n            ),\n            Ok(_) => debug!(\n                file = %self.persistent_cache.display(),\n                index = %self.index,\n                \"cached sse index to file\"\n            ),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::iter;\n\n    use super::*;\n    use crate::logging;\n\n    #[test]\n    fn should_persist_in_cache() {\n        let _ = logging::init();\n        let tempdir = tempfile::tempdir().unwrap();\n\n        // This represents a single session where five events are produced before the session ends.\n        let init_and_increment_by_five = |expected_first_index: EventIndex| {\n            let mut event_indexer = EventIndexer::new(tempdir.path().to_path_buf());\n            for i in 0..5 {\n                assert_eq!(event_indexer.next_index(), expected_first_index + i);\n            }\n            // Explicitly drop, just to be clear that the cache write is being triggered.\n            drop(event_indexer);\n        };\n\n        // Should start at 0 when no cache file exists.\n        init_and_increment_by_five(0);\n\n        // Should keep reading and writing to cache over ten subsequent sessions.\n        for session in 1..11 {\n            init_and_increment_by_five(session * 5);\n        }\n    }\n\n    #[test]\n    fn should_wrap() {\n        let _ = logging::init();\n        let tempdir = tempfile::tempdir().unwrap();\n\n        let mut event_indexer = EventIndexer::new(tempdir.path().to_path_buf());\n        event_indexer.index = EventIndex::MAX;\n\n        assert_eq!(event_indexer.next_index(), EventIndex::MAX);\n        assert_eq!(event_indexer.next_index(), 0);\n    }\n\n    #[test]\n    fn should_reset_index_on_cache_read_failure() {\n        let _ = logging::init();\n        let tempdir = tempfile::tempdir().unwrap();\n\n        // Create a folder with the same name as the cache 
file to cause reading to fail.\n        fs::create_dir(tempdir.path().join(CACHE_FILENAME)).unwrap();\n        let mut event_indexer = EventIndexer::new(tempdir.path().to_path_buf());\n        assert_eq!(event_indexer.next_index(), 0);\n    }\n\n    #[test]\n    fn should_reset_index_on_corrupt_cache() {\n        let _ = logging::init();\n        let tempdir = tempfile::tempdir().unwrap();\n\n        {\n            // Create the cache file with too few bytes to be parsed as an `Index`.\n            let index: EventIndex = 1;\n            fs::write(\n                tempdir.path().join(CACHE_FILENAME),\n                &index.to_le_bytes()[1..],\n            )\n            .unwrap();\n\n            let mut event_indexer = EventIndexer::new(tempdir.path().to_path_buf());\n            assert_eq!(event_indexer.next_index(), 0);\n        }\n\n        {\n            // Create the cache file with too many bytes to be parsed as an `Index`.\n            let index: EventIndex = 1;\n            let bytes: Vec<u8> = index\n                .to_le_bytes()\n                .iter()\n                .chain(iter::once(&0))\n                .copied()\n                .collect();\n            fs::write(tempdir.path().join(CACHE_FILENAME), bytes).unwrap();\n\n            let mut event_indexer = EventIndexer::new(tempdir.path().to_path_buf());\n            assert_eq!(event_indexer.next_index(), 0);\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/event_stream_server/http_server.rs",
    "content": "use futures::{future, Future, FutureExt};\nuse tokio::{\n    select,\n    sync::{broadcast, mpsc, oneshot},\n    task,\n};\nuse tracing::{info, trace};\nuse wheelbuf::WheelBuf;\n\nuse casper_types::ProtocolVersion;\n\nuse super::{\n    sse_server::{BroadcastChannelMessage, Id, NewSubscriberInfo, ServerSentEvent},\n    Config, EventIndex, SseData,\n};\n\n/// Run the HTTP server.\n///\n/// * `server_with_shutdown` is the actual server as a future which can be gracefully shut down.\n/// * `server_shutdown_sender` is the channel by which the server will be notified to shut down.\n/// * `data_receiver` will provide the server with local events which should then be sent to all\n///   subscribed clients.\n/// * `broadcaster` is used by the server to send events to each subscribed client after receiving\n///   them via the `data_receiver`.\n/// * `new_subscriber_info_receiver` is used to notify the server of the details of a new client\n///   having subscribed to the event stream.  
It allows the server to populate that client's stream\n///   with the requested number of historical events.\npub(super) async fn run(\n    config: Config,\n    api_version: ProtocolVersion,\n    server_with_shutdown: impl Future<Output = ()> + Send + 'static,\n    server_shutdown_sender: oneshot::Sender<()>,\n    mut data_receiver: mpsc::UnboundedReceiver<(EventIndex, SseData)>,\n    broadcaster: broadcast::Sender<BroadcastChannelMessage>,\n    mut new_subscriber_info_receiver: mpsc::UnboundedReceiver<NewSubscriberInfo>,\n) {\n    let server_joiner = task::spawn(server_with_shutdown);\n\n    // Initialize the index and buffer for the SSEs.\n    let mut buffer = WheelBuf::new(vec![\n        ServerSentEvent::initial_event(api_version);\n        config.event_stream_buffer_length as usize\n    ]);\n\n    // Start handling received messages from the two channels; info on new client subscribers and\n    // incoming events announced by node components.\n    let event_stream_fut = async {\n        loop {\n            select! {\n                maybe_new_subscriber = new_subscriber_info_receiver.recv() => {\n                    if let Some(subscriber) = maybe_new_subscriber {\n                        // First send the client the `ApiVersion` event.  
We don't care if this\n                        // errors - the client may have disconnected already.\n                        let _ = subscriber\n                            .initial_events_sender\n                            .send(ServerSentEvent::initial_event(api_version));\n                        // If the client supplied a \"start_from\" index, provide the buffered events.\n                        // If they requested more than is buffered, just provide the whole buffer.\n                        if let Some(start_index) = subscriber.start_from {\n                            // If the buffer's first event ID is in the range [0, buffer size) or\n                            // (Id::MAX - buffer size, Id::MAX], then the events in the buffer are\n                            // considered to have their IDs wrapping round, or that was recently the\n                            // case.  In this case, we add `buffer.capacity()` to `start_index` and\n                            // the buffered events' IDs when considering which events to include in\n                            // the requested initial events, effectively shifting all the IDs past\n                            // the wrapping transition.\n                            let buffer_size = buffer.capacity() as Id;\n                            let in_wraparound_zone = buffer\n                                .iter()\n                                .next()\n                                .map(|event| {\n                                    let id = event.id.unwrap();\n                                    id > Id::MAX - buffer_size || id < buffer_size\n                                })\n                                .unwrap_or_default();\n                            for event in buffer.iter().skip_while(|event| {\n                                if in_wraparound_zone {\n                                    event.id.unwrap().wrapping_add(buffer_size)\n                                        < 
start_index.wrapping_add(buffer_size)\n                                } else {\n                                    event.id.unwrap() < start_index\n                                }\n                            }) {\n                                // As per sending `SSE_INITIAL_EVENT`, we don't care if this errors.\n                                let _ = subscriber.initial_events_sender.send(event.clone());\n                            }\n                        }\n                    }\n                }\n\n                maybe_data = data_receiver.recv() => {\n                    match maybe_data {\n                        Some((event_index, data)) => {\n                            // Buffer the data and broadcast it to subscribed clients.\n                            trace!(\"Event stream server received {:?}\", data);\n                            let event = ServerSentEvent { id: Some(event_index), data };\n                            buffer.push(event.clone());\n                            let message = BroadcastChannelMessage::ServerSentEvent(event);\n                            // This can validly fail if there are no connected clients, so don't log\n                            // the error.\n                            let _ = broadcaster.send(message);\n                        }\n                        None => {\n                            // The data sender has been dropped - exit the loop.\n                            info!(\"shutting down HTTP server\");\n                            break;\n                        }\n                    }\n                }\n            }\n        }\n    };\n\n    // Wait for the event stream future to exit, which will only happen if the last `data_sender`\n    // paired with `data_receiver` is dropped.  
`server_joiner` will never return here.\n    let _ = future::select(server_joiner, event_stream_fut.boxed()).await;\n\n    // Kill the event-stream handlers, and shut down the server.\n    let _ = broadcaster.send(BroadcastChannelMessage::Shutdown);\n    let _ = server_shutdown_sender.send(());\n\n    trace!(\"Event stream server stopped\");\n}\n"
  },
  {
    "path": "node/src/components/event_stream_server/sse_server.rs",
    "content": "//! Types and functions used by the http server to manage the event-stream.\n\nuse std::{\n    collections::{HashMap, HashSet},\n    net::SocketAddr,\n    sync::{Arc, RwLock},\n};\n\nuse datasize::DataSize;\nuse futures::{future, Stream, StreamExt};\nuse http::StatusCode;\nuse hyper::Body;\n#[cfg(test)]\nuse rand::Rng;\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\nuse tokio::sync::{\n    broadcast::{self, error::RecvError},\n    mpsc,\n};\nuse tokio_stream::wrappers::{\n    errors::BroadcastStreamRecvError, BroadcastStream, UnboundedReceiverStream,\n};\nuse tracing::{debug, error, info, warn};\nuse warp::{\n    addr,\n    filters::BoxedFilter,\n    path,\n    reject::Rejection,\n    reply::Response,\n    sse::{self, Event as WarpServerSentEvent},\n    Filter, Reply,\n};\n\nuse casper_types::{\n    contract_messages::Messages,\n    execution::{Effects, ExecutionResult},\n    Block, BlockHash, EraId, FinalitySignature, InitiatorAddr, ProtocolVersion, PublicKey,\n    TimeDiff, Timestamp, Transaction, TransactionHash,\n};\n#[cfg(test)]\nuse casper_types::{\n    execution::ExecutionResultV2, testing::TestRng, Deploy, TestBlockBuilder, TransactionV1,\n};\n\n/// The URL root path.\npub const SSE_API_PATH: &str = \"events\";\n/// The URL query string field name.\npub const QUERY_FIELD: &str = \"start_from\";\n\n/// The \"id\" field of the events sent on the event stream to clients.\npub type Id = u32;\n\n/// The \"data\" field of the events sent on the event stream to clients.\n#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug, DataSize, JsonSchema)]\npub enum SseData {\n    /// The version of this node's API server.  
This event will always be the first sent to a new\n    /// client, and will have no associated event ID provided.\n    #[data_size(skip)]\n    ApiVersion(ProtocolVersion),\n    /// The given block has been added to the linear chain and stored locally.\n    BlockAdded {\n        block_hash: BlockHash,\n        block: Box<Block>,\n    },\n    /// The given transaction has been newly-accepted by this node.\n    TransactionAccepted {\n        #[schemars(with = \"Transaction\", description = \"a transaction\")]\n        transaction: Arc<Transaction>,\n    },\n    /// The given transaction has been executed, committed and forms part of the given block.\n    TransactionProcessed {\n        transaction_hash: Box<TransactionHash>,\n        initiator_addr: Box<InitiatorAddr>,\n        timestamp: Timestamp,\n        ttl: TimeDiff,\n        block_hash: Box<BlockHash>,\n        //#[data_size(skip)]\n        execution_result: Box<ExecutionResult>,\n        messages: Messages,\n    },\n    /// The given transaction has expired.\n    TransactionExpired { transaction_hash: TransactionHash },\n    /// Generic representation of validator's fault in an era.\n    Fault {\n        era_id: EraId,\n        public_key: Box<PublicKey>,\n        timestamp: Timestamp,\n    },\n    /// New finality signature received.\n    FinalitySignature(Box<FinalitySignature>),\n    /// The execution effects produced by a `StepRequest`.\n    Step {\n        era_id: EraId,\n        execution_effects: Effects,\n    },\n    /// The node is about to shut down.\n    Shutdown,\n}\n\n#[cfg(test)]\nimpl SseData {\n    /// Returns a random `SseData::BlockAdded`.\n    pub(super) fn random_block_added(rng: &mut TestRng) -> Self {\n        let block = TestBlockBuilder::new().build(rng);\n        SseData::BlockAdded {\n            block_hash: *block.hash(),\n            block: Box::new(block.into()),\n        }\n    }\n\n    /// Returns a random `SseData::TransactionAccepted`, along with the random `Transaction`.\n    
pub(super) fn random_transaction_accepted(rng: &mut TestRng) -> (Self, Transaction) {\n        let txn = Transaction::random(rng);\n        let event = SseData::TransactionAccepted {\n            transaction: Arc::new(txn.clone()),\n        };\n        (event, txn)\n    }\n\n    /// Returns a random `SseData::TransactionProcessed`.\n    pub(super) fn random_transaction_processed(rng: &mut TestRng) -> Self {\n        let txn = Transaction::random(rng);\n        let (timestamp, ttl) = match &txn {\n            Transaction::Deploy(deploy) => (deploy.timestamp(), deploy.ttl()),\n            Transaction::V1(txn) => (txn.timestamp(), txn.ttl()),\n        };\n        let message_count = rng.gen_range(0..6);\n        let messages = std::iter::repeat_with(|| rng.gen())\n            .take(message_count)\n            .collect();\n\n        SseData::TransactionProcessed {\n            transaction_hash: Box::new(txn.hash()),\n            initiator_addr: Box::new(txn.initiator_addr()),\n            timestamp,\n            ttl,\n            block_hash: Box::new(BlockHash::random(rng)),\n            execution_result: Box::new(ExecutionResult::from(ExecutionResultV2::random(rng))),\n            messages,\n        }\n    }\n\n    /// Returns a random `SseData::TransactionExpired`\n    pub(super) fn random_transaction_expired(rng: &mut TestRng) -> Self {\n        let timestamp = Timestamp::now() - TimeDiff::from_seconds(20);\n        let ttl = TimeDiff::from_seconds(10);\n        let txn = if rng.gen() {\n            Transaction::from(Deploy::random_with_timestamp_and_ttl(rng, timestamp, ttl))\n        } else {\n            let txn = TransactionV1::random_with_timestamp_and_ttl(rng, Some(timestamp), Some(ttl));\n            Transaction::from(txn)\n        };\n\n        SseData::TransactionExpired {\n            transaction_hash: txn.hash(),\n        }\n    }\n\n    /// Returns a random `SseData::Fault`.\n    pub(super) fn random_fault(rng: &mut TestRng) -> Self {\n        
SseData::Fault {\n            era_id: EraId::new(rng.gen()),\n            public_key: Box::new(PublicKey::random(rng)),\n            timestamp: Timestamp::random(rng),\n        }\n    }\n\n    /// Returns a random `SseData::FinalitySignature`.\n    pub(super) fn random_finality_signature(rng: &mut TestRng) -> Self {\n        SseData::FinalitySignature(Box::new(FinalitySignature::random(rng)))\n    }\n\n    /// Returns a random `SseData::Step`.\n    pub(super) fn random_step(rng: &mut TestRng) -> Self {\n        let execution_effects = ExecutionResultV2::random(rng).effects;\n        SseData::Step {\n            era_id: EraId::new(rng.gen()),\n            execution_effects,\n        }\n    }\n}\n\n#[derive(Serialize)]\n#[serde(rename_all = \"PascalCase\")]\npub(super) struct TransactionAccepted {\n    pub(super) transaction_accepted: Arc<Transaction>,\n}\n\n/// The components of a single SSE.\n#[derive(Clone, PartialEq, Eq, Debug)]\npub(super) struct ServerSentEvent {\n    /// The ID should only be `None` where the `data` is `SseData::ApiVersion`.\n    pub(super) id: Option<Id>,\n    pub(super) data: SseData,\n}\n\nimpl ServerSentEvent {\n    /// The first event sent to every subscribing client.\n    pub(super) fn initial_event(client_api_version: ProtocolVersion) -> Self {\n        ServerSentEvent {\n            id: None,\n            data: SseData::ApiVersion(client_api_version),\n        }\n    }\n}\n\n/// The messages sent via the tokio broadcast channel to the handler of each client's SSE stream.\n#[derive(Clone, PartialEq, Eq, Debug)]\n#[allow(clippy::large_enum_variant)]\npub(super) enum BroadcastChannelMessage {\n    /// The message should be sent to the client as an SSE with an optional ID.  
The ID should only\n    /// be `None` where the `data` is `SseData::ApiVersion`.\n    ServerSentEvent(ServerSentEvent),\n    /// The stream should terminate as the server is shutting down.\n    ///\n    /// Note: ideally, we'd just drop all the tokio broadcast channel senders to make the streams\n    /// terminate naturally, but we can't drop the sender cloned into warp filter.\n    Shutdown,\n}\n\n/// Passed to the server whenever a new client subscribes.\npub(super) struct NewSubscriberInfo {\n    /// The event ID from which the stream should start for this client.\n    pub(super) start_from: Option<Id>,\n    /// A channel to send the initial events to the client's handler.  This will always send the\n    /// ApiVersion as the first event, and then any buffered events as indicated by `start_from`.\n    pub(super) initial_events_sender: mpsc::UnboundedSender<ServerSentEvent>,\n}\n\n/// Maps the `event` to a warp event, or `None` if it's a malformed event (ie.: `ApiVersion` event\n/// with `id` set or event other than `ApiVersion` without `id`)\nfn map_server_sent_event(\n    event: &ServerSentEvent,\n) -> Option<Result<WarpServerSentEvent, RecvError>> {\n    let id = match event.id {\n        Some(id) => {\n            if matches!(&event.data, &SseData::ApiVersion { .. }) {\n                error!(\"ApiVersion should have no event ID\");\n                return None;\n            }\n            id.to_string()\n        }\n        None => {\n            if !matches!(&event.data, &SseData::ApiVersion { .. }) {\n                error!(\"only ApiVersion may have no event ID\");\n                return None;\n            }\n            String::new()\n        }\n    };\n\n    match &event.data {\n        &SseData::ApiVersion { .. 
} => Some(Ok(WarpServerSentEvent::default()\n            .json_data(&event.data)\n            .unwrap_or_else(|error| {\n                warn!(%error, ?event, \"failed to jsonify sse event\");\n                WarpServerSentEvent::default()\n            }))),\n\n        &SseData::BlockAdded { .. }\n        | &SseData::TransactionProcessed { .. }\n        | &SseData::TransactionExpired { .. }\n        | &SseData::Fault { .. }\n        | &SseData::Step { .. }\n        | &SseData::FinalitySignature(_)\n        | &SseData::Shutdown => Some(Ok(WarpServerSentEvent::default()\n            .json_data(&event.data)\n            .unwrap_or_else(|error| {\n                warn!(%error, ?event, \"failed to jsonify sse event\");\n                WarpServerSentEvent::default()\n            })\n            .id(id))),\n\n        SseData::TransactionAccepted { transaction } => Some(Ok(WarpServerSentEvent::default()\n            .json_data(&TransactionAccepted {\n                transaction_accepted: Arc::clone(transaction),\n            })\n            .unwrap_or_else(|error| {\n                warn!(%error, \"failed to jsonify sse event\");\n                WarpServerSentEvent::default()\n            })\n            .id(event.id.unwrap().to_string()))),\n    }\n}\n\n/// Extracts the starting event ID from the provided query, or `None` if `query` is empty.\n///\n/// If `query` is not empty, returns a 422 response if `query` doesn't have exactly one entry,\n/// \"starts_from\" mapped to a value representing an event ID.\nfn parse_query(query: &HashMap<String, String>) -> Result<Option<Id>, Response> {\n    if query.is_empty() {\n        return Ok(None);\n    }\n\n    if query.len() > 1 {\n        return Err(create_422());\n    }\n\n    match query\n        .get(QUERY_FIELD)\n        .and_then(|id_str| id_str.parse::<Id>().ok())\n    {\n        Some(id) => Ok(Some(id)),\n        None => Err(create_422()),\n    }\n}\n\n/// Creates a 404 response with a useful error message in the 
body.\nfn create_404() -> Response {\n    let mut response = Response::new(Body::from(format!(\n        \"invalid path: expected '/{root}'\\n\",\n        root = SSE_API_PATH,\n    )));\n    *response.status_mut() = StatusCode::NOT_FOUND;\n    response\n}\n\n/// Creates a 422 response with a useful error message in the body for use in case of a bad query\n/// string.\nfn create_422() -> Response {\n    let mut response = Response::new(Body::from(format!(\n        \"invalid query: expected single field '{}=<EVENT ID>'\\n\",\n        QUERY_FIELD\n    )));\n    *response.status_mut() = StatusCode::UNPROCESSABLE_ENTITY;\n    response\n}\n\n/// Creates a 503 response (Service Unavailable) to be returned if the server has too many\n/// subscribers.\nfn create_503() -> Response {\n    let mut response = Response::new(Body::from(\"server has reached limit of subscribers\"));\n    *response.status_mut() = StatusCode::SERVICE_UNAVAILABLE;\n    response\n}\n\npub(super) struct ChannelsAndFilter {\n    pub(super) event_broadcaster: broadcast::Sender<BroadcastChannelMessage>,\n    pub(super) new_subscriber_info_receiver: mpsc::UnboundedReceiver<NewSubscriberInfo>,\n    pub(super) sse_filter: BoxedFilter<(Response,)>,\n}\n\nimpl ChannelsAndFilter {\n    /// Creates the message-passing channels required to run the event-stream server and the warp\n    /// filter for the event-stream server.\n    pub(super) fn new(broadcast_channel_size: usize, max_concurrent_subscribers: u32) -> Self {\n        // Create a channel to broadcast new events to all subscribed clients' streams.\n        let (event_broadcaster, _) = broadcast::channel(broadcast_channel_size);\n        let cloned_broadcaster = event_broadcaster.clone();\n\n        // Create a channel for `NewSubscriberInfo`s to pass the information required to handle a\n        // new client subscription.\n        let (new_subscriber_info_sender, new_subscriber_info_receiver) = mpsc::unbounded_channel();\n\n        let serve = move 
|query: HashMap<String, String>,\n                          maybe_remote_address: Option<SocketAddr>| {\n            let remote_address = match maybe_remote_address {\n                Some(address) => address.to_string(),\n                None => \"unknown\".to_string(),\n            };\n\n            // If we already have the maximum number of subscribers, reject this new one.\n            if cloned_broadcaster.receiver_count() >= max_concurrent_subscribers as usize {\n                info!(\n                    %remote_address,\n                    %max_concurrent_subscribers,\n                    \"event stream server has max subscribers: rejecting new one\"\n                );\n                return create_503();\n            }\n\n            let start_from = match parse_query(&query) {\n                Ok(maybe_id) => maybe_id,\n                Err(error_response) => return error_response,\n            };\n\n            // Create a channel for the client's handler to receive the stream of initial events.\n            let (initial_events_sender, initial_events_receiver) = mpsc::unbounded_channel();\n\n            // Supply the server with the sender part of the channel along with the client's\n            // requested starting point.\n            let new_subscriber_info = NewSubscriberInfo {\n                start_from,\n                initial_events_sender,\n            };\n            if new_subscriber_info_sender\n                .send(new_subscriber_info)\n                .is_err()\n            {\n                error!(\"failed to send new subscriber info\");\n            }\n\n            // Create a channel for the client's handler to receive the stream of ongoing events.\n            let ongoing_events_receiver = cloned_broadcaster.subscribe();\n\n            sse::reply(sse::keep_alive().stream(stream_to_client(\n                initial_events_receiver,\n                ongoing_events_receiver,\n                remote_address,\n            )))\n        
    .into_response()\n        };\n\n        let sse_filter = warp::get()\n            .and(path(SSE_API_PATH))\n            .and(path::end())\n            .and(warp::query())\n            .and(addr::remote())\n            .map(serve)\n            .or_else(|_| async move { Ok::<_, Rejection>((create_404(),)) })\n            .boxed();\n\n        ChannelsAndFilter {\n            event_broadcaster,\n            new_subscriber_info_receiver,\n            sse_filter,\n        }\n    }\n}\n\n/// This takes the two channel receivers and turns them into a stream of SSEs to the subscribed\n/// client.\n///\n/// The initial events receiver (an mpsc receiver) is exhausted first, and contains an initial\n/// `ApiVersion` message, followed by any historical events the client requested using the query\n/// string.\n///\n/// The ongoing events channel (a broadcast receiver) is then consumed, and will remain in use until\n/// either the client disconnects, or the server shuts down (indicated by sending a `Shutdown`\n/// variant via the channel).  
This channel will receive all SSEs created from the moment the client\n/// subscribed to the server's event stream.\n///\n/// It also takes an `EventFilter` which causes events to which the client didn't subscribe to be\n/// skipped.\nfn stream_to_client(\n    initial_events: mpsc::UnboundedReceiver<ServerSentEvent>,\n    ongoing_events: broadcast::Receiver<BroadcastChannelMessage>,\n    remote_address: String,\n) -> impl Stream<Item = Result<WarpServerSentEvent, RecvError>> + 'static {\n    // Keep a record of the IDs of the events delivered via the `initial_events` receiver.\n    let initial_stream_ids = Arc::new(RwLock::new(HashSet::new()));\n    let cloned_initial_ids = Arc::clone(&initial_stream_ids);\n\n    // Map the events arriving after the initial stream to the correct error type, filtering out any\n    // that have already been sent in the initial stream.\n    let ongoing_stream = BroadcastStream::new(ongoing_events)\n        .filter_map(move |result| {\n            let cloned_initial_ids = Arc::clone(&cloned_initial_ids);\n            let remote_address = remote_address.clone();\n            async move {\n                match result {\n                    Ok(BroadcastChannelMessage::ServerSentEvent(event)) => {\n                        if let Some(id) = event.id {\n                            if cloned_initial_ids.read().unwrap().contains(&id) {\n                                debug!(event_id=%id, \"skipped duplicate event\");\n                                return None;\n                            }\n                        }\n                        Some(Ok(event))\n                    }\n                    Ok(BroadcastChannelMessage::Shutdown) => Some(Err(RecvError::Closed)),\n                    Err(BroadcastStreamRecvError::Lagged(lagged_count)) => {\n                        info!(\n                            %remote_address,\n                            %lagged_count,\n                            \"client lagged: dropping event stream 
connection to client\",\n                        );\n                        Some(Err(RecvError::Lagged(lagged_count)))\n                    }\n                }\n            }\n        })\n        .take_while(|result| future::ready(!matches!(result, Err(RecvError::Closed))));\n\n    // Serve the initial events followed by the ongoing ones, filtering as dictated by the\n    // `event_filter`.\n    UnboundedReceiverStream::new(initial_events)\n        .map(move |event| {\n            if let Some(id) = event.id {\n                let _ = initial_stream_ids.write().unwrap().insert(id);\n            }\n            Ok(event)\n        })\n        .chain(ongoing_stream)\n        .filter_map(move |result| async move {\n            match result {\n                Ok(event) => map_server_sent_event(&event),\n                Err(error) => Some(Err(error)),\n            }\n        })\n}\n\n#[cfg(test)]\nmod tests {\n    use std::iter;\n\n    use casper_types::testing::TestRng;\n\n    use super::*;\n    use crate::logging;\n\n    /// This test checks that events from the initial stream which are duplicated in the\n    /// ongoing stream are filtered out.\n    #[tokio::test]\n    async fn should_filter_duplicate_events() {\n        // Returns `count` SSE events. The events will have sequential IDs starting from `start_id`.\n        fn make_events(rng: &mut TestRng, start_id: Id, count: usize) -> Vec<ServerSentEvent> {\n            (start_id..(start_id + count as u32))\n                .map(|id| ServerSentEvent {\n                    id: Some(id),\n                    data: SseData::random_finality_signature(rng),\n                })\n                .collect()\n        }\n\n        // Returns `NUM_ONGOING_EVENTS` SSE events containing duplicates taken from the end of the\n        // initial stream.  
Allows for the full initial stream to be duplicated except for\n        // its first event (the `ApiVersion` one) which has no ID.\n        fn make_ongoing_events(\n            rng: &mut TestRng,\n            duplicate_count: usize,\n            initial_events: &[ServerSentEvent],\n        ) -> Vec<ServerSentEvent> {\n            assert!(duplicate_count < initial_events.len());\n            let initial_skip_count = initial_events.len() - duplicate_count;\n            let unique_start_id = initial_events.len() as Id - 1;\n            let unique_count = NUM_ONGOING_EVENTS - duplicate_count;\n            initial_events\n                .iter()\n                .skip(initial_skip_count)\n                .cloned()\n                .chain(make_events(rng, unique_start_id, unique_count))\n                .collect()\n        }\n\n        // The number of events in the initial stream, excluding the very first `ApiVersion` one.\n        const NUM_INITIAL_EVENTS: usize = 10;\n        // The number of events in the ongoing stream, including any duplicated from the initial\n        // stream.\n        const NUM_ONGOING_EVENTS: usize = 20;\n\n        let _ = logging::init();\n        let mut rng = crate::new_rng();\n\n        let initial_events: Vec<ServerSentEvent> =\n            iter::once(ServerSentEvent::initial_event(ProtocolVersion::V1_0_0))\n                .chain(make_events(&mut rng, 0, NUM_INITIAL_EVENTS))\n                .collect();\n\n        // Run three cases; where only a single event is duplicated, where five are duplicated, and\n        // where the whole initial stream (except the `ApiVersion`) is duplicated.\n        for duplicate_count in &[1, 5, NUM_INITIAL_EVENTS] {\n            // Create the events with the requisite duplicates at the start of the collection.\n            let ongoing_events = make_ongoing_events(&mut rng, *duplicate_count, &initial_events);\n\n            let (initial_events_sender, initial_events_receiver) = mpsc::unbounded_channel();\n  
          let (ongoing_events_sender, ongoing_events_receiver) =\n                broadcast::channel(NUM_INITIAL_EVENTS + NUM_ONGOING_EVENTS + 1);\n\n            // Send all the events.\n            for event in initial_events.iter().cloned() {\n                initial_events_sender.send(event).unwrap();\n            }\n            for event in ongoing_events.iter().cloned() {\n                let _ = ongoing_events_sender\n                    .send(BroadcastChannelMessage::ServerSentEvent(event))\n                    .unwrap();\n            }\n            // Drop the channel senders so that the chained receiver streams can both complete.\n            drop(initial_events_sender);\n            drop(ongoing_events_sender);\n\n            // Collect the events emitted by `stream_to_client()` - should not contain duplicates.\n            let received_events: Vec<Result<WarpServerSentEvent, RecvError>> = stream_to_client(\n                initial_events_receiver,\n                ongoing_events_receiver,\n                \"127.0.0.1:3456\".to_string(),\n            )\n            .collect()\n            .await;\n\n            // Create the expected collection of emitted events.\n            let deduplicated_events: Vec<ServerSentEvent> = initial_events\n                .iter()\n                .take(initial_events.len() - duplicate_count)\n                .cloned()\n                .chain(ongoing_events)\n                .collect();\n\n            assert_eq!(received_events.len(), deduplicated_events.len());\n\n            // Iterate the received and expected collections, asserting that each matches.  
As we\n            // don't have access to the internals of the `WarpServerSentEvent`s, assert using their\n            // `String` representations.\n            for (received_event, deduplicated_event) in\n                received_events.iter().zip(deduplicated_events.iter())\n            {\n                let received_event = received_event.as_ref().unwrap();\n                let expected_data_string = serde_json::to_string(&deduplicated_event.data).unwrap();\n\n                let expected_id_string = if let Some(id) = deduplicated_event.id {\n                    format!(\"\\nid:{}\", id)\n                } else {\n                    String::new()\n                };\n\n                let expected_string =\n                    format!(\"data:{}{}\", expected_data_string, expected_id_string);\n\n                assert_eq!(received_event.to_string().trim(), expected_string)\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/event_stream_server/tests.rs",
    "content": "use std::{\n    collections::HashMap,\n    error::Error,\n    fs, io,\n    iter::{self, FromIterator},\n    str,\n    sync::{\n        atomic::{AtomicBool, Ordering},\n        Arc,\n    },\n    time::Duration,\n};\n\nuse bytes::{Buf, Bytes};\nuse futures::{join, StreamExt};\nuse http::StatusCode;\nuse pretty_assertions::assert_eq;\nuse reqwest::Response;\nuse schemars::schema_for;\nuse tempfile::TempDir;\nuse tokio::{\n    sync::{Barrier, Notify},\n    task::{self, JoinHandle},\n    time,\n};\nuse tracing::debug;\n\nuse casper_types::testing::TestRng;\n\nuse super::*;\nuse crate::{logging, testing::assert_schema};\nuse sse_server::{Id, TransactionAccepted, QUERY_FIELD, SSE_API_PATH as ROOT_PATH};\n\n/// The total number of random events `EventStreamServer` will emit by default, excluding the\n/// initial `ApiVersion` event.\nconst EVENT_COUNT: u32 = 100;\n/// The maximum number of random events `EventStreamServer` will emit, excluding the initial\n/// `ApiVersion` event.\nconst MAX_EVENT_COUNT: u32 = 100_000_000;\n/// The event stream buffer length, set in the server's config.  Set to half of the total event\n/// count to allow for the buffer purging events in the test.\nconst BUFFER_LENGTH: u32 = EVENT_COUNT / 2;\n/// The maximum amount of time to wait for a test server to complete.  
If this time is exceeded, the\n/// test has probably hung, and should be deemed to have failed.\nconst MAX_TEST_TIME: Duration = Duration::from_secs(2);\n/// The duration of the sleep called between each event being sent by the server.\nconst DELAY_BETWEEN_EVENTS: Duration = Duration::from_millis(1);\n\n/// A helper to allow the synchronization of a single client joining the SSE server.\n///\n/// It provides the primitives to allow the client to connect to the server just before a specific\n/// event is emitted by the server.\n#[derive(Clone)]\nstruct ClientSyncBehavior {\n    /// The event ID before which the server should wait at the barrier for the client to join.\n    join_before_event: Id,\n    /// The barrier to sync the client joining the server.\n    barrier: Arc<Barrier>,\n}\n\nimpl ClientSyncBehavior {\n    fn new(join_before_event: Id) -> (Self, Arc<Barrier>) {\n        let barrier = Arc::new(Barrier::new(2));\n        let behavior = ClientSyncBehavior {\n            join_before_event,\n            barrier: Arc::clone(&barrier),\n        };\n        (behavior, barrier)\n    }\n}\n\n/// A helper defining the behavior of the server.\n#[derive(Clone)]\nstruct ServerBehavior {\n    /// Whether the server should have a delay between sending events, to allow a client to keep up\n    /// and not be disconnected for lagging.\n    has_delay_between_events: bool,\n    /// Whether the server should send all events once, or keep repeating the batch up until\n    /// `MAX_EVENT_COUNT` have been sent.\n    repeat_events: bool,\n    /// If `Some`, sets the `max_concurrent_subscribers` server config value, otherwise uses the\n    /// config default.\n    max_concurrent_subscribers: Option<u32>,\n    clients: Vec<ClientSyncBehavior>,\n}\n\nimpl ServerBehavior {\n    /// Returns a default new `ServerBehavior`.\n    ///\n    /// It has a small delay between events, and sends the collection of random events once.\n    fn new() -> Self {\n        ServerBehavior {\n           
 has_delay_between_events: true,\n            repeat_events: false,\n            max_concurrent_subscribers: None,\n            clients: Vec::new(),\n        }\n    }\n\n    /// Returns a new `ServerBehavior` suitable for testing lagging clients.\n    ///\n    /// It has no delay between events, and sends the collection of random events repeatedly up to a\n    /// maximum of `MAX_EVENT_COUNT` events.\n    fn new_for_lagging_test() -> Self {\n        ServerBehavior {\n            has_delay_between_events: false,\n            repeat_events: true,\n            max_concurrent_subscribers: None,\n            clients: Vec::new(),\n        }\n    }\n\n    /// Adds a client sync behavior, specified for the client to connect to the server just before\n    /// `id` is emitted.\n    fn add_client_sync_before_event(&mut self, id: Id) -> Arc<Barrier> {\n        let (client_behavior, barrier) = ClientSyncBehavior::new(id);\n        self.clients.push(client_behavior);\n        barrier\n    }\n\n    /// Sets the `max_concurrent_subscribers` server config value.\n    fn set_max_concurrent_subscribers(&mut self, count: u32) {\n        self.max_concurrent_subscribers = Some(count);\n    }\n\n    /// Waits for all clients which specified they wanted to join just before the given event ID.\n    async fn wait_for_clients(&self, id: Id) {\n        for client_behavior in &self.clients {\n            if client_behavior.join_before_event == id {\n                debug!(\"server waiting before event {}\", id);\n                client_behavior.barrier.wait().await;\n                debug!(\"server waiting for client to connect before event {}\", id);\n                client_behavior.barrier.wait().await;\n                debug!(\"server finished waiting before event {}\", id);\n            }\n        }\n    }\n\n    /// Sleeps if `self` was set to enable delays between events.\n    async fn sleep_if_required(&self) {\n        if self.has_delay_between_events {\n            
time::sleep(DELAY_BETWEEN_EVENTS).await;\n        } else {\n            task::yield_now().await;\n        }\n    }\n}\n\n/// A helper to allow the server to be kept alive until a specific call to stop it.\n#[derive(Clone)]\nstruct ServerStopper {\n    should_stop: Arc<AtomicBool>,\n    notifier: Arc<Notify>,\n}\n\nimpl ServerStopper {\n    fn new() -> Self {\n        ServerStopper {\n            should_stop: Arc::new(AtomicBool::new(false)),\n            notifier: Arc::new(Notify::new()),\n        }\n    }\n\n    /// Returns whether the server should stop now or not.\n    fn should_stop(&self) -> bool {\n        self.should_stop.load(Ordering::SeqCst)\n    }\n\n    /// Waits until the server should stop.\n    async fn wait(&self) {\n        while !self.should_stop() {\n            self.notifier.notified().await;\n        }\n    }\n\n    /// Tells the server to stop.\n    fn stop(&self) {\n        self.should_stop.store(true, Ordering::SeqCst);\n        self.notifier.notify_one();\n    }\n}\n\nimpl Drop for ServerStopper {\n    fn drop(&mut self) {\n        self.stop();\n    }\n}\n\nstruct TestFixture {\n    storage_dir: TempDir,\n    protocol_version: ProtocolVersion,\n    events: Vec<SseData>,\n    first_event_id: Id,\n    server_join_handle: Option<JoinHandle<()>>,\n    server_stopper: ServerStopper,\n}\n\nimpl TestFixture {\n    /// Constructs a new `TestFixture` including `EVENT_COUNT` random events ready to be served.\n    fn new(rng: &mut TestRng) -> Self {\n        const DISTINCT_EVENTS_COUNT: u32 = 7;\n\n        let _ = logging::init();\n        let storage_dir = tempfile::tempdir().unwrap();\n        fs::create_dir_all(&storage_dir).unwrap();\n        let protocol_version = ProtocolVersion::from_parts(1, 2, 3);\n\n        let mut txns = HashMap::new();\n        let events = (0..EVENT_COUNT)\n            .map(|i| match i % DISTINCT_EVENTS_COUNT {\n                0 => SseData::random_block_added(rng),\n                1 => {\n                    let (event, 
txn) = SseData::random_transaction_accepted(rng);\n                    assert!(txns.insert(txn.hash(), txn).is_none());\n                    event\n                }\n                2 => SseData::random_transaction_processed(rng),\n                3 => SseData::random_transaction_expired(rng),\n                4 => SseData::random_fault(rng),\n                5 => SseData::random_step(rng),\n                6 => SseData::random_finality_signature(rng),\n                _ => unreachable!(),\n            })\n            .collect();\n\n        TestFixture {\n            storage_dir,\n            protocol_version,\n            events,\n            first_event_id: 0,\n            server_join_handle: None,\n            server_stopper: ServerStopper::new(),\n        }\n    }\n\n    /// Creates a new `EventStreamServer` and runs it in a tokio task, returning the actual address\n    /// the server is listening on.\n    ///\n    /// Only one server can be run at a time; this panics if there is already a server task running.\n    ///\n    /// The server emits a clone of each of the random events held by the `TestFixture`, in the\n    /// order in which they're held in the `TestFixture`.\n    ///\n    /// The server runs until `TestFixture::stop_server()` is called, or the `TestFixture` is\n    /// dropped.\n    async fn run_server(&mut self, server_behavior: ServerBehavior) -> SocketAddr {\n        if self.server_join_handle.is_some() {\n            panic!(\"one `TestFixture` can only run one server at a time\");\n        }\n        self.server_stopper = ServerStopper::new();\n\n        // Set the server to use a channel buffer of half the total events it will emit, unless\n        // we're running with no delay between events, in which case set a minimal buffer as we're\n        // trying to cause clients to get ejected for lagging.\n        let config = Config {\n            event_stream_buffer_length: if server_behavior.has_delay_between_events {\n                
BUFFER_LENGTH\n            } else {\n                1\n            },\n            max_concurrent_subscribers: server_behavior\n                .max_concurrent_subscribers\n                .unwrap_or(Config::default().max_concurrent_subscribers),\n            ..Default::default()\n        };\n        let mut server = EventStreamServer::new(\n            config,\n            self.storage_dir.path().to_path_buf(),\n            self.protocol_version,\n        );\n        server.listen().unwrap();\n        assert!(server.sse_server.is_some());\n\n        self.first_event_id = server\n            .sse_server\n            .as_ref()\n            .unwrap()\n            .event_indexer\n            .current_index();\n\n        let first_event_id = server\n            .sse_server\n            .as_ref()\n            .unwrap()\n            .event_indexer\n            .current_index();\n        let server_address = server.sse_server.as_ref().unwrap().listening_address;\n        let events = self.events.clone();\n        let server_stopper = self.server_stopper.clone();\n\n        let join_handle = tokio::spawn(async move {\n            let event_count = if server_behavior.repeat_events {\n                MAX_EVENT_COUNT\n            } else {\n                EVENT_COUNT\n            };\n            for (id, event) in events.iter().cycle().enumerate().take(event_count as usize) {\n                if server_stopper.should_stop() {\n                    debug!(\"stopping server early\");\n                    return;\n                }\n                server_behavior\n                    .wait_for_clients((id as Id).wrapping_add(first_event_id))\n                    .await;\n                let _ = server.broadcast(event.clone());\n                server_behavior.sleep_if_required().await;\n            }\n\n            // Keep the server running until told to stop.  
Clients connecting from now will only\n            // receive keepalives.\n            debug!(\"server finished sending all events\");\n            server_stopper.wait().await;\n            debug!(\"server stopped\");\n        });\n\n        self.server_join_handle = Some(join_handle);\n\n        server_address\n    }\n\n    /// Stops the currently-running server, if any, panicking if unable to stop the server within\n    /// `MAX_TEST_TIME`.\n    ///\n    /// Must be called and awaited before starting a new server with this particular `TestFixture`.\n    ///\n    /// Should be called in every test where a server has been started, since this will ensure\n    /// failed tests won't hang indefinitely.\n    async fn stop_server(&mut self) {\n        let join_handle = match self.server_join_handle.take() {\n            Some(join_handle) => join_handle,\n            None => return,\n        };\n        self.server_stopper.stop();\n        time::timeout(MAX_TEST_TIME, join_handle)\n            .await\n            .expect(\"stopping server timed out (test hung)\")\n            .expect(\"server task should not error\");\n    }\n\n    /// Returns all the events which would have been received by a client, where the client\n    /// connected just before `from` was emitted from the server.  
This includes the initial\n    /// `ApiVersion` event.\n    ///\n    /// Also returns the last event's ID,\n    fn events_filtered_by_id(&self, from: Id) -> (Vec<ReceivedEvent>, Id) {\n        // Convert the IDs to `u128`s to cater for wrapping and add `Id::MAX + 1` to `from` if the\n        // buffer wrapped and `from` represents an event from after the wrap.\n        let threshold = Id::MAX - EVENT_COUNT;\n        let from = if self.first_event_id >= threshold && from < threshold {\n            from as u128 + Id::MAX as u128 + 1\n        } else {\n            from as u128\n        };\n\n        let id_filter = |id: u128, event: &SseData| -> Option<ReceivedEvent> {\n            if id < from {\n                return None;\n            }\n\n            let data = match event {\n                SseData::TransactionAccepted { transaction } => {\n                    serde_json::to_string(&TransactionAccepted {\n                        transaction_accepted: Arc::clone(transaction),\n                    })\n                    .unwrap()\n                }\n                _ => serde_json::to_string(event).unwrap(),\n            };\n\n            Some(ReceivedEvent {\n                id: Some(id as Id),\n                data,\n            })\n        };\n\n        let api_version_event = ReceivedEvent {\n            id: None,\n            data: serde_json::to_string(&SseData::ApiVersion(self.protocol_version)).unwrap(),\n        };\n\n        let events: Vec<_> = iter::once(api_version_event)\n            .chain(self.events.iter().enumerate().filter_map(|(id, event)| {\n                let id = id as u128 + self.first_event_id as u128;\n                id_filter(id, event)\n            }))\n            .collect();\n\n        let final_id = events\n            .last()\n            .expect(\"should have events\")\n            .id\n            .expect(\"should have ID\");\n\n        (events, final_id)\n    }\n\n    /// Returns all the events which would have been received 
by a client connected from server\n    /// startup, including the initial `ApiVersion` event.\n    ///\n    /// Also returns the last event's ID.\n    fn all_events(&self) -> (Vec<ReceivedEvent>, Id) {\n        self.events_filtered_by_id(self.first_event_id)\n    }\n}\n\n/// Returns the URL for a client to use to connect to the server at the given address.\n///\n/// The URL is `/events` with `?start_from=X` query string appended if\n/// `maybe_start_from` is `Some`.\nfn make_url(server_address: SocketAddr, maybe_start_from: Option<Id>) -> String {\n    format!(\n        \"http://{}/{}/{}\",\n        server_address,\n        ROOT_PATH,\n        match maybe_start_from {\n            Some(start_from) => format!(\"?{}={}\", QUERY_FIELD, start_from),\n            None => String::new(),\n        }\n    )\n}\n\n/// The representation of an SSE event as received by a subscribed client.\n#[derive(Clone, Debug, Eq, PartialEq)]\nstruct ReceivedEvent {\n    id: Option<Id>,\n    data: String,\n}\n\n/// Runs a client, consuming all SSE events until the server has emitted the event with ID\n/// `final_event_id`.\n///\n/// If the client receives a keepalive (i.e. 
`:`), it panics, as the server has no further events to\n/// emit.\n///\n/// The client waits at the barrier before connecting to the server, and then again immediately\n/// after connecting to ensure the server doesn't start sending events before the client is\n/// connected.\nasync fn subscribe(\n    url: &str,\n    barrier: Arc<Barrier>,\n    final_event_id: Id,\n    client_id: &str,\n) -> Result<Vec<ReceivedEvent>, reqwest::Error> {\n    debug!(\"{} waiting before connecting via {}\", client_id, url);\n    barrier.wait().await;\n    let response = reqwest::get(url).await?;\n    debug!(\"{} waiting after connecting\", client_id);\n    barrier.wait().await;\n    debug!(\"{} finished waiting\", client_id);\n    handle_response(response, final_event_id, client_id).await\n}\n\n/// Runs a client, consuming all SSE events until the server has emitted the event with ID\n/// `final_event_id`.\n///\n/// If the client receives a keepalive (i.e. `:`), it panics, as the server has no further events to\n/// emit.\n///\n/// There is no synchronization between client and server regarding the client joining.  In most\n/// tests such synchronization is required, in which case `subscribe()` should be used.\nasync fn subscribe_no_sync(\n    url: &str,\n    final_event_id: Id,\n    client_id: &str,\n) -> Result<Vec<ReceivedEvent>, reqwest::Error> {\n    debug!(\"{} about to connect via {}\", client_id, url);\n    let response = reqwest::get(url).await?;\n    debug!(\"{} has connected\", client_id);\n    handle_response(response, final_event_id, client_id).await\n}\n\n/// Converts some bytes to a `String`.\n///\n/// If `maybe_previous_bytes` is `Some`, these bytes are prepended to `new_bytes`.  
If a string\n/// cannot be constructed from the resulting bytes, the bytes are returned as an `Err`.\nfn bytes_to_string(\n    maybe_previous_bytes: &mut Option<Bytes>,\n    new_bytes: Bytes,\n) -> Result<String, Bytes> {\n    let bytes = if let Some(previous_bytes) = maybe_previous_bytes.take() {\n        Bytes::from_iter(previous_bytes.chain(new_bytes))\n    } else {\n        new_bytes\n    };\n    str::from_utf8(bytes.as_ref())\n        .map(ToString::to_string)\n        .map_err(|_| bytes)\n}\n\n/// Handles a response from the server.\nasync fn handle_response(\n    response: Response,\n    final_event_id: Id,\n    client_id: &str,\n) -> Result<Vec<ReceivedEvent>, reqwest::Error> {\n    if response.status() == StatusCode::SERVICE_UNAVAILABLE {\n        debug!(\"{} rejected by server: too many clients\", client_id);\n        assert_eq!(\n            response.text().await.unwrap(),\n            \"server has reached limit of subscribers\"\n        );\n        return Ok(Vec::new());\n    }\n\n    // The stream from the server is not always chunked into events, so gather the stream into a\n    // single `String` until we receive a keepalive.\n    let mut response_text = String::new();\n    let mut stream = response.bytes_stream();\n    let final_id_line = format!(\"id:{}\", final_event_id);\n    let keepalive = \":\";\n    let mut temp_bytes: Option<Bytes> = None;\n    while let Some(item) = stream.next().await {\n        // If the server crashes or returns an error in the stream, it is caught here as `item`\n        // will be an `Err`.\n        let new_bytes = item?;\n        let chunk = match bytes_to_string(&mut temp_bytes, new_bytes) {\n            Ok(chunk) => chunk,\n            Err(bytes) => {\n                // We got a chunk splitting a unicode scalar value - dump the data to `temp_bytes`\n                // and get the next chunk from the stream.\n                temp_bytes = Some(bytes);\n                continue;\n            }\n        };\n        
response_text.push_str(&chunk);\n        if let Some(line) = response_text\n            .lines()\n            .find(|&line| line == final_id_line || line == keepalive)\n        {\n            if line == keepalive {\n                panic!(\"{} received keepalive\", client_id);\n            }\n            debug!(\n                \"{} received final event ID {}: exiting\",\n                client_id, final_event_id\n            );\n            break;\n        }\n    }\n\n    Ok(parse_response(response_text, client_id))\n}\n\n/// Iterate the lines of the response body.  Each line should be one of\n///   * an SSE event: line starts with \"data:\" and the remainder of the line is a JSON object\n///   * an SSE event ID: line starts with \"id:\" and the remainder is a decimal encoded `u32`\n///   * empty\n///   * a keepalive: line contains exactly \":\"\n///\n/// The expected order is:\n///   * data:<JSON-encoded ApiVersion> (note, no ID line follows this first event) then the\n///     following three repeated for as many events as are applicable to that stream:\n///   * data:<JSON-encoded event>\n///   * id:<integer>\n///   * empty line\n///\n/// then finally, repeated keepalive lines until the server is shut down.\nfn parse_response(response_text: String, client_id: &str) -> Vec<ReceivedEvent> {\n    let mut received_events = Vec::new();\n    let mut line_itr = response_text.lines();\n    while let Some(data_line) = line_itr.next() {\n        let data = match data_line.strip_prefix(\"data:\") {\n            Some(data_str) => data_str.to_string(),\n            None => {\n                if data_line.trim().is_empty() || data_line.trim() == \":\" {\n                    continue;\n                } else {\n                    panic!(\n                        \"{}: data line should start with 'data:'\\n{}\",\n                        client_id, data_line\n                    )\n                }\n            }\n        };\n\n        let id_line = match line_itr.next() {\n   
         Some(line) => line,\n            None => break,\n        };\n\n        let id = match id_line.strip_prefix(\"id:\") {\n            Some(id_str) => Some(id_str.parse().unwrap_or_else(|_| {\n                panic!(\"{}: failed to get ID line from:\\n{}\", client_id, id_line)\n            })),\n            None => {\n                if id_line.trim().is_empty() && received_events.is_empty() {\n                    None\n                } else if id_line.trim() == \":\" {\n                    continue;\n                } else {\n                    panic!(\n                        \"{}: every event must have an ID except the first one\",\n                        client_id\n                    );\n                }\n            }\n        };\n\n        received_events.push(ReceivedEvent { id, data });\n    }\n    received_events\n}\n\n/// Client setup:\n///   * `<IP:port>/events`\n///   * no `?start_from=` query\n///   * connected before first event\n///\n/// Expected to receive all events depending on `filter`.\n#[tokio::test(flavor = \"multi_thread\", worker_threads = 2)]\nasync fn should_serve_events_with_no_query() {\n    let mut rng = crate::new_rng();\n    let mut fixture = TestFixture::new(&mut rng);\n\n    let mut server_behavior = ServerBehavior::new();\n    let barrier = server_behavior.add_client_sync_before_event(0);\n    let server_address = fixture.run_server(server_behavior).await;\n\n    let url = make_url(server_address, None);\n    let (expected_events, final_id) = fixture.all_events();\n    let received_events = subscribe(&url, barrier, final_id, \"client\").await.unwrap();\n    fixture.stop_server().await;\n\n    assert_eq!(received_events, expected_events);\n}\n\n/// Client setup:\n///   * `<IP:port>/events?start_from=25`\n///   * connected just before event ID 50\n#[tokio::test(flavor = \"multi_thread\", worker_threads = 2)]\nasync fn should_serve_events_with_query() {\n    let mut rng = crate::new_rng();\n    let mut fixture = 
TestFixture::new(&mut rng);\n\n    let connect_at_event_id = BUFFER_LENGTH;\n    let start_from_event_id = BUFFER_LENGTH / 2;\n\n    let mut server_behavior = ServerBehavior::new();\n    let barrier = server_behavior.add_client_sync_before_event(connect_at_event_id);\n    let server_address = fixture.run_server(server_behavior).await;\n\n    let url = make_url(server_address, Some(start_from_event_id));\n    let (expected_events, final_id) = fixture.events_filtered_by_id(start_from_event_id);\n    let received_events = subscribe(&url, barrier, final_id, \"client\").await.unwrap();\n    fixture.stop_server().await;\n\n    assert_eq!(received_events, expected_events);\n}\n\n/// Client setup:\n///   * `<IP:port>/events?start_from=0`\n///   * connected just before event ID 75\n#[tokio::test(flavor = \"multi_thread\", worker_threads = 2)]\nasync fn should_serve_remaining_events_with_query() {\n    let mut rng = crate::new_rng();\n    let mut fixture = TestFixture::new(&mut rng);\n\n    let connect_at_event_id = BUFFER_LENGTH * 3 / 2;\n    let start_from_event_id = 0;\n\n    let mut server_behavior = ServerBehavior::new();\n    let barrier = server_behavior.add_client_sync_before_event(connect_at_event_id);\n    let server_address = fixture.run_server(server_behavior).await;\n\n    let url = make_url(server_address, Some(start_from_event_id));\n    let expected_first_event = connect_at_event_id - BUFFER_LENGTH;\n    let (expected_events, final_id) = fixture.events_filtered_by_id(expected_first_event);\n    let received_events = subscribe(&url, barrier, final_id, \"client\").await.unwrap();\n    fixture.stop_server().await;\n\n    assert_eq!(received_events, expected_events);\n}\n\n/// Client setup:\n///   * `<IP:port>/events?start_from=25`\n///   * connected before first event\n#[tokio::test(flavor = \"multi_thread\", worker_threads = 2)]\nasync fn should_serve_events_with_query_for_future_event() {\n    let mut rng = crate::new_rng();\n    let mut fixture = 
TestFixture::new(&mut rng);\n\n    let mut server_behavior = ServerBehavior::new();\n    let barrier = server_behavior.add_client_sync_before_event(0);\n    let server_address = fixture.run_server(server_behavior).await;\n\n    let url = make_url(server_address, Some(25));\n    let (expected_events, final_id) = fixture.all_events();\n    let received_events = subscribe(&url, barrier, final_id, \"client\").await.unwrap();\n    fixture.stop_server().await;\n\n    assert_eq!(received_events, expected_events);\n}\n\n/// Checks that when a server is shut down (e.g. for a node upgrade), connected clients don't have\n/// an error while handling the HTTP response.\n#[tokio::test(flavor = \"multi_thread\", worker_threads = 2)]\nasync fn server_exit_should_gracefully_shut_down_stream() {\n    let mut rng = crate::new_rng();\n    let mut fixture = TestFixture::new(&mut rng);\n\n    // Start the server, waiting for three clients to connect.\n    let mut server_behavior = ServerBehavior::new();\n    let barrier1 = server_behavior.add_client_sync_before_event(0);\n    let barrier2 = server_behavior.add_client_sync_before_event(0);\n    let barrier3 = server_behavior.add_client_sync_before_event(0);\n    let server_address = fixture.run_server(server_behavior).await;\n\n    let url1 = make_url(server_address, None);\n\n    // Run the three clients, and stop the server after a short delay.\n    let (received_events1, received_events2, received_events3, _) = join!(\n        subscribe(&url1, barrier1, EVENT_COUNT, \"client 1\"),\n        subscribe(&url1, barrier2, EVENT_COUNT, \"client 2\"),\n        subscribe(&url1, barrier3, EVENT_COUNT, \"client 3\"),\n        async {\n            time::sleep(DELAY_BETWEEN_EVENTS * EVENT_COUNT / 2).await;\n            fixture.stop_server().await\n        }\n    );\n\n    // Ensure all clients' streams terminated without error.\n    let received_events1 = received_events1.unwrap();\n    let received_events2 = received_events2.unwrap();\n    let 
received_events3 = received_events3.unwrap();\n\n    // Ensure all clients received some events...\n    assert!(!received_events1.is_empty());\n    assert!(!received_events2.is_empty());\n    assert!(!received_events3.is_empty());\n\n    // ...but not the full set they would have if the server hadn't stopped early.\n    assert!(received_events1.len() < fixture.all_events().0.len());\n    assert!(received_events2.len() < fixture.all_events().0.len());\n    assert!(received_events3.len() < fixture.all_events().0.len());\n\n    // Ensure all clients received a `Shutdown` event as the final one.\n    assert_eq!(\n        received_events1.last().unwrap().data,\n        serde_json::to_string(&SseData::Shutdown).unwrap()\n    );\n    assert_eq!(\n        received_events2.last().unwrap().data,\n        serde_json::to_string(&SseData::Shutdown).unwrap()\n    );\n    assert_eq!(\n        received_events3.last().unwrap().data,\n        serde_json::to_string(&SseData::Shutdown).unwrap()\n    );\n}\n\n/// Checks that clients which don't consume the events in a timely manner are forcibly disconnected\n/// by the server.\n#[tokio::test(flavor = \"multi_thread\", worker_threads = 2)]\nasync fn lagging_clients_should_be_disconnected() {\n    // Similar to the `subscribe()` function, except this has a long pause at the start and short\n    // pauses after each read.\n    //\n    // The objective is to create backpressure by filling the client's receive buffer, then filling\n    // the server's send buffer, which in turn causes the server's internal broadcast channel to\n    // deem that client as lagging.\n    async fn subscribe_slow(\n        url: &str,\n        barrier: Arc<Barrier>,\n        client_id: &str,\n    ) -> Result<(), reqwest::Error> {\n        barrier.wait().await;\n        let response = reqwest::get(url).await.unwrap();\n        barrier.wait().await;\n\n        time::sleep(Duration::from_secs(5)).await;\n\n        let mut stream = response.bytes_stream();\n        
let pause_between_events = Duration::from_secs(100) / MAX_EVENT_COUNT;\n        let mut temp_bytes: Option<Bytes> = None;\n        while let Some(item) = stream.next().await {\n            // The function is expected to exit here with an `UnexpectedEof` error.\n            let new_bytes = item?;\n            let chunk = match bytes_to_string(&mut temp_bytes, new_bytes) {\n                Ok(chunk) => chunk,\n                Err(bytes) => {\n                    temp_bytes = Some(bytes);\n                    continue;\n                }\n            };\n            if chunk.lines().any(|line| line == \":\") {\n                debug!(\"{} received keepalive: exiting\", client_id);\n                break;\n            }\n            time::sleep(pause_between_events).await;\n        }\n\n        Ok(())\n    }\n\n    let mut rng = crate::new_rng();\n    let mut fixture = TestFixture::new(&mut rng);\n\n    // Start the server, setting it to run with no delay between sending each event.  It will send\n    // at most `MAX_EVENT_COUNT` events, but the clients' futures should return before that, having\n    // been disconnected for lagging.\n    let mut server_behavior = ServerBehavior::new_for_lagging_test();\n    let barrier = server_behavior.add_client_sync_before_event(0);\n    let server_address = fixture.run_server(server_behavior).await;\n\n    let url = make_url(server_address, None);\n\n    // Run the slow clients, then stop the server.\n    let result_slow = subscribe_slow(&url, barrier, \"client 1\").await;\n    fixture.stop_server().await;\n\n    // Ensure both slow clients' streams terminated with an `UnexpectedEof` error.\n    let check_error = |result: Result<(), reqwest::Error>| {\n        let kind = result\n            .unwrap_err()\n            .source()\n            .expect(\"reqwest::Error should have source\")\n            .downcast_ref::<hyper::Error>()\n            .expect(\"reqwest::Error's source should be a hyper::Error\")\n            .source()\n    
        .expect(\"hyper::Error should have source\")\n            .downcast_ref::<io::Error>()\n            .expect(\"hyper::Error's source should be a std::io::Error\")\n            .kind();\n        assert!(matches!(kind, io::ErrorKind::UnexpectedEof));\n    };\n    check_error(result_slow);\n}\n\n/// Checks that clients using the correct <IP:Port> but wrong path get a helpful error response.\n#[tokio::test(flavor = \"multi_thread\", worker_threads = 2)]\nasync fn should_handle_bad_url_path() {\n    let mut rng = crate::new_rng();\n    let mut fixture = TestFixture::new(&mut rng);\n\n    let server_address = fixture.run_server(ServerBehavior::new()).await;\n\n    #[rustfmt::skip]\n        let urls = [\n        format!(\"http://{}\", server_address),\n        format!(\"http://{}?{}=0\", server_address, QUERY_FIELD),\n        format!(\"http://{}/bad\", server_address),\n        format!(\"http://{}/bad?{}=0\", server_address, QUERY_FIELD),\n        format!(\"http://{}/{}?{}=0\", server_address, QUERY_FIELD, ROOT_PATH),\n        format!(\"http://{}/{}/bad\", server_address, ROOT_PATH),\n        format!(\"http://{}/{}/bad?{}=0\", server_address, QUERY_FIELD, ROOT_PATH),\n    ];\n\n    let expected_body = format!(\"invalid path: expected '/{0}'\", ROOT_PATH);\n    for url in &urls {\n        let response = reqwest::get(url).await.unwrap();\n        assert_eq!(response.status(), StatusCode::NOT_FOUND, \"URL: {}\", url);\n        assert_eq!(\n            response.text().await.unwrap().trim(),\n            &expected_body,\n            \"URL: {}\",\n            url\n        );\n    }\n\n    fixture.stop_server().await;\n}\n\n/// Checks that clients using the correct <IP:Port/path> but wrong query get a helpful error\n/// response.\n#[tokio::test(flavor = \"multi_thread\", worker_threads = 2)]\nasync fn should_handle_bad_url_query() {\n    let mut rng = crate::new_rng();\n    let mut fixture = TestFixture::new(&mut rng);\n\n    let server_address = 
fixture.run_server(ServerBehavior::new()).await;\n\n    let url = format!(\"http://{}/{}\", server_address, ROOT_PATH);\n    let urls = [\n        format!(\"{}?not-a-kv-pair\", url),\n        format!(\"{}?start_fro=0\", url),\n        format!(\"{}?{}=not-integer\", url, QUERY_FIELD),\n        format!(\"{}?{}='0'\", url, QUERY_FIELD),\n        format!(\"{}?{}=0&extra=1\", url, QUERY_FIELD),\n    ];\n\n    let expected_body = format!(\n        \"invalid query: expected single field '{}=<EVENT ID>'\",\n        QUERY_FIELD\n    );\n    for url in &urls {\n        let response = reqwest::get(url).await.unwrap();\n        assert_eq!(\n            response.status(),\n            StatusCode::UNPROCESSABLE_ENTITY,\n            \"URL: {}\",\n            url\n        );\n        assert_eq!(\n            response.text().await.unwrap().trim(),\n            &expected_body,\n            \"URL: {}\",\n            url\n        );\n    }\n\n    fixture.stop_server().await;\n}\n\n/// Check that a server which restarts continues from the previous numbering of event IDs.\n#[tokio::test(flavor = \"multi_thread\", worker_threads = 2)]\nasync fn should_persist_event_ids() {\n    let mut rng = crate::new_rng();\n    let mut fixture = TestFixture::new(&mut rng);\n\n    let first_run_final_id = {\n        // Run the first server to emit the 100 events.\n        let mut server_behavior = ServerBehavior::new();\n        let barrier = server_behavior.add_client_sync_before_event(0);\n        let server_address = fixture.run_server(server_behavior).await;\n\n        // Consume these and stop the server.\n        let url = make_url(server_address, None);\n        let (_expected_events, final_id) = fixture.all_events();\n        let _ = subscribe(&url, barrier, final_id, \"client 1\")\n            .await\n            .unwrap();\n        fixture.stop_server().await;\n        final_id\n    };\n\n    assert!(first_run_final_id > 0);\n\n    {\n        // Start a new server with a client barrier set 
for just before event ID 100 + 1 (the extra\n        // event being the `Shutdown`).\n        let mut server_behavior = ServerBehavior::new();\n        let barrier = server_behavior.add_client_sync_before_event(EVENT_COUNT + 1);\n        let server_address = fixture.run_server(server_behavior).await;\n\n        // Check the test fixture has set the server's first event ID to at least\n        // `first_run_final_id`.\n        assert!(fixture.first_event_id >= first_run_final_id);\n\n        // Consume the events and assert their IDs are all >= `first_run_final_id`.\n        let url = make_url(server_address, None);\n        let (expected_events, final_id) = fixture.events_filtered_by_id(EVENT_COUNT + 1);\n        let received_events = subscribe(&url, barrier, final_id, \"client 2\")\n            .await\n            .unwrap();\n        fixture.stop_server().await;\n\n        assert_eq!(received_events, expected_events);\n        assert!(received_events\n            .iter()\n            .skip(1)\n            .all(|event| event.id.unwrap() >= first_run_final_id));\n    }\n}\n\n/// Check that a server handles wrapping round past the maximum value for event IDs.\n#[tokio::test(flavor = \"multi_thread\", worker_threads = 2)]\nasync fn should_handle_wrapping_past_max_event_id() {\n    let mut rng = crate::new_rng();\n    let mut fixture = TestFixture::new(&mut rng);\n\n    // Set up an `EventIndexer` cache file as if the server previously stopped at an event with ID\n    // just less than the maximum.\n    let start_index = Id::MAX - (BUFFER_LENGTH / 2);\n    fs::write(\n        fixture.storage_dir.path().join(\"sse_index\"),\n        start_index.to_le_bytes(),\n    )\n    .unwrap();\n\n    // Set up a client which will connect at the start of the stream, and another two for once the\n    // IDs have wrapped past the maximum value.\n    let mut server_behavior = ServerBehavior::new();\n    let barrier1 = server_behavior.add_client_sync_before_event(start_index);\n    let 
barrier2 = server_behavior.add_client_sync_before_event(BUFFER_LENGTH / 2);\n    let barrier3 = server_behavior.add_client_sync_before_event(BUFFER_LENGTH / 2);\n    let server_address = fixture.run_server(server_behavior).await;\n    assert_eq!(fixture.first_event_id, start_index);\n\n    // The first client doesn't need a query string, but the second will request to start from an ID\n    // from before they wrapped past the maximum value, and the third from event 0.\n    let url1 = make_url(server_address, None);\n    let url2 = make_url(server_address, Some(start_index + 1));\n    let url3 = make_url(server_address, Some(0));\n    let (expected_events1, final_id1) = fixture.all_events();\n    let (expected_events2, final_id2) = fixture.events_filtered_by_id(start_index + 1);\n    let (expected_events3, final_id3) = fixture.events_filtered_by_id(0);\n    let (received_events1, received_events2, received_events3) = join!(\n        subscribe(&url1, barrier1, final_id1, \"client 1\"),\n        subscribe(&url2, barrier2, final_id2, \"client 2\"),\n        subscribe(&url3, barrier3, final_id3, \"client 3\"),\n    );\n    fixture.stop_server().await;\n\n    assert_eq!(received_events1.unwrap(), expected_events1);\n    assert_eq!(received_events2.unwrap(), expected_events2);\n    assert_eq!(received_events3.unwrap(), expected_events3);\n}\n\n/// Checks that a server rejects new clients with an HTTP 503 when it already has the specified\n/// limit of connected clients.\n#[tokio::test(flavor = \"multi_thread\", worker_threads = 2)]\nasync fn should_limit_concurrent_subscribers() {\n    let mut rng = crate::new_rng();\n    let mut fixture = TestFixture::new(&mut rng);\n\n    // Start the server with `max_concurrent_subscribers == 3`, and set to wait for three clients to\n    // connect at event 0 and another three at event 1.\n    let mut server_behavior = ServerBehavior::new();\n    server_behavior.set_max_concurrent_subscribers(3);\n    let barrier1 = 
server_behavior.add_client_sync_before_event(0);\n    let barrier2 = server_behavior.add_client_sync_before_event(0);\n    let barrier3 = server_behavior.add_client_sync_before_event(0);\n    let barrier4 = server_behavior.add_client_sync_before_event(1);\n    let barrier5 = server_behavior.add_client_sync_before_event(1);\n    let barrier6 = server_behavior.add_client_sync_before_event(1);\n    let server_address = fixture.run_server(server_behavior).await;\n\n    let url = make_url(server_address, None);\n\n    let (expected_events, final_id) = fixture.all_events();\n\n    // Run the six clients.\n    let (\n        received_events_1,\n        received_events_2,\n        received_events_3,\n        empty_events_1,\n        empty_events_2,\n        empty_events_3,\n    ) = join!(\n        subscribe(&url, barrier1, final_id, \"client 1\"),\n        subscribe(&url, barrier2, final_id, \"client 2\"),\n        subscribe(&url, barrier3, final_id, \"client 3\"),\n        subscribe(&url, barrier4, final_id, \"client 4\"),\n        subscribe(&url, barrier5, final_id, \"client 5\"),\n        subscribe(&url, barrier6, final_id, \"client 6\"),\n    );\n\n    // Check the first three received all expected events.\n    assert_eq!(received_events_1.unwrap(), expected_events);\n    assert_eq!(received_events_2.unwrap(), expected_events);\n    assert_eq!(received_events_3.unwrap(), expected_events);\n\n    // Check the second three received no events.\n    assert!(empty_events_1.unwrap().is_empty());\n    assert!(empty_events_2.unwrap().is_empty());\n    assert!(empty_events_3.unwrap().is_empty());\n\n    // Check that now the first clients have all disconnected, three new clients can connect.  
Have\n    // them start from event 80 to allow them to actually pull some events off the stream (as the\n    // server has by now stopped creating any new events).\n    let start_id = EVENT_COUNT - 20;\n\n    let url = make_url(server_address, Some(start_id));\n\n    let (expected_events, final_id) = fixture.events_filtered_by_id(start_id);\n\n    let received_events = subscribe_no_sync(&url, final_id, \"client 7\").await;\n\n    // Check the last three clients' received events are as expected.\n    assert_eq!(received_events.unwrap(), expected_events);\n\n    fixture.stop_server().await;\n}\n\n/// Rather than being a test proper, this is more a means to easily determine differences between\n/// versions of the events emitted by the SSE server by comparing the contents of\n/// `resources/test/sse_data_schema.json` across different versions of the codebase.\n#[test]\nfn json_schema_check() {\n    let schema_path = format!(\n        \"{}/../resources/test/sse_data_schema.json\",\n        env!(\"CARGO_MANIFEST_DIR\")\n    );\n    let pretty = serde_json::to_string_pretty(&schema_for!(SseData)).unwrap();\n    assert_schema(schema_path, pretty);\n}\n"
  },
  {
    "path": "node/src/components/event_stream_server.rs",
    "content": "//! Event stream server\n//!\n//! The event stream server provides clients with an event-stream returning Server-Sent Events\n//! (SSEs) holding JSON-encoded data.\n//!\n//! The actual server is run in backgrounded tasks.\n//!\n//! This module currently provides both halves of what is required for an API server:\n//! a component implementation that interfaces with other components via being plugged into a\n//! reactor, and an external facing http server that manages SSE subscriptions on a single endpoint.\n//!\n//! This component is passive and receives announcements made by other components while never making\n//! a request of other components itself. The handled announcements are serialized to JSON and\n//! pushed to subscribers.\n//!\n//! This component uses a ring buffer for outbound events providing some robustness against\n//! unintended subscriber disconnects, if a disconnected subscriber re-subscribes before the buffer\n//! has advanced past their last received event.\n\nmod config;\nmod event;\nmod event_indexer;\nmod http_server;\nmod sse_server;\n#[cfg(test)]\nmod tests;\n\nuse std::{fmt::Debug, net::SocketAddr, path::PathBuf};\n\nuse datasize::DataSize;\nuse tokio::sync::{\n    mpsc::{self, UnboundedSender},\n    oneshot,\n};\nuse tracing::{error, info, warn};\nuse warp::Filter;\n\nuse casper_types::{InitiatorAddr, ProtocolVersion};\n\nuse super::Component;\nuse crate::{\n    components::{ComponentState, InitializedComponent, PortBoundComponent},\n    effect::{EffectBuilder, Effects},\n    reactor::main_reactor::MainEvent,\n    types::TransactionHeader,\n    utils::{self, ListeningError},\n    NodeRng,\n};\npub use config::Config;\npub(crate) use event::Event;\nuse event_indexer::{EventIndex, EventIndexer};\nuse sse_server::ChannelsAndFilter;\npub(crate) use sse_server::SseData;\n\nconst COMPONENT_NAME: &str = \"event_stream_server\";\n\n/// This is used to define the number of events to buffer in the tokio broadcast channel to help\n/// 
slower clients to try to avoid missing events (See\n/// <https://docs.rs/tokio/1.4.0/tokio/sync/broadcast/index.html#lagging> for further details).  The\n/// resulting broadcast channel size is `ADDITIONAL_PERCENT_FOR_BROADCAST_CHANNEL_SIZE` percent\n/// greater than `config.event_stream_buffer_length`.\n///\n/// We always want the broadcast channel size to be greater than the event stream buffer length so\n/// that a new client can retrieve the entire set of buffered events if desired.\nconst ADDITIONAL_PERCENT_FOR_BROADCAST_CHANNEL_SIZE: u32 = 20;\n\n/// A helper trait whose bounds represent the requirements for a reactor event that `run_server` can\n/// work with.\npub trait ReactorEventT: From<Event> + Send {}\n\nimpl<REv> ReactorEventT for REv where REv: From<Event> + Send + 'static {}\n\n#[derive(DataSize, Debug)]\nstruct InnerServer {\n    /// Channel sender to pass event-stream data to the event-stream server.\n    // TODO - this should not be skipped.  Awaiting support for `UnboundedSender` in datasize crate.\n    #[data_size(skip)]\n    sse_data_sender: UnboundedSender<(EventIndex, SseData)>,\n    event_indexer: EventIndexer,\n    listening_address: SocketAddr,\n}\n\n#[derive(DataSize, Debug)]\npub(crate) struct EventStreamServer {\n    state: ComponentState,\n    config: Config,\n    storage_path: PathBuf,\n    api_version: ProtocolVersion,\n    sse_server: Option<InnerServer>,\n}\n\nimpl EventStreamServer {\n    pub(crate) fn new(config: Config, storage_path: PathBuf, api_version: ProtocolVersion) -> Self {\n        EventStreamServer {\n            state: ComponentState::Uninitialized,\n            config,\n            storage_path,\n            api_version,\n            sse_server: None,\n        }\n    }\n\n    fn listen(&mut self) -> Result<(), ListeningError> {\n        let required_address = utils::resolve_address(&self.config.address).map_err(|error| {\n            warn!(\n                %error,\n                address=%self.config.address,\n    
            \"failed to start event stream server, cannot parse address\"\n            );\n            ListeningError::ResolveAddress(error)\n        })?;\n\n        // Event stream channels and filter.\n        let broadcast_channel_size = self.config.event_stream_buffer_length\n            * (100 + ADDITIONAL_PERCENT_FOR_BROADCAST_CHANNEL_SIZE)\n            / 100;\n\n        let ChannelsAndFilter {\n            event_broadcaster,\n            new_subscriber_info_receiver,\n            sse_filter,\n        } = ChannelsAndFilter::new(\n            broadcast_channel_size as usize,\n            self.config.max_concurrent_subscribers,\n        );\n\n        let (server_shutdown_sender, shutdown_receiver) = oneshot::channel::<()>();\n\n        let (sse_data_sender, sse_data_receiver) = mpsc::unbounded_channel();\n\n        let listening_address = match self.config.cors_origin.as_str() {\n            \"\" => {\n                let (listening_address, server_with_shutdown) = warp::serve(sse_filter)\n                    .try_bind_with_graceful_shutdown(required_address, async {\n                        shutdown_receiver.await.ok();\n                    })\n                    .map_err(|error| ListeningError::Listen {\n                        address: required_address,\n                        error: Box::new(error),\n                    })?;\n\n                tokio::spawn(http_server::run(\n                    self.config.clone(),\n                    self.api_version,\n                    server_with_shutdown,\n                    server_shutdown_sender,\n                    sse_data_receiver,\n                    event_broadcaster,\n                    new_subscriber_info_receiver,\n                ));\n                listening_address\n            }\n            \"*\" => {\n                let (listening_address, server_with_shutdown) =\n                    warp::serve(sse_filter.with(warp::cors().allow_any_origin()))\n                        
.try_bind_with_graceful_shutdown(required_address, async {\n                            shutdown_receiver.await.ok();\n                        })\n                        .map_err(|error| ListeningError::Listen {\n                            address: required_address,\n                            error: Box::new(error),\n                        })?;\n\n                tokio::spawn(http_server::run(\n                    self.config.clone(),\n                    self.api_version,\n                    server_with_shutdown,\n                    server_shutdown_sender,\n                    sse_data_receiver,\n                    event_broadcaster,\n                    new_subscriber_info_receiver,\n                ));\n                listening_address\n            }\n            _ => {\n                let (listening_address, server_with_shutdown) = warp::serve(\n                    sse_filter.with(warp::cors().allow_origin(self.config.cors_origin.as_str())),\n                )\n                .try_bind_with_graceful_shutdown(required_address, async {\n                    shutdown_receiver.await.ok();\n                })\n                .map_err(|error| ListeningError::Listen {\n                    address: required_address,\n                    error: Box::new(error),\n                })?;\n\n                tokio::spawn(http_server::run(\n                    self.config.clone(),\n                    self.api_version,\n                    server_with_shutdown,\n                    server_shutdown_sender,\n                    sse_data_receiver,\n                    event_broadcaster,\n                    new_subscriber_info_receiver,\n                ));\n                listening_address\n            }\n        };\n\n        info!(address=%listening_address, \"started event stream server\");\n\n        let event_indexer = EventIndexer::new(self.storage_path.clone());\n\n        self.sse_server = Some(InnerServer {\n            sse_data_sender,\n            
event_indexer,\n            listening_address,\n        });\n        Ok(())\n    }\n\n    /// Broadcasts the SSE data to all clients connected to the event stream.\n    fn broadcast(&mut self, sse_data: SseData) -> Effects<Event> {\n        if let Some(server) = self.sse_server.as_mut() {\n            let event_index = server.event_indexer.next_index();\n            let _ = server.sse_data_sender.send((event_index, sse_data));\n        }\n        Effects::new()\n    }\n}\n\nimpl Drop for EventStreamServer {\n    fn drop(&mut self) {\n        let _ = self.broadcast(SseData::Shutdown);\n    }\n}\n\nimpl<REv> Component<REv> for EventStreamServer\nwhere\n    REv: ReactorEventT,\n{\n    type Event = Event;\n\n    fn handle_event(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        _rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event> {\n        match &self.state {\n            ComponentState::Fatal(msg) => {\n                error!(\n                    msg,\n                    ?event,\n                    name = <Self as Component<MainEvent>>::name(self),\n                    \"should not handle this event when this component has fatal error\"\n                );\n                Effects::new()\n            }\n            ComponentState::Uninitialized => {\n                warn!(\n                    ?event,\n                    name = <Self as Component<MainEvent>>::name(self),\n                    \"should not handle this event when component is uninitialized\"\n                );\n                Effects::new()\n            }\n            ComponentState::Initializing => match event {\n                Event::Initialize => {\n                    let (effects, state) = self.bind(self.config.enable_server, effect_builder);\n                    <Self as InitializedComponent<MainEvent>>::set_state(self, state);\n                    effects\n                }\n                Event::BlockAdded(_)\n                | 
Event::TransactionAccepted(_)\n                | Event::TransactionProcessed { .. }\n                | Event::TransactionsExpired(_)\n                | Event::Fault { .. }\n                | Event::FinalitySignature(_)\n                | Event::Step { .. } => {\n                    warn!(\n                        ?event,\n                        name = <Self as Component<MainEvent>>::name(self),\n                        \"should not handle this event when component is pending initialization\"\n                    );\n                    Effects::new()\n                }\n            },\n            ComponentState::Initialized => match event {\n                Event::Initialize => {\n                    error!(\n                        ?event,\n                        name = <Self as Component<MainEvent>>::name(self),\n                        \"component already initialized\"\n                    );\n                    Effects::new()\n                }\n                Event::BlockAdded(block) => self.broadcast(SseData::BlockAdded {\n                    block_hash: *block.hash(),\n                    block: Box::new((*block).clone()),\n                }),\n                Event::TransactionAccepted(transaction) => {\n                    self.broadcast(SseData::TransactionAccepted { transaction })\n                }\n                Event::TransactionProcessed {\n                    transaction_hash,\n                    transaction_header,\n                    block_hash,\n                    execution_result,\n                    messages,\n                } => {\n                    let (initiator_addr, timestamp, ttl) = match *transaction_header {\n                        TransactionHeader::Deploy(deploy_header) => (\n                            InitiatorAddr::PublicKey(deploy_header.account().clone()),\n                            deploy_header.timestamp(),\n                            deploy_header.ttl(),\n                        ),\n                        
TransactionHeader::V1(metadata) => (\n                            metadata.initiator_addr().clone(),\n                            metadata.timestamp(),\n                            metadata.ttl(),\n                        ),\n                    };\n                    self.broadcast(SseData::TransactionProcessed {\n                        transaction_hash: Box::new(transaction_hash),\n                        initiator_addr: Box::new(initiator_addr),\n                        timestamp,\n                        ttl,\n                        block_hash: Box::new(block_hash),\n                        execution_result,\n                        messages,\n                    })\n                }\n                Event::TransactionsExpired(transaction_hashes) => transaction_hashes\n                    .into_iter()\n                    .flat_map(|transaction_hash| {\n                        self.broadcast(SseData::TransactionExpired { transaction_hash })\n                    })\n                    .collect(),\n                Event::Fault {\n                    era_id,\n                    public_key,\n                    timestamp,\n                } => self.broadcast(SseData::Fault {\n                    era_id,\n                    public_key,\n                    timestamp,\n                }),\n                Event::FinalitySignature(fs) => self.broadcast(SseData::FinalitySignature(fs)),\n                Event::Step {\n                    era_id,\n                    execution_effects,\n                } => self.broadcast(SseData::Step {\n                    era_id,\n                    execution_effects,\n                }),\n            },\n        }\n    }\n\n    fn name(&self) -> &str {\n        COMPONENT_NAME\n    }\n}\n\nimpl<REv> InitializedComponent<REv> for EventStreamServer\nwhere\n    REv: ReactorEventT,\n{\n    fn state(&self) -> &ComponentState {\n        &self.state\n    }\n\n    fn set_state(&mut self, new_state: ComponentState) {\n        info!(\n  
          ?new_state,\n            name = <Self as Component<MainEvent>>::name(self),\n            \"component state changed\"\n        );\n\n        self.state = new_state;\n    }\n}\n\nimpl<REv> PortBoundComponent<REv> for EventStreamServer\nwhere\n    REv: ReactorEventT,\n{\n    type Error = ListeningError;\n    type ComponentEvent = Event;\n\n    fn listen(\n        &mut self,\n        _effect_builder: EffectBuilder<REv>,\n    ) -> Result<Effects<Self::ComponentEvent>, Self::Error> {\n        self.listen()?;\n        Ok(Effects::new())\n    }\n}\n"
  },
  {
    "path": "node/src/components/fetcher/config.rs",
    "content": "use std::str::FromStr;\n\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse casper_types::TimeDiff;\n\nconst DEFAULT_GET_FROM_PEER_TIMEOUT: &str = \"3sec\";\n\n/// Configuration options for fetching.\n#[derive(Copy, Clone, DataSize, Debug, Deserialize, Serialize)]\npub struct Config {\n    get_from_peer_timeout: TimeDiff,\n}\n\nimpl Config {\n    /// Returns `get_from_peer` timeout.\n    pub fn get_from_peer_timeout(&self) -> TimeDiff {\n        self.get_from_peer_timeout\n    }\n}\n\nimpl Default for Config {\n    fn default() -> Self {\n        Config {\n            get_from_peer_timeout: TimeDiff::from_str(DEFAULT_GET_FROM_PEER_TIMEOUT).unwrap(),\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/fetcher/error.rs",
    "content": "use datasize::DataSize;\nuse serde::Serialize;\nuse thiserror::Error;\nuse tracing::error;\n\nuse crate::{components::fetcher::FetchItem, types::NodeId};\n\n#[derive(Clone, Debug, Error, PartialEq, Eq, Serialize)]\npub(crate) enum Error<T: FetchItem> {\n    #[error(\"item with id {id:?} absent on peer {peer:?}\")]\n    Absent { id: Box<T::Id>, peer: NodeId },\n\n    #[error(\"peer {peer:?} rejected fetch request for item with id {id:?}\")]\n    Rejected { id: Box<T::Id>, peer: NodeId },\n\n    #[error(\"timed out getting item with id {id:?} from peer {peer:?}\")]\n    TimedOut { id: Box<T::Id>, peer: NodeId },\n\n    #[error(\"could not construct get request for item with id {id:?} for peer {peer:?}\")]\n    CouldNotConstructGetRequest { id: Box<T::Id>, peer: NodeId },\n\n    #[error(\n        \"ongoing fetch for {id} from {peer} has different validation metadata ({current:?}) to \\\n        that given in new fetch attempt ({new:?})\"\n    )]\n    ValidationMetadataMismatch {\n        id: Box<T::Id>,\n        peer: NodeId,\n        current: Box<T::ValidationMetadata>,\n        new: Box<T::ValidationMetadata>,\n    },\n}\n\nimpl<T: FetchItem> Error<T> {\n    pub(crate) fn is_peer_fault(&self) -> bool {\n        match self {\n            // The peer claimed to have the item, so it should not be absent.\n            Error::Absent { .. } | Error::Rejected { .. } | Error::TimedOut { .. } => true,\n            Error::CouldNotConstructGetRequest { .. }\n            | Error::ValidationMetadataMismatch { .. } => false,\n        }\n    }\n\n    pub(crate) fn id(&self) -> &T::Id {\n        match self {\n            Error::Absent { id, .. } => id,\n            Error::Rejected { id, .. } => id,\n            Error::TimedOut { id, .. } => id,\n            Error::CouldNotConstructGetRequest { id, .. } => id,\n            Error::ValidationMetadataMismatch { id, .. 
} => id,\n        }\n    }\n\n    pub(crate) fn peer(&self) -> &NodeId {\n        match self {\n            Error::Absent { peer, .. }\n            | Error::Rejected { peer, .. }\n            | Error::TimedOut { peer, .. }\n            | Error::CouldNotConstructGetRequest { peer, .. }\n            | Error::ValidationMetadataMismatch { peer, .. } => peer,\n        }\n    }\n}\n\nimpl<T: FetchItem> DataSize for Error<T>\nwhere\n    T::Id: DataSize,\n{\n    const IS_DYNAMIC: bool = <T::Id as DataSize>::IS_DYNAMIC;\n\n    const STATIC_HEAP_SIZE: usize = <T::Id as DataSize>::STATIC_HEAP_SIZE;\n\n    fn estimate_heap_size(&self) -> usize {\n        match self {\n            Error::Absent { id, .. }\n            | Error::Rejected { id, .. }\n            | Error::TimedOut { id, .. }\n            | Error::CouldNotConstructGetRequest { id, .. } => id.estimate_heap_size(),\n            Error::ValidationMetadataMismatch {\n                id, current, new, ..\n            } => id.estimate_heap_size() + current.estimate_heap_size() + new.estimate_heap_size(),\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/fetcher/event.rs",
    "content": "use std::fmt::{self, Debug, Display, Formatter};\n\nuse serde::Serialize;\nuse tracing::error;\n\nuse casper_types::Transaction;\n\nuse super::{FetchItem, FetchResponder, FetchResponse};\nuse crate::{\n    effect::{announcements::TransactionAcceptorAnnouncement, requests::FetcherRequest},\n    types::NodeId,\n    utils::Source,\n};\n\n/// `Fetcher` events.\n#[derive(Debug, Serialize)]\npub(crate) enum Event<T: FetchItem> {\n    /// The initiating event to fetch an item by its id.\n    Fetch(FetcherRequest<T>),\n    /// The result of the `Fetcher` getting a item from the storage component.  If the\n    /// result is `None`, the item should be requested from the peer.\n    GetLocallyResult {\n        id: T::Id,\n        peer: NodeId,\n        validation_metadata: Box<T::ValidationMetadata>,\n        maybe_item: Option<Box<T>>,\n        responder: FetchResponder<T>,\n    },\n    /// An announcement from a different component that we have accepted and stored the given item.\n    GotRemotely { item: Box<T>, source: Source },\n    /// The result of putting the item to storage.\n    PutToStorage { item: Box<T>, peer: NodeId },\n    /// A different component rejected an item.\n    GotInvalidRemotely { id: T::Id, source: Source },\n    /// An item was not available on the remote peer.\n    AbsentRemotely { id: T::Id, peer: NodeId },\n    /// An item was available on the remote peer, but it chose to not provide it.\n    RejectedRemotely { id: T::Id, peer: NodeId },\n    /// The timeout has elapsed and we should clean up state.\n    TimeoutPeer { id: T::Id, peer: NodeId },\n}\n\nimpl<T: FetchItem> Event<T> {\n    pub(crate) fn from_get_response_serialized_item(\n        peer: NodeId,\n        serialized_item: &[u8],\n    ) -> Option<Self> {\n        match bincode::deserialize::<FetchResponse<T, T::Id>>(serialized_item) {\n            Ok(FetchResponse::Fetched(item)) => Some(Event::GotRemotely {\n                item: Box::new(item),\n                source: 
Source::Peer(peer),\n            }),\n            Ok(FetchResponse::NotFound(id)) => Some(Event::AbsentRemotely { id, peer }),\n            Ok(FetchResponse::NotProvided(id)) => Some(Event::RejectedRemotely { id, peer }),\n            Err(error) => {\n                error!(\"failed to decode {:?} from {}: {:?}\", T::TAG, peer, error);\n                None\n            }\n        }\n    }\n}\n\nimpl<T: FetchItem> From<FetcherRequest<T>> for Event<T> {\n    fn from(fetcher_request: FetcherRequest<T>) -> Self {\n        Event::Fetch(fetcher_request)\n    }\n}\n\n// A transaction fetcher knows how to update its state if transactions are coming in via the\n// transaction acceptor.\nimpl From<TransactionAcceptorAnnouncement> for Event<Transaction> {\n    fn from(announcement: TransactionAcceptorAnnouncement) -> Self {\n        match announcement {\n            TransactionAcceptorAnnouncement::AcceptedNewTransaction {\n                transaction,\n                source,\n            } => Event::GotRemotely {\n                item: Box::new((*transaction).clone()),\n                source,\n            },\n            TransactionAcceptorAnnouncement::InvalidTransaction {\n                transaction,\n                source,\n            } => Event::GotInvalidRemotely {\n                id: transaction.fetch_id(),\n                source,\n            },\n        }\n    }\n}\n\nimpl<T: FetchItem> Display for Event<T> {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            Event::Fetch(FetcherRequest { id, .. }) => {\n                write!(formatter, \"request to fetch item at hash {}\", id)\n            }\n            Event::GetLocallyResult { id, maybe_item, .. 
} => {\n                if maybe_item.is_some() {\n                    write!(formatter, \"got {} from storage\", id)\n                } else {\n                    write!(formatter, \"failed to fetch {} from storage\", id)\n                }\n            }\n            Event::GotRemotely { item, source } => {\n                write!(formatter, \"got {} from {}\", item.fetch_id(), source)\n            }\n            Event::GotInvalidRemotely { id, source } => {\n                write!(formatter, \"invalid item {} from {}\", id, source)\n            }\n            Event::TimeoutPeer { id, peer } => write!(\n                formatter,\n                \"check get from peer timeout for {} with {}\",\n                id, peer\n            ),\n            Event::AbsentRemotely { id, peer } => {\n                write!(formatter, \"item {} was not available on {}\", id, peer)\n            }\n            Event::RejectedRemotely { id, peer } => {\n                write!(\n                    formatter,\n                    \"request to fetch item {} was rejected by {}\",\n                    id, peer\n                )\n            }\n            Event::PutToStorage { item, .. } => {\n                write!(formatter, \"item {} was put to storage\", item.fetch_id())\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/fetcher/fetch_item.rs",
    "content": "use std::{\n    error::Error as StdError,\n    fmt::{self, Debug, Display, Formatter},\n    hash::Hash,\n};\n\nuse datasize::DataSize;\nuse serde::{de::DeserializeOwned, Serialize};\n\nuse super::Tag;\n\n#[derive(Clone, Copy, Eq, PartialEq, Serialize, Debug, DataSize)]\npub(crate) struct EmptyValidationMetadata;\n\nimpl Display for EmptyValidationMetadata {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> Result<(), fmt::Error> {\n        write!(formatter, \"no validation metadata\")\n    }\n}\n\n/// A trait which allows an implementing type to be used by a fetcher component.\npub(crate) trait FetchItem:\n    Clone + Serialize + DeserializeOwned + Send + Sync + Debug + Display + Eq\n{\n    /// The type of ID of the item.\n    type Id: Clone + Eq + Hash + Serialize + DeserializeOwned + Send + Sync + Debug + Display;\n    /// The error type returned when validating to get the ID of the item.\n    type ValidationError: StdError + Debug + Display;\n    /// The type of the metadata provided when validating the item.\n    type ValidationMetadata: Eq + Clone + Serialize + Debug + DataSize + Send;\n\n    /// The tag representing the type of the item.\n    const TAG: Tag;\n\n    /// The ID of the specific item.\n    fn fetch_id(&self) -> Self::Id;\n\n    /// Checks validity of the item, and returns an error if invalid.\n    fn validate(&self, metadata: &Self::ValidationMetadata) -> Result<(), Self::ValidationError>;\n}\n"
  },
  {
    "path": "node/src/components/fetcher/fetch_response.rs",
    "content": "use serde::{Deserialize, Serialize};\n\n/// Message to be returned by a peer. Indicates if the item could be fetched or not.\n#[derive(Debug, Serialize, Deserialize, strum::EnumDiscriminants)]\n#[strum_discriminants(derive(strum::EnumIter))]\npub enum FetchResponse<T, Id> {\n    /// The requested item.\n    Fetched(T),\n    /// The sender does not have the requested item available.\n    NotFound(Id),\n    /// The sender chose to not provide the requested item.\n    NotProvided(Id),\n}\n\nimpl<T, Id> FetchResponse<T, Id> {\n    /// Constructs a fetched or not found from an option and an id.\n    pub(crate) fn from_opt(id: Id, item: Option<T>) -> Self {\n        match item {\n            Some(item) => FetchResponse::Fetched(item),\n            None => FetchResponse::NotFound(id),\n        }\n    }\n\n    /// Returns whether this response is a positive (fetched / \"found\") one.\n    pub(crate) fn was_found(&self) -> bool {\n        matches!(self, FetchResponse::Fetched(_))\n    }\n}\n\nimpl<T, Id> FetchResponse<T, Id>\nwhere\n    Self: Serialize,\n{\n    /// The canonical serialization for the inner encoding of the `FetchResponse` response (see\n    /// [`Message::GetResponse`]).\n    pub(crate) fn to_serialized(&self) -> Result<Vec<u8>, bincode::Error> {\n        bincode::serialize(self)\n    }\n}\n\nmod specimen_support {\n    use crate::utils::specimen::{largest_variant, Cache, LargestSpecimen, SizeEstimator};\n    use serde::Serialize;\n\n    use super::{FetchResponse, FetchResponseDiscriminants};\n\n    impl<T: Serialize + LargestSpecimen, Id: Serialize + LargestSpecimen> LargestSpecimen\n        for FetchResponse<T, Id>\n    {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            largest_variant::<Self, FetchResponseDiscriminants, _, _>(estimator, |variant| {\n                match variant {\n                    FetchResponseDiscriminants::Fetched => {\n                        
FetchResponse::Fetched(LargestSpecimen::largest_specimen(estimator, cache))\n                    }\n                    FetchResponseDiscriminants::NotFound => {\n                        FetchResponse::NotFound(LargestSpecimen::largest_specimen(estimator, cache))\n                    }\n                    FetchResponseDiscriminants::NotProvided => FetchResponse::NotProvided(\n                        LargestSpecimen::largest_specimen(estimator, cache),\n                    ),\n                }\n            })\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/fetcher/fetched_data.rs",
    "content": "use std::fmt::{self, Display, Formatter};\n\nuse datasize::DataSize;\nuse serde::Serialize;\n\nuse crate::{components::fetcher::FetchItem, types::NodeId};\n\n#[derive(Clone, DataSize, Debug, PartialEq, Serialize)]\npub(crate) enum FetchedData<T> {\n    FromStorage { item: Box<T> },\n    FromPeer { item: Box<T>, peer: NodeId },\n}\n\nimpl<T> FetchedData<T> {\n    pub(crate) fn from_storage(item: Box<T>) -> Self {\n        FetchedData::FromStorage { item }\n    }\n\n    pub(crate) fn from_peer(item: T, peer: NodeId) -> Self {\n        FetchedData::FromPeer {\n            item: Box::new(item),\n            peer,\n        }\n    }\n\n    pub(crate) fn convert<U>(self) -> FetchedData<U>\n    where\n        T: Into<U>,\n    {\n        match self {\n            FetchedData::FromStorage { item } => FetchedData::FromStorage {\n                item: Box::new((*item).into()),\n            },\n            FetchedData::FromPeer { item, peer } => FetchedData::FromPeer {\n                item: Box::new((*item).into()),\n                peer,\n            },\n        }\n    }\n}\n\nimpl<T: FetchItem> FetchedData<T> {\n    pub(crate) fn id(&self) -> T::Id {\n        match self {\n            FetchedData::FromStorage { item } | FetchedData::FromPeer { peer: _, item } => {\n                item.fetch_id()\n            }\n        }\n    }\n}\n\nimpl<T: FetchItem> Display for FetchedData<T> {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            FetchedData::FromStorage { item } => {\n                write!(f, \"fetched {} from storage\", item.fetch_id())\n            }\n            FetchedData::FromPeer { item, peer } => {\n                write!(f, \"fetched {} from {}\", item.fetch_id(), peer)\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/fetcher/fetcher_impls/approvals_hashes_fetcher.rs",
    "content": "use std::{collections::HashMap, time::Duration};\n\nuse async_trait::async_trait;\nuse casper_storage::block_store::types::{ApprovalsHashes, ApprovalsHashesValidationError};\nuse futures::FutureExt;\n\nuse casper_types::{Block, BlockHash};\n\nuse crate::{\n    components::fetcher::{\n        metrics::Metrics, FetchItem, Fetcher, ItemFetcher, ItemHandle, StoringState, Tag,\n    },\n    effect::{requests::StorageRequest, EffectBuilder},\n    types::NodeId,\n};\n\nimpl FetchItem for ApprovalsHashes {\n    type Id = BlockHash;\n    type ValidationError = ApprovalsHashesValidationError;\n    type ValidationMetadata = Block;\n\n    const TAG: Tag = Tag::ApprovalsHashes;\n\n    fn fetch_id(&self) -> Self::Id {\n        *self.block_hash()\n    }\n\n    fn validate(&self, block: &Block) -> Result<(), Self::ValidationError> {\n        self.verify(block)\n    }\n}\n\n#[async_trait]\nimpl ItemFetcher<ApprovalsHashes> for Fetcher<ApprovalsHashes> {\n    const SAFE_TO_RESPOND_TO_ALL: bool = false;\n\n    fn item_handles(\n        &mut self,\n    ) -> &mut HashMap<BlockHash, HashMap<NodeId, ItemHandle<ApprovalsHashes>>> {\n        &mut self.item_handles\n    }\n\n    fn metrics(&mut self) -> &Metrics {\n        &self.metrics\n    }\n\n    fn peer_timeout(&self) -> Duration {\n        self.get_from_peer_timeout\n    }\n\n    async fn get_locally<REv: From<StorageRequest> + Send>(\n        effect_builder: EffectBuilder<REv>,\n        id: BlockHash,\n    ) -> Option<ApprovalsHashes> {\n        effect_builder.get_approvals_hashes_from_storage(id).await\n    }\n\n    fn put_to_storage<'a, REv: From<StorageRequest> + Send>(\n        effect_builder: EffectBuilder<REv>,\n        item: ApprovalsHashes,\n    ) -> StoringState<'a, ApprovalsHashes> {\n        StoringState::Enqueued(\n            effect_builder\n                .put_approvals_hashes_to_storage(Box::new(item))\n                .map(|_| ())\n                .boxed(),\n        )\n    }\n\n    async fn 
announce_fetched_new_item<REv: Send>(\n        _effect_builder: EffectBuilder<REv>,\n        _item: ApprovalsHashes,\n        _peer: NodeId,\n    ) {\n    }\n}\n"
  },
  {
    "path": "node/src/components/fetcher/fetcher_impls/block_execution_results_or_chunk_fetcher.rs",
    "content": "use std::{collections::HashMap, time::Duration};\n\nuse async_trait::async_trait;\n\nuse crate::{\n    components::fetcher::{metrics::Metrics, Fetcher, ItemFetcher, ItemHandle, StoringState},\n    effect::{requests::StorageRequest, EffectBuilder},\n    types::{BlockExecutionResultsOrChunk, BlockExecutionResultsOrChunkId, NodeId},\n};\n\n#[async_trait]\nimpl ItemFetcher<BlockExecutionResultsOrChunk> for Fetcher<BlockExecutionResultsOrChunk> {\n    const SAFE_TO_RESPOND_TO_ALL: bool = true;\n\n    fn item_handles(\n        &mut self,\n    ) -> &mut HashMap<\n        BlockExecutionResultsOrChunkId,\n        HashMap<NodeId, ItemHandle<BlockExecutionResultsOrChunk>>,\n    > {\n        &mut self.item_handles\n    }\n\n    fn metrics(&mut self) -> &Metrics {\n        &self.metrics\n    }\n\n    fn peer_timeout(&self) -> Duration {\n        self.get_from_peer_timeout\n    }\n\n    async fn get_locally<REv: From<StorageRequest> + Send>(\n        effect_builder: EffectBuilder<REv>,\n        id: BlockExecutionResultsOrChunkId,\n    ) -> Option<BlockExecutionResultsOrChunk> {\n        effect_builder\n            .get_block_execution_results_or_chunk_from_storage(id)\n            .await\n    }\n\n    fn put_to_storage<'a, REv>(\n        _effect_builder: EffectBuilder<REv>,\n        item: BlockExecutionResultsOrChunk,\n    ) -> StoringState<'a, BlockExecutionResultsOrChunk> {\n        // Stored by the BlockSynchronizer once all chunks are fetched.\n        StoringState::WontStore(item)\n    }\n\n    async fn announce_fetched_new_item<REv: Send>(\n        _effect_builder: EffectBuilder<REv>,\n        _item: BlockExecutionResultsOrChunk,\n        _peer: NodeId,\n    ) {\n    }\n}\n"
  },
  {
    "path": "node/src/components/fetcher/fetcher_impls/block_fetcher.rs",
    "content": "use std::{collections::HashMap, sync::Arc, time::Duration};\n\nuse async_trait::async_trait;\nuse futures::FutureExt;\n\nuse casper_types::{Block, BlockHash, BlockValidationError};\n\nuse crate::{\n    components::fetcher::{\n        metrics::Metrics, EmptyValidationMetadata, FetchItem, Fetcher, ItemFetcher, ItemHandle,\n        StoringState, Tag,\n    },\n    effect::{\n        announcements::FetchedNewBlockAnnouncement,\n        requests::{BlockAccumulatorRequest, StorageRequest},\n        EffectBuilder,\n    },\n    types::NodeId,\n};\n\nimpl FetchItem for Block {\n    type Id = BlockHash;\n    type ValidationError = BlockValidationError;\n    type ValidationMetadata = EmptyValidationMetadata;\n\n    const TAG: Tag = Tag::Block;\n\n    fn fetch_id(&self) -> Self::Id {\n        *self.hash()\n    }\n\n    fn validate(&self, _metadata: &EmptyValidationMetadata) -> Result<(), Self::ValidationError> {\n        self.verify()\n    }\n}\n\n#[async_trait]\nimpl ItemFetcher<Block> for Fetcher<Block> {\n    const SAFE_TO_RESPOND_TO_ALL: bool = false;\n\n    fn item_handles(&mut self) -> &mut HashMap<BlockHash, HashMap<NodeId, ItemHandle<Block>>> {\n        &mut self.item_handles\n    }\n\n    fn metrics(&mut self) -> &Metrics {\n        &self.metrics\n    }\n\n    fn peer_timeout(&self) -> Duration {\n        self.get_from_peer_timeout\n    }\n\n    async fn get_locally<REv: From<StorageRequest> + From<BlockAccumulatorRequest> + Send>(\n        effect_builder: EffectBuilder<REv>,\n        id: BlockHash,\n    ) -> Option<Block> {\n        effect_builder.get_block_from_storage(id).await\n    }\n\n    fn put_to_storage<'a, REv: From<StorageRequest> + Send>(\n        effect_builder: EffectBuilder<REv>,\n        item: Block,\n    ) -> StoringState<'a, Block> {\n        StoringState::Enqueued(\n            effect_builder\n                .put_block_to_storage(Arc::new(item))\n                .map(|_| ())\n                .boxed(),\n        )\n    }\n\n    async 
fn announce_fetched_new_item<REv: From<FetchedNewBlockAnnouncement> + Send>(\n        effect_builder: EffectBuilder<REv>,\n        item: Block,\n        peer: NodeId,\n    ) {\n        effect_builder\n            .announce_fetched_new_block(Arc::new(item), peer)\n            .await\n    }\n}\n"
  },
  {
    "path": "node/src/components/fetcher/fetcher_impls/block_header_fetcher.rs",
    "content": "use std::{collections::HashMap, convert::Infallible, time::Duration};\n\nuse async_trait::async_trait;\nuse futures::FutureExt;\n\nuse casper_types::{BlockHash, BlockHeader};\n\nuse crate::{\n    components::fetcher::{\n        metrics::Metrics, EmptyValidationMetadata, FetchItem, Fetcher, ItemFetcher, ItemHandle,\n        StoringState, Tag,\n    },\n    effect::{requests::StorageRequest, EffectBuilder},\n    types::NodeId,\n};\n\nimpl FetchItem for BlockHeader {\n    type Id = BlockHash;\n    type ValidationError = Infallible;\n    type ValidationMetadata = EmptyValidationMetadata;\n\n    const TAG: Tag = Tag::BlockHeader;\n\n    fn fetch_id(&self) -> Self::Id {\n        self.block_hash()\n    }\n\n    fn validate(&self, _metadata: &EmptyValidationMetadata) -> Result<(), Self::ValidationError> {\n        // No need for further validation.  The received header has necessarily had its hash\n        // computed to be the same value we used for the fetch ID if we got here.\n        Ok(())\n    }\n}\n\n#[async_trait]\nimpl ItemFetcher<BlockHeader> for Fetcher<BlockHeader> {\n    const SAFE_TO_RESPOND_TO_ALL: bool = true;\n\n    fn item_handles(\n        &mut self,\n    ) -> &mut HashMap<BlockHash, HashMap<NodeId, ItemHandle<BlockHeader>>> {\n        &mut self.item_handles\n    }\n\n    fn metrics(&mut self) -> &Metrics {\n        &self.metrics\n    }\n\n    fn peer_timeout(&self) -> Duration {\n        self.get_from_peer_timeout\n    }\n\n    async fn get_locally<REv: From<StorageRequest> + Send>(\n        effect_builder: EffectBuilder<REv>,\n        id: BlockHash,\n    ) -> Option<BlockHeader> {\n        // Requests from fetcher are not restricted by the block availability index.\n        let only_from_available_block_range = false;\n        effect_builder\n            .get_block_header_from_storage(id, only_from_available_block_range)\n            .await\n    }\n\n    fn put_to_storage<'a, REv: From<StorageRequest> + Send>(\n        effect_builder: 
EffectBuilder<REv>,\n        item: BlockHeader,\n    ) -> StoringState<'a, BlockHeader> {\n        StoringState::Enqueued(\n            effect_builder\n                .put_block_header_to_storage(Box::new(item))\n                .map(|_| ())\n                .boxed(),\n        )\n    }\n\n    async fn announce_fetched_new_item<REv: Send>(\n        _effect_builder: EffectBuilder<REv>,\n        _item: BlockHeader,\n        _peer: NodeId,\n    ) {\n    }\n}\n"
  },
  {
    "path": "node/src/components/fetcher/fetcher_impls/finality_signature_fetcher.rs",
    "content": "use std::{collections::HashMap, time::Duration};\n\nuse async_trait::async_trait;\nuse futures::FutureExt;\n\nuse casper_types::{crypto, FinalitySignature, FinalitySignatureId};\n\nuse crate::{\n    components::fetcher::{\n        metrics::Metrics, EmptyValidationMetadata, FetchItem, Fetcher, ItemFetcher, ItemHandle,\n        StoringState, Tag,\n    },\n    effect::{\n        announcements::FetchedNewFinalitySignatureAnnouncement,\n        requests::{BlockAccumulatorRequest, StorageRequest},\n        EffectBuilder,\n    },\n    types::NodeId,\n};\n\nimpl FetchItem for FinalitySignature {\n    type Id = Box<FinalitySignatureId>;\n    type ValidationError = crypto::Error;\n    type ValidationMetadata = EmptyValidationMetadata;\n\n    const TAG: Tag = Tag::FinalitySignature;\n\n    fn fetch_id(&self) -> Self::Id {\n        Box::new(FinalitySignatureId::new(\n            *self.block_hash(),\n            self.era_id(),\n            self.public_key().clone(),\n        ))\n    }\n\n    fn validate(&self, _metadata: &EmptyValidationMetadata) -> Result<(), Self::ValidationError> {\n        self.is_verified()\n    }\n}\n\n#[async_trait]\nimpl ItemFetcher<FinalitySignature> for Fetcher<FinalitySignature> {\n    const SAFE_TO_RESPOND_TO_ALL: bool = true;\n\n    fn item_handles(\n        &mut self,\n    ) -> &mut HashMap<Box<FinalitySignatureId>, HashMap<NodeId, ItemHandle<FinalitySignature>>>\n    {\n        &mut self.item_handles\n    }\n\n    fn metrics(&mut self) -> &Metrics {\n        &self.metrics\n    }\n\n    fn peer_timeout(&self) -> Duration {\n        self.get_from_peer_timeout\n    }\n\n    async fn get_locally<REv: From<StorageRequest> + From<BlockAccumulatorRequest> + Send>(\n        effect_builder: EffectBuilder<REv>,\n        id: Box<FinalitySignatureId>,\n    ) -> Option<FinalitySignature> {\n        effect_builder\n            .get_signature_from_storage(*id.block_hash(), id.public_key().clone())\n            .await\n    }\n\n    fn 
put_to_storage<'a, REv: From<StorageRequest> + Send>(\n        effect_builder: EffectBuilder<REv>,\n        item: FinalitySignature,\n    ) -> StoringState<'a, FinalitySignature> {\n        StoringState::Enqueued(\n            effect_builder\n                .put_finality_signature_to_storage(item)\n                .map(|_| ())\n                .boxed(),\n        )\n    }\n\n    async fn announce_fetched_new_item<REv>(\n        effect_builder: EffectBuilder<REv>,\n        item: FinalitySignature,\n        peer: NodeId,\n    ) where\n        REv: From<FetchedNewFinalitySignatureAnnouncement> + Send,\n    {\n        effect_builder\n            .announce_fetched_new_finality_signature(Box::new(item.clone()), peer)\n            .await\n    }\n}\n"
  },
  {
    "path": "node/src/components/fetcher/fetcher_impls/legacy_deploy_fetcher.rs",
    "content": "use std::{collections::HashMap, time::Duration};\n\nuse async_trait::async_trait;\nuse futures::FutureExt;\n\nuse casper_types::{Deploy, DeployHash, Transaction};\n\nuse crate::{\n    components::fetcher::{metrics::Metrics, Fetcher, ItemFetcher, ItemHandle, StoringState},\n    effect::{requests::StorageRequest, EffectBuilder},\n    types::{LegacyDeploy, NodeId},\n};\n\n#[async_trait]\nimpl ItemFetcher<LegacyDeploy> for Fetcher<LegacyDeploy> {\n    const SAFE_TO_RESPOND_TO_ALL: bool = true;\n\n    fn item_handles(\n        &mut self,\n    ) -> &mut HashMap<DeployHash, HashMap<NodeId, ItemHandle<LegacyDeploy>>> {\n        &mut self.item_handles\n    }\n\n    fn metrics(&mut self) -> &Metrics {\n        &self.metrics\n    }\n\n    fn peer_timeout(&self) -> Duration {\n        self.get_from_peer_timeout\n    }\n\n    async fn get_locally<REv: From<StorageRequest> + Send>(\n        effect_builder: EffectBuilder<REv>,\n        id: DeployHash,\n    ) -> Option<LegacyDeploy> {\n        effect_builder.get_stored_legacy_deploy(id).await\n    }\n\n    fn put_to_storage<'a, REv: From<StorageRequest> + Send>(\n        effect_builder: EffectBuilder<REv>,\n        item: LegacyDeploy,\n    ) -> StoringState<'a, LegacyDeploy> {\n        StoringState::Enqueued(\n            effect_builder\n                .put_transaction_to_storage(Transaction::from(Deploy::from(item)))\n                .map(|_| ())\n                .boxed(),\n        )\n    }\n\n    async fn announce_fetched_new_item<REv: Send>(\n        _effect_builder: EffectBuilder<REv>,\n        _item: LegacyDeploy,\n        _peer: NodeId,\n    ) {\n    }\n}\n"
  },
  {
    "path": "node/src/components/fetcher/fetcher_impls/sync_leap_fetcher.rs",
    "content": "use std::{collections::HashMap, time::Duration};\n\nuse async_trait::async_trait;\nuse futures::FutureExt;\n\nuse crate::{\n    components::fetcher::{metrics::Metrics, Fetcher, ItemFetcher, ItemHandle, StoringState},\n    effect::{requests::StorageRequest, EffectBuilder},\n    types::{NodeId, SyncLeap, SyncLeapIdentifier},\n};\n\n#[async_trait]\nimpl ItemFetcher<SyncLeap> for Fetcher<SyncLeap> {\n    // We want the fetcher to ask all the peers we give to it separately, and return their\n    // responses separately, not just respond with the first SyncLeap it successfully gets from a\n    // single peer.\n    const SAFE_TO_RESPOND_TO_ALL: bool = false;\n\n    fn item_handles(\n        &mut self,\n    ) -> &mut HashMap<SyncLeapIdentifier, HashMap<NodeId, ItemHandle<SyncLeap>>> {\n        &mut self.item_handles\n    }\n\n    fn metrics(&mut self) -> &Metrics {\n        &self.metrics\n    }\n\n    fn peer_timeout(&self) -> Duration {\n        self.get_from_peer_timeout\n    }\n\n    async fn get_locally<REv: Send>(\n        _effect_builder: EffectBuilder<REv>,\n        _id: SyncLeapIdentifier,\n    ) -> Option<SyncLeap> {\n        // We never get a SyncLeap we requested from our own storage.\n        None\n    }\n\n    fn put_to_storage<'a, REv: From<StorageRequest> + Send>(\n        effect_builder: EffectBuilder<REv>,\n        item: SyncLeap,\n    ) -> StoringState<'a, SyncLeap> {\n        StoringState::Enqueued(\n            async move {\n                for header in item.headers() {\n                    effect_builder\n                        .put_block_header_to_storage(Box::new(header.clone()))\n                        .await;\n                }\n                for block_header in item.block_headers_with_signatures {\n                    effect_builder\n                        .put_signatures_to_storage(block_header.block_signatures().clone())\n                        .await;\n                }\n            }\n            .boxed(),\n        )\n   
 }\n\n    async fn announce_fetched_new_item<REv: Send>(\n        _effect_builder: EffectBuilder<REv>,\n        _item: SyncLeap,\n        _peer: NodeId,\n    ) {\n    }\n}\n"
  },
  {
    "path": "node/src/components/fetcher/fetcher_impls/transaction_fetcher.rs",
    "content": "use std::{collections::HashMap, time::Duration};\n\nuse async_trait::async_trait;\nuse futures::FutureExt;\n\nuse casper_types::{InvalidTransaction, Transaction, TransactionId};\n\nuse crate::{\n    components::fetcher::{\n        metrics::Metrics, EmptyValidationMetadata, FetchItem, Fetcher, ItemFetcher, ItemHandle,\n        StoringState, Tag,\n    },\n    effect::{requests::StorageRequest, EffectBuilder},\n    types::NodeId,\n};\n\nimpl FetchItem for Transaction {\n    type Id = TransactionId;\n    type ValidationError = InvalidTransaction;\n    type ValidationMetadata = EmptyValidationMetadata;\n\n    const TAG: Tag = Tag::Transaction;\n\n    fn fetch_id(&self) -> Self::Id {\n        self.compute_id()\n    }\n\n    fn validate(&self, _metadata: &EmptyValidationMetadata) -> Result<(), Self::ValidationError> {\n        self.verify()\n    }\n}\n\n#[async_trait]\nimpl ItemFetcher<Transaction> for Fetcher<Transaction> {\n    const SAFE_TO_RESPOND_TO_ALL: bool = true;\n\n    fn item_handles(\n        &mut self,\n    ) -> &mut HashMap<TransactionId, HashMap<NodeId, ItemHandle<Transaction>>> {\n        &mut self.item_handles\n    }\n\n    fn metrics(&mut self) -> &Metrics {\n        &self.metrics\n    }\n\n    fn peer_timeout(&self) -> Duration {\n        self.get_from_peer_timeout\n    }\n\n    async fn get_locally<REv: From<StorageRequest> + Send>(\n        effect_builder: EffectBuilder<REv>,\n        id: TransactionId,\n    ) -> Option<Transaction> {\n        effect_builder.get_stored_transaction(id).await\n    }\n\n    fn put_to_storage<'a, REv: From<StorageRequest> + Send>(\n        effect_builder: EffectBuilder<REv>,\n        item: Transaction,\n    ) -> StoringState<'a, Transaction> {\n        StoringState::Enqueued(\n            async move {\n                let is_new = effect_builder\n                    .put_transaction_to_storage(item.clone())\n                    .await;\n                // If `is_new` is `false`, the transaction was 
previously stored, and the incoming\n                // transaction could have a different set of approvals to the one already stored.\n                // We can treat the incoming approvals as finalized and now try and store them.\n                if !is_new {\n                    effect_builder\n                        .store_finalized_approvals(item.hash(), item.approvals())\n                        .await;\n                }\n            }\n            .boxed(),\n        )\n    }\n\n    async fn announce_fetched_new_item<REv: Send>(\n        _effect_builder: EffectBuilder<REv>,\n        _item: Transaction,\n        _peer: NodeId,\n    ) {\n    }\n}\n"
  },
  {
    "path": "node/src/components/fetcher/fetcher_impls/trie_or_chunk_fetcher.rs",
    "content": "use std::{collections::HashMap, time::Duration};\n\nuse async_trait::async_trait;\nuse tracing::error;\n\nuse casper_storage::data_access_layer::{TrieElement, TrieRequest, TrieResult};\n\nuse crate::{\n    components::fetcher::{metrics::Metrics, Fetcher, ItemFetcher, ItemHandle, StoringState},\n    effect::{requests::ContractRuntimeRequest, EffectBuilder},\n    types::{NodeId, TrieOrChunk, TrieOrChunkId},\n};\n\n#[async_trait]\nimpl ItemFetcher<TrieOrChunk> for Fetcher<TrieOrChunk> {\n    const SAFE_TO_RESPOND_TO_ALL: bool = true;\n\n    fn item_handles(\n        &mut self,\n    ) -> &mut HashMap<TrieOrChunkId, HashMap<NodeId, ItemHandle<TrieOrChunk>>> {\n        &mut self.item_handles\n    }\n\n    fn metrics(&mut self) -> &Metrics {\n        &self.metrics\n    }\n\n    fn peer_timeout(&self) -> Duration {\n        self.get_from_peer_timeout\n    }\n\n    async fn get_locally<REv: From<ContractRuntimeRequest> + Send>(\n        effect_builder: EffectBuilder<REv>,\n        id: TrieOrChunkId,\n    ) -> Option<TrieOrChunk> {\n        let TrieOrChunkId(chunk_index, trie_key) = id;\n        let request = TrieRequest::new(trie_key, Some(chunk_index));\n        let result = effect_builder.get_trie(request).await;\n        match result {\n            TrieResult::ValueNotFound(_) => None,\n            TrieResult::Failure(err) => {\n                error!(%err, \"failed to get trie element locally\");\n                None\n            }\n            TrieResult::Success { element } => match element {\n                TrieElement::Raw(raw) => match TrieOrChunk::new(raw.into(), 0) {\n                    Ok(voc) => Some(voc),\n                    Err(err) => {\n                        error!(%err, \"raw chunking error\");\n                        None\n                    }\n                },\n                TrieElement::Chunked(raw, chunk_id) => match TrieOrChunk::new(raw.into(), chunk_id)\n                {\n                    Ok(voc) => Some(voc),\n        
            Err(err) => {\n                        error!(%err, \"chunking error\");\n                        None\n                    }\n                },\n            },\n        }\n    }\n\n    fn put_to_storage<'a, REv>(\n        _effect_builder: EffectBuilder<REv>,\n        item: TrieOrChunk,\n    ) -> StoringState<'a, TrieOrChunk> {\n        // Stored by the GlobalStateSynchronizer once all chunks are fetched.\n        StoringState::WontStore(item)\n    }\n\n    async fn announce_fetched_new_item<REv: Send>(\n        _effect_builder: EffectBuilder<REv>,\n        _item: TrieOrChunk,\n        _peer: NodeId,\n    ) {\n    }\n}\n"
  },
  {
    "path": "node/src/components/fetcher/fetcher_impls.rs",
    "content": "mod approvals_hashes_fetcher;\nmod block_execution_results_or_chunk_fetcher;\nmod block_fetcher;\nmod block_header_fetcher;\nmod finality_signature_fetcher;\nmod legacy_deploy_fetcher;\nmod sync_leap_fetcher;\nmod transaction_fetcher;\nmod trie_or_chunk_fetcher;\n"
  },
  {
    "path": "node/src/components/fetcher/item_fetcher.rs",
    "content": "use std::{\n    collections::{hash_map::Entry, HashMap},\n    time::Duration,\n};\n\nuse async_trait::async_trait;\nuse futures::future::BoxFuture;\nuse tracing::{debug, error, trace};\n\nuse super::{Error, Event, FetchResponder, FetchedData, ItemHandle, Metrics};\nuse crate::{\n    components::{fetcher::FetchItem, network::blocklist::BlocklistJustification},\n    effect::{\n        announcements::{\n            FetchedNewBlockAnnouncement, FetchedNewFinalitySignatureAnnouncement,\n            PeerBehaviorAnnouncement,\n        },\n        requests::{\n            BlockAccumulatorRequest, ContractRuntimeRequest, NetworkRequest, StorageRequest,\n        },\n        EffectBuilder, EffectExt, Effects,\n    },\n    protocol::Message,\n    types::NodeId,\n};\n\npub(super) enum StoringState<'a, T> {\n    Enqueued(BoxFuture<'a, ()>),\n    WontStore(T),\n}\n\n#[async_trait]\npub(super) trait ItemFetcher<T: FetchItem + 'static> {\n    /// Indicator on whether it is safe to respond to all of our responders. For example, [Deploy]s\n    /// and [BlockHeader]s are safe because their [Item::id] is all that is needed for\n    /// authentication. But other structures have _finality signatures_ or have substructures that\n    /// require validation. These are not infallible, and only the responders corresponding to the\n    /// node queried may be responded to.\n    const SAFE_TO_RESPOND_TO_ALL: bool;\n\n    fn item_handles(&mut self) -> &mut HashMap<T::Id, HashMap<NodeId, ItemHandle<T>>>;\n\n    fn metrics(&mut self) -> &Metrics;\n\n    fn peer_timeout(&self) -> Duration;\n\n    /// We've been asked to fetch the item by another component of this node.  
We'll try to get it\n    /// locally first (generally from our own storage component), and if that fails, we'll send a\n    /// request to `peer` for the item.\n    fn fetch<REv>(\n        &self,\n        effect_builder: EffectBuilder<REv>,\n        id: T::Id,\n        peer: NodeId,\n        validation_metadata: Box<T::ValidationMetadata>,\n        responder: FetchResponder<T>,\n    ) -> Effects<Event<T>>\n    where\n        REv: From<StorageRequest>\n            + From<BlockAccumulatorRequest>\n            + From<ContractRuntimeRequest>\n            + Send,\n    {\n        Self::get_locally(effect_builder, id.clone()).event(move |result| Event::GetLocallyResult {\n            id,\n            peer,\n            validation_metadata,\n            maybe_item: result.map(Box::new),\n            responder,\n        })\n    }\n\n    /// Handles attempting to get the item locally.\n    async fn get_locally<REv>(effect_builder: EffectBuilder<REv>, id: T::Id) -> Option<T>\n    where\n        REv: From<StorageRequest>\n            + From<BlockAccumulatorRequest>\n            + From<ContractRuntimeRequest>\n            + Send;\n\n    /// Handles the `Err` case for a `Result` of attempting to get the item locally.\n    fn failed_to_get_locally<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        id: T::Id,\n        peer: NodeId,\n        validation_metadata: Box<T::ValidationMetadata>,\n        responder: FetchResponder<T>,\n    ) -> Effects<Event<T>>\n    where\n        <T as FetchItem>::Id: 'static,\n        REv: From<NetworkRequest<Message>> + Send,\n    {\n        let peer_timeout = self.peer_timeout();\n        // Capture responder for later signalling.\n        let item_handles = self.item_handles();\n        match item_handles.entry(id.clone()).or_default().entry(peer) {\n            Entry::Occupied(mut entry) => {\n                let handle = entry.get_mut();\n                if handle.validation_metadata() != &*validation_metadata {\n      
              let error = Error::ValidationMetadataMismatch {\n                        id: Box::new(id),\n                        peer,\n                        current: Box::new(handle.validation_metadata().clone()),\n                        new: validation_metadata,\n                    };\n                    error!(%error, \"failed to fetch\");\n                    return responder.respond(Err(error)).ignore();\n                }\n                handle.push_responder(responder);\n            }\n            Entry::Vacant(entry) => {\n                entry.insert(ItemHandle::new(validation_metadata, responder));\n            }\n        }\n        match Message::new_get_request::<T>(&id) {\n            Ok(message) => {\n                self.metrics().fetch_total.inc();\n                async move {\n                    effect_builder.send_message(peer, message).await;\n                    effect_builder.set_timeout(peer_timeout).await\n                }\n            }\n            .event(move |_| Event::TimeoutPeer { id, peer }),\n            Err(error) => {\n                error!(%peer, %error, \"failed to construct get request\");\n\n                self.signal(\n                    id.clone(),\n                    Err(Error::CouldNotConstructGetRequest {\n                        id: Box::new(id),\n                        peer,\n                    }),\n                    peer,\n                )\n            }\n        }\n    }\n\n    fn got_from_peer<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        peer: NodeId,\n        item: Box<T>,\n    ) -> Effects<Event<T>>\n    where\n        REv: From<StorageRequest> + From<PeerBehaviorAnnouncement> + Send,\n    {\n        self.metrics().found_on_peer.inc();\n\n        let validation_metadata = match self\n            .item_handles()\n            .get(&item.fetch_id())\n            .and_then(|item_handles| item_handles.get(&peer))\n        {\n            Some(item_handle) => 
item_handle.validation_metadata(),\n            None => {\n                debug!(item_id = %item.fetch_id(), tag = ?T::TAG, %peer, \"got unexpected item from peer\");\n                return Effects::new();\n            }\n        };\n\n        if let Err(err) = item.validate(validation_metadata) {\n            debug!(%peer, %err, ?item, \"peer sent invalid item\");\n            effect_builder\n                .announce_block_peer_with_justification(\n                    peer,\n                    BlocklistJustification::SentInvalidItem {\n                        tag: T::TAG,\n                        error_msg: err.to_string(),\n                    },\n                )\n                .ignore()\n        } else {\n            match Self::put_to_storage(effect_builder, *item.clone()) {\n                StoringState::WontStore(item) => self.signal(item.fetch_id(), Ok(item), peer),\n                StoringState::Enqueued(store_future) => {\n                    store_future.event(move |_| Event::PutToStorage { item, peer })\n                }\n            }\n        }\n    }\n\n    /// Sends fetched data to all responders\n    fn respond_to_all(&mut self, id: T::Id, fetched_data: FetchedData<T>) -> Effects<Event<T>> {\n        let mut effects = Effects::new();\n        let item_handles = self.item_handles().remove(&id).unwrap_or_default();\n        for (_peer, item_handle) in item_handles {\n            for responder in item_handle.take_responders() {\n                effects.extend(responder.respond(Ok(fetched_data.clone())).ignore());\n            }\n        }\n        effects\n    }\n\n    /// Responds to all responders corresponding to a specific item-peer combination with a result.\n    fn send_response_from_peer(\n        &mut self,\n        id: T::Id,\n        result: Result<T, Error<T>>,\n        peer: NodeId,\n    ) -> Effects<Event<T>> {\n        let mut effects = Effects::new();\n        let mut item_handles = 
self.item_handles().remove(&id).unwrap_or_default();\n        match result {\n            Ok(item) => {\n                // Since this is a success, we can safely respond to all awaiting processes.\n                for responder in item_handles\n                    .remove(&peer)\n                    .map(ItemHandle::take_responders)\n                    .unwrap_or_default()\n                {\n                    effects.extend(\n                        responder\n                            .respond(Ok(FetchedData::from_peer(item.clone(), peer)))\n                            .ignore(),\n                    );\n                }\n            }\n            Err(error @ Error::TimedOut { .. }) => {\n                // We take just one responder as only one request had timed out. We want to avoid\n                // prematurely failing too many waiting processes since other requests may still\n                // succeed before timing out.\n                let should_remove_item_handle = match item_handles.get_mut(&peer) {\n                    Some(item_handle) => {\n                        if let Some(responder) = item_handle.pop_front_responder() {\n                            effects.extend(responder.respond(Err(error)).ignore());\n                            // Only if there's still a responder waiting for the item we increment\n                            // the metric. 
Otherwise we will count every request as timed out, even\n                            // if the item had been fetched.\n                            trace!(TAG=%T::TAG, %id, %peer, \"request timed out\");\n                            self.metrics().timeouts.inc();\n                        }\n                        item_handle.has_no_responders()\n                    }\n                    None => false,\n                };\n                if should_remove_item_handle {\n                    item_handles.remove(&peer);\n                }\n            }\n            Err(\n                error @ (Error::Absent { .. }\n                | Error::Rejected { .. }\n                | Error::CouldNotConstructGetRequest { .. }\n                | Error::ValidationMetadataMismatch { .. }),\n            ) => {\n                // For all other error variants we can safely respond with failure as there's no\n                // chance for the request to succeed.\n                for responder in item_handles\n                    .remove(&peer)\n                    .map(ItemHandle::take_responders)\n                    .unwrap_or_default()\n                {\n                    effects.extend(responder.respond(Err(error.clone())).ignore());\n                }\n            }\n        }\n        if !item_handles.is_empty() {\n            self.item_handles().insert(id, item_handles);\n        }\n        effects\n    }\n\n    fn put_to_storage<'a, REv>(\n        _effect_builder: EffectBuilder<REv>,\n        _item: T,\n    ) -> StoringState<'a, T>\n    where\n        REv: From<StorageRequest> + Send;\n\n    async fn announce_fetched_new_item<REv>(\n        _effect_builder: EffectBuilder<REv>,\n        item: T,\n        peer: NodeId,\n    ) where\n        REv: From<FetchedNewBlockAnnouncement>\n            + From<FetchedNewFinalitySignatureAnnouncement>\n            + Send;\n\n    /// Handles signalling responders with the item or an error.\n    fn signal(\n        &mut self,\n        
id: T::Id,\n        result: Result<T, Error<T>>,\n        peer: NodeId,\n    ) -> Effects<Event<T>> {\n        match result {\n            Ok(fetched_item) if Self::SAFE_TO_RESPOND_TO_ALL => {\n                self.respond_to_all(id, FetchedData::from_peer(fetched_item, peer))\n            }\n            Ok(_) => self.send_response_from_peer(id, result, peer),\n            Err(_) => self.send_response_from_peer(id, result, peer),\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/fetcher/item_handle.rs",
    "content": "use datasize::DataSize;\n\nuse super::{FetchItem, FetchResponder};\n\n#[derive(Debug, DataSize)]\npub(crate) struct ItemHandle<T>\nwhere\n    T: FetchItem,\n{\n    validation_metadata: Box<T::ValidationMetadata>,\n    responders: Vec<FetchResponder<T>>,\n}\n\nimpl<T: FetchItem> ItemHandle<T> {\n    pub(super) fn new(\n        validation_metadata: Box<T::ValidationMetadata>,\n        responder: FetchResponder<T>,\n    ) -> Self {\n        Self {\n            validation_metadata,\n            responders: vec![responder],\n        }\n    }\n\n    pub(super) fn validation_metadata(&self) -> &T::ValidationMetadata {\n        &self.validation_metadata\n    }\n\n    pub(super) fn push_responder(&mut self, responder: FetchResponder<T>) {\n        self.responders.push(responder)\n    }\n\n    pub(super) fn pop_front_responder(&mut self) -> Option<FetchResponder<T>> {\n        if self.responders.is_empty() {\n            return None;\n        }\n        Some(self.responders.remove(0))\n    }\n\n    pub(super) fn take_responders(self) -> Vec<FetchResponder<T>> {\n        self.responders\n    }\n\n    pub(super) fn has_no_responders(&self) -> bool {\n        self.responders.is_empty()\n    }\n}\n"
  },
  {
    "path": "node/src/components/fetcher/metrics.rs",
    "content": "use prometheus::{IntCounter, Registry};\n\nuse crate::unregister_metric;\n\n#[derive(Debug)]\npub(crate) struct Metrics {\n    /// Number of fetch requests that found an item in the storage.\n    pub found_in_storage: IntCounter,\n    /// Number of fetch requests that fetched an item from peer.\n    pub found_on_peer: IntCounter,\n    /// Number of fetch requests that timed out.\n    pub timeouts: IntCounter,\n    /// Number of total fetch requests made.\n    pub fetch_total: IntCounter,\n    /// Reference to the registry for unregistering.\n    registry: Registry,\n}\n\nimpl Metrics {\n    pub(super) fn new(name: &str, registry: &Registry) -> Result<Self, prometheus::Error> {\n        let found_in_storage = IntCounter::new(\n            format!(\"{}_found_in_storage\", name),\n            format!(\n                \"number of fetch requests that found {} in local storage\",\n                name\n            ),\n        )?;\n        let found_on_peer = IntCounter::new(\n            format!(\"{}_found_on_peer\", name),\n            format!(\"number of fetch requests that fetched {} from peer\", name),\n        )?;\n        let timeouts = IntCounter::new(\n            format!(\"{}_timeouts\", name),\n            format!(\"number of {} fetch requests that timed out\", name),\n        )?;\n        let fetch_total = IntCounter::new(\n            format!(\"{}_fetch_total\", name),\n            format!(\"number of {} all fetch requests made\", name),\n        )?;\n        registry.register(Box::new(found_in_storage.clone()))?;\n        registry.register(Box::new(found_on_peer.clone()))?;\n        registry.register(Box::new(timeouts.clone()))?;\n        registry.register(Box::new(fetch_total.clone()))?;\n\n        Ok(Metrics {\n            found_in_storage,\n            found_on_peer,\n            timeouts,\n            fetch_total,\n            registry: registry.clone(),\n        })\n    }\n}\n\nimpl Drop for Metrics {\n    fn drop(&mut self) {\n        
unregister_metric!(self.registry, self.found_in_storage);\n        unregister_metric!(self.registry, self.found_on_peer);\n        unregister_metric!(self.registry, self.timeouts);\n        unregister_metric!(self.registry, self.fetch_total);\n    }\n}\n"
  },
  {
    "path": "node/src/components/fetcher/tag.rs",
    "content": "use std::hash::Hash;\n\nuse datasize::DataSize;\nuse derive_more::Display;\nuse serde_repr::{Deserialize_repr, Serialize_repr};\nuse strum::EnumIter;\n\n/// An identifier for a specific type implementing the `Item` trait.  Each different implementing\n/// type should have a unique `Tag` variant.\n#[derive(\n    Clone,\n    Copy,\n    DataSize,\n    PartialEq,\n    Eq,\n    PartialOrd,\n    Ord,\n    Hash,\n    Serialize_repr,\n    Deserialize_repr,\n    Debug,\n    Display,\n    EnumIter,\n)]\n#[repr(u8)]\npub enum Tag {\n    /// A transaction identified by its hash and its approvals hash.\n    #[display(fmt = \"transaction\")]\n    Transaction,\n    /// A legacy deploy identified by its hash alone.\n    #[display(fmt = \"legacy deploy\")]\n    LegacyDeploy,\n    /// A block.\n    #[display(fmt = \"block\")]\n    Block,\n    /// A block header.\n    #[display(fmt = \"block header\")]\n    BlockHeader,\n    /// A trie or chunk of a trie from global state.\n    #[display(fmt = \"trie or chunk\")]\n    TrieOrChunk,\n    /// A finality signature for a block.\n    #[display(fmt = \"finality signature\")]\n    FinalitySignature,\n    /// Headers and signatures required to prove that if a given trusted block hash is on the\n    /// correct chain, then so is a later header, which should be the most recent one according\n    /// to the sender.\n    #[display(fmt = \"sync leap\")]\n    SyncLeap,\n    /// The hashes of the finalized deploy approvals sets for a single block.\n    #[display(fmt = \"approvals hashes\")]\n    ApprovalsHashes,\n    /// The execution results for a single block.\n    #[display(fmt = \"block execution results\")]\n    BlockExecutionResults,\n}\n"
  },
  {
    "path": "node/src/components/fetcher/tests.rs",
    "content": "#![cfg(test)]\n\nuse std::{\n    fmt::{self, Display, Formatter},\n    sync::{Arc, Mutex},\n};\n\nuse derive_more::From;\nuse futures::FutureExt;\nuse serde::Serialize;\nuse tempfile::TempDir;\nuse thiserror::Error;\n\nuse casper_types::{\n    testing::TestRng, BlockV2, Chainspec, ChainspecRawBytes, FinalitySignatureV2, Transaction,\n    TransactionConfig, TransactionHash, TransactionId,\n};\n\nuse super::*;\nuse crate::{\n    components::{\n        consensus::ConsensusRequestMessage,\n        fetcher,\n        in_memory_network::{self, InMemoryNetwork, NetworkController},\n        network::{GossipedAddress, Identity as NetworkIdentity},\n        storage::{self, Storage},\n        transaction_acceptor,\n    },\n    effect::{\n        announcements::{ControlAnnouncement, FatalAnnouncement, TransactionAcceptorAnnouncement},\n        incoming::{\n            ConsensusMessageIncoming, DemandIncoming, FinalitySignatureIncoming, GossiperIncoming,\n            NetRequestIncoming, NetResponse, NetResponseIncoming, TrieDemand, TrieRequestIncoming,\n            TrieResponseIncoming,\n        },\n        requests::{AcceptTransactionRequest, MarkBlockCompletedRequest},\n    },\n    fatal,\n    protocol::Message,\n    reactor::{self, EventQueueHandle, Reactor as ReactorTrait, ReactorEvent, Runner},\n    testing::{\n        self,\n        network::{NetworkedReactor, TestingNetwork},\n        ConditionCheckReactor, FakeTransactionAcceptor,\n    },\n    types::NodeId,\n    utils::WithDir,\n};\n\nconst TIMEOUT: Duration = Duration::from_secs(1);\n\n/// Error type returned by the test reactor.\n#[derive(Debug, Error)]\nenum Error {\n    #[error(\"prometheus (metrics) error: {0}\")]\n    Metrics(#[from] prometheus::Error),\n}\n\nimpl Drop for Reactor {\n    fn drop(&mut self) {\n        NetworkController::<Message>::remove_node(&self.network.node_id())\n    }\n}\n\n#[derive(Debug)]\npub struct FetcherTestConfig {\n    fetcher_config: Config,\n    storage_config: 
storage::Config,\n    temp_dir: TempDir,\n}\n\nimpl Default for FetcherTestConfig {\n    fn default() -> Self {\n        let (storage_config, temp_dir) = storage::Config::new_for_tests(1);\n        FetcherTestConfig {\n            fetcher_config: Default::default(),\n            storage_config,\n            temp_dir,\n        }\n    }\n}\n\n#[derive(Debug, From, Serialize)]\nenum Event {\n    #[from]\n    ControlAnnouncement(ControlAnnouncement),\n    #[from]\n    FatalAnnouncement(FatalAnnouncement),\n    #[from]\n    Network(in_memory_network::Event<Message>),\n    #[from]\n    Storage(storage::Event),\n    #[from]\n    FakeTransactionAcceptor(transaction_acceptor::Event),\n    #[from]\n    TransactionFetcher(fetcher::Event<Transaction>),\n    #[from]\n    NetworkRequestMessage(NetworkRequest<Message>),\n    #[from]\n    StorageRequest(StorageRequest),\n    #[from]\n    FetcherRequestTransaction(FetcherRequest<Transaction>),\n    #[from]\n    BlockAccumulatorRequest(BlockAccumulatorRequest),\n    #[from]\n    AcceptTransactionRequest(AcceptTransactionRequest),\n    #[from]\n    TransactionAcceptorAnnouncement(TransactionAcceptorAnnouncement),\n    #[from]\n    FetchedNewFinalitySignatureAnnouncement(FetchedNewFinalitySignatureAnnouncement),\n    #[from]\n    FetchedNewBlockAnnouncement(FetchedNewBlockAnnouncement),\n    #[from]\n    NetRequestIncoming(NetRequestIncoming),\n    #[from]\n    NetResponseIncoming(NetResponseIncoming),\n    #[from]\n    BlocklistAnnouncement(PeerBehaviorAnnouncement),\n    #[from]\n    MarkBlockCompletedRequest(MarkBlockCompletedRequest),\n    #[from]\n    TrieDemand(TrieDemand),\n    #[from]\n    ContractRuntimeRequest(ContractRuntimeRequest),\n    #[from]\n    GossiperIncomingTransaction(GossiperIncoming<Transaction>),\n    #[from]\n    GossiperIncomingBlock(GossiperIncoming<BlockV2>),\n    #[from]\n    GossiperIncomingFinalitySignature(GossiperIncoming<FinalitySignatureV2>),\n    #[from]\n    
GossiperIncomingGossipedAddress(GossiperIncoming<GossipedAddress>),\n    #[from]\n    TrieRequestIncoming(TrieRequestIncoming),\n    #[from]\n    TrieResponseIncoming(TrieResponseIncoming),\n    #[from]\n    ConsensusMessageIncoming(ConsensusMessageIncoming),\n    #[from]\n    ConsensusDemandIncoming(DemandIncoming<ConsensusRequestMessage>),\n    #[from]\n    FinalitySignatureIncoming(FinalitySignatureIncoming),\n}\n\nimpl Display for Event {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        Debug::fmt(self, f)\n    }\n}\n\nimpl ReactorEvent for Event {\n    fn is_control(&self) -> bool {\n        matches!(self, Event::ControlAnnouncement(_))\n    }\n\n    fn try_into_control(self) -> Option<ControlAnnouncement> {\n        match self {\n            Event::ControlAnnouncement(ctrl_ann) => Some(ctrl_ann),\n            _ => None,\n        }\n    }\n}\n\nstruct Reactor {\n    network: InMemoryNetwork<Message>,\n    storage: Storage,\n    fake_transaction_acceptor: FakeTransactionAcceptor,\n    transaction_fetcher: Fetcher<Transaction>,\n}\n\nimpl ReactorTrait for Reactor {\n    type Event = Event;\n    type Config = FetcherTestConfig;\n    type Error = Error;\n\n    fn dispatch_event(\n        &mut self,\n        effect_builder: EffectBuilder<Self::Event>,\n        rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event> {\n        match event {\n            Event::Network(event) => reactor::wrap_effects(\n                Event::Network,\n                self.network.handle_event(effect_builder, rng, event),\n            ),\n            Event::Storage(event) => reactor::wrap_effects(\n                Event::Storage,\n                self.storage.handle_event(effect_builder, rng, event),\n            ),\n            Event::FakeTransactionAcceptor(event) => reactor::wrap_effects(\n                Event::FakeTransactionAcceptor,\n                self.fake_transaction_acceptor\n                    .handle_event(effect_builder, rng, 
event),\n            ),\n            Event::TransactionFetcher(event) => reactor::wrap_effects(\n                Event::TransactionFetcher,\n                self.transaction_fetcher\n                    .handle_event(effect_builder, rng, event),\n            ),\n            Event::NetworkRequestMessage(request) => reactor::wrap_effects(\n                Event::Network,\n                self.network\n                    .handle_event(effect_builder, rng, request.into()),\n            ),\n            Event::StorageRequest(request) => reactor::wrap_effects(\n                Event::Storage,\n                self.storage\n                    .handle_event(effect_builder, rng, request.into()),\n            ),\n            Event::FetcherRequestTransaction(request) => reactor::wrap_effects(\n                Event::TransactionFetcher,\n                self.transaction_fetcher\n                    .handle_event(effect_builder, rng, request.into()),\n            ),\n            Event::TransactionAcceptorAnnouncement(announcement) => {\n                let event = fetcher::Event::from(announcement);\n                reactor::wrap_effects(\n                    Event::TransactionFetcher,\n                    self.transaction_fetcher\n                        .handle_event(effect_builder, rng, event),\n                )\n            }\n            Event::AcceptTransactionRequest(AcceptTransactionRequest {\n                transaction,\n                is_speculative,\n                responder,\n            }) => {\n                assert!(!is_speculative);\n                let event = transaction_acceptor::Event::Accept {\n                    transaction,\n                    source: Source::Client,\n                    maybe_responder: Some(responder),\n                };\n                reactor::wrap_effects(\n                    Event::FakeTransactionAcceptor,\n                    self.fake_transaction_acceptor\n                        .handle_event(effect_builder, rng, 
event),\n                )\n            }\n            Event::NetRequestIncoming(announcement) => reactor::wrap_effects(\n                Event::Storage,\n                self.storage\n                    .handle_event(effect_builder, rng, announcement.into()),\n            ),\n            Event::NetResponseIncoming(announcement) => {\n                let mut announcement_effects = Effects::new();\n                let effects = self.handle_net_response(effect_builder, rng, announcement);\n                announcement_effects.extend(effects);\n                announcement_effects\n            }\n            Event::MarkBlockCompletedRequest(request) => reactor::wrap_effects(\n                Event::Storage,\n                self.storage\n                    .handle_event(effect_builder, rng, request.into()),\n            ),\n            Event::TrieDemand(_)\n            | Event::ContractRuntimeRequest(_)\n            | Event::BlockAccumulatorRequest(_)\n            | Event::BlocklistAnnouncement(_)\n            | Event::GossiperIncomingTransaction(_)\n            | Event::GossiperIncomingBlock(_)\n            | Event::GossiperIncomingFinalitySignature(_)\n            | Event::GossiperIncomingGossipedAddress(_)\n            | Event::TrieRequestIncoming(_)\n            | Event::TrieResponseIncoming(_)\n            | Event::ConsensusMessageIncoming(_)\n            | Event::ConsensusDemandIncoming(_)\n            | Event::FinalitySignatureIncoming(_)\n            | Event::FetchedNewBlockAnnouncement(_)\n            | Event::FetchedNewFinalitySignatureAnnouncement(_)\n            | Event::ControlAnnouncement(_)\n            | Event::FatalAnnouncement(_) => panic!(\"unexpected: {}\", event),\n        }\n    }\n\n    fn new(\n        cfg: Self::Config,\n        chainspec: Arc<Chainspec>,\n        _chainspec_raw_bytes: Arc<ChainspecRawBytes>,\n        _network_identity: NetworkIdentity,\n        registry: &Registry,\n        event_queue: EventQueueHandle<Self::Event>,\n      
  rng: &mut NodeRng,\n    ) -> Result<(Self, Effects<Self::Event>), Self::Error> {\n        let network = InMemoryNetwork::<Message>::new(event_queue, rng);\n\n        let storage = Storage::new(\n            &WithDir::new(cfg.temp_dir.path(), cfg.storage_config),\n            chainspec.hard_reset_to_start_of_era(),\n            chainspec.protocol_config.version,\n            chainspec.protocol_config.activation_point.era_id(),\n            &chainspec.network_config.name,\n            chainspec.transaction_config.max_ttl.into(),\n            chainspec.core_config.unbonding_delay,\n            Some(registry),\n            false,\n            TransactionConfig::default(),\n        )\n        .unwrap();\n\n        let fake_transaction_acceptor = FakeTransactionAcceptor::new();\n        let transaction_fetcher =\n            Fetcher::<Transaction>::new(\"transaction\", &cfg.fetcher_config, registry).unwrap();\n        let reactor = Reactor {\n            network,\n            storage,\n            fake_transaction_acceptor,\n            transaction_fetcher,\n        };\n        Ok((reactor, Effects::new()))\n    }\n}\n\nimpl Reactor {\n    fn handle_net_response(\n        &mut self,\n        effect_builder: EffectBuilder<Event>,\n        rng: &mut NodeRng,\n        response: NetResponseIncoming,\n    ) -> Effects<Event> {\n        match *response.message {\n            NetResponse::Transaction(ref serialized_item) => {\n                let transaction = match bincode::deserialize::<\n                    FetchResponse<Transaction, TransactionHash>,\n                >(serialized_item)\n                {\n                    Ok(FetchResponse::Fetched(txn)) => txn,\n                    Ok(FetchResponse::NotFound(txn_hash)) => {\n                        return fatal!(\n                            effect_builder,\n                            \"peer did not have transaction with hash {}: {}\",\n                            txn_hash,\n                            
response.sender,\n                        )\n                        .ignore();\n                    }\n                    Ok(FetchResponse::NotProvided(txn_hash)) => {\n                        return fatal!(\n                            effect_builder,\n                            \"peer refused to provide transaction with hash {}: {}\",\n                            txn_hash,\n                            response.sender,\n                        )\n                        .ignore();\n                    }\n                    Err(error) => {\n                        return fatal!(\n                            effect_builder,\n                            \"failed to decode transaction from {}: {}\",\n                            response.sender,\n                            error\n                        )\n                        .ignore();\n                    }\n                };\n\n                self.dispatch_event(\n                    effect_builder,\n                    rng,\n                    Event::FakeTransactionAcceptor(transaction_acceptor::Event::Accept {\n                        transaction,\n                        source: Source::Peer(response.sender),\n                        maybe_responder: None,\n                    }),\n                )\n            }\n            _ => fatal!(\n                effect_builder,\n                \"no support for anything but transaction responses in fetcher test\"\n            )\n            .ignore(),\n        }\n    }\n}\n\nimpl NetworkedReactor for Reactor {\n    fn node_id(&self) -> NodeId {\n        self.network.node_id()\n    }\n}\n\nfn announce_transaction_received(\n    txn: Transaction,\n) -> impl FnOnce(EffectBuilder<Event>) -> Effects<Event> {\n    |effect_builder: EffectBuilder<Event>| {\n        effect_builder.try_accept_transaction(txn, false).ignore()\n    }\n}\n\ntype FetchedTransactionResult = Arc<Mutex<(bool, Option<FetchResult<Transaction>>)>>;\n\nfn fetch_txn(\n    txn_id: 
TransactionId,\n    node_id: NodeId,\n    fetched: FetchedTransactionResult,\n) -> impl FnOnce(EffectBuilder<Event>) -> Effects<Event> {\n    move |effect_builder: EffectBuilder<Event>| {\n        effect_builder\n            .fetch::<Transaction>(txn_id, node_id, Box::new(EmptyValidationMetadata))\n            .then(move |txn| async move {\n                let mut result = fetched.lock().unwrap();\n                result.0 = true;\n                result.1 = Some(txn);\n            })\n            .ignore()\n    }\n}\n\n/// Store a transaction on a target node.\nasync fn store_txn(\n    txn: &Transaction,\n    node_id: &NodeId,\n    network: &mut TestingNetwork<Reactor>,\n    rng: &mut TestRng,\n) {\n    network\n        .process_injected_effect_on(node_id, announce_transaction_received(txn.clone()))\n        .await;\n\n    // cycle to transaction acceptor announcement\n    network\n        .crank_until(\n            node_id,\n            rng,\n            move |event: &Event| {\n                matches!(\n                    event,\n                    Event::TransactionAcceptorAnnouncement(\n                        TransactionAcceptorAnnouncement::AcceptedNewTransaction { .. 
},\n                    )\n                )\n            },\n            TIMEOUT,\n        )\n        .await;\n}\n\n#[derive(Debug)]\nenum ExpectedFetchedTransactionResult {\n    TimedOut,\n    FromStorage {\n        expected_txn: Box<Transaction>,\n    },\n    FromPeer {\n        expected_txn: Box<Transaction>,\n        expected_peer: NodeId,\n    },\n}\n\nasync fn assert_settled(\n    node_id: &NodeId,\n    txn_id: TransactionId,\n    expected_result: ExpectedFetchedTransactionResult,\n    fetched: FetchedTransactionResult,\n    network: &mut TestingNetwork<Reactor>,\n    rng: &mut TestRng,\n    timeout: Duration,\n) {\n    let has_responded = |_nodes: &HashMap<NodeId, Runner<ConditionCheckReactor<Reactor>>>| {\n        fetched.lock().unwrap().0\n    };\n\n    network.settle_on(rng, has_responded, timeout).await;\n\n    let maybe_stored_txn = network\n        .nodes()\n        .get(node_id)\n        .unwrap()\n        .reactor()\n        .inner()\n        .storage\n        .get_transaction_by_hash(txn_id.transaction_hash());\n\n    let actual_fetcher_result = fetched.lock().unwrap().1.clone();\n    match (expected_result, actual_fetcher_result, maybe_stored_txn) {\n        // Timed-out case: despite the delayed response causing a timeout, the response does arrive,\n        // and the TestTransactionAcceptor unconditionally accepts the txn and stores it.  For the\n        // test, we don't care whether it was stored or not, just that the TimedOut event fired.\n        (\n            ExpectedFetchedTransactionResult::TimedOut,\n            Some(Err(fetcher::Error::TimedOut { .. 
})),\n            _,\n        ) => {}\n        // FromStorage case: expect txn to correspond to item fetched, as well as stored item.\n        (\n            ExpectedFetchedTransactionResult::FromStorage { expected_txn },\n            Some(Ok(FetchedData::FromStorage { item })),\n            Some(stored_txn),\n        ) if expected_txn == item && stored_txn == *item => {}\n        // FromPeer case: txns should correspond, storage should be present and correspond, and\n        // peers should correspond.\n        (\n            ExpectedFetchedTransactionResult::FromPeer {\n                expected_txn,\n                expected_peer,\n            },\n            Some(Ok(FetchedData::FromPeer { item, peer })),\n            Some(stored_txn),\n        ) if expected_txn == item && stored_txn == *item && expected_peer == peer => {}\n        // Sad path case\n        (expected_result, actual_fetcher_result, maybe_stored_txn) => {\n            panic!(\n                \"Expected result type {:?} but found {:?} (stored transaction is {:?})\",\n                expected_result, actual_fetcher_result, maybe_stored_txn\n            )\n        }\n    }\n}\n\n#[tokio::test]\nasync fn should_fetch_from_local() {\n    const NETWORK_SIZE: usize = 1;\n\n    NetworkController::<Message>::create_active();\n    let (mut network, mut rng, node_ids) = {\n        let mut network = TestingNetwork::<Reactor>::new();\n        let mut rng = TestRng::new();\n        let node_ids = network.add_nodes(&mut rng, NETWORK_SIZE).await;\n        (network, rng, node_ids)\n    };\n\n    // Create a random txn.\n    let txn = Transaction::random(&mut rng);\n\n    // Store txn on a node.\n    let node_to_store_on = &node_ids[0];\n    store_txn(&txn, node_to_store_on, &mut network, &mut rng).await;\n\n    // Try to fetch the txn from a node that holds it.\n    let node_id = node_ids[0];\n    let txn_id = txn.fetch_id();\n    let fetched = Arc::new(Mutex::new((false, None)));\n    network\n        
.process_injected_effect_on(&node_id, fetch_txn(txn_id, node_id, Arc::clone(&fetched)))\n        .await;\n\n    let expected_result = ExpectedFetchedTransactionResult::FromStorage {\n        expected_txn: Box::new(txn),\n    };\n    assert_settled(\n        &node_id,\n        txn_id,\n        expected_result,\n        fetched,\n        &mut network,\n        &mut rng,\n        TIMEOUT,\n    )\n    .await;\n\n    NetworkController::<Message>::remove_active();\n}\n\n#[tokio::test]\nasync fn should_fetch_from_peer() {\n    const NETWORK_SIZE: usize = 2;\n\n    NetworkController::<Message>::create_active();\n    let (mut network, mut rng, node_ids) = {\n        let mut network = TestingNetwork::<Reactor>::new();\n        let mut rng = TestRng::new();\n        let node_ids = network.add_nodes(&mut rng, NETWORK_SIZE).await;\n        (network, rng, node_ids)\n    };\n\n    // Create a random txn.\n    let txn = Transaction::random(&mut rng);\n\n    // Store txn on a node.\n    let node_with_txn = node_ids[0];\n    store_txn(&txn, &node_with_txn, &mut network, &mut rng).await;\n\n    let node_without_txn = node_ids[1];\n    let txn_id = txn.fetch_id();\n    let fetched = Arc::new(Mutex::new((false, None)));\n\n    // Try to fetch the txn from a node that does not hold it; should get from peer.\n    network\n        .process_injected_effect_on(\n            &node_without_txn,\n            fetch_txn(txn_id, node_with_txn, Arc::clone(&fetched)),\n        )\n        .await;\n\n    let expected_result = ExpectedFetchedTransactionResult::FromPeer {\n        expected_txn: Box::new(txn),\n        expected_peer: node_with_txn,\n    };\n    assert_settled(\n        &node_without_txn,\n        txn_id,\n        expected_result,\n        fetched,\n        &mut network,\n        &mut rng,\n        TIMEOUT,\n    )\n    .await;\n\n    NetworkController::<Message>::remove_active();\n}\n\n#[tokio::test]\nasync fn should_timeout_fetch_from_peer() {\n    const NETWORK_SIZE: usize = 2;\n\n    
NetworkController::<Message>::create_active();\n    let (mut network, mut rng, node_ids) = {\n        let mut network = TestingNetwork::<Reactor>::new();\n        let mut rng = TestRng::new();\n        let node_ids = network.add_nodes(&mut rng, NETWORK_SIZE).await;\n        (network, rng, node_ids)\n    };\n\n    // Create a random txn.\n    let txn = Transaction::random(&mut rng);\n    let txn_id = txn.fetch_id();\n\n    let holding_node = node_ids[0];\n    let requesting_node = node_ids[1];\n\n    // Store txn on holding node.\n    store_txn(&txn, &holding_node, &mut network, &mut rng).await;\n\n    // Initiate requesting node asking for txn from holding node.\n    let fetched = Arc::new(Mutex::new((false, None)));\n    network\n        .process_injected_effect_on(\n            &requesting_node,\n            fetch_txn(txn_id, holding_node, Arc::clone(&fetched)),\n        )\n        .await;\n\n    // Crank until message sent from the requester.\n    network\n        .crank_until(\n            &requesting_node,\n            &mut rng,\n            move |event: &Event| {\n                if let Event::NetworkRequestMessage(NetworkRequest::SendMessage {\n                    payload, ..\n                }) = event\n                {\n                    matches!(**payload, Message::GetRequest { .. })\n                } else {\n                    false\n                }\n            },\n            TIMEOUT,\n        )\n        .await;\n\n    // Crank until the message is received by the holding node.\n    network\n        .crank_until(\n            &holding_node,\n            &mut rng,\n            move |event: &Event| {\n                if let Event::NetworkRequestMessage(NetworkRequest::SendMessage {\n                    payload, ..\n                }) = event\n                {\n                    matches!(**payload, Message::GetResponse { .. 
})\n                } else {\n                    false\n                }\n            },\n            TIMEOUT,\n        )\n        .await;\n\n    // Advance time.\n    let duration_to_advance: Duration = Config::default().get_from_peer_timeout().into();\n    let duration_to_advance = duration_to_advance + Duration::from_secs(10);\n    testing::advance_time(duration_to_advance).await;\n\n    // Settle the network, allowing timeout to avoid panic.\n    let expected_result = ExpectedFetchedTransactionResult::TimedOut;\n    assert_settled(\n        &requesting_node,\n        txn_id,\n        expected_result,\n        fetched,\n        &mut network,\n        &mut rng,\n        TIMEOUT,\n    )\n    .await;\n\n    NetworkController::<Message>::remove_active();\n}\n"
  },
  {
    "path": "node/src/components/fetcher.rs",
    "content": "mod config;\nmod error;\nmod event;\nmod fetch_item;\nmod fetch_response;\nmod fetched_data;\nmod fetcher_impls;\nmod item_fetcher;\nmod item_handle;\nmod metrics;\nmod tag;\nmod tests;\n\nuse std::{collections::HashMap, fmt::Debug, time::Duration};\n\nuse datasize::DataSize;\nuse prometheus::Registry;\nuse tracing::trace;\n\nuse crate::{\n    components::Component,\n    effect::{\n        announcements::{\n            FetchedNewBlockAnnouncement, FetchedNewFinalitySignatureAnnouncement,\n            PeerBehaviorAnnouncement,\n        },\n        requests::{\n            BlockAccumulatorRequest, ContractRuntimeRequest, FetcherRequest, NetworkRequest,\n            StorageRequest,\n        },\n        EffectBuilder, EffectExt, Effects, Responder,\n    },\n    protocol::Message,\n    types::NodeId,\n    utils::Source,\n    NodeRng,\n};\n\npub(crate) use config::Config;\npub(crate) use error::Error;\npub(crate) use event::Event;\npub(crate) use fetch_item::{EmptyValidationMetadata, FetchItem};\npub(crate) use fetch_response::FetchResponse;\npub(crate) use fetched_data::FetchedData;\nuse item_fetcher::{ItemFetcher, StoringState};\nuse item_handle::ItemHandle;\nuse metrics::Metrics;\npub(crate) use tag::Tag;\n\npub(crate) type FetchResult<T> = Result<FetchedData<T>, Error<T>>;\npub(crate) type FetchResponder<T> = Responder<FetchResult<T>>;\n\n/// The component which fetches an item from local component(s) or asks a peer if it's not\n/// available locally.\n#[derive(DataSize, Debug)]\npub(crate) struct Fetcher<T>\nwhere\n    T: FetchItem,\n{\n    get_from_peer_timeout: Duration,\n    item_handles: HashMap<T::Id, HashMap<NodeId, ItemHandle<T>>>,\n    #[data_size(skip)]\n    name: &'static str,\n    #[data_size(skip)]\n    metrics: Metrics,\n}\n\nimpl<T: FetchItem> Fetcher<T> {\n    pub(crate) fn new(\n        name: &'static str,\n        config: &Config,\n        registry: &Registry,\n    ) -> Result<Self, prometheus::Error> {\n        Ok(Fetcher {\n        
    get_from_peer_timeout: config.get_from_peer_timeout().into(),\n            item_handles: HashMap::new(),\n            name,\n            metrics: Metrics::new(name, registry)?,\n        })\n    }\n}\n\nimpl<T, REv> Component<REv> for Fetcher<T>\nwhere\n    Fetcher<T>: ItemFetcher<T>,\n    T: FetchItem + 'static,\n    REv: From<StorageRequest>\n        + From<BlockAccumulatorRequest>\n        + From<ContractRuntimeRequest>\n        + From<NetworkRequest<Message>>\n        + From<PeerBehaviorAnnouncement>\n        + From<FetchedNewBlockAnnouncement>\n        + From<FetchedNewFinalitySignatureAnnouncement>\n        + Send,\n{\n    type Event = Event<T>;\n\n    fn handle_event(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        _rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event> {\n        trace!(?event, \"Fetcher: handling event\");\n        match event {\n            Event::Fetch(FetcherRequest {\n                id,\n                peer,\n                validation_metadata,\n                responder,\n            }) => self.fetch(effect_builder, id, peer, validation_metadata, responder),\n            Event::GetLocallyResult {\n                id,\n                peer,\n                validation_metadata,\n                maybe_item,\n                responder,\n            } => match maybe_item {\n                Some(item) => {\n                    self.metrics().found_in_storage.inc();\n                    responder\n                        .respond(Ok(FetchedData::from_storage(item)))\n                        .ignore()\n                }\n                None => self.failed_to_get_locally(\n                    effect_builder,\n                    id,\n                    peer,\n                    validation_metadata,\n                    responder,\n                ),\n            },\n            Event::GotRemotely { item, source } => match source {\n                Source::PeerGossiped(peer) | 
Source::Peer(peer) => {\n                    self.got_from_peer(effect_builder, peer, item)\n                }\n                Source::Client | Source::SpeculativeExec | Source::Ourself => Effects::new(),\n            },\n            Event::GotInvalidRemotely { .. } => Effects::new(),\n            Event::AbsentRemotely { id, peer } => {\n                trace!(TAG=%T::TAG, %id, %peer, \"item absent on the remote node\");\n                self.signal(\n                    id.clone(),\n                    Err(Error::Absent {\n                        id: Box::new(id),\n                        peer,\n                    }),\n                    peer,\n                )\n            }\n            Event::RejectedRemotely { id, peer } => {\n                trace!(TAG=%T::TAG, %id, %peer, \"peer rejected fetch request\");\n                self.signal(\n                    id.clone(),\n                    Err(Error::Rejected {\n                        id: Box::new(id),\n                        peer,\n                    }),\n                    peer,\n                )\n            }\n            Event::TimeoutPeer { id, peer } => self.signal(\n                id.clone(),\n                Err(Error::TimedOut {\n                    id: Box::new(id),\n                    peer,\n                }),\n                peer,\n            ),\n            Event::PutToStorage { item, peer } => {\n                let mut effects =\n                    Self::announce_fetched_new_item(effect_builder, (*item).clone(), peer).ignore();\n                effects.extend(self.signal(item.fetch_id(), Ok(*item), peer));\n                effects\n            }\n        }\n    }\n\n    fn name(&self) -> &str {\n        self.name\n    }\n}\n"
  },
  {
    "path": "node/src/components/gossiper/config.rs",
    "content": "use std::str::FromStr;\n\nuse datasize::DataSize;\nuse serde::{\n    de::{Deserializer, Error as SerdeError, Unexpected},\n    Deserialize, Serialize,\n};\nuse tracing::error;\n\nuse casper_types::TimeDiff;\n\n#[cfg(test)]\nuse super::error::Error;\n\nconst DEFAULT_INFECTION_TARGET: u8 = 3;\nconst DEFAULT_SATURATION_LIMIT_PERCENT: u8 = 80;\npub(super) const MAX_SATURATION_LIMIT_PERCENT: u8 = 99;\npub(super) const DEFAULT_FINISHED_ENTRY_DURATION: &str = \"60sec\";\nconst DEFAULT_GOSSIP_REQUEST_TIMEOUT: &str = \"10sec\";\nconst DEFAULT_GET_REMAINDER_TIMEOUT: &str = \"60sec\";\nconst DEFAULT_VALIDATE_AND_STORE_TIMEOUT: &str = \"60sec\";\n#[cfg(test)]\nconst SMALL_TIMEOUTS_FINISHED_ENTRY_DURATION: &str = \"2sec\";\n#[cfg(test)]\nconst SMALL_TIMEOUTS_GOSSIP_REQUEST_TIMEOUT: &str = \"1sec\";\n#[cfg(test)]\nconst SMALL_TIMEOUTS_GET_REMAINDER_TIMEOUT: &str = \"1sec\";\n#[cfg(test)]\nconst SMALL_TIMEOUTS_VALIDATE_AND_STORE_TIMEOUT: &str = \"1sec\";\n\n/// Configuration options for gossiping.\n#[derive(Copy, Clone, DataSize, Debug, Deserialize, Serialize)]\npub struct Config {\n    /// Target number of peers to infect with a given piece of data.\n    pub infection_target: u8,\n    /// The saturation limit as a percentage, with a maximum value of 99.  Used as a termination\n    /// condition.\n    ///\n    /// Example: assume the `infection_target` is 3, the `saturation_limit_percent` is 80, and we\n    /// don't manage to newly infect 3 peers.  
We will stop gossiping once we know of more than 15\n    /// holders excluding us since 80% saturation would imply 3 new infections in 15 peers.\n    #[serde(deserialize_with = \"deserialize_saturation_limit_percent\")]\n    pub saturation_limit_percent: u8,\n    /// The maximum duration in seconds for which to keep finished entries.\n    ///\n    /// The longer they are retained, the lower the likelihood of re-gossiping a piece of data.\n    /// However, the longer they are retained, the larger the list of finished entries can grow.\n    pub finished_entry_duration: TimeDiff,\n    /// The timeout duration in seconds for a single gossip request, i.e. for a single gossip\n    /// message sent from this node, it will be considered timed out if the expected response from\n    /// that peer is not received within this specified duration.\n    pub gossip_request_timeout: TimeDiff,\n    /// The timeout duration in seconds for retrieving the remaining part(s) of newly-discovered\n    /// data from a peer which gossiped information about that data to this node.\n    pub get_remainder_timeout: TimeDiff,\n    /// The timeout duration for a newly-received, gossiped item to be validated and stored by\n    /// another component before the gossiper abandons waiting to gossip the item onwards.\n    pub validate_and_store_timeout: TimeDiff,\n}\n\nimpl Config {\n    #[cfg(test)]\n    pub(crate) fn new(\n        infection_target: u8,\n        saturation_limit_percent: u8,\n        finished_entry_duration: TimeDiff,\n        gossip_request_timeout: TimeDiff,\n        get_remainder_timeout: TimeDiff,\n        validate_and_store_timeout: TimeDiff,\n    ) -> Result<Self, Error> {\n        if saturation_limit_percent > MAX_SATURATION_LIMIT_PERCENT {\n            return Err(Error::InvalidSaturationLimit);\n        }\n        Ok(Config {\n            infection_target,\n            saturation_limit_percent,\n            finished_entry_duration,\n            gossip_request_timeout,\n         
   get_remainder_timeout,\n            validate_and_store_timeout,\n        })\n    }\n\n    #[cfg(test)]\n    pub(crate) fn new_with_small_timeouts() -> Self {\n        Config {\n            finished_entry_duration: TimeDiff::from_str(SMALL_TIMEOUTS_FINISHED_ENTRY_DURATION)\n                .unwrap(),\n            gossip_request_timeout: TimeDiff::from_str(SMALL_TIMEOUTS_GOSSIP_REQUEST_TIMEOUT)\n                .unwrap(),\n            get_remainder_timeout: TimeDiff::from_str(SMALL_TIMEOUTS_GET_REMAINDER_TIMEOUT)\n                .unwrap(),\n            validate_and_store_timeout: TimeDiff::from_str(\n                SMALL_TIMEOUTS_VALIDATE_AND_STORE_TIMEOUT,\n            )\n            .unwrap(),\n            ..Default::default()\n        }\n    }\n\n    pub(crate) fn infection_target(&self) -> u8 {\n        self.infection_target\n    }\n\n    pub(crate) fn saturation_limit_percent(&self) -> u8 {\n        self.saturation_limit_percent\n    }\n\n    pub(crate) fn finished_entry_duration(&self) -> TimeDiff {\n        self.finished_entry_duration\n    }\n\n    pub(crate) fn gossip_request_timeout(&self) -> TimeDiff {\n        self.gossip_request_timeout\n    }\n\n    pub(crate) fn get_remainder_timeout(&self) -> TimeDiff {\n        self.get_remainder_timeout\n    }\n\n    pub(crate) fn validate_and_store_timeout(&self) -> TimeDiff {\n        self.validate_and_store_timeout\n    }\n}\n\nimpl Default for Config {\n    fn default() -> Self {\n        Config {\n            infection_target: DEFAULT_INFECTION_TARGET,\n            saturation_limit_percent: DEFAULT_SATURATION_LIMIT_PERCENT,\n            finished_entry_duration: TimeDiff::from_str(DEFAULT_FINISHED_ENTRY_DURATION).unwrap(),\n            gossip_request_timeout: TimeDiff::from_str(DEFAULT_GOSSIP_REQUEST_TIMEOUT).unwrap(),\n            get_remainder_timeout: TimeDiff::from_str(DEFAULT_GET_REMAINDER_TIMEOUT).unwrap(),\n            validate_and_store_timeout: 
TimeDiff::from_str(DEFAULT_VALIDATE_AND_STORE_TIMEOUT)\n                .unwrap(),\n        }\n    }\n}\n\n/// Deserializes a `usize` but fails if it's not in the range 0..100.\nfn deserialize_saturation_limit_percent<'de, D>(deserializer: D) -> Result<u8, D::Error>\nwhere\n    D: Deserializer<'de>,\n{\n    let saturation_limit_percent = u8::deserialize(deserializer)?;\n    if saturation_limit_percent > MAX_SATURATION_LIMIT_PERCENT {\n        error!(\n            \"saturation_limit_percent of {} is above {}\",\n            saturation_limit_percent, MAX_SATURATION_LIMIT_PERCENT\n        );\n        return Err(SerdeError::invalid_value(\n            Unexpected::Unsigned(saturation_limit_percent as u64),\n            &\"a value between 0 and 99 inclusive\",\n        ));\n    }\n\n    Ok(saturation_limit_percent)\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn invalid_config_should_fail() {\n        // saturation_limit_percent > MAX_SATURATION_LIMIT_PERCENT\n        let invalid_config = Config {\n            infection_target: 3,\n            saturation_limit_percent: MAX_SATURATION_LIMIT_PERCENT + 1,\n            finished_entry_duration: TimeDiff::from_str(DEFAULT_FINISHED_ENTRY_DURATION).unwrap(),\n            gossip_request_timeout: TimeDiff::from_str(DEFAULT_GOSSIP_REQUEST_TIMEOUT).unwrap(),\n            get_remainder_timeout: TimeDiff::from_str(DEFAULT_GET_REMAINDER_TIMEOUT).unwrap(),\n            validate_and_store_timeout: TimeDiff::from_str(DEFAULT_VALIDATE_AND_STORE_TIMEOUT)\n                .unwrap(),\n        };\n\n        // Parsing should fail.\n        let config_as_json = serde_json::to_string(&invalid_config).unwrap();\n        assert!(serde_json::from_str::<Config>(&config_as_json).is_err());\n\n        // Construction should fail.\n        assert!(Config::new(\n            3,\n            MAX_SATURATION_LIMIT_PERCENT + 1,\n            TimeDiff::from_str(DEFAULT_FINISHED_ENTRY_DURATION).unwrap(),\n            
TimeDiff::from_str(DEFAULT_GOSSIP_REQUEST_TIMEOUT).unwrap(),\n            TimeDiff::from_str(DEFAULT_GET_REMAINDER_TIMEOUT).unwrap(),\n            TimeDiff::from_str(DEFAULT_VALIDATE_AND_STORE_TIMEOUT).unwrap()\n        )\n        .is_err())\n    }\n}\n"
  },
  {
    "path": "node/src/components/gossiper/error.rs",
    "content": "use thiserror::Error;\n\nuse super::config::MAX_SATURATION_LIMIT_PERCENT;\n\n/// Error returned by a `GossipTable`.\n#[derive(Debug, Error)]\npub(crate) enum Error {\n    /// Invalid configuration value for `saturation_limit_percent`.\n    #[error(\n        \"invalid saturation_limit_percent - should be between 0 and {} inclusive\",\n        MAX_SATURATION_LIMIT_PERCENT\n    )]\n    InvalidSaturationLimit,\n}\n"
  },
  {
    "path": "node/src/components/gossiper/event.rs",
    "content": "use std::{\n    collections::HashSet,\n    fmt::{self, Display, Formatter},\n};\n\nuse derive_more::From;\nuse serde::Serialize;\n\nuse casper_types::DisplayIter;\n\nuse super::GossipItem;\nuse crate::{\n    effect::{incoming::GossiperIncoming, requests::BeginGossipRequest, GossipTarget},\n    types::NodeId,\n    utils::Source,\n};\n\n/// `Gossiper` events.\n#[derive(Debug, From, Serialize)]\npub(crate) enum Event<T: GossipItem> {\n    /// A request to gossip an item has been made.\n    #[from]\n    BeginGossipRequest(BeginGossipRequest<T>),\n    /// A new item has been received to be gossiped.\n    ItemReceived {\n        item_id: T::Id,\n        source: Source,\n        target: GossipTarget,\n    },\n    /// The network component gossiped to the included peers.\n    GossipedTo {\n        item_id: T::Id,\n        requested_count: usize,\n        peers: HashSet<NodeId>,\n    },\n    /// The timeout for waiting for a gossip response has elapsed and we should check the response\n    /// arrived.\n    CheckGossipTimeout { item_id: T::Id, peer: NodeId },\n    /// The timeout for waiting for the full item has elapsed and we should check the response\n    /// arrived.\n    CheckGetFromPeerTimeout { item_id: T::Id, peer: NodeId },\n    /// An incoming gossip network message.\n    #[from]\n    Incoming(GossiperIncoming<T>),\n    /// The timeout for waiting for a different component to validate and store the item has\n    /// elapsed and we should check that `ItemReceived` has been called by now.\n    CheckItemReceivedTimeout { item_id: T::Id },\n    /// The result of the gossiper checking if an item exists in storage.\n    IsStoredResult {\n        item_id: T::Id,\n        sender: NodeId,\n        result: bool,\n    },\n    /// The result of the gossiper getting an item from storage. 
If the result is `Some`, the item\n    /// should be sent to the requesting peer.\n    GetFromStorageResult {\n        item_id: T::Id,\n        requester: NodeId,\n        maybe_item: Option<Box<T>>,\n    },\n}\n\nimpl<T: GossipItem> Display for Event<T> {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            Event::BeginGossipRequest(BeginGossipRequest {\n                item_id, source, ..\n            }) => {\n                write!(\n                    formatter,\n                    \"begin gossping new item {} received from {}\",\n                    item_id, source\n                )\n            }\n            Event::ItemReceived {\n                item_id, source, ..\n            } => {\n                write!(formatter, \"new item {} received from {}\", item_id, source)\n            }\n            Event::GossipedTo { item_id, peers, .. } => write!(\n                formatter,\n                \"gossiped {} to {}\",\n                item_id,\n                DisplayIter::new(peers)\n            ),\n            Event::CheckGossipTimeout { item_id, peer } => write!(\n                formatter,\n                \"check gossip timeout for {} with {}\",\n                item_id, peer\n            ),\n            Event::CheckGetFromPeerTimeout { item_id, peer } => write!(\n                formatter,\n                \"check get from peer timeout for {} with {}\",\n                item_id, peer\n            ),\n            Event::Incoming(incoming) => {\n                write!(formatter, \"incoming: {}\", incoming)\n            }\n            Event::CheckItemReceivedTimeout { item_id } => {\n                write!(formatter, \"check item received timeout for {}\", item_id,)\n            }\n            Event::IsStoredResult {\n                item_id,\n                sender,\n                result,\n            } => {\n                write!(\n                    formatter,\n                    \"{} is stored for 
gossip message from {}: {}\",\n                    item_id, sender, result\n                )\n            }\n            Event::GetFromStorageResult {\n                item_id,\n                maybe_item,\n                ..\n            } => {\n                if maybe_item.is_some() {\n                    write!(formatter, \"got {} from storage\", item_id)\n                } else {\n                    write!(formatter, \"failed to get {} from storage\", item_id)\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/gossiper/gossip_item.rs",
    "content": "use std::{\n    fmt::{Debug, Display},\n    hash::Hash,\n};\n\nuse serde::{de::DeserializeOwned, Serialize};\n\nuse crate::effect::GossipTarget;\n\n/// A trait which allows an implementing type to be used by a gossiper component.\npub(crate) trait GossipItem:\n    Clone + Serialize + DeserializeOwned + Send + Sync + Debug + Display + Eq\n{\n    /// The type of ID of the item.\n    type Id: Clone + Eq + Hash + Serialize + DeserializeOwned + Send + Sync + Debug + Display;\n\n    /// Whether the item's ID _is_ the complete item or not.\n    const ID_IS_COMPLETE_ITEM: bool;\n    /// Whether the arrival of a new gossip message should be announced or not.\n    const REQUIRES_GOSSIP_RECEIVED_ANNOUNCEMENT: bool;\n\n    /// The ID of the specific item.\n    fn gossip_id(&self) -> Self::Id;\n\n    /// Identifies the kind of peers which should be targeted for onwards gossiping.\n    fn gossip_target(&self) -> GossipTarget;\n}\n\npub(crate) trait LargeGossipItem: GossipItem {}\n\npub(crate) trait SmallGossipItem: GossipItem {\n    /// Convert a `Self::Id` into `Self`.\n    fn id_as_item(id: &Self::Id) -> &Self;\n}\n"
  },
  {
    "path": "node/src/components/gossiper/gossip_table.rs",
    "content": "#[cfg(not(test))]\nuse std::time::Instant;\nuse std::{\n    collections::{HashMap, HashSet},\n    fmt::{self, Display, Formatter},\n    hash::Hash,\n    time::Duration,\n};\n\nuse datasize::DataSize;\n#[cfg(test)]\nuse fake_instant::FakeClock as Instant;\nuse tracing::{error, trace, warn};\n\nuse casper_types::DisplayIter;\n\nuse super::Config;\nuse crate::{effect::GossipTarget, types::NodeId};\n\n#[derive(Debug, PartialEq, Eq)]\npub(super) enum GossipAction {\n    /// This is new data, previously unknown by us, and for which we don't yet hold everything\n    /// required to allow us start gossiping it onwards.  We should get the remaining parts from\n    /// the provided holder and not gossip the ID onwards yet.\n    GetRemainder { holder: NodeId },\n    /// This is data already known to us, but for which we don't yet hold everything required to\n    /// allow us start gossiping it onwards.  We should already be getting the remaining parts from\n    /// a holder, so there's no need to do anything else now.\n    AwaitingRemainder,\n    /// We hold the data locally and should gossip the ID onwards.\n    ShouldGossip(ShouldGossip),\n    /// We hold the data locally, and we shouldn't gossip the ID onwards.\n    Noop,\n    /// We just finished gossiping the data: no need to gossip further, but an announcement that we\n    /// have finished gossiping this data should be made.\n    AnnounceFinished,\n}\n\nimpl Display for GossipAction {\n    fn fmt(&self, formatter: &mut Formatter) -> Result<(), fmt::Error> {\n        match self {\n            GossipAction::GetRemainder { holder } => {\n                write!(formatter, \"should get remainder from {}\", holder)\n            }\n            GossipAction::AwaitingRemainder => write!(formatter, \"awaiting remainder\"),\n            GossipAction::ShouldGossip(should_gossip) => Display::fmt(should_gossip, formatter),\n            GossipAction::Noop => write!(formatter, \"should do nothing\"),\n            
GossipAction::AnnounceFinished => write!(formatter, \"finished gossiping\"),\n        }\n    }\n}\n\n/// Used as a return type from API methods to indicate that the caller should continue to gossip the\n/// given data.\n#[derive(Debug, PartialEq, Eq)]\npub(super) struct ShouldGossip {\n    /// The number of copies of the gossip message to send.\n    pub(super) count: usize,\n    /// Peers we should avoid gossiping this data to, since they already hold it.\n    pub(super) exclude_peers: HashSet<NodeId>,\n    /// Whether we already held the full data or not.\n    pub(super) is_already_held: bool,\n    /// Who to gossip this to.\n    pub(super) target: GossipTarget,\n}\n\nimpl Display for ShouldGossip {\n    fn fmt(&self, formatter: &mut Formatter) -> Result<(), fmt::Error> {\n        write!(formatter, \"should gossip to {} peer(s) \", self.count)?;\n        if !self.exclude_peers.is_empty() {\n            write!(\n                formatter,\n                \"excluding {} \",\n                DisplayIter::new(&self.exclude_peers)\n            )?;\n        }\n        write!(\n            formatter,\n            \"(we {} the item)\",\n            if self.is_already_held {\n                \"previously held\"\n            } else {\n                \"didn't previously hold\"\n            }\n        )\n    }\n}\n\n#[derive(DataSize, Debug, Default)]\npub(super) struct State {\n    /// The peers excluding us which hold the data.\n    holders: HashSet<NodeId>,\n    /// The subset of `holders` we have infected.  
Not just a count so we don't attribute the same\n    /// peer multiple times.\n    infected_by_us: HashSet<NodeId>,\n    /// The count of in-flight gossip messages sent by us for this data.\n    in_flight_count: usize,\n    /// The relevant target for this data, if known yet.\n    target: Option<GossipTarget>,\n    /// The set of peers we attempted to infect.\n    attempted_to_infect: HashSet<NodeId>,\n}\n\nimpl State {\n    /// Whether we hold the full data locally yet or not.\n    fn held_by_us(&self) -> bool {\n        self.target.is_some()\n    }\n\n    /// Returns whether we should finish gossiping this data.\n    fn is_finished(&self, infection_target: usize, attempted_to_infect_limit: usize) -> bool {\n        self.infected_by_us.len() >= infection_target\n            || self.attempted_to_infect.len() >= attempted_to_infect_limit\n    }\n\n    /// Returns a `GossipAction` derived from the given state.\n    fn action(\n        &mut self,\n        infection_target: usize,\n        attempted_to_infect_limit: usize,\n        is_new: bool,\n    ) -> GossipAction {\n        if self.is_finished(infection_target, attempted_to_infect_limit) {\n            return GossipAction::Noop;\n        }\n\n        if let Some(target) = self.target {\n            // The item is held by us, decide whether we should gossip it or not.\n            let count =\n                infection_target.saturating_sub(self.in_flight_count + self.infected_by_us.len());\n            if count > 0 {\n                self.in_flight_count += count;\n                return GossipAction::ShouldGossip(ShouldGossip {\n                    count,\n                    target,\n                    exclude_peers: self.attempted_to_infect.clone(),\n                    is_already_held: !is_new,\n                });\n            }\n            return GossipAction::Noop;\n        }\n\n        if is_new {\n            let holder = *self\n                .holders\n                .iter()\n                
.next()\n                .expect(\"holders cannot be empty if we don't hold the data\");\n            GossipAction::GetRemainder { holder }\n        } else {\n            GossipAction::AwaitingRemainder\n        }\n    }\n}\n\n#[derive(DataSize, Debug)]\npub(super) struct Timeouts<T> {\n    values: Vec<(Instant, T)>,\n}\n\nimpl<T> Timeouts<T> {\n    fn new() -> Self {\n        Timeouts { values: Vec::new() }\n    }\n\n    fn push(&mut self, timeout: Instant, data_id: T) {\n        self.values.push((timeout, data_id));\n    }\n\n    fn purge(&mut self, now: &Instant) -> impl Iterator<Item = T> + '_ {\n        // The values are sorted by timeout.  Locate the index of the first non-expired one.\n        let split_index = match self\n            .values\n            .binary_search_by(|(timeout, _data_id)| timeout.cmp(now))\n        {\n            Ok(index) => index,\n            Err(index) => index,\n        };\n\n        // Drain and return the expired IDs.\n        self.values\n            .drain(..split_index)\n            .map(|(_timeout, data_id)| data_id)\n    }\n}\n\n#[derive(DataSize, Debug)]\npub(super) struct GossipTable<T> {\n    /// Data IDs for which gossiping is still ongoing.\n    current: HashMap<T, State>,\n    /// Data IDs for which gossiping is complete.\n    finished: HashSet<T>,\n    /// Timeouts for removal of items from the `finished` cache.\n    timeouts: Timeouts<T>,\n    /// See `Config::infection_target`.\n    infection_target: usize,\n    /// Derived from `Config::saturation_limit_percent` - we gossip data while the number of\n    /// attempts to infect doesn't exceed `attempted_to_infect_limit`.\n    attempted_to_infect_limit: usize,\n    /// See `Config::finished_entry_duration`.\n    finished_entry_duration: Duration,\n}\n\nimpl<T> GossipTable<T> {\n    /// Number of items currently being gossiped.\n    pub(super) fn items_current(&self) -> usize {\n        self.current.len()\n    }\n\n    /// Number of items that are kept but are 
finished gossiping.\n    pub(super) fn items_finished(&self) -> usize {\n        self.finished.len()\n    }\n}\n\nimpl<T: Clone + Eq + Hash + Display> GossipTable<T> {\n    /// Returns a new `GossipTable` using the provided configuration.\n    pub(super) fn new(config: Config) -> Self {\n        let attempted_to_infect_limit = (100 * usize::from(config.infection_target()))\n            / (100 - usize::from(config.saturation_limit_percent()));\n        GossipTable {\n            current: HashMap::new(),\n            finished: HashSet::new(),\n            timeouts: Timeouts::new(),\n            infection_target: usize::from(config.infection_target()),\n            attempted_to_infect_limit,\n            finished_entry_duration: config.finished_entry_duration().into(),\n        }\n    }\n\n    /// We received knowledge about potentially new data with given ID from the given peer.  This\n    /// should only be called where we don't already hold everything locally needed to be able to\n    /// gossip it onwards.  
If we are able to gossip the data already, call `new_complete_data`\n    /// instead.\n    ///\n    /// Once we have retrieved everything we need in order to begin gossiping onwards, call\n    /// `new_complete_data`.\n    ///\n    /// Returns whether we should gossip it, and a list of peers to exclude.\n    pub(super) fn new_data_id(&mut self, data_id: &T, holder: NodeId) -> GossipAction {\n        self.purge_finished();\n\n        if self.finished.contains(data_id) {\n            trace!(item=%data_id, \"no further action: item already finished\");\n            return GossipAction::Noop;\n        }\n\n        let update = |state: &mut State| {\n            let _ = state.holders.insert(holder);\n        };\n\n        if let Some(action) = self.update_current(data_id, update) {\n            trace!(item=%data_id, %action, \"item is currently being gossiped\");\n            return action;\n        }\n\n        // This isn't in finished or current - add a new entry to current.\n        let mut state = State::default();\n        update(&mut state);\n        let is_new = true;\n        let action = state.action(\n            self.infection_target,\n            self.attempted_to_infect_limit,\n            is_new,\n        );\n        let _ = self.current.insert(data_id.clone(), state);\n        trace!(item=%data_id, %action, \"gossiping new item should begin\");\n        action\n    }\n\n    /// We received or generated potentially new data with given ID.  If received from a peer,\n    /// its ID should be passed in `maybe_holder`.  If received from a client or generated on this\n    /// node, `maybe_holder` should be `None`.\n    ///\n    /// This should only be called once we hold everything locally needed to be able to gossip it\n    /// onwards.  
If we aren't able to gossip this data yet, call `new_data_id` instead.\n    ///\n    /// Returns whether we should gossip it, and a list of peers to exclude.\n    pub(super) fn new_complete_data(\n        &mut self,\n        data_id: &T,\n        maybe_holder: Option<NodeId>,\n        target: GossipTarget,\n    ) -> GossipAction {\n        self.purge_finished();\n\n        if self.finished.contains(data_id) {\n            trace!(item=%data_id, \"no further action: item already finished\");\n            return GossipAction::Noop;\n        }\n\n        let update = |state: &mut State| {\n            state.holders.extend(maybe_holder);\n            state.target = Some(target);\n        };\n\n        if let Some(action) = self.update_current(data_id, update) {\n            trace!(item=%data_id, %action, \"item is currently being gossiped\");\n            return action;\n        }\n\n        // This isn't in finished or current - add a new entry to current.\n        let mut state = State::default();\n        update(&mut state);\n        let is_new = true;\n        let action = state.action(\n            self.infection_target,\n            self.attempted_to_infect_limit,\n            is_new,\n        );\n        let _ = self.current.insert(data_id.clone(), state);\n        trace!(item=%data_id, %action, \"gossiping new item should begin\");\n        action\n    }\n\n    pub(super) fn register_infection_attempt<'a>(\n        &'a mut self,\n        item_id: &T,\n        peers: impl Iterator<Item = &'a NodeId>,\n    ) {\n        if let Some(state) = self.current.get_mut(item_id) {\n            state.attempted_to_infect.extend(peers);\n        }\n    }\n\n    /// We got a response from a peer we gossiped to indicating we infected it (it didn't previously\n    /// know of this data).\n    ///\n    /// If the given `data_id` is not a member of the current entries (those not deemed finished),\n    /// then `GossipAction::Noop` will be returned under the assumption that the data 
has already\n    /// finished being gossiped.\n    pub(super) fn we_infected(&mut self, data_id: &T, peer: NodeId) -> GossipAction {\n        let infected_by_us = true;\n        self.infected(data_id, peer, infected_by_us)\n    }\n\n    /// We got a response from a peer we gossiped to indicating it was already infected (it\n    /// previously knew of this data).\n    ///\n    /// If the given `data_id` is not a member of the current entries (those not deemed finished),\n    /// then `GossipAction::Noop` will be returned under the assumption that the data has already\n    /// finished being gossiped.\n    pub(super) fn already_infected(&mut self, data_id: &T, peer: NodeId) -> GossipAction {\n        let infected_by_us = false;\n        self.infected(data_id, peer, infected_by_us)\n    }\n\n    fn infected(&mut self, data_id: &T, peer: NodeId, by_us: bool) -> GossipAction {\n        let update = |state: &mut State| {\n            if !state.held_by_us() {\n                warn!(\n                    item=%data_id,\n                    %peer, \"shouldn't have received a gossip response for data we don't hold\"\n                );\n                return;\n            }\n            let _ = state.holders.insert(peer);\n            if by_us {\n                let _ = state.infected_by_us.insert(peer);\n            }\n            state.in_flight_count = state.in_flight_count.saturating_sub(1);\n        };\n\n        self.update_current(data_id, update)\n            .unwrap_or(GossipAction::Noop)\n    }\n\n    /// Directly reduces the in-flight count of gossip requests for the given item by the given\n    /// amount.\n    ///\n    /// Returns `true` if there was a current entry for this data and it is now finished.\n    ///\n    /// This should be called if, after trying to gossip to a given number of peers, we find that\n    /// we've not been able to select enough peers.  
Without this reduction, the given gossip item\n    /// would never move from `current` to `finished`, and hence would never be purged.\n    pub(super) fn reduce_in_flight_count(&mut self, data_id: &T, reduce_by: usize) -> bool {\n        let should_finish = if let Some(state) = self.current.get_mut(data_id) {\n            state.in_flight_count = state.in_flight_count.saturating_sub(reduce_by);\n            trace!(\n                item=%data_id,\n                in_flight_count=%state.in_flight_count,\n                \"reduced in-flight count for item\"\n            );\n            state.in_flight_count == 0\n        } else {\n            false\n        };\n\n        if should_finish {\n            trace!(item=%data_id, \"finished gossiping since no more peers to gossip to\");\n            return self.force_finish(data_id);\n        }\n\n        false\n    }\n\n    /// Checks if gossip request we sent timed out.\n    ///\n    /// If the peer is already counted as a holder, it has previously responded and this method\n    /// returns Noop.  
Otherwise it has timed out and we return the appropriate action to take.\n    pub(super) fn check_timeout(&mut self, data_id: &T, peer: NodeId) -> GossipAction {\n        let update = |state: &mut State| {\n            debug_assert!(\n                state.held_by_us(),\n                \"shouldn't check timeout for a gossip response for data we don't hold\"\n            );\n            if !state.held_by_us() {\n                error!(\n                    item=%data_id,\n                    %peer, \"shouldn't check timeout for a gossip response for data we don't hold\"\n                );\n                return;\n            }\n\n            if !state.holders.contains(&peer) {\n                // Add the peer as a holder just to avoid retrying it.\n                let _ = state.holders.insert(peer);\n                state.in_flight_count = state.in_flight_count.saturating_sub(1);\n            }\n        };\n\n        self.update_current(data_id, update)\n            .unwrap_or(GossipAction::Noop)\n    }\n\n    /// If we hold the full data, assume `peer` provided it to us and shouldn't be removed as a\n    /// holder.  
Otherwise, assume `peer` was unresponsive and remove from list of holders.\n    ///\n    /// If this causes the list of holders to become empty, and we also don't hold the full data,\n    /// then this entry is removed as if we'd never heard of it.\n    pub(super) fn remove_holder_if_unresponsive(\n        &mut self,\n        data_id: &T,\n        peer: NodeId,\n    ) -> GossipAction {\n        if let Some(mut state) = self.current.remove(data_id) {\n            if !state.held_by_us() {\n                let _ = state.holders.remove(&peer);\n                trace!(item=%data_id, %peer, \"removed peer as a holder of the item\");\n                if state.holders.is_empty() {\n                    // We don't hold the full data, and we don't know any holders - remove the entry\n                    trace!(item=%data_id, \"no further action: item now removed as no holders\");\n                    return GossipAction::Noop;\n                }\n            }\n            let is_new = !state.held_by_us();\n            let action = state.action(\n                self.infection_target,\n                self.attempted_to_infect_limit,\n                is_new,\n            );\n            let _ = self.current.insert(data_id.clone(), state);\n            trace!(item=%data_id, %action, \"assuming peer response did not timeout\");\n            return action;\n        }\n\n        GossipAction::Noop\n    }\n\n    /// We have deemed the data not suitable for gossiping further.  
The entry will be marked as\n    /// `finished` and eventually be purged.\n    ///\n    /// Returns `true` if there was a current entry for this data.\n    pub(super) fn force_finish(&mut self, data_id: &T) -> bool {\n        if self.current.remove(data_id).is_some() {\n            self.insert_to_finished(data_id);\n            return true;\n        }\n        false\n    }\n\n    /// If the data has not been deemed valid by the component responsible for it (i.e.\n    /// `state.held_by_us` is false) it should not be gossiped onwards by us.  The entry will be\n    /// marked as `finished` and eventually be purged.\n    ///\n    /// Returns `true` if such an entry was found and marked `finished`.\n    pub(super) fn finish_if_not_held_by_us(&mut self, data_id: &T) -> bool {\n        if self\n            .current\n            .get(data_id)\n            .map(|state| !state.held_by_us())\n            .unwrap_or(false)\n        {\n            return self.force_finish(data_id);\n        }\n        false\n    }\n\n    /// Returns `true` if the given ID is in `current` or `finished`.\n    pub(super) fn has_entry(&self, data_id: &T) -> bool {\n        self.current.contains_key(data_id) || self.finished.contains(data_id)\n    }\n\n    /// Updates the entry under `data_id` in `self.current` and returns the action we should now\n    /// take, or `None` if the entry does not exist.\n    ///\n    /// If the entry becomes finished, it is moved from `self.current` to `self.finished`.\n    fn update_current<F: Fn(&mut State)>(\n        &mut self,\n        data_id: &T,\n        update: F,\n    ) -> Option<GossipAction> {\n        let mut state = self.current.remove(data_id)?;\n        update(&mut state);\n        if state.is_finished(self.infection_target, self.attempted_to_infect_limit) {\n            self.insert_to_finished(data_id);\n            return Some(GossipAction::AnnounceFinished);\n        }\n        let is_new = false;\n        let action = state.action(\n            
self.infection_target,\n            self.attempted_to_infect_limit,\n            is_new,\n        );\n        let _ = self.current.insert(data_id.clone(), state);\n        Some(action)\n    }\n\n    fn insert_to_finished(&mut self, data_id: &T) {\n        let timeout = Instant::now() + self.finished_entry_duration;\n        let _ = self.finished.insert(data_id.clone());\n        self.timeouts.push(timeout, data_id.clone());\n    }\n\n    /// Retains only those finished entries which still haven't timed out.\n    fn purge_finished(&mut self) {\n        let now = Instant::now();\n\n        for expired_finished in self.timeouts.purge(&now) {\n            let _ = self.finished.remove(&expired_finished);\n        }\n    }\n\n    #[cfg(test)]\n    pub(super) fn is_empty(&self) -> bool {\n        self.current.is_empty() && self.finished.is_empty()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::{collections::BTreeSet, iter, str::FromStr};\n\n    use rand::Rng;\n\n    use casper_types::{testing::TestRng, DisplayIter, TimeDiff};\n\n    use super::{super::config::DEFAULT_FINISHED_ENTRY_DURATION, *};\n    use crate::logging;\n\n    const EXPECTED_DEFAULT_INFECTION_TARGET: usize = 3;\n    const EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT: usize = 15;\n\n    fn random_node_ids(rng: &mut TestRng) -> Vec<NodeId> {\n        iter::repeat_with(|| NodeId::random(rng))\n            .take(EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT + 3)\n            .collect()\n    }\n\n    fn check_holders(expected: &[NodeId], gossip_table: &GossipTable<u64>, data_id: &u64) {\n        let expected: BTreeSet<_> = expected.iter().collect();\n        let actual: BTreeSet<_> = gossip_table\n            .current\n            .get(data_id)\n            .map_or_else(BTreeSet::new, |state| state.holders.iter().collect());\n        assert!(\n            expected == actual,\n            \"\\nexpected: {}\\nactual:   {}\\n\",\n            DisplayIter::new(expected.iter()),\n            
DisplayIter::new(actual.iter())\n        );\n    }\n\n    #[test]\n    fn new_data_id() {\n        let _ = logging::init();\n        let mut rng = crate::new_rng();\n        let node_ids = random_node_ids(&mut rng);\n        let data_id: u64 = rng.gen();\n\n        let mut gossip_table = GossipTable::new(Config::default());\n        assert_eq!(\n            EXPECTED_DEFAULT_INFECTION_TARGET,\n            gossip_table.infection_target\n        );\n        assert_eq!(\n            EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT,\n            gossip_table.attempted_to_infect_limit\n        );\n\n        // Check new data ID causes `GetRemainder` to be returned.\n        let action = gossip_table.new_data_id(&data_id, node_ids[0]);\n        let expected = GossipAction::GetRemainder {\n            holder: node_ids[0],\n        };\n        assert_eq!(expected, action);\n        check_holders(&node_ids[..1], &gossip_table, &data_id);\n\n        // Check same data ID from same source causes `AwaitingRemainder` to be returned.\n        let action = gossip_table.new_data_id(&data_id, node_ids[0]);\n        assert_eq!(GossipAction::AwaitingRemainder, action);\n        check_holders(&node_ids[..1], &gossip_table, &data_id);\n\n        // Check same data ID from different source causes `AwaitingRemainder` to be returned\n        // and holders updated.\n        let action = gossip_table.new_data_id(&data_id, node_ids[1]);\n        assert_eq!(GossipAction::AwaitingRemainder, action);\n        check_holders(&node_ids[..2], &gossip_table, &data_id);\n\n        // Finish the gossip by reporting three infections, then check same data ID causes\n        // `Noop` to be returned and holders cleared.\n        let _ = gossip_table.new_complete_data(&data_id, Some(node_ids[0]), GossipTarget::All);\n        let limit = 3 + EXPECTED_DEFAULT_INFECTION_TARGET;\n        for node_id in &node_ids[3..limit] {\n            let _ = gossip_table.we_infected(&data_id, *node_id);\n        }\n        let 
action = gossip_table.new_data_id(&data_id, node_ids[limit]);\n        assert_eq!(GossipAction::Noop, action);\n        check_holders(&node_ids[..0], &gossip_table, &data_id);\n\n        // Time the finished data out, then check same data ID causes `GetRemainder` to be\n        // returned as per a completely new entry.\n        let millis = TimeDiff::from_str(DEFAULT_FINISHED_ENTRY_DURATION)\n            .unwrap()\n            .millis();\n        Instant::advance_time(millis + 1);\n        let action = gossip_table.new_data_id(&data_id, node_ids[0]);\n        let expected = GossipAction::GetRemainder {\n            holder: node_ids[0],\n        };\n        assert_eq!(expected, action);\n        check_holders(&node_ids[..1], &gossip_table, &data_id);\n    }\n\n    #[test]\n    fn should_noop_if_we_dont_hold_data_and_get_gossip_response() {\n        let _ = logging::init();\n        let mut rng = crate::new_rng();\n        let node_id = NodeId::random(&mut rng);\n        let data_id: u64 = rng.gen();\n\n        let mut gossip_table = GossipTable::new(Config::default());\n\n        let _ = gossip_table.new_data_id(&data_id, node_id);\n\n        let action = gossip_table.we_infected(&data_id, node_id);\n        assert_eq!(GossipAction::AwaitingRemainder, action);\n\n        let action = gossip_table.already_infected(&data_id, node_id);\n        assert_eq!(GossipAction::AwaitingRemainder, action);\n    }\n\n    #[test]\n    fn new_complete_data() {\n        let _ = logging::init();\n        let mut rng = crate::new_rng();\n        let node_ids = random_node_ids(&mut rng);\n        let data_id: u64 = rng.gen();\n\n        let mut gossip_table = GossipTable::new(Config::default());\n\n        // Check new complete data from us causes `ShouldGossip` to be returned.\n        let action = gossip_table.new_complete_data(&data_id, None, GossipTarget::All);\n        let expected = GossipAction::ShouldGossip(ShouldGossip {\n            count: 
EXPECTED_DEFAULT_INFECTION_TARGET,\n            target: GossipTarget::All,\n            exclude_peers: HashSet::new(),\n            is_already_held: false,\n        });\n        assert_eq!(expected, action);\n        check_holders(&node_ids[..0], &gossip_table, &data_id);\n\n        // Check same complete data from other source causes `Noop` to be returned since we still\n        // have all gossip requests in flight.  Check it updates holders.\n        gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[0]));\n        let action = gossip_table.new_complete_data(&data_id, Some(node_ids[0]), GossipTarget::All);\n        assert_eq!(GossipAction::Noop, action);\n        check_holders(&node_ids[..1], &gossip_table, &data_id);\n\n        // Check receiving a gossip response, causes `ShouldGossip` to be returned and holders\n        // updated.\n        gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[1]));\n        let action = gossip_table.already_infected(&data_id, node_ids[1]);\n        let expected = GossipAction::ShouldGossip(ShouldGossip {\n            count: 1,\n            target: GossipTarget::All,\n            exclude_peers: node_ids[..2].iter().cloned().collect(),\n            is_already_held: true,\n        });\n        assert_eq!(expected, action);\n        check_holders(&node_ids[..2], &gossip_table, &data_id);\n\n        gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[2]));\n        let action = gossip_table.new_complete_data(&data_id, Some(node_ids[2]), GossipTarget::All);\n        assert_eq!(GossipAction::Noop, action);\n        check_holders(&node_ids[..3], &gossip_table, &data_id);\n\n        // Finish the gossip by reporting enough non-infections, then check same complete data\n        // causes `Noop` to be returned and holders cleared.\n        let limit = 3 + EXPECTED_DEFAULT_INFECTION_TARGET;\n        for node_id in &node_ids[3..limit] {\n            
gossip_table.register_infection_attempt(&data_id, iter::once(node_id));\n            let _ = gossip_table.we_infected(&data_id, *node_id);\n        }\n        let action = gossip_table.new_complete_data(&data_id, None, GossipTarget::All);\n        assert_eq!(GossipAction::Noop, action);\n        check_holders(&node_ids[..0], &gossip_table, &data_id);\n\n        // Time the finished data out, then check same complete data causes `ShouldGossip` to be\n        // returned as per a completely new entry.\n        let millis = TimeDiff::from_str(DEFAULT_FINISHED_ENTRY_DURATION)\n            .unwrap()\n            .millis();\n        Instant::advance_time(millis + 1);\n\n        let action = gossip_table.new_complete_data(&data_id, Some(node_ids[0]), GossipTarget::All);\n        let expected = GossipAction::ShouldGossip(ShouldGossip {\n            count: EXPECTED_DEFAULT_INFECTION_TARGET,\n            target: GossipTarget::All,\n            exclude_peers: HashSet::new(), // We didn't infect anyone yet.\n            is_already_held: false,\n        });\n        assert_eq!(expected, action);\n        check_holders(&node_ids[..1], &gossip_table, &data_id);\n    }\n\n    #[test]\n    fn should_terminate_via_infection_limit() {\n        let _ = logging::init();\n        let mut rng = crate::new_rng();\n        let node_ids = random_node_ids(&mut rng);\n        let data_id: u64 = rng.gen();\n\n        let mut gossip_table = GossipTable::new(Config::default());\n\n        // Add new complete data from us and check two infections doesn't cause us to stop\n        // gossiping.\n        let _ = gossip_table.new_complete_data(&data_id, None, GossipTarget::All);\n        let limit = EXPECTED_DEFAULT_INFECTION_TARGET - 1;\n        for node_id in node_ids.iter().take(limit) {\n            gossip_table.register_infection_attempt(&data_id, iter::once(node_id));\n            let action = gossip_table.we_infected(&data_id, *node_id);\n            assert_eq!(GossipAction::Noop, action);\n  
          assert!(!gossip_table.finished.contains(&data_id));\n        }\n\n        // Check recording an infection from an already-recorded infectee doesn't cause us to stop\n        // gossiping.\n        gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[limit - 1]));\n        let action = gossip_table.we_infected(&data_id, node_ids[limit - 1]);\n        let expected = GossipAction::ShouldGossip(ShouldGossip {\n            count: 1,\n            target: GossipTarget::All,\n            exclude_peers: node_ids[..limit].iter().cloned().collect(),\n            is_already_held: true,\n        });\n        assert_eq!(expected, action);\n        assert!(!gossip_table.finished.contains(&data_id));\n\n        // Check third new infection does cause us to stop gossiping.\n        gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[limit]));\n        let action = gossip_table.we_infected(&data_id, node_ids[limit]);\n        assert_eq!(GossipAction::AnnounceFinished, action);\n        assert!(gossip_table.finished.contains(&data_id));\n    }\n\n    #[test]\n    fn should_not_terminate_via_incoming_gossip() {\n        let _ = logging::init();\n        let mut rng = crate::new_rng();\n        let node_ids = random_node_ids(&mut rng);\n        let data_id1: u64 = rng.gen();\n        let data_id2: u64 = rng.gen();\n\n        let mut gossip_table = GossipTable::new(Config::default());\n\n        // Take the two items close to the termination condition of holder count by simulating\n        // receiving several incoming gossip requests.  
Each should remain unfinished.\n        let limit = EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT - 1;\n        for node_id in node_ids.iter().take(limit) {\n            let _ = gossip_table.new_data_id(&data_id1, *node_id);\n            assert!(!gossip_table.finished.contains(&data_id1));\n\n            let _ = gossip_table.new_complete_data(&data_id2, Some(*node_id), GossipTarget::All);\n            assert!(!gossip_table.finished.contains(&data_id2));\n        }\n\n        // Simulate receiving a final gossip request for each, which should cause them both to be\n        // moved to the `finished` collection.\n        let action = gossip_table.new_data_id(\n            &data_id1,\n            node_ids[EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT],\n        );\n        assert!(!gossip_table.finished.contains(&data_id1));\n        assert_eq!(GossipAction::AwaitingRemainder, action);\n\n        let action = gossip_table.new_complete_data(\n            &data_id2,\n            Some(node_ids[EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT]),\n            GossipTarget::All,\n        );\n        assert!(!gossip_table.finished.contains(&data_id2));\n        assert_eq!(GossipAction::Noop, action);\n    }\n\n    #[test]\n    fn should_terminate_via_checking_timeout() {\n        let _ = logging::init();\n        let mut rng = crate::new_rng();\n        let node_ids = random_node_ids(&mut rng);\n        let data_id: u64 = rng.gen();\n\n        let mut gossip_table = GossipTable::new(Config::default());\n\n        // Take the item close to the termination condition of holder count by simulating receiving\n        // several incoming gossip requests.  
It should remain unfinished.\n        let limit = EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT - 1;\n        for node_id in node_ids.iter().take(limit) {\n            let _ = gossip_table.new_complete_data(&data_id, Some(*node_id), GossipTarget::All);\n            gossip_table.register_infection_attempt(&data_id, iter::once(node_id));\n            assert!(!gossip_table.finished.contains(&data_id));\n        }\n\n        // Simulate a gossip response timing out, which should cause the item to be moved to the\n        // `finished` collection.\n        gossip_table.register_infection_attempt(\n            &data_id,\n            iter::once(&node_ids[EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT]),\n        );\n        let action = gossip_table.check_timeout(\n            &data_id,\n            node_ids[EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT],\n        );\n        assert!(gossip_table.finished.contains(&data_id));\n        assert_eq!(GossipAction::AnnounceFinished, action);\n    }\n\n    #[test]\n    fn should_terminate_via_reducing_in_flight_count() {\n        let _ = logging::init();\n        let mut rng = crate::new_rng();\n        let data_id: u64 = rng.gen();\n\n        let mut gossip_table = GossipTable::new(Config::default());\n\n        // Take the item close to the termination condition of in-flight count reaching 0.  
It\n        // should remain unfinished.\n        let _ = gossip_table.new_complete_data(&data_id, None, GossipTarget::All);\n        let limit = EXPECTED_DEFAULT_INFECTION_TARGET - 1;\n        assert!(!gossip_table.reduce_in_flight_count(&data_id, limit));\n        assert!(!gossip_table.finished.contains(&data_id));\n\n        // Reduce the in-flight count to 0, which should cause the item to be moved to the\n        // `finished` collection.\n        assert!(gossip_table.reduce_in_flight_count(&data_id, 1));\n        assert!(gossip_table.finished.contains(&data_id));\n\n        // Check that calling this again has no effect and continues to return `false`.\n        assert!(!gossip_table.reduce_in_flight_count(&data_id, 1));\n        assert!(gossip_table.finished.contains(&data_id));\n    }\n\n    #[test]\n    fn should_terminate_via_saturation() {\n        let _ = logging::init();\n        let mut rng = crate::new_rng();\n        let node_ids = random_node_ids(&mut rng);\n        let data_id: u64 = rng.gen();\n\n        let mut gossip_table = GossipTable::new(Config::default());\n\n        // Add new complete data with 14 non-infections and check this doesn't cause us to stop\n        // gossiping.\n        let _ = gossip_table.new_complete_data(&data_id, None, GossipTarget::All);\n        let limit = EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT - 1;\n        for (index, node_id) in node_ids.iter().enumerate().take(limit) {\n            gossip_table.register_infection_attempt(&data_id, iter::once(node_id));\n            let action = gossip_table.already_infected(&data_id, *node_id);\n            let expected = GossipAction::ShouldGossip(ShouldGossip {\n                count: 1,\n                target: GossipTarget::All,\n                exclude_peers: node_ids[..(index + 1)].iter().cloned().collect(),\n                is_already_held: true,\n            });\n            assert_eq!(expected, action);\n        }\n\n        // Check recording a non-infection from an 
already-recorded holder doesn't cause us to stop\n        // gossiping.\n        gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[0]));\n        let action = gossip_table.already_infected(&data_id, node_ids[0]);\n        let expected = GossipAction::ShouldGossip(ShouldGossip {\n            count: 1,\n            target: GossipTarget::All,\n            exclude_peers: node_ids[..limit].iter().cloned().collect(),\n            is_already_held: true,\n        });\n        assert_eq!(expected, action);\n\n        // Check 15th non-infection does cause us to stop gossiping.\n        gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[limit]));\n        let action = gossip_table.we_infected(&data_id, node_ids[limit]);\n        assert_eq!(GossipAction::AnnounceFinished, action);\n    }\n\n    #[test]\n    fn should_not_terminate_below_infection_limit_and_saturation() {\n        let _ = logging::init();\n        let mut rng = crate::new_rng();\n        let node_ids = random_node_ids(&mut rng);\n        let data_id: u64 = rng.gen();\n\n        let mut gossip_table = GossipTable::new(Config::default());\n\n        // Add new complete data with 2 infections and 11 non-infections.\n        let _ = gossip_table.new_complete_data(&data_id, None, GossipTarget::All);\n        let infection_limit = EXPECTED_DEFAULT_INFECTION_TARGET - 1;\n        for node_id in &node_ids[0..infection_limit] {\n            gossip_table.register_infection_attempt(&data_id, iter::once(node_id));\n            let _ = gossip_table.we_infected(&data_id, *node_id);\n        }\n\n        let attempted_to_infect = EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT - 2;\n        for node_id in &node_ids[infection_limit..attempted_to_infect] {\n            gossip_table.register_infection_attempt(&data_id, iter::once(node_id));\n            let _ = gossip_table.already_infected(&data_id, *node_id);\n        }\n\n        // Check adding 12th non-infection doesn't cause us to stop 
gossiping.\n        gossip_table\n            .register_infection_attempt(&data_id, iter::once(&node_ids[attempted_to_infect]));\n        let action = gossip_table.already_infected(&data_id, node_ids[attempted_to_infect]);\n        let expected = GossipAction::ShouldGossip(ShouldGossip {\n            count: 1,\n            target: GossipTarget::All,\n            exclude_peers: node_ids[..(attempted_to_infect + 1)]\n                .iter()\n                .cloned()\n                .collect(),\n            is_already_held: true,\n        });\n        assert_eq!(expected, action);\n    }\n\n    #[test]\n    fn check_timeout_should_detect_holder() {\n        let _ = logging::init();\n        let mut rng = crate::new_rng();\n        let node_ids = random_node_ids(&mut rng);\n        let data_id: u64 = rng.gen();\n\n        let mut gossip_table = GossipTable::new(Config::default());\n\n        // Add new complete data and get a response from node 0 only.\n        let _ = gossip_table.new_complete_data(&data_id, None, GossipTarget::All);\n        let _ = gossip_table.we_infected(&data_id, node_ids[0]);\n\n        // check_timeout for node 0 should return Noop, and for node 1 it should represent a timed\n        // out response and return ShouldGossip.\n        gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[0]));\n        let action = gossip_table.check_timeout(&data_id, node_ids[0]);\n        assert_eq!(GossipAction::Noop, action);\n\n        gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[1]));\n        let action = gossip_table.check_timeout(&data_id, node_ids[1]);\n        let expected = GossipAction::ShouldGossip(ShouldGossip {\n            count: 1,\n            target: GossipTarget::All,\n            exclude_peers: node_ids[..=1].iter().cloned().collect(),\n            is_already_held: true,\n        });\n        assert_eq!(expected, action);\n    }\n\n    #[test]\n    #[cfg_attr(\n        debug_assertions,\n        
should_panic(\n            expected = \"shouldn't check timeout for a gossip response for data we don't hold\"\n        )\n    )]\n    fn check_timeout_should_panic_for_data_we_dont_hold() {\n        let _ = logging::init();\n        let mut rng = crate::new_rng();\n        let node_ids = random_node_ids(&mut rng);\n        let data_id: u64 = rng.gen();\n\n        let mut gossip_table = GossipTable::new(Config::default());\n        let _ = gossip_table.new_data_id(&data_id, node_ids[0]);\n        let _ = gossip_table.check_timeout(&data_id, node_ids[0]);\n    }\n\n    #[test]\n    fn should_remove_holder_if_unresponsive() {\n        let _ = logging::init();\n        let mut rng = crate::new_rng();\n        let node_ids = random_node_ids(&mut rng);\n        let data_id: u64 = rng.gen();\n\n        let mut gossip_table = GossipTable::new(Config::default());\n\n        // Add new data ID from nodes 0 and 1.\n        let _ = gossip_table.new_data_id(&data_id, node_ids[0]);\n        let _ = gossip_table.new_data_id(&data_id, node_ids[1]);\n\n        // Node 0 should be removed from the holders since it hasn't provided us with the full data,\n        // and we should be told to get the remainder from node 1.\n        let action = gossip_table.remove_holder_if_unresponsive(&data_id, node_ids[0]);\n        let expected = GossipAction::GetRemainder {\n            holder: node_ids[1],\n        };\n        assert_eq!(expected, action);\n        check_holders(&node_ids[1..2], &gossip_table, &data_id);\n\n        // Node 1 should be removed from the holders since it hasn't provided us with the full data,\n        // and the entry should be removed since there are no more holders.\n        let action = gossip_table.remove_holder_if_unresponsive(&data_id, node_ids[1]);\n        assert_eq!(GossipAction::Noop, action);\n        assert!(!gossip_table.current.contains_key(&data_id));\n        assert!(!gossip_table.finished.contains(&data_id));\n    }\n\n    #[test]\n    fn 
should_not_remove_holder_if_responsive() {\n        let _ = logging::init();\n        let mut rng = crate::new_rng();\n        let node_ids = random_node_ids(&mut rng);\n        let data_id: u64 = rng.gen();\n\n        let mut gossip_table = GossipTable::new(Config::default());\n\n        // Add new data ID from node 0 and record that we have received the full data from it.\n        let _ = gossip_table.new_data_id(&data_id, node_ids[0]);\n        let _ = gossip_table.new_complete_data(&data_id, Some(node_ids[0]), GossipTarget::All);\n\n        // Node 0 should remain as a holder since we now hold the complete data.\n        let action = gossip_table.remove_holder_if_unresponsive(&data_id, node_ids[0]);\n        assert_eq!(GossipAction::Noop, action); // Noop as all RPCs are still in-flight\n        check_holders(&node_ids[..1], &gossip_table, &data_id);\n        assert!(gossip_table.current.contains_key(&data_id));\n    }\n\n    #[test]\n    fn should_force_finish() {\n        let _ = logging::init();\n        let mut rng = crate::new_rng();\n        let node_ids = random_node_ids(&mut rng);\n        let data_id: u64 = rng.gen();\n\n        let mut gossip_table = GossipTable::new(Config::default());\n\n        // Add new data ID from node 0, then forcibly finish gossiping.\n        let _ = gossip_table.new_data_id(&data_id, node_ids[0]);\n        assert!(gossip_table.force_finish(&data_id));\n        assert!(gossip_table.finished.contains(&data_id));\n\n        // Ensure forcibly finishing the same data returns `false`.\n        assert!(!gossip_table.force_finish(&data_id));\n    }\n\n    #[test]\n    fn should_purge() {\n        let _ = logging::init();\n        let mut rng = crate::new_rng();\n        let node_ids = random_node_ids(&mut rng);\n        let data_id: u64 = rng.gen();\n\n        let mut gossip_table = GossipTable::new(Config::default());\n\n        // Add new complete data and finish via infection limit.\n        let _ = 
gossip_table.new_complete_data(&data_id, None, GossipTarget::All);\n        for node_id in &node_ids[0..EXPECTED_DEFAULT_INFECTION_TARGET] {\n            let _ = gossip_table.we_infected(&data_id, *node_id);\n        }\n        assert!(gossip_table.finished.contains(&data_id));\n\n        // Time the finished data out and check it has been purged.\n        let millis = TimeDiff::from_str(DEFAULT_FINISHED_ENTRY_DURATION)\n            .unwrap()\n            .millis();\n        Instant::advance_time(millis + 1);\n        gossip_table.purge_finished();\n        assert!(!gossip_table.finished.contains(&data_id));\n\n        // Add new complete data and forcibly finish.\n        let _ = gossip_table.new_complete_data(&data_id, None, GossipTarget::All);\n        assert!(gossip_table.force_finish(&data_id));\n        assert!(gossip_table.finished.contains(&data_id));\n\n        // Time the finished data out and check it has been purged.\n        Instant::advance_time(millis + 1);\n        gossip_table.purge_finished();\n        assert!(!gossip_table.finished.contains(&data_id));\n    }\n\n    #[test]\n    fn timeouts_purge_in_order() {\n        let mut timeouts = Timeouts::new();\n        let now = Instant::now();\n        let later_100 = now + Duration::from_millis(100);\n        let later_200 = now + Duration::from_millis(200);\n\n        // Timeouts are added and purged in chronological order.\n        timeouts.push(now, 0);\n        timeouts.push(later_100, 1);\n        timeouts.push(later_200, 2);\n\n        let now_after_time_travel = now + Duration::from_millis(10);\n        let purged = timeouts.purge(&now_after_time_travel).collect::<Vec<i32>>();\n\n        assert_eq!(purged, vec![0]);\n    }\n\n    #[test]\n    fn timeouts_depends_on_binary_search_by_implementation() {\n        // This test is meant to document the dependency of\n        // Timeouts::purge on https://doc.rust-lang.org/std/vec/struct.Vec.html#method.binary_search_by.\n        // If this test is 
failing then it's reasonable to believe that the implementation of\n        // binary_search_by has been updated.\n        let mut timeouts = Timeouts::new();\n        let now = Instant::now();\n        let later_100 = now + Duration::from_millis(100);\n        let later_200 = now + Duration::from_millis(200);\n        let later_300 = now + Duration::from_millis(300);\n        let later_400 = now + Duration::from_millis(400);\n        let later_500 = now + Duration::from_millis(500);\n        let later_600 = now + Duration::from_millis(600);\n\n        timeouts.push(later_100, 1);\n        timeouts.push(later_200, 2);\n        timeouts.push(later_300, 3);\n\n        // If a node's system time was changed to a time earlier than\n        // the earliest timeout, and a new timeout is added with an instant\n        // corresponding to this new early time, then this would make the earliest\n        // timeout the LAST timeout in the vec.\n        // [100 < 200 < 300 > 0]\n        timeouts.push(now, 0);\n\n        let now_after_time_travel = now + Duration::from_millis(10);\n        // Intuitively, we would expect [1,2,3,0] to be in the \"purged\" vec here.\n        // This is not the case because we're using binary_search_by, which (currently)\n        // is implemented with logic that checks if a, b, ... z are in a consistent order.\n        // in this case, the order that we've established is a < b < ... 
< z for each element in the\n        // vec, but we broke that order by inserting '0' last, and for some reason,\n        // binary_search_by won't find this unless there is a number > n occurring AFTER n\n        // in the vec.\n\n        let purged = timeouts.purge(&now_after_time_travel).collect::<Vec<i32>>();\n        let empty: Vec<i32> = vec![];\n\n        // This isn't a problem and the order will eventually\n        // be restored.\n        assert_eq!(purged, empty);\n\n        timeouts.push(later_400, 4);\n        timeouts.push(later_500, 5);\n        timeouts.push(later_600, 6);\n\n        // Now, we advance time another 10 ms and purge again.\n        // In this scenario, timeouts with a later time are added after our\n        // improperly ordered \"now\" timeout\n        // [100 < 200 < 300 > 0 < 400 < 500 < 600]\n        let now_after_time_travel = now + Duration::from_millis(20);\n        let purged = timeouts.purge(&now_after_time_travel).collect::<Vec<i32>>();\n        let expected = [1, 2, 3, 0];\n\n        assert_eq!(purged, expected);\n\n        // After the previous purge, an order is restored where a < b for consecutive elements in\n        // the vec. [400 < 500 < 600], so, purging timeouts up to 610 will properly clear\n        // the vec.\n        let now_after_time_travel = now + Duration::from_millis(610);\n        let purged = timeouts.purge(&now_after_time_travel).collect::<Vec<i32>>();\n        let expected = [4, 5, 6];\n\n        assert_eq!(purged, expected);\n        assert_eq!(0, timeouts.values.len());\n    }\n}\n"
  },
  {
    "path": "node/src/components/gossiper/item_provider.rs",
    "content": "use async_trait::async_trait;\n\nuse super::GossipItem;\nuse crate::effect::{requests::StorageRequest, EffectBuilder};\n\n#[async_trait]\npub(super) trait ItemProvider<T: GossipItem> {\n    async fn is_stored<REv: From<StorageRequest> + Send>(\n        effect_builder: EffectBuilder<REv>,\n        item_id: T::Id,\n    ) -> bool;\n\n    async fn get_from_storage<REv: From<StorageRequest> + Send>(\n        effect_builder: EffectBuilder<REv>,\n        item_id: T::Id,\n    ) -> Option<Box<T>>;\n}\n"
  },
  {
    "path": "node/src/components/gossiper/message.rs",
    "content": "use std::{\n    boxed::Box,\n    fmt::{self, Display, Formatter},\n};\n\nuse serde::{Deserialize, Serialize};\nuse strum::EnumDiscriminants;\n\nuse super::GossipItem;\n\n#[derive(Clone, Debug, Deserialize, Serialize, EnumDiscriminants)]\n#[strum_discriminants(derive(strum::EnumIter))]\n#[serde(bound = \"for<'a> T: Deserialize<'a>\")]\npub(crate) enum Message<T: GossipItem> {\n    /// Gossiped out to random peers to notify them of an item we hold.\n    Gossip(T::Id),\n    /// Response to a `Gossip` message.  If `is_already_held` is false, the recipient should treat\n    /// this as a `GetItem` message and send an `Item` message containing the item.\n    GossipResponse {\n        item_id: T::Id,\n        is_already_held: bool,\n    },\n    /// Request to get an item we were previously told about, but the peer timed out and we never\n    /// received it.\n    GetItem(T::Id),\n    /// Response to either a `GossipResponse` with `is_already_held` set to `false` or to a\n    /// `GetItem` message. 
Contains the actual item requested.\n    Item(Box<T>),\n}\n\nimpl<T: GossipItem> Display for Message<T> {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            Message::Gossip(item_id) => write!(formatter, \"gossip({})\", item_id),\n            Message::GossipResponse {\n                item_id,\n                is_already_held,\n            } => write!(\n                formatter,\n                \"gossip-response({}, {})\",\n                item_id, is_already_held\n            ),\n            Message::GetItem(item_id) => write!(formatter, \"gossip-get-item({})\", item_id),\n            Message::Item(item) => write!(formatter, \"gossip-item({})\", item.gossip_id()),\n        }\n    }\n}\n\nmod specimen_support {\n    use crate::{\n        components::gossiper::GossipItem,\n        utils::specimen::{largest_variant, Cache, LargestSpecimen, SizeEstimator},\n    };\n\n    use super::{Message, MessageDiscriminants};\n\n    impl<T> LargestSpecimen for Message<T>\n    where\n        T: GossipItem + LargestSpecimen,\n        <T as GossipItem>::Id: LargestSpecimen,\n    {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            largest_variant::<Self, MessageDiscriminants, _, _>(\n                estimator,\n                |variant| match variant {\n                    MessageDiscriminants::Gossip => {\n                        Message::Gossip(LargestSpecimen::largest_specimen(estimator, cache))\n                    }\n                    MessageDiscriminants::GossipResponse => Message::GossipResponse {\n                        item_id: LargestSpecimen::largest_specimen(estimator, cache),\n                        is_already_held: LargestSpecimen::largest_specimen(estimator, cache),\n                    },\n                    MessageDiscriminants::GetItem => {\n                        Message::GetItem(LargestSpecimen::largest_specimen(estimator, cache))\n                    
}\n                    MessageDiscriminants::Item => {\n                        Message::Item(LargestSpecimen::largest_specimen(estimator, cache))\n                    }\n                },\n            )\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/gossiper/metrics.rs",
    "content": "use prometheus::{IntCounter, IntGauge, Registry};\n\nuse crate::unregister_metric;\n\n/// Metrics for the gossiper component.\n#[derive(Debug)]\npub(super) struct Metrics {\n    /// Total number of items received by the gossiper.\n    pub(super) items_received: IntCounter,\n    /// Total number of gossip requests sent to peers.\n    pub(super) times_gossiped: IntCounter,\n    /// Number of times the process had to pause due to running out of peers.\n    pub(super) times_ran_out_of_peers: IntCounter,\n    /// Number of items in the gossip table that are currently being gossiped.\n    pub(super) table_items_current: IntGauge,\n    /// Number of items in the gossip table that are finished.\n    pub(super) table_items_finished: IntGauge,\n    /// Reference to the registry for unregistering.\n    registry: Registry,\n}\n\nimpl Metrics {\n    /// Creates a new instance of gossiper metrics, using the given prefix.\n    pub fn new(name: &str, registry: &Registry) -> Result<Self, prometheus::Error> {\n        let items_received = IntCounter::new(\n            format!(\"{}_items_received\", name),\n            format!(\"number of items received by the {}\", name),\n        )?;\n        let times_gossiped = IntCounter::new(\n            format!(\"{}_times_gossiped\", name),\n            format!(\"number of times the {} sent gossip requests to peers\", name),\n        )?;\n        let times_ran_out_of_peers = IntCounter::new(\n            format!(\"{}_times_ran_out_of_peers\", name),\n            format!(\n                \"number of times the {} ran out of peers and had to pause\",\n                name\n            ),\n        )?;\n        let table_items_current = IntGauge::new(\n            format!(\"{}_table_items_current\", name),\n            format!(\n                \"number of items in the gossip table of {} in state current\",\n                name\n            ),\n        )?;\n        let table_items_finished = IntGauge::new(\n            
format!(\"{}_table_items_finished\", name),\n            format!(\n                \"number of items in the gossip table of {} in state finished\",\n                name\n            ),\n        )?;\n\n        registry.register(Box::new(items_received.clone()))?;\n        registry.register(Box::new(times_gossiped.clone()))?;\n        registry.register(Box::new(times_ran_out_of_peers.clone()))?;\n        registry.register(Box::new(table_items_current.clone()))?;\n        registry.register(Box::new(table_items_finished.clone()))?;\n\n        Ok(Metrics {\n            items_received,\n            times_gossiped,\n            times_ran_out_of_peers,\n            table_items_current,\n            table_items_finished,\n            registry: registry.clone(),\n        })\n    }\n}\n\nimpl Drop for Metrics {\n    fn drop(&mut self) {\n        unregister_metric!(self.registry, self.items_received);\n        unregister_metric!(self.registry, self.times_gossiped);\n        unregister_metric!(self.registry, self.times_ran_out_of_peers);\n        unregister_metric!(self.registry, self.table_items_current);\n        unregister_metric!(self.registry, self.table_items_finished);\n    }\n}\n"
  },
  {
    "path": "node/src/components/gossiper/provider_impls/address_provider.rs",
    "content": "use async_trait::async_trait;\nuse tracing::error;\n\nuse crate::{\n    components::{\n        gossiper::{GossipItem, Gossiper, ItemProvider},\n        network::GossipedAddress,\n    },\n    effect::EffectBuilder,\n};\n\n#[async_trait]\nimpl ItemProvider<GossipedAddress>\n    for Gossiper<{ GossipedAddress::ID_IS_COMPLETE_ITEM }, GossipedAddress>\n{\n    async fn is_stored<REv: Send>(\n        _effect_builder: EffectBuilder<REv>,\n        item_id: GossipedAddress,\n    ) -> bool {\n        error!(%item_id, \"address gossiper should never try to check if item is stored\");\n        false\n    }\n\n    async fn get_from_storage<REv: Send>(\n        _effect_builder: EffectBuilder<REv>,\n        item_id: GossipedAddress,\n    ) -> Option<Box<GossipedAddress>> {\n        error!(%item_id, \"address gossiper should never try to get from storage\");\n        None\n    }\n}\n"
  },
  {
    "path": "node/src/components/gossiper/provider_impls/block_provider.rs",
    "content": "use async_trait::async_trait;\nuse std::convert::TryInto;\n\nuse casper_types::{BlockHash, BlockV2};\n\nuse crate::{\n    components::gossiper::{GossipItem, GossipTarget, Gossiper, ItemProvider, LargeGossipItem},\n    effect::{requests::StorageRequest, EffectBuilder},\n};\n\nimpl GossipItem for BlockV2 {\n    type Id = BlockHash;\n\n    const ID_IS_COMPLETE_ITEM: bool = false;\n    const REQUIRES_GOSSIP_RECEIVED_ANNOUNCEMENT: bool = true;\n\n    fn gossip_id(&self) -> Self::Id {\n        *self.hash()\n    }\n\n    fn gossip_target(&self) -> GossipTarget {\n        GossipTarget::Mixed(self.era_id())\n    }\n}\n\nimpl LargeGossipItem for BlockV2 {}\n\n#[async_trait]\nimpl ItemProvider<BlockV2> for Gossiper<{ BlockV2::ID_IS_COMPLETE_ITEM }, BlockV2> {\n    async fn is_stored<REv: From<StorageRequest> + Send>(\n        effect_builder: EffectBuilder<REv>,\n        item_id: BlockHash,\n    ) -> bool {\n        effect_builder.is_block_stored(item_id).await\n    }\n\n    async fn get_from_storage<REv: From<StorageRequest> + Send>(\n        effect_builder: EffectBuilder<REv>,\n        item_id: BlockHash,\n    ) -> Option<Box<BlockV2>> {\n        if let Some(block) = effect_builder.get_block_from_storage(item_id).await {\n            block.try_into().ok().map(Box::new)\n        } else {\n            None\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/gossiper/provider_impls/finality_signature_provider.rs",
    "content": "use async_trait::async_trait;\n\nuse casper_types::{FinalitySignature, FinalitySignatureId, FinalitySignatureV2};\n\nuse crate::{\n    components::gossiper::{GossipItem, GossipTarget, Gossiper, ItemProvider, LargeGossipItem},\n    effect::{requests::StorageRequest, EffectBuilder},\n};\n\nimpl GossipItem for FinalitySignatureV2 {\n    type Id = Box<FinalitySignatureId>;\n\n    const ID_IS_COMPLETE_ITEM: bool = false;\n    const REQUIRES_GOSSIP_RECEIVED_ANNOUNCEMENT: bool = true;\n\n    fn gossip_id(&self) -> Self::Id {\n        Box::new(FinalitySignatureId::new(\n            *self.block_hash(),\n            self.era_id(),\n            self.public_key().clone(),\n        ))\n    }\n\n    fn gossip_target(&self) -> GossipTarget {\n        GossipTarget::Mixed(self.era_id())\n    }\n}\n\nimpl LargeGossipItem for FinalitySignatureV2 {}\n\n#[async_trait]\nimpl ItemProvider<FinalitySignatureV2>\n    for Gossiper<{ FinalitySignatureV2::ID_IS_COMPLETE_ITEM }, FinalitySignatureV2>\n{\n    async fn is_stored<REv: From<StorageRequest> + Send>(\n        effect_builder: EffectBuilder<REv>,\n        item_id: Box<FinalitySignatureId>,\n    ) -> bool {\n        effect_builder.is_finality_signature_stored(item_id).await\n    }\n\n    async fn get_from_storage<REv: From<StorageRequest> + Send>(\n        effect_builder: EffectBuilder<REv>,\n        item_id: Box<FinalitySignatureId>,\n    ) -> Option<Box<FinalitySignatureV2>> {\n        if let Some(FinalitySignature::V2(sig)) = effect_builder\n            .get_finality_signature_from_storage(item_id)\n            .await\n        {\n            Some(Box::new(sig))\n        } else {\n            None\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/gossiper/provider_impls/transaction_provider.rs",
    "content": "use async_trait::async_trait;\n\nuse casper_types::{Transaction, TransactionId};\n\nuse crate::{\n    components::gossiper::{GossipItem, GossipTarget, Gossiper, ItemProvider, LargeGossipItem},\n    effect::{requests::StorageRequest, EffectBuilder},\n};\n\nimpl GossipItem for Transaction {\n    type Id = TransactionId;\n\n    const ID_IS_COMPLETE_ITEM: bool = false;\n    const REQUIRES_GOSSIP_RECEIVED_ANNOUNCEMENT: bool = false;\n\n    fn gossip_id(&self) -> Self::Id {\n        self.compute_id()\n    }\n\n    fn gossip_target(&self) -> GossipTarget {\n        GossipTarget::All\n    }\n}\n\nimpl LargeGossipItem for Transaction {}\n\n#[async_trait]\nimpl ItemProvider<Transaction> for Gossiper<{ Transaction::ID_IS_COMPLETE_ITEM }, Transaction> {\n    async fn is_stored<REv: From<StorageRequest> + Send>(\n        effect_builder: EffectBuilder<REv>,\n        item_id: TransactionId,\n    ) -> bool {\n        effect_builder.is_transaction_stored(item_id).await\n    }\n\n    async fn get_from_storage<REv: From<StorageRequest> + Send>(\n        effect_builder: EffectBuilder<REv>,\n        item_id: TransactionId,\n    ) -> Option<Box<Transaction>> {\n        effect_builder\n            .get_stored_transaction(item_id)\n            .await\n            .map(Box::new)\n    }\n}\n"
  },
  {
    "path": "node/src/components/gossiper/provider_impls.rs",
    "content": "mod address_provider;\nmod block_provider;\nmod finality_signature_provider;\nmod transaction_provider;\n"
  },
  {
    "path": "node/src/components/gossiper/tests.rs",
    "content": "// Unrestricted event size is okay in tests.\n#![allow(clippy::large_enum_variant)]\n#![cfg(test)]\nuse std::{\n    collections::{BTreeSet, HashMap},\n    iter,\n    sync::Arc,\n};\n\nuse derive_more::{Display, From};\nuse prometheus::Registry;\nuse rand::Rng;\nuse reactor::ReactorEvent;\nuse serde::Serialize;\nuse tempfile::TempDir;\nuse thiserror::Error;\nuse tokio::time;\nuse tracing::debug;\n\nuse casper_types::{\n    testing::TestRng, BlockV2, Chainspec, ChainspecRawBytes, EraId, FinalitySignatureV2,\n    ProtocolVersion, TimeDiff, Transaction, TransactionConfig,\n};\n\nuse super::*;\nuse crate::{\n    components::{\n        in_memory_network::{self, InMemoryNetwork, NetworkController},\n        network::{GossipedAddress, Identity as NetworkIdentity},\n        storage::{self, Storage},\n        transaction_acceptor,\n    },\n    effect::{\n        announcements::{\n            ControlAnnouncement, FatalAnnouncement, GossiperAnnouncement,\n            TransactionAcceptorAnnouncement,\n        },\n        incoming::{\n            ConsensusDemand, ConsensusMessageIncoming, FinalitySignatureIncoming,\n            NetRequestIncoming, NetResponseIncoming, TrieDemand, TrieRequestIncoming,\n            TrieResponseIncoming,\n        },\n        requests::AcceptTransactionRequest,\n    },\n    protocol::Message as NodeMessage,\n    reactor::{self, EventQueueHandle, QueueKind, Runner, TryCrankOutcome},\n    testing::{\n        self,\n        network::{NetworkedReactor, TestingNetwork},\n        ConditionCheckReactor, FakeTransactionAcceptor,\n    },\n    types::NodeId,\n    utils::WithDir,\n    NodeRng,\n};\n\nconst RECENT_ERA_COUNT: u64 = 5;\nconst MAX_TTL: TimeDiff = TimeDiff::from_seconds(86400);\nconst EXPECTED_GOSSIP_TARGET: GossipTarget = GossipTarget::All;\n\n/// Top-level event for the reactor.\n#[derive(Debug, From, Serialize, Display)]\n#[must_use]\nenum Event {\n    #[from]\n    Network(in_memory_network::Event<NodeMessage>),\n    #[from]\n    
Storage(storage::Event),\n    #[from]\n    TransactionAcceptor(#[serde(skip_serializing)] transaction_acceptor::Event),\n    #[from]\n    TransactionGossiper(super::Event<Transaction>),\n    #[from]\n    NetworkRequest(NetworkRequest<NodeMessage>),\n    #[from]\n    StorageRequest(StorageRequest),\n    #[from]\n    AcceptTransactionRequest(AcceptTransactionRequest),\n    #[from]\n    TransactionAcceptorAnnouncement(#[serde(skip_serializing)] TransactionAcceptorAnnouncement),\n    #[from]\n    TransactionGossiperAnnouncement(#[serde(skip_serializing)] GossiperAnnouncement<Transaction>),\n    #[from]\n    TransactionGossiperIncoming(GossiperIncoming<Transaction>),\n}\n\nimpl ReactorEvent for Event {\n    fn is_control(&self) -> bool {\n        false\n    }\n\n    fn try_into_control(self) -> Option<ControlAnnouncement> {\n        None\n    }\n}\n\nimpl From<NetworkRequest<Message<Transaction>>> for Event {\n    fn from(request: NetworkRequest<Message<Transaction>>) -> Self {\n        Event::NetworkRequest(request.map_payload(NodeMessage::from))\n    }\n}\n\ntrait Unhandled {}\n\nimpl<T: Unhandled> From<T> for Event {\n    fn from(_: T) -> Self {\n        unimplemented!(\"not handled in gossiper tests\")\n    }\n}\n\nimpl Unhandled for ConsensusDemand {}\nimpl Unhandled for ControlAnnouncement {}\nimpl Unhandled for FatalAnnouncement {}\nimpl Unhandled for ConsensusMessageIncoming {}\nimpl Unhandled for GossiperIncoming<BlockV2> {}\nimpl Unhandled for GossiperIncoming<FinalitySignatureV2> {}\nimpl Unhandled for GossiperIncoming<GossipedAddress> {}\nimpl Unhandled for NetRequestIncoming {}\nimpl Unhandled for NetResponseIncoming {}\nimpl Unhandled for TrieRequestIncoming {}\nimpl Unhandled for TrieDemand {}\nimpl Unhandled for TrieResponseIncoming {}\nimpl Unhandled for FinalitySignatureIncoming {}\n\n/// Error type returned by the test reactor.\n#[derive(Debug, Error)]\nenum Error {\n    #[error(\"prometheus (metrics) error: {0}\")]\n    Metrics(#[from] 
prometheus::Error),\n}\n\nstruct Reactor {\n    network: InMemoryNetwork<NodeMessage>,\n    storage: Storage,\n    fake_transaction_acceptor: FakeTransactionAcceptor,\n    transaction_gossiper: Gossiper<{ Transaction::ID_IS_COMPLETE_ITEM }, Transaction>,\n    _storage_tempdir: TempDir,\n}\n\nimpl Drop for Reactor {\n    fn drop(&mut self) {\n        NetworkController::<NodeMessage>::remove_node(&self.network.node_id())\n    }\n}\n\nimpl reactor::Reactor for Reactor {\n    type Event = Event;\n    type Config = Config;\n    type Error = Error;\n\n    fn new(\n        config: Self::Config,\n        _chainspec: Arc<Chainspec>,\n        _chainspec_raw_bytes: Arc<ChainspecRawBytes>,\n        _network_identity: NetworkIdentity,\n        registry: &Registry,\n        event_queue: EventQueueHandle<Self::Event>,\n        rng: &mut NodeRng,\n    ) -> Result<(Self, Effects<Self::Event>), Self::Error> {\n        let (storage_config, storage_tempdir) = storage::Config::new_for_tests(1);\n        let storage_withdir = WithDir::new(storage_tempdir.path(), storage_config);\n        let storage = Storage::new(\n            &storage_withdir,\n            None,\n            ProtocolVersion::from_parts(1, 0, 0),\n            EraId::default(),\n            \"test\",\n            MAX_TTL.into(),\n            RECENT_ERA_COUNT,\n            Some(registry),\n            false,\n            TransactionConfig::default(),\n        )\n        .unwrap();\n\n        let fake_transaction_acceptor = FakeTransactionAcceptor::new();\n        let transaction_gossiper = Gossiper::<{ Transaction::ID_IS_COMPLETE_ITEM }, _>::new(\n            \"transaction_gossiper\",\n            config,\n            registry,\n        )?;\n\n        let network = NetworkController::create_node(event_queue, rng);\n        let reactor = Reactor {\n            network,\n            storage,\n            fake_transaction_acceptor,\n            transaction_gossiper,\n            _storage_tempdir: storage_tempdir,\n        
};\n\n        Ok((reactor, Effects::new()))\n    }\n\n    fn dispatch_event(\n        &mut self,\n        effect_builder: EffectBuilder<Self::Event>,\n        rng: &mut NodeRng,\n        event: Event,\n    ) -> Effects<Self::Event> {\n        trace!(?event);\n        match event {\n            Event::Storage(event) => reactor::wrap_effects(\n                Event::Storage,\n                self.storage.handle_event(effect_builder, rng, event),\n            ),\n            Event::TransactionAcceptor(event) => reactor::wrap_effects(\n                Event::TransactionAcceptor,\n                self.fake_transaction_acceptor\n                    .handle_event(effect_builder, rng, event),\n            ),\n            Event::TransactionGossiper(super::Event::ItemReceived {\n                item_id,\n                source,\n                target,\n            }) => {\n                // Ensure the correct target type for transactions is provided.\n                assert_eq!(target, EXPECTED_GOSSIP_TARGET);\n                let event = super::Event::ItemReceived {\n                    item_id,\n                    source,\n                    target,\n                };\n                reactor::wrap_effects(\n                    Event::TransactionGossiper,\n                    self.transaction_gossiper\n                        .handle_event(effect_builder, rng, event),\n                )\n            }\n            Event::TransactionGossiper(event) => reactor::wrap_effects(\n                Event::TransactionGossiper,\n                self.transaction_gossiper\n                    .handle_event(effect_builder, rng, event),\n            ),\n            Event::NetworkRequest(NetworkRequest::Gossip {\n                payload,\n                gossip_target,\n                count,\n                exclude,\n                auto_closing_responder,\n            }) => {\n                // Ensure the correct target type for transactions is carried through to the\n            
    // `Network`.\n                assert_eq!(gossip_target, EXPECTED_GOSSIP_TARGET);\n                let request = NetworkRequest::Gossip {\n                    payload,\n                    gossip_target,\n                    count,\n                    exclude,\n                    auto_closing_responder,\n                };\n                reactor::wrap_effects(\n                    Event::Network,\n                    self.network\n                        .handle_event(effect_builder, rng, request.into()),\n                )\n            }\n            Event::NetworkRequest(request) => reactor::wrap_effects(\n                Event::Network,\n                self.network\n                    .handle_event(effect_builder, rng, request.into()),\n            ),\n            Event::StorageRequest(request) => reactor::wrap_effects(\n                Event::Storage,\n                self.storage\n                    .handle_event(effect_builder, rng, request.into()),\n            ),\n            Event::AcceptTransactionRequest(AcceptTransactionRequest {\n                transaction,\n                is_speculative,\n                responder,\n            }) => {\n                assert!(!is_speculative);\n                let event = transaction_acceptor::Event::Accept {\n                    transaction,\n                    source: Source::Client,\n                    maybe_responder: Some(responder),\n                };\n                self.dispatch_event(effect_builder, rng, Event::TransactionAcceptor(event))\n            }\n            Event::TransactionAcceptorAnnouncement(\n                TransactionAcceptorAnnouncement::AcceptedNewTransaction {\n                    transaction,\n                    source,\n                },\n            ) => {\n                let event = super::Event::ItemReceived {\n                    item_id: transaction.gossip_id(),\n                    source,\n                    target: transaction.gossip_target(),\n               
 };\n                self.dispatch_event(effect_builder, rng, Event::TransactionGossiper(event))\n            }\n            Event::TransactionAcceptorAnnouncement(\n                TransactionAcceptorAnnouncement::InvalidTransaction {\n                    transaction: _,\n                    source: _,\n                },\n            ) => Effects::new(),\n            Event::TransactionGossiperAnnouncement(GossiperAnnouncement::NewItemBody {\n                item,\n                sender,\n            }) => reactor::wrap_effects(\n                Event::TransactionAcceptor,\n                self.fake_transaction_acceptor.handle_event(\n                    effect_builder,\n                    rng,\n                    transaction_acceptor::Event::Accept {\n                        transaction: *item,\n                        source: Source::Peer(sender),\n                        maybe_responder: None,\n                    },\n                ),\n            ),\n            Event::TransactionGossiperAnnouncement(_ann) => Effects::new(),\n            Event::Network(event) => reactor::wrap_effects(\n                Event::Network,\n                self.network.handle_event(effect_builder, rng, event),\n            ),\n            Event::TransactionGossiperIncoming(incoming) => reactor::wrap_effects(\n                Event::TransactionGossiper,\n                self.transaction_gossiper\n                    .handle_event(effect_builder, rng, incoming.into()),\n            ),\n        }\n    }\n}\n\nimpl NetworkedReactor for Reactor {\n    fn node_id(&self) -> NodeId {\n        self.network.node_id()\n    }\n}\n\nfn announce_transaction_received(\n    transaction: &Transaction,\n) -> impl FnOnce(EffectBuilder<Event>) -> Effects<Event> {\n    let txn = transaction.clone();\n    |effect_builder: EffectBuilder<Event>| {\n        effect_builder.try_accept_transaction(txn, false).ignore()\n    }\n}\n\nasync fn run_gossip(rng: &mut TestRng, network_size: usize, txn_count: 
usize) {\n    const TIMEOUT: Duration = Duration::from_secs(30);\n    const QUIET_FOR: Duration = Duration::from_millis(50);\n\n    NetworkController::<NodeMessage>::create_active();\n    let mut network = TestingNetwork::<Reactor>::new();\n\n    // Add `network_size` nodes.\n    let node_ids = network.add_nodes(rng, network_size).await;\n\n    // Create `txn_count` random transactions.\n    let (all_txn_hashes, mut txns): (BTreeSet<_>, Vec<_>) = iter::repeat_with(|| {\n        let txn = Transaction::random(rng);\n        (txn.hash(), txn)\n    })\n    .take(txn_count)\n    .unzip();\n\n    // Give each transaction to a randomly-chosen node to be gossiped.\n    for txn in txns.drain(..) {\n        let index: usize = rng.gen_range(0..network_size);\n        network\n            .process_injected_effect_on(&node_ids[index], announce_transaction_received(&txn))\n            .await;\n    }\n\n    // Check every node has every transaction stored locally.\n    let all_txns_held = |nodes: &HashMap<NodeId, Runner<ConditionCheckReactor<Reactor>>>| {\n        nodes.values().all(|runner| {\n            for hash in all_txn_hashes.iter() {\n                if runner\n                    .reactor()\n                    .inner()\n                    .storage\n                    .get_transaction_by_hash(*hash)\n                    .is_none()\n                {\n                    return false;\n                }\n            }\n            true\n        })\n    };\n    network.settle_on(rng, all_txns_held, TIMEOUT).await;\n\n    // Ensure all responders are called before dropping the network.\n    network.settle(rng, QUIET_FOR, TIMEOUT).await;\n\n    NetworkController::<NodeMessage>::remove_active();\n}\n\n#[tokio::test]\nasync fn should_gossip() {\n    const NETWORK_SIZES: [usize; 3] = [2, 5, 10];\n    const TXN_COUNTS: [usize; 3] = [1, 10, 30];\n\n    let rng = &mut TestRng::new();\n\n    for network_size in &NETWORK_SIZES {\n        for txn_count in &TXN_COUNTS {\n            
run_gossip(rng, *network_size, *txn_count).await\n        }\n    }\n}\n\n#[tokio::test]\nasync fn should_get_from_alternate_source() {\n    const NETWORK_SIZE: usize = 3;\n    const POLL_DURATION: Duration = Duration::from_millis(10);\n    const TIMEOUT: Duration = Duration::from_secs(2);\n\n    NetworkController::<NodeMessage>::create_active();\n    let mut network = TestingNetwork::<Reactor>::new();\n    let rng = &mut TestRng::new();\n\n    // Add `NETWORK_SIZE` nodes.\n    let node_ids = network.add_nodes(rng, NETWORK_SIZE).await;\n\n    // Create random transaction.\n    let txn = Transaction::random(rng);\n    let txn_hash = txn.hash();\n\n    // Give the transaction to nodes 0 and 1 to be gossiped.\n    for node_id in node_ids.iter().take(2) {\n        network\n            .process_injected_effect_on(node_id, announce_transaction_received(&txn))\n            .await;\n    }\n\n    // Run node 0 until it has sent the gossip request then remove it from the network.\n    let made_gossip_request = |event: &Event| -> bool {\n        matches!(event, Event::NetworkRequest(NetworkRequest::Gossip { .. }))\n    };\n    network\n        .crank_until(&node_ids[0], rng, made_gossip_request, TIMEOUT)\n        .await;\n    assert!(network.remove_node(&node_ids[0]).is_some());\n    debug!(\"removed node {}\", &node_ids[0]);\n\n    // Run node 2 until it receives and responds to the gossip request from node 0.\n    let node_id_0 = node_ids[0];\n    let sent_gossip_response = move |event: &Event| -> bool {\n        match event {\n            Event::NetworkRequest(NetworkRequest::SendMessage { dest, payload, .. }) => {\n                if let NodeMessage::TransactionGossiper(Message::GossipResponse { .. 
}) = **payload\n                {\n                    **dest == node_id_0\n                } else {\n                    false\n                }\n            }\n            _ => false,\n        }\n    };\n    network\n        .crank_until(&node_ids[2], rng, sent_gossip_response, TIMEOUT)\n        .await;\n\n    // Run nodes 1 and 2 until settled.  Node 2 will be waiting for the transaction from node 0.\n    network.settle(rng, POLL_DURATION, TIMEOUT).await;\n\n    // Advance time to trigger node 2's timeout causing it to request the transaction from node 1.\n    let duration_to_advance = Config::default().get_remainder_timeout();\n    testing::advance_time(duration_to_advance.into()).await;\n\n    // Check node 0 has the transaction stored locally.\n    let txn_held = |nodes: &HashMap<NodeId, Runner<ConditionCheckReactor<Reactor>>>| {\n        let runner = nodes.get(&node_ids[2]).unwrap();\n        runner\n            .reactor()\n            .inner()\n            .storage\n            .get_transaction_by_hash(txn_hash)\n            .map(|retrieved_txn| retrieved_txn == txn)\n            .unwrap_or_default()\n    };\n    network.settle_on(rng, txn_held, TIMEOUT).await;\n\n    NetworkController::<NodeMessage>::remove_active();\n}\n\n#[tokio::test]\nasync fn should_timeout_gossip_response() {\n    const PAUSE_DURATION: Duration = Duration::from_millis(50);\n    const TIMEOUT: Duration = Duration::from_secs(2);\n\n    NetworkController::<NodeMessage>::create_active();\n    let mut network = TestingNetwork::<Reactor>::new();\n    let rng = &mut TestRng::new();\n\n    // The target number of peers to infect with a given piece of data.\n    let infection_target = Config::default().infection_target();\n\n    // Add `infection_target + 1` nodes.\n    let mut node_ids = network.add_nodes(rng, infection_target as usize + 1).await;\n\n    // Create random transaction.\n    let txn = Transaction::random(rng);\n    let txn_hash = txn.hash();\n\n    // Give the transaction to 
node 0 to be gossiped.\n    network\n        .process_injected_effect_on(&node_ids[0], announce_transaction_received(&txn))\n        .await;\n\n    // Run node 0 until it has sent the gossip requests.\n    let made_gossip_request = |event: &Event| -> bool {\n        matches!(\n            event,\n            Event::TransactionGossiper(super::Event::GossipedTo { .. })\n        )\n    };\n    network\n        .crank_until(&node_ids[0], rng, made_gossip_request, TIMEOUT)\n        .await;\n    // Give node 0 time to set the timeouts before advancing the clock.\n    time::sleep(PAUSE_DURATION).await;\n\n    // Replace all nodes except node 0 with new nodes.\n    for node_id in node_ids.drain(1..) {\n        assert!(network.remove_node(&node_id).is_some());\n        debug!(\"removed node {}\", node_id);\n    }\n    for _ in 0..infection_target {\n        let (node_id, _runner) = network.add_node(rng).await.unwrap();\n        node_ids.push(node_id);\n    }\n\n    // Advance time to trigger node 0's timeout causing it to gossip to the new nodes.\n    let duration_to_advance = Config::default().gossip_request_timeout();\n    testing::advance_time(duration_to_advance.into()).await;\n\n    // Check every node has every transaction stored locally.\n    let txn_held = |nodes: &HashMap<NodeId, Runner<ConditionCheckReactor<Reactor>>>| {\n        nodes.values().all(|runner| {\n            runner\n                .reactor()\n                .inner()\n                .storage\n                .get_transaction_by_hash(txn_hash)\n                .map(|retrieved_txn| retrieved_txn == txn)\n                .unwrap_or_default()\n        })\n    };\n    network.settle_on(rng, txn_held, TIMEOUT).await;\n\n    NetworkController::<NodeMessage>::remove_active();\n}\n\n#[tokio::test]\nasync fn should_timeout_new_item_from_peer() {\n    const NETWORK_SIZE: usize = 2;\n    const VALIDATE_AND_STORE_TIMEOUT: Duration = Duration::from_secs(1);\n    const TIMEOUT: Duration = 
Duration::from_secs(5);\n\n    NetworkController::<NodeMessage>::create_active();\n    let mut network = TestingNetwork::<Reactor>::new();\n    let rng = &mut TestRng::new();\n\n    let node_ids = network.add_nodes(rng, NETWORK_SIZE).await;\n    let node_0 = node_ids[0];\n    let node_1 = node_ids[1];\n    // Set the timeout on node 0 low for testing.\n    let reactor_0 = network\n        .nodes_mut()\n        .get_mut(&node_0)\n        .unwrap()\n        .reactor_mut()\n        .inner_mut();\n    reactor_0.transaction_gossiper.validate_and_store_timeout = VALIDATE_AND_STORE_TIMEOUT;\n    // Switch off the fake transaction acceptor on node 0 so that once the new transaction is\n    // received, no component triggers the `ItemReceived` event.\n    reactor_0.fake_transaction_acceptor.set_active(false);\n\n    let txn = Transaction::random(rng);\n\n    // Give the transaction to node 1 to gossip to node 0.\n    network\n        .process_injected_effect_on(&node_1, announce_transaction_received(&txn))\n        .await;\n\n    // Run the network until node 1 has sent the gossip request and node 0 has handled it to the\n    // point where the `NewItemBody` announcement has been received).\n    let got_new_item_body_announcement = |event: &Event| -> bool {\n        matches!(\n            event,\n            Event::TransactionGossiperAnnouncement(GossiperAnnouncement::NewItemBody { .. })\n        )\n    };\n    network\n        .crank_all_until(&node_0, rng, got_new_item_body_announcement, TIMEOUT)\n        .await;\n\n    // Run node 0 until it receives its own `CheckItemReceivedTimeout` event.\n    let received_timeout_event = |event: &Event| -> bool {\n        matches!(\n            event,\n            Event::TransactionGossiper(super::Event::CheckItemReceivedTimeout { .. 
})\n        )\n    };\n    network\n        .crank_until(&node_0, rng, received_timeout_event, TIMEOUT)\n        .await;\n\n    // Ensure node 0 makes a `FinishedGossiping` announcement.\n    let made_finished_gossiping_announcement = |event: &Event| -> bool {\n        matches!(\n            event,\n            Event::TransactionGossiperAnnouncement(GossiperAnnouncement::FinishedGossiping(_))\n        )\n    };\n    network\n        .crank_until(&node_0, rng, made_finished_gossiping_announcement, TIMEOUT)\n        .await;\n\n    NetworkController::<NodeMessage>::remove_active();\n}\n\n#[tokio::test]\nasync fn should_not_gossip_old_stored_item_again() {\n    const NETWORK_SIZE: usize = 2;\n    const TIMEOUT: Duration = Duration::from_secs(2);\n\n    NetworkController::<NodeMessage>::create_active();\n    let mut network = TestingNetwork::<Reactor>::new();\n    let rng = &mut TestRng::new();\n\n    let node_ids = network.add_nodes(rng, NETWORK_SIZE).await;\n    let node_0 = node_ids[0];\n\n    let txn = Transaction::random(rng);\n\n    // Store the transaction on node 0.\n    let store_txn = |effect_builder: EffectBuilder<Event>| {\n        effect_builder\n            .put_transaction_to_storage(txn.clone())\n            .ignore()\n    };\n    network.process_injected_effect_on(&node_0, store_txn).await;\n\n    // Node 1 sends a gossip message to node 0.\n    network\n        .process_injected_effect_on(&node_0, |effect_builder| {\n            let event = Event::TransactionGossiperIncoming(GossiperIncoming {\n                sender: node_ids[1],\n                message: Box::new(Message::Gossip(txn.gossip_id())),\n            });\n            effect_builder\n                .into_inner()\n                .schedule(event, QueueKind::Gossip)\n                .ignore()\n        })\n        .await;\n\n    // Run node 0 until it has handled the gossip message and checked if the transaction is already\n    // stored.\n    let checked_if_stored = |event: &Event| -> bool 
{\n        matches!(\n            event,\n            Event::TransactionGossiper(super::Event::IsStoredResult { .. })\n        )\n    };\n    network\n        .crank_until(&node_0, rng, checked_if_stored, TIMEOUT)\n        .await;\n    // Assert the message did not cause a new entry in the gossip table and spawned no new events.\n    assert!(network\n        .nodes()\n        .get(&node_0)\n        .unwrap()\n        .reactor()\n        .inner()\n        .transaction_gossiper\n        .table\n        .is_empty());\n    assert!(matches!(\n        network.crank(&node_0, rng).await,\n        TryCrankOutcome::NoEventsToProcess\n    ));\n\n    NetworkController::<NodeMessage>::remove_active();\n}\n\nenum Unexpected {\n    Response,\n    GetItem,\n    Item,\n}\n\nasync fn should_ignore_unexpected_message(message_type: Unexpected) {\n    const NETWORK_SIZE: usize = 2;\n    const TIMEOUT: Duration = Duration::from_secs(2);\n\n    NetworkController::<NodeMessage>::create_active();\n    let mut network = TestingNetwork::<Reactor>::new();\n    let rng = &mut TestRng::new();\n\n    let node_ids = network.add_nodes(rng, NETWORK_SIZE).await;\n    let node_0 = node_ids[0];\n\n    let txn = Box::new(Transaction::random(rng));\n\n    let message = match message_type {\n        Unexpected::Response => Message::GossipResponse {\n            item_id: txn.gossip_id(),\n            is_already_held: false,\n        },\n        Unexpected::GetItem => Message::GetItem(txn.gossip_id()),\n        Unexpected::Item => Message::Item(txn),\n    };\n\n    // Node 1 sends an unexpected message to node 0.\n    network\n        .process_injected_effect_on(&node_0, |effect_builder| {\n            let event = Event::TransactionGossiperIncoming(GossiperIncoming {\n                sender: node_ids[1],\n                message: Box::new(message),\n            });\n            effect_builder\n                .into_inner()\n                .schedule(event, QueueKind::Gossip)\n                .ignore()\n    
    })\n        .await;\n\n    // Run node 0 until it has handled the gossip message.\n    let received_gossip_message =\n        |event: &Event| -> bool { matches!(event, Event::TransactionGossiperIncoming(..)) };\n    network\n        .crank_until(&node_0, rng, received_gossip_message, TIMEOUT)\n        .await;\n    // Assert the message did not cause a new entry in the gossip table and spawned no new events.\n    assert!(network\n        .nodes()\n        .get(&node_0)\n        .unwrap()\n        .reactor()\n        .inner()\n        .transaction_gossiper\n        .table\n        .is_empty());\n    assert!(matches!(\n        network.crank(&node_0, rng).await,\n        TryCrankOutcome::NoEventsToProcess\n    ));\n\n    NetworkController::<NodeMessage>::remove_active();\n}\n\n#[tokio::test]\nasync fn should_ignore_unexpected_response_message() {\n    should_ignore_unexpected_message(Unexpected::Response).await\n}\n\n#[tokio::test]\nasync fn should_ignore_unexpected_get_item_message() {\n    should_ignore_unexpected_message(Unexpected::GetItem).await\n}\n\n#[tokio::test]\nasync fn should_ignore_unexpected_item_message() {\n    should_ignore_unexpected_message(Unexpected::Item).await\n}\n"
  },
  {
    "path": "node/src/components/gossiper.rs",
    "content": "mod config;\n#[cfg(test)]\nmod error;\nmod event;\nmod gossip_item;\nmod gossip_table;\nmod item_provider;\nmod message;\nmod metrics;\nmod provider_impls;\nmod tests;\n\nuse std::{\n    collections::HashSet,\n    fmt::{self, Debug, Formatter},\n    time::Duration,\n};\n\nuse datasize::DataSize;\nuse prometheus::Registry;\nuse tracing::{debug, error, trace, warn};\n\nuse crate::{\n    components::Component,\n    effect::{\n        announcements::GossiperAnnouncement,\n        incoming::GossiperIncoming,\n        requests::{BeginGossipRequest, NetworkRequest, StorageRequest},\n        EffectBuilder, EffectExt, Effects, GossipTarget,\n    },\n    types::NodeId,\n    utils::Source,\n    NodeRng,\n};\npub(crate) use config::Config;\npub(crate) use event::Event;\npub(crate) use gossip_item::{GossipItem, LargeGossipItem, SmallGossipItem};\nuse gossip_table::{GossipAction, GossipTable};\nuse item_provider::ItemProvider;\npub(crate) use message::Message;\nuse metrics::Metrics;\n\n/// The component which gossips to peers and handles incoming gossip messages from peers.\n#[allow(clippy::type_complexity)]\npub(crate) struct Gossiper<const ID_IS_COMPLETE_ITEM: bool, T>\nwhere\n    T: GossipItem + 'static,\n{\n    table: GossipTable<T::Id>,\n    gossip_timeout: Duration,\n    get_from_peer_timeout: Duration,\n    validate_and_store_timeout: Duration,\n    name: &'static str,\n    metrics: Metrics,\n}\n\nimpl<const ID_IS_COMPLETE_ITEM: bool, T: GossipItem + 'static> Gossiper<ID_IS_COMPLETE_ITEM, T> {\n    /// Constructs a new gossiper component.\n    ///\n    /// Must be supplied with a name, which should be a snake-case identifier to disambiguate the\n    /// specific gossiper from other potentially present gossipers.\n    pub(crate) fn new(\n        name: &'static str,\n        config: Config,\n        registry: &Registry,\n    ) -> Result<Self, prometheus::Error> {\n        Ok(Gossiper {\n            table: GossipTable::new(config),\n            
gossip_timeout: config.gossip_request_timeout().into(),\n            get_from_peer_timeout: config.get_remainder_timeout().into(),\n            validate_and_store_timeout: config.validate_and_store_timeout().into(),\n            name,\n            metrics: Metrics::new(name, registry)?,\n        })\n    }\n\n    /// This could be the first time we've encountered this item in the gossiper (e.g. the\n    /// `Network` component requesting that we gossip an address, or the `TransactionAcceptor`\n    /// having accepted a transaction which we received from a client), or it could be the result\n    /// of this gossiper having requested the complete data from a peer, announcing it, and that\n    /// complete item having been deemed valid by the relevant component and stored is now ready to\n    /// be gossiped onwards by us.\n    fn handle_item_received<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        item_id: T::Id,\n        source: Source,\n        target: GossipTarget,\n    ) -> Effects<Event<T>>\n    where\n        REv: From<NetworkRequest<Message<T>>> + From<GossiperAnnouncement<T>> + Send,\n    {\n        debug!(item=%item_id, %source, \"received new gossip item\");\n        match self\n            .table\n            .new_complete_data(&item_id, source.node_id(), target)\n        {\n            GossipAction::ShouldGossip(should_gossip) => {\n                self.metrics.items_received.inc();\n                Self::gossip(\n                    effect_builder,\n                    item_id,\n                    should_gossip.target,\n                    should_gossip.count,\n                    should_gossip.exclude_peers,\n                )\n            }\n            GossipAction::Noop => Effects::new(),\n            GossipAction::AnnounceFinished => {\n                effect_builder.announce_finished_gossiping(item_id).ignore()\n            }\n            GossipAction::GetRemainder { .. 
} | GossipAction::AwaitingRemainder => {\n                error!(\"can't be waiting for remainder since we hold the complete data\");\n                Effects::new()\n            }\n        }\n    }\n\n    /// Gossips the given item ID to `count` random peers excluding the indicated ones.\n    fn gossip<REv>(\n        effect_builder: EffectBuilder<REv>,\n        item_id: T::Id,\n        gossip_target: GossipTarget,\n        count: usize,\n        exclude_peers: HashSet<NodeId>,\n    ) -> Effects<Event<T>>\n    where\n        REv: From<NetworkRequest<Message<T>>> + Send,\n    {\n        let message = Message::Gossip(item_id.clone());\n        effect_builder\n            .gossip_message(message, gossip_target, count, exclude_peers)\n            .event(move |peers| Event::GossipedTo {\n                item_id,\n                requested_count: count,\n                peers,\n            })\n    }\n\n    /// Handles the response from the network component detailing which peers it gossiped to.\n    fn gossiped_to<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        item_id: T::Id,\n        requested_count: usize,\n        peers: HashSet<NodeId>,\n    ) -> Effects<Event<T>>\n    where\n        REv: From<GossiperAnnouncement<T>> + Send,\n    {\n        self.metrics.times_gossiped.inc_by(peers.len() as u64);\n        // We don't have any peers to gossip to, so pause the process, which will eventually result\n        // in the entry being removed.\n        if peers.is_empty() {\n            self.metrics.times_ran_out_of_peers.inc();\n        }\n\n        // We didn't gossip to as many peers as was requested.  
Reduce the table entry's in-flight\n        // count.\n        let mut effects = Effects::new();\n        if peers.len() < requested_count\n            && self\n                .table\n                .reduce_in_flight_count(&item_id, requested_count - peers.len())\n        {\n            effects.extend(\n                effect_builder\n                    .announce_finished_gossiping(item_id.clone())\n                    .ignore(),\n            );\n        }\n\n        // Remember which peers we *tried* to infect.\n        self.table\n            .register_infection_attempt(&item_id, peers.iter());\n\n        // Set timeouts to check later that the specified peers all responded.\n        for peer in peers {\n            let item_id = item_id.clone();\n            effects.extend(\n                effect_builder\n                    .set_timeout(self.gossip_timeout)\n                    .event(move |_| Event::CheckGossipTimeout { item_id, peer }),\n            )\n        }\n\n        effects\n    }\n\n    /// Checks that the given peer has responded to a previous gossip request we sent it.\n    fn check_gossip_timeout<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        item_id: T::Id,\n        peer: NodeId,\n    ) -> Effects<Event<T>>\n    where\n        REv: From<NetworkRequest<Message<T>>> + From<GossiperAnnouncement<T>> + Send,\n    {\n        match self.table.check_timeout(&item_id, peer) {\n            GossipAction::ShouldGossip(should_gossip) => Self::gossip(\n                effect_builder,\n                item_id,\n                should_gossip.target,\n                should_gossip.count,\n                should_gossip.exclude_peers,\n            ),\n            GossipAction::Noop => Effects::new(),\n            GossipAction::AnnounceFinished => {\n                effect_builder.announce_finished_gossiping(item_id).ignore()\n            }\n            GossipAction::GetRemainder { .. 
} | GossipAction::AwaitingRemainder => {\n                warn!(\n                    \"can't have gossiped if we don't hold the complete data - likely the timeout \\\n                    check was very delayed due to busy reactor\"\n                );\n                Effects::new()\n            }\n        }\n    }\n\n    /// Checks that the given peer has responded to a previous `GossipResponse` or `GetItem` we\n    /// sent it indicating we wanted to get the full item from it.\n    fn check_get_from_peer_timeout<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        item_id: T::Id,\n        peer: NodeId,\n    ) -> Effects<Event<T>>\n    where\n        REv: From<NetworkRequest<Message<T>>> + From<GossiperAnnouncement<T>> + Send,\n    {\n        match self.table.remove_holder_if_unresponsive(&item_id, peer) {\n            GossipAction::ShouldGossip(should_gossip) => Self::gossip(\n                effect_builder,\n                item_id,\n                should_gossip.target,\n                should_gossip.count,\n                should_gossip.exclude_peers,\n            ),\n\n            GossipAction::GetRemainder { holder } => {\n                // The previous peer failed to provide the item, so we still need to get it.  
Send\n                // a `GetItem` to a different holder and set a timeout to check we got the response.\n                let request = Message::GetItem(item_id.clone());\n                let mut effects = effect_builder.send_message(holder, request).ignore();\n                effects.extend(\n                    effect_builder\n                        .set_timeout(self.get_from_peer_timeout)\n                        .event(move |_| Event::CheckGetFromPeerTimeout {\n                            item_id,\n                            peer: holder,\n                        }),\n                );\n                effects\n            }\n\n            GossipAction::AnnounceFinished => {\n                effect_builder.announce_finished_gossiping(item_id).ignore()\n            }\n\n            GossipAction::Noop | GossipAction::AwaitingRemainder => Effects::new(),\n        }\n    }\n\n    /// Handles an incoming gossip request from a peer on the network, after having registered the\n    /// item in the gossip table.\n    fn handle_gossip<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        item_id: T::Id,\n        sender: NodeId,\n        action: GossipAction,\n    ) -> Effects<Event<T>>\n    where\n        REv: From<NetworkRequest<Message<T>>> + From<GossiperAnnouncement<T>> + Send,\n    {\n        let mut effects = match action {\n            GossipAction::ShouldGossip(should_gossip) => {\n                debug!(item=%item_id, %sender, %should_gossip, \"received gossip request\");\n                self.metrics.items_received.inc();\n                // Gossip the item ID.\n                let mut effects = Self::gossip(\n                    effect_builder,\n                    item_id.clone(),\n                    should_gossip.target,\n                    should_gossip.count,\n                    should_gossip.exclude_peers,\n                );\n\n                // If this is a new complete item to us, announce it.\n                if 
ID_IS_COMPLETE_ITEM && !should_gossip.is_already_held {\n                    debug!(item=%item_id, \"announcing new complete gossip item received\");\n                    effects.extend(\n                        effect_builder\n                            .announce_complete_item_received_via_gossip(item_id.clone())\n                            .ignore(),\n                    );\n                }\n\n                // Send a response to the sender indicating whether we already hold the item.\n                let reply = Message::GossipResponse {\n                    item_id: item_id.clone(),\n                    is_already_held: should_gossip.is_already_held,\n                };\n                effects.extend(effect_builder.send_message(sender, reply).ignore());\n                effects\n            }\n            GossipAction::GetRemainder { .. } => {\n                debug!(item=%item_id, %sender, %action, \"received gossip request\");\n                self.metrics.items_received.inc();\n                // Send a response to the sender indicating we want the full item from them, and set\n                // a timeout for this response.\n                let reply = Message::GossipResponse {\n                    item_id: item_id.clone(),\n                    is_already_held: false,\n                };\n                let mut effects = effect_builder.send_message(sender, reply).ignore();\n                let item_id_clone = item_id.clone();\n                effects.extend(\n                    effect_builder\n                        .set_timeout(self.get_from_peer_timeout)\n                        .event(move |_| Event::CheckGetFromPeerTimeout {\n                            item_id: item_id_clone,\n                            peer: sender,\n                        }),\n                );\n                effects\n            }\n            GossipAction::Noop\n            | GossipAction::AwaitingRemainder\n            | GossipAction::AnnounceFinished => {\n          
      trace!(item=%item_id, %sender, %action, \"received gossip request\");\n                // Send a response to the sender indicating we already hold the item.\n                let reply = Message::GossipResponse {\n                    item_id: item_id.clone(),\n                    is_already_held: true,\n                };\n                let mut effects = effect_builder.send_message(sender, reply).ignore();\n\n                if action == GossipAction::AnnounceFinished {\n                    effects.extend(\n                        effect_builder\n                            .announce_finished_gossiping(item_id.clone())\n                            .ignore(),\n                    );\n                }\n\n                effects\n            }\n        };\n        if T::REQUIRES_GOSSIP_RECEIVED_ANNOUNCEMENT {\n            effects.extend(\n                effect_builder\n                    .announce_gossip_received(item_id, sender)\n                    .ignore(),\n            );\n        }\n        effects\n    }\n\n    /// Handles an incoming gossip response from a peer on the network.\n    fn handle_gossip_response<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        item_id: T::Id,\n        is_already_held: bool,\n        sender: NodeId,\n    ) -> Effects<Event<T>>\n    where\n        REv: From<NetworkRequest<Message<T>>>\n            + From<StorageRequest>\n            + From<GossiperAnnouncement<T>>\n            + Send,\n        Self: ItemProvider<T>,\n    {\n        let mut effects: Effects<_> = Effects::new();\n        if !self.table.has_entry(&item_id) {\n            debug!(\n                item = %item_id,\n                %sender,\n                \"got a gossip response for an item we're not gossiping\"\n            );\n            return effects;\n        }\n\n        let action = if is_already_held {\n            self.table.already_infected(&item_id, sender)\n        } else {\n            if !ID_IS_COMPLETE_ITEM {\n     
           // `sender` doesn't hold the full item; get the item from the component responsible\n                // for holding it, then send it to `sender`.\n                let cloned_id = item_id.clone();\n                effects.extend(\n                    Self::get_from_storage(effect_builder, item_id.clone()).event(\n                        move |maybe_item| Event::GetFromStorageResult {\n                            item_id: cloned_id,\n                            requester: sender,\n                            maybe_item,\n                        },\n                    ),\n                );\n            }\n            self.table.we_infected(&item_id, sender)\n        };\n\n        match action {\n            GossipAction::ShouldGossip(should_gossip) => effects.extend(Self::gossip(\n                effect_builder,\n                item_id,\n                should_gossip.target,\n                should_gossip.count,\n                should_gossip.exclude_peers,\n            )),\n            GossipAction::Noop => (),\n            GossipAction::AnnounceFinished => {\n                effects.extend(effect_builder.announce_finished_gossiping(item_id).ignore())\n            }\n            GossipAction::GetRemainder { .. 
} => {\n                error!(\"shouldn't try to get remainder as result of receiving a gossip response\");\n            }\n            GossipAction::AwaitingRemainder => {\n                warn!(\n                    \"shouldn't have gossiped if we don't hold the complete item - possible \\\n                    significant latency, or malicious peer\"\n                );\n            }\n        }\n\n        effects\n    }\n\n    /// Handles the `Some` case when attempting to get the item from storage in order to send it to\n    /// the requester.\n    fn got_from_storage<REv>(\n        effect_builder: EffectBuilder<REv>,\n        item: Box<T>,\n        requester: NodeId,\n    ) -> Effects<Event<T>>\n    where\n        REv: From<NetworkRequest<Message<T>>> + Send,\n    {\n        let message = Message::Item(item);\n        effect_builder.send_message(requester, message).ignore()\n    }\n\n    /// Handles the `None` case when attempting to get the item from storage.\n    fn failed_to_get_from_storage<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        item_id: T::Id,\n    ) -> Effects<Event<T>>\n    where\n        REv: From<GossiperAnnouncement<T>> + Send,\n    {\n        error!(\n            \"finished gossiping {} since failed to get from storage\",\n            item_id\n        );\n\n        if self.table.force_finish(&item_id) {\n            return effect_builder.announce_finished_gossiping(item_id).ignore();\n        }\n\n        Effects::new()\n    }\n\n    fn handle_get_item_request<REv>(\n        &self,\n        effect_builder: EffectBuilder<REv>,\n        item_id: T::Id,\n        requester: NodeId,\n    ) -> Effects<Event<T>>\n    where\n        REv: From<StorageRequest> + Send,\n        Self: ItemProvider<T>,\n    {\n        if !self.table.has_entry(&item_id) {\n            debug!(\n                item = %item_id,\n                %requester,\n                \"got a gossip get-item request for an item we're not gossiping\"\n  
          );\n            return Effects::new();\n        }\n\n        Self::get_from_storage(effect_builder, item_id.clone()).event(move |maybe_item| {\n            Event::GetFromStorageResult {\n                item_id,\n                requester,\n                maybe_item,\n            }\n        })\n    }\n\n    fn handle_item_received_from_peer<REv>(\n        &self,\n        effect_builder: EffectBuilder<REv>,\n        item: Box<T>,\n        sender: NodeId,\n    ) -> Effects<Event<T>>\n    where\n        REv: From<GossiperAnnouncement<T>> + Send,\n    {\n        let item_id = item.gossip_id();\n        if !self.table.has_entry(&item_id) {\n            debug!(\n                item = %item_id,\n                %sender,\n                \"got a full gossip item for an item we're not gossiping\"\n            );\n            return Effects::new();\n        }\n\n        let mut effects = effect_builder\n            .announce_item_body_received_via_gossip(item, sender)\n            .ignore();\n        effects.extend(\n            effect_builder\n                .set_timeout(self.validate_and_store_timeout)\n                .event(move |_| Event::CheckItemReceivedTimeout { item_id }),\n        );\n        effects\n    }\n\n    /// Checks that having made a `NewItemBody` announcement (in `handle_item_received_from_peer`)\n    /// we have subsequently received an `ItemReceived` for the item from whichever component is\n    /// responsible for validating and storing the item.\n    fn check_item_received_timeout<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        item_id: T::Id,\n    ) -> Effects<Event<T>>\n    where\n        REv: From<GossiperAnnouncement<T>> + Send,\n    {\n        if self.table.finish_if_not_held_by_us(&item_id) {\n            return effect_builder.announce_finished_gossiping(item_id).ignore();\n        }\n        Effects::new()\n    }\n\n    /// Updates the gossiper metrics from the state of the gossip table.\n    fn 
update_gossip_table_metrics(&self) {\n        self.metrics\n            .table_items_current\n            .set(self.table.items_current() as i64);\n        self.metrics\n            .table_items_finished\n            .set(self.table.items_finished() as i64);\n    }\n}\n\n/// Impl for gossipers of large items, i.e. where `T::ID_IS_COMPLETE_ITEM` is false.\nimpl<T, REv> Component<REv> for Gossiper<false, T>\nwhere\n    T: LargeGossipItem + 'static,\n    REv: From<NetworkRequest<Message<T>>>\n        + From<StorageRequest>\n        + From<GossiperAnnouncement<T>>\n        + Send,\n    Self: ItemProvider<T>,\n{\n    type Event = Event<T>;\n\n    fn handle_event(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        _rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event> {\n        let effects = match event {\n            Event::BeginGossipRequest(BeginGossipRequest {\n                item_id,\n                source,\n                target,\n                responder,\n            }) => {\n                let mut effects =\n                    self.handle_item_received(effect_builder, item_id, source, target);\n                effects.extend(responder.respond(()).ignore());\n                effects\n            }\n            Event::ItemReceived {\n                item_id,\n                source,\n                target,\n            } => self.handle_item_received(effect_builder, item_id, source, target),\n            Event::GossipedTo {\n                item_id,\n                requested_count,\n                peers,\n            } => self.gossiped_to(effect_builder, item_id, requested_count, peers),\n            Event::CheckGossipTimeout { item_id, peer } => {\n                self.check_gossip_timeout(effect_builder, item_id, peer)\n            }\n            Event::CheckGetFromPeerTimeout { item_id, peer } => {\n                self.check_get_from_peer_timeout(effect_builder, item_id, peer)\n            }\n            
Event::Incoming(GossiperIncoming::<T> { sender, message }) => match *message {\n                Message::Gossip(item_id) => {\n                    Self::is_stored(effect_builder, item_id.clone()).event(move |result| {\n                        Event::IsStoredResult {\n                            item_id,\n                            sender,\n                            result,\n                        }\n                    })\n                }\n                Message::GossipResponse {\n                    item_id,\n                    is_already_held,\n                } => self.handle_gossip_response(effect_builder, item_id, is_already_held, sender),\n                Message::GetItem(item_id) => {\n                    self.handle_get_item_request(effect_builder, item_id, sender)\n                }\n                Message::Item(item) => {\n                    self.handle_item_received_from_peer(effect_builder, item, sender)\n                }\n            },\n            Event::CheckItemReceivedTimeout { item_id } => {\n                self.check_item_received_timeout(effect_builder, item_id)\n            }\n            Event::IsStoredResult {\n                item_id,\n                sender,\n                result: is_stored_locally,\n            } => {\n                let action = if self.table.has_entry(&item_id) || !is_stored_locally {\n                    self.table.new_data_id(&item_id, sender)\n                } else {\n                    // We're not already handling this item, and we do have the full item stored, so\n                    // don't initiate gossiping for it.\n                    GossipAction::Noop\n                };\n                self.handle_gossip(effect_builder, item_id, sender, action)\n            }\n            Event::GetFromStorageResult {\n                item_id,\n                requester,\n                maybe_item,\n            } => match maybe_item {\n                Some(item) => Self::got_from_storage(effect_builder, 
item, requester),\n                None => self.failed_to_get_from_storage(effect_builder, item_id),\n            },\n        };\n        self.update_gossip_table_metrics();\n        effects\n    }\n\n    fn name(&self) -> &str {\n        self.name\n    }\n}\n\n/// Impl for gossipers of small items, i.e. where `T::ID_IS_COMPLETE_ITEM` is true.\nimpl<T, REv> Component<REv> for Gossiper<true, T>\nwhere\n    T: SmallGossipItem + 'static,\n    REv: From<NetworkRequest<Message<T>>>\n        + From<StorageRequest>\n        + From<GossiperAnnouncement<T>>\n        + Send,\n    Self: ItemProvider<T>,\n{\n    type Event = Event<T>;\n\n    fn handle_event(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        _rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event> {\n        let effects = match event {\n            Event::BeginGossipRequest(BeginGossipRequest {\n                item_id,\n                source,\n                target,\n                responder,\n            }) => {\n                let mut effects =\n                    self.handle_item_received(effect_builder, item_id, source, target);\n                effects.extend(responder.respond(()).ignore());\n                effects\n            }\n            Event::ItemReceived {\n                item_id,\n                source,\n                target,\n            } => self.handle_item_received(effect_builder, item_id, source, target),\n            Event::GossipedTo {\n                item_id,\n                requested_count,\n                peers,\n            } => self.gossiped_to(effect_builder, item_id, requested_count, peers),\n            Event::CheckGossipTimeout { item_id, peer } => {\n                self.check_gossip_timeout(effect_builder, item_id, peer)\n            }\n            Event::CheckGetFromPeerTimeout { item_id, peer } => {\n                error!(%item_id, %peer, \"should not timeout getting small item from peer\");\n                
Effects::new()\n            }\n            Event::Incoming(GossiperIncoming::<T> { sender, message }) => match *message {\n                Message::Gossip(item_id) => {\n                    let target = <T as SmallGossipItem>::id_as_item(&item_id).gossip_target();\n                    let action = self.table.new_complete_data(&item_id, Some(sender), target);\n                    self.handle_gossip(effect_builder, item_id, sender, action)\n                }\n                Message::GossipResponse {\n                    item_id,\n                    is_already_held,\n                } => self.handle_gossip_response(effect_builder, item_id, is_already_held, sender),\n                Message::GetItem(item_id) => {\n                    debug!(%item_id, %sender, \"unexpected get request for small item\");\n                    Effects::new()\n                }\n                Message::Item(item) => {\n                    let item_id = item.gossip_id();\n                    debug!(%item_id, %sender, \"unexpected get response for small item\");\n                    Effects::new()\n                }\n            },\n            Event::CheckItemReceivedTimeout { item_id } => {\n                error!(%item_id, \"should not timeout item-received for small item\");\n                Effects::new()\n            }\n            event @ Event::IsStoredResult { .. 
} => {\n                error!(%event, \"unexpected is-stored result for small item\");\n                Effects::new()\n            }\n            Event::GetFromStorageResult {\n                item_id,\n                requester,\n                maybe_item,\n            } => {\n                error!(\n                    %item_id, %requester, ?maybe_item,\n                    \"unexpected get-from-storage result for small item\"\n                );\n                Effects::new()\n            }\n        };\n        self.update_gossip_table_metrics();\n        effects\n    }\n\n    fn name(&self) -> &str {\n        self.name\n    }\n}\n\nimpl<const ID_IS_COMPLETE_ITEM: bool, T: GossipItem + 'static> Debug\n    for Gossiper<ID_IS_COMPLETE_ITEM, T>\n{\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        formatter\n            .debug_struct(self.name)\n            .field(\"table\", &self.table)\n            .field(\"gossip_timeout\", &self.gossip_timeout)\n            .field(\"get_from_peer_timeout\", &self.get_from_peer_timeout)\n            .field(\n                \"validate_and_store_timeout\",\n                &self.validate_and_store_timeout,\n            )\n            .finish()\n    }\n}\n\nimpl<const ID_IS_COMPLETE_ITEM: bool, T: GossipItem + 'static> DataSize\n    for Gossiper<ID_IS_COMPLETE_ITEM, T>\n{\n    const IS_DYNAMIC: bool = true;\n\n    const STATIC_HEAP_SIZE: usize = 0;\n\n    #[inline]\n    fn estimate_heap_size(&self) -> usize {\n        let Gossiper {\n            table,\n            gossip_timeout,\n            get_from_peer_timeout,\n            validate_and_store_timeout,\n            name,\n            metrics: _,\n        } = self;\n\n        table.estimate_heap_size()\n            + gossip_timeout.estimate_heap_size()\n            + get_from_peer_timeout.estimate_heap_size()\n            + validate_and_store_timeout.estimate_heap_size()\n            + name.estimate_heap_size()\n    }\n}\n"
  },
  {
    "path": "node/src/components/in_memory_network.rs",
    "content": "//! Very fast networking component used for testing and simulations.\n//!\n//! The `InMemoryNetwork` represents a full virtual network with flawless connectivity and delivery\n//! by default.\n//!\n//! # Setup\n//!\n//! The network itself is managed by a `NetworkController` that can be used to create networking\n//! components for nodes. Let's demonstrate this with an example in which we\n//!\n//! 1. Define a fictional \"shouter\" component to utilize the network.\n//! 2. Create an application (in the form of a reactor) that connects this shouter to an in-memory\n//!    network of nodes.\n//! 3. Run a test that verifies everything is working.\n//!\n//! ```rust\n//! #\n//! # use std::{\n//! #     collections::HashMap,\n//! #     fmt::{self, Debug, Display, Formatter},\n//! #     ops::AddAssign,\n//! #     time::Duration,\n//! # };\n//! #\n//! # use derive_more::From;\n//! # use prometheus::Registry;\n//! # use rand::{rngs::OsRng, CryptoRng, Rng};\n//! #\n//! # use casper_node::{\n//! #     components::{\n//! #         in_memory_network::{InMemoryNetwork, NetworkController, NodeId},\n//! #         Component,\n//! #     },\n//! #     effect::{\n//! #         announcements::NetworkAnnouncement, requests::NetworkRequest, EffectBuilder, EffectExt,\n//! #         Effects,\n//! #     },\n//! #     reactor::{self, wrap_effects, EventQueueHandle},\n//! #     testing::network::{Network, NetworkedReactor},\n//! # };\n//! #\n//! # let mut runtime = tokio::runtime::Runtime::new().unwrap();\n//! #\n//! // Our network messages are just integers in this example.\n//! type Message = u64;\n//!\n//! // When gossiping, always select exactly two nodes.\n//! const TEST_GOSSIP_COUNT: usize = 2;\n//!\n//! // We will test with three nodes.\n//! const TEST_NODE_COUNT: usize = 3;\n//! # assert!(TEST_GOSSIP_COUNT < TEST_NODE_COUNT);\n//!\n//! /// The shouter component. Sends messages across the network and tracks incoming.\n//! #[derive(Debug)]\n//! struct Shouter {\n//!     
/// Values we will gossip.\n//!     whispers: Vec<Message>,\n//!     /// Values we will broadcast.\n//!     shouts: Vec<Message>,\n//!     /// Values we received.\n//!     received: Vec<(NodeId, Message)>,\n//! }\n//!\n//! impl Shouter {\n//!     /// Returns the totals of each message value received. Used for verification in testing.\n//!     fn count_messages(&self) -> HashMap<Message, usize> {\n//!         let mut totals = HashMap::<Message, usize>::new();\n//!\n//!         for (_node_id, message) in &self.received {\n//!             totals.entry(*message).or_default().add_assign(1);\n//!         }\n//!\n//!         totals\n//!     }\n//! }\n//!\n//! #[derive(Debug, From)]\n//! enum ShouterEvent<Message> {\n//!     #[from]\n//!     // We received a new message via the network.\n//!     Net(NetworkAnnouncement<Message>),\n//!     // Ready to send another message.\n//!     #[from]\n//!     ReadyToSend,\n//! }\n//!\n//! impl Shouter {\n//!     /// Creates a new shouter.\n//!     fn new<REv: Send, P: 'static>(effect_builder: EffectBuilder<REv>)\n//!             -> (Self, Effects<ShouterEvent<P>>) {\n//!         (Shouter {\n//!             whispers: Vec::new(),\n//!             shouts: Vec::new(),\n//!             received: Vec::new(),\n//!         }, effect_builder.immediately().event(|_| ShouterEvent::ReadyToSend))\n//!     }\n//! }\n//!\n//! // Besides its own events, the shouter is capable of receiving network messages.\n//! impl<REv, R> Component<REv, R> for Shouter\n//! where\n//!     REv: From<NetworkRequest<Message>> + Send,\n//! {\n//!     type Event = ShouterEvent<Message>;\n//!\n//!     fn handle_event(&mut self,\n//!         effect_builder: EffectBuilder<REv>,\n//!         _rng: &mut NodeRng,\n//!         event: Self::Event\n//!     ) -> Effects<Self::Event> {\n//!         match event {\n//!             ShouterEvent::Net(NetworkAnnouncement::MessageReceived { sender, payload }) => {\n//!                 // Record the message we received.\n//!               
  self.received.push((sender, payload));\n//!                 Effects::new()\n//!             }\n//!             ShouterEvent::ReadyToSend => {\n//!                 // If we need to whisper something, do so.\n//!                 if let Some(msg) = self.whispers.pop() {\n//!                     return effect_builder.gossip_message(msg,\n//!                                                          TEST_GOSSIP_COUNT,\n//!                                                          Default::default())\n//!                         .event(|_| ShouterEvent::ReadyToSend);\n//!                 }\n//!                 // Shouts get broadcast.\n//!                 if let Some(msg) = self.shouts.pop() {\n//!                     return effect_builder.broadcast_message(msg)\n//!                         .event(|_| ShouterEvent::ReadyToSend);\n//!                 }\n//!                 Effects::new()\n//!             }\n//!         }\n//!     }\n//! }\n//!\n//! /// The reactor ties the shouter component to a network.\n//! #[derive(Debug)]\n//! struct Reactor {\n//!     /// The connection to the internal network.\n//!     net: InMemoryNetwork<u64>,\n//!     /// Local shouter instance.\n//!     shouter: Shouter,\n//! }\n//!\n//! /// Reactor event\n//! #[derive(Debug, From)]\n//! enum Event {\n//!    /// Asked to perform a network action.\n//!    #[from]\n//!    Request(NetworkRequest<Message>),\n//!    /// Event for the shouter.\n//!    #[from]\n//!    Shouter(ShouterEvent<Message>),\n//!    /// Notified of some network event.\n//!    #[from]\n//!    Announcement(NetworkAnnouncement<Message>)\n//! };\n//! #\n//! # impl Display for Event {\n//! #   fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {\n//! #       Debug::fmt(self, fmt)\n//! #   }\n//! # }\n//! #\n//! # impl<P> Display for ShouterEvent<P>\n//! #     where P: Debug,\n//! # {\n//! #   fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {\n//! #       Debug::fmt(self, fmt)\n//! #   }\n//! # }\n//!\n//! 
impl reactor::Reactor for Reactor {\n//!     type Event = Event;\n//!     type Config = ();\n//!     type Error = anyhow::Error;\n//!\n//!     fn new<R: Rng + ?Sized>(\n//!            _cfg: Self::Config,\n//!            _registry: &Registry,\n//!            event_queue: EventQueueHandle<Self::Event>,\n//!            rng: &mut NodeRng,\n//!     ) -> Result<(Self, Effects<Self::Event>), anyhow::Error> {\n//!         let effect_builder = EffectBuilder::new(event_queue);\n//!         let (shouter, shouter_effect) = Shouter::new(effect_builder);\n//!\n//!         Ok((Reactor {\n//!             net: NetworkController::create_node(event_queue, rng),\n//!             shouter,\n//!         }, wrap_effects(From::from, shouter_effect)))\n//!     }\n//!\n//!     fn dispatch_event<R: Rng + ?Sized>(&mut self,\n//!                       effect_builder: EffectBuilder<Event>,\n//!                       rng: &mut NodeRng,\n//!                       event: Event\n//!     ) -> Effects<Event> {\n//!          match event {\n//!              Event::Announcement(anc) => { wrap_effects(From::from,\n//!                  self.shouter.handle_event(effect_builder, rng, anc.into())\n//!              )}\n//!              Event::Request(req) => { wrap_effects(From::from,\n//!                  self.net.handle_event(effect_builder, rng, req.into())\n//!              )}\n//!              Event::Shouter(ev) => { wrap_effects(From::from,\n//!                  self.shouter.handle_event(effect_builder, rng, ev)\n//!              )}\n//!          }\n//!     }\n//! }\n//!\n//! impl NetworkedReactor for Reactor {\n//!     fn node_id(&self) -> NodeId {\n//!         self.net.node_id()\n//!     }\n//! }\n//!\n//! // We can finally run the tests:\n//!\n//! # // We need to be inside a tokio runtime to execute `async` code.\n//! # runtime.block_on(async move {\n//! #\n//! // Create a new network controller that manages the network itself. This will register the\n//! 
// network controller on the current thread and allow initialization functions to find it.\n//! NetworkController::<Message>::create_active();\n//!\n//! // We can now create the network of nodes, using the `testing::Network` and insert three nodes.\n//! // Each node is given some data to send.\n//! let mut rng = OsRng;\n//! let mut net = Network::<Reactor>::new();\n//! let (id1, n1) = net.add_node(&mut rng).await.unwrap();\n//! n1.reactor_mut().shouter.shouts.push(1);\n//! n1.reactor_mut().shouter.shouts.push(2);\n//! n1.reactor_mut().shouter.whispers.push(3);\n//! n1.reactor_mut().shouter.whispers.push(4);\n//!\n//! let (id2, n2) = net.add_node(&mut rng).await.unwrap();\n//! n2.reactor_mut().shouter.shouts.push(6);\n//! n2.reactor_mut().shouter.whispers.push(4);\n//!\n//! let (id3, n3) = net.add_node(&mut rng).await.unwrap();\n//! n3.reactor_mut().shouter.whispers.push(8);\n//! n3.reactor_mut().shouter.shouts.push(1);\n//!\n//! net.settle(&mut rng, Duration::from_secs(1)).await;\n//! assert_eq!(net.nodes().len(), TEST_NODE_COUNT);\n//!\n//! let mut global_count = HashMap::<Message, usize>::new();\n//! for node_id in &[id1, id2, id3] {\n//!     let totals = net.nodes()[node_id].reactor().shouter.count_messages();\n//!\n//!     // The broadcast values should be the same for each node:\n//!     assert_eq!(totals[&1], 2);\n//!     assert_eq!(totals[&2], 1);\n//!     assert_eq!(totals[&6], 1);\n//!\n//!     // Add values to global_count count.\n//!     for (val, count) in totals.into_iter() {\n//!         global_count.entry(val).or_default().add_assign(count);\n//!     }\n//! }\n//!\n//! let mut expected = HashMap::new();\n//! let _ = expected.insert(1, 2 * TEST_NODE_COUNT);\n//! let _ = expected.insert(2, TEST_NODE_COUNT);\n//! let _ = expected.insert(3, TEST_GOSSIP_COUNT);\n//! let _ = expected.insert(4, 2 * TEST_GOSSIP_COUNT);\n//! let _ = expected.insert(6, TEST_NODE_COUNT);\n//! let _ = expected.insert(8, TEST_GOSSIP_COUNT);\n//! 
assert_eq!(global_count, expected);\n//!\n//! // It's good form to remove the active network.\n//! NetworkController::<Message>::remove_active();\n//!\n//! # }); // end of tokio::block_on\n//! ```\n\nuse std::{\n    any::Any,\n    cell::RefCell,\n    collections::{HashMap, HashSet},\n    fmt::{self, Display, Formatter},\n    sync::{Arc, RwLock},\n};\n\nuse rand::seq::IteratorRandom;\nuse serde::Serialize;\nuse tokio::sync::mpsc::{self, error::SendError};\nuse tracing::{debug, error, info, warn};\n\nuse casper_types::testing::TestRng;\n\nuse crate::{\n    components::Component,\n    effect::{requests::NetworkRequest, EffectBuilder, EffectExt, Effects},\n    logging,\n    reactor::{EventQueueHandle, QueueKind},\n    types::NodeId,\n    NodeRng,\n};\n\nuse super::network::FromIncoming;\n\nconst COMPONENT_NAME: &str = \"in_memory_network\";\n\n/// A network.\ntype Network<P> = Arc<RwLock<HashMap<NodeId, mpsc::UnboundedSender<(NodeId, P)>>>>;\n\n/// An in-memory network events.\n#[derive(Debug, Serialize)]\npub(crate) struct Event<P>(NetworkRequest<P>);\n\nimpl<P> From<NetworkRequest<P>> for Event<P> {\n    fn from(req: NetworkRequest<P>) -> Self {\n        Event(req)\n    }\n}\n\nimpl<P: Display> Display for Event<P> {\n    #[inline]\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        Display::fmt(&self.0, f)\n    }\n}\n\nthread_local! {\n    /// The currently active network as a thread local.\n    ///\n    /// The type is dynamic, every network can be of a distinct type when the payload `P` differs.\n    static ACTIVE_NETWORK: RefCell<Option<Box<dyn Any>>> = RefCell::new(None);\n}\n\n/// The network controller is used to control the network topology (e.g. 
adding and removing nodes).\n#[derive(Debug, Default)]\npub(crate) struct NetworkController<P> {\n    /// Channels for network communication.\n    nodes: Network<P>,\n}\n\nimpl<P> NetworkController<P>\nwhere\n    P: 'static + Send,\n{\n    /// Create a new, empty network.\n    fn new() -> Self {\n        let _ = logging::init();\n        NetworkController {\n            nodes: Default::default(),\n        }\n    }\n\n    /// Creates a new, empty network controller and sets it as active.\n    ///\n    /// # Panics\n    ///\n    /// Panics if the internal lock has been poisoned.\n    pub(crate) fn create_active() {\n        let _ = logging::init();\n        ACTIVE_NETWORK\n            .with(|active_network| active_network.borrow_mut().replace(Box::new(Self::new())));\n    }\n\n    /// Removes the active network.\n    ///\n    /// # Panics\n    ///\n    /// Panics if the internal lock has been poisoned, a network with the wrong type of message was\n    /// removed or if there was no network at all.\n    pub(crate) fn remove_active() {\n        assert!(\n            ACTIVE_NETWORK.with(|active_network| {\n                active_network\n                    .borrow_mut()\n                    .take()\n                    .expect(\"tried to remove non-existent network\")\n                    .is::<Self>()\n            }),\n            \"removed network was of wrong type\"\n        );\n    }\n\n    /// Creates an in-memory network component on the active network.\n    ///\n    /// # Panics\n    ///\n    /// Panics if the internal lock has been poisoned, there is no active network or the active\n    /// network is not of the correct message type.\n    pub(crate) fn create_node<REv>(\n        event_queue: EventQueueHandle<REv>,\n        rng: &mut TestRng,\n    ) -> InMemoryNetwork<P>\n    where\n        REv: Send + FromIncoming<P>,\n    {\n        ACTIVE_NETWORK.with(|active_network| {\n            active_network\n                .borrow_mut()\n                .as_mut()\n    
            .expect(\"tried to create node without active network set\")\n                .downcast_mut::<Self>()\n                .expect(\"active network has wrong message type\")\n                .create_node_local(event_queue, rng)\n        })\n    }\n\n    /// Removes an in-memory network component on the active network.\n    ///\n    /// # Panics\n    ///\n    /// Panics if the internal lock has been poisoned, the active network is not of the correct\n    /// message type, or the node to remove doesn't exist.\n    pub(crate) fn remove_node(node_id: &NodeId) {\n        ACTIVE_NETWORK.with(|active_network| {\n            if let Some(active_network) = active_network.borrow_mut().as_mut() {\n                active_network\n                    .downcast_mut::<Self>()\n                    .expect(\"active network has wrong message type\")\n                    .nodes\n                    .write()\n                    .expect(\"poisoned lock\")\n                    .remove(node_id)\n                    .expect(\"node doesn't exist in network\");\n            }\n        })\n    }\n\n    /// Creates a new networking node with a random node ID.\n    ///\n    /// Returns the already connected new networking component for new node.\n    pub(crate) fn create_node_local<REv>(\n        &self,\n        event_queue: EventQueueHandle<REv>,\n        rng: &mut TestRng,\n    ) -> InMemoryNetwork<P>\n    where\n        REv: Send + FromIncoming<P>,\n    {\n        InMemoryNetwork::new_with_data(event_queue, NodeId::random(rng), self.nodes.clone())\n    }\n}\n\n/// Networking component connected to an in-memory network.\n#[derive(Debug)]\npub(crate) struct InMemoryNetwork<P> {\n    /// Our node id.\n    node_id: NodeId,\n\n    /// The nodes map, contains the incoming channel for each virtual node.\n    nodes: Network<P>,\n}\n\nimpl<P> InMemoryNetwork<P>\nwhere\n    P: 'static + Send,\n{\n    /// Creates a new in-memory network node.\n    ///\n    /// This function is an alias of 
`NetworkController::create_node_local`.\n    pub(crate) fn new<REv>(event_queue: EventQueueHandle<REv>, rng: &mut NodeRng) -> Self\n    where\n        REv: Send + FromIncoming<P>,\n    {\n        NetworkController::create_node(event_queue, rng)\n    }\n\n    /// Creates a new in-memory network node.\n    fn new_with_data<REv>(\n        event_queue: EventQueueHandle<REv>,\n        node_id: NodeId,\n        nodes: Network<P>,\n    ) -> Self\n    where\n        REv: Send + FromIncoming<P>,\n    {\n        let (sender, receiver) = mpsc::unbounded_channel();\n\n        // Sanity check, ensure that we do not create duplicate nodes.\n        {\n            let mut nodes_write = nodes.write().expect(\"network lock poisoned\");\n            assert!(!nodes_write.contains_key(&node_id));\n            nodes_write.insert(node_id, sender);\n        }\n\n        tokio::spawn(receiver_task(event_queue, receiver));\n\n        InMemoryNetwork { node_id, nodes }\n    }\n\n    /// Returns this node's ID.\n    #[inline]\n    pub(crate) fn node_id(&self) -> NodeId {\n        self.node_id\n    }\n}\n\nimpl<P> InMemoryNetwork<P>\nwhere\n    P: Display,\n{\n    /// Internal helper, sends a payload to a node, ignoring but logging all errors.\n    fn send(\n        &self,\n        nodes: &HashMap<NodeId, mpsc::UnboundedSender<(NodeId, P)>>,\n        dest: NodeId,\n        payload: P,\n    ) {\n        if dest == self.node_id {\n            panic!(\"can't send message to self\");\n        }\n\n        match nodes.get(&dest) {\n            Some(sender) => {\n                if let Err(SendError((_, msg))) = sender.send((self.node_id, payload)) {\n                    warn!(%dest, %msg, \"could not send message (send error)\");\n\n                    // We do nothing else, the message is just dropped.\n                }\n            }\n            None => info!(%dest, %payload, \"dropping message to non-existent recipient\"),\n        }\n    }\n}\n\nimpl<P, REv> Component<REv> for 
InMemoryNetwork<P>\nwhere\n    P: Display + Clone,\n{\n    type Event = Event<P>;\n\n    fn handle_event(\n        &mut self,\n        _effect_builder: EffectBuilder<REv>,\n        rng: &mut NodeRng,\n        Event(event): Self::Event,\n    ) -> Effects<Self::Event> {\n        match event {\n            NetworkRequest::SendMessage {\n                dest,\n                payload,\n                respond_after_queueing: _,\n                auto_closing_responder,\n            } => {\n                if *dest == self.node_id {\n                    panic!(\"can't send message to self\");\n                }\n\n                if let Ok(guard) = self.nodes.read() {\n                    self.send(&guard, *dest, *payload);\n                } else {\n                    error!(\"network lock has been poisoned\")\n                };\n\n                auto_closing_responder.respond(()).ignore()\n            }\n            NetworkRequest::ValidatorBroadcast {\n                payload,\n                auto_closing_responder,\n                era_id: _,\n            } => {\n                if let Ok(guard) = self.nodes.read() {\n                    for dest in guard.keys().filter(|&node_id| node_id != &self.node_id) {\n                        self.send(&guard, *dest, *payload.clone());\n                    }\n                } else {\n                    error!(\"network lock has been poisoned\")\n                };\n\n                auto_closing_responder.respond(()).ignore()\n            }\n            NetworkRequest::Gossip {\n                payload,\n                count,\n                exclude,\n                auto_closing_responder,\n                gossip_target: _,\n            } => {\n                if let Ok(guard) = self.nodes.read() {\n                    let chosen: HashSet<_> = guard\n                        .keys()\n                        .filter(|&node_id| !exclude.contains(node_id) && node_id != &self.node_id)\n                        .cloned()\n    
                    .choose_multiple(rng, count)\n                        .into_iter()\n                        .collect();\n                    // Not terribly efficient, but will always get us the maximum amount of nodes.\n                    for dest in chosen.iter() {\n                        self.send(&guard, *dest, *payload.clone());\n                    }\n                    auto_closing_responder.respond(chosen).ignore()\n                } else {\n                    error!(\"network lock has been poisoned\");\n                    auto_closing_responder.respond(Default::default()).ignore()\n                }\n            }\n        }\n    }\n\n    fn name(&self) -> &str {\n        COMPONENT_NAME\n    }\n}\n\nasync fn receiver_task<REv, P>(\n    event_queue: EventQueueHandle<REv>,\n    mut receiver: mpsc::UnboundedReceiver<(NodeId, P)>,\n) where\n    REv: FromIncoming<P>,\n    P: 'static + Send,\n{\n    while let Some((sender, payload)) = receiver.recv().await {\n        let announce: REv = REv::from_incoming(sender, payload);\n\n        event_queue\n            .schedule(announce, QueueKind::NetworkIncoming)\n            .await;\n    }\n\n    debug!(\"receiver shutting down\")\n}\n"
  },
  {
    "path": "node/src/components/metrics.rs",
    "content": "//! Metrics component.\n//!\n//! The metrics component renders metrics upon request.\n//!\n//! # Adding metrics to a component\n//!\n//! When adding metrics to an existing component, there are a few guidelines that should in general\n//! be followed:\n//!\n//! 1. For a component `XYZ`, there should be a `XYZMetrics` struct that is one of its fields that\n//!    holds all of the `Collectors` (`Counter`s, etc) to make it easy to find all of the metrics\n//!    for a component in one place.\n//!\n//!    Creation and instantiation of this component happens inside the `reactor::Reactor::new`\n//!    function, which is passed in a `prometheus::Registry` (see 2.).\n//!\n//! 2. Instantiation of an `XYZMetrics` struct should always be combined with registering all of the\n//!    metrics on a registry. For this reason it is advisable to have the `XYZMetrics::new` method\n//!    take a `prometheus::Registry` and register it directly.\n//!\n//! 3. Updating metrics is done inside the `handle_event` function by simply calling methods on the\n//!    fields of `self.metrics` (`: XYZMetrics`). **Important**: Metrics should never be read to\n//!    prevent any actual logic depending on them. If a counter is being increment as a metric and\n//!    
also required for business logic, a second counter should be kept in the component's state.\n\nuse datasize::DataSize;\nuse prometheus::{Encoder, Registry, TextEncoder};\nuse tracing::error;\n\nuse crate::{\n    components::Component,\n    effect::{requests::MetricsRequest, EffectBuilder, EffectExt, Effects},\n    NodeRng,\n};\n\nconst COMPONENT_NAME: &str = \"metrics\";\n\n/// The metrics component.\n#[derive(DataSize, Debug)]\npub(crate) struct Metrics {\n    /// Metrics registry used to answer metrics queries.\n    #[data_size(skip)] // Actual implementation is just a wrapper around an `Arc`.\n    registry: Registry,\n}\n\nimpl<REv> Component<REv> for Metrics {\n    type Event = MetricsRequest;\n\n    fn handle_event(\n        &mut self,\n        _effect_builder: EffectBuilder<REv>,\n        _rng: &mut NodeRng,\n        req: Self::Event,\n    ) -> Effects<Self::Event> {\n        match req {\n            MetricsRequest::RenderNodeMetricsText { responder } => {\n                let mut buf: Vec<u8> = Vec::<u8>::new();\n\n                if let Err(e) = TextEncoder::new().encode(&self.registry.gather(), &mut buf) {\n                    error!(%e, \"text encoding of metrics failed\");\n                    return responder.respond(None).ignore();\n                };\n\n                match String::from_utf8(buf) {\n                    Ok(text) => responder.respond(Some(text)).ignore(),\n                    Err(e) => {\n                        error!(%e, \"generated text metrics are not valid UTF-8\");\n                        responder.respond(None).ignore()\n                    }\n                }\n            }\n        }\n    }\n\n    fn name(&self) -> &str {\n        COMPONENT_NAME\n    }\n}\n\nimpl Metrics {\n    /// Create and initialize a new metrics component.\n    pub(crate) fn new(registry: Registry) -> Self {\n        Metrics { registry }\n    }\n}\n"
  },
  {
    "path": "node/src/components/network/bincode_format.rs",
    "content": "//! Bincode wire format encoder.\n//!\n//! An encoder for `Bincode` messages with our specific settings pinned.\n\nuse std::{fmt::Debug, io, pin::Pin, sync::Arc};\n\nuse bincode::{\n    config::{\n        RejectTrailing, VarintEncoding, WithOtherEndian, WithOtherIntEncoding, WithOtherLimit,\n        WithOtherTrailing,\n    },\n    Options,\n};\nuse bytes::{Bytes, BytesMut};\nuse serde::{Deserialize, Serialize};\nuse tokio_serde::{Deserializer, Serializer};\n\nuse super::Message;\n\n/// bincode encoder/decoder for messages.\n#[allow(clippy::type_complexity)]\npub struct BincodeFormat(\n    // Note: `bincode` encodes its options at the type level. The exact shape is determined by\n    // `BincodeFormat::default()`.\n    pub(crate)  WithOtherTrailing<\n        WithOtherIntEncoding<\n            WithOtherEndian<\n                WithOtherLimit<bincode::DefaultOptions, bincode::config::Infinite>,\n                bincode::config::LittleEndian,\n            >,\n            VarintEncoding,\n        >,\n        RejectTrailing,\n    >,\n);\n\nimpl BincodeFormat {\n    /// Serializes an arbitrary serializable value with the networking bincode serializer.\n    #[inline]\n    pub(crate) fn serialize_arbitrary<T>(&self, item: &T) -> io::Result<Vec<u8>>\n    where\n        T: Serialize,\n    {\n        self.0\n            .serialize(item)\n            .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))\n    }\n}\n\nimpl Debug for BincodeFormat {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        f.write_str(\"BincodeFormat\")\n    }\n}\n\nimpl Default for BincodeFormat {\n    fn default() -> Self {\n        let opts = bincode::options()\n            .with_no_limit() // We rely on framed tokio transports to impose limits.\n            .with_little_endian() // Default at the time of this writing, we are merely pinning it.\n            .with_varint_encoding() // Same as above.\n            .reject_trailing_bytes(); // 
There is no reason for us not to reject trailing bytes.\n        BincodeFormat(opts)\n    }\n}\n\nimpl<P> Serializer<Arc<Message<P>>> for BincodeFormat\nwhere\n    Message<P>: Serialize,\n{\n    type Error = io::Error;\n\n    #[inline]\n    fn serialize(self: Pin<&mut Self>, item: &Arc<Message<P>>) -> Result<Bytes, Self::Error> {\n        let msg = &**item;\n        self.serialize_arbitrary(msg).map(Into::into)\n    }\n}\n\nimpl<P> Deserializer<Message<P>> for BincodeFormat\nwhere\n    for<'de> Message<P>: Deserialize<'de>,\n{\n    type Error = io::Error;\n\n    #[inline]\n    fn deserialize(self: Pin<&mut Self>, src: &BytesMut) -> Result<Message<P>, Self::Error> {\n        self.0\n            .deserialize(src)\n            .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))\n    }\n}\n"
  },
  {
    "path": "node/src/components/network/blocklist.rs",
    "content": "//! Blocklisting support.\n//!\n//! Blocked peers are prevented from interacting with the node through a variety of means.\n\nuse std::fmt::{self, Display, Formatter};\n\nuse casper_types::{Digest, EraId};\nuse datasize::DataSize;\nuse serde::Serialize;\n\nuse crate::{\n    components::{block_accumulator, fetcher::Tag},\n    types::InvalidProposalError,\n};\n\n/// Reasons why a peer was blocked.\n#[derive(DataSize, Debug, Serialize)]\npub(crate) enum BlocklistJustification {\n    /// Peer sent incorrect item.\n    SentBadItem { tag: Tag },\n    /// Peer sent an item which failed validation.\n    SentInvalidItem { tag: Tag, error_msg: String },\n    /// A finality signature that was sent is invalid.\n    SentBadFinalitySignature {\n        /// Error reported by block accumulator.\n        #[serde(skip_serializing)]\n        #[data_size(skip)]\n        error: block_accumulator::Error,\n    },\n    /// A block that was sent is invalid.\n    SentBadBlock {\n        /// Error reported by block accumulator.\n        #[serde(skip_serializing)]\n        #[data_size(skip)]\n        error: block_accumulator::Error,\n    },\n    /// An invalid proposal was received.\n    SentInvalidProposal {\n        /// The era for which the invalid value was destined.\n        era: EraId,\n        /// The specific error.\n        #[serde(skip_serializing)]\n        error: Box<InvalidProposalError>,\n    },\n    /// Too many unasked or expired pongs were sent by the peer.\n    #[allow(dead_code)] // Disabled as per 1.5.5 for stability reasons.\n    PongLimitExceeded,\n    /// Peer misbehaved during consensus and is blocked for it.\n    BadConsensusBehavior,\n    /// Peer is on the wrong network.\n    WrongNetwork {\n        /// The network name reported by the peer.\n        peer_network_name: String,\n    },\n    /// Peer presented the wrong chainspec hash.\n    WrongChainspecHash {\n        /// The chainspec hash reported by the peer.\n        peer_chainspec_hash: 
Digest,\n    },\n    /// Peer did not present a chainspec hash.\n    MissingChainspecHash,\n    /// Peer is considered dishonest.\n    DishonestPeer,\n    /// Peer sent too many finality signatures.\n    SentTooManyFinalitySignatures { max_allowed: u32 },\n}\n\nimpl Display for BlocklistJustification {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            BlocklistJustification::SentBadItem { tag } => {\n                write!(f, \"sent a {} we couldn't parse\", tag)\n            }\n            BlocklistJustification::SentInvalidItem { tag, error_msg } => {\n                write!(f, \"sent a {} which failed validation ({})\", tag, error_msg)\n            }\n            BlocklistJustification::SentBadFinalitySignature { error } => write!(\n                f,\n                \"sent a finality signature that is invalid or unexpected ({})\",\n                error\n            ),\n            BlocklistJustification::SentInvalidProposal { era, error } => {\n                write!(f, \"sent an invalid proposal in {} ({:?})\", era, error)\n            }\n            BlocklistJustification::PongLimitExceeded => {\n                f.write_str(\"wrote too many expired or invalid pongs\")\n            }\n            BlocklistJustification::BadConsensusBehavior => {\n                f.write_str(\"sent invalid data in consensus\")\n            }\n            BlocklistJustification::WrongNetwork { peer_network_name } => write!(\n                f,\n                \"reported to be on the wrong network ({:?})\",\n                peer_network_name\n            ),\n            BlocklistJustification::WrongChainspecHash {\n                peer_chainspec_hash,\n            } => write!(\n                f,\n                \"reported a mismatched chainspec hash ({})\",\n                peer_chainspec_hash\n            ),\n            BlocklistJustification::MissingChainspecHash => {\n                f.write_str(\"sent handshake without 
chainspec hash\")\n            }\n            BlocklistJustification::SentBadBlock { error } => {\n                write!(f, \"sent a block that is invalid or unexpected ({})\", error)\n            }\n            BlocklistJustification::DishonestPeer => f.write_str(\"dishonest peer\"),\n            BlocklistJustification::SentTooManyFinalitySignatures { max_allowed } => write!(\n                f,\n                \"sent too many finality signatures: maximum {max_allowed} signatures are allowed\"\n            ),\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/network/chain_info.rs",
    "content": "//! Network-related chain identification information.\n\n// TODO: This module and `ChainId` should disappear in its entirety and the actual chainspec be made\n// available.\n\nuse std::net::SocketAddr;\n\nuse casper_types::{Chainspec, Digest, ProtocolVersion};\nuse datasize::DataSize;\n\nuse super::{\n    counting_format::ConnectionId,\n    message::{ConsensusCertificate, NodeKeyPair},\n    Message,\n};\n\n/// Data retained from the chainspec by the networking component.\n///\n/// Typically this information is used for creating handshakes.\n#[derive(DataSize, Debug)]\npub(crate) struct ChainInfo {\n    /// Name of the network we participate in. We only remain connected to peers with the same\n    /// network name as us.\n    pub(super) network_name: String,\n    /// The maximum message size for a network message, as supplied from the chainspec.\n    pub(super) maximum_net_message_size: u32,\n    /// The protocol version.\n    pub(super) protocol_version: ProtocolVersion,\n    /// The hash of the chainspec.\n    pub(super) chainspec_hash: Digest,\n}\n\nimpl ChainInfo {\n    /// Create an instance of `ChainInfo` for testing.\n    #[cfg(test)]\n    pub fn create_for_testing() -> Self {\n        let network_name = \"rust-tests-network\";\n        ChainInfo {\n            network_name: network_name.to_string(),\n            maximum_net_message_size: 24 * 1024 * 1024, // Hardcoded at 24M.\n            protocol_version: ProtocolVersion::V1_0_0,\n            chainspec_hash: Digest::hash(format!(\"{}-chainspec\", network_name)),\n        }\n    }\n\n    /// Create a handshake based on chain identification data.\n    pub(super) fn create_handshake<P>(\n        &self,\n        public_addr: SocketAddr,\n        consensus_keys: Option<&NodeKeyPair>,\n        connection_id: ConnectionId,\n        is_syncing: bool,\n    ) -> Message<P> {\n        Message::Handshake {\n            network_name: self.network_name.clone(),\n            public_addr,\n            
protocol_version: self.protocol_version,\n            consensus_certificate: consensus_keys\n                .map(|key_pair| ConsensusCertificate::create(connection_id, key_pair)),\n            is_syncing,\n            chainspec_hash: Some(self.chainspec_hash),\n        }\n    }\n}\n\nimpl From<&Chainspec> for ChainInfo {\n    fn from(chainspec: &Chainspec) -> Self {\n        ChainInfo {\n            network_name: chainspec.network_config.name.clone(),\n            maximum_net_message_size: chainspec.network_config.maximum_net_message_size,\n            protocol_version: chainspec.protocol_version(),\n            chainspec_hash: chainspec.hash(),\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/network/config.rs",
    "content": "#[cfg(test)]\nuse std::net::{Ipv4Addr, SocketAddr};\nuse std::path::PathBuf;\n\nuse casper_types::{ProtocolVersion, TimeDiff};\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse super::EstimatorWeights;\n\n/// Default binding address.\n///\n/// Uses a fixed port per node, but binds on any interface.\nconst DEFAULT_BIND_ADDRESS: &str = \"0.0.0.0:34553\";\n\n/// Default public address.\n///\n/// Automatically sets the port, but defaults publishing localhost as the public address.\nconst DEFAULT_PUBLIC_ADDRESS: &str = \"127.0.0.1:0\";\n\nconst DEFAULT_MIN_PEERS_FOR_INITIALIZATION: u16 = 1;\n\n/// Default interval for gossiping network addresses.\nconst DEFAULT_GOSSIP_INTERVAL: TimeDiff = TimeDiff::from_seconds(30);\n\n/// Default delay until initial round of address gossiping starts.\nconst DEFAULT_INITIAL_GOSSIP_DELAY: TimeDiff = TimeDiff::from_seconds(5);\n\n/// Default time limit for an address to be in the pending set.\nconst DEFAULT_MAX_ADDR_PENDING_TIME: TimeDiff = TimeDiff::from_seconds(60);\n\n/// Default timeout during which the handshake needs to be completed.\nconst DEFAULT_HANDSHAKE_TIMEOUT: TimeDiff = TimeDiff::from_seconds(20);\n\nimpl Default for Config {\n    fn default() -> Self {\n        Config {\n            bind_address: DEFAULT_BIND_ADDRESS.to_string(),\n            public_address: DEFAULT_PUBLIC_ADDRESS.to_string(),\n            known_addresses: Vec::new(),\n            min_peers_for_initialization: DEFAULT_MIN_PEERS_FOR_INITIALIZATION,\n            gossip_interval: DEFAULT_GOSSIP_INTERVAL,\n            initial_gossip_delay: DEFAULT_INITIAL_GOSSIP_DELAY,\n            max_addr_pending_time: DEFAULT_MAX_ADDR_PENDING_TIME,\n            handshake_timeout: DEFAULT_HANDSHAKE_TIMEOUT,\n            max_incoming_peer_connections: 0,\n            max_outgoing_byte_rate_non_validators: 0,\n            max_incoming_message_rate_non_validators: 0,\n            estimator_weights: Default::default(),\n            
tarpit_version_threshold: None,\n            tarpit_duration: TimeDiff::from_seconds(600),\n            tarpit_chance: 0.2,\n            max_in_flight_demands: 50,\n            blocklist_retain_min_duration: TimeDiff::from_seconds(600),\n            blocklist_retain_max_duration: TimeDiff::from_seconds(1600),\n            identity: None,\n        }\n    }\n}\n\n/// Network identity configuration.\n#[derive(DataSize, Debug, Clone, Deserialize, Serialize)]\n// Disallow unknown fields to ensure config files and command-line overrides contain valid keys.\n#[serde(deny_unknown_fields)]\npub struct IdentityConfig {\n    /// Path to a signed certificate\n    pub tls_certificate: PathBuf,\n    /// Path to a secret key.\n    pub secret_key: PathBuf,\n    /// Path to a certificate authority certificate\n    pub ca_certificate: PathBuf,\n}\n\n/// Network configuration.\n#[derive(DataSize, Debug, Clone, Deserialize, Serialize)]\n// Disallow unknown fields to ensure config files and command-line overrides contain valid keys.\n#[serde(deny_unknown_fields)]\npub struct Config {\n    /// Address to bind to.\n    pub bind_address: String,\n    /// Publicly advertised address, in case the node has a different external IP.\n    ///\n    /// If the port is specified as `0`, it will be replaced with the actually bound port.\n    pub public_address: String,\n    /// Known address of a node on the network used for joining.\n    pub known_addresses: Vec<String>,\n    /// Minimum number of fully-connected peers to consider component initialized.\n    pub min_peers_for_initialization: u16,\n    /// Interval in milliseconds used for gossiping.\n    pub gossip_interval: TimeDiff,\n    /// Initial delay before the first round of gossip.\n    pub initial_gossip_delay: TimeDiff,\n    /// Maximum allowed time for an address to be kept in the pending set.\n    pub max_addr_pending_time: TimeDiff,\n    /// Maximum allowed time for handshake completion.\n    pub handshake_timeout: TimeDiff,\n    /// 
Maximum number of incoming connections per unique peer. Unlimited if `0`.\n    pub max_incoming_peer_connections: u16,\n    /// Maximum number of bytes per second allowed for non-validating peers. Unlimited if 0.\n    pub max_outgoing_byte_rate_non_validators: u32,\n    /// Maximum of requests answered from non-validating peers. Unlimited if 0.\n    pub max_incoming_message_rate_non_validators: u32,\n    /// Weight distribution for the payload impact estimator.\n    pub estimator_weights: EstimatorWeights,\n    /// The protocol version at which (or under) tarpitting is enabled.\n    pub tarpit_version_threshold: Option<ProtocolVersion>,\n    /// If tarpitting is enabled, duration for which connections should be kept open.\n    pub tarpit_duration: TimeDiff,\n    /// The chance, expressed as a number between 0.0 and 1.0, of triggering the tarpit.\n    pub tarpit_chance: f32,\n    /// Maximum number of demands for objects that can be in-flight.\n    pub max_in_flight_demands: u32,\n    /// Minimum time a peer is kept on block list before being redeemed. The actual\n    /// timeout duration is calculated by selecting a random value between\n    /// <blocklist_retain_min_duration, blocklist_retain_max_duration>.\n    pub blocklist_retain_min_duration: TimeDiff,\n    /// Maximum time a peer is kept on block list before being redeemed. 
The actual\n    /// timeout duration is calculated by selecting a random value between\n    /// <blocklist_retain_min_duration, blocklist_retain_max_duration>.\n    pub blocklist_retain_max_duration: TimeDiff,\n    /// Network identity configuration option.\n    ///\n    /// An identity will be automatically generated when starting up a node if this option is\n    /// unspecified.\n    pub identity: Option<IdentityConfig>,\n}\n\n#[cfg(test)]\n/// Reduced gossip interval for local testing.\nconst DEFAULT_TEST_GOSSIP_INTERVAL: TimeDiff = TimeDiff::from_seconds(1);\n\n#[cfg(test)]\n/// Address used to bind all local testing networking to by default.\nconst TEST_BIND_INTERFACE: Ipv4Addr = Ipv4Addr::LOCALHOST;\n\n#[cfg(test)]\nimpl Config {\n    /// Construct a configuration suitable for testing with no known address that binds to a\n    /// specific address.\n    pub(super) fn new(bind_address: SocketAddr) -> Self {\n        Config {\n            bind_address: bind_address.to_string(),\n            public_address: bind_address.to_string(),\n            known_addresses: vec![bind_address.to_string()],\n            gossip_interval: DEFAULT_TEST_GOSSIP_INTERVAL,\n            ..Default::default()\n        }\n    }\n\n    /// Constructs a `Config` suitable for use by the first node of a testnet on a single machine.\n    pub(crate) fn default_local_net_first_node(bind_port: u16) -> Self {\n        Config::new((TEST_BIND_INTERFACE, bind_port).into())\n    }\n\n    /// Constructs a `Config` suitable for use by a node joining a testnet on a single machine.\n    pub(crate) fn default_local_net(known_peer_port: u16) -> Self {\n        Config {\n            bind_address: SocketAddr::from((TEST_BIND_INTERFACE, 0)).to_string(),\n            public_address: SocketAddr::from((TEST_BIND_INTERFACE, 0)).to_string(),\n            known_addresses: vec![\n                SocketAddr::from((TEST_BIND_INTERFACE, known_peer_port)).to_string()\n            ],\n            gossip_interval: 
DEFAULT_TEST_GOSSIP_INTERVAL,\n            ..Default::default()\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/network/counting_format.rs",
    "content": "//! Observability for network serialization/deserialization.\n//!\n//! This module introduces two IDs: [`ConnectionId`] and [`TraceId`]. The [`ConnectionId`] is a\n//! unique ID per established connection that can be independently derive by peers on either of a\n//! connection. [`TraceId`] identifies a single message, distinguishing even messages that are sent\n//! to the same peer with equal contents.\n\nuse std::{\n    convert::TryFrom,\n    fmt::{self, Display, Formatter},\n    pin::Pin,\n    sync::{Arc, Weak},\n};\n\nuse bytes::{Bytes, BytesMut};\nuse openssl::ssl::SslRef;\nuse pin_project::pin_project;\n#[cfg(test)]\nuse rand::RngCore;\nuse static_assertions::const_assert;\nuse tokio_serde::{Deserializer, Serializer};\nuse tracing::{trace, warn};\n\n#[cfg(test)]\nuse casper_types::testing::TestRng;\nuse casper_types::Digest;\n\nuse super::{tls::KeyFingerprint, Message, Metrics, Payload};\nuse crate::{types::NodeId, utils};\n\n/// Lazily-evaluated network message ID generator.\n///\n/// Calculates a hash for the wrapped value when `Display::fmt` is called.\n#[derive(Copy, Clone, Debug, Eq, PartialEq)]\nstruct TraceId([u8; 8]);\n\nimpl Display for TraceId {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        f.write_str(&base16::encode_lower(&self.0))\n    }\n}\n\n/// A metric-updating serializer/deserializer wrapper for network messages.\n///\n/// Classifies each message given and updates the `NetworkingMetrics` accordingly. 
Also emits a\n/// TRACE-level message to the `net_out` and `net_in` target with a per-message unique hash when\n/// a message is sent or received.\n#[pin_project]\n#[derive(Debug)]\npub struct CountingFormat<F> {\n    /// The actual serializer performing the work.\n    #[pin]\n    inner: F,\n    /// Identifier for the connection.\n    connection_id: ConnectionId,\n    /// Counter for outgoing messages.\n    out_count: u64,\n    /// Counter for incoming messages.\n    in_count: u64,\n    /// Our role in the connection.\n    role: Role,\n    /// Metrics to update.\n    metrics: Weak<Metrics>,\n}\n\nimpl<F> CountingFormat<F> {\n    /// Creates a new counting formatter.\n    #[inline]\n    pub(super) fn new(\n        metrics: Weak<Metrics>,\n        connection_id: ConnectionId,\n        role: Role,\n        inner: F,\n    ) -> Self {\n        Self {\n            metrics,\n            connection_id,\n            out_count: 0,\n            in_count: 0,\n            role,\n            inner,\n        }\n    }\n}\n\nimpl<F, P> Serializer<Arc<Message<P>>> for CountingFormat<F>\nwhere\n    F: Serializer<Arc<Message<P>>>,\n    P: Payload,\n{\n    type Error = F::Error;\n\n    #[inline]\n    fn serialize(self: Pin<&mut Self>, item: &Arc<Message<P>>) -> Result<Bytes, Self::Error> {\n        let this = self.project();\n        let projection: Pin<&mut F> = this.inner;\n\n        let serialized = F::serialize(projection, item)?;\n        let msg_size = serialized.len() as u64;\n        let msg_kind = item.classify();\n        Metrics::record_payload_out(this.metrics, msg_kind, msg_size);\n\n        let trace_id = this\n            .connection_id\n            .create_trace_id(this.role.out_flag(), *this.out_count);\n        *this.out_count += 1;\n\n        trace!(target: \"net_out\",\n            msg_id = %trace_id,\n            msg_size,\n            msg_kind = %msg_kind, \"sending\");\n\n        Ok(serialized)\n    }\n}\n\nimpl<F, P> Deserializer<Message<P>> for 
CountingFormat<F>\nwhere\n    F: Deserializer<Message<P>>,\n    P: Payload,\n{\n    type Error = F::Error;\n\n    #[inline]\n    fn deserialize(self: Pin<&mut Self>, src: &BytesMut) -> Result<Message<P>, Self::Error> {\n        let this = self.project();\n        let projection: Pin<&mut F> = this.inner;\n\n        let msg_size = src.len() as u64;\n\n        let deserialized = F::deserialize(projection, src)?;\n        let msg_kind = deserialized.classify();\n        Metrics::record_payload_in(this.metrics, msg_kind, msg_size);\n\n        let trace_id = this\n            .connection_id\n            .create_trace_id(this.role.in_flag(), *this.in_count);\n        *this.in_count += 1;\n\n        trace!(target: \"net_in\",\n            msg_id = %trace_id,\n            msg_size,\n            msg_kind = %msg_kind, \"received\");\n\n        Ok(deserialized)\n    }\n}\n\n/// An ID identifying a connection.\n///\n/// The ID is guaranteed to be the same on both ends of the connection, but not guaranteed to be\n/// unique or sufficiently random. Do not use it for any cryptographic/security related purposes.\n#[derive(Copy, Clone, Debug, Eq, PartialEq)]\npub(super) struct ConnectionId([u8; Digest::LENGTH]);\n\n// Invariant assumed by `ConnectionId`, `Digest` must be <= than `KeyFingerprint`.\nconst_assert!(KeyFingerprint::LENGTH >= Digest::LENGTH);\n// We also assume it is at least 12 bytes.\nconst_assert!(Digest::LENGTH >= 12);\n\n/// Random data derived from TLS connections.\n#[derive(Copy, Clone, Debug)]\npub(super) struct TlsRandomData {\n    /// Random data extract from the client of the connection.\n    combined_random: [u8; 12],\n}\n\n/// Zero-randomness.\n///\n/// Used to check random data.\nconst ZERO_RANDOMNESS: [u8; 12] = [0; 12];\n\nimpl TlsRandomData {\n    /// Collects random data from an existing SSL collection.\n    ///\n    /// Ideally we would use the TLS session ID, but it is not available on outgoing connections at\n    /// the times we need it. 
Instead, we use the `server_random` and `client_random` nonces, which\n    /// will be the same on both ends of the connection.\n    fn collect(ssl: &SslRef) -> Self {\n        // We are using only the first 12 bytes of these 32 byte values here, just in case we missed\n        // something in our assessment that hashing these should be safe. Additionally, these values\n        // are XOR'd, not concatenated. All this is done to prevent leaking information about these\n        // numbers.\n        //\n        // Some SSL implementations use timestamps for the first four bytes, so to be sufficiently\n        // random, we use 4 + 8 bytes of the nonces.\n        let mut server_random = [0; 12];\n        let mut client_random = [0; 12];\n\n        ssl.server_random(&mut server_random);\n\n        if server_random == ZERO_RANDOMNESS {\n            warn!(\"TLS server random is all zeros\");\n        }\n\n        ssl.client_random(&mut client_random);\n\n        if client_random == ZERO_RANDOMNESS {\n            warn!(\"TLS client random is all zeros\");\n        }\n\n        // Combine using XOR.\n        utils::xor(&mut server_random, &client_random);\n\n        Self {\n            combined_random: server_random,\n        }\n    }\n\n    /// Creates random `TlsRandomData`.\n    #[cfg(test)]\n    fn random(rng: &mut TestRng) -> Self {\n        let mut buffer = [0u8; 12];\n\n        rng.fill_bytes(&mut buffer);\n\n        Self {\n            combined_random: buffer,\n        }\n    }\n}\n\nimpl ConnectionId {\n    /// Creates a new connection ID, based on random values from server and client, as well as\n    /// node IDs.\n    fn create(random_data: TlsRandomData, our_id: NodeId, their_id: NodeId) -> ConnectionId {\n        // Hash the resulting random values.\n        let mut id = Digest::hash(random_data.combined_random).value();\n\n        // We XOR in hashes of server and client fingerprint, to ensure that in the case of an\n        // accidental collision (e.g. 
when `server_random` and `client_random` turn out to be all\n        // zeros), we still have a chance of producing a reasonable ID.\n        utils::xor(&mut id, &our_id.hash_bytes()[0..Digest::LENGTH]);\n        utils::xor(&mut id, &their_id.hash_bytes()[0..Digest::LENGTH]);\n\n        ConnectionId(id)\n    }\n\n    /// Creates a new [`TraceID`] based on the message count.\n    ///\n    /// The `flag` should be created using the [`Role::in_flag`] or [`Role::out_flag`] method and\n    /// must be created accordingly (`out_flag` when serializing, `in_flag` when deserializing).\n    fn create_trace_id(&self, flag: u8, count: u64) -> TraceId {\n        // Copy the basic network ID.\n        let mut buffer = self.0;\n\n        // Direction set on first byte.\n        buffer[0] ^= flag;\n\n        // XOR in message count.\n        utils::xor(&mut buffer[4..12], &count.to_ne_bytes());\n\n        // Hash again and truncate.\n        let full_hash = Digest::hash(buffer);\n\n        // Safe to expect here, as we assert earlier that `Digest` is at least 12 bytes.\n        let truncated = TryFrom::try_from(&full_hash.value()[0..8]).expect(\"buffer size mismatch\");\n\n        TraceId(truncated)\n    }\n\n    #[inline]\n    /// Returns a reference to the raw bytes of the connection ID.\n    pub(crate) fn as_bytes(&self) -> &[u8] {\n        &self.0\n    }\n\n    /// Creates a new connection ID from an existing SSL connection.\n    #[inline]\n    pub(crate) fn from_connection(ssl: &SslRef, our_id: NodeId, their_id: NodeId) -> Self {\n        Self::create(TlsRandomData::collect(ssl), our_id, their_id)\n    }\n\n    /// Creates a random `ConnectionId`.\n    #[cfg(test)]\n    pub(super) fn random(rng: &mut TestRng) -> Self {\n        ConnectionId::create(\n            TlsRandomData::random(rng),\n            NodeId::random(rng),\n            NodeId::random(rng),\n        )\n    }\n}\n\n/// Message sending direction.\n#[derive(Copy, Clone, Debug)]\n#[repr(u8)]\npub(super) enum Role 
{\n    /// Dialer, i.e. initiator of the connection.\n    Dialer,\n    /// Listener, acceptor of the connection.\n    Listener,\n}\n\nimpl Role {\n    /// Returns a flag suitable for hashing incoming messages.\n    #[inline]\n    fn in_flag(self) -> u8 {\n        !(self.out_flag())\n    }\n\n    /// Returns a flag suitable for hashing outgoing messages.\n    #[inline]\n    fn out_flag(self) -> u8 {\n        // The magic flag uses 50% of the bits, to be XOR'd into the hash later.\n        const MAGIC_FLAG: u8 = 0b10101010;\n\n        match self {\n            Role::Dialer => MAGIC_FLAG,\n            Role::Listener => !MAGIC_FLAG,\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::types::NodeId;\n\n    use super::{ConnectionId, Role, TlsRandomData, TraceId};\n\n    #[test]\n    fn trace_id_has_16_character() {\n        let data = [0, 1, 2, 3, 4, 5, 6, 7];\n\n        let output = format!(\"{}\", TraceId(data));\n\n        assert_eq!(output.len(), 16);\n    }\n\n    #[test]\n    fn can_create_deterministic_trace_id() {\n        let mut rng = crate::new_rng();\n\n        // Scenario: Nodes A and B are connecting to each other. 
Both connections are established.\n        let node_a = NodeId::random(&mut rng);\n        let node_b = NodeId::random(&mut rng);\n\n        // We get two connections, with different TLS random data, but it will be the same on both\n        // ends of the connection.\n        let a_to_b_random = TlsRandomData::random(&mut rng);\n        let a_to_b = ConnectionId::create(a_to_b_random, node_a, node_b);\n        let a_to_b_alt = ConnectionId::create(a_to_b_random, node_b, node_a);\n\n        // Ensure that either peer ends up with the same connection id.\n        assert_eq!(a_to_b, a_to_b_alt);\n\n        let b_to_a_random = TlsRandomData::random(&mut rng);\n        let b_to_a = ConnectionId::create(b_to_a_random, node_b, node_a);\n        let b_to_a_alt = ConnectionId::create(b_to_a_random, node_a, node_b);\n        assert_eq!(b_to_a, b_to_a_alt);\n\n        // The connection IDs must be distinct though.\n        assert_ne!(a_to_b, b_to_a);\n\n        // We are only looking at messages sent on the `a_to_b` connection, although from both ends.\n        // In our example, `node_a` is the dialing node, `node_b` the listener.\n\n        // Trace ID on A, after sending to B.\n        let msg_ab_0_on_a = a_to_b.create_trace_id(Role::Dialer.out_flag(), 0);\n\n        // The same message on B.\n        let msg_ab_0_on_b = a_to_b.create_trace_id(Role::Listener.in_flag(), 0);\n\n        // These trace IDs must match.\n        assert_eq!(msg_ab_0_on_a, msg_ab_0_on_b);\n\n        // The second message must have a distinct trace ID.\n        let msg_ab_1_on_a = a_to_b.create_trace_id(Role::Dialer.out_flag(), 1);\n        let msg_ab_1_on_b = a_to_b.create_trace_id(Role::Listener.in_flag(), 1);\n        assert_eq!(msg_ab_1_on_a, msg_ab_1_on_b);\n        assert_ne!(msg_ab_0_on_a, msg_ab_1_on_a);\n\n        // Sending a message on the **same connection** in a **different direction** also must yield\n        // a different message id.\n        let msg_ba_0_on_b = 
a_to_b.create_trace_id(Role::Listener.out_flag(), 0);\n        let msg_ba_0_on_a = a_to_b.create_trace_id(Role::Dialer.in_flag(), 0);\n        assert_eq!(msg_ba_0_on_b, msg_ba_0_on_a);\n        assert_ne!(msg_ba_0_on_b, msg_ab_0_on_b);\n    }\n}\n"
  },
  {
    "path": "node/src/components/network/error.rs",
    "content": "use std::{error, io, net::SocketAddr, result};\n\nuse datasize::DataSize;\nuse openssl::{error::ErrorStack, ssl};\nuse serde::Serialize;\nuse thiserror::Error;\n\nuse casper_types::{crypto, Digest, ProtocolVersion};\n\nuse crate::{\n    tls::{LoadCertError, ValidationError},\n    utils::ResolveAddressError,\n};\n\npub(super) type Result<T> = result::Result<T, Error>;\n\n/// Error type returned by the `Network` component.\n#[derive(Debug, Error, Serialize)]\npub enum Error {\n    /// We do not have any known hosts.\n    #[error(\"could not resolve at least one known host (or none provided)\")]\n    EmptyKnownHosts,\n    /// Failed to create a TCP listener.\n    #[error(\"failed to create listener on {1}\")]\n    ListenerCreation(\n        #[serde(skip_serializing)]\n        #[source]\n        io::Error,\n        SocketAddr,\n    ),\n    /// Failed to get TCP listener address.\n    #[error(\"failed to get listener addr\")]\n    ListenerAddr(\n        #[serde(skip_serializing)]\n        #[source]\n        io::Error,\n    ),\n    /// Failed to set listener to non-blocking.\n    #[error(\"failed to set listener to non-blocking\")]\n    ListenerSetNonBlocking(\n        #[serde(skip_serializing)]\n        #[source]\n        io::Error,\n    ),\n    /// Failed to convert std TCP listener to tokio TCP listener.\n    #[error(\"failed to convert listener to tokio\")]\n    ListenerConversion(\n        #[serde(skip_serializing)]\n        #[source]\n        io::Error,\n    ),\n    /// Could not resolve root node address.\n    #[error(\"failed to resolve network address\")]\n    ResolveAddr(\n        #[serde(skip_serializing)]\n        #[source]\n        ResolveAddressError,\n    ),\n    /// Instantiating metrics failed.\n    #[error(transparent)]\n    Metrics(\n        #[serde(skip_serializing)]\n        #[from]\n        prometheus::Error,\n    ),\n    /// Failed to load a certificate.\n    #[error(\"failed to load a certificate: {0}\")]\n    LoadCertificate(\n    
    #[serde(skip_serializing)]\n        #[from]\n        LoadCertError,\n    ),\n}\n\n// Manual implementation for `DataSize` - the type contains too many FFI variants that are hard to\n// size, so we give up on estimating it altogether.\nimpl DataSize for Error {\n    const IS_DYNAMIC: bool = false;\n\n    const STATIC_HEAP_SIZE: usize = 0;\n\n    fn estimate_heap_size(&self) -> usize {\n        0\n    }\n}\n\nimpl DataSize for ConnectionError {\n    const IS_DYNAMIC: bool = false;\n\n    const STATIC_HEAP_SIZE: usize = 0;\n\n    fn estimate_heap_size(&self) -> usize {\n        0\n    }\n}\n\n/// An error related to an incoming or outgoing connection.\n#[derive(Debug, Error, Serialize)]\npub enum ConnectionError {\n    /// Failed to create TLS acceptor.\n    #[error(\"failed to create TLS acceptor/connector\")]\n    TlsInitialization(\n        #[serde(skip_serializing)]\n        #[source]\n        ErrorStack,\n    ),\n    /// TCP connection failed.\n    #[error(\"TCP connection failed\")]\n    TcpConnection(\n        #[serde(skip_serializing)]\n        #[source]\n        io::Error,\n    ),\n    /// Did not succeed setting TCP_NODELAY on the connection.\n    #[error(\"Could not set TCP_NODELAY on outgoing connection\")]\n    TcpNoDelay(\n        #[serde(skip_serializing)]\n        #[source]\n        io::Error,\n    ),\n    /// Handshaking error.\n    #[error(\"TLS handshake error\")]\n    TlsHandshake(\n        #[serde(skip_serializing)]\n        #[source]\n        ssl::Error,\n    ),\n    /// Remote failed to present a client/server certificate.\n    #[error(\"no client certificate presented\")]\n    NoPeerCertificate,\n    /// TLS validation error.\n    #[error(\"TLS validation error of peer certificate\")]\n    PeerCertificateInvalid(#[source] ValidationError),\n    /// Failed to send handshake.\n    #[error(\"handshake send failed\")]\n    HandshakeSend(\n        #[serde(skip_serializing)]\n        #[source]\n        IoError<io::Error>,\n    ),\n    /// Failed 
to receive handshake.\n    #[error(\"handshake receive failed\")]\n    HandshakeRecv(\n        #[serde(skip_serializing)]\n        #[source]\n        IoError<io::Error>,\n    ),\n    /// Peer reported a network name that does not match ours.\n    #[error(\"peer is on different network: {0}\")]\n    WrongNetwork(String),\n    /// Peer reported an incompatible version.\n    #[error(\"peer is running incompatible version: {0}\")]\n    IncompatibleVersion(ProtocolVersion),\n    /// Peer is using a different chainspec.\n    #[error(\"peer is using a different chainspec, hash: {0}\")]\n    WrongChainspecHash(Digest),\n    /// Peer should have included the chainspec hash in the handshake message,\n    /// but didn't.\n    #[error(\"peer did not include chainspec hash in the handshake when it was required\")]\n    MissingChainspecHash,\n    /// Peer did not send any message, or a non-handshake as its first message.\n    #[error(\"peer did not send handshake\")]\n    DidNotSendHandshake,\n    /// Failed to encode our handshake.\n    #[error(\"could not encode our handshake\")]\n    CouldNotEncodeOurHandshake(\n        #[serde(skip_serializing)]\n        #[source]\n        io::Error,\n    ),\n    /// A background sender for our handshake panicked or crashed.\n    ///\n    /// This is usually a bug.\n    #[error(\"handshake sender crashed\")]\n    HandshakeSenderCrashed(\n        #[serde(skip_serializing)]\n        #[source]\n        tokio::task::JoinError,\n    ),\n    /// Could not deserialize the message that is supposed to contain the remotes handshake.\n    #[error(\"could not decode remote handshake message\")]\n    InvalidRemoteHandshakeMessage(\n        #[serde(skip_serializing)]\n        #[source]\n        io::Error,\n    ),\n    /// The peer sent a consensus certificate, but it was invalid.\n    #[error(\"invalid consensus certificate\")]\n    InvalidConsensusCertificate(\n        #[serde(skip_serializing)]\n        #[source]\n        crypto::Error,\n    ),\n    /// 
Failed to reunite handshake sink/stream.\n    ///\n    /// This is usually a bug.\n    #[error(\"handshake sink/stream could not be reunited\")]\n    FailedToReuniteHandshakeSinkAndStream,\n    /// Handshake not allowed (Isolated mode)\n    #[error(\"handshake not allowed (Isolated mode)\")]\n    HandshakeNotAllowed,\n}\n\n/// IO operation that can time out or close.\n#[derive(Debug, Error)]\npub enum IoError<E>\nwhere\n    E: error::Error + 'static,\n{\n    /// IO operation timed out.\n    #[error(\"io timeout\")]\n    Timeout,\n    /// Non-timeout IO error.\n    #[error(transparent)]\n    Error(#[from] E),\n    /// Unexpected close/end-of-file.\n    #[error(\"closed unexpectedly\")]\n    UnexpectedEof,\n}\n"
  },
  {
    "path": "node/src/components/network/event.rs",
    "content": "use std::{\n    fmt::{self, Debug, Display, Formatter},\n    io,\n    mem::size_of,\n    net::SocketAddr,\n    sync::Arc,\n};\n\nuse derive_more::From;\nuse futures::stream::{SplitSink, SplitStream};\nuse serde::Serialize;\nuse static_assertions::const_assert;\nuse tracing::Span;\n\nuse casper_types::PublicKey;\n\nuse super::{error::ConnectionError, FullTransport, GossipedAddress, Message, NodeId};\nuse crate::{\n    effect::{\n        announcements::PeerBehaviorAnnouncement,\n        requests::{NetworkInfoRequest, NetworkRequest},\n    },\n    protocol::Message as ProtocolMessage,\n};\n\nconst_assert!(size_of::<Event<ProtocolMessage>>() < 65);\n\n/// A network event.\n#[derive(Debug, From, Serialize)]\npub(crate) enum Event<P> {\n    Initialize,\n\n    /// The TLS handshake completed on the incoming connection.\n    IncomingConnection {\n        incoming: Box<IncomingConnection<P>>,\n        #[serde(skip)]\n        span: Span,\n    },\n\n    /// Received network message.\n    IncomingMessage {\n        peer_id: Box<NodeId>,\n        msg: Box<Message<P>>,\n        #[serde(skip)]\n        span: Span,\n    },\n\n    /// Incoming connection closed.\n    IncomingClosed {\n        #[serde(skip_serializing)]\n        result: io::Result<()>,\n        peer_id: Box<NodeId>,\n        peer_addr: SocketAddr,\n        #[serde(skip_serializing)]\n        span: Box<Span>,\n    },\n\n    /// A new outgoing connection was successfully established.\n    OutgoingConnection {\n        outgoing: Box<OutgoingConnection<P>>,\n        #[serde(skip_serializing)]\n        span: Span,\n    },\n\n    /// An established connection was terminated.\n    OutgoingDropped {\n        peer_id: Box<NodeId>,\n        peer_addr: SocketAddr,\n    },\n\n    /// Incoming network request.\n    #[from]\n    NetworkRequest {\n        #[serde(skip_serializing)]\n        req: Box<NetworkRequest<P>>,\n    },\n\n    /// Incoming network info request.\n    #[from]\n    NetworkInfoRequest {\n        
#[serde(skip_serializing)]\n        req: Box<NetworkInfoRequest>,\n    },\n\n    /// The node should gossip its own public listening address.\n    GossipOurAddress,\n\n    /// We received a peer's public listening address via gossip.\n    PeerAddressReceived(GossipedAddress),\n\n    /// Housekeeping for the outgoing manager.\n    SweepOutgoing,\n\n    /// Blocklist announcement.\n    #[from]\n    BlocklistAnnouncement(PeerBehaviorAnnouncement),\n}\n\nimpl From<NetworkRequest<ProtocolMessage>> for Event<ProtocolMessage> {\n    fn from(req: NetworkRequest<ProtocolMessage>) -> Self {\n        Self::NetworkRequest { req: Box::new(req) }\n    }\n}\n\nimpl From<NetworkInfoRequest> for Event<ProtocolMessage> {\n    fn from(req: NetworkInfoRequest) -> Self {\n        Self::NetworkInfoRequest { req: Box::new(req) }\n    }\n}\n\nimpl<P: Display> Display for Event<P> {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            Event::Initialize => write!(f, \"initialize\"),\n            Event::IncomingConnection { incoming, span: _ } => {\n                write!(f, \"incoming connection: {}\", incoming)\n            }\n            Event::IncomingMessage {\n                peer_id: node_id,\n                msg,\n                span: _,\n            } => write!(f, \"msg from {}: {}\", node_id, msg),\n            Event::IncomingClosed { peer_addr, .. 
} => {\n                write!(f, \"closed connection from {}\", peer_addr)\n            }\n            Event::OutgoingConnection { outgoing, span: _ } => {\n                write!(f, \"outgoing connection: {}\", outgoing)\n            }\n            Event::OutgoingDropped { peer_id, peer_addr } => {\n                write!(f, \"dropped outgoing {} {}\", peer_id, peer_addr)\n            }\n            Event::NetworkRequest { req } => write!(f, \"request: {}\", req),\n            Event::NetworkInfoRequest { req } => write!(f, \"request: {}\", req),\n            Event::GossipOurAddress => write!(f, \"gossip our address\"),\n            Event::PeerAddressReceived(gossiped_address) => {\n                write!(f, \"received gossiped peer address {}\", gossiped_address)\n            }\n            Event::BlocklistAnnouncement(ann) => {\n                write!(f, \"handling blocklist announcement: {}\", ann)\n            }\n            Event::SweepOutgoing => {\n                write!(f, \"sweep outgoing connections\")\n            }\n        }\n    }\n}\n\n/// Outcome of an incoming connection negotiation.\n#[derive(Debug, Serialize)]\npub(crate) enum IncomingConnection<P> {\n    /// The connection failed early on, before even a peer's [`NodeId`] could be determined.\n    FailedEarly {\n        /// Remote port the peer dialed us from.\n        peer_addr: SocketAddr,\n        /// Error causing the failure.\n        error: ConnectionError,\n    },\n    /// Connection failed after TLS was successfully established; thus we have a valid [`NodeId`].\n    Failed {\n        /// Remote port the peer dialed us from.\n        peer_addr: SocketAddr,\n        /// Peer's [`NodeId`].\n        peer_id: NodeId,\n        /// Error causing the failure.\n        error: ConnectionError,\n    },\n    /// Connection turned out to be a loopback connection.\n    Loopback,\n    /// Connection successfully established.\n    Established {\n        /// Remote port the peer dialed us from.\n        
peer_addr: SocketAddr,\n        /// Public address advertised by the peer.\n        public_addr: SocketAddr,\n        /// Peer's [`NodeId`].\n        peer_id: NodeId,\n        /// The public key the peer is validating with, if any.\n        peer_consensus_public_key: Option<PublicKey>,\n        /// Stream of incoming messages. for incoming connections.\n        #[serde(skip_serializing)]\n        stream: SplitStream<FullTransport<P>>,\n    },\n}\n\nimpl<P> Display for IncomingConnection<P> {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            IncomingConnection::FailedEarly { peer_addr, error } => {\n                write!(f, \"early failure from {}: {}\", peer_addr, error)\n            }\n            IncomingConnection::Failed {\n                peer_addr,\n                peer_id,\n                error,\n            } => write!(f, \"failure from {}/{}: {}\", peer_addr, peer_id, error),\n            IncomingConnection::Loopback => f.write_str(\"loopback\"),\n            IncomingConnection::Established {\n                peer_addr,\n                public_addr,\n                peer_id,\n                peer_consensus_public_key,\n                stream: _,\n            } => {\n                write!(\n                    f,\n                    \"connection established from {}/{}; public: {}\",\n                    peer_addr, peer_id, public_addr\n                )?;\n\n                if let Some(public_key) = peer_consensus_public_key {\n                    write!(f, \" [{}]\", public_key)\n                } else {\n                    f.write_str(\" [no validator id]\")\n                }\n            }\n        }\n    }\n}\n\n/// Outcome of an outgoing connection attempt.\n#[derive(Debug, Serialize)]\npub(crate) enum OutgoingConnection<P> {\n    /// The outgoing connection failed early on, before a peer's [`NodeId`] could be determined.\n    FailedEarly {\n        /// Address that was dialed.\n        peer_addr: 
SocketAddr,\n        /// Error causing the failure.\n        error: ConnectionError,\n    },\n    /// Connection failed after TLS was successfully established; thus we have a valid [`NodeId`].\n    Failed {\n        /// Address that was dialed.\n        peer_addr: SocketAddr,\n        /// Peer's [`NodeId`].\n        peer_id: NodeId,\n        /// Error causing the failure.\n        error: ConnectionError,\n    },\n    /// Connection turned out to be a loopback connection.\n    Loopback { peer_addr: SocketAddr },\n    /// Connection successfully established.\n    Established {\n        /// Address that was dialed.\n        peer_addr: SocketAddr,\n        /// Peer's [`NodeId`].\n        peer_id: NodeId,\n        /// The public key the peer is validating with, if any.\n        peer_consensus_public_key: Option<PublicKey>,\n        /// Sink for outgoing messages.\n        #[serde(skip_serializing)]\n        sink: SplitSink<FullTransport<P>, Arc<Message<P>>>,\n        /// Holds the information whether the remote node is syncing.\n        is_syncing: bool,\n    },\n}\n\nimpl<P> Display for OutgoingConnection<P> {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            OutgoingConnection::FailedEarly { peer_addr, error } => {\n                write!(f, \"early failure to {}: {}\", peer_addr, error)\n            }\n            OutgoingConnection::Failed {\n                peer_addr,\n                peer_id,\n                error,\n            } => write!(f, \"failure to {}/{}: {}\", peer_addr, peer_id, error),\n            OutgoingConnection::Loopback { peer_addr } => write!(f, \"loopback to {}\", peer_addr),\n            OutgoingConnection::Established {\n                peer_addr,\n                peer_id,\n                peer_consensus_public_key,\n                sink: _,\n                is_syncing,\n            } => {\n                write!(\n                    f,\n                    \"connection established to {}/{}, 
is_syncing: {}\",\n                    peer_addr, peer_id, is_syncing\n                )?;\n\n                if let Some(public_key) = peer_consensus_public_key {\n                    write!(f, \" [{}]\", public_key)\n                } else {\n                    f.write_str(\" [no validator id]\")\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/network/gossiped_address.rs",
    "content": "use std::{\n    fmt::{self, Display, Formatter},\n    net::SocketAddr,\n};\n\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    components::gossiper::{GossipItem, SmallGossipItem},\n    effect::GossipTarget,\n};\n\n/// Used to gossip our public listening address to peers.\n#[derive(\n    Copy, Clone, DataSize, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, Debug,\n)]\npub struct GossipedAddress(SocketAddr);\n\nimpl GossipedAddress {\n    pub(super) fn new(address: SocketAddr) -> Self {\n        GossipedAddress(address)\n    }\n}\n\nimpl Display for GossipedAddress {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(formatter, \"gossiped-address {}\", self.0)\n    }\n}\n\nimpl GossipItem for GossipedAddress {\n    const ID_IS_COMPLETE_ITEM: bool = true;\n    const REQUIRES_GOSSIP_RECEIVED_ANNOUNCEMENT: bool = false;\n\n    type Id = GossipedAddress;\n\n    fn gossip_id(&self) -> Self::Id {\n        *self\n    }\n\n    fn gossip_target(&self) -> GossipTarget {\n        GossipTarget::All\n    }\n}\n\nimpl SmallGossipItem for GossipedAddress {\n    fn id_as_item(id: &Self::Id) -> &Self {\n        id\n    }\n}\n\nimpl From<GossipedAddress> for SocketAddr {\n    fn from(gossiped_address: GossipedAddress) -> Self {\n        gossiped_address.0\n    }\n}\n\nmod specimen_support {\n    use crate::utils::specimen::{Cache, LargestSpecimen, SizeEstimator};\n\n    use super::GossipedAddress;\n\n    impl LargestSpecimen for GossipedAddress {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            GossipedAddress::new(LargestSpecimen::largest_specimen(estimator, cache))\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/network/health.rs",
    "content": "//! Health-check state machine.\n//!\n//! Health checks perform periodic pings to remote peers to ensure the connection is still alive. It\n//! has somewhat complicated logic that is encoded in the `ConnectionHealth` struct, which has\n//! multiple implicit states.\n\nuse std::{\n    fmt::{self, Display, Formatter},\n    time::{Duration, Instant},\n};\n\nuse datasize::DataSize;\nuse rand::Rng;\nuse serde::{Deserialize, Serialize};\n\nuse crate::utils::specimen::{Cache, LargestSpecimen, SizeEstimator};\n\n/// Connection health information.\n///\n/// All data related to the ping/pong functionality used to verify a peer's networking liveness.\n#[derive(Clone, Copy, DataSize, Debug)]\npub(crate) struct ConnectionHealth {\n    /// The moment the connection was established.\n    pub(crate) connected_since: Instant,\n    /// The last ping that was requested to be sent.\n    pub(crate) last_ping_sent: Option<TaggedTimestamp>,\n    /// The most recent pong received.\n    pub(crate) last_pong_received: Option<TaggedTimestamp>,\n    /// Number of invalid pongs received, reset upon receiving a valid pong.\n    pub(crate) invalid_pong_count: u32,\n    /// Number of pings that timed out.\n    pub(crate) ping_timeouts: u32,\n}\n\n/// Health check configuration.\n#[derive(DataSize, Debug)]\npub(crate) struct HealthConfig {\n    /// How often to send a ping to ensure a connection is established.\n    ///\n    /// Determines how soon after connecting or a successful ping another ping is sent.\n    pub(crate) ping_interval: Duration,\n    /// Duration during which a ping must succeed to be considered successful.\n    pub(crate) ping_timeout: Duration,\n    /// Number of retries before giving up and disconnecting a peer due to too many failed pings.\n    pub(crate) ping_retries: u16,\n    /// How many spurious pongs to tolerate before banning a peer.\n    pub(crate) pong_limit: u32,\n}\n\n/// A timestamp with an associated nonce.\n#[derive(Clone, Copy, DataSize, 
Debug)]\npub(crate) struct TaggedTimestamp {\n    /// The actual timestamp.\n    timestamp: Instant,\n    /// The nonce of the timestamp.\n    nonce: Nonce,\n}\n\nimpl TaggedTimestamp {\n    /// Creates a new tagged timestamp with a random nonce.\n    pub(crate) fn new<R: Rng>(rng: &mut R, timestamp: Instant) -> Self {\n        Self {\n            timestamp,\n            nonce: rng.gen(),\n        }\n    }\n\n    /// Creates a new tagged timestamp from parts.\n    pub(crate) fn from_parts(timestamp: Instant, nonce: Nonce) -> Self {\n        TaggedTimestamp { nonce, timestamp }\n    }\n\n    /// Returns the actual timestamp.\n    pub(crate) fn timestamp(&self) -> Instant {\n        self.timestamp\n    }\n\n    /// Returns the nonce inside the timestamp.\n    pub(crate) fn nonce(self) -> Nonce {\n        self.nonce\n    }\n}\n\n/// A number-used-once, specifically one used in pings.\n// Note: This nonce used to be a `u32`, but that is too small - since we immediately disconnect when\n//       a duplicate ping is generated, a `u32` has a ~ 1/(2^32) chance of a consecutive collision.\n//\n//       If we ping every 5 seconds, this is a ~ 0.01% chance over a month, which is too high over\n//       thousands of nodes. 
At 64 bits, in theory the upper bound is 0.0000000002%, which is\n//       better (the period of the RNG used should be >> 64 bits).\n//\n//       While we do check for consecutive ping nonces being generated, we still like the lower\n//       collision chance for repeated pings being sent.\n#[derive(Clone, Copy, DataSize, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]\npub(crate) struct Nonce(u64);\n\nimpl Display for Nonce {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"{:016X}\", self.0)\n    }\n}\n\nimpl rand::distributions::Distribution<Nonce> for rand::distributions::Standard {\n    #[inline(always)]\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Nonce {\n        Nonce(rng.gen())\n    }\n}\n\nimpl ConnectionHealth {\n    /// Creates a new connection health instance, recording when the connection was established.\n    pub(crate) fn new(connected_since: Instant) -> Self {\n        Self {\n            connected_since,\n            last_ping_sent: None,\n            last_pong_received: None,\n            invalid_pong_count: 0,\n            ping_timeouts: 0,\n        }\n    }\n}\n\nimpl ConnectionHealth {\n    /// Calculate the round-trip time, if possible.\n    pub(crate) fn calc_rrt(&self) -> Option<Duration> {\n        match (self.last_ping_sent, self.last_pong_received) {\n            (Some(last_ping), Some(last_pong)) if last_ping.nonce == last_pong.nonce => {\n                Some(last_pong.timestamp.duration_since(last_ping.timestamp))\n            }\n            _ => None,\n        }\n    }\n\n    /// Check current health status.\n    ///\n    /// This function must be polled periodically and returns a potential action to be performed.\n    pub(crate) fn update_health<R: Rng>(\n        &mut self,\n        rng: &mut R,\n        cfg: &HealthConfig,\n        now: Instant,\n    ) -> HealthCheckOutcome {\n        // Having received too many pongs should always result in a disconnect.\n        if 
self.invalid_pong_count > cfg.pong_limit {\n            return HealthCheckOutcome::GiveUp;\n        }\n\n        // Our honeymoon period is from first establishment of the connection until we send a ping.\n        if now.saturating_duration_since(self.connected_since) < cfg.ping_interval {\n            return HealthCheckOutcome::DoNothing;\n        }\n\n        let send_ping = match self.last_ping_sent {\n            Some(last_ping) => {\n                match self.last_pong_received {\n                    Some(prev_pong) if prev_pong.nonce() == last_ping.nonce() => {\n                        // Normal operation. The next ping should be sent in a regular interval\n                        // after receiving the last pong.\n                        now >= prev_pong.timestamp() + cfg.ping_interval\n                    }\n\n                    _ => {\n                        // No matching pong on record. Check if we need to timeout the ping.\n                        if now >= last_ping.timestamp() + cfg.ping_timeout {\n                            self.ping_timeouts += 1;\n                            // Clear the `last_ping_sent`, schedule another to be sent.\n                            self.last_ping_sent = None;\n                            true\n                        } else {\n                            false\n                        }\n                    }\n                }\n            }\n            None => true,\n        };\n\n        if send_ping {\n            if self.ping_timeouts > cfg.ping_retries as u32 {\n                // We have exceeded the timeouts and will give up as a result.\n                return HealthCheckOutcome::GiveUp;\n            }\n\n            let ping = loop {\n                let candidate = TaggedTimestamp::new(rng, now);\n\n                if let Some(prev) = self.last_ping_sent {\n                    if prev.nonce() == candidate.nonce() {\n                        // Ensure we don't produce consecutive pings.\n                 
       continue;\n                    }\n                }\n\n                break candidate;\n            };\n\n            self.last_ping_sent = Some(ping);\n            HealthCheckOutcome::SendPing(ping.nonce())\n        } else {\n            HealthCheckOutcome::DoNothing\n        }\n    }\n\n    /// Records a pong that has been received.\n    ///\n    /// If `true`, the maximum number of pongs has been exceeded and the peer should be banned.\n    pub(crate) fn record_pong(&mut self, cfg: &HealthConfig, tt: TaggedTimestamp) -> bool {\n        let is_valid_pong = match self.last_ping_sent {\n            Some(last_ping) if last_ping.nonce() == tt.nonce => {\n                // Check if we already received a pong for this ping, which is a protocol violation.\n                if self\n                    .last_pong_received\n                    .map(|existing| existing.nonce() == tt.nonce)\n                    .unwrap_or(false)\n                {\n                    // Pong is a collision, ban.\n                    return true;\n                }\n\n                if last_ping.timestamp() > tt.timestamp() {\n                    // Pong is from the past somehow, ignore it (probably a bug on our side).\n                    return false;\n                }\n\n                // The pong is valid if it is within the timeout period.\n                last_ping.timestamp() + cfg.ping_timeout >= tt.timestamp()\n            }\n            _ => {\n                // Either no ping has been sent yet, or the nonce did not match.\n                false\n            }\n        };\n\n        if is_valid_pong {\n            // Our pong is valid, reset invalid and ping count, then record it.\n            self.invalid_pong_count = 0;\n            self.ping_timeouts = 0;\n            self.last_pong_received = Some(tt);\n            false\n        } else {\n            self.invalid_pong_count += 1;\n            // If we have exceeded the invalid pong limit, ban.\n            
self.invalid_pong_count > cfg.pong_limit\n        }\n    }\n}\n\n/// The outcome of periodic health check.\n#[derive(Clone, Copy, Debug)]\n\npub(crate) enum HealthCheckOutcome {\n    /// Do nothing, as we recently took action.\n    DoNothing,\n    /// Send a ping with the given nonce.\n    SendPing(Nonce),\n    /// Give up on (i.e. terminate) the connection, as we exceeded the allowable ping limit.\n    GiveUp,\n}\n\nimpl LargestSpecimen for Nonce {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        Self(LargestSpecimen::largest_specimen(estimator, cache))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::{collections::HashSet, time::Duration};\n\n    use assert_matches::assert_matches;\n    use rand::Rng;\n\n    use super::{ConnectionHealth, HealthCheckOutcome, HealthConfig};\n    use crate::{\n        components::network::health::TaggedTimestamp, testing::test_clock::TestClock,\n        types::NodeRng,\n    };\n\n    impl HealthConfig {\n        pub(crate) fn test_config() -> Self {\n            // Note: These values are assumed in tests, so do not change them.\n            HealthConfig {\n                ping_interval: Duration::from_secs(5),\n                ping_timeout: Duration::from_secs(2),\n                ping_retries: 3,\n                pong_limit: 6,\n            }\n        }\n    }\n\n    struct Fixtures {\n        clock: TestClock,\n        cfg: HealthConfig,\n        rng: NodeRng,\n        health: ConnectionHealth,\n    }\n\n    /// Sets up fixtures used in almost every test.\n    fn fixtures() -> Fixtures {\n        let clock = TestClock::new();\n        let cfg = HealthConfig::test_config();\n        let rng = crate::new_rng();\n\n        let health = ConnectionHealth::new(clock.now());\n\n        Fixtures {\n            clock,\n            cfg,\n            rng,\n            health,\n        }\n    }\n\n    #[test]\n    fn scenario_no_response() {\n        let Fixtures {\n            mut clock,\n   
         cfg,\n            mut rng,\n            mut health,\n        } = fixtures();\n\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::DoNothing\n        );\n\n        // Repeated checks should not change the outcome.\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::DoNothing\n        );\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::DoNothing\n        );\n\n        // After 4.9 seconds, we still do not send a ping.\n        clock.advance(Duration::from_millis(4900));\n\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::DoNothing\n        );\n\n        // At 5, we expect our first ping.\n        clock.advance(Duration::from_millis(100));\n\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(_)\n        );\n\n        // Checking health again should not result in another ping.\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::DoNothing\n        );\n\n        clock.advance(Duration::from_millis(100));\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::DoNothing\n        );\n\n        // After two seconds, we expect another ping to be sent, due to timeouts.\n        clock.advance(Duration::from_millis(2000));\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(_)\n        );\n\n        // At this point, two pings have been sent. 
Configuration says to retry 3 times, so a total\n        // of five pings is expected.\n        clock.advance(Duration::from_millis(2000));\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(_)\n        );\n\n        clock.advance(Duration::from_millis(2000));\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(_)\n        );\n\n        // Finally, without receiving a ping at all, we give up.\n        clock.advance(Duration::from_millis(2000));\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::GiveUp\n        );\n    }\n\n    #[test]\n    fn pings_use_different_nonces() {\n        let Fixtures {\n            mut clock,\n            cfg,\n            mut rng,\n            mut health,\n        } = fixtures();\n\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::DoNothing\n        );\n        clock.advance(Duration::from_secs(5));\n\n        let mut nonce_set = HashSet::new();\n\n        nonce_set.insert(assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(nonce) => nonce\n        ));\n        clock.advance(Duration::from_secs(2));\n\n        nonce_set.insert(assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(nonce) => nonce\n        ));\n        clock.advance(Duration::from_secs(2));\n\n        nonce_set.insert(assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(nonce) => nonce\n        ));\n        clock.advance(Duration::from_secs(2));\n\n        nonce_set.insert(assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            
HealthCheckOutcome::SendPing(nonce) => nonce\n        ));\n\n        // Since it is a set, we expect less than 4 items if there were any duplicates.\n        assert_eq!(nonce_set.len(), 4);\n    }\n\n    #[test]\n    fn scenario_all_working() {\n        let Fixtures {\n            mut clock,\n            cfg,\n            mut rng,\n            mut health,\n        } = fixtures();\n\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::DoNothing\n        );\n\n        // At 5 seconds, we expect our first ping.\n        clock.advance(Duration::from_secs(5));\n\n        let nonce_1 = assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(nonce) => nonce\n        );\n\n        // Record a reply 500 ms later.\n        clock.advance(Duration::from_millis(500));\n        assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), nonce_1)));\n\n        // Our next ping should be 5 seconds later, not 4.5.\n        clock.advance(Duration::from_millis(4500));\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::DoNothing\n        );\n        clock.advance(Duration::from_millis(500));\n\n        let nonce_2 = assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(nonce) => nonce\n        );\n\n        // We test an edge case here where we use the same timestamp for the received pong.\n        clock.advance(Duration::from_millis(500));\n        assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), nonce_2)));\n\n        // Afterwards, no ping should be sent.\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::DoNothing\n        );\n\n        // Do 1000 additional ping/pongs.\n        for _ in 0..1000 
{\n            clock.advance(Duration::from_millis(5000));\n            let nonce = assert_matches!(\n                health.update_health(&mut rng, &cfg, clock.now()),\n                HealthCheckOutcome::SendPing(nonce) => nonce\n            );\n            assert_matches!(\n                health.update_health(&mut rng, &cfg, clock.now()),\n                HealthCheckOutcome::DoNothing\n            );\n\n            clock.advance(Duration::from_millis(250));\n            assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), nonce)));\n\n            assert_matches!(\n                health.update_health(&mut rng, &cfg, clock.now()),\n                HealthCheckOutcome::DoNothing\n            );\n        }\n    }\n\n    #[test]\n    fn scenario_intermittent_failures() {\n        let Fixtures {\n            mut clock,\n            cfg,\n            mut rng,\n            mut health,\n        } = fixtures();\n\n        // We miss two pings initially, before recovering.\n        clock.advance(Duration::from_secs(5));\n\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(_)\n        );\n\n        clock.advance(Duration::from_secs(2));\n\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(_)\n        );\n\n        clock.advance(Duration::from_secs(2));\n\n        let nonce_1 = assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(nonce) => nonce\n        );\n\n        clock.advance(Duration::from_secs(1));\n        assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), nonce_1)));\n\n        // We successfully \"recovered\", this should reset our ping counts. 
Miss three pings before\n        // successfully receiving a pong from 4th from here on out.\n        clock.advance(Duration::from_millis(5500));\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(_)\n        );\n        clock.advance(Duration::from_millis(2500));\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(_)\n        );\n        clock.advance(Duration::from_millis(2500));\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(_)\n        );\n        clock.advance(Duration::from_millis(2500));\n        let nonce_2 = assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(nonce) => nonce\n        );\n        clock.advance(Duration::from_millis(500));\n        assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), nonce_2)));\n\n        // This again should reset. 
We miss four more pings and are disconnected.\n        clock.advance(Duration::from_millis(5500));\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(_)\n        );\n        clock.advance(Duration::from_millis(2500));\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(_)\n        );\n        clock.advance(Duration::from_millis(2500));\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(_)\n        );\n        clock.advance(Duration::from_millis(2500));\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(_)\n        );\n        clock.advance(Duration::from_millis(2500));\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::GiveUp\n        );\n    }\n\n    #[test]\n    fn ignores_unwanted_pongs() {\n        let Fixtures {\n            mut clock,\n            cfg,\n            mut rng,\n            mut health,\n        } = fixtures();\n\n        clock.advance(Duration::from_secs(5));\n\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(_)\n        );\n\n        // Make the `ConnectionHealth` receive some unasked pongs, without exceeding the unasked\n        // pong limit.\n        assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen())));\n        assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen())));\n        assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen())));\n        assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen())));\n        assert!(!health.record_pong(&cfg, 
TaggedTimestamp::from_parts(clock.now(), rng.gen())));\n\n        // The retry delay is 2 seconds (instead of 5 for the next pong after success), so ensure\n        // we retry due to not having received the correct nonce in the pong.\n\n        clock.advance(Duration::from_secs(2));\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(_)\n        );\n    }\n\n    #[test]\n    fn ensure_excessive_pongs_result_in_ban() {\n        let Fixtures {\n            mut clock,\n            cfg,\n            mut rng,\n            mut health,\n        } = fixtures();\n\n        clock.advance(Duration::from_secs(5));\n\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(_)\n        );\n\n        // Make the `ConnectionHealth` receive some unasked pongs, without exceeding the unasked\n        // pong limit.\n        assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen())));\n        assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen())));\n        assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen())));\n        assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen())));\n        assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen())));\n        assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen())));\n        // 6 unasked pongs is still okay.\n\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::DoNothing\n        );\n\n        assert!(health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen())));\n        // 7 is too much.\n\n        // For good measure, we expect the health check to also output a disconnect instruction.\n        assert_matches!(\n     
       health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::GiveUp\n        );\n    }\n\n    #[test]\n    fn time_reversal_does_not_crash_but_is_ignored() {\n        // Usually a pong for a given (or any) nonce should always be received with a timestamp\n        // equal or later than the ping sent out. Due to a programming error or a lucky attacker +\n        // scheduling issue, there is a very minute chance this can actually happen.\n        //\n        // In these cases, the pongs should just be discarded, not crashing due to a underflow in\n        // the comparison.\n        let Fixtures {\n            mut clock,\n            cfg,\n            mut rng,\n            mut health,\n        } = fixtures();\n\n        clock.advance(Duration::from_secs(5)); // t = 5\n\n        let nonce_1 = assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(nonce) => nonce\n        );\n\n        // Ignore the nonce if sent in the past (and also don't crash).\n        clock.rewind(Duration::from_secs(1)); // t = 4\n        assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), nonce_1)));\n        assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen())));\n\n        // Another ping should be sent out, since `nonce_1` was ignored.\n        clock.advance(Duration::from_secs(3)); // t = 7\n        let nonce_2 = assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(nonce) => nonce\n        );\n\n        // Nonce 2 will be received seemingly before the connection was even established.\n        clock.rewind(Duration::from_secs(3600));\n        assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), nonce_2)));\n    }\n\n    #[test]\n    fn handles_missed_health_checks() {\n        let Fixtures {\n            mut clock,\n            cfg,\n            mut 
rng,\n            mut health,\n        } = fixtures();\n\n        clock.advance(Duration::from_secs(15));\n\n        // We initially exceed our scheduled first ping by 10 seconds. This will cause the ping to\n        // be sent right there and then.\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(_)\n        );\n\n        // Going forward 1 second should not change anything.\n        clock.advance(Duration::from_secs(1));\n\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::DoNothing\n        );\n\n        // After another second, two seconds have passed since sending the first ping in total, so\n        // send another one.\n        clock.advance(Duration::from_secs(1));\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(_)\n        );\n\n        // We have missed two pings total, now wait an hour. 
This will trigger the third ping.\n        clock.advance(Duration::from_secs(3600));\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(_)\n        );\n\n        // Fourth right after\n        clock.advance(Duration::from_secs(2));\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(_)\n        );\n\n        // Followed by a disconnect.\n        clock.advance(Duration::from_secs(2));\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::GiveUp\n        );\n    }\n\n    #[test]\n    fn ignores_time_travel() {\n        // Any call of the health update with timestamps that are provably from the past (i.e.\n        // before a recorded timestamp like a previous ping) should be ignored.\n\n        let Fixtures {\n            mut clock,\n            cfg,\n            mut rng,\n            mut health,\n        } = fixtures();\n\n        clock.advance(Duration::from_secs(5)); // t = 5\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(_)\n        );\n\n        clock.rewind(Duration::from_secs(3)); // t = 2\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::DoNothing\n        );\n\n        clock.advance(Duration::from_secs(4)); // t = 6\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::DoNothing\n        );\n        clock.advance(Duration::from_secs(1)); // t = 7\n\n        assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(_)\n        );\n    }\n\n    #[test]\n    fn duplicate_pong_immediately_terminates() {\n        let Fixtures {\n            mut 
clock,\n            cfg,\n            mut rng,\n            mut health,\n        } = fixtures();\n\n        clock.advance(Duration::from_secs(5));\n        let nonce_1 = assert_matches!(\n            health.update_health(&mut rng, &cfg, clock.now()),\n            HealthCheckOutcome::SendPing(nonce) => nonce\n        );\n\n        clock.advance(Duration::from_secs(1));\n\n        // Recording the pong once is fine, but the second time should result in a ban.\n        assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), nonce_1)));\n        assert!(health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), nonce_1)));\n    }\n}\n"
  },
  {
    "path": "node/src/components/network/identity.rs",
    "content": "use std::sync::Arc;\n\nuse datasize::DataSize;\nuse openssl::{\n    error::ErrorStack as OpenSslErrorStack,\n    pkey::{PKey, Private},\n    x509::X509,\n};\nuse thiserror::Error;\nuse tracing::warn;\n\nuse super::{Config, IdentityConfig};\nuse crate::{\n    tls::{self, LoadCertError, LoadSecretKeyError, TlsCert, ValidationError},\n    types::NodeId,\n    WithDir,\n};\n\n#[derive(Debug, Error)]\npub(crate) enum Error {\n    #[error(\"could not generate TLS certificate: {0}\")]\n    CouldNotGenerateTlsCertificate(OpenSslErrorStack),\n    #[error(transparent)]\n    Validation(#[from] ValidationError),\n    #[error(transparent)]\n    LoadCert(#[from] LoadCertError),\n    #[error(transparent)]\n    LoadSecretKey(#[from] LoadSecretKeyError),\n}\n\n/// An ephemeral [PKey<Private>] and [TlsCert] that identifies this node\n#[derive(DataSize, Debug, Clone)]\npub(crate) struct Identity {\n    pub(super) secret_key: Arc<PKey<Private>>,\n    pub(super) tls_certificate: Arc<TlsCert>,\n    pub(super) network_ca: Option<Arc<X509>>,\n}\n\nimpl Identity {\n    fn new(secret_key: PKey<Private>, tls_certificate: TlsCert, network_ca: Option<X509>) -> Self {\n        Self {\n            secret_key: Arc::new(secret_key),\n            tls_certificate: Arc::new(tls_certificate),\n            network_ca: network_ca.map(Arc::new),\n        }\n    }\n\n    pub(crate) fn from_config(config: WithDir<Config>) -> Result<Self, Error> {\n        match &config.value().identity {\n            Some(identity) => Self::from_identity_config(identity),\n            None => Self::with_generated_certs(),\n        }\n    }\n\n    fn from_identity_config(identity_config: &IdentityConfig) -> Result<Self, Error> {\n        let not_yet_validated_x509_cert = tls::load_cert(&identity_config.tls_certificate)?;\n        let secret_key = tls::load_secret_key(&identity_config.secret_key)?;\n        let x509_cert = tls::tls_cert_from_x509(not_yet_validated_x509_cert)?;\n\n        // Load a ca 
certificate (if present)\n        let network_ca = tls::load_cert(&identity_config.ca_certificate)?;\n\n        // A quick sanity check for the loaded cert against supplied CA.\n        tls::validate_cert_with_authority(x509_cert.as_x509().clone(), &network_ca).map_err(\n            |error| {\n                warn!(%error, \"the given node certificate is not signed by the network CA\");\n                Error::Validation(error)\n            },\n        )?;\n\n        Ok(Identity::new(secret_key, x509_cert, Some(network_ca)))\n    }\n\n    pub(crate) fn with_generated_certs() -> Result<Self, Error> {\n        let (not_yet_validated_x509_cert, secret_key) =\n            tls::generate_node_cert().map_err(Error::CouldNotGenerateTlsCertificate)?;\n        let tls_certificate = tls::validate_self_signed_cert(not_yet_validated_x509_cert)?;\n        Ok(Identity::new(secret_key, tls_certificate, None))\n    }\n}\n\nimpl From<&Identity> for NodeId {\n    fn from(identity: &Identity) -> Self {\n        NodeId::from(identity.tls_certificate.public_key_fingerprint())\n    }\n}\n"
  },
  {
    "path": "node/src/components/network/insights.rs",
    "content": "//! Networking debug insights.\n//!\n//! The `insights` module exposes some internals of the networking component, mainly for inspection\n//! through the diagnostics console. It should specifically not be used for any business logic and\n//! affordances made in other corners of the `network` module to allow collecting these\n//! insights should neither be abused just because they are available.\n\nuse std::{\n    collections::{BTreeSet, HashSet},\n    fmt::{self, Debug, Display, Formatter},\n    net::SocketAddr,\n    sync::atomic::Ordering,\n    time::{Duration, SystemTime},\n};\n\nuse casper_types::{DisplayIter, EraId, PublicKey};\nuse serde::Serialize;\n\nuse crate::{\n    types::NodeId,\n    utils::{opt_display::OptDisplay, TimeAnchor},\n};\n\nuse super::{\n    error::ConnectionError, outgoing::OutgoingState, symmetry::ConnectionSymmetry, Network,\n    OutgoingHandle, Payload,\n};\n\n/// A collection of insights into the active networking component.\n#[derive(Debug, Serialize)]\npub(crate) struct NetworkInsights {\n    /// The nodes current ID.\n    our_id: NodeId,\n    /// Whether or not a network CA was present (is a private network).\n    network_ca: bool,\n    /// The public address of the node.\n    public_addr: Option<SocketAddr>,\n    /// Whether or not the node is syncing.\n    is_syncing: bool,\n    /// The active era as seen by the networking component.\n    net_active_era: EraId,\n    /// The list of node IDs that are being preferred due to being active validators.\n    privileged_active_outgoing_nodes: Option<HashSet<PublicKey>>,\n    /// The list of node IDs that are being preferred due to being upcoming validators.\n    privileged_upcoming_outgoing_nodes: Option<HashSet<PublicKey>>,\n    /// The amount of bandwidth allowance currently buffered, ready to be spent.\n    unspent_bandwidth_allowance_bytes: Option<i64>,\n    /// Map of outgoing connections, along with their current state.\n    outgoing_connections: Vec<(SocketAddr, 
OutgoingInsight)>,\n    /// Map of incoming connections.\n    connection_symmetries: Vec<(NodeId, ConnectionSymmetryInsight)>,\n}\n\n/// Insight into an outgoing connection.\n#[derive(Debug, Serialize)]\nstruct OutgoingInsight {\n    /// Whether or not the address is marked unforgettable.\n    unforgettable: bool,\n    /// The current connection state.\n    state: OutgoingStateInsight,\n}\n\n/// The state of an outgoing connection, reduced to exportable insights.\n#[derive(Debug, Serialize)]\nenum OutgoingStateInsight {\n    Connecting {\n        failures_so_far: u8,\n        since: SystemTime,\n    },\n    Waiting {\n        failures_so_far: u8,\n        error: Option<String>,\n        last_failure: SystemTime,\n    },\n    Connected {\n        peer_id: NodeId,\n        peer_addr: SocketAddr,\n        last_ping_sent: Option<SystemTime>,\n        last_pong_received: Option<SystemTime>,\n        invalid_pong_count: u32,\n        rtt: Option<Duration>,\n    },\n    Blocked {\n        since: SystemTime,\n        justification: String,\n        until: SystemTime,\n    },\n    Loopback,\n}\n\nfn time_delta(now: SystemTime, then: SystemTime) -> impl Display {\n    OptDisplay::new(\n        now.duration_since(then)\n            .map(humantime::format_duration)\n            .ok(),\n        \"err\",\n    )\n}\n\nimpl OutgoingStateInsight {\n    /// Constructs a new outgoing state insight from a given outgoing state.\n    fn from_outgoing_state<P>(\n        anchor: &TimeAnchor,\n        state: &OutgoingState<OutgoingHandle<P>, ConnectionError>,\n    ) -> Self {\n        match state {\n            OutgoingState::Connecting {\n                failures_so_far,\n                since,\n            } => OutgoingStateInsight::Connecting {\n                failures_so_far: *failures_so_far,\n                since: anchor.convert(*since),\n            },\n            OutgoingState::Waiting {\n                failures_so_far,\n                error,\n                last_failure,\n   
         } => OutgoingStateInsight::Waiting {\n                failures_so_far: *failures_so_far,\n                error: error.as_ref().map(ToString::to_string),\n                last_failure: anchor.convert(*last_failure),\n            },\n            OutgoingState::Connected {\n                peer_id,\n                handle,\n                health,\n            } => OutgoingStateInsight::Connected {\n                peer_id: *peer_id,\n                peer_addr: handle.peer_addr,\n                last_ping_sent: health\n                    .last_ping_sent\n                    .map(|tt| anchor.convert(tt.timestamp())),\n                last_pong_received: health\n                    .last_pong_received\n                    .map(|tt| anchor.convert(tt.timestamp())),\n                invalid_pong_count: health.invalid_pong_count,\n                rtt: health.calc_rrt(),\n            },\n            OutgoingState::Blocked {\n                since,\n                justification,\n                until,\n            } => OutgoingStateInsight::Blocked {\n                since: anchor.convert(*since),\n                justification: justification.to_string(),\n                until: anchor.convert(*until),\n            },\n            OutgoingState::Loopback => OutgoingStateInsight::Loopback,\n        }\n    }\n\n    /// Formats the outgoing state insight with times relative to a given timestamp.\n    fn fmt_time_relative(&self, now: SystemTime, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            OutgoingStateInsight::Connecting {\n                failures_so_far,\n                since,\n            } => write!(\n                f,\n                \"connecting (fails: {}), since {}\",\n                failures_so_far,\n                time_delta(now, *since)\n            ),\n            OutgoingStateInsight::Waiting {\n                failures_so_far,\n                error,\n                last_failure,\n            } => write!(\n          
      f,\n                \"waiting (fails: {}, last error: {}), since {}\",\n                failures_so_far,\n                OptDisplay::new(error.as_ref(), \"none\"),\n                time_delta(now, *last_failure)\n            ),\n            OutgoingStateInsight::Connected {\n                peer_id,\n                peer_addr,\n                last_ping_sent,\n                last_pong_received,\n                invalid_pong_count,\n                rtt,\n            } => {\n                let rtt_ms = rtt.map(|duration| duration.as_millis());\n\n                write!(\n                    f,\n                    \"connected -> {} @ {} (rtt {}, invalid {}, last ping/pong {}/{})\",\n                    peer_id,\n                    peer_addr,\n                    OptDisplay::new(rtt_ms, \"?\"),\n                    invalid_pong_count,\n                    OptDisplay::new(last_ping_sent.map(|t| time_delta(now, t)), \"-\"),\n                    OptDisplay::new(last_pong_received.map(|t| time_delta(now, t)), \"-\"),\n                )\n            }\n            OutgoingStateInsight::Blocked {\n                since,\n                justification,\n                until,\n            } => {\n                write!(\n                    f,\n                    \"blocked since {}, until {}: {}\",\n                    time_delta(now, *since),\n                    time_delta(now, *until),\n                    justification\n                )\n            }\n            OutgoingStateInsight::Loopback => f.write_str(\"loopback\"),\n        }\n    }\n}\n\n/// Describes whether a connection is uni- or bi-directional.\n#[derive(Debug, Serialize)]\npub(super) enum ConnectionSymmetryInsight {\n    IncomingOnly {\n        since: SystemTime,\n        peer_addrs: BTreeSet<SocketAddr>,\n    },\n    OutgoingOnly {\n        since: SystemTime,\n    },\n    Symmetric {\n        peer_addrs: BTreeSet<SocketAddr>,\n    },\n    Gone,\n}\n\nimpl ConnectionSymmetryInsight {\n    /// 
Creates a new insight from a given connection symmetry.\n    fn from_connection_symmetry(anchor: &TimeAnchor, sym: &ConnectionSymmetry) -> Self {\n        match sym {\n            ConnectionSymmetry::IncomingOnly { since, peer_addrs } => {\n                ConnectionSymmetryInsight::IncomingOnly {\n                    since: anchor.convert(*since),\n                    peer_addrs: peer_addrs.clone(),\n                }\n            }\n            ConnectionSymmetry::OutgoingOnly { since } => ConnectionSymmetryInsight::OutgoingOnly {\n                since: anchor.convert(*since),\n            },\n            ConnectionSymmetry::Symmetric { peer_addrs } => ConnectionSymmetryInsight::Symmetric {\n                peer_addrs: peer_addrs.clone(),\n            },\n            ConnectionSymmetry::Gone => ConnectionSymmetryInsight::Gone,\n        }\n    }\n\n    /// Formats the connection symmetry insight with times relative to a given timestamp.\n    fn fmt_time_relative(&self, now: SystemTime, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            ConnectionSymmetryInsight::IncomingOnly { since, peer_addrs } => write!(\n                f,\n                \"<- {} (since {})\",\n                DisplayIter::new(peer_addrs.iter()),\n                time_delta(now, *since)\n            ),\n            ConnectionSymmetryInsight::OutgoingOnly { since } => {\n                write!(f, \"-> (since {})\", time_delta(now, *since))\n            }\n            ConnectionSymmetryInsight::Symmetric { peer_addrs } => {\n                write!(f, \"<> {}\", DisplayIter::new(peer_addrs.iter()))\n            }\n            ConnectionSymmetryInsight::Gone => f.write_str(\"gone\"),\n        }\n    }\n}\n\nimpl NetworkInsights {\n    /// Collect networking insights from a given networking component.\n    pub(super) fn collect_from_component<REv, P>(net: &Network<REv, P>) -> Self\n    where\n        P: Payload,\n    {\n        // Since we are at the top level of the 
component, we gain access to inner values of the\n        // respective structs. We abuse this to gain debugging insights. Note: If limiters are no\n        // longer a `trait`, the trait methods can be removed as well in favor of direct access.\n        let (privileged_active_outgoing_nodes, privileged_upcoming_outgoing_nodes) = net\n            .outgoing_limiter\n            .debug_inspect_validators(&net.active_era)\n            .map(|(a, b)| (Some(a), Some(b)))\n            .unwrap_or_default();\n\n        let anchor = TimeAnchor::now();\n\n        let outgoing_connections = net\n            .outgoing_manager\n            .outgoing\n            .iter()\n            .map(|(addr, outgoing)| {\n                let state = OutgoingStateInsight::from_outgoing_state(&anchor, &outgoing.state);\n                (\n                    *addr,\n                    OutgoingInsight {\n                        unforgettable: outgoing.is_unforgettable,\n                        state,\n                    },\n                )\n            })\n            .collect();\n\n        let connection_symmetries = net\n            .connection_symmetries\n            .iter()\n            .map(|(id, sym)| {\n                (\n                    *id,\n                    ConnectionSymmetryInsight::from_connection_symmetry(&anchor, sym),\n                )\n            })\n            .collect();\n\n        NetworkInsights {\n            our_id: net.context.our_id(),\n            network_ca: net.context.network_ca().is_some(),\n            public_addr: net.context.public_addr(),\n            is_syncing: net.context.is_syncing().load(Ordering::Relaxed),\n            net_active_era: net.active_era,\n            privileged_active_outgoing_nodes,\n            privileged_upcoming_outgoing_nodes,\n            unspent_bandwidth_allowance_bytes: net\n                .outgoing_limiter\n                .debug_inspect_unspent_allowance(),\n            outgoing_connections,\n            
connection_symmetries,\n        }\n    }\n}\n\nimpl Display for NetworkInsights {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        let now = SystemTime::now();\n\n        if !self.network_ca {\n            f.write_str(\"Public \")?;\n        } else {\n            f.write_str(\"Private \")?;\n        }\n        writeln!(\n            f,\n            \"node {} @ {:?} (syncing: {})\",\n            self.our_id, self.public_addr, self.is_syncing\n        )?;\n        writeln!(\n            f,\n            \"active era: {} unspent_bandwidth_allowance_bytes: {}\",\n            self.net_active_era,\n            OptDisplay::new(self.unspent_bandwidth_allowance_bytes, \"inactive\"),\n        )?;\n        let active = self\n            .privileged_active_outgoing_nodes\n            .as_ref()\n            .map(HashSet::iter)\n            .map(DisplayIter::new);\n        writeln!(\n            f,\n            \"privileged active: {}\",\n            OptDisplay::new(active, \"inactive\")\n        )?;\n        let upcoming = self\n            .privileged_upcoming_outgoing_nodes\n            .as_ref()\n            .map(HashSet::iter)\n            .map(DisplayIter::new);\n        writeln!(\n            f,\n            \"privileged upcoming: {}\",\n            OptDisplay::new(upcoming, \"inactive\")\n        )?;\n\n        f.write_str(\"outgoing connections:\\n\")?;\n        writeln!(f, \"address                  uf     state\")?;\n        for (addr, outgoing) in &self.outgoing_connections {\n            write!(f, \"{:23}  {:5}  \", addr, outgoing.unforgettable,)?;\n            outgoing.state.fmt_time_relative(now, f)?;\n            f.write_str(\"\\n\")?;\n        }\n\n        f.write_str(\"connection symmetries:\\n\")?;\n        writeln!(f, \"peer ID         symmetry\")?;\n        for (peer_id, symmetry) in &self.connection_symmetries {\n            write!(f, \"{:10}  \", peer_id)?;\n            symmetry.fmt_time_relative(now, f)?;\n            
f.write_str(\"\\n\")?;\n        }\n\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "node/src/components/network/limiter.rs",
    "content": "//! Resource limiters\n//!\n//! Resource limiters restrict the usable amount of a resource through slowing down the request rate\n//! by making each user request an allowance first.\n\nuse std::{\n    collections::{HashMap, HashSet},\n    sync::{Arc, RwLock},\n    time::{Duration, Instant},\n};\n\nuse prometheus::Counter;\nuse tokio::{runtime::Handle, sync::Mutex, task};\nuse tracing::{error, trace, warn};\n\nuse casper_types::{EraId, PublicKey};\n\nuse crate::types::{NodeId, ValidatorMatrix};\n\n/// Amount of resource allowed to buffer in `Limiter`.\nconst STORED_BUFFER_SECS: Duration = Duration::from_secs(2);\n\n/// A limiter dividing resources into two classes based on their validator status.\n///\n/// Any consumer of a specific resource is expected to call `create_handle` for every peer and use\n/// the returned handle to request a access to a resource.\n///\n/// Imposes a limit on non-validator resources while not limiting active validator resources at all.\n#[derive(Debug)]\npub(super) struct Limiter {\n    /// Shared data across all handles.\n    data: Arc<LimiterData>,\n    /// Set of active and upcoming validators shared across all handles.\n    validator_matrix: ValidatorMatrix,\n}\n\nimpl Limiter {\n    /// Creates a new class based limiter.\n    ///\n    /// Starts the background worker task as well.\n    pub(super) fn new(\n        resources_per_second: u32,\n        wait_time_sec: Counter,\n        validator_matrix: ValidatorMatrix,\n    ) -> Self {\n        Limiter {\n            data: Arc::new(LimiterData::new(resources_per_second, wait_time_sec)),\n            validator_matrix,\n        }\n    }\n\n    /// Create a handle for a connection using the given peer and optional consensus key.\n    pub(super) fn create_handle(\n        &self,\n        peer_id: NodeId,\n        consensus_key: Option<PublicKey>,\n    ) -> LimiterHandle {\n        if let Some(public_key) = consensus_key.as_ref().cloned() {\n            match 
self.data.connected_validators.write() {\n                Ok(mut connected_validators) => {\n                    let _ = connected_validators.insert(peer_id, public_key);\n                }\n                Err(_) => {\n                    error!(\n                        \"could not update connected validator data set of limiter, lock poisoned\"\n                    );\n                }\n            }\n        }\n        LimiterHandle {\n            data: self.data.clone(),\n            validator_matrix: self.validator_matrix.clone(),\n            consumer_id: ConsumerId {\n                _peer_id: peer_id,\n                consensus_key,\n            },\n        }\n    }\n\n    pub(super) fn remove_connected_validator(&self, peer_id: &NodeId) {\n        match self.data.connected_validators.write() {\n            Ok(mut connected_validators) => {\n                let _ = connected_validators.remove(peer_id);\n            }\n            Err(_) => {\n                error!(\n                    \"could not remove connected validator from data set of limiter, lock poisoned\"\n                );\n            }\n        }\n    }\n\n    pub(super) fn is_validator_in_era(&self, era: EraId, peer_id: &NodeId) -> bool {\n        let public_key = match self.data.connected_validators.read() {\n            Ok(connected_validators) => match connected_validators.get(peer_id) {\n                None => return false,\n                Some(public_key) => public_key.clone(),\n            },\n            Err(_) => {\n                error!(\"could not read from connected_validators of limiter, lock poisoned\");\n                return false;\n            }\n        };\n\n        match self.validator_matrix.is_validator_in_era(era, &public_key) {\n            None => {\n                warn!(%era, \"missing validator weights for given era\");\n                false\n            }\n            Some(is_validator) => is_validator,\n        }\n    }\n\n    pub(super) fn 
debug_inspect_unspent_allowance(&self) -> Option<i64> {\n        Some(task::block_in_place(move || {\n            Handle::current().block_on(async move { self.data.resources.lock().await.available })\n        }))\n    }\n\n    pub(super) fn debug_inspect_validators(\n        &self,\n        current_era: &EraId,\n    ) -> Option<(HashSet<PublicKey>, HashSet<PublicKey>)> {\n        Some((\n            self.validator_keys_for_era(current_era),\n            self.validator_keys_for_era(&current_era.successor()),\n        ))\n    }\n\n    fn validator_keys_for_era(&self, era: &EraId) -> HashSet<PublicKey> {\n        self.validator_matrix\n            .validator_weights(*era)\n            .map(|validator_weights| validator_weights.validator_public_keys().cloned().collect())\n            .unwrap_or_default()\n    }\n}\n\n/// The limiter's state.\n#[derive(Debug)]\nstruct LimiterData {\n    /// Number of resource units to allow for non-validators per second.\n    resources_per_second: u32,\n    /// A mapping from node IDs to public keys of validators to which we have an outgoing\n    /// connection.\n    connected_validators: RwLock<HashMap<NodeId, PublicKey>>,\n    /// Information about available resources.\n    resources: Mutex<ResourceData>,\n    /// Total time spent waiting.\n    wait_time_sec: Counter,\n}\n\n/// Resource data.\n#[derive(Debug)]\nstruct ResourceData {\n    /// How many resource units are buffered.\n    ///\n    /// May go negative in the case of a deficit.\n    available: i64,\n    /// Last time resource data was refilled.\n    last_refill: Instant,\n}\n\nimpl LimiterData {\n    /// Creates a new set of class based limiter data.\n    ///\n    /// Initial resources will be initialized to 0, with the last refill set to the current time.\n    fn new(resources_per_second: u32, wait_time_sec: Counter) -> Self {\n        LimiterData {\n            resources_per_second,\n            connected_validators: Default::default(),\n            resources: 
Mutex::new(ResourceData {\n                available: 0,\n                last_refill: Instant::now(),\n            }),\n            wait_time_sec,\n        }\n    }\n}\n\n/// Peer class for the `Limiter`.\nenum PeerClass {\n    /// A validator.\n    Validator,\n    /// Unclassified/low-priority peer.\n    NonValidator,\n}\n\n/// A per-peer handle for `Limiter`.\n#[derive(Debug)]\npub(super) struct LimiterHandle {\n    /// Data shared between handles and limiter.\n    data: Arc<LimiterData>,\n    /// Set of active and upcoming validators.\n    validator_matrix: ValidatorMatrix,\n    /// Consumer ID for the sender holding this handle.\n    consumer_id: ConsumerId,\n}\n\nimpl LimiterHandle {\n    /// Waits until the requester is allocated `amount` additional resources.\n    pub(super) async fn request_allowance(&self, amount: u32) {\n        // As a first step, determine the peer class by checking if our id is in the validator set.\n\n        if self.validator_matrix.is_empty() {\n            // It is likely that we have not been initialized, thus no node is getting the\n            // reserved resources. 
In this case, do not limit at all.\n            trace!(\"empty set of validators, not limiting resources at all\");\n\n            return;\n        }\n\n        let peer_class = if let Some(ref public_key) = self.consumer_id.consensus_key {\n            if self\n                .validator_matrix\n                .is_active_or_upcoming_validator(public_key)\n            {\n                PeerClass::Validator\n            } else {\n                PeerClass::NonValidator\n            }\n        } else {\n            PeerClass::NonValidator\n        };\n\n        match peer_class {\n            PeerClass::Validator => {\n                // No limit imposed on validators.\n            }\n            PeerClass::NonValidator => {\n                if self.data.resources_per_second == 0 {\n                    return;\n                }\n\n                let max_stored_resource = ((self.data.resources_per_second as f64)\n                    * STORED_BUFFER_SECS.as_secs_f64())\n                    as u32;\n\n                // We are a low-priority sender. Obtain a lock on the resources and wait an\n                // appropriate amount of time to fill them up.\n                {\n                    let mut resources = self.data.resources.lock().await;\n\n                    while resources.available < 0 {\n                        // Determine time delta since last refill.\n                        let now = Instant::now();\n                        let elapsed = now - resources.last_refill;\n                        resources.last_refill = now;\n\n                        // Add appropriate amount of resources, capped at `max_stored_bytes`. 
We\n                        // are still maintaining the lock here to avoid issues with other\n                        // low-priority requestors.\n                        resources.available += ((elapsed.as_nanos()\n                            * self.data.resources_per_second as u128)\n                            / 1_000_000_000) as i64;\n                        resources.available = resources.available.min(max_stored_resource as i64);\n\n                        // If we do not have enough resources available, sleep until we do.\n                        if resources.available < 0 {\n                            let estimated_time_remaining = Duration::from_millis(\n                                (-resources.available) as u64 * 1000\n                                    / self.data.resources_per_second as u64,\n                            );\n\n                            // Note: This sleep call is the reason we are using a tokio mutex\n                            //       instead of a regular `std` one, as we are holding it across the\n                            //       await point here.\n                            tokio::time::sleep(estimated_time_remaining).await;\n                            self.data\n                                .wait_time_sec\n                                .inc_by(estimated_time_remaining.as_secs_f64());\n                        }\n                    }\n\n                    // Subtract the amount. 
If available resources go negative as a result, it\n                    // is the next sender's problem.\n                    resources.available -= amount as i64;\n                }\n            }\n        }\n    }\n}\n\n/// An identity for a consumer.\n#[derive(Debug)]\nstruct ConsumerId {\n    /// The peer's ID.\n    _peer_id: NodeId,\n    /// The remote node's public consensus key.\n    consensus_key: Option<PublicKey>,\n}\n\n#[cfg(test)]\nmod tests {\n    use std::{sync::Arc, time::Duration};\n\n    use casper_types::{ChainNameDigest, EraId, SecretKey};\n    use num_rational::Ratio;\n    use prometheus::Counter;\n    use tokio::time::Instant;\n\n    use super::{Limiter, NodeId, PublicKey};\n    use crate::{testing::init_logging, types::ValidatorMatrix};\n\n    /// Something that happens almost immediately, with some allowance for test jitter.\n    const SHORT_TIME: Duration = Duration::from_millis(250);\n\n    /// Creates a new counter for testing.\n    fn new_wait_time_sec() -> Counter {\n        Counter::new(\"test_time_waiting\", \"wait time counter used in tests\")\n            .expect(\"could not create new counter\")\n    }\n\n    #[tokio::test]\n    async fn unlimited_limiter_is_unlimited() {\n        let mut rng = crate::new_rng();\n\n        // We insert one unrelated active validator to avoid triggering the automatic disabling of\n        // the limiter in case there are no active validators.\n        let validator_matrix =\n            ValidatorMatrix::new_with_validator(Arc::new(SecretKey::random(&mut rng)));\n        let limiter = Limiter::new(0, new_wait_time_sec(), validator_matrix);\n\n        // Try with non-validators or unknown nodes.\n        let handles = vec![\n            limiter.create_handle(NodeId::random(&mut rng), Some(PublicKey::random(&mut rng))),\n            limiter.create_handle(NodeId::random(&mut rng), None),\n        ];\n\n        for handle in handles {\n            let start = Instant::now();\n            
handle.request_allowance(0).await;\n            handle.request_allowance(u32::MAX).await;\n            handle.request_allowance(1).await;\n            assert!(start.elapsed() < SHORT_TIME);\n        }\n    }\n\n    #[tokio::test]\n    async fn active_validator_is_unlimited() {\n        let mut rng = crate::new_rng();\n\n        let secret_key = SecretKey::random(&mut rng);\n        let consensus_key = PublicKey::from(&secret_key);\n        let validator_matrix = ValidatorMatrix::new_with_validator(Arc::new(secret_key));\n        let limiter = Limiter::new(1_000, new_wait_time_sec(), validator_matrix);\n\n        let handle = limiter.create_handle(NodeId::random(&mut rng), Some(consensus_key));\n\n        let start = Instant::now();\n        handle.request_allowance(0).await;\n        handle.request_allowance(u32::MAX).await;\n        handle.request_allowance(1).await;\n        assert!(start.elapsed() < SHORT_TIME);\n    }\n\n    #[tokio::test]\n    async fn inactive_validator_limited() {\n        let rng = &mut crate::new_rng();\n\n        // We insert one unrelated active validator to avoid triggering the automatic disabling of\n        // the limiter in case there are no active validators.\n        let validator_matrix =\n            ValidatorMatrix::new_with_validator(Arc::new(SecretKey::random(rng)));\n        let peers = [\n            (NodeId::random(rng), Some(PublicKey::random(rng))),\n            (NodeId::random(rng), None),\n        ];\n\n        let limiter = Limiter::new(1_000, new_wait_time_sec(), validator_matrix);\n\n        for (peer, maybe_public_key) in peers {\n            let start = Instant::now();\n            let handle = limiter.create_handle(peer, maybe_public_key);\n\n            // Send 9_0001 bytes, we expect this to take roughly 15 seconds.\n            handle.request_allowance(1000).await;\n            handle.request_allowance(1000).await;\n            handle.request_allowance(1000).await;\n            
handle.request_allowance(2000).await;\n            handle.request_allowance(4000).await;\n            handle.request_allowance(1).await;\n            let elapsed = start.elapsed();\n\n            assert!(\n                elapsed >= Duration::from_secs(9),\n                \"{}s\",\n                elapsed.as_secs_f64()\n            );\n            assert!(\n                elapsed <= Duration::from_secs(10),\n                \"{}s\",\n                elapsed.as_secs_f64()\n            );\n        }\n    }\n\n    #[tokio::test]\n    async fn nonvalidators_parallel_limited() {\n        let mut rng = crate::new_rng();\n\n        let wait_metric = new_wait_time_sec();\n\n        // We insert one unrelated active validator to avoid triggering the automatic disabling of\n        // the limiter in case there are no active validators.\n        let validator_matrix =\n            ValidatorMatrix::new_with_validator(Arc::new(SecretKey::random(&mut rng)));\n        let limiter = Limiter::new(1_000, wait_metric.clone(), validator_matrix);\n\n        let start = Instant::now();\n\n        // Parallel test, 5 non-validators sharing 1000 bytes per second. 
Each sends 1001 bytes, so\n        // total time is expected to be just over 5 seconds.\n        let join_handles = (0..5)\n            .map(|_| {\n                limiter.create_handle(NodeId::random(&mut rng), Some(PublicKey::random(&mut rng)))\n            })\n            .map(|handle| {\n                tokio::spawn(async move {\n                    handle.request_allowance(500).await;\n                    handle.request_allowance(150).await;\n                    handle.request_allowance(350).await;\n                    handle.request_allowance(1).await;\n                })\n            });\n\n        for join_handle in join_handles {\n            join_handle.await.expect(\"could not join task\");\n        }\n\n        let elapsed = start.elapsed();\n        assert!(elapsed >= Duration::from_secs(5));\n        assert!(elapsed <= Duration::from_secs(6));\n\n        // Ensure metrics recorded the correct number of seconds.\n        assert!(\n            wait_metric.get() <= 6.0,\n            \"wait metric is too large: {}\",\n            wait_metric.get()\n        );\n\n        // Note: The limiting will not apply to all data, so it should be slightly below 5 seconds.\n        assert!(\n            wait_metric.get() >= 4.5,\n            \"wait metric is too small: {}\",\n            wait_metric.get()\n        );\n    }\n\n    #[tokio::test]\n    async fn inactive_validators_unlimited_when_no_validators_known() {\n        init_logging();\n\n        let mut rng = crate::new_rng();\n\n        let secret_key = SecretKey::random(&mut rng);\n        let consensus_key = PublicKey::from(&secret_key);\n        let wait_metric = new_wait_time_sec();\n        let limiter = Limiter::new(\n            1_000,\n            wait_metric.clone(),\n            ValidatorMatrix::new(\n                Ratio::new(1, 3),\n                ChainNameDigest::from_chain_name(\"casper-example\"),\n                None,\n                EraId::from(0),\n                Arc::new(secret_key),\n  
              consensus_key.clone(),\n                2,\n                3,\n            ),\n        );\n\n        // Try with non-validators or unknown nodes.\n        let handles = vec![\n            limiter.create_handle(NodeId::random(&mut rng), Some(PublicKey::random(&mut rng))),\n            limiter.create_handle(NodeId::random(&mut rng), None),\n        ];\n\n        for handle in handles {\n            let start = Instant::now();\n\n            // Send 9_0001 bytes, should now finish instantly.\n            handle.request_allowance(1000).await;\n            handle.request_allowance(1000).await;\n            handle.request_allowance(1000).await;\n            handle.request_allowance(2000).await;\n            handle.request_allowance(4000).await;\n            handle.request_allowance(1).await;\n            assert!(start.elapsed() < SHORT_TIME);\n        }\n\n        // There should have been no time spent waiting.\n        assert!(\n            wait_metric.get() < SHORT_TIME.as_secs_f64(),\n            \"wait_metric is too large: {}\",\n            wait_metric.get()\n        );\n    }\n\n    /// Regression test for #2929.\n    #[tokio::test]\n    async fn throttling_of_non_validators_does_not_affect_validators() {\n        init_logging();\n\n        let mut rng = crate::new_rng();\n\n        let secret_key = SecretKey::random(&mut rng);\n        let consensus_key = PublicKey::from(&secret_key);\n        let validator_matrix = ValidatorMatrix::new_with_validator(Arc::new(secret_key));\n        let limiter = Limiter::new(1_000, new_wait_time_sec(), validator_matrix);\n\n        let non_validator_handle = limiter.create_handle(NodeId::random(&mut rng), None);\n        let validator_handle = limiter.create_handle(NodeId::random(&mut rng), Some(consensus_key));\n\n        // We request a large resource at once using a non-validator handle. 
At the same time,\n        // validator requests should be still served, even while waiting for the long-delayed\n        // request still blocking.\n        let start = Instant::now();\n        let background_nv_request = tokio::spawn(async move {\n            non_validator_handle.request_allowance(5000).await;\n            non_validator_handle.request_allowance(5000).await;\n\n            Instant::now()\n        });\n\n        // Allow for a little bit of time to pass to ensure the background task is running.\n        tokio::time::sleep(Duration::from_secs(1)).await;\n\n        validator_handle.request_allowance(10000).await;\n        validator_handle.request_allowance(10000).await;\n\n        let v_finished = Instant::now();\n\n        let nv_finished = background_nv_request\n            .await\n            .expect(\"failed to join background nv task\");\n\n        let nv_completed = nv_finished.duration_since(start);\n        assert!(\n            nv_completed >= Duration::from_millis(4500),\n            \"non-validator did not delay sufficiently: {:?}\",\n            nv_completed\n        );\n\n        let v_completed = v_finished.duration_since(start);\n        assert!(\n            v_completed <= Duration::from_millis(1500),\n            \"validator did not finish quickly enough: {:?}\",\n            v_completed\n        );\n    }\n}\n"
  },
  {
    "path": "node/src/components/network/message.rs",
    "content": "use std::{\n    fmt::{self, Debug, Display, Formatter},\n    net::SocketAddr,\n    sync::Arc,\n};\n\nuse datasize::DataSize;\nuse futures::future::BoxFuture;\nuse serde::{\n    de::{DeserializeOwned, Error as SerdeError},\n    Deserialize, Deserializer, Serialize, Serializer,\n};\nuse strum::EnumDiscriminants;\n\n#[cfg(test)]\nuse casper_types::testing::TestRng;\nuse casper_types::{\n    crypto, AsymmetricType, Chainspec, Digest, ProtocolVersion, PublicKey, SecretKey, Signature,\n    AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID,\n};\n\nuse super::{counting_format::ConnectionId, health::Nonce, BincodeFormat};\nuse crate::{\n    effect::EffectBuilder,\n    protocol,\n    types::NodeId,\n    utils::{\n        opt_display::OptDisplay,\n        specimen::{Cache, LargestSpecimen, SizeEstimator},\n    },\n};\n\nuse tracing::warn;\n\n// Additional overhead accounted for (eg. lower level networking packet encapsulation).\nconst NETWORK_MESSAGE_LIMIT_SAFETY_MARGIN: usize = 256;\n\n/// The default protocol version to use in absence of one in the protocol version field.\n#[inline]\nfn default_protocol_version() -> ProtocolVersion {\n    ProtocolVersion::V1_0_0\n}\n\n#[derive(Clone, Debug, Deserialize, Serialize, EnumDiscriminants)]\n#[strum_discriminants(derive(strum::EnumIter))]\n#[allow(clippy::large_enum_variant)]\npub(crate) enum Message<P> {\n    Handshake {\n        /// Network we are connected to.\n        network_name: String,\n        /// The public address of the node connecting.\n        public_addr: SocketAddr,\n        /// Protocol version the node is speaking.\n        #[serde(default = \"default_protocol_version\")]\n        protocol_version: ProtocolVersion,\n        /// A self-signed certificate indicating validator status.\n        #[serde(default)]\n        consensus_certificate: Option<ConsensusCertificate>,\n        /// True if the node is syncing.\n        #[serde(default)]\n        is_syncing: bool,\n        /// Hash of the 
chainspec the node is running.\n        #[serde(default)]\n        chainspec_hash: Option<Digest>,\n    },\n    /// A ping request.\n    Ping {\n        /// The nonce to be returned with the pong.\n        nonce: Nonce,\n    },\n    /// A pong response.\n    Pong {\n        /// Nonce to match pong to ping.\n        nonce: Nonce,\n    },\n    Payload(P),\n}\n\nimpl<P: Payload> Message<P> {\n    /// Classifies a message based on its payload.\n    #[inline]\n    pub(super) fn classify(&self) -> MessageKind {\n        match self {\n            Message::Handshake { .. } | Message::Ping { .. } | Message::Pong { .. } => {\n                MessageKind::Protocol\n            }\n            Message::Payload(payload) => payload.message_kind(),\n        }\n    }\n\n    /// Determines whether or not a message is low priority.\n    #[inline]\n    pub(super) fn is_low_priority(&self) -> bool {\n        match self {\n            Message::Handshake { .. } | Message::Ping { .. } | Message::Pong { .. } => false,\n            Message::Payload(payload) => payload.is_low_priority(),\n        }\n    }\n\n    /// Returns the incoming resource estimate of the payload.\n    #[inline]\n    pub(super) fn payload_incoming_resource_estimate(&self, weights: &EstimatorWeights) -> u32 {\n        match self {\n            Message::Handshake { .. } => 0,\n            // Ping and Pong have a hardcoded weights. Since every ping will result in a pong being\n            // sent as a reply, it has a higher weight.\n            Message::Ping { .. } => 2,\n            Message::Pong { .. } => 1,\n            Message::Payload(payload) => payload.incoming_resource_estimate(weights),\n        }\n    }\n\n    /// Returns whether or not the payload is unsafe for syncing node consumption.\n    #[inline]\n    pub(super) fn payload_is_unsafe_for_syncing_nodes(&self) -> bool {\n        match self {\n            Message::Handshake { .. } | Message::Ping { .. } | Message::Pong { .. 
} => false,\n            Message::Payload(payload) => payload.is_unsafe_for_syncing_peers(),\n        }\n    }\n\n    /// Attempts to create a demand-event from this message.\n    ///\n    /// Succeeds if the outer message contains a payload that can be converted into a demand.\n    pub(super) fn try_into_demand<REv>(\n        self,\n        effect_builder: EffectBuilder<REv>,\n        sender: NodeId,\n    ) -> Result<(REv, BoxFuture<'static, Option<P>>), Box<Self>>\n    where\n        REv: FromIncoming<P> + Send,\n    {\n        match self {\n            Message::Handshake { .. } | Message::Ping { .. } | Message::Pong { .. } => {\n                Err(self.into())\n            }\n            Message::Payload(payload) => {\n                // Note: For now, the wrapping/unwrap of the payload is a bit unfortunate here.\n                REv::try_demand_from_incoming(effect_builder, sender, payload)\n                    .map_err(|err| Message::Payload(err).into())\n            }\n        }\n    }\n}\n\n/// A pair of secret keys used by consensus.\npub(super) struct NodeKeyPair {\n    secret_key: Arc<SecretKey>,\n    public_key: PublicKey,\n}\n\nimpl NodeKeyPair {\n    /// Creates a new key pair for consensus signing.\n    pub(super) fn new(key_pair: (Arc<SecretKey>, PublicKey)) -> Self {\n        Self {\n            secret_key: key_pair.0,\n            public_key: key_pair.1,\n        }\n    }\n\n    /// Sign a value using this keypair.\n    fn sign<T: AsRef<[u8]>>(&self, value: T) -> Signature {\n        crypto::sign(value, &self.secret_key, &self.public_key)\n    }\n}\n\n/// Certificate used to indicate that the peer is a validator using the specified public key.\n///\n/// Note that this type has custom `Serialize` and `Deserialize` implementations to allow the\n/// `public_key` and `signature` fields to be encoded to all-lowercase hex, hence circumventing the\n/// checksummed-hex encoding used by `PublicKey` and `Signature` in versions 1.4.2 and 
1.4.3.\n#[derive(Clone, Debug, Eq, PartialEq)]\npub(crate) struct ConsensusCertificate {\n    public_key: PublicKey,\n    signature: Signature,\n}\n\nimpl ConsensusCertificate {\n    /// Creates a new consensus certificate from a connection ID and key pair.\n    pub(super) fn create(connection_id: ConnectionId, key_pair: &NodeKeyPair) -> Self {\n        let signature = key_pair.sign(connection_id.as_bytes());\n        ConsensusCertificate {\n            public_key: key_pair.public_key.clone(),\n            signature,\n        }\n    }\n\n    /// Validates a certificate, returning a `PublicKey` if valid.\n    pub(super) fn validate(self, connection_id: ConnectionId) -> Result<PublicKey, crypto::Error> {\n        crypto::verify(connection_id.as_bytes(), &self.signature, &self.public_key)?;\n        Ok(self.public_key)\n    }\n\n    /// Creates a random `ConnectionId`.\n    #[cfg(test)]\n    fn random(rng: &mut TestRng) -> Self {\n        let secret_key = SecretKey::random(rng);\n        let public_key = PublicKey::from(&secret_key);\n        ConsensusCertificate::create(\n            ConnectionId::random(rng),\n            &NodeKeyPair::new((Arc::new(secret_key), public_key)),\n        )\n    }\n}\n\nimpl Display for ConsensusCertificate {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"key:{}\", self.public_key)\n    }\n}\n\n/// This type and the `NonHumanReadableCertificate` are helper structs only used in the `Serialize`\n/// and `Deserialize` implementations of `ConsensusCertificate` to allow handshaking between nodes\n/// running the casper-node v1.4.2 and v1.4.3 software versions.\n///\n/// Checksummed-hex encoding was introduced in 1.4.2 and was applied to `PublicKey` and `Signature`\n/// types, affecting the encoding of `ConsensusCertificate` since handshaking uses a human-readable\n/// type of encoder/decoder.\n///\n/// The 1.4.3 version immediately after 1.4.2 used a slightly different style of checksummed-hex\n/// encoding 
which is incompatible with the 1.4.2 style.  To effectively disable checksummed-hex\n/// encoding, we need to use an all-lowercase form of hex encoding for the `PublicKey` and\n/// `Signature` types.\n///\n/// The `HumanReadableCertificate` enables that by explicitly being constructed from all-lowercase\n/// hex encoded types, while the `NonHumanReadableCertificate` is a simple mirror of\n/// `ConsensusCertificate` to allow us to derive `Serialize` and `Deserialize`, avoiding complex\n/// hand-written implementations for the non-human-readable case.\n#[derive(Serialize, Deserialize)]\nstruct HumanReadableCertificate {\n    public_key: String,\n    signature: String,\n}\n\n#[derive(Serialize, Deserialize)]\nstruct NonHumanReadableCertificate {\n    public_key: PublicKey,\n    signature: Signature,\n}\n\nimpl Serialize for ConsensusCertificate {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            let human_readable_certificate = HumanReadableCertificate {\n                public_key: self.public_key.to_hex().to_lowercase(),\n                signature: self.signature.to_hex().to_lowercase(),\n            };\n\n            return human_readable_certificate.serialize(serializer);\n        }\n\n        let non_human_readable_certificate = NonHumanReadableCertificate {\n            public_key: self.public_key.clone(),\n            signature: self.signature,\n        };\n        non_human_readable_certificate.serialize(serializer)\n    }\n}\n\nimpl<'de> Deserialize<'de> for ConsensusCertificate {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        if deserializer.is_human_readable() {\n            let human_readable_certificate = HumanReadableCertificate::deserialize(deserializer)?;\n            let public_key = PublicKey::from_hex(\n                human_readable_certificate\n                    .public_key\n                    
.to_lowercase()\n                    .as_bytes(),\n            )\n            .map_err(D::Error::custom)?;\n            let signature = Signature::from_hex(\n                human_readable_certificate\n                    .signature\n                    .to_lowercase()\n                    .as_bytes(),\n            )\n            .map_err(D::Error::custom)?;\n            return Ok(ConsensusCertificate {\n                public_key,\n                signature,\n            });\n        }\n\n        let non_human_readable_certificate =\n            NonHumanReadableCertificate::deserialize(deserializer)?;\n        Ok(ConsensusCertificate {\n            public_key: non_human_readable_certificate.public_key,\n            signature: non_human_readable_certificate.signature,\n        })\n    }\n}\n\nimpl<P: Display> Display for Message<P> {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            Message::Handshake {\n                network_name,\n                public_addr,\n                protocol_version,\n                consensus_certificate,\n                is_syncing,\n                chainspec_hash,\n            } => {\n                write!(\n                    f,\n                    \"handshake: {}, public addr: {}, protocol_version: {}, consensus_certificate: {}, is_syncing: {}, chainspec_hash: {}\",\n                    network_name,\n                    public_addr,\n                    protocol_version,\n                    OptDisplay::new(consensus_certificate.as_ref(), \"none\"),\n                    is_syncing,\n                    OptDisplay::new(chainspec_hash.as_ref(), \"none\")\n                )\n            }\n            Message::Ping { nonce } => write!(f, \"ping({})\", nonce),\n            Message::Pong { nonce } => write!(f, \"pong({})\", nonce),\n            Message::Payload(payload) => write!(f, \"payload: {}\", payload),\n        }\n    }\n}\n\n/// A classification system for networking 
messages.\n#[derive(Copy, Clone, Debug)]\npub(crate) enum MessageKind {\n    /// Non-payload messages, like handshakes.\n    Protocol,\n    /// Messages directly related to consensus.\n    Consensus,\n    /// Transactions being gossiped.\n    TransactionGossip,\n    /// Blocks being gossiped.\n    BlockGossip,\n    /// Finality signatures being gossiped.\n    FinalitySignatureGossip,\n    /// Addresses being gossiped.\n    AddressGossip,\n    /// Transactions being transferred directly (via requests).\n    TransactionTransfer,\n    /// Blocks for finality signatures being transferred directly (via requests and other means).\n    BlockTransfer,\n    /// Tries transferred, usually as part of chain syncing.\n    TrieTransfer,\n    /// Any other kind of payload (or missing classification).\n    Other,\n}\n\nimpl Display for MessageKind {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            MessageKind::Protocol => f.write_str(\"protocol\"),\n            MessageKind::Consensus => f.write_str(\"consensus\"),\n            MessageKind::TransactionGossip => f.write_str(\"transaction_gossip\"),\n            MessageKind::BlockGossip => f.write_str(\"block_gossip\"),\n            MessageKind::FinalitySignatureGossip => f.write_str(\"finality_signature_gossip\"),\n            MessageKind::AddressGossip => f.write_str(\"address_gossip\"),\n            MessageKind::TransactionTransfer => f.write_str(\"transaction_transfer\"),\n            MessageKind::BlockTransfer => f.write_str(\"block_transfer\"),\n            MessageKind::TrieTransfer => f.write_str(\"trie_transfer\"),\n            MessageKind::Other => f.write_str(\"other\"),\n        }\n    }\n}\n\n/// Network message payload.\n///\n/// Payloads are what is transferred across the network outside of control messages from the\n/// networking component itself.\npub(crate) trait Payload:\n    Serialize + DeserializeOwned + Clone + Debug + Display + Send + Sync + 'static\n{\n    /// 
Classifies the payload based on its contents.\n    fn message_kind(&self) -> MessageKind;\n\n    /// The penalty for resource usage of a message to be applied when processed as incoming.\n    fn incoming_resource_estimate(&self, _weights: &EstimatorWeights) -> u32;\n\n    /// Determines if the payload should be considered low priority.\n    fn is_low_priority(&self) -> bool {\n        false\n    }\n\n    /// Indicates a message is not safe to send to a syncing node.\n    ///\n    /// This functionality should be removed once multiplexed networking lands.\n    fn is_unsafe_for_syncing_peers(&self) -> bool;\n}\n\n/// Network message conversion support.\npub(crate) trait FromIncoming<P> {\n    /// Creates a new value from a received payload.\n    fn from_incoming(sender: NodeId, payload: P) -> Self;\n\n    /// Tries to convert a payload into a demand.\n    ///\n    /// This function can optionally be called before `from_incoming` to attempt to convert an\n    /// incoming payload into a potential demand.\n    fn try_demand_from_incoming(\n        _effect_builder: EffectBuilder<Self>,\n        _sender: NodeId,\n        payload: P,\n    ) -> Result<(Self, BoxFuture<'static, Option<P>>), P>\n    where\n        Self: Sized + Send,\n    {\n        Err(payload)\n    }\n}\n\n/// A generic configuration for payload weights.\n///\n/// Implementors of `Payload` are free to interpret this as they see fit.\n///\n/// The default implementation sets all weights to zero.\n#[derive(DataSize, Debug, Default, Clone, Deserialize, Serialize)]\npub struct EstimatorWeights {\n    pub consensus: u32,\n    pub block_gossip: u32,\n    pub transaction_gossip: u32,\n    pub finality_signature_gossip: u32,\n    pub address_gossip: u32,\n    pub finality_signature_broadcasts: u32,\n    pub transaction_requests: u32,\n    pub transaction_responses: u32,\n    pub legacy_deploy_requests: u32,\n    pub legacy_deploy_responses: u32,\n    pub block_requests: u32,\n    pub block_responses: u32,\n    pub 
block_header_requests: u32,\n    pub block_header_responses: u32,\n    pub trie_requests: u32,\n    pub trie_responses: u32,\n    pub finality_signature_requests: u32,\n    pub finality_signature_responses: u32,\n    pub sync_leap_requests: u32,\n    pub sync_leap_responses: u32,\n    pub approvals_hashes_requests: u32,\n    pub approvals_hashes_responses: u32,\n    pub execution_results_requests: u32,\n    pub execution_results_responses: u32,\n}\n\nmod specimen_support {\n    use std::iter;\n\n    use serde::Serialize;\n\n    use crate::utils::specimen::{\n        largest_variant, Cache, LargestSpecimen, SizeEstimator, HIGHEST_UNICODE_CODEPOINT,\n    };\n\n    use super::{ConsensusCertificate, Message, MessageDiscriminants};\n\n    impl<P> LargestSpecimen for Message<P>\n    where\n        P: Serialize + LargestSpecimen,\n    {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            let largest_network_name = estimator.parameter(\"network_name_limit\");\n\n            largest_variant::<Self, MessageDiscriminants, _, _>(\n                estimator,\n                |variant| match variant {\n                    MessageDiscriminants::Handshake => Message::Handshake {\n                        network_name: iter::repeat(HIGHEST_UNICODE_CODEPOINT)\n                            .take(largest_network_name)\n                            .collect(),\n                        public_addr: LargestSpecimen::largest_specimen(estimator, cache),\n                        protocol_version: LargestSpecimen::largest_specimen(estimator, cache),\n                        consensus_certificate: LargestSpecimen::largest_specimen(estimator, cache),\n                        is_syncing: LargestSpecimen::largest_specimen(estimator, cache),\n                        chainspec_hash: LargestSpecimen::largest_specimen(estimator, cache),\n                    },\n                    MessageDiscriminants::Ping => Message::Ping {\n                        
nonce: LargestSpecimen::largest_specimen(estimator, cache),\n                    },\n                    MessageDiscriminants::Pong => Message::Pong {\n                        nonce: LargestSpecimen::largest_specimen(estimator, cache),\n                    },\n                    MessageDiscriminants::Payload => {\n                        Message::Payload(LargestSpecimen::largest_specimen(estimator, cache))\n                    }\n                },\n            )\n        }\n    }\n\n    impl LargestSpecimen for ConsensusCertificate {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            ConsensusCertificate {\n                public_key: LargestSpecimen::largest_specimen(estimator, cache),\n                signature: LargestSpecimen::largest_specimen(estimator, cache),\n            }\n        }\n    }\n}\n\n/// An estimator that uses the serialized network representation as a measure of size.\n#[derive(Clone, Debug)]\npub(crate) struct NetworkMessageEstimator<'a> {\n    /// The chainspec to retrieve estimation values from.\n    chainspec: &'a Chainspec,\n}\n\nimpl<'a> NetworkMessageEstimator<'a> {\n    /// Creates a new network message estimator.\n    pub(crate) fn new(chainspec: &'a Chainspec) -> Self {\n        Self { chainspec }\n    }\n\n    /// Returns a parameter by name as `i64`.\n    fn get_parameter(&self, name: &'static str) -> Option<i64> {\n        let max_transaction_size = self\n            .chainspec\n            .transaction_config\n            .transaction_v1_config\n            .get_max_serialized_length(INSTALL_UPGRADE_LANE_ID);\n        Some(match name {\n            // The name limit will be larger than the actual name, so it is a safe upper bound.\n            \"network_name_limit\" => self.chainspec.network_config.name.len() as i64,\n            // These limits are making deploys bigger than they actually are, since many items\n            // have both a `contract_name` and an `entry_point`. 
We accept 2X as an upper bound.\n            \"contract_name_limit\" => max_transaction_size as i64,\n            \"entry_point_limit\" => max_transaction_size as i64,\n            \"recent_era_count\" => {\n                (self.chainspec.core_config.unbonding_delay\n                    - self.chainspec.core_config.auction_delay) as i64\n            }\n            \"validator_count\" => self.chainspec.core_config.validator_slots as i64,\n            \"minimum_era_height\" => self.chainspec.core_config.minimum_era_height as i64,\n            \"era_duration_ms\" => self.chainspec.core_config.era_duration.millis() as i64,\n            \"minimum_round_length_ms\" => self\n                .chainspec\n                .core_config\n                .minimum_block_time\n                .millis()\n                .max(1) as i64,\n            \"max_transaction_size\" => max_transaction_size as i64,\n            \"approvals_hashes\" => self\n                .chainspec\n                .transaction_config\n                .transaction_v1_config\n                .get_max_block_count() as i64,\n            \"max_mint_per_block\" => self\n                .chainspec\n                .transaction_config\n                .transaction_v1_config\n                .get_max_transaction_count(MINT_LANE_ID) as i64,\n            \"max_auctions_per_block\" => {\n                self.chainspec\n                    .transaction_config\n                    .transaction_v1_config\n                    .get_max_transaction_count(AUCTION_LANE_ID) as i64\n            }\n            \"max_install_upgrade_transactions_per_block\" => {\n                self.chainspec\n                    .transaction_config\n                    .transaction_v1_config\n                    .get_max_transaction_count(INSTALL_UPGRADE_LANE_ID) as i64\n            }\n            \"max_standard_transactions_per_block\" => {\n                self.chainspec\n                    .transaction_config\n                    
.transaction_v1_config\n                    .get_max_wasm_transaction_count() as i64\n            }\n            \"average_approvals_per_transaction_in_block\" => {\n                let max_total_txns = self\n                    .chainspec\n                    .transaction_config\n                    .transaction_v1_config\n                    .get_max_block_count() as i64;\n\n                // Note: The +1 is to overestimate, as depending on the serialization format chosen,\n                //       spreading out the approvals can increase or decrease the size. For\n                //       example, in a length-prefixed encoding, putting them all in one may result\n                //       in a smaller size if variable size integer encoding it used. In a format\n                //       using separators without trailing separators (e.g. commas in JSON),\n                //       spreading out will reduce the total number of bytes.\n                ((self.chainspec.transaction_config.block_max_approval_count as i64\n                    + max_total_txns\n                    - 1)\n                    / max_total_txns)\n                    .max(0)\n                    + 1\n            }\n            \"max_accusations_per_block\" => self.chainspec.core_config.validator_slots as i64,\n            // `RADIX` from EE.\n            \"max_pointer_per_node\" => 255,\n            // Endorsements are currently hard-disabled (via code). 
If ever re-enabled, this\n            // parameter should ideally be removed entirely.\n            \"endorsements_enabled\" => 0,\n            \"signature_rewards_max_delay\" => {\n                self.chainspec.core_config.signature_rewards_max_delay as i64\n            }\n            _ => return None,\n        })\n    }\n}\n\n/// Encoding helper function.\n///\n/// Encodes a message in the same manner the network component would before sending it.\nfn serialize_net_message<T>(data: &T) -> Vec<u8>\nwhere\n    T: Serialize,\n{\n    BincodeFormat::default()\n        .serialize_arbitrary(data)\n        .expect(\"did not expect serialization to fail\")\n}\n\n/// Creates a serialized specimen of the largest possible networking message.\nfn generate_largest_message(chainspec: &Chainspec) -> Message<protocol::Message> {\n    let estimator = &NetworkMessageEstimator::new(chainspec);\n    let cache = &mut Cache::default();\n\n    Message::largest_specimen(estimator, cache)\n}\n\n/// Enforces chainspec configured message size limit.\npub(crate) fn within_message_size_limit_tolerance(chainspec: &Chainspec) -> bool {\n    // Ensure the size of the largest message generated under these chainspec settings does not\n    // exceed the configured message size limit.\n    let configured_maximum = chainspec.network_config.maximum_net_message_size as usize;\n    let serialized = serialize_net_message(&generate_largest_message(chainspec));\n    let calculated_size = serialized.len();\n    let within_tolerance =\n        calculated_size + NETWORK_MESSAGE_LIMIT_SAFETY_MARGIN <= configured_maximum;\n    if !within_tolerance {\n        warn!(\n            calculated_size,\n            configured_maximum,\n            \"config value [network][maximum_net_message_size] is too small to accommodate the \\\n            maximum message size\"\n        );\n    }\n    within_tolerance\n}\n\nimpl SizeEstimator for NetworkMessageEstimator<'_> {\n    fn estimate<T: Serialize>(&self, val: &T) -> 
usize {\n        serialize_net_message(&val).len()\n    }\n\n    fn parameter<T: TryFrom<i64>>(&self, name: &'static str) -> T {\n        let value = self\n            .get_parameter(name)\n            .unwrap_or_else(|| panic!(\"missing parameter \\\"{}\\\" for specimen estimation\", name));\n\n        T::try_from(value).unwrap_or_else(|_| {\n            panic!(\n                \"Failed to convert the parameter `{name}` of value `{value}` to the type `{}`\",\n                core::any::type_name::<T>()\n            )\n        })\n    }\n}\n\n#[cfg(test)]\n// We use a variety of weird names in these tests.\n#[allow(non_camel_case_types)]\nmod tests {\n    use std::{\n        net::{Ipv4Addr, SocketAddr},\n        pin::Pin,\n    };\n\n    use assert_matches::assert_matches;\n    use bytes::BytesMut;\n    use casper_types::ProtocolVersion;\n    use serde::{de::DeserializeOwned, Deserialize, Serialize};\n    use tokio_serde::{Deserializer, Serializer};\n\n    use crate::{components::network::message_pack_format::MessagePackFormat, protocol};\n\n    use super::*;\n\n    /// Version 1.0.0 network level message.\n    ///\n    /// Note that the message itself may go out of sync over time as `protocol::Message` changes.\n    /// The test further below ensures that the handshake is accurate in the meantime.\n    #[derive(Clone, Debug, Deserialize, Serialize)]\n    pub(crate) enum V1_0_0_Message {\n        Handshake {\n            /// Network we are connected to.\n            network_name: String,\n            /// The public address of the node connecting.\n            public_address: SocketAddr,\n        },\n        Payload(protocol::Message),\n    }\n\n    /// A \"conserved\" version 1.0.0 handshake.\n    ///\n    /// NEVER CHANGE THIS CONSTANT TO MAKE TESTS PASS, AS IT IS BASED ON MAINNET DATA.\n    const V1_0_0_HANDSHAKE: &[u8] = &[\n        129, 0, 146, 178, 115, 101, 114, 105, 97, 108, 105, 122, 97, 116, 105, 111, 110, 45, 116,\n        101, 115, 116, 177, 49, 50, 46, 
51, 52, 46, 53, 54, 46, 55, 56, 58, 49, 50, 51, 52, 54,\n    ];\n\n    /// A \"conserved\" version 1.4.2 handshake.\n    ///\n    /// NEVER CHANGE THIS CONSTANT TO MAKE TESTS PASS, AS IT IS BASED ON TESTNET DATA.\n    const V1_4_2_HANDSHAKE: &[u8] = &[\n        129, 0, 148, 177, 101, 120, 97, 109, 112, 108, 101, 45, 104, 97, 110, 100, 115, 104, 97,\n        107, 101, 177, 49, 50, 46, 51, 52, 46, 53, 54, 46, 55, 56, 58, 49, 50, 51, 52, 54, 165, 49,\n        46, 52, 46, 50, 146, 217, 68, 48, 50, 48, 50, 56, 51, 99, 48, 68, 54, 56, 55, 57, 51, 51,\n        69, 98, 50, 48, 97, 53, 52, 49, 67, 56, 53, 52, 48, 52, 55, 56, 56, 55, 55, 56, 54, 49,\n        101, 100, 69, 52, 65, 70, 102, 65, 102, 48, 52, 97, 54, 56, 101, 97, 49, 57, 52, 66, 55,\n        65, 52, 48, 48, 52, 54, 52, 50, 52, 101, 217, 130, 48, 50, 99, 68, 70, 65, 51, 51, 51, 99,\n        49, 56, 56, 57, 51, 100, 57, 102, 51, 54, 48, 51, 53, 97, 51, 98, 55, 55, 48, 50, 51, 52,\n        56, 97, 67, 102, 70, 48, 70, 68, 53, 65, 50, 65, 69, 57, 99, 66, 67, 48, 69, 52, 56, 69,\n        53, 57, 100, 100, 48, 56, 53, 53, 56, 49, 97, 54, 48, 49, 53, 57, 66, 55, 102, 99, 67, 99,\n        53, 52, 68, 68, 48, 70, 65, 57, 52, 52, 51, 100, 50, 69, 51, 53, 55, 51, 51, 55, 56, 68,\n        54, 49, 69, 97, 49, 54, 101, 54, 53, 57, 68, 49, 54, 100, 48, 48, 48, 57, 65, 52, 48, 66,\n        55, 55, 53, 48, 66, 67, 67, 69, 65, 69,\n    ];\n\n    /// A \"conserved\" version 1.4.3 handshake.\n    ///\n    /// NEVER CHANGE THIS CONSTANT TO MAKE TESTS PASS, AS IT IS BASED ON MAINNET DATA.\n    const V1_4_3_HANDSHAKE: &[u8] = &[\n        129, 0, 148, 177, 101, 120, 97, 109, 112, 108, 101, 45, 104, 97, 110, 100, 115, 104, 97,\n        107, 101, 177, 49, 50, 46, 51, 52, 46, 53, 54, 46, 55, 56, 58, 49, 50, 51, 52, 54, 165, 49,\n        46, 52, 46, 51, 146, 217, 68, 48, 50, 48, 51, 51, 49, 101, 102, 98, 102, 55, 99, 99, 51,\n        51, 56, 49, 53, 49, 53, 97, 55, 50, 50, 57, 102, 57, 99, 51, 101, 55, 57, 55, 48, 51, 48,\n        50, 50, 
56, 99, 97, 97, 49, 56, 57, 102, 98, 50, 97, 49, 48, 50, 56, 97, 100, 101, 48, 52,\n        101, 50, 57, 55, 48, 102, 55, 52, 99, 53, 217, 130, 48, 50, 55, 54, 54, 52, 56, 54, 54, 55,\n        57, 52, 98, 97, 99, 99, 101, 52, 52, 49, 51, 57, 50, 102, 52, 51, 50, 100, 98, 97, 50, 100,\n        101, 54, 55, 100, 97, 51, 98, 97, 55, 56, 53, 101, 53, 57, 99, 57, 52, 56, 48, 102, 49, 50,\n        54, 55, 57, 52, 101, 100, 55, 56, 98, 56, 101, 53, 50, 57, 57, 57, 55, 54, 49, 99, 48, 56,\n        49, 53, 56, 50, 56, 53, 53, 56, 48, 98, 52, 97, 54, 55, 98, 55, 101, 51, 52, 51, 99, 50,\n        50, 56, 49, 51, 51, 99, 52, 49, 100, 52, 50, 53, 48, 98, 102, 55, 57, 100, 55, 56, 54, 100,\n        55, 99, 49, 57, 57, 99, 97, 57, 55, 55,\n    ];\n\n    // Note: MessagePack messages can be visualized using the message pack visualizer at\n    // https://sugendran.github.io/msgpack-visualizer/. Rust arrays can be copy&pasted and converted\n    // to base64 using the following one-liner: `import base64; base64.b64encode(bytes([129, 0,\n    // ...]))`\n\n    // It is very important to note that different versions of the message pack codec crate set the\n    // human-readable flag in a different manner. 
Thus the V1.0.0 handshake can be serialized in two\n    // different ways, with \"human readable\" enabled and without.\n    //\n    // Our V1.0.0 protocol uses the \"human readable\" enabled version, they key difference being that\n    // the `SocketAddr` is encoded as a string instead of a two-item array.\n\n    /// A pseudo-1.0.0 handshake, where the serde human readable flag has been changed due to an\n    /// `rmp` version mismatch.\n    const BROKEN_V1_0_0_HANDSHAKE: &[u8] = &[\n        129, 0, 146, 178, 115, 101, 114, 105, 97, 108, 105, 122, 97, 116, 105, 111, 110, 45, 116,\n        101, 115, 116, 129, 0, 146, 148, 12, 34, 56, 78, 205, 48, 58,\n    ];\n\n    const TEST_SOCKET_ADDR: SocketAddr = SocketAddr::V4(std::net::SocketAddrV4::new(\n        Ipv4Addr::new(12, 34, 56, 78),\n        12346,\n    ));\n\n    /// Serialize a message using the standard serialization method for handshakes.\n    fn serialize_message<M: Serialize>(msg: &M) -> Vec<u8> {\n        let mut serializer = MessagePackFormat;\n\n        Pin::new(&mut serializer)\n            .serialize(&msg)\n            .expect(\"handshake serialization failed\")\n            .into_iter()\n            .collect()\n    }\n\n    /// Deserialize a message using the standard deserialization method for handshakes.\n    fn deserialize_message<M: DeserializeOwned>(serialized: &[u8]) -> M {\n        let mut deserializer = MessagePackFormat;\n\n        Pin::new(&mut deserializer)\n            .deserialize(&BytesMut::from(serialized))\n            .expect(\"message deserialization failed\")\n    }\n\n    /// Given a message `from` of type `F`, serializes it, then deserializes it as `T`.\n    fn roundtrip_message<F, T>(from: &F) -> T\n    where\n        F: Serialize,\n        T: DeserializeOwned,\n    {\n        let serialized = serialize_message(from);\n        deserialize_message(&serialized)\n    }\n\n    // This test ensure that the serialization of the `V_1_0_0_Message` has not changed and that the\n    // 
serialization/deserialization methods for message in this test are likely accurate.\n    #[test]\n    fn v1_0_0_handshake_is_as_expected() {\n        let handshake = V1_0_0_Message::Handshake {\n            network_name: \"serialization-test\".to_owned(),\n            public_address: TEST_SOCKET_ADDR,\n        };\n\n        let serialized = serialize_message::<V1_0_0_Message>(&handshake);\n\n        assert_eq!(&serialized, V1_0_0_HANDSHAKE);\n        assert_ne!(&serialized, BROKEN_V1_0_0_HANDSHAKE);\n\n        let deserialized: V1_0_0_Message = deserialize_message(&serialized);\n\n        match deserialized {\n            V1_0_0_Message::Handshake {\n                network_name,\n                public_address,\n            } => {\n                assert_eq!(network_name, \"serialization-test\");\n                assert_eq!(public_address, TEST_SOCKET_ADDR);\n            }\n            other => {\n                panic!(\"did not expect {:?} as the deserialized product\", other);\n            }\n        }\n    }\n\n    #[test]\n    fn v1_0_0_can_decode_current_handshake() {\n        let mut rng = crate::new_rng();\n        let modern_handshake = Message::<protocol::Message>::Handshake {\n            network_name: \"example-handshake\".to_string(),\n            public_addr: TEST_SOCKET_ADDR,\n            protocol_version: ProtocolVersion::from_parts(5, 6, 7),\n            consensus_certificate: Some(ConsensusCertificate::random(&mut rng)),\n            is_syncing: false,\n            chainspec_hash: Some(Digest::hash(\"example-chainspec\")),\n        };\n\n        let legacy_handshake: V1_0_0_Message = roundtrip_message(&modern_handshake);\n\n        match legacy_handshake {\n            V1_0_0_Message::Handshake {\n                network_name,\n                public_address,\n            } => {\n                assert_eq!(network_name, \"example-handshake\");\n                assert_eq!(public_address, TEST_SOCKET_ADDR);\n            }\n            
V1_0_0_Message::Payload(_) => {\n                panic!(\"did not expect legacy handshake to deserialize to payload\")\n            }\n        }\n    }\n\n    #[test]\n    fn current_handshake_decodes_from_v1_0_0() {\n        let legacy_handshake = V1_0_0_Message::Handshake {\n            network_name: \"example-handshake\".to_string(),\n            public_address: TEST_SOCKET_ADDR,\n        };\n\n        let modern_handshake: Message<protocol::Message> = roundtrip_message(&legacy_handshake);\n\n        if let Message::Handshake {\n            network_name,\n            public_addr,\n            protocol_version,\n            consensus_certificate,\n            is_syncing,\n            chainspec_hash,\n        } = modern_handshake\n        {\n            assert_eq!(network_name, \"example-handshake\");\n            assert_eq!(public_addr, TEST_SOCKET_ADDR);\n            assert_eq!(protocol_version, ProtocolVersion::V1_0_0);\n            assert!(consensus_certificate.is_none());\n            assert!(!is_syncing);\n            assert!(chainspec_hash.is_none())\n        } else {\n            panic!(\"did not expect modern handshake to deserialize to anything but\")\n        }\n    }\n\n    #[test]\n    fn current_handshake_decodes_from_historic_v1_0_0() {\n        let modern_handshake: Message<protocol::Message> = deserialize_message(V1_0_0_HANDSHAKE);\n\n        if let Message::Handshake {\n            network_name,\n            public_addr,\n            protocol_version,\n            consensus_certificate,\n            is_syncing,\n            chainspec_hash,\n        } = modern_handshake\n        {\n            assert!(!is_syncing);\n            assert_eq!(network_name, \"serialization-test\");\n            assert_eq!(public_addr, TEST_SOCKET_ADDR);\n            assert_eq!(protocol_version, ProtocolVersion::V1_0_0);\n            assert!(consensus_certificate.is_none());\n            assert!(!is_syncing);\n            assert!(chainspec_hash.is_none())\n        } 
else {\n            panic!(\"did not expect modern handshake to deserialize to anything but\")\n        }\n    }\n\n    #[test]\n    fn current_handshake_decodes_from_historic_v1_4_2() {\n        let modern_handshake: Message<protocol::Message> = deserialize_message(V1_4_2_HANDSHAKE);\n\n        if let Message::Handshake {\n            network_name,\n            public_addr,\n            protocol_version,\n            consensus_certificate,\n            is_syncing,\n            chainspec_hash,\n        } = modern_handshake\n        {\n            assert_eq!(network_name, \"example-handshake\");\n            assert_eq!(public_addr, TEST_SOCKET_ADDR);\n            assert_eq!(protocol_version, ProtocolVersion::from_parts(1, 4, 2));\n            assert!(!is_syncing);\n            let ConsensusCertificate {\n                public_key,\n                signature,\n            } = consensus_certificate.unwrap();\n\n            assert_eq!(\n                public_key,\n                PublicKey::from_hex(\n                    \"020283c0d687933eb20a541c8540478877861ede4affaf04a68ea194b7a40046424e\"\n                )\n                .unwrap()\n            );\n            assert_eq!(\n                signature,\n                Signature::from_hex(\n                    \"02cdfa333c18893d9f36035a3b7702348acff0fd5a2ae9cbc0e48e59dd085581a6015\\\n                        9b7fccc54dd0fa9443d2e3573378d61ea16e659d16d0009a40b7750bcceae\"\n                )\n                .unwrap()\n            );\n            assert!(!is_syncing);\n            assert!(chainspec_hash.is_none())\n        } else {\n            panic!(\"did not expect modern handshake to deserialize to anything but\")\n        }\n    }\n\n    #[test]\n    fn current_handshake_decodes_from_historic_v1_4_3() {\n        let modern_handshake: Message<protocol::Message> = deserialize_message(V1_4_3_HANDSHAKE);\n\n        if let Message::Handshake {\n            network_name,\n            public_addr,\n            
protocol_version,\n            consensus_certificate,\n            is_syncing,\n            chainspec_hash,\n        } = modern_handshake\n        {\n            assert!(!is_syncing);\n            assert_eq!(network_name, \"example-handshake\");\n            assert_eq!(public_addr, TEST_SOCKET_ADDR);\n            assert_eq!(protocol_version, ProtocolVersion::from_parts(1, 4, 3));\n            let ConsensusCertificate {\n                public_key,\n                signature,\n            } = consensus_certificate.unwrap();\n\n            assert_eq!(\n                public_key,\n                PublicKey::from_hex(\n                    \"020331efbf7cc3381515a7229f9c3e797030228caa189fb2a1028ade04e2970f74c5\"\n                )\n                .unwrap()\n            );\n            assert_eq!(\n                signature,\n                Signature::from_hex(\n                    \"027664866794bacce441392f432dba2de67da3ba785e59c9480f126794ed78b8e5299\\\n                        9761c08158285580b4a67b7e343c228133c41d4250bf79d786d7c199ca977\"\n                )\n                .unwrap()\n            );\n            assert!(!is_syncing);\n            assert!(chainspec_hash.is_none())\n        } else {\n            panic!(\"did not expect modern handshake to deserialize to anything but\")\n        }\n    }\n\n    fn roundtrip_certificate(use_human_readable: bool) {\n        let mut rng = crate::new_rng();\n        let certificate = ConsensusCertificate::random(&mut rng);\n\n        let deserialized = if use_human_readable {\n            let serialized = serde_json::to_string(&certificate).unwrap();\n            serde_json::from_str(&serialized).unwrap()\n        } else {\n            let serialized = bincode::serialize(&certificate).unwrap();\n            bincode::deserialize(&serialized).unwrap()\n        };\n        assert_eq!(certificate, deserialized);\n    }\n\n    #[test]\n    fn serde_json_roundtrip_certificate() {\n        roundtrip_certificate(true)\n    }\n\n   
 #[test]\n    fn bincode_roundtrip_certificate() {\n        roundtrip_certificate(false)\n    }\n\n    #[test]\n    fn assert_the_largest_specimen_type_and_size() {\n        let (chainspec, _) = crate::utils::Loadable::from_resources(\"production\");\n        let specimen = generate_largest_message(&chainspec);\n\n        assert_matches!(\n            specimen,\n            Message::Payload(protocol::Message::GetResponse { .. }),\n            \"the type of the largest possible network message based on the production chainspec has changed\"\n        );\n\n        let serialized = serialize_net_message(&specimen);\n\n        assert_eq!(\n            serialized.len(),\n            8_388_736,\n            \"the size of the largest possible network message based on the production chainspec has changed\"\n        );\n    }\n}\n"
  },
  {
    "path": "node/src/components/network/message_pack_format.rs",
    "content": "//! Message pack wire format encoder.\n//!\n//! This module is used to pin the correct version of message pack used throughout the codebase to\n//! our network decoder via `Cargo.toml`; using `tokio_serde::MessagePack` would instead tie it\n//! to the dependency specified in `tokio_serde`'s `Cargo.toml`.\n\nuse std::{io, pin::Pin};\n\nuse bytes::{Bytes, BytesMut};\nuse serde::{Deserialize, Serialize};\nuse tokio_serde::{Deserializer, Serializer};\n\n/// msgpack encoder/decoder for messages.\n#[derive(Debug)]\npub struct MessagePackFormat;\n\nimpl<M> Serializer<M> for MessagePackFormat\nwhere\n    M: Serialize,\n{\n    // Note: We cast to `io::Error` because of the `Codec::Error: Into<Transport::Error>`\n    // requirement.\n    type Error = io::Error;\n\n    #[inline]\n    fn serialize(self: Pin<&mut Self>, item: &M) -> Result<Bytes, Self::Error> {\n        rmp_serde::to_vec(item)\n            .map(Into::into)\n            .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))\n    }\n}\n\nimpl<M> Deserializer<M> for MessagePackFormat\nwhere\n    for<'de> M: Deserialize<'de>,\n{\n    type Error = io::Error;\n\n    #[inline]\n    fn deserialize(self: Pin<&mut Self>, src: &BytesMut) -> Result<M, Self::Error> {\n        rmp_serde::from_read_ref(src).map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))\n    }\n}\n"
  },
  {
    "path": "node/src/components/network/metrics.rs",
    "content": "use std::sync::Weak;\n\nuse prometheus::{Counter, IntCounter, IntGauge, Registry};\nuse tracing::debug;\n\nuse super::{outgoing::OutgoingMetrics, MessageKind};\nuse crate::unregister_metric;\n\n/// Network-type agnostic networking metrics.\n#[derive(Debug)]\npub(super) struct Metrics {\n    /// How often a request was made by a component to broadcast.\n    pub(super) broadcast_requests: IntCounter,\n    /// How often a request to send a message directly to a peer was made.\n    pub(super) direct_message_requests: IntCounter,\n    /// Number of messages still waiting to be sent out (broadcast and direct).\n    pub(super) queued_messages: IntGauge,\n    /// Number of connected peers.\n    pub(super) peers: IntGauge,\n\n    /// Count of outgoing messages that are protocol overhead.\n    pub(super) out_count_protocol: IntCounter,\n    /// Count of outgoing messages with consensus payload.\n    pub(super) out_count_consensus: IntCounter,\n    /// Count of outgoing messages with deploy gossiper payload.\n    pub(super) out_count_deploy_gossip: IntCounter,\n    pub(super) out_count_block_gossip: IntCounter,\n    pub(super) out_count_finality_signature_gossip: IntCounter,\n    /// Count of outgoing messages with address gossiper payload.\n    pub(super) out_count_address_gossip: IntCounter,\n    /// Count of outgoing messages with deploy request/response payload.\n    pub(super) out_count_deploy_transfer: IntCounter,\n    /// Count of outgoing messages with block request/response payload.\n    pub(super) out_count_block_transfer: IntCounter,\n    /// Count of outgoing messages with trie request/response payload.\n    pub(super) out_count_trie_transfer: IntCounter,\n    /// Count of outgoing messages with other payload.\n    pub(super) out_count_other: IntCounter,\n\n    /// Volume in bytes of outgoing messages that are protocol overhead.\n    pub(super) out_bytes_protocol: IntCounter,\n    /// Volume in bytes of outgoing messages with consensus payload.\n   
 pub(super) out_bytes_consensus: IntCounter,\n    /// Volume in bytes of outgoing messages with deploy gossiper payload.\n    pub(super) out_bytes_deploy_gossip: IntCounter,\n    pub(super) out_bytes_block_gossip: IntCounter,\n    pub(super) out_bytes_finality_signature_gossip: IntCounter,\n    /// Volume in bytes of outgoing messages with address gossiper payload.\n    pub(super) out_bytes_address_gossip: IntCounter,\n    /// Volume in bytes of outgoing messages with deploy request/response payload.\n    pub(super) out_bytes_deploy_transfer: IntCounter,\n    /// Volume in bytes of outgoing messages with block request/response payload.\n    pub(super) out_bytes_block_transfer: IntCounter,\n    /// Volume in bytes of outgoing messages with block request/response payload.\n    pub(super) out_bytes_trie_transfer: IntCounter,\n    /// Volume in bytes of outgoing messages with other payload.\n    pub(super) out_bytes_other: IntCounter,\n\n    /// Number of outgoing connections in connecting state.\n    pub(super) out_state_connecting: IntGauge,\n    /// Number of outgoing connections in waiting state.\n    pub(super) out_state_waiting: IntGauge,\n    /// Number of outgoing connections in connected state.\n    pub(super) out_state_connected: IntGauge,\n    /// Number of outgoing connections in blocked state.\n    pub(super) out_state_blocked: IntGauge,\n    /// Number of outgoing connections in loopback state.\n    pub(super) out_state_loopback: IntGauge,\n\n    /// Volume in bytes of incoming messages that are protocol overhead.\n    pub(super) in_bytes_protocol: IntCounter,\n    /// Volume in bytes of incoming messages with consensus payload.\n    pub(super) in_bytes_consensus: IntCounter,\n    /// Volume in bytes of incoming messages with deploy gossiper payload.\n    pub(super) in_bytes_deploy_gossip: IntCounter,\n    pub(super) in_bytes_block_gossip: IntCounter,\n    pub(super) in_bytes_finality_signature_gossip: IntCounter,\n    /// Volume in bytes of incoming 
messages with address gossiper payload.\n    pub(super) in_bytes_address_gossip: IntCounter,\n    /// Volume in bytes of incoming messages with deploy request/response payload.\n    pub(super) in_bytes_deploy_transfer: IntCounter,\n    /// Volume in bytes of incoming messages with block request/response payload.\n    pub(super) in_bytes_block_transfer: IntCounter,\n    /// Volume in bytes of incoming messages with block request/response payload.\n    pub(super) in_bytes_trie_transfer: IntCounter,\n    /// Volume in bytes of incoming messages with other payload.\n    pub(super) in_bytes_other: IntCounter,\n\n    /// Count of incoming messages that are protocol overhead.\n    pub(super) in_count_protocol: IntCounter,\n    /// Count of incoming messages with consensus payload.\n    pub(super) in_count_consensus: IntCounter,\n    /// Count of incoming messages with deploy gossiper payload.\n    pub(super) in_count_deploy_gossip: IntCounter,\n    pub(super) in_count_block_gossip: IntCounter,\n    pub(super) in_count_finality_signature_gossip: IntCounter,\n    /// Count of incoming messages with address gossiper payload.\n    pub(super) in_count_address_gossip: IntCounter,\n    /// Count of incoming messages with deploy request/response payload.\n    pub(super) in_count_deploy_transfer: IntCounter,\n    /// Count of incoming messages with block request/response payload.\n    pub(super) in_count_block_transfer: IntCounter,\n    /// Count of incoming messages with trie request/response payload.\n    pub(super) in_count_trie_transfer: IntCounter,\n    /// Count of incoming messages with other payload.\n    pub(super) in_count_other: IntCounter,\n\n    /// Number of trie requests accepted for processing.\n    pub(super) requests_for_trie_accepted: IntCounter,\n    /// Number of trie requests finished (successful or unsuccessful).\n    pub(super) requests_for_trie_finished: IntCounter,\n\n    /// Total time spent delaying outgoing traffic to non-validators due to limiter, in 
seconds.\n    pub(super) accumulated_outgoing_limiter_delay: Counter,\n    /// Total time spent delaying incoming traffic from non-validators due to limiter, in seconds.\n    pub(super) accumulated_incoming_limiter_delay: Counter,\n\n    /// Registry instance.\n    registry: Registry,\n}\n\nimpl Metrics {\n    /// Creates a new instance of networking metrics.\n    pub(super) fn new(registry: &Registry) -> Result<Self, prometheus::Error> {\n        let broadcast_requests =\n            IntCounter::new(\"net_broadcast_requests\", \"number of broadcasting requests\")?;\n        let direct_message_requests = IntCounter::new(\n            \"net_direct_message_requests\",\n            \"number of requests to send a message directly to a peer\",\n        )?;\n        let queued_messages = IntGauge::new(\n            \"net_queued_direct_messages\",\n            \"number of messages waiting to be sent out\",\n        )?;\n        let peers = IntGauge::new(\"peers\", \"number of connected peers\")?;\n\n        let out_count_protocol = IntCounter::new(\n            \"net_out_count_protocol\",\n            \"count of outgoing messages that are protocol overhead\",\n        )?;\n        let out_count_consensus = IntCounter::new(\n            \"net_out_count_consensus\",\n            \"count of outgoing messages with consensus payload\",\n        )?;\n        let out_count_deploy_gossip = IntCounter::new(\n            \"net_out_count_deploy_gossip\",\n            \"count of outgoing messages with deploy gossiper payload\",\n        )?;\n        let out_count_block_gossip = IntCounter::new(\n            \"net_out_count_block_gossip\",\n            \"count of outgoing messages with block gossiper payload\",\n        )?;\n        let out_count_finality_signature_gossip = IntCounter::new(\n            \"net_out_count_finality_signature_gossip\",\n            \"count of outgoing messages with finality signature gossiper payload\",\n        )?;\n        let out_count_address_gossip = 
IntCounter::new(\n            \"net_out_count_address_gossip\",\n            \"count of outgoing messages with address gossiper payload\",\n        )?;\n        let out_count_deploy_transfer = IntCounter::new(\n            \"net_out_count_deploy_transfer\",\n            \"count of outgoing messages with deploy request/response payload\",\n        )?;\n        let out_count_block_transfer = IntCounter::new(\n            \"net_out_count_block_transfer\",\n            \"count of outgoing messages with block request/response payload\",\n        )?;\n        let out_count_trie_transfer = IntCounter::new(\n            \"net_out_count_trie_transfer\",\n            \"count of outgoing messages with trie payloads\",\n        )?;\n        let out_count_other = IntCounter::new(\n            \"net_out_count_other\",\n            \"count of outgoing messages with other payload\",\n        )?;\n\n        let out_bytes_protocol = IntCounter::new(\n            \"net_out_bytes_protocol\",\n            \"volume in bytes of outgoing messages that are protocol overhead\",\n        )?;\n        let out_bytes_consensus = IntCounter::new(\n            \"net_out_bytes_consensus\",\n            \"volume in bytes of outgoing messages with consensus payload\",\n        )?;\n        let out_bytes_deploy_gossip = IntCounter::new(\n            \"net_out_bytes_deploy_gossip\",\n            \"volume in bytes of outgoing messages with deploy gossiper payload\",\n        )?;\n        let out_bytes_block_gossip = IntCounter::new(\n            \"net_out_bytes_block_gossip\",\n            \"volume in bytes of outgoing messages with block gossiper payload\",\n        )?;\n        let out_bytes_finality_signature_gossip = IntCounter::new(\n            \"net_out_bytes_finality_signature_gossip\",\n            \"volume in bytes of outgoing messages with finality signature gossiper payload\",\n        )?;\n        let out_bytes_address_gossip = IntCounter::new(\n            
\"net_out_bytes_address_gossip\",\n            \"volume in bytes of outgoing messages with address gossiper payload\",\n        )?;\n        let out_bytes_deploy_transfer = IntCounter::new(\n            \"net_out_bytes_deploy_transfer\",\n            \"volume in bytes of outgoing messages with deploy request/response payload\",\n        )?;\n        let out_bytes_block_transfer = IntCounter::new(\n            \"net_out_bytes_block_transfer\",\n            \"volume in bytes of outgoing messages with block request/response payload\",\n        )?;\n        let out_bytes_trie_transfer = IntCounter::new(\n            \"net_out_bytes_trie_transfer\",\n            \"volume in bytes of outgoing messages with trie payloads\",\n        )?;\n        let out_bytes_other = IntCounter::new(\n            \"net_out_bytes_other\",\n            \"volume in bytes of outgoing messages with other payload\",\n        )?;\n\n        let out_state_connecting = IntGauge::new(\n            \"out_state_connecting\",\n            \"number of connections in the connecting state\",\n        )?;\n        let out_state_waiting = IntGauge::new(\n            \"out_state_waiting\",\n            \"number of connections in the waiting state\",\n        )?;\n        let out_state_connected = IntGauge::new(\n            \"out_state_connected\",\n            \"number of connections in the connected state\",\n        )?;\n        let out_state_blocked = IntGauge::new(\n            \"out_state_blocked\",\n            \"number of connections in the blocked state\",\n        )?;\n        let out_state_loopback = IntGauge::new(\n            \"out_state_loopback\",\n            \"number of connections in the loopback state\",\n        )?;\n\n        let in_count_protocol = IntCounter::new(\n            \"net_in_count_protocol\",\n            \"count of incoming messages that are protocol overhead\",\n        )?;\n        let in_count_consensus = IntCounter::new(\n            \"net_in_count_consensus\",\n       
     \"count of incoming messages with consensus payload\",\n        )?;\n        let in_count_deploy_gossip = IntCounter::new(\n            \"net_in_count_deploy_gossip\",\n            \"count of incoming messages with deploy gossiper payload\",\n        )?;\n        let in_count_block_gossip = IntCounter::new(\n            \"net_in_count_block_gossip\",\n            \"count of incoming messages with block gossiper payload\",\n        )?;\n        let in_count_finality_signature_gossip = IntCounter::new(\n            \"net_in_count_finality_signature_gossip\",\n            \"count of incoming messages with finality signature gossiper payload\",\n        )?;\n        let in_count_address_gossip = IntCounter::new(\n            \"net_in_count_address_gossip\",\n            \"count of incoming messages with address gossiper payload\",\n        )?;\n        let in_count_deploy_transfer = IntCounter::new(\n            \"net_in_count_deploy_transfer\",\n            \"count of incoming messages with deploy request/response payload\",\n        )?;\n        let in_count_block_transfer = IntCounter::new(\n            \"net_in_count_block_transfer\",\n            \"count of incoming messages with block request/response payload\",\n        )?;\n        let in_count_trie_transfer = IntCounter::new(\n            \"net_in_count_trie_transfer\",\n            \"count of incoming messages with trie payloads\",\n        )?;\n        let in_count_other = IntCounter::new(\n            \"net_in_count_other\",\n            \"count of incoming messages with other payload\",\n        )?;\n\n        let in_bytes_protocol = IntCounter::new(\n            \"net_in_bytes_protocol\",\n            \"volume in bytes of incoming messages that are protocol overhead\",\n        )?;\n        let in_bytes_consensus = IntCounter::new(\n            \"net_in_bytes_consensus\",\n            \"volume in bytes of incoming messages with consensus payload\",\n        )?;\n        let in_bytes_deploy_gossip = 
IntCounter::new(\n            \"net_in_bytes_deploy_gossip\",\n            \"volume in bytes of incoming messages with deploy gossiper payload\",\n        )?;\n        let in_bytes_block_gossip = IntCounter::new(\n            \"net_in_bytes_block_gossip\",\n            \"volume in bytes of incoming messages with block gossiper payload\",\n        )?;\n        let in_bytes_finality_signature_gossip = IntCounter::new(\n            \"net_in_bytes_finality_signature_gossip\",\n            \"volume in bytes of incoming messages with finality signature gossiper payload\",\n        )?;\n        let in_bytes_address_gossip = IntCounter::new(\n            \"net_in_bytes_address_gossip\",\n            \"volume in bytes of incoming messages with address gossiper payload\",\n        )?;\n        let in_bytes_deploy_transfer = IntCounter::new(\n            \"net_in_bytes_deploy_transfer\",\n            \"volume in bytes of incoming messages with deploy request/response payload\",\n        )?;\n        let in_bytes_block_transfer = IntCounter::new(\n            \"net_in_bytes_block_transfer\",\n            \"volume in bytes of incoming messages with block request/response payload\",\n        )?;\n        let in_bytes_trie_transfer = IntCounter::new(\n            \"net_in_bytes_trie_transfer\",\n            \"volume in bytes of incoming messages with trie payloads\",\n        )?;\n        let in_bytes_other = IntCounter::new(\n            \"net_in_bytes_other\",\n            \"volume in bytes of incoming messages with other payload\",\n        )?;\n\n        let requests_for_trie_accepted = IntCounter::new(\n            \"requests_for_trie_accepted\",\n            \"number of trie requests accepted for processing\",\n        )?;\n        let requests_for_trie_finished = IntCounter::new(\n            \"requests_for_trie_finished\",\n            \"number of trie requests finished, successful or not\",\n        )?;\n\n        let accumulated_outgoing_limiter_delay = Counter::new(\n  
          \"accumulated_outgoing_limiter_delay\",\n            \"seconds spent delaying outgoing traffic to non-validators due to limiter, in seconds\",\n        )?;\n        let accumulated_incoming_limiter_delay = Counter::new(\n            \"accumulated_incoming_limiter_delay\",\n            \"seconds spent delaying incoming traffic from non-validators due to limiter, in seconds.\"\n        )?;\n\n        registry.register(Box::new(broadcast_requests.clone()))?;\n        registry.register(Box::new(direct_message_requests.clone()))?;\n        registry.register(Box::new(queued_messages.clone()))?;\n        registry.register(Box::new(peers.clone()))?;\n\n        registry.register(Box::new(out_count_protocol.clone()))?;\n        registry.register(Box::new(out_count_consensus.clone()))?;\n        registry.register(Box::new(out_count_deploy_gossip.clone()))?;\n        registry.register(Box::new(out_count_block_gossip.clone()))?;\n        registry.register(Box::new(out_count_finality_signature_gossip.clone()))?;\n        registry.register(Box::new(out_count_address_gossip.clone()))?;\n        registry.register(Box::new(out_count_deploy_transfer.clone()))?;\n        registry.register(Box::new(out_count_block_transfer.clone()))?;\n        registry.register(Box::new(out_count_trie_transfer.clone()))?;\n        registry.register(Box::new(out_count_other.clone()))?;\n\n        registry.register(Box::new(out_bytes_protocol.clone()))?;\n        registry.register(Box::new(out_bytes_consensus.clone()))?;\n        registry.register(Box::new(out_bytes_deploy_gossip.clone()))?;\n        registry.register(Box::new(out_bytes_block_gossip.clone()))?;\n        registry.register(Box::new(out_bytes_finality_signature_gossip.clone()))?;\n        registry.register(Box::new(out_bytes_address_gossip.clone()))?;\n        registry.register(Box::new(out_bytes_deploy_transfer.clone()))?;\n        registry.register(Box::new(out_bytes_block_transfer.clone()))?;\n        
registry.register(Box::new(out_bytes_trie_transfer.clone()))?;\n        registry.register(Box::new(out_bytes_other.clone()))?;\n\n        registry.register(Box::new(out_state_connecting.clone()))?;\n        registry.register(Box::new(out_state_waiting.clone()))?;\n        registry.register(Box::new(out_state_connected.clone()))?;\n        registry.register(Box::new(out_state_blocked.clone()))?;\n        registry.register(Box::new(out_state_loopback.clone()))?;\n\n        registry.register(Box::new(in_count_protocol.clone()))?;\n        registry.register(Box::new(in_count_consensus.clone()))?;\n        registry.register(Box::new(in_count_deploy_gossip.clone()))?;\n        registry.register(Box::new(in_count_block_gossip.clone()))?;\n        registry.register(Box::new(in_count_finality_signature_gossip.clone()))?;\n        registry.register(Box::new(in_count_address_gossip.clone()))?;\n        registry.register(Box::new(in_count_deploy_transfer.clone()))?;\n        registry.register(Box::new(in_count_block_transfer.clone()))?;\n        registry.register(Box::new(in_count_trie_transfer.clone()))?;\n        registry.register(Box::new(in_count_other.clone()))?;\n\n        registry.register(Box::new(in_bytes_protocol.clone()))?;\n        registry.register(Box::new(in_bytes_consensus.clone()))?;\n        registry.register(Box::new(in_bytes_deploy_gossip.clone()))?;\n        registry.register(Box::new(in_bytes_block_gossip.clone()))?;\n        registry.register(Box::new(in_bytes_finality_signature_gossip.clone()))?;\n        registry.register(Box::new(in_bytes_address_gossip.clone()))?;\n        registry.register(Box::new(in_bytes_deploy_transfer.clone()))?;\n        registry.register(Box::new(in_bytes_block_transfer.clone()))?;\n        registry.register(Box::new(in_bytes_trie_transfer.clone()))?;\n        registry.register(Box::new(in_bytes_other.clone()))?;\n\n        registry.register(Box::new(requests_for_trie_accepted.clone()))?;\n        
registry.register(Box::new(requests_for_trie_finished.clone()))?;\n\n        registry.register(Box::new(accumulated_outgoing_limiter_delay.clone()))?;\n        registry.register(Box::new(accumulated_incoming_limiter_delay.clone()))?;\n\n        Ok(Metrics {\n            broadcast_requests,\n            direct_message_requests,\n            queued_messages,\n            peers,\n            out_count_protocol,\n            out_count_consensus,\n            out_count_deploy_gossip,\n            out_count_block_gossip,\n            out_count_finality_signature_gossip,\n            out_count_address_gossip,\n            out_count_deploy_transfer,\n            out_count_block_transfer,\n            out_count_trie_transfer,\n            out_count_other,\n            out_bytes_protocol,\n            out_bytes_consensus,\n            out_bytes_deploy_gossip,\n            out_bytes_block_gossip,\n            out_bytes_finality_signature_gossip,\n            out_bytes_address_gossip,\n            out_bytes_deploy_transfer,\n            out_bytes_block_transfer,\n            out_bytes_trie_transfer,\n            out_bytes_other,\n            out_state_connecting,\n            out_state_waiting,\n            out_state_connected,\n            out_state_blocked,\n            out_state_loopback,\n            in_count_protocol,\n            in_count_consensus,\n            in_count_deploy_gossip,\n            in_count_block_gossip,\n            in_count_finality_signature_gossip,\n            in_count_address_gossip,\n            in_count_deploy_transfer,\n            in_count_block_transfer,\n            in_count_trie_transfer,\n            in_count_other,\n            in_bytes_protocol,\n            in_bytes_consensus,\n            in_bytes_deploy_gossip,\n            in_bytes_block_gossip,\n            in_bytes_finality_signature_gossip,\n            in_bytes_address_gossip,\n            in_bytes_deploy_transfer,\n            in_bytes_block_transfer,\n            
in_bytes_trie_transfer,\n            in_bytes_other,\n            requests_for_trie_accepted,\n            requests_for_trie_finished,\n            accumulated_outgoing_limiter_delay,\n            accumulated_incoming_limiter_delay,\n            registry: registry.clone(),\n        })\n    }\n\n    /// Records an outgoing payload.\n    pub(crate) fn record_payload_out(this: &Weak<Self>, kind: MessageKind, size: u64) {\n        if let Some(metrics) = this.upgrade() {\n            match kind {\n                MessageKind::Protocol => {\n                    metrics.out_bytes_protocol.inc_by(size);\n                    metrics.out_count_protocol.inc();\n                }\n                MessageKind::Consensus => {\n                    metrics.out_bytes_consensus.inc_by(size);\n                    metrics.out_count_consensus.inc();\n                }\n                MessageKind::TransactionGossip => {\n                    metrics.out_bytes_deploy_gossip.inc_by(size);\n                    metrics.out_count_deploy_gossip.inc();\n                }\n                MessageKind::BlockGossip => {\n                    metrics.out_bytes_block_gossip.inc_by(size);\n                    metrics.out_count_block_gossip.inc();\n                }\n                MessageKind::FinalitySignatureGossip => {\n                    metrics.out_bytes_finality_signature_gossip.inc_by(size);\n                    metrics.out_count_finality_signature_gossip.inc();\n                }\n                MessageKind::AddressGossip => {\n                    metrics.out_bytes_address_gossip.inc_by(size);\n                    metrics.out_count_address_gossip.inc();\n                }\n                MessageKind::TransactionTransfer => {\n                    metrics.out_bytes_deploy_transfer.inc_by(size);\n                    metrics.out_count_deploy_transfer.inc();\n                }\n                MessageKind::BlockTransfer => {\n                    metrics.out_bytes_block_transfer.inc_by(size);\n 
                   metrics.out_count_block_transfer.inc();\n                }\n                MessageKind::TrieTransfer => {\n                    metrics.out_bytes_trie_transfer.inc_by(size);\n                    metrics.out_count_trie_transfer.inc();\n                }\n                MessageKind::Other => {\n                    metrics.out_bytes_other.inc_by(size);\n                    metrics.out_count_other.inc();\n                }\n            }\n        } else {\n            debug!(\"not recording metrics, component already shut down\");\n        }\n    }\n\n    /// Records an incoming payload.\n    pub(crate) fn record_payload_in(this: &Weak<Self>, kind: MessageKind, size: u64) {\n        if let Some(metrics) = this.upgrade() {\n            match kind {\n                MessageKind::Protocol => {\n                    metrics.in_bytes_protocol.inc_by(size);\n                    metrics.in_count_protocol.inc();\n                }\n                MessageKind::Consensus => {\n                    metrics.in_bytes_consensus.inc_by(size);\n                    metrics.in_count_consensus.inc();\n                }\n                MessageKind::TransactionGossip => {\n                    metrics.in_bytes_deploy_gossip.inc_by(size);\n                    metrics.in_count_deploy_gossip.inc();\n                }\n                MessageKind::BlockGossip => {\n                    metrics.in_bytes_block_gossip.inc_by(size);\n                    metrics.in_count_block_gossip.inc();\n                }\n                MessageKind::FinalitySignatureGossip => {\n                    metrics.in_bytes_finality_signature_gossip.inc_by(size);\n                    metrics.in_count_finality_signature_gossip.inc();\n                }\n                MessageKind::AddressGossip => {\n                    metrics.in_bytes_address_gossip.inc_by(size);\n                    metrics.in_count_address_gossip.inc();\n                }\n                MessageKind::TransactionTransfer => {\n   
                 metrics.in_bytes_deploy_transfer.inc_by(size);\n                    metrics.in_count_deploy_transfer.inc();\n                }\n                MessageKind::BlockTransfer => {\n                    metrics.in_bytes_block_transfer.inc_by(size);\n                    metrics.in_count_block_transfer.inc();\n                }\n                MessageKind::TrieTransfer => {\n                    metrics.in_bytes_trie_transfer.inc_by(size);\n                    metrics.in_count_trie_transfer.inc();\n                }\n                MessageKind::Other => {\n                    metrics.in_bytes_other.inc_by(size);\n                    metrics.in_count_other.inc();\n                }\n            }\n        } else {\n            debug!(\"not recording metrics, component already shut down\");\n        }\n    }\n\n    /// Creates a set of outgoing metrics that is connected to this set of metrics.\n    pub(super) fn create_outgoing_metrics(&self) -> OutgoingMetrics {\n        OutgoingMetrics {\n            out_state_connecting: self.out_state_connecting.clone(),\n            out_state_waiting: self.out_state_waiting.clone(),\n            out_state_connected: self.out_state_connected.clone(),\n            out_state_blocked: self.out_state_blocked.clone(),\n            out_state_loopback: self.out_state_loopback.clone(),\n        }\n    }\n\n    /// Records that a trie request has been started.\n    pub(super) fn record_trie_request_start(this: &Weak<Self>) {\n        if let Some(metrics) = this.upgrade() {\n            metrics.requests_for_trie_accepted.inc();\n        } else {\n            debug!(\"not recording metrics, component already shut down\");\n        }\n    }\n\n    /// Records that a trie request has ended.\n    pub(super) fn record_trie_request_end(this: &Weak<Self>) {\n        if let Some(metrics) = this.upgrade() {\n            metrics.requests_for_trie_finished.inc();\n        } else {\n            debug!(\"not recording metrics, component 
already shut down\");\n        }\n    }\n}\n\nimpl Drop for Metrics {\n    fn drop(&mut self) {\n        unregister_metric!(self.registry, self.broadcast_requests);\n        unregister_metric!(self.registry, self.direct_message_requests);\n        unregister_metric!(self.registry, self.queued_messages);\n        unregister_metric!(self.registry, self.peers);\n\n        unregister_metric!(self.registry, self.out_count_protocol);\n        unregister_metric!(self.registry, self.out_count_consensus);\n        unregister_metric!(self.registry, self.out_count_deploy_gossip);\n        unregister_metric!(self.registry, self.out_count_block_gossip);\n        unregister_metric!(self.registry, self.out_count_finality_signature_gossip);\n        unregister_metric!(self.registry, self.out_count_address_gossip);\n        unregister_metric!(self.registry, self.out_count_deploy_transfer);\n        unregister_metric!(self.registry, self.out_count_block_transfer);\n        unregister_metric!(self.registry, self.out_count_trie_transfer);\n        unregister_metric!(self.registry, self.out_count_other);\n\n        unregister_metric!(self.registry, self.out_bytes_protocol);\n        unregister_metric!(self.registry, self.out_bytes_consensus);\n        unregister_metric!(self.registry, self.out_bytes_deploy_gossip);\n        unregister_metric!(self.registry, self.out_bytes_block_gossip);\n        unregister_metric!(self.registry, self.out_bytes_finality_signature_gossip);\n        unregister_metric!(self.registry, self.out_bytes_address_gossip);\n        unregister_metric!(self.registry, self.out_bytes_deploy_transfer);\n        unregister_metric!(self.registry, self.out_bytes_block_transfer);\n        unregister_metric!(self.registry, self.out_bytes_trie_transfer);\n        unregister_metric!(self.registry, self.out_bytes_other);\n\n        unregister_metric!(self.registry, self.out_state_connecting);\n        unregister_metric!(self.registry, self.out_state_waiting);\n        
unregister_metric!(self.registry, self.out_state_connected);\n        unregister_metric!(self.registry, self.out_state_blocked);\n        unregister_metric!(self.registry, self.out_state_loopback);\n\n        unregister_metric!(self.registry, self.in_count_protocol);\n        unregister_metric!(self.registry, self.in_count_consensus);\n        unregister_metric!(self.registry, self.in_count_deploy_gossip);\n        unregister_metric!(self.registry, self.in_count_block_gossip);\n        unregister_metric!(self.registry, self.in_count_finality_signature_gossip);\n        unregister_metric!(self.registry, self.in_count_address_gossip);\n        unregister_metric!(self.registry, self.in_count_deploy_transfer);\n        unregister_metric!(self.registry, self.in_count_block_transfer);\n        unregister_metric!(self.registry, self.in_count_trie_transfer);\n        unregister_metric!(self.registry, self.in_count_other);\n\n        unregister_metric!(self.registry, self.in_bytes_protocol);\n        unregister_metric!(self.registry, self.in_bytes_consensus);\n        unregister_metric!(self.registry, self.in_bytes_deploy_gossip);\n        unregister_metric!(self.registry, self.in_bytes_block_gossip);\n        unregister_metric!(self.registry, self.in_bytes_finality_signature_gossip);\n        unregister_metric!(self.registry, self.in_bytes_address_gossip);\n        unregister_metric!(self.registry, self.in_bytes_deploy_transfer);\n        unregister_metric!(self.registry, self.in_bytes_block_transfer);\n        unregister_metric!(self.registry, self.in_bytes_trie_transfer);\n        unregister_metric!(self.registry, self.in_bytes_other);\n\n        unregister_metric!(self.registry, self.requests_for_trie_accepted);\n        unregister_metric!(self.registry, self.requests_for_trie_finished);\n\n        unregister_metric!(self.registry, self.accumulated_outgoing_limiter_delay);\n        unregister_metric!(self.registry, self.accumulated_incoming_limiter_delay);\n    }\n}\n"
  },
  {
    "path": "node/src/components/network/outgoing.rs",
    "content": "//! Management of outgoing connections.\n//!\n//! This module implements outgoing connection management, decoupled from the underlying transport\n//! or any higher-level level parts. It encapsulates the reconnection and blocklisting logic on the\n//! `SocketAddr` level.\n//!\n//! # Basic structure\n//!\n//! Core of this module is the `OutgoingManager`, which supports the following functionality:\n//!\n//! * Handed a `SocketAddr`s via the `learn_addr` function, it will permanently maintain a\n//!   connection to the given address, only giving up if retry thresholds are exceeded, after which\n//!   it will be forgotten.\n//! * `block_addr` and `redeem_addr` can be used to maintain a `SocketAddr`-keyed block list.\n//! * `OutgoingManager` maintains an internal routing table. The `get_route` function can be used to\n//!   retrieve a \"route\" (typically a `sync::channel` accepting network messages) to a remote peer\n//!   by `NodeId`.\n//!\n//! # Requirements\n//!\n//! `OutgoingManager` is decoupled from the underlying protocol, all of its interactions are\n//! performed through [`DialRequest`] and [`DialOutcome`]s. This frees the `OutgoingManager` from\n//! having to worry about protocol specifics.\n//!\n//! Three conditions not expressed in code must be fulfilled for the `OutgoingManager` to function:\n//!\n//! * The `Dialer` is expected to produce `DialOutcomes` for every dial [`DialRequest::Dial`]\n//!   eventually. These must be forwarded to the `OutgoingManager` via the `handle_dial_outcome`\n//!   function.\n//! * The `perform_housekeeping` method must be called periodically to give the `OutgoingManager` a\n//!   chance to initiate reconnections and collect garbage.\n//! * When a connection is dropped, the connection manager must be notified via\n//!   `handle_connection_drop`.\n//!\n//! # Lifecycle\n//!\n//! The following chart illustrates the lifecycle of an outgoing connection.\n//!\n//! ```text\n//!                   
forget (after n tries)\n//!          ┌────────────────────────────────────┐\n//!          │                 learn              ▼\n//!          │               ┌──────────────  unknown/forgotten\n//!          │               │                (implicit state)\n//!          │               │\n//!          │               │                │\n//!          │               │                │ block\n//!          │               │                │\n//!          │               │                │\n//!          │               │                ▼\n//!     ┌────┴────┐          │          ┌─────────┐\n//!     │         │  fail    │    block │         │\n//!     │ Waiting │◄───────┐ │   ┌─────►│ Blocked │◄──────────┐\n//! ┌───┤         │        │ │   │      │         │           │\n//! │   └────┬────┘        │ │   │      └────┬────┘           │\n//! │ block  │             │ │   │           │                │\n//! │        │ timeout     │ ▼   │           │ redeem,        │\n//! │        │        ┌────┴─────┴───┐       │ block timeout  │\n//! │        │        │              │       │                │\n//! │        └───────►│  Connecting  │◄──────┘                │\n//! │                 │              │                        │\n//! │                 └─────┬────┬───┘                        │\n//! │                       │ ▲  │                            │\n//! │               success │ │  │ detect                     │\n//! │                       │ │  │      ┌──────────┐          │\n//! │ ┌───────────┐         │ │  │      │          │          │\n//! │ │           │◄────────┘ │  │      │ Loopback │          │\n//! │ │ Connected │           │  └─────►│          │          │\n//! │ │           │ dropped/  │         └──────────┘          │\n//! │ └─────┬─────┴───────────┘                               │\n//! │       │       timeout                                   │\n//! │       │ block                                           │\n//! 
└───────┴─────────────────────────────────────────────────┘\n//! ```\n//!\n//! # Timeouts/safety\n//!\n//! The `sweep` transition for connections usually does not happen during normal operations. Three\n//! causes are typical for it:\n//!\n//! * A configured TCP timeout above [`OutgoingConfig::sweep_timeout`].\n//! * Very slow responses from remote peers (similar to a Slowloris-attack)\n//! * Faulty handling by the driver of the [`OutgoingManager`], i.e. the outside component.\n//!\n//! Should a dial attempt exceed a certain timeout, it is considered failed and put into the waiting\n//! state again.\n//!\n//! If a conflict (multiple successful dial results) occurs, the more recent connection takes\n//! precedence over the previous one. This prevents problems when a notification of a terminated\n//! connection is overtaken by the new connection announcement.\n\nuse std::{\n    collections::{hash_map::Entry, HashMap},\n    error::Error,\n    fmt::{self, Debug, Display, Formatter},\n    mem,\n    net::SocketAddr,\n    time::{Duration, Instant},\n};\n\nuse datasize::DataSize;\nuse prometheus::IntGauge;\nuse rand::Rng;\nuse tracing::{debug, error, error_span, field::Empty, info, trace, warn, Span};\n\nuse super::{\n    blocklist::BlocklistJustification,\n    display_error,\n    health::{ConnectionHealth, HealthCheckOutcome, HealthConfig, Nonce, TaggedTimestamp},\n    NodeId,\n};\n\n/// An outgoing connection/address in various states.\n#[derive(DataSize, Debug)]\npub struct Outgoing<H, E>\nwhere\n    H: DataSize,\n    E: DataSize,\n{\n    /// Whether or not the address is unforgettable, see `learn_addr` for details.\n    pub(super) is_unforgettable: bool,\n    /// The current state the connection/address is in.\n    pub(super) state: OutgoingState<H, E>,\n}\n\n/// Active state for a connection/address.\n#[derive(DataSize, Debug)]\npub(crate) enum OutgoingState<H, E>\nwhere\n    H: DataSize,\n    E: DataSize,\n{\n    /// The outgoing address has been known for the first 
time and we are currently connecting.\n    Connecting {\n        /// Number of attempts that failed, so far.\n        failures_so_far: u8,\n        /// Time when the connection attempt was instantiated.\n        since: Instant,\n    },\n    /// The connection has failed at least one connection attempt and is waiting for a retry.\n    Waiting {\n        /// Number of attempts that failed, so far.\n        failures_so_far: u8,\n        /// The most recent connection error.\n        ///\n        /// If not given, the connection was put into a `Waiting` state due to a sweep timeout.\n        error: Option<E>,\n        /// The precise moment when the last connection attempt failed.\n        last_failure: Instant,\n    },\n    /// An established outgoing connection.\n    Connected {\n        /// The peers remote ID.\n        peer_id: NodeId,\n        /// Handle to a communication channel that can be used to send data to the peer.\n        ///\n        /// Can be a channel to decouple sending, or even a direct connection handle.\n        handle: H,\n        /// Health of the connection.\n        health: ConnectionHealth,\n    },\n    /// The address was blocked and will not be retried.\n    Blocked {\n        /// Since when the block took effect.\n        since: Instant,\n        /// The justification given for blocking.\n        justification: BlocklistJustification,\n        /// Until when the block took effect.\n        until: Instant,\n    },\n    /// The address is owned by ourselves and will not be tried again.\n    Loopback,\n}\n\nimpl<H, E> Display for OutgoingState<H, E>\nwhere\n    H: DataSize,\n    E: DataSize,\n{\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            OutgoingState::Connecting {\n                failures_so_far, ..\n            } => {\n                write!(f, \"connecting({})\", failures_so_far)\n            }\n            OutgoingState::Waiting {\n                failures_so_far, ..\n            } => 
write!(f, \"waiting({})\", failures_so_far),\n            OutgoingState::Connected { .. } => write!(f, \"connected\"),\n            OutgoingState::Blocked { .. } => write!(f, \"blocked\"),\n            OutgoingState::Loopback => write!(f, \"loopback\"),\n        }\n    }\n}\n\n/// The result of dialing `SocketAddr`.\n#[derive(Debug)]\npub enum DialOutcome<H, E> {\n    /// A connection was successfully established.\n    Successful {\n        /// The address dialed.\n        addr: SocketAddr,\n        /// A handle to send data down the connection.\n        handle: H,\n        /// The remote peer's authenticated node ID.\n        node_id: NodeId,\n        /// The moment the connection was established.\n        when: Instant,\n    },\n    /// The connection attempt failed.\n    Failed {\n        /// The address dialed.\n        addr: SocketAddr,\n        /// The error encountered while dialing.\n        error: E,\n        /// The moment the connection attempt failed.\n        when: Instant,\n    },\n    /// The connection was aborted, because the remote peer turned out to be a loopback.\n    Loopback {\n        /// The address used to connect.\n        addr: SocketAddr,\n    },\n}\n\nimpl<H, E> DialOutcome<H, E> {\n    /// Retrieves the socket address from the `DialOutcome`.\n    fn addr(&self) -> SocketAddr {\n        match self {\n            DialOutcome::Successful { addr, .. }\n            | DialOutcome::Failed { addr, .. }\n            | DialOutcome::Loopback { addr, .. 
} => *addr,\n        }\n    }\n}\n\n/// A request made for dialing.\n#[derive(Clone, Debug)]\n#[must_use]\npub(crate) enum DialRequest<H> {\n    /// Attempt to connect to the outgoing socket address.\n    ///\n    /// For every time this request is emitted, there must be a corresponding call to\n    /// `handle_dial_outcome` eventually.\n    ///\n    /// Any logging of connection issues should be done in the context of `span` for better log\n    /// output.\n    Dial { addr: SocketAddr, span: Span },\n\n    /// Disconnects a potentially existing connection.\n    ///\n    /// Used when a peer has been blocked or should be disconnected for other reasons. Note that\n    /// this request can immediately be followed by a connection request, as in the case of a ping\n    /// timeout.\n    Disconnect { handle: H, span: Span },\n\n    /// Send a ping to a peer.\n    SendPing {\n        peer_id: NodeId,\n        nonce: Nonce,\n        span: Span,\n    },\n}\n\nimpl<H> Display for DialRequest<H>\nwhere\n    H: Display,\n{\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            DialRequest::Dial { addr, .. } => {\n                write!(f, \"dial: {}\", addr)\n            }\n            DialRequest::Disconnect { handle, .. } => {\n                write!(f, \"disconnect: {}\", handle)\n            }\n            DialRequest::SendPing { peer_id, nonce, .. 
} => {\n                write!(f, \"ping[{}]: {}\", nonce, peer_id)\n            }\n        }\n    }\n}\n\n#[derive(DataSize, Debug)]\n/// Connection settings for the outgoing connection manager.\npub struct OutgoingConfig {\n    /// The maximum number of attempts before giving up and forgetting an address, if permitted.\n    pub(crate) retry_attempts: u8,\n    /// The basic time slot for exponential backoff when reconnecting.\n    pub(crate) base_timeout: Duration,\n    /// Time until an outgoing address is unblocked.\n    pub(crate) unblock_after_min: Duration,\n    pub(crate) unblock_after_max: Duration,\n    /// Safety timeout, after which a connection is no longer expected to finish dialing.\n    pub(crate) sweep_timeout: Duration,\n    /// Health check configuration.\n    pub(crate) health: HealthConfig,\n}\n\nimpl OutgoingConfig {\n    /// Calculates the backoff time.\n    ///\n    /// `failed_attempts` (n) is the number of previous attempts *before* the current failure (thus\n    /// starting at 0). The backoff time will be double for each attempt.\n    fn calc_backoff(&self, failed_attempts: u8) -> Duration {\n        (1u32 << failed_attempts as u32) * self.base_timeout\n    }\n}\n\n/// Manager of outbound connections.\n///\n/// See the module documentation for usage suggestions.\n#[derive(DataSize, Debug)]\npub struct OutgoingManager<H, E>\nwhere\n    H: DataSize,\n    E: DataSize,\n{\n    /// Outgoing connections subsystem configuration.\n    config: OutgoingConfig,\n    /// Mapping of address to their current connection state.\n    pub(super) outgoing: HashMap<SocketAddr, Outgoing<H, E>>,\n    /// Routing table.\n    ///\n    /// Contains a mapping from node IDs to connected socket addresses. 
A missing entry means that\n    /// the destination is not connected.\n    routes: HashMap<NodeId, SocketAddr>,\n    /// A set of outgoing metrics.\n    #[data_size(skip)]\n    metrics: OutgoingMetrics,\n}\n\n/// A set of metrics used by the outgoing component.\n#[derive(Clone, Debug)]\npub(super) struct OutgoingMetrics {\n    /// Number of outgoing connections in connecting state.\n    pub(super) out_state_connecting: IntGauge,\n    /// Number of outgoing connections in waiting state.\n    pub(super) out_state_waiting: IntGauge,\n    /// Number of outgoing connections in connected state.\n    pub(super) out_state_connected: IntGauge,\n    /// Number of outgoing connections in blocked state.\n    pub(super) out_state_blocked: IntGauge,\n    /// Number of outgoing connections in loopback state.\n    pub(super) out_state_loopback: IntGauge,\n}\n\n// Note: We only implement `Default` here for use in testing with `OutgoingManager::new`.\n#[cfg(test)]\nimpl Default for OutgoingMetrics {\n    fn default() -> Self {\n        Self {\n            out_state_connecting: IntGauge::new(\n                \"out_state_connecting\",\n                \"internal out_state_connecting\",\n            )\n            .unwrap(),\n            out_state_waiting: IntGauge::new(\"out_state_waiting\", \"internal out_state_waiting\")\n                .unwrap(),\n            out_state_connected: IntGauge::new(\n                \"out_state_connected\",\n                \"internal out_state_connected\",\n            )\n            .unwrap(),\n            out_state_blocked: IntGauge::new(\"out_state_blocked\", \"internal out_state_blocked\")\n                .unwrap(),\n            out_state_loopback: IntGauge::new(\"out_state_loopback\", \"internal loopback\").unwrap(),\n        }\n    }\n}\n\nimpl<H, E> OutgoingManager<H, E>\nwhere\n    H: DataSize,\n    E: DataSize,\n{\n    /// Creates a new outgoing manager with a set of metrics that is not connected to any registry.\n    #[cfg(test)]\n    
#[inline]\n    pub(super) fn new(config: OutgoingConfig) -> Self {\n        Self::with_metrics(config, Default::default())\n    }\n\n    /// Creates a new outgoing manager with an already existing set of metrics.\n    pub(super) fn with_metrics(config: OutgoingConfig, metrics: OutgoingMetrics) -> Self {\n        Self {\n            config,\n            outgoing: Default::default(),\n            routes: Default::default(),\n            metrics,\n        }\n    }\n\n    /// Returns a reference to the internal metrics.\n    #[cfg(test)]\n    fn metrics(&self) -> &OutgoingMetrics {\n        &self.metrics\n    }\n}\n\n/// Creates a logging span for a specific connection.\n#[inline]\nfn make_span<H, E>(addr: SocketAddr, outgoing: Option<&Outgoing<H, E>>) -> Span\nwhere\n    H: DataSize,\n    E: DataSize,\n{\n    // Note: The jury is still out on whether we want to create a single span per connection and\n    // cache it, or create a new one (with the same connection ID) each time this is called. The\n    // advantage of the former is external tools have it easier correlating all related\n    // information, while the drawback is not being able to change the parent span link, which\n    // might be awkward.\n\n    if let Some(outgoing) = outgoing {\n        match outgoing.state {\n            OutgoingState::Connected { peer_id, .. } => {\n                error_span!(\"outgoing\", %addr, state=%outgoing.state, %peer_id, consensus_key=Empty)\n            }\n            _ => {\n                error_span!(\"outgoing\", %addr, state=%outgoing.state, peer_id=Empty, consensus_key=Empty)\n            }\n        }\n    } else {\n        error_span!(\"outgoing\", %addr, state = \"-\")\n    }\n}\n\nimpl<H, E> OutgoingManager<H, E>\nwhere\n    H: DataSize + Clone,\n    E: DataSize + Error,\n{\n    /// Changes the state of an outgoing connection.\n    ///\n    /// Will trigger an update of the routing table if necessary. 
Does not emit any other\n    /// side-effects.\n    ///\n    /// Returns the new state, as well as any residual handle.\n    fn change_outgoing_state(\n        &mut self,\n        addr: SocketAddr,\n        mut new_state: OutgoingState<H, E>,\n    ) -> (&mut Outgoing<H, E>, Option<H>) {\n        let (prev_state, new_outgoing) = match self.outgoing.entry(addr) {\n            Entry::Vacant(vacant) => {\n                let inserted = vacant.insert(Outgoing {\n                    state: new_state,\n                    is_unforgettable: false,\n                });\n\n                (None, inserted)\n            }\n\n            Entry::Occupied(occupied) => {\n                let prev = occupied.into_mut();\n\n                mem::swap(&mut prev.state, &mut new_state);\n\n                // `new_state` and `prev.state` are swapped now.\n                (Some(new_state), prev)\n            }\n        };\n\n        // Update the routing table.\n        match (&prev_state, &new_outgoing.state) {\n            (Some(OutgoingState::Connected { .. }), OutgoingState::Connected { .. }) => {\n                trace!(\"route unchanged, already connected\");\n            }\n\n            // Dropping from connected to any other state requires clearing the route.\n            (Some(OutgoingState::Connected { peer_id, .. }), _) => {\n                debug!(%peer_id, \"route removed\");\n                self.routes.remove(peer_id);\n            }\n\n            // Otherwise we have established a new route.\n            (_, OutgoingState::Connected { peer_id, .. }) => {\n                debug!(%peer_id, \"route added\");\n                self.routes.insert(*peer_id, addr);\n            }\n\n            _ => {\n                trace!(\"route unchanged\");\n            }\n        }\n\n        // Update the metrics, decreasing the count of the state that was left, while increasing\n        // the new state. 
Note that this will lead to a non-atomic dec/inc if the previous state\n        // was the same as before.\n        match prev_state {\n            Some(OutgoingState::Blocked { .. }) => self.metrics.out_state_blocked.dec(),\n            Some(OutgoingState::Connected { .. }) => self.metrics.out_state_connected.dec(),\n            Some(OutgoingState::Connecting { .. }) => self.metrics.out_state_connecting.dec(),\n            Some(OutgoingState::Loopback) => self.metrics.out_state_loopback.dec(),\n            Some(OutgoingState::Waiting { .. }) => self.metrics.out_state_waiting.dec(),\n            None => {\n                // Nothing to do, there was no previous state.\n            }\n        }\n\n        match new_outgoing.state {\n            OutgoingState::Blocked { .. } => self.metrics.out_state_blocked.inc(),\n            OutgoingState::Connected { .. } => self.metrics.out_state_connected.inc(),\n            OutgoingState::Connecting { .. } => self.metrics.out_state_connecting.inc(),\n            OutgoingState::Loopback => self.metrics.out_state_loopback.inc(),\n            OutgoingState::Waiting { .. } => self.metrics.out_state_waiting.inc(),\n        }\n\n        // Finally, deconstruct the previous state in case we need to preserve the handle.\n        let handle = if let Some(OutgoingState::Connected { handle, .. 
}) = prev_state {\n            Some(handle)\n        } else {\n            None\n        };\n\n        (new_outgoing, handle)\n    }\n\n    /// Retrieves the address by peer.\n    pub(crate) fn get_addr(&self, peer_id: NodeId) -> Option<SocketAddr> {\n        self.routes.get(&peer_id).copied()\n    }\n\n    /// Retrieves a handle to a peer.\n    ///\n    /// Primary function to send data to peers; clients retrieve a handle to it which can then\n    /// be used to send data.\n    pub(crate) fn get_route(&self, peer_id: NodeId) -> Option<&H> {\n        let outgoing = self.outgoing.get(self.routes.get(&peer_id)?)?;\n\n        if let OutgoingState::Connected { ref handle, .. } = outgoing.state {\n            Some(handle)\n        } else {\n            None\n        }\n    }\n\n    /// Iterates over all connected peer IDs.\n    pub(crate) fn connected_peers(&'_ self) -> impl Iterator<Item = NodeId> + '_ {\n        self.routes.keys().copied()\n    }\n\n    /// Notify about a potentially new address that has been discovered.\n    ///\n    /// Immediately triggers the connection process to said address if it was not known before.\n    ///\n    /// A connection marked `unforgettable` will never be evicted but reset instead when it exceeds\n    /// the retry limit.\n    pub(crate) fn learn_addr(\n        &mut self,\n        addr: SocketAddr,\n        unforgettable: bool,\n        now: Instant,\n    ) -> Option<DialRequest<H>> {\n        let span = make_span(addr, self.outgoing.get(&addr));\n        span.clone()\n            .in_scope(move || match self.outgoing.entry(addr) {\n                Entry::Occupied(_) => {\n                    trace!(\"ignoring already known address\");\n                    None\n                }\n                Entry::Vacant(_vacant) => {\n                    info!(\"connecting to newly learned address\");\n                    let (outgoing, _) = self.change_outgoing_state(\n                        addr,\n                        
OutgoingState::Connecting {\n                            failures_so_far: 0,\n                            since: now,\n                        },\n                    );\n                    if outgoing.is_unforgettable != unforgettable {\n                        outgoing.is_unforgettable = unforgettable;\n                        debug!(unforgettable, \"marked\");\n                    }\n                    Some(DialRequest::Dial { addr, span })\n                }\n            })\n    }\n\n    pub(crate) fn block_addr<R: Rng>(\n        &mut self,\n        addr: SocketAddr,\n        now: Instant,\n        justification: BlocklistJustification,\n        rng: &mut R,\n    ) -> Option<DialRequest<H>> {\n        let span = make_span(addr, self.outgoing.get(&addr));\n        span.clone()\n            .in_scope(move || match self.outgoing.entry(addr) {\n                Entry::Vacant(_vacant) => {\n                    info!(\"unknown address blocked\");\n                    let until = self.calculate_block_until(now, rng);\n                    self.change_outgoing_state(\n                        addr,\n                        OutgoingState::Blocked {\n                            since: now,\n                            justification,\n                            until,\n                        },\n                    );\n                    None\n                }\n                Entry::Occupied(occupied) => match occupied.get().state {\n                    OutgoingState::Blocked { .. } => {\n                        debug!(\"address already blocked\");\n                        None\n                    }\n                    OutgoingState::Loopback => {\n                        warn!(\"loopback address block ignored\");\n                        None\n                    }\n                    OutgoingState::Connected { ref handle, .. 
} => {\n                        info!(\"connected address blocked, disconnecting\");\n                        let handle = handle.clone();\n                        let until = self.calculate_block_until(now, rng);\n                        self.change_outgoing_state(\n                            addr,\n                            OutgoingState::Blocked {\n                                since: now,\n                                justification,\n                                until,\n                            },\n                        );\n                        Some(DialRequest::Disconnect { span, handle })\n                    }\n                    OutgoingState::Waiting { .. } | OutgoingState::Connecting { .. } => {\n                        let until = self.calculate_block_until(now, rng);\n                        info!(\"address blocked\");\n                        self.change_outgoing_state(\n                            addr,\n                            OutgoingState::Blocked {\n                                since: now,\n                                justification,\n                                until,\n                            },\n                        );\n                        None\n                    }\n                },\n            })\n    }\n\n    /// Checks if an address is blocked.\n    #[cfg(test)]\n    pub(crate) fn is_blocked(&self, addr: SocketAddr) -> bool {\n        match self.outgoing.get(&addr) {\n            Some(outgoing) => matches!(outgoing.state, OutgoingState::Blocked { .. 
}),\n            None => false,\n        }\n    }\n\n    /// Removes an address from the block list.\n    ///\n    /// Does nothing if the address was not blocked.\n    // This function is currently not in use by `network` itself.\n    #[allow(dead_code)]\n    pub(crate) fn redeem_addr(&mut self, addr: SocketAddr, now: Instant) -> Option<DialRequest<H>> {\n        let span = make_span(addr, self.outgoing.get(&addr));\n        span.clone()\n            .in_scope(move || match self.outgoing.entry(addr) {\n                Entry::Vacant(_) => {\n                    debug!(\"unknown address redeemed\");\n                    None\n                }\n                Entry::Occupied(occupied) => match occupied.get().state {\n                    OutgoingState::Blocked { .. } => {\n                        self.change_outgoing_state(\n                            addr,\n                            OutgoingState::Connecting {\n                                failures_so_far: 0,\n                                since: now,\n                            },\n                        );\n                        Some(DialRequest::Dial { addr, span })\n                    }\n                    _ => {\n                        debug!(\"address redemption ignored, not blocked\");\n                        None\n                    }\n                },\n            })\n    }\n\n    /// Records a pong being received.\n    pub(super) fn record_pong(&mut self, peer_id: NodeId, pong: TaggedTimestamp) -> bool {\n        let addr = if let Some(addr) = self.routes.get(&peer_id) {\n            *addr\n        } else {\n            debug!(%peer_id, nonce=%pong.nonce(), \"ignoring pong received from peer without route\");\n            return false;\n        };\n\n        if let Some(outgoing) = self.outgoing.get_mut(&addr) {\n            if let OutgoingState::Connected { ref mut health, .. 
} = outgoing.state {\n                health.record_pong(&self.config.health, pong)\n            } else {\n                debug!(%peer_id, nonce=%pong.nonce(), \"ignoring pong received from peer that is not in connected state\");\n                false\n            }\n        } else {\n            debug!(%peer_id, nonce=%pong.nonce(), \"ignoring pong received from peer without route\");\n            false\n        }\n    }\n\n    /// Performs housekeeping like reconnection or unblocking peers.\n    ///\n    /// This function must periodically be called. A good interval is every second.\n    pub(super) fn perform_housekeeping<R: Rng>(\n        &mut self,\n        rng: &mut R,\n        now: Instant,\n    ) -> Vec<DialRequest<H>> {\n        let mut to_forget = Vec::new();\n        let mut to_fail = Vec::new();\n        let mut to_ping_timeout = Vec::new();\n        let mut to_reconnect = Vec::new();\n        let mut to_ping = Vec::new();\n\n        for (&addr, outgoing) in &mut self.outgoing {\n            // Note: `Span::in_scope` is no longer serviceable here due to borrow limitations.\n            let _span_guard = make_span(addr, Some(outgoing)).entered();\n\n            match outgoing.state {\n                // Decide whether to attempt reconnecting a failed-waiting address.\n                OutgoingState::Waiting {\n                    failures_so_far,\n                    last_failure,\n                    ..\n                } => {\n                    if failures_so_far > self.config.retry_attempts {\n                        if outgoing.is_unforgettable {\n                            // Unforgettable addresses simply have their timer reset.\n                            info!(\"unforgettable address reset\");\n\n                            to_reconnect.push((addr, 0));\n                        } else {\n                            // Address had too many attempts at reconnection, we will forget\n                            // it after exiting this closure.\n 
                           to_forget.push(addr);\n\n                            info!(\"address forgotten\");\n                        }\n                    } else {\n                        // The address has not exceeded the limit, so check if it is due.\n                        let due = last_failure + self.config.calc_backoff(failures_so_far);\n                        if now >= due {\n                            debug!(attempts = failures_so_far, \"address reconnecting\");\n\n                            to_reconnect.push((addr, failures_so_far));\n                        }\n                    }\n                }\n\n                OutgoingState::Blocked { until, .. } => {\n                    if now >= until {\n                        info!(\"address unblocked\");\n                        to_reconnect.push((addr, 0));\n                    }\n                }\n\n                OutgoingState::Connecting {\n                    since,\n                    failures_so_far,\n                } => {\n                    let timeout = since + self.config.sweep_timeout;\n                    if now >= timeout {\n                        // The outer component has not called us with a `DialOutcome` in a\n                        // reasonable amount of time. 
This should happen very rarely, ideally\n                        // never.\n                        warn!(\"address timed out connecting, was swept\");\n\n                        // Count the timeout as a failure against the connection.\n                        to_fail.push((addr, failures_so_far + 1));\n                    }\n                }\n                OutgoingState::Connected {\n                    peer_id,\n                    ref mut health,\n                    ..\n                } => {\n                    // Check if we need to send a ping, or give up and disconnect.\n                    let health_outcome = health.update_health(rng, &self.config.health, now);\n\n                    match health_outcome {\n                        HealthCheckOutcome::DoNothing => {\n                            // Nothing to do.\n                        }\n                        HealthCheckOutcome::SendPing(nonce) => {\n                            trace!(%nonce, \"sending ping\");\n                            to_ping.push((peer_id, addr, nonce));\n                        }\n                        HealthCheckOutcome::GiveUp => {\n                            info!(\"disconnecting after ping retries were exhausted\");\n                            to_ping_timeout.push(addr);\n                        }\n                    }\n                }\n                OutgoingState::Loopback => {\n                    // Entry is ignored. 
Not outputting any `trace` because this is log spam even at\n                    // the `trace` level.\n                }\n            }\n        }\n\n        // Remove all addresses marked for forgetting.\n        for addr in to_forget {\n            self.outgoing.remove(&addr);\n        }\n\n        // Fail connections that are taking way too long to connect.\n        for (addr, failures_so_far) in to_fail {\n            let span = make_span(addr, self.outgoing.get(&addr));\n\n            span.in_scope(|| {\n                self.change_outgoing_state(\n                    addr,\n                    OutgoingState::Waiting {\n                        failures_so_far,\n                        error: None,\n                        last_failure: now,\n                    },\n                )\n            });\n        }\n\n        let mut dial_requests = Vec::new();\n\n        // Request disconnection from failed pings.\n        for addr in to_ping_timeout {\n            let span = make_span(addr, self.outgoing.get(&addr));\n\n            let (_, opt_handle) = span.clone().in_scope(|| {\n                self.change_outgoing_state(\n                    addr,\n                    OutgoingState::Connecting {\n                        failures_so_far: 0,\n                        since: now,\n                    },\n                )\n            });\n\n            if let Some(handle) = opt_handle {\n                dial_requests.push(DialRequest::Disconnect {\n                    handle,\n                    span: span.clone(),\n                });\n            } else {\n                error!(\"did not expect connection under ping timeout to not have a residual connection handle. 
this is a bug\");\n            }\n            dial_requests.push(DialRequest::Dial { addr, span });\n        }\n\n        // Reconnect others.\n        dial_requests.extend(to_reconnect.into_iter().map(|(addr, failures_so_far)| {\n            let span = make_span(addr, self.outgoing.get(&addr));\n\n            span.clone().in_scope(|| {\n                self.change_outgoing_state(\n                    addr,\n                    OutgoingState::Connecting {\n                        failures_so_far,\n                        since: now,\n                    },\n                )\n            });\n\n            DialRequest::Dial { addr, span }\n        }));\n\n        // Finally, schedule pings.\n        dial_requests.extend(to_ping.into_iter().map(|(peer_id, addr, nonce)| {\n            let span = make_span(addr, self.outgoing.get(&addr));\n            DialRequest::SendPing {\n                peer_id,\n                nonce,\n                span,\n            }\n        }));\n\n        dial_requests\n    }\n\n    /// Handles the outcome of a dialing attempt.\n    ///\n    /// Note that reconnects will earliest happen on the next `perform_housekeeping` call.\n    pub(crate) fn handle_dial_outcome(\n        &mut self,\n        dial_outcome: DialOutcome<H, E>,\n    ) -> Option<DialRequest<H>> {\n        let addr = dial_outcome.addr();\n        let span = make_span(addr, self.outgoing.get(&addr));\n\n        span.clone().in_scope(move || match dial_outcome {\n            DialOutcome::Successful {\n                addr,\n                handle,\n                node_id,\n                when\n            } => {\n                info!(\"established outgoing connection\");\n\n                if let Some(Outgoing{\n                    state: OutgoingState::Blocked { .. 
}, ..\n                }) = self.outgoing.get(&addr) {\n                    // If we connected to a blocked address, do not go into connected, but stay\n                    // blocked instead.\n                    Some(DialRequest::Disconnect{\n                        handle, span\n                    })\n                } else {\n                    // Otherwise, just record the connected state.\n                    self.change_outgoing_state(\n                        addr,\n                        OutgoingState::Connected {\n                            peer_id: node_id,\n                            handle,\n                            health: ConnectionHealth::new(when),\n                        },\n                    );\n                    None\n                }\n            }\n\n            DialOutcome::Failed { addr, error, when } => {\n                info!(err = display_error(&error), \"outgoing connection failed\");\n\n                if let Some(outgoing) = self.outgoing.get(&addr) {\n                    match outgoing.state {\n                        OutgoingState::Connecting { failures_so_far,.. } => {\n                            self.change_outgoing_state(\n                                addr,\n                                OutgoingState::Waiting {\n                                    failures_so_far: failures_so_far + 1,\n                                    error: Some(error),\n                                    last_failure: when,\n                                },\n                            );\n                            None\n                        }\n                        OutgoingState::Blocked { .. 
} => {\n                            debug!(\"failed dial outcome after block ignored\");\n\n                            // We do not set the connection to \"waiting\" if an out-of-order failed\n                            // connection arrives, but continue to honor the blocking.\n                            None\n                        }\n                        OutgoingState::Waiting { .. } |\n                        OutgoingState::Connected { .. } |\n                        OutgoingState::Loopback => {\n                            warn!(\n                                \"processing dial outcome on a connection that was not marked as connecting or blocked\"\n                            );\n\n                            None\n                        }\n                    }\n                } else {\n                    warn!(\"processing dial outcome non-existent connection\");\n\n                    // If the connection does not exist, do not introduce it!\n                    None\n                }\n            }\n            DialOutcome::Loopback { addr } => {\n                info!(\"found loopback address\");\n                self.change_outgoing_state(addr, OutgoingState::Loopback);\n                None\n            }\n        })\n    }\n\n    /// Notifies the connection manager about a dropped connection.\n    ///\n    /// This will usually result in an immediate reconnection.\n    pub(crate) fn handle_connection_drop(\n        &mut self,\n        addr: SocketAddr,\n        now: Instant,\n    ) -> Option<DialRequest<H>> {\n        let span = make_span(addr, self.outgoing.get(&addr));\n\n        span.clone().in_scope(move || {\n            if let Some(outgoing) = self.outgoing.get(&addr) {\n                match outgoing.state {\n                    OutgoingState::Waiting { .. }\n                    | OutgoingState::Loopback\n                    | OutgoingState::Connecting { .. 
} => {\n                        // We should, under normal circumstances, not receive drop notifications for\n                        // any of these. Connection failures are handled by the dialer.\n                        warn!(\"unexpected drop notification\");\n                        None\n                    }\n                    OutgoingState::Connected { .. } => {\n                        // Drop the handle, immediately initiate a reconnection.\n                        self.change_outgoing_state(\n                            addr,\n                            OutgoingState::Connecting {\n                                failures_so_far: 0,\n                                since: now,\n                            },\n                        );\n                        Some(DialRequest::Dial { addr, span })\n                    }\n                    OutgoingState::Blocked { .. } => {\n                        // Blocked addresses ignore connection drops.\n                        debug!(\"received drop notification for blocked connection\");\n                        None\n                    }\n                }\n            } else {\n                warn!(\"received connection drop notification for unknown connection\");\n                None\n            }\n        })\n    }\n\n    fn calculate_block_until<R: Rng>(&self, now: Instant, rng: &mut R) -> Instant {\n        let min = self.config.unblock_after_min;\n        let max = self.config.unblock_after_max;\n        if min == max {\n            return now + min;\n        }\n        let block_duration = rng.gen_range(min..=max);\n        now + block_duration\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::{net::SocketAddr, time::Duration};\n\n    use assert_matches::assert_matches;\n    use datasize::DataSize;\n    use rand::Rng;\n    use thiserror::Error;\n\n    use super::{DialOutcome, DialRequest, NodeId, OutgoingConfig, OutgoingManager};\n    use crate::{\n        components::network::{\n            
blocklist::BlocklistJustification,\n            health::{HealthConfig, TaggedTimestamp},\n        },\n        testing::{init_logging, test_clock::TestClock},\n    };\n\n    /// Error for test dialer.\n    ///\n    /// Tracks a configurable id for the error.\n    #[derive(DataSize, Debug, Error)]\n    #[error(\"test dialer error({})\", id)]\n    struct TestDialerError {\n        id: u32,\n    }\n\n    /// Setup an outgoing configuration for testing.\n    fn test_config() -> OutgoingConfig {\n        OutgoingConfig {\n            retry_attempts: 3,\n            base_timeout: Duration::from_secs(1),\n            unblock_after_min: Duration::from_secs(60),\n            unblock_after_max: Duration::from_secs(60),\n            sweep_timeout: Duration::from_secs(45),\n            health: HealthConfig::test_config(),\n        }\n    }\n\n    /// Setup an outgoing configuration for testing.\n    fn config_variant_unblock() -> OutgoingConfig {\n        OutgoingConfig {\n            retry_attempts: 3,\n            base_timeout: Duration::from_secs(1),\n            unblock_after_min: Duration::from_secs(60),\n            unblock_after_max: Duration::from_secs(80),\n            sweep_timeout: Duration::from_secs(45),\n            health: HealthConfig::test_config(),\n        }\n    }\n\n    /// Helper function that checks if a given dial request actually dials the expected address.\n    fn dials<'a, H, T>(expected: SocketAddr, requests: T) -> bool\n    where\n        T: IntoIterator<Item = &'a DialRequest<H>> + 'a,\n        H: 'a,\n    {\n        for req in requests.into_iter() {\n            if let DialRequest::Dial { addr, .. 
} = req {\n                if *addr == expected {\n                    return true;\n                }\n            }\n        }\n\n        false\n    }\n\n    /// Helper function that checks if a given dial request actually disconnects the expected\n    /// address.\n    fn disconnects<'a, H, T>(expected: H, requests: T) -> bool\n    where\n        T: IntoIterator<Item = &'a DialRequest<H>> + 'a,\n        H: 'a + PartialEq,\n    {\n        for req in requests.into_iter() {\n            if let DialRequest::Disconnect { handle, .. } = req {\n                if *handle == expected {\n                    return true;\n                }\n            }\n        }\n\n        false\n    }\n\n    #[test]\n    fn successful_lifecycle() {\n        init_logging();\n\n        let mut rng = crate::new_rng();\n        let mut clock = TestClock::new();\n\n        let addr_a: SocketAddr = \"1.2.3.4:1234\".parse().unwrap();\n        let id_a = NodeId::random(&mut rng);\n\n        let mut manager = OutgoingManager::<u32, TestDialerError>::new(test_config());\n\n        // We begin by learning a single, regular address, triggering a dial request.\n        assert!(dials(\n            addr_a,\n            &manager.learn_addr(addr_a, false, clock.now())\n        ));\n        assert_eq!(manager.metrics().out_state_connecting.get(), 1);\n\n        // Our first connection attempt fails. 
The connection should now be in waiting state, but\n        // not reconnect, since the minimum delay is 2 seconds (2*base_timeout).\n        assert!(manager\n            .handle_dial_outcome(DialOutcome::Failed {\n                addr: addr_a,\n                error: TestDialerError { id: 1 },\n                when: clock.now(),\n            },)\n            .is_none());\n        assert_eq!(manager.metrics().out_state_connecting.get(), 0);\n        assert_eq!(manager.metrics().out_state_waiting.get(), 1);\n\n        // Performing housekeeping multiple times should not make a difference.\n        assert!(manager\n            .perform_housekeeping(&mut rng, clock.now())\n            .is_empty());\n        assert!(manager\n            .perform_housekeeping(&mut rng, clock.now())\n            .is_empty());\n        assert!(manager\n            .perform_housekeeping(&mut rng, clock.now())\n            .is_empty());\n        assert!(manager\n            .perform_housekeeping(&mut rng, clock.now())\n            .is_empty());\n\n        // Advancing the clock will trigger a reconnection on the next housekeeping.\n        clock.advance_time(2_000);\n        assert!(dials(\n            addr_a,\n            &manager.perform_housekeeping(&mut rng, clock.now())\n        ));\n        assert_eq!(manager.metrics().out_state_connecting.get(), 1);\n        assert_eq!(manager.metrics().out_state_waiting.get(), 0);\n\n        // This time the connection succeeds.\n        assert!(manager\n            .handle_dial_outcome(DialOutcome::Successful {\n                addr: addr_a,\n                handle: 99,\n                node_id: id_a,\n                when: clock.now(),\n            },)\n            .is_none());\n        assert_eq!(manager.metrics().out_state_connecting.get(), 0);\n        assert_eq!(manager.metrics().out_state_connected.get(), 1);\n\n        // The routing table should have been updated and should return the handle.\n        assert_eq!(manager.get_route(id_a), 
Some(&99));\n        assert_eq!(manager.get_addr(id_a), Some(addr_a));\n\n        // Time passes, and our connection drops. Reconnecting should be immediate.\n        assert!(manager\n            .perform_housekeeping(&mut rng, clock.now())\n            .is_empty());\n        clock.advance_time(20_000);\n        assert!(dials(\n            addr_a,\n            &manager.handle_connection_drop(addr_a, clock.now())\n        ));\n        assert_eq!(manager.metrics().out_state_connecting.get(), 1);\n        assert_eq!(manager.metrics().out_state_waiting.get(), 0);\n\n        // The route should have been cleared.\n        assert!(manager.get_route(id_a).is_none());\n        assert!(manager.get_addr(id_a).is_none());\n\n        // Reconnection is already in progress, so we do not expect another request on housekeeping.\n        assert!(manager\n            .perform_housekeeping(&mut rng, clock.now())\n            .is_empty());\n    }\n\n    #[test]\n    fn connections_forgotten_after_too_many_tries() {\n        init_logging();\n\n        let mut rng = crate::new_rng();\n        let mut clock = TestClock::new();\n\n        let addr_a: SocketAddr = \"1.2.3.4:1234\".parse().unwrap();\n        // Address `addr_b` will be a known address.\n        let addr_b: SocketAddr = \"5.6.7.8:5678\".parse().unwrap();\n\n        let mut manager = OutgoingManager::<u32, TestDialerError>::new(test_config());\n\n        // First, attempt to connect. 
Tests are set to 3 retries after 2, 4 and 8 seconds.\n        assert!(dials(\n            addr_a,\n            &manager.learn_addr(addr_a, false, clock.now())\n        ));\n        assert!(dials(\n            addr_b,\n            &manager.learn_addr(addr_b, true, clock.now())\n        ));\n\n        // Fail the first connection attempts, not triggering a retry (timeout not reached yet).\n        assert!(manager\n            .handle_dial_outcome(DialOutcome::Failed {\n                addr: addr_a,\n                error: TestDialerError { id: 10 },\n                when: clock.now(),\n            },)\n            .is_none());\n        assert!(manager\n            .handle_dial_outcome(DialOutcome::Failed {\n                addr: addr_b,\n                error: TestDialerError { id: 11 },\n                when: clock.now(),\n            },)\n            .is_none());\n\n        // Learning the address again should not cause a reconnection.\n        assert!(manager.learn_addr(addr_a, false, clock.now()).is_none());\n        assert!(manager.learn_addr(addr_b, false, clock.now()).is_none());\n\n        assert!(manager\n            .perform_housekeeping(&mut rng, clock.now())\n            .is_empty());\n        assert!(manager.learn_addr(addr_a, false, clock.now()).is_none());\n        assert!(manager.learn_addr(addr_b, false, clock.now()).is_none());\n\n        // After 1.999 seconds, reconnection should still be delayed.\n        clock.advance_time(1_999);\n        assert!(manager\n            .perform_housekeeping(&mut rng, clock.now())\n            .is_empty());\n\n        // Adding 0.001 seconds finally is enough to reconnect.\n        clock.advance_time(1);\n        let requests = manager.perform_housekeeping(&mut rng, clock.now());\n        assert!(dials(addr_a, &requests));\n        assert!(dials(addr_b, &requests));\n\n        // Waiting for more than the reconnection delay should not be harmful or change\n        // anything, as  we are currently connecting.\n    
    clock.advance_time(6_000);\n\n        assert!(manager\n            .perform_housekeeping(&mut rng, clock.now())\n            .is_empty());\n\n        // Fail the connection again, wait 3.999 seconds, expecting no reconnection.\n        assert!(manager\n            .handle_dial_outcome(DialOutcome::Failed {\n                addr: addr_a,\n                error: TestDialerError { id: 40 },\n                when: clock.now(),\n            },)\n            .is_none());\n        assert!(manager\n            .handle_dial_outcome(DialOutcome::Failed {\n                addr: addr_b,\n                error: TestDialerError { id: 41 },\n                when: clock.now(),\n            },)\n            .is_none());\n\n        clock.advance_time(3_999);\n        assert!(manager\n            .perform_housekeeping(&mut rng, clock.now())\n            .is_empty());\n\n        // Adding 0.001 seconds finally again pushes us over the threshold.\n        clock.advance_time(1);\n        let requests = manager.perform_housekeeping(&mut rng, clock.now());\n        assert!(dials(addr_a, &requests));\n        assert!(dials(addr_b, &requests));\n\n        // Fail the connection quickly.\n        clock.advance_time(25);\n        assert!(manager\n            .handle_dial_outcome(DialOutcome::Failed {\n                addr: addr_a,\n                error: TestDialerError { id: 10 },\n                when: clock.now(),\n            },)\n            .is_none());\n        assert!(manager\n            .handle_dial_outcome(DialOutcome::Failed {\n                addr: addr_b,\n                error: TestDialerError { id: 10 },\n                when: clock.now(),\n            },)\n            .is_none());\n        assert!(manager\n            .perform_housekeeping(&mut rng, clock.now())\n            .is_empty());\n\n        // The last attempt should happen 8 seconds after the error, not the last attempt.\n        clock.advance_time(7_999);\n        assert!(manager\n            
.perform_housekeeping(&mut rng, clock.now())\n            .is_empty());\n\n        clock.advance_time(1);\n        let requests = manager.perform_housekeeping(&mut rng, clock.now());\n        assert!(dials(addr_a, &requests));\n        assert!(dials(addr_b, &requests));\n\n        // Fail the last attempt. No more reconnections should be happening.\n        assert!(manager\n            .handle_dial_outcome(DialOutcome::Failed {\n                addr: addr_a,\n                error: TestDialerError { id: 10 },\n                when: clock.now(),\n            },)\n            .is_none());\n        assert!(manager\n            .handle_dial_outcome(DialOutcome::Failed {\n                addr: addr_b,\n                error: TestDialerError { id: 10 },\n                when: clock.now(),\n            },)\n            .is_none());\n\n        // Only the unforgettable address should be reconnecting.\n        let requests = manager.perform_housekeeping(&mut rng, clock.now());\n        assert!(!dials(addr_a, &requests));\n        assert!(dials(addr_b, &requests));\n\n        // But not `addr_a`, even after a long wait.\n        clock.advance_time(1_000_000_000);\n        assert!(manager\n            .perform_housekeeping(&mut rng, clock.now())\n            .is_empty());\n    }\n\n    #[test]\n    fn blocking_works() {\n        init_logging();\n\n        let mut rng = crate::new_rng();\n        let mut clock = TestClock::new();\n\n        let addr_a: SocketAddr = \"1.2.3.4:1234\".parse().unwrap();\n        // We use `addr_b` as an unforgettable address, which does not mean it cannot be blocked!\n        let addr_b: SocketAddr = \"5.6.7.8:5678\".parse().unwrap();\n        let addr_c: SocketAddr = \"9.0.1.2:9012\".parse().unwrap();\n        let id_a = NodeId::random(&mut rng);\n        let id_b = NodeId::random(&mut rng);\n        let id_c = NodeId::random(&mut rng);\n\n        let mut manager = OutgoingManager::<u32, TestDialerError>::new(test_config());\n\n        // Block 
`addr_a` from the start.\n        assert!(manager\n            .block_addr(\n                addr_a,\n                clock.now(),\n                BlocklistJustification::MissingChainspecHash,\n                &mut rng,\n            )\n            .is_none());\n\n        // Learning both `addr_a` and `addr_b` should only trigger a connection to `addr_b` now.\n        assert!(manager.learn_addr(addr_a, false, clock.now()).is_none());\n        assert!(dials(\n            addr_b,\n            &manager.learn_addr(addr_b, true, clock.now())\n        ));\n\n        assert!(manager\n            .perform_housekeeping(&mut rng, clock.now())\n            .is_empty());\n\n        // Fifteen seconds later we succeed in connecting to `addr_b`.\n        clock.advance_time(15_000);\n        assert!(manager\n            .handle_dial_outcome(DialOutcome::Successful {\n                addr: addr_b,\n                handle: 101,\n                node_id: id_b,\n                when: clock.now(),\n            },)\n            .is_none());\n        assert_eq!(manager.get_route(id_b), Some(&101));\n\n        // Invariant through housekeeping.\n        assert!(manager\n            .perform_housekeeping(&mut rng, clock.now())\n            .is_empty());\n\n        assert_eq!(manager.get_route(id_b), Some(&101));\n\n        // Another fifteen seconds later, we block `addr_b`.\n        clock.advance_time(15_000);\n        assert!(disconnects(\n            101,\n            &manager.block_addr(\n                addr_b,\n                clock.now(),\n                BlocklistJustification::MissingChainspecHash,\n                &mut rng,\n            )\n        ));\n\n        // `addr_c` will be blocked during the connection phase.\n        assert!(dials(\n            addr_c,\n            &manager.learn_addr(addr_c, false, clock.now())\n        ));\n        assert!(manager\n            .block_addr(\n                addr_c,\n                clock.now(),\n                
BlocklistJustification::MissingChainspecHash,\n                &mut rng,\n            )\n            .is_none());\n\n        // We are still expect to provide a dial outcome, but afterwards, there should be no\n        // route to C and an immediate disconnection should be queued.\n        assert!(disconnects(\n            42,\n            &manager.handle_dial_outcome(DialOutcome::Successful {\n                addr: addr_c,\n                handle: 42,\n                node_id: id_c,\n                when: clock.now(),\n            },)\n        ));\n\n        assert!(manager\n            .perform_housekeeping(&mut rng, clock.now())\n            .is_empty());\n\n        assert!(manager.get_route(id_c).is_none());\n\n        // At this point, we have blocked all three addresses. 30 seconds later, the first one is\n        // unblocked due to the block timing out.\n\n        clock.advance_time(30_000);\n        assert!(dials(\n            addr_a,\n            &manager.perform_housekeeping(&mut rng, clock.now())\n        ));\n\n        // Fifteen seconds later, B and C are still blocked, but we redeem B early.\n        clock.advance_time(15_000);\n        assert!(manager\n            .perform_housekeeping(&mut rng, clock.now())\n            .is_empty());\n\n        assert!(dials(addr_b, &manager.redeem_addr(addr_b, clock.now())));\n\n        // Succeed both connections, and ensure we have routes to both.\n        assert!(manager\n            .handle_dial_outcome(DialOutcome::Successful {\n                addr: addr_b,\n                handle: 77,\n                node_id: id_b,\n                when: clock.now(),\n            },)\n            .is_none());\n        assert!(manager\n            .handle_dial_outcome(DialOutcome::Successful {\n                addr: addr_a,\n                handle: 66,\n                node_id: id_a,\n                when: clock.now(),\n            },)\n            .is_none());\n\n        assert_eq!(manager.get_route(id_a), Some(&66));\n    
    assert_eq!(manager.get_route(id_b), Some(&77));\n    }\n\n    #[test]\n    fn loopback_handled_correctly() {\n        init_logging();\n\n        let mut rng = crate::new_rng();\n        let mut clock = TestClock::new();\n\n        let loopback_addr: SocketAddr = \"1.2.3.4:1234\".parse().unwrap();\n\n        let mut manager = OutgoingManager::<u32, TestDialerError>::new(test_config());\n\n        // Loopback addresses are connected to only once, and then marked as loopback forever.\n        assert!(dials(\n            loopback_addr,\n            &manager.learn_addr(loopback_addr, false, clock.now())\n        ));\n\n        assert!(manager\n            .handle_dial_outcome(DialOutcome::Loopback {\n                addr: loopback_addr,\n            },)\n            .is_none());\n\n        assert!(manager\n            .perform_housekeeping(&mut rng, clock.now())\n            .is_empty());\n\n        // Learning loopbacks again should not trigger another connection\n        assert!(manager\n            .learn_addr(loopback_addr, false, clock.now())\n            .is_none());\n\n        // Blocking loopbacks does not result in a block, since regular blocks would clear after\n        // some time.\n        assert!(manager\n            .block_addr(\n                loopback_addr,\n                clock.now(),\n                BlocklistJustification::MissingChainspecHash,\n                &mut rng,\n            )\n            .is_none());\n\n        clock.advance_time(1_000_000_000);\n\n        assert!(manager\n            .perform_housekeeping(&mut rng, clock.now())\n            .is_empty());\n    }\n\n    #[test]\n    fn connected_peers_works() {\n        init_logging();\n\n        let mut rng = crate::new_rng();\n        let clock = TestClock::new();\n\n        let addr_a: SocketAddr = \"1.2.3.4:1234\".parse().unwrap();\n        let addr_b: SocketAddr = \"5.6.7.8:5678\".parse().unwrap();\n\n        let id_a = NodeId::random(&mut rng);\n        let id_b = 
NodeId::random(&mut rng);\n\n        let mut manager = OutgoingManager::<u32, TestDialerError>::new(test_config());\n\n        manager.learn_addr(addr_a, false, clock.now());\n        manager.learn_addr(addr_b, true, clock.now());\n\n        manager.handle_dial_outcome(DialOutcome::Successful {\n            addr: addr_a,\n            handle: 22,\n            node_id: id_a,\n            when: clock.now(),\n        });\n        manager.handle_dial_outcome(DialOutcome::Successful {\n            addr: addr_b,\n            handle: 33,\n            node_id: id_b,\n            when: clock.now(),\n        });\n\n        let mut peer_ids: Vec<_> = manager.connected_peers().collect();\n        let mut expected = vec![id_a, id_b];\n\n        peer_ids.sort();\n        expected.sort();\n\n        assert_eq!(peer_ids, expected);\n    }\n\n    #[test]\n    fn sweeping_works() {\n        init_logging();\n\n        let mut rng = crate::new_rng();\n        let mut clock = TestClock::new();\n\n        let addr_a: SocketAddr = \"1.2.3.4:1234\".parse().unwrap();\n\n        let id_a = NodeId::random(&mut rng);\n\n        let mut manager = OutgoingManager::<u32, TestDialerError>::new(test_config());\n\n        // Trigger a new connection via learning an address.\n        assert!(dials(\n            addr_a,\n            &manager.learn_addr(addr_a, false, clock.now())\n        ));\n\n        // We now let enough time pass to cause the connection to be considered failed aborted.\n        // No effects are expected at this point.\n        clock.advance_time(50_000);\n        assert!(manager\n            .perform_housekeeping(&mut rng, clock.now())\n            .is_empty());\n\n        // The connection will now experience a regular failure. 
Since this is the first connection\n        // failure, it should reconnect after 2 seconds.\n        clock.advance_time(2_000);\n        assert!(dials(\n            addr_a,\n            &manager.perform_housekeeping(&mut rng, clock.now())\n        ));\n\n        // We now simulate the second connection (`handle: 2`) succeeding first, after 1 second.\n        clock.advance_time(1_000);\n        assert!(manager\n            .handle_dial_outcome(DialOutcome::Successful {\n                addr: addr_a,\n                handle: 2,\n                node_id: id_a,\n                when: clock.now(),\n            })\n            .is_none());\n\n        // A route should now be established.\n        assert_eq!(manager.get_route(id_a), Some(&2));\n\n        // More time passes and the first connection attempt finally finishes.\n        clock.advance_time(30_000);\n        assert!(manager\n            .handle_dial_outcome(DialOutcome::Successful {\n                addr: addr_a,\n                handle: 1,\n                node_id: id_a,\n                when: clock.now(),\n            })\n            .is_none());\n\n        // We now expect to be connected through the first connection (see documentation).\n        assert_eq!(manager.get_route(id_a), Some(&1));\n    }\n\n    #[test]\n    fn blocking_not_overridden_by_racing_failed_connections() {\n        init_logging();\n\n        let mut rng = crate::new_rng();\n        let mut clock = TestClock::new();\n\n        let addr_a: SocketAddr = \"1.2.3.4:1234\".parse().unwrap();\n\n        let mut manager = OutgoingManager::<u32, TestDialerError>::new(test_config());\n\n        assert!(!manager.is_blocked(addr_a));\n\n        // Block `addr_a` from the start.\n        assert!(manager\n            .block_addr(\n                addr_a,\n                clock.now(),\n                BlocklistJustification::MissingChainspecHash,\n                &mut rng,\n            )\n            .is_none());\n        
assert!(manager.is_blocked(addr_a));\n\n        clock.advance_time(60);\n\n        // Receive an \"illegal\" dial outcome, even though we did not dial.\n        assert!(manager\n            .handle_dial_outcome(DialOutcome::Failed {\n                addr: addr_a,\n                error: TestDialerError { id: 12345 },\n\n                // The moment the connection attempt failed.\n                when: clock.now(),\n            })\n            .is_none());\n\n        // The failed connection should _not_ have reset the block!\n        assert!(manager.is_blocked(addr_a));\n        clock.advance_time(60);\n        assert!(manager.is_blocked(addr_a));\n\n        assert!(manager\n            .perform_housekeeping(&mut rng, clock.now())\n            .is_empty());\n        assert!(manager.is_blocked(addr_a));\n    }\n\n    #[test]\n    fn emits_and_accepts_pings() {\n        init_logging();\n\n        let mut rng = crate::new_rng();\n        let mut clock = TestClock::new();\n\n        let addr: SocketAddr = \"1.2.3.4:1234\".parse().unwrap();\n        let id = NodeId::random(&mut rng);\n\n        // Setup a connection and put it into the connected state.\n        let mut manager = OutgoingManager::<u32, TestDialerError>::new(test_config());\n\n        // Trigger a new connection via learning an address.\n        assert!(dials(addr, &manager.learn_addr(addr, false, clock.now())));\n\n        assert!(manager\n            .handle_dial_outcome(DialOutcome::Successful {\n                addr,\n                handle: 1,\n                node_id: id,\n                when: clock.now(),\n            })\n            .is_none());\n\n        // Initial housekeeping should do nothing.\n        assert!(manager\n            .perform_housekeeping(&mut rng, clock.now())\n            .is_empty());\n\n        // Go through 50 pings, which should be happening every 5 seconds.\n        for _ in 0..50 {\n            clock.advance(Duration::from_secs(3));\n            assert!(manager\n       
         .perform_housekeeping(&mut rng, clock.now())\n                .is_empty());\n            clock.advance(Duration::from_secs(2));\n\n            let (_first_nonce, peer_id) = assert_matches!(\n                manager\n                    .perform_housekeeping(&mut rng, clock.now())\n                    .as_slice(),\n                &[DialRequest::SendPing { nonce, peer_id, ..  }] => (nonce, peer_id)\n            );\n            assert_eq!(peer_id, id);\n\n            // After a second, nothing should have changed.\n            assert!(manager\n                .perform_housekeeping(&mut rng, clock.now())\n                .is_empty());\n\n            clock.advance(Duration::from_secs(1));\n            // Waiting another second (two in total) should trigger another ping.\n            clock.advance(Duration::from_secs(1));\n\n            let (second_nonce, peer_id) = assert_matches!(\n                manager\n                    .perform_housekeeping(&mut rng, clock.now())\n                    .as_slice(),\n                &[DialRequest::SendPing { nonce, peer_id, ..  }] => (nonce, peer_id)\n            );\n\n            // Ensure the ID is correct.\n            assert_eq!(peer_id, id);\n\n            // Pong arrives 1 second later.\n            clock.advance(Duration::from_secs(1));\n\n            // We now feed back the ping with the correct nonce. This should not result in a ban.\n            assert!(!manager.record_pong(\n                peer_id,\n                TaggedTimestamp::from_parts(clock.now(), second_nonce),\n            ));\n\n            // This resets the \"cycle\", the next ping is due in 5 seconds.\n        }\n\n        // Now we are going to miss 4 pings in a row and expect a disconnect.\n        clock.advance(Duration::from_secs(5));\n        assert_matches!(\n            manager\n                .perform_housekeeping(&mut rng, clock.now())\n                .as_slice(),\n            &[DialRequest::SendPing { .. 
}]\n        );\n        clock.advance(Duration::from_secs(2));\n        assert_matches!(\n            manager\n                .perform_housekeeping(&mut rng, clock.now())\n                .as_slice(),\n            &[DialRequest::SendPing { .. }]\n        );\n        clock.advance(Duration::from_secs(2));\n        assert_matches!(\n            manager\n                .perform_housekeeping(&mut rng, clock.now())\n                .as_slice(),\n            &[DialRequest::SendPing { .. }]\n        );\n        clock.advance(Duration::from_secs(2));\n        assert_matches!(\n            manager\n                .perform_housekeeping(&mut rng, clock.now())\n                .as_slice(),\n            &[DialRequest::SendPing { .. }]\n        );\n\n        // This results in a disconnect, followed by a reconnect.\n        clock.advance(Duration::from_secs(2));\n        let dial_addr = assert_matches!(\n            manager\n                .perform_housekeeping(&mut rng, clock.now())\n                .as_slice(),\n            &[DialRequest::Disconnect { .. }, DialRequest::Dial { addr, .. 
}] => addr\n        );\n\n        assert_eq!(dial_addr, addr);\n    }\n\n    #[test]\n    fn indicates_issue_when_excessive_pongs_are_encountered() {\n        let mut rng = crate::new_rng();\n        let mut clock = TestClock::new();\n\n        let addr: SocketAddr = \"1.2.3.4:1234\".parse().unwrap();\n        let id = NodeId::random(&mut rng);\n\n        // Ensure we have one connected node.\n        let mut manager = OutgoingManager::<u32, TestDialerError>::new(test_config());\n\n        assert!(dials(addr, &manager.learn_addr(addr, false, clock.now())));\n        assert!(manager\n            .handle_dial_outcome(DialOutcome::Successful {\n                addr,\n                handle: 1,\n                node_id: id,\n                when: clock.now(),\n            })\n            .is_none());\n\n        clock.advance(Duration::from_millis(50));\n\n        // We can now receive excessive pongs.\n        assert!(!manager.record_pong(id, TaggedTimestamp::from_parts(clock.now(), rng.gen())));\n        assert!(!manager.record_pong(id, TaggedTimestamp::from_parts(clock.now(), rng.gen())));\n        assert!(!manager.record_pong(id, TaggedTimestamp::from_parts(clock.now(), rng.gen())));\n        assert!(!manager.record_pong(id, TaggedTimestamp::from_parts(clock.now(), rng.gen())));\n        assert!(!manager.record_pong(id, TaggedTimestamp::from_parts(clock.now(), rng.gen())));\n        assert!(!manager.record_pong(id, TaggedTimestamp::from_parts(clock.now(), rng.gen())));\n        assert!(manager.record_pong(id, TaggedTimestamp::from_parts(clock.now(), rng.gen())));\n    }\n\n    #[test]\n    fn unblocking_in_variant_block_time() {\n        init_logging();\n\n        let mut rng = crate::new_rng();\n        let mut clock = TestClock::new();\n        let addr_a: SocketAddr = \"1.2.3.4:1234\".parse().unwrap();\n        let mut manager = OutgoingManager::<u32, TestDialerError>::new(config_variant_unblock());\n\n        assert!(!manager.is_blocked(addr_a));\n\n        // 
Block `addr_a` from the start.\n        assert!(manager\n            .block_addr(\n                addr_a,\n                clock.now(),\n                BlocklistJustification::MissingChainspecHash,\n                &mut rng,\n            )\n            .is_none());\n        assert!(manager.is_blocked(addr_a));\n\n        clock.advance_time(config_variant_unblock().unblock_after_max.as_millis() as u64 + 1);\n        assert!(dials(\n            addr_a,\n            &manager.perform_housekeeping(&mut rng, clock.now())\n        ));\n        assert!(!manager.is_blocked(addr_a));\n    }\n}\n"
  },
  {
    "path": "node/src/components/network/symmetry.rs",
    "content": "//! Connection symmetry management.\n//!\n//! Tracks the state of connections, which may be uni- or bi-directional, depending on whether a\n//! peer has connected back to us. Asymmetric connections are usually removed periodically.\n\nuse std::{collections::BTreeSet, mem, net::SocketAddr, time::Instant};\n\nuse datasize::DataSize;\nuse tracing::{debug, warn};\n\n/// Describes whether a connection is uni- or bi-directional.\n#[derive(DataSize, Debug, Default)]\npub(super) enum ConnectionSymmetry {\n    /// We have only seen an incoming connection.\n    IncomingOnly {\n        /// Time this connection remained incoming only.\n        since: Instant,\n        /// The outgoing address of the peer that is connected to us.\n        peer_addrs: BTreeSet<SocketAddr>,\n    },\n    /// We have only seen an outgoing connection.\n    OutgoingOnly {\n        /// Time this connection remained outgoing only.\n        since: Instant,\n    },\n    /// The connection is fully symmetric.\n    Symmetric {\n        /// The outgoing address on the peer that is connected to us.\n        peer_addrs: BTreeSet<SocketAddr>,\n    },\n    /// The connection is invalid/missing and should be removed.\n    #[default]\n    Gone,\n}\n\nimpl ConnectionSymmetry {\n    /// A new incoming connection has been registered.\n    ///\n    /// Returns true, if the connection achieved symmetry with this change.\n    pub(super) fn add_incoming(&mut self, peer_addr: SocketAddr, since: Instant) -> bool {\n        match self {\n            ConnectionSymmetry::IncomingOnly {\n                ref mut peer_addrs, ..\n            } => {\n                // Already incoming connection, just add it to the pile.\n                peer_addrs.insert(peer_addr);\n                debug!(\n                    total_incoming_count = peer_addrs.len(),\n                    \"added additional incoming connection on non-symmetric\"\n                );\n                false\n            }\n            
ConnectionSymmetry::OutgoingOnly { .. } => {\n                // Outgoing graduates to Symmetric when we receive an incoming connection.\n                let mut peer_addrs = BTreeSet::new();\n                peer_addrs.insert(peer_addr);\n                *self = ConnectionSymmetry::Symmetric { peer_addrs };\n                debug!(\"added incoming connection, now symmetric\");\n                true\n            }\n            ConnectionSymmetry::Symmetric { peer_addrs } => {\n                // Just record an additional incoming connection.\n                peer_addrs.insert(peer_addr);\n                debug!(\n                    total_incoming_count = peer_addrs.len(),\n                    \"added additional incoming connection on symmetric\"\n                );\n                false\n            }\n            ConnectionSymmetry::Gone => {\n                let mut peer_addrs = BTreeSet::new();\n                peer_addrs.insert(peer_addr);\n                *self = ConnectionSymmetry::IncomingOnly { peer_addrs, since };\n                debug!(\"added incoming connection, now incoming only\");\n                false\n            }\n        }\n    }\n\n    /// An incoming address has been removed.\n    ///\n    /// Returns `false` if the `ConnectionSymmetry` should be removed after this.\n    pub(super) fn remove_incoming(&mut self, peer_addr: SocketAddr, now: Instant) -> bool {\n        match self {\n            ConnectionSymmetry::IncomingOnly { peer_addrs, .. 
} => {\n                // Remove the incoming connection, warn if it didn't exist.\n                if !peer_addrs.remove(&peer_addr) {\n                    warn!(\"tried to remove non-existent incoming connection from symmetry\");\n                }\n\n                // Indicate removal if this was the last incoming connection.\n                if peer_addrs.is_empty() {\n                    *self = ConnectionSymmetry::Gone;\n                    debug!(\"removed incoming connection, now gone\");\n\n                    false\n                } else {\n                    debug!(\n                        total_incoming_count = peer_addrs.len(),\n                        \"removed incoming connection, still has remaining incoming\"\n                    );\n\n                    true\n                }\n            }\n            ConnectionSymmetry::OutgoingOnly { .. } => {\n                warn!(\"cannot remove incoming connection from outgoing-only\");\n                true\n            }\n            ConnectionSymmetry::Symmetric { peer_addrs } => {\n                if !peer_addrs.remove(&peer_addr) {\n                    warn!(\"tried to remove non-existent symmetric connection from symmetry\");\n                }\n                if peer_addrs.is_empty() {\n                    *self = ConnectionSymmetry::OutgoingOnly { since: now };\n                    debug!(\"removed incoming connection, now incoming-only\");\n                }\n                true\n            }\n            ConnectionSymmetry::Gone => {\n                // This is just an error.\n                warn!(\"removing incoming connection from already gone symmetry\");\n                false\n            }\n        }\n    }\n\n    /// Marks a connection as having an outgoing connection.\n    ///\n    /// Returns true, if the connection achieved symmetry with this change.\n    pub(super) fn mark_outgoing(&mut self, now: Instant) -> bool {\n        match self {\n            
ConnectionSymmetry::IncomingOnly { peer_addrs, .. } => {\n                // Connection is now complete.\n                debug!(\"incoming connection marked outgoing, now complete\");\n                *self = ConnectionSymmetry::Symmetric {\n                    peer_addrs: mem::take(peer_addrs),\n                };\n                true\n            }\n            ConnectionSymmetry::OutgoingOnly { .. } => {\n                warn!(\"outgoing connection marked outgoing\");\n                false\n            }\n            ConnectionSymmetry::Symmetric { .. } => {\n                warn!(\"symmetric connection marked outgoing\");\n                false\n            }\n            ConnectionSymmetry::Gone => {\n                *self = ConnectionSymmetry::OutgoingOnly { since: now };\n                debug!(\"absent connection marked outgoing\");\n                false\n            }\n        }\n    }\n\n    /// Unmarks a connection as having an outgoing connection.\n    ///\n    /// Returns `false` if the `ConnectionSymmetry` should be removed after this.\n    pub(super) fn unmark_outgoing(&mut self, now: Instant) -> bool {\n        match self {\n            ConnectionSymmetry::IncomingOnly { .. } => {\n                warn!(\"incoming-only unmarked outgoing\");\n                true\n            }\n            ConnectionSymmetry::OutgoingOnly { .. 
} => {\n                // With neither incoming, nor outgoing connections, the symmetry is finally gone.\n                *self = ConnectionSymmetry::Gone;\n                debug!(\"outgoing connection unmarked, now gone\");\n\n                false\n            }\n            ConnectionSymmetry::Symmetric { peer_addrs } => {\n                *self = ConnectionSymmetry::IncomingOnly {\n                    peer_addrs: mem::take(peer_addrs),\n                    since: now,\n                };\n                debug!(\"symmetric connection unmarked, now outgoing only\");\n\n                true\n            }\n            ConnectionSymmetry::Gone => {\n                warn!(\"gone marked outgoing\");\n                false\n            }\n        }\n    }\n\n    /// Returns the set of incoming addresses, if any.\n    pub(super) fn incoming_addrs(&self) -> Option<&BTreeSet<SocketAddr>> {\n        match self {\n            ConnectionSymmetry::IncomingOnly { peer_addrs, .. }\n            | ConnectionSymmetry::Symmetric { peer_addrs, .. } => Some(peer_addrs),\n            ConnectionSymmetry::OutgoingOnly { .. } | ConnectionSymmetry::Gone => None,\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::{\n        collections::BTreeSet,\n        net::SocketAddr,\n        time::{Duration, Instant},\n    };\n\n    use crate::testing::test_clock::TestClock;\n\n    use super::ConnectionSymmetry;\n\n    /// Indicates whether or not a connection should be cleaned up.\n    fn should_be_reaped(\n        connection_symmetry: &ConnectionSymmetry,\n        now: Instant,\n        max_time_asymmetric: Duration,\n    ) -> bool {\n        match connection_symmetry {\n            ConnectionSymmetry::IncomingOnly { since, .. } => now >= *since + max_time_asymmetric,\n            ConnectionSymmetry::OutgoingOnly { since } => now >= *since + max_time_asymmetric,\n            ConnectionSymmetry::Symmetric { .. 
} => false,\n            ConnectionSymmetry::Gone => true,\n        }\n    }\n\n    #[test]\n    fn symmetry_successful_lifecycles() {\n        let mut clock = TestClock::new();\n\n        let max_time_asymmetric = Duration::from_secs(240);\n        let peer_addr: SocketAddr = \"1.2.3.4:1234\".parse().unwrap();\n\n        let mut sym = ConnectionSymmetry::default();\n\n        // Symmetries that have just been initialized are always reaped instantly.\n        assert!(should_be_reaped(&sym, clock.now(), max_time_asymmetric));\n\n        // Adding an incoming address.\n        sym.add_incoming(peer_addr, clock.now());\n        assert!(!should_be_reaped(&sym, clock.now(), max_time_asymmetric));\n\n        // Add an outgoing address.\n        clock.advance(Duration::from_secs(20));\n        sym.mark_outgoing(clock.now());\n\n        // The connection will now never be reaped, as it is symmetrical.\n        clock.advance(Duration::from_secs(1_000_000));\n        assert!(!should_be_reaped(&sym, clock.now(), max_time_asymmetric));\n    }\n\n    #[test]\n    fn symmetry_lifecycle_reaps_incoming_only() {\n        let mut clock = TestClock::new();\n\n        let max_time_asymmetric = Duration::from_secs(240);\n        let peer_addr: SocketAddr = \"1.2.3.4:1234\".parse().unwrap();\n        let peer_addr2: SocketAddr = \"1.2.3.4:1234\".parse().unwrap();\n\n        let mut sym = ConnectionSymmetry::default();\n\n        // Adding an incoming address prevents it from being reaped.\n        sym.add_incoming(peer_addr, clock.now());\n        assert!(!should_be_reaped(&sym, clock.now(), max_time_asymmetric));\n\n        // Adding another incoming address does not change the timeout.\n        clock.advance(Duration::from_secs(120));\n        sym.add_incoming(peer_addr2, clock.now());\n        assert!(!should_be_reaped(&sym, clock.now(), max_time_asymmetric));\n\n        // We also expected `peer_addr` and `peer_addr2` to be the incoming addresses now.\n        let mut expected = 
BTreeSet::new();\n        expected.insert(peer_addr);\n        expected.insert(peer_addr2);\n        assert_eq!(sym.incoming_addrs(), Some(&expected));\n\n        // After 240 seconds since the first incoming connection, we finally are due reaping.\n        clock.advance(Duration::from_secs(120));\n        assert!(should_be_reaped(&sym, clock.now(), max_time_asymmetric));\n    }\n\n    #[test]\n    fn symmetry_lifecycle_reaps_outgoing_only() {\n        let mut clock = TestClock::new();\n\n        let max_time_asymmetric = Duration::from_secs(240);\n\n        let mut sym = ConnectionSymmetry::default();\n\n        // Mark as outgoing, to prevent reaping.\n        sym.mark_outgoing(clock.now());\n        assert!(!should_be_reaped(&sym, clock.now(), max_time_asymmetric));\n\n        // Marking as outgoing again is usually an error, but should not affect the timeout.\n        clock.advance(Duration::from_secs(120));\n        assert!(!should_be_reaped(&sym, clock.now(), max_time_asymmetric));\n\n        // After 240 seconds we finally are reaping.\n        clock.advance(Duration::from_secs(120));\n        assert!(should_be_reaped(&sym, clock.now(), max_time_asymmetric));\n    }\n}\n"
  },
  {
    "path": "node/src/components/network/tasks.rs",
    "content": "//! Tasks run by the component.\n\nuse std::{\n    error::Error as StdError,\n    fmt::Display,\n    io,\n    net::SocketAddr,\n    pin::Pin,\n    sync::{\n        atomic::{AtomicBool, Ordering},\n        Arc, Weak,\n    },\n    time::Duration,\n};\n\nuse bincode::Options;\nuse futures::{\n    future::{self, Either},\n    stream::{SplitSink, SplitStream},\n    Future, SinkExt, StreamExt,\n};\nuse openssl::{\n    pkey::{PKey, Private},\n    ssl::Ssl,\n    x509::X509,\n};\nuse prometheus::IntGauge;\nuse rand::Rng;\nuse serde::{de::DeserializeOwned, Deserialize, Serialize};\nuse tokio::{\n    net::TcpStream,\n    sync::{mpsc::UnboundedReceiver, watch, Semaphore},\n};\nuse tokio_openssl::SslStream;\nuse tokio_serde::{Deserializer, Serializer};\nuse tracing::{\n    debug, error, error_span,\n    field::{self, Empty},\n    info, trace, warn, Instrument, Span,\n};\n\nuse casper_types::{ProtocolVersion, PublicKey, TimeDiff};\n\nuse super::{\n    chain_info::ChainInfo,\n    counting_format::{ConnectionId, Role},\n    error::{ConnectionError, IoError},\n    event::{IncomingConnection, OutgoingConnection},\n    full_transport,\n    limiter::LimiterHandle,\n    message::NodeKeyPair,\n    message_pack_format::MessagePackFormat,\n    EstimatorWeights, Event, FramedTransport, FullTransport, Identity, Message, Metrics, Payload,\n    Transport,\n};\nuse crate::{\n    components::network::{framed_transport, BincodeFormat, Config, FromIncoming},\n    effect::{\n        announcements::PeerBehaviorAnnouncement, requests::NetworkRequest, AutoClosingResponder,\n        EffectBuilder,\n    },\n    reactor::{EventQueueHandle, QueueKind},\n    tls::{self, TlsCert, ValidationError},\n    types::NodeId,\n    utils::display_error,\n};\n\n/// An item on the internal outgoing message queue.\n///\n/// Contains a reference counted message and an optional responder to call once the message has been\n/// successfully handed over to the kernel for sending.\npub(super) type 
MessageQueueItem<P> = (Arc<Message<P>>, Option<AutoClosingResponder<()>>);\n\n/// The outcome of the handshake process.\nstruct HandshakeOutcome {\n    /// A framed transport for peer.\n    framed_transport: FramedTransport,\n    /// Public address advertised by the peer.\n    public_addr: SocketAddr,\n    /// The public key the peer is validating with, if any.\n    peer_consensus_public_key: Option<PublicKey>,\n    /// Holds the information whether the remote node is syncing.\n    is_peer_syncing: bool,\n}\n\n/// Low-level TLS connection function.\n///\n/// Performs the actual TCP+TLS connection setup.\nasync fn tls_connect<REv>(\n    context: &NetworkContext<REv>,\n    peer_addr: SocketAddr,\n) -> Result<(NodeId, Transport), ConnectionError>\nwhere\n    REv: 'static,\n{\n    let stream = TcpStream::connect(peer_addr)\n        .await\n        .map_err(ConnectionError::TcpConnection)?;\n\n    stream\n        .set_nodelay(true)\n        .map_err(ConnectionError::TcpNoDelay)?;\n\n    let mut transport = tls::create_tls_connector(context.our_cert.as_x509(), &context.secret_key)\n        .and_then(|connector| connector.configure())\n        .and_then(|mut config| {\n            config.set_verify_hostname(false);\n            config.into_ssl(\"this-will-not-be-checked.example.com\")\n        })\n        .and_then(|ssl| SslStream::new(ssl, stream))\n        .map_err(ConnectionError::TlsInitialization)?;\n\n    SslStream::connect(Pin::new(&mut transport))\n        .await\n        .map_err(ConnectionError::TlsHandshake)?;\n\n    let peer_cert = transport\n        .ssl()\n        .peer_certificate()\n        .ok_or(ConnectionError::NoPeerCertificate)?;\n\n    let validated_peer_cert = context\n        .validate_peer_cert(peer_cert)\n        .map_err(ConnectionError::PeerCertificateInvalid)?;\n\n    let peer_id = NodeId::from(validated_peer_cert.public_key_fingerprint());\n\n    Ok((peer_id, transport))\n}\n\n/// Initiates a TLS connection to a remote address.\npub(super) 
async fn connect_outgoing<P, REv>(\n    context: Arc<NetworkContext<REv>>,\n    peer_addr: SocketAddr,\n) -> OutgoingConnection<P>\nwhere\n    REv: 'static,\n    P: Payload,\n{\n    let (peer_id, transport) = match tls_connect(&context, peer_addr).await {\n        Ok(value) => value,\n        Err(error) => return OutgoingConnection::FailedEarly { peer_addr, error },\n    };\n\n    // Register the `peer_id` on the [`Span`].\n    Span::current().record(\"peer_id\", field::display(peer_id));\n\n    if peer_id == context.our_id {\n        info!(\"outgoing loopback connection\");\n        return OutgoingConnection::Loopback { peer_addr };\n    }\n\n    debug!(\"Outgoing TLS connection established\");\n\n    // Setup connection id and framed transport.\n    let connection_id = ConnectionId::from_connection(transport.ssl(), context.our_id, peer_id);\n    let framed_transport = framed_transport(transport, context.chain_info.maximum_net_message_size);\n\n    // Negotiate the handshake, concluding the incoming connection process.\n    match negotiate_handshake::<P, _>(&context, framed_transport, connection_id).await {\n        Ok(HandshakeOutcome {\n            framed_transport,\n            public_addr,\n            peer_consensus_public_key,\n            is_peer_syncing: is_syncing,\n        }) => {\n            if let Some(ref public_key) = peer_consensus_public_key {\n                Span::current().record(\"consensus_key\", field::display(public_key));\n            }\n\n            if public_addr != peer_addr {\n                // We don't need the `public_addr`, as we already connected, but warn anyway.\n                warn!(%public_addr, %peer_addr, \"peer advertises a different public address than what we connected to\");\n            }\n\n            // Setup full framed transport, then close down receiving end of the transport.\n            let full_transport = full_transport::<P>(\n                context.net_metrics.clone(),\n                connection_id,\n     
           framed_transport,\n                Role::Dialer,\n            );\n            let (sink, _stream) = full_transport.split();\n\n            OutgoingConnection::Established {\n                peer_addr,\n                peer_id,\n                peer_consensus_public_key,\n                sink,\n                is_syncing,\n            }\n        }\n        Err(error) => OutgoingConnection::Failed {\n            peer_addr,\n            peer_id,\n            error,\n        },\n    }\n}\n\n/// A context holding all relevant information for networking communication shared across tasks.\npub(crate) struct NetworkContext<REv>\nwhere\n    REv: 'static,\n{\n    /// The handle to the reactor's event queue, used by incoming message handlers to put events\n    /// onto the queue.\n    event_queue: Option<EventQueueHandle<REv>>,\n    /// Our own [`NodeId`].\n    our_id: NodeId,\n    /// TLS certificate associated with this node's identity.\n    our_cert: Arc<TlsCert>,\n    /// TLS certificate authority associated with this node's identity.\n    network_ca: Option<Arc<X509>>,\n    /// Secret key associated with `our_cert`.\n    secret_key: Arc<PKey<Private>>,\n    /// Weak reference to the networking metrics shared by all sender/receiver tasks.\n    net_metrics: Weak<Metrics>,\n    /// Chain info extract from chainspec.\n    chain_info: ChainInfo,\n    /// Optional set of signing keys, to identify as a node during handshake.\n    node_key_pair: Option<NodeKeyPair>,\n    /// Our own public listening address.\n    public_addr: Option<SocketAddr>,\n    /// Timeout for handshake completion.\n    handshake_timeout: TimeDiff,\n    /// Weights to estimate payloads with.\n    payload_weights: EstimatorWeights,\n    /// The protocol version at which (or under) tarpitting is enabled.\n    tarpit_version_threshold: Option<ProtocolVersion>,\n    /// If tarpitting is enabled, duration for which connections should be kept open.\n    tarpit_duration: TimeDiff,\n    /// The chance, 
expressed as a number between 0.0 and 1.0, of triggering the tarpit.\n    tarpit_chance: f32,\n    /// Maximum number of demands allowed to be running at once. If 0, no limit is enforced.\n    max_in_flight_demands: usize,\n    /// Flag indicating whether this node is syncing.\n    is_syncing: AtomicBool,\n    /// If false, will not allow handshake.\n    allow_handshake: bool,\n}\n\nimpl<REv> NetworkContext<REv> {\n    pub(super) fn new(\n        cfg: &Config,\n        our_identity: Identity,\n        node_key_pair: Option<NodeKeyPair>,\n        chain_info: ChainInfo,\n        net_metrics: &Arc<Metrics>,\n        allow_handshake: bool,\n    ) -> Self {\n        // Set the demand max from configuration, regarding `0` as \"unlimited\".\n        let max_in_flight_demands = if cfg.max_in_flight_demands == 0 {\n            usize::MAX\n        } else {\n            cfg.max_in_flight_demands as usize\n        };\n\n        let Identity {\n            secret_key,\n            tls_certificate,\n            network_ca,\n        } = our_identity;\n        let our_id = NodeId::from(tls_certificate.public_key_fingerprint());\n\n        NetworkContext {\n            our_id,\n            public_addr: None,\n            event_queue: None,\n            our_cert: tls_certificate,\n            network_ca,\n            secret_key,\n            net_metrics: Arc::downgrade(net_metrics),\n            chain_info,\n            node_key_pair,\n            handshake_timeout: cfg.handshake_timeout,\n            payload_weights: cfg.estimator_weights.clone(),\n            tarpit_version_threshold: cfg.tarpit_version_threshold,\n            tarpit_duration: cfg.tarpit_duration,\n            tarpit_chance: cfg.tarpit_chance,\n            max_in_flight_demands,\n            is_syncing: AtomicBool::new(false),\n            allow_handshake,\n        }\n    }\n\n    pub(super) fn initialize(\n        &mut self,\n        our_public_addr: SocketAddr,\n        event_queue: EventQueueHandle<REv>,\n    ) 
{\n        self.public_addr = Some(our_public_addr);\n        self.event_queue = Some(event_queue);\n    }\n\n    /// Our own [`NodeId`].\n    pub(super) fn our_id(&self) -> NodeId {\n        self.our_id\n    }\n\n    /// Our own public listening address.\n    pub(super) fn public_addr(&self) -> Option<SocketAddr> {\n        self.public_addr\n    }\n\n    /// Chain info extract from chainspec.\n    pub(super) fn chain_info(&self) -> &ChainInfo {\n        &self.chain_info\n    }\n\n    pub(crate) fn validate_peer_cert(&self, peer_cert: X509) -> Result<TlsCert, ValidationError> {\n        match &self.network_ca {\n            Some(ca_cert) => tls::validate_cert_with_authority(peer_cert, ca_cert),\n            None => tls::validate_self_signed_cert(peer_cert),\n        }\n    }\n\n    pub(crate) fn network_ca(&self) -> Option<&Arc<X509>> {\n        self.network_ca.as_ref()\n    }\n\n    pub(crate) fn is_syncing(&self) -> &AtomicBool {\n        &self.is_syncing\n    }\n}\n\n/// Handles an incoming connection.\n///\n/// Sets up a TLS stream and performs the protocol handshake.\nasync fn handle_incoming<P, REv>(\n    context: Arc<NetworkContext<REv>>,\n    stream: TcpStream,\n    peer_addr: SocketAddr,\n) -> IncomingConnection<P>\nwhere\n    REv: From<Event<P>> + 'static,\n    P: Payload,\n    for<'de> P: Serialize + Deserialize<'de>,\n    for<'de> Message<P>: Serialize + Deserialize<'de>,\n{\n    let (peer_id, transport) = match server_setup_tls(&context, stream).await {\n        Ok(value) => value,\n        Err(error) => {\n            return IncomingConnection::FailedEarly { peer_addr, error };\n        }\n    };\n\n    // Register the `peer_id` on the [`Span`] for logging the ID from here on out.\n    Span::current().record(\"peer_id\", field::display(peer_id));\n\n    if peer_id == context.our_id {\n        info!(\"incoming loopback connection\");\n        return IncomingConnection::Loopback;\n    }\n\n    debug!(\"Incoming TLS connection established\");\n\n    // 
Setup connection id and framed transport.\n    let connection_id = ConnectionId::from_connection(transport.ssl(), context.our_id, peer_id);\n    let framed_transport = framed_transport(transport, context.chain_info.maximum_net_message_size);\n\n    // Negotiate the handshake, concluding the incoming connection process.\n    match negotiate_handshake::<P, _>(&context, framed_transport, connection_id).await {\n        Ok(HandshakeOutcome {\n            framed_transport,\n            public_addr,\n            peer_consensus_public_key,\n            is_peer_syncing: _,\n        }) => {\n            if !context.allow_handshake {\n                return IncomingConnection::Failed {\n                    peer_addr,\n                    peer_id,\n                    error: ConnectionError::HandshakeNotAllowed,\n                };\n            }\n\n            if let Some(ref public_key) = peer_consensus_public_key {\n                Span::current().record(\"consensus_key\", field::display(public_key));\n            }\n\n            // Establish full transport and close the receiving end.\n            let full_transport = full_transport::<P>(\n                context.net_metrics.clone(),\n                connection_id,\n                framed_transport,\n                Role::Listener,\n            );\n\n            let (_sink, stream) = full_transport.split();\n\n            IncomingConnection::Established {\n                peer_addr,\n                public_addr,\n                peer_id,\n                peer_consensus_public_key,\n                stream,\n            }\n        }\n        Err(error) => IncomingConnection::Failed {\n            peer_addr,\n            peer_id,\n            error,\n        },\n    }\n}\n\n/// Server-side TLS setup.\n///\n/// This function groups the TLS setup into a convenient function, enabling the `?` operator.\npub(super) async fn server_setup_tls<REv>(\n    context: &NetworkContext<REv>,\n    stream: TcpStream,\n) -> Result<(NodeId, 
Transport), ConnectionError> {\n    let mut tls_stream = tls::create_tls_acceptor(\n        context.our_cert.as_x509().as_ref(),\n        context.secret_key.as_ref(),\n    )\n    .and_then(|ssl_acceptor| Ssl::new(ssl_acceptor.context()))\n    .and_then(|ssl| SslStream::new(ssl, stream))\n    .map_err(ConnectionError::TlsInitialization)?;\n\n    SslStream::accept(Pin::new(&mut tls_stream))\n        .await\n        .map_err(ConnectionError::TlsHandshake)?;\n\n    // We can now verify the certificate.\n    let peer_cert = tls_stream\n        .ssl()\n        .peer_certificate()\n        .ok_or(ConnectionError::NoPeerCertificate)?;\n\n    let validated_peer_cert = context\n        .validate_peer_cert(peer_cert)\n        .map_err(ConnectionError::PeerCertificateInvalid)?;\n\n    Ok((\n        NodeId::from(validated_peer_cert.public_key_fingerprint()),\n        tls_stream,\n    ))\n}\n\n/// Performs an IO-operation that can time out.\nasync fn io_timeout<F, T, E>(duration: Duration, future: F) -> Result<T, IoError<E>>\nwhere\n    F: Future<Output = Result<T, E>>,\n    E: StdError + 'static,\n{\n    tokio::time::timeout(duration, future)\n        .await\n        .map_err(|_elapsed| IoError::Timeout)?\n        .map_err(IoError::Error)\n}\n\n/// Performs an IO-operation that can time out or result in a closed connection.\nasync fn io_opt_timeout<F, T, E>(duration: Duration, future: F) -> Result<T, IoError<E>>\nwhere\n    F: Future<Output = Option<Result<T, E>>>,\n    E: StdError + 'static,\n{\n    let item = tokio::time::timeout(duration, future)\n        .await\n        .map_err(|_elapsed| IoError::Timeout)?;\n\n    match item {\n        Some(Ok(value)) => Ok(value),\n        Some(Err(err)) => Err(IoError::Error(err)),\n        None => Err(IoError::UnexpectedEof),\n    }\n}\n\n/// Negotiates a handshake between two peers.\nasync fn negotiate_handshake<P, REv>(\n    context: &NetworkContext<REv>,\n    framed: FramedTransport,\n    connection_id: ConnectionId,\n) -> 
Result<HandshakeOutcome, ConnectionError>\nwhere\n    P: Payload,\n{\n    let mut encoder = MessagePackFormat;\n\n    // Manually encode a handshake.\n    let handshake_message = context.chain_info.create_handshake::<P>(\n        context.public_addr.expect(\"component not initialized\"),\n        context.node_key_pair.as_ref(),\n        connection_id,\n        context.is_syncing.load(Ordering::SeqCst),\n    );\n\n    let serialized_handshake_message = Pin::new(&mut encoder)\n        .serialize(&Arc::new(handshake_message))\n        .map_err(ConnectionError::CouldNotEncodeOurHandshake)?;\n\n    // To ensure we are not dead-locking, we split the framed transport here and send the handshake\n    // in a background task before awaiting one ourselves. This ensures we can make progress\n    // regardless of the size of the outgoing handshake.\n    let (mut sink, mut stream) = framed.split();\n\n    let handshake_send = tokio::spawn(io_timeout(context.handshake_timeout.into(), async move {\n        sink.send(serialized_handshake_message).await?;\n        Ok(sink)\n    }));\n\n    // The remote's message should be a handshake, but can technically be any message. 
We receive,\n    // deserialize and check it.\n    let remote_message_raw = io_opt_timeout(context.handshake_timeout.into(), stream.next())\n        .await\n        .map_err(ConnectionError::HandshakeRecv)?;\n\n    // Ensure the handshake was sent correctly.\n    let sink = handshake_send\n        .await\n        .map_err(ConnectionError::HandshakeSenderCrashed)?\n        .map_err(ConnectionError::HandshakeSend)?;\n\n    let remote_message: Message<P> = Pin::new(&mut encoder)\n        .deserialize(&remote_message_raw)\n        .map_err(ConnectionError::InvalidRemoteHandshakeMessage)?;\n\n    if let Message::Handshake {\n        network_name,\n        public_addr,\n        protocol_version,\n        consensus_certificate,\n        is_syncing,\n        chainspec_hash,\n    } = remote_message\n    {\n        debug!(%protocol_version, \"handshake received\");\n\n        // The handshake was valid, we can check the network name.\n        if network_name != context.chain_info.network_name {\n            return Err(ConnectionError::WrongNetwork(network_name));\n        }\n\n        // If there is a version mismatch, we treat it as a connection error. 
We do not ban peers\n        // for this error, but instead rely on exponential backoff, as bans would result in issues\n        // during upgrades where nodes may have a legitimate reason for differing versions.\n        //\n        // Since we are not using SemVer for versioning, we cannot make any assumptions about\n        // compatibility, so we allow only exact version matches.\n        if protocol_version != context.chain_info.protocol_version {\n            if let Some(threshold) = context.tarpit_version_threshold {\n                if protocol_version <= threshold {\n                    let mut rng = crate::new_rng();\n\n                    if rng.gen_bool(context.tarpit_chance as f64) {\n                        // If tarpitting is enabled, we hold open the connection for a specific\n                        // amount of time, to reduce load on other nodes and keep them from\n                        // reconnecting.\n                        info!(duration=?context.tarpit_duration, \"randomly tarpitting node\");\n                        tokio::time::sleep(Duration::from(context.tarpit_duration)).await;\n                    } else {\n                        debug!(p = context.tarpit_chance, \"randomly not tarpitting node\");\n                    }\n                }\n            }\n            return Err(ConnectionError::IncompatibleVersion(protocol_version));\n        }\n\n        // We check the chainspec hash to ensure peer is using the same chainspec as us.\n        // The remote message should always have a chainspec hash at this point since\n        // we checked the protocol version previously.\n        let peer_chainspec_hash = chainspec_hash.ok_or(ConnectionError::MissingChainspecHash)?;\n        if peer_chainspec_hash != context.chain_info.chainspec_hash {\n            return Err(ConnectionError::WrongChainspecHash(peer_chainspec_hash));\n        }\n\n        let peer_consensus_public_key = consensus_certificate\n            .map(|cert| {\n          
      cert.validate(connection_id)\n                    .map_err(ConnectionError::InvalidConsensusCertificate)\n            })\n            .transpose()?;\n\n        let framed_transport = sink\n            .reunite(stream)\n            .map_err(|_| ConnectionError::FailedToReuniteHandshakeSinkAndStream)?;\n\n        Ok(HandshakeOutcome {\n            framed_transport,\n            public_addr,\n            peer_consensus_public_key,\n            is_peer_syncing: is_syncing,\n        })\n    } else {\n        // Received a non-handshake, this is an error.\n        Err(ConnectionError::DidNotSendHandshake)\n    }\n}\n\n/// Runs the server core acceptor loop.\npub(super) async fn server<P, REv>(\n    context: Arc<NetworkContext<REv>>,\n    listener: tokio::net::TcpListener,\n    mut shutdown_receiver: watch::Receiver<()>,\n) where\n    REv: From<Event<P>> + Send,\n    P: Payload,\n{\n    // The server task is a bit tricky, since it has to wait on incoming connections while at the\n    // same time shut down if the networking component is dropped, otherwise the TCP socket will\n    // stay open, preventing reuse.\n\n    // We first create a future that never terminates, handling incoming connections:\n    let accept_connections = async {\n        let event_queue = context.event_queue.expect(\"component not initialized\");\n        loop {\n            // We handle accept errors here, since they can be caused by a temporary resource\n            // shortage or the remote side closing the connection while it is waiting in\n            // the queue.\n            match listener.accept().await {\n                Ok((stream, peer_addr)) => {\n                    // The span setup here is used throughout the entire lifetime of the connection.\n                    let span =\n                        error_span!(\"incoming\", %peer_addr, peer_id=Empty, consensus_key=Empty);\n\n                    let context = context.clone();\n                    let handler_span = 
span.clone();\n                    tokio::spawn(\n                        async move {\n                            let incoming =\n                                handle_incoming(context.clone(), stream, peer_addr).await;\n                            event_queue\n                                .schedule(\n                                    Event::IncomingConnection {\n                                        incoming: Box::new(incoming),\n                                        span,\n                                    },\n                                    QueueKind::NetworkIncoming,\n                                )\n                                .await;\n                        }\n                        .instrument(handler_span),\n                    );\n                }\n\n                // TODO: Handle resource errors gracefully.\n                //       In general, two kinds of errors occur here: Local resource exhaustion,\n                //       which should be handled by waiting a few milliseconds, or remote connection\n                //       errors, which can be dropped immediately.\n                //\n                //       The code in its current state will consume 100% CPU if local resource\n                //       exhaustion happens, as no distinction is made and no delay introduced.\n                Err(ref err) => {\n                    warn!(%context.our_id, err=display_error(err), \"dropping incoming connection during accept\")\n                }\n            }\n        }\n    };\n\n    let shutdown_messages = async move { while shutdown_receiver.changed().await.is_ok() {} };\n\n    // Now we can wait for either the `shutdown` channel's remote end to do be dropped or the\n    // infinite loop to terminate, which never happens.\n    match future::select(Box::pin(shutdown_messages), Box::pin(accept_connections)).await {\n        Either::Left(_) => info!(\n            %context.our_id,\n            \"shutting down socket, no longer 
accepting incoming connections\"\n        ),\n        Either::Right(_) => unreachable!(),\n    }\n}\n\n/// Network message reader.\n///\n/// Schedules all received messages until the stream is closed or an error occurs.\npub(super) async fn message_reader<REv, P>(\n    context: Arc<NetworkContext<REv>>,\n    mut stream: SplitStream<FullTransport<P>>,\n    limiter: LimiterHandle,\n    mut close_incoming_receiver: watch::Receiver<()>,\n    peer_id: NodeId,\n    span: Span,\n) -> io::Result<()>\nwhere\n    P: DeserializeOwned + Send + Display + Payload,\n    REv: From<Event<P>>\n        + FromIncoming<P>\n        + From<NetworkRequest<P>>\n        + From<PeerBehaviorAnnouncement>\n        + Send,\n{\n    let demands_in_flight = Arc::new(Semaphore::new(context.max_in_flight_demands));\n    let event_queue = context.event_queue.expect(\"component not initialized\");\n\n    let read_messages = async move {\n        while let Some(msg_result) = stream.next().await {\n            match msg_result {\n                Ok(msg) => {\n                    trace!(%msg, \"message received\");\n\n                    let effect_builder = EffectBuilder::new(event_queue);\n\n                    match msg.try_into_demand(effect_builder, peer_id) {\n                        Ok((event, wait_for_response)) => {\n                            // Note: For now, demands bypass the limiter, as we expect the\n                            //       backpressure to handle this instead.\n\n                            // Acquire a permit. 
If we are handling too many demands at this\n                            // time, this will block, halting the processing of new message,\n                            // thus letting the peer they have reached their maximum allowance.\n                            let in_flight = demands_in_flight\n                                .clone()\n                                .acquire_owned()\n                                .await\n                                // Note: Since the semaphore is reference counted, it must\n                                //       explicitly be closed for acquisition to fail, which we\n                                //       never do. If this happens, there is a bug in the code;\n                                //       we exit with an error and close the connection.\n                                .map_err(|_| {\n                                    io::Error::new(\n                                        io::ErrorKind::Other,\n                                        \"demand limiter semaphore closed unexpectedly\",\n                                    )\n                                })?;\n\n                            Metrics::record_trie_request_start(&context.net_metrics);\n\n                            let net_metrics = context.net_metrics.clone();\n                            // Spawn a future that will eventually send the returned message. It\n                            // will essentially buffer the response.\n                            tokio::spawn(async move {\n                                if let Some(payload) = wait_for_response.await {\n                                    // Send message and await its return. 
`send_message` should\n                                    // only return when the message has been buffered, if the\n                                    // peer is not accepting data, we will block here until the\n                                    // send buffer has sufficient room.\n                                    effect_builder.send_message(peer_id, payload).await;\n\n                                    // Note: We could short-circuit the event queue here and\n                                    //       directly insert into the outgoing message queue,\n                                    //       which may be potential performance improvement.\n                                }\n\n                                // Missing else: The handler of the demand did not deem it\n                                // worthy a response. Just drop it.\n\n                                // After we have either successfully buffered the message for\n                                // sending, failed to do so or did not have a message to send\n                                // out, we consider the request handled and free up the permit.\n                                Metrics::record_trie_request_end(&net_metrics);\n                                drop(in_flight);\n                            });\n\n                            // Schedule the created event.\n                            event_queue\n                                .schedule::<REv>(event, QueueKind::NetworkDemand)\n                                .await;\n                        }\n                        Err(msg) => {\n                            // We've received a non-demand message. 
Ensure we have the proper amount\n                            // of resources, then push it to the reactor.\n                            limiter\n                                .request_allowance(\n                                    msg.payload_incoming_resource_estimate(\n                                        &context.payload_weights,\n                                    ),\n                                )\n                                .await;\n\n                            let queue_kind = if msg.is_low_priority() {\n                                QueueKind::NetworkLowPriority\n                            } else {\n                                QueueKind::NetworkIncoming\n                            };\n\n                            event_queue\n                                .schedule(\n                                    Event::IncomingMessage {\n                                        peer_id: Box::new(peer_id),\n                                        msg,\n                                        span: span.clone(),\n                                    },\n                                    queue_kind,\n                                )\n                                .await;\n                        }\n                    }\n                }\n                Err(err) => {\n                    warn!(\n                        err = display_error(&err),\n                        \"receiving message failed, closing connection\"\n                    );\n                    return Err(err);\n                }\n            }\n        }\n        Ok(())\n    };\n\n    let shutdown_messages = async move { while close_incoming_receiver.changed().await.is_ok() {} };\n\n    // Now we can wait for either the `shutdown` channel's remote end to do be dropped or the\n    // while loop to terminate.\n    match future::select(Box::pin(shutdown_messages), Box::pin(read_messages)).await {\n        Either::Left(_) => info!(\"shutting down incoming connection 
message reader\"),\n        Either::Right(_) => (),\n    }\n    Ok(())\n}\n\n/// Network message sender.\n///\n/// Reads from a channel and sends all messages, until the stream is closed or an error occurs.\npub(super) async fn message_sender<P>(\n    mut queue: UnboundedReceiver<MessageQueueItem<P>>,\n    mut sink: SplitSink<FullTransport<P>, Arc<Message<P>>>,\n    limiter: LimiterHandle,\n    counter: IntGauge,\n) where\n    P: Payload,\n{\n    while let Some((message, opt_responder)) = queue.recv().await {\n        counter.dec();\n\n        let estimated_wire_size = match BincodeFormat::default().0.serialized_size(&*message) {\n            Ok(size) => size as u32,\n            Err(error) => {\n                error!(\n                    error = display_error(&error),\n                    \"failed to get serialized size of outgoing message, closing outgoing connection\"\n                );\n                break;\n            }\n        };\n        limiter.request_allowance(estimated_wire_size).await;\n\n        let mut outcome = sink.send(message).await;\n\n        // Notify via responder that the message has been buffered by the kernel.\n        if let Some(auto_closing_responder) = opt_responder {\n            // Since someone is interested in the message, flush the socket to ensure it was sent.\n            outcome = outcome.and(sink.flush().await);\n            auto_closing_responder.respond(()).await;\n        }\n\n        // We simply error-out if the sink fails, it means that our connection broke.\n        if let Err(ref err) = outcome {\n            info!(\n                err = display_error(err),\n                \"message send failed, closing outgoing connection\"\n            );\n\n            // To ensure, metrics are up to date, we close the queue and drain it.\n            queue.close();\n            while queue.recv().await.is_some() {\n                counter.dec();\n            }\n\n            break;\n        };\n    }\n}\n"
  },
  {
    "path": "node/src/components/network/tests.rs",
    "content": "//! Tests for the `network` component.\n//!\n//! Calling these \"unit tests\" would be a bit of a misnomer, since they deal mostly with multiple\n//! instances of `net` arranged in a network.\n\nuse std::{\n    collections::{HashMap, HashSet},\n    fmt::{self, Debug, Display, Formatter},\n    sync::Arc,\n    time::{Duration, Instant},\n};\n\nuse derive_more::From;\nuse futures::FutureExt;\nuse prometheus::Registry;\nuse reactor::ReactorEvent;\nuse serde::{Deserialize, Serialize};\nuse smallvec::smallvec;\nuse tracing::{debug, info};\n\nuse casper_types::{Chainspec, ChainspecRawBytes, SecretKey};\n\nuse super::{\n    chain_info::ChainInfo, Event as NetworkEvent, FromIncoming, GossipedAddress, Identity,\n    MessageKind, Network, Payload,\n};\nuse crate::{\n    components::{\n        gossiper::{self, GossipItem, Gossiper},\n        network, Component, InitializedComponent,\n    },\n    effect::{\n        announcements::{ControlAnnouncement, GossiperAnnouncement, PeerBehaviorAnnouncement},\n        incoming::GossiperIncoming,\n        requests::{\n            BeginGossipRequest, ChainspecRawBytesRequest, ContractRuntimeRequest, NetworkRequest,\n            StorageRequest,\n        },\n        EffectBuilder, Effects,\n    },\n    protocol,\n    reactor::{self, main_reactor::Config, EventQueueHandle, Finalize, Reactor, Runner},\n    testing::{\n        self, init_logging,\n        network::{NetworkedReactor, Nodes, TestingNetwork},\n        ConditionCheckReactor,\n    },\n    types::{NodeId, SyncHandling, ValidatorMatrix},\n    NodeRng,\n};\n\n/// Test-reactor event.\n#[derive(Debug, From, Serialize)]\nenum Event {\n    #[from]\n    Net(#[serde(skip_serializing)] NetworkEvent<Message>),\n    #[from]\n    AddressGossiper(#[serde(skip_serializing)] gossiper::Event<GossipedAddress>),\n    #[from]\n    NetworkRequest(#[serde(skip_serializing)] NetworkRequest<Message>),\n    #[from]\n    ControlAnnouncement(ControlAnnouncement),\n    #[from]\n    
AddressGossiperAnnouncement(#[serde(skip_serializing)] GossiperAnnouncement<GossipedAddress>),\n    #[from]\n    BeginAddressGossipRequest(BeginGossipRequest<GossipedAddress>),\n    /// An incoming network message with an address gossiper protocol message.\n    AddressGossiperIncoming(GossiperIncoming<GossipedAddress>),\n    #[from]\n    BlocklistAnnouncement(PeerBehaviorAnnouncement),\n}\n\nimpl ReactorEvent for Event {\n    fn is_control(&self) -> bool {\n        matches!(self, Event::ControlAnnouncement(_))\n    }\n\n    fn try_into_control(self) -> Option<ControlAnnouncement> {\n        if let Self::ControlAnnouncement(ctrl_ann) = self {\n            Some(ctrl_ann)\n        } else {\n            None\n        }\n    }\n}\n\nimpl From<NetworkRequest<gossiper::Message<GossipedAddress>>> for Event {\n    fn from(request: NetworkRequest<gossiper::Message<GossipedAddress>>) -> Self {\n        Event::NetworkRequest(request.map_payload(Message::from))\n    }\n}\n\nimpl From<NetworkRequest<Message>> for NetworkEvent<Message> {\n    fn from(request: NetworkRequest<Message>) -> NetworkEvent<Message> {\n        NetworkEvent::NetworkRequest {\n            req: Box::new(request),\n        }\n    }\n}\n\nimpl From<NetworkRequest<protocol::Message>> for Event {\n    fn from(_request: NetworkRequest<protocol::Message>) -> Self {\n        unreachable!()\n    }\n}\n\nimpl From<StorageRequest> for Event {\n    fn from(_request: StorageRequest) -> Self {\n        unreachable!()\n    }\n}\n\nimpl From<ChainspecRawBytesRequest> for Event {\n    fn from(_request: ChainspecRawBytesRequest) -> Self {\n        unreachable!()\n    }\n}\n\nimpl From<ContractRuntimeRequest> for Event {\n    fn from(_request: ContractRuntimeRequest) -> Self {\n        unreachable!()\n    }\n}\n\nimpl FromIncoming<Message> for Event {\n    fn from_incoming(sender: NodeId, payload: Message) -> Self {\n        match payload {\n            Message::AddressGossiper(message) => 
Event::AddressGossiperIncoming(GossiperIncoming {\n                sender,\n                message: Box::new(message),\n            }),\n        }\n    }\n}\n\nimpl Display for Event {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        Debug::fmt(self, f)\n    }\n}\n\n#[derive(Clone, Debug, Deserialize, Serialize, From)]\nenum Message {\n    #[from]\n    AddressGossiper(gossiper::Message<GossipedAddress>),\n}\n\nimpl Display for Message {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        Debug::fmt(self, f)\n    }\n}\n\nimpl Payload for Message {\n    #[inline]\n    fn message_kind(&self) -> MessageKind {\n        match self {\n            Message::AddressGossiper(_) => MessageKind::AddressGossip,\n        }\n    }\n\n    fn incoming_resource_estimate(&self, _weights: &super::EstimatorWeights) -> u32 {\n        0\n    }\n\n    fn is_unsafe_for_syncing_peers(&self) -> bool {\n        false\n    }\n}\n\n/// Test reactor.\n///\n/// Runs a single network.\n#[derive(Debug)]\nstruct TestReactor {\n    net: Network<Event, Message>,\n    address_gossiper: Gossiper<{ GossipedAddress::ID_IS_COMPLETE_ITEM }, GossipedAddress>,\n}\n\nimpl Reactor for TestReactor {\n    type Event = Event;\n    type Config = Config;\n    type Error = anyhow::Error;\n\n    fn dispatch_event(\n        &mut self,\n        effect_builder: EffectBuilder<Self::Event>,\n        rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event> {\n        match event {\n            Event::Net(ev) => {\n                reactor::wrap_effects(Event::Net, self.net.handle_event(effect_builder, rng, ev))\n            }\n            Event::AddressGossiper(event) => reactor::wrap_effects(\n                Event::AddressGossiper,\n                self.address_gossiper\n                    .handle_event(effect_builder, rng, event),\n            ),\n            Event::NetworkRequest(req) => reactor::wrap_effects(\n                Event::Net,\n                
self.net.handle_event(effect_builder, rng, req.into()),\n            ),\n            Event::ControlAnnouncement(ctrl_ann) => {\n                unreachable!(\"unhandled control announcement: {}\", ctrl_ann)\n            }\n            Event::AddressGossiperAnnouncement(GossiperAnnouncement::NewCompleteItem(\n                gossiped_address,\n            )) => reactor::wrap_effects(\n                Event::Net,\n                self.net.handle_event(\n                    effect_builder,\n                    rng,\n                    NetworkEvent::PeerAddressReceived(gossiped_address),\n                ),\n            ),\n\n            Event::AddressGossiperAnnouncement(GossiperAnnouncement::GossipReceived { .. }) => {\n                // We do not care about the announcement of a new gossiped item in this test.\n                Effects::new()\n            }\n            Event::AddressGossiperAnnouncement(GossiperAnnouncement::FinishedGossiping(_)) => {\n                // We do not care about the announcement of gossiping finished in this test.\n                Effects::new()\n            }\n            Event::AddressGossiperAnnouncement(GossiperAnnouncement::NewItemBody { .. 
}) => {\n                // Addresses shouldn't have an item body when gossiped.\n                Effects::new()\n            }\n            Event::BeginAddressGossipRequest(ev) => reactor::wrap_effects(\n                Event::AddressGossiper,\n                self.address_gossiper\n                    .handle_event(effect_builder, rng, ev.into()),\n            ),\n            Event::AddressGossiperIncoming(incoming) => reactor::wrap_effects(\n                Event::AddressGossiper,\n                self.address_gossiper\n                    .handle_event(effect_builder, rng, incoming.into()),\n            ),\n            Event::BlocklistAnnouncement(_announcement) => Effects::new(),\n        }\n    }\n\n    fn new(\n        cfg: Self::Config,\n        _chainspec: Arc<Chainspec>,\n        _chainspec_raw_bytes: Arc<ChainspecRawBytes>,\n        our_identity: Identity,\n        registry: &Registry,\n        _event_queue: EventQueueHandle<Self::Event>,\n        rng: &mut NodeRng,\n    ) -> anyhow::Result<(Self, Effects<Self::Event>)> {\n        let secret_key = SecretKey::random(rng);\n        let allow_handshake = cfg.node.sync_handling != SyncHandling::Isolated;\n        let mut net = Network::new(\n            cfg.network.clone(),\n            our_identity,\n            None,\n            registry,\n            ChainInfo::create_for_testing(),\n            ValidatorMatrix::new_with_validator(Arc::new(secret_key)),\n            allow_handshake,\n        )?;\n        let gossiper_config = gossiper::Config::new_with_small_timeouts();\n        let address_gossiper = Gossiper::<{ GossipedAddress::ID_IS_COMPLETE_ITEM }, _>::new(\n            \"address_gossiper\",\n            gossiper_config,\n            registry,\n        )?;\n\n        net.start_initialization();\n        let effects = smallvec![async { smallvec![Event::Net(NetworkEvent::Initialize)] }.boxed()];\n\n        Ok((\n            TestReactor {\n                net,\n                address_gossiper,\n       
     },\n            effects,\n        ))\n    }\n}\n\nimpl NetworkedReactor for TestReactor {\n    fn node_id(&self) -> NodeId {\n        self.net.node_id()\n    }\n}\n\nimpl Finalize for TestReactor {\n    fn finalize(self) -> futures::future::BoxFuture<'static, ()> {\n        self.net.finalize()\n    }\n}\n\n/// Checks whether or not a given network with potentially blocked nodes is completely connected.\nfn network_is_complete(\n    blocklist: &HashSet<NodeId>,\n    nodes: &HashMap<NodeId, Runner<ConditionCheckReactor<TestReactor>>>,\n) -> bool {\n    // Collect expected nodes.\n    let expected: HashSet<_> = nodes\n        .keys()\n        .filter(|&node_id| !blocklist.contains(node_id))\n        .copied()\n        .collect();\n\n    for (node_id, node) in nodes {\n        let net = &node.reactor().inner().net;\n        // TODO: Ensure the connections are symmetrical.\n        let peers: HashSet<_> = net.peers().into_keys().collect();\n\n        let mut missing = expected.difference(&peers);\n\n        if let Some(first_missing) = missing.next() {\n            // We only allow loopbacks to be missing.\n            if first_missing != node_id {\n                return false;\n            }\n        }\n\n        if missing.next().is_some() {\n            // We have at least two missing, which cannot be.\n            return false;\n        }\n    }\n    true\n}\n\n/// Checks whether or not a given network has at least one other node in it\nfn network_started(net: &TestingNetwork<TestReactor>) -> bool {\n    net.nodes()\n        .iter()\n        .map(|(_, runner)| runner.reactor().inner().net.peers())\n        .all(|peers| !peers.is_empty())\n}\n\n/// Run a two-node network five times.\n///\n/// Ensures that network cleanup and basic networking works.\n#[tokio::test]\nasync fn run_two_node_network_five_times() {\n    let mut rng = crate::new_rng();\n\n    // The networking port used by the tests for the root node.\n    let first_node_port = 
testing::unused_port_on_localhost() + 1;\n\n    init_logging();\n\n    for i in 0..5 {\n        info!(\"two-network test round {}\", i);\n\n        let mut net = TestingNetwork::new();\n\n        let start = Instant::now();\n\n        let cfg = Config::default().with_network_config(\n            network::Config::default_local_net_first_node(first_node_port),\n        );\n        net.add_node_with_config(cfg, &mut rng).await.unwrap();\n\n        let cfg = Config::default()\n            .with_network_config(network::Config::default_local_net(first_node_port));\n        net.add_node_with_config(cfg.clone(), &mut rng)\n            .await\n            .unwrap();\n        let end = Instant::now();\n\n        debug!(\n            total_time_ms = (end - start).as_millis() as u64,\n            \"finished setting up networking nodes\"\n        );\n\n        let timeout = Duration::from_secs(20);\n        let blocklist = HashSet::new();\n        net.settle_on(\n            &mut rng,\n            |nodes| network_is_complete(&blocklist, nodes),\n            timeout,\n        )\n        .await;\n\n        assert!(\n            network_started(&net),\n            \"each node is connected to at least one other node\"\n        );\n\n        let quiet_for = Duration::from_millis(25);\n        let timeout = Duration::from_secs(2);\n        net.settle(&mut rng, quiet_for, timeout).await;\n\n        assert!(\n            network_is_complete(&blocklist, net.nodes()),\n            \"network did not stay connected\"\n        );\n\n        net.finalize().await;\n    }\n}\n\n/// Sanity check that we can bind to a real network.\n///\n/// Very unlikely to ever fail on a real machine.\n#[cfg(not(target_os = \"macos\"))]\n#[tokio::test]\nasync fn bind_to_real_network_interface() {\n    init_logging();\n\n    let mut rng = crate::new_rng();\n\n    let iface = pnet::datalink::interfaces()\n        .into_iter()\n        .find(|net| !net.ips.is_empty() && !net.ips.iter().any(|ip| 
ip.ip().is_loopback()))\n        .expect(\"could not find a single networking interface that isn't localhost\");\n\n    let local_addr = iface\n        .ips\n        .into_iter()\n        .next()\n        .expect(\"found a interface with no ips\")\n        .ip();\n    let port = testing::unused_port_on_localhost();\n\n    let cfg =\n        Config::default().with_network_config(network::Config::new((local_addr, port).into()));\n\n    let mut net = TestingNetwork::<TestReactor>::new();\n    net.add_node_with_config(cfg, &mut rng).await.unwrap();\n\n    // The network should be fully connected.\n    let timeout = Duration::from_secs(2);\n    let blocklist = HashSet::new();\n    net.settle_on(\n        &mut rng,\n        |nodes| network_is_complete(&blocklist, nodes),\n        timeout,\n    )\n    .await;\n\n    net.finalize().await;\n}\n\n/// Check that a network of varying sizes will connect all nodes properly.\n#[tokio::test]\nasync fn check_varying_size_network_connects() {\n    init_logging();\n\n    let mut rng = crate::new_rng();\n\n    // Try with a few predefined sets of network sizes.\n    for &number_of_nodes in &[2u16, 3, 5, 9, 15] {\n        let timeout = Duration::from_secs(3 * number_of_nodes as u64);\n\n        let mut net = TestingNetwork::new();\n\n        // Pick a random port in the higher ranges that is likely to be unused.\n        let first_node_port = testing::unused_port_on_localhost();\n        let cfg = Config::default().with_network_config(\n            network::Config::default_local_net_first_node(first_node_port),\n        );\n\n        let _ = net.add_node_with_config(cfg, &mut rng).await.unwrap();\n        let cfg = Config::default()\n            .with_network_config(network::Config::default_local_net(first_node_port));\n\n        for _ in 1..number_of_nodes {\n            net.add_node_with_config(cfg.clone(), &mut rng)\n                .await\n                .unwrap();\n        }\n\n        // The network should be fully connected.\n  
      let blocklist = HashSet::new();\n        net.settle_on(\n            &mut rng,\n            |nodes| network_is_complete(&blocklist, nodes),\n            timeout,\n        )\n        .await;\n\n        let blocklist = HashSet::new();\n        // This should not make a difference at all, but we're paranoid, so check again.\n        assert!(\n            network_is_complete(&blocklist, net.nodes()),\n            \"network did not stay connected after being settled\"\n        );\n\n        // Now the network should have an appropriate number of peers.\n\n        // This test will run multiple times, so ensure we cleanup all ports.\n        net.finalize().await;\n    }\n}\n\n/// Check that a network of varying sizes will connect all nodes properly.\n#[tokio::test]\nasync fn ensure_peers_metric_is_correct() {\n    init_logging();\n\n    let mut rng = crate::new_rng();\n\n    // Larger networks can potentially become more unreliable, so we try with small sizes only.\n    for &number_of_nodes in &[2u16, 3, 5] {\n        let timeout = Duration::from_secs(3 * number_of_nodes as u64);\n\n        let mut net = TestingNetwork::new();\n\n        // Pick a random port in the higher ranges that is likely to be unused.\n        let first_node_port = testing::unused_port_on_localhost();\n\n        let cfg = Config::default().with_network_config(\n            network::Config::default_local_net_first_node(first_node_port),\n        );\n\n        let _ = net.add_node_with_config(cfg, &mut rng).await.unwrap();\n\n        let cfg = Config::default()\n            .with_network_config(network::Config::default_local_net(first_node_port));\n\n        for _ in 1..number_of_nodes {\n            net.add_node_with_config(cfg.clone(), &mut rng)\n                .await\n                .unwrap();\n        }\n\n        net.settle_on(\n            &mut rng,\n            |nodes: &Nodes<TestReactor>| {\n                nodes.values().all(|runner| {\n                    
runner.reactor().inner().net.net_metrics.peers.get()\n                        == number_of_nodes as i64 - 1\n                })\n            },\n            timeout,\n        )\n        .await;\n\n        net.finalize().await;\n    }\n}\n"
  },
  {
    "path": "node/src/components/network.rs",
    "content": "//! Fully connected overlay network\n//!\n//! The *network component* is an overlay network where each node participating is attempting to\n//! maintain a connection to every other node identified on the same network. The component does not\n//! guarantee message delivery, so in between reconnections, messages may be lost.\n//!\n//! # Node IDs\n//!\n//! Each node has a self-generated node ID based on its self-signed TLS certificate. Whenever a\n//! connection is made to another node, it verifies the \"server\"'s certificate to check that it\n//! connected to a valid node and sends its own certificate during the TLS handshake, establishing\n//! identity.\n//!\n//! # Connection\n//!\n//! Every node has an ID and a public listening address. The objective of each node is to constantly\n//! maintain an outgoing connection to each other node (and thus have an incoming connection from\n//! these nodes as well).\n//!\n//! Any incoming connection is, after a handshake process, strictly read from, while any outgoing\n//! connection is strictly used for sending messages, also after a handshake.\n//!\n//! Nodes gossip their public listening addresses periodically, and will try to establish and\n//! 
maintain an outgoing connection to any new address learned.\n\nmod bincode_format;\npub(crate) mod blocklist;\nmod chain_info;\nmod config;\nmod counting_format;\nmod error;\nmod event;\nmod gossiped_address;\nmod health;\nmod identity;\nmod insights;\nmod limiter;\nmod message;\nmod message_pack_format;\nmod metrics;\nmod outgoing;\nmod symmetry;\npub(crate) mod tasks;\n#[cfg(test)]\nmod tests;\n\nuse std::{\n    collections::{\n        hash_map::{Entry, HashMap},\n        BTreeMap, BTreeSet, HashSet,\n    },\n    fmt::{self, Debug, Display, Formatter},\n    io,\n    net::{SocketAddr, TcpListener},\n    sync::{Arc, Weak},\n    time::{Duration, Instant},\n};\n\nuse datasize::DataSize;\nuse itertools::Itertools;\nuse prometheus::Registry;\nuse rand::{\n    seq::{IteratorRandom, SliceRandom},\n    Rng,\n};\nuse serde::{Deserialize, Serialize};\nuse tokio::{\n    net::TcpStream,\n    sync::{\n        mpsc::{self, UnboundedSender},\n        watch,\n    },\n    task::JoinHandle,\n};\nuse tokio_openssl::SslStream;\nuse tokio_util::codec::LengthDelimitedCodec;\nuse tracing::{debug, error, info, trace, warn, Instrument, Span};\n\n#[cfg(test)]\nuse futures::{future::BoxFuture, FutureExt};\n\nuse casper_types::{EraId, PublicKey, SecretKey};\n\npub(crate) use self::{\n    bincode_format::BincodeFormat,\n    config::{Config, IdentityConfig},\n    error::Error,\n    event::Event,\n    gossiped_address::GossipedAddress,\n    identity::Identity,\n    insights::NetworkInsights,\n    message::{\n        within_message_size_limit_tolerance, EstimatorWeights, FromIncoming, Message, MessageKind,\n        Payload,\n    },\n};\nuse self::{\n    blocklist::BlocklistJustification,\n    chain_info::ChainInfo,\n    counting_format::{ConnectionId, CountingFormat, Role},\n    error::{ConnectionError, Result},\n    event::{IncomingConnection, OutgoingConnection},\n    health::{HealthConfig, TaggedTimestamp},\n    limiter::Limiter,\n    message::NodeKeyPair,\n    metrics::Metrics,\n    
outgoing::{DialOutcome, DialRequest, OutgoingConfig, OutgoingManager},\n    symmetry::ConnectionSymmetry,\n    tasks::{MessageQueueItem, NetworkContext},\n};\nuse crate::{\n    components::{gossiper::GossipItem, Component, ComponentState, InitializedComponent},\n    effect::{\n        announcements::PeerBehaviorAnnouncement,\n        requests::{BeginGossipRequest, NetworkInfoRequest, NetworkRequest, StorageRequest},\n        AutoClosingResponder, EffectBuilder, EffectExt, Effects, GossipTarget,\n    },\n    reactor::ReactorEvent,\n    tls,\n    types::{NodeId, ValidatorMatrix},\n    utils::{self, display_error, Source},\n    NodeRng,\n};\n\nconst COMPONENT_NAME: &str = \"network\";\n\n/// How often to keep attempting to reconnect to a node before giving up. Note that reconnection\n/// delays increase exponentially!\nconst RECONNECTION_ATTEMPTS: u8 = 8;\n\n/// Basic reconnection timeout.\n///\n/// The first reconnection attempt will be made after 2x this timeout.\nconst BASE_RECONNECTION_TIMEOUT: Duration = Duration::from_secs(1);\n\n/// Interval during which to perform outgoing manager housekeeping.\nconst OUTGOING_MANAGER_SWEEP_INTERVAL: Duration = Duration::from_secs(1);\n\n/// How often to send a ping down a healthy connection.\nconst PING_INTERVAL: Duration = Duration::from_secs(30);\n\n/// Maximum time for a ping until it connections are severed.\n///\n/// If you are running a network under very extreme conditions, it may make sense to alter these\n/// values, but usually these values should require no changing.\n///\n/// `PING_TIMEOUT` should be less than `PING_INTERVAL` at all times.\nconst PING_TIMEOUT: Duration = Duration::from_secs(6);\n\n/// How many pings to send before giving up and dropping the connection.\nconst PING_RETRIES: u16 = 5;\n\n#[derive(Clone, DataSize, Debug)]\npub(crate) struct OutgoingHandle<P> {\n    #[data_size(skip)] // Unfortunately, there is no way to inspect an `UnboundedSender`.\n    sender: UnboundedSender<MessageQueueItem<P>>,\n 
   peer_addr: SocketAddr,\n}\n\nimpl<P> Display for OutgoingHandle<P> {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"outgoing handle to {}\", self.peer_addr)\n    }\n}\n\n#[derive(DataSize)]\npub(crate) struct Network<REv, P>\nwhere\n    REv: 'static,\n    P: Payload,\n{\n    /// Initial configuration values.\n    cfg: Config,\n    /// Read-only networking information shared across tasks.\n    context: Arc<NetworkContext<REv>>,\n\n    /// Outgoing connections manager.\n    outgoing_manager: OutgoingManager<OutgoingHandle<P>, ConnectionError>,\n    /// Tracks whether a connection is symmetric or not.\n    connection_symmetries: HashMap<NodeId, ConnectionSymmetry>,\n\n    /// Tracks nodes that have announced themselves as nodes that are syncing.\n    syncing_nodes: HashSet<NodeId>,\n    #[data_size(skip)]\n    channel_management: Option<ChannelManagement>,\n\n    /// Networking metrics.\n    #[data_size(skip)]\n    net_metrics: Arc<Metrics>,\n\n    /// The outgoing bandwidth limiter.\n    #[data_size(skip)]\n    outgoing_limiter: Limiter,\n\n    /// The limiter for incoming resource usage.\n    ///\n    /// This is not incoming bandwidth but an independent resource estimate.\n    #[data_size(skip)]\n    incoming_limiter: Limiter,\n\n    /// The era that is considered the active era by the network component.\n    active_era: EraId,\n\n    /// The state of this component.\n    state: ComponentState,\n}\n\nstruct ChannelManagement {\n    /// Channel signaling a shutdown of the network.\n    // Note: This channel is closed when `Network` is dropped, signalling the receivers that\n    // they should cease operation.\n    #[allow(dead_code)]\n    shutdown_sender: Option<watch::Sender<()>>,\n\n    /// Join handle for the server thread.\n    #[allow(dead_code)]\n    server_join_handle: Option<JoinHandle<()>>,\n\n    /// Channel signaling a shutdown of the incoming connections.\n    // Note: This channel is closed when we finished syncing, 
so the `Network` can close all\n    // connections. When they are re-established, the proper value of the now updated `is_syncing`\n    // flag will be exchanged on handshake.\n    #[allow(dead_code)]\n    close_incoming_sender: Option<watch::Sender<()>>,\n\n    /// Handle used by the `message_reader` task to receive a notification that incoming\n    /// connections should be closed.\n    close_incoming_receiver: watch::Receiver<()>,\n}\n\nimpl<REv, P> Network<REv, P>\nwhere\n    P: Payload + 'static,\n    REv: ReactorEvent\n        + From<Event<P>>\n        + FromIncoming<P>\n        + From<StorageRequest>\n        + From<NetworkRequest<P>>\n        + From<PeerBehaviorAnnouncement>\n        + From<BeginGossipRequest<GossipedAddress>>,\n{\n    /// Creates a new network component instance.\n    #[allow(clippy::type_complexity)]\n    pub(crate) fn new<C: Into<ChainInfo>>(\n        cfg: Config,\n        our_identity: Identity,\n        node_key_pair: Option<(Arc<SecretKey>, PublicKey)>,\n        registry: &Registry,\n        chain_info_source: C,\n        validator_matrix: ValidatorMatrix,\n        allow_handshake: bool,\n    ) -> Result<Network<REv, P>> {\n        let net_metrics = Arc::new(Metrics::new(registry)?);\n\n        let outgoing_limiter = Limiter::new(\n            cfg.max_outgoing_byte_rate_non_validators,\n            net_metrics.accumulated_outgoing_limiter_delay.clone(),\n            validator_matrix.clone(),\n        );\n\n        let incoming_limiter = Limiter::new(\n            cfg.max_incoming_message_rate_non_validators,\n            net_metrics.accumulated_incoming_limiter_delay.clone(),\n            validator_matrix,\n        );\n\n        let outgoing_manager = OutgoingManager::with_metrics(\n            OutgoingConfig {\n                retry_attempts: RECONNECTION_ATTEMPTS,\n                base_timeout: BASE_RECONNECTION_TIMEOUT,\n                unblock_after_min: cfg.blocklist_retain_min_duration.into(),\n                unblock_after_max: 
cfg.blocklist_retain_max_duration.into(),\n                sweep_timeout: cfg.max_addr_pending_time.into(),\n                health: HealthConfig {\n                    ping_interval: PING_INTERVAL,\n                    ping_timeout: PING_TIMEOUT,\n                    ping_retries: PING_RETRIES,\n                    pong_limit: (1 + PING_RETRIES as u32) * 2,\n                },\n            },\n            net_metrics.create_outgoing_metrics(),\n        );\n\n        let context = Arc::new(NetworkContext::new(\n            &cfg,\n            our_identity,\n            node_key_pair.map(NodeKeyPair::new),\n            chain_info_source.into(),\n            &net_metrics,\n            allow_handshake,\n        ));\n\n        let component = Network {\n            cfg,\n            context,\n            outgoing_manager,\n            connection_symmetries: HashMap::new(),\n            syncing_nodes: HashSet::new(),\n            channel_management: None,\n            net_metrics,\n            outgoing_limiter,\n            incoming_limiter,\n            // We start with an empty set of validators for era 0 and expect to be updated.\n            active_era: EraId::new(0),\n            state: ComponentState::Uninitialized,\n        };\n\n        Ok(component)\n    }\n\n    fn initialize(&mut self, effect_builder: EffectBuilder<REv>) -> Result<Effects<Event<P>>> {\n        let mut known_addresses = HashSet::new();\n        for address in &self.cfg.known_addresses {\n            match utils::resolve_address(address) {\n                Ok(known_address) => {\n                    if !known_addresses.insert(known_address) {\n                        warn!(%address, resolved=%known_address, \"ignoring duplicated known address\");\n                    };\n                }\n                Err(ref err) => {\n                    warn!(%address, err=display_error(err), \"failed to resolve known address\");\n                }\n            }\n        }\n\n        // Assert we have at 
least one known address in the config.\n        if known_addresses.is_empty() {\n            warn!(\"no known addresses provided via config or all failed DNS resolution\");\n            return Err(Error::EmptyKnownHosts);\n        }\n\n        let mut public_addr =\n            utils::resolve_address(&self.cfg.public_address).map_err(Error::ResolveAddr)?;\n\n        // We can now create a listener.\n        let bind_address =\n            utils::resolve_address(&self.cfg.bind_address).map_err(Error::ResolveAddr)?;\n        let listener = TcpListener::bind(bind_address)\n            .map_err(|error| Error::ListenerCreation(error, bind_address))?;\n        // We must set non-blocking to `true` or else the tokio task hangs forever.\n        listener\n            .set_nonblocking(true)\n            .map_err(Error::ListenerSetNonBlocking)?;\n\n        let local_addr = listener.local_addr().map_err(Error::ListenerAddr)?;\n\n        // Substitute the actually bound port if set to 0.\n        if public_addr.port() == 0 {\n            public_addr.set_port(local_addr.port());\n        }\n\n        Arc::get_mut(&mut self.context)\n            .expect(\"should be no other pointers\")\n            .initialize(public_addr, effect_builder.into_inner());\n\n        let protocol_version = self.context.chain_info().protocol_version;\n        // Run the server task.\n        // We spawn it ourselves instead of through an effect to get a hold of the join handle,\n        // which we need to shutdown cleanly later on.\n        info!(%local_addr, %public_addr, %protocol_version, \"starting server background task\");\n\n        let (server_shutdown_sender, server_shutdown_receiver) = watch::channel(());\n        let (close_incoming_sender, close_incoming_receiver) = watch::channel(());\n\n        let context = self.context.clone();\n        let server_join_handle = tokio::spawn(\n            tasks::server(\n                context,\n                
tokio::net::TcpListener::from_std(listener).map_err(Error::ListenerConversion)?,\n                server_shutdown_receiver,\n            )\n            .in_current_span(),\n        );\n\n        let channel_management = ChannelManagement {\n            shutdown_sender: Some(server_shutdown_sender),\n            server_join_handle: Some(server_join_handle),\n            close_incoming_sender: Some(close_incoming_sender),\n            close_incoming_receiver,\n        };\n\n        self.channel_management = Some(channel_management);\n\n        // Learn all known addresses and mark them as unforgettable.\n        let now = Instant::now();\n        let dial_requests: Vec<_> = known_addresses\n            .into_iter()\n            .filter_map(|addr| self.outgoing_manager.learn_addr(addr, true, now))\n            .collect();\n\n        let mut effects = self.process_dial_requests(dial_requests);\n\n        // Start broadcasting our public listening address.\n        effects.extend(\n            effect_builder\n                .set_timeout(self.cfg.initial_gossip_delay.into())\n                .event(|_| Event::GossipOurAddress),\n        );\n\n        // Start regular housekeeping of the outgoing connections.\n        effects.extend(\n            effect_builder\n                .set_timeout(OUTGOING_MANAGER_SWEEP_INTERVAL)\n                .event(|_| Event::SweepOutgoing),\n        );\n\n        <Self as InitializedComponent<REv>>::set_state(self, ComponentState::Initialized);\n        Ok(effects)\n    }\n\n    /// Should only be called after component has been initialized.\n    fn channel_management(&self) -> &ChannelManagement {\n        self.channel_management\n            .as_ref()\n            .expect(\"component not initialized properly\")\n    }\n\n    /// Queues a message to be sent to validator nodes in the given era.\n    fn broadcast_message_to_validators(&self, msg: Arc<Message<P>>, era_id: EraId) {\n        self.net_metrics.broadcast_requests.inc();\n\n      
  let mut total_connected_validators_in_era = 0;\n        let mut total_outgoing_manager_connected_peers = 0;\n\n        for peer_id in self.outgoing_manager.connected_peers() {\n            total_outgoing_manager_connected_peers += 1;\n            if self.outgoing_limiter.is_validator_in_era(era_id, &peer_id) {\n                total_connected_validators_in_era += 1;\n                self.send_message(peer_id, msg.clone(), None);\n            }\n        }\n\n        debug!(\n            msg = %msg,\n            era = era_id.value(),\n            total_connected_validators_in_era,\n            total_outgoing_manager_connected_peers,\n            \"broadcast_message_to_validators\"\n        );\n    }\n\n    /// Queues a message to `count` random nodes on the network.\n    fn gossip_message(\n        &self,\n        rng: &mut NodeRng,\n        msg: Arc<Message<P>>,\n        gossip_target: GossipTarget,\n        count: usize,\n        exclude: &HashSet<NodeId>,\n    ) -> HashSet<NodeId> {\n        let is_validator_in_era =\n            |era: EraId, peer_id: &NodeId| self.outgoing_limiter.is_validator_in_era(era, peer_id);\n        let peer_ids = choose_gossip_peers(\n            rng,\n            gossip_target,\n            count,\n            exclude,\n            self.outgoing_manager.connected_peers(),\n            is_validator_in_era,\n        );\n        if peer_ids.len() != count {\n            let not_excluded = self\n                .outgoing_manager\n                .connected_peers()\n                .filter(|peer_id| !exclude.contains(peer_id))\n                .count();\n            if not_excluded > 0 {\n                let connected = self.outgoing_manager.connected_peers().count();\n                debug!(\n                    our_id=%self.context.our_id(),\n                    %gossip_target,\n                    wanted = count,\n                    connected,\n                    not_excluded,\n                    selected = peer_ids.len(),\n          
          \"could not select enough random nodes for gossiping\"\n                );\n            }\n        }\n\n        for &peer_id in &peer_ids {\n            self.send_message(peer_id, msg.clone(), None);\n        }\n\n        peer_ids.into_iter().collect()\n    }\n\n    /// Queues a message to be sent to a specific node.\n    fn send_message(\n        &self,\n        dest: NodeId,\n        msg: Arc<Message<P>>,\n        opt_responder: Option<AutoClosingResponder<()>>,\n    ) {\n        // Try to send the message.\n        if let Some(connection) = self.outgoing_manager.get_route(dest) {\n            if msg.payload_is_unsafe_for_syncing_nodes() && self.syncing_nodes.contains(&dest) {\n                // We should never attempt to send an unsafe message to a peer that we know is still\n                // syncing. Since \"unsafe\" does usually not mean immediately catastrophic, we\n                // attempt to carry on, but warn loudly.\n                error!(kind=%msg.classify(), node_id=%dest, \"sending unsafe message to syncing node\");\n            }\n\n            if let Err(msg) = connection.sender.send((msg, opt_responder)) {\n                // We lost the connection, but that fact has not reached us yet.\n                warn!(our_id=%self.context.our_id(), %dest, ?msg, \"dropped outgoing message, lost connection\");\n            } else {\n                self.net_metrics.queued_messages.inc();\n            }\n        } else {\n            // We are not connected, so the reconnection is likely already in progress.\n            debug!(our_id=%self.context.our_id(), %dest, ?msg, \"dropped outgoing message, no connection\");\n        }\n    }\n\n    fn handle_incoming_connection(\n        &mut self,\n        incoming: Box<IncomingConnection<P>>,\n        span: Span,\n    ) -> Effects<Event<P>> {\n        span.clone().in_scope(|| match *incoming {\n            IncomingConnection::FailedEarly {\n                peer_addr: _,\n                ref error,\n   
         } => {\n                // Failed without much info, there is little we can do about this.\n                debug!(err=%display_error(error), \"incoming connection failed early\");\n                Effects::new()\n            }\n            IncomingConnection::Failed {\n                peer_addr: _,\n                peer_id: _,\n                ref error,\n            } => {\n                debug!(\n                    err = display_error(error),\n                    \"incoming connection failed after TLS setup\"\n                );\n                Effects::new()\n            }\n            IncomingConnection::Loopback => {\n                // Loopback connections are closed immediately, but will be marked as such by the\n                // outgoing manager. We still record that it succeeded in the log, but this should\n                // be the only time per component instantiation that this happens.\n                info!(\"successful incoming loopback connection, will be dropped\");\n                Effects::new()\n            }\n            IncomingConnection::Established {\n                peer_addr,\n                public_addr,\n                peer_id,\n                peer_consensus_public_key,\n                stream,\n            } => {\n                if self.cfg.max_incoming_peer_connections != 0 {\n                    if let Some(symmetries) = self.connection_symmetries.get(&peer_id) {\n                        let incoming_count = symmetries\n                            .incoming_addrs()\n                            .map(BTreeSet::len)\n                            .unwrap_or_default();\n\n                        if incoming_count >= self.cfg.max_incoming_peer_connections as usize {\n                            info!(%public_addr,\n                                  %peer_id,\n                                  count=incoming_count,\n                                  limit=self.cfg.max_incoming_peer_connections,\n                              
    \"rejecting new incoming connection, limit for peer exceeded\"\n                            );\n                            return Effects::new();\n                        }\n                    }\n                }\n\n                info!(%public_addr, \"new incoming connection established\");\n\n                // Learn the address the peer gave us.\n                let dial_requests =\n                    self.outgoing_manager\n                        .learn_addr(public_addr, false, Instant::now());\n                let mut effects = self.process_dial_requests(dial_requests);\n\n                // Update connection symmetries.\n                if self\n                    .connection_symmetries\n                    .entry(peer_id)\n                    .or_default()\n                    .add_incoming(peer_addr, Instant::now())\n                {\n                    self.connection_completed(peer_id);\n\n                    // We should NOT update the syncing set when we receive an incoming connection,\n                    // because the `message_sender` which is handling the corresponding outgoing\n                    // connection will not receive the update of the syncing state of the remote\n                    // peer.\n                    //\n                    // Such desync may cause the node to try to send \"unsafe\" requests to the\n                    // syncing node, because the outgoing connection may outlive the\n                    // incoming one, i.e. 
it may take some time to drop \"our\" outgoing\n                    // connection after a peer has closed the corresponding incoming connection.\n                }\n\n                // Now we can start the message reader.\n                let boxed_span = Box::new(span.clone());\n                effects.extend(\n                    tasks::message_reader(\n                        self.context.clone(),\n                        stream,\n                        self.incoming_limiter\n                            .create_handle(peer_id, peer_consensus_public_key),\n                        self.channel_management().close_incoming_receiver.clone(),\n                        peer_id,\n                        span.clone(),\n                    )\n                    .instrument(span)\n                    .event(move |result| Event::IncomingClosed {\n                        result,\n                        peer_id: Box::new(peer_id),\n                        peer_addr,\n                        span: boxed_span,\n                    }),\n                );\n\n                effects\n            }\n        })\n    }\n\n    fn handle_incoming_closed(\n        &mut self,\n        result: io::Result<()>,\n        peer_id: NodeId,\n        peer_addr: SocketAddr,\n        span: Span,\n    ) -> Effects<Event<P>> {\n        span.in_scope(|| {\n            // Log the outcome.\n            match result {\n                Ok(()) => info!(\"regular connection closing\"),\n                Err(ref err) => warn!(err = display_error(err), \"connection dropped\"),\n            }\n\n            // Update the connection symmetries.\n            if let Entry::Occupied(mut entry) = self.connection_symmetries.entry(peer_id) {\n                if entry.get_mut().remove_incoming(peer_addr, Instant::now()) {\n                    entry.remove();\n                }\n            }\n\n            Effects::new()\n        })\n    }\n\n    /// Determines whether an outgoing peer should be blocked based on 
the connection error.\n    fn is_blockable_offense_for_outgoing(\n        error: &ConnectionError,\n    ) -> Option<BlocklistJustification> {\n        match error {\n            // Potentially transient failures.\n            //\n            // Note that incompatible versions need to be considered transient, since they occur\n            // during regular upgrades.\n            ConnectionError::TlsInitialization(_)\n            | ConnectionError::TcpConnection(_)\n            | ConnectionError::TcpNoDelay(_)\n            | ConnectionError::TlsHandshake(_)\n            | ConnectionError::HandshakeSend(_)\n            | ConnectionError::HandshakeRecv(_)\n            | ConnectionError::HandshakeNotAllowed\n            | ConnectionError::IncompatibleVersion(_) => None,\n\n            // These errors are potential bugs on our side.\n            ConnectionError::HandshakeSenderCrashed(_)\n            | ConnectionError::FailedToReuniteHandshakeSinkAndStream\n            | ConnectionError::CouldNotEncodeOurHandshake(_) => None,\n\n            // These could be candidates for blocking, but for now we decided not to.\n            ConnectionError::NoPeerCertificate\n            | ConnectionError::PeerCertificateInvalid(_)\n            | ConnectionError::DidNotSendHandshake\n            | ConnectionError::InvalidRemoteHandshakeMessage(_)\n            | ConnectionError::InvalidConsensusCertificate(_) => None,\n\n            // Definitely something we want to avoid.\n            ConnectionError::WrongNetwork(peer_network_name) => {\n                Some(BlocklistJustification::WrongNetwork {\n                    peer_network_name: peer_network_name.clone(),\n                })\n            }\n            ConnectionError::WrongChainspecHash(peer_chainspec_hash) => {\n                Some(BlocklistJustification::WrongChainspecHash {\n                    peer_chainspec_hash: *peer_chainspec_hash,\n                })\n            }\n            ConnectionError::MissingChainspecHash 
=> {\n                Some(BlocklistJustification::MissingChainspecHash)\n            }\n        }\n    }\n\n    /// Sets up an established outgoing connection.\n    ///\n    /// Initiates sending of the handshake as soon as the connection is established.\n    #[allow(clippy::redundant_clone)]\n    fn handle_outgoing_connection(\n        &mut self,\n        outgoing: OutgoingConnection<P>,\n        span: Span,\n        rng: &mut NodeRng,\n    ) -> Effects<Event<P>> {\n        let now = Instant::now();\n        span.clone().in_scope(|| match outgoing {\n            OutgoingConnection::FailedEarly { peer_addr, error }\n            | OutgoingConnection::Failed {\n                peer_addr,\n                peer_id: _,\n                error,\n            } => {\n                debug!(err=%display_error(&error), \"outgoing connection failed\");\n                // We perform blocking first, to not trigger a reconnection before blocking.\n                let mut requests = Vec::new();\n\n                if let Some(justification) = Self::is_blockable_offense_for_outgoing(&error) {\n                    requests.extend(self.outgoing_manager.block_addr(\n                        peer_addr,\n                        now,\n                        justification,\n                        rng,\n                    ));\n                }\n\n                // Now we can proceed with the regular updates.\n                requests.extend(\n                    self.outgoing_manager\n                        .handle_dial_outcome(DialOutcome::Failed {\n                            addr: peer_addr,\n                            error,\n                            when: now,\n                        }),\n                );\n\n                self.process_dial_requests(requests)\n            }\n            OutgoingConnection::Loopback { peer_addr } => {\n                // Loopback connections are marked, but closed.\n                info!(\"successful outgoing loopback connection, will be 
dropped\");\n                let request = self\n                    .outgoing_manager\n                    .handle_dial_outcome(DialOutcome::Loopback { addr: peer_addr });\n                self.process_dial_requests(request)\n            }\n            OutgoingConnection::Established {\n                peer_addr,\n                peer_id,\n                peer_consensus_public_key,\n                sink,\n                is_syncing,\n            } => {\n                info!(\"new outgoing connection established\");\n\n                let (sender, receiver) = mpsc::unbounded_channel();\n                let handle = OutgoingHandle { sender, peer_addr };\n\n                let request = self\n                    .outgoing_manager\n                    .handle_dial_outcome(DialOutcome::Successful {\n                        addr: peer_addr,\n                        handle,\n                        node_id: peer_id,\n                        when: now,\n                    });\n\n                let mut effects = self.process_dial_requests(request);\n\n                // Update connection symmetries.\n                if self\n                    .connection_symmetries\n                    .entry(peer_id)\n                    .or_default()\n                    .mark_outgoing(now)\n                {\n                    self.connection_completed(peer_id);\n                    self.update_syncing_nodes_set(peer_id, is_syncing);\n                }\n                effects.extend(\n                    tasks::message_sender(\n                        receiver,\n                        sink,\n                        self.outgoing_limiter\n                            .create_handle(peer_id, peer_consensus_public_key),\n                        self.net_metrics.queued_messages.clone(),\n                    )\n                    .instrument(span)\n                    .event(move |_| Event::OutgoingDropped {\n                        peer_id: Box::new(peer_id),\n                      
  peer_addr,\n                    }),\n                );\n\n                effects\n            }\n        })\n    }\n\n    fn handle_network_request(\n        &self,\n        request: NetworkRequest<P>,\n        rng: &mut NodeRng,\n    ) -> Effects<Event<P>> {\n        match request {\n            NetworkRequest::SendMessage {\n                dest,\n                payload,\n                respond_after_queueing,\n                auto_closing_responder,\n            } => {\n                // We're given a message to send. Pass on the responder so that confirmation\n                // can later be given once the message has actually been buffered.\n                self.net_metrics.direct_message_requests.inc();\n\n                if respond_after_queueing {\n                    self.send_message(*dest, Arc::new(Message::Payload(*payload)), None);\n                    auto_closing_responder.respond(()).ignore()\n                } else {\n                    self.send_message(\n                        *dest,\n                        Arc::new(Message::Payload(*payload)),\n                        Some(auto_closing_responder),\n                    );\n                    Effects::new()\n                }\n            }\n            NetworkRequest::ValidatorBroadcast {\n                payload,\n                era_id,\n                auto_closing_responder,\n            } => {\n                // We're given a message to broadcast.\n                self.broadcast_message_to_validators(Arc::new(Message::Payload(*payload)), era_id);\n                auto_closing_responder.respond(()).ignore()\n            }\n            NetworkRequest::Gossip {\n                payload,\n                gossip_target,\n                count,\n                exclude,\n                auto_closing_responder,\n            } => {\n                // We're given a message to gossip.\n                let sent_to = self.gossip_message(\n                    rng,\n                    
Arc::new(Message::Payload(*payload)),\n                    gossip_target,\n                    count,\n                    &exclude,\n                );\n                auto_closing_responder.respond(sent_to).ignore()\n            }\n        }\n    }\n\n    fn handle_outgoing_dropped(\n        &mut self,\n        peer_id: NodeId,\n        peer_addr: SocketAddr,\n    ) -> Effects<Event<P>> {\n        let requests = self\n            .outgoing_manager\n            .handle_connection_drop(peer_addr, Instant::now());\n\n        if let Entry::Occupied(mut entry) = self.connection_symmetries.entry(peer_id) {\n            if entry.get_mut().unmark_outgoing(Instant::now()) {\n                entry.remove();\n            }\n        }\n\n        self.outgoing_limiter.remove_connected_validator(&peer_id);\n\n        self.process_dial_requests(requests)\n    }\n\n    /// Processes a set of `DialRequest`s, updating the component and emitting needed effects.\n    fn process_dial_requests<T>(&mut self, requests: T) -> Effects<Event<P>>\n    where\n        T: IntoIterator<Item = DialRequest<OutgoingHandle<P>>>,\n    {\n        let mut effects = Effects::new();\n\n        for request in requests {\n            trace!(%request, \"processing dial request\");\n            match request {\n                DialRequest::Dial { addr, span } => effects.extend(\n                    tasks::connect_outgoing(self.context.clone(), addr)\n                        .instrument(span.clone())\n                        .event(|outgoing| Event::OutgoingConnection {\n                            outgoing: Box::new(outgoing),\n                            span,\n                        }),\n                ),\n                DialRequest::Disconnect { handle: _, span } => {\n                    // Dropping the `handle` is enough to signal the connection to shutdown.\n                    span.in_scope(|| {\n                        debug!(\"dropping connection, as requested\");\n                    });\n     
           }\n                DialRequest::SendPing {\n                    peer_id,\n                    nonce,\n                    span,\n                } => span.in_scope(|| {\n                    trace!(\"enqueuing ping to be sent\");\n                    self.send_message(peer_id, Arc::new(Message::Ping { nonce }), None);\n                }),\n            }\n        }\n\n        effects\n    }\n\n    /// Handles a received message.\n    fn handle_incoming_message(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        peer_id: NodeId,\n        msg: Message<P>,\n        span: Span,\n    ) -> Effects<Event<P>>\n    where\n        REv: FromIncoming<P> + From<PeerBehaviorAnnouncement>,\n    {\n        span.in_scope(|| match msg {\n            Message::Handshake { .. } => {\n                // We should never receive a handshake message on an established connection. Simply\n                // discard it. This may be too lenient, so we may consider simply dropping the\n                // connection in the future instead.\n                warn!(\"received unexpected handshake\");\n                Effects::new()\n            }\n            Message::Ping { nonce } => {\n                // Send a pong. 
Incoming pings and pongs are rate limited.\n\n                self.send_message(peer_id, Arc::new(Message::Pong { nonce }), None);\n                Effects::new()\n            }\n            Message::Pong { nonce } => {\n                // Record the time the pong arrived and forward it to outgoing.\n                let pong = TaggedTimestamp::from_parts(Instant::now(), nonce);\n                if self.outgoing_manager.record_pong(peer_id, pong) {\n                    // Note: We no longer block peers here with a `PongLimitExceeded` for failed\n                    //       pongs, merely warn.\n                    info!(\n                        \"peer {} exceeded failed pong limit, or allowed number of pongs\",\n                        peer_id // Redundant information due to span, but better safe than sorry.\n                    );\n                }\n\n                Effects::new()\n            }\n            Message::Payload(payload) => {\n                effect_builder.announce_incoming(peer_id, payload).ignore()\n            }\n        })\n    }\n\n    /// Emits an announcement that a connection has been completed.\n    fn connection_completed(&self, peer_id: NodeId) {\n        trace!(num_peers = self.peers().len(), new_peer=%peer_id, \"connection complete\");\n        self.net_metrics.peers.set(self.peers().len() as i64);\n    }\n\n    /// Updates a set of known joining nodes.\n    /// If we've just connected to a non-joining node that peer will be removed from the set.\n    fn update_syncing_nodes_set(&mut self, peer_id: NodeId, is_syncing: bool) {\n        // Update set of syncing peers.\n        if is_syncing {\n            debug!(%peer_id, \"is syncing\");\n            self.syncing_nodes.insert(peer_id);\n        } else {\n            debug!(%peer_id, \"is no longer syncing\");\n            self.syncing_nodes.remove(&peer_id);\n        }\n    }\n\n    /// Returns the set of connected nodes.\n    pub(crate) fn peers(&self) -> BTreeMap<NodeId, String> {\n   
     let mut ret = BTreeMap::new();\n        for node_id in self.outgoing_manager.connected_peers() {\n            if let Some(connection) = self.outgoing_manager.get_route(node_id) {\n                ret.insert(node_id, connection.peer_addr.to_string());\n            } else {\n                // This should never happen unless the state of `OutgoingManager` is corrupt.\n                warn!(%node_id, \"route disappeared unexpectedly\")\n            }\n        }\n\n        for (node_id, sym) in &self.connection_symmetries {\n            if let Some(addrs) = sym.incoming_addrs() {\n                for addr in addrs {\n                    ret.entry(*node_id).or_insert_with(|| addr.to_string());\n                }\n            }\n        }\n\n        ret\n    }\n\n    pub(crate) fn fully_connected_peers_random(\n        &self,\n        rng: &mut NodeRng,\n        count: usize,\n    ) -> Vec<NodeId> {\n        self.connection_symmetries\n            .iter()\n            .filter(|(_, sym)| matches!(sym, ConnectionSymmetry::Symmetric { .. }))\n            .map(|(node_id, _)| *node_id)\n            .choose_multiple(rng, count)\n    }\n\n    pub(crate) fn fully_connected_validators_random(\n        &self,\n        rng: &mut NodeRng,\n        count: usize,\n        era_id: EraId,\n    ) -> Vec<NodeId> {\n        let is_validator_in_era =\n            |era: EraId, peer_id: &NodeId| self.outgoing_limiter.is_validator_in_era(era, peer_id);\n        self.connection_symmetries\n            .iter()\n            .filter(|(_, sym)| matches!(sym, ConnectionSymmetry::Symmetric { .. }))\n            .map(|(node_id, _)| *node_id)\n            .filter(|node_id| is_validator_in_era(era_id, node_id))\n            .choose_multiple(rng, count)\n    }\n\n    pub(crate) fn has_sufficient_fully_connected_peers(&self) -> bool {\n        self.connection_symmetries\n            .iter()\n            .filter(|(_node_id, sym)| matches!(sym, ConnectionSymmetry::Symmetric { .. 
}))\n            .count()\n            >= self.cfg.min_peers_for_initialization as usize\n    }\n\n    #[cfg(test)]\n    /// Returns the node id of this network node.\n    pub(crate) fn node_id(&self) -> NodeId {\n        self.context.our_id()\n    }\n}\n\n#[cfg(test)]\nconst MAX_METRICS_DROP_ATTEMPTS: usize = 25;\n\n#[cfg(test)]\nconst DROP_RETRY_DELAY: Duration = Duration::from_millis(100);\n\n#[cfg(test)]\nimpl<REv, P> crate::reactor::Finalize for Network<REv, P>\nwhere\n    REv: Send + 'static,\n    P: Payload,\n{\n    fn finalize(mut self) -> BoxFuture<'static, ()> {\n        async move {\n            if let Some(mut channel_management) = self.channel_management.take() {\n                // Close the shutdown socket, causing the server to exit.\n                drop(channel_management.shutdown_sender.take());\n                drop(channel_management.close_incoming_sender.take());\n\n                // Wait for the server to exit cleanly.\n                if let Some(join_handle) = channel_management.server_join_handle.take() {\n                    match join_handle.await {\n                        Ok(_) => debug!(our_id=%self.context.our_id(), \"server exited cleanly\"),\n                        Err(ref err) => {\n                            error!(\n                                our_id=%self.context.our_id(),\n                                err=display_error(err),\n                                \"could not join server task cleanly\"\n                            );\n                        }\n                    }\n                }\n            }\n\n            // Ensure there are no ongoing metrics updates.\n            utils::wait_for_arc_drop(\n                self.net_metrics,\n                MAX_METRICS_DROP_ATTEMPTS,\n                DROP_RETRY_DELAY,\n            )\n            .await;\n        }\n        .boxed()\n    }\n}\n\nfn choose_gossip_peers<F>(\n    rng: &mut NodeRng,\n    gossip_target: GossipTarget,\n    count: usize,\n    exclude: 
&HashSet<NodeId>,\n    connected_peers: impl Iterator<Item = NodeId>,\n    is_validator_in_era: F,\n) -> HashSet<NodeId>\nwhere\n    F: Fn(EraId, &NodeId) -> bool,\n{\n    let filtered_peers = connected_peers.filter(|peer_id| !exclude.contains(peer_id));\n    match gossip_target {\n        GossipTarget::Mixed(era_id) => {\n            let (validators, non_validators): (Vec<_>, Vec<_>) =\n                filtered_peers.partition(|node_id| is_validator_in_era(era_id, node_id));\n\n            let (first, second) = if rng.gen() {\n                (validators, non_validators)\n            } else {\n                (non_validators, validators)\n            };\n\n            first\n                .choose_multiple(rng, count)\n                .interleave(second.iter().choose_multiple(rng, count))\n                .take(count)\n                .copied()\n                .collect()\n        }\n        GossipTarget::All => filtered_peers\n            .choose_multiple(rng, count)\n            .into_iter()\n            .collect(),\n    }\n}\n\nimpl<REv, P> Component<REv> for Network<REv, P>\nwhere\n    REv: ReactorEvent\n        + From<Event<P>>\n        + From<BeginGossipRequest<GossipedAddress>>\n        + FromIncoming<P>\n        + From<StorageRequest>\n        + From<NetworkRequest<P>>\n        + From<PeerBehaviorAnnouncement>,\n    P: Payload,\n{\n    type Event = Event<P>;\n\n    fn name(&self) -> &str {\n        COMPONENT_NAME\n    }\n\n    fn handle_event(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event> {\n        match &self.state {\n            ComponentState::Fatal(msg) => {\n                error!(\n                    msg,\n                    ?event,\n                    name = <Self as Component<REv>>::name(self),\n                    \"should not handle this event when this component has fatal error\"\n                );\n                Effects::new()\n    
        }\n            ComponentState::Uninitialized => {\n                warn!(\n                    ?event,\n                    name = <Self as Component<REv>>::name(self),\n                    \"should not handle this event when component is uninitialized\"\n                );\n                Effects::new()\n            }\n            ComponentState::Initializing => match event {\n                Event::Initialize => match self.initialize(effect_builder) {\n                    Ok(effects) => effects,\n                    Err(error) => {\n                        error!(%error, \"failed to initialize network component\");\n                        <Self as InitializedComponent<REv>>::set_state(\n                            self,\n                            ComponentState::Fatal(error.to_string()),\n                        );\n                        Effects::new()\n                    }\n                },\n                Event::IncomingConnection { .. }\n                | Event::IncomingMessage { .. }\n                | Event::IncomingClosed { .. }\n                | Event::OutgoingConnection { .. }\n                | Event::OutgoingDropped { .. }\n                | Event::NetworkRequest { .. }\n                | Event::NetworkInfoRequest { .. 
}\n                | Event::GossipOurAddress\n                | Event::PeerAddressReceived(_)\n                | Event::SweepOutgoing\n                | Event::BlocklistAnnouncement(_) => {\n                    warn!(\n                        ?event,\n                        name = <Self as Component<REv>>::name(self),\n                        \"should not handle this event when component is pending initialization\"\n                    );\n                    Effects::new()\n                }\n            },\n            ComponentState::Initialized => match event {\n                Event::Initialize => {\n                    error!(\n                        ?event,\n                        name = <Self as Component<REv>>::name(self),\n                        \"component already initialized\"\n                    );\n                    Effects::new()\n                }\n                Event::IncomingConnection { incoming, span } => {\n                    self.handle_incoming_connection(incoming, span)\n                }\n                Event::IncomingMessage { peer_id, msg, span } => {\n                    self.handle_incoming_message(effect_builder, *peer_id, *msg, span)\n                }\n                Event::IncomingClosed {\n                    result,\n                    peer_id,\n                    peer_addr,\n                    span,\n                } => self.handle_incoming_closed(result, *peer_id, peer_addr, *span),\n                Event::OutgoingConnection { outgoing, span } => {\n                    self.handle_outgoing_connection(*outgoing, span, rng)\n                }\n                Event::OutgoingDropped { peer_id, peer_addr } => {\n                    self.handle_outgoing_dropped(*peer_id, peer_addr)\n                }\n                Event::NetworkRequest { req: request } => {\n                    self.handle_network_request(*request, rng)\n                }\n                Event::NetworkInfoRequest { req } => match *req {\n          
          NetworkInfoRequest::Peers { responder } => {\n                        responder.respond(self.peers()).ignore()\n                    }\n                    NetworkInfoRequest::FullyConnectedPeers { count, responder } => responder\n                        .respond(self.fully_connected_peers_random(rng, count))\n                        .ignore(),\n                    NetworkInfoRequest::FullyConnectedValidators {\n                        count,\n                        era_id,\n                        responder,\n                    } => responder\n                        .respond(self.fully_connected_validators_random(rng, count, era_id))\n                        .ignore(),\n                    NetworkInfoRequest::Insight { responder } => responder\n                        .respond(NetworkInsights::collect_from_component(self))\n                        .ignore(),\n                },\n                Event::GossipOurAddress => {\n                    let our_address = GossipedAddress::new(\n                        self.context\n                            .public_addr()\n                            .expect(\"component not initialized properly\"),\n                    );\n\n                    let mut effects = effect_builder\n                        .begin_gossip(our_address, Source::Ourself, our_address.gossip_target())\n                        .ignore();\n                    effects.extend(\n                        effect_builder\n                            .set_timeout(self.cfg.gossip_interval.into())\n                            .event(|_| Event::GossipOurAddress),\n                    );\n                    effects\n                }\n                Event::PeerAddressReceived(gossiped_address) => {\n                    let requests = self.outgoing_manager.learn_addr(\n                        gossiped_address.into(),\n                        false,\n                        Instant::now(),\n                    );\n                    
self.process_dial_requests(requests)\n                }\n                Event::SweepOutgoing => {\n                    let now = Instant::now();\n                    let requests = self.outgoing_manager.perform_housekeeping(rng, now);\n\n                    let mut effects = self.process_dial_requests(requests);\n\n                    effects.extend(\n                        effect_builder\n                            .set_timeout(OUTGOING_MANAGER_SWEEP_INTERVAL)\n                            .event(|_| Event::SweepOutgoing),\n                    );\n\n                    effects\n                }\n                Event::BlocklistAnnouncement(announcement) => match announcement {\n                    PeerBehaviorAnnouncement::OffenseCommitted {\n                        offender,\n                        justification,\n                    } => {\n                        // Note: We do not have a proper by-node-ID blocklist, but rather only block\n                        // the current outgoing address of a peer.\n                        info!(%offender, %justification, \"adding peer to blocklist after transgression\");\n\n                        if let Some(addr) = self.outgoing_manager.get_addr(*offender) {\n                            let requests = self.outgoing_manager.block_addr(\n                                addr,\n                                Instant::now(),\n                                *justification,\n                                rng,\n                            );\n                            self.process_dial_requests(requests)\n                        } else {\n                            // Peer got away with it, no longer an outgoing connection.\n                            Effects::new()\n                        }\n                    }\n                },\n            },\n        }\n    }\n}\n\nimpl<REv, P> InitializedComponent<REv> for Network<REv, P>\nwhere\n    REv: ReactorEvent\n        + From<Event<P>>\n        + 
From<BeginGossipRequest<GossipedAddress>>\n        + FromIncoming<P>\n        + From<StorageRequest>\n        + From<NetworkRequest<P>>\n        + From<PeerBehaviorAnnouncement>,\n    P: Payload,\n{\n    fn state(&self) -> &ComponentState {\n        &self.state\n    }\n\n    fn set_state(&mut self, new_state: ComponentState) {\n        info!(\n            ?new_state,\n            name = <Self as Component<REv>>::name(self),\n            \"component state changed\"\n        );\n\n        self.state = new_state;\n    }\n}\n\n/// Transport type alias for base encrypted connections.\ntype Transport = SslStream<TcpStream>;\n\n/// A framed transport for `Message`s.\npub(crate) type FullTransport<P> = tokio_serde::Framed<\n    FramedTransport,\n    Message<P>,\n    Arc<Message<P>>,\n    CountingFormat<BincodeFormat>,\n>;\n\npub(crate) type FramedTransport = tokio_util::codec::Framed<Transport, LengthDelimitedCodec>;\n\n/// Constructs a new full transport on a stream.\n///\n/// A full transport contains the framing as well as the encoding scheme used to send messages.\nfn full_transport<P>(\n    metrics: Weak<Metrics>,\n    connection_id: ConnectionId,\n    framed: FramedTransport,\n    role: Role,\n) -> FullTransport<P>\nwhere\n    for<'de> P: Serialize + Deserialize<'de>,\n    for<'de> Message<P>: Serialize + Deserialize<'de>,\n{\n    tokio_serde::Framed::new(\n        framed,\n        CountingFormat::new(metrics, connection_id, role, BincodeFormat::default()),\n    )\n}\n\n/// Constructs a framed transport.\nfn framed_transport(transport: Transport, maximum_net_message_size: u32) -> FramedTransport {\n    tokio_util::codec::Framed::new(\n        transport,\n        LengthDelimitedCodec::builder()\n            .max_frame_length(maximum_net_message_size as usize)\n            .new_codec(),\n    )\n}\n\nimpl<R, P> Debug for Network<R, P>\nwhere\n    P: Payload,\n{\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        // We output only the most important 
fields of the component, as it gets unwieldy quite fast\n        // otherwise.\n        f.debug_struct(\"Network\")\n            .field(\"our_id\", &self.context.our_id())\n            .field(\"state\", &self.state)\n            .field(\"public_addr\", &self.context.public_addr())\n            .finish()\n    }\n}\n\n#[cfg(test)]\nmod gossip_target_tests {\n    use std::{collections::BTreeSet, iter};\n\n    use static_assertions::const_assert;\n\n    use casper_types::testing::TestRng;\n\n    use super::*;\n\n    const VALIDATOR_COUNT: usize = 10;\n    const NON_VALIDATOR_COUNT: usize = 20;\n    // The tests assume that we have fewer validators than non-validators.\n    const_assert!(VALIDATOR_COUNT < NON_VALIDATOR_COUNT);\n\n    struct Fixture {\n        validators: BTreeSet<NodeId>,\n        non_validators: BTreeSet<NodeId>,\n        all_peers: Vec<NodeId>,\n    }\n\n    impl Fixture {\n        fn new(rng: &mut TestRng) -> Self {\n            let validators: BTreeSet<NodeId> = iter::repeat_with(|| NodeId::random(rng))\n                .take(VALIDATOR_COUNT)\n                .collect();\n            let non_validators: BTreeSet<NodeId> = iter::repeat_with(|| NodeId::random(rng))\n                .take(NON_VALIDATOR_COUNT)\n                .collect();\n\n            let mut all_peers: Vec<NodeId> = validators\n                .iter()\n                .copied()\n                .chain(non_validators.iter().copied())\n                .collect();\n            all_peers.shuffle(rng);\n\n            Fixture {\n                validators,\n                non_validators,\n                all_peers,\n            }\n        }\n\n        fn is_validator_in_era(&self) -> impl Fn(EraId, &NodeId) -> bool + '_ {\n            move |_era_id: EraId, node_id: &NodeId| self.validators.contains(node_id)\n        }\n\n        fn num_validators<'a>(&self, input: impl Iterator<Item = &'a NodeId>) -> usize {\n            input\n                .filter(move |&node_id| 
self.validators.contains(node_id))\n                .count()\n        }\n\n        fn num_non_validators<'a>(&self, input: impl Iterator<Item = &'a NodeId>) -> usize {\n            input\n                .filter(move |&node_id| self.non_validators.contains(node_id))\n                .count()\n        }\n    }\n\n    #[test]\n    fn should_choose_mixed() {\n        const TARGET: GossipTarget = GossipTarget::Mixed(EraId::new(1));\n\n        let mut rng = TestRng::new();\n        let fixture = Fixture::new(&mut rng);\n\n        // Choose more than total count from all peers, exclude none, should return all peers.\n        let chosen = choose_gossip_peers(\n            &mut rng,\n            TARGET,\n            VALIDATOR_COUNT + NON_VALIDATOR_COUNT + 1,\n            &HashSet::new(),\n            fixture.all_peers.iter().copied(),\n            fixture.is_validator_in_era(),\n        );\n        assert_eq!(chosen.len(), fixture.all_peers.len());\n\n        // Choose total count from all peers, exclude none, should return all peers.\n        let chosen = choose_gossip_peers(\n            &mut rng,\n            TARGET,\n            VALIDATOR_COUNT + NON_VALIDATOR_COUNT,\n            &HashSet::new(),\n            fixture.all_peers.iter().copied(),\n            fixture.is_validator_in_era(),\n        );\n        assert_eq!(chosen.len(), fixture.all_peers.len());\n\n        // Choose 2 * VALIDATOR_COUNT from all peers, exclude none, should return all validators and\n        // VALIDATOR_COUNT non-validators.\n        let chosen = choose_gossip_peers(\n            &mut rng,\n            TARGET,\n            2 * VALIDATOR_COUNT,\n            &HashSet::new(),\n            fixture.all_peers.iter().copied(),\n            fixture.is_validator_in_era(),\n        );\n        assert_eq!(chosen.len(), 2 * VALIDATOR_COUNT);\n        assert_eq!(fixture.num_validators(chosen.iter()), VALIDATOR_COUNT);\n        assert_eq!(fixture.num_non_validators(chosen.iter()), VALIDATOR_COUNT);\n\n    
    // Choose VALIDATOR_COUNT from all peers, exclude none, should return VALIDATOR_COUNT peers,\n        // half validators and half non-validators.\n        let chosen = choose_gossip_peers(\n            &mut rng,\n            TARGET,\n            VALIDATOR_COUNT,\n            &HashSet::new(),\n            fixture.all_peers.iter().copied(),\n            fixture.is_validator_in_era(),\n        );\n        assert_eq!(chosen.len(), VALIDATOR_COUNT);\n        assert_eq!(fixture.num_validators(chosen.iter()), VALIDATOR_COUNT / 2);\n        assert_eq!(\n            fixture.num_non_validators(chosen.iter()),\n            VALIDATOR_COUNT / 2\n        );\n\n        // Choose two from all peers, exclude none, should return two peers, one validator and one\n        // non-validator.\n        let chosen = choose_gossip_peers(\n            &mut rng,\n            TARGET,\n            2,\n            &HashSet::new(),\n            fixture.all_peers.iter().copied(),\n            fixture.is_validator_in_era(),\n        );\n        assert_eq!(chosen.len(), 2);\n        assert_eq!(fixture.num_validators(chosen.iter()), 1);\n        assert_eq!(fixture.num_non_validators(chosen.iter()), 1);\n\n        // Choose one from all peers, exclude none, should return one peer with 50-50 chance of\n        // being a validator.\n        let mut got_validator = false;\n        let mut got_non_validator = false;\n        let mut attempts = 0;\n        while !got_validator || !got_non_validator {\n            let chosen = choose_gossip_peers(\n                &mut rng,\n                TARGET,\n                1,\n                &HashSet::new(),\n                fixture.all_peers.iter().copied(),\n                fixture.is_validator_in_era(),\n            );\n            assert_eq!(chosen.len(), 1);\n            let node_id = chosen.iter().next().unwrap();\n            got_validator |= fixture.validators.contains(node_id);\n            got_non_validator |= 
fixture.non_validators.contains(node_id);\n            attempts += 1;\n            assert!(attempts < 1_000_000);\n        }\n\n        // Choose VALIDATOR_COUNT from all peers, exclude all but one validator, should return the\n        // one validator and VALIDATOR_COUNT - 1 non-validators.\n        let exclude: HashSet<_> = fixture\n            .validators\n            .iter()\n            .copied()\n            .take(VALIDATOR_COUNT - 1)\n            .collect();\n        let chosen = choose_gossip_peers(\n            &mut rng,\n            TARGET,\n            VALIDATOR_COUNT,\n            &exclude,\n            fixture.all_peers.iter().copied(),\n            fixture.is_validator_in_era(),\n        );\n        assert_eq!(chosen.len(), VALIDATOR_COUNT);\n        assert_eq!(fixture.num_validators(chosen.iter()), 1);\n        assert_eq!(\n            fixture.num_non_validators(chosen.iter()),\n            VALIDATOR_COUNT - 1\n        );\n        assert!(exclude.is_disjoint(&chosen));\n\n        // Choose 3 from all peers, exclude all non-validators, should return 3 validators.\n        let exclude: HashSet<_> = fixture.non_validators.iter().copied().collect();\n        let chosen = choose_gossip_peers(\n            &mut rng,\n            TARGET,\n            3,\n            &exclude,\n            fixture.all_peers.iter().copied(),\n            fixture.is_validator_in_era(),\n        );\n        assert_eq!(chosen.len(), 3);\n        assert_eq!(fixture.num_validators(chosen.iter()), 3);\n        assert!(exclude.is_disjoint(&chosen));\n    }\n\n    #[test]\n    fn should_choose_all() {\n        const TARGET: GossipTarget = GossipTarget::All;\n\n        let mut rng = TestRng::new();\n        let fixture = Fixture::new(&mut rng);\n\n        // Choose more than total count from all peers, exclude none, should return all peers.\n        let chosen = choose_gossip_peers(\n            &mut rng,\n            TARGET,\n            VALIDATOR_COUNT + NON_VALIDATOR_COUNT + 1,\n   
         &HashSet::new(),\n            fixture.all_peers.iter().copied(),\n            fixture.is_validator_in_era(),\n        );\n        assert_eq!(chosen.len(), fixture.all_peers.len());\n\n        // Choose total count from all peers, exclude none, should return all peers.\n        let chosen = choose_gossip_peers(\n            &mut rng,\n            TARGET,\n            VALIDATOR_COUNT + NON_VALIDATOR_COUNT,\n            &HashSet::new(),\n            fixture.all_peers.iter().copied(),\n            fixture.is_validator_in_era(),\n        );\n        assert_eq!(chosen.len(), fixture.all_peers.len());\n\n        // Choose VALIDATOR_COUNT from only validators, exclude none, should return all validators.\n        let chosen = choose_gossip_peers(\n            &mut rng,\n            TARGET,\n            VALIDATOR_COUNT,\n            &HashSet::new(),\n            fixture.validators.iter().copied(),\n            fixture.is_validator_in_era(),\n        );\n        assert_eq!(chosen.len(), VALIDATOR_COUNT);\n        assert_eq!(fixture.num_validators(chosen.iter()), VALIDATOR_COUNT);\n\n        // Choose VALIDATOR_COUNT from only non-validators, exclude none, should return\n        // VALIDATOR_COUNT non-validators.\n        let chosen = choose_gossip_peers(\n            &mut rng,\n            TARGET,\n            VALIDATOR_COUNT,\n            &HashSet::new(),\n            fixture.non_validators.iter().copied(),\n            fixture.is_validator_in_era(),\n        );\n        assert_eq!(chosen.len(), VALIDATOR_COUNT);\n        assert_eq!(fixture.num_non_validators(chosen.iter()), VALIDATOR_COUNT);\n\n        // Choose VALIDATOR_COUNT from all peers, exclude all but VALIDATOR_COUNT from all peers,\n        // should return all the non-excluded peers.\n        let exclude: HashSet<_> = fixture\n            .all_peers\n            .iter()\n            .copied()\n            .take(NON_VALIDATOR_COUNT)\n            .collect();\n        let chosen = choose_gossip_peers(\n      
      &mut rng,\n            TARGET,\n            VALIDATOR_COUNT,\n            &exclude,\n            fixture.all_peers.iter().copied(),\n            fixture.is_validator_in_era(),\n        );\n        assert_eq!(chosen.len(), VALIDATOR_COUNT);\n        assert!(exclude.is_disjoint(&chosen));\n\n        // Choose one from all peers, exclude enough non-validators to have an even chance of\n        // returning a validator as a non-validator, should return one peer with 50-50 chance of\n        // being a validator.\n        let exclude: HashSet<_> = fixture\n            .non_validators\n            .iter()\n            .copied()\n            .take(NON_VALIDATOR_COUNT - VALIDATOR_COUNT)\n            .collect();\n        let mut got_validator = false;\n        let mut got_non_validator = false;\n        let mut attempts = 0;\n        while !got_validator || !got_non_validator {\n            let chosen = choose_gossip_peers(\n                &mut rng,\n                TARGET,\n                1,\n                &exclude,\n                fixture.all_peers.iter().copied(),\n                fixture.is_validator_in_era(),\n            );\n            assert_eq!(chosen.len(), 1);\n            assert!(exclude.is_disjoint(&chosen));\n            let node_id = chosen.iter().next().unwrap();\n            got_validator |= fixture.validators.contains(node_id);\n            got_non_validator |= fixture.non_validators.contains(node_id);\n            attempts += 1;\n            assert!(attempts < 1_000_000);\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/rest_server/config.rs",
    "content": "use datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\n/// Default binding address for the REST HTTP server.\n///\n/// Uses a fixed port per node, but binds on any interface.\nconst DEFAULT_ADDRESS: &str = \"0.0.0.0:0\";\n/// Default rate limit in qps.\nconst DEFAULT_QPS_LIMIT: u64 = 100;\n/// Default CORS origin.\nconst DEFAULT_CORS_ORIGIN: &str = \"\";\n\n/// REST HTTP server configuration.\n#[derive(Clone, DataSize, Debug, Deserialize, Serialize)]\n// Disallow unknown fields to ensure config files and command-line overrides contain valid keys.\n#[serde(deny_unknown_fields)]\npub struct Config {\n    /// Setting to enable the HTTP server.\n    pub enable_server: bool,\n\n    /// Address to bind REST HTTP server to.\n    pub address: String,\n\n    /// Max rate limit in qps.\n    pub qps_limit: u64,\n\n    /// CORS origin.\n    pub cors_origin: String,\n}\n\nimpl Config {\n    /// Creates a default instance for `RestServer`.\n    pub fn new() -> Self {\n        Config {\n            enable_server: true,\n            address: DEFAULT_ADDRESS.to_string(),\n            qps_limit: DEFAULT_QPS_LIMIT,\n            cors_origin: DEFAULT_CORS_ORIGIN.to_string(),\n        }\n    }\n}\n\nimpl Default for Config {\n    fn default() -> Self {\n        Config::new()\n    }\n}\n"
  },
  {
    "path": "node/src/components/rest_server/docs.rs",
    "content": "use casper_types::{ProtocolVersion, PublicKey, SecretKey, Timestamp};\nuse once_cell::sync::Lazy;\n\nuse crate::types::InternalEraReport;\n\npub(crate) const DOCS_EXAMPLE_PROTOCOL_VERSION: ProtocolVersion =\n    ProtocolVersion::from_parts(1, 5, 3);\n\n/// A trait used to generate a static hardcoded example of `Self`.\npub trait DocExample {\n    /// Generates a hardcoded example of `Self`.\n    fn doc_example() -> &'static Self;\n}\n\nimpl DocExample for Timestamp {\n    fn doc_example() -> &'static Self {\n        Timestamp::example()\n    }\n}\n\nstatic INTERNAL_ERA_REPORT: Lazy<InternalEraReport> = Lazy::new(|| {\n    let secret_key_1 = SecretKey::ed25519_from_bytes([0; 32]).unwrap();\n    let public_key_1 = PublicKey::from(&secret_key_1);\n    let equivocators = vec![public_key_1];\n\n    let secret_key_3 = SecretKey::ed25519_from_bytes([2; 32]).unwrap();\n    let public_key_3 = PublicKey::from(&secret_key_3);\n    let inactive_validators = vec![public_key_3];\n\n    InternalEraReport {\n        equivocators,\n        inactive_validators,\n    }\n});\n\nimpl DocExample for InternalEraReport {\n    fn doc_example() -> &'static Self {\n        &INTERNAL_ERA_REPORT\n    }\n}\n"
  },
  {
    "path": "node/src/components/rest_server/event.rs",
    "content": "use std::fmt::{self, Display, Formatter};\n\nuse derive_more::From;\nuse static_assertions::const_assert;\n\nuse crate::effect::{requests::RestRequest, Responder};\n\nconst _REST_EVENT_SIZE: usize = size_of::<Event>();\nconst_assert!(_REST_EVENT_SIZE < 89);\n\n#[derive(Debug, From)]\npub(crate) enum Event {\n    Initialize,\n    #[from]\n    RestRequest(RestRequest),\n    GetMetricsResult {\n        text: Option<String>,\n        main_responder: Responder<Option<String>>,\n    },\n}\n\nimpl Display for Event {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            Event::Initialize => write!(formatter, \"initialize\"),\n            Event::RestRequest(request) => write!(formatter, \"{}\", request),\n            Event::GetMetricsResult { text, .. } => match text {\n                Some(txt) => write!(formatter, \"get metrics ({} bytes)\", txt.len()),\n                None => write!(formatter, \"get metrics (failed)\"),\n            },\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/rest_server/filters.rs",
    "content": "use futures::FutureExt;\nuse http::Response;\nuse hyper::Body;\nuse tracing::warn;\nuse warp::{\n    filters::BoxedFilter,\n    http::StatusCode,\n    reject::Rejection,\n    reply::{self, Reply},\n    Filter,\n};\n\nuse casper_types::ProtocolVersion;\n\nuse super::{GetChainspecResult, GetValidatorChangesResult, ReactorEventT};\nuse crate::{\n    effect::{requests::RestRequest, EffectBuilder},\n    reactor::QueueKind,\n    types::GetStatusResult,\n};\n\n/// The status URL path.\npub const STATUS_API_PATH: &str = \"status\";\n\n/// The metrics URL path.\npub const METRICS_API_PATH: &str = \"metrics\";\n\n/// The validator information URL path.\npub const VALIDATOR_CHANGES_API_PATH: &str = \"validator-changes\";\n\n/// The chainspec file URL path.\npub const CHAINSPEC_API_PATH: &str = \"chainspec\";\n\npub(super) fn create_status_filter<REv: ReactorEventT>(\n    effect_builder: EffectBuilder<REv>,\n    api_version: ProtocolVersion,\n) -> BoxedFilter<(Response<Body>,)> {\n    warp::get()\n        .and(warp::path(STATUS_API_PATH))\n        .and_then(move || {\n            effect_builder\n                .make_request(\n                    |responder| RestRequest::Status { responder },\n                    QueueKind::Api,\n                )\n                .map(move |status_feed| {\n                    let body = GetStatusResult::new(status_feed, api_version);\n                    Ok::<_, Rejection>(reply::json(&body).into_response())\n                })\n        })\n        .boxed()\n}\n\npub(super) fn create_metrics_filter<REv: ReactorEventT>(\n    effect_builder: EffectBuilder<REv>,\n) -> BoxedFilter<(Response<Body>,)> {\n    warp::get()\n        .and(warp::path(METRICS_API_PATH))\n        .and_then(move || {\n            effect_builder\n                .make_request(\n                    |responder| RestRequest::Metrics { responder },\n                    QueueKind::Api,\n                )\n                .map(|maybe_metrics| match maybe_metrics 
{\n                    Some(metrics) => Ok::<_, Rejection>(\n                        reply::with_status(metrics, StatusCode::OK).into_response(),\n                    ),\n                    None => {\n                        warn!(\"metrics not available\");\n                        Ok(reply::with_status(\n                            \"metrics not available\",\n                            StatusCode::INTERNAL_SERVER_ERROR,\n                        )\n                        .into_response())\n                    }\n                })\n        })\n        .boxed()\n}\n\npub(super) fn create_validator_changes_filter<REv: ReactorEventT>(\n    effect_builder: EffectBuilder<REv>,\n    api_version: ProtocolVersion,\n) -> BoxedFilter<(Response<Body>,)> {\n    warp::get()\n        .and(warp::path(VALIDATOR_CHANGES_API_PATH))\n        .and_then(move || {\n            effect_builder\n                .get_consensus_validator_changes()\n                .map(move |changes| {\n                    let result = GetValidatorChangesResult::new(api_version, changes);\n                    Ok::<_, Rejection>(reply::json(&result).into_response())\n                })\n        })\n        .boxed()\n}\n\npub(super) fn create_chainspec_filter<REv: ReactorEventT>(\n    effect_builder: EffectBuilder<REv>,\n    api_version: ProtocolVersion,\n) -> BoxedFilter<(Response<Body>,)> {\n    warp::get()\n        .and(warp::path(CHAINSPEC_API_PATH))\n        .and_then(move || {\n            effect_builder\n                .get_chainspec_raw_bytes()\n                .map(move |chainspec_bytes| {\n                    let result = GetChainspecResult::new(api_version, (*chainspec_bytes).clone());\n                    Ok::<_, Rejection>(reply::json(&result).into_response())\n                })\n        })\n        .boxed()\n}\n"
  },
  {
    "path": "node/src/components/rest_server/http_server.rs",
    "content": "use std::{convert::Infallible, net::SocketAddr, sync::Arc, time::Duration};\n\nuse futures::{future, TryFutureExt};\nuse hyper::server::{conn::AddrIncoming, Builder};\nuse once_cell::sync::OnceCell;\nuse tokio::sync::oneshot;\nuse tower::builder::ServiceBuilder;\nuse tracing::{info, warn};\nuse warp::Filter;\n\nuse casper_types::ProtocolVersion;\n\nuse super::{filters, ReactorEventT};\nuse crate::effect::EffectBuilder;\n\n/// Run the REST HTTP server.\n///\n/// A message received on `shutdown_receiver` will cause the server to exit cleanly.\npub(super) async fn run<REv: ReactorEventT>(\n    builder: Builder<AddrIncoming>,\n    effect_builder: EffectBuilder<REv>,\n    api_version: ProtocolVersion,\n    shutdown_receiver: oneshot::Receiver<()>,\n    qps_limit: u64,\n    local_addr: Arc<OnceCell<SocketAddr>>,\n) {\n    // REST filters.\n    let rest_status = filters::create_status_filter(effect_builder, api_version);\n    let rest_metrics = filters::create_metrics_filter(effect_builder);\n    let rest_validator_changes =\n        filters::create_validator_changes_filter(effect_builder, api_version);\n    let rest_chainspec_filter = filters::create_chainspec_filter(effect_builder, api_version);\n\n    let service = warp::service(\n        rest_status\n            .or(rest_metrics)\n            .or(rest_validator_changes)\n            .or(rest_chainspec_filter),\n    );\n\n    // Start the server, passing a oneshot receiver to allow the server to be shut down gracefully.\n    let make_svc =\n        hyper::service::make_service_fn(move |_| future::ok::<_, Infallible>(service.clone()));\n\n    let rate_limited_service = ServiceBuilder::new()\n        .rate_limit(qps_limit, Duration::from_secs(1))\n        .service(make_svc);\n\n    let server = builder.serve(rate_limited_service);\n    if let Err(err) = local_addr.set(server.local_addr()) {\n        warn!(%err, \"failed to set local addr for reflection\");\n    }\n    info!(address = %server.local_addr(), 
\"started REST server\");\n\n    // Shutdown the server gracefully.\n    let _ = server\n        .with_graceful_shutdown(async move {\n            shutdown_receiver.await.ok();\n        })\n        .map_err(|error| {\n            warn!(%error, \"error running REST server\");\n        })\n        .await;\n}\n\n/// Run the REST HTTP server with CORS enabled.\n///\n/// A message received on `shutdown_receiver` will cause the server to exit cleanly.\npub(super) async fn run_with_cors<REv: ReactorEventT>(\n    builder: Builder<AddrIncoming>,\n    effect_builder: EffectBuilder<REv>,\n    api_version: ProtocolVersion,\n    shutdown_receiver: oneshot::Receiver<()>,\n    qps_limit: u64,\n    local_addr: Arc<OnceCell<SocketAddr>>,\n    cors_origin: String,\n) {\n    // REST filters.\n    let rest_status = filters::create_status_filter(effect_builder, api_version);\n    let rest_metrics = filters::create_metrics_filter(effect_builder);\n    let rest_validator_changes =\n        filters::create_validator_changes_filter(effect_builder, api_version);\n    let rest_chainspec_filter = filters::create_chainspec_filter(effect_builder, api_version);\n\n    let service = warp::service(\n        rest_status\n            .or(rest_metrics)\n            .or(rest_validator_changes)\n            .or(rest_chainspec_filter)\n            .with(match cors_origin.as_str() {\n                \"*\" => warp::cors().allow_any_origin(),\n                origin => warp::cors().allow_origin(origin),\n            }),\n    );\n\n    // Start the server, passing a oneshot receiver to allow the server to be shut down gracefully.\n    let make_svc =\n        hyper::service::make_service_fn(move |_| future::ok::<_, Infallible>(service.clone()));\n\n    let rate_limited_service = ServiceBuilder::new()\n        .rate_limit(qps_limit, Duration::from_secs(1))\n        .service(make_svc);\n\n    let server = builder.serve(rate_limited_service);\n    if let Err(err) = local_addr.set(server.local_addr()) {\n        
warn!(%err, \"failed to set local addr for reflection\");\n    }\n    info!(address = %server.local_addr(), \"started REST server\");\n\n    // Shutdown the server gracefully.\n    let _ = server\n        .with_graceful_shutdown(async move {\n            shutdown_receiver.await.ok();\n        })\n        .map_err(|error| {\n            warn!(%error, \"error running REST server\");\n        })\n        .await;\n}\n"
  },
  {
    "path": "node/src/components/rest_server/info.rs",
    "content": "use std::str;\n\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse casper_binary_port::ConsensusValidatorChanges;\n\nuse casper_types::{ChainspecRawBytes, EraId, ProtocolVersion, PublicKey, ValidatorChange};\n\n/// A single change to a validator's status in the given era.\n#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)]\n#[serde(deny_unknown_fields)]\npub struct JsonValidatorStatusChange {\n    /// The era in which the change occurred.\n    era_id: EraId,\n    /// The change in validator status.\n    validator_change: ValidatorChange,\n}\n\nimpl JsonValidatorStatusChange {\n    pub(crate) fn new(era_id: EraId, validator_change: ValidatorChange) -> Self {\n        JsonValidatorStatusChange {\n            era_id,\n            validator_change,\n        }\n    }\n}\n\n/// The changes in a validator's status.\n#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)]\n#[serde(deny_unknown_fields)]\npub struct JsonValidatorChanges {\n    /// The public key of the validator.\n    public_key: PublicKey,\n    /// The set of changes to the validator's status.\n    status_changes: Vec<JsonValidatorStatusChange>,\n}\n\nimpl JsonValidatorChanges {\n    pub(crate) fn new(\n        public_key: PublicKey,\n        status_changes: Vec<JsonValidatorStatusChange>,\n    ) -> Self {\n        JsonValidatorChanges {\n            public_key,\n            status_changes,\n        }\n    }\n}\n\n/// Result for the \"info_get_validator_changes\" RPC.\n#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)]\n#[serde(deny_unknown_fields)]\npub struct GetValidatorChangesResult {\n    /// The RPC API version.\n    #[schemars(with = \"String\")]\n    pub api_version: ProtocolVersion,\n    /// The validators' status changes.\n    pub changes: Vec<JsonValidatorChanges>,\n}\n\nimpl GetValidatorChangesResult {\n    pub(crate) fn new(api_version: ProtocolVersion, changes: ConsensusValidatorChanges) -> Self {\n        let 
changes = changes\n            .into_inner()\n            .into_iter()\n            .map(|(public_key, mut validator_changes)| {\n                validator_changes.sort();\n                let status_changes = validator_changes\n                    .into_iter()\n                    .map(|(era_id, validator_change)| {\n                        JsonValidatorStatusChange::new(era_id, validator_change)\n                    })\n                    .collect();\n                JsonValidatorChanges::new(public_key, status_changes)\n            })\n            .collect();\n        GetValidatorChangesResult {\n            api_version,\n            changes,\n        }\n    }\n}\n\n/// Result for the \"info_get_chainspec\" RPC.\n#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)]\npub struct GetChainspecResult {\n    /// The RPC API version.\n    #[schemars(with = \"String\")]\n    pub api_version: ProtocolVersion,\n    /// The chainspec file bytes.\n    pub chainspec_bytes: ChainspecRawBytes,\n}\n\nimpl GetChainspecResult {\n    pub(crate) fn new(api_version: ProtocolVersion, chainspec_bytes: ChainspecRawBytes) -> Self {\n        Self {\n            api_version,\n            chainspec_bytes,\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/rest_server.rs",
    "content": "//! REST server\n//!\n//! The REST server provides clients with a simple RESTful HTTP API. This component is (currently)\n//! intended for basic informational / GET endpoints only; more complex operations should be handled\n//! via the RPC server.\n//!\n//! The actual server is run in backgrounded tasks. HTTP requests are translated into reactor\n//! requests to various components.\n//!\n//! This module currently provides both halves of what is required for an API server:\n//! a component implementation that interfaces with other components via being plugged into a\n//! reactor, and an external facing http server that exposes various uri routes and converts\n//! HTTP requests into the appropriate component events.\n//!\n//! Currently this component supports two endpoints, each of which takes no arguments:\n//! /status : a human readable JSON equivalent of the info-get-status rpc method.\n//!     example: curl -X GET 'http://IP:8888/status'\n//! /metrics : time series data collected from the internals of the node being queried.\n//!     
example: curl -X GET 'http://IP:8888/metrics'\n\nmod config;\nmod docs;\nmod event;\nmod filters;\nmod http_server;\nmod info;\n\nuse std::{net::SocketAddr, sync::Arc};\n\nuse datasize::DataSize;\nuse futures::join;\nuse once_cell::sync::OnceCell;\nuse tokio::{sync::oneshot, task::JoinHandle};\nuse tracing::{error, info, warn};\n\n#[cfg(test)]\nuse futures::{future::BoxFuture, FutureExt};\n\n#[cfg(test)]\nuse tracing::debug;\n\nuse casper_types::ProtocolVersion;\n\nuse super::{Component, ComponentState, InitializedComponent};\nuse crate::{\n    components::PortBoundComponent,\n    effect::{\n        requests::{\n            BlockSynchronizerRequest, ChainspecRawBytesRequest, ConsensusRequest, MetricsRequest,\n            NetworkInfoRequest, ReactorInfoRequest, RestRequest, StorageRequest,\n            UpgradeWatcherRequest,\n        },\n        EffectBuilder, EffectExt, Effects,\n    },\n    reactor::main_reactor::MainEvent,\n    types::{ChainspecInfo, StatusFeed},\n    utils::{self, ListeningError},\n    NodeRng,\n};\npub use config::Config;\npub use docs::DocExample;\npub(crate) use docs::DOCS_EXAMPLE_PROTOCOL_VERSION;\npub(crate) use event::Event;\npub(crate) use info::{GetChainspecResult, GetValidatorChangesResult};\n\nconst COMPONENT_NAME: &str = \"rest_server\";\n\n/// A helper trait capturing all of this components Request type dependencies.\npub(crate) trait ReactorEventT:\n    From<Event>\n    + From<RestRequest>\n    + From<NetworkInfoRequest>\n    + From<StorageRequest>\n    + From<ChainspecRawBytesRequest>\n    + From<UpgradeWatcherRequest>\n    + From<ConsensusRequest>\n    + From<MetricsRequest>\n    + From<ReactorInfoRequest>\n    + From<BlockSynchronizerRequest>\n    + Send\n{\n}\n\nimpl<REv> ReactorEventT for REv where\n    REv: From<Event>\n        + From<RestRequest>\n        + From<NetworkInfoRequest>\n        + From<StorageRequest>\n        + From<ChainspecRawBytesRequest>\n        + From<UpgradeWatcherRequest>\n        + 
From<ConsensusRequest>\n        + From<MetricsRequest>\n        + From<ReactorInfoRequest>\n        + From<BlockSynchronizerRequest>\n        + Send\n        + 'static\n{\n}\n\n#[derive(DataSize, Debug)]\npub(crate) struct InnerRestServer {\n    /// When the message is sent, it signals the server loop to exit cleanly.\n    #[data_size(skip)]\n    #[allow(dead_code)]\n    shutdown_sender: oneshot::Sender<()>,\n    /// The address the server is listening on.\n    local_addr: Arc<OnceCell<SocketAddr>>,\n    /// The task handle which will only join once the server loop has exited.\n    #[data_size(skip)]\n    #[allow(dead_code)]\n    server_join_handle: Option<JoinHandle<()>>,\n    /// The network name, as specified in the chainspec\n    network_name: String,\n}\n\n#[derive(DataSize, Debug)]\npub(crate) struct RestServer {\n    /// The component state.\n    state: ComponentState,\n    config: Config,\n    api_version: ProtocolVersion,\n    network_name: String,\n    /// Inner server is present only when enabled in the config.\n    inner_rest: Option<InnerRestServer>,\n}\n\nimpl RestServer {\n    pub(crate) fn new(config: Config, api_version: ProtocolVersion, network_name: String) -> Self {\n        RestServer {\n            state: ComponentState::Uninitialized,\n            config,\n            api_version,\n            network_name,\n            inner_rest: None,\n        }\n    }\n}\n\nimpl<REv> Component<REv> for RestServer\nwhere\n    REv: ReactorEventT,\n{\n    type Event = Event;\n\n    fn handle_event(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        _rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event> {\n        match &self.state {\n            ComponentState::Fatal(msg) => {\n                error!(\n                    msg,\n                    ?event,\n                    name = <Self as Component<MainEvent>>::name(self),\n                    \"should not handle this event when this component has fatal 
error\"\n                );\n                Effects::new()\n            }\n            ComponentState::Uninitialized => {\n                warn!(\n                    ?event,\n                    name = <Self as Component<MainEvent>>::name(self),\n                    \"should not handle this event when component is uninitialized\"\n                );\n                Effects::new()\n            }\n            ComponentState::Initializing => match event {\n                Event::Initialize => {\n                    let (effects, state) = self.bind(self.config.enable_server, effect_builder);\n                    <Self as InitializedComponent<MainEvent>>::set_state(self, state);\n                    effects\n                }\n                Event::RestRequest(_) | Event::GetMetricsResult { .. } => {\n                    warn!(\n                        ?event,\n                        name = <Self as Component<MainEvent>>::name(self),\n                        \"should not handle this event when component is pending initialization\"\n                    );\n                    Effects::new()\n                }\n            },\n            ComponentState::Initialized => match event {\n                Event::Initialize => {\n                    error!(\n                        ?event,\n                        name = <Self as Component<MainEvent>>::name(self),\n                        \"component already initialized\"\n                    );\n                    Effects::new()\n                }\n                Event::RestRequest(RestRequest::Status { responder }) => {\n                    let network_name = self.network_name.clone();\n                    async move {\n                        let (\n                            last_added_block,\n                            peers,\n                            next_upgrade,\n                            consensus_status,\n                            reactor_state,\n                            last_progress,\n              
              node_uptime,\n                            available_block_range,\n                            block_sync,\n                            latest_switch_block_header,\n                        ) = join!(\n                            effect_builder.get_highest_complete_block_from_storage(),\n                            effect_builder.network_peers(),\n                            effect_builder.get_next_upgrade(),\n                            effect_builder.consensus_status(),\n                            effect_builder.get_reactor_state(),\n                            effect_builder.get_last_progress(),\n                            effect_builder.get_uptime(),\n                            effect_builder.get_available_block_range_from_storage(),\n                            effect_builder.get_block_synchronizer_status(),\n                            effect_builder.get_latest_switch_block_header_from_storage()\n                        );\n                        let starting_state_root_hash = effect_builder\n                            .get_block_header_at_height_from_storage(\n                                available_block_range.low(),\n                                true,\n                            )\n                            .await\n                            .map(|header| *header.state_root_hash())\n                            .unwrap_or_default();\n                        let status_feed = StatusFeed::new(\n                            last_added_block,\n                            peers,\n                            ChainspecInfo::new(network_name, next_upgrade),\n                            consensus_status,\n                            node_uptime.into(),\n                            reactor_state,\n                            last_progress.into_inner(),\n                            available_block_range,\n                            block_sync,\n                            starting_state_root_hash,\n                            
latest_switch_block_header.map(|header| header.block_hash()),\n                        );\n                        responder.respond(status_feed).await;\n                    }\n                }\n                .ignore(),\n                Event::RestRequest(RestRequest::Metrics { responder }) => effect_builder\n                    .get_metrics()\n                    .event(move |text| Event::GetMetricsResult {\n                        text,\n                        main_responder: responder,\n                    }),\n                Event::GetMetricsResult {\n                    text,\n                    main_responder,\n                } => main_responder.respond(text).ignore(),\n            },\n        }\n    }\n\n    fn name(&self) -> &str {\n        COMPONENT_NAME\n    }\n}\n\nimpl<REv> InitializedComponent<REv> for RestServer\nwhere\n    REv: ReactorEventT,\n{\n    fn state(&self) -> &ComponentState {\n        &self.state\n    }\n\n    fn set_state(&mut self, new_state: ComponentState) {\n        info!(\n            ?new_state,\n            name = <Self as Component<MainEvent>>::name(self),\n            \"component state changed\"\n        );\n\n        self.state = new_state;\n    }\n}\n\nimpl<REv> PortBoundComponent<REv> for RestServer\nwhere\n    REv: ReactorEventT,\n{\n    type Error = ListeningError;\n    type ComponentEvent = Event;\n\n    fn listen(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n    ) -> Result<Effects<Self::ComponentEvent>, Self::Error> {\n        let cfg = &self.config;\n        let (shutdown_sender, shutdown_receiver) = oneshot::channel::<()>();\n\n        let builder = utils::start_listening(&cfg.address)?;\n        let local_addr: Arc<OnceCell<SocketAddr>> = Default::default();\n\n        let server_join_handle = if cfg.cors_origin.is_empty() {\n            Some(tokio::spawn(http_server::run(\n                builder,\n                effect_builder,\n                self.api_version,\n                
shutdown_receiver,\n                cfg.qps_limit,\n                local_addr.clone(),\n            )))\n        } else {\n            Some(tokio::spawn(http_server::run_with_cors(\n                builder,\n                effect_builder,\n                self.api_version,\n                shutdown_receiver,\n                cfg.qps_limit,\n                local_addr.clone(),\n                cfg.cors_origin.clone(),\n            )))\n        };\n\n        let network_name = self.network_name.clone();\n        self.inner_rest = Some(InnerRestServer {\n            local_addr,\n            shutdown_sender,\n            server_join_handle,\n            network_name,\n        });\n\n        Ok(Effects::new())\n    }\n}\n\n#[cfg(test)]\nimpl crate::reactor::Finalize for RestServer {\n    fn finalize(self) -> BoxFuture<'static, ()> {\n        async {\n            if let Some(mut rest_server) = self.inner_rest {\n                let _ = rest_server.shutdown_sender.send(());\n\n                // Wait for the server to exit cleanly.\n                if let Some(join_handle) = rest_server.server_join_handle.take() {\n                    match join_handle.await {\n                        Ok(_) => debug!(\"rest server exited cleanly\"),\n                        Err(error) => error!(%error, \"could not join rest server task cleanly\"),\n                    }\n                } else {\n                    warn!(\"rest server shutdown while already shut down\")\n                }\n            } else {\n                info!(\"rest server was disabled in config, no shutdown performed\")\n            }\n        }\n        .boxed()\n    }\n}\n\n#[cfg(test)]\nmod schema_tests {\n    use crate::{testing::assert_schema, types::GetStatusResult};\n    use schemars::schema_for;\n\n    use super::{GetChainspecResult, GetValidatorChangesResult};\n\n    #[test]\n    fn json_schema_status_check() {\n        let schema_path = format!(\n            
\"{}/../resources/test/rest_schema_status.json\",\n            env!(\"CARGO_MANIFEST_DIR\")\n        );\n        let pretty = serde_json::to_string_pretty(&schema_for!(GetStatusResult)).unwrap();\n        assert_schema(schema_path, pretty);\n    }\n\n    #[test]\n    fn json_schema_validator_changes_check() {\n        let schema_path = format!(\n            \"{}/../resources/test/rest_schema_validator_changes.json\",\n            env!(\"CARGO_MANIFEST_DIR\")\n        );\n        assert_schema(\n            schema_path,\n            serde_json::to_string_pretty(&schema_for!(GetValidatorChangesResult)).unwrap(),\n        );\n    }\n\n    #[test]\n    fn json_schema_chainspec_bytes_check() {\n        let schema_path = format!(\n            \"{}/../resources/test/rest_schema_chainspec_bytes.json\",\n            env!(\"CARGO_MANIFEST_DIR\")\n        );\n        assert_schema(\n            schema_path,\n            serde_json::to_string_pretty(&schema_for!(GetChainspecResult)).unwrap(),\n        );\n    }\n}\n"
  },
  {
    "path": "node/src/components/shutdown_trigger.rs",
    "content": "//! Shutdown trigger control.\n//!\n//! A component that can be primed with a [`StopAtSpec`] and will monitor the node, until it\n//! detects a specific spec has been triggered. If so, it instructs the system to shut down through\n//! a [`ControlAnnouncement`].\n\nuse std::{fmt::Display, mem};\n\nuse datasize::DataSize;\nuse derive_more::From;\nuse serde::Serialize;\nuse tracing::{info, trace};\n\nuse casper_types::EraId;\n\nuse crate::{\n    effect::{\n        announcements::ControlAnnouncement, requests::SetNodeStopRequest, EffectBuilder, EffectExt,\n        Effects,\n    },\n    types::NodeRng,\n};\n\nuse super::{diagnostics_port::StopAtSpec, Component};\n\n#[derive(DataSize, Debug, Serialize)]\npub(crate) struct CompletedBlockInfo {\n    height: u64,\n    era: EraId,\n    is_switch_block: bool,\n}\n\nimpl CompletedBlockInfo {\n    pub(crate) fn new(height: u64, era: EraId, is_switch_block: bool) -> Self {\n        Self {\n            height,\n            era,\n            is_switch_block,\n        }\n    }\n}\n\n/// The shutdown trigger component's event.\n#[derive(DataSize, Debug, From, Serialize)]\npub(crate) enum Event {\n    /// An announcement that a block has been completed.\n    CompletedBlock(CompletedBlockInfo),\n    /// A request to trigger a shutdown.\n    #[from]\n    SetNodeStopRequest(SetNodeStopRequest),\n}\n\nimpl Display for Event {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        match self {\n            Event::CompletedBlock(block_info) => {\n                write!(\n                    f,\n                    \"completed block: height {}, era {}, switch_block {}\",\n                    block_info.height, block_info.era, block_info.is_switch_block\n                )\n            }\n            Event::SetNodeStopRequest(inner) => {\n                write!(f, \"set node stop request: {}\", inner)\n            }\n        }\n    }\n}\n\nconst COMPONENT_NAME: &str = \"shutdown_trigger\";\n\n/// 
Shutdown trigger component.\n#[derive(DataSize, Debug)]\npub(crate) struct ShutdownTrigger {\n    /// The currently active spec for shutdown triggers.\n    active_spec: Option<StopAtSpec>,\n    /// The highest block height seen, if any.\n    ///\n    /// Constantly kept up to date, so that requests for shutting down on `block:next` can be\n    /// answered without additional requests.\n    highest_block_height_seen: Option<u64>,\n}\n\nimpl ShutdownTrigger {\n    /// Creates a new instance of the shutdown trigger component.\n    pub(crate) fn new() -> Self {\n        Self {\n            active_spec: None,\n            highest_block_height_seen: None,\n        }\n    }\n}\n\nimpl<REv> Component<REv> for ShutdownTrigger\nwhere\n    REv: Send + From<ControlAnnouncement>,\n{\n    type Event = Event;\n\n    fn handle_event(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        _: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event> {\n        match event {\n            Event::CompletedBlock(block_info) => {\n                // We ignore every block that is older than one we already possess.\n                let prev_height = self.highest_block_height_seen.unwrap_or_default();\n                if block_info.height > prev_height {\n                    self.highest_block_height_seen = Some(block_info.height);\n                }\n\n                // Once the updating is done, check if we need to emit shutdown announcements.\n                let active_spec = if let Some(spec) = self.active_spec {\n                    spec\n                } else {\n                    trace!(\"received block, but no active stop-at spec, ignoring\");\n                    return Effects::new();\n                };\n\n                let should_shutdown = match active_spec {\n                    StopAtSpec::BlockHeight(trigger_height) => block_info.height >= trigger_height,\n                    StopAtSpec::EraId(trigger_era_id) => block_info.era >= 
trigger_era_id,\n                    StopAtSpec::Immediately => {\n                        // Immediate stops are handled when the request is received.\n                        false\n                    }\n                    StopAtSpec::NextBlock => {\n                        // Any block that is newer than one we already saw is a \"next\" block.\n                        block_info.height > prev_height\n                    }\n                    StopAtSpec::EndOfCurrentEra => {\n                        // We require that the block we just finished is a switch block.\n                        block_info.height > prev_height && block_info.is_switch_block\n                    }\n                };\n\n                if should_shutdown {\n                    info!(\n                        block_height = block_info.height,\n                        block_era = block_info.era.value(),\n                        is_switch_block = block_info.is_switch_block,\n                        %active_spec,\n                        \"shutdown triggered due to fulfilled stop-at spec\"\n                    );\n                    effect_builder.announce_user_shutdown_request().ignore()\n                } else {\n                    trace!(\n                        block_height = block_info.height,\n                        block_era = block_info.era.value(),\n                        is_switch_block = block_info.is_switch_block,\n                        %active_spec,\n                        \"not shutting down\"\n                    );\n                    Effects::new()\n                }\n            }\n\n            Event::SetNodeStopRequest(SetNodeStopRequest {\n                mut stop_at,\n                responder,\n            }) => {\n                mem::swap(&mut self.active_spec, &mut stop_at);\n\n                let mut effects = responder.respond(stop_at).ignore();\n\n                // If we received an immediate shutdown request, send out the control announcement\n        
        // directly, instead of waiting for another block.\n                if matches!(self.active_spec, Some(StopAtSpec::Immediately)) {\n                    effects.extend(effect_builder.announce_user_shutdown_request().ignore());\n                }\n\n                effects\n            }\n        }\n    }\n\n    fn name(&self) -> &str {\n        COMPONENT_NAME\n    }\n}\n"
  },
  {
    "path": "node/src/components/storage/config.rs",
    "content": "use std::path::PathBuf;\n\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n#[cfg(test)]\nuse tempfile::TempDir;\n\nconst GIB: usize = 1024 * 1024 * 1024;\nconst DEFAULT_MAX_BLOCK_STORE_SIZE: usize = 450 * GIB;\nconst DEFAULT_MAX_DEPLOY_STORE_SIZE: usize = 300 * GIB;\nconst DEFAULT_MAX_DEPLOY_METADATA_STORE_SIZE: usize = 300 * GIB;\nconst DEFAULT_MAX_STATE_STORE_SIZE: usize = 10 * GIB;\n\n/// On-disk storage configuration.\n#[derive(Clone, DataSize, Debug, Deserialize, Serialize)]\n#[serde(deny_unknown_fields)]\npub struct Config {\n    /// The path to the folder where any files created or read by the storage component will exist.\n    ///\n    /// If the folder doesn't exist, it and any required parents will be created.\n    pub path: PathBuf,\n    /// The maximum size of the database to use for the block store.\n    ///\n    /// The size should be a multiple of the OS page size.\n    pub max_block_store_size: usize,\n    /// The maximum size of the database to use for the deploy store.\n    ///\n    /// The size should be a multiple of the OS page size.\n    pub max_deploy_store_size: usize,\n    /// The maximum size of the database to use for the deploy metadata store.\n    ///\n    /// The size should be a multiple of the OS page size.\n    pub max_deploy_metadata_store_size: usize,\n    /// The maximum size of the database to use for the component state store.\n    ///\n    /// The size should be a multiple of the OS page size.\n    pub max_state_store_size: usize,\n    /// Whether or not memory deduplication is enabled.\n    pub enable_mem_deduplication: bool,\n    /// How many loads before memory duplication checks for dead references.\n    pub mem_pool_prune_interval: u16,\n}\n\nimpl Default for Config {\n    fn default() -> Self {\n        Config {\n            // No one should be instantiating a config with storage set to default.\n            path: \"/dev/null\".into(),\n            max_block_store_size: 
DEFAULT_MAX_BLOCK_STORE_SIZE,\n            max_deploy_store_size: DEFAULT_MAX_DEPLOY_STORE_SIZE,\n            max_deploy_metadata_store_size: DEFAULT_MAX_DEPLOY_METADATA_STORE_SIZE,\n            max_state_store_size: DEFAULT_MAX_STATE_STORE_SIZE,\n            enable_mem_deduplication: true,\n            mem_pool_prune_interval: 4096,\n        }\n    }\n}\n\nimpl Config {\n    /// Returns a `Config` suitable for tests, along with a `TempDir` which must be kept alive for\n    /// the duration of the test since its destructor removes the dir from the filesystem.\n    ///\n    /// `size_multiplier` is used to multiply the default DB sizes.\n    #[cfg(test)]\n    pub(crate) fn new_for_tests(size_multiplier: u8) -> (Self, TempDir) {\n        if size_multiplier == 0 {\n            panic!(\"size_multiplier cannot be zero\");\n        }\n        let tempdir = tempfile::tempdir().expect(\"should get tempdir\");\n        let path = tempdir.path().join(\"lmdb\");\n\n        let config = Config {\n            path,\n            max_block_store_size: 1024 * 1024 * size_multiplier as usize,\n            max_deploy_store_size: 1024 * 1024 * size_multiplier as usize,\n            max_deploy_metadata_store_size: 1024 * 1024 * size_multiplier as usize,\n            max_state_store_size: 12 * 1024 * size_multiplier as usize,\n            ..Default::default()\n        };\n        (config, tempdir)\n    }\n}\n"
  },
  {
    "path": "node/src/components/storage/disjoint_sequences.rs",
    "content": "use std::fmt::{self, Display, Formatter};\n\nuse casper_types::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    AvailableBlockRange,\n};\nuse datasize::DataSize;\nuse itertools::Itertools;\nuse tracing::trace;\n\n/// The outcome of an attempt to insert a value into a `Sequence`.\n#[derive(Copy, Clone, Debug, Eq, PartialEq)]\nenum InsertOutcome {\n    /// The value was greater than `Sequence::high + 1` and wasn't inserted.\n    TooHigh,\n    /// The value was inserted at the high end, and is now `Sequence::high`.\n    ExtendedHigh,\n    /// The value was a duplicate; inserted and didn't affect the high or low values.\n    AlreadyInSequence,\n    /// The value was inserted at the low end, and is now `Sequence::low`.\n    ExtendedLow,\n    /// The value was less than `Sequence::low - 1` and wasn't inserted.\n    TooLow,\n}\n\n/// Represents a continuous sequence of `u64`s.\n#[derive(Copy, Clone, Debug, Eq, PartialEq, DataSize, Ord, PartialOrd)]\npub(crate) struct Sequence {\n    /// The upper bound (inclusive) of the sequence.\n    high: u64,\n    /// The lower bound (inclusive) of the sequence.\n    low: u64,\n}\n\nimpl Sequence {\n    /// Constructs a new sequence using the bounds of `a` and `b`.\n    ///\n    /// `low` and `high` will be automatically determined.\n    pub(super) fn new(a: u64, b: u64) -> Self {\n        let (low, high) = if a <= b { (a, b) } else { (b, a) };\n        Sequence { low, high }\n    }\n\n    /// Constructs a new sequence containing only `value`.\n    fn single(value: u64) -> Self {\n        Sequence {\n            high: value,\n            low: value,\n        }\n    }\n\n    /// Tries to insert `value` into the sequence.\n    ///\n    /// Returns an outcome which indicates where the value was inserted if at all.\n    fn try_insert(&mut self, value: u64) -> InsertOutcome {\n        if value == self.high + 1 {\n            self.high = value;\n            InsertOutcome::ExtendedHigh\n        } else if value >= self.low 
&& value <= self.high {\n            InsertOutcome::AlreadyInSequence\n        } else if value + 1 == self.low {\n            self.low = value;\n            InsertOutcome::ExtendedLow\n        } else if value > self.high {\n            InsertOutcome::TooHigh\n        } else {\n            InsertOutcome::TooLow\n        }\n    }\n\n    /// Returns the inclusive high end of the sequence.\n    pub(crate) fn high(&self) -> u64 {\n        self.high\n    }\n\n    /// Returns the inclusive low end of the sequence.\n    pub(crate) fn low(&self) -> u64 {\n        self.low\n    }\n}\n\nimpl From<Sequence> for AvailableBlockRange {\n    fn from(sequence: Sequence) -> Self {\n        AvailableBlockRange::new(sequence.low(), sequence.high())\n    }\n}\n\n/// Represents a collection of disjoint sequences of `u64`s.\n///\n/// The collection is kept ordered from high to low, and each entry represents a discrete portion of\n/// the space from [0, u64::MAX] with a gap of at least 1 between each.\n///\n/// The collection is ordered this way to optimize insertion for the normal use case: adding\n/// monotonically increasing values representing the latest block height.\n///\n/// As values are inserted, if two separate sequences become contiguous, they are merged into a\n/// single sequence.\n///\n/// For example, if `sequences` contains `[9,9], [7,3]` and `8` is inserted, then `sequences` will\n/// be reduced to `[9,3]`.\n#[derive(Default, Debug, DataSize)]\n#[cfg_attr(test, derive(Clone))]\npub(super) struct DisjointSequences {\n    sequences: Vec<Sequence>,\n}\n\nimpl DisjointSequences {\n    /// Constructs disjoint sequences from one initial sequence.\n    ///\n    /// Note: Use [`Default::default()`] to create an empty set of sequences.\n    pub(super) fn new(initial_sequence: Sequence) -> Self {\n        DisjointSequences {\n            sequences: vec![initial_sequence],\n        }\n    }\n\n    /// Inserts `value` into the appropriate sequence and merges sequences if required.\n  
  ///\n    /// Returns `true` if `value` was not previously contained in the disjoint sequences.\n    ///\n    /// Note, this method is efficient where `value` is one greater than the current highest value.\n    /// However, it's not advisable to use this method in a loop to rebuild a `DisjointSequences`\n    /// from a large collection of randomly-ordered values.  In that case, it is very much more\n    /// efficient to use `DisjointSequences::from(mut input: Vec<u64>)`.\n    pub(super) fn insert(&mut self, value: u64) -> bool {\n        let mut iter_mut = self.sequences.iter_mut().enumerate().peekable();\n\n        // The index at which to add a new `Sequence` containing only `value`.\n        let mut maybe_insertion_index = Some(0);\n        // The index of a `Sequence` to be removed due to the insertion of `value` causing two\n        // consecutive sequences to become contiguous.\n        let mut maybe_removal_index = None;\n        let mut added_new_value = true;\n        while let Some((index, sequence)) = iter_mut.next() {\n            match sequence.try_insert(value) {\n                InsertOutcome::ExtendedHigh => {\n                    // We should exit the loop, and we don't need to add a new sequence; we only\n                    // need to check for merges of sequences when we get `ExtendedLow` since we're\n                    // iterating the sequences from high to low.\n                    maybe_insertion_index = None;\n                    break;\n                }\n                InsertOutcome::AlreadyInSequence => {\n                    // We should exit the loop, and we don't need to add a new sequence.\n                    maybe_insertion_index = None;\n                    added_new_value = false;\n                    break;\n                }\n                InsertOutcome::TooHigh => {\n                    // We should exit the loop and we need to add a new sequence at this index.\n                    maybe_insertion_index = Some(index);\n   
                 break;\n                }\n                InsertOutcome::TooLow => {\n                    // We need to add a new sequence immediately after this one if this is the last\n                    // sequence.  Continue iterating in case this is not the last sequence.\n                    maybe_insertion_index = Some(index + 1);\n                }\n                InsertOutcome::ExtendedLow => {\n                    // We should exit the loop, and we don't need to add a new sequence.\n                    maybe_insertion_index = None;\n                    // If the next sequence is now contiguous with this one, update this one's low\n                    // value and set the next sequence to be removed.\n                    if let Some((next_index, next_sequence)) = iter_mut.peek() {\n                        if next_sequence.high + 1 == sequence.low {\n                            sequence.low = next_sequence.low;\n                            maybe_removal_index = Some(*next_index);\n                        }\n                    }\n                    break;\n                }\n            };\n        }\n\n        if let Some(index_to_insert) = maybe_insertion_index {\n            self.sequences\n                .insert(index_to_insert, Sequence::single(value));\n        }\n\n        if let Some(index_to_remove) = maybe_removal_index {\n            let _ = self.sequences.remove(index_to_remove);\n        }\n\n        trace!(%self, \"current state of disjoint sequences\");\n        added_new_value\n    }\n\n    /// Returns the highest sequence, or `None` if there are no sequences.\n    pub(super) fn highest_sequence(&self) -> Option<&Sequence> {\n        self.sequences.first()\n    }\n\n    /// Returns all the sequences, if any.\n    pub(super) fn sequences(&self) -> &Vec<Sequence> {\n        &self.sequences\n    }\n\n    /// Reduces the sequence(s), keeping all entries below and including `max_value`.  
If\n    /// `max_value` is not already included in a sequence, it will not be added.\n    ///\n    /// If the current highest value is lower than `max_value`, or if there are no sequences, this\n    /// has no effect.\n    pub(super) fn truncate(&mut self, max_value: u64) {\n        self.sequences.retain_mut(|sequence| {\n            if sequence.high <= max_value {\n                // Keep this sequence unchanged.\n                return true;\n            }\n\n            if sequence.low > max_value {\n                // Delete this entire sequence.\n                return false;\n            }\n\n            // This sequence contains `max_value`, so keep the sequence, but reduce its high value.\n            sequence.high = max_value;\n            true\n        })\n    }\n}\n#[cfg(test)]\nimpl DisjointSequences {\n    /// Inserts multiple values produced by the given iterator.\n    fn extend<T: IntoIterator<Item = u64>>(&mut self, iter: T) {\n        iter.into_iter().for_each(|height| {\n            self.insert(height);\n        })\n    }\n\n    /// Returns `true` if `value` exists in the disjoint sequences.\n    fn contains(&self, value: u64) -> bool {\n        self.sequences\n            .iter()\n            .any(|sequence| value >= sequence.low && value <= sequence.high)\n    }\n}\n\nimpl FromBytes for Sequence {\n    #[inline]\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (high, bytes) = u64::from_bytes(bytes)?;\n        let (low, bytes) = u64::from_bytes(bytes)?;\n\n        Ok((Sequence { high, low }, bytes))\n    }\n}\n\nimpl ToBytes for Sequence {\n    #[inline]\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buf = Vec::new();\n        self.write_bytes(&mut buf)?;\n        Ok(buf)\n    }\n\n    #[inline]\n    fn serialized_length(&self) -> usize {\n        self.high.serialized_length() + self.low.serialized_length()\n    }\n\n    #[inline]\n    fn write_bytes(&self, writer: &mut 
Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.high.write_bytes(writer)?;\n        self.low.write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for DisjointSequences {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        Vec::<Sequence>::from_bytes(bytes)\n            .map(|(sequences, remainder)| (DisjointSequences { sequences }, remainder))\n    }\n\n    #[inline]\n    fn from_vec(bytes: Vec<u8>) -> Result<(Self, Vec<u8>), bytesrepr::Error> {\n        Vec::<Sequence>::from_vec(bytes)\n            .map(|(sequences, remainder)| (DisjointSequences { sequences }, remainder))\n    }\n}\n\nimpl ToBytes for DisjointSequences {\n    #[inline]\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.sequences.to_bytes()\n    }\n\n    #[inline]\n    fn serialized_length(&self) -> usize {\n        self.sequences.serialized_length()\n    }\n\n    fn into_bytes(self) -> Result<Vec<u8>, bytesrepr::Error>\n    where\n        Self: Sized,\n    {\n        self.sequences.into_bytes()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.sequences.write_bytes(writer)\n    }\n}\n\n/// This impl is provided to allow for efficient re-building of a `DisjointSequences` from a large,\n/// randomly-ordered set of values.\nimpl From<Vec<u64>> for DisjointSequences {\n    fn from(mut input: Vec<u64>) -> Self {\n        input.sort_unstable();\n\n        let sequences = input\n            .drain(..)\n            .peekable()\n            .batching(|iter| match iter.next() {\n                None => None,\n                Some(low) => {\n                    let mut sequence = Sequence::single(low);\n                    while let Some(i) = iter.peek() {\n                        if *i == sequence.high + 1 {\n                            sequence.high = iter.next().unwrap();\n                        }\n                    }\n                    Some(sequence)\n           
     }\n            })\n            .collect();\n\n        DisjointSequences { sequences }\n    }\n}\n\nimpl Display for DisjointSequences {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        let mut iter = self.sequences.iter().peekable();\n        while let Some(sequence) = iter.next() {\n            write!(formatter, \"[{}, {}]\", sequence.high, sequence.low)?;\n            if iter.peek().is_some() {\n                write!(formatter, \", \")?;\n            }\n        }\n        Ok(())\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::collections::BTreeSet;\n\n    use rand::{seq::SliceRandom, Rng};\n\n    use super::*;\n\n    fn new_sequence(a: u64, b: u64) -> Sequence {\n        let (low, high) = if a <= b { (a, b) } else { (b, a) };\n        assert!(low <= high);\n        Sequence { low, high }\n    }\n\n    fn assert_matches(actual: &DisjointSequences, expected: &BTreeSet<u64>) {\n        let mut actual_set = BTreeSet::new();\n        for sequence in &actual.sequences {\n            for i in sequence.low..=sequence.high {\n                assert!(actual_set.insert(i));\n            }\n        }\n        assert_eq!(&actual_set, expected)\n    }\n\n    #[test]\n    fn should_insert_all_u8s_including_duplicates() {\n        let mut rng = crate::new_rng();\n\n        let mut disjoint_sequences = DisjointSequences::default();\n        let mut expected = BTreeSet::new();\n\n        while disjoint_sequences.sequences != vec![Sequence { high: 255, low: 0 }] {\n            let value = rng.gen::<u8>() as u64;\n            let insertion_result = !disjoint_sequences.contains(value);\n            assert_eq!(insertion_result, disjoint_sequences.insert(value));\n            expected.insert(value);\n            assert_matches(&disjoint_sequences, &expected);\n        }\n    }\n\n    #[test]\n    fn should_extend() {\n        let to_be_inserted = vec![5_u64, 4, 3, 2, 1];\n        let mut expected = BTreeSet::new();\n        
expected.extend(to_be_inserted.clone());\n\n        let mut disjoint_sequences = DisjointSequences::default();\n        disjoint_sequences.extend(to_be_inserted);\n        assert_matches(&disjoint_sequences, &expected);\n\n        // Extending with empty set should not modify the sequences.\n        disjoint_sequences.extend(Vec::<u64>::new());\n        assert_matches(&disjoint_sequences, &expected);\n    }\n\n    #[test]\n    fn should_insert_with_no_duplicates() {\n        const MAX: u64 = 1000;\n\n        let mut rng = crate::new_rng();\n\n        let mut values = (0..=MAX).collect::<Vec<u64>>();\n        values.shuffle(&mut rng);\n\n        let mut disjoint_sequences = DisjointSequences::default();\n        let mut expected = BTreeSet::new();\n\n        for value in values {\n            assert!(disjoint_sequences.insert(value));\n            expected.insert(value);\n            assert_matches(&disjoint_sequences, &expected);\n        }\n\n        assert_eq!(\n            disjoint_sequences.sequences,\n            vec![Sequence { high: MAX, low: 0 }]\n        );\n    }\n\n    #[test]\n    fn should_construct_from_random_set() {\n        const MAX: u64 = 2_000_000;\n\n        let mut rng = crate::new_rng();\n\n        let mut values = (0..=MAX).collect::<Vec<u64>>();\n        values.shuffle(&mut rng);\n\n        let disjoint_sequences = DisjointSequences::from(values);\n        assert_eq!(\n            disjoint_sequences.sequences,\n            vec![Sequence { high: MAX, low: 0 }]\n        );\n    }\n\n    #[test]\n    fn should_get_highest_sequence() {\n        let mut disjoint_sequences = DisjointSequences::default();\n        assert_eq!(disjoint_sequences.highest_sequence(), None);\n\n        disjoint_sequences.extend([1]);\n        assert_eq!(\n            disjoint_sequences.highest_sequence(),\n            Some(&Sequence { low: 1, high: 1 })\n        );\n\n        disjoint_sequences.extend([5, 6]);\n        assert_eq!(\n            
disjoint_sequences.highest_sequence(),\n            Some(&Sequence { low: 5, high: 6 })\n        );\n\n        disjoint_sequences.extend([8, 9]);\n        assert_eq!(\n            disjoint_sequences.highest_sequence(),\n            Some(&Sequence { low: 8, high: 9 })\n        );\n    }\n\n    #[test]\n    fn should_truncate() {\n        const SEQ_HIGH: Sequence = Sequence { high: 11, low: 9 };\n        const SEQ_MID: Sequence = Sequence { high: 6, low: 6 };\n        const SEQ_LOW: Sequence = Sequence { high: 3, low: 1 };\n        let initial_sequences = DisjointSequences {\n            sequences: vec![SEQ_HIGH, SEQ_MID, SEQ_LOW],\n        };\n\n        // Truncate with `max_value` greater or equal to current highest value should be a no-op.\n        let mut disjoint_sequences = initial_sequences.clone();\n        disjoint_sequences.truncate(12);\n        assert_eq!(disjoint_sequences.sequences, initial_sequences.sequences);\n        disjoint_sequences.truncate(11);\n        assert_eq!(disjoint_sequences.sequences, initial_sequences.sequences);\n\n        // Truncate with `max_value` between two sequences should cause the higher sequences to get\n        // removed and the lower ones retained unchanged.\n        disjoint_sequences = initial_sequences.clone();\n        disjoint_sequences.truncate(SEQ_HIGH.low - 1);\n        assert_eq!(disjoint_sequences.sequences, vec![SEQ_MID, SEQ_LOW]);\n\n        disjoint_sequences = initial_sequences.clone();\n        disjoint_sequences.truncate(SEQ_MID.high);\n        assert_eq!(disjoint_sequences.sequences, vec![SEQ_MID, SEQ_LOW]);\n\n        disjoint_sequences = initial_sequences.clone();\n        disjoint_sequences.truncate(SEQ_MID.low - 1);\n        assert_eq!(disjoint_sequences.sequences, vec![SEQ_LOW]);\n\n        disjoint_sequences = initial_sequences.clone();\n        disjoint_sequences.truncate(SEQ_LOW.high);\n        assert_eq!(disjoint_sequences.sequences, vec![SEQ_LOW]);\n\n        // Truncate with `max_value` lower 
than the lowest value should cause all sequences to get\n        // removed.\n        disjoint_sequences = initial_sequences.clone();\n        disjoint_sequences.truncate(SEQ_LOW.low - 1);\n        assert!(disjoint_sequences.sequences.is_empty());\n\n        // Truncate with `max_value` within a sequence should cause that sequence to get updated,\n        // any higher sequences to get removed, and any lower ones retained unchanged.\n        disjoint_sequences = initial_sequences.clone();\n        let max_value = SEQ_HIGH.high - 1;\n        disjoint_sequences.truncate(max_value);\n        assert_eq!(\n            disjoint_sequences.sequences,\n            vec![new_sequence(max_value, SEQ_HIGH.low), SEQ_MID, SEQ_LOW]\n        );\n\n        disjoint_sequences = initial_sequences.clone();\n        let max_value = SEQ_HIGH.low;\n        disjoint_sequences.truncate(max_value);\n        assert_eq!(\n            disjoint_sequences.sequences,\n            vec![new_sequence(max_value, SEQ_HIGH.low), SEQ_MID, SEQ_LOW]\n        );\n\n        disjoint_sequences = initial_sequences.clone();\n        let max_value = SEQ_MID.low;\n        disjoint_sequences.truncate(max_value);\n        assert_eq!(disjoint_sequences.sequences, vec![SEQ_MID, SEQ_LOW]);\n\n        disjoint_sequences = initial_sequences.clone();\n        let max_value = SEQ_LOW.high - 1;\n        disjoint_sequences.truncate(max_value);\n        assert_eq!(\n            disjoint_sequences.sequences,\n            vec![new_sequence(max_value, SEQ_LOW.low)]\n        );\n\n        disjoint_sequences = initial_sequences;\n        let max_value = SEQ_LOW.low;\n        disjoint_sequences.truncate(max_value);\n        assert_eq!(\n            disjoint_sequences.sequences,\n            vec![new_sequence(max_value, SEQ_LOW.low)]\n        );\n\n        // Truncate on an empty set of sequences should have no effect.\n        disjoint_sequences = DisjointSequences::default();\n        
assert!(disjoint_sequences.sequences.is_empty());\n        disjoint_sequences.truncate(100);\n        assert!(disjoint_sequences.sequences.is_empty());\n    }\n\n    #[test]\n    fn roundtrip_to_bytes() {\n        let mut disjoint_sequences = DisjointSequences::default();\n\n        disjoint_sequences.extend([4, 5, 6, 7, 8]);\n        disjoint_sequences.extend([15, 16, 17, 18, 19, 20]);\n\n        // should be represented logically as [(20 to 15), (8 to 4)] and serialize to a sequence of\n        // `2u32 20u64 15u64 8u64 4u64`.\n\n        let expected = [\n            0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x00,\n            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n            0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n        ];\n        let actual = disjoint_sequences.to_bytes().expect(\"serialization failed\");\n        assert_eq!(expected.as_slice(), &actual);\n\n        let expected_inner_state = disjoint_sequences.sequences;\n        let (restored, remainder) =\n            DisjointSequences::from_bytes(&actual).expect(\"deserialization failed\");\n        assert!(remainder.is_empty());\n\n        let (restored2, remainder) =\n            DisjointSequences::from_vec(actual).expect(\"deserialization failed\");\n        assert!(remainder.is_empty());\n\n        assert_eq!(restored.sequences, expected_inner_state);\n        assert_eq!(restored2.sequences, expected_inner_state);\n    }\n}\n"
  },
  {
    "path": "node/src/components/storage/error.rs",
    "content": "use std::{fmt::Debug, io, path::PathBuf};\n\nuse casper_binary_port::RecordId;\nuse thiserror::Error;\nuse tracing::error;\n\nuse casper_types::{\n    bytesrepr, crypto, BlockBody, BlockHash, BlockHeader, BlockValidationError, DeployHash, Digest,\n    EraId, FinalitySignature, FinalitySignatureId, TransactionHash,\n};\n\nuse crate::types::VariantMismatch;\nuse casper_storage::block_store::BlockStoreError;\n\n/// A fatal storage component error.\n///\n/// An error of this kinds indicates that storage is corrupted or otherwise irrecoverably broken, at\n/// least for the moment. It should usually be followed by swift termination of the node.\n#[derive(Debug, Error)]\npub enum FatalStorageError {\n    /// Failure to create the root database directory.\n    #[error(\"failed to create database directory `{}`: {}\", .0.display(), .1)]\n    CreateDatabaseDirectory(PathBuf, io::Error),\n    /// Found a duplicate switch-block-at-era-id index entry.\n    #[error(\"duplicate entries for switch block at era id {era_id}: {first} / {second}\")]\n    DuplicateEraIdIndex {\n        /// Era ID at which duplicate was found.\n        era_id: EraId,\n        /// First block hash encountered at `era_id`.\n        first: BlockHash,\n        /// Second block hash encountered at `era_id`.\n        second: BlockHash,\n    },\n    /// An internal DB error - blocks should be overwritten.\n    #[error(\"failed overwriting block\")]\n    FailedToOverwriteBlock,\n    /// Record specified in raw request has not been found in the storage module.\n    #[error(\"unable to find db for record: {0}\")]\n    DatabaseNotFound(RecordId),\n    /// Filesystem error while trying to move file.\n    #[error(\"unable to move file {source_path} to {dest_path}: {original_error}\")]\n    UnableToMoveFile {\n        /// The path to the file that should have been moved.\n        source_path: PathBuf,\n        /// The path where the file should have been moved to.\n        dest_path: PathBuf,\n        
/// The original `io::Error` from `fs::rename`.\n        original_error: io::Error,\n    },\n    /// Mix of missing and found storage files.\n    #[error(\"expected files to exist: {missing_files:?}.\")]\n    MissingStorageFiles {\n        /// The files that were not be found in the storage directory.\n        missing_files: Vec<PathBuf>,\n    },\n    /// Error when validating a block.\n    #[error(transparent)]\n    BlockValidation(#[from] BlockValidationError),\n    /// A block header was not stored under its hash.\n    #[error(\n        \"Block header not stored under its hash. \\\n         Queried block hash bytes: {queried_block_hash_bytes:x?}, \\\n         Found block header hash bytes: {found_block_header_hash:x?}, \\\n         Block header: {block_header}\"\n    )]\n    BlockHeaderNotStoredUnderItsHash {\n        /// The queried block hash.\n        queried_block_hash_bytes: Vec<u8>,\n        /// The actual header of the block hash.\n        found_block_header_hash: BlockHash,\n        /// The block header found in storage.\n        block_header: Box<BlockHeader>,\n    },\n    /// Block body did not have a block header.\n    #[error(\n        \"No block header corresponding to block body found in LMDB. \\\n         Block body hash: {block_body_hash:?}, \\\n         Block body: {block_body:?}\"\n    )]\n    NoBlockHeaderForBlockBody {\n        /// The block body hash.\n        block_body_hash: Digest,\n        /// The block body.\n        block_body: Box<BlockBody>,\n    },\n    /// Could not verify finality signatures for block.\n    #[error(\"{0} in signature verification. Database is corrupted.\")]\n    SignatureVerification(crypto::Error),\n    /// Corrupted block signature index.\n    #[error(\n        \"Block signatures not indexed by their block hash. 
\\\n         Key bytes in LMDB: {raw_key:x?}, \\\n         Block hash bytes in record: {block_hash_bytes:x?}\"\n    )]\n    CorruptedBlockSignatureIndex {\n        /// The key in the block signature index.\n        raw_key: Vec<u8>,\n        /// The block hash of the signatures found in the index.\n        block_hash_bytes: Vec<u8>,\n    },\n    /// Switch block does not contain era end.\n    #[error(\"switch block does not contain era end: {0:?}\")]\n    InvalidSwitchBlock(Box<BlockHeader>),\n    /// A block body was found to have more parts than expected.\n    #[error(\n        \"Found an unexpected part of a block body in the database: \\\n        {part_hash:?}\"\n    )]\n    UnexpectedBlockBodyPart {\n        /// The block body with the issue.\n        block_body_hash: Digest,\n        /// The hash of the superfluous body part.\n        part_hash: Digest,\n    },\n    /// Failed to serialize an item that was found in local storage.\n    #[error(\"failed to serialized stored item\")]\n    StoredItemSerializationFailure(#[source] bincode::Error),\n    /// We tried to store finalized approvals for a nonexistent transaction.\n    #[error(\"Tried to store FinalizedApprovals for a nonexistent transaction {transaction_hash}\")]\n    UnexpectedFinalizedApprovals {\n        /// The missing transaction hash.\n        transaction_hash: TransactionHash,\n    },\n    /// `ToBytes` serialization failure of an item that should never fail to serialize.\n    #[error(\"unexpected serialization failure: {0}\")]\n    UnexpectedSerializationFailure(bytesrepr::Error),\n    /// `ToBytes` deserialization failure of an item that should never fail to serialize.\n    #[error(\"unexpected deserialization failure: {0}\")]\n    UnexpectedDeserializationFailure(bytesrepr::Error),\n    /// Stored finalized approvals hashes count doesn't match number of deploys.\n    #[error(\n        \"stored finalized approvals hashes count doesn't match number of deploys: \\\n        block hash: 
{block_hash}, expected: {expected}, actual: {actual}\"\n    )]\n    ApprovalsHashesLengthMismatch {\n        /// The block hash.\n        block_hash: BlockHash,\n        /// The number of deploys in the block.\n        expected: usize,\n        /// The number of approvals hashes.\n        actual: usize,\n    },\n    /// V1 execution results hashmap doesn't have exactly one entry.\n    #[error(\n        \"stored v1 execution results doesn't have exactly one entry: deploy: {deploy_hash}, number \\\n        of entries: {results_length}\"\n    )]\n    InvalidExecutionResultsV1Length {\n        /// The deploy hash.\n        deploy_hash: DeployHash,\n        /// The number of execution results.\n        results_length: usize,\n    },\n    /// Error initializing metrics.\n    #[error(\"failed to initialize metrics for storage: {0}\")]\n    Prometheus(#[from] prometheus::Error),\n    /// Type mismatch indicating programmer error.\n    #[error(transparent)]\n    VariantMismatch(#[from] VariantMismatch),\n    /// BlockStoreError\n    #[error(transparent)]\n    BlockStoreError(#[from] BlockStoreError),\n    /// BlockStoreError\n    #[error(\"unexpected record id {0}\")]\n    UnexpectedRecordId(RecordId),\n}\n\nimpl From<Box<BlockValidationError>> for FatalStorageError {\n    fn from(err: Box<BlockValidationError>) -> Self {\n        Self::BlockValidation(*err)\n    }\n}\n\n/// An error that may occur when handling a get request.\n///\n/// Wraps a fatal error, callers should check whether the variant is of the fatal or non-fatal kind.\n#[derive(Debug, Error)]\npub(super) enum GetRequestError {\n    /// A fatal error occurred.\n    #[error(transparent)]\n    Fatal(#[from] FatalStorageError),\n    /// Failed to serialized an item ID on an incoming item request.\n    #[error(\"failed to deserialize incoming item id\")]\n    MalformedIncomingItemId(#[source] bincode::Error),\n    #[error(\n        \"id information not matching the finality signature: \\\n        requested id: 
{requested_id},\\\n        signature: {finality_signature}\"\n    )]\n    FinalitySignatureIdMismatch {\n        // the ID requested\n        requested_id: Box<FinalitySignatureId>,\n        // the finality signature read from storage\n        finality_signature: Box<FinalitySignature>,\n    },\n}\n"
  },
  {
    "path": "node/src/components/storage/event.rs",
    "content": "use std::fmt::{self, Display, Formatter};\n\nuse derive_more::From;\nuse serde::Serialize;\nuse static_assertions::const_assert;\n\nuse crate::effect::{\n    incoming::NetRequestIncoming,\n    requests::{MakeBlockExecutableRequest, MarkBlockCompletedRequest, StorageRequest},\n};\n\nconst _STORAGE_EVENT_SIZE: usize = size_of::<Event>();\nconst_assert!(_STORAGE_EVENT_SIZE <= 32);\n\n/// A storage component event.\n#[derive(Debug, From, Serialize)]\n#[repr(u8)]\npub(crate) enum Event {\n    /// Storage request.\n    #[from]\n    StorageRequest(Box<StorageRequest>),\n    /// Incoming net request.\n    NetRequestIncoming(Box<NetRequestIncoming>),\n    /// Mark block completed request.\n    #[from]\n    MarkBlockCompletedRequest(MarkBlockCompletedRequest),\n    /// Make block executable request.\n    #[from]\n    MakeBlockExecutableRequest(Box<MakeBlockExecutableRequest>),\n}\n\nimpl Display for Event {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            Event::StorageRequest(req) => req.fmt(f),\n            Event::NetRequestIncoming(incoming) => incoming.fmt(f),\n            Event::MarkBlockCompletedRequest(req) => req.fmt(f),\n            Event::MakeBlockExecutableRequest(req) => req.fmt(f),\n        }\n    }\n}\n\nimpl From<NetRequestIncoming> for Event {\n    #[inline]\n    fn from(incoming: NetRequestIncoming) -> Self {\n        Event::NetRequestIncoming(Box::new(incoming))\n    }\n}\n\nimpl From<StorageRequest> for Event {\n    #[inline]\n    fn from(request: StorageRequest) -> Self {\n        Event::StorageRequest(Box::new(request))\n    }\n}\n\nimpl From<MakeBlockExecutableRequest> for Event {\n    #[inline]\n    fn from(request: MakeBlockExecutableRequest) -> Self {\n        Event::MakeBlockExecutableRequest(Box::new(request))\n    }\n}\n"
  },
  {
    "path": "node/src/components/storage/metrics.rs",
    "content": "use prometheus::{self, IntGauge, Registry};\n\nuse crate::unregister_metric;\n\nconst CHAIN_HEIGHT_NAME: &str = \"chain_height\";\nconst CHAIN_HEIGHT_HELP: &str = \"highest complete block (DEPRECATED)\";\n\nconst HIGHEST_AVAILABLE_BLOCK_NAME: &str = \"highest_available_block_height\";\nconst HIGHEST_AVAILABLE_BLOCK_HELP: &str =\n    \"highest height of the available block range (the highest contiguous chain of complete blocks)\";\n\nconst LOWEST_AVAILABLE_BLOCK_NAME: &str = \"lowest_available_block_height\";\nconst LOWEST_AVAILABLE_BLOCK_HELP: &str =\n    \"lowest height of the available block range (the highest contiguous chain of complete blocks)\";\n\n/// Metrics for the storage component.\n#[derive(Debug)]\npub struct Metrics {\n    // deprecated - replaced by `highest_available_block`\n    pub(super) chain_height: IntGauge,\n    pub(super) highest_available_block: IntGauge,\n    pub(super) lowest_available_block: IntGauge,\n    registry: Registry,\n}\n\nimpl Metrics {\n    /// Constructor of metrics which creates and registers metrics objects for use.\n    pub(super) fn new(registry: &Registry) -> Result<Self, prometheus::Error> {\n        let chain_height = IntGauge::new(CHAIN_HEIGHT_NAME, CHAIN_HEIGHT_HELP)?;\n        let highest_available_block =\n            IntGauge::new(HIGHEST_AVAILABLE_BLOCK_NAME, HIGHEST_AVAILABLE_BLOCK_HELP)?;\n        let lowest_available_block =\n            IntGauge::new(LOWEST_AVAILABLE_BLOCK_NAME, LOWEST_AVAILABLE_BLOCK_HELP)?;\n\n        registry.register(Box::new(chain_height.clone()))?;\n        registry.register(Box::new(highest_available_block.clone()))?;\n        registry.register(Box::new(lowest_available_block.clone()))?;\n\n        Ok(Metrics {\n            chain_height,\n            highest_available_block,\n            lowest_available_block,\n            registry: registry.clone(),\n        })\n    }\n}\n\nimpl Drop for Metrics {\n    fn drop(&mut self) {\n        unregister_metric!(self.registry, 
self.chain_height);\n        unregister_metric!(self.registry, self.highest_available_block);\n        unregister_metric!(self.registry, self.lowest_available_block);\n    }\n}\n"
  },
  {
    "path": "node/src/components/storage/object_pool.rs",
    "content": "//! A reference pool for items/objects.\n//!\n//! Its core responsibility is to deduplicate potentially expensive loads by keeping a weak\n//! reference to any loaded object around, so that any load request for an object that is currently\n//! in active use can be satisfied using the already existing copy.\n//!\n//! It differs from a cache in that it does not hold strong references to an item itself -- once an\n//! item is no longer used, it will not be kept in the pool for a later request. As a consequence\n//! the memory pool will never consume significantly more memory than what would otherwise be\n//! required by the loaded objects that are in active use anyway and thus has an \"infinite\"\n//! capacity.\nuse std::{\n    borrow::Borrow,\n    collections::HashMap,\n    hash::Hash,\n    sync::{Arc, Weak},\n};\n\nuse datasize::DataSize;\n\n/// A pool of items/objects.\n///\n/// Maintains a pool of weak references and automatically purges them in configurable intervals.\n///\n/// # DataSize\n///\n/// Typically shared references like `Arc`s are not counted when using `DataSize`, however\n/// `ObjectPool` counts its items in \"regular\" manner, as it is assumed to be the virtual owner.\n\n#[derive(Debug)]\npub(super) struct ObjectPool<I> {\n    /// The actual object pool.\n    items: HashMap<I, Weak<[u8]>>,\n    /// Interval for garbage collection, will remove dead references on every n-th `put()`.\n    garbage_collect_interval: u16,\n    /// Counts how many objects have been added since the last garbage collect interval.\n    put_count: u16,\n}\n\nimpl<I> ObjectPool<I> {\n    /// Creates a new object pool.\n    pub(super) fn new(garbage_collect_interval: u16) -> Self {\n        Self {\n            items: HashMap::new(),\n            garbage_collect_interval,\n            put_count: 0,\n        }\n    }\n}\n\n// Note: There is currently a design issue in the `datasize` crate where it does not gracefully\n//       handle unsized types like slices, thus 
the derivation for any implementation of `DataSize\n//       for Box<[T]>` based on `DataSize for Box<T>` and `DataSize for [T]` is bound to be\n//       incorrect.\n//\n//       Since we currently only use very few different `T`s for `ObjectPool<T>`, we opt to\n//       implement it manually here and gain a chance to optimize as well.\nimpl DataSize for ObjectPool<Box<[u8]>> {\n    const IS_DYNAMIC: bool = true;\n\n    const STATIC_HEAP_SIZE: usize = 0;\n\n    fn estimate_heap_size(&self) -> usize {\n        // See https://docs.rs/datasize/0.2.9/src/datasize/std.rs.html#213-224 for details.\n        let base = self.items.capacity()\n            * (size_of::<Box<[u8]>>() + size_of::<Weak<[u8]>>() + size_of::<usize>());\n\n        base + self\n            .items\n            .iter()\n            .map(|(key, value)| {\n                // Unfortunately we have to check every instance by upgrading.\n                let value_size = value.upgrade().map(|v| v.len()).unwrap_or_default();\n                key.len() + value_size\n            })\n            .sum::<usize>()\n    }\n}\n\nimpl<I> ObjectPool<I>\nwhere\n    I: Hash + Eq,\n{\n    /// Stores a serialized object in the pool.\n    ///\n    /// At configurable intervals (see `garbage_collect_interval`), the entire pool will be checked\n    /// and dead references pruned.\n    pub(super) fn put(&mut self, id: I, item: Weak<[u8]>) {\n        self.items.insert(id, item);\n\n        if self.put_count >= self.garbage_collect_interval {\n            self.items.retain(|_, item| item.strong_count() > 0);\n\n            self.put_count = 0;\n        }\n\n        self.put_count += 1;\n    }\n\n    /// Retrieves an object from the pool, if present.\n    pub(super) fn get<Q>(&self, id: &Q) -> Option<Arc<[u8]>>\n    where\n        I: Borrow<Q>,\n        Q: Hash + Eq + ?Sized,\n    {\n        self.items.get(id).and_then(Weak::upgrade)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::sync::Arc;\n\n    use datasize::DataSize;\n\n 
   use casper_types::Transaction;\n\n    use super::ObjectPool;\n    use crate::components::fetcher::FetchItem;\n\n    impl<I> ObjectPool<I>\n    where\n        I: DataSize,\n    {\n        fn num_entries(&self) -> usize {\n            self.items.len()\n        }\n    }\n\n    #[test]\n    fn can_load_and_store_items() {\n        let mut pool: ObjectPool<<Transaction as FetchItem>::Id> = ObjectPool::new(5);\n        let mut rng = crate::new_rng();\n\n        let txn1 = Transaction::random(&mut rng);\n        let txn2 = Transaction::random(&mut rng);\n        let txn1_id = txn1.fetch_id();\n        let txn2_id = txn2.fetch_id();\n        let txn1_serialized = bincode::serialize(&txn1).expect(\"could not serialize first deploy\");\n        let txn2_serialized = bincode::serialize(&txn2).expect(\"could not serialize second deploy\");\n\n        let txn1_shared = txn1_serialized.into();\n        let txn2_shared = txn2_serialized.into();\n\n        assert!(pool.get(&txn1_id).is_none());\n        assert!(pool.get(&txn2_id).is_none());\n\n        pool.put(txn1_id, Arc::downgrade(&txn1_shared));\n        assert!(Arc::ptr_eq(\n            &pool.get(&txn1_id).expect(\"did not find d1\"),\n            &txn1_shared\n        ));\n        assert!(pool.get(&txn2_id).is_none());\n\n        pool.put(txn2_id, Arc::downgrade(&txn2_shared));\n        assert!(Arc::ptr_eq(\n            &pool.get(&txn1_id).expect(\"did not find d1\"),\n            &txn1_shared\n        ));\n        assert!(Arc::ptr_eq(\n            &pool.get(&txn2_id).expect(\"did not find d1\"),\n            &txn2_shared\n        ));\n    }\n\n    #[test]\n    fn frees_memory_after_reference_loss() {\n        let mut pool: ObjectPool<<Transaction as FetchItem>::Id> = ObjectPool::new(5);\n        let mut rng = crate::new_rng();\n\n        let txn1 = Transaction::random(&mut rng);\n        let txn1_id = txn1.fetch_id();\n        let txn1_serialized = bincode::serialize(&txn1).expect(\"could not serialize first 
deploy\");\n\n        let txn1_shared = txn1_serialized.into();\n\n        assert!(pool.get(&txn1_id).is_none());\n\n        pool.put(txn1_id, Arc::downgrade(&txn1_shared));\n        assert!(Arc::ptr_eq(\n            &pool.get(&txn1_id).expect(\"did not find d1\"),\n            &txn1_shared\n        ));\n\n        drop(txn1_shared);\n        assert!(pool.get(&txn1_id).is_none());\n    }\n\n    #[test]\n    fn garbage_is_collected() {\n        let mut pool: ObjectPool<<Transaction as FetchItem>::Id> = ObjectPool::new(5);\n        let mut rng = crate::new_rng();\n\n        assert_eq!(pool.num_entries(), 0);\n\n        for i in 0..5 {\n            let txn = Transaction::random(&mut rng);\n            let id = txn.fetch_id();\n            let serialized = bincode::serialize(&txn).expect(\"could not serialize first deploy\");\n            let shared = serialized.into();\n            pool.put(id, Arc::downgrade(&shared));\n            assert_eq!(pool.num_entries(), i + 1);\n            drop(shared);\n            assert_eq!(pool.num_entries(), i + 1);\n        }\n\n        let txn = Transaction::random(&mut rng);\n        let id = txn.fetch_id();\n        let serialized = bincode::serialize(&txn).expect(\"could not serialize first deploy\");\n        let shared = serialized.into();\n        pool.put(id, Arc::downgrade(&shared));\n        assert_eq!(pool.num_entries(), 1);\n        drop(shared);\n        assert_eq!(pool.num_entries(), 1);\n    }\n}\n"
  },
  {
    "path": "node/src/components/storage/tests.rs",
    "content": "//! Unit tests for the storage component.\n\nuse std::{\n    collections::{BTreeMap, BTreeSet, HashMap, HashSet},\n    convert::TryInto,\n    fs::{self, File},\n    io,\n    iter::{self},\n    path::{Path, PathBuf},\n    sync::Arc,\n};\n\nuse once_cell::sync::Lazy;\nuse rand::{prelude::SliceRandom, Rng};\nuse serde::{Deserialize, Serialize};\nuse smallvec::smallvec;\n\nuse casper_storage::block_store::{\n    types::{ApprovalsHashes, BlockHashHeightAndEra, BlockTransfers},\n    BlockStoreProvider, BlockStoreTransaction, DataReader, DataWriter,\n};\nuse casper_types::{\n    execution::{Effects, ExecutionResult, ExecutionResultV2},\n    generate_ed25519_keypair,\n    testing::TestRng,\n    ApprovalsHash, AvailableBlockRange, Block, BlockHash, BlockHeader, BlockHeaderWithSignatures,\n    BlockSignatures, BlockSignaturesV2, BlockV2, ChainNameDigest, Chainspec, ChainspecRawBytes,\n    Deploy, DeployHash, Digest, EraId, ExecutionInfo, FinalitySignature, FinalitySignatureV2, Gas,\n    InitiatorAddr, ProtocolVersion, PublicKey, SecretKey, TestBlockBuilder, TestBlockV1Builder,\n    TimeDiff, Transaction, TransactionConfig, TransactionHash, TransactionV1Hash, Transfer,\n    TransferV2, U512,\n};\nuse tempfile::tempdir;\n\nuse super::{\n    move_storage_files_to_network_subdir, should_move_storage_files_to_network_subdir, Config,\n    Storage, FORCE_RESYNC_FILE_NAME,\n};\nuse crate::{\n    components::fetcher::{FetchItem, FetchResponse},\n    effect::{\n        requests::{MarkBlockCompletedRequest, StorageRequest},\n        Multiple,\n    },\n    storage::TransactionHeader,\n    testing::{ComponentHarness, UnitTestEvent},\n    types::{\n        sync_leap_validation_metadata::SyncLeapValidationMetaData, BlockWithMetadata,\n        SyncLeapIdentifier,\n    },\n    utils::{Loadable, WithDir},\n};\n\nconst RECENT_ERA_COUNT: u64 = 7;\nconst MAX_TTL: TimeDiff = TimeDiff::from_seconds(86400);\n\nfn new_config(harness: &ComponentHarness<UnitTestEvent>) -> Config {\n    
const MIB: usize = 1024 * 1024;\n\n    // Restrict all stores to 50 mibibytes, to catch issues before filling up the entire disk.\n    Config {\n        path: harness.tmp.path().join(\"storage\"),\n        max_block_store_size: 50 * MIB,\n        max_deploy_store_size: 50 * MIB,\n        max_deploy_metadata_store_size: 50 * MIB,\n        max_state_store_size: 50 * MIB,\n        enable_mem_deduplication: true,\n        mem_pool_prune_interval: 4,\n    }\n}\n\nfn block_headers_into_heights(block_headers: &[BlockHeader]) -> Vec<u64> {\n    block_headers\n        .iter()\n        .map(|block_header| block_header.height())\n        .collect()\n}\n\nfn block_headers_with_signatures_into_heights(\n    block_headers_with_signatures: &[BlockHeaderWithSignatures],\n) -> Vec<u64> {\n    block_headers_with_signatures\n        .iter()\n        .map(|block_header_with_signatures| block_header_with_signatures.block_header().height())\n        .collect()\n}\n\nfn create_sync_leap_test_chain(\n    non_signed_blocks: &[u64], // indices of blocks to not be signed\n    include_switch_block_at_tip: bool,\n    maybe_recent_era_count: Option<u64>, // if Some, override default `RECENT_ERA_COUNT`\n) -> (Storage, Chainspec, Vec<Block>) {\n    // Test chain:\n    //      S0      S1 B2 B3 S4 B5 B6 S7 B8 B9 S10 B11 B12\n    //  era 0 | era 1 | era 2  | era 3  | era 4   | era 5 ...\n    //  where\n    //   S - switch block\n    //   B - non-switch block\n\n    // If `include_switch_block_at_tip`, the additional switch block of height 13 will be added at\n    // the tip of the chain.\n    let (chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources(\"local\");\n    let mut harness = ComponentHarness::default();\n    let mut storage = storage_fixture_from_parts(\n        &harness,\n        None,\n        Some(chainspec.protocol_version()),\n        None,\n        None,\n        maybe_recent_era_count,\n    );\n\n    let mut trusted_validator_weights = BTreeMap::new();\n\n    let 
(validator_secret_key, validator_public_key) = generate_ed25519_keypair();\n    trusted_validator_weights.insert(validator_public_key, U512::from(2000000000000u64));\n\n    let mut blocks: Vec<Block> = vec![];\n    let block_count = 13 + include_switch_block_at_tip as u64;\n    (0_u64..block_count).for_each(|height| {\n        let is_switch = height == 0 || height % 3 == 1;\n        let era_id = EraId::from(match height {\n            0 => 0,\n            1 => 1,\n            _ => (height + 4) / 3,\n        });\n        let parent_hash = if height == 0 {\n            BlockHash::new(Digest::default())\n        } else {\n            *blocks.get((height - 1) as usize).unwrap().hash()\n        };\n\n        let block = TestBlockBuilder::new()\n            .era(era_id)\n            .height(height)\n            .protocol_version(chainspec.protocol_version())\n            .parent_hash(parent_hash)\n            .validator_weights(trusted_validator_weights.clone())\n            .switch_block(is_switch)\n            .build_versioned(&mut harness.rng);\n\n        blocks.push(block);\n    });\n    blocks.iter().for_each(|block| {\n        assert!(put_block(\n            &mut harness,\n            &mut storage,\n            Arc::new(block.clone()),\n        ));\n\n        let fs = FinalitySignatureV2::create(\n            *block.hash(),\n            block.height(),\n            block.era_id(),\n            chainspec.name_hash(),\n            &validator_secret_key,\n        );\n        assert!(fs.is_verified().is_ok());\n\n        let mut block_signatures = BlockSignaturesV2::new(\n            *block.hash(),\n            block.height(),\n            block.era_id(),\n            chainspec.name_hash(),\n        );\n        block_signatures.insert_signature(fs.public_key().clone(), *fs.signature());\n\n        if !non_signed_blocks.contains(&block.height()) {\n            assert!(put_block_signatures(\n                &mut harness,\n                &mut storage,\n                
block_signatures.into(),\n            ));\n            storage.completed_blocks.insert(block.height());\n        }\n    });\n    (storage, chainspec, blocks)\n}\n\n/// Storage component test fixture.\n///\n/// Creates a storage component in a temporary directory.\n///\n/// # Panics\n///\n/// Panics if setting up the storage fixture fails.\nfn storage_fixture(harness: &ComponentHarness<UnitTestEvent>) -> Storage {\n    let cfg = new_config(harness);\n    Storage::new(\n        &WithDir::new(harness.tmp.path(), cfg),\n        None,\n        ProtocolVersion::from_parts(1, 0, 0),\n        EraId::default(),\n        \"test\",\n        MAX_TTL.into(),\n        RECENT_ERA_COUNT,\n        None,\n        false,\n        TransactionConfig::default(),\n    )\n    .expect(\"could not create storage component fixture\")\n}\n\n/// Storage component test fixture.\n///\n/// Creates a storage component in a temporary directory.\n///\n/// # Panics\n///\n/// Panics if setting up the storage fixture fails.\nfn storage_fixture_from_parts(\n    harness: &ComponentHarness<UnitTestEvent>,\n    hard_reset_to_start_of_era: Option<EraId>,\n    protocol_version: Option<ProtocolVersion>,\n    network_name: Option<&str>,\n    max_ttl: Option<TimeDiff>,\n    recent_era_count: Option<u64>,\n) -> Storage {\n    let cfg = new_config(harness);\n    Storage::new(\n        &WithDir::new(harness.tmp.path(), cfg),\n        hard_reset_to_start_of_era,\n        protocol_version.unwrap_or(ProtocolVersion::V1_0_0),\n        EraId::default(),\n        network_name.unwrap_or(\"test\"),\n        max_ttl.unwrap_or(MAX_TTL).into(),\n        recent_era_count.unwrap_or(RECENT_ERA_COUNT),\n        None,\n        false,\n        TransactionConfig::default(),\n    )\n    .expect(\"could not create storage component fixture from parts\")\n}\n\n/// Storage component test fixture with force resync enabled.\n///\n/// Creates a storage component in a given temporary directory.\n///\n/// # Panics\n///\n/// Panics if 
setting up the storage fixture fails.\nfn storage_fixture_with_force_resync(cfg: &WithDir<Config>) -> Storage {\n    Storage::new(\n        cfg,\n        None,\n        ProtocolVersion::from_parts(1, 0, 0),\n        EraId::default(),\n        \"test\",\n        MAX_TTL.into(),\n        RECENT_ERA_COUNT,\n        None,\n        true,\n        TransactionConfig::default(),\n    )\n    .expect(\"could not create storage component fixture\")\n}\n\n/// Storage component test fixture.\n///\n/// Creates a storage component in a temporary directory, but with a hard reset to a specified era.\n///\n/// # Panics\n///\n/// Panics if setting up the storage fixture fails.\nfn storage_fixture_with_hard_reset(\n    harness: &ComponentHarness<UnitTestEvent>,\n    reset_era_id: EraId,\n) -> Storage {\n    storage_fixture_from_parts(\n        harness,\n        Some(reset_era_id),\n        Some(ProtocolVersion::from_parts(1, 1, 0)),\n        None,\n        None,\n        None,\n    )\n}\n\n/// Creates 3 random signatures for the given block.\nfn random_signatures(\n    rng: &mut TestRng,\n    block_hash: BlockHash,\n    block_height: u64,\n    era_id: EraId,\n    chain_name_hash: ChainNameDigest,\n) -> BlockSignatures {\n    let mut block_signatures =\n        BlockSignaturesV2::new(block_hash, block_height, era_id, chain_name_hash);\n    for _ in 0..3 {\n        let secret_key = SecretKey::random(rng);\n        let signature = FinalitySignatureV2::create(\n            block_hash,\n            block_height,\n            era_id,\n            chain_name_hash,\n            &secret_key,\n        );\n        block_signatures.insert_signature(signature.public_key().clone(), *signature.signature());\n    }\n    block_signatures.into()\n}\n\n/// Loads a block from a storage component.\nfn get_block(\n    harness: &mut ComponentHarness<UnitTestEvent>,\n    storage: &mut Storage,\n    block_hash: BlockHash,\n) -> Option<Block> {\n    let response = harness.send_request(storage, move |responder| 
{\n        StorageRequest::GetBlock {\n            block_hash,\n            responder,\n        }\n        .into()\n    });\n    assert!(harness.is_idle());\n    response\n}\n\nfn is_block_stored(\n    harness: &mut ComponentHarness<UnitTestEvent>,\n    storage: &mut Storage,\n    block_hash: BlockHash,\n) -> bool {\n    let response = harness.send_request(storage, move |responder| {\n        StorageRequest::IsBlockStored {\n            block_hash,\n            responder,\n        }\n        .into()\n    });\n    assert!(harness.is_idle());\n    response\n}\n\n/// Loads a block header by height from a storage component.\n/// Requesting a block header by height is required currently by the RPC\n/// component.\nfn get_block_header_by_height(\n    harness: &mut ComponentHarness<UnitTestEvent>,\n    storage: &mut Storage,\n    block_height: u64,\n    only_from_available_block_range: bool,\n) -> Option<BlockHeader> {\n    let response = harness.send_request(storage, move |responder| {\n        StorageRequest::GetBlockHeaderByHeight {\n            block_height,\n            only_from_available_block_range,\n            responder,\n        }\n        .into()\n    });\n    assert!(harness.is_idle());\n    response\n}\n\n/// Loads a set of `Transaction`s from a storage component.\n///\n/// Applies `into_naive` to all loaded `Transaction`s.\nfn get_naive_transactions(\n    harness: &mut ComponentHarness<UnitTestEvent>,\n    storage: &mut Storage,\n    transaction_hashes: Multiple<TransactionHash>,\n) -> Vec<Option<Transaction>> {\n    let response = harness.send_request(storage, move |responder| {\n        StorageRequest::GetTransactions {\n            transaction_hashes: transaction_hashes.to_vec(),\n            responder,\n        }\n        .into()\n    });\n    assert!(harness.is_idle());\n    response\n        .into_iter()\n        .map(|opt_twfa| {\n            if let Some((transaction, maybe_approvals)) = opt_twfa {\n                let txn = match maybe_approvals {\n 
                   None => transaction,\n                    Some(approvals) => transaction.with_approvals(approvals),\n                };\n                Some(txn)\n            } else {\n                None\n            }\n        })\n        .collect()\n}\n\n/// Loads a deploy with associated execution info from the storage component.\n///\n/// Any potential finalized approvals are discarded.\nfn get_naive_transaction_and_execution_info(\n    storage: &mut Storage,\n    transaction_hash: TransactionHash,\n) -> Option<(Transaction, Option<ExecutionInfo>)> {\n    let transaction = storage.get_transaction_by_hash(transaction_hash)?;\n    let execution_info = storage.read_execution_info(transaction.hash());\n    Some((transaction, execution_info))\n}\n\n/// Requests the highest complete block from a storage component.\nfn get_highest_complete_block(\n    harness: &mut ComponentHarness<UnitTestEvent>,\n    storage: &mut Storage,\n) -> Option<Block> {\n    let response = harness.send_request(storage, |responder| {\n        StorageRequest::GetHighestCompleteBlock { responder }.into()\n    });\n    assert!(harness.is_idle());\n    response\n}\n\n/// Requests the highest complete block header from a storage component.\nfn get_highest_complete_block_header(\n    harness: &mut ComponentHarness<UnitTestEvent>,\n    storage: &mut Storage,\n) -> Option<BlockHeader> {\n    let response = harness.send_request(storage, |responder| {\n        StorageRequest::GetHighestCompleteBlockHeader { responder }.into()\n    });\n    assert!(harness.is_idle());\n    response\n}\n\n/// Get the era ids of multiple transactions.\nfn get_transactions_era_ids(\n    harness: &mut ComponentHarness<UnitTestEvent>,\n    storage: &mut Storage,\n    transaction_hashes: HashSet<TransactionHash>,\n) -> HashSet<EraId> {\n    let response = harness.send_request(storage, |responder| {\n        StorageRequest::GetTransactionsEraIds {\n            transaction_hashes,\n            responder,\n        }\n      
  .into()\n    });\n    assert!(harness.is_idle());\n    response\n}\n\n/// Stores a block in a storage component.\nfn put_complete_block(\n    harness: &mut ComponentHarness<UnitTestEvent>,\n    storage: &mut Storage,\n    block: Block,\n) -> bool {\n    let block_height = block.height();\n    let response = harness.send_request(storage, move |responder| {\n        StorageRequest::PutBlock {\n            block: Arc::new(block),\n            responder,\n        }\n        .into()\n    });\n    assert!(harness.is_idle());\n    harness.send_request(storage, move |responder| {\n        MarkBlockCompletedRequest {\n            block_height,\n            responder,\n        }\n        .into()\n    });\n    assert!(harness.is_idle());\n    response\n}\n\n// Mark block complete\nfn mark_block_complete(\n    harness: &mut ComponentHarness<UnitTestEvent>,\n    storage: &mut Storage,\n    block_height: u64,\n) -> bool {\n    let response = harness.send_request(storage, move |responder| {\n        MarkBlockCompletedRequest {\n            block_height,\n            responder,\n        }\n        .into()\n    });\n    assert!(harness.is_idle());\n    response\n}\n\n/// Stores a block in a storage component.\nfn put_block(\n    harness: &mut ComponentHarness<UnitTestEvent>,\n    storage: &mut Storage,\n    block: Arc<Block>,\n) -> bool {\n    let response = harness.send_request(storage, move |responder| {\n        StorageRequest::PutBlock { block, responder }.into()\n    });\n    assert!(harness.is_idle());\n    response\n}\n\n/// Stores a block's signatures in a storage component.\nfn put_block_signatures(\n    harness: &mut ComponentHarness<UnitTestEvent>,\n    storage: &mut Storage,\n    signatures: BlockSignatures,\n) -> bool {\n    let response = harness.send_request(storage, move |responder| {\n        StorageRequest::PutBlockSignatures {\n            signatures,\n            responder,\n        }\n        .into()\n    });\n    assert!(harness.is_idle());\n    
response\n}\n\n/// Stores a finality signature in a storage component.\nfn put_finality_signature(\n    harness: &mut ComponentHarness<UnitTestEvent>,\n    storage: &mut Storage,\n    signature: Box<FinalitySignature>,\n) -> bool {\n    let response = harness.send_request(storage, move |responder| {\n        StorageRequest::PutFinalitySignature {\n            signature,\n            responder,\n        }\n        .into()\n    });\n    assert!(harness.is_idle());\n    response\n}\n\n/// Stores a `Transaction` in a storage component.\nfn put_transaction(\n    harness: &mut ComponentHarness<UnitTestEvent>,\n    storage: &mut Storage,\n    transaction: &Transaction,\n) -> bool {\n    let transaction = Arc::new(transaction.clone());\n    let response = harness.send_request(storage, move |responder| {\n        StorageRequest::PutTransaction {\n            transaction,\n            responder,\n        }\n        .into()\n    });\n    assert!(harness.is_idle());\n    response\n}\n\n/// Stores execution results in a storage component.\nfn put_execution_results(\n    harness: &mut ComponentHarness<UnitTestEvent>,\n    storage: &mut Storage,\n    block_hash: BlockHash,\n    block_height: u64,\n    era_id: EraId,\n    execution_results: HashMap<TransactionHash, ExecutionResult>,\n) {\n    harness.send_request(storage, move |responder| {\n        StorageRequest::PutExecutionResults {\n            block_hash: Box::new(block_hash),\n            block_height,\n            era_id,\n            execution_results,\n            responder,\n        }\n        .into()\n    });\n    assert!(harness.is_idle());\n}\n\n/// Gets available block range from storage.\nfn get_available_block_range(\n    harness: &mut ComponentHarness<UnitTestEvent>,\n    storage: &mut Storage,\n) -> AvailableBlockRange {\n    let response = harness.send_request(storage, move |responder| {\n        StorageRequest::GetAvailableBlockRange { responder }.into()\n    });\n    assert!(harness.is_idle());\n    
response\n}\n\nfn get_approvals_hashes(\n    harness: &mut ComponentHarness<UnitTestEvent>,\n    storage: &mut Storage,\n    block_hash: BlockHash,\n) -> Option<ApprovalsHashes> {\n    let response = harness.send_request(storage, move |responder| {\n        StorageRequest::GetApprovalsHashes {\n            block_hash,\n            responder,\n        }\n        .into()\n    });\n    assert!(harness.is_idle());\n    response\n}\n\nfn get_block_header(\n    harness: &mut ComponentHarness<UnitTestEvent>,\n    storage: &mut Storage,\n    block_hash: BlockHash,\n    only_from_available_block_range: bool,\n) -> Option<BlockHeader> {\n    let response = harness.send_request(storage, move |responder| {\n        StorageRequest::GetBlockHeader {\n            block_hash,\n            only_from_available_block_range,\n            responder,\n        }\n        .into()\n    });\n    assert!(harness.is_idle());\n    response\n}\n\nfn get_block_transfers(\n    harness: &mut ComponentHarness<UnitTestEvent>,\n    storage: &mut Storage,\n    block_hash: BlockHash,\n) -> Option<Vec<Transfer>> {\n    let response = harness.send_request(storage, move |responder| {\n        StorageRequest::GetBlockTransfers {\n            block_hash,\n            responder,\n        }\n        .into()\n    });\n    assert!(harness.is_idle());\n    response\n}\n\nfn get_block_and_metadata_by_height(\n    harness: &mut ComponentHarness<UnitTestEvent>,\n    storage: &mut Storage,\n    block_height: u64,\n    only_from_available_block_range: bool,\n) -> Option<BlockWithMetadata> {\n    let response = harness.send_request(storage, move |responder| {\n        StorageRequest::GetBlockAndMetadataByHeight {\n            block_height,\n            only_from_available_block_range,\n            responder,\n        }\n        .into()\n    });\n    assert!(harness.is_idle());\n    response\n}\n\nfn get_execution_results(\n    harness: &mut ComponentHarness<UnitTestEvent>,\n    storage: &mut Storage,\n    block_hash: 
BlockHash,\n) -> Option<Vec<(TransactionHash, TransactionHeader, ExecutionResult)>> {\n    let response = harness.send_request(storage, move |responder| {\n        StorageRequest::GetExecutionResults {\n            block_hash,\n            responder,\n        }\n        .into()\n    });\n    assert!(harness.is_idle());\n    response\n}\n\nfn get_block_signature(\n    harness: &mut ComponentHarness<UnitTestEvent>,\n    storage: &mut Storage,\n    block_hash: BlockHash,\n    public_key: Box<PublicKey>,\n) -> Option<FinalitySignature> {\n    let response = harness.send_request(storage, move |responder| {\n        StorageRequest::GetBlockSignature {\n            block_hash,\n            public_key,\n            responder,\n        }\n        .into()\n    });\n    assert!(harness.is_idle());\n    response\n}\n\n#[test]\nfn get_block_of_non_existing_block_returns_none() {\n    let mut harness = ComponentHarness::default();\n    let mut storage = storage_fixture(&harness);\n\n    let block_hash = BlockHash::random(&mut harness.rng);\n    let response = get_block(&mut harness, &mut storage, block_hash);\n\n    assert!(response.is_none());\n    assert!(harness.is_idle());\n}\n\n#[test]\nfn read_block_by_height_with_available_block_range() {\n    let mut harness = ComponentHarness::default();\n\n    // Create a random block, load and store it.\n    let block_33 = TestBlockBuilder::new()\n        .era(1)\n        .height(33)\n        .protocol_version(ProtocolVersion::from_parts(1, 5, 0))\n        .switch_block(true)\n        .build_versioned(&mut harness.rng);\n\n    let mut storage = storage_fixture(&harness);\n    assert!(get_block_header_by_height(&mut harness, &mut storage, 0, false).is_none());\n    assert!(get_block_header_by_height(&mut harness, &mut storage, 0, true).is_none());\n\n    let was_new = put_complete_block(&mut harness, &mut storage, block_33.clone());\n    assert!(was_new);\n\n    assert_eq!(\n        get_block_header_by_height(&mut harness, &mut 
storage, 33, false).as_ref(),\n        Some(&block_33.clone_header())\n    );\n    assert_eq!(\n        get_block_header_by_height(&mut harness, &mut storage, 33, true).as_ref(),\n        Some(&block_33.clone_header())\n    );\n\n    // Create a random block as a different height, load and store it.\n    let block_14 = TestBlockBuilder::new()\n        .era(1)\n        .height(14)\n        .protocol_version(ProtocolVersion::from_parts(1, 5, 0))\n        .switch_block(false)\n        .build_versioned(&mut harness.rng);\n\n    let was_new = put_complete_block(&mut harness, &mut storage, block_14.clone());\n    assert!(was_new);\n\n    assert_eq!(\n        get_block_header_by_height(&mut harness, &mut storage, 14, false).as_ref(),\n        Some(&block_14.clone_header())\n    );\n    assert!(get_block_header_by_height(&mut harness, &mut storage, 14, true).is_none());\n}\n\n#[test]\nfn can_retrieve_block_by_height() {\n    let mut harness = ComponentHarness::default();\n\n    // Create some random blocks, load and store them.\n    let block_33 = TestBlockBuilder::new()\n        .era(1)\n        .height(33)\n        .protocol_version(ProtocolVersion::from_parts(1, 5, 0))\n        .switch_block(true)\n        .build_versioned(&mut harness.rng);\n    let block_14 = TestBlockBuilder::new()\n        .era(1)\n        .height(14)\n        .protocol_version(ProtocolVersion::from_parts(1, 5, 0))\n        .switch_block(false)\n        .build_versioned(&mut harness.rng);\n    let block_99 = TestBlockBuilder::new()\n        .era(2)\n        .height(99)\n        .protocol_version(ProtocolVersion::from_parts(1, 5, 0))\n        .switch_block(true)\n        .build_versioned(&mut harness.rng);\n\n    let mut storage = storage_fixture(&harness);\n\n    // Both block at ID and highest block should return `None` initially.\n    assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 0, false).is_none());\n    assert!(get_block_header_by_height(&mut harness, &mut storage, 0, 
false).is_none());\n    assert!(get_highest_complete_block(&mut harness, &mut storage).is_none());\n    assert!(get_highest_complete_block_header(&mut harness, &mut storage).is_none());\n    assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 14, false).is_none());\n    assert!(get_block_header_by_height(&mut harness, &mut storage, 14, false).is_none());\n    assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 33, false).is_none());\n    assert!(get_block_header_by_height(&mut harness, &mut storage, 33, false).is_none());\n    assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 99, false).is_none());\n    assert!(get_block_header_by_height(&mut harness, &mut storage, 99, false).is_none());\n\n    // Inserting 33 changes this.\n    let was_new = put_complete_block(&mut harness, &mut storage, block_33.clone());\n    assert!(was_new);\n\n    assert_eq!(\n        get_highest_complete_block(&mut harness, &mut storage).as_ref(),\n        Some(&block_33)\n    );\n    assert_eq!(\n        get_highest_complete_block_header(&mut harness, &mut storage).as_ref(),\n        Some(&block_33.clone_header())\n    );\n    assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 0, false).is_none());\n    assert!(get_block_header_by_height(&mut harness, &mut storage, 0, false).is_none());\n    assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 14, false).is_none());\n    assert!(get_block_header_by_height(&mut harness, &mut storage, 14, false).is_none());\n    assert_eq!(\n        get_block_and_metadata_by_height(&mut harness, &mut storage, 33, false)\n            .map(|blk| blk.block)\n            .as_ref(),\n        Some(&block_33)\n    );\n    assert_eq!(\n        get_block_header_by_height(&mut harness, &mut storage, 33, true).as_ref(),\n        Some(&block_33.clone_header())\n    );\n    assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 99, false).is_none());\n    
assert!(get_block_header_by_height(&mut harness, &mut storage, 99, false).is_none());\n\n    // Inserting block with height 14, no change in highest.\n    let was_new = put_complete_block(&mut harness, &mut storage, block_14.clone());\n    assert!(was_new);\n\n    assert_eq!(\n        get_highest_complete_block(&mut harness, &mut storage).as_ref(),\n        Some(&block_33)\n    );\n    assert_eq!(\n        get_highest_complete_block_header(&mut harness, &mut storage).as_ref(),\n        Some(&block_33.clone_header())\n    );\n    assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 0, false).is_none());\n    assert!(get_block_header_by_height(&mut harness, &mut storage, 0, false).is_none());\n    assert_eq!(\n        get_block_and_metadata_by_height(&mut harness, &mut storage, 14, false)\n            .map(|blk| blk.block)\n            .as_ref(),\n        Some(&block_14)\n    );\n    assert_eq!(\n        get_block_header_by_height(&mut harness, &mut storage, 14, true).as_ref(),\n        None\n    );\n    assert_eq!(\n        get_block_header_by_height(&mut harness, &mut storage, 14, false).as_ref(),\n        Some(&block_14.clone_header())\n    );\n    assert_eq!(\n        get_block_and_metadata_by_height(&mut harness, &mut storage, 33, false)\n            .map(|blk| blk.block)\n            .as_ref(),\n        Some(&block_33)\n    );\n    assert_eq!(\n        get_block_header_by_height(&mut harness, &mut storage, 33, false).as_ref(),\n        Some(&block_33.clone_header())\n    );\n    assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 9, false).is_none());\n    assert!(get_block_header_by_height(&mut harness, &mut storage, 99, false).is_none());\n\n    // Inserting block with height 99, changes highest.\n    let was_new = put_complete_block(&mut harness, &mut storage, block_99.clone());\n    // Mark block 99 as complete.\n    storage.completed_blocks.insert(99);\n    assert!(was_new);\n\n    assert_eq!(\n        
get_highest_complete_block(&mut harness, &mut storage).as_ref(),\n        Some(&block_99)\n    );\n    assert_eq!(\n        get_highest_complete_block_header(&mut harness, &mut storage).as_ref(),\n        Some(&block_99.clone_header())\n    );\n    assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 0, false).is_none());\n    assert!(get_block_header_by_height(&mut harness, &mut storage, 0, false).is_none());\n    assert_eq!(\n        get_block_and_metadata_by_height(&mut harness, &mut storage, 14, false)\n            .map(|blk| blk.block)\n            .as_ref(),\n        Some(&block_14)\n    );\n    assert_eq!(\n        get_block_header_by_height(&mut harness, &mut storage, 14, false).as_ref(),\n        Some(&block_14.clone_header())\n    );\n    assert_eq!(\n        get_block_and_metadata_by_height(&mut harness, &mut storage, 33, false)\n            .map(|blk| blk.block)\n            .as_ref(),\n        Some(&block_33)\n    );\n    assert_eq!(\n        get_block_header_by_height(&mut harness, &mut storage, 33, false).as_ref(),\n        Some(&block_33.clone_header())\n    );\n    assert_eq!(\n        get_block_and_metadata_by_height(&mut harness, &mut storage, 99, false)\n            .map(|blk| blk.block)\n            .as_ref(),\n        Some(&block_99)\n    );\n    assert_eq!(\n        get_block_header_by_height(&mut harness, &mut storage, 99, false).as_ref(),\n        Some(&block_99.clone_header())\n    );\n}\n\n#[test]\n#[should_panic(expected = \"duplicate entries\")]\nfn different_block_at_height_is_fatal() {\n    let mut harness = ComponentHarness::default();\n    let mut storage = storage_fixture(&harness);\n\n    // Create two different blocks at the same height.\n    let block_44_a = TestBlockBuilder::new()\n        .era(1)\n        .height(44)\n        .switch_block(false)\n        .build_versioned(&mut harness.rng);\n    let block_44_b = TestBlockBuilder::new()\n        .era(1)\n        .height(44)\n        .switch_block(false)\n        
.build_versioned(&mut harness.rng);\n\n    let was_new = put_complete_block(&mut harness, &mut storage, block_44_a.clone());\n    assert!(was_new);\n\n    let was_new = put_complete_block(&mut harness, &mut storage, block_44_a);\n    assert!(was_new);\n\n    // Putting a different block with the same height should now crash.\n    put_complete_block(&mut harness, &mut storage, block_44_b);\n}\n\n#[test]\nfn get_vec_of_non_existing_transaction_returns_nones() {\n    let mut harness = ComponentHarness::default();\n    let mut storage = storage_fixture(&harness);\n\n    let transaction_id = Transaction::random(&mut harness.rng).hash();\n    let response = get_naive_transactions(&mut harness, &mut storage, smallvec![transaction_id]);\n    assert_eq!(response, vec![None]);\n\n    // Also verify that we can retrieve using an empty set of transaction hashes.\n    let response = get_naive_transactions(&mut harness, &mut storage, smallvec![]);\n    assert!(response.is_empty());\n}\n\n#[test]\nfn can_retrieve_store_and_load_transactions() {\n    let mut harness = ComponentHarness::default();\n    let mut storage = storage_fixture(&harness);\n\n    // Create a random deploy, store and load it.\n    let transaction = Transaction::random(&mut harness.rng);\n\n    let was_new = put_transaction(&mut harness, &mut storage, &transaction);\n    let block_hash_height_and_era = BlockHashHeightAndEra::new(\n        BlockHash::random(&mut harness.rng),\n        harness.rng.gen(),\n        EraId::random(&mut harness.rng),\n    );\n\n    assert!(was_new, \"putting transaction should have returned `true`\");\n\n    // Storing the same deploy again should work, but yield a result of `false`.\n    let was_new_second_time = put_transaction(&mut harness, &mut storage, &transaction);\n    assert!(\n        !was_new_second_time,\n        \"storing transaction the second time should have returned `false`\"\n    );\n\n    // Retrieve the stored transaction.\n    let response =\n        
get_naive_transactions(&mut harness, &mut storage, smallvec![transaction.hash()]);\n    assert_eq!(response, vec![Some(transaction.clone())]);\n\n    let mut execution_results: HashMap<TransactionHash, ExecutionResult> = HashMap::new();\n    execution_results.insert(\n        transaction.hash(),\n        ExecutionResult::from(ExecutionResultV2::random(&mut harness.rng)),\n    );\n    put_execution_results(\n        &mut harness,\n        &mut storage,\n        block_hash_height_and_era.block_hash,\n        block_hash_height_and_era.block_height,\n        block_hash_height_and_era.era_id,\n        execution_results,\n    );\n\n    // Finally try to get the execution info as well. Since we did not store any, we expect to get\n    // the block hash and height from the indices.\n    let (transaction_response, exec_info_response) =\n        get_naive_transaction_and_execution_info(&mut storage, transaction.hash())\n            .expect(\"no transaction with execution info returned\");\n\n    assert_eq!(transaction_response, transaction);\n    match exec_info_response {\n        Some(ExecutionInfo {\n            block_hash,\n            block_height,\n            execution_result: Some(_),\n        }) => {\n            assert_eq!(block_hash_height_and_era.block_hash, block_hash);\n            assert_eq!(block_hash_height_and_era.block_height, block_height);\n        }\n        Some(ExecutionInfo {\n            execution_result: None,\n            ..\n        }) => {\n            panic!(\"We didn't receive any execution info but even though we previously stored it.\")\n        }\n        None => panic!(\n            \"We stored block info in the deploy hash index but we received nothing in the response.\"\n        ),\n    }\n\n    // Create a random transaction, store and load it.\n    let transaction = Transaction::random(&mut harness.rng);\n\n    assert!(put_transaction(&mut harness, &mut storage, &transaction));\n    // Don't insert to the transaction hash index. 
Since we have no execution results\n    // either, we should receive a `None` execution info response.\n    let (transaction_response, exec_info_response) =\n        get_naive_transaction_and_execution_info(&mut storage, transaction.hash())\n            .expect(\"no transaction with execution info returned\");\n\n    assert_eq!(transaction_response, transaction);\n    assert!(\n        exec_info_response.is_none(),\n        \"We didn't store any block info in the index but we received it in the response.\"\n    );\n}\n\n#[test]\nfn should_retrieve_transactions_era_ids() {\n    let mut harness = ComponentHarness::default();\n    let mut storage = storage_fixture(&harness);\n\n    // Populate the `transaction_hash_index` with 5 transactions from a block in era 1.\n    let era_1_transactions: Vec<Transaction> =\n        iter::repeat_with(|| Transaction::random(&mut harness.rng))\n            .take(5)\n            .collect();\n    let block_hash_height_and_era = BlockHashHeightAndEra::new(\n        BlockHash::random(&mut harness.rng),\n        harness.rng.gen(),\n        EraId::new(1),\n    );\n    let mut execution_results: HashMap<TransactionHash, ExecutionResult> = HashMap::new();\n    for transaction in era_1_transactions.clone() {\n        let _ = put_transaction(&mut harness, &mut storage, &transaction);\n        execution_results.insert(\n            transaction.hash(),\n            ExecutionResult::from(ExecutionResultV2::random(&mut harness.rng)),\n        );\n    }\n    put_execution_results(\n        &mut harness,\n        &mut storage,\n        block_hash_height_and_era.block_hash,\n        block_hash_height_and_era.block_height,\n        block_hash_height_and_era.era_id,\n        execution_results,\n    );\n\n    // Further populate the `transaction_hash_index` with 5 deploys from a block in era 2.\n    let era_2_transactions: Vec<Transaction> =\n        iter::repeat_with(|| Transaction::random(&mut harness.rng))\n            .take(5)\n            
.collect();\n    let block_hash_height_and_era = BlockHashHeightAndEra::new(\n        BlockHash::random(&mut harness.rng),\n        harness.rng.gen(),\n        EraId::new(2),\n    );\n    let mut execution_results: HashMap<TransactionHash, ExecutionResult> = HashMap::new();\n    for transaction in era_2_transactions.clone() {\n        let _ = put_transaction(&mut harness, &mut storage, &transaction);\n        execution_results.insert(\n            transaction.hash(),\n            ExecutionResult::from(ExecutionResultV2::random(&mut harness.rng)),\n        );\n    }\n    put_execution_results(\n        &mut harness,\n        &mut storage,\n        block_hash_height_and_era.block_hash,\n        block_hash_height_and_era.block_height,\n        block_hash_height_and_era.era_id,\n        execution_results,\n    );\n\n    // Check we get an empty set for deploys not yet executed.\n    let random_transaction_hashes: HashSet<TransactionHash> = iter::repeat_with(|| {\n        if harness.rng.gen() {\n            TransactionHash::Deploy(DeployHash::random(&mut harness.rng))\n        } else {\n            TransactionHash::V1(TransactionV1Hash::random(&mut harness.rng))\n        }\n    })\n    .take(5)\n    .collect();\n    assert!(get_transactions_era_ids(\n        &mut harness,\n        &mut storage,\n        random_transaction_hashes.clone(),\n    )\n    .is_empty());\n\n    // Check we get back only era 1 for all of the era 1 deploys and similarly for era 2 ones.\n    let era_1_transaction_hashes: HashSet<_> = era_1_transactions\n        .iter()\n        .map(|transaction| transaction.hash())\n        .collect();\n    let era1: HashSet<EraId> = iter::once(EraId::new(1)).collect();\n    assert_eq!(\n        get_transactions_era_ids(&mut harness, &mut storage, era_1_transaction_hashes.clone()),\n        era1\n    );\n    let era_2_transaction_hashes: HashSet<_> = era_2_transactions\n        .iter()\n        .map(|transaction| transaction.hash())\n        .collect();\n    let 
era2: HashSet<EraId> = iter::once(EraId::new(2)).collect();\n    assert_eq!(\n        get_transactions_era_ids(&mut harness, &mut storage, era_2_transaction_hashes.clone()),\n        era2\n    );\n\n    // Check we get back both eras if we use some from each collection.\n    let both_eras = HashSet::from_iter([EraId::new(1), EraId::new(2)]);\n    assert_eq!(\n        get_transactions_era_ids(\n            &mut harness,\n            &mut storage,\n            era_1_transaction_hashes\n                .iter()\n                .take(3)\n                .chain(era_2_transaction_hashes.iter().take(3))\n                .copied()\n                .collect(),\n        ),\n        both_eras\n    );\n\n    // Check we get back only era 1 for era 1 deploys interspersed with unexecuted deploys, and\n    // similarly for era 2 ones.\n    assert_eq!(\n        get_transactions_era_ids(\n            &mut harness,\n            &mut storage,\n            era_1_transaction_hashes\n                .iter()\n                .take(1)\n                .chain(random_transaction_hashes.iter().take(3))\n                .copied()\n                .collect(),\n        ),\n        era1\n    );\n    assert_eq!(\n        get_transactions_era_ids(\n            &mut harness,\n            &mut storage,\n            era_2_transaction_hashes\n                .iter()\n                .take(1)\n                .chain(random_transaction_hashes.iter().take(3))\n                .copied()\n                .collect(),\n        ),\n        era2\n    );\n\n    // Check we get back both eras if we use some from each collection and also some unexecuted.\n    assert_eq!(\n        get_transactions_era_ids(\n            &mut harness,\n            &mut storage,\n            era_1_transaction_hashes\n                .iter()\n                .take(3)\n                .chain(era_2_transaction_hashes.iter().take(3))\n                .chain(random_transaction_hashes.iter().take(3))\n                .copied()\n            
    .collect(),\n        ),\n        both_eras\n    );\n}\n\n#[test]\nfn storing_and_loading_a_lot_of_transactions_does_not_exhaust_handles() {\n    let mut harness = ComponentHarness::default();\n    let mut storage = storage_fixture(&harness);\n\n    let total = 1000;\n    let batch_size = 25;\n\n    let mut transaction_hashes = Vec::new();\n\n    for _ in 0..total {\n        let transaction = Transaction::random(&mut harness.rng);\n        transaction_hashes.push(transaction.hash());\n        put_transaction(&mut harness, &mut storage, &transaction);\n    }\n\n    // Shuffle transaction hashes around to get a random order.\n    transaction_hashes.as_mut_slice().shuffle(&mut harness.rng);\n\n    // Retrieve all from storage, ensuring they are found.\n    for chunk in transaction_hashes.chunks(batch_size) {\n        let result =\n            get_naive_transactions(&mut harness, &mut storage, chunk.iter().cloned().collect());\n        assert!(result.iter().all(Option::is_some));\n    }\n}\n\n#[test]\nfn store_random_execution_results() {\n    let mut harness = ComponentHarness::default();\n    let mut storage = storage_fixture(&harness);\n\n    // We store results for two different blocks. 
Each block will have five deploys executed in it.\n    let block_hash_a = BlockHash::random(&mut harness.rng);\n    let block_hash_b = BlockHash::random(&mut harness.rng);\n\n    // We collect the expected result per deploy in parallel to adding them.\n    let mut expected_outcome = HashMap::new();\n\n    fn setup_block(\n        harness: &mut ComponentHarness<UnitTestEvent>,\n        storage: &mut Storage,\n        expected_outcome: &mut HashMap<TransactionHash, ExecutionInfo>,\n        block_hash: &BlockHash,\n        block_height: u64,\n        era_id: EraId,\n    ) {\n        let transaction_count = 5;\n\n        // Results for a single block.\n        let mut block_results = HashMap::new();\n\n        // Add deploys to block.\n        for _ in 0..transaction_count {\n            let transaction = Transaction::random(&mut harness.rng);\n\n            // Store deploy.\n            put_transaction(harness, storage, &transaction.clone());\n\n            let execution_result =\n                ExecutionResult::from(ExecutionResultV2::random(&mut harness.rng));\n            let execution_info = ExecutionInfo {\n                block_hash: *block_hash,\n                block_height,\n                execution_result: Some(execution_result.clone()),\n            };\n\n            // Insert deploy results for the unique block-deploy combination.\n            expected_outcome.insert(transaction.hash(), execution_info);\n\n            // Add to our expected outcome.\n            block_results.insert(transaction.hash(), execution_result);\n        }\n\n        // Now we can submit the block's execution results.\n        put_execution_results(\n            harness,\n            storage,\n            *block_hash,\n            block_height,\n            era_id,\n            block_results,\n        );\n    }\n\n    setup_block(\n        &mut harness,\n        &mut storage,\n        &mut expected_outcome,\n        &block_hash_a,\n        1,\n        EraId::new(1),\n    );\n\n  
  setup_block(\n        &mut harness,\n        &mut storage,\n        &mut expected_outcome,\n        &block_hash_b,\n        2,\n        EraId::new(1),\n    );\n\n    // At this point, we are all set up and ready to receive results. Iterate over every deploy and\n    // see if its execution-data-per-block matches our expectations.\n    for (txn_hash, expected_exec_info) in expected_outcome.into_iter() {\n        let (transaction, maybe_exec_info) =\n            get_naive_transaction_and_execution_info(&mut storage, txn_hash)\n                .expect(\"missing transaction\");\n\n        assert_eq!(txn_hash, transaction.hash());\n        assert_eq!(maybe_exec_info, Some(expected_exec_info));\n    }\n}\n\n#[test]\nfn store_execution_results_twice_for_same_block_deploy_pair() {\n    let mut harness = ComponentHarness::default();\n    let mut storage = storage_fixture(&harness);\n\n    let block_hash = BlockHash::random(&mut harness.rng);\n    let block_height = harness.rng.gen();\n    let era_id = EraId::random(&mut harness.rng);\n    let transaction = Transaction::random(&mut harness.rng);\n    let transaction_hash = transaction.hash();\n\n    put_transaction(&mut harness, &mut storage, &transaction);\n\n    let mut exec_result_1 = HashMap::new();\n    exec_result_1.insert(\n        transaction_hash,\n        ExecutionResult::from(ExecutionResultV2::random(&mut harness.rng)),\n    );\n\n    let mut exec_result_2 = HashMap::new();\n    let new_exec_result = ExecutionResult::from(ExecutionResultV2::random(&mut harness.rng));\n    exec_result_2.insert(transaction_hash, new_exec_result.clone());\n\n    put_execution_results(\n        &mut harness,\n        &mut storage,\n        block_hash,\n        block_height,\n        era_id,\n        exec_result_1,\n    );\n\n    // Storing a second execution result for the same deploy on the same block should overwrite the\n    // first.\n    put_execution_results(\n        &mut harness,\n        &mut storage,\n        
block_hash,\n        block_height,\n        era_id,\n        exec_result_2,\n    );\n\n    let (returned_transaction, returned_exec_info) =\n        get_naive_transaction_and_execution_info(&mut storage, transaction_hash)\n            .expect(\"missing deploy\");\n    let expected_exec_info = Some(ExecutionInfo {\n        block_hash,\n        block_height,\n        execution_result: Some(new_exec_result),\n    });\n\n    assert_eq!(returned_transaction, transaction);\n    assert_eq!(returned_exec_info, expected_exec_info);\n}\n\nfn prepare_exec_result_with_transfer(\n    rng: &mut TestRng,\n    txn_hash: &TransactionHash,\n) -> (ExecutionResult, Transfer) {\n    let initiator_addr = InitiatorAddr::random(rng);\n    let transfer = Transfer::V2(TransferV2::new(\n        *txn_hash,\n        initiator_addr.clone(),\n        Some(rng.gen()),\n        rng.gen(),\n        rng.gen(),\n        rng.gen(),\n        Gas::from(rng.gen::<u64>()),\n        Some(rng.gen()),\n    ));\n    let limit = Gas::new(rng.gen::<u64>());\n    let current_price = 1;\n    let refund = U512::zero();\n    let exec_result = ExecutionResult::V2(Box::new(ExecutionResultV2 {\n        initiator: initiator_addr,\n        error_message: None,\n        current_price,\n        limit,\n        cost: limit.value(),\n        consumed: limit,\n        refund,\n        transfers: vec![transfer.clone()],\n        effects: Effects::new(),\n        size_estimate: rng.gen(),\n    }));\n    (exec_result, transfer)\n}\n\n#[test]\nfn store_identical_execution_results() {\n    let mut harness = ComponentHarness::default();\n    let mut storage = storage_fixture(&harness);\n\n    let deploy = Deploy::random_valid_native_transfer(&mut harness.rng);\n    let deploy_hash = *deploy.hash();\n    let transaction: Transaction = deploy.into();\n    let block = Arc::new(Block::V2(\n        TestBlockBuilder::new()\n            .transactions(Some(&transaction))\n            .build(&mut harness.rng),\n    ));\n    
put_transaction(&mut harness, &mut storage, &transaction);\n    put_block(&mut harness, &mut storage, block.clone());\n    let block_hash = *block.hash();\n\n    let (exec_result, transfer) =\n        prepare_exec_result_with_transfer(&mut harness.rng, &TransactionHash::Deploy(deploy_hash));\n    let mut exec_results = HashMap::new();\n    exec_results.insert(TransactionHash::from(deploy_hash), exec_result.clone());\n\n    put_execution_results(\n        &mut harness,\n        &mut storage,\n        block_hash,\n        block.height(),\n        block.era_id(),\n        exec_results.clone(),\n    );\n    {\n        let retrieved_results = get_execution_results(&mut harness, &mut storage, block_hash)\n            .expect(\"should return Some\");\n        assert_eq!(retrieved_results.len(), 1);\n        assert_eq!(retrieved_results[0].0, TransactionHash::from(deploy_hash));\n        assert_eq!(retrieved_results[0].2, exec_result);\n    }\n    let retrieved_transfers =\n        get_block_transfers(&mut harness, &mut storage, block_hash).expect(\"should return Some\");\n    assert_eq!(retrieved_transfers.len(), 1);\n    assert_eq!(retrieved_transfers[0], transfer);\n\n    // We should be fine storing the exact same result twice.\n    put_execution_results(\n        &mut harness,\n        &mut storage,\n        block_hash,\n        block.height(),\n        block.era_id(),\n        exec_results,\n    );\n    {\n        let retrieved_results = get_execution_results(&mut harness, &mut storage, block_hash)\n            .expect(\"should return Some\");\n        assert_eq!(retrieved_results.len(), 1);\n        assert_eq!(retrieved_results[0].0, TransactionHash::from(deploy_hash));\n        assert_eq!(retrieved_results[0].2, exec_result);\n    }\n    let retrieved_transfers =\n        get_block_transfers(&mut harness, &mut storage, block_hash).expect(\"should return Some\");\n    assert_eq!(retrieved_transfers.len(), 1);\n    assert_eq!(retrieved_transfers[0], 
transfer);\n}\n\n/// This is a regression test for the issue where `Transfer`s under a block with no deploys could be\n/// returned as `None` rather than the expected `Some(vec![])`.  The fix should ensure that if no\n/// Transfers are found, storage will respond with an empty collection and store the correct value\n/// for future requests.\n///\n/// See https://github.com/casper-network/casper-node/issues/4255 for further info.\n#[test]\nfn should_provide_transfers_if_not_stored() {\n    let mut harness = ComponentHarness::default();\n    let mut storage = storage_fixture(&harness);\n\n    let block_v2 = TestBlockBuilder::new()\n        .transactions(None)\n        .build(&mut harness.rng);\n    assert_eq!(block_v2.all_transactions().count(), 0);\n    let block = Arc::new(Block::V2(block_v2));\n    let block_hash = *block.hash();\n    put_block(&mut harness, &mut storage, block);\n\n    // Check an empty collection is returned.\n    let retrieved_transfers =\n        get_block_transfers(&mut harness, &mut storage, block_hash).expect(\"should return Some\");\n    assert!(retrieved_transfers.is_empty());\n\n    // Check the empty collection has been stored.\n    let reader = storage.block_store.checkout_rw().unwrap();\n    let maybe_transfers: Option<Vec<Transfer>> = reader.read(block_hash).unwrap();\n    assert_eq!(Some(vec![]), maybe_transfers);\n}\n\n/// This is a regression test for the issue where a valid collection of `Transfer`s under a given\n/// block could be erroneously replaced with an empty collection.  
The fix should ensure that if an\n/// empty collection of Transfers is found, storage will replace it with the correct collection and\n/// store the correct value for future requests.\n///\n/// See https://github.com/casper-network/casper-node/issues/4268 for further info.\n#[test]\nfn should_provide_transfers_after_emptied() {\n    let mut harness = ComponentHarness::default();\n    let mut storage = storage_fixture(&harness);\n\n    let deploy = Deploy::random_valid_native_transfer(&mut harness.rng);\n    let deploy_hash = *deploy.hash();\n    let block = Block::V2(\n        TestBlockBuilder::new()\n            .transactions(Some(&Transaction::Deploy(deploy)))\n            .build(&mut harness.rng),\n    );\n    let block_hash = *block.hash();\n    put_block(&mut harness, &mut storage, Arc::new(block.clone()));\n\n    let (exec_result, transfer) =\n        prepare_exec_result_with_transfer(&mut harness.rng, &TransactionHash::Deploy(deploy_hash));\n    let mut exec_results = HashMap::new();\n    exec_results.insert(TransactionHash::from(deploy_hash), exec_result);\n\n    put_execution_results(\n        &mut harness,\n        &mut storage,\n        block_hash,\n        block.height(),\n        block.era_id(),\n        exec_results.clone(),\n    );\n\n    // Replace the valid collection with an empty one.\n    let mut writer = storage.block_store.checkout_rw().unwrap();\n    let empty_transfers = BlockTransfers {\n        block_hash,\n        transfers: Vec::<Transfer>::new(),\n    };\n    assert_eq!(writer.write(&empty_transfers).unwrap(), block_hash);\n    writer.commit().unwrap();\n\n    // Check the correct value is returned.\n    let retrieved_transfers =\n        get_block_transfers(&mut harness, &mut storage, block_hash).expect(\"should return Some\");\n    assert_eq!(retrieved_transfers.len(), 1);\n    assert_eq!(retrieved_transfers[0], transfer);\n\n    // Check the correct value has been stored.\n    let reader = 
storage.block_store.checkout_rw().unwrap();\n    let maybe_transfers: Option<Vec<Transfer>> = reader.read(block_hash).unwrap();\n    assert_eq!(Some(vec![transfer]), maybe_transfers);\n}\n\n/// Example state used in storage.\n#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]\nstruct StateData {\n    a: Vec<u32>,\n    b: i32,\n}\n\n/* TODO: we can't write the legacy db anymore so this test needs to be refactored.\n#[test]\nfn test_legacy_interface() {\n    let mut harness = ComponentHarness::default();\n    let storage = storage_fixture(&harness);\n\n    let deploy = Deploy::random(&mut harness.rng);\n    let was_new = storage.write_legacy_deploy(&deploy);\n    assert!(was_new);\n\n    // Ensure we get the deploy we expect.\n    let result = storage\n        .get_legacy_deploy(*deploy.hash())\n        .expect(\"should get deploy\");\n    assert_eq!(result, Some(LegacyDeploy::from(deploy)));\n\n    // A non-existent deploy should simply return `None`.\n    assert!(storage\n        .get_legacy_deploy(DeployHash::random(&mut harness.rng))\n        .expect(\"should get deploy\")\n        .is_none())\n}\n*/\n\n#[test]\nfn persist_blocks_txns_and_execution_info_across_instantiations() {\n    let mut harness = ComponentHarness::default();\n    let mut storage = storage_fixture(&harness);\n\n    // Create some sample data.\n    let transaction = Transaction::random(&mut harness.rng);\n    let block: Block = TestBlockBuilder::new()\n        .transactions(Some(&transaction))\n        .build_versioned(&mut harness.rng);\n\n    let block_height = block.height();\n    let execution_result = ExecutionResult::from(ExecutionResultV2::random(&mut harness.rng));\n    put_transaction(&mut harness, &mut storage, &transaction);\n    put_complete_block(&mut harness, &mut storage, block.clone());\n    let mut execution_results = HashMap::new();\n    execution_results.insert(transaction.hash(), execution_result.clone());\n    put_execution_results(\n        &mut harness,\n     
   &mut storage,\n        *block.hash(),\n        block.height(),\n        block.era_id(),\n        execution_results,\n    );\n    assert_eq!(\n        get_block_and_metadata_by_height(&mut harness, &mut storage, block_height, false)\n            .expect(\"block not indexed properly\")\n            .block,\n        block\n    );\n\n    // After storing everything, destroy the harness and component, then rebuild using the\n    // same directory as backing.\n    let (on_disk, rng) = harness.into_parts();\n    let mut harness = ComponentHarness::builder()\n        .on_disk(on_disk)\n        .rng(rng)\n        .build();\n    let mut storage = storage_fixture(&harness);\n\n    let actual_block = get_block(&mut harness, &mut storage, *block.hash())\n        .expect(\"missing block we stored earlier\");\n    assert_eq!(actual_block, block);\n    let actual_txns =\n        get_naive_transactions(&mut harness, &mut storage, smallvec![transaction.hash()]);\n    assert_eq!(actual_txns, vec![Some(transaction.clone())]);\n\n    let (_, maybe_exec_info) =\n        get_naive_transaction_and_execution_info(&mut storage, transaction.hash())\n            .expect(\"missing deploy we stored earlier\");\n\n    let retrieved_execution_result = maybe_exec_info\n        .expect(\"should have execution info\")\n        .execution_result\n        .expect(\"should have execution result\");\n    assert_eq!(retrieved_execution_result, execution_result);\n\n    assert_eq!(\n        get_block_and_metadata_by_height(&mut harness, &mut storage, block_height, false)\n            .expect(\"block index was not restored\")\n            .block,\n        block\n    );\n}\n\n#[test]\nfn should_hard_reset() {\n    let blocks_count = 8_usize;\n    let blocks_per_era = 3;\n    let mut harness = ComponentHarness::default();\n    let mut storage = storage_fixture(&harness);\n    let chain_name_hash = ChainNameDigest::random(&mut harness.rng);\n\n    let random_txns: Vec<_> = iter::repeat_with(|| 
Transaction::random(&mut harness.rng))\n        .take(blocks_count)\n        .collect();\n\n    // Create and store 8 blocks, 0-2 in era 0, 3-5 in era 1, and 6,7 in era 2.\n    let blocks: Vec<_> = (0..blocks_count)\n        .map(|height| {\n            let is_switch = height % blocks_per_era == blocks_per_era - 1;\n            TestBlockBuilder::new()\n                .era(height as u64 / 3)\n                .height(height as u64)\n                .switch_block(is_switch)\n                .transactions(iter::once(\n                    &random_txns.get(height).expect(\"should_have_deploy\").clone(),\n                ))\n                .build_versioned(&mut harness.rng)\n        })\n        .collect();\n\n    for block in &blocks {\n        assert!(put_complete_block(\n            &mut harness,\n            &mut storage,\n            block.clone(),\n        ));\n    }\n\n    // Create and store signatures for these blocks.\n    for block in &blocks {\n        let block_signatures = random_signatures(\n            &mut harness.rng,\n            *block.hash(),\n            block.height(),\n            block.era_id(),\n            chain_name_hash,\n        );\n        assert!(put_block_signatures(\n            &mut harness,\n            &mut storage,\n            block_signatures,\n        ));\n    }\n\n    // Add execution results to deploys; deploy 0 will be executed in block 0, deploy 1 in block 1,\n    // and so on.\n    let mut transactions = vec![];\n    let mut execution_results = vec![];\n    for (index, (block_hash, block_height, era_id)) in blocks\n        .iter()\n        .map(|block| (block.hash(), block.height(), block.era_id()))\n        .enumerate()\n    {\n        let transaction = random_txns.get(index).expect(\"should have deploys\");\n        let execution_result = ExecutionResult::from(ExecutionResultV2::random(&mut harness.rng));\n        put_transaction(&mut harness, &mut storage, &transaction.clone());\n        let mut exec_results = 
HashMap::new();\n        exec_results.insert(transaction.hash(), execution_result);\n        put_execution_results(\n            &mut harness,\n            &mut storage,\n            *block_hash,\n            block_height,\n            era_id,\n            exec_results.clone(),\n        );\n        transactions.push(transaction);\n        execution_results.push(exec_results);\n    }\n\n    // Check the highest block is #7.\n    assert_eq!(\n        Some(blocks[blocks_count - 1].clone()),\n        get_highest_complete_block(&mut harness, &mut storage)\n    );\n\n    // The closure doing the actual checks.\n    let mut check = |reset_era: usize| {\n        // Initialize a new storage with a hard reset to the given era, deleting blocks from that\n        // era onwards.\n        let mut storage = storage_fixture_with_hard_reset(&harness, EraId::from(reset_era as u64));\n\n        // Check highest block is the last from the previous era, or `None` if resetting to era 0.\n        let highest_block = get_highest_complete_block(&mut harness, &mut storage);\n        if reset_era > 0 {\n            assert_eq!(\n                blocks[blocks_per_era * reset_era - 1].clone(),\n                highest_block.unwrap()\n            );\n        } else {\n            assert!(highest_block.is_none());\n        }\n\n        // Check deleted blocks can't be retrieved.\n        for (index, block) in blocks.iter().enumerate() {\n            let result = get_block(&mut harness, &mut storage, *block.hash());\n            let should_get_block = index < blocks_per_era * reset_era;\n            assert_eq!(should_get_block, result.is_some());\n        }\n\n        // Check signatures of deleted blocks can't be retrieved.\n        for (index, block) in blocks.iter().enumerate() {\n            let result = storage.read_block_with_signatures_by_hash(*block.hash(), false);\n            let should_get_sigs = index < blocks_per_era * reset_era;\n            if should_get_sigs {\n                
assert!(!result.unwrap().block_signatures().is_empty())\n            } else if let Some(signed_block) = result {\n                assert!(signed_block.block_signatures().is_empty())\n            }\n        }\n\n        // Check execution results in deleted blocks have been removed.\n        for (index, transaction) in transactions.iter().enumerate() {\n            let (_, maybe_exec_info) =\n                get_naive_transaction_and_execution_info(&mut storage, transaction.hash()).unwrap();\n            let should_have_exec_results = index < blocks_per_era * reset_era;\n            match maybe_exec_info {\n                Some(ExecutionInfo {\n                    execution_result, ..\n                }) => {\n                    assert_eq!(should_have_exec_results, execution_result.is_some());\n                }\n                None => assert!(!should_have_exec_results),\n            };\n        }\n    };\n\n    // Test with a hard reset to era 2, deleting blocks (and associated data) 6 and 7.\n    check(2);\n    // Test with a hard reset to era 1, further deleting blocks (and associated data) 3, 4 and 5.\n    check(1);\n    // Test with a hard reset to era 0, deleting all blocks and associated data.\n    check(0);\n}\n\n#[test]\nfn should_create_subdir_named_after_network() {\n    let harness = ComponentHarness::default();\n    let cfg = new_config(&harness);\n\n    let network_name = \"test\";\n    let storage = Storage::new(\n        &WithDir::new(harness.tmp.path(), cfg.clone()),\n        None,\n        ProtocolVersion::from_parts(1, 0, 0),\n        EraId::default(),\n        network_name,\n        MAX_TTL.into(),\n        RECENT_ERA_COUNT,\n        None,\n        false,\n        TransactionConfig::default(),\n    )\n    .unwrap();\n\n    let expected_path = cfg.path.join(network_name);\n\n    assert!(expected_path.exists());\n    assert_eq!(expected_path, storage.root_path());\n}\n\n#[test]\nfn should_not_try_to_move_nonexistent_files() {\n    let harness = 
ComponentHarness::default();\n    let cfg = new_config(&harness);\n    let file_names = [\"temp.txt\"];\n\n    let expected = should_move_storage_files_to_network_subdir(&cfg.path, &file_names).unwrap();\n\n    assert!(!expected);\n}\n\n#[test]\nfn should_move_files_if_they_exist() {\n    let harness = ComponentHarness::default();\n    let cfg = new_config(&harness);\n    let file_names = [\"temp1.txt\", \"temp2.txt\", \"temp3.txt\"];\n\n    // Storage will create this in the constructor,\n    // doing this manually since we're not calling the constructor in this test.\n    fs::create_dir(cfg.path.clone()).unwrap();\n\n    // create empty files for testing.\n    File::create(cfg.path.join(file_names[0])).unwrap();\n    File::create(cfg.path.join(file_names[1])).unwrap();\n    File::create(cfg.path.join(file_names[2])).unwrap();\n\n    let expected = should_move_storage_files_to_network_subdir(&cfg.path, &file_names).unwrap();\n\n    assert!(expected);\n}\n\n#[test]\nfn should_return_error_if_files_missing() {\n    let harness = ComponentHarness::default();\n    let cfg = new_config(&harness);\n    let file_names = [\"temp1.txt\", \"temp2.txt\", \"temp3.txt\"];\n\n    // Storage will create this in the constructor,\n    // doing this manually since we're not calling the constructor in this test.\n    fs::create_dir(cfg.path.clone()).unwrap();\n\n    // create empty files for testing, but not all of the files.\n    File::create(cfg.path.join(file_names[1])).unwrap();\n    File::create(cfg.path.join(file_names[2])).unwrap();\n\n    let actual = should_move_storage_files_to_network_subdir(&cfg.path, &file_names);\n\n    assert!(actual.is_err());\n}\n\n#[test]\nfn should_actually_move_specified_files() {\n    let harness = ComponentHarness::default();\n    let cfg = new_config(&harness);\n    let file_names = [\"temp1.txt\", \"temp2.txt\", \"temp3.txt\"];\n    let root = cfg.path;\n    let subdir = root.join(\"test\");\n    let src_path1 = root.join(file_names[0]);\n    
let src_path2 = root.join(file_names[1]);\n    let src_path3 = root.join(file_names[2]);\n    let dest_path1 = subdir.join(file_names[0]);\n    let dest_path2 = subdir.join(file_names[1]);\n    let dest_path3 = subdir.join(file_names[2]);\n\n    // Storage will create this in the constructor,\n    // doing this manually since we're not calling the constructor in this test.\n    fs::create_dir_all(subdir.clone()).unwrap();\n\n    // create empty files for testing.\n    File::create(src_path1.clone()).unwrap();\n    File::create(src_path2.clone()).unwrap();\n    File::create(src_path3.clone()).unwrap();\n\n    assert!(src_path1.exists());\n    assert!(src_path2.exists());\n    assert!(src_path3.exists());\n\n    let result = move_storage_files_to_network_subdir(&root, &subdir, &file_names);\n\n    assert!(result.is_ok());\n    assert!(!src_path1.exists());\n    assert!(!src_path2.exists());\n    assert!(!src_path3.exists());\n    assert!(dest_path1.exists());\n    assert!(dest_path2.exists());\n    assert!(dest_path3.exists());\n}\n\n#[test]\nfn can_put_and_get_block() {\n    let mut harness = ComponentHarness::default();\n\n    // This test is not restricted by the block availability index.\n    let only_from_available_block_range = false;\n\n    // Create a random block, store and load it.\n    let block = TestBlockBuilder::new().build(&mut harness.rng);\n\n    let mut storage = storage_fixture(&harness);\n\n    let was_new = put_complete_block(&mut harness, &mut storage, block.clone().into());\n    assert!(was_new, \"putting block should have returned `true`\");\n\n    // Storing the same block again should work, but yield a result of `true`.\n    let was_new_second_time = put_complete_block(&mut harness, &mut storage, block.clone().into());\n    assert!(\n        was_new_second_time,\n        \"storing block the second time should have returned `true`\"\n    );\n\n    let response =\n        get_block(&mut harness, &mut storage, *block.hash()).expect(\"should get 
response\");\n    let response: BlockV2 = response.try_into().expect(\"should get BlockV2\");\n    assert_eq!(response, block);\n\n    // Also ensure we can retrieve just the header.\n    let response = harness.send_request(&mut storage, |responder| {\n        StorageRequest::GetBlockHeader {\n            block_hash: *block.hash(),\n            only_from_available_block_range,\n            responder,\n        }\n        .into()\n    });\n\n    assert_eq!(response.as_ref(), Some(&block.header().clone().into()));\n}\n\n#[test]\nfn should_get_trusted_ancestor_headers() {\n    let (storage, _, blocks) = create_sync_leap_test_chain(&[], false, None);\n\n    let get_results = |requested_height: usize| -> Vec<u64> {\n        let txn = storage.block_store.checkout_ro().unwrap();\n        let requested_block_header = blocks.get(requested_height).unwrap().clone_header();\n        storage\n            .get_trusted_ancestor_headers(&txn, &requested_block_header)\n            .unwrap()\n            .unwrap()\n            .iter()\n            .map(|block_header| block_header.height())\n            .collect()\n    };\n\n    assert_eq!(get_results(7), &[6, 5, 4]);\n    assert_eq!(get_results(9), &[8, 7]);\n    assert_eq!(get_results(5), &[4]);\n}\n\n#[test]\nfn should_get_block_headers_with_signatures() {\n    let (storage, _, blocks) = create_sync_leap_test_chain(&[], false, None);\n\n    let get_results = |requested_height: usize| -> Vec<u64> {\n        let txn = storage.block_store.checkout_ro().unwrap();\n        let requested_block_header = blocks.get(requested_height).unwrap().clone_header();\n        let highest_block_header_with_sufficient_signatures = storage\n            .get_highest_complete_block_header_with_signatures(&txn)\n            .unwrap()\n            .unwrap();\n        storage\n            .get_block_headers_with_signatures(\n                &txn,\n                &requested_block_header,\n                &highest_block_header_with_sufficient_signatures,\n   
         )\n            .unwrap()\n            .unwrap()\n            .iter()\n            .map(|block_header_with_signatures| {\n                block_header_with_signatures.block_header().height()\n            })\n            .collect()\n    };\n\n    assert!(\n        get_results(12).is_empty(),\n        \"should return empty set if asked for a most recent signed block\"\n    );\n    assert_eq!(get_results(5), &[7, 10, 12]);\n    assert_eq!(get_results(2), &[4, 7, 10, 12]);\n    assert_eq!(get_results(1), &[4, 7, 10, 12]);\n    assert_eq!(\n        get_results(10),\n        &[12],\n        \"should return only tip if asked for a most recent switch block\"\n    );\n    assert_eq!(\n        get_results(7),\n        &[10, 12],\n        \"should not include switch block that was directly requested\"\n    );\n}\n\n#[test]\nfn should_get_block_headers_with_signatures_when_no_sufficient_finality_in_most_recent_block() {\n    let (storage, _, blocks) = create_sync_leap_test_chain(&[12], false, None);\n\n    let get_results = |requested_height: usize| -> Vec<u64> {\n        let txn = storage.block_store.checkout_ro().unwrap();\n        let requested_block_header = blocks.get(requested_height).unwrap().clone_header();\n        let highest_block_header_with_sufficient_signatures = storage\n            .get_highest_complete_block_header_with_signatures(&txn)\n            .unwrap()\n            .unwrap();\n\n        storage\n            .get_block_headers_with_signatures(\n                &txn,\n                &requested_block_header,\n                &highest_block_header_with_sufficient_signatures,\n            )\n            .unwrap()\n            .unwrap()\n            .iter()\n            .map(|block_header_with_signatures| {\n                block_header_with_signatures.block_header().height()\n            })\n            .collect()\n    };\n\n    assert!(\n        get_results(11).is_empty(),\n        \"should return empty set if asked for a most recent signed 
block\",\n    );\n    assert_eq!(get_results(5), &[7, 10, 11]);\n    assert_eq!(get_results(2), &[4, 7, 10, 11]);\n    assert_eq!(get_results(1), &[4, 7, 10, 11]);\n    assert_eq!(\n        get_results(10),\n        &[11],\n        \"should return only tip if asked for a most recent switch block\"\n    );\n    assert_eq!(\n        get_results(7),\n        &[10, 11],\n        \"should not include switch block that was directly requested\"\n    );\n}\n\n#[test]\nfn should_get_sync_leap() {\n    let (storage, chainspec, blocks) = create_sync_leap_test_chain(&[], false, None);\n\n    let requested_block_hash = *blocks.get(6).unwrap().hash();\n    let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(requested_block_hash);\n    let sync_leap_result = storage.get_sync_leap(sync_leap_identifier).unwrap();\n\n    let sync_leap = match sync_leap_result {\n        FetchResponse::Fetched(sync_leap) => sync_leap,\n        _ => panic!(\"should have leap sync\"),\n    };\n\n    assert_eq!(sync_leap.trusted_block_header.height(), 6);\n    assert_eq!(\n        block_headers_into_heights(&sync_leap.trusted_ancestor_headers),\n        vec![5, 4],\n    );\n    assert_eq!(\n        block_headers_with_signatures_into_heights(&sync_leap.block_headers_with_signatures),\n        vec![7, 10, 12]\n    );\n\n    sync_leap\n        .validate(&SyncLeapValidationMetaData::from_chainspec(&chainspec))\n        .unwrap();\n}\n\n#[test]\nfn sync_leap_block_headers_with_signatures_should_be_empty_when_asked_for_a_tip() {\n    let (storage, chainspec, blocks) = create_sync_leap_test_chain(&[], false, None);\n\n    let requested_block_hash = *blocks.get(12).unwrap().hash();\n    let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(requested_block_hash);\n    let sync_leap_result = storage.get_sync_leap(sync_leap_identifier).unwrap();\n\n    let sync_leap = match sync_leap_result {\n        FetchResponse::Fetched(sync_leap) => sync_leap,\n        _ => panic!(\"should have leap sync\"),\n    
};\n\n    assert_eq!(sync_leap.trusted_block_header.height(), 12);\n    assert_eq!(\n        block_headers_into_heights(&sync_leap.trusted_ancestor_headers),\n        vec![11, 10],\n    );\n    assert!(\n        block_headers_with_signatures_into_heights(&sync_leap.block_headers_with_signatures)\n            .is_empty()\n    );\n\n    sync_leap\n        .validate(&SyncLeapValidationMetaData::from_chainspec(&chainspec))\n        .unwrap();\n}\n\n#[test]\nfn sync_leap_should_populate_trusted_ancestor_headers_if_tip_is_a_switch_block() {\n    let (storage, chainspec, blocks) = create_sync_leap_test_chain(&[], true, None);\n\n    let requested_block_hash = *blocks.get(13).unwrap().hash();\n    let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(requested_block_hash);\n    let sync_leap_result = storage.get_sync_leap(sync_leap_identifier).unwrap();\n\n    let sync_leap = match sync_leap_result {\n        FetchResponse::Fetched(sync_leap) => sync_leap,\n        _ => panic!(\"should have leap sync\"),\n    };\n\n    assert_eq!(sync_leap.trusted_block_header.height(), 13);\n    assert_eq!(\n        block_headers_into_heights(&sync_leap.trusted_ancestor_headers),\n        vec![12, 11, 10],\n    );\n    assert!(\n        block_headers_with_signatures_into_heights(&sync_leap.block_headers_with_signatures)\n            .is_empty()\n    );\n\n    sync_leap\n        .validate(&SyncLeapValidationMetaData::from_chainspec(&chainspec))\n        .unwrap();\n}\n\n#[test]\nfn should_respect_allowed_era_diff_in_get_sync_leap() {\n    let maybe_recent_era_count = Some(1);\n    let (storage, _, blocks) = create_sync_leap_test_chain(&[], false, maybe_recent_era_count);\n\n    let requested_block_hash = *blocks.get(6).unwrap().hash();\n    let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(requested_block_hash);\n    let sync_leap_result = storage.get_sync_leap(sync_leap_identifier).unwrap();\n\n    assert!(\n        matches!(sync_leap_result, 
FetchResponse::NotProvided(_)),\n        \"should not have sync leap\"\n    );\n}\n\n#[test]\nfn should_restrict_returned_blocks() {\n    let mut harness = ComponentHarness::default();\n    let mut storage = storage_fixture(&harness);\n\n    // Create the following disjoint sequences: 1-2 4-5\n    IntoIterator::into_iter([1, 2, 4, 5]).for_each(|height| {\n        let block = TestBlockBuilder::new()\n            .era(1)\n            .height(height)\n            .protocol_version(ProtocolVersion::from_parts(1, 5, 0))\n            .switch_block(false)\n            .build_versioned(&mut harness.rng);\n\n        let was_new = put_complete_block(&mut harness, &mut storage, block);\n        assert!(was_new);\n    });\n\n    // Without restriction, the node should attempt to return any requested block\n    // regardless if it is in the disjoint sequences.\n    assert!(storage.should_return_block(0, false));\n    assert!(storage.should_return_block(1, false));\n    assert!(storage.should_return_block(2, false));\n    assert!(storage.should_return_block(3, false));\n    assert!(storage.should_return_block(4, false));\n    assert!(storage.should_return_block(5, false));\n    assert!(storage.should_return_block(6, false));\n\n    // With restriction, the node should attempt to return only the blocks that are\n    // on the highest disjoint sequence, i.e blocks 4 and 5 only.\n    assert!(!storage.should_return_block(0, true));\n    assert!(!storage.should_return_block(1, true));\n    assert!(!storage.should_return_block(2, true));\n    assert!(!storage.should_return_block(3, true));\n    assert!(storage.should_return_block(4, true));\n    assert!(storage.should_return_block(5, true));\n    assert!(!storage.should_return_block(6, true));\n}\n\n#[test]\nfn should_get_block_header_by_height() {\n    let mut harness = ComponentHarness::default();\n    let mut storage = storage_fixture(&harness);\n\n    let block = TestBlockBuilder::new().build_versioned(&mut harness.rng);\n    let 
expected_header = block.clone_header();\n    let height = block.height();\n\n    // Requesting the block header before it is in storage should return None.\n    assert!(get_block_header_by_height(&mut harness, &mut storage, height, false).is_none());\n\n    let was_new = put_complete_block(&mut harness, &mut storage, block);\n    assert!(was_new);\n\n    // Requesting the block header after it is in storage should return the block header.\n    let maybe_block_header = get_block_header_by_height(&mut harness, &mut storage, height, false);\n    assert!(maybe_block_header.is_some());\n    assert_eq!(expected_header, maybe_block_header.unwrap());\n}\n\n#[ignore]\n#[test]\nfn check_force_resync_with_marker_file() {\n    let mut harness = ComponentHarness::default();\n    let mut storage = storage_fixture(&harness);\n    let cfg = WithDir::new(harness.tmp.path(), new_config(&harness));\n    let force_resync_file_path = storage.root_path().join(FORCE_RESYNC_FILE_NAME);\n    assert!(!force_resync_file_path.exists());\n\n    // Add a couple of blocks into storage.\n    let first_block = TestBlockBuilder::new().build_versioned(&mut harness.rng);\n    put_complete_block(&mut harness, &mut storage, first_block.clone());\n    let second_block = loop {\n        // We need to make sure that the second random block has different height than the first\n        // one.\n        let block = TestBlockBuilder::new().build_versioned(&mut harness.rng);\n        if block.height() != first_block.height() {\n            break block;\n        }\n    };\n    put_complete_block(&mut harness, &mut storage, second_block);\n    // Make sure the completed blocks are not the default anymore.\n    assert_ne!(\n        storage.get_available_block_range(),\n        AvailableBlockRange::RANGE_0_0\n    );\n    storage.persist_completed_blocks().unwrap();\n    drop(storage);\n\n    // The force resync marker file should not exist yet.\n    assert!(!force_resync_file_path.exists());\n    // Reinitialize 
storage with force resync enabled.\n    let mut storage = storage_fixture_with_force_resync(&cfg);\n    // The marker file should be there now.\n    assert!(force_resync_file_path.exists());\n    // Completed blocks has now been defaulted.\n    assert_eq!(\n        storage.get_available_block_range(),\n        AvailableBlockRange::RANGE_0_0\n    );\n    let first_block_height = first_block.height();\n    // Add a block into storage.\n    put_complete_block(&mut harness, &mut storage, first_block);\n    assert_eq!(\n        storage.get_available_block_range(),\n        AvailableBlockRange::new(first_block_height, first_block_height)\n    );\n    storage.persist_completed_blocks().unwrap();\n    drop(storage);\n\n    // We didn't remove the marker file, so it should still be there.\n    assert!(force_resync_file_path.exists());\n    // Reinitialize storage with force resync enabled.\n    let storage = storage_fixture_with_force_resync(&cfg);\n    assert!(force_resync_file_path.exists());\n    // The completed blocks didn't default this time as the marker file was\n    // present.\n    assert_eq!(\n        storage.get_available_block_range(),\n        AvailableBlockRange::new(first_block_height, first_block_height)\n    );\n    drop(storage);\n    // Remove the marker file.\n    fs::remove_file(&force_resync_file_path).unwrap();\n    assert!(!force_resync_file_path.exists());\n\n    // Reinitialize storage with force resync enabled.\n    let storage = storage_fixture_with_force_resync(&cfg);\n    // The marker file didn't exist, so it was created.\n    assert!(force_resync_file_path.exists());\n    // Completed blocks was defaulted again.\n    assert_eq!(\n        storage.get_available_block_range(),\n        AvailableBlockRange::RANGE_0_0\n    );\n}\n\n// Clippy complains because there's a `OnceCell` in `FinalitySignature`, hence it should not be used\n// as a key in `BTreeSet`. 
However, we don't change the content of the cell during the course of the\n// test so there's no risk the hash or order of keys will change.\n#[allow(clippy::mutable_key_type)]\n#[track_caller]\nfn assert_signatures(storage: &Storage, block_hash: BlockHash, expected: Vec<FinalitySignature>) {\n    let actual = storage.get_finality_signatures_for_block(block_hash);\n    let actual = actual.map_or(BTreeSet::new(), |signatures| {\n        signatures.finality_signatures().collect()\n    });\n    let expected: BTreeSet<_> = expected.into_iter().collect();\n    assert_eq!(actual, expected);\n}\n\n#[test]\nfn store_and_purge_signatures() {\n    let mut harness = ComponentHarness::default();\n    let mut storage = storage_fixture(&harness);\n    let chain_name_hash = ChainNameDigest::random(&mut harness.rng);\n\n    let block_1 = TestBlockBuilder::new().build(&mut harness.rng);\n    let fs_1_1 = FinalitySignatureV2::random_for_block(\n        *block_1.hash(),\n        block_1.height(),\n        block_1.header().era_id(),\n        chain_name_hash,\n        &mut harness.rng,\n    );\n    let fs_1_2 = FinalitySignatureV2::random_for_block(\n        *block_1.hash(),\n        block_1.height(),\n        block_1.header().era_id(),\n        chain_name_hash,\n        &mut harness.rng,\n    );\n\n    let block_2 = TestBlockBuilder::new().build(&mut harness.rng);\n    let fs_2_1 = FinalitySignatureV2::random_for_block(\n        *block_2.hash(),\n        block_2.height(),\n        block_2.header().era_id(),\n        chain_name_hash,\n        &mut harness.rng,\n    );\n    let fs_2_2 = FinalitySignatureV2::random_for_block(\n        *block_2.hash(),\n        block_2.height(),\n        block_2.header().era_id(),\n        chain_name_hash,\n        &mut harness.rng,\n    );\n\n    let block_3 = TestBlockBuilder::new().build(&mut harness.rng);\n    let fs_3_1 = FinalitySignatureV2::random_for_block(\n        *block_3.hash(),\n        block_3.height(),\n        block_3.header().era_id(),\n  
      chain_name_hash,\n        &mut harness.rng,\n    );\n    let fs_3_2 = FinalitySignatureV2::random_for_block(\n        *block_3.hash(),\n        block_3.height(),\n        block_3.header().era_id(),\n        chain_name_hash,\n        &mut harness.rng,\n    );\n\n    let block_4 = TestBlockBuilder::new().build(&mut harness.rng);\n\n    let _ = put_finality_signature(&mut harness, &mut storage, Box::new(fs_1_1.clone().into()));\n    let _ = put_finality_signature(&mut harness, &mut storage, Box::new(fs_1_2.clone().into()));\n    let _ = put_finality_signature(&mut harness, &mut storage, Box::new(fs_2_1.clone().into()));\n    let _ = put_finality_signature(&mut harness, &mut storage, Box::new(fs_2_2.clone().into()));\n    let _ = put_finality_signature(&mut harness, &mut storage, Box::new(fs_3_1.clone().into()));\n    let _ = put_finality_signature(&mut harness, &mut storage, Box::new(fs_3_2.clone().into()));\n\n    assert_signatures(\n        &storage,\n        *block_1.hash(),\n        vec![fs_1_1.into(), fs_1_2.into()],\n    );\n    assert_signatures(\n        &storage,\n        *block_2.hash(),\n        vec![fs_2_1.clone().into(), fs_2_2.clone().into()],\n    );\n    assert_signatures(\n        &storage,\n        *block_3.hash(),\n        vec![fs_3_1.clone().into(), fs_3_2.clone().into()],\n    );\n    assert_signatures(&storage, *block_4.hash(), vec![]);\n\n    // Purging for block_1 should leave sigs for block_2 and block_3 intact.\n    let mut writer = storage.block_store.checkout_rw().unwrap();\n    let _ = DataWriter::<BlockHash, BlockSignatures>::delete(&mut writer, *block_1.hash());\n    writer.commit().unwrap();\n    assert_signatures(&storage, *block_1.hash(), vec![]);\n    assert_signatures(\n        &storage,\n        *block_2.hash(),\n        vec![fs_2_1.clone().into(), fs_2_2.clone().into()],\n    );\n    assert_signatures(\n        &storage,\n        *block_3.hash(),\n        vec![fs_3_1.clone().into(), fs_3_2.clone().into()],\n    );\n    
assert_signatures(&storage, *block_4.hash(), vec![]);\n\n    // Purging for block_4 (which has no signatures) should not modify state.\n    let mut writer = storage.block_store.checkout_rw().unwrap();\n    let _ = DataWriter::<BlockHash, BlockSignatures>::delete(&mut writer, *block_4.hash());\n    writer.commit().unwrap();\n    assert_signatures(&storage, *block_1.hash(), vec![]);\n    assert_signatures(\n        &storage,\n        *block_2.hash(),\n        vec![fs_2_1.into(), fs_2_2.into()],\n    );\n    assert_signatures(\n        &storage,\n        *block_3.hash(),\n        vec![fs_3_1.into(), fs_3_2.into()],\n    );\n    assert_signatures(&storage, *block_4.hash(), vec![]);\n\n    // Purging for all blocks should leave no signatures.\n    let mut writer = storage.block_store.checkout_rw().unwrap();\n    let _ = DataWriter::<BlockHash, BlockSignatures>::delete(&mut writer, *block_1.hash());\n    let _ = DataWriter::<BlockHash, BlockSignatures>::delete(&mut writer, *block_2.hash());\n    let _ = DataWriter::<BlockHash, BlockSignatures>::delete(&mut writer, *block_3.hash());\n    let _ = DataWriter::<BlockHash, BlockSignatures>::delete(&mut writer, *block_4.hash());\n    writer.commit().unwrap();\n\n    assert_signatures(&storage, *block_1.hash(), vec![]);\n    assert_signatures(&storage, *block_2.hash(), vec![]);\n    assert_signatures(&storage, *block_3.hash(), vec![]);\n    assert_signatures(&storage, *block_4.hash(), vec![]);\n}\n\nfn copy_dir_recursive(src: impl AsRef<Path>, dest: impl AsRef<Path>) -> io::Result<()> {\n    fs::create_dir_all(&dest)?;\n    for entry in fs::read_dir(src)? 
{\n        let entry = entry?;\n        if entry.file_type()?.is_dir() {\n            copy_dir_recursive(entry.path(), dest.as_ref().join(entry.file_name()))?;\n        } else {\n            fs::copy(entry.path(), dest.as_ref().join(entry.file_name()))?;\n        }\n    }\n    Ok(())\n}\n\n#[test]\nfn can_retrieve_block_by_height_with_different_block_versions() {\n    let mut harness = ComponentHarness::default();\n\n    // BlockV1 as a versioned Block\n    let block_14 = TestBlockV1Builder::new()\n        .era(1)\n        .height(14)\n        .switch_block(false)\n        .build(&mut harness.rng);\n\n    // BlockV2 as a versioned Block\n    let block_v2_33 = TestBlockBuilder::new()\n        .era(1)\n        .height(33)\n        .switch_block(true)\n        .build_versioned(&mut harness.rng);\n    let block_33: Block = block_v2_33.clone();\n\n    // BlockV2\n    let block_v2_99 = TestBlockBuilder::new()\n        .era(2)\n        .height(99)\n        .switch_block(true)\n        .build_versioned(&mut harness.rng);\n    let block_99: Block = block_v2_99.clone();\n\n    let mut storage = storage_fixture(&harness);\n\n    assert!(get_block(&mut harness, &mut storage, *block_14.hash()).is_none());\n    assert!(get_block(&mut harness, &mut storage, *block_v2_33.hash()).is_none());\n    assert!(get_block(&mut harness, &mut storage, *block_v2_99.hash()).is_none());\n    assert!(!is_block_stored(\n        &mut harness,\n        &mut storage,\n        *block_14.hash(),\n    ));\n    assert!(!is_block_stored(\n        &mut harness,\n        &mut storage,\n        *block_v2_33.hash(),\n    ));\n    assert!(!is_block_stored(\n        &mut harness,\n        &mut storage,\n        *block_v2_99.hash(),\n    ));\n\n    let was_new = put_block(&mut harness, &mut storage, Arc::new(block_33.clone()));\n    assert!(was_new);\n    assert!(mark_block_complete(\n        &mut harness,\n        &mut storage,\n        block_v2_33.height(),\n    ));\n\n    // block is of the current version 
so it should be returned\n    let block =\n        get_block(&mut harness, &mut storage, *block_v2_33.hash()).expect(\"should have block\");\n    assert!(matches!(block, Block::V2(_)));\n\n    // block is stored since it was returned before\n    assert!(is_block_stored(\n        &mut harness,\n        &mut storage,\n        *block_v2_33.hash(),\n    ));\n\n    assert_eq!(\n        get_highest_complete_block(&mut harness, &mut storage).as_ref(),\n        Some(&block_33)\n    );\n    assert_eq!(\n        get_highest_complete_block_header(&mut harness, &mut storage).as_ref(),\n        Some(&block_v2_33.clone_header())\n    );\n    assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 0, false).is_none());\n    assert!(get_block_header_by_height(&mut harness, &mut storage, 0, false).is_none());\n    assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 14, false).is_none());\n    assert!(get_block_header_by_height(&mut harness, &mut storage, 14, false).is_none());\n    assert_eq!(\n        get_block_and_metadata_by_height(&mut harness, &mut storage, 33, false)\n            .unwrap()\n            .block,\n        block_33\n    );\n    assert_eq!(\n        get_block_header_by_height(&mut harness, &mut storage, 33, true).as_ref(),\n        Some(&block_v2_33.clone_header())\n    );\n    assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 99, false).is_none());\n    assert!(get_block_header_by_height(&mut harness, &mut storage, 99, false).is_none());\n\n    let was_new = put_block(\n        &mut harness,\n        &mut storage,\n        Arc::new(Block::from(block_14.clone())),\n    );\n    assert!(was_new);\n\n    // block is not of the current version so don't return it\n    let block = get_block(&mut harness, &mut storage, *block_14.hash()).expect(\"should have block\");\n    assert!(matches!(block, Block::V1(_)));\n\n    // block should be stored as versioned and should be returned\n    assert!(get_block(&mut harness, &mut 
storage, *block_14.hash()).is_some());\n\n    // block is stored since it was returned before\n    assert!(is_block_stored(\n        &mut harness,\n        &mut storage,\n        *block_14.hash(),\n    ));\n\n    assert_eq!(\n        get_highest_complete_block(&mut harness, &mut storage).as_ref(),\n        Some(&block_33)\n    );\n    assert_eq!(\n        get_highest_complete_block_header(&mut harness, &mut storage).as_ref(),\n        Some(&block_v2_33.clone_header())\n    );\n    assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 0, false).is_none());\n    assert!(get_block_header_by_height(&mut harness, &mut storage, 0, false).is_none());\n    assert_eq!(\n        get_block_and_metadata_by_height(&mut harness, &mut storage, 14, false)\n            .unwrap()\n            .block,\n        Block::from(block_14.clone())\n    );\n    assert_eq!(\n        get_block_header_by_height(&mut harness, &mut storage, 14, true).as_ref(),\n        None\n    );\n    assert_eq!(\n        get_block_header_by_height(&mut harness, &mut storage, 14, false).as_ref(),\n        Some(&block_14.header().clone().into())\n    );\n    assert_eq!(\n        get_block_and_metadata_by_height(&mut harness, &mut storage, 33, false)\n            .unwrap()\n            .block,\n        block_33\n    );\n    assert_eq!(\n        get_block_header_by_height(&mut harness, &mut storage, 33, false).as_ref(),\n        Some(&block_v2_33.clone_header())\n    );\n    assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 99, false).is_none());\n    assert!(get_block_header_by_height(&mut harness, &mut storage, 99, false).is_none());\n\n    // Inserting block with height 99, changes highest.\n    let was_new = put_complete_block(&mut harness, &mut storage, block_v2_99.clone());\n    // Mark block 99 as complete.\n    storage.completed_blocks.insert(99);\n    assert!(was_new);\n\n    assert_eq!(\n        get_highest_complete_block(&mut harness, &mut storage).as_ref(),\n        
Some(&(block_v2_99))\n    );\n    assert_eq!(\n        get_highest_complete_block_header(&mut harness, &mut storage).as_ref(),\n        Some(&block_v2_99.clone_header())\n    );\n    assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 0, false).is_none());\n    assert!(get_block_header_by_height(&mut harness, &mut storage, 0, false).is_none());\n    assert_eq!(\n        get_block_and_metadata_by_height(&mut harness, &mut storage, 14, false)\n            .unwrap()\n            .block,\n        Block::from(block_14.clone())\n    );\n    assert_eq!(\n        get_block_header_by_height(&mut harness, &mut storage, 14, false).as_ref(),\n        Some(&block_14.header().clone().into())\n    );\n    assert_eq!(\n        get_block_and_metadata_by_height(&mut harness, &mut storage, 33, false)\n            .unwrap()\n            .block,\n        block_33\n    );\n    assert_eq!(\n        get_block_header_by_height(&mut harness, &mut storage, 33, false).as_ref(),\n        Some(&block_v2_33.clone_header())\n    );\n    assert_eq!(\n        get_block_and_metadata_by_height(&mut harness, &mut storage, 99, false)\n            .unwrap()\n            .block,\n        block_99\n    );\n    assert_eq!(\n        get_block_header_by_height(&mut harness, &mut storage, 99, false).as_ref(),\n        Some(&block_v2_99.clone_header())\n    );\n}\n\nstatic TEST_STORAGE_DIR_1_5_2: Lazy<PathBuf> = Lazy::new(|| {\n    PathBuf::from(env!(\"CARGO_MANIFEST_DIR\")).join(\"../resources/test/storage/1.5.2/storage-1\")\n});\nstatic STORAGE_INFO_FILE_NAME: &str = \"storage_info.json\";\n\n#[derive(Serialize, Deserialize, Debug)]\nstruct Node1_5_2BlockInfo {\n    height: u64,\n    era: EraId,\n    approvals_hashes: Option<Vec<ApprovalsHash>>,\n    signatures: Option<BlockSignatures>,\n    deploy_hashes: Vec<DeployHash>,\n}\n\n// Summary information about the context of a database\n#[derive(Serialize, Deserialize, Debug)]\nstruct Node1_5_2StorageInfo {\n    net_name: String,\n    
protocol_version: ProtocolVersion,\n    block_range: (u64, u64),\n    blocks: HashMap<BlockHash, Node1_5_2BlockInfo>,\n    deploys: Vec<DeployHash>,\n}\n\nimpl Node1_5_2StorageInfo {\n    fn from_file(path: impl AsRef<Path>) -> Result<Self, io::Error> {\n        Ok(serde_json::from_slice(fs::read(path)?.as_slice()).expect(\"Malformed JSON\"))\n    }\n}\n\n// Use the storage component APIs to determine if a block is or is not in storage.\nfn assert_block_exists_in_storage(\n    harness: &mut ComponentHarness<UnitTestEvent>,\n    storage: &mut Storage,\n    block_hash: &BlockHash,\n    block_height: u64,\n    only_from_available_block_range: bool,\n    expect_exists_as_latest_version: bool,\n    expect_exists_as_versioned: bool,\n) {\n    let expect_exists = expect_exists_as_latest_version || expect_exists_as_versioned;\n\n    // Check if the block is stored at all\n    assert_eq!(\n        is_block_stored(harness, storage, *block_hash),\n        expect_exists\n    );\n\n    // GetBlock should return only blocks from storage that are of the current version.\n    assert_eq!(\n        get_block(harness, storage, *block_hash).is_some_and(|block| matches!(block, Block::V2(_))),\n        expect_exists_as_latest_version\n    );\n\n    // Check if we can get the block as a versioned Block.\n    let block = get_block(harness, storage, *block_hash);\n    assert_eq!(block.is_some_and(|_| true), expect_exists_as_versioned);\n\n    // Check if the header can be fetched from storage.\n    assert_eq!(\n        get_block_header(\n            harness,\n            storage,\n            *block_hash,\n            only_from_available_block_range,\n        )\n        .is_some_and(|_| true),\n        expect_exists\n    );\n    assert_eq!(\n        get_block_header(harness, storage, *block_hash, false).is_some_and(|_| true),\n        expect_exists\n    );\n    assert_eq!(\n        storage\n            .read_block_header_by_hash(block_hash)\n            .unwrap()\n            
.is_some_and(|_| true),\n        expect_exists\n    );\n\n    assert_eq!(\n        get_block_header_by_height(\n            harness,\n            storage,\n            block_height,\n            only_from_available_block_range,\n        )\n        .is_some_and(|_| true),\n        expect_exists\n    );\n    assert_eq!(\n        storage\n            .read_block_header_by_height(block_height, only_from_available_block_range)\n            .unwrap()\n            .is_some_and(|_| true),\n        expect_exists\n    );\n    assert_eq!(\n        storage\n            .read_block_header_by_height(block_height, false)\n            .unwrap()\n            .is_some_and(|_| true),\n        expect_exists\n    );\n\n    if expect_exists {\n        assert_eq!(\n            storage\n                .read_block_with_signatures_by_height(block_height, false)\n                .unwrap()\n                .block()\n                .hash(),\n            block_hash\n        );\n        assert_eq!(\n            storage\n                .read_block_with_signatures_by_hash(*block_hash, only_from_available_block_range)\n                .unwrap()\n                .block()\n                .height(),\n            block_height\n        );\n        assert_eq!(\n            storage\n                .read_block_with_signatures_by_height(block_height, false)\n                .unwrap()\n                .block()\n                .hash(),\n            block_hash\n        );\n        assert_eq!(\n            storage\n                .read_block_with_signatures_by_height(block_height, only_from_available_block_range)\n                .unwrap()\n                .block()\n                .hash(),\n            block_hash\n        );\n    }\n}\n\n// Use the storage component APIs to determine if the highest stored block is the one expected.\nfn assert_highest_block_in_storage(\n    harness: &mut ComponentHarness<UnitTestEvent>,\n    storage: &mut Storage,\n    only_from_available_block_range: bool,\n    
expected_block_hash: &BlockHash,\n    expected_block_height: u64,\n) {\n    assert_eq!(\n        get_highest_complete_block_header(harness, storage)\n            .unwrap()\n            .height(),\n        expected_block_height\n    );\n    let highest_block_header = storage.read_highest_block_header().unwrap();\n    assert_eq!(highest_block_header.block_hash(), *expected_block_hash);\n    assert_eq!(highest_block_header.height(), expected_block_height);\n    assert_eq!(\n        get_highest_complete_block(harness, storage).unwrap().hash(),\n        expected_block_hash\n    );\n    assert_eq!(\n        get_block_and_metadata_by_height(harness, storage, expected_block_height, false)\n            .unwrap()\n            .block\n            .hash(),\n        expected_block_hash\n    );\n\n    if only_from_available_block_range {\n        assert_eq!(\n            storage\n                .read_highest_block_with_signatures(true)\n                .unwrap()\n                .block()\n                .hash(),\n            expected_block_hash\n        );\n\n        assert_eq!(\n            get_highest_complete_block(harness, storage).unwrap().hash(),\n            expected_block_hash\n        );\n    }\n    assert_eq!(\n        storage\n            .read_highest_block_with_signatures(false)\n            .unwrap()\n            .block()\n            .hash(),\n        expected_block_hash\n    );\n}\n\n#[test]\n// Starting with node 2.0, the `Block` struct is versioned.\n// Since this change impacts the storage APIs, create a test to prove that we can still access old\n// unversioned blocks through the new APIs and also check that both versioned and unversioned blocks\n// can co-exist in storage.\n#[ignore = \"stop ignoring once decision around Transfer type is made\"]\nfn check_block_operations_with_node_1_5_2_storage() {\n    let rng: TestRng = TestRng::new();\n\n    let temp_dir = tempdir().unwrap();\n    copy_dir_recursive(TEST_STORAGE_DIR_1_5_2.as_path(), 
temp_dir.path()).unwrap();\n    let storage_info =\n        Node1_5_2StorageInfo::from_file(temp_dir.path().join(STORAGE_INFO_FILE_NAME)).unwrap();\n    let mut harness = ComponentHarness::builder()\n        .on_disk(temp_dir)\n        .rng(rng)\n        .build();\n    let mut storage = storage_fixture_from_parts(\n        &harness,\n        None,\n        Some(ProtocolVersion::from_parts(2, 0, 0)),\n        Some(storage_info.net_name.as_str()),\n        None,\n        None,\n    );\n    let chain_name_hash = ChainNameDigest::random(&mut harness.rng);\n\n    // Check that legacy blocks appear in the available range\n    let available_range = get_available_block_range(&mut harness, &mut storage);\n    assert_eq!(available_range.low(), storage_info.block_range.0);\n    assert_eq!(available_range.high(), storage_info.block_range.1);\n\n    // Check that all legacy blocks can be read as Versioned blocks with version set to V1\n    for (hash, block_info) in storage_info.blocks.iter() {\n        // Since all blocks in this db are V1, the blocks should exist as versioned blocks only.\n        assert_block_exists_in_storage(\n            &mut harness,\n            &mut storage,\n            hash,\n            block_info.height,\n            true,\n            false,\n            true,\n        );\n\n        // Check version\n        let block = get_block(&mut harness, &mut storage, *hash).unwrap();\n        assert!(matches!(block, Block::V1(_)));\n\n        assert_eq!(block.height(), block_info.height);\n\n        let approvals_hashes = get_approvals_hashes(&mut harness, &mut storage, *hash);\n        if let Some(expected_approvals_hashes) = &block_info.approvals_hashes {\n            let stored_approvals_hashes = approvals_hashes.unwrap().approvals_hashes().to_vec();\n            assert_eq!(stored_approvals_hashes, expected_approvals_hashes.clone());\n        }\n\n        let transfers = get_block_transfers(&mut harness, &mut storage, *hash);\n        if 
!block_info.deploy_hashes.is_empty() {\n            let mut stored_transfers: Vec<DeployHash> = transfers\n                .unwrap()\n                .iter()\n                .map(|transfer| match transfer {\n                    Transfer::V1(transfer_v1) => transfer_v1.deploy_hash,\n                    _ => panic!(\"expected transfer v1 variant\"),\n                })\n                .collect();\n            stored_transfers.sort();\n            let mut expected_deploys = block_info.deploy_hashes.clone();\n            expected_deploys.sort();\n            assert_eq!(stored_transfers, expected_deploys);\n        }\n\n        if let Some(expected_signatures) = &block_info.signatures {\n            for expected_signature in expected_signatures.finality_signatures() {\n                let stored_signature = get_block_signature(\n                    &mut harness,\n                    &mut storage,\n                    *hash,\n                    Box::new(expected_signature.public_key().clone()),\n                )\n                .unwrap();\n                assert_eq!(stored_signature, expected_signature);\n            }\n        }\n    }\n\n    let highest_expected_block_hash = storage_info\n        .blocks\n        .iter()\n        .find_map(|(hash, info)| (info.height == storage_info.block_range.1).then_some(*hash))\n        .unwrap();\n\n    assert_highest_block_in_storage(\n        &mut harness,\n        &mut storage,\n        true,\n        &highest_expected_block_hash,\n        storage_info.block_range.1,\n    );\n\n    assert!(storage.read_highest_block().is_some());\n    assert!(storage.get_highest_complete_block().unwrap().is_some());\n    assert!(get_highest_complete_block(&mut harness, &mut storage).is_some());\n    assert!(storage.read_highest_block().is_some());\n    assert!(get_highest_complete_block_header(&mut harness, &mut storage).is_some());\n    assert!(storage.read_highest_block_header().is_some());\n    assert_eq!(\n        
storage.read_highest_block().unwrap().height(),\n        storage_info.block_range.1\n    );\n\n    let mut lowest_stored_block_height = storage_info.block_range.0;\n    for height in 0..storage_info.block_range.0 {\n        if get_block_header_by_height(&mut harness, &mut storage, height, false).is_some() {\n            lowest_stored_block_height = height;\n            break;\n        }\n    }\n\n    // Now add some blocks and test if they can be retrieved correctly\n    if let Some(new_lowest_height) = lowest_stored_block_height.checked_sub(1) {\n        // Add a BlockV1 that precedes the lowest available block\n        let new_lowest_block: Arc<Block> = Arc::new(\n            TestBlockV1Builder::new()\n                .era(1)\n                .height(new_lowest_height)\n                .switch_block(false)\n                .build_versioned(&mut harness.rng),\n        );\n\n        // First check that the block doesn't exist.\n        assert_block_exists_in_storage(\n            &mut harness,\n            &mut storage,\n            new_lowest_block.hash(),\n            new_lowest_height,\n            false,\n            false,\n            false,\n        );\n\n        // Put the block to storage.\n        let was_new = put_block(&mut harness, &mut storage, new_lowest_block.clone());\n        assert!(was_new);\n\n        let block_signatures = random_signatures(\n            &mut harness.rng,\n            *new_lowest_block.hash(),\n            new_lowest_block.height(),\n            new_lowest_block.era_id(),\n            chain_name_hash,\n        );\n        assert!(put_block_signatures(\n            &mut harness,\n            &mut storage,\n            block_signatures,\n        ));\n\n        // Check that the block was stored and can be fetched as a versioned Block.\n        assert_block_exists_in_storage(\n            &mut harness,\n            &mut storage,\n            new_lowest_block.hash(),\n            new_lowest_height,\n            false,\n            
false,\n            true,\n        );\n    }\n\n    {\n        let new_highest_block_height = storage.read_highest_block().unwrap().height() + 1;\n\n        // Add a BlockV2 as a versioned block\n        let new_highest_block: Arc<Block> = Arc::new(\n            TestBlockBuilder::new()\n                .era(50)\n                .height(new_highest_block_height)\n                .switch_block(true)\n                .build_versioned(&mut harness.rng),\n        );\n\n        // First check that the block doesn't exist.\n        assert_block_exists_in_storage(\n            &mut harness,\n            &mut storage,\n            new_highest_block.hash(),\n            new_highest_block_height,\n            false,\n            false,\n            false,\n        );\n\n        let was_new = put_block(&mut harness, &mut storage, new_highest_block.clone());\n        assert!(was_new);\n\n        let block_signatures = random_signatures(\n            &mut harness.rng,\n            *new_highest_block.hash(),\n            new_highest_block.height(),\n            new_highest_block.era_id(),\n            chain_name_hash,\n        );\n        assert!(put_block_signatures(\n            &mut harness,\n            &mut storage,\n            block_signatures,\n        ));\n\n        // Check that the block was stored and can be fetched as a versioned Block or\n        // as a block at the latest version.\n        assert_block_exists_in_storage(\n            &mut harness,\n            &mut storage,\n            new_highest_block.hash(),\n            new_highest_block_height,\n            false,\n            true,\n            true,\n        );\n\n        assert_eq!(\n            storage.read_highest_block().unwrap().height(),\n            new_highest_block_height\n        );\n    }\n\n    {\n        let new_highest_block_height = storage.read_highest_block().unwrap().height() + 1;\n\n        // Add a BlockV2 as a unversioned block\n        let new_highest_block = TestBlockBuilder::new()\n 
           .era(51)\n            .height(new_highest_block_height)\n            .switch_block(false)\n            .build(&mut harness.rng);\n\n        // First check that the block doesn't exist.\n        assert_block_exists_in_storage(\n            &mut harness,\n            &mut storage,\n            new_highest_block.hash(),\n            new_highest_block_height,\n            false,\n            false,\n            false,\n        );\n\n        // Insert the block and mark it complete.\n        let was_new =\n            put_complete_block(&mut harness, &mut storage, new_highest_block.clone().into());\n        assert!(was_new);\n        let block_signatures = random_signatures(\n            &mut harness.rng,\n            *new_highest_block.hash(),\n            new_highest_block.height(),\n            new_highest_block.era_id(),\n            chain_name_hash,\n        );\n        assert!(put_block_signatures(\n            &mut harness,\n            &mut storage,\n            block_signatures,\n        ));\n\n        // Check that the block was stored and can be fetched as a versioned Block or\n        // as a block at the latest version.\n        assert_block_exists_in_storage(\n            &mut harness,\n            &mut storage,\n            new_highest_block.hash(),\n            new_highest_block_height,\n            true,\n            true,\n            true,\n        );\n\n        assert_eq!(\n            storage.read_highest_block().unwrap().height(),\n            new_highest_block_height\n        );\n\n        let available_range = get_available_block_range(&mut harness, &mut storage);\n        assert_eq!(available_range.high(), new_highest_block_height);\n\n        assert_highest_block_in_storage(\n            &mut harness,\n            &mut storage,\n            true,\n            new_highest_block.hash(),\n            new_highest_block_height,\n        );\n    }\n}\n"
  },
  {
    "path": "node/src/components/storage/utils.rs",
    "content": "use casper_binary_port::RecordId;\nuse casper_storage::{DbTableId, UnknownDbTableId};\nuse std::convert::TryFrom;\n\npub(crate) fn db_table_id_from_record_id(\n    record_id: RecordId,\n) -> Result<DbTableId, UnknownDbTableId> {\n    DbTableId::try_from(record_id as u16)\n}\n"
  },
  {
    "path": "node/src/components/storage.rs",
    "content": "//! Central storage component.\n//!\n//! The central storage component is in charge of persisting data to disk. Its core functionalities\n//! are\n//!\n//! * storing and loading blocks,\n//! * storing and loading deploys,\n//! * [temporary until refactored] holding `DeployExecutionInfo` for each deploy,\n//! * keeping an index of blocks by height and\n//! * [unimplemented] managing disk usage by pruning blocks and deploys from storage.\n//!\n//! Any I/O performed by the component is done on the event handling thread, this is on purpose as\n//! the assumption is that caching by LMDB will offset any gains from offloading it onto a separate\n//! thread, while keeping the maximum event processing time reasonable.\n//!\n//! ## Consistency\n//!\n//! The storage upholds a few invariants internally, namely:\n//!\n//! * [temporary until refactored] Storing an execution result for a deploy in the context of a\n//!   block is guaranteed to be idempotent: Storing the same result twice is a no-op, whilst\n//!   attempting to store a differing one will cause a fatal error.\n//! * Only one block can ever be associated with a specific block height. Attempting to store a\n//!   block with a different block already existing at the same height causes a fatal error.\n//! * Storing a deploy or block that already exists (same hash) is fine and will silently be\n//!   accepted.\n//!\n//! ## Errors\n//!\n//! The storage component itself is panic free and in general reports three classes of errors:\n//! 
Corruption, temporary resource exhaustion and potential bugs.\n\nmod config;\npub(crate) mod disjoint_sequences;\nmod error;\nmod event;\nmod metrics;\nmod object_pool;\n#[cfg(test)]\nmod tests;\nmod utils;\n\nuse casper_storage::block_store::{\n    lmdb::{IndexedLmdbBlockStore, LmdbBlockStore},\n    types::{\n        ApprovalsHashes, BlockExecutionResults, BlockHashHeightAndEra, BlockHeight, BlockTransfers,\n        LatestSwitchBlock, StateStore, StateStoreKey, Tip, TransactionFinalizedApprovals,\n    },\n    BlockStoreError, BlockStoreProvider, BlockStoreTransaction, DataReader, DataWriter,\n};\n\nuse std::{\n    borrow::Cow,\n    collections::{BTreeMap, BTreeSet, HashMap, HashSet},\n    convert::TryInto,\n    fmt::{self, Display, Formatter},\n    fs::{self, OpenOptions},\n    io::ErrorKind,\n    path::{Path, PathBuf},\n    sync::Arc,\n};\n\nuse casper_storage::DbRawBytesSpec;\n#[cfg(test)]\nuse casper_types::BlockWithSignatures;\nuse casper_types::{\n    bytesrepr::{FromBytes, ToBytes},\n    execution::{execution_result_v1, ExecutionResult, ExecutionResultV1},\n    Approval, ApprovalsHash, AvailableBlockRange, Block, BlockBody, BlockHash, BlockHeader,\n    BlockHeaderWithSignatures, BlockSignatures, BlockSignaturesV1, BlockSignaturesV2, BlockV2,\n    ChainNameDigest, DeployHash, EraId, ExecutionInfo, FinalitySignature, ProtocolVersion,\n    Timestamp, Transaction, TransactionConfig, TransactionHash, TransactionId, Transfer, U512,\n};\nuse datasize::DataSize;\nuse num_rational::Ratio;\nuse prometheus::Registry;\nuse smallvec::SmallVec;\nuse tracing::{debug, error, info, warn};\n\nuse crate::{\n    components::{\n        fetcher::{FetchItem, FetchResponse},\n        Component,\n    },\n    effect::{\n        announcements::FatalAnnouncement,\n        incoming::{NetRequest, NetRequestIncoming},\n        requests::{MarkBlockCompletedRequest, NetworkRequest, StorageRequest},\n        EffectBuilder, EffectExt, Effects,\n    },\n    fatal,\n    protocol::Message,\n    
types::{\n        BlockExecutionResultsOrChunk, BlockExecutionResultsOrChunkId, BlockWithMetadata,\n        ExecutableBlock, LegacyDeploy, MaxTtl, NodeId, NodeRng, SyncLeap, SyncLeapIdentifier,\n        TransactionHeader, VariantMismatch,\n    },\n    utils::{display_error, WithDir},\n};\n\npub use config::Config;\nuse disjoint_sequences::{DisjointSequences, Sequence};\npub use error::FatalStorageError;\nuse error::GetRequestError;\npub(crate) use event::Event;\nuse metrics::Metrics;\nuse object_pool::ObjectPool;\n\nconst COMPONENT_NAME: &str = \"storage\";\n\n/// Key under which completed blocks are to be stored.\nconst COMPLETED_BLOCKS_STORAGE_KEY: &[u8] = b\"completed_blocks_disjoint_sequences\";\n/// Name of the file created when initializing a force resync.\nconst FORCE_RESYNC_FILE_NAME: &str = \"force_resync\";\n\nconst STORAGE_FILES: [&str; 5] = [\n    \"data.lmdb\",\n    \"data.lmdb-lock\",\n    \"storage.lmdb\",\n    \"storage.lmdb-lock\",\n    \"sse_index\",\n];\n\n/// The storage component.\n#[derive(DataSize, Debug)]\npub struct Storage {\n    /// Storage location.\n    root: PathBuf,\n    /// Block store\n    pub(crate) block_store: IndexedLmdbBlockStore,\n    /// Runs of completed blocks known in storage.\n    completed_blocks: DisjointSequences,\n    /// The activation point era of the current protocol version.\n    activation_era: EraId,\n    /// The height of the final switch block of the previous protocol version.\n    key_block_height_for_activation_point: Option<u64>,\n    /// Whether or not memory deduplication is enabled.\n    enable_mem_deduplication: bool,\n    /// An in-memory pool of already loaded serialized items.\n    ///\n    /// Keyed by serialized item ID, contains the serialized item.\n    serialized_item_pool: ObjectPool<Box<[u8]>>,\n    /// The number of eras relative to the highest block's era which are considered as recent for\n    /// the purpose of deciding how to respond to a `NetRequest::SyncLeap`.\n    recent_era_count: 
u64,\n    #[data_size(skip)]\n    metrics: Option<Metrics>,\n    /// The maximum TTL of a deploy.\n    max_ttl: MaxTtl,\n    /// The hash of the chain name.\n    chain_name_hash: ChainNameDigest,\n    /// The transaction config as specified by the chainspec.\n    transaction_config: TransactionConfig,\n    /// The utilization of blocks.\n    utilization_tracker: BTreeMap<EraId, BTreeMap<u64, u64>>,\n}\n\npub(crate) enum HighestOrphanedBlockResult {\n    MissingHighestSequence,\n    Orphan(BlockHeader),\n    MissingHeader(u64),\n}\n\nimpl Display for HighestOrphanedBlockResult {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            HighestOrphanedBlockResult::MissingHighestSequence => {\n                write!(f, \"missing highest sequence\")\n            }\n            HighestOrphanedBlockResult::Orphan(block_header) => write!(\n                f,\n                \"orphan, height={}, hash={}\",\n                block_header.height(),\n                block_header.block_hash()\n            ),\n            HighestOrphanedBlockResult::MissingHeader(height) => {\n                write!(f, \"missing header for block at height: {}\", height)\n            }\n        }\n    }\n}\n\nimpl<REv> Component<REv> for Storage\nwhere\n    REv: From<FatalAnnouncement> + From<NetworkRequest<Message>> + Send,\n{\n    type Event = Event;\n\n    fn handle_event(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        _rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event> {\n        let result = match event {\n            Event::StorageRequest(req) => self.handle_storage_request(*req),\n            Event::NetRequestIncoming(ref incoming) => {\n                match self.handle_net_request_incoming::<REv>(effect_builder, incoming) {\n                    Ok(effects) => Ok(effects),\n                    Err(GetRequestError::Fatal(fatal_error)) => Err(fatal_error),\n                    Err(ref other_err) => 
{\n                        warn!(\n                            sender=%incoming.sender,\n                            err=display_error(other_err),\n                            \"error handling net request\"\n                        );\n                        // We could still send the requester a \"not found\" message, and could do\n                        // so even in the fatal case, but it is safer to not do so at the\n                        // moment, giving less surface area for possible amplification attacks.\n                        Ok(Effects::new())\n                    }\n                }\n            }\n            Event::MarkBlockCompletedRequest(req) => self.handle_mark_block_completed_request(req),\n            Event::MakeBlockExecutableRequest(req) => {\n                let ret = self.make_executable_block(&req.block_hash);\n                match ret {\n                    Ok(maybe) => Ok(req.responder.respond(maybe).ignore()),\n                    Err(err) => Err(err),\n                }\n            }\n        };\n\n        // Any error is turned into a fatal effect, the component itself does not panic. 
Note that\n        // we are dropping a lot of responders this way, but since we are crashing with fatal\n        // anyway, it should not matter.\n        match result {\n            Ok(effects) => effects,\n            Err(err) => fatal!(effect_builder, \"storage error: {}\", err).ignore(),\n        }\n    }\n\n    fn name(&self) -> &str {\n        COMPONENT_NAME\n    }\n}\n\nimpl Storage {\n    /// Creates a new storage component.\n    #[allow(clippy::too_many_arguments)]\n    pub fn new(\n        cfg: &WithDir<Config>,\n        hard_reset_to_start_of_era: Option<EraId>,\n        protocol_version: ProtocolVersion,\n        activation_era: EraId,\n        network_name: &str,\n        max_ttl: MaxTtl,\n        recent_era_count: u64,\n        registry: Option<&Registry>,\n        force_resync: bool,\n        transaction_config: TransactionConfig,\n    ) -> Result<Self, FatalStorageError> {\n        let config = cfg.value();\n\n        // Create the database directory.\n        let mut root = cfg.with_dir(config.path.clone());\n        let network_subdir = root.join(network_name);\n\n        if !network_subdir.exists() {\n            fs::create_dir_all(&network_subdir).map_err(|err| {\n                FatalStorageError::CreateDatabaseDirectory(network_subdir.clone(), err)\n            })?;\n        }\n\n        if should_move_storage_files_to_network_subdir(&root, &STORAGE_FILES)? 
{\n            move_storage_files_to_network_subdir(&root, &network_subdir, &STORAGE_FILES)?;\n        }\n\n        root = network_subdir;\n\n        // Calculate the upper bound for the memory map that is potentially used.\n        let total_size = config\n            .max_block_store_size\n            .saturating_add(config.max_deploy_store_size)\n            .saturating_add(config.max_deploy_metadata_store_size);\n\n        let block_store = LmdbBlockStore::new(root.as_path(), total_size)?;\n        let indexed_block_store =\n            IndexedLmdbBlockStore::new(block_store, hard_reset_to_start_of_era, protocol_version)?;\n\n        let metrics = registry.map(Metrics::new).transpose()?;\n\n        let mut component = Self {\n            root,\n            block_store: indexed_block_store,\n            completed_blocks: Default::default(),\n            activation_era,\n            key_block_height_for_activation_point: None,\n            enable_mem_deduplication: config.enable_mem_deduplication,\n            serialized_item_pool: ObjectPool::new(config.mem_pool_prune_interval),\n            recent_era_count,\n            max_ttl,\n            utilization_tracker: BTreeMap::new(),\n            metrics,\n            chain_name_hash: ChainNameDigest::from_chain_name(network_name),\n            transaction_config,\n        };\n\n        if force_resync {\n            let force_resync_file_path = component.root_path().join(FORCE_RESYNC_FILE_NAME);\n            // Check if resync is already in progress. 
Force resync will kick\n            // in only when the marker file didn't exist before.\n            // Use `OpenOptions::create_new` to atomically check for the file\n            // presence and create it if necessary.\n            match OpenOptions::new()\n                .create_new(true)\n                .write(true)\n                .open(&force_resync_file_path)\n            {\n                Ok(_file) => {\n                    // When the force resync marker file was not present and\n                    // is now created, initialize force resync.\n                    info!(\"initializing force resync\");\n                    // Default `storage.completed_blocks`.\n                    component.completed_blocks = Default::default();\n                    component.persist_completed_blocks()?;\n                    // Exit the initialization function early.\n                    return Ok(component);\n                }\n                Err(io_err) if io_err.kind() == ErrorKind::AlreadyExists => {\n                    info!(\"skipping force resync as marker file exists\");\n                }\n                Err(io_err) => {\n                    warn!(\n                        \"couldn't operate on the force resync marker file at path {}: {}\",\n                        force_resync_file_path.to_string_lossy(),\n                        io_err\n                    );\n                }\n            }\n        }\n\n        {\n            let ro_txn = component.block_store.checkout_ro()?;\n            let maybe_state_store: Option<Vec<u8>> = ro_txn.read(StateStoreKey::new(\n                Cow::Borrowed(COMPLETED_BLOCKS_STORAGE_KEY),\n            ))?;\n            match maybe_state_store {\n                Some(raw) => {\n                    let (mut sequences, _) = DisjointSequences::from_vec(raw)\n                        .map_err(FatalStorageError::UnexpectedDeserializationFailure)?;\n\n                    // Truncate the sequences in case we removed blocks via a 
hard reset.\n                    if let Some(header) = DataReader::<Tip, BlockHeader>::read(&ro_txn, Tip)? {\n                        sequences.truncate(header.height());\n                    }\n\n                    component.completed_blocks = sequences;\n                }\n                None => {\n                    // No state so far. We can make the following observations:\n                    //\n                    // 1. Any block already in storage from versions prior to 1.5 (no fast-sync)\n                    // MUST    have the corresponding global state in contract\n                    // runtime due to the way sync    worked previously, so with\n                    // the potential exception of finality signatures, we    can\n                    // consider all these blocks complete. 2. Any block acquired\n                    // from that point onwards was subject to the insertion of the\n                    //    appropriate announcements (`BlockCompletedAnnouncement`), which would have\n                    //    caused the creation of the completed blocks index, thus would not have\n                    //    resulted in a `None` value here.\n                    //\n                    // Note that a previous run of this version which aborted early could have\n                    // stored some blocks and/or block-headers without\n                    // completing the sync process. 
Hence, when setting the\n                    // `completed_blocks` in this None case, we'll only consider blocks\n                    // from a previous protocol version as complete.\n\n                    let maybe_block_header: Option<BlockHeader> = ro_txn.read(Tip)?;\n                    if let Some(highest_block_header) = maybe_block_header {\n                        for height in (0..=highest_block_header.height()).rev() {\n                            let maybe_header: Option<BlockHeader> = ro_txn.read(height)?;\n                            match maybe_header {\n                                Some(header) if header.protocol_version() < protocol_version => {\n                                    component.completed_blocks =\n                                        DisjointSequences::new(Sequence::new(0, header.height()));\n                                    break;\n                                }\n                                _ => {}\n                            }\n                        }\n                    };\n                }\n            }\n        }\n        component.persist_completed_blocks()?;\n        Ok(component)\n    }\n\n    /// Returns the path to the storage folder.\n    pub(crate) fn root_path(&self) -> &Path {\n        &self.root\n    }\n\n    fn handle_net_request_incoming<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        incoming: &NetRequestIncoming,\n    ) -> Result<Effects<Event>, GetRequestError>\n    where\n        REv: From<NetworkRequest<Message>> + Send,\n    {\n        if self.enable_mem_deduplication {\n            let unique_id = incoming.message.unique_id();\n\n            if let Some(serialized_item) = self\n                .serialized_item_pool\n                .get(AsRef::<[u8]>::as_ref(&unique_id))\n            {\n                // We found an item in the pool. 
We can short-circuit all\n                // deserialization/serialization and return the canned item\n                // immediately.\n                let found = Message::new_get_response_from_serialized(\n                    incoming.message.tag(),\n                    serialized_item,\n                );\n                return Ok(effect_builder.send_message(incoming.sender, found).ignore());\n            }\n        }\n\n        match *(incoming.message) {\n            NetRequest::Transaction(ref serialized_id) => {\n                let id = decode_item_id::<Transaction>(serialized_id)?;\n                let opt_item = self.get_transaction_by_id(id)?;\n                let fetch_response = FetchResponse::from_opt(id, opt_item);\n\n                Ok(self.update_pool_and_send(\n                    effect_builder,\n                    incoming.sender,\n                    serialized_id,\n                    fetch_response,\n                )?)\n            }\n            NetRequest::LegacyDeploy(ref serialized_id) => {\n                let id = decode_item_id::<LegacyDeploy>(serialized_id)?;\n                let opt_item = self.get_legacy_deploy(id)?;\n                let fetch_response = FetchResponse::from_opt(id, opt_item);\n\n                Ok(self.update_pool_and_send(\n                    effect_builder,\n                    incoming.sender,\n                    serialized_id,\n                    fetch_response,\n                )?)\n            }\n            NetRequest::Block(ref serialized_id) => {\n                let id = decode_item_id::<Block>(serialized_id)?;\n                let opt_item: Option<Block> = self\n                    .block_store\n                    .checkout_ro()\n                    .map_err(FatalStorageError::from)?\n                    .read(id)\n                    .map_err(FatalStorageError::from)?;\n                let fetch_response = FetchResponse::from_opt(id, opt_item);\n\n                Ok(self.update_pool_and_send(\n    
                effect_builder,\n                    incoming.sender,\n                    serialized_id,\n                    fetch_response,\n                )?)\n            }\n            NetRequest::BlockHeader(ref serialized_id) => {\n                let item_id = decode_item_id::<BlockHeader>(serialized_id)?;\n                let opt_item: Option<BlockHeader> = self\n                    .block_store\n                    .checkout_ro()\n                    .map_err(FatalStorageError::from)?\n                    .read(item_id)\n                    .map_err(FatalStorageError::from)?;\n                let fetch_response = FetchResponse::from_opt(item_id, opt_item);\n\n                Ok(self.update_pool_and_send(\n                    effect_builder,\n                    incoming.sender,\n                    serialized_id,\n                    fetch_response,\n                )?)\n            }\n            NetRequest::FinalitySignature(ref serialized_id) => {\n                let id = decode_item_id::<FinalitySignature>(serialized_id)?;\n                let opt_item = self\n                    .block_store\n                    .checkout_ro()\n                    .map_err(FatalStorageError::from)?\n                    .read(*id.block_hash())\n                    .map_err(FatalStorageError::from)?\n                    .and_then(|block_signatures: BlockSignatures| {\n                        block_signatures.finality_signature(id.public_key())\n                    });\n\n                if let Some(item) = opt_item.as_ref() {\n                    if item.block_hash() != id.block_hash() || item.era_id() != id.era_id() {\n                        return Err(GetRequestError::FinalitySignatureIdMismatch {\n                            requested_id: id,\n                            finality_signature: Box::new(item.clone()),\n                        });\n                    }\n                }\n                let fetch_response = FetchResponse::from_opt(id, 
opt_item);\n\n                Ok(self.update_pool_and_send(\n                    effect_builder,\n                    incoming.sender,\n                    serialized_id,\n                    fetch_response,\n                )?)\n            }\n            NetRequest::SyncLeap(ref serialized_id) => {\n                let item_id = decode_item_id::<SyncLeap>(serialized_id)?;\n                let fetch_response = self.get_sync_leap(item_id)?;\n\n                Ok(self.update_pool_and_send(\n                    effect_builder,\n                    incoming.sender,\n                    serialized_id,\n                    fetch_response,\n                )?)\n            }\n            NetRequest::ApprovalsHashes(ref serialized_id) => {\n                let item_id = decode_item_id::<ApprovalsHashes>(serialized_id)?;\n                let opt_item: Option<ApprovalsHashes> = self\n                    .block_store\n                    .checkout_ro()\n                    .map_err(FatalStorageError::from)?\n                    .read(item_id)\n                    .map_err(FatalStorageError::from)?;\n                let fetch_response = FetchResponse::from_opt(item_id, opt_item);\n\n                Ok(self.update_pool_and_send(\n                    effect_builder,\n                    incoming.sender,\n                    serialized_id,\n                    fetch_response,\n                )?)\n            }\n            NetRequest::BlockExecutionResults(ref serialized_id) => {\n                let item_id = decode_item_id::<BlockExecutionResultsOrChunk>(serialized_id)?;\n                let opt_item = self.read_block_execution_results_or_chunk(&item_id)?;\n                let fetch_response = FetchResponse::from_opt(item_id, opt_item);\n\n                Ok(self.update_pool_and_send(\n                    effect_builder,\n                    incoming.sender,\n                    serialized_id,\n                    fetch_response,\n                )?)\n            }\n        
}\n    }\n\n    /// Handles a storage request.\n    fn handle_storage_request(\n        &mut self,\n        req: StorageRequest,\n    ) -> Result<Effects<Event>, FatalStorageError> {\n        // Note: Database IO is handled in a blocking fashion on purpose throughout this function.\n        // The rationale is that long IO operations are very rare and cache misses frequent, so on\n        // average the actual execution time will be very low.\n        Ok(match req {\n            StorageRequest::PutBlock { block, responder } => {\n                let mut rw_txn = self.block_store.checkout_rw()?;\n                let _ = rw_txn.write(&*block)?;\n                rw_txn.commit()?;\n                responder.respond(true).ignore()\n            }\n            StorageRequest::PutApprovalsHashes {\n                approvals_hashes,\n                responder,\n            } => {\n                let mut rw_txn = self.block_store.checkout_rw()?;\n                let _ = rw_txn.write(&*approvals_hashes)?;\n                rw_txn.commit()?;\n                responder.respond(true).ignore()\n            }\n            StorageRequest::GetBlock {\n                block_hash,\n                responder,\n            } => {\n                let maybe_block = self.block_store.checkout_ro()?.read(block_hash)?;\n                responder.respond(maybe_block).ignore()\n            }\n            StorageRequest::IsBlockStored {\n                block_hash,\n                responder,\n            } => {\n                let txn = self.block_store.checkout_ro()?;\n                responder\n                    .respond(DataReader::<BlockHash, Block>::exists(&txn, block_hash)?)\n                    .ignore()\n            }\n            StorageRequest::GetApprovalsHashes {\n                block_hash,\n                responder,\n            } => responder\n                .respond(self.block_store.checkout_ro()?.read(block_hash)?)\n                .ignore(),\n            
StorageRequest::GetHighestCompleteBlock { responder } => responder\n                .respond(self.get_highest_complete_block()?)\n                .ignore(),\n            StorageRequest::GetHighestCompleteBlockHeader { responder } => responder\n                .respond(self.get_highest_complete_block_header()?)\n                .ignore(),\n            StorageRequest::GetTransactionsEraIds {\n                transaction_hashes,\n                responder,\n            } => {\n                let mut era_ids = HashSet::new();\n                let txn = self.block_store.checkout_ro()?;\n                for transaction_hash in &transaction_hashes {\n                    let maybe_block_info: Option<BlockHashHeightAndEra> =\n                        txn.read(*transaction_hash)?;\n                    if let Some(block_info) = maybe_block_info {\n                        era_ids.insert(block_info.era_id);\n                    }\n                }\n                responder.respond(era_ids).ignore()\n            }\n            StorageRequest::GetBlockHeader {\n                block_hash,\n                only_from_available_block_range,\n                responder,\n            } => {\n                let txn = self.block_store.checkout_ro()?;\n                responder\n                    .respond(self.get_single_block_header_restricted(\n                        &txn,\n                        &block_hash,\n                        only_from_available_block_range,\n                    )?)\n                    .ignore()\n            }\n            StorageRequest::GetBlockTransfers {\n                block_hash,\n                responder,\n            } => {\n                let maybe_transfers = self.get_transfers(&block_hash)?;\n                responder.respond(maybe_transfers).ignore()\n            }\n            StorageRequest::PutTransaction {\n                transaction,\n                responder,\n            } => {\n                let mut rw_txn = 
self.block_store.checkout_rw()?;\n                if DataReader::<TransactionHash, Transaction>::exists(&rw_txn, transaction.hash())?\n                {\n                    responder.respond(false).ignore()\n                } else {\n                    let _ = rw_txn.write(&*transaction)?;\n                    rw_txn.commit()?;\n                    responder.respond(true).ignore()\n                }\n            }\n            StorageRequest::GetTransactions {\n                transaction_hashes,\n                responder,\n            } => responder\n                .respond(self.get_transactions_with_finalized_approvals(transaction_hashes.iter())?)\n                .ignore(),\n            StorageRequest::GetLegacyDeploy {\n                deploy_hash,\n                responder,\n            } => {\n                let maybe_legacy_deploy = self.get_legacy_deploy(deploy_hash)?;\n                responder.respond(maybe_legacy_deploy).ignore()\n            }\n            StorageRequest::GetTransaction {\n                transaction_id,\n                responder,\n            } => {\n                let ro_txn = self.block_store.checkout_ro()?;\n                let maybe_transaction = match Self::get_transaction_with_finalized_approvals(\n                    &ro_txn,\n                    &transaction_id.transaction_hash(),\n                )? 
{\n                    None => None,\n                    Some((transaction, maybe_approvals)) => {\n                        let transaction = if let Some(approvals) = maybe_approvals {\n                            transaction.with_approvals(approvals)\n                        } else {\n                            transaction\n                        };\n                        (transaction.fetch_id() == transaction_id).then_some(transaction)\n                    }\n                };\n                responder.respond(maybe_transaction).ignore()\n            }\n            StorageRequest::GetTransactionAndExecutionInfo {\n                transaction_hash,\n                with_finalized_approvals,\n                responder,\n            } => {\n                let ro_txn = self.block_store.checkout_ro()?;\n\n                let transaction = if with_finalized_approvals {\n                    match Self::get_transaction_with_finalized_approvals(\n                        &ro_txn,\n                        &transaction_hash,\n                    )? {\n                        Some((transaction, maybe_approvals)) => {\n                            if let Some(approvals) = maybe_approvals {\n                                transaction.with_approvals(approvals)\n                            } else {\n                                transaction\n                            }\n                        }\n                        None => return Ok(responder.respond(None).ignore()),\n                    }\n                } else {\n                    match ro_txn.read(transaction_hash)? {\n                        Some(transaction) => transaction,\n                        None => return Ok(responder.respond(None).ignore()),\n                    }\n                };\n\n                let block_hash_height_and_era: BlockHashHeightAndEra =\n                    match ro_txn.read(transaction_hash)? 
{\n                        Some(value) => value,\n                        None => return Ok(responder.respond(Some((transaction, None))).ignore()),\n                    };\n\n                let execution_result = ro_txn.read(transaction_hash)?;\n                let execution_info = ExecutionInfo {\n                    block_hash: block_hash_height_and_era.block_hash,\n                    block_height: block_hash_height_and_era.block_height,\n                    execution_result,\n                };\n\n                responder\n                    .respond(Some((transaction, Some(execution_info))))\n                    .ignore()\n            }\n            StorageRequest::IsTransactionStored {\n                transaction_id,\n                responder,\n            } => {\n                let txn = self.block_store.checkout_ro()?;\n                let has_transaction = DataReader::<TransactionHash, Transaction>::exists(\n                    &txn,\n                    transaction_id.transaction_hash(),\n                )?;\n                responder.respond(has_transaction).ignore()\n            }\n            StorageRequest::GetExecutionResults {\n                block_hash,\n                responder,\n            } => {\n                let txn = self.block_store.checkout_ro()?;\n                responder\n                    .respond(Self::get_execution_results_with_transaction_headers(\n                        &txn,\n                        &block_hash,\n                    )?)\n                    .ignore()\n            }\n            StorageRequest::GetBlockExecutionResultsOrChunk { id, responder } => responder\n                .respond(self.read_block_execution_results_or_chunk(&id)?)\n                .ignore(),\n            StorageRequest::PutExecutionResults {\n                block_hash,\n                block_height,\n                era_id,\n                execution_results,\n                responder,\n            } => {\n                let mut 
rw_txn = self.block_store.checkout_rw()?;\n                let _ = rw_txn.write(&BlockExecutionResults {\n                    block_info: BlockHashHeightAndEra::new(*block_hash, block_height, era_id),\n                    exec_results: execution_results,\n                })?;\n                rw_txn.commit()?;\n                responder.respond(()).ignore()\n            }\n            StorageRequest::GetFinalitySignature { id, responder } => {\n                let maybe_sig = self\n                    .block_store\n                    .checkout_ro()?\n                    .read(*id.block_hash())?\n                    .and_then(|sigs: BlockSignatures| sigs.finality_signature(id.public_key()))\n                    .filter(|sig| sig.era_id() == id.era_id());\n                responder.respond(maybe_sig).ignore()\n            }\n            StorageRequest::IsFinalitySignatureStored { id, responder } => {\n                let has_signature = self\n                    .block_store\n                    .checkout_ro()?\n                    .read(*id.block_hash())?\n                    .map(|sigs: BlockSignatures| sigs.has_finality_signature(id.public_key()))\n                    .unwrap_or(false);\n                responder.respond(has_signature).ignore()\n            }\n            StorageRequest::GetBlockAndMetadataByHeight {\n                block_height,\n                only_from_available_block_range,\n                responder,\n            } => {\n                if !(self.should_return_block(block_height, only_from_available_block_range)) {\n                    return Ok(responder.respond(None).ignore());\n                }\n\n                let ro_txn = self.block_store.checkout_ro()?;\n\n                let block: Block = {\n                    if let Some(block) = ro_txn.read(block_height)? 
{\n                        block\n                    } else {\n                        return Ok(responder.respond(None).ignore());\n                    }\n                };\n\n                let hash = block.hash();\n                let block_signatures = match ro_txn.read(*hash)? {\n                    Some(signatures) => signatures,\n                    None => self.get_default_block_signatures(&block),\n                };\n                responder\n                    .respond(Some(BlockWithMetadata {\n                        block,\n                        block_signatures,\n                    }))\n                    .ignore()\n            }\n            StorageRequest::PutBlockSignatures {\n                signatures,\n                responder,\n            } => {\n                if signatures.is_empty() {\n                    error!(\n                        ?signatures,\n                        \"should not attempt to store empty collection of block signatures\"\n                    );\n                    return Ok(responder.respond(false).ignore());\n                }\n                let mut txn = self.block_store.checkout_rw()?;\n                let old_data: Option<BlockSignatures> = txn.read(*signatures.block_hash())?;\n                let new_data = match old_data {\n                    None => signatures,\n                    Some(mut data) => {\n                        if let Err(error) = data.merge(signatures) {\n                            error!(%error, \"failed to put block signatures\");\n                            return Ok(responder.respond(false).ignore());\n                        }\n                        data\n                    }\n                };\n                let _ = txn.write(&new_data)?;\n                txn.commit()?;\n                responder.respond(true).ignore()\n            }\n            StorageRequest::PutFinalitySignature {\n                signature,\n                responder,\n            } => {\n        
        let mut rw_txn = self.block_store.checkout_rw()?;\n                let block_hash = signature.block_hash();\n                let mut block_signatures: BlockSignatures =\n                    if let Some(existing_signatures) = rw_txn.read(*block_hash)? {\n                        existing_signatures\n                    } else {\n                        match &*signature {\n                            FinalitySignature::V1(signature) => {\n                                BlockSignaturesV1::new(*signature.block_hash(), signature.era_id())\n                                    .into()\n                            }\n                            FinalitySignature::V2(signature) => BlockSignaturesV2::new(\n                                *signature.block_hash(),\n                                signature.block_height(),\n                                signature.era_id(),\n                                signature.chain_name_hash(),\n                            )\n                            .into(),\n                        }\n                    };\n                match (&mut block_signatures, *signature) {\n                    (\n                        BlockSignatures::V1(ref mut block_signatures),\n                        FinalitySignature::V1(signature),\n                    ) => {\n                        block_signatures.insert_signature(\n                            signature.public_key().clone(),\n                            *signature.signature(),\n                        );\n                    }\n                    (\n                        BlockSignatures::V2(ref mut block_signatures),\n                        FinalitySignature::V2(signature),\n                    ) => {\n                        block_signatures.insert_signature(\n                            signature.public_key().clone(),\n                            *signature.signature(),\n                        );\n                    }\n                    (block_signatures, signature) => {\n  
                      let mismatch =\n                            VariantMismatch(Box::new((block_signatures.clone(), signature)));\n                        return Err(FatalStorageError::from(mismatch));\n                    }\n                }\n\n                let _ = rw_txn.write(&block_signatures);\n                rw_txn.commit()?;\n                responder.respond(true).ignore()\n            }\n            StorageRequest::GetBlockSignature {\n                block_hash,\n                public_key,\n                responder,\n            } => {\n                let maybe_signatures: Option<BlockSignatures> =\n                    self.block_store.checkout_ro()?.read(block_hash)?;\n                responder\n                    .respond(\n                        maybe_signatures\n                            .and_then(|signatures| signatures.finality_signature(&public_key)),\n                    )\n                    .ignore()\n            }\n            StorageRequest::GetBlockHeaderByHeight {\n                block_height,\n                only_from_available_block_range,\n                responder,\n            } => {\n                let maybe_header = self\n                    .read_block_header_by_height(block_height, only_from_available_block_range)?;\n                responder.respond(maybe_header).ignore()\n            }\n            StorageRequest::GetLatestSwitchBlockHeader { responder } => {\n                let txn = self.block_store.checkout_ro()?;\n                let maybe_header = txn.read(LatestSwitchBlock)?;\n                responder.respond(maybe_header).ignore()\n            }\n            StorageRequest::GetSwitchBlockHeaderByEra { era_id, responder } => {\n                let txn = self.block_store.checkout_ro()?;\n                let maybe_header = txn.read(era_id)?;\n                responder.respond(maybe_header).ignore()\n            }\n            StorageRequest::PutBlockHeader {\n                block_header,\n                
responder,\n            } => {\n                let mut rw_txn = self.block_store.checkout_rw()?;\n                let _ = rw_txn.write(&*block_header)?;\n                rw_txn.commit()?;\n                responder.respond(true).ignore()\n            }\n            StorageRequest::GetAvailableBlockRange { responder } => {\n                responder.respond(self.get_available_block_range()).ignore()\n            }\n            StorageRequest::StoreFinalizedApprovals {\n                ref transaction_hash,\n                ref finalized_approvals,\n                responder,\n            } => {\n                info!(txt=?transaction_hash, count=finalized_approvals.len(), \"storing finalized approvals {:?}\", finalized_approvals);\n                responder\n                    .respond(self.store_finalized_approvals(transaction_hash, finalized_approvals)?)\n                    .ignore()\n            }\n            StorageRequest::PutExecutedBlock {\n                block,\n                approvals_hashes,\n                execution_results,\n                responder,\n            } => {\n                let block: Block = (*block).clone().into();\n                let transaction_config = self.transaction_config.clone();\n                responder\n                    .respond(self.put_executed_block(\n                        transaction_config,\n                        &block,\n                        &approvals_hashes,\n                        execution_results,\n                    )?)\n                    .ignore()\n            }\n            StorageRequest::GetKeyBlockHeightForActivationPoint { responder } => {\n                // If we haven't already cached the height, try to retrieve the key block header.\n                if self.key_block_height_for_activation_point.is_none() {\n                    let key_block_era = self.activation_era.predecessor().unwrap_or_default();\n                    let txn = self.block_store.checkout_ro()?;\n                   
 let key_block_header: BlockHeader = match txn.read(key_block_era)? {\n                        Some(block_header) => block_header,\n                        None => return Ok(responder.respond(None).ignore()),\n                    };\n                    self.key_block_height_for_activation_point = Some(key_block_header.height());\n                }\n                responder\n                    .respond(self.key_block_height_for_activation_point)\n                    .ignore()\n            }\n            StorageRequest::GetRawData {\n                key,\n                responder,\n                record_id,\n            } => {\n                let db_table_id = utils::db_table_id_from_record_id(record_id)\n                    .map_err(|_| FatalStorageError::UnexpectedRecordId(record_id))?;\n                let txn = self.block_store.checkout_ro()?;\n                let maybe_data: Option<DbRawBytesSpec> = txn.read((db_table_id, key))?;\n                match maybe_data {\n                    None => responder.respond(None).ignore(),\n                    Some(db_raw) => responder.respond(Some(db_raw)).ignore(),\n                }\n            }\n            StorageRequest::GetBlockUtilizationScore {\n                era_id,\n                block_height,\n                switch_block_utilization,\n                responder,\n            } => {\n                let utilization = self.get_block_utilization_score(\n                    era_id,\n                    block_height,\n                    switch_block_utilization,\n                );\n\n                responder.respond(utilization).ignore()\n            }\n        })\n    }\n\n    pub(crate) fn read_block_header_by_height(\n        &self,\n        block_height: u64,\n        only_from_available_block_range: bool,\n    ) -> Result<Option<BlockHeader>, FatalStorageError> {\n        if !(self.should_return_block(block_height, only_from_available_block_range)) {\n            Ok(None)\n        } else {\n        
    let txn = self.block_store.checkout_ro()?;\n            txn.read(block_height).map_err(FatalStorageError::from)\n        }\n    }\n\n    pub(crate) fn get_switch_block_by_era_id(\n        &self,\n        era_id: &EraId,\n    ) -> Result<Option<Block>, FatalStorageError> {\n        let txn = self.block_store.checkout_ro()?;\n        txn.read(*era_id).map_err(FatalStorageError::from)\n    }\n\n    /// Retrieves a set of transactions, along with their potential finalized approvals.\n    #[allow(clippy::type_complexity)]\n    fn get_transactions_with_finalized_approvals<'a>(\n        &self,\n        transaction_hashes: impl Iterator<Item = &'a TransactionHash>,\n    ) -> Result<SmallVec<[Option<(Transaction, Option<BTreeSet<Approval>>)>; 1]>, FatalStorageError>\n    {\n        let ro_txn = self.block_store.checkout_ro()?;\n\n        transaction_hashes\n            .map(|transaction_hash| {\n                Self::get_transaction_with_finalized_approvals(&ro_txn, transaction_hash)\n            })\n            .collect()\n    }\n\n    pub(crate) fn put_executed_block(\n        &mut self,\n        transaction_config: TransactionConfig,\n        block: &Block,\n        approvals_hashes: &ApprovalsHashes,\n        execution_results: HashMap<TransactionHash, ExecutionResult>,\n    ) -> Result<bool, FatalStorageError> {\n        let mut txn = self.block_store.checkout_rw()?;\n        let era_id = block.era_id();\n        let block_utilization_score = block.block_utilization(transaction_config.clone());\n        let has_hit_slot_limit = block.has_hit_slot_capacity(transaction_config.clone());\n        let block_hash = txn.write(block)?;\n        let _ = txn.write(approvals_hashes)?;\n        let block_info = BlockHashHeightAndEra::new(block_hash, block.height(), block.era_id());\n\n        let utilization = if has_hit_slot_limit {\n            debug!(\"Block is at slot capacity, using slot utilization score\");\n            block_utilization_score\n        } else if 
execution_results.is_empty() {\n            0u64\n        } else {\n            let total_gas_utilization = {\n                let total_gas_limit: U512 = execution_results\n                    .values()\n                    .map(|results| match results {\n                        ExecutionResult::V1(v1_result) => match v1_result {\n                            ExecutionResultV1::Failure { cost, .. } => *cost,\n                            ExecutionResultV1::Success { cost, .. } => *cost,\n                        },\n                        ExecutionResult::V2(v2_result) => v2_result.limit.value(),\n                    })\n                    .sum();\n\n                let consumed: u64 = total_gas_limit.as_u64();\n                let block_gas_limit = transaction_config.block_gas_limit;\n\n                Ratio::new(consumed * 100u64, block_gas_limit).to_integer()\n            };\n            debug!(\"Gas utilization at {total_gas_utilization}\");\n\n            let total_size_utilization = {\n                let size_used: u64 = execution_results\n                    .values()\n                    .map(|results| {\n                        if let ExecutionResult::V2(result) = results {\n                            result.size_estimate\n                        } else {\n                            0u64\n                        }\n                    })\n                    .sum();\n\n                let block_size_limit = transaction_config.max_block_size as u64;\n                Ratio::new(size_used * 100, block_size_limit).to_integer()\n            };\n\n            debug!(\"Storage utilization at {total_size_utilization}\");\n\n            let scores = [\n                block_utilization_score,\n                total_size_utilization,\n                total_gas_utilization,\n            ];\n\n            match scores.iter().max() {\n                Some(max_utlization) => *max_utlization,\n                None => {\n                    // This should never happen 
as we just created the scores vector to find the\n                    // max value\n                    warn!(\"Unable to determine max utilization, marking 0 utilization\");\n                    0u64\n                }\n            }\n        };\n\n        debug!(\"Utilization for block is {utilization}\");\n\n        let _ = txn.write(&BlockExecutionResults {\n            block_info,\n            exec_results: execution_results,\n        })?;\n        txn.commit()?;\n\n        match self.utilization_tracker.get_mut(&era_id) {\n            Some(block_score) => {\n                block_score.insert(block.height(), utilization);\n            }\n            None => {\n                let mut block_score = BTreeMap::new();\n                block_score.insert(block.height(), utilization);\n                self.utilization_tracker.insert(era_id, block_score);\n            }\n        }\n\n        Ok(true)\n    }\n\n    /// Handles a [`BlockCompletedAnnouncement`].\n    fn handle_mark_block_completed_request(\n        &mut self,\n        MarkBlockCompletedRequest {\n            block_height,\n            responder,\n        }: MarkBlockCompletedRequest,\n    ) -> Result<Effects<Event>, FatalStorageError> {\n        let is_new = self.mark_block_complete(block_height)?;\n        Ok(responder.respond(is_new).ignore())\n    }\n\n    /// Marks the block at height `block_height` as complete by inserting it\n    /// into the `completed_blocks` index and storing it to disk.\n    fn mark_block_complete(&mut self, block_height: u64) -> Result<bool, FatalStorageError> {\n        let is_new = self.completed_blocks.insert(block_height);\n        if is_new {\n            self.persist_completed_blocks()?;\n            info!(\n                \"Storage: marked block {} complete: {}\",\n                block_height,\n                self.get_available_block_range()\n            );\n            self.update_chain_height_metrics();\n        } else {\n            debug!(\n                
\"Storage: tried to mark already-complete block {} complete\",\n                block_height\n            );\n        }\n        Ok(is_new)\n    }\n\n    /// Persists the completed blocks disjoint sequences state to the database.\n    fn persist_completed_blocks(&mut self) -> Result<(), FatalStorageError> {\n        let serialized = self\n            .completed_blocks\n            .to_bytes()\n            .map_err(FatalStorageError::UnexpectedSerializationFailure)?;\n        let mut rw_txn = self.block_store.checkout_rw()?;\n        rw_txn.write(&StateStore {\n            key: Cow::Borrowed(COMPLETED_BLOCKS_STORAGE_KEY),\n            value: serialized,\n        })?;\n        rw_txn.commit().map_err(FatalStorageError::from)\n    }\n\n    /// Retrieves the height of the highest complete block (if any).\n    pub(crate) fn highest_complete_block_height(&self) -> Option<u64> {\n        self.completed_blocks.highest_sequence().map(Sequence::high)\n    }\n\n    /// Retrieves the contiguous segment of the block chain starting at the highest known switch\n    /// block such that the blocks' timestamps cover a duration of at least the max TTL for deploys\n    /// (a chainspec setting).\n    ///\n    /// If storage doesn't hold enough blocks to cover the specified duration, it will still return\n    /// the highest contiguous segment starting at the highest switch block which it does hold.\n    pub(crate) fn read_blocks_for_replay_protection(\n        &self,\n    ) -> Result<Vec<Block>, FatalStorageError> {\n        let ro_txn = self.block_store.checkout_ro()?;\n\n        let timestamp =\n            match DataReader::<LatestSwitchBlock, BlockHeader>::read(&ro_txn, LatestSwitchBlock)? 
{\n                Some(last_era_header) => last_era_header\n                    .timestamp()\n                    .saturating_sub(self.max_ttl.value()),\n                None => Timestamp::now(),\n            };\n\n        let mut blocks = Vec::new();\n        for sequence in self.completed_blocks.sequences().iter().rev() {\n            let hi = sequence.high();\n            let low = sequence.low();\n            for idx in (low..=hi).rev() {\n                let maybe_block: Result<Option<Block>, BlockStoreError> = ro_txn.read(idx);\n                match maybe_block {\n                    Ok(Some(block)) => {\n                        let should_continue = block.timestamp() >= timestamp;\n                        blocks.push(block);\n                        if false == should_continue {\n                            return Ok(blocks);\n                        }\n                    }\n                    Ok(None) => {\n                        continue;\n                    }\n                    Err(err) => return Err(FatalStorageError::BlockStoreError(err)),\n                }\n            }\n        }\n        Ok(blocks)\n    }\n\n    /// Returns an executable block.\n    pub(crate) fn make_executable_block(\n        &self,\n        block_hash: &BlockHash,\n    ) -> Result<Option<ExecutableBlock>, FatalStorageError> {\n        let (block, transactions) =\n            match self.read_block_and_finalized_transactions_by_hash(*block_hash)? 
{\n                Some(block_and_finalized_transactions) => block_and_finalized_transactions,\n                None => {\n                    error!(\n                        ?block_hash,\n                        \"Storage: unable to make_executable_block for  {}\", block_hash\n                    );\n                    return Ok(None);\n                }\n            };\n        let maybe_finalized_approvals: Option<ApprovalsHashes> =\n            self.block_store.checkout_ro()?.read(*block.hash())?;\n        if let Some(finalized_approvals) = maybe_finalized_approvals {\n            if transactions.len() != finalized_approvals.approvals_hashes().len() {\n                error!(\n                    ?block_hash,\n                    \"Storage: transaction hashes length mismatch {}\", block_hash\n                );\n                return Err(FatalStorageError::ApprovalsHashesLengthMismatch {\n                    block_hash: *block_hash,\n                    expected: transactions.len(),\n                    actual: finalized_approvals.approvals_hashes().len(),\n                });\n            }\n            for (transaction, hash) in transactions\n                .iter()\n                .zip(finalized_approvals.approvals_hashes())\n            {\n                let computed_hash = transaction.compute_approvals_hash().map_err(|error| {\n                    error!(%error, \"failed to serialize approvals\");\n                    FatalStorageError::UnexpectedSerializationFailure(error)\n                })?;\n                if computed_hash == hash {\n                    continue;\n                }\n                // This should be unreachable as the `BlockSynchronizer` should ensure we have the\n                // correct approvals before it then calls this method.  By returning `Ok(None)` the\n                // node would be stalled at this block, but should eventually sync leap due to lack\n                // of progress.  
It would then backfill this block without executing it.\n                error!(?block_hash, \"Storage: transaction with incorrect approvals\");\n                return Ok(None);\n            }\n        }\n\n        let executable_block = ExecutableBlock::from_block_and_transactions(block, transactions);\n        info!(%block_hash, \"Storage: created {}\", executable_block);\n        Ok(Some(executable_block))\n    }\n\n    /// Retrieves single block and all of its deploys, with the finalized approvals.\n    /// If any of the deploys can't be found, returns `Ok(None)`.\n    fn read_block_and_finalized_transactions_by_hash(\n        &self,\n        block_hash: BlockHash,\n    ) -> Result<Option<(BlockV2, Vec<Transaction>)>, FatalStorageError> {\n        let txn = self.block_store.checkout_ro()?;\n\n        let Some(block) = txn.read(block_hash)? else {\n            debug!(\n                ?block_hash,\n                \"Storage: read_block_and_finalized_transactions_by_hash failed to get block for {}\",\n                block_hash\n            );\n            return Ok(None);\n        };\n\n        let Block::V2(block) = block else {\n            debug!(\n                ?block_hash,\n                \"Storage: read_block_and_finalized_transactions_by_hash expected block V2 {}\",\n                block_hash\n            );\n            return Ok(None);\n        };\n\n        let mut transactions = vec![];\n        for (transaction, _) in (self\n            .get_transactions_with_finalized_approvals(block.all_transactions())?)\n        .into_iter()\n        .flatten()\n        {\n            transactions.push(transaction);\n        }\n\n        Ok(Some((block, transactions)))\n    }\n\n    /// Retrieves the highest complete block header from storage, if one exists. 
May return an\n    /// LMDB error.\n    fn get_highest_complete_block_header(&self) -> Result<Option<BlockHeader>, FatalStorageError> {\n        let highest_complete_block_height = match self.completed_blocks.highest_sequence() {\n            Some(sequence) => sequence.high(),\n            None => {\n                return Ok(None);\n            }\n        };\n\n        let txn = self.block_store.checkout_ro()?;\n        txn.read(highest_complete_block_height)\n            .map_err(FatalStorageError::from)\n    }\n\n    /// Retrieves the highest block header with metadata from storage, if one exists. May return an\n    /// LMDB error.\n    fn get_highest_complete_block_header_with_signatures(\n        &self,\n        txn: &(impl DataReader<BlockHeight, BlockHeader> + DataReader<BlockHash, BlockSignatures>),\n    ) -> Result<Option<BlockHeaderWithSignatures>, FatalStorageError> {\n        let highest_complete_block_height = match self.completed_blocks.highest_sequence() {\n            Some(sequence) => sequence.high(),\n            None => {\n                return Ok(None);\n            }\n        };\n\n        let block_header: Option<BlockHeader> = txn.read(highest_complete_block_height)?;\n        match block_header {\n            Some(header) => {\n                let block_header_hash = header.block_hash();\n                let block_signatures: BlockSignatures = match txn.read(block_header_hash)? 
{\n                    Some(signatures) => signatures,\n                    None => match &header {\n                        BlockHeader::V1(header) => BlockSignatures::V1(BlockSignaturesV1::new(\n                            header.block_hash(),\n                            header.era_id(),\n                        )),\n                        BlockHeader::V2(header) => BlockSignatures::V2(BlockSignaturesV2::new(\n                            header.block_hash(),\n                            header.height(),\n                            header.era_id(),\n                            self.chain_name_hash,\n                        )),\n                    },\n                };\n                Ok(Some(BlockHeaderWithSignatures::new(\n                    header,\n                    block_signatures,\n                )))\n            }\n            None => Ok(None),\n        }\n    }\n\n    /// Retrieves the highest complete block from storage, if one exists. May return an LMDB error.\n    pub fn get_highest_complete_block(&self) -> Result<Option<Block>, FatalStorageError> {\n        let highest_complete_block_height = match self.highest_complete_block_height() {\n            Some(height) => height,\n            None => {\n                return Ok(None);\n            }\n        };\n\n        let txn = self.block_store.checkout_ro()?;\n        txn.read(highest_complete_block_height)\n            .map_err(FatalStorageError::from)\n    }\n\n    /// Retrieves a single block header in a given transaction from storage\n    /// respecting the possible restriction on whether the block\n    /// should be present in the available blocks index.\n    fn get_single_block_header_restricted(\n        &self,\n        txn: &impl DataReader<BlockHash, BlockHeader>,\n        block_hash: &BlockHash,\n        only_from_available_block_range: bool,\n    ) -> Result<Option<BlockHeader>, FatalStorageError> {\n        let block_header = match txn.read(*block_hash)? 
{\n            Some(header) => header,\n            None => return Ok(None),\n        };\n\n        if !(self.should_return_block(block_header.height(), only_from_available_block_range)) {\n            return Ok(None);\n        }\n\n        Ok(Some(block_header))\n    }\n\n    /// Returns headers of complete blocks of the trusted block's ancestors, back to the most\n    /// recent switch block.\n    fn get_trusted_ancestor_headers(\n        &self,\n        txn: &impl DataReader<BlockHash, BlockHeader>,\n        trusted_block_header: &BlockHeader,\n    ) -> Result<Option<Vec<BlockHeader>>, FatalStorageError> {\n        if trusted_block_header.is_genesis() {\n            return Ok(Some(vec![]));\n        }\n        let available_block_range = self.get_available_block_range();\n        let mut result = vec![];\n        let mut current_trusted_block_header = trusted_block_header.clone();\n        loop {\n            let parent_hash = current_trusted_block_header.parent_hash();\n            let parent_block_header: BlockHeader = match txn.read(*parent_hash)? 
{\n                Some(block_header) => block_header,\n                None => {\n                    warn!(%parent_hash, \"block header not found\");\n                    return Ok(None);\n                }\n            };\n\n            if !available_block_range.contains(parent_block_header.height()) {\n                debug!(%parent_hash, \"block header not complete\");\n                return Ok(None);\n            }\n\n            result.push(parent_block_header.clone());\n            if parent_block_header.is_switch_block() || parent_block_header.is_genesis() {\n                break;\n            }\n            current_trusted_block_header = parent_block_header;\n        }\n        Ok(Some(result))\n    }\n\n    /// Returns headers of all known switch blocks after the trusted block but before\n    /// highest block, with signatures, plus the signed highest block.\n    fn get_block_headers_with_signatures(\n        &self,\n        txn: &(impl DataReader<BlockHash, BlockSignatures> + DataReader<EraId, BlockHeader>),\n        trusted_block_header: &BlockHeader,\n        highest_block_header_with_signatures: &BlockHeaderWithSignatures,\n    ) -> Result<Option<Vec<BlockHeaderWithSignatures>>, FatalStorageError> {\n        if trusted_block_header.block_hash()\n            == highest_block_header_with_signatures\n                .block_header()\n                .block_hash()\n        {\n            return Ok(Some(vec![]));\n        }\n\n        let start_era_id: u64 = trusted_block_header.next_block_era_id().into();\n        let current_era_id: u64 = highest_block_header_with_signatures\n            .block_header()\n            .era_id()\n            .into();\n\n        let mut result = vec![];\n\n        for era_id in start_era_id..current_era_id {\n            let maybe_block_header: Option<BlockHeader> = txn.read(EraId::from(era_id))?;\n            match maybe_block_header {\n                Some(block_header) => {\n                    let block_signatures = 
match txn.read(block_header.block_hash())? {\n                        Some(signatures) => signatures,\n                        None => match &block_header {\n                            BlockHeader::V1(header) => BlockSignatures::V1(BlockSignaturesV1::new(\n                                header.block_hash(),\n                                header.era_id(),\n                            )),\n                            BlockHeader::V2(header) => BlockSignatures::V2(BlockSignaturesV2::new(\n                                header.block_hash(),\n                                header.height(),\n                                header.era_id(),\n                                self.chain_name_hash,\n                            )),\n                        },\n                    };\n                    result.push(BlockHeaderWithSignatures::new(\n                        block_header,\n                        block_signatures,\n                    ));\n                }\n                None => return Ok(None),\n            }\n        }\n        result.push(highest_block_header_with_signatures.clone());\n\n        Ok(Some(result))\n    }\n\n    /// Stores a set of finalized approvals if they are different to the approvals in the original\n    /// transaction and if they are different to existing finalized approvals if any.\n    ///\n    /// Returns `true` if the provided approvals were stored.\n    fn store_finalized_approvals(\n        &mut self,\n        transaction_hash: &TransactionHash,\n        finalized_approvals: &BTreeSet<Approval>,\n    ) -> Result<bool, FatalStorageError> {\n        let mut txn = self.block_store.checkout_rw()?;\n        let original_transaction: Transaction = txn.read(*transaction_hash)?.ok_or({\n            FatalStorageError::UnexpectedFinalizedApprovals {\n                transaction_hash: *transaction_hash,\n            }\n        })?;\n\n        // Only store the finalized approvals if they are different from the original ones.\n        
let maybe_existing_finalized_approvals: Option<BTreeSet<Approval>> =\n            txn.read(*transaction_hash)?;\n        if maybe_existing_finalized_approvals.as_ref() == Some(finalized_approvals) {\n            return Ok(false);\n        }\n\n        let original_approvals = original_transaction.approvals();\n        if &original_approvals != finalized_approvals {\n            let _ = txn.write(&TransactionFinalizedApprovals {\n                transaction_hash: *transaction_hash,\n                finalized_approvals: finalized_approvals.clone(),\n            })?;\n            txn.commit()?;\n            return Ok(true);\n        }\n\n        Ok(false)\n    }\n\n    /// Retrieves successful transfers associated with block.\n    ///\n    /// If there is no record of successful transfers for this block, then the list will be built\n    /// from the execution results and stored to `transfer_db`.  The record could have been missing\n    /// or incorrectly set to an empty collection due to previous synchronization and storage\n    /// issues.  See https://github.com/casper-network/casper-node/issues/4255 and\n    /// https://github.com/casper-network/casper-node/issues/4268 for further info.\n    fn get_transfers(\n        &mut self,\n        block_hash: &BlockHash,\n    ) -> Result<Option<Vec<Transfer>>, FatalStorageError> {\n        let mut rw_txn = self.block_store.checkout_rw()?;\n        let maybe_transfers: Option<Vec<Transfer>> = rw_txn.read(*block_hash)?;\n        if let Some(transfers) = maybe_transfers {\n            if !transfers.is_empty() {\n                return Ok(Some(transfers));\n            }\n        }\n\n        let block: Block = match rw_txn.read(*block_hash)? 
{\n            Some(block) => block,\n            None => return Ok(None),\n        };\n\n        let deploy_hashes: Vec<DeployHash> = match block.clone_body() {\n            BlockBody::V1(v1) => v1.deploy_and_transfer_hashes().copied().collect(),\n            BlockBody::V2(v2) => v2\n                .all_transactions()\n                .filter_map(|transaction_hash| match transaction_hash {\n                    TransactionHash::Deploy(deploy_hash) => Some(*deploy_hash),\n                    TransactionHash::V1(_) => None,\n                })\n                .collect(),\n        };\n\n        let mut transfers: Vec<Transfer> = vec![];\n        for deploy_hash in deploy_hashes {\n            let transaction_hash = TransactionHash::Deploy(deploy_hash);\n            let successful_xfers = match rw_txn.read(transaction_hash)? {\n                Some(exec_result) => successful_transfers(&exec_result),\n                None => {\n                    error!(%deploy_hash, %block_hash, \"should have exec result\");\n                    vec![]\n                }\n            };\n            transfers.extend(successful_xfers);\n        }\n        rw_txn.write(&BlockTransfers {\n            block_hash: *block_hash,\n            transfers: transfers.clone(),\n        })?;\n        rw_txn.commit()?;\n        Ok(Some(transfers))\n    }\n\n    /// Retrieves a deploy from the deploy store by deploy hash.\n    fn get_legacy_deploy(\n        &self,\n        deploy_hash: DeployHash,\n    ) -> Result<Option<LegacyDeploy>, FatalStorageError> {\n        let transaction_hash = TransactionHash::from(deploy_hash);\n        let txn = self.block_store.checkout_ro()?;\n        let transaction =\n            match Self::get_transaction_with_finalized_approvals(&txn, &transaction_hash)? 
{\n                Some((transaction, maybe_approvals)) => {\n                    if let Some(approvals) = maybe_approvals {\n                        transaction.with_approvals(approvals)\n                    } else {\n                        transaction\n                    }\n                }\n                None => return Ok(None),\n            };\n\n        match transaction {\n            Transaction::Deploy(deploy) => Ok(Some(LegacyDeploy::from(deploy))),\n            transaction @ Transaction::V1(_) => {\n                let mismatch = VariantMismatch(Box::new((transaction_hash, transaction)));\n                error!(%mismatch, \"failed getting legacy deploy\");\n                Err(FatalStorageError::from(mismatch))\n            }\n        }\n    }\n\n    /// Retrieves a transaction by transaction ID.\n    fn get_transaction_by_id(\n        &self,\n        transaction_id: TransactionId,\n    ) -> Result<Option<Transaction>, FatalStorageError> {\n        let transaction_hash = transaction_id.transaction_hash();\n        let txn = self.block_store.checkout_ro()?;\n\n        let maybe_transaction: Option<Transaction> = txn.read(transaction_hash)?;\n        let transaction: Transaction = match maybe_transaction {\n            None => return Ok(None),\n            Some(transaction) if transaction.fetch_id() == transaction_id => {\n                return Ok(Some(transaction));\n            }\n            Some(transaction) => transaction,\n        };\n\n        let finalized_approvals = match txn.read(transaction_hash)? 
{\n            None => return Ok(None),\n            Some(approvals) => approvals,\n        };\n\n        match (\n            transaction_id.approvals_hash(),\n            finalized_approvals,\n            transaction,\n        ) {\n            (approvals_hash, finalized_approvals, Transaction::Deploy(deploy)) => {\n                match ApprovalsHash::compute(&finalized_approvals) {\n                    Ok(computed_approvals_hash) if computed_approvals_hash == approvals_hash => {\n                        let deploy = deploy.with_approvals(finalized_approvals);\n                        Ok(Some(Transaction::from(deploy)))\n                    }\n                    Ok(_computed_approvals_hash) => Ok(None),\n                    Err(error) => {\n                        error!(%error, \"failed to calculate finalized deploy approvals hash\");\n                        Err(FatalStorageError::UnexpectedSerializationFailure(error))\n                    }\n                }\n            }\n            (approvals_hash, finalized_approvals, Transaction::V1(transaction_v1)) => {\n                match ApprovalsHash::compute(&finalized_approvals) {\n                    Ok(computed_approvals_hash) if computed_approvals_hash == approvals_hash => {\n                        let transaction_v1 = transaction_v1.with_approvals(finalized_approvals);\n                        Ok(Some(Transaction::from(transaction_v1)))\n                    }\n                    Ok(_computed_approvals_hash) => Ok(None),\n                    Err(error) => {\n                        error!(%error, \"failed to calculate finalized transaction approvals hash\");\n                        Err(FatalStorageError::UnexpectedSerializationFailure(error))\n                    }\n                }\n            }\n        }\n    }\n\n    /// Retrieves a single transaction along with its finalized approvals.\n    #[allow(clippy::type_complexity)]\n    fn get_transaction_with_finalized_approvals(\n        txn: &(impl 
DataReader<TransactionHash, Transaction>\n              + DataReader<TransactionHash, BTreeSet<Approval>>),\n        transaction_hash: &TransactionHash,\n    ) -> Result<Option<(Transaction, Option<BTreeSet<Approval>>)>, FatalStorageError> {\n        let maybe_transaction: Option<Transaction> = txn.read(*transaction_hash)?;\n        let transaction = match maybe_transaction {\n            Some(transaction) => transaction,\n            None => return Ok(None),\n        };\n\n        let maybe_finalized_approvals: Option<BTreeSet<Approval>> = txn.read(*transaction_hash)?;\n        let ret = (transaction, maybe_finalized_approvals);\n\n        Ok(Some(ret))\n    }\n\n    pub(crate) fn get_sync_leap(\n        &self,\n        sync_leap_identifier: SyncLeapIdentifier,\n    ) -> Result<FetchResponse<SyncLeap, SyncLeapIdentifier>, FatalStorageError> {\n        let block_hash = sync_leap_identifier.block_hash();\n\n        let txn = self.block_store.checkout_ro()?;\n\n        let only_from_available_block_range = true;\n        let trusted_block_header = match self.get_single_block_header_restricted(\n            &txn,\n            &block_hash,\n            only_from_available_block_range,\n        )? {\n            Some(trusted_block_header) => trusted_block_header,\n            None => return Ok(FetchResponse::NotFound(sync_leap_identifier)),\n        };\n\n        let trusted_ancestor_headers =\n            match self.get_trusted_ancestor_headers(&txn, &trusted_block_header)? 
{\n                Some(trusted_ancestor_headers) => trusted_ancestor_headers,\n                None => return Ok(FetchResponse::NotFound(sync_leap_identifier)),\n            };\n\n        // highest block and signatures are not requested\n        if sync_leap_identifier.trusted_ancestor_only() {\n            return Ok(FetchResponse::Fetched(SyncLeap {\n                trusted_ancestor_only: true,\n                trusted_block_header,\n                trusted_ancestor_headers,\n                block_headers_with_signatures: vec![],\n            }));\n        }\n\n        let highest_complete_block_header =\n            match self.get_highest_complete_block_header_with_signatures(&txn)? {\n                Some(highest_complete_block_header) => highest_complete_block_header,\n                None => return Ok(FetchResponse::NotFound(sync_leap_identifier)),\n            };\n\n        if highest_complete_block_header\n            .block_header()\n            .era_id()\n            .saturating_sub(trusted_block_header.era_id().into())\n            > self.recent_era_count.into()\n        {\n            return Ok(FetchResponse::NotProvided(sync_leap_identifier));\n        }\n\n        if highest_complete_block_header.block_header().height() == 0 {\n            return Ok(FetchResponse::Fetched(SyncLeap {\n                trusted_ancestor_only: false,\n                trusted_block_header,\n                trusted_ancestor_headers: vec![],\n                block_headers_with_signatures: vec![],\n            }));\n        }\n\n        // The `highest_complete_block_header` and `trusted_block_header` are both within the\n        // highest complete block range, thus so are all the switch blocks between them.\n        if let Some(block_headers_with_signatures) = self.get_block_headers_with_signatures(\n            &txn,\n            &trusted_block_header,\n            &highest_complete_block_header,\n        )? 
{\n            return Ok(FetchResponse::Fetched(SyncLeap {\n                trusted_ancestor_only: false,\n                trusted_block_header,\n                trusted_ancestor_headers,\n                block_headers_with_signatures,\n            }));\n        }\n\n        Ok(FetchResponse::NotFound(sync_leap_identifier))\n    }\n\n    /// Creates a serialized representation of a `FetchResponse` and the resulting message.\n    ///\n    /// If the given item is `Some`, returns a serialization of `FetchResponse::Fetched`. If\n    /// enabled, the given serialization is also added to the in-memory pool.\n    ///\n    /// If the given item is `None`, returns a non-pooled serialization of\n    /// `FetchResponse::NotFound`.\n    fn update_pool_and_send<REv, T>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        sender: NodeId,\n        serialized_id: &[u8],\n        fetch_response: FetchResponse<T, T::Id>,\n    ) -> Result<Effects<Event>, FatalStorageError>\n    where\n        REv: From<NetworkRequest<Message>> + Send,\n        T: FetchItem,\n    {\n        let serialized = fetch_response\n            .to_serialized()\n            .map_err(FatalStorageError::StoredItemSerializationFailure)?;\n        let shared: Arc<[u8]> = serialized.into();\n\n        if self.enable_mem_deduplication && fetch_response.was_found() {\n            self.serialized_item_pool\n                .put(serialized_id.into(), Arc::downgrade(&shared));\n        }\n\n        let message = Message::new_get_response_from_serialized(<T as FetchItem>::TAG, shared);\n        Ok(effect_builder.send_message(sender, message).ignore())\n    }\n\n    /// Returns `true` if the storage should attempt to return a block. 
Depending on the\n    /// `only_from_available_block_range` flag it should be unconditional or restricted by the\n    /// available block range.\n    fn should_return_block(\n        &self,\n        block_height: u64,\n        only_from_available_block_range: bool,\n    ) -> bool {\n        if only_from_available_block_range {\n            self.get_available_block_range().contains(block_height)\n        } else {\n            true\n        }\n    }\n\n    pub(crate) fn get_available_block_range(&self) -> AvailableBlockRange {\n        match self.completed_blocks.highest_sequence() {\n            Some(&seq) => seq.into(),\n            None => AvailableBlockRange::RANGE_0_0,\n        }\n    }\n\n    pub(crate) fn get_highest_orphaned_block_header(&self) -> HighestOrphanedBlockResult {\n        match self.completed_blocks.highest_sequence() {\n            None => HighestOrphanedBlockResult::MissingHighestSequence,\n            Some(seq) => {\n                let low = seq.low();\n                let txn = self\n                    .block_store\n                    .checkout_ro()\n                    .expect(\"Could not start transaction for lmdb\");\n\n                match txn.read(low) {\n                    Ok(Some(block)) => match block {\n                        Block::V1(_) | Block::V2(_) => {\n                            HighestOrphanedBlockResult::Orphan(block.clone_header())\n                        }\n                    },\n                    Ok(None) | Err(_) => HighestOrphanedBlockResult::MissingHeader(low),\n                }\n            }\n        }\n    }\n\n    /// Returns `count` highest switch block headers, sorted from lowest (oldest) to highest.\n    pub(crate) fn read_highest_switch_block_headers(\n        &self,\n        count: u64,\n    ) -> Result<Vec<BlockHeader>, FatalStorageError> {\n        let txn = self.block_store.checkout_ro()?;\n        if let Some(last_era_header) =\n            DataReader::<LatestSwitchBlock, 
BlockHeader>::read(&txn, LatestSwitchBlock)?\n        {\n            let mut result = vec![];\n            let last_era_id = last_era_header.era_id();\n            result.push(last_era_header);\n            for era_id in (0..last_era_id.value())\n                .rev()\n                .take(count as usize)\n                .map(EraId::new)\n            {\n                match txn.read(era_id)? {\n                    None => break,\n                    Some(header) => result.push(header),\n                }\n            }\n            result.reverse();\n            debug!(\n                ?result,\n                \"Storage: read_highest_switch_block_headers count:({})\", count\n            );\n            Ok(result)\n        } else {\n            Ok(vec![])\n        }\n    }\n\n    fn read_block_execution_results_or_chunk(\n        &self,\n        request: &BlockExecutionResultsOrChunkId,\n    ) -> Result<Option<BlockExecutionResultsOrChunk>, FatalStorageError> {\n        let txn = self.block_store.checkout_ro()?;\n\n        let execution_results = match Self::get_execution_results(&txn, request.block_hash())? 
{\n            Some(execution_results) => execution_results\n                .into_iter()\n                .map(|(_deploy_hash, execution_result)| execution_result)\n                .collect(),\n            None => return Ok(None),\n        };\n        Ok(BlockExecutionResultsOrChunk::new(\n            *request.block_hash(),\n            request.chunk_index(),\n            execution_results,\n        ))\n    }\n\n    fn get_default_block_signatures(&self, block: &Block) -> BlockSignatures {\n        match block {\n            Block::V1(block) => BlockSignaturesV1::new(*block.hash(), block.era_id()).into(),\n            Block::V2(block) => BlockSignaturesV2::new(\n                *block.hash(),\n                block.height(),\n                block.era_id(),\n                self.chain_name_hash,\n            )\n            .into(),\n        }\n    }\n\n    fn update_chain_height_metrics(&self) {\n        if let Some(metrics) = self.metrics.as_ref() {\n            if let Some(sequence) = self.completed_blocks.highest_sequence() {\n                let highest_available_block: i64 = sequence.high().try_into().unwrap_or(i64::MIN);\n                let lowest_available_block: i64 = sequence.low().try_into().unwrap_or(i64::MIN);\n                metrics.chain_height.set(highest_available_block);\n                metrics.highest_available_block.set(highest_available_block);\n                metrics.lowest_available_block.set(lowest_available_block);\n            }\n        }\n    }\n\n    pub(crate) fn read_block_header_by_hash(\n        &self,\n        block_hash: &BlockHash,\n    ) -> Result<Option<BlockHeader>, FatalStorageError> {\n        let ro_txn = self.block_store.checkout_ro()?;\n\n        ro_txn.read(*block_hash).map_err(FatalStorageError::from)\n    }\n\n    fn get_execution_results(\n        txn: &(impl DataReader<BlockHash, Block> + DataReader<TransactionHash, ExecutionResult>),\n        block_hash: &BlockHash,\n    ) -> Result<Option<Vec<(TransactionHash, 
ExecutionResult)>>, FatalStorageError> {\n        let block = txn.read(*block_hash)?;\n\n        let block_body = match block {\n            Some(block) => block.take_body(),\n            None => return Ok(None),\n        };\n\n        let transaction_hashes: Vec<TransactionHash> = match block_body {\n            BlockBody::V1(v1) => v1\n                .deploy_and_transfer_hashes()\n                .map(TransactionHash::from)\n                .collect(),\n            BlockBody::V2(v2) => v2.all_transactions().copied().collect(),\n        };\n        let mut execution_results = vec![];\n        for transaction_hash in transaction_hashes {\n            match txn.read(transaction_hash)? {\n                None => {\n                    debug!(\n                        %block_hash,\n                        %transaction_hash,\n                        \"retrieved block but execution result for given transaction is absent\"\n                    );\n                    return Ok(None);\n                }\n                Some(execution_result) => {\n                    execution_results.push((transaction_hash, execution_result));\n                }\n            }\n        }\n        Ok(Some(execution_results))\n    }\n\n    #[allow(clippy::type_complexity)]\n    fn get_execution_results_with_transaction_headers(\n        txn: &(impl DataReader<BlockHash, Block>\n              + DataReader<TransactionHash, ExecutionResult>\n              + DataReader<TransactionHash, Transaction>),\n        block_hash: &BlockHash,\n    ) -> Result<Option<Vec<(TransactionHash, TransactionHeader, ExecutionResult)>>, FatalStorageError>\n    {\n        let execution_results = match Self::get_execution_results(txn, block_hash)? 
{\n            Some(execution_results) => execution_results,\n            None => return Ok(None),\n        };\n\n        let mut ret = Vec::with_capacity(execution_results.len());\n        for (transaction_hash, execution_result) in execution_results {\n            match txn.read(transaction_hash)? {\n                None => {\n                    error!(\n                        %block_hash,\n                        %transaction_hash,\n                        \"missing transaction\"\n                    );\n                    return Ok(None);\n                }\n                Some(Transaction::Deploy(deploy)) => ret.push((\n                    transaction_hash,\n                    deploy.take_header().into(),\n                    execution_result,\n                )),\n                Some(Transaction::V1(transaction_v1)) => {\n                    ret.push((transaction_hash, (&transaction_v1).into(), execution_result))\n                }\n            };\n        }\n        Ok(Some(ret))\n    }\n\n    fn get_block_utilization_score(\n        &mut self,\n        era_id: EraId,\n        block_height: u64,\n        block_utilization: u64,\n    ) -> Option<(u64, u64)> {\n        let ret = match self.utilization_tracker.get_mut(&era_id) {\n            Some(utilization) => {\n                utilization.entry(block_height).or_insert(block_utilization);\n\n                let transaction_count = utilization.values().sum();\n                let block_count = utilization.keys().len() as u64;\n\n                Some((transaction_count, block_count))\n            }\n            None => {\n                let mut utilization = BTreeMap::new();\n                utilization.insert(block_height, block_utilization);\n\n                self.utilization_tracker.insert(era_id, utilization);\n\n                let block_count = 1u64;\n                Some((block_utilization, block_count))\n            }\n        };\n\n        self.utilization_tracker\n            
.retain(|key_era_id, _| key_era_id.value() + 2 >= era_id.value());\n\n        ret\n    }\n}\n\n/// Decodes an item's ID, typically from an incoming request.\nfn decode_item_id<T>(raw: &[u8]) -> Result<T::Id, GetRequestError>\nwhere\n    T: FetchItem,\n{\n    bincode::deserialize(raw).map_err(GetRequestError::MalformedIncomingItemId)\n}\n\nfn should_move_storage_files_to_network_subdir(\n    root: &Path,\n    file_names: &[&str],\n) -> Result<bool, FatalStorageError> {\n    let mut files_found = vec![];\n    let mut files_not_found = vec![];\n\n    for file_name in file_names {\n        let file_path = root.join(file_name);\n\n        if file_path.exists() {\n            files_found.push(file_path);\n        } else {\n            files_not_found.push(file_path);\n        }\n    }\n\n    let should_move_files = files_found.len() == file_names.len();\n\n    if !should_move_files && !files_found.is_empty() {\n        error!(\n            \"found storage files: {:?}, missing storage files: {:?}\",\n            files_found, files_not_found\n        );\n\n        return Err(FatalStorageError::MissingStorageFiles {\n            missing_files: files_not_found,\n        });\n    }\n\n    Ok(should_move_files)\n}\n\nfn move_storage_files_to_network_subdir(\n    root: &Path,\n    subdir: &Path,\n    file_names: &[&str],\n) -> Result<(), FatalStorageError> {\n    file_names\n        .iter()\n        .map(|file_name| {\n            let source_path = root.join(file_name);\n            let dest_path = subdir.join(file_name);\n            fs::rename(&source_path, &dest_path).map_err(|original_error| {\n                FatalStorageError::UnableToMoveFile {\n                    source_path,\n                    dest_path,\n                    original_error,\n                }\n            })\n        })\n        .collect::<Result<Vec<_>, FatalStorageError>>()?;\n\n    info!(\n        \"moved files: {:?} from: {:?} to: {:?}\",\n        file_names, root, subdir\n    );\n    
Ok(())\n}\n\n/// Returns all `Transform::WriteTransfer`s from the execution effects if this is an\n/// `ExecutionResult::Success`, or an empty `Vec` if `ExecutionResult::Failure`.\nfn successful_transfers(execution_result: &ExecutionResult) -> Vec<Transfer> {\n    let mut all_transfers: Vec<Transfer> = vec![];\n    match execution_result {\n        ExecutionResult::V1(ExecutionResultV1::Success { effect, .. }) => {\n            for transform_v1 in &effect.transforms {\n                if let execution_result_v1::TransformKindV1::WriteTransfer(transfer_v1) =\n                    &transform_v1.transform\n                {\n                    all_transfers.push(Transfer::V1(transfer_v1.clone()));\n                }\n            }\n        }\n        ExecutionResult::V2(execution_result_v2) => {\n            if execution_result_v2.error_message.is_none() {\n                for transfer in &execution_result_v2.transfers {\n                    all_transfers.push(transfer.clone());\n                }\n            }\n            // else no-op: we only record transfers from successful executions.\n        }\n        ExecutionResult::V1(ExecutionResultV1::Failure { .. }) => {\n            // No-op: we only record transfers from successful executions.\n        }\n    }\n    all_transfers\n}\n\n// Testing code. 
The functions below allow direct inspection of the storage component and should\n// only ever be used when writing tests.\n#[cfg(test)]\nimpl Storage {\n    /// Directly returns a transaction with finalized approvals from internal store.\n    ///\n    /// # Panics\n    ///\n    /// Panics if an IO error occurs.\n    pub(crate) fn get_transaction_with_finalized_approvals_by_hash(\n        &self,\n        transaction_hash: &TransactionHash,\n    ) -> Option<(Transaction, Option<BTreeSet<Approval>>)> {\n        let txn = self\n            .block_store\n            .checkout_ro()\n            .expect(\"could not create RO transaction\");\n        Self::get_transaction_with_finalized_approvals(&txn, transaction_hash)\n            .expect(\"could not retrieve a transaction with finalized approvals from storage\")\n    }\n\n    /// Directly returns an execution result from internal store.\n    ///\n    /// # Panics\n    ///\n    /// Panics if an IO error occurs.\n    pub(crate) fn read_execution_result(\n        &self,\n        transaction_hash: &TransactionHash,\n    ) -> Option<ExecutionResult> {\n        self.block_store\n            .checkout_ro()\n            .expect(\"could not create RO transaction\")\n            .read(*transaction_hash)\n            .expect(\"could not retrieve execution result from storage\")\n    }\n\n    /// Directly returns a transaction from internal store.\n    ///\n    /// # Panics\n    ///\n    /// Panics if an IO error occurs.\n    pub(crate) fn get_transaction_by_hash(\n        &self,\n        transaction_hash: TransactionHash,\n    ) -> Option<Transaction> {\n        self.block_store\n            .checkout_ro()\n            .expect(\"could not create RO transaction\")\n            .read(transaction_hash)\n            .expect(\"could not retrieve value from storage\")\n    }\n\n    pub(crate) fn read_block_by_hash(&self, block_hash: BlockHash) -> Option<Block> {\n        self.block_store\n            .checkout_ro()\n            
.expect(\"could not create RO transaction\")\n            .read(block_hash)\n            .expect(\"could not retrieve value from storage\")\n    }\n\n    pub(crate) fn read_block_by_height(&self, height: u64) -> Option<Block> {\n        self.block_store\n            .checkout_ro()\n            .expect(\"could not create RO transaction\")\n            .read(height)\n            .expect(\"could not retrieve value from storage\")\n    }\n\n    pub(crate) fn read_highest_block(&self) -> Option<Block> {\n        self.block_store\n            .checkout_ro()\n            .expect(\"could not create RO transaction\")\n            .read(Tip)\n            .expect(\"could not retrieve value from storage\")\n    }\n\n    pub(crate) fn read_highest_block_header(&self) -> Option<BlockHeader> {\n        self.block_store\n            .checkout_ro()\n            .expect(\"could not create RO transaction\")\n            .read(Tip)\n            .expect(\"could not retrieve value from storage\")\n    }\n\n    pub(crate) fn get_finality_signatures_for_block(\n        &self,\n        block_hash: BlockHash,\n    ) -> Option<BlockSignatures> {\n        let txn = self\n            .block_store\n            .checkout_ro()\n            .expect(\"could not create RO transaction\");\n        let res: Option<BlockSignatures> = txn\n            .read(block_hash)\n            .expect(\"could not retrieve value from storage\");\n        txn.commit().expect(\"Could not commit transaction\");\n        res\n    }\n\n    pub(crate) fn read_switch_block_by_era_id(&self, era_id: EraId) -> Option<Block> {\n        self.block_store\n            .checkout_ro()\n            .expect(\"could not create RO transaction\")\n            .read(era_id)\n            .expect(\"could not retrieve value from storage\")\n    }\n\n    pub(crate) fn read_block_with_signatures_by_hash(\n        &self,\n        block_hash: BlockHash,\n        only_from_available_block_range: bool,\n    ) -> Option<BlockWithSignatures> {\n    
    let ro_txn = self\n            .block_store\n            .checkout_ro()\n            .expect(\"should create ro txn\");\n        let block: Block = ro_txn.read(block_hash).expect(\"should read block\")?;\n\n        if !(self.should_return_block(block.height(), only_from_available_block_range)) {\n            return None;\n        }\n        if block_hash != *block.hash() {\n            error!(\n                queried_block_hash = ?block_hash,\n                actual_block_hash = ?block.hash(),\n                \"block not stored under hash\"\n            );\n            debug_assert_eq!(&block_hash, block.hash());\n            return None;\n        }\n        let block_signatures = ro_txn\n            .read(block_hash)\n            .expect(\"should read block signatures\")\n            .unwrap_or_else(|| self.get_default_block_signatures(&block));\n        if block_signatures.is_verified().is_err() {\n            error!(?block, \"invalid block signatures for block\");\n            debug_assert!(block_signatures.is_verified().is_ok());\n            return None;\n        }\n        Some(BlockWithSignatures::new(block, block_signatures))\n    }\n\n    pub(crate) fn read_block_with_signatures_by_height(\n        &self,\n        height: u64,\n        only_from_available_block_range: bool,\n    ) -> Option<BlockWithSignatures> {\n        if !(self.should_return_block(height, only_from_available_block_range)) {\n            return None;\n        }\n        let ro_txn = self\n            .block_store\n            .checkout_ro()\n            .expect(\"should create ro txn\");\n        let block: Block = ro_txn.read(height).expect(\"should read block\")?;\n        let hash = block.hash();\n        let block_signatures = ro_txn\n            .read(*hash)\n            .expect(\"should read block signatures\")\n            .unwrap_or_else(|| self.get_default_block_signatures(&block));\n        Some(BlockWithSignatures::new(block, block_signatures))\n    }\n\n    pub(crate) 
fn read_highest_block_with_signatures(\n        &self,\n        only_from_available_block_range: bool,\n    ) -> Option<BlockWithSignatures> {\n        let ro_txn = self\n            .block_store\n            .checkout_ro()\n            .expect(\"should create ro txn\");\n        let highest_block = if only_from_available_block_range {\n            let height = self.highest_complete_block_height()?;\n            ro_txn.read(height).expect(\"should read block\")?\n        } else {\n            DataReader::<Tip, Block>::read(&ro_txn, Tip).expect(\"should read block\")?\n        };\n        let hash = highest_block.hash();\n        let block_signatures = match ro_txn.read(*hash).expect(\"should read block signatures\") {\n            Some(signatures) => signatures,\n            None => self.get_default_block_signatures(&highest_block),\n        };\n        Some(BlockWithSignatures::new(highest_block, block_signatures))\n    }\n\n    pub(crate) fn read_execution_info(\n        &self,\n        transaction_hash: TransactionHash,\n    ) -> Option<ExecutionInfo> {\n        let txn = self\n            .block_store\n            .checkout_ro()\n            .expect(\"should create ro txn\");\n        let block_hash_and_height: BlockHashHeightAndEra = txn\n            .read(transaction_hash)\n            .expect(\"should read block hash and height\")?;\n        let execution_result = txn\n            .read(transaction_hash)\n            .expect(\"should read execution result\");\n        Some(ExecutionInfo {\n            block_hash: block_hash_and_height.block_hash,\n            block_height: block_hash_and_height.block_height,\n            execution_result,\n        })\n    }\n}\n"
  },
  {
    "path": "node/src/components/sync_leaper/error.rs",
    "content": "use datasize::DataSize;\nuse std::{\n    fmt,\n    fmt::{Display, Formatter},\n};\n\nuse crate::types::{NodeId, SyncLeapIdentifier};\n\n#[derive(Debug, Clone, DataSize)]\npub(crate) enum LeapActivityError {\n    TooOld(SyncLeapIdentifier, Vec<NodeId>),\n    Unobtainable(SyncLeapIdentifier, Vec<NodeId>),\n    NoPeers(SyncLeapIdentifier),\n}\n\nimpl Display for LeapActivityError {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            LeapActivityError::TooOld(sync_leap_identifier, ..) => {\n                write!(formatter, \"too old: {}\", sync_leap_identifier)\n            }\n            LeapActivityError::Unobtainable(sync_leap_identifier, ..) => {\n                write!(\n                    formatter,\n                    \"unable to acquire data for: {}\",\n                    sync_leap_identifier\n                )\n            }\n            LeapActivityError::NoPeers(sync_leap_identifier) => {\n                write!(\n                    formatter,\n                    \"sync leaper has no peers for: {}\",\n                    sync_leap_identifier\n                )\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/sync_leaper/event.rs",
    "content": "use std::fmt::{Display, Formatter};\n\nuse serde::Serialize;\n\nuse crate::{\n    components::fetcher::FetchResult,\n    types::{NodeId, SyncLeap, SyncLeapIdentifier},\n};\n\n#[derive(Debug, Serialize)]\npub(crate) enum Event {\n    AttemptLeap {\n        sync_leap_identifier: SyncLeapIdentifier,\n        peers_to_ask: Vec<NodeId>,\n    },\n    FetchedSyncLeapFromPeer {\n        sync_leap_identifier: SyncLeapIdentifier,\n        fetch_result: FetchResult<SyncLeap>,\n    },\n}\n\nimpl Display for Event {\n    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {\n        match self {\n            Event::AttemptLeap {\n                sync_leap_identifier,\n                peers_to_ask,\n            } => write!(\n                f,\n                \"sync pulling sync leap: {:?} {:?}\",\n                sync_leap_identifier, peers_to_ask\n            ),\n            Event::FetchedSyncLeapFromPeer {\n                sync_leap_identifier,\n                fetch_result,\n            } => write!(\n                f,\n                \"fetched sync leap from peer: {} {:?}\",\n                sync_leap_identifier, fetch_result\n            ),\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/sync_leaper/leap_activity.rs",
    "content": "use std::{cmp::Ordering, collections::HashMap, time::Instant};\n\nuse datasize::DataSize;\n\nuse crate::types::{NodeId, SyncLeap, SyncLeapIdentifier};\n\nuse super::{leap_state::LeapState, LeapActivityError, PeerState};\n\n#[derive(Debug, DataSize)]\npub(crate) struct LeapActivity {\n    sync_leap_identifier: SyncLeapIdentifier,\n    peers: HashMap<NodeId, PeerState>,\n    leap_start: Instant,\n}\n\nimpl LeapActivity {\n    pub(crate) fn new(\n        sync_leap_identifier: SyncLeapIdentifier,\n        peers: HashMap<NodeId, PeerState>,\n        leap_start: Instant,\n    ) -> Self {\n        Self {\n            sync_leap_identifier,\n            peers,\n            leap_start,\n        }\n    }\n\n    pub(super) fn status(&self) -> LeapState {\n        let sync_leap_identifier = self.sync_leap_identifier;\n        let in_flight = self\n            .peers\n            .values()\n            .filter(|state| matches!(state, PeerState::RequestSent))\n            .count();\n        let responsed = self.peers.len() - in_flight;\n\n        if in_flight == 0 && responsed == 0 {\n            return LeapState::Failed {\n                sync_leap_identifier,\n                in_flight,\n                error: LeapActivityError::NoPeers(sync_leap_identifier),\n                from_peers: vec![],\n            };\n        }\n        if in_flight > 0 && responsed == 0 {\n            return LeapState::Awaiting {\n                sync_leap_identifier,\n                in_flight,\n            };\n        }\n        match self.best_response() {\n            Ok((best_available, from_peers)) => LeapState::Received {\n                in_flight,\n                best_available: Box::new(best_available),\n                from_peers,\n            },\n            // `Unobtainable` means we couldn't download it from any peer so far - don't treat it\n            // as a failure if there are still requests in flight\n            Err(LeapActivityError::Unobtainable(_, _)) if 
in_flight > 0 => LeapState::Awaiting {\n                sync_leap_identifier,\n                in_flight,\n            },\n            Err(error) => LeapState::Failed {\n                sync_leap_identifier,\n                from_peers: vec![],\n                in_flight,\n                error,\n            },\n        }\n    }\n\n    fn best_response(&self) -> Result<(SyncLeap, Vec<NodeId>), LeapActivityError> {\n        let reject_count = self\n            .peers\n            .values()\n            .filter(|peer_state| matches!(peer_state, PeerState::Rejected))\n            .count();\n\n        let mut peers = vec![];\n        let mut maybe_ret = None;\n        for (peer, peer_state) in &self.peers {\n            match peer_state {\n                PeerState::Fetched(sync_leap) => match &maybe_ret {\n                    None => {\n                        maybe_ret = Some(sync_leap);\n                        peers.push(*peer);\n                    }\n                    Some(current_ret) => {\n                        match current_ret\n                            .highest_block_height()\n                            .cmp(&sync_leap.highest_block_height())\n                        {\n                            Ordering::Less => {\n                                maybe_ret = Some(sync_leap);\n                                peers = vec![*peer];\n                            }\n                            Ordering::Equal => {\n                                peers.push(*peer);\n                            }\n                            Ordering::Greater => {}\n                        }\n                    }\n                },\n                PeerState::RequestSent | PeerState::Rejected | PeerState::CouldntFetch => {}\n            }\n        }\n\n        match maybe_ret {\n            Some(sync_leap) => Ok((*sync_leap.clone(), peers)),\n            None => {\n                if reject_count > 0 {\n                    
Err(LeapActivityError::TooOld(self.sync_leap_identifier, peers))\n                } else {\n                    Err(LeapActivityError::Unobtainable(\n                        self.sync_leap_identifier,\n                        peers,\n                    ))\n                }\n            }\n        }\n    }\n\n    pub(crate) fn leap_start(&self) -> Instant {\n        self.leap_start\n    }\n\n    pub(crate) fn sync_leap_identifier(&self) -> &SyncLeapIdentifier {\n        &self.sync_leap_identifier\n    }\n\n    pub(crate) fn peers(&self) -> &HashMap<NodeId, PeerState> {\n        &self.peers\n    }\n\n    pub(crate) fn peers_mut(&mut self) -> &mut HashMap<NodeId, PeerState> {\n        &mut self.peers\n    }\n\n    /// Registers new leap activity if it wasn't already registered for specified peer.\n    pub(crate) fn register_peer(&mut self, peer: NodeId) -> Option<NodeId> {\n        (!self.peers().contains_key(&peer)).then(|| {\n            self.peers.insert(peer, PeerState::RequestSent);\n            peer\n        })\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::{\n        collections::{BTreeSet, HashMap},\n        time::Instant,\n    };\n\n    use rand::seq::SliceRandom;\n\n    use casper_types::{testing::TestRng, BlockHash, BlockHeader, BlockV2, TestBlockBuilder};\n\n    use crate::{\n        components::sync_leaper::{\n            leap_activity::LeapActivity, tests::make_test_sync_leap, LeapActivityError, LeapState,\n            PeerState,\n        },\n        types::{NodeId, SyncLeap, SyncLeapIdentifier},\n    };\n\n    fn make_random_block_with_height(rng: &mut TestRng, height: u64) -> BlockV2 {\n        TestBlockBuilder::new()\n            .era(0)\n            .height(height)\n            .switch_block(false)\n            .build(rng)\n    }\n\n    fn make_sync_leap_with_trusted_block_header(trusted_block_header: BlockHeader) -> SyncLeap {\n        SyncLeap {\n            trusted_ancestor_only: false,\n            trusted_block_header,\n            
trusted_ancestor_headers: vec![],\n            block_headers_with_signatures: vec![],\n        }\n    }\n\n    fn assert_peers<I>(expected_peers: I, leap_activity: &LeapActivity)\n    where\n        I: IntoIterator<Item = NodeId>,\n    {\n        let expected_peers: BTreeSet<_> = expected_peers.into_iter().collect();\n        let actual_peers: BTreeSet<_> = leap_activity\n            .peers()\n            .iter()\n            .map(|(node_id, _)| *node_id)\n            .collect();\n        assert_eq!(expected_peers, actual_peers);\n    }\n\n    #[test]\n    fn best_response_with_single_peer() {\n        let mut rng = TestRng::new();\n\n        let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng));\n\n        let sync_leap = make_test_sync_leap(&mut rng);\n        let peer_1 = (\n            NodeId::random(&mut rng),\n            PeerState::Fetched(Box::new(sync_leap.clone())),\n        );\n\n        let mut leap_activity = LeapActivity {\n            sync_leap_identifier,\n            peers: [peer_1.clone()].iter().cloned().collect(),\n            leap_start: Instant::now(),\n        };\n\n        let (actual_sync_leap, actual_peers) = leap_activity.best_response().unwrap();\n\n        assert!(!actual_peers.is_empty());\n        assert_eq!(actual_peers.first().unwrap(), &peer_1.0);\n        assert_eq!(actual_sync_leap, sync_leap);\n\n        // Adding peers in other states does not change the result.\n        let peer_request_sent = (NodeId::random(&mut rng), PeerState::RequestSent);\n        let peer_couldnt_fetch = (NodeId::random(&mut rng), PeerState::CouldntFetch);\n        let peer_rejected = (NodeId::random(&mut rng), PeerState::Rejected);\n        leap_activity.peers.extend(\n            [peer_request_sent, peer_couldnt_fetch, peer_rejected]\n                .iter()\n                .cloned(),\n        );\n\n        let (actual_sync_leap, actual_peers) = leap_activity.best_response().unwrap();\n\n        
assert_eq!(actual_peers.len(), 1);\n        assert_eq!(actual_peers.first().unwrap(), &peer_1.0);\n        assert_eq!(actual_sync_leap, sync_leap);\n    }\n\n    #[test]\n    fn best_response_with_multiple_peers() {\n        let mut rng = TestRng::new();\n\n        // Create 10 sync leaps, each with a distinct height. The height is not greater than 10.\n        let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng));\n        let mut heights: Vec<u64> = (0..10).collect();\n        heights.shuffle(&mut rng);\n        let mut peers_with_sync_leaps: HashMap<_, _> = heights\n            .iter()\n            .map(|height| {\n                let block = make_random_block_with_height(&mut rng, *height);\n                let sync_leap =\n                    make_sync_leap_with_trusted_block_header(block.header().clone().into());\n                (\n                    NodeId::random(&mut rng),\n                    PeerState::Fetched(Box::new(sync_leap)),\n                )\n            })\n            .collect();\n\n        // Add another peer with the best response.\n        let block = make_random_block_with_height(&mut rng, 500);\n        let best_sync_leap =\n            make_sync_leap_with_trusted_block_header(block.header().clone().into());\n        let peer_1_best_node_id = NodeId::random(&mut rng);\n        peers_with_sync_leaps.insert(\n            peer_1_best_node_id,\n            PeerState::Fetched(Box::new(best_sync_leap.clone())),\n        );\n        let mut leap_activity = LeapActivity {\n            sync_leap_identifier,\n            peers: peers_with_sync_leaps.clone(),\n            leap_start: Instant::now(),\n        };\n\n        let (actual_sync_leap, actual_peers) = leap_activity.best_response().unwrap();\n\n        // Expect only a single peer with the best sync leap.\n        assert_eq!(actual_peers.len(), 1);\n        assert_eq!(actual_peers.first().unwrap(), &peer_1_best_node_id);\n        
assert_eq!(actual_sync_leap, best_sync_leap);\n\n        // Add two more peers with even better response.\n        let block = make_random_block_with_height(&mut rng, 1000);\n        let best_sync_leap =\n            make_sync_leap_with_trusted_block_header(block.header().clone().into());\n        let peer_2_best_node_id = NodeId::random(&mut rng);\n        let peer_3_best_node_id = NodeId::random(&mut rng);\n        leap_activity.peers.extend(\n            [\n                (\n                    peer_2_best_node_id,\n                    PeerState::Fetched(Box::new(best_sync_leap.clone())),\n                ),\n                (\n                    peer_3_best_node_id,\n                    PeerState::Fetched(Box::new(best_sync_leap.clone())),\n                ),\n            ]\n            .iter()\n            .cloned(),\n        );\n\n        let (actual_sync_leap, mut actual_peers) = leap_activity.best_response().unwrap();\n\n        // Expect two recently added peers with best sync leap to be reported.\n        let mut expected_peers = vec![peer_2_best_node_id, peer_3_best_node_id];\n        actual_peers.sort_unstable();\n        expected_peers.sort_unstable();\n\n        assert_eq!(actual_peers.len(), 2);\n        assert_eq!(actual_peers, expected_peers);\n        assert_eq!(actual_sync_leap, best_sync_leap);\n\n        // Add two more peers with worse response.\n        let block = make_random_block_with_height(&mut rng, 1);\n        let worse_sync_leap =\n            make_sync_leap_with_trusted_block_header(block.header().clone().into());\n        let peer_3_worse_node_id = NodeId::random(&mut rng);\n        let peer_4_worse_node_id = NodeId::random(&mut rng);\n        leap_activity.peers.extend(\n            [\n                (\n                    peer_3_worse_node_id,\n                    PeerState::Fetched(Box::new(worse_sync_leap.clone())),\n                ),\n                (\n                    peer_4_worse_node_id,\n                    
PeerState::Fetched(Box::new(worse_sync_leap)),\n                ),\n            ]\n            .iter()\n            .cloned(),\n        );\n\n        let (actual_sync_leap, mut actual_peers) = leap_activity.best_response().unwrap();\n\n        // Expect two previously added best peers with best sync leap to be reported.\n        let mut expected_peers = vec![peer_2_best_node_id, peer_3_best_node_id];\n        actual_peers.sort_unstable();\n        expected_peers.sort_unstable();\n\n        assert_eq!(actual_peers.len(), 2);\n        assert_eq!(actual_peers, expected_peers);\n        assert_eq!(actual_sync_leap, best_sync_leap);\n\n        // Adding peers in other states does not change the result.\n        let peer_request_sent = (NodeId::random(&mut rng), PeerState::RequestSent);\n        let peer_couldnt_fetch = (NodeId::random(&mut rng), PeerState::CouldntFetch);\n        let peer_rejected = (NodeId::random(&mut rng), PeerState::Rejected);\n        leap_activity.peers.extend(\n            [peer_request_sent, peer_couldnt_fetch, peer_rejected]\n                .iter()\n                .cloned(),\n        );\n\n        let (actual_sync_leap, mut actual_peers) = leap_activity.best_response().unwrap();\n        let mut expected_peers = vec![peer_2_best_node_id, peer_3_best_node_id];\n        actual_peers.sort_unstable();\n        expected_peers.sort_unstable();\n        assert_eq!(actual_peers.len(), 2);\n        assert_eq!(actual_peers, expected_peers);\n        assert_eq!(actual_sync_leap, best_sync_leap);\n    }\n\n    #[test]\n    fn best_response_failed() {\n        let mut rng = TestRng::new();\n\n        let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng));\n\n        let peer_couldnt_fetch = (NodeId::random(&mut rng), PeerState::CouldntFetch);\n        let peer_request_sent = (NodeId::random(&mut rng), PeerState::RequestSent);\n\n        let mut leap_activity = LeapActivity {\n            sync_leap_identifier,\n            
peers: [peer_couldnt_fetch, peer_request_sent]\n                .iter()\n                .cloned()\n                .collect(),\n            leap_start: Instant::now(),\n        };\n\n        let best_response_error = leap_activity.best_response().unwrap_err();\n        assert!(matches!(\n            best_response_error,\n            LeapActivityError::Unobtainable(_, _)\n        ));\n\n        leap_activity\n            .peers\n            .insert(NodeId::random(&mut rng), PeerState::Rejected);\n        let best_response_error = leap_activity.best_response().unwrap_err();\n        assert!(matches!(\n            best_response_error,\n            LeapActivityError::TooOld(_, _)\n        ));\n    }\n\n    #[test]\n    fn leap_activity_status_failed() {\n        let mut rng = TestRng::new();\n\n        let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng));\n\n        let leap_activity = LeapActivity {\n            sync_leap_identifier,\n            peers: HashMap::new(),\n            leap_start: Instant::now(),\n        };\n        assert!(matches!(\n            leap_activity.status(),\n            LeapState::Failed { error, .. } if matches!(error, LeapActivityError::NoPeers(_))\n        ));\n\n        let peer_1 = (NodeId::random(&mut rng), PeerState::CouldntFetch);\n        let leap_activity = LeapActivity {\n            sync_leap_identifier,\n            peers: [peer_1].iter().cloned().collect(),\n            leap_start: Instant::now(),\n        };\n        assert!(matches!(\n            leap_activity.status(),\n            LeapState::Failed { error, .. 
} if matches!(error, LeapActivityError::Unobtainable(_, _))\n        ));\n    }\n\n    #[test]\n    fn leap_activity_status_awaiting() {\n        let mut rng = TestRng::new();\n\n        let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng));\n\n        let peer_1 = (NodeId::random(&mut rng), PeerState::RequestSent);\n        let peer_2 = (NodeId::random(&mut rng), PeerState::RequestSent);\n        let mut leap_activity = LeapActivity {\n            sync_leap_identifier,\n            peers: [peer_1, peer_2].iter().cloned().collect(),\n            leap_start: Instant::now(),\n        };\n        assert!(matches!(leap_activity.status(), LeapState::Awaiting { .. }));\n\n        leap_activity\n            .peers\n            .insert(NodeId::random(&mut rng), PeerState::CouldntFetch);\n        assert!(matches!(leap_activity.status(), LeapState::Awaiting { .. }));\n    }\n\n    #[test]\n    fn leap_activity_status_received() {\n        let mut rng = TestRng::new();\n\n        let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng));\n\n        let sync_leap = make_test_sync_leap(&mut rng);\n        let peer_1 = (\n            NodeId::random(&mut rng),\n            PeerState::Fetched(Box::new(sync_leap)),\n        );\n        let mut leap_activity = LeapActivity {\n            sync_leap_identifier,\n            peers: [peer_1].iter().cloned().collect(),\n            leap_start: Instant::now(),\n        };\n        assert!(matches!(leap_activity.status(), LeapState::Received { .. 
}));\n\n        // Adding peers in other states does not change the result.\n        let peer_request_sent = (NodeId::random(&mut rng), PeerState::RequestSent);\n        let peer_couldnt_fetch = (NodeId::random(&mut rng), PeerState::CouldntFetch);\n        let peer_rejected = (NodeId::random(&mut rng), PeerState::Rejected);\n        leap_activity.peers.extend(\n            [peer_request_sent, peer_couldnt_fetch, peer_rejected]\n                .iter()\n                .cloned(),\n        );\n\n        assert!(matches!(leap_activity.status(), LeapState::Received { .. }));\n    }\n\n    #[test]\n    fn register_peer() {\n        let mut rng = TestRng::new();\n\n        let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng));\n\n        let peer_1 = (NodeId::random(&mut rng), PeerState::RequestSent);\n\n        let mut leap_activity = LeapActivity {\n            sync_leap_identifier,\n            peers: [peer_1.clone()].iter().cloned().collect(),\n            leap_start: Instant::now(),\n        };\n\n        // Expect the single peer specified on creation.\n        assert_peers([peer_1.0], &leap_activity);\n\n        // Registering the same peer the second time does not register.\n        let maybe_registered_peer = leap_activity.register_peer(peer_1.0);\n        assert!(maybe_registered_peer.is_none());\n\n        // Still expect only the single peer.\n        assert_peers([peer_1.0], &leap_activity);\n\n        // Registering additional peer should succeed.\n        let peer_2 = NodeId::random(&mut rng);\n        let maybe_registered_peer = leap_activity.register_peer(peer_2);\n        assert_eq!(maybe_registered_peer, Some(peer_2));\n\n        // But registering it for the second time should be a noop.\n        let maybe_registered_peer = leap_activity.register_peer(peer_2);\n        assert_eq!(maybe_registered_peer, None);\n\n        // Expect two added peers.\n        assert_peers([peer_1.0, peer_2], &leap_activity);\n    }\n}\n"
  },
  {
    "path": "node/src/components/sync_leaper/leap_state.rs",
    "content": "use std::fmt::{Display, Formatter};\n\nuse datasize::DataSize;\n\nuse crate::types::{NodeId, SyncLeap, SyncLeapIdentifier};\n\nuse super::LeapActivityError;\n\n#[derive(Debug, DataSize)]\npub(crate) enum LeapState {\n    Idle,\n    Awaiting {\n        sync_leap_identifier: SyncLeapIdentifier,\n        in_flight: usize,\n    },\n    Received {\n        best_available: Box<SyncLeap>,\n        from_peers: Vec<NodeId>,\n        in_flight: usize,\n    },\n    Failed {\n        sync_leap_identifier: SyncLeapIdentifier,\n        error: LeapActivityError,\n        from_peers: Vec<NodeId>,\n        in_flight: usize,\n    },\n}\n\nimpl Display for LeapState {\n    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {\n        match self {\n            LeapState::Idle => {\n                write!(f, \"Idle\")\n            }\n            LeapState::Awaiting {\n                sync_leap_identifier,\n                in_flight,\n            } => {\n                write!(\n                    f,\n                    \"Awaiting {} responses for {}\",\n                    in_flight,\n                    sync_leap_identifier.block_hash(),\n                )\n            }\n            LeapState::Received {\n                best_available,\n                from_peers,\n                in_flight,\n            } => {\n                write!(\n                    f,\n                    \"Received {} from {} peers, awaiting {} responses\",\n                    best_available.highest_block_hash(),\n                    from_peers.len(),\n                    in_flight\n                )\n            }\n            LeapState::Failed {\n                sync_leap_identifier,\n                error,\n                ..\n            } => {\n                write!(\n                    f,\n                    \"Failed leap for {} {}\",\n                    sync_leap_identifier.block_hash(),\n                    error\n                )\n            }\n        }\n    
}\n}\n\nimpl LeapState {\n    pub(super) fn in_flight(&self) -> usize {\n        match self {\n            LeapState::Idle => 0,\n            LeapState::Awaiting { in_flight, .. }\n            | LeapState::Received { in_flight, .. }\n            | LeapState::Failed { in_flight, .. } => *in_flight,\n        }\n    }\n\n    pub(super) fn active(&self) -> bool {\n        self.in_flight() > 0\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use casper_types::{testing::TestRng, BlockHash};\n\n    use crate::{\n        components::sync_leaper::{tests::make_test_sync_leap, LeapActivityError, LeapState},\n        types::SyncLeapIdentifier,\n    };\n\n    #[test]\n    fn leap_state() {\n        let mut rng = TestRng::new();\n\n        let leap_state = LeapState::Idle;\n        assert!(!leap_state.active());\n\n        let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng));\n        let leap_state = LeapState::Awaiting {\n            sync_leap_identifier,\n            in_flight: 0,\n        };\n        assert!(!leap_state.active());\n        assert_eq!(leap_state.in_flight(), 0);\n\n        let leap_state = LeapState::Awaiting {\n            sync_leap_identifier,\n            in_flight: 1,\n        };\n        assert!(leap_state.active());\n        assert_eq!(leap_state.in_flight(), 1);\n\n        let leap_state = LeapState::Failed {\n            sync_leap_identifier,\n            in_flight: 0,\n            error: LeapActivityError::NoPeers(sync_leap_identifier),\n            from_peers: vec![],\n        };\n        assert!(!leap_state.active());\n        assert_eq!(leap_state.in_flight(), 0);\n\n        let leap_state = LeapState::Failed {\n            sync_leap_identifier,\n            in_flight: 1,\n            error: LeapActivityError::NoPeers(sync_leap_identifier),\n            from_peers: vec![],\n        };\n        assert!(leap_state.active());\n        assert_eq!(leap_state.in_flight(), 1);\n\n        let sync_leap = 
make_test_sync_leap(&mut rng);\n        let leap_state = LeapState::Received {\n            best_available: Box::new(sync_leap.clone()),\n            from_peers: vec![],\n            in_flight: 0,\n        };\n        assert!(!leap_state.active());\n        assert_eq!(leap_state.in_flight(), 0);\n\n        let leap_state = LeapState::Received {\n            best_available: Box::new(sync_leap),\n            from_peers: vec![],\n            in_flight: 1,\n        };\n        assert!(leap_state.active());\n        assert_eq!(leap_state.in_flight(), 1);\n    }\n}\n"
  },
  {
    "path": "node/src/components/sync_leaper/metrics.rs",
    "content": "use prometheus::{Histogram, IntCounter, Registry};\n\nuse crate::{unregister_metric, utils};\n\nconst SYNC_LEAP_DURATION_NAME: &str = \"sync_leap_duration_seconds\";\nconst SYNC_LEAP_DURATION_HELP: &str = \"duration (in sec) to perform a successful sync leap\";\n\n// We use linear buckets to observe the time it takes to do a sync leap.\n// Buckets have 1s widths and cover up to 4s durations with this granularity.\nconst LINEAR_BUCKET_START: f64 = 1.0;\nconst LINEAR_BUCKET_WIDTH: f64 = 1.0;\nconst LINEAR_BUCKET_COUNT: usize = 4;\n\n/// Metrics for the sync leap component.\n#[derive(Debug)]\npub(super) struct Metrics {\n    /// Time duration to perform a sync leap.\n    pub(super) sync_leap_duration: Histogram,\n    /// Number of successful sync leap responses that were received from peers.\n    pub(super) sync_leap_fetched_from_peer: IntCounter,\n    /// Number of requests that were rejected by peers.\n    pub(super) sync_leap_rejected_by_peer: IntCounter,\n    /// Number of requests that couldn't be fetched from peers.\n    pub(super) sync_leap_cant_fetch: IntCounter,\n\n    registry: Registry,\n}\n\nimpl Metrics {\n    /// Creates a new instance of the block accumulator metrics, using the given prefix.\n    pub fn new(registry: &Registry) -> Result<Self, prometheus::Error> {\n        let buckets = prometheus::linear_buckets(\n            LINEAR_BUCKET_START,\n            LINEAR_BUCKET_WIDTH,\n            LINEAR_BUCKET_COUNT,\n        )?;\n\n        let sync_leap_fetched_from_peer = IntCounter::new(\n            \"sync_leap_fetched_from_peer_total\".to_string(),\n            \"number of successful sync leap responses that were received from peers\".to_string(),\n        )?;\n        let sync_leap_rejected_by_peer = IntCounter::new(\n            \"sync_leap_rejected_by_peer_total\".to_string(),\n            \"number of sync leap requests that were rejected by peers\".to_string(),\n        )?;\n        let sync_leap_cant_fetch = IntCounter::new(\n     
       \"sync_leap_cant_fetch_total\".to_string(),\n            \"number of sync leap requests that couldn't be fetched from peers\".to_string(),\n        )?;\n\n        registry.register(Box::new(sync_leap_fetched_from_peer.clone()))?;\n        registry.register(Box::new(sync_leap_rejected_by_peer.clone()))?;\n        registry.register(Box::new(sync_leap_cant_fetch.clone()))?;\n\n        Ok(Metrics {\n            sync_leap_duration: utils::register_histogram_metric(\n                registry,\n                SYNC_LEAP_DURATION_NAME,\n                SYNC_LEAP_DURATION_HELP,\n                buckets,\n            )?,\n            sync_leap_fetched_from_peer,\n            sync_leap_rejected_by_peer,\n            sync_leap_cant_fetch,\n            registry: registry.clone(),\n        })\n    }\n}\n\nimpl Drop for Metrics {\n    fn drop(&mut self) {\n        unregister_metric!(self.registry, self.sync_leap_duration);\n        unregister_metric!(self.registry, self.sync_leap_cant_fetch);\n        unregister_metric!(self.registry, self.sync_leap_fetched_from_peer);\n        unregister_metric!(self.registry, self.sync_leap_rejected_by_peer);\n    }\n}\n"
  },
  {
    "path": "node/src/components/sync_leaper/tests.rs",
    "content": "use std::{collections::BTreeSet, sync::Arc};\n\nuse prometheus::Registry;\n\nuse casper_types::{testing::TestRng, BlockHash, Chainspec, TestBlockBuilder};\n\nuse crate::{\n    components::{\n        fetcher::{self, FetchResult, FetchedData},\n        sync_leaper::{LeapState, PeerState, RegisterLeapAttemptOutcome},\n    },\n    types::{NodeId, SyncLeap, SyncLeapIdentifier},\n};\n\nuse super::{Error, SyncLeaper};\n\npub(crate) fn make_test_sync_leap(rng: &mut TestRng) -> SyncLeap {\n    let block = TestBlockBuilder::new().build_versioned(rng);\n    SyncLeap {\n        trusted_ancestor_only: false,\n        trusted_block_header: block.clone_header(),\n        trusted_ancestor_headers: vec![],\n        block_headers_with_signatures: vec![],\n    }\n}\n\nfn make_sync_leaper(rng: &mut TestRng) -> SyncLeaper {\n    let chainspec = Chainspec::random(rng);\n    let registry = Registry::new();\n    SyncLeaper::new(Arc::new(chainspec), &registry).unwrap()\n}\n\nfn assert_peers(expected: &[NodeId], actual: &Vec<(NodeId, PeerState)>) {\n    // Assert that all new peers are in `RequestSent` state.\n    for (_, peer_state) in actual {\n        assert!(matches!(peer_state, &PeerState::RequestSent));\n    }\n\n    // Assert that we have the expected list of peers.\n    let expected: BTreeSet<_> = expected.iter().collect();\n    let actual: BTreeSet<_> = actual.iter().map(|(node_id, _)| node_id).collect();\n    assert_eq!(expected, actual);\n}\n\nfn assert_peer(sync_leaper: SyncLeaper, (expected_peer, expected_peer_state): (NodeId, PeerState)) {\n    let peers = sync_leaper.peers().unwrap();\n    let (node_id, actual_peer_state) = peers.first().unwrap();\n    assert_eq!(node_id, &expected_peer);\n    assert_eq!(actual_peer_state, &expected_peer_state);\n}\n\n#[test]\nfn new_sync_leaper_has_no_activity() {\n    let mut rng = TestRng::new();\n\n    let mut sync_leaper = make_sync_leaper(&mut rng);\n\n    assert!(matches!(sync_leaper.leap_status(), 
LeapState::Idle));\n}\n\n#[test]\nfn register_leap_attempt_no_peers() {\n    let mut rng = TestRng::new();\n\n    let mut sync_leaper = make_sync_leaper(&mut rng);\n\n    let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng));\n    let peers_to_ask = vec![];\n\n    let outcome = sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask);\n    assert!(matches!(outcome, RegisterLeapAttemptOutcome::DoNothing));\n    assert!(sync_leaper.peers().is_none());\n}\n\n#[test]\nfn register_leap_attempt_reattempt_for_different_leap_identifier() {\n    let mut rng = TestRng::new();\n\n    let mut sync_leaper = make_sync_leaper(&mut rng);\n\n    let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng));\n\n    let peer_1 = NodeId::random(&mut rng);\n    let peers_to_ask = vec![peer_1];\n\n    // Start with a single peer.\n    let outcome = sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask.clone());\n    // Expect that we should fetch SyncLeap from that peer.\n    assert!(matches!(\n        outcome,\n        RegisterLeapAttemptOutcome::FetchSyncLeapFromPeers(peers) if peers == peers_to_ask\n    ));\n    let expected_peers = vec![peer_1];\n    let actual_peers = sync_leaper.peers().unwrap();\n    assert_peers(&expected_peers, &actual_peers);\n\n    // Request another sync leap, but for new sync leap identifier.\n    let sync_leap_identifier = SyncLeapIdentifier::sync_to_historical(BlockHash::random(&mut rng));\n    let outcome = sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask);\n    // Expect that we should do nothing as the identifiers mismatch.\n    assert!(matches!(outcome, RegisterLeapAttemptOutcome::DoNothing));\n    let expected_peers = vec![peer_1];\n    let actual_peers = sync_leaper.peers().unwrap();\n    assert_peers(&expected_peers, &actual_peers);\n}\n\n#[test]\nfn register_leap_attempt_with_reattempt_for_the_same_leap_identifier() {\n    let mut rng = 
TestRng::new();\n\n    let mut sync_leaper = make_sync_leaper(&mut rng);\n\n    let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng));\n\n    let peer_1 = NodeId::random(&mut rng);\n    let peers_to_ask = vec![peer_1];\n\n    // Start with a single peer.\n    let outcome = sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask.clone());\n    // Expect that we should fetch SyncLeap from that peer.\n    assert!(matches!(\n        outcome,\n        RegisterLeapAttemptOutcome::FetchSyncLeapFromPeers(peers) if peers == peers_to_ask\n    ));\n    let expected_peers = vec![peer_1];\n    let actual_peers = sync_leaper.peers().unwrap();\n    assert_peers(&expected_peers, &actual_peers);\n\n    // Try to register the same peer.\n    let outcome = sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask);\n    // Expect that we should do nothing as the SyncLeap from this peer has already been requested.\n    assert!(matches!(outcome, RegisterLeapAttemptOutcome::DoNothing));\n    let expected_peers = vec![peer_1];\n    let actual_peers = sync_leaper.peers().unwrap();\n    assert_peers(&expected_peers, &actual_peers);\n\n    // Try to register one new peer.\n    let peer_2 = NodeId::random(&mut rng);\n    let peers_to_ask = vec![peer_2];\n    let outcome = sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask.clone());\n    // Expect that we should fetch SyncLeap from the new peer only.\n    assert!(matches!(\n        outcome,\n        RegisterLeapAttemptOutcome::FetchSyncLeapFromPeers(peers) if peers == peers_to_ask\n    ));\n    let expected_peers = vec![peer_1, peer_2];\n    let actual_peers = sync_leaper.peers().unwrap();\n    assert_peers(&expected_peers, &actual_peers);\n\n    // Try to register two already existing peers.\n    let mut peers_to_ask = vec![peer_1, peer_2];\n    let outcome = sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask.clone());\n    // Expect that we should 
do nothing as the SyncLeap from both these peers has already been\n    // requested.\n    assert!(matches!(outcome, RegisterLeapAttemptOutcome::DoNothing));\n    let expected_peers = vec![peer_1, peer_2];\n    let actual_peers = sync_leaper.peers().unwrap();\n    assert_peers(&expected_peers, &actual_peers);\n\n    // Add two new peers for a total set of four, among which two are already registered.\n    let peer_3 = NodeId::random(&mut rng);\n    let peer_4 = NodeId::random(&mut rng);\n    peers_to_ask.push(peer_3);\n    peers_to_ask.push(peer_4);\n    let outcome = sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask.clone());\n    // Expect that we should fetch SyncLeap from the two new peers only.\n    assert!(matches!(\n        outcome,\n        RegisterLeapAttemptOutcome::FetchSyncLeapFromPeers(peers) if peers == vec![peer_3, peer_4]\n    ));\n    let expected_peers = vec![peer_1, peer_2, peer_3, peer_4];\n    let actual_peers = sync_leaper.peers().unwrap();\n    assert_peers(&expected_peers, &actual_peers);\n}\n\n#[test]\nfn fetch_received_from_storage() {\n    let mut rng = TestRng::new();\n\n    let mut sync_leaper = make_sync_leaper(&mut rng);\n    let sync_leap = make_test_sync_leap(&mut rng);\n    let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng));\n\n    let peer_1 = NodeId::random(&mut rng);\n    let peers_to_ask = vec![peer_1];\n    sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask);\n\n    let fetch_result: FetchResult<SyncLeap> = Ok(FetchedData::from_storage(Box::new(sync_leap)));\n\n    let actual = sync_leaper\n        .fetch_received(sync_leap_identifier, fetch_result)\n        .unwrap_err();\n    assert!(matches!(actual, Error::FetchedSyncLeapFromStorage(_)));\n}\n\n#[test]\nfn fetch_received_identifier_mismatch() {\n    let mut rng = TestRng::new();\n\n    let mut sync_leaper = make_sync_leaper(&mut rng);\n    let sync_leap = make_test_sync_leap(&mut rng);\n    let 
sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng));\n\n    let peer = NodeId::random(&mut rng);\n    let peers_to_ask = vec![peer];\n    sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask);\n\n    let fetch_result: FetchResult<SyncLeap> = Ok(FetchedData::from_peer(sync_leap, peer));\n\n    let different_sync_leap_identifier =\n        SyncLeapIdentifier::sync_to_historical(BlockHash::random(&mut rng));\n\n    let actual = sync_leaper\n        .fetch_received(different_sync_leap_identifier, fetch_result)\n        .unwrap_err();\n\n    assert!(matches!(actual, Error::SyncLeapIdentifierMismatch { .. }));\n}\n\n#[test]\nfn fetch_received_unexpected_response() {\n    let mut rng = TestRng::new();\n\n    let mut sync_leaper = make_sync_leaper(&mut rng);\n    let sync_leap = make_test_sync_leap(&mut rng);\n    let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng));\n\n    let peer = NodeId::random(&mut rng);\n    let fetch_result: FetchResult<SyncLeap> = Ok(FetchedData::from_peer(sync_leap, peer));\n\n    let actual = sync_leaper\n        .fetch_received(sync_leap_identifier, fetch_result)\n        .unwrap_err();\n    assert!(matches!(actual, Error::UnexpectedSyncLeapResponse(_)));\n\n    let peers = sync_leaper.peers();\n    assert!(peers.is_none());\n}\n\n#[test]\nfn fetch_received_from_unknown_peer() {\n    let mut rng = TestRng::new();\n\n    let mut sync_leaper = make_sync_leaper(&mut rng);\n    let sync_leap = make_test_sync_leap(&mut rng);\n    let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng));\n\n    let peer = NodeId::random(&mut rng);\n    let peers_to_ask = vec![peer];\n    sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask);\n\n    let unknown_peer = NodeId::random(&mut rng);\n    let fetch_result: FetchResult<SyncLeap> = Ok(FetchedData::from_peer(sync_leap, unknown_peer));\n\n    let actual = sync_leaper\n        
.fetch_received(sync_leap_identifier, fetch_result)\n        .unwrap_err();\n    assert!(matches!(actual, Error::ResponseFromUnknownPeer { .. }));\n\n    assert_peer(sync_leaper, (peer, PeerState::RequestSent));\n}\n\n#[test]\nfn fetch_received_correctly() {\n    let mut rng = TestRng::new();\n\n    let mut sync_leaper = make_sync_leaper(&mut rng);\n    let sync_leap = make_test_sync_leap(&mut rng);\n    let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng));\n\n    let peer = NodeId::random(&mut rng);\n    let peers_to_ask = vec![peer];\n    sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask);\n\n    let fetch_result: FetchResult<SyncLeap> = Ok(FetchedData::from_peer(sync_leap.clone(), peer));\n\n    let actual = sync_leaper.fetch_received(sync_leap_identifier, fetch_result);\n    assert!(actual.is_ok());\n\n    assert_peer(sync_leaper, (peer, PeerState::Fetched(Box::new(sync_leap))));\n}\n\n#[test]\nfn fetch_received_peer_rejected() {\n    let mut rng = TestRng::new();\n\n    let mut sync_leaper = make_sync_leaper(&mut rng);\n    let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng));\n\n    let peer = NodeId::random(&mut rng);\n    let peers_to_ask = vec![peer];\n    sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask);\n\n    let fetch_result: FetchResult<SyncLeap> = Err(fetcher::Error::Rejected {\n        id: Box::new(sync_leap_identifier),\n        peer,\n    });\n\n    let actual = sync_leaper.fetch_received(sync_leap_identifier, fetch_result);\n    assert!(actual.is_ok());\n\n    assert_peer(sync_leaper, (peer, PeerState::Rejected));\n}\n\n#[test]\nfn fetch_received_from_unknown_peer_rejected() {\n    let mut rng = TestRng::new();\n\n    let mut sync_leaper = make_sync_leaper(&mut rng);\n    let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng));\n\n    let peer = NodeId::random(&mut rng);\n    let peers_to_ask = 
vec![peer];\n    sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask);\n\n    let unknown_peer = NodeId::random(&mut rng);\n    let fetch_result: FetchResult<SyncLeap> = Err(fetcher::Error::Rejected {\n        id: Box::new(sync_leap_identifier),\n        peer: unknown_peer,\n    });\n\n    let actual = sync_leaper\n        .fetch_received(sync_leap_identifier, fetch_result)\n        .unwrap_err();\n    assert!(matches!(actual, Error::ResponseFromUnknownPeer { .. }));\n\n    assert_peer(sync_leaper, (peer, PeerState::RequestSent));\n}\n\n#[test]\nfn fetch_received_other_error() {\n    let mut rng = TestRng::new();\n\n    let mut sync_leaper = make_sync_leaper(&mut rng);\n    let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng));\n\n    let peer = NodeId::random(&mut rng);\n    let peers_to_ask = vec![peer];\n    sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask);\n\n    let fetch_result: FetchResult<SyncLeap> = Err(fetcher::Error::TimedOut {\n        id: Box::new(sync_leap_identifier),\n        peer,\n    });\n\n    let actual = sync_leaper.fetch_received(sync_leap_identifier, fetch_result);\n    assert!(actual.is_ok());\n\n    assert_peer(sync_leaper, (peer, PeerState::CouldntFetch));\n}\n\n#[test]\nfn fetch_received_from_unknown_peer_other_error() {\n    let mut rng = TestRng::new();\n\n    let mut sync_leaper = make_sync_leaper(&mut rng);\n    let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng));\n\n    let peer = NodeId::random(&mut rng);\n    let peers_to_ask = vec![peer];\n    sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask);\n\n    let unknown_peer = NodeId::random(&mut rng);\n    let fetch_result: FetchResult<SyncLeap> = Err(fetcher::Error::TimedOut {\n        id: Box::new(sync_leap_identifier),\n        peer: unknown_peer,\n    });\n\n    let actual = sync_leaper\n        .fetch_received(sync_leap_identifier, fetch_result)\n        
.unwrap_err();\n    assert!(matches!(actual, Error::ResponseFromUnknownPeer { .. }));\n\n    assert_peer(sync_leaper, (peer, PeerState::RequestSent));\n}\n"
  },
  {
    "path": "node/src/components/sync_leaper.rs",
    "content": "//! The Sync Leaper\nmod error;\nmod event;\nmod leap_activity;\nmod leap_state;\nmod metrics;\n#[cfg(test)]\nmod tests;\n\nuse std::{sync::Arc, time::Instant};\n\nuse datasize::DataSize;\nuse prometheus::Registry;\nuse thiserror::Error;\nuse tracing::{debug, error, info, warn};\n\nuse casper_types::Chainspec;\n\nuse crate::{\n    components::{\n        fetcher::{self, FetchResult, FetchedData},\n        Component,\n    },\n    effect::{requests::FetcherRequest, EffectBuilder, EffectExt, Effects},\n    types::{\n        sync_leap_validation_metadata::SyncLeapValidationMetaData, NodeId, SyncLeap,\n        SyncLeapIdentifier,\n    },\n    NodeRng,\n};\npub(crate) use error::LeapActivityError;\npub(crate) use event::Event;\npub(crate) use leap_state::LeapState;\n\nuse metrics::Metrics;\n\nuse self::leap_activity::LeapActivity;\n\nconst COMPONENT_NAME: &str = \"sync_leaper\";\n\n#[derive(Clone, Debug, DataSize, Eq, PartialEq)]\npub(crate) enum PeerState {\n    RequestSent,\n    Rejected,\n    CouldntFetch,\n    Fetched(Box<SyncLeap>),\n}\n\n#[derive(Debug)]\nenum RegisterLeapAttemptOutcome {\n    DoNothing,\n    FetchSyncLeapFromPeers(Vec<NodeId>),\n}\n\n#[derive(Debug, Error)]\nenum Error {\n    #[error(\"fetched a sync leap from storage - {0}\")]\n    FetchedSyncLeapFromStorage(SyncLeapIdentifier),\n    #[error(\"received a sync leap response while no requests were in progress - {0}\")]\n    UnexpectedSyncLeapResponse(SyncLeapIdentifier),\n    #[error(\"block hash in the response '{actual}' doesn't match the one requested '{expected}'\")]\n    SyncLeapIdentifierMismatch {\n        expected: SyncLeapIdentifier,\n        actual: SyncLeapIdentifier,\n    },\n    #[error(\n        \"received a sync leap response from an unknown peer - {peer} - {sync_leap_identifier}\"\n    )]\n    ResponseFromUnknownPeer {\n        peer: NodeId,\n        sync_leap_identifier: SyncLeapIdentifier,\n    },\n}\n\n#[derive(Debug, DataSize)]\npub(crate) struct SyncLeaper {\n    
leap_activity: Option<LeapActivity>,\n    chainspec: Arc<Chainspec>,\n    #[data_size(skip)]\n    metrics: Metrics,\n}\n\nimpl SyncLeaper {\n    pub(crate) fn new(\n        chainspec: Arc<Chainspec>,\n        registry: &Registry,\n    ) -> Result<Self, prometheus::Error> {\n        Ok(SyncLeaper {\n            leap_activity: None,\n            chainspec,\n            metrics: Metrics::new(registry)?,\n        })\n    }\n\n    /// Returns whether a sync leap is ongoing or completed and its state if so.\n    ///\n    /// If a sync leap has been completed, successfully or not, the results are returned and the\n    /// attempt is removed, effectively making the component idle.\n    pub(crate) fn leap_status(&mut self) -> LeapState {\n        match &self.leap_activity {\n            None => LeapState::Idle,\n            Some(activity) => {\n                let result = activity.status();\n                if result.active() == false {\n                    match result {\n                        LeapState::Received { .. } | LeapState::Failed { .. } => {\n                            self.metrics\n                                .sync_leap_duration\n                                .observe(activity.leap_start().elapsed().as_secs_f64());\n                        }\n                        LeapState::Idle | LeapState::Awaiting { .. } => {\n                            // should be unreachable\n                            error!(status = %result, ?activity, \"sync leaper has inconsistent status\");\n                        }\n                    }\n                    self.leap_activity = None;\n                }\n                result\n            }\n        }\n    }\n\n    /// Causes any ongoing sync leap attempt to be abandoned, i.e. 
results gathered so far are\n    /// dropped and responses received later for this attempt are ignored.\n    pub(crate) fn purge(&mut self) {\n        if let Some(activity) = self.leap_activity.take() {\n            debug!(identifier = %activity.sync_leap_identifier(), \"purging sync leap\");\n        }\n    }\n\n    #[cfg_attr(doc, aquamarine::aquamarine)]\n    /// ```mermaid\n    /// flowchart TD\n    ///     style Start fill:#66ccff,stroke:#333,stroke-width:4px\n    ///     style End fill:#66ccff,stroke:#333,stroke-width:4px\n    ///\n    ///     title[SyncLeap process - AttemptLeap]\n    ///     title---Start\n    ///     style title fill:#FFF,stroke:#FFF\n    ///     linkStyle 0 stroke-width:0;\n    ///\n    ///     Start --> A{have at least<br>one peer?}\n    ///     A -->|Yes| B{is other sync<br>leap in progress?}\n    ///     A -->|No| End\n    ///     B -->|Yes| C{do sync leap<br>identifiers match?}\n    ///     C -->|No| End\n    ///     C -->|Yes| D[fetch SyncLeap from potentially<br>newly learned peers]\n    ///     B -->|No| G[fetch SyncLeap<br>from all peers]\n    ///     G --> E\n    ///     D --> E[SyncLeap arrives]\n    ///     E --> F[SyncLeap is stored]\n    ///     F --> End\n    /// ```\n    fn register_leap_attempt(\n        &mut self,\n        sync_leap_identifier: SyncLeapIdentifier,\n        peers_to_ask: Vec<NodeId>,\n    ) -> RegisterLeapAttemptOutcome {\n        info!(%sync_leap_identifier, \"registering leap attempt\");\n        if peers_to_ask.is_empty() {\n            error!(\"tried to start fetching a sync leap without peers to ask\");\n            return RegisterLeapAttemptOutcome::DoNothing;\n        }\n        if let Some(leap_activity) = self.leap_activity.as_mut() {\n            if leap_activity.sync_leap_identifier() != &sync_leap_identifier {\n                error!(\n                    current_sync_leap_identifier = %leap_activity.sync_leap_identifier(),\n                    requested_sync_leap_identifier = 
%sync_leap_identifier,\n                    \"tried to start fetching a sync leap for a different sync_leap_identifier\"\n                );\n                return RegisterLeapAttemptOutcome::DoNothing;\n            }\n\n            let peers_not_asked_yet: Vec<_> = peers_to_ask\n                .iter()\n                .filter_map(|peer| leap_activity.register_peer(*peer))\n                .collect();\n\n            return if peers_not_asked_yet.is_empty() {\n                debug!(%sync_leap_identifier, \"peers_not_asked_yet.is_empty()\");\n                RegisterLeapAttemptOutcome::DoNothing\n            } else {\n                debug!(%sync_leap_identifier, \"fetching sync leap from {} peers not asked yet\", peers_not_asked_yet.len());\n                RegisterLeapAttemptOutcome::FetchSyncLeapFromPeers(peers_not_asked_yet)\n            };\n        }\n\n        debug!(%sync_leap_identifier, \"fetching sync leap from {} peers\", peers_to_ask.len());\n        self.leap_activity = Some(LeapActivity::new(\n            sync_leap_identifier,\n            peers_to_ask\n                .iter()\n                .map(|peer| (*peer, PeerState::RequestSent))\n                .collect(),\n            Instant::now(),\n        ));\n        RegisterLeapAttemptOutcome::FetchSyncLeapFromPeers(peers_to_ask)\n    }\n\n    fn fetch_received(\n        &mut self,\n        sync_leap_identifier: SyncLeapIdentifier,\n        fetch_result: FetchResult<SyncLeap>,\n    ) -> Result<(), Error> {\n        let leap_activity = match &mut self.leap_activity {\n            Some(leap_activity) => leap_activity,\n            None => {\n                return Err(Error::UnexpectedSyncLeapResponse(sync_leap_identifier));\n            }\n        };\n\n        if leap_activity.sync_leap_identifier() != &sync_leap_identifier {\n            return Err(Error::SyncLeapIdentifierMismatch {\n                actual: sync_leap_identifier,\n                expected: *leap_activity.sync_leap_identifier(),\n    
        });\n        }\n\n        match fetch_result {\n            Ok(FetchedData::FromStorage { .. }) => {\n                Err(Error::FetchedSyncLeapFromStorage(sync_leap_identifier))\n            }\n            Ok(FetchedData::FromPeer { item, peer, .. }) => {\n                let peer_state = match leap_activity.peers_mut().get_mut(&peer) {\n                    Some(state) => state,\n                    None => {\n                        return Err(Error::ResponseFromUnknownPeer {\n                            peer,\n                            sync_leap_identifier,\n                        });\n                    }\n                };\n                *peer_state = PeerState::Fetched(Box::new(*item));\n                self.metrics.sync_leap_fetched_from_peer.inc();\n                Ok(())\n            }\n            Err(fetcher::Error::Rejected { peer, .. }) => {\n                let peer_state = match leap_activity.peers_mut().get_mut(&peer) {\n                    Some(state) => state,\n                    None => {\n                        return Err(Error::ResponseFromUnknownPeer {\n                            peer,\n                            sync_leap_identifier,\n                        });\n                    }\n                };\n                info!(%peer, %sync_leap_identifier, \"peer rejected our request for a sync leap\");\n                *peer_state = PeerState::Rejected;\n                self.metrics.sync_leap_rejected_by_peer.inc();\n                Ok(())\n            }\n            Err(error) => {\n                let peer = error.peer();\n                info!(?error, %peer, %sync_leap_identifier, \"failed to fetch a sync leap from peer\");\n                let peer_state = match leap_activity.peers_mut().get_mut(peer) {\n                    Some(state) => state,\n                    None => {\n                        return Err(Error::ResponseFromUnknownPeer {\n                            peer: *peer,\n                            
sync_leap_identifier,\n                        });\n                    }\n                };\n                *peer_state = PeerState::CouldntFetch;\n                self.metrics.sync_leap_cant_fetch.inc();\n                Ok(())\n            }\n        }\n    }\n}\n\nimpl<REv> Component<REv> for SyncLeaper\nwhere\n    REv: From<FetcherRequest<SyncLeap>> + Send,\n{\n    type Event = Event;\n\n    fn handle_event(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        _rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event> {\n        match event {\n            Event::AttemptLeap {\n                sync_leap_identifier,\n                peers_to_ask,\n            } => match self.register_leap_attempt(sync_leap_identifier, peers_to_ask) {\n                RegisterLeapAttemptOutcome::DoNothing => Effects::new(),\n                RegisterLeapAttemptOutcome::FetchSyncLeapFromPeers(peers) => {\n                    let mut effects = Effects::new();\n                    peers.into_iter().for_each(|peer| {\n                        effects.extend(\n                            effect_builder\n                                .fetch::<SyncLeap>(\n                                    sync_leap_identifier,\n                                    peer,\n                                    Box::new(SyncLeapValidationMetaData::from_chainspec(\n                                        self.chainspec.as_ref(),\n                                    )),\n                                )\n                                .event(move |fetch_result| Event::FetchedSyncLeapFromPeer {\n                                    sync_leap_identifier,\n                                    fetch_result,\n                                }),\n                        )\n                    });\n                    effects\n                }\n            },\n            Event::FetchedSyncLeapFromPeer {\n                sync_leap_identifier,\n                
fetch_result,\n            } => {\n                // Log potential error with proper severity and continue processing.\n                if let Err(error) = self.fetch_received(sync_leap_identifier, fetch_result) {\n                    match error {\n                        Error::FetchedSyncLeapFromStorage(_) => error!(%error),\n                        Error::UnexpectedSyncLeapResponse(_)\n                        | Error::SyncLeapIdentifierMismatch { .. }\n                        | Error::ResponseFromUnknownPeer { .. } => warn!(%error),\n                    }\n                }\n                Effects::new()\n            }\n        }\n    }\n\n    fn name(&self) -> &str {\n        COMPONENT_NAME\n    }\n}\n\n#[cfg(test)]\nimpl SyncLeaper {\n    fn peers(&self) -> Option<Vec<(NodeId, PeerState)>> {\n        self.leap_activity\n            .as_ref()\n            .and_then(|leap_activity| {\n                let peers = leap_activity.peers();\n                if leap_activity.peers().is_empty() {\n                    None\n                } else {\n                    Some(peers.clone())\n                }\n            })\n            .map(|peers| peers.into_iter().collect::<Vec<_>>())\n    }\n}\n"
  },
  {
    "path": "node/src/components/transaction_acceptor/config.rs",
    "content": "use std::str::FromStr;\n\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse casper_types::TimeDiff;\n\nconst DEFAULT_TIMESTAMP_LEEWAY: &str = \"2sec\";\n\n/// Configuration options for accepting transactions.\n#[derive(Copy, Clone, Serialize, Deserialize, Debug, DataSize)]\npub struct Config {\n    /// The leeway allowed when considering whether a transaction is future-dated or not.\n    ///\n    /// To accommodate minor clock drift, transactions whose timestamps are within\n    /// `timestamp_leeway` in the future are still acceptable.\n    ///\n    /// The maximum value to which `timestamp_leeway` can be set is defined by the chainspec\n    /// setting `transactions.max_timestamp_leeway`.\n    pub timestamp_leeway: TimeDiff,\n}\n\nimpl Default for Config {\n    fn default() -> Self {\n        Config {\n            timestamp_leeway: TimeDiff::from_str(DEFAULT_TIMESTAMP_LEEWAY).unwrap(),\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/transaction_acceptor/error.rs",
    "content": "use datasize::DataSize;\nuse serde::Serialize;\nuse thiserror::Error;\n\nuse casper_binary_port::ErrorCode as BinaryPortErrorCode;\nuse casper_types::{\n    AddressableEntityHash, BlockHash, BlockHeader, Digest, EntityVersionKey, InitiatorAddr,\n    InvalidTransaction, PackageHash, Timestamp,\n};\n\n// `allow` can be removed once https://github.com/casper-network/casper-node/issues/3063 is fixed.\n#[allow(clippy::large_enum_variant)]\n#[derive(Debug, Error, Serialize)]\npub(crate) enum Error {\n    /// The block chain has no blocks.\n    #[error(\"block chain has no blocks\")]\n    EmptyBlockchain,\n\n    /// The deploy has an invalid transaction.\n    #[error(\"invalid transaction: {0}\")]\n    InvalidTransaction(#[from] InvalidTransaction),\n\n    /// The transaction is invalid due to missing or otherwise invalid parameters.\n    #[error(\n        \"{failure} at state root hash {:?} of block {:?} at height {block_height}\",\n        state_root_hash,\n        block_hash.inner(),\n    )]\n    Parameters {\n        state_root_hash: Digest,\n        block_hash: BlockHash,\n        block_height: u64,\n        failure: ParameterFailure,\n    },\n\n    /// The transaction received by the node from the client has expired.\n    #[error(\n        \"transaction received by the node expired at {expiry_timestamp} with node's time at \\\n        {current_node_timestamp}\"\n    )]\n    Expired {\n        /// The timestamp when the transaction expired.\n        expiry_timestamp: Timestamp,\n        /// The timestamp when the node validated the expiry timestamp.\n        current_node_timestamp: Timestamp,\n    },\n\n    /// Component state error: expected a deploy.\n    #[error(\"internal error: expected a deploy\")]\n    ExpectedDeploy,\n\n    /// Component state error: expected a version 1 transaction.\n    #[error(\"internal error: expected a transaction\")]\n    ExpectedTransactionV1,\n}\n\nimpl Error {\n    pub(super) fn parameter_failure(block_header: 
&BlockHeader, failure: ParameterFailure) -> Self {\n        Error::Parameters {\n            state_root_hash: *block_header.state_root_hash(),\n            block_hash: block_header.block_hash(),\n            block_height: block_header.height(),\n            failure,\n        }\n    }\n}\n\nimpl From<Error> for BinaryPortErrorCode {\n    fn from(err: Error) -> Self {\n        match err {\n            Error::EmptyBlockchain => BinaryPortErrorCode::EmptyBlockchain,\n            Error::ExpectedDeploy => BinaryPortErrorCode::ExpectedDeploy,\n            Error::ExpectedTransactionV1 => BinaryPortErrorCode::ExpectedTransaction,\n            Error::Expired { .. } => BinaryPortErrorCode::TransactionExpired,\n            Error::Parameters { failure, .. } => match failure {\n                ParameterFailure::NoSuchAddressableEntity { .. } => {\n                    BinaryPortErrorCode::NoSuchAddressableEntity\n                }\n                ParameterFailure::NoSuchContractAtHash { .. } => {\n                    BinaryPortErrorCode::NoSuchContractAtHash\n                }\n                ParameterFailure::NoSuchEntryPoint { .. } => BinaryPortErrorCode::NoSuchEntryPoint,\n                ParameterFailure::NoSuchPackageAtHash { .. } => {\n                    BinaryPortErrorCode::NoSuchPackageAtHash\n                }\n                ParameterFailure::InvalidEntityAtVersion { .. } => {\n                    BinaryPortErrorCode::InvalidEntityAtVersion\n                }\n                ParameterFailure::DisabledEntityAtVersion { .. } => {\n                    BinaryPortErrorCode::DisabledEntityAtVersion\n                }\n                ParameterFailure::MissingEntityAtVersion { .. 
} => {\n                    BinaryPortErrorCode::MissingEntityAtVersion\n                }\n                ParameterFailure::InvalidAssociatedKeys => {\n                    BinaryPortErrorCode::InvalidAssociatedKeys\n                }\n                ParameterFailure::InsufficientSignatureWeight => {\n                    BinaryPortErrorCode::InsufficientSignatureWeight\n                }\n                ParameterFailure::InsufficientBalance { .. } => {\n                    BinaryPortErrorCode::InsufficientBalance\n                }\n                ParameterFailure::UnknownBalance { .. } => BinaryPortErrorCode::UnknownBalance,\n                ParameterFailure::Deploy(deploy_failure) => match deploy_failure {\n                    DeployParameterFailure::InvalidPaymentVariant => {\n                        BinaryPortErrorCode::DeployInvalidPaymentVariant\n                    }\n                    DeployParameterFailure::MissingPaymentAmount => {\n                        BinaryPortErrorCode::DeployMissingPaymentAmount\n                    }\n                    DeployParameterFailure::FailedToParsePaymentAmount => {\n                        BinaryPortErrorCode::DeployFailedToParsePaymentAmount\n                    }\n                    DeployParameterFailure::MissingTransferTarget => {\n                        BinaryPortErrorCode::DeployMissingTransferTarget\n                    }\n                    DeployParameterFailure::MissingModuleBytes => {\n                        BinaryPortErrorCode::DeployMissingModuleBytes\n                    }\n                },\n            },\n            Error::InvalidTransaction(invalid_transaction) => {\n                BinaryPortErrorCode::from(invalid_transaction)\n            }\n        }\n    }\n}\n\n/// A representation of the way in which a transaction failed parameter checks.\n#[derive(Clone, DataSize, Ord, PartialOrd, Eq, PartialEq, Hash, Debug, Error, Serialize)]\npub(crate) enum ParameterFailure {\n    /// No such 
addressable entity.\n    #[error(\"addressable entity under {initiator_addr} does not exist\")]\n    NoSuchAddressableEntity { initiator_addr: InitiatorAddr },\n    /// No such contract at given hash.\n    #[error(\"contract at {contract_hash} does not exist\")]\n    NoSuchContractAtHash {\n        contract_hash: AddressableEntityHash,\n    },\n    /// No such contract entrypoint.\n    #[error(\"contract does not have entry point '{entry_point_name}'\")]\n    NoSuchEntryPoint { entry_point_name: String },\n    /// No such package.\n    #[error(\"package at {package_hash} does not exist\")]\n    NoSuchPackageAtHash { package_hash: PackageHash },\n    /// Invalid contract at given version.\n    #[error(\"invalid entity at version key: {entity_version_key}\")]\n    InvalidEntityAtVersion {\n        entity_version_key: EntityVersionKey,\n    },\n    /// Invalid contract at given version.\n    #[error(\"disabled entity at version key: {entity_version_key}\")]\n    DisabledEntityAtVersion {\n        entity_version_key: EntityVersionKey,\n    },\n    /// Invalid contract at given version.\n    #[error(\"missing entity at version key: {entity_version_key}\")]\n    MissingEntityAtVersion {\n        entity_version_key: EntityVersionKey,\n    },\n    /// Invalid associated keys.\n    #[error(\"account authorization invalid\")]\n    InvalidAssociatedKeys,\n    /// Insufficient transaction signature weight.\n    #[error(\"insufficient transaction signature weight\")]\n    InsufficientSignatureWeight,\n    /// The transaction's addressable entity has insufficient balance.\n    #[error(\"insufficient balance in {initiator_addr}\")]\n    InsufficientBalance { initiator_addr: InitiatorAddr },\n    /// The balance of the transaction's addressable entity cannot be read.\n    #[error(\"unable to determine balance for {initiator_addr}\")]\n    UnknownBalance { initiator_addr: InitiatorAddr },\n    /// Error specific to `Deploy` parameters.\n    #[error(transparent)]\n    Deploy(#[from] 
DeployParameterFailure),\n}\n\n/// A representation of the way in which a deploy failed validation checks.\n#[derive(Clone, DataSize, Ord, PartialOrd, Eq, PartialEq, Hash, Debug, Error, Serialize)]\npub(crate) enum DeployParameterFailure {\n    /// Transfer is not valid for payment code.\n    #[error(\"transfer is not valid for payment code\")]\n    InvalidPaymentVariant,\n    /// Missing payment \"amount\" runtime argument.\n    #[error(\"missing payment 'amount' runtime argument\")]\n    MissingPaymentAmount,\n    /// Failed to parse payment \"amount\" runtime argument.\n    #[error(\"failed to parse payment 'amount' runtime argument as U512\")]\n    FailedToParsePaymentAmount,\n    /// Missing transfer \"target\" runtime argument.\n    #[error(\"missing transfer 'target' runtime argument\")]\n    MissingTransferTarget,\n    /// Module bytes for session code cannot be empty.\n    #[error(\"module bytes for session code cannot be empty\")]\n    MissingModuleBytes,\n}\n"
  },
  {
    "path": "node/src/components/transaction_acceptor/event.rs",
    "content": "use std::fmt::{self, Display, Formatter};\n\nuse serde::Serialize;\n\nuse casper_types::{\n    contracts::ProtocolVersionMajor, AddressableEntity, AddressableEntityHash, BlockHeader,\n    EntityVersion, Package, PackageHash, Timestamp, Transaction, U512,\n};\n\nuse super::{Error, Source};\nuse crate::{effect::Responder, types::MetaTransaction};\n\n/// A utility struct to hold duplicated information across events.\n#[derive(Debug, Serialize)]\npub(crate) struct EventMetadata {\n    pub(crate) transaction: Transaction,\n    pub(crate) meta_transaction: MetaTransaction,\n    pub(crate) source: Source,\n    pub(crate) maybe_responder: Option<Responder<Result<(), Error>>>,\n    pub(crate) verification_start_timestamp: Timestamp,\n}\n\nimpl EventMetadata {\n    pub(crate) fn new(\n        transaction: Transaction,\n        meta_transaction: MetaTransaction,\n        source: Source,\n        maybe_responder: Option<Responder<Result<(), Error>>>,\n        verification_start_timestamp: Timestamp,\n    ) -> Self {\n        EventMetadata {\n            transaction,\n            meta_transaction,\n            source,\n            maybe_responder,\n            verification_start_timestamp,\n        }\n    }\n}\n\n/// `TransactionAcceptor` events.\n#[derive(Debug, Serialize)]\npub(crate) enum Event {\n    /// The initiating event to accept a new `Transaction`.\n    Accept {\n        transaction: Transaction,\n        source: Source,\n        maybe_responder: Option<Responder<Result<(), Error>>>,\n    },\n    /// The result of the `TransactionAcceptor` putting a `Transaction` to the storage\n    /// component.\n    PutToStorageResult {\n        event_metadata: Box<EventMetadata>,\n        is_new: bool,\n    },\n    /// The result of the `TransactionAcceptor` storing the approvals from a `Transaction`\n    /// provided by a peer.\n    StoredFinalizedApprovals {\n        event_metadata: Box<EventMetadata>,\n        is_new: bool,\n    },\n    /// The result of 
querying the highest available `BlockHeader` from the storage component.\n    GetBlockHeaderResult {\n        event_metadata: Box<EventMetadata>,\n        maybe_block_header: Option<Box<BlockHeader>>,\n    },\n    /// The result of querying global state for the `AddressableEntity` associated with the\n    /// `Transaction`'s execution context (previously known as the account).\n    GetAddressableEntityResult {\n        event_metadata: Box<EventMetadata>,\n        block_header: Box<BlockHeader>,\n        maybe_entity: Option<AddressableEntity>,\n    },\n    /// The result of querying the balance of the `AddressableEntity` associated with the\n    /// `Transaction`.\n    GetBalanceResult {\n        event_metadata: Box<EventMetadata>,\n        block_header: Box<BlockHeader>,\n        maybe_balance: Option<U512>,\n    },\n    /// The result of querying global state for a `Contract` to verify the executable logic.\n    GetContractResult {\n        event_metadata: Box<EventMetadata>,\n        block_header: Box<BlockHeader>,\n        is_payment: bool,\n        contract_hash: AddressableEntityHash,\n        maybe_entity: Option<AddressableEntity>,\n    },\n    /// The result of querying global state for a `Package` to verify the executable logic.\n    GetPackageResult {\n        event_metadata: Box<EventMetadata>,\n        block_header: Box<BlockHeader>,\n        is_payment: bool,\n        package_hash: PackageHash,\n        maybe_entity_version: Option<EntityVersion>,\n        maybe_protocol_version_major: Option<ProtocolVersionMajor>,\n        maybe_package: Option<Box<Package>>,\n    },\n    /// The result of querying global state for an `EntryPoint` to verify the executable logic.\n    GetEntryPointResult {\n        event_metadata: Box<EventMetadata>,\n        block_header: Box<BlockHeader>,\n        is_payment: bool,\n        entry_point_name: String,\n        addressable_entity: AddressableEntity,\n        entry_point_exists: bool,\n    },\n}\n\nimpl Display for 
Event {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            Event::Accept {\n                transaction,\n                source,\n                ..\n            } => {\n                write!(formatter, \"accept {} from {}\", transaction.hash(), source)\n            }\n            Event::PutToStorageResult {\n                event_metadata,\n                is_new,\n                ..\n            } => {\n                if *is_new {\n                    write!(\n                        formatter,\n                        \"put new {} to storage\",\n                        event_metadata.transaction.hash()\n                    )\n                } else {\n                    write!(\n                        formatter,\n                        \"had already stored {}\",\n                        event_metadata.transaction.hash()\n                    )\n                }\n            }\n            Event::StoredFinalizedApprovals {\n                event_metadata,\n                is_new,\n                ..\n            } => {\n                if *is_new {\n                    write!(\n                        formatter,\n                        \"put new finalized approvals {} to storage\",\n                        event_metadata.transaction.hash()\n                    )\n                } else {\n                    write!(\n                        formatter,\n                        \"had already stored finalized approvals for {}\",\n                        event_metadata.transaction.hash()\n                    )\n                }\n            }\n            Event::GetBlockHeaderResult { event_metadata, .. 
} => {\n                write!(\n                    formatter,\n                    \"received highest block from storage to validate transaction with hash {}\",\n                    event_metadata.transaction.hash()\n                )\n            }\n            Event::GetAddressableEntityResult { event_metadata, .. } => {\n                write!(\n                    formatter,\n                    \"verifying addressable entity to validate transaction with hash {}\",\n                    event_metadata.transaction.hash()\n                )\n            }\n            Event::GetBalanceResult { event_metadata, .. } => {\n                write!(\n                    formatter,\n                    \"verifying account balance to validate transaction with hash {}\",\n                    event_metadata.transaction.hash()\n                )\n            }\n            Event::GetContractResult {\n                event_metadata,\n                block_header,\n                ..\n            } => {\n                write!(\n                    formatter,\n                    \"verifying contract to validate transaction with hash {} with state hash {}\",\n                    event_metadata.transaction.hash(),\n                    block_header.state_root_hash()\n                )\n            }\n            Event::GetPackageResult {\n                event_metadata,\n                block_header,\n                ..\n            } => {\n                write!(\n                    formatter,\n                    \"verifying package to validate transaction with hash {} with state hash {}\",\n                    event_metadata.transaction.hash(),\n                    block_header.state_root_hash()\n                )\n            }\n            Event::GetEntryPointResult {\n                event_metadata,\n                block_header,\n                ..\n            } => {\n                write!(\n                    formatter,\n                    \"verifying entry point 
to validate transaction with hash {} with state hash {}\",\n                    event_metadata.transaction.hash(),\n                    block_header.state_root_hash(),\n                )\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/transaction_acceptor/metrics.rs",
    "content": "use prometheus::{Histogram, Registry};\n\nuse casper_types::Timestamp;\n\nuse crate::{unregister_metric, utils};\n\nconst TRANSACTION_ACCEPTED_NAME: &str = \"transaction_acceptor_accepted_transaction\";\nconst TRANSACTION_ACCEPTED_HELP: &str =\n    \"time in seconds to accept a transaction in the transaction acceptor\";\nconst TRANSACTION_REJECTED_NAME: &str = \"transaction_acceptor_rejected_transaction\";\nconst TRANSACTION_REJECTED_HELP: &str =\n    \"time in seconds to reject a transaction in the transaction acceptor\";\n\n/// Value of upper bound of the first bucket. In ms.\nconst EXPONENTIAL_BUCKET_START_MS: f64 = 10.0;\n\n/// Multiplier of previous upper bound for next bound.\nconst EXPONENTIAL_BUCKET_FACTOR: f64 = 2.0;\n\n/// Bucket count, with the last bucket going to +Inf which will not be included in the results.\nconst EXPONENTIAL_BUCKET_COUNT: usize = 10;\n\n#[derive(Debug)]\npub(super) struct Metrics {\n    transaction_accepted: Histogram,\n    transaction_rejected: Histogram,\n    registry: Registry,\n}\n\nimpl Metrics {\n    pub(super) fn new(registry: &Registry) -> Result<Self, prometheus::Error> {\n        let common_buckets = prometheus::exponential_buckets(\n            EXPONENTIAL_BUCKET_START_MS,\n            EXPONENTIAL_BUCKET_FACTOR,\n            EXPONENTIAL_BUCKET_COUNT,\n        )?;\n\n        Ok(Self {\n            transaction_accepted: utils::register_histogram_metric(\n                registry,\n                TRANSACTION_ACCEPTED_NAME,\n                TRANSACTION_ACCEPTED_HELP,\n                common_buckets.clone(),\n            )?,\n            transaction_rejected: utils::register_histogram_metric(\n                registry,\n                TRANSACTION_REJECTED_NAME,\n                TRANSACTION_REJECTED_HELP,\n                common_buckets,\n            )?,\n            registry: registry.clone(),\n        })\n    }\n\n    pub(super) fn observe_rejected(&self, start: Timestamp) {\n        
self.transaction_rejected\n            .observe(start.elapsed().millis() as f64);\n    }\n\n    pub(super) fn observe_accepted(&self, start: Timestamp) {\n        self.transaction_accepted\n            .observe(start.elapsed().millis() as f64);\n    }\n}\n\nimpl Drop for Metrics {\n    fn drop(&mut self) {\n        unregister_metric!(self.registry, self.transaction_accepted);\n        unregister_metric!(self.registry, self.transaction_rejected);\n    }\n}\n"
  },
  {
    "path": "node/src/components/transaction_acceptor/tests.rs",
    "content": "#![cfg(test)]\n\nuse std::{\n    collections::{BTreeMap, VecDeque},\n    fmt::{self, Debug, Display, Formatter},\n    iter,\n    sync::Arc,\n    time::Duration,\n};\n\nuse derive_more::From;\nuse futures::{\n    channel::oneshot::{self, Sender},\n    FutureExt,\n};\nuse prometheus::Registry;\nuse reactor::ReactorEvent;\nuse serde::Serialize;\nuse tempfile::TempDir;\nuse thiserror::Error;\nuse tokio::time;\n\nuse casper_storage::{\n    data_access_layer::{\n        AddressableEntityResult, BalanceIdentifier, BalanceResult, EntryPointExistsResult,\n        ProofsResult, QueryResult,\n    },\n    tracking_copy::TrackingCopyError,\n};\nuse casper_types::{\n    account::{Account, AccountHash, ActionThresholds, AssociatedKeys, Weight},\n    addressable_entity::AddressableEntity,\n    bytesrepr::Bytes,\n    contracts::{\n        ContractHash, ContractPackage, ContractPackageStatus, ContractVersionKey, NamedKeys,\n    },\n    global_state::TrieMerkleProof,\n    testing::TestRng,\n    Block, BlockV2, CLValue, Chainspec, ChainspecRawBytes, Contract, Deploy, EraId, Groups,\n    HashAddr, InvalidDeploy, InvalidTransaction, InvalidTransactionV1, Key, PackageAddr,\n    PricingHandling, PricingMode, ProtocolVersion, PublicKey, SecretKey, StoredValue,\n    TestBlockBuilder, TimeDiff, Timestamp, Transaction, TransactionArgs, TransactionConfig,\n    TransactionRuntimeParams, TransactionV1, URef, DEFAULT_BASELINE_MOTES_AMOUNT,\n};\n\nuse super::*;\nuse crate::{\n    components::{\n        network::Identity as NetworkIdentity,\n        storage::{self, Storage},\n    },\n    consensus::tests::utils::{ALICE_PUBLIC_KEY, BOB_PUBLIC_KEY, CAROL_PUBLIC_KEY},\n    effect::{\n        announcements::{ControlAnnouncement, TransactionAcceptorAnnouncement},\n        requests::{\n            ContractRuntimeRequest, MakeBlockExecutableRequest, MarkBlockCompletedRequest,\n            NetworkRequest,\n        },\n        Responder,\n    },\n    logging,\n    protocol::Message,\n    
reactor::{self, EventQueueHandle, QueueKind, Runner, TryCrankOutcome},\n    testing::ConditionCheckReactor,\n    types::{transaction::transaction_v1_builder::TransactionV1Builder, NodeId},\n    utils::{Loadable, WithDir},\n    NodeRng,\n};\n\nconst POLL_INTERVAL: Duration = Duration::from_millis(10);\nconst TIMEOUT: Duration = Duration::from_secs(30);\n\n/// Top-level event for the reactor.\n#[derive(Debug, From, Serialize)]\n#[allow(clippy::large_enum_variant)]\n#[must_use]\nenum Event {\n    #[from]\n    Storage(#[serde(skip_serializing)] storage::Event),\n    #[from]\n    TransactionAcceptor(#[serde(skip_serializing)] super::Event),\n    ControlAnnouncement(ControlAnnouncement),\n    #[from]\n    FatalAnnouncement(FatalAnnouncement),\n    #[from]\n    TransactionAcceptorAnnouncement(#[serde(skip_serializing)] TransactionAcceptorAnnouncement),\n    #[from]\n    ContractRuntime(#[serde(skip_serializing)] ContractRuntimeRequest),\n    #[from]\n    StorageRequest(StorageRequest),\n    #[from]\n    NetworkRequest(NetworkRequest<Message>),\n}\n\nimpl From<MakeBlockExecutableRequest> for Event {\n    fn from(request: MakeBlockExecutableRequest) -> Self {\n        Event::Storage(storage::Event::MakeBlockExecutableRequest(Box::new(\n            request,\n        )))\n    }\n}\n\nimpl From<MarkBlockCompletedRequest> for Event {\n    fn from(request: MarkBlockCompletedRequest) -> Self {\n        Event::Storage(storage::Event::MarkBlockCompletedRequest(request))\n    }\n}\n\nimpl From<ControlAnnouncement> for Event {\n    fn from(control_announcement: ControlAnnouncement) -> Self {\n        Event::ControlAnnouncement(control_announcement)\n    }\n}\n\nimpl ReactorEvent for Event {\n    fn is_control(&self) -> bool {\n        matches!(self, Event::ControlAnnouncement(_))\n    }\n\n    fn try_into_control(self) -> Option<ControlAnnouncement> {\n        if let Self::ControlAnnouncement(ctrl_ann) = self {\n            Some(ctrl_ann)\n        } else {\n            None\n        
}\n    }\n}\n\nimpl Display for Event {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            Event::Storage(event) => write!(formatter, \"storage: {}\", event),\n            Event::TransactionAcceptor(event) => {\n                write!(formatter, \"transaction acceptor: {}\", event)\n            }\n            Event::ControlAnnouncement(ctrl_ann) => write!(formatter, \"control: {}\", ctrl_ann),\n            Event::FatalAnnouncement(fatal_ann) => write!(formatter, \"fatal: {}\", fatal_ann),\n            Event::TransactionAcceptorAnnouncement(ann) => {\n                write!(formatter, \"transaction-acceptor announcement: {}\", ann)\n            }\n\n            Event::ContractRuntime(event) => {\n                write!(formatter, \"contract-runtime event: {:?}\", event)\n            }\n            Event::StorageRequest(request) => write!(formatter, \"storage request: {:?}\", request),\n            Event::NetworkRequest(request) => write!(formatter, \"network request: {:?}\", request),\n        }\n    }\n}\n\n/// Error type returned by the test reactor.\n#[derive(Debug, Error)]\nenum Error {\n    #[error(\"prometheus (metrics) error: {0}\")]\n    Metrics(#[from] prometheus::Error),\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\nenum ContractScenario {\n    Valid,\n    MissingContractAtHash,\n    MissingContractAtName,\n    MissingEntryPoint,\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\nenum HashOrName {\n    Hash,\n    Name,\n}\n#[derive(Clone, PartialEq, Eq, Debug)]\nenum ContractVersionExistance {\n    PackageDoesNotExist,\n    PackageExists(\n        bool,\n        BTreeMap<ContractVersionKey, ContractHash>,\n        BTreeSet<ContractVersionKey>,\n    ),\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\nenum ContractPackageScenario {\n    Valid,\n    MissingPackageAtHash,\n    MissingPackageAtName,\n    MissingContractVersion,\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\nenum TxnType {\n    Deploy,\n    
V1,\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\nenum TestScenario {\n    FromPeerInvalidTransaction(TxnType),\n    FromPeerInvalidTransactionZeroPayment(TxnType),\n    FromPeerExpired(TxnType),\n    FromPeerValidTransaction(TxnType),\n    FromPeerRepeatedValidTransaction(TxnType),\n    FromPeerMissingAccount(TxnType),\n    FromPeerAccountWithInsufficientWeight(TxnType),\n    FromPeerAccountWithInvalidAssociatedKeys(TxnType),\n    FromPeerCustomPaymentContract(ContractScenario),\n    FromPeerCustomPaymentContractPackage(ContractPackageScenario),\n    FromPeerSessionContract(TxnType, ContractScenario),\n    FromPeerSessionContractPackage(TxnType, ContractPackageScenario),\n    FromClientInvalidTransaction(TxnType),\n    FromClientInvalidTransactionZeroPayment(TxnType),\n    FromClientSlightlyFutureDatedTransaction(TxnType),\n    FromClientFutureDatedTransaction(TxnType),\n    FromClientExpired(TxnType),\n    FromClientMissingAccount(TxnType),\n    FromClientInsufficientBalance(TxnType),\n    FromClientValidTransaction(TxnType),\n    FromClientRepeatedValidTransaction(TxnType),\n    FromClientAccountWithInsufficientWeight(TxnType),\n    FromClientAccountWithInvalidAssociatedKeys(TxnType),\n    AccountWithUnknownBalance,\n    FromClientCustomPaymentContract(ContractScenario),\n    FromClientCustomPaymentContractPackage(ContractPackageScenario),\n    FromClientSessionContract(TxnType, ContractScenario),\n    FromClientSessionContractPackage(TxnType, ContractPackageScenario),\n    FromClientSignedByAdmin(TxnType),\n    DeployWithNativeTransferInPayment,\n    DeployWithEmptySessionModuleBytes,\n    DeployWithoutPaymentAmount,\n    DeployWithMangledPaymentAmount,\n    DeployWithMangledTransferAmount,\n    DeployWithoutTransferTarget,\n    DeployWithoutTransferAmount,\n    DeployWithPaymentOne,\n    BalanceCheckForDeploySentByPeer,\n    InvalidPricingModeForTransactionV1,\n    TooLowGasPriceToleranceForTransactionV1,\n    TransactionWithPaymentOne,\n    
TooLowGasPriceToleranceForDeploy,\n    InvalidFields,\n    InvalidFieldsFromPeer,\n    InvalidArgumentsKind,\n    WasmTransactionWithTooBigPayment,\n    WasmDeployWithTooBigPayment,\n    RedelegateExceedingMaximumDelegation,\n    DelegateExceedingMaximumDelegation,\n    V1ByPackage(\n        HashOrName,\n        Option<EntityVersion>,\n        Option<ProtocolVersionMajor>,\n        ContractVersionExistance,\n    ),\n    VmCasperV2ByPackageHash,\n}\n\nimpl TestScenario {\n    fn source(&self, rng: &mut NodeRng) -> Source {\n        match self {\n            TestScenario::FromPeerInvalidTransaction(_)\n            | TestScenario::FromPeerInvalidTransactionZeroPayment(_)\n            | TestScenario::FromPeerExpired(_)\n            | TestScenario::FromPeerValidTransaction(_)\n            | TestScenario::FromPeerRepeatedValidTransaction(_)\n            | TestScenario::BalanceCheckForDeploySentByPeer\n            | TestScenario::FromPeerMissingAccount(_)\n            | TestScenario::FromPeerAccountWithInsufficientWeight(_)\n            | TestScenario::FromPeerAccountWithInvalidAssociatedKeys(_)\n            | TestScenario::FromPeerCustomPaymentContract(_)\n            | TestScenario::FromPeerCustomPaymentContractPackage(_)\n            | TestScenario::FromPeerSessionContract(..)\n            | TestScenario::FromPeerSessionContractPackage(..)\n            | TestScenario::InvalidFieldsFromPeer => Source::Peer(NodeId::random(rng)),\n            TestScenario::FromClientInvalidTransaction(_)\n            | TestScenario::FromClientInvalidTransactionZeroPayment(_)\n            | TestScenario::FromClientSlightlyFutureDatedTransaction(_)\n            | TestScenario::FromClientFutureDatedTransaction(_)\n            | TestScenario::FromClientExpired(_)\n            | TestScenario::FromClientMissingAccount(_)\n            | TestScenario::FromClientInsufficientBalance(_)\n            | TestScenario::FromClientValidTransaction(_)\n            | 
TestScenario::FromClientRepeatedValidTransaction(_)\n            | TestScenario::FromClientAccountWithInsufficientWeight(_)\n            | TestScenario::FromClientAccountWithInvalidAssociatedKeys(_)\n            | TestScenario::AccountWithUnknownBalance\n            | TestScenario::DeployWithoutPaymentAmount\n            | TestScenario::DeployWithMangledPaymentAmount\n            | TestScenario::DeployWithMangledTransferAmount\n            | TestScenario::DeployWithoutTransferAmount\n            | TestScenario::DeployWithPaymentOne\n            | TestScenario::DeployWithoutTransferTarget\n            | TestScenario::FromClientCustomPaymentContract(_)\n            | TestScenario::FromClientCustomPaymentContractPackage(_)\n            | TestScenario::FromClientSessionContract(..)\n            | TestScenario::FromClientSessionContractPackage(..)\n            | TestScenario::FromClientSignedByAdmin(_)\n            | TestScenario::DeployWithEmptySessionModuleBytes\n            | TestScenario::DeployWithNativeTransferInPayment\n            | TestScenario::InvalidPricingModeForTransactionV1\n            | TestScenario::TooLowGasPriceToleranceForTransactionV1\n            | TestScenario::TooLowGasPriceToleranceForDeploy\n            | TestScenario::TransactionWithPaymentOne\n            | TestScenario::InvalidFields\n            | TestScenario::InvalidArgumentsKind\n            | TestScenario::WasmTransactionWithTooBigPayment\n            | TestScenario::WasmDeployWithTooBigPayment\n            | TestScenario::RedelegateExceedingMaximumDelegation\n            | TestScenario::DelegateExceedingMaximumDelegation\n            | TestScenario::VmCasperV2ByPackageHash\n            | TestScenario::V1ByPackage(..) 
=> Source::Client,\n        }\n    }\n\n    fn transaction(&self, rng: &mut TestRng, admin: &SecretKey) -> Transaction {\n        let secret_key = SecretKey::random(rng);\n        match self {\n            TestScenario::FromPeerInvalidTransaction(TxnType::Deploy)\n            | TestScenario::FromClientInvalidTransaction(TxnType::Deploy) => {\n                let mut deploy = Deploy::random_valid_native_transfer(rng);\n                deploy.invalidate();\n                Transaction::from(deploy)\n            }\n            TestScenario::FromPeerInvalidTransaction(TxnType::V1)\n            | TestScenario::FromClientInvalidTransaction(TxnType::V1) => {\n                let mut txn = TransactionV1::random(rng);\n                txn.invalidate();\n                Transaction::from(txn)\n            }\n            TestScenario::FromClientInvalidTransactionZeroPayment(TxnType::V1) => {\n                let txn = TransactionV1Builder::new_session(\n                    false,\n                    Bytes::from(vec![1]),\n                    TransactionRuntimeParams::VmCasperV1,\n                )\n                .with_pricing_mode(PricingMode::PaymentLimited {\n                    standard_payment: true,\n                    gas_price_tolerance: 5,\n                    payment_amount: 0,\n                })\n                .with_chain_name(\"casper-example\")\n                .with_timestamp(Timestamp::now())\n                .with_secret_key(&secret_key)\n                .build()\n                .unwrap();\n                Transaction::from(txn)\n            }\n            TestScenario::FromPeerInvalidTransactionZeroPayment(TxnType::V1) => {\n                let txn = TransactionV1Builder::new_session(\n                    false,\n                    Bytes::from(vec![1]),\n                    TransactionRuntimeParams::VmCasperV1,\n                )\n                .with_pricing_mode(PricingMode::PaymentLimited {\n                    standard_payment: true,\n            
        gas_price_tolerance: 5,\n                    payment_amount: 0,\n                })\n                .with_chain_name(\"casper-example\")\n                .with_timestamp(Timestamp::now())\n                .with_secret_key(&secret_key)\n                .build()\n                .unwrap();\n                Transaction::from(txn)\n            }\n            TestScenario::FromClientInvalidTransactionZeroPayment(TxnType::Deploy) => {\n                Transaction::from(Deploy::random_without_payment_amount(rng))\n            }\n            TestScenario::FromPeerInvalidTransactionZeroPayment(TxnType::Deploy) => {\n                Transaction::from(Deploy::random_without_payment_amount(rng))\n            }\n            TestScenario::FromPeerExpired(TxnType::Deploy)\n            | TestScenario::FromClientExpired(TxnType::Deploy) => {\n                Transaction::from(Deploy::random_expired_deploy(rng))\n            }\n            TestScenario::FromPeerExpired(TxnType::V1)\n            | TestScenario::FromClientExpired(TxnType::V1) => {\n                let txn = TransactionV1Builder::new_session(\n                    false,\n                    Bytes::from(vec![1]),\n                    TransactionRuntimeParams::VmCasperV1,\n                )\n                .with_chain_name(\"casper-example\")\n                .with_timestamp(Timestamp::zero())\n                .with_secret_key(&secret_key)\n                .build()\n                .unwrap();\n                Transaction::from(txn)\n            }\n            TestScenario::FromPeerValidTransaction(txn_type)\n            | TestScenario::FromPeerRepeatedValidTransaction(txn_type)\n            | TestScenario::FromPeerMissingAccount(txn_type)\n            | TestScenario::FromPeerAccountWithInvalidAssociatedKeys(txn_type)\n            | TestScenario::FromPeerAccountWithInsufficientWeight(txn_type)\n            | TestScenario::FromClientMissingAccount(txn_type)\n            | 
TestScenario::FromClientInsufficientBalance(txn_type)\n            | TestScenario::FromClientValidTransaction(txn_type)\n            | TestScenario::FromClientRepeatedValidTransaction(txn_type)\n            | TestScenario::FromClientAccountWithInvalidAssociatedKeys(txn_type)\n            | TestScenario::FromClientAccountWithInsufficientWeight(txn_type) => match txn_type {\n                TxnType::Deploy => Transaction::from(Deploy::random_valid_native_transfer(rng)),\n                TxnType::V1 => {\n                    let txn = TransactionV1Builder::new_session(\n                        false,\n                        Bytes::from(vec![1]),\n                        TransactionRuntimeParams::VmCasperV1,\n                    )\n                    .with_chain_name(\"casper-example\")\n                    .with_timestamp(Timestamp::now())\n                    .with_secret_key(&secret_key)\n                    .build()\n                    .unwrap();\n                    Transaction::from(txn)\n                }\n            },\n            TestScenario::FromClientSignedByAdmin(TxnType::Deploy) => {\n                let mut deploy = Deploy::random_valid_native_transfer(rng);\n                deploy.sign(admin);\n                Transaction::from(deploy)\n            }\n            TestScenario::FromClientSignedByAdmin(TxnType::V1) => {\n                let txn = TransactionV1Builder::new_session(\n                    false,\n                    Bytes::from(vec![1]),\n                    TransactionRuntimeParams::VmCasperV1,\n                )\n                .with_chain_name(\"casper-example\")\n                .with_timestamp(Timestamp::now())\n                .with_secret_key(admin)\n                .build()\n                .unwrap();\n                Transaction::from(txn)\n            }\n            TestScenario::AccountWithUnknownBalance\n            | TestScenario::BalanceCheckForDeploySentByPeer => {\n                
Transaction::from(Deploy::random_valid_native_transfer(rng))\n            }\n            TestScenario::DeployWithoutPaymentAmount => {\n                Transaction::from(Deploy::random_without_payment_amount(rng))\n            }\n            TestScenario::DeployWithMangledPaymentAmount => {\n                Transaction::from(Deploy::random_with_mangled_payment_amount(rng))\n            }\n            TestScenario::DeployWithoutTransferTarget => {\n                Transaction::from(Deploy::random_without_transfer_target(rng))\n            }\n            TestScenario::DeployWithoutTransferAmount => {\n                Transaction::from(Deploy::random_without_transfer_amount(rng))\n            }\n            TestScenario::DeployWithMangledTransferAmount => {\n                Transaction::from(Deploy::random_with_mangled_transfer_amount(rng))\n            }\n            TestScenario::DeployWithPaymentOne => {\n                Transaction::from(Deploy::random_with_payment_one(rng))\n            }\n            TestScenario::TransactionWithPaymentOne => {\n                let timestamp = Timestamp::now()\n                    + Config::default().timestamp_leeway\n                    + TimeDiff::from_millis(1000);\n                let ttl = TimeDiff::from_seconds(300);\n                let txn = TransactionV1Builder::new_session(\n                    false,\n                    Bytes::from(vec![1]),\n                    TransactionRuntimeParams::VmCasperV1,\n                )\n                .with_pricing_mode(PricingMode::PaymentLimited {\n                    payment_amount: 1u64,\n                    gas_price_tolerance: 2,\n                    standard_payment: true,\n                })\n                .with_chain_name(\"casper-example\")\n                .with_timestamp(timestamp)\n                .with_ttl(ttl)\n                .with_secret_key(&secret_key)\n                .build()\n                .unwrap();\n                Transaction::from(txn)\n            }\n   
         TestScenario::FromPeerCustomPaymentContract(contract_scenario)\n            | TestScenario::FromClientCustomPaymentContract(contract_scenario) => {\n                match contract_scenario {\n                    ContractScenario::Valid | ContractScenario::MissingContractAtName => {\n                        Transaction::from(\n                            Deploy::random_with_valid_custom_payment_contract_by_name(rng),\n                        )\n                    }\n                    ContractScenario::MissingEntryPoint => Transaction::from(\n                        Deploy::random_with_missing_entry_point_in_payment_contract(rng),\n                    ),\n                    ContractScenario::MissingContractAtHash => {\n                        Transaction::from(Deploy::random_with_missing_payment_contract_by_hash(rng))\n                    }\n                }\n            }\n            TestScenario::FromPeerCustomPaymentContractPackage(contract_package_scenario)\n            | TestScenario::FromClientCustomPaymentContractPackage(contract_package_scenario) => {\n                match contract_package_scenario {\n                    ContractPackageScenario::Valid\n                    | ContractPackageScenario::MissingPackageAtName => Transaction::from(\n                        Deploy::random_with_valid_custom_payment_package_by_name(rng),\n                    ),\n                    ContractPackageScenario::MissingPackageAtHash => {\n                        Transaction::from(Deploy::random_with_missing_payment_package_by_hash(rng))\n                    }\n                    ContractPackageScenario::MissingContractVersion => Transaction::from(\n                        Deploy::random_with_nonexistent_contract_version_in_payment_package(rng),\n                    ),\n                }\n            }\n            TestScenario::FromPeerSessionContract(TxnType::Deploy, contract_scenario)\n            | TestScenario::FromClientSessionContract(TxnType::Deploy, 
contract_scenario) => {\n                match contract_scenario {\n                    ContractScenario::Valid | ContractScenario::MissingContractAtName => {\n                        Transaction::from(Deploy::random_with_valid_session_contract_by_name(rng))\n                    }\n                    ContractScenario::MissingContractAtHash => {\n                        Transaction::from(Deploy::random_with_missing_session_contract_by_hash(rng))\n                    }\n                    ContractScenario::MissingEntryPoint => Transaction::from(\n                        Deploy::random_with_missing_entry_point_in_session_contract(rng),\n                    ),\n                }\n            }\n            TestScenario::FromPeerSessionContract(TxnType::V1, contract_scenario)\n            | TestScenario::FromClientSessionContract(TxnType::V1, contract_scenario) => {\n                match contract_scenario {\n                    ContractScenario::Valid | ContractScenario::MissingContractAtName => {\n                        let txn = TransactionV1Builder::new_targeting_invocable_entity_via_alias(\n                            \"Test\",\n                            \"call\",\n                            TransactionRuntimeParams::VmCasperV1,\n                        )\n                        .with_chain_name(\"casper-example\")\n                        .with_timestamp(Timestamp::now())\n                        .with_secret_key(&secret_key)\n                        .build()\n                        .unwrap();\n                        Transaction::from(txn)\n                    }\n                    ContractScenario::MissingContractAtHash => {\n                        let txn = TransactionV1Builder::new_targeting_invocable_entity(\n                            AddressableEntityHash::new(HashAddr::default()),\n                            \"call\",\n                            TransactionRuntimeParams::VmCasperV1,\n                        )\n                        
.with_chain_name(\"casper-example\")\n                        .with_timestamp(Timestamp::now())\n                        .with_secret_key(&secret_key)\n                        .build()\n                        .unwrap();\n                        Transaction::from(txn)\n                    }\n                    ContractScenario::MissingEntryPoint => {\n                        let txn = TransactionV1Builder::new_targeting_invocable_entity(\n                            AddressableEntityHash::new(HashAddr::default()),\n                            \"non-existent-entry-point\",\n                            TransactionRuntimeParams::VmCasperV1,\n                        )\n                        .with_chain_name(\"casper-example\")\n                        .with_timestamp(Timestamp::now())\n                        .with_secret_key(&secret_key)\n                        .build()\n                        .unwrap();\n                        Transaction::from(txn)\n                    }\n                }\n            }\n            TestScenario::FromPeerSessionContractPackage(\n                TxnType::Deploy,\n                contract_package_scenario,\n            )\n            | TestScenario::FromClientSessionContractPackage(\n                TxnType::Deploy,\n                contract_package_scenario,\n            ) => match contract_package_scenario {\n                ContractPackageScenario::Valid | ContractPackageScenario::MissingPackageAtName => {\n                    Transaction::from(Deploy::random_with_valid_session_package_by_name(rng))\n                }\n                ContractPackageScenario::MissingPackageAtHash => {\n                    Transaction::from(Deploy::random_with_missing_session_package_by_hash(rng))\n                }\n                ContractPackageScenario::MissingContractVersion => Transaction::from(\n                    Deploy::random_with_nonexistent_contract_version_in_payment_package(rng),\n                ),\n            },\n            
TestScenario::FromPeerSessionContractPackage(\n                TxnType::V1,\n                contract_package_scenario,\n            )\n            | TestScenario::FromClientSessionContractPackage(\n                TxnType::V1,\n                contract_package_scenario,\n            ) => match contract_package_scenario {\n                ContractPackageScenario::Valid | ContractPackageScenario::MissingPackageAtName => {\n                    let txn = TransactionV1Builder::new_targeting_package_via_alias(\n                        \"Test\",\n                        None,\n                        None,\n                        \"call\",\n                        TransactionRuntimeParams::VmCasperV1,\n                    )\n                    .with_chain_name(\"casper-example\")\n                    .with_timestamp(Timestamp::now())\n                    .with_secret_key(&secret_key)\n                    .build()\n                    .unwrap();\n                    Transaction::from(txn)\n                }\n                ContractPackageScenario::MissingPackageAtHash => {\n                    let txn = TransactionV1Builder::new_targeting_package(\n                        PackageHash::new(PackageAddr::default()),\n                        None,\n                        None,\n                        \"call\",\n                        TransactionRuntimeParams::VmCasperV1,\n                    )\n                    .with_chain_name(\"casper-example\")\n                    .with_timestamp(Timestamp::now())\n                    .with_secret_key(&secret_key)\n                    .build()\n                    .unwrap();\n                    Transaction::from(txn)\n                }\n                ContractPackageScenario::MissingContractVersion => {\n                    let txn = TransactionV1Builder::new_targeting_package(\n                        PackageHash::new(PackageAddr::default()),\n                        Some(6),\n                        Some(2),\n                 
       \"call\",\n                        TransactionRuntimeParams::VmCasperV1,\n                    )\n                    .with_chain_name(\"casper-example\")\n                    .with_timestamp(Timestamp::now())\n                    .with_secret_key(&secret_key)\n                    .build()\n                    .unwrap();\n                    Transaction::from(txn)\n                }\n            },\n            TestScenario::DeployWithEmptySessionModuleBytes => {\n                Transaction::from(Deploy::random_with_empty_session_module_bytes(rng))\n            }\n            TestScenario::DeployWithNativeTransferInPayment => {\n                Transaction::from(Deploy::random_with_native_transfer_in_payment_logic(rng))\n            }\n            TestScenario::FromClientSlightlyFutureDatedTransaction(txn_type) => {\n                let timestamp = Timestamp::now() + (Config::default().timestamp_leeway / 2);\n                let ttl = TimeDiff::from_seconds(300);\n                match txn_type {\n                    TxnType::Deploy => Transaction::from(\n                        Deploy::random_valid_native_transfer_with_timestamp_and_ttl(\n                            rng, timestamp, ttl,\n                        ),\n                    ),\n                    TxnType::V1 => {\n                        let txn = TransactionV1Builder::new_session(\n                            false,\n                            Bytes::from(vec![1]),\n                            TransactionRuntimeParams::VmCasperV1,\n                        )\n                        .with_chain_name(\"casper-example\")\n                        .with_timestamp(timestamp)\n                        .with_ttl(ttl)\n                        .with_secret_key(&secret_key)\n                        .build()\n                        .unwrap();\n                        Transaction::from(txn)\n                    }\n                }\n            }\n            
TestScenario::FromClientFutureDatedTransaction(txn_type) => {\n                let timestamp = Timestamp::now()\n                    + Config::default().timestamp_leeway\n                    + TimeDiff::from_millis(1000);\n                let ttl = TimeDiff::from_seconds(300);\n                match txn_type {\n                    TxnType::Deploy => Transaction::from(\n                        Deploy::random_valid_native_transfer_with_timestamp_and_ttl(\n                            rng, timestamp, ttl,\n                        ),\n                    ),\n                    TxnType::V1 => {\n                        let txn = TransactionV1Builder::new_session(\n                            false,\n                            Bytes::from(vec![1]),\n                            TransactionRuntimeParams::VmCasperV1,\n                        )\n                        .with_chain_name(\"casper-example\")\n                        .with_timestamp(timestamp)\n                        .with_ttl(ttl)\n                        .with_secret_key(&secret_key)\n                        .build()\n                        .unwrap();\n                        Transaction::from(txn)\n                    }\n                }\n            }\n            TestScenario::InvalidPricingModeForTransactionV1 => {\n                let payment_limited_mode_transaction = TransactionV1Builder::new_random(rng)\n                    .with_pricing_mode(PricingMode::Fixed {\n                        gas_price_tolerance: 5,\n                        additional_computation_factor: 0,\n                    })\n                    .with_chain_name(\"casper-example\")\n                    .build()\n                    .expect(\"must create payment limited transaction\");\n                Transaction::from(payment_limited_mode_transaction)\n            }\n            TestScenario::TooLowGasPriceToleranceForTransactionV1 => {\n                const TOO_LOW_GAS_PRICE_TOLERANCE: u8 = 0;\n\n                let 
fixed_mode_transaction = TransactionV1Builder::new_random(rng)\n                    .with_pricing_mode(PricingMode::Fixed {\n                        gas_price_tolerance: TOO_LOW_GAS_PRICE_TOLERANCE,\n                        additional_computation_factor: 0,\n                    })\n                    .with_chain_name(\"casper-example\")\n                    .build()\n                    .expect(\"must create fixed mode transaction\");\n                Transaction::from(fixed_mode_transaction)\n            }\n            TestScenario::TooLowGasPriceToleranceForDeploy => {\n                const TOO_LOW_GAS_PRICE_TOLERANCE: u64 = 0;\n\n                let deploy = Deploy::random_with_gas_price(rng, TOO_LOW_GAS_PRICE_TOLERANCE);\n                Transaction::from(deploy)\n            }\n            TestScenario::InvalidFields | TestScenario::InvalidFieldsFromPeer => {\n                let mut additional_fields = BTreeMap::new();\n                additional_fields.insert(42, Bytes::from(vec![1]));\n                let txn = TransactionV1Builder::new_session(\n                    false,\n                    Bytes::from(vec![1]),\n                    TransactionRuntimeParams::VmCasperV1,\n                )\n                .with_chain_name(\"casper-example\")\n                .with_ttl(TimeDiff::from_seconds(300))\n                .with_secret_key(&secret_key)\n                .with_additional_fields(additional_fields)\n                .build()\n                .unwrap();\n                Transaction::from(txn)\n            }\n            TestScenario::InvalidArgumentsKind => {\n                let timestamp = Timestamp::now()\n                    + Config::default().timestamp_leeway\n                    + TimeDiff::from_millis(1000);\n                let ttl = TimeDiff::from_seconds(300);\n                let txn = TransactionV1Builder::new_session(\n                    false,\n                    Bytes::from(vec![1]),\n                    
TransactionRuntimeParams::VmCasperV1,\n                )\n                .with_transaction_args(TransactionArgs::Bytesrepr(Bytes::from(vec![1, 2, 3])))\n                .with_chain_name(\"casper-example\")\n                .with_timestamp(timestamp)\n                .with_ttl(ttl)\n                .with_secret_key(&secret_key)\n                .build()\n                .unwrap();\n                Transaction::from(txn)\n            }\n            TestScenario::WasmTransactionWithTooBigPayment => {\n                let ttl = TimeDiff::from_seconds(300);\n                let txn = TransactionV1Builder::new_session(\n                    false,\n                    Bytes::from(vec![1]),\n                    TransactionRuntimeParams::VmCasperV1,\n                )\n                .with_pricing_mode(PricingMode::PaymentLimited {\n                    payment_amount: u64::MAX, /* make sure it's a big value that doesn't match\n                                               * any wasm lane */\n                    gas_price_tolerance: 2,\n                    standard_payment: true,\n                })\n                .with_chain_name(\"casper-example\")\n                .with_timestamp(Timestamp::now())\n                .with_ttl(ttl)\n                .with_secret_key(&secret_key)\n                .build()\n                .unwrap();\n                Transaction::from(txn)\n            }\n            TestScenario::WasmDeployWithTooBigPayment => {\n                Transaction::from(Deploy::random_with_oversized_payment_amount(rng))\n            }\n            TestScenario::RedelegateExceedingMaximumDelegation => {\n                let txn = TransactionV1Builder::new_redelegate(\n                    ALICE_PUBLIC_KEY.clone(),\n                    BOB_PUBLIC_KEY.clone(),\n                    1_000_000_000_000_000_001_u64, /* This is 1 mote more than the\n                                                    * maximum_delegation_amount in local\n                                   
                 * chainspec */\n                    CAROL_PUBLIC_KEY.clone(),\n                )\n                .unwrap()\n                .with_chain_name(\"casper-example\")\n                .with_timestamp(Timestamp::now())\n                .with_secret_key(&secret_key)\n                .build()\n                .unwrap();\n                Transaction::from(txn)\n            }\n            TestScenario::DelegateExceedingMaximumDelegation => {\n                let ttl = TimeDiff::from_seconds(300);\n                let txn = TransactionV1Builder::new_delegate(\n                    ALICE_PUBLIC_KEY.clone(),\n                    BOB_PUBLIC_KEY.clone(),\n                    1_000_000_000_000_000_001_u64, /* This is 1 mote more than the\n                                                    * maximum_delegation_amount in local\n                                                    * chainspec */\n                )\n                .unwrap()\n                .with_chain_name(\"casper-example\")\n                .with_timestamp(Timestamp::now())\n                .with_ttl(ttl)\n                .with_secret_key(&secret_key)\n                .build()\n                .unwrap();\n                Transaction::from(txn)\n            }\n            TestScenario::VmCasperV2ByPackageHash => {\n                let txn = TransactionV1Builder::new_targeting_stored(\n                    TransactionInvocationTarget::ByPackageHash {\n                        addr: [1; 32],\n                        version: None,\n                        protocol_version_major: None,\n                    },\n                    \"x\",\n                    TransactionRuntimeParams::VmCasperV2 {\n                        transferred_value: 0,\n                        seed: None,\n                    },\n                )\n                .with_chain_name(\"casper-example\")\n                .with_secret_key(&secret_key)\n                .with_transaction_args(TransactionArgs::Bytesrepr(Bytes::new()))\n    
            .build()\n                .unwrap();\n                Transaction::from(txn)\n            }\n            TestScenario::V1ByPackage(hash_or_name, maybe_version, maybe_protocol_version, ..) => {\n                let id = match hash_or_name {\n                    HashOrName::Hash => TransactionInvocationTarget::ByPackageHash {\n                        addr: [1; 32],\n                        version: *maybe_version,\n                        protocol_version_major: *maybe_protocol_version,\n                    },\n                    HashOrName::Name => TransactionInvocationTarget::ByPackageName {\n                        name: \"xyz\".to_owned(),\n                        version: *maybe_version,\n                        protocol_version_major: *maybe_protocol_version,\n                    },\n                };\n                let txn = TransactionV1Builder::new_targeting_stored(\n                    id,\n                    \"x\",\n                    TransactionRuntimeParams::VmCasperV1,\n                )\n                .with_chain_name(\"casper-example\")\n                .with_secret_key(&secret_key)\n                .build()\n                .unwrap();\n                Transaction::from(txn)\n            }\n        }\n    }\n\n    fn is_valid_transaction_case(&self) -> bool {\n        match self {\n            TestScenario::FromPeerRepeatedValidTransaction(_)\n                    | TestScenario::FromPeerExpired(_)\n                    | TestScenario::FromPeerValidTransaction(_)\n                    | TestScenario::FromPeerMissingAccount(_) // account check skipped if from peer\n                    | TestScenario::FromPeerAccountWithInsufficientWeight(_) // account check skipped if from peer\n                    | TestScenario::FromPeerAccountWithInvalidAssociatedKeys(_) // account check skipped if from peer\n                    | TestScenario::FromClientRepeatedValidTransaction(_)\n                    | TestScenario::FromClientValidTransaction(_)\n 
                   | TestScenario::FromClientSlightlyFutureDatedTransaction(_)\n                    | TestScenario::FromClientSignedByAdmin(..) => true,\n            TestScenario::FromPeerInvalidTransaction(_)\n                    | TestScenario::FromPeerInvalidTransactionZeroPayment(_)\n                    | TestScenario::FromClientInsufficientBalance(_)\n                    | TestScenario::FromClientMissingAccount(_)\n                    | TestScenario::FromClientInvalidTransaction(_)\n                    | TestScenario::FromClientInvalidTransactionZeroPayment(_)\n                    | TestScenario::FromClientFutureDatedTransaction(_)\n                    | TestScenario::FromClientAccountWithInsufficientWeight(_)\n                    | TestScenario::FromClientAccountWithInvalidAssociatedKeys(_)\n                    | TestScenario::AccountWithUnknownBalance\n                    | TestScenario::DeployWithEmptySessionModuleBytes\n                    | TestScenario::DeployWithNativeTransferInPayment\n                    | TestScenario::DeployWithoutPaymentAmount\n                    | TestScenario::DeployWithMangledPaymentAmount\n                    | TestScenario::DeployWithMangledTransferAmount\n                    | TestScenario::DeployWithoutTransferAmount\n                    | TestScenario::DeployWithoutTransferTarget\n                    | TestScenario::DeployWithPaymentOne\n                    | TestScenario::BalanceCheckForDeploySentByPeer\n                    | TestScenario::FromClientExpired(_) => false,\n            TestScenario::FromPeerCustomPaymentContract(contract_scenario)\n                    | TestScenario::FromPeerSessionContract(_, contract_scenario)\n                    | TestScenario::FromClientCustomPaymentContract(contract_scenario)\n                    | TestScenario::FromClientSessionContract(_, contract_scenario) => match contract_scenario\n                    {\n                        ContractScenario::Valid\n                        | 
ContractScenario::MissingContractAtName => true,\n                        | ContractScenario::MissingContractAtHash\n                        | ContractScenario::MissingEntryPoint => false,\n                    },\n            TestScenario::FromPeerCustomPaymentContractPackage(contract_package_scenario)\n                    | TestScenario::FromPeerSessionContractPackage(_, contract_package_scenario)\n                    | TestScenario::FromClientCustomPaymentContractPackage(contract_package_scenario)\n                    | TestScenario::FromClientSessionContractPackage(_, contract_package_scenario) => {\n                        match contract_package_scenario {\n                            ContractPackageScenario::Valid\n                            | ContractPackageScenario::MissingPackageAtName => true,\n                            | ContractPackageScenario::MissingPackageAtHash\n                            | ContractPackageScenario::MissingContractVersion => false,\n                        }\n                    },\n            TestScenario::InvalidPricingModeForTransactionV1\n                    | TestScenario::TooLowGasPriceToleranceForTransactionV1\n                    | TestScenario::TransactionWithPaymentOne\n                    | TestScenario::TooLowGasPriceToleranceForDeploy\n                    | TestScenario::InvalidFields\n                    | TestScenario::InvalidFieldsFromPeer\n                    | TestScenario::InvalidArgumentsKind\n                    | TestScenario::WasmTransactionWithTooBigPayment\n                    | TestScenario::WasmDeployWithTooBigPayment\n                    | TestScenario::RedelegateExceedingMaximumDelegation { .. }\n                    | TestScenario::DelegateExceedingMaximumDelegation { .. }\n                    | TestScenario::VmCasperV2ByPackageHash => false,\n            TestScenario::V1ByPackage(hash_or_name, _, _, scenario, ..) 
=> {\n                match hash_or_name {\n                    HashOrName::Hash => match scenario {\n                            ContractVersionExistance::PackageDoesNotExist | ContractVersionExistance::PackageExists(false, ..) => false,\n                            ContractVersionExistance::PackageExists(true, ..) => true,\n                        },\n                    HashOrName::Name => true,\n                }\n            },\n        }\n    }\n\n    fn is_repeated_transaction_case(&self) -> bool {\n        matches!(\n            self,\n            TestScenario::FromClientRepeatedValidTransaction(_)\n                | TestScenario::FromPeerRepeatedValidTransaction(_)\n        )\n    }\n\n    fn contract_scenario(&self) -> Option<ContractScenario> {\n        match self {\n            TestScenario::FromPeerCustomPaymentContract(contract_scenario)\n            | TestScenario::FromPeerSessionContract(_, contract_scenario)\n            | TestScenario::FromClientCustomPaymentContract(contract_scenario)\n            | TestScenario::FromClientSessionContract(_, contract_scenario) => {\n                Some(contract_scenario.clone())\n            }\n            _ => None,\n        }\n    }\n\n    fn is_v2_casper_vm(&self) -> bool {\n        matches!(self, TestScenario::VmCasperV2ByPackageHash)\n    }\n}\n\nfn create_account(account_hash: AccountHash, test_scenario: &TestScenario) -> Account {\n    match test_scenario {\n        TestScenario::FromPeerAccountWithInvalidAssociatedKeys(_)\n        | TestScenario::FromClientAccountWithInvalidAssociatedKeys(_) => {\n            Account::create(AccountHash::default(), NamedKeys::new(), URef::default())\n        }\n        TestScenario::FromPeerAccountWithInsufficientWeight(_)\n        | TestScenario::FromClientAccountWithInsufficientWeight(_) => {\n            let invalid_action_threshold =\n                ActionThresholds::new(Weight::new(100u8), Weight::new(100u8))\n                    .expect(\"should create action 
threshold\");\n            Account::new(\n                account_hash,\n                NamedKeys::new(),\n                URef::default(),\n                AssociatedKeys::new(account_hash, Weight::new(1)),\n                invalid_action_threshold,\n            )\n        }\n        _ => Account::create(account_hash, NamedKeys::new(), URef::default()),\n    }\n}\n\nstruct Reactor {\n    storage: Storage,\n    transaction_acceptor: TransactionAcceptor,\n    _storage_tempdir: TempDir,\n    test_scenario: TestScenario,\n}\n\nimpl reactor::Reactor for Reactor {\n    type Event = Event;\n    type Config = TestScenario;\n    type Error = Error;\n\n    fn dispatch_event(\n        &mut self,\n        effect_builder: EffectBuilder<Self::Event>,\n        rng: &mut NodeRng,\n        event: Event,\n    ) -> Effects<Self::Event> {\n        debug!(\"{event:?}\");\n        match event {\n            Event::Storage(event) => reactor::wrap_effects(\n                Event::Storage,\n                self.storage.handle_event(effect_builder, rng, event),\n            ),\n            Event::StorageRequest(req) => reactor::wrap_effects(\n                Event::Storage,\n                self.storage.handle_event(effect_builder, rng, req.into()),\n            ),\n            Event::TransactionAcceptor(event) => reactor::wrap_effects(\n                Event::TransactionAcceptor,\n                self.transaction_acceptor\n                    .handle_event(effect_builder, rng, event),\n            ),\n            Event::ControlAnnouncement(ctrl_ann) => {\n                panic!(\"unhandled control announcement: {}\", ctrl_ann)\n            }\n            Event::FatalAnnouncement(fatal_ann) => {\n                panic!(\"unhandled fatal announcement: {}\", fatal_ann)\n            }\n            Event::TransactionAcceptorAnnouncement(_) => {\n                // We do not care about transaction acceptor announcements in the acceptor tests.\n                Effects::new()\n            }\n    
        Event::ContractRuntime(event) => match event {\n                ContractRuntimeRequest::Query {\n                    request: query_request,\n                    responder,\n                } => {\n                    let query_result = if let Key::Hash(_) | Key::SmartContract(_) =\n                        query_request.key()\n                    {\n                        match &self.test_scenario {\n                            TestScenario::FromPeerCustomPaymentContractPackage(\n                                ContractPackageScenario::MissingPackageAtHash,\n                            )\n                            | TestScenario::FromPeerSessionContractPackage(\n                                _,\n                                ContractPackageScenario::MissingPackageAtHash,\n                            )\n                            | TestScenario::FromClientCustomPaymentContractPackage(\n                                ContractPackageScenario::MissingPackageAtHash,\n                            )\n                            | TestScenario::FromClientSessionContractPackage(\n                                _,\n                                ContractPackageScenario::MissingPackageAtHash,\n                            ) => QueryResult::ValueNotFound(String::new()),\n                            TestScenario::FromPeerCustomPaymentContractPackage(\n                                ContractPackageScenario::MissingContractVersion,\n                            )\n                            | TestScenario::FromPeerSessionContractPackage(\n                                _,\n                                ContractPackageScenario::MissingContractVersion,\n                            )\n                            | TestScenario::FromClientCustomPaymentContractPackage(\n                                ContractPackageScenario::MissingContractVersion,\n                            )\n                            | TestScenario::FromClientSessionContractPackage(\n      
                          _,\n                                ContractPackageScenario::MissingContractVersion,\n                            )\n                            | TestScenario::VmCasperV2ByPackageHash => QueryResult::Success {\n                                value: Box::new(StoredValue::ContractPackage(\n                                    ContractPackage::default(),\n                                )),\n                                proofs: vec![],\n                            },\n                            TestScenario::V1ByPackage(\n                                hash_or_name,\n                                _,\n                                _,\n                                scenario\n                            ) => {\n                                match hash_or_name {\n                                    HashOrName::Hash => match scenario {\n                                        ContractVersionExistance::PackageDoesNotExist => QueryResult::ValueNotFound(\"xyz\".to_owned()),\n                                        ContractVersionExistance::PackageExists(_, versions, disabled_versions) => {\n                                            let contract_package = ContractPackage::new(\n                                                URef::default(),\n                                                versions.clone(),\n                                                disabled_versions.clone(),\n                                                Groups::default(),\n                                                ContractPackageStatus::Unlocked,\n                                            );\n                                            QueryResult::Success {\n                                                value: Box::new(StoredValue::ContractPackage(\n                                                    contract_package,\n                                                )),\n                                                proofs: vec![],\n              
                              }\n                                        },\n                                    },\n                                    HashOrName::Name => unreachable!(\"Calling contract by name should not result in a package fetch in transaction acceptor\"),\n                                }\n                            }\n                            _ => panic!(\n                                \"unexpected query: {query_request:?} in {:?}\",\n                                self.test_scenario\n                            ),\n                        }\n                    } else {\n                        panic!(\"expect only queries using Key::Package variant\");\n                    };\n                    responder.respond(query_result).ignore()\n                }\n                ContractRuntimeRequest::GetBalance {\n                    request: balance_request,\n                    responder,\n                } => {\n                    let key = match balance_request.identifier() {\n                        BalanceIdentifier::Purse(uref) => Key::URef(*uref),\n                        BalanceIdentifier::Public(public_key) => {\n                            Key::Account(public_key.to_account_hash())\n                        }\n                        BalanceIdentifier::Account(account_hash)\n                        | BalanceIdentifier::PenalizedAccount(account_hash) => {\n                            Key::Account(*account_hash)\n                        }\n                        BalanceIdentifier::Entity(entity_addr) => {\n                            Key::AddressableEntity(*entity_addr)\n                        }\n                        BalanceIdentifier::Internal(addr) => Key::Balance(*addr),\n                        BalanceIdentifier::Refund => {\n                            responder\n                                .respond(BalanceResult::Failure(\n                                    
TrackingCopyError::NamedKeyNotFound(\"refund\".to_string()),\n                                ))\n                                .ignore::<Self::Event>();\n                            return Effects::new();\n                        }\n                        BalanceIdentifier::Payment | BalanceIdentifier::PenalizedPayment => {\n                            responder\n                                .respond(BalanceResult::Failure(\n                                    TrackingCopyError::NamedKeyNotFound(\"payment\".to_string()),\n                                ))\n                                .ignore::<Self::Event>();\n                            return Effects::new();\n                        }\n                        BalanceIdentifier::Accumulate => {\n                            responder\n                                .respond(BalanceResult::Failure(\n                                    TrackingCopyError::NamedKeyNotFound(\"accumulate\".to_string()),\n                                ))\n                                .ignore::<Self::Event>();\n                            return Effects::new();\n                        }\n                    };\n                    let purse_addr = match balance_request.identifier().as_purse_addr() {\n                        Some(purse_addr) => purse_addr,\n                        None => {\n                            responder\n                                .respond(BalanceResult::Failure(\n                                    TrackingCopyError::UnexpectedKeyVariant(key),\n                                ))\n                                .ignore::<Self::Event>();\n                            return Effects::new();\n                        }\n                    };\n\n                    let proof = TrieMerkleProof::new(\n                        key,\n                        StoredValue::CLValue(CLValue::from_t(()).expect(\"should get CLValue\")),\n                        VecDeque::new(),\n                    );\n     
               let baseline_amount = U512::from(DEFAULT_BASELINE_MOTES_AMOUNT);\n                    let motes = if matches!(\n                        self.test_scenario,\n                        TestScenario::FromClientInsufficientBalance(_)\n                    ) {\n                        baseline_amount - 1\n                    } else {\n                        baseline_amount\n                    };\n                    let balance_result =\n                        if self.test_scenario == TestScenario::AccountWithUnknownBalance {\n                            BalanceResult::RootNotFound\n                        } else {\n                            let proofs_result = ProofsResult::Proofs {\n                                total_balance_proof: Box::new(proof),\n                                balance_holds: Default::default(),\n                            };\n                            BalanceResult::Success {\n                                purse_addr,\n                                total_balance: Default::default(),\n                                available_balance: motes,\n                                proofs_result,\n                            }\n                        };\n                    responder.respond(balance_result).ignore()\n                }\n                ContractRuntimeRequest::GetAddressableEntity {\n                    state_root_hash: _,\n                    entity_addr,\n                    responder,\n                } => {\n                    let result = if matches!(\n                        self.test_scenario,\n                        TestScenario::FromClientMissingAccount(_)\n                    ) || matches!(\n                        self.test_scenario,\n                        TestScenario::FromPeerMissingAccount(_)\n                    ) {\n                        AddressableEntityResult::ValueNotFound(\"missing account\".to_string())\n                    } else if let EntityAddr::Account(account_hash) = entity_addr 
{\n                        let account =\n                            create_account(AccountHash::new(account_hash), &self.test_scenario);\n                        AddressableEntityResult::Success {\n                            entity: AddressableEntity::from(account),\n                        }\n                    } else if let EntityAddr::SmartContract(..) = entity_addr {\n                        match self.test_scenario {\n                            TestScenario::FromPeerCustomPaymentContract(\n                                ContractScenario::MissingContractAtHash,\n                            )\n                            | TestScenario::FromPeerSessionContract(\n                                _,\n                                ContractScenario::MissingContractAtHash,\n                            )\n                            | TestScenario::FromClientCustomPaymentContract(\n                                ContractScenario::MissingContractAtHash,\n                            )\n                            | TestScenario::FromClientSessionContract(\n                                _,\n                                ContractScenario::MissingContractAtHash,\n                            ) => AddressableEntityResult::ValueNotFound(\n                                \"missing contract\".to_string(),\n                            ),\n                            TestScenario::FromPeerCustomPaymentContract(\n                                ContractScenario::MissingEntryPoint,\n                            )\n                            | TestScenario::FromPeerSessionContract(\n                                _,\n                                ContractScenario::MissingEntryPoint,\n                            )\n                            | TestScenario::FromClientCustomPaymentContract(\n                                ContractScenario::MissingEntryPoint,\n                            )\n                            | TestScenario::FromClientSessionContract(\n        
                        _,\n                                ContractScenario::MissingEntryPoint,\n                            ) => {\n                                let contract = Contract::default();\n                                AddressableEntityResult::Success {\n                                    entity: AddressableEntity::from(contract),\n                                }\n                            }\n                            TestScenario::V1ByPackage(_, _, _, _) => {\n                                let contract = Contract::default();\n                                AddressableEntityResult::Success {\n                                    entity: AddressableEntity::from(contract),\n                                }\n                            }\n                            _ => panic!(\"unexpected GetAddressableEntity: {:?}\", entity_addr),\n                        }\n                    } else {\n                        panic!(\n                            \"should GetAddressableEntity using Account or SmartContract variant\"\n                        );\n                    };\n                    responder.respond(result).ignore()\n                }\n                ContractRuntimeRequest::GetEntryPointExists {\n                    state_root_hash: _,\n                    responder,\n                    ..\n                } => {\n                    if matches!(self.test_scenario, TestScenario::V1ByPackage(..)) {\n                        let result = EntryPointExistsResult::Success;\n                        responder.respond(result).ignore()\n                    } else {\n                        let contract_scenario = self\n                            .test_scenario\n                            .contract_scenario()\n                            .expect(\"must get contract scenario\");\n                        let result = match contract_scenario {\n                            ContractScenario::Valid => EntryPointExistsResult::Success,\n           
                 ContractScenario::MissingContractAtHash\n                            | ContractScenario::MissingContractAtName\n                            | ContractScenario::MissingEntryPoint => {\n                                EntryPointExistsResult::ValueNotFound(\n                                    \"entry point not found\".to_string(),\n                                )\n                            }\n                        };\n                        responder.respond(result).ignore()\n                    }\n                }\n                _ => panic!(\"should not receive {:?}\", event),\n            },\n            Event::NetworkRequest(_) => panic!(\"test does not handle network requests\"),\n        }\n    }\n\n    fn new(\n        config: Self::Config,\n        chainspec: Arc<Chainspec>,\n        _chainspec_raw_bytes: Arc<ChainspecRawBytes>,\n        _network_identity: NetworkIdentity,\n        registry: &Registry,\n        _event_queue: EventQueueHandle<Self::Event>,\n        _rng: &mut NodeRng,\n    ) -> Result<(Self, Effects<Self::Event>), Self::Error> {\n        let (storage_config, storage_tempdir) = storage::Config::new_for_tests(1);\n        let storage_with_dir = WithDir::new(storage_tempdir.path(), storage_config);\n\n        let transaction_acceptor =\n            TransactionAcceptor::new(Config::default(), Arc::clone(&chainspec), registry)?;\n\n        let storage = Storage::new(\n            &storage_with_dir,\n            None,\n            ProtocolVersion::from_parts(1, 0, 0),\n            EraId::default(),\n            \"test\",\n            chainspec.transaction_config.max_ttl.into(),\n            chainspec.core_config.recent_era_count(),\n            Some(registry),\n            false,\n            TransactionConfig::default(),\n        )\n        .unwrap();\n\n        let reactor = Reactor {\n            storage,\n            transaction_acceptor,\n            _storage_tempdir: storage_tempdir,\n            test_scenario: 
config,\n        };\n\n        let effects = Effects::new();\n\n        Ok((reactor, effects))\n    }\n}\n\nfn put_block_to_storage_and_mark_complete(\n    block: Arc<BlockV2>,\n    result_sender: Sender<bool>,\n) -> impl FnOnce(EffectBuilder<Event>) -> Effects<Event> {\n    |effect_builder: EffectBuilder<Event>| {\n        async move {\n            let block_height = block.height();\n            let block: Block = (*block).clone().into();\n            let result = effect_builder.put_block_to_storage(Arc::new(block)).await;\n            effect_builder.mark_block_completed(block_height).await;\n            result_sender\n                .send(result)\n                .expect(\"receiver should not be dropped yet\");\n        }\n        .ignore()\n    }\n}\n\nfn put_transaction_to_storage(\n    txn: &Transaction,\n    result_sender: Sender<bool>,\n) -> impl FnOnce(EffectBuilder<Event>) -> Effects<Event> {\n    let txn = txn.clone();\n    |effect_builder: EffectBuilder<Event>| {\n        effect_builder\n            .put_transaction_to_storage(txn)\n            .map(|result| {\n                result_sender\n                    .send(result)\n                    .expect(\"receiver should not be dropped yet\")\n            })\n            .ignore()\n    }\n}\n\nfn schedule_accept_transaction(\n    txn: &Transaction,\n    source: Source,\n    responder: Responder<Result<(), super::Error>>,\n) -> impl FnOnce(EffectBuilder<Event>) -> Effects<Event> {\n    let transaction = txn.clone();\n    |effect_builder: EffectBuilder<Event>| {\n        effect_builder\n            .into_inner()\n            .schedule(\n                super::Event::Accept {\n                    transaction,\n                    source,\n                    maybe_responder: Some(responder),\n                },\n                QueueKind::Validation,\n            )\n            .ignore()\n    }\n}\n\nfn inject_balance_check_for_peer(\n    txn: &Transaction,\n    source: Source,\n    rng: &mut TestRng,\n    
responder: Responder<Result<(), super::Error>>,\n    chainspec: &Chainspec,\n) -> impl FnOnce(EffectBuilder<Event>) -> Effects<Event> {\n    let txn = txn.clone();\n    let block = TestBlockBuilder::new().build(rng);\n    let block_header = Box::new(block.header().clone().into());\n    let meta_transaction = MetaTransaction::from_transaction(\n        &txn,\n        chainspec.core_config.pricing_handling,\n        &chainspec.transaction_config,\n    )\n    .unwrap();\n    |effect_builder: EffectBuilder<Event>| {\n        let event_metadata = Box::new(EventMetadata::new(\n            txn,\n            meta_transaction,\n            source,\n            Some(responder),\n            Timestamp::now(),\n        ));\n        effect_builder\n            .into_inner()\n            .schedule(\n                super::Event::GetBalanceResult {\n                    event_metadata,\n                    block_header,\n                    maybe_balance: None,\n                },\n                QueueKind::ContractRuntime,\n            )\n            .ignore()\n    }\n}\n\nasync fn run_transaction_acceptor_without_timeout(\n    test_scenario: TestScenario,\n) -> Result<(), super::Error> {\n    let _ = logging::init();\n    let rng = &mut TestRng::new();\n\n    let admin = SecretKey::random(rng);\n    let (mut chainspec, chainspec_raw_bytes) =\n        <(Chainspec, ChainspecRawBytes)>::from_resources(\"local\");\n    let mut chainspec = match &test_scenario {\n        TestScenario::TooLowGasPriceToleranceForTransactionV1 => {\n            chainspec.with_pricing_handling(PricingHandling::Fixed);\n            chainspec\n        }\n        test_scenario if test_scenario.is_v2_casper_vm() => {\n            chainspec.with_vm_casper_v2(true);\n            chainspec\n        }\n        _ => chainspec,\n    };\n    chainspec.core_config.administrators = iter::once(PublicKey::from(&admin)).collect();\n\n    let chainspec = Arc::new(chainspec);\n    let mut runner: 
Runner<ConditionCheckReactor<Reactor>> = Runner::new(\n        test_scenario.clone(),\n        chainspec.clone(),\n        Arc::new(chainspec_raw_bytes),\n        rng,\n    )\n    .await\n    .unwrap();\n\n    let block = Arc::new(TestBlockBuilder::new().build(rng));\n    // Create a channel to assert that the block was successfully injected into storage.\n    let (result_sender, result_receiver) = oneshot::channel();\n\n    runner\n        .process_injected_effects(put_block_to_storage_and_mark_complete(block, result_sender))\n        .await;\n\n    // There are two scheduled events, so we only need to try cranking until the second time it\n    // returns `Some`.\n    for _ in 0..2 {\n        while runner.try_crank(rng).await == TryCrankOutcome::NoEventsToProcess {\n            time::sleep(POLL_INTERVAL).await;\n        }\n    }\n    assert!(result_receiver.await.unwrap());\n\n    // Create a responder to assert the validity of the transaction\n    let (txn_sender, txn_receiver) = oneshot::channel();\n    let txn_responder = Responder::without_shutdown(txn_sender);\n\n    // Create a transaction specific to the test scenario\n    let txn = test_scenario.transaction(rng, &admin);\n    // Mark the source as either a peer or a client depending on the scenario.\n    let source = test_scenario.source(rng);\n\n    {\n        // Inject the transaction artificially into storage to simulate a previously seen one.\n        if test_scenario.is_repeated_transaction_case() {\n            let (result_sender, result_receiver) = oneshot::channel();\n            runner\n                .process_injected_effects(put_transaction_to_storage(&txn, result_sender))\n                .await;\n            while runner.try_crank(rng).await == TryCrankOutcome::NoEventsToProcess {\n                time::sleep(POLL_INTERVAL).await;\n            }\n            // Check that the \"previously seen\" transaction is present in storage.\n            assert!(result_receiver.await.unwrap());\n        
}\n\n        if test_scenario == TestScenario::BalanceCheckForDeploySentByPeer {\n            let (txn_sender, _) = oneshot::channel();\n            let txn_responder = Responder::without_shutdown(txn_sender);\n            let chainspec = chainspec.as_ref().clone();\n            runner\n                .process_injected_effects(inject_balance_check_for_peer(\n                    &txn,\n                    source.clone(),\n                    rng,\n                    txn_responder,\n                    &chainspec,\n                ))\n                .await;\n            while runner.try_crank(rng).await == TryCrankOutcome::NoEventsToProcess {\n                time::sleep(POLL_INTERVAL).await;\n            }\n        }\n    }\n\n    runner\n        .process_injected_effects(schedule_accept_transaction(&txn, source, txn_responder))\n        .await;\n    let test_scenario_clone = test_scenario.clone();\n    // Tests where the transaction is already in storage will not trigger any transaction acceptor\n    // announcement, so use the transaction acceptor `PutToStorage` event as the condition.\n    let stopping_condition = move |event: &Event| -> bool {\n        match &test_scenario_clone {\n            // Check that invalid transactions sent by a client raise the `InvalidTransaction`\n            // announcement with the appropriate source.\n            TestScenario::FromClientInvalidTransaction(_)\n            | TestScenario::FromClientInvalidTransactionZeroPayment(_)\n            | TestScenario::FromClientFutureDatedTransaction(_)\n            | TestScenario::FromClientMissingAccount(_)\n            | TestScenario::FromClientInsufficientBalance(_)\n            | TestScenario::FromClientAccountWithInvalidAssociatedKeys(_)\n            | TestScenario::FromClientAccountWithInsufficientWeight(_)\n            | TestScenario::DeployWithEmptySessionModuleBytes\n            | TestScenario::AccountWithUnknownBalance\n            | 
TestScenario::DeployWithNativeTransferInPayment\n            | TestScenario::DeployWithoutPaymentAmount\n            | TestScenario::DeployWithMangledPaymentAmount\n            | TestScenario::DeployWithMangledTransferAmount\n            | TestScenario::DeployWithoutTransferTarget\n            | TestScenario::DeployWithoutTransferAmount\n            | TestScenario::DeployWithPaymentOne\n            | TestScenario::InvalidPricingModeForTransactionV1\n            | TestScenario::FromClientExpired(_)\n            | TestScenario::TooLowGasPriceToleranceForTransactionV1\n            | TestScenario::TransactionWithPaymentOne\n            | TestScenario::TooLowGasPriceToleranceForDeploy\n            | TestScenario::InvalidFields\n            | TestScenario::InvalidArgumentsKind\n            | TestScenario::WasmTransactionWithTooBigPayment\n            | TestScenario::WasmDeployWithTooBigPayment\n            | TestScenario::RedelegateExceedingMaximumDelegation { .. }\n            | TestScenario::DelegateExceedingMaximumDelegation { .. }\n            | TestScenario::VmCasperV2ByPackageHash => {\n                matches!(\n                    event,\n                    Event::TransactionAcceptorAnnouncement(\n                        TransactionAcceptorAnnouncement::InvalidTransaction {\n                            source: Source::Client,\n                            ..\n                        }\n                    )\n                )\n            }\n            // Check that executable items with valid contracts are successfully stored. 
Conversely,\n            // ensure that invalid contracts will raise the invalid transaction announcement.\n            TestScenario::FromPeerCustomPaymentContract(contract_scenario)\n            | TestScenario::FromPeerSessionContract(_, contract_scenario)\n            | TestScenario::FromClientCustomPaymentContract(contract_scenario)\n            | TestScenario::FromClientSessionContract(_, contract_scenario) => {\n                match contract_scenario {\n                    ContractScenario::Valid | ContractScenario::MissingContractAtName => matches!(\n                        event,\n                        Event::TransactionAcceptorAnnouncement(\n                            TransactionAcceptorAnnouncement::AcceptedNewTransaction { .. }\n                        )\n                    ),\n                    ContractScenario::MissingContractAtHash\n                    | ContractScenario::MissingEntryPoint => {\n                        matches!(\n                            event,\n                            Event::TransactionAcceptorAnnouncement(\n                                TransactionAcceptorAnnouncement::InvalidTransaction { .. 
}\n                            )\n                        )\n                    }\n                }\n            }\n            // Check that executable items with valid contract packages are successfully stored.\n            // Conversely, ensure that invalid contract packages will raise the invalid transaction\n            // announcement.\n            TestScenario::FromPeerCustomPaymentContractPackage(contract_package_scenario)\n            | TestScenario::FromPeerSessionContractPackage(_, contract_package_scenario)\n            | TestScenario::FromClientCustomPaymentContractPackage(contract_package_scenario)\n            | TestScenario::FromClientSessionContractPackage(_, contract_package_scenario) => {\n                match contract_package_scenario {\n                    ContractPackageScenario::Valid\n                    | ContractPackageScenario::MissingPackageAtName => matches!(\n                        event,\n                        Event::TransactionAcceptorAnnouncement(\n                            TransactionAcceptorAnnouncement::AcceptedNewTransaction { .. }\n                        )\n                    ),\n                    ContractPackageScenario::MissingContractVersion\n                    | ContractPackageScenario::MissingPackageAtHash => matches!(\n                        event,\n                        Event::TransactionAcceptorAnnouncement(\n                            TransactionAcceptorAnnouncement::InvalidTransaction { .. 
}\n                        )\n                    ),\n                }\n            }\n            // Check that invalid transactions sent by a peer raise the `InvalidTransaction`\n            // announcement with the appropriate source.\n            TestScenario::FromPeerInvalidTransaction(_)\n            | TestScenario::FromPeerInvalidTransactionZeroPayment(_)\n            | TestScenario::BalanceCheckForDeploySentByPeer\n            | TestScenario::InvalidFieldsFromPeer => {\n                matches!(\n                    event,\n                    Event::TransactionAcceptorAnnouncement(\n                        TransactionAcceptorAnnouncement::InvalidTransaction {\n                            source: Source::Peer(_) | Source::PeerGossiped(_),\n                            ..\n                        }\n                    )\n                )\n            }\n            // Check that a new and valid, transaction sent by a peer raises an\n            // `AcceptedNewTransaction` announcement with the appropriate source.\n            TestScenario::FromPeerValidTransaction(_)\n            | TestScenario::FromPeerMissingAccount(_)\n            | TestScenario::FromPeerAccountWithInvalidAssociatedKeys(_)\n            | TestScenario::FromPeerAccountWithInsufficientWeight(_)\n            | TestScenario::FromPeerExpired(_) => {\n                matches!(\n                    event,\n                    Event::TransactionAcceptorAnnouncement(\n                        TransactionAcceptorAnnouncement::AcceptedNewTransaction {\n                            source: Source::Peer(_),\n                            ..\n                        }\n                    )\n                ) || matches!(\n                    event,\n                    Event::TransactionAcceptorAnnouncement(\n                        TransactionAcceptorAnnouncement::AcceptedNewTransaction {\n                            source: Source::PeerGossiped(_),\n                            ..\n                      
  }\n                    )\n                )\n            }\n            // Check that a new and valid transaction sent by a client raises an\n            // `AcceptedNewTransaction` announcement with the appropriate source.\n            TestScenario::FromClientValidTransaction(_)\n            | TestScenario::FromClientSlightlyFutureDatedTransaction(_)\n            | TestScenario::FromClientSignedByAdmin(_) => {\n                matches!(\n                    event,\n                    Event::TransactionAcceptorAnnouncement(\n                        TransactionAcceptorAnnouncement::AcceptedNewTransaction {\n                            source: Source::Client,\n                            ..\n                        }\n                    )\n                )\n            }\n            // Check that repeated valid transactions from a client raises `PutToStorageResult`\n            // with the `is_new` flag as false.\n            TestScenario::FromClientRepeatedValidTransaction(_) => matches!(\n                event,\n                Event::TransactionAcceptor(super::Event::PutToStorageResult { is_new: false, .. 
})\n            ),\n            // Check that repeated valid transactions from a peer raises `StoredFinalizedApprovals`\n            // with the `is_new` flag as false.\n            TestScenario::FromPeerRepeatedValidTransaction(_) => matches!(\n                event,\n                Event::TransactionAcceptor(super::Event::StoredFinalizedApprovals {\n                    is_new: false,\n                    ..\n                })\n            ),\n            TestScenario::V1ByPackage(\n                hash_or_name,\n                entity_version,\n                protocol_version_major,\n                scenario,\n            ) => match hash_or_name {\n                HashOrName::Hash => match scenario {\n                    ContractVersionExistance::PackageDoesNotExist => {\n                        matches!(\n                            event,\n                            Event::TransactionAcceptorAnnouncement(\n                                TransactionAcceptorAnnouncement::InvalidTransaction {\n                                    source: Source::Client,\n                                    ..\n                                }\n                            )\n                        )\n                    }\n                    ContractVersionExistance::PackageExists(false, ..) 
=> {\n                        if entity_version.is_none() && protocol_version_major.is_none() {\n                            return matches!(\n                                event,\n                                Event::TransactionAcceptorAnnouncement(\n                                    TransactionAcceptorAnnouncement::AcceptedNewTransaction {\n                                        source: Source::Client,\n                                        ..\n                                    }\n                                )\n                            );\n                        }\n                        matches!(\n                            event,\n                            Event::TransactionAcceptorAnnouncement(\n                                TransactionAcceptorAnnouncement::InvalidTransaction {\n                                    source: Source::Client,\n                                    ..\n                                }\n                            )\n                        )\n                    }\n                    ContractVersionExistance::PackageExists(true, ..) 
=> {\n                        matches!(\n                            event,\n                            Event::TransactionAcceptorAnnouncement(\n                                TransactionAcceptorAnnouncement::AcceptedNewTransaction {\n                                    source: Source::Client,\n                                    ..\n                                }\n                            )\n                        )\n                    }\n                },\n                HashOrName::Name => matches!(\n                    event,\n                    Event::TransactionAcceptorAnnouncement(\n                        TransactionAcceptorAnnouncement::AcceptedNewTransaction {\n                            source: Source::Client,\n                            ..\n                        }\n                    )\n                ),\n            },\n        }\n    };\n    runner\n        .reactor_mut()\n        .set_condition_checker(Box::new(stopping_condition));\n\n    loop {\n        match runner.try_crank(rng).await {\n            TryCrankOutcome::ProcessedAnEvent => {\n                if runner.reactor().condition_result() {\n                    break;\n                }\n            }\n            TryCrankOutcome::NoEventsToProcess => time::sleep(POLL_INTERVAL).await,\n            TryCrankOutcome::ShouldExit(exit_code) => panic!(\"should not exit: {:?}\", exit_code),\n            TryCrankOutcome::Exited => unreachable!(),\n        }\n    }\n\n    {\n        // Assert that the transaction is present in the case of a valid transaction.\n        // Conversely, assert its absence in the invalid case.\n        let is_in_storage = runner\n            .reactor()\n            .inner()\n            .storage\n            .get_transaction_by_hash(txn.hash())\n            .is_some();\n\n        if test_scenario.is_valid_transaction_case() {\n            assert!(is_in_storage)\n        } else {\n            assert!(!is_in_storage)\n        }\n    }\n\n    
txn_receiver.await.unwrap()\n}\n\nasync fn run_transaction_acceptor(test_scenario: TestScenario) -> Result<(), super::Error> {\n    time::timeout(\n        TIMEOUT,\n        run_transaction_acceptor_without_timeout(test_scenario),\n    )\n    .await\n    .unwrap()\n}\n\n#[tokio::test]\nasync fn should_accept_valid_deploy_from_peer() {\n    let result =\n        run_transaction_acceptor(TestScenario::FromPeerValidTransaction(TxnType::Deploy)).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_valid_transaction_v1_from_peer() {\n    let result =\n        run_transaction_acceptor(TestScenario::FromPeerValidTransaction(TxnType::V1)).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_reject_invalid_deploy_from_peer() {\n    let result =\n        run_transaction_acceptor(TestScenario::FromPeerInvalidTransaction(TxnType::Deploy)).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::InvalidTransaction(\n            InvalidTransaction::Deploy(_)\n        ))\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_invalid_transaction_v1_from_peer() {\n    let result =\n        run_transaction_acceptor(TestScenario::FromPeerInvalidTransaction(TxnType::V1)).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::InvalidTransaction(InvalidTransaction::V1(_)))\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_zero_payment_transaction_v1_from_peer() {\n    let result = run_transaction_acceptor(TestScenario::FromPeerInvalidTransactionZeroPayment(\n        TxnType::V1,\n    ))\n    .await;\n\n    assert!(matches!(\n        result,\n        Err(super::Error::InvalidTransaction(InvalidTransaction::V1(\n            InvalidTransactionV1::InvalidPaymentAmount\n        )))\n    ))\n}\n\n#[tokio::test]\nasync fn should_accept_valid_deploy_from_peer_for_missing_account() {\n    let result =\n        run_transaction_acceptor(TestScenario::FromPeerMissingAccount(TxnType::Deploy)).await;\n    
assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_valid_transaction_v1_from_peer_for_missing_account() {\n    let result = run_transaction_acceptor(TestScenario::FromPeerMissingAccount(TxnType::V1)).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_valid_deploy_from_peer_for_account_with_invalid_associated_keys() {\n    let result = run_transaction_acceptor(TestScenario::FromPeerAccountWithInvalidAssociatedKeys(\n        TxnType::Deploy,\n    ))\n    .await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_valid_transaction_v1_from_peer_for_account_with_invalid_associated_keys() {\n    let result = run_transaction_acceptor(TestScenario::FromPeerAccountWithInvalidAssociatedKeys(\n        TxnType::V1,\n    ))\n    .await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_valid_deploy_from_peer_for_account_with_insufficient_weight() {\n    let result = run_transaction_acceptor(TestScenario::FromPeerAccountWithInsufficientWeight(\n        TxnType::Deploy,\n    ))\n    .await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_valid_transaction_v1_from_peer_for_account_with_insufficient_weight() {\n    let result = run_transaction_acceptor(TestScenario::FromPeerAccountWithInsufficientWeight(\n        TxnType::V1,\n    ))\n    .await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_valid_deploy_from_client() {\n    let result =\n        run_transaction_acceptor(TestScenario::FromClientValidTransaction(TxnType::Deploy)).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_valid_transaction_v1_from_client() {\n    let result =\n        run_transaction_acceptor(TestScenario::FromClientValidTransaction(TxnType::V1)).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_reject_invalid_deploy_from_client() {\n    let result =\n        
run_transaction_acceptor(TestScenario::FromClientInvalidTransaction(TxnType::Deploy)).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::InvalidTransaction(\n            InvalidTransaction::Deploy(_)\n        ))\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_invalid_transaction_v1_from_client() {\n    let result =\n        run_transaction_acceptor(TestScenario::FromClientInvalidTransaction(TxnType::V1)).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::InvalidTransaction(InvalidTransaction::V1(_)))\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_invalid_transaction_v1_zero_payment_from_client() {\n    let result = run_transaction_acceptor(TestScenario::FromClientInvalidTransactionZeroPayment(\n        TxnType::V1,\n    ))\n    .await;\n    assert!(matches!(\n        result,\n        Err(super::Error::InvalidTransaction(InvalidTransaction::V1(\n            InvalidTransactionV1::InvalidPaymentAmount\n        )))\n    ))\n}\n\n#[tokio::test]\nasync fn should_accept_slightly_future_dated_deploy_from_client() {\n    let result = run_transaction_acceptor(TestScenario::FromClientSlightlyFutureDatedTransaction(\n        TxnType::Deploy,\n    ))\n    .await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_slightly_future_dated_transaction_v1_from_client() {\n    let result = run_transaction_acceptor(TestScenario::FromClientSlightlyFutureDatedTransaction(\n        TxnType::V1,\n    ))\n    .await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_reject_future_dated_deploy_from_client() {\n    let result = run_transaction_acceptor(TestScenario::FromClientFutureDatedTransaction(\n        TxnType::Deploy,\n    ))\n    .await;\n    assert!(matches!(\n        result,\n        Err(super::Error::InvalidTransaction(\n            InvalidTransaction::Deploy(InvalidDeploy::TimestampInFuture { .. 
})\n        ))\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_future_dated_transaction_v1_from_client() {\n    let result =\n        run_transaction_acceptor(TestScenario::FromClientFutureDatedTransaction(TxnType::V1)).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::InvalidTransaction(InvalidTransaction::V1(\n            InvalidTransactionV1::TimestampInFuture { .. }\n        )))\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_valid_deploy_from_client_for_missing_account() {\n    let result =\n        run_transaction_acceptor(TestScenario::FromClientMissingAccount(TxnType::Deploy)).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::NoSuchAddressableEntity { .. },\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_valid_transaction_v1_from_client_for_missing_account() {\n    let result =\n        run_transaction_acceptor(TestScenario::FromClientMissingAccount(TxnType::V1)).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::NoSuchAddressableEntity { .. 
},\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_valid_deploy_from_client_for_account_with_invalid_associated_keys() {\n    let result = run_transaction_acceptor(\n        TestScenario::FromClientAccountWithInvalidAssociatedKeys(TxnType::Deploy),\n    )\n    .await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::InvalidAssociatedKeys,\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_valid_transaction_v1_from_client_for_account_with_invalid_associated_keys() {\n    let result = run_transaction_acceptor(\n        TestScenario::FromClientAccountWithInvalidAssociatedKeys(TxnType::V1),\n    )\n    .await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::InvalidAssociatedKeys,\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_valid_deploy_from_client_for_account_with_insufficient_weight() {\n    let result = run_transaction_acceptor(TestScenario::FromClientAccountWithInsufficientWeight(\n        TxnType::Deploy,\n    ))\n    .await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::InsufficientSignatureWeight,\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_valid_transaction_v1_from_client_for_account_with_insufficient_weight() {\n    let result = run_transaction_acceptor(TestScenario::FromClientAccountWithInsufficientWeight(\n        TxnType::V1,\n    ))\n    .await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::InsufficientSignatureWeight,\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_valid_deploy_from_client_for_insufficient_balance() {\n    let result =\n        
run_transaction_acceptor(TestScenario::FromClientInsufficientBalance(TxnType::Deploy))\n            .await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::InsufficientBalance { .. },\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_valid_transaction_v1_from_client_for_insufficient_balance() {\n    let result =\n        run_transaction_acceptor(TestScenario::FromClientInsufficientBalance(TxnType::V1)).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::InsufficientBalance { .. },\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_valid_deploy_from_client_for_unknown_balance() {\n    let result = run_transaction_acceptor(TestScenario::AccountWithUnknownBalance).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::UnknownBalance { .. 
},\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_accept_repeated_valid_deploy_from_peer() {\n    let result = run_transaction_acceptor(TestScenario::FromPeerRepeatedValidTransaction(\n        TxnType::Deploy,\n    ))\n    .await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_repeated_valid_transaction_v1_from_peer() {\n    let result =\n        run_transaction_acceptor(TestScenario::FromPeerRepeatedValidTransaction(TxnType::V1)).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_repeated_valid_deploy_from_client() {\n    let result = run_transaction_acceptor(TestScenario::FromClientRepeatedValidTransaction(\n        TxnType::Deploy,\n    ))\n    .await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_repeated_valid_transaction_v1_from_client() {\n    let result = run_transaction_acceptor(TestScenario::FromClientRepeatedValidTransaction(\n        TxnType::V1,\n    ))\n    .await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_deploy_with_valid_custom_payment_from_client() {\n    let test_scenario = TestScenario::FromClientCustomPaymentContract(ContractScenario::Valid);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_deploy_with_missing_custom_payment_contract_by_name_from_client() {\n    let test_scenario =\n        TestScenario::FromClientCustomPaymentContract(ContractScenario::MissingContractAtName);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_reject_deploy_with_missing_custom_payment_contract_by_hash_from_client() {\n    let test_scenario =\n        TestScenario::FromClientCustomPaymentContract(ContractScenario::MissingContractAtHash);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        
Err(super::Error::Parameters {\n            failure: ParameterFailure::NoSuchContractAtHash { .. },\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_deploy_with_missing_entry_point_custom_payment_from_client() {\n    let test_scenario =\n        TestScenario::FromClientCustomPaymentContract(ContractScenario::MissingEntryPoint);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::NoSuchEntryPoint { .. },\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_accept_deploy_with_valid_payment_contract_package_by_name_from_client() {\n    let test_scenario =\n        TestScenario::FromClientCustomPaymentContractPackage(ContractPackageScenario::Valid);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_deploy_with_missing_payment_contract_package_at_name_from_client() {\n    let test_scenario = TestScenario::FromClientCustomPaymentContractPackage(\n        ContractPackageScenario::MissingPackageAtName,\n    );\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_reject_deploy_with_missing_payment_contract_package_at_hash_from_client() {\n    let test_scenario = TestScenario::FromClientCustomPaymentContractPackage(\n        ContractPackageScenario::MissingPackageAtHash,\n    );\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::NoSuchPackageAtHash { .. 
},\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_accept_deploy_with_valid_session_contract_from_client() {\n    let test_scenario =\n        TestScenario::FromClientSessionContract(TxnType::Deploy, ContractScenario::Valid);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_transaction_v1_with_valid_session_contract_from_client() {\n    let test_scenario =\n        TestScenario::FromClientSessionContract(TxnType::V1, ContractScenario::Valid);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_deploy_with_missing_session_contract_by_name_from_client() {\n    let test_scenario = TestScenario::FromClientSessionContract(\n        TxnType::Deploy,\n        ContractScenario::MissingContractAtName,\n    );\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_transaction_v1_with_missing_session_contract_by_name_from_client() {\n    let test_scenario = TestScenario::FromClientSessionContract(\n        TxnType::V1,\n        ContractScenario::MissingContractAtName,\n    );\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_reject_deploy_with_missing_session_contract_by_hash_from_client() {\n    let test_scenario = TestScenario::FromClientSessionContract(\n        TxnType::Deploy,\n        ContractScenario::MissingContractAtHash,\n    );\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::NoSuchContractAtHash { .. 
},\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_transaction_v1_with_missing_session_contract_by_hash_from_client() {\n    let test_scenario = TestScenario::FromClientSessionContract(\n        TxnType::V1,\n        ContractScenario::MissingContractAtHash,\n    );\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::NoSuchContractAtHash { .. },\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_deploy_with_missing_entry_point_in_session_contract_from_client() {\n    let test_scenario = TestScenario::FromClientSessionContract(\n        TxnType::Deploy,\n        ContractScenario::MissingEntryPoint,\n    );\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::NoSuchEntryPoint { .. },\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_transaction_v1_with_missing_entry_point_in_session_contract_from_client() {\n    let test_scenario =\n        TestScenario::FromClientSessionContract(TxnType::V1, ContractScenario::MissingEntryPoint);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::NoSuchEntryPoint { .. 
},\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_accept_deploy_with_valid_session_contract_package_from_client() {\n    let test_scenario = TestScenario::FromClientSessionContractPackage(\n        TxnType::Deploy,\n        ContractPackageScenario::Valid,\n    );\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_transaction_v1_with_valid_session_contract_package_from_client() {\n    let test_scenario =\n        TestScenario::FromClientSessionContractPackage(TxnType::V1, ContractPackageScenario::Valid);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_deploy_with_missing_session_contract_package_at_name_from_client() {\n    let test_scenario = TestScenario::FromClientSessionContractPackage(\n        TxnType::Deploy,\n        ContractPackageScenario::MissingPackageAtName,\n    );\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_transaction_v1_with_missing_session_contract_package_at_name_from_client() {\n    let test_scenario = TestScenario::FromClientSessionContractPackage(\n        TxnType::V1,\n        ContractPackageScenario::MissingPackageAtName,\n    );\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_reject_deploy_with_missing_session_contract_package_at_hash_from_client() {\n    let test_scenario = TestScenario::FromClientSessionContractPackage(\n        TxnType::Deploy,\n        ContractPackageScenario::MissingPackageAtHash,\n    );\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::NoSuchPackageAtHash { .. 
},\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_transaction_v1_with_missing_session_contract_package_at_hash_from_client() {\n    let test_scenario = TestScenario::FromClientSessionContractPackage(\n        TxnType::V1,\n        ContractPackageScenario::MissingPackageAtHash,\n    );\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::NoSuchPackageAtHash { .. },\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_transaction_v1_with_missing_version_in_session_contract_package_from_client()\n{\n    let test_scenario = TestScenario::FromClientSessionContractPackage(\n        TxnType::V1,\n        ContractPackageScenario::MissingContractVersion,\n    );\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::MissingEntityAtVersion { .. 
},\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_accept_deploy_with_valid_custom_payment_from_peer() {\n    let test_scenario = TestScenario::FromPeerCustomPaymentContract(ContractScenario::Valid);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_deploy_with_missing_custom_payment_contract_by_name_from_peer() {\n    let test_scenario =\n        TestScenario::FromPeerCustomPaymentContract(ContractScenario::MissingContractAtName);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_reject_deploy_with_missing_custom_payment_contract_by_hash_from_peer() {\n    let test_scenario =\n        TestScenario::FromPeerCustomPaymentContract(ContractScenario::MissingContractAtHash);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::NoSuchContractAtHash { .. },\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_deploy_with_missing_entry_point_custom_payment_from_peer() {\n    let test_scenario =\n        TestScenario::FromPeerCustomPaymentContract(ContractScenario::MissingEntryPoint);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::NoSuchEntryPoint { .. 
},\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_accept_deploy_with_valid_payment_contract_package_by_name_from_peer() {\n    let test_scenario =\n        TestScenario::FromPeerCustomPaymentContractPackage(ContractPackageScenario::Valid);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_deploy_with_missing_payment_contract_package_at_name_from_peer() {\n    let test_scenario = TestScenario::FromPeerCustomPaymentContractPackage(\n        ContractPackageScenario::MissingPackageAtName,\n    );\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_reject_deploy_with_missing_payment_contract_package_at_hash_from_peer() {\n    let test_scenario = TestScenario::FromPeerCustomPaymentContractPackage(\n        ContractPackageScenario::MissingPackageAtHash,\n    );\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::NoSuchPackageAtHash { .. 
},\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_accept_deploy_with_valid_session_contract_from_peer() {\n    let test_scenario =\n        TestScenario::FromPeerSessionContract(TxnType::Deploy, ContractScenario::Valid);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_transaction_v1_with_valid_session_contract_from_peer() {\n    let test_scenario = TestScenario::FromPeerSessionContract(TxnType::V1, ContractScenario::Valid);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_deploy_with_missing_session_contract_by_name_from_peer() {\n    let test_scenario = TestScenario::FromPeerSessionContract(\n        TxnType::Deploy,\n        ContractScenario::MissingContractAtName,\n    );\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_transaction_v1_with_missing_session_contract_by_name_from_peer() {\n    let test_scenario =\n        TestScenario::FromPeerSessionContract(TxnType::V1, ContractScenario::MissingContractAtName);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_reject_deploy_with_missing_session_contract_by_hash_from_peer() {\n    let test_scenario = TestScenario::FromPeerSessionContract(\n        TxnType::Deploy,\n        ContractScenario::MissingContractAtHash,\n    );\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::NoSuchContractAtHash { .. 
},\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_transaction_v1_with_missing_session_contract_by_hash_from_peer() {\n    let test_scenario =\n        TestScenario::FromPeerSessionContract(TxnType::V1, ContractScenario::MissingContractAtHash);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::NoSuchContractAtHash { .. },\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_deploy_with_missing_entry_point_in_session_contract_from_peer() {\n    let test_scenario =\n        TestScenario::FromPeerSessionContract(TxnType::Deploy, ContractScenario::MissingEntryPoint);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::NoSuchEntryPoint { .. },\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_transaction_v1_with_missing_entry_point_in_session_contract_from_peer() {\n    let test_scenario =\n        TestScenario::FromPeerSessionContract(TxnType::V1, ContractScenario::MissingEntryPoint);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::NoSuchEntryPoint { .. 
},\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_accept_deploy_with_valid_session_contract_package_from_peer() {\n    let test_scenario = TestScenario::FromPeerSessionContractPackage(\n        TxnType::Deploy,\n        ContractPackageScenario::Valid,\n    );\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_transaction_v1_with_valid_session_contract_package_from_peer() {\n    let test_scenario =\n        TestScenario::FromPeerSessionContractPackage(TxnType::V1, ContractPackageScenario::Valid);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_deploy_with_missing_session_contract_package_at_name_from_peer() {\n    let test_scenario = TestScenario::FromPeerSessionContractPackage(\n        TxnType::Deploy,\n        ContractPackageScenario::MissingPackageAtName,\n    );\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_transaction_v1_with_missing_session_contract_package_at_name_from_peer() {\n    let test_scenario = TestScenario::FromPeerSessionContractPackage(\n        TxnType::V1,\n        ContractPackageScenario::MissingPackageAtName,\n    );\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_reject_deploy_with_missing_session_contract_package_at_hash_from_peer() {\n    let test_scenario = TestScenario::FromPeerSessionContractPackage(\n        TxnType::Deploy,\n        ContractPackageScenario::MissingPackageAtHash,\n    );\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::NoSuchPackageAtHash { .. 
},\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_transaction_v1_with_missing_session_contract_package_at_hash_from_peer() {\n    let test_scenario = TestScenario::FromPeerSessionContractPackage(\n        TxnType::V1,\n        ContractPackageScenario::MissingPackageAtHash,\n    );\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::NoSuchPackageAtHash { .. },\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_transaction_v1_with_missing_version_in_session_contract_package_from_peer() {\n    let test_scenario = TestScenario::FromPeerSessionContractPackage(\n        TxnType::V1,\n        ContractPackageScenario::MissingContractVersion,\n    );\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::MissingEntityAtVersion { .. 
},\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_deploy_with_empty_module_bytes_in_session() {\n    let test_scenario = TestScenario::DeployWithEmptySessionModuleBytes;\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::Deploy(DeployParameterFailure::MissingModuleBytes),\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_deploy_with_insufficient_payment() {\n    let test_scenario = TestScenario::DeployWithPaymentOne;\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::InvalidTransaction(\n            InvalidTransaction::Deploy(InvalidDeploy::InvalidPaymentAmount)\n        ))\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_deploy_with_transfer_in_payment() {\n    let test_scenario = TestScenario::DeployWithNativeTransferInPayment;\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::Deploy(DeployParameterFailure::InvalidPaymentVariant),\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_deploy_without_payment_amount() {\n    let test_scenario = TestScenario::DeployWithoutPaymentAmount;\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::Deploy(DeployParameterFailure::MissingPaymentAmount),\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_deploy_with_mangled_payment_amount() {\n    let test_scenario = TestScenario::DeployWithMangledPaymentAmount;\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        
Err(super::Error::Parameters {\n            failure: ParameterFailure::Deploy(DeployParameterFailure::FailedToParsePaymentAmount),\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_deploy_without_transfer_amount() {\n    let test_scenario = TestScenario::DeployWithoutTransferAmount;\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::InvalidTransaction(\n            InvalidTransaction::Deploy(InvalidDeploy::MissingTransferAmount)\n        ))\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_deploy_without_transfer_target() {\n    let test_scenario = TestScenario::DeployWithoutTransferTarget;\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::Deploy(DeployParameterFailure::MissingTransferTarget),\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_deploy_with_mangled_transfer_amount() {\n    let test_scenario = TestScenario::DeployWithMangledTransferAmount;\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::InvalidTransaction(\n            InvalidTransaction::Deploy(InvalidDeploy::FailedToParseTransferAmount)\n        ))\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_expired_deploy_from_client() {\n    let test_scenario = TestScenario::FromClientExpired(TxnType::Deploy);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(result, Err(super::Error::Expired { .. })))\n}\n\n#[tokio::test]\nasync fn should_reject_expired_transaction_v1_from_client() {\n    let test_scenario = TestScenario::FromClientExpired(TxnType::V1);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(result, Err(super::Error::Expired { .. 
})))\n}\n\n#[tokio::test]\nasync fn should_accept_expired_deploy_from_peer() {\n    let test_scenario = TestScenario::FromPeerExpired(TxnType::Deploy);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_expired_transaction_v1_from_peer() {\n    let test_scenario = TestScenario::FromPeerExpired(TxnType::V1);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\n#[should_panic]\nasync fn should_panic_when_balance_checking_for_deploy_sent_by_peer() {\n    let test_scenario = TestScenario::BalanceCheckForDeploySentByPeer;\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_deploy_signed_by_admin_from_client() {\n    let test_scenario = TestScenario::FromClientSignedByAdmin(TxnType::Deploy);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_accept_transaction_v1_signed_by_admin_from_client() {\n    let test_scenario = TestScenario::FromClientSignedByAdmin(TxnType::V1);\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(result.is_ok())\n}\n\n#[tokio::test]\nasync fn should_reject_transaction_v1_with_invalid_pricing_mode() {\n    let test_scenario = TestScenario::InvalidPricingModeForTransactionV1;\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::InvalidTransaction(InvalidTransaction::V1(\n            InvalidTransactionV1::InvalidPricingMode { .. 
}\n        )))\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_transaction_v1_with_too_low_gas_price_tolerance() {\n    let test_scenario = TestScenario::TooLowGasPriceToleranceForTransactionV1;\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::InvalidTransaction(InvalidTransaction::V1(\n            InvalidTransactionV1::GasPriceToleranceTooLow { .. }\n        )))\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_transaction_v1_with_insufficient_payment() {\n    let test_scenario = TestScenario::TransactionWithPaymentOne;\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::InvalidTransaction(InvalidTransaction::V1(\n            InvalidTransactionV1::InvalidPaymentAmount\n        )))\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_deploy_with_too_low_gas_price_tolerance() {\n    let test_scenario = TestScenario::TooLowGasPriceToleranceForDeploy;\n    let result = run_transaction_acceptor(test_scenario).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::InvalidTransaction(\n            InvalidTransaction::Deploy(InvalidDeploy::GasPriceToleranceTooLow { .. 
})\n        ))\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_transaction_with_unexpected_fields() {\n    let result = run_transaction_acceptor(TestScenario::InvalidFields).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::InvalidTransaction(InvalidTransaction::V1(\n            InvalidTransactionV1::UnexpectedTransactionFieldEntries\n        )))\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_transaction_from_peer_with_unexpected_fields() {\n    let result = run_transaction_acceptor(TestScenario::InvalidFieldsFromPeer).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::InvalidTransaction(InvalidTransaction::V1(\n            InvalidTransactionV1::UnexpectedTransactionFieldEntries\n        )))\n    ))\n}\n\n#[tokio::test]\nasync fn should_reject_transaction_with_invalid_transaction_args() {\n    let result = run_transaction_acceptor(TestScenario::InvalidArgumentsKind).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::InvalidTransaction(InvalidTransaction::V1(\n            InvalidTransactionV1::ExpectedNamedArguments\n        )))\n    ));\n}\n\n#[tokio::test]\nasync fn should_reject_wasm_transaction_with_limited_too_big_payment() {\n    let result = run_transaction_acceptor(TestScenario::WasmTransactionWithTooBigPayment).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::InvalidTransaction(InvalidTransaction::V1(\n            InvalidTransactionV1::NoLaneMatch\n        )))\n    ));\n}\n\n#[tokio::test]\nasync fn should_reject_deploy_with_payment_amount_larger_than_max_wasm_lane_limit() {\n    let result = run_transaction_acceptor(TestScenario::WasmDeployWithTooBigPayment).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::InvalidTransaction(\n            InvalidTransaction::Deploy(InvalidDeploy::NoLaneMatch)\n        ))\n    ));\n}\n\n#[tokio::test]\nasync fn should_reject_native_delegate_with_exceeding_amount() {\n    let result = 
run_transaction_acceptor(TestScenario::DelegateExceedingMaximumDelegation).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::InvalidTransaction(InvalidTransaction::V1(\n            InvalidTransactionV1::InvalidDelegationAmount { .. }\n        )))\n    ));\n}\n\n#[tokio::test]\nasync fn should_reject_native_redelegate_with_exceeding_amount() {\n    let result = run_transaction_acceptor(TestScenario::RedelegateExceedingMaximumDelegation).await;\n    assert!(matches!(\n        result,\n        Err(super::Error::InvalidTransaction(InvalidTransaction::V1(\n            InvalidTransactionV1::InvalidDelegationAmount { .. }\n        )))\n    ));\n}\n\n#[tokio::test]\nasync fn foobar() {\n    let result = run_transaction_acceptor(TestScenario::VmCasperV2ByPackageHash).await;\n    assert!(\n        matches!(\n            result,\n            Err(super::Error::InvalidTransaction(InvalidTransaction::V1(\n                InvalidTransactionV1::UnsupportedInvocationTarget { id: Some(_) }\n            )))\n        ),\n        \"{result:?}\"\n    );\n}\n\n#[tokio::test]\nasync fn should_fail_if_package_doesnt_exist_by_hash() {\n    let result = run_transaction_acceptor(TestScenario::V1ByPackage(\n        HashOrName::Hash,\n        None,\n        None,\n        ContractVersionExistance::PackageDoesNotExist,\n    ))\n    .await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::NoSuchPackageAtHash { .. 
},\n            ..\n        })\n    ));\n}\n\n#[tokio::test]\nasync fn should_not_fail_if_package_doesnt_exist_by_name() {\n    let result = run_transaction_acceptor(TestScenario::V1ByPackage(\n        HashOrName::Name,\n        None,\n        None,\n        ContractVersionExistance::PackageDoesNotExist,\n    ))\n    .await;\n    assert!(result.is_ok());\n}\n\n#[tokio::test]\nasync fn should_approve_if_transaction_references_no_version_or_major() {\n    let result = run_transaction_acceptor(TestScenario::V1ByPackage(\n        HashOrName::Hash,\n        None,\n        None,\n        ContractVersionExistance::PackageExists(true, BTreeMap::new(), BTreeSet::new()),\n    ))\n    .await;\n    assert!(result.is_ok());\n}\n\n#[tokio::test]\nasync fn should_approve_if_transaction_references_package_by_name() {\n    let result = run_transaction_acceptor(TestScenario::V1ByPackage(\n        HashOrName::Name,\n        None,\n        None,\n        ContractVersionExistance::PackageExists(true, BTreeMap::new(), BTreeSet::new()),\n    ))\n    .await;\n    assert!(result.is_ok());\n}\n\n#[tokio::test]\nasync fn should_approve_if_transaction_references_version_and_no_major() {\n    let result = run_transaction_acceptor(TestScenario::V1ByPackage(\n        HashOrName::Hash,\n        Some(1),\n        None,\n        ContractVersionExistance::PackageExists(true, BTreeMap::new(), BTreeSet::new()),\n    ))\n    .await;\n    assert!(result.is_ok());\n}\n\n#[tokio::test]\nasync fn should_fail_when_asking_for_non_active_exact_version() {\n    let versions = BTreeMap::from([\n        (ContractVersionKey::new(1, 1), ContractHash::from([2; 32])),\n        (ContractVersionKey::new(2, 1), ContractHash::from([3; 32])),\n    ]);\n    let disabled = BTreeSet::from_iter(vec![ContractVersionKey::new(1, 1)].into_iter());\n    let result = run_transaction_acceptor(TestScenario::V1ByPackage(\n        HashOrName::Hash,\n        Some(2),\n        Some(2), //Assuming current protocol version >= 2\n        
ContractVersionExistance::PackageExists(false, versions, disabled),\n    ))\n    .await;\n    assert!(matches!(\n        result,\n        Err(super::Error::Parameters {\n            failure: ParameterFailure::MissingEntityAtVersion { .. },\n            ..\n        })\n    ))\n}\n\n#[tokio::test]\nasync fn should_succeed_when_asking_for_active_exact_version() {\n    let versions = BTreeMap::from([\n        (ContractVersionKey::new(1, 1), ContractHash::from([2; 32])),\n        (ContractVersionKey::new(2, 1), ContractHash::from([3; 32])),\n        (ContractVersionKey::new(2, 2), ContractHash::from([4; 32])),\n    ]);\n    let disabled = BTreeSet::from_iter(vec![ContractVersionKey::new(1, 1)].into_iter());\n    let result = run_transaction_acceptor(TestScenario::V1ByPackage(\n        HashOrName::Hash,\n        Some(2),\n        Some(2), //Assuming current protocol version >= 2\n        ContractVersionExistance::PackageExists(true, versions, disabled),\n    ))\n    .await;\n    assert!(result.is_ok())\n}\n"
  },
  {
    "path": "node/src/components/transaction_acceptor.rs",
    "content": "mod config;\nmod error;\nmod event;\nmod metrics;\nmod tests;\n\nuse std::{collections::BTreeSet, fmt::Debug, sync::Arc};\n\nuse casper_types::{\n    contracts::ProtocolVersionMajor, ContractRuntimeTag, InvalidTransaction, InvalidTransactionV1,\n};\nuse datasize::DataSize;\nuse prometheus::Registry;\nuse tracing::{debug, error, trace};\n\nuse casper_storage::data_access_layer::{balance::BalanceHandling, BalanceRequest, ProofHandling};\nuse casper_types::{\n    account::AccountHash, addressable_entity::AddressableEntity, system::auction::ARG_AMOUNT,\n    AddressableEntityHash, AddressableEntityIdentifier, BlockHeader, Chainspec, EntityAddr,\n    EntityKind, EntityVersion, EntityVersionKey, ExecutableDeployItem,\n    ExecutableDeployItemIdentifier, InitiatorAddr, Package, PackageAddr, PackageHash,\n    PackageIdentifier, Timestamp, Transaction, TransactionEntryPoint, TransactionInvocationTarget,\n    TransactionTarget, DEFAULT_ENTRY_POINT_NAME, U512,\n};\n\nuse crate::{\n    components::Component,\n    effect::{\n        announcements::{FatalAnnouncement, TransactionAcceptorAnnouncement},\n        requests::{ContractRuntimeRequest, StorageRequest},\n        EffectBuilder, EffectExt, Effects, Responder,\n    },\n    fatal,\n    types::MetaTransaction,\n    utils::Source,\n    NodeRng,\n};\n\npub(crate) use config::Config;\npub(crate) use error::{DeployParameterFailure, Error, ParameterFailure};\npub(crate) use event::{Event, EventMetadata};\n\nconst COMPONENT_NAME: &str = \"transaction_acceptor\";\n\nconst ARG_TARGET: &str = \"target\";\n\n/// A helper trait constraining `TransactionAcceptor` compatible reactor events.\npub(crate) trait ReactorEventT:\n    From<Event>\n    + From<TransactionAcceptorAnnouncement>\n    + From<StorageRequest>\n    + From<ContractRuntimeRequest>\n    + From<FatalAnnouncement>\n    + Send\n{\n}\n\nimpl<REv> ReactorEventT for REv where\n    REv: From<Event>\n        + From<TransactionAcceptorAnnouncement>\n        + 
From<StorageRequest>\n        + From<ContractRuntimeRequest>\n        + From<FatalAnnouncement>\n        + Send\n{\n}\n\n/// The `TransactionAcceptor` is the component which handles all new `Transaction`s immediately\n/// after they're received by this node, regardless of whether they were provided by a peer or a\n/// client, unless they were actively retrieved by this node via a fetch request (in which case the\n/// fetcher performs the necessary validation and stores it).\n///\n/// It validates a new `Transaction` as far as possible, stores it if valid, then announces the\n/// newly-accepted `Transaction`.\n#[derive(Debug, DataSize)]\npub struct TransactionAcceptor {\n    acceptor_config: Config,\n    chainspec: Arc<Chainspec>,\n    administrators: BTreeSet<AccountHash>,\n    #[data_size(skip)]\n    metrics: metrics::Metrics,\n    balance_hold_interval: u64,\n}\n\nimpl TransactionAcceptor {\n    pub(crate) fn new(\n        acceptor_config: Config,\n        chainspec: Arc<Chainspec>,\n        registry: &Registry,\n    ) -> Result<Self, prometheus::Error> {\n        let administrators = chainspec\n            .core_config\n            .administrators\n            .iter()\n            .map(|public_key| public_key.to_account_hash())\n            .collect();\n        let balance_hold_interval = chainspec.core_config.gas_hold_interval.millis();\n        Ok(TransactionAcceptor {\n            acceptor_config,\n            chainspec,\n            administrators,\n            metrics: metrics::Metrics::new(registry)?,\n            balance_hold_interval,\n        })\n    }\n\n    /// Handles receiving a new `Transaction` from the given source.\n    fn accept<REv: ReactorEventT>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        input_transaction: Transaction,\n        source: Source,\n        maybe_responder: Option<Responder<Result<(), Error>>>,\n    ) -> Effects<Event> {\n        trace!(%source, %input_transaction, \"checking transaction before 
accepting\");\n        let verification_start_timestamp = Timestamp::now();\n        let transaction_config = &self.chainspec.as_ref().transaction_config;\n        let maybe_meta_transaction = MetaTransaction::from_transaction(\n            &input_transaction,\n            self.chainspec.as_ref().core_config.pricing_handling,\n            transaction_config,\n        );\n        let meta_transaction = match maybe_meta_transaction {\n            Ok(transaction) => transaction,\n            Err(err) => {\n                return self.reject_transaction_direct(\n                    effect_builder,\n                    input_transaction,\n                    source,\n                    maybe_responder,\n                    verification_start_timestamp,\n                    Error::InvalidTransaction(err),\n                );\n            }\n        };\n\n        let event_metadata = Box::new(EventMetadata::new(\n            input_transaction,\n            meta_transaction.clone(),\n            source,\n            maybe_responder,\n            verification_start_timestamp,\n        ));\n\n        if meta_transaction.is_install_or_upgrade()\n            && meta_transaction.is_v2_wasm()\n            && meta_transaction.seed().is_none()\n        {\n            return self.reject_transaction(\n                effect_builder,\n                *event_metadata,\n                Error::InvalidTransaction(InvalidTransaction::V1(\n                    InvalidTransactionV1::MissingSeed,\n                )),\n            );\n        }\n\n        let is_config_compliant = event_metadata\n            .meta_transaction\n            .is_config_compliant(\n                &self.chainspec,\n                self.acceptor_config.timestamp_leeway,\n                verification_start_timestamp,\n            )\n            .map_err(Error::InvalidTransaction);\n\n        if let Err(error) = is_config_compliant {\n            return self.reject_transaction(effect_builder, *event_metadata, 
error);\n        }\n\n        // We only perform expiry checks on transactions received from the client.\n        let current_node_timestamp = event_metadata.verification_start_timestamp;\n        if event_metadata.source.is_client()\n            && event_metadata.transaction.expired(current_node_timestamp)\n        {\n            let expiry_timestamp = event_metadata.transaction.expires();\n            return self.reject_transaction(\n                effect_builder,\n                *event_metadata,\n                Error::Expired {\n                    expiry_timestamp,\n                    current_node_timestamp,\n                },\n            );\n        }\n\n        effect_builder\n            .get_highest_complete_block_header_from_storage()\n            .event(move |maybe_block_header| Event::GetBlockHeaderResult {\n                event_metadata,\n                maybe_block_header: maybe_block_header.map(Box::new),\n            })\n    }\n\n    fn handle_get_block_header_result<REv: ReactorEventT>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        event_metadata: Box<EventMetadata>,\n        maybe_block_header: Option<Box<BlockHeader>>,\n    ) -> Effects<Event> {\n        let mut effects = Effects::new();\n\n        let block_header = match maybe_block_header {\n            Some(block_header) => block_header,\n            None => {\n                // this should be unreachable per current design of the system\n                if let Some(responder) = event_metadata.maybe_responder {\n                    effects.extend(responder.respond(Err(Error::EmptyBlockchain)).ignore());\n                }\n                return effects;\n            }\n        };\n\n        if event_metadata.source.is_client() {\n            let account_hash = match event_metadata.transaction.initiator_addr() {\n                InitiatorAddr::PublicKey(public_key) => public_key.to_account_hash(),\n                InitiatorAddr::AccountHash(account_hash) => 
account_hash,\n            };\n            let entity_addr = EntityAddr::Account(account_hash.value());\n            effect_builder\n                .get_addressable_entity(*block_header.state_root_hash(), entity_addr)\n                .event(move |result| Event::GetAddressableEntityResult {\n                    event_metadata,\n                    maybe_entity: result.into_option(),\n                    block_header,\n                })\n        } else {\n            self.verify_payment(effect_builder, event_metadata, block_header)\n        }\n    }\n\n    fn handle_get_entity_result<REv: ReactorEventT>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        event_metadata: Box<EventMetadata>,\n        block_header: Box<BlockHeader>,\n        maybe_entity: Option<AddressableEntity>,\n    ) -> Effects<Event> {\n        match maybe_entity {\n            None => {\n                let initiator_addr = event_metadata.transaction.initiator_addr();\n                let error = Error::parameter_failure(\n                    &block_header,\n                    ParameterFailure::NoSuchAddressableEntity { initiator_addr },\n                );\n                self.reject_transaction(effect_builder, *event_metadata, error)\n            }\n            Some(entity) => {\n                if let Err(parameter_failure) =\n                    is_authorized_entity(&entity, &self.administrators, &event_metadata)\n                {\n                    let error = Error::parameter_failure(&block_header, parameter_failure);\n                    return self.reject_transaction(effect_builder, *event_metadata, error);\n                }\n                let protocol_version = block_header.protocol_version();\n                let balance_handling = BalanceHandling::Available;\n                let proof_handling = ProofHandling::NoProofs;\n                let balance_request = BalanceRequest::from_purse(\n                    *block_header.state_root_hash(),\n             
       protocol_version,\n                    entity.main_purse(),\n                    balance_handling,\n                    proof_handling,\n                );\n                effect_builder\n                    .get_balance(balance_request)\n                    .event(move |balance_result| Event::GetBalanceResult {\n                        event_metadata,\n                        block_header,\n                        maybe_balance: balance_result.available_balance().copied(),\n                    })\n            }\n        }\n    }\n\n    fn handle_get_balance_result<REv: ReactorEventT>(\n        &self,\n        effect_builder: EffectBuilder<REv>,\n        event_metadata: Box<EventMetadata>,\n        block_header: Box<BlockHeader>,\n        maybe_balance: Option<U512>,\n    ) -> Effects<Event> {\n        if !event_metadata.source.is_client() {\n            // This would only happen due to programmer error and should crash the node. Balance\n            // checks for transactions received from a peer will cause the network to stall.\n            return fatal!(\n                effect_builder,\n                \"Balance checks for transactions received from peers should never occur.\"\n            )\n            .ignore();\n        }\n        match maybe_balance {\n            None => {\n                let initiator_addr = event_metadata.transaction.initiator_addr();\n                let error = Error::parameter_failure(\n                    &block_header,\n                    ParameterFailure::UnknownBalance { initiator_addr },\n                );\n                self.reject_transaction(effect_builder, *event_metadata, error)\n            }\n            Some(balance) => {\n                let has_minimum_balance =\n                    balance >= self.chainspec.core_config.baseline_motes_amount_u512();\n                if !has_minimum_balance {\n                    let initiator_addr = event_metadata.transaction.initiator_addr();\n                    let 
error = Error::parameter_failure(\n                        &block_header,\n                        ParameterFailure::InsufficientBalance { initiator_addr },\n                    );\n                    self.reject_transaction(effect_builder, *event_metadata, error)\n                } else {\n                    self.verify_payment(effect_builder, event_metadata, block_header)\n                }\n            }\n        }\n    }\n\n    fn verify_payment<REv: ReactorEventT>(\n        &self,\n        effect_builder: EffectBuilder<REv>,\n        event_metadata: Box<EventMetadata>,\n        block_header: Box<BlockHeader>,\n    ) -> Effects<Event> {\n        // Only deploys need their payment code checked.\n        let payment_identifier = if let Transaction::Deploy(deploy) = &event_metadata.transaction {\n            if let Err(error) = deploy_payment_is_valid(deploy.payment(), &block_header) {\n                return self.reject_transaction(effect_builder, *event_metadata, error);\n            }\n            deploy.payment().identifier()\n        } else {\n            return self.verify_body(effect_builder, event_metadata, block_header);\n        };\n\n        match payment_identifier {\n            // We skip validation if the identifier is a named key, since that could yield a\n            // validation success at block X, then a validation failure at block X+1 (e.g. if the\n            // named key is deleted, or updated to point to an item which will fail subsequent\n            // validation).\n            ExecutableDeployItemIdentifier::Module\n            | ExecutableDeployItemIdentifier::Transfer\n            | ExecutableDeployItemIdentifier::AddressableEntity(\n                AddressableEntityIdentifier::Name(_),\n            )\n            | ExecutableDeployItemIdentifier::Package(PackageIdentifier::Name { .. 
})\n            | ExecutableDeployItemIdentifier::Package(PackageIdentifier::NameWithMajorVersion {\n                ..\n            }) => self.verify_body(effect_builder, event_metadata, block_header),\n            ExecutableDeployItemIdentifier::AddressableEntity(\n                AddressableEntityIdentifier::Hash(contract_hash),\n            ) => {\n                let entity_addr = EntityAddr::SmartContract(contract_hash.value());\n                effect_builder\n                    .get_addressable_entity(*block_header.state_root_hash(), entity_addr)\n                    .event(move |result| Event::GetContractResult {\n                        event_metadata,\n                        block_header,\n                        is_payment: true,\n                        contract_hash,\n                        maybe_entity: result.into_option(),\n                    })\n            }\n            ExecutableDeployItemIdentifier::AddressableEntity(\n                AddressableEntityIdentifier::Addr(entity_addr),\n            ) => effect_builder\n                .get_addressable_entity(*block_header.state_root_hash(), entity_addr)\n                .event(move |result| Event::GetAddressableEntityResult {\n                    event_metadata,\n                    block_header,\n                    maybe_entity: result.into_option(),\n                }),\n            ExecutableDeployItemIdentifier::Package(\n                ref contract_package_identifier @ PackageIdentifier::Hash { package_hash, .. 
},\n            )\n            | ExecutableDeployItemIdentifier::Package(\n                ref contract_package_identifier @ PackageIdentifier::HashWithMajorVersion {\n                    package_hash,\n                    ..\n                },\n            ) => {\n                let maybe_entity_version = contract_package_identifier.version();\n                let maybe_protocol_version_major =\n                    contract_package_identifier.protocol_version_major();\n                effect_builder\n                    .get_package(*block_header.state_root_hash(), package_hash.value())\n                    .event(move |maybe_package| Event::GetPackageResult {\n                        event_metadata,\n                        block_header,\n                        is_payment: true,\n                        package_hash,\n                        maybe_entity_version,\n                        maybe_protocol_version_major,\n                        maybe_package,\n                    })\n            }\n        }\n    }\n\n    fn verify_body<REv: ReactorEventT>(\n        &self,\n        effect_builder: EffectBuilder<REv>,\n        event_metadata: Box<EventMetadata>,\n        block_header: Box<BlockHeader>,\n    ) -> Effects<Event> {\n        match &event_metadata.meta_transaction {\n            MetaTransaction::Deploy(_) => {\n                self.verify_deploy_session(effect_builder, event_metadata, block_header)\n            }\n            MetaTransaction::V1(_) => {\n                self.verify_transaction_v1_body(effect_builder, event_metadata, block_header)\n            }\n        }\n    }\n\n    fn verify_deploy_session<REv: ReactorEventT>(\n        &self,\n        effect_builder: EffectBuilder<REv>,\n        event_metadata: Box<EventMetadata>,\n        block_header: Box<BlockHeader>,\n    ) -> Effects<Event> {\n        let session = match &event_metadata.meta_transaction {\n            MetaTransaction::Deploy(meta_deploy) => meta_deploy.session(),\n            
MetaTransaction::V1(txn) => {\n                error!(%txn, \"should only handle deploys in verify_deploy_session\");\n                return self.reject_transaction(\n                    effect_builder,\n                    *event_metadata,\n                    Error::ExpectedDeploy,\n                );\n            }\n        };\n\n        match session {\n            ExecutableDeployItem::Transfer { args } => {\n                // We rely on the `Deploy::is_config_compliant` to check\n                // that the transfer amount arg is present and is a valid U512.\n                if args.get(ARG_TARGET).is_none() {\n                    let error = Error::parameter_failure(\n                        &block_header,\n                        DeployParameterFailure::MissingTransferTarget.into(),\n                    );\n                    return self.reject_transaction(effect_builder, *event_metadata, error);\n                }\n            }\n            ExecutableDeployItem::ModuleBytes { module_bytes, .. } => {\n                if module_bytes.is_empty() {\n                    let error = Error::parameter_failure(\n                        &block_header,\n                        DeployParameterFailure::MissingModuleBytes.into(),\n                    );\n                    return self.reject_transaction(effect_builder, *event_metadata, error);\n                }\n            }\n            ExecutableDeployItem::StoredContractByHash { .. }\n            | ExecutableDeployItem::StoredContractByName { .. }\n            | ExecutableDeployItem::StoredVersionedContractByHash { .. }\n            | ExecutableDeployItem::StoredVersionedContractByName { .. } => (),\n        }\n\n        match session.identifier() {\n            // We skip validation if the identifier is a named key, since that could yield a\n            // validation success at block X, then a validation failure at block X+1 (e.g. 
if the\n            // named key is deleted, or updated to point to an item which will fail subsequent\n            // validation).\n            ExecutableDeployItemIdentifier::Module\n            | ExecutableDeployItemIdentifier::Transfer\n            | ExecutableDeployItemIdentifier::AddressableEntity(\n                AddressableEntityIdentifier::Name(_),\n            )\n            | ExecutableDeployItemIdentifier::Package(PackageIdentifier::Name { .. })\n            | ExecutableDeployItemIdentifier::Package(PackageIdentifier::NameWithMajorVersion {\n                ..\n            }) => self.validate_transaction_cryptography(effect_builder, event_metadata),\n            ExecutableDeployItemIdentifier::AddressableEntity(\n                AddressableEntityIdentifier::Hash(entity_hash),\n            ) => {\n                let entity_addr = EntityAddr::SmartContract(entity_hash.value());\n                effect_builder\n                    .get_addressable_entity(*block_header.state_root_hash(), entity_addr)\n                    .event(move |result| Event::GetContractResult {\n                        event_metadata,\n                        block_header,\n                        is_payment: false,\n                        contract_hash: entity_hash,\n                        maybe_entity: result.into_option(),\n                    })\n            }\n            ExecutableDeployItemIdentifier::AddressableEntity(\n                AddressableEntityIdentifier::Addr(entity_addr),\n            ) => effect_builder\n                .get_addressable_entity(*block_header.state_root_hash(), entity_addr)\n                .event(move |result| Event::GetAddressableEntityResult {\n                    event_metadata,\n                    block_header,\n                    maybe_entity: result.into_option(),\n                }),\n            ExecutableDeployItemIdentifier::Package(\n                ref package_identifier @ PackageIdentifier::Hash { package_hash, .. 
},\n            )\n            | ExecutableDeployItemIdentifier::Package(\n                ref package_identifier @ PackageIdentifier::HashWithMajorVersion {\n                    package_hash, ..\n                },\n            ) => {\n                let maybe_package_version = package_identifier.version();\n                effect_builder\n                    .get_package(*block_header.state_root_hash(), package_hash.value())\n                    .event(move |maybe_package| Event::GetPackageResult {\n                        event_metadata,\n                        block_header,\n                        is_payment: false,\n                        package_hash,\n                        maybe_entity_version: maybe_package_version,\n                        maybe_protocol_version_major: None,\n                        maybe_package,\n                    })\n            }\n        }\n    }\n\n    fn verify_transaction_v1_body<REv: ReactorEventT>(\n        &self,\n        effect_builder: EffectBuilder<REv>,\n        event_metadata: Box<EventMetadata>,\n        block_header: Box<BlockHeader>,\n    ) -> Effects<Event> {\n        enum NextStep {\n            GetContract(EntityAddr),\n            GetPackage(\n                PackageAddr,\n                Option<EntityVersion>,\n                Option<ProtocolVersionMajor>,\n            ),\n            CryptoValidation,\n        }\n\n        let next_step = match &event_metadata.meta_transaction {\n            MetaTransaction::Deploy(meta_deploy) => {\n                let deploy_hash = meta_deploy.deploy().hash();\n                error!(\n                    %deploy_hash,\n                    \"should only handle version 1 transactions in verify_transaction_v1_body\"\n                );\n                return self.reject_transaction(\n                    effect_builder,\n                    *event_metadata,\n                    Error::ExpectedTransactionV1,\n                );\n            }\n            
MetaTransaction::V1(txn) => match txn.target() {\n                TransactionTarget::Stored { id, .. } => match id {\n                    TransactionInvocationTarget::ByHash(entity_addr) => {\n                        NextStep::GetContract(EntityAddr::SmartContract(*entity_addr))\n                    }\n                    TransactionInvocationTarget::ByPackageHash {\n                        addr,\n                        version,\n                        protocol_version_major,\n                    } => NextStep::GetPackage(*addr, *version, *protocol_version_major),\n                    TransactionInvocationTarget::ByName(_)\n                    | TransactionInvocationTarget::ByPackageName { .. } => {\n                        NextStep::CryptoValidation\n                    }\n                },\n                TransactionTarget::Native | TransactionTarget::Session { .. } => {\n                    NextStep::CryptoValidation\n                }\n            },\n        };\n\n        match next_step {\n            NextStep::GetContract(entity_addr) => {\n                // Use `Key::Hash` variant so that we try to retrieve the entity as either an\n                // AddressableEntity, or fall back to retrieving an un-migrated Contract.\n                effect_builder\n                    .get_addressable_entity(*block_header.state_root_hash(), entity_addr)\n                    .event(move |result| Event::GetContractResult {\n                        event_metadata,\n                        block_header,\n                        is_payment: false,\n                        contract_hash: AddressableEntityHash::new(entity_addr.value()),\n                        maybe_entity: result.into_option(),\n                    })\n            }\n            NextStep::GetPackage(\n                package_addr,\n                maybe_entity_version,\n                maybe_protocol_version_major,\n            ) => effect_builder\n                
.get_package(*block_header.state_root_hash(), package_addr)\n                .event(move |maybe_package| Event::GetPackageResult {\n                    event_metadata,\n                    block_header,\n                    is_payment: false,\n                    package_hash: PackageHash::new(package_addr),\n                    maybe_entity_version,\n                    maybe_protocol_version_major,\n                    maybe_package,\n                }),\n            NextStep::CryptoValidation => {\n                self.validate_transaction_cryptography(effect_builder, event_metadata)\n            }\n        }\n    }\n\n    fn handle_get_contract_result<REv: ReactorEventT>(\n        &self,\n        effect_builder: EffectBuilder<REv>,\n        event_metadata: Box<EventMetadata>,\n        block_header: Box<BlockHeader>,\n        is_payment: bool,\n        contract_hash: AddressableEntityHash,\n        maybe_contract: Option<AddressableEntity>,\n    ) -> Effects<Event> {\n        let addressable_entity = match maybe_contract {\n            Some(addressable_entity) => addressable_entity,\n            None => {\n                let error = Error::parameter_failure(\n                    &block_header,\n                    ParameterFailure::NoSuchContractAtHash { contract_hash },\n                );\n                return self.reject_transaction(effect_builder, *event_metadata, error);\n            }\n        };\n\n        let maybe_entry_point_name = match &event_metadata.meta_transaction {\n            MetaTransaction::Deploy(meta_deploy) if is_payment => Some(\n                meta_deploy\n                    .deploy()\n                    .payment()\n                    .entry_point_name()\n                    .to_string(),\n            ),\n            MetaTransaction::Deploy(meta_deploy) => Some(\n                meta_deploy\n                    .deploy()\n                    .session()\n                    .entry_point_name()\n                    .to_string(),\n  
          ),\n            MetaTransaction::V1(_) if is_payment => {\n                error!(\"should not fetch a contract to validate payment logic for transaction v1s\");\n                None\n            }\n            MetaTransaction::V1(txn) => match txn.entry_point() {\n                TransactionEntryPoint::Call => Some(DEFAULT_ENTRY_POINT_NAME.to_owned()),\n                TransactionEntryPoint::Custom(name) => Some(name.clone()),\n                TransactionEntryPoint::Transfer\n                | TransactionEntryPoint::Burn\n                | TransactionEntryPoint::AddBid\n                | TransactionEntryPoint::WithdrawBid\n                | TransactionEntryPoint::Delegate\n                | TransactionEntryPoint::Undelegate\n                | TransactionEntryPoint::Redelegate\n                | TransactionEntryPoint::ActivateBid\n                | TransactionEntryPoint::ChangeBidPublicKey\n                | TransactionEntryPoint::AddReservations\n                | TransactionEntryPoint::CancelReservations => None,\n            },\n        };\n\n        match maybe_entry_point_name {\n            Some(entry_point_name) => effect_builder\n                .does_entry_point_exist(\n                    *block_header.state_root_hash(),\n                    contract_hash.value(),\n                    entry_point_name.clone(),\n                )\n                .event(move |entry_point_result| Event::GetEntryPointResult {\n                    event_metadata,\n                    block_header,\n                    is_payment,\n                    entry_point_name,\n                    addressable_entity,\n                    entry_point_exists: entry_point_result.is_success(),\n                }),\n\n            None => {\n                if is_payment {\n                    return self.verify_body(effect_builder, event_metadata, block_header);\n                }\n                self.validate_transaction_cryptography(effect_builder, event_metadata)\n           
 }\n        }\n    }\n\n    #[allow(clippy::too_many_arguments)]\n    fn handle_get_entry_point_result<REv: ReactorEventT>(\n        &self,\n        effect_builder: EffectBuilder<REv>,\n        event_metadata: Box<EventMetadata>,\n        block_header: Box<BlockHeader>,\n        is_payment: bool,\n        entry_point_name: String,\n        addressable_entity: AddressableEntity,\n        entry_point_exist: bool,\n    ) -> Effects<Event> {\n        match addressable_entity.kind() {\n            EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1)\n            | EntityKind::Account(_)\n            | EntityKind::System(_) => {\n                if !entry_point_exist {\n                    let error = Error::parameter_failure(\n                        &block_header,\n                        ParameterFailure::NoSuchEntryPoint { entry_point_name },\n                    );\n                    return self.reject_transaction(effect_builder, *event_metadata, error);\n                }\n                if is_payment {\n                    return self.verify_body(effect_builder, event_metadata, block_header);\n                }\n                self.validate_transaction_cryptography(effect_builder, event_metadata)\n            }\n            EntityKind::SmartContract(ContractRuntimeTag::VmCasperV2) => {\n                // Engine V2 does not store entrypoint information on chain and relies entirely on\n                // the Wasm itself.\n                self.validate_transaction_cryptography(effect_builder, event_metadata)\n            }\n        }\n    }\n\n    #[allow(clippy::too_many_arguments)]\n    fn handle_get_package_result<REv: ReactorEventT>(\n        &self,\n        effect_builder: EffectBuilder<REv>,\n        event_metadata: Box<EventMetadata>,\n        block_header: Box<BlockHeader>,\n        is_payment: bool,\n        package_hash: PackageHash,\n        maybe_contract_version: Option<EntityVersion>,\n        maybe_protocol_version_major: 
Option<ProtocolVersionMajor>,\n        maybe_package: Option<Box<Package>>,\n    ) -> Effects<Event> {\n        let package = match maybe_package {\n            Some(package) => package,\n            None => {\n                let error = Error::parameter_failure(\n                    &block_header,\n                    ParameterFailure::NoSuchPackageAtHash { package_hash },\n                );\n                return self.reject_transaction(effect_builder, *event_metadata, error);\n            }\n        };\n\n        let maybe_entity_version_key = match self.resolve_entity_version_key(\n            package.as_ref(),\n            maybe_contract_version,\n            maybe_protocol_version_major,\n            &block_header,\n        ) {\n            Ok(maybe) => maybe,\n            Err(err) => return self.reject_transaction(effect_builder, *event_metadata, *err),\n        };\n        let entity_version_key = match maybe_entity_version_key {\n            Some(version) => version,\n            None => {\n                // We continue to the next step in None case due to the subjective\n                // nature of global state.\n                if is_payment {\n                    return self.verify_body(effect_builder, event_metadata, block_header);\n                }\n                return self.validate_transaction_cryptography(effect_builder, event_metadata);\n            }\n        };\n\n        if package.is_version_missing(entity_version_key) {\n            let error = Error::parameter_failure(\n                &block_header,\n                ParameterFailure::MissingEntityAtVersion { entity_version_key },\n            );\n            return self.reject_transaction(effect_builder, *event_metadata, error);\n        }\n\n        if !package.is_version_enabled(entity_version_key) {\n            let error = Error::parameter_failure(\n                &block_header,\n                ParameterFailure::DisabledEntityAtVersion { entity_version_key },\n            );\n 
           return self.reject_transaction(effect_builder, *event_metadata, error);\n        }\n\n        match package.lookup_entity_hash(entity_version_key) {\n            Some(&entity_addr) => {\n                let contract_hash = AddressableEntityHash::new(entity_addr.value());\n                effect_builder\n                    .get_addressable_entity(*block_header.state_root_hash(), entity_addr)\n                    .event(move |result| Event::GetContractResult {\n                        event_metadata,\n                        block_header,\n                        is_payment,\n                        contract_hash,\n                        maybe_entity: result.into_option(),\n                    })\n            }\n            None => {\n                let error = Error::parameter_failure(\n                    &block_header,\n                    ParameterFailure::InvalidEntityAtVersion { entity_version_key },\n                );\n                self.reject_transaction(effect_builder, *event_metadata, error)\n            }\n        }\n    }\n\n    /// Resolves EntityVersionKey for a given contract. Returning Some(k) means that k is an enabled\n    /// version matching the criteria. 
Returning None doesn't mean there is no fit - it means\n    /// that we can't for sure determine the version key since the state at execution might be\n    /// different - we must assume that a valid EntityVersionKey might be present for the package or\n    /// error out during execution\n    fn resolve_entity_version_key(\n        &self,\n        package: &Package,\n        maybe_entity_version: Option<EntityVersion>,\n        maybe_protocol_version_major: Option<ProtocolVersionMajor>,\n        block_header: &BlockHeader,\n    ) -> Result<Option<EntityVersionKey>, Box<Error>> {\n        let entity_version_key = match (maybe_entity_version, maybe_protocol_version_major) {\n            (Some(entity_version), Some(major)) => EntityVersionKey::new(major, entity_version),\n            (Some(_), None) | (None, Some(_)) | (None, None) => return Ok(None), /* In this case\n                                                                                  * the runtime\n                                                                                  * needs to do\n                                                                                  * the\n                                                                                  * determination, at this point we can't be sure which versions will be available on execution */\n        };\n\n        if package.is_version_missing(entity_version_key) {\n            return Err(Box::new(Error::parameter_failure(\n                block_header,\n                ParameterFailure::MissingEntityAtVersion { entity_version_key },\n            )));\n        }\n\n        if !package.is_version_enabled(entity_version_key) {\n            return Err(Box::new(Error::parameter_failure(\n                block_header,\n                ParameterFailure::DisabledEntityAtVersion { entity_version_key },\n            )));\n        }\n        Ok(Some(entity_version_key))\n    }\n\n    fn validate_transaction_cryptography<REv: ReactorEventT>(\n       
 &self,\n        effect_builder: EffectBuilder<REv>,\n        event_metadata: Box<EventMetadata>,\n    ) -> Effects<Event> {\n        let is_valid = match &event_metadata.meta_transaction {\n            MetaTransaction::Deploy(meta_deploy) => meta_deploy\n                .deploy()\n                .is_valid()\n                .map_err(|err| Error::InvalidTransaction(err.into())),\n            MetaTransaction::V1(txn) => txn\n                .verify()\n                .map_err(|err| Error::InvalidTransaction(err.into())),\n        };\n        if let Err(error) = is_valid {\n            return self.reject_transaction(effect_builder, *event_metadata, error);\n        }\n\n        // If this has been received from the speculative exec server, we just want to call the\n        // responder and finish.  Otherwise store the transaction and announce it if required.\n        if let Source::SpeculativeExec = event_metadata.source {\n            if let Some(responder) = event_metadata.maybe_responder {\n                return responder.respond(Ok(())).ignore();\n            }\n            error!(\"speculative exec source should always have a responder\");\n            return Effects::new();\n        }\n\n        effect_builder\n            .put_transaction_to_storage(event_metadata.transaction.clone())\n            .event(move |is_new| Event::PutToStorageResult {\n                event_metadata,\n                is_new,\n            })\n    }\n\n    fn reject_transaction<REv: ReactorEventT>(\n        &self,\n        effect_builder: EffectBuilder<REv>,\n        event_metadata: EventMetadata,\n        error: Error,\n    ) -> Effects<Event> {\n        let EventMetadata {\n            meta_transaction: _,\n            transaction,\n            source,\n            maybe_responder,\n            verification_start_timestamp,\n        } = event_metadata;\n        self.reject_transaction_direct(\n            effect_builder,\n            transaction,\n            source,\n            
maybe_responder,\n            verification_start_timestamp,\n            error,\n        )\n    }\n\n    fn reject_transaction_direct<REv: ReactorEventT>(\n        &self,\n        effect_builder: EffectBuilder<REv>,\n        transaction: Transaction,\n        source: Source,\n        maybe_responder: Option<Responder<Result<(), Error>>>,\n        verification_start_timestamp: Timestamp,\n        error: Error,\n    ) -> Effects<Event> {\n        trace!(%error, transaction = %transaction, \"rejected transaction\");\n        self.metrics.observe_rejected(verification_start_timestamp);\n        let mut effects = Effects::new();\n        if let Some(responder) = maybe_responder {\n            // The client has submitted an invalid transaction\n            // Return an error to the RPC component via the responder.\n            effects.extend(responder.respond(Err(error)).ignore());\n        }\n\n        effects.extend(\n            effect_builder\n                .announce_invalid_transaction(transaction, source)\n                .ignore(),\n        );\n        effects\n    }\n\n    fn handle_put_to_storage<REv: ReactorEventT>(\n        &self,\n        effect_builder: EffectBuilder<REv>,\n        event_metadata: Box<EventMetadata>,\n        is_new: bool,\n    ) -> Effects<Event> {\n        let mut effects = Effects::new();\n        if is_new {\n            debug!(transaction = %event_metadata.transaction, \"accepted transaction\");\n            effects.extend(\n                effect_builder\n                    .announce_new_transaction_accepted(\n                        Arc::new(event_metadata.transaction),\n                        event_metadata.source,\n                    )\n                    .ignore(),\n            );\n        } else if matches!(event_metadata.source, Source::Peer(_)) {\n            // If `is_new` is `false`, the transaction was previously stored.  
If the source is\n            // `Peer`, we got here as a result of a `Fetch<Deploy>` or `Fetch<TransactionV1>`, and\n            // the incoming transaction could have a different set of approvals to the one already\n            // stored.  We can treat the incoming approvals as finalized and now try and store them.\n            // If storing them returns `true`, (indicating the approvals are different to any\n            // previously stored) we can announce a new transaction accepted, causing the fetcher\n            // to be notified.\n            return effect_builder\n                .store_finalized_approvals(\n                    event_metadata.transaction.hash(),\n                    event_metadata.transaction.approvals(),\n                )\n                .event(move |is_new| Event::StoredFinalizedApprovals {\n                    event_metadata,\n                    is_new,\n                });\n        }\n        self.metrics\n            .observe_accepted(event_metadata.verification_start_timestamp);\n\n        if let Some(responder) = event_metadata.maybe_responder {\n            effects.extend(responder.respond(Ok(())).ignore());\n        }\n        effects\n    }\n\n    fn handle_stored_finalized_approvals<REv: ReactorEventT>(\n        &self,\n        effect_builder: EffectBuilder<REv>,\n        event_metadata: Box<EventMetadata>,\n        is_new: bool,\n    ) -> Effects<Event> {\n        let EventMetadata {\n            meta_transaction: _,\n            transaction,\n            source,\n            maybe_responder,\n            verification_start_timestamp,\n        } = *event_metadata;\n        debug!(%transaction, \"accepted transaction\");\n        self.metrics.observe_accepted(verification_start_timestamp);\n        let mut effects = Effects::new();\n        if is_new {\n            effects.extend(\n                effect_builder\n                    .announce_new_transaction_accepted(Arc::new(transaction), source)\n                    
.ignore(),\n            );\n        }\n\n        if let Some(responder) = maybe_responder {\n            effects.extend(responder.respond(Ok(())).ignore());\n        }\n        effects\n    }\n}\n\nimpl<REv: ReactorEventT> Component<REv> for TransactionAcceptor {\n    type Event = Event;\n\n    fn name(&self) -> &str {\n        COMPONENT_NAME\n    }\n\n    fn handle_event(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        _rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event> {\n        trace!(?event, \"TransactionAcceptor: handling event\");\n        match event {\n            Event::Accept {\n                transaction,\n                source,\n                maybe_responder: responder,\n            } => self.accept(effect_builder, transaction, source, responder),\n            Event::GetBlockHeaderResult {\n                event_metadata,\n                maybe_block_header,\n            } => self.handle_get_block_header_result(\n                effect_builder,\n                event_metadata,\n                maybe_block_header,\n            ),\n            Event::GetAddressableEntityResult {\n                event_metadata,\n                block_header,\n                maybe_entity,\n            } => self.handle_get_entity_result(\n                effect_builder,\n                event_metadata,\n                block_header,\n                maybe_entity,\n            ),\n            Event::GetBalanceResult {\n                event_metadata,\n                block_header,\n                maybe_balance,\n            } => self.handle_get_balance_result(\n                effect_builder,\n                event_metadata,\n                block_header,\n                maybe_balance,\n            ),\n            Event::GetContractResult {\n                event_metadata,\n                block_header,\n                is_payment,\n                contract_hash,\n                maybe_entity,\n            } => 
self.handle_get_contract_result(\n                effect_builder,\n                event_metadata,\n                block_header,\n                is_payment,\n                contract_hash,\n                maybe_entity,\n            ),\n            Event::GetPackageResult {\n                event_metadata,\n                block_header,\n                is_payment,\n                package_hash,\n                maybe_entity_version,\n                maybe_protocol_version_major,\n                maybe_package,\n            } => self.handle_get_package_result(\n                effect_builder,\n                event_metadata,\n                block_header,\n                is_payment,\n                package_hash,\n                maybe_entity_version,\n                maybe_protocol_version_major,\n                maybe_package,\n            ),\n            Event::GetEntryPointResult {\n                event_metadata,\n                block_header,\n                is_payment,\n                entry_point_name,\n                addressable_entity,\n                entry_point_exists,\n            } => self.handle_get_entry_point_result(\n                effect_builder,\n                event_metadata,\n                block_header,\n                is_payment,\n                entry_point_name,\n                addressable_entity,\n                entry_point_exists,\n            ),\n            Event::PutToStorageResult {\n                event_metadata,\n                is_new,\n            } => self.handle_put_to_storage(effect_builder, event_metadata, is_new),\n            Event::StoredFinalizedApprovals {\n                event_metadata,\n                is_new,\n            } => self.handle_stored_finalized_approvals(effect_builder, event_metadata, is_new),\n        }\n    }\n}\n\n// `allow` can be removed once https://github.com/casper-network/casper-node/issues/3063 is fixed.\n#[allow(clippy::result_large_err)]\nfn is_authorized_entity(\n    
addressable_entity: &AddressableEntity,\n    administrators: &BTreeSet<AccountHash>,\n    event_metadata: &EventMetadata,\n) -> Result<(), ParameterFailure> {\n    let authorization_keys = event_metadata.transaction.signers();\n\n    if administrators\n        .intersection(&authorization_keys)\n        .next()\n        .is_some()\n    {\n        return Ok(());\n    }\n\n    if !addressable_entity.can_authorize(&authorization_keys) {\n        return Err(ParameterFailure::InvalidAssociatedKeys);\n    }\n\n    if !addressable_entity.can_deploy_with(&authorization_keys) {\n        return Err(ParameterFailure::InsufficientSignatureWeight);\n    }\n\n    Ok(())\n}\n\n// `allow` can be removed once https://github.com/casper-network/casper-node/issues/3063 is fixed.\n#[allow(clippy::result_large_err)]\nfn deploy_payment_is_valid(\n    payment: &ExecutableDeployItem,\n    block_header: &BlockHeader,\n) -> Result<(), Error> {\n    match payment {\n        ExecutableDeployItem::Transfer { .. } => {\n            return Err(Error::parameter_failure(\n                block_header,\n                DeployParameterFailure::InvalidPaymentVariant.into(),\n            ));\n        }\n        ExecutableDeployItem::ModuleBytes { module_bytes, args } => {\n            // module bytes being empty implies the payment executable is standard payment.\n            if module_bytes.is_empty() {\n                if let Some(value) = args.get(ARG_AMOUNT) {\n                    if value.to_t::<U512>().is_err() {\n                        return Err(Error::parameter_failure(\n                            block_header,\n                            DeployParameterFailure::FailedToParsePaymentAmount.into(),\n                        ));\n                    }\n                } else {\n                    return Err(Error::parameter_failure(\n                        block_header,\n                        DeployParameterFailure::MissingPaymentAmount.into(),\n                    ));\n                }\n  
          }\n        }\n        ExecutableDeployItem::StoredContractByHash { .. }\n        | ExecutableDeployItem::StoredContractByName { .. }\n        | ExecutableDeployItem::StoredVersionedContractByHash { .. }\n        | ExecutableDeployItem::StoredVersionedContractByName { .. } => (),\n    }\n    Ok(())\n}\n"
  },
  {
    "path": "node/src/components/transaction_buffer/config.rs",
    "content": "use datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse casper_types::TimeDiff;\n\nconst DEFAULT_EXPIRY_CHECK_INTERVAL: &str = \"1min\";\n\n#[derive(Copy, Clone, DataSize, Debug, Deserialize, Serialize)]\n#[serde(deny_unknown_fields)]\npub struct Config {\n    /// The interval of checking for expired transactions.\n    pub expiry_check_interval: TimeDiff,\n}\n\nimpl Config {\n    /// Returns the interval of checking for expired transactions.\n    pub fn expiry_check_interval(&self) -> TimeDiff {\n        self.expiry_check_interval\n    }\n}\n\nimpl Default for Config {\n    fn default() -> Self {\n        Config {\n            expiry_check_interval: DEFAULT_EXPIRY_CHECK_INTERVAL.parse().unwrap(),\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/transaction_buffer/event.rs",
    "content": "use std::{\n    fmt::{self, Display, Formatter},\n    sync::Arc,\n};\n\nuse datasize::DataSize;\nuse derive_more::From;\n\nuse casper_types::{Block, BlockV2, EraId, Timestamp, Transaction, TransactionId};\n\nuse crate::{\n    components::consensus::{ClContext, ProposedBlock},\n    effect::{requests::TransactionBufferRequest, Responder},\n    types::{appendable_block::AppendableBlock, FinalizedBlock},\n};\n\n#[derive(Debug, From, DataSize)]\npub(crate) enum Event {\n    Initialize(Vec<Block>),\n    #[from]\n    Request(TransactionBufferRequest),\n    ReceiveTransactionGossiped(TransactionId),\n    StoredTransaction(TransactionId, Option<Box<Transaction>>),\n    BlockProposed(Box<ProposedBlock<ClContext>>),\n    Block(Arc<BlockV2>),\n    VersionedBlock(Arc<Block>),\n    BlockFinalized(Box<FinalizedBlock>),\n    Expire,\n    UpdateEraGasPrice(EraId, u8),\n    GetGasPriceResult(\n        Option<u8>,\n        EraId,\n        Timestamp,\n        Timestamp,\n        Responder<AppendableBlock>,\n    ),\n}\n\nimpl Display for Event {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            Event::Initialize(blocks) => {\n                write!(formatter, \"initialize, {} blocks\", blocks.len())\n            }\n            Event::Request(TransactionBufferRequest::GetAppendableBlock { .. 
}) => {\n                write!(formatter, \"get appendable block request\")\n            }\n            Event::ReceiveTransactionGossiped(transaction_id) => {\n                write!(formatter, \"receive transaction gossiped {}\", transaction_id)\n            }\n            Event::StoredTransaction(transaction_id, maybe_transaction) => {\n                write!(\n                    formatter,\n                    \"{} stored: {:?}\",\n                    transaction_id,\n                    maybe_transaction.is_some()\n                )\n            }\n            Event::BlockProposed(_) => {\n                write!(formatter, \"proposed block\")\n            }\n            Event::BlockFinalized(finalized_block) => {\n                write!(\n                    formatter,\n                    \"finalized block at height {}\",\n                    finalized_block.height\n                )\n            }\n            Event::Block(_) => {\n                write!(formatter, \"block\")\n            }\n            Event::VersionedBlock(_) => {\n                write!(formatter, \"versioned block\")\n            }\n            Event::Expire => {\n                write!(formatter, \"expire transactions\")\n            }\n            Event::UpdateEraGasPrice(era_id, next_era_gas_price) => {\n                write!(\n                    formatter,\n                    \"gas price {} for era {}\",\n                    next_era_gas_price, era_id\n                )\n            }\n            Event::GetGasPriceResult(_, era_id, _, _, _) => {\n                write!(formatter, \"retrieving gas price for era {}\", era_id)\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/components/transaction_buffer/metrics.rs",
    "content": "use prometheus::{IntGauge, Registry};\n\nuse crate::unregister_metric;\n\n/// Metrics for the transaction_buffer component.\n#[derive(Debug)]\npub(super) struct Metrics {\n    /// Total number of transactions contained in the transaction buffer.\n    pub(super) total_transactions: IntGauge,\n    /// Number of transactions contained in in-flight proposed blocks.\n    pub(super) held_transactions: IntGauge,\n    /// Number of transactions that should not be included in future proposals ever again.\n    pub(super) dead_transactions: IntGauge,\n    registry: Registry,\n}\n\nimpl Metrics {\n    /// Creates a new instance of the transaction buffer metrics, using the given prefix.\n    pub fn new(registry: &Registry) -> Result<Self, prometheus::Error> {\n        let total_transactions = IntGauge::new(\n            \"transaction_buffer_total_transactions\".to_string(),\n            \"total number of transactions contained in the transaction buffer.\".to_string(),\n        )?;\n        let held_transactions = IntGauge::new(\n            \"transaction_buffer_held_transactions\".to_string(),\n            \"number of transactions included in in-flight proposed blocks.\".to_string(),\n        )?;\n        let dead_transactions = IntGauge::new(\n            \"transaction_buffer_dead_transactions\".to_string(),\n            \"number of transactions that should not be included in future proposals.\".to_string(),\n        )?;\n\n        registry.register(Box::new(total_transactions.clone()))?;\n        registry.register(Box::new(held_transactions.clone()))?;\n        registry.register(Box::new(dead_transactions.clone()))?;\n\n        Ok(Metrics {\n            total_transactions,\n            held_transactions,\n            dead_transactions,\n            registry: registry.clone(),\n        })\n    }\n}\n\nimpl Drop for Metrics {\n    fn drop(&mut self) {\n        unregister_metric!(self.registry, self.total_transactions);\n        unregister_metric!(self.registry, 
self.held_transactions);\n        unregister_metric!(self.registry, self.dead_transactions);\n    }\n}\n"
  },
  {
    "path": "node/src/components/transaction_buffer/tests.rs",
    "content": "use std::iter;\n\nuse prometheus::Registry;\nuse rand::{seq::SliceRandom, Rng};\n\nuse super::*;\nuse crate::{\n    effect::announcements::TransactionBufferAnnouncement::{self, TransactionsExpired},\n    reactor::{EventQueueHandle, QueueKind, Scheduler},\n    testing::LARGE_WASM_LANE_ID,\n    types::{transaction::transaction_v1_builder::TransactionV1Builder, FinalizedBlock},\n    utils,\n};\nuse casper_types::{\n    testing::TestRng, Deploy, EraId, SecretKey, TestBlockBuilder, TimeDiff, Transaction,\n    TransactionConfig, TransactionLaneDefinition, TransactionV1Config,\n    DEFAULT_LARGE_TRANSACTION_GAS_LIMIT,\n};\n\nconst ERA_ONE: EraId = EraId::new(1u64);\nconst GAS_PRICE_TOLERANCE: u8 = 1;\nconst DEFAULT_MINIMUM_GAS_PRICE: u8 = 1;\n\nfn get_appendable_block(\n    rng: &mut TestRng,\n    transaction_buffer: &mut TransactionBuffer,\n    categories: impl Iterator<Item = u8>,\n    transaction_limit: usize,\n) {\n    let transactions: Vec<_> = categories\n        .take(transaction_limit)\n        .map(|category| create_valid_transaction(rng, category, None, None))\n        .collect();\n    transactions\n        .iter()\n        .for_each(|transaction| transaction_buffer.register_transaction(transaction.clone()));\n    assert_container_sizes(transaction_buffer, transactions.len(), 0, 0);\n\n    // now check how many transfers were added in the block; should not exceed the config limits.\n    let timestamp = Timestamp::now();\n    let expiry = timestamp.saturating_add(TimeDiff::from_seconds(1));\n    let appendable_block = transaction_buffer.appendable_block(Timestamp::now(), ERA_ONE, expiry);\n    assert!(appendable_block.transaction_hashes().len() <= transaction_limit);\n    assert_eq!(transaction_buffer.hold.len(), 1);\n    assert_container_sizes(\n        transaction_buffer,\n        transactions.len(),\n        0,\n        appendable_block.transaction_hashes().len(),\n    );\n}\n\n// Generates valid transactions\nfn create_valid_transaction(\n    
rng: &mut TestRng,\n    transaction_lane: u8,\n    strict_timestamp: Option<Timestamp>,\n    with_ttl: Option<TimeDiff>,\n) -> Transaction {\n    let transaction_ttl = match with_ttl {\n        Some(ttl) => ttl,\n        None => TimeDiff::from_seconds(rng.gen_range(30..100)),\n    };\n    let transaction_timestamp = match strict_timestamp {\n        Some(timestamp) => timestamp,\n        None => Timestamp::now(),\n    };\n\n    match transaction_lane {\n        transaction_lane if transaction_lane == MINT_LANE_ID => {\n            if rng.gen() {\n                let transaction_v1 =\n                    TransactionV1Builder::new_random_with_category_and_timestamp_and_ttl(\n                        rng,\n                        MINT_LANE_ID,\n                        strict_timestamp,\n                        with_ttl,\n                    )\n                    .build()\n                    .unwrap();\n                Transaction::V1(transaction_v1)\n            } else {\n                Transaction::Deploy(Deploy::random_valid_native_transfer_with_timestamp_and_ttl(\n                    rng,\n                    transaction_timestamp,\n                    transaction_ttl,\n                ))\n            }\n        }\n        transaction_lane if transaction_lane == INSTALL_UPGRADE_LANE_ID => Transaction::V1(\n            TransactionV1Builder::new_random_with_category_and_timestamp_and_ttl(\n                rng,\n                INSTALL_UPGRADE_LANE_ID,\n                strict_timestamp,\n                with_ttl,\n            )\n            .build()\n            .unwrap(),\n        ),\n        transaction_lane if transaction_lane == AUCTION_LANE_ID => Transaction::V1(\n            TransactionV1Builder::new_random_with_category_and_timestamp_and_ttl(\n                rng,\n                AUCTION_LANE_ID,\n                strict_timestamp,\n                with_ttl,\n            )\n            .build()\n            .unwrap(),\n        ),\n        _ => {\n            
if rng.gen() {\n                Transaction::Deploy(match (strict_timestamp, with_ttl) {\n                    (Some(timestamp), Some(ttl)) if Timestamp::now() > timestamp + ttl => {\n                        Deploy::random_expired_deploy(rng)\n                    }\n                    _ => Deploy::random_with_valid_session_package_by_name(rng),\n                })\n            } else {\n                Transaction::V1(\n                    TransactionV1Builder::new_random_with_category_and_timestamp_and_ttl(\n                        rng,\n                        LARGE_WASM_LANE_ID,\n                        strict_timestamp,\n                        with_ttl,\n                    )\n                    .build()\n                    .unwrap(),\n                )\n            }\n        }\n    }\n}\n\n/// Checks sizes of the transaction_buffer containers. Also checks the metrics recorded.\n#[track_caller]\nfn assert_container_sizes(\n    transaction_buffer: &TransactionBuffer,\n    expected_buffer: usize,\n    expected_dead: usize,\n    expected_held: usize,\n) {\n    assert_eq!(\n        transaction_buffer.buffer.len(),\n        expected_buffer,\n        \"buffer.len {} != expected {}\",\n        transaction_buffer.buffer.len(),\n        expected_buffer\n    );\n    assert_eq!(\n        transaction_buffer.dead.len(),\n        expected_dead,\n        \"dead.len {} != expected {}\",\n        transaction_buffer.dead.len(),\n        expected_dead\n    );\n    let hold_len = transaction_buffer\n        .hold\n        .values()\n        .map(|transactions| transactions.len())\n        .sum::<usize>();\n    assert_eq!(\n        hold_len, expected_held,\n        \"hold.len {} != expected {}\",\n        hold_len, expected_held,\n    );\n    assert_eq!(\n        transaction_buffer.metrics.total_transactions.get(),\n        expected_buffer as i64,\n        \"metrics total {} != expected {}\",\n        transaction_buffer.metrics.total_transactions.get(),\n        
expected_buffer,\n    );\n    assert_eq!(\n        transaction_buffer.metrics.held_transactions.get(),\n        expected_held as i64,\n        \"metrics held {} != expected {}\",\n        transaction_buffer.metrics.held_transactions.get(),\n        expected_held,\n    );\n    assert_eq!(\n        transaction_buffer.metrics.dead_transactions.get(),\n        expected_dead as i64,\n        \"metrics dead {} != expected {}\",\n        transaction_buffer.metrics.dead_transactions.get(),\n        expected_dead,\n    );\n}\n\nconst fn all_categories() -> [u8; 4] {\n    [\n        MINT_LANE_ID,\n        INSTALL_UPGRADE_LANE_ID,\n        AUCTION_LANE_ID,\n        LARGE_WASM_LANE_ID,\n    ]\n}\n\n#[test]\nfn register_transaction_and_check_size() {\n    let mut rng = TestRng::new();\n    let chainspec = Chainspec::default();\n    for category in all_categories() {\n        let mut transaction_buffer = TransactionBuffer::new(\n            Arc::new(chainspec.clone()),\n            Config::default(),\n            &Registry::new(),\n        )\n        .unwrap();\n\n        // Try to register valid transactions\n        let num_valid_transactions: usize = rng.gen_range(50..500);\n        let valid_transactions: Vec<_> = (0..num_valid_transactions)\n            .map(|_| create_valid_transaction(&mut rng, category, None, None))\n            .collect();\n        valid_transactions\n            .iter()\n            .for_each(|transaction| transaction_buffer.register_transaction(transaction.clone()));\n        assert_container_sizes(&transaction_buffer, valid_transactions.len(), 0, 0);\n\n        // Try to register a duplicate transaction\n        let duplicate_transaction = valid_transactions\n            .get(rng.gen_range(0..num_valid_transactions))\n            .unwrap()\n            .clone();\n        transaction_buffer.register_transaction(duplicate_transaction.clone());\n        assert_container_sizes(&transaction_buffer, valid_transactions.len(), 0, 0);\n\n        // Insert 
transaction without footprint\n        let bad_transaction = {\n            let mut deploy = Deploy::random_valid_native_transfer(&mut rng);\n            deploy.invalidate();\n            Transaction::from(deploy)\n        };\n        assert!(bad_transaction.verify().is_err());\n        transaction_buffer.register_transaction(bad_transaction);\n        assert_container_sizes(&transaction_buffer, valid_transactions.len(), 0, 0);\n    }\n}\n\n#[test]\nfn register_block_with_valid_transactions() {\n    let mut rng = TestRng::new();\n\n    for category in all_categories() {\n        let mut transaction_buffer = TransactionBuffer::new(\n            Arc::new(Chainspec::default()),\n            Config::default(),\n            &Registry::new(),\n        )\n        .unwrap();\n\n        let txns: Vec<_> = (0..10)\n            .map(|_| create_valid_transaction(&mut rng, category, None, None))\n            .collect();\n        let era_id = EraId::new(rng.gen_range(0..6));\n        let height = era_id.value() * 10 + rng.gen_range(0..10);\n        let is_switch = rng.gen_bool(0.1);\n        let block = TestBlockBuilder::new()\n            .era(era_id)\n            .height(height)\n            .switch_block(is_switch)\n            .transactions(&txns)\n            .build(&mut rng);\n\n        transaction_buffer.register_block(&block);\n        assert_container_sizes(&transaction_buffer, txns.len(), txns.len(), 0);\n    }\n}\n\n#[test]\nfn register_finalized_block_with_valid_transactions() {\n    let mut rng = TestRng::new();\n\n    for category in all_categories() {\n        let mut transaction_buffer = TransactionBuffer::new(\n            Arc::new(Chainspec::default()),\n            Config::default(),\n            &Registry::new(),\n        )\n        .unwrap();\n\n        let txns: Vec<_> = (0..10)\n            .map(|_| create_valid_transaction(&mut rng, category, None, None))\n            .collect();\n        let block = FinalizedBlock::random(&mut rng, &txns);\n\n        
transaction_buffer.register_block_finalized(&block);\n        assert_container_sizes(&transaction_buffer, txns.len(), txns.len(), 0);\n    }\n}\n\n#[test]\nfn get_proposable_transactions() {\n    let mut rng = TestRng::new();\n\n    for category in all_categories() {\n        let mut transaction_buffer = TransactionBuffer::new(\n            Arc::new(Chainspec::default()),\n            Config::default(),\n            &Registry::new(),\n        )\n        .unwrap();\n\n        transaction_buffer\n            .prices\n            .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE);\n\n        // populate transaction buffer with some transactions\n        let transactions: Vec<_> = (0..50)\n            .map(|_| create_valid_transaction(&mut rng, category, None, None))\n            .collect();\n        transactions\n            .iter()\n            .for_each(|transaction| transaction_buffer.register_transaction(transaction.clone()));\n        assert_container_sizes(&transaction_buffer, transactions.len(), 0, 0);\n\n        // Create a block with some transactions and register it with the transaction_buffer\n        let block_transactions: Vec<_> = (0..10)\n            .map(|_| create_valid_transaction(&mut rng, category, None, None))\n            .collect();\n        let txns: Vec<_> = block_transactions.to_vec();\n        let block = FinalizedBlock::random(&mut rng, &txns);\n        transaction_buffer.register_block_finalized(&block);\n        assert_container_sizes(\n            &transaction_buffer,\n            transactions.len() + block_transactions.len(),\n            block_transactions.len(),\n            0,\n        );\n\n        // Check which transactions are proposable. 
Should return the transactions that were not\n        // included in the block since those should be dead.\n        let proposable: Vec<_> = transaction_buffer\n            .proposable(DEFAULT_MINIMUM_GAS_PRICE)\n            .collect();\n        assert_eq!(proposable.len(), transactions.len());\n        let proposable_transaction_hashes: HashSet<_> =\n            proposable.iter().map(|(th, _)| *th).collect();\n        for transaction in transactions.iter() {\n            assert!(proposable_transaction_hashes.contains(&transaction.hash()));\n        }\n\n        // Get an appendable block. This should put the deploys on hold.\n        let timestamp = Timestamp::now();\n        let expiry = timestamp.saturating_add(TimeDiff::from_seconds(1));\n        let appendable_block =\n            transaction_buffer.appendable_block(Timestamp::now(), ERA_ONE, expiry);\n        assert_eq!(transaction_buffer.hold.len(), 1);\n        assert_container_sizes(\n            &transaction_buffer,\n            transactions.len() + block_transactions.len(),\n            block_transactions.len(),\n            appendable_block.transaction_hashes().len(),\n        );\n\n        // Check that held blocks are not proposable\n        let proposable: Vec<_> = transaction_buffer\n            .proposable(DEFAULT_MINIMUM_GAS_PRICE)\n            .collect();\n        assert_eq!(\n            proposable.len(),\n            transactions.len() - appendable_block.transaction_hashes().len()\n        );\n        for transaction in proposable {\n            assert!(!appendable_block\n                .transaction_hashes()\n                .contains(transaction.0));\n        }\n    }\n}\n\n#[test]\nfn get_appendable_block_when_transfers_are_of_one_lane() {\n    let mut rng = TestRng::new();\n\n    let transaction_v1_config =\n        TransactionV1Config::default().with_count_limits(Some(200), Some(0), Some(0), Some(10));\n\n    let transaction_config = TransactionConfig {\n        block_max_approval_count: 
210,\n        block_gas_limit: u64::MAX, // making sure this test does not hit gas limit first\n        transaction_v1_config,\n        ..Default::default()\n    };\n\n    let chainspec = Arc::new(Chainspec {\n        transaction_config: transaction_config.clone(),\n        ..Default::default()\n    });\n    let mut transaction_buffer =\n        TransactionBuffer::new(chainspec, Config::default(), &Registry::new()).unwrap();\n\n    transaction_buffer\n        .prices\n        .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE);\n    get_appendable_block(\n        &mut rng,\n        &mut transaction_buffer,\n        iter::repeat_with(|| MINT_LANE_ID),\n        transaction_config\n            .transaction_v1_config\n            .get_max_transaction_count(MINT_LANE_ID) as usize\n            + 50,\n    );\n}\n\n#[test]\nfn get_appendable_block_when_transfers_are_both_legacy_and_v1() {\n    let mut rng = TestRng::new();\n\n    let transaction_v1_config =\n        TransactionV1Config::default().with_count_limits(Some(200), Some(0), Some(0), Some(10));\n\n    let transaction_config = TransactionConfig {\n        block_max_approval_count: 210,\n        block_gas_limit: u64::MAX, // making sure this test does not hit gas limit first\n        transaction_v1_config,\n        ..Default::default()\n    };\n\n    let chainspec = Arc::new(Chainspec {\n        transaction_config: transaction_config.clone(),\n        ..Default::default()\n    });\n\n    let mut transaction_buffer =\n        TransactionBuffer::new(chainspec, Config::default(), &Registry::new()).unwrap();\n\n    transaction_buffer\n        .prices\n        .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE);\n    get_appendable_block(\n        &mut rng,\n        &mut transaction_buffer,\n        vec![MINT_LANE_ID].into_iter(),\n        transaction_config\n            .transaction_v1_config\n            .get_max_transaction_count(MINT_LANE_ID) as usize\n            + 50,\n    );\n}\n\n#[test]\nfn 
get_appendable_block_when_standards_are_of_one_lane() {\n    let large_lane_id: u8 = 3;\n    let mut rng = TestRng::new();\n\n    let transaction_v1_config =\n        TransactionV1Config::default().with_count_limits(Some(200), Some(0), Some(0), Some(10));\n\n    let transaction_config = TransactionConfig {\n        block_max_approval_count: 210,\n        block_gas_limit: u64::MAX, // making sure this test does not hit gas limit first\n        transaction_v1_config,\n        ..Default::default()\n    };\n\n    let chainspec = Arc::new(Chainspec {\n        transaction_config: transaction_config.clone(),\n        ..Default::default()\n    });\n    let mut transaction_buffer =\n        TransactionBuffer::new(chainspec, Config::default(), &Registry::new()).unwrap();\n    transaction_buffer\n        .prices\n        .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE);\n    get_appendable_block(\n        &mut rng,\n        &mut transaction_buffer,\n        iter::repeat_with(|| large_lane_id),\n        transaction_config\n            .transaction_v1_config\n            .get_max_transaction_count(large_lane_id) as usize\n            + 50,\n    );\n}\n\n#[test]\nfn get_appendable_block_when_standards_are_both_legacy_and_v1() {\n    let large_lane_id: u8 = 3;\n    let mut rng = TestRng::new();\n\n    let transaction_v1_config =\n        TransactionV1Config::default().with_count_limits(Some(200), Some(0), Some(0), Some(10));\n\n    let transaction_config = TransactionConfig {\n        block_max_approval_count: 210,\n        block_gas_limit: u64::MAX, // making sure this test does not hit gas limit first\n        transaction_v1_config,\n        ..Default::default()\n    };\n\n    let chainspec = Arc::new(Chainspec {\n        transaction_config: transaction_config.clone(),\n        ..Default::default()\n    });\n\n    let mut transaction_buffer =\n        TransactionBuffer::new(chainspec, Config::default(), &Registry::new()).unwrap();\n\n    transaction_buffer\n        .prices\n        
.insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE);\n\n    get_appendable_block(\n        &mut rng,\n        &mut transaction_buffer,\n        vec![MINT_LANE_ID].into_iter(),\n        transaction_config\n            .transaction_v1_config\n            .get_max_transaction_count(large_lane_id) as usize\n            + 5,\n    );\n}\n\n#[test]\nfn block_fully_saturated() {\n    let mut rng = TestRng::new();\n\n    let max_transfers = rng.gen_range(0..20);\n    let max_staking = rng.gen_range(0..20);\n    let max_install_upgrade = rng.gen_range(0..20);\n    let max_standard = rng.gen_range(0..20);\n\n    let total_allowed = max_transfers + max_staking + max_install_upgrade + max_standard;\n\n    let transaction_v1_config = TransactionV1Config::default().with_count_limits(\n        Some(max_transfers),\n        Some(max_staking),\n        Some(max_install_upgrade),\n        Some(max_standard),\n    );\n\n    let transaction_config = TransactionConfig {\n        transaction_v1_config,\n        block_max_approval_count: 210,\n        block_gas_limit: u64::MAX, // making sure this test does not hit gas limit first\n        ..Default::default()\n    };\n\n    let chainspec = Chainspec {\n        transaction_config,\n        ..Default::default()\n    };\n\n    let mut transaction_buffer =\n        TransactionBuffer::new(Arc::new(chainspec), Config::default(), &Registry::new()).unwrap();\n\n    transaction_buffer\n        .prices\n        .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE);\n\n    // Try to register 10 more transactions per each category as allowed by the config.\n    let (transfers, stakings, install_upgrades, standards) = generate_and_register_transactions(\n        &mut transaction_buffer,\n        max_transfers + 20,\n        max_staking + 20,\n        max_install_upgrade + 20,\n        max_standard + 20,\n        &mut rng,\n    );\n    let (transfers_hashes, stakings_hashes, install_upgrades_hashes, standards_hashes) = (\n        transfers\n            .iter()\n        
    .map(|transaction| transaction.hash())\n            .collect_vec(),\n        stakings\n            .iter()\n            .map(|transaction| transaction.hash())\n            .collect_vec(),\n        install_upgrades\n            .iter()\n            .map(|transaction| transaction.hash())\n            .collect_vec(),\n        standards\n            .iter()\n            .map(|transaction| transaction.hash())\n            .collect_vec(),\n    );\n\n    // Check that we really generated the required number of transactions.\n    assert_eq!(\n        transfers.len() + stakings.len() + install_upgrades.len() + standards.len(),\n        total_allowed as usize + 20 * 4\n    );\n\n    // Ensure that only 'total_allowed' transactions are proposed.\n    let timestamp = Timestamp::now();\n    let expiry = timestamp.saturating_add(TimeDiff::from_seconds(60));\n    let appendable_block = transaction_buffer.appendable_block(Timestamp::now(), ERA_ONE, expiry);\n\n    assert_eq!(\n        appendable_block.transaction_hashes().len(),\n        total_allowed as usize\n    );\n\n    // Assert the number of proposed transaction types, block should be fully saturated.\n    let mut proposed_transfers = 0;\n    let mut proposed_stakings = 0;\n    let mut proposed_install_upgrades = 0;\n    let mut proposed_standards = 0;\n    appendable_block\n        .transaction_hashes()\n        .iter()\n        .for_each(|transaction_hash| {\n            if transfers_hashes.contains(transaction_hash) {\n                proposed_transfers += 1;\n            } else if stakings_hashes.contains(transaction_hash) {\n                proposed_stakings += 1;\n            } else if install_upgrades_hashes.contains(transaction_hash) {\n                proposed_install_upgrades += 1;\n            } else if standards_hashes.contains(transaction_hash) {\n                proposed_standards += 1;\n            }\n        });\n    let mut has_hit_any_limit = false;\n    if proposed_transfers == max_transfers {\n       
 has_hit_any_limit = true;\n    }\n    if proposed_stakings == max_staking {\n        has_hit_any_limit = true;\n    }\n    if proposed_install_upgrades == max_install_upgrade {\n        has_hit_any_limit = true;\n    }\n    if proposed_standards == max_standard {\n        has_hit_any_limit = true;\n    }\n    assert!(has_hit_any_limit)\n}\n\n#[test]\nfn block_not_fully_saturated() {\n    let mut rng = TestRng::new();\n\n    const MIN_COUNT: u64 = 10;\n\n    let max_transfers = rng.gen_range(MIN_COUNT..20);\n    let max_staking = rng.gen_range(MIN_COUNT..20);\n    let max_install_upgrade = rng.gen_range(MIN_COUNT..20);\n    let max_standard = rng.gen_range(MIN_COUNT..20);\n\n    let total_allowed = max_transfers + max_staking + max_install_upgrade + max_standard;\n\n    let transaction_v1_config = TransactionV1Config::default().with_count_limits(\n        Some(max_transfers),\n        Some(max_staking),\n        Some(max_install_upgrade),\n        Some(max_standard),\n    );\n\n    let transaction_config = TransactionConfig {\n        transaction_v1_config,\n        block_max_approval_count: 210,\n        block_gas_limit: u64::MAX, // making sure this test does not hit gas limit first\n        ..Default::default()\n    };\n\n    let chainspec = Chainspec {\n        transaction_config,\n        ..Default::default()\n    };\n\n    let mut transaction_buffer =\n        TransactionBuffer::new(Arc::new(chainspec), Config::default(), &Registry::new()).unwrap();\n\n    transaction_buffer\n        .prices\n        .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE);\n\n    // Try to register less than max capacity per each category as allowed by the config.\n    let actual_transfer_count = rng.gen_range(0..MIN_COUNT - 1);\n    let actual_stakings_count = rng.gen_range(0..MIN_COUNT - 1);\n    let actual_install_upgrade_count = rng.gen_range(0..MIN_COUNT - 1);\n    let actual_standard_count = rng.gen_range(0..MIN_COUNT - 1);\n    let (transfers, stakings, install_upgrades, 
standards) = generate_and_register_transactions(\n        &mut transaction_buffer,\n        actual_transfer_count,\n        actual_stakings_count,\n        actual_install_upgrade_count,\n        actual_standard_count,\n        &mut rng,\n    );\n    let (transfers_hashes, stakings_hashes, install_upgrades_hashes, standards_hashes) = (\n        transfers\n            .iter()\n            .map(|transaction| transaction.hash())\n            .collect_vec(),\n        stakings\n            .iter()\n            .map(|transaction| transaction.hash())\n            .collect_vec(),\n        install_upgrades\n            .iter()\n            .map(|transaction| transaction.hash())\n            .collect_vec(),\n        standards\n            .iter()\n            .map(|transaction| transaction.hash())\n            .collect_vec(),\n    );\n\n    // Check that we really generated the required number of transactions.\n    assert_eq!(\n        transfers.len() + stakings.len() + install_upgrades.len() + standards.len(),\n        actual_transfer_count as usize\n            + actual_stakings_count as usize\n            + actual_install_upgrade_count as usize\n            + actual_standard_count as usize\n    );\n\n    // Ensure that not more than 'total_allowed' transactions are proposed.\n    let timestamp = Timestamp::now();\n    let expiry = timestamp.saturating_add(TimeDiff::from_seconds(1));\n    let appendable_block = transaction_buffer.appendable_block(Timestamp::now(), ERA_ONE, expiry);\n    assert!(appendable_block.transaction_hashes().len() <= total_allowed as usize);\n\n    // Assert the number of proposed transaction types, block should not be fully saturated.\n    let mut proposed_transfers = 0;\n    let mut proposed_stakings = 0;\n    let mut proposed_install_upgrades = 0;\n    let mut proposed_standards = 0;\n    appendable_block\n        .transaction_hashes()\n        .iter()\n        .for_each(|transaction_hash| {\n            if 
transfers_hashes.contains(transaction_hash) {\n                proposed_transfers += 1;\n            } else if stakings_hashes.contains(transaction_hash) {\n                proposed_stakings += 1;\n            } else if install_upgrades_hashes.contains(transaction_hash) {\n                proposed_install_upgrades += 1;\n            } else if standards_hashes.contains(transaction_hash) {\n                proposed_standards += 1;\n            }\n        });\n    assert_eq!(proposed_transfers, actual_transfer_count);\n    assert_eq!(proposed_stakings, actual_stakings_count);\n    assert_eq!(proposed_install_upgrades, actual_install_upgrade_count);\n    assert_eq!(proposed_standards, actual_standard_count);\n}\n\nfn generate_and_register_transactions(\n    transaction_buffer: &mut TransactionBuffer,\n    transfer_count: u64,\n    stakings_count: u64,\n    install_upgrade_count: u64,\n    standard_count: u64,\n    rng: &mut TestRng,\n) -> (\n    Vec<Transaction>,\n    Vec<Transaction>,\n    Vec<Transaction>,\n    Vec<Transaction>,\n) {\n    let transfers: Vec<_> = (0..transfer_count)\n        .map(|_| create_valid_transaction(rng, MINT_LANE_ID, None, None))\n        .collect();\n    let stakings: Vec<_> = (0..stakings_count)\n        .map(|_| create_valid_transaction(rng, AUCTION_LANE_ID, None, None))\n        .collect();\n    let installs_upgrades: Vec<_> = (0..install_upgrade_count)\n        .map(|_| create_valid_transaction(rng, INSTALL_UPGRADE_LANE_ID, None, None))\n        .collect();\n    let standards: Vec<_> = (0..standard_count)\n        .map(|_| create_valid_transaction(rng, LARGE_WASM_LANE_ID, None, None))\n        .collect();\n    transfers\n        .iter()\n        .chain(\n            stakings\n                .iter()\n                .chain(installs_upgrades.iter().chain(standards.iter())),\n        )\n        .for_each(|transaction| transaction_buffer.register_transaction(transaction.clone()));\n\n    (transfers, stakings, installs_upgrades, 
standards)\n}\n\n#[test]\nfn register_transactions_and_blocks() {\n    let mut rng = TestRng::new();\n    let mut transaction_buffer = TransactionBuffer::new(\n        Arc::new(Chainspec::default()),\n        Config::default(),\n        &Registry::new(),\n    )\n    .unwrap();\n\n    transaction_buffer\n        .prices\n        .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE);\n\n    // try to register valid transactions\n    let num_valid_transactions: usize = rng.gen_range(50..500);\n    let category = rng.gen_range(0..4u8);\n    let valid_transactions: Vec<_> = (0..num_valid_transactions)\n        .map(|_| create_valid_transaction(&mut rng, category, None, None))\n        .collect();\n    valid_transactions\n        .iter()\n        .for_each(|transaction| transaction_buffer.register_transaction(transaction.clone()));\n    assert_container_sizes(&transaction_buffer, valid_transactions.len(), 0, 0);\n\n    // register a block with transactions\n    let category = rng.gen_range(0..4u8);\n    let block_transaction: Vec<_> = (0..5)\n        .map(|_| create_valid_transaction(&mut rng, category, None, None))\n        .collect();\n    let txns: Vec<_> = block_transaction.to_vec();\n    let era = rng.gen_range(0..6);\n    let height = era * 10 + rng.gen_range(0..10);\n    let is_switch = rng.gen_bool(0.1);\n\n    let block = TestBlockBuilder::new()\n        .era(era)\n        .height(height)\n        .switch_block(is_switch)\n        .transactions(&txns)\n        .build(&mut rng);\n\n    transaction_buffer.register_block(&block);\n    assert_container_sizes(\n        &transaction_buffer,\n        block_transaction.len() + valid_transactions.len(),\n        block_transaction.len(),\n        0,\n    );\n\n    // try to register the transactions of the block again. 
Should not work since those transactions\n    // are dead.\n    block_transaction\n        .iter()\n        .for_each(|transaction| transaction_buffer.register_transaction(transaction.clone()));\n    assert_container_sizes(\n        &transaction_buffer,\n        block_transaction.len() + valid_transactions.len(),\n        block_transaction.len(),\n        0,\n    );\n\n    let pre_proposal_timestamp = Timestamp::now();\n\n    // get an appendable block. This should put the transactions on hold.\n    let timestamp = Timestamp::now();\n    let expiry = timestamp.saturating_add(TimeDiff::from_seconds(1));\n    let appendable_block = transaction_buffer.appendable_block(Timestamp::now(), ERA_ONE, expiry);\n    assert_eq!(transaction_buffer.hold.len(), 1);\n    assert_container_sizes(\n        &transaction_buffer,\n        block_transaction.len() + valid_transactions.len(),\n        block_transaction.len(),\n        appendable_block.transaction_hashes().len(),\n    );\n\n    // try to register held transactions again.\n    let mut held_transactions = valid_transactions\n        .iter()\n        .filter(|&transaction| {\n            appendable_block\n                .transaction_hashes()\n                .contains(&transaction.hash())\n        })\n        .cloned()\n        .peekable();\n    assert!(held_transactions.peek().is_some());\n    held_transactions\n        .for_each(|transaction| transaction_buffer.register_transaction(transaction.clone()));\n    assert_container_sizes(\n        &transaction_buffer,\n        block_transaction.len() + valid_transactions.len(),\n        block_transaction.len(),\n        appendable_block.transaction_hashes().len(),\n    );\n\n    // test if transactions held for proposed blocks which did not get finalized in time\n    // are eligible again\n    let count = rng.gen_range(1..11);\n    let txns: Vec<_> = iter::repeat_with(|| Transaction::Deploy(Deploy::random(&mut rng)))\n        .take(count)\n        .collect();\n    let block = 
FinalizedBlock::random_with_specifics(\n        &mut rng,\n        EraId::from(2),\n        25,\n        false,\n        pre_proposal_timestamp,\n        &txns,\n    );\n    transaction_buffer.register_block_finalized(&block);\n    assert_container_sizes(\n        &transaction_buffer,\n        block_transaction.len() + valid_transactions.len() + block.all_transactions().count(),\n        block_transaction.len() + block.all_transactions().count(),\n        0,\n    );\n}\n\n/// Event for the mock reactor.\n#[derive(Debug)]\nenum ReactorEvent {\n    TransactionBufferAnnouncement(TransactionBufferAnnouncement),\n    Event(#[allow(dead_code)] Event),\n}\n\nimpl From<TransactionBufferAnnouncement> for ReactorEvent {\n    fn from(req: TransactionBufferAnnouncement) -> ReactorEvent {\n        ReactorEvent::TransactionBufferAnnouncement(req)\n    }\n}\n\nimpl From<Event> for ReactorEvent {\n    fn from(req: Event) -> ReactorEvent {\n        ReactorEvent::Event(req)\n    }\n}\n\nstruct MockReactor {\n    scheduler: &'static Scheduler<ReactorEvent>,\n}\n\nimpl MockReactor {\n    fn new() -> Self {\n        MockReactor {\n            scheduler: utils::leak(Scheduler::new(QueueKind::weights(), None)),\n        }\n    }\n\n    async fn expect_transaction_buffer_expire_announcement(\n        &self,\n        should_be_expired: &HashSet<TransactionHash>,\n    ) {\n        let ((_ancestor, reactor_event), _) = self.scheduler.pop().await;\n        match reactor_event {\n            ReactorEvent::TransactionBufferAnnouncement(TransactionsExpired(expired)) => {\n                let expired_set = HashSet::from_iter(expired);\n                assert_eq!(&expired_set, should_be_expired);\n            }\n            _ => {\n                unreachable!();\n            }\n        };\n    }\n}\n\n#[tokio::test]\nasync fn expire_transactions_and_check_announcement_when_transactions_are_of_one_lane() {\n    let mut rng = TestRng::new();\n\n    for category in all_categories() {\n        let 
mut transaction_buffer = TransactionBuffer::new(\n            Arc::new(Chainspec::default()),\n            Config::default(),\n            &Registry::new(),\n        )\n        .unwrap();\n        transaction_buffer\n            .prices\n            .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE);\n\n        let reactor = MockReactor::new();\n        let event_queue_handle = EventQueueHandle::without_shutdown(reactor.scheduler);\n        let effect_builder = EffectBuilder::new(event_queue_handle);\n\n        // generate and register some already expired transactions\n        let ttl = TimeDiff::from_seconds(rng.gen_range(30..300));\n        let past_timestamp = Timestamp::now()\n            .saturating_sub(ttl)\n            .saturating_sub(TimeDiff::from_seconds(5));\n\n        let num_transactions: usize = rng.gen_range(5..50);\n        let expired_transactions: Vec<_> = (0..num_transactions)\n            .map(|_| create_valid_transaction(&mut rng, category, Some(past_timestamp), Some(ttl)))\n            .collect();\n\n        expired_transactions\n            .iter()\n            .for_each(|transaction| transaction_buffer.register_transaction(transaction.clone()));\n        assert_container_sizes(&transaction_buffer, expired_transactions.len(), 0, 0);\n\n        // include the last expired transaction in a block and register it\n        let era = rng.gen_range(0..6);\n        let expired_txns: Vec<_> = expired_transactions.to_vec();\n        let block = TestBlockBuilder::new()\n            .era(era)\n            .height(era * 10 + rng.gen_range(0..10))\n            .transactions(expired_txns.last())\n            .build(&mut rng);\n\n        transaction_buffer.register_block(&block);\n        assert_container_sizes(&transaction_buffer, expired_transactions.len(), 1, 0);\n\n        // generate and register some valid transactions\n        let transactions: Vec<_> = (0..num_transactions)\n            .map(|_| create_valid_transaction(&mut rng, category, None, None))\n   
         .collect();\n        transactions\n            .iter()\n            .for_each(|transaction| transaction_buffer.register_transaction(transaction.clone()));\n        assert_container_sizes(\n            &transaction_buffer,\n            transactions.len() + expired_transactions.len(),\n            1,\n            0,\n        );\n\n        // expire transactions and check that they were announced as expired\n        let mut effects = transaction_buffer.expire(effect_builder);\n        tokio::spawn(effects.remove(0)).await.unwrap();\n\n        // the transactions which should be announced as expired are all the expired ones not in a\n        // block, i.e. all but the last one of `expired_transactions`\n        let expired_transaction_hashes: HashSet<_> = expired_transactions\n            .iter()\n            .take(expired_transactions.len() - 1)\n            .map(|transaction| transaction.hash())\n            .collect();\n        reactor\n            .expect_transaction_buffer_expire_announcement(&expired_transaction_hashes)\n            .await;\n\n        // the valid transactions should still be in the buffer\n        assert_container_sizes(&transaction_buffer, transactions.len(), 0, 0);\n    }\n}\n\n#[tokio::test]\nasync fn expire_transactions_and_check_announcement_when_transactions_are_of_random_categories() {\n    let mut rng = TestRng::new();\n\n    let mut transaction_buffer = TransactionBuffer::new(\n        Arc::new(Chainspec::default()),\n        Config::default(),\n        &Registry::new(),\n    )\n    .unwrap();\n    transaction_buffer\n        .prices\n        .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE);\n\n    let reactor = MockReactor::new();\n    let event_queue_handle = EventQueueHandle::without_shutdown(reactor.scheduler);\n    let effect_builder = EffectBuilder::new(event_queue_handle);\n\n    // generate and register some already expired transactions\n    let ttl = TimeDiff::from_seconds(rng.gen_range(30..300));\n    let past_timestamp = 
Timestamp::now()\n        .saturating_sub(ttl)\n        .saturating_sub(TimeDiff::from_seconds(5));\n\n    let num_transactions: usize = rng.gen_range(5..50);\n    let expired_transactions: Vec<_> = (0..num_transactions)\n        .map(|_| {\n            let random_lane = *all_categories().choose(&mut rng).unwrap();\n            create_valid_transaction(&mut rng, random_lane, Some(past_timestamp), Some(ttl))\n        })\n        .collect();\n\n    expired_transactions\n        .iter()\n        .for_each(|transaction| transaction_buffer.register_transaction(transaction.clone()));\n    assert_container_sizes(&transaction_buffer, expired_transactions.len(), 0, 0);\n\n    // include the last expired transaction in a block and register it\n    let era = rng.gen_range(0..6);\n    let expired_txns: Vec<_> = expired_transactions.to_vec();\n    let block = TestBlockBuilder::new()\n        .era(era)\n        .height(era * 10 + rng.gen_range(0..10))\n        .transactions(expired_txns.last())\n        .build(&mut rng);\n\n    transaction_buffer.register_block(&block);\n    assert_container_sizes(&transaction_buffer, expired_transactions.len(), 1, 0);\n\n    // generate and register some valid transactions\n    let transactions: Vec<_> = (0..num_transactions)\n        .map(|_| {\n            let random_lane = *all_categories().choose(&mut rng).unwrap();\n            create_valid_transaction(&mut rng, random_lane, None, None)\n        })\n        .collect();\n    transactions\n        .iter()\n        .for_each(|transaction| transaction_buffer.register_transaction(transaction.clone()));\n    assert_container_sizes(\n        &transaction_buffer,\n        transactions.len() + expired_transactions.len(),\n        1,\n        0,\n    );\n\n    // expire transactions and check that they were announced as expired\n    let mut effects = transaction_buffer.expire(effect_builder);\n    tokio::spawn(effects.remove(0)).await.unwrap();\n\n    // the transactions which should be announced as 
expired are all the expired ones not in a\n    // block, i.e. all but the last one of `expired_transactions`\n    let expired_transaction_hashes: HashSet<_> = expired_transactions\n        .iter()\n        .take(expired_transactions.len() - 1)\n        .map(|transaction| transaction.hash())\n        .collect();\n    reactor\n        .expect_transaction_buffer_expire_announcement(&expired_transaction_hashes)\n        .await;\n\n    // the valid transactions should still be in the buffer\n    assert_container_sizes(&transaction_buffer, transactions.len(), 0, 0);\n}\n\nfn make_test_chainspec(max_standard_count: u64, max_mint_count: u64) -> Arc<Chainspec> {\n    // These tests uses legacy deploys which always go on the Large lane\n    const WASM_LANE: u64 = 3; // Large\n    let large_lane = vec![\n        WASM_LANE,\n        1_048_576,\n        1024,\n        DEFAULT_LARGE_TRANSACTION_GAS_LIMIT,\n        max_standard_count,\n    ];\n    let mut transaction_v1_config = TransactionV1Config::default();\n    transaction_v1_config.native_mint_lane =\n        TransactionLaneDefinition::try_from(vec![0, 1024, 1024, 65_000_000_000, max_mint_count])\n            .unwrap();\n    transaction_v1_config.set_wasm_lanes(vec![\n        TransactionLaneDefinition::try_from(large_lane).unwrap()\n    ]);\n\n    let transaction_config = TransactionConfig {\n        transaction_v1_config,\n        block_max_approval_count: (max_standard_count + max_mint_count) as u32,\n        ..Default::default()\n    };\n    Arc::new(Chainspec {\n        transaction_config,\n        ..Default::default()\n    })\n}\n\n#[test]\nfn should_have_one_bucket_per_distinct_body_hash() {\n    let mut rng = TestRng::new();\n    let max_standard_count = 2;\n    let max_mint_count = 0;\n\n    let chainspec = make_test_chainspec(max_standard_count, max_mint_count);\n\n    let mut transaction_buffer =\n        TransactionBuffer::new(chainspec, Config::default(), &Registry::new()).unwrap();\n    transaction_buffer\n      
  .prices\n        .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE);\n\n    let secret_key1 = SecretKey::random(&mut rng);\n    let ttl = TimeDiff::from_seconds(30);\n    let deploy1 = Deploy::random_contract_by_name(\n        &mut rng,\n        Some(secret_key1),\n        None,\n        None,\n        Some(Timestamp::now()),\n        Some(ttl),\n    );\n    let deploy1_body_hash = *deploy1.header().body_hash();\n    transaction_buffer.register_transaction(deploy1.into());\n\n    let secret_key2 = SecretKey::random(&mut rng); // different signer\n    let deploy2 = Deploy::random_contract_by_name(\n        &mut rng,\n        Some(\n            SecretKey::from_pem(secret_key2.to_pem().expect(\"should pemify\"))\n                .expect(\"should un-pemify\"),\n        ),\n        None,\n        None,\n        Some(Timestamp::now()), // different timestamp\n        Some(ttl),\n    );\n    assert_eq!(\n        &deploy1_body_hash,\n        deploy2.header().body_hash(),\n        \"1 & 2 should have same body hashes\"\n    );\n    transaction_buffer.register_transaction(deploy2.into());\n\n    let buckets = transaction_buffer.buckets(GAS_PRICE_TOLERANCE);\n    assert!(buckets.len() == 1, \"should be 1 bucket\");\n\n    let deploy3 = Deploy::random_contract_by_name(\n        &mut rng,\n        Some(\n            SecretKey::from_pem(secret_key2.to_pem().expect(\"should pemify\"))\n                .expect(\"should un-pemify\"),\n        ),\n        None,\n        None,\n        Some(Timestamp::now()), // different timestamp\n        Some(ttl),\n    );\n    assert_eq!(\n        &deploy1_body_hash,\n        deploy3.header().body_hash(),\n        \"1 & 3 should have same body hashes\"\n    );\n    transaction_buffer.register_transaction(deploy3.into());\n    let buckets = transaction_buffer.buckets(GAS_PRICE_TOLERANCE);\n    assert!(buckets.len() == 1, \"should still be 1 bucket\");\n\n    let deploy4 = Deploy::random_contract_by_name(\n        &mut rng,\n        Some(\n            
SecretKey::from_pem(secret_key2.to_pem().expect(\"should pemify\"))\n                .expect(\"should un-pemify\"),\n        ),\n        Some(\"some other contract name\".to_string()),\n        None,\n        Some(Timestamp::now()), // different timestamp\n        Some(ttl),\n    );\n    assert_ne!(\n        &deploy1_body_hash,\n        deploy4.header().body_hash(),\n        \"1 & 4 should have different body hashes\"\n    );\n    transaction_buffer.register_transaction(deploy4.into());\n    let buckets = transaction_buffer.buckets(GAS_PRICE_TOLERANCE);\n    assert!(buckets.len() == 2, \"should be 2 buckets\");\n\n    let transfer5 = Deploy::random_valid_native_transfer_with_timestamp_and_ttl(\n        &mut rng,\n        Timestamp::now(),\n        ttl,\n    );\n    assert_ne!(\n        &deploy1_body_hash,\n        transfer5.header().body_hash(),\n        \"1 & 5 should have different body hashes\"\n    );\n    transaction_buffer.register_transaction(transfer5.into());\n    let buckets = transaction_buffer.buckets(GAS_PRICE_TOLERANCE);\n    assert!(buckets.len() == 3, \"should be 3 buckets\");\n}\n\n#[test]\nfn should_have_diverse_proposable_blocks_with_stocked_buffer() {\n    let rng = &mut TestRng::new();\n    let max_standard_count = 50;\n    let max_mint_count = 5;\n    let chainspec = make_test_chainspec(max_standard_count, max_mint_count);\n    let mut transaction_buffer =\n        TransactionBuffer::new(chainspec, Config::default(), &Registry::new()).unwrap();\n    transaction_buffer\n        .prices\n        .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE);\n\n    let cap = (max_standard_count * 100) as usize;\n\n    let secret_keys: Vec<SecretKey> = iter::repeat_with(|| SecretKey::random(rng))\n        .take(10)\n        .collect();\n\n    let contract_names = [\"a\", \"b\", \"c\", \"d\", \"e\"];\n    let contract_entry_points = [\"foo\", \"bar\"];\n\n    fn ttl(rng: &mut TestRng) -> TimeDiff {\n        TimeDiff::from_seconds(rng.gen_range(60..3600))\n    }\n\n  
  let mut last_timestamp = Timestamp::now();\n    for i in 0..cap {\n        let ttl = ttl(rng);\n        let secret_key = Some(\n            SecretKey::from_pem(\n                secret_keys[rng.gen_range(0..secret_keys.len())]\n                    .to_pem()\n                    .expect(\"should pemify\"),\n            )\n            .expect(\"should un-pemify\"),\n        );\n        let contract_name = Some(contract_names[rng.gen_range(0..contract_names.len())].into());\n        let contract_entry_point =\n            Some(contract_entry_points[rng.gen_range(0..contract_entry_points.len())].into());\n        let deploy = Deploy::random_contract_by_name(\n            rng,\n            secret_key,\n            contract_name,\n            contract_entry_point,\n            Some(last_timestamp),\n            Some(ttl),\n        );\n        transaction_buffer.register_transaction(deploy.into());\n        assert_eq!(\n            transaction_buffer.buffer.len(),\n            i + 1,\n            \"failed to buffer deploy {i}\"\n        );\n        last_timestamp += TimeDiff::from_millis(1);\n    }\n\n    for i in 0..max_mint_count {\n        let ttl = ttl(rng);\n        transaction_buffer.register_transaction(\n            Deploy::random_valid_native_transfer_with_timestamp_and_ttl(rng, last_timestamp, ttl)\n                .into(),\n        );\n        assert_eq!(\n            transaction_buffer.buffer.len(),\n            i as usize + 1 + cap,\n            \"failed to buffer transfer {i}\"\n        );\n        last_timestamp += TimeDiff::from_millis(1);\n    }\n\n    let expected_count = cap + (max_mint_count as usize);\n    assert_container_sizes(&transaction_buffer, expected_count, 0, 0);\n\n    let buckets1: HashMap<_, _> = transaction_buffer\n        .buckets(GAS_PRICE_TOLERANCE)\n        .into_iter()\n        .map(|(digest, footprints)| {\n            (\n                *digest,\n                footprints\n                    .into_iter()\n                    
.map(|(hash, footprint)| (hash, footprint.clone()))\n                    .collect_vec(),\n            )\n        })\n        .collect();\n    assert!(\n        buckets1.len() > 1,\n        \"should be multiple buckets with this much state\"\n    );\n    let buckets2: HashMap<_, _> = transaction_buffer\n        .buckets(GAS_PRICE_TOLERANCE)\n        .into_iter()\n        .map(|(digest, footprints)| {\n            (\n                *digest,\n                footprints\n                    .into_iter()\n                    .map(|(hash, footprint)| (hash, footprint.clone()))\n                    .collect_vec(),\n            )\n        })\n        .collect();\n\n    assert_eq!(\n        buckets1, buckets2,\n        \"with same state should get same buckets every time\"\n    );\n\n    // while it is not impossible to get identical appendable blocks over an unchanged buffer\n    // using this strategy, it should be very unlikely...the below brute forces a check for this\n    let expected_eq_tolerance = 1;\n    let mut actual_eq_count = 0;\n    let expiry = last_timestamp.saturating_add(TimeDiff::from_seconds(240));\n    for _ in 0..10 {\n        let appendable1 = transaction_buffer.appendable_block(last_timestamp, ERA_ONE, expiry);\n        let appendable2 = transaction_buffer.appendable_block(last_timestamp, ERA_ONE, expiry);\n        if appendable1 == appendable2 {\n            actual_eq_count += 1;\n        }\n    }\n    assert!(\n        actual_eq_count <= expected_eq_tolerance,\n        \"{} matches exceeded tolerance of {}\",\n        actual_eq_count,\n        expected_eq_tolerance\n    );\n}\n\n#[test]\nfn should_be_empty_if_no_time_until_expiry() {\n    let mut rng = TestRng::new();\n    let max_standard_count = 1;\n    let max_mint_count = 1;\n    let chainspec = make_test_chainspec(max_standard_count, max_mint_count);\n    let mut transaction_buffer =\n        TransactionBuffer::new(chainspec, Config::default(), &Registry::new()).unwrap();\n    
transaction_buffer\n        .prices\n        .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE);\n\n    let secret_key1 = SecretKey::random(&mut rng);\n    let ttl = TimeDiff::from_seconds(30);\n    let deploy1 = Deploy::random_contract_by_name(\n        &mut rng,\n        Some(secret_key1),\n        None,\n        None,\n        Some(Timestamp::now()),\n        Some(ttl),\n    );\n    let deploy1_body_hash = *deploy1.header().body_hash();\n    transaction_buffer.register_transaction(deploy1.into());\n\n    let buckets = transaction_buffer.buckets(GAS_PRICE_TOLERANCE);\n    assert!(buckets.len() == 1, \"should be 1 buckets\");\n\n    let transfer2 = Deploy::random_valid_native_transfer_with_timestamp_and_ttl(\n        &mut rng,\n        Timestamp::now(),\n        ttl,\n    );\n    assert_ne!(\n        &deploy1_body_hash,\n        transfer2.header().body_hash(),\n        \"1 & 2 should have different body hashes\"\n    );\n    transaction_buffer.register_transaction(transfer2.into());\n    let buckets = transaction_buffer.buckets(GAS_PRICE_TOLERANCE);\n    assert!(buckets.len() == 2, \"should be 2 buckets\");\n\n    let timestamp = Timestamp::now();\n    let appendable = transaction_buffer.appendable_block(timestamp, ERA_ONE, timestamp);\n    let count = appendable.transaction_count();\n    assert!(count == 0, \"expected 0 found {}\", count);\n\n    // logic should tolerate invalid expiry\n    let appendable = transaction_buffer.appendable_block(\n        timestamp,\n        ERA_ONE,\n        timestamp.saturating_sub(TimeDiff::from_millis(1)),\n    );\n    let count = appendable.transaction_count();\n    assert!(count == 0, \"expected 0 found {}\", count);\n}\n\nfn register_random_deploys_unique_hashes(\n    transaction_buffer: &mut TransactionBuffer,\n    num_deploys: usize,\n    rng: &mut TestRng,\n) {\n    let deploys = iter::repeat_with(|| {\n        let name = format!(\"{}\", rng.gen::<u64>());\n        let call = format!(\"{}\", rng.gen::<u64>());\n        
Deploy::random_contract_by_name(\n            rng,\n            None,\n            Some(name),\n            Some(call),\n            Some(Timestamp::now()), // different timestamp\n            None,\n        )\n    })\n    .take(num_deploys);\n    for deploy in deploys {\n        transaction_buffer.register_transaction(deploy.into());\n    }\n}\n\nfn register_random_deploys_same_hash(\n    transaction_buffer: &mut TransactionBuffer,\n    num_deploys: usize,\n    rng: &mut TestRng,\n) {\n    let deploys = iter::repeat_with(|| {\n        let name = \"test\".to_owned();\n        let call = \"test\".to_owned();\n        Deploy::random_contract_by_name(\n            rng,\n            None,\n            Some(name),\n            Some(call),\n            Some(Timestamp::now()), // different timestamp\n            None,\n        )\n    })\n    .take(num_deploys);\n    for deploy in deploys {\n        transaction_buffer.register_transaction(deploy.into());\n    }\n}\n\n#[test]\nfn test_buckets_single_hash() {\n    let mut rng = TestRng::new();\n    let max_standard_count = 100;\n    let max_mint_count = 1000;\n    let chainspec = make_test_chainspec(max_standard_count, max_mint_count);\n    let mut transaction_buffer =\n        TransactionBuffer::new(chainspec, Config::default(), &Registry::new()).unwrap();\n    transaction_buffer\n        .prices\n        .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE);\n\n    register_random_deploys_same_hash(&mut transaction_buffer, 64000, &mut rng);\n\n    let _block = transaction_buffer.appendable_block(\n        Timestamp::now(),\n        ERA_ONE,\n        Timestamp::now() + TimeDiff::from_millis(16384 / 6),\n    );\n}\n\n#[test]\nfn test_buckets_unique_hashes() {\n    let mut rng = TestRng::new();\n    let max_standard_count = 100;\n    let max_mint_count = 1000;\n    let chainspec = make_test_chainspec(max_standard_count, max_mint_count);\n    let mut transaction_buffer =\n        TransactionBuffer::new(chainspec, Config::default(), 
&Registry::new()).unwrap();\n    transaction_buffer\n        .prices\n        .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE);\n\n    register_random_deploys_unique_hashes(&mut transaction_buffer, 64000, &mut rng);\n\n    let _block = transaction_buffer.appendable_block(\n        Timestamp::now(),\n        ERA_ONE,\n        Timestamp::now() + TimeDiff::from_millis(16384 / 6),\n    );\n}\n\n#[test]\nfn test_buckets_mixed_load() {\n    let mut rng = TestRng::new();\n    let max_standard_count = 100;\n    let max_mint_count = 1000;\n    let chainspec = make_test_chainspec(max_standard_count, max_mint_count);\n    let mut transaction_buffer =\n        TransactionBuffer::new(chainspec, Config::default(), &Registry::new()).unwrap();\n    transaction_buffer\n        .prices\n        .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE);\n\n    register_random_deploys_unique_hashes(&mut transaction_buffer, 60000, &mut rng);\n    register_random_deploys_same_hash(&mut transaction_buffer, 4000, &mut rng);\n\n    let _block = transaction_buffer.appendable_block(\n        Timestamp::now(),\n        ERA_ONE,\n        Timestamp::now() + TimeDiff::from_millis(16384 / 6),\n    );\n}\n"
  },
  {
    "path": "node/src/components/transaction_buffer.rs",
    "content": "mod config;\nmod event;\nmod metrics;\n#[cfg(test)]\nmod tests;\n\nuse std::{\n    collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque},\n    convert::TryInto,\n    iter::FromIterator,\n    mem,\n    sync::Arc,\n};\n\nuse datasize::DataSize;\nuse futures::FutureExt;\nuse itertools::Itertools;\nuse prometheus::Registry;\nuse smallvec::smallvec;\nuse tracing::{debug, error, info, warn};\n\nuse casper_types::{\n    Block, BlockV2, Chainspec, Digest, DisplayIter, EraId, Timestamp, Transaction, TransactionHash,\n    TransactionId, AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID,\n};\n\nuse crate::{\n    components::{\n        consensus::{ClContext, ProposedBlock},\n        Component, ComponentState, InitializedComponent,\n    },\n    effect::{\n        announcements::TransactionBufferAnnouncement,\n        requests::{StorageRequest, TransactionBufferRequest},\n        EffectBuilder, EffectExt, Effects,\n    },\n    fatal,\n    reactor::main_reactor::MainEvent,\n    storage::Storage,\n    types::{\n        appendable_block::{AddError, AppendableBlock},\n        FinalizedBlock, TransactionFootprint,\n    },\n    NodeRng,\n};\npub(crate) use config::Config;\npub(crate) use event::Event;\n\nuse crate::effect::{requests::ContractRuntimeRequest, Responder};\nuse metrics::Metrics;\n\nconst COMPONENT_NAME: &str = \"transaction_buffer\";\n\n#[derive(DataSize, Debug)]\npub(crate) struct TransactionBuffer {\n    state: ComponentState,\n    cfg: Config,\n    chainspec: Arc<Chainspec>,\n    // Keeps track of all transactions the buffer is currently aware of.\n    //\n    // `hold` and `dead` are used to filter it on demand as necessary.\n    //\n    // The timestamp is the time when the transaction expires.\n    // Expired items are removed via a self-perpetuating expire event.\n    buffer: HashMap<TransactionHash, (Timestamp, Option<TransactionFootprint>)>,\n    // When a maybe-block is in flight, we pause inclusion of the transactions within it in 
other\n    // proposed blocks. If the maybe-block becomes an actual block the transaction hashes will get\n    // put to self.dead, otherwise, the hold will be released and the transactions will become\n    // eligible to propose again.\n    hold: BTreeMap<Timestamp, HashSet<TransactionHash>>,\n    // Transaction hashes that should not be proposed, ever.\n    dead: HashSet<TransactionHash>,\n    prices: BTreeMap<EraId, u8>,\n    #[data_size(skip)]\n    metrics: Metrics,\n}\n\nimpl TransactionBuffer {\n    /// Create a transaction buffer.\n    pub(crate) fn new(\n        chainspec: Arc<Chainspec>,\n        cfg: Config,\n        registry: &Registry,\n    ) -> Result<Self, prometheus::Error> {\n        Ok(TransactionBuffer {\n            state: ComponentState::Uninitialized,\n            cfg,\n            chainspec,\n            buffer: HashMap::new(),\n            hold: BTreeMap::new(),\n            dead: HashSet::new(),\n            prices: BTreeMap::new(),\n            metrics: Metrics::new(registry)?,\n        })\n    }\n\n    pub(crate) fn initialize_component(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        storage: &Storage,\n    ) -> Option<Effects<MainEvent>> {\n        if <Self as InitializedComponent<MainEvent>>::is_uninitialized(self) {\n            info!(\n                \"pending initialization of {}\",\n                <Self as Component<MainEvent>>::name(self)\n            );\n            <Self as InitializedComponent<MainEvent>>::set_state(\n                self,\n                ComponentState::Initializing,\n            );\n            let blocks = match storage.read_blocks_for_replay_protection() {\n                Ok(blocks) => blocks,\n                Err(err) => {\n                    return Some(\n                        fatal!(\n                            effect_builder,\n                            \"fatal block store error when attempting to read highest blocks: {}\",\n                            err\n       
                 )\n                        .ignore(),\n                    );\n                }\n            };\n            debug!(\n                blocks = ?blocks.iter().map(Block::height).collect_vec(),\n                \"TransactionBuffer: initialization\"\n            );\n            info!(\"initialized {}\", <Self as Component<MainEvent>>::name(self));\n            let event = Event::Initialize(blocks);\n            return Some(smallvec![async {\n                smallvec![MainEvent::TransactionBuffer(event)]\n            }\n            .boxed()]);\n        }\n        if <Self as InitializedComponent<MainEvent>>::is_fatal(self) {\n            return Some(\n                fatal!(\n                    effect_builder,\n                    \"{} failed to initialize\",\n                    <Self as Component<MainEvent>>::name(self)\n                )\n                .ignore(),\n            );\n        }\n        None\n    }\n\n    /// Manages cache ejection.\n    fn expire<REv>(&mut self, effect_builder: EffectBuilder<REv>) -> Effects<Event>\n    where\n        REv: From<Event> + From<TransactionBufferAnnouncement> + Send,\n    {\n        let now = Timestamp::now();\n        let (buffer, mut freed): (HashMap<_, _>, _) = mem::take(&mut self.buffer)\n            .into_iter()\n            .partition(|(_, (expiry_time, _))| *expiry_time >= now);\n\n        if !freed.is_empty() {\n            info!(\"TransactionBuffer: purging {} transaction(s)\", freed.len());\n        }\n\n        // clear expired transaction from all holds, then clear any entries that have no items\n        // remaining\n        self.hold.iter_mut().for_each(|(_, held_transactions)| {\n            held_transactions.retain(|transaction_hash| !freed.contains_key(transaction_hash));\n        });\n        self.hold.retain(|_, remaining| !remaining.is_empty());\n\n        // retain all those in `dead` which are not in `freed`, at the same time reducing `freed` to\n        // only those entries not 
also in `dead` - i.e. transactions which expired without being\n        // included in a block\n        self.dead\n            .retain(|transaction_hash| freed.remove(transaction_hash).is_none());\n        self.buffer = buffer;\n\n        if !freed.is_empty() {\n            info!(\n                \"TransactionBuffer: expiring without executing {} transaction(s)\",\n                freed.len()\n            );\n            debug!(\n                \"TransactionBuffer: expiring without executing {}\",\n                DisplayIter::new(freed.keys())\n            );\n        }\n\n        if let Some(era_id) = self.prices.keys().max() {\n            let updated = self\n                .prices\n                .clone()\n                .into_iter()\n                .filter(|(price_era_id, _)| price_era_id.successor() >= *era_id)\n                .collect();\n\n            self.prices = updated;\n        }\n\n        let mut effects = effect_builder\n            .announce_expired_transactions(freed.keys().cloned().collect())\n            .ignore();\n        effects.extend(\n            effect_builder\n                .set_timeout(self.cfg.expiry_check_interval().into())\n                .event(move |_| Event::Expire),\n        );\n        self.update_all_metrics();\n        effects\n    }\n\n    fn register_transaction_gossiped<REv>(\n        transaction_id: TransactionId,\n        effect_builder: EffectBuilder<REv>,\n    ) -> Effects<Event>\n    where\n        REv: From<Event> + From<StorageRequest> + Send,\n    {\n        debug!(%transaction_id, \"TransactionBuffer: registering gossiped transaction\");\n        effect_builder\n            .get_stored_transaction(transaction_id)\n            .event(move |maybe_transaction| {\n                Event::StoredTransaction(transaction_id, maybe_transaction.map(Box::new))\n            })\n    }\n\n    fn handle_get_appendable_block<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        timestamp: 
Timestamp,\n        era_id: EraId,\n        request_expiry: Timestamp,\n        responder: Responder<AppendableBlock>,\n    ) -> Effects<Event>\n    where\n        REv: From<ContractRuntimeRequest> + Send,\n    {\n        if !self.prices.contains_key(&era_id) {\n            info!(\"Empty prices field, requesting gas price from contract runtime\");\n            return effect_builder\n                .get_current_gas_price(era_id)\n                .event(move |maybe_gas_price| {\n                    Event::GetGasPriceResult(\n                        maybe_gas_price,\n                        era_id,\n                        timestamp,\n                        request_expiry,\n                        responder,\n                    )\n                });\n        }\n\n        responder\n            .respond(self.appendable_block(timestamp, era_id, request_expiry))\n            .ignore()\n    }\n\n    /// Update buffer considering new stored transaction.\n    fn register_transaction(&mut self, transaction: Transaction) {\n        let transaction_hash = transaction.hash();\n        if let Err(error) = transaction.verify() {\n            error!(%transaction_hash, ?error, \"TransactionBuffer: invalid transaction must not be buffered\");\n            return;\n        }\n\n        if self\n            .hold\n            .values()\n            .any(|ths| ths.contains(&transaction_hash))\n        {\n            info!(%transaction_hash, \"TransactionBuffer: attempt to register already held transaction\");\n            return;\n        }\n\n        let footprint = match TransactionFootprint::new(&self.chainspec, &transaction) {\n            Ok(footprint) => footprint,\n            Err(invalid_transaction_error) => {\n                error!(%transaction_hash, ?invalid_transaction_error, \"TransactionBuffer: unable to created transaction footprint\");\n                return;\n            }\n        };\n        let expiry_time = transaction.expires();\n        match self\n         
   .buffer\n            .insert(transaction_hash, (expiry_time, Some(footprint)))\n        {\n            Some(prev) => {\n                warn!(%transaction_hash, ?prev, \"TransactionBuffer: transaction upserted\");\n            }\n            None => {\n                debug!(%transaction_hash, \"TransactionBuffer: new transaction buffered\");\n                self.metrics.total_transactions.inc();\n            }\n        }\n    }\n\n    /// Update holds considering new proposed block.\n    fn register_block_proposed(&mut self, proposed_block: ProposedBlock<ClContext>) {\n        let timestamp = &proposed_block.context().timestamp();\n        if let Some(hold_set) = self.hold.get_mut(timestamp) {\n            debug!(%timestamp, \"TransactionBuffer: existing hold timestamp extended\");\n            hold_set.extend(\n                proposed_block\n                    .value()\n                    .all_transactions()\n                    .map(|(transaction_hash, _)| *transaction_hash),\n            );\n        } else {\n            debug!(%timestamp, \"TransactionBuffer: new hold timestamp inserted\");\n            self.hold.insert(\n                *timestamp,\n                HashSet::from_iter(\n                    proposed_block\n                        .value()\n                        .all_transactions()\n                        .map(|(transaction_hash, _)| *transaction_hash),\n                ),\n            );\n        }\n        self.metrics.held_transactions.set(\n            self.hold\n                .values()\n                .map(|transactions| transactions.len())\n                .sum::<usize>()\n                .try_into()\n                .unwrap_or(i64::MIN),\n        );\n    }\n\n    fn register_transactions<'a>(\n        &mut self,\n        timestamp: Timestamp,\n        transaction_hashes: impl Iterator<Item = &'a TransactionHash>,\n    ) {\n        let expiry_timestamp = timestamp.saturating_add(self.chainspec.transaction_config.max_ttl);\n\n  
      for transaction_hash in transaction_hashes {\n            if !self.buffer.contains_key(transaction_hash) {\n                self.buffer\n                    .insert(*transaction_hash, (expiry_timestamp, None));\n            }\n            self.dead.insert(*transaction_hash);\n        }\n        // Transactions held for proposed blocks which did not get finalized in time are eligible\n        // again\n        let (hold, _) = mem::take(&mut self.hold)\n            .into_iter()\n            .partition(|(ts, _)| *ts > timestamp);\n        self.hold = hold;\n        self.update_all_metrics();\n    }\n\n    /// Update buffer and holds considering new added block.\n    fn register_block(&mut self, block: &BlockV2) {\n        let block_height = block.height();\n        let timestamp = block.timestamp();\n        debug!(%timestamp, \"TransactionBuffer: register_block({}) timestamp finalized\", block_height);\n        self.register_transactions(timestamp, block.all_transactions());\n    }\n\n    /// When initializing the buffer, register past blocks in order to provide replay protection.\n    fn register_versioned_block(&mut self, block: &Block) {\n        let block_height = block.height();\n        let timestamp = block.timestamp();\n        debug!(\n            %timestamp,\n            \"TransactionBuffer: register_versioned_block({}) timestamp finalized\",\n            block_height\n        );\n        match block {\n            Block::V1(v1_block) => {\n                let transaction_hashes: Vec<TransactionHash> = v1_block\n                    .deploy_and_transfer_hashes()\n                    .map(|deploy_hash| TransactionHash::Deploy(*deploy_hash))\n                    .collect();\n                self.register_transactions(timestamp, transaction_hashes.iter())\n            }\n            Block::V2(v2_block) => {\n                self.register_transactions(timestamp, v2_block.all_transactions());\n            }\n        }\n    }\n\n    /// Update buffer and 
holds considering new finalized block.\n    fn register_block_finalized(&mut self, finalized_block: &FinalizedBlock) {\n        let block_height = finalized_block.height;\n        let timestamp = finalized_block.timestamp;\n        debug!(%timestamp, \"TransactionBuffer: register_block_finalized({}) timestamp finalized\", block_height);\n        self.register_transactions(timestamp, finalized_block.all_transactions());\n    }\n\n    /// Returns eligible transactions that are buffered and not held or dead.\n    fn proposable(\n        &self,\n        current_era_gas_price: u8,\n    ) -> impl Iterator<Item = (&TransactionHash, &TransactionFootprint)> {\n        debug!(\"TransactionBuffer: getting proposable transactions\");\n        self.buffer\n            .iter()\n            .filter(move |(th, _)| !self.hold.values().any(|hs| hs.contains(th)))\n            .filter(move |(th, _)| !self.dead.contains(th))\n            .filter_map(|(th, (_, maybe_footprint))| {\n                maybe_footprint.as_ref().map(|footprint| (th, footprint))\n            })\n            .filter(move |(_, footprint)| footprint.gas_price_tolerance() >= current_era_gas_price)\n    }\n\n    fn buckets(\n        &mut self,\n        current_era_gas_price: u8,\n    ) -> HashMap<&Digest, Vec<(TransactionHash, &TransactionFootprint)>> {\n        let proposable = self.proposable(current_era_gas_price);\n\n        let mut buckets: HashMap<_, Vec<_>> = HashMap::new();\n        for (transaction_hash, footprint) in proposable {\n            buckets\n                .entry(&footprint.payload_hash)\n                .and_modify(|vec| vec.push((*transaction_hash, footprint)))\n                .or_insert(vec![(*transaction_hash, footprint)]);\n        }\n        buckets\n    }\n\n    /// Returns a right-sized payload of transactions that can be proposed.\n    fn appendable_block(\n        &mut self,\n        timestamp: Timestamp,\n        era_id: EraId,\n        request_expiry: Timestamp,\n    ) -> 
AppendableBlock {\n        let current_era_gas_price = match self.prices.get(&era_id) {\n            Some(gas_price) => *gas_price,\n            None => {\n                return AppendableBlock::new(\n                    self.chainspec.transaction_config.clone(),\n                    self.chainspec.vacancy_config.min_gas_price,\n                    timestamp,\n                );\n            }\n        };\n        let mut ret = AppendableBlock::new(\n            self.chainspec.transaction_config.clone(),\n            current_era_gas_price,\n            timestamp,\n        );\n        if Timestamp::now() >= request_expiry {\n            debug!(\"TransactionBuffer: request expiry reached, returning empty proposal\");\n            return ret;\n        }\n\n        let mut holds = HashSet::new();\n\n        let mut have_hit_mint_limit = false;\n        let mut have_hit_wasm_limit = false;\n        let mut have_hit_install_upgrade_limit = false;\n        let mut have_hit_auction_limit = false;\n\n        #[cfg(test)]\n        let mut iter_counter = 0;\n        #[cfg(test)]\n        let iter_limit = self.buffer.len() * 4;\n\n        let mut buckets = self.buckets(current_era_gas_price);\n        let mut payload_hashes_queue: VecDeque<_> = buckets.keys().cloned().collect();\n\n        while let Some(payload_hash) = payload_hashes_queue.pop_front() {\n            if Timestamp::now() > request_expiry {\n                debug!(\"TransactionBuffer: request expiry reached, returning proposal\");\n                break;\n            }\n            #[cfg(test)]\n            {\n                iter_counter += 1;\n                assert!(\n                    iter_counter < iter_limit,\n                    \"the number of iterations shouldn't be too large\"\n                );\n            }\n\n            let Some((transaction_hash, footprint)) =\n                buckets.get_mut(payload_hash).and_then(Vec::<_>::pop)\n            else {\n                continue;\n            
};\n\n            // bucket wasn't empty - push the hash back into the queue to be processed again on the\n            // next pass\n            payload_hashes_queue.push_back(payload_hash);\n\n            if footprint.is_mint() && have_hit_mint_limit {\n                continue;\n            }\n            if footprint.is_install_upgrade() && have_hit_install_upgrade_limit {\n                continue;\n            }\n            if footprint.is_auction() && have_hit_auction_limit {\n                continue;\n            }\n            if footprint.is_wasm_based() && have_hit_wasm_limit {\n                continue;\n            }\n\n            let has_multiple_approvals = footprint.approvals.len() > 1;\n            match ret.add_transaction(footprint) {\n                Ok(_) => {\n                    debug!(%transaction_hash, \"TransactionBuffer: proposing transaction\");\n                    holds.insert(transaction_hash);\n                }\n                Err(error) => {\n                    match error {\n                        AddError::Duplicate => {\n                            // it should be physically impossible for a duplicate transaction or\n                            // transaction to be in the transaction buffer, thus this should be\n                            // unreachable\n                            warn!(\n                                ?transaction_hash,\n                                \"TransactionBuffer: duplicated transaction or transfer in transaction buffer\"\n                            );\n                        }\n                        AddError::Expired => {\n                            info!(\n                                ?transaction_hash,\n                                \"TransactionBuffer: expired transaction or transfer in transaction buffer\"\n                            );\n                        }\n                        AddError::Count(lane_id) => {\n                            match lane_id {\n                 
               lane_id if lane_id == MINT_LANE_ID => {\n                                    have_hit_mint_limit = true;\n                                }\n                                lane_id if lane_id == AUCTION_LANE_ID => {\n                                    have_hit_auction_limit = true;\n                                }\n                                lane_id if lane_id == INSTALL_UPGRADE_LANE_ID => {\n                                    have_hit_install_upgrade_limit = true;\n                                }\n                                _ => {\n                                    have_hit_wasm_limit = true;\n                                }\n                            }\n                            if have_hit_wasm_limit\n                                && have_hit_auction_limit\n                                && have_hit_install_upgrade_limit\n                                && have_hit_mint_limit\n                            {\n                                info!(\n                                    ?transaction_hash,\n                                    \"TransactionBuffer: block fully saturated\"\n                                );\n                                break;\n                            }\n                        }\n                        AddError::ApprovalCount if has_multiple_approvals => {\n                            // keep iterating, we can maybe fit in a deploy with fewer approvals\n                        }\n                        AddError::ApprovalCount | AddError::GasLimit | AddError::BlockSize => {\n                            info!(\n                                ?transaction_hash,\n                                %error,\n                                \"TransactionBuffer: a block limit has been reached\"\n                            );\n                            // a block limit has been reached\n                            break;\n                        }\n                        
AddError::VariantMismatch(mismatch) => {\n                            error!(?transaction_hash, %mismatch,\n                                \"TransactionBuffer: data mismatch when adding transaction\"\n                            );\n                            // keep iterating\n                        }\n                        AddError::ExcessiveTtl => {\n                            error!(\n                                ?transaction_hash,\n                                \"TransactionBuffer: skipping transaction with excessive ttl\"\n                            );\n                            // keep iterating\n                        }\n                        AddError::FutureDatedDeploy => {\n                            error!(\n                                ?transaction_hash,\n                                %footprint.timestamp,\n                                \"TransactionBuffer: skipping transaction with future dated deploy\"\n                            );\n                            // keep iterating\n                        }\n                    }\n                }\n            }\n        }\n\n        // Put a hold on all proposed transactions / transfers and update metrics\n        match self.hold.entry(timestamp) {\n            btree_map::Entry::Vacant(entry) => {\n                entry.insert(holds);\n            }\n            btree_map::Entry::Occupied(mut entry) => {\n                entry.get_mut().extend(holds);\n            }\n        }\n        self.update_all_metrics();\n\n        info!(\n            \"produced {}, buffer has {} held, {} dead, {} total\",\n            ret,\n            self.hold\n                .values()\n                .map(|transactions| transactions.len())\n                .sum::<usize>(),\n            self.dead.len(),\n            self.buffer.len()\n        );\n\n        ret\n    }\n\n    /// Updates all transaction count metrics based on the size of the internal structs.\n    fn update_all_metrics(&mut self) 
{\n        // if number of elements is too high to fit, we overflow the metric\n        // intentionally in order to get some indication that something is wrong.\n        self.metrics.held_transactions.set(\n            self.hold\n                .values()\n                .map(|transactions| transactions.len())\n                .sum::<usize>()\n                .try_into()\n                .unwrap_or(i64::MIN),\n        );\n        self.metrics\n            .dead_transactions\n            .set(self.dead.len().try_into().unwrap_or(i64::MIN));\n        self.metrics\n            .total_transactions\n            .set(self.buffer.len().try_into().unwrap_or(i64::MIN));\n    }\n}\n\nimpl<REv> InitializedComponent<REv> for TransactionBuffer\nwhere\n    REv: From<Event>\n        + From<TransactionBufferAnnouncement>\n        + From<ContractRuntimeRequest>\n        + From<StorageRequest>\n        + Send\n        + 'static,\n{\n    fn state(&self) -> &ComponentState {\n        &self.state\n    }\n\n    fn set_state(&mut self, new_state: ComponentState) {\n        info!(\n            ?new_state,\n            name = <Self as Component<MainEvent>>::name(self),\n            \"component state changed\"\n        );\n\n        self.state = new_state;\n    }\n}\n\nimpl<REv> Component<REv> for TransactionBuffer\nwhere\n    REv: From<Event>\n        + From<TransactionBufferAnnouncement>\n        + From<StorageRequest>\n        + From<ContractRuntimeRequest>\n        + Send\n        + 'static,\n{\n    type Event = Event;\n\n    fn handle_event(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        _rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event> {\n        match &self.state {\n            ComponentState::Fatal(msg) => {\n                error!(\n                    msg,\n                    ?event,\n                    name = <Self as Component<MainEvent>>::name(self),\n                    \"should not handle this event when this 
component has fatal error\"\n                );\n                Effects::new()\n            }\n            ComponentState::Uninitialized => {\n                warn!(\n                    ?event,\n                    name = <Self as Component<MainEvent>>::name(self),\n                    \"should not handle this event when component is uninitialized\"\n                );\n                Effects::new()\n            }\n            ComponentState::Initializing => {\n                match event {\n                    Event::Initialize(blocks) => {\n                        for block in blocks {\n                            self.register_versioned_block(&block);\n                        }\n                        <Self as InitializedComponent<MainEvent>>::set_state(\n                            self,\n                            ComponentState::Initialized,\n                        );\n                        // start self-expiry management on initialization\n                        effect_builder\n                            .set_timeout(self.cfg.expiry_check_interval().into())\n                            .event(move |_| Event::Expire)\n                    }\n                    Event::Request(_)\n                    | Event::ReceiveTransactionGossiped(_)\n                    | Event::StoredTransaction(_, _)\n                    | Event::BlockProposed(_)\n                    | Event::Block(_)\n                    | Event::VersionedBlock(_)\n                    | Event::BlockFinalized(_)\n                    | Event::Expire\n                    | Event::UpdateEraGasPrice { .. 
}\n                    | Event::GetGasPriceResult(_, _, _, _, _) => {\n                        warn!(\n                            ?event,\n                            name = <Self as Component<MainEvent>>::name(self),\n                            \"should not handle this event when component is pending initialization\"\n                        );\n                        Effects::new()\n                    }\n                }\n            }\n            ComponentState::Initialized => match event {\n                Event::Initialize(_) => {\n                    error!(\n                        ?event,\n                        name = <Self as Component<MainEvent>>::name(self),\n                        \"component already initialized\"\n                    );\n                    Effects::new()\n                }\n                Event::Request(TransactionBufferRequest::GetAppendableBlock {\n                    timestamp,\n                    era_id,\n                    responder,\n                    request_expiry,\n                }) => self.handle_get_appendable_block(\n                    effect_builder,\n                    timestamp,\n                    era_id,\n                    request_expiry,\n                    responder,\n                ),\n\n                Event::GetGasPriceResult(\n                    maybe_gas_price,\n                    era_id,\n                    timestamp,\n                    request_expiry,\n                    responder,\n                ) => match maybe_gas_price {\n                    None => responder\n                        .respond(AppendableBlock::new(\n                            self.chainspec.transaction_config.clone(),\n                            self.chainspec.vacancy_config.min_gas_price,\n                            timestamp,\n                        ))\n                        .ignore(),\n                    Some(gas_price) => {\n                        self.prices.insert(era_id, gas_price);\n            
            responder\n                            .respond(self.appendable_block(timestamp, era_id, request_expiry))\n                            .ignore()\n                    }\n                },\n                Event::BlockFinalized(finalized_block) => {\n                    self.register_block_finalized(&finalized_block);\n                    Effects::new()\n                }\n                Event::Block(block) => {\n                    self.register_block(&block);\n                    Effects::new()\n                }\n                Event::VersionedBlock(block) => {\n                    self.register_versioned_block(&block);\n                    Effects::new()\n                }\n                Event::BlockProposed(proposed) => {\n                    self.register_block_proposed(*proposed);\n                    Effects::new()\n                }\n                Event::ReceiveTransactionGossiped(transaction_id) => {\n                    Self::register_transaction_gossiped(transaction_id, effect_builder)\n                }\n                Event::StoredTransaction(transaction_id, maybe_transaction) => {\n                    match maybe_transaction {\n                        Some(transaction) => {\n                            self.register_transaction(*transaction);\n                        }\n                        None => {\n                            debug!(\"cannot register un-stored transaction({})\", transaction_id);\n                        }\n                    }\n                    Effects::new()\n                }\n                Event::Expire => self.expire(effect_builder),\n                Event::UpdateEraGasPrice(era_id, next_era_gas_price) => {\n                    self.prices.insert(era_id, next_era_gas_price);\n                    Effects::new()\n                }\n            },\n        }\n    }\n\n    fn name(&self) -> &str {\n        COMPONENT_NAME\n    }\n}\n"
  },
  {
    "path": "node/src/components/upgrade_watcher.rs",
    "content": "//! Chainspec loader component.\n//!\n//! The chainspec loader initializes a node by reading information from the chainspec or an\n//! upgrade_point, and committing it to the permanent storage.\n//!\n//! See\n//! <https://casperlabs.atlassian.net/wiki/spaces/EN/pages/135528449/Genesis+Process+Specification>\n//! for full details.\n\nuse std::{\n    fmt::{self, Display, Formatter},\n    fs, io,\n    path::{Path, PathBuf},\n    str::FromStr,\n};\n\nuse datasize::DataSize;\nuse derive_more::From;\nuse serde::{Deserialize, Serialize};\nuse thiserror::Error;\nuse tokio::task;\nuse tracing::{debug, error, info, trace, warn};\n\nuse casper_types::{\n    file_utils::{self, ReadFileError},\n    Chainspec, EraId, NextUpgrade, ProtocolConfig, ProtocolVersion, TimeDiff,\n};\n\nuse crate::{\n    components::{Component, ComponentState, InitializedComponent},\n    effect::{\n        announcements::UpgradeWatcherAnnouncement, requests::UpgradeWatcherRequest, EffectBuilder,\n        EffectExt, Effects,\n    },\n    reactor::main_reactor::MainEvent,\n    utils::chain_specification::parse_toml::CHAINSPEC_FILENAME,\n    NodeRng,\n};\n\nconst COMPONENT_NAME: &str = \"upgrade_watcher\";\n\nconst DEFAULT_UPGRADE_CHECK_INTERVAL: &str = \"30sec\";\n\n#[derive(Copy, Clone, DataSize, Debug, Deserialize, Serialize)]\npub struct Config {\n    /// How often to scan file system for available upgrades.\n    pub upgrade_check_interval: TimeDiff,\n}\n\nimpl Default for Config {\n    fn default() -> Self {\n        Config {\n            upgrade_check_interval: DEFAULT_UPGRADE_CHECK_INTERVAL.parse().unwrap(),\n        }\n    }\n}\n\n/// `ChainspecHandler` events.\n#[derive(Debug, From, Serialize)]\npub(crate) enum Event {\n    /// Start checking for installed upgrades.\n    Initialize,\n    #[from]\n    Request(UpgradeWatcherRequest),\n    /// Check config dir to see if an upgrade activation point is available, and if so announce it.\n    CheckForNextUpgrade,\n    /// If the result of 
checking for an upgrade is successful, it is passed here.\n    GotNextUpgrade(Option<NextUpgrade>),\n}\n\nimpl Display for Event {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            Event::Initialize => {\n                write!(formatter, \"start checking for installed upgrades\")\n            }\n            Event::Request(_) => {\n                write!(formatter, \"upgrade watcher request\")\n            }\n            Event::CheckForNextUpgrade => {\n                write!(formatter, \"check for next upgrade\")\n            }\n            Event::GotNextUpgrade(Some(next_upgrade)) => {\n                write!(formatter, \"got {}\", next_upgrade)\n            }\n            Event::GotNextUpgrade(None) => {\n                write!(formatter, \"no upgrade detected\")\n            }\n        }\n    }\n}\n\n#[derive(Debug, Error)]\npub(crate) enum Error {\n    /// Error while decoding the chainspec from TOML format.\n    #[error(\"decoding from TOML error: {0}\")]\n    DecodingFromToml(#[from] toml::de::Error),\n\n    #[error(\"chainspec directory does not have a parent\")]\n    NoChainspecDirParent,\n\n    /// Error loading the upgrade point.\n    #[error(\"could not load upgrade point: {0}\")]\n    LoadUpgradePoint(ReadFileError),\n\n    /// Failed to read the given directory.\n    #[error(\"failed to read dir {}: {error}\", dir.display())]\n    ReadDir {\n        /// The directory which could not be read.\n        dir: PathBuf,\n        /// The underlying error.\n        error: io::Error,\n    },\n\n    /// No subdirectory representing a semver version was found in the given directory.\n    #[error(\"failed to get a valid version from subdirs in {}\", dir.display())]\n    NoVersionSubdirFound {\n        /// The searched directory.\n        dir: PathBuf,\n    },\n}\n\n#[derive(Clone, DataSize, Debug)]\npub(crate) struct UpgradeWatcher {\n    current_version: ProtocolVersion,\n    config: Config,\n    /// The path 
to the folder where all chainspec and upgrade_point files will be stored in\n    /// subdirs corresponding to their versions.\n    root_dir: PathBuf,\n    state: ComponentState,\n    next_upgrade: Option<NextUpgrade>,\n}\n\nimpl UpgradeWatcher {\n    pub(crate) fn new<P: AsRef<Path>>(\n        chainspec: &Chainspec,\n        config: Config,\n        chainspec_dir: P,\n    ) -> Result<Self, Error> {\n        let root_dir = chainspec_dir\n            .as_ref()\n            .parent()\n            .map(|path| path.to_path_buf())\n            .ok_or(Error::NoChainspecDirParent)?;\n\n        let current_version = chainspec.protocol_config.version;\n        let next_upgrade = next_upgrade(root_dir.clone(), current_version);\n\n        let upgrade_watcher = UpgradeWatcher {\n            current_version,\n            config,\n            root_dir,\n            state: ComponentState::Uninitialized,\n            next_upgrade,\n        };\n\n        Ok(upgrade_watcher)\n    }\n\n    pub(crate) fn should_upgrade_after(&self, era_id: EraId) -> bool {\n        self.next_upgrade\n            .as_ref()\n            .is_some_and(|upgrade| upgrade.activation_point().should_upgrade(&era_id))\n    }\n\n    pub(crate) fn next_upgrade_activation_point(&self) -> Option<EraId> {\n        self.next_upgrade\n            .map(|next_upgrade| next_upgrade.activation_point().era_id())\n    }\n\n    fn start_checking_for_upgrades<REv>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n    ) -> Effects<Event>\n    where\n        REv: From<UpgradeWatcherAnnouncement> + Send,\n    {\n        if self.state != ComponentState::Initializing {\n            return Effects::new();\n        }\n        <Self as InitializedComponent<MainEvent>>::set_state(self, ComponentState::Initialized);\n        self.check_for_next_upgrade(effect_builder)\n    }\n\n    fn check_for_next_upgrade<REv>(&self, effect_builder: EffectBuilder<REv>) -> Effects<Event>\n    where\n        REv: 
From<UpgradeWatcherAnnouncement> + Send,\n    {\n        let root_dir = self.root_dir.clone();\n        let current_version = self.current_version;\n        let mut effects = async move {\n            let maybe_next_upgrade =\n                task::spawn_blocking(move || next_upgrade(root_dir, current_version))\n                    .await\n                    .unwrap_or_else(|error| {\n                        warn!(%error, \"failed to join tokio task\");\n                        None\n                    });\n            effect_builder\n                .upgrade_watcher_announcement(maybe_next_upgrade)\n                .await\n        }\n        .ignore();\n\n        effects.extend(\n            effect_builder\n                .set_timeout(self.config.upgrade_check_interval.into())\n                .event(|_| Event::CheckForNextUpgrade),\n        );\n\n        effects\n    }\n\n    fn handle_got_next_upgrade(\n        &mut self,\n        maybe_next_upgrade: Option<NextUpgrade>,\n    ) -> Effects<Event> {\n        trace!(\"got {:?}\", maybe_next_upgrade);\n        if self.next_upgrade != maybe_next_upgrade {\n            let new_point = match &maybe_next_upgrade {\n                Some(next_upgrade) => next_upgrade.to_string(),\n                None => \"none\".to_string(),\n            };\n            let current_point = match &self.next_upgrade {\n                Some(next_upgrade) => next_upgrade.to_string(),\n                None => \"none\".to_string(),\n            };\n            info!(\n                %new_point,\n                %current_point,\n                \"changing upgrade activation point\"\n            );\n        }\n\n        self.next_upgrade = maybe_next_upgrade;\n        Effects::new()\n    }\n}\n\nimpl<REv> Component<REv> for UpgradeWatcher\nwhere\n    REv: From<Event> + From<UpgradeWatcherAnnouncement> + Send,\n{\n    type Event = Event;\n\n    fn handle_event(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        _rng: 
&mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event> {\n        match &self.state {\n            ComponentState::Fatal(msg) => {\n                error!(\n                    msg,\n                    ?event,\n                    name = <Self as Component<MainEvent>>::name(self),\n                    \"should not handle this event when this component has fatal error\"\n                );\n                Effects::new()\n            }\n            ComponentState::Uninitialized => {\n                warn!(\n                    ?event,\n                    name = <Self as Component<MainEvent>>::name(self),\n                    \"should not handle this event when component is uninitialized\"\n                );\n                Effects::new()\n            }\n            ComponentState::Initializing => match event {\n                Event::Initialize => self.start_checking_for_upgrades(effect_builder),\n                Event::Request(_) | Event::CheckForNextUpgrade | Event::GotNextUpgrade(_) => {\n                    warn!(\n                        ?event,\n                        name = <Self as Component<MainEvent>>::name(self),\n                        \"should not handle this event when component is pending initialization\"\n                    );\n                    Effects::new()\n                }\n            },\n            ComponentState::Initialized => match event {\n                Event::Initialize => {\n                    error!(\n                        ?event,\n                        name = <Self as Component<MainEvent>>::name(self),\n                        \"component already initialized\"\n                    );\n                    Effects::new()\n                }\n                Event::Request(request) => request.0.respond(self.next_upgrade).ignore(),\n                Event::CheckForNextUpgrade => self.check_for_next_upgrade(effect_builder),\n                Event::GotNextUpgrade(next_upgrade) => 
self.handle_got_next_upgrade(next_upgrade),\n            },\n        }\n    }\n\n    fn name(&self) -> &str {\n        COMPONENT_NAME\n    }\n}\n\nimpl<REv> InitializedComponent<REv> for UpgradeWatcher\nwhere\n    REv: From<Event> + From<UpgradeWatcherAnnouncement> + Send,\n{\n    fn state(&self) -> &ComponentState {\n        &self.state\n    }\n\n    fn set_state(&mut self, new_state: ComponentState) {\n        info!(\n            ?new_state,\n            name = <Self as Component<MainEvent>>::name(self),\n            \"component state changed\"\n        );\n\n        self.state = new_state;\n    }\n}\n\n/// This struct can be parsed from a TOML-encoded chainspec file.  It means that as the\n/// chainspec format changes over versions, as long as we maintain the protocol config in this form\n/// in the chainspec file, it can continue to be parsed as an `UpgradePoint`.\n#[derive(Deserialize)]\nstruct UpgradePoint {\n    #[serde(rename = \"protocol\")]\n    pub(crate) protocol_config: ProtocolConfig,\n}\n\nimpl UpgradePoint {\n    /// Parses a chainspec file at the given path as an `UpgradePoint`.\n    fn from_chainspec_path<P: AsRef<Path> + fmt::Debug>(path: P) -> Result<Self, Error> {\n        let bytes = file_utils::read_file(path.as_ref().join(CHAINSPEC_FILENAME))\n            .map_err(Error::LoadUpgradePoint)?;\n        Ok(toml::from_str(std::str::from_utf8(&bytes).unwrap())?)\n    }\n}\n\nfn dir_name_from_version(version: ProtocolVersion) -> PathBuf {\n    PathBuf::from(version.to_string().replace('.', \"_\"))\n}\n\n/// Iterates the given path, returning the subdir representing the immediate next SemVer version\n/// after `current_version`.  
If no higher version than `current_version` is found, then\n/// `current_version` is returned.\n///\n/// Subdir names should be semvers with dots replaced with underscores.\nfn next_installed_version(\n    dir: &Path,\n    current_version: ProtocolVersion,\n) -> Result<ProtocolVersion, Error> {\n    let max_version = ProtocolVersion::from_parts(u32::MAX, u32::MAX, u32::MAX);\n\n    let mut next_version = max_version;\n    let mut read_version = false;\n    for entry in fs::read_dir(dir).map_err(|error| Error::ReadDir {\n        dir: dir.to_path_buf(),\n        error,\n    })? {\n        let path = match entry {\n            Ok(dir_entry) => dir_entry.path(),\n            Err(error) => {\n                debug!(dir=%dir.display(), %error, \"bad entry while reading dir\");\n                continue;\n            }\n        };\n\n        let subdir_name = match path.file_name() {\n            Some(name) => name.to_string_lossy().replace('_', \".\"),\n            None => continue,\n        };\n\n        let version = match ProtocolVersion::from_str(&subdir_name) {\n            Ok(version) => version,\n            Err(error) => {\n                trace!(%error, path=%path.display(), \"UpgradeWatcher: failed to get a version\");\n                continue;\n            }\n        };\n\n        if version > current_version && version < next_version {\n            next_version = version;\n        }\n        read_version = true;\n    }\n\n    if !read_version {\n        return Err(Error::NoVersionSubdirFound {\n            dir: dir.to_path_buf(),\n        });\n    }\n\n    if next_version == max_version {\n        next_version = current_version;\n    }\n\n    Ok(next_version)\n}\n\n/// Uses `next_installed_version()` to find the next versioned subdir.  If it exists, reads the\n/// UpgradePoint file from there and returns its version and activation point.  
Returns `None` if\n/// there is no greater version available, or if any step errors.\nfn next_upgrade(dir: PathBuf, current_version: ProtocolVersion) -> Option<NextUpgrade> {\n    let next_version = match next_installed_version(&dir, current_version) {\n        Ok(version) => version,\n        Err(_error) => {\n            #[cfg(not(test))]\n            warn!(dir=%dir.display(), error=%_error, \"failed to get a valid version from subdirs\");\n            return None;\n        }\n    };\n\n    if next_version <= current_version {\n        return None;\n    }\n\n    let subdir = dir.join(dir_name_from_version(next_version));\n    let upgrade_point = match UpgradePoint::from_chainspec_path(&subdir) {\n        Ok(upgrade_point) => upgrade_point,\n        Err(error) => {\n            debug!(subdir=%subdir.display(), %error, \"failed to load upgrade point\");\n            return None;\n        }\n    };\n\n    if upgrade_point.protocol_config.version != next_version {\n        warn!(\n            upgrade_point_version=%upgrade_point.protocol_config.version,\n            subdir_version=%next_version,\n            \"next chainspec installed to wrong subdir\"\n        );\n        return None;\n    }\n\n    Some(NextUpgrade::from(upgrade_point.protocol_config))\n}\n\n#[cfg(test)]\nmod tests {\n    use casper_types::{testing::TestRng, ActivationPoint, ChainspecRawBytes};\n\n    use super::*;\n    use crate::{logging, utils::Loadable};\n\n    const V0_0_0: ProtocolVersion = ProtocolVersion::from_parts(0, 0, 0);\n    const V0_9_9: ProtocolVersion = ProtocolVersion::from_parts(0, 9, 9);\n    const V1_0_0: ProtocolVersion = ProtocolVersion::from_parts(1, 0, 0);\n    const V1_0_3: ProtocolVersion = ProtocolVersion::from_parts(1, 0, 3);\n    const V1_2_3: ProtocolVersion = ProtocolVersion::from_parts(1, 2, 3);\n    const V2_2_2: ProtocolVersion = ProtocolVersion::from_parts(2, 2, 2);\n\n    #[test]\n    fn should_get_next_installed_version() {\n        let tempdir = 
tempfile::tempdir().expect(\"should create temp dir\");\n\n        let get_next_version = |current_version: ProtocolVersion| {\n            next_installed_version(tempdir.path(), current_version).unwrap()\n        };\n\n        // Should get next version (major version bump).\n        fs::create_dir(tempdir.path().join(\"1_0_0\")).unwrap();\n        assert_eq!(get_next_version(V0_0_0), V1_0_0);\n\n        // Should get next version (minor version bump).\n        fs::create_dir(tempdir.path().join(\"1_2_3\")).unwrap();\n        assert_eq!(get_next_version(V1_0_0), V1_2_3);\n\n        // Should report current as next version if only lower versions staged.\n        fs::create_dir(tempdir.path().join(\"1_0_3\")).unwrap();\n        assert_eq!(get_next_version(V1_2_3), V1_2_3);\n\n        // Should report lower of two higher versions.\n        fs::create_dir(tempdir.path().join(\"2_2_2\")).unwrap();\n        fs::create_dir(tempdir.path().join(\"3_3_3\")).unwrap();\n        assert_eq!(get_next_version(V1_2_3), V2_2_2);\n\n        // If higher versions unstaged, should report current again.\n        fs::remove_dir_all(tempdir.path().join(\"2_2_2\")).unwrap();\n        fs::remove_dir_all(tempdir.path().join(\"3_3_3\")).unwrap();\n        assert_eq!(get_next_version(V1_2_3), V1_2_3);\n    }\n\n    #[test]\n    fn should_ignore_invalid_versions() {\n        let tempdir = tempfile::tempdir().expect(\"should create temp dir\");\n\n        // Executes `next_installed_version()` and asserts the resulting error as a string starts\n        // with the given text.\n        let min_version = V0_0_0;\n        let assert_error_starts_with = |path: &Path, expected: String| {\n            let error_msg = next_installed_version(path, min_version)\n                .unwrap_err()\n                .to_string();\n            assert!(\n                error_msg.starts_with(&expected),\n                \"Error message expected to start with \\\"{}\\\"\\nActual error message: \\\"{}\\\"\",\n      
          expected,\n                error_msg\n            );\n        };\n\n        // Try with a non-existent dir.\n        let non_existent_dir = Path::new(\"not_a_dir\");\n        assert_error_starts_with(\n            non_existent_dir,\n            format!(\"failed to read dir {}\", non_existent_dir.display()),\n        );\n\n        // Try with a dir which has no subdirs.\n        assert_error_starts_with(\n            tempdir.path(),\n            format!(\n                \"failed to get a valid version from subdirs in {}\",\n                tempdir.path().display()\n            ),\n        );\n\n        // Try with a dir which has one subdir which is not a valid version representation.\n        fs::create_dir(tempdir.path().join(\"not_a_version\")).unwrap();\n        assert_error_starts_with(\n            tempdir.path(),\n            format!(\n                \"failed to get a valid version from subdirs in {}\",\n                tempdir.path().display()\n            ),\n        );\n\n        // Try with a dir which has a valid and invalid subdir - the invalid one should be ignored.\n        fs::create_dir(tempdir.path().join(\"1_2_3\")).unwrap();\n        assert_eq!(\n            next_installed_version(tempdir.path(), min_version).unwrap(),\n            V1_2_3\n        );\n    }\n\n    /// Creates the appropriate subdir in `root_dir`, and adds a random chainspec.toml with the\n    /// protocol_config.version field set to `version`.\n    fn install_chainspec(\n        rng: &mut TestRng,\n        root_dir: &Path,\n        version: ProtocolVersion,\n    ) -> Chainspec {\n        let mut chainspec = Chainspec::random(rng);\n        chainspec.protocol_config.version = version;\n\n        let subdir = root_dir.join(dir_name_from_version(version));\n        fs::create_dir(&subdir).unwrap();\n\n        let path = subdir.join(CHAINSPEC_FILENAME);\n\n        let pretty = toml::to_string_pretty(&chainspec);\n        fs::write(path, pretty.expect(\"should encode to 
toml\")).expect(\"should install chainspec\");\n        chainspec\n    }\n\n    #[test]\n    fn should_get_next_upgrade() {\n        let tempdir = tempfile::tempdir().expect(\"should create temp dir\");\n\n        let next_point = |current_version: ProtocolVersion| {\n            next_upgrade(tempdir.path().to_path_buf(), current_version).unwrap()\n        };\n\n        let mut rng = crate::new_rng();\n\n        let mut current = ProtocolVersion::from_parts(1, 9, 9);\n        let v2_0_0 = ProtocolVersion::from_parts(2, 0, 0);\n        let chainspec_v2_0_0 = install_chainspec(&mut rng, tempdir.path(), v2_0_0);\n        assert_eq!(next_point(current), chainspec_v2_0_0.protocol_config.into());\n\n        current = v2_0_0;\n        let v2_0_3 = ProtocolVersion::from_parts(2, 0, 3);\n        let chainspec_v2_0_3 = install_chainspec(&mut rng, tempdir.path(), v2_0_3);\n        assert_eq!(next_point(current), chainspec_v2_0_3.protocol_config.into());\n\n        let chainspec_v1_0_0 = install_chainspec(&mut rng, tempdir.path(), V1_0_0);\n        assert_eq!(next_point(V0_9_9), chainspec_v1_0_0.protocol_config.into());\n\n        let chainspec_v1_0_3 = install_chainspec(&mut rng, tempdir.path(), V1_0_3);\n        assert_eq!(next_point(V1_0_0), chainspec_v1_0_3.protocol_config.into());\n    }\n\n    #[test]\n    fn should_not_get_old_or_invalid_upgrade() {\n        let tempdir = tempfile::tempdir().expect(\"should create temp dir\");\n\n        let maybe_next_point = |current_version: ProtocolVersion| {\n            next_upgrade(tempdir.path().to_path_buf(), current_version)\n        };\n\n        let mut rng = crate::new_rng();\n\n        // Check we return `None` if there are no version subdirs.\n        assert!(maybe_next_point(V1_0_0).is_none());\n\n        // Check we return `None` if current_version == next_version.\n        let chainspec_v1_0_0 = install_chainspec(&mut rng, tempdir.path(), V1_0_0);\n        assert!(maybe_next_point(V1_0_0).is_none());\n\n        // 
Check we return `None` if current_version > next_version.\n        assert!(maybe_next_point(V2_2_2).is_none());\n\n        // Check we return `None` if we find an upgrade file where the protocol_config.version field\n        // doesn't match the subdir name.\n        assert!(maybe_next_point(V0_9_9).is_some());\n\n        let mut chainspec_v0_9_9 = chainspec_v1_0_0;\n        chainspec_v0_9_9.protocol_config.version = V0_9_9;\n        let path_v1_0_0 = tempdir\n            .path()\n            .join(dir_name_from_version(V1_0_0))\n            .join(CHAINSPEC_FILENAME);\n        fs::write(\n            &path_v1_0_0,\n            toml::to_string_pretty(&chainspec_v0_9_9).expect(\"should encode to toml\"),\n        )\n        .expect(\"should install upgrade point\");\n        assert!(maybe_next_point(V0_9_9).is_none());\n\n        // Check we return `None` if the next version upgrade_point file is corrupt.\n        fs::write(&path_v1_0_0, \"bad data\".as_bytes()).unwrap();\n        assert!(maybe_next_point(V0_9_9).is_none());\n\n        // Check we return `None` if the next version upgrade_point file is missing.\n        fs::remove_file(&path_v1_0_0).unwrap();\n        assert!(maybe_next_point(V0_9_9).is_none());\n    }\n\n    #[test]\n    fn should_register_unstaged_upgrade() {\n        let _ = logging::init();\n        let tempdir = tempfile::tempdir().expect(\"should create temp dir\");\n        let (chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources(\"local\");\n        let mut upgrade_watcher =\n            UpgradeWatcher::new(&chainspec, Config::default(), tempdir.path()).unwrap();\n        assert!(upgrade_watcher.next_upgrade.is_none());\n\n        let next_upgrade = NextUpgrade::new(\n            ActivationPoint::EraId(EraId::MAX),\n            ProtocolVersion::from_parts(9, 9, 9),\n        );\n        let _ = upgrade_watcher.handle_got_next_upgrade(Some(next_upgrade));\n        assert_eq!(Some(next_upgrade), upgrade_watcher.next_upgrade);\n\n    
    let _ = upgrade_watcher.handle_got_next_upgrade(None);\n        assert!(upgrade_watcher.next_upgrade.is_none());\n    }\n}\n"
  },
  {
    "path": "node/src/components.rs",
    "content": "//! Components subsystem.\n//!\n//! Components are the building blocks for the application and wired together inside a\n//! [reactor](crate::reactor). Each component has a unified interface, expressed by the\n//! [`Component`] trait.\n//!\n//! # Events\n//!\n//! Every component defines a set of events it can process, expressed through the\n//! [`Component::Event`] associated type. If an event that originated outside the component is to be\n//! handled (e.g. a request or announcement being handled), a `From<OutsideEvent> for\n//! ComponentEvent` implementation must be added (see component vs reactor event section below).\n//!\n//! A typical cycle for components is to receive an event, either originating from the outside, or\n//! as the result of an effect created by the component. This event is processed in the\n//! [`handle_event`](Component::handle_event) function, potentially returning effects that may\n//! produce new events.\n//!\n//! # Error and halting states\n//!\n//! Components in general are expected to be able to handle every input (that is every\n//! [`Component::Event`]) in every state. Unexpected inputs should usually be logged and discarded,\n//! if possible, and the component is expected to recover from error states by itself.\n//!\n//! When a recovery is not possible, the [`fatal!`](crate::fatal!) macro should be used to produce\n//! an effect that will shut down the system.\n//!\n//! # Component events and reactor events\n//!\n//! It is easy to confuse the components own associated event ([`Component::Event`]) and the\n//! so-called \"reactor event\", often written `REv` (see [`effects`](crate::effect) for details on\n//! the distinctions).\n//!\n//! A component's own event defines what sort of events it produces purely for internal use, and\n//! also which unbound events it can accept. **Acceptance of external events** is expressed by\n//! implementing a `From` implementation for the unbound, i.e. a component that can process\n//! 
`FooAnnouncement` and a `BarRequest` will have to `impl From<FooAnnouncement> for Event` and\n//! `impl From<BarRequest>`, with `Event` being the event named as [`Component::Event`].\n//!\n//! Since components are usually not specific to only a single reactor, they have to implement\n//! `Component<REv>` for a variety of reactor events (`REv`). A component can **demand that the\n//! reactor provides a set of capabilities** by requiring `From`-implementations on the `REv`, e.g.\n//! by restricting the `impl Component<REv>` by `where REv: From<Baz>`. The concrete requirement\n//! will usually be dictated by a restriction on a method on an\n//! [`EffectBuilder`](crate::effect::EffectBuilder).\n\npub(crate) mod binary_port;\npub(crate) mod block_accumulator;\npub(crate) mod block_synchronizer;\npub(crate) mod block_validator;\npub mod consensus;\npub mod contract_runtime;\npub(crate) mod diagnostics_port;\npub(crate) mod event_stream_server;\npub(crate) mod fetcher;\npub(crate) mod gossiper;\npub(crate) mod transaction_buffer;\n// The `in_memory_network` is public for use in doctests.\n#[cfg(test)]\npub mod in_memory_network;\npub(crate) mod metrics;\npub(crate) mod network;\npub(crate) mod rest_server;\npub(crate) mod shutdown_trigger;\npub mod storage;\npub(crate) mod sync_leaper;\npub(crate) mod transaction_acceptor;\npub(crate) mod upgrade_watcher;\n\nuse datasize::DataSize;\nuse serde::Deserialize;\nuse std::fmt::{Debug, Display};\nuse tracing::info;\n\nuse crate::{\n    effect::{EffectBuilder, Effects},\n    failpoints::FailpointActivation,\n    NodeRng,\n};\n\n#[cfg_attr(doc, aquamarine::aquamarine)]\n/// ```mermaid\n/// flowchart TD\n///     style Start fill:#66ccff,stroke:#333,stroke-width:4px\n///     style End fill:#66ccff,stroke:#333,stroke-width:4px\n///\n///     Start --> Uninitialized\n///     Uninitialized --> Initializing\n///     Initializing --> Initialized\n///     Initializing --> Fatal\n///     Initialized --> End\n///     Fatal --> End\n/// 
```\n#[derive(Clone, PartialEq, Eq, DataSize, Debug, Deserialize, Default)]\npub(crate) enum ComponentState {\n    #[default]\n    Uninitialized,\n    Initializing,\n    Initialized,\n    Fatal(String),\n}\n\n/// Core Component.\n///\n/// Every component process a set of events it defines itself\n/// Its inputs are `Event`s, allowing it to perform work whenever an event is received, outputting\n/// `Effect`s each time it is called.\n///\n/// # Error and halting states\n///\n/// Components in general are expected to be able to handle every input (`Event`) in every state.\n/// Invalid inputs are supposed to be discarded, and the machine is expected to recover from any\n/// recoverable error states by itself.\n///\n/// If a fatal error occurs that is not recoverable, the reactor should be notified instead.\n///\n/// # Component events and reactor events\n///\n/// Each component has two events related to it: An associated `Event` and a reactor event (`REv`).\n/// The `Event` type indicates what type of event a component accepts, these are typically event\n/// types specific to the component.\n///\n/// Components place restrictions on reactor events (`REv`s), indicating what kind of effects they\n/// need to be able to produce to operate.\npub(crate) trait Component<REv> {\n    /// Event associated with `Component`.\n    ///\n    /// The event type that is handled by the component.\n    type Event;\n\n    /// Name of the component.\n    fn name(&self) -> &str;\n\n    /// Activate/deactivate a failpoint.\n    fn activate_failpoint(&mut self, _activation: &FailpointActivation) {\n        // Default is to ignore failpoints.\n    }\n\n    /// Processes an event, outputting zero or more effects.\n    ///\n    /// This function must not ever perform any blocking or CPU intensive work, as it is expected\n    /// to return very quickly -- it will usually be called from an `async` function context.\n    fn handle_event(\n        &mut self,\n        effect_builder: 
EffectBuilder<REv>,\n        rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event>;\n}\n\npub(crate) trait InitializedComponent<REv>: Component<REv> {\n    fn state(&self) -> &ComponentState;\n\n    fn is_uninitialized(&self) -> bool {\n        self.state() == &ComponentState::Uninitialized\n    }\n\n    fn is_fatal(&self) -> bool {\n        matches!(self.state(), ComponentState::Fatal(_))\n    }\n\n    fn start_initialization(&mut self) {\n        if self.is_uninitialized() {\n            self.set_state(ComponentState::Initializing);\n        } else {\n            info!(name = self.name(), \"component must be uninitialized\");\n        }\n    }\n\n    fn set_state(&mut self, new_state: ComponentState);\n}\n\npub(crate) trait PortBoundComponent<REv>: InitializedComponent<REv> {\n    type Error: Display + Debug;\n    type ComponentEvent;\n\n    fn bind(\n        &mut self,\n        enabled: bool,\n        effect_builder: EffectBuilder<REv>,\n    ) -> (Effects<Self::ComponentEvent>, ComponentState) {\n        if !enabled {\n            return (Effects::new(), ComponentState::Initialized);\n        }\n\n        match self.listen(effect_builder) {\n            Ok(effects) => (effects, ComponentState::Initialized),\n            Err(error) => (Effects::new(), ComponentState::Fatal(format!(\"{}\", error))),\n        }\n    }\n\n    fn listen(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n    ) -> Result<Effects<Self::ComponentEvent>, Self::Error>;\n}\n\npub(crate) trait ValidatorBoundComponent<REv>: Component<REv> {\n    fn handle_validators(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        rng: &mut NodeRng,\n    ) -> Effects<Self::Event>;\n}\n"
  },
  {
    "path": "node/src/config_migration.rs",
    "content": "use thiserror::Error;\n\nuse crate::{reactor::main_reactor::Config, utils::WithDir};\n\n// This will be changed in favour of an actual old config type when the migration is not a no-op.\ntype OldConfig = Config;\n\n/// Error returned as a result of migrating the config file.\n#[derive(Debug, Error)]\npub enum Error {}\n\n/// Migrates values from the old config file to the new one, modifying the new config file on-disk.\n///\n/// This should be executed after a new version is available, but before the casper-node has been\n/// run in validator mode using the new version.\npub(crate) fn migrate_config(\n    _old_config: WithDir<OldConfig>,\n    _new_config: WithDir<Config>,\n) -> Result<(), Error> {\n    Ok(())\n}\n"
  },
  {
    "path": "node/src/data_migration.rs",
    "content": "use std::{env, fs, io, path::PathBuf, sync::Arc};\n\nuse serde::{Deserialize, Serialize};\nuse thiserror::Error;\nuse tracing::info;\n\nuse casper_types::{\n    crypto, Chainspec, ChainspecRawBytes, Digest, ProtocolVersion, PublicKey, SecretKey, Signature,\n};\n\nuse crate::{\n    reactor::main_reactor::Config,\n    utils::{\n        chain_specification::error::Error as LoadChainspecError, LoadError, Loadable, WithDir,\n    },\n};\n\n/// The name of the file for recording the new global state hash after a data migration.\nconst POST_MIGRATION_STATE_HASH_FILENAME: &str = \"post-migration-state-hash\";\n/// The folder under which the post-migration-state-hash file is written.\nconst CONFIG_ROOT_DIR: &str = \"/etc/casper\";\n/// Environment variable to override the config root dir.\nconst CONFIG_ROOT_DIR_OVERRIDE: &str = \"CASPER_CONFIG_DIR\";\n\n/// Error returned as a result of migrating data.\n#[derive(Debug, Error)]\npub(crate) enum Error {\n    /// Error serializing state hash info.\n    #[error(\"error serializing state hash info: {0}\")]\n    SerializeStateHashInfo(bincode::Error),\n\n    /// Error deserializing state hash info.\n    #[error(\"error deserializing state hash info: {0}\")]\n    DeserializeStateHashInfo(bincode::Error),\n\n    /// Error writing state hash info file.\n    #[error(\"error writing state hash info to {path}: {error}\")]\n    WriteStateHashInfo {\n        /// The file path.\n        path: String,\n        /// The IO error.\n        error: io::Error,\n    },\n\n    /// Error reading state hash info file.\n    #[error(\"error reading state hash info from {path}: {error}\")]\n    ReadStateHashInfo {\n        /// The file path.\n        path: String,\n        /// The IO error.\n        error: io::Error,\n    },\n\n    /// Invalid signature of state hash and version.\n    #[error(\"invalid signature of state hash info\")]\n    InvalidSignatureOfStateHashInfo,\n\n    /// Error loading the secret key.\n    #[error(\"error 
loading secret key: {0}\")]\n    LoadSecretKey(LoadError<crypto::ErrorExt>),\n\n    /// Error loading the chainspec.\n    #[error(\"error loading chainspec: {0}\")]\n    LoadChainspec(LoadChainspecError),\n}\n\n#[derive(Serialize, Deserialize)]\nstruct PostMigrationInfo {\n    state_hash: Digest,\n    protocol_version: ProtocolVersion,\n}\n\n#[derive(Serialize, Deserialize)]\nstruct SignedPostMigrationInfo {\n    serialized_info: Vec<u8>,\n    signature: Signature,\n}\n\n/// Reads in the root hash of the global state after a previous run of data migration.\n///\n/// Returns `Ok(None)` if there is no saved file or if it doesn't contain the same version as\n/// `protocol_version`.  Returns `Ok(Some)` if the file can be read and it contains the same version\n/// as `protocol_version`.  Otherwise returns an error.\n// TODO - remove once used.\n#[allow(unused)]\npub(crate) fn read_post_migration_info(\n    protocol_version: ProtocolVersion,\n    public_key: &PublicKey,\n) -> Result<Option<Digest>, Error> {\n    do_read_post_migration_info(protocol_version, public_key, info_path())\n}\n\n// TODO - remove once used.\n#[allow(unused)]\nfn do_read_post_migration_info(\n    protocol_version: ProtocolVersion,\n    public_key: &PublicKey,\n    path: PathBuf,\n) -> Result<Option<Digest>, Error> {\n    // If the file doesn't exist, return `Ok(None)`.\n    if !path.is_file() {\n        return Ok(None);\n    }\n\n    // Read the signed info.\n    let serialized_signed_info = fs::read(&path).map_err(|error| Error::ReadStateHashInfo {\n        path: path.display().to_string(),\n        error,\n    })?;\n    let signed_info: SignedPostMigrationInfo =\n        bincode::deserialize(&serialized_signed_info).map_err(Error::DeserializeStateHashInfo)?;\n\n    // Validate the signature.\n    crypto::verify(\n        &signed_info.serialized_info,\n        &signed_info.signature,\n        public_key,\n    )\n    .map_err(|_| Error::InvalidSignatureOfStateHashInfo)?;\n\n    // Deserialize the 
info.\n    let info: PostMigrationInfo = bincode::deserialize(&signed_info.serialized_info)\n        .map_err(Error::DeserializeStateHashInfo)?;\n\n    if info.protocol_version == protocol_version {\n        Ok(Some(info.state_hash))\n    } else {\n        Ok(None)\n    }\n}\n\n/// Writes the root hash of the global state and the new protocol version after data migration has\n/// completed.\n///\n/// This must be called after a data migration in order to allow the node to read in the new root\n/// state on restart.\nfn write_post_migration_info(\n    state_hash: Digest,\n    new_protocol_version: ProtocolVersion,\n    secret_key: &SecretKey,\n    path: PathBuf,\n) -> Result<(), Error> {\n    // Serialize the info.\n    let info = PostMigrationInfo {\n        state_hash,\n        protocol_version: new_protocol_version,\n    };\n    let serialized_info = bincode::serialize(&info).map_err(Error::SerializeStateHashInfo)?;\n\n    // Sign the info.\n    let public_key = PublicKey::from(secret_key);\n    let signature = crypto::sign(&serialized_info, secret_key, &public_key);\n    let signed_info = SignedPostMigrationInfo {\n        serialized_info,\n        signature,\n    };\n\n    // Write the signed info to disk.\n    let serialized_signed_info =\n        bincode::serialize(&signed_info).map_err(Error::SerializeStateHashInfo)?;\n    fs::write(&path, serialized_signed_info).map_err(|error| Error::WriteStateHashInfo {\n        path: path.display().to_string(),\n        error,\n    })?;\n\n    info!(path=%path.display(), \"wrote post-migration state hash\");\n    Ok(())\n}\n\nfn info_path() -> PathBuf {\n    PathBuf::from(\n        env::var(CONFIG_ROOT_DIR_OVERRIDE).unwrap_or_else(|_| CONFIG_ROOT_DIR.to_string()),\n    )\n    .join(POST_MIGRATION_STATE_HASH_FILENAME)\n}\n\n/// Migrates data from that specified in the old config file to that specified in the new one.\npub(crate) fn migrate_data(\n    _old_config: WithDir<toml::Value>,\n    new_config: WithDir<Config>,\n) 
-> Result<(), Error> {\n    let (new_root, new_config) = new_config.into_parts();\n    let new_protocol_version = <(Chainspec, ChainspecRawBytes)>::from_path(&new_root)\n        .map_err(Error::LoadChainspec)?\n        .0\n        .protocol_config\n        .version;\n    let secret_key: Arc<SecretKey> = new_config\n        .consensus\n        .secret_key_path\n        .load(&new_root)\n        .map_err(Error::LoadSecretKey)?;\n\n    // Get this by actually migrating the global state data.\n    let state_hash = Digest::default();\n\n    if state_hash != Digest::default() {\n        write_post_migration_info(state_hash, new_protocol_version, &secret_key, info_path())?;\n    }\n\n    Ok(())\n}\n\n#[cfg(test)]\nmod tests {\n    use rand::Rng;\n\n    use super::*;\n\n    #[test]\n    fn should_write_then_read_info() {\n        let tempdir = tempfile::tempdir().unwrap();\n        let info_path = tempdir.path().join(POST_MIGRATION_STATE_HASH_FILENAME);\n\n        let mut rng = crate::new_rng();\n        let state_hash = Digest::hash([rng.gen()]);\n        let protocol_version = ProtocolVersion::from_parts(rng.gen(), rng.gen(), rng.gen());\n        let secret_key = SecretKey::random(&mut rng);\n\n        write_post_migration_info(state_hash, protocol_version, &secret_key, info_path.clone())\n            .unwrap();\n\n        let public_key = PublicKey::from(&secret_key);\n        let maybe_hash =\n            do_read_post_migration_info(protocol_version, &public_key, info_path).unwrap();\n        assert_eq!(maybe_hash, Some(state_hash));\n    }\n\n    #[test]\n    fn should_return_none_after_reading_info() {\n        let tempdir = tempfile::tempdir().unwrap();\n        let info_path = tempdir.path().join(POST_MIGRATION_STATE_HASH_FILENAME);\n\n        // Should return `None` if there is no info file.\n        let protocol_version = ProtocolVersion::from_parts(1, 2, 3);\n        let mut rng = crate::new_rng();\n        let secret_key = SecretKey::random(&mut rng);\n        
let public_key = PublicKey::from(&secret_key);\n        let maybe_hash =\n            do_read_post_migration_info(protocol_version, &public_key, info_path.clone()).unwrap();\n        assert!(maybe_hash.is_none());\n\n        // Create the info file and check we can read it.\n        let state_hash = Digest::hash([rng.gen()]);\n        write_post_migration_info(state_hash, protocol_version, &secret_key, info_path.clone())\n            .unwrap();\n        assert!(\n            do_read_post_migration_info(protocol_version, &public_key, info_path.clone())\n                .unwrap()\n                .is_some()\n        );\n\n        // Should return `None` for a version different to that requested.\n        let different_version = ProtocolVersion::from_parts(1, 2, 4);\n        let maybe_hash =\n            do_read_post_migration_info(different_version, &public_key, info_path).unwrap();\n        assert!(maybe_hash.is_none());\n    }\n\n    #[test]\n    fn should_fail_to_read_invalid_info() {\n        let tempdir = tempfile::tempdir().unwrap();\n        let info_path = tempdir.path().join(POST_MIGRATION_STATE_HASH_FILENAME);\n\n        // Should return `Err` if the file can't be parsed.\n        fs::write(&info_path, \"bad value\".as_bytes()).unwrap();\n        let protocol_version = ProtocolVersion::from_parts(1, 2, 3);\n        let mut rng = crate::new_rng();\n        let secret_key = SecretKey::random(&mut rng);\n        let public_key = PublicKey::from(&secret_key);\n        assert!(\n            do_read_post_migration_info(protocol_version, &public_key, info_path.clone()).is_err()\n        );\n\n        // Should return `Err` if the signature is invalid.\n        let other_secret_key = SecretKey::random(&mut rng);\n        let state_hash = Digest::hash([rng.gen()]);\n        write_post_migration_info(\n            state_hash,\n            protocol_version,\n            &other_secret_key,\n            info_path.clone(),\n        )\n        .unwrap();\n        
assert!(do_read_post_migration_info(protocol_version, &public_key, info_path).is_err());\n    }\n}\n"
  },
  {
    "path": "node/src/effect/announcements.rs",
    "content": "//! Announcement effects.\n//!\n//! Announcements indicate new incoming data or events from various sources. See the top-level\n//! module documentation for details.\n\nuse std::{\n    collections::BTreeMap,\n    fmt::{self, Debug, Display, Formatter},\n    fs::File,\n    sync::Arc,\n};\n\nuse datasize::DataSize;\nuse itertools::Itertools;\nuse serde::Serialize;\n\nuse casper_types::{\n    execution::Effects, Block, EraId, FinalitySignature, FinalitySignatureV2, NextUpgrade,\n    PublicKey, Timestamp, Transaction, TransactionHash, U512,\n};\n\nuse crate::{\n    components::{\n        consensus::{ClContext, ProposedBlock},\n        diagnostics_port::FileSerializer,\n        fetcher::FetchItem,\n        gossiper::GossipItem,\n        network::blocklist::BlocklistJustification,\n    },\n    effect::Responder,\n    failpoints::FailpointActivation,\n    types::{FinalizedBlock, MetaBlock, NodeId},\n    utils::Source,\n};\n\n/// Control announcements are special announcements handled directly by the runtime/runner.\n///\n/// Reactors are never passed control announcements back in and every reactor event must be able to\n/// be constructed from a `ControlAnnouncement` to be run.\n///\n/// Control announcements also use a priority queue to ensure that a component that reports a fatal\n/// error is given as few follow-up events as possible. 
However, there currently is no guarantee\n/// that this happens.\n#[derive(Serialize)]\n#[must_use]\npub(crate) enum ControlAnnouncement {\n    /// A shutdown has been requested by the user.\n    ShutdownDueToUserRequest,\n\n    /// The node should shut down with exit code 0 in readiness for the next binary to start.\n    ShutdownForUpgrade,\n\n    /// The node started in catch up and shutdown mode has caught up to tip and can now exit.\n    ShutdownAfterCatchingUp,\n\n    /// The component has encountered a fatal error and cannot continue.\n    ///\n    /// This usually triggers a shutdown of the application.\n    FatalError {\n        file: &'static str,\n        line: u32,\n        msg: String,\n    },\n    /// An external event queue dump has been requested.\n    QueueDumpRequest {\n        /// The format to dump the queue in.\n        #[serde(skip)]\n        dump_format: QueueDumpFormat,\n        /// Responder called when the dump has been finished.\n        finished: Responder<()>,\n    },\n    /// Activates/deactivates a failpoint.\n    ActivateFailpoint {\n        /// The failpoint activation to process.\n        activation: FailpointActivation,\n    },\n}\n\nimpl Debug for ControlAnnouncement {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            ControlAnnouncement::ShutdownDueToUserRequest => write!(f, \"ShutdownDueToUserRequest\"),\n            ControlAnnouncement::ShutdownForUpgrade => write!(f, \"ShutdownForUpgrade\"),\n            ControlAnnouncement::ShutdownAfterCatchingUp => write!(f, \"ShutdownAfterCatchingUp\"),\n            ControlAnnouncement::FatalError { file, line, msg } => f\n                .debug_struct(\"FatalError\")\n                .field(\"file\", file)\n                .field(\"line\", line)\n                .field(\"msg\", msg)\n                .finish(),\n            ControlAnnouncement::QueueDumpRequest { .. 
} => {\n                f.debug_struct(\"QueueDump\").finish_non_exhaustive()\n            }\n            ControlAnnouncement::ActivateFailpoint { activation } => f\n                .debug_struct(\"ActivateFailpoint\")\n                .field(\"activation\", activation)\n                .finish(),\n        }\n    }\n}\n\nimpl Display for ControlAnnouncement {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            ControlAnnouncement::ShutdownDueToUserRequest => {\n                write!(f, \"shutdown due to user request\")\n            }\n            ControlAnnouncement::ShutdownForUpgrade => write!(f, \"shutdown for upgrade\"),\n            ControlAnnouncement::ShutdownAfterCatchingUp => write!(f, \"shutdown after catching up\"),\n            ControlAnnouncement::FatalError { file, line, msg } => {\n                write!(f, \"fatal error [{}:{}]: {}\", file, line, msg)\n            }\n            ControlAnnouncement::QueueDumpRequest { .. } => {\n                write!(f, \"dump event queue\")\n            }\n            ControlAnnouncement::ActivateFailpoint { activation } => {\n                write!(f, \"failpoint activation: {}\", activation)\n            }\n        }\n    }\n}\n\n/// A component has encountered a fatal error and cannot continue.\n///\n/// This usually triggers a shutdown of the application.\n#[derive(Serialize, Debug)]\n#[must_use]\npub(crate) struct FatalAnnouncement {\n    pub(crate) file: &'static str,\n    pub(crate) line: u32,\n    pub(crate) msg: String,\n}\n\nimpl Display for FatalAnnouncement {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"fatal error [{}:{}]: {}\", self.file, self.line, self.msg)\n    }\n}\n\n#[derive(DataSize, Serialize, Debug)]\npub(crate) struct MetaBlockAnnouncement(pub(crate) MetaBlock);\n\nimpl Display for MetaBlockAnnouncement {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            f,\n            
\"announcement for meta block {} at height {}\",\n            self.0.hash(),\n            self.0.height(),\n        )\n    }\n}\n\n#[derive(DataSize, Serialize, Debug)]\npub(crate) struct UnexecutedBlockAnnouncement(pub(crate) u64);\n\nimpl Display for UnexecutedBlockAnnouncement {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            f,\n            \"announcement for unexecuted finalized block at height {}\",\n            self.0,\n        )\n    }\n}\n\n/// Queue dump format with handler.\n#[derive(Serialize)]\npub(crate) enum QueueDumpFormat {\n    /// Dump using given serde serializer.\n    Serde(#[serde(skip)] FileSerializer),\n    /// Dump writing debug output to file.\n    Debug(#[serde(skip)] File),\n}\n\nimpl QueueDumpFormat {\n    /// Creates a new queue dump serde format.\n    pub(crate) fn serde(serializer: FileSerializer) -> Self {\n        QueueDumpFormat::Serde(serializer)\n    }\n\n    /// Creates a new queue dump debug format.\n    pub(crate) fn debug(file: File) -> Self {\n        QueueDumpFormat::Debug(file)\n    }\n}\n\n/// A `TransactionAcceptor` announcement.\n#[derive(Debug, Serialize)]\npub(crate) enum TransactionAcceptorAnnouncement {\n    /// A transaction which wasn't previously stored on this node has been accepted and stored.\n    AcceptedNewTransaction {\n        /// The new transaction.\n        transaction: Arc<Transaction>,\n        /// The source (peer or client) of the transaction.\n        source: Source,\n    },\n\n    /// An invalid transaction was received.\n    InvalidTransaction {\n        /// The invalid transaction.\n        transaction: Transaction,\n        /// The source (peer or client) of the transaction.\n        source: Source,\n    },\n}\n\nimpl Display for TransactionAcceptorAnnouncement {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            TransactionAcceptorAnnouncement::AcceptedNewTransaction {\n                transaction,\n   
             source,\n            } => write!(\n                formatter,\n                \"accepted new transaction {} from {}\",\n                transaction.hash(),\n                source\n            ),\n            TransactionAcceptorAnnouncement::InvalidTransaction {\n                transaction,\n                source,\n            } => {\n                write!(\n                    formatter,\n                    \"invalid transaction {} from {}\",\n                    transaction.hash(),\n                    source\n                )\n            }\n        }\n    }\n}\n\n#[derive(Debug, Serialize)]\npub(crate) enum TransactionBufferAnnouncement {\n    /// Hashes of the transactions that expired.\n    TransactionsExpired(Vec<TransactionHash>),\n}\n\nimpl Display for TransactionBufferAnnouncement {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            TransactionBufferAnnouncement::TransactionsExpired(hashes) => {\n                write!(f, \"pruned hashes: {}\", hashes.iter().join(\", \"))\n            }\n        }\n    }\n}\n\n/// A consensus announcement.\n#[derive(Debug)]\npub(crate) enum ConsensusAnnouncement {\n    /// A block was proposed.\n    Proposed(Box<ProposedBlock<ClContext>>),\n    /// A block was finalized.\n    Finalized(Box<FinalizedBlock>),\n    /// An equivocation has been detected.\n    Fault {\n        /// The Id of the era in which the equivocation was detected\n        era_id: EraId,\n        /// The public key of the equivocator.\n        public_key: Box<PublicKey>,\n        /// The timestamp when the evidence of the equivocation was detected.\n        timestamp: Timestamp,\n    },\n}\n\nimpl Display for ConsensusAnnouncement {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            ConsensusAnnouncement::Proposed(block) => {\n                write!(formatter, \"proposed block payload {}\", block)\n            }\n            
ConsensusAnnouncement::Finalized(block) => {\n                write!(formatter, \"finalized block payload {}\", block)\n            }\n            ConsensusAnnouncement::Fault {\n                era_id,\n                public_key,\n                timestamp,\n            } => write!(\n                formatter,\n                \"Validator fault with public key: {} has been identified at time: {} in {}\",\n                public_key, timestamp, era_id,\n            ),\n        }\n    }\n}\n\n/// Notable / unexpected peer behavior has been detected by some part of the system.\n#[derive(Debug, Serialize)]\npub(crate) enum PeerBehaviorAnnouncement {\n    /// A given peer committed a blockable offense.\n    OffenseCommitted {\n        /// The peer ID of the offending node.\n        offender: Box<NodeId>,\n        /// Justification for blocking the peer.\n        justification: Box<BlocklistJustification>,\n    },\n}\n\nimpl Display for PeerBehaviorAnnouncement {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            PeerBehaviorAnnouncement::OffenseCommitted {\n                offender,\n                justification,\n            } => {\n                write!(f, \"peer {} committed offense: {}\", offender, justification)\n            }\n        }\n    }\n}\n\n/// A Gossiper announcement.\n#[derive(Debug)]\npub(crate) enum GossiperAnnouncement<T: GossipItem> {\n    /// A new gossip has been received, but not necessarily the full item.\n    GossipReceived { item_id: T::Id, sender: NodeId },\n\n    /// A new item has been received, where the item's ID is the complete item.\n    NewCompleteItem(T::Id),\n\n    /// A new item has been received where the item's ID is NOT the complete item.\n    NewItemBody { item: Box<T>, sender: NodeId },\n\n    /// Finished gossiping about the indicated item.\n    FinishedGossiping(T::Id),\n}\n\nimpl<T: GossipItem> Display for GossiperAnnouncement<T> {\n    fn fmt(&self, f: &mut Formatter<'_>) -> 
fmt::Result {\n        match self {\n            GossiperAnnouncement::GossipReceived { item_id, sender } => {\n                write!(f, \"new gossiped item {} from sender {}\", item_id, sender)\n            }\n            GossiperAnnouncement::NewCompleteItem(item) => write!(f, \"new complete item {}\", item),\n            GossiperAnnouncement::NewItemBody { item, sender } => {\n                write!(f, \"new item body {} from {}\", item.gossip_id(), sender)\n            }\n            GossiperAnnouncement::FinishedGossiping(item_id) => {\n                write!(f, \"finished gossiping {}\", item_id)\n            }\n        }\n    }\n}\n\n#[derive(Debug, Serialize)]\npub(crate) struct UpgradeWatcherAnnouncement(pub(crate) Option<NextUpgrade>);\n\nimpl Display for UpgradeWatcherAnnouncement {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match &self.0 {\n            Some(next_upgrade) => write!(f, \"read {}\", next_upgrade),\n            None => write!(f, \"no upgrade staged\"),\n        }\n    }\n}\n\n/// A ContractRuntime announcement.\n#[derive(Debug, Serialize)]\npub(crate) enum ContractRuntimeAnnouncement {\n    /// A step was committed successfully and has altered global state.\n    CommitStepSuccess {\n        /// The era id in which the step was committed to global state.\n        era_id: EraId,\n        /// The operations and transforms committed to global state.\n        effects: Effects,\n    },\n    /// New era validators.\n    UpcomingEraValidators {\n        /// The era id in which the step was committed to global state.\n        era_that_is_ending: EraId,\n        /// The validators for the eras after the `era_that_is_ending` era.\n        upcoming_era_validators: BTreeMap<EraId, BTreeMap<PublicKey, U512>>,\n    },\n    /// New gas price for an upcoming era has been determined.\n    NextEraGasPrice {\n        /// The era id for which the gas price has been determined\n        era_id: EraId,\n        /// The gas price as 
determined by chain utilization.\n        next_era_gas_price: u8,\n    },\n}\n\nimpl Display for ContractRuntimeAnnouncement {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            ContractRuntimeAnnouncement::CommitStepSuccess { era_id, .. } => {\n                write!(f, \"commit step completed for {}\", era_id)\n            }\n            ContractRuntimeAnnouncement::UpcomingEraValidators {\n                era_that_is_ending, ..\n            } => {\n                write!(\n                    f,\n                    \"upcoming era validators after current {}.\",\n                    era_that_is_ending,\n                )\n            }\n            ContractRuntimeAnnouncement::NextEraGasPrice {\n                era_id,\n                next_era_gas_price,\n            } => {\n                write!(\n                    f,\n                    \"Calculated gas price {} for era {}\",\n                    next_era_gas_price, era_id\n                )\n            }\n        }\n    }\n}\n\n#[derive(Debug, Serialize)]\npub(crate) enum BlockAccumulatorAnnouncement {\n    /// A finality signature which wasn't previously stored on this node has been accepted and\n    /// stored.\n    AcceptedNewFinalitySignature {\n        finality_signature: Box<FinalitySignatureV2>,\n    },\n}\n\nimpl Display for BlockAccumulatorAnnouncement {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            BlockAccumulatorAnnouncement::AcceptedNewFinalitySignature { finality_signature } => {\n                write!(\n                    f,\n                    \"finality signature {} accepted\",\n                    finality_signature.gossip_id()\n                )\n            }\n        }\n    }\n}\n\n/// A block which wasn't previously stored on this node has been fetched and stored.\n#[derive(Debug, Serialize)]\npub(crate) struct FetchedNewBlockAnnouncement {\n    pub(crate) block: Arc<Block>,\n    
pub(crate) peer: NodeId,\n}\n\nimpl Display for FetchedNewBlockAnnouncement {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            f,\n            \"new block {} fetched from {}\",\n            self.block.fetch_id(),\n            self.peer\n        )\n    }\n}\n\n/// A finality signature which wasn't previously stored on this node has been fetched and stored.\n#[derive(Debug, Serialize)]\npub(crate) struct FetchedNewFinalitySignatureAnnouncement {\n    pub(crate) finality_signature: Box<FinalitySignature>,\n    pub(crate) peer: NodeId,\n}\n\nimpl Display for FetchedNewFinalitySignatureAnnouncement {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            f,\n            \"new finality signature {} fetched from {}\",\n            self.finality_signature.fetch_id(),\n            self.peer\n        )\n    }\n}\n"
  },
  {
    "path": "node/src/effect/diagnostics_port.rs",
    "content": "use std::{\n    borrow::Cow,\n    fmt::{Debug, Display},\n};\n\nuse casper_types::EraId;\nuse datasize::DataSize;\nuse futures::Future;\nuse serde::Serialize;\n\nuse super::Responder;\nuse crate::components::consensus::EraDump;\n\n/// A request to dump the internal consensus state of a specific era.\n#[derive(DataSize, Serialize)]\npub(crate) struct DumpConsensusStateRequest {\n    /// Era to serialize.\n    ///\n    /// If not given, use active era.\n    pub(crate) era_id: Option<EraId>,\n    /// Serialization function to serialize the given era with.\n    #[data_size(skip)]\n    #[serde(skip)]\n    pub(crate) serialize: fn(&EraDump) -> Result<Vec<u8>, Cow<'static, str>>,\n    /// Responder to send the serialized representation into.\n    pub(crate) responder: Responder<Result<Vec<u8>, Cow<'static, str>>>,\n}\n\nimpl DumpConsensusStateRequest {\n    pub(crate) fn answer(\n        self,\n        value: Result<&EraDump, Cow<'static, str>>,\n    ) -> impl Future<Output = ()> {\n        let answer = match value {\n            Ok(data) => (self.serialize)(data),\n            Err(err) => Err(err),\n        };\n\n        self.responder.respond(answer)\n    }\n}\n\nimpl Display for DumpConsensusStateRequest {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"dump consensus state for \")?;\n        if let Some(ref era_id) = self.era_id {\n            Display::fmt(era_id, f)\n        } else {\n            f.write_str(\" latest era\")\n        }\n    }\n}\n\nimpl Debug for DumpConsensusStateRequest {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        f.debug_struct(\"DumpConsensusStateRequest\")\n            .field(\"era_id\", &self.era_id)\n            .finish_non_exhaustive()\n    }\n}\n"
  },
  {
    "path": "node/src/effect/incoming.rs",
    "content": "//! Announcements of incoming network messages.\n//!\n//! Any event suffixed -`Incoming` is usually the arrival of a specific network message.\n\nuse std::{\n    fmt::{self, Display, Formatter},\n    sync::Arc,\n};\n\nuse datasize::DataSize;\nuse serde::Serialize;\n\nuse casper_types::FinalitySignatureV2;\n\nuse super::AutoClosingResponder;\nuse crate::{\n    components::{consensus, fetcher::Tag, gossiper},\n    protocol::Message,\n    types::{NodeId, TrieOrChunkIdDisplay},\n};\n\n/// An envelope for an incoming message, attaching a sender address.\n#[derive(DataSize, Debug, Serialize)]\npub struct MessageIncoming<M> {\n    pub(crate) sender: NodeId,\n    pub(crate) message: Box<M>,\n}\n\nimpl<M> Display for MessageIncoming<M>\nwhere\n    M: Display,\n{\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"incoming from {}: {}\", self.sender, self.message)\n    }\n}\n\n/// An envelope for an incoming demand, attaching a sender address and responder.\n#[derive(DataSize, Debug, Serialize)]\npub struct DemandIncoming<M> {\n    /// The sender from which the demand originated.\n    pub(crate) sender: NodeId,\n    /// The wrapped demand.\n    pub(crate) request_msg: Box<M>,\n    /// Responder to send the answer down through.\n    pub(crate) auto_closing_responder: AutoClosingResponder<Message>,\n}\n\nimpl<M> Display for DemandIncoming<M>\nwhere\n    M: Display,\n{\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"demand from {}: {}\", self.sender, self.request_msg)\n    }\n}\n\n/// A new consensus message arrived.\npub(crate) type ConsensusMessageIncoming = MessageIncoming<consensus::ConsensusMessage>;\n\n/// A new message from a gossiper arrived.\npub(crate) type GossiperIncoming<T> = MessageIncoming<gossiper::Message<T>>;\n\n/// A new message requesting various objects arrived.\npub(crate) type NetRequestIncoming = MessageIncoming<NetRequest>;\n\n/// A new message responding to a request 
arrived.\npub(crate) type NetResponseIncoming = MessageIncoming<NetResponse>;\n\n/// A new message requesting a trie arrived.\npub(crate) type TrieRequestIncoming = MessageIncoming<TrieRequest>;\n\n/// A demand for a trie that should be answered.\npub(crate) type TrieDemand = DemandIncoming<TrieRequest>;\n\n/// A demand for consensus protocol data that should be answered.\npub(crate) type ConsensusDemand = DemandIncoming<consensus::ConsensusRequestMessage>;\n\n/// A new message responding to a trie request arrived.\npub(crate) type TrieResponseIncoming = MessageIncoming<TrieResponse>;\n\n/// A new finality signature arrived over the network.\npub(crate) type FinalitySignatureIncoming = MessageIncoming<FinalitySignatureV2>;\n\n/// A request for an object out of storage arrived.\n///\n/// Note: The variants here are grouped under a common enum, since they are usually handled by the\n///       same component. If this changes, split up this type (see `TrieRequestIncoming` for an\n///       example).\n#[derive(DataSize, Debug, Serialize)]\n#[repr(u8)]\npub(crate) enum NetRequest {\n    Transaction(Vec<u8>),\n    LegacyDeploy(Vec<u8>),\n    Block(Vec<u8>),\n    BlockHeader(Vec<u8>),\n    FinalitySignature(Vec<u8>),\n    SyncLeap(Vec<u8>),\n    ApprovalsHashes(Vec<u8>),\n    BlockExecutionResults(Vec<u8>),\n}\n\nimpl Display for NetRequest {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            NetRequest::Transaction(_) => f.write_str(\"request for transaction\"),\n            NetRequest::LegacyDeploy(_) => f.write_str(\"request for legacy deploy\"),\n            NetRequest::Block(_) => f.write_str(\"request for block\"),\n            NetRequest::BlockHeader(_) => f.write_str(\"request for block header\"),\n            NetRequest::FinalitySignature(_) => {\n                f.write_str(\"request for gossiped finality signature\")\n            }\n            NetRequest::SyncLeap(_) => f.write_str(\"request for sync leap\"),\n         
   NetRequest::ApprovalsHashes(_) => f.write_str(\"request for approvals hashes\"),\n            NetRequest::BlockExecutionResults(_) => {\n                f.write_str(\"request for block execution results\")\n            }\n        }\n    }\n}\n\nimpl NetRequest {\n    /// Returns a unique identifier of the requested object.\n    pub(crate) fn unique_id(&self) -> Vec<u8> {\n        let id = match self {\n            NetRequest::Transaction(ref id)\n            | NetRequest::LegacyDeploy(ref id)\n            | NetRequest::Block(ref id)\n            | NetRequest::BlockHeader(ref id)\n            | NetRequest::FinalitySignature(ref id)\n            | NetRequest::SyncLeap(ref id)\n            | NetRequest::ApprovalsHashes(ref id)\n            | NetRequest::BlockExecutionResults(ref id) => id,\n        };\n        let mut unique_id = Vec::with_capacity(id.len() + 1);\n        unique_id.push(self.tag() as u8);\n        unique_id.extend(id);\n\n        unique_id\n    }\n\n    /// Returns the tag associated with the request.\n    pub(crate) fn tag(&self) -> Tag {\n        match self {\n            NetRequest::Transaction(_) => Tag::Transaction,\n            NetRequest::LegacyDeploy(_) => Tag::LegacyDeploy,\n            NetRequest::Block(_) => Tag::Block,\n            NetRequest::BlockHeader(_) => Tag::BlockHeader,\n            NetRequest::FinalitySignature(_) => Tag::FinalitySignature,\n            NetRequest::SyncLeap(_) => Tag::SyncLeap,\n            NetRequest::ApprovalsHashes(_) => Tag::ApprovalsHashes,\n            NetRequest::BlockExecutionResults(_) => Tag::BlockExecutionResults,\n        }\n    }\n}\n\n/// A response for a net request.\n///\n/// See `NetRequest` for notes.\n#[derive(Debug, Serialize)]\npub(crate) enum NetResponse {\n    Transaction(Arc<[u8]>),\n    LegacyDeploy(Arc<[u8]>),\n    Block(Arc<[u8]>),\n    BlockHeader(Arc<[u8]>),\n    FinalitySignature(Arc<[u8]>),\n    SyncLeap(Arc<[u8]>),\n    ApprovalsHashes(Arc<[u8]>),\n    
BlockExecutionResults(Arc<[u8]>),\n}\n\n// `NetResponse` uses `Arcs`, so we count all data as 0.\nimpl DataSize for NetResponse {\n    const IS_DYNAMIC: bool = false;\n\n    const STATIC_HEAP_SIZE: usize = 0;\n\n    fn estimate_heap_size(&self) -> usize {\n        0\n    }\n}\n\nimpl Display for NetResponse {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            NetResponse::Transaction(_) => f.write_str(\"response, transaction\"),\n            NetResponse::LegacyDeploy(_) => f.write_str(\"response, legacy deploy\"),\n            NetResponse::Block(_) => f.write_str(\"response, block\"),\n            NetResponse::BlockHeader(_) => f.write_str(\"response, block header\"),\n            NetResponse::FinalitySignature(_) => f.write_str(\"response, finality signature\"),\n            NetResponse::SyncLeap(_) => f.write_str(\"response for sync leap\"),\n            NetResponse::ApprovalsHashes(_) => f.write_str(\"response for approvals hashes\"),\n            NetResponse::BlockExecutionResults(_) => {\n                f.write_str(\"response for block execution results\")\n            }\n        }\n    }\n}\n\n/// A request for a trie.\n#[derive(DataSize, Debug, Serialize)]\npub(crate) struct TrieRequest(pub(crate) Vec<u8>);\n\nimpl Display for TrieRequest {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"request for trie {}\", TrieOrChunkIdDisplay(&self.0))\n    }\n}\n\n/// A response to a request for a trie.\n#[derive(DataSize, Debug, Serialize)]\npub(crate) struct TrieResponse(pub(crate) Vec<u8>);\n\nimpl Display for TrieResponse {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        f.write_str(\"response, trie\")\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::NetRequest;\n\n    #[test]\n    fn unique_id_is_unique_across_variants() {\n        let inner_id = b\"example\".to_vec();\n\n        let a = NetRequest::Transaction(inner_id.clone());\n        let b = 
NetRequest::Block(inner_id);\n\n        assert_ne!(a.unique_id(), b.unique_id());\n    }\n}\n"
  },
  {
    "path": "node/src/effect/requests.rs",
    "content": "//! Request effects.\n//!\n//! Requests typically ask other components to perform a service and report back the result. See the\n//! top-level module documentation for details.\n\nuse std::{\n    collections::{BTreeMap, BTreeSet, HashMap, HashSet},\n    fmt::{self, Display, Formatter},\n    sync::Arc,\n};\n\nuse datasize::DataSize;\nuse hex_fmt::HexFmt;\nuse serde::Serialize;\nuse smallvec::SmallVec;\nuse static_assertions::const_assert;\n\nuse casper_binary_port::{\n    ConsensusStatus, ConsensusValidatorChanges, LastProgress, NetworkName, RecordId, Uptime,\n};\nuse casper_storage::{\n    block_store::types::ApprovalsHashes,\n    data_access_layer::{\n        prefixed_values::{PrefixedValuesRequest, PrefixedValuesResult},\n        tagged_values::{TaggedValuesRequest, TaggedValuesResult},\n        AddressableEntityResult, BalanceRequest, BalanceResult, EntryPointExistsResult,\n        EraValidatorsRequest, EraValidatorsResult, ExecutionResultsChecksumResult, PutTrieRequest,\n        PutTrieResult, QueryRequest, QueryResult, SeigniorageRecipientsRequest,\n        SeigniorageRecipientsResult, TrieRequest, TrieResult,\n    },\n    DbRawBytesSpec,\n};\nuse casper_types::{\n    execution::ExecutionResult, Approval, AvailableBlockRange, Block, BlockHash, BlockHeader,\n    BlockSignatures, BlockSynchronizerStatus, BlockV2, ChainspecRawBytes, DeployHash, Digest,\n    DisplayIter, EntityAddr, EraId, ExecutionInfo, FinalitySignature, FinalitySignatureId,\n    HashAddr, NextUpgrade, ProtocolUpgradeConfig, PublicKey, TimeDiff, Timestamp, Transaction,\n    TransactionHash, TransactionId, Transfer,\n};\n\nuse super::{AutoClosingResponder, GossipTarget, Responder};\nuse crate::{\n    components::{\n        block_synchronizer::{\n            GlobalStateSynchronizerError, GlobalStateSynchronizerResponse, TrieAccumulatorError,\n            TrieAccumulatorResponse,\n        },\n        consensus::{ClContext, ProposedBlock},\n        
contract_runtime::SpeculativeExecutionResult,\n        diagnostics_port::StopAtSpec,\n        fetcher::{FetchItem, FetchResult},\n        gossiper::GossipItem,\n        network::NetworkInsights,\n        transaction_acceptor,\n    },\n    contract_runtime::ExecutionPreState,\n    reactor::main_reactor::ReactorState,\n    types::{\n        appendable_block::AppendableBlock, BlockExecutionResultsOrChunk,\n        BlockExecutionResultsOrChunkId, BlockWithMetadata, ExecutableBlock, InvalidProposalError,\n        LegacyDeploy, MetaBlockState, NodeId, StatusFeed, TransactionHeader,\n    },\n    utils::Source,\n};\n\nconst _STORAGE_REQUEST_SIZE: usize = size_of::<StorageRequest>();\nconst_assert!(_STORAGE_REQUEST_SIZE < 129);\n\n/// A metrics request.\n#[derive(Debug)]\npub(crate) enum MetricsRequest {\n    /// Render current node metrics as prometheus-formatted string.\n    RenderNodeMetricsText {\n        /// Responder returning the rendered metrics or `None`, if an internal error occurred.\n        responder: Responder<Option<String>>,\n    },\n}\n\nimpl Display for MetricsRequest {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            MetricsRequest::RenderNodeMetricsText { .. 
} => write!(formatter, \"get metrics text\"),\n        }\n    }\n}\n\nconst _NETWORK_EVENT_SIZE: usize = size_of::<NetworkRequest<String>>();\nconst_assert!(_NETWORK_EVENT_SIZE < 105);\n\n/// A networking request.\n#[derive(Debug, Serialize)]\n#[must_use]\npub(crate) enum NetworkRequest<P> {\n    /// Send a message on the network to a specific peer.\n    SendMessage {\n        /// Message destination.\n        dest: Box<NodeId>,\n        /// Message payload.\n        payload: Box<P>,\n        /// If `true`, the responder will be called early after the message has been queued, not\n        /// waiting until it has passed to the kernel.\n        respond_after_queueing: bool,\n        /// Responder to be called when the message has been *buffered for sending*.\n        #[serde(skip_serializing)]\n        auto_closing_responder: AutoClosingResponder<()>,\n    },\n    /// Send a message on the network to validator peers in the given era.\n    ValidatorBroadcast {\n        /// Message payload.\n        payload: Box<P>,\n        /// Era whose validators are recipients.\n        era_id: EraId,\n        /// Responder to be called when all messages are queued.\n        #[serde(skip_serializing)]\n        auto_closing_responder: AutoClosingResponder<()>,\n    },\n    /// Gossip a message to a random subset of peers.\n    Gossip {\n        /// Payload to gossip.\n        payload: Box<P>,\n        /// Type of peers that should receive the gossip message.\n        gossip_target: GossipTarget,\n        /// Number of peers to gossip to. 
This is an upper bound, otherwise best-effort.\n        count: usize,\n        /// Node IDs of nodes to exclude from gossiping to.\n        #[serde(skip_serializing)]\n        exclude: HashSet<NodeId>,\n        /// Responder to be called when all messages are queued.\n        #[serde(skip_serializing)]\n        auto_closing_responder: AutoClosingResponder<HashSet<NodeId>>,\n    },\n}\n\nimpl<P> NetworkRequest<P> {\n    /// Transform a network request by mapping the contained payload.\n    ///\n    /// This is a replacement for a `From` conversion that is not possible without specialization.\n    pub(crate) fn map_payload<F, P2>(self, wrap_payload: F) -> NetworkRequest<P2>\n    where\n        F: FnOnce(P) -> P2,\n    {\n        match self {\n            NetworkRequest::SendMessage {\n                dest,\n                payload,\n                respond_after_queueing,\n                auto_closing_responder,\n            } => NetworkRequest::SendMessage {\n                dest,\n                payload: Box::new(wrap_payload(*payload)),\n                respond_after_queueing,\n                auto_closing_responder,\n            },\n            NetworkRequest::ValidatorBroadcast {\n                payload,\n                era_id,\n                auto_closing_responder,\n            } => NetworkRequest::ValidatorBroadcast {\n                payload: Box::new(wrap_payload(*payload)),\n                era_id,\n                auto_closing_responder,\n            },\n            NetworkRequest::Gossip {\n                payload,\n                gossip_target,\n                count,\n                exclude,\n                auto_closing_responder,\n            } => NetworkRequest::Gossip {\n                payload: Box::new(wrap_payload(*payload)),\n                gossip_target,\n                count,\n                exclude,\n                auto_closing_responder,\n            },\n        }\n    }\n}\n\nimpl<P> Display for NetworkRequest<P>\nwhere\n    P: 
Display,\n{\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            NetworkRequest::SendMessage { dest, payload, .. } => {\n                write!(formatter, \"send to {}: {}\", dest, payload)\n            }\n            NetworkRequest::ValidatorBroadcast { payload, .. } => {\n                write!(formatter, \"broadcast: {}\", payload)\n            }\n            NetworkRequest::Gossip { payload, .. } => write!(formatter, \"gossip: {}\", payload),\n        }\n    }\n}\n\n/// A networking info request.\n#[derive(Debug, Serialize)]\npub(crate) enum NetworkInfoRequest {\n    /// Get incoming and outgoing peers.\n    Peers {\n        /// Responder to be called with all connected peers.\n        /// Responds with a map from [NodeId]s to a socket address, represented as a string.\n        responder: Responder<BTreeMap<NodeId, String>>,\n    },\n    /// Get up to `count` fully-connected peers in random order.\n    FullyConnectedPeers {\n        count: usize,\n        /// Responder to be called with the peers.\n        responder: Responder<Vec<NodeId>>,\n    },\n    /// Get up to `count` fully-connected validators in random order.\n    FullyConnectedValidators {\n        count: usize,\n        /// era_id in which the filtered peer needs to be a validator.\n        era_id: EraId,\n        /// Responder to be called with the peers.\n        responder: Responder<Vec<NodeId>>,\n    },\n    /// Get detailed insights into the nodes networking.\n    Insight {\n        responder: Responder<NetworkInsights>,\n    },\n}\n\nimpl Display for NetworkInfoRequest {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            NetworkInfoRequest::Peers { responder: _ } => {\n                formatter.write_str(\"get peers-to-socket-address map\")\n            }\n            NetworkInfoRequest::FullyConnectedPeers {\n                count,\n                responder: _,\n            } => {\n             
   write!(formatter, \"get up to {} fully connected peers\", count)\n            }\n            NetworkInfoRequest::FullyConnectedValidators {\n                count,\n                era_id,\n                responder: _,\n            } => {\n                write!(\n                    formatter,\n                    \"get up to {} fully connected validators in era {}\",\n                    count, era_id\n                )\n            }\n            NetworkInfoRequest::Insight { responder: _ } => {\n                formatter.write_str(\"get networking insights\")\n            }\n        }\n    }\n}\n\n/// A gossip request.\n///\n/// This request usually initiates gossiping process of the specified item. Note that the gossiper\n/// will fetch the item itself, so only the ID is needed.\n///\n/// The responder will be called as soon as the gossiper has initiated the process.\n// Note: This request should eventually entirely replace `ItemReceived`.\n#[derive(Debug, Serialize)]\n#[must_use]\npub(crate) struct BeginGossipRequest<T>\nwhere\n    T: GossipItem,\n{\n    pub(crate) item_id: T::Id,\n    pub(crate) source: Source,\n    pub(crate) target: GossipTarget,\n    pub(crate) responder: Responder<()>,\n}\n\nimpl<T> Display for BeginGossipRequest<T>\nwhere\n    T: GossipItem,\n{\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"begin gossip of {} from {}\", self.item_id, self.source)\n    }\n}\n\n#[derive(Debug, Serialize)]\n/// A storage request.\npub(crate) enum StorageRequest {\n    /// Store given block.\n    PutBlock {\n        /// Block to be stored.\n        block: Arc<Block>,\n        /// Responder to call with the result.  
Returns true if the block was stored on this\n        /// attempt or false if it was previously stored.\n        responder: Responder<bool>,\n    },\n    /// Store the approvals hashes.\n    PutApprovalsHashes {\n        /// Approvals hashes to store.\n        approvals_hashes: Box<ApprovalsHashes>,\n        responder: Responder<bool>,\n    },\n    /// Store the block and approvals hashes.\n    PutExecutedBlock {\n        /// Block to be stored.\n        block: Arc<BlockV2>,\n        /// Approvals hashes to store.\n        approvals_hashes: Box<ApprovalsHashes>,\n        execution_results: HashMap<TransactionHash, ExecutionResult>,\n        responder: Responder<bool>,\n    },\n    /// Retrieve block with given hash.\n    GetBlock {\n        /// Hash of block to be retrieved.\n        block_hash: BlockHash,\n        /// Responder to call with the result.  Returns `None` if the block doesn't exist in local\n        /// storage.\n        responder: Responder<Option<Block>>,\n    },\n    IsBlockStored {\n        block_hash: BlockHash,\n        responder: Responder<bool>,\n    },\n    /// Retrieve the approvals hashes.\n    GetApprovalsHashes {\n        /// Hash of the block for which to retrieve approvals hashes.\n        block_hash: BlockHash,\n        /// Responder to call with the result.  
Returns `None` if the approvals hashes don't exist\n        /// in local storage.\n        responder: Responder<Option<ApprovalsHashes>>,\n    },\n    /// Retrieve the highest complete block.\n    GetHighestCompleteBlock {\n        /// Responder.\n        responder: Responder<Option<Block>>,\n    },\n    /// Retrieve the highest complete block header.\n    GetHighestCompleteBlockHeader {\n        /// Responder.\n        responder: Responder<Option<BlockHeader>>,\n    },\n    /// Retrieve the era IDs of the blocks in which the given transactions were executed.\n    GetTransactionsEraIds {\n        transaction_hashes: HashSet<TransactionHash>,\n        responder: Responder<HashSet<EraId>>,\n    },\n    /// Retrieve block header with given hash.\n    GetBlockHeader {\n        /// Hash of block to get header of.\n        block_hash: BlockHash,\n        /// If true, only return `Some` if the block is in the available block range, i.e. the\n        /// highest contiguous range of complete blocks.\n        only_from_available_block_range: bool,\n        /// Responder to call with the result.  Returns `None` if the block header doesn't exist in\n        /// local storage.\n        responder: Responder<Option<BlockHeader>>,\n    },\n    /// Retrieve block header with given hash.\n    GetRawData {\n        /// Which record to get.\n        record_id: RecordId,\n        /// bytesrepr serialized key.\n        key: Vec<u8>,\n        /// Responder to call with the result.  Returns `None` if the data doesn't exist in\n        /// local storage.\n        responder: Responder<Option<DbRawBytesSpec>>,\n    },\n    GetBlockHeaderByHeight {\n        /// Height of block to get header of.\n        block_height: u64,\n        /// If true, only return `Some` if the block is in the available block range, i.e. the\n        /// highest contiguous range of complete blocks.\n        only_from_available_block_range: bool,\n        /// Responder to call with the result.  
Returns `None` if the block header doesn't exist in\n        /// local storage.\n        responder: Responder<Option<BlockHeader>>,\n    },\n    GetLatestSwitchBlockHeader {\n        responder: Responder<Option<BlockHeader>>,\n    },\n    GetSwitchBlockHeaderByEra {\n        /// Era ID for which to get the block header.\n        era_id: EraId,\n        /// Responder to call with the result.\n        responder: Responder<Option<BlockHeader>>,\n    },\n    /// Retrieve all transfers in a block with given hash.\n    GetBlockTransfers {\n        /// Hash of block to get transfers of.\n        block_hash: BlockHash,\n        /// Responder to call with the result.  Returns `None` if the transfers do not exist in\n        /// local storage under the block_hash provided.\n        responder: Responder<Option<Vec<Transfer>>>,\n    },\n    PutTransaction {\n        transaction: Arc<Transaction>,\n        /// Returns `true` if the transaction was stored on this attempt or false if it was\n        /// previously stored.\n        responder: Responder<bool>,\n    },\n    /// Retrieve transaction with given hashes.\n    GetTransactions {\n        transaction_hashes: Vec<TransactionHash>,\n        #[allow(clippy::type_complexity)]\n        responder: Responder<SmallVec<[Option<(Transaction, Option<BTreeSet<Approval>>)>; 1]>>,\n    },\n    /// Retrieve legacy deploy with given hash.\n    GetLegacyDeploy {\n        deploy_hash: DeployHash,\n        responder: Responder<Option<LegacyDeploy>>,\n    },\n    GetTransaction {\n        transaction_id: TransactionId,\n        responder: Responder<Option<Transaction>>,\n    },\n    IsTransactionStored {\n        transaction_id: TransactionId,\n        responder: Responder<bool>,\n    },\n    GetTransactionAndExecutionInfo {\n        transaction_hash: TransactionHash,\n        with_finalized_approvals: bool,\n        responder: Responder<Option<(Transaction, Option<ExecutionInfo>)>>,\n    },\n    /// Store execution results for a set of 
transactions of a single block.\n    ///\n    /// Will return a fatal error if there are already execution results known for a specific\n    /// transaction/block combination and a different result is inserted.\n    ///\n    /// Inserting the same transaction/block combination multiple times with the same execution\n    /// results is not an error and will silently be ignored.\n    PutExecutionResults {\n        /// Hash of block.\n        block_hash: Box<BlockHash>,\n        block_height: u64,\n        era_id: EraId,\n        /// Mapping of transactions to execution results of the block.\n        execution_results: HashMap<TransactionHash, ExecutionResult>,\n        /// Responder to call when done storing.\n        responder: Responder<()>,\n    },\n    GetExecutionResults {\n        block_hash: BlockHash,\n        responder: Responder<Option<Vec<(TransactionHash, TransactionHeader, ExecutionResult)>>>,\n    },\n    GetBlockExecutionResultsOrChunk {\n        /// Request ID.\n        id: BlockExecutionResultsOrChunkId,\n        /// Responder to call with the execution results.\n        /// None is returned when we don't have the block in the storage.\n        responder: Responder<Option<BlockExecutionResultsOrChunk>>,\n    },\n    /// Retrieve a finality signature by block hash and public key.\n    GetFinalitySignature {\n        id: Box<FinalitySignatureId>,\n        responder: Responder<Option<FinalitySignature>>,\n    },\n    IsFinalitySignatureStored {\n        id: Box<FinalitySignatureId>,\n        responder: Responder<bool>,\n    },\n    /// Retrieve block and its metadata at a given height.\n    GetBlockAndMetadataByHeight {\n        /// The height of the block.\n        block_height: BlockHeight,\n        /// Flag indicating whether storage should check the block availability before trying to\n        /// retrieve it.\n        only_from_available_block_range: bool,\n        /// The responder to call with the results.\n        responder: 
Responder<Option<BlockWithMetadata>>,\n    },\n    /// Get a single finality signature for a block hash.\n    GetBlockSignature {\n        /// The hash for the request.\n        block_hash: BlockHash,\n        /// The public key of the signer.\n        public_key: Box<PublicKey>,\n        /// Responder to call with the result.\n        responder: Responder<Option<FinalitySignature>>,\n    },\n    /// Store finality signatures.\n    PutBlockSignatures {\n        /// Signatures that are to be stored.\n        signatures: BlockSignatures,\n        /// Responder to call with the result, if true then the signatures were successfully\n        /// stored.\n        responder: Responder<bool>,\n    },\n    PutFinalitySignature {\n        signature: Box<FinalitySignature>,\n        responder: Responder<bool>,\n    },\n    /// Store a block header.\n    PutBlockHeader {\n        /// Block header that is to be stored.\n        block_header: Box<BlockHeader>,\n        /// Responder to call with the result, if true then the block header was successfully\n        /// stored.\n        responder: Responder<bool>,\n    },\n    /// Retrieve the height range of fully available blocks (not just block headers). Returns\n    /// `[u64::MAX, u64::MAX]` when there are no sequences.\n    GetAvailableBlockRange {\n        /// Responder to call with the result.\n        responder: Responder<AvailableBlockRange>,\n    },\n    /// Store a set of finalized approvals for a specific transaction.\n    StoreFinalizedApprovals {\n        /// The transaction hash to store the finalized approvals for.\n        transaction_hash: TransactionHash,\n        /// The set of finalized approvals.\n        finalized_approvals: BTreeSet<Approval>,\n        /// Responder, responded to once the approvals are written.  
If true, new approvals were\n        /// written.\n        responder: Responder<bool>,\n    },\n    /// Retrieve the height of the final block of the previous protocol version, if known.\n    GetKeyBlockHeightForActivationPoint { responder: Responder<Option<u64>> },\n    /// Retrieve the block utilization score.\n    GetBlockUtilizationScore {\n        /// The era id.\n        era_id: EraId,\n        /// The block height of the switch block\n        block_height: u64,\n        /// The utilization within the switch block.\n        switch_block_utilization: u64,\n        /// Responder, responded once the utilization for the era has been determined.\n        responder: Responder<Option<(u64, u64)>>,\n    },\n}\n\nimpl Display for StorageRequest {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            StorageRequest::PutBlock { block, .. } => {\n                write!(formatter, \"put {}\", block)\n            }\n            StorageRequest::PutApprovalsHashes {\n                approvals_hashes, ..\n            } => {\n                write!(formatter, \"put {}\", approvals_hashes)\n            }\n            StorageRequest::GetBlock { block_hash, .. } => {\n                write!(formatter, \"get block {}\", block_hash)\n            }\n            StorageRequest::IsBlockStored { block_hash, .. } => {\n                write!(formatter, \"is block {} stored\", block_hash)\n            }\n            StorageRequest::GetApprovalsHashes { block_hash, .. } => {\n                write!(formatter, \"get approvals hashes {}\", block_hash)\n            }\n            StorageRequest::GetHighestCompleteBlock { .. } => {\n                write!(formatter, \"get highest complete block\")\n            }\n            StorageRequest::GetHighestCompleteBlockHeader { .. 
} => {\n                write!(formatter, \"get highest complete block header\")\n            }\n            StorageRequest::GetTransactionsEraIds {\n                transaction_hashes, ..\n            } => {\n                write!(\n                    formatter,\n                    \"get era ids for {} transactions\",\n                    transaction_hashes.len()\n                )\n            }\n            StorageRequest::GetBlockHeader { block_hash, .. } => {\n                write!(formatter, \"get {}\", block_hash)\n            }\n            StorageRequest::GetBlockHeaderByHeight { block_height, .. } => {\n                write!(formatter, \"get header for height {}\", block_height)\n            }\n            StorageRequest::GetLatestSwitchBlockHeader { .. } => {\n                write!(formatter, \"get latest switch block header\")\n            }\n            StorageRequest::GetSwitchBlockHeaderByEra { era_id, .. } => {\n                write!(formatter, \"get header for era {}\", era_id)\n            }\n            StorageRequest::GetBlockTransfers { block_hash, .. } => {\n                write!(formatter, \"get transfers for {}\", block_hash)\n            }\n            StorageRequest::PutTransaction { transaction, .. } => {\n                write!(formatter, \"put {}\", transaction)\n            }\n            StorageRequest::GetTransactions {\n                transaction_hashes, ..\n            } => {\n                write!(\n                    formatter,\n                    \"get {}\",\n                    DisplayIter::new(transaction_hashes.iter())\n                )\n            }\n            StorageRequest::GetLegacyDeploy { deploy_hash, .. } => {\n                write!(formatter, \"get legacy deploy {}\", deploy_hash)\n            }\n            StorageRequest::GetTransaction { transaction_id, .. 
} => {\n                write!(formatter, \"get transaction {}\", transaction_id)\n            }\n            StorageRequest::GetTransactionAndExecutionInfo {\n                transaction_hash, ..\n            } => {\n                write!(\n                    formatter,\n                    \"get transaction and exec info {}\",\n                    transaction_hash\n                )\n            }\n            StorageRequest::IsTransactionStored { transaction_id, .. } => {\n                write!(formatter, \"is transaction {} stored\", transaction_id)\n            }\n            StorageRequest::PutExecutionResults { block_hash, .. } => {\n                write!(formatter, \"put execution results for {}\", block_hash)\n            }\n            StorageRequest::GetExecutionResults { block_hash, .. } => {\n                write!(formatter, \"get execution results for {}\", block_hash)\n            }\n            StorageRequest::GetBlockExecutionResultsOrChunk { id, .. } => {\n                write!(formatter, \"get block execution results or chunk for {}\", id)\n            }\n            StorageRequest::GetFinalitySignature { id, .. } => {\n                write!(formatter, \"get finality signature {}\", id)\n            }\n            StorageRequest::IsFinalitySignatureStored { id, .. } => {\n                write!(formatter, \"is finality signature {} stored\", id)\n            }\n            StorageRequest::GetBlockAndMetadataByHeight { block_height, .. 
} => {\n                write!(\n                    formatter,\n                    \"get block and metadata for block at height: {}\",\n                    block_height\n                )\n            }\n            StorageRequest::GetBlockSignature {\n                block_hash,\n                public_key,\n                ..\n            } => {\n                write!(\n                    formatter,\n                    \"get finality signature for block hash {} from {}\",\n                    block_hash, public_key\n                )\n            }\n            StorageRequest::PutBlockSignatures { .. } => {\n                write!(formatter, \"put finality signatures\")\n            }\n            StorageRequest::PutFinalitySignature { .. } => {\n                write!(formatter, \"put finality signature\")\n            }\n            StorageRequest::PutBlockHeader { block_header, .. } => {\n                write!(formatter, \"put block header: {}\", block_header)\n            }\n            StorageRequest::GetAvailableBlockRange { .. } => {\n                write!(formatter, \"get available block range\",)\n            }\n            StorageRequest::StoreFinalizedApprovals {\n                transaction_hash, ..\n            } => {\n                write!(\n                    formatter,\n                    \"finalized approvals for transaction {}\",\n                    transaction_hash\n                )\n            }\n            StorageRequest::PutExecutedBlock { block, .. } => {\n                write!(formatter, \"put executed block {}\", block.hash(),)\n            }\n            StorageRequest::GetKeyBlockHeightForActivationPoint { .. 
} => {\n                write!(\n                    formatter,\n                    \"get key block height for current activation point\"\n                )\n            }\n            StorageRequest::GetRawData {\n                key,\n                responder: _responder,\n                record_id,\n            } => {\n                write!(formatter, \"get raw data {}::{:?}\", record_id, key)\n            }\n            StorageRequest::GetBlockUtilizationScore { era_id, .. } => {\n                write!(formatter, \"get utilization score for era {}\", era_id)\n            }\n        }\n    }\n}\n\n#[derive(Debug, Serialize)]\npub(crate) struct MakeBlockExecutableRequest {\n    /// Hash of the block to be made executable.\n    pub block_hash: BlockHash,\n    /// Responder with the executable block and it's transactions\n    pub responder: Responder<Option<ExecutableBlock>>,\n}\n\nimpl Display for MakeBlockExecutableRequest {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"block made executable: {}\", self.block_hash)\n    }\n}\n\n/// A request to mark a block at a specific height completed.\n///\n/// A block is considered complete if\n///\n/// * the block header and the actual block are persisted in storage,\n/// * all of its transactions are persisted in storage, and\n/// * the global state root the block refers to has no missing dependencies locally.\n#[derive(Debug, Serialize)]\npub(crate) struct MarkBlockCompletedRequest {\n    pub block_height: u64,\n    /// Responds `true` if the block was not previously marked complete.\n    pub responder: Responder<bool>,\n}\n\nimpl Display for MarkBlockCompletedRequest {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"block completed: height {}\", self.block_height)\n    }\n}\n\n#[derive(DataSize, Debug, Serialize)]\npub(crate) enum TransactionBufferRequest {\n    GetAppendableBlock {\n        timestamp: Timestamp,\n        era_id: EraId,\n        
request_expiry: Timestamp,\n        responder: Responder<AppendableBlock>,\n    },\n}\n\nimpl Display for TransactionBufferRequest {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            TransactionBufferRequest::GetAppendableBlock {\n                timestamp,\n                era_id,\n                request_expiry,\n                ..\n            } => {\n                write!(\n                    formatter,\n                    \"request for appendable block at instant {} for era {} (expires at {})\",\n                    timestamp, era_id, request_expiry\n                )\n            }\n        }\n    }\n}\n\n/// Abstract REST request.\n///\n/// An REST request is an abstract request that does not concern itself with serialization or\n/// transport.\n#[derive(Debug)]\n#[must_use]\npub(crate) enum RestRequest {\n    /// Return string formatted status or `None` if an error occurred.\n    Status {\n        /// Responder to call with the result.\n        responder: Responder<StatusFeed>,\n    },\n    /// Return string formatted, prometheus compatible metrics or `None` if an error occurred.\n    Metrics {\n        /// Responder to call with the result.\n        responder: Responder<Option<String>>,\n    },\n}\n\nimpl Display for RestRequest {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            RestRequest::Status { .. } => write!(formatter, \"get status\"),\n            RestRequest::Metrics { .. 
} => write!(formatter, \"get metrics\"),\n        }\n    }\n}\n\n/// A contract runtime request.\n#[derive(Debug, Serialize)]\n#[must_use]\npub(crate) enum ContractRuntimeRequest {\n    /// A request to enqueue a `ExecutableBlock` for execution.\n    EnqueueBlockForExecution {\n        /// A `ExecutableBlock` to enqueue.\n        executable_block: ExecutableBlock,\n        /// The key block height for the current protocol version's activation point.\n        key_block_height_for_activation_point: u64,\n        meta_block_state: MetaBlockState,\n    },\n    /// A query request.\n    Query {\n        /// Query request.\n        #[serde(skip_serializing)]\n        request: QueryRequest,\n        /// Responder to call with the query result.\n        responder: Responder<QueryResult>,\n    },\n    /// A query by prefix request.\n    QueryByPrefix {\n        /// Query by prefix request.\n        #[serde(skip_serializing)]\n        request: PrefixedValuesRequest,\n        /// Responder to call with the query result.\n        responder: Responder<PrefixedValuesResult>,\n    },\n    /// A balance request.\n    GetBalance {\n        /// Balance request.\n        #[serde(skip_serializing)]\n        request: BalanceRequest,\n        /// Responder to call with the balance result.\n        responder: Responder<BalanceResult>,\n    },\n    /// Returns validator weights.\n    GetEraValidators {\n        /// Get validators weights request.\n        #[serde(skip_serializing)]\n        request: EraValidatorsRequest,\n        /// Responder to call with the result.\n        responder: Responder<EraValidatorsResult>,\n    },\n    /// Returns the seigniorage recipients snapshot at the given state root hash.\n    GetSeigniorageRecipients {\n        /// Get seigniorage recipients request.\n        #[serde(skip_serializing)]\n        request: SeigniorageRecipientsRequest,\n        /// Responder to call with the result.\n        responder: Responder<SeigniorageRecipientsResult>,\n    },\n    
/// Return all values at a given state root hash and given key tag.\n    GetTaggedValues {\n        /// Get tagged values request.\n        #[serde(skip_serializing)]\n        request: TaggedValuesRequest,\n        /// Responder to call with the result.\n        responder: Responder<TaggedValuesResult>,\n    },\n    /// Returns the value of the execution results checksum stored in the ChecksumRegistry for the\n    /// given state root hash.\n    GetExecutionResultsChecksum {\n        state_root_hash: Digest,\n        responder: Responder<ExecutionResultsChecksumResult>,\n    },\n    /// Returns an `AddressableEntity` if found under the given entity_addr.  If a legacy `Account`\n    /// or contract exists under the given key, it will be migrated to an `AddressableEntity`\n    /// and returned. However, global state is not altered and the migrated record does not\n    /// actually exist.\n    GetAddressableEntity {\n        state_root_hash: Digest,\n        entity_addr: EntityAddr,\n        responder: Responder<AddressableEntityResult>,\n    },\n    /// Returns information if an entry point exists under the given state root hash and entry\n    /// point key.\n    GetEntryPointExists {\n        state_root_hash: Digest,\n        contract_hash: HashAddr,\n        entry_point_name: String,\n        responder: Responder<EntryPointExistsResult>,\n    },\n    /// Get a trie or chunk by its ID.\n    GetTrie {\n        /// A request for a trie element.\n        #[serde(skip_serializing)]\n        request: TrieRequest,\n        /// Responder to call with the result.\n        responder: Responder<TrieResult>,\n    },\n    /// Insert a trie into global storage\n    PutTrie {\n        /// A request to persist a trie element.\n        #[serde(skip_serializing)]\n        request: PutTrieRequest,\n        /// Responder to call with the result. 
Contains the hash of the persisted trie.\n        responder: Responder<PutTrieResult>,\n    },\n    /// Execute transaction without committing results\n    SpeculativelyExecute {\n        /// Pre-state.\n        block_header: Box<BlockHeader>,\n        /// Transaction to execute.\n        transaction: Box<Transaction>,\n        /// Results\n        responder: Responder<SpeculativeExecutionResult>,\n    },\n    UpdateRuntimePrice(EraId, u8),\n    GetEraGasPrice {\n        era_id: EraId,\n        responder: Responder<Option<u8>>,\n    },\n    DoProtocolUpgrade {\n        protocol_upgrade_config: ProtocolUpgradeConfig,\n        next_block_height: u64,\n        parent_hash: BlockHash,\n        parent_seed: Digest,\n    },\n    UpdatePreState {\n        new_pre_state: ExecutionPreState,\n    },\n}\n\nimpl Display for ContractRuntimeRequest {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            ContractRuntimeRequest::EnqueueBlockForExecution {\n                executable_block, ..\n            } => {\n                write!(formatter, \"executable_block: {}\", executable_block)\n            }\n            ContractRuntimeRequest::Query {\n                request: query_request,\n                ..\n            } => {\n                write!(formatter, \"query request: {:?}\", query_request)\n            }\n            ContractRuntimeRequest::QueryByPrefix { request, .. } => {\n                write!(formatter, \"query by prefix request: {:?}\", request)\n            }\n            ContractRuntimeRequest::GetBalance {\n                request: balance_request,\n                ..\n            } => write!(formatter, \"balance request: {:?}\", balance_request),\n            ContractRuntimeRequest::GetEraValidators { request, .. } => {\n                write!(formatter, \"get era validators: {:?}\", request)\n            }\n            ContractRuntimeRequest::GetSeigniorageRecipients { request, .. 
} => {\n                write!(formatter, \"get seigniorage recipients for {:?}\", request)\n            }\n            ContractRuntimeRequest::GetTaggedValues {\n                request: get_all_values_request,\n                ..\n            } => {\n                write!(\n                    formatter,\n                    \"get all values request: {:?}\",\n                    get_all_values_request\n                )\n            }\n            ContractRuntimeRequest::GetExecutionResultsChecksum {\n                state_root_hash, ..\n            } => write!(\n                formatter,\n                \"get execution results checksum under {}\",\n                state_root_hash\n            ),\n            ContractRuntimeRequest::GetAddressableEntity {\n                state_root_hash,\n                entity_addr,\n                ..\n            } => {\n                write!(\n                    formatter,\n                    \"get addressable_entity {} under {}\",\n                    entity_addr, state_root_hash\n                )\n            }\n            ContractRuntimeRequest::GetTrie { request, .. } => {\n                write!(formatter, \"get trie: {:?}\", request)\n            }\n            ContractRuntimeRequest::PutTrie { request, .. } => {\n                write!(formatter, \"trie: {:?}\", request)\n            }\n            ContractRuntimeRequest::SpeculativelyExecute {\n                transaction,\n                block_header,\n                ..\n            } => {\n                write!(\n                    formatter,\n                    \"Execute {} on {}\",\n                    transaction.hash(),\n                    block_header.state_root_hash()\n                )\n            }\n            ContractRuntimeRequest::UpdateRuntimePrice(_, era_gas_price) => {\n                write!(formatter, \"updating price to {}\", era_gas_price)\n            }\n            ContractRuntimeRequest::GetEraGasPrice { era_id, .. 
} => {\n                write!(formatter, \"Get gas price for era {}\", era_id)\n            }\n            ContractRuntimeRequest::GetEntryPointExists {\n                state_root_hash,\n                contract_hash,\n                entry_point_name,\n                ..\n            } => {\n                let formatted_contract_hash = HexFmt(contract_hash);\n                write!(\n                    formatter,\n                    \"get entry point {}-{} under {}\",\n                    formatted_contract_hash, entry_point_name, state_root_hash\n                )\n            }\n            ContractRuntimeRequest::DoProtocolUpgrade {\n                protocol_upgrade_config,\n                ..\n            } => {\n                write!(\n                    formatter,\n                    \"execute protocol upgrade against config: {:?}\",\n                    protocol_upgrade_config\n                )\n            }\n            ContractRuntimeRequest::UpdatePreState { new_pre_state } => {\n                write!(\n                    formatter,\n                    \"Updating contract runtimes execution prestate: {:?}\",\n                    new_pre_state\n                )\n            }\n        }\n    }\n}\n\n/// Fetcher related requests.\n#[derive(Debug, Serialize)]\n#[must_use]\npub(crate) struct FetcherRequest<T: FetchItem> {\n    /// The ID of the item to be retrieved.\n    pub(crate) id: T::Id,\n    /// The peer id of the peer to be asked if the item is not held locally\n    pub(crate) peer: NodeId,\n    /// Metadata used during validation of the fetched item.\n    pub(crate) validation_metadata: Box<T::ValidationMetadata>,\n    /// Responder to call with the result.\n    pub(crate) responder: Responder<FetchResult<T>>,\n}\n\nimpl<T: FetchItem> Display for FetcherRequest<T> {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(formatter, \"request item by id {}\", self.id)\n    }\n}\n\n/// TrieAccumulator related 
requests.\n#[derive(Debug, Serialize, DataSize)]\n#[must_use]\npub(crate) struct TrieAccumulatorRequest {\n    /// The hash of the trie node.\n    pub(crate) hash: Digest,\n    /// The peers to try to fetch from.\n    pub(crate) peers: Vec<NodeId>,\n    /// Responder to call with the result.\n    pub(crate) responder: Responder<Result<TrieAccumulatorResponse, TrieAccumulatorError>>,\n}\n\nimpl Display for TrieAccumulatorRequest {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(formatter, \"request trie by hash {}\", self.hash)\n    }\n}\n\n#[derive(Debug, Serialize)]\npub(crate) struct SyncGlobalStateRequest {\n    pub(crate) block_hash: BlockHash,\n    pub(crate) state_root_hash: Digest,\n    #[serde(skip)]\n    pub(crate) responder:\n        Responder<Result<GlobalStateSynchronizerResponse, GlobalStateSynchronizerError>>,\n}\n\nimpl Display for SyncGlobalStateRequest {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            formatter,\n            \"request to sync global state at {}\",\n            self.block_hash\n        )\n    }\n}\n\n/// A block validator request.\n#[derive(Debug, DataSize)]\n#[must_use]\npub(crate) struct BlockValidationRequest {\n    /// The height of the proposed block in the chain.\n    pub(crate) proposed_block_height: u64,\n    /// The block to be validated.\n    pub(crate) block: ProposedBlock<ClContext>,\n    /// The sender of the block, which will be asked to provide all missing transactions.\n    pub(crate) sender: NodeId,\n    /// Responder to call with the result.\n    ///\n    /// Indicates whether validation was successful.\n    pub(crate) responder: Responder<Result<(), Box<InvalidProposalError>>>,\n}\n\nimpl Display for BlockValidationRequest {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        let BlockValidationRequest { block, sender, .. 
} = self;\n        write!(f, \"validate block {} from {}\", block, sender)\n    }\n}\n\ntype BlockHeight = u64;\n\n#[derive(DataSize, Debug)]\n#[must_use]\n/// Consensus component requests.\npub(crate) enum ConsensusRequest {\n    /// Request for our public key, and if we're a validator, the next round length.\n    Status(Responder<Option<ConsensusStatus>>),\n    /// Request for a list of validator status changes, by public key.\n    ValidatorChanges(Responder<ConsensusValidatorChanges>),\n}\n\n/// ChainspecLoader component requests.\n#[derive(Debug, Serialize)]\npub(crate) enum ChainspecRawBytesRequest {\n    /// Request for the chainspec file bytes with the genesis_accounts and global_state bytes, if\n    /// they are present.\n    GetChainspecRawBytes(Responder<Arc<ChainspecRawBytes>>),\n}\n\nimpl Display for ChainspecRawBytesRequest {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            ChainspecRawBytesRequest::GetChainspecRawBytes(_) => {\n                write!(f, \"get chainspec raw bytes\")\n            }\n        }\n    }\n}\n\n/// UpgradeWatcher component request to get the next scheduled upgrade, if any.\n#[derive(Debug, Serialize)]\npub(crate) struct UpgradeWatcherRequest(pub(crate) Responder<Option<NextUpgrade>>);\n\nimpl Display for UpgradeWatcherRequest {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"get next upgrade\")\n    }\n}\n\n#[derive(Debug, Serialize)]\npub(crate) enum ReactorInfoRequest {\n    ReactorState { responder: Responder<ReactorState> },\n    LastProgress { responder: Responder<LastProgress> },\n    Uptime { responder: Responder<Uptime> },\n    NetworkName { responder: Responder<NetworkName> },\n    BalanceHoldsInterval { responder: Responder<TimeDiff> },\n}\n\nimpl Display for ReactorInfoRequest {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            f,\n            \"get reactor status: {}\",\n            match self {\n    
            ReactorInfoRequest::ReactorState { .. } => \"ReactorState\",\n                ReactorInfoRequest::LastProgress { .. } => \"LastProgress\",\n                ReactorInfoRequest::Uptime { .. } => \"Uptime\",\n                ReactorInfoRequest::NetworkName { .. } => \"NetworkName\",\n                ReactorInfoRequest::BalanceHoldsInterval { .. } => \"BalanceHoldsInterval\",\n            }\n        )\n    }\n}\n\n#[derive(Debug, Serialize)]\n#[allow(clippy::enum_variant_names)]\npub(crate) enum BlockAccumulatorRequest {\n    GetPeersForBlock {\n        block_hash: BlockHash,\n        responder: Responder<Option<Vec<NodeId>>>,\n    },\n}\n\nimpl Display for BlockAccumulatorRequest {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            BlockAccumulatorRequest::GetPeersForBlock { block_hash, .. } => {\n                write!(f, \"get peers for {}\", block_hash)\n            }\n        }\n    }\n}\n\n#[derive(Debug, Serialize)]\npub(crate) enum BlockSynchronizerRequest {\n    NeedNext,\n    DishonestPeers,\n    SyncGlobalStates(Vec<(BlockHash, Digest)>),\n    Status {\n        responder: Responder<BlockSynchronizerStatus>,\n    },\n}\n\nimpl Display for BlockSynchronizerRequest {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            BlockSynchronizerRequest::NeedNext => {\n                write!(f, \"block synchronizer request: need next\")\n            }\n            BlockSynchronizerRequest::DishonestPeers => {\n                write!(f, \"block synchronizer request: dishonest peers\")\n            }\n            BlockSynchronizerRequest::Status { .. 
} => {\n                write!(f, \"block synchronizer request: status\")\n            }\n            BlockSynchronizerRequest::SyncGlobalStates(_) => {\n                write!(f, \"request to sync global states\")\n            }\n        }\n    }\n}\n\n/// A request to set the current shutdown trigger.\n#[derive(DataSize, Debug, Serialize)]\npub(crate) struct SetNodeStopRequest {\n    /// The specific stop-at spec.\n    ///\n    /// If `None`, clears the current stop at setting.\n    pub(crate) stop_at: Option<StopAtSpec>,\n    /// Responder to send the previously set stop-at spec to, if any.\n    pub(crate) responder: Responder<Option<StopAtSpec>>,\n}\n\nimpl Display for SetNodeStopRequest {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self.stop_at {\n            None => f.write_str(\"clear node stop\"),\n            Some(stop_at) => write!(f, \"set node stop to: {}\", stop_at),\n        }\n    }\n}\n\n/// A request to accept a new transaction.\n#[derive(DataSize, Debug, Serialize)]\npub(crate) struct AcceptTransactionRequest {\n    pub(crate) transaction: Transaction,\n    pub(crate) is_speculative: bool,\n    pub(crate) responder: Responder<Result<(), transaction_acceptor::Error>>,\n}\n\nimpl Display for AcceptTransactionRequest {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            f,\n            \"accept transaction {} is_speculative: {}\",\n            self.transaction.hash(),\n            self.is_speculative\n        )\n    }\n}\n"
  },
  {
    "path": "node/src/effect.rs",
    "content": "//! Effects subsystem.\n//!\n//! Effects describe things that the creator of the effect intends to happen, producing a value upon\n//! completion (they actually are boxed futures).\n//!\n//! A pinned, boxed future returning an event is called an effect and typed as an `Effect<Ev>`,\n//! where `Ev` is the event's type, as every effect must have its return value either wrapped in an\n//! event through [`EffectExt::event`](EffectExt::event) or ignored using\n//! [`EffectExt::ignore`](EffectExt::ignore). As an example, the\n//! [`handle_event`](crate::components::Component::handle_event) function of a component always\n//! returns `Effect<Self::Event>`.\n//!\n//! # A primer on events\n//!\n//! There are three distinct groups of events found around the node:\n//!\n//! * (unbound) events: These events are not associated with a particular reactor or component and\n//!   represent information or requests by themselves. An example is the\n//!   [`PeerBehaviorAnnouncement`](`crate::effect::announcements::PeerBehaviorAnnouncement`), it can\n//!   be emitted through an effect by different components and contains the ID of a peer that should\n//!   be shunned. It is not associated with a particular reactor or component though.\n//!\n//!   While the node is running, these unbound events cannot exist on their own, instead they are\n//!   typically converted into a concrete reactor event by the effect builder as soon as they are\n//!   created.\n//!\n//! * reactor events: A running reactor has a single event type that encompasses all possible\n//!   unbound events that can occur during its operation and all component events of components it\n//!   is made of. Usually they are implemented as one large `enum` with only newtype-variants.\n//!\n//! * component events: Every component defines its own set of events, typically for internal use.\n//!   If the component is able to process unbound events like announcements or requests, it will\n//!   
have a `From` implementation that allows converting them into a suitable component event.\n//!\n//!   Component events are also created from the return values of effects: While effects do not\n//!   return events themselves when called, their return values are turned first into component\n//!   events through the [`event`](EffectExt) method. In a second step, inside the\n//!   reactors routing code, `wrap_effect` will then convert from component to reactor event.\n//!\n//! # Using effects\n//!\n//! To create an effect, an `EffectBuilder` will be passed in by the calling reactor runner. For\n//! example, given an effect builder `effect_builder`, we can create a `set_timeout` future and turn\n//! it into an effect:\n//!\n//! ```ignore\n//! use std::time::Duration;\n//! use casper_node::effect::EffectExt;\n//!\n//! // Note: This is our \"component\" event.\n//! enum Event {\n//!     ThreeSecondsElapsed(Duration)\n//! }\n//!\n//! effect_builder\n//!     .set_timeout(Duration::from_secs(3))\n//!     .event(Event::ThreeSecondsElapsed);\n//! ```\n//!\n//! This example will produce an effect that, after three seconds, creates an\n//! `Event::ThreeSecondsElapsed`. Note that effects do nothing on their own, they need to be passed\n//! to a [`reactor`](../reactor/index.html) to be executed.\n//!\n//! # Arbitrary effects\n//!\n//! While it is technically possible to turn any future into an effect, it is in general advisable\n//! to only use the methods on [`EffectBuilder`] or short, anonymous futures to create effects.\n//!\n//! # Announcements and requests\n//!\n//! Events are usually classified into either announcements or requests, although these properties\n//! are not reflected in the type system.\n//!\n//! **Announcements** are events that are essentially \"fire-and-forget\"; the component that created\n//! the effect resulting in the creation of the announcement will never expect an \"answer\".\n//! 
Announcements are often dispatched to multiple components by the reactor; since that usually\n//! involves a [`clone`](`Clone::clone`), they should be kept light.\n//!\n//! A good example is the arrival of a new transaction passed in by a client. Depending on the setup\n//! it may be stored, buffered or, in certain testing setups, just discarded. None of this is a\n//! concern of the component that talks to the client and deserializes the incoming transaction\n//! though, instead it simply returns an effect that produces an announcement.\n//!\n//! **Requests** are complex events that are used when a component needs something from other\n//! components. Typically, an effect (which uses [`EffectBuilder::make_request`] in its\n//! implementation) is called resulting in the actual request being scheduled and handled. In\n//! contrast to announcements, requests must always be handled by exactly one component.\n//!\n//! Every request has a [`Responder`]-typed field, which a handler of a request calls to produce\n//! another effect that will send the return value to the original requesting component. Failing to\n//! 
call the [`Responder::respond`] function will result in a runtime warning.\n\npub(crate) mod announcements;\npub(crate) mod diagnostics_port;\npub(crate) mod incoming;\npub(crate) mod requests;\n\nuse std::{\n    any::type_name,\n    borrow::Cow,\n    collections::{BTreeMap, BTreeSet, HashMap, HashSet},\n    fmt::{self, Debug, Display, Formatter},\n    future::Future,\n    mem,\n    sync::Arc,\n    time::{Duration, Instant},\n};\n\nuse datasize::DataSize;\nuse futures::{channel::oneshot, future::BoxFuture, FutureExt};\nuse once_cell::sync::Lazy;\nuse serde::{Serialize, Serializer};\nuse smallvec::{smallvec, SmallVec};\nuse tokio::{sync::Semaphore, time};\nuse tracing::{debug, error, warn};\n\nuse casper_binary_port::{\n    ConsensusStatus, ConsensusValidatorChanges, LastProgress, NetworkName, RecordId, Uptime,\n};\nuse casper_storage::{\n    block_store::types::ApprovalsHashes,\n    data_access_layer::{\n        prefixed_values::{PrefixedValuesRequest, PrefixedValuesResult},\n        tagged_values::{TaggedValuesRequest, TaggedValuesResult},\n        AddressableEntityResult, BalanceRequest, BalanceResult, EraValidatorsRequest,\n        EraValidatorsResult, ExecutionResultsChecksumResult, PutTrieRequest, PutTrieResult,\n        QueryRequest, QueryResult, SeigniorageRecipientsRequest, SeigniorageRecipientsResult,\n        TrieRequest, TrieResult,\n    },\n    DbRawBytesSpec,\n};\nuse casper_types::{\n    execution::{Effects as ExecutionEffects, ExecutionResult},\n    Approval, AvailableBlockRange, Block, BlockHash, BlockHeader, BlockSignatures,\n    BlockSynchronizerStatus, BlockV2, ChainspecRawBytes, DeployHash, Digest, EntityAddr, EraId,\n    ExecutionInfo, FinalitySignature, FinalitySignatureId, FinalitySignatureV2, HashAddr, Key,\n    NextUpgrade, Package, PackageAddr, ProtocolUpgradeConfig, PublicKey, TimeDiff, Timestamp,\n    Transaction, TransactionHash, TransactionId, Transfer, U512,\n};\n\nuse crate::{\n    components::{\n        block_synchronizer::{\n       
     GlobalStateSynchronizerError, GlobalStateSynchronizerResponse, TrieAccumulatorError,\n            TrieAccumulatorResponse,\n        },\n        consensus::{ClContext, EraDump, ProposedBlock},\n        contract_runtime::SpeculativeExecutionResult,\n        diagnostics_port::StopAtSpec,\n        fetcher::{FetchItem, FetchResult},\n        gossiper::GossipItem,\n        network::{blocklist::BlocklistJustification, FromIncoming, NetworkInsights},\n        transaction_acceptor,\n    },\n    contract_runtime::ExecutionPreState,\n    failpoints::FailpointActivation,\n    reactor::{main_reactor::ReactorState, EventQueueHandle, QueueKind},\n    types::{\n        appendable_block::AppendableBlock, BlockExecutionResultsOrChunk,\n        BlockExecutionResultsOrChunkId, BlockWithMetadata, ExecutableBlock, FinalizedBlock,\n        InvalidProposalError, LegacyDeploy, MetaBlock, MetaBlockState, NodeId, TransactionHeader,\n    },\n    utils::{fmt_limit::FmtLimit, SharedFlag, Source},\n};\nuse announcements::{\n    BlockAccumulatorAnnouncement, ConsensusAnnouncement, ContractRuntimeAnnouncement,\n    ControlAnnouncement, FatalAnnouncement, FetchedNewBlockAnnouncement,\n    FetchedNewFinalitySignatureAnnouncement, GossiperAnnouncement, MetaBlockAnnouncement,\n    PeerBehaviorAnnouncement, QueueDumpFormat, TransactionAcceptorAnnouncement,\n    TransactionBufferAnnouncement, UnexecutedBlockAnnouncement, UpgradeWatcherAnnouncement,\n};\nuse casper_storage::data_access_layer::EntryPointExistsResult;\nuse diagnostics_port::DumpConsensusStateRequest;\nuse requests::{\n    AcceptTransactionRequest, BeginGossipRequest, BlockAccumulatorRequest,\n    BlockSynchronizerRequest, BlockValidationRequest, ChainspecRawBytesRequest, ConsensusRequest,\n    ContractRuntimeRequest, FetcherRequest, MakeBlockExecutableRequest, MarkBlockCompletedRequest,\n    MetricsRequest, NetworkInfoRequest, NetworkRequest, ReactorInfoRequest, SetNodeStopRequest,\n    StorageRequest, SyncGlobalStateRequest, 
TransactionBufferRequest, TrieAccumulatorRequest,\n    UpgradeWatcherRequest,\n};\n\n/// A resource that will never be available, thus trying to acquire it will wait forever.\nstatic UNOBTAINABLE: Lazy<Semaphore> = Lazy::new(|| Semaphore::new(0));\n\n/// A pinned, boxed future that produces one or more events.\npub(crate) type Effect<Ev> = BoxFuture<'static, Multiple<Ev>>;\n\n/// Multiple effects in a container.\npub(crate) type Effects<Ev> = Multiple<Effect<Ev>>;\n\n/// A small collection of rarely more than two items.\n///\n/// Stored in a `SmallVec` to avoid allocations in case there are less than three items grouped. The\n/// size of two items is chosen because one item is the most common use case, and large items are\n/// typically boxed. In the latter case two pointers and one enum variant discriminator is almost\n/// the same size as an empty vec, which is two pointers.\npub(crate) type Multiple<T> = SmallVec<[T; 2]>;\n\n/// The type of peers that should receive the gossip message.\n#[derive(Debug, Serialize, PartialEq, Eq, Hash, Copy, Clone, DataSize)]\npub(crate) enum GossipTarget {\n    /// Both validators and non validators.\n    Mixed(EraId),\n    /// All peers.\n    All,\n}\n\nimpl Display for GossipTarget {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            GossipTarget::Mixed(era_id) => write!(formatter, \"gossip target mixed for {}\", era_id),\n            GossipTarget::All => write!(formatter, \"gossip target all\"),\n        }\n    }\n}\n\n/// A responder satisfying a request.\n#[must_use]\n#[derive(DataSize)]\npub(crate) struct Responder<T> {\n    /// Sender through which the response ultimately should be sent.\n    sender: Option<oneshot::Sender<T>>,\n    /// Reactor flag indicating shutdown.\n    is_shutting_down: SharedFlag,\n}\n\n/// A responder that will automatically send a `None` on drop.\n#[must_use]\n#[derive(DataSize, Debug)]\npub(crate) struct 
AutoClosingResponder<T>(Responder<Option<T>>);\n\nimpl<T> AutoClosingResponder<T> {\n    /// Creates a new auto closing responder from a responder of `Option<T>`.\n    pub(crate) fn from_opt_responder(responder: Responder<Option<T>>) -> Self {\n        AutoClosingResponder(responder)\n    }\n\n    /// Extracts the inner responder.\n    fn into_inner(mut self) -> Responder<Option<T>> {\n        let is_shutting_down = self.0.is_shutting_down;\n        mem::replace(\n            &mut self.0,\n            Responder {\n                sender: None,\n                is_shutting_down,\n            },\n        )\n    }\n}\n\nimpl<T: Debug> AutoClosingResponder<T> {\n    /// Send `Some(data)` to the origin of the request.\n    pub(crate) async fn respond(self, data: T) {\n        self.into_inner().respond(Some(data)).await;\n    }\n\n    /// Send `None` to the origin of the request.\n    pub(crate) async fn respond_none(self) {\n        self.into_inner().respond(None).await;\n    }\n}\n\nimpl<T> Drop for AutoClosingResponder<T> {\n    fn drop(&mut self) {\n        if let Some(sender) = self.0.sender.take() {\n            debug!(\n                sending_value = %self.0,\n                \"responding None by dropping auto-close responder\"\n            );\n            // We still haven't answered, send an answer.\n            if let Err(_unsent_value) = sender.send(None) {\n                debug!(\n                    unsent_value = %self.0,\n                    \"failed to auto-close responder, ignoring\"\n                );\n            }\n        }\n    }\n}\n\nimpl<T: 'static + Send> Responder<T> {\n    /// Creates a new `Responder`.\n    #[inline]\n    fn new(sender: oneshot::Sender<T>, is_shutting_down: SharedFlag) -> Self {\n        Responder {\n            sender: Some(sender),\n            is_shutting_down,\n        }\n    }\n\n    /// Helper method for tests.\n    ///\n    /// Allows creating a responder manually, without observing the shutdown flag. 
This function\n    /// should not be used, unless you are writing alternative infrastructure, e.g. for tests.\n    #[cfg(test)]\n    #[inline]\n    pub(crate) fn without_shutdown(sender: oneshot::Sender<T>) -> Self {\n        Responder::new(sender, SharedFlag::global_shared())\n    }\n}\n\nimpl<T: Debug> Responder<T> {\n    /// Send `data` to the origin of the request.\n    pub(crate) async fn respond(mut self, data: T) {\n        if let Some(sender) = self.sender.take() {\n            if let Err(data) = sender.send(data) {\n                // If we cannot send a response down the channel, it means the original requester is\n                // no longer interested in our response. This typically happens during shutdowns, or\n                // in cases where an originating external request has been cancelled.\n\n                debug!(\n                    data=?FmtLimit::new(1000, &data),\n                    \"ignored failure to send response to request down oneshot channel\"\n                );\n            }\n        } else {\n            error!(\n                data=?FmtLimit::new(1000, &data),\n                \"tried to send a value down a responder channel, but it was already used\"\n            );\n        }\n    }\n}\n\nimpl<T> Debug for Responder<T> {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(formatter, \"Responder<{}>\", type_name::<T>(),)\n    }\n}\n\nimpl<T> Display for Responder<T> {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(formatter, \"responder({})\", type_name::<T>(),)\n    }\n}\n\nimpl<T> Drop for Responder<T> {\n    fn drop(&mut self) {\n        if self.sender.is_some() {\n            if self.is_shutting_down.is_set() {\n                debug!(\n                    responder=?self,\n                    \"ignored dropping of responder during shutdown\"\n                );\n            } else {\n                // This is usually a very serious error, as another 
component will now be stuck.\n                //\n                // See the code `make_request` for more details.\n                error!(\n                    responder=?self,\n                    \"dropped without being responded to outside of shutdown\"\n                );\n            }\n        }\n    }\n}\n\nimpl<T> Serialize for Responder<T> {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        serializer.serialize_str(&format!(\"{:?}\", self))\n    }\n}\n\nimpl<T> Serialize for AutoClosingResponder<T> {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        self.0.serialize(serializer)\n    }\n}\n\n/// Effect extension for futures, used to convert futures into actual effects.\npub(crate) trait EffectExt: Future + Send {\n    /// Finalizes a future into an effect that returns a single event.\n    ///\n    /// The function `f` is used to translate the returned value from an effect into an event.\n    fn event<U, F>(self, f: F) -> Effects<U>\n    where\n        F: FnOnce(Self::Output) -> U + 'static + Send,\n        U: 'static,\n        Self: Sized;\n\n    /// Finalizes a future into an effect that runs but drops the result.\n    fn ignore<Ev>(self) -> Effects<Ev>;\n}\n\n/// Effect extension for futures, used to convert futures returning a `Result` into two different\n/// effects.\npub(crate) trait EffectResultExt {\n    /// The type the future will return if `Ok`.\n    type Value;\n    /// The type the future will return if `Err`.\n    type Error;\n\n    /// Finalizes a future returning a `Result` into two different effects.\n    ///\n    /// The function `f_ok` is used to translate the returned value from an effect into an event,\n    /// while the function `f_err` does the same for a potential error.\n    fn result<U, F, G>(self, f_ok: F, f_err: G) -> Effects<U>\n    where\n        F: FnOnce(Self::Value) -> U + 'static + Send,\n        G: FnOnce(Self::Error) -> U + 'static + 
Send,\n        U: 'static;\n}\n\nimpl<T> EffectExt for T\nwhere\n    T: Future + Send + 'static + Sized,\n{\n    fn event<U, F>(self, f: F) -> Effects<U>\n    where\n        F: FnOnce(Self::Output) -> U + 'static + Send,\n        U: 'static,\n    {\n        smallvec![self.map(f).map(|item| smallvec![item]).boxed()]\n    }\n\n    fn ignore<Ev>(self) -> Effects<Ev> {\n        smallvec![self.map(|_| Multiple::new()).boxed()]\n    }\n}\n\nimpl<T, V, E> EffectResultExt for T\nwhere\n    T: Future<Output = Result<V, E>> + Send + 'static + Sized,\n{\n    type Value = V;\n    type Error = E;\n\n    fn result<U, F, G>(self, f_ok: F, f_err: G) -> Effects<U>\n    where\n        F: FnOnce(V) -> U + 'static + Send,\n        G: FnOnce(E) -> U + 'static + Send,\n        U: 'static,\n    {\n        smallvec![self\n            .map(|result| result.map_or_else(f_err, f_ok))\n            .map(|item| smallvec![item])\n            .boxed()]\n    }\n}\n\n/// A builder for [`Effect`](type.Effect.html)s.\n///\n/// Provides methods allowing the creation of effects which need to be scheduled on the reactor's\n/// event queue, without giving direct access to this queue.\n///\n/// The `REv` type parameter indicates which reactor event effects created by this builder will\n/// produce as side effects.\n#[derive(Debug)]\npub(crate) struct EffectBuilder<REv: 'static> {\n    /// A handle to the referenced event queue.\n    event_queue: EventQueueHandle<REv>,\n}\n\n// Implement `Clone` and `Copy` manually, as `derive` will make it depend on `REv` otherwise.\nimpl<REv> Clone for EffectBuilder<REv> {\n    fn clone(&self) -> Self {\n        *self\n    }\n}\n\nimpl<REv> Copy for EffectBuilder<REv> {}\n\nimpl<REv> EffectBuilder<REv> {\n    /// Creates a new effect builder.\n    pub(crate) fn new(event_queue: EventQueueHandle<REv>) -> Self {\n        EffectBuilder { event_queue }\n    }\n\n    /// Extract the event queue handle out of the effect builder.\n    pub(crate) fn into_inner(self) -> 
EventQueueHandle<REv> {\n        self.event_queue\n    }\n\n    /// Performs a request.\n    ///\n    /// Given a request `Q`, that when completed will yield a result of `T`, produces a future that\n    /// will\n    ///\n    /// 1. create an event to send the request to the respective component (thus `Q: Into<REv>`),\n    /// 2. wait for a response and return it.\n    ///\n    /// This function is usually only used internally by effects implemented on the effects builder,\n    /// but IO components may also make use of it.\n    ///\n    /// # Cancellation safety\n    ///\n    /// This future is cancellation safe: If it is dropped without being polled, it indicates\n    /// that the original requester is no longer interested in the result, which will be discarded.\n    pub(crate) async fn make_request<T, Q, F>(self, f: F, queue_kind: QueueKind) -> T\n    where\n        T: Send + 'static,\n        Q: Into<REv>,\n        F: FnOnce(Responder<T>) -> Q,\n    {\n        let (event, wait_future) = self.create_request_parts(f);\n\n        // Schedule the request before awaiting the response.\n        self.event_queue.schedule(event, queue_kind).await;\n        wait_future.await\n    }\n\n    /// Creates the part necessary to make a request.\n    ///\n    /// A request usually consists of two parts: The request event that needs to be scheduled on the\n    /// reactor queue and associated future that allows waiting for the response. 
This function\n    /// creates both of them without processing or spawning either.\n    ///\n    /// Usually you will want to call the higher level `make_request` function.\n    pub(crate) fn create_request_parts<T, Q, F>(self, f: F) -> (REv, impl Future<Output = T>)\n    where\n        T: Send + 'static,\n        Q: Into<REv>,\n        F: FnOnce(Responder<T>) -> Q,\n    {\n        // Prepare a channel.\n        let (sender, receiver) = oneshot::channel();\n\n        // Create response function.\n        let responder = Responder::new(sender, self.event_queue.shutdown_flag());\n\n        // Now inject the request event into the event loop.\n        let request_event = f(responder).into();\n\n        let fut = async move {\n            match receiver.await {\n                Ok(value) => value,\n                Err(err) => {\n                    // The channel should usually not be closed except during shutdowns, as it\n                    // indicates a panic or disappearance of the remote that is\n                    // supposed to process the request.\n                    //\n                    // If it does happen, we pretend nothing happened instead of crashing.\n                    if self.event_queue.shutdown_flag().is_set() {\n                        debug!(%err, channel=?type_name::<T>(), \"ignoring closed channel due to shutdown\");\n                    } else {\n                        error!(%err, channel=?type_name::<T>(), \"request for channel closed, this may be a bug? 
\\\n                            check if a component is stuck from now on\");\n                    }\n\n                    // We cannot produce any value to satisfy the request, so we just abandon this\n                    // task by waiting on a resource we can never acquire.\n                    let _ = UNOBTAINABLE.acquire().await;\n                    panic!(\"should never obtain unobtainable semaphore\");\n                }\n            }\n        };\n\n        (request_event, fut)\n    }\n\n    /// Run and end effect immediately.\n    ///\n    /// Can be used to trigger events from effects when combined with `.event`. Do not use this to\n    /// \"do nothing\", as it will still cause a task to be spawned.\n    #[inline(always)]\n    #[allow(clippy::manual_async_fn)]\n    pub(crate) fn immediately(self) -> impl Future<Output = ()> + Send {\n        // Note: This function is implemented manually without `async` sugar because the `Send`\n        // inference seems to not work in all cases otherwise.\n        async {}\n    }\n\n    /// Reports a fatal error.  
Normally called via the `crate::fatal!()` macro.\n    ///\n    /// Usually causes the node to cease operations quickly and exit/crash.\n    pub(crate) async fn fatal(self, file: &'static str, line: u32, msg: String)\n    where\n        REv: From<FatalAnnouncement>,\n    {\n        self.event_queue\n            .schedule(FatalAnnouncement { file, line, msg }, QueueKind::Control)\n            .await;\n    }\n\n    /// Sets a timeout.\n    pub(crate) async fn set_timeout(self, timeout: Duration) -> Duration {\n        let then = Instant::now();\n        time::sleep(timeout).await;\n        then.elapsed()\n    }\n\n    /// Retrieve a snapshot of the nodes current metrics formatted as string.\n    ///\n    /// If an error occurred producing the metrics, `None` is returned.\n    pub(crate) async fn get_metrics(self) -> Option<String>\n    where\n        REv: From<MetricsRequest>,\n    {\n        self.make_request(\n            |responder| MetricsRequest::RenderNodeMetricsText { responder },\n            QueueKind::Api,\n        )\n        .await\n    }\n\n    /// Sends a network message.\n    ///\n    /// The message is queued and sent, but no delivery guaranteed. Will return after the message\n    /// has been buffered in the outgoing kernel buffer and thus is subject to backpressure.\n    pub(crate) async fn send_message<P>(self, dest: NodeId, payload: P)\n    where\n        REv: From<NetworkRequest<P>>,\n    {\n        self.make_request(\n            |responder| NetworkRequest::SendMessage {\n                dest: Box::new(dest),\n                payload: Box::new(payload),\n                respond_after_queueing: false,\n                auto_closing_responder: AutoClosingResponder::from_opt_responder(responder),\n            },\n            QueueKind::Network,\n        )\n        .await;\n    }\n\n    /// Enqueues a network message.\n    ///\n    /// The message is queued in \"fire-and-forget\" fashion, there is no guarantee that the peer\n    /// will receive it. 
Returns as soon as the message is queued inside the networking component.\n    pub(crate) async fn enqueue_message<P>(self, dest: NodeId, payload: P)\n    where\n        REv: From<NetworkRequest<P>>,\n    {\n        self.make_request(\n            |responder| NetworkRequest::SendMessage {\n                dest: Box::new(dest),\n                payload: Box::new(payload),\n                respond_after_queueing: true,\n                auto_closing_responder: AutoClosingResponder::from_opt_responder(responder),\n            },\n            QueueKind::Network,\n        )\n        .await;\n    }\n\n    /// Broadcasts a network message to validator peers in the given era.\n    pub(crate) async fn broadcast_message_to_validators<P>(self, payload: P, era_id: EraId)\n    where\n        REv: From<NetworkRequest<P>>,\n    {\n        self.make_request(\n            |responder| {\n                debug!(\"validator broadcast for {}\", era_id);\n                NetworkRequest::ValidatorBroadcast {\n                    payload: Box::new(payload),\n                    era_id,\n                    auto_closing_responder: AutoClosingResponder::from_opt_responder(responder),\n                }\n            },\n            QueueKind::Network,\n        )\n        .await;\n    }\n\n    /// Gossips a network message.\n    ///\n    /// A low-level \"gossip\" function, selects `count` randomly chosen nodes on the network,\n    /// excluding the indicated ones, and sends each a copy of the message.\n    ///\n    /// Returns the IDs of the chosen nodes.\n    pub(crate) async fn gossip_message<P>(\n        self,\n        payload: P,\n        gossip_target: GossipTarget,\n        count: usize,\n        exclude: HashSet<NodeId>,\n    ) -> HashSet<NodeId>\n    where\n        REv: From<NetworkRequest<P>>,\n        P: Send,\n    {\n        self.make_request(\n            |responder| NetworkRequest::Gossip {\n                payload: Box::new(payload),\n                gossip_target,\n             
   count,\n                exclude,\n                auto_closing_responder: AutoClosingResponder::from_opt_responder(responder),\n            },\n            QueueKind::Network,\n        )\n        .await\n        .unwrap_or_default()\n    }\n\n    /// Gets a structure describing the current network status.\n    pub(crate) async fn get_network_insights(self) -> NetworkInsights\n    where\n        REv: From<NetworkInfoRequest>,\n    {\n        self.make_request(\n            |responder| NetworkInfoRequest::Insight { responder },\n            QueueKind::Regular,\n        )\n        .await\n    }\n\n    /// Gets a map of the current network peers to their socket addresses.\n    pub(crate) async fn network_peers(self) -> BTreeMap<NodeId, String>\n    where\n        REv: From<NetworkInfoRequest>,\n    {\n        self.make_request(\n            |responder| NetworkInfoRequest::Peers { responder },\n            QueueKind::Api,\n        )\n        .await\n    }\n\n    /// Gets up to `count` fully-connected network peers in random order.\n    pub async fn get_fully_connected_peers(self, count: usize) -> Vec<NodeId>\n    where\n        REv: From<NetworkInfoRequest>,\n    {\n        self.make_request(\n            |responder| NetworkInfoRequest::FullyConnectedPeers { count, responder },\n            QueueKind::NetworkInfo,\n        )\n        .await\n    }\n\n    /// Gets up to `count` fully-connected network validators in random order.\n    pub async fn get_fully_connected_validators(self, count: usize, era_id: EraId) -> Vec<NodeId>\n    where\n        REv: From<NetworkInfoRequest>,\n    {\n        self.make_request(\n            |responder| NetworkInfoRequest::FullyConnectedValidators {\n                count,\n                era_id,\n                responder,\n            },\n            QueueKind::NetworkInfo,\n        )\n        .await\n    }\n\n    /// Announces which transactions have expired.\n    pub(crate) async fn announce_expired_transactions(self, hashes: 
Vec<TransactionHash>)\n    where\n        REv: From<TransactionBufferAnnouncement>,\n    {\n        self.event_queue\n            .schedule(\n                TransactionBufferAnnouncement::TransactionsExpired(hashes),\n                QueueKind::Validation,\n            )\n            .await;\n    }\n\n    /// Announces an incoming network message.\n    pub(crate) async fn announce_incoming<P>(self, sender: NodeId, payload: P)\n    where\n        REv: FromIncoming<P>,\n    {\n        self.event_queue\n            .schedule(\n                <REv as FromIncoming<P>>::from_incoming(sender, payload),\n                QueueKind::NetworkIncoming,\n            )\n            .await;\n    }\n\n    /// Announces that a gossiper has received a new item, where the item's ID is the complete item.\n    pub(crate) async fn announce_complete_item_received_via_gossip<T: GossipItem>(self, item: T::Id)\n    where\n        REv: From<GossiperAnnouncement<T>>,\n    {\n        assert!(\n            T::ID_IS_COMPLETE_ITEM,\n            \"{} must be an item where the ID _is_ the complete item\",\n            item\n        );\n        self.event_queue\n            .schedule(\n                GossiperAnnouncement::NewCompleteItem(item),\n                QueueKind::Gossip,\n            )\n            .await;\n    }\n\n    /// Announces that a gossiper has received a full item, where the item's ID is NOT the complete\n    /// item.\n    pub(crate) async fn announce_item_body_received_via_gossip<T: GossipItem>(\n        self,\n        item: Box<T>,\n        sender: NodeId,\n    ) where\n        REv: From<GossiperAnnouncement<T>>,\n    {\n        self.event_queue\n            .schedule(\n                GossiperAnnouncement::NewItemBody { item, sender },\n                QueueKind::Gossip,\n            )\n            .await;\n    }\n\n    /// Announces that the block accumulator has received and stored a new finality signature.\n    pub(crate) async fn announce_finality_signature_accepted(\n   
     self,\n        finality_signature: Box<FinalitySignatureV2>,\n    ) where\n        REv: From<BlockAccumulatorAnnouncement>,\n    {\n        self.event_queue\n            .schedule(\n                BlockAccumulatorAnnouncement::AcceptedNewFinalitySignature { finality_signature },\n                QueueKind::FinalitySignature,\n            )\n            .await;\n    }\n\n    /// Request that a block be made executable, if able to: `ExecutableBlock`.\n    ///\n    /// Completion means that the block can be enqueued for processing by the execution engine via\n    /// the contract_runtime component.\n    pub(crate) async fn make_block_executable(\n        self,\n        block_hash: BlockHash,\n    ) -> Option<ExecutableBlock>\n    where\n        REv: From<MakeBlockExecutableRequest>,\n    {\n        self.make_request(\n            |responder| MakeBlockExecutableRequest {\n                block_hash,\n                responder,\n            },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    /// Request that a block with a specific height be marked completed.\n    ///\n    /// Completion means that the block itself (along with its header) and all of its transactions\n    /// have been persisted to storage and its global state root hash is missing no dependencies\n    /// in the global state.\n    pub(crate) async fn mark_block_completed(self, block_height: u64) -> bool\n    where\n        REv: From<MarkBlockCompletedRequest>,\n    {\n        self.make_request(\n            |responder| MarkBlockCompletedRequest {\n                block_height,\n                responder,\n            },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    /// Try to accept a transaction received from the JSON-RPC server.\n    pub(crate) async fn try_accept_transaction(\n        self,\n        transaction: Transaction,\n        is_speculative: bool,\n    ) -> Result<(), transaction_acceptor::Error>\n    where\n        REv: 
From<AcceptTransactionRequest>,\n    {\n        self.make_request(\n            |responder| AcceptTransactionRequest {\n                transaction,\n                is_speculative,\n                responder,\n            },\n            QueueKind::Api,\n        )\n        .await\n    }\n\n    /// Announces that a transaction not previously stored has now been accepted and stored.\n    pub(crate) fn announce_new_transaction_accepted(\n        self,\n        transaction: Arc<Transaction>,\n        source: Source,\n    ) -> impl Future<Output = ()>\n    where\n        REv: From<TransactionAcceptorAnnouncement>,\n    {\n        self.event_queue.schedule(\n            TransactionAcceptorAnnouncement::AcceptedNewTransaction {\n                transaction,\n                source,\n            },\n            QueueKind::Validation,\n        )\n    }\n\n    /// Announces that we have received a gossip message from this peer,\n    /// implying the peer holds the indicated item.\n    pub(crate) async fn announce_gossip_received<T>(self, item_id: T::Id, sender: NodeId)\n    where\n        REv: From<GossiperAnnouncement<T>>,\n        T: GossipItem,\n    {\n        self.event_queue\n            .schedule(\n                GossiperAnnouncement::GossipReceived { item_id, sender },\n                QueueKind::Gossip,\n            )\n            .await;\n    }\n\n    /// Announces that we have finished gossiping the indicated item.\n    pub(crate) async fn announce_finished_gossiping<T>(self, item_id: T::Id)\n    where\n        REv: From<GossiperAnnouncement<T>>,\n        T: GossipItem,\n    {\n        self.event_queue\n            .schedule(\n                GossiperAnnouncement::FinishedGossiping(item_id),\n                QueueKind::Gossip,\n            )\n            .await;\n    }\n\n    pub(crate) fn announce_invalid_transaction(\n        self,\n        transaction: Transaction,\n        source: Source,\n    ) -> impl Future<Output = ()>\n    where\n        REv: 
From<TransactionAcceptorAnnouncement>,\n    {\n        self.event_queue.schedule(\n            TransactionAcceptorAnnouncement::InvalidTransaction {\n                transaction,\n                source,\n            },\n            QueueKind::Validation,\n        )\n    }\n\n    /// Announces upgrade activation point read.\n    pub(crate) async fn upgrade_watcher_announcement(self, maybe_next_upgrade: Option<NextUpgrade>)\n    where\n        REv: From<UpgradeWatcherAnnouncement>,\n    {\n        self.event_queue\n            .schedule(\n                UpgradeWatcherAnnouncement(maybe_next_upgrade),\n                QueueKind::Control,\n            )\n            .await;\n    }\n\n    /// Announces a committed Step success.\n    pub(crate) async fn announce_commit_step_success(self, era_id: EraId, effects: ExecutionEffects)\n    where\n        REv: From<ContractRuntimeAnnouncement>,\n    {\n        self.event_queue\n            .schedule(\n                ContractRuntimeAnnouncement::CommitStepSuccess { era_id, effects },\n                QueueKind::ContractRuntime,\n            )\n            .await;\n    }\n\n    pub(crate) async fn update_contract_runtime_state(self, new_pre_state: ExecutionPreState)\n    where\n        REv: From<ContractRuntimeRequest>,\n    {\n        self.event_queue\n            .schedule(\n                ContractRuntimeRequest::UpdatePreState { new_pre_state },\n                QueueKind::ContractRuntime,\n            )\n            .await;\n    }\n\n    /// Announces validators for upcoming era.\n    pub(crate) async fn announce_upcoming_era_validators(\n        self,\n        era_that_is_ending: EraId,\n        upcoming_era_validators: BTreeMap<EraId, BTreeMap<PublicKey, U512>>,\n    ) where\n        REv: From<ContractRuntimeAnnouncement>,\n    {\n        self.event_queue\n            .schedule(\n                ContractRuntimeAnnouncement::UpcomingEraValidators {\n                    era_that_is_ending,\n                    
upcoming_era_validators,\n                },\n                QueueKind::ContractRuntime,\n            )\n            .await;\n    }\n\n    pub(crate) async fn announce_new_era_gas_price(self, era_id: EraId, next_era_gas_price: u8)\n    where\n        REv: From<ContractRuntimeAnnouncement>,\n    {\n        self.event_queue\n            .schedule(\n                ContractRuntimeAnnouncement::NextEraGasPrice {\n                    era_id,\n                    next_era_gas_price,\n                },\n                QueueKind::ContractRuntime,\n            )\n            .await;\n    }\n\n    /// Begins gossiping an item.\n    pub(crate) async fn begin_gossip<T>(self, item_id: T::Id, source: Source, target: GossipTarget)\n    where\n        T: GossipItem,\n        REv: From<BeginGossipRequest<T>>,\n    {\n        self.make_request(\n            |responder| BeginGossipRequest {\n                item_id,\n                source,\n                target,\n                responder,\n            },\n            QueueKind::Gossip,\n        )\n        .await;\n    }\n\n    /// Puts the given block into the linear block store.\n    pub(crate) async fn put_block_to_storage(self, block: Arc<Block>) -> bool\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::PutBlock { block, responder },\n            QueueKind::ToStorage,\n        )\n        .await\n    }\n\n    /// Puts the given approvals hashes into the linear block store.\n    pub(crate) async fn put_approvals_hashes_to_storage(\n        self,\n        approvals_hashes: Box<ApprovalsHashes>,\n    ) -> bool\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::PutApprovalsHashes {\n                approvals_hashes,\n                responder,\n            },\n            QueueKind::ToStorage,\n        )\n        .await\n    }\n\n    /// Puts the given block and approvals hashes into 
the linear block store.\n    pub(crate) async fn put_executed_block_to_storage(\n        self,\n        block: Arc<BlockV2>,\n        approvals_hashes: Box<ApprovalsHashes>,\n        execution_results: HashMap<TransactionHash, ExecutionResult>,\n    ) -> bool\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::PutExecutedBlock {\n                block,\n                approvals_hashes,\n                execution_results,\n                responder,\n            },\n            QueueKind::ToStorage,\n        )\n        .await\n    }\n\n    /// Gets the requested block from the linear block store.\n    pub(crate) async fn get_block_from_storage(self, block_hash: BlockHash) -> Option<Block>\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::GetBlock {\n                block_hash,\n                responder,\n            },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    pub(crate) async fn get_block_utilization(\n        self,\n        era_id: EraId,\n        block_height: u64,\n        transaction_count: u64,\n    ) -> Option<(u64, u64)>\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::GetBlockUtilizationScore {\n                era_id,\n                block_height,\n                switch_block_utilization: transaction_count,\n                responder,\n            },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    pub(crate) async fn is_block_stored(self, block_hash: BlockHash) -> bool\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::IsBlockStored {\n                block_hash,\n                responder,\n            },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    /// Gets the 
requested `ApprovalsHashes` from storage.\n    pub(crate) async fn get_approvals_hashes_from_storage(\n        self,\n        block_hash: BlockHash,\n    ) -> Option<ApprovalsHashes>\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::GetApprovalsHashes {\n                block_hash,\n                responder,\n            },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    pub(crate) async fn get_raw_data(\n        self,\n        record_id: RecordId,\n        key: Vec<u8>,\n    ) -> Option<DbRawBytesSpec>\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::GetRawData {\n                record_id,\n                key,\n                responder,\n            },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    /// Gets the requested block header from the linear block store.\n    pub(crate) async fn get_block_header_from_storage(\n        self,\n        block_hash: BlockHash,\n        only_from_available_block_range: bool,\n    ) -> Option<BlockHeader>\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::GetBlockHeader {\n                block_hash,\n                only_from_available_block_range,\n                responder,\n            },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    pub(crate) async fn get_block_header_at_height_from_storage(\n        self,\n        block_height: u64,\n        only_from_available_block_range: bool,\n    ) -> Option<BlockHeader>\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::GetBlockHeaderByHeight {\n                block_height,\n                only_from_available_block_range,\n                responder,\n            },\n            
QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    pub(crate) async fn get_latest_switch_block_header_from_storage(self) -> Option<BlockHeader>\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::GetLatestSwitchBlockHeader { responder },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    pub(crate) async fn get_switch_block_header_by_era_id_from_storage(\n        self,\n        era_id: EraId,\n    ) -> Option<BlockHeader>\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::GetSwitchBlockHeaderByEra { era_id, responder },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    /// Gets the requested signature for a given block hash.\n    pub(crate) async fn get_signature_from_storage(\n        self,\n        block_hash: BlockHash,\n        public_key: PublicKey,\n    ) -> Option<FinalitySignature>\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::GetBlockSignature {\n                block_hash,\n                public_key: Box::new(public_key),\n                responder,\n            },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    pub(crate) async fn get_execution_results_from_storage(\n        self,\n        block_hash: BlockHash,\n    ) -> Option<Vec<(TransactionHash, TransactionHeader, ExecutionResult)>>\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::GetExecutionResults {\n                block_hash,\n                responder,\n            },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    /// Puts a block header to storage.\n    pub(crate) async fn put_block_header_to_storage(self, block_header: Box<BlockHeader>) -> bool\n    where\n        REv: 
From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::PutBlockHeader {\n                block_header,\n                responder,\n            },\n            QueueKind::ToStorage,\n        )\n        .await\n    }\n\n    /// Puts the requested block signatures into storage.\n    ///\n    /// If `signatures.proofs` is empty, no attempt to store will be made, an error will be logged,\n    /// and this function will return `false`.\n    pub(crate) async fn put_signatures_to_storage(self, signatures: BlockSignatures) -> bool\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::PutBlockSignatures {\n                signatures,\n                responder,\n            },\n            QueueKind::ToStorage,\n        )\n        .await\n    }\n\n    pub(crate) async fn put_finality_signature_to_storage(\n        self,\n        signature: FinalitySignature,\n    ) -> bool\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::PutFinalitySignature {\n                signature: Box::new(signature),\n                responder,\n            },\n            QueueKind::ToStorage,\n        )\n        .await\n    }\n\n    /// Gets the requested block's transfers from storage.\n    pub(crate) async fn get_block_transfers_from_storage(\n        self,\n        block_hash: BlockHash,\n    ) -> Option<Vec<Transfer>>\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::GetBlockTransfers {\n                block_hash,\n                responder,\n            },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    /// Returns the era IDs of the blocks in which the given transactions were executed.  
If none\n    /// of the transactions have been executed yet, an empty set will be returned.\n    pub(crate) async fn get_transactions_era_ids(\n        self,\n        transaction_hashes: HashSet<TransactionHash>,\n    ) -> HashSet<EraId>\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::GetTransactionsEraIds {\n                transaction_hashes,\n                responder,\n            },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    /// Requests the highest complete block.\n    pub(crate) async fn get_highest_complete_block_from_storage(self) -> Option<Block>\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::GetHighestCompleteBlock { responder },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    /// Requests the highest complete block header.\n    pub(crate) async fn get_highest_complete_block_header_from_storage(self) -> Option<BlockHeader>\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::GetHighestCompleteBlockHeader { responder },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    /// Requests the height range of fully available blocks (not just block headers).\n    pub(crate) async fn get_available_block_range_from_storage(self) -> AvailableBlockRange\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::GetAvailableBlockRange { responder },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    /// Synchronize global state under the given root hash.\n    pub(crate) async fn sync_global_state(\n        self,\n        block_hash: BlockHash,\n        state_root_hash: Digest,\n    ) -> Result<GlobalStateSynchronizerResponse, GlobalStateSynchronizerError>\n    where\n     
   REv: From<SyncGlobalStateRequest>,\n    {\n        self.make_request(\n            |responder| SyncGlobalStateRequest {\n                block_hash,\n                state_root_hash,\n                responder,\n            },\n            QueueKind::SyncGlobalState,\n        )\n        .await\n    }\n\n    /// Get a trie or chunk by its ID.\n    pub(crate) async fn get_trie(self, request: TrieRequest) -> TrieResult\n    where\n        REv: From<ContractRuntimeRequest>,\n    {\n        self.make_request(\n            |responder| ContractRuntimeRequest::GetTrie { request, responder },\n            QueueKind::ContractRuntime,\n        )\n        .await\n    }\n\n    pub(crate) async fn get_reactor_state(self) -> ReactorState\n    where\n        REv: From<ReactorInfoRequest>,\n    {\n        self.make_request(\n            |responder| ReactorInfoRequest::ReactorState { responder },\n            QueueKind::Regular,\n        )\n        .await\n    }\n\n    pub(crate) async fn get_last_progress(self) -> LastProgress\n    where\n        REv: From<ReactorInfoRequest>,\n    {\n        self.make_request(\n            |responder| ReactorInfoRequest::LastProgress { responder },\n            QueueKind::Regular,\n        )\n        .await\n    }\n\n    pub(crate) async fn get_uptime(self) -> Uptime\n    where\n        REv: From<ReactorInfoRequest>,\n    {\n        self.make_request(\n            |responder| ReactorInfoRequest::Uptime { responder },\n            QueueKind::Regular,\n        )\n        .await\n    }\n\n    pub(crate) async fn get_network_name(self) -> NetworkName\n    where\n        REv: From<ReactorInfoRequest>,\n    {\n        self.make_request(\n            |responder| ReactorInfoRequest::NetworkName { responder },\n            QueueKind::Regular,\n        )\n        .await\n    }\n\n    #[allow(unused)]\n    pub(crate) async fn get_balance_holds_interval(self) -> TimeDiff\n    where\n        REv: From<ReactorInfoRequest>,\n    {\n        
self.make_request(\n            |responder| ReactorInfoRequest::BalanceHoldsInterval { responder },\n            QueueKind::Regular,\n        )\n        .await\n    }\n\n    pub(crate) async fn get_block_synchronizer_status(self) -> BlockSynchronizerStatus\n    where\n        REv: From<BlockSynchronizerRequest>,\n    {\n        self.make_request(\n            |responder| BlockSynchronizerRequest::Status { responder },\n            QueueKind::Regular,\n        )\n        .await\n    }\n\n    /// Puts a trie into the trie store; succeeds only if all the children of the trie are already\n    /// present in the store.\n    /// Returns the digest under which the trie was stored if successful.\n    pub(crate) async fn put_trie_if_all_children_present(\n        self,\n        request: PutTrieRequest,\n    ) -> PutTrieResult\n    where\n        REv: From<ContractRuntimeRequest>,\n    {\n        self.make_request(\n            |responder| ContractRuntimeRequest::PutTrie { request, responder },\n            QueueKind::ContractRuntime,\n        )\n        .await\n    }\n\n    pub(crate) async fn get_current_gas_price(self, era_id: EraId) -> Option<u8>\n    where\n        REv: From<ContractRuntimeRequest>,\n    {\n        self.make_request(\n            |responder| ContractRuntimeRequest::GetEraGasPrice { era_id, responder },\n            QueueKind::ContractRuntime,\n        )\n        .await\n    }\n\n    pub(crate) async fn put_transaction_to_storage(self, transaction: Transaction) -> bool\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::PutTransaction {\n                transaction: Arc::new(transaction),\n                responder,\n            },\n            QueueKind::ToStorage,\n        )\n        .await\n    }\n\n    /// Gets the requested transactions from storage.\n    ///\n    /// Returns the \"original\" transactions, which are the first received by the node, along with a\n    /// 
potentially different set of approvals used during execution of the recorded block.\n    pub(crate) async fn get_transactions_from_storage(\n        self,\n        transaction_hashes: Vec<TransactionHash>,\n    ) -> SmallVec<[Option<(Transaction, Option<BTreeSet<Approval>>)>; 1]>\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::GetTransactions {\n                transaction_hashes,\n                responder,\n            },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    /// Gets the requested transaction and its execution info from storage by TransactionHash.\n    pub(crate) async fn get_transaction_and_exec_info_from_storage(\n        self,\n        transaction_hash: TransactionHash,\n        with_finalized_approvals: bool,\n    ) -> Option<(Transaction, Option<ExecutionInfo>)>\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::GetTransactionAndExecutionInfo {\n                transaction_hash,\n                with_finalized_approvals,\n                responder,\n            },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    /// Gets the requested legacy deploy from the legacy deploy store by DeployHash only.\n    ///\n    /// Returns the legacy deploy containing the set of approvals used during execution of the\n    /// recorded block, if known.\n    pub(crate) async fn get_stored_legacy_deploy(\n        self,\n        deploy_hash: DeployHash,\n    ) -> Option<LegacyDeploy>\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::GetLegacyDeploy {\n                deploy_hash,\n                responder,\n            },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    /// Gets the requested transaction from storage by TransactionId.\n    ///\n    /// Returns the 
\"original\" transaction, which is the first received by the node, along with a\n    /// potentially different set of approvals used during execution of the recorded block.\n    pub(crate) async fn get_stored_transaction(\n        self,\n        transaction_id: TransactionId,\n    ) -> Option<Transaction>\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::GetTransaction {\n                transaction_id,\n                responder,\n            },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    pub(crate) async fn is_transaction_stored(self, transaction_id: TransactionId) -> bool\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::IsTransactionStored {\n                transaction_id,\n                responder,\n            },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    /// Stores the given execution results for the transactions in the given block in the linear\n    /// block store.\n    pub(crate) async fn put_execution_artifacts_to_storage(\n        self,\n        block_hash: BlockHash,\n        block_height: u64,\n        era_id: EraId,\n        execution_results: HashMap<TransactionHash, ExecutionResult>,\n    ) where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::PutExecutionResults {\n                block_hash: Box::new(block_hash),\n                block_height,\n                era_id,\n                execution_results,\n                responder,\n            },\n            QueueKind::ToStorage,\n        )\n        .await;\n    }\n\n    /// Gets the requested block and its finality signatures.\n    pub(crate) async fn get_block_at_height_with_metadata_from_storage(\n        self,\n        block_height: u64,\n        only_from_available_block_range: bool,\n    ) -> 
Option<BlockWithMetadata>\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::GetBlockAndMetadataByHeight {\n                block_height,\n                only_from_available_block_range,\n                responder,\n            },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    pub(crate) async fn collect_past_blocks_with_metadata(\n        self,\n        range: std::ops::Range<u64>,\n        only_from_available_block_range: bool,\n    ) -> Vec<Option<BlockWithMetadata>>\n    where\n        REv: From<StorageRequest>,\n    {\n        futures::future::join_all(range.into_iter().map(|block_height| {\n            self.get_block_at_height_with_metadata_from_storage(\n                block_height,\n                only_from_available_block_range,\n            )\n        }))\n        .await\n        .into_iter()\n        .collect()\n    }\n\n    /// Gets the requested finality signature from storage.\n    pub(crate) async fn get_finality_signature_from_storage(\n        self,\n        id: Box<FinalitySignatureId>,\n    ) -> Option<FinalitySignature>\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::GetFinalitySignature { id, responder },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    pub(crate) async fn is_finality_signature_stored(self, id: Box<FinalitySignatureId>) -> bool\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::IsFinalitySignatureStored { id, responder },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    /// Fetches an item from a fetcher.\n    pub(crate) async fn fetch<T>(\n        self,\n        id: T::Id,\n        peer: NodeId,\n        validation_metadata: Box<T::ValidationMetadata>,\n    ) -> FetchResult<T>\n    where\n        REv: 
From<FetcherRequest<T>>,\n        T: FetchItem + 'static,\n    {\n        self.make_request(\n            |responder| FetcherRequest {\n                id,\n                peer,\n                validation_metadata,\n                responder,\n            },\n            QueueKind::Fetch,\n        )\n        .await\n    }\n\n    pub(crate) async fn fetch_trie(\n        self,\n        hash: Digest,\n        peers: Vec<NodeId>,\n    ) -> Result<TrieAccumulatorResponse, TrieAccumulatorError>\n    where\n        REv: From<TrieAccumulatorRequest>,\n    {\n        self.make_request(\n            |responder| TrieAccumulatorRequest {\n                hash,\n                peers,\n                responder,\n            },\n            QueueKind::SyncGlobalState,\n        )\n        .await\n    }\n\n    /// Passes the timestamp of a future block for which transactions are to be proposed.\n    pub(crate) async fn request_appendable_block(\n        self,\n        timestamp: Timestamp,\n        era_id: EraId,\n        request_expiry: Timestamp,\n    ) -> AppendableBlock\n    where\n        REv: From<TransactionBufferRequest>,\n    {\n        self.make_request(\n            |responder| TransactionBufferRequest::GetAppendableBlock {\n                timestamp,\n                era_id,\n                request_expiry,\n                responder,\n            },\n            QueueKind::Consensus,\n        )\n        .await\n    }\n\n    /// Enqueues a finalized block execution.\n    pub(crate) async fn enqueue_block_for_execution(\n        self,\n        executable_block: ExecutableBlock,\n        meta_block_state: MetaBlockState,\n    ) where\n        REv: From<StorageRequest> + From<ContractRuntimeRequest>,\n    {\n        // Get the key block height for the current protocol version's activation point, i.e. 
the\n        // height of the final block of the previous protocol version.\n        let key_block_height_for_activation_point = self\n            .make_request(\n                |responder| StorageRequest::GetKeyBlockHeightForActivationPoint { responder },\n                QueueKind::FromStorage,\n            )\n            .await\n            .unwrap_or_else(|| {\n                warn!(\"key block height for current activation point unknown\");\n                0\n            });\n\n        self.event_queue\n            .schedule(\n                ContractRuntimeRequest::EnqueueBlockForExecution {\n                    executable_block,\n                    key_block_height_for_activation_point,\n                    meta_block_state,\n                },\n                QueueKind::ContractRuntime,\n            )\n            .await;\n    }\n\n    pub(crate) async fn enqueue_protocol_upgrade(\n        self,\n        upgrade_config: ProtocolUpgradeConfig,\n        next_block_height: u64,\n        parent_hash: BlockHash,\n        parent_seed: Digest,\n    ) where\n        REv: From<ContractRuntimeRequest>,\n    {\n        self.event_queue\n            .schedule(\n                ContractRuntimeRequest::DoProtocolUpgrade {\n                    protocol_upgrade_config: upgrade_config,\n                    next_block_height,\n                    parent_hash,\n                    parent_seed,\n                },\n                QueueKind::Control,\n            )\n            .await;\n    }\n\n    /// Checks whether the transactions included in the block exist on the network and that\n    /// the block is valid.\n    pub(crate) async fn validate_block(\n        self,\n        sender: NodeId,\n        proposed_block_height: u64,\n        block: ProposedBlock<ClContext>,\n    ) -> Result<(), Box<InvalidProposalError>>\n    where\n        REv: From<BlockValidationRequest>,\n    {\n        self.make_request(\n            |responder| BlockValidationRequest {\n                
proposed_block_height,\n                block,\n                sender,\n                responder,\n            },\n            QueueKind::Regular,\n        )\n        .await\n    }\n\n    /// Announces that a block has been proposed.\n    pub(crate) async fn announce_proposed_block(self, proposed_block: ProposedBlock<ClContext>)\n    where\n        REv: From<ConsensusAnnouncement>,\n    {\n        self.event_queue\n            .schedule(\n                ConsensusAnnouncement::Proposed(Box::new(proposed_block)),\n                QueueKind::Consensus,\n            )\n            .await;\n    }\n\n    /// Announces that a block has been finalized.\n    pub(crate) async fn announce_finalized_block(self, finalized_block: FinalizedBlock)\n    where\n        REv: From<ConsensusAnnouncement>,\n    {\n        self.event_queue\n            .schedule(\n                ConsensusAnnouncement::Finalized(Box::new(finalized_block)),\n                QueueKind::Consensus,\n            )\n            .await;\n    }\n\n    /// Announces that a meta block has been created or its state has changed.\n    pub(crate) async fn announce_meta_block(self, meta_block: MetaBlock)\n    where\n        REv: From<MetaBlockAnnouncement>,\n    {\n        self.event_queue\n            .schedule(MetaBlockAnnouncement(meta_block), QueueKind::Regular)\n            .await;\n    }\n\n    /// Announces that a finalized block has been created, but it was not\n    /// executed.\n    pub(crate) async fn announce_unexecuted_block(self, block_height: u64)\n    where\n        REv: From<UnexecutedBlockAnnouncement>,\n    {\n        self.event_queue\n            .schedule(\n                UnexecutedBlockAnnouncement(block_height),\n                QueueKind::Regular,\n            )\n            .await;\n    }\n\n    /// An equivocation has been detected.\n    pub(crate) async fn announce_fault_event(\n        self,\n        era_id: EraId,\n        public_key: PublicKey,\n        timestamp: Timestamp,\n    ) 
where\n        REv: From<ConsensusAnnouncement>,\n    {\n        self.event_queue\n            .schedule(\n                ConsensusAnnouncement::Fault {\n                    era_id,\n                    public_key: Box::new(public_key),\n                    timestamp,\n                },\n                QueueKind::Consensus,\n            )\n            .await;\n    }\n\n    /// Blocks a specific peer due to a transgression.\n    ///\n    /// This function will also emit a log message for the block.\n    pub(crate) async fn announce_block_peer_with_justification(\n        self,\n        offender: NodeId,\n        justification: BlocklistJustification,\n    ) where\n        REv: From<PeerBehaviorAnnouncement>,\n    {\n        warn!(%offender, %justification, \"banning peer\");\n        self.event_queue\n            .schedule(\n                PeerBehaviorAnnouncement::OffenseCommitted {\n                    offender: Box::new(offender),\n                    justification: Box::new(justification),\n                },\n                QueueKind::NetworkInfo,\n            )\n            .await;\n    }\n\n    /// Gets the next scheduled upgrade, if any.\n    pub(crate) async fn get_next_upgrade(self) -> Option<NextUpgrade>\n    where\n        REv: From<UpgradeWatcherRequest> + Send,\n    {\n        self.make_request(UpgradeWatcherRequest, QueueKind::Control)\n            .await\n    }\n\n    /// Requests a query be executed on the Contract Runtime component.\n    pub(crate) async fn query_global_state(self, request: QueryRequest) -> QueryResult\n    where\n        REv: From<ContractRuntimeRequest>,\n    {\n        self.make_request(\n            |responder| ContractRuntimeRequest::Query { request, responder },\n            QueueKind::ContractRuntime,\n        )\n        .await\n    }\n\n    /// Retrieves an `AddressableEntity` from under the given entity address (or key, if the former\n    /// is not found) in global state.\n    pub(crate) async fn 
get_addressable_entity(\n        self,\n        state_root_hash: Digest,\n        entity_addr: EntityAddr,\n    ) -> AddressableEntityResult\n    where\n        REv: From<ContractRuntimeRequest>,\n    {\n        self.make_request(\n            |responder| ContractRuntimeRequest::GetAddressableEntity {\n                state_root_hash,\n                entity_addr,\n                responder,\n            },\n            QueueKind::ContractRuntime,\n        )\n        .await\n    }\n\n    /// Retrieves an `EntryPointValue` from under the given key in global state if present.\n    pub(crate) async fn does_entry_point_exist(\n        self,\n        state_root_hash: Digest,\n        contract_hash: HashAddr,\n        entry_point_name: String,\n    ) -> EntryPointExistsResult\n    where\n        REv: From<ContractRuntimeRequest>,\n    {\n        self.make_request(\n            |responder| ContractRuntimeRequest::GetEntryPointExists {\n                state_root_hash,\n                contract_hash,\n                entry_point_name,\n                responder,\n            },\n            QueueKind::ContractRuntime,\n        )\n        .await\n    }\n\n    /// Retrieves a `Package` from under the given key in global state if present.\n    pub(crate) async fn get_package(\n        self,\n        state_root_hash: Digest,\n        package_addr: PackageAddr,\n    ) -> Option<Box<Package>>\n    where\n        REv: From<ContractRuntimeRequest>,\n    {\n        let key = Key::Hash(package_addr);\n        let query_request = QueryRequest::new(state_root_hash, key, vec![]);\n\n        match self.query_global_state(query_request).await {\n            QueryResult::RootNotFound | QueryResult::Failure(_) => None,\n            QueryResult::ValueNotFound(_) => {\n                let query_request =\n                    QueryRequest::new(state_root_hash, Key::SmartContract(package_addr), vec![]);\n                debug!(\"requesting under different key\");\n                if let 
QueryResult::Success { value, .. } =\n                    self.query_global_state(query_request).await\n                {\n                    value.into_package().map(Box::new)\n                } else {\n                    None\n                }\n            }\n            QueryResult::Success { value, .. } => value\n                .into_contract_package()\n                .map(Package::from)\n                .map(Box::new),\n        }\n    }\n\n    /// Requests a query be executed on the Contract Runtime component.\n    pub(crate) async fn get_balance(self, request: BalanceRequest) -> BalanceResult\n    where\n        REv: From<ContractRuntimeRequest>,\n    {\n        self.make_request(\n            |responder| ContractRuntimeRequest::GetBalance { request, responder },\n            QueueKind::ContractRuntime,\n        )\n        .await\n    }\n\n    /// Returns a map of validators weights for all eras as known from `root_hash`.\n    ///\n    /// This operation is read only.\n    pub(crate) async fn get_era_validators_from_contract_runtime(\n        self,\n        request: EraValidatorsRequest,\n    ) -> EraValidatorsResult\n    where\n        REv: From<ContractRuntimeRequest>,\n    {\n        self.make_request(\n            |responder| ContractRuntimeRequest::GetEraValidators { request, responder },\n            QueueKind::ContractRuntime,\n        )\n        .await\n    }\n\n    pub(crate) async fn get_seigniorage_recipients_snapshot_from_contract_runtime(\n        self,\n        request: SeigniorageRecipientsRequest,\n    ) -> SeigniorageRecipientsResult\n    where\n        REv: From<ContractRuntimeRequest>,\n    {\n        self.make_request(\n            |responder| ContractRuntimeRequest::GetSeigniorageRecipients { request, responder },\n            QueueKind::ContractRuntime,\n        )\n        .await\n    }\n\n    /// Requests a query be executed on the Contract Runtime component.\n    pub(crate) async fn get_tagged_values(self, request: 
TaggedValuesRequest) -> TaggedValuesResult\n    where\n        REv: From<ContractRuntimeRequest>,\n    {\n        self.make_request(\n            |responder| ContractRuntimeRequest::GetTaggedValues { request, responder },\n            QueueKind::ContractRuntime,\n        )\n        .await\n    }\n\n    pub(crate) async fn get_prefixed_values(\n        self,\n        request: PrefixedValuesRequest,\n    ) -> PrefixedValuesResult\n    where\n        REv: From<ContractRuntimeRequest>,\n    {\n        self.make_request(\n            |responder| ContractRuntimeRequest::QueryByPrefix { request, responder },\n            QueueKind::ContractRuntime,\n        )\n        .await\n    }\n\n    /// Returns the value of the execution results checksum stored in the ChecksumRegistry for the\n    /// given state root hash.\n    pub(crate) async fn get_execution_results_checksum(\n        self,\n        state_root_hash: Digest,\n    ) -> ExecutionResultsChecksumResult\n    where\n        REv: From<ContractRuntimeRequest>,\n    {\n        self.make_request(\n            |responder| ContractRuntimeRequest::GetExecutionResultsChecksum {\n                state_root_hash,\n                responder,\n            },\n            QueueKind::ContractRuntime,\n        )\n        .await\n    }\n\n    /// Get our public key from consensus, and if we're a validator, the next round length.\n    pub(crate) async fn consensus_status(self) -> Option<ConsensusStatus>\n    where\n        REv: From<ConsensusRequest>,\n    {\n        self.make_request(ConsensusRequest::Status, QueueKind::Consensus)\n            .await\n    }\n\n    /// Returns a list of validator status changes, by public key.\n    pub(crate) async fn get_consensus_validator_changes(self) -> ConsensusValidatorChanges\n    where\n        REv: From<ConsensusRequest>,\n    {\n        self.make_request(ConsensusRequest::ValidatorChanges, QueueKind::Consensus)\n            .await\n    }\n\n    /// Dump consensus state for a specific era, 
using the supplied function to serialize the\n    /// output.\n    pub(crate) async fn diagnostics_port_dump_consensus_state(\n        self,\n        era_id: Option<EraId>,\n        serialize: fn(&EraDump<'_>) -> Result<Vec<u8>, Cow<'static, str>>,\n    ) -> Result<Vec<u8>, Cow<'static, str>>\n    where\n        REv: From<DumpConsensusStateRequest>,\n    {\n        self.make_request(\n            |responder| DumpConsensusStateRequest {\n                era_id,\n                serialize,\n                responder,\n            },\n            QueueKind::Control,\n        )\n        .await\n    }\n\n    /// Dump the event queue contents to the diagnostics port, using the given serializer.\n    pub(crate) async fn diagnostics_port_dump_queue(self, dump_format: QueueDumpFormat)\n    where\n        REv: From<ControlAnnouncement>,\n    {\n        self.make_request(\n            |responder| ControlAnnouncement::QueueDumpRequest {\n                dump_format,\n                finished: responder,\n            },\n            QueueKind::Control,\n        )\n        .await;\n    }\n\n    /// Activates/deactivates a failpoint from a given activation.\n    pub(crate) async fn activate_failpoint(self, activation: FailpointActivation)\n    where\n        REv: From<ControlAnnouncement>,\n    {\n        self.event_queue\n            .schedule(\n                ControlAnnouncement::ActivateFailpoint { activation },\n                QueueKind::Control,\n            )\n            .await;\n    }\n\n    /// Announce that the node be shut down due to a request from a user.\n    pub(crate) async fn announce_user_shutdown_request(self)\n    where\n        REv: From<ControlAnnouncement>,\n    {\n        self.event_queue\n            .schedule(\n                ControlAnnouncement::ShutdownDueToUserRequest,\n                QueueKind::Control,\n            )\n            .await;\n    }\n\n    /// Announce that a block which wasn't previously stored on this node has been fetched and\n    
/// stored.\n    pub(crate) async fn announce_fetched_new_block(self, block: Arc<Block>, peer: NodeId)\n    where\n        REv: From<FetchedNewBlockAnnouncement>,\n    {\n        self.event_queue\n            .schedule(\n                FetchedNewBlockAnnouncement { block, peer },\n                QueueKind::Fetch,\n            )\n            .await;\n    }\n\n    /// Announce that a finality signature which wasn't previously stored on this node has been\n    /// fetched and stored.\n    pub(crate) async fn announce_fetched_new_finality_signature(\n        self,\n        finality_signature: Box<FinalitySignature>,\n        peer: NodeId,\n    ) where\n        REv: From<FetchedNewFinalitySignatureAnnouncement>,\n    {\n        self.event_queue\n            .schedule(\n                FetchedNewFinalitySignatureAnnouncement {\n                    finality_signature,\n                    peer,\n                },\n                QueueKind::Fetch,\n            )\n            .await;\n    }\n\n    /// Get the bytes for the chainspec file and genesis_accounts\n    /// and global_state bytes if the files are present.\n    pub(crate) async fn get_chainspec_raw_bytes(self) -> Arc<ChainspecRawBytes>\n    where\n        REv: From<ChainspecRawBytesRequest> + Send,\n    {\n        self.make_request(\n            ChainspecRawBytesRequest::GetChainspecRawBytes,\n            QueueKind::NetworkInfo,\n        )\n        .await\n    }\n\n    /// Stores a set of given finalized approvals in storage.\n    ///\n    /// Any previously stored finalized approvals for the given hash are quietly overwritten\n    pub(crate) async fn store_finalized_approvals(\n        self,\n        transaction_hash: TransactionHash,\n        finalized_approvals: BTreeSet<Approval>,\n    ) -> bool\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::StoreFinalizedApprovals {\n                transaction_hash,\n                
finalized_approvals,\n                responder,\n            },\n            QueueKind::ToStorage,\n        )\n        .await\n    }\n\n    /// Requests execution of a single transaction, without committing its effects.  Intended to be\n    /// used for debugging & discovery purposes.\n    pub(crate) async fn speculatively_execute(\n        self,\n        block_header: Box<BlockHeader>,\n        transaction: Box<Transaction>,\n    ) -> SpeculativeExecutionResult\n    where\n        REv: From<ContractRuntimeRequest>,\n    {\n        self.make_request(\n            |responder| ContractRuntimeRequest::SpeculativelyExecute {\n                block_header,\n                transaction,\n                responder,\n            },\n            QueueKind::ContractRuntime,\n        )\n        .await\n    }\n\n    /// Reads block execution results (or chunk) from Storage component.\n    pub(crate) async fn get_block_execution_results_or_chunk_from_storage(\n        self,\n        id: BlockExecutionResultsOrChunkId,\n    ) -> Option<BlockExecutionResultsOrChunk>\n    where\n        REv: From<StorageRequest>,\n    {\n        self.make_request(\n            |responder| StorageRequest::GetBlockExecutionResultsOrChunk { id, responder },\n            QueueKind::FromStorage,\n        )\n        .await\n    }\n\n    /// Gets peers for a given block from the block accumulator.\n    pub(crate) async fn get_block_accumulated_peers(\n        self,\n        block_hash: BlockHash,\n    ) -> Option<Vec<NodeId>>\n    where\n        REv: From<BlockAccumulatorRequest>,\n    {\n        self.make_request(\n            |responder| BlockAccumulatorRequest::GetPeersForBlock {\n                block_hash,\n                responder,\n            },\n            QueueKind::NetworkInfo,\n        )\n        .await\n    }\n\n    /// Set a new stopping point for the node.\n    ///\n    /// Returns a potentially previously set stop-at spec.\n    pub(crate) async fn set_node_stop_at(self, stop_at: 
Option<StopAtSpec>) -> Option<StopAtSpec>\n    where\n        REv: From<SetNodeStopRequest>,\n    {\n        self.make_request(\n            |responder| SetNodeStopRequest { stop_at, responder },\n            QueueKind::Control,\n        )\n        .await\n    }\n}\n\n/// Construct a fatal error effect.\n///\n/// This macro is a convenient wrapper around `EffectBuilder::fatal` that inserts the `file!()` and\n/// `line!()` number automatically.\n#[macro_export]\nmacro_rules! fatal {\n    ($effect_builder:expr, $($arg:tt)*) => {\n        $effect_builder.fatal(file!(), line!(), format!($($arg)*))\n    };\n}\n"
  },
  {
    "path": "node/src/failpoints.rs",
    "content": "//! Failpoint support.\n//!\n//! Failpoints can enabled on the node to inject faulty behavior at runtime, for testing and\n//! benchmarking purposes.\n//!\n//! # General usage\n//!\n//! Failpoints are created in code using `Failpoint`, and activated using a `FailpointActivation`.\n//! See the `failpoints::test::various_usecases` test for an example.\n\nuse std::{\n    fmt::{self, Debug, Display},\n    num::ParseFloatError,\n    str::FromStr,\n};\n\nuse datasize::DataSize;\nuse rand::{distributions::Uniform, prelude::Distribution, Rng};\nuse serde::{de::DeserializeOwned, Serialize};\nuse serde_json::Value;\nuse thiserror::Error;\nuse tracing::{info, instrument, trace, warn};\n\nuse crate::utils::opt_display::OptDisplay;\n\n/// A specific failpoint.\n#[derive(DataSize, Debug)]\npub(crate) struct Failpoint<T>\nwhere\n    T: DataSize,\n{\n    /// Key that activates the given failpoint.\n    #[data_size(skip)]\n    key: &'static str,\n    /// Subkey that potentially activates the given failpoint.\n    subkey: Option<String>,\n    /// The value of the failpoint, if any.\n    value: Option<T>,\n    /// Activation probability.\n    probability: Option<f32>,\n    /// Whether to trigger the failpoint only once.\n    once: bool,\n    /// Whether the failpoint has already fired.\n    fired: bool,\n}\n\nimpl<T> Failpoint<T>\nwhere\n    T: Debug + DeserializeOwned + DataSize,\n{\n    /// Creates a new failpoint with a given key.\n    #[inline(always)]\n    pub(crate) fn new(key: &'static str) -> Self {\n        Failpoint {\n            key,\n            subkey: None,\n            value: None,\n            probability: None,\n            once: false,\n            fired: false,\n        }\n    }\n\n    /// Creates a new failpoint with a given key and optional subkey.\n    #[inline]\n    #[allow(dead_code)]\n    pub(crate) fn new_with_subkey<S: ToString>(key: &'static str, subkey: S) -> Self {\n        Failpoint {\n            key,\n            subkey: 
Some(subkey.to_string()),\n            value: None,\n            probability: None,\n            once: false,\n            fired: false,\n        }\n    }\n\n    /// Update a failpoint from a given `FailpointActivation`.\n    ///\n    /// The failpoint will be changed if the given activation matches `key` and `subkey` only.\n    #[instrument(level = \"error\",\n                 fields(fp_key=self.key,\n                        fp_subkey=%OptDisplay::new(self.subkey.as_ref(), \"\")\n                       )\n                )]\n    pub(crate) fn update_from(&mut self, activation: &FailpointActivation) {\n        // Check if the failpoint matches.\n        if activation.key != self.key || activation.subkey != self.subkey {\n            trace!(\"not updating failpoint\");\n            return;\n        }\n\n        // Values can fail, so update these first.\n        if let Some(value) = activation.value.as_ref() {\n            match serde_json::from_value::<T>(value.clone()) {\n                Ok(value) => self.value = Some(value),\n                Err(err) => warn!(%err, \"failed to deserialize failpoint value\"),\n            }\n        } else {\n            self.value = None;\n        }\n\n        self.probability = activation.probability;\n        self.once = activation.once;\n        self.fired = false;\n\n        if self.value.is_some() {\n            info!(\"activated failpoint\");\n        } else {\n            info!(\"cleared failpoint\");\n        }\n    }\n\n    /// Fire the failpoint, if active.\n    ///\n    /// Returns the value of the failpoint, if it fired.\n    #[inline(always)]\n    pub(crate) fn fire<R: Rng>(&mut self, rng: &mut R) -> Option<&T> {\n        if self.value.is_some() {\n            self.do_fire(rng)\n        } else {\n            None\n        }\n    }\n\n    /// Inner `fire` implementation.\n    ///\n    /// `fire` is kept small for facilitate inlining and fast processing of disabled failpoints.\n    #[inline]\n    fn do_fire<R: 
Rng>(&mut self, rng: &mut R) -> Option<&T> {\n        if let Some(p) = self.probability {\n            let p_range = Uniform::new_inclusive(0.0, 1.0);\n            if p_range.sample(rng) > p as f64 {\n                return None;\n            }\n        }\n\n        if self.once && self.fired {\n            return None;\n        }\n\n        self.fired = true;\n        self.value()\n    }\n\n    /// Returns the value of the failpoint, if it is set.\n    #[inline]\n    fn value(&self) -> Option<&T> {\n        self.value.as_ref()\n    }\n}\n\n/// A parsed failpoint activation.\n#[derive(Clone, DataSize, Debug, PartialEq, Serialize)]\npub(crate) struct FailpointActivation {\n    key: String,\n    subkey: Option<String>,\n    #[data_size(skip)] // TODO: Add a `DataSize` implementation for JSON `Value`s.\n    value: Option<Value>,\n    probability: Option<f32>,\n    once: bool,\n}\n\nimpl Display for FailpointActivation {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        f.write_str(&self.key)?;\n\n        if let Some(subkey) = self.subkey.as_ref() {\n            write!(f, \",sub:{}\", subkey)?;\n        }\n\n        if let Some(p) = self.probability {\n            write!(f, \",p:{}\", p)?;\n        }\n\n        if self.once {\n            f.write_str(\",once\")?;\n        }\n\n        if let Some(value) = self.value.as_ref() {\n            // Note on the unwrap: Serializing a `Value` should never fail.\n            write!(f, \"={}\", serde_json::to_string(value).unwrap_or_default())?;\n        }\n\n        Ok(())\n    }\n}\n\nimpl FailpointActivation {\n    /// Creates a new [`FailpointActivation`] with the given `key`.\n    #[inline(always)]\n    pub(crate) fn new<S: ToString>(key: S) -> FailpointActivation {\n        FailpointActivation {\n            key: key.to_string(),\n            subkey: None,\n            value: None,\n            probability: None,\n            once: false,\n        }\n    }\n\n    /// Gets the key of this 
[`FailpointActivation`].\n    #[inline(always)]\n    pub(crate) fn key(&self) -> &str {\n        &self.key\n    }\n\n    /// Sets the subkey.\n    #[inline(always)]\n    pub(crate) fn subkey<S: ToString>(mut self, subkey: S) -> Self {\n        self.subkey = Some(subkey.to_string());\n        self\n    }\n\n    /// Sets the failpoint's value from JSON.\n    ///\n    /// # Panics\n    ///\n    /// Will panic if `value` does not cleanly serialize to a [`serde_json::Value`].\n    #[inline(always)]\n    #[allow(unused)]\n    pub(crate) fn value<T>(self, value: T) -> Self\n    where\n        T: Serialize,\n    {\n        let value_json: Value =\n            serde_json::to_value(value).expect(\"passed in value does not serialize to JSON\");\n\n        self.value_json(value_json)\n    }\n\n    /// Sets the failpoint's value from JSON.\n    #[inline(always)]\n    pub(crate) fn value_json(mut self, value: Value) -> Self {\n        self.value = Some(value);\n        self\n    }\n\n    /// Sets the probability of the failpoint firing.\n    ///\n    /// The value will be clamped to `[0.0, 1.0]`. 
A value of `NaN` will be converted to `0.0`.\n    #[inline(always)]\n    pub(crate) fn probability(mut self, probability: f32) -> Self {\n        // Note: We do not use `clamp`, since it does not remove `NaN`s.\n        self.probability = Some(probability.clamp(0.0, 1.0));\n        self\n    }\n\n    /// Sets the failpoint to fire only once.\n    #[inline(always)]\n    pub(crate) fn once(mut self) -> Self {\n        self.once = true;\n        self\n    }\n}\n\n/// Error parsing a failpoint activation.\n#[derive(Debug, Error)]\npub(crate) enum ParseError {\n    /// The provided value for the failpoint was not valid JSON.\n    #[error(\"invalid json value\")]\n    InvalidJson(#[source] serde_json::Error),\n    /// Left hand side contained no segments.\n    #[error(\"no key given\")]\n    MissingKey,\n    /// Invalid floating literal for probability\n    #[error(\"invvalid probability value\")]\n    InvalidProbability(#[source] ParseFloatError),\n    /// The given meta key is not valid.\n    #[error(\"not a known meta key: \\\"{0}\\\"\")]\n    InvalidMeta(String),\n}\n\nimpl FromStr for FailpointActivation {\n    type Err = ParseError;\n\n    fn from_str(raw: &str) -> Result<Self, Self::Err> {\n        let (raw_meta, value) = if let Some((left, right)) = raw.split_once('=') {\n            (\n                left,\n                Some(serde_json::from_str::<Value>(right).map_err(ParseError::InvalidJson)?),\n            )\n        } else {\n            (raw, None)\n        };\n\n        let mut fragments = raw_meta.split(',');\n        let key = fragments.next().ok_or(ParseError::MissingKey)?;\n        let mut fps = FailpointActivation::new(key);\n\n        for fragment in fragments {\n            let (meta, meta_value) = if let Some((left, right)) = fragment.split_once(':') {\n                (left, Some(right))\n            } else {\n                (fragment, None)\n            };\n\n            match (meta, meta_value) {\n                (\"sub\", Some(v)) => {\n   
                 fps = fps.subkey(v);\n                }\n                (\"p\", Some(raw_p)) => {\n                    fps = fps.probability(raw_p.parse().map_err(ParseError::InvalidProbability)?);\n                }\n                (\"once\", None) => {\n                    fps = fps.once();\n                }\n                (invalid, _) => return Err(ParseError::InvalidMeta(invalid.to_string())),\n            }\n        }\n\n        if let Some(value) = value {\n            fps = fps.value_json(value);\n        }\n\n        Ok(fps)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::str::FromStr;\n\n    use casper_types::{testing::TestRng, TimeDiff};\n    use serde_json::json;\n\n    use crate::testing::init_logging;\n\n    use super::{Failpoint, FailpointActivation};\n\n    #[test]\n    fn parse_failpoints() {\n        assert_eq!(\n            FailpointActivation::from_str(\"foobar\").expect(\"should parse\"),\n            FailpointActivation {\n                key: \"foobar\".to_owned(),\n                subkey: None,\n                value: None,\n                probability: None,\n                once: false\n            }\n        );\n\n        assert_eq!(\n            FailpointActivation::from_str(\"foobar,once\").expect(\"should parse\"),\n            FailpointActivation {\n                key: \"foobar\".to_owned(),\n                subkey: None,\n                value: None,\n                probability: None,\n                once: true\n            }\n        );\n\n        assert_eq!(\n            FailpointActivation::from_str(\"foobar,sub:xyz\").expect(\"should parse\"),\n            FailpointActivation {\n                key: \"foobar\".to_owned(),\n                subkey: Some(\"xyz\".to_owned()),\n                value: None,\n                probability: None,\n                once: false\n            }\n        );\n\n        assert_eq!(\n            FailpointActivation::from_str(\"foobar,p:0.5,sub:xyz,once\").expect(\"should parse\"),\n    
        FailpointActivation {\n                key: \"foobar\".to_owned(),\n                subkey: Some(\"xyz\".to_owned()),\n                value: None,\n                probability: Some(0.5),\n                once: true\n            }\n        );\n\n        assert_eq!(\n            FailpointActivation::from_str(\"foobar,p:0.5,sub:xyz,once=true\").expect(\"should parse\"),\n            FailpointActivation {\n                key: \"foobar\".to_owned(),\n                subkey: Some(\"xyz\".to_owned()),\n                value: Some(serde_json::json!(true)),\n                probability: Some(0.5),\n                once: true\n            }\n        );\n\n        assert_eq!(\n            FailpointActivation::from_str(\"foobar={\\\"hello\\\": \\\"world\\\", \\\"count\\\": 1}\")\n                .expect(\"should parse\"),\n            FailpointActivation {\n                key: \"foobar\".to_owned(),\n                subkey: None,\n                value: Some(serde_json::json!({\"hello\": \"world\", \"count\": 1})),\n                probability: None,\n                once: false\n            }\n        );\n    }\n\n    #[test]\n    fn clamping_works() {\n        assert_eq!(\n            FailpointActivation::new(\"test\")\n                .probability(-0.1)\n                .probability,\n            Some(0.0)\n        );\n        assert_eq!(\n            FailpointActivation::new(\"test\")\n                .probability(0.0)\n                .probability,\n            Some(0.0)\n        );\n        assert_eq!(\n            FailpointActivation::new(\"test\")\n                .probability(0.1)\n                .probability,\n            Some(0.1)\n        );\n        assert_eq!(\n            FailpointActivation::new(\"test\")\n                .probability(0.5)\n                .probability,\n            Some(0.5)\n        );\n        assert_eq!(\n            FailpointActivation::new(\"test\")\n                .probability(0.9)\n                .probability,\n           
 Some(0.9)\n        );\n        assert_eq!(\n            FailpointActivation::new(\"test\")\n                .probability(1.0)\n                .probability,\n            Some(1.0)\n        );\n        assert_eq!(\n            FailpointActivation::new(\"test\")\n                .probability(1.1)\n                .probability,\n            Some(1.0)\n        );\n    }\n\n    #[test]\n    fn display_works() {\n        assert_eq!(\n            FailpointActivation::from_str(\"foobar={\\\"hello\\\": \\\"world\\\", \\\"count\\\": 1}\")\n                .expect(\"should parse\")\n                .to_string(),\n            \"foobar={\\\"hello\\\":\\\"world\\\",\\\"count\\\":1}\"\n        );\n\n        assert_eq!(\n            FailpointActivation::from_str(\"foobar,p:0.5,sub:xyz,once=true\")\n                .expect(\"should parse\")\n                .to_string(),\n            \"foobar,sub:xyz,p:0.5,once=true\"\n        );\n\n        assert_eq!(\n            FailpointActivation::from_str(\"abc_123\")\n                .expect(\"should parse\")\n                .to_string(),\n            \"abc_123\"\n        );\n    }\n\n    #[test]\n    fn various_usecases() {\n        // Note: This function deliberately exerts different APIs of `FailpointActivation`. 
When\n        //       using `FailpointActivation` in tests, it is recommend to construct it using the\n        //       builder pattern as opposed to parsing it from strings.\n\n        init_logging();\n\n        let mut rng = TestRng::new();\n        let mut delay_send_fp = Failpoint::<TimeDiff>::new(\"example.delay_send\");\n\n        assert!(\n            delay_send_fp.fire(&mut rng).is_none(),\n            \"failpoint should be disabled\"\n        );\n\n        let unrelated_activation =\n            FailpointActivation::from_str(\"example.unrelated=\\\"1s\\\"\").unwrap();\n        delay_send_fp.update_from(&unrelated_activation);\n\n        assert!(\n            delay_send_fp.fire(&mut rng).is_none(),\n            \"failpoint should be disabled after unrelated activation\"\n        );\n\n        let activation =\n            FailpointActivation::new(\"example.delay_send\").value(TimeDiff::from_seconds(1));\n\n        delay_send_fp.update_from(&activation);\n\n        let diff = delay_send_fp\n            .fire(&mut rng)\n            .expect(\"should trigger failpoint\");\n        assert_eq!(*diff, TimeDiff::from_str(\"1s\").unwrap());\n\n        // Repeat, since `once` is not enabled.\n        let diff = delay_send_fp\n            .fire(&mut rng)\n            .expect(\"should trigger failpoint a second time\");\n        assert_eq!(*diff, TimeDiff::from_str(\"1s\").unwrap());\n        let diff = delay_send_fp\n            .fire(&mut rng)\n            .expect(\"should trigger failpoint a third time\");\n        assert_eq!(*diff, TimeDiff::from_str(\"1s\").unwrap());\n\n        let deactivation = FailpointActivation::from_str(\"example.delay_send\").unwrap();\n\n        delay_send_fp.update_from(&deactivation);\n\n        assert!(\n            delay_send_fp.fire(&mut rng).is_none(),\n            \"failpoint should be disabled\"\n        );\n\n        let once_activation = FailpointActivation::new(\"example.delay_send\")\n            .once()\n            
.value_json(json!(\"2s\"));\n        delay_send_fp.update_from(&once_activation);\n\n        let diff = delay_send_fp\n            .fire(&mut rng)\n            .expect(\"should trigger failpoint\");\n        assert_eq!(*diff, TimeDiff::from_str(\"2s\").unwrap());\n\n        // Repeat failpoint triggered once should not fire again.\n        assert!(delay_send_fp.fire(&mut rng).is_none());\n    }\n\n    #[test]\n    fn activation_primes_properly() {\n        let mut fp = Failpoint::<()>::new(\"some_failpoint\");\n\n        fp.update_from(&FailpointActivation::from_str(\"some_failpoint,p:0.5,once=null\").unwrap());\n\n        assert_eq!(fp.probability, Some(0.5));\n        assert!(fp.once);\n    }\n\n    #[test]\n    fn failpoint_probability_affects_failpoint() {\n        let mut rng = TestRng::new();\n        let mut fp = Failpoint::<()>::new(\"some_failpoint\");\n\n        // Full activation.\n        fp.update_from(&FailpointActivation::from_str(\"some_failpoint=null\").unwrap());\n        assert!(fp.fire(&mut rng).is_some());\n\n        // p:1.0 should be the same\n        fp.update_from(&FailpointActivation::from_str(\"some_failpoint,p:1.0=null\").unwrap());\n        assert!(fp.fire(&mut rng).is_some());\n\n        // p:0.0 essentially disables it\n        fp.update_from(&FailpointActivation::from_str(\"some_failpoint,p:0.0=null\").unwrap());\n        assert!(fp.fire(&mut rng).is_none());\n    }\n}\n"
  },
  {
    "path": "node/src/failpoints_disabled.rs",
    "content": "//! Failpoint stubs.\n//!\n//! This module stubs out enough of the failpoint API to work if the feature is disabled, but never\n//! activates them.\n\nuse std::{\n    fmt::{self, Display, Formatter},\n    marker::PhantomData,\n    str::FromStr,\n};\n\nuse datasize::DataSize;\nuse serde::Serialize;\nuse thiserror::Error;\n\n/// A dummy failpoint.\n#[derive(DataSize, Debug)]\npub(crate) struct Failpoint<T> {\n    _phantom: PhantomData<T>,\n}\n\nimpl<T> Failpoint<T> {\n    /// Creates a new failpoint with a given key.\n    #[inline(always)]\n    pub(crate) fn new(_key: &'static str) -> Self {\n        Failpoint {\n            _phantom: PhantomData,\n        }\n    }\n\n    /// Creates a new failpoint with a given key and optional subkey.\n    #[inline]\n    #[allow(dead_code)]\n    pub(crate) fn new_with_subkey<S: ToString>(_key: &'static str, _subkey: S) -> Self {\n        Failpoint {\n            _phantom: PhantomData,\n        }\n    }\n\n    /// Ignores the failpoint activation.\n    #[inline(always)]\n    pub(crate) fn update_from(&mut self, _activation: &FailpointActivation) {}\n\n    /// Returns `None`.\n    #[inline(always)]\n    pub(crate) fn fire<R>(&mut self, _rng: &mut R) -> Option<&T> {\n        None\n    }\n}\n\n/// A parsed failpoint activation.\n#[derive(Clone, DataSize, Debug, PartialEq, Serialize)]\npub(crate) struct FailpointActivation;\n\nimpl FailpointActivation {\n    #[allow(dead_code)]\n    pub(crate) fn new<S: ToString>(_key: S) -> FailpointActivation {\n        FailpointActivation\n    }\n\n    pub(crate) fn key(&self) -> &str {\n        \"\"\n    }\n}\n\nimpl Display for FailpointActivation {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        f.write_str(\"(no failpoint support)\")\n    }\n}\n\n/// Error parsing a failpoint activation.\n#[derive(Debug, Error)]\n#[error(\"no failpoint support enabled\")]\npub(crate) struct ParseError;\n\nimpl FromStr for FailpointActivation {\n    type Err = ParseError;\n\n    
#[inline(always)]\n    fn from_str(_raw: &str) -> Result<Self, Self::Err> {\n        Err(ParseError)\n    }\n}\n"
  },
  {
    "path": "node/src/lib.rs",
    "content": "//! # Casper blockchain node\n//!\n//! This crate contain the core application for the Casper blockchain. Run with `--help` to see\n//! available command-line arguments.\n//!\n//! ## Application structure\n//!\n//! While the [`main`](fn.main.html) function is the central entrypoint for the node application,\n//! its core event loop is found inside the [reactor](reactor/index.html).\n\n#![doc(html_root_url = \"https://docs.rs/casper-node/2.2.0\")]\n#![doc(\n    html_favicon_url = \"https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon_48.png\",\n    html_logo_url = \"https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon.png\",\n    test(attr(deny(warnings)))\n)]\n#![warn(\n    missing_docs,\n    trivial_casts,\n    trivial_numeric_casts,\n    unused_qualifications\n)]\n#![allow(clippy::bool_comparison)]\n\npub mod cli;\npub(crate) mod components;\nmod config_migration;\nmod data_migration;\npub(crate) mod effect;\n#[cfg_attr(not(feature = \"failpoints\"), path = \"failpoints_disabled.rs\")]\npub(crate) mod failpoints;\n\npub mod logging;\npub(crate) mod protocol;\npub(crate) mod reactor;\n#[cfg(test)]\npub(crate) mod testing;\npub(crate) mod tls;\npub mod types;\npub mod utils;\n\nuse std::{\n    env,\n    sync::{atomic::AtomicUsize, Arc},\n};\n\nuse ansi_term::Color::Red;\nuse once_cell::sync::Lazy;\n#[cfg(not(test))]\nuse rand::SeedableRng;\nuse signal_hook::{consts::TERM_SIGNALS, flag};\nuse tracing::warn;\n\npub(crate) use components::{\n    binary_port::Config as BinaryPortConfig, block_accumulator::Config as BlockAccumulatorConfig,\n    block_synchronizer::Config as BlockSynchronizerConfig,\n    block_validator::Config as BlockValidatorConfig, consensus::Config as ConsensusConfig,\n    contract_runtime::Config as ContractRuntimeConfig,\n    diagnostics_port::Config as DiagnosticsPortConfig,\n    event_stream_server::Config as EventStreamServerConfig, 
fetcher::Config as FetcherConfig,\n    gossiper::Config as GossipConfig, network::Config as NetworkConfig,\n    rest_server::Config as RestServerConfig,\n    transaction_acceptor::Config as TransactionAcceptorConfig,\n    transaction_buffer::Config as TransactionBufferConfig,\n    upgrade_watcher::Config as UpgradeWatcherConfig,\n};\npub use components::{\n    consensus, contract_runtime,\n    storage::{self, Config as StorageConfig},\n};\npub use reactor::main_reactor::Config as MainReactorConfig;\npub(crate) use types::NodeRng;\npub use utils::WithDir;\n\n/// The maximum thread count which should be spawned by the tokio runtime.\npub const MAX_THREAD_COUNT: usize = 512;\n\nfn version_string(color: bool) -> String {\n    let mut version = env!(\"CARGO_PKG_VERSION\").to_string();\n    if let Some(git_sha) = option_env!(\"NODE_GIT_SHA\") {\n        version = format!(\"{}-{}\", version, git_sha);\n    } else {\n        warn!(\n            \"git sha env var unavailable, casper-node build version will not include git short hash\"\n        );\n    }\n\n    // Add a `@DEBUG` (or similar) tag to release string on non-release builds.\n    if env!(\"NODE_BUILD_PROFILE\") != \"release\" {\n        version += \"@\";\n        let profile = env!(\"NODE_BUILD_PROFILE\").to_uppercase();\n        version.push_str(&if color {\n            Red.paint(&profile).to_string()\n        } else {\n            profile\n        });\n    }\n\n    version\n}\n\n/// Color version string for the compiled node. Filled in at build time, output allocated at\n/// runtime.\npub(crate) static VERSION_STRING_COLOR: Lazy<String> = Lazy::new(|| version_string(true));\n\n/// Version string for the compiled node. 
Filled in at build time, output allocated at runtime.\npub(crate) static VERSION_STRING: Lazy<String> = Lazy::new(|| version_string(false));\n\n/// Global value that indicates the currently running reactor should exit if it is non-zero.\npub(crate) static TERMINATION_REQUESTED: Lazy<Arc<AtomicUsize>> =\n    Lazy::new(|| Arc::new(AtomicUsize::new(0)));\n\n/// Setup UNIX signal hooks for current application.\npub(crate) fn setup_signal_hooks() {\n    for signal in TERM_SIGNALS {\n        flag::register_usize(\n            *signal,\n            Arc::clone(&*TERMINATION_REQUESTED),\n            *signal as usize,\n        )\n        .unwrap_or_else(|error| panic!(\"failed to register signal {}: {}\", signal, error));\n    }\n}\n\n/// Constructs a new `NodeRng`.\n#[cfg(not(test))]\npub(crate) fn new_rng() -> NodeRng {\n    NodeRng::from_entropy()\n}\n\n/// Constructs a new `NodeRng`.\n#[cfg(test)]\npub(crate) fn new_rng() -> NodeRng {\n    NodeRng::new()\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn version_string_format() {\n        let string = version_string(false);\n        let (prefix, profile) = string.split_once('@').unwrap_or((string.as_str(), \"\"));\n        let (version, sha) = prefix.split_once('-').unwrap_or((prefix, \"\"));\n\n        assert_eq!(version, env!(\"CARGO_PKG_VERSION\"));\n        assert_eq!(sha, env::var(\"NODE_GIT_SHA\").unwrap_or_default().as_str());\n        if env!(\"NODE_BUILD_PROFILE\") == \"release\" {\n            assert_eq!(profile, \"\");\n        } else {\n            assert_eq!(profile, env!(\"NODE_BUILD_PROFILE\").to_uppercase())\n        }\n    }\n\n    #[test]\n    fn version_string_color_format() {\n        let string = version_string(true);\n        let (prefix, profile) = string.split_once('@').unwrap_or((string.as_str(), \"\"));\n        let (version, sha) = prefix.split_once('-').unwrap_or((prefix, \"\"));\n\n        assert_eq!(version, env!(\"CARGO_PKG_VERSION\"));\n        assert_eq!(sha, 
env::var(\"NODE_GIT_SHA\").unwrap_or_default().as_str());\n        if env!(\"NODE_BUILD_PROFILE\") == \"release\" {\n            assert_eq!(profile, \"\");\n        } else {\n            assert_eq!(\n                profile,\n                Red.paint(env!(\"NODE_BUILD_PROFILE\").to_uppercase())\n                    .to_string()\n            );\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/logging.rs",
    "content": "//! Logging via the tracing crate.\n\nuse std::{env, fmt, io, string::ToString};\n\nuse ansi_term::{Color, Style};\nuse anyhow::anyhow;\nuse datasize::DataSize;\nuse once_cell::sync::OnceCell;\nuse serde::{Deserialize, Serialize};\nuse smallvec::SmallVec;\nuse tracing::{\n    field::{Field, Visit},\n    Event, Level, Subscriber,\n};\nuse tracing_subscriber::{\n    fmt::{\n        format::{self, FieldFn, Format, Json, JsonFields, Writer},\n        time::{FormatTime, SystemTime},\n        FmtContext, FormatEvent, FormatFields, FormattedFields, Layer,\n    },\n    layer::Layered,\n    registry::LookupSpan,\n    reload::{self, Handle},\n    EnvFilter, Registry,\n};\n\nconst LOG_VERBOSITY_LEVEL_ENVVAR: &str = \"RUST_LOG\";\n\nconst LOG_FIELD_MESSAGE: &str = \"message\";\nconst LOG_FIELD_TARGET: &str = \"log.target\";\nconst LOG_FIELD_MODULE: &str = \"log.module_path\";\nconst LOG_FIELD_FILE: &str = \"log.file\";\nconst LOG_FIELD_LINE: &str = \"log.line\";\n\n/// Global reload handle.\n///\n/// We use a static variable for the reload handle since our logger instance is also global.\nstatic RELOAD_HANDLE: OnceCell<ReloadHandle> = OnceCell::new();\n\n/// Logging configuration.\n#[derive(Clone, DataSize, Debug, Default, Deserialize, Serialize)]\n#[serde(deny_unknown_fields)]\npub struct LoggingConfig {\n    /// Output format for log.\n    pub format: LoggingFormat,\n\n    /// Colored output (has no effect if JSON format is enabled).\n    ///\n    /// If set, the logger will inject ANSI color codes into log messages.  
This is useful if\n    /// writing out to stdout or stderr on an ANSI terminal, but not so if writing to a logfile.\n    pub color: bool,\n\n    /// Abbreviate module names (has no effect if JSON format is enabled).\n    ///\n    /// If set, human-readable formats will abbreviate module names, `foo::bar::baz::bizz` will\n    /// turn into `f:b:b:bizz`.\n    pub abbreviate_modules: bool,\n}\n\nimpl LoggingConfig {\n    /// Creates a new instance of LoggingConfig.\n    #[cfg(test)]\n    pub fn new(format: LoggingFormat, color: bool, abbreviate_modules: bool) -> Self {\n        LoggingConfig {\n            format,\n            color,\n            abbreviate_modules,\n        }\n    }\n}\n\n/// Logging output format.\n///\n/// Defaults to \"text\"\".\n#[derive(Clone, DataSize, Debug, Deserialize, Serialize, Default)]\n#[serde(rename_all = \"lowercase\")]\npub enum LoggingFormat {\n    /// Text format.\n    #[default]\n    Text,\n    /// JSON format.\n    Json,\n}\n\n/// This is used to implement tracing's `FormatEvent` so that we can customize the way tracing\n/// events are formatted.\npub struct FmtEvent {\n    /// Whether to use ANSI color formatting or not.\n    ansi_color: bool,\n    /// Whether module segments should be shortened to first letter only.\n    abbreviate_modules: bool,\n}\n\nimpl FmtEvent {\n    fn new(ansi_color: bool, abbreviate_modules: bool) -> Self {\n        FmtEvent {\n            ansi_color,\n            abbreviate_modules,\n        }\n    }\n\n    fn enable_dimmed_if_ansi(&self, writer: &mut dyn fmt::Write) -> fmt::Result {\n        if self.ansi_color {\n            write!(writer, \"{}\", Style::new().dimmed().prefix())\n        } else {\n            Ok(())\n        }\n    }\n\n    fn disable_dimmed_if_ansi(&self, writer: &mut dyn fmt::Write) -> fmt::Result {\n        if self.ansi_color {\n            write!(writer, \"{}\", Style::new().dimmed().suffix())\n        } else {\n            Ok(())\n        }\n    }\n}\n\n// Used to gather the 
relevant details from the fields applied by the `tracing_log::LogTracer`,\n// which is used by logging macros when dependent crates use `log` rather than `tracing`.\n#[derive(Default)]\nstruct FieldVisitor {\n    module: Option<String>,\n    file: Option<String>,\n    line: Option<u32>,\n}\n\nimpl Visit for FieldVisitor {\n    fn record_str(&mut self, field: &Field, value: &str) {\n        if field.name() == LOG_FIELD_MODULE {\n            self.module = Some(value.to_string())\n        } else if field.name() == LOG_FIELD_FILE {\n            self.file = Some(value.to_string())\n        }\n    }\n\n    fn record_u64(&mut self, field: &Field, value: u64) {\n        if field.name() == LOG_FIELD_LINE {\n            self.line = Some(value as u32)\n        }\n    }\n\n    fn record_debug(&mut self, _field: &Field, _value: &dyn fmt::Debug) {}\n}\n\nimpl<S, N> FormatEvent<S, N> for FmtEvent\nwhere\n    S: Subscriber + for<'a> LookupSpan<'a>,\n    N: for<'a> FormatFields<'a> + 'static,\n{\n    fn format_event(\n        &self,\n        ctx: &FmtContext<'_, S, N>,\n        mut writer: Writer<'_>,\n        event: &Event<'_>,\n    ) -> fmt::Result {\n        // print the date/time with dimmed style if `ansi_color` is true\n        self.enable_dimmed_if_ansi(&mut writer)?;\n        SystemTime.format_time(&mut writer)?;\n        self.disable_dimmed_if_ansi(&mut writer)?;\n\n        // print the log level\n        let meta = event.metadata();\n        if self.ansi_color {\n            let color = match *meta.level() {\n                Level::TRACE => Color::Purple,\n                Level::DEBUG => Color::Blue,\n                Level::INFO => Color::Green,\n                Level::WARN => Color::Yellow,\n                Level::ERROR => Color::Red,\n            };\n\n            write!(\n                writer,\n                \" {}{:<6}{}\",\n                color.prefix(),\n                meta.level(),\n                color.suffix()\n            )?;\n        } else {\n            
write!(writer, \" {:<6}\", meta.level().to_string())?;\n        }\n\n        // print the span information as per\n        // https://github.com/tokio-rs/tracing/blob/21f28f74/tracing-subscriber/src/fmt/format/mod.rs#L667-L695\n        let mut span_seen = false;\n\n        ctx.visit_spans(|span| {\n            write!(writer, \"{}\", span.metadata().name())?;\n            span_seen = true;\n\n            let ext = span.extensions();\n            let fields = &ext\n                .get::<FormattedFields<N>>()\n                .expect(\"Unable to find FormattedFields in extensions; this is a bug\");\n            if !fields.is_empty() {\n                write!(writer, \"{{{}}}\", fields)?;\n            }\n            writer.write_char(':')\n        })?;\n\n        if span_seen {\n            writer.write_char(' ')?;\n        }\n\n        // print the module path, filename and line number with dimmed style if `ansi_color` is true\n        let mut field_visitor = FieldVisitor::default();\n        event.record(&mut field_visitor);\n        let module = {\n            let full_module_path = meta\n                .module_path()\n                .or(field_visitor.module.as_deref())\n                .unwrap_or_default();\n            if self.abbreviate_modules {\n                // Use a smallvec for going up to six levels deep.\n                let mut parts: SmallVec<[&str; 6]> = full_module_path.split(\"::\").collect();\n\n                let count = parts.len();\n                // Abbreviate all but last segment.\n                if count > 1 {\n                    for part in parts.iter_mut().take(count - 1) {\n                        assert!(part.is_ascii());\n                        *part = &part[0..1];\n                    }\n                }\n                // Use a single `:` to join the abbreviated modules to make the output even shorter.\n                parts.join(\":\")\n            } else {\n                full_module_path.to_owned()\n            }\n        
};\n\n        let file = if !self.abbreviate_modules {\n            meta.file()\n                .or(field_visitor.file.as_deref())\n                .unwrap_or_default()\n                .rsplit_once('/')\n                .map(|parts| parts.1)\n                .unwrap_or_default()\n        } else {\n            \"\"\n        };\n\n        let line = meta.line().or(field_visitor.line).unwrap_or_default();\n\n        if !module.is_empty() && (!file.is_empty() || self.abbreviate_modules) {\n            self.enable_dimmed_if_ansi(&mut writer)?;\n            write!(writer, \"[{} {}:{}] \", module, file, line,)?;\n            self.disable_dimmed_if_ansi(&mut writer)?;\n        }\n\n        // print the log message and other fields\n        ctx.format_fields(writer.by_ref(), event)?;\n        writeln!(writer)\n    }\n}\n\n/// Initializes the logging system with the default parameters.\n///\n/// See `init_params` for details.\n#[cfg(test)]\npub fn init() -> anyhow::Result<()> {\n    init_with_config(&Default::default())\n}\n\n/// A handle for reloading the logger.\n#[allow(clippy::type_complexity)] // Cannot be helped, unfortunately.\npub enum ReloadHandle {\n    /// Text-logger reload handle.\n    Text(Handle<EnvFilter, Layered<Layer<Registry, FieldFn<FormatDebugFn>, FmtEvent>, Registry>>),\n    /// JSON-logger reload handle.\n    Json(Handle<EnvFilter, Layered<Layer<Registry, JsonFields, Format<Json>>, Registry>>),\n}\n\nimpl ReloadHandle {\n    /// Swaps out the [`EnvFilter`] used to filter log events.\n    fn reload_env_filter(&self, new_filter: EnvFilter) -> Result<(), reload::Error> {\n        match self {\n            ReloadHandle::Text(handle) => handle.reload(new_filter),\n            ReloadHandle::Json(handle) => handle.reload(new_filter),\n        }\n    }\n\n    /// Returns a string representation of the current [`EnvFilter`], if set.\n    fn display_log_filter(&self) -> Result<String, reload::Error> {\n        match self {\n            
ReloadHandle::Text(handle) => handle.with_current(ToString::to_string),\n            ReloadHandle::Json(handle) => handle.with_current(ToString::to_string),\n        }\n    }\n}\n\n/// Swaps out the global [`EnvFilter`].\npub fn reload_global_env_filter(new_filter: EnvFilter) -> anyhow::Result<()> {\n    let handle = RELOAD_HANDLE\n        .get()\n        .ok_or_else(|| anyhow!(\"could not fetch reload handle - logger not initialized?\"))?;\n    handle.reload_env_filter(new_filter)?;\n\n    Ok(())\n}\n\n/// Returns a string representation of the current global [`EnvFilter`], if set.\npub fn display_global_env_filter() -> anyhow::Result<String> {\n    let handle = RELOAD_HANDLE\n        .get()\n        .ok_or_else(|| anyhow!(\"could not fetch reload handle - logger not initialized?\"))?;\n    let formatted = handle.display_log_filter()?;\n\n    Ok(formatted)\n}\n\n/// Type alias for the formatting function used.\npub type FormatDebugFn = fn(&mut Writer, &Field, &dyn fmt::Debug) -> fmt::Result;\n\nfn format_into_debug_writer(\n    writer: &mut Writer,\n    field: &Field,\n    value: &dyn fmt::Debug,\n) -> fmt::Result {\n    match field.name() {\n        LOG_FIELD_MESSAGE => write!(writer, \"{:?}\", value),\n        LOG_FIELD_TARGET | LOG_FIELD_MODULE | LOG_FIELD_FILE | LOG_FIELD_LINE => Ok(()),\n        _ => write!(writer, \"; {}={:?}\", field, value),\n    }\n}\n\n/// Initializes the logging system.\n///\n/// This function should only be called once during the lifetime of the application. 
Do not call\n/// this outside of the application or testing code, the installed logger is global.\n///\n/// See the `README.md` for hints on how to configure logging at runtime.\n// The `io::stdout as fn()...` casts are necessary, as is the `FormatDebugFn` cast.\n#[allow(trivial_casts)]\npub fn init_with_config(config: &LoggingConfig) -> anyhow::Result<()> {\n    let formatter = format::debug_fn(format_into_debug_writer as FormatDebugFn);\n\n    let filter = EnvFilter::new(\n        env::var(LOG_VERBOSITY_LEVEL_ENVVAR)\n            .as_deref()\n            .unwrap_or(\"warn,casper_node=info\"),\n    );\n\n    match config.format {\n        // Setup a new tracing-subscriber writing to `stdout` for logging.\n        LoggingFormat::Text => {\n            let builder = tracing_subscriber::fmt()\n                .with_writer(io::stdout as fn() -> io::Stdout)\n                .with_env_filter(filter)\n                .fmt_fields(formatter)\n                .event_format(FmtEvent::new(config.color, config.abbreviate_modules))\n                .with_filter_reloading();\n            let handle = ReloadHandle::Text(builder.reload_handle());\n            builder.try_init().map_err(|error| anyhow!(error))?;\n            drop(RELOAD_HANDLE.set(handle));\n            Ok(())\n        }\n\n        // JSON logging writes to `stdout` as well but uses the JSON format.\n        LoggingFormat::Json => {\n            let builder = tracing_subscriber::fmt()\n                .with_writer(io::stdout as fn() -> io::Stdout)\n                .with_env_filter(filter)\n                .json()\n                .with_filter_reloading();\n            let handle = ReloadHandle::Json(builder.reload_handle());\n            builder.try_init().map_err(|error| anyhow!(error))?;\n            drop(RELOAD_HANDLE.set(handle));\n            Ok(())\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/protocol.rs",
    "content": "//! A network message type used for communication between nodes\n\nuse std::{\n    fmt::{self, Display, Formatter},\n    sync::Arc,\n};\n\nuse derive_more::From;\nuse fmt::Debug;\nuse futures::{future::BoxFuture, FutureExt};\nuse hex_fmt::HexFmt;\nuse serde::{Deserialize, Serialize};\nuse strum::EnumDiscriminants;\n\nuse casper_types::{BlockV2, FinalitySignatureV2, Transaction};\n\nuse crate::{\n    components::{\n        consensus,\n        fetcher::{FetchItem, FetchResponse, Tag},\n        gossiper,\n        network::{EstimatorWeights, FromIncoming, GossipedAddress, MessageKind, Payload},\n    },\n    effect::{\n        incoming::{\n            ConsensusDemand, ConsensusMessageIncoming, FinalitySignatureIncoming, GossiperIncoming,\n            NetRequest, NetRequestIncoming, NetResponse, NetResponseIncoming, TrieDemand,\n            TrieRequest, TrieRequestIncoming, TrieResponse, TrieResponseIncoming,\n        },\n        AutoClosingResponder, EffectBuilder,\n    },\n    types::NodeId,\n};\n\n/// Reactor message.\n#[derive(Clone, From, Serialize, Deserialize, EnumDiscriminants)]\n#[strum_discriminants(derive(strum::EnumIter))]\npub(crate) enum Message {\n    /// Consensus component message.\n    #[from]\n    Consensus(consensus::ConsensusMessage),\n    /// Consensus component demand.\n    #[from]\n    ConsensusRequest(consensus::ConsensusRequestMessage),\n    /// Block gossiper component message.\n    #[from]\n    BlockGossiper(gossiper::Message<BlockV2>),\n    /// Deploy gossiper component message.\n    #[from]\n    TransactionGossiper(gossiper::Message<Transaction>),\n    #[from]\n    FinalitySignatureGossiper(gossiper::Message<FinalitySignatureV2>),\n    /// Address gossiper component message.\n    #[from]\n    AddressGossiper(gossiper::Message<GossipedAddress>),\n    /// Request to get an item from a peer.\n    GetRequest {\n        /// The type tag of the requested item.\n        tag: Tag,\n        /// The serialized ID of the requested 
item.\n        serialized_id: Vec<u8>,\n    },\n    /// Response to a `GetRequest`.\n    GetResponse {\n        /// The type tag of the contained item.\n        tag: Tag,\n        /// The serialized item.\n        serialized_item: Arc<[u8]>,\n    },\n    /// Finality signature.\n    #[from]\n    FinalitySignature(Box<FinalitySignatureV2>),\n}\n\nimpl Payload for Message {\n    #[inline]\n    fn message_kind(&self) -> MessageKind {\n        match self {\n            Message::Consensus(_) => MessageKind::Consensus,\n            Message::ConsensusRequest(_) => MessageKind::Consensus,\n            Message::BlockGossiper(_) => MessageKind::BlockGossip,\n            Message::TransactionGossiper(_) => MessageKind::TransactionGossip,\n            Message::AddressGossiper(_) => MessageKind::AddressGossip,\n            Message::GetRequest { tag, .. } | Message::GetResponse { tag, .. } => match tag {\n                Tag::Transaction | Tag::LegacyDeploy => MessageKind::TransactionTransfer,\n                Tag::Block => MessageKind::BlockTransfer,\n                Tag::BlockHeader => MessageKind::BlockTransfer,\n                Tag::TrieOrChunk => MessageKind::TrieTransfer,\n                Tag::FinalitySignature => MessageKind::Other,\n                Tag::SyncLeap => MessageKind::BlockTransfer,\n                Tag::ApprovalsHashes => MessageKind::BlockTransfer,\n                Tag::BlockExecutionResults => MessageKind::BlockTransfer,\n            },\n            Message::FinalitySignature(_) => MessageKind::Consensus,\n            Message::FinalitySignatureGossiper(_) => MessageKind::FinalitySignatureGossip,\n        }\n    }\n\n    fn is_low_priority(&self) -> bool {\n        // We only deprioritize requested trie nodes, as they are the most commonly requested item\n        // during fast sync.\n        match self {\n            Message::Consensus(_) => false,\n            Message::ConsensusRequest(_) => false,\n            Message::TransactionGossiper(_) => false,\n     
       Message::BlockGossiper(_) => false,\n            Message::FinalitySignatureGossiper(_) => false,\n            Message::AddressGossiper(_) => false,\n            Message::GetRequest { tag, .. } if *tag == Tag::TrieOrChunk => true,\n            Message::GetRequest { .. } => false,\n            Message::GetResponse { .. } => false,\n            Message::FinalitySignature(_) => false,\n        }\n    }\n\n    #[inline]\n    fn incoming_resource_estimate(&self, weights: &EstimatorWeights) -> u32 {\n        match self {\n            Message::Consensus(_) => weights.consensus,\n            Message::ConsensusRequest(_) => weights.consensus,\n            Message::BlockGossiper(_) => weights.block_gossip,\n            Message::TransactionGossiper(_) => weights.transaction_gossip,\n            Message::FinalitySignatureGossiper(_) => weights.finality_signature_gossip,\n            Message::AddressGossiper(_) => weights.address_gossip,\n            Message::GetRequest { tag, .. } => match tag {\n                Tag::Transaction => weights.transaction_requests,\n                Tag::LegacyDeploy => weights.legacy_deploy_requests,\n                Tag::Block => weights.block_requests,\n                Tag::BlockHeader => weights.block_header_requests,\n                Tag::TrieOrChunk => weights.trie_requests,\n                Tag::FinalitySignature => weights.finality_signature_requests,\n                Tag::SyncLeap => weights.sync_leap_requests,\n                Tag::ApprovalsHashes => weights.approvals_hashes_requests,\n                Tag::BlockExecutionResults => weights.execution_results_requests,\n            },\n            Message::GetResponse { tag, .. 
} => match tag {\n                Tag::Transaction => weights.transaction_responses,\n                Tag::LegacyDeploy => weights.legacy_deploy_responses,\n                Tag::Block => weights.block_responses,\n                Tag::BlockHeader => weights.block_header_responses,\n                Tag::TrieOrChunk => weights.trie_responses,\n                Tag::FinalitySignature => weights.finality_signature_responses,\n                Tag::SyncLeap => weights.sync_leap_responses,\n                Tag::ApprovalsHashes => weights.approvals_hashes_responses,\n                Tag::BlockExecutionResults => weights.execution_results_responses,\n            },\n            Message::FinalitySignature(_) => weights.finality_signature_broadcasts,\n        }\n    }\n\n    fn is_unsafe_for_syncing_peers(&self) -> bool {\n        match self {\n            Message::Consensus(_) => false,\n            Message::ConsensusRequest(_) => false,\n            Message::BlockGossiper(_) => false,\n            Message::TransactionGossiper(_) => false,\n            Message::FinalitySignatureGossiper(_) => false,\n            Message::AddressGossiper(_) => false,\n            // Trie requests can deadlock between syncing nodes.\n            Message::GetRequest { tag, .. } if *tag == Tag::TrieOrChunk => true,\n            Message::GetRequest { .. } => false,\n            Message::GetResponse { .. 
} => false,\n            Message::FinalitySignature(_) => false,\n        }\n    }\n}\n\nimpl Message {\n    pub(crate) fn new_get_request<T: FetchItem>(id: &T::Id) -> Result<Self, bincode::Error> {\n        Ok(Message::GetRequest {\n            tag: T::TAG,\n            serialized_id: bincode::serialize(id)?,\n        })\n    }\n\n    pub(crate) fn new_get_response<T: FetchItem>(\n        item: &FetchResponse<T, T::Id>,\n    ) -> Result<Self, bincode::Error> {\n        Ok(Message::GetResponse {\n            tag: T::TAG,\n            serialized_item: item.to_serialized()?.into(),\n        })\n    }\n\n    /// Creates a new get response from already serialized data.\n    pub(crate) fn new_get_response_from_serialized(tag: Tag, serialized_item: Arc<[u8]>) -> Self {\n        Message::GetResponse {\n            tag,\n            serialized_item,\n        }\n    }\n}\n\nimpl Debug for Message {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            Message::Consensus(c) => f.debug_tuple(\"Consensus\").field(&c).finish(),\n            Message::ConsensusRequest(c) => f.debug_tuple(\"ConsensusRequest\").field(&c).finish(),\n            Message::BlockGossiper(dg) => f.debug_tuple(\"BlockGossiper\").field(&dg).finish(),\n            Message::TransactionGossiper(dg) => f.debug_tuple(\"DeployGossiper\").field(&dg).finish(),\n            Message::FinalitySignatureGossiper(sig) => f\n                .debug_tuple(\"FinalitySignatureGossiper\")\n                .field(&sig)\n                .finish(),\n            Message::AddressGossiper(ga) => f.debug_tuple(\"AddressGossiper\").field(&ga).finish(),\n            Message::GetRequest { tag, serialized_id } => f\n                .debug_struct(\"GetRequest\")\n                .field(\"tag\", tag)\n                .field(\"serialized_id\", &HexFmt(serialized_id))\n                .finish(),\n            Message::GetResponse {\n                tag,\n                serialized_item,\n            } 
=> f\n                .debug_struct(\"GetResponse\")\n                .field(\"tag\", tag)\n                .field(\n                    \"serialized_item\",\n                    &format!(\"{} bytes\", serialized_item.len()),\n                )\n                .finish(),\n            Message::FinalitySignature(fs) => {\n                f.debug_tuple(\"FinalitySignature\").field(&fs).finish()\n            }\n        }\n    }\n}\nmod specimen_support {\n    use crate::utils::specimen::{\n        largest_get_request, largest_get_response, largest_variant, Cache, LargestSpecimen,\n        SizeEstimator,\n    };\n\n    use super::{Message, MessageDiscriminants};\n\n    impl LargestSpecimen for Message {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            largest_variant::<Self, MessageDiscriminants, _, _>(\n                estimator,\n                |variant| match variant {\n                    MessageDiscriminants::Consensus => {\n                        Message::Consensus(LargestSpecimen::largest_specimen(estimator, cache))\n                    }\n                    MessageDiscriminants::ConsensusRequest => Message::ConsensusRequest(\n                        LargestSpecimen::largest_specimen(estimator, cache),\n                    ),\n                    MessageDiscriminants::BlockGossiper => {\n                        Message::BlockGossiper(LargestSpecimen::largest_specimen(estimator, cache))\n                    }\n                    MessageDiscriminants::TransactionGossiper => Message::TransactionGossiper(\n                        LargestSpecimen::largest_specimen(estimator, cache),\n                    ),\n                    MessageDiscriminants::FinalitySignatureGossiper => {\n                        Message::FinalitySignatureGossiper(LargestSpecimen::largest_specimen(\n                            estimator, cache,\n                        ))\n                    }\n                    
MessageDiscriminants::AddressGossiper => Message::AddressGossiper(\n                        LargestSpecimen::largest_specimen(estimator, cache),\n                    ),\n                    MessageDiscriminants::GetRequest => largest_get_request(estimator, cache),\n                    MessageDiscriminants::GetResponse => largest_get_response(estimator, cache),\n                    MessageDiscriminants::FinalitySignature => Message::FinalitySignature(\n                        LargestSpecimen::largest_specimen(estimator, cache),\n                    ),\n                },\n            )\n        }\n    }\n}\n\nimpl Display for Message {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        match self {\n            Message::Consensus(consensus) => write!(f, \"Consensus::{}\", consensus),\n            Message::ConsensusRequest(consensus) => write!(f, \"ConsensusRequest({})\", consensus),\n            Message::BlockGossiper(deploy) => write!(f, \"BlockGossiper::{}\", deploy),\n            Message::TransactionGossiper(txn) => write!(f, \"TransactionGossiper::{}\", txn),\n            Message::FinalitySignatureGossiper(sig) => {\n                write!(f, \"FinalitySignatureGossiper::{}\", sig)\n            }\n            Message::AddressGossiper(gossiped_address) => {\n                write!(f, \"AddressGossiper::({})\", gossiped_address)\n            }\n            Message::GetRequest { tag, serialized_id } => {\n                write!(f, \"GetRequest({}-{:10})\", tag, HexFmt(serialized_id))\n            }\n            Message::GetResponse {\n                tag,\n                serialized_item,\n            } => write!(f, \"GetResponse({}-{:10})\", tag, HexFmt(serialized_item)),\n            Message::FinalitySignature(fs) => {\n                write!(f, \"FinalitySignature::({})\", fs)\n            }\n        }\n    }\n}\n\nimpl<REv> FromIncoming<Message> for REv\nwhere\n    REv: From<ConsensusMessageIncoming>\n        + From<ConsensusDemand>\n        + 
From<GossiperIncoming<BlockV2>>\n        + From<GossiperIncoming<Transaction>>\n        + From<GossiperIncoming<FinalitySignatureV2>>\n        + From<GossiperIncoming<GossipedAddress>>\n        + From<NetRequestIncoming>\n        + From<NetResponseIncoming>\n        + From<TrieRequestIncoming>\n        + From<TrieDemand>\n        + From<TrieResponseIncoming>\n        + From<FinalitySignatureIncoming>,\n{\n    fn from_incoming(sender: NodeId, payload: Message) -> Self {\n        match payload {\n            Message::Consensus(message) => ConsensusMessageIncoming {\n                sender,\n                message: Box::new(message),\n            }\n            .into(),\n            Message::ConsensusRequest(_message) => {\n                // TODO: Remove this once from_incoming and try_demand_from_incoming are unified.\n                unreachable!(\"called from_incoming with a consensus request\")\n            }\n            Message::BlockGossiper(message) => GossiperIncoming {\n                sender,\n                message: Box::new(message),\n            }\n            .into(),\n            Message::TransactionGossiper(message) => GossiperIncoming {\n                sender,\n                message: Box::new(message),\n            }\n            .into(),\n            Message::FinalitySignatureGossiper(message) => GossiperIncoming {\n                sender,\n                message: Box::new(message),\n            }\n            .into(),\n            Message::AddressGossiper(message) => GossiperIncoming {\n                sender,\n                message: Box::new(message),\n            }\n            .into(),\n            Message::GetRequest { tag, serialized_id } => match tag {\n                Tag::Transaction => NetRequestIncoming {\n                    sender,\n                    message: Box::new(NetRequest::Transaction(serialized_id)),\n                }\n                .into(),\n                Tag::LegacyDeploy => NetRequestIncoming {\n               
     sender,\n                    message: Box::new(NetRequest::LegacyDeploy(serialized_id)),\n                }\n                .into(),\n                Tag::Block => NetRequestIncoming {\n                    sender,\n                    message: Box::new(NetRequest::Block(serialized_id)),\n                }\n                .into(),\n                Tag::BlockHeader => NetRequestIncoming {\n                    sender,\n                    message: Box::new(NetRequest::BlockHeader(serialized_id)),\n                }\n                .into(),\n                Tag::TrieOrChunk => TrieRequestIncoming {\n                    sender,\n                    message: Box::new(TrieRequest(serialized_id)),\n                }\n                .into(),\n                Tag::FinalitySignature => NetRequestIncoming {\n                    sender,\n                    message: Box::new(NetRequest::FinalitySignature(serialized_id)),\n                }\n                .into(),\n                Tag::SyncLeap => NetRequestIncoming {\n                    sender,\n                    message: Box::new(NetRequest::SyncLeap(serialized_id)),\n                }\n                .into(),\n                Tag::ApprovalsHashes => NetRequestIncoming {\n                    sender,\n                    message: Box::new(NetRequest::ApprovalsHashes(serialized_id)),\n                }\n                .into(),\n                Tag::BlockExecutionResults => NetRequestIncoming {\n                    sender,\n                    message: Box::new(NetRequest::BlockExecutionResults(serialized_id)),\n                }\n                .into(),\n            },\n            Message::GetResponse {\n                tag,\n                serialized_item,\n            } => match tag {\n                Tag::Transaction => NetResponseIncoming {\n                    sender,\n                    message: Box::new(NetResponse::Transaction(serialized_item)),\n                }\n                .into(),\n           
     Tag::LegacyDeploy => NetResponseIncoming {\n                    sender,\n                    message: Box::new(NetResponse::LegacyDeploy(serialized_item)),\n                }\n                .into(),\n                Tag::Block => NetResponseIncoming {\n                    sender,\n                    message: Box::new(NetResponse::Block(serialized_item)),\n                }\n                .into(),\n                Tag::BlockHeader => NetResponseIncoming {\n                    sender,\n                    message: Box::new(NetResponse::BlockHeader(serialized_item)),\n                }\n                .into(),\n                Tag::TrieOrChunk => TrieResponseIncoming {\n                    sender,\n                    message: Box::new(TrieResponse(serialized_item.to_vec())),\n                }\n                .into(),\n                Tag::FinalitySignature => NetResponseIncoming {\n                    sender,\n                    message: Box::new(NetResponse::FinalitySignature(serialized_item)),\n                }\n                .into(),\n                Tag::SyncLeap => NetResponseIncoming {\n                    sender,\n                    message: Box::new(NetResponse::SyncLeap(serialized_item)),\n                }\n                .into(),\n                Tag::ApprovalsHashes => NetResponseIncoming {\n                    sender,\n                    message: Box::new(NetResponse::ApprovalsHashes(serialized_item)),\n                }\n                .into(),\n                Tag::BlockExecutionResults => NetResponseIncoming {\n                    sender,\n                    message: Box::new(NetResponse::BlockExecutionResults(serialized_item)),\n                }\n                .into(),\n            },\n            Message::FinalitySignature(message) => {\n                FinalitySignatureIncoming { sender, message }.into()\n            }\n        }\n    }\n\n    fn try_demand_from_incoming(\n        effect_builder: EffectBuilder<REv>,\n       
 sender: NodeId,\n        payload: Message,\n    ) -> Result<(Self, BoxFuture<'static, Option<Message>>), Message>\n    where\n        Self: Sized + Send,\n    {\n        match payload {\n            Message::GetRequest {\n                tag: Tag::TrieOrChunk,\n                serialized_id,\n            } => {\n                let (ev, fut) = effect_builder.create_request_parts(move |responder| TrieDemand {\n                    sender,\n                    request_msg: Box::new(TrieRequest(serialized_id)),\n                    auto_closing_responder: AutoClosingResponder::from_opt_responder(responder),\n                });\n\n                Ok((ev, fut.boxed()))\n            }\n            Message::ConsensusRequest(request_msg) => {\n                let (ev, fut) =\n                    effect_builder.create_request_parts(move |responder| ConsensusDemand {\n                        sender,\n                        request_msg: Box::new(request_msg),\n                        auto_closing_responder: AutoClosingResponder::from_opt_responder(responder),\n                    });\n\n                Ok((ev, fut.boxed()))\n            }\n            _ => Err(payload),\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/reactor/event_queue_metrics.rs",
    "content": "use std::collections::HashMap;\n\nuse itertools::Itertools;\nuse prometheus::{self, IntGauge, Registry};\nuse tracing::{debug, error};\n\nuse crate::{\n    reactor::{EventQueueHandle, QueueKind},\n    unregister_metric,\n};\n\n/// Metrics for event queue sizes.\n#[derive(Debug)]\npub(super) struct EventQueueMetrics {\n    /// Per queue kind gauges that measure number of event in the queue.\n    event_queue_gauges: HashMap<QueueKind, IntGauge>,\n    /// Total events count.\n    event_total: IntGauge,\n    /// Instance of registry to unregister from when being dropped.\n    registry: Registry,\n}\n\nimpl EventQueueMetrics {\n    /// Initializes event queue sizes metrics.\n    pub(super) fn new<REv: 'static>(\n        registry: Registry,\n        event_queue_handle: EventQueueHandle<REv>,\n    ) -> Result<Self, prometheus::Error> {\n        let mut event_queue_gauges: HashMap<QueueKind, IntGauge> = HashMap::new();\n        for queue_kind in event_queue_handle.event_queues_counts().keys() {\n            let key = format!(\"scheduler_queue_{}_count\", queue_kind.metrics_name());\n            let queue_event_counter = IntGauge::new(\n                key,\n                format!(\n                    \"current number of events in the reactor {} queue\",\n                    queue_kind.metrics_name()\n                ),\n            )?;\n            registry.register(Box::new(queue_event_counter.clone()))?;\n            let result = event_queue_gauges.insert(*queue_kind, queue_event_counter);\n            assert!(result.is_none(), \"Map keys should not be overwritten.\");\n        }\n\n        let event_total = IntGauge::new(\n            \"scheduler_queue_total_count\",\n            \"current total number of events in all reactor queues\",\n        )?;\n        registry.register(Box::new(event_total.clone()))?;\n\n        Ok(EventQueueMetrics {\n            event_queue_gauges,\n            event_total,\n            registry,\n        })\n    }\n\n    /// 
Updates the event queues size metrics.\n    /// NOTE: Count may be off by one b/c of the way locking works when elements are popped.\n    /// It's fine for its purposes.\n    pub(super) fn record_event_queue_counts<REv: 'static>(\n        &self,\n        event_queue_handle: &EventQueueHandle<REv>,\n    ) {\n        let event_queue_count = event_queue_handle.event_queues_counts();\n\n        let total = event_queue_count.values().sum::<usize>() as i64;\n        self.event_total.set(total);\n\n        let event_counts: String = event_queue_count\n            .iter()\n            .sorted_by_key(|k| k.0)\n            .map(|(queue, event_count)| {\n                self.event_queue_gauges\n                    .get(queue)\n                    .map(|gauge| gauge.set(*event_count as i64))\n                    .expect(\"queue exists.\");\n                format!(\"{}={}\", queue, event_count)\n            })\n            .join(\",\");\n\n        debug!(%total, %event_counts, \"Collected new set of event queue sizes metrics.\")\n    }\n}\n\nimpl Drop for EventQueueMetrics {\n    fn drop(&mut self) {\n        unregister_metric!(self.registry, self.event_total);\n        self.event_queue_gauges\n            .iter()\n            .for_each(|(key, queue_gauge)| {\n                self.registry\n                    .unregister(Box::new(queue_gauge.clone()))\n                    .unwrap_or_else(|_| error!(\"unregistering {} failed: was not registered\", key));\n            });\n    }\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/catch_up.rs",
    "content": "use std::time::Duration;\n\nuse either::Either;\nuse tracing::{debug, info, warn};\n\nuse casper_types::{ActivationPoint, BlockHash, TimeDiff, Timestamp};\n\nuse crate::{\n    components::{\n        block_accumulator::{SyncIdentifier, SyncInstruction},\n        block_synchronizer::BlockSynchronizerProgress,\n        sync_leaper,\n        sync_leaper::{LeapActivityError, LeapState},\n        ValidatorBoundComponent,\n    },\n    effect::{requests::BlockSynchronizerRequest, EffectBuilder, EffectExt, Effects},\n    reactor::{\n        main_reactor::{MainEvent, MainReactor},\n        wrap_effects,\n    },\n    types::{NodeId, SyncLeap, SyncLeapIdentifier},\n    NodeRng,\n};\n\npub(super) enum CatchUpInstruction {\n    Do(Duration, Effects<MainEvent>),\n    CheckLater(String, Duration),\n    Fatal(String),\n    ShutdownForUpgrade,\n    CaughtUp,\n    CommitGenesis,\n    CommitUpgrade,\n}\n\nimpl MainReactor {\n    pub(super) fn catch_up_instruction(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        rng: &mut NodeRng,\n    ) -> CatchUpInstruction {\n        // if there is instruction, return to start working on it\n        // else fall thru with the current best available id for block syncing\n        let sync_identifier = match self.catch_up_process() {\n            Either::Right(catch_up_instruction) => return catch_up_instruction,\n            Either::Left(sync_identifier) => sync_identifier,\n        };\n        debug!(\n            ?sync_identifier,\n            block_hash = %sync_identifier.block_hash(),\n            \"CatchUp: sync identifier\"\n        );\n        // we check with the block accumulator before doing sync work as it may be aware of one or\n        // more blocks that are higher than our current highest block\n        let sync_instruction = self.block_accumulator.sync_instruction(sync_identifier);\n        debug!(\n            ?sync_instruction,\n            block_hash = %sync_instruction.block_hash(),\n 
           \"CatchUp: sync_instruction\"\n        );\n        if let Some(catch_up_instruction) =\n            self.catch_up_sync_instruction(effect_builder, rng, sync_instruction)\n        {\n            // do necessary work to catch up\n            return catch_up_instruction;\n        }\n        // there are no catch up or shutdown instructions, so we must be caught up\n        CatchUpInstruction::CaughtUp\n    }\n\n    fn catch_up_process(&mut self) -> Either<SyncIdentifier, CatchUpInstruction> {\n        let catch_up_progress = self.block_synchronizer.historical_progress();\n        self.update_last_progress(&catch_up_progress, false);\n        match catch_up_progress {\n            BlockSynchronizerProgress::Idle => {\n                // not working on syncing a block (ready to start a new one)\n                match self.trusted_hash {\n                    Some(trusted_hash) => self.catch_up_trusted_hash(trusted_hash),\n                    None => self.catch_up_no_trusted_hash(),\n                }\n            }\n            BlockSynchronizerProgress::Syncing(block_hash, maybe_block_height, last_progress) => {\n                // working on syncing a block\n                self.catch_up_syncing(block_hash, maybe_block_height, last_progress)\n            }\n            BlockSynchronizerProgress::Executing(block_hash, _, _) => {\n                // this code path should be unreachable because we're not\n                // supposed to enqueue historical blocks for execution.\n                Either::Right(CatchUpInstruction::Fatal(format!(\n                    \"CatchUp: block synchronizer attempted to execute block: {}\",\n                    block_hash\n                )))\n            }\n            BlockSynchronizerProgress::Synced(block_hash, block_height, era_id) => Either::Left(\n                // for a synced CatchUp block -> we have header, body, global state, any execution\n                // effects, any referenced deploys, & sufficient finality 
(by weight) of signatures\n                SyncIdentifier::SyncedBlockIdentifier(block_hash, block_height, era_id),\n            ),\n        }\n    }\n\n    fn catch_up_no_trusted_hash(&mut self) -> Either<SyncIdentifier, CatchUpInstruction> {\n        // no trusted hash provided, we will attempt to use local tip if available\n        match self.storage.get_highest_complete_block() {\n            Ok(Some(block)) => {\n                // this is typically a restart scenario; if a node stops and restarts\n                // quickly enough they can rejoin the network from their highest local block\n                // if too much time has passed, the node will shutdown and require a\n                // trusted block hash to be provided via the config file\n                info!(\"CatchUp: local tip detected, no trusted hash\");\n                Either::Left(SyncIdentifier::LocalTip(\n                    *block.hash(),\n                    block.height(),\n                    block.era_id(),\n                ))\n            }\n            Ok(None) => {\n                match self\n                    .storage\n                    .read_highest_switch_block_headers(1)\n                    .map(|headers| headers.first().cloned())\n                {\n                    Ok(Some(_)) => {\n                        // no trusted hash, no local block, no error, must be waiting for genesis\n                        info!(\"CatchUp: waiting to store genesis immediate switch block\");\n                        Either::Right(CatchUpInstruction::CheckLater(\n                            \"waiting for genesis immediate switch block to be stored\".to_string(),\n                            self.control_logic_default_delay.into(),\n                        ))\n                    }\n                    Ok(None) => {\n                        // no trusted hash, no local block, might be genesis\n                        self.catch_up_check_genesis()\n                    }\n                    
Err(storage_err) => Either::Right(CatchUpInstruction::Fatal(format!(\n                        \"CatchUp: Could not read storage to find highest switch block header: {}\",\n                        storage_err\n                    ))),\n                }\n            }\n            Err(err) => Either::Right(CatchUpInstruction::Fatal(format!(\n                \"CatchUp: fatal block store error when attempting to read \\\n                                    highest complete block: {}\",\n                err\n            ))),\n        }\n    }\n\n    fn catch_up_check_genesis(&mut self) -> Either<SyncIdentifier, CatchUpInstruction> {\n        match self.chainspec.protocol_config.activation_point {\n            ActivationPoint::Genesis(timestamp) => {\n                // this bootstraps a network; it only occurs once ever on a given network but is\n                // very load-bearing as errors in this logic can prevent the network from coming\n                // into existence or surviving its initial existence.\n\n                let now = Timestamp::now();\n                let grace_period = timestamp.saturating_add(TimeDiff::from_seconds(180));\n                if now > grace_period {\n                    return Either::Right(CatchUpInstruction::Fatal(\n                        \"CatchUp: late for genesis; cannot proceed without trusted hash\"\n                            .to_string(),\n                    ));\n                }\n                let time_remaining = timestamp.saturating_diff(now);\n                if time_remaining > TimeDiff::default() {\n                    return Either::Right(CatchUpInstruction::CheckLater(\n                        format!(\"waiting for genesis activation at {}\", timestamp),\n                        Duration::from(time_remaining),\n                    ));\n                }\n                Either::Right(CatchUpInstruction::CommitGenesis)\n            }\n            ActivationPoint::EraId(_) => {\n                // no trusted 
hash, no local block, not genesis\n                Either::Right(CatchUpInstruction::Fatal(\n                    \"CatchUp: cannot proceed without trusted hash\".to_string(),\n                ))\n            }\n        }\n    }\n\n    fn catch_up_trusted_hash(\n        &mut self,\n        trusted_hash: BlockHash,\n    ) -> Either<SyncIdentifier, CatchUpInstruction> {\n        // if we have a configured trusted hash and we have the header for that block,\n        // use the higher block height of the local tip and the trusted header\n        match self.storage.read_block_header_by_hash(&trusted_hash) {\n            Ok(Some(trusted_header)) => {\n                match self.storage.get_highest_complete_block() {\n                    Ok(Some(block)) => {\n                        // leap w/ the higher of local tip or trusted hash\n                        let trusted_height = trusted_header.height();\n                        if trusted_height > block.height() {\n                            Either::Left(SyncIdentifier::BlockIdentifier(\n                                trusted_hash,\n                                trusted_height,\n                            ))\n                        } else {\n                            Either::Left(SyncIdentifier::LocalTip(\n                                *block.hash(),\n                                block.height(),\n                                block.era_id(),\n                            ))\n                        }\n                    }\n                    Ok(None) => Either::Left(SyncIdentifier::BlockHash(trusted_hash)),\n                    Err(_) => Either::Right(CatchUpInstruction::Fatal(\n                        \"CatchUp: fatal block store error when attempting to \\\n                                            read highest complete block\"\n                            .to_string(),\n                    )),\n                }\n            }\n            Ok(None) => {\n                // we do not have the header for 
the trusted hash. we may have local tip,\n                // but we start with the configured trusted hash in this scenario as it is\n                // necessary to allow a node to re-join if their local state is stale\n                Either::Left(SyncIdentifier::BlockHash(trusted_hash))\n            }\n            Err(err) => Either::Right(CatchUpInstruction::Fatal(format!(\n                \"CatchUp: fatal block store error when attempting to read \\\n                                    highest complete block: {}\",\n                err\n            ))),\n        }\n    }\n\n    fn catch_up_syncing(\n        &mut self,\n        block_hash: BlockHash,\n        maybe_block_height: Option<u64>,\n        last_progress: Timestamp,\n    ) -> Either<SyncIdentifier, CatchUpInstruction> {\n        // if we have not made progress on our attempt to catch up with the network, increment\n        // attempts counter and try again; the crank logic will shut the node down on the next\n        // crank if we've exceeded our reattempts\n        let idleness = Timestamp::now().saturating_diff(last_progress);\n        if idleness > self.idle_tolerance {\n            self.attempts += 1;\n            warn!(\n                %last_progress,\n                remaining_attempts = self.max_attempts.saturating_sub(self.attempts),\n                \"CatchUp: idleness detected\"\n            );\n        }\n        match maybe_block_height {\n            None => Either::Left(SyncIdentifier::BlockHash(block_hash)),\n            Some(block_height) => {\n                Either::Left(SyncIdentifier::BlockIdentifier(block_hash, block_height))\n            }\n        }\n    }\n\n    fn catch_up_sync_instruction(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        rng: &mut NodeRng,\n        sync_instruction: SyncInstruction,\n    ) -> Option<CatchUpInstruction> {\n        match sync_instruction {\n            SyncInstruction::Leap { block_hash }\n            | 
SyncInstruction::LeapIntervalElapsed { block_hash } => {\n                Some(self.catch_up_leap(effect_builder, rng, block_hash))\n            }\n            SyncInstruction::BlockSync { block_hash } => {\n                Some(self.catch_up_block_sync(effect_builder, block_hash))\n            }\n            SyncInstruction::CaughtUp { .. } => self.catch_up_check_transition(),\n        }\n    }\n\n    fn catch_up_leap(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        rng: &mut NodeRng,\n        block_hash: BlockHash,\n    ) -> CatchUpInstruction {\n        // register block builder so that control logic can tell that block is Syncing,\n        // otherwise block_synchronizer detects as Idle which can cause unnecessary churn\n        // on subsequent cranks while leaper is awaiting responses.\n        self.block_synchronizer\n            .register_block_by_hash(block_hash, true);\n        let leap_status = self.sync_leaper.leap_status();\n        info!(%block_hash, %leap_status, \"CatchUp: status\");\n        match leap_status {\n            LeapState::Idle => self.catch_up_leaper_idle(effect_builder, rng, block_hash),\n            LeapState::Awaiting { .. } => CatchUpInstruction::CheckLater(\n                \"sync leaper is awaiting response\".to_string(),\n                self.control_logic_default_delay.into(),\n            ),\n            LeapState::Received {\n                best_available,\n                from_peers,\n                ..\n            } => self.catch_up_leap_received(effect_builder, rng, *best_available, from_peers),\n            LeapState::Failed { error, .. 
} => {\n                self.catch_up_leap_failed(effect_builder, rng, block_hash, error)\n            }\n        }\n    }\n\n    fn catch_up_leap_failed(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        rng: &mut NodeRng,\n        block_hash: BlockHash,\n        error: LeapActivityError,\n    ) -> CatchUpInstruction {\n        self.attempts += 1;\n        warn!(\n            %error,\n            remaining_attempts = %self.max_attempts.saturating_sub(self.attempts),\n            \"CatchUp: failed leap\",\n        );\n        self.catch_up_leaper_idle(effect_builder, rng, block_hash)\n    }\n\n    fn catch_up_leaper_idle(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        rng: &mut NodeRng,\n        block_hash: BlockHash,\n    ) -> CatchUpInstruction {\n        // we get a random sampling of peers to ask.\n        let peers_to_ask = self.net.fully_connected_peers_random(\n            rng,\n            self.chainspec.core_config.simultaneous_peer_requests as usize,\n        );\n        if peers_to_ask.is_empty() {\n            return CatchUpInstruction::CheckLater(\n                \"no peers\".to_string(),\n                self.chainspec.core_config.minimum_block_time.into(),\n            );\n        }\n\n        // latch accumulator progress to allow sync-leap time to do work\n        self.block_accumulator.reset_last_progress();\n\n        let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(block_hash);\n        let effects = effect_builder.immediately().event(move |_| {\n            MainEvent::SyncLeaper(sync_leaper::Event::AttemptLeap {\n                sync_leap_identifier,\n                peers_to_ask,\n            })\n        });\n        CatchUpInstruction::Do(self.control_logic_default_delay.into(), effects)\n    }\n\n    fn catch_up_leap_received(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        rng: &mut NodeRng,\n        sync_leap: SyncLeap,\n        
from_peers: Vec<NodeId>,\n    ) -> CatchUpInstruction {\n        let block_hash = sync_leap.highest_block_hash();\n        let block_height = sync_leap.highest_block_height();\n        info!(\n            %sync_leap,\n            %block_height,\n            %block_hash,\n            \"CatchUp: leap received\"\n        );\n\n        for validator_weights in sync_leap.era_validator_weights(\n            self.validator_matrix.fault_tolerance_threshold(),\n            &self.chainspec.protocol_config,\n        ) {\n            self.validator_matrix\n                .register_era_validator_weights(validator_weights);\n        }\n\n        let mut effects = Effects::new();\n\n        effects.extend(wrap_effects(\n            MainEvent::BlockAccumulator,\n            self.block_accumulator\n                .handle_validators(effect_builder, rng),\n        ));\n\n        effects.extend(wrap_effects(\n            MainEvent::BlockSynchronizer,\n            self.block_synchronizer\n                .handle_validators(effect_builder, rng),\n        ));\n\n        self.block_synchronizer\n            .register_sync_leap(&sync_leap, from_peers, true);\n\n        CatchUpInstruction::Do(self.control_logic_default_delay.into(), effects)\n    }\n\n    fn catch_up_block_sync(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        block_hash: BlockHash,\n    ) -> CatchUpInstruction {\n        if self\n            .block_synchronizer\n            .register_block_by_hash(block_hash, true)\n        {\n            // NeedNext will self perpetuate until nothing is needed for this block\n            let mut effects = Effects::new();\n            effects.extend(effect_builder.immediately().event(|_| {\n                MainEvent::BlockSynchronizerRequest(BlockSynchronizerRequest::NeedNext)\n            }));\n            CatchUpInstruction::Do(Duration::ZERO, effects)\n        } else {\n            CatchUpInstruction::CheckLater(\n                
format!(\"block_synchronizer is currently working on {}\", block_hash),\n                self.control_logic_default_delay.into(),\n            )\n        }\n    }\n\n    fn catch_up_check_transition(&mut self) -> Option<CatchUpInstruction> {\n        // we may be starting back up after a shutdown for upgrade; if so we need to\n        // commit upgrade now before proceeding further\n        if self.should_commit_upgrade() {\n            return Some(CatchUpInstruction::CommitUpgrade);\n        }\n        // we may need to shutdown to go thru an upgrade\n        if self.should_shutdown_for_upgrade() {\n            Some(CatchUpInstruction::ShutdownForUpgrade)\n        } else {\n            None\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/config.rs",
    "content": "use datasize::DataSize;\nuse serde::{Deserialize, Serialize};\nuse tracing::error;\n\nuse casper_types::Chainspec;\n\nuse crate::{\n    logging::LoggingConfig, types::NodeConfig, BinaryPortConfig, BlockAccumulatorConfig,\n    BlockSynchronizerConfig, BlockValidatorConfig, ConsensusConfig, ContractRuntimeConfig,\n    DiagnosticsPortConfig, EventStreamServerConfig, FetcherConfig, GossipConfig, NetworkConfig,\n    RestServerConfig, StorageConfig, TransactionAcceptorConfig, TransactionBufferConfig,\n    UpgradeWatcherConfig,\n};\n\n/// Root configuration.\n#[derive(Clone, DataSize, Debug, Default, Serialize, Deserialize)]\n// Disallow unknown fields to ensure config files and command-line overrides contain valid keys.\n#[serde(deny_unknown_fields)]\npub struct Config {\n    /// Config values for the node.\n    pub node: NodeConfig,\n    /// Config values for logging.\n    pub logging: LoggingConfig,\n    /// Config values for consensus.\n    pub consensus: ConsensusConfig,\n    /// Config values for network.\n    pub network: NetworkConfig,\n    /// Config values for the event stream server.\n    pub event_stream_server: EventStreamServerConfig,\n    /// Config values for the REST server.\n    pub rest_server: RestServerConfig,\n    /// Config values for storage.\n    pub storage: StorageConfig,\n    /// Config values for gossip.\n    pub gossip: GossipConfig,\n    /// Config values for fetchers.\n    pub fetcher: FetcherConfig,\n    /// Config values for the contract runtime.\n    pub contract_runtime: ContractRuntimeConfig,\n    /// Config values for the transaction acceptor.\n    pub transaction_acceptor: TransactionAcceptorConfig,\n    /// Config values for the transaction buffer.\n    pub transaction_buffer: TransactionBufferConfig,\n    /// Config values for the diagnostics port.\n    pub diagnostics_port: DiagnosticsPortConfig,\n    /// Config values for the block accumulator.\n    pub block_accumulator: BlockAccumulatorConfig,\n    /// Config 
values for the block synchronizer.\n    pub block_synchronizer: BlockSynchronizerConfig,\n    /// Config values for the block validator.\n    pub block_validator: BlockValidatorConfig,\n    /// Config values for the upgrade watcher.\n    pub upgrade_watcher: UpgradeWatcherConfig,\n    /// Config values for the BinaryPort server.\n    pub binary_port_server: BinaryPortConfig,\n}\n\nimpl Config {\n    /// This modifies `self` so that all configured options are within the bounds set in the\n    /// provided chainspec.\n    pub(crate) fn ensure_valid(&mut self, chainspec: &Chainspec) {\n        if self.transaction_acceptor.timestamp_leeway\n            > chainspec.transaction_config.max_timestamp_leeway\n        {\n            error!(\n                configured_timestamp_leeway = %self.transaction_acceptor.timestamp_leeway,\n                max_timestamp_leeway = %chainspec.transaction_config.max_timestamp_leeway,\n                \"setting value for 'transaction_acceptor.timestamp_leeway' to maximum permitted by \\\n                chainspec 'transaction_config.max_timestamp_leeway'\",\n            );\n            self.transaction_acceptor.timestamp_leeway =\n                chainspec.transaction_config.max_timestamp_leeway;\n        }\n    }\n\n    /// Set network config.\n    #[cfg(test)]\n    pub(crate) fn with_network_config(mut self, network_config: NetworkConfig) -> Self {\n        self.network = network_config;\n        self\n    }\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/control.rs",
    "content": "use std::time::Duration;\nuse tracing::{debug, error, info, trace};\n\nuse casper_storage::data_access_layer::GenesisResult;\nuse casper_types::{BlockHash, BlockHeader, Digest, EraId, PublicKey, Timestamp};\n\nuse crate::{\n    components::{\n        binary_port,\n        block_synchronizer::{self, BlockSynchronizerProgress},\n        contract_runtime::ExecutionPreState,\n        diagnostics_port, event_stream_server, network, rest_server, upgrade_watcher,\n    },\n    effect::{announcements::ControlAnnouncement, EffectBuilder, EffectExt, Effects},\n    fatal,\n    reactor::main_reactor::{\n        catch_up::CatchUpInstruction, genesis_instruction::GenesisInstruction,\n        keep_up::KeepUpInstruction, upgrade_shutdown::UpgradeShutdownInstruction,\n        upgrading_instruction::UpgradingInstruction, utils, validate::ValidateInstruction,\n        MainEvent, MainReactor, ReactorState,\n    },\n    types::{BlockPayload, ExecutableBlock, FinalizedBlock, InternalEraReport, MetaBlockState},\n    NodeRng,\n};\n\nimpl MainReactor {\n    pub(super) fn crank(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        rng: &mut NodeRng,\n    ) -> Effects<MainEvent> {\n        if self.attempts > self.max_attempts {\n            return fatal!(effect_builder, \"exceeded reattempt tolerance\").ignore();\n        }\n        let (delay, mut effects) = self.do_crank(effect_builder, rng);\n        effects.extend(\n            async move {\n                if !delay.is_zero() {\n                    tokio::time::sleep(delay).await;\n                }\n            }\n            .event(|_| MainEvent::ReactorCrank),\n        );\n        effects\n    }\n\n    fn do_crank(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        rng: &mut NodeRng,\n    ) -> (Duration, Effects<MainEvent>) {\n        const INITIALIZATION_DELAY_SPEED_UP_FACTOR: u64 = 4;\n\n        match self.state {\n            ReactorState::Initialize => {\n     
           // We can be more greedy when cranking through the initialization process as the\n                // progress is expected to happen quickly.\n                let initialization_logic_default_delay =\n                    self.control_logic_default_delay / INITIALIZATION_DELAY_SPEED_UP_FACTOR;\n\n                match self.initialize_next_component(effect_builder) {\n                    Some(effects) => (initialization_logic_default_delay.into(), effects),\n                    None => {\n                        if self.sync_handling.is_isolated() {\n                            // If node is \"isolated\" it doesn't care about peers\n                            if let Err(msg) = self.refresh_contract_runtime() {\n                                return (\n                                    Duration::ZERO,\n                                    fatal!(effect_builder, \"{}\", msg).ignore(),\n                                );\n                            }\n                            self.state = ReactorState::KeepUp;\n                            return (Duration::ZERO, Effects::new());\n                        }\n                        if false == self.net.has_sufficient_fully_connected_peers() {\n                            info!(\"Initialize: awaiting sufficient fully-connected peers\");\n                            return (initialization_logic_default_delay.into(), Effects::new());\n                        }\n                        if let Err(msg) = self.refresh_contract_runtime() {\n                            return (Duration::ZERO, fatal!(effect_builder, \"{}\", msg).ignore());\n                        }\n                        info!(\"Initialize: switch to CatchUp\");\n                        self.state = ReactorState::CatchUp;\n                        (Duration::ZERO, Effects::new())\n                    }\n                }\n            }\n            ReactorState::Upgrading => match self.upgrading_instruction() {\n                
UpgradingInstruction::CheckLater(msg, wait) => {\n                    debug!(\"Upgrading: {}\", msg);\n                    (wait, Effects::new())\n                }\n                UpgradingInstruction::CatchUp => {\n                    info!(\"Upgrading: switch to CatchUp\");\n                    self.state = ReactorState::CatchUp;\n                    (Duration::ZERO, Effects::new())\n                }\n            },\n            ReactorState::CatchUp => match self.catch_up_instruction(effect_builder, rng) {\n                CatchUpInstruction::Fatal(msg) => {\n                    (Duration::ZERO, fatal!(effect_builder, \"{}\", msg).ignore())\n                }\n                CatchUpInstruction::ShutdownForUpgrade => {\n                    info!(\"CatchUp: shutting down for upgrade\");\n                    self.switch_to_shutdown_for_upgrade();\n                    (Duration::ZERO, Effects::new())\n                }\n                CatchUpInstruction::CommitGenesis => match self.commit_genesis(effect_builder) {\n                    GenesisInstruction::Validator(duration, effects) => {\n                        info!(\"CatchUp: switch to Validate at genesis\");\n                        self.block_synchronizer.purge();\n                        self.state = ReactorState::Validate;\n                        (duration, effects)\n                    }\n                    GenesisInstruction::NonValidator(duration, effects) => {\n                        info!(\"CatchUp: non-validator committed genesis\");\n                        self.state = ReactorState::CatchUp;\n                        (duration, effects)\n                    }\n                    GenesisInstruction::Fatal(msg) => (\n                        Duration::ZERO,\n                        fatal!(effect_builder, \"failed to commit genesis: {}\", msg).ignore(),\n                    ),\n                },\n                CatchUpInstruction::CommitUpgrade => match self.commit_upgrade(effect_builder) {\n    
                Ok(effects) => {\n                        info!(\"CatchUp: switch to Upgrading\");\n                        self.block_synchronizer.purge();\n                        self.state = ReactorState::Upgrading;\n                        self.last_progress = Timestamp::now();\n                        self.attempts = 0;\n                        (Duration::ZERO, effects)\n                    }\n                    Err(msg) => (\n                        Duration::ZERO,\n                        fatal!(effect_builder, \"failed to commit upgrade: {}\", msg).ignore(),\n                    ),\n                },\n                CatchUpInstruction::CheckLater(msg, wait) => {\n                    debug!(\"CatchUp: {}\", msg);\n                    (wait, Effects::new())\n                }\n                CatchUpInstruction::Do(wait, effects) => {\n                    debug!(\"CatchUp: node is processing effects\");\n                    (wait, effects)\n                }\n                CatchUpInstruction::CaughtUp => {\n                    if let Err(msg) = self.refresh_contract_runtime() {\n                        return (Duration::ZERO, fatal!(effect_builder, \"{}\", msg).ignore());\n                    }\n                    // shut down instead of switching to KeepUp if catch up and shutdown mode is\n                    // enabled\n                    if self.sync_handling.is_complete_block() {\n                        info!(\"CatchUp: immediate shutdown after catching up\");\n                        self.state = ReactorState::ShutdownAfterCatchingUp;\n                        (Duration::ZERO, Effects::new())\n                    } else {\n                        // purge to avoid polluting the status endpoints w/ stale state\n                        info!(\"CatchUp: switch to KeepUp\");\n                        self.block_synchronizer.purge();\n                        self.state = ReactorState::KeepUp;\n                        (Duration::ZERO, Effects::new())\n  
                  }\n                }\n            },\n            ReactorState::KeepUp => match self.keep_up_instruction(effect_builder, rng) {\n                KeepUpInstruction::Fatal(msg) => {\n                    (Duration::ZERO, fatal!(effect_builder, \"{}\", msg).ignore())\n                }\n                KeepUpInstruction::ShutdownForUpgrade => {\n                    info!(\"KeepUp: switch to ShutdownForUpgrade\");\n                    self.switch_to_shutdown_for_upgrade();\n                    (Duration::ZERO, Effects::new())\n                }\n                KeepUpInstruction::CheckLater(msg, wait) => {\n                    debug!(\"KeepUp: {}\", msg);\n                    (wait, Effects::new())\n                }\n                KeepUpInstruction::Do(wait, effects) => {\n                    debug!(\"KeepUp: node is processing effects\");\n                    (wait, effects)\n                }\n                KeepUpInstruction::CatchUp => {\n                    self.block_synchronizer.purge();\n                    self.sync_leaper.purge();\n                    info!(\"KeepUp: switch to CatchUp\");\n                    self.state = ReactorState::CatchUp;\n                    (Duration::ZERO, Effects::new())\n                }\n                KeepUpInstruction::Validate(effects) => {\n                    info!(\"KeepUp: switch to Validate\");\n                    // purge to avoid polluting the status endpoints w/ stale state\n                    self.block_synchronizer.purge();\n                    self.state = ReactorState::Validate;\n                    (Duration::ZERO, effects)\n                }\n            },\n            ReactorState::Validate => match self.validate_instruction(effect_builder, rng) {\n                ValidateInstruction::Fatal(msg) => {\n                    (Duration::ZERO, fatal!(effect_builder, \"{}\", msg).ignore())\n                }\n                ValidateInstruction::ShutdownForUpgrade => {\n                    
info!(\"Validate: switch to ShutdownForUpgrade\");\n                    self.switch_to_shutdown_for_upgrade();\n                    (Duration::ZERO, Effects::new())\n                }\n                ValidateInstruction::CheckLater(msg, wait) => {\n                    debug!(\"Validate: {}\", msg);\n                    (wait, Effects::new())\n                }\n                ValidateInstruction::Do(wait, effects) => {\n                    trace!(\"Validate: node is processing effects\");\n                    (wait, effects)\n                }\n                ValidateInstruction::CatchUp => match self.deactivate_consensus_voting() {\n                    Ok(_) => {\n                        info!(\"Validate: switch to CatchUp\");\n                        self.state = ReactorState::CatchUp;\n                        (Duration::ZERO, Effects::new())\n                    }\n                    Err(msg) => (Duration::ZERO, fatal!(effect_builder, \"{}\", msg).ignore()),\n                },\n                ValidateInstruction::KeepUp => match self.deactivate_consensus_voting() {\n                    Ok(_) => {\n                        info!(\"Validate: switch to KeepUp\");\n                        self.state = ReactorState::KeepUp;\n                        (Duration::ZERO, Effects::new())\n                    }\n                    Err(msg) => (Duration::ZERO, fatal!(effect_builder, \"{}\", msg).ignore()),\n                },\n            },\n            ReactorState::ShutdownForUpgrade => {\n                match self.upgrade_shutdown_instruction(effect_builder) {\n                    UpgradeShutdownInstruction::Fatal(msg) => (\n                        Duration::ZERO,\n                        fatal!(effect_builder, \"ShutdownForUpgrade: {}\", msg).ignore(),\n                    ),\n                    UpgradeShutdownInstruction::CheckLater(msg, wait) => {\n                        debug!(\"ShutdownForUpgrade: {}\", msg);\n                        (wait, Effects::new())\n 
                   }\n                    UpgradeShutdownInstruction::Do(wait, effects) => {\n                        trace!(\"ShutdownForUpgrade: node is processing effects\");\n                        (wait, effects)\n                    }\n                }\n            }\n            ReactorState::ShutdownAfterCatchingUp => {\n                let effects = effect_builder.immediately().event(|()| {\n                    MainEvent::ControlAnnouncement(ControlAnnouncement::ShutdownAfterCatchingUp)\n                });\n                (Duration::ZERO, effects)\n            }\n        }\n    }\n\n    // NOTE: the order in which components are initialized is purposeful,\n    // so don't alter the order without understanding the semantics\n    fn initialize_next_component(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n    ) -> Option<Effects<MainEvent>> {\n        // open the diagnostic port first to make sure it can bind & to be responsive during init.\n        if let Some(effects) = utils::initialize_component(\n            effect_builder,\n            &mut self.diagnostics_port,\n            MainEvent::DiagnosticsPort(diagnostics_port::Event::Initialize),\n        ) {\n            return Some(effects);\n        }\n        // init event stream to make sure it can bind & allow early client connection\n        if let Some(effects) = utils::initialize_component(\n            effect_builder,\n            &mut self.event_stream_server,\n            MainEvent::EventStreamServer(event_stream_server::Event::Initialize),\n        ) {\n            return Some(effects);\n        }\n        // init upgrade watcher to make sure we have file access & to observe possible upgrade\n        // this should be init'd before the rest & rpc servers as the status endpoints include\n        // detected upgrade info.\n        if let Some(effects) = utils::initialize_component(\n            effect_builder,\n            &mut self.upgrade_watcher,\n            
MainEvent::UpgradeWatcher(upgrade_watcher::Event::Initialize),\n        ) {\n            return Some(effects);\n        }\n\n        // initialize transaction buffer from local storage; on a new node this is nearly a noop\n        // but on a restarting node it can be relatively time consuming (depending upon TTL and\n        // how many transactions there have been within the TTL)\n        if let Some(effects) = self\n            .transaction_buffer\n            .initialize_component(effect_builder, &self.storage)\n        {\n            return Some(effects);\n        }\n\n        // bring up networking near-to-last to avoid unnecessary premature connectivity\n        if let Some(effects) = utils::initialize_component(\n            effect_builder,\n            &mut self.net,\n            MainEvent::Network(network::Event::Initialize),\n        ) {\n            return Some(effects);\n        }\n\n        // bring up the BlockSynchronizer after Network to start it's self-perpetuating\n        // dishonest peer announcing behavior\n        if let Some(effects) = utils::initialize_component(\n            effect_builder,\n            &mut self.block_synchronizer,\n            MainEvent::BlockSynchronizer(block_synchronizer::Event::Initialize),\n        ) {\n            return Some(effects);\n        }\n\n        // bring up rpc and rest server last to defer complications (such as put_transaction) and\n        // for it to be able to answer to /status, which requires various other components to be\n        // initialized\n        if let Some(effects) = utils::initialize_component(\n            effect_builder,\n            &mut self.rest_server,\n            MainEvent::RestServer(rest_server::Event::Initialize),\n        ) {\n            return Some(effects);\n        }\n\n        // bring up binary port\n        if let Some(effects) = utils::initialize_component(\n            effect_builder,\n            &mut self.binary_port,\n            
MainEvent::BinaryPort(binary_port::Event::Initialize),\n        ) {\n            return Some(effects);\n        }\n\n        None\n    }\n\n    fn commit_genesis(&mut self, effect_builder: EffectBuilder<MainEvent>) -> GenesisInstruction {\n        let genesis_timestamp = match self\n            .chainspec\n            .protocol_config\n            .activation_point\n            .genesis_timestamp()\n        {\n            None => {\n                return GenesisInstruction::Fatal(\n                    \"CommitGenesis: invalid chainspec activation point\".to_string(),\n                );\n            }\n            Some(timestamp) => timestamp,\n        };\n\n        // global state starts empty and gets populated based upon chainspec artifacts\n        let post_state_hash = match self.contract_runtime.commit_genesis(\n            self.chainspec.clone().as_ref(),\n            self.chainspec_raw_bytes.clone().as_ref(),\n        ) {\n            GenesisResult::Fatal(msg) => {\n                return GenesisInstruction::Fatal(msg);\n            }\n            GenesisResult::Failure(err) => {\n                return GenesisInstruction::Fatal(format!(\"genesis error: {}\", err));\n            }\n            GenesisResult::Success {\n                post_state_hash, ..\n            } => post_state_hash,\n        };\n\n        info!(\n            %post_state_hash,\n            %genesis_timestamp,\n            network_name = %self.chainspec.network_config.name,\n            \"CommitGenesis: successful commit; initializing contract runtime\"\n        );\n\n        let genesis_block_height = 0;\n        self.initialize_contract_runtime(\n            genesis_block_height,\n            post_state_hash,\n            BlockHash::default(),\n            Digest::default(),\n        );\n\n        let era_id = EraId::default();\n\n        // as this is a genesis validator, there is no historical syncing necessary\n        // thus, the retrograde latch is immediately set\n        
self.validator_matrix\n            .register_retrograde_latch(Some(era_id));\n\n        // new networks will create a switch block at genesis to\n        // surface the genesis validators. older networks did not\n        // have this behavior.\n        let genesis_switch_block = FinalizedBlock::new(\n            BlockPayload::default(),\n            Some(InternalEraReport::default()),\n            genesis_timestamp,\n            era_id,\n            genesis_block_height,\n            PublicKey::System,\n        );\n\n        // this genesis block has no transactions, and will get\n        // handed off to be stored & marked complete after\n        // sufficient finality signatures have been collected.\n        let effects = effect_builder\n            .enqueue_block_for_execution(\n                ExecutableBlock::from_finalized_block_and_transactions(\n                    genesis_switch_block,\n                    vec![],\n                ),\n                MetaBlockState::new_not_to_be_gossiped(),\n            )\n            .ignore();\n\n        if self\n            .chainspec\n            .network_config\n            .accounts_config\n            .is_genesis_validator(self.validator_matrix.public_signing_key())\n        {\n            // validators should switch over and start making blocks\n            GenesisInstruction::Validator(Duration::ZERO, effects)\n        } else {\n            // non-validators should start receiving gossip about the block at height 1 soon\n            GenesisInstruction::NonValidator(self.control_logic_default_delay.into(), effects)\n        }\n    }\n\n    fn upgrading_instruction(&self) -> UpgradingInstruction {\n        UpgradingInstruction::should_commit_upgrade(\n            self.should_commit_upgrade(),\n            self.control_logic_default_delay.into(),\n            self.last_progress,\n            self.upgrade_timeout,\n        )\n    }\n\n    fn commit_upgrade(\n        &mut self,\n        effect_builder: 
EffectBuilder<MainEvent>,\n    ) -> Result<Effects<MainEvent>, String> {\n        let header = match self.get_local_tip_header()? {\n            Some(header) if header.is_switch_block() => header,\n            Some(_) => {\n                return Err(\"Latest complete block is not a switch block\".to_string());\n            }\n            None => {\n                return Err(\"No complete block found in storage\".to_string());\n            }\n        };\n\n        match self.chainspec.upgrade_config_from_parts(\n            *header.state_root_hash(),\n            header.protocol_version(),\n            self.chainspec.protocol_config.activation_point.era_id(),\n            self.chainspec_raw_bytes.clone(),\n        ) {\n            Ok(cfg) => {\n                let mut effects = Effects::new();\n                let next_block_height = header.height() + 1;\n                effects.extend(\n                    effect_builder\n                        .enqueue_protocol_upgrade(\n                            cfg,\n                            next_block_height,\n                            header.block_hash(),\n                            *header.accumulated_seed(),\n                        )\n                        .ignore(),\n                );\n                Ok(effects)\n            }\n            Err(msg) => Err(msg),\n        }\n    }\n\n    pub(super) fn should_shutdown_for_upgrade(&self) -> bool {\n        let recent_switch_block_headers = match self.storage.read_highest_switch_block_headers(1) {\n            Ok(headers) => headers,\n            Err(error) => {\n                error!(\n                    \"{:?}: error getting recent switch block headers: {}\",\n                    self.state, error\n                );\n                return false;\n            }\n        };\n\n        if let Some(block_header) = recent_switch_block_headers.last() {\n            let highest_block_complete =\n                self.storage.highest_complete_block_height() == 
Some(block_header.height());\n            return highest_block_complete\n                && self\n                    .upgrade_watcher\n                    .should_upgrade_after(block_header.era_id());\n        }\n        false\n    }\n\n    pub(super) fn should_commit_upgrade(&self) -> bool {\n        match self.get_local_tip_header() {\n            Ok(Some(block_header)) if block_header.is_switch_block() => {\n                block_header.is_last_block_before_activation(&self.chainspec.protocol_config)\n            }\n            Ok(Some(_) | None) => false,\n            Err(msg) => {\n                error!(\"{:?}: {}\", self.state, msg);\n                false\n            }\n        }\n    }\n\n    fn refresh_contract_runtime(&mut self) -> Result<(), String> {\n        if let Some(block_header) = self.get_local_tip_header()? {\n            let block_height = block_header.height();\n            let state_root_hash = block_header.state_root_hash();\n            let block_hash = block_header.block_hash();\n            let accumulated_seed = *block_header.accumulated_seed();\n            self.initialize_contract_runtime(\n                block_height + 1,\n                *state_root_hash,\n                block_hash,\n                accumulated_seed,\n            );\n        }\n        Ok(())\n    }\n\n    fn initialize_contract_runtime(\n        &mut self,\n        next_block_height: u64,\n        pre_state_root_hash: Digest,\n        parent_hash: BlockHash,\n        parent_seed: Digest,\n    ) {\n        // a better approach might be to have an announcement for immediate switch block\n        // creation, which the contract runtime handles and sets itself into\n        // the proper state to handle the unexpected block.\n        // in the meantime, this is expedient.\n        let initial_pre_state = ExecutionPreState::new(\n            next_block_height,\n            pre_state_root_hash,\n            parent_hash,\n            parent_seed,\n        );\n        
self.contract_runtime.set_initial_state(initial_pre_state);\n    }\n\n    pub(super) fn update_last_progress(\n        &mut self,\n        block_synchronizer_progress: &BlockSynchronizerProgress,\n        is_sync_back: bool,\n    ) {\n        if let BlockSynchronizerProgress::Syncing(_, _, last_progress) = block_synchronizer_progress\n        {\n            // do idleness / reattempt checking\n            let sync_progress = *last_progress;\n            if sync_progress > self.last_progress {\n                self.last_progress = sync_progress;\n                // if any progress has been made, reset attempts\n                self.attempts = 0;\n                let state = if is_sync_back {\n                    \"Historical\".to_string()\n                } else {\n                    format!(\"{}\", self.state)\n                };\n                debug!(\n                    \"{}: last_progress: {} {}\",\n                    state, self.last_progress, block_synchronizer_progress\n                );\n            }\n            if self.last_progress.elapsed() > self.idle_tolerance {\n                self.attempts += 1;\n            }\n        }\n    }\n\n    fn deactivate_consensus_voting(&mut self) -> Result<(), String> {\n        let deactivated_era_id = self.consensus.deactivate_current_era()?;\n        info!(\n            era_id = %deactivated_era_id,\n            \"{:?}: consensus deactivated\",\n            self.state\n        );\n        Ok(())\n    }\n\n    fn switch_to_shutdown_for_upgrade(&mut self) {\n        self.state = ReactorState::ShutdownForUpgrade;\n        self.switched_to_shutdown_for_upgrade = Timestamp::now();\n    }\n\n    fn get_local_tip_header(&self) -> Result<Option<BlockHeader>, String> {\n        match self\n            .storage\n            .get_highest_complete_block()\n            .map_err(|err| format!(\"Could not read highest complete block: {}\", err))?\n        {\n            Some(local_tip) => Ok(Some(local_tip.take_header())),\n 
           None => Ok(None),\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/error.rs",
    "content": "use thiserror::Error;\n\nuse casper_execution_engine::engine_state;\nuse casper_types::{bytesrepr, crypto::ErrorExt as CryptoError};\n\nuse crate::{\n    components::{\n        binary_port::BinaryPortInitializationError,\n        contract_runtime::{self, BlockExecutionError},\n        diagnostics_port, network, storage, upgrade_watcher,\n    },\n    utils::{ListeningError, LoadError},\n};\n\n/// Error type returned by the validator reactor.\n#[derive(Debug, Error)]\npub(crate) enum Error {\n    /// `UpgradeWatcher` component error.\n    #[error(\"upgrade watcher error: {0}\")]\n    UpgradeWatcher(#[from] upgrade_watcher::Error),\n\n    /// Metrics-related error\n    #[error(\"prometheus (metrics) error: {0}\")]\n    Metrics(#[from] prometheus::Error),\n\n    /// `Network` component error.\n    #[error(\"network error: {0}\")]\n    Network(#[from] network::Error),\n\n    /// An error starting one of the HTTP servers.\n    #[error(\"http server listening error: {0}\")]\n    HttpServerListening(#[from] ListeningError),\n\n    /// `Storage` component error.\n    #[error(\"storage error: {0}\")]\n    Storage(#[from] storage::FatalStorageError),\n\n    /// `Consensus` component error.\n    #[error(\"consensus error: {0}\")]\n    Consensus(#[from] anyhow::Error),\n\n    /// `ContractRuntime` component error.\n    #[error(\"contract runtime config error: {0}\")]\n    ContractRuntime(#[from] contract_runtime::ConfigError),\n\n    /// Block execution error.\n    #[error(transparent)]\n    BlockExecution(#[from] BlockExecutionError),\n\n    /// Engine state error.\n    #[error(transparent)]\n    EngineState(#[from] engine_state::Error),\n\n    /// [`bytesrepr`] error.\n    #[error(\"bytesrepr error: {0}\")]\n    BytesRepr(bytesrepr::Error),\n\n    /// `DiagnosticsPort` component error.\n    #[error(\"diagnostics port: {0}\")]\n    DiagnosticsPort(#[from] diagnostics_port::Error),\n\n    /// Error while loading the signing key pair.\n    #[error(\"signing key 
pair load error: {0}\")]\n    LoadSigningKeyPair(#[from] LoadError<CryptoError>),\n\n    /// `BinaryPort` component error.\n    #[error(\"binary port: {0}\")]\n    BinaryPort(#[from] BinaryPortInitializationError),\n}\n\nimpl From<bytesrepr::Error> for Error {\n    fn from(err: bytesrepr::Error) -> Self {\n        Self::BytesRepr(err)\n    }\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/event.rs",
    "content": "use std::fmt::{self, Debug, Display, Formatter};\n\nuse derive_more::From;\nuse serde::Serialize;\n\nuse casper_types::{\n    system::auction::EraValidators, Block, BlockHeader, BlockV2, EraId, FinalitySignature,\n    FinalitySignatureV2, Transaction,\n};\n\nuse crate::{\n    components::{\n        binary_port, block_accumulator,\n        block_synchronizer::{self, GlobalStateSynchronizerEvent, TrieAccumulatorEvent},\n        block_validator, consensus, contract_runtime, diagnostics_port, event_stream_server,\n        fetcher, gossiper,\n        network::{self, GossipedAddress},\n        rest_server, shutdown_trigger, storage, sync_leaper, transaction_acceptor,\n        transaction_buffer, upgrade_watcher,\n    },\n    effect::{\n        announcements::{\n            BlockAccumulatorAnnouncement, ConsensusAnnouncement, ContractRuntimeAnnouncement,\n            ControlAnnouncement, FatalAnnouncement, FetchedNewBlockAnnouncement,\n            FetchedNewFinalitySignatureAnnouncement, GossiperAnnouncement, MetaBlockAnnouncement,\n            PeerBehaviorAnnouncement, TransactionAcceptorAnnouncement,\n            TransactionBufferAnnouncement, UnexecutedBlockAnnouncement, UpgradeWatcherAnnouncement,\n        },\n        diagnostics_port::DumpConsensusStateRequest,\n        incoming::{\n            ConsensusDemand, ConsensusMessageIncoming, FinalitySignatureIncoming, GossiperIncoming,\n            NetRequestIncoming, NetResponseIncoming, TrieDemand, TrieRequestIncoming,\n            TrieResponseIncoming,\n        },\n        requests::{\n            AcceptTransactionRequest, BeginGossipRequest, BlockAccumulatorRequest,\n            BlockSynchronizerRequest, BlockValidationRequest, ChainspecRawBytesRequest,\n            ConsensusRequest, ContractRuntimeRequest, FetcherRequest, MakeBlockExecutableRequest,\n            MarkBlockCompletedRequest, MetricsRequest, NetworkInfoRequest, NetworkRequest,\n            ReactorInfoRequest, RestRequest, 
SetNodeStopRequest, StorageRequest,\n            SyncGlobalStateRequest, TransactionBufferRequest, TrieAccumulatorRequest,\n            UpgradeWatcherRequest,\n        },\n    },\n    protocol::Message,\n    reactor::ReactorEvent,\n    types::{BlockExecutionResultsOrChunk, LegacyDeploy, SyncLeap, TrieOrChunk},\n};\nuse casper_storage::block_store::types::ApprovalsHashes;\n\n// Enforce an upper bound for the `MainEvent` size, which is already quite hefty.\n// 192 is six 256 bit copies, ideally we'd be below, but for now we enforce this as an upper limit.\n// 200 is where the `large_enum_variant` clippy lint draws the line as well.\nconst _MAIN_EVENT_SIZE: usize = size_of::<MainEvent>();\n//const_assert!(_MAIN_EVENT_SIZE <= 192);\n\n/// Top-level event for the reactor.\n#[derive(Debug, From, Serialize)]\n#[must_use]\npub(crate) enum MainEvent {\n    #[from]\n    ControlAnnouncement(ControlAnnouncement),\n    #[from]\n    FatalAnnouncement(FatalAnnouncement),\n\n    /// Check the status of the reactor, should only be raised by the reactor itself\n    ReactorCrank,\n\n    #[from]\n    UpgradeWatcher(#[serde(skip_serializing)] upgrade_watcher::Event),\n    #[from]\n    UpgradeWatcherRequest(#[serde(skip_serializing)] UpgradeWatcherRequest),\n    #[from]\n    UpgradeWatcherAnnouncement(#[serde(skip_serializing)] UpgradeWatcherAnnouncement),\n    #[from]\n    BinaryPort(#[serde(skip_serializing)] binary_port::Event),\n    #[from]\n    RestServer(#[serde(skip_serializing)] rest_server::Event),\n    #[from]\n    MetricsRequest(#[serde(skip_serializing)] MetricsRequest),\n    #[from]\n    ChainspecRawBytesRequest(#[serde(skip_serializing)] ChainspecRawBytesRequest),\n    #[from]\n    EventStreamServer(#[serde(skip_serializing)] event_stream_server::Event),\n    #[from]\n    ShutdownTrigger(shutdown_trigger::Event),\n    #[from]\n    DiagnosticsPort(diagnostics_port::Event),\n    #[from]\n    DumpConsensusStateRequest(DumpConsensusStateRequest),\n    #[from]\n    
Network(network::Event<Message>),\n    #[from]\n    NetworkRequest(#[serde(skip_serializing)] NetworkRequest<Message>),\n    #[from]\n    NetworkInfoRequest(#[serde(skip_serializing)] NetworkInfoRequest),\n    #[from]\n    NetworkPeerBehaviorAnnouncement(PeerBehaviorAnnouncement),\n    #[from]\n    NetworkPeerRequestingData(NetRequestIncoming),\n    #[from]\n    NetworkPeerProvidingData(NetResponseIncoming),\n    #[from]\n    AddressGossiper(gossiper::Event<GossipedAddress>),\n    #[from]\n    AddressGossiperCrank(BeginGossipRequest<GossipedAddress>),\n    #[from]\n    AddressGossiperIncoming(GossiperIncoming<GossipedAddress>),\n    #[from]\n    AddressGossiperAnnouncement(#[serde(skip_serializing)] GossiperAnnouncement<GossipedAddress>),\n    #[from]\n    SyncLeaper(sync_leaper::Event),\n    #[from]\n    SyncLeapFetcher(#[serde(skip_serializing)] fetcher::Event<SyncLeap>),\n    #[from]\n    SyncLeapFetcherRequest(#[serde(skip_serializing)] FetcherRequest<SyncLeap>),\n    #[from]\n    Consensus(#[serde(skip_serializing)] consensus::Event),\n    #[from]\n    ConsensusMessageIncoming(ConsensusMessageIncoming),\n    #[from]\n    ConsensusDemand(ConsensusDemand),\n    #[from]\n    ConsensusAnnouncement(#[serde(skip_serializing)] ConsensusAnnouncement),\n    #[from]\n    BlockHeaderFetcher(#[serde(skip_serializing)] fetcher::Event<BlockHeader>),\n    #[from]\n    BlockHeaderFetcherRequest(#[serde(skip_serializing)] FetcherRequest<BlockHeader>),\n    #[from]\n    BlockValidator(#[serde(skip_serializing)] block_validator::Event),\n    #[from]\n    BlockValidatorRequest(#[serde(skip_serializing)] BlockValidationRequest),\n    #[from]\n    BlockAccumulator(#[serde(skip_serializing)] block_accumulator::Event),\n    #[from]\n    BlockAccumulatorRequest(#[serde(skip_serializing)] BlockAccumulatorRequest),\n    #[from]\n    BlockAccumulatorAnnouncement(#[serde(skip_serializing)] BlockAccumulatorAnnouncement),\n    #[from]\n    BlockSynchronizer(#[serde(skip_serializing)] 
block_synchronizer::Event),\n    #[from]\n    BlockSynchronizerRequest(#[serde(skip_serializing)] BlockSynchronizerRequest),\n\n    #[from]\n    ApprovalsHashesFetcher(#[serde(skip_serializing)] fetcher::Event<ApprovalsHashes>),\n    #[from]\n    ApprovalsHashesFetcherRequest(#[serde(skip_serializing)] FetcherRequest<ApprovalsHashes>),\n\n    #[from]\n    BlockGossiper(#[serde(skip_serializing)] gossiper::Event<BlockV2>),\n    #[from]\n    BlockGossiperIncoming(GossiperIncoming<BlockV2>),\n    #[from]\n    BlockGossiperAnnouncement(#[serde(skip_serializing)] GossiperAnnouncement<BlockV2>),\n    #[from]\n    BlockFetcher(#[serde(skip_serializing)] fetcher::Event<Block>),\n    #[from]\n    BlockFetcherRequest(#[serde(skip_serializing)] FetcherRequest<Block>),\n    #[from]\n    BlockFetcherAnnouncement(#[serde(skip_serializing)] FetchedNewBlockAnnouncement),\n    #[from]\n    MakeBlockExecutableRequest(MakeBlockExecutableRequest),\n    #[from]\n    MarkBlockCompletedRequest(MarkBlockCompletedRequest),\n    #[from]\n    FinalitySignatureIncoming(FinalitySignatureIncoming),\n    #[from]\n    FinalitySignatureGossiper(#[serde(skip_serializing)] gossiper::Event<FinalitySignatureV2>),\n    #[from]\n    FinalitySignatureGossiperIncoming(GossiperIncoming<FinalitySignatureV2>),\n    #[from]\n    FinalitySignatureGossiperAnnouncement(\n        #[serde(skip_serializing)] GossiperAnnouncement<FinalitySignatureV2>,\n    ),\n    #[from]\n    FinalitySignatureFetcher(#[serde(skip_serializing)] fetcher::Event<FinalitySignature>),\n    #[from]\n    FinalitySignatureFetcherRequest(#[serde(skip_serializing)] FetcherRequest<FinalitySignature>),\n    #[from]\n    FinalitySignatureFetcherAnnouncement(\n        #[serde(skip_serializing)] FetchedNewFinalitySignatureAnnouncement,\n    ),\n    #[from]\n    TransactionAcceptor(#[serde(skip_serializing)] transaction_acceptor::Event),\n    #[from]\n    AcceptTransactionRequest(AcceptTransactionRequest),\n    #[from]\n    
TransactionAcceptorAnnouncement(#[serde(skip_serializing)] TransactionAcceptorAnnouncement),\n    #[from]\n    TransactionGossiper(#[serde(skip_serializing)] gossiper::Event<Transaction>),\n    #[from]\n    TransactionGossiperIncoming(GossiperIncoming<Transaction>),\n    #[from]\n    TransactionGossiperAnnouncement(#[serde(skip_serializing)] GossiperAnnouncement<Transaction>),\n    #[from]\n    TransactionBuffer(#[serde(skip_serializing)] transaction_buffer::Event),\n    #[from]\n    TransactionBufferAnnouncement(#[serde(skip_serializing)] TransactionBufferAnnouncement),\n    #[from]\n    LegacyDeployFetcher(#[serde(skip_serializing)] fetcher::Event<LegacyDeploy>),\n    #[from]\n    LegacyDeployFetcherRequest(#[serde(skip_serializing)] FetcherRequest<LegacyDeploy>),\n    #[from]\n    TransactionFetcher(#[serde(skip_serializing)] fetcher::Event<Transaction>),\n    #[from]\n    TransactionFetcherRequest(#[serde(skip_serializing)] FetcherRequest<Transaction>),\n    #[from]\n    TransactionBufferRequest(TransactionBufferRequest),\n    #[from]\n    ContractRuntime(contract_runtime::Event),\n    #[from]\n    ContractRuntimeRequest(ContractRuntimeRequest),\n    #[from]\n    ContractRuntimeAnnouncement(#[serde(skip_serializing)] ContractRuntimeAnnouncement),\n    #[from]\n    TrieOrChunkFetcher(#[serde(skip_serializing)] fetcher::Event<TrieOrChunk>),\n    #[from]\n    TrieOrChunkFetcherRequest(#[serde(skip_serializing)] FetcherRequest<TrieOrChunk>),\n    #[from]\n    BlockExecutionResultsOrChunkFetcher(\n        #[serde(skip_serializing)] fetcher::Event<BlockExecutionResultsOrChunk>,\n    ),\n    #[from]\n    BlockExecutionResultsOrChunkFetcherRequest(\n        #[serde(skip_serializing)] FetcherRequest<BlockExecutionResultsOrChunk>,\n    ),\n    #[from]\n    TrieRequestIncoming(TrieRequestIncoming),\n    #[from]\n    TrieDemand(TrieDemand),\n    #[from]\n    TrieResponseIncoming(TrieResponseIncoming),\n    #[from]\n    Storage(storage::Event),\n    #[from]\n    
StorageRequest(StorageRequest),\n    #[from]\n    SetNodeStopRequest(SetNodeStopRequest),\n    #[from]\n    MainReactorRequest(ReactorInfoRequest),\n    #[from]\n    MetaBlockAnnouncement(MetaBlockAnnouncement),\n    #[from]\n    UnexecutedBlockAnnouncement(UnexecutedBlockAnnouncement),\n\n    // Event related to figuring out validators for blocks after upgrades.\n    GotBlockAfterUpgradeEraValidators(EraId, EraValidators, EraValidators),\n}\n\nimpl ReactorEvent for MainEvent {\n    fn is_control(&self) -> bool {\n        matches!(self, MainEvent::ControlAnnouncement(_))\n    }\n\n    fn try_into_control(self) -> Option<ControlAnnouncement> {\n        if let Self::ControlAnnouncement(ctrl_ann) = self {\n            Some(ctrl_ann)\n        } else {\n            None\n        }\n    }\n\n    #[inline]\n    fn description(&self) -> &'static str {\n        match self {\n            MainEvent::ReactorCrank => \"ReactorCrank\",\n            MainEvent::Network(_) => \"Network\",\n            MainEvent::SyncLeaper(_) => \"SyncLeaper\",\n            MainEvent::TransactionBuffer(_) => \"TransactionBuffer\",\n            MainEvent::Storage(_) => \"Storage\",\n            MainEvent::RestServer(_) => \"RestServer\",\n            MainEvent::EventStreamServer(_) => \"EventStreamServer\",\n            MainEvent::UpgradeWatcher(_) => \"UpgradeWatcher\",\n            MainEvent::Consensus(_) => \"Consensus\",\n            MainEvent::TransactionAcceptor(_) => \"TransactionAcceptor\",\n            MainEvent::AcceptTransactionRequest(_) => \"AcceptTransactionRequest\",\n            MainEvent::LegacyDeployFetcher(_) => \"LegacyDeployFetcher\",\n            MainEvent::TransactionFetcher(_) => \"TransactionFetcher\",\n            MainEvent::TransactionGossiper(_) => \"TransactionGossiper\",\n            MainEvent::FinalitySignatureGossiper(_) => \"FinalitySignatureGossiper\",\n            MainEvent::AddressGossiper(_) => \"AddressGossiper\",\n            MainEvent::BlockValidator(_) => 
\"BlockValidator\",\n            MainEvent::ContractRuntimeRequest(_) => \"ContractRuntimeRequest\",\n            MainEvent::BlockHeaderFetcher(_) => \"BlockHeaderFetcher\",\n            MainEvent::TrieOrChunkFetcher(_) => \"TrieOrChunkFetcher\",\n            MainEvent::BlockExecutionResultsOrChunkFetcher(_) => {\n                \"BlockExecutionResultsOrChunkFetcher\"\n            }\n            MainEvent::FinalitySignatureFetcher(_) => \"FinalitySignatureFetcher\",\n            MainEvent::SyncLeapFetcher(_) => \"SyncLeapFetcher\",\n            MainEvent::ApprovalsHashesFetcher(_) => \"ApprovalsHashesFetcher\",\n            MainEvent::ShutdownTrigger(_) => \"ShutdownTrigger\",\n            MainEvent::DiagnosticsPort(_) => \"DiagnosticsPort\",\n            MainEvent::NetworkRequest(_) => \"NetworkRequest\",\n            MainEvent::NetworkInfoRequest(_) => \"NetworkInfoRequest\",\n            MainEvent::BlockHeaderFetcherRequest(_) => \"BlockHeaderFetcherRequest\",\n            MainEvent::TrieOrChunkFetcherRequest(_) => \"TrieOrChunkFetcherRequest\",\n            MainEvent::BlockExecutionResultsOrChunkFetcherRequest(_) => {\n                \"BlockExecutionResultsOrChunkFetcherRequest\"\n            }\n            MainEvent::LegacyDeployFetcherRequest(_) => \"LegacyDeployFetcherRequest\",\n            MainEvent::TransactionFetcherRequest(_) => \"TransactionFetcherRequest\",\n            MainEvent::FinalitySignatureFetcherRequest(_) => \"FinalitySignatureFetcherRequest\",\n            MainEvent::SyncLeapFetcherRequest(_) => \"SyncLeapFetcherRequest\",\n            MainEvent::ApprovalsHashesFetcherRequest(_) => \"ApprovalsHashesFetcherRequest\",\n            MainEvent::TransactionBufferRequest(_) => \"TransactionBufferRequest\",\n            MainEvent::BlockValidatorRequest(_) => \"BlockValidatorRequest\",\n            MainEvent::MetricsRequest(_) => \"MetricsRequest\",\n            MainEvent::ChainspecRawBytesRequest(_) => \"ChainspecRawBytesRequest\",\n            
MainEvent::UpgradeWatcherRequest(_) => \"UpgradeWatcherRequest\",\n            MainEvent::StorageRequest(_) => \"StorageRequest\",\n            MainEvent::MarkBlockCompletedRequest(_) => \"MarkBlockCompletedRequest\",\n            MainEvent::DumpConsensusStateRequest(_) => \"DumpConsensusStateRequest\",\n            MainEvent::ControlAnnouncement(_) => \"ControlAnnouncement\",\n            MainEvent::FatalAnnouncement(_) => \"FatalAnnouncement\",\n            MainEvent::TransactionAcceptorAnnouncement(_) => \"TransactionAcceptorAnnouncement\",\n            MainEvent::ConsensusAnnouncement(_) => \"ConsensusAnnouncement\",\n            MainEvent::ContractRuntimeAnnouncement(_) => \"ContractRuntimeAnnouncement\",\n            MainEvent::TransactionGossiperAnnouncement(_) => \"TransactionGossiperAnnouncement\",\n            MainEvent::AddressGossiperAnnouncement(_) => \"AddressGossiperAnnouncement\",\n            MainEvent::UpgradeWatcherAnnouncement(_) => \"UpgradeWatcherAnnouncement\",\n            MainEvent::NetworkPeerBehaviorAnnouncement(_) => \"BlocklistAnnouncement\",\n            MainEvent::TransactionBufferAnnouncement(_) => \"TransactionBufferAnnouncement\",\n            MainEvent::FinalitySignatureFetcherAnnouncement(_) => {\n                \"FinalitySignatureFetcherAnnouncement\"\n            }\n            MainEvent::AddressGossiperCrank(_) => \"BeginAddressGossipRequest\",\n            MainEvent::ConsensusMessageIncoming(_) => \"ConsensusMessageIncoming\",\n            MainEvent::ConsensusDemand(_) => \"ConsensusDemand\",\n            MainEvent::TransactionGossiperIncoming(_) => \"TransactionGossiperIncoming\",\n            MainEvent::FinalitySignatureGossiperIncoming(_) => \"FinalitySignatureGossiperIncoming\",\n            MainEvent::AddressGossiperIncoming(_) => \"AddressGossiperIncoming\",\n            MainEvent::NetworkPeerRequestingData(_) => \"NetRequestIncoming\",\n            MainEvent::NetworkPeerProvidingData(_) => \"NetResponseIncoming\",\n   
         MainEvent::TrieRequestIncoming(_) => \"TrieRequestIncoming\",\n            MainEvent::TrieDemand(_) => \"TrieDemand\",\n            MainEvent::TrieResponseIncoming(_) => \"TrieResponseIncoming\",\n            MainEvent::FinalitySignatureIncoming(_) => \"FinalitySignatureIncoming\",\n            MainEvent::ContractRuntime(_) => \"ContractRuntime\",\n            MainEvent::FinalitySignatureGossiperAnnouncement(_) => {\n                \"FinalitySignatureGossiperAnnouncement\"\n            }\n            MainEvent::BlockAccumulator(_) => \"BlockAccumulator\",\n            MainEvent::BlockAccumulatorRequest(_) => \"BlockAccumulatorRequest\",\n            MainEvent::BlockAccumulatorAnnouncement(_) => \"BlockAccumulatorAnnouncement\",\n            MainEvent::BlockSynchronizer(_) => \"BlockSynchronizer\",\n            MainEvent::BlockSynchronizerRequest(_) => \"BlockSynchronizerRequest\",\n            MainEvent::BlockGossiper(_) => \"BlockGossiper\",\n            MainEvent::BlockGossiperIncoming(_) => \"BlockGossiperIncoming\",\n            MainEvent::BlockGossiperAnnouncement(_) => \"BlockGossiperAnnouncement\",\n            MainEvent::BlockFetcher(_) => \"BlockFetcher\",\n            MainEvent::BlockFetcherRequest(_) => \"BlockFetcherRequest\",\n            MainEvent::BlockFetcherAnnouncement(_) => \"BlockFetcherAnnouncement\",\n            MainEvent::SetNodeStopRequest(_) => \"SetNodeStopRequest\",\n            MainEvent::MainReactorRequest(_) => \"MainReactorRequest\",\n            MainEvent::MakeBlockExecutableRequest(_) => \"MakeBlockExecutableRequest\",\n            MainEvent::MetaBlockAnnouncement(_) => \"MetaBlockAnnouncement\",\n            MainEvent::UnexecutedBlockAnnouncement(_) => \"UnexecutedBlockAnnouncement\",\n            MainEvent::GotBlockAfterUpgradeEraValidators(_, _, _) => {\n                \"GotImmediateSwitchBlockEraValidators\"\n            }\n            MainEvent::BinaryPort(_) => \"BinaryPort\",\n        }\n    }\n}\n\nimpl Display 
for MainEvent {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            MainEvent::ReactorCrank => write!(f, \"reactor crank\"),\n            MainEvent::Storage(event) => write!(f, \"storage: {}\", event),\n            MainEvent::Network(event) => write!(f, \"network: {}\", event),\n            MainEvent::SyncLeaper(event) => write!(f, \"sync leaper: {}\", event),\n            MainEvent::TransactionBuffer(event) => write!(f, \"transaction buffer: {}\", event),\n            MainEvent::RestServer(event) => write!(f, \"rest server: {}\", event),\n            MainEvent::EventStreamServer(event) => {\n                write!(f, \"event stream server: {}\", event)\n            }\n            MainEvent::UpgradeWatcher(event) => write!(f, \"upgrade watcher: {}\", event),\n            MainEvent::Consensus(event) => write!(f, \"consensus: {}\", event),\n            MainEvent::TransactionAcceptor(event) => write!(f, \"transaction acceptor: {}\", event),\n            MainEvent::AcceptTransactionRequest(req) => write!(f, \"{}\", req),\n            MainEvent::LegacyDeployFetcher(event) => write!(f, \"legacy deploy fetcher: {}\", event),\n            MainEvent::TransactionFetcher(event) => write!(f, \"transaction fetcher: {}\", event),\n            MainEvent::TransactionGossiper(event) => write!(f, \"transaction gossiper: {}\", event),\n            MainEvent::FinalitySignatureGossiper(event) => {\n                write!(f, \"block signature gossiper: {}\", event)\n            }\n            MainEvent::AddressGossiper(event) => write!(f, \"address gossiper: {}\", event),\n            MainEvent::ContractRuntimeRequest(event) => {\n                write!(f, \"contract runtime request: {:?}\", event)\n            }\n            MainEvent::BlockValidator(event) => write!(f, \"block validator: {}\", event),\n            MainEvent::BlockHeaderFetcher(event) => {\n                write!(f, \"block header fetcher: {}\", event)\n            }\n          
  MainEvent::TrieOrChunkFetcher(event) => {\n                write!(f, \"trie or chunk fetcher: {}\", event)\n            }\n            MainEvent::BlockExecutionResultsOrChunkFetcher(event) => {\n                write!(f, \"block execution results or chunk fetcher: {}\", event)\n            }\n            MainEvent::FinalitySignatureFetcher(event) => {\n                write!(f, \"finality signature fetcher: {}\", event)\n            }\n            MainEvent::SyncLeapFetcher(event) => {\n                write!(f, \"sync leap fetcher: {}\", event)\n            }\n            MainEvent::ApprovalsHashesFetcher(event) => {\n                write!(f, \"approvals hashes fetcher: {}\", event)\n            }\n            MainEvent::BlockAccumulator(event) => {\n                write!(f, \"block accumulator: {}\", event)\n            }\n            MainEvent::BlockAccumulatorRequest(req) => {\n                write!(f, \"block accumulator request: {}\", req)\n            }\n            MainEvent::BlockAccumulatorAnnouncement(ann) => {\n                write!(f, \"block accumulator announcement: {}\", ann)\n            }\n            MainEvent::BlockSynchronizer(event) => {\n                write!(f, \"block synchronizer: {}\", event)\n            }\n            MainEvent::BlockSynchronizerRequest(req) => {\n                write!(f, \"block synchronizer request: {}\", req)\n            }\n            MainEvent::ShutdownTrigger(event) => write!(f, \"shutdown trigger: {}\", event),\n            MainEvent::DiagnosticsPort(event) => write!(f, \"diagnostics port: {}\", event),\n            MainEvent::NetworkRequest(req) => write!(f, \"network request: {}\", req),\n            MainEvent::NetworkInfoRequest(req) => {\n                write!(f, \"network info request: {}\", req)\n            }\n            MainEvent::ChainspecRawBytesRequest(req) => {\n                write!(f, \"chainspec loader request: {}\", req)\n            }\n            MainEvent::UpgradeWatcherRequest(req) 
=> {\n                write!(f, \"upgrade watcher request: {}\", req)\n            }\n            MainEvent::StorageRequest(req) => write!(f, \"storage request: {}\", req),\n            MainEvent::MarkBlockCompletedRequest(req) => {\n                write!(f, \"mark block completed request: {}\", req)\n            }\n            MainEvent::BlockHeaderFetcherRequest(request) => {\n                write!(f, \"block header fetcher request: {}\", request)\n            }\n            MainEvent::TrieOrChunkFetcherRequest(request) => {\n                write!(f, \"trie or chunk fetcher request: {}\", request)\n            }\n            MainEvent::BlockExecutionResultsOrChunkFetcherRequest(request) => {\n                write!(\n                    f,\n                    \"block execution results or chunk fetcher request: {}\",\n                    request\n                )\n            }\n            MainEvent::LegacyDeployFetcherRequest(request) => {\n                write!(f, \"legacy deploy fetcher request: {}\", request)\n            }\n            MainEvent::TransactionFetcherRequest(request) => {\n                write!(f, \"transaction fetcher request: {}\", request)\n            }\n            MainEvent::FinalitySignatureFetcherRequest(request) => {\n                write!(f, \"finality signature fetcher request: {}\", request)\n            }\n            MainEvent::SyncLeapFetcherRequest(request) => {\n                write!(f, \"sync leap fetcher request: {}\", request)\n            }\n            MainEvent::ApprovalsHashesFetcherRequest(request) => {\n                write!(f, \"approvals hashes fetcher request: {}\", request)\n            }\n            MainEvent::AddressGossiperCrank(request) => {\n                write!(f, \"begin address gossip request: {}\", request)\n            }\n            MainEvent::TransactionBufferRequest(req) => {\n                write!(f, \"transaction buffer request: {}\", req)\n            }\n            
MainEvent::BlockValidatorRequest(req) => {\n                write!(f, \"block validator request: {}\", req)\n            }\n            MainEvent::MetricsRequest(req) => write!(f, \"metrics request: {}\", req),\n            MainEvent::ControlAnnouncement(ctrl_ann) => write!(f, \"control: {}\", ctrl_ann),\n            MainEvent::FatalAnnouncement(fatal_ann) => write!(f, \"fatal: {}\", fatal_ann),\n            MainEvent::DumpConsensusStateRequest(req) => {\n                write!(f, \"dump consensus state: {}\", req)\n            }\n            MainEvent::TransactionAcceptorAnnouncement(ann) => {\n                write!(f, \"transaction acceptor announcement: {}\", ann)\n            }\n            MainEvent::ConsensusAnnouncement(ann) => {\n                write!(f, \"consensus announcement: {}\", ann)\n            }\n            MainEvent::ContractRuntimeAnnouncement(ann) => {\n                write!(f, \"block-executor announcement: {}\", ann)\n            }\n            MainEvent::TransactionGossiperAnnouncement(ann) => {\n                write!(f, \"transaction gossiper announcement: {}\", ann)\n            }\n            MainEvent::FinalitySignatureGossiperAnnouncement(ann) => {\n                write!(f, \"block signature gossiper announcement: {}\", ann)\n            }\n            MainEvent::AddressGossiperAnnouncement(ann) => {\n                write!(f, \"address gossiper announcement: {}\", ann)\n            }\n            MainEvent::TransactionBufferAnnouncement(ann) => {\n                write!(f, \"transaction buffer announcement: {}\", ann)\n            }\n            MainEvent::UpgradeWatcherAnnouncement(ann) => {\n                write!(f, \"chainspec loader announcement: {}\", ann)\n            }\n            MainEvent::NetworkPeerBehaviorAnnouncement(ann) => {\n                write!(f, \"blocklist announcement: {}\", ann)\n            }\n            MainEvent::FinalitySignatureFetcherAnnouncement(ann) => {\n                write!(f, \"finality 
signature fetcher announcement: {}\", ann)\n            }\n            MainEvent::ConsensusMessageIncoming(inner) => Display::fmt(inner, f),\n            MainEvent::ConsensusDemand(inner) => Display::fmt(inner, f),\n            MainEvent::TransactionGossiperIncoming(inner) => Display::fmt(inner, f),\n            MainEvent::FinalitySignatureGossiperIncoming(inner) => Display::fmt(inner, f),\n            MainEvent::AddressGossiperIncoming(inner) => Display::fmt(inner, f),\n            MainEvent::NetworkPeerRequestingData(inner) => Display::fmt(inner, f),\n            MainEvent::NetworkPeerProvidingData(inner) => Display::fmt(inner, f),\n            MainEvent::TrieRequestIncoming(inner) => Display::fmt(inner, f),\n            MainEvent::TrieDemand(inner) => Display::fmt(inner, f),\n            MainEvent::TrieResponseIncoming(inner) => Display::fmt(inner, f),\n            MainEvent::FinalitySignatureIncoming(inner) => Display::fmt(inner, f),\n            MainEvent::ContractRuntime(inner) => Display::fmt(inner, f),\n            MainEvent::BlockGossiper(inner) => Display::fmt(inner, f),\n            MainEvent::BlockGossiperIncoming(inner) => Display::fmt(inner, f),\n            MainEvent::BlockGossiperAnnouncement(inner) => Display::fmt(inner, f),\n            MainEvent::BlockFetcher(inner) => Display::fmt(inner, f),\n            MainEvent::BlockFetcherRequest(inner) => Display::fmt(inner, f),\n            MainEvent::BlockFetcherAnnouncement(inner) => Display::fmt(inner, f),\n            MainEvent::SetNodeStopRequest(inner) => Display::fmt(inner, f),\n            MainEvent::MainReactorRequest(inner) => Display::fmt(inner, f),\n            MainEvent::MakeBlockExecutableRequest(inner) => Display::fmt(inner, f),\n            MainEvent::MetaBlockAnnouncement(inner) => Display::fmt(inner, f),\n            MainEvent::UnexecutedBlockAnnouncement(inner) => Display::fmt(inner, f),\n            MainEvent::GotBlockAfterUpgradeEraValidators(era_id, _, _) => {\n                
write!(\n                    f,\n                    \"got era validators for block after an upgrade in era {}\",\n                    era_id\n                )\n            }\n            MainEvent::BinaryPort(inner) => Display::fmt(inner, f),\n        }\n    }\n}\n\nimpl From<SyncGlobalStateRequest> for MainEvent {\n    fn from(request: SyncGlobalStateRequest) -> Self {\n        MainEvent::BlockSynchronizer(block_synchronizer::Event::GlobalStateSynchronizer(\n            request.into(),\n        ))\n    }\n}\n\nimpl From<TrieAccumulatorRequest> for MainEvent {\n    fn from(request: TrieAccumulatorRequest) -> Self {\n        MainEvent::BlockSynchronizer(block_synchronizer::Event::GlobalStateSynchronizer(\n            GlobalStateSynchronizerEvent::TrieAccumulator(request.into()),\n        ))\n    }\n}\n\nimpl From<GlobalStateSynchronizerEvent> for MainEvent {\n    fn from(event: GlobalStateSynchronizerEvent) -> Self {\n        MainEvent::BlockSynchronizer(event.into())\n    }\n}\n\nimpl From<TrieAccumulatorEvent> for MainEvent {\n    fn from(event: TrieAccumulatorEvent) -> Self {\n        MainEvent::BlockSynchronizer(block_synchronizer::Event::GlobalStateSynchronizer(\n            event.into(),\n        ))\n    }\n}\n\nimpl From<RestRequest> for MainEvent {\n    fn from(request: RestRequest) -> Self {\n        MainEvent::RestServer(rest_server::Event::RestRequest(request))\n    }\n}\n\nimpl From<NetworkRequest<consensus::ConsensusMessage>> for MainEvent {\n    fn from(request: NetworkRequest<consensus::ConsensusMessage>) -> Self {\n        MainEvent::NetworkRequest(request.map_payload(Message::from))\n    }\n}\n\nimpl From<NetworkRequest<gossiper::Message<Transaction>>> for MainEvent {\n    fn from(request: NetworkRequest<gossiper::Message<Transaction>>) -> Self {\n        MainEvent::NetworkRequest(request.map_payload(Message::from))\n    }\n}\n\nimpl From<NetworkRequest<gossiper::Message<BlockV2>>> for MainEvent {\n    fn from(request: 
NetworkRequest<gossiper::Message<BlockV2>>) -> Self {\n        MainEvent::NetworkRequest(request.map_payload(Message::from))\n    }\n}\n\nimpl From<NetworkRequest<gossiper::Message<FinalitySignatureV2>>> for MainEvent {\n    fn from(request: NetworkRequest<gossiper::Message<FinalitySignatureV2>>) -> Self {\n        MainEvent::NetworkRequest(request.map_payload(Message::from))\n    }\n}\n\nimpl From<NetworkRequest<gossiper::Message<GossipedAddress>>> for MainEvent {\n    fn from(request: NetworkRequest<gossiper::Message<GossipedAddress>>) -> Self {\n        MainEvent::NetworkRequest(request.map_payload(Message::from))\n    }\n}\n\nimpl From<ConsensusRequest> for MainEvent {\n    fn from(request: ConsensusRequest) -> Self {\n        MainEvent::Consensus(consensus::Event::ConsensusRequest(request))\n    }\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/fetchers.rs",
    "content": "use datasize::DataSize;\nuse prometheus::Registry;\n\nuse casper_types::{Block, BlockHeader, FinalitySignature, Transaction};\n\nuse crate::{\n    components::{fetcher, fetcher::Fetcher, Component},\n    effect::{announcements::TransactionAcceptorAnnouncement, EffectBuilder, Effects},\n    reactor,\n    reactor::main_reactor::MainEvent,\n    types::{BlockExecutionResultsOrChunk, LegacyDeploy, SyncLeap, TrieOrChunk},\n    utils::Source,\n    FetcherConfig, NodeRng,\n};\nuse casper_storage::block_store::types::ApprovalsHashes;\n\n#[derive(DataSize, Debug)]\npub(super) struct Fetchers {\n    sync_leap_fetcher: Fetcher<SyncLeap>,\n    block_fetcher: Fetcher<Block>,\n    block_header_by_hash_fetcher: Fetcher<BlockHeader>,\n    approvals_hashes_fetcher: Fetcher<ApprovalsHashes>,\n    finality_signature_fetcher: Fetcher<FinalitySignature>,\n    legacy_deploy_fetcher: Fetcher<LegacyDeploy>,\n    transaction_fetcher: Fetcher<Transaction>,\n    trie_or_chunk_fetcher: Fetcher<TrieOrChunk>,\n    block_execution_results_or_chunk_fetcher: Fetcher<BlockExecutionResultsOrChunk>,\n}\n\nimpl Fetchers {\n    pub(super) fn new(\n        config: &FetcherConfig,\n        metrics_registry: &Registry,\n    ) -> Result<Self, prometheus::Error> {\n        Ok(Fetchers {\n            sync_leap_fetcher: Fetcher::new(\"sync_leap_fetcher\", config, metrics_registry)?,\n            block_header_by_hash_fetcher: Fetcher::new(\"block_header\", config, metrics_registry)?,\n            approvals_hashes_fetcher: Fetcher::new(\"approvals_hashes\", config, metrics_registry)?,\n            finality_signature_fetcher: Fetcher::new(\n                \"finality_signature_fetcher\",\n                config,\n                metrics_registry,\n            )?,\n            legacy_deploy_fetcher: Fetcher::new(\"legacy_deploy\", config, metrics_registry)?,\n            block_fetcher: Fetcher::new(\"block\", config, metrics_registry)?,\n            transaction_fetcher: 
Fetcher::new(\"transaction\", config, metrics_registry)?,\n            trie_or_chunk_fetcher: Fetcher::new(\"trie_or_chunk\", config, metrics_registry)?,\n            block_execution_results_or_chunk_fetcher: Fetcher::new(\n                \"block_execution_results_or_chunk_fetcher\",\n                config,\n                metrics_registry,\n            )?,\n        })\n    }\n\n    pub(super) fn dispatch_fetcher_event(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        rng: &mut NodeRng,\n        event: MainEvent,\n    ) -> Effects<MainEvent> {\n        match event {\n            MainEvent::BlockFetcher(event) => reactor::wrap_effects(\n                MainEvent::BlockFetcher,\n                self.block_fetcher.handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::BlockFetcherRequest(request) => reactor::wrap_effects(\n                MainEvent::BlockFetcher,\n                self.block_fetcher\n                    .handle_event(effect_builder, rng, request.into()),\n            ),\n            MainEvent::SyncLeapFetcher(event) => reactor::wrap_effects(\n                MainEvent::SyncLeapFetcher,\n                self.sync_leap_fetcher\n                    .handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::SyncLeapFetcherRequest(request) => reactor::wrap_effects(\n                MainEvent::SyncLeapFetcher,\n                self.sync_leap_fetcher\n                    .handle_event(effect_builder, rng, request.into()),\n            ),\n            MainEvent::BlockHeaderFetcher(event) => reactor::wrap_effects(\n                MainEvent::BlockHeaderFetcher,\n                self.block_header_by_hash_fetcher\n                    .handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::BlockHeaderFetcherRequest(request) => reactor::wrap_effects(\n                MainEvent::BlockHeaderFetcher,\n                self.block_header_by_hash_fetcher\n       
             .handle_event(effect_builder, rng, request.into()),\n            ),\n            MainEvent::ApprovalsHashesFetcher(event) => reactor::wrap_effects(\n                MainEvent::ApprovalsHashesFetcher,\n                self.approvals_hashes_fetcher\n                    .handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::ApprovalsHashesFetcherRequest(request) => reactor::wrap_effects(\n                MainEvent::ApprovalsHashesFetcher,\n                self.approvals_hashes_fetcher\n                    .handle_event(effect_builder, rng, request.into()),\n            ),\n            MainEvent::FinalitySignatureFetcher(event) => reactor::wrap_effects(\n                MainEvent::FinalitySignatureFetcher,\n                self.finality_signature_fetcher\n                    .handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::FinalitySignatureFetcherRequest(request) => reactor::wrap_effects(\n                MainEvent::FinalitySignatureFetcher,\n                self.finality_signature_fetcher\n                    .handle_event(effect_builder, rng, request.into()),\n            ),\n            MainEvent::LegacyDeployFetcher(event) => reactor::wrap_effects(\n                MainEvent::LegacyDeployFetcher,\n                self.legacy_deploy_fetcher\n                    .handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::LegacyDeployFetcherRequest(request) => reactor::wrap_effects(\n                MainEvent::LegacyDeployFetcher,\n                self.legacy_deploy_fetcher\n                    .handle_event(effect_builder, rng, request.into()),\n            ),\n            MainEvent::TransactionFetcher(event) => reactor::wrap_effects(\n                MainEvent::TransactionFetcher,\n                self.transaction_fetcher\n                    .handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::TransactionFetcherRequest(request) => 
reactor::wrap_effects(\n                MainEvent::TransactionFetcher,\n                self.transaction_fetcher\n                    .handle_event(effect_builder, rng, request.into()),\n            ),\n            MainEvent::TrieOrChunkFetcher(event) => reactor::wrap_effects(\n                MainEvent::TrieOrChunkFetcher,\n                self.trie_or_chunk_fetcher\n                    .handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::TrieOrChunkFetcherRequest(request) => reactor::wrap_effects(\n                MainEvent::TrieOrChunkFetcher,\n                self.trie_or_chunk_fetcher\n                    .handle_event(effect_builder, rng, request.into()),\n            ),\n            MainEvent::BlockExecutionResultsOrChunkFetcher(event) => reactor::wrap_effects(\n                MainEvent::BlockExecutionResultsOrChunkFetcher,\n                self.block_execution_results_or_chunk_fetcher.handle_event(\n                    effect_builder,\n                    rng,\n                    event,\n                ),\n            ),\n            MainEvent::BlockExecutionResultsOrChunkFetcherRequest(request) => {\n                reactor::wrap_effects(\n                    MainEvent::BlockExecutionResultsOrChunkFetcher,\n                    self.block_execution_results_or_chunk_fetcher.handle_event(\n                        effect_builder,\n                        rng,\n                        request.into(),\n                    ),\n                )\n            }\n\n            // MISC DISPATCHING\n            MainEvent::TransactionAcceptorAnnouncement(\n                TransactionAcceptorAnnouncement::AcceptedNewTransaction {\n                    transaction,\n                    source,\n                },\n            ) if matches!(source, Source::Peer(..)) => reactor::wrap_effects(\n                MainEvent::TransactionFetcher,\n                self.transaction_fetcher.handle_event(\n                    effect_builder,\n          
          rng,\n                    fetcher::Event::GotRemotely {\n                        item: Box::new((*transaction).clone()),\n                        source,\n                    },\n                ),\n            ),\n            // allow non-fetcher events to fall thru\n            _ => Effects::new(),\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/genesis_instruction.rs",
    "content": "use std::time::Duration;\n\nuse crate::{effect::Effects, reactor::main_reactor::MainEvent};\n\npub(super) enum GenesisInstruction {\n    Validator(Duration, Effects<MainEvent>),\n    NonValidator(Duration, Effects<MainEvent>),\n    Fatal(String),\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/keep_up.rs",
    "content": "use std::{\n    fmt::{Display, Formatter},\n    time::Duration,\n};\n\nuse either::Either;\nuse tracing::{debug, error, info, warn};\n\nuse casper_storage::data_access_layer::EraValidatorsRequest;\nuse casper_types::{ActivationPoint, BlockHash, BlockHeader, EraId, Timestamp};\n\nuse crate::{\n    components::{\n        block_accumulator::{SyncIdentifier, SyncInstruction},\n        block_synchronizer::BlockSynchronizerProgress,\n        storage::HighestOrphanedBlockResult,\n        sync_leaper,\n        sync_leaper::{LeapActivityError, LeapState},\n    },\n    effect::{\n        requests::BlockSynchronizerRequest, EffectBuilder, EffectExt, EffectResultExt, Effects,\n    },\n    reactor::main_reactor::{MainEvent, MainReactor},\n    types::{GlobalStatesMetadata, MaxTtl, SyncLeap, SyncLeapIdentifier},\n    NodeRng,\n};\n\npub(super) enum KeepUpInstruction {\n    Validate(Effects<MainEvent>),\n    Do(Duration, Effects<MainEvent>),\n    CheckLater(String, Duration),\n    CatchUp,\n    ShutdownForUpgrade,\n    Fatal(String),\n}\n\n#[derive(Debug, Clone, Copy)]\nenum SyncBackInstruction {\n    Sync {\n        sync_hash: BlockHash,\n        sync_era: EraId,\n    },\n    Syncing,\n    TtlSynced,\n    GenesisSynced,\n    NoSync,\n}\n\nimpl Display for SyncBackInstruction {\n    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {\n        match self {\n            SyncBackInstruction::Sync { sync_hash, .. 
} => {\n                write!(f, \"attempt to sync {}\", sync_hash)\n            }\n            SyncBackInstruction::Syncing => write!(f, \"syncing\"),\n            SyncBackInstruction::TtlSynced => write!(f, \"ttl reached\"),\n            SyncBackInstruction::GenesisSynced => write!(f, \"genesis reached\"),\n            SyncBackInstruction::NoSync => write!(f, \"configured to not sync\"),\n        }\n    }\n}\n\nimpl MainReactor {\n    pub(super) fn keep_up_instruction(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        rng: &mut NodeRng,\n    ) -> KeepUpInstruction {\n        if self.should_shutdown_for_upgrade() {\n            // controlled shutdown for protocol upgrade.\n            return KeepUpInstruction::ShutdownForUpgrade;\n        }\n\n        // if there is instruction, return to start working on it\n        // else fall thru with the current best available id for block syncing\n        let sync_identifier = match self.keep_up_process() {\n            Either::Right(keep_up_instruction) => return keep_up_instruction,\n            Either::Left(sync_identifier) => sync_identifier,\n        };\n        debug!(\n            ?sync_identifier,\n            \"KeepUp: sync identifier {}\",\n            sync_identifier.block_hash()\n        );\n        // we check with the block accumulator before doing sync work as it may be aware of one or\n        // more blocks that are higher than our current highest block\n        let sync_instruction = self.block_accumulator.sync_instruction(sync_identifier);\n        debug!(\n            ?sync_instruction,\n            \"KeepUp: sync_instruction {}\",\n            sync_instruction.block_hash()\n        );\n        if let Some(keep_up_instruction) =\n            self.keep_up_sync_instruction(effect_builder, sync_instruction)\n        {\n            return keep_up_instruction;\n        }\n\n        // we appear to be keeping up with the network and have some cycles to get other work done\n       
 // check to see if we should attempt to sync a missing historical block (if any)\n        debug!(\"KeepUp: keeping up with the network; try to sync an historical block\");\n        if let Some(keep_up_instruction) = self.sync_back_keep_up_instruction(effect_builder, rng) {\n            return keep_up_instruction;\n        }\n\n        // we are keeping up, and don't need to sync an historical block; check to see if this\n        // node should be participating in consensus this era (necessary for re-start scenarios)\n        self.keep_up_should_validate(effect_builder, rng)\n            .unwrap_or_else(|| {\n                KeepUpInstruction::CheckLater(\n                    \"node is keeping up\".to_string(),\n                    self.control_logic_default_delay.into(),\n                )\n            })\n    }\n\n    fn keep_up_should_validate(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        rng: &mut NodeRng,\n    ) -> Option<KeepUpInstruction> {\n        if let ActivationPoint::Genesis(genesis_timestamp) =\n            self.chainspec.protocol_config.activation_point\n        {\n            // this is a non-validator node in KeepUp prior to genesis; there is no reason to\n            // check consensus in this state, and it log spams if we do, so exiting early\n            if genesis_timestamp > Timestamp::now() {\n                return None;\n            }\n        }\n\n        if self.sync_handling.is_no_sync() {\n            // node is not permitted to be a validator with no_sync behavior.\n            return None;\n        }\n\n        if self.block_synchronizer.forward_progress().is_active() {\n            debug!(\"KeepUp: still syncing a block\");\n            return None;\n        }\n\n        let queue_depth = self.contract_runtime.queue_depth();\n        if queue_depth > 0 {\n            debug!(\"KeepUp: should_validate queue_depth {}\", queue_depth);\n            return None;\n        }\n        match 
self.create_required_eras(effect_builder, rng) {\n            Ok(Some(effects)) => Some(KeepUpInstruction::Validate(effects)),\n            Ok(None) => None,\n            Err(msg) => Some(KeepUpInstruction::Fatal(msg)),\n        }\n    }\n\n    fn keep_up_process(&mut self) -> Either<SyncIdentifier, KeepUpInstruction> {\n        let forward_progress = self.block_synchronizer.forward_progress();\n        self.update_last_progress(&forward_progress, false);\n        match forward_progress {\n            BlockSynchronizerProgress::Idle => {\n                // not working on syncing a block (ready to start a new one)\n                self.keep_up_idle()\n            }\n            BlockSynchronizerProgress::Syncing(block_hash, block_height, _) => {\n                // working on syncing a block\n                Either::Left(self.keep_up_syncing(block_hash, block_height))\n            }\n            // waiting for execution - forward only\n            BlockSynchronizerProgress::Executing(block_hash, block_height, era_id) => {\n                Either::Left(self.keep_up_executing(block_hash, block_height, era_id))\n            }\n            BlockSynchronizerProgress::Synced(block_hash, block_height, era_id) => {\n                // for a synced forward block -> we have header, body, any referenced deploys,\n                // sufficient finality (by weight) of signatures, associated global state and\n                // execution effects.\n                Either::Left(self.keep_up_synced(block_hash, block_height, era_id))\n            }\n        }\n    }\n\n    fn keep_up_idle(&mut self) -> Either<SyncIdentifier, KeepUpInstruction> {\n        match self.storage.get_highest_complete_block() {\n            Ok(Some(block)) => Either::Left(SyncIdentifier::LocalTip(\n                *block.hash(),\n                block.height(),\n                block.era_id(),\n            )),\n            Ok(None) => {\n                // something out of the ordinary occurred; it isn't 
legit to be in keep up mode\n                // with no complete local blocks. go back to catch up which will either correct\n                // or handle retry / shutdown behavior.\n                error!(\"KeepUp: block synchronizer idle, local storage has no complete blocks\");\n                Either::Right(KeepUpInstruction::CatchUp)\n            }\n            Err(error) => Either::Right(KeepUpInstruction::Fatal(format!(\n                \"failed to read highest complete block: {}\",\n                error\n            ))),\n        }\n    }\n\n    fn keep_up_syncing(\n        &mut self,\n        block_hash: BlockHash,\n        block_height: Option<u64>,\n    ) -> SyncIdentifier {\n        match block_height {\n            None => SyncIdentifier::BlockHash(block_hash),\n            Some(height) => SyncIdentifier::BlockIdentifier(block_hash, height),\n        }\n    }\n\n    fn keep_up_executing(\n        &mut self,\n        block_hash: BlockHash,\n        block_height: u64,\n        era_id: EraId,\n    ) -> SyncIdentifier {\n        SyncIdentifier::ExecutingBlockIdentifier(block_hash, block_height, era_id)\n    }\n\n    fn keep_up_synced(\n        &mut self,\n        block_hash: BlockHash,\n        block_height: u64,\n        era_id: EraId,\n    ) -> SyncIdentifier {\n        debug!(\"KeepUp: synced block: {}\", block_hash);\n        // important: scrape forward synchronizer here to return it to idle status\n        self.block_synchronizer.purge_forward();\n        SyncIdentifier::SyncedBlockIdentifier(block_hash, block_height, era_id)\n    }\n\n    fn keep_up_sync_instruction(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        sync_instruction: SyncInstruction,\n    ) -> Option<KeepUpInstruction> {\n        match sync_instruction {\n            SyncInstruction::Leap { .. } | SyncInstruction::LeapIntervalElapsed { .. 
} => {\n                if !self.sync_handling.is_isolated() {\n                    // the block accumulator is unsure what our block position is relative to the\n                    // network and wants to check peers for their notion of current tip.\n                    // to do this, we switch back to CatchUp which will engage the necessary\n                    // machinery to poll the network via the SyncLeap mechanic. if it turns out\n                    // we are actually at or near tip after all, we simply switch back to KeepUp\n                    // and continue onward. the accumulator is designed to periodically do this\n                    // if we've received no gossip about new blocks from peers within an interval.\n                    // this is to protect against partitioning and is not problematic behavior\n                    // when / if it occurs.\n                    Some(KeepUpInstruction::CatchUp)\n                } else {\n                    // If the node operates in isolated mode the assumption is that it might not\n                    // have any peers. 
So going back to CatchUp to query their\n                    // notion of tip might effectively disable nodes components to respond.\n                    // That's why - for isolated mode - we bypass this mechanism.\n                    None\n                }\n            }\n            SyncInstruction::BlockSync { block_hash } => {\n                debug!(\"KeepUp: BlockSync: {:?}\", block_hash);\n                if self\n                    .block_synchronizer\n                    .register_block_by_hash(block_hash, false)\n                {\n                    info!(%block_hash, \"KeepUp: BlockSync: registered block by hash\");\n                    Some(KeepUpInstruction::Do(\n                        Duration::ZERO,\n                        effect_builder.immediately().event(|_| {\n                            MainEvent::BlockSynchronizerRequest(BlockSynchronizerRequest::NeedNext)\n                        }),\n                    ))\n                } else {\n                    // this block has already been registered and is being worked on\n                    None\n                }\n            }\n            SyncInstruction::CaughtUp { .. 
} => {\n                // the accumulator thinks we are at the tip of the network and we don't need\n                // to do anything for the next one yet.\n                None\n            }\n        }\n    }\n\n    fn sync_back_keep_up_instruction(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        rng: &mut NodeRng,\n    ) -> Option<KeepUpInstruction> {\n        let sync_back_progress = self.block_synchronizer.historical_progress();\n        debug!(?sync_back_progress, \"KeepUp: historical sync back progress\");\n        self.update_last_progress(&sync_back_progress, true);\n        match self.sync_back_instruction(&sync_back_progress) {\n            Ok(Some(sbi @ sync_back_instruction)) => match sync_back_instruction {\n                SyncBackInstruction::NoSync\n                | SyncBackInstruction::GenesisSynced\n                | SyncBackInstruction::TtlSynced => {\n                    // we don't need to sync any historical blocks currently, so we clear both the\n                    // historical synchronizer and the sync back leap activity since they will not\n                    // be required anymore\n                    debug!(\"KeepUp: {}\", sbi);\n                    self.block_synchronizer.purge_historical();\n                    self.sync_leaper.purge();\n                    None\n                }\n                SyncBackInstruction::Syncing => {\n                    debug!(\"KeepUp: syncing historical; checking later\");\n                    Some(KeepUpInstruction::CheckLater(\n                        format!(\"historical {}\", SyncBackInstruction::Syncing),\n                        self.control_logic_default_delay.into(),\n                    ))\n                }\n                SyncBackInstruction::Sync {\n                    sync_hash,\n                    sync_era,\n                } => {\n                    debug!(%sync_hash, ?sync_era, validator_matrix_eras=?self.validator_matrix.eras(), \"KeepUp: 
historical sync back instruction\");\n                    if self.validator_matrix.has_era(&sync_era) {\n                        Some(self.sync_back_register(effect_builder, rng, sync_hash))\n                    } else {\n                        Some(self.sync_back_leap(effect_builder, rng, sync_hash))\n                    }\n                }\n            },\n            Ok(None) => None,\n            Err(msg) => Some(KeepUpInstruction::Fatal(msg)),\n        }\n    }\n\n    // Attempts to read the validators from the global states of the block after the upgrade and its\n    // parent; initiates fetching of the missing global states, if any.\n    fn try_read_validators_for_block_after_upgrade(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        global_states_metadata: GlobalStatesMetadata,\n    ) -> KeepUpInstruction {\n        // We try to read the validator sets from global states of two blocks - if either returns\n        // `RootNotFound`, we'll initiate fetching of the corresponding global state.\n        let effects = async move {\n            // Send the requests to contract runtime.\n            let before_era_validators_request =\n                EraValidatorsRequest::new(global_states_metadata.before_state_hash);\n            let before_era_validators_result = effect_builder\n                .get_era_validators_from_contract_runtime(before_era_validators_request)\n                .await;\n\n            let after_era_validators_request =\n                EraValidatorsRequest::new(global_states_metadata.after_state_hash);\n            let after_era_validators_result = effect_builder\n                .get_era_validators_from_contract_runtime(after_era_validators_request)\n                .await;\n\n            let lhs = before_era_validators_result.take_era_validators();\n            let rhs = after_era_validators_result.take_era_validators();\n\n            match (lhs, rhs) {\n                // ++ -> return era validator weights 
for before & after\n                (Some(before_era_validators), Some(after_era_validators)) => {\n                    Ok((before_era_validators, after_era_validators))\n                }\n                // -- => Both were absent - fetch global states for both blocks.\n                (None, None) => Err(vec![\n                    (\n                        global_states_metadata.before_hash,\n                        global_states_metadata.before_state_hash,\n                    ),\n                    (\n                        global_states_metadata.after_hash,\n                        global_states_metadata.after_state_hash,\n                    ),\n                ]),\n                // +- => The after-block's global state was missing - return the hashes.\n                (Some(_), None) => Err(vec![(\n                    global_states_metadata.after_hash,\n                    global_states_metadata.after_state_hash,\n                )]),\n                // -+ => The before-block's global state was missing - return the hashes.\n                (None, Some(_)) => Err(vec![(\n                    global_states_metadata.before_hash,\n                    global_states_metadata.before_state_hash,\n                )]),\n            }\n        }\n        .result(\n            // We got the era validators - just emit the event that will cause them to be compared,\n            // validators matrix to be updated and reactor to be cranked.\n            move |(before_era_validators, after_era_validators)| {\n                MainEvent::GotBlockAfterUpgradeEraValidators(\n                    global_states_metadata.after_era_id,\n                    before_era_validators,\n                    after_era_validators,\n                )\n            },\n            // A global state was missing - we ask the BlockSynchronizer to fetch what is needed.\n            |global_states_to_sync| {\n                
MainEvent::BlockSynchronizerRequest(BlockSynchronizerRequest::SyncGlobalStates(\n                    global_states_to_sync,\n                ))\n            },\n        );\n        // In either case, there are effects to be processed by the reactor.\n        KeepUpInstruction::Do(Duration::ZERO, effects)\n    }\n\n    fn sync_back_leap(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        rng: &mut NodeRng,\n        parent_hash: BlockHash,\n    ) -> KeepUpInstruction {\n        // in this flow, we are leveraging the SyncLeap behavior to go backwards\n        // rather than forwards. as we walk backwards from tip we know the block hash\n        // of the parent of the earliest contiguous block we have locally (aka a\n        // \"parent_hash\") but we don't know what era that parent block is in and we\n        // may or may not know the validator set for that era to validate finality\n        // signatures against. we use the leaper to gain awareness of the necessary\n        // trusted ancestors to our earliest contiguous block to do necessary validation.\n        let sync_back_status = self.sync_leaper.leap_status();\n        info!(\n            \"KeepUp: historical sync back status {} {}\",\n            parent_hash, sync_back_status\n        );\n        debug!(\n            ?parent_hash,\n            ?sync_back_status,\n            \"KeepUp: historical sync back status\"\n        );\n        match sync_back_status {\n            LeapState::Idle => {\n                debug!(\"KeepUp: historical sync back idle\");\n                self.sync_back_leaper_idle(effect_builder, rng, parent_hash, Duration::ZERO)\n            }\n            LeapState::Awaiting { .. 
} => KeepUpInstruction::CheckLater(\n                \"KeepUp: historical sync back is awaiting response\".to_string(),\n                self.control_logic_default_delay.into(),\n            ),\n            LeapState::Received {\n                best_available,\n                from_peers: _,\n                ..\n            } => self.sync_back_leap_received(effect_builder, *best_available),\n            LeapState::Failed { error, .. } => {\n                self.sync_back_leap_failed(effect_builder, rng, parent_hash, error)\n            }\n        }\n    }\n\n    fn sync_back_leap_failed(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        rng: &mut NodeRng,\n        parent_hash: BlockHash,\n        error: LeapActivityError,\n    ) -> KeepUpInstruction {\n        warn!(\n            %error,\n            \"KeepUp: failed historical sync back\",\n        );\n        self.sync_back_leaper_idle(\n            effect_builder,\n            rng,\n            parent_hash,\n            self.control_logic_default_delay.into(),\n        )\n    }\n\n    fn sync_back_leaper_idle(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        rng: &mut NodeRng,\n        parent_hash: BlockHash,\n        offset: Duration,\n    ) -> KeepUpInstruction {\n        // we get a random sampling of peers to ask.\n        let peers_to_ask = self.net.fully_connected_peers_random(\n            rng,\n            self.chainspec.core_config.simultaneous_peer_requests as usize,\n        );\n        if peers_to_ask.is_empty() {\n            return KeepUpInstruction::CheckLater(\n                \"no peers\".to_string(),\n                self.control_logic_default_delay.into(),\n            );\n        }\n\n        // latch accumulator progress to allow sync-leap time to do work\n        self.block_accumulator.reset_last_progress();\n\n        let sync_leap_identifier = SyncLeapIdentifier::sync_to_historical(parent_hash);\n\n        let effects = 
effect_builder.immediately().event(move |_| {\n            MainEvent::SyncLeaper(sync_leaper::Event::AttemptLeap {\n                sync_leap_identifier,\n                peers_to_ask,\n            })\n        });\n        KeepUpInstruction::Do(offset, effects)\n    }\n\n    fn sync_back_leap_received(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        sync_leap: SyncLeap,\n    ) -> KeepUpInstruction {\n        // use the leap response to update our recent switch block data (if relevant) and\n        // era validator weights. if there are other processes which are holding on discovery\n        // of relevant newly-seen era validator weights, they should naturally progress\n        // themselves via notification on the event loop.\n        let block_hash = sync_leap.highest_block_hash();\n        let block_height = sync_leap.highest_block_height();\n        info!(%sync_leap, %block_height, %block_hash, \"KeepUp: historical sync_back received\");\n\n        let era_validator_weights = sync_leap.era_validator_weights(\n            self.validator_matrix.fault_tolerance_threshold(),\n            &self.chainspec.protocol_config,\n        );\n        for evw in era_validator_weights {\n            let era_id = evw.era_id();\n            debug!(%era_id, \"KeepUp: attempt to register historical validators for era\");\n            if self.validator_matrix.register_era_validator_weights(evw) {\n                info!(\"KeepUp: got historical era {}\", era_id);\n            } else {\n                debug!(%era_id, \"KeepUp: historical era already present or is not relevant\");\n            }\n        }\n\n        if let Some(global_states_metadata) = sync_leap.global_states_for_sync_across_upgrade() {\n            self.try_read_validators_for_block_after_upgrade(effect_builder, global_states_metadata)\n        } else {\n            KeepUpInstruction::CheckLater(\n                \"historical sync back received\".to_string(),\n                
Duration::ZERO,\n            )\n        }\n    }\n\n    fn sync_back_register(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        rng: &mut NodeRng,\n        parent_hash: BlockHash,\n    ) -> KeepUpInstruction {\n        if self\n            .block_synchronizer\n            .register_block_by_hash(parent_hash, true)\n        {\n            // sync the parent_hash block; we get a random sampling of peers to ask.\n            // it is possible that we may get a random sampling that do not have the data\n            // we need, but the synchronizer should (eventually) detect that and ask for\n            // more peers via the NeedNext behavior.\n            let peers_to_ask = self.net.fully_connected_peers_random(\n                rng,\n                self.chainspec.core_config.simultaneous_peer_requests as usize,\n            );\n            debug!(\n                \"KeepUp: historical register_block_by_hash: {} peers count: {:?}\",\n                parent_hash,\n                peers_to_ask.len()\n            );\n            self.block_synchronizer\n                .register_peers(parent_hash, peers_to_ask);\n            KeepUpInstruction::Do(\n                Duration::ZERO,\n                effect_builder.immediately().event(|_| {\n                    MainEvent::BlockSynchronizerRequest(BlockSynchronizerRequest::NeedNext)\n                }),\n            )\n        } else {\n            KeepUpInstruction::CheckLater(\n                format!(\"historical syncing {}\", parent_hash),\n                self.control_logic_default_delay.into(),\n            )\n        }\n    }\n\n    fn sync_back_instruction(\n        &mut self,\n        block_synchronizer_progress: &BlockSynchronizerProgress,\n    ) -> Result<Option<SyncBackInstruction>, String> {\n        match block_synchronizer_progress {\n            BlockSynchronizerProgress::Syncing(_, _, _) => {\n                debug!(\"KeepUp: still syncing historical block\");\n                
return Ok(Some(SyncBackInstruction::Syncing));\n            }\n            BlockSynchronizerProgress::Executing(block_hash, height, _) => {\n                warn!(\n                    %block_hash,\n                    %height,\n                    \"Historical block synchronizer should not be waiting for the block to be executed\"\n                );\n            }\n            BlockSynchronizerProgress::Idle | BlockSynchronizerProgress::Synced(_, _, _) => {}\n        }\n        // in this flow there is no significant difference between Idle & Synced, as unlike in\n        // catchup and keepup flows there is no special activity necessary upon getting to Synced\n        // on an old block. in either case we will attempt to get the next needed block (if any).\n        // note: for a synced historical block we have header, body, global state, any execution\n        // effects, any referenced deploys, & sufficient finality (by weight) of signatures.\n        match self.storage.get_highest_orphaned_block_header() {\n            HighestOrphanedBlockResult::Orphan(highest_orphaned_block_header) => {\n                if let Some(synched) = self.synched(&highest_orphaned_block_header)? 
{\n                    debug!(?synched, \"synched result\");\n                    return Ok(Some(synched));\n                }\n                let (sync_hash, sync_era) =\n                    self.sync_hash_and_era(&highest_orphaned_block_header)?;\n                debug!(?sync_era, %sync_hash, \"KeepUp: historical sync target era and block hash\");\n\n                self.validator_matrix\n                    .register_retrograde_latch(Some(sync_era));\n                Ok(Some(SyncBackInstruction::Sync {\n                    sync_hash,\n                    sync_era,\n                }))\n            }\n            HighestOrphanedBlockResult::MissingHeader(height) => Err(format!(\n                \"KeepUp: storage is missing historical block header for height {}\",\n                height\n            )),\n            HighestOrphanedBlockResult::MissingHighestSequence => {\n                Err(\"KeepUp: storage is missing historical highest block sequence\".to_string())\n            }\n        }\n    }\n\n    fn synched(\n        &self,\n        highest_orphaned_block_header: &BlockHeader,\n    ) -> Result<Option<SyncBackInstruction>, String> {\n        // if we're configured to not sync, don't sync.\n        if self.sync_handling.is_no_sync() {\n            return Ok(Some(SyncBackInstruction::NoSync));\n        }\n\n        // if we've reached genesis, there's nothing left to sync.\n        if highest_orphaned_block_header.is_genesis() {\n            return Ok(Some(SyncBackInstruction::GenesisSynced));\n        }\n\n        if self.sync_handling.is_sync_to_genesis() {\n            return Ok(None);\n        }\n\n        // if sync to genesis is false, we require sync to ttl; i.e. if the TTL is 18\n        // hours we require sync back to see a contiguous / unbroken\n        // range of at least 18 hours worth of blocks. 
note however\n        // that we measure from the start of the active era (for consensus reasons),\n        // so this can be up to TTL + era length in practice\n\n        if let Some(highest_switch_block_header) = self\n            .storage\n            .read_highest_switch_block_headers(1)\n            .map_err(|err| err.to_string())?\n            .last()\n        {\n            debug!(\n                highest_switch_timestamp=?highest_switch_block_header.timestamp(),\n                highest_orphaned_timestamp=?highest_orphaned_block_header.timestamp(),\n                \"checking max ttl\");\n            let max_ttl: MaxTtl = self.chainspec.transaction_config.max_ttl.into();\n            if max_ttl.synced_to_ttl(\n                highest_switch_block_header.timestamp(),\n                highest_orphaned_block_header,\n            ) {\n                debug!(\"is synced to ttl\");\n                return Ok(Some(SyncBackInstruction::TtlSynced));\n            }\n        }\n\n        Ok(None)\n    }\n\n    fn sync_hash_and_era(\n        &self,\n        highest_orphaned_block_header: &BlockHeader,\n    ) -> Result<(BlockHash, EraId), String> {\n        let parent_hash = highest_orphaned_block_header.parent_hash();\n        debug!(?highest_orphaned_block_header, %parent_hash, \"KeepUp: highest orphaned historical block\");\n\n        // if we are in genesis era but do not have validators loaded for genesis era,\n        // attempt to skip to switch block of era 1 and leap from there; other validators\n        // must cite era 0 to prove trusted ancestors for era 1, which will resolve the issue\n        // when received by this node.\n        if highest_orphaned_block_header.era_id().is_genesis()\n            && !self\n                .validator_matrix\n                .has_era(&highest_orphaned_block_header.era_id())\n        {\n            match self\n                .storage\n                
.get_switch_block_by_era_id(&highest_orphaned_block_header.era_id().successor())\n            {\n                Ok(Some(switch)) => {\n                    debug!(\n                        ?highest_orphaned_block_header,\n                        \"KeepUp: historical sync in genesis era attempting correction for unmatrixed genesis validators\"\n                    );\n                    return Ok((*switch.hash(), switch.era_id()));\n                }\n                Ok(None) => return Err(\n                    \"In genesis era with no genesis validators and missing next era switch block\"\n                        .to_string(),\n                ),\n                Err(err) => return Err(err.to_string()),\n            }\n        }\n\n        match self.storage.read_block_header_by_hash(parent_hash) {\n            Ok(Some(parent_block_header)) => {\n                // even if we don't have a complete block (all parts and dependencies)\n                // we may have the parent's block header; if we do we also\n                // know its era which allows us to know if we have the validator\n                // set for that era or not\n                debug!(\n                    ?parent_block_header,\n                    \"KeepUp: historical sync found parent block header in storage\"\n                );\n                Ok((\n                    parent_block_header.block_hash(),\n                    parent_block_header.era_id(),\n                ))\n            }\n            Ok(None) => {\n                debug!(%parent_hash, \"KeepUp: historical sync did not find block header in storage\");\n                let era_id = match highest_orphaned_block_header.era_id().predecessor() {\n                    None => EraId::from(0),\n                    Some(predecessor) => {\n                        // we do not have the parent header and thus don't know what era\n                        // the parent block is in (it could be the same era or the previous\n                  
      // era). we assume the worst case and ask for the earlier era's\n                        // proof; subtracting 1 here is safe\n                        // since the case where era id is 0 is\n                        // handled above\n                        predecessor\n                    }\n                };\n                Ok((*parent_hash, era_id))\n            }\n            Err(err) => Err(err.to_string()),\n        }\n    }\n}\n\n#[cfg(test)]\npub(crate) fn synced_to_ttl(\n    latest_switch_block_header: &BlockHeader,\n    highest_orphaned_block_header: &BlockHeader,\n    max_ttl: casper_types::TimeDiff,\n) -> Result<bool, String> {\n    Ok(highest_orphaned_block_header.height() == 0\n        || is_timestamp_at_ttl(\n            latest_switch_block_header.timestamp(),\n            highest_orphaned_block_header.timestamp(),\n            max_ttl,\n        ))\n}\n\n#[cfg(test)]\nfn is_timestamp_at_ttl(\n    latest_switch_block_timestamp: Timestamp,\n    lowest_block_timestamp: Timestamp,\n    max_ttl: casper_types::TimeDiff,\n) -> bool {\n    lowest_block_timestamp < latest_switch_block_timestamp.saturating_sub(max_ttl)\n}\n\n#[cfg(test)]\nmod tests {\n    use std::str::FromStr;\n\n    use casper_types::{testing::TestRng, TestBlockBuilder, TimeDiff, Timestamp};\n\n    use crate::reactor::main_reactor::keep_up::{is_timestamp_at_ttl, synced_to_ttl};\n\n    const TWO_DAYS_SECS: u32 = 60 * 60 * 24 * 2;\n    const MAX_TTL: TimeDiff = TimeDiff::from_seconds(86400);\n\n    #[test]\n    fn should_be_at_ttl() {\n        let latest_switch_block_timestamp = Timestamp::from_str(\"2010-06-15 00:00:00.000\").unwrap();\n        let lowest_block_timestamp = Timestamp::from_str(\"2010-06-10 00:00:00.000\").unwrap();\n        let max_ttl = TimeDiff::from_seconds(TWO_DAYS_SECS);\n        assert!(is_timestamp_at_ttl(\n            latest_switch_block_timestamp,\n            lowest_block_timestamp,\n            max_ttl\n        ));\n    }\n\n    #[test]\n    fn 
should_not_be_at_ttl() {\n        let latest_switch_block_timestamp = Timestamp::from_str(\"2010-06-15 00:00:00.000\").unwrap();\n        let lowest_block_timestamp = Timestamp::from_str(\"2010-06-14 00:00:00.000\").unwrap();\n        let max_ttl = TimeDiff::from_seconds(TWO_DAYS_SECS);\n        assert!(!is_timestamp_at_ttl(\n            latest_switch_block_timestamp,\n            lowest_block_timestamp,\n            max_ttl\n        ));\n    }\n\n    #[test]\n    fn should_detect_ttl_at_the_boundary() {\n        let latest_switch_block_timestamp = Timestamp::from_str(\"2010-06-15 00:00:00.000\").unwrap();\n        let lowest_block_timestamp = Timestamp::from_str(\"2010-06-12 23:59:59.999\").unwrap();\n        let max_ttl = TimeDiff::from_seconds(TWO_DAYS_SECS);\n        assert!(is_timestamp_at_ttl(\n            latest_switch_block_timestamp,\n            lowest_block_timestamp,\n            max_ttl\n        ));\n\n        let latest_switch_block_timestamp = Timestamp::from_str(\"2010-06-15 00:00:00.000\").unwrap();\n        let lowest_block_timestamp = Timestamp::from_str(\"2010-06-13 00:00:00.000\").unwrap();\n        let max_ttl = TimeDiff::from_seconds(TWO_DAYS_SECS);\n        assert!(!is_timestamp_at_ttl(\n            latest_switch_block_timestamp,\n            lowest_block_timestamp,\n            max_ttl\n        ));\n\n        let latest_switch_block_timestamp = Timestamp::from_str(\"2010-06-15 00:00:00.000\").unwrap();\n        let lowest_block_timestamp = Timestamp::from_str(\"2010-06-13 00:00:00.001\").unwrap();\n        let max_ttl = TimeDiff::from_seconds(TWO_DAYS_SECS);\n        assert!(!is_timestamp_at_ttl(\n            latest_switch_block_timestamp,\n            lowest_block_timestamp,\n            max_ttl\n        ));\n    }\n\n    #[test]\n    fn should_detect_ttl_at_genesis() {\n        let rng = &mut TestRng::new();\n\n        let latest_switch_block = TestBlockBuilder::new()\n            .era(100)\n            .height(1000)\n            
.switch_block(true)\n            .build_versioned(rng);\n\n        let latest_orphaned_block = TestBlockBuilder::new()\n            .era(0)\n            .height(0)\n            .switch_block(true)\n            .build_versioned(rng);\n\n        assert_eq!(latest_orphaned_block.height(), 0);\n        assert_eq!(\n            synced_to_ttl(\n                &latest_switch_block.clone_header(),\n                &latest_orphaned_block.clone_header(),\n                MAX_TTL\n            ),\n            Ok(true)\n        );\n    }\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/memory_metrics.rs",
    "content": "use datasize::DataSize;\nuse prometheus::{self, Histogram, HistogramOpts, IntGauge, Registry};\nuse tracing::debug;\n\nuse super::MainReactor;\nuse crate::unregister_metric;\n\n/// Metrics for estimated heap memory usage for the main reactor.\n#[derive(Debug)]\npub(super) struct MemoryMetrics {\n    mem_total: IntGauge,\n    mem_metrics: IntGauge,\n    mem_net: IntGauge,\n    mem_address_gossiper: IntGauge,\n    mem_storage: IntGauge,\n    mem_contract_runtime: IntGauge,\n    mem_rpc_server: IntGauge,\n    mem_rest_server: IntGauge,\n    mem_event_stream_server: IntGauge,\n    mem_consensus: IntGauge,\n    mem_transaction_gossiper: IntGauge,\n    mem_finality_signature_gossiper: IntGauge,\n    mem_block_gossiper: IntGauge,\n    mem_transaction_buffer: IntGauge,\n    mem_block_validator: IntGauge,\n    mem_sync_leaper: IntGauge,\n    mem_transaction_acceptor: IntGauge,\n    mem_block_synchronizer: IntGauge,\n    mem_block_accumulator: IntGauge,\n    mem_fetchers: IntGauge,\n    mem_diagnostics_port: IntGauge,\n    mem_upgrade_watcher: IntGauge,\n    mem_binary_port: IntGauge,\n    /// Histogram detailing how long it took to measure memory usage.\n    mem_estimator_runtime_s: Histogram,\n    registry: Registry,\n}\n\nimpl MemoryMetrics {\n    /// Initializes a new set of memory metrics.\n    pub(super) fn new(registry: Registry) -> Result<Self, prometheus::Error> {\n        let mem_total = IntGauge::new(\"mem_total\", \"total memory usage in bytes\")?;\n        let mem_metrics = IntGauge::new(\"mem_metrics\", \"metrics memory usage in bytes\")?;\n        let mem_net = IntGauge::new(\"mem_net\", \"network memory usage in bytes\")?;\n        let mem_address_gossiper = IntGauge::new(\n            \"mem_address_gossiper\",\n            \"address_gossiper memory usage in bytes\",\n        )?;\n        let mem_storage = IntGauge::new(\"mem_storage\", \"storage memory usage in bytes\")?;\n        let mem_contract_runtime = IntGauge::new(\n            
\"mem_contract_runtime\",\n            \"contract runtime memory usage in bytes\",\n        )?;\n        let mem_rpc_server = IntGauge::new(\"mem_rpc_server\", \"rpc server memory usage in bytes\")?;\n        let mem_rest_server =\n            IntGauge::new(\"mem_rest_server\", \"rest server memory usage in bytes\")?;\n        let mem_event_stream_server = IntGauge::new(\n            \"mem_event_stream_server\",\n            \"event stream server memory usage in bytes\",\n        )?;\n        let mem_consensus = IntGauge::new(\"mem_consensus\", \"consensus memory usage in bytes\")?;\n        let mem_fetchers = IntGauge::new(\"mem_fetchers\", \"combined fetcher memory usage in bytes\")?;\n        let mem_transaction_gossiper = IntGauge::new(\n            \"mem_transaction_gossiper\",\n            \"transaction gossiper memory usage in bytes\",\n        )?;\n        let mem_finality_signature_gossiper = IntGauge::new(\n            \"mem_finality_signature_gossiper\",\n            \"finality signature gossiper memory usage in bytes\",\n        )?;\n        let mem_block_gossiper =\n            IntGauge::new(\"mem_block_gossiper\", \"block gossiper memory usage in bytes\")?;\n        let mem_transaction_buffer = IntGauge::new(\n            \"mem_transaction_buffer\",\n            \"transaction buffer memory usage in bytes\",\n        )?;\n        let mem_block_validator = IntGauge::new(\n            \"mem_block_validator\",\n            \"block validator memory usage in bytes\",\n        )?;\n        let mem_sync_leaper =\n            IntGauge::new(\"mem_sync_leaper\", \"sync leaper memory usage in bytes\")?;\n        let mem_transaction_acceptor = IntGauge::new(\n            \"mem_transaction_acceptor\",\n            \"transaction acceptor memory usage in bytes\",\n        )?;\n        let mem_block_synchronizer = IntGauge::new(\n            \"mem_block_synchronizer\",\n            \"block synchronizer memory usage in bytes\",\n        )?;\n        let 
mem_block_accumulator = IntGauge::new(\n            \"mem_block_accumulator\",\n            \"block accumulator memory usage in bytes\",\n        )?;\n        let mem_diagnostics_port = IntGauge::new(\n            \"mem_diagnostics_port\",\n            \"diagnostics port memory usage in bytes\",\n        )?;\n        let mem_upgrade_watcher = IntGauge::new(\n            \"mem_upgrade_watcher\",\n            \"upgrade watcher memory usage in bytes\",\n        )?;\n        let mem_binary_port =\n            IntGauge::new(\"mem_binary_port\", \"binary port memory usage in bytes\")?;\n        let mem_estimator_runtime_s = Histogram::with_opts(\n            HistogramOpts::new(\n                \"mem_estimator_runtime_s\",\n                \"time in seconds to estimate memory usage\",\n            )\n            //  Create buckets from one nanosecond to eight seconds.\n            .buckets(prometheus::exponential_buckets(0.000_4, 2.0, 13)?),\n        )?;\n\n        registry.register(Box::new(mem_total.clone()))?;\n        registry.register(Box::new(mem_metrics.clone()))?;\n        registry.register(Box::new(mem_net.clone()))?;\n        registry.register(Box::new(mem_address_gossiper.clone()))?;\n        registry.register(Box::new(mem_storage.clone()))?;\n        registry.register(Box::new(mem_contract_runtime.clone()))?;\n        registry.register(Box::new(mem_rpc_server.clone()))?;\n        registry.register(Box::new(mem_rest_server.clone()))?;\n        registry.register(Box::new(mem_event_stream_server.clone()))?;\n        registry.register(Box::new(mem_consensus.clone()))?;\n        registry.register(Box::new(mem_fetchers.clone()))?;\n        registry.register(Box::new(mem_transaction_gossiper.clone()))?;\n        registry.register(Box::new(mem_finality_signature_gossiper.clone()))?;\n        registry.register(Box::new(mem_block_gossiper.clone()))?;\n        registry.register(Box::new(mem_transaction_buffer.clone()))?;\n        
registry.register(Box::new(mem_block_validator.clone()))?;\n        registry.register(Box::new(mem_sync_leaper.clone()))?;\n        registry.register(Box::new(mem_transaction_acceptor.clone()))?;\n        registry.register(Box::new(mem_block_synchronizer.clone()))?;\n        registry.register(Box::new(mem_block_accumulator.clone()))?;\n        registry.register(Box::new(mem_diagnostics_port.clone()))?;\n        registry.register(Box::new(mem_upgrade_watcher.clone()))?;\n        registry.register(Box::new(mem_binary_port.clone()))?;\n        registry.register(Box::new(mem_estimator_runtime_s.clone()))?;\n\n        Ok(MemoryMetrics {\n            mem_total,\n            mem_metrics,\n            mem_net,\n            mem_address_gossiper,\n            mem_storage,\n            mem_contract_runtime,\n            mem_rpc_server,\n            mem_rest_server,\n            mem_event_stream_server,\n            mem_consensus,\n            mem_fetchers,\n            mem_transaction_gossiper,\n            mem_finality_signature_gossiper,\n            mem_block_gossiper,\n            mem_transaction_buffer,\n            mem_block_validator,\n            mem_sync_leaper,\n            mem_transaction_acceptor,\n            mem_block_synchronizer,\n            mem_block_accumulator,\n            mem_diagnostics_port,\n            mem_upgrade_watcher,\n            mem_binary_port,\n            mem_estimator_runtime_s,\n            registry,\n        })\n    }\n\n    /// Estimates memory usage and updates metrics.\n    pub(super) fn estimate(&self, reactor: &MainReactor) {\n        let timer = self.mem_estimator_runtime_s.start_timer();\n\n        let metrics = reactor.metrics.estimate_heap_size() as i64;\n        let network = reactor.net.estimate_heap_size() as i64;\n        let address_gossiper = reactor.address_gossiper.estimate_heap_size() as i64;\n        let storage = reactor.storage.estimate_heap_size() as i64;\n        let contract_runtime = 
reactor.contract_runtime.estimate_heap_size() as i64;\n        let rest_server = reactor.rest_server.estimate_heap_size() as i64;\n        let event_stream_server = reactor.event_stream_server.estimate_heap_size() as i64;\n        let consensus = reactor.consensus.estimate_heap_size() as i64;\n        let fetchers = reactor.fetchers.estimate_heap_size() as i64;\n        let transaction_gossiper = reactor.transaction_gossiper.estimate_heap_size() as i64;\n        let finality_signature_gossiper =\n            reactor.finality_signature_gossiper.estimate_heap_size() as i64;\n        let block_gossiper = reactor.block_gossiper.estimate_heap_size() as i64;\n        let transaction_buffer = reactor.transaction_buffer.estimate_heap_size() as i64;\n        let block_validator = reactor.block_validator.estimate_heap_size() as i64;\n        let sync_leaper = reactor.sync_leaper.estimate_heap_size() as i64;\n        let transaction_acceptor = reactor.transaction_acceptor.estimate_heap_size() as i64;\n        let block_synchronizer = reactor.block_synchronizer.estimate_heap_size() as i64;\n        let block_accumulator = reactor.block_accumulator.estimate_heap_size() as i64;\n        let diagnostics_port = reactor.diagnostics_port.estimate_heap_size() as i64;\n        let upgrade_watcher = reactor.upgrade_watcher.estimate_heap_size() as i64;\n        let binary_port = reactor.binary_port.estimate_heap_size() as i64;\n\n        let total = metrics\n            + network\n            + address_gossiper\n            + storage\n            + contract_runtime\n            + rest_server\n            + event_stream_server\n            + consensus\n            + fetchers\n            + transaction_gossiper\n            + finality_signature_gossiper\n            + block_gossiper\n            + transaction_buffer\n            + block_validator\n            + sync_leaper\n            + transaction_acceptor\n            + block_synchronizer\n            + block_accumulator\n            + 
diagnostics_port\n            + upgrade_watcher\n            + binary_port;\n\n        self.mem_net.set(network);\n        self.mem_address_gossiper.set(address_gossiper);\n        self.mem_storage.set(storage);\n        self.mem_contract_runtime.set(contract_runtime);\n        self.mem_rest_server.set(rest_server);\n        self.mem_event_stream_server.set(event_stream_server);\n        self.mem_consensus.set(consensus);\n        self.mem_fetchers.set(fetchers);\n        self.mem_transaction_gossiper.set(transaction_gossiper);\n        self.mem_finality_signature_gossiper\n            .set(finality_signature_gossiper);\n        self.mem_block_gossiper.set(block_gossiper);\n        self.mem_transaction_buffer.set(transaction_buffer);\n        self.mem_block_validator.set(block_validator);\n        self.mem_sync_leaper.set(sync_leaper);\n        self.mem_transaction_acceptor.set(transaction_acceptor);\n        self.mem_block_synchronizer.set(block_synchronizer);\n        self.mem_block_accumulator.set(block_accumulator);\n        self.mem_diagnostics_port.set(diagnostics_port);\n        self.mem_upgrade_watcher.set(upgrade_watcher);\n        self.mem_binary_port.set(binary_port);\n\n        self.mem_total.set(total);\n        self.mem_metrics.set(metrics);\n\n        // Stop the timer explicitly, don't count logging.\n        let duration_s = timer.stop_and_record();\n\n        debug!(%total,\n               %duration_s,\n               %metrics,\n               %network,\n               %address_gossiper,\n               %storage,\n               %contract_runtime,\n               %rest_server,\n               %event_stream_server,\n               %consensus,\n               %fetchers,\n               %transaction_gossiper,\n               %finality_signature_gossiper,\n               %block_gossiper,\n               %transaction_buffer,\n               %block_validator,\n               %sync_leaper,\n               %transaction_acceptor,\n               
%block_synchronizer,\n               %block_accumulator,\n               %diagnostics_port,\n               %upgrade_watcher,\n               %binary_port,\n               \"Collected new set of memory metrics.\");\n    }\n}\n\nimpl Drop for MemoryMetrics {\n    fn drop(&mut self) {\n        unregister_metric!(self.registry, self.mem_total);\n        unregister_metric!(self.registry, self.mem_metrics);\n        unregister_metric!(self.registry, self.mem_estimator_runtime_s);\n\n        unregister_metric!(self.registry, self.mem_net);\n        unregister_metric!(self.registry, self.mem_address_gossiper);\n        unregister_metric!(self.registry, self.mem_storage);\n        unregister_metric!(self.registry, self.mem_contract_runtime);\n        unregister_metric!(self.registry, self.mem_rpc_server);\n        unregister_metric!(self.registry, self.mem_rest_server);\n        unregister_metric!(self.registry, self.mem_event_stream_server);\n        unregister_metric!(self.registry, self.mem_consensus);\n        unregister_metric!(self.registry, self.mem_fetchers);\n        unregister_metric!(self.registry, self.mem_transaction_gossiper);\n        unregister_metric!(self.registry, self.mem_finality_signature_gossiper);\n        unregister_metric!(self.registry, self.mem_block_gossiper);\n        unregister_metric!(self.registry, self.mem_transaction_buffer);\n        unregister_metric!(self.registry, self.mem_block_validator);\n        unregister_metric!(self.registry, self.mem_sync_leaper);\n        unregister_metric!(self.registry, self.mem_transaction_acceptor);\n        unregister_metric!(self.registry, self.mem_block_synchronizer);\n        unregister_metric!(self.registry, self.mem_block_accumulator);\n        unregister_metric!(self.registry, self.mem_diagnostics_port);\n        unregister_metric!(self.registry, self.mem_upgrade_watcher);\n        unregister_metric!(self.registry, self.mem_binary_port);\n    }\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/reactor_state.rs",
    "content": "use datasize::DataSize;\nuse derive_more::Display;\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n/// The state of the reactor.\n#[cfg_attr(doc, aquamarine::aquamarine)]\n/// ```mermaid\n/// flowchart TD\n///     %%{init: { 'flowchart': {'diagramPadding':100} }}%%\n///     style Start fill:#66ccff,stroke:#333,stroke-width:4px\n///     style End fill:#66ccff,stroke:#333,stroke-width:4px\n///     \n///     Start --> Initialize\n///     Initialize --> CatchUp\n///     CatchUp --> KeepUp\n///     KeepUp --> CatchUp\n///     KeepUp --> Validate\n///     Validate --> KeepUp\n///     CatchUp --> ShutdownForUpgrade\n///     CatchUp --> ShutdownAfterCatchingUp\n///     KeepUp --> ShutdownForUpgrade\n///     Validate --> ShutdownForUpgrade\n///     CatchUp --> Upgrading\n///     CatchUp -->|at genesis| Validate\n///     Upgrading --> CatchUp\n///     ShutdownForUpgrade --> End\n/// ```\n/// ```mermaid\n/// flowchart TD\n///     style Start fill:#66ccff,stroke:#333,stroke-width:4px\n///     style End fill:#66ccff,stroke:#333,stroke-width:4px\n///     style F fill:#ffcc66,stroke:#333,stroke-width:4px\n///     style G fill:#ffcc66,stroke:#333,stroke-width:4px\n///     title[CatchUp process]\n///     title---Start\n///     style title fill:#FFF,stroke:#FFF\n///     linkStyle 0 stroke-width:0;\n///\n///     Start --> A[\"get sync identifier (sync starting point)\"]\n///     A --> BlockHash\n///     A --> BlockIdentifier\n///     A --> SyncedBlockIdentifier\n///     A --> LocalTip\n///     BlockHash --> E[process identifier in<br/>block accumulator]\n///     BlockIdentifier --> E\n///     SyncedBlockIdentifier --> E\n///     LocalTip --> E\n///     CaughtUp --> H[handle upgrade<br/>if needed]\n///     H --> End\n///     E -->|more data needed<br/>from network<br/>to let us sync near tip| Leap\n///     E -->|block represented by<br/>identifier is not stored<br/>locally, sync it| BlockSync\n///     E -->|we think we're close<br/>enough to the 
tip|CaughtUp\n///     Leap --> F[initiate SyncLeap<br/>and retry later]\n///     BlockSync --> G[initiate BlockSync<br/>and retry later]\n/// ```\n#[derive(\n    Copy, Clone, PartialEq, Eq, Serialize, Deserialize, DataSize, Debug, Display, JsonSchema,\n)]\n#[schemars(description = \"The state of the reactor.\")]\npub enum ReactorState {\n    /// Get all components and reactor state set up on start.\n    Initialize,\n    /// Orient to the network and attempt to catch up to tip.\n    CatchUp,\n    /// Running commit upgrade and creating immediate switch block.\n    Upgrading,\n    /// Stay caught up with tip.\n    KeepUp,\n    /// Node is currently caught up and is an active validator.\n    Validate,\n    /// Node should be shut down for upgrade.\n    ShutdownForUpgrade,\n    /// Node should shut down after catching up.\n    ShutdownAfterCatchingUp,\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/tests/auction.rs",
    "content": "use std::sync::Arc;\n\nuse crate::reactor::main_reactor::tests::{\n    configs_override::ConfigsOverride, fixture::TestFixture, initial_stakes::InitialStakes,\n    ERA_ONE, ERA_TWO, ONE_MIN, TEN_SECS,\n};\nuse casper_types::{\n    execution::{ExecutionResult, TransformKindV2},\n    system::{auction::BidAddr, AUCTION},\n    Deploy, Key, PublicKey, StoredValue, TimeDiff, Timestamp, Transaction, U512,\n};\n\n#[tokio::test]\nasync fn run_withdraw_bid_network() {\n    let alice_stake = 200_000_000_000_u64;\n    let initial_stakes = InitialStakes::FromVec(vec![alice_stake.into(), 10_000_000_000]);\n\n    let unbonding_delay = 2;\n\n    let mut fixture = TestFixture::new(\n        initial_stakes,\n        Some(ConfigsOverride {\n            unbonding_delay,\n            ..Default::default()\n        }),\n    )\n    .await;\n    let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key);\n    let alice_public_key = PublicKey::from(&*alice_secret_key);\n\n    // Wait for all nodes to complete block 0.\n    fixture.run_until_block_height(0, ONE_MIN).await;\n\n    // Ensure our post genesis assumption that Alice has a bid is correct.\n    fixture.check_bid_existence_at_tip(&alice_public_key, None, true);\n\n    // Create & sign deploy to withdraw Alice's full stake.\n    let mut deploy = Deploy::withdraw_bid(\n        fixture.chainspec.network_config.name.clone(),\n        fixture.system_contract_hash(AUCTION),\n        alice_public_key.clone(),\n        alice_stake.into(),\n        Timestamp::now(),\n        TimeDiff::from_seconds(60),\n    );\n    deploy.sign(&alice_secret_key);\n    let txn = Transaction::Deploy(deploy);\n    let txn_hash = txn.hash();\n\n    // Inject the transaction and run the network until executed.\n    fixture.inject_transaction(txn).await;\n    fixture\n        .run_until_executed_transaction(&txn_hash, TEN_SECS)\n        .await;\n\n    // Ensure execution succeeded and that there is a Prune transform for the bid's 
key.\n    let bid_key = Key::BidAddr(BidAddr::from(alice_public_key.clone()));\n    fixture\n        .successful_execution_transforms(&txn_hash)\n        .iter()\n        .find(|transform| match transform.kind() {\n            TransformKindV2::Prune(prune_key) => prune_key == &bid_key,\n            _ => false,\n        })\n        .expect(\"should have a prune record for bid\");\n\n    // Crank the network forward until the era ends.\n    fixture\n        .run_until_stored_switch_block_header(ERA_ONE, ONE_MIN)\n        .await;\n\n    // The bid record should have been pruned once unbonding ran.\n    fixture.check_bid_existence_at_tip(&alice_public_key, None, false);\n\n    // Crank the network forward until the unbonding queue is processed.\n    fixture\n        .run_until_stored_switch_block_header(\n            ERA_ONE.saturating_add(unbonding_delay + 1),\n            ONE_MIN * 2,\n        )\n        .await;\n}\n\n#[tokio::test]\nasync fn should_error_on_validator_unbond_to_large() {\n    let alice_stake = 200_000_000_000_u64;\n    let initial_stakes = InitialStakes::FromVec(vec![alice_stake.into(), 10_000_000_000]);\n\n    let unbonding_delay = 2;\n\n    let mut fixture = TestFixture::new(\n        initial_stakes,\n        Some(ConfigsOverride {\n            unbonding_delay,\n            ..Default::default()\n        }),\n    )\n    .await;\n    let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key);\n    let alice_public_key = PublicKey::from(&*alice_secret_key);\n\n    // Wait for all nodes to complete block 0.\n    fixture.run_until_block_height(0, ONE_MIN).await;\n\n    // Ensure our post genesis assumption that Alice has a bid is correct.\n    fixture.check_bid_existence_at_tip(&alice_public_key, None, true);\n\n    let too_much_stake = alice_stake + 1;\n\n    // Create & sign deploy to withdraw MORE than Alice's full stake.\n    let mut deploy = Deploy::withdraw_bid(\n        fixture.chainspec.network_config.name.clone(),\n        
fixture.system_contract_hash(AUCTION),\n        alice_public_key.clone(),\n        too_much_stake.into(),\n        Timestamp::now(),\n        TimeDiff::from_seconds(60),\n    );\n    deploy.sign(&alice_secret_key);\n    let txn = Transaction::Deploy(deploy);\n    let txn_hash = txn.hash();\n\n    // Inject the transaction and run the network until executed.\n    fixture.inject_transaction(txn).await;\n    fixture\n        .run_until_executed_transaction(&txn_hash, TEN_SECS)\n        .await;\n\n    let result = fixture.transaction_execution_result(&txn_hash);\n\n    if let ExecutionResult::V2(exec_result) = result {\n        let msg = exec_result\n            .error_message\n            .expect(\"error message should not be none\");\n\n        assert_eq!(\n            msg, \"ApiError::AuctionError(UnbondTooLarge) [64532]\",\n            \"{}\",\n            msg\n        );\n    } else {\n        panic!(\"unexpected execution result\");\n    }\n}\n\n#[tokio::test]\nasync fn run_undelegate_bid_network() {\n    let alice_stake = 200_000_000_000_u64;\n    let bob_stake = 300_000_000_000_u64;\n    let initial_stakes = InitialStakes::FromVec(vec![alice_stake.into(), bob_stake.into()]);\n\n    let unbonding_delay = 2;\n\n    let mut fixture = TestFixture::new(\n        initial_stakes,\n        Some(ConfigsOverride {\n            unbonding_delay,\n            ..Default::default()\n        }),\n    )\n    .await;\n    let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key);\n    let alice_public_key = PublicKey::from(&*alice_secret_key);\n    let bob_public_key = PublicKey::from(&*fixture.node_contexts[1].secret_key);\n\n    // Wait for all nodes to complete block 0.\n    fixture.run_until_block_height(0, ONE_MIN).await;\n\n    // Ensure our post genesis assumption that Alice and Bob have bids is correct.\n    fixture.check_bid_existence_at_tip(&alice_public_key, None, true);\n    fixture.check_bid_existence_at_tip(&bob_public_key, None, true);\n    // Alice 
should not have a delegation bid record for Bob (yet).\n    fixture.check_bid_existence_at_tip(&bob_public_key, Some(&alice_public_key), false);\n\n    // Have Alice delegate to Bob.\n    //\n    // Note, in the real world validators usually don't also delegate to other validators,  but in\n    // this test fixture the only accounts in the system are those created for genesis validators.\n    let alice_delegation_amount =\n        U512::from(fixture.chainspec.core_config.minimum_delegation_amount);\n    let mut deploy = Deploy::delegate(\n        fixture.chainspec.network_config.name.clone(),\n        fixture.system_contract_hash(AUCTION),\n        bob_public_key.clone(),\n        alice_public_key.clone(),\n        alice_delegation_amount,\n        Timestamp::now(),\n        TimeDiff::from_seconds(60),\n    );\n    deploy.sign(&alice_secret_key);\n    let txn = Transaction::Deploy(deploy);\n    let txn_hash = txn.hash();\n\n    // Inject the transaction and run the network until executed.\n    fixture.inject_transaction(txn).await;\n    fixture\n        .run_until_executed_transaction(&txn_hash, TEN_SECS)\n        .await;\n\n    // Ensure execution succeeded and that there is a Write transform for the bid's key.\n    let bid_key = Key::BidAddr(BidAddr::new_from_public_keys(\n        &bob_public_key,\n        Some(&alice_public_key),\n    ));\n    fixture\n        .successful_execution_transforms(&txn_hash)\n        .iter()\n        .find(|transform| match transform.kind() {\n            TransformKindV2::Write(StoredValue::BidKind(bid_kind)) => {\n                Key::from(bid_kind.bid_addr()) == bid_key\n            }\n            _ => false,\n        })\n        .expect(\"should have a write record for delegate bid\");\n\n    // Alice should now have a delegation bid record for Bob.\n    fixture.check_bid_existence_at_tip(&bob_public_key, Some(&alice_public_key), true);\n\n    // Create & sign transaction to undelegate from Alice to Bob.\n    let mut deploy = 
Deploy::undelegate(\n        fixture.chainspec.network_config.name.clone(),\n        fixture.system_contract_hash(AUCTION),\n        bob_public_key.clone(),\n        alice_public_key.clone(),\n        alice_delegation_amount,\n        Timestamp::now(),\n        TimeDiff::from_seconds(60),\n    );\n    deploy.sign(&alice_secret_key);\n    let txn = Transaction::Deploy(deploy);\n    let txn_hash = txn.hash();\n\n    // Inject the transaction and run the network until executed.\n    fixture.inject_transaction(txn).await;\n    fixture\n        .run_until_executed_transaction(&txn_hash, TEN_SECS)\n        .await;\n\n    // Ensure execution succeeded and that there is a Prune transform for the bid's key.\n    fixture\n        .successful_execution_transforms(&txn_hash)\n        .iter()\n        .find(|transform| match transform.kind() {\n            TransformKindV2::Prune(prune_key) => prune_key == &bid_key,\n            _ => false,\n        })\n        .expect(\"should have a prune record for undelegated bid\");\n\n    // Crank the network forward until the era ends.\n    fixture\n        .run_until_stored_switch_block_header(ERA_ONE, ONE_MIN)\n        .await;\n\n    // Ensure the validator records are still present but the undelegated bid is gone.\n    fixture.check_bid_existence_at_tip(&alice_public_key, None, true);\n    fixture.check_bid_existence_at_tip(&bob_public_key, None, true);\n    fixture.check_bid_existence_at_tip(&bob_public_key, Some(&alice_public_key), false);\n\n    // Crank the network forward until the unbonding queue is processed.\n    fixture\n        .run_until_stored_switch_block_header(\n            ERA_ONE.saturating_add(unbonding_delay + 1),\n            ONE_MIN * 2,\n        )\n        .await;\n}\n\n#[tokio::test]\nasync fn run_redelegate_bid_network() {\n    let alice_stake = 200_000_000_000_u64;\n    let bob_stake = 300_000_000_000_u64;\n    let charlie_stake = 300_000_000_000_u64;\n    let initial_stakes = InitialStakes::FromVec(vec![\n     
   alice_stake.into(),\n        bob_stake.into(),\n        charlie_stake.into(),\n    ]);\n\n    let spec_override = ConfigsOverride {\n        unbonding_delay: 1,\n        minimum_era_height: 5,\n        ..Default::default()\n    };\n    let mut fixture = TestFixture::new(initial_stakes, Some(spec_override)).await;\n    let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key);\n    let alice_public_key = PublicKey::from(&*alice_secret_key);\n    let bob_public_key = PublicKey::from(&*fixture.node_contexts[1].secret_key);\n    let charlie_public_key = PublicKey::from(&*fixture.node_contexts[2].secret_key);\n\n    // Wait for all nodes to complete block 0.\n    fixture.run_until_block_height(0, ONE_MIN).await;\n\n    // Ensure our post genesis assumption that Alice, Bob and Charlie have bids is correct.\n    fixture.check_bid_existence_at_tip(&alice_public_key, None, true);\n    fixture.check_bid_existence_at_tip(&bob_public_key, None, true);\n    fixture.check_bid_existence_at_tip(&charlie_public_key, None, true);\n    // Alice should not have a delegation bid record for Bob or Charlie (yet).\n    fixture.check_bid_existence_at_tip(&bob_public_key, Some(&alice_public_key), false);\n    fixture.check_bid_existence_at_tip(&charlie_public_key, Some(&alice_public_key), false);\n\n    // Have Alice delegate to Bob.\n    let alice_delegation_amount =\n        U512::from(fixture.chainspec.core_config.minimum_delegation_amount);\n    let mut deploy = Deploy::delegate(\n        fixture.chainspec.network_config.name.clone(),\n        fixture.system_contract_hash(AUCTION),\n        bob_public_key.clone(),\n        alice_public_key.clone(),\n        alice_delegation_amount,\n        Timestamp::now(),\n        TimeDiff::from_seconds(60),\n    );\n    deploy.sign(&alice_secret_key);\n    let txn = Transaction::Deploy(deploy);\n    let txn_hash = txn.hash();\n\n    // Inject the transaction and run the network until executed.\n    
fixture.inject_transaction(txn).await;\n    fixture\n        .run_until_executed_transaction(&txn_hash, ONE_MIN)\n        .await;\n\n    // Ensure execution succeeded and that there is a Write transform for the bid's key.\n    let bid_key = Key::BidAddr(BidAddr::new_from_public_keys(\n        &bob_public_key,\n        Some(&alice_public_key),\n    ));\n\n    fixture\n        .successful_execution_transforms(&txn_hash)\n        .iter()\n        .find(|transform| match transform.kind() {\n            TransformKindV2::Write(StoredValue::BidKind(bid_kind)) => {\n                Key::from(bid_kind.bid_addr()) == bid_key\n            }\n            _ => false,\n        })\n        .expect(\"should have a write record for delegate bid\");\n\n    // Alice should now have a delegation bid record for Bob.\n    fixture.check_bid_existence_at_tip(&bob_public_key, Some(&alice_public_key), true);\n\n    // Create & sign transaction to undelegate Alice from Bob and delegate to Charlie.\n    let mut deploy = Deploy::redelegate(\n        fixture.chainspec.network_config.name.clone(),\n        fixture.system_contract_hash(AUCTION),\n        bob_public_key.clone(),\n        alice_public_key.clone(),\n        charlie_public_key.clone(),\n        alice_delegation_amount,\n        Timestamp::now(),\n        TimeDiff::from_seconds(60),\n    );\n\n    deploy.sign(&alice_secret_key);\n    let transaction = Transaction::Deploy(deploy);\n    let transaction_hash = transaction.hash();\n\n    // Inject the transaction and run the network until executed.\n    fixture.inject_transaction(transaction).await;\n    fixture\n        .run_until_executed_transaction(&transaction_hash, TEN_SECS)\n        .await;\n\n    // Ensure execution succeeded and that there is a Prune transform for the bid's key.\n    fixture\n        .successful_execution_transforms(&transaction_hash)\n        .iter()\n        .find(|transform| match transform.kind() {\n            TransformKindV2::Prune(prune_key) => prune_key 
== &bid_key,\n            _ => false,\n        })\n        .expect(\"should have a prune record for undelegated bid\");\n\n    // Original delegation bid should be removed.\n    fixture.check_bid_existence_at_tip(&bob_public_key, Some(&alice_public_key), false);\n    // Redelegate doesn't occur until after unbonding delay elapses.\n    fixture.check_bid_existence_at_tip(&charlie_public_key, Some(&alice_public_key), false);\n\n    // Crank the network forward to run out the unbonding delay.\n    // First, close out the era the redelegate was processed in.\n    fixture\n        .run_until_stored_switch_block_header(ERA_ONE, ONE_MIN)\n        .await;\n    // The undelegate is in the unbonding queue.\n    fixture.check_bid_existence_at_tip(&charlie_public_key, Some(&alice_public_key), false);\n    // Unbonding delay is 1 on this test network, so step 1 more era.\n    fixture\n        .run_until_stored_switch_block_header(ERA_TWO, ONE_MIN)\n        .await;\n\n    // Ensure the validator records are still present.\n    fixture.check_bid_existence_at_tip(&alice_public_key, None, true);\n    fixture.check_bid_existence_at_tip(&bob_public_key, None, true);\n    // Ensure redelegated bid exists.\n    fixture.check_bid_existence_at_tip(&charlie_public_key, Some(&alice_public_key), true);\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/tests/binary_port.rs",
    "content": "use std::{\n    collections::{BTreeMap, HashMap},\n    convert::{TryFrom, TryInto},\n    iter,\n    sync::Arc,\n    time::Duration,\n};\n\nuse casper_binary_port::{\n    AccountInformation, AddressableEntityInformation, BalanceResponse, BinaryMessage,\n    BinaryMessageCodec, BinaryResponse, BinaryResponseAndRequest, Command, CommandHeader,\n    ConsensusStatus, ConsensusValidatorChanges, ContractInformation, DictionaryItemIdentifier,\n    DictionaryQueryResult, EntityIdentifier, EraIdentifier, ErrorCode, GetRequest,\n    GetTrieFullResult, GlobalStateEntityQualifier, GlobalStateQueryResult, GlobalStateRequest,\n    InformationRequest, InformationRequestTag, KeyPrefix, LastProgress, NetworkName, NodeStatus,\n    PackageIdentifier, PurseIdentifier, ReactorStateName, RecordId, ResponseType, RewardResponse,\n    Uptime, ValueWithProof,\n};\nuse casper_storage::global_state::state::CommitProvider;\nuse casper_types::{\n    account::AccountHash,\n    addressable_entity::{ActionThresholds, AssociatedKeys, NamedKeyAddr, NamedKeyValue},\n    bytesrepr::{Bytes, FromBytes, ToBytes},\n    contracts::{ContractHash, ContractPackage, ContractPackageHash},\n    execution::{Effects, TransformKindV2, TransformV2},\n    system::auction::DelegatorKind,\n    testing::TestRng,\n    Account, AddressableEntity, AvailableBlockRange, Block, BlockHash, BlockHeader,\n    BlockIdentifier, BlockSynchronizerStatus, BlockWithSignatures, ByteCode, ByteCodeAddr,\n    ByteCodeHash, ByteCodeKind, CLValue, CLValueDictionary, ChainspecRawBytes, Contract,\n    ContractRuntimeTag, ContractWasm, ContractWasmHash, DictionaryAddr, Digest, EntityAddr,\n    EntityKind, EntityVersions, GlobalStateIdentifier, Key, KeyTag, NextUpgrade, Package,\n    PackageAddr, PackageHash, Peers, ProtocolVersion, PublicKey, Rewards, SecretKey, StoredValue,\n    Transaction, Transfer, URef, U512,\n};\nuse futures::{SinkExt, StreamExt};\nuse rand::Rng;\nuse tokio::{net::TcpStream, time::timeout};\nuse 
tokio_util::codec::Framed;\n\nuse crate::{\n    reactor::{main_reactor::MainReactor, Runner},\n    testing::{\n        self, filter_reactor::FilterReactor, network::TestingNetwork, ConditionCheckReactor,\n    },\n    types::{transaction::transaction_v1_builder::TransactionV1Builder, NodeId},\n};\n\nuse crate::reactor::main_reactor::tests::{\n    fixture::TestFixture, initial_stakes::InitialStakes, ERA_ONE,\n};\n\nconst GUARANTEED_BLOCK_HEIGHT: u64 = 4;\n\nconst TEST_DICT_NAME: &str = \"test_dict\";\nconst TEST_DICT_ITEM_KEY: &str = \"test_key\";\nconst MESSAGE_SIZE: u32 = 1024 * 1024 * 10;\n\nstruct TestData {\n    rng: TestRng,\n    protocol_version: ProtocolVersion,\n    chainspec_raw_bytes: ChainspecRawBytes,\n    highest_block: Block,\n    secret_signing_key: Arc<SecretKey>,\n    state_root_hash: Digest,\n    effects: TestEffects,\n    era_one_validator: PublicKey,\n}\n\nfn network_produced_blocks(\n    nodes: &HashMap<NodeId, Runner<ConditionCheckReactor<FilterReactor<MainReactor>>>>,\n    block_count: u64,\n) -> bool {\n    nodes.values().all(|node| {\n        node.reactor()\n            .inner()\n            .inner()\n            .storage()\n            .get_available_block_range()\n            .high()\n            >= block_count\n    })\n}\n\nasync fn setup() -> (\n    Framed<TcpStream, BinaryMessageCodec>,\n    (\n        impl futures::Future<Output = (TestingNetwork<FilterReactor<MainReactor>>, TestRng)>,\n        TestData,\n    ),\n) {\n    let mut fixture = TestFixture::new(\n        InitialStakes::AllEqual {\n            count: 4,\n            stake: 100,\n        },\n        None,\n    )\n    .await;\n    let chainspec_raw_bytes = ChainspecRawBytes::clone(&fixture.chainspec_raw_bytes);\n    let mut rng = fixture.rng_mut().create_child();\n    let net = fixture.network_mut();\n    net.settle_on(\n        &mut rng,\n        |nodes| network_produced_blocks(nodes, GUARANTEED_BLOCK_HEIGHT),\n        Duration::from_secs(59),\n    )\n    .await;\n    let (_, 
first_node) = net\n        .nodes()\n        .iter()\n        .next()\n        .expect(\"should have at least one node\");\n    let secret_signing_key = first_node\n        .main_reactor()\n        .validator_matrix\n        .secret_signing_key()\n        .clone();\n    let highest_block = net\n        .nodes()\n        .iter()\n        .find_map(|(_, runner)| {\n            runner\n                .reactor()\n                .inner()\n                .inner()\n                .storage()\n                .read_highest_block()\n        })\n        .expect(\"should have highest block\");\n    let era_end = first_node\n        .main_reactor()\n        .storage()\n        .get_switch_block_by_era_id(&ERA_ONE)\n        .expect(\"should not fail retrieving switch block\")\n        .expect(\"should have switch block\")\n        .clone_era_end()\n        .expect(\"should have era end\");\n    let Rewards::V2(rewards) = era_end.rewards() else {\n        panic!(\"should have rewards V2\");\n    };\n\n    let effects = test_effects(&mut rng);\n\n    let state_root_hash = first_node\n        .main_reactor()\n        .contract_runtime()\n        .data_access_layer()\n        .commit_effects(*highest_block.state_root_hash(), effects.effects.clone())\n        .expect(\"should commit effects\");\n\n    // Get the binary port address.\n    let binary_port_addr = first_node\n        .main_reactor()\n        .binary_port\n        .bind_address()\n        .expect(\"should be bound\");\n\n    let protocol_version = first_node.main_reactor().chainspec.protocol_version();\n    // We let the entire network run in the background, until our request completes.\n    let finish_cranking = fixture.run_until_stopped(rng.create_child());\n\n    // Set-up client.\n    let address = format!(\"localhost:{}\", binary_port_addr.port());\n    let stream = TcpStream::connect(address.clone())\n        .await\n        .expect(\"should create stream\");\n\n    (\n        Framed::new(stream, 
BinaryMessageCodec::new(MESSAGE_SIZE)),\n        (\n            finish_cranking,\n            TestData {\n                rng,\n                protocol_version,\n                chainspec_raw_bytes,\n                highest_block,\n                secret_signing_key,\n                state_root_hash,\n                effects,\n                era_one_validator: rewards\n                    .last_key_value()\n                    .expect(\"should have at least one reward\")\n                    .0\n                    .clone(),\n            },\n        ),\n    )\n}\n\nfn test_effects(rng: &mut TestRng) -> TestEffects {\n    // we set up some basic data for global state tests, including an account and a dictionary\n    let pre_migration_account_hash = AccountHash::new(rng.gen());\n    let post_migration_account_hash = AccountHash::new(rng.gen());\n    let main_purse: URef = rng.gen();\n\n    let pre_migration_contract_package_hash = ContractPackageHash::new(rng.gen());\n    let pre_migration_contract_hash = ContractHash::new(rng.gen());\n    let post_migration_contract_package_hash = ContractPackageHash::new(rng.gen());\n    let post_migration_contract_hash = ContractHash::new(rng.gen());\n    let wasm_hash = ContractWasmHash::new(rng.gen());\n\n    let package_addr: PackageAddr = rng.gen();\n    let package_access_key: URef = rng.gen();\n    let entity_addr: EntityAddr = rng.gen();\n    let entity_bytecode_hash: ByteCodeHash = ByteCodeHash::new(rng.gen());\n\n    let dict_seed_uref: URef = rng.gen();\n    let dict_key = Key::dictionary(dict_seed_uref, TEST_DICT_ITEM_KEY.as_bytes());\n    let dict_value = CLValueDictionary::new(\n        CLValue::from_t(rng.gen::<i32>()).unwrap(),\n        dict_seed_uref.addr().to_vec(),\n        TEST_DICT_ITEM_KEY.as_bytes().to_vec(),\n    );\n\n    let mut effects = Effects::new();\n\n    effects.push(TransformV2::new(\n        Key::Account(pre_migration_account_hash),\n        
TransformKindV2::Write(StoredValue::Account(Account::new(\n            pre_migration_account_hash,\n            iter::once((TEST_DICT_NAME.to_owned(), Key::URef(dict_seed_uref)))\n                .collect::<BTreeMap<_, _>>()\n                .into(),\n            main_purse,\n            Default::default(),\n            Default::default(),\n        ))),\n    ));\n    effects.push(TransformV2::new(\n        Key::Account(post_migration_account_hash),\n        TransformKindV2::Write(StoredValue::CLValue(\n            CLValue::from_t(Key::AddressableEntity(entity_addr)).expect(\"should create CLValue\"),\n        )),\n    ));\n    effects.push(TransformV2::new(\n        dict_key,\n        TransformKindV2::Write(StoredValue::CLValue(CLValue::from_t(dict_value).unwrap())),\n    ));\n    effects.push(TransformV2::new(\n        Key::NamedKey(\n            NamedKeyAddr::new_from_string(entity_addr, TEST_DICT_NAME.to_owned())\n                .expect(\"should create named key addr\"),\n        ),\n        TransformKindV2::Write(StoredValue::NamedKey(\n            NamedKeyValue::from_concrete_values(\n                Key::URef(dict_seed_uref),\n                TEST_DICT_NAME.to_owned(),\n            )\n            .expect(\"should create named key value\"),\n        )),\n    ));\n    effects.push(TransformV2::new(\n        Key::Balance(main_purse.addr()),\n        TransformKindV2::Write(StoredValue::CLValue(\n            CLValue::from_t(U512::one()).expect(\"should create CLValue\"),\n        )),\n    ));\n\n    effects.push(TransformV2::new(\n        Key::Hash(pre_migration_contract_package_hash.value()),\n        TransformKindV2::Write(StoredValue::ContractPackage(ContractPackage::new(\n            package_access_key,\n            Default::default(),\n            Default::default(),\n            Default::default(),\n            Default::default(),\n        ))),\n    ));\n    effects.push(TransformV2::new(\n        Key::Hash(post_migration_contract_package_hash.value()),\n   
     TransformKindV2::Write(StoredValue::CLValue(\n            CLValue::from_t((Key::SmartContract(package_addr), package_access_key))\n                .expect(\"should create CLValue\"),\n        )),\n    ));\n\n    effects.push(TransformV2::new(\n        Key::Hash(pre_migration_contract_hash.value()),\n        TransformKindV2::Write(StoredValue::Contract(Contract::new(\n            pre_migration_contract_package_hash,\n            wasm_hash,\n            Default::default(),\n            Default::default(),\n            ProtocolVersion::V2_0_0,\n        ))),\n    ));\n    effects.push(TransformV2::new(\n        Key::Hash(post_migration_contract_hash.value()),\n        TransformKindV2::Write(StoredValue::CLValue(\n            CLValue::from_t(Key::AddressableEntity(entity_addr)).expect(\"should create CLValue\"),\n        )),\n    ));\n\n    effects.push(TransformV2::new(\n        Key::Hash(wasm_hash.value()),\n        TransformKindV2::Write(StoredValue::ContractWasm(ContractWasm::new(\n            rng.random_vec(10..100),\n        ))),\n    ));\n\n    effects.push(TransformV2::new(\n        Key::SmartContract(package_addr),\n        TransformKindV2::Write(StoredValue::SmartContract(Package::new(\n            EntityVersions::default(),\n            Default::default(),\n            Default::default(),\n            Default::default(),\n        ))),\n    ));\n    effects.push(TransformV2::new(\n        Key::AddressableEntity(entity_addr),\n        TransformKindV2::Write(StoredValue::AddressableEntity(AddressableEntity::new(\n            PackageHash::new(package_addr),\n            entity_bytecode_hash,\n            ProtocolVersion::V2_0_0,\n            main_purse,\n            AssociatedKeys::default(),\n            ActionThresholds::default(),\n            EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1),\n        ))),\n    ));\n    effects.push(TransformV2::new(\n        Key::ByteCode(ByteCodeAddr::new_wasm_addr(entity_bytecode_hash.value())),\n        
TransformKindV2::Write(StoredValue::ByteCode(ByteCode::new(\n            ByteCodeKind::V1CasperWasm,\n            rng.random_vec(10..100),\n        ))),\n    ));\n\n    TestEffects {\n        effects,\n        pre_migration_account_hash,\n        post_migration_account_hash,\n        pre_migration_contract_package_hash,\n        post_migration_contract_package_hash,\n        pre_migration_contract_hash,\n        post_migration_contract_hash,\n        package_addr,\n        entity_addr,\n        dict_seed_uref,\n    }\n}\n\nstruct TestEffects {\n    effects: Effects,\n    pre_migration_account_hash: AccountHash,\n    post_migration_account_hash: AccountHash,\n    pre_migration_contract_package_hash: ContractPackageHash,\n    post_migration_contract_package_hash: ContractPackageHash,\n    pre_migration_contract_hash: ContractHash,\n    post_migration_contract_hash: ContractHash,\n    package_addr: PackageAddr,\n    entity_addr: EntityAddr,\n    dict_seed_uref: URef,\n}\n\nstruct TestCase {\n    name: &'static str,\n    request: Command,\n    asserter: Box<dyn Fn(&BinaryResponse) -> bool>,\n}\n\nfn validate_metadata(\n    response: &BinaryResponse,\n    expected_payload_type: Option<ResponseType>,\n) -> bool {\n    response.is_success()\n        && response.returned_data_type_tag()\n            == expected_payload_type.map(|payload_type| payload_type as u8)\n        && expected_payload_type.is_none_or(|_| !response.payload().is_empty())\n}\n\nfn validate_deserialization<T>(response: &BinaryResponse) -> Option<T>\nwhere\n    T: FromBytes,\n{\n    FromBytes::from_bytes(response.payload())\n        .ok()\n        .map(|(data, remainder)| {\n            assert!(remainder.is_empty());\n            data\n        })\n}\n\nfn assert_response<T, F>(\n    response: &BinaryResponse,\n    payload_type: Option<ResponseType>,\n    validator: F,\n) -> bool\nwhere\n    T: FromBytes,\n    F: FnOnce(T) -> bool,\n{\n    validate_metadata(response, payload_type)\n        && 
payload_type\n            .is_none_or(|_| validate_deserialization::<T>(response).is_some_and(validator))\n}\n\n#[tokio::test]\nasync fn binary_port_component_handles_all_requests() {\n    testing::init_logging();\n\n    let (\n        mut client,\n        (\n            finish_cranking,\n            TestData {\n                mut rng,\n                protocol_version,\n                chainspec_raw_bytes: network_chainspec_raw_bytes,\n                highest_block,\n                secret_signing_key,\n                state_root_hash,\n                effects,\n                era_one_validator,\n            },\n        ),\n    ) = setup().await;\n\n    let test_cases = &[\n        block_header_info(*highest_block.hash()),\n        block_with_signatures_info(*highest_block.hash()),\n        peers(),\n        uptime(),\n        last_progress(),\n        reactor_state(),\n        network_name(),\n        consensus_validator_changes(),\n        block_synchronizer_status(),\n        available_block_range(highest_block.height()),\n        next_upgrade(),\n        consensus_status(),\n        chainspec_raw_bytes(network_chainspec_raw_bytes),\n        latest_switch_block_header(),\n        node_status(protocol_version),\n        get_block_header(highest_block.clone_header()),\n        get_block_transfers(highest_block.clone_header()),\n        get_era_summary(state_root_hash),\n        get_all_bids(state_root_hash),\n        get_trie(state_root_hash),\n        get_dictionary_item_by_addr(\n            state_root_hash,\n            *Key::dictionary(effects.dict_seed_uref, TEST_DICT_ITEM_KEY.as_bytes())\n                .as_dictionary()\n                .unwrap(),\n        ),\n        get_dictionary_item_by_seed_uref(\n            state_root_hash,\n            effects.dict_seed_uref,\n            TEST_DICT_ITEM_KEY.to_owned(),\n        ),\n        get_dictionary_item_by_legacy_named_key(\n            state_root_hash,\n            effects.pre_migration_account_hash,\n     
       TEST_DICT_NAME.to_owned(),\n            TEST_DICT_ITEM_KEY.to_owned(),\n        ),\n        get_dictionary_item_by_named_key(\n            state_root_hash,\n            effects.entity_addr,\n            TEST_DICT_NAME.to_owned(),\n            TEST_DICT_ITEM_KEY.to_owned(),\n        ),\n        try_spec_exec_invalid(&mut rng),\n        try_accept_transaction_invalid(&mut rng),\n        try_accept_transaction(&secret_signing_key),\n        get_balance(state_root_hash, effects.pre_migration_account_hash),\n        get_balance_account_not_found(state_root_hash),\n        get_balance_purse_uref_not_found(state_root_hash),\n        get_named_keys_by_prefix(state_root_hash, effects.entity_addr),\n        get_reward(\n            Some(EraIdentifier::Era(ERA_ONE)),\n            era_one_validator.clone(),\n            None,\n        ),\n        get_reward(\n            Some(EraIdentifier::Block(BlockIdentifier::Height(1))),\n            era_one_validator,\n            None,\n        ),\n        get_protocol_version(protocol_version),\n        get_entity(state_root_hash, effects.entity_addr),\n        get_entity_without_bytecode(state_root_hash, effects.entity_addr),\n        get_entity_pre_migration_account(state_root_hash, effects.pre_migration_account_hash),\n        get_entity_post_migration_account(state_root_hash, effects.post_migration_account_hash),\n        get_entity_pre_migration_contract(state_root_hash, effects.pre_migration_contract_hash),\n        get_entity_post_migration_contract(state_root_hash, effects.post_migration_contract_hash),\n        get_package(state_root_hash, effects.package_addr),\n        get_package_pre_migration(state_root_hash, effects.pre_migration_contract_package_hash),\n        get_package_post_migration(\n            state_root_hash,\n            effects.post_migration_contract_package_hash,\n        ),\n    ];\n\n    for (\n        index,\n        TestCase {\n            name,\n            request,\n            asserter,\n       
 },\n    ) in test_cases.iter().enumerate()\n    {\n        let original_request_bytes = {\n            let header = CommandHeader::new(request.tag(), index as u16);\n            let header_bytes = ToBytes::to_bytes(&header).expect(\"should serialize\");\n            let request_bytes = ToBytes::to_bytes(&request).expect(\"should serialize\");\n\n            [header_bytes, request_bytes].concat()\n        };\n\n        client\n            .send(BinaryMessage::new(original_request_bytes.clone()))\n            .await\n            .expect(\"should send message\");\n\n        let response = timeout(Duration::from_secs(10), client.next())\n            .await\n            .unwrap_or_else(|err| panic!(\"{}: should complete without timeout: {}\", name, err))\n            .unwrap_or_else(|| panic!(\"{}: should have bytes\", name))\n            .unwrap_or_else(|err| panic!(\"{}: should have ok response: {}\", name, err));\n        let (binary_response_and_request, _): (BinaryResponseAndRequest, _) =\n            FromBytes::from_bytes(response.payload()).expect(\"should deserialize response\");\n\n        let bytes_sent_via_tcp = Bytes::from(original_request_bytes).to_bytes().unwrap();\n        let mirrored_request_bytes = binary_response_and_request.request();\n        assert_eq!(mirrored_request_bytes, bytes_sent_via_tcp, \"{}\", name);\n\n        binary_response_and_request.request();\n\n        assert!(asserter(binary_response_and_request.response()), \"{}\", name);\n    }\n\n    let (_net, _rng) = timeout(Duration::from_secs(10), finish_cranking)\n        .await\n        .unwrap_or_else(|_| panic!(\"should finish cranking without timeout\"));\n}\n\nfn block_header_info(hash: BlockHash) -> TestCase {\n    TestCase {\n        name: \"block_header_info\",\n        request: Command::Get(\n            InformationRequest::BlockHeader(Some(BlockIdentifier::Hash(hash)))\n                .try_into()\n                .expect(\"should convert\"),\n        ),\n        asserter: 
Box::new(move |response| {\n            assert_response::<BlockHeader, _>(response, Some(ResponseType::BlockHeader), |header| {\n                header.block_hash() == hash\n            })\n        }),\n    }\n}\n\nfn block_with_signatures_info(hash: BlockHash) -> TestCase {\n    TestCase {\n        name: \"block_with_signatures_info\",\n        request: Command::Get(\n            InformationRequest::BlockWithSignatures(Some(BlockIdentifier::Hash(hash)))\n                .try_into()\n                .expect(\"should convert\"),\n        ),\n        asserter: Box::new(move |response| {\n            assert_response::<BlockWithSignatures, _>(\n                response,\n                Some(ResponseType::BlockWithSignatures),\n                |header| *header.block().hash() == hash,\n            )\n        }),\n    }\n}\n\nfn peers() -> TestCase {\n    TestCase {\n        name: \"peers\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: InformationRequestTag::Peers.into(),\n            key: vec![],\n        }),\n        asserter: Box::new(|response| {\n            assert_response::<Peers, _>(response, Some(ResponseType::Peers), |peers| {\n                !peers.into_inner().is_empty()\n            })\n        }),\n    }\n}\n\nfn uptime() -> TestCase {\n    TestCase {\n        name: \"uptime\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: InformationRequestTag::Uptime.into(),\n            key: vec![],\n        }),\n        asserter: Box::new(|response| {\n            assert_response::<Uptime, _>(response, Some(ResponseType::Uptime), |uptime| {\n                uptime.into_inner() > 0\n            })\n        }),\n    }\n}\n\nfn last_progress() -> TestCase {\n    TestCase {\n        name: \"last_progress\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: InformationRequestTag::LastProgress.into(),\n            key: vec![],\n        }),\n        asserter: 
Box::new(|response| {\n            assert_response::<LastProgress, _>(\n                response,\n                Some(ResponseType::LastProgress),\n                |last_progress| last_progress.into_inner().millis() > 0,\n            )\n        }),\n    }\n}\n\nfn reactor_state() -> TestCase {\n    TestCase {\n        name: \"reactor_state\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: InformationRequestTag::ReactorState.into(),\n            key: vec![],\n        }),\n        asserter: Box::new(|response| {\n            assert_response::<ReactorStateName, _>(\n                response,\n                Some(ResponseType::ReactorState),\n                |reactor_state| matches!(reactor_state.into_inner().as_str(), \"Validate\"),\n            )\n        }),\n    }\n}\n\nfn network_name() -> TestCase {\n    TestCase {\n        name: \"network_name\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: InformationRequestTag::NetworkName.into(),\n            key: vec![],\n        }),\n        asserter: Box::new(|response| {\n            assert_response::<NetworkName, _>(\n                response,\n                Some(ResponseType::NetworkName),\n                |network_name| &network_name.into_inner() == \"casper-example\",\n            )\n        }),\n    }\n}\n\nfn consensus_validator_changes() -> TestCase {\n    TestCase {\n        name: \"consensus_validator_changes\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: InformationRequestTag::ConsensusValidatorChanges.into(),\n            key: vec![],\n        }),\n        asserter: Box::new(|response| {\n            assert_response::<ConsensusValidatorChanges, _>(\n                response,\n                Some(ResponseType::ConsensusValidatorChanges),\n                |cvc| cvc.into_inner().is_empty(),\n            )\n        }),\n    }\n}\n\nfn block_synchronizer_status() -> TestCase {\n    TestCase 
{\n        name: \"block_synchronizer_status\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: InformationRequestTag::BlockSynchronizerStatus.into(),\n            key: vec![],\n        }),\n        asserter: Box::new(|response| {\n            assert_response::<BlockSynchronizerStatus, _>(\n                response,\n                Some(ResponseType::BlockSynchronizerStatus),\n                |bss| bss.historical().is_none() && bss.forward().is_none(),\n            )\n        }),\n    }\n}\n\nfn available_block_range(expected_height: u64) -> TestCase {\n    TestCase {\n        name: \"available_block_range\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: InformationRequestTag::AvailableBlockRange.into(),\n            key: vec![],\n        }),\n        asserter: Box::new(move |response| {\n            assert_response::<AvailableBlockRange, _>(\n                response,\n                Some(ResponseType::AvailableBlockRange),\n                |abr| abr.low() == 0 && abr.high() >= expected_height,\n            )\n        }),\n    }\n}\n\nfn next_upgrade() -> TestCase {\n    TestCase {\n        name: \"next_upgrade\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: InformationRequestTag::NextUpgrade.into(),\n            key: vec![],\n        }),\n        asserter: Box::new(|response| assert_response::<NextUpgrade, _>(response, None, |_| true)),\n    }\n}\n\nfn consensus_status() -> TestCase {\n    TestCase {\n        name: \"consensus_status\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: InformationRequestTag::ConsensusStatus.into(),\n            key: vec![],\n        }),\n        asserter: Box::new(|response| {\n            assert_response::<ConsensusStatus, _>(\n                response,\n                Some(ResponseType::ConsensusStatus),\n                |_| true,\n            )\n        }),\n    }\n}\n\nfn 
chainspec_raw_bytes(network_chainspec_raw_bytes: ChainspecRawBytes) -> TestCase {\n    TestCase {\n        name: \"chainspec_raw_bytes\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: InformationRequestTag::ChainspecRawBytes.into(),\n            key: vec![],\n        }),\n        asserter: Box::new(move |response| {\n            assert_response::<ChainspecRawBytes, _>(\n                response,\n                Some(ResponseType::ChainspecRawBytes),\n                |crb| crb == network_chainspec_raw_bytes,\n            )\n        }),\n    }\n}\n\nfn latest_switch_block_header() -> TestCase {\n    TestCase {\n        name: \"latest_switch_block_header\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: InformationRequestTag::LatestSwitchBlockHeader.into(),\n            key: vec![],\n        }),\n        asserter: Box::new(move |response| {\n            assert_response::<BlockHeader, _>(response, Some(ResponseType::BlockHeader), |header| {\n                header.is_switch_block()\n            })\n        }),\n    }\n}\n\nfn node_status(expected_version: ProtocolVersion) -> TestCase {\n    TestCase {\n        name: \"node_status\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: InformationRequestTag::NodeStatus.into(),\n            key: vec![],\n        }),\n        asserter: Box::new(move |response| {\n            assert_response::<NodeStatus, _>(\n                response,\n                Some(ResponseType::NodeStatus),\n                |node_status| {\n                    node_status.protocol_version == expected_version\n                        && !node_status.peers.into_inner().is_empty()\n                        && node_status.chainspec_name == \"casper-example\"\n                        && node_status.last_added_block_info.is_some()\n                        && node_status.our_public_signing_key.is_some()\n                        && 
node_status.block_sync.historical().is_none()\n                        && node_status.block_sync.forward().is_none()\n                        && matches!(node_status.reactor_state.into_inner().as_str(), \"Validate\")\n                        && node_status.latest_switch_block_hash.is_some()\n                },\n            )\n        }),\n    }\n}\n\nfn get_block_header(expected: BlockHeader) -> TestCase {\n    TestCase {\n        name: \"get_block_header\",\n        request: Command::Get(GetRequest::Record {\n            record_type_tag: RecordId::BlockHeader.into(),\n            key: expected.block_hash().to_bytes().unwrap(),\n        }),\n        asserter: Box::new(move |response| {\n            assert_response::<BlockHeader, _>(response, Some(ResponseType::BlockHeader), |header| {\n                header == expected\n            })\n        }),\n    }\n}\n\nfn get_block_transfers(expected: BlockHeader) -> TestCase {\n    TestCase {\n        name: \"get_block_transfers\",\n        request: Command::Get(GetRequest::Record {\n            record_type_tag: RecordId::Transfer.into(),\n            key: expected.block_hash().to_bytes().unwrap(),\n        }),\n        asserter: Box::new(move |response| {\n            validate_metadata(response, Some(ResponseType::Transfers))\n                && bincode::deserialize::<Vec<Transfer>>(response.payload()).is_ok()\n        }),\n    }\n}\n\nfn get_era_summary(state_root_hash: Digest) -> TestCase {\n    TestCase {\n        name: \"get_era_summary\",\n        request: Command::Get(GetRequest::State(Box::new(GlobalStateRequest::new(\n            Some(GlobalStateIdentifier::StateRootHash(state_root_hash)),\n            GlobalStateEntityQualifier::Item {\n                base_key: Key::EraSummary,\n                path: vec![],\n            },\n        )))),\n        asserter: Box::new(|response| {\n            assert_response::<GlobalStateQueryResult, _>(\n                response,\n                
Some(ResponseType::GlobalStateQueryResult),\n                |res| {\n                    let (value, _) = res.into_inner();\n                    matches!(value, StoredValue::EraInfo(_))\n                },\n            )\n        }),\n    }\n}\n\nfn get_all_bids(state_root_hash: Digest) -> TestCase {\n    TestCase {\n        name: \"get_all_bids\",\n        request: Command::Get(GetRequest::State(Box::new(GlobalStateRequest::new(\n            Some(GlobalStateIdentifier::StateRootHash(state_root_hash)),\n            GlobalStateEntityQualifier::AllItems {\n                key_tag: KeyTag::Bid,\n            },\n        )))),\n        asserter: Box::new(|response| {\n            assert_response::<Vec<StoredValue>, _>(\n                response,\n                Some(ResponseType::StoredValues),\n                |res| res.iter().all(|v| matches!(v, StoredValue::BidKind(_))),\n            )\n        }),\n    }\n}\n\nfn get_trie(digest: Digest) -> TestCase {\n    TestCase {\n        name: \"get_trie\",\n        request: Command::Get(GetRequest::Trie { trie_key: digest }),\n        asserter: Box::new(|response| {\n            assert_response::<GetTrieFullResult, _>(\n                response,\n                Some(ResponseType::GetTrieFullResult),\n                |res| res.into_inner().is_some(),\n            )\n        }),\n    }\n}\n\nfn get_dictionary_item_by_addr(state_root_hash: Digest, addr: DictionaryAddr) -> TestCase {\n    TestCase {\n        name: \"get_dictionary_item_by_addr\",\n        request: Command::Get(GetRequest::State(Box::new(GlobalStateRequest::new(\n            Some(GlobalStateIdentifier::StateRootHash(state_root_hash)),\n            GlobalStateEntityQualifier::DictionaryItem {\n                identifier: DictionaryItemIdentifier::DictionaryItem(addr),\n            },\n        )))),\n        asserter: Box::new(move |response| {\n            assert_response::<DictionaryQueryResult, _>(\n                response,\n                
Some(ResponseType::DictionaryQueryResult),\n                |res| {\n                    matches!(\n                        res.into_inner(),\n                        (key, res) if key == Key::Dictionary(addr) && res.value().as_cl_value().is_some()\n                    )\n                },\n            )\n        }),\n    }\n}\n\nfn get_dictionary_item_by_seed_uref(\n    state_root_hash: Digest,\n    seed_uref: URef,\n    dictionary_item_key: String,\n) -> TestCase {\n    TestCase {\n        name: \"get_dictionary_item_by_seed_uref\",\n        request: Command::Get(GetRequest::State(Box::new(GlobalStateRequest::new(\n            Some(GlobalStateIdentifier::StateRootHash(state_root_hash)),\n            GlobalStateEntityQualifier::DictionaryItem {\n                identifier: DictionaryItemIdentifier::URef {\n                    seed_uref,\n                    dictionary_item_key: dictionary_item_key.clone(),\n                },\n            },\n        )))),\n        asserter: Box::new(move |response| {\n            assert_response::<DictionaryQueryResult, _>(\n                response,\n                Some(ResponseType::DictionaryQueryResult),\n                |res| {\n                    let expected_key = Key::dictionary(seed_uref, dictionary_item_key.as_bytes());\n                    matches!(\n                        res.into_inner(),\n                        (key, res) if key == expected_key && res.value().as_cl_value().is_some()\n                    )\n                },\n            )\n        }),\n    }\n}\n\nfn get_dictionary_item_by_legacy_named_key(\n    state_root_hash: Digest,\n    hash: AccountHash,\n    dictionary_name: String,\n    dictionary_item_key: String,\n) -> TestCase {\n    TestCase {\n        name: \"get_dictionary_item_by_legacy_named_key\",\n        request: Command::Get(GetRequest::State(Box::new(GlobalStateRequest::new(\n            Some(GlobalStateIdentifier::StateRootHash(state_root_hash)),\n            
GlobalStateEntityQualifier::DictionaryItem {\n                identifier: DictionaryItemIdentifier::AccountNamedKey {\n                    hash,\n                    dictionary_name,\n                    dictionary_item_key,\n                },\n            },\n        )))),\n        asserter: Box::new(|response| {\n            assert_response::<DictionaryQueryResult, _>(\n                response,\n                Some(ResponseType::DictionaryQueryResult),\n                |res| matches!(res.into_inner(),(_, res) if res.value().as_cl_value().is_some()),\n            )\n        }),\n    }\n}\n\nfn get_dictionary_item_by_named_key(\n    state_root_hash: Digest,\n    addr: EntityAddr,\n    dictionary_name: String,\n    dictionary_item_key: String,\n) -> TestCase {\n    TestCase {\n        name: \"get_dictionary_item_by_named_key\",\n        request: Command::Get(GetRequest::State(Box::new(GlobalStateRequest::new(\n            Some(GlobalStateIdentifier::StateRootHash(state_root_hash)),\n            GlobalStateEntityQualifier::DictionaryItem {\n                identifier: DictionaryItemIdentifier::EntityNamedKey {\n                    addr,\n                    dictionary_name,\n                    dictionary_item_key,\n                },\n            },\n        )))),\n        asserter: Box::new(|response| {\n            assert_response::<DictionaryQueryResult, _>(\n                response,\n                Some(ResponseType::DictionaryQueryResult),\n                |res| matches!(res.into_inner(),(_, res) if res.value().as_cl_value().is_some()),\n            )\n        }),\n    }\n}\n\nfn get_balance(state_root_hash: Digest, account_hash: AccountHash) -> TestCase {\n    TestCase {\n        name: \"get_balance\",\n        request: Command::Get(GetRequest::State(Box::new(GlobalStateRequest::new(\n            Some(GlobalStateIdentifier::StateRootHash(state_root_hash)),\n            GlobalStateEntityQualifier::Balance {\n                purse_identifier: 
PurseIdentifier::Account(account_hash),\n            },\n        )))),\n        asserter: Box::new(|response| {\n            assert_response::<BalanceResponse, _>(\n                response,\n                Some(ResponseType::BalanceResponse),\n                |res| res.available_balance == U512::one(),\n            )\n        }),\n    }\n}\n\nfn get_balance_account_not_found(state_root_hash: Digest) -> TestCase {\n    TestCase {\n        name: \"get_balance_account_not_found\",\n        request: Command::Get(GetRequest::State(Box::new(GlobalStateRequest::new(\n            Some(GlobalStateIdentifier::StateRootHash(state_root_hash)),\n            GlobalStateEntityQualifier::Balance {\n                purse_identifier: PurseIdentifier::Account(AccountHash([9; 32])),\n            },\n        )))),\n        asserter: Box::new(|response| response.error_code() == ErrorCode::PurseNotFound as u16),\n    }\n}\n\nfn get_balance_purse_uref_not_found(state_root_hash: Digest) -> TestCase {\n    TestCase {\n        name: \"get_balance_purse_uref_not_found\",\n        request: Command::Get(GetRequest::State(Box::new(GlobalStateRequest::new(\n            Some(GlobalStateIdentifier::StateRootHash(state_root_hash)),\n            GlobalStateEntityQualifier::Balance {\n                purse_identifier: PurseIdentifier::Purse(URef::new([9; 32], Default::default())),\n            },\n        )))),\n        asserter: Box::new(|response| response.error_code() == ErrorCode::PurseNotFound as u16),\n    }\n}\n\nfn get_named_keys_by_prefix(state_root_hash: Digest, entity_addr: EntityAddr) -> TestCase {\n    TestCase {\n        name: \"get_named_keys_by_prefix\",\n        request: Command::Get(GetRequest::State(Box::new(GlobalStateRequest::new(\n            Some(GlobalStateIdentifier::StateRootHash(state_root_hash)),\n            GlobalStateEntityQualifier::ItemsByPrefix {\n                key_prefix: KeyPrefix::NamedKeysByEntity(entity_addr),\n            },\n        )))),\n        asserter: 
Box::new(|response| {\n            assert_response::<Vec<StoredValue>, _>(\n                response,\n                Some(ResponseType::StoredValues),\n                |res| res.iter().all(|v| matches!(v, StoredValue::NamedKey(_))),\n            )\n        }),\n    }\n}\n\nfn get_reward(\n    era_identifier: Option<EraIdentifier>,\n    validator: PublicKey,\n    delegator: Option<DelegatorKind>,\n) -> TestCase {\n    let key = InformationRequest::Reward {\n        era_identifier,\n        validator: validator.into(),\n        delegator: delegator.map(Box::new),\n    };\n\n    TestCase {\n        name: \"get_reward\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: key.tag().into(),\n            key: key.to_bytes().expect(\"should serialize key\"),\n        }),\n        asserter: Box::new(move |response| {\n            assert_response::<RewardResponse, _>(response, Some(ResponseType::Reward), |reward| {\n                // test fixture sets delegation rate to 0\n                reward.amount() > U512::zero() && reward.delegation_rate() == 0\n            })\n        }),\n    }\n}\n\nfn get_protocol_version(expected: ProtocolVersion) -> TestCase {\n    let key = InformationRequest::ProtocolVersion;\n\n    TestCase {\n        name: \"get_protocol_version\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: key.tag().into(),\n            key: vec![],\n        }),\n        asserter: Box::new(move |response| {\n            assert_response::<ProtocolVersion, _>(\n                response,\n                Some(ResponseType::ProtocolVersion),\n                |version| expected == version,\n            )\n        }),\n    }\n}\n\nfn get_entity(state_root_hash: Digest, entity_addr: EntityAddr) -> TestCase {\n    let key = InformationRequest::Entity {\n        state_identifier: Some(GlobalStateIdentifier::StateRootHash(state_root_hash)),\n        identifier: EntityIdentifier::EntityAddr(entity_addr),\n  
      include_bytecode: true,\n    };\n\n    TestCase {\n        name: \"get_entity\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: key.tag().into(),\n            key: key.to_bytes().expect(\"should serialize key\"),\n        }),\n        asserter: Box::new(|response| {\n            assert_response::<AddressableEntityInformation, _>(\n                response,\n                Some(ResponseType::AddressableEntityInformation),\n                |res| res.bytecode().is_some(),\n            )\n        }),\n    }\n}\n\nfn get_entity_without_bytecode(state_root_hash: Digest, entity_addr: EntityAddr) -> TestCase {\n    let key = InformationRequest::Entity {\n        state_identifier: Some(GlobalStateIdentifier::StateRootHash(state_root_hash)),\n        identifier: EntityIdentifier::EntityAddr(entity_addr),\n        include_bytecode: false,\n    };\n\n    TestCase {\n        name: \"get_entity_without_bytecode\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: key.tag().into(),\n            key: key.to_bytes().expect(\"should serialize key\"),\n        }),\n        asserter: Box::new(|response| {\n            assert_response::<AddressableEntityInformation, _>(\n                response,\n                Some(ResponseType::AddressableEntityInformation),\n                |res| res.bytecode().is_none(),\n            )\n        }),\n    }\n}\n\nfn get_entity_pre_migration_account(\n    state_root_hash: Digest,\n    account_hash: AccountHash,\n) -> TestCase {\n    let key = InformationRequest::Entity {\n        state_identifier: Some(GlobalStateIdentifier::StateRootHash(state_root_hash)),\n        identifier: EntityIdentifier::AccountHash(account_hash),\n        include_bytecode: false,\n    };\n\n    TestCase {\n        name: \"get_entity_pre_migration_account\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: key.tag().into(),\n            key: 
key.to_bytes().expect(\"should serialize key\"),\n        }),\n        asserter: Box::new(move |response| {\n            assert_response::<AccountInformation, _>(\n                response,\n                Some(ResponseType::AccountInformation),\n                |res| res.account().account_hash() == account_hash,\n            )\n        }),\n    }\n}\n\nfn get_entity_post_migration_account(\n    state_root_hash: Digest,\n    account_hash: AccountHash,\n) -> TestCase {\n    let key = InformationRequest::Entity {\n        state_identifier: Some(GlobalStateIdentifier::StateRootHash(state_root_hash)),\n        identifier: EntityIdentifier::AccountHash(account_hash),\n        include_bytecode: false,\n    };\n\n    TestCase {\n        name: \"get_entity_post_migration_account\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: key.tag().into(),\n            key: key.to_bytes().expect(\"should serialize key\"),\n        }),\n        asserter: Box::new(move |response| {\n            assert_response::<AddressableEntityInformation, _>(\n                response,\n                Some(ResponseType::AddressableEntityInformation),\n                |_| true,\n            )\n        }),\n    }\n}\n\nfn get_entity_pre_migration_contract(\n    state_root_hash: Digest,\n    contract_hash: ContractHash,\n) -> TestCase {\n    let key = InformationRequest::Entity {\n        state_identifier: Some(GlobalStateIdentifier::StateRootHash(state_root_hash)),\n        identifier: EntityIdentifier::ContractHash(contract_hash),\n        include_bytecode: true,\n    };\n\n    TestCase {\n        name: \"get_entity_pre_migration_contract\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: key.tag().into(),\n            key: key.to_bytes().expect(\"should serialize key\"),\n        }),\n        asserter: Box::new(move |response| {\n            assert_response::<ContractInformation, _>(\n                response,\n             
   Some(ResponseType::ContractInformation),\n                |res| res.wasm().is_some(),\n            )\n        }),\n    }\n}\n\nfn get_entity_post_migration_contract(\n    state_root_hash: Digest,\n    contract_hash: ContractHash,\n) -> TestCase {\n    let key = InformationRequest::Entity {\n        state_identifier: Some(GlobalStateIdentifier::StateRootHash(state_root_hash)),\n        identifier: EntityIdentifier::ContractHash(contract_hash),\n        include_bytecode: true,\n    };\n\n    TestCase {\n        name: \"get_entity_post_migration_contract\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: key.tag().into(),\n            key: key.to_bytes().expect(\"should serialize key\"),\n        }),\n        asserter: Box::new(move |response| {\n            assert_response::<AddressableEntityInformation, _>(\n                response,\n                Some(ResponseType::AddressableEntityInformation),\n                |res| res.bytecode().is_some(),\n            )\n        }),\n    }\n}\n\nfn get_package(state_root_hash: Digest, package_addr: PackageAddr) -> TestCase {\n    let key = InformationRequest::Package {\n        state_identifier: Some(GlobalStateIdentifier::StateRootHash(state_root_hash)),\n        identifier: PackageIdentifier::PackageAddr(package_addr),\n    };\n\n    TestCase {\n        name: \"get_package\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: key.tag().into(),\n            key: key.to_bytes().expect(\"should serialize key\"),\n        }),\n        asserter: Box::new(move |response| {\n            assert_response::<ValueWithProof<Package>, _>(\n                response,\n                Some(ResponseType::PackageWithProof),\n                |_| true,\n            )\n        }),\n    }\n}\n\nfn get_package_pre_migration(\n    state_root_hash: Digest,\n    contract_package_hash: ContractPackageHash,\n) -> TestCase {\n    let key = InformationRequest::Package {\n        
state_identifier: Some(GlobalStateIdentifier::StateRootHash(state_root_hash)),\n        identifier: PackageIdentifier::ContractPackageHash(contract_package_hash),\n    };\n\n    TestCase {\n        name: \"get_package_pre_migration\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: key.tag().into(),\n            key: key.to_bytes().expect(\"should serialize key\"),\n        }),\n        asserter: Box::new(move |response| {\n            assert_response::<ValueWithProof<ContractPackage>, _>(\n                response,\n                Some(ResponseType::ContractPackageWithProof),\n                |_| true,\n            )\n        }),\n    }\n}\n\nfn get_package_post_migration(\n    state_root_hash: Digest,\n    contract_package_hash: ContractPackageHash,\n) -> TestCase {\n    let key = InformationRequest::Package {\n        state_identifier: Some(GlobalStateIdentifier::StateRootHash(state_root_hash)),\n        identifier: PackageIdentifier::ContractPackageHash(contract_package_hash),\n    };\n\n    TestCase {\n        name: \"get_package_post_migration\",\n        request: Command::Get(GetRequest::Information {\n            info_type_tag: key.tag().into(),\n            key: key.to_bytes().expect(\"should serialize key\"),\n        }),\n        asserter: Box::new(move |response| {\n            assert_response::<ValueWithProof<Package>, _>(\n                response,\n                Some(ResponseType::PackageWithProof),\n                |_| true,\n            )\n        }),\n    }\n}\n\nfn try_accept_transaction(key: &SecretKey) -> TestCase {\n    let transaction = Transaction::V1(\n        TransactionV1Builder::new_targeting_invocable_entity_via_alias(\n            \"Test\",\n            \"call\",\n            casper_types::TransactionRuntimeParams::VmCasperV1,\n        )\n        .with_secret_key(key)\n        .with_chain_name(\"casper-example\")\n        .build()\n        .unwrap(),\n    );\n    TestCase {\n        name: 
\"try_accept_transaction\",\n        request: Command::TryAcceptTransaction { transaction },\n        asserter: Box::new(|response| response.error_code() == ErrorCode::NoError as u16),\n    }\n}\n\nfn try_accept_transaction_invalid(rng: &mut TestRng) -> TestCase {\n    let transaction = Transaction::V1(TransactionV1Builder::new_random(rng).build().unwrap());\n    TestCase {\n        name: \"try_accept_transaction_invalid\",\n        request: Command::TryAcceptTransaction { transaction },\n        asserter: Box::new(|response| ErrorCode::try_from(response.error_code()).is_ok()),\n    }\n}\n\nfn try_spec_exec_invalid(rng: &mut TestRng) -> TestCase {\n    let transaction = Transaction::V1(TransactionV1Builder::new_random(rng).build().unwrap());\n    TestCase {\n        name: \"try_spec_exec_invalid\",\n        request: Command::TrySpeculativeExec { transaction },\n        asserter: Box::new(|response| ErrorCode::try_from(response.error_code()).is_ok()),\n    }\n}\n\n#[tokio::test]\nasync fn binary_port_component_rejects_requests_with_invalid_header_version() {\n    testing::init_logging();\n\n    let (mut client, (finish_cranking, _)) = setup().await;\n\n    let request = Command::Get(GetRequest::Information {\n        info_type_tag: InformationRequestTag::Uptime.into(),\n        key: vec![],\n    });\n\n    let mut header = CommandHeader::new(request.tag(), 0);\n\n    // Make the binary protocol version incompatible.\n    header.set_binary_request_version(header.version() + 1);\n\n    let header_bytes = ToBytes::to_bytes(&header).expect(\"should serialize\");\n    let original_request_bytes = header_bytes\n        .iter()\n        .chain(\n            ToBytes::to_bytes(&request)\n                .expect(\"should serialize\")\n                .iter(),\n        )\n        .cloned()\n        .collect::<Vec<_>>();\n    client\n        .send(BinaryMessage::new(original_request_bytes.clone()))\n        .await\n        .expect(\"should send message\");\n    let response = 
timeout(Duration::from_secs(10), client.next())\n        .await\n        .unwrap_or_else(|_| panic!(\"should complete without timeout\"))\n        .unwrap_or_else(|| panic!(\"should have bytes\"))\n        .unwrap_or_else(|_| panic!(\"should have ok response\"));\n    let (binary_response_and_request, _): (BinaryResponseAndRequest, _) =\n        FromBytes::from_bytes(response.payload()).expect(\"should deserialize response\");\n\n    assert_eq!(\n        binary_response_and_request.response().error_code(),\n        ErrorCode::CommandHeaderVersionMismatch as u16\n    );\n\n    let (_net, _rng) = timeout(Duration::from_secs(10), finish_cranking)\n        .await\n        .unwrap_or_else(|_| panic!(\"should finish cranking without timeout\"));\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/tests/configs_override.rs",
    "content": "use std::collections::BTreeSet;\n\nuse num_rational::Ratio;\n\nuse casper_types::{\n    ConsensusProtocolName, FeeHandling, HoldBalanceHandling, PricingHandling, PublicKey,\n    RefundHandling, TimeDiff, TransactionV1Config,\n};\n\nuse crate::types::SyncHandling;\n\n/// Options to allow overriding default chainspec and config settings.\npub(crate) struct ConfigsOverride {\n    pub era_duration: TimeDiff,\n    pub minimum_block_time: TimeDiff,\n    pub minimum_era_height: u64,\n    pub unbonding_delay: u64,\n    pub round_seigniorage_rate: Ratio<u64>,\n    pub consensus_protocol: ConsensusProtocolName,\n    pub finders_fee: Ratio<u64>,\n    pub finality_signature_proportion: Ratio<u64>,\n    pub signature_rewards_max_delay: u64,\n    pub storage_multiplier: u8,\n    pub max_gas_price: u8,\n    pub min_gas_price: u8,\n    pub upper_threshold: u64,\n    pub lower_threshold: u64,\n    pub max_block_size: u32,\n    pub block_gas_limit: u64,\n    pub refund_handling_override: Option<RefundHandling>,\n    pub fee_handling_override: Option<FeeHandling>,\n    pub pricing_handling_override: Option<PricingHandling>,\n    pub allow_prepaid_override: Option<bool>,\n    pub balance_hold_interval_override: Option<TimeDiff>,\n    pub administrators: Option<BTreeSet<PublicKey>>,\n    pub chain_name: Option<String>,\n    pub gas_hold_balance_handling: Option<HoldBalanceHandling>,\n    pub transaction_v1_override: Option<TransactionV1Config>,\n    pub node_config_override: NodeConfigOverride,\n    pub minimum_delegation_rate: u8,\n}\n\nimpl ConfigsOverride {\n    pub(crate) fn with_refund_handling(mut self, refund_handling: RefundHandling) -> Self {\n        self.refund_handling_override = Some(refund_handling);\n        self\n    }\n\n    pub(crate) fn with_fee_handling(mut self, fee_handling: FeeHandling) -> Self {\n        self.fee_handling_override = Some(fee_handling);\n        self\n    }\n\n    pub(crate) fn with_pricing_handling(mut self, pricing_handling: 
PricingHandling) -> Self {\n        self.pricing_handling_override = Some(pricing_handling);\n        self\n    }\n\n    #[allow(unused)]\n    pub(crate) fn with_allow_prepaid(mut self, allow_prepaid: bool) -> Self {\n        self.allow_prepaid_override = Some(allow_prepaid);\n        self\n    }\n\n    pub(crate) fn with_balance_hold_interval(mut self, balance_hold_interval: TimeDiff) -> Self {\n        self.balance_hold_interval_override = Some(balance_hold_interval);\n        self\n    }\n\n    pub(crate) fn with_min_gas_price(mut self, min_gas_price: u8) -> Self {\n        self.min_gas_price = min_gas_price;\n        self\n    }\n\n    pub(crate) fn with_max_gas_price(mut self, max_gas_price: u8) -> Self {\n        self.max_gas_price = max_gas_price;\n        self\n    }\n\n    pub(crate) fn with_lower_threshold(mut self, lower_threshold: u64) -> Self {\n        self.lower_threshold = lower_threshold;\n        self\n    }\n\n    pub(crate) fn with_upper_threshold(mut self, upper_threshold: u64) -> Self {\n        self.upper_threshold = upper_threshold;\n        self\n    }\n\n    pub(crate) fn with_block_size(mut self, max_block_size: u32) -> Self {\n        self.max_block_size = max_block_size;\n        self\n    }\n\n    pub(crate) fn with_block_gas_limit(mut self, block_gas_limit: u64) -> Self {\n        self.block_gas_limit = block_gas_limit;\n        self\n    }\n\n    pub(crate) fn with_minimum_era_height(mut self, minimum_era_height: u64) -> Self {\n        self.minimum_era_height = minimum_era_height;\n        self\n    }\n\n    pub(crate) fn with_administrators(mut self, administrators: BTreeSet<PublicKey>) -> Self {\n        self.administrators = Some(administrators);\n        self\n    }\n\n    pub(crate) fn with_chain_name(mut self, chain_name: String) -> Self {\n        self.chain_name = Some(chain_name);\n        self\n    }\n\n    pub(crate) fn with_gas_hold_balance_handling(\n        mut self,\n        gas_hold_balance_handling: 
HoldBalanceHandling,\n    ) -> Self {\n        self.gas_hold_balance_handling = Some(gas_hold_balance_handling);\n        self\n    }\n\n    pub(crate) fn with_transaction_v1_config(\n        mut self,\n        transaction_v1config: TransactionV1Config,\n    ) -> Self {\n        self.transaction_v1_override = Some(transaction_v1config);\n        self\n    }\n\n    pub(crate) fn with_minimum_delegation_rate(mut self, minimum_delegation_rate: u8) -> Self {\n        self.minimum_delegation_rate = minimum_delegation_rate;\n        self\n    }\n}\n\nimpl Default for ConfigsOverride {\n    fn default() -> Self {\n        ConfigsOverride {\n            era_duration: TimeDiff::from_millis(0), // zero means use the default value\n            minimum_block_time: \"1second\".parse().unwrap(),\n            minimum_era_height: 2,\n            unbonding_delay: 3,\n            round_seigniorage_rate: Ratio::new(1, 100),\n            consensus_protocol: ConsensusProtocolName::Zug,\n            finders_fee: Ratio::new(1, 4),\n            finality_signature_proportion: Ratio::new(1, 3),\n            signature_rewards_max_delay: 5,\n            storage_multiplier: 1,\n            max_gas_price: 3,\n            min_gas_price: 1,\n            upper_threshold: 90,\n            lower_threshold: 50,\n            max_block_size: 10_485_760u32,\n            block_gas_limit: 10_000_000_000_000u64,\n            refund_handling_override: None,\n            fee_handling_override: None,\n            pricing_handling_override: None,\n            allow_prepaid_override: None,\n            balance_hold_interval_override: None,\n            administrators: None,\n            chain_name: None,\n            gas_hold_balance_handling: None,\n            transaction_v1_override: None,\n            node_config_override: NodeConfigOverride::default(),\n            minimum_delegation_rate: 0,\n        }\n    }\n}\n\n#[derive(Clone, Default)]\npub(crate) struct NodeConfigOverride {\n    pub 
sync_handling_override: Option<SyncHandling>,\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/tests/consensus_rules.rs",
    "content": "use std::{collections::BTreeMap, sync::Arc};\n\nuse either::Either;\nuse tokio::time::{self};\nuse tracing::{error, info};\n\nuse casper_types::{\n    system::auction::BidsExt, ConsensusProtocolName, EraId, PublicKey, SecretKey, Timestamp, U512,\n};\n\nuse crate::{\n    components::consensus::{self, NewBlockPayload},\n    effect::{requests::NetworkRequest, EffectExt},\n    protocol::Message,\n    reactor::main_reactor::{\n        tests::{\n            configs_override::ConfigsOverride, fixture::TestFixture, initial_stakes::InitialStakes,\n            switch_blocks::SwitchBlocks, ERA_TWO, ONE_MIN,\n        },\n        MainEvent,\n    },\n    types::BlockPayload,\n};\n\n#[tokio::test]\nasync fn run_equivocator_network() {\n    let mut rng = crate::new_rng();\n\n    let alice_secret_key = Arc::new(SecretKey::random(&mut rng));\n    let alice_public_key = PublicKey::from(&*alice_secret_key);\n    let bob_secret_key = Arc::new(SecretKey::random(&mut rng));\n    let bob_public_key = PublicKey::from(&*bob_secret_key);\n    let charlie_secret_key = Arc::new(SecretKey::random(&mut rng));\n    let charlie_public_key = PublicKey::from(&*charlie_secret_key);\n\n    let mut stakes = BTreeMap::new();\n    stakes.insert(\n        alice_public_key.clone(),\n        (U512::from(u64::MAX), U512::from(1)),\n    );\n    stakes.insert(\n        bob_public_key.clone(),\n        (U512::from(u64::MAX), U512::from(1)),\n    );\n    stakes.insert(\n        charlie_public_key,\n        (U512::from(u64::MAX), U512::from(u64::MAX)),\n    );\n\n    // Here's where things go wrong: Bob doesn't run a node at all, and Alice runs two!\n    let secret_keys = vec![\n        alice_secret_key.clone(),\n        alice_secret_key,\n        charlie_secret_key,\n    ];\n\n    // We configure the era to take 15 rounds. 
That should guarantee that the two nodes equivocate.\n    let spec_override = ConfigsOverride {\n        minimum_era_height: 10,\n        consensus_protocol: ConsensusProtocolName::Highway,\n        storage_multiplier: 2,\n        ..Default::default()\n    };\n\n    let mut fixture =\n        TestFixture::new_with_keys(rng, secret_keys, stakes.clone(), Some(spec_override)).await;\n\n    let min_round_len = fixture.chainspec.core_config.minimum_block_time;\n    let mut maybe_first_message_time = None;\n\n    let mut alice_reactors = fixture\n        .network\n        .reactors_mut()\n        .filter(|reactor| *reactor.inner().consensus().public_key() == alice_public_key);\n\n    // Delay all messages to and from the first of Alice's nodes until three rounds after the first\n    // message.  Further, significantly delay any incoming pings to avoid the node detecting the\n    // doppelganger and deactivating itself.\n    alice_reactors.next().unwrap().set_filter(move |event| {\n        if crate::reactor::main_reactor::tests::is_ping(&event) {\n            return Either::Left(time::sleep((min_round_len * 30).into()).event(move |_| event));\n        }\n        let now = Timestamp::now();\n        match &event {\n            MainEvent::ConsensusMessageIncoming(_) => {}\n            MainEvent::NetworkRequest(\n                NetworkRequest::SendMessage { payload, .. }\n                | NetworkRequest::ValidatorBroadcast { payload, .. }\n                | NetworkRequest::Gossip { payload, .. 
},\n            ) if matches!(**payload, Message::Consensus(_)) => {}\n            _ => return Either::Right(event),\n        };\n        let first_message_time = *maybe_first_message_time.get_or_insert(now);\n        if now < first_message_time + min_round_len * 3 {\n            return Either::Left(time::sleep(min_round_len.into()).event(move |_| event));\n        }\n        Either::Right(event)\n    });\n\n    // Significantly delay all incoming pings to the second of Alice's nodes.\n    alice_reactors.next().unwrap().set_filter(move |event| {\n        if crate::reactor::main_reactor::tests::is_ping(&event) {\n            return Either::Left(time::sleep((min_round_len * 30).into()).event(move |_| event));\n        }\n        Either::Right(event)\n    });\n\n    drop(alice_reactors);\n\n    let era_count = 4;\n\n    let timeout = ONE_MIN * (era_count + 1) as u32;\n    info!(\"Waiting for {} eras to end.\", era_count);\n    fixture\n        .run_until_stored_switch_block_header(EraId::new(era_count - 1), timeout)\n        .await;\n\n    // network settled; select data to analyze\n    let switch_blocks = SwitchBlocks::collect(fixture.network.nodes(), era_count);\n    let mut era_bids = BTreeMap::new();\n    for era in 0..era_count {\n        era_bids.insert(era, switch_blocks.bids(fixture.network.nodes(), era));\n    }\n\n    // Since this setup sometimes produces no equivocation or an equivocation in era 2 rather than\n    // era 1, we set an offset here.  
If neither era has an equivocation, exit early.\n    // TODO: Remove this once https://github.com/casper-network/casper-node/issues/1859 is fixed.\n    for switch_block in &switch_blocks.headers {\n        let era_id = switch_block.era_id();\n        let count = switch_blocks.equivocators(era_id.value()).len();\n        info!(\"equivocators in {}: {}\", era_id, count);\n    }\n    let offset = if !switch_blocks.equivocators(1).is_empty() {\n        0\n    } else if !switch_blocks.equivocators(2).is_empty() {\n        error!(\"failed to equivocate in era 1 - asserting equivocation detected in era 2\");\n        1\n    } else {\n        error!(\"failed to equivocate in era 1 or 2\");\n        return;\n    };\n\n    // Era 0 consists only of the genesis block.\n    // In era 1, Alice equivocates. Since eviction takes place with a delay of one\n    // (`auction_delay`) era, she is still included in the next era's validator set.\n    let next_era_id = 1 + offset;\n\n    assert_eq!(\n        switch_blocks.equivocators(next_era_id),\n        [alice_public_key.clone()]\n    );\n    let next_era_bids = era_bids.get(&next_era_id).expect(\"should have offset era\");\n\n    let next_era_alice = next_era_bids\n        .validator_bid(&alice_public_key)\n        .expect(\"should have Alice's offset bid\");\n    assert!(\n        next_era_alice.inactive(),\n        \"Alice's bid should be inactive in offset era.\"\n    );\n    assert!(switch_blocks\n        .next_era_validators(next_era_id)\n        .contains_key(&alice_public_key));\n\n    // In era 2 Alice is banned. Banned validators count neither as faulty nor inactive, even\n    // though they cannot participate. 
In the next era, she will be evicted.\n    let future_era_id = 2 + offset;\n    assert_eq!(switch_blocks.equivocators(future_era_id), []);\n    let future_era_bids = era_bids\n        .get(&future_era_id)\n        .expect(\"should have future era\");\n    let future_era_alice = future_era_bids\n        .validator_bid(&alice_public_key)\n        .expect(\"should have Alice's future bid\");\n    assert!(\n        future_era_alice.inactive(),\n        \"Alice's bid should be inactive in future era.\"\n    );\n    assert!(!switch_blocks\n        .next_era_validators(future_era_id)\n        .contains_key(&alice_public_key));\n\n    // In era 3 Alice is not a validator anymore and her bid remains deactivated.\n    let era_3 = 3;\n    if offset == 0 {\n        assert_eq!(switch_blocks.equivocators(era_3), []);\n        let era_3_bids = era_bids.get(&era_3).expect(\"should have era 3 bids\");\n        let era_3_alice = era_3_bids\n            .validator_bid(&alice_public_key)\n            .expect(\"should have Alice's era 3 bid\");\n        assert!(\n            era_3_alice.inactive(),\n            \"Alice's bid should be inactive in era 3.\"\n        );\n        assert!(!switch_blocks\n            .next_era_validators(era_3)\n            .contains_key(&alice_public_key));\n    }\n\n    // Bob is inactive.\n    assert_eq!(\n        switch_blocks.inactive_validators(1),\n        [bob_public_key.clone()]\n    );\n    assert_eq!(\n        switch_blocks.inactive_validators(2),\n        [bob_public_key.clone()]\n    );\n\n    for (era, bids) in era_bids {\n        for (public_key, stake) in &stakes {\n            let bid = bids\n                .validator_bid(public_key)\n                .expect(\"should have bid for public key {public_key} in era {era}\");\n            let staked_amount = bid.staked_amount();\n            assert!(\n                staked_amount >= stake.1,\n                \"expected stake {} for public key {} in era {}, found {}\",\n                
staked_amount,\n                public_key,\n                era,\n                stake.1\n            );\n        }\n    }\n}\n\n// This test exercises a scenario in which a proposed block contains invalid accusations.\n// Blocks containing no transactions or transfers used to be incorrectly marked as not needing\n// validation even if they contained accusations, which opened up a security hole through which a\n// malicious validator could accuse whomever they wanted of equivocating and have these\n// accusations accepted by the other validators. This has been patched and the test asserts that\n// such a scenario is no longer possible.\n#[tokio::test]\nasync fn empty_proposed_block_validation_regression() {\n    let initial_stakes = InitialStakes::AllEqual {\n        count: 4,\n        stake: 100,\n    };\n    let spec_override = ConfigsOverride {\n        minimum_era_height: 15,\n        ..Default::default()\n    };\n    let mut fixture = TestFixture::new(initial_stakes, Some(spec_override)).await;\n\n    let malicious_validator =\n        PublicKey::from(fixture.node_contexts.first().unwrap().secret_key.as_ref());\n    info!(\"Malicious validator: {}\", malicious_validator);\n    let everyone_else: Vec<_> = fixture\n        .node_contexts\n        .iter()\n        .skip(1)\n        .map(|node_context| PublicKey::from(node_context.secret_key.as_ref()))\n        .collect();\n    let malicious_id = fixture.node_contexts.first().unwrap().id;\n    let malicious_runner = fixture.network.nodes_mut().get_mut(&malicious_id).unwrap();\n    malicious_runner\n        .reactor_mut()\n        .inner_mut()\n        .set_filter(move |event| match event {\n            MainEvent::Consensus(consensus::Event::NewBlockPayload(NewBlockPayload {\n                era_id,\n                block_payload: _,\n                block_context,\n            })) => {\n                info!(\"Accusing everyone else!\");\n                // We hook into the NewBlockPayload event to replace the 
block being proposed with\n                // an empty one that accuses all the validators, except the malicious validator.\n                Either::Right(MainEvent::Consensus(consensus::Event::NewBlockPayload(\n                    NewBlockPayload {\n                        era_id,\n                        block_payload: Arc::new(BlockPayload::new(\n                            BTreeMap::new(),\n                            everyone_else.clone(),\n                            Default::default(),\n                            false,\n                            1u8,\n                        )),\n                        block_context,\n                    },\n                )))\n            }\n            event => Either::Right(event),\n        });\n\n    info!(\"Waiting for the first era after genesis to end.\");\n    fixture.run_until_consensus_in_era(ERA_TWO, ONE_MIN).await;\n    let switch_blocks = SwitchBlocks::collect(fixture.network.nodes(), 2);\n\n    // Nobody actually double-signed. The accusations should have had no effect.\n    assert_eq!(\n        switch_blocks.equivocators(0),\n        [],\n        \"expected no equivocators\"\n    );\n    // If the malicious validator was the first proposer, all their Highway units might be invalid,\n    // because they all refer to the invalid proposal, so they might get flagged as inactive. No\n    // other validators should be considered inactive.\n    match switch_blocks.inactive_validators(0) {\n        [] => {}\n        [inactive_validator] if malicious_validator == *inactive_validator => {}\n        inactive => panic!(\"unexpected inactive validators: {:?}\", inactive),\n    }\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/tests/fixture.rs",
    "content": "use itertools::Itertools;\nuse std::{\n    collections::BTreeMap, convert::TryFrom, iter, net::SocketAddr, str::FromStr, sync::Arc,\n    time::Duration,\n};\n\nuse num_rational::Ratio;\nuse num_traits::Zero;\nuse rand::Rng;\nuse tempfile::TempDir;\nuse tokio::time::error::Elapsed;\nuse tracing::info;\n\nuse casper_storage::{\n    data_access_layer::{\n        balance::{BalanceHandling, BalanceResult},\n        BalanceRequest, BidsRequest, BidsResult, ProofHandling,\n    },\n    global_state::state::{StateProvider, StateReader},\n};\nuse casper_types::{\n    execution::{ExecutionResult, TransformV2},\n    system::auction::{DelegationRate, DelegatorKind},\n    testing::TestRng,\n    AccountConfig, AccountsConfig, ActivationPoint, AddressableEntityHash, Block, BlockBody,\n    BlockHash, BlockV2, CLValue, Chainspec, ChainspecRawBytes, EraId, Key, Motes, NextUpgrade,\n    ProtocolVersion, PublicKey, SecretKey, StoredValue, SystemHashRegistry, TimeDiff, Timestamp,\n    Transaction, TransactionHash, ValidatorConfig, U512,\n};\n\nuse crate::{\n    components::{gossiper, network, storage},\n    effect::EffectExt,\n    reactor::main_reactor::{\n        tests::{\n            configs_override::{ConfigsOverride, NodeConfigOverride},\n            initial_stakes::InitialStakes,\n            Nodes, ERA_TWO,\n        },\n        Config, MainReactor, ReactorState,\n    },\n    testing::{self, filter_reactor::FilterReactor, network::TestingNetwork},\n    types::NodeId,\n    utils::{External, Loadable, Source, RESOURCES_PATH},\n    WithDir,\n};\n\npub(crate) struct NodeContext {\n    pub id: NodeId,\n    pub secret_key: Arc<SecretKey>,\n    pub config: Config,\n    pub storage_dir: TempDir,\n}\n\npub(crate) struct TestFixture {\n    pub rng: TestRng,\n    pub node_contexts: Vec<NodeContext>,\n    pub network: TestingNetwork<FilterReactor<MainReactor>>,\n    pub chainspec: Arc<Chainspec>,\n    pub chainspec_raw_bytes: Arc<ChainspecRawBytes>,\n}\n\nimpl TestFixture {\n   
 /// Sets up a new fixture with the number of nodes indicated by `initial_stakes`.\n    ///\n    /// Runs the network until all nodes are initialized (i.e. none of their reactor states are\n    /// still `ReactorState::Initialize`).\n    pub(crate) async fn new(\n        initial_stakes: InitialStakes,\n        spec_override: Option<ConfigsOverride>,\n    ) -> Self {\n        let rng = TestRng::new();\n        Self::new_with_rng(initial_stakes, spec_override, rng).await\n    }\n\n    pub(crate) async fn new_with_rng(\n        initial_stakes: InitialStakes,\n        spec_override: Option<ConfigsOverride>,\n        mut rng: TestRng,\n    ) -> Self {\n        let stake_values = match initial_stakes {\n            InitialStakes::FromVec(stakes) => {\n                stakes.into_iter().map(|stake| stake.into()).collect()\n            }\n            InitialStakes::Random { count } => {\n                // By default, we use very large stakes so we would catch overflow issues.\n                iter::from_fn(|| Some(U512::from(rng.gen_range(100..999)) * U512::from(u128::MAX)))\n                    .take(count)\n                    .collect()\n            }\n            InitialStakes::AllEqual { count, stake } => {\n                vec![stake.into(); count]\n            }\n        };\n\n        let secret_keys: Vec<Arc<SecretKey>> = (0..stake_values.len())\n            .map(|_| Arc::new(SecretKey::random(&mut rng)))\n            .collect();\n\n        let stakes = secret_keys\n            .iter()\n            .zip(stake_values)\n            .map(|(secret_key, stake)| {\n                (\n                    PublicKey::from(secret_key.as_ref()),\n                    (U512::from(700_000_000_000_000_000u64), stake),\n                )\n            })\n            .collect();\n\n        Self::new_with_keys(rng, secret_keys, stakes, spec_override).await\n    }\n\n    pub(crate) async fn new_with_keys(\n        rng: TestRng,\n        secret_keys: Vec<Arc<SecretKey>>,\n        
stakes: BTreeMap<PublicKey, (U512, U512)>,\n        spec_override: Option<ConfigsOverride>,\n    ) -> Self {\n        testing::init_logging();\n\n        // Load the `local` chainspec.\n        let (mut chainspec, chainspec_raw_bytes) =\n            <(Chainspec, ChainspecRawBytes)>::from_resources(\"local\");\n\n        // Override accounts with those generated from the keys.\n        let accounts = stakes\n            .into_iter()\n            .map(|(public_key, (balance, bonded_amount))| {\n                let validator_config =\n                    ValidatorConfig::new(Motes::new(bonded_amount), DelegationRate::zero());\n                AccountConfig::new(public_key, Motes::new(balance), Some(validator_config))\n            })\n            .collect();\n        let delegators = vec![];\n        let administrators = vec![];\n        chainspec.network_config.accounts_config =\n            AccountsConfig::new(accounts, delegators, administrators);\n\n        // Allow 2 seconds startup time per validator.\n        let genesis_time = Timestamp::now() + TimeDiff::from_seconds(secret_keys.len() as u32 * 2);\n        info!(\n            \"creating test chain configuration, genesis: {}\",\n            genesis_time\n        );\n        chainspec.protocol_config.activation_point = ActivationPoint::Genesis(genesis_time);\n        chainspec.core_config.finality_threshold_fraction = Ratio::new(34, 100);\n        chainspec.core_config.era_duration = TimeDiff::from_millis(0);\n        chainspec.core_config.auction_delay = 1;\n        chainspec.core_config.validator_slots = 100;\n        let ConfigsOverride {\n            era_duration,\n            minimum_block_time,\n            minimum_era_height,\n            unbonding_delay,\n            round_seigniorage_rate,\n            consensus_protocol,\n            finders_fee,\n            finality_signature_proportion,\n            signature_rewards_max_delay,\n            storage_multiplier,\n            max_gas_price,\n           
 min_gas_price,\n            upper_threshold,\n            lower_threshold,\n            max_block_size,\n            block_gas_limit,\n            refund_handling_override,\n            fee_handling_override,\n            pricing_handling_override,\n            allow_prepaid_override,\n            balance_hold_interval_override,\n            administrators,\n            chain_name,\n            gas_hold_balance_handling,\n            transaction_v1_override,\n            node_config_override,\n            minimum_delegation_rate,\n        } = spec_override.unwrap_or_default();\n        if era_duration != TimeDiff::from_millis(0) {\n            chainspec.core_config.era_duration = era_duration;\n        }\n        info!(?block_gas_limit);\n        chainspec.core_config.minimum_block_time = minimum_block_time;\n        chainspec.core_config.minimum_era_height = minimum_era_height;\n        chainspec.core_config.unbonding_delay = unbonding_delay;\n        chainspec.core_config.round_seigniorage_rate = round_seigniorage_rate;\n        chainspec.core_config.consensus_protocol = consensus_protocol;\n        chainspec.core_config.finders_fee = finders_fee;\n        chainspec.core_config.finality_signature_proportion = finality_signature_proportion;\n        chainspec.core_config.minimum_block_time = minimum_block_time;\n        chainspec.core_config.minimum_era_height = minimum_era_height;\n        chainspec.core_config.minimum_delegation_rate = minimum_delegation_rate;\n        chainspec.vacancy_config.min_gas_price = min_gas_price;\n        chainspec.vacancy_config.max_gas_price = max_gas_price;\n        chainspec.vacancy_config.upper_threshold = upper_threshold;\n        chainspec.vacancy_config.lower_threshold = lower_threshold;\n        chainspec.transaction_config.block_gas_limit = block_gas_limit;\n        chainspec.transaction_config.max_block_size = max_block_size;\n        chainspec.highway_config.maximum_round_length =\n            
chainspec.core_config.minimum_block_time * 2;\n        chainspec.core_config.signature_rewards_max_delay = signature_rewards_max_delay;\n\n        if let Some(refund_handling) = refund_handling_override {\n            chainspec.core_config.refund_handling = refund_handling;\n        }\n        if let Some(fee_handling) = fee_handling_override {\n            chainspec.core_config.fee_handling = fee_handling;\n        }\n        if let Some(pricing_handling) = pricing_handling_override {\n            chainspec.core_config.pricing_handling = pricing_handling;\n        }\n        if let Some(allow_prepaid) = allow_prepaid_override {\n            chainspec.core_config.allow_prepaid = allow_prepaid;\n        }\n        if let Some(balance_hold_interval) = balance_hold_interval_override {\n            chainspec.core_config.gas_hold_interval = balance_hold_interval;\n        }\n        if let Some(administrators) = administrators {\n            chainspec.core_config.administrators = administrators;\n        }\n        if let Some(chain_name) = chain_name {\n            chainspec.network_config.name = chain_name;\n        }\n        if let Some(gas_hold_balance_handling) = gas_hold_balance_handling {\n            chainspec.core_config.gas_hold_balance_handling = gas_hold_balance_handling;\n        }\n        if let Some(transaction_v1_config) = transaction_v1_override {\n            chainspec.transaction_config.transaction_v1_config = transaction_v1_config\n        }\n\n        let applied_block_gas_limit = chainspec.transaction_config.block_gas_limit;\n\n        info!(?applied_block_gas_limit);\n\n        let mut fixture = TestFixture {\n            rng,\n            node_contexts: vec![],\n            network: TestingNetwork::new(),\n            chainspec: Arc::new(chainspec),\n            chainspec_raw_bytes: Arc::new(chainspec_raw_bytes),\n        };\n\n        for secret_key in secret_keys {\n            let (config, storage_dir) = fixture.create_node_config(\n         
       secret_key.as_ref(),\n                None,\n                storage_multiplier,\n                node_config_override.clone(),\n            );\n            fixture.add_node(secret_key, config, storage_dir).await;\n        }\n\n        fixture\n            .run_until(\n                move |nodes: &Nodes| {\n                    nodes.values().all(|runner| {\n                        !matches!(runner.main_reactor().state, ReactorState::Initialize)\n                    })\n                },\n                Duration::from_secs(20),\n            )\n            .await;\n\n        fixture\n    }\n\n    /// Access the environments RNG.\n    #[inline(always)]\n    pub(crate) fn rng_mut(&mut self) -> &mut TestRng {\n        &mut self.rng\n    }\n\n    /// Returns the highest complete block from node 0.\n    ///\n    /// Panics if there is no such block.\n    #[track_caller]\n    pub(crate) fn highest_complete_block(&self) -> Block {\n        let node_0 = self\n            .node_contexts\n            .first()\n            .expect(\"should have at least one node\")\n            .id;\n        self.network\n            .nodes()\n            .get(&node_0)\n            .expect(\"should have node 0\")\n            .main_reactor()\n            .storage()\n            .get_highest_complete_block()\n            .expect(\"should not error reading db\")\n            .expect(\"node 0 should have a complete block\")\n    }\n\n    /// Get block by height\n    pub(crate) fn get_block_by_height(&self, block_height: u64) -> Block {\n        let node_0 = self\n            .node_contexts\n            .first()\n            .expect(\"should have at least one node\")\n            .id;\n\n        self.network\n            .nodes()\n            .get(&node_0)\n            .expect(\"should have node 0\")\n            .main_reactor()\n            .storage()\n            .read_block_by_height(block_height)\n            .expect(\"failure to read block at height\")\n    }\n\n    #[track_caller]\n 
   pub(crate) fn get_block_gas_price_by_public_key(\n        &self,\n        maybe_public_key: Option<&PublicKey>,\n    ) -> u8 {\n        let node_id = match maybe_public_key {\n            None => {\n                &self\n                    .node_contexts\n                    .first()\n                    .expect(\"should have at least one node\")\n                    .id\n            }\n            Some(public_key) => {\n                let (node_id, _) = self\n                    .network\n                    .nodes()\n                    .iter()\n                    .find(|(_, runner)| runner.main_reactor().consensus.public_key() == public_key)\n                    .expect(\"should have runner\");\n\n                node_id\n            }\n        };\n\n        self.network\n            .nodes()\n            .get(node_id)\n            .expect(\"should have node 0\")\n            .main_reactor()\n            .storage()\n            .get_highest_complete_block()\n            .expect(\"should not error reading db\")\n            .expect(\"node 0 should have a complete block\")\n            .maybe_current_gas_price()\n            .expect(\"must have gas price\")\n    }\n\n    #[track_caller]\n    pub(crate) fn switch_block(&self, era: EraId) -> BlockV2 {\n        let node_0 = self\n            .node_contexts\n            .first()\n            .expect(\"should have at least one node\")\n            .id;\n        self.network\n            .nodes()\n            .get(&node_0)\n            .expect(\"should have node 0\")\n            .main_reactor()\n            .storage()\n            .read_switch_block_by_era_id(era)\n            .and_then(|block| BlockV2::try_from(block).ok())\n            .unwrap_or_else(|| panic!(\"node 0 should have a switch block V2 for {}\", era))\n    }\n\n    #[track_caller]\n    pub(crate) fn create_node_config(\n        &mut self,\n        secret_key: &SecretKey,\n        maybe_trusted_hash: Option<BlockHash>,\n        storage_multiplier: 
u8,\n        node_config_override: NodeConfigOverride,\n    ) -> (Config, TempDir) {\n        // Set the network configuration.\n        let network_cfg = match self.node_contexts.first() {\n            Some(first_node) => {\n                let known_address =\n                    SocketAddr::from_str(&first_node.config.network.bind_address).unwrap();\n                network::Config::default_local_net(known_address.port())\n            }\n            None => {\n                let port = testing::unused_port_on_localhost();\n                network::Config::default_local_net_first_node(port)\n            }\n        };\n        let mut cfg = Config {\n            network: network_cfg,\n            gossip: gossiper::Config::new_with_small_timeouts(),\n            binary_port_server: crate::BinaryPortConfig {\n                allow_request_get_all_values: true,\n                allow_request_get_trie: true,\n                allow_request_speculative_exec: true,\n                ..Default::default()\n            },\n            ..Default::default()\n        };\n        let NodeConfigOverride {\n            sync_handling_override,\n        } = node_config_override;\n        if let Some(sync_handling) = sync_handling_override {\n            cfg.node.sync_handling = sync_handling;\n        }\n\n        // Additionally set up storage in a temporary directory.\n        let (storage_cfg, temp_dir) = storage::Config::new_for_tests(storage_multiplier);\n        // ...and the secret key for our validator.\n        {\n            let secret_key_path = temp_dir.path().join(\"secret_key\");\n            secret_key\n                .to_file(secret_key_path.clone())\n                .expect(\"could not write secret key\");\n            cfg.consensus.secret_key_path = External::Path(secret_key_path);\n        }\n        cfg.storage = storage_cfg;\n        cfg.node.trusted_hash = maybe_trusted_hash;\n        cfg.contract_runtime.max_global_state_size =\n            Some(1024 * 1024 
* storage_multiplier as usize);\n\n        (cfg, temp_dir)\n    }\n\n    /// Adds a node to the network.\n    ///\n    /// If a previously-removed node is to be re-added, then the `secret_key`, `config` and\n    /// `storage_dir` returned in the `NodeContext` during removal should be used here in order to\n    /// ensure the same storage dir is used across both executions.\n    pub(crate) async fn add_node(\n        &mut self,\n        secret_key: Arc<SecretKey>,\n        config: Config,\n        storage_dir: TempDir,\n    ) -> NodeId {\n        let (id, _) = self\n            .network\n            .add_node_with_config_and_chainspec(\n                WithDir::new(RESOURCES_PATH.join(\"local\"), config.clone()),\n                Arc::clone(&self.chainspec),\n                Arc::clone(&self.chainspec_raw_bytes),\n                &mut self.rng,\n            )\n            .await\n            .expect(\"could not add node to reactor\");\n        let node_context = NodeContext {\n            id,\n            secret_key,\n            config,\n            storage_dir,\n        };\n        self.node_contexts.push(node_context);\n        info!(\"added node {} with id {}\", self.node_contexts.len() - 1, id);\n        id\n    }\n\n    #[track_caller]\n    pub(crate) fn remove_and_stop_node(&mut self, index: usize) -> NodeContext {\n        let node_context = self.node_contexts.remove(index);\n        let runner = self.network.remove_node(&node_context.id).unwrap();\n        runner.is_shutting_down.set();\n        info!(\"removed node {} with id {}\", index, node_context.id);\n        node_context\n    }\n\n    /// Runs the network until `condition` is true.\n    ///\n    /// Returns an error if the condition isn't met in time.\n    pub(crate) async fn try_run_until<F>(\n        &mut self,\n        condition: F,\n        within: Duration,\n    ) -> Result<(), Elapsed>\n    where\n        F: Fn(&Nodes) -> bool,\n    {\n        self.network\n            .try_settle_on(&mut 
self.rng, condition, within)\n            .await\n    }\n\n    /// Runs the network until `condition` is true.\n    ///\n    /// Panics if the condition isn't met in time.\n    pub(crate) async fn run_until<F>(&mut self, condition: F, within: Duration)\n    where\n        F: Fn(&Nodes) -> bool,\n    {\n        self.network\n            .settle_on(&mut self.rng, condition, within)\n            .await\n    }\n\n    /// Runs the network until all nodes reach the given completed block height.\n    ///\n    /// Returns an error if the condition isn't met in time.\n    pub(crate) async fn try_run_until_block_height(\n        &mut self,\n        block_height: u64,\n        within: Duration,\n    ) -> Result<(), Elapsed> {\n        self.try_run_until(\n            move |nodes: &Nodes| {\n                nodes.values().all(|runner| {\n                    runner\n                        .main_reactor()\n                        .storage()\n                        .get_highest_complete_block()\n                        .expect(\"should not error reading db\")\n                        .map(|block| block.height())\n                        == Some(block_height)\n                })\n            },\n            within,\n        )\n        .await\n    }\n\n    /// Runs the network until all nodes reach the given completed block height.\n    ///\n    /// Panics if the condition isn't met in time.\n    pub(crate) async fn run_until_block_height(&mut self, block_height: u64, within: Duration) {\n        self.try_run_until_block_height(block_height, within)\n            .await\n            .unwrap_or_else(|_| {\n                panic!(\n                    \"should reach block {} within {} seconds\",\n                    block_height,\n                    within.as_secs_f64(),\n                )\n            })\n    }\n\n    /// Runs the network until all nodes' consensus components reach the given era.\n    ///\n    /// Panics if the condition isn't met in time.\n    pub(crate) async fn 
run_until_consensus_in_era(&mut self, era_id: EraId, within: Duration) {\n        self.try_until_consensus_in_era(era_id, within)\n            .await\n            .unwrap_or_else(|_| {\n                panic!(\n                    \"should reach {} within {} seconds\",\n                    era_id,\n                    within.as_secs_f64(),\n                )\n            })\n    }\n\n    /// Runs the network until all nodes' consensus components reach the given era.\n    pub(crate) async fn try_until_consensus_in_era(\n        &mut self,\n        era_id: EraId,\n        within: Duration,\n    ) -> Result<(), Elapsed> {\n        self.try_run_until(\n            move |nodes: &Nodes| {\n                nodes\n                    .values()\n                    .all(|runner| runner.main_reactor().consensus().current_era() == Some(era_id))\n            },\n            within,\n        )\n        .await\n    }\n\n    /// Runs the network until all nodes' storage components have stored the switch block header for\n    /// the given era.\n    ///\n    /// Panics if the condition isn't met in time.\n    pub(crate) async fn run_until_stored_switch_block_header(\n        &mut self,\n        era_id: EraId,\n        within: Duration,\n    ) {\n        self.try_until_stored_switch_block_header(era_id, within)\n            .await\n            .unwrap_or_else(|_| {\n                panic!(\n                    \"should have stored switch block header for {} within {} seconds\",\n                    era_id,\n                    within.as_secs_f64(),\n                )\n            })\n    }\n\n    /// Runs the network until all nodes' storage components have stored the switch block header for\n    /// the given era.\n    pub(crate) async fn try_until_stored_switch_block_header(\n        &mut self,\n        era_id: EraId,\n        within: Duration,\n    ) -> Result<(), Elapsed> {\n        self.try_run_until(\n            move |nodes: &Nodes| {\n                
nodes.values().all(|runner| {\n                    let available_block_range =\n                        runner.main_reactor().storage().get_available_block_range();\n                    runner\n                        .main_reactor()\n                        .storage()\n                        .read_highest_switch_block_headers(1)\n                        .unwrap()\n                        .last()\n                        .is_some_and(|header| {\n                            header.era_id() == era_id\n                                && available_block_range.contains(header.height())\n                        })\n                })\n            },\n            within,\n        )\n        .await\n    }\n\n    /// Runs the network until all nodes have executed the given transaction and stored the\n    /// execution result.\n    ///\n    /// Panics if the condition isn't met in time.\n    pub(crate) async fn run_until_executed_transaction(\n        &mut self,\n        txn_hash: &TransactionHash,\n        within: Duration,\n    ) {\n        self.try_run_until(\n            move |nodes: &Nodes| {\n                nodes.values().all(|runner| {\n                    if runner\n                        .main_reactor()\n                        .storage()\n                        .read_execution_result(txn_hash)\n                        .is_some()\n                    {\n                        let exec_info = runner\n                            .main_reactor()\n                            .storage()\n                            .read_execution_info(*txn_hash);\n\n                        if let Some(exec_info) = exec_info {\n                            runner\n                                .main_reactor()\n                                .storage()\n                                .read_block_header_by_height(exec_info.block_height, true)\n                                .unwrap()\n                                .is_some()\n                        } else {\n                    
        false\n                        }\n                    } else {\n                        false\n                    }\n                })\n            },\n            within,\n        )\n        .await\n        .unwrap_or_else(|_| {\n            panic!(\n                \"should have stored execution result for {} within {} seconds\",\n                txn_hash,\n                within.as_secs_f64(),\n            )\n        })\n    }\n\n    pub(crate) async fn schedule_upgrade_for_era_two(&mut self) {\n        for runner in self.network.runners_mut() {\n            runner\n                .process_injected_effects(|effect_builder| {\n                    let upgrade = NextUpgrade::new(\n                        ActivationPoint::EraId(ERA_TWO),\n                        ProtocolVersion::from_parts(999, 0, 0),\n                    );\n                    effect_builder\n                        .upgrade_watcher_announcement(Some(upgrade))\n                        .ignore()\n                })\n                .await;\n        }\n    }\n\n    #[track_caller]\n    pub(crate) fn check_bid_existence_at_tip(\n        &self,\n        validator_public_key: &PublicKey,\n        delegator_public_key: Option<&PublicKey>,\n        should_exist: bool,\n    ) {\n        let (_, runner) = self\n            .network\n            .nodes()\n            .iter()\n            .find(|(_, runner)| {\n                runner.main_reactor().consensus.public_key() == validator_public_key\n            })\n            .expect(\"should have runner\");\n\n        let highest_block = runner\n            .main_reactor()\n            .storage\n            .read_highest_block_with_signatures(true)\n            .expect(\"should have block\")\n            .into_inner()\n            .0;\n        let bids_request = BidsRequest::new(*highest_block.state_root_hash());\n        let bids_result = runner\n            .main_reactor()\n            .contract_runtime\n            .data_access_layer()\n          
  .bids(bids_request);\n\n        let delegator_kind = delegator_public_key.map(|pk| DelegatorKind::PublicKey(pk.clone()));\n\n        if let BidsResult::Success { bids } = bids_result {\n            match bids.iter().find(|bid_kind| {\n                &bid_kind.validator_public_key() == validator_public_key\n                    && bid_kind.delegator_kind() == delegator_kind\n            }) {\n                None => {\n                    if should_exist {\n                        panic!(\"should have bid in {}\", highest_block.era_id());\n                    }\n                }\n                Some(bid) => {\n                    if !should_exist && !bid.is_unbond() {\n                        info!(\"unexpected bid record existence: {:?}\", bid);\n                        panic!(\"expected to not have bid\");\n                    }\n                }\n            }\n        } else {\n            panic!(\"network should have bids: {:?}\", bids_result);\n        }\n    }\n\n    /// Returns the hash of the given system contract.\n    #[track_caller]\n    pub(crate) fn system_contract_hash(&self, system_contract_name: &str) -> AddressableEntityHash {\n        let node_0 = self\n            .node_contexts\n            .first()\n            .expect(\"should have at least one node\")\n            .id;\n        let reactor = self\n            .network\n            .nodes()\n            .get(&node_0)\n            .expect(\"should have node 0\")\n            .main_reactor();\n\n        let highest_block = reactor\n            .storage\n            .read_highest_block()\n            .expect(\"should have block\");\n\n        // we need the native auction addr so we can directly call it w/o wasm\n        // we can get it out of the system entity registry which is just a\n        // value in global state under a stable key.\n        let maybe_registry = reactor\n            .contract_runtime\n            .data_access_layer()\n            
.checkout(*highest_block.state_root_hash())\n            .expect(\"should checkout\")\n            .expect(\"should have view\")\n            .read(&Key::SystemEntityRegistry)\n            .expect(\"should not have gs storage error\")\n            .expect(\"should have stored value\");\n\n        let system_entity_registry: SystemHashRegistry = match maybe_registry {\n            StoredValue::CLValue(cl_value) => CLValue::into_t(cl_value).unwrap(),\n            _ => {\n                panic!(\"expected CLValue\")\n            }\n        };\n\n        (*system_entity_registry.get(system_contract_name).unwrap()).into()\n    }\n\n    #[track_caller]\n    pub(crate) fn get_current_era_price(&self) -> u8 {\n        let (_, runner) = self\n            .network\n            .nodes()\n            .iter()\n            .next()\n            .expect(\"must have runner\");\n\n        let price = runner.main_reactor().contract_runtime.current_era_price();\n\n        price.gas_price()\n    }\n\n    #[track_caller]\n    pub(crate) fn check_account_balance_hold_at_tip(&self, account_public_key: PublicKey) -> U512 {\n        let (_, runner) = self\n            .network\n            .nodes()\n            .iter()\n            .find(|(_, runner)| runner.main_reactor().consensus.public_key() == &account_public_key)\n            .expect(\"must have runner\");\n\n        let highest_block = runner\n            .main_reactor()\n            .storage\n            .read_highest_block()\n            .expect(\"should have block\");\n\n        let balance_request = BalanceRequest::from_public_key(\n            *highest_block.state_root_hash(),\n            highest_block.protocol_version(),\n            account_public_key,\n            BalanceHandling::Available,\n            ProofHandling::NoProofs,\n        );\n\n        let balance_result = runner\n            .main_reactor()\n            .contract_runtime\n            .data_access_layer()\n            .balance(balance_request);\n\n        
match balance_result {\n            BalanceResult::RootNotFound => {\n                panic!(\"Root not found during balance query\")\n            }\n            BalanceResult::Success { proofs_result, .. } => proofs_result.total_held_amount(),\n            BalanceResult::Failure(tce) => {\n                panic!(\"tracking copy error: {:?}\", tce)\n            }\n        }\n    }\n\n    pub(crate) async fn inject_transaction(&mut self, txn: Transaction) {\n        // saturate the network with the transactions via just making them all store and accept it\n        // they're all validators so one of them should propose it\n        for runner in self.network.runners_mut() {\n            runner\n                .process_injected_effects(|effect_builder| {\n                    effect_builder\n                        .put_transaction_to_storage(txn.clone())\n                        .ignore()\n                })\n                .await;\n            runner\n                .process_injected_effects(|effect_builder| {\n                    effect_builder\n                        .announce_new_transaction_accepted(Arc::new(txn.clone()), Source::Client)\n                        .ignore()\n                })\n                .await;\n        }\n    }\n\n    /// Returns the transforms from the stored, successful execution result for the given\n    /// transaction from node 0.\n    ///\n    /// Panics if there is no such execution result, or if it is not a `Success` variant.\n    #[track_caller]\n    pub(crate) fn successful_execution_transforms(\n        &self,\n        txn_hash: &TransactionHash,\n    ) -> Vec<TransformV2> {\n        let node_0 = self\n            .node_contexts\n            .first()\n            .expect(\"should have at least one node\")\n            .id;\n        match self\n            .network\n            .nodes()\n            .get(&node_0)\n            .expect(\"should have node 0\")\n            .main_reactor()\n            .storage()\n            
.read_execution_result(txn_hash)\n            .expect(\"node 0 should have given execution result\")\n        {\n            ExecutionResult::V1(_) => unreachable!(),\n            ExecutionResult::V2(execution_result_v2) => {\n                if execution_result_v2.error_message.is_none() {\n                    execution_result_v2.effects.transforms().to_vec()\n                } else {\n                    panic!(\n                        \"transaction execution failed: {:?} gas: {}\",\n                        execution_result_v2.error_message, execution_result_v2.consumed\n                    );\n                }\n            }\n        }\n    }\n\n    /// Returns the execution results from storage.\n    /// Panics on error.\n    #[track_caller]\n    pub(crate) fn transaction_execution_result(\n        &self,\n        txn_hash: &TransactionHash,\n    ) -> ExecutionResult {\n        let node_0 = self\n            .node_contexts\n            .first()\n            .expect(\"should have at least one node\")\n            .id;\n        self.network\n            .nodes()\n            .get(&node_0)\n            .expect(\"should have node 0\")\n            .main_reactor()\n            .storage()\n            .read_execution_result(txn_hash)\n            .expect(\"node 0 should have given execution result\")\n    }\n\n    #[inline(always)]\n    pub(crate) fn network_mut(&mut self) -> &mut TestingNetwork<FilterReactor<MainReactor>> {\n        &mut self.network\n    }\n\n    pub(crate) fn run_until_stopped(\n        self,\n        rng: TestRng,\n    ) -> impl futures::Future<Output = (TestingNetwork<FilterReactor<MainReactor>>, TestRng)> {\n        self.network.crank_until_stopped(rng)\n    }\n\n    /// Runs the network until all nodes have executed the given transaction and stored the\n    /// execution result.\n    ///\n    /// Panics if the condition isn't met in time.\n    pub(crate) async fn assert_execution_in_lane(\n        &mut self,\n        txn_hash: 
&TransactionHash,\n        lane_id: u8,\n        within: Duration,\n    ) {\n        self.try_run_until(\n            move |nodes: &Nodes| {\n                nodes.values().all(|runner| {\n                    if runner\n                        .main_reactor()\n                        .storage()\n                        .read_execution_result(txn_hash)\n                        .is_some()\n                    {\n                        let exec_info = runner\n                            .main_reactor()\n                            .storage()\n                            .read_execution_info(*txn_hash);\n\n                        if let Some(exec_info) = exec_info {\n                            if let BlockBody::V2(v2_body) = runner\n                                .main_reactor()\n                                .storage()\n                                .read_block_by_height(exec_info.block_height)\n                                .unwrap()\n                                .take_body()\n                            {\n                                v2_body.transactions_by_lane_id(lane_id).contains(txn_hash)\n                            } else {\n                                false\n                            }\n                        } else {\n                            false\n                        }\n                    } else {\n                        false\n                    }\n                })\n            },\n            within,\n        )\n        .await\n        .unwrap_or_else(|_| {\n            panic!(\n                \"should have stored execution result for {} within {} seconds\",\n                txn_hash,\n                within.as_secs_f64(),\n            )\n        })\n    }\n}\n\npub(crate) fn standard_stakes(\n    alice_public_key: PublicKey,\n    bob_public_key: PublicKey,\n    charlie_public_key: Option<PublicKey>,\n) -> BTreeMap<PublicKey, (U512, U512)> {\n    let mut ret = BTreeMap::new();\n    ret.insert(\n        
alice_public_key.clone(),\n        (\n            U512::from(100_000_000_000_000_000u64),\n            U512::from(u128::MAX),\n        ),\n    );\n    ret.insert(\n        bob_public_key.clone(),\n        (U512::from(100_000_000_000_000_000u64), U512::from(1)),\n    );\n\n    if let Some(pub_k) = charlie_public_key {\n        ret.insert(pub_k, (U512::from(u32::MAX - 1), U512::from(1)));\n    }\n    ret\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/tests/gas_price.rs",
    "content": "use std::{sync::Arc, time::Duration};\n\nuse casper_types::{\n    testing::TestRng, Chainspec, PricingHandling, PricingMode, PublicKey, SecretKey, TimeDiff,\n    Transaction, TransactionV1Config, U512,\n};\n\nuse crate::{\n    reactor::main_reactor::tests::{\n        configs_override::ConfigsOverride, fixture::TestFixture, ERA_ONE, ONE_MIN,\n    },\n    types::transaction::transaction_v1_builder::TransactionV1Builder,\n};\n\n#[allow(clippy::enum_variant_names)]\nenum GasPriceScenario {\n    SlotUtilization,\n    SizeUtilization(u32),\n    GasConsumptionUtilization(u64),\n}\n\nasync fn run_gas_price_scenario(gas_price_scenario: GasPriceScenario) {\n    let mut rng = TestRng::new();\n    let alice_stake = 200_000_000_000_u64;\n    let bob_stake = 300_000_000_000_u64;\n    let charlie_stake = 300_000_000_000_u64;\n    let initial_stakes: Vec<(U512, U512)> = vec![\n        (U512::from(u64::MAX), alice_stake.into()),\n        (U512::from(u64::MAX), bob_stake.into()),\n        (U512::from(u64::MAX), charlie_stake.into()),\n    ];\n\n    let mut secret_keys: Vec<Arc<SecretKey>> = (0..3)\n        .map(|_| Arc::new(SecretKey::random(&mut rng)))\n        .collect();\n\n    let stakes = secret_keys\n        .iter()\n        .zip(initial_stakes)\n        .map(|(secret_key, (bal, stake))| (PublicKey::from(secret_key.as_ref()), (bal, stake)))\n        .collect();\n\n    let non_validating_secret_key = SecretKey::random(&mut rng);\n    let non_validating_public_key = PublicKey::from(&non_validating_secret_key);\n    secret_keys.push(Arc::new(non_validating_secret_key));\n\n    let max_gas_price: u8 = 3;\n\n    let mut transaction_config = TransactionV1Config::default();\n    transaction_config\n        .native_mint_lane\n        .set_max_transaction_count(1);\n\n    let spec_override = match gas_price_scenario {\n        GasPriceScenario::SlotUtilization => {\n            ConfigsOverride::default().with_transaction_v1_config(transaction_config)\n        }\n        
GasPriceScenario::SizeUtilization(block_size) => {\n            ConfigsOverride::default().with_block_size(block_size)\n        }\n        GasPriceScenario::GasConsumptionUtilization(gas_limit) => {\n            ConfigsOverride::default().with_block_gas_limit(gas_limit)\n        }\n    }\n    .with_lower_threshold(5u64)\n    .with_upper_threshold(10u64)\n    .with_minimum_era_height(5)\n    .with_max_gas_price(max_gas_price);\n\n    let mut fixture =\n        TestFixture::new_with_keys(rng, secret_keys, stakes, Some(spec_override)).await;\n\n    let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key);\n    let alice_public_key = PublicKey::from(&*alice_secret_key);\n\n    fixture\n        .run_until_stored_switch_block_header(ERA_ONE, ONE_MIN)\n        .await;\n\n    let switch_block = fixture.switch_block(ERA_ONE);\n\n    let mut current_era = switch_block.era_id();\n    let chain_name = fixture.chainspec.network_config.name.clone();\n\n    // Run the network at load for at least 5 eras.\n    for _ in 0..5 {\n        let rng = fixture.rng_mut();\n        let target_public_key = PublicKey::random(rng);\n        let fixed_native_mint_transaction =\n            TransactionV1Builder::new_transfer(10_000_000_000u64, None, target_public_key, None)\n                .expect(\"must get builder\")\n                .with_chain_name(chain_name.clone())\n                .with_secret_key(&alice_secret_key)\n                .with_ttl(TimeDiff::from_seconds(120 * 10))\n                .with_pricing_mode(PricingMode::Fixed {\n                    gas_price_tolerance: max_gas_price,\n                    additional_computation_factor: 0,\n                })\n                .build()\n                .expect(\"must get transaction\");\n\n        let txn = Transaction::V1(fixed_native_mint_transaction);\n        fixture.inject_transaction(txn).await;\n        let next_era = current_era.successor();\n        fixture\n            
.run_until_stored_switch_block_header(next_era, ONE_MIN)\n            .await;\n        current_era = next_era;\n    }\n\n    let expected_gas_price = fixture.chainspec.vacancy_config.max_gas_price;\n    let actual_gas_price = fixture.get_current_era_price();\n    assert_eq!(actual_gas_price, expected_gas_price);\n    let gas_price_for_non_validating_node =\n        fixture.get_block_gas_price_by_public_key(Some(&non_validating_public_key));\n    assert_eq!(actual_gas_price, gas_price_for_non_validating_node);\n    let rng = fixture.rng_mut();\n    let target_public_key = PublicKey::random(rng);\n\n    let holds_before = fixture.check_account_balance_hold_at_tip(alice_public_key.clone());\n    let amount = 10_000_000_000u64;\n\n    let fixed_native_mint_transaction =\n        TransactionV1Builder::new_transfer(amount, None, target_public_key, None)\n            .expect(\"must get builder\")\n            .with_chain_name(chain_name)\n            .with_secret_key(&alice_secret_key)\n            .with_pricing_mode(PricingMode::Fixed {\n                gas_price_tolerance: max_gas_price,\n                additional_computation_factor: 0,\n            })\n            .build()\n            .expect(\"must get transaction\");\n\n    let txn = Transaction::V1(fixed_native_mint_transaction);\n    let txn_hash = txn.hash();\n\n    fixture.inject_transaction(txn).await;\n    fixture\n        .run_until_executed_transaction(&txn_hash, Duration::from_secs(20))\n        .await;\n\n    let holds_after = fixture.check_account_balance_hold_at_tip(alice_public_key.clone());\n\n    let current_gas_price = fixture\n        .highest_complete_block()\n        .maybe_current_gas_price()\n        .expect(\"must have gas price\");\n\n    let cost = match fixture.chainspec.core_config.pricing_handling {\n        PricingHandling::PaymentLimited => 0,\n        PricingHandling::Fixed => {\n            fixture.chainspec.system_costs_config.mint_costs().transfer * (current_gas_price as u32)\n      
  }\n    };\n\n    assert_eq!(holds_after, holds_before + U512::from(cost));\n\n    // Run the network at zero load and ensure the value falls back to the floor.\n    for _ in 0..5 {\n        let next_era = current_era.successor();\n        fixture\n            .run_until_stored_switch_block_header(next_era, ONE_MIN)\n            .await;\n        current_era = next_era;\n    }\n\n    let expected_gas_price = fixture.chainspec.vacancy_config.min_gas_price;\n    let actual_gas_price = fixture.get_current_era_price();\n    assert_eq!(actual_gas_price, expected_gas_price);\n}\n\n#[tokio::test]\nasync fn should_raise_gas_price_to_ceiling_and_reduce_to_floor_based_on_slot_utilization() {\n    let scenario = GasPriceScenario::SlotUtilization;\n    run_gas_price_scenario(scenario).await\n}\n\n#[tokio::test]\nasync fn should_raise_gas_price_to_ceiling_and_reduce_to_floor_based_on_gas_consumption() {\n    let gas_limit = Chainspec::default()\n        .system_costs_config\n        .mint_costs()\n        .transfer as u64;\n    let scenario = GasPriceScenario::GasConsumptionUtilization(gas_limit);\n    run_gas_price_scenario(scenario).await\n}\n\n#[tokio::test]\nasync fn should_raise_gas_price_to_ceiling_and_reduce_to_floor_based_on_size_consumption() {\n    // The size of a native transfer is roughly 300 ~ 400 bytes\n    let size_limit = 600u32;\n    let scenario = GasPriceScenario::SizeUtilization(size_limit);\n    run_gas_price_scenario(scenario).await\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/tests/initial_stakes.rs",
    "content": "pub(crate) enum InitialStakes {\n    FromVec(Vec<u128>),\n    Random { count: usize },\n    AllEqual { count: usize, stake: u128 },\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/tests/network_general.rs",
    "content": "use std::{collections::HashMap, sync::Arc, time::Duration};\n\nuse casper_binary_port::{\n    BinaryMessage, BinaryMessageCodec, BinaryResponseAndRequest, Command, CommandHeader,\n    InformationRequest, Uptime,\n};\nuse either::Either;\nuse futures::{SinkExt, StreamExt};\nuse num_rational::Ratio;\nuse tokio::{\n    net::TcpStream,\n    time::{self, timeout},\n};\nuse tokio_util::codec::Framed;\nuse tracing::info;\n\nuse casper_types::{\n    bytesrepr::{FromBytes, ToBytes},\n    execution::TransformKindV2,\n    system::{auction::BidAddr, AUCTION},\n    testing::TestRng,\n    AvailableBlockRange, Deploy, Key, Peers, PublicKey, SecretKey, StoredValue, TimeDiff,\n    Timestamp, Transaction,\n};\n\nuse crate::{\n    effect::{requests::ContractRuntimeRequest, EffectExt},\n    reactor::{\n        main_reactor::{\n            tests::{\n                configs_override::{ConfigsOverride, NodeConfigOverride},\n                fixture::TestFixture,\n                initial_stakes::InitialStakes,\n                node_has_lowest_available_block_at_or_below_height, Nodes, ERA_ONE, ERA_THREE,\n                ERA_TWO, ERA_ZERO, ONE_MIN, TEN_SECS, THIRTY_SECS,\n            },\n            MainEvent, MainReactor, ReactorState,\n        },\n        Runner,\n    },\n    testing::{filter_reactor::FilterReactor, network::TestingNetwork, ConditionCheckReactor},\n    types::{ExitCode, NodeId, SyncHandling},\n    utils::Source,\n};\n\n#[tokio::test]\nasync fn run_network() {\n    // Set up a network with five nodes and run until in era 2.\n    let initial_stakes = InitialStakes::Random { count: 5 };\n    let mut fixture = TestFixture::new(initial_stakes, None).await;\n    fixture.run_until_consensus_in_era(ERA_TWO, ONE_MIN).await;\n}\n\n#[tokio::test]\nasync fn historical_sync_with_era_height_1() {\n    let initial_stakes = InitialStakes::Random { count: 5 };\n    let spec_override = ConfigsOverride {\n        minimum_block_time: \"4seconds\".parse().unwrap(),\n        
..Default::default()\n    };\n    let mut fixture = TestFixture::new(initial_stakes, Some(spec_override)).await;\n\n    // Wait for all nodes to reach era 3.\n    fixture.run_until_consensus_in_era(ERA_THREE, ONE_MIN).await;\n\n    // Create a joiner node.\n    let secret_key = SecretKey::random(&mut fixture.rng);\n    let trusted_hash = *fixture.highest_complete_block().hash();\n    let (mut config, storage_dir) = fixture.create_node_config(\n        &secret_key,\n        Some(trusted_hash),\n        1,\n        NodeConfigOverride::default(),\n    );\n    config.node.sync_handling = SyncHandling::Genesis;\n    let joiner_id = fixture\n        .add_node(Arc::new(secret_key), config, storage_dir)\n        .await;\n\n    // Wait for joiner node to sync back to the block from era 1\n    fixture\n        .run_until(\n            node_has_lowest_available_block_at_or_below_height(1, joiner_id),\n            ONE_MIN,\n        )\n        .await;\n\n    // Remove the weights for era 0 and era 1 from the validator matrix\n    let runner = fixture\n        .network\n        .nodes_mut()\n        .get_mut(&joiner_id)\n        .expect(\"Could not find runner for node {joiner_id}\");\n    let reactor = runner.reactor_mut().inner_mut().inner_mut();\n    reactor.validator_matrix.purge_era_validators(&ERA_ZERO);\n    reactor.validator_matrix.purge_era_validators(&ERA_ONE);\n\n    // Continue syncing and check if the joiner node reaches era 0\n    fixture\n        .run_until(\n            node_has_lowest_available_block_at_or_below_height(0, joiner_id),\n            ONE_MIN,\n        )\n        .await;\n}\n\n#[tokio::test]\nasync fn should_not_historical_sync_no_sync_node() {\n    let initial_stakes = InitialStakes::Random { count: 5 };\n    let spec_override = ConfigsOverride {\n        minimum_block_time: \"4seconds\".parse().unwrap(),\n        minimum_era_height: 2,\n        ..Default::default()\n    };\n    let mut fixture = TestFixture::new(initial_stakes, 
Some(spec_override)).await;\n\n    // Wait for all nodes to complete block 1.\n    fixture.run_until_block_height(1, ONE_MIN).await;\n\n    // Create a joiner node.\n    let highest_block = fixture.highest_complete_block();\n    let trusted_hash = *highest_block.hash();\n    let trusted_height = highest_block.height();\n    assert!(\n        trusted_height > 0,\n        \"trusted height must be non-zero to allow for checking that the joiner doesn't do \\\n        historical syncing\"\n    );\n    info!(\"joining node using block {trusted_height} {trusted_hash}\");\n    let secret_key = SecretKey::random(&mut fixture.rng);\n    let (mut config, storage_dir) = fixture.create_node_config(\n        &secret_key,\n        Some(trusted_hash),\n        1,\n        NodeConfigOverride::default(),\n    );\n    config.node.sync_handling = SyncHandling::NoSync;\n    let joiner_id = fixture\n        .add_node(Arc::new(secret_key), config, storage_dir)\n        .await;\n\n    let joiner_avail_range = |nodes: &Nodes| {\n        nodes\n            .get(&joiner_id)\n            .expect(\"should have joiner\")\n            .main_reactor()\n            .storage()\n            .get_available_block_range()\n    };\n\n    // Run until the joiner doesn't have the default available block range, i.e. 
it has completed\n    // syncing the initial block.\n    fixture\n        .try_run_until(\n            |nodes: &Nodes| joiner_avail_range(nodes) != AvailableBlockRange::RANGE_0_0,\n            ONE_MIN,\n        )\n        .await\n        .expect(\"timed out waiting for joiner to sync first block\");\n\n    let available_block_range_pre = joiner_avail_range(fixture.network.nodes());\n\n    let pre = available_block_range_pre.low();\n    assert!(\n        pre >= trusted_height,\n        \"should not have acquired a block earlier than trusted hash block {} {}\",\n        pre,\n        trusted_height\n    );\n\n    // Ensure the joiner's chain is advancing.\n    fixture\n        .try_run_until(\n            |nodes: &Nodes| joiner_avail_range(nodes).high() > available_block_range_pre.high(),\n            ONE_MIN,\n        )\n        .await\n        .unwrap_or_else(|_| {\n            panic!(\n                \"timed out waiting for joiner's highest complete block to exceed {}\",\n                available_block_range_pre.high()\n            )\n        });\n\n    // Ensure the joiner is not doing historical sync.\n    fixture\n        .try_run_until(\n            |nodes: &Nodes| joiner_avail_range(nodes).low() < available_block_range_pre.low(),\n            TEN_SECS,\n        )\n        .await\n        .unwrap_err();\n}\n\n#[tokio::test]\nasync fn should_catch_up_and_shutdown() {\n    let initial_stakes = InitialStakes::Random { count: 5 };\n    let spec_override = ConfigsOverride {\n        minimum_block_time: \"4seconds\".parse().unwrap(),\n        minimum_era_height: 2,\n        ..Default::default()\n    };\n    let mut fixture = TestFixture::new(initial_stakes, Some(spec_override)).await;\n\n    // Wait for all nodes to complete block 1.\n    fixture.run_until_block_height(1, ONE_MIN).await;\n\n    // Create a joiner node.\n    let highest_block = fixture.highest_complete_block();\n    let trusted_hash = *highest_block.hash();\n    let trusted_height = 
highest_block.height();\n    assert!(\n        trusted_height > 0,\n        \"trusted height must be non-zero to allow for checking that the joiner doesn't do \\\n        historical syncing\"\n    );\n\n    info!(\"joining node using block {trusted_height} {trusted_hash}\");\n    let secret_key = SecretKey::random(&mut fixture.rng);\n    let (mut config, storage_dir) = fixture.create_node_config(\n        &secret_key,\n        Some(trusted_hash),\n        1,\n        NodeConfigOverride::default(),\n    );\n    config.node.sync_handling = SyncHandling::CompleteBlock;\n    let joiner_id = fixture\n        .add_node(Arc::new(secret_key), config, storage_dir)\n        .await;\n\n    let joiner_avail_range = |nodes: &Nodes| {\n        nodes\n            .get(&joiner_id)\n            .expect(\"should have joiner\")\n            .main_reactor()\n            .storage()\n            .get_available_block_range()\n    };\n\n    // Run until the joiner shuts down after catching up\n    fixture\n        .network\n        .settle_on_node_exit(\n            &mut fixture.rng,\n            &joiner_id,\n            ExitCode::CleanExitDontRestart,\n            ONE_MIN,\n        )\n        .await;\n\n    let available_block_range = joiner_avail_range(fixture.network.nodes());\n\n    let low = available_block_range.low();\n    assert!(\n        low >= trusted_height,\n        \"should not have acquired a block earlier than trusted hash block {low} {trusted_hash}\",\n    );\n\n    let highest_block_height = fixture.highest_complete_block().height();\n    let high = available_block_range.high();\n    assert!(\n        low < high && high <= highest_block_height,\n        \"should have acquired more recent blocks before shutting down {low} {high} {highest_block_height}\",\n    );\n}\n\nfn network_is_in_keepup(\n    nodes: &HashMap<NodeId, Runner<ConditionCheckReactor<FilterReactor<MainReactor>>>>,\n) -> bool {\n    nodes\n        .values()\n        .all(|node| 
node.reactor().inner().inner().state == ReactorState::KeepUp)\n}\n\nconst MESSAGE_SIZE: u32 = 1024 * 1024 * 10;\n\nasync fn setup_network_and_get_binary_port_handle(\n    initial_stakes: InitialStakes,\n    spec_override: ConfigsOverride,\n) -> (\n    Framed<TcpStream, BinaryMessageCodec>,\n    impl futures::Future<Output = (TestingNetwork<FilterReactor<MainReactor>>, TestRng)>,\n) {\n    let mut fixture = timeout(\n        Duration::from_secs(10),\n        TestFixture::new(initial_stakes, Some(spec_override)),\n    )\n    .await\n    .unwrap();\n    let mut rng = fixture.rng_mut().create_child();\n    let net = fixture.network_mut();\n    net.settle_on(&mut rng, network_is_in_keepup, Duration::from_secs(59))\n        .await;\n    let (_, first_node) = net\n        .nodes()\n        .iter()\n        .next()\n        .expect(\"should have at least one node\");\n    let binary_port_addr = first_node\n        .main_reactor()\n        .binary_port\n        .bind_address()\n        .unwrap();\n    let finish_cranking = fixture.run_until_stopped(rng.create_child());\n    let address = format!(\"localhost:{}\", binary_port_addr.port());\n    let stream = TcpStream::connect(address.clone())\n        .await\n        .expect(\"should create stream\");\n    let client = Framed::new(stream, BinaryMessageCodec::new(MESSAGE_SIZE));\n    (client, finish_cranking)\n}\n\n#[tokio::test]\nasync fn should_start_in_isolation() {\n    let initial_stakes = InitialStakes::Random { count: 1 };\n    let spec_override = ConfigsOverride {\n        node_config_override: NodeConfigOverride {\n            sync_handling_override: Some(SyncHandling::Isolated),\n        },\n        ..Default::default()\n    };\n    let (mut client, finish_cranking) =\n        setup_network_and_get_binary_port_handle(initial_stakes, spec_override).await;\n\n    let uptime_request_bytes = {\n        let request = Command::Get(\n            InformationRequest::Uptime\n                .try_into()\n                
.expect(\"should convert\"),\n        );\n        let header = CommandHeader::new(request.tag(), 1_u16);\n        let header_bytes = ToBytes::to_bytes(&header).expect(\"should serialize\");\n        header_bytes\n            .iter()\n            .chain(\n                ToBytes::to_bytes(&request)\n                    .expect(\"should serialize\")\n                    .iter(),\n            )\n            .cloned()\n            .collect::<Vec<_>>()\n    };\n    client\n        .send(BinaryMessage::new(uptime_request_bytes))\n        .await\n        .expect(\"should send message\");\n    let response = timeout(Duration::from_secs(20), client.next())\n        .await\n        .unwrap_or_else(|err| panic!(\"should complete uptime request without timeout: {}\", err))\n        .unwrap_or_else(|| panic!(\"should have bytes\"))\n        .unwrap_or_else(|err| panic!(\"should have ok response: {}\", err));\n    let (binary_response_and_request, _): (BinaryResponseAndRequest, _) =\n        FromBytes::from_bytes(response.payload()).expect(\"should deserialize response\");\n    let response = binary_response_and_request.response().payload();\n    let (uptime, remainder): (Uptime, _) =\n        FromBytes::from_bytes(response).expect(\"Uptime should be deserializable\");\n    assert!(remainder.is_empty());\n    assert!(uptime.into_inner() > 0);\n    let (_net, _rng) = timeout(Duration::from_secs(20), finish_cranking)\n        .await\n        .unwrap_or_else(|_| panic!(\"should finish cranking without timeout\"));\n}\n\n#[tokio::test]\nasync fn should_be_peerless_in_isolation() {\n    let initial_stakes = InitialStakes::Random { count: 1 };\n    let spec_override = ConfigsOverride {\n        node_config_override: NodeConfigOverride {\n            sync_handling_override: Some(SyncHandling::Isolated),\n        },\n        ..Default::default()\n    };\n    let (mut client, finish_cranking) =\n        setup_network_and_get_binary_port_handle(initial_stakes, spec_override).await;\n\n    
let peers_request_bytes = {\n        let request = Command::Get(\n            InformationRequest::Peers\n                .try_into()\n                .expect(\"should convert\"),\n        );\n        let header = CommandHeader::new(request.tag(), 1_u16);\n        let header_bytes = ToBytes::to_bytes(&header).expect(\"should serialize\");\n        header_bytes\n            .iter()\n            .chain(\n                ToBytes::to_bytes(&request)\n                    .expect(\"should serialize\")\n                    .iter(),\n            )\n            .cloned()\n            .collect::<Vec<_>>()\n    };\n    client\n        .send(BinaryMessage::new(peers_request_bytes))\n        .await\n        .expect(\"should send message\");\n    let response = timeout(Duration::from_secs(20), client.next())\n        .await\n        .unwrap_or_else(|err| panic!(\"should complete peers request without timeout: {}\", err))\n        .unwrap_or_else(|| panic!(\"should have bytes\"))\n        .unwrap_or_else(|err| panic!(\"should have ok response: {}\", err));\n    let (binary_response_and_request, _): (BinaryResponseAndRequest, _) =\n        FromBytes::from_bytes(response.payload()).expect(\"should deserialize response\");\n    let response = binary_response_and_request.response().payload();\n\n    let (peers, remainder): (Peers, _) =\n        FromBytes::from_bytes(response).expect(\"Peers should be deserializable\");\n    assert!(remainder.is_empty());\n    assert!(\n        peers.into_inner().is_empty(),\n        \"should not have peers in isolated mode\"\n    );\n\n    let (_net, _rng) = timeout(Duration::from_secs(20), finish_cranking)\n        .await\n        .unwrap_or_else(|_| panic!(\"should finish cranking without timeout\"));\n}\n\n#[tokio::test]\nasync fn network_should_recover_from_stall() {\n    // Set up a network with three nodes.\n    let initial_stakes = InitialStakes::AllEqual {\n        count: 3,\n        stake: 100,\n    };\n    let mut fixture = 
TestFixture::new(initial_stakes, None).await;\n\n    // Let all nodes progress until block 2 is marked complete.\n    fixture.run_until_block_height(2, ONE_MIN).await;\n\n    // Kill all nodes except for node 0.\n    let mut stopped_nodes = vec![];\n    for _ in 1..fixture.node_contexts.len() {\n        let node_context = fixture.remove_and_stop_node(1);\n        stopped_nodes.push(node_context);\n    }\n\n    // Expect node 0 can't produce more blocks, i.e. the network has stalled.\n    fixture\n        .try_run_until_block_height(3, ONE_MIN)\n        .await\n        .expect_err(\"should time out\");\n\n    // Restart the stopped nodes.\n    for node_context in stopped_nodes {\n        fixture\n            .add_node(\n                node_context.secret_key,\n                node_context.config,\n                node_context.storage_dir,\n            )\n            .await;\n    }\n\n    // Ensure all nodes progress until block 3 is marked complete.\n    fixture.run_until_block_height(3, TEN_SECS).await;\n}\n\n#[tokio::test]\nasync fn node_should_rejoin_after_ejection() {\n    let initial_stakes = InitialStakes::AllEqual {\n        count: 5,\n        stake: 1_000_000_000,\n    };\n    let minimum_era_height = 4;\n    let configs_override = ConfigsOverride {\n        minimum_era_height,\n        minimum_block_time: \"4096 ms\".parse().unwrap(),\n        round_seigniorage_rate: Ratio::new(1, 1_000_000_000_000),\n        ..Default::default()\n    };\n    let mut fixture = TestFixture::new(initial_stakes, Some(configs_override)).await;\n\n    // Run through the first era.\n    fixture\n        .run_until_block_height(minimum_era_height, ONE_MIN)\n        .await;\n\n    let stopped_node = fixture.remove_and_stop_node(1);\n    let stopped_secret_key = Arc::clone(&stopped_node.secret_key);\n    let stopped_public_key = PublicKey::from(&*stopped_secret_key);\n\n    // Wait until the stopped node is ejected and removed from the validators set.\n    fixture\n        
.run_until_consensus_in_era(\n            (fixture.chainspec.core_config.auction_delay + 3).into(),\n            ONE_MIN,\n        )\n        .await;\n\n    // Restart the node.\n    // Use the hash of the current highest complete block as the trusted hash.\n    let mut config = stopped_node.config;\n    config.node.trusted_hash = Some(*fixture.highest_complete_block().hash());\n    fixture\n        .add_node(stopped_node.secret_key, config, stopped_node.storage_dir)\n        .await;\n\n    // Create & sign deploy to reactivate the stopped node's bid.\n    // The bid amount will make sure that the rejoining validator proposes soon after it rejoins.\n    let mut deploy = Deploy::add_bid(\n        fixture.chainspec.network_config.name.clone(),\n        fixture.system_contract_hash(AUCTION),\n        stopped_public_key.clone(),\n        100_000_000_000_000_000_u64.into(),\n        10,\n        Timestamp::now(),\n        TimeDiff::from_seconds(60),\n    );\n    deploy.sign(&stopped_secret_key);\n    let txn = Transaction::Deploy(deploy);\n    let txn_hash = txn.hash();\n\n    // Inject the transaction and run the network until executed.\n    fixture.inject_transaction(txn).await;\n    fixture\n        .run_until_executed_transaction(&txn_hash, THIRTY_SECS)\n        .await;\n\n    // Ensure execution succeeded and that there is a Write transform for the bid's key.\n    let bid_key = Key::BidAddr(BidAddr::from(stopped_public_key.clone()));\n    fixture\n        .successful_execution_transforms(&txn_hash)\n        .iter()\n        .find(|transform| match transform.kind() {\n            TransformKindV2::Write(StoredValue::BidKind(bid_kind)) => {\n                Key::from(bid_kind.bid_addr()) == bid_key\n            }\n            _ => false,\n        })\n        .expect(\"should have a write record for bid\");\n\n    // Wait until the auction delay passes, plus one era for a margin of error.\n    fixture\n        .run_until_consensus_in_era(\n            (2 * 
fixture.chainspec.core_config.auction_delay + 6).into(),\n            ONE_MIN,\n        )\n        .await;\n}\n\nasync fn assert_network_shutdown_for_upgrade_with_stakes(initial_stakes: InitialStakes) {\n    let mut fixture = TestFixture::new(initial_stakes, None).await;\n\n    // An upgrade is scheduled for era 2, after the switch block in era 1 (height 2).\n    fixture.schedule_upgrade_for_era_two().await;\n\n    // Run until the nodes shut down for the upgrade.\n    fixture\n        .network\n        .settle_on_exit(&mut fixture.rng, ExitCode::Success, ONE_MIN)\n        .await;\n}\n\n#[tokio::test]\nasync fn nodes_should_have_enough_signatures_before_upgrade_with_equal_stake() {\n    // Equal stake ensures that one node was able to learn about signatures created by the other, by\n    // whatever means necessary (gossiping, broadcasting, fetching, etc.).\n    let initial_stakes = InitialStakes::AllEqual {\n        count: 2,\n        stake: u128::MAX,\n    };\n    assert_network_shutdown_for_upgrade_with_stakes(initial_stakes).await;\n}\n\n#[tokio::test]\nasync fn nodes_should_have_enough_signatures_before_upgrade_with_one_dominant_stake() {\n    let initial_stakes = InitialStakes::FromVec(vec![u128::MAX, 255]);\n    assert_network_shutdown_for_upgrade_with_stakes(initial_stakes).await;\n}\n\n#[tokio::test]\nasync fn dont_upgrade_without_switch_block() {\n    let initial_stakes = InitialStakes::Random { count: 2 };\n    let mut fixture = TestFixture::new(initial_stakes, None).await;\n    fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await;\n\n    eprintln!(\n        \"Running 'dont_upgrade_without_switch_block' test with rng={}\",\n        fixture.rng\n    );\n\n    // An upgrade is scheduled for era 2, after the switch block in era 1 (height 2).\n    // We artificially delay the execution of that block.\n    fixture.schedule_upgrade_for_era_two().await;\n    for runner in fixture.network.runners_mut() {\n        let mut exec_request_received = false;\n     
   runner.reactor_mut().inner_mut().set_filter(move |event| {\n            if let MainEvent::ContractRuntimeRequest(\n                ContractRuntimeRequest::EnqueueBlockForExecution {\n                    executable_block, ..\n                },\n            ) = &event\n            {\n                if executable_block.era_report.is_some()\n                    && executable_block.era_id == ERA_ONE\n                    && !exec_request_received\n                {\n                    info!(\"delaying {}\", executable_block);\n                    exec_request_received = true;\n                    return Either::Left(\n                        time::sleep(Duration::from_secs(10)).event(move |_| event),\n                    );\n                }\n                info!(\"not delaying {}\", executable_block);\n            }\n            Either::Right(event)\n        });\n    }\n\n    // Run until the nodes shut down for the upgrade.\n    fixture\n        .network\n        .settle_on_exit(&mut fixture.rng, ExitCode::Success, ONE_MIN)\n        .await;\n\n    // Verify that the switch block has been stored: Even though it was delayed the node didn't\n    // restart before executing and storing it.\n    for runner in fixture.network.nodes().values() {\n        let header = runner\n            .main_reactor()\n            .storage()\n            .read_block_header_by_height(2, false)\n            .expect(\"failed to read from storage\")\n            .expect(\"missing switch block\");\n        assert_eq!(ERA_ONE, header.era_id(), \"era should be 1\");\n        assert!(header.is_switch_block(), \"header should be switch block\");\n    }\n}\n\n#[tokio::test]\nasync fn should_store_finalized_approvals() {\n    // Set up a network with two nodes where node 0 (Alice) is effectively guaranteed to be the\n    // proposer.\n    let initial_stakes = InitialStakes::FromVec(vec![u128::MAX, 1]);\n    let mut fixture = TestFixture::new(initial_stakes, None).await;\n\n    let 
alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key);\n    let alice_public_key = PublicKey::from(&*alice_secret_key);\n    let bob_secret_key = Arc::clone(&fixture.node_contexts[1].secret_key);\n    let charlie_secret_key = Arc::new(SecretKey::random(&mut fixture.rng)); // just for ordering testing purposes\n\n    // Wait for all nodes to complete era 0.\n    fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await;\n\n    // Submit a transaction.\n    let mut transaction_alice_bob = Transaction::from(\n        Deploy::random_valid_native_transfer_without_deps(&mut fixture.rng),\n    );\n    let mut transaction_alice_bob_charlie = transaction_alice_bob.clone();\n    let mut transaction_bob_alice = transaction_alice_bob.clone();\n\n    transaction_alice_bob.sign(&alice_secret_key);\n    transaction_alice_bob.sign(&bob_secret_key);\n\n    transaction_alice_bob_charlie.sign(&alice_secret_key);\n    transaction_alice_bob_charlie.sign(&bob_secret_key);\n    transaction_alice_bob_charlie.sign(&charlie_secret_key);\n\n    transaction_bob_alice.sign(&bob_secret_key);\n    transaction_bob_alice.sign(&alice_secret_key);\n\n    // We will be testing the correct sequence of approvals against the transaction signed by Bob\n    // and Alice.\n    // The transaction signed by Alice and Bob should give the same ordering of approvals.\n    let expected_approvals: Vec<_> = transaction_bob_alice.approvals().iter().cloned().collect();\n\n    // We'll give the transaction signed by Alice, Bob and Charlie to Bob, so these will be his\n    // original approvals. 
Save these for checks later.\n    let bobs_original_approvals: Vec<_> = transaction_alice_bob_charlie\n        .approvals()\n        .iter()\n        .cloned()\n        .collect();\n    assert_ne!(bobs_original_approvals, expected_approvals);\n\n    let transaction_hash = transaction_alice_bob.hash();\n\n    for runner in fixture.network.runners_mut() {\n        let transaction = if runner.main_reactor().consensus().public_key() == &alice_public_key {\n            // Alice will propose the transaction signed by Alice and Bob.\n            transaction_alice_bob.clone()\n        } else {\n            // Bob will receive the transaction signed by Alice, Bob and Charlie.\n            transaction_alice_bob_charlie.clone()\n        };\n        runner\n            .process_injected_effects(|effect_builder| {\n                effect_builder\n                    .put_transaction_to_storage(transaction.clone())\n                    .ignore()\n            })\n            .await;\n        runner\n            .process_injected_effects(|effect_builder| {\n                effect_builder\n                    .announce_new_transaction_accepted(Arc::new(transaction), Source::Client)\n                    .ignore()\n            })\n            .await;\n    }\n\n    // Run until the transaction gets executed.\n    let has_stored_exec_results = |nodes: &Nodes| {\n        nodes.values().all(|runner| {\n            let read = runner\n                .main_reactor()\n                .storage()\n                .read_execution_result(&transaction_hash);\n            read.is_some()\n        })\n    };\n    fixture.run_until(has_stored_exec_results, ONE_MIN).await;\n\n    // Check if the approvals agree.\n    for runner in fixture.network.nodes().values() {\n        let maybe_dwa = runner\n            .main_reactor()\n            .storage()\n            .get_transaction_with_finalized_approvals_by_hash(&transaction_hash);\n        let maybe_finalized_approvals = maybe_dwa\n            
.as_ref()\n            .and_then(|dwa| dwa.1.clone())\n            .map(|fa| fa.iter().cloned().collect());\n        let maybe_original_approvals = maybe_dwa\n            .as_ref()\n            .map(|(transaction, _approvals)| transaction.approvals().iter().cloned().collect());\n        if runner.main_reactor().consensus().public_key() != &alice_public_key {\n            // Bob should have finalized approvals, and his original approvals should be different.\n            assert_eq!(\n                maybe_finalized_approvals.as_ref(),\n                Some(&expected_approvals)\n            );\n            assert_eq!(\n                maybe_original_approvals.as_ref(),\n                Some(&bobs_original_approvals)\n            );\n        } else {\n            // Alice should only have the correct approvals as the original ones, and no finalized\n            // approvals (as they wouldn't be stored, because they would be the same as the\n            // original ones).\n            assert_eq!(maybe_finalized_approvals.as_ref(), None);\n            assert_eq!(maybe_original_approvals.as_ref(), Some(&expected_approvals));\n        }\n    }\n}\n\n#[tokio::test]\nasync fn should_update_last_progress_after_block_execution() {\n    // Set up a network with two nodes.\n    let initial_stakes = InitialStakes::FromVec(vec![u128::MAX, 1]);\n    let mut fixture = TestFixture::new(initial_stakes, None).await;\n\n    // Let all nodes reach consensus in era 0.\n    fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await;\n\n    // Prepare and submit a transaction.\n    let transaction = Transaction::from(Deploy::random_valid_native_transfer_without_deps(\n        &mut fixture.rng,\n    ));\n    let transaction_hash = transaction.hash();\n\n    for runner in fixture.network.runners_mut() {\n        let transaction = transaction.clone();\n        runner\n            .process_injected_effects(|eff| {\n                eff.put_transaction_to_storage(transaction.clone()).ignore()\n  
          })\n            .await;\n\n        runner\n            .process_injected_effects(|eff| {\n                eff.announce_new_transaction_accepted(Arc::new(transaction), Source::Client)\n                    .ignore()\n            })\n            .await;\n    }\n\n    // For each node, capture its last_progress before execution.\n    let stored_last_progresses: Vec<_> = fixture\n        .network\n        .nodes()\n        .values()\n        .map(|node| {\n            let reactor = node.main_reactor();\n            assert_eq!(reactor.state, ReactorState::Validate);\n            reactor.last_progress\n        })\n        .collect();\n\n    // Run until the transaction gets executed.\n    let has_stored_exec_results = |nodes: &Nodes| {\n        nodes.values().all(|runner| {\n            let read = runner\n                .main_reactor()\n                .storage()\n                .read_execution_result(&transaction_hash);\n            read.is_some()\n        })\n    };\n    fixture.run_until(has_stored_exec_results, ONE_MIN).await;\n\n    // For each node, verify its last_progress has been updated.\n    for (stored_last_progress, node) in stored_last_progresses\n        .into_iter()\n        .zip(fixture.network.nodes().values())\n    {\n        assert!(node.main_reactor().last_progress > stored_last_progress);\n    }\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/tests/rewards.rs",
    "content": "use std::{\n    collections::{BTreeMap, BTreeSet},\n    time::Duration,\n};\n\nuse num::Zero;\nuse num_rational::Ratio;\nuse num_traits::One;\n\nuse casper_storage::{\n    data_access_layer::{TotalSupplyRequest, TotalSupplyResult},\n    global_state::state::StateProvider,\n};\nuse casper_types::{\n    Block, ConsensusProtocolName, EraId, ProtocolVersion, PublicKey, Rewards, TimeDiff, U512,\n};\n\nuse crate::{\n    failpoints::FailpointActivation,\n    reactor::{\n        main_reactor::tests::{\n            configs_override::ConfigsOverride, fixture::TestFixture, initial_stakes::InitialStakes,\n            switch_blocks::SwitchBlocks, ERA_THREE, ERA_TWO,\n        },\n        Reactor,\n    },\n};\n\n// Fundamental network parameters that are not critical for assessing reward calculation correctness\nconst STAKE: u128 = 1000000000;\nconst PRIME_STAKES: [u128; 5] = [106907, 106921, 106937, 106949, 106957];\nconst ERA_COUNT: u64 = 3;\nconst ERA_DURATION: u64 = 20000;\n//milliseconds\nconst MIN_HEIGHT: u64 = 6;\nconst BLOCK_TIME: u64 = 1750;\n//milliseconds\nconst TIME_OUT: u64 = 600;\n//seconds\nconst SEIGNIORAGE: (u64, u64) = (1u64, 100u64);\nconst REPRESENTATIVE_NODE_INDEX: usize = 0;\n// Parameters we generally want to vary\nconst CONSENSUS_ZUG: ConsensusProtocolName = ConsensusProtocolName::Zug;\nconst CONSENSUS_HIGHWAY: ConsensusProtocolName = ConsensusProtocolName::Highway;\nconst FINDERS_FEE_ZERO: (u64, u64) = (0u64, 1u64);\nconst FINDERS_FEE_HALF: (u64, u64) = (1u64, 2u64);\n//const FINDERS_FEE_ONE: (u64, u64) = (1u64, 1u64);\nconst FINALITY_SIG_PROP_ZERO: (u64, u64) = (0u64, 1u64);\nconst FINALITY_SIG_PROP_HALF: (u64, u64) = (1u64, 2u64);\nconst FINALITY_SIG_PROP_ONE: (u64, u64) = (1u64, 1u64);\nconst FILTERED_NODES_INDICES: &[usize] = &[3, 4];\nconst FINALITY_SIG_LOOKBACK: u64 = 3;\n\n#[tokio::test]\n#[cfg_attr(not(feature = \"failpoints\"), ignore)]\nasync fn run_reward_network_zug_all_finality_small_prime_five_eras() {\n    
run_rewards_network_scenario(\n        PRIME_STAKES,\n        5,\n        TIME_OUT,\n        REPRESENTATIVE_NODE_INDEX,\n        &[],\n        ConfigsOverride {\n            consensus_protocol: CONSENSUS_ZUG,\n            era_duration: TimeDiff::from_millis(ERA_DURATION),\n            minimum_era_height: MIN_HEIGHT,\n            minimum_block_time: TimeDiff::from_millis(BLOCK_TIME),\n            round_seigniorage_rate: SEIGNIORAGE.into(),\n            finders_fee: FINDERS_FEE_ZERO.into(),\n            finality_signature_proportion: FINALITY_SIG_PROP_ONE.into(),\n            signature_rewards_max_delay: FINALITY_SIG_LOOKBACK,\n            ..Default::default()\n        },\n    )\n    .await;\n}\n\n#[tokio::test]\n#[cfg_attr(not(feature = \"failpoints\"), ignore)]\nasync fn run_reward_network_zug_all_finality_small_prime_five_eras_no_lookback() {\n    run_rewards_network_scenario(\n        PRIME_STAKES,\n        5,\n        TIME_OUT,\n        REPRESENTATIVE_NODE_INDEX,\n        &[],\n        ConfigsOverride {\n            consensus_protocol: CONSENSUS_ZUG,\n            era_duration: TimeDiff::from_millis(ERA_DURATION),\n            minimum_era_height: MIN_HEIGHT,\n            minimum_block_time: TimeDiff::from_millis(BLOCK_TIME),\n            round_seigniorage_rate: SEIGNIORAGE.into(),\n            finders_fee: FINDERS_FEE_ZERO.into(),\n            finality_signature_proportion: FINALITY_SIG_PROP_ONE.into(),\n            signature_rewards_max_delay: 0,\n            ..Default::default()\n        },\n    )\n    .await;\n}\n\n#[tokio::test]\n#[cfg_attr(not(feature = \"failpoints\"), ignore)]\nasync fn run_reward_network_zug_no_finality_small_nominal_five_eras() {\n    run_rewards_network_scenario(\n        [STAKE, STAKE, STAKE, STAKE, STAKE],\n        5,\n        TIME_OUT,\n        REPRESENTATIVE_NODE_INDEX,\n        &[],\n        ConfigsOverride {\n            consensus_protocol: CONSENSUS_ZUG,\n            era_duration: TimeDiff::from_millis(ERA_DURATION),\n            
minimum_era_height: MIN_HEIGHT,\n            minimum_block_time: TimeDiff::from_millis(BLOCK_TIME),\n            round_seigniorage_rate: SEIGNIORAGE.into(),\n            finders_fee: FINDERS_FEE_ZERO.into(),\n            finality_signature_proportion: FINALITY_SIG_PROP_ZERO.into(),\n            signature_rewards_max_delay: FINALITY_SIG_LOOKBACK,\n            ..Default::default()\n        },\n    )\n    .await;\n}\n\n#[tokio::test]\n#[cfg_attr(not(feature = \"failpoints\"), ignore)]\nasync fn run_reward_network_zug_half_finality_half_finders_small_nominal_five_eras() {\n    run_rewards_network_scenario(\n        [STAKE, STAKE, STAKE, STAKE, STAKE],\n        5,\n        TIME_OUT,\n        REPRESENTATIVE_NODE_INDEX,\n        &[],\n        ConfigsOverride {\n            consensus_protocol: CONSENSUS_ZUG,\n            era_duration: TimeDiff::from_millis(ERA_DURATION),\n            minimum_era_height: MIN_HEIGHT,\n            minimum_block_time: TimeDiff::from_millis(BLOCK_TIME),\n            round_seigniorage_rate: SEIGNIORAGE.into(),\n            finders_fee: FINDERS_FEE_HALF.into(),\n            finality_signature_proportion: FINALITY_SIG_PROP_HALF.into(),\n            signature_rewards_max_delay: FINALITY_SIG_LOOKBACK,\n            ..Default::default()\n        },\n    )\n    .await;\n}\n\n#[tokio::test]\n#[cfg_attr(not(feature = \"failpoints\"), ignore)]\nasync fn run_reward_network_zug_half_finality_half_finders_small_nominal_five_eras_no_lookback() {\n    run_rewards_network_scenario(\n        [STAKE, STAKE, STAKE, STAKE, STAKE],\n        5,\n        TIME_OUT,\n        REPRESENTATIVE_NODE_INDEX,\n        &[],\n        ConfigsOverride {\n            consensus_protocol: CONSENSUS_ZUG,\n            era_duration: TimeDiff::from_millis(ERA_DURATION),\n            minimum_era_height: MIN_HEIGHT,\n            minimum_block_time: TimeDiff::from_millis(BLOCK_TIME),\n            round_seigniorage_rate: SEIGNIORAGE.into(),\n            finders_fee: FINDERS_FEE_HALF.into(),\n 
           finality_signature_proportion: FINALITY_SIG_PROP_HALF.into(),\n            signature_rewards_max_delay: 0,\n            ..Default::default()\n        },\n    )\n    .await;\n}\n\n#[tokio::test]\n#[cfg_attr(not(feature = \"failpoints\"), ignore)]\nasync fn run_reward_network_zug_all_finality_half_finders_small_nominal_five_eras_no_lookback() {\n    run_rewards_network_scenario(\n        [STAKE, STAKE, STAKE, STAKE, STAKE],\n        5,\n        TIME_OUT,\n        REPRESENTATIVE_NODE_INDEX,\n        &[],\n        ConfigsOverride {\n            consensus_protocol: CONSENSUS_ZUG,\n            era_duration: TimeDiff::from_millis(ERA_DURATION),\n            minimum_era_height: MIN_HEIGHT,\n            minimum_block_time: TimeDiff::from_millis(BLOCK_TIME),\n            round_seigniorage_rate: SEIGNIORAGE.into(),\n            finders_fee: FINDERS_FEE_HALF.into(),\n            finality_signature_proportion: FINALITY_SIG_PROP_ONE.into(),\n            signature_rewards_max_delay: 0,\n            ..Default::default()\n        },\n    )\n    .await;\n}\n\n#[tokio::test]\n#[cfg_attr(not(feature = \"failpoints\"), ignore)]\nasync fn run_reward_network_zug_all_finality_half_finders() {\n    run_rewards_network_scenario(\n        [\n            STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE,\n        ],\n        ERA_COUNT,\n        TIME_OUT,\n        REPRESENTATIVE_NODE_INDEX,\n        FILTERED_NODES_INDICES,\n        ConfigsOverride {\n            consensus_protocol: CONSENSUS_ZUG,\n            era_duration: TimeDiff::from_millis(ERA_DURATION),\n            minimum_era_height: MIN_HEIGHT,\n            minimum_block_time: TimeDiff::from_millis(BLOCK_TIME),\n            round_seigniorage_rate: SEIGNIORAGE.into(),\n            finders_fee: FINDERS_FEE_HALF.into(),\n            finality_signature_proportion: FINALITY_SIG_PROP_ONE.into(),\n            signature_rewards_max_delay: FINALITY_SIG_LOOKBACK,\n            ..Default::default()\n        },\n    
)\n    .await;\n}\n\n#[tokio::test]\n#[cfg_attr(not(feature = \"failpoints\"), ignore)]\nasync fn run_reward_network_zug_all_finality_half_finders_five_eras() {\n    run_rewards_network_scenario(\n        [\n            STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE,\n        ],\n        5,\n        TIME_OUT,\n        REPRESENTATIVE_NODE_INDEX,\n        FILTERED_NODES_INDICES,\n        ConfigsOverride {\n            consensus_protocol: CONSENSUS_ZUG,\n            era_duration: TimeDiff::from_millis(ERA_DURATION),\n            minimum_era_height: MIN_HEIGHT,\n            minimum_block_time: TimeDiff::from_millis(BLOCK_TIME),\n            round_seigniorage_rate: SEIGNIORAGE.into(),\n            finders_fee: FINDERS_FEE_HALF.into(),\n            finality_signature_proportion: FINALITY_SIG_PROP_ONE.into(),\n            signature_rewards_max_delay: FINALITY_SIG_LOOKBACK,\n            ..Default::default()\n        },\n    )\n    .await;\n}\n\n#[tokio::test]\n#[cfg_attr(not(feature = \"failpoints\"), ignore)]\nasync fn run_reward_network_zug_all_finality_zero_finders() {\n    run_rewards_network_scenario(\n        [\n            STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE,\n        ],\n        ERA_COUNT,\n        TIME_OUT,\n        REPRESENTATIVE_NODE_INDEX,\n        FILTERED_NODES_INDICES,\n        ConfigsOverride {\n            consensus_protocol: CONSENSUS_ZUG,\n            era_duration: TimeDiff::from_millis(ERA_DURATION),\n            minimum_era_height: MIN_HEIGHT,\n            minimum_block_time: TimeDiff::from_millis(BLOCK_TIME),\n            round_seigniorage_rate: SEIGNIORAGE.into(),\n            finders_fee: FINDERS_FEE_ZERO.into(),\n            finality_signature_proportion: FINALITY_SIG_PROP_ONE.into(),\n            signature_rewards_max_delay: FINALITY_SIG_LOOKBACK,\n            ..Default::default()\n        },\n    )\n    .await;\n}\n\n#[tokio::test]\n#[cfg_attr(not(feature = \"failpoints\"), ignore)]\nasync fn 
run_reward_network_highway_all_finality_zero_finders() {\n    run_rewards_network_scenario(\n        [\n            STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE,\n        ],\n        ERA_COUNT,\n        TIME_OUT,\n        REPRESENTATIVE_NODE_INDEX,\n        FILTERED_NODES_INDICES,\n        ConfigsOverride {\n            consensus_protocol: CONSENSUS_HIGHWAY,\n            era_duration: TimeDiff::from_millis(ERA_DURATION),\n            minimum_era_height: MIN_HEIGHT,\n            minimum_block_time: TimeDiff::from_millis(BLOCK_TIME),\n            round_seigniorage_rate: SEIGNIORAGE.into(),\n            finders_fee: FINDERS_FEE_ZERO.into(),\n            finality_signature_proportion: FINALITY_SIG_PROP_ONE.into(),\n            signature_rewards_max_delay: FINALITY_SIG_LOOKBACK,\n            ..Default::default()\n        },\n    )\n    .await;\n}\n\n#[tokio::test]\n#[cfg_attr(not(feature = \"failpoints\"), ignore)]\nasync fn run_reward_network_highway_no_finality() {\n    run_rewards_network_scenario(\n        [\n            STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE,\n        ],\n        ERA_COUNT,\n        TIME_OUT,\n        REPRESENTATIVE_NODE_INDEX,\n        FILTERED_NODES_INDICES,\n        ConfigsOverride {\n            consensus_protocol: CONSENSUS_HIGHWAY,\n            era_duration: TimeDiff::from_millis(ERA_DURATION),\n            minimum_era_height: MIN_HEIGHT,\n            minimum_block_time: TimeDiff::from_millis(BLOCK_TIME),\n            round_seigniorage_rate: SEIGNIORAGE.into(),\n            finders_fee: FINDERS_FEE_ZERO.into(),\n            finality_signature_proportion: FINALITY_SIG_PROP_ZERO.into(),\n            signature_rewards_max_delay: FINALITY_SIG_LOOKBACK,\n            ..Default::default()\n        },\n    )\n    .await;\n}\n\n#[tokio::test]\nasync fn rewards_are_calculated() {\n    let initial_stakes = InitialStakes::Random { count: 5 };\n    let spec_override = ConfigsOverride {\n        
minimum_era_height: 3,\n        ..Default::default()\n    };\n    let mut fixture = TestFixture::new(initial_stakes, Some(spec_override)).await;\n    fixture\n        .run_until_consensus_in_era(ERA_THREE, Duration::from_secs(150))\n        .await;\n\n    let switch_block = fixture.switch_block(ERA_TWO);\n\n    for reward in switch_block\n        .era_end()\n        .unwrap()\n        .rewards()\n        .values()\n        .map(|amounts| {\n            amounts\n                .iter()\n                .fold(U512::zero(), |acc, amount| *amount + acc)\n        })\n    {\n        assert_ne!(reward, U512::zero());\n    }\n}\n\nasync fn run_rewards_network_scenario(\n    initial_stakes: impl Into<Vec<u128>>,\n    era_count: u64,\n    time_out: u64, //seconds\n    representative_node_index: usize,\n    filtered_nodes_indices: &[usize],\n    spec_override: ConfigsOverride,\n) {\n    trait AsU512Ext {\n        fn into_u512(self) -> Ratio<U512>;\n    }\n    impl AsU512Ext for Ratio<u64> {\n        fn into_u512(self) -> Ratio<U512> {\n            Ratio::new(U512::from(*self.numer()), U512::from(*self.denom()))\n        }\n    }\n\n    let initial_stakes = initial_stakes.into();\n\n    // Instantiate the chain\n    let mut fixture =\n        TestFixture::new(InitialStakes::FromVec(initial_stakes), Some(spec_override)).await;\n\n    for i in filtered_nodes_indices {\n        let filtered_node = fixture.network.runners_mut().nth(*i).unwrap();\n        filtered_node\n            .reactor_mut()\n            .inner_mut()\n            .activate_failpoint(&FailpointActivation::new(\"finality_signature_creation\"));\n    }\n\n    // Run the network for a specified number of eras\n    let timeout = Duration::from_secs(time_out);\n    fixture\n        .run_until_stored_switch_block_header(EraId::new(era_count - 1), timeout)\n        .await;\n\n    // DATA COLLECTION\n    // Get the switch blocks and bid structs first\n    let switch_blocks = 
SwitchBlocks::collect(fixture.network.nodes(), era_count);\n\n    // Representative node\n    // (this test should normally run a network at nominal performance with identical nodes)\n    let representative_node = fixture\n        .network\n        .nodes()\n        .values()\n        .nth(representative_node_index)\n        .unwrap();\n    let representative_storage = &representative_node.main_reactor().storage;\n    let representative_runtime = &representative_node.main_reactor().contract_runtime;\n\n    // Recover highest completed block height\n    let highest_completed_height = representative_storage\n        .highest_complete_block_height()\n        .expect(\"missing highest completed block\");\n\n    // Get all the blocks\n    let blocks: Vec<Block> = (0..highest_completed_height + 1)\n        .map(|i| {\n            representative_storage\n                .read_block_by_height(i)\n                .expect(\"block not found\")\n        })\n        .collect();\n\n    let protocol_version = ProtocolVersion::from_parts(2, 0, 0);\n\n    // Get total supply history\n    let total_supply: Vec<U512> = (0..highest_completed_height + 1)\n        .map(|height: u64| {\n            let state_hash = *representative_storage\n                .read_block_header_by_height(height, true)\n                .expect(\"failure to read block header\")\n                .unwrap()\n                .state_root_hash();\n            let total_supply_req = TotalSupplyRequest::new(state_hash, protocol_version);\n            let result = representative_runtime\n                .data_access_layer()\n                .total_supply(total_supply_req);\n\n            if let TotalSupplyResult::Success { total_supply } = result {\n                total_supply\n            } else {\n                panic!(\"expected success, not: {:?}\", result);\n            }\n        })\n        .collect();\n\n    // Tiny helper function\n    #[inline]\n    fn add_to_rewards(\n        recipient: PublicKey,\n        
era: EraId,\n        reward: Ratio<U512>,\n        rewards: &mut BTreeMap<PublicKey, BTreeMap<EraId, Ratio<U512>>>,\n    ) {\n        match rewards.get_mut(&recipient) {\n            Some(map) => {\n                *map.entry(era).or_insert(Ratio::zero()) += reward;\n            }\n            None => {\n                let mut map = BTreeMap::new();\n                map.insert(era, reward);\n                rewards.insert(recipient, map);\n            }\n        }\n    }\n\n    let mut recomputed_total_supply = BTreeMap::new();\n    recomputed_total_supply.insert(0, Ratio::from(total_supply[0]));\n    let recomputed_rewards: BTreeMap<_, _> = switch_blocks\n        .headers\n        .iter()\n        .enumerate()\n        .map(|(i, switch_block)| {\n            if switch_block.is_genesis() || switch_block.height() > highest_completed_height {\n                return (i, BTreeMap::new());\n            }\n            let mut recomputed_era_rewards = BTreeMap::new();\n            if !switch_block.is_genesis() {\n                let supply_carryover = recomputed_total_supply\n                    .get(&(i - 1))\n                    .copied()\n                    .expect(\"expected prior recomputed supply value\");\n                recomputed_total_supply.insert(i, supply_carryover);\n            }\n\n            // It's not a genesis block, so we know there's something with a lower era id\n            let previous_switch_block_height = switch_blocks.headers[i - 1].height();\n            let current_era_slated_weights = match switch_blocks.headers[i - 1].clone_era_end() {\n                Some(era_report) => era_report.next_era_validator_weights().clone(),\n                _ => panic!(\"unexpectedly absent era report\"),\n            };\n            let total_current_era_weights = current_era_slated_weights\n                .iter()\n                .fold(U512::zero(), move |acc, s| acc + s.1);\n            let weights_block_idx = if switch_blocks.headers[i - 
1].is_genesis() {\n                i - 1\n            } else {\n                i - 2\n            };\n            let (previous_era_slated_weights, total_previous_era_weights) =\n                match switch_blocks.headers[weights_block_idx].clone_era_end() {\n                    Some(era_report) => {\n                        let next_weights = era_report.next_era_validator_weights().clone();\n                        let total_next_weights = next_weights\n                            .iter()\n                            .fold(U512::zero(), move |acc, s| acc + s.1);\n                        (next_weights, total_next_weights)\n                    }\n                    _ => panic!(\"unexpectedly absent era report\"),\n                };\n\n            let rewarded_range =\n                previous_switch_block_height as usize + 1..switch_block.height() as usize + 1;\n            let rewarded_blocks = &blocks[rewarded_range];\n            let block_reward = (Ratio::<U512>::one()\n                - fixture\n                    .chainspec\n                    .core_config\n                    .finality_signature_proportion\n                    .into_u512())\n                * recomputed_total_supply[&(i - 1)]\n                * fixture\n                    .chainspec\n                    .core_config\n                    .round_seigniorage_rate\n                    .into_u512();\n            let signatures_reward = fixture\n                .chainspec\n                .core_config\n                .finality_signature_proportion\n                .into_u512()\n                * recomputed_total_supply[&(i - 1)]\n                * fixture\n                    .chainspec\n                    .core_config\n                    .round_seigniorage_rate\n                    .into_u512();\n            let previous_signatures_reward_idx = if switch_blocks.headers[i - 1].is_genesis() {\n                i - 1\n            } else {\n                i - 2\n            };\n            
let previous_signatures_reward = fixture\n                .chainspec\n                .core_config\n                .finality_signature_proportion\n                .into_u512()\n                * recomputed_total_supply[&previous_signatures_reward_idx]\n                * fixture\n                    .chainspec\n                    .core_config\n                    .round_seigniorage_rate\n                    .into_u512();\n\n            rewarded_blocks.iter().for_each(|block: &Block| {\n                // Block production rewards\n                let proposer = block.proposer().clone();\n                add_to_rewards(\n                    proposer.clone(),\n                    block.era_id(),\n                    block_reward,\n                    &mut recomputed_era_rewards,\n                );\n\n                // Recover relevant finality signatures\n                block.rewarded_signatures().iter().enumerate().for_each(\n                    |(offset, signatures_packed)| {\n                        if block.height() as usize - offset - 1\n                            <= previous_switch_block_height as usize\n                        {\n                            let rewarded_contributors = signatures_packed.to_validator_set(\n                                previous_era_slated_weights\n                                    .keys()\n                                    .cloned()\n                                    .collect::<BTreeSet<PublicKey>>(),\n                            );\n                            rewarded_contributors.iter().for_each(|contributor| {\n                                let contributor_proportion = Ratio::new(\n                                    previous_era_slated_weights\n                                        .get(contributor)\n                                        .copied()\n                                        .expect(\"expected current era validator\"),\n                                    total_previous_era_weights,\n          
                      );\n                                // collection always goes to the era in which the block citing the\n                                // reward was created\n                                add_to_rewards(\n                                    proposer.clone(),\n                                    block.era_id(),\n                                    fixture.chainspec.core_config.finders_fee.into_u512()\n                                        * contributor_proportion\n                                        * previous_signatures_reward,\n                                    &mut recomputed_era_rewards,\n                                );\n                                add_to_rewards(\n                                    contributor.clone(),\n                                    switch_blocks.headers[i - 1].era_id(),\n                                    (Ratio::<U512>::one()\n                                        - fixture.chainspec.core_config.finders_fee.into_u512())\n                                        * contributor_proportion\n                                        * previous_signatures_reward,\n                                    &mut recomputed_era_rewards,\n                                )\n                            });\n                        } else {\n                            let rewarded_contributors = signatures_packed.to_validator_set(\n                                current_era_slated_weights\n                                    .keys()\n                                    .cloned()\n                                    .collect::<BTreeSet<PublicKey>>(),\n                            );\n                            rewarded_contributors.iter().for_each(|contributor| {\n                                let contributor_proportion = Ratio::new(\n                                    *current_era_slated_weights\n                                        .get(contributor)\n                                        
.expect(\"expected current era validator\"),\n                                    total_current_era_weights,\n                                );\n                                add_to_rewards(\n                                    proposer.clone(),\n                                    block.era_id(),\n                                    fixture.chainspec.core_config.finders_fee.into_u512()\n                                        * contributor_proportion\n                                        * signatures_reward,\n                                    &mut recomputed_era_rewards,\n                                );\n                                add_to_rewards(\n                                    contributor.clone(),\n                                    block.era_id(),\n                                    (Ratio::<U512>::one()\n                                        - fixture.chainspec.core_config.finders_fee.into_u512())\n                                        * contributor_proportion\n                                        * signatures_reward,\n                                    &mut recomputed_era_rewards,\n                                );\n                            });\n                        }\n                    },\n                );\n            });\n\n            // Make sure we round just as we do in the real code, at the end of an era's\n            // calculation, right before minting and transferring\n            recomputed_era_rewards.iter_mut().for_each(|(_, rewards)| {\n                rewards.values_mut().for_each(|amount| {\n                    *amount = amount.trunc();\n                });\n                let truncated_reward = rewards.values().sum::<Ratio<U512>>();\n                let era_end_supply = recomputed_total_supply\n                    .get_mut(&i)\n                    .expect(\"expected supply at end of era\");\n                *era_end_supply += truncated_reward;\n            });\n\n            (i, 
recomputed_era_rewards)\n        })\n        .collect();\n\n    // Recalculated total supply is equal to observed total supply\n    switch_blocks.headers.iter().for_each(|header| {\n        if header.height() <= highest_completed_height {\n            assert_eq!(\n                Ratio::from(total_supply[header.height() as usize]),\n                *(recomputed_total_supply\n                    .get(&(header.era_id().value() as usize))\n                    .expect(\"expected recalculated supply\")),\n                \"total supply does not match at height {}\",\n                header.height()\n            );\n        }\n    });\n\n    // Recalculated rewards are equal to observed rewards; total supply increase is equal to total\n    // rewards;\n    recomputed_rewards.iter().for_each(|(era, rewards)| {\n        if era > &0 && switch_blocks.headers[*era].height() <= highest_completed_height {\n            let observed_total_rewards = match switch_blocks.headers[*era]\n                .clone_era_end()\n                .expect(\"expected EraEnd\")\n                .rewards()\n            {\n                Rewards::V1(v1_rewards) => v1_rewards\n                    .iter()\n                    .fold(U512::zero(), |acc, reward| U512::from(*reward.1) + acc),\n                Rewards::V2(v2_rewards) => v2_rewards\n                    .iter()\n                    .flat_map(|(_key, amounts)| amounts)\n                    .fold(U512::zero(), |acc, reward| *reward + acc),\n            };\n            let recomputed_total_rewards: U512 = rewards\n                .values()\n                .flat_map(|amounts| amounts.values().map(|reward| reward.to_integer()))\n                .sum();\n            assert_eq!(\n                Ratio::from(recomputed_total_rewards),\n                Ratio::from(observed_total_rewards),\n                \"total rewards do not match at era {}\\nobserved = {:#?}\\nrecomputed = {:#?}\",\n                era,\n                
switch_blocks.headers[*era]\n                    .clone_era_end()\n                    .expect(\"\")\n                    .rewards(),\n                rewards,\n            );\n            assert_eq!(\n                Ratio::from(recomputed_total_rewards),\n                recomputed_total_supply\n                    .get(era)\n                    .expect(\"expected recalculated supply\")\n                    - recomputed_total_supply\n                        .get(&(era - 1))\n                        .expect(\"expected recalculated supply\"),\n                \"supply growth does not match rewards at era {}\",\n                era\n            )\n        }\n    })\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/tests/switch_blocks.rs",
    "content": "use std::collections::BTreeMap;\n\nuse casper_storage::{\n    data_access_layer::{BidsRequest, BidsResult},\n    global_state::state::StateProvider,\n};\nuse casper_types::{system::auction::BidKind, BlockHeader, EraId, PublicKey, U512};\n\nuse crate::reactor::main_reactor::tests::Nodes;\n\n/// A set of consecutive switch blocks.\npub(crate) struct SwitchBlocks {\n    pub headers: Vec<BlockHeader>,\n}\n\nimpl SwitchBlocks {\n    /// Collects all switch blocks of the first `era_count` eras, and asserts that they are equal\n    /// in all nodes.\n    pub(crate) fn collect(nodes: &Nodes, era_count: u64) -> SwitchBlocks {\n        let mut headers = Vec::new();\n        for era_number in 0..era_count {\n            let mut header_iter = nodes.values().map(|runner| {\n                let storage = runner.main_reactor().storage();\n                let maybe_block = storage.read_switch_block_by_era_id(EraId::from(era_number));\n                maybe_block.expect(\"missing switch block\").take_header()\n            });\n            let header = header_iter.next().unwrap();\n            assert_eq!(era_number, header.era_id().value());\n            for other_header in header_iter {\n                assert_eq!(header, other_header);\n            }\n            headers.push(header);\n        }\n        SwitchBlocks { headers }\n    }\n\n    /// Returns the list of equivocators in the given era.\n    pub(crate) fn equivocators(&self, era_number: u64) -> &[PublicKey] {\n        self.headers[era_number as usize]\n            .maybe_equivocators()\n            .expect(\"era end\")\n    }\n\n    /// Returns the list of inactive validators in the given era.\n    pub(crate) fn inactive_validators(&self, era_number: u64) -> &[PublicKey] {\n        self.headers[era_number as usize]\n            .maybe_inactive_validators()\n            .expect(\"era end\")\n    }\n\n    /// Returns the list of validators in the successor era.\n    pub(crate) fn next_era_validators(&self, 
era_number: u64) -> &BTreeMap<PublicKey, U512> {\n        self.headers[era_number as usize]\n            .next_era_validator_weights()\n            .expect(\"validators\")\n    }\n\n    /// Returns the set of bids in the auction contract at the end of the given era.\n    pub(crate) fn bids(&self, nodes: &Nodes, era_number: u64) -> Vec<BidKind> {\n        let state_root_hash = *self.headers[era_number as usize].state_root_hash();\n        for runner in nodes.values() {\n            let request = BidsRequest::new(state_root_hash);\n            let data_provider = runner.main_reactor().contract_runtime().data_access_layer();\n            if let BidsResult::Success { bids } = data_provider.bids(request) {\n                return bids;\n            }\n        }\n        unreachable!(\"at least one node should have bids for era {}\", era_number);\n    }\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/tests/transaction_scenario/asertions.rs",
    "content": "use super::utils::{Assertion, TestStateSnapshot};\nuse crate::reactor::main_reactor::tests::transactions::{\n    assert_exec_result_cost, exec_result_is_success, BalanceAmount,\n};\nuse async_trait::async_trait;\nuse casper_types::{Gas, PublicKey, TransactionHash, U512};\nuse once_cell::sync::Lazy;\nuse std::collections::BTreeMap;\n\npub(crate) struct TransactionSuccessful {\n    hash: TransactionHash,\n}\n\nimpl TransactionSuccessful {\n    pub(crate) fn new(hash: TransactionHash) -> Self {\n        Self { hash }\n    }\n}\n\npub static ZERO_BALANCE_AMOUNT: Lazy<BalanceAmount> = Lazy::new(BalanceAmount::zero);\n\n#[async_trait]\nimpl Assertion for TransactionSuccessful {\n    async fn assert(&self, snapshots_at_heights: BTreeMap<u64, TestStateSnapshot>) {\n        let current_state = snapshots_at_heights.last_key_value().unwrap().1;\n        assert!(current_state.exec_infos.contains_key(&self.hash));\n        let exec_info = current_state.exec_infos.get(&self.hash).unwrap();\n        assert!(exec_info.execution_result.is_some());\n        let result = exec_info.execution_result.as_ref().unwrap();\n        assert!(exec_result_is_success(result));\n    }\n}\n\npub(crate) struct TransactionFailure {\n    hash: TransactionHash,\n    expected_error_message: Option<String>,\n}\n\nimpl TransactionFailure {\n    pub(crate) fn new(hash: TransactionHash) -> Self {\n        Self {\n            hash,\n            expected_error_message: None,\n        }\n    }\n\n    pub(crate) fn expected_error_message(hash: TransactionHash, error_message: &str) -> Self {\n        Self {\n            hash,\n            expected_error_message: Some(error_message.to_string()),\n        }\n    }\n}\n\n#[async_trait]\nimpl Assertion for TransactionFailure {\n    async fn assert(&self, snapshots_at_heights: BTreeMap<u64, TestStateSnapshot>) {\n        let current_state = snapshots_at_heights.last_key_value().unwrap().1;\n        
assert!(current_state.exec_infos.contains_key(&self.hash));\n        let exec_info = current_state.exec_infos.get(&self.hash).unwrap();\n        assert!(exec_info.execution_result.is_some());\n        let result = exec_info.execution_result.as_ref().unwrap();\n        let error_msg = match result {\n            casper_types::execution::ExecutionResult::V1(_) => todo!(),\n            casper_types::execution::ExecutionResult::V2(execution_result_v2) => {\n                execution_result_v2.error_message.clone()\n            }\n        };\n        assert!(error_msg.is_some());\n        if let Some(msg) = &self.expected_error_message {\n            assert_eq!(error_msg.unwrap(), msg.to_string());\n        }\n    }\n}\n\npub(crate) struct ExecResultCost {\n    hash: TransactionHash,\n    expected_cost: U512,\n    expected_consumed_gas: Gas,\n}\n\nimpl ExecResultCost {\n    pub(crate) fn new(\n        hash: TransactionHash,\n        expected_cost: U512,\n        expected_consumed_gas: Gas,\n    ) -> Self {\n        Self {\n            hash,\n            expected_cost,\n            expected_consumed_gas,\n        }\n    }\n}\n\n#[async_trait]\nimpl Assertion for ExecResultCost {\n    async fn assert(&self, snapshots_at_heights: BTreeMap<u64, TestStateSnapshot>) {\n        let current_state = snapshots_at_heights.last_key_value().unwrap().1;\n        assert!(current_state.exec_infos.contains_key(&self.hash));\n        let exec_info = current_state.exec_infos.get(&self.hash).unwrap();\n        assert!(exec_info.execution_result.is_some());\n        let result = exec_info.execution_result.as_ref().unwrap();\n        assert_exec_result_cost(\n            result.clone(),\n            self.expected_cost,\n            self.expected_consumed_gas,\n            \"transfer_cost_fixed_price_no_fee_no_refund\",\n        );\n    }\n}\n\npub(crate) struct TotalSupplyChange {\n    //It's a signed integer since we can expect either an increase or decrease.\n    total_supply_change: 
i64,\n    at_block_height: u64,\n}\n\nimpl TotalSupplyChange {\n    pub(crate) fn new(total_supply_change: i64, at_block_height: u64) -> Self {\n        Self {\n            total_supply_change,\n            at_block_height,\n        }\n    }\n}\n\n#[async_trait]\nimpl Assertion for TotalSupplyChange {\n    async fn assert(&self, snapshots_at_heights: BTreeMap<u64, TestStateSnapshot>) {\n        let before = snapshots_at_heights.get(&1).unwrap();\n        let after = snapshots_at_heights.get(&self.at_block_height).unwrap();\n        let before_total_supply = before.total_supply;\n        let got = after.total_supply;\n        let total_supply = self.total_supply_change;\n        let expected = if total_supply > 0 {\n            before_total_supply\n                .checked_add(total_supply.unsigned_abs().into())\n                .unwrap()\n        } else {\n            before_total_supply\n                .checked_sub(total_supply.unsigned_abs().into())\n                .unwrap()\n        };\n        assert_eq!(expected, got);\n    }\n}\n\n#[derive(Copy, Clone, Debug)]\n/// An amount up or down.\npub enum BalanceChange {\n    Up(U512),\n    Down(U512),\n}\n\n/// Assert that the account associated with the given public key has observed a change in balance.\n/// Can assert on total and available balance.\npub(crate) struct PublicKeyBalanceChange {\n    /// public key of the account which needs to be queried\n    public_key: PublicKey,\n    //It's a signed integer since we can expect either an increase or decrease.\n    total_balance_change: BalanceChange,\n    //It's a signed integer since we can expect either an increase or decrease.\n    available_balance_change: BalanceChange,\n}\n\nimpl PublicKeyBalanceChange {\n    pub(crate) fn new(\n        public_key: PublicKey,\n        total_balance_change: BalanceChange,\n        available_balance_change: BalanceChange,\n    ) -> Self {\n        Self {\n            public_key,\n            total_balance_change,\n            
available_balance_change,\n        }\n    }\n}\n\n#[async_trait]\nimpl Assertion for PublicKeyBalanceChange {\n    async fn assert(&self, snapshots_at_heights: BTreeMap<u64, TestStateSnapshot>) {\n        let account_hash = self.public_key.to_account_hash();\n        let before = snapshots_at_heights.get(&0).unwrap();\n        let after = snapshots_at_heights.last_key_value().unwrap().1;\n        let before_balance = before\n            .balances\n            .get(&account_hash)\n            //There is a chance that the key we're asking for was not an account in\n            // genesis, if that's true we don't expect it to be at height 0.\n            .unwrap_or(&ZERO_BALANCE_AMOUNT);\n\n        let before_total = before_balance.total;\n        let before_available = before_balance.available;\n        let after_total = after.balances.get(&account_hash).unwrap().total;\n        let after_available = after.balances.get(&account_hash).unwrap().available;\n\n        let expected = {\n            match self.total_balance_change {\n                BalanceChange::Up(val) => {\n                    before_total.checked_add(val).expect(\"should mod total up\")\n                }\n                BalanceChange::Down(val) => before_total\n                    .checked_sub(val)\n                    .expect(\"should mod total down\"),\n            }\n        };\n\n        assert_eq!(after_total, expected, \"after_total should match expected\");\n        let expected = {\n            match self.available_balance_change {\n                BalanceChange::Up(val) => before_available\n                    .checked_add(val)\n                    .expect(\"should mod available up\"),\n                BalanceChange::Down(val) => before_available\n                    .checked_sub(val)\n                    .expect(\"should mod available down\"),\n            }\n        };\n        assert_eq!(\n            after_available, expected,\n            \"after_available should match expected\"\n     
   );\n    }\n}\n\npub(crate) struct PublicKeyTotalMeetsAvailable {\n    public_key: PublicKey,\n}\n\nimpl PublicKeyTotalMeetsAvailable {\n    pub(crate) fn new(public_key: PublicKey) -> Self {\n        Self { public_key }\n    }\n}\n\n#[async_trait]\nimpl Assertion for PublicKeyTotalMeetsAvailable {\n    async fn assert(&self, snapshots_at_heights: BTreeMap<u64, TestStateSnapshot>) {\n        let account_hash = self.public_key.to_account_hash();\n        let after = snapshots_at_heights.last_key_value().unwrap().1;\n        let balance = after.balances.get(&account_hash).unwrap();\n        let after_total = balance.total;\n        let after_available = balance.available;\n        assert_eq!(after_total, after_available);\n    }\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/tests/transaction_scenario/utils.rs",
    "content": "use async_trait::async_trait;\nuse casper_storage::{\n    data_access_layer::{\n        balance::BalanceHandling,\n        tagged_values::{TaggedValuesRequest, TaggedValuesResult, TaggedValuesSelection},\n        BalanceRequest, BalanceResult, ProofHandling, TotalSupplyRequest, TotalSupplyResult,\n    },\n    global_state::state::StateProvider,\n};\nuse casper_types::{\n    account::AccountHash, bytesrepr::Bytes, testing::TestRng, EraId, ExecutionInfo, FeeHandling,\n    KeyTag, PricingHandling, PricingMode, PublicKey, RefundHandling, SecretKey, TimeDiff,\n    Transaction, TransactionHash, TransactionRuntimeParams, U512,\n};\nuse once_cell::sync::OnceCell;\nuse std::{collections::BTreeMap, sync::Arc, time::Duration};\n\nuse crate::{\n    reactor::main_reactor::tests::{\n        configs_override::ConfigsOverride,\n        fixture::TestFixture,\n        transactions::{\n            BalanceAmount, ALICE_PUBLIC_KEY, ALICE_SECRET_KEY, BOB_PUBLIC_KEY, BOB_SECRET_KEY,\n        },\n        ERA_ONE, ONE_MIN, TEN_SECS,\n    },\n    types::transaction::transaction_v1_builder::TransactionV1Builder,\n};\n\npub(crate) struct TestStateSnapshot {\n    pub(crate) exec_infos: BTreeMap<TransactionHash, ExecutionInfo>,\n    pub(crate) balances: BTreeMap<AccountHash, BalanceAmount>,\n    pub(crate) total_supply: U512,\n}\n\n/// This defines the condition\n/// a network should achieve after setup and start\n/// before we can proceed with transaction injection\n#[derive(Clone, Debug)]\npub(crate) enum RunUntilCondition {\n    /// Runs the network until all nodes reach the given completed block height.\n    BlockHeight { block_height: u64, within: Duration },\n    /// Runs the network until all nodes' consensus components reach the given era.\n    ConsensusInEra { era_id: EraId, within: Duration },\n}\n\nimpl RunUntilCondition {\n    async fn run_until(&self, fixture: &mut TestFixture) -> Result<(), TestScenarioError> {\n        match self {\n            
RunUntilCondition::BlockHeight {\n                block_height,\n                within,\n            } => {\n                fixture\n                    .try_run_until_block_height(*block_height, *within)\n                    .await\n            }\n            RunUntilCondition::ConsensusInEra { era_id, within } => {\n                fixture.try_until_consensus_in_era(*era_id, *within).await\n            }\n        }\n        .map_err(|_| TestScenarioError::NetworkDidNotStabilize)\n    }\n}\n\n#[derive(Debug)]\npub(crate) enum TestScenarioError {\n    UnexpectedState,\n    NetworkDidNotStabilize,\n    CannotSetBeforeState,\n}\n\nstruct ScenarioDataInstance {\n    fixture: TestFixture,\n    block_height: u64,\n}\n\nimpl ScenarioDataInstance {\n    pub(crate) async fn inject_transaction(&mut self, txn: Transaction) {\n        self.fixture.inject_transaction(txn).await\n    }\n\n    pub(crate) async fn run_until_executed_transaction(\n        &mut self,\n        txn_hash: &TransactionHash,\n        within: Duration,\n    ) {\n        self.fixture\n            .run_until_executed_transaction(txn_hash, within)\n            .await\n    }\n}\n\n#[async_trait]\npub(crate) trait Assertion: Send + Sync {\n    async fn assert(&self, snapshots_at_heights: BTreeMap<u64, TestStateSnapshot>);\n}\n\n#[derive(Debug, Clone, Eq, PartialEq)]\nenum TestScenarioState {\n    PreSetup,\n    PreRun,\n    Running,\n}\n\npub(crate) struct TestScenario {\n    state: TestScenarioState,\n    data: ScenarioDataInstance,\n    initial_run_until: RunUntilCondition,\n    exec_infos: BTreeMap<TransactionHash, ExecutionInfo>,\n    state_before_test: OnceCell<TestStateSnapshot>,\n}\n\nimpl TestScenario {\n    pub(crate) async fn setup(&mut self) -> Result<(), TestScenarioError> {\n        if self.state != TestScenarioState::PreSetup {\n            return Err(TestScenarioError::UnexpectedState);\n        }\n        self.run_until(self.initial_run_until.clone()).await?;\n        
self.state_before_test\n            .set(self.get_current_state().await)\n            .map_err(|_| TestScenarioError::CannotSetBeforeState)?;\n        self.state = TestScenarioState::PreRun;\n        Ok(())\n    }\n\n    pub(crate) async fn run(\n        &mut self,\n        to_inject: Vec<Transaction>,\n    ) -> Result<Vec<ExecutionInfo>, TestScenarioError> {\n        if self.state == TestScenarioState::PreSetup {\n            return Err(TestScenarioError::UnexpectedState);\n        }\n        let mut to_ret = vec![];\n        for transaction in &to_inject {\n            let hash = transaction.hash();\n            self.data.inject_transaction(transaction.clone()).await;\n            self.data\n                .run_until_executed_transaction(&hash, TEN_SECS)\n                .await;\n            let (_node_id, runner) = self.data.fixture.network.nodes().iter().next().unwrap();\n            let exec_info = runner\n                .main_reactor()\n                .storage()\n                .read_execution_info(hash)\n                .expect(\"Expected transaction to be included in a block.\");\n            let transaction_block_height = exec_info.block_height;\n            if transaction_block_height > self.data.block_height {\n                self.data.block_height = transaction_block_height;\n            }\n            to_ret.push(exec_info.clone());\n            self.exec_infos.insert(hash, exec_info);\n        }\n        self.state = TestScenarioState::Running;\n        Ok(to_ret)\n    }\n\n    pub(crate) async fn run_until(\n        &mut self,\n        run_until: RunUntilCondition,\n    ) -> Result<(), TestScenarioError> {\n        run_until.run_until(&mut self.data.fixture).await\n    }\n\n    pub(crate) fn chain_name(&self) -> String {\n        self.data.fixture.chainspec.network_config.name.clone()\n    }\n\n    pub(crate) async fn assert<T: Assertion>(&mut self, assertion: T) {\n        if self.state_before_test.get().is_none() {\n            
panic!(\"TestScenario not in state eligible to do assertions\");\n        }\n        let max_block_height = self.data.fixture.highest_complete_block().height();\n        let mut snapshots = BTreeMap::new();\n        for i in 0..=max_block_height {\n            snapshots.insert(i, self.get_state_at_height(i).await);\n        }\n        assertion.assert(snapshots).await\n    }\n\n    async fn get_state_at_height(&self, block_height: u64) -> TestStateSnapshot {\n        let all_accounts = self.get_all_accounts(block_height).await;\n        let mut balances = BTreeMap::new();\n        for account_hash in all_accounts {\n            let balance_amount = self.get_balance_amount(account_hash, block_height).await;\n            balances.insert(account_hash, balance_amount);\n        }\n\n        let total_supply = self.get_total_supply(block_height).await;\n        let exec_infos: BTreeMap<TransactionHash, ExecutionInfo> = self\n            .exec_infos\n            .iter()\n            .filter_map(|(k, v)| {\n                if v.block_height <= block_height {\n                    Some((*k, v.clone()))\n                } else {\n                    None\n                }\n            })\n            .collect();\n\n        TestStateSnapshot {\n            exec_infos,\n            balances,\n            total_supply,\n        }\n    }\n\n    async fn get_current_state(&self) -> TestStateSnapshot {\n        let block = self.data.fixture.highest_complete_block();\n        let block_height = block.height();\n        self.get_state_at_height(block_height).await\n    }\n\n    async fn get_all_accounts(&self, block_height: u64) -> Vec<AccountHash> {\n        let fixture = &self.data.fixture;\n        let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap();\n        let block_header = runner\n            .main_reactor()\n            .storage()\n            .read_block_header_by_height(block_height, true)\n            .expect(\"failure to read block header\")\n       
     .expect(\"should have block header\");\n        let state_hash = *block_header.state_root_hash();\n        let request =\n            TaggedValuesRequest::new(state_hash, TaggedValuesSelection::All(KeyTag::Account));\n        match runner\n            .main_reactor()\n            .contract_runtime()\n            .data_access_layer()\n            .tagged_values(request)\n        {\n            TaggedValuesResult::Success { values, .. } => values\n                .iter()\n                .filter_map(|el| el.as_account().map(|el| el.account_hash()))\n                .collect(),\n            _ => panic!(\"Couldn't get all account hashes\"),\n        }\n    }\n\n    pub(crate) fn get_balance(\n        &self,\n        account_hash: AccountHash,\n        block_height: Option<u64>,\n        get_total: bool,\n    ) -> BalanceResult {\n        let fixture = &self.data.fixture;\n        let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap();\n        let protocol_version = fixture.chainspec.protocol_version();\n        let block_height = block_height.unwrap_or(\n            runner\n                .main_reactor()\n                .storage()\n                .highest_complete_block_height()\n                .expect(\"missing highest completed block\"),\n        );\n        let block_header = runner\n            .main_reactor()\n            .storage()\n            .read_block_header_by_height(block_height, true)\n            .expect(\"failure to read block header\")\n            .unwrap();\n        let state_hash = *block_header.state_root_hash();\n        let balance_handling = if get_total {\n            BalanceHandling::Total\n        } else {\n            BalanceHandling::Available\n        };\n        runner\n            .main_reactor()\n            .contract_runtime()\n            .data_access_layer()\n            .balance(BalanceRequest::from_account_hash(\n                state_hash,\n                protocol_version,\n                
account_hash,\n                balance_handling,\n                ProofHandling::NoProofs,\n            ))\n    }\n\n    async fn get_balance_amount(\n        &self,\n        account_hash: AccountHash,\n        block_height: u64,\n    ) -> BalanceAmount {\n        let block_height = Some(block_height);\n\n        let total = self\n            .get_balance(account_hash, block_height, true)\n            .total_balance()\n            .copied()\n            .unwrap_or(U512::zero());\n        let available = self\n            .get_balance(account_hash, block_height, false)\n            .available_balance()\n            .copied()\n            .unwrap_or(U512::zero());\n        BalanceAmount { available, total }\n    }\n\n    async fn get_total_supply(&self, block_height: u64) -> U512 {\n        let fixture = &self.data.fixture;\n        let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap();\n        let protocol_version = fixture.chainspec.protocol_version();\n        let state_hash = *runner\n            .main_reactor()\n            .storage()\n            .read_block_header_by_height(block_height, true)\n            .expect(\"failure to read block header\")\n            .unwrap()\n            .state_root_hash();\n\n        let total_supply_req = TotalSupplyRequest::new(state_hash, protocol_version);\n        let result = runner\n            .main_reactor()\n            .contract_runtime()\n            .data_access_layer()\n            .total_supply(total_supply_req);\n\n        if let TotalSupplyResult::Success { total_supply } = result {\n            total_supply\n        } else {\n            panic!(\"Can't get total supply\")\n        }\n    }\n\n    pub(crate) fn mint_const_transfer_cost(&self) -> u32 {\n        self.data\n            .fixture\n            .chainspec\n            .system_costs_config\n            .mint_costs()\n            .transfer\n    }\n\n    pub(crate) fn native_transfer_minimum_motes(&self) -> u64 {\n        self.data\n      
      .fixture\n            .chainspec\n            .transaction_config\n            .native_transfer_minimum_motes\n    }\n\n    pub(crate) fn get_gas_limit_for_lane(&self, lane_id: u8) -> Option<u64> {\n        self.data\n            .fixture\n            .chainspec\n            .transaction_config\n            .transaction_v1_config\n            .get_lane_by_id(lane_id)\n            .map(|el| el.max_transaction_gas_limit())\n    }\n\n    pub(crate) fn get_block_height(&self) -> u64 {\n        self.data.block_height\n    }\n}\n\ntype StakesType = Option<(Vec<Arc<SecretKey>>, BTreeMap<PublicKey, (U512, U512)>)>;\n\n#[derive(Default)]\npub(crate) struct TestScenarioBuilder {\n    maybe_stakes_setup: StakesType,\n    maybe_pricing_handling: Option<PricingHandling>,\n    maybe_initial_run_until: Option<RunUntilCondition>,\n    maybe_refund_handling: Option<RefundHandling>,\n    maybe_fee_handling: Option<FeeHandling>,\n    maybe_balance_hold_interval_override: Option<TimeDiff>,\n    maybe_minimum_era_height: Option<u64>,\n    minimum_delegation_rate: u8,\n}\n\nimpl TestScenarioBuilder {\n    pub fn new() -> Self {\n        Self::default()\n    }\n\n    pub async fn build(self, rng: &mut TestRng) -> TestScenario {\n        let TestScenarioBuilder {\n            maybe_stakes_setup,\n            maybe_pricing_handling,\n            maybe_initial_run_until,\n            maybe_refund_handling,\n            maybe_fee_handling,\n            maybe_balance_hold_interval_override,\n            maybe_minimum_era_height,\n            minimum_delegation_rate,\n        } = self;\n        let (secret_keys, stakes) = maybe_stakes_setup.unwrap_or({\n            /* Node 0 is effectively guaranteed to be the proposer. 
*/\n            let stakes: BTreeMap<PublicKey, (U512, U512)> = vec![\n                (\n                    ALICE_PUBLIC_KEY.clone(),\n                    (U512::from(u64::MAX), U512::from(u128::MAX)),\n                ),\n                (\n                    BOB_PUBLIC_KEY.clone(),\n                    (U512::from(u64::MAX), U512::from(1)),\n                ),\n            ]\n            .into_iter()\n            .collect();\n            let secret_keys = vec![ALICE_SECRET_KEY.clone(), BOB_SECRET_KEY.clone()];\n            (secret_keys, stakes)\n        });\n\n        let pricing_handling = maybe_pricing_handling.unwrap_or(PricingHandling::Fixed);\n        let initial_run_until =\n            maybe_initial_run_until.unwrap_or(RunUntilCondition::ConsensusInEra {\n                era_id: ERA_ONE,\n                within: ONE_MIN,\n            });\n        let config = ConfigsOverride::default().with_pricing_handling(pricing_handling);\n        let config = if let Some(refund_handling) = maybe_refund_handling {\n            config.with_refund_handling(refund_handling)\n        } else {\n            config\n        };\n        let config = if let Some(fee_handling) = maybe_fee_handling {\n            config.with_fee_handling(fee_handling)\n        } else {\n            config\n        };\n        let config =\n            if let Some(balance_hold_interval_override) = maybe_balance_hold_interval_override {\n                config.with_balance_hold_interval(balance_hold_interval_override)\n            } else {\n                config\n            };\n        let config = if let Some(minimum_era_height) = maybe_minimum_era_height {\n            config.with_minimum_era_height(minimum_era_height)\n        } else {\n            config\n        };\n        let config = config.with_minimum_delegation_rate(minimum_delegation_rate);\n        let child_rng = rng.create_child();\n        let fixture =\n            TestFixture::new_with_keys(child_rng, secret_keys, stakes, 
Some(config)).await;\n        let data = ScenarioDataInstance {\n            fixture,\n            block_height: 0_u64,\n        };\n\n        TestScenario {\n            state: TestScenarioState::PreSetup,\n            data,\n            initial_run_until,\n            exec_infos: BTreeMap::new(),\n            state_before_test: OnceCell::new(),\n        }\n    }\n\n    /// Sets refund handling config option.\n    pub fn with_refund_handling(mut self, refund_handling: RefundHandling) -> Self {\n        self.maybe_refund_handling = Some(refund_handling);\n        self\n    }\n\n    pub(crate) fn with_fee_handling(mut self, fee_handling: FeeHandling) -> Self {\n        self.maybe_fee_handling = Some(fee_handling);\n        self\n    }\n\n    pub(crate) fn with_balance_hold_interval(mut self, balance_hold_interval: TimeDiff) -> Self {\n        self.maybe_balance_hold_interval_override = Some(balance_hold_interval);\n        self\n    }\n\n    pub(crate) fn with_minimum_era_height(mut self, minimum_era_height: u64) -> Self {\n        self.maybe_minimum_era_height = Some(minimum_era_height);\n        self\n    }\n\n    pub(crate) fn with_minimum_delegation_rate(mut self, minimum_delegation_rate: u8) -> Self {\n        self.minimum_delegation_rate = minimum_delegation_rate;\n        self\n    }\n}\n\npub(super) fn build_wasm_transction(\n    chain_name: String,\n    from: &SecretKey,\n    pricing: PricingMode,\n) -> Transaction {\n    //These bytes are intentionally so large - this way they fall into \"WASM_LARGE\" category in the\n    // local chainspec Alternatively we could change the chainspec to have a different limits\n    // for the wasm categories, but that would require aligning all tests that use local\n    // chainspec\n    let module_bytes = Bytes::from(vec![1; 172_033]);\n    Transaction::from(\n        TransactionV1Builder::new_session(\n            false,\n            module_bytes,\n            TransactionRuntimeParams::VmCasperV1,\n        )\n        
.with_chain_name(chain_name)\n        .with_pricing_mode(pricing)\n        .with_initiator_addr(PublicKey::from(from))\n        .build()\n        .unwrap(),\n    )\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/tests/transaction_scenario.rs",
    "content": "mod asertions;\nmod utils;\nuse asertions::{\n    ExecResultCost, PublicKeyBalanceChange, PublicKeyTotalMeetsAvailable, TotalSupplyChange,\n    TransactionFailure, TransactionSuccessful,\n};\nuse casper_types::{\n    system::auction::{DelegatorKind, Reservation},\n    testing::TestRng,\n    FeeHandling, Gas, PricingMode, PublicKey, RefundHandling, TimeDiff, Transaction, U512,\n};\nuse num_rational::Ratio;\nuse utils::{build_wasm_transction, RunUntilCondition, TestScenarioBuilder};\n\nuse crate::{\n    reactor::main_reactor::tests::{\n        transaction_scenario::asertions::BalanceChange,\n        transactions::{\n            invalid_wasm_txn, ALICE_PUBLIC_KEY, ALICE_SECRET_KEY, BOB_PUBLIC_KEY, BOB_SECRET_KEY,\n            CHARLIE_PUBLIC_KEY, MIN_GAS_PRICE,\n        },\n        ONE_MIN,\n    },\n    testing::LARGE_WASM_LANE_ID,\n    types::transaction::transaction_v1_builder::TransactionV1Builder,\n};\n\n#[tokio::test]\nasync fn should_accept_transfer_without_id() {\n    let mut rng = TestRng::new();\n    let builder = TestScenarioBuilder::new();\n    let mut test_scenario = builder.build(&mut rng).await;\n\n    let transfer_amount = 2_500_000_001_u64; //This should be\n                                             //1 mote more than the native_transfer_minimum_motes in local\n                                             // chainspec that we use for tests\n    let chain_name = test_scenario.chain_name();\n    test_scenario.setup().await.unwrap();\n\n    let mut txn: Transaction = Transaction::from(\n        TransactionV1Builder::new_transfer(transfer_amount, None, CHARLIE_PUBLIC_KEY.clone(), None)\n            .unwrap()\n            .with_initiator_addr(ALICE_PUBLIC_KEY.clone())\n            .with_pricing_mode(PricingMode::Fixed {\n                gas_price_tolerance: 1,\n                additional_computation_factor: 0,\n            })\n            .with_chain_name(chain_name)\n            .build()\n            .unwrap(),\n    );\n    
txn.sign(&ALICE_SECRET_KEY);\n    let hash = txn.hash();\n    test_scenario.run(vec![txn]).await.unwrap();\n\n    test_scenario.assert(TransactionSuccessful::new(hash)).await;\n}\n\n#[tokio::test]\nasync fn should_native_transfer_nofee_norefund_fixed() {\n    const TRANSFER_AMOUNT: u64 = 30_000_000_000;\n    let mut rng = TestRng::new();\n    let builder = TestScenarioBuilder::new()\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee)\n        .with_balance_hold_interval(TimeDiff::from_seconds(5));\n    let mut test_scenario = builder.build(&mut rng).await;\n\n    let chain_name = test_scenario.chain_name();\n    test_scenario.setup().await.unwrap();\n\n    let mut txn: Transaction = Transaction::from(\n        TransactionV1Builder::new_transfer(\n            TRANSFER_AMOUNT,\n            None,\n            CHARLIE_PUBLIC_KEY.clone(),\n            Some(0xDEADBEEF),\n        )\n        .unwrap()\n        .with_initiator_addr(ALICE_PUBLIC_KEY.clone())\n        .with_pricing_mode(PricingMode::Fixed {\n            gas_price_tolerance: 1,\n            additional_computation_factor: 0,\n        })\n        .with_chain_name(chain_name)\n        .build()\n        .unwrap(),\n    );\n    txn.sign(&ALICE_SECRET_KEY);\n    let hash = txn.hash();\n    test_scenario.run(vec![txn]).await.unwrap();\n\n    let expected_transfer_gas: U512 = test_scenario.mint_const_transfer_cost().into();\n    test_scenario.assert(TransactionSuccessful::new(hash)).await;\n\n    test_scenario\n        .assert(ExecResultCost::new(\n            hash,\n            expected_transfer_gas,\n            Gas::new(expected_transfer_gas),\n        ))\n        .await;\n\n    let transfer_amount = U512::from(TRANSFER_AMOUNT);\n    let transfer_amount_and_gas: U512 = transfer_amount\n        .checked_add(expected_transfer_gas)\n        .expect(\"should math\");\n\n    test_scenario\n        .assert(PublicKeyBalanceChange::new(\n            
ALICE_PUBLIC_KEY.clone(),\n            BalanceChange::Down(transfer_amount),\n            BalanceChange::Down(transfer_amount_and_gas),\n        ))\n        .await;\n    //Charlie should have the transfer amount at his disposal\n    test_scenario\n        .assert(PublicKeyBalanceChange::new(\n            CHARLIE_PUBLIC_KEY.clone(),\n            BalanceChange::Up(transfer_amount),\n            BalanceChange::Up(transfer_amount),\n        ))\n        .await;\n    // Check if the hold is released.\n    let hold_release_block_height = test_scenario.get_block_height() + 9; // Block time is 1s.\n    test_scenario\n        .run_until(RunUntilCondition::BlockHeight {\n            block_height: hold_release_block_height,\n            within: ONE_MIN,\n        })\n        .await\n        .unwrap();\n    test_scenario\n        .assert(PublicKeyTotalMeetsAvailable::new(ALICE_PUBLIC_KEY.clone()))\n        .await;\n}\n\n#[tokio::test]\nasync fn erroneous_native_transfer_nofee_norefund_fixed() {\n    let mut rng = TestRng::new();\n    let builder = TestScenarioBuilder::new()\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee)\n        .with_balance_hold_interval(TimeDiff::from_seconds(5));\n    let mut test_scenario = builder.build(&mut rng).await;\n    let chain_name = test_scenario.chain_name();\n    test_scenario.setup().await.unwrap();\n\n    let transfer_amount = test_scenario.native_transfer_minimum_motes() + 100;\n\n    let mut txn: Transaction = Transaction::from(\n        TransactionV1Builder::new_transfer(transfer_amount, None, CHARLIE_PUBLIC_KEY.clone(), None)\n            .unwrap()\n            .with_initiator_addr(PublicKey::from(ALICE_SECRET_KEY.as_ref()))\n            .with_pricing_mode(PricingMode::Fixed {\n                gas_price_tolerance: 1,\n                additional_computation_factor: 0,\n            })\n            .with_chain_name(chain_name.clone())\n            .build()\n            .unwrap(),\n    
);\n    txn.sign(&ALICE_SECRET_KEY);\n    let hash = txn.hash();\n    test_scenario.run(vec![txn]).await.unwrap();\n\n    test_scenario.assert(TransactionSuccessful::new(hash)).await;\n\n    let mut txn: Transaction = Transaction::from(\n        TransactionV1Builder::new_transfer(\n            transfer_amount + 100,\n            None,\n            BOB_PUBLIC_KEY.clone(),\n            None,\n        )\n        .unwrap()\n        .with_initiator_addr(CHARLIE_PUBLIC_KEY.clone())\n        .with_pricing_mode(PricingMode::Fixed {\n            gas_price_tolerance: 1,\n            additional_computation_factor: 0,\n        })\n        .with_chain_name(chain_name)\n        .build()\n        .unwrap(),\n    );\n    txn.sign(&ALICE_SECRET_KEY);\n    let hash = txn.hash();\n    test_scenario.run(vec![txn]).await.unwrap();\n    test_scenario.assert(TransactionFailure::new(hash)).await; // transaction should have failed.\n    let expected_transfer_cost = test_scenario.mint_const_transfer_cost() as u64;\n    let expected_transfer_gas: U512 = expected_transfer_cost.into();\n    test_scenario\n        .assert(ExecResultCost::new(\n            hash,\n            expected_transfer_gas,\n            Gas::new(expected_transfer_gas),\n        ))\n        .await;\n    // Even though the transaction failed, a hold must still be in place for the transfer cost.\n    // The hold will show up in \"available\" being smaller than \"total\"\n\n    let transfer_amount_x = U512::from(transfer_amount);\n    let transfer_amount_y = transfer_amount_x\n        .checked_sub(U512::from(expected_transfer_cost))\n        .expect(\"should sub transfer from transfer amount\");\n\n    test_scenario\n        .assert(PublicKeyBalanceChange::new(\n            CHARLIE_PUBLIC_KEY.clone(),\n            BalanceChange::Up(transfer_amount_x),\n            BalanceChange::Up(transfer_amount_y),\n        ))\n        .await;\n}\n\n#[tokio::test]\nasync fn should_cancel_refund_for_erroneous_wasm() {\n    // as a punitive 
measure, refunds are not issued for erroneous wasms even\n    // if refunds are turned on.\n\n    let mut rng = TestRng::new();\n    let refund_ratio = Ratio::new(1, 2);\n    let builder = TestScenarioBuilder::new()\n        .with_refund_handling(RefundHandling::Refund { refund_ratio })\n        .with_fee_handling(FeeHandling::PayToProposer);\n    let mut test_scenario = builder.build(&mut rng).await;\n    let chain_name = test_scenario.chain_name();\n    test_scenario.setup().await.unwrap();\n    let mut txn = build_wasm_transction(\n        chain_name,\n        &BOB_SECRET_KEY,\n        PricingMode::Fixed {\n            gas_price_tolerance: 1,\n            additional_computation_factor: 0,\n        },\n    );\n    txn.sign(&BOB_SECRET_KEY);\n    let hash = txn.hash();\n    test_scenario.run(vec![txn]).await.unwrap();\n    test_scenario.assert(TransactionFailure::new(hash)).await; // transaction should have failed.\n    let expected_transaction_cost = 1_000_000_000_000_u64; // transaction gas limit for large wasms lane\n    test_scenario\n        .assert(ExecResultCost::new(\n            hash,\n            expected_transaction_cost.into(),\n            Gas::new(0),\n        ))\n        .await;\n\n    // transaction should have failed.\n    test_scenario.assert(TransactionFailure::new(hash)).await;\n\n    let x = BalanceChange::Down(U512::from(expected_transaction_cost));\n    // Bob gets no refund because the wasm errored\n    test_scenario\n        .assert(PublicKeyBalanceChange::new(BOB_PUBLIC_KEY.clone(), x, x))\n        .await;\n\n    let y = BalanceChange::Up(U512::from(expected_transaction_cost));\n    // Alice should get all the fee since it's set to pay to proposer\n    // AND Bob didn't get a refund\n    test_scenario\n        .assert(PublicKeyBalanceChange::new(ALICE_PUBLIC_KEY.clone(), y, y))\n        .await;\n}\n\n#[tokio::test]\nasync fn should_not_refund_erroneous_wasm_burn_fixed() {\n    let mut rng = TestRng::new();\n    let refund_ratio = 
Ratio::new(1, 2);\n    let builder = TestScenarioBuilder::new()\n        .with_refund_handling(RefundHandling::Burn { refund_ratio })\n        .with_fee_handling(FeeHandling::PayToProposer)\n        .with_minimum_era_height(5) // make the era longer so that the transaction doesn't land in the switch block.\n        .with_balance_hold_interval(TimeDiff::from_seconds(5));\n    let mut test_scenario = builder.build(&mut rng).await;\n    test_scenario.setup().await.unwrap();\n    let gas_limit = test_scenario\n        .get_gas_limit_for_lane(LARGE_WASM_LANE_ID) // The wasm should fall in this lane\n        .unwrap();\n    let txn = invalid_wasm_txn(\n        BOB_SECRET_KEY.clone(),\n        PricingMode::Fixed {\n            gas_price_tolerance: MIN_GAS_PRICE,\n            additional_computation_factor: 0,\n        },\n    );\n    let hash = txn.hash();\n\n    let exec_infos = test_scenario.run(vec![txn]).await.unwrap();\n\n    test_scenario.assert(TransactionFailure::new(hash)).await; // transaction should have failed.\n    test_scenario\n        .assert(ExecResultCost::new(hash, gas_limit.into(), Gas::new(0)))\n        .await;\n    // Supply shouldn't change (refund handling is burn, but the wasm was erroneous so we don't\n    // calculate refund)\n    test_scenario\n        .assert(TotalSupplyChange::new(0, exec_infos[0].block_height))\n        .await;\n    // Bob's transaction was invalid. He should get NO refund. 
But also -\n    // since no refund is calculated nothing will be burned (despite\n    // RefundHandling::Burn - we don't calculate refunds for erroneous wasms)\n    let gas_limit_x = BalanceChange::Down(U512::from(gas_limit));\n    test_scenario\n        .assert(PublicKeyBalanceChange::new(\n            BOB_PUBLIC_KEY.clone(),\n            gas_limit_x,\n            gas_limit_x,\n        ))\n        .await;\n    let gas_limit_y = BalanceChange::Up(U512::from(gas_limit));\n    // Alice gets payed for executing the transaction since it's set to pay to proposer\n    test_scenario\n        .assert(PublicKeyBalanceChange::new(\n            ALICE_PUBLIC_KEY.clone(),\n            gas_limit_y,\n            gas_limit_y,\n        ))\n        .await;\n}\n\n#[tokio::test]\nasync fn native_add_bid_should_fail_when_minimum_delegation_rate_not_met() {\n    let mut rng = TestRng::new();\n    let mut test_scenario = TestScenarioBuilder::new()\n        .with_minimum_delegation_rate(20)\n        .build(&mut rng)\n        .await;\n    let chain_name = test_scenario.chain_name();\n    test_scenario.setup().await.unwrap();\n    let mut txn: Transaction = Transaction::from(\n        TransactionV1Builder::new_add_bid(\n            ALICE_PUBLIC_KEY.clone(),\n            19,\n            100_000_000_000_u64,\n            None,\n            None,\n            None,\n        )\n        .unwrap()\n        .with_initiator_addr(PublicKey::from(ALICE_SECRET_KEY.as_ref()))\n        .with_pricing_mode(PricingMode::PaymentLimited {\n            payment_amount: 100_000_000_000_u64,\n            gas_price_tolerance: 1,\n            standard_payment: true,\n        })\n        .with_chain_name(chain_name.clone())\n        .build()\n        .unwrap(),\n    );\n    txn.sign(&ALICE_SECRET_KEY);\n    let hash = txn.hash();\n    test_scenario.run(vec![txn]).await.unwrap();\n    test_scenario\n        .assert(TransactionFailure::expected_error_message(\n            hash,\n            
\"ApiError::AuctionError(DelegationRateTooSmall) [64576]\",\n        ))\n        .await;\n}\n\n#[tokio::test]\nasync fn native_add_bid_should_fail_when_minimum_delegation_rate_not_met_in_reservation() {\n    let mut rng = TestRng::new();\n    let mut test_scenario = TestScenarioBuilder::new()\n        .with_minimum_delegation_rate(20)\n        .build(&mut rng)\n        .await;\n    let chain_name = test_scenario.chain_name();\n    test_scenario.setup().await.unwrap();\n    let mut txn: Transaction = Transaction::from(\n        TransactionV1Builder::new_add_bid(\n            ALICE_PUBLIC_KEY.clone(),\n            20,\n            100_000_000_000_u64,\n            None,\n            None,\n            Some(1),\n        )\n        .unwrap()\n        .with_initiator_addr(PublicKey::from(ALICE_SECRET_KEY.as_ref()))\n        .with_pricing_mode(PricingMode::PaymentLimited {\n            payment_amount: 100_000_000_000_u64,\n            gas_price_tolerance: 1,\n            standard_payment: true,\n        })\n        .with_chain_name(chain_name.clone())\n        .build()\n        .unwrap(),\n    );\n    txn.sign(&ALICE_SECRET_KEY);\n    let hash = txn.hash();\n    test_scenario.run(vec![txn]).await.unwrap();\n    test_scenario.assert(TransactionSuccessful::new(hash)).await;\n\n    // Try reserve a slot with to little delegation rate\n    let reservations = vec![Reservation::new(\n        ALICE_PUBLIC_KEY.clone(),\n        DelegatorKind::PublicKey(BOB_PUBLIC_KEY.clone()),\n        19,\n    )];\n    let mut txn: Transaction = Transaction::from(\n        TransactionV1Builder::new_reserve_slot(reservations)\n            .unwrap()\n            .with_initiator_addr(PublicKey::from(ALICE_SECRET_KEY.as_ref()))\n            .with_pricing_mode(PricingMode::PaymentLimited {\n                payment_amount: 100_000_000_000_u64,\n                gas_price_tolerance: 1,\n                standard_payment: true,\n            })\n            .with_chain_name(chain_name.clone())\n           
 .build()\n            .unwrap(),\n    );\n    txn.sign(&ALICE_SECRET_KEY);\n    let hash = txn.hash();\n    test_scenario.run(vec![txn]).await.unwrap();\n    test_scenario\n        .assert(TransactionFailure::expected_error_message(\n            hash,\n            \"Auction error: Delegation rate too small\",\n        ))\n        .await;\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/tests/transactions.rs",
    "content": "use super::{fixture::TestFixture, *};\nuse crate::{\n    testing::LARGE_WASM_LANE_ID,\n    types::{transaction::calculate_transaction_lane_for_transaction, MetaTransaction},\n};\nuse casper_storage::data_access_layer::{\n    AddressableEntityRequest, BalanceIdentifier, BalanceIdentifierPurseRequest,\n    BalanceIdentifierPurseResult, ProofHandling, QueryRequest, QueryResult,\n};\nuse casper_types::{\n    account::AccountHash,\n    addressable_entity::NamedKeyAddr,\n    runtime_args,\n    system::mint::{ARG_AMOUNT, ARG_TARGET},\n    AccessRights, AddressableEntity, Digest, EntityAddr, ExecutableDeployItem, ExecutionInfo,\n    TransactionRuntimeParams, URef, URefAddr, DEFAULT_TRANSFER_COST,\n};\nuse once_cell::sync::Lazy;\nuse std::collections::BTreeMap;\n\nuse crate::reactor::main_reactor::tests::{\n    configs_override::ConfigsOverride, fixture::standard_stakes, initial_stakes::InitialStakes,\n};\nuse casper_types::{\n    bytesrepr::{Bytes, ToBytes},\n    execution::ExecutionResultV1,\n};\n\npub(crate) static ALICE_SECRET_KEY: Lazy<Arc<SecretKey>> = Lazy::new(|| {\n    Arc::new(SecretKey::ed25519_from_bytes([0xAA; SecretKey::ED25519_LENGTH]).unwrap())\n});\npub(crate) static ALICE_PUBLIC_KEY: Lazy<PublicKey> =\n    Lazy::new(|| PublicKey::from(&*ALICE_SECRET_KEY.clone()));\n\npub(crate) static BOB_SECRET_KEY: Lazy<Arc<SecretKey>> = Lazy::new(|| {\n    Arc::new(SecretKey::ed25519_from_bytes([0xBB; SecretKey::ED25519_LENGTH]).unwrap())\n});\npub(crate) static BOB_PUBLIC_KEY: Lazy<PublicKey> =\n    Lazy::new(|| PublicKey::from(&*BOB_SECRET_KEY.clone()));\n\npub(crate) static CHARLIE_SECRET_KEY: Lazy<Arc<SecretKey>> = Lazy::new(|| {\n    Arc::new(SecretKey::ed25519_from_bytes([0xCC; SecretKey::ED25519_LENGTH]).unwrap())\n});\npub(crate) static CHARLIE_PUBLIC_KEY: Lazy<PublicKey> =\n    Lazy::new(|| PublicKey::from(&*CHARLIE_SECRET_KEY.clone()));\n\n// The amount of gas it takes to execute the generated do_nothing.wasm.\n// Passing this around as a 
constant is brittle and should be replaced\n// with a more sustainable solution in the future.\nconst DO_NOTHING_WASM_EXECUTION_GAS: u64 = 117720_u64;\npub(crate) const MIN_GAS_PRICE: u8 = 1;\nconst CHAIN_NAME: &str = \"single-transaction-test-net\";\n\nstruct SingleTransactionTestCase {\n    fixture: TestFixture,\n    alice_public_key: PublicKey,\n    bob_public_key: PublicKey,\n    charlie_public_key: PublicKey,\n}\n\n#[derive(Debug, PartialEq)]\npub(crate) struct BalanceAmount {\n    pub(crate) available: U512,\n    pub(crate) total: U512,\n}\n\nimpl BalanceAmount {\n    pub(crate) fn zero() -> Self {\n        Self {\n            available: U512::zero(),\n            total: U512::zero(),\n        }\n    }\n}\n\nimpl SingleTransactionTestCase {\n    fn default_test_config() -> ConfigsOverride {\n        ConfigsOverride::default()\n            .with_minimum_era_height(5) // make the era longer so that the transaction doesn't land in the switch block.\n            .with_balance_hold_interval(TimeDiff::from_seconds(5))\n            .with_chain_name(\"single-transaction-test-net\".to_string())\n    }\n\n    async fn new(\n        alice_secret_key: Arc<SecretKey>,\n        bob_secret_key: Arc<SecretKey>,\n        charlie_secret_key: Arc<SecretKey>,\n        network_config: Option<ConfigsOverride>,\n    ) -> Self {\n        let rng = TestRng::new();\n\n        let alice_public_key = PublicKey::from(&*alice_secret_key);\n        let bob_public_key = PublicKey::from(&*bob_secret_key);\n        let charlie_public_key = PublicKey::from(&*charlie_secret_key);\n\n        let stakes = standard_stakes(\n            alice_public_key.clone(),\n            bob_public_key.clone(),\n            Some(charlie_public_key.clone()),\n        );\n\n        let fixture = TestFixture::new_with_keys(\n            rng,\n            vec![alice_secret_key.clone(), bob_secret_key.clone()],\n            stakes,\n            network_config,\n        )\n        .await;\n        Self {\n            
fixture,\n            alice_public_key,\n            bob_public_key,\n            charlie_public_key,\n        }\n    }\n\n    async fn new_with_stakes(\n        alice_secret_key: Arc<SecretKey>,\n        bob_secret_key: Arc<SecretKey>,\n        charlie_secret_key: Arc<SecretKey>,\n        network_config: Option<ConfigsOverride>,\n        stakes: BTreeMap<PublicKey, (U512, U512)>,\n    ) -> Self {\n        let rng = TestRng::new();\n\n        let alice_public_key = PublicKey::from(&*alice_secret_key);\n        let bob_public_key = PublicKey::from(&*bob_secret_key);\n        let charlie_public_key = PublicKey::from(&*charlie_secret_key);\n\n        let fixture = TestFixture::new_with_keys(\n            rng,\n            vec![alice_secret_key.clone(), bob_secret_key.clone()],\n            stakes,\n            network_config,\n        )\n        .await;\n        Self {\n            fixture,\n            alice_public_key,\n            bob_public_key,\n            charlie_public_key,\n        }\n    }\n\n    fn chainspec(&self) -> &Chainspec {\n        &self.fixture.chainspec\n    }\n\n    fn get_balances(\n        &mut self,\n        block_height: Option<u64>,\n    ) -> (BalanceAmount, BalanceAmount, Option<BalanceAmount>) {\n        let alice_total_balance =\n            *get_balance(&self.fixture, &self.alice_public_key, block_height, true)\n                .total_balance()\n                .expect(\"Expected Alice to have a balance.\");\n        let bob_total_balance =\n            *get_balance(&self.fixture, &self.bob_public_key, block_height, true)\n                .total_balance()\n                .expect(\"Expected Bob to have a balance.\");\n\n        let alice_available_balance =\n            *get_balance(&self.fixture, &self.alice_public_key, block_height, false)\n                .available_balance()\n                .expect(\"Expected Alice to have a balance.\");\n        let bob_available_balance =\n            *get_balance(&self.fixture, 
&self.bob_public_key, block_height, false)\n                .available_balance()\n                .expect(\"Expected Bob to have a balance.\");\n\n        let charlie_available_balance =\n            get_balance(&self.fixture, &self.charlie_public_key, block_height, false)\n                .available_balance()\n                .copied();\n\n        let charlie_total_balance =\n            get_balance(&self.fixture, &self.charlie_public_key, block_height, true)\n                .available_balance()\n                .copied();\n\n        let charlie_amount = charlie_available_balance.map(|avail_balance| BalanceAmount {\n            available: avail_balance,\n            total: charlie_total_balance.unwrap(),\n        });\n\n        (\n            BalanceAmount {\n                available: alice_available_balance,\n                total: alice_total_balance,\n            },\n            BalanceAmount {\n                available: bob_available_balance,\n                total: bob_total_balance,\n            },\n            charlie_amount,\n        )\n    }\n\n    async fn send_transaction(\n        &mut self,\n        txn: Transaction,\n    ) -> (TransactionHash, u64, ExecutionResult) {\n        let txn_hash = txn.hash();\n\n        self.fixture.inject_transaction(txn).await;\n        self.fixture\n            .run_until_executed_transaction(&txn_hash, Duration::from_secs(30))\n            .await;\n\n        let (_node_id, runner) = self.fixture.network.nodes().iter().next().unwrap();\n        let exec_info = runner\n            .main_reactor()\n            .storage()\n            .read_execution_info(txn_hash)\n            .expect(\"Expected transaction to be included in a block.\");\n\n        (\n            txn_hash,\n            exec_info.block_height,\n            exec_info\n                .execution_result\n                .expect(\"Exec result should have been stored.\"),\n        )\n    }\n\n    fn get_total_supply(&mut self, block_height: Option<u64>) -> 
U512 {\n        let (_node_id, runner) = self.fixture.network.nodes().iter().next().unwrap();\n        let protocol_version = self.fixture.chainspec.protocol_version();\n        let height = block_height.unwrap_or(\n            runner\n                .main_reactor()\n                .storage()\n                .highest_complete_block_height()\n                .expect(\"missing highest completed block\"),\n        );\n        let state_hash = *runner\n            .main_reactor()\n            .storage()\n            .read_block_header_by_height(height, true)\n            .expect(\"failure to read block header\")\n            .unwrap()\n            .state_root_hash();\n\n        let total_supply_req = TotalSupplyRequest::new(state_hash, protocol_version);\n        let result = runner\n            .main_reactor()\n            .contract_runtime()\n            .data_access_layer()\n            .total_supply(total_supply_req);\n\n        if let TotalSupplyResult::Success { total_supply } = result {\n            total_supply\n        } else {\n            panic!(\"Can't get total supply\")\n        }\n    }\n\n    fn get_accumulate_purse_balance(\n        &mut self,\n        block_height: Option<u64>,\n        get_total: bool,\n    ) -> BalanceResult {\n        let (_node_id, runner) = self.fixture.network.nodes().iter().next().unwrap();\n        let protocol_version = self.fixture.chainspec.protocol_version();\n        let block_height = block_height.unwrap_or(\n            runner\n                .main_reactor()\n                .storage()\n                .highest_complete_block_height()\n                .expect(\"missing highest completed block\"),\n        );\n        let block_header = runner\n            .main_reactor()\n            .storage()\n            .read_block_header_by_height(block_height, true)\n            .expect(\"failure to read block header\")\n            .unwrap();\n        let state_hash = *block_header.state_root_hash();\n        let 
balance_handling = if get_total {\n            BalanceHandling::Total\n        } else {\n            BalanceHandling::Available\n        };\n        runner\n            .main_reactor()\n            .contract_runtime()\n            .data_access_layer()\n            .balance(BalanceRequest::new(\n                state_hash,\n                protocol_version,\n                BalanceIdentifier::Accumulate,\n                balance_handling,\n                ProofHandling::NoProofs,\n            ))\n    }\n}\n\nasync fn transfer_to_account<A: Into<U512>>(\n    fixture: &mut TestFixture,\n    amount: A,\n    from: &SecretKey,\n    to: PublicKey,\n    pricing: PricingMode,\n    transfer_id: Option<u64>,\n) -> (TransactionHash, u64, ExecutionResult) {\n    let chain_name = fixture.chainspec.network_config.name.clone();\n\n    let mut txn = Transaction::from(\n        TransactionV1Builder::new_transfer(amount, None, to, transfer_id)\n            .unwrap()\n            .with_initiator_addr(PublicKey::from(from))\n            .with_pricing_mode(pricing)\n            .with_chain_name(chain_name)\n            .build()\n            .unwrap(),\n    );\n\n    txn.sign(from);\n    let txn_hash = txn.hash();\n\n    fixture.inject_transaction(txn).await;\n\n    info!(\"transfer_to_account starting run_until_executed_transaction\");\n    fixture\n        .run_until_executed_transaction(&txn_hash, TEN_SECS)\n        .await;\n\n    info!(\"transfer_to_account finished run_until_executed_transaction\");\n    let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap();\n    let exec_info = runner\n        .main_reactor()\n        .storage()\n        .read_execution_info(txn_hash)\n        .expect(\"Expected transaction to be included in a block.\");\n\n    (\n        txn_hash,\n        exec_info.block_height,\n        exec_info\n            .execution_result\n            .expect(\"Exec result should have been stored.\"),\n    )\n}\n\nasync fn send_add_bid<A: Into<U512>>(\n    
fixture: &mut TestFixture,\n    amount: A,\n    signing_key: &SecretKey,\n    pricing: PricingMode,\n) -> (TransactionHash, u64, ExecutionResult) {\n    let chain_name = fixture.chainspec.network_config.name.clone();\n    let public_key = PublicKey::from(signing_key);\n\n    let mut txn = Transaction::from(\n        TransactionV1Builder::new_add_bid(public_key.clone(), 10, amount, None, None, None)\n            .unwrap()\n            .with_initiator_addr(public_key)\n            .with_pricing_mode(pricing)\n            .with_chain_name(chain_name)\n            .build()\n            .unwrap(),\n    );\n\n    txn.sign(signing_key);\n    let txn_hash = txn.hash();\n\n    fixture.inject_transaction(txn).await;\n\n    info!(\"transfer_to_account starting run_until_executed_transaction\");\n    fixture\n        .run_until_executed_transaction(&txn_hash, TEN_SECS)\n        .await;\n\n    info!(\"transfer_to_account finished run_until_executed_transaction\");\n    let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap();\n    let exec_info = runner\n        .main_reactor()\n        .storage()\n        .read_execution_info(txn_hash)\n        .expect(\"Expected transaction to be included in a block.\");\n\n    (\n        txn_hash,\n        exec_info.block_height,\n        exec_info\n            .execution_result\n            .expect(\"Exec result should have been stored.\"),\n    )\n}\n\nasync fn send_wasm_transaction(\n    fixture: &mut TestFixture,\n    from: &SecretKey,\n    pricing: PricingMode,\n) -> (TransactionHash, u64, ExecutionResult) {\n    let chain_name = fixture.chainspec.network_config.name.clone();\n\n    //These bytes are intentionally so large - this way they fall into \"WASM_LARGE\" category in the\n    // local chainspec Alternatively we could change the chainspec to have a different limits\n    // for the wasm categories, but that would require aligning all tests that use local\n    // chainspec\n    let module_bytes = Bytes::from(vec![1; 
172_033]);\n    let mut txn = Transaction::from(\n        TransactionV1Builder::new_session(\n            false,\n            module_bytes,\n            TransactionRuntimeParams::VmCasperV1,\n        )\n        .with_chain_name(chain_name)\n        .with_pricing_mode(pricing)\n        .with_initiator_addr(PublicKey::from(from))\n        .build()\n        .unwrap(),\n    );\n\n    txn.sign(from);\n    let txn_hash = txn.hash();\n\n    fixture.inject_transaction(txn).await;\n    fixture\n        .run_until_executed_transaction(&txn_hash, TEN_SECS)\n        .await;\n\n    let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap();\n    let exec_info = runner\n        .main_reactor()\n        .storage()\n        .read_execution_info(txn_hash)\n        .expect(\"Expected transaction to be included in a block.\");\n\n    (\n        txn_hash,\n        exec_info.block_height,\n        exec_info\n            .execution_result\n            .expect(\"Exec result should have been stored.\"),\n    )\n}\n\nfn get_main_purse(fixture: &mut TestFixture, account_key: &PublicKey) -> Result<URefAddr, ()> {\n    let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap();\n    let block_height = runner\n        .main_reactor()\n        .storage()\n        .highest_complete_block_height()\n        .expect(\"missing highest completed block\");\n    let block_header = runner\n        .main_reactor()\n        .storage()\n        .read_block_header_by_height(block_height, true)\n        .expect(\"failure to read block header\")\n        .unwrap();\n    let state_hash = *block_header.state_root_hash();\n    let protocol_version = fixture.chainspec.protocol_version();\n    let identifier = BalanceIdentifier::Account(account_key.to_account_hash());\n    let request = BalanceIdentifierPurseRequest::new(state_hash, protocol_version, identifier);\n    match runner\n        .main_reactor()\n        .contract_runtime()\n        .data_access_layer()\n        
.balance_purse(request)\n    {\n        BalanceIdentifierPurseResult::Success { purse_addr } => Ok(purse_addr),\n        BalanceIdentifierPurseResult::RootNotFound | BalanceIdentifierPurseResult::Failure(_) => {\n            Err(())\n        }\n    }\n}\n\npub(crate) fn get_balance(\n    fixture: &TestFixture,\n    account_key: &PublicKey,\n    block_height: Option<u64>,\n    get_total: bool,\n) -> BalanceResult {\n    let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap();\n    let protocol_version = fixture.chainspec.protocol_version();\n    let block_height = block_height.unwrap_or(\n        runner\n            .main_reactor()\n            .storage()\n            .highest_complete_block_height()\n            .expect(\"missing highest completed block\"),\n    );\n    let block_header = runner\n        .main_reactor()\n        .storage()\n        .read_block_header_by_height(block_height, true)\n        .expect(\"failure to read block header\")\n        .expect(\"should have header\");\n    let state_hash = *block_header.state_root_hash();\n    let balance_handling = if get_total {\n        BalanceHandling::Total\n    } else {\n        BalanceHandling::Available\n    };\n    runner\n        .main_reactor()\n        .contract_runtime()\n        .data_access_layer()\n        .balance(BalanceRequest::from_public_key(\n            state_hash,\n            protocol_version,\n            account_key.clone(),\n            balance_handling,\n            ProofHandling::NoProofs,\n        ))\n}\n\nfn get_bids(fixture: &mut TestFixture, block_height: Option<u64>) -> Option<Vec<BidKind>> {\n    let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap();\n    let block_height = block_height.unwrap_or(\n        runner\n            .main_reactor()\n            .storage()\n            .highest_complete_block_height()\n            .expect(\"missing highest completed block\"),\n    );\n    let block_header = runner\n        .main_reactor()\n        
.storage()\n        .read_block_header_by_height(block_height, true)\n        .expect(\"failure to read block header\")\n        .unwrap();\n    let state_hash = *block_header.state_root_hash();\n\n    runner\n        .main_reactor()\n        .contract_runtime()\n        .data_access_layer()\n        .bids(BidsRequest::new(state_hash))\n        .into_option()\n}\n\nfn get_payment_purse_balance(\n    fixture: &mut TestFixture,\n    block_height: Option<u64>,\n) -> BalanceResult {\n    let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap();\n    let protocol_version = fixture.chainspec.protocol_version();\n    let block_height = block_height.unwrap_or(\n        runner\n            .main_reactor()\n            .storage()\n            .highest_complete_block_height()\n            .expect(\"missing highest completed block\"),\n    );\n    let block_header = runner\n        .main_reactor()\n        .storage()\n        .read_block_header_by_height(block_height, true)\n        .expect(\"failure to read block header\")\n        .unwrap();\n    let state_hash = *block_header.state_root_hash();\n    runner\n        .main_reactor()\n        .contract_runtime()\n        .data_access_layer()\n        .balance(BalanceRequest::new(\n            state_hash,\n            protocol_version,\n            BalanceIdentifier::Payment,\n            BalanceHandling::Available,\n            ProofHandling::NoProofs,\n        ))\n}\n\nfn get_entity_addr_from_account_hash(\n    fixture: &mut TestFixture,\n    state_root_hash: Digest,\n    account_hash: AccountHash,\n) -> EntityAddr {\n    let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap();\n    let result = match runner\n        .main_reactor()\n        .contract_runtime()\n        .data_access_layer()\n        .query(QueryRequest::new(\n            state_root_hash,\n            Key::Account(account_hash),\n            vec![],\n        )) {\n        QueryResult::Success { value, .. 
} => value,\n        err => panic!(\"Expected QueryResult::Success but got {:?}\", err),\n    };\n\n    let key = if fixture.chainspec.core_config.enable_addressable_entity {\n        result\n            .as_cl_value()\n            .expect(\"should have a CLValue\")\n            .to_t::<Key>()\n            .expect(\"should have a Key\")\n    } else {\n        result.as_account().expect(\"must have account\");\n        Key::Account(account_hash)\n    };\n\n    match key {\n        Key::Account(account_has) => EntityAddr::Account(account_has.value()),\n        Key::Hash(hash) => EntityAddr::SmartContract(hash),\n        Key::AddressableEntity(addr) => addr,\n        _ => panic!(\"unexpected key\"),\n    }\n}\n\nfn get_entity(\n    fixture: &mut TestFixture,\n    state_root_hash: Digest,\n    entity_addr: EntityAddr,\n) -> AddressableEntity {\n    let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap();\n    let (key, is_contract) = if fixture.chainspec.core_config.enable_addressable_entity {\n        (Key::AddressableEntity(entity_addr), false)\n    } else {\n        match entity_addr {\n            EntityAddr::System(hash) | EntityAddr::SmartContract(hash) => (Key::Hash(hash), true),\n            EntityAddr::Account(hash) => (Key::Account(AccountHash::new(hash)), false),\n        }\n    };\n\n    let result = match runner\n        .main_reactor()\n        .contract_runtime()\n        .data_access_layer()\n        .query(QueryRequest::new(state_root_hash, key, vec![]))\n    {\n        QueryResult::Success { value, .. 
} => value,\n        err => panic!(\"Expected QueryResult::Success but got {:?}\", err),\n    };\n\n    if fixture.chainspec.core_config.enable_addressable_entity {\n        result\n            .into_addressable_entity()\n            .expect(\"should have an AddressableEntity\")\n    } else if is_contract {\n        AddressableEntity::from(result.as_contract().expect(\"must have contract\").clone())\n    } else {\n        AddressableEntity::from(result.as_account().expect(\"must have account\").clone())\n    }\n}\n\nfn get_entity_named_key(\n    fixture: &mut TestFixture,\n    state_root_hash: Digest,\n    entity_addr: EntityAddr,\n    named_key: &str,\n) -> Option<Key> {\n    if fixture.chainspec.core_config.enable_addressable_entity {\n        let key = Key::NamedKey(\n            NamedKeyAddr::new_from_string(entity_addr, named_key.to_owned())\n                .expect(\"should be valid NamedKeyAddr\"),\n        );\n\n        match query_global_state(fixture, state_root_hash, key) {\n            Some(val) => match &*val {\n                StoredValue::NamedKey(named_key) => {\n                    Some(named_key.get_key().expect(\"should have a Key\"))\n                }\n                value => panic!(\"Expected NamedKey but got {:?}\", value),\n            },\n            None => None,\n        }\n    } else {\n        match entity_addr {\n            EntityAddr::System(hash) | EntityAddr::SmartContract(hash) => {\n                match query_global_state(fixture, state_root_hash, Key::Hash(hash)) {\n                    Some(val) => match &*val {\n                        StoredValue::Contract(contract) => {\n                            contract.named_keys().get(named_key).copied()\n                        }\n                        value => panic!(\"Expected Contract but got {:?}\", value),\n                    },\n                    None => None,\n                }\n            }\n            EntityAddr::Account(hash) => {\n                match 
query_global_state(\n                    fixture,\n                    state_root_hash,\n                    Key::Account(AccountHash::new(hash)),\n                ) {\n                    Some(val) => match &*val {\n                        StoredValue::Account(account) => {\n                            account.named_keys().get(named_key).copied()\n                        }\n                        value => panic!(\"Expected Account but got {:?}\", value),\n                    },\n                    None => None,\n                }\n            }\n        }\n    }\n}\n\nfn query_global_state(\n    fixture: &mut TestFixture,\n    state_root_hash: Digest,\n    key: Key,\n) -> Option<Box<StoredValue>> {\n    let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap();\n    match runner\n        .main_reactor()\n        .contract_runtime()\n        .data_access_layer()\n        .query(QueryRequest::new(state_root_hash, key, vec![]))\n    {\n        QueryResult::Success { value, .. 
} => Some(value),\n        _err => None,\n    }\n}\n\nfn get_entity_by_account_hash(\n    fixture: &mut TestFixture,\n    state_root_hash: Digest,\n    account_hash: AccountHash,\n) -> AddressableEntity {\n    let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap();\n    let key = if fixture.chainspec.core_config.enable_addressable_entity {\n        Key::AddressableEntity(EntityAddr::Account(account_hash.value()))\n    } else {\n        Key::Account(account_hash)\n    };\n    runner\n        .main_reactor()\n        .contract_runtime()\n        .data_access_layer()\n        .addressable_entity(AddressableEntityRequest::new(state_root_hash, key))\n        .into_option()\n        .unwrap_or_else(|| {\n            panic!(\n                \"Expected to find an entity: root_hash {:?}, account hash {:?}\",\n                state_root_hash, account_hash\n            )\n        })\n}\n\npub(crate) fn assert_exec_result_cost(\n    exec_result: ExecutionResult,\n    expected_cost: U512,\n    expected_consumed_gas: Gas,\n    msg: &str,\n) {\n    match exec_result {\n        ExecutionResult::V2(exec_result_v2) => {\n            assert_eq!(exec_result_v2.cost, expected_cost, \"{} cost\", msg);\n            assert_eq!(\n                exec_result_v2.consumed, expected_consumed_gas,\n                \"{} consumed\",\n                msg\n            );\n        }\n        _ => {\n            panic!(\"Unexpected exec result version.\")\n        }\n    }\n}\n\n// Returns `true` is the execution result is a success.\npub fn exec_result_is_success(exec_result: &ExecutionResult) -> bool {\n    match exec_result {\n        ExecutionResult::V2(execution_result_v2) => execution_result_v2.error_message.is_none(),\n        ExecutionResult::V1(ExecutionResultV1::Success { .. }) => true,\n        ExecutionResult::V1(ExecutionResultV1::Failure { .. 
}) => false,\n    }\n}\n\n#[tokio::test]\nasync fn should_accept_transfer_without_id() {\n    let initial_stakes = InitialStakes::FromVec(vec![u128::MAX, 1]);\n\n    let config = ConfigsOverride::default().with_pricing_handling(PricingHandling::Fixed);\n    let mut fixture = TestFixture::new(initial_stakes, Some(config)).await;\n    let transfer_amount = fixture\n        .chainspec\n        .transaction_config\n        .native_transfer_minimum_motes\n        + 100;\n\n    let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key);\n    let charlie_secret_key = Arc::new(SecretKey::random(&mut fixture.rng));\n\n    fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await;\n\n    let (_, _, result) = transfer_to_account(\n        &mut fixture,\n        transfer_amount,\n        &alice_secret_key,\n        PublicKey::from(&*charlie_secret_key),\n        PricingMode::Fixed {\n            gas_price_tolerance: 1,\n            additional_computation_factor: 0,\n        },\n        None,\n    )\n    .await;\n\n    assert!(exec_result_is_success(&result))\n}\n\n#[tokio::test]\nasync fn should_native_transfer_nofee_norefund_fixed() {\n    const TRANSFER_AMOUNT: u64 = 30_000_000_000;\n\n    let initial_stakes = InitialStakes::FromVec(vec![u128::MAX, 1]);\n\n    let config = ConfigsOverride::default()\n        .with_pricing_handling(PricingHandling::Fixed)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee)\n        .with_balance_hold_interval(TimeDiff::from_seconds(5));\n\n    let mut fixture = TestFixture::new(initial_stakes, Some(config)).await;\n\n    let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key);\n    let alice_public_key = PublicKey::from(&*alice_secret_key);\n    let charlie_secret_key = Arc::new(SecretKey::random(&mut fixture.rng));\n    let charlie_public_key = PublicKey::from(&*charlie_secret_key);\n\n    fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await;\n\n    let 
alice_initial_balance = *get_balance(&fixture, &alice_public_key, None, true)\n        .available_balance()\n        .expect(\"Expected Alice to have a balance.\");\n\n    let (_txn_hash, block_height, exec_result) = transfer_to_account(\n        &mut fixture,\n        TRANSFER_AMOUNT,\n        &alice_secret_key,\n        PublicKey::from(&*charlie_secret_key),\n        PricingMode::Fixed {\n            gas_price_tolerance: 1,\n            additional_computation_factor: 0,\n        },\n        Some(0xDEADBEEF),\n    )\n    .await;\n\n    let expected_transfer_gas = fixture\n        .chainspec\n        .system_costs_config\n        .mint_costs()\n        .transfer\n        .into();\n    let expected_transfer_cost = expected_transfer_gas; // since we set gas_price_tolerance to 1.\n\n    assert_exec_result_cost(\n        exec_result,\n        expected_transfer_cost,\n        Gas::new(expected_transfer_gas),\n        \"transfer_cost_fixed_price_no_fee_no_refund\",\n    );\n\n    let alice_available_balance =\n        get_balance(&fixture, &alice_public_key, Some(block_height), false);\n    let alice_total_balance = get_balance(&fixture, &alice_public_key, Some(block_height), true);\n\n    // since FeeHandling is set to NoFee, we expect that there's a hold on Alice's balance for the\n    // cost of the transfer. 
The total balance of Alice now should be the initial balance - the\n    // amount transferred to Charlie.\n    let alice_expected_total_balance = alice_initial_balance - TRANSFER_AMOUNT;\n    // The available balance is the initial balance - the amount transferred to Charlie - the hold\n    // for the transfer cost.\n    let alice_expected_available_balance = alice_expected_total_balance - expected_transfer_cost;\n\n    assert_eq!(\n        alice_total_balance\n            .available_balance()\n            .expect(\"Expected Alice to have a balance\")\n            .clone(),\n        alice_expected_total_balance\n    );\n    assert_eq!(\n        alice_available_balance\n            .available_balance()\n            .expect(\"Expected Alice to have a balance\")\n            .clone(),\n        alice_expected_available_balance\n    );\n\n    let charlie_balance = get_balance(&fixture, &charlie_public_key, Some(block_height), false);\n    assert_eq!(\n        charlie_balance\n            .available_balance()\n            .expect(\"Expected Charlie to have a balance\")\n            .clone(),\n        TRANSFER_AMOUNT.into()\n    );\n\n    // Check if the hold is released.\n    let hold_release_block_height = block_height + 8; // Block time is 1s.\n    fixture\n        .run_until_block_height(hold_release_block_height, ONE_MIN)\n        .await;\n\n    let alice_available_balance = get_balance(\n        &fixture,\n        &alice_public_key,\n        Some(hold_release_block_height),\n        false,\n    );\n    let alice_total_balance = get_balance(\n        &fixture,\n        &alice_public_key,\n        Some(hold_release_block_height),\n        true,\n    );\n\n    assert_eq!(\n        alice_available_balance.available_balance(),\n        alice_total_balance.available_balance()\n    );\n}\n\n#[tokio::test]\nasync fn erroneous_native_transfer_nofee_norefund_fixed() {\n    let initial_stakes = InitialStakes::FromVec(vec![u128::MAX, 1]);\n\n    let config = 
ConfigsOverride::default()\n        .with_pricing_handling(PricingHandling::Fixed)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee)\n        .with_balance_hold_interval(TimeDiff::from_seconds(5));\n\n    let mut fixture = TestFixture::new(initial_stakes, Some(config)).await;\n\n    let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key);\n    let charlie_secret_key = Arc::new(SecretKey::random(&mut fixture.rng));\n    let charlie_public_key = PublicKey::from(&*charlie_secret_key);\n\n    fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await;\n\n    let transfer_amount = fixture\n        .chainspec\n        .transaction_config\n        .native_transfer_minimum_motes\n        + 100;\n\n    // Transfer some token to Charlie.\n    let (_txn_hash, _block, exec_result) = transfer_to_account(\n        &mut fixture,\n        transfer_amount,\n        &alice_secret_key,\n        PublicKey::from(&*charlie_secret_key),\n        PricingMode::Fixed {\n            gas_price_tolerance: 1,\n            additional_computation_factor: 0,\n        },\n        None,\n    )\n    .await;\n    assert!(exec_result_is_success(&exec_result));\n\n    // Attempt to transfer more than Charlie has to Bob.\n    let bob_secret_key = Arc::new(SecretKey::random(&mut fixture.rng));\n    let (_txn_hash, block_height, exec_result) = transfer_to_account(\n        &mut fixture,\n        transfer_amount + 100,\n        &charlie_secret_key,\n        PublicKey::from(&*bob_secret_key),\n        PricingMode::Fixed {\n            gas_price_tolerance: 1,\n            additional_computation_factor: 0,\n        },\n        None,\n    )\n    .await;\n    assert!(!exec_result_is_success(&exec_result)); // transaction should have failed.\n\n    let expected_transfer_gas = fixture\n        .chainspec\n        .system_costs_config\n        .mint_costs()\n        .transfer\n        .into();\n    let expected_transfer_cost = 
expected_transfer_gas; // since we set gas_price_tolerance to 1.\n\n    assert_exec_result_cost(\n        exec_result,\n        expected_transfer_cost,\n        Gas::new(expected_transfer_gas),\n        \"failed_transfer_cost_fixed_price_no_fee_no_refund\",\n    );\n\n    // Even though the transaction failed, a hold must still be in place for the transfer cost.\n    let charlie_available_balance =\n        get_balance(&fixture, &charlie_public_key, Some(block_height), false);\n    assert_eq!(\n        charlie_available_balance\n            .available_balance()\n            .expect(\"Expected Charlie to have a balance\")\n            .clone(),\n        U512::from(transfer_amount) - expected_transfer_cost\n    );\n}\n\n#[tokio::test]\nasync fn should_native_transfer_nofee_norefund_payment_limited() {\n    const TRANSFER_AMOUNT: u64 = 30_000_000_000;\n\n    let initial_stakes = InitialStakes::FromVec(vec![u128::MAX, 1]);\n\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::PaymentLimited)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee);\n\n    let mut fixture = TestFixture::new(initial_stakes, Some(config)).await;\n\n    let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key);\n    let alice_public_key = PublicKey::from(&*alice_secret_key);\n    let charlie_secret_key = Arc::new(SecretKey::random(&mut fixture.rng));\n    let charlie_public_key = PublicKey::from(&*charlie_secret_key);\n\n    fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await;\n\n    let alice_initial_balance = *get_balance(&fixture, &alice_public_key, None, true)\n        .available_balance()\n        .expect(\"Expected Alice to have a balance.\");\n\n    const TRANSFER_PAYMENT: u64 = 100_000_000;\n\n    // This transaction should be included since the tolerance is above the min gas price.\n    let (_txn_hash, block_height, exec_result) = 
transfer_to_account(\n        &mut fixture,\n        TRANSFER_AMOUNT,\n        &alice_secret_key,\n        PublicKey::from(&*charlie_secret_key),\n        PricingMode::PaymentLimited {\n            payment_amount: TRANSFER_PAYMENT,\n            gas_price_tolerance: MIN_GAS_PRICE + 1,\n            standard_payment: true,\n        },\n        None,\n    )\n    .await;\n\n    let expected_transfer_cost = TRANSFER_PAYMENT * MIN_GAS_PRICE as u64;\n\n    assert!(exec_result_is_success(&exec_result)); // transaction should have succeeded.\n    assert_exec_result_cost(\n        exec_result,\n        expected_transfer_cost.into(),\n        Gas::new(TRANSFER_PAYMENT),\n        \"transfer_cost_payment_limited_price_no_fee_no_refund\",\n    );\n\n    let alice_available_balance =\n        get_balance(&fixture, &alice_public_key, Some(block_height), false);\n    let alice_total_balance = get_balance(&fixture, &alice_public_key, Some(block_height), true);\n\n    // since FeeHandling is set to NoFee, we expect that there's a hold on Alice's balance for the\n    // cost of the transfer. 
The total balance of Alice now should be the initial balance - the\n    // amount transferred to Charlie.\n    let alice_expected_total_balance = alice_initial_balance - TRANSFER_AMOUNT;\n    // The available balance is the initial balance - the amount transferred to Charlie - the hold\n    // for the transfer cost.\n    let alice_expected_available_balance = alice_expected_total_balance - expected_transfer_cost;\n\n    assert_eq!(\n        alice_total_balance\n            .available_balance()\n            .expect(\"Expected Alice to have a balance\")\n            .clone(),\n        alice_expected_total_balance\n    );\n    assert_eq!(\n        alice_available_balance\n            .available_balance()\n            .expect(\"Expected Alice to have a balance\")\n            .clone(),\n        alice_expected_available_balance\n    );\n\n    let charlie_balance = get_balance(&fixture, &charlie_public_key, Some(block_height), false);\n    assert_eq!(\n        charlie_balance\n            .available_balance()\n            .expect(\"Expected Charlie to have a balance\")\n            .clone(),\n        TRANSFER_AMOUNT.into()\n    );\n\n    // Check if the hold is released.\n    let hold_release_block_height = block_height + 8; // Block time is 1s.\n    fixture\n        .run_until_block_height(hold_release_block_height, ONE_MIN)\n        .await;\n\n    let alice_available_balance = get_balance(\n        &fixture,\n        &alice_public_key,\n        Some(hold_release_block_height),\n        false,\n    );\n    let alice_total_balance = get_balance(\n        &fixture,\n        &alice_public_key,\n        Some(hold_release_block_height),\n        true,\n    );\n\n    assert_eq!(\n        alice_available_balance.available_balance(),\n        alice_total_balance.available_balance()\n    );\n}\n\n#[tokio::test]\nasync fn should_native_auction_with_nofee_norefund_payment_limited() {\n    let initial_stakes = InitialStakes::FromVec(vec![u128::MAX, 1]);\n\n    let config = 
SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::PaymentLimited)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee);\n\n    let mut fixture = TestFixture::new(initial_stakes, Some(config)).await;\n\n    let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key);\n    let alice_public_key = PublicKey::from(&*alice_secret_key);\n\n    fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await;\n\n    let alice_initial_balance = *get_balance(&fixture, &alice_public_key, None, true)\n        .available_balance()\n        .expect(\"Expected Alice to have a balance.\");\n\n    const BID_PAYMENT_AMOUNT: u64 = 2_500_000_000;\n\n    let bid_amount = fixture.chainspec.core_config.minimum_bid_amount + 1;\n    // This transaction should be included since the tolerance is above the min gas price.\n    let (_txn_hash, block_height, exec_result) = send_add_bid(\n        &mut fixture,\n        bid_amount,\n        &alice_secret_key,\n        PricingMode::PaymentLimited {\n            payment_amount: BID_PAYMENT_AMOUNT,\n            gas_price_tolerance: MIN_GAS_PRICE + 1,\n            standard_payment: true,\n        },\n    )\n    .await;\n\n    let expected_add_bid_consumed = fixture\n        .chainspec\n        .system_costs_config\n        .auction_costs()\n        .add_bid;\n    let expected_add_bid_cost = expected_add_bid_consumed * MIN_GAS_PRICE as u64;\n\n    assert!(exec_result_is_success(&exec_result)); // transaction should have succeeded.\n\n    let transfers = exec_result.transfers();\n    assert!(!transfers.is_empty(), \"transfers should not be empty\");\n    assert_eq!(transfers.len(), 1, \"transfers should have 1 entry\");\n    let transfer = transfers.first().expect(\"transfer entry should exist\");\n    let transfer_amount = transfer.amount();\n    assert_eq!(\n        transfer_amount,\n        U512::from(bid_amount),\n        \"transfer amount 
should match the bid amount\"\n    );\n\n    assert_exec_result_cost(\n        exec_result,\n        expected_add_bid_cost.into(),\n        expected_add_bid_consumed.into(),\n        \"add_bid_with_classic_pricing_no_fee_no_refund\",\n    );\n\n    let alice_available_balance =\n        get_balance(&fixture, &alice_public_key, Some(block_height), false);\n    let alice_total_balance = get_balance(&fixture, &alice_public_key, Some(block_height), true);\n\n    // since FeeHandling is set to NoFee, we expect that there's a hold on Alice's balance for the\n    // cost of the transfer. The total balance of Alice now should be the initial balance - the\n    // amount transferred to Charlie.\n    let alice_expected_total_balance = alice_initial_balance - bid_amount;\n    // The available balance is the initial balance - the amount transferred to Charlie - the hold\n    // for the transfer cost.\n    let alice_expected_available_balance = alice_expected_total_balance - expected_add_bid_cost;\n\n    assert_eq!(\n        alice_total_balance\n            .available_balance()\n            .expect(\"Expected Alice to have a balance\")\n            .clone(),\n        alice_expected_total_balance\n    );\n    assert_eq!(\n        alice_available_balance\n            .available_balance()\n            .expect(\"Expected Alice to have a balance\")\n            .clone(),\n        alice_expected_available_balance\n    );\n}\n\n#[tokio::test]\n#[should_panic = \"within 10 seconds\"]\nasync fn should_reject_threshold_below_min_gas_price() {\n    const TRANSFER_AMOUNT: u64 = 30_000_000_000;\n\n    let initial_stakes = InitialStakes::FromVec(vec![u128::MAX, 1]);\n\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::PaymentLimited)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee);\n\n    let mut fixture = TestFixture::new(initial_stakes, Some(config)).await;\n\n    let 
alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key);\n    let charlie_secret_key = Arc::new(SecretKey::random(&mut fixture.rng));\n\n    fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await;\n\n    // This transaction should NOT be included since the tolerance is below the min gas price.\n    let (_, _, _) = transfer_to_account(\n        &mut fixture,\n        TRANSFER_AMOUNT,\n        &alice_secret_key,\n        PublicKey::from(&*charlie_secret_key),\n        PricingMode::PaymentLimited {\n            payment_amount: 1000,\n            gas_price_tolerance: MIN_GAS_PRICE - 1,\n            standard_payment: true,\n        },\n        None,\n    )\n    .await;\n}\n\n#[tokio::test]\nasync fn should_not_overcharge_native_operations_fixed() {\n    let initial_stakes = InitialStakes::FromVec(vec![u128::MAX, 1]); // Node 0 is effectively guaranteed to be the proposer.\n\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::Fixed)\n        .with_refund_handling(RefundHandling::Refund {\n            refund_ratio: Ratio::new(1, 2),\n        })\n        .with_fee_handling(FeeHandling::PayToProposer);\n\n    let mut fixture = TestFixture::new(initial_stakes, Some(config)).await;\n\n    let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key);\n    let alice_public_key = PublicKey::from(&*alice_secret_key);\n    let bob_secret_key = Arc::clone(&fixture.node_contexts[1].secret_key);\n    let bob_public_key = PublicKey::from(&*bob_secret_key);\n    let charlie_secret_key = Arc::new(SecretKey::random(&mut fixture.rng));\n    let charlie_public_key = PublicKey::from(&*charlie_secret_key);\n\n    fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await;\n\n    let bob_initial_balance = *get_balance(&fixture, &bob_public_key, None, true)\n        .total_balance()\n        .expect(\"Expected Bob to have a balance.\");\n    let alice_initial_balance = *get_balance(&fixture, 
&alice_public_key, None, true)\n        .total_balance()\n        .expect(\"Expected Alice to have a balance.\");\n\n    let transfer_amount = fixture\n        .chainspec\n        .transaction_config\n        .native_transfer_minimum_motes\n        + 100;\n\n    let (_txn_hash, block_height, exec_result) = transfer_to_account(\n        &mut fixture,\n        transfer_amount,\n        &bob_secret_key,\n        PublicKey::from(&*charlie_secret_key),\n        PricingMode::Fixed {\n            gas_price_tolerance: MIN_GAS_PRICE,\n            additional_computation_factor: 0,\n        },\n        None,\n    )\n    .await;\n\n    assert!(exec_result_is_success(&exec_result)); // transaction should have succeeded.\n\n    let expected_transfer_gas: u64 = fixture\n        .chainspec\n        .system_costs_config\n        .mint_costs()\n        .transfer\n        .into();\n    let expected_transfer_cost = expected_transfer_gas * MIN_GAS_PRICE as u64;\n    assert_exec_result_cost(\n        exec_result,\n        expected_transfer_cost.into(),\n        expected_transfer_gas.into(),\n        \"cost should equal consumed\",\n    );\n\n    let bob_available_balance = *get_balance(&fixture, &bob_public_key, Some(block_height), false)\n        .available_balance()\n        .expect(\"Expected Bob to have a balance\");\n    let bob_total_balance = *get_balance(&fixture, &bob_public_key, Some(block_height), true)\n        .total_balance()\n        .expect(\"Expected Bob to have a balance\");\n\n    let alice_available_balance =\n        *get_balance(&fixture, &alice_public_key, Some(block_height), false)\n            .available_balance()\n            .expect(\"Expected Alice to have a balance\");\n    let alice_total_balance = *get_balance(&fixture, &alice_public_key, Some(block_height), true)\n        .total_balance()\n        .expect(\"Expected Alice to have a balance\");\n\n    // Bob shouldn't get a refund since there is no refund for native transfers.\n    let 
bob_expected_total_balance = bob_initial_balance - transfer_amount - expected_transfer_cost;\n    let bob_expected_available_balance = bob_expected_total_balance;\n\n    // Alice should get the full fee since there is no refund for native transfers.\n    let alice_expected_total_balance = alice_initial_balance + expected_transfer_cost;\n    let alice_expected_available_balance = alice_expected_total_balance;\n\n    let charlie_balance = *get_balance(&fixture, &charlie_public_key, Some(block_height), false)\n        .available_balance()\n        .expect(\"Expected Charlie to have a balance\");\n    assert_eq!(charlie_balance.clone(), transfer_amount.into());\n\n    assert_eq!(\n        bob_available_balance.clone(),\n        bob_expected_available_balance\n    );\n\n    assert_eq!(bob_total_balance.clone(), bob_expected_total_balance);\n\n    assert_eq!(\n        alice_available_balance.clone(),\n        alice_expected_available_balance\n    );\n\n    assert_eq!(alice_total_balance.clone(), alice_expected_total_balance);\n}\n\n#[tokio::test]\nasync fn should_cancel_refund_for_erroneous_wasm() {\n    // as a punitive measure, refunds are not issued for erroneous wasms even\n    // if refunds are turned on.\n\n    let initial_stakes = InitialStakes::FromVec(vec![u128::MAX, 1]); // Node 0 is effectively guaranteed to be the proposer.\n\n    let refund_ratio = Ratio::new(1, 2);\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::Fixed)\n        .with_refund_handling(RefundHandling::Refund { refund_ratio })\n        .with_fee_handling(FeeHandling::PayToProposer);\n\n    let mut fixture = TestFixture::new(initial_stakes, Some(config)).await;\n\n    let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key);\n    let alice_public_key = PublicKey::from(&*alice_secret_key);\n    let bob_secret_key = Arc::clone(&fixture.node_contexts[1].secret_key);\n    let bob_public_key = 
PublicKey::from(&*bob_secret_key);\n\n    fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await;\n\n    let bob_initial_balance = *get_balance(&fixture, &bob_public_key, None, true)\n        .total_balance()\n        .expect(\"Expected Bob to have a balance.\");\n    let alice_initial_balance = *get_balance(&fixture, &alice_public_key, None, true)\n        .total_balance()\n        .expect(\"Expected Alice to have a balance.\");\n\n    let (_txn_hash, block_height, exec_result) = send_wasm_transaction(\n        &mut fixture,\n        &bob_secret_key,\n        PricingMode::Fixed {\n            gas_price_tolerance: MIN_GAS_PRICE,\n            additional_computation_factor: 0,\n        },\n    )\n    .await;\n\n    assert!(!exec_result_is_success(&exec_result)); // transaction should not succeed because the wasm bytes are invalid.\n\n    let expected_transaction_gas: u64 = fixture\n        .chainspec\n        .get_max_gas_limit_by_category(LARGE_WASM_LANE_ID);\n    let expected_transaction_cost = expected_transaction_gas * MIN_GAS_PRICE as u64;\n    assert_exec_result_cost(\n        exec_result,\n        expected_transaction_cost.into(),\n        Gas::new(0),\n        \"wasm_transaction_fees_are_refunded\",\n    );\n\n    let bob_available_balance = *get_balance(&fixture, &bob_public_key, Some(block_height), false)\n        .available_balance()\n        .expect(\"Expected Bob to have a balance\");\n    let bob_total_balance = *get_balance(&fixture, &bob_public_key, Some(block_height), true)\n        .total_balance()\n        .expect(\"Expected Bob to have a balance\");\n\n    let alice_available_balance =\n        *get_balance(&fixture, &alice_public_key, Some(block_height), false)\n            .available_balance()\n            .expect(\"Expected Alice to have a balance\");\n    let alice_total_balance = *get_balance(&fixture, &alice_public_key, Some(block_height), true)\n        .total_balance()\n        .expect(\"Expected Alice to have a balance\");\n\n    // 
Bob gets no refund because the wasm errored\n    let bob_expected_total_balance = bob_initial_balance - expected_transaction_cost;\n    let bob_expected_available_balance = bob_expected_total_balance;\n\n    // Alice should get the all the fee since it's set to pay to proposer\n    // AND Bob didn't get a refund\n    let alice_expected_total_balance = alice_initial_balance + expected_transaction_cost;\n    let alice_expected_available_balance = alice_expected_total_balance;\n\n    assert_eq!(\n        bob_available_balance.clone(),\n        bob_expected_available_balance\n    );\n\n    assert_eq!(bob_total_balance.clone(), bob_expected_total_balance);\n\n    assert_eq!(\n        alice_available_balance.clone(),\n        alice_expected_available_balance\n    );\n\n    assert_eq!(alice_total_balance.clone(), alice_expected_total_balance);\n}\n\n#[tokio::test]\nasync fn should_refund_ratio_of_unconsumed_gas_fixed() {\n    let refund_ratio = Ratio::new(1, 3);\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::Fixed)\n        .with_refund_handling(RefundHandling::Refund { refund_ratio })\n        .with_fee_handling(FeeHandling::PayToProposer);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n    let txn = valid_wasm_txn(\n        BOB_SECRET_KEY.clone(),\n        PricingMode::Fixed {\n            gas_price_tolerance: MIN_GAS_PRICE,\n            additional_computation_factor: 0,\n        },\n    );\n    let lane_id = calculate_transaction_lane_for_transaction(&txn, test.chainspec()).unwrap();\n    let gas_limit = txn\n        .gas_limit(test.chainspec(), lane_id)\n        .unwrap()\n        .value()\n        .as_u64();\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let (alice_initial_balance, 
bob_initial_balance, _) = test.get_balances(None);\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n\n    assert!(exec_result_is_success(&exec_result));\n\n    let expected_transaction_cost = gas_limit * MIN_GAS_PRICE as u64;\n    assert_exec_result_cost(\n        exec_result,\n        expected_transaction_cost.into(),\n        Gas::new(DO_NOTHING_WASM_EXECUTION_GAS), /* Magic value, this is the amount of gas\n                                                  * consumed by do_nothing.wasm */\n        \"wasm_transaction_fees_are_refunded\",\n    );\n\n    let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height));\n\n    // Bob should get 1/3 of the cost for the unspent gas. Since this transaction consumed 0\n    // gas, the unspent gas is equal to the limit.\n    let refund_amount: u64 = (refund_ratio\n        * Ratio::from(\n            expected_transaction_cost - DO_NOTHING_WASM_EXECUTION_GAS * MIN_GAS_PRICE as u64,\n        ))\n    .to_integer();\n\n    let bob_expected_total_balance =\n        bob_initial_balance.total.as_u64() - expected_transaction_cost + refund_amount;\n    let bob_expected_available_balance = bob_expected_total_balance;\n\n    // Alice should get the non-refunded part of the fee since it's set to pay to proposer\n    let alice_expected_total_balance =\n        alice_initial_balance.total.as_u64() + expected_transaction_cost - refund_amount;\n    let alice_expected_available_balance = alice_expected_total_balance;\n\n    assert_eq!(\n        bob_current_balance.available.as_u64(),\n        bob_expected_available_balance\n    );\n\n    assert_eq!(\n        bob_current_balance.total.as_u64(),\n        bob_expected_total_balance\n    );\n\n    assert_eq!(\n        alice_current_balance.available.as_u64(),\n        alice_expected_available_balance\n    );\n\n    assert_eq!(\n        alice_current_balance.total.as_u64(),\n        alice_expected_total_balance\n    );\n}\n\nasync 
fn should_not_refund_erroneous_wasm_burn(txn_pricing_mode: PricingMode) {\n    // if refund handling is set to burn, and an erroneous wasm is processed\n    // ALL of the spent token is treated as the fee, thus there is no refund, and thus\n    // nothing is burned.\n    let (price_handling, min_gas_price, gas_limit) = match_pricing_mode(&txn_pricing_mode);\n\n    let refund_ratio = Ratio::new(1, 2);\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(price_handling)\n        .with_refund_handling(RefundHandling::Burn { refund_ratio })\n        .with_fee_handling(FeeHandling::PayToProposer);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    let txn = invalid_wasm_txn(BOB_SECRET_KEY.clone(), txn_pricing_mode);\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let (alice_initial_balance, bob_initial_balance, _charlie_initial_balance) =\n        test.get_balances(None);\n    let initial_total_supply = test.get_total_supply(None);\n\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n\n    let expected_transaction_gas: u64 = gas_limit.unwrap_or(\n        test.chainspec()\n            .get_max_gas_limit_by_category(LARGE_WASM_LANE_ID),\n    );\n    let expected_transaction_cost = expected_transaction_gas * min_gas_price as u64;\n\n    assert!(!exec_result_is_success(&exec_result)); // transaction should not succeed because the wasm bytes are invalid.\n    assert_exec_result_cost(\n        exec_result,\n        expected_transaction_cost.into(),\n        Gas::new(0),\n        \"wasm_transaction_refunds_are_burnt\",\n    );\n\n    // Bobs transaction was invalid. 
He should get NO refund.\n    // Since there is no refund - there will also be nothing burned.\n    assert_eq!(\n        test.get_total_supply(Some(block_height)),\n        initial_total_supply\n    );\n\n    let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height));\n    // Bob doesn't get a refund. The refund is burnt.\n    let bob_expected_total_balance = bob_initial_balance.total - expected_transaction_cost;\n    let bob_expected_available_balance = bob_expected_total_balance;\n\n    // Alice should get the non-refunded part of the fee since it's set to pay to proposer\n    let alice_expected_total_balance = alice_initial_balance.total + expected_transaction_cost;\n    let alice_expected_available_balance = alice_expected_total_balance;\n\n    assert_eq!(\n        bob_current_balance.available.clone(),\n        bob_expected_available_balance\n    );\n    assert_eq!(\n        bob_current_balance.total.clone(),\n        bob_expected_total_balance\n    );\n    assert_eq!(\n        alice_current_balance.available.clone(),\n        alice_expected_available_balance\n    );\n    assert_eq!(\n        alice_current_balance.total.clone(),\n        alice_expected_total_balance\n    );\n}\n\nasync fn should_burn_refunds(txn_pricing_mode: PricingMode) {\n    let (price_handling, min_gas_price, _gas_limit) = match_pricing_mode(&txn_pricing_mode);\n\n    let refund_ratio = Ratio::new(1, 3);\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(price_handling)\n        .with_refund_handling(RefundHandling::Burn { refund_ratio })\n        .with_fee_handling(FeeHandling::PayToProposer);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    let txn = valid_wasm_txn(BOB_SECRET_KEY.clone(), txn_pricing_mode);\n\n    test.fixture\n        
.run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let (alice_initial_balance, bob_initial_balance, _charlie_initial_balance) =\n        test.get_balances(None);\n    let initial_total_supply = test.get_total_supply(None);\n\n    let lane_id = calculate_transaction_lane_for_transaction(&txn, test.chainspec()).unwrap();\n    let expected_transaction_gas = txn\n        .gas_limit(test.chainspec(), lane_id)\n        .unwrap()\n        .value()\n        .as_u64();\n    let gas_cost = txn\n        .gas_cost(test.chainspec(), lane_id, min_gas_price)\n        .unwrap();\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n\n    assert!(exec_result_is_success(&exec_result));\n    assert_exec_result_cost(\n        exec_result,\n        gas_cost.value(),\n        Gas::new(DO_NOTHING_WASM_EXECUTION_GAS),\n        \"wasm_transaction_refunds_are_burnt\",\n    );\n\n    let expected_transaction_cost = expected_transaction_gas * min_gas_price as u64;\n\n    let refund_amount: U512 = (refund_ratio\n        * Ratio::from(\n            expected_transaction_cost - DO_NOTHING_WASM_EXECUTION_GAS * min_gas_price as u64,\n        ))\n    .to_integer()\n    .into();\n\n    // Bobs transaction was valid. He should get a refund.\n    // 1/3 of the unspent gas should be burned\n    assert_eq!(\n        test.get_total_supply(Some(block_height)),\n        initial_total_supply - refund_amount\n    );\n\n    let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height));\n    // Bob doesn't get a refund. 
The refund is burnt.\n    let bob_expected_total_balance = bob_initial_balance.total - expected_transaction_cost;\n    let bob_expected_available_balance = bob_expected_total_balance;\n\n    // Alice should get the non-refunded part of the fee since it's set to pay to proposer\n    let alice_expected_total_balance =\n        alice_initial_balance.total + expected_transaction_cost - refund_amount;\n    let alice_expected_available_balance = alice_expected_total_balance;\n\n    assert_eq!(\n        bob_current_balance.available.clone(),\n        bob_expected_available_balance\n    );\n    assert_eq!(\n        bob_current_balance.total.clone(),\n        bob_expected_total_balance\n    );\n    assert_eq!(\n        alice_current_balance.available.clone(),\n        alice_expected_available_balance\n    );\n    assert_eq!(\n        alice_current_balance.total.clone(),\n        alice_expected_total_balance\n    );\n}\n\n#[tokio::test]\nasync fn should_burn_refunds_fixed() {\n    should_burn_refunds(PricingMode::Fixed {\n        gas_price_tolerance: MIN_GAS_PRICE,\n        additional_computation_factor: 0,\n    })\n    .await;\n}\n\n#[tokio::test]\nasync fn should_burn_refunds_payment_limited() {\n    should_burn_refunds(PricingMode::PaymentLimited {\n        payment_amount: 2_500_000_001,\n        gas_price_tolerance: MIN_GAS_PRICE,\n        standard_payment: true,\n    })\n    .await;\n}\n\n#[tokio::test]\nasync fn should_not_refund_erroneous_wasm_burn_fixed() {\n    should_not_refund_erroneous_wasm_burn(PricingMode::Fixed {\n        gas_price_tolerance: MIN_GAS_PRICE,\n        additional_computation_factor: 0,\n    })\n    .await;\n}\n\n#[tokio::test]\nasync fn should_not_refund_erroneous_wasm_burn_payment_limited() {\n    should_not_refund_erroneous_wasm_burn(PricingMode::PaymentLimited {\n        payment_amount: 2_500_000_000,\n        gas_price_tolerance: MIN_GAS_PRICE,\n        standard_payment: true,\n    })\n    .await;\n}\n\nasync fn 
should_burn_refund_nofee(txn_pricing_mode: PricingMode) {\n    let (price_handling, min_gas_price, _gas_limit) = match_pricing_mode(&txn_pricing_mode);\n\n    let refund_ratio = Ratio::new(1, 2);\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(price_handling)\n        .with_refund_handling(RefundHandling::Burn { refund_ratio })\n        .with_fee_handling(FeeHandling::NoFee);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    let txn = valid_wasm_txn(BOB_SECRET_KEY.clone(), txn_pricing_mode);\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let (alice_initial_balance, bob_initial_balance, _) = test.get_balances(None);\n    let initial_total_supply = test.get_total_supply(None);\n\n    let lane_id = calculate_transaction_lane_for_transaction(&txn, test.chainspec()).unwrap();\n    let gas_limit = txn.gas_limit(test.chainspec(), lane_id).unwrap();\n    let gas_cost = txn\n        .gas_cost(test.chainspec(), lane_id, min_gas_price)\n        .unwrap();\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n    let consumed = exec_result.consumed().as_u64();\n    let consumed_price = consumed * min_gas_price as u64;\n    let expected_transaction_cost = gas_cost.value().as_u64();\n    assert!(exec_result_is_success(&exec_result));\n    assert_exec_result_cost(\n        exec_result,\n        expected_transaction_cost.into(),\n        Gas::new(DO_NOTHING_WASM_EXECUTION_GAS), /* Magic value, this is the amount of gas\n                                                  * consumed by do_nothing.wasm */\n        \"only_refunds_are_burnt_no_fee\",\n    );\n\n    //TODO shouldn't this be (refund_ratio* Ratio::from((expected_transaction_cost -\n    // consumed_price))?\n    let refund_amount: U512 
= (refund_ratio\n        * Ratio::from((gas_limit.value().as_u64() * min_gas_price as u64) - consumed_price))\n    .to_integer()\n    .into();\n\n    // We set it up so that the refunds are burnt so check this.\n    let total_supply = test.get_total_supply(Some(block_height));\n    assert_eq!(total_supply, initial_total_supply - refund_amount);\n\n    let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height));\n    // Bob doesn't get a refund. The refund is burnt. A hold is put in place for the\n    // transaction cost.\n    let bob_balance_hold = U512::from(expected_transaction_cost) - refund_amount;\n    let bob_expected_total_balance = bob_initial_balance.total - refund_amount;\n    let bob_expected_available_balance = bob_current_balance.total - bob_balance_hold;\n\n    // Alice shouldn't get anything since we are operating with no fees\n    let alice_expected_total_balance = alice_initial_balance.total;\n    let alice_expected_available_balance = alice_expected_total_balance;\n\n    assert_eq!(\n        bob_current_balance.available.clone(),\n        bob_expected_available_balance\n    );\n    assert_eq!(\n        bob_current_balance.total.clone(),\n        bob_expected_total_balance\n    );\n    assert_eq!(\n        alice_current_balance.available.clone(),\n        alice_expected_available_balance\n    );\n    assert_eq!(\n        alice_current_balance.total.clone(),\n        alice_expected_total_balance\n    );\n}\n\n#[tokio::test]\nasync fn should_burn_refund_nofee_fixed() {\n    should_burn_refund_nofee(PricingMode::Fixed {\n        gas_price_tolerance: MIN_GAS_PRICE,\n        additional_computation_factor: 0,\n    })\n    .await;\n}\n\n#[tokio::test]\nasync fn should_burn_refund_nofee_payment_limited() {\n    should_burn_refund_nofee(PricingMode::PaymentLimited {\n        payment_amount: 4_000_000_000,\n        gas_price_tolerance: MIN_GAS_PRICE,\n        standard_payment: true,\n    })\n    .await;\n}\n\nasync fn 
should_burn_fee_and_burn_refund(txn_pricing_mode: PricingMode) {\n    let (price_handling, min_gas_price, gas_limit) = match_pricing_mode(&txn_pricing_mode);\n\n    let refund_ratio = Ratio::new(1, 2);\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(price_handling)\n        .with_refund_handling(RefundHandling::Burn { refund_ratio })\n        .with_fee_handling(FeeHandling::Burn);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n    let txn = invalid_wasm_txn(BOB_SECRET_KEY.clone(), txn_pricing_mode);\n\n    // Fixed transaction pricing.\n    let lane_id = calculate_transaction_lane_for_transaction(&txn, test.chainspec()).unwrap();\n    let expected_transaction_gas = gas_limit.unwrap_or(\n        txn.gas_limit(test.chainspec(), lane_id)\n            .unwrap()\n            .value()\n            .as_u64(),\n    );\n    let expected_transaction_cost = expected_transaction_gas * min_gas_price as u64;\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let (alice_initial_balance, bob_initial_balance, _) = test.get_balances(None);\n    let initial_total_supply = test.get_total_supply(None);\n\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n\n    assert!(!exec_result_is_success(&exec_result)); // transaction should not succeed because the wasm bytes are invalid.\n    assert_exec_result_cost(\n        exec_result,\n        expected_transaction_cost.into(),\n        Gas::new(0),\n        \"fees_and_refunds_are_burnt_separately\",\n    );\n\n    // Both refunds and fees should be burnt (even though they are burnt separately). 
Refund + fee\n    // amounts to the txn cost so expect that the total supply is reduced by that amount.\n    assert_eq!(\n        test.get_total_supply(Some(block_height)),\n        initial_total_supply - expected_transaction_cost\n    );\n\n    let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height));\n    // The refund and the fees are burnt. No holds should be in place.\n    let bob_expected_total_balance = bob_initial_balance.total - expected_transaction_cost;\n    let bob_expected_available_balance = bob_current_balance.total;\n\n    // Alice shouldn't get anything since we are operating with no fees\n    let alice_expected_total_balance = alice_initial_balance.total;\n    let alice_expected_available_balance = alice_expected_total_balance;\n\n    assert_eq!(\n        bob_current_balance.available.clone(),\n        bob_expected_available_balance\n    );\n    assert_eq!(\n        bob_current_balance.total.clone(),\n        bob_expected_total_balance\n    );\n    assert_eq!(\n        alice_current_balance.available.clone(),\n        alice_expected_available_balance\n    );\n    assert_eq!(\n        alice_current_balance.total.clone(),\n        alice_expected_total_balance\n    );\n}\n\n#[tokio::test]\nasync fn should_burn_fee_and_burn_refund_fixed() {\n    should_burn_fee_and_burn_refund(PricingMode::Fixed {\n        gas_price_tolerance: MIN_GAS_PRICE,\n        additional_computation_factor: 0,\n    })\n    .await;\n}\n\n#[tokio::test]\nasync fn should_burn_fee_and_burn_refund_payment_limited() {\n    should_burn_fee_and_burn_refund(PricingMode::PaymentLimited {\n        payment_amount: 2_500_000_000,\n        gas_price_tolerance: MIN_GAS_PRICE,\n        standard_payment: true,\n    })\n    .await;\n}\n\nasync fn should_burn_fee_erroneous_wasm(txn_pricing_mode: PricingMode) {\n    // if erroneous wasm is processed, all the unconsumed amount goes to the fee\n    // and is thus all of it is burned if FeeHandling == Burn\n    let 
(price_handling, min_gas_price, gas_limit) = match_pricing_mode(&txn_pricing_mode);\n\n    let refund_ratio = Ratio::new(1, 2);\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(price_handling)\n        .with_refund_handling(RefundHandling::Refund { refund_ratio })\n        .with_fee_handling(FeeHandling::Burn);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    let txn = invalid_wasm_txn(BOB_SECRET_KEY.clone(), txn_pricing_mode);\n\n    // Fixed transaction pricing.\n    let lane_id = calculate_transaction_lane_for_transaction(&txn, test.chainspec()).unwrap();\n    let expected_transaction_gas = gas_limit.unwrap_or(\n        txn.gas_limit(test.chainspec(), lane_id)\n            .unwrap()\n            .value()\n            .as_u64(),\n    );\n\n    let expected_transaction_cost = expected_transaction_gas * min_gas_price as u64;\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let (alice_initial_balance, bob_initial_balance, _) = test.get_balances(None);\n    let initial_total_supply = test.get_total_supply(None);\n\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n\n    assert!(!exec_result_is_success(&exec_result)); // transaction should not succeed because the wasm bytes are invalid.\n    assert_exec_result_cost(\n        exec_result,\n        expected_transaction_cost.into(),\n        Gas::new(0),\n        \"refunds_are_payed_and_fees_are_burnt\",\n    );\n\n    // This transaction was erroneous, there should be no refund\n    let refund_amount: U512 = U512::zero();\n\n    // Only fees are burnt, so the refund_amount should still be in the total supply.\n    assert_eq!(\n        test.get_total_supply(Some(block_height)),\n        initial_total_supply - 
expected_transaction_cost + refund_amount\n    );\n\n    let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height));\n    // Bob should get back the refund. The fees are burnt and no holds should be in place.\n    let bob_expected_total_balance =\n        bob_initial_balance.total - expected_transaction_cost + refund_amount;\n    let bob_expected_available_balance = bob_current_balance.total;\n\n    // Alice shouldn't get anything since we are operating with no fees\n    let alice_expected_total_balance = alice_initial_balance.total;\n    let alice_expected_available_balance = alice_expected_total_balance;\n\n    assert_eq!(\n        bob_current_balance.available.clone(),\n        bob_expected_available_balance\n    );\n    assert_eq!(\n        bob_current_balance.total.clone(),\n        bob_expected_total_balance\n    );\n    assert_eq!(\n        alice_current_balance.available.clone(),\n        alice_expected_available_balance\n    );\n    assert_eq!(\n        alice_current_balance.total.clone(),\n        alice_expected_total_balance\n    );\n}\n\n#[tokio::test]\nasync fn should_burn_fee_erroneous_wasm_fixed() {\n    should_burn_fee_erroneous_wasm(PricingMode::Fixed {\n        gas_price_tolerance: MIN_GAS_PRICE,\n        additional_computation_factor: 0,\n    })\n    .await;\n}\n\n#[tokio::test]\nasync fn should_burn_fee_erroneous_wasm_payment_limited() {\n    should_burn_fee_erroneous_wasm(PricingMode::PaymentLimited {\n        payment_amount: 2_500_000_000,\n        gas_price_tolerance: MIN_GAS_PRICE,\n        standard_payment: true,\n    })\n    .await;\n}\n\nasync fn should_refund_unconsumed_and_gas_hold_fee(txn_pricing_mode: PricingMode) {\n    let (price_handling, min_gas_price, _gas_limit) = match_pricing_mode(&txn_pricing_mode);\n    let refund_ratio = Ratio::new(1, 3);\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(price_handling)\n        
.with_refund_handling(RefundHandling::Refund { refund_ratio })\n        .with_fee_handling(FeeHandling::NoFee);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n    let txn = valid_wasm_txn(BOB_SECRET_KEY.clone(), txn_pricing_mode);\n    let lane_id = calculate_transaction_lane_for_transaction(&txn, test.chainspec()).unwrap();\n    let gas_limit = txn\n        .gas_limit(test.chainspec(), lane_id)\n        .unwrap()\n        .value()\n        .as_u64();\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let (alice_initial_balance, bob_initial_balance, _) = test.get_balances(None);\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n\n    assert!(exec_result_is_success(&exec_result));\n\n    let expected_transaction_cost = gas_limit * min_gas_price as u64;\n    assert_exec_result_cost(\n        exec_result,\n        expected_transaction_cost.into(),\n        Gas::new(DO_NOTHING_WASM_EXECUTION_GAS),\n        \"wasm_transaction_fees_are_refunded\",\n    );\n\n    let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height));\n\n    // Bob should get 1/3 of the cost for the unspent gas. Since this transaction consumed 0\n    // gas, the unspent gas is equal to the limit.\n    let refund_amount: u64 = (refund_ratio\n        * Ratio::from(\n            expected_transaction_cost - DO_NOTHING_WASM_EXECUTION_GAS * min_gas_price as u64,\n        ))\n    .to_integer();\n\n    // Bob should get back the refund. 
The fees should be on hold, so Bob's total should be the\n    // same as initial.\n    let bob_expected_total_balance = bob_initial_balance.total;\n    let bob_expected_available_balance =\n        bob_current_balance.total - expected_transaction_cost + refund_amount;\n    // Alice shouldn't get anything since we are operating with no fees\n    let alice_expected_total_balance = alice_initial_balance.total;\n    let alice_expected_available_balance = alice_expected_total_balance;\n\n    assert_eq!(\n        bob_current_balance.available.clone(),\n        bob_expected_available_balance\n    );\n    assert_eq!(\n        bob_current_balance.total.clone(),\n        bob_expected_total_balance\n    );\n    assert_eq!(\n        alice_current_balance.available.clone(),\n        alice_expected_available_balance\n    );\n    assert_eq!(\n        alice_current_balance.total.clone(),\n        alice_expected_total_balance\n    );\n}\n\n#[tokio::test]\nasync fn should_refund_unconsumed_and_gas_hold_fee_fixed() {\n    should_refund_unconsumed_and_gas_hold_fee(PricingMode::Fixed {\n        gas_price_tolerance: MIN_GAS_PRICE,\n        additional_computation_factor: 0,\n    })\n    .await;\n}\n\n#[tokio::test]\nasync fn should_refund_unconsumed_and_gas_hold_fee_payment_limited() {\n    should_refund_unconsumed_and_gas_hold_fee(PricingMode::PaymentLimited {\n        payment_amount: 2_500_000_000,\n        gas_price_tolerance: MIN_GAS_PRICE,\n        standard_payment: true,\n    })\n    .await;\n}\n\nasync fn should_gas_hold_fee_erroneous_wasm(txn_pricing_mode: PricingMode) {\n    let (price_handling, min_gas_price, gas_limit) = match_pricing_mode(&txn_pricing_mode);\n\n    let refund_ratio = Ratio::new(1, 2);\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(price_handling)\n        .with_refund_handling(RefundHandling::Refund { refund_ratio })\n        .with_fee_handling(FeeHandling::NoFee);\n\n    let mut test = 
SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    let txn = invalid_wasm_txn(BOB_SECRET_KEY.clone(), txn_pricing_mode);\n    let meta_transaction = MetaTransaction::from_transaction(\n        &txn,\n        test.chainspec().core_config.pricing_handling,\n        &test.chainspec().transaction_config,\n    )\n    .unwrap();\n    // Fixed transaction pricing.\n    let expected_consumed_gas = Gas::new(0); // expect that this transaction doesn't consume any gas since it has invalid wasm.\n    let expected_transaction_gas = gas_limit.unwrap_or(\n        meta_transaction\n            .gas_limit(test.chainspec())\n            .unwrap()\n            .value()\n            .as_u64(),\n    );\n    let expected_transaction_cost = expected_transaction_gas * min_gas_price as u64;\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let (alice_initial_balance, bob_initial_balance, _charlie_initial_balance) =\n        test.get_balances(None);\n    let initial_total_supply = test.get_total_supply(None);\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n\n    assert!(!exec_result_is_success(&exec_result)); // transaction should not succeed because the wasm bytes are invalid.\n    assert_exec_result_cost(\n        exec_result,\n        expected_transaction_cost.into(),\n        expected_consumed_gas,\n        \"refunds_are_payed_and_fees_are_on_hold\",\n    );\n\n    // Nothing is burnt so total supply should be the same.\n    assert_eq!(\n        test.get_total_supply(Some(block_height)),\n        initial_total_supply\n    );\n\n    let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height));\n    // Bob should get back the refund. 
The fees should be on hold, so Bob's total should be the\n    // same as initial.\n    let bob_expected_total_balance = bob_initial_balance.total;\n    // There is no refund for bob because we don't pay refunds for transactions that errored during\n    // execution\n    let bob_expected_available_balance = bob_current_balance.total - expected_transaction_cost;\n\n    // Alice shouldn't get anything since we are operating with no fees\n    let alice_expected_total_balance = alice_initial_balance.total;\n    let alice_expected_available_balance = alice_expected_total_balance;\n\n    assert_eq!(\n        bob_current_balance.available.clone(),\n        bob_expected_available_balance\n    );\n    assert_eq!(\n        bob_current_balance.total.clone(),\n        bob_expected_total_balance\n    );\n    assert_eq!(\n        alice_current_balance.available.clone(),\n        alice_expected_available_balance\n    );\n    assert_eq!(\n        alice_current_balance.total.clone(),\n        alice_expected_total_balance\n    );\n}\n\n#[tokio::test]\nasync fn should_gas_hold_fee_erroneous_wasm_fixed() {\n    should_gas_hold_fee_erroneous_wasm(PricingMode::Fixed {\n        gas_price_tolerance: MIN_GAS_PRICE,\n        additional_computation_factor: 0,\n    })\n    .await;\n}\n\n#[tokio::test]\nasync fn should_gas_hold_fee_erroneous_wasm_payment_limited() {\n    should_gas_hold_fee_erroneous_wasm(PricingMode::PaymentLimited {\n        payment_amount: 2_500_000_000,\n        gas_price_tolerance: MIN_GAS_PRICE,\n        standard_payment: true,\n    })\n    .await;\n}\n\n#[tokio::test]\nasync fn should_burn_fee_refund_unconsumed_custom_payment() {\n    let refund_ratio = Ratio::new(1, 2);\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::PaymentLimited)\n        .with_refund_handling(RefundHandling::Refund { refund_ratio })\n        .with_fee_handling(FeeHandling::Burn);\n\n    let mut test = 
SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    // This contract uses custom payment.\n    let contract_file = RESOURCES_PATH\n        .join(\"..\")\n        .join(\"target\")\n        .join(\"wasm32-unknown-unknown\")\n        .join(\"release\")\n        .join(\"ee_601_regression.wasm\");\n    let module_bytes = Bytes::from(std::fs::read(contract_file).expect(\"cannot read module bytes\"));\n\n    let expected_transaction_gas = 2_500_000_000u64;\n    let expected_transaction_cost = expected_transaction_gas * MIN_GAS_PRICE as u64;\n\n    let mut txn = Transaction::from(\n        TransactionV1Builder::new_session(\n            false,\n            module_bytes,\n            TransactionRuntimeParams::VmCasperV1,\n        )\n        .with_chain_name(CHAIN_NAME)\n        .with_pricing_mode(PricingMode::PaymentLimited {\n            payment_amount: expected_transaction_gas,\n            gas_price_tolerance: MIN_GAS_PRICE,\n            standard_payment: false,\n        })\n        .with_initiator_addr(BOB_PUBLIC_KEY.clone())\n        .build()\n        .unwrap(),\n    );\n    txn.sign(&BOB_SECRET_KEY);\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let (alice_initial_balance, bob_initial_balance, _charlie_initial_balance) =\n        test.get_balances(None);\n    let initial_total_supply = test.get_total_supply(None);\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n\n    match &exec_result {\n        ExecutionResult::V2(exec_result_v2) => {\n            assert_eq!(exec_result_v2.cost, expected_transaction_cost.into());\n        }\n        _ => {\n            panic!(\"Unexpected exec result version.\")\n        }\n    }\n\n    let refund_amount = exec_result.refund().expect(\"should have refund\");\n\n    // Expect that the fees are burnt.\n    
assert_eq!(\n        test.get_total_supply(Some(block_height)),\n        initial_total_supply - expected_transaction_cost + refund_amount\n    );\n\n    let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height));\n    // Bob should get a refund. Since the contract doesn't set a custom purse for the refund, it\n    // should get the refund in the main purse.\n    let bob_expected_total_balance =\n        bob_initial_balance.total - expected_transaction_cost + refund_amount;\n    let bob_expected_available_balance = bob_expected_total_balance; // No holds expected.\n\n    // Alice shouldn't get anything since we are operating with no fees\n    let alice_expected_total_balance = alice_initial_balance.total;\n    let alice_expected_available_balance = alice_expected_total_balance;\n\n    assert_eq!(\n        bob_current_balance.available.clone(),\n        bob_expected_available_balance\n    );\n    assert_eq!(\n        bob_current_balance.total.clone(),\n        bob_expected_total_balance\n    );\n    assert_eq!(\n        alice_current_balance.available.clone(),\n        alice_expected_available_balance\n    );\n    assert_eq!(\n        alice_current_balance.total.clone(),\n        alice_expected_total_balance\n    );\n}\n\n#[tokio::test]\nasync fn should_allow_norefund_nofee_custom_payment() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::PaymentLimited)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    // This contract uses custom payment.\n    let contract_file = RESOURCES_PATH\n        .join(\"..\")\n        .join(\"target\")\n        .join(\"wasm32-unknown-unknown\")\n        .join(\"release\")\n        
.join(\"ee_601_regression.wasm\");\n    let module_bytes = Bytes::from(std::fs::read(contract_file).expect(\"cannot read module bytes\"));\n\n    let expected_transaction_gas = 1_000_000_000_000u64;\n    let expected_transaction_cost = expected_transaction_gas * MIN_GAS_PRICE as u64;\n\n    let mut txn = Transaction::from(\n        TransactionV1Builder::new_session(\n            false,\n            module_bytes,\n            TransactionRuntimeParams::VmCasperV1,\n        )\n        .with_chain_name(CHAIN_NAME)\n        .with_pricing_mode(PricingMode::PaymentLimited {\n            payment_amount: expected_transaction_gas,\n            gas_price_tolerance: MIN_GAS_PRICE,\n            standard_payment: false,\n        })\n        .with_initiator_addr(BOB_PUBLIC_KEY.clone())\n        .build()\n        .unwrap(),\n    );\n    txn.sign(&BOB_SECRET_KEY);\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let (alice_initial_balance, bob_initial_balance, _charlie_initial_balance) =\n        test.get_balances(None);\n    let initial_total_supply = test.get_total_supply(None);\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n\n    match exec_result {\n        ExecutionResult::V2(exec_result_v2) => {\n            assert_eq!(exec_result_v2.cost, expected_transaction_cost.into());\n        }\n        _ => {\n            panic!(\"Unexpected exec result version.\")\n        }\n    }\n\n    let payment_purse_balance = get_payment_purse_balance(&mut test.fixture, Some(block_height));\n    assert_eq!(\n        *payment_purse_balance\n            .total_balance()\n            .expect(\"should have total balance\"),\n        U512::zero(),\n        \"payment purse should have a 0 balance\"\n    );\n\n    // we're not burning anything, so total supply should be the same\n    assert_eq!(\n        test.get_total_supply(Some(block_height)),\n        initial_total_supply,\n        \"total supply should be the 
same before and after\"\n    );\n\n    // updated balances\n    let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height));\n\n    // the proposer's balance should be the same because we are in no fee mode\n    assert_eq!(\n        alice_initial_balance, alice_current_balance,\n        \"the proposers balance should be unchanged as we are in no fee mode\"\n    );\n\n    // the initiator should have a hold equal to the cost\n    assert_eq!(\n        bob_current_balance.total.clone(),\n        bob_initial_balance.total,\n        \"bob's total balance should be unchanged as we are in no fee mode\"\n    );\n\n    assert_ne!(\n        bob_current_balance.available.clone(),\n        bob_initial_balance.total,\n        \"bob's available balance and total balance should not be the same\"\n    );\n\n    let bob_expected_available_balance = bob_initial_balance.total - expected_transaction_cost;\n    assert_eq!(\n        bob_current_balance.available.clone(),\n        bob_expected_available_balance,\n        \"bob's available balance should reflect a hold for the cost\"\n    );\n}\n\nasync fn transfer_fee_is_burnt_no_refund(txn_pricing_mode: PricingMode) {\n    let (price_handling, min_gas_price, gas_limit) = match_pricing_mode(&txn_pricing_mode);\n\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(price_handling)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::Burn);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    let transfer_amount = test\n        .chainspec()\n        .transaction_config\n        .native_transfer_minimum_motes\n        + 100;\n\n    let txn = transfer_txn(\n        ALICE_SECRET_KEY.clone(),\n        &CHARLIE_PUBLIC_KEY,\n        txn_pricing_mode,\n        
transfer_amount,\n    );\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let (alice_initial_balance, _, charlie_initial_balance) = test.get_balances(None);\n    let initial_total_supply = test.get_total_supply(None);\n    let charlie_initial_balance = charlie_initial_balance\n        .expect(\"charlie should have balance\")\n        .total;\n\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n\n    let expected_transfer_gas: u64 = gas_limit.unwrap_or(\n        test.chainspec()\n            .system_costs_config\n            .mint_costs()\n            .transfer\n            .into(),\n    );\n    let expected_transfer_cost = expected_transfer_gas * min_gas_price as u64;\n\n    assert!(exec_result_is_success(&exec_result), \"{:?}\", exec_result);\n    assert_eq!(exec_result.transfers().len(), 1, \"{:?}\", exec_result);\n    assert_exec_result_cost(\n        exec_result,\n        expected_transfer_cost.into(),\n        expected_transfer_gas.into(),\n        \"transfer_fee_is_burnt_no_refund\",\n    );\n\n    // The fees should have been burnt so expect the total supply to have been\n    // reduced by the fee that was burnt.\n    let total_supply_after_txn = test.get_total_supply(Some(block_height));\n    assert_ne!(\n        total_supply_after_txn, initial_total_supply,\n        \"total supply should be lowered\"\n    );\n    let diff = initial_total_supply - total_supply_after_txn;\n    assert_eq!(\n        diff,\n        U512::from(expected_transfer_cost),\n        \"total supply should be lowered by expected transfer cost\"\n    );\n\n    // Get the current balances after the transaction and check them.\n    let (alice_current_balance, _, charlie_current_balance) = test.get_balances(Some(block_height));\n    let alice_expected_total_balance =\n        alice_initial_balance.total - transfer_amount - expected_transfer_cost;\n    let alice_expected_available_balance = 
alice_expected_total_balance;\n\n    let charlie_current_balance = charlie_current_balance\n        .expect(\"charlie should have balance\")\n        .total;\n    let charlies_expected_balance = charlie_initial_balance + transfer_amount;\n\n    assert_eq!(\n        charlie_current_balance, charlies_expected_balance,\n        \"expected balance does not match\"\n    );\n    assert_eq!(\n        alice_current_balance.available, alice_expected_available_balance,\n        \"alice available balance should match\"\n    );\n    assert_eq!(alice_current_balance.total, alice_expected_total_balance);\n}\n\n#[tokio::test]\nasync fn transfer_fee_is_burnt_no_refund_fixed_pricing() {\n    transfer_fee_is_burnt_no_refund(PricingMode::Fixed {\n        gas_price_tolerance: MIN_GAS_PRICE,\n        additional_computation_factor: 0,\n    })\n    .await;\n}\n\n#[tokio::test]\nasync fn transfer_fee_is_burnt_no_refund_payment_limited_pricing() {\n    transfer_fee_is_burnt_no_refund(PricingMode::PaymentLimited {\n        payment_amount: 100_000_000,\n        gas_price_tolerance: MIN_GAS_PRICE,\n        standard_payment: true,\n    })\n    .await;\n}\n\n// PTP == fee pay to proposer\nasync fn fee_ptp_no_refund(txn_pricing_mode: PricingMode) {\n    let (price_handling, min_gas_price, gas_limit) = match_pricing_mode(&txn_pricing_mode);\n\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(price_handling)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::PayToProposer);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    let transfer_amount = test\n        .chainspec()\n        .transaction_config\n        .native_transfer_minimum_motes\n        + 100;\n\n    let txn = transfer_txn(\n        BOB_SECRET_KEY.clone(),\n        
&CHARLIE_PUBLIC_KEY,\n        txn_pricing_mode,\n        transfer_amount,\n    );\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let (alice_initial_balance, bob_initial_balance, charlie_initial_balance) =\n        test.get_balances(None);\n    let initial_total_supply = test.get_total_supply(None);\n\n    let charlie_initial_balance = charlie_initial_balance\n        .expect(\"charlie should have balance\")\n        .total;\n\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n\n    let expected_transfer_gas: u64 = gas_limit.unwrap_or(\n        test.chainspec()\n            .system_costs_config\n            .mint_costs()\n            .transfer\n            .into(),\n    );\n    let expected_transfer_cost = expected_transfer_gas * min_gas_price as u64;\n\n    assert!(exec_result_is_success(&exec_result));\n    assert_exec_result_cost(\n        exec_result,\n        expected_transfer_cost.into(),\n        expected_transfer_gas.into(),\n        \"fee_is_payed_to_proposer_no_refund\",\n    );\n\n    // Nothing should be burnt.\n    assert_eq!(\n        initial_total_supply,\n        test.get_total_supply(Some(block_height)),\n        \"total supply should be unchanged\"\n    );\n\n    let (alice_current_balance, bob_current_balance, charlie_current_balance) =\n        test.get_balances(Some(block_height));\n\n    let charlie_current_balance = charlie_current_balance\n        .expect(\"charlie should still have balance\")\n        .total;\n    let charlie_expected_balance = charlie_initial_balance.saturating_add(transfer_amount.into());\n    assert_eq!(\n        charlie_current_balance, charlie_expected_balance,\n        \"charlie's actual balance not expected total\"\n    );\n    // since Alice was the proposer of the block, it should get back the transfer fee since\n    // FeeHandling is set to PayToProposer.\n    let bob_expected_total_balance =\n        bob_initial_balance.total - 
transfer_amount - expected_transfer_cost;\n    let bob_expected_available_balance = bob_expected_total_balance;\n\n    let alice_expected_total_balance = alice_initial_balance.total + expected_transfer_cost;\n    let alice_expected_available_balance = alice_expected_total_balance;\n\n    assert_eq!(\n        bob_current_balance.available,\n        bob_expected_available_balance\n    );\n    assert_eq!(bob_current_balance.total, bob_expected_total_balance);\n    assert_eq!(\n        alice_current_balance.available,\n        alice_expected_available_balance\n    );\n    assert_eq!(alice_current_balance.total, alice_expected_total_balance);\n}\n\n#[tokio::test]\nasync fn fee_ptp_norefund_fixed_pricing() {\n    fee_ptp_no_refund(PricingMode::Fixed {\n        gas_price_tolerance: MIN_GAS_PRICE,\n        additional_computation_factor: 0,\n    })\n    .await;\n}\n\n#[tokio::test]\nasync fn fee_ptp_norefund_payment_limited() {\n    fee_ptp_no_refund(PricingMode::PaymentLimited {\n        payment_amount: 100_000_000,\n        gas_price_tolerance: MIN_GAS_PRICE,\n        standard_payment: true,\n    })\n    .await;\n}\n\nasync fn erroneous_wasm_transaction_no_refund(txn_pricing_mode: PricingMode) {\n    let (price_handling, min_gas_price, gas_limit) = match_pricing_mode(&txn_pricing_mode);\n\n    let refund_ratio = Ratio::new(1, 2);\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(price_handling)\n        .with_refund_handling(RefundHandling::Refund { refund_ratio })\n        .with_fee_handling(FeeHandling::PayToProposer);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    let txn = invalid_wasm_txn(BOB_SECRET_KEY.clone(), txn_pricing_mode.clone());\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let 
(alice_initial_balance, bob_initial_balance, _charlie_initial_balance) =\n        test.get_balances(None);\n    let initial_total_supply = test.get_total_supply(None);\n\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n\n    let expected_transaction_gas: u64 = gas_limit.unwrap_or(\n        test.chainspec()\n            .get_max_gas_limit_by_category(LARGE_WASM_LANE_ID),\n    );\n    let expected_transaction_cost = expected_transaction_gas * min_gas_price as u64;\n\n    assert!(!exec_result_is_success(&exec_result)); // transaction should not succeed because the wasm bytes are invalid.\n    assert_exec_result_cost(\n        exec_result,\n        expected_transaction_cost.into(),\n        Gas::new(0),\n        format!(\n            \"erroneous_wasm_transaction_no_refund {:?}\",\n            txn_pricing_mode\n        )\n        .as_str(),\n    );\n\n    // Nothing is burnt so total supply should be the same.\n    assert_eq!(\n        initial_total_supply,\n        test.get_total_supply(Some(block_height))\n    );\n\n    let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height));\n    // Bob gets no refund, we don't pay refunds on erroneous wasm\n    let bob_expected_total_balance = bob_initial_balance.total - expected_transaction_cost;\n    let bob_expected_available_balance = bob_expected_total_balance;\n\n    // Alice should get all the fee since it's set to pay to proposer and Bob got no refund\n    let alice_expected_total_balance = alice_initial_balance.total + expected_transaction_cost;\n    let alice_expected_available_balance = alice_expected_total_balance;\n\n    assert_eq!(\n        bob_current_balance.available.clone(),\n        bob_expected_available_balance\n    );\n    assert_eq!(\n        bob_current_balance.total.clone(),\n        bob_expected_total_balance\n    );\n    assert_eq!(\n        alice_current_balance.available.clone(),\n        alice_expected_available_balance\n    );\n    
assert_eq!(\n        alice_current_balance.total.clone(),\n        alice_expected_total_balance\n    );\n}\n\nasync fn wasm_transaction_ptp_fee_and_refund(txn_pricing_mode: PricingMode) {\n    let (price_handling, min_gas_price, gas_limit) = match_pricing_mode(&txn_pricing_mode);\n\n    let refund_ratio = Ratio::new(1, 3);\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(price_handling)\n        .with_refund_handling(RefundHandling::Refund { refund_ratio })\n        .with_fee_handling(FeeHandling::PayToProposer);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    let txn = valid_wasm_txn(BOB_SECRET_KEY.clone(), txn_pricing_mode.clone());\n    let lane_id = calculate_transaction_lane_for_transaction(&txn, test.chainspec()).unwrap();\n    let expected_transaction_gas = gas_limit.unwrap_or(\n        txn.gas_limit(test.chainspec(), lane_id)\n            .unwrap()\n            .value()\n            .as_u64(),\n    );\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n    let (alice_initial_balance, bob_initial_balance, _charlie_initial_balance) =\n        test.get_balances(None);\n    let initial_total_supply = test.get_total_supply(None);\n\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n\n    assert!(exec_result_is_success(&exec_result));\n\n    let expected_transaction_cost = expected_transaction_gas * MIN_GAS_PRICE as u64;\n    assert_exec_result_cost(\n        exec_result,\n        expected_transaction_cost.into(),\n        Gas::new(DO_NOTHING_WASM_EXECUTION_GAS),\n        format!(\"wasm_transaction_ptp_fee_and_refund {:?}\", txn_pricing_mode).as_str(),\n    );\n\n    // Nothing is burnt so total supply should be the same.\n    assert_eq!(\n        initial_total_supply,\n        
test.get_total_supply(Some(block_height))\n    );\n\n    // Bob should get back half of the cost for the unspent gas. Since this transaction consumed 0\n    // gas, the unspent gas is equal to the limit.\n    let refund_amount: U512 = (refund_ratio\n        * Ratio::from(\n            expected_transaction_cost - DO_NOTHING_WASM_EXECUTION_GAS * min_gas_price as u64,\n        ))\n    .to_integer()\n    .into();\n\n    let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height));\n    let bob_expected_total_balance =\n        bob_initial_balance.total - expected_transaction_cost + refund_amount;\n    let bob_expected_available_balance = bob_expected_total_balance;\n\n    // Alice should get the non-refunded part of the fee since it's set to pay to proposer\n    let alice_expected_total_balance =\n        alice_initial_balance.total + expected_transaction_cost - refund_amount;\n    let alice_expected_available_balance = alice_expected_total_balance;\n\n    assert_eq!(\n        bob_current_balance.available.clone(),\n        bob_expected_available_balance\n    );\n    assert_eq!(\n        bob_current_balance.total.clone(),\n        bob_expected_total_balance\n    );\n    assert_eq!(\n        alice_current_balance.available.clone(),\n        alice_expected_available_balance\n    );\n    assert_eq!(\n        alice_current_balance.total.clone(),\n        alice_expected_total_balance\n    );\n}\n\n#[tokio::test]\nasync fn erroneous_wasm_transaction_norefund_fixed_pricing() {\n    erroneous_wasm_transaction_no_refund(PricingMode::Fixed {\n        gas_price_tolerance: MIN_GAS_PRICE,\n        additional_computation_factor: 0,\n    })\n    .await;\n}\n\n#[tokio::test]\nasync fn wasm_transaction_refund_fixed_pricing() {\n    wasm_transaction_ptp_fee_and_refund(PricingMode::Fixed {\n        gas_price_tolerance: MIN_GAS_PRICE,\n        additional_computation_factor: 0,\n    })\n    .await;\n}\n\n#[tokio::test]\nasync fn 
wasm_transaction_payment_limited_refund() {\n    erroneous_wasm_transaction_no_refund(PricingMode::PaymentLimited {\n        payment_amount: 2500000000,\n        gas_price_tolerance: MIN_GAS_PRICE,\n        standard_payment: true,\n    })\n    .await;\n}\n\nasync fn fee_is_accumulated_and_distributed_no_refund(txn_pricing_mode: PricingMode) {\n    let (price_handling, min_gas_price, gas_limit) = match_pricing_mode(&txn_pricing_mode);\n\n    let admins: BTreeSet<PublicKey> = vec![ALICE_PUBLIC_KEY.clone(), BOB_PUBLIC_KEY.clone()]\n        .into_iter()\n        .collect();\n\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(price_handling)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::Accumulate)\n        .with_administrators(admins);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    let transfer_amount = test\n        .chainspec()\n        .transaction_config\n        .native_transfer_minimum_motes\n        + 100;\n\n    let txn = transfer_txn(\n        BOB_SECRET_KEY.clone(),\n        &CHARLIE_PUBLIC_KEY,\n        txn_pricing_mode,\n        transfer_amount,\n    );\n\n    let expected_transfer_gas: u64 = gas_limit.unwrap_or(\n        test.chainspec()\n            .system_costs_config\n            .mint_costs()\n            .transfer\n            .into(),\n    );\n    let expected_transfer_cost = expected_transfer_gas * min_gas_price as u64;\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n    let (alice_initial_balance, bob_initial_balance, charlie_initial_balance) =\n        test.get_balances(None);\n    let initial_total_supply = test.get_total_supply(None);\n    let acc_purse_initial_balance = *test\n        .get_accumulate_purse_balance(None, false)\n        
.available_balance()\n        .expect(\"Accumulate purse should have a balance.\");\n\n    let charlie_initial_balance = charlie_initial_balance\n        .expect(\"Expected Charlie to have a balance\")\n        .total;\n\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n\n    assert!(exec_result_is_success(&exec_result));\n    assert_exec_result_cost(\n        exec_result,\n        expected_transfer_cost.into(),\n        expected_transfer_gas.into(),\n        \"fee_is_accumulated_and_distributed_no_refund\",\n    );\n\n    assert_eq!(\n        initial_total_supply,\n        test.get_total_supply(Some(block_height)),\n        \"total supply should remain unchanged\"\n    );\n\n    let (alice_current_balance, bob_current_balance, charlie_current_balance) =\n        test.get_balances(Some(block_height));\n\n    let charlie_current_balance = charlie_current_balance\n        .expect(\"Expected Charlie to have a balance\")\n        .total;\n    let charlie_expected_balance =\n        charlie_initial_balance.saturating_add(U512::from(transfer_amount));\n    assert_eq!(\n        charlie_current_balance, charlie_expected_balance,\n        \"charlie balance is not expected amount\"\n    );\n\n    let bob_expected_total_balance =\n        bob_initial_balance.total - transfer_amount - expected_transfer_cost;\n    let bob_expected_available_balance = bob_expected_total_balance;\n\n    let alice_expected_total_balance = alice_initial_balance.total;\n    let alice_expected_available_balance = alice_expected_total_balance;\n\n    assert_eq!(\n        bob_current_balance.available,\n        bob_expected_available_balance\n    );\n    assert_eq!(bob_current_balance.total, bob_expected_total_balance);\n    assert_eq!(\n        alice_current_balance.available,\n        alice_expected_available_balance\n    );\n    assert_eq!(alice_current_balance.total, alice_expected_total_balance);\n\n    let acc_purse_balance = *test\n        
.get_accumulate_purse_balance(Some(block_height), false)\n        .available_balance()\n        .expect(\"Accumulate purse should have a balance.\");\n\n    // The fees should be sent to the accumulation purse.\n    assert_eq!(\n        acc_purse_balance - acc_purse_initial_balance,\n        expected_transfer_cost.into()\n    );\n\n    test.fixture\n        .run_until_block_height(block_height + 10, ONE_MIN)\n        .await;\n\n    let accumulate_purse_balance = *test\n        .get_accumulate_purse_balance(Some(block_height + 10), false)\n        .available_balance()\n        .expect(\"Accumulate purse should have a balance.\");\n\n    assert_eq!(accumulate_purse_balance, U512::from(0));\n}\n\n#[tokio::test]\nasync fn fee_is_accumulated_and_distributed_no_refund_fixed_pricing() {\n    fee_is_accumulated_and_distributed_no_refund(PricingMode::Fixed {\n        gas_price_tolerance: MIN_GAS_PRICE,\n        additional_computation_factor: 0,\n    })\n    .await;\n}\n\n#[tokio::test]\nasync fn fee_is_accumulated_and_distributed_no_refund_payment_limited_pricing() {\n    fee_is_accumulated_and_distributed_no_refund(PricingMode::PaymentLimited {\n        payment_amount: 100_000_000,\n        gas_price_tolerance: MIN_GAS_PRICE,\n        standard_payment: true,\n    })\n    .await;\n}\n\nfn transfer_txn<A: Into<U512>>(\n    from: Arc<SecretKey>,\n    to: &PublicKey,\n    pricing_mode: PricingMode,\n    amount: A,\n) -> Transaction {\n    let mut txn = Transaction::from(\n        TransactionV1Builder::new_transfer(amount, None, to.clone(), None)\n            .unwrap()\n            .with_initiator_addr(PublicKey::from(&*from))\n            .with_pricing_mode(pricing_mode)\n            .with_chain_name(CHAIN_NAME)\n            .build()\n            .unwrap(),\n    );\n    txn.sign(&from);\n    txn\n}\n\npub(crate) fn invalid_wasm_txn(\n    initiator: Arc<SecretKey>,\n    pricing_mode: PricingMode,\n) -> Transaction {\n    //These bytes are intentionally so large - this way they 
fall into \"WASM_LARGE\" category in the\n    // local chainspec Alternatively we could change the chainspec to have a different limits\n    // for the wasm categories, but that would require aligning all tests that use local\n    // chainspec\n    let module_bytes = Bytes::from(vec![1; 172_033]);\n    let mut txn = Transaction::from(\n        TransactionV1Builder::new_session(\n            false,\n            module_bytes,\n            TransactionRuntimeParams::VmCasperV1,\n        )\n        .with_chain_name(CHAIN_NAME)\n        .with_pricing_mode(pricing_mode)\n        .with_initiator_addr(PublicKey::from(&*initiator))\n        .build()\n        .unwrap(),\n    );\n    txn.sign(&initiator);\n    txn\n}\n\nfn valid_wasm_txn(initiator: Arc<SecretKey>, pricing_mode: PricingMode) -> Transaction {\n    let contract_file = RESOURCES_PATH\n        .join(\"..\")\n        .join(\"target\")\n        .join(\"wasm32-unknown-unknown\")\n        .join(\"release\")\n        .join(\"do_nothing.wasm\");\n    let module_bytes = Bytes::from(std::fs::read(contract_file).expect(\"cannot read module bytes\"));\n    let mut txn = Transaction::from(\n        TransactionV1Builder::new_session(\n            false,\n            module_bytes,\n            TransactionRuntimeParams::VmCasperV1,\n        )\n        .with_chain_name(CHAIN_NAME)\n        .with_pricing_mode(pricing_mode)\n        .with_initiator_addr(PublicKey::from(&*initiator))\n        .build()\n        .unwrap(),\n    );\n    txn.sign(&initiator);\n    txn\n}\n\nfn match_pricing_mode(txn_pricing_mode: &PricingMode) -> (PricingHandling, u8, Option<u64>) {\n    match txn_pricing_mode {\n        PricingMode::PaymentLimited {\n            gas_price_tolerance,\n            payment_amount,\n            ..\n        } => (\n            PricingHandling::PaymentLimited,\n            *gas_price_tolerance,\n            Some(*payment_amount),\n        ),\n        PricingMode::Fixed {\n            gas_price_tolerance,\n            ..\n    
    } => (PricingHandling::Fixed, *gas_price_tolerance, None),\n        PricingMode::Prepaid { .. } => unimplemented!(),\n    }\n}\n\n#[tokio::test]\nasync fn holds_should_be_added_and_cleared_fixed_pricing() {\n    holds_should_be_added_and_cleared(PricingMode::Fixed {\n        gas_price_tolerance: MIN_GAS_PRICE,\n        additional_computation_factor: 0,\n    })\n    .await;\n}\n\n#[tokio::test]\nasync fn holds_should_be_added_and_cleared_payment_limited_pricing() {\n    holds_should_be_added_and_cleared(PricingMode::PaymentLimited {\n        payment_amount: 100_000_000,\n        gas_price_tolerance: MIN_GAS_PRICE,\n        standard_payment: true,\n    })\n    .await;\n}\n\nasync fn holds_should_be_added_and_cleared(txn_pricing_mode: PricingMode) {\n    let (price_handling, min_gas_price, gas_limit) = match_pricing_mode(&txn_pricing_mode);\n\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(price_handling)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    let transfer_amount = U512::from(\n        test.chainspec()\n            .transaction_config\n            .native_transfer_minimum_motes,\n    );\n\n    // transfer from bob to charlie\n    let txn = transfer_txn(\n        BOB_SECRET_KEY.clone(),\n        &CHARLIE_PUBLIC_KEY,\n        txn_pricing_mode,\n        transfer_amount,\n    );\n\n    let expected_transfer_gas: u64 = gas_limit.unwrap_or(\n        test.chainspec()\n            .system_costs_config\n            .mint_costs()\n            .transfer\n            .into(),\n    );\n    let expected_transfer_cost = expected_transfer_gas * min_gas_price as u64;\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        
.await;\n\n    let (_, bob_initial_balance, charlie_initial_balance) = test.get_balances(None);\n    let initial_total_supply = test.get_total_supply(None);\n    let charlie_initial_balance = charlie_initial_balance.expect(\"should have balance\").total;\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n    assert!(exec_result_is_success(&exec_result), \"{:?}\", exec_result); // transaction should have succeeded.\n    assert_exec_result_cost(\n        exec_result,\n        expected_transfer_cost.into(),\n        expected_transfer_gas.into(),\n        \"holds_should_be_added_and_cleared\",\n    );\n\n    assert_eq!(\n        initial_total_supply,\n        test.get_total_supply(Some(block_height)),\n        \"total supply should remain unchanged\"\n    );\n\n    // Get the current balances after the transaction and check them.\n    let (_, bob_current_balance, charlie_current_balance) = test.get_balances(Some(block_height));\n\n    let charlie_current_balance = charlie_current_balance.expect(\"should have balance\").total;\n    let charlie_expected_balance = charlie_initial_balance.saturating_add(transfer_amount);\n\n    assert_eq!(\n        charlie_current_balance, charlie_expected_balance,\n        \"charlie's balance should equal transfer amount\"\n    );\n    assert_ne!(\n        bob_current_balance.available, bob_current_balance.total,\n        \"total and available should NOT be equal at this point\"\n    );\n    assert_eq!(\n        bob_initial_balance.total,\n        bob_current_balance.total + transfer_amount,\n        \"total balance should be original total balance - transferred amount\"\n    );\n    assert_eq!(\n        bob_initial_balance.total,\n        bob_current_balance.available + expected_transfer_cost + transfer_amount,\n        \"diff from initial balance should equal available + cost + transfer_amount\"\n    );\n\n    test.fixture\n        .run_until_block_height(block_height + 5, ONE_MIN)\n        .await;\n    
let (_, bob_balance, _) = test.get_balances(Some(block_height + 5));\n    assert_eq!(\n        bob_balance.available, bob_balance.total,\n        \"total and available should be equal at this point\"\n    );\n}\n\n#[tokio::test]\nasync fn fee_holds_are_amortized() {\n    let refund_ratio = Ratio::new(1, 2);\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::Fixed)\n        .with_refund_handling(RefundHandling::Burn { refund_ratio })\n        .with_fee_handling(FeeHandling::NoFee)\n        .with_gas_hold_balance_handling(HoldBalanceHandling::Amortized)\n        .with_balance_hold_interval(TimeDiff::from_seconds(10));\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n    let txn = invalid_wasm_txn(\n        BOB_SECRET_KEY.clone(),\n        PricingMode::Fixed {\n            gas_price_tolerance: MIN_GAS_PRICE,\n            additional_computation_factor: 0,\n        },\n    );\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let (alice_initial_balance, bob_initial_balance, _) = test.get_balances(None);\n    let initial_total_supply = test.get_total_supply(None);\n\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n\n    // Fixed transaction pricing.\n    let expected_transaction_gas: u64 = test\n        .chainspec()\n        .get_max_gas_limit_by_category(LARGE_WASM_LANE_ID);\n\n    let expected_transaction_cost = expected_transaction_gas * MIN_GAS_PRICE as u64;\n    // transaction should not succeed because the wasm bytes are invalid.\n    // this transaction has invalid wasm, so the baseline will be used as consumed\n    assert!(!exec_result_is_success(&exec_result));\n\n    let expected_consumed = Gas::new(0);\n    assert_exec_result_cost(\n        exec_result,\n        
expected_transaction_cost.into(),\n        expected_consumed,\n        \"fee_holds_are_amortized\",\n    );\n\n    // This transaction consumed 0 gas, the unspent gas is equal to the limit, so we apply the\n    // refund ratio to the full transaction cost.\n    // error transactions no longer refund\n    let refund_amount = U512::zero();\n\n    // We set it up so that the refunds are burnt so check this.\n    assert_eq!(\n        test.get_total_supply(Some(block_height)),\n        initial_total_supply - refund_amount\n    );\n\n    let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height));\n    // Bob doesn't get a refund. The refund is burnt. A hold is put in place for the\n    // transaction cost.\n    let bob_balance_hold = U512::from(expected_transaction_cost) - refund_amount;\n    let bob_expected_total_balance = bob_initial_balance.total - refund_amount;\n    let bob_expected_available_balance = bob_current_balance.total - bob_balance_hold;\n\n    // Alice shouldn't get anything since we are operating with no fees\n    let alice_expected_total_balance = alice_initial_balance.total;\n    let alice_expected_available_balance = alice_expected_total_balance;\n\n    assert_eq!(\n        bob_current_balance.available.clone(),\n        bob_expected_available_balance\n    );\n    assert_eq!(\n        bob_current_balance.total.clone(),\n        bob_expected_total_balance\n    );\n    assert_eq!(\n        alice_current_balance.available.clone(),\n        alice_expected_available_balance\n    );\n    assert_eq!(\n        alice_current_balance.total.clone(),\n        alice_expected_total_balance\n    );\n\n    let bob_prev_available_balance = bob_current_balance.available;\n    test.fixture\n        .run_until_block_height(block_height + 1, ONE_MIN)\n        .await;\n    let (_, bob_balance, _) = test.get_balances(Some(block_height + 1));\n    assert!(\n        bob_prev_available_balance < bob_balance.available,\n        \"available 
should have increased since some part of the hold should have been amortized\"\n    );\n\n    // Check to see if more holds have amortized.\n    let bob_prev_available_balance = bob_current_balance.available;\n    test.fixture\n        .run_until_block_height(block_height + 3, ONE_MIN)\n        .await;\n    let (_, bob_balance, _) = test.get_balances(Some(block_height + 3));\n    assert!(\n        bob_prev_available_balance < bob_balance.available,\n        \"available should have increased since some part of the hold should have been amortized\"\n    );\n\n    // After 10s (10 blocks in this case) the holds should have been completely amortized\n    test.fixture\n        .run_until_block_height(block_height + 10, ONE_MIN)\n        .await;\n    let (_, bob_balance, _) = test.get_balances(Some(block_height + 10));\n    assert_eq!(\n        bob_balance.total, bob_balance.available,\n        \"available should have increased since some part of the hold should have been amortized\"\n    );\n}\n\n#[tokio::test]\nasync fn sufficient_balance_is_available_after_amortization() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::Fixed)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee)\n        .with_gas_hold_balance_handling(HoldBalanceHandling::Amortized)\n        .with_balance_hold_interval(TimeDiff::from_seconds(10));\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    let transfer_cost: U512 =\n        U512::from(test.chainspec().system_costs_config.mint_costs().transfer) * MIN_GAS_PRICE;\n    let min_transfer_amount = U512::from(\n        test.chainspec()\n            .transaction_config\n            .native_transfer_minimum_motes,\n    );\n    let half_transfer_cost =\n        
(Ratio::new(U512::from(1), U512::from(2)) * transfer_cost).to_integer();\n\n    // Fund Charlie with some token.\n    let transfer_amount = min_transfer_amount * 2 + transfer_cost + half_transfer_cost;\n    let txn = transfer_txn(\n        BOB_SECRET_KEY.clone(),\n        &CHARLIE_PUBLIC_KEY,\n        PricingMode::Fixed {\n            gas_price_tolerance: MIN_GAS_PRICE,\n            additional_computation_factor: 0,\n        },\n        transfer_amount,\n    );\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n    let charlie_initial_balance = test.get_balances(None).2.expect(\"should have balance\");\n\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n    assert!(exec_result_is_success(&exec_result));\n    let charlie_expected_balance = charlie_initial_balance.total + transfer_amount;\n    let charlie_current_balance = test.get_balances(Some(block_height)).2.unwrap();\n\n    assert_eq!(\n        charlie_current_balance.available.clone(),\n        charlie_expected_balance,\n        \"balance does not match expected\"\n    );\n\n    // Now Charlie has balance to do 2 transfers of the minimum amount but can't pay for both as the\n    // same time. Let's say the min transfer amount is 2_500_000_000 and the cost of a transfer\n    // is 50_000. Charlie now has 5_000_075_000 as set up above. He can transfer 2_500_000_000\n    // which will put a hold of 50_000. His available balance would be 2_500_025_000.\n    // He can't issue a new transfer of 2_500_000_000 right away because he doesn't have enough\n    // balance to pay for the transfer. He'll need to wait until at least half of the holds\n    // amortize. In this case he needs to wait half of the amortization time for 25_000 to\n    // become available to him. 
After this period, he will have 2_500_050_000 available which\n    // will allow him to do another transfer.\n    let txn = transfer_txn(\n        CHARLIE_SECRET_KEY.clone(),\n        &BOB_PUBLIC_KEY,\n        PricingMode::Fixed {\n            gas_price_tolerance: MIN_GAS_PRICE,\n            additional_computation_factor: 0,\n        },\n        min_transfer_amount,\n    );\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n    assert!(exec_result_is_success(&exec_result));\n\n    let charlie_updated_balance = test\n        .get_balances(Some(block_height))\n        .2\n        .expect(\"should have balance\");\n    /* one `min_transfer_amount` * should have gone to Bob. */\n    let expected =\n        charlie_initial_balance.total + min_transfer_amount + transfer_cost + half_transfer_cost;\n    assert_eq!(\n        charlie_updated_balance.total.clone(),\n        expected,\n        \"unexpected balance\"\n    );\n\n    // transfer cost should be held.\n    let expected_available =\n        charlie_initial_balance.total + min_transfer_amount + half_transfer_cost;\n    assert_eq!(\n        charlie_updated_balance.available.clone(),\n        expected_available,\n        \"charlie updated available should represent held cost\"\n    );\n\n    // Let's wait for about 5 sec (5 blocks in this case) which should provide enough time for\n    // half of the holds to get amortized.\n    test.fixture\n        .run_until_block_height(block_height + 5, ONE_MIN)\n        .await;\n    let charlie_post5_balance = test.get_balances(Some(block_height + 5)).2.unwrap();\n    /* right now he should have enough to make a transfer. */\n    assert!(charlie_post5_balance.available >= min_transfer_amount + transfer_cost,);\n    /* some of the holds should still be in place. 
*/\n    assert!(charlie_post5_balance.available < charlie_post5_balance.total,);\n\n    // Send another transfer to Bob for `min_transfer_amount`.\n    let txn = transfer_txn(\n        CHARLIE_SECRET_KEY.clone(),\n        &BOB_PUBLIC_KEY,\n        PricingMode::Fixed {\n            gas_price_tolerance: MIN_GAS_PRICE,\n            additional_computation_factor: 0,\n        },\n        min_transfer_amount,\n    );\n\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n\n    assert!(exec_result_is_success(&exec_result)); // We expect this transfer to succeed since Charlie has enough balance.\n    let charlie_final_balance = test.get_balances(Some(block_height)).2.unwrap();\n\n    let expected_total = charlie_post5_balance.total - min_transfer_amount;\n    assert_eq!(\n        charlie_final_balance.total, expected_total,\n        \"total should match prior amount minus transferred amount\"\n    );\n\n    assert!(\n        charlie_final_balance.available < charlie_final_balance.total,\n        \"some of the holds should still be in place\"\n    );\n\n    test.fixture\n        .run_until_block_height(block_height + 15, ONE_MIN)\n        .await;\n    let charlie_post15_balance = test.get_balances(Some(block_height + 15)).2.unwrap();\n\n    assert_eq!(\n        charlie_post15_balance.available, charlie_post15_balance.total,\n        \"all holds should have amortized back\"\n    );\n}\n\n#[tokio::test]\nasync fn validator_credit_is_written_and_cleared_after_auction() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::Fixed)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee)\n        .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n 
   )\n    .await;\n\n    let transfer_cost: U512 =\n        U512::from(test.chainspec().system_costs_config.mint_costs().transfer) * MIN_GAS_PRICE;\n    let min_transfer_amount = U512::from(\n        test.chainspec()\n            .transaction_config\n            .native_transfer_minimum_motes,\n    );\n    let half_transfer_cost =\n        (Ratio::new(U512::from(1), U512::from(2)) * transfer_cost).to_integer();\n\n    // Fund Charlie with some token.\n    let transfer_amount = min_transfer_amount * 2 + transfer_cost + half_transfer_cost;\n    let txn = transfer_txn(\n        BOB_SECRET_KEY.clone(),\n        &CHARLIE_PUBLIC_KEY,\n        PricingMode::Fixed {\n            gas_price_tolerance: MIN_GAS_PRICE,\n            additional_computation_factor: 0,\n        },\n        transfer_amount,\n    );\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n    let charlie_initial_balance = test.get_balances(None).2.unwrap();\n    assert_eq!(\n        charlie_initial_balance.available.clone(),\n        charlie_initial_balance.total.clone(),\n        \"there should be no holds\"\n    );\n\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n    assert!(exec_result_is_success(&exec_result));\n    let charlie_current_balance = test.get_balances(Some(block_height)).2.unwrap();\n\n    assert_eq!(\n        charlie_current_balance.available, charlie_current_balance.total,\n        \"there should be no holds\"\n    );\n    assert_eq!(\n        charlie_initial_balance.total + transfer_amount,\n        charlie_current_balance.total,\n        \"current balance should include received amount\"\n    );\n\n    let bids =\n        get_bids(&mut test.fixture, Some(block_height)).expect(\"Expected to get some bid records.\");\n\n    let _ = bids\n        .into_iter()\n        .find(|bid_kind| match bid_kind {\n            BidKind::Credit(credit) => {\n                credit.amount() == transfer_cost\n                    
&& credit.validator_public_key() == &*ALICE_PUBLIC_KEY // Alice is the proposer.\n            }\n            _ => false,\n        })\n        .expect(\"Expected to find the credit for the consumed transfer cost in the bid records.\");\n\n    test.fixture\n        .run_until_consensus_in_era(\n            ERA_ONE.saturating_add(test.chainspec().core_config.auction_delay),\n            ONE_MIN,\n        )\n        .await;\n\n    // Check that the credits were cleared after the auction.\n    let bids = get_bids(&mut test.fixture, None).expect(\"Expected to get some bid records.\");\n    assert!(!bids\n        .into_iter()\n        .any(|bid| matches!(bid, BidKind::Credit(_))));\n}\n\n#[tokio::test]\nasync fn add_and_withdraw_bid_transaction() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::Fixed)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee)\n        .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    let bid_amount = test.chainspec().core_config.minimum_bid_amount + 10;\n\n    let mut txn = Transaction::from(\n        TransactionV1Builder::new_add_bid(\n            PublicKey::from(&**BOB_SECRET_KEY),\n            0,\n            bid_amount,\n            None,\n            None,\n            None,\n        )\n        .unwrap()\n        .with_chain_name(CHAIN_NAME)\n        .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY))\n        .build()\n        .unwrap(),\n    );\n    txn.sign(&BOB_SECRET_KEY);\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let (_, _bob_initial_balance, _) = test.get_balances(None);\n    let (_txn_hash, _block_height, exec_result) = 
test.send_transaction(txn).await;\n    assert!(exec_result_is_success(&exec_result));\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_TWO, ONE_MIN)\n        .await;\n\n    let mut txn = Transaction::from(\n        TransactionV1Builder::new_withdraw_bid(PublicKey::from(&**BOB_SECRET_KEY), bid_amount)\n            .unwrap()\n            .with_chain_name(CHAIN_NAME)\n            .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY))\n            .build()\n            .unwrap(),\n    );\n    txn.sign(&BOB_SECRET_KEY);\n\n    let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await;\n    assert!(exec_result_is_success(&exec_result));\n}\n\n#[tokio::test]\nasync fn delegate_and_undelegate_bid_transaction() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::Fixed)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee)\n        .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    let delegate_amount = U512::from(500_000_000_000u64);\n\n    let mut txn = Transaction::from(\n        TransactionV1Builder::new_delegate(\n            PublicKey::from(&**BOB_SECRET_KEY),\n            PublicKey::from(&**ALICE_SECRET_KEY),\n            delegate_amount,\n        )\n        .unwrap()\n        .with_chain_name(CHAIN_NAME)\n        .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY))\n        .build()\n        .unwrap(),\n    );\n    txn.sign(&BOB_SECRET_KEY);\n\n    let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await;\n    assert!(exec_result_is_success(&exec_result));\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let mut txn = 
Transaction::from(\n        TransactionV1Builder::new_undelegate(\n            PublicKey::from(&**BOB_SECRET_KEY),\n            PublicKey::from(&**ALICE_SECRET_KEY),\n            delegate_amount,\n        )\n        .unwrap()\n        .with_chain_name(CHAIN_NAME)\n        .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY))\n        .build()\n        .unwrap(),\n    );\n    txn.sign(&BOB_SECRET_KEY);\n\n    let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await;\n    assert!(exec_result_is_success(&exec_result));\n}\n\n#[tokio::test]\nasync fn insufficient_funds_transfer_from_account() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::Fixed)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee)\n        .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    let transfer_amount = U512::max_value();\n\n    let txn_v1 =\n        TransactionV1Builder::new_transfer(transfer_amount, None, ALICE_PUBLIC_KEY.clone(), None)\n            .unwrap()\n            .with_chain_name(CHAIN_NAME)\n            .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY))\n            .build()\n            .unwrap();\n\n    let mut txn = Transaction::from(txn_v1);\n    txn.sign(&BOB_SECRET_KEY);\n\n    let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await;\n    let ExecutionResult::V2(result) = exec_result else {\n        panic!(\"Expected ExecutionResult::V2 but got {:?}\", exec_result);\n    };\n    let expected_cost: U512 = U512::from(DEFAULT_TRANSFER_COST);\n\n    assert_eq!(result.error_message.as_deref(), Some(\"Insufficient funds\"));\n    assert_eq!(result.cost, 
expected_cost);\n}\n\n#[tokio::test]\nasync fn insufficient_funds_add_bid() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::Fixed)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee)\n        .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let (_, bob_initial_balance, _) = test.get_balances(None);\n    let bid_amount = bob_initial_balance.total;\n\n    let txn =\n        TransactionV1Builder::new_add_bid(BOB_PUBLIC_KEY.clone(), 0, bid_amount, None, None, None)\n            .unwrap()\n            .with_chain_name(CHAIN_NAME)\n            .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY))\n            .build()\n            .unwrap();\n    let price = txn.payment_amount().expect(\"must get payment amount\");\n    let mut txn = Transaction::from(txn);\n    txn.sign(&BOB_SECRET_KEY);\n\n    let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await;\n    let ExecutionResult::V2(result) = exec_result else {\n        panic!(\"Expected ExecutionResult::V2 but got {:?}\", exec_result);\n    };\n    let bid_cost: U512 = U512::from(price) * MIN_GAS_PRICE;\n\n    assert_eq!(\n        result.error_message.as_deref(),\n        Some(\"ApiError::AuctionError(TransferToBidPurse) [64516]\")\n    );\n    assert_eq!(result.cost, bid_cost);\n}\n\n#[tokio::test]\nasync fn insufficient_funds_transfer_from_purse() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::Fixed)\n        .with_refund_handling(RefundHandling::NoRefund)\n        
.with_fee_handling(FeeHandling::NoFee)\n        .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    let purse_name = \"test_purse\";\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    // first we set up a purse for Bob\n    let purse_create_contract = RESOURCES_PATH\n        .join(\"..\")\n        .join(\"target\")\n        .join(\"wasm32-unknown-unknown\")\n        .join(\"release\")\n        .join(\"transfer_main_purse_to_new_purse.wasm\");\n    let module_bytes =\n        Bytes::from(std::fs::read(purse_create_contract).expect(\"cannot read module bytes\"));\n\n    let mut txn = Transaction::from(\n        TransactionV1Builder::new_session(\n            false,\n            module_bytes,\n            TransactionRuntimeParams::VmCasperV1,\n        )\n        .with_runtime_args(runtime_args! 
{ \"destination\" => purse_name, \"amount\" => U512::zero() })\n        .with_chain_name(CHAIN_NAME)\n        .with_initiator_addr(BOB_PUBLIC_KEY.clone())\n        .build()\n        .unwrap(),\n    );\n    txn.sign(&BOB_SECRET_KEY);\n    let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await;\n    assert!(exec_result_is_success(&exec_result), \"{:?}\", exec_result);\n\n    let state_root_hash = *test.fixture.highest_complete_block().state_root_hash();\n    let entity_addr = get_entity_addr_from_account_hash(\n        &mut test.fixture,\n        state_root_hash,\n        BOB_PUBLIC_KEY.to_account_hash(),\n    );\n    let key = get_entity_named_key(&mut test.fixture, state_root_hash, entity_addr, purse_name)\n        .expect(\"expected a key\");\n    let uref = *key.as_uref().expect(\"Expected a URef\");\n\n    // now we try to transfer from the purse we just created\n    let transfer_amount = U512::max_value();\n    let txn = TransactionV1Builder::new_transfer(\n        transfer_amount,\n        Some(uref),\n        ALICE_PUBLIC_KEY.clone(),\n        None,\n    )\n    .unwrap()\n    .with_chain_name(CHAIN_NAME)\n    .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY))\n    .build()\n    .unwrap();\n\n    let mut txn = Transaction::from(txn);\n    txn.sign(&BOB_SECRET_KEY);\n\n    let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await;\n    let ExecutionResult::V2(result) = exec_result else {\n        panic!(\"Expected ExecutionResult::V2 but got {:?}\", exec_result);\n    };\n    let expected_cost: U512 = U512::from(DEFAULT_TRANSFER_COST);\n\n    assert_eq!(result.error_message.as_deref(), Some(\"Insufficient funds\"));\n    assert_eq!(result.cost, expected_cost);\n}\n\n#[tokio::test]\nasync fn insufficient_funds_when_caller_lacks_minimum_balance() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::Fixed)\n        
.with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee)\n        .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let (_, bob_initial_balance, _) = test.get_balances(None);\n    let transfer_amount = bob_initial_balance.total - U512::one();\n    let txn =\n        TransactionV1Builder::new_transfer(transfer_amount, None, ALICE_PUBLIC_KEY.clone(), None)\n            .unwrap()\n            .with_chain_name(CHAIN_NAME)\n            .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY))\n            .build()\n            .unwrap();\n\n    let mut txn = Transaction::from(txn);\n    txn.sign(&BOB_SECRET_KEY);\n\n    let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await;\n    let ExecutionResult::V2(result) = exec_result else {\n        panic!(\"Expected ExecutionResult::V2 but got {:?}\", exec_result);\n    };\n    let expected_cost: U512 = U512::from(DEFAULT_TRANSFER_COST);\n\n    assert_eq!(result.error_message.as_deref(), Some(\"Insufficient funds\"));\n    assert_eq!(result.cost, expected_cost);\n}\n\n#[tokio::test]\nasync fn charge_when_session_code_succeeds() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::Fixed)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::PayToProposer);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    
let contract = RESOURCES_PATH\n        .join(\"..\")\n        .join(\"target\")\n        .join(\"wasm32-unknown-unknown\")\n        .join(\"release\")\n        .join(\"transfer_purse_to_account.wasm\");\n    let module_bytes = Bytes::from(std::fs::read(contract).expect(\"cannot read module bytes\"));\n\n    let (alice_initial_balance, bob_initial_balance, _) = test.get_balances(None);\n\n    let transferred_amount = 1;\n    let mut txn = Transaction::from(\n        TransactionV1Builder::new_session(\n            false,\n            module_bytes,\n            TransactionRuntimeParams::VmCasperV1,\n        )\n        .with_runtime_args(runtime_args! {\n            ARG_TARGET => CHARLIE_PUBLIC_KEY.to_account_hash(),\n            ARG_AMOUNT => U512::from(transferred_amount)\n        })\n        .with_chain_name(CHAIN_NAME)\n        .with_initiator_addr(BOB_PUBLIC_KEY.clone())\n        .with_pricing_mode(PricingMode::Fixed {\n            gas_price_tolerance: 5,\n            additional_computation_factor: 2, /*Makes the transaction\n                                               * \"Large\" despite the fact that the actual\n                                               * WASM bytes categorize it as \"Small\" */\n        })\n        .build()\n        .unwrap(),\n    );\n    txn.sign(&BOB_SECRET_KEY);\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n    assert!(exec_result_is_success(&exec_result), \"{:?}\", exec_result);\n\n    let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height));\n    // alice should get the fee since she is the proposer.\n    let fee = alice_current_balance.total - alice_initial_balance.total;\n\n    assert!(\n        fee > U512::zero(),\n        \"fee is {}, expected to be greater than 0\",\n        fee\n    );\n    assert_eq!(\n        bob_current_balance.total,\n        bob_initial_balance.total - transferred_amount - fee,\n        \"bob should pay the fee\"\n    
);\n}\n\n#[tokio::test]\nasync fn charge_when_session_code_fails_with_user_error() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::Fixed)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::PayToProposer);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let revert_contract = RESOURCES_PATH\n        .join(\"..\")\n        .join(\"target\")\n        .join(\"wasm32-unknown-unknown\")\n        .join(\"release\")\n        .join(\"revert.wasm\");\n    let module_bytes =\n        Bytes::from(std::fs::read(revert_contract).expect(\"cannot read module bytes\"));\n\n    let (alice_initial_balance, bob_initial_balance, _) = test.get_balances(None);\n\n    let mut txn = Transaction::from(\n        TransactionV1Builder::new_session(\n            false,\n            module_bytes,\n            TransactionRuntimeParams::VmCasperV1,\n        )\n        .with_chain_name(CHAIN_NAME)\n        .with_initiator_addr(BOB_PUBLIC_KEY.clone())\n        .build()\n        .unwrap(),\n    );\n    txn.sign(&BOB_SECRET_KEY);\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n    assert!(\n        matches!(\n            &exec_result,\n            ExecutionResult::V2(res) if res.error_message.as_deref() == Some(\"User error: 100\")\n        ),\n        \"{:?}\",\n        exec_result.error_message()\n    );\n\n    let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height));\n    // alice should get the fee since she is the proposer.\n    let fee = alice_current_balance.total - alice_initial_balance.total;\n\n    assert!(\n        fee > U512::zero(),\n        \"fee is {}, 
expected to be greater than 0\",\n        fee\n    );\n    let init = bob_initial_balance.total;\n    let curr = bob_current_balance.total;\n    let actual = curr;\n    let expected = init - fee;\n    assert_eq!(actual, expected, \"init {} curr {} fee {}\", init, curr, fee,);\n}\n\n#[tokio::test]\nasync fn charge_when_session_code_runs_out_of_gas() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::Fixed)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::PayToProposer);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let revert_contract = RESOURCES_PATH\n        .join(\"..\")\n        .join(\"target\")\n        .join(\"wasm32-unknown-unknown\")\n        .join(\"release\")\n        .join(\"endless_loop.wasm\");\n    let module_bytes =\n        Bytes::from(std::fs::read(revert_contract).expect(\"cannot read module bytes\"));\n\n    let (alice_initial_balance, bob_initial_balance, _) = test.get_balances(None);\n\n    let mut txn = Transaction::from(\n        TransactionV1Builder::new_session(\n            false,\n            module_bytes,\n            TransactionRuntimeParams::VmCasperV1,\n        )\n        .with_chain_name(CHAIN_NAME)\n        .with_initiator_addr(BOB_PUBLIC_KEY.clone())\n        .build()\n        .unwrap(),\n    );\n    txn.sign(&BOB_SECRET_KEY);\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n    assert!(\n        matches!(\n            &exec_result,\n            ExecutionResult::V2(res) if res.error_message.as_deref() == Some(\"Out of gas error\")\n        ),\n        \"{:?}\",\n        exec_result\n    );\n\n    let (alice_current_balance, 
bob_current_balance, _) = test.get_balances(Some(block_height));\n    // alice should get the fee since she is the proposer.\n    let fee = alice_current_balance.total - alice_initial_balance.total;\n\n    assert!(\n        fee > U512::zero(),\n        \"fee is {}, expected to be greater than 0\",\n        fee\n    );\n    assert_eq!(\n        bob_current_balance.total,\n        bob_initial_balance.total - fee,\n        \"bob should pay the fee\"\n    );\n}\n\n#[tokio::test]\nasync fn successful_purse_to_purse_transfer() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::Fixed)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee)\n        .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    let purse_name = \"test_purse\";\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let (alice_initial_balance, _, _) = test.get_balances(None);\n\n    // first we set up a purse for Bob\n    let purse_create_contract = RESOURCES_PATH\n        .join(\"..\")\n        .join(\"target\")\n        .join(\"wasm32-unknown-unknown\")\n        .join(\"release\")\n        .join(\"transfer_main_purse_to_new_purse.wasm\");\n    let module_bytes =\n        Bytes::from(std::fs::read(purse_create_contract).expect(\"cannot read module bytes\"));\n\n    let baseline_motes = test\n        .fixture\n        .chainspec\n        .core_config\n        .baseline_motes_amount_u512();\n\n    let mut txn = Transaction::from(\n        TransactionV1Builder::new_session(\n            false,\n            module_bytes,\n            TransactionRuntimeParams::VmCasperV1,\n        )\n        .with_runtime_args(\n            
runtime_args! { \"destination\" => purse_name, \"amount\" => baseline_motes + U512::one() },\n        )\n        .with_chain_name(CHAIN_NAME)\n        .with_initiator_addr(BOB_PUBLIC_KEY.clone())\n        .build()\n        .unwrap(),\n    );\n    txn.sign(&BOB_SECRET_KEY);\n    let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await;\n    assert!(exec_result_is_success(&exec_result), \"{:?}\", exec_result);\n\n    let state_root_hash = *test.fixture.highest_complete_block().state_root_hash();\n    let bob_addr = get_entity_addr_from_account_hash(\n        &mut test.fixture,\n        state_root_hash,\n        BOB_PUBLIC_KEY.to_account_hash(),\n    );\n    let bob_purse_key =\n        get_entity_named_key(&mut test.fixture, state_root_hash, bob_addr, purse_name)\n            .expect(\"expected a key\");\n    let bob_purse = *bob_purse_key.as_uref().expect(\"Expected a URef\");\n\n    let alice_addr = get_entity_addr_from_account_hash(\n        &mut test.fixture,\n        state_root_hash,\n        ALICE_PUBLIC_KEY.to_account_hash(),\n    );\n    let alice = get_entity(&mut test.fixture, state_root_hash, alice_addr);\n\n    // now we try to transfer from the purse we just created\n    let transfer_amount = 1;\n    let mut txn = Transaction::from(\n        TransactionV1Builder::new_transfer(\n            transfer_amount,\n            Some(bob_purse),\n            alice.main_purse(),\n            None,\n        )\n        .unwrap()\n        .with_chain_name(CHAIN_NAME)\n        .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY))\n        .build()\n        .unwrap(),\n    );\n    txn.sign(&BOB_SECRET_KEY);\n\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n    assert!(exec_result_is_success(&exec_result), \"{:?}\", exec_result);\n\n    let (alice_current_balance, _, _) = test.get_balances(Some(block_height));\n    assert_eq!(\n        alice_current_balance.total,\n        alice_initial_balance.total + 
transfer_amount,\n    );\n}\n\n#[tokio::test]\nasync fn successful_purse_to_account_transfer() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::Fixed)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee)\n        .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    let purse_name = \"test_purse\";\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let (alice_initial_balance, _, _) = test.get_balances(None);\n\n    // first we set up a purse for Bob\n    let purse_create_contract = RESOURCES_PATH\n        .join(\"..\")\n        .join(\"target\")\n        .join(\"wasm32-unknown-unknown\")\n        .join(\"release\")\n        .join(\"transfer_main_purse_to_new_purse.wasm\");\n    let module_bytes =\n        Bytes::from(std::fs::read(purse_create_contract).expect(\"cannot read module bytes\"));\n\n    let baseline_motes = test\n        .fixture\n        .chainspec\n        .core_config\n        .baseline_motes_amount_u512();\n    let mut txn = Transaction::from(\n        TransactionV1Builder::new_session(\n            false,\n            module_bytes,\n            TransactionRuntimeParams::VmCasperV1,\n        )\n        .with_runtime_args(\n            runtime_args! 
{ \"destination\" => purse_name, \"amount\" => baseline_motes + U512::one() },\n        )\n        .with_chain_name(CHAIN_NAME)\n        .with_initiator_addr(BOB_PUBLIC_KEY.clone())\n        .build()\n        .unwrap(),\n    );\n    txn.sign(&BOB_SECRET_KEY);\n    let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await;\n    assert!(exec_result_is_success(&exec_result), \"{:?}\", exec_result);\n\n    let state_root_hash = *test.fixture.highest_complete_block().state_root_hash();\n    let bob_addr = get_entity_addr_from_account_hash(\n        &mut test.fixture,\n        state_root_hash,\n        BOB_PUBLIC_KEY.to_account_hash(),\n    );\n    let bob_purse_key =\n        get_entity_named_key(&mut test.fixture, state_root_hash, bob_addr, purse_name)\n            .expect(\"expected a key\");\n    let bob_purse = *bob_purse_key.as_uref().expect(\"Expected a URef\");\n\n    // now we try to transfer from the purse we just created\n    let transfer_amount = 1;\n    let mut txn = Transaction::from(\n        TransactionV1Builder::new_transfer(\n            transfer_amount,\n            Some(bob_purse),\n            ALICE_PUBLIC_KEY.clone(),\n            None,\n        )\n        .unwrap()\n        .with_chain_name(CHAIN_NAME)\n        .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY))\n        .build()\n        .unwrap(),\n    );\n    txn.sign(&BOB_SECRET_KEY);\n\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n    assert!(exec_result_is_success(&exec_result), \"{:?}\", exec_result);\n\n    let (alice_current_balance, _, _) = test.get_balances(Some(block_height));\n    assert_eq!(\n        alice_current_balance.total,\n        alice_initial_balance.total + transfer_amount,\n    );\n}\n\nasync fn bob_transfers_to_charlie_via_native_transfer_deploy(\n    configs_override: ConfigsOverride,\n    with_source: bool,\n) -> ExecutionResult {\n    let mut test = SingleTransactionTestCase::new(\n        
ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(configs_override),\n    )\n    .await;\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let state_root_hash = *test.fixture.highest_complete_block().state_root_hash();\n    let entity = get_entity_by_account_hash(\n        &mut test.fixture,\n        state_root_hash,\n        BOB_PUBLIC_KEY.to_account_hash(),\n    );\n\n    let source = if with_source {\n        Some(entity.main_purse())\n    } else {\n        None\n    };\n\n    let mut txn: Transaction = Deploy::native_transfer(\n        CHAIN_NAME.to_string(),\n        source,\n        BOB_PUBLIC_KEY.clone(),\n        CHARLIE_PUBLIC_KEY.clone(),\n        None,\n        Timestamp::now(),\n        TimeDiff::from_seconds(600),\n        10,\n    )\n    .into();\n    txn.sign(&BOB_SECRET_KEY);\n    let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await;\n    exec_result\n}\n\n#[tokio::test]\nasync fn should_transfer_with_source_purse_deploy_fixed_norefund_nofee() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::Fixed)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee)\n        .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued);\n    let exec_result = bob_transfers_to_charlie_via_native_transfer_deploy(config, true).await;\n\n    assert!(exec_result_is_success(&exec_result), \"{:?}\", exec_result);\n    assert_eq!(\n        exec_result.transfers().len(),\n        1,\n        \"native transfer should have exactly 1 transfer\"\n    );\n}\n\n#[tokio::test]\nasync fn should_transfer_with_source_purse_deploy_payment_limited_refund_fee() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::PaymentLimited)\n        
.with_refund_handling(RefundHandling::Refund {\n            refund_ratio: Ratio::new(99, 100),\n        })\n        .with_fee_handling(FeeHandling::PayToProposer)\n        .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued);\n    let exec_result = bob_transfers_to_charlie_via_native_transfer_deploy(config, true).await;\n    assert!(exec_result_is_success(&exec_result), \"{:?}\", exec_result);\n    assert_eq!(\n        exec_result.transfers().len(),\n        1,\n        \"native transfer should have exactly 1 transfer\"\n    );\n    assert_eq!(\n        exec_result.refund(),\n        Some(U512::zero()),\n        \"cost should equal consumed thus no refund\"\n    );\n}\n\n#[tokio::test]\nasync fn should_charge_for_insufficient_funds_deploy_payment_limited_refund_fee() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::PaymentLimited)\n        .with_refund_handling(RefundHandling::Refund {\n            refund_ratio: Ratio::new(75, 100),\n        })\n        .with_fee_handling(FeeHandling::PayToProposer);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let charlie_balance = test\n        .get_balances(None)\n        .2\n        .expect(\"should have charlie balance\")\n        .available;\n\n    assert_eq!(\n        charlie_balance,\n        U512::from(u32::MAX - 1),\n        \"charlie balance should be u32::MAX - 1\"\n    );\n    let payment_amount = charlie_balance.saturating_add(U512::from(1)).as_u64();\n\n    let txn = valid_wasm_txn(\n        CHARLIE_SECRET_KEY.clone(),\n        PricingMode::PaymentLimited {\n            payment_amount,\n            gas_price_tolerance: 1,\n            standard_payment: true,\n        },\n    );\n    let 
(_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await;\n    let ExecutionResult::V2(result) = exec_result else {\n        panic!(\"Expected ExecutionResult::V2 but got {:?}\", exec_result);\n    };\n\n    assert!(!result.effects.is_empty(), \"should have effects\");\n    let expected_cost: U512 = charlie_balance;\n\n    assert_eq!(result.error_message.as_deref(), Some(\"Insufficient funds\"));\n    assert_eq!(result.cost, expected_cost);\n}\n\n#[tokio::test]\nasync fn should_charge_for_marginal_insufficient_funds_deploy_payment_limited_refund_fee() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::PaymentLimited)\n        .with_refund_handling(RefundHandling::Refund {\n            refund_ratio: Ratio::new(75, 100),\n        })\n        .with_fee_handling(FeeHandling::PayToProposer);\n\n    let base_amount = 100_000_000_000_000_000u64;\n    let charlie_base_amount = 10_000_000_000u64;\n    let mut test = {\n        let alice_public_key = PublicKey::from(&*ALICE_SECRET_KEY.clone());\n        let bob_public_key = PublicKey::from(&*BOB_SECRET_KEY.clone());\n        let charlie_public_key = PublicKey::from(&*CHARLIE_SECRET_KEY.clone());\n\n        let stakes = {\n            let mut ret = BTreeMap::new();\n            ret.insert(\n                alice_public_key.clone(),\n                (U512::from(base_amount), U512::from(u128::MAX)),\n            );\n            ret.insert(\n                bob_public_key.clone(),\n                (U512::from(base_amount), U512::from(1)),\n            );\n\n            ret.insert(\n                charlie_public_key,\n                (U512::from(charlie_base_amount), U512::from(1)),\n            );\n            ret\n        };\n\n        SingleTransactionTestCase::new_with_stakes(\n            ALICE_SECRET_KEY.clone(),\n            BOB_SECRET_KEY.clone(),\n            CHARLIE_SECRET_KEY.clone(),\n            Some(config),\n            stakes,\n  
      )\n        .await\n    };\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let charlie_balance = test\n        .get_balances(None)\n        .2\n        .expect(\"should have charlie balance\")\n        .available;\n\n    assert_eq!(\n        charlie_balance,\n        U512::from(charlie_base_amount),\n        \"charlie balance should be charlie_base_amount\"\n    );\n    // make payment 1 more than charlie has\n    let payment_amount = charlie_balance.saturating_add(U512::from(1)).as_u64();\n\n    let txn = valid_wasm_txn(\n        CHARLIE_SECRET_KEY.clone(),\n        PricingMode::PaymentLimited {\n            payment_amount,\n            gas_price_tolerance: 1,\n            standard_payment: true,\n        },\n    );\n    let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await;\n    let ExecutionResult::V2(result) = exec_result else {\n        panic!(\"Expected ExecutionResult::V2 but got {:?}\", exec_result);\n    };\n\n    assert!(!result.effects.is_empty(), \"should have effects\");\n    let expected_cost: U512 = charlie_balance;\n\n    assert_eq!(result.error_message.as_deref(), Some(\"Insufficient funds\"));\n    assert_eq!(result.cost, expected_cost);\n}\n\nasync fn make_new_account_and_exec_wasm(\n    config: ConfigsOverride,\n    initial_balance: u64,\n    wasm_payment_amount: u64,\n) -> Result<ExecutionResult, String> {\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    // fund a new account\n    let dan_secret_key =\n        Arc::new(SecretKey::ed25519_from_bytes([0xDD; SecretKey::ED25519_LENGTH]).unwrap());\n    let dan_public_key = PublicKey::from(&*dan_secret_key.clone());\n    let _dan_account_hash = 
dan_public_key.to_account_hash();\n\n    let transfer_payment_amount = 100_000_000u64;\n    let txn = transfer_txn(\n        ALICE_SECRET_KEY.clone(),\n        &dan_public_key,\n        PricingMode::PaymentLimited {\n            payment_amount: transfer_payment_amount,\n            gas_price_tolerance: 3,\n            standard_payment: true,\n        },\n        initial_balance,\n    );\n\n    let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await;\n    let ExecutionResult::V2(result) = exec_result else {\n        return Err(format!(\n            \"Expected ExecutionResult::V2 but got {:?}\",\n            exec_result\n        ));\n    };\n\n    assert!(!result.effects.is_empty(), \"should have effects\");\n    assert!(\n        result.error_message.is_none(),\n        \"transfer to create new account should not have error msg\"\n    );\n\n    let txn = valid_wasm_txn(\n        dan_secret_key.clone(),\n        PricingMode::PaymentLimited {\n            payment_amount: wasm_payment_amount,\n            gas_price_tolerance: 3,\n            standard_payment: true,\n        },\n    );\n    let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await;\n\n    Ok(exec_result)\n}\n\n#[tokio::test]\nasync fn should_charge_new_account_insufficient_funds_deploy_payment_limited_refund_fee() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::PaymentLimited)\n        .with_refund_handling(RefundHandling::Refund {\n            refund_ratio: Ratio::new(75, 100),\n        })\n        .with_fee_handling(FeeHandling::PayToProposer);\n\n    let dan_base_amount = 10_000_000_000u64;\n    // pay more than available with the new account\n    let wasm_payment_amount = dan_base_amount.saturating_add(100);\n    match make_new_account_and_exec_wasm(config, dan_base_amount, wasm_payment_amount).await {\n        Ok(exec_result) => {\n            let ExecutionResult::V2(exec_result) = 
exec_result else {\n                panic!(\"Expected ExecutionResult::V2 but got {:?}\", exec_result);\n            };\n            assert!(!exec_result.effects.is_empty(), \"should have effects\");\n            let expected_cost: U512 = dan_base_amount.into();\n\n            assert_eq!(\n                exec_result.error_message.as_deref(),\n                Some(\"Insufficient funds\")\n            );\n            assert_eq!(\n                exec_result.cost, expected_cost,\n                \"cost should be expected val\"\n            );\n            assert_eq!(exec_result.refund, U512::zero(), \"refund should be 0\");\n        }\n        Err(err_str) => {\n            panic!(\"{}\", err_str)\n        }\n    }\n}\n\n#[tokio::test]\nasync fn should_charge_new_account_insufficient_funds_deploy_payment_limited_refund_fee_price_2() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::PaymentLimited)\n        .with_refund_handling(RefundHandling::Refund {\n            refund_ratio: Ratio::new(75, 100),\n        })\n        .with_fee_handling(FeeHandling::PayToProposer)\n        .with_min_gas_price(2)\n        .with_max_gas_price(3);\n\n    let dan_base_amount = 10_000_000_000u64;\n    // pay more than available with the new account\n    let wasm_payment_amount = dan_base_amount.saturating_add(100);\n    match make_new_account_and_exec_wasm(config, dan_base_amount, wasm_payment_amount).await {\n        Ok(exec_result) => {\n            let ExecutionResult::V2(exec_result) = exec_result else {\n                panic!(\"Expected ExecutionResult::V2 but got {:?}\", exec_result);\n            };\n            assert!(!exec_result.effects.is_empty(), \"should have effects\");\n            let expected_cost: U512 = dan_base_amount.into();\n\n            assert_eq!(\n                exec_result.error_message.as_deref(),\n                Some(\"Insufficient funds\")\n            );\n            assert_eq!(\n      
          exec_result.cost, expected_cost,\n                \"cost should be expected val\"\n            );\n            assert_eq!(exec_result.refund, U512::zero(), \"refund should be 0\");\n        }\n        Err(err_str) => {\n            panic!(\"{}\", err_str)\n        }\n    }\n}\n\n#[tokio::test]\nasync fn should_transfer_with_main_purse_deploy_fixed_norefund_nofee() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::Fixed)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee)\n        .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued);\n    let exec_result = bob_transfers_to_charlie_via_native_transfer_deploy(config, false).await;\n\n    assert!(exec_result_is_success(&exec_result), \"{:?}\", exec_result);\n    assert_eq!(\n        exec_result.transfers().len(),\n        1,\n        \"native transfer should have exactly 1 transfer\"\n    );\n}\n\n#[tokio::test]\nasync fn should_transfer_with_main_purse_deploy_payment_limited_refund_fee() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::PaymentLimited)\n        .with_refund_handling(RefundHandling::Refund {\n            refund_ratio: Ratio::new(99, 100),\n        })\n        .with_fee_handling(FeeHandling::PayToProposer)\n        .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued);\n    let exec_result = bob_transfers_to_charlie_via_native_transfer_deploy(config, false).await;\n    assert!(exec_result_is_success(&exec_result), \"{:?}\", exec_result);\n    assert_eq!(\n        exec_result.transfers().len(),\n        1,\n        \"native transfer should have exactly 1 transfer\"\n    );\n    assert_eq!(\n        exec_result.refund(),\n        Some(U512::zero()),\n        \"cost should equal consumed thus no refund\"\n    );\n}\n\n#[tokio::test]\nasync fn out_of_gas_txn_does_not_produce_effects() {\n    let 
config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::Fixed)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::PayToProposer);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    // This WASM creates named key called \"new_key\". Then it would loop endlessly trying to write a\n    // value to storage. Eventually it will run out of gas and it should exit causing a revert.\n    let revert_contract = RESOURCES_PATH\n        .join(\"..\")\n        .join(\"target\")\n        .join(\"wasm32-unknown-unknown\")\n        .join(\"release\")\n        .join(\"endless_loop_with_effects.wasm\");\n    let module_bytes =\n        Bytes::from(std::fs::read(revert_contract).expect(\"cannot read module bytes\"));\n\n    let mut txn = Transaction::from(\n        TransactionV1Builder::new_session(\n            false,\n            module_bytes,\n            TransactionRuntimeParams::VmCasperV1,\n        )\n        .with_chain_name(CHAIN_NAME)\n        .with_initiator_addr(BOB_PUBLIC_KEY.clone())\n        .build()\n        .unwrap(),\n    );\n    txn.sign(&BOB_SECRET_KEY);\n    let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await;\n    assert!(\n        matches!(\n            &exec_result,\n            ExecutionResult::V2(res) if res.error_message.as_deref() == Some(\"Out of gas error\")\n        ),\n        \"{:?}\",\n        exec_result\n    );\n\n    let state_root_hash = *test\n        .fixture\n        .get_block_by_height(block_height)\n        .state_root_hash();\n    let bob_addr = get_entity_addr_from_account_hash(\n        &mut test.fixture,\n        state_root_hash,\n        
BOB_PUBLIC_KEY.to_account_hash(),\n    );\n\n    // Named key should not exist since the execution was reverted because it was out of gas.\n    assert!(\n        get_entity_named_key(&mut test.fixture, state_root_hash, bob_addr, \"new_key\").is_none()\n    );\n}\n\n#[tokio::test]\nasync fn gas_holds_accumulate_for_multiple_transactions_in_the_same_block() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_min_gas_price(MIN_GAS_PRICE)\n        .with_max_gas_price(MIN_GAS_PRICE)\n        .with_pricing_handling(PricingHandling::Fixed)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee)\n        .with_balance_hold_interval(TimeDiff::from_seconds(5));\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    const TRANSFER_AMOUNT: u64 = 30_000_000_000;\n\n    let chain_name = test.fixture.chainspec.network_config.name.clone();\n    let txn_pricing_mode = PricingMode::Fixed {\n        gas_price_tolerance: MIN_GAS_PRICE,\n        additional_computation_factor: 0,\n    };\n    let expected_transfer_gas = test.chainspec().system_costs_config.mint_costs().transfer;\n    let expected_transfer_cost: U512 = U512::from(expected_transfer_gas) * MIN_GAS_PRICE;\n\n    let mut txn_1 = Transaction::from(\n        TransactionV1Builder::new_transfer(TRANSFER_AMOUNT, None, CHARLIE_PUBLIC_KEY.clone(), None)\n            .unwrap()\n            .with_initiator_addr(ALICE_PUBLIC_KEY.clone())\n            .with_pricing_mode(txn_pricing_mode.clone())\n            .with_chain_name(chain_name.clone())\n            .build()\n            .unwrap(),\n    );\n    txn_1.sign(&ALICE_SECRET_KEY);\n    let txn_1_hash = txn_1.hash();\n\n    let mut txn_2 = Transaction::from(\n     
   TransactionV1Builder::new_transfer(\n            2 * TRANSFER_AMOUNT,\n            None,\n            CHARLIE_PUBLIC_KEY.clone(),\n            None,\n        )\n        .unwrap()\n        .with_initiator_addr(ALICE_PUBLIC_KEY.clone())\n        .with_pricing_mode(txn_pricing_mode.clone())\n        .with_chain_name(chain_name.clone())\n        .build()\n        .unwrap(),\n    );\n    txn_2.sign(&ALICE_SECRET_KEY);\n    let txn_2_hash = txn_2.hash();\n\n    let mut txn_3 = Transaction::from(\n        TransactionV1Builder::new_transfer(\n            3 * TRANSFER_AMOUNT,\n            None,\n            CHARLIE_PUBLIC_KEY.clone(),\n            None,\n        )\n        .unwrap()\n        .with_initiator_addr(ALICE_PUBLIC_KEY.clone())\n        .with_pricing_mode(txn_pricing_mode)\n        .with_chain_name(chain_name)\n        .build()\n        .unwrap(),\n    );\n    txn_3.sign(&ALICE_SECRET_KEY);\n    let txn_3_hash = txn_3.hash();\n\n    test.fixture.inject_transaction(txn_1).await;\n    test.fixture.inject_transaction(txn_2).await;\n    test.fixture.inject_transaction(txn_3).await;\n\n    test.fixture\n        .run_until_executed_transaction(&txn_1_hash, TEN_SECS)\n        .await;\n    test.fixture\n        .run_until_executed_transaction(&txn_2_hash, TEN_SECS)\n        .await;\n    test.fixture\n        .run_until_executed_transaction(&txn_3_hash, TEN_SECS)\n        .await;\n\n    let (_node_id, runner) = test.fixture.network.nodes().iter().next().unwrap();\n    let ExecutionInfo {\n        block_height: txn_1_block_height,\n        execution_result: txn_1_exec_result,\n        ..\n    } = runner\n        .main_reactor()\n        .storage()\n        .read_execution_info(txn_1_hash)\n        .expect(\"Expected transaction to be included in a block.\");\n    let ExecutionInfo {\n        block_height: txn_2_block_height,\n        execution_result: txn_2_exec_result,\n        ..\n    } = runner\n        .main_reactor()\n        .storage()\n        
.read_execution_info(txn_2_hash)\n        .expect(\"Expected transaction to be included in a block.\");\n    let ExecutionInfo {\n        block_height: txn_3_block_height,\n        execution_result: txn_3_exec_result,\n        ..\n    } = runner\n        .main_reactor()\n        .storage()\n        .read_execution_info(txn_3_hash)\n        .expect(\"Expected transaction to be included in a block.\");\n\n    let txn_1_exec_result = txn_1_exec_result.expect(\"Expected result for txn 1\");\n    let txn_2_exec_result = txn_2_exec_result.expect(\"Expected result for txn 2\");\n    let txn_3_exec_result = txn_3_exec_result.expect(\"Expected result for txn 3\");\n\n    assert!(exec_result_is_success(&txn_1_exec_result));\n    assert!(exec_result_is_success(&txn_2_exec_result));\n    assert!(exec_result_is_success(&txn_3_exec_result));\n\n    assert_exec_result_cost(\n        txn_1_exec_result,\n        expected_transfer_cost,\n        expected_transfer_gas.into(),\n        \"gas_holds_accumulate_for_multiple_transactions_in_the_same_block txn1\",\n    );\n    assert_exec_result_cost(\n        txn_2_exec_result,\n        expected_transfer_cost,\n        expected_transfer_gas.into(),\n        \"gas_holds_accumulate_for_multiple_transactions_in_the_same_block txn2\",\n    );\n    assert_exec_result_cost(\n        txn_3_exec_result,\n        expected_transfer_cost,\n        expected_transfer_gas.into(),\n        \"gas_holds_accumulate_for_multiple_transactions_in_the_same_block txn3\",\n    );\n\n    let max_block_height = std::cmp::max(\n        std::cmp::max(txn_1_block_height, txn_2_block_height),\n        txn_3_block_height,\n    );\n    let alice_total_holds: U512 = get_balance(\n        &test.fixture,\n        &ALICE_PUBLIC_KEY,\n        Some(max_block_height),\n        false,\n    )\n    .proofs_result()\n    .expect(\"Expected Alice to proof results.\")\n    .balance_holds()\n    .expect(\"Expected Alice to have holds.\")\n    .values()\n    .map(|block_holds| 
block_holds.values().copied().sum())\n    .sum();\n    assert_eq!(\n        alice_total_holds,\n        expected_transfer_cost * 3,\n        \"Total holds amount should be equal to the cost of the 3 transactions.\"\n    );\n\n    test.fixture\n        .run_until_block_height(max_block_height + 5, ONE_MIN)\n        .await;\n    let alice_total_holds: U512 = get_balance(&test.fixture, &ALICE_PUBLIC_KEY, None, false)\n        .proofs_result()\n        .expect(\"Expected Alice to proof results.\")\n        .balance_holds()\n        .expect(\"Expected Alice to have holds.\")\n        .values()\n        .map(|block_holds| block_holds.values().copied().sum())\n        .sum();\n    assert_eq!(\n        alice_total_holds,\n        U512::from(0),\n        \"Holds should have expired.\"\n    );\n}\n\n#[tokio::test]\nasync fn gh_5058_regression_custom_payment_with_deploy_variant_works() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::PaymentLimited)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    // This WASM creates named key called \"new_key\". Then it would loop endlessly trying to write a\n    // value to storage. 
Eventually it will run out of gas and it should exit causing a revert.\n    let base_path = RESOURCES_PATH\n        .parent()\n        .unwrap()\n        .join(\"target\")\n        .join(\"wasm32-unknown-unknown\")\n        .join(\"release\");\n\n    let payment_amount = U512::from(2_500_000_000u64);\n\n    let txn = {\n        let timestamp = Timestamp::now();\n        let ttl = TimeDiff::from_seconds(100);\n        let gas_price = 1;\n        let chain_name = test.chainspec().network_config.name.clone();\n\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: std::fs::read(base_path.join(\"gh_5058_regression.wasm\"))\n                .unwrap()\n                .into(),\n            args: runtime_args! {\n                \"amount\" => payment_amount,\n            },\n        };\n\n        let session = ExecutableDeployItem::ModuleBytes {\n            module_bytes: std::fs::read(base_path.join(\"do_nothing.wasm\"))\n                .unwrap()\n                .into(),\n            args: runtime_args! 
{},\n        };\n\n        Transaction::Deploy(Deploy::new_signed(\n            timestamp,\n            ttl,\n            gas_price,\n            vec![],\n            chain_name.clone(),\n            payment,\n            session,\n            &ALICE_SECRET_KEY,\n            Some(ALICE_PUBLIC_KEY.clone()),\n        ))\n    };\n\n    let acct = get_balance(&test.fixture, &ALICE_PUBLIC_KEY, None, true);\n    assert!(acct.total_balance().cloned().unwrap() >= payment_amount);\n\n    let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await;\n\n    assert_eq!(exec_result.error_message(), None);\n}\n\n#[tokio::test]\nasync fn should_penalize_failed_custom_payment() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::PaymentLimited)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    // This WASM creates named key called \"new_key\". Then it would loop endlessly trying to write a\n    // value to storage. 
Eventually it will run out of gas and it should exit causing a revert.\n    let base_path = RESOURCES_PATH\n        .parent()\n        .unwrap()\n        .join(\"target\")\n        .join(\"wasm32-unknown-unknown\")\n        .join(\"release\");\n\n    let payment_amount = U512::from(1_000_000u64);\n\n    let txn = {\n        let timestamp = Timestamp::now();\n        let ttl = TimeDiff::from_seconds(100);\n        let gas_price = 1;\n        let chain_name = test.chainspec().network_config.name.clone();\n\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: std::fs::read(base_path.join(\"do_nothing.wasm\"))\n                .unwrap()\n                .into(),\n            args: runtime_args! {\n                \"amount\" => payment_amount,\n            },\n        };\n\n        let session = ExecutableDeployItem::ModuleBytes {\n            module_bytes: std::fs::read(base_path.join(\"do_nothing.wasm\"))\n                .unwrap()\n                .into(),\n            args: runtime_args! 
{\n                \"this_is_session\" => true,\n            },\n        };\n\n        Transaction::Deploy(Deploy::new_signed(\n            timestamp,\n            ttl,\n            gas_price,\n            vec![],\n            chain_name.clone(),\n            payment,\n            session,\n            &ALICE_SECRET_KEY,\n            Some(ALICE_PUBLIC_KEY.clone()),\n        ))\n    };\n\n    let acct = get_balance(&test.fixture, &ALICE_PUBLIC_KEY, None, true);\n    assert!(acct.total_balance().cloned().unwrap() >= payment_amount);\n\n    let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await;\n\n    assert_ne!(exec_result.error_message(), None);\n\n    assert!(exec_result\n        .error_message()\n        .expect(\"should have err message\")\n        .starts_with(\"Insufficient custom payment\"))\n}\n\n#[tokio::test]\nasync fn gh_5082_install_upgrade_should_allow_adding_new_version() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::PaymentLimited)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    // This WASM creates named key called \"new_key\". Then it would loop endlessly trying to write a\n    // value to storage. 
Eventually it will run out of gas and it should exit causing a revert.\n    let base_path = RESOURCES_PATH\n        .parent()\n        .unwrap()\n        .join(\"target\")\n        .join(\"wasm32-unknown-unknown\")\n        .join(\"release\");\n\n    let txn_1 = {\n        let chain_name = test.chainspec().network_config.name.clone();\n\n        let module_bytes = std::fs::read(base_path.join(\"do_nothing_stored.wasm\")).unwrap();\n        let mut txn = Transaction::from(\n            TransactionV1Builder::new_session(\n                true,\n                module_bytes.into(),\n                TransactionRuntimeParams::VmCasperV1,\n            )\n            .with_initiator_addr(ALICE_PUBLIC_KEY.clone())\n            .with_pricing_mode(PricingMode::PaymentLimited {\n                payment_amount: 100_000_000_000u64,\n                gas_price_tolerance: 1,\n                standard_payment: true,\n            })\n            .with_chain_name(chain_name)\n            .build()\n            .unwrap(),\n        );\n        txn.sign(&ALICE_SECRET_KEY);\n        txn\n    };\n\n    let (_txn_hash, _block_height, exec_result_1) = test.send_transaction(txn_1).await;\n\n    assert_eq!(exec_result_1.error_message(), None); // should succeed\n\n    let txn_2 = {\n        let chain_name = test.chainspec().network_config.name.clone();\n\n        let module_bytes = std::fs::read(base_path.join(\"do_nothing_stored.wasm\")).unwrap();\n        let mut txn = Transaction::from(\n            TransactionV1Builder::new_session(\n                true,\n                module_bytes.into(),\n                TransactionRuntimeParams::VmCasperV1,\n            )\n            .with_initiator_addr(BOB_PUBLIC_KEY.clone())\n            .with_pricing_mode(PricingMode::PaymentLimited {\n                payment_amount: 100_000_000_000u64,\n                gas_price_tolerance: 1,\n                // This is the key part of the test: we are using `standard_payment == false` to use\n                
// session code as payment code. This should fail to add new\n                // contract version.\n                standard_payment: false,\n            })\n            .with_chain_name(chain_name)\n            .build()\n            .unwrap(),\n        );\n        txn.sign(&BOB_SECRET_KEY);\n        txn\n    };\n\n    let (_txn_hash, _block_height, exec_result_2) = test.send_transaction(txn_2).await;\n\n    assert_eq!(\n        exec_result_2.error_message(),\n        Some(\"ApiError::NotAllowedToAddContractVersion [48]\".to_string())\n    ); // should not succeed, adding new contract version during payment is not allowed.\n}\n\n#[tokio::test]\nasync fn should_allow_custom_payment() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::PaymentLimited)\n        .with_refund_handling(RefundHandling::NoRefund)\n        .with_fee_handling(FeeHandling::NoFee);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    // This WASM creates named key called \"new_key\". Then it would loop endlessly trying to write a\n    // value to storage. 
Eventually it will run out of gas and it should exit causing a revert.\n    let base_path = RESOURCES_PATH\n        .parent()\n        .unwrap()\n        .join(\"target\")\n        .join(\"wasm32-unknown-unknown\")\n        .join(\"release\");\n\n    let payment_amount = U512::from(2_500_000_000u64);\n\n    let txn = {\n        let timestamp = Timestamp::now();\n        let ttl = TimeDiff::from_seconds(100);\n        let gas_price = 1;\n        let chain_name = test.chainspec().network_config.name.clone();\n\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: std::fs::read(base_path.join(\"non_standard_payment.wasm\"))\n                .unwrap()\n                .into(),\n            args: runtime_args! {\n                \"amount\" => payment_amount,\n            },\n        };\n\n        let session = ExecutableDeployItem::ModuleBytes {\n            module_bytes: std::fs::read(base_path.join(\"do_nothing.wasm\"))\n                .unwrap()\n                .into(),\n            args: runtime_args! 
{\n                \"this_is_session\" => true,\n            },\n        };\n\n        Transaction::Deploy(Deploy::new_signed(\n            timestamp,\n            ttl,\n            gas_price,\n            vec![],\n            chain_name.clone(),\n            payment,\n            session,\n            &ALICE_SECRET_KEY,\n            Some(ALICE_PUBLIC_KEY.clone()),\n        ))\n    };\n\n    let acct = get_balance(&test.fixture, &ALICE_PUBLIC_KEY, None, true);\n    assert!(acct.total_balance().cloned().unwrap() >= payment_amount);\n\n    let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await;\n\n    assert_eq!(exec_result.error_message(), None);\n    assert!(\n        exec_result.consumed() > U512::zero(),\n        \"should have consumed gas\"\n    );\n}\n\n#[tokio::test]\nasync fn should_allow_native_transfer_v1() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::PaymentLimited)\n        .with_refund_handling(RefundHandling::Refund {\n            refund_ratio: Ratio::new(99, 100),\n        })\n        .with_fee_handling(FeeHandling::PayToProposer)\n        .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    let transfer_amount = U512::from(100);\n\n    let txn_v1 =\n        TransactionV1Builder::new_transfer(transfer_amount, None, CHARLIE_PUBLIC_KEY.clone(), None)\n            .unwrap()\n            .with_chain_name(CHAIN_NAME)\n            .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY))\n            .build()\n            .unwrap();\n    let payment = txn_v1\n        .payment_amount()\n        .expect(\"must have payment amount as txns are using payment_limited\");\n    let mut txn = Transaction::from(txn_v1);\n    txn.sign(&BOB_SECRET_KEY);\n\n    
let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await;\n    let ExecutionResult::V2(result) = exec_result else {\n        panic!(\"Expected ExecutionResult::V2 but got {:?}\", exec_result);\n    };\n\n    assert_ne!(\n        U512::from(payment),\n        result.cost,\n        \"native transfer costing is system limited\"\n    );\n    let expected_cost: U512 = U512::from(DEFAULT_TRANSFER_COST);\n    assert_eq!(result.error_message.as_deref(), None);\n    assert_eq!(result.cost, expected_cost);\n    assert_eq!(result.transfers.len(), 1, \"should have exactly 1 transfer\");\n}\n\n#[tokio::test]\nasync fn should_allow_native_burn() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::PaymentLimited)\n        .with_refund_handling(RefundHandling::Refund {\n            refund_ratio: Ratio::new(99, 100),\n        })\n        .with_fee_handling(FeeHandling::PayToProposer)\n        .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    let burn_amount = U512::from(100);\n\n    let txn_v1 = TransactionV1Builder::new_burn(burn_amount, None)\n        .unwrap()\n        .with_chain_name(CHAIN_NAME)\n        .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY))\n        .build()\n        .unwrap();\n    let payment = txn_v1\n        .payment_amount()\n        .expect(\"must have payment amount as txns are using payment_limited\");\n    let mut txn = Transaction::from(txn_v1);\n    txn.sign(&BOB_SECRET_KEY);\n\n    let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await;\n    let ExecutionResult::V2(result) = exec_result else {\n        panic!(\"Expected ExecutionResult::V2 but got {:?}\", exec_result);\n    };\n    let expected_cost: U512 = 
U512::from(payment) * MIN_GAS_PRICE;\n    assert_eq!(result.error_message.as_deref(), None);\n    assert_eq!(result.cost, expected_cost);\n}\n\n#[tokio::test]\nasync fn should_not_allow_unverified_native_burn() {\n    let config = SingleTransactionTestCase::default_test_config()\n        .with_pricing_handling(PricingHandling::PaymentLimited)\n        .with_refund_handling(RefundHandling::Refund {\n            refund_ratio: Ratio::new(99, 100),\n        })\n        .with_fee_handling(FeeHandling::PayToProposer)\n        .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued);\n\n    let mut test = SingleTransactionTestCase::new(\n        ALICE_SECRET_KEY.clone(),\n        BOB_SECRET_KEY.clone(),\n        CHARLIE_SECRET_KEY.clone(),\n        Some(config),\n    )\n    .await;\n\n    test.fixture\n        .run_until_consensus_in_era(ERA_ONE, ONE_MIN)\n        .await;\n\n    let burn_amount = U512::from(100);\n\n    let alice_uref_addr =\n        get_main_purse(&mut test.fixture, &ALICE_PUBLIC_KEY).expect(\"should have main purse\");\n    let alice_purse = URef::new(alice_uref_addr, AccessRights::all());\n\n    let txn_v1 = TransactionV1Builder::new_burn(burn_amount, Some(alice_purse))\n        .unwrap()\n        .with_chain_name(CHAIN_NAME)\n        .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY))\n        .build()\n        .unwrap();\n    let price = txn_v1\n        .payment_amount()\n        .expect(\"must have payment amount as txns are using payment_limited\");\n    let mut txn = Transaction::from(txn_v1);\n    txn.sign(&BOB_SECRET_KEY);\n\n    let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await;\n    let ExecutionResult::V2(result) = exec_result else {\n        panic!(\"Expected ExecutionResult::V2 but got {:?}\", exec_result);\n    };\n    let expected_cost: U512 = U512::from(price) * MIN_GAS_PRICE;\n    let expected_error = format!(\"Forged reference: {}\", alice_purse);\n    assert_eq!(result.error_message, 
Some(expected_error));\n    assert_eq!(result.cost, expected_cost);\n}\n\nenum SizingScenario {\n    Gas,\n    SerializedLength,\n}\n\nasync fn run_sizing_scenario(sizing_scenario: SizingScenario) {\n    let mut rng = TestRng::new();\n    let alice_stake = 200_000_000_000_u64;\n    let bob_stake = 300_000_000_000_u64;\n    let charlie_stake = 300_000_000_000_u64;\n    let initial_stakes: Vec<(U512, U512)> = vec![\n        (U512::from(u64::MAX), alice_stake.into()),\n        (U512::from(u64::MAX), bob_stake.into()),\n        (U512::from(u64::MAX), charlie_stake.into()),\n    ];\n\n    let secret_keys: Vec<Arc<SecretKey>> = (0..3)\n        .map(|_| Arc::new(SecretKey::random(&mut rng)))\n        .collect();\n\n    let stakes = secret_keys\n        .iter()\n        .zip(initial_stakes)\n        .map(|(secret_key, stake)| (PublicKey::from(secret_key.as_ref()), stake))\n        .collect();\n\n    let mut fixture = TestFixture::new_with_keys(rng, secret_keys, stakes, None).await;\n\n    fixture\n        .run_until_stored_switch_block_header(ERA_ONE, ONE_MIN)\n        .await;\n\n    fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await;\n\n    let base_path = RESOURCES_PATH\n        .parent()\n        .unwrap()\n        .join(\"target\")\n        .join(\"wasm32-unknown-unknown\")\n        .join(\"release\");\n\n    let (payment_1, session_1) = match sizing_scenario {\n        SizingScenario::Gas => {\n            // We create two equally sized deploys, and ensure that they are both\n            // executed in the non largest lane by gas limit.\n            let gas_limit_for_lane_4 = fixture\n                .chainspec\n                .transaction_config\n                .transaction_v1_config\n                .get_max_transaction_gas_limit(4u8);\n\n            let payment = ExecutableDeployItem::ModuleBytes {\n                module_bytes: Bytes::new(),\n                args: runtime_args! 
{\n                \"amount\" =>  U512::from(gas_limit_for_lane_4),\n                            },\n            };\n\n            let session = ExecutableDeployItem::ModuleBytes {\n                module_bytes: std::fs::read(base_path.join(\"do_nothing.wasm\"))\n                    .unwrap()\n                    .into(),\n                args: runtime_args! {},\n            };\n\n            (payment, session)\n        }\n        SizingScenario::SerializedLength => {\n            let gas_limit_for_lane_3 = fixture\n                .chainspec\n                .transaction_config\n                .transaction_v1_config\n                .get_max_transaction_gas_limit(3u8);\n\n            let payment = ExecutableDeployItem::ModuleBytes {\n                module_bytes: Bytes::new(),\n                args: runtime_args! {\n                    \"amount\" =>  U512::from(gas_limit_for_lane_3)\n                },\n            };\n\n            let session = ExecutableDeployItem::ModuleBytes {\n                module_bytes: std::fs::read(base_path.join(\"do_nothing.wasm\"))\n                    .unwrap()\n                    .into(),\n                args: runtime_args! 
{},\n            };\n\n            (payment, session)\n        }\n    };\n\n    let timestamp = Timestamp::now();\n    let ttl = TimeDiff::from_seconds(100);\n    let gas_price = 1;\n    let chain_name = fixture.chainspec.network_config.name.clone();\n\n    let transaction_1 = Transaction::Deploy(Deploy::new_signed(\n        timestamp,\n        ttl,\n        gas_price,\n        vec![],\n        chain_name.clone(),\n        payment_1,\n        session_1,\n        &ALICE_SECRET_KEY,\n        Some(ALICE_PUBLIC_KEY.clone()),\n    ));\n\n    let wasm_lanes = fixture\n        .chainspec\n        .transaction_config\n        .transaction_v1_config\n        .wasm_lanes();\n\n    let largest_lane = wasm_lanes\n        .iter()\n        .max_by(|left, right| {\n            left.max_transaction_length()\n                .cmp(&right.max_transaction_length())\n        })\n        .map(|definition| definition.id())\n        .expect(\"must have lane id for largest lane\");\n\n    let (payment_2, session_2) = match sizing_scenario {\n        SizingScenario::Gas => {\n            // We create two equally sized deploys, and ensure that they are both\n            // executed in the non largest lane by gas limit.\n            let gas_limit_for_lane_3 = fixture\n                .chainspec\n                .transaction_config\n                .transaction_v1_config\n                .get_max_transaction_gas_limit(3u8);\n\n            let payment = ExecutableDeployItem::ModuleBytes {\n                module_bytes: Bytes::new(),\n                args: runtime_args! {\n                \"amount\" =>  U512::from(gas_limit_for_lane_3),\n                            },\n            };\n\n            let session = ExecutableDeployItem::ModuleBytes {\n                module_bytes: std::fs::read(base_path.join(\"do_nothing.wasm\"))\n                    .unwrap()\n                    .into(),\n                args: runtime_args! 
{},\n            };\n\n            (payment, session)\n        }\n        SizingScenario::SerializedLength => {\n            let largest_lane_gas_limit = fixture\n                .chainspec\n                .transaction_config\n                .transaction_v1_config\n                .get_max_transaction_gas_limit(largest_lane);\n\n            let payment = ExecutableDeployItem::ModuleBytes {\n                module_bytes: Bytes::new(),\n                args: runtime_args! {\n                    \"amount\" =>  U512::from(largest_lane_gas_limit)\n                },\n            };\n\n            let faucet_fund_amount = U512::from(400_000_000_000_000u64);\n\n            let session = ExecutableDeployItem::ModuleBytes {\n                module_bytes: std::fs::read(base_path.join(\"faucet_stored.wasm\"))\n                    .unwrap()\n                    .into(),\n                args: runtime_args! {\"id\" => 1u64, ARG_AMOUNT => faucet_fund_amount },\n            };\n\n            (payment, session)\n        }\n    };\n\n    let transaction_2 = Transaction::Deploy(Deploy::new_signed(\n        timestamp,\n        ttl,\n        gas_price,\n        vec![],\n        chain_name.clone(),\n        payment_2,\n        session_2,\n        &ALICE_SECRET_KEY,\n        Some(ALICE_PUBLIC_KEY.clone()),\n    ));\n\n    // Both deploys are of roughly equal length but should be sized differently based on\n    // their payment amount.\n\n    let txn_1 = transaction_1.hash();\n    let txn_2 = transaction_2.hash();\n\n    fixture.inject_transaction(transaction_1).await;\n    fixture.inject_transaction(transaction_2).await;\n\n    match sizing_scenario {\n        SizingScenario::Gas => {\n            fixture\n                .assert_execution_in_lane(&txn_1, 4u8, TEN_SECS)\n                .await;\n            fixture\n                .assert_execution_in_lane(&txn_2, 3u8, TEN_SECS)\n                .await;\n        }\n        SizingScenario::SerializedLength => {\n            fixture\n  
              .assert_execution_in_lane(&txn_1, 3u8, TEN_SECS)\n                .await;\n            fixture\n                .assert_execution_in_lane(&txn_2, largest_lane, TEN_SECS)\n                .await;\n        }\n    }\n}\n\n#[tokio::test]\nasync fn should_correctly_assign_wasm_deploys_in_lanes_for_payment_limited_by_gas_limit() {\n    run_sizing_scenario(SizingScenario::Gas).await\n}\n\n#[tokio::test]\nasync fn should_correctly_assign_wasm_deploys_in_lanes_for_payment_limited_by_serialized_length() {\n    run_sizing_scenario(SizingScenario::SerializedLength).await\n}\n\n#[tokio::test]\nasync fn should_assign_deploy_to_largest_lane_by_payment_amount_only_in_payment_limited() {\n    let mut rng = TestRng::new();\n    let alice_stake = 200_000_000_000_u64;\n    let bob_stake = 300_000_000_000_u64;\n    let charlie_stake = 300_000_000_000_u64;\n    let initial_stakes: Vec<(U512, U512)> = vec![\n        (U512::from(u64::MAX), alice_stake.into()),\n        (U512::from(u64::MAX), bob_stake.into()),\n        (U512::from(u64::MAX), charlie_stake.into()),\n    ];\n\n    let secret_keys: Vec<Arc<SecretKey>> = (0..3)\n        .map(|_| Arc::new(SecretKey::random(&mut rng)))\n        .collect();\n\n    let stakes = secret_keys\n        .iter()\n        .zip(initial_stakes)\n        .map(|(secret_key, stake)| (PublicKey::from(secret_key.as_ref()), stake))\n        .collect();\n\n    let mut fixture = TestFixture::new_with_keys(rng, secret_keys, stakes, None).await;\n\n    fixture\n        .run_until_stored_switch_block_header(ERA_ONE, ONE_MIN)\n        .await;\n\n    fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await;\n\n    let base_path = RESOURCES_PATH\n        .parent()\n        .unwrap()\n        .join(\"target\")\n        .join(\"wasm32-unknown-unknown\")\n        .join(\"release\");\n\n    let mut wasm_lanes = fixture\n        .chainspec\n        .transaction_config\n        .transaction_v1_config\n        .wasm_lanes()\n        .clone();\n\n    
wasm_lanes.sort_by(|a, b| {\n        a.max_transaction_gas_limit()\n            .cmp(&b.max_transaction_gas_limit())\n    });\n\n    let (smallest_lane_id, smallest_gas_limt, smallest_size_limit_for_deploy) = wasm_lanes\n        .first()\n        .map(|lane_def| {\n            (\n                lane_def.id(),\n                lane_def.max_transaction_gas_limit(),\n                lane_def.max_transaction_length(),\n            )\n        })\n        .expect(\"must have at least one lane\");\n\n    let payment = ExecutableDeployItem::ModuleBytes {\n        module_bytes: Bytes::new(),\n        args: runtime_args! {\n        \"amount\" =>  U512::from(smallest_gas_limt),\n                    },\n    };\n\n    let session = ExecutableDeployItem::ModuleBytes {\n        module_bytes: std::fs::read(base_path.join(\"do_nothing.wasm\"))\n            .unwrap()\n            .into(),\n        args: runtime_args! {},\n    };\n\n    let timestamp = Timestamp::now();\n    let ttl = TimeDiff::from_seconds(100);\n    let gas_price = 1;\n    let chain_name = fixture.chainspec.network_config.name.clone();\n\n    let transaction = Transaction::Deploy(Deploy::new_signed(\n        timestamp,\n        ttl,\n        gas_price,\n        vec![],\n        chain_name.clone(),\n        payment,\n        session,\n        &ALICE_SECRET_KEY,\n        Some(ALICE_PUBLIC_KEY.clone()),\n    ));\n\n    let small_txn_hash = transaction.hash();\n    let small_txn_size = transaction.serialized_length() as u64;\n    assert!(small_txn_size < smallest_size_limit_for_deploy);\n\n    fixture.inject_transaction(transaction).await;\n\n    fixture\n        .assert_execution_in_lane(&small_txn_hash, smallest_lane_id, TEN_SECS)\n        .await;\n\n    let (largest_lane_id, largest_gas_limt) = wasm_lanes\n        .last()\n        .map(|lane_def| (lane_def.id(), lane_def.max_transaction_gas_limit()))\n        .expect(\"must have at least one lane\");\n\n    assert_ne!(largest_lane_id, smallest_lane_id);\n    
assert!(largest_gas_limt > smallest_gas_limt);\n\n    let payment = ExecutableDeployItem::ModuleBytes {\n        module_bytes: Bytes::new(),\n        args: runtime_args! {\n        \"amount\" =>  U512::from(largest_gas_limt),\n                    },\n    };\n\n    let session = ExecutableDeployItem::ModuleBytes {\n        module_bytes: std::fs::read(base_path.join(\"do_nothing.wasm\"))\n            .unwrap()\n            .into(),\n        args: runtime_args! {},\n    };\n\n    let chain_name = fixture.chainspec.network_config.name.clone();\n\n    let transaction = Transaction::Deploy(Deploy::new_signed(\n        timestamp,\n        ttl,\n        gas_price,\n        vec![],\n        chain_name.clone(),\n        payment,\n        session,\n        &ALICE_SECRET_KEY,\n        Some(ALICE_PUBLIC_KEY.clone()),\n    ));\n\n    let largest_txn_hash = transaction.hash();\n\n    let largest_txn_size = transaction.serialized_length() as u64;\n    // This is misnomer, its the size of the deploy meant to be in the\n    // largest lane.\n    assert!(largest_txn_size < smallest_size_limit_for_deploy);\n\n    fixture.inject_transaction(transaction).await;\n\n    fixture\n        .assert_execution_in_lane(&largest_txn_hash, largest_lane_id, TEN_SECS)\n        .await;\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/tests.rs",
    "content": "mod auction;\nmod binary_port;\nmod configs_override;\nmod consensus_rules;\nmod fixture;\nmod gas_price;\nmod initial_stakes;\nmod network_general;\nmod rewards;\nmod switch_blocks;\nmod transaction_scenario;\nmod transactions;\n\nuse std::{collections::BTreeSet, sync::Arc, time::Duration};\n\nuse num_rational::Ratio;\nuse tracing::info;\n\nuse casper_storage::{\n    data_access_layer::{\n        balance::{BalanceHandling, BalanceResult},\n        BalanceRequest, BidsRequest, TotalSupplyRequest, TotalSupplyResult,\n    },\n    global_state::state::StateProvider,\n};\nuse casper_types::{\n    execution::ExecutionResult, system::auction::BidKind, testing::TestRng, Chainspec, Deploy,\n    EraId, FeeHandling, Gas, HoldBalanceHandling, Key, PricingHandling, PricingMode, PublicKey,\n    RefundHandling, SecretKey, StoredValue, TimeDiff, Timestamp, Transaction, TransactionHash,\n    U512,\n};\n\nuse crate::{\n    components::consensus::{ClContext, ConsensusMessage, HighwayMessage, HighwayVertex},\n    effect::incoming::ConsensusMessageIncoming,\n    reactor::{\n        main_reactor::{MainEvent, MainReactor},\n        Runner,\n    },\n    testing::{self, filter_reactor::FilterReactor, ConditionCheckReactor},\n    types::{transaction::transaction_v1_builder::TransactionV1Builder, NodeId},\n    utils::RESOURCES_PATH,\n};\n\nconst ERA_ZERO: EraId = EraId::new(0);\nconst ERA_ONE: EraId = EraId::new(1);\nconst ERA_TWO: EraId = EraId::new(2);\nconst ERA_THREE: EraId = EraId::new(3);\nconst TEN_SECS: Duration = Duration::from_secs(10);\nconst THIRTY_SECS: Duration = Duration::from_secs(30);\nconst ONE_MIN: Duration = Duration::from_secs(60);\n\ntype Nodes = testing::network::Nodes<FilterReactor<MainReactor>>;\n\nimpl Runner<ConditionCheckReactor<FilterReactor<MainReactor>>> {\n    fn main_reactor(&self) -> &MainReactor {\n        self.reactor().inner().inner()\n    }\n}\n\n/// Given a block height and a node id, returns a predicate to check if the lowest available 
block\n/// for the specified node is at or below the specified height.\nfn node_has_lowest_available_block_at_or_below_height(\n    height: u64,\n    node_id: NodeId,\n) -> impl Fn(&Nodes) -> bool {\n    move |nodes: &Nodes| {\n        nodes.get(&node_id).is_none_or(|runner| {\n            let available_block_range = runner.main_reactor().storage().get_available_block_range();\n            if available_block_range.low() == 0 && available_block_range.high() == 0 {\n                false\n            } else {\n                available_block_range.low() <= height\n            }\n        })\n    }\n}\n\nfn is_ping(event: &MainEvent) -> bool {\n    if let MainEvent::ConsensusMessageIncoming(ConsensusMessageIncoming { message, .. }) = event {\n        if let ConsensusMessage::Protocol { ref payload, .. } = **message {\n            return matches!(\n                payload.deserialize_incoming::<HighwayMessage<ClContext>>(),\n                Ok(HighwayMessage::<ClContext>::NewVertex(HighwayVertex::Ping(\n                    _\n                )))\n            );\n        }\n    }\n    false\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/upgrade_shutdown.rs",
    "content": "use std::{collections::HashMap, time::Duration};\n\nuse datasize::DataSize;\nuse tracing::debug;\n\nuse casper_types::{BlockHash, FinalitySignatureId};\n\nuse crate::{\n    effect::{announcements::ControlAnnouncement, EffectBuilder, EffectExt, Effects},\n    reactor::main_reactor::{MainEvent, MainReactor},\n    types::EraValidatorWeights,\n};\n\nuse casper_types::EraId;\n\nconst DELAY_BEFORE_SHUTDOWN: Duration = Duration::from_secs(2);\n\n#[derive(Debug, DataSize)]\npub(super) struct SignatureGossipTracker {\n    era_id: EraId,\n    finished_gossiping: HashMap<BlockHash, Vec<FinalitySignatureId>>,\n}\n\nimpl SignatureGossipTracker {\n    pub(super) fn new() -> Self {\n        Self {\n            era_id: EraId::from(0),\n            finished_gossiping: Default::default(),\n        }\n    }\n\n    pub(super) fn register_signature(&mut self, signature_id: Box<FinalitySignatureId>) {\n        // ignore the signature if it's from an older era\n        if signature_id.era_id() < self.era_id {\n            return;\n        }\n        // if we registered a signature in a higher era, reset the cache\n        if signature_id.era_id() > self.era_id {\n            self.era_id = signature_id.era_id();\n            self.finished_gossiping = Default::default();\n        }\n        // record that the signature has finished gossiping\n        self.finished_gossiping\n            .entry(*signature_id.block_hash())\n            .or_default()\n            .push(*signature_id);\n    }\n\n    fn finished_gossiping_enough(&self, validator_weights: &EraValidatorWeights) -> bool {\n        if validator_weights.era_id() != self.era_id {\n            debug!(\n                relevant_era=%validator_weights.era_id(),\n                our_era_id=%self.era_id,\n                \"SignatureGossipTracker has no record of the relevant era!\"\n            );\n            return false;\n        }\n        self.finished_gossiping\n            .iter()\n            .all(|(block_hash, 
signatures)| {\n                let gossiped_weight_sufficient = validator_weights\n                    .signature_weight(signatures.iter().map(|sig_id| sig_id.public_key()))\n                    .is_sufficient(true);\n                debug!(\n                    %gossiped_weight_sufficient,\n                    %block_hash,\n                    \"SignatureGossipTracker: gossiped finality signatures check\"\n                );\n                gossiped_weight_sufficient\n            })\n    }\n}\n\npub(super) enum UpgradeShutdownInstruction {\n    Do(Duration, Effects<MainEvent>),\n    CheckLater(String, Duration),\n    Fatal(String),\n}\n\nimpl MainReactor {\n    pub(super) fn upgrade_shutdown_instruction(\n        &self,\n        effect_builder: EffectBuilder<MainEvent>,\n    ) -> UpgradeShutdownInstruction {\n        if self.switched_to_shutdown_for_upgrade.elapsed() > self.shutdown_for_upgrade_timeout {\n            return self.schedule_shutdown_for_upgrade(effect_builder);\n        }\n        let recent_switch_block_headers = match self.storage.read_highest_switch_block_headers(1) {\n            Ok(headers) => headers,\n            Err(error) => {\n                return UpgradeShutdownInstruction::Fatal(format!(\n                    \"error getting recent switch block headers: {}\",\n                    error\n                ))\n            }\n        };\n        if let Some(block_header) = recent_switch_block_headers.last() {\n            let highest_switch_block_era = block_header.era_id();\n            return match self\n                .validator_matrix\n                .validator_weights(highest_switch_block_era)\n            {\n                Some(validator_weights) => self\n                    .upgrade_shutdown_has_sufficient_finality(effect_builder, &validator_weights),\n                None => UpgradeShutdownInstruction::Fatal(\n                    \"validator_weights cannot be missing\".to_string(),\n                ),\n            };\n        }\n 
       UpgradeShutdownInstruction::Fatal(\"recent_switch_block_headers cannot be empty\".to_string())\n    }\n\n    fn upgrade_shutdown_has_sufficient_finality(\n        &self,\n        effect_builder: EffectBuilder<MainEvent>,\n        validator_weights: &EraValidatorWeights,\n    ) -> UpgradeShutdownInstruction {\n        let finished_gossiping_enough = self\n            .signature_gossip_tracker\n            .finished_gossiping_enough(validator_weights);\n        if finished_gossiping_enough {\n            self.schedule_shutdown_for_upgrade(effect_builder)\n        } else {\n            UpgradeShutdownInstruction::CheckLater(\n                \"waiting for completion of gossiping signatures\".to_string(),\n                DELAY_BEFORE_SHUTDOWN,\n            )\n        }\n    }\n\n    fn schedule_shutdown_for_upgrade(\n        &self,\n        effect_builder: EffectBuilder<MainEvent>,\n    ) -> UpgradeShutdownInstruction {\n        // Allow a delay to acquire more finality signatures\n        let effects = effect_builder\n            .set_timeout(DELAY_BEFORE_SHUTDOWN)\n            .event(|_| MainEvent::ControlAnnouncement(ControlAnnouncement::ShutdownForUpgrade));\n        // should not need to crank the control logic again as the reactor will shutdown\n        UpgradeShutdownInstruction::Do(DELAY_BEFORE_SHUTDOWN, effects)\n    }\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/upgrading_instruction.rs",
    "content": "use std::time::Duration;\n\nuse casper_types::{TimeDiff, Timestamp};\n\npub(super) enum UpgradingInstruction {\n    CheckLater(String, Duration),\n    CatchUp,\n}\n\nimpl UpgradingInstruction {\n    pub(super) fn should_commit_upgrade(\n        should_commit_upgrade: bool,\n        wait: Duration,\n        last_progress: Timestamp,\n        upgrade_timeout: TimeDiff,\n    ) -> UpgradingInstruction {\n        if should_commit_upgrade {\n            if last_progress.elapsed() > upgrade_timeout {\n                UpgradingInstruction::CatchUp\n            } else {\n                UpgradingInstruction::CheckLater(\"awaiting upgrade\".to_string(), wait)\n            }\n        } else {\n            UpgradingInstruction::CatchUp\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/utils.rs",
    "content": "use futures::FutureExt;\nuse smallvec::smallvec;\nuse tracing::info;\n\nuse crate::{\n    components::InitializedComponent,\n    effect::{EffectBuilder, EffectExt, Effects},\n    fatal,\n    reactor::main_reactor::MainEvent,\n};\n\npub(super) fn initialize_component(\n    effect_builder: EffectBuilder<MainEvent>,\n    component: &mut impl InitializedComponent<MainEvent>,\n    initiating_event: MainEvent,\n) -> Option<Effects<MainEvent>> {\n    if component.is_uninitialized() {\n        component.start_initialization();\n        info!(\"pending initialization of {}\", component.name());\n        return Some(smallvec![async { smallvec![initiating_event] }.boxed()]);\n    }\n    if component.is_fatal() {\n        return Some(fatal!(effect_builder, \"{} failed to initialize\", component.name()).ignore());\n    }\n    None\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor/validate.rs",
    "content": "use std::time::Duration;\nuse tracing::{debug, info, warn};\n\nuse crate::{\n    components::{\n        block_accumulator::{SyncIdentifier, SyncInstruction},\n        consensus::ChainspecConsensusExt,\n    },\n    effect::{EffectBuilder, Effects},\n    reactor::{\n        self,\n        main_reactor::{MainEvent, MainReactor},\n    },\n    storage::HighestOrphanedBlockResult,\n    types::MaxTtl,\n    NodeRng,\n};\n\n/// Cranking delay when encountered a non-switch block when checking the validator status.\nconst VALIDATION_STATUS_DELAY_FOR_NON_SWITCH_BLOCK: Duration = Duration::from_secs(2);\n\npub(super) enum ValidateInstruction {\n    Do(Duration, Effects<MainEvent>),\n    CheckLater(String, Duration),\n    CatchUp,\n    KeepUp,\n    ShutdownForUpgrade,\n    Fatal(String),\n}\n\nimpl MainReactor {\n    pub(super) fn validate_instruction(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        rng: &mut NodeRng,\n    ) -> ValidateInstruction {\n        let last_progress = self.consensus.last_progress();\n        if last_progress > self.last_progress {\n            self.last_progress = last_progress;\n        }\n\n        let queue_depth = self.contract_runtime.queue_depth();\n        if queue_depth > 0 {\n            warn!(\"Validate: should_validate queue_depth {}\", queue_depth);\n            return ValidateInstruction::CheckLater(\n                \"allow time for contract runtime execution to occur\".to_string(),\n                self.control_logic_default_delay.into(),\n            );\n        }\n\n        match self.storage.get_highest_complete_block() {\n            Ok(Some(highest_complete_block)) => {\n                // If we're lagging behind the rest of the network, fall back out of Validate mode.\n                let sync_identifier = SyncIdentifier::LocalTip(\n                    *highest_complete_block.hash(),\n                    highest_complete_block.height(),\n                    
highest_complete_block.era_id(),\n                );\n\n                if let SyncInstruction::Leap { .. } =\n                    self.block_accumulator.sync_instruction(sync_identifier)\n                {\n                    return ValidateInstruction::CatchUp;\n                }\n\n                if !highest_complete_block.is_switch_block() {\n                    return ValidateInstruction::CheckLater(\n                        \"tip is not a switch block, don't change from validate state\".to_string(),\n                        VALIDATION_STATUS_DELAY_FOR_NON_SWITCH_BLOCK,\n                    );\n                }\n            }\n            Ok(None) => {\n                return ValidateInstruction::CheckLater(\n                    \"no complete block found in storage\".to_string(),\n                    self.control_logic_default_delay.into(),\n                );\n            }\n            Err(error) => {\n                return ValidateInstruction::Fatal(format!(\n                    \"Could not read highest complete block from storage due to storage error: {}\",\n                    error\n                ));\n            }\n        }\n\n        if self.should_shutdown_for_upgrade() {\n            return ValidateInstruction::ShutdownForUpgrade;\n        }\n\n        match self.create_required_eras(effect_builder, rng) {\n            Ok(Some(effects)) => {\n                if effects.is_empty() {\n                    ValidateInstruction::CheckLater(\n                        \"consensus state is up to date\".to_string(),\n                        self.control_logic_default_delay.into(),\n                    )\n                } else {\n                    ValidateInstruction::Do(Duration::ZERO, effects)\n                }\n            }\n            Ok(None) => ValidateInstruction::KeepUp,\n            Err(msg) => ValidateInstruction::Fatal(msg),\n        }\n    }\n\n    pub(super) fn create_required_eras(\n        &mut self,\n        effect_builder: 
EffectBuilder<MainEvent>,\n        rng: &mut NodeRng,\n    ) -> Result<Option<Effects<MainEvent>>, String> {\n        let recent_switch_block_headers = self\n            .storage\n            .read_highest_switch_block_headers(self.chainspec.number_of_past_switch_blocks_needed())\n            .map_err(|err| err.to_string())?;\n\n        let highest_switch_block_header = match recent_switch_block_headers.last() {\n            None => {\n                debug!(\n                    \"{}: create_required_eras: recent_switch_block_headers is empty\",\n                    self.state\n                );\n                return Ok(None);\n            }\n            Some(header) => header,\n        };\n        debug!(\n            era = highest_switch_block_header.era_id().value(),\n            block_hash = %highest_switch_block_header.block_hash(),\n            height = highest_switch_block_header.height(),\n            \"{}: highest_switch_block_header\", self.state\n        );\n\n        let highest_era_weights = match highest_switch_block_header.next_era_validator_weights() {\n            None => {\n                return Err(format!(\n                    \"{}: highest switch block has no era end: {}\",\n                    self.state, highest_switch_block_header,\n                ));\n            }\n            Some(weights) => weights,\n        };\n        if !highest_era_weights.contains_key(self.consensus.public_key()) {\n            debug!(\n                era = highest_switch_block_header.era_id().successor().value(),\n                \"{}: this is not a validating node in this era\", self.state\n            );\n            return Ok(None);\n        }\n\n        // If the node was validating in the previous era there is a possibility that it didn't get\n        // a chance to apply it's finality signature to the last (or some of the last)\n        // blocks of that era. 
If that's true - it it might try to do that and for that it\n        // needs to have the validator matrix updated with appropriate era data.\n        // We stop saturating the validator matrix before we get to latest era, because there might\n        // have been a chainspec override of the validators during activation.\n        let number_of_switch_blocks = recent_switch_block_headers.len();\n        for i in 0..(number_of_switch_blocks - 1) {\n            if let Some(block) = recent_switch_block_headers.get(i) {\n                if let Some(validator_weights) = block.next_era_validator_weights() {\n                    self.validator_matrix.register_validator_weights(\n                        block.era_id().successor(),\n                        validator_weights.clone(),\n                    );\n                }\n            }\n        }\n\n        if let HighestOrphanedBlockResult::Orphan(highest_orphaned_block_header) =\n            self.storage.get_highest_orphaned_block_header()\n        {\n            let max_ttl: MaxTtl = self.chainspec.transaction_config.max_ttl.into();\n            if max_ttl.synced_to_ttl(\n                highest_switch_block_header.timestamp(),\n                &highest_orphaned_block_header,\n            ) {\n                debug!(%self.state,\"{}: sufficient TTL awareness to safely participate in consensus\", self.state);\n            } else {\n                info!(\n                    \"{}: insufficient TTL awareness to safely participate in consensus\",\n                    self.state\n                );\n                return Ok(None);\n            }\n        } else {\n            return Err(\"get_highest_orphaned_block_header failed to produce record\".to_string());\n        }\n\n        let era_id = highest_switch_block_header.era_id();\n        if self.upgrade_watcher.should_upgrade_after(era_id) {\n            info!(\n                \"{}: upgrade required after era {}\",\n                self.state,\n                
era_id.value()\n            );\n            return Ok(None);\n        }\n\n        let create_required_eras =\n            self.consensus\n                .create_required_eras(effect_builder, rng, &recent_switch_block_headers);\n        match &create_required_eras {\n            Some(effects) => {\n                if effects.is_empty() {\n                    info!(state = %self.state,\"create_required_eras is empty\");\n                } else {\n                    info!(state = %self.state,\"will attempt to create required eras for consensus\");\n                }\n            }\n            None => {\n                info!(state = %self.state,\"create_required_eras is none\");\n            }\n        }\n        Ok(\n            create_required_eras\n                .map(|effects| reactor::wrap_effects(MainEvent::Consensus, effects)),\n        )\n    }\n}\n"
  },
  {
    "path": "node/src/reactor/main_reactor.rs",
    "content": "//! Main reactor for nodes.\n\nmod config;\nmod control;\nmod error;\nmod event;\nmod fetchers;\nmod memory_metrics;\nmod utils;\n\nmod catch_up;\nmod genesis_instruction;\nmod keep_up;\nmod reactor_state;\n#[cfg(test)]\nmod tests;\nmod upgrade_shutdown;\nmod upgrading_instruction;\nmod validate;\n\nuse std::{collections::BTreeMap, convert::TryInto, sync::Arc, time::Instant};\n\nuse datasize::DataSize;\nuse memory_metrics::MemoryMetrics;\nuse prometheus::Registry;\nuse tracing::{debug, error, info, warn};\n\nuse casper_binary_port::{LastProgress, NetworkName, Uptime};\nuse casper_types::{\n    bytesrepr, Block, BlockHash, BlockV2, Chainspec, ChainspecRawBytes, EraId, FinalitySignature,\n    FinalitySignatureV2, PublicKey, TimeDiff, Timestamp, Transaction, U512,\n};\n\n#[cfg(test)]\nuse crate::testing::network::NetworkedReactor;\nuse crate::{\n    components::{\n        binary_port::{BinaryPort, BinaryPortInitializationError, Metrics as BinaryPortMetrics},\n        block_accumulator::{self, BlockAccumulator},\n        block_synchronizer::{self, BlockSynchronizer},\n        block_validator::{self, BlockValidator},\n        consensus::{self, EraSupervisor},\n        contract_runtime::ContractRuntime,\n        diagnostics_port::DiagnosticsPort,\n        event_stream_server::{self, EventStreamServer},\n        gossiper::{self, GossipItem, Gossiper},\n        metrics::Metrics,\n        network::{self, GossipedAddress, Identity as NetworkIdentity, Network},\n        rest_server::RestServer,\n        shutdown_trigger::{self, CompletedBlockInfo, ShutdownTrigger},\n        storage::Storage,\n        sync_leaper::SyncLeaper,\n        transaction_acceptor::{self, TransactionAcceptor},\n        transaction_buffer,\n        transaction_buffer::TransactionBuffer,\n        upgrade_watcher::{self, UpgradeWatcher},\n        Component, ValidatorBoundComponent,\n    },\n    effect::{\n        announcements::{\n            BlockAccumulatorAnnouncement, 
ConsensusAnnouncement, ContractRuntimeAnnouncement,\n            ControlAnnouncement, FetchedNewBlockAnnouncement,\n            FetchedNewFinalitySignatureAnnouncement, GossiperAnnouncement, MetaBlockAnnouncement,\n            PeerBehaviorAnnouncement, TransactionAcceptorAnnouncement,\n            TransactionBufferAnnouncement, UnexecutedBlockAnnouncement, UpgradeWatcherAnnouncement,\n        },\n        incoming::{NetResponseIncoming, TrieResponseIncoming},\n        requests::{\n            AcceptTransactionRequest, ChainspecRawBytesRequest, ContractRuntimeRequest,\n            ReactorInfoRequest,\n        },\n        EffectBuilder, EffectExt, Effects, GossipTarget,\n    },\n    failpoints::FailpointActivation,\n    fatal,\n    protocol::Message,\n    reactor::{\n        self,\n        event_queue_metrics::EventQueueMetrics,\n        main_reactor::{fetchers::Fetchers, upgrade_shutdown::SignatureGossipTracker},\n        EventQueueHandle, QueueKind,\n    },\n    types::{\n        ForwardMetaBlock, MetaBlock, MetaBlockState, SyncHandling, TrieOrChunk, ValidatorMatrix,\n    },\n    utils::{Source, WithDir},\n    NodeRng,\n};\npub use config::Config;\npub(crate) use error::Error;\npub(crate) use event::MainEvent;\npub(crate) use reactor_state::ReactorState;\n\n/// Main node reactor.\n///\n/// This following diagram represents how the components involved in the **sync process** interact\n/// with each other.\n#[cfg_attr(doc, aquamarine::aquamarine)]\n/// ```mermaid\n/// flowchart TD\n///     G((Network))\n///     E((BlockAccumulator))\n///     H[(Storage)]\n///     I((SyncLeaper))\n///     A((\"Reactor<br/>(control logic)\"))\n///     B((ContractRuntime))\n///     C((BlockSynchronizer))\n///     D((Consensus))\n///     K((Gossiper))\n///     J((Fetcher))\n///     F((TransactionBuffer))\n///\n///     I -->|\"❌<br/>Never get<br/>SyncLeap<br/>from storage\"| H\n///     linkStyle 0 fill:none,stroke:red,color:red\n///\n///     A -->|\"Execute block<br/>(genesis or 
upgrade)\"| B\n///\n///     G -->|Peers| C\n///     G -->|Peers| D\n///\n///     C -->|Block data| E\n///\n///     J -->|Block data| C\n///\n///     D -->|Execute block| B\n///\n///     A -->|SyncLeap| I\n///\n///     B -->|Put block| H\n///     C -->|Mark block complete| H\n///     E -->|Mark block complete| H\n///     C -->|Execute block| B\n///\n///     C -->|Complete block<br/>with Transactions| F\n///\n///     K -->|Transaction| F\n///     K -->|Block data| E\n/// ```\n#[derive(DataSize, Debug)]\npub(crate) struct MainReactor {\n    // components\n    //   i/o bound components\n    storage: Storage,\n    contract_runtime: ContractRuntime,\n    upgrade_watcher: UpgradeWatcher,\n    rest_server: RestServer,\n    binary_port: BinaryPort,\n    event_stream_server: EventStreamServer,\n    diagnostics_port: DiagnosticsPort,\n    shutdown_trigger: ShutdownTrigger,\n    net: Network<MainEvent, Message>,\n    consensus: EraSupervisor,\n\n    // block handling\n    block_validator: BlockValidator,\n    block_accumulator: BlockAccumulator,\n    block_synchronizer: BlockSynchronizer,\n\n    // transaction handling\n    transaction_acceptor: TransactionAcceptor,\n    transaction_buffer: TransactionBuffer,\n\n    // gossiping components\n    address_gossiper: Gossiper<{ GossipedAddress::ID_IS_COMPLETE_ITEM }, GossipedAddress>,\n    transaction_gossiper: Gossiper<{ Transaction::ID_IS_COMPLETE_ITEM }, Transaction>,\n    block_gossiper: Gossiper<{ BlockV2::ID_IS_COMPLETE_ITEM }, BlockV2>,\n    finality_signature_gossiper:\n        Gossiper<{ FinalitySignatureV2::ID_IS_COMPLETE_ITEM }, FinalitySignatureV2>,\n\n    // record retrieval\n    sync_leaper: SyncLeaper,\n    fetchers: Fetchers, // <-- this contains all fetchers to reduce top-level clutter\n\n    // Non-components.\n    //   metrics\n    metrics: Metrics,\n    #[data_size(skip)] // Never allocates heap data.\n    memory_metrics: MemoryMetrics,\n    #[data_size(skip)]\n    event_queue_metrics: EventQueueMetrics,\n\n    
//   ambient settings / data / load-bearing config\n    validator_matrix: ValidatorMatrix,\n    trusted_hash: Option<BlockHash>,\n    chainspec: Arc<Chainspec>,\n    chainspec_raw_bytes: Arc<ChainspecRawBytes>,\n\n    //   control logic\n    state: ReactorState,\n    max_attempts: usize,\n\n    last_progress: Timestamp,\n    attempts: usize,\n    idle_tolerance: TimeDiff,\n    control_logic_default_delay: TimeDiff,\n    shutdown_for_upgrade_timeout: TimeDiff,\n    switched_to_shutdown_for_upgrade: Timestamp,\n    upgrade_timeout: TimeDiff,\n    sync_handling: SyncHandling,\n    signature_gossip_tracker: SignatureGossipTracker,\n    /// The instant at which the node has started.\n    node_startup_instant: Instant,\n\n    finality_signature_creation: bool,\n    prevent_validator_shutdown: bool,\n}\n\nimpl reactor::Reactor for MainReactor {\n    type Event = MainEvent;\n    type Config = WithDir<Config>;\n    type Error = Error;\n\n    fn dispatch_event(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        rng: &mut NodeRng,\n        event: MainEvent,\n    ) -> Effects<MainEvent> {\n        match event {\n            MainEvent::ControlAnnouncement(ctrl_ann) => {\n                error!(\"unhandled control announcement: {}\", ctrl_ann);\n                Effects::new()\n            }\n            MainEvent::SetNodeStopRequest(req) => reactor::wrap_effects(\n                MainEvent::ShutdownTrigger,\n                self.shutdown_trigger\n                    .handle_event(effect_builder, rng, req.into()),\n            ),\n\n            MainEvent::FatalAnnouncement(fatal_ann) => {\n                if self.consensus.is_active_validator() && self.prevent_validator_shutdown {\n                    warn!(%fatal_ann, \"consensus is active, not shutting down\");\n                    Effects::new()\n                } else {\n                    let ctrl_ann =\n                        MainEvent::ControlAnnouncement(ControlAnnouncement::FatalError {\n   
                         file: fatal_ann.file,\n                            line: fatal_ann.line,\n                            msg: fatal_ann.msg,\n                        });\n                    effect_builder\n                        .into_inner()\n                        .schedule(ctrl_ann, QueueKind::Control)\n                        .ignore()\n                }\n            }\n\n            // PRIMARY REACTOR STATE CONTROL LOGIC\n            MainEvent::ReactorCrank => self.crank(effect_builder, rng),\n\n            MainEvent::MainReactorRequest(req) => match req {\n                ReactorInfoRequest::ReactorState { responder } => {\n                    responder.respond(self.state).ignore()\n                }\n                ReactorInfoRequest::LastProgress { responder } => responder\n                    .respond(LastProgress::new(self.last_progress))\n                    .ignore(),\n                ReactorInfoRequest::Uptime { responder } => responder\n                    .respond(Uptime::new(self.node_startup_instant.elapsed().as_secs()))\n                    .ignore(),\n                ReactorInfoRequest::NetworkName { responder } => responder\n                    .respond(NetworkName::new(self.chainspec.network_config.name.clone()))\n                    .ignore(),\n                ReactorInfoRequest::BalanceHoldsInterval { responder } => responder\n                    .respond(self.chainspec.core_config.gas_hold_interval)\n                    .ignore(),\n            },\n            MainEvent::MetaBlockAnnouncement(MetaBlockAnnouncement(meta_block)) => self\n                .handle_meta_block(\n                    effect_builder,\n                    rng,\n                    self.finality_signature_creation,\n                    meta_block,\n                ),\n            MainEvent::UnexecutedBlockAnnouncement(UnexecutedBlockAnnouncement(block_height)) => {\n                let only_from_available_block_range = true;\n                if let 
Ok(Some(block_header)) = self\n                    .storage\n                    .read_block_header_by_height(block_height, only_from_available_block_range)\n                {\n                    let block_hash = block_header.block_hash();\n                    reactor::wrap_effects(\n                        MainEvent::Consensus,\n                        self.consensus.handle_event(\n                            effect_builder,\n                            rng,\n                            consensus::Event::BlockAdded {\n                                header: Box::new(block_header),\n                                header_hash: block_hash,\n                            },\n                        ),\n                    )\n                } else {\n                    // Warn logging here because this codepath of handling an\n                    // `UnexecutedBlockAnnouncement` is coming from the\n                    // contract runtime when a block with a lower height than\n                    // the next expected executable height is enqueued. 
This\n                    // happens after restarts when consensus is creating the\n                    // required eras and attempts to retrace its steps in the\n                    // era by enqueuing all finalized blocks starting from the\n                    // first one in that era, blocks which should have already\n                    // been executed and marked complete in storage.\n                    warn!(\n                        block_height,\n                        \"Finalized block enqueued for execution, but a complete \\\n                        block header with the same height is not present in storage.\"\n                    );\n                    Effects::new()\n                }\n            }\n\n            // LOCAL I/O BOUND COMPONENTS\n            MainEvent::UpgradeWatcher(event) => reactor::wrap_effects(\n                MainEvent::UpgradeWatcher,\n                self.upgrade_watcher\n                    .handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::UpgradeWatcherRequest(req) => reactor::wrap_effects(\n                MainEvent::UpgradeWatcher,\n                self.upgrade_watcher\n                    .handle_event(effect_builder, rng, req.into()),\n            ),\n            MainEvent::UpgradeWatcherAnnouncement(UpgradeWatcherAnnouncement(\n                maybe_next_upgrade,\n            )) => {\n                // register activation point of upgrade w/ block accumulator\n                self.block_accumulator.register_activation_point(\n                    maybe_next_upgrade\n                        .as_ref()\n                        .map(|next_upgrade| next_upgrade.activation_point()),\n                );\n                reactor::wrap_effects(\n                    MainEvent::UpgradeWatcher,\n                    self.upgrade_watcher.handle_event(\n                        effect_builder,\n                        rng,\n                        
upgrade_watcher::Event::GotNextUpgrade(maybe_next_upgrade),\n                    ),\n                )\n            }\n            MainEvent::RestServer(event) => reactor::wrap_effects(\n                MainEvent::RestServer,\n                self.rest_server.handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::MetricsRequest(req) => reactor::wrap_effects(\n                MainEvent::MetricsRequest,\n                self.metrics.handle_event(effect_builder, rng, req),\n            ),\n            MainEvent::ChainspecRawBytesRequest(\n                ChainspecRawBytesRequest::GetChainspecRawBytes(responder),\n            ) => responder.respond(self.chainspec_raw_bytes.clone()).ignore(),\n            MainEvent::EventStreamServer(event) => reactor::wrap_effects(\n                MainEvent::EventStreamServer,\n                self.event_stream_server\n                    .handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::ShutdownTrigger(event) => reactor::wrap_effects(\n                MainEvent::ShutdownTrigger,\n                self.shutdown_trigger\n                    .handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::DiagnosticsPort(event) => reactor::wrap_effects(\n                MainEvent::DiagnosticsPort,\n                self.diagnostics_port\n                    .handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::DumpConsensusStateRequest(req) => reactor::wrap_effects(\n                MainEvent::Consensus,\n                self.consensus.handle_event(effect_builder, rng, req.into()),\n            ),\n\n            // NETWORK CONNECTION AND ORIENTATION\n            MainEvent::Network(event) => reactor::wrap_effects(\n                MainEvent::Network,\n                self.net.handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::NetworkRequest(req) => {\n                let event = 
MainEvent::Network(network::Event::from(req));\n                self.dispatch_event(effect_builder, rng, event)\n            }\n            MainEvent::NetworkInfoRequest(req) => {\n                let event = MainEvent::Network(network::Event::from(req));\n                self.dispatch_event(effect_builder, rng, event)\n            }\n            MainEvent::NetworkPeerBehaviorAnnouncement(ann) => {\n                let mut effects = Effects::new();\n                match &ann {\n                    PeerBehaviorAnnouncement::OffenseCommitted {\n                        offender,\n                        justification: _,\n                    } => {\n                        let event = MainEvent::BlockSynchronizer(\n                            block_synchronizer::Event::DisconnectFromPeer(**offender),\n                        );\n                        effects.extend(self.dispatch_event(effect_builder, rng, event));\n                    }\n                }\n                effects.extend(self.dispatch_event(\n                    effect_builder,\n                    rng,\n                    MainEvent::Network(ann.into()),\n                ));\n                effects\n            }\n            MainEvent::NetworkPeerRequestingData(incoming) => reactor::wrap_effects(\n                MainEvent::Storage,\n                self.storage\n                    .handle_event(effect_builder, rng, incoming.into()),\n            ),\n            MainEvent::NetworkPeerProvidingData(NetResponseIncoming { sender, message }) => {\n                reactor::handle_get_response(self, effect_builder, rng, sender, message)\n            }\n            MainEvent::AddressGossiper(event) => reactor::wrap_effects(\n                MainEvent::AddressGossiper,\n                self.address_gossiper\n                    .handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::AddressGossiperIncoming(incoming) => reactor::wrap_effects(\n                
MainEvent::AddressGossiper,\n                self.address_gossiper\n                    .handle_event(effect_builder, rng, incoming.into()),\n            ),\n            MainEvent::AddressGossiperCrank(req) => reactor::wrap_effects(\n                MainEvent::AddressGossiper,\n                self.address_gossiper\n                    .handle_event(effect_builder, rng, req.into()),\n            ),\n            MainEvent::AddressGossiperAnnouncement(gossiper_ann) => match gossiper_ann {\n                GossiperAnnouncement::GossipReceived { .. }\n                | GossiperAnnouncement::NewItemBody { .. }\n                | GossiperAnnouncement::FinishedGossiping(_) => Effects::new(),\n                GossiperAnnouncement::NewCompleteItem(gossiped_address) => {\n                    let reactor_event =\n                        MainEvent::Network(network::Event::PeerAddressReceived(gossiped_address));\n                    self.dispatch_event(effect_builder, rng, reactor_event)\n                }\n            },\n            MainEvent::SyncLeaper(event) => reactor::wrap_effects(\n                MainEvent::SyncLeaper,\n                self.sync_leaper.handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::Consensus(event) => reactor::wrap_effects(\n                MainEvent::Consensus,\n                self.consensus.handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::ConsensusMessageIncoming(incoming) => reactor::wrap_effects(\n                MainEvent::Consensus,\n                self.consensus\n                    .handle_event(effect_builder, rng, incoming.into()),\n            ),\n            MainEvent::ConsensusDemand(demand) => reactor::wrap_effects(\n                MainEvent::Consensus,\n                self.consensus\n                    .handle_event(effect_builder, rng, demand.into()),\n            ),\n            MainEvent::ConsensusAnnouncement(consensus_announcement) => {\n                match 
consensus_announcement {\n                    ConsensusAnnouncement::Proposed(block) => {\n                        let reactor_event = MainEvent::TransactionBuffer(\n                            transaction_buffer::Event::BlockProposed(block),\n                        );\n                        self.dispatch_event(effect_builder, rng, reactor_event)\n                    }\n                    ConsensusAnnouncement::Finalized(block) => {\n                        let reactor_event = MainEvent::TransactionBuffer(\n                            transaction_buffer::Event::BlockFinalized(block),\n                        );\n                        self.dispatch_event(effect_builder, rng, reactor_event)\n                    }\n                    ConsensusAnnouncement::Fault {\n                        era_id,\n                        public_key,\n                        timestamp,\n                    } => {\n                        let reactor_event =\n                            MainEvent::EventStreamServer(event_stream_server::Event::Fault {\n                                era_id,\n                                public_key,\n                                timestamp,\n                            });\n                        self.dispatch_event(effect_builder, rng, reactor_event)\n                    }\n                }\n            }\n\n            // BLOCKS\n            MainEvent::BlockValidator(event) => reactor::wrap_effects(\n                MainEvent::BlockValidator,\n                self.block_validator\n                    .handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::BlockValidatorRequest(req) => self.dispatch_event(\n                effect_builder,\n                rng,\n                MainEvent::BlockValidator(block_validator::Event::from(req)),\n            ),\n            MainEvent::BlockAccumulator(event) => reactor::wrap_effects(\n                MainEvent::BlockAccumulator,\n                self.block_accumulator\n  
                  .handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::BlockAccumulatorRequest(request) => reactor::wrap_effects(\n                MainEvent::BlockAccumulator,\n                self.block_accumulator\n                    .handle_event(effect_builder, rng, request.into()),\n            ),\n            MainEvent::BlockSynchronizer(event) => reactor::wrap_effects(\n                MainEvent::BlockSynchronizer,\n                self.block_synchronizer\n                    .handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::BlockSynchronizerRequest(req) => reactor::wrap_effects(\n                MainEvent::BlockSynchronizer,\n                self.block_synchronizer\n                    .handle_event(effect_builder, rng, req.into()),\n            ),\n            MainEvent::BlockAccumulatorAnnouncement(\n                BlockAccumulatorAnnouncement::AcceptedNewFinalitySignature { finality_signature },\n            ) => {\n                debug!(\n                    \"notifying finality signature gossiper to start gossiping for: {} , {}\",\n                    finality_signature.block_hash(),\n                    finality_signature.public_key(),\n                );\n                let mut effects = reactor::wrap_effects(\n                    MainEvent::FinalitySignatureGossiper,\n                    self.finality_signature_gossiper.handle_event(\n                        effect_builder,\n                        rng,\n                        gossiper::Event::ItemReceived {\n                            item_id: finality_signature.gossip_id(),\n                            source: Source::Ourself,\n                            target: finality_signature.gossip_target(),\n                        },\n                    ),\n                );\n\n                effects.extend(reactor::wrap_effects(\n                    MainEvent::EventStreamServer,\n                    
self.event_stream_server.handle_event(\n                        effect_builder,\n                        rng,\n                        event_stream_server::Event::FinalitySignature(Box::new(\n                            (*finality_signature).into(),\n                        )),\n                    ),\n                ));\n\n                effects\n            }\n            MainEvent::BlockGossiper(event) => reactor::wrap_effects(\n                MainEvent::BlockGossiper,\n                self.block_gossiper.handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::BlockGossiperIncoming(incoming) => reactor::wrap_effects(\n                MainEvent::BlockGossiper,\n                self.block_gossiper\n                    .handle_event(effect_builder, rng, incoming.into()),\n            ),\n            MainEvent::BlockGossiperAnnouncement(GossiperAnnouncement::GossipReceived {\n                item_id: gossiped_block_id,\n                sender,\n            }) => reactor::wrap_effects(\n                MainEvent::BlockAccumulator,\n                self.block_accumulator.handle_event(\n                    effect_builder,\n                    rng,\n                    block_accumulator::Event::RegisterPeer {\n                        block_hash: gossiped_block_id,\n                        era_id: None,\n                        sender,\n                    },\n                ),\n            ),\n            MainEvent::BlockGossiperAnnouncement(GossiperAnnouncement::NewCompleteItem(\n                gossiped_block_id,\n            )) => {\n                error!(%gossiped_block_id, \"gossiper should not announce new block\");\n                Effects::new()\n            }\n            MainEvent::BlockGossiperAnnouncement(GossiperAnnouncement::NewItemBody {\n                item,\n                sender,\n            }) => reactor::wrap_effects(\n                MainEvent::BlockAccumulator,\n                
self.block_accumulator.handle_event(\n                    effect_builder,\n                    rng,\n                    block_accumulator::Event::ReceivedBlock {\n                        block: Arc::new(*item),\n                        sender,\n                    },\n                ),\n            ),\n            MainEvent::BlockGossiperAnnouncement(GossiperAnnouncement::FinishedGossiping(\n                _gossiped_block_id,\n            )) => Effects::new(),\n            MainEvent::BlockFetcherAnnouncement(FetchedNewBlockAnnouncement { block, peer }) => {\n                // The block accumulator shouldn't concern itself with historical blocks that are\n                // being fetched. If the block is not convertible to the current version it means\n                // that it is surely a historical block.\n                if let Ok(block) = (*block).clone().try_into() {\n                    reactor::wrap_effects(\n                        MainEvent::BlockAccumulator,\n                        self.block_accumulator.handle_event(\n                            effect_builder,\n                            rng,\n                            block_accumulator::Event::ReceivedBlock {\n                                block: Arc::new(block),\n                                sender: peer,\n                            },\n                        ),\n                    )\n                } else {\n                    Effects::new()\n                }\n            }\n\n            MainEvent::FinalitySignatureIncoming(incoming) => {\n                // Finality signature received via broadcast.\n                let sender = incoming.sender;\n                let finality_signature = incoming.message;\n                debug!(\n                    \"FinalitySignatureIncoming({},{},{},{})\",\n                    finality_signature.era_id(),\n                    finality_signature.block_hash(),\n                    finality_signature.public_key(),\n                    sender\n    
            );\n                let block_accumulator_event = block_accumulator::Event::ReceivedFinalitySignature {\n                    finality_signature,\n                    sender,\n                };\n                reactor::wrap_effects(\n                    MainEvent::BlockAccumulator,\n                    self.block_accumulator.handle_event(\n                        effect_builder,\n                        rng,\n                        block_accumulator_event,\n                    ),\n                )\n            }\n            MainEvent::FinalitySignatureGossiper(event) => reactor::wrap_effects(\n                MainEvent::FinalitySignatureGossiper,\n                self.finality_signature_gossiper\n                    .handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::FinalitySignatureGossiperIncoming(incoming) => reactor::wrap_effects(\n                MainEvent::FinalitySignatureGossiper,\n                self.finality_signature_gossiper\n                    .handle_event(effect_builder, rng, incoming.into()),\n            ),\n            MainEvent::FinalitySignatureGossiperAnnouncement(\n                GossiperAnnouncement::GossipReceived {\n                    item_id: gossiped_finality_signature_id,\n                    sender,\n                },\n            ) => reactor::wrap_effects(\n                MainEvent::BlockAccumulator,\n                self.block_accumulator.handle_event(\n                    effect_builder,\n                    rng,\n                    block_accumulator::Event::RegisterPeer {\n                        block_hash: *gossiped_finality_signature_id.block_hash(),\n                        era_id: Some(gossiped_finality_signature_id.era_id()),\n                        sender,\n                    },\n                ),\n            ),\n            MainEvent::FinalitySignatureGossiperAnnouncement(\n                GossiperAnnouncement::NewCompleteItem(gossiped_finality_signature_id),\n      
      ) => {\n                error!(%gossiped_finality_signature_id, \"gossiper should not announce new finality signature\");\n                Effects::new()\n            }\n            MainEvent::FinalitySignatureGossiperAnnouncement(\n                GossiperAnnouncement::NewItemBody { item, sender },\n            ) => reactor::wrap_effects(\n                MainEvent::BlockAccumulator,\n                self.block_accumulator.handle_event(\n                    effect_builder,\n                    rng,\n                    block_accumulator::Event::ReceivedFinalitySignature {\n                        finality_signature: item,\n                        sender,\n                    },\n                ),\n            ),\n            MainEvent::FinalitySignatureGossiperAnnouncement(\n                GossiperAnnouncement::FinishedGossiping(gossiped_finality_signature_id),\n            ) => {\n                self.signature_gossip_tracker\n                    .register_signature(gossiped_finality_signature_id);\n                Effects::new()\n            }\n            MainEvent::FinalitySignatureFetcherAnnouncement(\n                FetchedNewFinalitySignatureAnnouncement {\n                    finality_signature,\n                    peer,\n                },\n            ) => {\n                // If the signature is not convertible to the current version it means\n                // that it is historical.\n                if let FinalitySignature::V2(sig) = *finality_signature {\n                    reactor::wrap_effects(\n                        MainEvent::BlockAccumulator,\n                        self.block_accumulator.handle_event(\n                            effect_builder,\n                            rng,\n                            block_accumulator::Event::ReceivedFinalitySignature {\n                                finality_signature: Box::new(sig),\n                                sender: peer,\n                            },\n                        
),\n                    )\n                } else {\n                    Effects::new()\n                }\n            }\n\n            // TRANSACTIONS\n            MainEvent::TransactionAcceptor(event) => reactor::wrap_effects(\n                MainEvent::TransactionAcceptor,\n                self.transaction_acceptor\n                    .handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::AcceptTransactionRequest(AcceptTransactionRequest {\n                transaction,\n                is_speculative,\n                responder,\n            }) => {\n                let source = if is_speculative {\n                    Source::SpeculativeExec\n                } else {\n                    Source::Client\n                };\n                let event = transaction_acceptor::Event::Accept {\n                    transaction,\n                    source,\n                    maybe_responder: Some(responder),\n                };\n                reactor::wrap_effects(\n                    MainEvent::TransactionAcceptor,\n                    self.transaction_acceptor\n                        .handle_event(effect_builder, rng, event),\n                )\n            }\n            MainEvent::TransactionAcceptorAnnouncement(\n                TransactionAcceptorAnnouncement::AcceptedNewTransaction {\n                    transaction,\n                    source,\n                },\n            ) => {\n                let mut effects = Effects::new();\n\n                match source {\n                    Source::Ourself => (), // internal activity does not require further action\n                    Source::Peer(_) => {\n                        // this is a response to a transaction fetch request, dispatch to fetcher\n                        effects.extend(self.fetchers.dispatch_fetcher_event(\n                            effect_builder,\n                            rng,\n                            
MainEvent::TransactionAcceptorAnnouncement(\n                                TransactionAcceptorAnnouncement::AcceptedNewTransaction {\n                                    transaction,\n                                    source,\n                                },\n                            ),\n                        ));\n                    }\n                    Source::Client | Source::PeerGossiped(_) => {\n                        // we must attempt to gossip onwards\n                        effects.extend(self.dispatch_event(\n                            effect_builder,\n                            rng,\n                            MainEvent::TransactionGossiper(gossiper::Event::ItemReceived {\n                                item_id: transaction.gossip_id(),\n                                source,\n                                target: transaction.gossip_target(),\n                            }),\n                        ));\n                        // notify event stream\n                        effects.extend(self.dispatch_event(\n                            effect_builder,\n                            rng,\n                            MainEvent::EventStreamServer(\n                                event_stream_server::Event::TransactionAccepted(Arc::clone(\n                                    &transaction,\n                                )),\n                            ),\n                        ));\n                    }\n                    Source::SpeculativeExec => {\n                        error!(\n                            %transaction,\n                            \"transaction acceptor should not announce speculative exec transactions\"\n                        );\n                    }\n                }\n\n                effects\n            }\n            MainEvent::TransactionAcceptorAnnouncement(\n                TransactionAcceptorAnnouncement::InvalidTransaction {\n                    transaction: _,\n                    source: 
_,\n                },\n            ) => Effects::new(),\n            MainEvent::TransactionGossiper(event) => reactor::wrap_effects(\n                MainEvent::TransactionGossiper,\n                self.transaction_gossiper\n                    .handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::TransactionGossiperIncoming(incoming) => reactor::wrap_effects(\n                MainEvent::TransactionGossiper,\n                self.transaction_gossiper\n                    .handle_event(effect_builder, rng, incoming.into()),\n            ),\n            MainEvent::TransactionGossiperAnnouncement(GossiperAnnouncement::GossipReceived {\n                ..\n            }) => {\n                // Ignore the announcement.\n                Effects::new()\n            }\n            MainEvent::TransactionGossiperAnnouncement(GossiperAnnouncement::NewCompleteItem(\n                gossiped_transaction_id,\n            )) => {\n                error!(%gossiped_transaction_id, \"gossiper should not announce new transaction\");\n                Effects::new()\n            }\n            MainEvent::TransactionGossiperAnnouncement(GossiperAnnouncement::NewItemBody {\n                item,\n                sender,\n            }) => reactor::wrap_effects(\n                MainEvent::TransactionAcceptor,\n                self.transaction_acceptor.handle_event(\n                    effect_builder,\n                    rng,\n                    transaction_acceptor::Event::Accept {\n                        transaction: *item,\n                        source: Source::PeerGossiped(sender),\n                        maybe_responder: None,\n                    },\n                ),\n            ),\n            MainEvent::TransactionGossiperAnnouncement(\n                GossiperAnnouncement::FinishedGossiping(gossiped_txn_id),\n            ) => {\n                let reactor_event = MainEvent::TransactionBuffer(\n                    
transaction_buffer::Event::ReceiveTransactionGossiped(gossiped_txn_id),\n                );\n                self.dispatch_event(effect_builder, rng, reactor_event)\n            }\n            MainEvent::TransactionBuffer(event) => reactor::wrap_effects(\n                MainEvent::TransactionBuffer,\n                self.transaction_buffer\n                    .handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::TransactionBufferRequest(req) => self.dispatch_event(\n                effect_builder,\n                rng,\n                MainEvent::TransactionBuffer(req.into()),\n            ),\n            MainEvent::TransactionBufferAnnouncement(\n                TransactionBufferAnnouncement::TransactionsExpired(hashes),\n            ) => {\n                let reactor_event = MainEvent::EventStreamServer(\n                    event_stream_server::Event::TransactionsExpired(hashes),\n                );\n                self.dispatch_event(effect_builder, rng, reactor_event)\n            }\n\n            // CONTRACT RUNTIME & GLOBAL STATE\n            MainEvent::ContractRuntime(event) => reactor::wrap_effects(\n                MainEvent::ContractRuntime,\n                self.contract_runtime\n                    .handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::ContractRuntimeRequest(req) => reactor::wrap_effects(\n                MainEvent::ContractRuntime,\n                self.contract_runtime\n                    .handle_event(effect_builder, rng, req.into()),\n            ),\n            MainEvent::ContractRuntimeAnnouncement(\n                ContractRuntimeAnnouncement::CommitStepSuccess { era_id, effects },\n            ) => {\n                let reactor_event =\n                    MainEvent::EventStreamServer(event_stream_server::Event::Step {\n                        era_id,\n                        execution_effects: effects,\n                    });\n                
self.dispatch_event(effect_builder, rng, reactor_event)\n            }\n            MainEvent::ContractRuntimeAnnouncement(\n                ContractRuntimeAnnouncement::UpcomingEraValidators {\n                    era_that_is_ending,\n                    upcoming_era_validators,\n                },\n            ) => {\n                info!(\n                    \"UpcomingEraValidators era_that_is_ending: {}\",\n                    era_that_is_ending\n                );\n                self.validator_matrix.register_eras(upcoming_era_validators);\n                Effects::new()\n            }\n            MainEvent::ContractRuntimeAnnouncement(\n                ContractRuntimeAnnouncement::NextEraGasPrice {\n                    era_id,\n                    next_era_gas_price,\n                },\n            ) => {\n                info!(\n                    \"New era gas price {} for era {}\",\n                    next_era_gas_price, era_id\n                );\n                let event = MainEvent::ContractRuntimeRequest(\n                    ContractRuntimeRequest::UpdateRuntimePrice(era_id, next_era_gas_price),\n                );\n                let mut effects = self.dispatch_event(effect_builder, rng, event);\n                let reactor_event = MainEvent::TransactionBuffer(\n                    transaction_buffer::Event::UpdateEraGasPrice(era_id, next_era_gas_price),\n                );\n                effects.extend(self.dispatch_event(effect_builder, rng, reactor_event));\n                let reactor_event = MainEvent::BlockValidator(\n                    block_validator::Event::UpdateEraGasPrice(era_id, next_era_gas_price),\n                );\n                effects.extend(self.dispatch_event(effect_builder, rng, reactor_event));\n                effects\n            }\n\n            MainEvent::TrieRequestIncoming(req) => reactor::wrap_effects(\n                MainEvent::ContractRuntime,\n                self.contract_runtime\n                    
.handle_event(effect_builder, rng, req.into()),\n            ),\n            MainEvent::TrieDemand(demand) => reactor::wrap_effects(\n                MainEvent::ContractRuntime,\n                self.contract_runtime\n                    .handle_event(effect_builder, rng, demand.into()),\n            ),\n            MainEvent::TrieResponseIncoming(TrieResponseIncoming { sender, message }) => {\n                reactor::handle_fetch_response::<Self, TrieOrChunk>(\n                    self,\n                    effect_builder,\n                    rng,\n                    sender,\n                    &message.0,\n                )\n            }\n\n            // STORAGE\n            MainEvent::Storage(event) => reactor::wrap_effects(\n                MainEvent::Storage,\n                self.storage.handle_event(effect_builder, rng, event),\n            ),\n            MainEvent::StorageRequest(req) => reactor::wrap_effects(\n                MainEvent::Storage,\n                self.storage.handle_event(effect_builder, rng, req.into()),\n            ),\n            MainEvent::MarkBlockCompletedRequest(req) => reactor::wrap_effects(\n                MainEvent::Storage,\n                self.storage.handle_event(effect_builder, rng, req.into()),\n            ),\n            MainEvent::MakeBlockExecutableRequest(req) => reactor::wrap_effects(\n                MainEvent::Storage,\n                self.storage.handle_event(effect_builder, rng, req.into()),\n            ),\n            MainEvent::BinaryPort(req) => reactor::wrap_effects(\n                MainEvent::BinaryPort,\n                self.binary_port.handle_event(effect_builder, rng, req),\n            ),\n\n            // This event gets emitted when we manage to read the era validators from the global\n            // states of a block after an upgrade and its parent. 
Once that happens, we can check\n            // for the signs of any changes happening during the upgrade and register the correct\n            // set of validators in the validators matrix.\n            MainEvent::GotBlockAfterUpgradeEraValidators(\n                era_id,\n                parent_era_validators,\n                block_era_validators,\n            ) => {\n                // `era_id`, being the era of the block after the upgrade, will be absent in the\n                // validators stored in the block after the upgrade - therefore we will use its\n                // successor for the comparison.\n                let era_to_check = era_id.successor();\n                // We read the validators for era_id+1 from the parent of the block after the\n                // upgrade.\n                let validators_in_parent = match parent_era_validators.get(&era_to_check) {\n                    Some(validators) => validators,\n                    None => {\n                        return fatal!(\n                            effect_builder,\n                            \"couldn't find validators for era {} in parent_era_validators\",\n                            era_to_check\n                        )\n                        .ignore();\n                    }\n                };\n                // We also read the validators from the block after the upgrade itself.\n                let validators_in_block = match block_era_validators.get(&era_to_check) {\n                    Some(validators) => validators,\n                    None => {\n                        return fatal!(\n                            effect_builder,\n                            \"couldn't find validators for era {} in block_era_validators\",\n                            era_to_check\n                        )\n                        .ignore();\n                    }\n                };\n                // Decide which validators to use for `era_id` in the validators matrix.\n              
  let validators_to_register = if validators_in_parent == validators_in_block {\n                    // Nothing interesting happened - register the regular validators, ie. the\n                    // ones stored for `era_id` in the parent of the block after the upgrade.\n                    match parent_era_validators.get(&era_id) {\n                        Some(validators) => validators,\n                        None => {\n                            return fatal!(\n                                effect_builder,\n                                \"couldn't find validators for era {} in parent_era_validators\",\n                                era_id\n                            )\n                            .ignore();\n                        }\n                    }\n                } else {\n                    // We had an upgrade changing the validators! We use the same validators that\n                    // will be used for the era after the upgrade, as we can't trust the ones we\n                    // would use normally.\n                    validators_in_block\n                };\n                let mut effects = self.update_validator_weights(\n                    effect_builder,\n                    rng,\n                    era_id,\n                    validators_to_register.clone(),\n                );\n                // Crank the reactor so that any synchronizing tasks blocked by the lack of\n                // validators for `era_id` can resume.\n                effects.extend(\n                    effect_builder\n                        .immediately()\n                        .event(|_| MainEvent::ReactorCrank),\n                );\n                effects\n            }\n\n            // DELEGATE ALL FETCHER RELEVANT EVENTS to self.fetchers.dispatch_fetcher_event(..)\n            MainEvent::LegacyDeployFetcher(..)\n            | MainEvent::LegacyDeployFetcherRequest(..)\n            | MainEvent::BlockFetcher(..)\n            | 
MainEvent::BlockFetcherRequest(..)\n            | MainEvent::TransactionFetcher(..)\n            | MainEvent::TransactionFetcherRequest(..)\n            | MainEvent::BlockHeaderFetcher(..)\n            | MainEvent::BlockHeaderFetcherRequest(..)\n            | MainEvent::TrieOrChunkFetcher(..)\n            | MainEvent::TrieOrChunkFetcherRequest(..)\n            | MainEvent::SyncLeapFetcher(..)\n            | MainEvent::SyncLeapFetcherRequest(..)\n            | MainEvent::ApprovalsHashesFetcher(..)\n            | MainEvent::ApprovalsHashesFetcherRequest(..)\n            | MainEvent::FinalitySignatureFetcher(..)\n            | MainEvent::FinalitySignatureFetcherRequest(..)\n            | MainEvent::BlockExecutionResultsOrChunkFetcher(..)\n            | MainEvent::BlockExecutionResultsOrChunkFetcherRequest(..) => self\n                .fetchers\n                .dispatch_fetcher_event(effect_builder, rng, event),\n        }\n    }\n\n    fn new(\n        config: Self::Config,\n        chainspec: Arc<Chainspec>,\n        chainspec_raw_bytes: Arc<ChainspecRawBytes>,\n        network_identity: NetworkIdentity,\n        registry: &Registry,\n        event_queue: EventQueueHandle<Self::Event>,\n        _rng: &mut NodeRng,\n    ) -> Result<(Self, Effects<MainEvent>), Error> {\n        let node_startup_instant = Instant::now();\n\n        let effect_builder = EffectBuilder::new(event_queue);\n\n        let metrics = Metrics::new(registry.clone());\n        let memory_metrics = MemoryMetrics::new(registry.clone())?;\n        let event_queue_metrics = EventQueueMetrics::new(registry.clone(), event_queue)?;\n\n        let protocol_version = chainspec.protocol_config.version;\n        let prevent_validator_shutdown = config.value().node.prevent_validator_shutdown;\n\n        if !chainspec\n            .core_config\n            .rewards_handling\n            .is_valid_configuration()\n        {\n            error!(\"invalid rewards configuration\");\n            return 
Err(Error::BytesRepr(bytesrepr::Error::Formatting));\n        }\n\n        let trusted_hash = config.value().node.trusted_hash;\n        let (root_dir, config) = config.into_parts();\n        let (our_secret_key, our_public_key) = config.consensus.load_keys(&root_dir)?;\n        let validator_matrix = ValidatorMatrix::new(\n            chainspec.core_config.finality_threshold_fraction,\n            chainspec.name_hash(),\n            chainspec\n                .protocol_config\n                .global_state_update\n                .as_ref()\n                .and_then(|global_state_update| global_state_update.validators.clone()),\n            chainspec.protocol_config.activation_point.era_id(),\n            our_secret_key.clone(),\n            our_public_key.clone(),\n            chainspec.core_config.auction_delay,\n            chainspec.core_config.signature_rewards_max_delay,\n        );\n\n        let storage_config = WithDir::new(&root_dir, config.storage.clone());\n\n        let hard_reset_to_start_of_era = chainspec.hard_reset_to_start_of_era();\n        let storage = Storage::new(\n            &storage_config,\n            hard_reset_to_start_of_era,\n            protocol_version,\n            chainspec.protocol_config.activation_point.era_id(),\n            &chainspec.network_config.name,\n            chainspec.transaction_config.max_ttl.into(),\n            chainspec.core_config.recent_era_count(),\n            Some(registry),\n            config.node.force_resync,\n            chainspec.transaction_config.clone(),\n        )?;\n\n        let contract_runtime = ContractRuntime::new(\n            storage.root_path(),\n            &config.contract_runtime,\n            chainspec.clone(),\n            registry,\n        )?;\n\n        let allow_handshake = config.node.sync_handling != SyncHandling::Isolated;\n\n        let network = Network::new(\n            config.network.clone(),\n            network_identity,\n            Some((our_secret_key, 
our_public_key)),\n            registry,\n            chainspec.as_ref(),\n            validator_matrix.clone(),\n            allow_handshake,\n        )?;\n\n        let address_gossiper = Gossiper::<{ GossipedAddress::ID_IS_COMPLETE_ITEM }, _>::new(\n            \"address_gossiper\",\n            config.gossip,\n            registry,\n        )?;\n\n        let rest_server = RestServer::new(\n            config.rest_server.clone(),\n            protocol_version,\n            chainspec.network_config.name.clone(),\n        );\n        let binary_port_metrics =\n            BinaryPortMetrics::new(registry).map_err(BinaryPortInitializationError::from)?;\n        let binary_port = BinaryPort::new(\n            config.binary_port_server.clone(),\n            chainspec.clone(),\n            binary_port_metrics,\n        );\n        let event_stream_server = EventStreamServer::new(\n            config.event_stream_server.clone(),\n            storage.root_path().to_path_buf(),\n            protocol_version,\n        );\n        let diagnostics_port =\n            DiagnosticsPort::new(WithDir::new(&root_dir, config.diagnostics_port));\n        let shutdown_trigger = ShutdownTrigger::new();\n\n        // local / remote data management\n        let sync_leaper = SyncLeaper::new(chainspec.clone(), registry)?;\n        let fetchers = Fetchers::new(&config.fetcher, registry)?;\n\n        // gossipers\n        let block_gossiper = Gossiper::<{ BlockV2::ID_IS_COMPLETE_ITEM }, _>::new(\n            \"block_gossiper\",\n            config.gossip,\n            registry,\n        )?;\n        let transaction_gossiper = Gossiper::<{ Transaction::ID_IS_COMPLETE_ITEM }, _>::new(\n            \"transaction_gossiper\",\n            config.gossip,\n            registry,\n        )?;\n        let finality_signature_gossiper = Gossiper::<\n            { FinalitySignatureV2::ID_IS_COMPLETE_ITEM },\n            _,\n        >::new(\n            \"finality_signature_gossiper\", config.gossip, 
registry\n        )?;\n\n        // consensus\n        let consensus = EraSupervisor::new(\n            storage.root_path(),\n            validator_matrix.clone(),\n            config.consensus,\n            chainspec.clone(),\n            registry,\n        )?;\n\n        // chain / transaction management\n\n        let block_accumulator = BlockAccumulator::new(\n            config.block_accumulator,\n            validator_matrix.clone(),\n            chainspec.core_config.unbonding_delay,\n            chainspec.core_config.minimum_block_time,\n            chainspec.core_config.validator_slots,\n            registry,\n        )?;\n        let block_synchronizer = BlockSynchronizer::new(\n            config.block_synchronizer,\n            chainspec.clone(),\n            chainspec.core_config.simultaneous_peer_requests,\n            validator_matrix.clone(),\n            registry,\n        )?;\n        let block_validator = BlockValidator::new(\n            Arc::clone(&chainspec),\n            validator_matrix.clone(),\n            config.block_validator,\n            chainspec.vacancy_config.min_gas_price,\n        );\n        let upgrade_watcher =\n            UpgradeWatcher::new(chainspec.as_ref(), config.upgrade_watcher, &root_dir)?;\n        let transaction_acceptor = TransactionAcceptor::new(\n            config.transaction_acceptor,\n            Arc::clone(&chainspec),\n            registry,\n        )?;\n        let transaction_buffer =\n            TransactionBuffer::new(Arc::clone(&chainspec), config.transaction_buffer, registry)?;\n\n        let reactor = MainReactor {\n            chainspec,\n            chainspec_raw_bytes,\n            storage,\n            contract_runtime,\n            upgrade_watcher,\n            net: network,\n            address_gossiper,\n\n            rest_server,\n            binary_port,\n            event_stream_server,\n            transaction_acceptor,\n            fetchers,\n\n            block_gossiper,\n            
transaction_gossiper,\n            finality_signature_gossiper,\n            sync_leaper,\n            transaction_buffer,\n            consensus,\n            block_validator,\n            block_accumulator,\n            block_synchronizer,\n            diagnostics_port,\n            shutdown_trigger,\n\n            metrics,\n            memory_metrics,\n            event_queue_metrics,\n\n            state: ReactorState::Initialize {},\n            attempts: 0,\n            last_progress: Timestamp::now(),\n            max_attempts: config.node.max_attempts,\n            idle_tolerance: config.node.idle_tolerance,\n            control_logic_default_delay: config.node.control_logic_default_delay,\n            trusted_hash,\n            validator_matrix,\n            sync_handling: config.node.sync_handling,\n            signature_gossip_tracker: SignatureGossipTracker::new(),\n            shutdown_for_upgrade_timeout: config.node.shutdown_for_upgrade_timeout,\n            switched_to_shutdown_for_upgrade: Timestamp::from(0),\n            upgrade_timeout: config.node.upgrade_timeout,\n            node_startup_instant,\n            finality_signature_creation: true,\n            prevent_validator_shutdown,\n        };\n        info!(\"MainReactor: instantiated\");\n\n        // If there's an upgrade staged with the same activation point as the current one, we must\n        // shut down immediately for upgrade.\n        let should_upgrade_immediately = reactor.upgrade_watcher.next_upgrade_activation_point()\n            == Some(reactor.chainspec.protocol_config.activation_point.era_id());\n        let effects = if should_upgrade_immediately {\n            info!(\"MainReactor: immediate shutdown for upgrade\");\n            effect_builder\n                .immediately()\n                .event(|()| MainEvent::ControlAnnouncement(ControlAnnouncement::ShutdownForUpgrade))\n        } else {\n            effect_builder\n                .immediately()\n                
.event(|()| MainEvent::ReactorCrank)\n        };\n        Ok((reactor, effects))\n    }\n\n    fn update_metrics(&mut self, event_queue_handle: EventQueueHandle<Self::Event>) {\n        self.memory_metrics.estimate(self);\n        self.event_queue_metrics\n            .record_event_queue_counts(&event_queue_handle)\n    }\n\n    fn activate_failpoint(&mut self, activation: &FailpointActivation) {\n        if activation.key().starts_with(\"consensus\") {\n            <EraSupervisor as Component<MainEvent>>::activate_failpoint(\n                &mut self.consensus,\n                activation,\n            );\n        }\n        if activation.key().starts_with(\"finality_signature_creation\") {\n            self.finality_signature_creation = false;\n        }\n    }\n}\n\nimpl MainReactor {\n    fn update_validator_weights(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        rng: &mut NodeRng,\n        era_id: EraId,\n        validator_weights: BTreeMap<PublicKey, U512>,\n    ) -> Effects<MainEvent> {\n        self.validator_matrix\n            .register_validator_weights(era_id, validator_weights);\n        info!(%era_id, \"validator_matrix updated\");\n        // notify validator bound components\n        let mut effects = reactor::wrap_effects(\n            MainEvent::BlockAccumulator,\n            self.block_accumulator\n                .handle_validators(effect_builder, rng),\n        );\n        effects.extend(reactor::wrap_effects(\n            MainEvent::BlockSynchronizer,\n            self.block_synchronizer\n                .handle_validators(effect_builder, rng),\n        ));\n        effects\n    }\n\n    fn handle_meta_block(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        rng: &mut NodeRng,\n        create_finality_signatures: bool,\n        mut meta_block: MetaBlock,\n    ) -> Effects<MainEvent> {\n        debug!(\n            \"MetaBlock: handling meta block {} {} {:?}\",\n            
meta_block.height(),\n            meta_block.hash(),\n            meta_block.state()\n        );\n        if !meta_block.state().is_stored() {\n            return fatal!(\n                effect_builder,\n                \"MetaBlock: block should be stored after execution or accumulation\"\n            )\n            .ignore();\n        }\n\n        let mut effects = Effects::new();\n\n        if meta_block\n            .mut_state()\n            .register_as_sent_to_transaction_buffer()\n            .was_updated()\n        {\n            debug!(\n                \"MetaBlock: notifying transaction buffer: {} {}\",\n                meta_block.height(),\n                meta_block.hash(),\n            );\n\n            match &meta_block {\n                MetaBlock::Forward(fwd_meta_block) => {\n                    effects.extend(reactor::wrap_effects(\n                        MainEvent::TransactionBuffer,\n                        self.transaction_buffer.handle_event(\n                            effect_builder,\n                            rng,\n                            transaction_buffer::Event::Block(Arc::clone(&fwd_meta_block.block)),\n                        ),\n                    ));\n                }\n                MetaBlock::Historical(historical_meta_block) => {\n                    effects.extend(reactor::wrap_effects(\n                        MainEvent::TransactionBuffer,\n                        self.transaction_buffer.handle_event(\n                            effect_builder,\n                            rng,\n                            transaction_buffer::Event::VersionedBlock(Arc::clone(\n                                &historical_meta_block.block,\n                            )),\n                        ),\n                    ));\n                }\n            }\n        }\n\n        if let MetaBlock::Forward(forward_meta_block) = &meta_block {\n            let block = forward_meta_block.block.clone();\n            if meta_block\n           
     .mut_state()\n                .register_updated_validator_matrix()\n                .was_updated()\n            {\n                if let Some(validator_weights) = block.header().next_era_validator_weights() {\n                    let era_id = block.era_id();\n                    let next_era_id = era_id.successor();\n                    debug!(\n                        \"MetaBlock: updating validator matrix: {} {} {} {}\",\n                        block.height(),\n                        block.hash(),\n                        era_id,\n                        next_era_id\n                    );\n                    effects.extend(self.update_validator_weights(\n                        effect_builder,\n                        rng,\n                        next_era_id,\n                        validator_weights.clone(),\n                    ));\n                }\n            }\n\n            // Validators gossip the block as soon as they deem it valid, but non-validators\n            // only gossip once the block is marked complete.\n            if let Some(true) = self\n                .validator_matrix\n                .is_self_validator_in_era(block.era_id())\n            {\n                debug!(\n                    \"MetaBlock: updating validator gossip state: {} {}\",\n                    block.height(),\n                    block.hash(),\n                );\n                self.update_meta_block_gossip_state(\n                    effect_builder,\n                    rng,\n                    block.hash(),\n                    block.gossip_target(),\n                    meta_block.mut_state(),\n                    &mut effects,\n                );\n            }\n\n            if !meta_block.state().is_executed() {\n                debug!(\n                    \"MetaBlock: unexecuted block: {} {}\",\n                    block.height(),\n                    block.hash(),\n                );\n                // We've done as much as we can on a valid but 
un-executed block.\n                return effects;\n            }\n\n            if meta_block\n                .mut_state()\n                .register_we_have_tried_to_sign()\n                .was_updated()\n                && create_finality_signatures\n            {\n                // When this node is a validator in this era, sign and announce.\n                if let Some(finality_signature) = self\n                    .validator_matrix\n                    .create_finality_signature(block.header())\n                {\n                    debug!(\n                        %finality_signature,\n                        \"MetaBlock: registering finality signature: {} {}\",\n                        block.height(),\n                        block.hash(),\n                    );\n\n                    effects.extend(reactor::wrap_effects(\n                        MainEvent::Storage,\n                        effect_builder\n                            .put_finality_signature_to_storage(finality_signature.clone().into())\n                            .ignore(),\n                    ));\n\n                    effects.extend(reactor::wrap_effects(\n                        MainEvent::BlockAccumulator,\n                        self.block_accumulator.handle_event(\n                            effect_builder,\n                            rng,\n                            block_accumulator::Event::CreatedFinalitySignature {\n                                finality_signature: Box::new(finality_signature.clone()),\n                            },\n                        ),\n                    ));\n\n                    let era_id = finality_signature.era_id();\n                    let payload = Message::FinalitySignature(Box::new(finality_signature));\n                    effects.extend(reactor::wrap_effects(\n                        MainEvent::Network,\n                        effect_builder\n                            .broadcast_message_to_validators(payload, era_id)\n     
                       .ignore(),\n                    ));\n                }\n            }\n        }\n\n        if meta_block\n            .mut_state()\n            .register_as_validator_notified()\n            .was_updated()\n        {\n            debug!(\n                \"MetaBlock: notifying block validator: {} {}\",\n                meta_block.height(),\n                meta_block.hash(),\n            );\n            effects.extend(reactor::wrap_effects(\n                MainEvent::BlockValidator,\n                self.block_validator.handle_event(\n                    effect_builder,\n                    rng,\n                    block_validator::Event::BlockStored(meta_block.height()),\n                ),\n            ));\n        }\n\n        if meta_block\n            .mut_state()\n            .register_as_consensus_notified()\n            .was_updated()\n        {\n            debug!(\n                \"MetaBlock: notifying consensus: {} {}\",\n                meta_block.height(),\n                meta_block.hash(),\n            );\n\n            match &meta_block {\n                MetaBlock::Forward(fwd_meta_block) => {\n                    effects.extend(reactor::wrap_effects(\n                        MainEvent::Consensus,\n                        self.consensus.handle_event(\n                            effect_builder,\n                            rng,\n                            consensus::Event::BlockAdded {\n                                header: Box::new(fwd_meta_block.block.header().clone().into()),\n                                header_hash: *fwd_meta_block.block.hash(),\n                            },\n                        ),\n                    ));\n                }\n                MetaBlock::Historical(_historical_meta_block) => {\n                    // Historical meta blocks aren't of interest to consensus - consensus only\n                    // cares about new blocks. 
Hence, we can just do nothing here.\n                }\n            }\n        }\n\n        if let MetaBlock::Forward(forward_meta_block) = &meta_block {\n            let block = forward_meta_block.block.clone();\n            let execution_results = forward_meta_block.execution_results.clone();\n\n            if meta_block\n                .mut_state()\n                .register_as_accumulator_notified()\n                .was_updated()\n            {\n                debug!(\n                    \"MetaBlock: notifying accumulator: {} {}\",\n                    block.height(),\n                    block.hash(),\n                );\n                let meta_block = ForwardMetaBlock {\n                    block,\n                    execution_results,\n                    state: *meta_block.state(),\n                };\n\n                effects.extend(reactor::wrap_effects(\n                    MainEvent::BlockAccumulator,\n                    self.block_accumulator.handle_event(\n                        effect_builder,\n                        rng,\n                        block_accumulator::Event::ExecutedBlock { meta_block },\n                    ),\n                ));\n                // We've done as much as we can for now, we need to wait for the block\n                // accumulator to mark the block complete before proceeding further.\n                return effects;\n            }\n        }\n\n        // We *always* want to initialize the contract runtime with the highest complete block.\n        // In case of an upgrade, we want the reactor to hold off in the `Upgrading` state until\n        // the immediate switch block is stored and *also* marked complete.\n        // This will allow the contract runtime to initialize properly (see\n        // [`refresh_contract_runtime`]) when the reactor is transitioning from `CatchUp` to\n        // `KeepUp`.\n        if !meta_block.state().is_marked_complete() {\n            error!(\n                block_hash = 
?meta_block.hash(),\n                state = ?meta_block.state(),\n                \"should be a complete block after passing to accumulator\"\n            );\n        } else {\n            debug!(\n                \"MetaBlock: block is marked complete: {} {}\",\n                meta_block.height(),\n                meta_block.hash(),\n            );\n        }\n\n        if let MetaBlock::Forward(forward_meta_block) = &meta_block {\n            let block = forward_meta_block.block.clone();\n\n            debug!(\n                \"MetaBlock: update gossip state: {} {}\",\n                block.height(),\n                block.hash(),\n            );\n            self.update_meta_block_gossip_state(\n                effect_builder,\n                rng,\n                block.hash(),\n                block.gossip_target(),\n                meta_block.mut_state(),\n                &mut effects,\n            );\n\n            if meta_block\n                .mut_state()\n                .register_as_synchronizer_notified()\n                .was_updated()\n            {\n                debug!(\n                    \"MetaBlock: notifying block synchronizer: {} {}\",\n                    block.height(),\n                    block.hash(),\n                );\n\n                effects.extend(reactor::wrap_effects(\n                    MainEvent::BlockSynchronizer,\n                    self.block_synchronizer.handle_event(\n                        effect_builder,\n                        rng,\n                        block_synchronizer::Event::MarkBlockExecuted(*block.hash()),\n                    ),\n                ));\n            }\n        }\n\n        debug_assert!(\n            meta_block.state().verify_complete(),\n            \"meta block {} at height {} has invalid state: {:?}\",\n            meta_block.hash(),\n            meta_block.height(),\n            meta_block.state()\n        );\n\n        if meta_block\n            .mut_state()\n            
.register_all_actions_done()\n            .was_already_registered()\n        {\n            error!(\n                block_hash = ?meta_block.hash(),\n                state = ?meta_block.state(),\n                \"duplicate meta block announcement emitted\"\n            );\n            return effects;\n        }\n\n        debug!(\n            \"MetaBlock: notifying event stream: {} {}\",\n            meta_block.height(),\n            meta_block.hash(),\n        );\n        let versioned_block: Arc<Block> = match &meta_block {\n            MetaBlock::Forward(fwd_meta_block) => Arc::new((*fwd_meta_block.block).clone().into()),\n            MetaBlock::Historical(historical_meta_block) => historical_meta_block.block.clone(),\n        };\n        effects.extend(reactor::wrap_effects(\n            MainEvent::EventStreamServer,\n            self.event_stream_server.handle_event(\n                effect_builder,\n                rng,\n                event_stream_server::Event::BlockAdded(Arc::clone(&versioned_block)),\n            ),\n        ));\n\n        match &meta_block {\n            MetaBlock::Forward(fwd_meta_block) => {\n                for exec_artifact in fwd_meta_block.execution_results.iter() {\n                    let event = event_stream_server::Event::TransactionProcessed {\n                        transaction_hash: exec_artifact.transaction_hash,\n                        transaction_header: Box::new(exec_artifact.transaction_header.clone()),\n                        block_hash: *fwd_meta_block.block.hash(),\n                        execution_result: Box::new(exec_artifact.execution_result.clone()),\n                        messages: exec_artifact.messages.clone(),\n                    };\n\n                    effects.extend(reactor::wrap_effects(\n                        MainEvent::EventStreamServer,\n                        self.event_stream_server\n                            .handle_event(effect_builder, rng, event),\n                    ));\n       
         }\n            }\n            MetaBlock::Historical(historical_meta_block) => {\n                for (transaction_hash, transaction_header, execution_result) in\n                    historical_meta_block.execution_results.iter()\n                {\n                    let event = event_stream_server::Event::TransactionProcessed {\n                        transaction_hash: *transaction_hash,\n                        transaction_header: Box::new(transaction_header.clone()),\n                        block_hash: *historical_meta_block.block.hash(),\n                        execution_result: Box::new(execution_result.clone()),\n                        messages: Vec::new(),\n                    };\n                    effects.extend(reactor::wrap_effects(\n                        MainEvent::EventStreamServer,\n                        self.event_stream_server\n                            .handle_event(effect_builder, rng, event),\n                    ));\n                }\n            }\n        }\n\n        debug!(\n            \"MetaBlock: notifying shutdown watcher: {} {}\",\n            meta_block.height(),\n            meta_block.hash(),\n        );\n        effects.extend(reactor::wrap_effects(\n            MainEvent::ShutdownTrigger,\n            self.shutdown_trigger.handle_event(\n                effect_builder,\n                rng,\n                shutdown_trigger::Event::CompletedBlock(CompletedBlockInfo::new(\n                    meta_block.height(),\n                    meta_block.era_id(),\n                    meta_block.is_switch_block(),\n                )),\n            ),\n        ));\n\n        effects\n    }\n\n    fn update_meta_block_gossip_state(\n        &mut self,\n        effect_builder: EffectBuilder<MainEvent>,\n        rng: &mut NodeRng,\n        block_hash: &BlockHash,\n        gossip_target: GossipTarget,\n        state: &mut MetaBlockState,\n        effects: &mut Effects<MainEvent>,\n    ) {\n        if 
state.register_as_gossiped().was_updated() {\n            debug!(\n                \"notifying block gossiper to start gossiping for: {}\",\n                block_hash\n            );\n            effects.extend(reactor::wrap_effects(\n                MainEvent::BlockGossiper,\n                self.block_gossiper.handle_event(\n                    effect_builder,\n                    rng,\n                    gossiper::Event::ItemReceived {\n                        item_id: *block_hash,\n                        source: Source::Ourself,\n                        target: gossip_target,\n                    },\n                ),\n            ));\n        }\n    }\n}\n\n// TEST ENABLEMENT -- used by integration tests elsewhere\n#[cfg(test)]\nimpl MainReactor {\n    pub(crate) fn consensus(&self) -> &EraSupervisor {\n        &self.consensus\n    }\n\n    pub(crate) fn storage(&self) -> &Storage {\n        &self.storage\n    }\n\n    pub(crate) fn contract_runtime(&self) -> &ContractRuntime {\n        &self.contract_runtime\n    }\n}\n\n#[cfg(test)]\nimpl NetworkedReactor for MainReactor {\n    fn node_id(&self) -> crate::types::NodeId {\n        self.net.node_id()\n    }\n}\n"
  },
  {
    "path": "node/src/reactor/queue_kind.rs",
    "content": "//! Queue kinds.\n//!\n//! The reactor's event queue uses different queues to group events by priority and polls them in a\n//! round-robin manner. This way, events are only competing for time within one queue, non-congested\n//! queues can always assume to be speedily processed.\n\nuse std::{fmt::Display, num::NonZeroUsize};\n\nuse enum_iterator::IntoEnumIterator;\nuse serde::Serialize;\n\n/// Scheduling priority.\n///\n/// Priorities are ordered from lowest to highest.\n#[derive(\n    Copy, Clone, Debug, Eq, PartialEq, Hash, IntoEnumIterator, PartialOrd, Ord, Serialize, Default,\n)]\npub enum QueueKind {\n    /// Control messages for the runtime itself.\n    Control,\n    /// Network events that were initiated outside of this node.\n    ///\n    /// Their load may vary and grouping them together in one queue aides DoS protection.\n    NetworkIncoming,\n    /// Network events that are low priority.\n    NetworkLowPriority,\n    /// Network events demand a resource directly.\n    NetworkDemand,\n    /// Network events that were initiated by the local node, such as outgoing messages.\n    Network,\n    /// NetworkInfo events.\n    NetworkInfo,\n    /// Fetch events.\n    Fetch,\n    /// SyncGlobalState events.\n    SyncGlobalState,\n    /// FinalitySignature events.\n    FinalitySignature,\n    /// Events of unspecified priority.\n    ///\n    /// This is the default queue.\n    #[default]\n    Regular,\n    /// Gossiper events.\n    Gossip,\n    /// Get from storage events.\n    FromStorage,\n    /// Put to storage events.\n    ToStorage,\n    /// Contract runtime events.\n    ContractRuntime,\n    /// Consensus events.\n    Consensus,\n    /// Validation events.\n    Validation,\n    /// Reporting events on the local node.\n    ///\n    /// Metric events take precedence over most other events since missing a request for metrics\n    /// might cause the requester to assume that the node is down and forcefully restart it.\n    Api,\n}\n\nimpl Display 
for QueueKind {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        let str_value = match self {\n            QueueKind::Control => \"Control\",\n            QueueKind::NetworkIncoming => \"NetworkIncoming\",\n            QueueKind::NetworkLowPriority => \"NetworkLowPriority\",\n            QueueKind::NetworkDemand => \"NetworkDemand\",\n            QueueKind::Network => \"Network\",\n            QueueKind::NetworkInfo => \"NetworkInfo\",\n            QueueKind::Fetch => \"Fetch\",\n            QueueKind::Regular => \"Regular\",\n            QueueKind::Gossip => \"Gossip\",\n            QueueKind::FromStorage => \"FromStorage\",\n            QueueKind::ToStorage => \"ToStorage\",\n            QueueKind::ContractRuntime => \"ContractRuntime\",\n            QueueKind::SyncGlobalState => \"SyncGlobalState\",\n            QueueKind::FinalitySignature => \"FinalitySignature\",\n            QueueKind::Consensus => \"Consensus\",\n            QueueKind::Validation => \"Validation\",\n            QueueKind::Api => \"Api\",\n        };\n        write!(f, \"{}\", str_value)\n    }\n}\n\nimpl QueueKind {\n    /// Returns the weight of a specific queue.\n    ///\n    /// The weight determines how many events are at most processed from a specific queue during\n    /// each event processing round.\n    fn weight(self) -> NonZeroUsize {\n        NonZeroUsize::new(match self {\n            QueueKind::NetworkLowPriority => 1,\n            QueueKind::NetworkInfo => 2,\n            QueueKind::NetworkDemand => 2,\n            QueueKind::NetworkIncoming => 8,\n            QueueKind::Network => 4,\n            QueueKind::Regular => 4,\n            QueueKind::Fetch => 4,\n            QueueKind::Gossip => 4,\n            QueueKind::FromStorage => 4,\n            QueueKind::ToStorage => 4,\n            QueueKind::ContractRuntime => 4,\n            QueueKind::SyncGlobalState => 4,\n            QueueKind::Consensus => 4,\n            
QueueKind::FinalitySignature => 4,\n            QueueKind::Validation => 8,\n            QueueKind::Api => 8,\n            // Note: Control events should be very rare, but we do want to process them right away.\n            QueueKind::Control => 16,\n        })\n        .expect(\"weight must be positive\")\n    }\n\n    /// Return weights of all possible `Queue`s.\n    pub(crate) fn weights() -> Vec<(Self, NonZeroUsize)> {\n        QueueKind::into_enum_iter()\n            .map(|q| (q, q.weight()))\n            .collect()\n    }\n\n    pub(crate) fn metrics_name(&self) -> &str {\n        match self {\n            QueueKind::Control => \"control\",\n            QueueKind::NetworkIncoming => \"network_incoming\",\n            QueueKind::NetworkDemand => \"network_demands\",\n            QueueKind::NetworkLowPriority => \"network_low_priority\",\n            QueueKind::Network => \"network\",\n            QueueKind::NetworkInfo => \"network_info\",\n            QueueKind::SyncGlobalState => \"sync_global_state\",\n            QueueKind::Fetch => \"fetch\",\n            QueueKind::Gossip => \"gossip\",\n            QueueKind::FromStorage => \"from_storage\",\n            QueueKind::ToStorage => \"to_storage\",\n            QueueKind::ContractRuntime => \"contract_runtime\",\n            QueueKind::Consensus => \"consensus\",\n            QueueKind::Validation => \"validation\",\n            QueueKind::FinalitySignature => \"finality_signature\",\n            QueueKind::Api => \"api\",\n            QueueKind::Regular => \"regular\",\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/reactor.rs",
    "content": "#![allow(clippy::boxed_local)] // We use boxed locals to pass on event data unchanged.\n\n//! Reactor core.\n//!\n//! Any long running instance of the node application uses an event-dispatch pattern: Events are\n//! generated and stored on an event queue, then processed one-by-one. This process happens inside\n//! the reactor, which also exclusively holds the state of the application besides pending events:\n//!\n//! 1. The reactor pops a reactor event off the event queue (called a\n//!    [`Scheduler`](type.Scheduler.html)).\n//! 2. The event is dispatched by the reactor via [`Reactor::dispatch_event`]. Since the reactor\n//!    holds mutable state, it can grant any component that processes an event mutable, exclusive\n//!    access to its state.\n//! 3. Once the [(synchronous)](`crate::components::Component::handle_event`) event processing has\n//!    completed, the component returns an [`effect`](crate::effect).\n//! 4. The reactor spawns a task that executes these effects and possibly schedules more events.\n//! 5. go to 1.\n//!\n//! For descriptions of events and instructions on how to create effects, see the\n//! [`effect`](super::effect) module.\n//!\n//! # Reactors\n//!\n//! There is no single reactor, but rather a reactor for each application type, since it defines\n//! which components are used and how they are wired up. The reactor defines the state by being a\n//! `struct` of components, their initialization through [`Reactor::new`] and event dispatching to\n//! components via [`Reactor::dispatch_event`].\n//!\n//! With all these set up, a reactor can be executed using a [`Runner`], either in a step-wise\n//! 
manner using [`Runner::crank`] or indefinitely using [`Runner::run`].\n\nmod event_queue_metrics;\npub(crate) mod main_reactor;\nmod queue_kind;\n\nuse std::{\n    any,\n    collections::HashMap,\n    env,\n    fmt::{Debug, Display},\n    io::Write,\n    num::NonZeroU64,\n    str::FromStr,\n    sync::{atomic::Ordering, Arc},\n};\n\nuse datasize::DataSize;\nuse erased_serde::Serialize as ErasedSerialize;\n#[cfg(test)]\nuse fake_instant::FakeClock;\n#[cfg(test)]\nuse futures::future::BoxFuture;\nuse futures::FutureExt;\nuse once_cell::sync::Lazy;\nuse prometheus::{self, Histogram, HistogramOpts, IntCounter, IntGauge, Registry};\nuse quanta::{Clock, IntoNanoseconds};\nuse serde::Serialize;\nuse signal_hook::consts::signal::{SIGINT, SIGQUIT, SIGTERM};\nuse stats_alloc::{Stats, INSTRUMENTED_SYSTEM};\nuse tokio::time::{Duration, Instant};\nuse tracing::{debug_span, error, info, instrument, trace, warn, Instrument, Span};\n\n#[cfg(test)]\nuse crate::components::ComponentState;\n#[cfg(test)]\nuse casper_types::testing::TestRng;\nuse casper_types::{\n    Block, BlockHeader, Chainspec, ChainspecRawBytes, FinalitySignature, Transaction,\n};\n\n#[cfg(target_os = \"linux\")]\nuse utils::rlimit::{Limit, OpenFiles, ResourceLimit};\n\n#[cfg(test)]\nuse crate::testing::{network::NetworkedReactor, ConditionCheckReactor};\nuse crate::{\n    components::{\n        block_accumulator,\n        fetcher::{self, FetchItem},\n        network::{blocklist::BlocklistJustification, Identity as NetworkIdentity},\n        transaction_acceptor,\n    },\n    effect::{\n        announcements::{ControlAnnouncement, PeerBehaviorAnnouncement, QueueDumpFormat},\n        incoming::NetResponse,\n        Effect, EffectBuilder, EffectExt, Effects,\n    },\n    failpoints::FailpointActivation,\n    types::{BlockExecutionResultsOrChunk, ExitCode, LegacyDeploy, NodeId, SyncLeap, TrieOrChunk},\n    unregister_metric,\n    utils::{self, SharedFlag, WeightedRoundRobin},\n    NodeRng, 
TERMINATION_REQUESTED,\n};\nuse casper_storage::block_store::types::ApprovalsHashes;\npub(crate) use queue_kind::QueueKind;\n\n/// Default threshold for when an event is considered slow.  Can be overridden by setting the env\n/// var `CL_EVENT_MAX_MICROSECS=<MICROSECONDS>`.\nconst DEFAULT_DISPATCH_EVENT_THRESHOLD: Duration = Duration::from_secs(1);\nconst DISPATCH_EVENT_THRESHOLD_ENV_VAR: &str = \"CL_EVENT_MAX_MICROSECS\";\n#[cfg(test)]\nconst POLL_INTERVAL: Duration = Duration::from_millis(10);\n\nstatic DISPATCH_EVENT_THRESHOLD: Lazy<Duration> = Lazy::new(|| {\n    env::var(DISPATCH_EVENT_THRESHOLD_ENV_VAR)\n        .map(|threshold_str| {\n            let threshold_microsecs = u64::from_str(&threshold_str).unwrap_or_else(|error| {\n                panic!(\n                    \"can't parse env var {}={} as a u64: {}\",\n                    DISPATCH_EVENT_THRESHOLD_ENV_VAR, threshold_str, error\n                )\n            });\n            Duration::from_micros(threshold_microsecs)\n        })\n        .unwrap_or_else(|_| DEFAULT_DISPATCH_EVENT_THRESHOLD)\n});\n\n#[cfg(target_os = \"linux\")]\n/// The desired limit for open files.\nconst TARGET_OPEN_FILES_LIMIT: Limit = 64_000;\n\n#[cfg(target_os = \"linux\")]\n/// Adjusts the maximum number of open file handles upwards towards the hard limit.\nfn adjust_open_files_limit() {\n    // Ensure we have reasonable ulimits.\n    match ResourceLimit::<OpenFiles>::get() {\n        Err(err) => {\n            warn!(%err, \"could not retrieve open files limit\");\n        }\n\n        Ok(current_limit) => {\n            if current_limit.current() < TARGET_OPEN_FILES_LIMIT {\n                let best_possible = if current_limit.max() < TARGET_OPEN_FILES_LIMIT {\n                    warn!(\n                        wanted = TARGET_OPEN_FILES_LIMIT,\n                        hard_limit = current_limit.max(),\n                        \"settling for lower open files limit due to hard limit\"\n                    );\n              
      current_limit.max()\n                } else {\n                    TARGET_OPEN_FILES_LIMIT\n                };\n\n                let new_limit = ResourceLimit::<OpenFiles>::fixed(best_possible);\n                if let Err(err) = new_limit.set() {\n                    warn!(%err, current=current_limit.current(), target=best_possible, \"did not succeed in raising open files limit\")\n                } else {\n                    tracing::debug!(?new_limit, \"successfully increased open files limit\");\n                }\n            } else {\n                tracing::debug!(\n                    ?current_limit,\n                    \"not changing open files limit, already sufficient\"\n                );\n            }\n        }\n    }\n}\n\n#[cfg(not(target_os = \"linux\"))]\n/// File handle limit adjustment shim.\nfn adjust_open_files_limit() {\n    info!(\"not on linux, not adjusting open files limit\");\n}\n\n/// Event scheduler\n///\n/// The scheduler is a combination of multiple event queues that are polled in a specific order. It\n/// is the central hook for any part of the program that schedules events directly.\n///\n/// Components rarely use this, but use a bound `EventQueueHandle` instead.\n///\n/// Schedule tuples contain an optional ancestor ID and the actual event. The ancestor ID indicates\n/// which potential previous event resulted in the event being created.\npub(crate) type Scheduler<Ev> = WeightedRoundRobin<(Option<NonZeroU64>, Ev), QueueKind>;\n\n/// Event queue handle\n///\n/// The event queue handle is how almost all parts of the application interact with the reactor\n/// outside of the normal event loop. 
It gives different parts a chance to schedule messages that\n/// stem from things like external IO.\n#[derive(DataSize, Debug)]\npub(crate) struct EventQueueHandle<REv>\nwhere\n    REv: 'static,\n{\n    /// A reference to the scheduler of the event queue.\n    scheduler: &'static Scheduler<REv>,\n    /// Flag indicating whether or not the reactor processing this event queue is shutting down.\n    is_shutting_down: SharedFlag,\n}\n\n// Implement `Clone` and `Copy` manually, as `derive` will make it depend on `R` and `Ev` otherwise.\nimpl<REv> Clone for EventQueueHandle<REv> {\n    fn clone(&self) -> Self {\n        *self\n    }\n}\nimpl<REv> Copy for EventQueueHandle<REv> {}\n\nimpl<REv> EventQueueHandle<REv> {\n    /// Creates a new event queue handle.\n    pub(crate) fn new(scheduler: &'static Scheduler<REv>, is_shutting_down: SharedFlag) -> Self {\n        EventQueueHandle {\n            scheduler,\n            is_shutting_down,\n        }\n    }\n\n    /// Creates a new event queue handle that is not connected to a shutdown flag.\n    ///\n    /// This method is used in tests, where we are never disabling shutdown warnings anyway.\n    #[cfg(test)]\n    pub(crate) fn without_shutdown(scheduler: &'static Scheduler<REv>) -> Self {\n        EventQueueHandle::new(scheduler, SharedFlag::global_shared())\n    }\n\n    /// Schedule an event on a specific queue.\n    ///\n    /// The scheduled event will not have an ancestor.\n    pub(crate) async fn schedule<Ev>(self, event: Ev, queue_kind: QueueKind)\n    where\n        REv: From<Ev>,\n    {\n        self.schedule_with_ancestor(None, event, queue_kind).await;\n    }\n\n    /// Schedule an event on a specific queue.\n    pub(crate) async fn schedule_with_ancestor<Ev>(\n        self,\n        ancestor: Option<NonZeroU64>,\n        event: Ev,\n        queue_kind: QueueKind,\n    ) where\n        REv: From<Ev>,\n    {\n        self.scheduler\n            .push((ancestor, event.into()), queue_kind)\n            .await;\n   
 }\n\n    /// Returns number of events in each of the scheduler's queues.\n    pub(crate) fn event_queues_counts(&self) -> HashMap<QueueKind, usize> {\n        self.scheduler.event_queues_counts()\n    }\n\n    /// Returns whether the associated reactor is currently shutting down.\n    pub(crate) fn shutdown_flag(&self) -> SharedFlag {\n        self.is_shutting_down\n    }\n}\n\n/// Reactor core.\n///\n/// Any reactor should implement this trait and be executed by the `reactor::run` function.\npub(crate) trait Reactor: Sized {\n    // Note: We've gone for the `Sized` bound here, since we return an instance in `new`. As an\n    // alternative, `new` could return a boxed instance instead, removing this requirement.\n\n    /// Event type associated with reactor.\n    ///\n    /// Defines what kind of event the reactor processes.\n    type Event: ReactorEvent + Display;\n\n    /// A configuration for the reactor\n    type Config;\n\n    /// The error type returned by the reactor.\n    type Error: Send + 'static;\n\n    /// Dispatches an event on the reactor.\n    ///\n    /// This function is typically only called by the reactor itself to dispatch an event. 
It is\n    /// safe to call regardless, but will cause the event to skip the queue and things like\n    /// accounting.\n    fn dispatch_event(\n        &mut self,\n        effect_builder: EffectBuilder<Self::Event>,\n        rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event>;\n\n    /// Creates a new instance of the reactor.\n    ///\n    /// This method creates the full state, which consists of all components, and returns a reactor\n    /// instance along with the effects that the components generated upon instantiation.\n    ///\n    /// If any instantiation fails, an error is returned.\n    fn new(\n        cfg: Self::Config,\n        chainspec: Arc<Chainspec>,\n        chainspec_raw_bytes: Arc<ChainspecRawBytes>,\n        network_identity: NetworkIdentity,\n        registry: &Registry,\n        event_queue: EventQueueHandle<Self::Event>,\n        rng: &mut NodeRng,\n    ) -> Result<(Self, Effects<Self::Event>), Self::Error>;\n\n    /// Instructs the reactor to update performance metrics, if any.\n    fn update_metrics(&mut self, _event_queue_handle: EventQueueHandle<Self::Event>) {}\n\n    /// Activate/deactivate a failpoint.\n    fn activate_failpoint(&mut self, _activation: &FailpointActivation) {\n        // Default is to ignore the failpoint. 
If failpoint support is enabled for a reactor, route\n        // the activation to the respective components here.\n    }\n\n    /// Returns the state of a named components.\n    ///\n    /// May return `None` if the component cannot be found, or if the reactor does not support\n    /// querying component states.\n    #[allow(dead_code)]\n    #[cfg(test)]\n    fn get_component_state(&self, _name: &str) -> Option<&ComponentState> {\n        None\n    }\n}\n\n/// A reactor event type.\npub(crate) trait ReactorEvent: Send + Debug + From<ControlAnnouncement> + 'static {\n    /// Returns `true` if the event is a control announcement variant.\n    fn is_control(&self) -> bool;\n\n    /// Converts the event into a control announcement without copying.\n    ///\n    /// Note that this function must return `Some` if and only `is_control` returns `true`.\n    fn try_into_control(self) -> Option<ControlAnnouncement>;\n\n    /// Returns a cheap but human-readable description of the event.\n    fn description(&self) -> &'static str {\n        \"anonymous event\"\n    }\n}\n\n/// A drop-like trait for `async` compatible drop-and-wait.\n///\n/// Shuts down a type by explicitly freeing resources, but allowing to wait on cleanup to complete.\n#[cfg(test)]\npub(crate) trait Finalize: Sized {\n    /// Runs cleanup code and waits for a shutdown to complete.\n    ///\n    /// This function must always be optional and a way to wait for all resources to be freed, not\n    /// mandatory for cleanup!\n    fn finalize(self) -> BoxFuture<'static, ()> {\n        async move {}.boxed()\n    }\n}\n\n/// Represents memory statistics in bytes.\nstruct AllocatedMem {\n    /// Total allocated memory in bytes.\n    allocated: u64,\n    /// Total consumed memory in bytes.\n    consumed: u64,\n    /// Total system memory in bytes.\n    total: u64,\n}\n\n/// A runner for a reactor.\n///\n/// The runner manages a reactor's event queue and reactor itself and can run it either continuously\n/// or in a 
step-by-step manner.\n#[derive(Debug)]\npub(crate) struct Runner<R>\nwhere\n    R: Reactor,\n{\n    /// The scheduler used for the reactor.\n    scheduler: &'static Scheduler<R::Event>,\n\n    /// The reactor instance itself.\n    reactor: R,\n\n    /// Counter for events, to aid tracing.\n    current_event_id: u64,\n\n    /// Timestamp of last reactor metrics update.\n    last_metrics: Instant,\n\n    /// Metrics for the runner.\n    metrics: RunnerMetrics,\n\n    /// Check if we need to update reactor metrics every this many events.\n    event_metrics_threshold: u64,\n\n    /// Only update reactor metrics if at least this much time has passed.\n    event_metrics_min_delay: Duration,\n\n    /// An accurate, possible TSC-supporting clock.\n    clock: Clock,\n\n    /// Flag indicating the reactor is being shut down.\n    is_shutting_down: SharedFlag,\n}\n\n/// Metric data for the Runner\n#[derive(Debug)]\nstruct RunnerMetrics {\n    /// Total number of events processed.\n    events: IntCounter,\n    /// Histogram of how long it took to dispatch an event.\n    event_dispatch_duration: Histogram,\n    /// Total allocated RAM in bytes, as reported by stats_alloc.\n    allocated_ram_bytes: IntGauge,\n    /// Total consumed RAM in bytes, as reported by sys-info.\n    consumed_ram_bytes: IntGauge,\n    /// Total system RAM in bytes, as reported by sys-info.\n    total_ram_bytes: IntGauge,\n    /// Handle to the metrics registry, in case we need to unregister.\n    registry: Registry,\n}\n\nimpl RunnerMetrics {\n    /// Create and register new runner metrics.\n    fn new(registry: &Registry) -> Result<Self, prometheus::Error> {\n        let events = IntCounter::new(\n            \"runner_events\",\n            \"running total count of events handled by this reactor\",\n        )?;\n\n        // Create an event dispatch histogram, putting extra emphasis on the area between 1-10 us.\n        let event_dispatch_duration = Histogram::with_opts(\n            
HistogramOpts::new(\n                \"event_dispatch_duration\",\n                \"time in nanoseconds to dispatch an event\",\n            )\n            .buckets(vec![\n                100.0,\n                500.0,\n                1_000.0,\n                5_000.0,\n                10_000.0,\n                20_000.0,\n                50_000.0,\n                100_000.0,\n                200_000.0,\n                300_000.0,\n                400_000.0,\n                500_000.0,\n                600_000.0,\n                700_000.0,\n                800_000.0,\n                900_000.0,\n                1_000_000.0,\n                2_000_000.0,\n                5_000_000.0,\n            ]),\n        )?;\n\n        let allocated_ram_bytes =\n            IntGauge::new(\"allocated_ram_bytes\", \"total allocated ram in bytes\")?;\n        let consumed_ram_bytes =\n            IntGauge::new(\"consumed_ram_bytes\", \"total consumed ram in bytes\")?;\n        let total_ram_bytes = IntGauge::new(\"total_ram_bytes\", \"total system ram in bytes\")?;\n\n        registry.register(Box::new(events.clone()))?;\n        registry.register(Box::new(event_dispatch_duration.clone()))?;\n        registry.register(Box::new(allocated_ram_bytes.clone()))?;\n        registry.register(Box::new(consumed_ram_bytes.clone()))?;\n        registry.register(Box::new(total_ram_bytes.clone()))?;\n\n        Ok(RunnerMetrics {\n            events,\n            event_dispatch_duration,\n            registry: registry.clone(),\n            allocated_ram_bytes,\n            consumed_ram_bytes,\n            total_ram_bytes,\n        })\n    }\n}\n\nimpl Drop for RunnerMetrics {\n    fn drop(&mut self) {\n        unregister_metric!(self.registry, self.events);\n        unregister_metric!(self.registry, self.event_dispatch_duration);\n        unregister_metric!(self.registry, self.allocated_ram_bytes);\n        unregister_metric!(self.registry, self.consumed_ram_bytes);\n        
unregister_metric!(self.registry, self.total_ram_bytes);\n    }\n}\n\nimpl<R> Runner<R>\nwhere\n    R: Reactor,\n    R::Event: Serialize,\n    R::Error: From<prometheus::Error>,\n{\n    /// Creates a new runner from a given configuration, using existing metrics.\n    #[instrument(\n        \"init\",\n        level = \"debug\",\n        skip_all,\n        fields(node_id = %NodeId::from(&network_identity))\n    )]\n    pub(crate) async fn with_metrics(\n        cfg: R::Config,\n        chainspec: Arc<Chainspec>,\n        chainspec_raw_bytes: Arc<ChainspecRawBytes>,\n        network_identity: NetworkIdentity,\n        rng: &mut NodeRng,\n        registry: &Registry,\n    ) -> Result<Self, R::Error> {\n        adjust_open_files_limit();\n\n        let event_size = size_of::<R::Event>();\n\n        // Check if the event is of a reasonable size. This only emits a runtime warning at startup\n        // right now, since storage size of events is not an issue per se, but copying might be\n        // expensive if events get too large.\n        if event_size > 16 * size_of::<usize>() {\n            warn!(\n                %event_size, type_name = ?any::type_name::<R::Event>(),\n                \"large event size, consider reducing it or boxing\"\n            );\n        }\n\n        let event_queue_dump_threshold =\n            env::var(\"CL_EVENT_QUEUE_DUMP_THRESHOLD\").map_or(None, |s| s.parse::<usize>().ok());\n\n        let scheduler = utils::leak(Scheduler::new(\n            QueueKind::weights(),\n            event_queue_dump_threshold,\n        ));\n        let is_shutting_down = SharedFlag::new();\n        let event_queue = EventQueueHandle::new(scheduler, is_shutting_down);\n        let (reactor, initial_effects) = R::new(\n            cfg,\n            chainspec,\n            chainspec_raw_bytes,\n            network_identity,\n            registry,\n            event_queue,\n            rng,\n        )?;\n\n        info!(\n            \"Reactor: with_metrics has: {} 
initial_effects\",\n            initial_effects.len()\n        );\n        // Run all effects from component instantiation.\n        process_effects(None, scheduler, initial_effects, QueueKind::Regular)\n            .instrument(debug_span!(\"process initial effects\"))\n            .await;\n\n        info!(\"reactor main loop is ready\");\n\n        Ok(Runner {\n            scheduler,\n            reactor,\n            current_event_id: 1,\n            metrics: RunnerMetrics::new(registry)?,\n            last_metrics: Instant::now(),\n            event_metrics_min_delay: Duration::from_secs(30),\n            event_metrics_threshold: 1000,\n            clock: Clock::new(),\n            is_shutting_down,\n        })\n    }\n\n    /// Processes a single event on the event queue.\n    ///\n    /// Returns `Some(exit_code)` if processing should stop.\n    #[instrument(\"dispatch\", level = \"debug\", fields(a, ev = self.current_event_id), skip(self, rng))]\n    pub(crate) async fn crank(&mut self, rng: &mut NodeRng) -> Option<ExitCode> {\n        self.metrics.events.inc();\n\n        let event_queue = EventQueueHandle::new(self.scheduler, self.is_shutting_down);\n        let effect_builder = EffectBuilder::new(event_queue);\n\n        // Update metrics like memory usage and event queue sizes.\n        if self.current_event_id % self.event_metrics_threshold == 0 {\n            // We update metrics on the first very event as well to get a good baseline.\n            if self.last_metrics.elapsed() >= self.event_metrics_min_delay {\n                self.reactor.update_metrics(event_queue);\n\n                // Use a fresh timestamp. 
This skews the metrics collection interval a little bit,\n                // but ensures that if metrics collection time explodes, we are guaranteed a full\n                // `event_metrics_min_delay` of event processing.\n                self.last_metrics = Instant::now();\n            }\n\n            if let Some(AllocatedMem {\n                allocated,\n                consumed,\n                total,\n            }) = Self::get_allocated_memory()\n            {\n                trace!(%allocated, %total, \"memory allocated\");\n                self.metrics.allocated_ram_bytes.set(allocated as i64);\n                self.metrics.consumed_ram_bytes.set(consumed as i64);\n                self.metrics.total_ram_bytes.set(total as i64);\n            }\n        }\n\n        let ((ancestor, event), queue_kind) = self.scheduler.pop().await;\n        trace!(%event, %queue_kind, \"current\");\n        let event_desc = event.description();\n\n        // Create another span for tracing the processing of one event.\n        Span::current().record(\"ev\", self.current_event_id);\n\n        // If we know the ancestor of an event, record it.\n        if let Some(ancestor) = ancestor {\n            Span::current().record(\"a\", ancestor.get());\n        }\n\n        // Dispatch the event, then execute the resulting effect.\n        let start = self.clock.start();\n\n        let (effects, maybe_exit_code, queue_kind) = if event.is_control() {\n            // We've received a control event, which will _not_ be handled by the reactor.\n            match event.try_into_control() {\n                None => {\n                    // If `as_control().is_some()` is true, but `try_into_control` fails, the trait\n                    // is implemented incorrectly.\n                    error!(\n                        \"event::as_control succeeded, but try_into_control failed. 
this is a bug\"\n                    );\n\n                    // We ignore the event.\n                    (Effects::new(), None, QueueKind::Control)\n                }\n                Some(ControlAnnouncement::ShutdownDueToUserRequest) => (\n                    Effects::new(),\n                    Some(ExitCode::CleanExitDontRestart),\n                    QueueKind::Control,\n                ),\n                Some(ControlAnnouncement::ShutdownForUpgrade) => {\n                    (Effects::new(), Some(ExitCode::Success), QueueKind::Control)\n                }\n                Some(ControlAnnouncement::ShutdownAfterCatchingUp) => (\n                    Effects::new(),\n                    Some(ExitCode::CleanExitDontRestart),\n                    QueueKind::Control,\n                ),\n                Some(ControlAnnouncement::FatalError { file, line, msg }) => {\n                    error!(%file, %line, %msg, \"fatal error via control announcement\");\n                    (Effects::new(), Some(ExitCode::Abort), QueueKind::Control)\n                }\n                Some(ControlAnnouncement::QueueDumpRequest {\n                    dump_format,\n                    finished,\n                }) => {\n                    match dump_format {\n                        QueueDumpFormat::Serde(mut ser) => {\n                            self.scheduler\n                                .dump(move |queue_dump| {\n                                    if let Err(err) =\n                                        queue_dump.erased_serialize(&mut ser.as_serializer())\n                                    {\n                                        warn!(%err, \"queue dump failed to serialize\");\n                                    }\n                                })\n                                .await;\n                        }\n                        QueueDumpFormat::Debug(ref file) => {\n                            match file.try_clone() {\n                              
  Ok(mut local_file) => {\n                                    self.scheduler\n                                        .dump(move |queue_dump| {\n                                            write!(&mut local_file, \"{:?}\", queue_dump)\n                                                .and_then(|_| local_file.flush())\n                                                .map_err(|err| {\n                                                    warn!(\n                                                        ?err,\n                                                        \"failed to write/flush queue dump using debug format\"\n                                                    );\n                                                })\n                                                .ok();\n                                        })\n                                        .await;\n                                }\n                                Err(err) => warn!(\n                                    %err,\n                                    \"could not create clone of temporary file for queue debug dump\"\n                                ),\n                            };\n                        }\n                    }\n\n                    // Notify requester that we finished writing the queue dump.\n                    finished.respond(()).await;\n\n                    // Do nothing on queue dump otherwise.\n                    (Default::default(), None, QueueKind::Control)\n                }\n                Some(ControlAnnouncement::ActivateFailpoint { activation }) => {\n                    self.reactor.activate_failpoint(&activation);\n\n                    // No other effects, calling the method is all we had to do.\n                    (Effects::new(), None, QueueKind::Control)\n                }\n            }\n        } else {\n            (\n                self.reactor.dispatch_event(effect_builder, rng, event),\n                None,\n                
queue_kind,\n            )\n        };\n\n        let end = self.clock.end();\n\n        // Warn if processing took a long time, record to histogram.\n        let delta = self.clock.delta(start, end);\n        if delta > *DISPATCH_EVENT_THRESHOLD {\n            warn!(%event_desc, ns = delta.into_nanos(), \"event took very long to dispatch\");\n        }\n        self.metrics\n            .event_dispatch_duration\n            .observe(delta.into_nanos() as f64);\n\n        // Run effects, with the current event ID as the ancestor for resulting set of events.\n        process_effects(\n            NonZeroU64::new(self.current_event_id),\n            self.scheduler,\n            effects,\n            queue_kind,\n        )\n        .in_current_span()\n        .await;\n\n        self.current_event_id += 1;\n\n        maybe_exit_code\n    }\n\n    /// Gets both the allocated and total memory from sys-info + jemalloc\n    fn get_allocated_memory() -> Option<AllocatedMem> {\n        let mem_info = match sys_info::mem_info() {\n            Ok(mem_info) => mem_info,\n            Err(error) => {\n                warn!(%error, \"unable to get mem_info using sys-info\");\n                return None;\n            }\n        };\n\n        // mem_info gives us kilobytes\n        let total = mem_info.total * 1024;\n        let consumed = total - (mem_info.avail * 1024);\n\n        let Stats {\n            allocations: _,\n            deallocations: _,\n            reallocations: _,\n            bytes_allocated,\n            bytes_deallocated,\n            bytes_reallocated: _,\n        } = INSTRUMENTED_SYSTEM.stats();\n\n        Some(AllocatedMem {\n            allocated: bytes_allocated.saturating_sub(bytes_deallocated) as u64,\n            consumed,\n            total,\n        })\n    }\n\n    /// Runs the reactor until `self.crank` returns `Some` or we get interrupted by a termination\n    /// signal.\n    pub(crate) async fn run(&mut self, rng: &mut NodeRng) -> ExitCode {\n  
      loop {\n            match TERMINATION_REQUESTED.load(Ordering::SeqCst) as i32 {\n                0 => {\n                    if let Some(exit_code) = self.crank(rng).await {\n                        self.is_shutting_down.set();\n                        break exit_code;\n                    }\n                }\n                SIGINT => {\n                    self.is_shutting_down.set();\n                    break ExitCode::SigInt;\n                }\n                SIGQUIT => {\n                    self.is_shutting_down.set();\n                    break ExitCode::SigQuit;\n                }\n                SIGTERM => {\n                    self.is_shutting_down.set();\n                    break ExitCode::SigTerm;\n                }\n                _ => error!(\"should be unreachable - bug in signal handler\"),\n            }\n        }\n    }\n}\n\n#[cfg(test)]\n#[derive(Eq, PartialEq, Debug)]\npub(crate) enum TryCrankOutcome {\n    NoEventsToProcess,\n    ProcessedAnEvent,\n    ShouldExit(ExitCode),\n    Exited,\n}\n\n#[cfg(test)]\nimpl<R> Runner<R>\nwhere\n    R: Reactor,\n    R::Event: Serialize,\n    R::Error: From<prometheus::Error>,\n{\n    /// Creates a new runner from a given configuration.\n    ///\n    /// Creates a metrics registry that is only going to be used in this runner.\n    pub(crate) async fn new(\n        cfg: R::Config,\n        chainspec: Arc<Chainspec>,\n        chainspec_raw_bytes: Arc<ChainspecRawBytes>,\n        rng: &mut NodeRng,\n    ) -> Result<Self, R::Error> {\n        // Instantiate a new registry for metrics for this reactor.\n        let registry = Registry::new();\n        let network_identity = NetworkIdentity::with_generated_certs().unwrap();\n        Self::with_metrics(\n            cfg,\n            chainspec,\n            chainspec_raw_bytes,\n            network_identity,\n            rng,\n            &registry,\n        )\n        .await\n    }\n\n    /// Create an instance of an `EffectBuilder`.\n    
#[cfg(test)]\n    pub(crate) fn effect_builder(&self) -> EffectBuilder<R::Event> {\n        let event_queue = EventQueueHandle::new(self.scheduler, self.is_shutting_down);\n        EffectBuilder::new(event_queue)\n    }\n\n    /// Inject (schedule then process) effects created via a call to `create_effects` which is\n    /// itself passed an instance of an `EffectBuilder`.\n    #[cfg(test)]\n    pub(crate) async fn process_injected_effects<F>(&mut self, create_effects: F)\n    where\n        F: FnOnce(EffectBuilder<R::Event>) -> Effects<R::Event>,\n    {\n        use tracing::{debug_span, Instrument};\n\n        let event_queue = EventQueueHandle::new(self.scheduler, self.is_shutting_down);\n        let effect_builder = EffectBuilder::new(event_queue);\n\n        let effects = create_effects(effect_builder);\n\n        process_effects(None, self.scheduler, effects, QueueKind::Regular)\n            .instrument(debug_span!(\n                \"process injected effects\",\n                ev = self.current_event_id\n            ))\n            .await\n    }\n\n    /// Processes a single event if there is one and we haven't previously handled an exit code.\n    pub(crate) async fn try_crank(&mut self, rng: &mut NodeRng) -> TryCrankOutcome {\n        if self.is_shutting_down.is_set() {\n            TryCrankOutcome::Exited\n        } else if self.scheduler.item_count() == 0 {\n            TryCrankOutcome::NoEventsToProcess\n        } else {\n            match self.crank(rng).await {\n                Some(exit_code) => {\n                    self.is_shutting_down.set();\n                    TryCrankOutcome::ShouldExit(exit_code)\n                }\n                None => TryCrankOutcome::ProcessedAnEvent,\n            }\n        }\n    }\n\n    /// Returns a reference to the reactor.\n    pub(crate) fn reactor(&self) -> &R {\n        &self.reactor\n    }\n\n    /// Returns a mutable reference to the reactor.\n    pub(crate) fn reactor_mut(&mut self) -> &mut R {\n        
&mut self.reactor\n    }\n\n    /// Shuts down a reactor, sealing and draining the entire queue before returning it.\n    pub(crate) async fn drain_into_inner(self) -> R {\n        self.is_shutting_down.set();\n        self.scheduler.seal();\n        for (ancestor, event) in self.scheduler.drain_queues().await {\n            tracing::debug!(?ancestor, %event, \"drained event\");\n        }\n        self.reactor\n    }\n}\n\n#[cfg(test)]\nimpl<R> Runner<ConditionCheckReactor<R>>\nwhere\n    R: Reactor + NetworkedReactor,\n    R::Event: Serialize,\n    R::Error: From<prometheus::Error>,\n{\n    /// Cranks the runner until `condition` is true or until `within` has elapsed.\n    ///\n    /// Returns `true` if `condition` has been met within the specified timeout.\n    ///\n    /// Panics if cranking causes the node to return an exit code.\n    pub(crate) async fn crank_until<F>(&mut self, rng: &mut TestRng, condition: F, within: Duration)\n    where\n        F: Fn(&R::Event) -> bool + Send + 'static,\n    {\n        self.reactor.set_condition_checker(Box::new(condition));\n\n        tokio::time::timeout(within, self.crank_and_check_indefinitely(rng))\n            .await\n            .unwrap_or_else(|_| {\n                panic!(\n                    \"Runner::crank_until() timed out after {}s on node {}\",\n                    within.as_secs_f64(),\n                    self.reactor.inner().node_id()\n                )\n            })\n    }\n\n    async fn crank_and_check_indefinitely(&mut self, rng: &mut TestRng) {\n        loop {\n            match self.try_crank(rng).await {\n                TryCrankOutcome::NoEventsToProcess => {\n                    FakeClock::advance_time(POLL_INTERVAL.as_millis() as u64);\n                    tokio::time::sleep(POLL_INTERVAL).await;\n                    continue;\n                }\n                TryCrankOutcome::ProcessedAnEvent => {}\n                TryCrankOutcome::ShouldExit(exit_code) => {\n                    
panic!(\"should not exit: {:?}\", exit_code)\n                }\n                TryCrankOutcome::Exited => unreachable!(),\n            }\n\n            if self.reactor.condition_result() {\n                info!(\"{} met condition\", self.reactor.inner().node_id());\n                return;\n            }\n        }\n    }\n}\n\n/// Spawns tasks that will process the given effects.\n///\n/// Result events from processing the events will be scheduled with the given ancestor.\nasync fn process_effects<Ev>(\n    ancestor: Option<NonZeroU64>,\n    scheduler: &'static Scheduler<Ev>,\n    effects: Effects<Ev>,\n    queue_kind: QueueKind,\n) where\n    Ev: Send + 'static,\n{\n    for effect in effects {\n        tokio::spawn(async move {\n            for event in effect.await {\n                scheduler.push((ancestor, event), queue_kind).await;\n            }\n        });\n    }\n}\n\n/// Converts a single effect into another by wrapping it.\nfn wrap_effect<Ev, REv, F>(wrap: F, effect: Effect<Ev>) -> Effect<REv>\nwhere\n    F: Fn(Ev) -> REv + Send + 'static,\n    Ev: Send + 'static,\n    REv: Send + 'static,\n{\n    // The double-boxing here is very unfortunate =(.\n    (async move {\n        let events = effect.await;\n        events.into_iter().map(wrap).collect()\n    })\n    .boxed()\n}\n\n/// Converts multiple effects into another by wrapping.\npub(crate) fn wrap_effects<Ev, REv, F>(wrap: F, effects: Effects<Ev>) -> Effects<REv>\nwhere\n    F: Fn(Ev) -> REv + Send + 'static + Clone,\n    Ev: Send + 'static,\n    REv: Send + 'static,\n{\n    effects\n        .into_iter()\n        .map(move |effect| wrap_effect(wrap.clone(), effect))\n        .collect()\n}\n\nfn handle_fetch_response<R, I>(\n    reactor: &mut R,\n    effect_builder: EffectBuilder<<R as Reactor>::Event>,\n    rng: &mut NodeRng,\n    sender: NodeId,\n    serialized_item: &[u8],\n) -> Effects<<R as Reactor>::Event>\nwhere\n    I: FetchItem,\n    R: Reactor,\n    <R as Reactor>::Event: 
From<fetcher::Event<I>> + From<PeerBehaviorAnnouncement>,\n{\n    match fetcher::Event::<I>::from_get_response_serialized_item(sender, serialized_item) {\n        Some(fetcher_event) => {\n            Reactor::dispatch_event(reactor, effect_builder, rng, fetcher_event.into())\n        }\n        None => effect_builder\n            .announce_block_peer_with_justification(\n                sender,\n                BlocklistJustification::SentBadItem { tag: I::TAG },\n            )\n            .ignore(),\n    }\n}\n\nfn handle_get_response<R>(\n    reactor: &mut R,\n    effect_builder: EffectBuilder<<R as Reactor>::Event>,\n    rng: &mut NodeRng,\n    sender: NodeId,\n    message: Box<NetResponse>,\n) -> Effects<<R as Reactor>::Event>\nwhere\n    R: Reactor,\n    <R as Reactor>::Event: From<transaction_acceptor::Event>\n        + From<fetcher::Event<FinalitySignature>>\n        + From<fetcher::Event<Block>>\n        + From<fetcher::Event<BlockHeader>>\n        + From<fetcher::Event<BlockExecutionResultsOrChunk>>\n        + From<fetcher::Event<LegacyDeploy>>\n        + From<fetcher::Event<Transaction>>\n        + From<fetcher::Event<SyncLeap>>\n        + From<fetcher::Event<TrieOrChunk>>\n        + From<fetcher::Event<ApprovalsHashes>>\n        + From<block_accumulator::Event>\n        + From<PeerBehaviorAnnouncement>,\n{\n    match *message {\n        NetResponse::Transaction(ref serialized_item) => handle_fetch_response::<R, Transaction>(\n            reactor,\n            effect_builder,\n            rng,\n            sender,\n            serialized_item,\n        ),\n        NetResponse::LegacyDeploy(ref serialized_item) => handle_fetch_response::<R, LegacyDeploy>(\n            reactor,\n            effect_builder,\n            rng,\n            sender,\n            serialized_item,\n        ),\n        NetResponse::Block(ref serialized_item) => {\n            handle_fetch_response::<R, Block>(reactor, effect_builder, rng, sender, serialized_item)\n        }\n     
   NetResponse::BlockHeader(ref serialized_item) => handle_fetch_response::<R, BlockHeader>(\n            reactor,\n            effect_builder,\n            rng,\n            sender,\n            serialized_item,\n        ),\n        NetResponse::FinalitySignature(ref serialized_item) => {\n            handle_fetch_response::<R, FinalitySignature>(\n                reactor,\n                effect_builder,\n                rng,\n                sender,\n                serialized_item,\n            )\n        }\n        NetResponse::SyncLeap(ref serialized_item) => handle_fetch_response::<R, SyncLeap>(\n            reactor,\n            effect_builder,\n            rng,\n            sender,\n            serialized_item,\n        ),\n        NetResponse::ApprovalsHashes(ref serialized_item) => {\n            handle_fetch_response::<R, ApprovalsHashes>(\n                reactor,\n                effect_builder,\n                rng,\n                sender,\n                serialized_item,\n            )\n        }\n        NetResponse::BlockExecutionResults(ref serialized_item) => {\n            handle_fetch_response::<R, BlockExecutionResultsOrChunk>(\n                reactor,\n                effect_builder,\n                rng,\n                sender,\n                serialized_item,\n            )\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/testing/condition_check_reactor.rs",
    "content": "use std::{\n    fmt::{self, Debug, Formatter},\n    sync::Arc,\n};\n\nuse futures::future::BoxFuture;\nuse prometheus::Registry;\n\nuse casper_types::{Chainspec, ChainspecRawBytes};\n\nuse super::network::NetworkedReactor;\nuse crate::{\n    components::{network::Identity as NetworkIdentity, ComponentState},\n    effect::{EffectBuilder, Effects},\n    reactor::{EventQueueHandle, Finalize, Reactor},\n    types::NodeId,\n    NodeRng,\n};\n\ntype ConditionChecker<R> = Box<dyn Fn(&<R as Reactor>::Event) -> bool + Send>;\n\n/// A reactor wrapping an inner reactor, and which has an optional hook into\n/// `Reactor::dispatch_event()`.\n///\n/// While the hook is not `None`, it's called on every call to `dispatch_event()`, taking a\n/// reference to the current `Event`, and setting a boolean result to true when the condition has\n/// been met.\n///\n/// Once the condition is met, the hook is reset to `None`.\npub(crate) struct ConditionCheckReactor<R: Reactor> {\n    reactor: R,\n    condition_checker: Option<ConditionChecker<R>>,\n    condition_result: bool,\n}\n\nimpl<R: Reactor> ConditionCheckReactor<R> {\n    /// Sets the condition checker hook.\n    pub(crate) fn set_condition_checker(&mut self, condition_checker: ConditionChecker<R>) {\n        self.condition_checker = Some(condition_checker);\n        self.condition_result = false;\n    }\n\n    /// Returns the result of the last execution of the condition checker hook.\n    pub(crate) fn condition_result(&self) -> bool {\n        self.condition_result\n    }\n\n    /// Returns a reference to the wrapped reactor.\n    pub(crate) fn inner(&self) -> &R {\n        &self.reactor\n    }\n\n    /// Returns a mutable reference to the wrapped reactor.\n    pub(crate) fn inner_mut(&mut self) -> &mut R {\n        &mut self.reactor\n    }\n}\n\nimpl<R: Reactor> Reactor for ConditionCheckReactor<R> {\n    type Event = R::Event;\n    type Config = R::Config;\n    type Error = R::Error;\n\n    fn new(\n        
config: Self::Config,\n        chainspec: Arc<Chainspec>,\n        chainspec_raw_bytes: Arc<ChainspecRawBytes>,\n        network_identity: NetworkIdentity,\n        registry: &Registry,\n        event_queue: EventQueueHandle<Self::Event>,\n        rng: &mut NodeRng,\n    ) -> Result<(Self, Effects<Self::Event>), Self::Error> {\n        let (reactor, effects) = R::new(\n            config,\n            chainspec,\n            chainspec_raw_bytes,\n            network_identity,\n            registry,\n            event_queue,\n            rng,\n        )?;\n        Ok((\n            Self {\n                reactor,\n                condition_checker: None,\n                condition_result: false,\n            },\n            effects,\n        ))\n    }\n\n    fn dispatch_event(\n        &mut self,\n        effect_builder: EffectBuilder<Self::Event>,\n        rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event> {\n        self.condition_result = self\n            .condition_checker\n            .as_ref()\n            .map(|condition_checker| condition_checker(&event))\n            .unwrap_or_default();\n        if self.condition_result {\n            self.condition_checker = None;\n        }\n        self.reactor.dispatch_event(effect_builder, rng, event)\n    }\n\n    fn get_component_state(&self, name: &str) -> Option<&ComponentState> {\n        self.inner().get_component_state(name)\n    }\n}\n\nimpl<R: Reactor + Finalize> Finalize for ConditionCheckReactor<R> {\n    fn finalize(self) -> BoxFuture<'static, ()> {\n        self.reactor.finalize()\n    }\n}\n\nimpl<R: Reactor + NetworkedReactor> NetworkedReactor for ConditionCheckReactor<R> {\n    fn node_id(&self) -> NodeId {\n        self.reactor.node_id()\n    }\n}\n\nimpl<R: Reactor + Debug> Debug for ConditionCheckReactor<R> {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        formatter\n            .debug_struct(\"ConditionCheckReactor\")\n            
.field(\"reactor\", &self.reactor)\n            .field(\"condition_check_result\", &self.condition_result)\n            .finish()\n    }\n}\n"
  },
  {
    "path": "node/src/testing/fake_transaction_acceptor.rs",
    "content": "#![allow(clippy::boxed_local)] // We use boxed locals to pass on event data unchanged.\n\n//! The `FakeTransactionAcceptor` behaves as per the real `TransactionAcceptor` but without any\n//! transaction verification being performed.\n//!\n//! When a new transaction is passed in, it is unconditionally accepted.  This means that the\n//! `FakeTransactionAcceptor` puts the transaction to storage, and once that has completed,\n//! announces the transaction if the storage result indicates it's a new transaction.\n\nuse std::sync::Arc;\n\nuse tracing::debug;\n\nuse casper_types::{Chainspec, Timestamp, Transaction};\n\npub(crate) use crate::components::transaction_acceptor::{Error, Event};\nuse crate::{\n    components::{transaction_acceptor::EventMetadata, Component},\n    effect::{\n        announcements::TransactionAcceptorAnnouncement, requests::StorageRequest, EffectBuilder,\n        EffectExt, Effects, Responder,\n    },\n    types::MetaTransaction,\n    utils::Source,\n    NodeRng,\n};\n\nconst COMPONENT_NAME: &str = \"fake_transaction_acceptor\";\n\npub(crate) trait ReactorEventT:\n    From<Event> + From<TransactionAcceptorAnnouncement> + From<StorageRequest> + Send\n{\n}\n\nimpl<REv> ReactorEventT for REv where\n    REv: From<Event> + From<TransactionAcceptorAnnouncement> + From<StorageRequest> + Send\n{\n}\n\n#[derive(Debug)]\npub struct FakeTransactionAcceptor {\n    is_active: bool,\n    chainspec: Chainspec,\n}\n\nimpl FakeTransactionAcceptor {\n    pub(crate) fn new() -> Self {\n        FakeTransactionAcceptor {\n            is_active: true,\n            chainspec: Chainspec::default(),\n        }\n    }\n\n    pub(crate) fn set_active(&mut self, new_setting: bool) {\n        self.is_active = new_setting;\n    }\n\n    fn accept<REv: ReactorEventT>(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        transaction: Transaction,\n        source: Source,\n        maybe_responder: Option<Responder<Result<(), Error>>>,\n    ) 
-> Effects<Event> {\n        let meta_transaction = MetaTransaction::from_transaction(\n            &transaction,\n            self.chainspec.core_config.pricing_handling,\n            &self.chainspec.transaction_config,\n        )\n        .unwrap();\n        let event_metadata = Box::new(EventMetadata::new(\n            transaction.clone(),\n            meta_transaction,\n            source,\n            maybe_responder,\n            Timestamp::now(),\n        ));\n        effect_builder\n            .put_transaction_to_storage(transaction)\n            .event(move |is_new| Event::PutToStorageResult {\n                event_metadata,\n                is_new,\n            })\n    }\n\n    fn handle_put_to_storage<REv: ReactorEventT>(\n        &self,\n        effect_builder: EffectBuilder<REv>,\n        event_metadata: Box<EventMetadata>,\n        is_new: bool,\n    ) -> Effects<Event> {\n        let EventMetadata {\n            meta_transaction: _,\n            transaction,\n            source,\n            maybe_responder,\n            verification_start_timestamp: _,\n        } = *event_metadata;\n        let mut effects = Effects::new();\n        if is_new {\n            effects.extend(\n                effect_builder\n                    .announce_new_transaction_accepted(Arc::new(transaction), source)\n                    .ignore(),\n            );\n        }\n\n        if let Some(responder) = maybe_responder {\n            effects.extend(responder.respond(Ok(())).ignore());\n        }\n        effects\n    }\n}\n\nimpl<REv: ReactorEventT> Component<REv> for FakeTransactionAcceptor {\n    type Event = Event;\n\n    fn handle_event(\n        &mut self,\n        effect_builder: EffectBuilder<REv>,\n        _rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event> {\n        if !self.is_active {\n            debug!(\n                ?event,\n                \"FakeTransactionAcceptor: not active - ignoring event\"\n            );\n          
  return Effects::new();\n        }\n        debug!(?event, \"FakeTransactionAcceptor: handling event\");\n        match event {\n            Event::Accept {\n                transaction,\n                source,\n                maybe_responder,\n            } => self.accept(effect_builder, transaction, source, maybe_responder),\n            Event::PutToStorageResult {\n                event_metadata,\n                is_new,\n                ..\n            } => self.handle_put_to_storage(effect_builder, event_metadata, is_new),\n            _ => unimplemented!(\"unexpected {:?}\", event),\n        }\n    }\n\n    fn name(&self) -> &str {\n        COMPONENT_NAME\n    }\n}\n"
  },
  {
    "path": "node/src/testing/filter_reactor.rs",
    "content": "use std::{\n    fmt::{self, Debug, Formatter},\n    sync::Arc,\n};\n\nuse either::Either;\nuse futures::future::BoxFuture;\nuse prometheus::Registry;\n\nuse casper_types::{Chainspec, ChainspecRawBytes};\n\nuse super::network::NetworkedReactor;\nuse crate::{\n    components::network::Identity as NetworkIdentity,\n    effect::{EffectBuilder, Effects},\n    failpoints::FailpointActivation,\n    reactor::{EventQueueHandle, Finalize, Reactor},\n    types::NodeId,\n    NodeRng,\n};\n\npub(crate) trait EventFilter<Ev>:\n    FnMut(Ev) -> Either<Effects<Ev>, Ev> + Send + 'static\n{\n}\nimpl<Ev, T> EventFilter<Ev> for T where T: FnMut(Ev) -> Either<Effects<Ev>, Ev> + Send + 'static {}\n\n/// A reactor wrapping an inner reactor, which has a hook into `Reactor::dispatch_event()` that\n/// allows overriding or modifying event handling.\npub(crate) struct FilterReactor<R: Reactor> {\n    reactor: R,\n    filter: Box<dyn EventFilter<R::Event>>,\n}\n\n/// A filter that doesn't modify the behavior.\nimpl<R: Reactor> FilterReactor<R> {\n    /// Sets the event filter.\n    pub(crate) fn set_filter(&mut self, filter: impl EventFilter<R::Event>) {\n        self.filter = Box::new(filter);\n    }\n\n    /// Returns a reference to the wrapped reactor.\n    pub(crate) fn inner(&self) -> &R {\n        &self.reactor\n    }\n\n    pub(crate) fn inner_mut(&mut self) -> &mut R {\n        &mut self.reactor\n    }\n}\n\nimpl<R: Reactor> Reactor for FilterReactor<R> {\n    type Event = R::Event;\n    type Config = R::Config;\n    type Error = R::Error;\n\n    fn new(\n        config: Self::Config,\n        chainspec: Arc<Chainspec>,\n        chainspec_raw_bytes: Arc<ChainspecRawBytes>,\n        network_identity: NetworkIdentity,\n        registry: &Registry,\n        event_queue: EventQueueHandle<Self::Event>,\n        rng: &mut NodeRng,\n    ) -> Result<(Self, Effects<Self::Event>), Self::Error> {\n        let (reactor, effects) = R::new(\n            config,\n            
chainspec,\n            chainspec_raw_bytes,\n            network_identity,\n            registry,\n            event_queue,\n            rng,\n        )?;\n        let filter = Box::new(Either::Right);\n        Ok((Self { reactor, filter }, effects))\n    }\n\n    fn dispatch_event(\n        &mut self,\n        effect_builder: EffectBuilder<Self::Event>,\n        rng: &mut NodeRng,\n        event: Self::Event,\n    ) -> Effects<Self::Event> {\n        match (self.filter)(event) {\n            Either::Left(effects) => effects,\n            Either::Right(event) => self.reactor.dispatch_event(effect_builder, rng, event),\n        }\n    }\n\n    fn activate_failpoint(&mut self, activation: &FailpointActivation) {\n        self.reactor.activate_failpoint(activation);\n    }\n}\n\nimpl<R: Reactor + Finalize> Finalize for FilterReactor<R> {\n    fn finalize(self) -> BoxFuture<'static, ()> {\n        self.reactor.finalize()\n    }\n}\n\nimpl<R: Reactor + NetworkedReactor> NetworkedReactor for FilterReactor<R> {\n    fn node_id(&self) -> NodeId {\n        self.reactor.node_id()\n    }\n}\n\nimpl<R: Reactor + Debug> Debug for FilterReactor<R> {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        formatter\n            .debug_struct(\"FilterReactor\")\n            .field(\"reactor\", &self.reactor)\n            .finish()\n    }\n}\n"
  },
  {
    "path": "node/src/testing/network.rs",
    "content": "//! A network of test reactors.\n\nuse std::{\n    collections::{hash_map::Entry, HashMap},\n    fmt::Debug,\n    sync::{\n        atomic::{AtomicBool, Ordering},\n        Arc,\n    },\n    time::Duration,\n};\n\nuse fake_instant::FakeClock as Instant;\nuse futures::future::{BoxFuture, FutureExt};\nuse serde::Serialize;\nuse tokio::time::{self, error::Elapsed};\nuse tracing::{debug, error_span};\nuse tracing_futures::Instrument;\n\nuse casper_types::testing::TestRng;\n\nuse casper_types::{Chainspec, ChainspecRawBytes};\n\nuse super::ConditionCheckReactor;\nuse crate::{\n    components::ComponentState,\n    effect::{EffectBuilder, Effects},\n    reactor::{Finalize, Reactor, Runner, TryCrankOutcome},\n    tls::KeyFingerprint,\n    types::{ExitCode, NodeId},\n    utils::Loadable,\n    NodeRng,\n};\n\n/// Type alias for set of nodes inside a network.\n///\n/// Provided as a convenience for writing condition functions for `settle_on` and friends.\npub(crate) type Nodes<R> = HashMap<NodeId, Runner<ConditionCheckReactor<R>>>;\n\n/// A reactor with networking functionality.\n///\n/// Test reactors implementing this SHOULD implement at least the `node_id` function if they have\n/// proper networking functionality.\npub(crate) trait NetworkedReactor: Sized {\n    /// Returns the node ID assigned to this specific reactor instance.\n    ///\n    /// The default implementation generates a pseudo-id base on its memory address.\n    fn node_id(&self) -> NodeId {\n        #[allow(trivial_casts)]\n        let addr = self as *const _ as usize;\n        let mut raw: [u8; KeyFingerprint::LENGTH] = [0; KeyFingerprint::LENGTH];\n        raw[0..(size_of::<usize>())].copy_from_slice(&addr.to_be_bytes());\n        NodeId::from(KeyFingerprint::from(raw))\n    }\n}\n\n/// Time interval for which to poll an observed testing network when no events have occurred.\nconst POLL_INTERVAL: Duration = Duration::from_millis(10);\n\n/// A network of multiple test reactors.\n///\n/// 
Nodes themselves are not run in the background, rather manual cranking is required through\n/// `crank_all`. As an alternative, the `settle` and `settle_all` functions can be used to continue\n/// cranking until a condition has been reached.\n#[derive(Debug, Default)]\npub(crate) struct TestingNetwork<R: Reactor + NetworkedReactor> {\n    /// Current network.\n    nodes: HashMap<NodeId, Runner<ConditionCheckReactor<R>>>,\n}\n\nimpl<R> TestingNetwork<R>\nwhere\n    R: Reactor + NetworkedReactor,\n    R::Config: Default,\n    <R as Reactor>::Error: Debug,\n    R::Event: Serialize,\n    R::Error: From<prometheus::Error>,\n{\n    /// Creates a new networking node on the network using the default root node port.\n    ///\n    /// # Panics\n    ///\n    /// Panics if a duplicate node ID is being inserted. This should only happen in case a randomly\n    /// generated ID collides.\n    pub(crate) async fn add_node<'a, 'b: 'a>(\n        &'a mut self,\n        rng: &'b mut TestRng,\n    ) -> Result<(NodeId, &'a mut Runner<ConditionCheckReactor<R>>), R::Error> {\n        self.add_node_with_config(Default::default(), rng).await\n    }\n\n    /// Adds `count` new nodes to the network, and returns their IDs.\n    pub(crate) async fn add_nodes(&mut self, rng: &mut TestRng, count: usize) -> Vec<NodeId> {\n        let mut node_ids = vec![];\n        for _ in 0..count {\n            let (node_id, _runner) = self.add_node(rng).await.unwrap();\n            node_ids.push(node_id);\n        }\n        node_ids\n    }\n}\n\nimpl<R> TestingNetwork<R>\nwhere\n    R: Reactor + NetworkedReactor,\n    R::Event: Serialize,\n    R::Error: From<prometheus::Error> + From<R::Error>,\n{\n    /// Creates a new network.\n    pub(crate) fn new() -> Self {\n        TestingNetwork {\n            nodes: HashMap::new(),\n        }\n    }\n\n    /// Creates a new networking node on the network.\n    ///\n    /// # Panics\n    ///\n    /// Panics if a duplicate node ID is being inserted.\n    pub(crate) 
async fn add_node_with_config<'a, 'b: 'a>(\n        &'a mut self,\n        cfg: R::Config,\n        rng: &'b mut NodeRng,\n    ) -> Result<(NodeId, &'a mut Runner<ConditionCheckReactor<R>>), R::Error> {\n        let (chainspec, chainspec_raw_bytes) =\n            <(Chainspec, ChainspecRawBytes)>::from_resources(\"local\");\n        self.add_node_with_config_and_chainspec(\n            cfg,\n            Arc::new(chainspec),\n            Arc::new(chainspec_raw_bytes),\n            rng,\n        )\n        .await\n    }\n\n    /// Creates a new networking node on the network.\n    ///\n    /// # Panics\n    ///\n    /// Panics if a duplicate node ID is being inserted.\n    pub(crate) async fn add_node_with_config_and_chainspec<'a, 'b: 'a>(\n        &'a mut self,\n        cfg: R::Config,\n        chainspec: Arc<Chainspec>,\n        chainspec_raw_bytes: Arc<ChainspecRawBytes>,\n        rng: &'b mut NodeRng,\n    ) -> Result<(NodeId, &'a mut Runner<ConditionCheckReactor<R>>), R::Error> {\n        let runner: Runner<ConditionCheckReactor<R>> =\n            Runner::new(cfg, chainspec, chainspec_raw_bytes, rng).await?;\n\n        let node_id = runner.reactor().node_id();\n\n        let node_ref = match self.nodes.entry(node_id) {\n            Entry::Occupied(_) => {\n                // This happens in the event of the extremely unlikely hash collision, or if the\n                // node ID was set manually.\n                panic!(\"trying to insert a duplicate node {}\", node_id)\n            }\n            Entry::Vacant(entry) => entry.insert(runner),\n        };\n\n        Ok((node_id, node_ref))\n    }\n\n    /// Removes a node from the network.\n    pub(crate) fn remove_node(\n        &mut self,\n        node_id: &NodeId,\n    ) -> Option<Runner<ConditionCheckReactor<R>>> {\n        self.nodes.remove(node_id)\n    }\n\n    /// Crank the specified runner once.\n    pub(crate) async fn crank(&mut self, node_id: &NodeId, rng: &mut TestRng) -> TryCrankOutcome {\n        
let runner = self.nodes.get_mut(node_id).expect(\"should find node\");\n        let node_id = runner.reactor().node_id();\n        runner\n            .try_crank(rng)\n            .instrument(error_span!(\"crank\", node_id = %node_id))\n            .await\n    }\n\n    /// Crank only the specified runner until `condition` is true or until `within` has elapsed.\n    ///\n    /// Returns `true` if `condition` has been met within the specified timeout.\n    ///\n    /// Panics if cranking causes the node to return an exit code.\n    pub(crate) async fn crank_until<F>(\n        &mut self,\n        node_id: &NodeId,\n        rng: &mut TestRng,\n        condition: F,\n        within: Duration,\n    ) where\n        F: Fn(&R::Event) -> bool + Send + 'static,\n    {\n        self.nodes\n            .get_mut(node_id)\n            .unwrap()\n            .crank_until(rng, condition, within)\n            .await\n    }\n\n    /// Crank all runners once, returning the number of events processed.\n    ///\n    /// Panics if any node returns an exit code.\n    async fn crank_all(&mut self, rng: &mut TestRng) -> usize {\n        let mut event_count = 0;\n        for node in self.nodes.values_mut() {\n            let node_id = node.reactor().node_id();\n            match node\n                .try_crank(rng)\n                .instrument(error_span!(\"crank\", node_id = %node_id))\n                .await\n            {\n                TryCrankOutcome::NoEventsToProcess => (),\n                TryCrankOutcome::ProcessedAnEvent => event_count += 1,\n                TryCrankOutcome::ShouldExit(exit_code) => {\n                    panic!(\"should not exit: {:?}\", exit_code)\n                }\n                TryCrankOutcome::Exited => unreachable!(),\n            }\n        }\n\n        event_count\n    }\n\n    /// Crank all runners until `condition` is true on the specified runner or until `within` has\n    /// elapsed.\n    ///\n    /// Returns `true` if `condition` has been met 
within the specified timeout.\n    ///\n    /// Panics if cranking causes the node to return an exit code.\n    pub(crate) async fn crank_all_until<F>(\n        &mut self,\n        node_id: &NodeId,\n        rng: &mut TestRng,\n        condition: F,\n        within: Duration,\n    ) where\n        F: Fn(&R::Event) -> bool + Send + 'static,\n    {\n        self.nodes\n            .get_mut(node_id)\n            .unwrap()\n            .reactor_mut()\n            .set_condition_checker(Box::new(condition));\n\n        time::timeout(within, self.crank_and_check_all_indefinitely(node_id, rng))\n            .await\n            .unwrap()\n    }\n\n    async fn crank_and_check_all_indefinitely(\n        &mut self,\n        node_to_check: &NodeId,\n        rng: &mut TestRng,\n    ) {\n        loop {\n            let mut no_events = true;\n            for node in self.nodes.values_mut() {\n                let node_id = node.reactor().node_id();\n                match node\n                    .try_crank(rng)\n                    .instrument(error_span!(\"crank\", node_id = %node_id))\n                    .await\n                {\n                    TryCrankOutcome::NoEventsToProcess => (),\n                    TryCrankOutcome::ProcessedAnEvent => {\n                        no_events = false;\n                    }\n                    TryCrankOutcome::ShouldExit(exit_code) => {\n                        panic!(\"should not exit: {:?}\", exit_code)\n                    }\n                    TryCrankOutcome::Exited => unreachable!(),\n                }\n                if node_id == *node_to_check && node.reactor().condition_result() {\n                    debug!(\"{} met condition\", node_to_check);\n                    return;\n                }\n            }\n\n            if no_events {\n                Instant::advance_time(POLL_INTERVAL.as_millis() as u64);\n                time::sleep(POLL_INTERVAL).await;\n                continue;\n            }\n        }\n    
}\n\n    /// Process events on all nodes until all event queues are empty for at least `quiet_for`.\n    ///\n    /// Panics if after `within` the event queues are still not idle, or if any node returns an exit\n    /// code.\n    pub(crate) async fn settle(\n        &mut self,\n        rng: &mut TestRng,\n        quiet_for: Duration,\n        within: Duration,\n    ) {\n        time::timeout(within, self.settle_indefinitely(rng, quiet_for))\n            .await\n            .unwrap_or_else(|_| {\n                panic!(\n                    \"network did not settle for {:?} within {:?}\",\n                    quiet_for, within\n                )\n            })\n    }\n\n    async fn settle_indefinitely(&mut self, rng: &mut TestRng, quiet_for: Duration) {\n        let mut no_events = false;\n        loop {\n            if self.crank_all(rng).await == 0 {\n                // Stop once we have no pending events and haven't had any for `quiet_for` time.\n                if no_events {\n                    debug!(\"network has been quiet for {:?}\", quiet_for);\n                    break;\n                } else {\n                    no_events = true;\n                    Instant::advance_time(quiet_for.as_millis() as u64);\n                    time::sleep(quiet_for).await;\n                }\n            } else {\n                no_events = false;\n            }\n        }\n    }\n\n    /// Runs the main loop of every reactor until `condition` is true.\n    ///\n    /// Returns an error if the `condition` is not reached inside of `within`.\n    ///\n    /// Panics if any node returns an exit code.  
To settle on an exit code, use `settle_on_exit`\n    /// instead.\n    pub(crate) async fn try_settle_on<F>(\n        &mut self,\n        rng: &mut TestRng,\n        condition: F,\n        within: Duration,\n    ) -> Result<(), Elapsed>\n    where\n        F: Fn(&Nodes<R>) -> bool,\n    {\n        time::timeout(within, self.settle_on_indefinitely(rng, condition)).await\n    }\n\n    /// Runs the main loop of every reactor until `condition` is true.\n    ///\n    /// Panics if the `condition` is not reached inside of `within`, or if any node returns an exit\n    /// code.\n    ///\n    /// To settle on an exit code, use `settle_on_exit` instead.\n    pub(crate) async fn settle_on<F>(&mut self, rng: &mut TestRng, condition: F, within: Duration)\n    where\n        F: Fn(&Nodes<R>) -> bool,\n    {\n        self.try_settle_on(rng, condition, within)\n            .await\n            .unwrap_or_else(|_| {\n                panic!(\n                    \"network did not settle on condition within {} seconds\",\n                    within.as_secs_f64()\n                )\n            })\n    }\n\n    async fn settle_on_indefinitely<F>(&mut self, rng: &mut TestRng, condition: F)\n    where\n        F: Fn(&Nodes<R>) -> bool,\n    {\n        loop {\n            if condition(&self.nodes) {\n                debug!(\"network settled on meeting condition\");\n                break;\n            }\n\n            if self.crank_all(rng).await == 0 {\n                // No events processed, wait for a bit to avoid 100% cpu usage.\n                Instant::advance_time(POLL_INTERVAL.as_millis() as u64);\n                time::sleep(POLL_INTERVAL).await;\n            }\n        }\n    }\n\n    /// Runs the main loop of every reactor until the nodes return the expected exit code.\n    ///\n    /// Panics if the nodes do not exit inside of `within`, or if any node returns an unexpected\n    /// exit code.\n    pub(crate) async fn settle_on_exit(\n        &mut self,\n        rng: &mut 
TestRng,\n        expected: ExitCode,\n        within: Duration,\n    ) {\n        time::timeout(within, self.settle_on_exit_indefinitely(rng, expected))\n            .await\n            .unwrap_or_else(|_| panic!(\"network did not settle on condition within {:?}\", within))\n    }\n\n    /// Runs the main loop of every reactor until a specified node returns the expected exit code.\n    ///\n    /// Panics if the node does not exit inside of `within`, or if any node returns an unexpected\n    /// exit code.\n    pub(crate) async fn settle_on_node_exit(\n        &mut self,\n        rng: &mut TestRng,\n        node_id: &NodeId,\n        expected: ExitCode,\n        within: Duration,\n    ) {\n        time::timeout(\n            within,\n            self.settle_on_node_exit_indefinitely(rng, node_id, expected),\n        )\n        .await\n        .unwrap_or_else(|elapsed| {\n            panic!(\n                \"network did not settle on condition within {within:?}, time elapsed: {elapsed:?}\",\n            )\n        })\n    }\n\n    /// Keeps cranking the network until every reactor's specified component is in the given state.\n    ///\n    /// # Panics\n    ///\n    /// Panics if any reactor returns `None` on its [`Reactor::get_component_state()`] call.\n    pub(crate) async fn _settle_on_component_state(\n        &mut self,\n        rng: &mut TestRng,\n        name: &str,\n        state: &ComponentState,\n        timeout: Duration,\n    ) {\n        self.settle_on(\n            rng,\n            |net| {\n                net.values()\n                    .all(|runner| match runner.reactor().get_component_state(name) {\n                        Some(actual_state) => actual_state == state,\n                        None => panic!(\"unknown or unsupported component: {}\", name),\n                    })\n            },\n            timeout,\n        )\n        .await;\n    }\n\n    /// Starts a background process that will crank all nodes until stopped.\n    ///\n    
/// Returns a future that will, once polled, stop all cranking and return the network and the\n    /// the random number generator. Note that the stop command will be sent as soon as the returned\n    /// future is polled (awaited), but no sooner.\n    pub(crate) fn crank_until_stopped(\n        mut self,\n        mut rng: TestRng,\n    ) -> impl futures::Future<Output = (Self, TestRng)>\n    where\n        R: Send + 'static,\n    {\n        let stop = Arc::new(AtomicBool::new(false));\n        let handle = tokio::spawn({\n            let stop = stop.clone();\n            async move {\n                while !stop.load(Ordering::Relaxed) {\n                    if self.crank_all(&mut rng).await == 0 {\n                        time::sleep(POLL_INTERVAL).await;\n                    };\n                }\n                (self, rng)\n            }\n        });\n\n        async move {\n            // Trigger the background process stop.\n            stop.store(true, Ordering::Relaxed);\n            handle.await.expect(\"failed to join background crank\")\n        }\n    }\n\n    async fn settle_on_exit_indefinitely(&mut self, rng: &mut TestRng, expected: ExitCode) {\n        let mut exited_as_expected = 0;\n        loop {\n            if exited_as_expected == self.nodes.len() {\n                debug!(?expected, \"all nodes exited with expected code\");\n                break;\n            }\n\n            let mut event_count = 0;\n            for node in self.nodes.values_mut() {\n                let node_id = node.reactor().node_id();\n                match node\n                    .try_crank(rng)\n                    .instrument(error_span!(\"crank\", node_id = %node_id))\n                    .await\n                {\n                    TryCrankOutcome::NoEventsToProcess => (),\n                    TryCrankOutcome::ProcessedAnEvent => event_count += 1,\n                    TryCrankOutcome::ShouldExit(exit_code) if exit_code == expected => {\n                        
exited_as_expected += 1;\n                        event_count += 1;\n                    }\n                    TryCrankOutcome::ShouldExit(exit_code) => {\n                        panic!(\n                            \"unexpected exit: expected {:?}, got {:?}\",\n                            expected, exit_code\n                        )\n                    }\n                    TryCrankOutcome::Exited => (),\n                }\n            }\n\n            if event_count == 0 {\n                // No events processed, wait for a bit to avoid 100% cpu usage.\n                Instant::advance_time(POLL_INTERVAL.as_millis() as u64);\n                time::sleep(POLL_INTERVAL).await;\n            }\n        }\n    }\n\n    async fn settle_on_node_exit_indefinitely(\n        &mut self,\n        rng: &mut TestRng,\n        node_id: &NodeId,\n        expected: ExitCode,\n    ) {\n        'outer: loop {\n            let mut event_count = 0;\n            for node in self.nodes.values_mut() {\n                let current_node_id = node.reactor().node_id();\n                match node\n                    .try_crank(rng)\n                    .instrument(error_span!(\"crank\", node_id = %node_id))\n                    .await\n                {\n                    TryCrankOutcome::NoEventsToProcess => (),\n                    TryCrankOutcome::ProcessedAnEvent => event_count += 1,\n                    TryCrankOutcome::ShouldExit(exit_code)\n                        if (exit_code == expected && current_node_id == *node_id) =>\n                    {\n                        debug!(?expected, ?node_id, \"node exited with expected code\");\n                        break 'outer;\n                    }\n                    TryCrankOutcome::ShouldExit(exit_code) => {\n                        panic!(\n                            \"unexpected exit: expected {expected:?} for node {node_id:?}, got {exit_code:?} for node {current_node_id:?}\",\n                        )\n                 
   }\n                    TryCrankOutcome::Exited => (),\n                }\n            }\n\n            if event_count == 0 {\n                // No events processed, wait for a bit to avoid 100% cpu usage.\n                Instant::advance_time(POLL_INTERVAL.as_millis() as u64);\n                time::sleep(POLL_INTERVAL).await;\n            }\n        }\n    }\n\n    /// Returns the internal map of nodes.\n    pub(crate) fn nodes(&self) -> &HashMap<NodeId, Runner<ConditionCheckReactor<R>>> {\n        &self.nodes\n    }\n\n    /// Returns the internal map of nodes, mutable.\n    pub(crate) fn nodes_mut(&mut self) -> &mut HashMap<NodeId, Runner<ConditionCheckReactor<R>>> {\n        &mut self.nodes\n    }\n\n    /// Returns an iterator over all runners, mutable.\n    pub(crate) fn runners_mut(\n        &mut self,\n    ) -> impl Iterator<Item = &mut Runner<ConditionCheckReactor<R>>> {\n        self.nodes.values_mut()\n    }\n\n    /// Returns an iterator over all reactors, mutable.\n    pub(crate) fn reactors_mut(&mut self) -> impl Iterator<Item = &mut R> {\n        self.runners_mut()\n            .map(|runner| runner.reactor_mut().inner_mut())\n    }\n\n    /// Create effects and dispatch them on the given node.\n    ///\n    /// The effects are created via a call to `create_effects` which is itself passed an instance of\n    /// an `EffectBuilder`.\n    pub(crate) async fn process_injected_effect_on<F>(\n        &mut self,\n        node_id: &NodeId,\n        create_effects: F,\n    ) where\n        F: FnOnce(EffectBuilder<R::Event>) -> Effects<R::Event>,\n    {\n        let runner = self.nodes.get_mut(node_id).unwrap();\n        let node_id = runner.reactor().node_id();\n        runner\n            .process_injected_effects(create_effects)\n            .instrument(error_span!(\"inject\", node_id = %node_id))\n            .await\n    }\n}\n\nimpl<R> Finalize for TestingNetwork<R>\nwhere\n    R: Finalize + NetworkedReactor + Reactor + Send + 'static,\n    R::Event: 
Serialize + Send + Sync,\n    R::Error: From<prometheus::Error>,\n{\n    fn finalize(self) -> BoxFuture<'static, ()> {\n        // We support finalizing networks where the reactor itself can be finalized.\n\n        async move {\n            // Shutdown the sender of every reactor node to ensure the port is open again.\n            for (_, node) in self.nodes.into_iter() {\n                node.drain_into_inner().await.finalize().await;\n            }\n\n            debug!(\"network finalized\");\n        }\n        .boxed()\n    }\n}\n"
  },
  {
    "path": "node/src/testing/test_clock.rs",
    "content": "//! Testing clock\n//!\n//! A controllable clock for testing.\n//!\n//! # When to use `FakeClock` instead\n//!\n//! The [`TestClock`] is suitable for code written with \"external\" time passed in through its\n//! regular interfaces already in mind. Code that does not conform to this should use `FakeClock`\n//! and conditional compilation (`#[cfg(test)] ...`) instead.\n\nuse std::time::{Duration, Instant};\n\n/// How far back the test clock can go (roughly 10 years).\nconst TEST_CLOCK_LEEWAY: Duration = Duration::from_secs(315_569_520);\n\n/// A rewindable and forwardable clock for testing that does not tick on its own.\n#[derive(Debug)]\npub struct TestClock {\n    /// The current time set on the clock.\n    now: Instant,\n}\n\nimpl Default for TestClock {\n    fn default() -> Self {\n        TestClock::new()\n    }\n}\n\nimpl TestClock {\n    /// Creates a new testing clock.\n    ///\n    /// Testing clocks will not advance unless prompted to do so.\n    pub fn new() -> Self {\n        Self {\n            now: Instant::now() + TEST_CLOCK_LEEWAY,\n        }\n    }\n\n    /// Returns the \"current\" time.\n    pub fn now(&self) -> Instant {\n        self.now\n    }\n\n    /// Advances the clock by duration.\n    pub fn advance(&mut self, duration: Duration) {\n        self.now += duration;\n    }\n\n    /// Turns the clock by duration.\n    pub fn rewind(&mut self, duration: Duration) {\n        self.now -= duration;\n    }\n\n    /// `FakeClock` compatible interface.\n    pub fn advance_time(&mut self, ms: u64) {\n        self.advance(Duration::from_millis(ms))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::{thread, time::Duration};\n\n    use super::TestClock;\n\n    #[test]\n    fn test_clock_operation() {\n        let mut clock = TestClock::new();\n\n        let initial = clock.now();\n\n        // Ensure the clock does not advance on its own.\n        thread::sleep(Duration::from_millis(10));\n\n        assert_eq!(initial, 
clock.now());\n\n        // Ensure the clock can go forwards and backwards.\n        clock.advance(Duration::from_secs(1));\n        clock.advance_time(1_000);\n\n        assert_eq!(clock.now() - initial, Duration::from_secs(2));\n\n        clock.rewind(Duration::from_secs(3));\n        assert_eq!(initial - clock.now(), Duration::from_secs(1));\n    }\n}\n"
  },
  {
    "path": "node/src/testing.rs",
    "content": "//! Testing utilities.\n//!\n//! Contains various parts and components to aid writing tests and simulations using the\n//! `casper-node` library.\n\nmod condition_check_reactor;\nmod fake_transaction_acceptor;\npub(crate) mod filter_reactor;\npub(crate) mod network;\npub(crate) mod test_clock;\n\nuse std::{\n    any::type_name,\n    fmt::Debug,\n    fs,\n    io::Write,\n    marker::PhantomData,\n    ops::Range,\n    sync::atomic::{AtomicU16, Ordering},\n    time,\n};\n\nuse anyhow::Context;\nuse assert_json_diff::{assert_json_eq, assert_json_matches_no_panic, CompareMode, Config};\nuse derive_more::From;\nuse futures::channel::oneshot;\nuse once_cell::sync::Lazy;\nuse rand::Rng;\nuse serde_json::Value;\nuse tempfile::TempDir;\nuse tokio::runtime::{self, Runtime};\nuse tracing::{debug, warn};\n\nuse casper_types::testing::TestRng;\n\nuse crate::{\n    components::Component,\n    effect::{\n        announcements::{ControlAnnouncement, FatalAnnouncement},\n        requests::NetworkRequest,\n        EffectBuilder, Effects, Responder,\n    },\n    logging,\n    protocol::Message,\n    reactor::{EventQueueHandle, QueueKind, ReactorEvent, Scheduler},\n};\npub(crate) use condition_check_reactor::ConditionCheckReactor;\npub(crate) use fake_transaction_acceptor::FakeTransactionAcceptor;\n\n/// Time to wait (at most) for a `fatal` to resolve before considering the dropping of a responder a\n/// problem.\nconst FATAL_GRACE_TIME: time::Duration = time::Duration::from_secs(3);\n\n/// The range of ports used to allocate ports for network ports.\n///\n/// The IANA ephemeral port range is 49152–65535, while Linux uses 32768–60999 by default. Windows\n/// on the other hand uses 1025–60000. Mac OS X seems to use 49152-65535. 
For this reason this\n/// constant uses different values on different systems.\n#[cfg(not(target_os = \"windows\"))]\nconst TEST_PORT_RANGE: Range<u16> = {\n    // Note: Ensure the range is prime, so that any chosen `TEST_PORT_STRIDE` wraps around without\n    // conflicting.\n\n    // All reasonable non-Windows systems seem to have a \"hole\" just below port 30000.\n    //\n    // This also does not conflict with nctl ports.\n    29000..29997\n};\n\n// On windows, we sneak into the upper end instead.\n#[cfg(target_os = \"windows\")]\nconst TEST_PORT_RANGE: Range<u16> = 60001..60998;\n\n/// Random offset + stride for port generation.\nconst TEST_PORT_STRIDE: u16 = 29;\n\npub(crate) const LARGE_WASM_LANE_ID: u8 = 3;\n\nmacro_rules! map {\n    () => { std::collections::BTreeMap::new() };\n    ( $first_key:expr => $first_value:expr $( , $key:expr => $value:expr )* $(,)? ) => {{\n        let mut map = std::collections::BTreeMap::new();\n        // There is no reason to add twice the same key.\n        // Since it's used for testing, we can panic in such a case:\n        assert!(map.insert($first_key, $first_value).is_none());\n        $(\n            assert!(map.insert($key, $value).is_none());\n        )*\n        map\n    }};\n}\nmacro_rules! set {\n    () => { std::collections::BTreeSet::new() };\n    ( $first_value:expr $( , $value:expr )* $(,)? ) => {{\n        let mut set = std::collections::BTreeSet::new();\n        // There is no reason to add twice the same key.\n        // Since it's used for testing, we can panic in such a case:\n        assert!(set.insert($first_value));\n        $(\n            assert!(set.insert($value));\n        )*\n        set\n    }}\n}\npub(crate) use map;\npub(crate) use set;\n\n/// Create an unused port on localhost.\n///\n/// Returns a random port on localhost, provided that no other applications are binding ports inside\n/// `TEST_PORT_RANGE` and no other testing process is run in parallel. 
Should the latter happen,\n/// some randomization is used to avoid conflicts, without guarantee of success.\npub(crate) fn unused_port_on_localhost() -> u16 {\n    // Previous iterations of this implementation tried other approaches such as binding an\n    // ephemeral port and using that. This ran into race condition issues when the port was reused\n    // in the timespan where it was released and rebound.\n\n    // The simpler approach is to select a random port from the non-ephemeral range and hope that no\n    // daemons are already bound/listening on it, which should not be the case on a CI system.\n\n    // We use a random offset and stride to stretch this a little bit, should two processes run at\n    // the same time.\n    static NEXT_PORT: Lazy<AtomicU16> = Lazy::new(|| {\n        rand::thread_rng()\n            .gen_range(TEST_PORT_RANGE.start..(TEST_PORT_RANGE.start + TEST_PORT_STRIDE))\n            .into()\n    });\n\n    NEXT_PORT.fetch_add(TEST_PORT_STRIDE, Ordering::SeqCst)\n}\n\n/// Sets up logging for testing.\n///\n/// Can safely be called multiple times.\npub(crate) fn init_logging() {\n    // TODO: Write logs to file by default for each test.\n    logging::init()\n        // Ignore the return value, setting the global subscriber will fail if `init_logging` has\n        // been called before, which we don't care about.\n        .ok();\n}\n\n/// Harness to test a single component as isolated as possible.\n///\n/// Contains enough reactor machinery to drive a single component and a temporary directory.\n///\n/// # Usage\n///\n/// Construction of a harness can be done straightforwardly through the `Default` trait, or the\n/// builder can be used to construct various aspects of it.\npub(crate) struct ComponentHarness<REv: 'static> {\n    /// Test random number generator instance.\n    pub(crate) rng: TestRng,\n    /// Scheduler for events. 
Only explicitly polled by the harness.\n    pub(crate) scheduler: &'static Scheduler<REv>,\n    /// Effect builder pointing at the scheduler.\n    pub(crate) effect_builder: EffectBuilder<REv>,\n    /// A temporary directory that can be used to store various data.\n    pub(crate) tmp: TempDir,\n    /// The `async` runtime used to execute effects.\n    pub(crate) runtime: Runtime,\n}\n\n/// Builder for a `ComponentHarness`.\npub(crate) struct ComponentHarnessBuilder<REv: 'static> {\n    rng: Option<TestRng>,\n    tmp: Option<TempDir>,\n    _phantom: PhantomData<REv>,\n}\n\nimpl<REv: 'static + Debug> ComponentHarnessBuilder<REv> {\n    /// Builds a component harness instance.\n    ///\n    /// # Panics\n    ///\n    /// Panics if building the harness fails.\n    pub(crate) fn build(self) -> ComponentHarness<REv> {\n        self.try_build().expect(\"failed to build component harness\")\n    }\n\n    /// Sets the on-disk harness folder.\n    pub(crate) fn on_disk(mut self, on_disk: TempDir) -> ComponentHarnessBuilder<REv> {\n        self.tmp = Some(on_disk);\n        self\n    }\n\n    /// Sets the test random number generator.\n    pub(crate) fn rng(mut self, rng: TestRng) -> ComponentHarnessBuilder<REv> {\n        self.rng = Some(rng);\n        self\n    }\n\n    /// Tries to build a component harness.\n    ///\n    /// Construction may fail for various reasons such as not being able to create a temporary\n    /// directory.\n    pub(crate) fn try_build(self) -> anyhow::Result<ComponentHarness<REv>> {\n        let tmp = match self.tmp {\n            Some(tmp) => tmp,\n            None => {\n                TempDir::new().context(\"could not create temporary directory for test harness\")?\n            }\n        };\n\n        let rng = self.rng.unwrap_or_default();\n\n        let scheduler = Box::leak(Box::new(Scheduler::new(QueueKind::weights(), None)));\n        let event_queue_handle = EventQueueHandle::without_shutdown(scheduler);\n        let effect_builder = 
EffectBuilder::new(event_queue_handle);\n        let runtime = runtime::Builder::new_multi_thread()\n            .enable_all()\n            .build()\n            .context(\"build tokio runtime\")?;\n\n        Ok(ComponentHarness {\n            rng,\n            scheduler,\n            effect_builder,\n            tmp,\n            runtime,\n        })\n    }\n}\n\nimpl<REv: 'static> ComponentHarness<REv> {\n    /// Creates a new component harness builder.\n    pub(crate) fn builder() -> ComponentHarnessBuilder<REv> {\n        ComponentHarnessBuilder {\n            rng: None,\n            tmp: None,\n            _phantom: PhantomData,\n        }\n    }\n\n    /// Deconstructs the harness, keeping the on-disk state and test rng.\n    pub(crate) fn into_parts(self) -> (TempDir, TestRng) {\n        (self.tmp, self.rng)\n    }\n\n    /// Returns whether or not there are pending events on the event queue.\n    pub(crate) fn is_idle(&self) -> bool {\n        self.scheduler.item_count() == 0\n    }\n\n    /// Sends a request, expecting an immediate response.\n    ///\n    /// Sends a request by creating a channel for the response, then mapping it using the function\n    /// `f`. 
Executes all returned effects, then awaits a response.\n    pub(crate) fn send_request<C, T, F>(&mut self, component: &mut C, f: F) -> T\n    where\n        C: Component<REv>,\n        <C as Component<REv>>::Event: Send + 'static,\n        T: Send + 'static,\n        F: FnOnce(Responder<T>) -> C::Event,\n        REv: ReactorEvent,\n    {\n        // Prepare a channel.\n        let (sender, receiver) = oneshot::channel();\n\n        // Create response function.\n        let responder = Responder::without_shutdown(sender);\n\n        // Create the event for the component.\n        let request_event = f(responder);\n\n        // Send directly to component.\n        let returned_effects = self.send_event(component, request_event);\n\n        // Execute the effects on our dedicated runtime, hopefully creating the responses.\n        let mut join_handles = Vec::new();\n        for effect in returned_effects {\n            join_handles.push(self.runtime.spawn(effect));\n        }\n\n        // Wait for a response to arrive.\n        self.runtime.block_on(receiver).unwrap_or_else(|err| {\n            // A channel was closed and this is usually an error. However, we consider all pending\n            // events, in case we did get a control announcement requiring us to fatal error instead\n            // before panicking on the basis of the missing response.\n\n            // We give each of them a little time to produce the desired event. 
Note that `join_all`\n            // should be safe to cancel, since we are only awaiting join handles.\n            let join_all = async {\n                for handle in join_handles {\n                    if let Err(err) = handle.await {\n                        warn!(\"Join error while waiting for an effect to finish: {}\", err);\n                    };\n                }\n            };\n\n            if let Err(_timeout) = self.runtime.block_on(async move {\n                // Note: timeout can only be called from within a running running, this is why\n                // we use an extra `async` block here.\n                tokio::time::timeout(FATAL_GRACE_TIME, join_all).await\n            }) {\n                warn!(grace_time=?FATAL_GRACE_TIME, \"while a responder was dropped in a unit test, \\\n                I waited for all other pending effects to complete in case the output of a \\\n                `fatal!` was among them but none of them completed\");\n            }\n\n            // Iterate over all events that currently are inside the queue and fish out any fatal.\n            for _ in 0..(self.scheduler.item_count()) {\n                let ((_ancestor, ev), _queue_kind) = self.runtime.block_on(self.scheduler.pop());\n\n                if !ev.is_control() {\n                    debug!(?ev, \"ignoring event while looking for a fatal\");\n                    continue;\n                }\n                match ev.try_into_control().unwrap() {\n                    ControlAnnouncement::ShutdownDueToUserRequest { .. } => {\n                        panic!(\"a control announcement requesting a shutdown due to user request was received\")\n                    }\n                    ControlAnnouncement::ShutdownForUpgrade { .. } => {\n                        panic!(\"a control announcement requesting a shutdown for upgrade was received\")\n                    }\n                    ControlAnnouncement::ShutdownAfterCatchingUp { .. 
} => {\n                        panic!(\"a control announcement requesting a shutdown after catching up was received\")\n                    }\n                    fatal @ ControlAnnouncement::FatalError { .. } => {\n                        panic!(\n                            \"a control announcement requesting a fatal error was received: {}\",\n                            fatal\n                        )\n                    }\n                    ControlAnnouncement::QueueDumpRequest { .. } => {\n                        panic!(\"queue dumps are not supported in the test harness\")\n                    }\n                    ControlAnnouncement::ActivateFailpoint { .. } => {\n                        panic!(\"currently no failpoint activations implemented in test harness\")\n                        // TODO: forward to component instead\n                    },\n                }\n            }\n\n            // Barring a `fatal`, the channel should never be closed, ever.\n            panic!(\n                \"request for {} channel closed with return value \\\"{}\\\" in unit test harness\",\n                type_name::<T>(),\n                err,\n            );\n        })\n    }\n\n    /// Sends a single event to a component, returning the created effects.\n    #[inline]\n    pub(crate) fn send_event<C>(&mut self, component: &mut C, ev: C::Event) -> Effects<C::Event>\n    where\n        C: Component<REv>,\n    {\n        component.handle_event(self.effect_builder, &mut self.rng, ev)\n    }\n}\n\nimpl<REv: 'static + Debug> Default for ComponentHarness<REv> {\n    fn default() -> Self {\n        Self::builder().build()\n    }\n}\n\n/// A special event for unit tests.\n///\n/// Essentially discards most events (they are not even processed by the unit testing harness),\n/// except for control announcements, which are preserved.\n#[derive(Debug, From)]\npub(crate) enum UnitTestEvent {\n    /// A preserved control announcement.\n    #[from]\n    
ControlAnnouncement(ControlAnnouncement),\n    #[from]\n    FatalAnnouncement(FatalAnnouncement),\n    /// A network request made by the component under test.\n    #[from]\n    NetworkRequest(#[allow(dead_code)] NetworkRequest<Message>),\n}\n\nimpl ReactorEvent for UnitTestEvent {\n    fn is_control(&self) -> bool {\n        match self {\n            UnitTestEvent::ControlAnnouncement(_) | UnitTestEvent::FatalAnnouncement(_) => true,\n            UnitTestEvent::NetworkRequest(_) => false,\n        }\n    }\n\n    fn try_into_control(self) -> Option<ControlAnnouncement> {\n        match self {\n            UnitTestEvent::ControlAnnouncement(ctrl_ann) => Some(ctrl_ann),\n            UnitTestEvent::FatalAnnouncement(FatalAnnouncement { file, line, msg }) => {\n                Some(ControlAnnouncement::FatalError { file, line, msg })\n            }\n            UnitTestEvent::NetworkRequest(_) => None,\n        }\n    }\n}\n\n/// Helper function to simulate the passage of time.\npub(crate) async fn advance_time(duration: time::Duration) {\n    tokio::time::pause();\n    tokio::time::advance(duration).await;\n    tokio::time::resume();\n    debug!(\"advanced time by {} secs\", duration.as_secs());\n}\n\n/// Assert that the file at `schema_path` matches the provided `actual_schema`, which can be derived\n/// from `schemars::schema_for!` or `schemars::schema_for_value!`, for example. 
This method will\n/// create a temporary file with the actual schema and print the location if it fails.\npub fn assert_schema(schema_path: String, actual_schema: String) {\n    let expected_schema = fs::read_to_string(&schema_path).unwrap();\n    let expected_schema: Value = serde_json::from_str(&expected_schema).unwrap();\n    let mut temp_file = tempfile::Builder::new()\n        .suffix(\".json\")\n        .tempfile_in(env!(\"OUT_DIR\"))\n        .unwrap();\n    temp_file.write_all(actual_schema.as_bytes()).unwrap();\n    let actual_schema: Value = serde_json::from_str(&actual_schema).unwrap();\n    let (_file, temp_file_path) = temp_file.keep().unwrap();\n\n    let result = assert_json_matches_no_panic(\n        &actual_schema,\n        &expected_schema,\n        Config::new(CompareMode::Strict),\n    );\n    assert_eq!(\n        result,\n        Ok(()),\n        \"schema does not match:\\nexpected:\\n{}\\nactual:\\n{}\\n\",\n        schema_path,\n        temp_file_path.display()\n    );\n    assert_json_eq!(actual_schema, expected_schema);\n}\n\n#[cfg(test)]\nmod tests {\n    use std::collections::HashSet;\n\n    use super::{unused_port_on_localhost, ComponentHarness};\n\n    #[test]\n    fn default_works_without_panicking_for_component_harness() {\n        let _harness = ComponentHarness::<()>::default();\n    }\n\n    #[test]\n    fn can_generate_at_least_100_unused_ports() {\n        let ports: HashSet<u16> = (0..100).map(|_| unused_port_on_localhost()).collect();\n\n        assert_eq!(ports.len(), 100);\n    }\n}\n"
  },
  {
    "path": "node/src/tls.rs",
    "content": "//! Transport layer security and signing based on OpenSSL.\n//!\n//! This module wraps some of the lower-level TLS constructs to provide a reasonably safe-to-use API\n//! surface for the rest of the application. It also fixes the security parameters of the TLS level\n//! in a central place.\n//!\n//! Features include\n//!\n//! * a fixed set of chosen encryption parameters\n//!   ([`SIGNATURE_ALGORITHM`](constant.SIGNATURE_ALGORITHM.html),\n//!   [`SIGNATURE_CURVE`](constant.SIGNATURE_CURVE.html),\n//!   [`SIGNATURE_DIGEST`](constant.SIGNATURE_DIGEST.html)),\n//! * construction of TLS acceptors for listening TCP sockets\n//!   ([`create_tls_acceptor`](fn.create_tls_acceptor.html)),\n//! * construction of TLS connectors for outgoing TCP connections\n//!   ([`create_tls_connector`](fn.create_tls_connector.html)),\n//! * creation and validation of self-signed certificates\n//!   ([`generate_node_cert`](fn.generate_node_cert.html)),\n//! * signing and verification of arbitrary values using keys from certificates\n//!   ([`Signature`](struct.Signature.html), [`Signed`](struct.Signed.html)), and\n//! 
* `serde` support for certificates ([`x509_serde`](x509_serde/index.html))\n\nuse std::{\n    cmp::Ordering,\n    convert::TryInto,\n    fmt::{self, Debug, Display, Formatter},\n    hash::Hash,\n    marker::PhantomData,\n    path::Path,\n    str,\n    time::{SystemTime, UNIX_EPOCH},\n};\n\nuse casper_types::file_utils::{read_file, ReadFileError};\nuse datasize::DataSize;\nuse hex_fmt::HexFmt;\nuse nid::Nid;\nuse openssl::{\n    asn1::{Asn1Integer, Asn1IntegerRef, Asn1Time},\n    bn::{BigNum, BigNumContext},\n    ec::{self, EcKey},\n    error::ErrorStack,\n    hash::{DigestBytes, MessageDigest},\n    nid,\n    pkey::{PKey, PKeyRef, Private, Public},\n    sha,\n    ssl::{SslAcceptor, SslConnector, SslContextBuilder, SslMethod, SslVerifyMode, SslVersion},\n    x509::{X509Builder, X509Name, X509NameBuilder, X509NameRef, X509Ref, X509},\n};\n#[cfg(test)]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\nuse serde::{Deserialize, Serialize};\nuse thiserror::Error;\n\n// This is inside a private module so that the generated `BigArray` does not form part of this\n// crate's public API, and hence also doesn't appear in the rustdocs.\nmod big_array {\n    use serde_big_array::big_array;\n\n    big_array! 
{ BigArray; }\n}\n\n/// The chosen signature algorithm (**ECDSA with SHA512**).\nconst SIGNATURE_ALGORITHM: Nid = Nid::ECDSA_WITH_SHA512;\n\n/// The underlying elliptic curve (**P-521**).\nconst SIGNATURE_CURVE: Nid = Nid::SECP521R1;\n\n/// The chosen signature algorithm (**SHA512**).\nconst SIGNATURE_DIGEST: Nid = Nid::SHA512;\n\n/// OpenSSL result type alias.\n///\n/// Many functions rely solely on `openssl` functions and return this kind of result.\ntype SslResult<T> = Result<T, ErrorStack>;\n\n/// SHA512 hash.\n#[derive(Copy, Clone, DataSize, Deserialize, Serialize)]\npub struct Sha512(#[serde(with = \"big_array::BigArray\")] [u8; Sha512::SIZE]);\n\nimpl Sha512 {\n    /// Size of digest in bytes.\n    const SIZE: usize = 64;\n\n    /// OpenSSL NID.\n    const NID: Nid = Nid::SHA512;\n\n    /// Create a new Sha512 by hashing a slice.\n    pub fn new<B: AsRef<[u8]>>(data: B) -> Self {\n        let mut openssl_sha = sha::Sha512::new();\n        openssl_sha.update(data.as_ref());\n        Sha512(openssl_sha.finish())\n    }\n\n    /// Returns bytestring of the hash, with length `Self::SIZE`.\n    fn bytes(&self) -> &[u8] {\n        let bs = &self.0[..];\n\n        debug_assert_eq!(bs.len(), Self::SIZE);\n        bs\n    }\n\n    /// Converts an OpenSSL digest into an `Sha512`.\n    fn from_openssl_digest(digest: &DigestBytes) -> Self {\n        let digest_bytes = digest.as_ref();\n\n        debug_assert_eq!(\n            digest_bytes.len(),\n            Self::SIZE,\n            \"digest is not the right size - check constants in `tls.rs`\"\n        );\n\n        let mut buf = [0; Self::SIZE];\n        buf.copy_from_slice(&digest_bytes[0..Self::SIZE]);\n\n        Sha512(buf)\n    }\n\n    /// Returns a new OpenSSL `MessageDigest` set to SHA-512.\n    fn create_message_digest() -> MessageDigest {\n        // This can only fail if we specify a `Nid` that does not exist, which cannot happen unless\n        // there is something wrong with `Self::NID`.\n        
MessageDigest::from_nid(Self::NID).expect(\"Sha512::NID is invalid\")\n    }\n}\n\n/// Certificate fingerprint.\n#[derive(Copy, Clone, DataSize, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]\npub(crate) struct CertFingerprint(Sha512);\n\nimpl Debug for CertFingerprint {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"CertFingerprint({:10})\", HexFmt(self.0.bytes()))\n    }\n}\n\n/// Public key fingerprint.\n#[derive(Copy, Clone, DataSize, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]\npub struct KeyFingerprint(Sha512);\n\nimpl KeyFingerprint {\n    /// Size of digest in bytes.\n    pub const LENGTH: usize = Sha512::SIZE;\n}\n\nimpl AsRef<[u8]> for KeyFingerprint {\n    fn as_ref(&self) -> &[u8] {\n        self.0.bytes()\n    }\n}\n\nimpl Debug for KeyFingerprint {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"KeyFingerprint({:10})\", HexFmt(self.0.bytes()))\n    }\n}\n\nimpl From<[u8; KeyFingerprint::LENGTH]> for KeyFingerprint {\n    fn from(raw_bytes: [u8; KeyFingerprint::LENGTH]) -> Self {\n        KeyFingerprint(Sha512(raw_bytes))\n    }\n}\n\nimpl From<Sha512> for KeyFingerprint {\n    fn from(hash: Sha512) -> Self {\n        Self(hash)\n    }\n}\n\n#[cfg(test)]\nimpl Distribution<KeyFingerprint> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> KeyFingerprint {\n        let mut bytes = [0u8; Sha512::SIZE];\n        rng.fill(&mut bytes[..]);\n        bytes.into()\n    }\n}\n\n/// Cryptographic signature.\n#[derive(Clone, Deserialize, Eq, Hash, PartialEq, Serialize)]\nstruct Signature(Vec<u8>);\n\nimpl Debug for Signature {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"Signature({:10})\", HexFmt(&self.0))\n    }\n}\n\n/// TLS certificate.\n///\n/// Thin wrapper around `X509` enabling things like Serde serialization and fingerprint caching.\n#[derive(Clone, DataSize)]\npub struct TlsCert {\n    /// The wrapped 
x509 certificate.\n    #[data_size(skip)] // Skip OpenSSL type.\n    x509: X509,\n\n    /// Cached certificate fingerprint.\n    cert_fingerprint: CertFingerprint,\n\n    /// Cached public key fingerprint.\n    key_fingerprint: KeyFingerprint,\n}\n\nimpl TlsCert {\n    /// Returns the certificate's fingerprint.\n    ///\n    /// In contrast to the `public_key_fingerprint`, this fingerprint also contains the certificate\n    /// information.\n    pub(crate) fn fingerprint(&self) -> CertFingerprint {\n        self.cert_fingerprint\n    }\n\n    /// Returns the public key fingerprint.\n    pub(crate) fn public_key_fingerprint(&self) -> KeyFingerprint {\n        self.key_fingerprint\n    }\n\n    /// Returns a reference to the inner x509 certificate.\n    pub(crate) fn as_x509(&self) -> &X509 {\n        &self.x509\n    }\n}\n\nimpl Debug for TlsCert {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"TlsCert({:?})\", self.fingerprint())\n    }\n}\n\nimpl Hash for TlsCert {\n    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {\n        self.fingerprint().hash(state);\n    }\n}\n\nimpl PartialEq for TlsCert {\n    fn eq(&self, other: &Self) -> bool {\n        self.fingerprint() == other.fingerprint()\n    }\n}\n\nimpl Eq for TlsCert {}\n\n/// Error during loading a x509 certificate.\n#[derive(Debug, Error, Serialize)]\npub enum LoadCertError {\n    #[error(\"could not load certificate file: {0}\")]\n    ReadFile(\n        #[serde(skip_serializing)]\n        #[source]\n        ReadFileError,\n    ),\n    #[error(\"unable to load x509 certificate {0:?}\")]\n    X509CertFromPem(\n        #[serde(skip_serializing)]\n        #[source]\n        ErrorStack,\n    ),\n}\n\n/// Load a certificate from a file.\npub(crate) fn load_cert<P: AsRef<Path>>(src: P) -> Result<X509, LoadCertError> {\n    let pem = read_file(src.as_ref()).map_err(LoadCertError::ReadFile)?;\n    X509::from_pem(&pem).map_err(LoadCertError::X509CertFromPem)\n}\n\n/// Error 
during loading a secret key.\n#[derive(Debug, Error, Serialize)]\npub(crate) enum LoadSecretKeyError {\n    #[error(\"could not load secret key file: {0}\")]\n    ReadFile(\n        #[serde(skip_serializing)]\n        #[source]\n        ReadFileError,\n    ),\n    #[error(\"unable to load private key from pem {0:?}\")]\n    PrivateKeyFromPem(\n        #[serde(skip_serializing)]\n        #[source]\n        ErrorStack,\n    ),\n}\n\npub(crate) fn load_secret_key<P: AsRef<Path>>(src: P) -> Result<PKey<Private>, LoadSecretKeyError> {\n    let pem = read_file(src.as_ref()).map_err(LoadSecretKeyError::ReadFile)?;\n    PKey::private_key_from_pem(&pem).map_err(LoadSecretKeyError::PrivateKeyFromPem)\n}\n\n/// A signed value.\n///\n/// Combines a value `V` with a `Signature` and a signature scheme. The signature scheme involves\n/// serializing the value to bytes and signing the result.\n#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]\npub struct Signed<V> {\n    data: Vec<u8>,\n    signature: Signature,\n    _phantom: PhantomData<V>,\n}\n\n/// Generates a self-signed (key, certificate) pair suitable for TLS and signing.\n///\n/// The common name of the certificate will be \"casper-node\".\npub fn generate_node_cert() -> SslResult<(X509, PKey<Private>)> {\n    let private_key = generate_private_key()?;\n    let cert = generate_cert(&private_key, \"casper-node\")?;\n\n    Ok((cert, private_key))\n}\n\n/// Creates a TLS acceptor for a server.\n///\n/// The acceptor will restrict TLS parameters to secure one defined in this crate that are\n/// compatible with connectors built with `create_tls_connector`.\n///\n/// Incoming certificates must still be validated using `validate_cert`.\npub(crate) fn create_tls_acceptor(\n    cert: &X509Ref,\n    private_key: &PKeyRef<Private>,\n) -> SslResult<SslAcceptor> {\n    let mut builder = SslAcceptor::mozilla_modern_v5(SslMethod::tls_server())?;\n    set_context_options(&mut builder, cert, private_key)?;\n\n    
Ok(builder.build())\n}\n\n/// Creates a TLS acceptor for a client.\n///\n/// A connector compatible with the acceptor created using `create_tls_acceptor`. Server\n/// certificates must always be validated using `validate_cert` after connecting.\npub(crate) fn create_tls_connector(\n    cert: &X509Ref,\n    private_key: &PKeyRef<Private>,\n) -> SslResult<SslConnector> {\n    let mut builder = SslConnector::builder(SslMethod::tls_client())?;\n    set_context_options(&mut builder, cert, private_key)?;\n\n    Ok(builder.build())\n}\n\n/// Sets common options of both acceptor and connector on TLS context.\n///\n/// Used internally to set various TLS parameters.\nfn set_context_options(\n    ctx: &mut SslContextBuilder,\n    cert: &X509Ref,\n    private_key: &PKeyRef<Private>,\n) -> SslResult<()> {\n    ctx.set_min_proto_version(Some(SslVersion::TLS1_3))?;\n\n    ctx.set_certificate(cert)?;\n    ctx.set_private_key(private_key)?;\n    ctx.check_private_key()?;\n\n    // Note that this does not seem to work as one might naively expect; the client can still send\n    // no certificate and there will be no error from OpenSSL. 
For this reason, we pass set `PEER`\n    // (causing the request of a cert), but pass all of them through and verify them after the\n    // handshake has completed.\n    ctx.set_verify_callback(SslVerifyMode::PEER, |_, _| true);\n\n    Ok(())\n}\n\n/// Error during certificate validation.\n#[derive(Debug, Error, Serialize)]\npub enum ValidationError {\n    /// Failed to read public key from certificate.\n    #[error(\"error reading public key from certificate: {0:?}\")]\n    CannotReadPublicKey(\n        #[serde(skip_serializing)]\n        #[source]\n        ErrorStack,\n    ),\n    /// Failed to read subject or issuer name.\n    #[error(\"error reading subject or issuer name: {0:?}\")]\n    CorruptSubjectOrIssuer(\n        #[serde(skip_serializing)]\n        #[source]\n        ErrorStack,\n    ),\n    /// Wrong signature scheme.\n    #[error(\"wrong signature scheme\")]\n    WrongSignatureAlgorithm,\n    /// Failed to read or convert times.\n    #[error(\"there was an issue reading or converting times: {0:?}\")]\n    TimeIssue(\n        #[serde(skip_serializing)]\n        #[source]\n        ErrorStack,\n    ),\n    /// Certificate not yet valid.\n    #[error(\"the certificate is not yet valid\")]\n    NotYetValid,\n    /// Certificate expired.\n    #[error(\"the certificate expired\")]\n    Expired,\n    /// Serial number could not be compared to the reference.\n    #[error(\"the serial number could not be compared to the reference: {0:?}\")]\n    InvalidSerialNumber(\n        #[serde(skip_serializing)]\n        #[source]\n        ErrorStack,\n    ),\n    /// Wrong serial number.\n    #[error(\"wrong serial number\")]\n    WrongSerialNumber,\n    /// No valid elliptic curve key could be extracted from certificate.\n    #[error(\"no valid elliptic curve key could be extracted from certificate: {0:?}\")]\n    CouldNotExtractEcKey(\n        #[serde(skip_serializing)]\n        #[source]\n        ErrorStack,\n    ),\n    /// Public key failed sanity check.\n    
#[error(\"the given public key fails basic sanity checks: {0:?}\")]\n    KeyFailsCheck(\n        #[serde(skip_serializing)]\n        #[source]\n        ErrorStack,\n    ),\n    /// Wrong elliptic curve.\n    #[error(\"underlying elliptic curve is wrong\")]\n    WrongCurve,\n    /// Certificate not self-signed.\n    #[error(\"certificate is not self-signed\")]\n    NotSelfSigned,\n    /// Failed to validate signature.\n    #[error(\"the signature could not be validated\")]\n    FailedToValidateSignature(\n        #[serde(skip_serializing)]\n        #[source]\n        ErrorStack,\n    ),\n    /// Invalid signature.\n    #[error(\"the signature is invalid\")]\n    InvalidSignature,\n    /// Invalid fingerprint.\n    #[error(\"failed to read fingerprint\")]\n    InvalidFingerprint(\n        #[serde(skip_serializing)]\n        #[source]\n        ErrorStack,\n    ),\n    /// Failed to create a big num context.\n    #[error(\"could not create a big num context\")]\n    BigNumContextNotAvailable(\n        #[serde(skip_serializing)]\n        #[source]\n        ErrorStack,\n    ),\n    /// Failed to encode public key.\n    #[error(\"could not encode public key as bytes\")]\n    PublicKeyEncodingFailed(\n        #[serde(skip_serializing)]\n        #[source]\n        ErrorStack,\n    ),\n    /// Wrong certificate authority.\n    #[error(\"the certificate is not signed by provided certificate authority\")]\n    WrongCertificateAuthority,\n    /// Failed to read public key from certificate.\n    #[error(\"error reading public key from ca certificate: {0:?}\")]\n    CannotReadCAPublicKey(\n        #[serde(skip_serializing)]\n        #[source]\n        ErrorStack,\n    ),\n}\n\n/// Checks that the certificate is signed by a provided certificate authority and returns the\n/// fingerprint of the public key.\npub(crate) fn validate_cert_with_authority(\n    cert: X509,\n    ca: &X509,\n) -> Result<TlsCert, ValidationError> {\n    let authority_key = ca\n        .public_key()\n        
.map_err(ValidationError::CannotReadCAPublicKey)?;\n\n    validate_cert_expiration_date(&cert)?;\n\n    if !cert\n        .verify(authority_key.as_ref())\n        .map_err(ValidationError::FailedToValidateSignature)?\n    {\n        return Err(ValidationError::WrongCertificateAuthority);\n    }\n\n    // Ensure that the key is using the correct curve parameters.\n    tls_cert_from_x509(cert)\n}\n\n/// Checks that the cryptographic parameters on a certificate are correct and returns the\n/// fingerprint of the public key.\n///\n/// At the very least this ensures that no weaker ciphers have been used to forge a certificate.\npub(crate) fn validate_self_signed_cert(cert: X509) -> Result<TlsCert, ValidationError> {\n    if cert.signature_algorithm().object().nid() != SIGNATURE_ALGORITHM {\n        // The signature algorithm is not of the exact kind we are using to generate our\n        // certificates, an attacker could have used a weaker one to generate colliding keys.\n        return Err(ValidationError::WrongSignatureAlgorithm);\n    }\n    // TODO: Lock down extensions on the certificate --- if we manage to lock down the whole cert in\n    //       a way that no additional bytes can be added (all fields are either known or of fixed\n    //       length) we would have an additional hurdle for preimage attacks to clear.\n\n    let subject =\n        name_to_string(cert.subject_name()).map_err(ValidationError::CorruptSubjectOrIssuer)?;\n    let issuer =\n        name_to_string(cert.issuer_name()).map_err(ValidationError::CorruptSubjectOrIssuer)?;\n    if subject != issuer {\n        // All of our certificates are self-signed, so it cannot hurt to check.\n        return Err(ValidationError::NotSelfSigned);\n    }\n\n    // All our certificates have serial number 1.\n    if !num_eq(cert.serial_number(), 1).map_err(ValidationError::InvalidSerialNumber)? 
{\n        return Err(ValidationError::WrongSerialNumber);\n    }\n\n    // Check expiration times against current time.\n    validate_cert_expiration_date(&cert)?;\n\n    // Ensure that the key is using the correct curve parameters.\n    let (public_key, ec_key) = validate_cert_ec_key(&cert)?;\n    if ec_key.group().curve_name() != Some(SIGNATURE_CURVE) {\n        // The underlying curve is not the one we chose.\n        return Err(ValidationError::WrongCurve);\n    }\n\n    // Finally we can check the actual signature.\n    if !cert\n        .verify(&public_key)\n        .map_err(ValidationError::FailedToValidateSignature)?\n    {\n        return Err(ValidationError::InvalidSignature);\n    }\n\n    tls_cert_from_x509_and_key(cert, ec_key)\n}\n\n/// Creates a [`TlsCert`] instance from [`X509`] cert instance.\n///\n/// This function only ensures that the cert contains EC public key, and is suitable for quickly\n/// validating certs signed by CA.\npub(crate) fn tls_cert_from_x509(cert: X509) -> Result<TlsCert, ValidationError> {\n    let (_public_key, ec_key) = validate_cert_ec_key(&cert)?;\n    tls_cert_from_x509_and_key(cert, ec_key)\n}\n\nfn tls_cert_from_x509_and_key(\n    cert: X509,\n    ec_key: EcKey<Public>,\n) -> Result<TlsCert, ValidationError> {\n    let cert_fingerprint = cert_fingerprint(&cert)?;\n    let key_fingerprint = key_fingerprint(&ec_key)?;\n    Ok(TlsCert {\n        x509: cert,\n        cert_fingerprint,\n        key_fingerprint,\n    })\n}\n\n/// Calculate a fingerprint for the X509 certificate.\npub(crate) fn cert_fingerprint(cert: &X509) -> Result<CertFingerprint, ValidationError> {\n    assert_eq!(Sha512::NID, SIGNATURE_DIGEST);\n    let digest = &cert\n        .digest(Sha512::create_message_digest())\n        .map_err(ValidationError::InvalidFingerprint)?;\n    let cert_fingerprint = CertFingerprint(Sha512::from_openssl_digest(digest));\n    Ok(cert_fingerprint)\n}\n\n/// Calculate a fingerprint for the public EC key.\npub(crate) fn 
key_fingerprint(ec_key: &EcKey<Public>) -> Result<KeyFingerprint, ValidationError> {\n    let mut big_num_context =\n        BigNumContext::new().map_err(ValidationError::BigNumContextNotAvailable)?;\n    let buf = ec_key\n        .public_key()\n        .to_bytes(\n            ec::EcGroup::from_curve_name(SIGNATURE_CURVE)\n                .expect(\"broken constant SIGNATURE_CURVE\")\n                .as_ref(),\n            ec::PointConversionForm::COMPRESSED,\n            &mut big_num_context,\n        )\n        .map_err(ValidationError::PublicKeyEncodingFailed)?;\n    let key_fingerprint = KeyFingerprint(Sha512::new(buf));\n    Ok(key_fingerprint)\n}\n\n/// Validate cert's public key, and it's EC key parameters.\nfn validate_cert_ec_key(cert: &X509) -> Result<(PKey<Public>, EcKey<Public>), ValidationError> {\n    let public_key = cert\n        .public_key()\n        .map_err(ValidationError::CannotReadPublicKey)?;\n    let ec_key = public_key\n        .ec_key()\n        .map_err(ValidationError::CouldNotExtractEcKey)?;\n    ec_key.check_key().map_err(ValidationError::KeyFailsCheck)?;\n    Ok((public_key, ec_key))\n}\n\n/// Check cert's expiration times against current time.\nfn validate_cert_expiration_date(cert: &X509) -> Result<(), ValidationError> {\n    let asn1_now = Asn1Time::from_unix(now()).map_err(ValidationError::TimeIssue)?;\n    if asn1_now\n        .compare(cert.not_before())\n        .map_err(ValidationError::TimeIssue)?\n        != Ordering::Greater\n    {\n        return Err(ValidationError::NotYetValid);\n    }\n\n    if asn1_now\n        .compare(cert.not_after())\n        .map_err(ValidationError::TimeIssue)?\n        != Ordering::Less\n    {\n        return Err(ValidationError::Expired);\n    }\n\n    Ok(())\n}\n\n/// Returns an OpenSSL compatible timestamp.\nfn now() -> i64 {\n    // Note: We could do the timing dance a little better going straight to the UNIX time functions,\n    //       but this saves us having to bring in `libc` as a 
dependency.\n    let now = SystemTime::now();\n    let ts: i64 = now\n        .duration_since(UNIX_EPOCH)\n        // This should work unless the clock is set to before 1970.\n        .expect(\"Great Scott! Your clock is horribly broken, Marty.\")\n        .as_secs()\n        // This will fail past year 2038 on 32 bit systems and very far into the future, both cases\n        // we consider out of scope.\n        .try_into()\n        .expect(\"32-bit systems and far future are not supported\");\n\n    ts\n}\n\n/// Creates an ASN1 integer from a `u32`.\nfn mknum(n: u32) -> Result<Asn1Integer, ErrorStack> {\n    let bn = BigNum::from_u32(n)?;\n\n    bn.to_asn1_integer()\n}\n\n/// Creates an ASN1 name from string components.\n///\n/// If `c` or `o` are empty string, they are omitted from the result.\nfn mkname(c: &str, o: &str, cn: &str) -> Result<X509Name, ErrorStack> {\n    let mut builder = X509NameBuilder::new()?;\n\n    if !c.is_empty() {\n        builder.append_entry_by_text(\"C\", c)?;\n    }\n\n    if !o.is_empty() {\n        builder.append_entry_by_text(\"O\", o)?;\n    }\n\n    builder.append_entry_by_text(\"CN\", cn)?;\n    Ok(builder.build())\n}\n\n/// Converts an `X509NameRef` to a human readable string.\nfn name_to_string(name: &X509NameRef) -> SslResult<String> {\n    let mut output = String::new();\n\n    for entry in name.entries() {\n        output.push_str(entry.object().nid().long_name()?);\n        output.push('=');\n        output.push_str(entry.data().as_utf8()?.as_ref());\n        output.push(' ');\n    }\n\n    Ok(output)\n}\n\n/// Checks if an `Asn1IntegerRef` is equal to a given u32.\nfn num_eq(num: &Asn1IntegerRef, other: u32) -> SslResult<bool> {\n    let l = num.to_bn()?;\n    let r = BigNum::from_u32(other)?;\n\n    // The `BigNum` API seems to be really lacking here.\n    Ok(l.is_negative() == r.is_negative() && l.ucmp(r.as_ref()) == Ordering::Equal)\n}\n\n/// Generates a secret key suitable for TLS encryption.\nfn generate_private_key() 
-> SslResult<PKey<Private>> {\n    // We do not care about browser-compliance, so we're free to use elliptic curves that are more\n    // likely to hold up under pressure than the NIST ones. We want to go with ED25519 because djb\n    // knows best: PKey::generate_ed25519()\n    //\n    // However the following bug currently prevents us from doing so:\n    // https://mta.openssl.org/pipermail/openssl-users/2018-July/008362.html (The same error occurs\n    // when trying to sign the cert inside the builder)\n\n    // Our second choice is 2^521-1, which is slow but a \"nice prime\".\n    // http://blog.cr.yp.to/20140323-ecdsa.html\n\n    // An alternative is https://en.bitcoin.it/wiki/Secp256k1, which puts us at level of bitcoin.\n\n    // TODO: Please verify this for accuracy!\n\n    let ec_group = ec::EcGroup::from_curve_name(SIGNATURE_CURVE)?;\n    let ec_key = EcKey::generate(ec_group.as_ref())?;\n\n    PKey::from_ec_key(ec_key)\n}\n\n/// Generates a self-signed certificate based on `private_key` with given CN.\nfn generate_cert(private_key: &PKey<Private>, cn: &str) -> SslResult<X509> {\n    let mut builder = X509Builder::new()?;\n\n    // x509 v3 commonly used, the version is 0-indexed, thus 2 == v3.\n    builder.set_version(2)?;\n\n    // The serial number is always one, since we are issuing only one cert.\n    builder.set_serial_number(mknum(1)?.as_ref())?;\n\n    let issuer = mkname(\"US\", \"Casper Blockchain\", cn)?;\n\n    // Set the issuer, subject names, putting the \"self\" in \"self-signed\".\n    builder.set_issuer_name(issuer.as_ref())?;\n    builder.set_subject_name(issuer.as_ref())?;\n\n    let ts = now();\n    // We set valid-from to one minute into the past to allow some clock-skew.\n    builder.set_not_before(Asn1Time::from_unix(ts - 60)?.as_ref())?;\n\n    // Valid-until is a little under 10 years, missing at least 2 leap days.\n    builder.set_not_after(Asn1Time::from_unix(ts + 10 * 365 * 24 * 60 * 60)?.as_ref())?;\n\n    // Set the public 
key and sign.\n    builder.set_pubkey(private_key.as_ref())?;\n    assert_eq!(Sha512::NID, SIGNATURE_DIGEST);\n    builder.sign(private_key.as_ref(), Sha512::create_message_digest())?;\n\n    let cert = builder.build();\n\n    // Cheap sanity check.\n    assert!(\n        validate_self_signed_cert(cert.clone()).is_ok(),\n        \"newly generated cert does not pass our own validity check\"\n    );\n\n    Ok(cert)\n}\n\n// Below are trait implementations for signatures and fingerprints. Both implement the full set of\n// traits that are required to stick into either a `HashMap` or `BTreeMap`.\nimpl PartialEq for Sha512 {\n    #[inline]\n    fn eq(&self, other: &Self) -> bool {\n        self.bytes() == other.bytes()\n    }\n}\n\nimpl Eq for Sha512 {}\n\nimpl Ord for Sha512 {\n    #[inline]\n    fn cmp(&self, other: &Self) -> Ordering {\n        Ord::cmp(self.bytes(), other.bytes())\n    }\n}\n\nimpl PartialOrd for Sha512 {\n    #[inline]\n    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {\n        Some(Ord::cmp(self, other))\n    }\n}\n\nimpl Debug for Sha512 {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"{}\", base16::encode_lower(&self.0[..]))\n    }\n}\n\nimpl Display for Sha512 {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"{:10}\", HexFmt(&self.0[..]))\n    }\n}\n\nimpl Display for CertFingerprint {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        Display::fmt(&self.0, f)\n    }\n}\n\nimpl Display for KeyFingerprint {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"{:10}\", HexFmt(self.0.bytes()))\n    }\n}\n\nimpl Display for Signature {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"{:10}\", HexFmt(&self.0[..]))\n    }\n}\n\nimpl<T> Display for Signed<T>\nwhere\n    T: Display + for<'de> Deserialize<'de>,\n{\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        // Decode the data 
here, even if it is expensive.\n        match bincode::deserialize::<T>(self.data.as_slice()) {\n            Ok(item) => write!(f, \"signed[{}]<{} bytes>\", self.signature, item),\n            Err(_err) => write!(f, \"signed[{}]<CORRUPT>\", self.signature),\n        }\n    }\n}\n\n// Since all `Sha512`s are already hashes, we provide a very cheap hashing function that uses\n// bytes from the fingerprint as input, cutting the number of bytes to be hashed to 1/16th.\n\n// If this is ever a performance bottleneck, a custom hasher can be added that passes these bytes\n// through unchanged.\nimpl Hash for Sha512 {\n    #[inline]\n    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {\n        // Use the first eight bytes when hashing, giving 64 bits pure entropy.\n        let mut chunk = [0u8; 8];\n\n        // TODO: Benchmark if this is really worthwhile over the automatic derivation.\n        chunk.copy_from_slice(&self.bytes()[0..8]);\n\n        state.write_u64(u64::from_le_bytes(chunk));\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn simple_name_to_string() {\n        let name = mkname(\"sc\", \"some_org\", \"some_cn\").expect(\"could not create name\");\n\n        assert_eq!(\n            name_to_string(name.as_ref()).expect(\"name to string failed\"),\n            \"countryName=sc organizationName=some_org commonName=some_cn \"\n        );\n    }\n\n    #[test]\n    fn test_validate_self_signed_cert() {\n        let (cert, private_key) = generate_node_cert().expect(\"failed to generate key, cert pair\");\n\n        // Validates self signed cert\n        let _tls_cert =\n            validate_self_signed_cert(cert).expect(\"generated self signed cert is not valid\");\n\n        // Cert signed by a CA does not validate as self signed\n        let ca_private_key = generate_private_key().expect(\"failed to generate private key\");\n        let ca_signed_cert = make_ca_signed_cert(private_key, ca_private_key);\n\n        let error = 
validate_self_signed_cert(ca_signed_cert)\n            .expect_err(\"should not validate ca signed cert as self signed\");\n        assert!(\n            matches!(error, ValidationError::InvalidSignature),\n            \"{:?}\",\n            error\n        );\n    }\n\n    #[test]\n    fn test_validate_cert_with_authority() {\n        let (ca_cert, ca_private_key) =\n            generate_node_cert().expect(\"failed to generate key, cert pair\");\n\n        let (different_ca_cert, _ca_private_key) =\n            generate_node_cert().expect(\"failed to generate key, cert pair\");\n\n        let node_private_key = generate_private_key().expect(\"failed to generate private key\");\n\n        let node_cert = make_ca_signed_cert(node_private_key, ca_private_key);\n\n        validate_self_signed_cert(node_cert.clone())\n            .expect_err(\"should not validate CA signed cert as self signed\");\n\n        let _node_tls_cert = validate_cert_with_authority(node_cert.clone(), &ca_cert)\n            .expect(\"should validate with ca cert\");\n\n        let validation_error = validate_cert_with_authority(node_cert, &different_ca_cert)\n            .expect_err(\"should not validate cert against different CA\");\n\n        assert!(\n            matches!(validation_error, ValidationError::WrongCertificateAuthority),\n            \"{:?}\",\n            validation_error\n        );\n    }\n\n    fn make_ca_signed_cert(private_key: PKey<Private>, ca_private_key: PKey<Private>) -> X509 {\n        let mut builder = X509Builder::new().unwrap();\n        builder.set_version(2).unwrap();\n        builder\n            .set_serial_number(mknum(1).unwrap().as_ref())\n            .unwrap();\n        let issuer = mkname(\"US\", \"Casper Blockchain\", \"Casper Network\").unwrap();\n        builder.set_issuer_name(issuer.as_ref()).unwrap();\n        builder.set_subject_name(issuer.as_ref()).unwrap();\n        let ts = now();\n        builder\n            
.set_not_before(Asn1Time::from_unix(ts - 60).unwrap().as_ref())\n            .unwrap();\n        builder\n            .set_not_after(\n                Asn1Time::from_unix(ts + 10 * 365 * 24 * 60 * 60)\n                    .unwrap()\n                    .as_ref(),\n            )\n            .unwrap();\n        builder.set_pubkey(private_key.as_ref()).unwrap();\n        assert_eq!(Sha512::NID, SIGNATURE_DIGEST);\n        builder\n            .sign(ca_private_key.as_ref(), Sha512::create_message_digest())\n            .unwrap();\n        builder.build()\n    }\n}\n"
  },
  {
    "path": "node/src/types/appendable_block.rs",
    "content": "use std::{\n    collections::{BTreeMap, BTreeSet},\n    fmt::{self, Display, Formatter},\n};\n\nuse datasize::DataSize;\nuse itertools::Itertools;\nuse thiserror::Error;\nuse tracing::error;\n\nuse casper_types::{\n    Approval, Gas, PublicKey, RewardedSignatures, Timestamp, TransactionConfig, TransactionHash,\n    AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID, U512,\n};\n\nuse super::{BlockPayload, TransactionFootprint, VariantMismatch};\n\n#[derive(Debug, Error)]\npub(crate) enum AddError {\n    #[error(\"would exceed maximum count for the category per block\")]\n    Count(u8),\n    #[error(\"would exceed maximum approval count per block\")]\n    ApprovalCount,\n    #[error(\"would exceed maximum gas per block\")]\n    GasLimit,\n    #[error(\"would exceed maximum block size\")]\n    BlockSize,\n    #[error(\"duplicate deploy or transaction\")]\n    Duplicate,\n    #[error(\"deploy or transaction has expired\")]\n    Expired,\n    #[error(transparent)]\n    VariantMismatch(#[from] VariantMismatch),\n    #[error(\"transaction has excessive ttl\")]\n    ExcessiveTtl,\n    #[error(\"transaction is future dated\")]\n    FutureDatedDeploy,\n}\n\n/// A block that is still being added to. 
It keeps track of and enforces block limits.\n#[derive(Clone, Eq, PartialEq, DataSize, Debug)]\npub(crate) struct AppendableBlock {\n    transaction_config: TransactionConfig,\n    current_gas_price: u8,\n    transactions: BTreeMap<TransactionHash, TransactionFootprint>,\n    timestamp: Timestamp,\n}\n\nimpl AppendableBlock {\n    /// Creates an empty `AppendableBlock`.\n    pub(crate) fn new(\n        transaction_config: TransactionConfig,\n        current_gas_price: u8,\n        timestamp: Timestamp,\n    ) -> Self {\n        AppendableBlock {\n            transaction_config,\n            current_gas_price,\n            transactions: BTreeMap::new(),\n            timestamp,\n        }\n    }\n\n    /// Attempt to append transaction to block.\n    pub(crate) fn add_transaction(\n        &mut self,\n        footprint: &TransactionFootprint,\n    ) -> Result<(), AddError> {\n        if self\n            .transactions\n            .keys()\n            .contains(&footprint.transaction_hash)\n        {\n            return Err(AddError::Duplicate);\n        }\n        if footprint.ttl > self.transaction_config.max_ttl {\n            return Err(AddError::ExcessiveTtl);\n        }\n        if footprint.timestamp > self.timestamp {\n            return Err(AddError::FutureDatedDeploy);\n        }\n        let expires = footprint.timestamp.saturating_add(footprint.ttl);\n        if expires < self.timestamp {\n            return Err(AddError::Expired);\n        }\n        let lane_id = footprint.lane_id;\n        let limit = self\n            .transaction_config\n            .transaction_v1_config\n            .get_max_transaction_count(lane_id);\n        // check total count by category\n        let count = self\n            .transactions\n            .iter()\n            .filter(|(_, item)| item.lane_id == lane_id)\n            .count();\n        if count.checked_add(1).ok_or(AddError::Count(lane_id))? 
> limit as usize {\n            return Err(AddError::Count(lane_id));\n        }\n        // check total gas\n        let gas_limit: U512 = self\n            .transactions\n            .values()\n            .map(|item| item.gas_limit.value())\n            .sum();\n        if gas_limit\n            .checked_add(footprint.gas_limit.value())\n            .ok_or(AddError::GasLimit)?\n            > U512::from(self.transaction_config.block_gas_limit)\n        {\n            return Err(AddError::GasLimit);\n        }\n        // check total byte size\n        let size: usize = self\n            .transactions\n            .values()\n            .map(|item| item.size_estimate)\n            .sum();\n        if size\n            .checked_add(footprint.size_estimate)\n            .ok_or(AddError::BlockSize)?\n            > self.transaction_config.max_block_size as usize\n        {\n            return Err(AddError::BlockSize);\n        }\n        // check total approvals\n        let count: usize = self\n            .transactions\n            .values()\n            .map(|item| item.approvals_count())\n            .sum();\n        if count\n            .checked_add(footprint.approvals_count())\n            .ok_or(AddError::ApprovalCount)?\n            > self.transaction_config.block_max_approval_count as usize\n        {\n            return Err(AddError::ApprovalCount);\n        }\n        self.transactions\n            .insert(footprint.transaction_hash, footprint.clone());\n        Ok(())\n    }\n\n    /// Creates a `BlockPayload` with the `AppendableBlock`s transactions and transfers, and the\n    /// given random bit and accusations.\n    pub(crate) fn into_block_payload(\n        self,\n        accusations: Vec<PublicKey>,\n        rewarded_signatures: RewardedSignatures,\n        random_bit: bool,\n    ) -> BlockPayload {\n        let AppendableBlock {\n            transactions: footprints,\n            current_gas_price: price,\n            ..\n        } = self;\n\n      
  fn collate(\n            lane: u8,\n            collater: &mut BTreeMap<u8, Vec<(TransactionHash, BTreeSet<Approval>)>>,\n            items: &BTreeMap<TransactionHash, TransactionFootprint>,\n        ) {\n            let mut ret = vec![];\n            for (x, y) in items.iter().filter(|(_, y)| y.lane_id == lane) {\n                ret.push((*x, y.approvals.clone()));\n            }\n            if !ret.is_empty() {\n                collater.insert(lane, ret);\n            }\n        }\n\n        let mut transactions = BTreeMap::new();\n        collate(MINT_LANE_ID, &mut transactions, &footprints);\n        collate(AUCTION_LANE_ID, &mut transactions, &footprints);\n        collate(INSTALL_UPGRADE_LANE_ID, &mut transactions, &footprints);\n        for lane_id in self\n            .transaction_config\n            .transaction_v1_config\n            .wasm_lanes()\n            .iter()\n            .map(|lane| lane.id())\n        {\n            collate(lane_id, &mut transactions, &footprints);\n        }\n\n        BlockPayload::new(\n            transactions,\n            accusations,\n            rewarded_signatures,\n            random_bit,\n            price,\n        )\n    }\n\n    pub(crate) fn timestamp(&self) -> Timestamp {\n        self.timestamp\n    }\n\n    fn category_lane(&self, lane: u8) -> usize {\n        self.transactions\n            .iter()\n            .filter(|(_, f)| f.lane_id == lane)\n            .count()\n    }\n\n    #[cfg(test)]\n    pub fn transaction_count(&self) -> usize {\n        self.transactions.len()\n    }\n}\n\nimpl Display for AppendableBlock {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        let total_count = self.transactions.len();\n        let mint_count = self.category_lane(MINT_LANE_ID);\n        let auction_count = self.category_lane(AUCTION_LANE_ID);\n        let install_upgrade_count = self.category_lane(INSTALL_UPGRADE_LANE_ID);\n        let wasm_count = total_count - mint_count - 
auction_count - install_upgrade_count;\n        let total_gas_limit: Gas = self\n            .transactions\n            .values()\n            .map(|f| f.gas_limit)\n            .try_fold(Gas::new(0), |acc, gas| acc.checked_add(gas))\n            .unwrap_or(Gas::MAX);\n        let total_approvals_count: usize = self\n            .transactions\n            .values()\n            .map(|f| f.approvals_count())\n            .sum();\n        let total_size_estimate: usize = self.transactions.values().map(|f| f.size_estimate).sum();\n\n        write!(\n            formatter,\n            \"AppendableBlock(timestamp-{}:\n                mint: {mint_count}, \\\n                auction: {auction_count}, \\\n                install_upgrade: {install_upgrade_count}, \\\n                wasm: {wasm_count}, \\\n                total count: {total_count}, \\\n                approvals: {total_approvals_count}, \\\n                gas: {total_gas_limit}, \\\n                size: {total_size_estimate})\",\n            self.timestamp,\n        )\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use casper_types::{testing::TestRng, SingleBlockRewardedSignatures, TimeDiff};\n\n    use crate::testing::LARGE_WASM_LANE_ID;\n\n    use super::*;\n    use std::collections::HashSet;\n\n    impl AppendableBlock {\n        pub(crate) fn transaction_hashes(&self) -> HashSet<TransactionHash> {\n            self.transactions.keys().copied().collect()\n        }\n    }\n\n    #[test]\n    pub fn should_build_block_payload_from_all_transactions() {\n        let mut test_rng = TestRng::new();\n        let mut appendable_block = AppendableBlock::new(\n            TransactionConfig::default(),\n            0,\n            Timestamp::now() + TimeDiff::from_millis(15000),\n        );\n        let transfer_footprint = TransactionFootprint::random_of_lane(MINT_LANE_ID, &mut test_rng);\n        let auction_footprint =\n            TransactionFootprint::random_of_lane(AUCTION_LANE_ID, &mut test_rng);\n        
let install_upgrade_footprint =\n            TransactionFootprint::random_of_lane(INSTALL_UPGRADE_LANE_ID, &mut test_rng);\n        let large_wasm_footprint =\n            TransactionFootprint::random_of_lane(LARGE_WASM_LANE_ID, &mut test_rng);\n        let signatures = RewardedSignatures::new(vec![SingleBlockRewardedSignatures::random(\n            &mut test_rng,\n            2,\n        )]);\n        appendable_block\n            .add_transaction(&transfer_footprint)\n            .unwrap();\n        appendable_block\n            .add_transaction(&auction_footprint)\n            .unwrap();\n        appendable_block\n            .add_transaction(&install_upgrade_footprint)\n            .unwrap();\n        appendable_block\n            .add_transaction(&large_wasm_footprint)\n            .unwrap();\n        let block_payload = appendable_block.into_block_payload(vec![], signatures.clone(), false);\n        let transaction_hashes: BTreeSet<TransactionHash> =\n            block_payload.all_transaction_hashes().collect();\n        assert!(transaction_hashes.contains(&transfer_footprint.transaction_hash));\n        assert!(transaction_hashes.contains(&auction_footprint.transaction_hash));\n        assert!(transaction_hashes.contains(&install_upgrade_footprint.transaction_hash));\n        assert!(transaction_hashes.contains(&large_wasm_footprint.transaction_hash));\n        assert_eq!(transaction_hashes.len(), 4);\n        assert_eq!(*block_payload.rewarded_signatures(), signatures);\n    }\n}\n"
  },
  {
    "path": "node/src/types/block/approvals_hashes.rs",
    "content": "use std::{\n    collections::BTreeMap,\n    fmt::{self, Display, Formatter},\n};\n\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\nuse thiserror::Error;\nuse tracing::error;\n\nuse casper_types::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    global_state::TrieMerkleProof,\n    ApprovalsHash, Block, BlockHash, BlockV1, BlockV2, DeployId, Digest, Key, StoredValue,\n    TransactionId,\n};\n\nuse crate::{\n    components::{\n        contract_runtime::APPROVALS_CHECKSUM_NAME,\n        fetcher::{FetchItem, Tag},\n    },\n    types::{self, VariantMismatch},\n};\n\nuse casper_storage::global_state::trie_store::operations::compute_state_hash;\n\n/// The data which is gossiped by validators to non-validators upon creation of a new block.\n#[derive(DataSize, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]\npub(crate) struct ApprovalsHashes {\n    /// Hash of the block that contains transactions that are relevant to the approvals.\n    block_hash: BlockHash,\n    /// The set of all transactions' finalized approvals' hashes.\n    approvals_hashes: Vec<ApprovalsHash>,\n    /// The Merkle proof of the checksum registry containing the checksum of the finalized\n    /// approvals.\n    #[data_size(skip)]\n    merkle_proof_approvals: TrieMerkleProof<Key, StoredValue>,\n}\n\nimpl ApprovalsHashes {\n    #[allow(dead_code)]\n    pub(crate) fn new(\n        block_hash: BlockHash,\n        approvals_hashes: Vec<ApprovalsHash>,\n        merkle_proof_approvals: TrieMerkleProof<Key, StoredValue>,\n    ) -> Self {\n        Self {\n            block_hash,\n            approvals_hashes,\n            merkle_proof_approvals,\n        }\n    }\n\n    fn verify(&self, block: &Block) -> Result<(), ApprovalsHashesValidationError> {\n        let merkle_proof_approvals = &self.merkle_proof_approvals;\n        if *merkle_proof_approvals.key() != Key::ChecksumRegistry {\n            return Err(ApprovalsHashesValidationError::InvalidKeyType);\n        }\n\n       
 let proof_state_root_hash = compute_state_hash(merkle_proof_approvals)\n            .map_err(ApprovalsHashesValidationError::TrieMerkleProof)?;\n\n        if proof_state_root_hash != *block.state_root_hash() {\n            return Err(ApprovalsHashesValidationError::StateRootHashMismatch {\n                proof_state_root_hash,\n                block_state_root_hash: *block.state_root_hash(),\n            });\n        }\n\n        let value_in_proof = merkle_proof_approvals\n            .value()\n            .as_cl_value()\n            .and_then(|cl_value| cl_value.clone().into_t().ok())\n            .and_then(|registry: BTreeMap<String, Digest>| {\n                registry.get(APPROVALS_CHECKSUM_NAME).copied()\n            })\n            .ok_or(ApprovalsHashesValidationError::InvalidChecksumRegistry)?;\n\n        let computed_approvals_checksum = match block {\n            Block::V1(v1_block) => compute_legacy_approvals_checksum(self.deploy_ids(v1_block))?,\n            Block::V2(v2_block) => {\n                types::compute_approvals_checksum(self.transaction_ids(v2_block))\n                    .map_err(ApprovalsHashesValidationError::ApprovalsChecksum)?\n            }\n        };\n\n        if value_in_proof != computed_approvals_checksum {\n            return Err(ApprovalsHashesValidationError::ApprovalsChecksumMismatch {\n                computed_approvals_checksum,\n                value_in_proof,\n            });\n        }\n\n        Ok(())\n    }\n\n    pub(crate) fn deploy_ids(&self, v1_block: &BlockV1) -> Vec<DeployId> {\n        let approval_hashes = &self.approvals_hashes;\n        v1_block\n            .deploy_and_transfer_hashes()\n            .zip(approval_hashes)\n            .map(|(x, y)| DeployId::new(*x, *y))\n            .collect()\n    }\n\n    pub fn transaction_ids(&self, v2_block: &BlockV2) -> Vec<TransactionId> {\n        let approval_hashes = &self.approvals_hashes;\n        v2_block\n            .all_transactions()\n            
.zip(approval_hashes)\n            .map(|(x, y)| TransactionId::new(*x, *y))\n            .collect()\n    }\n\n    pub(crate) fn block_hash(&self) -> &BlockHash {\n        &self.block_hash\n    }\n}\n\nimpl FetchItem for ApprovalsHashes {\n    type Id = BlockHash;\n    type ValidationError = ApprovalsHashesValidationError;\n    type ValidationMetadata = Block;\n\n    const TAG: Tag = Tag::ApprovalsHashes;\n\n    fn fetch_id(&self) -> Self::Id {\n        *self.block_hash()\n    }\n\n    fn validate(&self, block: &Block) -> Result<(), Self::ValidationError> {\n        self.verify(block)\n    }\n}\n\nimpl Display for ApprovalsHashes {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"approvals hashes for {}\", self.block_hash())\n    }\n}\n\nimpl ToBytes for ApprovalsHashes {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.block_hash.write_bytes(writer)?;\n        self.approvals_hashes.write_bytes(writer)?;\n        self.merkle_proof_approvals.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.block_hash.serialized_length()\n            + self.approvals_hashes.serialized_length()\n            + self.merkle_proof_approvals.serialized_length()\n    }\n}\n\nimpl FromBytes for ApprovalsHashes {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (block_hash, remainder) = BlockHash::from_bytes(bytes)?;\n        let (approvals_hashes, remainder) = Vec::<ApprovalsHash>::from_bytes(remainder)?;\n        let (merkle_proof_approvals, remainder) =\n            TrieMerkleProof::<Key, StoredValue>::from_bytes(remainder)?;\n        Ok((\n            ApprovalsHashes {\n                block_hash,\n                approvals_hashes,\n          
      merkle_proof_approvals,\n            },\n            remainder,\n        ))\n    }\n}\n\n/// Returns the hash of the bytesrepr-encoded deploy_ids, as used until the `Block` enum became\n/// available.\npub(crate) fn compute_legacy_approvals_checksum(\n    deploy_ids: Vec<DeployId>,\n) -> Result<Digest, ApprovalsHashesValidationError> {\n    let bytes = deploy_ids\n        .into_bytes()\n        .map_err(ApprovalsHashesValidationError::ApprovalsChecksum)?;\n    Ok(Digest::hash(bytes))\n}\n\n/// An error that can arise when validating `ApprovalsHashes`.\n#[derive(Error, Debug, DataSize)]\n#[non_exhaustive]\npub(crate) enum ApprovalsHashesValidationError {\n    /// The key provided in the proof is not a `Key::ChecksumRegistry`.\n    #[error(\"key provided in proof is not a Key::ChecksumRegistry\")]\n    InvalidKeyType,\n\n    /// An error while computing the state root hash implied by the Merkle proof.\n    #[error(\"failed to compute state root hash implied by proof\")]\n    TrieMerkleProof(bytesrepr::Error),\n\n    /// The state root hash implied by the Merkle proof doesn't match that in the block.\n    #[error(\"state root hash implied by the Merkle proof doesn't match that in the block\")]\n    StateRootHashMismatch {\n        proof_state_root_hash: Digest,\n        block_state_root_hash: Digest,\n    },\n\n    /// The value provided in the proof cannot be parsed to the checksum registry type.\n    #[error(\"value provided in the proof cannot be parsed to the checksum registry type\")]\n    InvalidChecksumRegistry,\n\n    /// An error while computing the checksum of the approvals.\n    #[error(\"failed to compute checksum of the approvals\")]\n    ApprovalsChecksum(bytesrepr::Error),\n\n    /// The approvals checksum provided doesn't match one calculated from the approvals.\n    #[error(\"provided approvals checksum doesn't match one calculated from the approvals\")]\n    ApprovalsChecksumMismatch {\n        computed_approvals_checksum: Digest,\n        
value_in_proof: Digest,\n    },\n\n    #[error(transparent)]\n    #[data_size(skip)]\n    VariantMismatch(#[from] VariantMismatch),\n}\n\nmod specimen_support {\n    use std::collections::BTreeMap;\n\n    use casper_types::{\n        bytesrepr::Bytes,\n        global_state::{Pointer, TrieMerkleProof, TrieMerkleProofStep},\n        CLValue, Digest, Key, StoredValue,\n    };\n\n    use crate::{\n        contract_runtime::{APPROVALS_CHECKSUM_NAME, EXECUTION_RESULTS_CHECKSUM_NAME},\n        utils::specimen::{\n            largest_variant, vec_of_largest_specimen, vec_prop_specimen, Cache, LargestSpecimen,\n            SizeEstimator,\n        },\n    };\n    use casper_storage::block_store::types::ApprovalsHashes;\n\n    impl LargestSpecimen for ApprovalsHashes {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            let data = {\n                let mut map = BTreeMap::new();\n                map.insert(\n                    APPROVALS_CHECKSUM_NAME,\n                    Digest::largest_specimen(estimator, cache),\n                );\n                map.insert(\n                    EXECUTION_RESULTS_CHECKSUM_NAME,\n                    Digest::largest_specimen(estimator, cache),\n                );\n                map\n            };\n            let merkle_proof_approvals = TrieMerkleProof::new(\n                Key::ChecksumRegistry,\n                StoredValue::CLValue(CLValue::from_t(data).expect(\"a correct cl value\")),\n                // 2^64/2^13 = 2^51, so 51 items:\n                vec_of_largest_specimen(estimator, 51, cache).into(),\n            );\n            ApprovalsHashes::new(\n                LargestSpecimen::largest_specimen(estimator, cache),\n                vec_prop_specimen(estimator, \"approvals_hashes\", cache),\n                merkle_proof_approvals,\n            )\n        }\n    }\n\n    impl LargestSpecimen for TrieMerkleProofStep {\n        fn largest_specimen<E: 
SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            #[derive(strum::EnumIter)]\n            enum TrieMerkleProofStepDiscriminants {\n                Node,\n                Extension,\n            }\n\n            largest_variant(estimator, |variant| match variant {\n                TrieMerkleProofStepDiscriminants::Node => TrieMerkleProofStep::Node {\n                    hole_index: u8::MAX,\n                    indexed_pointers_with_hole: vec![\n                        (\n                            u8::MAX,\n                            Pointer::LeafPointer(LargestSpecimen::largest_specimen(\n                                estimator, cache\n                            ))\n                        );\n                        estimator.parameter(\"max_pointer_per_node\")\n                    ],\n                },\n                TrieMerkleProofStepDiscriminants::Extension => TrieMerkleProofStep::Extension {\n                    affix: Bytes::from(vec![u8::MAX; Key::max_serialized_length()]),\n                },\n            })\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/types/block/block_execution_results_or_chunk.rs",
    "content": "use std::fmt::{self, Debug, Display, Formatter};\n\nuse datasize::DataSize;\nuse once_cell::sync::OnceCell;\nuse serde::{Deserialize, Serialize};\nuse tracing::{debug, error};\n\n#[cfg(test)]\nuse casper_types::execution::ExecutionResultV2;\n#[cfg(test)]\nuse casper_types::testing::TestRng;\nuse casper_types::{\n    bytesrepr::{self, ToBytes},\n    execution::ExecutionResult,\n    BlockHash, ChunkWithProof, ChunkWithProofVerificationError, Digest,\n};\n\nuse super::BlockExecutionResultsOrChunkId;\nuse crate::{\n    components::{\n        block_synchronizer::ExecutionResultsChecksum,\n        fetcher::{FetchItem, Tag},\n    },\n    types::{Chunkable, ValueOrChunk},\n    utils::ds,\n};\n\n/// Represents execution results for all deploys in a single block or a chunk of this complete\n/// value.\n#[derive(Clone, Serialize, Deserialize, Debug, Eq, DataSize)]\npub struct BlockExecutionResultsOrChunk {\n    /// Block to which this value or chunk refers to.\n    pub(super) block_hash: BlockHash,\n    /// Complete execution results for the block or a chunk of the complete data.\n    pub(super) value: ValueOrChunk<Vec<ExecutionResult>>,\n    #[serde(skip)]\n    #[data_size(with = ds::once_cell)]\n    pub(super) is_valid: OnceCell<Result<bool, bytesrepr::Error>>,\n}\n\nimpl BlockExecutionResultsOrChunk {\n    pub(crate) fn new(\n        block_hash: BlockHash,\n        chunk_index: u64,\n        execution_results: Vec<ExecutionResult>,\n    ) -> Option<Self> {\n        fn make_value_or_chunk<T: Chunkable>(\n            data: T,\n            block_hash: &BlockHash,\n            chunk_index: u64,\n        ) -> Option<ValueOrChunk<T>> {\n            match ValueOrChunk::new(data, chunk_index) {\n                Ok(value_or_chunk) => Some(value_or_chunk),\n                Err(error) => {\n                    error!(\n                        %block_hash, %chunk_index, %error,\n                        \"failed to construct `BlockExecutionResultsOrChunk`\"\n            
        );\n                    None\n                }\n            }\n        }\n\n        let is_v1 = matches!(execution_results.first(), Some(ExecutionResult::V1(_)));\n\n        // If it's not V1, just construct the `ValueOrChunk` from `Vec<ExecutionResult>`.\n        if !is_v1 {\n            let value = make_value_or_chunk(execution_results, &block_hash, chunk_index)?;\n            return Some(BlockExecutionResultsOrChunk {\n                block_hash,\n                value,\n                is_valid: OnceCell::new(),\n            });\n        }\n\n        // If it is V1, we need to construct the `ValueOrChunk` from a `Vec<ExecutionResultV1>` if\n        // it's big enough to need chunking, otherwise we need to use the `Vec<ExecutionResult>` as\n        // the `ValueOrChunk::Value`.\n        let mut v1_results = Vec::with_capacity(execution_results.len());\n        for result in &execution_results {\n            if let ExecutionResult::V1(v1_result) = result {\n                v1_results.push(v1_result);\n            } else {\n                error!(\n                    ?execution_results,\n                    \"all execution results should be version 1\"\n                );\n                return None;\n            }\n        }\n        if v1_results.serialized_length() <= ChunkWithProof::CHUNK_SIZE_BYTES {\n            // Avoid using `make_value_or_chunk(execution_results, ..)` as that will chunk if\n            // `v1_results.serialized_length() == ChunkWithProof::CHUNK_SIZE_BYTES`, since\n            // `execution_results.serialized_length()` will definitely be greater than\n            // `ChunkWithProof::CHUNK_SIZE_BYTES` due to the extra tag byte specifying V1 in the\n            // enum `ExecutionResult`.\n            let value = ValueOrChunk::Value(execution_results);\n            return Some(BlockExecutionResultsOrChunk {\n                block_hash,\n                value,\n                is_valid: OnceCell::new(),\n            });\n        
}\n\n        let v1_value = make_value_or_chunk(v1_results, &block_hash, chunk_index)?;\n        let value = match v1_value {\n            ValueOrChunk::Value(_) => {\n                error!(\n                    ?execution_results,\n                    \"v1 execution results of this size should be chunked\"\n                );\n                return None;\n            }\n            ValueOrChunk::ChunkWithProof(chunk) => ValueOrChunk::ChunkWithProof(chunk),\n        };\n\n        Some(BlockExecutionResultsOrChunk {\n            block_hash,\n            value,\n            is_valid: OnceCell::new(),\n        })\n    }\n\n    /// Verifies equivalence of the execution results (or chunks) Merkle root hash with the\n    /// expected value.\n    pub fn validate(&self, expected: &Digest) -> Result<bool, bytesrepr::Error> {\n        *self.is_valid.get_or_init(|| match &self.value {\n            ValueOrChunk::Value(block_execution_results) => {\n                // If results is not empty and all are V1, convert and verify.\n                let is_v1 = matches!(\n                    block_execution_results.first(),\n                    Some(ExecutionResult::V1(_))\n                );\n                let actual = if is_v1 {\n                    let mut v1_results = Vec::with_capacity(block_execution_results.len());\n                    for result in block_execution_results {\n                        if let ExecutionResult::V1(v1_result) = result {\n                            v1_results.push(v1_result);\n                        } else {\n                            debug!(\n                                ?block_execution_results,\n                                \"all execution results should be version 1\"\n                            );\n                            return Ok(false);\n                        }\n                    }\n                    Chunkable::hash(&v1_results)?\n                } else {\n                    
Chunkable::hash(&block_execution_results)?\n                };\n                Ok(&actual == expected)\n            }\n            ValueOrChunk::ChunkWithProof(chunk_with_proof) => {\n                Ok(&chunk_with_proof.proof().root_hash() == expected)\n            }\n        })\n    }\n\n    /// Consumes `self` and returns inner `ValueOrChunk` field.\n    pub fn into_value(self) -> ValueOrChunk<Vec<ExecutionResult>> {\n        self.value\n    }\n\n    /// Returns the hash of the block this execution result belongs to.\n    pub fn block_hash(&self) -> &BlockHash {\n        &self.block_hash\n    }\n\n    #[cfg(test)]\n    pub(crate) fn new_mock_value(rng: &mut TestRng, block_hash: BlockHash) -> Self {\n        Self::new_mock_value_with_multiple_random_results(rng, block_hash, 1)\n    }\n\n    #[cfg(test)]\n    pub(crate) fn new_mock_value_with_multiple_random_results(\n        rng: &mut TestRng,\n        block_hash: BlockHash,\n        num_results: usize,\n    ) -> Self {\n        let execution_results: Vec<ExecutionResult> = (0..num_results)\n            .map(|_| ExecutionResultV2::random(rng).into())\n            .collect();\n\n        Self {\n            block_hash,\n            value: ValueOrChunk::new(execution_results, 0).unwrap(),\n            is_valid: OnceCell::with_value(Ok(true)),\n        }\n    }\n\n    #[cfg(test)]\n    pub(crate) fn value(&self) -> &ValueOrChunk<Vec<ExecutionResult>> {\n        &self.value\n    }\n\n    #[cfg(test)]\n    pub(crate) fn new_from_value(\n        block_hash: BlockHash,\n        value: ValueOrChunk<Vec<ExecutionResult>>,\n    ) -> Self {\n        Self {\n            block_hash,\n            value,\n            is_valid: OnceCell::new(),\n        }\n    }\n}\n\nimpl PartialEq for BlockExecutionResultsOrChunk {\n    fn eq(&self, other: &BlockExecutionResultsOrChunk) -> bool {\n        // Destructure to make sure we don't accidentally omit fields.\n        let BlockExecutionResultsOrChunk {\n            block_hash,\n        
    value,\n            is_valid: _,\n        } = self;\n        *block_hash == other.block_hash && *value == other.value\n    }\n}\n\nimpl FetchItem for BlockExecutionResultsOrChunk {\n    type Id = BlockExecutionResultsOrChunkId;\n    type ValidationError = ChunkWithProofVerificationError;\n    type ValidationMetadata = ExecutionResultsChecksum;\n\n    const TAG: Tag = Tag::BlockExecutionResults;\n\n    fn fetch_id(&self) -> Self::Id {\n        let chunk_index = match &self.value {\n            ValueOrChunk::Value(_) => 0,\n            ValueOrChunk::ChunkWithProof(chunks) => chunks.proof().index(),\n        };\n        BlockExecutionResultsOrChunkId {\n            chunk_index,\n            block_hash: self.block_hash,\n        }\n    }\n\n    fn validate(&self, metadata: &ExecutionResultsChecksum) -> Result<(), Self::ValidationError> {\n        if let ValueOrChunk::ChunkWithProof(chunk_with_proof) = &self.value {\n            chunk_with_proof.verify()?;\n        }\n        if let ExecutionResultsChecksum::Checkable(expected) = *metadata {\n            if !self\n                .validate(&expected)\n                .map_err(ChunkWithProofVerificationError::Bytesrepr)?\n            {\n                return Err(ChunkWithProofVerificationError::UnexpectedRootHash);\n            }\n        }\n        Ok(())\n    }\n}\n\nimpl Display for BlockExecutionResultsOrChunk {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        let size = match &self.value {\n            ValueOrChunk::Value(exec_results) => exec_results.serialized_length(),\n            ValueOrChunk::ChunkWithProof(chunk) => chunk.serialized_length(),\n        };\n        write!(\n            f,\n            \"block execution results or chunk ({size} bytes) for block {}\",\n            self.block_hash.inner()\n        )\n    }\n}\n\nmod specimen_support {\n    use crate::utils::specimen::{Cache, LargestSpecimen, SizeEstimator};\n\n    use super::BlockExecutionResultsOrChunk;\n    use 
once_cell::sync::OnceCell;\n\n    impl LargestSpecimen for BlockExecutionResultsOrChunk {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            BlockExecutionResultsOrChunk {\n                block_hash: LargestSpecimen::largest_specimen(estimator, cache),\n                value: LargestSpecimen::largest_specimen(estimator, cache),\n                is_valid: OnceCell::with_value(Ok(true)),\n            }\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use rand::Rng;\n\n    use casper_types::{\n        execution::{execution_result_v1::ExecutionEffect, ExecutionResultV1},\n        testing::TestRng,\n        ChunkWithProof, TransferAddr,\n    };\n\n    use super::*;\n    use crate::contract_runtime::compute_execution_results_checksum;\n\n    fn compute_execution_results_v1_checksum(\n        v1_execution_results: Vec<&ExecutionResultV1>,\n    ) -> ExecutionResultsChecksum {\n        ExecutionResultsChecksum::Checkable(v1_execution_results.hash().unwrap())\n    }\n\n    /// Checks that a Vec of `ExecutionResultV1`s which are right at the limit to avoid being\n    /// chunked are still not chunked when constructing a BlockExecutionResultsOrChunk from them\n    /// when they are held as a Vec of `ExecutionResult`s.\n    #[test]\n    fn should_not_chunk_for_v1_at_upper_bound() {\n        let rng = &mut TestRng::new();\n\n        // The serialized_length() of this should be equal to `ChunkWithProof::CHUNK_SIZE_BYTES`\n        let execution_results_v1 = vec![ExecutionResultV1::Failure {\n            effect: ExecutionEffect::default(),\n            transfers: vec![TransferAddr::new([1; 32]); 262143],\n            cost: 2_u64.into(),\n            error_message: \"ninebytes\".to_string(),\n        }];\n        assert!(\n            execution_results_v1.serialized_length() == ChunkWithProof::CHUNK_SIZE_BYTES,\n            \"need execution_results_v1.serialized_length() [{}] to be <= \\\n            
ChunkWithProof::CHUNK_SIZE_BYTES [{}]\",\n            execution_results_v1.serialized_length(),\n            ChunkWithProof::CHUNK_SIZE_BYTES\n        );\n        // The serialized_length() of this should be greater than `ChunkWithProof::CHUNK_SIZE_BYTES`\n        // meaning it would be chunked unless we explicitly avoid chunking it in the\n        // `BlockExecutionResultsOrChunk` constructor.\n        let execution_results = execution_results_v1\n            .iter()\n            .map(|res| ExecutionResult::V1(res.clone()))\n            .collect::<Vec<_>>();\n        assert!(\n            execution_results.serialized_length() > ChunkWithProof::CHUNK_SIZE_BYTES,\n            \"need execution_results.serialized_length() [{}] to be > \\\n            ChunkWithProof::CHUNK_SIZE_BYTES [{}]\",\n            execution_results_v1.serialized_length(),\n            ChunkWithProof::CHUNK_SIZE_BYTES\n        );\n        assert!(execution_results.serialized_length() > ChunkWithProof::CHUNK_SIZE_BYTES);\n\n        let block_hash = BlockHash::random(rng);\n        let value_or_chunk =\n            BlockExecutionResultsOrChunk::new(block_hash, 0, execution_results).unwrap();\n        assert!(matches!(value_or_chunk.value, ValueOrChunk::Value(_)));\n    }\n\n    #[test]\n    fn should_validate_v1_unchunked_checksum() {\n        let rng = &mut TestRng::new();\n        let execution_results = vec![\n            ExecutionResult::V1(rng.gen()),\n            ExecutionResult::V1(rng.gen()),\n        ];\n        let checksum = compute_execution_results_v1_checksum(\n            execution_results\n                .iter()\n                .map(|exec_result| match exec_result {\n                    ExecutionResult::V1(exec_result) => exec_result,\n                    _ => unreachable!(),\n                })\n                .collect(),\n        );\n\n        let block_hash = BlockHash::random(rng);\n        let block_results =\n            BlockExecutionResultsOrChunk::new(block_hash, 0, 
execution_results).unwrap();\n        // Ensure the results weren't chunked.\n        assert!(matches!(block_results.value, ValueOrChunk::Value(_)));\n\n        FetchItem::validate(&block_results, &checksum).unwrap();\n    }\n\n    #[test]\n    fn should_validate_v1_chunked_checksum() {\n        let rng = &mut TestRng::new();\n\n        let v1_result: ExecutionResultV1 = rng.gen();\n        // Ensure we fill with enough copies to cause three chunks.\n        let count = (2 * ChunkWithProof::CHUNK_SIZE_BYTES / v1_result.serialized_length()) + 1;\n        let execution_results = vec![ExecutionResult::V1(v1_result); count];\n        let checksum = compute_execution_results_v1_checksum(\n            execution_results\n                .iter()\n                .map(|exec_result| match exec_result {\n                    ExecutionResult::V1(exec_result) => exec_result,\n                    _ => unreachable!(),\n                })\n                .collect(),\n        );\n\n        let block_hash = BlockHash::random(rng);\n        for chunk_index in 0..3 {\n            let block_results = BlockExecutionResultsOrChunk::new(\n                block_hash,\n                chunk_index,\n                execution_results.clone(),\n            )\n            .unwrap();\n            // Ensure the results were chunked.\n            assert!(matches!(\n                block_results.value,\n                ValueOrChunk::ChunkWithProof(_)\n            ));\n\n            FetchItem::validate(&block_results, &checksum).unwrap();\n        }\n    }\n\n    #[test]\n    fn should_validate_v1_empty_checksum() {\n        let rng = &mut TestRng::new();\n        let checksum = compute_execution_results_v1_checksum(vec![]);\n\n        let block_results =\n            BlockExecutionResultsOrChunk::new(BlockHash::random(rng), 0, vec![]).unwrap();\n        FetchItem::validate(&block_results, &checksum).unwrap();\n    }\n\n    #[test]\n    fn should_validate_versioned_unchunked_checksum() {\n        
let rng = &mut TestRng::new();\n        let execution_results = vec![\n            ExecutionResult::from(ExecutionResultV2::random(rng)),\n            ExecutionResult::from(ExecutionResultV2::random(rng)),\n        ];\n        let checksum = ExecutionResultsChecksum::Checkable(\n            compute_execution_results_checksum(execution_results.iter()).unwrap(),\n        );\n\n        let block_hash = BlockHash::random(rng);\n        let block_results =\n            BlockExecutionResultsOrChunk::new(block_hash, 0, execution_results).unwrap();\n        // Ensure the results weren't chunked.\n        assert!(matches!(block_results.value, ValueOrChunk::Value(_)));\n\n        FetchItem::validate(&block_results, &checksum).unwrap();\n    }\n\n    #[test]\n    fn should_validate_versioned_chunked_checksum() {\n        let rng = &mut TestRng::new();\n\n        let v2_result = ExecutionResultV2::random(rng);\n        // Ensure we fill with enough copies to cause three chunks.\n        let count = (2 * ChunkWithProof::CHUNK_SIZE_BYTES / v2_result.serialized_length()) + 1;\n        let execution_results = vec![ExecutionResult::V2(Box::new(v2_result)); count];\n        let checksum = ExecutionResultsChecksum::Checkable(\n            compute_execution_results_checksum(execution_results.iter()).unwrap(),\n        );\n\n        let block_hash = BlockHash::random(rng);\n        for chunk_index in 0..3 {\n            let block_results = BlockExecutionResultsOrChunk::new(\n                block_hash,\n                chunk_index,\n                execution_results.clone(),\n            )\n            .unwrap();\n            // Ensure the results were chunked.\n            assert!(matches!(\n                block_results.value,\n                ValueOrChunk::ChunkWithProof(_)\n            ));\n\n            FetchItem::validate(&block_results, &checksum).unwrap();\n        }\n    }\n\n    #[test]\n    fn should_validate_versioned_empty_checksum() {\n        let rng = &mut 
TestRng::new();\n        let checksum = ExecutionResultsChecksum::Checkable(\n            compute_execution_results_checksum(None.into_iter()).unwrap(),\n        );\n\n        let block_results =\n            BlockExecutionResultsOrChunk::new(BlockHash::random(rng), 0, vec![]).unwrap();\n        FetchItem::validate(&block_results, &checksum).unwrap();\n    }\n}\n"
  },
  {
    "path": "node/src/types/block/block_execution_results_or_chunk_id.rs",
    "content": "use std::{\n    fmt::{self, Debug, Display, Formatter},\n    hash::Hash,\n};\n\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse casper_types::BlockHash;\n\n/// ID of the request for block execution results or chunk.\n#[derive(DataSize, Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Hash)]\npub(crate) struct BlockExecutionResultsOrChunkId {\n    /// Index of the chunk being requested.\n    pub(super) chunk_index: u64,\n    /// Hash of the block.\n    pub(super) block_hash: BlockHash,\n}\n\nimpl BlockExecutionResultsOrChunkId {\n    /// Returns an instance of post-1.5 request for block execution results.\n    /// The `chunk_index` is set to 0 as the starting point of the fetch cycle.\n    /// If the effects are stored without chunking the index will be 0 as well.\n    pub fn new(block_hash: BlockHash) -> Self {\n        BlockExecutionResultsOrChunkId {\n            chunk_index: 0,\n            block_hash,\n        }\n    }\n\n    /// Returns the request for the `next_chunk` retaining the original request's block hash.\n    pub fn next_chunk(&self, next_chunk: u64) -> Self {\n        BlockExecutionResultsOrChunkId {\n            chunk_index: next_chunk,\n            block_hash: self.block_hash,\n        }\n    }\n\n    pub(crate) fn block_hash(&self) -> &BlockHash {\n        &self.block_hash\n    }\n\n    pub(crate) fn chunk_index(&self) -> u64 {\n        self.chunk_index\n    }\n}\n\nimpl Display for BlockExecutionResultsOrChunkId {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            f,\n            \"execution results for {} or chunk #{}\",\n            self.block_hash, self.chunk_index\n        )\n    }\n}\n\nmod specimen_support {\n    use crate::utils::specimen::{Cache, LargestSpecimen, SizeEstimator};\n\n    use super::BlockExecutionResultsOrChunkId;\n\n    impl LargestSpecimen for BlockExecutionResultsOrChunkId {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, 
cache: &mut Cache) -> Self {\n            BlockExecutionResultsOrChunkId {\n                chunk_index: u64::MAX,\n                block_hash: LargestSpecimen::largest_specimen(estimator, cache),\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/types/block/block_payload.rs",
    "content": "use std::{\n    cmp::{Ord, PartialOrd},\n    collections::{BTreeMap, BTreeSet},\n    fmt::{self, Display, Formatter},\n    hash::Hash,\n};\n\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse casper_types::{\n    Approval, PublicKey, RewardedSignatures, TransactionHash, AUCTION_LANE_ID,\n    INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID,\n};\n\n/// The piece of information that will become the content of a future block (isn't finalized or\n/// executed yet)\n///\n/// From the view of the consensus protocol this is the \"consensus value\": The protocol deals with\n/// finalizing an order of `BlockPayload`s. Only after consensus has been reached, the block's\n/// transactions actually get executed, and the executed block gets signed.\n#[derive(Clone, DataSize, Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize)]\npub struct BlockPayload {\n    transactions: BTreeMap<u8, Vec<(TransactionHash, BTreeSet<Approval>)>>,\n    accusations: Vec<PublicKey>,\n    rewarded_signatures: RewardedSignatures,\n    random_bit: bool,\n    current_gas_price: u8,\n}\n\nimpl Default for BlockPayload {\n    fn default() -> Self {\n        Self {\n            transactions: Default::default(),\n            accusations: vec![],\n            rewarded_signatures: Default::default(),\n            random_bit: false,\n            current_gas_price: 1u8,\n        }\n    }\n}\n\nimpl BlockPayload {\n    pub(crate) fn new(\n        transactions: BTreeMap<u8, Vec<(TransactionHash, BTreeSet<Approval>)>>,\n        accusations: Vec<PublicKey>,\n        rewarded_signatures: RewardedSignatures,\n        random_bit: bool,\n        current_gas_price: u8,\n    ) -> Self {\n        BlockPayload {\n            transactions,\n            accusations,\n            rewarded_signatures,\n            random_bit,\n            current_gas_price,\n        }\n    }\n\n    /// Returns the hashes and approvals of the mint transactions within the block.\n    pub fn mint(&self) -> 
impl Iterator<Item = &(TransactionHash, BTreeSet<Approval>)> {\n        let mut ret = vec![];\n        if let Some(transactions) = self.transactions.get(&MINT_LANE_ID) {\n            for transaction in transactions {\n                ret.push(transaction);\n            }\n        }\n        ret.into_iter()\n    }\n\n    /// Returns the hashes and approvals of the auction transactions within the block.\n    pub fn auction(&self) -> impl Iterator<Item = &(TransactionHash, BTreeSet<Approval>)> {\n        let mut ret = vec![];\n        if let Some(transactions) = self.transactions.get(&AUCTION_LANE_ID) {\n            for transaction in transactions {\n                ret.push(transaction);\n            }\n        }\n        ret.into_iter()\n    }\n\n    /// Returns the hashes and approvals of the installer / upgrader transactions within the block.\n    pub fn install_upgrade(&self) -> impl Iterator<Item = &(TransactionHash, BTreeSet<Approval>)> {\n        let mut ret = vec![];\n        if let Some(transactions) = self.transactions.get(&INSTALL_UPGRADE_LANE_ID) {\n            for transaction in transactions {\n                ret.push(transaction);\n            }\n        }\n        ret.into_iter()\n    }\n\n    /// Returns all the transaction hashes and approvals within the block by lane.\n    pub fn transactions_by_lane(\n        &self,\n        lane: u8,\n    ) -> impl Iterator<Item = &(TransactionHash, BTreeSet<Approval>)> {\n        let mut ret = vec![];\n        if let Some(transactions) = self.transactions.get(&lane) {\n            for transaction in transactions {\n                ret.push(transaction);\n            }\n        }\n        ret.into_iter()\n    }\n\n    pub(crate) fn finalized_payload(&self) -> BTreeMap<u8, Vec<TransactionHash>> {\n        let mut ret = BTreeMap::new();\n        for (category, transactions) in &self.transactions {\n            let transactions = transactions.iter().map(|(tx, _)| *tx).collect();\n            ret.insert(*category, 
transactions);\n        }\n\n        ret\n    }\n\n    /// Returns true if even 1 transaction is in a lane other than supported.\n    pub fn has_transaction_in_unsupported_lane(&self, supported_lanes: &[u8]) -> bool {\n        // for all transaction lanes, if any of them are not in supported_lanes, true\n        self.transactions\n            .keys()\n            .any(|lane_id| !supported_lanes.contains(lane_id))\n    }\n\n    /// Returns count of transactions by category.\n    pub fn count(&self, lane: Option<u8>) -> usize {\n        match lane {\n            None => self.transactions.values().map(Vec::len).sum(),\n            Some(lane) => match self.transactions.get(&lane) {\n                Some(values) => values.len(),\n                None => 0,\n            },\n        }\n    }\n\n    /// Returns all the transaction hashes and approvals within the block.\n    pub fn all_transactions(&self) -> impl Iterator<Item = &(TransactionHash, BTreeSet<Approval>)> {\n        self.transactions.values().flatten()\n    }\n\n    /// Returns the set of validators that are reported as faulty in this block.\n    pub(crate) fn accusations(&self) -> &Vec<PublicKey> {\n        &self.accusations\n    }\n\n    pub(crate) fn random_bit(&self) -> bool {\n        self.random_bit\n    }\n\n    /// The finality signatures for the past blocks that will be rewarded in this block.\n    pub(crate) fn rewarded_signatures(&self) -> &RewardedSignatures {\n        &self.rewarded_signatures\n    }\n\n    /// The current gas price to execute the payload against.\n    pub(crate) fn current_gas_price(&self) -> u8 {\n        self.current_gas_price\n    }\n\n    pub(crate) fn all_transaction_hashes(&self) -> impl Iterator<Item = TransactionHash> {\n        let mut ret: Vec<TransactionHash> = vec![];\n        for values in self.transactions.values() {\n            for (transaction_hash, _) in values {\n                ret.push(*transaction_hash);\n            }\n        }\n        ret.into_iter()\n    
}\n}\n\nimpl Display for BlockPayload {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        let count = self.count(None);\n        write!(formatter, \"payload: {} txns\", count)?;\n        if !self.accusations.is_empty() {\n            write!(formatter, \", {} accusations\", self.accusations.len())?;\n        }\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "node/src/types/block/block_with_metadata.rs",
    "content": "use casper_types::{Block, BlockSignatures};\nuse serde::{Deserialize, Serialize};\nuse std::fmt;\n\n/// A wrapper around `Block` for the purposes of fetching blocks by height in linear chain.\n#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]\npub struct BlockWithMetadata {\n    pub block: Block,\n    pub block_signatures: BlockSignatures,\n}\n\nimpl fmt::Display for BlockWithMetadata {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        write!(\n            f,\n            \"block #{}, {}, with {} block signatures\",\n            self.block.height(),\n            self.block.hash(),\n            self.block_signatures.len()\n        )\n    }\n}\n"
  },
  {
    "path": "node/src/types/block/executable_block.rs",
    "content": "use std::{collections::BTreeMap, fmt};\n\nuse datasize::DataSize;\nuse serde::Serialize;\n\nuse casper_types::{\n    BlockV2, EraId, PublicKey, RewardedSignatures, Timestamp, Transaction, TransactionHash,\n    AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID, U512,\n};\n\nuse super::{FinalizedBlock, InternalEraReport};\n\n/// Data necessary for a block to be executed.\n#[derive(DataSize, Debug, Clone, PartialEq, Serialize)]\npub struct ExecutableBlock {\n    pub(crate) rewarded_signatures: RewardedSignatures,\n    pub(crate) timestamp: Timestamp,\n    pub(crate) random_bit: bool,\n    pub(crate) era_report: Option<InternalEraReport>,\n    pub(crate) era_id: EraId,\n    pub(crate) height: u64,\n    pub(crate) proposer: Box<PublicKey>,\n    pub(crate) current_gas_price: u8,\n    /// The transactions for the `FinalizedBlock`.\n    pub(crate) transactions: Vec<Transaction>,\n    pub(crate) transaction_map: BTreeMap<u8, Vec<TransactionHash>>,\n    /// `None` may indicate that the rewards have not been computed yet,\n    /// or that the block is not a switch one.\n    pub(crate) rewards: Option<BTreeMap<PublicKey, Vec<U512>>>,\n    /// `None` may indicate that the next era gas has not been computed yet,\n    /// or that the block is not a switch one.\n    pub(crate) next_era_gas_price: Option<u8>,\n}\n\nimpl ExecutableBlock {\n    pub(crate) fn mint(&self) -> Vec<TransactionHash> {\n        self.transaction_map\n            .get(&MINT_LANE_ID)\n            .cloned()\n            .unwrap_or(vec![])\n    }\n\n    pub(crate) fn auction(&self) -> Vec<TransactionHash> {\n        self.transaction_map\n            .get(&AUCTION_LANE_ID)\n            .cloned()\n            .unwrap_or(vec![])\n    }\n\n    pub(crate) fn install_upgrade(&self) -> Vec<TransactionHash> {\n        self.transaction_map\n            .get(&INSTALL_UPGRADE_LANE_ID)\n            .cloned()\n            .unwrap_or(vec![])\n    }\n\n    /// Creates a new `ExecutedBlock` from a 
`FinalizedBlock` and its transactions.\n    pub fn from_finalized_block_and_transactions(\n        finalized_block: FinalizedBlock,\n        transactions: Vec<Transaction>,\n    ) -> Self {\n        Self {\n            rewarded_signatures: finalized_block.rewarded_signatures,\n            timestamp: finalized_block.timestamp,\n            random_bit: finalized_block.random_bit,\n            era_report: finalized_block.era_report,\n            era_id: finalized_block.era_id,\n            height: finalized_block.height,\n            proposer: finalized_block.proposer,\n            transactions,\n            transaction_map: finalized_block.transactions,\n            rewards: None,\n            next_era_gas_price: None,\n            current_gas_price: finalized_block.current_gas_price,\n        }\n    }\n\n    /// Creates a new `ExecutedBlock` from a `BlockV2` and its deploys.\n    pub fn from_block_and_transactions(block: BlockV2, transactions: Vec<Transaction>) -> Self {\n        let era_report = block.era_end().map(|ee| InternalEraReport {\n            equivocators: ee.equivocators().into(),\n            inactive_validators: ee.inactive_validators().into(),\n        });\n\n        Self {\n            rewarded_signatures: block.rewarded_signatures().clone(),\n            timestamp: block.timestamp(),\n            random_bit: block.random_bit(),\n            era_report,\n            era_id: block.era_id(),\n            height: block.height(),\n            proposer: Box::new(block.proposer().clone()),\n            transactions,\n            transaction_map: block.transactions().clone(),\n            rewards: block.era_end().map(|era_end| era_end.rewards().clone()),\n            next_era_gas_price: block.era_end().map(|era_end| era_end.next_era_gas_price()),\n            current_gas_price: block.header().current_gas_price(),\n        }\n    }\n}\n\nimpl fmt::Display for ExecutableBlock {\n    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {\n        
write!(\n            formatter,\n            \"executable block #{} in {}, timestamp {}, {} transfers, {} staking txns, {} \\\n            install/upgrade txns\",\n            self.height,\n            self.era_id,\n            self.timestamp,\n            self.mint().len(),\n            self.auction().len(),\n            self.install_upgrade().len(),\n        )?;\n        for (lane, wasm_transaction) in self.transaction_map.iter() {\n            if *lane < 3 {\n                continue;\n            }\n            write!(\n                formatter,\n                \", lane: {} with {} transactions\",\n                *lane,\n                wasm_transaction.len()\n            )?;\n        }\n        if let Some(ref ee) = self.era_report {\n            write!(formatter, \", era_end: {:?}\", ee)?;\n        }\n        if let Some(ref next_era_gas_price) = self.next_era_gas_price {\n            write!(formatter, \", next_era_gas_price: {}\", next_era_gas_price)?;\n        }\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "node/src/types/block/finalized_block.rs",
    "content": "use std::{\n    cmp::{Ord, PartialOrd},\n    collections::BTreeMap,\n    fmt::{self, Display, Formatter},\n    hash::Hash,\n};\n\n#[cfg(test)]\nuse std::collections::BTreeSet;\n\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(test)]\nuse casper_types::{SecretKey, Transaction};\n#[cfg(test)]\nuse {casper_types::testing::TestRng, rand::Rng};\n\nuse casper_types::{\n    BlockV2, EraId, PublicKey, RewardedSignatures, Timestamp, TransactionHash, AUCTION_LANE_ID,\n    INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID,\n};\n\nuse super::BlockPayload;\n\n/// The piece of information that will become the content of a future block after it was finalized\n/// and before execution happened yet.\n#[derive(Clone, DataSize, Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize)]\npub struct FinalizedBlock {\n    pub(crate) transactions: BTreeMap<u8, Vec<TransactionHash>>,\n    pub(crate) rewarded_signatures: RewardedSignatures,\n    pub(crate) timestamp: Timestamp,\n    pub(crate) random_bit: bool,\n    pub(crate) era_report: Option<InternalEraReport>,\n    pub(crate) era_id: EraId,\n    pub(crate) height: u64,\n    pub(crate) proposer: Box<PublicKey>,\n    pub(crate) current_gas_price: u8,\n}\n\n/// `EraReport` used only internally. 
The one in types is a part of `EraEndV1`.\n#[derive(\n    Clone, DataSize, Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize, Default,\n)]\npub struct InternalEraReport {\n    /// The set of equivocators.\n    pub equivocators: Vec<PublicKey>,\n    /// Validators that haven't produced any unit during the era.\n    pub inactive_validators: Vec<PublicKey>,\n}\n\nimpl FinalizedBlock {\n    pub(crate) fn new(\n        block_payload: BlockPayload,\n        era_report: Option<InternalEraReport>,\n        timestamp: Timestamp,\n        era_id: EraId,\n        height: u64,\n        proposer: PublicKey,\n    ) -> Self {\n        let current_gas_price = block_payload.current_gas_price();\n        let transactions = block_payload.finalized_payload();\n\n        FinalizedBlock {\n            transactions,\n            rewarded_signatures: block_payload.rewarded_signatures().clone(),\n            timestamp,\n            random_bit: block_payload.random_bit(),\n            era_report,\n            era_id,\n            height,\n            proposer: Box::new(proposer),\n            current_gas_price,\n        }\n    }\n\n    pub(crate) fn mint(&self) -> Vec<TransactionHash> {\n        self.transactions\n            .get(&MINT_LANE_ID)\n            .map(|transactions| transactions.to_vec())\n            .unwrap_or_default()\n    }\n\n    pub(crate) fn auction(&self) -> Vec<TransactionHash> {\n        self.transactions\n            .get(&AUCTION_LANE_ID)\n            .map(|transactions| transactions.to_vec())\n            .unwrap_or_default()\n    }\n    pub(crate) fn install_upgrade(&self) -> Vec<TransactionHash> {\n        self.transactions\n            .get(&INSTALL_UPGRADE_LANE_ID)\n            .map(|transactions| transactions.to_vec())\n            .unwrap_or_default()\n    }\n\n    /// The list of deploy hashes chained with the list of transfer hashes.\n    pub(crate) fn all_transactions(&self) -> impl Iterator<Item = &TransactionHash> {\n        
self.transactions.values().flatten()\n    }\n\n    /// Generates a random instance using a `TestRng` and includes specified deploys.\n    #[cfg(test)]\n    pub(crate) fn random<'a, I: IntoIterator<Item = &'a Transaction>>(\n        rng: &mut TestRng,\n        txns_iter: I,\n    ) -> Self {\n        let era = rng.gen_range(0..5);\n        let height = era * 10 + rng.gen_range(0..10);\n        let is_switch = rng.gen_bool(0.1);\n\n        FinalizedBlock::random_with_specifics(\n            rng,\n            EraId::from(era),\n            height,\n            is_switch,\n            Timestamp::now(),\n            txns_iter,\n        )\n    }\n\n    /// Generates a random instance using a `TestRng`, but using the specified values.\n    /// If `deploy` is `None`, random deploys will be generated, otherwise, the provided `deploy`\n    /// will be used.\n    #[cfg(test)]\n    pub(crate) fn random_with_specifics<'a, I: IntoIterator<Item = &'a Transaction>>(\n        rng: &mut TestRng,\n        era_id: EraId,\n        height: u64,\n        is_switch: bool,\n        timestamp: Timestamp,\n        txns_iter: I,\n    ) -> Self {\n        let mut transactions = BTreeMap::new();\n        let mut standard = vec![];\n        for transaction in txns_iter {\n            standard.push((transaction.hash(), BTreeSet::new()));\n        }\n        transactions.insert(3, standard);\n        let rewarded_signatures = Default::default();\n        let random_bit = rng.gen();\n        let block_payload =\n            BlockPayload::new(transactions, vec![], rewarded_signatures, random_bit, 1u8);\n\n        let era_report = if is_switch {\n            Some(InternalEraReport::random(rng))\n        } else {\n            None\n        };\n        let secret_key: SecretKey = SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap();\n        let public_key = PublicKey::from(&secret_key);\n\n        FinalizedBlock::new(\n            block_payload,\n            era_report,\n            
timestamp,\n            era_id,\n            height,\n            public_key,\n        )\n    }\n}\n\nimpl From<BlockV2> for FinalizedBlock {\n    fn from(block: BlockV2) -> Self {\n        FinalizedBlock {\n            transactions: block.transactions().clone(),\n            timestamp: block.timestamp(),\n            random_bit: block.random_bit(),\n            era_report: block.era_end().map(|era_end| InternalEraReport {\n                equivocators: Vec::from(era_end.equivocators()),\n                inactive_validators: Vec::from(era_end.inactive_validators()),\n            }),\n            era_id: block.era_id(),\n            height: block.height(),\n            proposer: Box::new(block.proposer().clone()),\n            rewarded_signatures: block.rewarded_signatures().clone(),\n            current_gas_price: block.header().current_gas_price(),\n        }\n    }\n}\n\nimpl Display for FinalizedBlock {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            formatter,\n            \"finalized block #{} in {}, timestamp {}, {} transfers, {} staking txns, {} \\\n            install/upgrade txns,\",\n            self.height,\n            self.era_id,\n            self.timestamp,\n            self.mint().len(),\n            self.auction().len(),\n            self.install_upgrade().len(),\n        )?;\n        for (category, transactions) in self.transactions.iter() {\n            write!(\n                formatter,\n                \"lane: {} has {} transactions\",\n                category,\n                transactions.len()\n            )?;\n        }\n        if let Some(ref ee) = self.era_report {\n            write!(formatter, \", era_end: {:?}\", ee)?;\n        }\n        Ok(())\n    }\n}\n\nimpl InternalEraReport {\n    /// Returns a random `InternalEraReport`.\n    #[cfg(test)]\n    pub fn random(rng: &mut TestRng) -> Self {\n        let equivocators_count = rng.gen_range(0..5);\n        let inactive_count = 
rng.gen_range(0..5);\n        let equivocators = core::iter::repeat_with(|| PublicKey::random(rng))\n            .take(equivocators_count)\n            .collect();\n        let inactive_validators = core::iter::repeat_with(|| PublicKey::random(rng))\n            .take(inactive_count)\n            .collect();\n\n        InternalEraReport {\n            equivocators,\n            inactive_validators,\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::Deploy;\n\n    #[test]\n    fn should_convert_from_proposable_to_finalized_without_dropping_hashes() {\n        let mut rng = TestRng::new();\n\n        let large_lane_id = 3;\n        let standard = Transaction::Deploy(Deploy::random(&mut rng));\n        let hash = standard.hash();\n        let transactions = {\n            let mut ret = BTreeMap::new();\n            ret.insert(large_lane_id, vec![(hash, BTreeSet::new())]);\n            ret.insert(MINT_LANE_ID, vec![]);\n            ret.insert(INSTALL_UPGRADE_LANE_ID, vec![]);\n            ret.insert(AUCTION_LANE_ID, vec![]);\n            ret\n        };\n        let block_payload = BlockPayload::new(transactions, vec![], Default::default(), false, 1u8);\n\n        let fb = FinalizedBlock::new(\n            block_payload,\n            None,\n            Timestamp::now(),\n            EraId::random(&mut rng),\n            90,\n            PublicKey::random(&mut rng),\n        );\n\n        let transactions = fb.transactions.get(&large_lane_id).unwrap();\n        assert!(!transactions.is_empty())\n    }\n}\n"
  },
  {
    "path": "node/src/types/block/invalid_proposal_error.rs",
    "content": "use crate::types::DataSize;\nuse casper_types::{FinalitySignatureId, InvalidTransaction, TransactionHash};\n\n#[derive(DataSize, Debug, Clone)]\npub(crate) enum InvalidProposalError {\n    Appendable(String),\n    InvalidTransaction(String),\n    AncestorTransactionReplay {\n        replayed_transaction_hash: TransactionHash,\n    },\n    UnfetchedTransaction {\n        transaction_hash: TransactionHash,\n    },\n    RewardSignaturesMissingCitedBlock {\n        cited_block_height: u64,\n    },\n    RewardSignatureReplay {\n        cited_block_height: u64,\n    },\n    InvalidFinalitySignature(FinalitySignatureId),\n    ExceedsLaneLimit {\n        lane_id: u8,\n    },\n    UnsupportedLane,\n    InvalidGasPrice {\n        proposed_gas_price: u8,\n        current_gas_price: u8,\n    },\n    InvalidApprovalsHash(String),\n    CompetingApprovals {\n        transaction_hash: TransactionHash,\n    },\n    UnableToFetch,\n    FailedFetcherValidation,\n    UnexpectedFetchStatus,\n    FetchedIncorrectTransactionById {\n        expected_transaction_hash: TransactionHash,\n        actual_transaction_hash: TransactionHash,\n    },\n    TransactionFetchingAborted,\n    FetcherError(String),\n    FinalitySignatureFetchingAborted,\n    TransactionReplayPreviousEra {\n        transaction_era_id: u64,\n        proposed_block_era_id: u64,\n    },\n}\n\nimpl From<crate::types::appendable_block::AddError> for Box<InvalidProposalError> {\n    fn from(appendable_block_error: crate::types::appendable_block::AddError) -> Self {\n        Box::new(InvalidProposalError::Appendable(format!(\n            \"{}\",\n            appendable_block_error\n        )))\n    }\n}\n\nimpl From<InvalidTransaction> for Box<InvalidProposalError> {\n    fn from(invalid_transaction: InvalidTransaction) -> Self {\n        Box::new(InvalidProposalError::InvalidTransaction(format!(\n            \"{}\",\n            invalid_transaction\n        )))\n    }\n}\n"
  },
  {
    "path": "node/src/types/block/meta_block/merge_mismatch_error.rs",
    "content": "use thiserror::Error;\nuse tracing::error;\n\n#[derive(Error, Debug)]\npub(crate) enum MergeMismatchError {\n    #[error(\"block mismatch when merging meta blocks\")]\n    Block,\n    #[error(\"execution results mismatch when merging meta blocks\")]\n    ExecutionResults,\n}\n"
  },
  {
    "path": "node/src/types/block/meta_block/state.rs",
    "content": "use datasize::DataSize;\nuse serde::Serialize;\n\nuse super::MergeMismatchError;\n\n#[derive(Clone, Copy, Debug, DataSize)]\npub(crate) enum StateChange {\n    Updated,\n    AlreadyRegistered,\n}\n\nimpl StateChange {\n    pub(crate) fn was_updated(self) -> bool {\n        matches!(self, StateChange::Updated)\n    }\n\n    pub(crate) fn was_already_registered(self) -> bool {\n        matches!(self, StateChange::AlreadyRegistered)\n    }\n}\n\nimpl From<bool> for StateChange {\n    fn from(current_state: bool) -> Self {\n        if current_state {\n            StateChange::AlreadyRegistered\n        } else {\n            StateChange::Updated\n        }\n    }\n}\n\n#[derive(Clone, Copy, Eq, PartialEq, Default, Serialize, Debug, DataSize)]\npub(crate) struct State {\n    pub(super) stored: bool,\n    pub(super) sent_to_transaction_buffer: bool,\n    pub(super) updated_validator_matrix: bool,\n    pub(super) gossiped: bool,\n    pub(super) executed: bool,\n    pub(super) tried_to_sign: bool,\n    pub(super) consensus_notified: bool,\n    pub(super) accumulator_notified: bool,\n    pub(super) synchronizer_notified: bool,\n    pub(super) validator_notified: bool,\n    pub(super) sufficient_finality: bool,\n    pub(super) marked_complete: bool,\n    pub(super) all_actions_done: bool,\n}\n\nimpl State {\n    /// Returns a new `State` with all fields set to `false`.\n    pub(crate) fn new() -> Self {\n        Self::default()\n    }\n\n    /// Returns a new `State` with all fields set to `false` except for `gossiped`.\n    pub(crate) fn new_not_to_be_gossiped() -> Self {\n        State {\n            gossiped: true,\n            ..Self::default()\n        }\n    }\n\n    /// Returns a new `State` with all fields set to `false` except for `stored`.\n    pub(crate) fn new_already_stored() -> Self {\n        State {\n            stored: true,\n            ..Self::default()\n        }\n    }\n\n    /// Returns a new `State` which a historical block is expected 
to have after it has been synced.\n    pub(crate) fn new_after_historical_sync() -> Self {\n        State {\n            stored: true,\n            sent_to_transaction_buffer: false,\n            updated_validator_matrix: true,\n            gossiped: true,\n            executed: true,\n            tried_to_sign: true,\n            consensus_notified: false,\n            accumulator_notified: true,\n            synchronizer_notified: true,\n            validator_notified: false,\n            sufficient_finality: true,\n            marked_complete: true,\n            all_actions_done: false,\n        }\n    }\n\n    pub(crate) fn is_stored(&self) -> bool {\n        self.stored\n    }\n\n    pub(crate) fn is_executed(&self) -> bool {\n        self.executed\n    }\n\n    pub(crate) fn has_sufficient_finality(&self) -> bool {\n        self.sufficient_finality\n    }\n\n    pub(crate) fn is_marked_complete(&self) -> bool {\n        self.marked_complete\n    }\n\n    pub(crate) fn register_as_stored(&mut self) -> StateChange {\n        let outcome = StateChange::from(self.stored);\n        self.stored = true;\n        outcome\n    }\n\n    pub(crate) fn register_as_sent_to_transaction_buffer(&mut self) -> StateChange {\n        let outcome = StateChange::from(self.sent_to_transaction_buffer);\n        self.sent_to_transaction_buffer = true;\n        outcome\n    }\n\n    pub(crate) fn register_updated_validator_matrix(&mut self) -> StateChange {\n        let outcome = StateChange::from(self.updated_validator_matrix);\n        self.updated_validator_matrix = true;\n        outcome\n    }\n\n    pub(crate) fn register_as_gossiped(&mut self) -> StateChange {\n        let outcome = StateChange::from(self.gossiped);\n        self.gossiped = true;\n        outcome\n    }\n\n    pub(crate) fn register_as_executed(&mut self) -> StateChange {\n        let outcome = StateChange::from(self.executed);\n        self.executed = true;\n        outcome\n    }\n\n    pub(crate) fn 
register_we_have_tried_to_sign(&mut self) -> StateChange {\n        let outcome = StateChange::from(self.tried_to_sign);\n        self.tried_to_sign = true;\n        outcome\n    }\n\n    pub(crate) fn register_as_consensus_notified(&mut self) -> StateChange {\n        let outcome = StateChange::from(self.consensus_notified);\n        self.consensus_notified = true;\n        outcome\n    }\n\n    pub(crate) fn register_as_accumulator_notified(&mut self) -> StateChange {\n        let outcome = StateChange::from(self.accumulator_notified);\n        self.accumulator_notified = true;\n        outcome\n    }\n\n    pub(crate) fn register_as_synchronizer_notified(&mut self) -> StateChange {\n        let outcome = StateChange::from(self.synchronizer_notified);\n        self.synchronizer_notified = true;\n        outcome\n    }\n\n    pub(crate) fn register_as_validator_notified(&mut self) -> StateChange {\n        let outcome = StateChange::from(self.validator_notified);\n        self.validator_notified = true;\n        outcome\n    }\n\n    pub(crate) fn register_has_sufficient_finality(&mut self) -> StateChange {\n        let outcome = StateChange::from(self.sufficient_finality);\n        self.sufficient_finality = true;\n        outcome\n    }\n\n    pub(crate) fn register_as_marked_complete(&mut self) -> StateChange {\n        let outcome = StateChange::from(self.marked_complete);\n        self.marked_complete = true;\n        outcome\n    }\n\n    pub(crate) fn register_all_actions_done(&mut self) -> StateChange {\n        let outcome = StateChange::from(self.all_actions_done);\n        self.all_actions_done = true;\n        outcome\n    }\n\n    pub(super) fn merge(mut self, other: State) -> Result<Self, MergeMismatchError> {\n        let State {\n            ref mut stored,\n            ref mut sent_to_transaction_buffer,\n            ref mut updated_validator_matrix,\n            ref mut gossiped,\n            ref mut executed,\n            ref mut 
tried_to_sign,\n            ref mut consensus_notified,\n            ref mut accumulator_notified,\n            ref mut synchronizer_notified,\n            ref mut validator_notified,\n            ref mut sufficient_finality,\n            ref mut marked_complete,\n            ref mut all_actions_done,\n        } = self;\n\n        *stored |= other.stored;\n        *sent_to_transaction_buffer |= other.sent_to_transaction_buffer;\n        *updated_validator_matrix |= other.updated_validator_matrix;\n        *gossiped |= other.gossiped;\n        *executed |= other.executed;\n        *tried_to_sign |= other.tried_to_sign;\n        *consensus_notified |= other.consensus_notified;\n        *accumulator_notified |= other.accumulator_notified;\n        *synchronizer_notified |= other.synchronizer_notified;\n        *validator_notified |= other.validator_notified;\n        *sufficient_finality |= other.sufficient_finality;\n        *marked_complete |= other.marked_complete;\n        *all_actions_done |= other.all_actions_done;\n\n        Ok(self)\n    }\n\n    pub(crate) fn verify_complete(&self) -> bool {\n        self.stored\n            && self.sent_to_transaction_buffer\n            && self.updated_validator_matrix\n            && self.gossiped\n            && self.executed\n            && self.tried_to_sign\n            && self.consensus_notified\n            && self.accumulator_notified\n            && self.synchronizer_notified\n            && self.validator_notified\n            && self.sufficient_finality\n            && self.marked_complete\n    }\n\n    #[cfg(test)]\n    pub(crate) fn set_sufficient_finality(&mut self, has_sufficient_finality: bool) {\n        self.sufficient_finality = has_sufficient_finality;\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn should_merge() {\n        let all_true = State {\n            stored: true,\n            sent_to_transaction_buffer: true,\n            updated_validator_matrix: true,\n       
     gossiped: true,\n            executed: true,\n            tried_to_sign: true,\n            consensus_notified: true,\n            accumulator_notified: true,\n            synchronizer_notified: true,\n            validator_notified: true,\n            sufficient_finality: true,\n            marked_complete: true,\n            all_actions_done: true,\n        };\n        let all_false = State::default();\n\n        assert_eq!(all_true.merge(all_false).unwrap(), all_true);\n        assert_eq!(all_false.merge(all_true).unwrap(), all_true);\n        assert_eq!(all_true.merge(all_true).unwrap(), all_true);\n        assert_eq!(all_false.merge(all_false).unwrap(), all_false);\n    }\n}\n"
  },
  {
    "path": "node/src/types/block/meta_block.rs",
    "content": "mod merge_mismatch_error;\nmod state;\n\nuse std::{convert::TryFrom, sync::Arc};\n\nuse crate::types::TransactionHeader;\nuse datasize::DataSize;\nuse serde::Serialize;\n\nuse casper_types::{\n    execution::ExecutionResult, ActivationPoint, Block, BlockHash, BlockV2, EraId, TransactionHash,\n};\n\npub(crate) use merge_mismatch_error::MergeMismatchError;\npub(crate) use state::State;\n\nuse crate::contract_runtime::ExecutionArtifact;\n\n/// A block along with its execution results and state recording which actions have been taken\n/// related to the block.\n///\n/// Some or all of these actions should be taken after a block is formed on a node via:\n/// * execution (ContractRuntime executing a FinalizedBlock)\n/// * accumulation (BlockAccumulator receiving a gossiped block and its finality signatures)\n/// * historical sync (BlockSynchronizer fetching all data relating to a block)\n#[derive(Clone, Eq, PartialEq, Serialize, Debug, DataSize)]\npub(crate) enum MetaBlock {\n    Forward(ForwardMetaBlock),\n    Historical(HistoricalMetaBlock),\n}\n\nimpl MetaBlock {\n    pub(crate) fn new_forward(\n        block: Arc<BlockV2>,\n        execution_results: Vec<ExecutionArtifact>,\n        state: State,\n    ) -> Self {\n        Self::Forward(ForwardMetaBlock {\n            block,\n            execution_results,\n            state,\n        })\n    }\n\n    pub(crate) fn new_historical(\n        block: Arc<Block>,\n        execution_results: Vec<(TransactionHash, TransactionHeader, ExecutionResult)>,\n        state: State,\n    ) -> Self {\n        Self::Historical(HistoricalMetaBlock {\n            block,\n            execution_results,\n            state,\n        })\n    }\n\n    pub(crate) fn height(&self) -> u64 {\n        match &self {\n            MetaBlock::Forward(meta_block) => meta_block.block.height(),\n            MetaBlock::Historical(meta_block) => meta_block.block.height(),\n        }\n    }\n\n    pub(crate) fn era_id(&self) -> EraId {\n     
   match &self {\n            MetaBlock::Forward(meta_block) => meta_block.block.era_id(),\n            MetaBlock::Historical(meta_block) => meta_block.block.era_id(),\n        }\n    }\n\n    pub(crate) fn is_switch_block(&self) -> bool {\n        match &self {\n            MetaBlock::Forward(meta_block) => meta_block.block.is_switch_block(),\n            MetaBlock::Historical(meta_block) => meta_block.block.is_switch_block(),\n        }\n    }\n\n    pub(crate) fn hash(&self) -> BlockHash {\n        match &self {\n            MetaBlock::Forward(meta_block) => *meta_block.block.hash(),\n            MetaBlock::Historical(meta_block) => *meta_block.block.hash(),\n        }\n    }\n\n    pub(crate) fn mut_state(&mut self) -> &mut State {\n        match self {\n            MetaBlock::Forward(meta_block) => &mut meta_block.state,\n            MetaBlock::Historical(meta_block) => &mut meta_block.state,\n        }\n    }\n\n    pub(crate) fn state(&self) -> &State {\n        match &self {\n            MetaBlock::Forward(meta_block) => &meta_block.state,\n            MetaBlock::Historical(meta_block) => &meta_block.state,\n        }\n    }\n}\n\n#[derive(Clone, Eq, PartialEq, Serialize, Debug, DataSize)]\npub(crate) struct ForwardMetaBlock {\n    pub(crate) block: Arc<BlockV2>,\n    pub(crate) execution_results: Vec<ExecutionArtifact>,\n    pub(crate) state: State,\n}\n\n#[derive(Clone, Eq, PartialEq, Serialize, Debug, DataSize)]\npub(crate) struct HistoricalMetaBlock {\n    pub(crate) block: Arc<Block>,\n    pub(crate) execution_results: Vec<(TransactionHash, TransactionHeader, ExecutionResult)>,\n    pub(crate) state: State,\n}\n\nimpl ForwardMetaBlock {\n    pub(crate) fn merge(mut self, other: ForwardMetaBlock) -> Result<Self, MergeMismatchError> {\n        if self.block != other.block {\n            return Err(MergeMismatchError::Block);\n        }\n\n        if self.execution_results.is_empty() {\n            if !other.execution_results.is_empty() {\n                
self.execution_results = other.execution_results;\n            }\n        } else if !other.execution_results.is_empty()\n            && self.execution_results != other.execution_results\n        {\n            return Err(MergeMismatchError::ExecutionResults);\n        }\n\n        self.state = self.state.merge(other.state)?;\n\n        Ok(self)\n    }\n\n    /// Is this a switch block?\n    pub(crate) fn is_switch_block(&self) -> bool {\n        self.block.is_switch_block()\n    }\n\n    /// Is this the last block before a protocol version upgrade?\n    pub(crate) fn is_upgrade_boundary(&self, activation_point: ActivationPoint) -> bool {\n        match activation_point {\n            ActivationPoint::EraId(era_id) => {\n                self.is_switch_block() && self.block.era_id().successor() == era_id\n            }\n            ActivationPoint::Genesis(_) => false,\n        }\n    }\n}\n\nimpl TryFrom<MetaBlock> for ForwardMetaBlock {\n    type Error = String;\n\n    fn try_from(value: MetaBlock) -> Result<Self, Self::Error> {\n        match value {\n            MetaBlock::Forward(meta_block) => Ok(meta_block),\n            MetaBlock::Historical(_) => {\n                Err(\"Could not convert Historical Meta Block to Forward Meta Block\".to_string())\n            }\n        }\n    }\n}\n\nimpl From<ForwardMetaBlock> for MetaBlock {\n    fn from(value: ForwardMetaBlock) -> Self {\n        Self::Forward(value)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::convert::TryInto;\n\n    use casper_types::{\n        execution::ExecutionResultV2, testing::TestRng, TestBlockBuilder, TransactionV1,\n    };\n\n    use super::*;\n\n    #[test]\n    fn should_merge_when_same_non_empty_execution_results() {\n        let rng = &mut TestRng::new();\n\n        let block = Arc::new(TestBlockBuilder::new().build(rng));\n        let txn = TransactionV1::random(rng);\n        let execution_results = vec![ExecutionArtifact::new(\n            TransactionHash::V1(*txn.hash()),\n    
        (&txn).into(),\n            ExecutionResult::from(ExecutionResultV2::random(rng)),\n            Vec::new(),\n        )];\n        let state = State::new_already_stored();\n\n        let meta_block1: ForwardMetaBlock =\n            MetaBlock::new_forward(Arc::clone(&block), execution_results.clone(), state)\n                .try_into()\n                .unwrap();\n        let meta_block2: ForwardMetaBlock =\n            MetaBlock::new_forward(Arc::clone(&block), execution_results.clone(), state)\n                .try_into()\n                .unwrap();\n\n        let merged = meta_block1.clone().merge(meta_block2.clone()).unwrap();\n\n        assert_eq!(merged.block, block);\n        assert_eq!(merged.execution_results, execution_results);\n        assert_eq!(merged.state, State::new_already_stored());\n        assert_eq!(meta_block2.merge(meta_block1).unwrap(), merged)\n    }\n\n    #[test]\n    fn should_merge_when_both_empty_execution_results() {\n        let rng = &mut TestRng::new();\n\n        let block = Arc::new(TestBlockBuilder::new().build(rng));\n        let state = State::new();\n\n        let meta_block1: ForwardMetaBlock =\n            MetaBlock::new_forward(Arc::clone(&block), vec![], state)\n                .try_into()\n                .unwrap();\n        let meta_block2: ForwardMetaBlock =\n            MetaBlock::new_forward(Arc::clone(&block), vec![], state)\n                .try_into()\n                .unwrap();\n\n        let merged = meta_block1.clone().merge(meta_block2.clone()).unwrap();\n\n        assert_eq!(merged.block, block);\n        assert!(merged.execution_results.is_empty());\n        assert_eq!(merged.state, state);\n        assert_eq!(meta_block2.merge(meta_block1).unwrap(), merged)\n    }\n\n    #[test]\n    fn should_merge_when_one_empty_execution_results() {\n        let rng = &mut TestRng::new();\n\n        let block = Arc::new(TestBlockBuilder::new().build(rng));\n        let txn = TransactionV1::random(rng);\n        
let execution_results = vec![ExecutionArtifact::new(\n            TransactionHash::V1(*txn.hash()),\n            (&txn).into(),\n            ExecutionResult::from(ExecutionResultV2::random(rng)),\n            Vec::new(),\n        )];\n        let state = State::new_not_to_be_gossiped();\n\n        let meta_block1: ForwardMetaBlock =\n            MetaBlock::new_forward(Arc::clone(&block), execution_results.clone(), state)\n                .try_into()\n                .unwrap();\n        let meta_block2: ForwardMetaBlock =\n            MetaBlock::new_forward(Arc::clone(&block), vec![], state)\n                .try_into()\n                .unwrap();\n\n        let merged = meta_block1.clone().merge(meta_block2.clone()).unwrap();\n\n        assert_eq!(merged.block, block);\n        assert_eq!(merged.execution_results, execution_results);\n        assert_eq!(merged.state, state);\n        assert_eq!(meta_block2.merge(meta_block1).unwrap(), merged)\n    }\n\n    #[test]\n    fn should_fail_to_merge_different_blocks() {\n        let rng = &mut TestRng::new();\n\n        let block1 = Arc::new(TestBlockBuilder::new().build(rng));\n        let block2 = Arc::new(\n            TestBlockBuilder::new()\n                .era(block1.era_id().successor())\n                .height(block1.height() + 1)\n                .switch_block(true)\n                .build(rng),\n        );\n        let txn = TransactionV1::random(rng);\n        let execution_results = vec![ExecutionArtifact::new(\n            TransactionHash::V1(*txn.hash()),\n            (&txn).into(),\n            ExecutionResult::from(ExecutionResultV2::random(rng)),\n            Vec::new(),\n        )];\n        let state = State::new();\n\n        let meta_block1: ForwardMetaBlock =\n            MetaBlock::new_forward(block1, execution_results.clone(), state)\n                .try_into()\n                .unwrap();\n        let meta_block2: ForwardMetaBlock =\n            MetaBlock::new_forward(block2, execution_results, 
state)\n                .try_into()\n                .unwrap();\n\n        assert!(matches!(\n            meta_block1.clone().merge(meta_block2.clone()),\n            Err(MergeMismatchError::Block)\n        ));\n        assert!(matches!(\n            meta_block2.merge(meta_block1),\n            Err(MergeMismatchError::Block)\n        ));\n    }\n\n    #[test]\n    fn should_fail_to_merge_different_execution_results() {\n        let rng = &mut TestRng::new();\n\n        let block = Arc::new(TestBlockBuilder::new().build(rng));\n        let txn1 = TransactionV1::random(rng);\n        let execution_results1 = vec![ExecutionArtifact::new(\n            TransactionHash::V1(*txn1.hash()),\n            (&txn1).into(),\n            ExecutionResult::from(ExecutionResultV2::random(rng)),\n            Vec::new(),\n        )];\n        let txn2 = TransactionV1::random(rng);\n        let execution_results2 = vec![ExecutionArtifact::new(\n            TransactionHash::V1(*txn2.hash()),\n            (&txn2).into(),\n            ExecutionResult::from(ExecutionResultV2::random(rng)),\n            Vec::new(),\n        )];\n        let state = State::new();\n\n        let meta_block1: ForwardMetaBlock =\n            MetaBlock::new_forward(Arc::clone(&block), execution_results1, state)\n                .try_into()\n                .unwrap();\n        let meta_block2: ForwardMetaBlock =\n            MetaBlock::new_forward(Arc::clone(&block), execution_results2, state)\n                .try_into()\n                .unwrap();\n\n        assert!(matches!(\n            meta_block1.clone().merge(meta_block2.clone()),\n            Err(MergeMismatchError::ExecutionResults)\n        ));\n        assert!(matches!(\n            meta_block2.merge(meta_block1),\n            Err(MergeMismatchError::ExecutionResults)\n        ));\n    }\n}\n"
  },
  {
    "path": "node/src/types/block.rs",
    "content": "mod approvals_hashes;\nmod block_execution_results_or_chunk;\nmod block_execution_results_or_chunk_id;\nmod block_payload;\nmod block_with_metadata;\nmod executable_block;\nmod finalized_block;\nmod invalid_proposal_error;\nmod meta_block;\n\nuse casper_types::{\n    bytesrepr::{self, ToBytes},\n    Digest, FinalitySignature, SingleBlockRewardedSignatures, TransactionId,\n};\n\npub use block_execution_results_or_chunk::BlockExecutionResultsOrChunk;\npub(crate) use block_execution_results_or_chunk_id::BlockExecutionResultsOrChunkId;\npub use block_payload::BlockPayload;\npub(crate) use block_with_metadata::BlockWithMetadata;\npub use executable_block::ExecutableBlock;\npub use finalized_block::{FinalizedBlock, InternalEraReport};\npub(crate) use invalid_proposal_error::InvalidProposalError;\npub(crate) use meta_block::{\n    ForwardMetaBlock, MergeMismatchError as MetaBlockMergeError, MetaBlock, State as MetaBlockState,\n};\n\n#[cfg_attr(doc, aquamarine::aquamarine)]\n/// ```mermaid\n/// flowchart TD\n///     style Start fill:#66ccff,stroke:#333,stroke-width:4px\n///     style End fill:#66ccff,stroke:#333,stroke-width:4px\n///     style A fill:#ffcc66,stroke:#333,stroke-width:4px\n///     style B fill:#ffcc66,stroke:#333,stroke-width:4px\n///     style Q fill:#ADD8E6,stroke:#333,stroke-width:4px\n///     style S fill:#ADD8E6,stroke:#333,stroke-width:4px\n///     title[FinalitySignature lifecycle]\n///     title---Start\n///     style title fill:#FFF,stroke:#FFF\n///     linkStyle 0 stroke-width:0;\n///     Start --> A[\"Validators\"]\n///     Start --> B[\"Non-validators\"]\n///     A --> C[\"Validator creates FS\"]\n///     A --> D[\"Received</br>broadcasted FS\"]\n///     A --> E[\"Received</br>gossiped FS\"]\n///     D --> I\n///     E --> I\n///     H --> End\n///     C --> G[\"Put FS to storage\"]\n///     G --> H[\"Broadcast FS to Validators\"]\n///     G --> I[\"Register FS</br>in BlockAccumulator\"]\n///     I --> J{\"Has 
sufficient</br>finality</br>and block?\"}\n///     J --> |Yes| K[\"Put all FS</br>to storage\"]\n///     J --> |No| L[\"Keep waiting</br>for more</br>signatures\"]\n///     B --> F[\"Keeping up with</br>the network\"]\n///     F --> M[\"Received</br>gossiped FS\"]\n///     M --> N[\"Register FS</br>in BlockAccumulator\"]\n///     N --> O{\"Has sufficient</br>finality</br>and block?\"}\n///     O --> |No| L\n///     O --> |Yes| P[\"Put all FS</br>to storage\"]\n///     P --> Q[\"Initiate <b>forward</b></br>sync process</br><i>(click)</i>\"]\n///     Q --> R[\"If forward or historical sync</br>process fetched and</br>stored additional FS</br>register them in</br>BlockAccumulator\"]\n///     B --> S[\"Initiate <b>historical</b></br>sync process</br><i>(click)</i>\"]\n///     S --> R\n///     click Q \"../components/block_synchronizer/block_acquisition/enum.BlockAcquisitionState.html\"\n///     click S \"../components/block_synchronizer/block_acquisition/enum.BlockAcquisitionState.html\"\n///     R --> End\n///     K --> End\n/// ```\n#[allow(dead_code)]\ntype ValidatorFinalitySignature = FinalitySignature;\n\n/// Returns the hash of the bytesrepr-encoded deploy_ids.\npub(crate) fn compute_approvals_checksum(\n    txn_ids: Vec<TransactionId>,\n) -> Result<Digest, bytesrepr::Error> {\n    let bytes = txn_ids.into_bytes()?;\n    Ok(Digest::hash(bytes))\n}\n\n/// Creates a new recorded finality signatures, from a validator matrix, and a block\n/// with metadata.\npub(crate) fn create_single_block_rewarded_signatures(\n    validator_matrix: &super::ValidatorMatrix,\n    past_block_with_metadata: &BlockWithMetadata,\n) -> Option<SingleBlockRewardedSignatures> {\n    validator_matrix\n        .validator_weights(past_block_with_metadata.block.era_id())\n        .map(|weights| {\n            SingleBlockRewardedSignatures::from_validator_set(\n                &past_block_with_metadata\n                    .block_signatures\n                    .signers()\n                    
.cloned()\n                    .collect(),\n                weights.validator_public_keys(),\n            )\n        })\n}\n"
  },
  {
    "path": "node/src/types/chunkable.rs",
    "content": "use std::{borrow::Cow, convert::Infallible};\n\nuse casper_types::{\n    bytesrepr::{self, Bytes, ToBytes},\n    execution::{ExecutionResult, ExecutionResultV1, ExecutionResultV2},\n    Digest,\n};\n\nuse super::value_or_chunk::HashingTrieRaw;\n\n/// Implemented for types that are chunked when sending over the wire and/or before storing the\n/// trie store.\npub trait Chunkable {\n    /// Error returned when mapping `Self` into bytes.\n    type Error: std::fmt::Debug;\n    /// Maps `Self` into bytes.\n    ///\n    /// Returns a [`Cow`] instance in case the resulting bytes are the same as input and we don't\n    /// want to reinitialize. This also helps with a case where returning a vector of bytes\n    /// would require instantiating a `Vec<u8>` locally (see [`casper_types::bytesrepr::ToBytes`])\n    /// but can't be returned as reference. Alternative encoding would be to consume `Self` and\n    /// return `Vec<u8>` but that may do it unnecessarily if `Self` would be to used again.\n    fn as_bytes(&self) -> Result<Cow<Vec<u8>>, Self::Error>;\n\n    /// Serializes the `self` using the [`Chunkable`] implementation for that type\n    /// and returns a [`Digest`] of the serialized bytes.\n    fn hash(&self) -> Result<Digest, Self::Error> {\n        let bytes = self.as_bytes()?;\n        Ok(Digest::hash_into_chunks_if_necessary(&bytes))\n    }\n}\n\nimpl Chunkable for Vec<u8> {\n    type Error = Infallible;\n\n    fn as_bytes(&self) -> Result<Cow<Vec<u8>>, Self::Error> {\n        Ok(Cow::Borrowed(self))\n    }\n}\n\nimpl Chunkable for Bytes {\n    type Error = Infallible;\n\n    fn as_bytes(&self) -> Result<Cow<Vec<u8>>, Self::Error> {\n        Ok(Cow::Borrowed(self.inner_bytes()))\n    }\n}\n\nimpl Chunkable for HashingTrieRaw {\n    type Error = Infallible;\n\n    fn as_bytes(&self) -> Result<Cow<Vec<u8>>, Self::Error> {\n        Ok(Cow::Borrowed(self.inner().inner().inner_bytes()))\n    }\n}\n\nimpl Chunkable for &Vec<ExecutionResult> {\n    type 
Error = bytesrepr::Error;\n\n    fn as_bytes(&self) -> Result<Cow<Vec<u8>>, Self::Error> {\n        Ok(Cow::Owned((*self).to_bytes()?))\n    }\n}\n\nimpl Chunkable for Vec<ExecutionResult> {\n    type Error = bytesrepr::Error;\n\n    fn as_bytes(&self) -> Result<Cow<Vec<u8>>, Self::Error> {\n        Ok(Cow::Owned(self.to_bytes()?))\n    }\n}\n\nimpl Chunkable for Vec<&ExecutionResultV1> {\n    type Error = bytesrepr::Error;\n\n    fn as_bytes(&self) -> Result<Cow<Vec<u8>>, Self::Error> {\n        Ok(Cow::Owned(self.to_bytes()?))\n    }\n}\n\nimpl Chunkable for Vec<&ExecutionResultV2> {\n    type Error = bytesrepr::Error;\n\n    fn as_bytes(&self) -> Result<Cow<Vec<u8>>, Self::Error> {\n        Ok(Cow::Owned(self.to_bytes()?))\n    }\n}\n"
  },
  {
    "path": "node/src/types/exit_code.rs",
    "content": "use datasize::DataSize;\nuse signal_hook::consts::signal::{SIGINT, SIGQUIT, SIGTERM};\n\n/// The offset Rust uses by default when generating an exit code after being interrupted by a\n/// termination signal.\nconst SIGNAL_OFFSET: u8 = 128;\n\n/// Exit codes which should be used by the casper-node binary, and provided by the reactor to the\n/// binary.\n///\n/// Note that a panic will result in the Rust process producing an exit code of 101.\n#[derive(Clone, Copy, PartialEq, Eq, Debug, DataSize)]\n#[repr(u8)]\n#[non_exhaustive]\npub enum ExitCode {\n    /// The process should exit with success.  The launcher should proceed to run the next\n    /// installed version of `casper-node`.\n    Success = 0,\n    /// The process should exit with `101`, equivalent to panicking.  The launcher should not\n    /// restart the node.\n    Abort = 101,\n    /// The process should exit with `102`.  It used to be an indication to the launcher\n    /// that it should proceed to run the previous installed version of `casper-node`.\n    /// It is no longer used, but we keep it here to avoid it being reassigned to other features.\n    #[doc(hidden)]\n    DowngradeVersion = 102,\n    /// The process should exit with `103`.  The user requested a node shut down without restart.\n    CleanExitDontRestart = 103,\n    /// The exit code Rust uses by default when interrupted via an `INT` signal.\n    SigInt = SIGNAL_OFFSET + SIGINT as u8,\n    /// The exit code Rust uses by default when interrupted via a `QUIT` signal.\n    SigQuit = SIGNAL_OFFSET + SIGQUIT as u8,\n    /// The exit code Rust uses by default when interrupted via a `TERM` signal.\n    SigTerm = SIGNAL_OFFSET + SIGTERM as u8,\n}\n"
  },
  {
    "path": "node/src/types/max_ttl.rs",
    "content": "use datasize::DataSize;\n\nuse casper_types::{BlockHeader, TimeDiff, Timestamp};\n\n#[derive(DataSize, Debug)]\npub struct MaxTtl(TimeDiff);\n\nimpl MaxTtl {\n    /// Create instance.\n    pub fn new(max_ttl: TimeDiff) -> Self {\n        MaxTtl(max_ttl)\n    }\n\n    /// Get inner value.\n    pub fn value(&self) -> TimeDiff {\n        self.0\n    }\n\n    /// If rearview is earlier than (vantage - ttl duration), ttl has elapsed.\n    pub fn ttl_elapsed(&self, vantage: Timestamp, rearview: Timestamp) -> bool {\n        rearview < vantage.saturating_sub(self.0)\n    }\n\n    /// Determine if orphaned block header is older than ttl requires.\n    pub fn synced_to_ttl(\n        &self,\n        latest_switch_block_timestamp: Timestamp,\n        highest_orphaned_block_header: &BlockHeader,\n    ) -> bool {\n        if highest_orphaned_block_header.is_genesis() {\n            true\n        } else {\n            self.ttl_elapsed(\n                latest_switch_block_timestamp,\n                highest_orphaned_block_header.timestamp(),\n            )\n        }\n    }\n}\n\n/// Wrap a TimeDiff as a MaxTtl.\nimpl From<TimeDiff> for MaxTtl {\n    fn from(value: TimeDiff) -> Self {\n        MaxTtl::new(value)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use casper_types::{testing::TestRng, TestBlockBuilder, TimeDiff, Timestamp};\n\n    use crate::types::MaxTtl;\n\n    const SUB_MAX_TTL: TimeDiff = TimeDiff::from_millis(1);\n    const MAX_TTL: TimeDiff = TimeDiff::from_millis(2);\n\n    fn assert_ttl(\n        higher: Timestamp,\n        lower: Timestamp,\n        max_ttl: TimeDiff,\n        elapsed_expected: bool,\n        msg: &str,\n    ) {\n        let max_ttl: MaxTtl = max_ttl.into();\n        let elapsed = max_ttl.ttl_elapsed(higher, lower);\n        assert_eq!(elapsed, elapsed_expected, \"{}\", msg);\n    }\n\n    #[test]\n    fn should_elapse() {\n        let higher = Timestamp::now();\n        let lower = higher\n            .saturating_sub(MAX_TTL)\n   
         .saturating_sub(TimeDiff::from_millis(1));\n        assert_ttl(\n            higher,\n            lower,\n            MAX_TTL,\n            true,\n            \"1 milli over ttl should have elapsed\",\n        );\n    }\n\n    #[test]\n    fn should_not_elapse() {\n        let higher = Timestamp::now();\n        let lower = higher.saturating_sub(SUB_MAX_TTL);\n        assert_ttl(higher, lower, MAX_TTL, false, \"should not have elapsed\");\n    }\n\n    #[test]\n    fn should_not_elapse_with_equal_timestamps() {\n        let timestamp = Timestamp::now();\n        assert_ttl(\n            timestamp,\n            timestamp,\n            MAX_TTL,\n            false,\n            \"equal timestamps should not be elapsed\",\n        );\n    }\n\n    #[test]\n    fn should_not_elapse_on_cusp() {\n        let higher = Timestamp::now();\n        let lower = higher.saturating_sub(MAX_TTL);\n        assert_ttl(\n            higher,\n            lower,\n            MAX_TTL,\n            false,\n            \"should not have elapsed exactly on cusp of ttl\",\n        );\n    }\n\n    #[test]\n    fn should_not_err() {\n        let higher = Timestamp::now();\n        let lower = higher.saturating_sub(SUB_MAX_TTL);\n        let max_ttl: MaxTtl = MAX_TTL.into();\n        let elapsed = max_ttl.ttl_elapsed(lower, higher);\n        assert!(\n            !elapsed,\n            \"can't have elapsed because timestamps are chronologically reversed (programmer error)\"\n        );\n    }\n\n    fn assert_sync_to_ttl(is_genesis: bool, ttl_synced_expected: bool, msg: &str) {\n        let max_ttl: MaxTtl = MAX_TTL.into();\n        let rng = &mut TestRng::new();\n        let (latest_switch_block_timestamp, highest_orphaned_block_header) = if is_genesis {\n            let block = TestBlockBuilder::new()\n                .era(0)\n                .height(0)\n                .switch_block(true)\n                .build(rng);\n            // it does not matter what this value is; if 
genesis has been reached\n            // while walking backwards, there are no earlier blocks to get\n            // thus all sync scenarios have succeeded / are satisfied\n            let timestamp = Timestamp::random(rng);\n            (timestamp, block.header().clone())\n        } else {\n            let block = TestBlockBuilder::new()\n                .era(1)\n                .height(1)\n                .switch_block(false)\n                .build(rng);\n            // project a sufficiently advanced future timestamp for the test.\n            let mut timestamp = block.timestamp().saturating_add(max_ttl.value());\n            if ttl_synced_expected {\n                timestamp = timestamp.saturating_add(TimeDiff::from_millis(1))\n            }\n            (timestamp, block.header().clone())\n        };\n        let synced = max_ttl.synced_to_ttl(\n            latest_switch_block_timestamp,\n            &highest_orphaned_block_header.into(),\n        );\n        assert_eq!(synced, ttl_synced_expected, \"{}\", msg);\n    }\n\n    #[test]\n    fn should_handle_genesis_special_case() {\n        assert_sync_to_ttl(\n            true,\n            true,\n            \"genesis should always satisfy sync to ttl requirement\",\n        );\n    }\n\n    #[test]\n    fn should_be_synced_to_ttl() {\n        assert_sync_to_ttl(false, true, \"should be sync'd to ttl\");\n    }\n\n    #[test]\n    fn should_not_be_synced_to_ttl() {\n        assert_sync_to_ttl(false, false, \"should not be sync'd to ttl\");\n    }\n}\n"
  },
  {
    "path": "node/src/types/node_config.rs",
    "content": "use datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse casper_types::{BlockHash, TimeDiff};\n\nconst DEFAULT_IDLE_TOLERANCE: &str = \"20min\";\nconst DEFAULT_MAX_ATTEMPTS: usize = 3;\nconst DEFAULT_CONTROL_LOGIC_DEFAULT_DELAY: &str = \"1sec\";\nconst DEFAULT_SHUTDOWN_FOR_UPGRADE_TIMEOUT: &str = \"2min\";\nconst DEFAULT_UPGRADE_TIMEOUT: &str = \"30sec\";\n\n/// Node sync configuration.\n#[derive(DataSize, Debug, Deserialize, Serialize, Clone, Default, Eq, PartialEq)]\n#[serde(rename_all = \"lowercase\")]\npub enum SyncHandling {\n    /// Attempt to acquire all historical state back to genesis.\n    Genesis,\n    /// Only attempt to acquire necessary blocks to satisfy Time to Live requirements.\n    #[default]\n    Ttl,\n    /// Don't attempt to sync historical blocks.\n    NoSync,\n    /// Don't attempt to sync historical blocks and shut down node instead of switching to KeepUp\n    /// after acquiring the first complete block\n    CompleteBlock,\n    /// The node operates in isolation - no peers are needed, the node won't wait for peers to\n    /// switch to KeepUp.\n    Isolated,\n}\n\nimpl SyncHandling {\n    /// Sync to Genesis?\n    pub fn is_sync_to_genesis(&self) -> bool {\n        matches!(self, SyncHandling::Genesis)\n    }\n\n    /// Sync to Ttl?\n    pub fn is_sync_to_ttl(&self) -> bool {\n        matches!(self, SyncHandling::Ttl)\n    }\n\n    /// Don't Sync?\n    pub fn is_no_sync(&self) -> bool {\n        matches!(self, SyncHandling::NoSync)\n    }\n\n    /// Don't Sync and shut down?\n    pub fn is_complete_block(&self) -> bool {\n        matches!(self, SyncHandling::CompleteBlock)\n    }\n\n    /// Isolated?\n    pub fn is_isolated(&self) -> bool {\n        matches!(self, SyncHandling::Isolated)\n    }\n}\n\n/// Node fast-sync configuration.\n#[derive(DataSize, Debug, Deserialize, Serialize, Clone)]\n// Disallow unknown fields to ensure config files and command-line overrides contain valid 
keys.\n#[serde(deny_unknown_fields)]\npub struct NodeConfig {\n    /// Hash used as a trust anchor when joining, if any.\n    pub trusted_hash: Option<BlockHash>,\n\n    /// Which historical sync option?\n    ///  Genesis: sync all the way back to genesis\n    ///  Ttl: sync the necessary number of historical blocks to satisfy TTL requirement.\n    ///  NoSync: don't attempt to get any historical records; i.e. go forward only.\n    pub sync_handling: SyncHandling,\n\n    /// Idle time after which the syncing process is considered stalled.\n    pub idle_tolerance: TimeDiff,\n\n    /// When the syncing process is considered stalled, it'll be retried up to `max_attempts`\n    /// times.\n    pub max_attempts: usize,\n\n    /// Default delay for the control events that have no dedicated delay requirements.\n    pub control_logic_default_delay: TimeDiff,\n\n    /// Flag which forces the node to resync all of the blocks.\n    pub force_resync: bool,\n\n    /// Shutdown for upgrade state timeout, after which the node will upgrade regardless whether\n    /// all the conditions are satisfied.\n    pub shutdown_for_upgrade_timeout: TimeDiff,\n\n    /// Maximum time a node will wait for an upgrade to commit.\n    pub upgrade_timeout: TimeDiff,\n\n    /// If true, prevents a node from shutting down if it is supposed to be a validator in the era.\n    pub prevent_validator_shutdown: bool,\n}\n\nimpl Default for NodeConfig {\n    fn default() -> NodeConfig {\n        NodeConfig {\n            trusted_hash: None,\n            sync_handling: SyncHandling::default(),\n            idle_tolerance: DEFAULT_IDLE_TOLERANCE.parse().unwrap(),\n            max_attempts: DEFAULT_MAX_ATTEMPTS,\n            control_logic_default_delay: DEFAULT_CONTROL_LOGIC_DEFAULT_DELAY.parse().unwrap(),\n            force_resync: false,\n            shutdown_for_upgrade_timeout: DEFAULT_SHUTDOWN_FOR_UPGRADE_TIMEOUT.parse().unwrap(),\n            upgrade_timeout: DEFAULT_UPGRADE_TIMEOUT.parse().unwrap(),\n   
         prevent_validator_shutdown: false,\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/types/node_id.rs",
    "content": "use std::fmt::{self, Debug, Display, Formatter};\n\nuse datasize::DataSize;\nuse hex_fmt::HexFmt;\nuse once_cell::sync::Lazy;\n#[cfg(test)]\nuse rand::Rng;\nuse serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer};\n\n#[cfg(test)]\nuse casper_types::testing::TestRng;\n\nuse crate::{components::rest_server::DocExample, tls::KeyFingerprint};\n\n/// The network identifier for a node.\n///\n/// A node's ID is derived from the fingerprint of its TLS certificate.\n#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, DataSize)]\npub struct NodeId(KeyFingerprint);\n\nimpl NodeId {\n    /// Generates a random instance using a `TestRng`.\n    #[cfg(test)]\n    pub(crate) fn random(rng: &mut TestRng) -> Self {\n        Self(rng.gen())\n    }\n\n    /// Returns the raw bytes of the underlying hash of the ID.\n    #[inline]\n    pub fn hash_bytes(&self) -> &[u8] {\n        self.0.as_ref()\n    }\n}\n\n/// Used to serialize and deserialize `NodeID` where the (de)serializer isn't a human-readable type.\n#[derive(Serialize, Deserialize)]\nenum NodeIdAsBytes {\n    Tls(KeyFingerprint),\n}\n\n/// Used to serialize and deserialize `NodeID` where the (de)serializer is a human-readable type.\n#[derive(Serialize, Deserialize)]\nenum NodeIdAsString {\n    Tls(String),\n}\n\nimpl Serialize for NodeId {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            NodeIdAsString::Tls(base16::encode_lower(&self.0)).serialize(serializer)\n        } else {\n            NodeIdAsBytes::Tls(self.0).serialize(serializer)\n        }\n    }\n}\n\nimpl<'de> Deserialize<'de> for NodeId {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        if deserializer.is_human_readable() {\n            let NodeIdAsString::Tls(hex_value) = NodeIdAsString::deserialize(deserializer)?;\n\n            let bytes = 
base16::decode(hex_value.as_bytes()).map_err(D::Error::custom)?;\n            if bytes.len() != KeyFingerprint::LENGTH {\n                return Err(SerdeError::custom(\"wrong length\"));\n            }\n            let mut array = [0_u8; KeyFingerprint::LENGTH];\n            array.copy_from_slice(bytes.as_slice());\n\n            Ok(NodeId(KeyFingerprint::from(array)))\n        } else {\n            let NodeIdAsBytes::Tls(key_fingerprint) = NodeIdAsBytes::deserialize(deserializer)?;\n            Ok(NodeId(key_fingerprint))\n        }\n    }\n}\n\nstatic NODE_ID: Lazy<NodeId> =\n    Lazy::new(|| NodeId(KeyFingerprint::from([1u8; KeyFingerprint::LENGTH])));\n\nimpl DocExample for NodeId {\n    fn doc_example() -> &'static Self {\n        &NODE_ID\n    }\n}\n\nimpl Debug for NodeId {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(formatter, \"NodeId({})\", base16::encode_lower(&self.0))\n    }\n}\n\nimpl Display for NodeId {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(formatter, \"tls:{:10}\", HexFmt(&self.0))\n    }\n}\n\nimpl From<KeyFingerprint> for NodeId {\n    fn from(id: KeyFingerprint) -> Self {\n        NodeId(id)\n    }\n}\n\n#[cfg(test)]\nimpl From<[u8; KeyFingerprint::LENGTH]> for NodeId {\n    fn from(raw_bytes: [u8; KeyFingerprint::LENGTH]) -> Self {\n        NodeId(KeyFingerprint::from(raw_bytes))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    const EXAMPLE_HASH_RAW: [u8; 64] = [\n        0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,\n        0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d,\n        0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c,\n        0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b,\n        0x3c, 0x3d, 0x3e, 0x3f,\n    ];\n\n    #[test]\n    fn serde_roundtrip_tls() {\n      
  let mut rng = crate::new_rng();\n        let node_id = NodeId::random(&mut rng);\n        let serialized = bincode::serialize(&node_id).unwrap();\n        let decoded = bincode::deserialize(&serialized).unwrap();\n        assert_eq!(node_id, decoded);\n    }\n\n    #[test]\n    fn bincode_known_specimen() {\n        let node_id = NodeId::from(EXAMPLE_HASH_RAW);\n        let serialized = bincode::serialize(&node_id).unwrap();\n\n        // The bincode representation is a 4 byte tag of all zeros, followed by the hash bytes.\n        let expected: [u8; 68] = [\n            0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,\n            0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,\n            0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25,\n            0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33,\n            0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,\n        ];\n\n        assert_eq!(&expected[..], serialized.as_slice());\n    }\n\n    #[test]\n    fn json_known_specimen() {\n        let node_id = NodeId::from(EXAMPLE_HASH_RAW);\n        let json_string = serde_json::to_string_pretty(&node_id).unwrap();\n\n        let expected = \"{\\n  \\\"Tls\\\": \\\"000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f\\\"\\n}\";\n        assert_eq!(expected, json_string.as_str());\n    }\n\n    #[test]\n    fn msgpack_default_settings_known_specimen() {\n        let node_id = NodeId::from(EXAMPLE_HASH_RAW);\n\n        let serialized = rmp_serde::to_vec(&node_id).unwrap();\n\n        let expected: [u8; 132] = [\n            129, 0, 217, 128, 48, 48, 48, 49, 48, 50, 48, 51, 48, 52, 48, 53, 48, 54, 48, 55, 48,\n            56, 48, 57, 48, 97, 48, 98, 48, 99, 48, 100, 48, 101, 48, 102, 49, 48, 49, 49, 49, 50,\n            49, 51, 49, 
52, 49, 53, 49, 54, 49, 55, 49, 56, 49, 57, 49, 97, 49, 98, 49, 99, 49,\n            100, 49, 101, 49, 102, 50, 48, 50, 49, 50, 50, 50, 51, 50, 52, 50, 53, 50, 54, 50, 55,\n            50, 56, 50, 57, 50, 97, 50, 98, 50, 99, 50, 100, 50, 101, 50, 102, 51, 48, 51, 49, 51,\n            50, 51, 51, 51, 52, 51, 53, 51, 54, 51, 55, 51, 56, 51, 57, 51, 97, 51, 98, 51, 99, 51,\n            100, 51, 101, 51, 102,\n        ];\n\n        assert_eq!(serialized, expected);\n    }\n\n    #[test]\n    fn json_roundtrip_tls() {\n        let mut rng = crate::new_rng();\n        let node_id = NodeId::random(&mut rng);\n        let json_string = serde_json::to_string_pretty(&node_id).unwrap();\n        let decoded = serde_json::from_str(&json_string).unwrap();\n        assert_eq!(node_id, decoded);\n    }\n}\n"
  },
  {
    "path": "node/src/types/status_feed.rs",
    "content": "use std::{\n    collections::BTreeMap,\n    net::{IpAddr, Ipv4Addr, SocketAddr},\n    time::Duration,\n};\n\nuse once_cell::sync::Lazy;\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse casper_binary_port::ConsensusStatus;\nuse casper_types::{\n    ActivationPoint, AvailableBlockRange, Block, BlockHash, BlockSynchronizerStatus, Digest, EraId,\n    NextUpgrade, Peers, ProtocolVersion, PublicKey, TimeDiff, Timestamp,\n};\n\nuse crate::{\n    components::rest_server::{DocExample, DOCS_EXAMPLE_PROTOCOL_VERSION},\n    reactor::main_reactor::ReactorState,\n    types::NodeId,\n};\n\nstatic CHAINSPEC_INFO: Lazy<ChainspecInfo> = Lazy::new(|| {\n    let next_upgrade = NextUpgrade::new(\n        ActivationPoint::EraId(EraId::from(42)),\n        ProtocolVersion::from_parts(2, 0, 1),\n    );\n    ChainspecInfo {\n        name: String::from(\"casper-example\"),\n        next_upgrade: Some(next_upgrade),\n    }\n});\n\nstatic GET_STATUS_RESULT: Lazy<GetStatusResult> = Lazy::new(|| {\n    let node_id = NodeId::doc_example();\n    let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 54321);\n    let mut peers = BTreeMap::new();\n    peers.insert(*node_id, socket_addr.to_string());\n    let status_feed = StatusFeed {\n        last_added_block: Some(Block::example().clone()),\n        peers,\n        chainspec_info: ChainspecInfo::doc_example().clone(),\n        our_public_signing_key: Some(PublicKey::example().clone()),\n        round_length: Some(TimeDiff::from_millis(1 << 16)),\n        version: crate::VERSION_STRING.as_str(),\n        node_uptime: Duration::from_secs(13),\n        reactor_state: ReactorState::Initialize,\n        last_progress: Timestamp::from(0),\n        available_block_range: AvailableBlockRange::RANGE_0_0,\n        block_sync: BlockSynchronizerStatus::example().clone(),\n        starting_state_root_hash: Digest::default(),\n        latest_switch_block_hash: Some(BlockHash::default()),\n    };\n    
GetStatusResult::new(status_feed, DOCS_EXAMPLE_PROTOCOL_VERSION)\n});\n\n/// Summary information from the chainspec.\n#[derive(Debug, Serialize, Deserialize, Clone)]\npub struct ChainspecInfo {\n    /// Name of the network.\n    name: String,\n    next_upgrade: Option<NextUpgrade>,\n}\n\nimpl DocExample for ChainspecInfo {\n    fn doc_example() -> &'static Self {\n        &CHAINSPEC_INFO\n    }\n}\n\nimpl ChainspecInfo {\n    pub(crate) fn new(chainspec_network_name: String, next_upgrade: Option<NextUpgrade>) -> Self {\n        ChainspecInfo {\n            name: chainspec_network_name,\n            next_upgrade,\n        }\n    }\n}\n\n/// Data feed for client \"info_get_status\" endpoint.\n#[derive(Debug, Serialize)]\npub struct StatusFeed {\n    /// The last block added to the chain.\n    pub last_added_block: Option<Block>,\n    /// The peer nodes which are connected to this node.\n    pub peers: BTreeMap<NodeId, String>,\n    /// The chainspec info for this node.\n    pub chainspec_info: ChainspecInfo,\n    /// Our public signing key.\n    pub our_public_signing_key: Option<PublicKey>,\n    /// The next round length if this node is a validator.\n    pub round_length: Option<TimeDiff>,\n    /// The compiled node version.\n    pub version: &'static str,\n    /// Time that passed since the node has started.\n    pub node_uptime: Duration,\n    /// The current state of node reactor.\n    pub reactor_state: ReactorState,\n    /// Timestamp of the last recorded progress in the reactor.\n    pub last_progress: Timestamp,\n    /// The available block range in storage.\n    pub available_block_range: AvailableBlockRange,\n    /// The status of the block synchronizer builders.\n    pub block_sync: BlockSynchronizerStatus,\n    /// The state root hash of the lowest block in the available block range.\n    pub starting_state_root_hash: Digest,\n    /// The hash of the latest switch block.\n    pub latest_switch_block_hash: Option<BlockHash>,\n}\n\nimpl StatusFeed {\n    
#[allow(clippy::too_many_arguments)]\n    pub(crate) fn new(\n        last_added_block: Option<Block>,\n        peers: BTreeMap<NodeId, String>,\n        chainspec_info: ChainspecInfo,\n        consensus_status: Option<ConsensusStatus>,\n        node_uptime: Duration,\n        reactor_state: ReactorState,\n        last_progress: Timestamp,\n        available_block_range: AvailableBlockRange,\n        block_sync: BlockSynchronizerStatus,\n        starting_state_root_hash: Digest,\n        latest_switch_block_hash: Option<BlockHash>,\n    ) -> Self {\n        let (our_public_signing_key, round_length) =\n            consensus_status.map_or((None, None), |consensus_status| {\n                (\n                    Some(consensus_status.validator_public_key().clone()),\n                    consensus_status.round_length(),\n                )\n            });\n        StatusFeed {\n            last_added_block,\n            peers,\n            chainspec_info,\n            our_public_signing_key,\n            round_length,\n            version: crate::VERSION_STRING.as_str(),\n            node_uptime,\n            reactor_state,\n            last_progress,\n            available_block_range,\n            block_sync,\n            starting_state_root_hash,\n            latest_switch_block_hash,\n        }\n    }\n}\n\n/// Minimal info of a `Block`.\n#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)]\n#[serde(deny_unknown_fields)]\npub struct MinimalBlockInfo {\n    hash: BlockHash,\n    timestamp: Timestamp,\n    era_id: EraId,\n    height: u64,\n    state_root_hash: Digest,\n    creator: PublicKey,\n}\n\nimpl From<Block> for MinimalBlockInfo {\n    fn from(block: Block) -> Self {\n        let proposer = match &block {\n            Block::V1(v1) => v1.proposer().clone(),\n            Block::V2(v2) => v2.proposer().clone(),\n        };\n\n        MinimalBlockInfo {\n            hash: *block.hash(),\n            timestamp: block.timestamp(),\n            
era_id: block.era_id(),\n            height: block.height(),\n            state_root_hash: *block.state_root_hash(),\n            creator: proposer,\n        }\n    }\n}\n\n/// Result for \"info_get_status\" RPC response.\n#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)]\n#[serde(deny_unknown_fields)]\npub struct GetStatusResult {\n    /// The node ID and network address of each connected peer.\n    pub peers: Peers,\n    /// The RPC API version.\n    #[schemars(with = \"String\")]\n    pub api_version: ProtocolVersion,\n    /// The compiled node version.\n    pub build_version: String,\n    /// The chainspec name.\n    pub chainspec_name: String,\n    /// The state root hash of the lowest block in the available block range.\n    pub starting_state_root_hash: Digest,\n    /// The minimal info of the last block from the linear chain.\n    pub last_added_block_info: Option<MinimalBlockInfo>,\n    /// Our public signing key.\n    pub our_public_signing_key: Option<PublicKey>,\n    /// The next round length if this node is a validator.\n    pub round_length: Option<TimeDiff>,\n    /// Information about the next scheduled upgrade.\n    pub next_upgrade: Option<NextUpgrade>,\n    /// Time that passed since the node has started.\n    pub uptime: TimeDiff,\n    /// The current state of node reactor.\n    pub reactor_state: ReactorState,\n    /// Timestamp of the last recorded progress in the reactor.\n    pub last_progress: Timestamp,\n    /// The available block range in storage.\n    pub available_block_range: AvailableBlockRange,\n    /// The status of the block synchronizer builders.\n    pub block_sync: BlockSynchronizerStatus,\n    /// The hash of the latest switch block.\n    pub latest_switch_block_hash: Option<BlockHash>,\n}\n\nimpl GetStatusResult {\n    #[allow(deprecated)]\n    pub(crate) fn new(status_feed: StatusFeed, api_version: ProtocolVersion) -> Self {\n        GetStatusResult {\n            peers: Peers::from(status_feed.peers),\n     
       api_version,\n            chainspec_name: status_feed.chainspec_info.name,\n            starting_state_root_hash: status_feed.starting_state_root_hash,\n            last_added_block_info: status_feed.last_added_block.map(Into::into),\n            our_public_signing_key: status_feed.our_public_signing_key,\n            round_length: status_feed.round_length,\n            next_upgrade: status_feed.chainspec_info.next_upgrade,\n            uptime: status_feed.node_uptime.into(),\n            reactor_state: status_feed.reactor_state,\n            last_progress: status_feed.last_progress,\n            available_block_range: status_feed.available_block_range,\n            block_sync: status_feed.block_sync,\n            latest_switch_block_hash: status_feed.latest_switch_block_hash,\n            #[cfg(not(test))]\n            build_version: crate::VERSION_STRING.clone(),\n\n            //  Prevent these values from changing between test sessions\n            #[cfg(test)]\n            build_version: String::from(\"1.0.0-xxxxxxxxx@DEBUG\"),\n        }\n    }\n}\n\nimpl DocExample for GetStatusResult {\n    fn doc_example() -> &'static Self {\n        &GET_STATUS_RESULT\n    }\n}\n"
  },
  {
    "path": "node/src/types/sync_leap.rs",
    "content": "use std::{\n    collections::{BTreeMap, HashMap},\n    fmt::{self, Display, Formatter},\n    iter,\n};\n\nuse datasize::DataSize;\nuse itertools::Itertools;\nuse num_rational::Ratio;\nuse serde::{Deserialize, Serialize};\nuse thiserror::Error;\nuse tracing::error;\n\nuse casper_types::{\n    crypto, BlockHash, BlockHeader, BlockHeaderWithSignatures,\n    BlockHeaderWithSignaturesValidationError, BlockSignatures, Digest, EraId, ProtocolConfig,\n};\n\nuse crate::{\n    components::fetcher::{FetchItem, Tag},\n    types::EraValidatorWeights,\n    utils::{self, BlockSignatureError},\n};\n\nuse super::sync_leap_validation_metadata::SyncLeapValidationMetaData;\n\n#[derive(Error, Debug)]\npub(crate) enum SyncLeapValidationError {\n    #[error(\"No ancestors of the trusted block provided.\")]\n    MissingTrustedAncestors,\n    #[error(\"The SyncLeap does not contain proof that all its headers are on the right chain.\")]\n    IncompleteProof,\n    #[error(transparent)]\n    HeadersNotSufficientlySigned(BlockSignatureError),\n    #[error(\"The block signatures are not cryptographically valid: {0}\")]\n    Crypto(crypto::Error),\n    #[error(transparent)]\n    BlockHeaderWithSignatures(BlockHeaderWithSignaturesValidationError),\n    #[error(\"Too many switch blocks: leaping across that many eras is not allowed.\")]\n    TooManySwitchBlocks,\n    #[error(\"Trusted ancestor headers must be in reverse chronological order.\")]\n    TrustedAncestorsNotSorted,\n    #[error(\"Last trusted ancestor is not a switch block.\")]\n    MissingAncestorSwitchBlock,\n    #[error(\n        \"Only the last trusted ancestor is allowed to be a switch block or the genesis block.\"\n    )]\n    UnexpectedAncestorSwitchBlock,\n    #[error(\"Signed block headers present despite trusted_ancestor_only flag.\")]\n    UnexpectedBlockHeadersWithSignatures,\n}\n\n/// Identifier for a SyncLeap.\n#[derive(Debug, Serialize, Deserialize, Copy, Clone, Hash, PartialEq, Eq, DataSize)]\npub(crate) 
struct SyncLeapIdentifier {\n    /// The block hash of the initial trusted block.\n    block_hash: BlockHash,\n    /// If true, block_header_with_signaturess are not required.\n    trusted_ancestor_only: bool,\n}\n\nimpl SyncLeapIdentifier {\n    pub(crate) fn sync_to_tip(block_hash: BlockHash) -> Self {\n        SyncLeapIdentifier {\n            block_hash,\n            trusted_ancestor_only: false,\n        }\n    }\n\n    pub(crate) fn sync_to_historical(block_hash: BlockHash) -> Self {\n        SyncLeapIdentifier {\n            block_hash,\n            trusted_ancestor_only: true,\n        }\n    }\n\n    pub(crate) fn block_hash(&self) -> BlockHash {\n        self.block_hash\n    }\n\n    pub(crate) fn trusted_ancestor_only(&self) -> bool {\n        self.trusted_ancestor_only\n    }\n}\n\nimpl Display for SyncLeapIdentifier {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            f,\n            \"{} trusted_ancestor_only: {}\",\n            self.block_hash, self.trusted_ancestor_only\n        )\n    }\n}\n\n// Additional data for syncing blocks immediately after upgrades\n#[derive(Debug, Clone, Copy)]\npub(crate) struct GlobalStatesMetadata {\n    // Hash, era ID, global state and protocol version of the block after upgrade\n    pub(crate) after_hash: BlockHash,\n    pub(crate) after_era_id: EraId,\n    pub(crate) after_state_hash: Digest,\n    // Hash, global state and protocol version of the block before upgrade\n    pub(crate) before_hash: BlockHash,\n    pub(crate) before_state_hash: Digest,\n}\n\n/// Headers and signatures required to prove that if a given trusted block hash is on the correct\n/// chain, then so is a later header, which should be the most recent one according to the sender.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, DataSize)]\npub(crate) struct SyncLeap {\n    /// Requester indicates if they want only the header and ancestor headers,\n    /// of if they want everything.\n    pub 
trusted_ancestor_only: bool,\n    /// The header of the trusted block specified by hash by the requester.\n    pub trusted_block_header: BlockHeader,\n    /// The block headers of the trusted block's ancestors, back to the most recent switch block.\n    pub trusted_ancestor_headers: Vec<BlockHeader>,\n    /// The headers of all switch blocks known to the sender, after the trusted block but before\n    /// their highest block, with signatures, plus the signed highest block.\n    pub block_headers_with_signatures: Vec<BlockHeaderWithSignatures>,\n}\n\nimpl SyncLeap {\n    pub(crate) fn era_validator_weights(\n        &self,\n        fault_tolerance_fraction: Ratio<u64>,\n        protocol_config: &ProtocolConfig,\n    ) -> impl Iterator<Item = EraValidatorWeights> + '_ {\n        // determine if the validator set has been updated in the\n        // current protocol version through an emergency upgrade\n        let validators_changed_in_current_protocol = protocol_config\n            .global_state_update\n            .as_ref()\n            .is_some_and(|global_state_update| global_state_update.validators.is_some());\n        let current_protocol_version = protocol_config.version;\n\n        let block_protocol_versions: HashMap<_, _> = self\n            .headers()\n            .map(|hdr| (hdr.height(), hdr.protocol_version()))\n            .collect();\n        self.switch_blocks_headers()\n            .find(|block_header| block_header.is_genesis())\n            .into_iter()\n            .flat_map(move |block_header| {\n                Some(EraValidatorWeights::new(\n                    EraId::default(),\n                    block_header.next_era_validator_weights().cloned()?,\n                    fault_tolerance_fraction,\n                ))\n            })\n            .chain(\n                self.switch_blocks_headers()\n                    // filter out switch blocks preceding upgrades - we don't want to read the era\n                    // validators directly from 
them, as they might have been altered by the\n                    // upgrade, we'll get them from the blocks' global states instead\n                    //\n                    // we can reliably determine if the validator set was changed by an upgrade to\n                    // the current protocol version by looking at the chainspec. If validators have\n                    // not been altered in any way, then we can use the set reported in the sync\n                    // leap by the previous switch block and not read the global states\n                    .filter(move |block_header| {\n                        block_protocol_versions\n                            .get(&(block_header.height() + 1))\n                            .is_none_or(|other_protocol_version| {\n                                if block_header.protocol_version() == *other_protocol_version {\n                                    true\n                                } else if *other_protocol_version == current_protocol_version {\n                                    !validators_changed_in_current_protocol\n                                } else {\n                                    false\n                                }\n                            })\n                    })\n                    .flat_map(move |block_header| {\n                        Some(EraValidatorWeights::new(\n                            block_header.next_block_era_id(),\n                            block_header.next_era_validator_weights().cloned()?,\n                            fault_tolerance_fraction,\n                        ))\n                    }),\n            )\n    }\n\n    pub(crate) fn global_states_for_sync_across_upgrade(&self) -> Option<GlobalStatesMetadata> {\n        let headers_by_height: HashMap<_, _> =\n            self.headers().map(|hdr| (hdr.height(), hdr)).collect();\n\n        let maybe_header_before_upgrade = self.switch_blocks_headers().find(|header| {\n            headers_by_height\n           
     .get(&(header.height() + 1))\n                .is_some_and(|other_header| {\n                    other_header.protocol_version() != header.protocol_version()\n                })\n        });\n\n        maybe_header_before_upgrade.map(|before_header| {\n            let after_header = headers_by_height\n                .get(&(before_header.height() + 1))\n                .unwrap(); // safe, because it had to be Some when we checked it above\n            GlobalStatesMetadata {\n                after_hash: after_header.block_hash(),\n                after_era_id: after_header.era_id(),\n                after_state_hash: *after_header.state_root_hash(),\n                before_hash: before_header.block_hash(),\n                before_state_hash: *before_header.state_root_hash(),\n            }\n        })\n    }\n\n    pub(crate) fn highest_block_height(&self) -> u64 {\n        self.headers()\n            .map(BlockHeader::height)\n            .max()\n            .unwrap_or_else(|| self.trusted_block_header.height())\n    }\n\n    pub(crate) fn highest_block_header_and_signatures(\n        &self,\n    ) -> (&BlockHeader, Option<&BlockSignatures>) {\n        let header = self\n            .headers()\n            .max_by_key(|header| header.height())\n            .unwrap_or(&self.trusted_block_header);\n        let signatures = self\n            .block_headers_with_signatures\n            .iter()\n            .find(|block_header_with_signatures| {\n                block_header_with_signatures.block_header().height() == header.height()\n            })\n            .map(|block_header_with_signatures| block_header_with_signatures.block_signatures());\n        (header, signatures)\n    }\n\n    pub(crate) fn highest_block_hash(&self) -> BlockHash {\n        self.highest_block_header_and_signatures().0.block_hash()\n    }\n\n    pub(crate) fn headers(&self) -> impl Iterator<Item = &BlockHeader> {\n        iter::once(&self.trusted_block_header)\n            
.chain(&self.trusted_ancestor_headers)\n            .chain(\n                self.block_headers_with_signatures\n                    .iter()\n                    .map(|sh| sh.block_header()),\n            )\n    }\n\n    pub(crate) fn switch_blocks_headers(&self) -> impl Iterator<Item = &BlockHeader> {\n        self.headers().filter(|header| header.is_switch_block())\n    }\n}\n\nimpl Display for SyncLeap {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            f,\n            \"sync leap message for trusted {}\",\n            self.trusted_block_header.block_hash()\n        )\n    }\n}\n\nimpl FetchItem for SyncLeap {\n    type Id = SyncLeapIdentifier;\n    type ValidationError = SyncLeapValidationError;\n    type ValidationMetadata = SyncLeapValidationMetaData;\n\n    const TAG: Tag = Tag::SyncLeap;\n\n    fn fetch_id(&self) -> Self::Id {\n        SyncLeapIdentifier {\n            block_hash: self.trusted_block_header.block_hash(),\n            trusted_ancestor_only: self.trusted_ancestor_only,\n        }\n    }\n\n    fn validate(\n        &self,\n        validation_metadata: &SyncLeapValidationMetaData,\n    ) -> Result<(), Self::ValidationError> {\n        if self.trusted_ancestor_headers.is_empty() && self.trusted_block_header.height() > 0 {\n            return Err(SyncLeapValidationError::MissingTrustedAncestors);\n        }\n        if self.block_headers_with_signatures.len() as u64\n            > validation_metadata.recent_era_count.saturating_add(1)\n        {\n            return Err(SyncLeapValidationError::TooManySwitchBlocks);\n        }\n        if self\n            .trusted_ancestor_headers\n            .iter()\n            .tuple_windows()\n            .any(|(child, parent)| *child.parent_hash() != parent.block_hash())\n        {\n            return Err(SyncLeapValidationError::TrustedAncestorsNotSorted);\n        }\n        let mut trusted_ancestor_iter = self.trusted_ancestor_headers.iter().rev();\n        if let 
Some(last_ancestor) = trusted_ancestor_iter.next() {\n            if !last_ancestor.is_switch_block() && !last_ancestor.is_genesis() {\n                return Err(SyncLeapValidationError::MissingAncestorSwitchBlock);\n            }\n        }\n        if trusted_ancestor_iter.any(BlockHeader::is_switch_block) {\n            return Err(SyncLeapValidationError::UnexpectedAncestorSwitchBlock);\n        }\n        if self.trusted_ancestor_only && !self.block_headers_with_signatures.is_empty() {\n            return Err(SyncLeapValidationError::UnexpectedBlockHeadersWithSignatures);\n        }\n\n        let mut headers: BTreeMap<BlockHash, &BlockHeader> = self\n            .headers()\n            .map(|header| (header.block_hash(), header))\n            .collect();\n        let mut signatures: BTreeMap<EraId, Vec<&BlockSignatures>> = BTreeMap::new();\n        for block_header in &self.block_headers_with_signatures {\n            signatures\n                .entry(block_header.block_signatures().era_id())\n                .or_default()\n                .push(block_header.block_signatures());\n        }\n\n        let mut headers_with_sufficient_finality: Vec<BlockHash> =\n            vec![self.trusted_block_header.block_hash()];\n\n        while let Some(hash) = headers_with_sufficient_finality.pop() {\n            if let Some(header) = headers.remove(&hash) {\n                headers_with_sufficient_finality.push(*header.parent_hash());\n                if let Some(mut validator_weights) = header.next_era_validator_weights() {\n                    // If this is a switch block right before the upgrade to the current protocol\n                    // version, and if this upgrade changes the validator set, use the validator\n                    // weights from the chainspec.\n                    if header.next_block_era_id() == validation_metadata.activation_point.era_id() {\n                        if let Some(updated_weights) = validation_metadata\n                        
    .global_state_update\n                            .as_ref()\n                            .and_then(|update| update.validators.as_ref())\n                        {\n                            validator_weights = updated_weights\n                        }\n                    }\n\n                    if let Some(era_sigs) = signatures.remove(&header.next_block_era_id()) {\n                        for sigs in era_sigs {\n                            if let Err(err) = utils::check_sufficient_block_signatures(\n                                validator_weights,\n                                validation_metadata.finality_threshold_fraction,\n                                Some(sigs),\n                            ) {\n                                return Err(SyncLeapValidationError::HeadersNotSufficientlySigned(\n                                    err,\n                                ));\n                            }\n                            headers_with_sufficient_finality.push(*sigs.block_hash());\n                        }\n                    }\n                }\n            }\n        }\n\n        // any orphaned headers == incomplete proof\n        let incomplete_headers_proof = !headers.is_empty();\n        // any orphaned signatures == incomplete proof\n        let incomplete_signatures_proof = !signatures.is_empty();\n\n        if incomplete_headers_proof || incomplete_signatures_proof {\n            return Err(SyncLeapValidationError::IncompleteProof);\n        }\n\n        for block_header in &self.block_headers_with_signatures {\n            block_header\n                .is_valid()\n                .map_err(SyncLeapValidationError::BlockHeaderWithSignatures)?;\n        }\n\n        // defer cryptographic verification until last to avoid unnecessary computation\n        for block_header in &self.block_headers_with_signatures {\n            block_header\n                .block_signatures()\n                .is_verified()\n                
.map_err(SyncLeapValidationError::Crypto)?;\n        }\n\n        Ok(())\n    }\n}\n\nmod specimen_support {\n    use crate::utils::specimen::{\n        estimator_max_rounds_per_era, vec_of_largest_specimen, vec_prop_specimen,\n        BlockHeaderWithoutEraEnd, Cache, LargestSpecimen, SizeEstimator,\n    };\n\n    use super::{SyncLeap, SyncLeapIdentifier};\n\n    impl LargestSpecimen for SyncLeap {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            // Will at most contain as many blocks as a single era. And how many blocks can\n            // there be in an era is determined by the chainspec: it's the\n            // maximum of minimum_era_height and era_duration / minimum_block_time\n            let count = estimator_max_rounds_per_era(estimator).saturating_sub(1);\n\n            let non_switch_block_ancestors: Vec<BlockHeaderWithoutEraEnd> =\n                vec_of_largest_specimen(estimator, count, cache);\n\n            let mut trusted_ancestor_headers =\n                vec![LargestSpecimen::largest_specimen(estimator, cache)];\n            trusted_ancestor_headers.extend(\n                non_switch_block_ancestors\n                    .into_iter()\n                    .map(BlockHeaderWithoutEraEnd::into_block_header),\n            );\n\n            let block_headers_with_signatures =\n                vec_prop_specimen(estimator, \"recent_era_count\", cache);\n            SyncLeap {\n                trusted_ancestor_only: LargestSpecimen::largest_specimen(estimator, cache),\n                trusted_block_header: LargestSpecimen::largest_specimen(estimator, cache),\n                trusted_ancestor_headers,\n                block_headers_with_signatures,\n            }\n        }\n    }\n\n    impl LargestSpecimen for SyncLeapIdentifier {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            SyncLeapIdentifier {\n                block_hash: 
LargestSpecimen::largest_specimen(estimator, cache),\n                trusted_ancestor_only: true,\n            }\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    // The `FetchItem::<SyncLeap>::validate()` function can potentially return the\n    // `SyncLeapValidationError::BlockWithMetadata` error as a result of calling\n    // `BlockHeaderWithMetadata::validate()`, but in practice this will always be detected earlier\n    // as an `SyncLeapValidationError::IncompleteProof` error. Hence, there is no explicit test for\n    // `SyncLeapValidationError::BlockWithMetadata`.\n\n    use std::{\n        collections::{BTreeMap, BTreeSet},\n        iter,\n    };\n\n    use num_rational::Ratio;\n    use rand::Rng;\n\n    use casper_types::{\n        crypto, testing::TestRng, ActivationPoint, Block, BlockHash, BlockHeader,\n        BlockHeaderWithSignatures, BlockSignaturesV2, BlockV2, ChainNameDigest, EraEndV2, EraId,\n        FinalitySignatureV2, GlobalStateUpdate, ProtocolConfig, ProtocolVersion, PublicKey,\n        SecretKey, TestBlockBuilder, Timestamp, TransactionHash, TransactionV1Hash,\n        AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID, U512,\n    };\n\n    use super::SyncLeap;\n    use crate::{\n        components::fetcher::FetchItem,\n        types::{\n            sync_leap::SyncLeapValidationError,\n            sync_leap_validation_metadata::SyncLeapValidationMetaData, EraValidatorWeights,\n            SyncLeapIdentifier,\n        },\n        utils::BlockSignatureError,\n    };\n\n    fn make_block_header_with_signatures_from_height(\n        height: usize,\n        test_chain: &[BlockV2],\n        validators: &[ValidatorSpec],\n        chain_name_hash: ChainNameDigest,\n        add_proofs: bool,\n    ) -> BlockHeaderWithSignatures {\n        let header = Block::from(test_chain.get(height).unwrap()).clone_header();\n        make_block_header_with_signatures_from_header(\n            &header,\n            validators,\n            
chain_name_hash,\n            add_proofs,\n        )\n    }\n\n    fn make_block_header_with_signatures_from_header(\n        block_header: &BlockHeader,\n        validators: &[ValidatorSpec],\n        chain_name_hash: ChainNameDigest,\n        add_proofs: bool,\n    ) -> BlockHeaderWithSignatures {\n        let hash = block_header.block_hash();\n        let height = block_header.height();\n        let era_id = block_header.era_id();\n        let mut block_signatures = BlockSignaturesV2::new(hash, height, era_id, chain_name_hash);\n        validators.iter().for_each(\n            |ValidatorSpec {\n                 secret_key,\n                 public_key: _,\n                 weight: _,\n             }| {\n                let fin_sig =\n                    FinalitySignatureV2::create(hash, height, era_id, chain_name_hash, secret_key);\n                if add_proofs {\n                    block_signatures\n                        .insert_signature(fin_sig.public_key().clone(), *fin_sig.signature());\n                }\n            },\n        );\n\n        BlockHeaderWithSignatures::new(block_header.clone(), block_signatures.into())\n    }\n\n    fn make_test_sync_leap_with_chain(\n        validators: &[ValidatorSpec],\n        test_chain: &[BlockV2],\n        query: usize,\n        trusted_ancestor_headers: &[usize],\n        blok_headers_with_signatures: &[usize],\n        chain_name_hash: ChainNameDigest,\n        add_proofs: bool,\n    ) -> SyncLeap {\n        let trusted_block_header = Block::from(test_chain.get(query).unwrap()).clone_header();\n\n        let trusted_ancestor_headers: Vec<_> = trusted_ancestor_headers\n            .iter()\n            .map(|height| Block::from(test_chain.get(*height).unwrap()).clone_header())\n            .collect();\n\n        let block_headers_with_signatures: Vec<_> = blok_headers_with_signatures\n            .iter()\n            .map(|height| {\n                make_block_header_with_signatures_from_height(\n                
    *height,\n                    test_chain,\n                    validators,\n                    chain_name_hash,\n                    add_proofs,\n                )\n            })\n            .collect();\n\n        SyncLeap {\n            trusted_ancestor_only: false,\n            trusted_block_header,\n            trusted_ancestor_headers,\n            block_headers_with_signatures,\n        }\n    }\n\n    // Each generated era gets two validators pulled from the provided `validators` set.\n    fn make_test_sync_leap_with_validators(\n        rng: &mut TestRng,\n        validators: &[ValidatorSpec],\n        switch_blocks: &[u64],\n        query: usize,\n        trusted_ancestor_headers: &[usize],\n        block_headers_with_signatures: &[usize],\n        add_proofs: bool,\n    ) -> SyncLeap {\n        let mut test_chain_spec =\n            TestChainSpec::new(rng, Some(switch_blocks.to_vec()), None, validators);\n        let test_chain: Vec<_> = test_chain_spec.iter().take(12).collect();\n        let chain_name_hash = ChainNameDigest::random(rng);\n\n        make_test_sync_leap_with_chain(\n            validators,\n            &test_chain,\n            query,\n            trusted_ancestor_headers,\n            block_headers_with_signatures,\n            chain_name_hash,\n            add_proofs,\n        )\n    }\n\n    fn make_test_sync_leap(\n        rng: &mut TestRng,\n        switch_blocks: &[u64],\n        query: usize,\n        trusted_ancestor_headers: &[usize],\n        block_headers_with_signatures: &[usize],\n        add_proofs: bool,\n    ) -> SyncLeap {\n        const DEFAULT_VALIDATOR_WEIGHT: u32 = 100;\n\n        let validators: Vec<_> = iter::repeat_with(crypto::generate_ed25519_keypair)\n            .take(2)\n            .map(|(secret_key, public_key)| ValidatorSpec {\n                secret_key,\n                public_key,\n                weight: Some(DEFAULT_VALIDATOR_WEIGHT.into()),\n            })\n            .collect();\n        
make_test_sync_leap_with_validators(\n            rng,\n            &validators,\n            switch_blocks,\n            query,\n            trusted_ancestor_headers,\n            block_headers_with_signatures,\n            add_proofs,\n        )\n    }\n\n    fn test_sync_leap_validation_metadata() -> SyncLeapValidationMetaData {\n        let unbonding_delay = 7;\n        let auction_delay = 1;\n        let activation_point = ActivationPoint::EraId(3000.into());\n        let finality_threshold_fraction = Ratio::new(1, 3);\n\n        SyncLeapValidationMetaData::new(\n            unbonding_delay - auction_delay, // As per `CoreConfig::recent_era_count()`.\n            activation_point,\n            None,\n            finality_threshold_fraction,\n        )\n    }\n\n    #[test]\n    fn should_validate_correct_sync_leap() {\n        // Chain\n        // 0   1   2   3   4   5   6   7   8   9   10   11\n        // S           S           S           S\n        let switch_blocks = [0, 3, 6, 9];\n        let validation_metadata = test_sync_leap_validation_metadata();\n\n        let mut rng = TestRng::new();\n\n        // Querying for a non-switch block.\n        let query = 5;\n        let trusted_ancestor_headers = [4, 3];\n        let block_headers_with_signatures = [6, 9, 11];\n        let add_proofs = true;\n        let sync_leap = make_test_sync_leap(\n            &mut rng,\n            &switch_blocks,\n            query,\n            &trusted_ancestor_headers,\n            &block_headers_with_signatures,\n            add_proofs,\n        );\n\n        let result = sync_leap.validate(&validation_metadata);\n        assert!(result.is_ok());\n\n        // Querying for a switch block.\n        let query = 6;\n        let trusted_ancestor_headers = [5, 4, 3];\n        let block_headers_with_signatures = [9, 11];\n        let add_proofs = true;\n        let sync_leap = make_test_sync_leap(\n            &mut rng,\n            &switch_blocks,\n            query,\n         
   &trusted_ancestor_headers,\n            &block_headers_with_signatures,\n            add_proofs,\n        );\n\n        let result = sync_leap.validate(&validation_metadata);\n        assert!(result.is_ok());\n    }\n\n    #[test]\n    fn should_check_trusted_ancestors() {\n        let mut rng = TestRng::new();\n        let validation_metadata = test_sync_leap_validation_metadata();\n\n        // Trusted ancestors can't be empty when trusted block height is greater than 0.\n        let block = TestBlockBuilder::new().height(1).build(&mut rng);\n\n        let sync_leap = SyncLeap {\n            trusted_ancestor_only: false,\n            trusted_block_header: block.take_header().into(),\n            trusted_ancestor_headers: Default::default(),\n            block_headers_with_signatures: Default::default(),\n        };\n        let result = sync_leap.validate(&validation_metadata);\n        assert!(matches!(\n            result,\n            Err(SyncLeapValidationError::MissingTrustedAncestors)\n        ));\n\n        // When trusted block height is 0, validation should not fail due trusted ancestors being\n        // empty.\n        let block = TestBlockBuilder::new().height(0).build(&mut rng);\n\n        let sync_leap = SyncLeap {\n            trusted_ancestor_only: false,\n            trusted_block_header: block.take_header().into(),\n            trusted_ancestor_headers: Default::default(),\n            block_headers_with_signatures: Default::default(),\n        };\n        let result = sync_leap.validate(&validation_metadata);\n        assert!(!matches!(\n            result,\n            Err(SyncLeapValidationError::MissingTrustedAncestors)\n        ));\n        assert!(result.is_ok());\n    }\n\n    #[test]\n    fn should_check_block_headers_with_signatures_size() {\n        let mut rng = TestRng::new();\n        let validation_metadata = test_sync_leap_validation_metadata();\n\n        let max_allowed_size = validation_metadata.recent_era_count + 1;\n\n     
   // Max allowed size should NOT trigger the `TooManySwitchBlocks` error.\n        let generated_block_count = max_allowed_size;\n\n        let block = TestBlockBuilder::new().height(0).build_versioned(&mut rng);\n        let chain_name_hash = ChainNameDigest::random(&mut rng);\n        let sync_leap = SyncLeap {\n            trusted_ancestor_only: false,\n            trusted_block_header: block.clone_header(),\n            trusted_ancestor_headers: Default::default(),\n            block_headers_with_signatures: iter::repeat_with(|| {\n                let block = TestBlockBuilder::new().build_versioned(&mut rng);\n                let hash = block.hash();\n                let height = block.height();\n                BlockHeaderWithSignatures::new(\n                    block.clone_header(),\n                    BlockSignaturesV2::new(*hash, height, 0.into(), chain_name_hash).into(),\n                )\n            })\n            .take(generated_block_count as usize)\n            .collect(),\n        };\n        let result = sync_leap.validate(&validation_metadata);\n        assert!(!matches!(\n            result,\n            Err(SyncLeapValidationError::TooManySwitchBlocks)\n        ));\n\n        // Generating one more block should trigger the `TooManySwitchBlocks` error.\n        let generated_block_count = max_allowed_size + 1;\n\n        let block = TestBlockBuilder::new().height(0).build_versioned(&mut rng);\n        let sync_leap = SyncLeap {\n            trusted_ancestor_only: false,\n            trusted_block_header: block.take_header(),\n            trusted_ancestor_headers: Default::default(),\n            block_headers_with_signatures: iter::repeat_with(|| {\n                let block = TestBlockBuilder::new().build_versioned(&mut rng);\n                let hash = block.hash();\n                let height = block.height();\n                BlockHeaderWithSignatures::new(\n                    block.clone_header(),\n                    
BlockSignaturesV2::new(*hash, height, 0.into(), chain_name_hash).into(),\n                )\n            })\n            .take(generated_block_count as usize)\n            .collect(),\n        };\n        let result = sync_leap.validate(&validation_metadata);\n        assert!(matches!(\n            result,\n            Err(SyncLeapValidationError::TooManySwitchBlocks)\n        ));\n    }\n\n    #[test]\n    fn should_detect_unsorted_trusted_ancestors() {\n        let mut rng = TestRng::new();\n        let validation_metadata = test_sync_leap_validation_metadata();\n\n        // Test block iterator produces blocks in order, however, the `trusted_ancestor_headers` is\n        // expected to be sorted backwards (from the most recent ancestor back to the switch block).\n        // Therefore, the generated blocks should cause the `TrustedAncestorsNotSorted` error to be\n        // triggered.\n        let block = TestBlockBuilder::new().height(0).build(&mut rng);\n        let block_iterator =\n            TestBlockIterator::new(block.clone(), &mut rng, None, None, Default::default());\n        let block = Block::from(block);\n\n        let trusted_ancestor_headers = block_iterator\n            .take(3)\n            .map(|block| block.take_header().into())\n            .collect();\n\n        let sync_leap = SyncLeap {\n            trusted_ancestor_only: false,\n            trusted_block_header: block.take_header(),\n            trusted_ancestor_headers,\n            block_headers_with_signatures: Default::default(),\n        };\n        let result = sync_leap.validate(&validation_metadata);\n        assert!(matches!(\n            result,\n            Err(SyncLeapValidationError::TrustedAncestorsNotSorted)\n        ));\n\n        // Single trusted ancestor header it should never trigger the `TrustedAncestorsNotSorted`\n        // error.\n        let block = TestBlockBuilder::new().height(0).build(&mut rng);\n        let block_iterator =\n            
TestBlockIterator::new(block.clone(), &mut rng, None, None, Default::default());\n\n        let trusted_ancestor_headers = block_iterator\n            .take(1)\n            .map(|block| block.take_header().into())\n            .collect();\n\n        let block = Block::from(block);\n        let sync_leap = SyncLeap {\n            trusted_ancestor_only: false,\n            trusted_block_header: block.take_header(),\n            trusted_ancestor_headers,\n            block_headers_with_signatures: Default::default(),\n        };\n        let result = sync_leap.validate(&validation_metadata);\n        assert!(!matches!(\n            result,\n            Err(SyncLeapValidationError::TrustedAncestorsNotSorted)\n        ));\n    }\n\n    #[test]\n    fn should_detect_missing_ancestor_switch_block() {\n        let mut rng = TestRng::new();\n        let validation_metadata = test_sync_leap_validation_metadata();\n\n        // Make sure `TestBlockIterator` creates no switch blocks.\n        let switch_blocks = None;\n\n        let block = TestBlockBuilder::new().height(0).build(&mut rng);\n        let block_iterator = TestBlockIterator::new(\n            block.clone(),\n            &mut rng,\n            switch_blocks,\n            None,\n            Default::default(),\n        );\n\n        let trusted_ancestor_headers: Vec<_> = block_iterator\n            .take(3)\n            .map(|block| block.take_header().into())\n            .collect::<Vec<_>>()\n            .into_iter()\n            .rev()\n            .collect();\n\n        let block = Block::from(block);\n        let sync_leap = SyncLeap {\n            trusted_ancestor_only: false,\n            trusted_block_header: block.take_header(),\n            trusted_ancestor_headers,\n            block_headers_with_signatures: Default::default(),\n        };\n        let result = sync_leap.validate(&validation_metadata);\n        assert!(matches!(\n            result,\n            
Err(SyncLeapValidationError::MissingAncestorSwitchBlock)\n        ));\n    }\n\n    #[test]\n    fn should_detect_unexpected_ancestor_switch_block() {\n        // Chain\n        // 0   1   2   3   4   5   6   7   8   9   10   11\n        // S       S   S           S           S\n        let switch_blocks = [0, 2, 3, 6, 9];\n        let validation_metadata = test_sync_leap_validation_metadata();\n\n        let mut rng = TestRng::new();\n\n        // Intentionally include two consecutive switch blocks (3, 2) in the\n        // `trusted_ancestor_headers`, which should trigger the error.\n        let trusted_ancestor_headers = [4, 3, 2];\n\n        let query = 5;\n        let block_headers_with_signatures = [6, 9, 11];\n        let add_proofs = true;\n        let sync_leap = make_test_sync_leap(\n            &mut rng,\n            &switch_blocks,\n            query,\n            &trusted_ancestor_headers,\n            &block_headers_with_signatures,\n            add_proofs,\n        );\n\n        let result = sync_leap.validate(&validation_metadata);\n        assert!(matches!(\n            result,\n            Err(SyncLeapValidationError::UnexpectedAncestorSwitchBlock)\n        ));\n    }\n\n    #[test]\n    fn should_detect_unexpected_block_header_with_signatures() {\n        // Chain\n        // 0   1   2   3   4   5   6   7   8   9   10   11\n        // S           S           S           S\n        let switch_blocks = [0, 3, 6, 9];\n        let validation_metadata = test_sync_leap_validation_metadata();\n\n        let mut rng = TestRng::new();\n\n        let query = 5;\n        let trusted_ancestor_headers = [4, 3];\n        let block_headers_with_signatures = [6, 9, 11];\n        let add_proofs = true;\n        let mut sync_leap = make_test_sync_leap(\n            &mut rng,\n            &switch_blocks,\n            query,\n            &trusted_ancestor_headers,\n            &block_headers_with_signatures,\n            add_proofs,\n        );\n\n        // When 
`trusted_ancestor_only` we expect an error when `block_headers_with_signatures` is\n        // not empty.\n        sync_leap.trusted_ancestor_only = true;\n\n        let result = sync_leap.validate(&validation_metadata);\n        assert!(matches!(\n            result,\n            Err(SyncLeapValidationError::UnexpectedBlockHeadersWithSignatures)\n        ));\n    }\n\n    #[test]\n    fn should_detect_not_sufficiently_signed_headers() {\n        // Chain\n        // 0   1   2   3   4   5   6   7   8   9   10   11\n        // S           S           S           S\n        let switch_blocks = [0, 3, 6, 9];\n        let validation_metadata = test_sync_leap_validation_metadata();\n\n        let mut rng = TestRng::new();\n\n        let query = 5;\n        let trusted_ancestor_headers = [4, 3];\n        let block_headers_with_signatures = [6, 9, 11];\n        let add_proofs = false;\n        let sync_leap = make_test_sync_leap(\n            &mut rng,\n            &switch_blocks,\n            query,\n            &trusted_ancestor_headers,\n            &block_headers_with_signatures,\n            add_proofs,\n        );\n\n        let result = sync_leap.validate(&validation_metadata);\n        assert!(\n            matches!(result, Err(SyncLeapValidationError::HeadersNotSufficientlySigned(inner))\n             if matches!(&inner, BlockSignatureError::InsufficientWeightForFinality{\n                trusted_validator_weights: _,\n                block_signatures: _,\n                signature_weight,\n                total_validator_weight:_,\n                fault_tolerance_fraction:_ } if signature_weight == &Some(Box::new(0.into()))))\n        );\n    }\n\n    #[test]\n    fn should_detect_orphaned_headers() {\n        // Chain\n        // 0   1   2   3   4   5   6   7   8   9   10   11\n        // S           S           S           S\n        let switch_blocks = [0, 3, 6, 9];\n        let validation_metadata = test_sync_leap_validation_metadata();\n\n        let mut 
rng = TestRng::new();\n\n        let query = 5;\n        let trusted_ancestor_headers = [4, 3];\n        let block_headers_with_signatures = [6, 9, 11];\n        let add_proofs = true;\n        let mut sync_leap = make_test_sync_leap(\n            &mut rng,\n            &switch_blocks,\n            query,\n            &trusted_ancestor_headers,\n            &block_headers_with_signatures,\n            add_proofs,\n        );\n\n        // Add single orphaned block. Signatures are cloned from a legit block to avoid bailing on\n        // the signature validation check.\n        let orphaned_block = TestBlockBuilder::new().build_versioned(&mut rng);\n        let orphaned_block_header_with_signatures = BlockHeaderWithSignatures::new(\n            orphaned_block.clone_header(),\n            sync_leap\n                .block_headers_with_signatures\n                .first()\n                .unwrap()\n                .block_signatures()\n                .clone(),\n        );\n        sync_leap\n            .block_headers_with_signatures\n            .push(orphaned_block_header_with_signatures);\n\n        let result = sync_leap.validate(&validation_metadata);\n        assert!(matches!(\n            result,\n            Err(SyncLeapValidationError::IncompleteProof)\n        ));\n    }\n\n    #[test]\n    fn should_detect_orphaned_signatures() {\n        // Chain\n        // 0   1   2   3   4   5   6   7   8   9   10   11\n        // S           S           S           S\n        let switch_blocks = [0, 3, 6, 9];\n        let validation_metadata = test_sync_leap_validation_metadata();\n\n        let mut rng = TestRng::new();\n\n        let query = 5;\n        let trusted_ancestor_headers = [4, 3];\n        let block_headers_with_signatures = [6, 9, 11];\n        let add_proofs = true;\n        let mut sync_leap = make_test_sync_leap(\n            &mut rng,\n            &switch_blocks,\n            query,\n            &trusted_ancestor_headers,\n            
&block_headers_with_signatures,\n            add_proofs,\n        );\n\n        // Insert signature from an era nowhere near the sync leap data. Base it on one of the\n        // existing signatures to avoid bailing on the signature validation check.\n        let mut invalid_block_header_with_signatures = sync_leap\n            .block_headers_with_signatures\n            .first_mut()\n            .unwrap()\n            .clone();\n        invalid_block_header_with_signatures.invalidate_era();\n        sync_leap\n            .block_headers_with_signatures\n            .push(invalid_block_header_with_signatures);\n\n        let result = sync_leap.validate(&validation_metadata);\n        assert!(matches!(\n            result,\n            Err(SyncLeapValidationError::IncompleteProof)\n        ));\n    }\n\n    #[test]\n    fn should_fail_when_signature_fails_crypto_verification() {\n        // Chain\n        // 0   1   2   3   4   5   6   7   8   9   10   11\n        // S           S           S           S\n        let switch_blocks = [0, 3, 6, 9];\n        let validation_metadata = test_sync_leap_validation_metadata();\n\n        let mut rng = TestRng::new();\n\n        let query = 5;\n        let trusted_ancestor_headers = [4, 3];\n        let block_headers_with_signatures = [6, 9, 11];\n        let add_proofs = true;\n        let mut sync_leap = make_test_sync_leap(\n            &mut rng,\n            &switch_blocks,\n            query,\n            &trusted_ancestor_headers,\n            &block_headers_with_signatures,\n            add_proofs,\n        );\n\n        let mut invalid_block_header_with_signatures =\n            sync_leap.block_headers_with_signatures.pop().unwrap();\n        invalid_block_header_with_signatures.invalidate_last_signature();\n        sync_leap\n            .block_headers_with_signatures\n            .push(invalid_block_header_with_signatures);\n\n        let result = sync_leap.validate(&validation_metadata);\n        
assert!(matches!(result, Err(SyncLeapValidationError::Crypto(_))));\n    }\n\n    #[test]\n    fn should_use_correct_validator_weights_on_upgrade() {\n        // Chain\n        // 0   1   2   3   4   5   6   7   8   9   10   11\n        // S           S           S           S\n        let switch_blocks = [0, 3, 6, 9];\n\n        let mut rng = TestRng::new();\n\n        let query = 5;\n        let trusted_ancestor_headers = [4, 3];\n\n        const INDEX_OF_THE_LAST_SWITCH_BLOCK: usize = 1;\n        let block_headers_with_signatures = [6, 9, 11];\n\n        let add_proofs = true;\n        let sync_leap = make_test_sync_leap(\n            &mut rng,\n            &switch_blocks,\n            query,\n            &trusted_ancestor_headers,\n            &block_headers_with_signatures,\n            add_proofs,\n        );\n\n        // Setup upgrade after the last switch block.\n        let upgrade_block = sync_leap\n            .block_headers_with_signatures\n            .get(INDEX_OF_THE_LAST_SWITCH_BLOCK)\n            .unwrap();\n        let upgrade_era = upgrade_block.block_header().era_id().successor();\n        let activation_point = ActivationPoint::EraId(upgrade_era);\n\n        // Set up validator change.\n        const DEFAULT_VALIDATOR_WEIGHT: u64 = 100;\n        let new_validators: BTreeMap<_, _> = iter::repeat_with(crypto::generate_ed25519_keypair)\n            .take(2)\n            .map(|(_, public_key)| (public_key, DEFAULT_VALIDATOR_WEIGHT.into()))\n            .collect();\n        let global_state_update = GlobalStateUpdate {\n            validators: Some(new_validators),\n            entries: Default::default(),\n        };\n\n        let unbonding_delay = 7;\n        let auction_delay = 1;\n        let finality_threshold_fraction = Ratio::new(1, 3);\n        let validation_metadata = SyncLeapValidationMetaData::new(\n            unbonding_delay - auction_delay, // As per `CoreConfig::recent_era_count()`.\n            activation_point,\n            
Some(global_state_update),\n            finality_threshold_fraction,\n        );\n\n        let result = sync_leap.validate(&validation_metadata);\n\n        // By asserting on the `HeadersNotSufficientlySigned` error (with bogus validators set to\n        // the original validators from the chain) we can prove that the validators smuggled in the\n        // validation metadata were actually used in the verification process.\n        let expected_bogus_validators: Vec<_> = sync_leap\n            .block_headers_with_signatures\n            .last()\n            .unwrap()\n            .block_signatures()\n            .signers()\n            .cloned()\n            .collect();\n        assert!(\n            matches!(result, Err(SyncLeapValidationError::HeadersNotSufficientlySigned(inner))\n                 if matches!(&inner, BlockSignatureError::BogusValidators{\n                    trusted_validator_weights: _,\n                    block_signatures: _,\n                    bogus_validators\n                } if bogus_validators == &expected_bogus_validators))\n        );\n    }\n\n    #[test]\n    fn should_return_headers() {\n        let mut rng = TestRng::new();\n        let chain_name_hash = ChainNameDigest::random(&mut rng);\n\n        let trusted_block = TestBlockBuilder::new()\n            .switch_block(false)\n            .build_versioned(&mut rng);\n\n        let trusted_ancestor_1 = TestBlockBuilder::new()\n            .switch_block(true)\n            .build_versioned(&mut rng);\n        let trusted_ancestor_2 = TestBlockBuilder::new()\n            .switch_block(false)\n            .build_versioned(&mut rng);\n        let trusted_ancestor_3 = TestBlockBuilder::new()\n            .switch_block(false)\n            .build_versioned(&mut rng);\n\n        let signed_block_1 = TestBlockBuilder::new()\n            .switch_block(true)\n            .build_versioned(&mut rng);\n        let signed_block_2 = TestBlockBuilder::new()\n            .switch_block(true)\n      
      .build_versioned(&mut rng);\n        let signed_block_3 = TestBlockBuilder::new()\n            .switch_block(false)\n            .build_versioned(&mut rng);\n        let block_header_with_signatures_1 = make_block_header_with_signatures_from_header(\n            &signed_block_1.clone_header(),\n            &[],\n            chain_name_hash,\n            false,\n        );\n        let block_header_with_signatures_2 = make_block_header_with_signatures_from_header(\n            &signed_block_2.clone_header(),\n            &[],\n            chain_name_hash,\n            false,\n        );\n        let block_header_with_signatures_3 = make_block_header_with_signatures_from_header(\n            &signed_block_3.clone_header(),\n            &[],\n            chain_name_hash,\n            false,\n        );\n\n        let sync_leap = SyncLeap {\n            trusted_ancestor_only: false,\n            trusted_block_header: trusted_block.clone_header(),\n            trusted_ancestor_headers: vec![\n                trusted_ancestor_1.clone_header(),\n                trusted_ancestor_2.clone_header(),\n                trusted_ancestor_3.clone_header(),\n            ],\n            block_headers_with_signatures: vec![\n                block_header_with_signatures_1,\n                block_header_with_signatures_2,\n                block_header_with_signatures_3,\n            ],\n        };\n\n        let actual_headers: BTreeSet<_> = sync_leap\n            .headers()\n            .map(|header| header.block_hash())\n            .collect();\n        let expected_headers: BTreeSet<_> = [\n            trusted_block,\n            trusted_ancestor_1,\n            trusted_ancestor_2,\n            trusted_ancestor_3,\n            signed_block_1,\n            signed_block_2,\n            signed_block_3,\n        ]\n        .iter()\n        .map(|block| *block.hash())\n        .collect();\n        assert_eq!(expected_headers, actual_headers);\n    }\n\n    #[test]\n    fn 
should_return_switch_block_headers() {\n        let mut rng = TestRng::new();\n\n        let chain_name_hash = ChainNameDigest::random(&mut rng);\n\n        let trusted_block = TestBlockBuilder::new()\n            .switch_block(false)\n            .build_versioned(&mut rng);\n\n        let trusted_ancestor_1 = TestBlockBuilder::new()\n            .switch_block(true)\n            .build_versioned(&mut rng);\n        let trusted_ancestor_2 = TestBlockBuilder::new()\n            .switch_block(false)\n            .build_versioned(&mut rng);\n        let trusted_ancestor_3 = TestBlockBuilder::new()\n            .switch_block(false)\n            .build_versioned(&mut rng);\n\n        let signed_block_1 = TestBlockBuilder::new()\n            .switch_block(true)\n            .build_versioned(&mut rng);\n        let signed_block_2 = TestBlockBuilder::new()\n            .switch_block(true)\n            .build_versioned(&mut rng);\n        let signed_block_3 = TestBlockBuilder::new()\n            .switch_block(false)\n            .build_versioned(&mut rng);\n        let block_header_with_signatures_1 = make_block_header_with_signatures_from_header(\n            &signed_block_1.clone_header(),\n            &[],\n            chain_name_hash,\n            false,\n        );\n        let block_header_with_signatures_2 = make_block_header_with_signatures_from_header(\n            &signed_block_2.clone_header(),\n            &[],\n            chain_name_hash,\n            false,\n        );\n        let block_header_with_signatures_3 = make_block_header_with_signatures_from_header(\n            &signed_block_3.clone_header(),\n            &[],\n            chain_name_hash,\n            false,\n        );\n\n        let sync_leap = SyncLeap {\n            trusted_ancestor_only: false,\n            trusted_block_header: trusted_block.clone_header(),\n            trusted_ancestor_headers: vec![\n                trusted_ancestor_1.clone_header(),\n                
trusted_ancestor_2.clone_header(),\n                trusted_ancestor_3.clone_header(),\n            ],\n            block_headers_with_signatures: vec![\n                block_header_with_signatures_1.clone(),\n                block_header_with_signatures_2.clone(),\n                block_header_with_signatures_3.clone(),\n            ],\n        };\n\n        let actual_headers: BTreeSet<_> = sync_leap\n            .switch_blocks_headers()\n            .map(|header| header.block_hash())\n            .collect();\n        let expected_headers: BTreeSet<_> = [\n            trusted_ancestor_1.clone(),\n            signed_block_1.clone(),\n            signed_block_2.clone(),\n        ]\n        .iter()\n        .map(|block| *block.hash())\n        .collect();\n        assert_eq!(expected_headers, actual_headers);\n\n        // Also test when the trusted block is a switch block.\n        let trusted_block = TestBlockBuilder::new()\n            .switch_block(true)\n            .build_versioned(&mut rng);\n        let sync_leap = SyncLeap {\n            trusted_ancestor_only: false,\n            trusted_block_header: trusted_block.clone_header(),\n            trusted_ancestor_headers: vec![\n                trusted_ancestor_1.clone_header(),\n                trusted_ancestor_2.clone_header(),\n                trusted_ancestor_3.clone_header(),\n            ],\n            block_headers_with_signatures: vec![\n                block_header_with_signatures_1,\n                block_header_with_signatures_2,\n                block_header_with_signatures_3,\n            ],\n        };\n        let actual_headers: BTreeSet<_> = sync_leap\n            .switch_blocks_headers()\n            .map(|header| header.block_hash())\n            .collect();\n        let expected_headers: BTreeSet<_> = [\n            trusted_block,\n            trusted_ancestor_1,\n            signed_block_1,\n            signed_block_2,\n        ]\n        .iter()\n        .map(|block| *block.hash())\n    
    .collect();\n        assert_eq!(expected_headers, actual_headers);\n    }\n\n    #[test]\n    fn should_return_highest_block_header_from_trusted_block() {\n        // Chain\n        // 0   1   2   3   4   5   6   7   8   9   10   11\n        // S           S           S           S\n        let switch_blocks = [0, 3, 6, 9];\n\n        let mut rng = TestRng::new();\n\n        let query = 5;\n        let trusted_ancestor_headers = [4, 3];\n        let block_headers_with_signatures = [6, 9, 11];\n        let add_proofs = true;\n        let valid_sync_leap = make_test_sync_leap(\n            &mut rng,\n            &switch_blocks,\n            query,\n            &trusted_ancestor_headers,\n            &block_headers_with_signatures,\n            add_proofs,\n        );\n\n        // `valid_sync_leap` created above is a well formed SyncLeap structure for the test chain.\n        // We can use the blocks it contains to generate SyncLeap structures as required for\n        // the test, because we know the heights of the blocks in the test chain as well as\n        // their sigs.\n        let highest_block = valid_sync_leap\n            .block_headers_with_signatures\n            .last()\n            .unwrap()\n            .block_header()\n            .clone();\n        let lowest_blocks: Vec<_> = valid_sync_leap\n            .trusted_ancestor_headers\n            .iter()\n            .take(2)\n            .cloned()\n            .collect();\n        let middle_blocks: Vec<_> = valid_sync_leap\n            .block_headers_with_signatures\n            .iter()\n            .take(2)\n            .cloned()\n            .collect();\n\n        let highest_block_height = highest_block.height();\n        let highest_block_hash = highest_block.block_hash();\n\n        let sync_leap = SyncLeap {\n            trusted_ancestor_only: false,\n            trusted_block_header: highest_block.clone(),\n            trusted_ancestor_headers: lowest_blocks,\n            
block_headers_with_signatures: middle_blocks,\n        };\n        assert_eq!(\n            sync_leap\n                .highest_block_header_and_signatures()\n                .0\n                .block_hash(),\n            highest_block.block_hash()\n        );\n        assert_eq!(sync_leap.highest_block_hash(), highest_block_hash);\n        assert_eq!(sync_leap.highest_block_height(), highest_block_height);\n    }\n\n    #[test]\n    fn should_return_highest_block_header_from_trusted_ancestors() {\n        // Chain\n        // 0   1   2   3   4   5   6   7   8   9   10   11\n        // S           S           S           S\n        let switch_blocks = [0, 3, 6, 9];\n\n        let mut rng = TestRng::new();\n\n        let query = 5;\n        let trusted_ancestor_headers = [4, 3];\n        let block_headers_with_signatures = [6, 9, 11];\n        let add_proofs = true;\n        let valid_sync_leap = make_test_sync_leap(\n            &mut rng,\n            &switch_blocks,\n            query,\n            &trusted_ancestor_headers,\n            &block_headers_with_signatures,\n            add_proofs,\n        );\n\n        // `valid_sync_leap` created above is a well formed SyncLeap structure for the test chain.\n        // We can use the blocks it contains to generate SyncLeap structures as required for\n        // the test, because we know the heights of the blocks in the test chain as well as\n        // their sigs.\n        let highest_block = valid_sync_leap\n            .block_headers_with_signatures\n            .last()\n            .unwrap()\n            .block_header()\n            .clone();\n        let lowest_blocks: Vec<_> = valid_sync_leap\n            .trusted_ancestor_headers\n            .iter()\n            .take(2)\n            .cloned()\n            .collect();\n        let middle_blocks: Vec<_> = valid_sync_leap\n            .block_headers_with_signatures\n            .iter()\n            .take(2)\n            .cloned()\n            .collect();\n\n   
     let highest_block_height = highest_block.height();\n        let highest_block_hash = highest_block.block_hash();\n\n        let sync_leap = SyncLeap {\n            trusted_ancestor_only: false,\n            trusted_block_header: lowest_blocks.first().unwrap().clone(),\n            trusted_ancestor_headers: vec![highest_block],\n            block_headers_with_signatures: middle_blocks,\n        };\n        assert_eq!(\n            sync_leap\n                .highest_block_header_and_signatures()\n                .0\n                .block_hash(),\n            highest_block_hash\n        );\n        assert_eq!(sync_leap.highest_block_hash(), highest_block_hash);\n        assert_eq!(sync_leap.highest_block_height(), highest_block_height);\n    }\n\n    #[test]\n    fn should_return_highest_block_header_from_block_headers_with_signatures() {\n        // Chain\n        // 0   1   2   3   4   5   6   7   8   9   10   11\n        // S           S           S           S\n        let switch_blocks = [0, 3, 6, 9];\n\n        let mut rng = TestRng::new();\n\n        let query = 5;\n        let trusted_ancestor_headers = [4, 3];\n        let block_headers_with_signatures = [6, 9, 11];\n        let add_proofs = true;\n        let valid_sync_leap = make_test_sync_leap(\n            &mut rng,\n            &switch_blocks,\n            query,\n            &trusted_ancestor_headers,\n            &block_headers_with_signatures,\n            add_proofs,\n        );\n\n        // `valid_sync_leap` created above is a well formed SyncLeap structure for the test chain.\n        // We can use the blocks it contains to generate SyncLeap structures as required for\n        // the test, because we know the heights of the blocks in the test chain as well as\n        // their sigs.\n        let highest_block = valid_sync_leap\n            .block_headers_with_signatures\n            .last()\n            .unwrap()\n            .clone();\n        let lowest_blocks: Vec<_> = valid_sync_leap\n 
           .trusted_ancestor_headers\n            .iter()\n            .take(2)\n            .cloned()\n            .collect();\n        let middle_blocks: Vec<_> = valid_sync_leap\n            .block_headers_with_signatures\n            .iter()\n            .take(2)\n            .cloned()\n            .map(|block_header_with_signatures| block_header_with_signatures.block_header().clone())\n            .collect();\n\n        let highest_block_height = highest_block.block_header().height();\n        let highest_block_hash = highest_block.block_header().block_hash();\n\n        let sync_leap = SyncLeap {\n            trusted_ancestor_only: false,\n            trusted_block_header: lowest_blocks.first().unwrap().clone(),\n            trusted_ancestor_headers: middle_blocks,\n            block_headers_with_signatures: vec![highest_block.clone()],\n        };\n        assert_eq!(\n            sync_leap\n                .highest_block_header_and_signatures()\n                .0\n                .block_hash(),\n            highest_block.block_header().block_hash()\n        );\n        assert_eq!(sync_leap.highest_block_hash(), highest_block_hash);\n        assert_eq!(sync_leap.highest_block_height(), highest_block_height);\n    }\n\n    #[test]\n    fn should_return_sigs_when_highest_block_is_signed() {\n        // Chain\n        // 0   1   2   3   4   5   6   7   8   9   10   11\n        // S           S           S           S\n        let switch_blocks = [0, 3, 6, 9];\n\n        let mut rng = TestRng::new();\n\n        let query = 5;\n        let trusted_ancestor_headers = [4, 3];\n        let block_headers_with_signatures = [6, 9, 11];\n        let add_proofs = true;\n        let sync_leap = make_test_sync_leap(\n            &mut rng,\n            &switch_blocks,\n            query,\n            &trusted_ancestor_headers,\n            &block_headers_with_signatures,\n            add_proofs,\n        );\n\n        
assert!(sync_leap.highest_block_header_and_signatures().1.is_some());\n    }\n\n    #[test]\n    fn should_not_return_sigs_when_highest_block_is_not_signed() {\n        // Chain\n        // 0   1   2   3   4   5   6   7   8   9   10   11\n        // S           S           S           S\n        let switch_blocks = [0, 3, 6, 9];\n\n        let mut rng = TestRng::new();\n\n        let query = 5;\n        let trusted_ancestor_headers = [4, 3];\n        let block_headers_with_signatures = [6, 9, 11];\n        let add_proofs = true;\n        let sync_leap = make_test_sync_leap(\n            &mut rng,\n            &switch_blocks,\n            query,\n            &trusted_ancestor_headers,\n            &block_headers_with_signatures,\n            add_proofs,\n        );\n\n        // `sync_leap` is a well formed SyncLeap structure for the test chain. We can use the blocks\n        // it contains to generate SyncLeap structures as required for the test, because we know the\n        // heights of the blocks in the test chain as well as their sigs.\n        let highest_block = sync_leap\n            .block_headers_with_signatures\n            .last()\n            .unwrap()\n            .clone();\n        let lowest_blocks: Vec<_> = sync_leap\n            .trusted_ancestor_headers\n            .iter()\n            .take(2)\n            .cloned()\n            .collect();\n        let middle_blocks: Vec<_> = sync_leap\n            .block_headers_with_signatures\n            .iter()\n            .take(2)\n            .cloned()\n            .collect();\n        let sync_leap = SyncLeap {\n            trusted_ancestor_only: false,\n            trusted_block_header: highest_block.block_header().clone(),\n            trusted_ancestor_headers: lowest_blocks,\n            block_headers_with_signatures: middle_blocks,\n        };\n        assert!(sync_leap.highest_block_header_and_signatures().1.is_none());\n    }\n\n    #[test]\n    fn 
should_return_era_validator_weights_for_correct_sync_leap() {\n        // Chain\n        // 0   1   2   3   4   5   6   7   8   9   10   11\n        // S           S           S           S\n        let switch_blocks = [0, 3, 6, 9];\n\n        let mut rng = TestRng::new();\n\n        // Test block iterator will pull 2 validators for each created block. Indices 0 and 1 are\n        // used for validators for the trusted ancestor headers.\n        const FIRST_BLOCK_HEADER_WITH_SIGNATURES_VALIDATOR_OFFSET: usize = 2;\n\n        let validators: Vec<_> = (1..100)\n            .map(|weight| {\n                let (secret_key, public_key) = crypto::generate_ed25519_keypair();\n                ValidatorSpec {\n                    secret_key,\n                    public_key,\n                    weight: Some(U512::from(weight)),\n                }\n            })\n            .collect();\n\n        let query = 5;\n        let trusted_ancestor_headers = [4, 3];\n        let block_headers_with_signatures = [6, 9, 11];\n        let add_proofs = true;\n        let sync_leap = make_test_sync_leap_with_validators(\n            &mut rng,\n            &validators,\n            &switch_blocks,\n            query,\n            &trusted_ancestor_headers,\n            &block_headers_with_signatures,\n            add_proofs,\n        );\n\n        let fault_tolerance_fraction = Ratio::new_raw(1, 3);\n\n        let mut block_iter = sync_leap.block_headers_with_signatures.iter();\n        let first_switch_block = block_iter.next().unwrap().clone();\n        let protocol_version = first_switch_block.block_header().protocol_version();\n        let validator_1 = validators\n            .get(FIRST_BLOCK_HEADER_WITH_SIGNATURES_VALIDATOR_OFFSET)\n            .unwrap();\n        let validator_2 = validators\n            .get(FIRST_BLOCK_HEADER_WITH_SIGNATURES_VALIDATOR_OFFSET + 1)\n            .unwrap();\n        let first_era_validator_weights = EraValidatorWeights::new(\n            
first_switch_block.block_header().era_id(),\n            [validator_1, validator_2]\n                .iter()\n                .map(\n                    |ValidatorSpec {\n                         secret_key: _,\n                         public_key,\n                         weight,\n                     }| (public_key.clone(), weight.unwrap()),\n                )\n                .collect(),\n            fault_tolerance_fraction,\n        );\n\n        let second_switch_block = block_iter.next().unwrap().clone();\n        let validator_1 = validators\n            .get(FIRST_BLOCK_HEADER_WITH_SIGNATURES_VALIDATOR_OFFSET + 2)\n            .unwrap();\n        let validator_2 = validators\n            .get(FIRST_BLOCK_HEADER_WITH_SIGNATURES_VALIDATOR_OFFSET + 3)\n            .unwrap();\n        let second_era_validator_weights = EraValidatorWeights::new(\n            second_switch_block.block_header().era_id(),\n            [validator_1, validator_2]\n                .iter()\n                .map(\n                    |ValidatorSpec {\n                         secret_key: _,\n                         public_key,\n                         weight,\n                     }| (public_key.clone(), weight.unwrap()),\n                )\n                .collect(),\n            fault_tolerance_fraction,\n        );\n\n        let third_block = block_iter.next().unwrap().clone();\n        let validator_1 = validators\n            .get(FIRST_BLOCK_HEADER_WITH_SIGNATURES_VALIDATOR_OFFSET + 4)\n            .unwrap();\n        let validator_2 = validators\n            .get(FIRST_BLOCK_HEADER_WITH_SIGNATURES_VALIDATOR_OFFSET + 5)\n            .unwrap();\n        let third_era_validator_weights = EraValidatorWeights::new(\n            third_block.block_header().era_id(),\n            [validator_1, validator_2]\n                .iter()\n                .map(\n                    |ValidatorSpec {\n                         secret_key: _,\n                         public_key,\n             
            weight,\n                     }| (public_key.clone(), weight.unwrap()),\n                )\n                .collect(),\n            fault_tolerance_fraction,\n        );\n\n        let protocol_config = ProtocolConfig {\n            version: protocol_version,\n            global_state_update: None,\n            activation_point: ActivationPoint::EraId(EraId::random(&mut rng)),\n            hard_reset: rng.gen(),\n        };\n\n        let result: Vec<_> = sync_leap\n            .era_validator_weights(fault_tolerance_fraction, &protocol_config)\n            .collect();\n        assert_eq!(\n            result,\n            vec![\n                first_era_validator_weights,\n                second_era_validator_weights,\n                third_era_validator_weights,\n            ]\n        )\n    }\n\n    #[test]\n    fn should_not_return_global_states_when_no_upgrade() {\n        let mut rng = TestRng::new();\n\n        const DEFAULT_VALIDATOR_WEIGHT: u32 = 100;\n\n        let chain_name_hash = ChainNameDigest::random(&mut rng);\n\n        let validators: Vec<_> = iter::repeat_with(crypto::generate_ed25519_keypair)\n            .take(2)\n            .map(|(secret_key, public_key)| ValidatorSpec {\n                secret_key,\n                public_key,\n                weight: Some(DEFAULT_VALIDATOR_WEIGHT.into()),\n            })\n            .collect();\n\n        let mut test_chain_spec = TestChainSpec::new(&mut rng, Some(vec![4, 8]), None, &validators);\n        let chain: Vec<_> = test_chain_spec.iter().take(12).collect();\n\n        let sync_leap = make_test_sync_leap_with_chain(\n            &validators,\n            &chain,\n            11,\n            &[10, 9, 8],\n            &[],\n            chain_name_hash,\n            false,\n        );\n\n        let global_states_metadata = sync_leap.global_states_for_sync_across_upgrade();\n        assert!(global_states_metadata.is_none());\n    }\n\n    #[test]\n    fn 
should_return_global_states_when_upgrade() {\n        let mut rng = TestRng::new();\n\n        const DEFAULT_VALIDATOR_WEIGHT: u32 = 100;\n\n        let chain_name_hash = ChainNameDigest::random(&mut rng);\n\n        let validators: Vec<_> = iter::repeat_with(crypto::generate_ed25519_keypair)\n            .take(2)\n            .map(|(secret_key, public_key)| ValidatorSpec {\n                secret_key,\n                public_key,\n                weight: Some(DEFAULT_VALIDATOR_WEIGHT.into()),\n            })\n            .collect();\n\n        let mut test_chain_spec =\n            TestChainSpec::new(&mut rng, Some(vec![4, 8]), Some(vec![8]), &validators);\n        let chain: Vec<_> = test_chain_spec.iter().take(12).collect();\n\n        let sync_leap = make_test_sync_leap_with_chain(\n            &validators,\n            &chain,\n            11,\n            &[10, 9, 8],\n            &[],\n            chain_name_hash,\n            false,\n        );\n\n        let global_states_metadata = sync_leap\n            .global_states_for_sync_across_upgrade()\n            .expect(\"should be Some\");\n\n        assert_eq!(global_states_metadata.after_hash, *chain[9].hash());\n        assert_eq!(global_states_metadata.after_era_id, chain[9].era_id());\n        assert_eq!(\n            global_states_metadata.after_state_hash,\n            *chain[9].state_root_hash()\n        );\n\n        assert_eq!(global_states_metadata.before_hash, *chain[8].hash());\n        assert_eq!(\n            global_states_metadata.before_state_hash,\n            *chain[8].state_root_hash()\n        );\n    }\n\n    #[test]\n    fn should_return_global_states_when_immediate_switch_block() {\n        let mut rng = TestRng::new();\n\n        const DEFAULT_VALIDATOR_WEIGHT: u32 = 100;\n\n        let chain_name_hash = ChainNameDigest::random(&mut rng);\n\n        let validators: Vec<_> = iter::repeat_with(crypto::generate_ed25519_keypair)\n            .take(2)\n            .map(|(secret_key, 
public_key)| ValidatorSpec {\n                secret_key,\n                public_key,\n                weight: Some(DEFAULT_VALIDATOR_WEIGHT.into()),\n            })\n            .collect();\n\n        let mut test_chain_spec =\n            TestChainSpec::new(&mut rng, Some(vec![4, 8, 9]), Some(vec![8]), &validators);\n        let chain: Vec<_> = test_chain_spec.iter().take(12).collect();\n\n        let sync_leap = make_test_sync_leap_with_chain(\n            &validators,\n            &chain,\n            9,\n            &[8],\n            &[],\n            chain_name_hash,\n            false,\n        );\n\n        let global_states_metadata = sync_leap\n            .global_states_for_sync_across_upgrade()\n            .expect(\"should be Some\");\n\n        assert_eq!(global_states_metadata.after_hash, *chain[9].hash());\n        assert_eq!(global_states_metadata.after_era_id, chain[9].era_id());\n        assert_eq!(\n            global_states_metadata.after_state_hash,\n            *chain[9].state_root_hash()\n        );\n\n        assert_eq!(global_states_metadata.before_hash, *chain[8].hash());\n        assert_eq!(\n            global_states_metadata.before_state_hash,\n            *chain[8].state_root_hash()\n        );\n    }\n\n    #[test]\n    fn era_validator_weights_without_genesis_without_upgrade() {\n        let mut rng = TestRng::new();\n\n        let trusted_block = TestBlockBuilder::new()\n            .switch_block(false)\n            .build_versioned(&mut rng);\n\n        let version = ProtocolVersion::from_parts(1, 5, 0);\n\n        let (\n            block_header_with_signatures_1,\n            block_header_with_signatures_2,\n            block_header_with_signatures_3,\n        ) = make_three_switch_blocks_at_era_and_height_and_version(\n            &mut rng,\n            (1, 10, version),\n            (2, 20, version),\n            (3, 30, version),\n        );\n\n        let sync_leap = SyncLeap {\n            trusted_ancestor_only: false,\n  
          trusted_block_header: trusted_block.clone_header(),\n            trusted_ancestor_headers: vec![],\n            block_headers_with_signatures: vec![\n                block_header_with_signatures_1,\n                block_header_with_signatures_2,\n                block_header_with_signatures_3,\n            ],\n        };\n\n        let fault_tolerance_fraction = Ratio::new_raw(1, 3);\n\n        // Assert only if correct eras are selected, since the\n        // `should_return_era_validator_weights_for_correct_sync_leap` test already covers the\n        // actual weight validation.\n\n        let protocol_config = ProtocolConfig {\n            version,\n            global_state_update: None,\n            hard_reset: false,\n            activation_point: ActivationPoint::EraId(EraId::random(&mut rng)),\n        };\n\n        let actual_eras: BTreeSet<u64> = sync_leap\n            .era_validator_weights(fault_tolerance_fraction, &protocol_config)\n            .map(|era_validator_weights| era_validator_weights.era_id().into())\n            .collect();\n        let mut expected_eras: BTreeSet<u64> = BTreeSet::new();\n        // Expect successors of the eras of switch blocks.\n        expected_eras.extend([2, 3, 4]);\n        assert_eq!(expected_eras, actual_eras);\n    }\n\n    #[test]\n    fn era_validator_weights_without_genesis_with_switch_block_preceding_immediate_switch_block() {\n        let mut rng = TestRng::new();\n\n        let trusted_block = TestBlockBuilder::new()\n            .switch_block(false)\n            .build_versioned(&mut rng);\n\n        let version_1 = ProtocolVersion::from_parts(1, 4, 0);\n        let version_2 = ProtocolVersion::from_parts(1, 5, 0);\n\n        let (\n            block_header_with_signatures_1,\n            block_header_with_signatures_2,\n            block_header_with_signatures_3,\n        ) = make_three_switch_blocks_at_era_and_height_and_version(\n            &mut rng,\n            (1, 10, version_1),\n            
(2, 20, version_1),\n            (3, 21, version_2),\n        );\n\n        let sync_leap = SyncLeap {\n            trusted_ancestor_only: false,\n            trusted_block_header: trusted_block.clone_header(),\n            trusted_ancestor_headers: vec![],\n            block_headers_with_signatures: vec![\n                block_header_with_signatures_1,\n                block_header_with_signatures_2,\n                block_header_with_signatures_3,\n            ],\n        };\n\n        let fault_tolerance_fraction = Ratio::new_raw(1, 3);\n\n        // Assert only if correct eras are selected, since the\n        // `should_return_era_validator_weights_for_correct_sync_leap` test already covers the\n        // actual weight validation.\n\n        let protocol_config = ProtocolConfig {\n            version: version_2,\n            global_state_update: Some(GlobalStateUpdate {\n                validators: Some(BTreeMap::new()),\n                entries: BTreeMap::new(),\n            }),\n            hard_reset: false,\n            activation_point: ActivationPoint::EraId(EraId::random(&mut rng)),\n        };\n\n        let actual_eras: BTreeSet<u64> = sync_leap\n            .era_validator_weights(fault_tolerance_fraction, &protocol_config)\n            .map(|era_validator_weights| era_validator_weights.era_id().into())\n            .collect();\n        let mut expected_eras: BTreeSet<u64> = BTreeSet::new();\n\n        // Block #1 (era=1, height=10)\n        // Block #2 (era=2, height=20) - block preceding immediate switch block\n        // Block #3 (era=3, height=21) - immediate switch block.\n        // Expect the successor of block #2 to be not present.\n        expected_eras.extend([2, 4]);\n        assert_eq!(expected_eras, actual_eras);\n\n        let protocol_config = ProtocolConfig {\n            version: version_2,\n            global_state_update: None,\n            hard_reset: rng.gen(),\n            activation_point: 
ActivationPoint::EraId(EraId::random(&mut rng)),\n        };\n\n        let actual_eras: BTreeSet<u64> = sync_leap\n            .era_validator_weights(fault_tolerance_fraction, &protocol_config)\n            .map(|era_validator_weights| era_validator_weights.era_id().into())\n            .collect();\n        let mut expected_eras: BTreeSet<u64> = BTreeSet::new();\n\n        // Block #1 (era=1, height=10)\n        // Block #2 (era=2, height=20) - block preceding immediate switch block\n        // Block #3 (era=3, height=21) - immediate switch block.\n        // Expect era 3 to be present since the upgrade did not change the validators in any way.\n        expected_eras.extend([2, 3, 4]);\n        assert_eq!(expected_eras, actual_eras);\n    }\n\n    #[test]\n    fn era_validator_weights_with_genesis_without_upgrade() {\n        let mut rng = TestRng::new();\n\n        let trusted_block = TestBlockBuilder::new()\n            .switch_block(false)\n            .build_versioned(&mut rng);\n\n        let version = ProtocolVersion::from_parts(1, 5, 0);\n\n        let (\n            block_header_with_signatures_1,\n            block_header_with_signatures_2,\n            block_header_with_signatures_3,\n        ) = make_three_switch_blocks_at_era_and_height_and_version(\n            &mut rng,\n            (0, 0, version),\n            (1, 10, version),\n            (2, 20, version),\n        );\n\n        let sync_leap = SyncLeap {\n            trusted_ancestor_only: false,\n            trusted_block_header: trusted_block.clone_header(),\n            trusted_ancestor_headers: vec![],\n            block_headers_with_signatures: vec![\n                block_header_with_signatures_1,\n                block_header_with_signatures_2,\n                block_header_with_signatures_3,\n            ],\n        };\n\n        let fault_tolerance_fraction = Ratio::new_raw(1, 3);\n\n        // Assert only if correct eras are selected, since the\n        // 
`should_return_era_validator_weights_for_correct_sync_leap` test already covers the\n        // actual weight validation.\n        let protocol_config = ProtocolConfig {\n            version,\n            global_state_update: None,\n            hard_reset: false,\n            activation_point: ActivationPoint::EraId(EraId::random(&mut rng)),\n        };\n\n        let actual_eras: BTreeSet<u64> = sync_leap\n            .era_validator_weights(fault_tolerance_fraction, &protocol_config)\n            .map(|era_validator_weights| era_validator_weights.era_id().into())\n            .collect();\n        let mut expected_eras: BTreeSet<u64> = BTreeSet::new();\n        // Expect genesis era id and its successor as well as the successors of the eras of\n        // non-genesis switch blocks.\n        expected_eras.extend([0, 1, 2, 3]);\n        assert_eq!(expected_eras, actual_eras);\n    }\n\n    fn make_three_switch_blocks_at_era_and_height_and_version(\n        rng: &mut TestRng,\n        (era_1, height_1, version_1): (u64, u64, ProtocolVersion),\n        (era_2, height_2, version_2): (u64, u64, ProtocolVersion),\n        (era_3, height_3, version_3): (u64, u64, ProtocolVersion),\n    ) -> (\n        BlockHeaderWithSignatures,\n        BlockHeaderWithSignatures,\n        BlockHeaderWithSignatures,\n    ) {\n        let chain_name_hash = ChainNameDigest::random(rng);\n        let signed_block_1 = TestBlockBuilder::new()\n            .height(height_1)\n            .era(era_1)\n            .protocol_version(version_1)\n            .switch_block(true)\n            .build_versioned(rng);\n        let signed_block_2 = TestBlockBuilder::new()\n            .height(height_2)\n            .era(era_2)\n            .protocol_version(version_2)\n            .switch_block(true)\n            .build_versioned(rng);\n        let signed_block_3 = TestBlockBuilder::new()\n            .height(height_3)\n            .era(era_3)\n            .protocol_version(version_3)\n            
.switch_block(true)\n            .build_versioned(rng);\n\n        let block_header_with_signatures_1 = make_block_header_with_signatures_from_header(\n            &signed_block_1.clone_header(),\n            &[],\n            chain_name_hash,\n            false,\n        );\n        let block_header_with_signatures_2 = make_block_header_with_signatures_from_header(\n            &signed_block_2.clone_header(),\n            &[],\n            chain_name_hash,\n            false,\n        );\n        let block_header_with_signatures_3 = make_block_header_with_signatures_from_header(\n            &signed_block_3.clone_header(),\n            &[],\n            chain_name_hash,\n            false,\n        );\n        (\n            block_header_with_signatures_1,\n            block_header_with_signatures_2,\n            block_header_with_signatures_3,\n        )\n    }\n\n    #[test]\n    fn should_construct_proper_sync_leap_identifier() {\n        let mut rng = TestRng::new();\n\n        let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng));\n        assert!(!sync_leap_identifier.trusted_ancestor_only());\n\n        let sync_leap_identifier =\n            SyncLeapIdentifier::sync_to_historical(BlockHash::random(&mut rng));\n        assert!(sync_leap_identifier.trusted_ancestor_only());\n    }\n\n    // Describes a single item from the set of validators that will be used for switch blocks\n    // created by TestChainSpec.\n    pub(crate) struct ValidatorSpec {\n        pub(crate) secret_key: SecretKey,\n        pub(crate) public_key: PublicKey,\n        // If `None`, weight will be chosen randomly.\n        pub(crate) weight: Option<U512>,\n    }\n\n    // Utility struct that can be turned into an iterator that generates\n    // continuous and descending blocks (i.e. blocks that have consecutive height\n    // and parent hashes are correctly set). 
The height of the first block\n    // in a series is chosen randomly.\n    //\n    // Additionally, this struct allows to generate switch blocks at a specific location in the\n    // chain, for example: Setting `switch_block_indices` to [1, 3] and generating 5 blocks will\n    // cause the 2nd and 4th blocks to be switch blocks. Validators for all eras are filled from\n    // the `validators` parameter.\n    pub(crate) struct TestChainSpec<'a> {\n        block: BlockV2,\n        rng: &'a mut TestRng,\n        switch_block_indices: Option<Vec<u64>>,\n        upgrades_indices: Option<Vec<u64>>,\n        validators: &'a [ValidatorSpec],\n    }\n\n    impl<'a> TestChainSpec<'a> {\n        pub(crate) fn new(\n            test_rng: &'a mut TestRng,\n            switch_block_indices: Option<Vec<u64>>,\n            upgrades_indices: Option<Vec<u64>>,\n            validators: &'a [ValidatorSpec],\n        ) -> Self {\n            let block = TestBlockBuilder::new().build(test_rng);\n            Self {\n                block,\n                rng: test_rng,\n                switch_block_indices,\n                upgrades_indices,\n                validators,\n            }\n        }\n\n        pub(crate) fn iter(&mut self) -> TestBlockIterator {\n            let block_height = self.block.height();\n\n            const DEFAULT_VALIDATOR_WEIGHT: u64 = 100;\n\n            TestBlockIterator::new(\n                self.block.clone(),\n                self.rng,\n                self.switch_block_indices\n                    .clone()\n                    .map(|switch_block_indices| {\n                        switch_block_indices\n                            .iter()\n                            .map(|index| index + block_height)\n                            .collect()\n                    }),\n                self.upgrades_indices.clone().map(|upgrades_indices| {\n                    upgrades_indices\n                        .iter()\n                        .map(|index| index + 
block_height)\n                        .collect()\n                }),\n                self.validators\n                    .iter()\n                    .map(\n                        |ValidatorSpec {\n                             secret_key: _,\n                             public_key,\n                             weight,\n                         }| {\n                            (\n                                public_key.clone(),\n                                weight.unwrap_or(DEFAULT_VALIDATOR_WEIGHT.into()),\n                            )\n                        },\n                    )\n                    .collect(),\n            )\n        }\n    }\n\n    pub(crate) struct TestBlockIterator<'a> {\n        block: BlockV2,\n        protocol_version: ProtocolVersion,\n        rng: &'a mut TestRng,\n        switch_block_indices: Option<Vec<u64>>,\n        upgrades_indices: Option<Vec<u64>>,\n        validators: Vec<(PublicKey, U512)>,\n        next_validator_index: usize,\n    }\n\n    impl<'a> TestBlockIterator<'a> {\n        pub fn new(\n            block: BlockV2,\n            rng: &'a mut TestRng,\n            switch_block_indices: Option<Vec<u64>>,\n            upgrades_indices: Option<Vec<u64>>,\n            validators: Vec<(PublicKey, U512)>,\n        ) -> Self {\n            let protocol_version = block.protocol_version();\n            Self {\n                block,\n                protocol_version,\n                rng,\n                switch_block_indices,\n                upgrades_indices,\n                validators,\n                next_validator_index: 0,\n            }\n        }\n    }\n\n    impl Iterator for TestBlockIterator<'_> {\n        type Item = BlockV2;\n\n        fn next(&mut self) -> Option<Self::Item> {\n            let (is_successor_of_switch_block, is_upgrade, maybe_validators) = match &self\n                .switch_block_indices\n            {\n                Some(switch_block_heights)\n                    if 
switch_block_heights.contains(&self.block.height()) =>\n                {\n                    let prev_height = self.block.height().saturating_sub(1);\n                    let is_successor_of_switch_block = switch_block_heights.contains(&prev_height);\n                    let is_upgrade = is_successor_of_switch_block\n                        && self\n                            .upgrades_indices\n                            .as_ref()\n                            .is_some_and(|upgrades_indices| {\n                                upgrades_indices.contains(&prev_height)\n                            });\n                    (\n                        is_successor_of_switch_block,\n                        is_upgrade,\n                        Some(self.validators.clone()),\n                    )\n                }\n                Some(switch_block_heights) => {\n                    let prev_height = self.block.height().saturating_sub(1);\n                    let is_successor_of_switch_block = switch_block_heights.contains(&prev_height);\n                    let is_upgrade = is_successor_of_switch_block\n                        && self\n                            .upgrades_indices\n                            .as_ref()\n                            .is_some_and(|upgrades_indices| {\n                                upgrades_indices.contains(&prev_height)\n                            });\n                    (is_successor_of_switch_block, is_upgrade, None)\n                }\n                None => (false, false, None),\n            };\n\n            let maybe_validators = if let Some(validators) = maybe_validators {\n                let first_validator = validators.get(self.next_validator_index).unwrap();\n                let second_validator = validators.get(self.next_validator_index + 1).unwrap();\n\n                // Put two validators in each switch block.\n                let mut validators_for_block = BTreeMap::new();\n                
validators_for_block.insert(first_validator.0.clone(), first_validator.1);\n                validators_for_block.insert(second_validator.0.clone(), second_validator.1);\n                self.next_validator_index += 2;\n\n                // If we're out of validators, do round robin on the provided list.\n                if self.next_validator_index >= self.validators.len() {\n                    self.next_validator_index = 0;\n                }\n                Some(validators_for_block)\n            } else {\n                None\n            };\n\n            if is_upgrade {\n                self.protocol_version = ProtocolVersion::from_parts(\n                    self.protocol_version.value().major,\n                    self.protocol_version.value().minor + 1,\n                    self.protocol_version.value().patch,\n                );\n            }\n\n            let gas_price: u8 = 1u8;\n\n            let era_end = maybe_validators.map(|validators| {\n                let rnd = EraEndV2::random(self.rng);\n                EraEndV2::new(\n                    Vec::from(rnd.equivocators()),\n                    Vec::from(rnd.inactive_validators()),\n                    validators,\n                    rnd.rewards().clone(),\n                    gas_price,\n                )\n            });\n            let next_block_era_id = if is_successor_of_switch_block {\n                self.block.era_id().successor()\n            } else {\n                self.block.era_id()\n            };\n            let count = self.rng.gen_range(0..6);\n            let mint_hashes =\n                iter::repeat_with(|| TransactionHash::V1(TransactionV1Hash::random(self.rng)))\n                    .take(count)\n                    .collect();\n            let count = self.rng.gen_range(0..6);\n            let auction_hashes =\n                iter::repeat_with(|| TransactionHash::V1(TransactionV1Hash::random(self.rng)))\n                    .take(count)\n                    
.collect();\n            let count = self.rng.gen_range(0..6);\n            let install_upgrade_hashes =\n                iter::repeat_with(|| TransactionHash::V1(TransactionV1Hash::random(self.rng)))\n                    .take(count)\n                    .collect();\n            let count = self.rng.gen_range(0..6);\n            let standard_hashes =\n                iter::repeat_with(|| TransactionHash::V1(TransactionV1Hash::random(self.rng)))\n                    .take(count)\n                    .collect();\n\n            let transactions = {\n                let mut ret = BTreeMap::new();\n                ret.insert(MINT_LANE_ID, mint_hashes);\n                ret.insert(AUCTION_LANE_ID, auction_hashes);\n                ret.insert(INSTALL_UPGRADE_LANE_ID, install_upgrade_hashes);\n                ret.insert(3, standard_hashes);\n                ret\n            };\n\n            let next = BlockV2::new(\n                *self.block.hash(),\n                *self.block.accumulated_seed(),\n                *self.block.state_root_hash(),\n                self.rng.gen(),\n                era_end,\n                Timestamp::now(),\n                next_block_era_id,\n                self.block.height() + 1,\n                self.protocol_version,\n                PublicKey::random(self.rng),\n                transactions,\n                Default::default(),\n                gas_price,\n                Default::default(),\n            );\n\n            self.block = next.clone();\n            Some(next)\n        }\n    }\n\n    #[test]\n    fn should_create_valid_chain() {\n        let mut rng = TestRng::new();\n        let mut test_block = TestChainSpec::new(&mut rng, None, None, &[]);\n        let mut block_batch = test_block.iter().take(100);\n        let mut parent_block: BlockV2 = block_batch.next().unwrap();\n        for current_block in block_batch {\n            assert_eq!(\n                current_block.height(),\n                parent_block.height() + 
1,\n                \"height should grow monotonically\"\n            );\n            assert_eq!(\n                current_block.parent_hash(),\n                parent_block.hash(),\n                \"block's parent should point at previous block\"\n            );\n            parent_block = current_block;\n        }\n    }\n\n    #[test]\n    fn should_create_switch_blocks() {\n        let switch_block_indices = vec![0, 10, 76];\n\n        let validators: Vec<_> = iter::repeat_with(crypto::generate_ed25519_keypair)\n            .take(2)\n            .map(|(secret_key, public_key)| ValidatorSpec {\n                secret_key,\n                public_key,\n                weight: None,\n            })\n            .collect();\n\n        let mut rng = TestRng::new();\n        let mut test_block = TestChainSpec::new(\n            &mut rng,\n            Some(switch_block_indices.clone()),\n            None,\n            &validators,\n        );\n        let block_batch: Vec<_> = test_block.iter().take(100).collect();\n\n        let base_height = block_batch.first().expect(\"should have block\").height();\n\n        for block in block_batch {\n            if switch_block_indices\n                .iter()\n                .map(|index| index + base_height)\n                .any(|index| index == block.height())\n            {\n                assert!(block.is_switch_block())\n            } else {\n                assert!(!block.is_switch_block())\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/types/sync_leap_validation_metadata.rs",
    "content": "use casper_types::{ActivationPoint, Chainspec, GlobalStateUpdate};\nuse datasize::DataSize;\nuse num_rational::Ratio;\nuse serde::Serialize;\n\n#[derive(Clone, DataSize, Debug, Eq, PartialEq, Serialize)]\npub(crate) struct SyncLeapValidationMetaData {\n    pub(crate) recent_era_count: u64,\n    pub(crate) activation_point: ActivationPoint,\n    pub(crate) global_state_update: Option<GlobalStateUpdate>,\n    #[data_size(skip)]\n    pub(crate) finality_threshold_fraction: Ratio<u64>,\n}\n\nimpl SyncLeapValidationMetaData {\n    #[cfg(test)]\n    pub fn new(\n        recent_era_count: u64,\n        activation_point: ActivationPoint,\n        global_state_update: Option<GlobalStateUpdate>,\n        finality_threshold_fraction: Ratio<u64>,\n    ) -> Self {\n        Self {\n            recent_era_count,\n            activation_point,\n            global_state_update,\n            finality_threshold_fraction,\n        }\n    }\n\n    pub(crate) fn from_chainspec(chainspec: &Chainspec) -> Self {\n        Self {\n            recent_era_count: chainspec.core_config.recent_era_count(),\n            activation_point: chainspec.protocol_config.activation_point,\n            global_state_update: chainspec.protocol_config.global_state_update.clone(),\n            finality_threshold_fraction: chainspec.core_config.finality_threshold_fraction,\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/types/transaction/arg_handling.rs",
    "content": "//! Collection of helper functions and structures to reason about amorphic RuntimeArgs.\nuse core::marker::PhantomData;\n\nuse casper_types::{\n    account::AccountHash,\n    bytesrepr::FromBytes,\n    system::auction::{DelegatorKind, Reservation, ARG_VALIDATOR},\n    CLType, CLTyped, CLValue, CLValueError, Chainspec, InvalidTransactionV1, PublicKey,\n    RuntimeArgs, TransactionArgs, URef, U512,\n};\n#[cfg(test)]\nuse casper_types::{bytesrepr::ToBytes, TransferTarget};\nuse tracing::debug;\n\nconst TRANSFER_ARG_AMOUNT: RequiredArg<U512> = RequiredArg::new(\"amount\");\nconst TRANSFER_ARG_SOURCE: OptionalArg<URef> = OptionalArg::new(\"source\");\nconst TRANSFER_ARG_TARGET: &str = \"target\";\n// \"id\" for legacy reasons, if the argument is passed it is [Option]\nconst TRANSFER_ARG_ID: OptionalArg<Option<u64>> = OptionalArg::new(\"id\");\n\nconst BURN_ARG_AMOUNT: RequiredArg<U512> = RequiredArg::new(\"amount\");\nconst BURN_ARG_SOURCE: OptionalArg<URef> = OptionalArg::new(\"source\");\n\nconst ADD_BID_ARG_PUBLIC_KEY: RequiredArg<PublicKey> = RequiredArg::new(\"public_key\");\nconst ADD_BID_ARG_DELEGATION_RATE: RequiredArg<u8> = RequiredArg::new(\"delegation_rate\");\nconst ADD_BID_ARG_AMOUNT: RequiredArg<U512> = RequiredArg::new(\"amount\");\nconst ADD_BID_ARG_MINIMUM_DELEGATION_AMOUNT: OptionalArg<u64> =\n    OptionalArg::new(\"minimum_delegation_amount\");\nconst ADD_BID_ARG_MAXIMUM_DELEGATION_AMOUNT: OptionalArg<u64> =\n    OptionalArg::new(\"maximum_delegation_amount\");\nconst ADD_BID_ARG_RESERVED_SLOTS: OptionalArg<u32> = OptionalArg::new(\"reserved_slots\");\n\nconst WITHDRAW_BID_ARG_PUBLIC_KEY: RequiredArg<PublicKey> = RequiredArg::new(\"public_key\");\nconst WITHDRAW_BID_ARG_AMOUNT: RequiredArg<U512> = RequiredArg::new(\"amount\");\n\nconst DELEGATE_ARG_DELEGATOR: RequiredArg<PublicKey> = RequiredArg::new(\"delegator\");\nconst DELEGATE_ARG_VALIDATOR: RequiredArg<PublicKey> = RequiredArg::new(\"validator\");\nconst DELEGATE_ARG_AMOUNT: 
RequiredArg<U512> = RequiredArg::new(\"amount\");\n\nconst UNDELEGATE_ARG_DELEGATOR: RequiredArg<PublicKey> = RequiredArg::new(\"delegator\");\nconst UNDELEGATE_ARG_VALIDATOR: RequiredArg<PublicKey> = RequiredArg::new(\"validator\");\nconst UNDELEGATE_ARG_AMOUNT: RequiredArg<U512> = RequiredArg::new(\"amount\");\n\nconst REDELEGATE_ARG_DELEGATOR: RequiredArg<PublicKey> = RequiredArg::new(\"delegator\");\nconst REDELEGATE_ARG_VALIDATOR: RequiredArg<PublicKey> = RequiredArg::new(\"validator\");\nconst REDELEGATE_ARG_AMOUNT: RequiredArg<U512> = RequiredArg::new(\"amount\");\nconst REDELEGATE_ARG_NEW_VALIDATOR: RequiredArg<PublicKey> = RequiredArg::new(\"new_validator\");\n\nconst ACTIVATE_BID_ARG_VALIDATOR: RequiredArg<PublicKey> = RequiredArg::new(ARG_VALIDATOR);\n\nconst CHANGE_BID_PUBLIC_KEY_ARG_PUBLIC_KEY: RequiredArg<PublicKey> = RequiredArg::new(\"public_key\");\nconst CHANGE_BID_PUBLIC_KEY_ARG_NEW_PUBLIC_KEY: RequiredArg<PublicKey> =\n    RequiredArg::new(\"new_public_key\");\n\nconst ADD_RESERVATIONS_ARG_RESERVATIONS: RequiredArg<Vec<Reservation>> =\n    RequiredArg::new(\"reservations\");\n\nconst CANCEL_RESERVATIONS_ARG_VALIDATOR: RequiredArg<PublicKey> = RequiredArg::new(\"validator\");\nconst CANCEL_RESERVATIONS_ARG_DELEGATORS: RequiredArg<Vec<DelegatorKind>> =\n    RequiredArg::new(\"delegators\");\n\nstruct RequiredArg<T> {\n    name: &'static str,\n    _phantom: PhantomData<T>,\n}\n\nimpl<T> RequiredArg<T> {\n    const fn new(name: &'static str) -> Self {\n        Self {\n            name,\n            _phantom: PhantomData,\n        }\n    }\n\n    fn get(&self, args: &RuntimeArgs) -> Result<T, InvalidTransactionV1>\n    where\n        T: CLTyped + FromBytes,\n    {\n        let cl_value = args.get(self.name).ok_or_else(|| {\n            debug!(\"missing required runtime argument '{}'\", self.name);\n            InvalidTransactionV1::MissingArg {\n                arg_name: self.name.to_string(),\n            }\n        })?;\n        
parse_cl_value(cl_value, self.name)\n    }\n\n    #[cfg(test)]\n    fn insert(&self, args: &mut RuntimeArgs, value: T) -> Result<(), CLValueError>\n    where\n        T: CLTyped + ToBytes,\n    {\n        args.insert(self.name, value)\n    }\n}\n\nstruct OptionalArg<T> {\n    name: &'static str,\n    _phantom: PhantomData<T>,\n}\n\nimpl<T> OptionalArg<T> {\n    const fn new(name: &'static str) -> Self {\n        Self {\n            name,\n            _phantom: PhantomData,\n        }\n    }\n\n    fn get(&self, args: &RuntimeArgs) -> Result<Option<T>, InvalidTransactionV1>\n    where\n        T: CLTyped + FromBytes,\n    {\n        let cl_value = match args.get(self.name) {\n            Some(value) => value,\n            None => return Ok(None),\n        };\n        let value = parse_cl_value::<T>(cl_value, self.name)?;\n        Ok(Some(value))\n    }\n\n    #[cfg(test)]\n    fn insert(&self, args: &mut RuntimeArgs, value: T) -> Result<(), CLValueError>\n    where\n        T: CLTyped + ToBytes,\n    {\n        args.insert(self.name, value)\n    }\n}\n\nfn parse_cl_value<T: CLTyped + FromBytes>(\n    cl_value: &CLValue,\n    arg_name: &str,\n) -> Result<T, InvalidTransactionV1> {\n    cl_value.to_t::<T>().map_err(|error| {\n        let error = match error {\n            CLValueError::Serialization(error) => InvalidTransactionV1::InvalidArg {\n                arg_name: arg_name.to_string(),\n                error,\n            },\n            CLValueError::Type(_) => InvalidTransactionV1::unexpected_arg_type(\n                arg_name.to_string(),\n                vec![T::cl_type()],\n                cl_value.cl_type().clone(),\n            ),\n        };\n        debug!(\"{error}\");\n        error\n    })\n}\n\n/// Creates a `RuntimeArgs` suitable for use in a transfer transaction.\n#[cfg(test)]\npub fn new_transfer_args<A: Into<U512>, T: Into<TransferTarget>>(\n    amount: A,\n    maybe_source: Option<URef>,\n    target: T,\n    maybe_id: Option<u64>,\n) -> 
Result<RuntimeArgs, CLValueError> {\n    let mut args = RuntimeArgs::new();\n    if let Some(source) = maybe_source {\n        TRANSFER_ARG_SOURCE.insert(&mut args, source)?;\n    }\n    match target.into() {\n        TransferTarget::PublicKey(public_key) => args.insert(TRANSFER_ARG_TARGET, public_key)?,\n        TransferTarget::AccountHash(account_hash) => {\n            args.insert(TRANSFER_ARG_TARGET, account_hash)?\n        }\n        TransferTarget::URef(uref) => args.insert(TRANSFER_ARG_TARGET, uref)?,\n    }\n    TRANSFER_ARG_AMOUNT.insert(&mut args, amount.into())?;\n    if maybe_id.is_some() {\n        TRANSFER_ARG_ID.insert(&mut args, maybe_id)?;\n    }\n    Ok(args)\n}\n\n/// Checks the given `RuntimeArgs` are suitable for use in a transfer transaction.\npub fn has_valid_transfer_args(\n    args: &TransactionArgs,\n    native_transfer_minimum_motes: u64,\n) -> Result<(), InvalidTransactionV1> {\n    let args = args\n        .as_named()\n        .ok_or(InvalidTransactionV1::ExpectedNamedArguments)?;\n\n    let amount = TRANSFER_ARG_AMOUNT.get(args)?;\n    if amount < U512::from(native_transfer_minimum_motes) {\n        debug!(\n            minimum = %native_transfer_minimum_motes,\n            %amount,\n            \"insufficient transfer amount\"\n        );\n        return Err(InvalidTransactionV1::InsufficientTransferAmount {\n            minimum: native_transfer_minimum_motes,\n            attempted: amount,\n        });\n    }\n    let _source = TRANSFER_ARG_SOURCE.get(args)?;\n\n    let target_cl_value = args.get(TRANSFER_ARG_TARGET).ok_or_else(|| {\n        debug!(\"missing required runtime argument '{TRANSFER_ARG_TARGET}'\");\n        InvalidTransactionV1::MissingArg {\n            arg_name: TRANSFER_ARG_TARGET.to_string(),\n        }\n    })?;\n    match target_cl_value.cl_type() {\n        CLType::PublicKey => {\n            let _ = parse_cl_value::<PublicKey>(target_cl_value, TRANSFER_ARG_TARGET);\n        }\n        CLType::ByteArray(32) => 
{\n            let _ = parse_cl_value::<AccountHash>(target_cl_value, TRANSFER_ARG_TARGET);\n        }\n        CLType::URef => {\n            let _ = parse_cl_value::<URef>(target_cl_value, TRANSFER_ARG_TARGET);\n        }\n        _ => {\n            debug!(\n                \"expected runtime argument '{TRANSFER_ARG_TARGET}' to be of type {}, {} or {},\n                but is {}\",\n                CLType::PublicKey,\n                CLType::ByteArray(32),\n                CLType::URef,\n                target_cl_value.cl_type()\n            );\n            return Err(InvalidTransactionV1::unexpected_arg_type(\n                TRANSFER_ARG_TARGET.to_string(),\n                vec![CLType::PublicKey, CLType::ByteArray(32), CLType::URef],\n                target_cl_value.cl_type().clone(),\n            ));\n        }\n    }\n\n    let _maybe_id = TRANSFER_ARG_ID.get(args)?;\n    Ok(())\n}\n\n/// Creates a `RuntimeArgs` suitable for use in a burn transaction.\n#[cfg(test)]\npub fn new_burn_args<A: Into<U512>>(\n    amount: A,\n    maybe_source: Option<URef>,\n) -> Result<RuntimeArgs, CLValueError> {\n    let mut args = RuntimeArgs::new();\n    if let Some(source) = maybe_source {\n        BURN_ARG_SOURCE.insert(&mut args, source)?;\n    }\n    BURN_ARG_AMOUNT.insert(&mut args, amount.into())?;\n    Ok(args)\n}\n\n/// Checks the given `RuntimeArgs` are suitable for use in a burn transaction.\npub fn has_valid_burn_args(args: &TransactionArgs) -> Result<(), InvalidTransactionV1> {\n    let native_burn_minimum_motes = 1;\n    let args = args\n        .as_named()\n        .ok_or(InvalidTransactionV1::ExpectedNamedArguments)?;\n\n    let amount = BURN_ARG_AMOUNT.get(args)?;\n    if amount < U512::from(native_burn_minimum_motes) {\n        debug!(\n            minimum = %native_burn_minimum_motes,\n            %amount,\n            \"insufficient burn amount\"\n        );\n        return Err(InvalidTransactionV1::InsufficientBurnAmount {\n            minimum: 
native_burn_minimum_motes,
            attempted: amount,
        });
    }
    let _source = BURN_ARG_SOURCE.get(args)?;
    Ok(())
}

/// Creates a `RuntimeArgs` suitable for use in an add_bid transaction.
#[cfg(test)]
pub fn new_add_bid_args<A: Into<U512>>(
    public_key: PublicKey,
    delegation_rate: u8,
    amount: A,
    maybe_minimum_delegation_amount: Option<u64>,
    maybe_maximum_delegation_amount: Option<u64>,
    maybe_reserved_slots: Option<u32>,
) -> Result<RuntimeArgs, CLValueError> {
    let mut args = RuntimeArgs::new();
    ADD_BID_ARG_PUBLIC_KEY.insert(&mut args, public_key)?;
    ADD_BID_ARG_DELEGATION_RATE.insert(&mut args, delegation_rate)?;
    ADD_BID_ARG_AMOUNT.insert(&mut args, amount.into())?;
    // The three delegation-related args are optional and only inserted when
    // the caller supplied them.
    if let Some(minimum_delegation_amount) = maybe_minimum_delegation_amount {
        ADD_BID_ARG_MINIMUM_DELEGATION_AMOUNT.insert(&mut args, minimum_delegation_amount)?;
    };
    if let Some(maximum_delegation_amount) = maybe_maximum_delegation_amount {
        ADD_BID_ARG_MAXIMUM_DELEGATION_AMOUNT.insert(&mut args, maximum_delegation_amount)?;
    };
    if let Some(reserved_slots) = maybe_reserved_slots {
        ADD_BID_ARG_RESERVED_SLOTS.insert(&mut args, reserved_slots)?;
    };
    Ok(args)
}

/// Checks the given `RuntimeArgs` are suitable for use in an add_bid transaction.
///
/// Requires "public_key", "delegation_rate" and a non-zero "amount"; the
/// optional minimum/maximum delegation amounts and reserved slots, when
/// present, are bounds-checked against the chainspec's core config.
pub fn has_valid_add_bid_args(
    chainspec: &Chainspec,
    args: &TransactionArgs,
) -> Result<(), InvalidTransactionV1> {
    let args = args
        .as_named()
        .ok_or(InvalidTransactionV1::ExpectedNamedArguments)?;
    let _public_key = ADD_BID_ARG_PUBLIC_KEY.get(args)?;
    let _delegation_rate = ADD_BID_ARG_DELEGATION_RATE.get(args)?;
    let amount = ADD_BID_ARG_AMOUNT.get(args)?;
    // A zero bid amount is never valid.
    if amount.is_zero() {
        return Err(InvalidTransactionV1::InsufficientAmount { attempted: amount });
    }
    let minimum_delegation_amount = ADD_BID_ARG_MINIMUM_DELEGATION_AMOUNT.get(args)?;
    if let Some(attempted) = minimum_delegation_amount {
        let floor = chainspec.core_config.minimum_delegation_amount;
        if attempted < floor {
            return Err(InvalidTransactionV1::InvalidMinimumDelegationAmount { floor, attempted });
        }
    }
    let maximum_delegation_amount = ADD_BID_ARG_MAXIMUM_DELEGATION_AMOUNT.get(args)?;
    if let Some(attempted) = maximum_delegation_amount {
        let ceiling = chainspec.core_config.maximum_delegation_amount;
        if attempted > ceiling {
            return Err(InvalidTransactionV1::InvalidMaximumDelegationAmount {
                ceiling,
                attempted,
            });
        }
    }
    let reserved_slots = ADD_BID_ARG_RESERVED_SLOTS.get(args)?;
    if let Some(attempted) = reserved_slots {
        // Reserved slots are capped by the per-validator delegator limit.
        let ceiling = chainspec.core_config.max_delegators_per_validator;
        if attempted > ceiling {
            return Err(InvalidTransactionV1::InvalidReservedSlots {
                ceiling,
                attempted: attempted as u64,
            });
        }
    }
    Ok(())
}

/// Creates a `RuntimeArgs` suitable for use in a withdraw_bid transaction.
#[cfg(test)]
pub fn new_withdraw_bid_args<A: Into<U512>>(
    public_key: PublicKey,
    amount: A,
) -> Result<RuntimeArgs, CLValueError> {
    let mut args = RuntimeArgs::new();
    WITHDRAW_BID_ARG_PUBLIC_KEY.insert(&mut args, public_key)?;
    WITHDRAW_BID_ARG_AMOUNT.insert(&mut args, amount.into())?;
    Ok(args)
}

/// Checks the given `RuntimeArgs` are suitable for use in a withdraw_bid transaction.
///
/// Only presence and type of "public_key" and "amount" are checked; no bounds
/// are applied to the withdrawal amount here.
pub fn has_valid_withdraw_bid_args(args: &TransactionArgs) -> Result<(), InvalidTransactionV1> {
    let args = args
        .as_named()
        .ok_or(InvalidTransactionV1::ExpectedNamedArguments)?;
    let _public_key = WITHDRAW_BID_ARG_PUBLIC_KEY.get(args)?;
    let _amount = WITHDRAW_BID_ARG_AMOUNT.get(args)?;
    Ok(())
}

/// Creates a `RuntimeArgs` suitable for use in a delegate 
transaction.
#[cfg(test)]
pub fn new_delegate_args<A: Into<U512>>(
    delegator: PublicKey,
    validator: PublicKey,
    amount: A,
) -> Result<RuntimeArgs, CLValueError> {
    let mut args = RuntimeArgs::new();
    DELEGATE_ARG_DELEGATOR.insert(&mut args, delegator)?;
    DELEGATE_ARG_VALIDATOR.insert(&mut args, validator)?;
    DELEGATE_ARG_AMOUNT.insert(&mut args, amount.into())?;
    Ok(args)
}

/// Checks the given `RuntimeArgs` are suitable for use in a delegate transaction.
///
/// Requires "delegator", "validator" and "amount"; the amount must not exceed
/// the chainspec's `maximum_delegation_amount`.
pub fn has_valid_delegate_args(
    chainspec: &Chainspec,
    args: &TransactionArgs,
) -> Result<(), InvalidTransactionV1> {
    let args = args
        .as_named()
        .ok_or(InvalidTransactionV1::ExpectedNamedArguments)?;
    let _delegator = DELEGATE_ARG_DELEGATOR.get(args)?;
    let _validator = DELEGATE_ARG_VALIDATOR.get(args)?;
    let amount = DELEGATE_ARG_AMOUNT.get(args)?;
    // We don't check for minimum since this could be a second delegation
    let maximum_delegation_amount = chainspec.core_config.maximum_delegation_amount;
    if amount > maximum_delegation_amount.into() {
        return Err(InvalidTransactionV1::InvalidDelegationAmount {
            ceiling: maximum_delegation_amount,
            attempted: amount,
        });
    }
    Ok(())
}

/// Creates a `RuntimeArgs` suitable for use in an undelegate transaction.
#[cfg(test)]
pub fn new_undelegate_args<A: Into<U512>>(
    delegator: PublicKey,
    validator: PublicKey,
    amount: A,
) -> Result<RuntimeArgs, CLValueError> {
    let mut args = RuntimeArgs::new();
    UNDELEGATE_ARG_DELEGATOR.insert(&mut args, delegator)?;
    UNDELEGATE_ARG_VALIDATOR.insert(&mut args, validator)?;
    UNDELEGATE_ARG_AMOUNT.insert(&mut args, amount.into())?;
    Ok(args)
}

/// Checks the given `RuntimeArgs` are suitable for use in an undelegate transaction.
///
/// Only presence and type of "delegator", "validator" and "amount" are
/// checked; no bounds are applied to the undelegated amount.
pub fn has_valid_undelegate_args(args: &TransactionArgs) -> Result<(), InvalidTransactionV1> {
    let args = args
        .as_named()
        .ok_or(InvalidTransactionV1::ExpectedNamedArguments)?;
    let _delegator = UNDELEGATE_ARG_DELEGATOR.get(args)?;
    let _validator = UNDELEGATE_ARG_VALIDATOR.get(args)?;
    let _amount = UNDELEGATE_ARG_AMOUNT.get(args)?;
    Ok(())
}

/// Creates a `RuntimeArgs` suitable for use in a redelegate transaction.
#[cfg(test)]
pub fn new_redelegate_args<A: Into<U512>>(
    delegator: PublicKey,
    validator: PublicKey,
    amount: A,
    new_validator: PublicKey,
) -> Result<RuntimeArgs, CLValueError> {
    let mut args = RuntimeArgs::new();
    REDELEGATE_ARG_DELEGATOR.insert(&mut args, delegator)?;
    REDELEGATE_ARG_VALIDATOR.insert(&mut args, validator)?;
    REDELEGATE_ARG_AMOUNT.insert(&mut args, amount.into())?;
    REDELEGATE_ARG_NEW_VALIDATOR.insert(&mut args, new_validator)?;
    Ok(args)
}

/// Checks the given `RuntimeArgs` are suitable for use in a redelegate transaction.
///
/// Requires "delegator", "validator", "new_validator" and "amount"; the
/// amount must not exceed the chainspec's `maximum_delegation_amount`.
pub fn has_valid_redelegate_args(
    chainspec: &Chainspec,
    args: &TransactionArgs,
) -> Result<(), InvalidTransactionV1> {
    let args = args
        .as_named()
        .ok_or(InvalidTransactionV1::ExpectedNamedArguments)?;
    let _delegator = REDELEGATE_ARG_DELEGATOR.get(args)?;
    let _validator = REDELEGATE_ARG_VALIDATOR.get(args)?;
    let _new_validator = REDELEGATE_ARG_NEW_VALIDATOR.get(args)?;
    let amount = REDELEGATE_ARG_AMOUNT.get(args)?;
    // We don't check for minimum since this could be a second delegation
    let maximum_delegation_amount = chainspec.core_config.maximum_delegation_amount;
    if amount > maximum_delegation_amount.into() {
        return Err(InvalidTransactionV1::InvalidDelegationAmount {
            attempted: amount,
            ceiling: maximum_delegation_amount,
        });
    }
    Ok(())
}

/// Creates a `RuntimeArgs` suitable for use in an activate_bid transaction.
#[cfg(test)]
pub fn new_activate_bid_args(validator: PublicKey) -> Result<RuntimeArgs, CLValueError> {
    
let mut args = RuntimeArgs::new();
    ACTIVATE_BID_ARG_VALIDATOR.insert(&mut args, validator)?;
    Ok(args)
}

/// Checks the given `RuntimeArgs` are suitable for use in an activate bid transaction.
pub fn has_valid_activate_bid_args(args: &TransactionArgs) -> Result<(), InvalidTransactionV1> {
    let args = args
        .as_named()
        .ok_or(InvalidTransactionV1::ExpectedNamedArguments)?;
    let _validator = ACTIVATE_BID_ARG_VALIDATOR.get(args)?;
    Ok(())
}

/// Creates a `RuntimeArgs` suitable for use in a change bid public key transaction.
#[cfg(test)]
pub fn new_change_bid_public_key_args(
    public_key: PublicKey,
    new_public_key: PublicKey,
) -> Result<RuntimeArgs, CLValueError> {
    let mut args = RuntimeArgs::new();
    CHANGE_BID_PUBLIC_KEY_ARG_PUBLIC_KEY.insert(&mut args, public_key)?;
    CHANGE_BID_PUBLIC_KEY_ARG_NEW_PUBLIC_KEY.insert(&mut args, new_public_key)?;
    Ok(args)
}

/// Checks the given `RuntimeArgs` are suitable for use in a change bid public key transaction.
pub fn has_valid_change_bid_public_key_args(
    args: &TransactionArgs,
) -> Result<(), InvalidTransactionV1> {
    let args = args
        .as_named()
        .ok_or(InvalidTransactionV1::ExpectedNamedArguments)?;
    let _public_key = CHANGE_BID_PUBLIC_KEY_ARG_PUBLIC_KEY.get(args)?;
    let _new_public_key = CHANGE_BID_PUBLIC_KEY_ARG_NEW_PUBLIC_KEY.get(args)?;
    Ok(())
}

/// Creates a `RuntimeArgs` suitable for use in an add reservations transaction.
#[cfg(test)]
pub fn new_add_reservations_args(
    reservations: Vec<Reservation>,
) -> Result<RuntimeArgs, CLValueError> {
    let mut args = RuntimeArgs::new();
    ADD_RESERVATIONS_ARG_RESERVATIONS.insert(&mut args, reservations)?;
    Ok(args)
}

/// Checks the given `TransactionArgs` are suitable for use in an add reservations transaction.
///
/// The number of reservations must not exceed the chainspec's
/// `max_delegators_per_validator`.
pub fn has_valid_add_reservations_args(
    chainspec: &Chainspec,
    args: &TransactionArgs,
) -> Result<(), InvalidTransactionV1> {
    let args = args
        .as_named()
        .ok_or(InvalidTransactionV1::ExpectedNamedArguments)?;
    let reservations = ADD_RESERVATIONS_ARG_RESERVATIONS.get(args)?;
    let ceiling = chainspec.core_config.max_delegators_per_validator;
    let attempted: u32 = reservations.len().try_into().map_err(|_| {
        // This will only happen if reservations.len is bigger than u32,
        // which necessarily exceeds the ceiling as well.
        InvalidTransactionV1::InvalidReservedSlots {
            ceiling,
            attempted: reservations.len() as u64,
        }
    })?;
    if attempted > ceiling {
        return Err(InvalidTransactionV1::InvalidReservedSlots {
            ceiling,
            attempted: attempted as u64,
        });
    }
    Ok(())
}

/// Creates a `RuntimeArgs` suitable for use in a cancel reservations transaction.
#[cfg(test)]
pub fn new_cancel_reservations_args(
    validator: PublicKey,
    delegators: Vec<DelegatorKind>,
) -> Result<RuntimeArgs, CLValueError> {
    let mut args = RuntimeArgs::new();
    CANCEL_RESERVATIONS_ARG_VALIDATOR.insert(&mut args, validator)?;
    CANCEL_RESERVATIONS_ARG_DELEGATORS.insert(&mut args, delegators)?;
    Ok(args)
}

/// Checks the given `TransactionArgs` are suitable for use in a cancel reservations transaction.
pub fn has_valid_cancel_reservations_args(
    args: &TransactionArgs,
) -> Result<(), InvalidTransactionV1> {
    let args = args
        .as_named()
        .ok_or(InvalidTransactionV1::ExpectedNamedArguments)?;
    let _validator = CANCEL_RESERVATIONS_ARG_VALIDATOR.get(args)?;
    let _delegators = CANCEL_RESERVATIONS_ARG_DELEGATORS.get(args)?;
    Ok(())
}

#[cfg(test)]
mod tests {
    use core::ops::Range;

    use super::*;
    use casper_execution_engine::engine_state::engine_config::{
        DEFAULT_MAXIMUM_DELEGATION_AMOUNT, DEFAULT_MINIMUM_DELEGATION_AMOUNT,
    };
    use casper_types::{runtime_args, testing::TestRng, CLType, TransactionArgs};
    use 
rand::Rng;\n\n    #[test]\n    fn should_validate_transfer_args() {\n        let rng = &mut TestRng::new();\n        let min_motes = 10_u64;\n        // Check random args, PublicKey target, within motes limit.\n        let args = new_transfer_args(\n            U512::from(rng.gen_range(min_motes..=u64::MAX)),\n            rng.gen::<bool>().then(|| rng.gen()),\n            PublicKey::random(rng),\n            rng.gen::<bool>().then(|| rng.gen()),\n        )\n        .unwrap();\n        has_valid_transfer_args(&TransactionArgs::Named(args), min_motes).unwrap();\n\n        // Check random args, AccountHash target, within motes limit.\n        let args = new_transfer_args(\n            U512::from(rng.gen_range(min_motes..=u64::MAX)),\n            rng.gen::<bool>().then(|| rng.gen()),\n            rng.gen::<AccountHash>(),\n            rng.gen::<bool>().then(|| rng.gen()),\n        )\n        .unwrap();\n        has_valid_transfer_args(&TransactionArgs::Named(args), min_motes).unwrap();\n\n        // Check random args, URef target, within motes limit.\n        let args = new_transfer_args(\n            U512::from(rng.gen_range(min_motes..=u64::MAX)),\n            rng.gen::<bool>().then(|| rng.gen()),\n            rng.gen::<URef>(),\n            rng.gen::<bool>().then(|| rng.gen()),\n        )\n        .unwrap();\n        has_valid_transfer_args(&TransactionArgs::Named(args), min_motes).unwrap();\n\n        // Check at minimum motes limit.\n        let args = new_transfer_args(\n            U512::from(min_motes),\n            rng.gen::<bool>().then(|| rng.gen()),\n            PublicKey::random(rng),\n            rng.gen::<bool>().then(|| rng.gen()),\n        )\n        .unwrap();\n        has_valid_transfer_args(&TransactionArgs::Named(args), min_motes).unwrap();\n\n        // Check with extra arg.\n        let mut args = new_transfer_args(\n            U512::from(min_motes),\n            rng.gen::<bool>().then(|| rng.gen()),\n            PublicKey::random(rng),\n        
    rng.gen::<bool>().then(|| rng.gen()),\n        )\n        .unwrap();\n        args.insert(\"a\", 1).unwrap();\n        has_valid_transfer_args(&TransactionArgs::Named(args), min_motes).unwrap();\n    }\n\n    #[test]\n    fn transfer_args_with_low_amount_should_be_invalid() {\n        let rng = &mut TestRng::new();\n        let min_motes = 10_u64;\n\n        let args = runtime_args! {\n            TRANSFER_ARG_AMOUNT.name => U512::from(min_motes - 1),\n            TRANSFER_ARG_TARGET => PublicKey::random(rng)\n        };\n\n        let expected_error = InvalidTransactionV1::InsufficientTransferAmount {\n            minimum: min_motes,\n            attempted: U512::from(min_motes - 1),\n        };\n\n        assert_eq!(\n            has_valid_transfer_args(&TransactionArgs::Named(args), min_motes),\n            Err(expected_error)\n        );\n    }\n\n    #[test]\n    fn transfer_args_with_missing_required_should_be_invalid() {\n        let rng = &mut TestRng::new();\n        let min_motes = 10_u64;\n\n        // Missing \"target\".\n        let args = runtime_args! {\n            TRANSFER_ARG_AMOUNT.name => U512::from(min_motes),\n        };\n        let expected_error = InvalidTransactionV1::MissingArg {\n            arg_name: TRANSFER_ARG_TARGET.to_string(),\n        };\n        assert_eq!(\n            has_valid_transfer_args(&TransactionArgs::Named(args), min_motes),\n            Err(expected_error)\n        );\n\n        // Missing \"amount\".\n        let args = runtime_args! 
{\n            TRANSFER_ARG_TARGET => PublicKey::random(rng)\n        };\n        let expected_error = InvalidTransactionV1::MissingArg {\n            arg_name: TRANSFER_ARG_AMOUNT.name.to_string(),\n        };\n        assert_eq!(\n            has_valid_transfer_args(&TransactionArgs::Named(args), min_motes),\n            Err(expected_error)\n        );\n    }\n\n    #[test]\n    fn transfer_args_with_wrong_type_should_be_invalid() {\n        let rng = &mut TestRng::new();\n        let min_motes = 10_u64;\n\n        // Wrong \"target\" type (a required arg).\n        let args = runtime_args! {\n            TRANSFER_ARG_AMOUNT.name => U512::from(min_motes),\n            TRANSFER_ARG_TARGET => \"wrong\"\n        };\n        let expected_error = InvalidTransactionV1::unexpected_arg_type(\n            TRANSFER_ARG_TARGET.to_string(),\n            vec![CLType::PublicKey, CLType::ByteArray(32), CLType::URef],\n            CLType::String,\n        );\n        assert_eq!(\n            has_valid_transfer_args(&TransactionArgs::Named(args), min_motes),\n            Err(expected_error)\n        );\n\n        // Wrong \"source\" type (an optional arg).\n        let args = runtime_args! 
{\n            TRANSFER_ARG_AMOUNT.name => U512::from(min_motes),\n            TRANSFER_ARG_SOURCE.name => 1_u8,\n            TRANSFER_ARG_TARGET => PublicKey::random(rng)\n        };\n        let expected_error = InvalidTransactionV1::unexpected_arg_type(\n            TRANSFER_ARG_SOURCE.name.to_string(),\n            vec![URef::cl_type()],\n            CLType::U8,\n        );\n        assert_eq!(\n            has_valid_transfer_args(&TransactionArgs::Named(args), min_motes),\n            Err(expected_error)\n        );\n    }\n    #[cfg(test)]\n    fn check_add_bid_args(args: &TransactionArgs) -> Result<(), InvalidTransactionV1> {\n        has_valid_add_bid_args(&Chainspec::default(), args)\n    }\n\n    #[test]\n    fn should_validate_add_bid_args() {\n        let rng = &mut TestRng::new();\n        let floor = DEFAULT_MINIMUM_DELEGATION_AMOUNT;\n        let ceiling = DEFAULT_MAXIMUM_DELEGATION_AMOUNT;\n        let reserved_max = 1200; // there doesn't seem to be a const for this?\n        let minimum_delegation_amount = rng.gen::<bool>().then(|| rng.gen_range(floor..floor * 2));\n        let maximum_delegation_amount = rng.gen::<bool>().then(|| rng.gen_range(floor..ceiling));\n        let reserved_slots = rng.gen::<bool>().then(|| rng.gen_range(0..reserved_max));\n\n        // Check random args.\n        let mut args = new_add_bid_args(\n            PublicKey::random(rng),\n            rng.gen(),\n            rng.gen::<u64>(),\n            minimum_delegation_amount,\n            maximum_delegation_amount,\n            reserved_slots,\n        )\n        .unwrap();\n        check_add_bid_args(&TransactionArgs::Named(args.clone())).unwrap();\n\n        // Check with extra arg.\n        args.insert(\"a\", 1).unwrap();\n        check_add_bid_args(&TransactionArgs::Named(args)).unwrap();\n    }\n\n    #[test]\n    fn add_bid_args_with_missing_required_should_be_invalid() {\n        let rng = &mut TestRng::new();\n\n        // Missing \"public_key\".\n        let 
args = runtime_args! {\n            ADD_BID_ARG_DELEGATION_RATE.name => rng.gen::<u8>(),\n            ADD_BID_ARG_AMOUNT.name => U512::from(rng.gen::<u64>())\n        };\n        let expected_error = InvalidTransactionV1::MissingArg {\n            arg_name: ADD_BID_ARG_PUBLIC_KEY.name.to_string(),\n        };\n        assert_eq!(\n            check_add_bid_args(&TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n\n        // Missing \"delegation_rate\".\n        let args = runtime_args! {\n            ADD_BID_ARG_PUBLIC_KEY.name => PublicKey::random(rng),\n            ADD_BID_ARG_AMOUNT.name => U512::from(rng.gen::<u64>())\n        };\n        let expected_error = InvalidTransactionV1::MissingArg {\n            arg_name: ADD_BID_ARG_DELEGATION_RATE.name.to_string(),\n        };\n        assert_eq!(\n            check_add_bid_args(&TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n\n        // Missing \"amount\".\n        let args = runtime_args! {\n            ADD_BID_ARG_PUBLIC_KEY.name => PublicKey::random(rng),\n            ADD_BID_ARG_DELEGATION_RATE.name => rng.gen::<u8>()\n        };\n        let expected_error = InvalidTransactionV1::MissingArg {\n            arg_name: ADD_BID_ARG_AMOUNT.name.to_string(),\n        };\n        assert_eq!(\n            check_add_bid_args(&TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n    }\n\n    #[test]\n    fn add_bid_args_with_wrong_type_should_be_invalid() {\n        let rng = &mut TestRng::new();\n\n        // Wrong \"amount\" type.\n        let args = runtime_args! 
{\n            ADD_BID_ARG_PUBLIC_KEY.name => PublicKey::random(rng),\n            ADD_BID_ARG_DELEGATION_RATE.name => rng.gen::<u8>(),\n            ADD_BID_ARG_AMOUNT.name => rng.gen::<u64>()\n        };\n        let expected_error = InvalidTransactionV1::unexpected_arg_type(\n            ADD_BID_ARG_AMOUNT.name.to_string(),\n            vec![CLType::U512],\n            CLType::U64,\n        );\n        assert_eq!(\n            check_add_bid_args(&TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n    }\n\n    #[test]\n    fn should_validate_withdraw_bid_args() {\n        let rng = &mut TestRng::new();\n\n        // Check random args.\n        let mut args = new_withdraw_bid_args(PublicKey::random(rng), rng.gen::<u64>()).unwrap();\n        has_valid_withdraw_bid_args(&TransactionArgs::Named(args.clone())).unwrap();\n\n        // Check with extra arg.\n        args.insert(\"a\", 1).unwrap();\n        has_valid_withdraw_bid_args(&TransactionArgs::Named(args)).unwrap();\n    }\n\n    #[test]\n    fn withdraw_bid_args_with_missing_required_should_be_invalid() {\n        let rng = &mut TestRng::new();\n\n        // Missing \"public_key\".\n        let args = runtime_args! {\n            WITHDRAW_BID_ARG_AMOUNT.name => U512::from(rng.gen::<u64>())\n        };\n        let expected_error = InvalidTransactionV1::MissingArg {\n            arg_name: WITHDRAW_BID_ARG_PUBLIC_KEY.name.to_string(),\n        };\n        assert_eq!(\n            has_valid_withdraw_bid_args(&TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n\n        // Missing \"amount\".\n        let args = runtime_args! 
{\n            WITHDRAW_BID_ARG_PUBLIC_KEY.name => PublicKey::random(rng),\n        };\n        let expected_error = InvalidTransactionV1::MissingArg {\n            arg_name: WITHDRAW_BID_ARG_AMOUNT.name.to_string(),\n        };\n        assert_eq!(\n            has_valid_withdraw_bid_args(&TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n    }\n\n    #[test]\n    fn withdraw_bid_args_with_wrong_type_should_be_invalid() {\n        let rng = &mut TestRng::new();\n\n        // Wrong \"amount\" type.\n        let args = runtime_args! {\n            WITHDRAW_BID_ARG_PUBLIC_KEY.name => PublicKey::random(rng),\n            WITHDRAW_BID_ARG_AMOUNT.name => rng.gen::<u64>()\n        };\n        let expected_error = InvalidTransactionV1::unexpected_arg_type(\n            WITHDRAW_BID_ARG_AMOUNT.name.to_string(),\n            vec![CLType::U512],\n            CLType::U64,\n        );\n        assert_eq!(\n            has_valid_withdraw_bid_args(&TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n    }\n\n    #[test]\n    fn should_validate_delegate_args() {\n        let chainspec = Chainspec::default();\n        let rng = &mut TestRng::new();\n\n        // Check random args.\n        let mut args = new_delegate_args(\n            PublicKey::random(rng),\n            PublicKey::random(rng),\n            rng.gen_range(0_u64..1_000_000_000_000_000_000_u64),\n        )\n        .unwrap();\n        has_valid_delegate_args(&chainspec, &TransactionArgs::Named(args.clone())).unwrap();\n\n        // Check with extra arg.\n        args.insert(\"a\", 1).unwrap();\n        has_valid_delegate_args(&chainspec, &TransactionArgs::Named(args)).unwrap();\n    }\n\n    #[test]\n    fn delegate_args_with_too_big_amount_should_fail() {\n        let chainspec = Chainspec::default();\n        let rng = &mut TestRng::new();\n\n        // Check random args.\n        let args = new_delegate_args(\n            PublicKey::random(rng),\n            
PublicKey::random(rng),\n            1_000_000_000_000_000_001_u64,\n        )\n        .unwrap();\n        let expected_error = InvalidTransactionV1::InvalidDelegationAmount {\n            ceiling: 1_000_000_000_000_000_000_u64,\n            attempted: 1_000_000_000_000_000_001_u64.into(),\n        };\n        assert_eq!(\n            has_valid_delegate_args(&chainspec, &TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n    }\n\n    #[test]\n    fn delegate_args_with_missing_required_should_be_invalid() {\n        let chainspec = Chainspec::default();\n        let rng = &mut TestRng::new();\n\n        // Missing \"delegator\".\n        let args = runtime_args! {\n            DELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng),\n            DELEGATE_ARG_AMOUNT.name => U512::from(rng.gen::<u64>())\n        };\n        let expected_error = InvalidTransactionV1::MissingArg {\n            arg_name: DELEGATE_ARG_DELEGATOR.name.to_string(),\n        };\n        assert_eq!(\n            has_valid_delegate_args(&chainspec, &TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n\n        // Missing \"validator\".\n        let args = runtime_args! {\n            DELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng),\n            DELEGATE_ARG_AMOUNT.name => U512::from(rng.gen::<u64>())\n        };\n        let expected_error = InvalidTransactionV1::MissingArg {\n            arg_name: DELEGATE_ARG_VALIDATOR.name.to_string(),\n        };\n        assert_eq!(\n            has_valid_delegate_args(&chainspec, &TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n\n        // Missing \"amount\".\n        let args = runtime_args! 
{\n            DELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng),\n            DELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng),\n        };\n        let expected_error = InvalidTransactionV1::MissingArg {\n            arg_name: DELEGATE_ARG_AMOUNT.name.to_string(),\n        };\n        assert_eq!(\n            has_valid_delegate_args(&chainspec, &TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n    }\n\n    #[test]\n    fn delegate_args_with_wrong_type_should_be_invalid() {\n        let chainspec = Chainspec::default();\n        let rng = &mut TestRng::new();\n\n        // Wrong \"amount\" type.\n        let args = runtime_args! {\n            DELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng),\n            DELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng),\n            DELEGATE_ARG_AMOUNT.name => rng.gen::<u64>()\n        };\n        let expected_error = InvalidTransactionV1::unexpected_arg_type(\n            DELEGATE_ARG_AMOUNT.name.to_string(),\n            vec![CLType::U512],\n            CLType::U64,\n        );\n        assert_eq!(\n            has_valid_delegate_args(&chainspec, &TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n    }\n\n    #[test]\n    fn should_validate_undelegate_args() {\n        let rng = &mut TestRng::new();\n\n        // Check random args.\n        let mut args = new_undelegate_args(\n            PublicKey::random(rng),\n            PublicKey::random(rng),\n            rng.gen::<u64>(),\n        )\n        .unwrap();\n        has_valid_undelegate_args(&TransactionArgs::Named(args.clone())).unwrap();\n\n        // Check with extra arg.\n        args.insert(\"a\", 1).unwrap();\n        has_valid_undelegate_args(&TransactionArgs::Named(args)).unwrap();\n    }\n\n    #[test]\n    fn undelegate_args_with_missing_required_should_be_invalid() {\n        let rng = &mut TestRng::new();\n\n        // Missing \"delegator\".\n        let args = runtime_args! 
{\n            UNDELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng),\n            UNDELEGATE_ARG_AMOUNT.name => U512::from(rng.gen::<u64>())\n        };\n        let expected_error = InvalidTransactionV1::MissingArg {\n            arg_name: UNDELEGATE_ARG_DELEGATOR.name.to_string(),\n        };\n        assert_eq!(\n            has_valid_undelegate_args(&TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n\n        // Missing \"validator\".\n        let args = runtime_args! {\n            UNDELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng),\n            UNDELEGATE_ARG_AMOUNT.name => U512::from(rng.gen::<u64>())\n        };\n        let expected_error = InvalidTransactionV1::MissingArg {\n            arg_name: UNDELEGATE_ARG_VALIDATOR.name.to_string(),\n        };\n        assert_eq!(\n            has_valid_undelegate_args(&TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n\n        // Missing \"amount\".\n        let args = runtime_args! {\n            UNDELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng),\n            UNDELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng),\n        };\n        let expected_error = InvalidTransactionV1::MissingArg {\n            arg_name: UNDELEGATE_ARG_AMOUNT.name.to_string(),\n        };\n        assert_eq!(\n            has_valid_undelegate_args(&TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n    }\n\n    #[test]\n    fn undelegate_args_with_wrong_type_should_be_invalid() {\n        let rng = &mut TestRng::new();\n\n        // Wrong \"amount\" type.\n        let args = runtime_args! 
{\n            UNDELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng),\n            UNDELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng),\n            UNDELEGATE_ARG_AMOUNT.name => rng.gen::<u64>()\n        };\n        let expected_error = InvalidTransactionV1::unexpected_arg_type(\n            UNDELEGATE_ARG_AMOUNT.name.to_string(),\n            vec![CLType::U512],\n            CLType::U64,\n        );\n        assert_eq!(\n            has_valid_undelegate_args(&TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n    }\n\n    #[test]\n    fn should_validate_redelegate_args() {\n        let chainspec = Chainspec::default();\n        let rng = &mut TestRng::new();\n\n        // Check random args.\n        let mut args = new_redelegate_args(\n            PublicKey::random(rng),\n            PublicKey::random(rng),\n            rng.gen_range(0_u64..1_000_000_000_000_000_000_u64),\n            PublicKey::random(rng),\n        )\n        .unwrap();\n        has_valid_redelegate_args(&chainspec, &TransactionArgs::Named(args.clone())).unwrap();\n\n        // Check with extra arg.\n        args.insert(\"a\", 1).unwrap();\n        has_valid_redelegate_args(&chainspec, &TransactionArgs::Named(args)).unwrap();\n    }\n\n    #[test]\n    fn redelegate_args_with_too_much_amount_should_be_invalid() {\n        let chainspec = Chainspec::default();\n        let rng = &mut TestRng::new();\n        let args = new_redelegate_args(\n            PublicKey::random(rng),\n            PublicKey::random(rng),\n            1_000_000_000_000_000_001_u64,\n            PublicKey::random(rng),\n        )\n        .unwrap();\n        let expected_error = InvalidTransactionV1::InvalidDelegationAmount {\n            ceiling: 1_000_000_000_000_000_000_u64,\n            attempted: 1_000_000_000_000_000_001_u64.into(),\n        };\n        assert_eq!(\n            has_valid_redelegate_args(&chainspec, &TransactionArgs::Named(args)),\n            Err(expected_error)\n      
  );\n    }\n\n    #[test]\n    fn redelegate_args_with_missing_required_should_be_invalid() {\n        let chainspec = Chainspec::default();\n        let rng = &mut TestRng::new();\n\n        // Missing \"delegator\".\n        let args = runtime_args! {\n            REDELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng),\n            REDELEGATE_ARG_AMOUNT.name => U512::from(rng.gen_range(0_u64..1_000_000_000_000_000_000_u64)),\n            REDELEGATE_ARG_NEW_VALIDATOR.name => PublicKey::random(rng),\n        };\n        let expected_error = InvalidTransactionV1::MissingArg {\n            arg_name: REDELEGATE_ARG_DELEGATOR.name.to_string(),\n        };\n        assert_eq!(\n            has_valid_redelegate_args(&chainspec, &TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n\n        // Missing \"validator\".\n        let args = runtime_args! {\n            REDELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng),\n            REDELEGATE_ARG_AMOUNT.name => U512::from(rng.gen_range(0_u64..1_000_000_000_000_000_000_u64),),\n            REDELEGATE_ARG_NEW_VALIDATOR.name => PublicKey::random(rng),\n        };\n        let expected_error = InvalidTransactionV1::MissingArg {\n            arg_name: REDELEGATE_ARG_VALIDATOR.name.to_string(),\n        };\n        assert_eq!(\n            has_valid_redelegate_args(&chainspec, &TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n\n        // Missing \"amount\".\n        let args = runtime_args! 
{\n            REDELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng),\n            REDELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng),\n            REDELEGATE_ARG_NEW_VALIDATOR.name => PublicKey::random(rng),\n        };\n        let expected_error = InvalidTransactionV1::MissingArg {\n            arg_name: REDELEGATE_ARG_AMOUNT.name.to_string(),\n        };\n        assert_eq!(\n            has_valid_redelegate_args(&chainspec, &TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n\n        // Missing \"new_validator\".\n        let args = runtime_args! {\n            REDELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng),\n            REDELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng),\n            REDELEGATE_ARG_AMOUNT.name => U512::from(rng.gen::<u64>()),\n        };\n        let expected_error = InvalidTransactionV1::MissingArg {\n            arg_name: REDELEGATE_ARG_NEW_VALIDATOR.name.to_string(),\n        };\n        assert_eq!(\n            has_valid_redelegate_args(&chainspec, &TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n    }\n\n    #[test]\n    fn redelegate_args_with_wrong_type_should_be_invalid() {\n        let chainspec = Chainspec::default();\n        let rng = &mut TestRng::new();\n\n        // Wrong \"amount\" type.\n        let args = runtime_args! 
{\n            REDELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng),\n            REDELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng),\n            REDELEGATE_ARG_AMOUNT.name => rng.gen_range(0_u64..1_000_000_000_000_000_000_u64),\n            REDELEGATE_ARG_NEW_VALIDATOR.name => PublicKey::random(rng),\n        };\n        let expected_error = InvalidTransactionV1::unexpected_arg_type(\n            REDELEGATE_ARG_AMOUNT.name.to_string(),\n            vec![CLType::U512],\n            CLType::U64,\n        );\n        assert_eq!(\n            has_valid_redelegate_args(&chainspec, &TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n    }\n\n    #[test]\n    fn should_validate_change_bid_public_key_args() {\n        let rng = &mut TestRng::new();\n\n        // Check random args.\n        let mut args =\n            new_change_bid_public_key_args(PublicKey::random(rng), PublicKey::random(rng)).unwrap();\n        has_valid_change_bid_public_key_args(&TransactionArgs::Named(args.clone())).unwrap();\n\n        // Check with extra arg.\n        args.insert(\"a\", 1).unwrap();\n        has_valid_change_bid_public_key_args(&TransactionArgs::Named(args)).unwrap();\n    }\n\n    #[test]\n    fn change_bid_public_key_args_with_missing_required_should_be_invalid() {\n        let rng = &mut TestRng::new();\n\n        // Missing \"public_key\".\n        let args = runtime_args! {\n            CHANGE_BID_PUBLIC_KEY_ARG_NEW_PUBLIC_KEY.name => PublicKey::random(rng),\n        };\n        let expected_error = InvalidTransactionV1::MissingArg {\n            arg_name: CHANGE_BID_PUBLIC_KEY_ARG_PUBLIC_KEY.name.to_string(),\n        };\n        assert_eq!(\n            has_valid_change_bid_public_key_args(&TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n\n        // Missing \"new_public_key\".\n        let args = runtime_args! 
{\n            CHANGE_BID_PUBLIC_KEY_ARG_PUBLIC_KEY.name => PublicKey::random(rng),\n        };\n        let expected_error = InvalidTransactionV1::MissingArg {\n            arg_name: CHANGE_BID_PUBLIC_KEY_ARG_NEW_PUBLIC_KEY.name.to_string(),\n        };\n        assert_eq!(\n            has_valid_change_bid_public_key_args(&TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n    }\n\n    #[test]\n    fn change_bid_public_key_args_with_wrong_type_should_be_invalid() {\n        let rng = &mut TestRng::new();\n\n        // Wrong \"public_key\" type.\n        let args = runtime_args! {\n            CHANGE_BID_PUBLIC_KEY_ARG_PUBLIC_KEY.name => rng.gen::<u8>(),\n            CHANGE_BID_PUBLIC_KEY_ARG_NEW_PUBLIC_KEY.name => PublicKey::random(rng),\n        };\n        let expected_error = InvalidTransactionV1::unexpected_arg_type(\n            CHANGE_BID_PUBLIC_KEY_ARG_PUBLIC_KEY.name.to_string(),\n            vec![CLType::PublicKey],\n            CLType::U8,\n        );\n        assert_eq!(\n            has_valid_change_bid_public_key_args(&TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n\n        // Wrong \"new_public_key\" type.\n        let args = runtime_args! 
{\n            CHANGE_BID_PUBLIC_KEY_ARG_PUBLIC_KEY.name => PublicKey::random(rng),\n            CHANGE_BID_PUBLIC_KEY_ARG_NEW_PUBLIC_KEY.name => rng.gen::<u8>(),\n        };\n        let expected_error = InvalidTransactionV1::unexpected_arg_type(\n            CHANGE_BID_PUBLIC_KEY_ARG_NEW_PUBLIC_KEY.name.to_string(),\n            vec![CLType::PublicKey],\n            CLType::U8,\n        );\n        assert_eq!(\n            has_valid_change_bid_public_key_args(&TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n    }\n\n    #[test]\n    fn should_validate_add_reservations_args() {\n        let chainspec = Chainspec::default();\n        let rng = &mut TestRng::new();\n\n        let reservations = rng.random_vec(1..100);\n\n        // Check random args.\n        let mut args = new_add_reservations_args(reservations).unwrap();\n        has_valid_add_reservations_args(&chainspec, &TransactionArgs::Named(args.clone())).unwrap();\n\n        // Check with extra arg.\n        args.insert(\"a\", 1).unwrap();\n        has_valid_add_reservations_args(&chainspec, &TransactionArgs::Named(args)).unwrap();\n    }\n\n    #[test]\n    fn add_reservations_args_with_too_many_reservations_should_be_invalid() {\n        let chainspec = Chainspec::default();\n        let rng = &mut TestRng::new();\n        // local chainspec allows 1200 delegators to a validator\n        let reservations = rng.random_vec(1201..=1201);\n        let args = new_add_reservations_args(reservations).unwrap();\n\n        let expected_error = InvalidTransactionV1::InvalidReservedSlots {\n            ceiling: 1200,\n            attempted: 1201,\n        };\n        assert_eq!(\n            has_valid_add_reservations_args(&chainspec, &TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n    }\n\n    #[test]\n    fn add_reservations_args_with_missing_required_should_be_invalid() {\n        let chainspec = Chainspec::default();\n        // Missing 
\"reservations\".\n        let args = runtime_args! {};\n        let expected_error = InvalidTransactionV1::MissingArg {\n            arg_name: ADD_RESERVATIONS_ARG_RESERVATIONS.name.to_string(),\n        };\n        assert_eq!(\n            has_valid_add_reservations_args(&chainspec, &TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n    }\n\n    #[test]\n    fn add_reservations_args_with_wrong_type_should_be_invalid() {\n        let chainspec = Chainspec::default();\n        let rng = &mut TestRng::new();\n\n        // Wrong \"reservations\" type.\n        let args = runtime_args! {\n            ADD_RESERVATIONS_ARG_RESERVATIONS.name => PublicKey::random(rng),\n        };\n        let expected_error = InvalidTransactionV1::unexpected_arg_type(\n            ADD_RESERVATIONS_ARG_RESERVATIONS.name.to_string(),\n            vec![CLType::List(Box::new(CLType::Any))],\n            CLType::PublicKey,\n        );\n        assert_eq!(\n            has_valid_add_reservations_args(&chainspec, &TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n    }\n\n    #[test]\n    fn should_validate_cancel_reservations_args() {\n        let rng = &mut TestRng::new();\n\n        let validator = PublicKey::random(rng);\n        let delegators = rng.random_vec(0..100);\n\n        // Check random args.\n        let mut args = new_cancel_reservations_args(validator, delegators).unwrap();\n        has_valid_cancel_reservations_args(&TransactionArgs::Named(args.clone())).unwrap();\n\n        // Check with extra arg.\n        args.insert(\"a\", 1).unwrap();\n        has_valid_cancel_reservations_args(&TransactionArgs::Named(args)).unwrap();\n    }\n\n    #[test]\n    fn cancel_reservations_args_with_missing_required_should_be_invalid() {\n        let rng = &mut TestRng::new();\n\n        // Missing \"validator\".\n        let args = runtime_args! 
{\n            CANCEL_RESERVATIONS_ARG_DELEGATORS.name  => rng.random_vec::<Range<usize>, DelegatorKind>(0..100),\n        };\n        let expected_error = InvalidTransactionV1::MissingArg {\n            arg_name: CANCEL_RESERVATIONS_ARG_VALIDATOR.name.to_string(),\n        };\n        assert_eq!(\n            has_valid_cancel_reservations_args(&TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n\n        // Missing \"delegators\".\n        let args = runtime_args! {\n            CANCEL_RESERVATIONS_ARG_VALIDATOR.name => PublicKey::random(rng),\n        };\n        let expected_error = InvalidTransactionV1::MissingArg {\n            arg_name: CANCEL_RESERVATIONS_ARG_DELEGATORS.name.to_string(),\n        };\n        assert_eq!(\n            has_valid_cancel_reservations_args(&TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n    }\n\n    #[test]\n    fn cancel_reservations_args_with_wrong_type_should_be_invalid() {\n        let rng = &mut TestRng::new();\n\n        // Wrong \"validator\" type.\n        let args = runtime_args! {\n            CANCEL_RESERVATIONS_ARG_VALIDATOR.name => rng.random_vec::<Range<usize>, PublicKey>(0..100),\n            CANCEL_RESERVATIONS_ARG_DELEGATORS.name => rng.random_vec::<Range<usize>, DelegatorKind>(0..100),\n        };\n        let expected_error = InvalidTransactionV1::unexpected_arg_type(\n            CANCEL_RESERVATIONS_ARG_VALIDATOR.name.to_string(),\n            vec![CLType::PublicKey],\n            CLType::List(Box::new(CLType::PublicKey)),\n        );\n        assert_eq!(\n            has_valid_cancel_reservations_args(&TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n\n        // Wrong \"delegators\" type.\n        let args = runtime_args! 
{\n            CANCEL_RESERVATIONS_ARG_VALIDATOR.name => PublicKey::random(rng),\n            CANCEL_RESERVATIONS_ARG_DELEGATORS.name => rng.gen::<u8>(),\n        };\n        let expected_error = InvalidTransactionV1::unexpected_arg_type(\n            CANCEL_RESERVATIONS_ARG_DELEGATORS.name.to_string(),\n            vec![CLType::List(Box::new(CLType::Any))],\n            CLType::U8,\n        );\n        assert_eq!(\n            has_valid_cancel_reservations_args(&TransactionArgs::Named(args)),\n            Err(expected_error)\n        );\n    }\n\n    #[test]\n    fn native_calls_require_named_args() {\n        let chainspec = Chainspec::default();\n        let args = TransactionArgs::Bytesrepr(vec![b'a'; 100].into());\n        let expected_error = InvalidTransactionV1::ExpectedNamedArguments;\n        assert_eq!(\n            has_valid_transfer_args(&args, 0).as_ref(),\n            Err(&expected_error)\n        );\n        assert_eq!(check_add_bid_args(&args).as_ref(), Err(&expected_error));\n        assert_eq!(\n            has_valid_withdraw_bid_args(&args).as_ref(),\n            Err(&expected_error)\n        );\n        assert_eq!(\n            has_valid_delegate_args(&chainspec, &args).as_ref(),\n            Err(&expected_error)\n        );\n        assert_eq!(\n            has_valid_undelegate_args(&args).as_ref(),\n            Err(&expected_error)\n        );\n        assert_eq!(\n            has_valid_redelegate_args(&chainspec, &args).as_ref(),\n            Err(&expected_error)\n        );\n        assert_eq!(\n            has_valid_add_reservations_args(&chainspec, &args).as_ref(),\n            Err(&expected_error)\n        );\n        assert_eq!(\n            has_valid_cancel_reservations_args(&args).as_ref(),\n            Err(&expected_error)\n        );\n    }\n}\n"
  },
  {
    "path": "node/src/types/transaction/deploy/legacy_deploy.rs",
    "content": "use std::fmt::{self, Display, Formatter};\n\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse casper_types::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    Deploy, DeployHash, InvalidDeploy, Transaction,\n};\n\nuse crate::components::fetcher::{EmptyValidationMetadata, FetchItem, Tag};\n\n#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, DataSize, Debug)]\npub(crate) struct LegacyDeploy(Deploy);\n\nimpl FetchItem for LegacyDeploy {\n    type Id = DeployHash;\n    type ValidationError = InvalidDeploy;\n    type ValidationMetadata = EmptyValidationMetadata;\n\n    const TAG: Tag = Tag::LegacyDeploy;\n\n    fn fetch_id(&self) -> Self::Id {\n        *self.0.hash()\n    }\n\n    fn validate(&self, _metadata: &EmptyValidationMetadata) -> Result<(), Self::ValidationError> {\n        self.0.has_valid_hash()\n    }\n}\n\nimpl ToBytes for LegacyDeploy {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.0.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n}\n\nimpl FromBytes for LegacyDeploy {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        Deploy::from_bytes(bytes).map(|(inner, remainder)| (LegacyDeploy(inner), remainder))\n    }\n}\n\nimpl From<LegacyDeploy> for Deploy {\n    fn from(legacy_deploy: LegacyDeploy) -> Self {\n        legacy_deploy.0\n    }\n}\n\nimpl From<LegacyDeploy> for Transaction {\n    fn from(legacy_deploy: LegacyDeploy) -> Self {\n        Self::Deploy(legacy_deploy.0)\n    }\n}\n\nimpl From<Deploy> for LegacyDeploy {\n    fn from(deploy: Deploy) -> Self {\n        Self(deploy)\n    }\n}\n\nimpl Display for LegacyDeploy {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(formatter, \"legacy-{}\", self.0)\n    
}\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let mut rng = crate::new_rng();\n        let legacy_deploy = LegacyDeploy::from(Deploy::random(&mut rng));\n        bytesrepr::test_serialization_roundtrip(&legacy_deploy);\n    }\n}\n\nmod specimen_support {\n    use crate::utils::specimen::{Cache, LargestSpecimen, SizeEstimator};\n\n    use super::LegacyDeploy;\n\n    impl LargestSpecimen for LegacyDeploy {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            LegacyDeploy(LargestSpecimen::largest_specimen(estimator, cache))\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/types/transaction/deploy.rs",
    "content": "mod legacy_deploy;\n\npub(crate) use legacy_deploy::LegacyDeploy;\n"
  },
  {
    "path": "node/src/types/transaction/fields_container.rs",
    "content": "#[cfg(test)]\nuse super::arg_handling;\nuse casper_types::{\n    bytesrepr::{Bytes, ToBytes},\n    TransactionArgs, TransactionEntryPoint, TransactionScheduling, TransactionTarget,\n};\n#[cfg(test)]\nuse casper_types::{\n    testing::TestRng, PublicKey, RuntimeArgs, TransactionInvocationTarget,\n    TransactionRuntimeParams, TransferTarget, AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID,\n    MINT_LANE_ID,\n};\n#[cfg(test)]\nuse rand::{Rng, RngCore};\nuse std::collections::BTreeMap;\n\npub(crate) const ARGS_MAP_KEY: u16 = 0;\npub(crate) const TARGET_MAP_KEY: u16 = 1;\npub(crate) const ENTRY_POINT_MAP_KEY: u16 = 2;\npub(crate) const SCHEDULING_MAP_KEY: u16 = 3;\n\n#[derive(Clone, Eq, PartialEq, Debug)]\npub(crate) enum FieldsContainerError {\n    CouldNotSerializeField { field_index: u16 },\n}\n\npub(crate) struct FieldsContainer {\n    pub(super) args: TransactionArgs,\n    pub(super) target: TransactionTarget,\n    pub(super) entry_point: TransactionEntryPoint,\n    pub(super) scheduling: TransactionScheduling,\n}\n\nimpl FieldsContainer {\n    pub(crate) fn new(\n        args: TransactionArgs,\n        target: TransactionTarget,\n        entry_point: TransactionEntryPoint,\n        scheduling: TransactionScheduling,\n    ) -> Self {\n        FieldsContainer {\n            args,\n            target,\n            entry_point,\n            scheduling,\n        }\n    }\n\n    pub(crate) fn to_map(&self) -> Result<BTreeMap<u16, Bytes>, FieldsContainerError> {\n        let mut map: BTreeMap<u16, Bytes> = BTreeMap::new();\n        map.insert(\n            ARGS_MAP_KEY,\n            self.args.to_bytes().map(Into::into).map_err(|_| {\n                FieldsContainerError::CouldNotSerializeField {\n                    field_index: ARGS_MAP_KEY,\n                }\n            })?,\n        );\n        map.insert(\n            TARGET_MAP_KEY,\n            self.target.to_bytes().map(Into::into).map_err(|_| {\n                
FieldsContainerError::CouldNotSerializeField {\n                    field_index: TARGET_MAP_KEY,\n                }\n            })?,\n        );\n        map.insert(\n            ENTRY_POINT_MAP_KEY,\n            self.entry_point.to_bytes().map(Into::into).map_err(|_| {\n                FieldsContainerError::CouldNotSerializeField {\n                    field_index: ENTRY_POINT_MAP_KEY,\n                }\n            })?,\n        );\n        map.insert(\n            SCHEDULING_MAP_KEY,\n            self.scheduling.to_bytes().map(Into::into).map_err(|_| {\n                FieldsContainerError::CouldNotSerializeField {\n                    field_index: SCHEDULING_MAP_KEY,\n                }\n            })?,\n        );\n        Ok(map)\n    }\n\n    /// Returns a random `FieldsContainer`.\n    #[cfg(test)]\n    pub(crate) fn random(rng: &mut TestRng) -> Self {\n        use casper_types::URef;\n\n        match rng.gen_range(0..=12) {\n            0 => {\n                let amount = rng.gen_range(2_500_000_000..=u64::MAX);\n                let maybe_source: Option<URef> = rng.gen();\n                let target = TransferTarget::random(rng);\n                let maybe_id = rng.gen::<bool>().then(|| rng.gen());\n                let args = arg_handling::new_transfer_args(amount, maybe_source, target, maybe_id)\n                    .unwrap();\n                FieldsContainer::new(\n                    TransactionArgs::Named(args),\n                    TransactionTarget::Native,\n                    TransactionEntryPoint::Transfer,\n                    TransactionScheduling::random(rng),\n                )\n            }\n            1 => {\n                let public_key = PublicKey::random(rng);\n                let delegation_rate = rng.gen();\n                let amount = rng.gen::<u64>();\n                let minimum_delegation_amount = rng.gen::<bool>().then(|| rng.gen());\n                let maximum_delegation_amount =\n                    
minimum_delegation_amount.map(|minimum_delegation_amount| {\n                        minimum_delegation_amount + rng.gen::<u32>() as u64\n                    });\n                let reserved_slots = rng.gen::<bool>().then(|| rng.gen::<u32>());\n                let args = arg_handling::new_add_bid_args(\n                    public_key,\n                    delegation_rate,\n                    amount,\n                    minimum_delegation_amount,\n                    maximum_delegation_amount,\n                    reserved_slots,\n                )\n                .unwrap();\n                FieldsContainer::new(\n                    TransactionArgs::Named(args),\n                    TransactionTarget::Native,\n                    TransactionEntryPoint::AddBid,\n                    TransactionScheduling::random(rng),\n                )\n            }\n            2 => {\n                let public_key = PublicKey::random(rng);\n                let amount = rng.gen::<u64>();\n                let args = arg_handling::new_withdraw_bid_args(public_key, amount).unwrap();\n                FieldsContainer::new(\n                    TransactionArgs::Named(args),\n                    TransactionTarget::Native,\n                    TransactionEntryPoint::WithdrawBid,\n                    TransactionScheduling::random(rng),\n                )\n            }\n            3 => {\n                let delegator = PublicKey::random(rng);\n                let validator = PublicKey::random(rng);\n                let amount = rng.gen::<u64>();\n                let args = arg_handling::new_delegate_args(delegator, validator, amount).unwrap();\n                FieldsContainer::new(\n                    TransactionArgs::Named(args),\n                    TransactionTarget::Native,\n                    TransactionEntryPoint::Delegate,\n                    TransactionScheduling::random(rng),\n                )\n            }\n            4 => {\n                let delegator = 
PublicKey::random(rng);\n                let validator = PublicKey::random(rng);\n                let amount = rng.gen::<u64>();\n                let args = arg_handling::new_undelegate_args(delegator, validator, amount).unwrap();\n                FieldsContainer::new(\n                    TransactionArgs::Named(args),\n                    TransactionTarget::Native,\n                    TransactionEntryPoint::Undelegate,\n                    TransactionScheduling::random(rng),\n                )\n            }\n            5 => {\n                let delegator = PublicKey::random(rng);\n                let validator = PublicKey::random(rng);\n                let amount = rng.gen::<u64>();\n                let new_validator = PublicKey::random(rng);\n                let args =\n                    arg_handling::new_redelegate_args(delegator, validator, amount, new_validator)\n                        .unwrap();\n                FieldsContainer::new(\n                    TransactionArgs::Named(args),\n                    TransactionTarget::Native,\n                    TransactionEntryPoint::Redelegate,\n                    TransactionScheduling::random(rng),\n                )\n            }\n            6 => Self::random_standard(rng),\n            7 => {\n                let mut buffer = vec![0u8; rng.gen_range(1..100)];\n                rng.fill_bytes(buffer.as_mut());\n                let is_install_upgrade = rng.gen();\n                let target = TransactionTarget::Session {\n                    is_install_upgrade,\n                    module_bytes: Bytes::from(buffer),\n                    runtime: TransactionRuntimeParams::VmCasperV1,\n                };\n                FieldsContainer::new(\n                    TransactionArgs::Named(RuntimeArgs::random(rng)),\n                    target,\n                    TransactionEntryPoint::Call,\n                    TransactionScheduling::random(rng),\n                )\n            }\n            8 => {\n          
      let amount = rng.gen::<u64>();\n                let maybe_source: Option<URef> = rng.gen();\n                let args = arg_handling::new_burn_args(amount, maybe_source).unwrap();\n                FieldsContainer::new(\n                    TransactionArgs::Named(args),\n                    TransactionTarget::Native,\n                    TransactionEntryPoint::Burn,\n                    TransactionScheduling::random(rng),\n                )\n            }\n            9 => {\n                let validator = PublicKey::random(rng);\n                let args = arg_handling::new_activate_bid_args(validator).unwrap();\n                FieldsContainer::new(\n                    TransactionArgs::Named(args),\n                    TransactionTarget::Native,\n                    TransactionEntryPoint::ActivateBid,\n                    TransactionScheduling::random(rng),\n                )\n            }\n            10 => {\n                let public_key = PublicKey::random(rng);\n                let new_public_key = PublicKey::random(rng);\n                let args = arg_handling::new_change_bid_public_key_args(public_key, new_public_key)\n                    .unwrap();\n                FieldsContainer::new(\n                    TransactionArgs::Named(args),\n                    TransactionTarget::Native,\n                    TransactionEntryPoint::ChangeBidPublicKey,\n                    TransactionScheduling::random(rng),\n                )\n            }\n            11 => {\n                let number = rng.gen_range(0..500);\n                let mut reservations = vec![];\n                for _ in 0..number {\n                    reservations.push(rng.gen());\n                }\n                let args = arg_handling::new_add_reservations_args(reservations).unwrap();\n                FieldsContainer::new(\n                    TransactionArgs::Named(args),\n                    TransactionTarget::Native,\n                    
TransactionEntryPoint::AddReservations,\n                    TransactionScheduling::random(rng),\n                )\n            }\n            12 => {\n                let validator = PublicKey::random(rng);\n                let number = rng.gen_range(0..500);\n                let mut delegators = vec![];\n                for _ in 0..number {\n                    delegators.push(rng.gen());\n                }\n                let args =\n                    arg_handling::new_cancel_reservations_args(validator, delegators).unwrap();\n                FieldsContainer::new(\n                    TransactionArgs::Named(args),\n                    TransactionTarget::Native,\n                    TransactionEntryPoint::CancelReservations,\n                    TransactionScheduling::random(rng),\n                )\n            }\n            _ => unreachable!(),\n        }\n    }\n\n    /// Returns a random `FieldsContainer`.\n    #[cfg(test)]\n    pub fn random_of_lane(rng: &mut TestRng, lane_id: u8) -> Self {\n        match lane_id {\n            MINT_LANE_ID => Self::random_transfer(rng),\n            AUCTION_LANE_ID => Self::random_staking(rng),\n            INSTALL_UPGRADE_LANE_ID => Self::random_install_upgrade(rng),\n            _ => Self::random_standard(rng),\n        }\n    }\n\n    #[cfg(test)]\n    fn random_install_upgrade(rng: &mut TestRng) -> Self {\n        let target = TransactionTarget::Session {\n            module_bytes: Bytes::from(rng.random_vec(0..100)),\n            runtime: TransactionRuntimeParams::VmCasperV1,\n            is_install_upgrade: true,\n        };\n        FieldsContainer::new(\n            TransactionArgs::Named(RuntimeArgs::random(rng)),\n            target,\n            TransactionEntryPoint::Call,\n            TransactionScheduling::random(rng),\n        )\n    }\n\n    #[cfg(test)]\n    fn random_staking(rng: &mut TestRng) -> Self {\n        let public_key = PublicKey::random(rng);\n        let delegation_rate = rng.gen();\n       
 let amount = rng.gen::<u64>();\n        let minimum_delegation_amount = rng.gen::<bool>().then(|| rng.gen());\n        let maximum_delegation_amount = minimum_delegation_amount\n            .map(|minimum_delegation_amount| minimum_delegation_amount + rng.gen::<u32>() as u64);\n        let reserved_slots = rng.gen::<bool>().then(|| rng.gen::<u32>());\n        let args = arg_handling::new_add_bid_args(\n            public_key,\n            delegation_rate,\n            amount,\n            minimum_delegation_amount,\n            maximum_delegation_amount,\n            reserved_slots,\n        )\n        .unwrap();\n        FieldsContainer::new(\n            TransactionArgs::Named(args),\n            TransactionTarget::Native,\n            TransactionEntryPoint::AddBid,\n            TransactionScheduling::random(rng),\n        )\n    }\n\n    #[cfg(test)]\n    fn random_transfer(rng: &mut TestRng) -> Self {\n        let amount = rng.gen_range(2_500_000_000..=u64::MAX);\n        let maybe_source = if rng.gen() { Some(rng.gen()) } else { None };\n        let target = TransferTarget::random(rng);\n        let maybe_id = rng.gen::<bool>().then(|| rng.gen());\n        let args = arg_handling::new_transfer_args(amount, maybe_source, target, maybe_id).unwrap();\n        FieldsContainer::new(\n            TransactionArgs::Named(args),\n            TransactionTarget::Native,\n            TransactionEntryPoint::Transfer,\n            TransactionScheduling::random(rng),\n        )\n    }\n\n    #[cfg(test)]\n    fn random_standard(rng: &mut TestRng) -> Self {\n        let target = TransactionTarget::Stored {\n            id: TransactionInvocationTarget::random(rng),\n            runtime: TransactionRuntimeParams::VmCasperV1,\n        };\n        FieldsContainer::new(\n            TransactionArgs::Named(RuntimeArgs::random(rng)),\n            target,\n            TransactionEntryPoint::Custom(rng.random_string(1..11)),\n            TransactionScheduling::random(rng),\n        
)\n    }\n}\n"
  },
  {
    "path": "node/src/types/transaction/initiator_addr_and_secret_key.rs",
    "content": "use casper_types::{InitiatorAddr, PublicKey, SecretKey};\n\n/// Used when constructing a deploy or transaction.\n#[derive(Debug)]\npub(crate) enum InitiatorAddrAndSecretKey<'a> {\n    /// Provides both the initiator address and the secret key (not necessarily for the same\n    /// initiator address) used to sign the deploy or transaction.\n    Both {\n        /// The initiator address of the account.\n        initiator_addr: InitiatorAddr,\n        /// The secret key used to sign the deploy or transaction.\n        secret_key: &'a SecretKey,\n    },\n    /// The initiator address only (no secret key).  The deploy or transaction will be created\n    /// unsigned.\n    #[allow(unused)]\n    InitiatorAddr(InitiatorAddr),\n    /// The initiator address will be derived from the provided secret key, and the deploy or\n    /// transaction will be signed by the same secret key.\n    #[allow(unused)]\n    SecretKey(&'a SecretKey),\n}\n\nimpl InitiatorAddrAndSecretKey<'_> {\n    /// The address of the initiator of a `TransactionV1`.\n    pub fn initiator_addr(&self) -> InitiatorAddr {\n        match self {\n            InitiatorAddrAndSecretKey::Both { initiator_addr, .. }\n            | InitiatorAddrAndSecretKey::InitiatorAddr(initiator_addr) => initiator_addr.clone(),\n            InitiatorAddrAndSecretKey::SecretKey(secret_key) => {\n                InitiatorAddr::PublicKey(PublicKey::from(*secret_key))\n            }\n        }\n    }\n\n    /// The secret key of the initiator of a `TransactionV1`.\n    pub fn secret_key(&self) -> Option<&SecretKey> {\n        match self {\n            InitiatorAddrAndSecretKey::Both { secret_key, .. }\n            | InitiatorAddrAndSecretKey::SecretKey(secret_key) => Some(secret_key),\n            InitiatorAddrAndSecretKey::InitiatorAddr(_) => None,\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/types/transaction/meta_transaction/meta_deploy.rs",
    "content": "use datasize::DataSize;\nuse once_cell::sync::OnceCell;\nuse serde::Serialize;\n\n#[cfg(test)]\nuse casper_types::TransactionLaneDefinition;\nuse casper_types::{\n    calculate_lane_id_for_deploy, Deploy, ExecutableDeployItem, InitiatorAddr, InvalidTransaction,\n    PricingHandling, TransactionV1Config,\n};\n#[derive(Clone, Debug, Serialize, DataSize)]\npub(crate) struct MetaDeploy {\n    deploy: Deploy,\n    //We need to keep this id here since we can fetch it only from chainspec.\n    lane_id: u8,\n    #[data_size(skip)]\n    #[serde(skip)]\n    initiator_addr: OnceCell<InitiatorAddr>,\n}\n\nimpl MetaDeploy {\n    pub(crate) fn from_deploy(\n        deploy: Deploy,\n        pricing_handling: PricingHandling,\n        config: &TransactionV1Config,\n    ) -> Result<Self, InvalidTransaction> {\n        let lane_id = calculate_lane_id_for_deploy(&deploy, pricing_handling, config)\n            .map_err(InvalidTransaction::Deploy)?;\n        let initiator_addr = OnceCell::new();\n        Ok(MetaDeploy {\n            deploy,\n            lane_id,\n            initiator_addr,\n        })\n    }\n\n    pub(crate) fn initiator_addr(&self) -> &InitiatorAddr {\n        self.initiator_addr\n            .get_or_init(|| InitiatorAddr::PublicKey(self.deploy.account().clone()))\n    }\n\n    pub(crate) fn lane_id(&self) -> u8 {\n        self.lane_id\n    }\n\n    pub(crate) fn session(&self) -> &ExecutableDeployItem {\n        self.deploy.session()\n    }\n\n    pub(crate) fn deploy(&self) -> &Deploy {\n        &self.deploy\n    }\n}\n\n#[cfg(test)]\npub(crate) fn calculate_lane_id_of_biggest_wasm(\n    wasm_lanes: &[TransactionLaneDefinition],\n) -> Option<u8> {\n    wasm_lanes\n        .iter()\n        .max_by(|left, right| {\n            left.max_transaction_length()\n                .cmp(&right.max_transaction_length())\n        })\n        .map(|definition| definition.id())\n}\n#[cfg(test)]\nmod tests {\n    use super::calculate_lane_id_of_biggest_wasm;\n    
use casper_types::TransactionLaneDefinition;\n    #[test]\n    fn calculate_lane_id_of_biggest_wasm_should_return_none_on_empty() {\n        let wasms = vec![];\n        assert!(calculate_lane_id_of_biggest_wasm(&wasms).is_none());\n    }\n\n    #[test]\n    fn calculate_lane_id_of_biggest_wasm_should_return_biggest() {\n        let wasms = vec![\n            TransactionLaneDefinition::new(0, 1, 2, 3, 4),\n            TransactionLaneDefinition::new(1, 10, 2, 3, 4),\n        ];\n        assert_eq!(calculate_lane_id_of_biggest_wasm(&wasms), Some(1));\n        let wasms = vec![\n            TransactionLaneDefinition::new(0, 1, 2, 3, 4),\n            TransactionLaneDefinition::new(1, 10, 2, 3, 4),\n            TransactionLaneDefinition::new(2, 7, 2, 3, 4),\n        ];\n        assert_eq!(calculate_lane_id_of_biggest_wasm(&wasms), Some(1));\n\n        let wasms = vec![\n            TransactionLaneDefinition::new(0, 1, 2, 3, 4),\n            TransactionLaneDefinition::new(1, 10, 2, 3, 4),\n            TransactionLaneDefinition::new(2, 70, 2, 3, 4),\n        ];\n        assert_eq!(calculate_lane_id_of_biggest_wasm(&wasms), Some(2));\n    }\n}\n"
  },
  {
    "path": "node/src/types/transaction/meta_transaction/meta_transaction_v1.rs",
    "content": "use crate::types::transaction::arg_handling;\nuse casper_types::{\n    bytesrepr::ToBytes, calculate_transaction_lane, crypto, Approval, Chainspec,\n    ContractRuntimeTag, Digest, DisplayIter, Gas, HashAddr, InitiatorAddr, InvalidTransaction,\n    InvalidTransactionV1, PricingHandling, PricingMode, TimeDiff, Timestamp, TransactionArgs,\n    TransactionConfig, TransactionEntryPoint, TransactionInvocationTarget,\n    TransactionRuntimeParams, TransactionScheduling, TransactionTarget, TransactionV1,\n    TransactionV1Config, TransactionV1ExcessiveSizeError, TransactionV1Hash, AUCTION_LANE_ID,\n    MINT_LANE_ID, U512,\n};\nuse core::fmt::{self, Debug, Display, Formatter};\nuse datasize::DataSize;\nuse once_cell::sync::OnceCell;\nuse serde::Serialize;\nuse std::collections::BTreeSet;\nuse tracing::debug;\n\nconst ARGS_MAP_KEY: u16 = 0;\nconst TARGET_MAP_KEY: u16 = 1;\nconst ENTRY_POINT_MAP_KEY: u16 = 2;\nconst SCHEDULING_MAP_KEY: u16 = 3;\nconst EXPECTED_NUMBER_OF_FIELDS: usize = 4;\n\n#[derive(Clone, Debug, Serialize, DataSize)]\npub(crate) struct MetaTransactionV1 {\n    hash: TransactionV1Hash,\n    chain_name: String,\n    timestamp: Timestamp,\n    ttl: TimeDiff,\n    pricing_mode: PricingMode,\n    initiator_addr: InitiatorAddr,\n    args: TransactionArgs,\n    target: TransactionTarget,\n    entry_point: TransactionEntryPoint,\n    lane_id: u8,\n    scheduling: TransactionScheduling,\n    approvals: BTreeSet<Approval>,\n    serialized_length: usize,\n    payload_hash: Digest,\n    has_valid_hash: Result<(), InvalidTransactionV1>,\n    #[serde(skip)]\n    #[data_size(skip)]\n    is_verified: OnceCell<Result<(), InvalidTransactionV1>>,\n}\n\nimpl MetaTransactionV1 {\n    pub(crate) fn from_transaction_v1(\n        v1: &TransactionV1,\n        transaction_v1_config: &TransactionV1Config,\n    ) -> Result<MetaTransactionV1, InvalidTransaction> {\n        let args_binary_len = v1\n            .payload()\n            .fields()\n            
.get(&ARGS_MAP_KEY)\n            .map(|field| field.len())\n            .unwrap_or(0);\n        let args: TransactionArgs = v1.deserialize_field(ARGS_MAP_KEY).map_err(|error| {\n            InvalidTransaction::V1(InvalidTransactionV1::CouldNotDeserializeField { error })\n        })?;\n        let target: TransactionTarget = v1.deserialize_field(TARGET_MAP_KEY).map_err(|error| {\n            InvalidTransaction::V1(InvalidTransactionV1::CouldNotDeserializeField { error })\n        })?;\n        let entry_point: TransactionEntryPoint =\n            v1.deserialize_field(ENTRY_POINT_MAP_KEY).map_err(|error| {\n                InvalidTransaction::V1(InvalidTransactionV1::CouldNotDeserializeField { error })\n            })?;\n        let scheduling: TransactionScheduling =\n            v1.deserialize_field(SCHEDULING_MAP_KEY).map_err(|error| {\n                InvalidTransaction::V1(InvalidTransactionV1::CouldNotDeserializeField { error })\n            })?;\n\n        if v1.number_of_fields() != EXPECTED_NUMBER_OF_FIELDS {\n            return Err(InvalidTransaction::V1(\n                InvalidTransactionV1::UnexpectedTransactionFieldEntries,\n            ));\n        }\n\n        let payload_hash = v1.payload_hash()?;\n        let serialized_length = v1.serialized_length();\n        let pricing_mode = v1.payload().pricing_mode();\n        let lane_id = calculate_transaction_lane(\n            &entry_point,\n            &target,\n            pricing_mode,\n            transaction_v1_config,\n            serialized_length as u64,\n            args_binary_len as u64,\n        )?;\n        let has_valid_hash = v1.has_valid_hash();\n        let approvals = v1.approvals().clone();\n        Ok(MetaTransactionV1::new(\n            *v1.hash(),\n            v1.chain_name().to_string(),\n            v1.timestamp(),\n            v1.ttl(),\n            v1.pricing_mode().clone(),\n            v1.initiator_addr().clone(),\n            args,\n            target,\n            
entry_point,\n            lane_id,\n            scheduling,\n            serialized_length,\n            payload_hash,\n            approvals,\n            has_valid_hash,\n        ))\n    }\n\n    fn is_native_mint(&self) -> bool {\n        self.lane_id == MINT_LANE_ID\n    }\n\n    fn is_native_auction(&self) -> bool {\n        self.lane_id == AUCTION_LANE_ID\n    }\n\n    pub(crate) fn is_v1_wasm(&self) -> bool {\n        match &self.target {\n            TransactionTarget::Native => false,\n            TransactionTarget::Stored {\n                runtime: stored_runtime,\n                ..\n            } => {\n                matches!(stored_runtime, TransactionRuntimeParams::VmCasperV1 { .. })\n                    && (!self.is_native_mint() && !self.is_native_auction())\n            }\n            TransactionTarget::Session {\n                runtime: session_runtime,\n                ..\n            } => {\n                matches!(session_runtime, TransactionRuntimeParams::VmCasperV1 { .. })\n                    && (!self.is_native_mint() && !self.is_native_auction())\n            }\n        }\n    }\n\n    pub(crate) fn is_v2_wasm(&self) -> bool {\n        match &self.target {\n            TransactionTarget::Native => false,\n            TransactionTarget::Stored {\n                runtime: stored_runtime,\n                ..\n            } => {\n                matches!(stored_runtime, TransactionRuntimeParams::VmCasperV2 { .. })\n                    && (!self.is_native_mint() && !self.is_native_auction())\n            }\n            TransactionTarget::Session {\n                runtime: session_runtime,\n                ..\n            } => {\n                matches!(session_runtime, TransactionRuntimeParams::VmCasperV2 { .. 
})\n                    && (!self.is_native_mint() && !self.is_native_auction())\n            }\n        }\n    }\n\n    #[allow(clippy::too_many_arguments)]\n    pub(crate) fn new(\n        hash: TransactionV1Hash,\n        chain_name: String,\n        timestamp: Timestamp,\n        ttl: TimeDiff,\n        pricing_mode: PricingMode,\n        initiator_addr: InitiatorAddr,\n        args: TransactionArgs,\n        target: TransactionTarget,\n        entry_point: TransactionEntryPoint,\n        lane_id: u8,\n        scheduling: TransactionScheduling,\n        serialized_length: usize,\n        payload_hash: Digest,\n        approvals: BTreeSet<Approval>,\n        has_valid_hash: Result<(), InvalidTransactionV1>,\n    ) -> Self {\n        Self {\n            hash,\n            chain_name,\n            timestamp,\n            ttl,\n            pricing_mode,\n            initiator_addr,\n            args,\n            target,\n            entry_point,\n            lane_id,\n            scheduling,\n            approvals,\n            serialized_length,\n            payload_hash,\n            has_valid_hash,\n            is_verified: OnceCell::new(),\n        }\n    }\n\n    /// Returns the runtime args of the transaction.\n    pub(crate) fn args(&self) -> &TransactionArgs {\n        &self.args\n    }\n\n    /// Returns the `DeployHash` identifying this `Deploy`.\n    pub(crate) fn hash(&self) -> &TransactionV1Hash {\n        &self.hash\n    }\n\n    /// Returns the `Approvals`.\n    pub(crate) fn approvals(&self) -> &BTreeSet<Approval> {\n        &self.approvals\n    }\n\n    /// Returns `Ok` if and only if:\n    ///   * the transaction hash is correct (see [`TransactionV1::has_valid_hash`] for details)\n    ///   * approvals are non-empty, and\n    ///   * all approvals are valid signatures of the signed hash\n    pub(crate) fn verify(&self) -> Result<(), InvalidTransactionV1> {\n        self.is_verified.get_or_init(|| self.do_verify()).clone()\n    }\n\n    /// 
Returns `Ok` if and only if this transaction's body hashes to the value of `body_hash()`,\n    /// and if this transaction's header hashes to the value claimed as the transaction hash.\n    pub(crate) fn has_valid_hash(&self) -> &Result<(), InvalidTransactionV1> {\n        &self.has_valid_hash\n    }\n\n    fn do_verify(&self) -> Result<(), InvalidTransactionV1> {\n        if self.approvals.is_empty() {\n            debug!(?self, \"transaction has no approvals\");\n            return Err(InvalidTransactionV1::EmptyApprovals);\n        }\n\n        self.has_valid_hash().clone()?;\n\n        for (index, approval) in self.approvals.iter().enumerate() {\n            if let Err(error) = crypto::verify(self.hash, approval.signature(), approval.signer()) {\n                debug!(\n                    ?self,\n                    \"failed to verify transaction approval {}: {}\", index, error\n                );\n                return Err(InvalidTransactionV1::InvalidApproval { index, error });\n            }\n        }\n\n        Ok(())\n    }\n\n    /// Returns the entry point of the transaction.\n    pub(crate) fn entry_point(&self) -> &TransactionEntryPoint {\n        &self.entry_point\n    }\n\n    /// Returns the hash_addr and entry point name of a smart contract, if applicable.\n    pub(crate) fn contract_direct_address(&self) -> Option<(HashAddr, String)> {\n        let hash_addr = self.target().contract_hash_addr()?;\n        let entry_point = self.entry_point.custom_entry_point()?;\n        Some((hash_addr, entry_point))\n    }\n\n    /// Returns the transaction lane.\n    pub(crate) fn lane_id(&self) -> u8 {\n        self.lane_id\n    }\n\n    /// Returns payload hash of the transaction.\n    pub(crate) fn payload_hash(&self) -> &Digest {\n        &self.payload_hash\n    }\n\n    /// Returns the pricing mode for the transaction.\n    pub(crate) fn pricing_mode(&self) -> &PricingMode {\n        &self.pricing_mode\n    }\n\n    /// Returns the initiator_addr of 
the transaction.\n    pub(crate) fn initiator_addr(&self) -> &InitiatorAddr {\n        &self.initiator_addr\n    }\n\n    /// Returns the target of the transaction.\n    pub(crate) fn target(&self) -> &TransactionTarget {\n        &self.target\n    }\n\n    /// Returns `true` if the serialized size of the transaction is not greater than\n    /// `max_transaction_size`.\n    fn is_valid_size(\n        &self,\n        max_transaction_size: u32,\n    ) -> Result<(), TransactionV1ExcessiveSizeError> {\n        let actual_transaction_size = self.serialized_length;\n        if actual_transaction_size > max_transaction_size as usize {\n            return Err(TransactionV1ExcessiveSizeError {\n                max_transaction_size,\n                actual_transaction_size,\n            });\n        }\n        Ok(())\n    }\n\n    /// Returns the creation timestamp of the `Deploy`.\n    pub(crate) fn timestamp(&self) -> Timestamp {\n        self.timestamp\n    }\n\n    /// Returns the duration after the creation timestamp for which the `Deploy` will stay valid.\n    ///\n    /// After this duration has ended, the `Deploy` will be considered expired.\n    pub(crate) fn ttl(&self) -> TimeDiff {\n        self.ttl\n    }\n    /// Returns the scheduling of the transaction.\n    pub(crate) fn contract_runtime_tag(&self) -> Option<ContractRuntimeTag> {\n        match &self.target {\n            TransactionTarget::Native => None,\n            TransactionTarget::Stored { runtime, .. } => Some(runtime.contract_runtime_tag()),\n            TransactionTarget::Session { runtime, .. 
} => Some(runtime.contract_runtime_tag()),\n        }\n    }\n\n    /// Returns `Ok` if and only if:\n    ///   * the chain_name is correct,\n    ///   * the configured parameters are complied with at the given timestamp\n    pub(crate) fn is_config_compliant(\n        &self,\n        chainspec: &Chainspec,\n        timestamp_leeway: TimeDiff,\n        at: Timestamp,\n    ) -> Result<(), InvalidTransactionV1> {\n        let transaction_config = chainspec.transaction_config.clone();\n\n        match self.contract_runtime_tag() {\n            Some(expected_runtime @ ContractRuntimeTag::VmCasperV1) => {\n                if !transaction_config.runtime_config.vm_casper_v1 {\n                    // NOTE: In current implementation native transactions should be executed on\n                    // both VmCasperV1 and VmCasperV2. This may change once we\n                    // have a more stable VmCasperV2 that can also process calls\n                    // to system contracts in VM2 chunked args style.\n\n                    return Err(InvalidTransactionV1::InvalidTransactionRuntime {\n                        expected: expected_runtime,\n                    });\n                }\n\n                if !self.args.is_named() {\n                    // VmCasperV1 runtime expected named arguments and does not support bytes\n                    // variant.\n                    return Err(InvalidTransactionV1::ExpectedNamedArguments);\n                }\n            }\n            Some(expected_runtime @ ContractRuntimeTag::VmCasperV2) => {\n                if !transaction_config.runtime_config.vm_casper_v2 {\n                    // NOTE: In current implementation native transactions should be executed on\n                    // both VmCasperV1 and VmCasperV2. 
This may change once we\n                    // have a more stable VmCasperV2 that can also process calls\n                    // to system contracts in VM2 chunked args style.\n\n                    return Err(InvalidTransactionV1::InvalidTransactionRuntime {\n                        expected: expected_runtime,\n                    });\n                }\n\n                if !self.args.is_bytesrepr() {\n                    // VmCasperV2 runtime expected bytes arguments and does not support named\n                    // variant.\n                    return Err(InvalidTransactionV1::ExpectedBytesArguments);\n                }\n\n                match self.pricing_mode {\n                    PricingMode::PaymentLimited {\n                        standard_payment,\n                        payment_amount,\n                        ..\n                    } => {\n                        // the transaction acceptor enforces this on an actual network,\n                        // rejecting 0 payment txn's right away.\n                        // however, direct tests don't engage the acceptor.\n                        if payment_amount == 0u64 {\n                            return Err(InvalidTransactionV1::InvalidPaymentAmount);\n                        }\n                        if !standard_payment {\n                            // V2 runtime expects standard payment in the payment limited mode.\n                            return Err(InvalidTransactionV1::InvalidPricingMode {\n                                price_mode: self.pricing_mode.clone(),\n                            });\n                        }\n                    }\n                    PricingMode::Fixed { .. } => {}\n                    PricingMode::Prepaid { .. } => {}\n                }\n\n                if let TransactionTarget::Stored {\n                    id:\n                        id @ TransactionInvocationTarget::ByPackageHash { .. 
}\n                        | id @ TransactionInvocationTarget::ByPackageName { .. },\n                    runtime: _,\n                } = self.target.clone()\n                {\n                    return Err(InvalidTransactionV1::UnsupportedInvocationTarget { id: Some(id) });\n                }\n            }\n            None => {\n                // noop\n            }\n        }\n\n        self.is_valid_size(\n            transaction_config\n                .transaction_v1_config\n                .get_max_serialized_length(self.lane_id) as u32,\n        )?;\n\n        let chain_name = chainspec.network_config.name.clone();\n\n        if self.chain_name != chain_name {\n            debug!(\n                transaction_hash = %self.hash(),\n                chain_name = %self.chain_name,\n                timestamp= %self.timestamp,\n                ttl= %self.ttl,\n                pricing_mode= %self.pricing_mode,\n                initiator_addr= %self.initiator_addr,\n                target= %self.target,\n                entry_point= %self.entry_point,\n                lane_id= %self.lane_id,\n                scheduling= %self.scheduling,\n                \"invalid chain identifier\"\n            );\n            return Err(InvalidTransactionV1::InvalidChainName {\n                expected: chain_name,\n                got: self.chain_name.to_string(),\n            });\n        }\n\n        let price_handling = chainspec.core_config.pricing_handling;\n        let pricing_mode = &self.pricing_mode;\n\n        match pricing_mode {\n            PricingMode::PaymentLimited { payment_amount, .. 
} => {\n                if *payment_amount == 0u64 {\n                    return Err(InvalidTransactionV1::InvalidPaymentAmount);\n                }\n                if let PricingHandling::PaymentLimited = price_handling {\n                    if self.is_native_mint() {\n                        let entry_point = &self.entry_point;\n                        let expected_payment = match &entry_point {\n                            TransactionEntryPoint::Transfer => {\n                                chainspec.system_costs_config.mint_costs().transfer\n                            }\n                            TransactionEntryPoint::Burn => {\n                                chainspec.system_costs_config.mint_costs().burn\n                            }\n                            _ => {\n                                return Err(InvalidTransactionV1::UnexpectedEntryPoint {\n                                    entry_point: entry_point.clone(),\n                                    lane_id: self.lane_id,\n                                })\n                            }\n                        };\n                        if *payment_amount < expected_payment.into() {\n                            return Err(InvalidTransactionV1::InvalidPaymentAmount);\n                        }\n                    } else if self.is_native_auction() {\n                        let entry_point = &self.entry_point;\n                        let expected_payment = match &entry_point {\n                            TransactionEntryPoint::AddBid | TransactionEntryPoint::ActivateBid => {\n                                chainspec.system_costs_config.auction_costs().add_bid\n                            }\n                            TransactionEntryPoint::WithdrawBid => {\n                                chainspec.system_costs_config.auction_costs().withdraw_bid\n                            }\n                            TransactionEntryPoint::Delegate => {\n                                
chainspec.system_costs_config.auction_costs().delegate\n                            }\n                            TransactionEntryPoint::Undelegate => {\n                                chainspec.system_costs_config.auction_costs().undelegate\n                            }\n                            TransactionEntryPoint::Redelegate => {\n                                chainspec.system_costs_config.auction_costs().redelegate\n                            }\n                            TransactionEntryPoint::ChangeBidPublicKey => {\n                                chainspec\n                                    .system_costs_config\n                                    .auction_costs()\n                                    .change_bid_public_key\n                            }\n                            TransactionEntryPoint::AddReservations => {\n                                chainspec\n                                    .system_costs_config\n                                    .auction_costs()\n                                    .add_reservations\n                            }\n                            TransactionEntryPoint::CancelReservations => {\n                                chainspec\n                                    .system_costs_config\n                                    .auction_costs()\n                                    .cancel_reservations\n                            }\n                            _ => {\n                                return Err(InvalidTransactionV1::UnexpectedEntryPoint {\n                                    entry_point: entry_point.clone(),\n                                    lane_id: self.lane_id,\n                                })\n                            }\n                        };\n                        if *payment_amount < expected_payment {\n                            return Err(InvalidTransactionV1::InvalidPaymentAmount);\n                        }\n                    } else if *payment_amount < 
chainspec.core_config.baseline_motes_amount {\n                        return Err(InvalidTransactionV1::InvalidPaymentAmount);\n                    }\n                } else {\n                    return Err(InvalidTransactionV1::InvalidPricingMode {\n                        price_mode: pricing_mode.clone(),\n                    });\n                }\n            }\n            PricingMode::Fixed { .. } => {\n                if let PricingHandling::Fixed = price_handling {\n                } else {\n                    return Err(InvalidTransactionV1::InvalidPricingMode {\n                        price_mode: pricing_mode.clone(),\n                    });\n                }\n            }\n            PricingMode::Prepaid { .. } => {\n                if !chainspec.core_config.allow_prepaid {\n                    // Currently Prepaid isn't implemented, and we should\n                    // not be accepting transactions with this mode.\n                    return Err(InvalidTransactionV1::InvalidPricingMode {\n                        price_mode: pricing_mode.clone(),\n                    });\n                }\n            }\n        }\n\n        let min_gas_price = chainspec.vacancy_config.min_gas_price;\n        let gas_price_tolerance = self.gas_price_tolerance();\n        if gas_price_tolerance < min_gas_price {\n            return Err(InvalidTransactionV1::GasPriceToleranceTooLow {\n                min_gas_price_tolerance: min_gas_price,\n                provided_gas_price_tolerance: gas_price_tolerance,\n            });\n        }\n\n        self.is_header_metadata_valid(&transaction_config, timestamp_leeway, at, &self.hash)?;\n\n        let max_associated_keys = chainspec.core_config.max_associated_keys;\n\n        if self.approvals.len() > max_associated_keys as usize {\n            debug!(\n                transaction_hash = %self.hash(),\n                number_of_approvals = %self.approvals.len(),\n                max_associated_keys = 
%max_associated_keys,\n                \"number of transaction approvals exceeds the limit\"\n            );\n            return Err(InvalidTransactionV1::ExcessiveApprovals {\n                got: self.approvals.len() as u32,\n                max_associated_keys,\n            });\n        }\n\n        let gas_limit = self\n            .pricing_mode\n            .gas_limit(chainspec, self.lane_id)\n            .map_err(Into::<InvalidTransactionV1>::into)?;\n        let block_gas_limit = Gas::new(U512::from(transaction_config.block_gas_limit));\n        if gas_limit > block_gas_limit {\n            debug!(\n                amount = %gas_limit,\n                %block_gas_limit,\n                \"transaction gas limit exceeds block gas limit\"\n            );\n            return Err(InvalidTransactionV1::ExceedsBlockGasLimit {\n                block_gas_limit: transaction_config.block_gas_limit,\n                got: Box::new(gas_limit.value()),\n            });\n        }\n\n        self.is_body_metadata_valid(chainspec, &transaction_config)\n    }\n\n    fn is_body_metadata_valid(\n        &self,\n        chainspec: &Chainspec,\n        config: &TransactionConfig,\n    ) -> Result<(), InvalidTransactionV1> {\n        let lane_id = self.lane_id;\n        if !config.transaction_v1_config.is_supported(lane_id) {\n            return Err(InvalidTransactionV1::InvalidTransactionLane(lane_id));\n        }\n\n        let max_serialized_length = config\n            .transaction_v1_config\n            .get_max_serialized_length(lane_id);\n        let actual_length = self.serialized_length;\n        if actual_length > max_serialized_length as usize {\n            return Err(InvalidTransactionV1::ExcessiveSize(\n                TransactionV1ExcessiveSizeError {\n                    max_transaction_size: max_serialized_length as u32,\n                    actual_transaction_size: actual_length,\n                },\n            ));\n        }\n\n        let max_args_length = 
config.transaction_v1_config.get_max_args_length(lane_id);\n\n        let args_length = self.args.serialized_length();\n        if args_length > max_args_length as usize {\n            debug!(\n                args_length,\n                max_args_length = max_args_length,\n                \"transaction runtime args excessive size\"\n            );\n            return Err(InvalidTransactionV1::ExcessiveArgsLength {\n                max_length: max_args_length as usize,\n                got: args_length,\n            });\n        }\n\n        match &self.target {\n            TransactionTarget::Native => match self.entry_point {\n                TransactionEntryPoint::Call => {\n                    debug!(\n                        entry_point = %self.entry_point,\n                        \"native transaction cannot have call entry point\"\n                    );\n                    Err(InvalidTransactionV1::EntryPointCannotBeCall)\n                }\n                TransactionEntryPoint::Custom(_) => {\n                    debug!(\n                        entry_point = %self.entry_point,\n                        \"native transaction cannot have custom entry point\"\n                    );\n                    Err(InvalidTransactionV1::EntryPointCannotBeCustom {\n                        entry_point: self.entry_point.clone(),\n                    })\n                }\n                TransactionEntryPoint::Transfer => arg_handling::has_valid_transfer_args(\n                    &self.args,\n                    config.native_transfer_minimum_motes,\n                ),\n                TransactionEntryPoint::Burn => arg_handling::has_valid_burn_args(&self.args),\n                TransactionEntryPoint::AddBid => {\n                    arg_handling::has_valid_add_bid_args(chainspec, &self.args)\n                }\n                TransactionEntryPoint::WithdrawBid => {\n                    arg_handling::has_valid_withdraw_bid_args(&self.args)\n                }\n       
         TransactionEntryPoint::Delegate => {\n                    arg_handling::has_valid_delegate_args(chainspec, &self.args)\n                }\n                TransactionEntryPoint::Undelegate => {\n                    arg_handling::has_valid_undelegate_args(&self.args)\n                }\n                TransactionEntryPoint::Redelegate => {\n                    arg_handling::has_valid_redelegate_args(chainspec, &self.args)\n                }\n                TransactionEntryPoint::ActivateBid => {\n                    arg_handling::has_valid_activate_bid_args(&self.args)\n                }\n                TransactionEntryPoint::ChangeBidPublicKey => {\n                    arg_handling::has_valid_change_bid_public_key_args(&self.args)\n                }\n                TransactionEntryPoint::AddReservations => {\n                    arg_handling::has_valid_add_reservations_args(chainspec, &self.args)\n                }\n                TransactionEntryPoint::CancelReservations => {\n                    arg_handling::has_valid_cancel_reservations_args(&self.args)\n                }\n            },\n            TransactionTarget::Stored { .. 
} => match &self.entry_point {\n                TransactionEntryPoint::Custom(_) => Ok(()),\n                TransactionEntryPoint::Call\n                | TransactionEntryPoint::Transfer\n                | TransactionEntryPoint::Burn\n                | TransactionEntryPoint::AddBid\n                | TransactionEntryPoint::WithdrawBid\n                | TransactionEntryPoint::Delegate\n                | TransactionEntryPoint::Undelegate\n                | TransactionEntryPoint::Redelegate\n                | TransactionEntryPoint::ActivateBid\n                | TransactionEntryPoint::ChangeBidPublicKey\n                | TransactionEntryPoint::AddReservations\n                | TransactionEntryPoint::CancelReservations => {\n                    debug!(\n                        entry_point = %self.entry_point,\n                        \"transaction targeting stored entity/package must have custom entry point\"\n                    );\n                    Err(InvalidTransactionV1::EntryPointMustBeCustom {\n                        entry_point: self.entry_point.clone(),\n                    })\n                }\n            },\n            TransactionTarget::Session { module_bytes, .. 
} => match &self.entry_point {\n                TransactionEntryPoint::Call | TransactionEntryPoint::Custom(_) => {\n                    if module_bytes.is_empty() {\n                        debug!(\"transaction with session code must not have empty module bytes\");\n                        return Err(InvalidTransactionV1::EmptyModuleBytes);\n                    }\n                    Ok(())\n                }\n                TransactionEntryPoint::Transfer\n                | TransactionEntryPoint::Burn\n                | TransactionEntryPoint::AddBid\n                | TransactionEntryPoint::WithdrawBid\n                | TransactionEntryPoint::Delegate\n                | TransactionEntryPoint::Undelegate\n                | TransactionEntryPoint::Redelegate\n                | TransactionEntryPoint::ActivateBid\n                | TransactionEntryPoint::ChangeBidPublicKey\n                | TransactionEntryPoint::AddReservations\n                | TransactionEntryPoint::CancelReservations => {\n                    debug!(\n                        entry_point = %self.entry_point,\n                        \"transaction with session code must use custom or default 'call' entry point\"\n                    );\n                    Err(InvalidTransactionV1::EntryPointMustBeCustom {\n                        entry_point: self.entry_point.clone(),\n                    })\n                }\n            },\n        }\n    }\n\n    fn is_header_metadata_valid(\n        &self,\n        config: &TransactionConfig,\n        timestamp_leeway: TimeDiff,\n        at: Timestamp,\n        transaction_hash: &TransactionV1Hash,\n    ) -> Result<(), InvalidTransactionV1> {\n        if self.ttl() > config.max_ttl {\n            debug!(\n                %transaction_hash,\n                transaction_header = %self,\n                max_ttl = %config.max_ttl,\n                \"transaction ttl excessive\"\n            );\n            return Err(InvalidTransactionV1::ExcessiveTimeToLive 
{\n                max_ttl: config.max_ttl,\n                got: self.ttl(),\n            });\n        }\n\n        if self.timestamp() > at + timestamp_leeway {\n            debug!(\n                %transaction_hash, transaction_header = %self, %at,\n                \"transaction timestamp in the future\"\n            );\n            return Err(InvalidTransactionV1::TimestampInFuture {\n                validation_timestamp: at,\n                timestamp_leeway,\n                got: self.timestamp(),\n            });\n        }\n\n        Ok(())\n    }\n\n    /// Returns the gas price tolerance for the given transaction.\n    pub(crate) fn gas_price_tolerance(&self) -> u8 {\n        match self.pricing_mode {\n            PricingMode::PaymentLimited {\n                gas_price_tolerance,\n                ..\n            } => gas_price_tolerance,\n            PricingMode::Fixed {\n                gas_price_tolerance,\n                ..\n            } => gas_price_tolerance,\n            PricingMode::Prepaid { .. 
} => {\n                // TODO: Change this when prepaid gets implemented.\n                0u8\n            }\n        }\n    }\n\n    /// Returns the serialized length of the transaction.\n    pub(crate) fn serialized_length(&self) -> usize {\n        self.serialized_length\n    }\n\n    /// Returns the gas limit for the transaction.\n    pub(crate) fn gas_limit(&self, chainspec: &Chainspec) -> Result<Gas, InvalidTransaction> {\n        if self.is_native_transfer() {\n            return Ok(Gas::new(\n                chainspec.system_costs_config.mint_costs().transfer,\n            ));\n        }\n        self.pricing_mode()\n            .gas_limit(chainspec, self.lane_id)\n            .map_err(Into::into)\n    }\n\n    /// Returns the seed of the transaction.\n    pub(crate) fn seed(&self) -> Option<[u8; 32]> {\n        match &self.target {\n            TransactionTarget::Native => None,\n            TransactionTarget::Stored { id: _, runtime: _ } => None,\n            TransactionTarget::Session {\n                is_install_upgrade: _,\n                runtime,\n                module_bytes: _,\n            } => runtime.seed(),\n        }\n    }\n\n    /// Returns the transferred value of the transaction.\n    pub(crate) fn transferred_value(&self) -> u64 {\n        match &self.target {\n            TransactionTarget::Native => 0,\n            TransactionTarget::Stored { id: _, runtime } => match runtime {\n                TransactionRuntimeParams::VmCasperV1 => 0,\n                TransactionRuntimeParams::VmCasperV2 {\n                    transferred_value, ..\n                } => *transferred_value,\n            },\n            TransactionTarget::Session {\n                is_install_upgrade: _,\n                runtime,\n                module_bytes: _,\n            } => match runtime {\n                TransactionRuntimeParams::VmCasperV1 => 0,\n                TransactionRuntimeParams::VmCasperV2 {\n                    transferred_value,\n                
    seed: _,\n                } => *transferred_value,\n            },\n        }\n    }\n\n    /// Is this a native transfer?\n    pub(crate) fn is_native_transfer(&self) -> bool {\n        if !self.is_native_mint() {\n            return false;\n        }\n\n        matches!(self.entry_point(), TransactionEntryPoint::Transfer)\n    }\n}\n\nimpl Display for MetaTransactionV1 {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(\n            formatter,\n            \"meta-transaction-v1[hash: {}, chain_name: {}, timestamp: {}, ttl: {}, pricing_mode: {}, initiator_addr: {}, target: {}, entry_point: {}, lane_id: {}, scheduling: {}, approvals: {}]\",\n            self.hash,\n            self.chain_name,\n            self.timestamp,\n            self.ttl,\n            self.pricing_mode,\n            self.initiator_addr,\n            self.target,\n            self.entry_point,\n            self.lane_id,\n            self.scheduling,\n            DisplayIter::new(self.approvals.iter())\n        )\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::MetaTransactionV1;\n    use crate::types::transaction::transaction_v1_builder::TransactionV1Builder;\n    use casper_types::{\n        testing::TestRng, InvalidTransaction, InvalidTransactionV1, PricingMode, SecretKey,\n        TransactionInvocationTarget, TransactionLaneDefinition, TransactionRuntimeParams,\n        TransactionV1Config,\n    };\n\n    #[test]\n    fn limited_amount_should_determine_transaction_lane_for_session() {\n        let rng = &mut TestRng::new();\n        let secret_key = SecretKey::random(rng);\n        let pricing_mode = PricingMode::PaymentLimited {\n            payment_amount: 1001,\n            gas_price_tolerance: 1,\n            standard_payment: true,\n        };\n\n        let transaction_v1 = TransactionV1Builder::new_session(\n            false,\n            vec![1; 30].into(),\n            TransactionRuntimeParams::VmCasperV1,\n        )\n        
.with_chain_name(\"x\".to_string())\n        .with_pricing_mode(pricing_mode)\n        .with_secret_key(&secret_key)\n        .build()\n        .unwrap();\n        let config = build_v1_config();\n\n        let meta_transaction = MetaTransactionV1::from_transaction_v1(&transaction_v1, &config)\n            .expect(\"meta transaction should be valid\");\n        assert_eq!(meta_transaction.lane_id(), 4);\n    }\n\n    #[test]\n    fn limited_amount_should_fail_if_does_not_fit_in_any_lane() {\n        let rng = &mut TestRng::new();\n        let secret_key = SecretKey::random(rng);\n        let pricing_mode = PricingMode::PaymentLimited {\n            payment_amount: 1000000,\n            gas_price_tolerance: 1,\n            standard_payment: true,\n        };\n\n        let transaction_v1 = TransactionV1Builder::new_session(\n            false,\n            vec![1; 30].into(),\n            TransactionRuntimeParams::VmCasperV1,\n        )\n        .with_chain_name(\"x\".to_string())\n        .with_pricing_mode(pricing_mode)\n        .with_secret_key(&secret_key)\n        .build()\n        .unwrap();\n        let config = build_v1_config();\n\n        let res = MetaTransactionV1::from_transaction_v1(&transaction_v1, &config);\n        assert!(matches!(\n            res,\n            Err(InvalidTransaction::V1(InvalidTransactionV1::NoLaneMatch))\n        ))\n    }\n\n    #[test]\n    fn limited_amount_should_fail_if_transaction_size_does_not_fit_in_any_lane() {\n        let rng = &mut TestRng::new();\n        let secret_key = SecretKey::random(rng);\n        let pricing_mode = PricingMode::PaymentLimited {\n            payment_amount: 100,\n            gas_price_tolerance: 1,\n            standard_payment: true,\n        };\n\n        let transaction_v1 = TransactionV1Builder::new_session(\n            false,\n            vec![1; 3000].into(),\n            TransactionRuntimeParams::VmCasperV1,\n        )\n        .with_chain_name(\"x\".to_string())\n        
.with_pricing_mode(pricing_mode)\n        .with_secret_key(&secret_key)\n        .build()\n        .unwrap();\n        let mut config = TransactionV1Config::default();\n        config.set_wasm_lanes(vec![\n            TransactionLaneDefinition::new(3, 200, 100, 100, 10),\n            TransactionLaneDefinition::new(4, 500, 100, 10000, 10),\n        ]);\n\n        let res = MetaTransactionV1::from_transaction_v1(&transaction_v1, &config);\n        assert!(matches!(\n            res,\n            Err(InvalidTransaction::V1(InvalidTransactionV1::NoLaneMatch))\n        ))\n    }\n\n    #[test]\n    fn limited_amount_should_determine_transaction_lane_for_stored() {\n        let rng = &mut TestRng::new();\n        let secret_key = SecretKey::random(rng);\n        let pricing_mode = PricingMode::PaymentLimited {\n            payment_amount: 1001,\n            gas_price_tolerance: 1,\n            standard_payment: true,\n        };\n\n        let transaction_v1 = TransactionV1Builder::new_targeting_stored(\n            TransactionInvocationTarget::ByName(\"xyz\".to_string()),\n            \"abc\",\n            TransactionRuntimeParams::VmCasperV1,\n        )\n        .with_chain_name(\"x\".to_string())\n        .with_secret_key(&secret_key)\n        .with_pricing_mode(pricing_mode)\n        .build()\n        .unwrap();\n        let config = build_v1_config();\n\n        let meta_transaction = MetaTransactionV1::from_transaction_v1(&transaction_v1, &config)\n            .expect(\"meta transaction should be valid\");\n        assert_eq!(meta_transaction.lane_id(), 4);\n    }\n\n    fn build_v1_config() -> TransactionV1Config {\n        let mut config = TransactionV1Config::default();\n        config.set_wasm_lanes(vec![\n            TransactionLaneDefinition::new(3, 10000, 100, 100, 10),\n            TransactionLaneDefinition::new(4, 10001, 100, 10000, 10),\n            TransactionLaneDefinition::new(5, 10002, 100, 1000, 10),\n        ]);\n        config\n    }\n}\n"
  },
  {
    "path": "node/src/types/transaction/meta_transaction/transaction_header.rs",
    "content": "use casper_types::{DeployHeader, InitiatorAddr, TimeDiff, Timestamp, Transaction, TransactionV1};\nuse core::fmt::{self, Display, Formatter};\nuse datasize::DataSize;\nuse serde::Serialize;\n\n#[derive(Debug, Clone, DataSize, PartialEq, Eq, Serialize)]\npub(crate) struct TransactionV1Metadata {\n    initiator_addr: InitiatorAddr,\n    timestamp: Timestamp,\n    ttl: TimeDiff,\n}\n\nimpl TransactionV1Metadata {\n    pub(crate) fn initiator_addr(&self) -> &InitiatorAddr {\n        &self.initiator_addr\n    }\n\n    pub(crate) fn timestamp(&self) -> Timestamp {\n        self.timestamp\n    }\n\n    pub(crate) fn ttl(&self) -> TimeDiff {\n        self.ttl\n    }\n}\n\nimpl Display for TransactionV1Metadata {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(\n            formatter,\n            \"transaction-v1-metadata[initiator_addr: {}]\",\n            self.initiator_addr,\n        )\n    }\n}\n\n#[derive(Debug, Clone, DataSize, Serialize, PartialEq, Eq)]\n/// A versioned wrapper for a transaction header or deploy header.\npub(crate) enum TransactionHeader {\n    Deploy(DeployHeader),\n    V1(TransactionV1Metadata),\n}\n\nimpl From<DeployHeader> for TransactionHeader {\n    fn from(header: DeployHeader) -> Self {\n        Self::Deploy(header)\n    }\n}\n\nimpl From<&TransactionV1> for TransactionHeader {\n    fn from(transaction_v1: &TransactionV1) -> Self {\n        let meta = TransactionV1Metadata {\n            initiator_addr: transaction_v1.initiator_addr().clone(),\n            timestamp: transaction_v1.timestamp(),\n            ttl: transaction_v1.ttl(),\n        };\n        Self::V1(meta)\n    }\n}\n\nimpl From<&Transaction> for TransactionHeader {\n    fn from(transaction: &Transaction) -> Self {\n        match transaction {\n            Transaction::Deploy(deploy) => deploy.header().clone().into(),\n            Transaction::V1(v1) => v1.into(),\n        }\n    }\n}\n\nimpl Display for TransactionHeader {\n    fn 
fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            TransactionHeader::Deploy(header) => Display::fmt(header, formatter),\n            TransactionHeader::V1(meta) => Display::fmt(meta, formatter),\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/types/transaction/meta_transaction.rs",
    "content": "mod meta_deploy;\nmod meta_transaction_v1;\nmod transaction_header;\nuse casper_execution_engine::engine_state::{SessionDataDeploy, SessionDataV1, SessionInputData};\n#[cfg(test)]\nuse casper_types::InvalidTransactionV1;\nuse casper_types::{\n    account::AccountHash, bytesrepr::ToBytes, Approval, Chainspec, Digest, ExecutableDeployItem,\n    Gas, GasLimited, HashAddr, InitiatorAddr, InvalidTransaction, Phase, PricingHandling,\n    PricingMode, TimeDiff, Timestamp, Transaction, TransactionArgs, TransactionConfig,\n    TransactionEntryPoint, TransactionHash, TransactionTarget, INSTALL_UPGRADE_LANE_ID,\n};\nuse core::fmt::{self, Debug, Display, Formatter};\nuse meta_deploy::MetaDeploy;\npub(crate) use meta_transaction_v1::MetaTransactionV1;\nuse serde::Serialize;\nuse std::{borrow::Cow, collections::BTreeSet};\npub(crate) use transaction_header::*;\n\n#[cfg(test)]\nuse super::fields_container::{ARGS_MAP_KEY, ENTRY_POINT_MAP_KEY, TARGET_MAP_KEY};\n\n#[derive(Clone, Debug, Serialize)]\npub(crate) enum MetaTransaction {\n    Deploy(MetaDeploy),\n    V1(MetaTransactionV1),\n}\n\nimpl MetaTransaction {\n    /// Returns the `TransactionHash` identifying this transaction.\n    pub(crate) fn hash(&self) -> TransactionHash {\n        match self {\n            MetaTransaction::Deploy(meta_deploy) => {\n                TransactionHash::from(*meta_deploy.deploy().hash())\n            }\n            MetaTransaction::V1(txn) => TransactionHash::from(*txn.hash()),\n        }\n    }\n\n    /// Timestamp.\n    pub(crate) fn timestamp(&self) -> Timestamp {\n        match self {\n            MetaTransaction::Deploy(meta_deploy) => meta_deploy.deploy().header().timestamp(),\n            MetaTransaction::V1(v1) => v1.timestamp(),\n        }\n    }\n\n    /// Time to live.\n    pub(crate) fn ttl(&self) -> TimeDiff {\n        match self {\n            MetaTransaction::Deploy(meta_deploy) => meta_deploy.deploy().header().ttl(),\n            MetaTransaction::V1(v1) => 
v1.ttl(),\n        }\n    }\n\n    /// Returns the `Approval`s for this transaction.\n    pub(crate) fn approvals(&self) -> BTreeSet<Approval> {\n        match self {\n            MetaTransaction::Deploy(meta_deploy) => meta_deploy.deploy().approvals().clone(),\n            MetaTransaction::V1(v1) => v1.approvals().clone(),\n        }\n    }\n\n    /// Returns the address of the initiator of the transaction.\n    pub(crate) fn initiator_addr(&self) -> &InitiatorAddr {\n        match self {\n            MetaTransaction::Deploy(meta_deploy) => meta_deploy.initiator_addr(),\n            MetaTransaction::V1(txn) => txn.initiator_addr(),\n        }\n    }\n\n    /// Returns the set of account hashes corresponding to the public keys of the approvals.\n    pub(crate) fn signers(&self) -> BTreeSet<AccountHash> {\n        match self {\n            MetaTransaction::Deploy(meta_deploy) => meta_deploy\n                .deploy()\n                .approvals()\n                .iter()\n                .map(|approval| approval.signer().to_account_hash())\n                .collect(),\n            MetaTransaction::V1(txn) => txn\n                .approvals()\n                .iter()\n                .map(|approval| approval.signer().to_account_hash())\n                .collect(),\n        }\n    }\n\n    /// Returns `true` if `self` represents a native transfer deploy or a native V1 transaction.\n    pub(crate) fn is_native(&self) -> bool {\n        match self {\n            MetaTransaction::Deploy(meta_deploy) => meta_deploy.deploy().is_transfer(),\n            MetaTransaction::V1(v1_txn) => *v1_txn.target() == TransactionTarget::Native,\n        }\n    }\n\n    /// Should this transaction use standard payment processing?\n    pub(crate) fn is_standard_payment(&self) -> bool {\n        match self {\n            MetaTransaction::Deploy(meta_deploy) => meta_deploy\n                .deploy()\n                .payment()\n                .is_standard_payment(Phase::Payment),\n           
 MetaTransaction::V1(v1) => {\n                if let PricingMode::PaymentLimited {\n                    standard_payment, ..\n                } = v1.pricing_mode()\n                {\n                    *standard_payment\n                } else {\n                    true\n                }\n            }\n        }\n    }\n\n    /// Should this transaction use custom payment processing?\n    pub(crate) fn is_custom_payment(&self) -> bool {\n        match self {\n            MetaTransaction::Deploy(meta_deploy) => !meta_deploy\n                .deploy()\n                .payment()\n                .is_standard_payment(Phase::Payment),\n            MetaTransaction::V1(v1) => {\n                if let PricingMode::PaymentLimited {\n                    standard_payment, ..\n                } = v1.pricing_mode()\n                {\n                    !*standard_payment\n                } else {\n                    false\n                }\n            }\n        }\n    }\n\n    /// Authorization keys.\n    pub(crate) fn authorization_keys(&self) -> BTreeSet<AccountHash> {\n        match self {\n            MetaTransaction::Deploy(meta_deploy) => meta_deploy\n                .deploy()\n                .approvals()\n                .iter()\n                .map(|approval| approval.signer().to_account_hash())\n                .collect(),\n            MetaTransaction::V1(transaction_v1) => transaction_v1\n                .approvals()\n                .iter()\n                .map(|approval| approval.signer().to_account_hash())\n                .collect(),\n        }\n    }\n\n    /// The session args.\n    pub(crate) fn session_args(&self) -> Cow<TransactionArgs> {\n        match self {\n            MetaTransaction::Deploy(meta_deploy) => Cow::Owned(TransactionArgs::Named(\n                meta_deploy.deploy().session().args().clone(),\n            )),\n            MetaTransaction::V1(transaction_v1) => Cow::Borrowed(transaction_v1.args()),\n        }\n    }\n\n    /// 
The entry point.\n    pub(crate) fn entry_point(&self) -> TransactionEntryPoint {\n        match self {\n            MetaTransaction::Deploy(meta_deploy) => {\n                meta_deploy.deploy().session().entry_point_name().into()\n            }\n            MetaTransaction::V1(transaction_v1) => transaction_v1.entry_point().clone(),\n        }\n    }\n\n    /// The transaction lane.\n    pub(crate) fn transaction_lane(&self) -> u8 {\n        match self {\n            MetaTransaction::Deploy(meta_deploy) => meta_deploy.lane_id(),\n            MetaTransaction::V1(v1) => v1.lane_id(),\n        }\n    }\n\n    /// Returns the gas price tolerance.\n    pub(crate) fn gas_price_tolerance(&self) -> Result<u8, InvalidTransaction> {\n        match self {\n            MetaTransaction::Deploy(meta_deploy) => meta_deploy\n                .deploy()\n                .gas_price_tolerance()\n                .map_err(InvalidTransaction::from),\n            MetaTransaction::V1(v1) => Ok(v1.gas_price_tolerance()),\n        }\n    }\n\n    pub(crate) fn gas_limit(&self, chainspec: &Chainspec) -> Result<Gas, InvalidTransaction> {\n        match self {\n            MetaTransaction::Deploy(meta_deploy) => meta_deploy\n                .deploy()\n                .gas_limit(chainspec)\n                .map_err(InvalidTransaction::from),\n            MetaTransaction::V1(v1) => v1.gas_limit(chainspec),\n        }\n    }\n\n    /// Is the transaction the original transaction variant.\n    pub(crate) fn is_deploy_transaction(&self) -> bool {\n        match self {\n            MetaTransaction::Deploy(_) => true,\n            MetaTransaction::V1(_) => false,\n        }\n    }\n\n    /// Does this transaction provide the hash addr for a specific contract to invoke directly?\n    pub(crate) fn is_contract_by_hash_invocation(&self) -> bool {\n        self.contract_direct_address().is_some()\n    }\n\n    /// Returns a `hash_addr` for a targeted contract, if known.\n    pub(crate) fn 
contract_direct_address(&self) -> Option<(HashAddr, String)> {\n        match self {\n            MetaTransaction::Deploy(meta_deploy) => {\n                if let ExecutableDeployItem::StoredContractByHash {\n                    hash, entry_point, ..\n                } = meta_deploy.session()\n                {\n                    return Some((hash.value(), entry_point.clone()));\n                }\n            }\n            MetaTransaction::V1(v1) => {\n                return v1.contract_direct_address();\n            }\n        }\n        None\n    }\n\n    /// Create a new `MetaTransaction` from a `Transaction`.\n    pub(crate) fn from_transaction(\n        transaction: &Transaction,\n        pricing_handling: PricingHandling,\n        transaction_config: &TransactionConfig,\n    ) -> Result<Self, InvalidTransaction> {\n        match transaction {\n            Transaction::Deploy(deploy) => MetaDeploy::from_deploy(\n                deploy.clone(),\n                pricing_handling,\n                &transaction_config.transaction_v1_config,\n            )\n            .map(MetaTransaction::Deploy),\n            Transaction::V1(v1) => MetaTransactionV1::from_transaction_v1(\n                v1,\n                &transaction_config.transaction_v1_config,\n            )\n            .map(MetaTransaction::V1),\n        }\n    }\n\n    pub(crate) fn is_config_compliant(\n        &self,\n        chainspec: &Chainspec,\n        timestamp_leeway: TimeDiff,\n        at: Timestamp,\n    ) -> Result<(), InvalidTransaction> {\n        match self {\n            MetaTransaction::Deploy(meta_deploy) => meta_deploy\n                .deploy()\n                .is_config_compliant(chainspec, timestamp_leeway, at)\n                .map_err(InvalidTransaction::from),\n            MetaTransaction::V1(v1) => v1\n                .is_config_compliant(chainspec, timestamp_leeway, at)\n                .map_err(InvalidTransaction::from),\n        }\n    }\n\n    pub(crate) fn 
payload_hash(&self) -> Digest {\n        match self {\n            MetaTransaction::Deploy(meta_deploy) => *meta_deploy.deploy().body_hash(),\n            MetaTransaction::V1(v1) => *v1.payload_hash(),\n        }\n    }\n\n    pub(crate) fn to_session_input_data(&self) -> SessionInputData {\n        let initiator_addr = self.initiator_addr();\n        let is_standard_payment = self.is_standard_payment();\n        match self {\n            MetaTransaction::Deploy(meta_deploy) => {\n                let deploy = meta_deploy.deploy();\n                let data = SessionDataDeploy::new(\n                    deploy.hash(),\n                    deploy.session(),\n                    initiator_addr,\n                    self.signers().clone(),\n                    is_standard_payment,\n                );\n                SessionInputData::DeploySessionData { data }\n            }\n            MetaTransaction::V1(v1) => {\n                let data = SessionDataV1::new(\n                    v1.args().as_named().expect(\"V1 wasm args should be named and validated at the transaction acceptor level\"),\n                    v1.target(),\n                    v1.entry_point(),\n                    v1.lane_id() == INSTALL_UPGRADE_LANE_ID,\n                    v1.hash(),\n                    v1.pricing_mode(),\n                    initiator_addr,\n                    self.signers().clone(),\n                    is_standard_payment,\n                );\n                SessionInputData::SessionDataV1 { data }\n            }\n        }\n    }\n\n    /// Returns the `SessionInputData` for a payment code if present.\n    pub(crate) fn to_payment_input_data(&self) -> SessionInputData {\n        match self {\n            MetaTransaction::Deploy(meta_deploy) => {\n                let initiator_addr = meta_deploy.initiator_addr();\n                let is_standard_payment = matches!(meta_deploy.deploy().payment(), ExecutableDeployItem::ModuleBytes { module_bytes, .. 
} if module_bytes.is_empty());\n                let deploy = meta_deploy.deploy();\n                let data = SessionDataDeploy::new(\n                    deploy.hash(),\n                    deploy.payment(),\n                    initiator_addr,\n                    self.signers().clone(),\n                    is_standard_payment,\n                );\n                SessionInputData::DeploySessionData { data }\n            }\n            MetaTransaction::V1(v1) => {\n                let initiator_addr = v1.initiator_addr();\n\n                let is_standard_payment = if let PricingMode::PaymentLimited {\n                    standard_payment,\n                    ..\n                } = v1.pricing_mode()\n                {\n                    *standard_payment\n                } else {\n                    true\n                };\n\n                // Under V1 transaction we don't have a separate payment code, and custom payment is\n                // executed as session code with a phase set to Payment.\n                let data = SessionDataV1::new(\n                    v1.args().as_named().expect(\"V1 wasm args should be named and validated at the transaction acceptor level\"),\n                    v1.target(),\n                    v1.entry_point(),\n                    v1.lane_id() == INSTALL_UPGRADE_LANE_ID,\n                    v1.hash(),\n                    v1.pricing_mode(),\n                    initiator_addr,\n                    self.signers().clone(),\n                    is_standard_payment,\n                );\n                SessionInputData::SessionDataV1 { data }\n            }\n        }\n    }\n\n    /// Size estimate.\n    pub(crate) fn size_estimate(&self) -> usize {\n        match self {\n            MetaTransaction::Deploy(meta_deploy) => meta_deploy.deploy().serialized_length(),\n            MetaTransaction::V1(v1) => v1.serialized_length(),\n        }\n    }\n\n    pub(crate) fn is_v1_wasm(&self) -> bool {\n        match self {\n      
      MetaTransaction::Deploy(_) => true,\n            MetaTransaction::V1(v1) => v1.is_v1_wasm(),\n        }\n    }\n\n    pub(crate) fn is_v2_wasm(&self) -> bool {\n        match self {\n            MetaTransaction::Deploy(_) => false,\n            MetaTransaction::V1(v1) => v1.is_v2_wasm(),\n        }\n    }\n\n    pub(crate) fn seed(&self) -> Option<[u8; 32]> {\n        match self {\n            MetaTransaction::Deploy(_) => None,\n            MetaTransaction::V1(v1) => v1.seed(),\n        }\n    }\n\n    pub(crate) fn is_install_or_upgrade(&self) -> bool {\n        match self {\n            MetaTransaction::Deploy(_) => false,\n            MetaTransaction::V1(meta_transaction_v1) => {\n                meta_transaction_v1.lane_id() == INSTALL_UPGRADE_LANE_ID\n            }\n        }\n    }\n\n    pub(crate) fn transferred_value(&self) -> Option<u64> {\n        match self {\n            MetaTransaction::Deploy(_) => None,\n            MetaTransaction::V1(v1) => Some(v1.transferred_value()),\n        }\n    }\n\n    pub(crate) fn target(&self) -> Option<TransactionTarget> {\n        match self {\n            MetaTransaction::Deploy(_) => None,\n            MetaTransaction::V1(v1) => Some(v1.target().clone()),\n        }\n    }\n}\n\nimpl Display for MetaTransaction {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            MetaTransaction::Deploy(meta_deploy) => Display::fmt(meta_deploy.deploy(), formatter),\n            MetaTransaction::V1(txn) => Display::fmt(txn, formatter),\n        }\n    }\n}\n\n#[cfg(test)]\n/// Calculates the laned based on properties of the transaction\npub(crate) fn calculate_transaction_lane_for_transaction(\n    transaction: &Transaction,\n    chainspec: &Chainspec,\n) -> Result<u8, InvalidTransaction> {\n    use casper_types::calculate_transaction_lane;\n\n    match transaction {\n        Transaction::Deploy(_) => {\n            let meta = MetaTransaction::from_transaction(\n                
transaction,\n                chainspec.core_config.pricing_handling,\n                &chainspec.transaction_config,\n            )?;\n            Ok(meta.transaction_lane())\n        }\n        Transaction::V1(v1) => {\n            let args_binary_len = v1\n                .payload()\n                .fields()\n                .get(&ARGS_MAP_KEY)\n                .map(|field| field.len())\n                .unwrap_or(0);\n            let target: TransactionTarget =\n                v1.deserialize_field(TARGET_MAP_KEY).map_err(|error| {\n                    InvalidTransaction::V1(InvalidTransactionV1::CouldNotDeserializeField { error })\n                })?;\n            let entry_point: TransactionEntryPoint =\n                v1.deserialize_field(ENTRY_POINT_MAP_KEY).map_err(|error| {\n                    InvalidTransaction::V1(InvalidTransactionV1::CouldNotDeserializeField { error })\n                })?;\n            let serialized_length = v1.serialized_length();\n            let pricing_mode = v1.payload().pricing_mode();\n            calculate_transaction_lane(\n                &entry_point,\n                &target,\n                pricing_mode,\n                &chainspec.transaction_config.transaction_v1_config,\n                serialized_length as u64,\n                args_binary_len as u64,\n            )\n            .map_err(InvalidTransaction::V1)\n        }\n    }\n}\n\n#[cfg(test)]\nmod proptests {\n    use super::*;\n    use casper_types::{gens::legal_transaction_arb, TransactionLaneDefinition};\n    use proptest::prelude::*;\n\n    proptest! 
{\n        #[test]\n        fn construction_roundtrip(transaction in legal_transaction_arb()) {\n            let mut transaction_config = TransactionConfig::default();\n            transaction_config.transaction_v1_config.set_wasm_lanes(vec![\n                TransactionLaneDefinition::new(3, u64::MAX / 2, 10000, u64::MAX / 2, 10),\n                TransactionLaneDefinition::new(4, u64::MAX, 10000, u64::MAX, 10),\n                ]);\n            let maybe_transaction = MetaTransaction::from_transaction(&transaction, PricingHandling::PaymentLimited, &transaction_config);\n            prop_assert!(maybe_transaction.is_ok(), \"{:?}\", maybe_transaction);\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/types/transaction/transaction_footprint.rs",
    "content": "use crate::types::MetaTransaction;\n#[cfg(test)]\nuse casper_types::{testing::TestRng, U512};\nuse casper_types::{\n    Approval, Chainspec, Digest, Gas, InvalidTransaction, InvalidTransactionV1, TimeDiff,\n    Timestamp, Transaction, TransactionHash, AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID,\n    MINT_LANE_ID,\n};\nuse datasize::DataSize;\n#[cfg(test)]\nuse rand::Rng;\nuse serde::{Deserialize, Serialize};\nuse std::collections::BTreeSet;\n\n#[derive(Clone, Debug, DataSize, Eq, PartialEq, Serialize, Deserialize)]\n#[serde(deny_unknown_fields)]\n/// The block footprint of a transaction.\npub(crate) struct TransactionFootprint {\n    /// The identifying hash.\n    pub(crate) transaction_hash: TransactionHash,\n    /// Transaction payload hash.\n    pub(crate) payload_hash: Digest,\n    /// The estimated gas consumption.\n    pub(crate) gas_limit: Gas,\n    /// The gas tolerance.\n    pub(crate) gas_price_tolerance: u8,\n    /// The bytesrepr serialized length.\n    pub(crate) size_estimate: usize,\n    /// The transaction lane_id.\n    pub(crate) lane_id: u8,\n    /// Timestamp of the transaction.\n    pub(crate) timestamp: Timestamp,\n    /// Time to live for the transaction.\n    pub(crate) ttl: TimeDiff,\n    /// The approvals.\n    pub(crate) approvals: BTreeSet<Approval>,\n}\n\nimpl TransactionFootprint {\n    pub(crate) fn new(\n        chainspec: &Chainspec,\n        transaction: &Transaction,\n    ) -> Result<Self, InvalidTransaction> {\n        let transaction = MetaTransaction::from_transaction(\n            transaction,\n            chainspec.core_config.pricing_handling,\n            &chainspec.transaction_config,\n        )?;\n        Self::new_from_meta_transaction(chainspec, &transaction)\n    }\n\n    fn new_from_meta_transaction(\n        chainspec: &Chainspec,\n        transaction: &MetaTransaction,\n    ) -> Result<Self, InvalidTransaction> {\n        let gas_price_tolerance = transaction.gas_price_tolerance()?;\n        let 
gas_limit = transaction.gas_limit(chainspec)?;\n        let lane_id = transaction.transaction_lane();\n        if !chainspec\n            .transaction_config\n            .transaction_v1_config\n            .is_supported(lane_id)\n        {\n            return Err(InvalidTransaction::V1(\n                InvalidTransactionV1::InvalidTransactionLane(lane_id),\n            ));\n        }\n        let transaction_hash = transaction.hash();\n        let size_estimate = transaction.size_estimate();\n        let payload_hash = transaction.payload_hash();\n        let timestamp = transaction.timestamp();\n        let ttl = transaction.ttl();\n        let approvals = transaction.approvals();\n        Ok(TransactionFootprint {\n            transaction_hash,\n            payload_hash,\n            gas_limit,\n            gas_price_tolerance,\n            size_estimate,\n            lane_id,\n            timestamp,\n            ttl,\n            approvals,\n        })\n    }\n\n    /// Sets approvals.\n    pub(crate) fn with_approvals(mut self, approvals: BTreeSet<Approval>) -> Self {\n        self.approvals = approvals;\n        self\n    }\n\n    /// The approval count, if known.\n    pub(crate) fn approvals_count(&self) -> usize {\n        self.approvals.len()\n    }\n\n    /// Is mint interaction.\n    pub(crate) fn is_mint(&self) -> bool {\n        if self.lane_id == MINT_LANE_ID {\n            return true;\n        }\n\n        false\n    }\n\n    /// Is auction interaction.\n    pub(crate) fn is_auction(&self) -> bool {\n        if self.lane_id == AUCTION_LANE_ID {\n            return true;\n        }\n\n        false\n    }\n\n    pub(crate) fn is_install_upgrade(&self) -> bool {\n        if self.lane_id == INSTALL_UPGRADE_LANE_ID {\n            return true;\n        }\n\n        false\n    }\n\n    pub(crate) fn is_wasm_based(&self) -> bool {\n        if !self.is_mint() && !self.is_auction() && !self.is_install_upgrade() {\n            return true;\n        }\n\n     
   false\n    }\n\n    pub(crate) fn gas_price_tolerance(&self) -> u8 {\n        self.gas_price_tolerance\n    }\n\n    #[cfg(test)]\n    pub fn random_of_lane(lane_id: u8, rng: &mut TestRng) -> Self {\n        let transaction_hash = TransactionHash::random(rng);\n        let payload_hash = Digest::random(rng);\n        let gas_limit = Gas::new(U512::from(1));\n        let gas_price_tolerance = rng.gen();\n        let size_estimate = rng.gen_range(1000..2000);\n        let timestamp = Timestamp::now();\n        let ttl = TimeDiff::from_millis(15000);\n        let mut approvals = BTreeSet::new();\n        approvals.insert(Approval::random(rng));\n        TransactionFootprint {\n            transaction_hash,\n            payload_hash,\n            gas_limit,\n            gas_price_tolerance,\n            size_estimate,\n            lane_id,\n            timestamp,\n            ttl,\n            approvals,\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/types/transaction/transaction_v1_builder.rs",
    "content": "#[cfg(test)]\nuse super::arg_handling;\nuse super::fields_container::{FieldsContainer, FieldsContainerError};\nuse crate::types::transaction::initiator_addr_and_secret_key::InitiatorAddrAndSecretKey;\nuse casper_types::{\n    bytesrepr::{Bytes, ToBytes},\n    Digest, InitiatorAddr, PricingMode, RuntimeArgs, SecretKey, TimeDiff, Timestamp,\n    TransactionArgs, TransactionEntryPoint, TransactionRuntimeParams, TransactionScheduling,\n    TransactionTarget, TransactionV1, TransactionV1Payload,\n};\n#[cfg(test)]\nuse casper_types::{\n    contracts::ProtocolVersionMajor, system::auction::Reservation, testing::TestRng,\n    AddressableEntityHash, Approval, CLValueError, EntityVersion, PackageHash, PublicKey,\n    TransactionConfig, TransactionInvocationTarget, TransferTarget, URef, U512,\n};\nuse core::marker::PhantomData;\n#[cfg(test)]\nuse rand::Rng;\nuse std::collections::{BTreeMap, BTreeSet};\n\n/// A builder for constructing `TransactionV1` instances with various configuration options.\n///\n/// The `TransactionV1Builder` provides a flexible API for specifying different transaction\n/// parameters like the target, scheduling, entry point, and signing options. Once all the required\n/// fields are set, the transaction can be built by calling [`build`](Self::build).\n///\n/// # Fields\n///\n/// - `args`: Arguments passed to the transaction's runtime, initialized to\n///   [`RuntimeArgs::new`](RuntimeArgs::new).\n/// - `target`: Specifies the target of the transaction, which can be native or other custom\n///   targets. Defaults to [`TransactionTarget::Native`](TransactionTarget::Native).\n/// - `scheduling`: Determines the scheduling mechanism of the transaction, e.g., standard or\n///   immediate, and is initialized to\n///   [`TransactionScheduling::Standard`](TransactionScheduling::Standard).\n/// - `entry_point`: Defines the transaction's entry point, such as transfer or another defined\n///   action. 
Defaults to [`TransactionEntryPoint::Transfer`](TransactionEntryPoint::Transfer).\n/// - `chain_name`: The name of the blockchain where the transaction will be executed. Initially set\n///   to `None` and must be provided before building the transaction.\n///\n/// ## Time-Related Fields\n/// - `timestamp`: The timestamp at which the transaction is created. It is either set to the\n///   current time using [`Timestamp::now`](Timestamp::now) or [`Timestamp::zero`](Timestamp::zero)\n///   without the `std-fs-io` feature.\n/// - `ttl`: Time-to-live for the transaction, specified as a [`TimeDiff`], representing how long\n///   the transaction is valid for execution. Defaults to [`Self::DEFAULT_TTL`].\n///\n/// ## Pricing and Initiator Fields\n/// - `pricing_mode`: Specifies the pricing mode to use for transaction execution (e.g., fixed or\n///   dynamic). Defaults to [`Self::DEFAULT_PRICING_MODE`].\n/// - `initiator_addr`: The address of the initiator who creates and signs the transaction.\n///   Initially set to `None` and must be set before building.\n///\n/// ## Signing Fields\n/// - `secret_key`: The secret key used to sign the transaction. This field is conditional based on\n///   the compilation environment:\n///    - In normal mode, it holds a reference to the secret key (`Option<&'a SecretKey>`).\n///    - In testing mode or with the `std` feature enabled, it holds an owned secret key\n///      (`Option<SecretKey>`).\n///\n/// ## Invalid Approvals\n/// - `invalid_approvals`: A collection of invalid approvals used for testing purposes. 
This field\n///   is available only when the `std` or `testing` features are enabled, or in a test environment.\n///\n/// ## Phantom Data\n/// - `_phantom_data`: Ensures the correct lifetime `'a` is respected for the builder, helping with\n///   proper borrowing and memory safety.\n#[derive(Debug)]\npub(crate) struct TransactionV1Builder<'a> {\n    /// Arguments passed to the transaction's runtime.\n    args: TransactionArgs,\n    /// The target of the transaction (e.g., native).\n    target: TransactionTarget,\n    /// Defines how the transaction is scheduled (e.g., standard, immediate).\n    scheduling: TransactionScheduling,\n    /// Specifies the entry point of the transaction (e.g., transfer).\n    entry_point: TransactionEntryPoint,\n    /// The name of the blockchain where the transaction will be executed.\n    chain_name: Option<String>,\n    /// The timestamp of the transaction.\n    timestamp: Timestamp,\n    /// The time-to-live for the transaction, representing how long it's valid for execution.\n    ttl: TimeDiff,\n    /// The pricing mode used for the transaction's execution cost.\n    pricing_mode: PricingMode,\n    /// The address of the transaction initiator.\n    initiator_addr: Option<InitiatorAddr>,\n    /// The secret key used for signing the transaction (in normal mode).\n    #[cfg(not(test))]\n    secret_key: Option<&'a SecretKey>,\n    /// The secret key used for signing the transaction (in testing or with `std` feature).\n    #[cfg(test)]\n    secret_key: Option<SecretKey>,\n    /// A list of invalid approvals for testing purposes.\n    #[cfg(test)]\n    invalid_approvals: Vec<Approval>,\n    /// Additional fields\n    #[cfg(test)]\n    additional_fields: BTreeMap<u16, Bytes>,\n    /// Phantom data to ensure the correct lifetime for references.\n    _phantom_data: PhantomData<&'a ()>,\n}\n\nimpl<'a> TransactionV1Builder<'a> {\n    /// The default time-to-live for transactions, i.e. 
30 minutes.\n    pub const DEFAULT_TTL: TimeDiff = TimeDiff::from_millis(30 * 60 * 1_000);\n    /// The default pricing mode for v1 transactions, ie FIXED cost.\n    pub const DEFAULT_PRICING_MODE: PricingMode = PricingMode::PaymentLimited {\n        payment_amount: 10_000_000_000,\n        gas_price_tolerance: 3,\n        standard_payment: true,\n    };\n    /// The default scheduling for transactions, i.e. `Standard`.\n    pub const DEFAULT_SCHEDULING: TransactionScheduling = TransactionScheduling::Standard;\n\n    /// Creates a new `TransactionV1Builder` instance with default settings.\n    ///\n    /// # Important\n    ///\n    /// Before calling [`build`](Self::build), you must ensure that either:\n    /// - A chain name is provided by calling [`with_chain_name`](Self::with_chain_name),\n    /// - An initiator address is set by calling [`with_initiator_addr`](Self::with_initiator_addr),\n    /// - or a secret key is set by calling [`with_secret_key`](Self::with_secret_key).\n    ///\n    /// # Default Values\n    /// This function sets the following default values upon creation:\n    ///\n    /// - `chain_name`: Initialized to `None`.\n    /// - `timestamp`: Set to the current time using [`Timestamp::now`](Timestamp::now), or\n    ///   [`Timestamp::zero`](Timestamp::zero) if the `std-fs-io` feature is disabled.\n    /// - `ttl`: Defaults to [`Self::DEFAULT_TTL`].\n    /// - `pricing_mode`: Defaults to [`Self::DEFAULT_PRICING_MODE`].\n    /// - `initiator_addr`: Initialized to `None`.\n    /// - `secret_key`: Initialized to `None`.\n    ///\n    /// Additionally, the following internal fields are configured:\n    ///\n    /// - `args`: Initialized to an empty [`RuntimeArgs::new`](RuntimeArgs::new).\n    /// - `entry_point`: Set to\n    ///   [`TransactionEntryPoint::Transfer`](TransactionEntryPoint::Transfer).\n    /// - `target`: Defaults to [`TransactionTarget::Native`](TransactionTarget::Native).\n    /// - `scheduling`: Defaults to\n    ///   
[`TransactionScheduling::Standard`](TransactionScheduling::Standard).\n    ///\n    /// # Testing and Additional Configuration\n    ///\n    /// - If the `std` or `testing` feature is enabled, or in test configurations, the\n    ///   `invalid_approvals` field is initialized as an empty vector.\n    ///\n    /// # Returns\n    ///\n    /// A new `TransactionV1Builder` instance.\n    pub(crate) fn new() -> Self {\n        let timestamp = Timestamp::now();\n\n        TransactionV1Builder {\n            args: TransactionArgs::Named(RuntimeArgs::new()),\n            entry_point: TransactionEntryPoint::Transfer,\n            target: TransactionTarget::Native,\n            scheduling: TransactionScheduling::Standard,\n            chain_name: None,\n            timestamp,\n            ttl: Self::DEFAULT_TTL,\n            pricing_mode: Self::DEFAULT_PRICING_MODE,\n            initiator_addr: None,\n            secret_key: None,\n            _phantom_data: PhantomData,\n            #[cfg(test)]\n            invalid_approvals: vec![],\n            #[cfg(test)]\n            additional_fields: BTreeMap::new(),\n        }\n    }\n\n    /// Returns a new `TransactionV1Builder` suitable for building a native transfer transaction.\n    #[cfg(test)]\n    pub(crate) fn new_transfer<A: Into<U512>, T: Into<TransferTarget>>(\n        amount: A,\n        maybe_source: Option<URef>,\n        target: T,\n        maybe_id: Option<u64>,\n    ) -> Result<Self, CLValueError> {\n        let args = arg_handling::new_transfer_args(amount, maybe_source, target, maybe_id)?;\n        let mut builder = TransactionV1Builder::new();\n        builder.args = TransactionArgs::Named(args);\n        builder.target = TransactionTarget::Native;\n        builder.entry_point = TransactionEntryPoint::Transfer;\n        builder.scheduling = Self::DEFAULT_SCHEDULING;\n        Ok(builder)\n    }\n\n    /// Returns a new `TransactionV1Builder` suitable for building a native burn transaction.\n    #[cfg(test)]\n    
pub(crate) fn new_burn<A: Into<U512>>(\n        amount: A,\n        maybe_source: Option<URef>,\n    ) -> Result<Self, CLValueError> {\n        let args = arg_handling::new_burn_args(amount, maybe_source)?;\n        let mut builder = TransactionV1Builder::new();\n        builder.args = TransactionArgs::Named(args);\n        builder.entry_point = TransactionEntryPoint::Burn;\n        builder.scheduling = Self::DEFAULT_SCHEDULING;\n        Ok(builder)\n    }\n\n    /// Returns a new `TransactionV1Builder` suitable for building a native reserve slot\n    /// transaction.\n    #[cfg(test)]\n    pub(crate) fn new_reserve_slot(reservations: Vec<Reservation>) -> Result<Self, CLValueError> {\n        let args = arg_handling::new_add_reservations_args(reservations)?;\n        let mut builder = TransactionV1Builder::new();\n        builder.args = TransactionArgs::Named(args);\n        builder.target = TransactionTarget::Native;\n        builder.entry_point = TransactionEntryPoint::AddReservations;\n        builder.scheduling = Self::DEFAULT_SCHEDULING;\n        Ok(builder)\n    }\n\n    /// Returns a new `TransactionV1Builder` suitable for building a native add_bid\n    /// transaction.\n    #[cfg(test)]\n    pub(crate) fn new_add_bid<A: Into<U512>>(\n        public_key: PublicKey,\n        delegation_rate: u8,\n        amount: A,\n        minimum_delegation_amount: Option<u64>,\n        maximum_delegation_amount: Option<u64>,\n        reserved_slots: Option<u32>,\n    ) -> Result<Self, CLValueError> {\n        let args = arg_handling::new_add_bid_args(\n            public_key,\n            delegation_rate,\n            amount,\n            minimum_delegation_amount,\n            maximum_delegation_amount,\n            reserved_slots,\n        )?;\n        let mut builder = TransactionV1Builder::new();\n        builder.args = TransactionArgs::Named(args);\n        builder.target = TransactionTarget::Native;\n        builder.entry_point = TransactionEntryPoint::AddBid;\n      
  builder.scheduling = Self::DEFAULT_SCHEDULING;\n        Ok(builder)\n    }\n\n    /// Returns a new `TransactionV1Builder` suitable for building a native withdraw_bid\n    /// transaction.\n    #[cfg(test)]\n    pub(crate) fn new_withdraw_bid<A: Into<U512>>(\n        public_key: PublicKey,\n        amount: A,\n    ) -> Result<Self, CLValueError> {\n        let args = arg_handling::new_withdraw_bid_args(public_key, amount)?;\n        let mut builder = TransactionV1Builder::new();\n        builder.args = TransactionArgs::Named(args);\n        builder.target = TransactionTarget::Native;\n        builder.entry_point = TransactionEntryPoint::WithdrawBid;\n        builder.scheduling = Self::DEFAULT_SCHEDULING;\n        Ok(builder)\n    }\n\n    /// Returns a new `TransactionV1Builder` suitable for building a native delegate transaction.\n    #[cfg(test)]\n    pub(crate) fn new_delegate<A: Into<U512>>(\n        delegator: PublicKey,\n        validator: PublicKey,\n        amount: A,\n    ) -> Result<Self, CLValueError> {\n        let args = arg_handling::new_delegate_args(delegator, validator, amount)?;\n        let mut builder = TransactionV1Builder::new();\n        builder.args = TransactionArgs::Named(args);\n        builder.target = TransactionTarget::Native;\n        builder.entry_point = TransactionEntryPoint::Delegate;\n        builder.scheduling = Self::DEFAULT_SCHEDULING;\n        Ok(builder)\n    }\n\n    /// Returns a new `TransactionV1Builder` suitable for building a native undelegate transaction.\n    #[cfg(test)]\n    pub(crate) fn new_undelegate<A: Into<U512>>(\n        delegator: PublicKey,\n        validator: PublicKey,\n        amount: A,\n    ) -> Result<Self, CLValueError> {\n        let args = arg_handling::new_undelegate_args(delegator, validator, amount)?;\n        let mut builder = TransactionV1Builder::new();\n        builder.args = TransactionArgs::Named(args);\n        builder.target = TransactionTarget::Native;\n        builder.entry_point = 
TransactionEntryPoint::Undelegate;\n        builder.scheduling = Self::DEFAULT_SCHEDULING;\n        Ok(builder)\n    }\n\n    /// Returns a new `TransactionV1Builder` suitable for building a native redelegate transaction.\n    #[cfg(test)]\n    pub(crate) fn new_redelegate<A: Into<U512>>(\n        delegator: PublicKey,\n        validator: PublicKey,\n        amount: A,\n        new_validator: PublicKey,\n    ) -> Result<Self, CLValueError> {\n        let args = arg_handling::new_redelegate_args(delegator, validator, amount, new_validator)?;\n        let mut builder = TransactionV1Builder::new();\n        builder.args = TransactionArgs::Named(args);\n        builder.target = TransactionTarget::Native;\n        builder.entry_point = TransactionEntryPoint::Redelegate;\n        builder.scheduling = Self::DEFAULT_SCHEDULING;\n        Ok(builder)\n    }\n\n    #[cfg(test)]\n    pub(crate) fn new_targeting_stored<E: Into<String>>(\n        id: TransactionInvocationTarget,\n        entry_point: E,\n        runtime: TransactionRuntimeParams,\n    ) -> Self {\n        let target = TransactionTarget::Stored { id, runtime };\n        let mut builder = TransactionV1Builder::new();\n        builder.args = TransactionArgs::Named(RuntimeArgs::new());\n        builder.target = target;\n        builder.entry_point = TransactionEntryPoint::Custom(entry_point.into());\n        builder.scheduling = Self::DEFAULT_SCHEDULING;\n        builder\n    }\n\n    #[cfg(test)]\n    pub(crate) fn new_targeting_stored_with_runtime_args<E: Into<String>>(\n        id: TransactionInvocationTarget,\n        entry_point: E,\n        runtime: TransactionRuntimeParams,\n        runtime_args: RuntimeArgs,\n    ) -> Self {\n        let target = TransactionTarget::Stored { id, runtime };\n        let mut builder = TransactionV1Builder::new();\n        builder.args = TransactionArgs::Named(runtime_args);\n        builder.target = target;\n        builder.entry_point = 
TransactionEntryPoint::Custom(entry_point.into());\n        builder.scheduling = Self::DEFAULT_SCHEDULING;\n        builder\n    }\n\n    /// Returns a new `TransactionV1Builder` suitable for building a transaction targeting a stored\n    /// entity.\n    #[cfg(test)]\n    pub(crate) fn new_targeting_invocable_entity<E: Into<String>>(\n        hash: AddressableEntityHash,\n        entry_point: E,\n        runtime: TransactionRuntimeParams,\n    ) -> Self {\n        let id = TransactionInvocationTarget::new_invocable_entity(hash);\n        Self::new_targeting_stored(id, entry_point, runtime)\n    }\n\n    /// Returns a new `TransactionV1Builder` suitable for building a transaction targeting a stored\n    /// entity via its alias.\n    #[cfg(test)]\n    pub(crate) fn new_targeting_invocable_entity_via_alias<A: Into<String>, E: Into<String>>(\n        alias: A,\n        entry_point: E,\n        runtime: TransactionRuntimeParams,\n    ) -> Self {\n        let id = TransactionInvocationTarget::new_invocable_entity_alias(alias.into());\n        Self::new_targeting_stored(id, entry_point, runtime)\n    }\n\n    /// Returns a new `TransactionV1Builder` suitable for building a transaction targeting a\n    /// package.\n    #[cfg(test)]\n    pub(crate) fn new_targeting_package<E: Into<String>>(\n        hash: PackageHash,\n        version: Option<EntityVersion>,\n        protocol_version_major: Option<ProtocolVersionMajor>,\n        entry_point: E,\n        runtime: TransactionRuntimeParams,\n    ) -> Self {\n        let id = TransactionInvocationTarget::new_package_with_major(\n            hash,\n            version,\n            protocol_version_major,\n        );\n        Self::new_targeting_stored(id, entry_point, runtime)\n    }\n\n    /// Returns a new `TransactionV1Builder` suitable for building a transaction targeting a\n    /// package.\n    #[cfg(test)]\n    pub(crate) fn new_targeting_package_with_runtime_args<E: Into<String>>(\n        hash: PackageHash,\n        
version: Option<EntityVersion>,\n        protocol_version_major: Option<ProtocolVersionMajor>,\n        entry_point: E,\n        runtime: TransactionRuntimeParams,\n        runtime_args: RuntimeArgs,\n    ) -> Self {\n        let id = TransactionInvocationTarget::ByPackageHash {\n            addr: hash.value(),\n            version,\n            protocol_version_major,\n        };\n        Self::new_targeting_stored_with_runtime_args(id, entry_point, runtime, runtime_args)\n    }\n\n    /// Returns a new `TransactionV1Builder` suitable for building a transaction targeting a\n    /// package via its alias.\n    #[cfg(test)]\n    pub(crate) fn new_targeting_package_via_alias<A: Into<String>, E: Into<String>>(\n        alias: A,\n        version: Option<EntityVersion>,\n        protocol_version_major: Option<ProtocolVersionMajor>,\n        entry_point: E,\n        runtime: TransactionRuntimeParams,\n    ) -> Self {\n        let id = TransactionInvocationTarget::new_package_alias_with_major(\n            alias.into(),\n            version,\n            protocol_version_major,\n        );\n        Self::new_targeting_stored(id, entry_point, runtime)\n    }\n\n    /// Returns a new `TransactionV1Builder` suitable for building a transaction for running session\n    /// logic, i.e. 
compiled Wasm.\n    pub(crate) fn new_session(\n        is_install_upgrade: bool,\n        module_bytes: Bytes,\n        runtime: TransactionRuntimeParams,\n    ) -> Self {\n        let target = TransactionTarget::Session {\n            is_install_upgrade,\n            module_bytes,\n            runtime,\n        };\n        let mut builder = TransactionV1Builder::new();\n        builder.args = TransactionArgs::Named(RuntimeArgs::new());\n        builder.target = target;\n        builder.entry_point = TransactionEntryPoint::Call;\n        builder.scheduling = Self::DEFAULT_SCHEDULING;\n        builder\n    }\n\n    /// Returns a new `TransactionV1Builder` suitable for building a transaction for running session\n    /// logic, i.e. compiled Wasm.\n    #[cfg(test)]\n    pub(crate) fn new_session_with_runtime_args(\n        is_install_upgrade: bool,\n        module_bytes: Bytes,\n        runtime: TransactionRuntimeParams,\n        runtime_args: RuntimeArgs,\n    ) -> Self {\n        let target = TransactionTarget::Session {\n            is_install_upgrade,\n            module_bytes,\n            runtime,\n        };\n        let mut builder = TransactionV1Builder::new();\n        builder.args = TransactionArgs::Named(runtime_args);\n        builder.target = target;\n        builder.entry_point = TransactionEntryPoint::Call;\n        builder.scheduling = Self::DEFAULT_SCHEDULING;\n        builder\n    }\n\n    /// Returns a new `TransactionV1Builder` which will build a random, valid but possibly expired\n    /// transaction.\n    ///\n    /// The transaction can be made invalid in the following ways:\n    ///   * unsigned by calling `with_no_secret_key`\n    ///   * given an invalid approval by calling `with_invalid_approval`\n    #[cfg(test)]\n    pub(crate) fn new_random(rng: &mut TestRng) -> Self {\n        let secret_key = SecretKey::random(rng);\n        let ttl_millis = rng.gen_range(60_000..TransactionConfig::default().max_ttl.millis());\n        let fields = 
FieldsContainer::random(rng);\n        TransactionV1Builder {\n            chain_name: Some(rng.random_string(5..10)),\n            timestamp: Timestamp::random(rng),\n            ttl: TimeDiff::from_millis(ttl_millis),\n            args: TransactionArgs::Named(RuntimeArgs::random(rng)),\n            target: fields.target,\n            entry_point: fields.entry_point,\n            scheduling: fields.scheduling,\n            pricing_mode: PricingMode::PaymentLimited {\n                payment_amount: 2_500_000_000,\n                gas_price_tolerance: 3,\n                standard_payment: true,\n            },\n            initiator_addr: Some(InitiatorAddr::PublicKey(PublicKey::from(&secret_key))),\n            secret_key: Some(secret_key),\n            _phantom_data: PhantomData,\n            invalid_approvals: vec![],\n            #[cfg(test)]\n            additional_fields: BTreeMap::new(),\n        }\n    }\n\n    #[cfg(test)]\n    pub(crate) fn new_random_with_category_and_timestamp_and_ttl(\n        rng: &mut TestRng,\n        lane: u8,\n        timestamp: Option<Timestamp>,\n        ttl: Option<TimeDiff>,\n    ) -> Self {\n        let secret_key = SecretKey::random(rng);\n        let ttl_millis = ttl.map_or(\n            rng.gen_range(60_000..TransactionConfig::default().max_ttl.millis()),\n            |ttl| ttl.millis(),\n        );\n        let FieldsContainer {\n            args,\n            target,\n            entry_point,\n            scheduling,\n        } = FieldsContainer::random_of_lane(rng, lane);\n        TransactionV1Builder {\n            chain_name: Some(rng.random_string(5..10)),\n            timestamp: timestamp.unwrap_or(Timestamp::now()),\n            ttl: TimeDiff::from_millis(ttl_millis),\n            args,\n            target,\n            entry_point,\n            scheduling,\n            pricing_mode: PricingMode::PaymentLimited {\n                payment_amount: 2_500_000_000,\n                gas_price_tolerance: 3,\n              
  standard_payment: true,\n            },\n            initiator_addr: Some(InitiatorAddr::PublicKey(PublicKey::from(&secret_key))),\n            secret_key: Some(secret_key),\n            _phantom_data: PhantomData,\n            invalid_approvals: vec![],\n            #[cfg(test)]\n            additional_fields: BTreeMap::new(),\n        }\n    }\n\n    /// Sets the `chain_name` in the transaction.\n    ///\n    /// Must be provided or building will fail.\n    pub(crate) fn with_chain_name<C: Into<String>>(mut self, chain_name: C) -> Self {\n        self.chain_name = Some(chain_name.into());\n        self\n    }\n\n    /// Sets the `timestamp` in the transaction.\n    ///\n    /// If not provided, the timestamp will be set to the time when the builder was constructed.\n    pub(crate) fn with_timestamp(mut self, timestamp: Timestamp) -> Self {\n        self.timestamp = timestamp;\n        self\n    }\n\n    /// Sets the `ttl` (time-to-live) in the transaction.\n    ///\n    /// If not provided, the ttl will be set to [`Self::DEFAULT_TTL`].\n    pub(crate) fn with_ttl(mut self, ttl: TimeDiff) -> Self {\n        self.ttl = ttl;\n        self\n    }\n\n    /// Sets the `pricing_mode` in the transaction.\n    ///\n    /// If not provided, the pricing mode will be set to [`Self::DEFAULT_PRICING_MODE`].\n    #[cfg(test)]\n    pub(crate) fn with_pricing_mode(mut self, pricing_mode: PricingMode) -> Self {\n        self.pricing_mode = pricing_mode;\n        self\n    }\n\n    /// Sets the `initiator_addr` in the transaction.\n    ///\n    /// If not provided, the public key derived from the secret key used in the builder will be\n    /// used as the `InitiatorAddr::PublicKey` in the transaction.\n    #[cfg(test)]\n    pub(crate) fn with_initiator_addr<I: Into<InitiatorAddr>>(mut self, initiator_addr: I) -> Self {\n        self.initiator_addr = Some(initiator_addr.into());\n        self\n    }\n\n    /// Sets the secret key used to sign the transaction on calling 
[`build`](Self::build).\n    ///\n    /// If not provided, the transaction can still be built, but will be unsigned and will be\n    /// invalid until subsequently signed.\n    pub(crate) fn with_secret_key(mut self, secret_key: &'a SecretKey) -> Self {\n        #[cfg(not(test))]\n        {\n            self.secret_key = Some(secret_key);\n        }\n        #[cfg(test)]\n        {\n            self.secret_key = Some(\n                SecretKey::from_der(secret_key.to_der().expect(\"should der-encode\"))\n                    .expect(\"should der-decode\"),\n            );\n        }\n        self\n    }\n\n    /// Manually sets additional fields\n    #[cfg(test)]\n    pub(crate) fn with_additional_fields(\n        mut self,\n        additional_fields: BTreeMap<u16, Bytes>,\n    ) -> Self {\n        self.additional_fields = additional_fields;\n        self\n    }\n\n    /// Sets the runtime args in the transaction.\n    ///\n    /// NOTE: this overwrites any existing runtime args.  To append to existing args, use\n    /// [`TransactionV1Builder::with_runtime_arg`].\n    #[cfg(test)]\n    pub(crate) fn with_runtime_args(mut self, args: RuntimeArgs) -> Self {\n        self.args = TransactionArgs::Named(args);\n        self\n    }\n\n    /// Sets the transaction args in the transaction.\n    ///\n    /// NOTE: this overwrites any existing transaction_args args.\n    #[cfg(test)]\n    pub fn with_transaction_args(mut self, args: TransactionArgs) -> Self {\n        self.args = args;\n        self\n    }\n\n    /// Returns the new transaction, or an error if non-defaulted fields were not set.\n    ///\n    /// For more info, see [the `TransactionBuilder` documentation](TransactionV1Builder).\n    pub(crate) fn build(self) -> Result<TransactionV1, TransactionV1BuilderError> {\n        self.do_build()\n    }\n\n    #[cfg(not(test))]\n    fn do_build(self) -> Result<TransactionV1, TransactionV1BuilderError> {\n        let initiator_addr_and_secret_key = match 
(self.initiator_addr, self.secret_key) {\n            (Some(initiator_addr), Some(secret_key)) => InitiatorAddrAndSecretKey::Both {\n                initiator_addr,\n                secret_key,\n            },\n            (Some(initiator_addr), None) => {\n                InitiatorAddrAndSecretKey::InitiatorAddr(initiator_addr)\n            }\n            (None, Some(secret_key)) => InitiatorAddrAndSecretKey::SecretKey(secret_key),\n            (None, None) => return Err(TransactionV1BuilderError::MissingInitiatorAddr),\n        };\n\n        let chain_name = self\n            .chain_name\n            .ok_or(TransactionV1BuilderError::MissingChainName)?;\n\n        let container =\n            FieldsContainer::new(self.args, self.target, self.entry_point, self.scheduling)\n                .to_map()\n                .map_err(|err| match err {\n                    FieldsContainerError::CouldNotSerializeField { field_index } => {\n                        TransactionV1BuilderError::CouldNotSerializeField { field_index }\n                    }\n                })?;\n\n        let transaction = build_transaction(\n            chain_name,\n            self.timestamp,\n            self.ttl,\n            self.pricing_mode,\n            container,\n            initiator_addr_and_secret_key,\n        );\n\n        Ok(transaction)\n    }\n\n    #[cfg(test)]\n    fn do_build(self) -> Result<TransactionV1, TransactionV1BuilderError> {\n        let initiator_addr_and_secret_key = match (self.initiator_addr, &self.secret_key) {\n            (Some(initiator_addr), Some(secret_key)) => InitiatorAddrAndSecretKey::Both {\n                initiator_addr,\n                secret_key,\n            },\n            (Some(initiator_addr), None) => {\n                InitiatorAddrAndSecretKey::InitiatorAddr(initiator_addr)\n            }\n            (None, Some(secret_key)) => InitiatorAddrAndSecretKey::SecretKey(secret_key),\n            (None, None) => return 
Err(TransactionV1BuilderError::MissingInitiatorAddr),\n        };\n\n        let chain_name = self\n            .chain_name\n            .ok_or(TransactionV1BuilderError::MissingChainName)?;\n        let mut container =\n            FieldsContainer::new(self.args, self.target, self.entry_point, self.scheduling)\n                .to_map()\n                .map_err(|err| match err {\n                    FieldsContainerError::CouldNotSerializeField { field_index } => {\n                        TransactionV1BuilderError::CouldNotSerializeField { field_index }\n                    }\n                })?;\n        let mut additional_fields = self.additional_fields.clone();\n        container.append(&mut additional_fields);\n\n        let mut transaction = build_transaction(\n            chain_name,\n            self.timestamp,\n            self.ttl,\n            self.pricing_mode,\n            container,\n            initiator_addr_and_secret_key,\n        );\n\n        transaction.apply_approvals(self.invalid_approvals);\n\n        Ok(transaction)\n    }\n}\n\nfn build_transaction(\n    chain_name: String,\n    timestamp: Timestamp,\n    ttl: TimeDiff,\n    pricing_mode: PricingMode,\n    fields: BTreeMap<u16, Bytes>,\n    initiator_addr_and_secret_key: InitiatorAddrAndSecretKey,\n) -> TransactionV1 {\n    let initiator_addr = initiator_addr_and_secret_key.initiator_addr();\n    let transaction_v1_payload = TransactionV1Payload::new(\n        chain_name,\n        timestamp,\n        ttl,\n        pricing_mode,\n        initiator_addr,\n        fields,\n    );\n    let hash = Digest::hash(\n        transaction_v1_payload\n            .to_bytes()\n            .unwrap_or_else(|error| panic!(\"should serialize body: {}\", error)),\n    );\n    let mut transaction = TransactionV1::new(hash.into(), transaction_v1_payload, BTreeSet::new());\n\n    if let Some(secret_key) = initiator_addr_and_secret_key.secret_key() {\n        transaction.sign(secret_key);\n    }\n    
transaction\n}\n\nuse core::fmt::{self, Display, Formatter};\n\n/// Errors returned while building a [`TransactionV1`] using a [`TransactionV1Builder`].\n#[derive(Clone, Eq, PartialEq, Debug)]\n#[non_exhaustive]\npub(crate) enum TransactionV1BuilderError {\n    /// Failed to build transaction due to missing initiator_addr.\n    ///\n    /// Call [`TransactionV1Builder::with_initiator_addr`] or\n    /// [`TransactionV1Builder::with_secret_key`] before calling [`TransactionV1Builder::build`].\n    MissingInitiatorAddr,\n    /// Failed to build transaction due to missing chain name.\n    ///\n    /// Call [`TransactionV1Builder::with_chain_name`] before calling\n    /// [`TransactionV1Builder::build`].\n    MissingChainName,\n    /// Failed to build transaction due to an error when calling `to_bytes` on one of the payload\n    /// `field`.\n    CouldNotSerializeField {\n        /// The field index that failed to serialize.\n        field_index: u16,\n    },\n}\n\nimpl Display for TransactionV1BuilderError {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            TransactionV1BuilderError::MissingInitiatorAddr => {\n                write!(\n                    formatter,\n                    \"transaction requires account - use `with_account` or `with_secret_key`\"\n                )\n            }\n            TransactionV1BuilderError::MissingChainName => {\n                write!(\n                    formatter,\n                    \"transaction requires chain name - use `with_chain_name`\"\n                )\n            }\n            TransactionV1BuilderError::CouldNotSerializeField { field_index } => {\n                write!(formatter, \"Cannot serialize field at index {}\", field_index)\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/types/transaction.rs",
    "content": "pub(crate) mod arg_handling;\nmod deploy;\nmod meta_transaction;\nmod transaction_footprint;\npub(crate) use deploy::LegacyDeploy;\n#[cfg(test)]\npub(crate) use meta_transaction::calculate_transaction_lane_for_transaction;\npub(crate) use meta_transaction::{MetaTransaction, TransactionHeader};\npub(crate) use transaction_footprint::TransactionFootprint;\npub(crate) mod fields_container;\npub(crate) mod initiator_addr_and_secret_key;\npub(crate) mod transaction_v1_builder;\n"
  },
  {
    "path": "node/src/types/validator_matrix.rs",
    "content": "#[cfg(test)]\nuse std::iter;\nuse std::{\n    collections::{BTreeMap, HashSet},\n    fmt::{self, Debug, Formatter},\n    sync::{Arc, RwLock, RwLockReadGuard},\n};\n\nuse datasize::DataSize;\nuse itertools::Itertools;\nuse num_rational::Ratio;\nuse serde::Serialize;\nuse tracing::info;\n\nuse casper_types::{\n    BlockHeaderV2, ChainNameDigest, EraId, FinalitySignatureV2, PublicKey, SecretKey, U512,\n};\n\nconst MINIMUM_CUSP_ERA_COUNT: u64 = 2;\nconst PROPOSED_BLOCK_ERA_TOLERANCE: u64 = 1;\n\n#[derive(Eq, PartialEq, Debug, Copy, Clone, DataSize)]\npub(crate) enum SignatureWeight {\n    /// Too few signatures to make any guarantees about the block's finality.\n    Insufficient,\n    /// At least one honest validator has signed the block.\n    Weak,\n    /// There can be no blocks on other forks that also have this many signatures.\n    Strict,\n}\n\nimpl SignatureWeight {\n    pub(crate) fn is_sufficient(&self, requires_strict_finality: bool) -> bool {\n        match self {\n            SignatureWeight::Insufficient => false,\n            SignatureWeight::Weak => false == requires_strict_finality,\n            SignatureWeight::Strict => true,\n        }\n    }\n}\n\n#[derive(Clone, DataSize)]\npub(crate) struct ValidatorMatrix {\n    inner: Arc<RwLock<BTreeMap<EraId, EraValidatorWeights>>>,\n    chainspec_name_hash: ChainNameDigest,\n    chainspec_validators: Option<Arc<BTreeMap<PublicKey, U512>>>,\n    chainspec_activation_era: EraId,\n    #[data_size(skip)]\n    finality_threshold_fraction: Ratio<u64>,\n    secret_signing_key: Arc<SecretKey>,\n    public_signing_key: PublicKey,\n    auction_delay: u64,\n    signature_rewards_max_delay: u64,\n    retrograde_latch: Option<EraId>,\n}\n\nimpl ValidatorMatrix {\n    #[allow(clippy::too_many_arguments)]\n    pub(crate) fn new(\n        finality_threshold_fraction: Ratio<u64>,\n        chainspec_name_hash: ChainNameDigest,\n        chainspec_validators: Option<BTreeMap<PublicKey, U512>>,\n        
chainspec_activation_era: EraId,\n        secret_signing_key: Arc<SecretKey>,\n        public_signing_key: PublicKey,\n        auction_delay: u64,\n        signature_rewards_max_delay: u64,\n    ) -> Self {\n        let inner = Arc::new(RwLock::new(BTreeMap::new()));\n        ValidatorMatrix {\n            inner,\n            finality_threshold_fraction,\n            chainspec_name_hash,\n            chainspec_validators: chainspec_validators.map(Arc::new),\n            chainspec_activation_era,\n            secret_signing_key,\n            public_signing_key,\n            auction_delay,\n            signature_rewards_max_delay,\n            retrograde_latch: None,\n        }\n    }\n\n    /// Creates a new validator matrix with just a single validator.\n    #[cfg(test)]\n    pub(crate) fn new_with_validator(secret_signing_key: Arc<SecretKey>) -> Self {\n        let public_signing_key = PublicKey::from(&*secret_signing_key);\n        let finality_threshold_fraction = Ratio::new(1, 3);\n        let era_id = EraId::new(0);\n        let weights = EraValidatorWeights::new(\n            era_id,\n            iter::once((public_signing_key.clone(), 100.into())).collect(),\n            finality_threshold_fraction,\n        );\n        ValidatorMatrix {\n            inner: Arc::new(RwLock::new(iter::once((era_id, weights)).collect())),\n            chainspec_name_hash: ChainNameDigest::from_chain_name(\"casper-example\"),\n            chainspec_validators: None,\n            chainspec_activation_era: EraId::from(0),\n            finality_threshold_fraction,\n            public_signing_key,\n            secret_signing_key,\n            auction_delay: 1,\n            signature_rewards_max_delay: 3,\n            retrograde_latch: None,\n        }\n    }\n\n    /// Creates a new validator matrix with multiple validators.\n    #[cfg(test)]\n    pub(crate) fn new_with_validators<I: IntoIterator<Item = PublicKey>>(\n        secret_signing_key: Arc<SecretKey>,\n        public_keys: 
I,\n    ) -> Self {\n        let public_signing_key = PublicKey::from(&*secret_signing_key);\n        let finality_threshold_fraction = Ratio::new(1, 3);\n        let era_id = EraId::new(0);\n        let weights = EraValidatorWeights::new(\n            era_id,\n            public_keys\n                .into_iter()\n                .map(|pub_key| (pub_key, 100.into()))\n                .collect(),\n            finality_threshold_fraction,\n        );\n        ValidatorMatrix {\n            inner: Arc::new(RwLock::new(iter::once((era_id, weights)).collect())),\n            chainspec_name_hash: ChainNameDigest::from_chain_name(\"casper-example\"),\n            chainspec_validators: None,\n            chainspec_activation_era: EraId::from(0),\n            finality_threshold_fraction,\n            public_signing_key,\n            secret_signing_key,\n            auction_delay: 1,\n            signature_rewards_max_delay: 3,\n            retrograde_latch: None,\n        }\n    }\n\n    /// Sets signature_rewards_max_delay to imputed value.\n    #[cfg(test)]\n    pub(crate) fn with_signature_rewards_max_delay(\n        mut self,\n        signature_rewards_max_delay: u64,\n    ) -> Self {\n        self.signature_rewards_max_delay = signature_rewards_max_delay;\n        self\n    }\n\n    #[cfg(test)]\n    pub(crate) fn public_keys(&self, era_id: &EraId) -> Vec<PublicKey> {\n        let mut ret = vec![];\n        if let Some(evw) = self.read_inner().get(era_id) {\n            for validator_public_key in evw.validator_public_keys() {\n                ret.push(validator_public_key.clone());\n            }\n        }\n        ret\n    }\n\n    // Register the era of the highest orphaned block.\n    pub(crate) fn register_retrograde_latch(&mut self, latch_era: Option<EraId>) {\n        self.retrograde_latch = latch_era;\n    }\n\n    // When the chain starts, the validator weights will be the same until the unbonding delay is\n    // elapsed. 
This allows us to possibly infer the weights of other eras if the era registered is\n    // within the unbonding delay.\n    // Currently, we only infer the validator weights for era 0 from the set registered for era 1.\n    // This is needed for the case where we want to sync leap to a block in era 0 of a pre 1.5.0\n    // network for which we cant get the validator weights from a switch block.\n    pub(crate) fn register_era_validator_weights(\n        &mut self,\n        validators: EraValidatorWeights,\n    ) -> bool {\n        let was_present = self.register_era_validator_weights_bounded(validators.clone());\n        if validators.era_id() == EraId::from(1) {\n            self.register_era_validator_weights_bounded(EraValidatorWeights::new(\n                EraId::from(0),\n                validators.validator_weights,\n                validators.finality_threshold_fraction,\n            ));\n            info!(\"ValidatorMatrix: Inferred validator weights for Era 0 from weights in Era 1\");\n        }\n        was_present\n    }\n\n    fn register_era_validator_weights_bounded(&mut self, validators: EraValidatorWeights) -> bool {\n        let era_id = validators.era_id;\n        let mut guard = self\n            .inner\n            .write()\n            .expect(\"poisoned lock on validator matrix\");\n        let is_new = guard.insert(era_id, validators).is_none();\n\n        let latch_era = if let Some(era) = self.retrograde_latch.as_ref() {\n            *era\n        } else {\n            return is_new;\n        };\n\n        let nth = self.cache_tail_max_len();\n        // avoid multiplication\n        let excess_entry_count = guard.len().saturating_sub(nth).saturating_sub(nth);\n        let mut removed = false;\n        for _ in 0..excess_entry_count {\n            let median_era = guard.keys().rev().nth(nth).copied().unwrap();\n            if median_era <= latch_era {\n                break;\n            } else {\n                
guard.remove(&median_era);\n                if median_era == era_id {\n                    removed = true;\n                }\n            }\n        }\n        is_new && !removed\n    }\n\n    pub(crate) fn register_validator_weights(\n        &mut self,\n        era_id: EraId,\n        validator_weights: BTreeMap<PublicKey, U512>,\n    ) {\n        if self.read_inner().contains_key(&era_id) == false {\n            self.register_era_validator_weights(EraValidatorWeights::new(\n                era_id,\n                validator_weights,\n                self.finality_threshold_fraction,\n            ));\n        }\n    }\n\n    pub(crate) fn register_eras(\n        &mut self,\n        era_weights: BTreeMap<EraId, BTreeMap<PublicKey, U512>>,\n    ) {\n        for (era_id, weights) in era_weights {\n            self.register_validator_weights(era_id, weights);\n        }\n    }\n\n    pub(crate) fn has_era(&self, era_id: &EraId) -> bool {\n        self.read_inner().contains_key(era_id)\n    }\n\n    pub(crate) fn validator_weights(&self, era_id: EraId) -> Option<EraValidatorWeights> {\n        if let (true, Some(chainspec_validators)) = (\n            era_id == self.chainspec_activation_era,\n            self.chainspec_validators.as_ref(),\n        ) {\n            Some(EraValidatorWeights::new(\n                era_id,\n                (**chainspec_validators).clone(),\n                self.finality_threshold_fraction,\n            ))\n        } else {\n            self.read_inner().get(&era_id).cloned()\n        }\n    }\n\n    pub(crate) fn fault_tolerance_threshold(&self) -> Ratio<u64> {\n        self.finality_threshold_fraction\n    }\n\n    pub(crate) fn is_empty(&self) -> bool {\n        self.read_inner().is_empty()\n    }\n\n    /// Returns whether `pub_key` is the ID of a validator in this era, or `None` if the validator\n    /// information for that era is missing.\n    pub(crate) fn is_validator_in_era(\n        &self,\n        era_id: EraId,\n        
public_key: &PublicKey,\n    ) -> Option<bool> {\n        if let (true, Some(chainspec_validators)) = (\n            era_id == self.chainspec_activation_era,\n            self.chainspec_validators.as_ref(),\n        ) {\n            Some(chainspec_validators.contains_key(public_key))\n        } else {\n            self.read_inner()\n                .get(&era_id)\n                .map(|validator_weights| validator_weights.is_validator(public_key))\n        }\n    }\n\n    pub(crate) fn public_signing_key(&self) -> &PublicKey {\n        &self.public_signing_key\n    }\n\n    pub(crate) fn secret_signing_key(&self) -> &Arc<SecretKey> {\n        &self.secret_signing_key\n    }\n\n    /// Returns whether `pub_key` is the ID of a validator in this era, or `None` if the validator\n    /// information for that era is missing.\n    pub(crate) fn is_self_validator_in_era(&self, era_id: EraId) -> Option<bool> {\n        self.is_validator_in_era(era_id, &self.public_signing_key)\n    }\n\n    /// Determine if the active validator is in a current or upcoming set of active validators.\n    #[inline]\n    pub(crate) fn is_active_or_upcoming_validator(&self, public_key: &PublicKey) -> bool {\n        // This function is potentially expensive and could be memoized, with the cache being\n        // invalidated when the max value of the `BTreeMap` changes.\n        self.read_inner()\n            .values()\n            .rev()\n            .take(self.auction_delay as usize + 1)\n            .any(|validator_weights| validator_weights.is_validator(public_key))\n    }\n\n    pub(crate) fn create_finality_signature(\n        &self,\n        block_header: &BlockHeaderV2,\n    ) -> Option<FinalitySignatureV2> {\n        if self\n            .is_self_validator_in_era(block_header.era_id())\n            .unwrap_or(false)\n        {\n            return Some(FinalitySignatureV2::create(\n                block_header.block_hash(),\n                block_header.height(),\n                
block_header.era_id(),\n                self.chainspec_name_hash,\n                &self.secret_signing_key,\n            ));\n        }\n        None\n    }\n\n    fn read_inner(&self) -> RwLockReadGuard<BTreeMap<EraId, EraValidatorWeights>> {\n        self.inner.read().unwrap()\n    }\n\n    pub(crate) fn eras(&self) -> Vec<EraId> {\n        self.read_inner().keys().copied().collect_vec()\n    }\n\n    pub fn chain_name_hash(&self) -> ChainNameDigest {\n        self.chainspec_name_hash\n    }\n\n    #[cfg(test)]\n    pub(crate) fn purge_era_validators(&mut self, era_id: &EraId) {\n        self.inner.write().unwrap().remove(era_id);\n    }\n\n    fn cache_head_max_len(&self) -> usize {\n        MINIMUM_CUSP_ERA_COUNT.saturating_add(self.auction_delay) as usize\n    }\n\n    fn cache_tail_max_len(&self) -> usize {\n        let min_plus_auction_delay = self.cache_head_max_len();\n        let signature_rewards_max_delay =\n            self.signature_rewards_max_delay\n                .saturating_add(PROPOSED_BLOCK_ERA_TOLERANCE) as usize;\n        min_plus_auction_delay.max(signature_rewards_max_delay)\n    }\n\n    #[cfg(test)]\n    pub(crate) fn entries_max(&self) -> usize {\n        self.cache_tail_max_len() * 2\n    }\n}\n\nimpl Debug for ValidatorMatrix {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        f.debug_struct(\"ValidatorMatrix\")\n            .field(\"weights\", &*self.read_inner())\n            .field(\n                \"finality_threshold_fraction\",\n                &self.finality_threshold_fraction,\n            )\n            .finish()\n    }\n}\n\n#[derive(DataSize, Debug, Eq, PartialEq, Serialize, Default, Clone)]\npub(crate) struct EraValidatorWeights {\n    era_id: EraId,\n    validator_weights: BTreeMap<PublicKey, U512>,\n    #[data_size(skip)]\n    finality_threshold_fraction: Ratio<u64>,\n}\n\nimpl EraValidatorWeights {\n    pub(crate) fn new(\n        era_id: EraId,\n        validator_weights: BTreeMap<PublicKey, U512>,\n 
       finality_threshold_fraction: Ratio<u64>,\n    ) -> Self {\n        EraValidatorWeights {\n            era_id,\n            validator_weights,\n            finality_threshold_fraction,\n        }\n    }\n\n    pub(crate) fn era_id(&self) -> EraId {\n        self.era_id\n    }\n\n    pub(crate) fn is_empty(&self) -> bool {\n        self.validator_weights.is_empty()\n    }\n\n    pub(crate) fn get_total_weight(&self) -> U512 {\n        self.validator_weights.values().copied().sum()\n    }\n\n    pub(crate) fn validator_public_keys(&self) -> impl Iterator<Item = &PublicKey> {\n        self.validator_weights.keys()\n    }\n\n    pub(crate) fn into_validator_public_keys(self) -> impl Iterator<Item = PublicKey> {\n        self.validator_weights.into_keys()\n    }\n\n    pub(crate) fn missing_validators<'a>(\n        &self,\n        validator_keys: impl Iterator<Item = &'a PublicKey>,\n    ) -> impl Iterator<Item = &PublicKey> {\n        let provided_keys: HashSet<_> = validator_keys.cloned().collect();\n        self.validator_weights\n            .keys()\n            .filter(move |&validator| !provided_keys.contains(validator))\n    }\n\n    pub(crate) fn bogus_validators<'a>(\n        &self,\n        validator_keys: impl Iterator<Item = &'a PublicKey>,\n    ) -> Vec<PublicKey> {\n        validator_keys\n            .filter(move |validator_key| !self.validator_weights.keys().contains(validator_key))\n            .cloned()\n            .collect()\n    }\n\n    pub(crate) fn get_weight(&self, public_key: &PublicKey) -> U512 {\n        match self.validator_weights.get(public_key) {\n            None => U512::zero(),\n            Some(w) => *w,\n        }\n    }\n\n    pub(crate) fn is_validator(&self, public_key: &PublicKey) -> bool {\n        self.validator_weights.contains_key(public_key)\n    }\n\n    pub(crate) fn signed_weight<'a>(\n        &self,\n        validator_keys: impl Iterator<Item = &'a PublicKey>,\n    ) -> U512 {\n        validator_keys\n            
.map(|validator_key| self.get_weight(validator_key))\n            .sum()\n    }\n\n    pub(crate) fn signature_weight<'a>(\n        &self,\n        validator_keys: impl Iterator<Item = &'a PublicKey>,\n    ) -> SignatureWeight {\n        // sufficient is ~33.4%, strict is ~66.7% by default in highway\n        // in some cases, we may already have strict weight or better before even starting.\n        // this is optimal, but in the cases where we do not we are willing to start work\n        // on acquiring block data on a block for which we have at least sufficient weight.\n        // nevertheless, we will try to attain strict weight before fully accepting such\n        // a block.\n        let finality_threshold_fraction = self.finality_threshold_fraction;\n        let strict = Ratio::new(1, 2) * (Ratio::from_integer(1) + finality_threshold_fraction);\n        let total_era_weight = self.get_total_weight();\n\n        let signature_weight = self.signed_weight(validator_keys);\n        if signature_weight * U512::from(*strict.denom())\n            > total_era_weight * U512::from(*strict.numer())\n        {\n            return SignatureWeight::Strict;\n        }\n        if signature_weight * U512::from(*finality_threshold_fraction.denom())\n            > total_era_weight * U512::from(*finality_threshold_fraction.numer())\n        {\n            return SignatureWeight::Weak;\n        }\n        SignatureWeight::Insufficient\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::iter;\n\n    use casper_types::EraId;\n    use num_rational::Ratio;\n\n    use crate::{\n        components::consensus::tests::utils::{\n            ALICE_PUBLIC_KEY, ALICE_SECRET_KEY, BOB_PUBLIC_KEY, CAROL_PUBLIC_KEY,\n        },\n        types::SignatureWeight,\n    };\n\n    use super::{EraValidatorWeights, ValidatorMatrix};\n\n    fn empty_era_validator_weights(era_id: EraId) -> EraValidatorWeights {\n        EraValidatorWeights::new(\n            era_id,\n            
iter::once((ALICE_PUBLIC_KEY.clone(), 100.into())).collect(),\n            Ratio::new(1, 3),\n        )\n    }\n\n    #[test]\n    fn signature_weight_at_boundary_equal_weights() {\n        let weights = EraValidatorWeights::new(\n            EraId::default(),\n            [\n                (ALICE_PUBLIC_KEY.clone(), 100.into()),\n                (BOB_PUBLIC_KEY.clone(), 100.into()),\n                (CAROL_PUBLIC_KEY.clone(), 100.into()),\n            ]\n            .into(),\n            Ratio::new(1, 3),\n        );\n\n        assert_eq!(\n            weights.signature_weight([ALICE_PUBLIC_KEY.clone()].iter()),\n            SignatureWeight::Insufficient\n        );\n        assert_eq!(\n            weights.signature_weight([BOB_PUBLIC_KEY.clone()].iter()),\n            SignatureWeight::Insufficient\n        );\n        assert_eq!(\n            weights.signature_weight([CAROL_PUBLIC_KEY.clone()].iter()),\n            SignatureWeight::Insufficient\n        );\n        assert_eq!(\n            weights.signature_weight([ALICE_PUBLIC_KEY.clone(), BOB_PUBLIC_KEY.clone()].iter()),\n            SignatureWeight::Weak\n        );\n        assert_eq!(\n            weights.signature_weight([ALICE_PUBLIC_KEY.clone(), CAROL_PUBLIC_KEY.clone()].iter()),\n            SignatureWeight::Weak\n        );\n        assert_eq!(\n            weights.signature_weight([BOB_PUBLIC_KEY.clone(), CAROL_PUBLIC_KEY.clone()].iter()),\n            SignatureWeight::Weak\n        );\n        assert_eq!(\n            weights.signature_weight(\n                [\n                    ALICE_PUBLIC_KEY.clone(),\n                    BOB_PUBLIC_KEY.clone(),\n                    CAROL_PUBLIC_KEY.clone()\n                ]\n                .iter()\n            ),\n            SignatureWeight::Strict\n        );\n    }\n\n    #[test]\n    fn signature_weight_at_boundary_unequal_weights() {\n        let weights = EraValidatorWeights::new(\n            EraId::default(),\n            [\n                
(ALICE_PUBLIC_KEY.clone(), 101.into()),\n                (BOB_PUBLIC_KEY.clone(), 100.into()),\n                (CAROL_PUBLIC_KEY.clone(), 100.into()),\n            ]\n            .into(),\n            Ratio::new(1, 3),\n        );\n\n        assert_eq!(\n            weights.signature_weight([ALICE_PUBLIC_KEY.clone()].iter()),\n            SignatureWeight::Weak\n        );\n        assert_eq!(\n            weights.signature_weight([BOB_PUBLIC_KEY.clone()].iter()),\n            SignatureWeight::Insufficient\n        );\n        assert_eq!(\n            weights.signature_weight([CAROL_PUBLIC_KEY.clone()].iter()),\n            SignatureWeight::Insufficient\n        );\n        assert_eq!(\n            weights.signature_weight([ALICE_PUBLIC_KEY.clone(), BOB_PUBLIC_KEY.clone()].iter()),\n            SignatureWeight::Strict\n        );\n        assert_eq!(\n            weights.signature_weight([ALICE_PUBLIC_KEY.clone(), CAROL_PUBLIC_KEY.clone()].iter()),\n            SignatureWeight::Strict\n        );\n        assert_eq!(\n            weights.signature_weight([BOB_PUBLIC_KEY.clone(), CAROL_PUBLIC_KEY.clone()].iter()),\n            SignatureWeight::Weak\n        );\n        assert_eq!(\n            weights.signature_weight(\n                [\n                    ALICE_PUBLIC_KEY.clone(),\n                    BOB_PUBLIC_KEY.clone(),\n                    CAROL_PUBLIC_KEY.clone()\n                ]\n                .iter()\n            ),\n            SignatureWeight::Strict\n        );\n    }\n\n    #[test]\n    fn register_validator_weights_pruning() {\n        // Create a validator matrix and saturate it with entries.\n        let mut validator_matrix = ValidatorMatrix::new_with_validator(ALICE_SECRET_KEY.clone());\n        let mut era_validator_weights = vec![validator_matrix.validator_weights(0.into()).unwrap()];\n\n        let entries_max = validator_matrix.entries_max();\n        era_validator_weights.extend(\n            (1..entries_max as u64)\n                
.map(EraId::from)\n                .map(empty_era_validator_weights),\n        );\n        for evw in era_validator_weights\n            .iter()\n            .take(entries_max)\n            .skip(1)\n            .cloned()\n        {\n            assert!(\n                validator_matrix.register_era_validator_weights(evw),\n                \"register_era_validator_weights\"\n            );\n        }\n        let actual = validator_matrix\n            .read_inner()\n            .keys()\n            .copied()\n            .map(EraId::value)\n            .collect::<Vec<u64>>();\n        // For a `entries_max` value of 8, the validator\n        // matrix should contain eras 0 through 7 inclusive.\n        assert_eq!(vec![0u64, 1, 2, 3, 4, 5, 6, 7], actual);\n\n        // Now that we have 6 entries in the validator matrix, try adding more.\n        // We should have an entry for era 3 (we have eras 0 through 5\n        // inclusive).\n        let median = entries_max as u64 / 2;\n        assert!(\n            validator_matrix.has_era(&median.into()),\n            \"should have median era {}\",\n            median\n        );\n        // Add era 7, which would be the 7th entry in the matrix. 
Skipping era\n        // 6 should have no effect on the pruning.\n        era_validator_weights.push(empty_era_validator_weights((entries_max as u64 + 1).into()));\n\n        // set retrograde latch to simulate a fully synced node\n        validator_matrix.register_retrograde_latch(Some(EraId::new(0)));\n\n        // Now the entry for era 3 should be dropped, and we should be left with\n        // the 4 lowest eras [0, 1, 2, 3] and 4 highest eras [5, 6, 7, 9].\n        assert!(validator_matrix\n            .register_era_validator_weights(era_validator_weights.last().cloned().unwrap()));\n        assert!(\n            !validator_matrix.has_era(&median.into()),\n            \"should not have median era {}\",\n            median\n        );\n        let len = validator_matrix.read_inner().len();\n        assert_eq!(\n            len, entries_max,\n            \"expected entries {} actual entries: {}\",\n            entries_max, len\n        );\n        let expected = vec![0u64, 1, 2, 3, 5, 6, 7, 9];\n        let actual = validator_matrix\n            .read_inner()\n            .keys()\n            .copied()\n            .map(EraId::value)\n            .collect::<Vec<u64>>();\n        assert_eq!(expected, actual, \"{:?} {:?}\", expected, actual);\n\n        // Adding existing eras shouldn't change the state.\n        let old_state: Vec<EraId> = validator_matrix.read_inner().keys().copied().collect();\n        let repeat = era_validator_weights\n            .last()\n            .cloned()\n            .expect(\"should have last entry\");\n        assert!(\n            !validator_matrix.register_era_validator_weights(repeat),\n            \"should not re-register already registered era\"\n        );\n        let new_state: Vec<EraId> = validator_matrix.read_inner().keys().copied().collect();\n        assert_eq!(old_state, new_state, \"state should be unchanged\");\n    }\n\n    #[test]\n    fn register_validator_weights_latched_pruning() {\n        // TODO: write a 
version of this test that is not hardcoded with so many assumptions about\n        // the internal state of the matrix. The replacement test should dynamically\n        // determine the range and misc idx and count variables rather than hard coding\n        // them.\n\n        // Create a validator matrix and saturate it with entries.\n        let mut validator_matrix = ValidatorMatrix::new_with_validator(ALICE_SECRET_KEY.clone())\n            .with_signature_rewards_max_delay(2);\n        // Set the retrograde latch to 10 so we can register all eras lower or\n        // equal to 10.\n        let entries_max = validator_matrix.entries_max();\n        validator_matrix.register_retrograde_latch(Some(EraId::from(10)));\n        let mut era_validator_weights = vec![validator_matrix.validator_weights(0.into()).unwrap()];\n        era_validator_weights.extend(\n            (1..=entries_max as u64)\n                .map(EraId::from)\n                .map(empty_era_validator_weights),\n        );\n        for evw in era_validator_weights\n            .iter()\n            .take(entries_max + 1)\n            .skip(1)\n            .cloned()\n        {\n            assert!(\n                validator_matrix.register_era_validator_weights(evw),\n                \"register_era_validator_weights\"\n            );\n        }\n\n        // Register eras [7, 8, 9].\n        era_validator_weights.extend((7..=9).map(EraId::from).map(empty_era_validator_weights));\n        for evw in era_validator_weights.iter().rev().take(3).cloned() {\n            assert!(\n                validator_matrix.register_era_validator_weights(evw),\n                \"register_era_validator_weights\"\n            );\n        }\n\n        // Set the retrograde latch to era 5.\n        validator_matrix.register_retrograde_latch(Some(EraId::from(5)));\n        // Add era 10 to the weights.\n        era_validator_weights.push(empty_era_validator_weights(EraId::from(10)));\n        
assert_eq!(era_validator_weights.len(), 11);\n        // As the current weights in the matrix are [0, ..., 9], register era\n        // 10. This should succeed anyway since it's the highest weight.\n        assert!(\n            validator_matrix.register_era_validator_weights(era_validator_weights[10].clone()),\n            \"register_era_validator_weights\"\n        );\n        // The latch was previously set to 5, so now all weights which are\n        // neither the lowest 3, highest 3, or higher than the latched era\n        // should have been purged.\n        // Given we had weights [0, ..., 10] and the latch is 5, we should\n        // be left with [0, 1, 2, 3, 4, 5, 8, 9, 10].\n        for era in 0..=5 {\n            assert!(validator_matrix.has_era(&EraId::from(era)));\n        }\n        for era in 6..=7 {\n            assert!(!validator_matrix.has_era(&EraId::from(era)));\n        }\n        for era in 8..=10 {\n            assert!(validator_matrix.has_era(&EraId::from(era)));\n        }\n\n        // Make sure era 6, which was previously purged, is not registered as\n        // it is greater than the latch, which is 5.\n        assert!(\n            !validator_matrix.register_era_validator_weights(era_validator_weights[6].clone()),\n            \"register_era_validator_weights\"\n        );\n\n        // Set the retrograde latch to era 6.\n        validator_matrix.register_retrograde_latch(Some(EraId::from(6)));\n        // Make sure era 6 is now registered.\n        assert!(\n            validator_matrix.register_era_validator_weights(era_validator_weights[6].clone()),\n            \"register_era_validator_weights\"\n        );\n\n        // Set the retrograde latch to era 1.\n        validator_matrix.register_retrograde_latch(Some(EraId::from(1)));\n        // Register era 10 again to drive the purging mechanism.\n        assert!(\n            !validator_matrix.register_era_validator_weights(era_validator_weights[10].clone()),\n            
\"register_era_validator_weights\"\n        );\n        // The latch was previously set to 1, so now all weights which are\n        // neither the lowest 3, highest 3, or higher than the latched era\n        // should have been purged.\n        // Given we had weights [0, 1, 2, 3, 4, 5, 6, 8, 9, 10] and the latch\n        // is 1, we should be left with [0, 1, 2, 8, 9, 10].\n        for era in 0..=2 {\n            assert!(validator_matrix.has_era(&EraId::from(era)));\n        }\n        for era in 3..=7 {\n            assert!(!validator_matrix.has_era(&EraId::from(era)));\n        }\n        for era in 8..=10 {\n            assert!(validator_matrix.has_era(&EraId::from(era)));\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/types/value_or_chunk.rs",
    "content": "use std::fmt::{self, Debug, Display, Formatter};\n\nuse datasize::DataSize;\nuse hex_fmt::HexFmt;\nuse once_cell::sync::OnceCell;\nuse serde::{Deserialize, Serialize};\nuse thiserror::Error;\n\nuse casper_storage::global_state::trie::TrieRaw;\nuse casper_types::{\n    execution::ExecutionResult, ChunkWithProof, ChunkWithProofVerificationError, Digest,\n    MerkleConstructionError,\n};\n\nuse super::Chunkable;\nuse crate::{\n    components::fetcher::{EmptyValidationMetadata, FetchItem, Tag},\n    utils::ds,\n};\n\n/// Represents a value or a chunk of data with attached proof.\n///\n/// Chunk with attached proof is used when the requested\n/// value is larger than [ChunkWithProof::CHUNK_SIZE_BYTES].\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, DataSize)]\npub enum ValueOrChunk<V> {\n    /// Represents a value.\n    Value(V),\n    /// Represents a chunk of data with attached proof.\n    ChunkWithProof(ChunkWithProof),\n}\n\n/// Error returned when constructing an instance of [`ValueOrChunk`].\n#[derive(Debug, Error)]\npub enum ChunkingError {\n    /// Merkle proof construction error.\n    #[error(\"error constructing Merkle proof for chunk\")]\n    MerkleConstruction(\n        #[from]\n        #[source]\n        MerkleConstructionError,\n    ),\n    /// Serialization error.\n    #[error(\"error serializing data into chunks: {0}\")]\n    SerializationError(String),\n}\n\nimpl<V> ValueOrChunk<V> {\n    /// Creates an instance of [`ValueOrChunk::Value`] if data size is less than or equal to\n    /// [`ChunkWithProof::CHUNK_SIZE_BYTES`] or a [`ValueOrChunk::ChunkWithProof`] if it is greater.\n    /// In the latter case it will return only the `chunk_index`-th chunk of the value's byte\n    /// representation.\n    ///\n    /// NOTE: The [`Chunkable`] instance used here needs to match the one used when calling\n    /// [`Digest::hash_into_chunks_if_necessary`]. 
This is to ensure that type is turned into\n    /// bytes consistently before chunking and hashing. If not then the Merkle proofs for chunks\n    /// won't match.\n    pub fn new(data: V, chunk_index: u64) -> Result<Self, ChunkingError>\n    where\n        V: Chunkable,\n    {\n        let bytes = Chunkable::as_bytes(&data).map_err(|error| {\n            ChunkingError::SerializationError(format!(\n                \"failed to chunk {:?}: {:?}\",\n                std::any::type_name::<V>(),\n                error\n            ))\n        })?;\n        // NOTE: Cannot accept the chunk size bytes as an argument without changing the\n        // IndexedMerkleProof. The chunk size there is hardcoded and will be used when\n        // determining the chunk.\n        if bytes.len() <= ChunkWithProof::CHUNK_SIZE_BYTES {\n            Ok(ValueOrChunk::Value(data))\n        } else {\n            let chunk_with_proof = ChunkWithProof::new(&bytes, chunk_index)\n                .map_err(ChunkingError::MerkleConstruction)?;\n            Ok(ValueOrChunk::ChunkWithProof(chunk_with_proof))\n        }\n    }\n}\n\nimpl Display for ValueOrChunk<HashingTrieRaw> {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        match self {\n            ValueOrChunk::Value(data) => write!(f, \"value {}\", data),\n            ValueOrChunk::ChunkWithProof(chunk) => write!(\n                f,\n                \"chunk #{} with proof, root hash {}\",\n                chunk.proof().index(),\n                chunk.proof().root_hash()\n            ),\n        }\n    }\n}\n\nimpl Display for ValueOrChunk<Vec<ExecutionResult>> {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        match self {\n            ValueOrChunk::Value(data) => write!(f, \"value: {} execution results\", data.len()),\n            ValueOrChunk::ChunkWithProof(chunk) => write!(\n                f,\n                \"chunk #{} with proof, root hash {}\",\n                chunk.proof().index(),\n                
chunk.proof().root_hash()\n            ),\n        }\n    }\n}\n\n#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, DataSize)]\npub struct HashingTrieRaw {\n    inner: TrieRaw,\n    #[serde(skip)]\n    #[data_size(with = ds::once_cell)]\n    hash: OnceCell<Digest>,\n}\n\nimpl From<TrieRaw> for HashingTrieRaw {\n    fn from(inner: TrieRaw) -> HashingTrieRaw {\n        HashingTrieRaw {\n            inner,\n            hash: OnceCell::new(),\n        }\n    }\n}\n\nimpl Display for HashingTrieRaw {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        write!(f, \"{:10}\", HexFmt(self.inner.inner()))\n    }\n}\n\nimpl HashingTrieRaw {\n    fn hash(&self) -> Digest {\n        *self.hash.get_or_init(|| Digest::hash(self.inner.inner()))\n    }\n\n    pub fn inner(&self) -> &TrieRaw {\n        &self.inner\n    }\n\n    pub fn into_inner(self) -> TrieRaw {\n        self.inner\n    }\n}\n\n/// Represents an enum that can contain either a whole trie or a chunk of it.\npub type TrieOrChunk = ValueOrChunk<HashingTrieRaw>;\n\nimpl FetchItem for TrieOrChunk {\n    type Id = TrieOrChunkId;\n    type ValidationError = ChunkWithProofVerificationError;\n    type ValidationMetadata = EmptyValidationMetadata;\n\n    const TAG: Tag = Tag::TrieOrChunk;\n\n    fn fetch_id(&self) -> Self::Id {\n        match self {\n            TrieOrChunk::Value(trie_raw) => TrieOrChunkId(0, trie_raw.hash()),\n            TrieOrChunk::ChunkWithProof(chunked_data) => TrieOrChunkId(\n                chunked_data.proof().index(),\n                chunked_data.proof().root_hash(),\n            ),\n        }\n    }\n\n    fn validate(&self, _metadata: &EmptyValidationMetadata) -> Result<(), Self::ValidationError> {\n        match self {\n            TrieOrChunk::Value(_) => Ok(()),\n            TrieOrChunk::ChunkWithProof(chunk_with_proof) => chunk_with_proof.verify(),\n        }\n    }\n}\n\n/// Represents the ID of a `TrieOrChunk` - containing the index and the root hash.\n/// The root 
hash is the hash of the trie node as a whole.\n/// The index is the index of a chunk if the node's size is too large and requires chunking. For\n/// small nodes, it's always 0.\n#[derive(DataSize, Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Hash)]\npub struct TrieOrChunkId(pub u64, pub Digest);\n\nimpl TrieOrChunkId {\n    /// Returns the trie key part of the ID.\n    pub fn digest(&self) -> &Digest {\n        &self.1\n    }\n\n    /// Given a serialized ID, deserializes it for display purposes.\n    fn fmt_serialized(f: &mut Formatter, serialized_id: &[u8]) -> fmt::Result {\n        match bincode::deserialize::<Self>(serialized_id) {\n            Ok(ref trie_or_chunk_id) => Display::fmt(trie_or_chunk_id, f),\n            Err(_) => f.write_str(\"<invalid>\"),\n        }\n    }\n}\n\n/// Helper struct to on-demand deserialize a trie or chunk ID for display purposes.\npub struct TrieOrChunkIdDisplay<'a>(pub &'a [u8]);\n\nimpl Display for TrieOrChunkIdDisplay<'_> {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        TrieOrChunkId::fmt_serialized(f, self.0)\n    }\n}\n\nimpl Display for TrieOrChunkId {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        write!(f, \"({}, {})\", self.0, self.1)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use casper_types::{bytesrepr::Bytes, ChunkWithProof};\n\n    use super::ValueOrChunk;\n\n    #[test]\n    fn returns_value_or_chunk() {\n        let input: Bytes = vec![1u8; 1].into();\n        let value = ValueOrChunk::new(input, 0).unwrap();\n        assert!(matches!(value, ValueOrChunk::Value { .. 
}));\n\n        let input: Bytes = vec![1u8; ChunkWithProof::CHUNK_SIZE_BYTES + 1].into();\n        let value_or_chunk = ValueOrChunk::new(input.clone(), 0).unwrap();\n        let first_chunk = match value_or_chunk {\n            ValueOrChunk::Value(_) => panic!(\"expected chunk\"),\n            ValueOrChunk::ChunkWithProof(chunk) => chunk,\n        };\n\n        // try to read all the chunks\n        let chunk_count = first_chunk.proof().count();\n        let mut chunks = vec![first_chunk];\n\n        for i in 1..chunk_count {\n            match ValueOrChunk::new(input.clone(), i).unwrap() {\n                ValueOrChunk::Value(_) => panic!(\"expected chunk\"),\n                ValueOrChunk::ChunkWithProof(chunk) => chunks.push(chunk),\n            }\n        }\n\n        // there should be no chunk with index `chunk_count`\n        assert!(matches!(\n            ValueOrChunk::new(input.clone(), chunk_count),\n            Err(super::ChunkingError::MerkleConstruction(_))\n        ));\n\n        // all chunks should be valid\n        assert!(chunks.iter().all(|chunk| chunk.verify().is_ok()));\n\n        // reassemble the data\n        let data: Vec<u8> = chunks\n            .into_iter()\n            .flat_map(|chunk| chunk.into_chunk())\n            .collect();\n\n        // Since `Bytes` are chunked \"as-is\", there's no deserialization of the bytes required.\n        let retrieved_bytes: Bytes = data.into();\n\n        assert_eq!(input, retrieved_bytes);\n    }\n}\n\nmod specimen_support {\n    use crate::utils::specimen::{Cache, LargestSpecimen, SizeEstimator};\n\n    use super::{TrieOrChunkId, ValueOrChunk};\n\n    impl LargestSpecimen for TrieOrChunkId {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            TrieOrChunkId(\n                LargestSpecimen::largest_specimen(estimator, cache),\n                LargestSpecimen::largest_specimen(estimator, cache),\n            )\n        }\n    }\n\n    impl<V> 
LargestSpecimen for ValueOrChunk<V> {\n        fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n            // By definition, the chunk is always the largest (8MiB):\n            ValueOrChunk::ChunkWithProof(LargestSpecimen::largest_specimen(estimator, cache))\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/types.rs",
    "content": "//! Common types used across multiple components.\n\npub(crate) mod appendable_block;\nmod block;\nmod chunkable;\nmod exit_code;\nmod max_ttl;\nmod node_config;\nmod node_id;\n/// Peers map.\nmod status_feed;\nmod sync_leap;\npub(crate) mod sync_leap_validation_metadata;\npub(crate) mod transaction;\nmod validator_matrix;\nmod value_or_chunk;\n\nuse std::fmt::Debug;\n\nuse rand::{CryptoRng, RngCore};\n#[cfg(not(test))]\nuse rand_chacha::ChaCha20Rng;\nuse thiserror::Error;\n\npub(crate) use block::{\n    compute_approvals_checksum, create_single_block_rewarded_signatures,\n    BlockExecutionResultsOrChunkId, BlockPayload, BlockWithMetadata, ForwardMetaBlock,\n    InvalidProposalError, MetaBlock, MetaBlockMergeError, MetaBlockState,\n};\npub use block::{BlockExecutionResultsOrChunk, ExecutableBlock, FinalizedBlock, InternalEraReport};\npub use chunkable::Chunkable;\npub use datasize::DataSize;\npub use exit_code::ExitCode;\npub(crate) use max_ttl::MaxTtl;\npub use node_config::{NodeConfig, SyncHandling};\npub(crate) use node_id::NodeId;\npub use status_feed::{ChainspecInfo, GetStatusResult, StatusFeed};\npub(crate) use sync_leap::{GlobalStatesMetadata, SyncLeap, SyncLeapIdentifier};\npub(crate) use transaction::{\n    LegacyDeploy, MetaTransaction, TransactionFootprint, TransactionHeader,\n};\npub(crate) use validator_matrix::{EraValidatorWeights, SignatureWeight, ValidatorMatrix};\npub use value_or_chunk::{\n    ChunkingError, TrieOrChunk, TrieOrChunkId, TrieOrChunkIdDisplay, ValueOrChunk,\n};\n\n/// An object-safe RNG trait that requires a cryptographically strong random number generator.\npub trait CryptoRngCore: CryptoRng + RngCore {}\n\nimpl<T> CryptoRngCore for T where T: CryptoRng + RngCore + ?Sized {}\n\n/// The cryptographically secure RNG used throughout the node.\n#[cfg(not(test))]\npub type NodeRng = ChaCha20Rng;\n\n/// The RNG used throughout the node for testing.\n#[cfg(test)]\npub type NodeRng = casper_types::testing::TestRng;\n\n/// 
The variants in the given types are expected to all be the same.\n#[derive(Debug, Error)]\n#[error(\"mismatch in variants: {0:?}\")]\npub struct VariantMismatch(pub(super) Box<dyn Debug + Send + Sync>);\n"
  },
  {
    "path": "node/src/utils/block_signatures.rs",
    "content": "use std::collections::BTreeMap;\n\nuse num::rational::Ratio;\nuse thiserror::Error;\n\nuse casper_types::{BlockSignatures, PublicKey, U512};\n\n/// Computes the quorum for the fraction of weight of signatures that will be considered\n/// sufficient. This is the lowest weight so that any two sets of validators with that weight have\n/// at least one honest validator in common.\nfn quorum_fraction(fault_tolerance_fraction: Ratio<u64>) -> Ratio<u64> {\n    (fault_tolerance_fraction + 1) / 2\n}\n\n/// Returns `Ok(())` if the block signatures' total weight exceeds the threshold which is\n/// calculated using the provided quorum formula. Returns an error if it doesn't, or if one of the\n/// signatures does not belong to a validator.\n///\n/// This does _not_ cryptographically verify the signatures.\npub(crate) fn check_sufficient_block_signatures_with_quorum_formula<F>(\n    trusted_validator_weights: &BTreeMap<PublicKey, U512>,\n    fault_tolerance_fraction: Ratio<u64>,\n    maybe_block_signatures: Option<&BlockSignatures>,\n    quorum_formula: F,\n) -> Result<(), BlockSignatureError>\nwhere\n    F: Fn(Ratio<u64>) -> Ratio<u64>,\n{\n    // Calculate the weight of the signatures\n    let mut signature_weight: U512 = U512::zero();\n    let mut minimum_weight: Option<U512> = None;\n\n    let total_weight: U512 = trusted_validator_weights\n        .iter()\n        .map(|(_, weight)| *weight)\n        .sum();\n\n    match maybe_block_signatures {\n        Some(block_signatures) => {\n            let mut bogus_validators = vec![];\n            for public_key in block_signatures.signers() {\n                match trusted_validator_weights.get(public_key) {\n                    None => {\n                        bogus_validators.push(public_key.clone());\n                        continue;\n                    }\n                    Some(validator_weight) => {\n                        if minimum_weight.is_none_or(|min_w| *validator_weight < min_w) {\n             
               minimum_weight = Some(*validator_weight);\n                        }\n                        signature_weight += *validator_weight;\n                    }\n                }\n            }\n            if !bogus_validators.is_empty() {\n                return Err(BlockSignatureError::BogusValidators {\n                    trusted_validator_weights: trusted_validator_weights.clone(),\n                    block_signatures: Box::new(block_signatures.clone()),\n                    bogus_validators,\n                });\n            }\n\n            let quorum_fraction = (quorum_formula)(fault_tolerance_fraction);\n            // Verify: signature_weight / total_weight >= lower_bound\n            // Equivalent to the following\n            if signature_weight * U512::from(*quorum_fraction.denom())\n                <= total_weight * U512::from(*quorum_fraction.numer())\n            {\n                return Err(BlockSignatureError::InsufficientWeightForFinality {\n                    trusted_validator_weights: trusted_validator_weights.clone(),\n                    block_signatures: maybe_block_signatures\n                        .map(|signatures| Box::new(signatures.clone())),\n                    signature_weight: Some(Box::new(signature_weight)),\n                    total_validator_weight: Box::new(total_weight),\n                    fault_tolerance_fraction,\n                });\n            }\n\n            Ok(())\n        }\n        None => {\n            // No signatures provided, return early.\n            Err(BlockSignatureError::InsufficientWeightForFinality {\n                trusted_validator_weights: trusted_validator_weights.clone(),\n                block_signatures: None,\n                signature_weight: None,\n                total_validator_weight: Box::new(total_weight),\n                fault_tolerance_fraction,\n            })\n        }\n    }\n}\n\n/// Returns `Ok(())` if the block signatures' total weight exceeds the threshold 
calculated by\n/// the [quorum_fraction] function. Returns an error if it doesn't, or if one of the signatures does\n/// not belong to a validator.\n///\n/// This does _not_ cryptographically verify the signatures.\npub(crate) fn check_sufficient_block_signatures(\n    trusted_validator_weights: &BTreeMap<PublicKey, U512>,\n    fault_tolerance_fraction: Ratio<u64>,\n    block_signatures: Option<&BlockSignatures>,\n) -> Result<(), BlockSignatureError> {\n    check_sufficient_block_signatures_with_quorum_formula(\n        trusted_validator_weights,\n        fault_tolerance_fraction,\n        block_signatures,\n        quorum_fraction,\n    )\n}\n\n#[derive(Error, Debug)]\npub(crate) enum BlockSignatureError {\n    #[error(\n        \"Block signatures contain bogus validator. \\\n         trusted validator weights: {trusted_validator_weights:?}, \\\n         block signatures: {block_signatures:?}, \\\n         bogus validator public keys: {bogus_validators:?}\"\n    )]\n    BogusValidators {\n        trusted_validator_weights: BTreeMap<PublicKey, U512>,\n        block_signatures: Box<BlockSignatures>,\n        bogus_validators: Vec<PublicKey>,\n    },\n\n    #[error(\n        \"Insufficient weight for finality. 
\\\n         trusted validator weights: {trusted_validator_weights:?}, \\\n         block signatures: {block_signatures:?}, \\\n         signature weight: {signature_weight:?}, \\\n         total validator weight: {total_validator_weight}, \\\n         fault tolerance fraction: {fault_tolerance_fraction}\"\n    )]\n    InsufficientWeightForFinality {\n        trusted_validator_weights: BTreeMap<PublicKey, U512>,\n        block_signatures: Option<Box<BlockSignatures>>,\n        signature_weight: Option<Box<U512>>,\n        total_validator_weight: Box<U512>,\n        fault_tolerance_fraction: Ratio<u64>,\n    },\n}\n\n#[cfg(test)]\nmod tests {\n    use rand::Rng;\n\n    use casper_types::{\n        crypto, testing::TestRng, BlockHash, BlockSignaturesV2, ChainNameDigest, EraId,\n        FinalitySignature, SecretKey,\n    };\n\n    use super::*;\n\n    const TEST_VALIDATOR_WEIGHT: usize = 1;\n\n    fn generate_validators(\n        n_validators: usize,\n    ) -> (BTreeMap<PublicKey, SecretKey>, BTreeMap<PublicKey, U512>) {\n        let mut keys = BTreeMap::new();\n        let mut weights = BTreeMap::new();\n\n        for _ in 0..n_validators {\n            let (secret_key, pub_key) = crypto::generate_ed25519_keypair();\n            keys.insert(pub_key.clone(), secret_key);\n            weights.insert(pub_key, U512::from(TEST_VALIDATOR_WEIGHT));\n        }\n\n        (keys, weights)\n    }\n\n    fn create_signatures(\n        rng: &mut TestRng,\n        validators: &BTreeMap<PublicKey, SecretKey>,\n        n_sigs: usize,\n    ) -> BlockSignaturesV2 {\n        let era_id = EraId::new(rng.gen_range(10..100));\n\n        let block_hash = BlockHash::random(rng);\n        let block_height = rng.gen();\n        let chain_name_hash = ChainNameDigest::random(rng);\n\n        let mut sigs = BlockSignaturesV2::new(block_hash, block_height, era_id, chain_name_hash);\n\n        for (pub_key, secret_key) in validators.iter().take(n_sigs) {\n            let sig = 
crypto::sign(block_hash, secret_key, pub_key);\n            sigs.insert_signature(pub_key.clone(), sig);\n        }\n\n        sigs\n    }\n\n    #[test]\n    fn block_signatures_sufficiency() {\n        const TOTAL_VALIDATORS: usize = 20;\n        const TOTAL_VALIDATORS_WEIGHT: usize = TOTAL_VALIDATORS * TEST_VALIDATOR_WEIGHT;\n        const INSUFFICIENT_BLOCK_SIGNATURES: usize = 13;\n        const JUST_ENOUGH_BLOCK_SIGNATURES: usize = 14;\n\n        let mut rng = TestRng::new();\n\n        // Total validator weights is 20 (1 for each validator).\n        let (validators, validator_weights) = generate_validators(TOTAL_VALIDATORS);\n\n        let fault_tolerance_fraction = Ratio::new_raw(1, 3);\n\n        // for 20 validators with 20 total validator weight,\n        //   and `fault_tolerance_fraction` = 1/3 (~=  6.666)\n        //   and the `quorum fraction` = 2/3         (~= 13.333)\n        //\n        // we need signatures of weight:\n        //   - 13 or less for `InsufficientWeightForFinality`\n        //   - 14 for Ok\n\n        let insufficient = create_signatures(&mut rng, &validators, INSUFFICIENT_BLOCK_SIGNATURES);\n        let just_enough_weight =\n            create_signatures(&mut rng, &validators, JUST_ENOUGH_BLOCK_SIGNATURES);\n\n        let result = check_sufficient_block_signatures(\n            &validator_weights,\n            fault_tolerance_fraction,\n            Some(&BlockSignatures::from(insufficient)),\n        );\n        assert!(matches!(\n            result,\n            Err(BlockSignatureError::InsufficientWeightForFinality {\n                trusted_validator_weights: _,\n                block_signatures: _,\n                signature_weight,\n                total_validator_weight,\n                fault_tolerance_fraction: _\n            }) if *total_validator_weight == TOTAL_VALIDATORS_WEIGHT.into() && **signature_weight.as_ref().unwrap() == INSUFFICIENT_BLOCK_SIGNATURES.into()\n        ));\n\n        let result = 
check_sufficient_block_signatures(\n            &validator_weights,\n            fault_tolerance_fraction,\n            Some(&BlockSignatures::from(just_enough_weight)),\n        );\n        assert!(result.is_ok());\n    }\n\n    #[test]\n    fn block_signatures_sufficiency_with_quorum_formula() {\n        const TOTAL_VALIDATORS: usize = 20;\n        const TOTAL_VALIDATORS_WEIGHT: usize = TOTAL_VALIDATORS * TEST_VALIDATOR_WEIGHT;\n        const INSUFFICIENT_BLOCK_SIGNATURES: usize = 6;\n        const JUST_ENOUGH_BLOCK_SIGNATURES: usize = 7;\n\n        let mut rng = TestRng::new();\n\n        // Total validator weights is 20 (1 for each validator).\n        let (validators, validator_weights) = generate_validators(TOTAL_VALIDATORS_WEIGHT);\n\n        let fault_tolerance_fraction = Ratio::new_raw(1, 3);\n\n        // `identity` function is transparent, so the calculated quorum fraction will be equal to\n        // the `fault_tolerance_fraction`.\n        let custom_quorum_formula = std::convert::identity;\n\n        // for 20 validators with 20 total validator weight,\n        //   and `fault_tolerance_fraction` = 1/3 (~= 6.666)\n        //   and the `quorum fraction` = 1/3         (~= 6.666)\n        //\n        // we need signatures of weight:\n        //   - 6 or less for `InsufficientWeightForFinality`\n        //   - 7 for Ok\n\n        let insufficient = create_signatures(&mut rng, &validators, INSUFFICIENT_BLOCK_SIGNATURES);\n        let just_enough_weight =\n            create_signatures(&mut rng, &validators, JUST_ENOUGH_BLOCK_SIGNATURES);\n\n        let result = check_sufficient_block_signatures_with_quorum_formula(\n            &validator_weights,\n            fault_tolerance_fraction,\n            Some(&BlockSignatures::from(insufficient)),\n            custom_quorum_formula,\n        );\n        assert!(matches!(\n            result,\n            Err(BlockSignatureError::InsufficientWeightForFinality {\n                trusted_validator_weights: _,\n     
           block_signatures: _,\n                signature_weight,\n                total_validator_weight,\n                fault_tolerance_fraction: _\n            }) if *total_validator_weight == TOTAL_VALIDATORS_WEIGHT.into() && **signature_weight.as_ref().unwrap() == INSUFFICIENT_BLOCK_SIGNATURES.into()\n        ));\n\n        let result = check_sufficient_block_signatures_with_quorum_formula(\n            &validator_weights,\n            fault_tolerance_fraction,\n            Some(&BlockSignatures::from(just_enough_weight)),\n            custom_quorum_formula,\n        );\n        assert!(result.is_ok());\n    }\n\n    #[test]\n    fn block_signatures_sufficiency_with_quorum_formula_without_signatures() {\n        const TOTAL_VALIDATORS: usize = 20;\n        const TOTAL_VALIDATORS_WEIGHT: usize = TOTAL_VALIDATORS * TEST_VALIDATOR_WEIGHT;\n        let (_, validator_weights) = generate_validators(TOTAL_VALIDATORS_WEIGHT);\n        let fault_tolerance_fraction = Ratio::new_raw(1, 3);\n        let custom_quorum_formula = std::convert::identity;\n\n        let result = check_sufficient_block_signatures_with_quorum_formula(\n            &validator_weights,\n            fault_tolerance_fraction,\n            None,\n            custom_quorum_formula,\n        );\n        assert!(matches!(\n            result,\n            Err(BlockSignatureError::InsufficientWeightForFinality {\n                trusted_validator_weights: _,\n                block_signatures,\n                signature_weight,\n                total_validator_weight,\n                fault_tolerance_fraction: _\n            }) if *total_validator_weight == TOTAL_VALIDATORS_WEIGHT.into() && signature_weight.is_none() && block_signatures.is_none()\n        ));\n    }\n\n    #[test]\n    fn detects_bogus_validator() {\n        const TOTAL_VALIDATORS: usize = 20;\n        const JUST_ENOUGH_BLOCK_SIGNATURES: usize = 14;\n\n        let mut rng = TestRng::new();\n\n        let (validators, validator_weights) 
= generate_validators(TOTAL_VALIDATORS);\n        let fault_tolerance_fraction = Ratio::new_raw(1, 3);\n\n        // Generate correct signatures.\n        let mut signatures = create_signatures(&mut rng, &validators, JUST_ENOUGH_BLOCK_SIGNATURES);\n        let result = check_sufficient_block_signatures(\n            &validator_weights,\n            fault_tolerance_fraction,\n            Some(&BlockSignatures::from(signatures.clone())),\n        );\n        assert!(result.is_ok());\n\n        // Smuggle bogus proofs in.\n        let block_hash = *signatures.block_hash();\n        let block_height = signatures.block_height();\n        let era_id = signatures.era_id();\n        let chain_name_hash = signatures.chain_name_hash();\n        let finality_sig_1 = FinalitySignature::random_for_block(\n            block_hash,\n            block_height,\n            era_id,\n            chain_name_hash,\n            &mut rng,\n        );\n        signatures.insert_signature(\n            finality_sig_1.public_key().clone(),\n            *finality_sig_1.signature(),\n        );\n        let finality_sig_2 = FinalitySignature::random_for_block(\n            block_hash,\n            block_height,\n            era_id,\n            chain_name_hash,\n            &mut rng,\n        );\n        signatures.insert_signature(\n            finality_sig_2.public_key().clone(),\n            *finality_sig_2.signature(),\n        );\n        let result = check_sufficient_block_signatures(\n            &validator_weights,\n            fault_tolerance_fraction,\n            Some(&BlockSignatures::from(signatures)),\n        );\n        let error = result.unwrap_err();\n        if let BlockSignatureError::BogusValidators {\n            trusted_validator_weights: _,\n            block_signatures: _,\n            bogus_validators,\n        } = error\n        {\n            assert!(bogus_validators.contains(finality_sig_1.public_key()));\n            
assert!(bogus_validators.contains(finality_sig_2.public_key()));\n            assert_eq!(bogus_validators.len(), 2);\n        } else {\n            panic!(\"unexpected err: {}\", error);\n        }\n    }\n}\n"
  },
  {
    "path": "node/src/utils/chain_specification/error.rs",
    "content": "use thiserror::Error;\nuse uint::FromDecStrErr;\n\nuse casper_types::{file_utils::ReadFileError, GlobalStateUpdateError};\n\n/// Error returned when loading the chainspec.\n#[derive(Debug, Error)]\npub enum Error {\n    /// Error while decoding the chainspec from TOML format.\n    #[error(\"decoding from TOML error: {0}\")]\n    DecodingFromToml(#[from] toml::de::Error),\n\n    /// Error while decoding Motes from a decimal format.\n    #[error(\"decoding motes from base-10 error: {0}\")]\n    DecodingMotes(#[from] FromDecStrErr),\n\n    /// Error loading the chainspec.\n    #[error(\"could not load chainspec: {0}\")]\n    LoadChainspec(ReadFileError),\n\n    /// Error loading the chainspec accounts.\n    #[error(\"could not load chainspec accounts: {0}\")]\n    LoadChainspecAccounts(#[from] ChainspecAccountsLoadError),\n\n    /// Error loading the global state update.\n    #[error(\"could not load the global state update: {0}\")]\n    LoadGlobalStateUpgrade(#[from] GlobalStateUpdateLoadError),\n}\n\n/// Error loading chainspec accounts file.\n#[derive(Debug, Error)]\npub enum ChainspecAccountsLoadError {\n    /// Error loading the accounts file.\n    #[error(\"could not load accounts: {0}\")]\n    LoadAccounts(#[from] ReadFileError),\n\n    /// Error while decoding the chainspec accounts from TOML format.\n    #[error(\"decoding from TOML error: {0}\")]\n    DecodingFromToml(#[from] toml::de::Error),\n\n    /// Error while decoding a chainspec account's key hash from hex format.\n    #[error(\"decoding from hex error: {0}\")]\n    DecodingFromHex(#[from] base16::DecodeError),\n\n    /// Error while decoding Motes from a decimal format.\n    #[error(\"decoding motes from base-10 error: {0}\")]\n    DecodingMotes(#[from] FromDecStrErr),\n\n    /// Error while decoding a chainspec account's key hash from base-64 format.\n    #[error(\"crypto module error: {0}\")]\n    Crypto(#[from] casper_types::crypto::ErrorExt),\n}\n\n/// Error loading global state 
update file.\n#[derive(Debug, Error)]\npub enum GlobalStateUpdateLoadError {\n    /// Error loading the accounts file.\n    #[error(\"could not load the file: {0}\")]\n    LoadFile(#[from] ReadFileError),\n\n    /// Error while decoding the chainspec accounts from TOML format.\n    #[error(\"decoding from TOML error: {0}\")]\n    DecodingFromToml(#[from] toml::de::Error),\n\n    /// Error decoding kvp items.\n    #[error(\"decoding key value entries error: {0}\")]\n    DecodingKeyValuePairs(#[from] GlobalStateUpdateError),\n}\n"
  },
  {
    "path": "node/src/utils/chain_specification/parse_toml.rs",
    "content": "//! Helper struct and function for parsing a chainspec configuration file into its respective domain\n//! object.\n//!\n//! The runtime representation defined by the chainspec object graph is all-inclusive.\n//! However, as an implementation detail, the reference implementation splits the data up into\n//! multiple topical files.\n//!\n//! In addition to the mandatory base chainspec file, there is a file containing genesis account\n//! definitions for a given network (produced at genesis). This file contains all accounts that will\n//! be (or were, historically) created at genesis, their initial balances, initial staking (both\n//! validators and delegators). The total initial supply of a new network is a consequence of the\n//! sum of the token issued to these accounts. For a test network or small sidechain, the contents\n//! of this file might be small but for a full sized network there is quite a lot of data.\n//!\n//!\n//! Further, when protocol version upgrades are put forth they are allowed to have a file containing\n//! proposed changes to global state that if accepted will be applied as of the upgrade's block\n//! height and onward. This file is optional (more clearly, on an as needed basis only), a given\n//! network might not ever have such a file over its lifetime, and the contents of the file can\n//! be arbitrarily large as it contains encoded bytes of data. Each such file is directly associated\n//! to the specific chainspec file the changes are proposed with; each one is essentially a one off.\n//!\n//! This capability can and has been used to allow the introduction of new capabilities to the\n//! system which require some introduction of value(s) to global state to enable; this is a purely\n//! additive / extension type upgrade. However, this capability can also be leveraged as part of a\n//! social consensus to make changes to the validator set and / or to assert new values for existing\n//! global state entries. 
In either case, the contents of the file are parseable and verifiable in\n//! advance of their acceptance and application to a given network.\n\nuse std::{convert::TryFrom, path::Path};\n\nuse serde::{Deserialize, Serialize};\n\nuse casper_types::{\n    bytesrepr::Bytes, file_utils, AccountsConfig, ActivationPoint, Chainspec, ChainspecRawBytes,\n    CoreConfig, GlobalStateUpdate, GlobalStateUpdateConfig, HighwayConfig, NetworkConfig,\n    ProtocolConfig, ProtocolVersion, StorageCosts, SystemConfig, TransactionConfig, VacancyConfig,\n    WasmConfig,\n};\n\nuse crate::utils::{\n    chain_specification::error::{ChainspecAccountsLoadError, Error, GlobalStateUpdateLoadError},\n    Loadable,\n};\n\n// The names of chainspec related files on disk.\n/// The chainspec file name.\npub const CHAINSPEC_FILENAME: &str = \"chainspec.toml\";\n/// The genesis accounts file name.\npub const CHAINSPEC_ACCOUNTS_FILENAME: &str = \"accounts.toml\";\n/// The global state update file name.\npub const CHAINSPEC_GLOBAL_STATE_FILENAME: &str = \"global_state.toml\";\n\n#[derive(PartialEq, Eq, Serialize, Deserialize, Debug)]\n// Disallow unknown fields to ensure config files and command-line overrides contain valid keys.\n#[serde(deny_unknown_fields)]\nstruct TomlNetwork {\n    name: String,\n    maximum_net_message_size: u32,\n}\n\n#[derive(PartialEq, Eq, Serialize, Deserialize, Debug)]\n// Disallow unknown fields to ensure config files and command-line overrides contain valid keys.\n#[serde(deny_unknown_fields)]\nstruct TomlProtocol {\n    version: ProtocolVersion,\n    hard_reset: bool,\n    activation_point: ActivationPoint,\n}\n\n/// A chainspec configuration as laid out in the TOML-encoded configuration file.\n#[derive(PartialEq, Eq, Serialize, Deserialize, Debug)]\n// Disallow unknown fields to ensure config files and command-line overrides contain valid keys.\n#[serde(deny_unknown_fields)]\npub(super) struct TomlChainspec {\n    protocol: TomlProtocol,\n    network: TomlNetwork,\n    
core: CoreConfig,\n    transactions: TransactionConfig,\n    highway: HighwayConfig,\n    wasm: WasmConfig,\n    system_costs: SystemConfig,\n    vacancy: VacancyConfig,\n    storage_costs: StorageCosts,\n}\n\nimpl From<&Chainspec> for TomlChainspec {\n    fn from(chainspec: &Chainspec) -> Self {\n        let protocol = TomlProtocol {\n            version: chainspec.protocol_config.version,\n            hard_reset: chainspec.protocol_config.hard_reset,\n            activation_point: chainspec.protocol_config.activation_point,\n        };\n        let network = TomlNetwork {\n            name: chainspec.network_config.name.clone(),\n            maximum_net_message_size: chainspec.network_config.maximum_net_message_size,\n        };\n        let core = chainspec.core_config.clone();\n        let transactions = chainspec.transaction_config.clone();\n        let highway = chainspec.highway_config;\n        let wasm = chainspec.wasm_config;\n        let system_costs = chainspec.system_costs_config;\n        let vacancy = chainspec.vacancy_config;\n        let storage_costs = chainspec.storage_costs;\n\n        TomlChainspec {\n            protocol,\n            network,\n            core,\n            transactions,\n            highway,\n            wasm,\n            system_costs,\n            vacancy,\n            storage_costs,\n        }\n    }\n}\n\npub(super) fn parse_toml<P: AsRef<Path>>(\n    chainspec_path: P,\n) -> Result<(Chainspec, ChainspecRawBytes), Error> {\n    let chainspec_bytes =\n        file_utils::read_file(chainspec_path.as_ref()).map_err(Error::LoadChainspec)?;\n    let toml_chainspec: TomlChainspec =\n        toml::from_str(std::str::from_utf8(&chainspec_bytes).unwrap())?;\n\n    let root = chainspec_path\n        .as_ref()\n        .parent()\n        .unwrap_or_else(|| Path::new(\"\"));\n\n    // accounts.toml must live in the same directory as chainspec.toml.\n    let (accounts_config, maybe_genesis_accounts_bytes) = 
parse_toml_accounts(root)?;\n\n    let network_config = NetworkConfig {\n        name: toml_chainspec.network.name,\n        accounts_config,\n        maximum_net_message_size: toml_chainspec.network.maximum_net_message_size,\n    };\n\n    // global_state_update.toml must live in the same directory as chainspec.toml.\n    let (global_state_update, maybe_global_state_bytes) = match parse_toml_global_state(root)? {\n        Some((config, bytes)) => (\n            Some(\n                GlobalStateUpdate::try_from(config)\n                    .map_err(GlobalStateUpdateLoadError::DecodingKeyValuePairs)?,\n            ),\n            Some(bytes),\n        ),\n        None => (None, None),\n    };\n\n    let protocol_config = ProtocolConfig {\n        version: toml_chainspec.protocol.version,\n        hard_reset: toml_chainspec.protocol.hard_reset,\n        activation_point: toml_chainspec.protocol.activation_point,\n        global_state_update,\n    };\n\n    let chainspec = Chainspec {\n        protocol_config,\n        network_config,\n        core_config: toml_chainspec.core,\n        transaction_config: toml_chainspec.transactions,\n        highway_config: toml_chainspec.highway,\n        wasm_config: toml_chainspec.wasm,\n        system_costs_config: toml_chainspec.system_costs,\n        vacancy_config: toml_chainspec.vacancy,\n        storage_costs: toml_chainspec.storage_costs,\n    };\n    let chainspec_raw_bytes = ChainspecRawBytes::new(\n        Bytes::from(chainspec_bytes),\n        maybe_genesis_accounts_bytes,\n        maybe_global_state_bytes,\n    );\n\n    Ok((chainspec, chainspec_raw_bytes))\n}\n\nimpl Loadable for (Chainspec, ChainspecRawBytes) {\n    type Error = Error;\n\n    fn from_path<P: AsRef<Path>>(path: P) -> Result<Self, Self::Error> {\n        parse_toml(path.as_ref().join(CHAINSPEC_FILENAME))\n    }\n}\n\n/// Returns `Self` and the raw bytes of the file.\n///\n/// If the file doesn't exist, returns `Ok` with an empty `AccountsConfig` and 
`None` bytes.\npub(super) fn parse_toml_accounts<P: AsRef<Path>>(\n    dir_path: P,\n) -> Result<(AccountsConfig, Option<Bytes>), ChainspecAccountsLoadError> {\n    let accounts_path = dir_path.as_ref().join(CHAINSPEC_ACCOUNTS_FILENAME);\n    if !accounts_path.is_file() {\n        let config = AccountsConfig::new(vec![], vec![], vec![]);\n        let maybe_bytes = None;\n        return Ok((config, maybe_bytes));\n    }\n    let bytes = file_utils::read_file(accounts_path)?;\n    let config: AccountsConfig = toml::from_str(std::str::from_utf8(&bytes).unwrap())?;\n    Ok((config, Some(Bytes::from(bytes))))\n}\n\npub(super) fn parse_toml_global_state<P: AsRef<Path>>(\n    path: P,\n) -> Result<Option<(GlobalStateUpdateConfig, Bytes)>, GlobalStateUpdateLoadError> {\n    let update_path = path.as_ref().join(CHAINSPEC_GLOBAL_STATE_FILENAME);\n    if !update_path.is_file() {\n        return Ok(None);\n    }\n    let bytes = file_utils::read_file(update_path)?;\n    let config = toml::from_str(std::str::from_utf8(&bytes).unwrap())?;\n    Ok(Some((config, Bytes::from(bytes))))\n}\n"
  },
  {
    "path": "node/src/utils/chain_specification.rs",
    "content": "pub(crate) mod error;\npub(crate) mod parse_toml;\n\nuse std::collections::HashSet;\n\nuse num_rational::Ratio;\nuse once_cell::sync::Lazy;\nuse tracing::{error, info, warn};\n\nuse casper_types::{\n    system::auction::VESTING_SCHEDULE_LENGTH_MILLIS, Chainspec, ConsensusProtocolName, CoreConfig,\n    ProtocolConfig, TimeDiff, TransactionConfig, AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID,\n    MINT_LANE_ID,\n};\n\nuse crate::components::network;\n\nstatic RESERVED_LANE_IDS: Lazy<Vec<u8>> =\n    Lazy::new(|| vec![MINT_LANE_ID, AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID]);\n\n/// Returns `false` and logs errors if the values set in the config don't make sense.\n#[tracing::instrument(ret, level = \"info\", skip(chainspec), fields(hash = % chainspec.hash()))]\npub fn validate_chainspec(chainspec: &Chainspec) -> bool {\n    info!(\"begin chainspec validation\");\n\n    if chainspec.core_config.unbonding_delay <= chainspec.core_config.auction_delay {\n        warn!(\n                \"unbonding delay is set to {} but it should be greater than the auction delay (currently set to {})\",\n                chainspec.core_config.unbonding_delay, chainspec.core_config.auction_delay);\n        return false;\n    }\n\n    // If the era duration is set to zero, we will treat it as explicitly stating that eras\n    // should be defined by height only.\n    if chainspec.core_config.era_duration.millis() > 0\n        && chainspec.core_config.era_duration\n            < chainspec.core_config.minimum_block_time * chainspec.core_config.minimum_era_height\n    {\n        warn!(\"era duration is less than minimum era height * block time!\");\n    }\n\n    if chainspec.core_config.consensus_protocol == ConsensusProtocolName::Highway {\n        if chainspec.core_config.minimum_block_time > chainspec.highway_config.maximum_round_length\n        {\n            error!(\n                minimum_block_time = %chainspec.core_config.minimum_block_time,\n                
maximum_round_length = %chainspec.highway_config.maximum_round_length,\n                \"minimum_block_time must be less or equal than maximum_round_length\",\n            );\n            return false;\n        }\n        match chainspec.highway_config.is_valid() {\n            Ok(_) => return true,\n            Err(msg) => {\n                error!(msg);\n                return false;\n            }\n        }\n    }\n\n    // We don't support lookback by more than one era in the rewards scheme.\n    if chainspec.core_config.minimum_era_height < chainspec.core_config.signature_rewards_max_delay\n    {\n        error!(\n            minimum_era_height = %chainspec.core_config.minimum_era_height,\n            signature_rewards_max_delay = %chainspec.core_config.signature_rewards_max_delay,\n            \"signature_rewards_max_delay must be less than minimum_era_height\"\n        );\n        return false;\n    }\n\n    network::within_message_size_limit_tolerance(chainspec)\n        && validate_protocol_config(&chainspec.protocol_config)\n        && validate_core_config(&chainspec.core_config)\n        && validate_transaction_config(&chainspec.transaction_config)\n}\n\n/// Checks whether the values set in the config make sense and returns `false` if they don't.\npub(crate) fn validate_protocol_config(_protocol_config: &ProtocolConfig) -> bool {\n    true\n}\n\n/// Returns `false` if unbonding delay is not greater than auction delay to ensure\n/// that `recent_era_count()` yields a value of at least 1.\npub(crate) fn validate_core_config(core_config: &CoreConfig) -> bool {\n    if core_config.unbonding_delay <= core_config.auction_delay {\n        warn!(\n            unbonding_delay = core_config.unbonding_delay,\n            auction_delay = core_config.auction_delay,\n            \"unbonding delay should be greater than auction delay\",\n        );\n        return false;\n    }\n\n    // If the era duration is set to zero, we will treat it as explicitly stating that 
eras\n    // should be defined by height only.  Warn only.\n    if core_config.era_duration.millis() > 0\n        && core_config.era_duration.millis()\n            < core_config.minimum_era_height * core_config.minimum_block_time.millis()\n    {\n        warn!(\"era duration is less than minimum era height * round length!\");\n    }\n\n    if core_config.finality_threshold_fraction <= Ratio::new(0, 1)\n        || core_config.finality_threshold_fraction >= Ratio::new(1, 1)\n    {\n        error!(\n            ftf = %core_config.finality_threshold_fraction,\n            \"finality threshold fraction is not in the range (0, 1)\",\n        );\n        return false;\n    }\n\n    if core_config.finality_signature_proportion <= Ratio::new(0, 1)\n        || core_config.finality_signature_proportion >= Ratio::new(1, 1)\n    {\n        error!(\n            fsp = %core_config.finality_signature_proportion,\n            \"finality signature proportion is not in the range (0, 1)\",\n        );\n        return false;\n    }\n    if core_config.finders_fee <= Ratio::new(0, 1) || core_config.finders_fee >= Ratio::new(1, 1) {\n        error!(\n            fsp = %core_config.finders_fee,\n            \"finder's fee proportion is not in the range (0, 1)\",\n        );\n        return false;\n    }\n\n    if core_config.vesting_schedule_period > TimeDiff::from_millis(VESTING_SCHEDULE_LENGTH_MILLIS) {\n        error!(\n            vesting_schedule_millis = core_config.vesting_schedule_period.millis(),\n            max_millis = VESTING_SCHEDULE_LENGTH_MILLIS,\n            \"vesting schedule period too long\",\n        );\n        return false;\n    }\n\n    true\n}\n\n/// Validates `TransactionConfig` parameters\npub(crate) fn validate_transaction_config(transaction_config: &TransactionConfig) -> bool {\n    // The total number of transactions should not exceed the number of approvals because each\n    // transaction needs at least one approval to be valid.\n    let total_txn_slots = 
transaction_config\n        .transaction_v1_config\n        .get_max_block_count();\n    if transaction_config.block_max_approval_count < total_txn_slots as u32 {\n        return false;\n    }\n    let mut seen_max_transaction_size = HashSet::new();\n    if transaction_config\n        .transaction_v1_config\n        .wasm_lanes()\n        .is_empty()\n    {\n        error!(\"Wasm lanes chainspec config is empty.\");\n        return false;\n    }\n    for wasm_lane_config in transaction_config.transaction_v1_config.wasm_lanes().iter() {\n        if RESERVED_LANE_IDS.contains(&wasm_lane_config.id()) {\n            error!(\"One of the defined wasm lanes has declared an id that is reserved for system lanes. Offending lane id: {}\", wasm_lane_config.id());\n            return false;\n        }\n        let max_transaction_length = wasm_lane_config.max_transaction_length();\n        if seen_max_transaction_size.contains(&max_transaction_length) {\n            error!(\"Found wasm lane configuration that has non-unique max_transaction_length. Duplicate value: {}\", max_transaction_length);\n            return false;\n        }\n        seen_max_transaction_size.insert(max_transaction_length);\n    }\n\n    let mut seen_max_gas_prices = HashSet::new();\n    for wasm_lane_config in transaction_config.transaction_v1_config.wasm_lanes().iter() {\n        //No need to check reserved lanes, we just did that\n        let max_transaction_gas_limit = wasm_lane_config.max_transaction_gas_limit();\n        if seen_max_gas_prices.contains(&max_transaction_gas_limit) {\n            error!(\"Found wasm lane configuration that has non-unique max_transaction_gas_limit. 
Duplicate value: {}\", max_transaction_gas_limit);\n            return false;\n        }\n        seen_max_gas_prices.insert(max_transaction_gas_limit);\n    }\n    true\n}\n\n#[cfg(test)]\nmod tests {\n    use std::fs;\n\n    use num_rational::Ratio;\n    use once_cell::sync::Lazy;\n\n    use casper_types::{\n        bytesrepr::FromBytes, ActivationPoint, BrTableCost, ChainspecRawBytes, ControlFlowCosts,\n        CoreConfig, EraId, GlobalStateUpdate, HighwayConfig, HostFunction, HostFunctionCostsV1,\n        HostFunctionCostsV2, HostFunctionV2, MessageLimits, Motes, OpcodeCosts, ProtocolConfig,\n        ProtocolVersion, StoredValue, TestBlockBuilder, TimeDiff, Timestamp, TransactionConfig,\n        TransactionLaneDefinition, TransactionV1Config, WasmConfig, WasmV1Config, WasmV2Config,\n        MINT_LANE_ID,\n    };\n\n    use super::*;\n    use crate::{\n        testing::init_logging,\n        utils::{Loadable, RESOURCES_PATH},\n    };\n\n    const EXPECTED_GENESIS_COSTS: OpcodeCosts = OpcodeCosts {\n        bit: 13,\n        add: 14,\n        mul: 15,\n        div: 16,\n        load: 17,\n        store: 18,\n        op_const: 19,\n        local: 20,\n        global: 21,\n        control_flow: ControlFlowCosts {\n            block: 1,\n            op_loop: 2,\n            op_if: 3,\n            op_else: 4,\n            end: 5,\n            br: 6,\n            br_if: 7,\n            br_table: BrTableCost {\n                cost: 0,\n                size_multiplier: 1,\n            },\n            op_return: 8,\n            call: 9,\n            call_indirect: 10,\n            drop: 11,\n            select: 12,\n        },\n        integer_comparison: 22,\n        conversion: 23,\n        unreachable: 24,\n        nop: 25,\n        current_memory: 26,\n        grow_memory: 27,\n        sign: 28,\n    };\n    static EXPECTED_GENESIS_HOST_FUNCTION_COSTS: Lazy<HostFunctionCostsV1> =\n        Lazy::new(|| HostFunctionCostsV1 {\n            read_value: 
HostFunction::new(127, [0, 1, 0]),\n            dictionary_get: HostFunction::new(128, [0, 1, 0]),\n            write: HostFunction::new(140, [0, 1, 0, 2]),\n            dictionary_put: HostFunction::new(141, [0, 1, 2, 3]),\n            add: HostFunction::new(100, [0, 1, 2, 3]),\n            new_uref: HostFunction::new(122, [0, 1, 2]),\n            load_named_keys: HostFunction::new(121, [0, 1]),\n            ret: HostFunction::new(133, [0, 1]),\n            get_key: HostFunction::new(113, [0, 1, 2, 3, 4]),\n            has_key: HostFunction::new(119, [0, 1]),\n            put_key: HostFunction::new(125, [0, 1, 2, 3]),\n            remove_key: HostFunction::new(132, [0, 1]),\n            revert: HostFunction::new(134, [0]),\n            is_valid_uref: HostFunction::new(120, [0, 1]),\n            add_associated_key: HostFunction::new(101, [0, 1, 2]),\n            remove_associated_key: HostFunction::new(129, [0, 1]),\n            update_associated_key: HostFunction::new(139, [0, 1, 2]),\n            set_action_threshold: HostFunction::new(135, [0, 1]),\n            get_caller: HostFunction::new(112, [0]),\n            get_blocktime: HostFunction::new(111, [0]),\n            create_purse: HostFunction::new(108, [0, 1]),\n            transfer_to_account: HostFunction::new(138, [0, 1, 2, 3, 4, 5, 6]),\n            transfer_from_purse_to_account: HostFunction::new(136, [0, 1, 2, 3, 4, 5, 6, 7, 8]),\n            transfer_from_purse_to_purse: HostFunction::new(137, [0, 1, 2, 3, 4, 5, 6, 7]),\n            get_balance: HostFunction::new(110, [0, 1, 2]),\n            get_phase: HostFunction::new(117, [0]),\n            get_system_contract: HostFunction::new(118, [0, 1, 2]),\n            get_main_purse: HostFunction::new(114, [0]),\n            read_host_buffer: HostFunction::new(126, [0, 1, 2]),\n            create_contract_package_at_hash: HostFunction::new(106, [0, 1]),\n            create_contract_user_group: HostFunction::new(107, [0, 1, 2, 3, 4, 5, 6, 7]),\n            
add_contract_version: HostFunction::new(102, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),\n            add_contract_version_with_message_topics: HostFunction::new(\n                102,\n                [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n            ),\n            add_package_version_with_message_topics: HostFunction::new(\n                102,\n                [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n            ),\n            disable_contract_version: HostFunction::new(109, [0, 1, 2, 3]),\n            call_contract: HostFunction::new(104, [0, 1, 2, 3, 4, 5, 6]),\n            call_versioned_contract: HostFunction::new(105, [0, 1, 2, 3, 4, 5, 6, 7, 8]),\n            get_named_arg_size: HostFunction::new(116, [0, 1, 2]),\n            get_named_arg: HostFunction::new(115, [0, 1, 2, 3]),\n            remove_contract_user_group: HostFunction::new(130, [0, 1, 2, 3]),\n            provision_contract_user_group_uref: HostFunction::new(124, [0, 1, 2, 3, 4]),\n            remove_contract_user_group_urefs: HostFunction::new(131, [0, 1, 2, 3, 4, 5]),\n            print: HostFunction::new(123, [0, 1]),\n            blake2b: HostFunction::new(133, [0, 1, 2, 3]),\n            random_bytes: HostFunction::new(123, [0, 1]),\n            enable_contract_version: HostFunction::new(142, [0, 1, 2, 3]),\n            generic_hash: HostFunction::new(152, [0, 1, 2, 3, 4]),\n            manage_message_topic: HostFunction::new(100, [0, 1, 2, 4]),\n            emit_message: HostFunction::new(100, [0, 1, 2, 3]),\n            cost_increase_per_message: 50,\n            get_block_info: HostFunction::new(330, [0, 0]),\n            recover_secp256k1: HostFunction::new(331, [0, 1, 2, 3, 4, 5]),\n            verify_signature: HostFunction::new(332, [0, 1, 2, 3, 4, 5]),\n            call_package_version: HostFunction::new(105, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),\n        });\n    static EXPECTED_GENESIS_HOST_FUNCTION_COSTS_V2: Lazy<HostFunctionCostsV2> =\n        Lazy::new(|| HostFunctionCostsV2 {\n            
read: HostFunctionV2::new(100, [0, 1, 2, 3, 4, 5]),\n            write: HostFunctionV2::new(101, [0, 1, 2, 3, 4]),\n            remove: HostFunctionV2::new(114, [0, 1, 2]),\n            copy_input: HostFunctionV2::new(102, [0, 1]),\n            ret: HostFunctionV2::new(103, [0, 1]),\n            create: HostFunctionV2::new(104, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),\n            transfer: HostFunctionV2::new(108, [0, 1, 2]),\n            env_balance: HostFunctionV2::new(109, [0, 1, 2, 3]),\n            upgrade: HostFunctionV2::new(110, [0, 1, 2, 3, 4, 5]),\n            call: HostFunctionV2::new(111, [0, 1, 2, 3, 4, 5, 6, 7, 8]),\n            print: HostFunctionV2::new(112, [0, 1]),\n            emit: HostFunctionV2::new(113, [0, 1, 2, 3]),\n            env_info: HostFunctionV2::new(114, [0, 1]),\n        });\n    static EXPECTED_GENESIS_WASM_COSTS: Lazy<WasmConfig> = Lazy::new(|| {\n        let wasm_v1_config = WasmV1Config::new(\n            17, // initial_memory\n            19, // max_stack_height\n            EXPECTED_GENESIS_COSTS,\n            *EXPECTED_GENESIS_HOST_FUNCTION_COSTS,\n        );\n        let wasm_v2_config = WasmV2Config::new(\n            17, // initial_memory\n            EXPECTED_GENESIS_COSTS,\n            *EXPECTED_GENESIS_HOST_FUNCTION_COSTS_V2,\n        );\n        WasmConfig::new(MessageLimits::default(), wasm_v1_config, wasm_v2_config)\n    });\n\n    #[test]\n    fn core_config_toml_roundtrip() {\n        let mut rng = crate::new_rng();\n        let config = CoreConfig::random(&mut rng);\n        let encoded = toml::to_string_pretty(&config).unwrap();\n        let decoded = toml::from_str(&encoded).unwrap();\n        assert_eq!(config, decoded);\n    }\n\n    #[test]\n    fn transaction_config_toml_roundtrip() {\n        let mut rng = crate::new_rng();\n        let config = TransactionConfig::random(&mut rng);\n        let encoded = toml::to_string_pretty(&config).unwrap();\n        let decoded = toml::from_str(&encoded).unwrap();\n        
assert_eq!(config, decoded);\n    }\n\n    #[test]\n    fn protocol_config_toml_roundtrip() {\n        let mut rng = crate::new_rng();\n        let config = ProtocolConfig::random(&mut rng);\n        let encoded = toml::to_string_pretty(&config).unwrap();\n        let decoded = toml::from_str(&encoded).unwrap();\n        assert_eq!(config, decoded);\n    }\n\n    #[test]\n    fn highway_config_toml_roundtrip() {\n        let mut rng = crate::new_rng();\n        let config = HighwayConfig::random(&mut rng);\n        let encoded = toml::to_string_pretty(&config).unwrap();\n        let decoded = toml::from_str(&encoded).unwrap();\n        assert_eq!(config, decoded);\n    }\n\n    #[test]\n    fn should_validate_round_length() {\n        let (mut chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources(\"local\");\n\n        // Minimum block time greater than maximum round length.\n        chainspec.core_config.consensus_protocol = ConsensusProtocolName::Highway;\n        chainspec.core_config.minimum_block_time = TimeDiff::from_millis(8);\n        chainspec.highway_config.maximum_round_length = TimeDiff::from_millis(7);\n        assert!(\n            !validate_chainspec(&chainspec),\n            \"chainspec should not be valid\"\n        );\n\n        chainspec.core_config.minimum_block_time = TimeDiff::from_millis(7);\n        chainspec.highway_config.maximum_round_length = TimeDiff::from_millis(7);\n        assert!(validate_chainspec(&chainspec), \"chainspec should be valid\");\n    }\n\n    #[ignore = \"We probably need to reconsider our approach here\"]\n    #[test]\n    fn should_have_deterministic_chainspec_hash() {\n        const PATH: &str = \"test/valid/0_9_0\";\n        const PATH_UNORDERED: &str = \"test/valid/0_9_0_unordered\";\n\n        let accounts: Vec<u8> = {\n            let path = RESOURCES_PATH.join(PATH).join(\"accounts.toml\");\n            fs::read(path).expect(\"should read file\")\n        };\n\n        let accounts_unordered: Vec<u8> 
= {\n            let path = RESOURCES_PATH.join(PATH_UNORDERED).join(\"accounts.toml\");\n            fs::read(path).expect(\"should read file\")\n        };\n\n        // Different accounts.toml file content\n        assert_ne!(accounts, accounts_unordered);\n\n        let (chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources(PATH);\n        let (chainspec_unordered, _) =\n            <(Chainspec, ChainspecRawBytes)>::from_resources(PATH_UNORDERED);\n\n        // Deserializes into equal objects\n        assert_eq!(chainspec, chainspec_unordered);\n\n        // With equal hashes\n        assert_eq!(chainspec.hash(), chainspec_unordered.hash());\n    }\n\n    #[test]\n    fn should_have_valid_finality_threshold() {\n        let mut rng = crate::new_rng();\n        let mut core_config = CoreConfig::random(&mut rng);\n        // Should be valid for FTT > 0 and < 1.\n        core_config.finality_threshold_fraction = Ratio::new(1, u64::MAX);\n        assert!(\n            validate_core_config(&core_config),\n            \"1 over max should be valid ftt\"\n        );\n        core_config.finality_threshold_fraction = Ratio::new(u64::MAX - 1, u64::MAX);\n        assert!(\n            validate_core_config(&core_config),\n            \"less than max over max should be valid ftt\"\n        );\n        core_config.finality_threshold_fraction = Ratio::new(0, 1);\n        assert!(\n            !validate_core_config(&core_config),\n            \"FTT == 0 or >= 1 should be invalid ftt\"\n        );\n        core_config.finality_threshold_fraction = Ratio::new(1, 1);\n        assert!(\n            !validate_core_config(&core_config),\n            \"1 over 1 should be invalid ftt\"\n        );\n        core_config.finality_threshold_fraction = Ratio::new(u64::MAX, u64::MAX);\n        assert!(\n            !validate_core_config(&core_config),\n            \"max over max should be invalid ftt\"\n        );\n        core_config.finality_threshold_fraction = 
Ratio::new(u64::MAX, u64::MAX - 1);\n        assert!(\n            !validate_core_config(&core_config),\n            \"max over less than max should be invalid ftt\"\n        );\n    }\n\n    #[test]\n    fn should_have_valid_transaction_counts() {\n        let transaction_v1_config = TransactionV1Config::default();\n\n        let transaction_v1_config =\n            transaction_v1_config.with_count_limits(Some(100), Some(1), None, None);\n\n        let transaction_config = TransactionConfig {\n            block_max_approval_count: 100,\n            transaction_v1_config,\n            ..Default::default()\n        };\n        assert!(\n            !validate_transaction_config(&transaction_config),\n            \"max approval count that is not at least equal to sum of `block_max_[txn type]_count`s \\\n            should be invalid\"\n        );\n\n        let transaction_v1_config = TransactionV1Config::default();\n\n        let transaction_v1_config =\n            transaction_v1_config.with_count_limits(Some(100), Some(50), Some(25), Some(25));\n\n        let transaction_config = TransactionConfig {\n            block_max_approval_count: 200,\n            transaction_v1_config,\n            ..Default::default()\n        };\n        assert!(\n            validate_transaction_config(&transaction_config),\n            \"max approval count equal to sum of `block_max_[txn type]_count`s should be valid\"\n        );\n\n        let transaction_v1_config = TransactionV1Config::default();\n        let transaction_v1_config =\n            transaction_v1_config.with_count_limits(Some(100), Some(50), Some(25), Some(24));\n\n        let transaction_config = TransactionConfig {\n            block_max_approval_count: 200,\n            transaction_v1_config,\n            ..Default::default()\n        };\n        assert!(\n            validate_transaction_config(&transaction_config),\n            \"max approval count greater than sum of `block_max_[txn type]_count`s should be 
valid\"\n        );\n    }\n\n    #[test]\n    fn should_perform_checks_with_global_state_update() {\n        let mut rng = crate::new_rng();\n        let mut protocol_config = ProtocolConfig::random(&mut rng);\n\n        // We force `global_state_update` to be `Some`.\n        protocol_config.global_state_update = Some(GlobalStateUpdate::random(&mut rng));\n\n        // TODO: seems like either protocol config validity should be implemented, or this sham of\n        // a test should be removed.\n        assert!(validate_protocol_config(&protocol_config), \"currently there are no validation rules for this config, so minimal type correctness should be valid\");\n    }\n\n    #[test]\n    fn should_perform_checks_without_global_state_update() {\n        let mut rng = crate::new_rng();\n        let mut protocol_config = ProtocolConfig::random(&mut rng);\n\n        // We force `global_state_update` to be `None`.\n        protocol_config.global_state_update = None;\n\n        // TODO: seems like either protocol config validity should be implemented, or this sham of\n        // a test should be removed.\n        assert!(validate_protocol_config(&protocol_config), \"currently there are no validation rules for this config, so minimal type correctness should be valid\");\n    }\n\n    #[test]\n    fn should_recognize_blocks_before_activation_point() {\n        let past_version = ProtocolVersion::from_parts(1, 0, 0);\n        let current_version = ProtocolVersion::from_parts(2, 0, 0);\n        let future_version = ProtocolVersion::from_parts(3, 0, 0);\n\n        let upgrade_era = EraId::from(5);\n        let previous_era = upgrade_era.saturating_sub(1);\n\n        let rng = &mut crate::new_rng();\n        let protocol_config = ProtocolConfig {\n            version: current_version,\n            hard_reset: false,\n            activation_point: ActivationPoint::EraId(upgrade_era),\n            global_state_update: None,\n        };\n\n        let block = 
TestBlockBuilder::new()\n            .era(previous_era)\n            .height(100)\n            .protocol_version(past_version)\n            .switch_block(true)\n            .build(rng);\n        assert!(\n            block\n                .header()\n                .is_last_block_before_activation(&protocol_config),\n            \"The block before this protocol version: a switch block with previous era and version.\"\n        );\n\n        //\n        let block = TestBlockBuilder::new()\n            .era(upgrade_era)\n            .height(100)\n            .protocol_version(past_version)\n            .switch_block(true)\n            .build(rng);\n        assert!(\n            !block\n                .header()\n                .is_last_block_before_activation(&protocol_config),\n            \"Not the activation point: wrong era.\"\n        );\n        let block = TestBlockBuilder::new()\n            .era(previous_era)\n            .height(100)\n            .protocol_version(current_version)\n            .switch_block(true)\n            .build(rng);\n        assert!(\n            !block\n                .header()\n                .is_last_block_before_activation(&protocol_config),\n            \"Not the activation point: wrong version.\"\n        );\n\n        let block = TestBlockBuilder::new()\n            .era(previous_era)\n            .height(100)\n            .protocol_version(future_version)\n            .switch_block(true)\n            .build(rng);\n        assert!(\n            !block\n                .header()\n                .is_last_block_before_activation(&protocol_config),\n            \"Alleged upgrade is in the past\"\n        );\n\n        let block = TestBlockBuilder::new()\n            .era(previous_era)\n            .height(100)\n            .protocol_version(past_version)\n            .switch_block(false)\n            .build(rng);\n        assert!(\n            !block\n                .header()\n                
.is_last_block_before_activation(&protocol_config),\n            \"Not the activation point: not a switch block.\"\n        );\n    }\n\n    #[test]\n    fn should_have_valid_production_chainspec() {\n        init_logging();\n\n        let (chainspec, _raw_bytes): (Chainspec, ChainspecRawBytes) =\n            Loadable::from_resources(\"production\");\n\n        assert!(validate_chainspec(&chainspec));\n    }\n\n    fn check_spec(spec: Chainspec, is_first_version: bool) {\n        if is_first_version {\n            assert_eq!(\n                spec.protocol_config.version,\n                ProtocolVersion::from_parts(0, 9, 0)\n            );\n            assert_eq!(\n                spec.protocol_config.activation_point.genesis_timestamp(),\n                Some(Timestamp::from(1600454700000))\n            );\n            assert_eq!(spec.network_config.accounts_config.accounts().len(), 4);\n\n            let accounts: Vec<_> = {\n                let mut accounts = spec.network_config.accounts_config.accounts().to_vec();\n                accounts.sort_by_key(|account_config| {\n                    (account_config.balance(), account_config.bonded_amount())\n                });\n                accounts\n            };\n\n            for (index, account_config) in accounts.into_iter().enumerate() {\n                assert_eq!(account_config.balance(), Motes::new(index + 1),);\n                assert_eq!(\n                    account_config.bonded_amount(),\n                    Motes::new((index as u64 + 1) * 10)\n                );\n            }\n        } else {\n            assert_eq!(\n                spec.protocol_config.version,\n                ProtocolVersion::from_parts(1, 0, 0)\n            );\n            assert_eq!(\n                spec.protocol_config.activation_point.era_id(),\n                EraId::from(1)\n            );\n            assert!(spec.network_config.accounts_config.accounts().is_empty());\n            
assert!(spec.protocol_config.global_state_update.is_some());\n            assert!(spec\n                .protocol_config\n                .global_state_update\n                .as_ref()\n                .unwrap()\n                .validators\n                .is_some());\n            for value in spec\n                .protocol_config\n                .global_state_update\n                .unwrap()\n                .entries\n                .values()\n            {\n                assert!(StoredValue::from_bytes(value).is_ok());\n            }\n        }\n\n        assert_eq!(spec.network_config.name, \"test-chain\");\n\n        assert_eq!(spec.core_config.era_duration, TimeDiff::from_seconds(180));\n        assert_eq!(spec.core_config.minimum_era_height, 9);\n        assert_eq!(\n            spec.core_config.finality_threshold_fraction,\n            Ratio::new(2, 25)\n        );\n        assert_eq!(\n            spec.highway_config.maximum_round_length,\n            TimeDiff::from_seconds(525)\n        );\n\n        assert_eq!(\n            spec.transaction_config.deploy_config.max_payment_cost,\n            Motes::new(9)\n        );\n        assert_eq!(\n            spec.transaction_config.max_ttl,\n            TimeDiff::from_seconds(26_300_160)\n        );\n        assert_eq!(spec.transaction_config.max_block_size, 12);\n        assert_eq!(\n            spec.transaction_config\n                .transaction_v1_config\n                .get_max_transaction_count(MINT_LANE_ID),\n            125\n        );\n        assert_eq!(spec.transaction_config.block_gas_limit, 13);\n\n        assert_eq!(spec.wasm_config, *EXPECTED_GENESIS_WASM_COSTS);\n    }\n\n    #[ignore = \"We probably need to reconsider our approach here\"]\n    #[test]\n    fn check_bundled_spec() {\n        let (chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources(\"test/valid/0_9_0\");\n        check_spec(chainspec, true);\n        let (chainspec, _) = <(Chainspec, 
ChainspecRawBytes)>::from_resources(\"test/valid/1_0_0\");\n        check_spec(chainspec, false);\n    }\n\n    #[test]\n    fn should_fail_when_wasm_lanes_have_duplicate_max_transaction_length() {\n        let mut v1_config = TransactionV1Config::default();\n        let definition_1 = TransactionLaneDefinition::new(3, 100, 100, 100, 10);\n        let definition_2 = TransactionLaneDefinition::new(4, 10000, 100, 101, 10);\n        let definition_3 = TransactionLaneDefinition::new(5, 1000, 100, 102, 10);\n        v1_config.set_wasm_lanes(vec![\n            definition_1.clone(),\n            definition_2.clone(),\n            definition_3.clone(),\n        ]);\n        let transaction_config = TransactionConfig {\n            transaction_v1_config: v1_config.clone(),\n            ..Default::default()\n        };\n        assert!(validate_transaction_config(&transaction_config));\n        let mut definition_2 = definition_2.clone();\n        definition_2.set_max_transaction_length(definition_1.max_transaction_length());\n        v1_config.set_wasm_lanes(vec![\n            definition_1.clone(),\n            definition_2.clone(),\n            definition_3.clone(),\n        ]);\n        let transaction_config = TransactionConfig {\n            transaction_v1_config: v1_config,\n            ..Default::default()\n        };\n        assert!(!validate_transaction_config(&transaction_config));\n    }\n\n    #[test]\n    fn should_fail_when_wasm_lanes_have_duplicate_max_gas_price() {\n        let mut v1_config = TransactionV1Config::default();\n        let definition_1 = TransactionLaneDefinition::new(3, 100, 100, 100, 10);\n        let definition_2 = TransactionLaneDefinition::new(4, 10000, 100, 101, 10);\n        let definition_3 = TransactionLaneDefinition::new(5, 1000, 100, 102, 10);\n        v1_config.set_wasm_lanes(vec![\n            definition_1.clone(),\n            definition_2.clone(),\n            definition_3.clone(),\n        ]);\n        let transaction_config = 
TransactionConfig {\n            transaction_v1_config: v1_config.clone(),\n            ..Default::default()\n        };\n        assert!(validate_transaction_config(&transaction_config));\n        let mut definition_2 = definition_2.clone();\n        definition_2.set_max_transaction_gas_limit(definition_1.max_transaction_gas_limit());\n        v1_config.set_wasm_lanes(vec![\n            definition_1.clone(),\n            definition_2.clone(),\n            definition_3.clone(),\n        ]);\n        let transaction_config = TransactionConfig {\n            transaction_v1_config: v1_config,\n            ..Default::default()\n        };\n        assert!(!validate_transaction_config(&transaction_config));\n    }\n\n    #[test]\n    fn should_fail_when_wasm_lanes_have_reseved_ids() {\n        fail_validation_with_lane_id(MINT_LANE_ID);\n        fail_validation_with_lane_id(AUCTION_LANE_ID);\n        fail_validation_with_lane_id(INSTALL_UPGRADE_LANE_ID);\n    }\n\n    fn fail_validation_with_lane_id(lane_id: u8) {\n        let mut v1_config = TransactionV1Config::default();\n        let definition_1 = TransactionLaneDefinition::new(lane_id, 100, 100, 100, 10);\n        v1_config.set_wasm_lanes(vec![definition_1.clone()]);\n        let transaction_config = TransactionConfig {\n            transaction_v1_config: v1_config.clone(),\n            ..Default::default()\n        };\n        assert!(!validate_transaction_config(&transaction_config));\n    }\n\n    #[test]\n    fn should_valid_no_wasm_lanes() {\n        let mut v1_config = TransactionV1Config::default();\n        v1_config.set_wasm_lanes(vec![]);\n        let transaction_config = TransactionConfig {\n            transaction_v1_config: v1_config.clone(),\n            ..Default::default()\n        };\n        assert!(!validate_transaction_config(&transaction_config));\n    }\n}\n"
  },
  {
    "path": "node/src/utils/config_specification.rs",
    "content": "use crate::MainReactorConfig as Config;\n\npub fn validate_config(config: &Config) -> bool {\n    if config.network.blocklist_retain_max_duration < config.network.blocklist_retain_min_duration {\n        return false;\n    }\n    true\n}\n\n#[cfg(test)]\nmod tests {\n    use super::validate_config;\n    use crate::MainReactorConfig as Config;\n    use casper_types::TimeDiff;\n\n    #[test]\n    fn validate_config_should_fail_malformed_blocklist_definition() {\n        let mut config = Config::default();\n        config.network.blocklist_retain_max_duration = TimeDiff::from_seconds(10);\n        config.network.blocklist_retain_min_duration = TimeDiff::from_seconds(11);\n        assert!(!validate_config(&config));\n    }\n\n    #[test]\n    fn validate_config_should_not_fail_when_blocklist_definitions_are_ok() {\n        let mut config = Config::default();\n        config.network.blocklist_retain_max_duration = TimeDiff::from_seconds(11);\n        config.network.blocklist_retain_min_duration = TimeDiff::from_seconds(10);\n        assert!(validate_config(&config));\n        config.network.blocklist_retain_max_duration = TimeDiff::from_seconds(10);\n        config.network.blocklist_retain_min_duration = TimeDiff::from_seconds(10);\n        assert!(validate_config(&config));\n    }\n}\n"
  },
  {
    "path": "node/src/utils/display_error.rs",
    "content": "//! Error formatting workaround.\n//!\n//! This module can be removed once/if the tracing issue\n//! <https://github.com/tokio-rs/tracing/issues/1308> has been resolved, which adds a special syntax\n//! for this case and the known issue <https://github.com/tokio-rs/tracing/issues/1308> has been\n//! fixed, which cuts traces short after the first cause.\n//!\n//! In the meantime, the `display_error` function should be used to format errors in log messages.\n\nuse std::{\n    error,\n    fmt::{self, Display, Formatter},\n};\n\nuse tracing::field;\n\n/// Wraps an error to ensure it gets properly captured by tracing.\npub(crate) fn display_error<'a, T>(err: &'a T) -> field::DisplayValue<ErrFormatter<'a, T>>\nwhere\n    T: error::Error + 'a,\n{\n    field::display(ErrFormatter(err))\n}\n\n/// An error formatter.\n#[derive(Clone, Copy, Debug)]\npub(crate) struct ErrFormatter<'a, T>(pub &'a T);\n\nimpl<T> Display for ErrFormatter<'_, T>\nwhere\n    T: error::Error,\n{\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        let mut opt_source: Option<&(dyn error::Error)> = Some(self.0);\n\n        while let Some(source) = opt_source {\n            write!(f, \"{}\", source)?;\n            opt_source = source.source();\n\n            if opt_source.is_some() {\n                f.write_str(\": \")?;\n            }\n        }\n\n        Ok(())\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use thiserror::Error;\n\n    use super::ErrFormatter;\n\n    #[derive(Debug, Error)]\n    #[error(\"this is baz\")]\n    struct Baz;\n\n    #[derive(Debug, Error)]\n    #[error(\"this is bar\")]\n    struct Bar(#[source] Baz);\n\n    #[derive(Debug, Error)]\n    enum MyError {\n        #[error(\"this is foo\")]\n        Foo {\n            #[source]\n            bar: Bar,\n        },\n    }\n\n    #[test]\n    fn test_formatter_formats_single() {\n        let single = Baz;\n\n        assert_eq!(ErrFormatter(&single).to_string().as_str(), \"this is baz\");\n    }\n\n    
#[test]\n    fn test_formatter_formats_nested() {\n        let nested = MyError::Foo { bar: Bar(Baz) };\n\n        assert_eq!(\n            ErrFormatter(&nested).to_string().as_str(),\n            \"this is foo: this is bar: this is baz\"\n        );\n    }\n}\n"
  },
  {
    "path": "node/src/utils/ds.rs",
    "content": "//! Datasize helper functions.\n\nuse std::collections::HashMap;\n\nuse datasize::DataSize;\nuse either::Either;\nuse once_cell::sync::OnceCell;\nuse rand::{\n    rngs::StdRng,\n    seq::{IteratorRandom, SliceRandom},\n    SeedableRng,\n};\n\n/// Number of items to sample when sampling a large collection.\nconst SAMPLE_SIZE: usize = 50;\n\n/// Creates an RNG for sampling based on the length of a collection.\nfn sampling_rng(len: usize) -> StdRng {\n    // We use a fixed seed RNG here and hope the size will provide enough entropy to avoid gross\n    // misestimations. This has the added benefit that repeated measurements will consider the\n    // same nodes, reducing jitter and making this a pure function.\n\n    // Initialize a buffer suitable for the seed, which might be larger than our length bytes.\n    let mut seed = <StdRng as SeedableRng>::Seed::default();\n    let len_be = len.to_be_bytes();\n\n    // Mix in entropy from length.\n    for (b1, b2) in seed.iter_mut().zip(len_be.iter()) {\n        *b1 ^= *b2;\n    }\n\n    StdRng::from_seed(seed)\n}\n\n/// Given a length and a total of `sampled` bytes from sampling `SAMPLE_SIZE` items, return an\n/// estimate for the total heap memory consumption of the collection.\nfn scale_sample(len: usize, sampled: usize) -> usize {\n    sampled * len / SAMPLE_SIZE\n}\n\n/// Extrapolate memory usage of a `Vec` by from a random subset of `SAMPLE_SIZE` items.\n#[allow(clippy::ptr_arg)]\npub fn vec_sample<T>(vec: &Vec<T>) -> usize\nwhere\n    T: DataSize,\n{\n    if vec.len() < SAMPLE_SIZE {\n        vec.estimate_heap_size()\n    } else {\n        let base_size = vec.capacity() * size_of::<T>();\n\n        let mut rng = sampling_rng(vec.len());\n        let sampled = vec\n            .as_slice()\n            .choose_multiple(&mut rng, SAMPLE_SIZE)\n            .map(DataSize::estimate_heap_size)\n            .sum();\n        base_size + scale_sample(vec.len(), sampled)\n    }\n}\n\n/// Extrapolate memory usage 
of a `HashMap` by from a random subset of `SAMPLE_SIZE` items.\npub fn hashmap_sample<K, V>(map: &HashMap<K, V>) -> usize\nwhere\n    K: DataSize,\n    V: DataSize,\n{\n    // Copied from\n    // https://github.com/CasperLabs/datasize-rs/blob/e04c3251eb5473651a0abf55c18869acaef635c1/datasize/src/std.rs#L201-L220\n    fn estimate_hashbrown_rawtable<T>(capacity: usize) -> usize {\n        let buckets = if capacity < 8 {\n            if capacity < 4 {\n                4\n            } else {\n                8\n            }\n        } else {\n            (capacity * 8 / 7).next_power_of_two()\n        };\n        let size = size_of::<T>();\n        let ctrl_offset = size * buckets;\n        ctrl_offset + buckets\n    }\n\n    if map.len() < SAMPLE_SIZE {\n        map.estimate_heap_size()\n    } else {\n        let base_size = estimate_hashbrown_rawtable::<(K, V)>(map.capacity());\n\n        let mut rng = sampling_rng(map.len());\n\n        let sampled = map\n            .iter()\n            .choose_multiple(&mut rng, SAMPLE_SIZE)\n            .into_iter()\n            .map(|(k, v)| k.estimate_heap_size() + v.estimate_heap_size())\n            .sum();\n\n        base_size + scale_sample(map.len(), sampled)\n    }\n}\n\npub(crate) fn once_cell<T>(cell: &OnceCell<T>) -> usize\nwhere\n    T: DataSize,\n{\n    cell.get().map_or(0, |value| value.estimate_heap_size())\n}\n\npub(crate) fn maybe_either<T, U>(either: &Option<Either<T, U>>) -> usize\nwhere\n    T: DataSize,\n    U: DataSize,\n{\n    match either {\n        None => 0,\n        Some(Either::Left(left)) => left.estimate_heap_size(),\n        Some(Either::Right(right)) => right.estimate_heap_size(),\n    }\n}\n\n#[cfg(test)]\n#[allow(clippy::assertions_on_constants)] // used by sanity checks around `SAMPLE_SIZE`\nmod tests {\n    use std::collections::HashMap;\n\n    use datasize::DataSize;\n\n    use super::{hashmap_sample, vec_sample, SAMPLE_SIZE};\n\n    #[test]\n    fn vec_sample_below_sample_size() {\n        
let data: Vec<Box<u32>> = vec![];\n\n        assert_eq!(vec_sample(&data), data.estimate_heap_size());\n\n        assert!(SAMPLE_SIZE > 3);\n        let data2: Vec<Box<u32>> = vec![Box::new(1), Box::new(2), Box::new(3)];\n\n        assert_eq!(vec_sample(&data2), data2.estimate_heap_size());\n    }\n\n    #[test]\n    fn vec_sample_above_sample_size() {\n        let num_items = SAMPLE_SIZE * 5;\n\n        // We make all items equal in size, so that we know the outcome of a random sampling.\n        let data: Vec<Vec<u32>> = (0..num_items)\n            .map(|_| vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n            .collect();\n\n        assert_eq!(vec_sample(&data), data.estimate_heap_size());\n    }\n\n    #[test]\n    fn hashmap_sample_below_sample_size() {\n        let data: HashMap<u32, Box<u32>> = HashMap::new();\n\n        assert_eq!(hashmap_sample(&data), data.estimate_heap_size());\n\n        assert!(SAMPLE_SIZE > 3);\n        let mut data2: HashMap<u32, Box<u32>> = HashMap::new();\n        data2.insert(1, Box::new(1));\n        data2.insert(2, Box::new(2));\n        data2.insert(3, Box::new(3));\n\n        assert_eq!(hashmap_sample(&data2), data2.estimate_heap_size());\n    }\n\n    #[test]\n    fn hashmap_sample_above_sample_size() {\n        let num_items = SAMPLE_SIZE * 5;\n\n        // We make all items equal in size, so that we know the outcome of a random sampling.\n        let data: HashMap<usize, Vec<u32>> = (0..num_items)\n            .map(|idx| (idx, vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]))\n            .collect();\n\n        assert_eq!(hashmap_sample(&data), data.estimate_heap_size());\n    }\n}\n"
  },
  {
    "path": "node/src/utils/external.rs",
    "content": "//! External resource handling\n//!\n//! The `External` type abstracts away the loading of external resources. See the type documentation\n//! for details.\n\nuse std::{\n    fmt::{Debug, Display},\n    path::{Path, PathBuf},\n    sync::Arc,\n};\n\nuse casper_types::{\n    crypto,\n    file_utils::{read_file, ReadFileError},\n};\nuse datasize::DataSize;\n#[cfg(test)]\nuse once_cell::sync::Lazy;\nuse openssl::{\n    pkey::{PKey, Private},\n    x509::X509,\n};\nuse serde::{Deserialize, Serialize};\nuse thiserror::Error;\n\nuse casper_types::SecretKey;\n\nuse crate::tls::{self, LoadCertError, LoadSecretKeyError};\n\n/// Path to bundled resources.\n#[cfg(test)]\npub static RESOURCES_PATH: Lazy<PathBuf> =\n    Lazy::new(|| PathBuf::from(env!(\"CARGO_MANIFEST_DIR\")).join(\"../resources\"));\n\n/// External resource.\n///\n/// An `External` resource can be given in two ways: Either as an immediate value, or through a\n/// path, provided the value implements `Loadable`.\n///\n/// Serializing and deserializing an `External` value is only possible if it is in path form. This\n/// is especially useful when writing structure configurations.\n///\n/// An `External` also always provides a default, which will always result in an error when `load`\n/// is called. Should the underlying type `T` implement `Default`, the `with_default` can be\n/// used instead.\n#[derive(Clone, DataSize, Eq, Debug, Deserialize, PartialEq, Serialize, Default)]\n#[serde(untagged)]\npub enum External {\n    /// Value that should be loaded from an external path.\n    Path(PathBuf),\n    /// The value has not been specified, but a default has been requested.\n    #[serde(skip)]\n    #[default]\n    Missing,\n}\n\nimpl External {\n    /// Loads the value if not loaded already, resolving relative paths from `root` or returns\n    /// available value. 
If the value is `Missing`, returns an error.\n    pub fn load<T, P>(self, root: P) -> Result<T, LoadError<T::Error>>\n    where\n        T: Loadable,\n        P: AsRef<Path>,\n    {\n        match self {\n            External::Path(path) => {\n                let full_path = if path.is_relative() {\n                    root.as_ref().join(&path)\n                } else {\n                    path\n                };\n\n                T::from_path(&full_path).map_err(move |error| LoadError::Failed {\n                    error,\n                    // We canonicalize `full_path` here, with `ReadFileError` we get extra\n                    // information about the absolute path this way if the latter is relative. It\n                    // will still be relative if the current path does not exist.\n                    path: full_path.canonicalize().unwrap_or(full_path),\n                })\n            }\n            External::Missing => Err(LoadError::Missing),\n        }\n    }\n}\n\n/// A value that can be loaded from a file.\npub trait Loadable: Sized {\n    /// Error that can occur when attempting to load.\n    type Error: Debug + Display;\n\n    /// Loads a value from the given input path.\n    fn from_path<P: AsRef<Path>>(path: P) -> Result<Self, Self::Error>;\n\n    /// Load a test-only instance from the local path.\n    #[cfg(test)]\n    fn from_resources<P: AsRef<Path>>(rel_path: P) -> Self {\n        Self::from_path(RESOURCES_PATH.join(rel_path.as_ref())).unwrap_or_else(|error| {\n            panic!(\n                \"could not load resources from {}: {}\",\n                rel_path.as_ref().display(),\n                error\n            )\n        })\n    }\n}\n\nfn display_res_path<E>(result: &Result<PathBuf, E>) -> String {\n    result\n        .as_ref()\n        .map(|p| p.display().to_string())\n        .unwrap_or_else(|_| String::new())\n}\n\n/// Error loading external value.\n#[derive(Debug, Error)]\npub enum LoadError<E: Debug + Display> {\n    /// 
Failed to load from path.\n    #[error(\"could not load from {}: {error}\", display_res_path(&.path.canonicalize()))]\n    Failed {\n        /// Path that failed to load.\n        path: PathBuf,\n        /// Error load failed with.\n        error: E,\n    },\n    /// A value was missing.\n    #[error(\"value is missing (default requested)\")]\n    Missing,\n}\n\n// We supply a few useful implementations for external types.\nimpl Loadable for X509 {\n    type Error = anyhow::Error;\n\n    fn from_path<P: AsRef<Path>>(path: P) -> Result<Self, Self::Error> {\n        let error = match tls::load_cert(path) {\n            Ok(cert) => return Ok(cert),\n            Err(LoadCertError::ReadFile(error)) => {\n                anyhow::Error::new(error).context(\"failed to load certificate\")\n            }\n            Err(LoadCertError::X509CertFromPem(error)) => {\n                anyhow::Error::new(error).context(\"parsing certificate\")\n            }\n        };\n        Err(error)\n    }\n}\n\nimpl Loadable for PKey<Private> {\n    type Error = anyhow::Error;\n\n    fn from_path<P: AsRef<Path>>(path: P) -> Result<Self, Self::Error> {\n        let error = match tls::load_secret_key(path) {\n            Ok(secret_key) => return Ok(secret_key),\n            Err(LoadSecretKeyError::ReadFile(error)) => {\n                anyhow::Error::new(error).context(\"failed to load private key\")\n            }\n            Err(LoadSecretKeyError::PrivateKeyFromPem(error)) => {\n                anyhow::Error::new(error).context(\"parsing private key\")\n            }\n        };\n\n        Err(error)\n    }\n}\n\nimpl Loadable for Arc<SecretKey> {\n    type Error = crypto::ErrorExt;\n\n    fn from_path<P: AsRef<Path>>(path: P) -> Result<Self, Self::Error> {\n        Ok(Arc::new(SecretKey::from_file(path)?))\n    }\n}\n\nimpl Loadable for Vec<u8> {\n    type Error = ReadFileError;\n\n    fn from_path<P: AsRef<Path>>(path: P) -> Result<Self, Self::Error> {\n        read_file(path)\n    
}\n}\n\n#[cfg(test)]\nmod tests {\n    use super::External;\n\n    #[test]\n    fn test_to_string() {\n        let val: External = External::Path(\"foo/bar.toml\".into());\n        assert_eq!(\n            \"\\\"foo/bar.toml\\\"\",\n            serde_json::to_string(&val).expect(\"serialization error\")\n        );\n    }\n\n    #[test]\n    fn test_load_from_string() {\n        let input = \"\\\"foo/bar.toml\\\"\";\n\n        let val: External = serde_json::from_str(input).expect(\"deserialization failed\");\n\n        assert_eq!(External::Path(\"foo/bar.toml\".into()), val);\n    }\n}\n"
  },
  {
    "path": "node/src/utils/fmt_limit.rs",
    "content": "//! Wrappers to display a limited amount of data from collections using `fmt`.\n\nuse std::fmt::{self, Debug, Formatter, Write};\n\n/// A display wrapper showing a limited amount of a formatted rendering.\n///\n/// Any characters exceeding the given length will be omitted and replaced by `...`.\npub(crate) struct FmtLimit<'a, T> {\n    limit: usize,\n    item: &'a T,\n}\n\nimpl<'a, T> FmtLimit<'a, T> {\n    /// Creates a new limited formatter.\n    #[inline]\n    pub(crate) fn new(limit: usize, item: &'a T) -> Self {\n        FmtLimit { limit, item }\n    }\n}\n\n/// Helper that limits writing to a given `fmt::Writer`.\nstruct LimitWriter<'a, W> {\n    /// The wrapper writer.\n    inner: &'a mut W,\n    /// How many characters are left.\n    left: usize,\n    /// Whether or not the writer is \"closed\".\n    ///\n    /// Closing happens when an additional character is written after `left` has reached 0 and will\n    /// trigger the ellipses to be written out.\n    closed: bool,\n}\n\nimpl<'a, W> LimitWriter<'a, W> {\n    /// Constructs a new `LimitWriter`.\n    #[inline]\n    fn new(inner: &'a mut W, limit: usize) -> Self {\n        LimitWriter {\n            inner,\n            left: limit,\n            closed: false,\n        }\n    }\n}\n\nimpl<W> Write for LimitWriter<'_, W>\nwhere\n    W: Write,\n{\n    #[inline]\n    fn write_str(&mut self, s: &str) -> fmt::Result {\n        if self.closed {\n            return Ok(());\n        }\n\n        if self.left == 0 {\n            self.closed = true;\n            self.inner.write_str(\"...\")?;\n            return Ok(());\n        }\n\n        // A tad bit slow, but required for correct unicode output.\n        for c in s.chars().take(self.left) {\n            self.write_char(c)?;\n        }\n\n        Ok(())\n    }\n\n    #[inline]\n    fn write_char(&mut self, c: char) -> fmt::Result {\n        if self.closed {\n            return Ok(());\n        }\n\n        if self.left == 0 {\n            
self.closed = true;\n            self.inner.write_str(\"...\")?;\n            return Ok(());\n        }\n\n        self.left -= 1;\n        self.inner.write_char(c)\n    }\n}\n\nimpl<T> Debug for FmtLimit<'_, T>\nwhere\n    T: Debug,\n{\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        let mut limit_writer = LimitWriter::new(f, self.limit);\n        write!(&mut limit_writer, \"{:?}\", self.item)\n    }\n}\n\n// Note: If required, a `Display` implementation can be added easily for `FmtLimit`.\n\n#[cfg(test)]\nmod tests {\n    use crate::utils::fmt_limit::FmtLimit;\n\n    #[test]\n    fn limit_debug_works() {\n        let collection: Vec<_> = (0..5).collect();\n\n        // Sanity check.\n        assert_eq!(format!(\"{:?}\", collection), \"[0, 1, 2, 3, 4]\");\n\n        assert_eq!(format!(\"{:?}\", FmtLimit::new(3, &collection)), \"[0,...\");\n        assert_eq!(format!(\"{:?}\", FmtLimit::new(0, &collection)), \"...\");\n        assert_eq!(\n            format!(\"{:?}\", FmtLimit::new(1000, &collection)),\n            \"[0, 1, 2, 3, 4]\"\n        );\n        assert_eq!(\n            format!(\"{:?}\", FmtLimit::new(15, &collection)),\n            \"[0, 1, 2, 3, 4]\"\n        );\n    }\n}\n"
  },
  {
    "path": "node/src/utils/opt_display.rs",
    "content": "//! `Display` wrapper for optional values.\n//!\n//! Allows displaying an `Option<T>`, where `T` already implements `Display`.\n\nuse std::fmt::{Display, Formatter, Result};\n\nuse serde::Serialize;\n\n/// Wrapper around `Option` that implements `Display`.\n///\n/// For convenience, it also includes a `Serialize` implementation that works identical to the\n/// underlying `Option<T>` serialization.\npub struct OptDisplay<'a, T> {\n    /// The actual `Option` being displayed.\n    inner: Option<T>,\n    /// Value to substitute if `inner` is `None`.\n    empty_display: &'a str,\n}\n\nimpl<T> Serialize for OptDisplay<'_, T>\nwhere\n    T: Serialize,\n{\n    fn serialize<S>(&self, serializer: S) -> core::result::Result<S::Ok, S::Error>\n    where\n        S: serde::Serializer,\n    {\n        self.inner.serialize(serializer)\n    }\n}\n\nimpl<'a, T: Display> OptDisplay<'a, T> {\n    /// Creates a new `OptDisplay`.\n    #[inline]\n    pub fn new(maybe_display: Option<T>, empty_display: &'a str) -> Self {\n        Self {\n            inner: maybe_display,\n            empty_display,\n        }\n    }\n}\n\nimpl<T: Display> Display for OptDisplay<'_, T> {\n    #[inline]\n    fn fmt(&self, f: &mut Formatter<'_>) -> Result {\n        match self.inner {\n            None => f.write_str(self.empty_display),\n            Some(ref val) => val.fmt(f),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::OptDisplay;\n\n    #[test]\n    fn opt_display_works() {\n        let some_value: Option<u32> = Some(12345);\n\n        assert_eq!(\n            OptDisplay::new(some_value.as_ref(), \"does not matter\").to_string(),\n            \"12345\"\n        );\n\n        let none_value: Option<u32> = None;\n        assert_eq!(\n            OptDisplay::new(none_value.as_ref(), \"should be none\").to_string(),\n            \"should be none\"\n        );\n    }\n}\n"
  },
  {
    "path": "node/src/utils/rlimit.rs",
    "content": "//! Limit retrieval and set.\n//!\n//! Allows retrieval and setting of resource limits.\n//!\n//! This module wraps a fair number of libc types to make external use as pleasant as possible.\n\nuse std::{\n    any,\n    fmt::{self, Debug},\n    io,\n    marker::PhantomData,\n    mem::MaybeUninit,\n};\n\nuse fmt::Formatter;\n\n/// A scalar limit.\npub type Limit = libc::rlim_t;\n\n#[cfg(target_os = \"linux\")]\npub type LimitResourceId = libc::__rlimit_resource_t;\n\n#[cfg(target_os = \"macos\")]\npub type LimitResourceId = libc::c_int;\n\n/// A kind of limit that can be set/retrieved.\npub trait LimitKind {\n    /// The `resource` id use for libc calls.\n    const LIBC_RESOURCE: LimitResourceId;\n}\n\n/// Maximum number of open files (`ulimit -n`).\n#[derive(Copy, Clone, Debug)]\npub struct OpenFiles;\n\nimpl LimitKind for OpenFiles {\n    const LIBC_RESOURCE: LimitResourceId = libc::RLIMIT_NOFILE;\n}\n\n/// Wrapper around libc resource limit type.\n#[derive(Copy, Clone)]\npub struct ResourceLimit<T> {\n    limit: libc::rlimit,\n    kind: PhantomData<T>,\n}\n\nimpl<T> Debug for ResourceLimit<T>\nwhere\n    T: Copy,\n{\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        let name = format!(\"ResourceLimit<{}>\", any::type_name::<T>());\n        f.debug_struct(&name)\n            .field(\"cur\", &self.current())\n            .field(\"max\", &self.max())\n            .finish()\n    }\n}\n\nimpl<T> ResourceLimit<T> {\n    /// Creates a new resource limit.\n    #[inline]\n    pub fn new(current: Limit, max: Limit) -> Self {\n        ResourceLimit {\n            limit: libc::rlimit {\n                rlim_cur: current,\n                rlim_max: max,\n            },\n            kind: PhantomData,\n        }\n    }\n\n    /// Creates a new resource limit, setting hard and soft limit to the same value.\n    #[inline]\n    pub fn fixed(limit: Limit) -> Self {\n        Self::new(limit, limit)\n    }\n\n    /// The current or \"soft\" limit.\n   
 #[inline]\n    pub fn current(self) -> Limit {\n        self.limit.rlim_cur\n    }\n\n    /// The maximum allowed or \"hard\" limit .\n    #[inline]\n    pub fn max(self) -> Limit {\n        self.limit.rlim_max\n    }\n}\n\nimpl<T> ResourceLimit<T>\nwhere\n    T: LimitKind,\n{\n    /// Retrieves the given resource limit from the operating system.\n    #[inline]\n    pub fn get() -> io::Result<Self> {\n        let mut dest: MaybeUninit<libc::rlimit> = MaybeUninit::zeroed();\n\n        let outcome = unsafe { libc::getrlimit(T::LIBC_RESOURCE, dest.as_mut_ptr()) };\n\n        match outcome {\n            -1 => Err(io::Error::last_os_error()),\n            0 => Ok(ResourceLimit {\n                limit: unsafe { dest.assume_init() },\n                kind: PhantomData,\n            }),\n            // This should never happen, so we notify the user.\n            _ => Err(io::Error::new(\n                io::ErrorKind::Other,\n                format!(\"expected return value of -1 or 0, but got {}\", outcome),\n            )),\n        }\n    }\n\n    /// Sets the specified limit via operation system.\n    pub fn set(self) -> io::Result<()> {\n        let outcome = unsafe { libc::setrlimit(T::LIBC_RESOURCE, &self.limit) };\n\n        match outcome {\n            -1 => Err(io::Error::last_os_error()),\n            0 => Ok(()),\n            // This should never happen, so we notify the user.\n            _ => Err(io::Error::new(\n                io::ErrorKind::Other,\n                format!(\"expected return value of -1 or 0, but got {}\", outcome),\n            )),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::{OpenFiles, ResourceLimit};\n\n    #[test]\n    fn get_and_reset_ulimit() {\n        // Retrieve limit and set the exact same limit again.\n        let limit = ResourceLimit::<OpenFiles>::get().expect(\"could not retrieve initial limit\");\n        println!(\"{:?}\", limit);\n\n        // Note: We could change it to something different, but we 
do not want to risk influencing\n        // other tests, so this is safest.\n        limit.set().expect(\"could not re-set limit\");\n\n        println!(\n            \"{:?}\",\n            ResourceLimit::<OpenFiles>::get().expect(\"could not retrieve limit a second time\")\n        );\n    }\n}\n"
  },
  {
    "path": "node/src/utils/round_robin.rs",
    "content": "//! Weighted round-robin scheduling.\n//!\n//! This module implements a weighted round-robin scheduler that ensures no deadlocks occur, but\n//! still allows prioritizing events from one source over another. The module uses `tokio`'s\n//! synchronization primitives under the hood.\n\nuse std::{\n    collections::{BTreeMap, HashMap, VecDeque},\n    fmt::{Debug, Display},\n    hash::Hash,\n    num::NonZeroUsize,\n    sync::atomic::{AtomicBool, AtomicUsize, Ordering},\n};\n\nuse enum_iterator::IntoEnumIterator;\nuse serde::Serialize;\nuse tokio::sync::{Mutex, MutexGuard, Semaphore};\nuse tracing::{debug, warn};\n\n/// Weighted round-robin scheduler.\n///\n/// The weighted round-robin scheduler keeps queues internally and returns an item from a queue\n/// when asked. Each queue is assigned a weight, which is simply the amount of items maximally\n/// returned from it before moving on to the next queue.\n///\n/// If a queue is empty, it is skipped until the next round. Queues are processed in the order they\n/// are passed to the constructor function.\n///\n/// The scheduler keeps track internally which queue needs to be popped next.\n#[derive(Debug)]\npub struct WeightedRoundRobin<I, K> {\n    /// Current iteration state.\n    state: Mutex<IterationState<K>>,\n\n    /// A list of slots that are round-robin'd.\n    slots: Vec<Slot<K>>,\n\n    /// Actual queues.\n    queues: HashMap<K, QueueState<I>>,\n\n    /// Number of items in all queues combined.\n    total: Semaphore,\n\n    /// Whether or not the queue is sealed (not accepting any more items).\n    sealed: AtomicBool,\n\n    /// Dump count of events only when there is a 10%+ increase of events compared to the previous\n    /// report. 
Setting to `None` disables the dump function.\n    recent_event_count_peak: Option<AtomicUsize>,\n}\n\n/// State that wraps queue and its event count.\n#[derive(Debug)]\nstruct QueueState<I> {\n    /// A queue's event counter.\n    ///\n    /// Do not modify this unless you are holding the `queue` lock.\n    event_count: AtomicUsize,\n    queue: Mutex<VecDeque<I>>,\n}\n\nimpl<I> QueueState<I> {\n    fn new() -> Self {\n        QueueState {\n            event_count: AtomicUsize::new(0),\n            queue: Mutex::new(VecDeque::new()),\n        }\n    }\n\n    /// Remove all events from a queue.\n    #[cfg(test)]\n    async fn drain(&self) -> Vec<I> {\n        let mut guard = self.queue.lock().await;\n        let events: Vec<I> = guard.drain(..).collect();\n        self.event_count.fetch_sub(events.len(), Ordering::SeqCst);\n        events\n    }\n\n    #[inline]\n    async fn push_back(&self, element: I) {\n        self.queue.lock().await.push_back(element);\n        self.event_count.fetch_add(1, Ordering::SeqCst);\n    }\n\n    #[inline]\n    fn dec_count(&self) {\n        self.event_count.fetch_sub(1, Ordering::SeqCst);\n    }\n\n    #[inline]\n    fn event_count(&self) -> usize {\n        self.event_count.load(Ordering::SeqCst)\n    }\n}\n\n/// The inner state of the queue iteration.\n#[derive(Copy, Clone, Debug)]\nstruct IterationState<K> {\n    /// The currently active slot.\n    ///\n    /// Once it has no tickets left, the next slot is loaded.\n    active_slot: Slot<K>,\n\n    /// The position of the active slot. Used to calculate the next slot.\n    active_slot_idx: usize,\n}\n\n/// An internal slot in the round-robin scheduler.\n///\n/// A slot marks the scheduling position, i.e. 
which queue we are currently polling and how many\n/// tickets it has left before the next one is due.\n#[derive(Copy, Clone, Debug)]\nstruct Slot<K> {\n    /// The key, identifying a queue.\n    key: K,\n\n    /// Number of items to return before moving on to the next queue.\n    tickets: usize,\n}\n\n#[derive(Debug, Serialize)]\n/// A dump of the internal queues.\npub struct QueueDump<'a, K, I>\nwhere\n    K: Ord + Eq,\n{\n    /// Queues being dumped.\n    ///\n    /// A `BTreeMap` is used to make the ordering constant, it will be in the natural order defined\n    /// by `Ord` on `K`.\n    queues: BTreeMap<K, &'a VecDeque<I>>,\n}\n\nimpl<I, K> WeightedRoundRobin<I, K>\nwhere\n    I: Debug,\n    K: Copy + Clone + Eq + Hash + IntoEnumIterator + Debug,\n{\n    /// Creates a new weighted round-robin scheduler.\n    ///\n    /// Creates a queue for each pair given in `weights`. The second component of each `weight` is\n    /// the number of times to return items from one queue before moving on to the next one.\n    pub(crate) fn new(\n        weights: Vec<(K, NonZeroUsize)>,\n        initial_event_count_threshold: Option<usize>,\n    ) -> Self {\n        assert!(!weights.is_empty(), \"must provide at least one slot\");\n\n        let queues = weights\n            .iter()\n            .map(|(idx, _)| (*idx, QueueState::new()))\n            .collect();\n        let slots: Vec<Slot<K>> = weights\n            .into_iter()\n            .map(|(key, tickets)| Slot {\n                key,\n                tickets: tickets.get(),\n            })\n            .collect();\n        let active_slot = slots[0];\n\n        WeightedRoundRobin {\n            state: Mutex::new(IterationState {\n                active_slot,\n                active_slot_idx: 0,\n            }),\n            slots,\n            queues,\n            total: Semaphore::new(0),\n            sealed: AtomicBool::new(false),\n            recent_event_count_peak: 
initial_event_count_threshold.map(AtomicUsize::new),\n        }\n    }\n\n    /// Dump the queue contents to the given dumper function.\n    pub async fn dump<F: FnOnce(&QueueDump<K, I>)>(&self, dumper: F)\n    where\n        K: Ord,\n    {\n        let locks = self.lock_queues().await;\n        let mut queues = BTreeMap::new();\n        for (kind, guard) in &locks {\n            let queue = &**guard;\n            queues.insert(*kind, queue);\n        }\n\n        let queue_dump = QueueDump { queues };\n        dumper(&queue_dump);\n    }\n\n    /// Lock all queues in a well-defined order to avoid deadlocks conditions.\n    async fn lock_queues(&self) -> Vec<(K, MutexGuard<'_, VecDeque<I>>)> {\n        let mut locks = Vec::new();\n        for kind in K::into_enum_iter() {\n            let queue_guard = self\n                .queues\n                .get(&kind)\n                .expect(\"missing queue while locking\")\n                .queue\n                .lock()\n                .await;\n\n            locks.push((kind, queue_guard));\n        }\n\n        locks\n    }\n}\n\nfn should_dump_queues(total: usize, recent_threshold: usize) -> bool {\n    total > ((recent_threshold * 11) / 10)\n}\n\nimpl<I, K> WeightedRoundRobin<I, K>\nwhere\n    K: Copy + Clone + Eq + Hash + Display,\n{\n    /// Pushes an item to a queue identified by key.\n    ///\n    /// ## Panics\n    ///\n    /// Panics if the queue identified by key `queue` does not exist.\n    pub(crate) async fn push(&self, item: I, queue: K) {\n        if self.sealed.load(Ordering::SeqCst) {\n            debug!(\"queue sealed, dropping item\");\n            return;\n        }\n\n        self.queues\n            .get(&queue)\n            .expect(\"tried to push to non-existent queue\")\n            .push_back(item)\n            .await;\n\n        // NOTE: Count may be off by one b/c of the way locking works when elements are popped.\n        // It's fine for its purposes.\n        if let 
Some(recent_event_count_peak) = &self.recent_event_count_peak {\n            let total = self.queues.iter().map(|q| q.1.event_count()).sum::<usize>();\n            let recent_threshold = recent_event_count_peak.load(Ordering::SeqCst);\n            if should_dump_queues(total, recent_threshold) {\n                recent_event_count_peak.store(total, Ordering::SeqCst);\n                let info: Vec<_> = self\n                    .queues\n                    .iter()\n                    .map(|q| (q.0.to_string(), q.1.event_count()))\n                    .filter(|(_, count)| count > &0)\n                    .collect();\n                warn!(\"Current event queue size ({total}) is above the threshold ({recent_threshold}): details {info:?}\");\n            }\n        }\n\n        // We increase the item count after we've put the item into the queue.\n        self.total.add_permits(1);\n    }\n\n    /// Returns the next item from queue.\n    ///\n    /// Asynchronously waits until a queue is non-empty or panics if an internal error occurred.\n    pub(crate) async fn pop(&self) -> (I, K) {\n        // Safe to `expect` here as the only way for acquiring a permit to fail would be if the\n        // `self.total` semaphore were closed.\n        self.total.acquire().await.expect(\"should acquire\").forget();\n\n        let mut inner = self.state.lock().await;\n\n        // We know we have at least one item in a queue.\n        loop {\n            let queue_state = self\n                .queues\n                // The queue disappearing should never happen.\n                .get(&inner.active_slot.key)\n                .expect(\"the queue disappeared. 
this should not happen\");\n\n            let mut current_queue = queue_state.queue.lock().await;\n\n            if inner.active_slot.tickets == 0 || current_queue.is_empty() {\n                // Go to next queue slot if we've exhausted the current queue.\n                inner.active_slot_idx = (inner.active_slot_idx + 1) % self.slots.len();\n                inner.active_slot = self.slots[inner.active_slot_idx];\n                continue;\n            }\n\n            // We have hit a queue that is not empty. Decrease tickets and pop.\n            inner.active_slot.tickets -= 1;\n\n            let item = current_queue\n                .pop_front()\n                // We hold the queue's lock and checked `is_empty` earlier.\n                .expect(\"item disappeared. this should not happen\");\n            queue_state.dec_count();\n            break (item, inner.active_slot.key);\n        }\n    }\n\n    /// Drains all events from a specific queue.\n    #[cfg(test)]\n    pub(crate) async fn drain_queue(&self, queue: K) -> Vec<I> {\n        let events = self\n            .queues\n            .get(&queue)\n            .expect(\"queue to be drained disappeared\")\n            .drain()\n            .await;\n\n        // TODO: This is racy if someone is calling `pop` at the same time.\n        self.total\n            .acquire_many(events.len() as u32)\n            .await\n            .expect(\"could not acquire tickets during drain\")\n            .forget();\n\n        events\n    }\n\n    /// Drains all events from all queues.\n    #[cfg(test)]\n    pub async fn drain_queues(&self) -> Vec<I> {\n        let mut events = Vec::new();\n        let keys: Vec<K> = self.queues.keys().cloned().collect();\n\n        for kind in keys {\n            events.extend(self.drain_queue(kind).await);\n        }\n        events\n    }\n\n    /// Seals the queue, preventing it from accepting any more items.\n    ///\n    /// Items pushed into the queue via `push` will be dropped 
immediately.\n    #[cfg(test)]\n    pub fn seal(&self) {\n        self.sealed.store(true, Ordering::SeqCst);\n    }\n\n    /// Returns the number of events currently in the queue.\n    #[cfg(test)]\n    pub(crate) fn item_count(&self) -> usize {\n        self.total.available_permits()\n    }\n\n    /// Returns the number of events in each of the queues.\n    pub(crate) fn event_queues_counts(&self) -> HashMap<K, usize> {\n        self.queues\n            .iter()\n            .map(|(key, queue)| (*key, queue.event_count()))\n            .collect()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::num::NonZeroUsize;\n\n    use futures::{future::FutureExt, join};\n\n    use super::*;\n\n    #[repr(usize)]\n    #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, IntoEnumIterator)]\n    enum QueueKind {\n        One = 1,\n        Two,\n    }\n\n    fn weights() -> Vec<(QueueKind, NonZeroUsize)> {\n        unsafe {\n            vec![\n                (QueueKind::One, NonZeroUsize::new_unchecked(1)),\n                (QueueKind::Two, NonZeroUsize::new_unchecked(2)),\n            ]\n        }\n    }\n\n    impl Display for QueueKind {\n        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n            match self {\n                QueueKind::One => write!(f, \"One\"),\n                QueueKind::Two => write!(f, \"Two\"),\n            }\n        }\n    }\n\n    #[tokio::test]\n    async fn should_respect_weighting() {\n        let scheduler = WeightedRoundRobin::<char, QueueKind>::new(weights(), None);\n        // Push three items on to each queue\n        let future1 = scheduler\n            .push('a', QueueKind::One)\n            .then(|_| scheduler.push('b', QueueKind::One))\n            .then(|_| scheduler.push('c', QueueKind::One));\n        let future2 = scheduler\n            .push('d', QueueKind::Two)\n            .then(|_| scheduler.push('e', QueueKind::Two))\n            .then(|_| scheduler.push('f', QueueKind::Two));\n        
join!(future2, future1);\n\n        // We should receive the popped values in the order a, d, e, b, f, c\n        assert_eq!(('a', QueueKind::One), scheduler.pop().await);\n        assert_eq!(('d', QueueKind::Two), scheduler.pop().await);\n        assert_eq!(('e', QueueKind::Two), scheduler.pop().await);\n        assert_eq!(('b', QueueKind::One), scheduler.pop().await);\n        assert_eq!(('f', QueueKind::Two), scheduler.pop().await);\n        assert_eq!(('c', QueueKind::One), scheduler.pop().await);\n    }\n\n    #[tokio::test]\n    async fn can_seal_queue() {\n        let scheduler = WeightedRoundRobin::<char, QueueKind>::new(weights(), None);\n\n        assert_eq!(scheduler.item_count(), 0);\n        scheduler.push('a', QueueKind::One).await;\n        assert_eq!(scheduler.item_count(), 1);\n        scheduler.push('b', QueueKind::Two).await;\n        assert_eq!(scheduler.item_count(), 2);\n\n        scheduler.seal();\n        assert_eq!(scheduler.item_count(), 2);\n        scheduler.push('c', QueueKind::One).await;\n        assert_eq!(scheduler.item_count(), 2);\n        scheduler.push('d', QueueKind::One).await;\n        assert_eq!(scheduler.item_count(), 2);\n\n        assert_eq!(('a', QueueKind::One), scheduler.pop().await);\n        assert_eq!(scheduler.item_count(), 1);\n        assert_eq!(('b', QueueKind::Two), scheduler.pop().await);\n        assert_eq!(scheduler.item_count(), 0);\n        assert!(scheduler.drain_queues().await.is_empty());\n    }\n\n    #[test]\n    fn should_calculate_dump_threshold() {\n        let total = 0;\n        let recent_threshold = 100;\n        assert!(!should_dump_queues(total, recent_threshold));\n\n        let total = 100;\n        let recent_threshold = 100;\n        assert!(!should_dump_queues(total, recent_threshold));\n\n        let total = 109;\n        let recent_threshold = 100;\n        assert!(!should_dump_queues(total, recent_threshold));\n\n        let total = 110;\n        let recent_threshold = 100;\n        
assert!(!should_dump_queues(total, recent_threshold));\n\n        // Dump only if there is 10%+ increase in event count\n        let total = 111;\n        let recent_threshold = 100;\n        assert!(should_dump_queues(total, recent_threshold));\n\n        let total = 112;\n        let recent_threshold = 100;\n        assert!(should_dump_queues(total, recent_threshold));\n\n        let total = 1_000_000;\n        let recent_threshold = 100;\n        assert!(should_dump_queues(total, recent_threshold));\n    }\n}\n"
  },
  {
    "path": "node/src/utils/specimen.rs",
    "content": "//! Specimen support.\n//!\n//! Structs implementing the specimen trait allow for specific sample instances being created, such\n//! as the biggest possible.\n\nuse std::{\n    any::{Any, TypeId},\n    collections::{BTreeMap, BTreeSet, HashMap},\n    convert::{TryFrom, TryInto},\n    iter::FromIterator,\n    net::{Ipv6Addr, SocketAddr, SocketAddrV6},\n    sync::Arc,\n};\n\nuse either::Either;\nuse once_cell::sync::OnceCell;\nuse serde::Serialize;\nuse strum::{EnumIter, IntoEnumIterator};\n\nuse casper_types::{\n    account::AccountHash,\n    bytesrepr::Bytes,\n    crypto::{sign, PublicKey, Signature},\n    AccessRights, Approval, ApprovalsHash, AsymmetricType, Block, BlockHash, BlockHeader,\n    BlockHeaderV1, BlockHeaderV2, BlockHeaderWithSignatures, BlockSignatures, BlockSignaturesV2,\n    BlockV2, ChainNameDigest, ChunkWithProof, Deploy, DeployHash, DeployId, Digest, EraEndV1,\n    EraEndV2, EraId, EraReport, ExecutableDeployItem, FinalitySignature, FinalitySignatureId,\n    FinalitySignatureV2, PackageHash, ProtocolVersion, RewardedSignatures, RuntimeArgs, SecretKey,\n    SemVer, SingleBlockRewardedSignatures, TimeDiff, Timestamp, Transaction, TransactionHash,\n    TransactionId, TransactionRuntimeParams, TransactionV1, TransactionV1Hash, URef,\n    AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID, KEY_HASH_LENGTH, MINT_LANE_ID, U512,\n};\n\nuse crate::{\n    components::{\n        consensus::{max_rounds_per_era, utils::ValidatorMap},\n        fetcher::Tag,\n    },\n    protocol::Message,\n    types::{\n        transaction::transaction_v1_builder::TransactionV1Builder, BlockExecutionResultsOrChunk,\n        BlockPayload, FinalizedBlock, InternalEraReport, LegacyDeploy, SyncLeap, TrieOrChunk,\n    },\n};\nuse casper_storage::block_store::types::ApprovalsHashes;\n\n/// The largest valid unicode codepoint that can be encoded to UTF-8.\npub(crate) const HIGHEST_UNICODE_CODEPOINT: char = '\\u{10FFFF}';\nconst LARGE_WASM_LANE_ID: u8 = 3;\n\n/// A cache used 
for memoization, typically on a single estimator.\n#[derive(Debug, Default)]\npub(crate) struct Cache {\n    /// A map of items that have been hashed. Indexed by type.\n    items: HashMap<TypeId, Vec<Box<dyn Any>>>,\n}\n\nimpl Cache {\n    /// Retrieves a potentially memoized instance.\n    pub(crate) fn get<T: Any>(&mut self) -> Option<&T> {\n        self.get_all::<T>()\n            .first()\n            .map(|box_any| box_any.downcast_ref::<T>().expect(\"cache corrupted\"))\n    }\n\n    /// Sets the memoized instance if not already set.\n    ///\n    /// Returns a reference to the memoized instance. Note that this may be an instance other than\n    /// the passed in `item`, if the cache entry was not empty before/\n    pub(crate) fn set<T: Any>(&mut self, item: T) -> &T {\n        let items = self.get_all::<T>();\n        if items.is_empty() {\n            let boxed_item: Box<dyn Any> = Box::new(item);\n            items.push(boxed_item);\n        }\n        self.get::<T>().expect(\"should not be empty\")\n    }\n\n    /// Get or insert the vector storing item instances.\n    fn get_all<T: Any>(&mut self) -> &mut Vec<Box<dyn Any>> {\n        self.items.entry(TypeId::of::<T>()).or_default()\n    }\n}\n\n/// Given a specific type instance, estimates its serialized size.\npub(crate) trait SizeEstimator {\n    /// Estimate the serialized size of a value.\n    fn estimate<T: Serialize>(&self, val: &T) -> usize;\n\n    /// Requires a parameter.\n    ///\n    /// Parameters indicate potential specimens which values to expect, e.g. a maximum number of\n    /// items configured for a specific collection.\n    ///\n    /// ## Panics\n    ///\n    /// - If the named parameter is not set, panics.\n    /// - If `T` is of an invalid type.\n    fn parameter<T: TryFrom<i64>>(&self, name: &'static str) -> T;\n\n    /// Require a parameter, cast into a boolean.\n    ///\n    /// See [`parameter`] for details. 
Will return `false` if the stored value is `0`,\n    /// otherwise `true`.\n    ///\n    /// This method exists because `bool` does not implement `TryFrom<i64>`.\n    ///\n    /// ## Panics\n    ///\n    /// Same as [`parameter`].\n    fn parameter_bool(&self, name: &'static str) -> bool {\n        self.parameter::<i64>(name) != 0\n    }\n}\n\n/// Supports returning a maximum size specimen.\n///\n/// \"Maximum size\" refers to the instance that uses the highest amount of memory and is also most\n/// likely to have the largest representation when serialized.\npub(crate) trait LargestSpecimen: Sized {\n    /// Returns the largest possible specimen for this type.\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self;\n}\n\n/// Supports generating a unique sequence of specimen that are as large as possible.\npub(crate) trait LargeUniqueSequence<E>\nwhere\n    Self: Sized + Ord,\n    E: SizeEstimator,\n{\n    /// Create a new sequence of the largest possible unique specimens.\n    ///\n    /// Note that multiple calls to this function will return overlapping sequences.\n    // Note: This functions returns a materialized sequence instead of a generator to avoid\n    //       complications with borrowing `E`.\n    fn large_unique_sequence(estimator: &E, count: usize, cache: &mut Cache) -> BTreeSet<Self>;\n}\n\n/// Produces the largest variant of a specific `enum` using an estimator and a generation function.\npub(crate) fn largest_variant<T, D, E, F>(estimator: &E, generator: F) -> T\nwhere\n    T: Serialize,\n    D: IntoEnumIterator,\n    E: SizeEstimator,\n    F: FnMut(D) -> T,\n{\n    D::iter()\n        .map(generator)\n        .max_by_key(|candidate| estimator.estimate(candidate))\n        .expect(\"should have at least one candidate\")\n}\n\n/// Generates a vec of a given size filled with the largest specimen.\npub(crate) fn vec_of_largest_specimen<T: LargestSpecimen, E: SizeEstimator>(\n    estimator: &E,\n    count: usize,\n    cache: 
&mut Cache,\n) -> Vec<T> {\n    let mut vec = Vec::new();\n    for _ in 0..count {\n        vec.push(LargestSpecimen::largest_specimen(estimator, cache));\n    }\n    vec\n}\n\n/// Generates a vec of the largest specimen, with a size from a property.\npub(crate) fn vec_prop_specimen<T: LargestSpecimen, E: SizeEstimator>(\n    estimator: &E,\n    parameter_name: &'static str,\n    cache: &mut Cache,\n) -> Vec<T> {\n    let mut count = estimator.parameter(parameter_name);\n    if count < 0 {\n        count = 0;\n    }\n\n    vec_of_largest_specimen(estimator, count as usize, cache)\n}\n\n/// Generates a `BTreeMap` with the size taken from a property.\n///\n/// Keys are generated uniquely using `LargeUniqueSequence`, while values will be largest specimen.\npub(crate) fn btree_map_distinct_from_prop<K, V, E>(\n    estimator: &E,\n    parameter_name: &'static str,\n    cache: &mut Cache,\n) -> BTreeMap<K, V>\nwhere\n    V: LargestSpecimen,\n    K: Ord + LargeUniqueSequence<E> + Sized,\n    E: SizeEstimator,\n{\n    let mut count = estimator.parameter(parameter_name);\n    if count < 0 {\n        count = 0;\n    }\n\n    K::large_unique_sequence(estimator, count as usize, cache)\n        .into_iter()\n        .map(|key| (key, LargestSpecimen::largest_specimen(estimator, cache)))\n        .collect()\n}\n\n/// Generates a `BTreeSet` with the size taken from a property.\n///\n/// Value are generated uniquely using `LargeUniqueSequence`.\npub(crate) fn btree_set_distinct_from_prop<T, E>(\n    estimator: &E,\n    parameter_name: &'static str,\n    cache: &mut Cache,\n) -> BTreeSet<T>\nwhere\n    T: Ord + LargeUniqueSequence<E> + Sized,\n    E: SizeEstimator,\n{\n    let mut count = estimator.parameter(parameter_name);\n    if count < 0 {\n        count = 0;\n    }\n\n    T::large_unique_sequence(estimator, count as usize, cache)\n}\n\n/// Generates a `BTreeSet` with a given amount of items.\n///\n/// Value are generated uniquely using `LargeUniqueSequence`.\npub(crate) fn 
btree_set_distinct<T, E>(\n    estimator: &E,\n    count: usize,\n    cache: &mut Cache,\n) -> BTreeSet<T>\nwhere\n    T: Ord + LargeUniqueSequence<E> + Sized,\n    E: SizeEstimator,\n{\n    T::large_unique_sequence(estimator, count, cache)\n}\n\nimpl LargestSpecimen for SocketAddr {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        SocketAddr::V6(SocketAddrV6::largest_specimen(estimator, cache))\n    }\n}\n\nimpl LargestSpecimen for SocketAddrV6 {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        SocketAddrV6::new(\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n        )\n    }\n}\n\nimpl LargestSpecimen for Ipv6Addr {\n    fn largest_specimen<E: SizeEstimator>(_estimator: &E, _cache: &mut Cache) -> Self {\n        // Leading zeros get shorted, ensure there are none in the address.\n        Ipv6Addr::new(\n            0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,\n        )\n    }\n}\n\nimpl LargestSpecimen for bool {\n    fn largest_specimen<E: SizeEstimator>(_estimator: &E, _cache: &mut Cache) -> Self {\n        true\n    }\n}\n\nimpl LargestSpecimen for u8 {\n    fn largest_specimen<E: SizeEstimator>(_estimator: &E, _cache: &mut Cache) -> Self {\n        u8::MAX\n    }\n}\n\nimpl LargestSpecimen for u16 {\n    fn largest_specimen<E: SizeEstimator>(_estimator: &E, _cache: &mut Cache) -> Self {\n        u16::MAX\n    }\n}\n\nimpl LargestSpecimen for u32 {\n    fn largest_specimen<E: SizeEstimator>(_estimator: &E, _cache: &mut Cache) -> Self {\n        u32::MAX\n    }\n}\n\nimpl LargestSpecimen for u64 {\n    fn largest_specimen<E: SizeEstimator>(_estimator: &E, _cache: &mut Cache) -> Self {\n        u64::MAX\n    }\n}\n\nimpl LargestSpecimen 
for u128 {\n    fn largest_specimen<E: SizeEstimator>(_estimator: &E, _cache: &mut Cache) -> Self {\n        u128::MAX\n    }\n}\n\nimpl<T: LargestSpecimen + Copy, const N: usize> LargestSpecimen for [T; N] {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        [LargestSpecimen::largest_specimen(estimator, cache); N]\n    }\n}\n\nimpl<T> LargestSpecimen for Option<T>\nwhere\n    T: LargestSpecimen,\n{\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        Some(LargestSpecimen::largest_specimen(estimator, cache))\n    }\n}\n\nimpl<T> LargestSpecimen for Box<T>\nwhere\n    T: LargestSpecimen,\n{\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        Box::new(LargestSpecimen::largest_specimen(estimator, cache))\n    }\n}\n\nimpl<T> LargestSpecimen for Arc<T>\nwhere\n    T: LargestSpecimen,\n{\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        Arc::new(LargestSpecimen::largest_specimen(estimator, cache))\n    }\n}\n\nimpl<T1, T2> LargestSpecimen for (T1, T2)\nwhere\n    T1: LargestSpecimen,\n    T2: LargestSpecimen,\n{\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        (\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n        )\n    }\n}\n\nimpl<T1, T2, T3> LargestSpecimen for (T1, T2, T3)\nwhere\n    T1: LargestSpecimen,\n    T2: LargestSpecimen,\n    T3: LargestSpecimen,\n{\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        (\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n        )\n    }\n}\n\n// Various third party crates.\n\nimpl<L, R> LargestSpecimen for Either<L, R>\nwhere\n 
   L: LargestSpecimen + Serialize,\n    R: LargestSpecimen + Serialize,\n{\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        let l = L::largest_specimen(estimator, cache);\n        let r = R::largest_specimen(estimator, cache);\n\n        if estimator.estimate(&l) >= estimator.estimate(&r) {\n            Either::Left(l)\n        } else {\n            Either::Right(r)\n        }\n    }\n}\n\n// impls for `casper_types`, which is technically a foreign crate -- so we put them here.\nimpl LargestSpecimen for ProtocolVersion {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        ProtocolVersion::new(LargestSpecimen::largest_specimen(estimator, cache))\n    }\n}\n\nimpl LargestSpecimen for URef {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        URef::new(\n            [LargestSpecimen::largest_specimen(estimator, cache); 32],\n            AccessRights::READ_ADD_WRITE,\n        )\n    }\n}\n\nimpl LargestSpecimen for AccountHash {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        AccountHash::new([LargestSpecimen::largest_specimen(estimator, cache); 32])\n    }\n}\n\nimpl LargestSpecimen for SemVer {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        SemVer {\n            major: LargestSpecimen::largest_specimen(estimator, cache),\n            minor: LargestSpecimen::largest_specimen(estimator, cache),\n            patch: LargestSpecimen::largest_specimen(estimator, cache),\n        }\n    }\n}\n\nimpl LargestSpecimen for PublicKey {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        PublicKey::large_unique_sequence(estimator, 1, cache)\n            .into_iter()\n            .next()\n            .unwrap()\n    }\n}\n\n// Dummy implementation to replace the buggy real one below:\nimpl<E> 
LargeUniqueSequence<E> for PublicKey\nwhere\n    E: SizeEstimator,\n{\n    fn large_unique_sequence(estimator: &E, count: usize, cache: &mut Cache) -> BTreeSet<Self> {\n        let data_vec = cache.get_all::<Self>();\n\n        /// Generates a secret key from a fixed, numbered seed.\n        fn generate_key<E: SizeEstimator>(estimator: &E, seed: usize) -> PublicKey {\n            // Like `Signature`, we do not wish to pollute the types crate here.\n            #[derive(Copy, Clone, Debug, EnumIter)]\n            enum PublicKeyDiscriminants {\n                System,\n                Ed25519,\n                Secp256k1,\n            }\n            largest_variant::<PublicKey, PublicKeyDiscriminants, _, _>(estimator, |variant| {\n                // We take advantage of two things here:\n                //\n                // 1. The required seed bytes for Ed25519 and Secp256k1 are both the same length of\n                //    32 bytes.\n                // 2. While Secp256k1 does not allow the most trivial seed bytes of 0x00..0001, a a\n                //    hash function output seems to satisfy it, and our current hashing scheme also\n                //    output 32 bytes.\n                let seed_bytes = Digest::hash(seed.to_be_bytes()).value();\n\n                match variant {\n                    PublicKeyDiscriminants::System => PublicKey::system(),\n                    PublicKeyDiscriminants::Ed25519 => {\n                        let ed25519_sec = SecretKey::ed25519_from_bytes(seed_bytes)\n                            .expect(\"unable to create ed25519 key from seed bytes\");\n                        PublicKey::from(&ed25519_sec)\n                    }\n                    PublicKeyDiscriminants::Secp256k1 => {\n                        let secp256k1_sec = SecretKey::secp256k1_from_bytes(seed_bytes)\n                            .expect(\"unable to create secp256k1 key from seed bytes\");\n                        PublicKey::from(&secp256k1_sec)\n                
    }\n                }\n            })\n        }\n\n        while data_vec.len() < count {\n            let seed = data_vec.len();\n            let key = generate_key(estimator, seed);\n            data_vec.push(Box::new(key));\n        }\n\n        debug_assert!(data_vec.len() >= count);\n        let output_set: BTreeSet<Self> = data_vec[..count]\n            .iter()\n            .map(|item| item.downcast_ref::<Self>().expect(\"cache corrupted\"))\n            .cloned()\n            .collect();\n        debug_assert_eq!(output_set.len(), count);\n\n        output_set\n    }\n}\n\nimpl<E> LargeUniqueSequence<E> for Digest\nwhere\n    E: SizeEstimator,\n{\n    fn large_unique_sequence(_estimator: &E, count: usize, _cache: &mut Cache) -> BTreeSet<Self> {\n        (0..count).map(|n| Digest::hash(n.to_ne_bytes())).collect()\n    }\n}\n\nimpl LargestSpecimen for Signature {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        if let Some(item) = cache.get::<Self>() {\n            return *item;\n        }\n\n        // Note: We do not use strum generated discriminator enums for the signature, as we do not\n        //       want to make `strum` a direct dependency of `casper-types`, to keep its size down.\n        #[derive(Debug, Copy, Clone, EnumIter)]\n        enum SignatureDiscriminants {\n            System,\n            Ed25519,\n            Secp256k1,\n        }\n\n        *cache.set(largest_variant::<Self, SignatureDiscriminants, _, _>(\n            estimator,\n            |variant| match variant {\n                SignatureDiscriminants::System => Signature::system(),\n                SignatureDiscriminants::Ed25519 => {\n                    let ed25519_sec = &SecretKey::generate_ed25519().expect(\"a correct secret\");\n\n                    sign([0_u8], ed25519_sec, &ed25519_sec.into())\n                }\n                SignatureDiscriminants::Secp256k1 => {\n                    let secp256k1_sec = 
&SecretKey::generate_secp256k1().expect(\"a correct secret\");\n\n                    sign([0_u8], secp256k1_sec, &secp256k1_sec.into())\n                }\n            },\n        ))\n    }\n}\n\nimpl LargestSpecimen for EraId {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        EraId::new(LargestSpecimen::largest_specimen(estimator, cache))\n    }\n}\n\nimpl LargestSpecimen for Timestamp {\n    fn largest_specimen<E: SizeEstimator>(_estimator: &E, _cache: &mut Cache) -> Self {\n        const MAX_TIMESTAMP_HUMAN_READABLE: u64 = 253_402_300_799;\n        Timestamp::from(MAX_TIMESTAMP_HUMAN_READABLE)\n    }\n}\n\nimpl LargestSpecimen for TimeDiff {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        TimeDiff::from_millis(LargestSpecimen::largest_specimen(estimator, cache))\n    }\n}\n\nimpl LargestSpecimen for BlockHeaderV1 {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        BlockHeaderV1::new(\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            OnceCell::with_value(LargestSpecimen::largest_specimen(estimator, cache)),\n        )\n    }\n}\n\nimpl LargestSpecimen for BlockHeaderV2 {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        BlockHeaderV2::new(\n            
LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            OnceCell::with_value(LargestSpecimen::largest_specimen(estimator, cache)),\n        )\n    }\n}\n\nimpl LargestSpecimen for BlockHeader {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        let v1 = BlockHeaderV1::largest_specimen(estimator, cache);\n        let v2 = BlockHeaderV2::largest_specimen(estimator, cache);\n\n        if estimator.estimate(&v1) > estimator.estimate(&v2) {\n            BlockHeader::V1(v1)\n        } else {\n            BlockHeader::V2(v2)\n        }\n    }\n}\n\n/// A wrapper around `BlockHeader` that implements `LargestSpecimen` without including the era\n/// end.\npub(crate) struct BlockHeaderWithoutEraEnd(BlockHeaderV2);\n\nimpl BlockHeaderWithoutEraEnd {\n    pub(crate) fn into_block_header(self) -> BlockHeader {\n        BlockHeader::V2(self.0)\n    }\n}\n\nimpl LargestSpecimen for BlockHeaderWithoutEraEnd {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        BlockHeaderWithoutEraEnd(BlockHeaderV2::new(\n            LargestSpecimen::largest_specimen(estimator, cache),\n            
LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            None,\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            OnceCell::with_value(LargestSpecimen::largest_specimen(estimator, cache)),\n        ))\n    }\n}\n\nimpl LargestSpecimen for EraEndV1 {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        EraEndV1::new(\n            LargestSpecimen::largest_specimen(estimator, cache),\n            btree_map_distinct_from_prop(estimator, \"validator_count\", cache),\n        )\n    }\n}\n\nimpl LargestSpecimen for EraEndV2 {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        let rewards = {\n            let count = estimator.parameter(\"validator_count\");\n\n            PublicKey::large_unique_sequence(estimator, count, cache)\n                .into_iter()\n                // at most two reward amounts per validator\n                .map(|key| (key, vec_of_largest_specimen(estimator, 2, cache)))\n                .collect()\n        };\n        EraEndV2::new(\n            vec_prop_specimen(estimator, \"validator_count\", cache),\n            vec_prop_specimen(estimator, \"validator_count\", cache),\n            btree_map_distinct_from_prop(estimator, \"validator_count\", cache),\n            rewards,\n            1u8,\n        )\n    }\n}\n\nimpl LargestSpecimen 
for InternalEraReport {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        InternalEraReport {\n            equivocators: vec_prop_specimen(estimator, \"validator_count\", cache),\n            inactive_validators: vec_prop_specimen(estimator, \"validator_count\", cache),\n        }\n    }\n}\n\nimpl LargestSpecimen for BlockHeaderWithSignatures {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        BlockHeaderWithSignatures::new(\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n        )\n    }\n}\n\nimpl LargestSpecimen for BlockSignatures {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        let mut block_signatures = BlockSignaturesV2::new(\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n        );\n        let sigs = btree_map_distinct_from_prop(estimator, \"validator_count\", cache);\n        sigs.into_iter().for_each(|(public_key, sig)| {\n            block_signatures.insert_signature(public_key, sig);\n        });\n        BlockSignatures::V2(block_signatures)\n    }\n}\n\nimpl LargestSpecimen for BlockV2 {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        let mint_hashes = vec![\n            TransactionHash::largest_specimen(estimator, cache);\n            estimator.parameter::<usize>(\"max_mint_per_block\")\n        ];\n        let auction_hashes = vec![\n            TransactionHash::largest_specimen(estimator, cache);\n            estimator.parameter::<usize>(\"max_auctions_per_block\")\n        ];\n        let install_upgrade_hashes =\n            vec![\n                
TransactionHash::largest_specimen(estimator, cache);\n                estimator.parameter::<usize>(\"max_install_upgrade_transactions_per_block\")\n            ];\n        let standard_hashes = vec![\n            TransactionHash::largest_specimen(estimator, cache);\n            estimator\n                .parameter::<usize>(\"max_standard_transactions_per_block\")\n        ];\n\n        let transactions = {\n            let mut ret = BTreeMap::new();\n            ret.insert(MINT_LANE_ID, mint_hashes);\n            ret.insert(AUCTION_LANE_ID, auction_hashes);\n            ret.insert(INSTALL_UPGRADE_LANE_ID, install_upgrade_hashes);\n            ret.insert(3, standard_hashes);\n            ret\n        };\n\n        BlockV2::new(\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            transactions,\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n        )\n    }\n}\n\nimpl LargestSpecimen for Block {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        Block::V2(LargestSpecimen::largest_specimen(estimator, cache))\n    }\n}\n\nimpl LargestSpecimen for FinalizedBlock {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        
FinalizedBlock::new(\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n        )\n    }\n}\n\nimpl LargestSpecimen for FinalitySignature {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        FinalitySignature::V2(LargestSpecimen::largest_specimen(estimator, cache))\n    }\n}\n\nimpl LargestSpecimen for FinalitySignatureV2 {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        FinalitySignatureV2::new(\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n        )\n    }\n}\n\nimpl LargestSpecimen for FinalitySignatureId {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        FinalitySignatureId::new(\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n        )\n    }\n}\n\nimpl LargestSpecimen for EraReport<PublicKey> {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        EraReport::new(\n            vec_prop_specimen(estimator, \"validator_count\", cache),\n            btree_map_distinct_from_prop(estimator, \"validator_count\", cache),\n            
vec_prop_specimen(estimator, \"validator_count\", cache),\n        )\n    }\n}\n\nimpl LargestSpecimen for BlockHash {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        BlockHash::new(LargestSpecimen::largest_specimen(estimator, cache))\n    }\n}\n\nimpl LargestSpecimen for ChainNameDigest {\n    fn largest_specimen<E: SizeEstimator>(_estimator: &E, _cache: &mut Cache) -> Self {\n        // ChainNameDigest is fixed size by definition, so any value will do.\n        ChainNameDigest::from_chain_name(\"\")\n    }\n}\n\n// impls for `casper_hashing`, which is technically a foreign crate -- so we put them here.\nimpl LargestSpecimen for Digest {\n    fn largest_specimen<E: SizeEstimator>(_estimator: &E, _cache: &mut Cache) -> Self {\n        // Hashes are fixed size by definition, so any value will do.\n        Digest::hash(\"\")\n    }\n}\n\nimpl LargestSpecimen for BlockPayload {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        // We cannot just use the standard largest specimen for `TransactionHashWithApprovals`, as\n        // this would cause a quadratic increase in transactions. 
Instead, we generate one\n        // large transaction that contains the number of approvals if they are spread out\n        // across the block.\n\n        let large_txn = match Transaction::largest_specimen(estimator, cache) {\n            Transaction::Deploy(deploy) => {\n                Transaction::Deploy(deploy.with_approvals(btree_set_distinct_from_prop(\n                    estimator,\n                    \"average_approvals_per_transaction_in_block\",\n                    cache,\n                )))\n            }\n            Transaction::V1(v1) => {\n                Transaction::V1(v1.with_approvals(btree_set_distinct_from_prop(\n                    estimator,\n                    \"average_approvals_per_transaction_in_block\",\n                    cache,\n                )))\n            }\n        };\n\n        let large_txn_hash_with_approvals = (large_txn.hash(), large_txn.approvals());\n\n        let mut transactions = BTreeMap::new();\n        transactions.insert(\n            MINT_LANE_ID,\n            vec![\n                large_txn_hash_with_approvals.clone();\n                estimator.parameter::<usize>(\"max_mint_per_block\")\n            ],\n        );\n        transactions.insert(\n            AUCTION_LANE_ID,\n            vec![\n                large_txn_hash_with_approvals.clone();\n                estimator.parameter::<usize>(\"max_auctions_per_block\")\n            ],\n        );\n        transactions.insert(\n            LARGE_WASM_LANE_ID,\n            vec![\n                large_txn_hash_with_approvals.clone();\n                estimator.parameter::<usize>(\"max_standard_transactions_per_block\")\n            ],\n        );\n        transactions.insert(\n            INSTALL_UPGRADE_LANE_ID,\n            vec![\n                large_txn_hash_with_approvals;\n                estimator.parameter::<usize>(\"max_install_upgrade_transactions_per_block\")\n            ],\n        );\n\n        BlockPayload::new(\n            
transactions,\n            vec_prop_specimen(estimator, \"max_accusations_per_block\", cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n        )\n    }\n}\n\nimpl LargestSpecimen for RewardedSignatures {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        RewardedSignatures::new(\n            std::iter::repeat(LargestSpecimen::largest_specimen(estimator, cache))\n                .take(estimator.parameter(\"signature_rewards_max_delay\")),\n        )\n    }\n}\n\nimpl LargestSpecimen for SingleBlockRewardedSignatures {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, _cache: &mut Cache) -> Self {\n        SingleBlockRewardedSignatures::pack(\n            std::iter::repeat(1).take(estimator.parameter(\"validator_count\")),\n        )\n    }\n}\n\nimpl LargestSpecimen for DeployHash {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        DeployHash::new(LargestSpecimen::largest_specimen(estimator, cache))\n    }\n}\n\nimpl LargestSpecimen for Approval {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        Approval::new(\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n        )\n    }\n}\n\nimpl<E> LargeUniqueSequence<E> for Approval\nwhere\n    Self: Sized + Ord,\n    E: SizeEstimator,\n{\n    fn large_unique_sequence(estimator: &E, count: usize, cache: &mut Cache) -> BTreeSet<Self> {\n        PublicKey::large_unique_sequence(estimator, count, cache)\n            .into_iter()\n            .map(|public_key| {\n                Approval::new(\n                    public_key,\n                    LargestSpecimen::largest_specimen(estimator, cache),\n                )\n            })\n            
.collect()\n    }\n}\n\nimpl LargestSpecimen for (TransactionHash, Option<BTreeSet<Approval>>) {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        // Note: This is an upper bound, the actual value is lower. We are keeping the order of\n        //       magnitude intact though.\n        let max_items = estimator.parameter::<usize>(\"max_transfers_per_block\")\n            + estimator.parameter::<usize>(\"max_standard_per_block\");\n\n        let transaction = (\n            TransactionHash::largest_specimen(estimator, cache),\n            Some(btree_set_distinct(estimator, max_items, cache)),\n        );\n        let v1 = (\n            TransactionHash::largest_specimen(estimator, cache),\n            Some(btree_set_distinct(estimator, max_items, cache)),\n        );\n\n        if estimator.estimate(&transaction) > estimator.estimate(&v1) {\n            transaction\n        } else {\n            v1\n        }\n    }\n}\n\nimpl LargestSpecimen for Deploy {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        // Note: Deploys have a maximum size enforced on their serialized representation. A deploy\n        //       generated here is guaranteed to exceed this maximum size due to the session code\n        //       being this maximum size already (see the [`LargestSpecimen`] implementation of\n        //       [`ExecutableDeployItem`]). 
For this reason, we leave `dependencies` and `payment`\n        //       small.\n        Deploy::new_signed(\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            Default::default(), // See note.\n            largest_chain_name(estimator),\n            LargestSpecimen::largest_specimen(estimator, cache),\n            ExecutableDeployItem::Transfer {\n                args: Default::default(), // See note.\n            },\n            &LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n        )\n    }\n}\n\nimpl LargestSpecimen for DeployId {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        DeployId::new(\n            LargestSpecimen::largest_specimen(estimator, cache),\n            LargestSpecimen::largest_specimen(estimator, cache),\n        )\n    }\n}\n\nimpl LargestSpecimen for ApprovalsHash {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        let deploy_ah = ApprovalsHash(LargestSpecimen::largest_specimen(estimator, cache));\n        let txn_v1_ah = ApprovalsHash(LargestSpecimen::largest_specimen(estimator, cache));\n\n        if estimator.estimate(&deploy_ah) >= estimator.estimate(&txn_v1_ah) {\n            deploy_ah\n        } else {\n            txn_v1_ah\n        }\n    }\n}\n\nimpl LargestSpecimen for TransactionV1Hash {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        TransactionV1Hash::new(LargestSpecimen::largest_specimen(estimator, cache))\n    }\n}\n\nimpl LargestSpecimen for TransactionV1 {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        // See comment in `impl LargestSpecimen for ExecutableDeployItem` below for rationale here.\n        let 
max_size_with_margin =\n            estimator.parameter::<i32>(\"max_transaction_size\").max(0) as usize + 10 * 4;\n        TransactionV1Builder::new_session(\n            true,\n            Bytes::from(vec_of_largest_specimen(\n                estimator,\n                max_size_with_margin,\n                cache,\n            )),\n            TransactionRuntimeParams::VmCasperV1,\n        )\n        .with_secret_key(&LargestSpecimen::largest_specimen(estimator, cache))\n        .with_timestamp(LargestSpecimen::largest_specimen(estimator, cache))\n        .with_ttl(LargestSpecimen::largest_specimen(estimator, cache))\n        .with_chain_name(largest_chain_name(estimator))\n        .build()\n        .unwrap()\n    }\n}\n\nimpl LargestSpecimen for TransactionId {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        let deploy_hash =\n            TransactionHash::Deploy(LargestSpecimen::largest_specimen(estimator, cache));\n        let v1_hash = TransactionHash::V1(LargestSpecimen::largest_specimen(estimator, cache));\n\n        let deploy = TransactionId::new(\n            deploy_hash,\n            LargestSpecimen::largest_specimen(estimator, cache),\n        );\n        let v1 = TransactionId::new(v1_hash, LargestSpecimen::largest_specimen(estimator, cache));\n\n        if estimator.estimate(&deploy) >= estimator.estimate(&v1) {\n            deploy\n        } else {\n            v1\n        }\n    }\n}\n\nimpl LargestSpecimen for Transaction {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        let deploy = Transaction::Deploy(LargestSpecimen::largest_specimen(estimator, cache));\n        let v1 = Transaction::V1(LargestSpecimen::largest_specimen(estimator, cache));\n\n        if estimator.estimate(&deploy) >= estimator.estimate(&v1) {\n            deploy\n        } else {\n            v1\n        }\n    }\n}\n\nimpl LargestSpecimen for TransactionHash {\n    fn 
largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        let deploy_hash =\n            TransactionHash::Deploy(LargestSpecimen::largest_specimen(estimator, cache));\n        let v1_hash = TransactionHash::V1(LargestSpecimen::largest_specimen(estimator, cache));\n\n        if estimator.estimate(&deploy_hash) >= estimator.estimate(&v1_hash) {\n            deploy_hash\n        } else {\n            v1_hash\n        }\n    }\n}\n\n// EE impls\nimpl LargestSpecimen for ExecutableDeployItem {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        // `module_bytes` already blows this up to the maximum deploy size, so we use this variant\n        // as the largest always and don't need to fill in any args.\n        //\n        // However, this does not hold true for all encoding schemes: An inefficient encoding can\n        // easily, via `RuntimeArgs`, result in a much larger encoded size, e.g. when encoding an\n        // array of 1-byte elements in a format that uses string quoting and a delimiter to seperate\n        // elements.\n        //\n        // We compromise by not supporting encodings this inefficient and add 10 * a 32-bit integer\n        // as a safety margin for tags and length prefixes.\n        let max_size_with_margin =\n            estimator.parameter::<i32>(\"max_transaction_size\").max(0) as usize + 10 * 4;\n\n        ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::from(vec_of_largest_specimen(\n                estimator,\n                max_size_with_margin,\n                cache,\n            )),\n            args: RuntimeArgs::new(),\n        }\n    }\n}\n\nimpl LargestSpecimen for U512 {\n    fn largest_specimen<E: SizeEstimator>(_estimator: &E, _cache: &mut Cache) -> Self {\n        U512::max_value()\n    }\n}\n\nimpl LargestSpecimen for PackageHash {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        
PackageHash::new([LargestSpecimen::largest_specimen(estimator, cache); KEY_HASH_LENGTH])\n    }\n}\n\nimpl LargestSpecimen for ChunkWithProof {\n    fn largest_specimen<E: SizeEstimator>(_estimator: &E, _cache: &mut Cache) -> Self {\n        ChunkWithProof::new(&[0xFF; Self::CHUNK_SIZE_BYTES], 0)\n            .expect(\"the chunk to be correctly created\")\n    }\n}\n\nimpl LargestSpecimen for SecretKey {\n    fn largest_specimen<E: SizeEstimator>(_estimator: &E, _cache: &mut Cache) -> Self {\n        SecretKey::ed25519_from_bytes([u8::MAX; 32]).expect(\"valid secret key bytes\")\n    }\n}\n\nimpl<T: LargestSpecimen> LargestSpecimen for ValidatorMap<T> {\n    fn largest_specimen<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Self {\n        let max_validators = estimator.parameter(\"validator_count\");\n\n        ValidatorMap::from_iter(\n            std::iter::repeat_with(|| LargestSpecimen::largest_specimen(estimator, cache))\n                .take(max_validators),\n        )\n    }\n}\n\n/// Returns the largest `Message::GetRequest`.\npub(crate) fn largest_get_request<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Message {\n    largest_variant::<Message, Tag, _, _>(estimator, |variant| {\n        match variant {\n            Tag::Transaction => Message::new_get_request::<Transaction>(\n                &LargestSpecimen::largest_specimen(estimator, cache),\n            ),\n            Tag::LegacyDeploy => Message::new_get_request::<LegacyDeploy>(\n                &LargestSpecimen::largest_specimen(estimator, cache),\n            ),\n            Tag::Block => Message::new_get_request::<Block>(&LargestSpecimen::largest_specimen(\n                estimator, cache,\n            )),\n            Tag::BlockHeader => Message::new_get_request::<BlockHeader>(\n                &LargestSpecimen::largest_specimen(estimator, cache),\n            ),\n            Tag::TrieOrChunk => Message::new_get_request::<TrieOrChunk>(\n                
&LargestSpecimen::largest_specimen(estimator, cache),\n            ),\n            Tag::FinalitySignature => Message::new_get_request::<FinalitySignature>(\n                &LargestSpecimen::largest_specimen(estimator, cache),\n            ),\n            Tag::SyncLeap => Message::new_get_request::<SyncLeap>(\n                &LargestSpecimen::largest_specimen(estimator, cache),\n            ),\n            Tag::ApprovalsHashes => Message::new_get_request::<ApprovalsHashes>(\n                &LargestSpecimen::largest_specimen(estimator, cache),\n            ),\n            Tag::BlockExecutionResults => Message::new_get_request::<BlockExecutionResultsOrChunk>(\n                &LargestSpecimen::largest_specimen(estimator, cache),\n            ),\n        }\n        .expect(\"did not expect new_get_request from largest deploy to fail\")\n    })\n}\n\n/// Returns the largest `Message::GetResponse`.\npub(crate) fn largest_get_response<E: SizeEstimator>(estimator: &E, cache: &mut Cache) -> Message {\n    largest_variant::<Message, Tag, _, _>(estimator, |variant| {\n        match variant {\n            Tag::Transaction => Message::new_get_response::<Transaction>(\n                &LargestSpecimen::largest_specimen(estimator, cache),\n            ),\n            Tag::LegacyDeploy => Message::new_get_response::<LegacyDeploy>(\n                &LargestSpecimen::largest_specimen(estimator, cache),\n            ),\n            Tag::Block => Message::new_get_response::<Block>(&LargestSpecimen::largest_specimen(\n                estimator, cache,\n            )),\n            Tag::BlockHeader => Message::new_get_response::<BlockHeader>(\n                &LargestSpecimen::largest_specimen(estimator, cache),\n            ),\n            Tag::TrieOrChunk => Message::new_get_response::<TrieOrChunk>(\n                &LargestSpecimen::largest_specimen(estimator, cache),\n            ),\n            Tag::FinalitySignature => Message::new_get_response::<FinalitySignature>(\n           
     &LargestSpecimen::largest_specimen(estimator, cache),\n            ),\n            Tag::SyncLeap => Message::new_get_response::<SyncLeap>(\n                &LargestSpecimen::largest_specimen(estimator, cache),\n            ),\n            Tag::ApprovalsHashes => Message::new_get_response::<ApprovalsHashes>(\n                &LargestSpecimen::largest_specimen(estimator, cache),\n            ),\n            Tag::BlockExecutionResults => {\n                Message::new_get_response::<BlockExecutionResultsOrChunk>(\n                    &LargestSpecimen::largest_specimen(estimator, cache),\n                )\n            }\n        }\n        .expect(\"did not expect new_get_response from largest deploy to fail\")\n    })\n}\n\n/// Returns the largest string allowed for a chain name.\nfn largest_chain_name<E: SizeEstimator>(estimator: &E) -> String {\n    string_max_characters(estimator.parameter(\"network_name_limit\"))\n}\n\n/// Returns a string with `len`s characters of the largest possible size.\nfn string_max_characters(max_char: usize) -> String {\n    std::iter::repeat(HIGHEST_UNICODE_CODEPOINT)\n        .take(max_char)\n        .collect()\n}\n\n/// Returns the max rounds per era with the specimen parameters.\n///\n/// See the [`max_rounds_per_era`] function.\npub(crate) fn estimator_max_rounds_per_era(estimator: &impl SizeEstimator) -> usize {\n    let minimum_era_height = estimator.parameter(\"minimum_era_height\");\n    let era_duration_ms = TimeDiff::from_millis(estimator.parameter(\"era_duration_ms\"));\n    let minimum_round_length_ms =\n        TimeDiff::from_millis(estimator.parameter(\"minimum_round_length_ms\"));\n\n    max_rounds_per_era(minimum_era_height, era_duration_ms, minimum_round_length_ms)\n        .try_into()\n        .expect(\"to be a valid `usize`\")\n}\n\n#[cfg(test)]\nmod tests {\n    use super::Cache;\n\n    #[test]\n    fn memoization_cache_simple() {\n        let mut cache = Cache::default();\n\n        
assert!(cache.get::<u32>().is_none());\n        assert!(cache.get::<String>().is_none());\n\n        cache.set::<u32>(1234);\n        assert_eq!(cache.get::<u32>(), Some(&1234));\n\n        cache.set::<String>(\"a string is not copy\".to_owned());\n        assert_eq!(\n            cache.get::<String>().map(String::as_str),\n            Some(\"a string is not copy\")\n        );\n        assert_eq!(cache.get::<u32>(), Some(&1234));\n\n        cache.set::<String>(\"this should not overwrite\".to_owned());\n        assert_eq!(\n            cache.get::<String>().map(String::as_str),\n            Some(\"a string is not copy\")\n        );\n    }\n}\n"
  },
  {
    "path": "node/src/utils/umask.rs",
    "content": "//! Umask setting functions.\n\n/// File mode.\npub(crate) type Mode = libc::mode_t;\n\n/// Set the umask to `new_mode`, returning the current mode.\nfn umask(new_mode: Mode) -> Mode {\n    // `umask` is always successful (according to the manpage), so there is no error condition to\n    // check.\n    unsafe { libc::umask(new_mode) }\n}\n\n/// Sets a new umask, returning a guard that will restore the current umask when dropped.\npub(crate) fn temp_umask(new_mode: Mode) -> UmaskGuard {\n    let prev = umask(new_mode);\n    UmaskGuard { prev }\n}\n\n/// Guard for umask, will restore the contained umask on drop.\n#[derive(Clone, Debug)]\npub(crate) struct UmaskGuard {\n    /// The mode stored in the guard.\n    prev: Mode,\n}\n\nimpl Drop for UmaskGuard {\n    fn drop(&mut self) {\n        umask(self.prev);\n    }\n}\n"
  },
  {
    "path": "node/src/utils/work_queue.rs",
    "content": "//! Work queue for finite work.\n//!\n//! A queue that allows for processing a variable amount of work that may spawn more jobs, but is\n//! expected to finish eventually.\n\nuse std::{\n    collections::VecDeque,\n    sync::{Arc, Mutex},\n};\n\nuse futures::{stream, Stream};\nuse tokio::sync::Notify;\n\n/// Multi-producer, multi-consumer async job queue with end conditions.\n///\n/// Keeps track of in-progress jobs and can indicate to workers that all work has been finished.\n/// Intended to be used for jobs that will spawn other jobs during processing, but stop once all\n/// jobs have finished.\n///\n/// # Example use\n///\n/// ```rust\n/// #![allow(non_snake_case)]\n/// # use std::{sync::Arc, time::Duration};\n/// #\n/// # use futures::stream::{futures_unordered::FuturesUnordered, StreamExt};\n/// #\n/// # use casper_node::utils::work_queue::WorkQueue;\n/// #\n/// type DemoJob = (&'static str, usize);\n///\n/// /// Job processing function.\n/// ///\n/// /// For a given job `(name, n)`, returns two jobs with `n = n - 1`, unless `n == 0`.\n/// async fn process_job(job: DemoJob) -> Vec<DemoJob> {\n///     tokio::time::sleep(Duration::from_millis(25)).await;\n///\n///     let (tag, n) = job;\n///\n///     if n == 0 {\n///         Vec::new()\n///     } else {\n///         vec![(tag, n - 1), (tag, n - 1)]\n///     }\n/// }\n///\n/// /// Job-processing worker.\n/// ///\n/// /// `id` is the worker ID for logging.\n/// async fn worker(id: usize, q: Arc<WorkQueue<DemoJob>>) {\n///     println!(\"worker {}: init\", id);\n///\n///     while let Some(job) = q.next_job().await {\n///         println!(\"worker {}: start job {:?}\", id, job.inner());\n///         for new_job in process_job(job.inner().clone()).await {\n///             q.push_job(new_job);\n///         }\n///         println!(\"worker {}: finish job {:?}\", id, job.inner());\n///     }\n///\n///     println!(\"worker {}: shutting down\", id);\n/// }\n///\n/// const WORKER_COUNT: usize = 3;\n/// 
#\n/// # async fn test_func() {\n/// let q = Arc::new(WorkQueue::default());\n/// q.push_job((\"A\", 3));\n///\n/// let workers: FuturesUnordered<_> = (0..WORKER_COUNT).map(|id| worker(id, q.clone())).collect();\n///\n/// // Wait for all workers to finish.\n/// workers.for_each(|_| async move {}).await;\n/// # }\n/// # let rt = tokio::runtime::Runtime::new().unwrap();\n/// # let handle = rt.handle();\n/// # handle.block_on(test_func());\n/// ```\n#[derive(Debug)]\npub struct WorkQueue<T> {\n    /// Inner workings of the queue.\n    inner: Mutex<QueueInner<T>>,\n    /// Notifier for waiting tasks.\n    notify: Notify,\n}\n\n/// Queue inner state.\n#[derive(Debug)]\nstruct QueueInner<T> {\n    /// Jobs currently in the queue.\n    jobs: VecDeque<T>,\n    /// Number of jobs that have been popped from the queue using `next_job` but not finished.\n    in_progress: usize,\n}\n\n// Manual default implementation, since the derivation would require a `T: Default` trait bound.\nimpl<T> Default for WorkQueue<T> {\n    fn default() -> Self {\n        Self {\n            inner: Default::default(),\n            notify: Default::default(),\n        }\n    }\n}\n\nimpl<T> Default for QueueInner<T> {\n    fn default() -> Self {\n        Self {\n            jobs: Default::default(),\n            in_progress: Default::default(),\n        }\n    }\n}\n\nimpl<T> WorkQueue<T> {\n    /// Pop a job from the queue.\n    ///\n    /// If there is a job in the queue, returns the job and increases the internal in progress\n    /// counter by one.\n    ///\n    /// If there are still jobs in progress, but none queued, waits until either of these conditions\n    /// changes, then retries.\n    ///\n    /// If there are no jobs available and no jobs in progress, returns `None`.\n    pub async fn next_job(self: &Arc<Self>) -> Option<JobHandle<T>> {\n        loop {\n            let waiting;\n            {\n                let mut inner = self.inner.lock().expect(\"lock poisoned\");\n                
match inner.jobs.pop_front() {\n                    Some(job) => {\n                        // We got a job, increase the `in_progress` count and return.\n                        inner.in_progress += 1;\n                        return Some(JobHandle {\n                            job,\n                            queue: self.clone(),\n                        });\n                    }\n                    None => {\n                        // No job found. Check if we are completely done.\n                        if inner.in_progress == 0 {\n                            // No more jobs, no jobs in progress. We are done!\n                            return None;\n                        }\n\n                        // Otherwise, we have to wait.\n                        waiting = self.notify.notified();\n                    }\n                }\n            }\n\n            // Note: Any notification sent while executing this segment (after the guard has been\n            // dropped, but before `waiting.await` has been entered) will still be picked up by\n            // `waiting.await`, as the call to `notified()` marks the beginning of the waiting\n            // period, not `waiting.await`. 
See `tests::notification_assumption_holds`.\n\n            // After freeing the lock, wait for a new job to arrive or be finished.\n            waiting.await;\n        }\n    }\n\n    /// Pushes a job onto the queue.\n    ///\n    /// If there are any worker waiting on `next_job`, one of them will receive the job.\n    pub fn push_job(&self, job: T) {\n        let mut inner = self.inner.lock().expect(\"lock poisoned\");\n\n        inner.jobs.push_back(job);\n        self.notify.notify_waiters();\n    }\n\n    /// Returns the number of jobs in the queue.\n    pub fn num_jobs(&self) -> usize {\n        self.inner.lock().expect(\"lock poisoned\").jobs.len()\n    }\n\n    /// Creates a streaming consumer of the work queue.\n    #[inline]\n    pub fn to_stream(self: Arc<Self>) -> impl Stream<Item = JobHandle<T>> {\n        stream::unfold(self, |work_queue| async move {\n            let next = work_queue.next_job().await;\n            next.map(|handle| (handle, work_queue))\n        })\n    }\n\n    /// Mark job completion.\n    ///\n    /// This is an internal function to be used by `JobHandle`, which locks the internal queue and\n    /// decreases the in-progress count by one.\n    fn complete_job(&self) {\n        let mut inner = self.inner.lock().expect(\"lock poisoned\");\n\n        inner.in_progress -= 1;\n        self.notify.notify_waiters();\n    }\n}\n\n/// Handle containing a job.\n///\n/// Holds a job popped from the job queue.\n///\n/// The job will be considered completed once `JobHandle` has been dropped.\n#[derive(Debug)]\npub struct JobHandle<T> {\n    /// The protected job.\n    job: T,\n    /// Queue job was removed from.\n    queue: Arc<WorkQueue<T>>,\n}\n\nimpl<T> JobHandle<T> {\n    /// Returns a reference to the inner job.\n    pub fn inner(&self) -> &T {\n        &self.job\n    }\n}\n\nimpl<T> Drop for JobHandle<T> {\n    fn drop(&mut self) {\n        self.queue.complete_job();\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::{\n        
sync::{\n            atomic::{AtomicU32, Ordering},\n            Arc,\n        },\n        time::Duration,\n    };\n\n    use futures::{FutureExt, StreamExt};\n    use tokio::sync::Notify;\n\n    use super::WorkQueue;\n\n    #[derive(Debug)]\n    struct TestJob(u32);\n\n    // Verify that the assumption made about `Notification` -- namely that a call to `notified()` is\n    // enough to \"register\" the waiter -- holds.\n    #[test]\n    fn notification_assumption_holds() {\n        let not = Notify::new();\n\n        // First attempt to await a notification, should return pending.\n        assert!(not.notified().now_or_never().is_none());\n\n        // Second, we notify, then try notification again. Should also return pending, as we were\n        // \"not around\" when the notification happened.\n        not.notify_waiters();\n        assert!(not.notified().now_or_never().is_none());\n\n        // Finally, we \"register\" for notification beforehand.\n        let waiter = not.notified();\n        not.notify_waiters();\n        assert!(waiter.now_or_never().is_some());\n    }\n\n    /// Process a job, sleeping a short amout of time on every 5th job.\n    async fn job_worker_simple(queue: Arc<WorkQueue<TestJob>>, sum: Arc<AtomicU32>) {\n        while let Some(job) = queue.next_job().await {\n            if job.inner().0 % 5 == 0 {\n                tokio::time::sleep(Duration::from_millis(50)).await;\n            }\n\n            sum.fetch_add(job.inner().0, Ordering::SeqCst);\n        }\n    }\n\n    /// Process a job, sleeping a short amount of time on every job.\n    ///\n    /// Spawns two additional jobs for every job processed, decreasing the job number until reaching\n    /// zero.\n    async fn job_worker_binary(queue: Arc<WorkQueue<TestJob>>, sum: Arc<AtomicU32>) {\n        while let Some(job) = queue.next_job().await {\n            tokio::time::sleep(Duration::from_millis(10)).await;\n\n            sum.fetch_add(job.inner().0, Ordering::SeqCst);\n\n         
   if job.inner().0 > 0 {\n                queue.push_job(TestJob(job.inner().0 - 1));\n                queue.push_job(TestJob(job.inner().0 - 1));\n            }\n        }\n    }\n\n    #[tokio::test]\n    async fn empty_queue_exits_immediately() {\n        let q: Arc<WorkQueue<TestJob>> = Arc::new(Default::default());\n        assert!(q.next_job().await.is_none());\n    }\n\n    #[tokio::test]\n    async fn large_front_loaded_queue_terminates() {\n        let num_jobs = 1_000;\n        let q: Arc<WorkQueue<TestJob>> = Arc::new(Default::default());\n        for job in (0..num_jobs).map(TestJob) {\n            q.push_job(job);\n        }\n\n        let mut workers = Vec::new();\n        let output = Arc::new(AtomicU32::new(0));\n        for _ in 0..3 {\n            workers.push(tokio::spawn(job_worker_simple(q.clone(), output.clone())));\n        }\n\n        // We use a different pattern for waiting here, see the doctest for a solution that does not\n        // spawn.\n        for worker in workers {\n            worker.await.expect(\"task panicked\");\n        }\n\n        let expected_total = (num_jobs * (num_jobs - 1)) / 2;\n        assert_eq!(output.load(Ordering::SeqCst), expected_total);\n    }\n\n    #[tokio::test]\n    async fn stream_interface_works() {\n        let num_jobs = 1_000;\n        let q: Arc<WorkQueue<TestJob>> = Arc::new(Default::default());\n        for job in (0..num_jobs).map(TestJob) {\n            q.push_job(job);\n        }\n\n        let mut current = 0;\n        let mut stream = Box::pin(q.to_stream());\n        while let Some(job) = stream.next().await {\n            assert_eq!(job.inner().0, current);\n            current += 1;\n        }\n    }\n\n    #[tokio::test]\n    async fn complex_queue_terminates() {\n        let num_jobs = 5;\n        let q: Arc<WorkQueue<TestJob>> = Arc::new(Default::default());\n        for _ in 0..num_jobs {\n            q.push_job(TestJob(num_jobs));\n        }\n\n        let mut workers = 
Vec::new();\n        let output = Arc::new(AtomicU32::new(0));\n        for _ in 0..3 {\n            workers.push(tokio::spawn(job_worker_binary(q.clone(), output.clone())));\n        }\n\n        // We use a different pattern for waiting here, see the doctest for a solution that does not\n        // spawn.\n        for worker in workers {\n            worker.await.expect(\"task panicked\");\n        }\n\n        // A single job starting at `k` will add `SUM_{n=0}^{k} (k-n) * 2^n`, which is\n        // 57 for `k=5`. We start 5 jobs, so we expect `5 * 57 = 285` to be the result.\n        let expected_total = 285;\n        assert_eq!(output.load(Ordering::SeqCst), expected_total);\n    }\n}\n"
  },
  {
    "path": "node/src/utils.rs",
    "content": "//! Various functions that are not limited to a particular module, but are too small to warrant\n//! being factored out into standalone crates.\n\nmod block_signatures;\npub(crate) mod chain_specification;\npub(crate) mod config_specification;\nmod display_error;\npub(crate) mod ds;\nmod external;\npub(crate) mod fmt_limit;\npub(crate) mod opt_display;\n#[cfg(target_os = \"linux\")]\npub(crate) mod rlimit;\npub(crate) mod round_robin;\npub(crate) mod specimen;\npub(crate) mod umask;\npub mod work_queue;\n\nuse std::{\n    fmt::{self, Debug, Display, Formatter},\n    io,\n    net::{SocketAddr, ToSocketAddrs},\n    ops::{Add, BitXorAssign, Div},\n    path::{Path, PathBuf},\n    sync::atomic::{AtomicBool, Ordering},\n    time::{Instant, SystemTime},\n};\n\n#[cfg(test)]\nuse std::{any, sync::Arc, time::Duration};\n\nuse datasize::DataSize;\nuse hyper::server::{conn::AddrIncoming, Builder, Server};\n#[cfg(test)]\nuse once_cell::sync::Lazy;\nuse prometheus::{self, Histogram, HistogramOpts, Registry};\nuse serde::Serialize;\nuse thiserror::Error;\nuse tracing::{error, warn};\n\nuse crate::types::NodeId;\npub(crate) use block_signatures::{check_sufficient_block_signatures, BlockSignatureError};\npub(crate) use display_error::display_error;\n#[cfg(test)]\npub(crate) use external::RESOURCES_PATH;\npub use external::{External, LoadError, Loadable};\npub(crate) use round_robin::WeightedRoundRobin;\n\n/// DNS resolution error.\n#[derive(Debug, Error)]\n#[error(\"could not resolve `{address}`: {kind}\")]\npub struct ResolveAddressError {\n    /// Address that failed to resolve.\n    address: String,\n    /// Reason for resolution failure.\n    kind: ResolveAddressErrorKind,\n}\n\n/// DNS resolution error kind.\n#[derive(Debug)]\nenum ResolveAddressErrorKind {\n    /// Resolve returned an error.\n    ErrorResolving(io::Error),\n    /// Resolution did not yield any address.\n    NoAddressFound,\n}\n\nimpl Display for ResolveAddressErrorKind {\n    fn fmt(&self, f: 
&mut Formatter<'_>) -> fmt::Result {\n        match self {\n            ResolveAddressErrorKind::ErrorResolving(err) => {\n                write!(f, \"could not run dns resolution: {}\", err)\n            }\n            ResolveAddressErrorKind::NoAddressFound => {\n                write!(f, \"no addresses found\")\n            }\n        }\n    }\n}\n\n/// Backport of `Result::flatten`, see <https://github.com/rust-lang/rust/issues/70142>.\npub trait FlattenResult {\n    /// The output of the flattening operation.\n    type Output;\n\n    /// Flattens one level.\n    ///\n    /// This function is named `flatten_result` instead of `flatten` to avoid name collisions once\n    /// `Result::flatten` stabilizes.\n    fn flatten_result(self) -> Self::Output;\n}\n\nimpl<T, E> FlattenResult for Result<Result<T, E>, E> {\n    type Output = Result<T, E>;\n\n    #[inline]\n    fn flatten_result(self) -> Self::Output {\n        match self {\n            Ok(Ok(v)) => Ok(v),\n            Ok(Err(e)) => Err(e),\n            Err(e) => Err(e),\n        }\n    }\n}\n\n/// Parses a network address from a string, with DNS resolution.\npub(crate) fn resolve_address(address: &str) -> Result<SocketAddr, ResolveAddressError> {\n    address\n        .to_socket_addrs()\n        .map_err(|err| ResolveAddressError {\n            address: address.to_string(),\n            kind: ResolveAddressErrorKind::ErrorResolving(err),\n        })?\n        .next()\n        .ok_or_else(|| ResolveAddressError {\n            address: address.to_string(),\n            kind: ResolveAddressErrorKind::NoAddressFound,\n        })\n}\n\n/// An error starting one of the HTTP servers.\n#[derive(Debug, Error)]\npub(crate) enum ListeningError {\n    /// Failed to resolve address.\n    #[error(\"failed to resolve network address: {0}\")]\n    ResolveAddress(ResolveAddressError),\n\n    /// Failed to listen.\n    #[error(\"failed to listen on {address}: {error}\")]\n    Listen {\n        /// The address attempted to 
listen on.\n        address: SocketAddr,\n        /// The failure reason.\n        error: Box<dyn std::error::Error + Send + Sync>,\n    },\n}\n\npub(crate) fn start_listening(address: &str) -> Result<Builder<AddrIncoming>, ListeningError> {\n    let address = resolve_address(address).map_err(|error| {\n        warn!(%error, %address, \"failed to start HTTP server, cannot parse address\");\n        ListeningError::ResolveAddress(error)\n    })?;\n\n    Server::try_bind(&address).map_err(|error| {\n        warn!(%error, %address, \"failed to start HTTP server\");\n        ListeningError::Listen {\n            address,\n            error: Box::new(error),\n        }\n    })\n}\n\n/// Moves a value to the heap and then forgets about, leaving only a static reference behind.\n#[inline]\npub(crate) fn leak<T>(value: T) -> &'static T {\n    Box::leak(Box::new(value))\n}\n\n/// A flag shared across multiple subsystem.\n#[derive(Copy, Clone, DataSize, Debug)]\npub(crate) struct SharedFlag(&'static AtomicBool);\n\nimpl SharedFlag {\n    /// Creates a new shared flag.\n    ///\n    /// The flag is initially not set.\n    pub(crate) fn new() -> Self {\n        SharedFlag(leak(AtomicBool::new(false)))\n    }\n\n    /// Checks whether the flag is set.\n    pub(crate) fn is_set(self) -> bool {\n        self.0.load(Ordering::SeqCst)\n    }\n\n    /// Set the flag.\n    pub(crate) fn set(self) {\n        self.0.store(true, Ordering::SeqCst);\n    }\n\n    /// Returns a shared instance of the flag for testing.\n    ///\n    /// The returned flag should **never** have `set` be called upon it.\n    #[cfg(test)]\n    pub(crate) fn global_shared() -> Self {\n        static SHARED_FLAG: Lazy<SharedFlag> = Lazy::new(SharedFlag::new);\n\n        *SHARED_FLAG\n    }\n}\n\nimpl Default for SharedFlag {\n    fn default() -> Self {\n        Self::new()\n    }\n}\n\n/// With-directory context.\n///\n/// Associates a type with a \"working directory\".\n#[derive(Clone, DataSize, Debug)]\npub 
struct WithDir<T> {\n    dir: PathBuf,\n    value: T,\n}\n\nimpl<T> WithDir<T> {\n    /// Creates a new with-directory context.\n    pub fn new<P: Into<PathBuf>>(path: P, value: T) -> Self {\n        WithDir {\n            dir: path.into(),\n            value,\n        }\n    }\n\n    /// Returns a reference to the inner path.\n    pub fn dir(&self) -> &Path {\n        self.dir.as_ref()\n    }\n\n    /// Deconstructs a with-directory context.\n    pub(crate) fn into_parts(self) -> (PathBuf, T) {\n        (self.dir, self.value)\n    }\n\n    /// Maps an internal value onto a reference.\n    pub fn map_ref<U, F: FnOnce(&T) -> U>(&self, f: F) -> WithDir<U> {\n        WithDir {\n            dir: self.dir.clone(),\n            value: f(&self.value),\n        }\n    }\n\n    /// Get a reference to the inner value.\n    pub fn value(&self) -> &T {\n        &self.value\n    }\n\n    /// Get a mutable reference to the inner value.\n    pub fn value_mut(&mut self) -> &mut T {\n        &mut self.value\n    }\n\n    /// Adds `self.dir` as a parent if `path` is relative, otherwise returns `path` unchanged.\n    pub fn with_dir(&self, path: PathBuf) -> PathBuf {\n        if path.is_relative() {\n            self.dir.join(path)\n        } else {\n            path\n        }\n    }\n}\n\n/// The source of a piece of data.\n#[derive(Clone, Debug, Serialize)]\npub(crate) enum Source {\n    /// A peer with the wrapped ID.\n    PeerGossiped(NodeId),\n    /// A peer with the wrapped ID.\n    Peer(NodeId),\n    /// A client.\n    Client,\n    /// A client via the speculative_exec server.\n    SpeculativeExec,\n    /// This node.\n    Ourself,\n}\n\nimpl Source {\n    #[allow(clippy::wrong_self_convention)]\n    pub(crate) fn is_client(&self) -> bool {\n        match self {\n            Source::Client | Source::SpeculativeExec => true,\n            Source::PeerGossiped(_) | Source::Peer(_) | Source::Ourself => false,\n        }\n    }\n\n    /// If `self` represents a peer, returns its 
ID, otherwise returns `None`.\n    pub(crate) fn node_id(&self) -> Option<NodeId> {\n        match self {\n            Source::Peer(node_id) | Source::PeerGossiped(node_id) => Some(*node_id),\n            Source::Client | Source::SpeculativeExec | Source::Ourself => None,\n        }\n    }\n}\n\nimpl Display for Source {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            Source::PeerGossiped(node_id) | Source::Peer(node_id) => {\n                Display::fmt(node_id, formatter)\n            }\n            Source::Client => write!(formatter, \"client\"),\n            Source::SpeculativeExec => write!(formatter, \"client (speculative exec)\"),\n            Source::Ourself => write!(formatter, \"ourself\"),\n        }\n    }\n}\n\n/// Divides `numerator` by `denominator` and rounds to the closest integer (round half down).\n///\n/// `numerator + denominator / 2` must not overflow, and `denominator` must not be zero.\npub(crate) fn div_round<T>(numerator: T, denominator: T) -> T\nwhere\n    T: Add<Output = T> + Div<Output = T> + From<u8> + Copy,\n{\n    (numerator + denominator / T::from(2)) / denominator\n}\n\n/// Creates a prometheus Histogram and registers it.\npub(crate) fn register_histogram_metric(\n    registry: &Registry,\n    metric_name: &str,\n    metric_help: &str,\n    buckets: Vec<f64>,\n) -> Result<Histogram, prometheus::Error> {\n    let histogram_opts = HistogramOpts::new(metric_name, metric_help).buckets(buckets);\n    let histogram = Histogram::with_opts(histogram_opts)?;\n    registry.register(Box::new(histogram.clone()))?;\n    Ok(histogram)\n}\n\n/// Unregisters a metric from the Prometheus registry.\n#[macro_export]\nmacro_rules! 
unregister_metric {\n    ($registry:expr, $metric:expr) => {\n        $registry\n            .unregister(Box::new($metric.clone()))\n            .unwrap_or_else(|_| {\n                tracing::error!(\n                    \"unregistering {} failed: was not registered\",\n                    stringify!($metric)\n                )\n            });\n    };\n}\n\n/// XORs two byte sequences.\n///\n/// # Panics\n///\n/// Panics if `lhs` and `rhs` are not of equal length.\n#[inline]\npub(crate) fn xor(lhs: &mut [u8], rhs: &[u8]) {\n    // Implementing SIMD support is left as an exercise for the reader.\n    assert_eq!(lhs.len(), rhs.len(), \"xor inputs should have equal length\");\n    lhs.iter_mut()\n        .zip(rhs.iter())\n        .for_each(|(sb, &cb)| sb.bitxor_assign(cb));\n}\n\n/// Wait until all strong references for a particular arc have been dropped.\n///\n/// Downgrades and immediately drops the `Arc`, keeping only a weak reference. The reference will\n/// then be polled `attempts` times, unless it has a strong reference count of 0.\n///\n/// Returns whether or not `arc` has zero strong references left.\n///\n/// # Note\n///\n/// Using this function is usually a potential architectural issue and it should be used very\n/// sparingly. 
Consider introducing a different access pattern for the value under `Arc`.\n#[cfg(test)]\npub(crate) async fn wait_for_arc_drop<T>(\n    arc: Arc<T>,\n    attempts: usize,\n    retry_delay: Duration,\n) -> bool {\n    // Ensure that if we do hold the last reference, we are now going to 0.\n    let weak = Arc::downgrade(&arc);\n    drop(arc);\n\n    for _ in 0..attempts {\n        let strong_count = weak.strong_count();\n\n        if strong_count == 0 {\n            // Everything has been dropped, we are done.\n            return true;\n        }\n\n        tokio::time::sleep(retry_delay).await;\n    }\n\n    error!(\n        attempts, ?retry_delay, ty=%any::type_name::<T>(),\n        \"failed to clean up shared reference\"\n    );\n\n    false\n}\n\n/// An anchor for converting an `Instant` into a wall-clock (`SystemTime`) time.\n#[derive(Copy, Clone, Debug)]\npub(crate) struct TimeAnchor {\n    /// The reference instant used for conversion.\n    now: Instant,\n    /// The reference wall-clock timestamp used for conversion.\n    wall_clock_now: SystemTime,\n}\n\nimpl TimeAnchor {\n    /// Creates a new time anchor.\n    ///\n    /// Will take a sample of the monotonic clock and the current time and store it in the anchor.\n    pub(crate) fn now() -> Self {\n        TimeAnchor {\n            now: Instant::now(),\n            wall_clock_now: SystemTime::now(),\n        }\n    }\n\n    /// Converts a point in time from the monotonic clock to wall clock time, using this anchor.\n    #[inline]\n    pub(crate) fn convert(&self, then: Instant) -> SystemTime {\n        if then > self.now {\n            self.wall_clock_now + then.duration_since(self.now)\n        } else {\n            self.wall_clock_now - self.now.duration_since(then)\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::{sync::Arc, time::Duration};\n\n    use crate::utils::SharedFlag;\n\n    use super::{wait_for_arc_drop, xor};\n\n    #[test]\n    fn xor_works() {\n        let mut lhs = [0x43, 
0x53, 0xf2, 0x2f, 0xa9, 0x70, 0xfb, 0xf4];\n        let rhs = [0x04, 0x0b, 0x5c, 0xa1, 0xef, 0x11, 0x12, 0x23];\n        let xor_result = [0x47, 0x58, 0xae, 0x8e, 0x46, 0x61, 0xe9, 0xd7];\n\n        xor(&mut lhs, &rhs);\n\n        assert_eq!(lhs, xor_result);\n    }\n\n    #[test]\n    #[should_panic(expected = \"equal length\")]\n    fn xor_panics_on_uneven_inputs() {\n        let mut lhs = [0x43, 0x53, 0xf2, 0x2f, 0xa9, 0x70, 0xfb, 0xf4];\n        let rhs = [0x04, 0x0b, 0x5c, 0xa1, 0xef, 0x11];\n\n        xor(&mut lhs, &rhs);\n    }\n\n    #[tokio::test]\n    async fn arc_drop_waits_for_drop() {\n        let retry_delay = Duration::from_millis(25);\n        let attempts = 15;\n\n        let arc = Arc::new(());\n\n        let arc_in_background = arc.clone();\n        let _weak_in_background = Arc::downgrade(&arc);\n\n        // At this point, the Arc has the following refernces:\n        //\n        // * main test task (`arc`, strong)\n        // * background strong reference (`arc_in_background`)\n        // * background weak reference (`weak_in_background`)\n\n        // Phase 1: waiting for the arc should fail, because there still is the background\n        // reference.\n        assert!(!wait_for_arc_drop(arc, attempts, retry_delay).await);\n\n        // We \"restore\" the arc from the background arc.\n        let arc = arc_in_background.clone();\n\n        // Add another \"foreground\" weak reference.\n        let weak = Arc::downgrade(&arc);\n\n        // Phase 2: Our background tasks drops its reference, now we should succeed.\n        drop(arc_in_background);\n        assert!(wait_for_arc_drop(arc, attempts, retry_delay).await);\n\n        // Immedetialy after, we should not be able to obtain a strong reference anymore.\n        // This test fails only if we have a race condition, so false positive tests are possible.\n        assert!(weak.upgrade().is_none());\n    }\n\n    #[test]\n    fn shared_flag_sanity_check() {\n        let flag = 
SharedFlag::new();\n        let copied = flag;\n\n        assert!(!flag.is_set());\n        assert!(!copied.is_set());\n        assert!(!flag.is_set());\n        assert!(!copied.is_set());\n\n        flag.set();\n\n        assert!(flag.is_set());\n        assert!(copied.is_set());\n        assert!(flag.is_set());\n        assert!(copied.is_set());\n    }\n}\n"
  },
  {
    "path": "resources/README.md",
    "content": "# resources\n\nResources artifacts for node-rs.\n\n## categories\n\n* test: resources used by test fixtures\n* production: resources used by released software\n* local: resources used when running a node outside of a production environment; most typically used when running a node locally.\n"
  },
  {
    "path": "resources/integration-test/accounts.toml",
    "content": "[[accounts]]\npublic_key = \"01054c929d687267a30341c759b4a9cab1238cb2de65546be43dad479c50745724\"\nbalance = \"9999500000000000000\"\n\n[[accounts]]\npublic_key = \"011d86fcc3e438fcb47d4d9af77e9db97ca1c322c3e87d5a4ea6f3386b9ddcd6ed\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"100000000000000\"\ndelegation_rate = 10\n\n[[accounts]]\npublic_key = \"0115c9b40c06ff99b0cbadf1140b061b5dbf92103e66a6330fbcc7768f5219c1ce\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"100000000000000\"\ndelegation_rate = 10\n\n[[accounts]]\npublic_key = \"019e7b8bdec03ba83be4f5443d9f7f9111c77fec984ce9bb5bb7eb3da1e689c02d\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"100000000000000\"\ndelegation_rate = 10\n\n[[accounts]]\npublic_key = \"017fec504c642f2b321b8591f1c3008348c57a81acafceb5a392cf8416a5fb4a3c\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"100000000000000\"\ndelegation_rate = 10\n\n[[accounts]]\npublic_key = \"011b19ef983c039a2a335f2f35199bf8cad5ba2c583bd709748feb76f24ffb1bab\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"100000000000000\"\n"
  },
  {
    "path": "resources/integration-test/chainspec.toml",
    "content": "[protocol]\n# Protocol version.\nversion = '2.2.2'\n# Whether we need to clear latest blocks back to the switch block just before the activation point or not.\nhard_reset = true\n# This protocol version becomes active at this point.\n#\n# If it is a timestamp string, it represents the timestamp for the genesis block.  This is the beginning of era 0.  By\n# this time, a sufficient majority (> 50% + F/2 — see finality_threshold_fraction below) of validator nodes must be up\n# and running to start the blockchain.  This timestamp is also used in seeding the pseudo-random number generator used\n# in contract-runtime for computing genesis post-state hash.\n#\n# If it is an integer, it represents an era ID, meaning the protocol version becomes active at the start of this era.\nactivation_point = 19672\n\n[network]\n# Human readable name for convenience; the genesis_hash is the true identifier.  The name influences the genesis hash by\n# contributing to the seeding of the pseudo-random number generator used in contract-runtime for computing genesis\n# post-state hash.\nname = 'integration-test'\n# The maximum size of an acceptable networking message in bytes.  Any message larger than this will\n# be rejected at the networking level.\nmaximum_net_message_size = 25_165_824\n\n[core]\n# Era duration.\nera_duration = '120 minutes'\n# Minimum number of blocks per era.  
An era will take longer than `era_duration` if that is necessary to reach the\n# minimum height.\nminimum_era_height = 100\n# Minimum difference between a block's and its child's timestamp.\nminimum_block_time = '8000 ms'\n# Number of slots available in validator auction.\nvalidator_slots = 100\n# A number between 0 and 1 representing the fault tolerance threshold as a fraction, used by the internal finalizer.\n# It is the fraction of validators that would need to equivocate to make two honest nodes see two conflicting blocks as\n# finalized: A higher value F makes it safer to rely on finalized blocks.  It also makes it more difficult to finalize\n# blocks, however, and requires strictly more than (F + 1)/2 validators to be working correctly.\nfinality_threshold_fraction = [1, 3]\n# Protocol version from which nodes are required to hold strict finality signatures.\nstart_protocol_version_with_strict_finality_signatures_required = '1.5.0'\n# Which finality is required for legacy blocks. Options are 'Strict', 'Weak' and 'Any'.\n# Used to determine finality sufficiency for new joiners syncing blocks created\n# in a protocol version before\n# `start_protocol_version_with_strict_finality_signatures_required`.\nlegacy_required_finality = 'Strict'\n# Number of eras before an auction actually defines the set of validators.  
If you bond with a sufficient bid in era N,\n# you will be a validator in era N + auction_delay + 1.\nauction_delay = 1\n# The period after genesis during which a genesis validator's bid is locked.\nlocked_funds_period = '0 days'\n# The period in which genesis validator's bid is released over time after it's unlocked.\nvesting_schedule_period = '0 weeks'\n# Default number of eras that need to pass to be able to withdraw unbonded funds.\nunbonding_delay = 7\n# Round seigniorage rate represented as a fraction of the total supply.\n#\n# Annual issuance: 8%\n# Minimum block time: 8000 milliseconds\n# Ticks per year: 31536000000\n#\n# (1+0.08)^((8000)/31536000000)-1 is expressed as a fractional number below\n# Python:\n# from fractions import Fraction\n# Fraction((1 + 0.08)**((8000)/31536000000) - 1).limit_denominator(1000000000)\nround_seigniorage_rate = [11, 563427926]\n# Maximum number of associated keys for a single account.\nmax_associated_keys = 100\n# Maximum height of contract runtime call stack.\nmax_runtime_call_stack_height = 12\n# Minimum allowed delegation amount in motes\nminimum_delegation_amount = 500_000_000_000\n# Maximum allowed delegation amount in motes\nmaximum_delegation_amount = 1_000_000_000_000_000_000\n# Minimum bid amount allowed in motes. Withdrawing one's bid to an amount strictly less than\n# the value specified will be treated as a full unbond of a validator and their associated delegators\nminimum_bid_amount = 500_000_000_000\n# Global state prune batch size (0 = this feature is off)\nprune_batch_size = 0\n# Enables strict arguments checking when calling a contract; i.e. that all non-optional args are provided and of the correct `CLType`.\nstrict_argument_checking = false\n# Number of simultaneous peer requests.\nsimultaneous_peer_requests = 5\n# The consensus protocol to use. 
Options are \"Zug\" and \"Highway\".\nconsensus_protocol = 'Zug'\n# The maximum amount of delegators per validator.\nmax_delegators_per_validator = 1200\n# Minimum delegation rate validators can specify (0-100).\nminimum_delegation_rate = 0\n# The split in finality signature rewards between block producer and participating signers.\nfinders_fee = [1, 5]\n# The proportion of baseline rewards going to reward finality signatures specifically.\nfinality_signature_proportion = [95, 100]\n# Lookback interval indicating which past block we are looking at to reward.\nsignature_rewards_max_delay = 6\n# Allows transfers between accounts in the blockchain network.\n#\n# Setting this to false restricts normal accounts from sending tokens to other accounts, allowing transfers only to administrators.\n# Changing this option makes sense only on private chains.\nallow_unrestricted_transfers = true\n# Enables the auction entry points 'delegate' and 'add_bid'.\n#\n# Setting this to false makes sense only for private chains which don't need to auction new validator slots. These\n# auction entry points will return an error if called when this option is set to false.\nallow_auction_bids = true\n# If set to false, then consensus doesn't compute rewards and always uses 0.\ncompute_rewards = true\n# Defines how refunds of the unused portion of payment amounts are calculated and handled.\n#\n# Valid options are:\n#   'refund': a ratio of the unspent token is returned to the spender.\n#   'burn': a ratio of the unspent token is burned.\n#   'no_refund': no refunds are paid out; this is functionally equivalent to refund with 0% ratio.\n# This causes excess payment amounts to be sent to either a\n# pre-defined purse, or back to the sender.  
The refunded amount is calculated as the given ratio of the payment amount\n# minus the execution costs.\nrefund_handling = { type = 'refund', refund_ratio = [75, 100] }\n# Defines how fees are handled.\n#\n# Valid options are:\n#   'no_fee': fees are eliminated.\n#   'pay_to_proposer': fees are paid to the block proposer\n#   'accumulate': fees are accumulated in a special purse and distributed at the end of each era evenly among all\n#                 administrator accounts\n#   'burn': fees are burned\nfee_handling = { type = 'burn' }\n# If a validator would receive a validator credit, it cannot exceed this percentage of their total stake.\nvalidator_credit_cap = [1, 5]\n# Defines how pricing is handled.\n#\n# Valid options are:\n#   'payment_limited': senders of transaction self-specify how much they pay.\n#   'fixed': costs are fixed, per the cost table\n#   'prepaid': prepaid transaction (currently not supported)\npricing_handling = { type = 'payment_limited' }\n# Does the network allow pre-payment for future\n# execution? Currently not supported.\n#\nallow_prepaid = false\n# Defines how gas holds affect available balance calculations.\n#\n# Valid options are:\n#   'accrued': sum of full value of all non-expired holds.\n#   'amortized': sum of each hold is amortized over the time remaining until expiry.\n#\n# For instance, if 12 hours remained on a gas hold with a 24-hour `gas_hold_interval`,\n#   with accrued, the full hold amount would be applied\n#   with amortized, half the hold amount would be applied\ngas_hold_balance_handling = { type = 'accrued' }\n# Defines how long gas holds last.\n#\n# If fee_handling is set to 'no_fee', the system places a balance hold on the payer\n# equal to the value the fee would have been. Such balance holds expire after a time\n# interval has elapsed. This setting controls how long that interval is. 
The available\n# balance of a purse equals its total balance minus the held amount(s) of non-expired\n# holds (see gas_hold_balance_handling setting for details of how that is calculated).\n#\n# For instance, if gas_hold_interval is 24 hours and 100 gas is used from a purse,\n# a hold for 100 is placed on that purse and is considered when calculating total balance\n# for 24 hours starting from the block_time when the hold was placed.\ngas_hold_interval = '24 hours'\n# List of public keys of administrator accounts. Setting this option makes sense only on private chains which require\n# administrator accounts for regulatory reasons.\nadministrators = []\n# Flag that triggers a migration of all userland accounts and contracts present in global state to the addressable\n# entity in lazy manner.\n# If the flag is set to false then no accounts and contracts are migrated during a protocol upgrade;\n# i.e. all Account records will be present under Key::Account and Contracts and their associated ContractPackage\n# will be written underneath Key::Hash.\n# If the flag is set to true then accounts and contracts are migrated lazily; i.e. on first use of the Account\n# and/or Contract as part of the execution of a Transaction. This means the Accounts/Contracts will be migrated\n# to their corresponding AddressableEntity and the NamedKeys for the previous record are separated and written\n# as discrete top level records. 
For Contracts specifically the entrypoints are also written as discrete top\n# level records\n# Note: Enabling of the AddressableEntity feature is one-way; i.e once enabled as part of a protocol upgrade\n# the flag cannot be disabled in a future protocol upgrade.\nenable_addressable_entity = false\n# This value is used as the penalty payment amount, the lowest cost, and the minimum balance amount.\nbaseline_motes_amount = 2_500_000_000\n# Flag on whether ambiguous entity versions returns an execution error.\ntrap_on_ambiguous_entity_version = false\n# Controls how rewards are handled by the network\nrewards_handling = { type = 'sustain', ratio = [2,8], purse_address = \"uref-b306a4cf02c7725847a1b16035754dd89a6386ba29a774f0a7d8e920ad6d4a77-007\" }\n\n\n[highway]\n# Highway dynamically chooses its round length, between minimum_block_time and maximum_round_length.\nmaximum_round_length = '66 seconds'\n\n[transactions]\n# The duration after the transaction timestamp that it can be included in a block.\nmax_ttl = '2 hours'\n# The maximum number of approvals permitted in a single block.\nblock_max_approval_count = 2600\n# Maximum block size in bytes including transactions contained by the block.  
0 means unlimited.\nmax_block_size = 2_621_400\n# The upper limit of total gas of all transactions in a block.\nblock_gas_limit = 812_500_000_000\n# The minimum amount in motes for a valid native transfer.\nnative_transfer_minimum_motes = 2_500_000_000\n# The maximum value to which `transaction_acceptor.timestamp_leeway` can be set in the config.toml file.\nmax_timestamp_leeway = '5 seconds'\n\n# Configuration of the transaction runtime.\n[transactions.enabled_runtime]\nvm_casper_v1 = true\nvm_casper_v2 = false\n\n[transactions.v1]\n# The configuration settings for the lanes of transactions including both native and Wasm based interactions.\n# Currently the node supports two native interactions the mint and auction and have the reserved identifiers of 0 and 1\n# respectively\n# The remaining wasm based lanes specify the range of configuration settings for a given Wasm based transaction\n# within a given lane.\n# The maximum length in bytes of runtime args per V1 transaction.\n# [0] -> Transaction lane label (apart from the reserved native identifiers these are simply labels)\n# Note: For the given mainnet implementation we specially reserve the label 2 for install and upgrades and\n# the lane must be present and defined.\n# Different casper networks may not impose such a restriction.\n# [1] -> Max serialized length of the entire transaction in bytes for a given transaction in a certain lane\n# [2] -> Max args length size in bytes for a given transaction in a certain lane\n# [3] -> Transaction gas limit for a given transaction in a certain lane\n# [4] -> The maximum number of transactions the lane can contain\nnative_mint_lane = [0, 2048, 1024, 100_000_000, 325]\nnative_auction_lane = [1, 3096, 2048, 2_500_000_000, 325]\ninstall_upgrade_lane = [2, 750_000, 2048, 1_000_000_000_000, 1]\nwasm_lanes = [\n    [3, 750_000, 2048, 1_000_000_000_000, 1],\n    [4, 131_072, 1024, 100_000_000_000, 2],\n    [5, 65_536, 512, 5_000_000_000, 40]\n]\n\n[transactions.deploy]\n# The 
maximum number of Motes allowed to be spent during payment.  0 means unlimited.\nmax_payment_cost = '0'\n# The limit of length of serialized payment code arguments.\npayment_args_max_length = 1024\n# The limit of length of serialized session code arguments.\nsession_args_max_length = 1024\n\n[wasm.v1]\n# Amount of free memory (in 64kB pages) each contract can use for stack.\nmax_memory = 64\n# Max stack height (native WebAssembly stack limiter).\nmax_stack_height = 500\n\n[storage_costs]\n# Gas charged per byte stored in the global state.\ngas_per_byte = 1_117_587\n\n# For each opcode cost below there exists a static cost and a dynamic cost.\n# The static cost is a fixed cost for each opcode that is hardcoded and validated by benchmarks.\n[wasm.v1.opcode_costs]\n# Bit operations multiplier.\nbit = 105\n# Arithmetic add operations multiplier.\nadd = 105\n# Mul operations multiplier.\nmul = 105\n# Div operations multiplier.\ndiv = 105\n# Memory load operation multiplier.\nload = 105\n# Memory store operation multiplier.\nstore = 105\n# Const store operation multiplier.\nconst = 105\n# Local operations multiplier.\nlocal = 105\n# Global operations multiplier.\nglobal = 105\n# Integer operations multiplier.\ninteger_comparison = 105\n# Conversion operations multiplier.\nconversion = 105\n# Unreachable operation multiplier.\nunreachable = 105\n# Nop operation multiplier.\nnop = 105\n# Get current memory operation multiplier.\ncurrent_memory = 105\n# Grow memory cost, per page (64kb).\ngrow_memory = 900\n# Sign extension operations cost\nsign = 105\n\n# Control flow operations multiplier.\n[wasm.v1.opcode_costs.control_flow]\nblock = 255\nloop = 255\nif = 105\nelse = 105\nend = 105\nbr = 1665\nbr_if = 510\nreturn = 105\nselect = 105\ncall = 225\ncall_indirect = 270\ndrop = 105\n\n[wasm.v1.opcode_costs.control_flow.br_table]\n# Fixed cost per `br_table` opcode\ncost = 150\n# Size of target labels in the `br_table` opcode will be multiplied by 
`size_multiplier`\nsize_multiplier = 100\n\n# Host function declarations are located in smart_contracts/contract/src/ext_ffi.rs\n[wasm.v1.host_function_costs]\nadd = { cost = 5_800, arguments = [0, 0, 0, 0] }\nadd_associated_key = { cost = 1_200_000, arguments = [0, 0, 0] }\nadd_contract_version = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 0, 0] }\nadd_contract_version_with_message_topics = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 30_000, 0, 0] }\nadd_package_version_with_message_topics = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 30_000, 0, 0] }\nblake2b = { cost = 1_200_000, arguments = [0, 120_000, 0, 0] }\ncall_contract = { cost = 300_000_000, arguments = [0, 0, 0, 120_000, 0, 120_000, 0] }\ncall_versioned_contract = { cost = 300_000_000, arguments = [0, 0, 0, 0, 0, 120_000, 0, 120_000, 0] }\ncreate_contract_package_at_hash = { cost = 200, arguments = [0, 0] }\ncreate_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0] }\ncreate_purse = { cost = 2_500_000_000, arguments = [0, 0] }\ndisable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] }\nget_balance = { cost = 3_000_000, arguments = [0, 0, 0] }\nget_blocktime = { cost = 330, arguments = [0] }\nget_caller = { cost = 380, arguments = [0] }\nget_key = { cost = 2_000, arguments = [0, 440, 0, 0, 0] }\nget_main_purse = { cost = 1_300, arguments = [0] }\nget_named_arg = { cost = 200, arguments = [0, 120_000, 0, 120_000] }\nget_named_arg_size = { cost = 200, arguments = [0, 0, 0] }\nget_phase = { cost = 710, arguments = [0] }\nget_system_contract = { cost = 1_100, arguments = [0, 0, 0] }\nhas_key = { cost = 1_500, arguments = [0, 840] }\nis_valid_uref = { cost = 760, arguments = [0, 0] }\nload_named_keys = { cost = 42_000, arguments = [0, 0] }\nnew_uref = { cost = 17_000, arguments = [0, 0, 590] }\nrandom_bytes = { cost = 200, arguments = [0, 0] }\nprint = { cost = 20_000, arguments = [0, 4_600] }\nprovision_contract_user_group_uref = { cost = 
200, arguments = [0, 0, 0, 0, 0] }\nput_key = { cost = 100_000_000, arguments = [0, 120_000, 0, 120_000] }\nread_host_buffer = { cost = 3_500, arguments = [0, 310, 0] }\nread_value = { cost = 60_000, arguments = [0, 120_000, 0] }\ndictionary_get = { cost = 5_500, arguments = [0, 590, 0] }\nremove_associated_key = { cost = 4_200, arguments = [0, 0] }\nremove_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0] }\nremove_contract_user_group_urefs = { cost = 200, arguments = [0, 0, 0, 0, 0, 120_000] }\nremove_key = { cost = 61_000, arguments = [0, 3_200] }\nret = { cost = 23_000, arguments = [0, 420_000] }\nrevert = { cost = 500, arguments = [0] }\nset_action_threshold = { cost = 74_000, arguments = [0, 0] }\ntransfer_from_purse_to_account = { cost = 2_500_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] }\ntransfer_from_purse_to_purse = { cost = 82_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0] }\ntransfer_to_account = { cost = 2_500_000_000, arguments = [0, 0, 0, 0, 0, 0, 0] }\nupdate_associated_key = { cost = 4_200, arguments = [0, 0, 0] }\nwrite = { cost = 14_000, arguments = [0, 0, 0, 980] }\ndictionary_put = { cost = 9_500, arguments = [0, 1_800, 0, 520] }\nenable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] }\nmanage_message_topic = { cost = 200, arguments = [0, 30_000, 0, 0] }\nemit_message = { cost = 200, arguments = [0, 30_000, 0, 120_000] }\ngeneric_hash = { cost = 1_200_000, arguments = [0, 120_000, 0, 0, 0] }\ncost_increase_per_message = 50\nget_block_info = { cost = 330, arguments = [0, 0] }\nrecover_secp256k1 = { cost = 1_300_000, arguments = [0, 120_000, 0, 0, 0, 0] }\nverify_signature = { cost = 1_300_000, arguments = [0, 120_000, 0, 0, 0, 0] }\ncall_package_version = { cost = 300_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 120_000, 0, 120_000, 0] }\n\n[wasm.v2]\n# Amount of free memory each contract can use for stack.\nmax_memory = 64\n\n[wasm.v2.opcode_costs]\n# Bit operations multiplier.\nbit = 105\n# Arithmetic add operations 
multiplier.\nadd = 105\n# Mul operations multiplier.\nmul = 105\n# Div operations multiplier.\ndiv = 105\n# Memory load operation multiplier.\nload = 105\n# Memory store operation multiplier.\nstore = 105\n# Const store operation multiplier.\nconst = 105\n# Local operations multiplier.\nlocal = 105\n# Global operations multiplier.\nglobal = 105\n# Integer operations multiplier.\ninteger_comparison = 105\n# Conversion operations multiplier.\nconversion = 105\n# Unreachable operation multiplier.\nunreachable = 105\n# Nop operation multiplier.\nnop = 105\n# Get current memory operation multiplier.\ncurrent_memory = 105\n# Grow memory cost, per page (64kb).\ngrow_memory = 900\n# Sign extension operations cost\nsign = 105\n\n# Control flow operations multiplier.\n[wasm.v2.opcode_costs.control_flow]\nblock = 255\nloop = 255\nif = 105\nelse = 105\nend = 105\nbr = 1665\nbr_if = 510\nreturn = 105\nselect = 105\ncall = 225\ncall_indirect = 270\ndrop = 105\n\n[wasm.v2.opcode_costs.control_flow.br_table]\n# Fixed cost per `br_table` opcode\ncost = 150\n# Size of target labels in the `br_table` opcode will be multiplied by `size_multiplier`\nsize_multiplier = 100\n\n[wasm.v2.host_function_costs]\nread = { cost = 0, arguments = [0, 0, 0, 0, 0, 0] }\nwrite = { cost = 0, arguments = [0, 0, 0, 0, 0] }\nremove = { cost = 0, arguments = [0, 0, 0] }\ncopy_input = { cost = 0, arguments = [0, 0] }\nret = { cost = 0, arguments = [0, 0] }\ncreate = { cost = 0, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] }\ntransfer = { cost = 0, arguments = [0, 0, 0] }\nenv_balance = { cost = 0, arguments = [0, 0, 0, 0] }\nupgrade = { cost = 0, arguments = [0, 0, 0, 0, 0, 0] }\ncall = { cost = 0, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] }\nprint = { cost = 0, arguments = [0, 0] }\nemit = { cost = 0, arguments = [0, 0, 0, 0] }\nenv_info = { cost = 0, arguments = [0, 0] }\n\n[wasm.messages_limits]\nmax_topic_name_size = 256\nmax_topics_per_contract = 128\nmax_message_size = 1_024\n\n[system_costs]\n# Penalty 
charge for calling invalid entry point in a system contract.\nno_such_entrypoint = 2_500_000_000\n\n[system_costs.auction_costs]\nget_era_validators = 2_500_000_000\nread_seigniorage_recipients = 5_000_000_000\nadd_bid = 2_500_000_000\nwithdraw_bid = 2_500_000_000\ndelegate = 2_500_000_000\nundelegate = 2_500_000_000\nrun_auction = 2_500_000_000\nslash = 2_500_000_000\ndistribute = 2_500_000_000\nwithdraw_delegator_reward = 5_000_000_000\nwithdraw_validator_reward = 5_000_000_000\nread_era_id = 2_500_000_000\nactivate_bid = 2_500_000_000\nredelegate = 2_500_000_000\nchange_bid_public_key = 5_000_000_000\nadd_reservations = 2_500_000_000\ncancel_reservations = 2_500_000_000\n\n[system_costs.mint_costs]\nmint = 2_500_000_000\nreduce_total_supply = 2_500_000_000\ncreate = 2_500_000_000\nbalance = 100_000_000\nburn = 100_000_000\ntransfer = 100_000_000\nread_base_round_reward = 2_500_000_000\nmint_into_existing_purse = 2_500_000_000\n\n[system_costs.handle_payment_costs]\nget_payment_purse = 10_000\nset_refund_purse = 10_000\nget_refund_purse = 10_000\nfinalize_payment = 2_500_000_000\n\n[system_costs.standard_payment_costs]\npay = 10_000\n\n[vacancy]\n# The cost of a transaction is based on a multiplier. This allows for economic disincentives for misuse of the network.\n#\n# The network starts with a current_gas_price of min_gas_price.\n#\n# Each block has multiple limits (bytes, transactions, transfers, gas, etc.)\n# The utilization for a block is determined by the highest percentage utilization of each of these limits.\n#\n# Ex: transfers limit is 650 and transactions limit is 20 (assume other limits are not a factor here)\n#     19 transactions -> 19/20 or 95%\n#     600 transfers -> 600/650 or 92.3%\n#     resulting block utilization is 95\n#\n# The utilization for an era is the average of all block utilizations. 
At the switch block, the dynamic gas_price is\n# adjusted with the following:\n#\n# If utilization was below the lower_threshold, current_gas_price is decremented by one if higher than min_gas_price.\n# If utilization falls between the thresholds, current_gas_price is not changed.\n# If utilization was above the upper_threshold, current_gas_price is incremented by one if lower than max_gas_price.\n#\n# The cost charged for the transaction is simply the gas_used * current_gas_price.\nupper_threshold = 90\nlower_threshold = 50\nmax_gas_price = 1\nmin_gas_price = 1\n"
  },
  {
    "path": "resources/integration-test/config-example.toml",
    "content": "# ================================\n# Configuration options for a node\n# ================================\n[node]\n\n# If set, use this hash as a trust anchor when joining an existing network.\n#trusted_hash = 'HEX-FORMATTED BLOCK HASH'\n\n# Historical sync behavior for this node. Options are:\n#  'ttl'      (node will attempt to acquire all block data to comply with time to live enforcement)\n#  'genesis'  (node will attempt to acquire all block data back to genesis)\n#  'nosync'   (node will only acquire blocks moving forward)\n#  'isolated' (node will initialize without peers and will not accept peers)\n#  'completeblock' (node will acquire complete block and shutdown)\n# note: the only two states allowed to switch to Validate reactor state are `genesis` and `ttl`.\n#       it is recommended for dedicated validator nodes to be in ttl mode to increase\n#       their ability to maintain maximal uptime...if a long-running genesis validator\n#       goes offline and comes back up while in genesis mode, it must backfill\n#       any gaps in its block awareness before resuming validation.\n#\n#       it is recommended for reporting non-validator nodes to be in genesis mode to\n#       enable support for queries at any block height.\n#\n#       it is recommended for non-validator working nodes (for dapp support, etc) to run in\n#       ttl or nosync mode (depending upon their specific data requirements).\n#\n#       thus for instance a node backing a block explorer would prefer genesis mode,\n#       while a node backing a dapp interested in very recent activity would prefer to run in nosync mode,\n#       and a node backing a dapp interested in auction activity or tracking trends would prefer to run in ttl mode.\n# note: as time goes on, the time to sync back to genesis takes progressively longer.\n# note: ttl is a chainsepc configured behavior on a given network; consult the `max_ttl` chainspec setting\n#       (it is currently ~2 hours by default on 
production and production-like networks but subject to change).\n# note: `nosync` is incompatible with validator behavior; a nosync node is prevented from participating\n#        in consensus / switching to validate mode. it is primarily for lightweight nodes that are\n#        only interested in recent activity.\n# note: an isolated node will not connect to, sync with, or keep up with the network, but will respond to\n#       binary port, rest server, event server, and diagnostic port connections.\nsync_handling = 'genesis'\n\n# Idle time after which the syncing process is considered stalled.\nidle_tolerance = '20 minutes'\n\n# When the syncing process is considered stalled, it'll be retried up to `max_attempts` times.\nmax_attempts = 3\n\n# Default delay for the control events that have no dedicated delay requirements.\ncontrol_logic_default_delay = '1 second'\n\n# Flag which forces the node to resync all the blocks.\nforce_resync = false\n\n# A timeout for the ShutdownForUpgrade state, after which the node will upgrade even if not all\n# conditions are satisfied.\nshutdown_for_upgrade_timeout = '2 minutes'\n\n# Maximum time a node will wait for an upgrade to commit.\nupgrade_timeout = '30 seconds'\n\n# The node detects when it should do a controlled shutdown when it is in a detectably bad state\n# in order to avoid potentially catastrophic uncontrolled crashes. Generally, a node should be\n# allowed to shutdown, and if restarted that node will generally recover gracefully and resume\n# normal operation. However, actively validating nodes have subjective state in memory that is\n# lost on shutdown / restart and must be reacquired from other validating nodes on restart.\n# If all validating nodes shutdown in the middle of an era, social consensus is required to restart\n# the network. 
As a mitigation for that, the following config can be set to true on some validator\n# nodes to cause nodes that are supposed to be validators in the current era to ignore controlled\n# shutdown events and stay up. This allows them to act as sentinels for the consensus data for\n# other restarting nodes. This config is inert on non-validating nodes.\nprevent_validator_shutdown = false\n\n# =================================\n# Configuration options for logging\n# =================================\n[logging]\n\n# Output format.  Possible values are 'text' or 'json'.\nformat = 'json'\n\n# Colored output.  Has no effect if format = 'json'.\ncolor = false\n\n# Abbreviate module names in text output.  Has no effect if format = 'json'.\nabbreviate_modules = false\n\n\n# ===================================\n# Configuration options for consensus\n# ===================================\n[consensus]\n\n# Path (absolute, or relative to this config.toml) to validator's secret key file used to sign\n# consensus messages.\nsecret_key_path = '/etc/casper/validator_keys/secret_key.pem'\n\n# The maximum number of blocks by which execution is allowed to lag behind finalization.\n# If it is more than that, consensus will pause, and resume once the executor has caught up.\nmax_execution_delay = 6\n\n\n# =======================================\n# Configuration options for Zug consensus\n# =======================================\n[consensus.zug]\n\n# Request the latest protocol state from a random peer periodically, with this interval.\n# '0 seconds' means it is disabled and we never request the protocol state from a peer.\nsync_state_interval = '1 second'\n\n# Log inactive or faulty validators periodically, with this interval.\n# '0 seconds' means it is disabled and we never print the log message.\nlog_participation_interval = '1 minute'\n\n# The minimal proposal timeout. 
Validators wait this long for a proposal to receive a quorum of\n# echo messages, before they vote to make the round skippable and move on to the next proposer.\nproposal_timeout = '5 seconds'\n\n# The additional proposal delay that is still considered fast enough, in percent. This should\n# take into account variables like empty vs. full blocks, network traffic etc.\n# E.g. if proposing a full block while under heavy load takes 50% longer than an empty one\n# while idle this should be at least 50, meaning that the timeout is 50% longer than\n# necessary for a quorum of recent proposals, approximately.\nproposal_grace_period = 200\n\n# The average number of rounds after which the proposal timeout adapts by a factor of 2.\n# Note: It goes up faster than it goes down: it takes fewer rounds to double than to halve.\nproposal_timeout_inertia = 10\n\n# The maximum difference between validators' clocks we expect. Incoming proposals whose timestamp\n# lies in the future by more than that are rejected.\nclock_tolerance = '1 second'\n\n\n# ===========================================\n# Configuration options for Highway consensus\n# ===========================================\n[consensus.highway]\n\n# The duration for which incoming vertices with missing dependencies should be kept in a queue.\npending_vertex_timeout = '30 minutes'\n\n# Request the latest protocol state from a random peer periodically, with this interval.\n# '0 seconds' means it is disabled and we never request the protocol state from a peer.\nrequest_state_interval = '20 seconds'\n\n# Log inactive or faulty validators periodically, with this interval.\n# '0 seconds' means it is disabled and we never print the log message.\nlog_participation_interval = '1 minute'\n\n# Log the synchronizer state periodically, with this interval.\n# '0 seconds' means it is disabled and we never print the log message.\nlog_synchronizer_interval = '5 seconds'\n\n# Log the size of every incoming and outgoing serialized 
unit.\nlog_unit_sizes = false\n\n# The maximum number of peers we request the same vertex from in parallel.\nmax_requests_for_vertex = 5\n\n# The maximum number of dependencies we request per validator in a batch.\n# Limits requests per validator in panorama - in order to get a total number of\n# requests, multiply by # of validators.\nmax_request_batch_size = 20\n\n[consensus.highway.round_success_meter]\n# The number of most recent rounds we will be keeping track of.\nnum_rounds_to_consider = 40\n\n# The number of successful rounds that triggers us to slow down: With this many or fewer\n# successes per `num_rounds_to_consider`, we increase our round length.\nnum_rounds_slowdown = 10\n\n# The number of successful rounds that triggers us to speed up: With this many or more successes\n# per `num_rounds_to_consider`, we decrease our round length.\nnum_rounds_speedup = 32\n\n# We will try to accelerate (decrease our round length) every `acceleration_parameter` rounds if\n# we have few enough failures.\nacceleration_parameter = 40\n\n# The FTT, as a percentage (i.e. 
`acceleration_ftt = [1, 100]` means 1% of the validators' total weight), which\n# we will use for looking for a summit in order to determine a proposal's finality.\n# The required quorum in a summit we will look for to check if a round was successful is\n# determined by this FTT.\nacceleration_ftt = [1, 100]\n\n\n# ====================================\n# Configuration options for networking\n# ====================================\n[network]\n\n# The public address of the node.\n#\n# It must be publicly available in order to allow peers to connect to this node.\n# If the port is set to 0, the actual bound port will be substituted.\npublic_address = '<IP ADDRESS>:0'\n\n# Address to bind to for listening.\n# If port is set to 0, a random port will be used.\nbind_address = '0.0.0.0:35000'\n\n# Addresses to connect to in order to join the network.\n#\n# If not set, this node will not be able to attempt to connect to the network.  Instead it will\n# depend upon peers connecting to it.  This is normally only useful for the first node of the\n# network.\n#\n# Multiple addresses can be given and the node will attempt to connect to each, requiring at least\n# one connection.\nknown_addresses = ['3.81.135.135:35000','34.207.240.74:35000','18.208.195.207:35000','3.90.67.160:35000']\n\n# Minimum number of fully-connected peers to consider network component initialized.\nmin_peers_for_initialization = 2\n\n# The interval between each fresh round of gossiping the node's public address.\ngossip_interval = '120 seconds'\n\n# Initial delay for starting address gossipping after the network starts. This should be slightly\n# more than the expected time required for initial connections to complete.\ninitial_gossip_delay = '5 seconds'\n\n# How long a connection is allowed to be stuck as pending before it is abandoned.\nmax_addr_pending_time = '1 minute'\n\n# Maximum time allowed for a connection handshake between two nodes to be completed. 
Connections\n# exceeding this threshold are considered unlikely to be healthy or even malicious and thus\n# terminated.\nhandshake_timeout = '20 seconds'\n\n# Maximum number of incoming connections per unique peer allowed. If the limit is hit, additional\n# connections will be rejected. A value of `0` means unlimited.\nmax_incoming_peer_connections = 3\n\n# The maximum total of upstream bandwidth in bytes per second allocated to non-validating peers.\n# A value of `0` means unlimited.\nmax_outgoing_byte_rate_non_validators = 6553600\n\n# The maximum allowed total impact of requests from non-validating peers per second answered.\n# A value of `0` means unlimited.\nmax_incoming_message_rate_non_validators = 3000\n\n# Maximum number of requests for data from a single peer that are allowed be buffered. A value of\n# `0` means unlimited.\nmax_in_flight_demands = 50\n\n# Version threshold to enable tarpit for.\n#\n# When set to a version (the value may be `null` to disable the feature), any peer that reports a\n# protocol version equal or below the threshold will be rejected only after holding open the\n# connection for a specific (`tarpit_duration`) amount of time.\n#\n# This option makes most sense to enable on known nodes with addresses where legacy nodes that are\n# still in operation are connecting to, as these older versions will only attempt to reconnect to\n# other nodes once they have exhausted their set of known nodes.\ntarpit_version_threshold = '1.2.1'\n\n# How long to hold connections to trapped legacy nodes.\ntarpit_duration = '10 minutes'\n\n# The probability [0.0, 1.0] of this node trapping a legacy node.\n#\n# Since older nodes will only reconnect if all their options are exhausted, it is sufficient for a\n# single known node to hold open a connection to prevent the node from reconnecting. 
This should be\n# set to `1/n` or higher, with `n` being the number of known nodes expected in the configuration of\n# legacy nodes running this software.\ntarpit_chance = 0.2\n\n# Minimum time a peer is kept on block list before being redeemed. The actual\n# timeout duration is calculated by selecting a random value between\n# <blocklist_retain_min_duration, blocklist_retain_max_duration>.\nblocklist_retain_min_duration = '2 minutes'\n\n# Maximum time a peer is kept on block list before being redeemed. The actual\n# timeout duration is calculated by selecting a random value between\n# <blocklist_retain_min_duration, blocklist_retain_max_duration>.\nblocklist_retain_max_duration = '10 minutes'\n\n# Identity of a node\n#\n# When this section is not specified, an identity will be generated when the node process starts with a self-signed certificate.\n# This option makes sense for some private chains where for security reasons joining new nodes is restricted.\n# [network.identity]\n# tls_certificate = \"node_cert.pem\"\n# secret_key = \"node.pem\"\n# ca_certificate = \"ca_cert.pem\"\n\n# Weights for impact estimation of incoming messages, used in combination with\n# `max_incoming_message_rate_non_validators`.\n#\n# Any weight set to 0 means that the category of traffic is exempt from throttling.\n[network.estimator_weights]\nconsensus = 0\nblock_gossip = 1\ntransaction_gossip = 0\nfinality_signature_gossip = 1\naddress_gossip = 0\nfinality_signature_broadcasts = 0\ntransaction_requests = 1\ntransaction_responses = 0\nlegacy_deploy_requests = 1\nlegacy_deploy_responses = 0\nblock_requests = 1\nblock_responses = 0\nblock_header_requests = 1\nblock_header_responses = 0\ntrie_requests = 1\ntrie_responses = 0\nfinality_signature_requests = 1\nfinality_signature_responses = 0\nsync_leap_requests = 1\nsync_leap_responses = 0\napprovals_hashes_requests = 1\napprovals_hashes_responses = 0\nexecution_results_requests = 1\nexecution_results_responses = 0\n\n# 
==================================================\n# Configuration options for the BinaryPort server\n# ==================================================\n[binary_port_server]\n\n# Flag which enables the BinaryPort server.\nenable_server = true\n\n# Listening address for BinaryPort server.\naddress = '0.0.0.0:7779'\n\n# Flag that enables the `AllValues` get request. Disabled by default, because it can potentially be abused to retrieve huge amounts of data and clog the node.\nallow_request_get_all_values = true\n\n# Flag that enables the `Trie` get request. Disabled by default, because it can potentially be abused to retrieve huge amounts of data and clog the node.\nallow_request_get_trie = false\n\n# Flag that enables the `TrySpeculativeExec` request. Disabled by default.\nallow_request_speculative_exec = false\n\n# Maximum size of a message in bytes.\nmax_message_size_bytes = 134_217_728\n\n# Maximum number of connections to the server.\nmax_connections = 5\n\n# The global max rate of requests (per second) before they are limited.\n# The implementation uses a sliding window algorithm.\nqps_limit = 1000\n\n# Initial time given to a connection before it expires\ninitial_connection_lifetime = '10 seconds'\n\n#The amount of time which is given to a connection to extend its lifetime when a valid\n# [`Command::Get(GetRequest::Record)`] is sent to the node\nget_record_request_termination_delay = '0 seconds'\n\n#The amount of time which is given to a connection to extend its lifetime when a valid\n#[`Command::Get(GetRequest::Information)`] is sent to the node\nget_information_request_termination_delay = '5 seconds'\n\n#The amount of time which is given to a connection to extend its lifetime when a valid\n#[`Command::Get(GetRequest::State)`] is sent to the node\nget_state_request_termination_delay = '0 seconds'\n\n#The amount of time which is given to a connection to extend its lifetime when a valid\n#[`Command::Get(GetRequest::Trie)`] is sent to the 
node\nget_trie_request_termination_delay = '0 seconds'\n\n#The amount of time which is given to a connection to extend its lifetime when a valid\n#[`Command::TryAcceptTransaction`] is sent to the node\naccept_transaction_request_termination_delay = '24 seconds'\n\n#The amount of time which is given to a connection to extend its lifetime when a valid\n#[`Command::TrySpeculativeExec`] is sent to the node\nspeculative_exec_request_termination_delay = '0 seconds'\n\n\n# ==============================================\n# Configuration options for the REST HTTP server\n# ==============================================\n[rest_server]\n\n# Flag which enables the REST HTTP server.\nenable_server = true\n\n# Listening address for REST HTTP server.  If the port is set to 0, a random port will be used.\n#\n# If the specified port cannot be bound to, a random port will be tried instead.  If binding fails,\n# the REST HTTP server will not run, but the node will be otherwise unaffected.\n#\n# The actual bound address will be reported via a log line if logging is enabled.\naddress = '0.0.0.0:8888'\n\n# The global max rate of requests (per second) before they are limited.\n# Request will be delayed to the next 1 second bucket once limited.\nqps_limit = 100\n\n# Specifies which origin will be reported as allowed by REST server.\n#\n# If left empty, CORS will be disabled.\n# If set to '*', any origin is allowed.\n# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin).\ncors_origin = ''\n\n\n# ==========================================================\n# Configuration options for the SSE HTTP event stream server\n# ==========================================================\n[event_stream_server]\n\n# Flag which enables the SSE HTTP event stream server.\nenable_server = true\n\n# Listening address for SSE HTTP event stream server.  
If the port is set to 0, a random port will be used.\n#\n# If the specified port cannot be bound to, a random port will be tried instead.  If binding fails,\n# the SSE HTTP event stream server will not run, but the node will be otherwise unaffected.\n#\n# The actual bound address will be reported via a log line if logging is enabled.\naddress = '0.0.0.0:9999'\n\n# The number of event stream events to buffer.\nevent_stream_buffer_length = 5000\n\n# The maximum number of subscribers across all event streams the server will permit at any one time.\nmax_concurrent_subscribers = 100\n\n# Specifies which origin will be reported as allowed by event stream server.\n#\n# If left empty, CORS will be disabled.\n# If set to '*', any origin is allowed.\n# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin).\ncors_origin = ''\n\n# ===============================================\n# Configuration options for the storage component\n# ===============================================\n[storage]\n\n# Path (absolute, or relative to this config.toml) to the folder where any files created\n# or read by the storage component will exist. 
A subfolder named with the network name will be\n# automatically created and used for the storage component files.\n#\n# If the folder doesn't exist, it and any required parents will be created.\n#\n# If unset, the path must be supplied as an argument via the CLI.\npath = '/var/lib/casper/casper-node'\n\n# Maximum size of the database to use for the block store.\n#\n# The size should be a multiple of the OS page size.\n#\n# 483_183_820_800 == 450 GiB.\nmax_block_store_size = 483_183_820_800\n\n# Maximum size of the database to use for the deploy store.\n#\n# The size should be a multiple of the OS page size.\n#\n# 322_122_547_200 == 300 GiB.\nmax_deploy_store_size = 322_122_547_200\n\n# Maximum size of the database to use for the deploy metadata.\n#\n# The size should be a multiple of the OS page size.\n#\n# 322_122_547_200 == 300 GiB.\nmax_deploy_metadata_store_size = 322_122_547_200\n\n# Maximum size of the database to use for the state snapshots.\n#\n# The size should be a multiple of the OS page size.\n#\n# 10_737_418_240 == 10 GiB.\nmax_state_store_size = 10_737_418_240\n\n# Memory deduplication.\n#\n# If enabled, nodes will attempt to share loaded objects if possible.\nenable_mem_deduplication = true\n\n# Memory duplication garbage collection.\n#\n# Sets the frequency how often the memory pool cache is swept for free references.\n# For example, setting this value to 5 means that every 5th time something is put in the pool the cache is swept.\nmem_pool_prune_interval = 4096\n\n\n# ===================================\n# Configuration options for gossiping\n# ===================================\n[gossip]\n\n# Target number of peers to infect with a given piece of data.\ninfection_target = 3\n\n# The saturation limit as a percentage, with a maximum value of 99.  Used as a termination\n# condition.\n#\n# Example: assume the `infection_target` is 3, the `saturation_limit_percent` is 80, and we don't\n# manage to newly infect 3 peers.  
We will stop gossiping once we know of more than 15 holders\n# excluding us since 80% saturation would imply 3 new infections in 15 peers.\nsaturation_limit_percent = 80\n\n# The maximum duration for which to keep finished entries.\n#\n# The longer they are retained, the lower the likelihood of re-gossiping a piece of data.  However,\n# the longer they are retained, the larger the list of finished entries can grow.\nfinished_entry_duration = '1 minute'\n\n# The timeout duration for a single gossip request, i.e. for a single gossip message\n# sent from this node, it will be considered timed out if the expected response from that peer is\n# not received within this specified duration.\ngossip_request_timeout = '30 seconds'\n\n# The timeout duration for retrieving the remaining part(s) of newly-discovered data\n# from a peer which gossiped information about that data to this node.\nget_remainder_timeout = '5 seconds'\n\n# The timeout duration for a newly-received, gossiped item to be validated and stored by another\n# component before the gossiper abandons waiting to gossip the item onwards.\nvalidate_and_store_timeout = '1 minute'\n\n\n# ===============================================\n# Configuration options for the block accumulator\n# ===============================================\n[block_accumulator]\n\n# Block height difference threshold for starting to execute the blocks.\nattempt_execution_threshold = 6\n\n# Accepted time interval for inactivity in block accumulator.\ndead_air_interval = '3 minutes'\n\n# Time after which the block acceptors are considered old and can be purged.\npurge_interval = '1 minute'\n\n\n# ================================================\n# Configuration options for the block synchronizer\n# ================================================\n[block_synchronizer]\n\n# Maximum number of fetch-trie tasks to run in parallel during block synchronization.\nmax_parallel_trie_fetches = 5000\n\n# Time interval for the node to ask for refreshed 
peers.\npeer_refresh_interval = '90 seconds'\n\n# Time interval for the node to check what the block synchronizer needs to acquire next.\nneed_next_interval = '1 second'\n\n# Time interval for recurring disconnection of dishonest peers.\ndisconnect_dishonest_peers_interval = '10 seconds'\n\n# Time interval for resetting the latch in block builders.\nlatch_reset_interval = '5 seconds'\n\n\n# =============================================\n# Configuration options for the block validator\n# =============================================\n[block_validator]\n\n# Maximum number of completed entries to retain.\n#\n# A higher value can avoid creating needless validation work on an already-validated proposed\n# block, but comes at the cost of increased memory consumption.\nmax_completed_entries = 6\n\n\n# ==================================\n# Configuration options for fetchers\n# ==================================\n[fetcher]\n\n# The timeout duration for a single fetcher request, i.e. for a single fetcher message\n# sent from this node to another node, it will be considered timed out if the expected response from that peer is\n# not received within this specified duration.\nget_from_peer_timeout = '10 seconds'\n\n\n# ========================================================\n# Configuration options for the contract runtime component\n# ========================================================\n[contract_runtime]\n\n# Optional maximum size of the database to use for the global state store.\n#\n# If unset, defaults to 805,306,368,000 == 750 GiB.\n#\n# The size should be a multiple of the OS page size.\nmax_global_state_size = 2_089_072_132_096\n\n# Optional depth limit to use for global state queries.\n#\n# If unset, defaults to 5.\n#max_query_depth = 5\n\n# Enable manual synchronizing to disk.\n#\n# If unset, defaults to true.\n#enable_manual_sync = true\n\n\n# ==================================================\n# Configuration options for the transaction acceptor\n# 
==================================================\n[transaction_acceptor]\n\n# The leeway allowed when considering whether a transaction is future-dated or not.\n#\n# To accommodate minor clock drift, transactions whose timestamps are within `timestamp_leeway` in the\n# future are still acceptable.\n#\n# The maximum value to which `timestamp_leeway` can be set is defined by the chainspec setting\n# `transaction.max_timestamp_leeway`.\ntimestamp_leeway = '2 seconds'\n\n\n# ===========================================\n# Configuration options for the transaction buffer\n# ===========================================\n[transaction_buffer]\n\n# The interval of checking for expired transactions.\nexpiry_check_interval = '1 minute'\n\n\n# ==============================================\n# Configuration options for the diagnostics port\n# ==============================================\n[diagnostics_port]\n\n# If set, the diagnostics port will be available on a UNIX socket.\nenabled = false\n\n# Filename for the UNIX domain socket the diagnostics port listens on.\nsocket_path = \"debug.socket\"\n\n# The umask to set before creating the socket. A restrictive mask like `0o077` will cause the\n# socket to be only accessible by the user the node runs as. A more relaxed variant is `0o007`,\n# which allows for group access as well.\nsocket_umask = 0o077\n\n\n# =============================================\n# Configuration options for the upgrade watcher\n# =============================================\n[upgrade_watcher]\n\n# How often to scan file system for available upgrades.\nupgrade_check_interval = '30 seconds'\n"
  },
  {
    "path": "resources/integration-test/global_state.toml",
    "content": "# Key c0c0e911bab21aff5a7e5aa8da03bac7875dd94a155a87c1b21a99944ac00cc8 is the main purse\n# of the account 01b9857739f3ea626d1768911657933809690c09f24bb9e7ce00ad2302646d4ec9\n#\n# Value AAkAAAAIAArz/CpsEC0I is base64 encoding of 3247214263222340096, representing a \n# U512 type in the system\n\n[[entries]]\nkey = \"balance-c0c0e911bab21aff5a7e5aa8da03bac7875dd94a155a87c1b21a99944ac00cc8\"\nvalue = \"AAkAAAAIAArz/CpsEC0I\"\n"
  },
  {
    "path": "resources/local/accounts.toml",
    "content": "[[accounts]]\npublic_key = \"01522ef6c89038019cb7af05c340623804392dd2bb1f4dab5e4a9c3ab752fc0179\"\nbalance = \"1000000000000000000000000000\"\n\n[[accounts]]\npublic_key = \"01f60bce2bb1059c41910eac1e7ee6c3ef4c8fcc63a901eb9603c1524cadfb0c18\"\nbalance = \"1000000000000000000\"\n\n[accounts.validator]\nbonded_amount = \"500000000000000\"\n\n[[accounts]]\npublic_key = \"018f5a3ee4c1221686fcdbe6c0b6168acb24025c5485f59f7c4039ffc444fb7509\"\nbalance = \"1000000000000000000\"\n\n[accounts.validator]\nbonded_amount = \"400000000000000\"\n\n[[accounts]]\npublic_key = \"01449c5f751e465adabc9885fe8f33b522ab2c069997148e4b65fddde9c4440d31\"\nbalance = \"1000000000000000000\"\n\n[accounts.validator]\nbonded_amount = \"300000000000000\"\n\n[[accounts]]\npublic_key = \"01b3feeec1d91c7c2f070052315258eeaaf7c24029a80a1aea285814e9f9a20d36\"\nbalance = \"1000000000000000000\"\n\n[accounts.validator]\nbonded_amount = \"200000000000000\"\ndelegation_rate = 10\n\n[[delegators]]\nvalidator_public_key = \"01b3feeec1d91c7c2f070052315258eeaaf7c24029a80a1aea285814e9f9a20d36\"\ndelegator_public_key = \"020248509e67db3127f82d5224c5c18eac00f96d1edeadbadc8eb2c8606227b56873\"\nbalance = \"1000000000000000000\"\ndelegated_amount = \"100000000000001\""
  },
  {
    "path": "resources/local/chainspec.toml.in",
    "content": "[protocol]\n# Protocol version.\nversion = '2.1.1'\n# Whether we need to clear latest blocks back to the switch block just before the activation point or not.\nhard_reset = false\n# This protocol version becomes active at this point.\n#\n# If it is a timestamp string, it represents the timestamp for the genesis block.  This is the beginning of era 0.  By\n# this time, a sufficient majority (> 50% + F/2 — see finality_threshold_fraction below) of validator nodes must be up\n# and running to start the blockchain.  This timestamp is also used in seeding the pseudo-random number generator used\n# in contract-runtime for computing genesis post-state hash.\n#\n# If it is an integer, it represents an era ID, meaning the protocol version becomes active at the start of this era.\nactivation_point = '${TIMESTAMP}'\n\n[network]\n# Human readable name for convenience; the genesis_hash is the true identifier.  The name influences the genesis hash by\n# contributing to the seeding of the pseudo-random number generator used in contract-runtime for computing genesis\n# post-state hash.\nname = 'casper-example'\n# The maximum size of an acceptable networking message in bytes.  Any message larger than this will\n# be rejected at the networking level.\nmaximum_net_message_size = 25_165_824\n\n[core]\n# Era duration.\nera_duration = '41 seconds'\n# Minimum number of blocks per era.  
An era will take longer than `era_duration` if that is necessary to reach the\n# minimum height.\nminimum_era_height = 5\n# Minimum difference between a block's and its child's timestamp.\nminimum_block_time = '4096 ms'\n# Number of slots available in validator auction.\nvalidator_slots = 7\n# A number between 0 and 1 representing the fault tolerance threshold as a fraction, used by the internal finalizer.\n# It is the fraction of validators that would need to equivocate to make two honest nodes see two conflicting blocks as\n# finalized: A higher value F makes it safer to rely on finalized blocks.  It also makes it more difficult to finalize\n# blocks, however, and requires strictly more than (F + 1)/2 validators to be working correctly.\nfinality_threshold_fraction = [1, 3]\n# Protocol version from which nodes are required to hold strict finality signatures.\nstart_protocol_version_with_strict_finality_signatures_required = '1.5.0'\n# Which finality is required for legacy blocks. Options are 'Strict', 'Weak' and 'Any'.\n# Used to determine finality sufficiency for new joiners syncing blocks created\n# in a protocol version before\n# `start_protocol_version_with_strict_finality_signatures_required`.\nlegacy_required_finality = 'Strict'\n\n# Number of eras before an auction actually defines the set of validators.  
If you bond with a sufficient bid in era N,\n# you will be a validator in era N + auction_delay + 1.\nauction_delay = 1\n# The period after genesis during which a genesis validator's bid is locked.\nlocked_funds_period = '0 days'\n# The period in which genesis validator's bid is released over time after it's unlocked.\nvesting_schedule_period = '0 weeks'\n# Default number of eras that need to pass to be able to withdraw unbonded funds.\nunbonding_delay = 7\n# Round seigniorage rate represented as a fraction of the total supply.\n#\n# A rate that makes the rewards roughly 0.05% of the initial stake per block under default NCTL settings.\nround_seigniorage_rate = [1, 4_200_000_000_000_000_000]\n# Maximum number of associated keys for a single account.\nmax_associated_keys = 100\n# Maximum height of contract runtime call stack.\nmax_runtime_call_stack_height = 12\n# Minimum allowed delegation amount in motes\nminimum_delegation_amount = 500_000_000_000\n# Maximum allowed delegation amount in motes\nmaximum_delegation_amount = 1_000_000_000_000_000_000\n# Minimum bid amount allowed in motes. Withdrawing one's bid to an amount strictly less than\n# the value specified will be treated as a full unbond of a validator and their associated delegators\nminimum_bid_amount = 100_000_000_000_000\n# Global state prune batch size (0 = this feature is off)\nprune_batch_size = 0\n# Enables strict arguments checking when calling a contract; i.e. that all non-optional args are provided and of the correct `CLType`.\nstrict_argument_checking = false\n# Number of simultaneous peer requests.\nsimultaneous_peer_requests = 5\n# The consensus protocol to use. 
Options are \"Zug\" and \"Highway\".\nconsensus_protocol = 'Zug'\n# The maximum amount of delegators per validator.\nmax_delegators_per_validator = 1200\n# Minimum delegation rate validators can specify (0-100).\nminimum_delegation_rate = 0\n# The split in finality signature rewards between block producer and participating signers.\nfinders_fee = [1, 5]\n# The proportion of baseline rewards going to reward finality signatures specifically.\nfinality_signature_proportion = [1, 2]\n# Lookback interval indicating which past block we are looking at to reward.\nsignature_rewards_max_delay = 3\n# Allows transfers between accounts in the blockchain network.\n#\n# Setting this to false restricts normal accounts from sending tokens to other accounts, allowing transfers only to administrators.\n# Changing this option makes sense only on private chains.\nallow_unrestricted_transfers = true\n# Enables the auction entry points 'delegate' and 'add_bid'.\n#\n# Setting this to false makes sense only for private chains which don't need to auction new validator slots. These\n# auction entry points will return an error if called when this option is set to false.\nallow_auction_bids = true\n# If set to false, then consensus doesn't compute rewards and always uses 0.\ncompute_rewards = true\n# Defines how refunds of the unused portion of payment amounts are calculated and handled.\n#\n# Valid options are:\n#   'refund': a ratio of the unspent token is returned to the spender.\n#   'burn': a ratio of the unspent token is burned.\n#   'no_refund': no refunds are paid out; this is functionally equivalent to refund with 0% ratio.\n# This causes excess payment amounts to be sent to either a\n# pre-defined purse, or back to the sender.  
The refunded amount is calculated as the given ratio of the payment amount\n# minus the execution costs.\nrefund_handling = { type = 'refund', refund_ratio = [75, 100] }\n# Defines how fees are handled.\n#\n# Valid options are:\n#   'no_fee': fees are eliminated.\n#   'pay_to_proposer': fees are paid to the block proposer\n#   'accumulate': fees are accumulated in a special purse and distributed at the end of each era evenly among all\n#                 administrator accounts\n#   'burn': fees are burned\nfee_handling = { type = 'pay_to_proposer' }\n# If a validator would receive a validator credit, it cannot exceed this percentage of their total stake.\nvalidator_credit_cap = [1, 5]\n# Defines how pricing is handled.\n#\n# Valid options are:\n#   'payment_limited': senders of transaction self-specify how much they pay.\n#   'fixed': costs are fixed, per the cost table\n#   'prepaid': prepaid transaction (currently not supported)\npricing_handling = { type = 'payment_limited' }\n# Does the network allow pre-payment for future\n# execution? Currently not supported.\n#\nallow_prepaid = false\n# Defines how gas holds affect available balance calculations.\n#\n# Valid options are:\n#   'accrued': sum of full value of all non-expired holds.\n#   'amortized': sum of each hold is amortized over the time remaining until expiry.\n#\n# For instance, if 12 hours remained on a gas hold with a 24-hour `gas_hold_interval`,\n#   with accrued, the full hold amount would be applied\n#   with amortized, half the hold amount would be applied\ngas_hold_balance_handling = { type = 'accrued' }\n# Defines how long gas holds last.\n#\n# If fee_handling is set to 'no_fee', the system places a balance hold on the payer\n# equal to the value the fee would have been. Such balance holds expire after a time\n# interval has elapsed. This setting controls how long that interval is. 
The available\n# balance of a purse equals its total balance minus the held amount(s) of non-expired\n# holds (see gas_hold_balance_handling setting for details of how that is calculated).\n#\n# For instance, if gas_hold_interval is 24 hours and 100 gas is used from a purse,\n# a hold for 100 is placed on that purse and is considered when calculating total balance\n# for 24 hours starting from the block_time when the hold was placed.\ngas_hold_interval = '24 hours'\n# List of public keys of administrator accounts. Setting this option makes sense only on private chains which require\n# administrator accounts for regulatory reasons.\nadministrators = []\n# Flag that triggers a migration of all userland accounts and contracts present in global state to the addressable\n# entity in lazy manner.\n# If the flag is set to false then no accounts and contracts are migrated during a protocol upgrade;\n# i.e. all Account records will be present under Key::Account and Contracts and their associated ContractPackage\n# will be written underneath Key::Hash.\n# If the flag is set to true then accounts and contracts are migrated lazily; i.e on first use of the Account\n# and/or Contract as part of the execution of a Transaction. This means the Accounts/Contracts will be migrated\n# to their corresponding AddressableEntity and the NamedKeys for previous record are separated and written\n# as discrete top level records. 
For Contracts specifically the entrypoints are also written as discrete top\n# level records\n# Note: Enabling of the AddressableEntity feature is one-way; i.e once enabled as part of a protocol upgrade\n# the flag cannot be disabled in a future protocol upgrade.\nenable_addressable_entity = false\n\n# This value is used as the penalty payment amount, the lowest cost, and the minimum balance amount.\nbaseline_motes_amount = 2_500_000_000\n# Flag on whether ambiguous entity versions returns an execution error.\ntrap_on_ambiguous_entity_version = false\nrewards_handling = { type = 'standard'}\n\n[highway]\n# Highway dynamically chooses its round length, between minimum_block_time and maximum_round_length.\nmaximum_round_length = '17 seconds'\n\n[transactions]\n# The duration after the transaction timestamp that it can be included in a block.\nmax_ttl = '2 hours'\n# The maximum number of approvals permitted in a single block.\nblock_max_approval_count = 2600\n# Maximum block size in bytes including transactions contained by the block.  
0 means unlimited.\nmax_block_size = 5_242_880\n# The upper limit of total gas of all transactions in a block.\nblock_gas_limit = 1_625_000_000_000\n# The minimum amount in motes for a valid native transfer.\nnative_transfer_minimum_motes = 2_500_000_000\n# The maximum value to which `transaction_acceptor.timestamp_leeway` can be set in the config.toml file.\nmax_timestamp_leeway = '5 seconds'\n# Configuration of the transaction runtime.\n[transactions.enabled_runtime]\nvm_casper_v1 = true\nvm_casper_v2 = false\n\n[transactions.v1]\n# The configuration settings for the lanes of transactions including both native and Wasm based interactions.\n# Currently the node supports two native interactions the mint and auction and have the reserved identifiers of 0 and 1\n# respectively\n# The remaining wasm based lanes specify the range of configuration settings for a given Wasm based transaction\n# within a given lane.\n# The maximum length in bytes of runtime args per V1 transaction.\n# [0] -> Transaction lane label (apart from the reserved native identifiers these are simply labels)\n# Note: For the given mainnet implementation we specially reserve the label 2 for install and upgrades and\n# the lane must be present and defined.\n# Different casper networks may not impose such a restriction.\n# [1] -> Max serialized length of the entire transaction in bytes for a given transaction in a certain lane\n# [2] -> Max args length size in bytes for a given transaction in a certain lane\n# [3] -> Transaction gas limit for a given transaction in a certain lane\n# [4] -> The maximum number of transactions the lane can contain\nnative_mint_lane = [0, 2048, 1024, 100_000_000, 650]\nnative_auction_lane = [1, 3096, 2048, 2_500_000_000, 650]\ninstall_upgrade_lane = [2, 750_000, 2048, 1_000_000_000_000, 1]\nwasm_lanes = [\n    [3, 262_144, 1024, 1_000_000_000_000, 1],\n    [4, 131_072, 1024, 100_000_000_000, 2],\n    [5, 65_536, 512, 5_000_000_000, 80]\n]\n\n[transactions.deploy]\n# The 
maximum number of Motes allowed to be spent during payment.  0 means unlimited.\nmax_payment_cost = '0'\n# The limit of length of serialized payment code arguments.\npayment_args_max_length = 1024\n# The limit of length of serialized session code arguments.\nsession_args_max_length = 1024\n\n[wasm.v1]\n# Amount of free memory (in 64kB pages) each contract can use for stack.\nmax_memory = 64\n# Max stack height (native WebAssembly stack limiter).\nmax_stack_height = 500\n\n[storage_costs]\n# Gas charged per byte stored in the global state.\ngas_per_byte = 1_117_587\n\n# For each opcode cost below there exists a static cost and a dynamic cost.\n# The static cost is a fixed cost for each opcode that is hardcoded and validated by benchmarks.\n[wasm.v1.opcode_costs]\n# Bit operations multiplier.\nbit = 105\n# Arithmetic add operations multiplier.\nadd = 105\n# Mul operations multiplier.\nmul = 105\n# Div operations multiplier.\ndiv = 105\n# Memory load operation multiplier.\nload = 105\n# Memory store operation multiplier.\nstore = 105\n# Const store operation multiplier.\nconst = 105\n# Local operations multiplier.\nlocal = 105\n# Global operations multiplier.\nglobal = 105\n# Integer operations multiplier.\ninteger_comparison = 105\n# Conversion operations multiplier.\nconversion = 105\n# Unreachable operation multiplier.\nunreachable = 105\n# Nop operation multiplier.\nnop = 105\n# Get current memory operation multiplier.\ncurrent_memory = 105\n# Grow memory cost, per page (64kb).\ngrow_memory = 900\n# Sign extension operations cost\nsign = 105\n\n# Control flow operations multiplier.\n[wasm.v1.opcode_costs.control_flow]\nblock = 255\nloop = 255\nif = 105\nelse = 105\nend = 105\nbr = 1665\nbr_if = 510\nreturn = 105\nselect = 105\ncall = 225\ncall_indirect = 270\ndrop = 105\n\n[wasm.v1.opcode_costs.control_flow.br_table]\n# Fixed cost per `br_table` opcode\ncost = 150\n# Size of target labels in the `br_table` opcode will be multiplied by 
`size_multiplier`\nsize_multiplier = 100\n\n# Host function declarations are located in smart_contracts/contract/src/ext_ffi.rs\n[wasm.v1.host_function_costs]\nadd = { cost = 5_800, arguments = [0, 0, 0, 0] }\nadd_associated_key = { cost = 1_200_000, arguments = [0, 0, 0] }\nadd_contract_version = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 0, 0] }\nadd_contract_version_with_message_topics = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 30_000, 0, 0] }\nadd_package_version_with_message_topics = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 30_000, 0, 0] }\nblake2b = { cost = 1_200_000, arguments = [0, 120_000, 0, 0] }\ncall_contract = { cost = 300_000_000, arguments = [0, 0, 0, 120_000, 0, 120_000, 0] }\ncall_versioned_contract = { cost = 300_000_000, arguments = [0, 0, 0, 0, 0, 120_000, 0, 120_000, 0] }\ncreate_contract_package_at_hash = { cost = 200, arguments = [0, 0] }\ncreate_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0] }\ncreate_purse = { cost = 2_500_000_000, arguments = [0, 0] }\ndisable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] }\nget_balance = { cost = 3_000_000, arguments = [0, 0, 0] }\nget_blocktime = { cost = 330, arguments = [0] }\nget_caller = { cost = 380, arguments = [0] }\nget_key = { cost = 2_000, arguments = [0, 440, 0, 0, 0] }\nget_main_purse = { cost = 1_300, arguments = [0] }\nget_named_arg = { cost = 200, arguments = [0, 120_000, 0, 120_000] }\nget_named_arg_size = { cost = 200, arguments = [0, 0, 0] }\nget_phase = { cost = 710, arguments = [0] }\nget_system_contract = { cost = 1_100, arguments = [0, 0, 0] }\nhas_key = { cost = 1_500, arguments = [0, 840] }\nis_valid_uref = { cost = 760, arguments = [0, 0] }\nload_named_keys = { cost = 42_000, arguments = [0, 0] }\nnew_uref = { cost = 17_000, arguments = [0, 0, 590] }\nrandom_bytes = { cost = 200, arguments = [0, 0] }\nprint = { cost = 20_000, arguments = [0, 4_600] }\nprovision_contract_user_group_uref = { cost = 
200, arguments = [0, 0, 0, 0, 0] }\nput_key = { cost = 100_000_000, arguments = [0, 120_000, 0, 120_000] }\nread_host_buffer = { cost = 3_500, arguments = [0, 310, 0] }\nread_value = { cost = 60_000, arguments = [0, 120_000, 0] }\ndictionary_get = { cost = 5_500, arguments = [0, 590, 0] }\nremove_associated_key = { cost = 4_200, arguments = [0, 0] }\nremove_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0] }\nremove_contract_user_group_urefs = { cost = 200, arguments = [0, 0, 0, 0, 0, 120_000] }\nremove_key = { cost = 61_000, arguments = [0, 3_200] }\nret = { cost = 23_000, arguments = [0, 420_000] }\nrevert = { cost = 500, arguments = [0] }\nset_action_threshold = { cost = 74_000, arguments = [0, 0] }\ntransfer_from_purse_to_account = { cost = 2_500_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] }\ntransfer_from_purse_to_purse = { cost = 82_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0] }\ntransfer_to_account = { cost = 2_500_000_000, arguments = [0, 0, 0, 0, 0, 0, 0] }\nupdate_associated_key = { cost = 4_200, arguments = [0, 0, 0] }\nwrite = { cost = 14_000, arguments = [0, 0, 0, 980] }\ndictionary_put = { cost = 9_500, arguments = [0, 1_800, 0, 520] }\nenable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] }\nmanage_message_topic = { cost = 200, arguments = [0, 30_000, 0, 0] }\nemit_message = { cost = 200, arguments = [0, 30_000, 0, 120_000] }\ngeneric_hash = { cost = 1_200_000, arguments = [0, 120_000, 0, 0, 0] }\ncost_increase_per_message = 50\nget_block_info = { cost = 330, arguments = [0, 0] }\nrecover_secp256k1 = { cost = 1_300_000, arguments = [0, 120_000, 0, 0, 0, 0] }\nverify_signature = { cost = 1_300_000, arguments = [0, 120_000, 0, 0, 0, 0] }\ncall_package_version = { cost = 300_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 120_000, 0, 120_000, 0] }\n\n[wasm.v2]\n# Amount of free memory each contract can use for stack.\nmax_memory = 17\n\n[wasm.v2.opcode_costs]\n# Bit operations multiplier.\nbit = 105\n# Arithmetic add operations 
multiplier.\nadd = 105\n# Mul operations multiplier.\nmul = 105\n# Div operations multiplier.\ndiv = 105\n# Memory load operation multiplier.\nload = 105\n# Memory store operation multiplier.\nstore = 105\n# Const store operation multiplier.\nconst = 105\n# Local operations multiplier.\nlocal = 105\n# Global operations multiplier.\nglobal = 105\n# Integer operations multiplier.\ninteger_comparison = 105\n# Conversion operations multiplier.\nconversion = 105\n# Unreachable operation multiplier.\nunreachable = 105\n# Nop operation multiplier.\nnop = 105\n# Get current memory operation multiplier.\ncurrent_memory = 105\n# Grow memory cost, per page (64kb).\ngrow_memory = 900\n# Sign extension operations cost\nsign = 105\n\n# Control flow operations multiplier.\n[wasm.v2.opcode_costs.control_flow]\nblock = 255\nloop = 255\nif = 105\nelse = 105\nend = 105\nbr = 1665\nbr_if = 510\nreturn = 105\nselect = 105\ncall = 225\ncall_indirect = 270\ndrop = 105\n\n[wasm.v2.opcode_costs.control_flow.br_table]\n# Fixed cost per `br_table` opcode\ncost = 150\n# Size of target labels in the `br_table` opcode will be multiplied by `size_multiplier`\nsize_multiplier = 100\n\n[wasm.v2.host_function_costs]\nread = { cost = 0, arguments = [0, 0, 0, 0, 0, 0] }\nwrite = { cost = 0, arguments = [0, 0, 0, 0, 0] }\nremove = { cost = 0, arguments = [0, 0, 0] }\ncopy_input = { cost = 0, arguments = [0, 0] }\nret = { cost = 0, arguments = [0, 0] }\ncreate = { cost = 0, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] }\ntransfer = { cost = 0, arguments = [0, 0, 0] }\nenv_balance = { cost = 0, arguments = [0, 0, 0, 0] }\nupgrade = { cost = 0, arguments = [0, 0, 0, 0, 0, 0] }\ncall = { cost = 0, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] }\nprint = { cost = 0, arguments = [0, 0] }\nemit = { cost = 0, arguments = [0, 0, 0, 0] }\nenv_info = { cost = 0, arguments = [0, 0] }\n\n[wasm.messages_limits]\nmax_topic_name_size = 256\nmax_topics_per_contract = 128\nmax_message_size = 1_024\n\n[system_costs]\n# Penalty 
charge for calling invalid entry point in a system contract.\nno_such_entrypoint = 2_500_000_000\n\n[system_costs.auction_costs]\nget_era_validators = 2_500_000_000\nread_seigniorage_recipients = 5_000_000_000\nadd_bid = 2_500_000_000\nwithdraw_bid = 2_500_000_000\ndelegate = 2_500_000_000\nundelegate = 2_500_000_000\nrun_auction = 2_500_000_000\nslash = 2_500_000_000\ndistribute = 2_500_000_000\nwithdraw_delegator_reward = 5_000_000_000\nwithdraw_validator_reward = 5_000_000_000\nread_era_id = 2_500_000_000\nactivate_bid = 2_500_000_000\nredelegate = 2_500_000_000\nchange_bid_public_key = 5_000_000_000\nadd_reservations = 2_500_000_000\ncancel_reservations = 2_500_000_000\n\n[system_costs.mint_costs]\nmint = 2_500_000_000\nreduce_total_supply = 2_500_000_000\ncreate = 2_500_000_000\nbalance = 100_000_000\nburn = 100_000_000\ntransfer = 100_000_000\nread_base_round_reward = 2_500_000_000\nmint_into_existing_purse = 2_500_000_000\n\n[system_costs.handle_payment_costs]\nget_payment_purse = 10_000\nset_refund_purse = 10_000\nget_refund_purse = 10_000\nfinalize_payment = 2_500_000_000\n\n[system_costs.standard_payment_costs]\npay = 10_000\n\n[vacancy]\n# The cost of a transaction is based on a multiplier. This allows for economic disincentives for misuse of the network.\n#\n# The network starts with a current_gas_price of min_gas_price.\n#\n# Each block has multiple limits (bytes, transactions, transfers, gas, etc.)\n# The utilization for a block is determined by the highest percentage utilization of each of these limits.\n#\n# Ex: transfers limit is 650 and transactions limit is 20 (assume other limits are not a factor here)\n#     19 transactions -> 19/20 or 95%\n#     600 transfers -> 600/650 or 92.3%\n#     resulting block utilization is 95\n#\n# The utilization for an era is the average of all block utilizations. 
At the switch block, the dynamic gas_price is\n# adjusted with the following:\n#\n# If utilization was below the lower_threshold, current_gas_price is decremented by one if higher than min_gas_price.\n# If utilization falls between the thresholds, current_gas_price is not changed.\n# If utilization was above the upper_threshold, current_gas_price is incremented by one if lower than max_gas_price.\n#\n# The cost charged for the transaction is simply the gas_used * current_gas_price.\nupper_threshold = 90\nlower_threshold = 50\nmax_gas_price = 3\nmin_gas_price = 1\n"
  },
  {
    "path": "resources/local/config.toml",
    "content": "# ================================\n# Configuration options for a node\n# ================================\n[node]\n\n# If set, use this hash as a trust anchor when joining an existing network.\n#trusted_hash = 'HEX-FORMATTED BLOCK HASH'\n\n# Historical sync behavior for this node. Options are:\n#  'ttl'      (node will attempt to acquire all block data to comply with time to live enforcement)\n#  'genesis'  (node will attempt to acquire all block data back to genesis)\n#  'nosync'   (node will only acquire blocks moving forward)\n#  'isolated' (node will initialize without peers and will not accept peers)\n#  'completeblock' (node will acquire complete block and shutdown)\n# note: the only two states allowed to switch to Validate reactor state are `genesis` and `ttl`.\n#       it is recommended for dedicated validator nodes to be in ttl mode to increase\n#       their ability to maintain maximal uptime...if a long-running genesis validator\n#       goes offline and comes back up while in genesis mode, it must backfill\n#       any gaps in its block awareness before resuming validation.\n#\n#       it is recommended for reporting non-validator nodes to be in genesis mode to\n#       enable support for queries at any block height.\n#\n#       it is recommended for non-validator working nodes (for dapp support, etc) to run in\n#       ttl or nosync mode (depending upon their specific data requirements).\n#\n#       thus for instance a node backing a block explorer would prefer genesis mode,\n#       while a node backing a dapp interested in very recent activity would prefer to run in nosync mode,\n#       and a node backing a dapp interested in auction activity or tracking trends would prefer to run in ttl mode.\n# note: as time goes on, the time to sync back to genesis takes progressively longer.\n# note: ttl is a chainspec configured behavior on a given network; consult the `max_ttl` chainspec setting\n#       (it is currently ~18 hours by default 
on production and production-like networks but subject to change).\n# note: `nosync` is incompatible with validator behavior; a nosync node is prevented from participating\n#        in consensus / switching to validate mode. it is primarily for lightweight nodes that are\n#        only interested in recent activity.\n# note: an isolated node will not connect to, sync with, or keep up with the network, but will respond to\n#       binary port, rest server, event server, and diagnostic port connections.\nsync_handling = 'genesis'\n\n# Idle time after which the syncing process is considered stalled.\nidle_tolerance = '20 minutes'\n\n# When the syncing process is considered stalled, it'll be retried up to `max_attempts` times.\nmax_attempts = 3\n\n# Default delay for the control events that have no dedicated delay requirements.\ncontrol_logic_default_delay = '1 second'\n\n# Flag which forces the node to resync all of the blocks.\nforce_resync = false\n\n# A timeout for the ShutdownForUpgrade state, after which the node will upgrade even if not all\n# conditions are satisfied.\nshutdown_for_upgrade_timeout = '2 minutes'\n\n# Maximum time a node will wait for an upgrade to commit.\nupgrade_timeout = '3 hours'\n\n# The node detects when it should do a controlled shutdown when it is in a detectably bad state\n# in order to avoid potentially catastrophic uncontrolled crashes. Generally, a node should be\n# allowed to shutdown, and if restarted that node will generally recover gracefully and resume\n# normal operation. However, actively validating nodes have subjective state in memory that is\n# lost on shutdown / restart and must be reacquired from other validating nodes on restart.\n# If all validating nodes shutdown in the middle of an era, social consensus is required to restart\n# the network. 
As a mitigation for that, the following config can be set to true on some validator\n# nodes to cause nodes that are supposed to be validators in the current era to ignore controlled\n# shutdown events and stay up. This allows them to act as sentinels for the consensus data for\n# other restarting nodes. This config is inert on non-validating nodes.\nprevent_validator_shutdown = false\n\n# =================================\n# Configuration options for logging\n# =================================\n[logging]\n\n# Output format.  Possible values are 'text' or 'json'.\nformat = 'text'\n\n# Colored output.  Has no effect if format = 'json'.\ncolor = false\n\n# Abbreviate module names in text output.  Has no effect if format = 'json'.\nabbreviate_modules = false\n\n\n# ===================================\n# Configuration options for consensus\n# ===================================\n[consensus]\n\n# Path (absolute, or relative to this config.toml) to validator's secret key file used to sign\n# consensus messages.\nsecret_key_path = 'secret_key.pem'\n\n# The maximum number of blocks by which execution is allowed to lag behind finalization.\n# If it is more than that, consensus will pause, and resume once the executor has caught up.\nmax_execution_delay = 3\n\n\n# =======================================\n# Configuration options for Zug consensus\n# =======================================\n[consensus.zug]\n\n# Request the latest protocol state from a random peer periodically, with this interval.\n# '0 seconds' means it is disabled and we never request the protocol state from a peer.\nsync_state_interval = '50 ms'\n\n# Log inactive or faulty validators periodically, with this interval.\n# '0 seconds' means it is disabled and we never print the log message.\nlog_participation_interval = '1 minute'\n\n# The minimal proposal timeout. 
Validators wait this long for a proposal to receive a quorum of\n# echo messages, before they vote to make the round skippable and move on to the next proposer.\nproposal_timeout = '10 seconds'\n\n# The additional proposal delay that is still considered fast enough, in percent. This should\n# take into account variables like empty vs. full blocks, network traffic etc.\n# E.g. if proposing a full block while under heavy load takes 50% longer than an empty one\n# while idle this should be at least 50, meaning that the timeout is 50% longer than\n# necessary for a quorum of recent proposals, approximately.\nproposal_grace_period = 200\n\n# The average number of rounds after which the proposal timeout adapts by a factor of 2.\n# Note: It goes up faster than it goes down: it takes fewer rounds to double than to halve.\nproposal_timeout_inertia = 10\n\n# The maximum difference between validators' clocks we expect. Incoming proposals whose timestamp\n# lies in the future by more than that are rejected.\nclock_tolerance = '1 second'\n\n\n# ===========================================\n# Configuration options for Highway consensus\n# ===========================================\n[consensus.highway]\n\n# The duration for which incoming vertices with missing dependencies should be kept in a queue.\npending_vertex_timeout = '1 minute'\n\n# Request the latest protocol state from a random peer periodically, with this interval.\n# '0 seconds' means it is disabled and we never request the protocol state from a peer.\nrequest_state_interval = '20 seconds'\n\n# Log inactive or faulty validators periodically, with this interval.\n# '0 seconds' means it is disabled and we never print the log message.\nlog_participation_interval = '15 seconds'\n\n# Log the synchronizer state periodically, with this interval.\n# '0 seconds' means it is disabled and we never print the log message.\nlog_synchronizer_interval = '5 seconds'\n\n# Log the size of every incoming and outgoing serialized 
unit.\nlog_unit_sizes = false\n\n# The maximum number of peers we request the same vertex from in parallel.\nmax_requests_for_vertex = 5\n\n# The maximum number of dependencies we request per validator in a batch.\n# Limits requests per validator in panorama - in order to get a total number of\n# requests, multiply by # of validators.\nmax_request_batch_size = 20\n\n[consensus.highway.round_success_meter]\n# The number of most recent rounds we will be keeping track of.\nnum_rounds_to_consider = 40\n\n# The number of successful rounds that triggers us to slow down: With this many or fewer\n# successes per `num_rounds_to_consider`, we increase our round length.\nnum_rounds_slowdown = 10\n\n# The number of successful rounds that triggers us to speed up: With this many or more successes\n# per `num_rounds_to_consider`, we decrease our round length.\nnum_rounds_speedup = 32\n\n# We will try to accelerate (decrease our round length) every `acceleration_parameter` rounds if\n# we have few enough failures.\nacceleration_parameter = 40\n\n# The FTT, as a percentage (i.e. 
`acceleration_ftt = [1, 100]` means 1% of the validators' total weight), which\n# we will use for looking for a summit in order to determine a proposal's finality.\n# The required quorum in a summit we will look for to check if a round was successful is\n# determined by this FTT.\nacceleration_ftt = [1, 100]\n\n\n# ====================================\n# Configuration options for networking\n# ====================================\n[network]\n\n# The public address of the node.\n#\n# It must be publicly available in order to allow peers to connect to this node.\n# If the port is set to 0, the actual bound port will be substituted.\npublic_address = '127.0.0.1:0'\n\n# Address to bind to for listening.\n# If port is set to 0, a random port will be used.\nbind_address = '0.0.0.0:34553'\n\n# Addresses to connect to in order to join the network.\n#\n# If not set, this node will not be able to attempt to connect to the network.  Instead it will\n# depend upon peers connecting to it.  This is normally only useful for the first node of the\n# network.\n#\n# Multiple addresses can be given and the node will attempt to connect to each, requiring at least\n# one connection.\nknown_addresses = ['127.0.0.1:34553']\n\n# Minimum number of fully-connected peers to consider network component initialized.\nmin_peers_for_initialization = 3\n\n# The interval between each fresh round of gossiping the node's public address.\ngossip_interval = '30 seconds'\n\n# Initial delay for starting address gossipping after the network starts. This should be slightly\n# more than the expected time required for initial connections to complete.\ninitial_gossip_delay = '5 seconds'\n\n# How long a connection is allowed to be stuck as pending before it is abandoned.\nmax_addr_pending_time = '1 minute'\n\n# Maximum time allowed for a connection handshake between two nodes to be completed. 
Connections\n# exceeding this threshold are considered unlikely to be healthy or even malicious and thus\n# terminated.\nhandshake_timeout = '20 seconds'\n\n# Maximum number of incoming connections per unique peer allowed. If the limit is hit, additional\n# connections will be rejected. A value of `0` means unlimited.\nmax_incoming_peer_connections = 3\n\n# The maximum total of upstream bandwidth in bytes per second allocated to non-validating peers.\n# A value of `0` means unlimited.\nmax_outgoing_byte_rate_non_validators = 0\n\n# The maximum allowed total impact of requests from non-validating peers per second answered.\n# A value of `0` means unlimited.\nmax_incoming_message_rate_non_validators = 0\n\n# Maximum number of requests for data from a single peer that are allowed be buffered. A value of\n# `0` means unlimited.\nmax_in_flight_demands = 50\n\n# Version threshold to enable tarpit for.\n#\n# When set to a version (the value may be `null` to disable the feature), any peer that reports a\n# protocol version equal or below the threshold will be rejected only after holding open the\n# connection for a specific (`tarpit_duration`) amount of time.\n#\n# This option makes most sense to enable on known nodes with addresses where legacy nodes that are\n# still in operation are connecting to, as these older versions will only attempt to reconnect to\n# other nodes once they have exhausted their set of known nodes.\ntarpit_version_threshold = '1.2.1'\n\n# How long to hold connections to trapped legacy nodes.\ntarpit_duration = '10 minutes'\n\n# The probability [0.0, 1.0] of this node trapping a legacy node.\n#\n# Since older nodes will only reconnect if all their options are exhausted, it is sufficient for a\n# single known node to hold open a connection to prevent the node from reconnecting. 
This should be\n# set to `1/n` or higher, with `n` being the number of known nodes expected in the configuration of\n# legacy nodes running this software.\ntarpit_chance = 0.2\n\n# Minimum time a peer is kept on block list before being redeemed. The actual\n# timeout duration is calculated by selecting a random value between\n# <blocklist_retain_min_duration, blocklist_retain_max_duration>.\nblocklist_retain_min_duration = '2 minutes'\n\n# Maximum time a peer is kept on block list before being redeemed. The actual\n# timeout duration is calculated by selecting a random value between\n# <blocklist_retain_min_duration, blocklist_retain_max_duration>.\nblocklist_retain_max_duration = '10 minutes'\n\n# Identity of a node\n#\n# When this section is not specified, an identity will be generated when the node process starts with a self-signed certificate.\n# This option makes sense for some private chains where for security reasons joining new nodes is restricted.\n# [network.identity]\n# tls_certificate = \"local_node_cert.pem\"\n# secret_key = \"local_node.pem\"\n# ca_certificate = \"ca_cert.pem\"\n\n# Weights for impact estimation of incoming messages, used in combination with\n# `max_incoming_message_rate_non_validators`.\n#\n# Any weight set to 0 means that the category of traffic is exempt from throttling.\n[network.estimator_weights]\nconsensus = 0\nblock_gossip = 1\ntransaction_gossip = 0\nfinality_signature_gossip = 1\naddress_gossip = 0\nfinality_signature_broadcasts = 0\ntransaction_requests = 1\ntransaction_responses = 0\nlegacy_deploy_requests = 1\nlegacy_deploy_responses = 0\nblock_requests = 1\nblock_responses = 0\nblock_header_requests = 1\nblock_header_responses = 0\ntrie_requests = 1\ntrie_responses = 0\nfinality_signature_requests = 1\nfinality_signature_responses = 0\nsync_leap_requests = 1\nsync_leap_responses = 0\napprovals_hashes_requests = 1\napprovals_hashes_responses = 0\nexecution_results_requests = 1\nexecution_results_responses = 0\n\n# 
==================================================\n# Configuration options for the BinaryPort server\n# ==================================================\n[binary_port_server]\n\n# Flag which enables the BinaryPort server.\nenable_server = true\n\n# Listening address for BinaryPort server.\naddress = '0.0.0.0:7779'\n\n# Flag that enables the `AllValues` get request. Disabled by default, because it can potentially be abused to retrieve huge amounts of data and clog the node.\n# We enable it for NCTL testing since we need deeper inspection for the network in tests.\nallow_request_get_all_values = true\n\n# Flag that enables the `Trie` get request. Disabled by default, because it can potentially be abused to retrieve huge amounts of data and clog the node.\nallow_request_get_trie = false\n\n# Flag that enables the `TrySpeculativeExec` request. Disabled by default.\nallow_request_speculative_exec = false\n\n# Maximum size of a message in bytes.\nmax_message_size_bytes = 4_194_304\n\n# Maximum number of connections to the server.\nmax_connections = 5\n\n# The global max rate of requests (per second) before they are limited.\n# The implementation uses a sliding window algorithm.\nqps_limit = 110\n\n# Initial time given to a connection before it expires\ninitial_connection_lifetime = '10 seconds'\n\n#The amount of time which is given to a connection to extend it's lifetime when a valid\n# [`Command::Get(GetRequest::Record)`] is sent to the node\nget_record_request_termination_delay = '0 seconds'\n\n#The amount of time which is given to a connection to extend it's lifetime when a valid\n#[`Command::Get(GetRequest::Information)`] is sent to the node\nget_information_request_termination_delay = '5 seconds'\n\n#The amount of time which is given to a connection to extend it's lifetime when a valid\n#[`Command::Get(GetRequest::State)`] is sent to the node\nget_state_request_termination_delay = '0 seconds'\n\n#The amount of time which is given to a connection to extend it's 
lifetime when a valid\n#[`Command::Get(GetRequest::Trie)`] is sent to the node\nget_trie_request_termination_delay = '0 seconds'\n\n#The amount of time which is given to a connection to extend it's lifetime when a valid\n#[`Command::TryAcceptTransaction`] is sent to the node\naccept_transaction_request_termination_delay = '24 seconds'\n\n#The amount of time which is given to a connection to extend it's lifetime when a valid\n#[`Command::TrySpeculativeExec`] is sent to the node\nspeculative_exec_request_termination_delay = '0 seconds'\n\n# ==============================================\n# Configuration options for the REST HTTP server\n# ==============================================\n[rest_server]\n\n# Flag which enables the REST HTTP server.\nenable_server = true\n\n# Listening address for REST HTTP server.  If the port is set to 0, a random port will be used.\n#\n# If the specified port cannot be bound to, a random port will be tried instead.  If binding fails,\n# the REST HTTP server will not run, but the node will be otherwise unaffected.\n#\n# The actual bound address will be reported via a log line if logging is enabled.\naddress = '0.0.0.0:8888'\n\n# The global max rate of requests (per second) before they are limited.\n# Request will be delayed to the next 1 second bucket once limited.\nqps_limit = 100\n\n# Specifies which origin will be reported as allowed by REST server.\n#\n# If left empty, CORS will be disabled.\n# If set to '*', any origin is allowed.\n# Otherwise, only a specified origin is allowed. 
The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin).\ncors_origin = ''\n\n\n# ==========================================================\n# Configuration options for the SSE HTTP event stream server\n# ==========================================================\n[event_stream_server]\n\n# Flag which enables the SSE HTTP event stream server.\nenable_server = true\n\n# Listening address for SSE HTTP event stream server.  If the port is set to 0, a random port will be used.\n#\n# If the specified port cannot be bound to, a random port will be tried instead.  If binding fails,\n# the SSE HTTP event stream server will not run, but the node will be otherwise unaffected.\n#\n# The actual bound address will be reported via a log line if logging is enabled.\naddress = '0.0.0.0:9999'\n\n# The number of event stream events to buffer.\nevent_stream_buffer_length = 5000\n\n# The maximum number of subscribers across all event streams the server will permit at any one time.\nmax_concurrent_subscribers = 100\n\n# Specifies which origin will be reported as allowed by event stream server.\n#\n# If left empty, CORS will be disabled.\n# If set to '*', any origin is allowed.\n# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin).\ncors_origin = ''\n\n# ===============================================\n# Configuration options for the storage component\n# ===============================================\n[storage]\n\n# Path (absolute, or relative to this config.toml) to the folder where any files created\n# or read by the storage component will exist. 
A subfolder named with the network name will be\n# automatically created and used for the storage component files.\n#\n# If the folder doesn't exist, it and any required parents will be created.\n#\n# If unset, the path must be supplied as an argument via the CLI.\npath = '../node-storage'\n\n# Maximum size of the database to use for the block store.\n#\n# The size should be a multiple of the OS page size.\n#\n# 19_327_352_832 == 18 GiB.\nmax_block_store_size = 19_327_352_832\n\n# Maximum size of the database to use for the deploy store.\n#\n# The size should be a multiple of the OS page size.\n#\n# 12_884_901_888 == 12 GiB.\nmax_deploy_store_size = 12_884_901_888\n\n# Maximum size of the database to use for the deploy metadata.\n#\n# The size should be a multiple of the OS page size.\n#\n# 12_884_901_888 == 12 GiB.\nmax_deploy_metadata_store_size = 12_884_901_888\n\n# Maximum size of the database to use for the state snapshots.\n#\n# The size should be a multiple of the OS page size.\n#\n# 10_737_418_240 == 10 GiB.\nmax_state_store_size = 10_737_418_240\n\n# Memory deduplication.\n#\n# If enabled, nodes will attempt to share loaded objects if possible.\nenable_mem_deduplication = true\n\n# Memory duplication garbage collection.\n#\n# Sets the frequency how often the memory pool cache is swept for free references.\n# For example, setting this value to 5 means that every 5th time something is put in the pool the cache is swept.\nmem_pool_prune_interval = 4096\n\n\n# ===================================\n# Configuration options for gossiping\n# ===================================\n[gossip]\n\n# Target number of peers to infect with a given piece of data.\ninfection_target = 3\n\n# The saturation limit as a percentage, with a maximum value of 99.  Used as a termination\n# condition.\n#\n# Example: assume the `infection_target` is 3, the `saturation_limit_percent` is 80, and we don't\n# manage to newly infect 3 peers.  
We will stop gossiping once we know of more than 15 holders\n# excluding us since 80% saturation would imply 3 new infections in 15 peers.\nsaturation_limit_percent = 80\n\n# The maximum duration for which to keep finished entries.\n#\n# The longer they are retained, the lower the likelihood of re-gossiping a piece of data.  However,\n# the longer they are retained, the larger the list of finished entries can grow.\nfinished_entry_duration = '1 minute'\n\n# The timeout duration for a single gossip request, i.e. for a single gossip message\n# sent from this node, it will be considered timed out if the expected response from that peer is\n# not received within this specified duration.\ngossip_request_timeout = '10 seconds'\n\n# The timeout duration for retrieving the remaining part(s) of newly-discovered data\n# from a peer which gossiped information about that data to this node.\nget_remainder_timeout = '5 seconds'\n\n# The timeout duration for a newly-received, gossiped item to be validated and stored by another\n# component before the gossiper abandons waiting to gossip the item onwards.\nvalidate_and_store_timeout = '1 minute'\n\n\n# ===============================================\n# Configuration options for the block accumulator\n# ===============================================\n[block_accumulator]\n\n# Block height difference threshold for starting to execute the blocks.\nattempt_execution_threshold = 3\n\n# Accepted time interval for inactivity in block accumulator.\ndead_air_interval = '3 minutes'\n\n# Time after which the block acceptors are considered old and can be purged.\npurge_interval = '5 minutes'\n\n\n# ================================================\n# Configuration options for the block synchronizer\n# ================================================\n[block_synchronizer]\n\n# Maximum number of fetch-trie tasks to run in parallel during block synchronization.\nmax_parallel_trie_fetches = 5000\n\n# Time interval for the node to ask for refreshed 
peers.\npeer_refresh_interval = '90 seconds'\n\n# Time interval for the node to check what the block synchronizer needs to acquire next.\nneed_next_interval = '1 second'\n\n# Time interval for recurring disconnection of dishonest peers.\ndisconnect_dishonest_peers_interval = '10 seconds'\n\n# Time interval for resetting the latch in block builders.\nlatch_reset_interval = '5 seconds'\n\n\n# =============================================\n# Configuration options for the block validator\n# =============================================\n[block_validator]\n\n# Maximum number of completed entries to retain.\n#\n# A higher value can avoid creating needless validation work on an already-validated proposed\n# block, but comes at the cost of increased memory consumption.\nmax_completed_entries = 3\n\n\n# ==================================\n# Configuration options for fetchers\n# ==================================\n[fetcher]\n\n# The timeout duration for a single fetcher request, i.e. for a single fetcher message\n# sent from this node to another node, it will be considered timed out if the expected response from that peer is\n# not received within this specified duration.\nget_from_peer_timeout = '10 seconds'\n\n\n# ========================================================\n# Configuration options for the contract runtime component\n# ========================================================\n[contract_runtime]\n\n# Optional maximum size of the database to use for the global state store.\n#\n# If unset, defaults to 805,306,368,000 == 750 GiB.\n#\n# The size should be a multiple of the OS page size.\nmax_global_state_size = 32_212_254_720\n\n# Optional depth limit to use for global state queries.\n#\n# If unset, defaults to 5.\nmax_query_depth = 5\n\n# Enable manual synchronizing to disk.\n#\n# If unset, defaults to true.\nenable_manual_sync = true\n\n\n# ==================================================\n# Configuration options for the transaction acceptor\n# 
==================================================\n[transaction_acceptor]\n\n# The leeway allowed when considering whether a transaction is future-dated or not.\n#\n# To accommodate minor clock drift, transactions whose timestamps are within `timestamp_leeway` in the\n# future are still acceptable.\n#\n# The maximum value to which `timestamp_leeway` can be set is defined by the chainspec setting\n# `transaction.max_timestamp_leeway`.\ntimestamp_leeway = '2 seconds'\n\n\n# ===========================================\n# Configuration options for the transaction buffer\n# ===========================================\n[transaction_buffer]\n\n# The interval of checking for expired transactions.\nexpiry_check_interval = '1 minute'\n\n\n# ==============================================\n# Configuration options for the diagnostics port\n# ==============================================\n[diagnostics_port]\n\n# If set, the diagnostics port will be available on a UNIX socket.\nenabled = true\n\n# Filename for the UNIX domain socket the diagnostics port listens on.\nsocket_path = \"debug.socket\"\n\n# The umask to set before creating the socket. A restrictive mask like `0o077` will cause the\n# socket to be only accessible by the user the node runs as. A more relaxed variant is `0o007`,\n# which allows for group access as well.\nsocket_umask = 0o077\n\n\n# =============================================\n# Configuration options for the upgrade watcher\n# =============================================\n[upgrade_watcher]\n\n# How often to scan file system for available upgrades.\nupgrade_check_interval = '30 seconds'\n"
  },
  {
    "path": "resources/local/secret_keys/faucet.pem",
    "content": "-----BEGIN PRIVATE KEY-----\nMC4CAQAwBQYDK2VwBCIEIEwqVauHk/MivDPfsVHPCO5keI6g13BRmHeh91xsIQSY\n-----END PRIVATE KEY-----"
  },
  {
    "path": "resources/local/secret_keys/node-1.pem",
    "content": "-----BEGIN PRIVATE KEY-----\nMC4CAQAwBQYDK2VwBCIEIL2b95VeBZG0BH+7E8pCw0fjmvHXtE/7+koPslqMODH1\n-----END PRIVATE KEY-----"
  },
  {
    "path": "resources/local/secret_keys/node-2.pem",
    "content": "-----BEGIN PRIVATE KEY-----\nMC4CAQAwBQYDK2VwBCIEIGM3jj4V7R5AZr+K2qd4Q4h44PQ8yNu2x8FoPVM9eot7\n-----END PRIVATE KEY-----"
  },
  {
    "path": "resources/local/secret_keys/node-3.pem",
    "content": "-----BEGIN PRIVATE KEY-----\nMC4CAQAwBQYDK2VwBCIEIP0wiXI+KquMZaOituFUqPRTyXJ97KBd7AvUnYDbsGF9\n-----END PRIVATE KEY-----"
  },
  {
    "path": "resources/local/secret_keys/node-4.pem",
    "content": "-----BEGIN PRIVATE KEY-----\nMC4CAQAwBQYDK2VwBCIEIF+8cSb0U9H2riT7WRwQBLigcNrZAn4iQ2nU+T/ButI6\n-----END PRIVATE KEY-----"
  },
  {
    "path": "resources/local/secret_keys/node-5.pem",
    "content": "-----BEGIN EC PRIVATE KEY-----\nMC4CAQEEINWsatn4MCCpRt6V6ESI+upHhqulRNrJpemnV9FF7vfDoAcGBSuBBAAK\n-----END EC PRIVATE KEY-----"
  },
  {
    "path": "resources/mainnet/accounts.toml",
    "content": "[[accounts]]\npublic_key = \"01ee8b5ec54251a9f2f92edbbf328fcdb78449ae2cca2743d591f1b7e83b6d9049\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 5\n[[accounts]]\npublic_key = \"01b4815d64e022127b117d25436e4b9a9d9d1338b0c6cb09db3995d4d3ac84d86b\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"010427c1d1227c9d2aafe8c06c6e6b276da8dcd8fd170ca848b8e3e8e1038a6dc8\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"835521000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"010a78eef78966a56a633c665411388f97f850609960d5d898f3992272b8d4bcca\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"118750000000000\"\ndelegation_rate = 9\n[[accounts]]\npublic_key = \"011907bc6fa90ccb8cacc150170b887b3dd97a96fc029b515365141d1e5d4f7983\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"836069000000000\"\ndelegation_rate = 9\n[[accounts]]\npublic_key = \"01ba1dcfbe8dba48b88674bb007c000391c0ea36b5d80570c113a42a9823d702c2\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"012f5501236a3bd82d22ee7675e78174ee723565495fd0f43be8a011bfa6494687\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"014382d46e2543ab2832c04936f8c205847040426abb56065bbf7b2f7e1d33f200\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 5\n[[accounts]]\npublic_key = \"01a849b574b01c775754db774f7243c4eae01f8919ba3d5c624282485af07b18ae\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"01765c9d153550285d2e6da2f4407ca8b6c5d2f94098e9794a02609df1068474ef\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = 
\"01e61c8b8227afd8f7d4daece145546aa6775cf1c4ebfb6f3f56c18df558aed72d\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"01c2de71b99085b38f3ba0dece0c1e128de7fe69526beb66951b99f5c7272c3980\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 7\n[[accounts]]\npublic_key = \"01c5594d384a35a7520f00643c054bc5f574d04fa88d73fb215614fea35178d9b2\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"017f7380736731efa408c323b38a13541be1c99a4aefb0d5b59c6a4aecac494105\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"01a6901408eda702a653805f50060bfe00d5e962747ee7133df64bd7bab50b4643\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1840864000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"012bac1d0ff9240ff0b7b06d555815640497861619ca12583ddef434885416e69b\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"018f84c6fc037284f189cc8cb49f89212ff434a5eb050e48cdd164ff3890fbff69\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 15\n[[accounts]]\npublic_key = \"013eb6775484f25ac4e126e93a350ef2bc259385da5141d331829af7f755b03844\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 8\n[[accounts]]\npublic_key = 
\"01652d9fbd8dbb443af0122cd4347f4107e697306e5b90f93dbf959f7612e5e7d2\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"01faec72d138026edbd470d9ba1f6a05c5fabaa98da8bb41c8c92041d2f58337d2\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"01026ca707c348ed8012ac6a1f28db031fadd6eb67203501a353b867a08c8b9a80\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 30\n[[accounts]]\npublic_key = \"01269c84a9153623fc47288f5a2b1bd681fc7d01b1f6144626583a56d3ba8f7c09\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 8\n[[accounts]]\npublic_key = \"01ad71484b09749a49463d3af2a8a3addd71509a92f447e9b06cbddbf60b45cbf7\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"011117189c666f81c5160cd610ee383dc9b2d0361f004934754d39752eedc64957\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"01419478cc7a68037c553c0214d595cb6b432c71ef73ece0d7a5f98c5eb1ecb44a\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 20\n[[accounts]]\npublic_key = \"0145316b84fee8735291f5a29206e5211c74ab828b0382bb475a4a6e799894ea11\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1014561000000000\"\ndelegation_rate = 7\n[[accounts]]\npublic_key = \"01219550874603647a55b98844a7e41accc5c3076b6c1fbb30002e7f6031524fa2\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 8\n[[accounts]]\npublic_key = \"01c7de2f333695c4db43cce8c555f74540176071a9eeb8438eb335634b9729ee6b\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"970728000000000\"\ndelegation_rate = 7\n[[accounts]]\npublic_key = 
\"01d904f8135553a4810487b0d754f9f1c8efb1ee91c96f36e92b9631120a7ece06\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 7\n[[accounts]]\npublic_key = \"010a8ac8d23e6c57fa340c552ddf9199d9cba9166ecc0daee640053ebfc6254610\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 8\n[[accounts]]\npublic_key = \"0182f835993ce0d3596147429ea432b3a025580f458f50bbbaccbbe4c73f1f1113\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"012dc55b77b2a9faf75dbaed15775ddfc48e60c4596608318c8b96b1900bdf1d5f\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"01d2212cbecf229cd13deb7d2b650ed72cc109763398d95aa7743a1559e7eb4781\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 8\n[[accounts]]\npublic_key = \"0144118517b5ba7e7ca917d5a418c5fb8bf49e279427435d6597f59f8c5bf9ff1f\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"0149fcd9cc0986ba04d77ad48c7def4860bf6952ccb075666e579549386fd2f143\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 7\n[[accounts]]\npublic_key = \"014ec8bfba46dbd592c0b44dd356abffb203330ddeced26c7522656b9bff85e7bc\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"20745205000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"0167e08c3b05017d329444dc7d22518ba652cecb2c54669a69e5808ebcab25e42c\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"01b32b134afdf8585b8c8181f81375a98c5ed12e145c9e6bfea33a55eeccf1fa22\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = 
\"017de9688caedd0718baed968179ddbe0b0532a8ef0a9a1cb9dfabe9b0f6016fa8\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 5\n[[accounts]]\npublic_key = \"01b1c48c69eeb5339ae43be062dee21c46d61346b9ee1f83d4e924000833c5a3e4\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1689877000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"017b9a85b657e0a8c2e01bf2d80b6b2e6f8d8b4bc6d7c479f21e59dceea761710b\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 7\n[[accounts]]\npublic_key = \"01eedfd20f75528c50aae557d15dff5ca6379ca8401bceb8e969cd0cb1ea52ec7f\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 100\n[[accounts]]\npublic_key = \"017d940644deeb8eea3d94f62cfa65a9bea4eb2e8ec9433104bb145d73fd39e98f\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 100\n[[accounts]]\npublic_key = \"0163e03c3aa2b383f9d1b2f7c69498d339dcd1061059792ce51afda49135ff7876\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 100\n[[accounts]]\npublic_key = \"015dfd4b3f997b1eb1a7292eb501845931b8aa9869988a5caa2be79ac4f5ff8a21\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 100\n[[accounts]]\npublic_key = \"013f4f5b2da0d7ed5cf78d5f153a5517ff203c0ed3570061476e793c81669e77d9\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 100\n[[accounts]]\npublic_key = \"0113442fd0dc052634fb42943e8ba095a404ea12cded84fbe4b1536ded94dab10f\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"666667000000000\"\ndelegation_rate = 100\n[[accounts]]\npublic_key = \"014c2573f59c70775a41d9cb69b9e3146e6d51567d33a141423b5bf371967e5902\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 100\n[[accounts]]\npublic_key = 
\"0190c434129ecbaeb34d33185ab6bf97c3c493fc50121a56a9ed8c4c52855b5ac1\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 100\n[[accounts]]\npublic_key = \"014b466f5c6c87bb1d2566d166120e320a724231374cd0775e0e347afed70a4745\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"015da78bc6315643fbcf57d87a50853c67d8b271b615f38f6538f78a844cc4501d\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"0142093cad632cc989b3e9952a4e571cab44d90a7bf9e3badd0d91c2dc2ead332a\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1278629000000000\"\ndelegation_rate = 5\n[[accounts]]\npublic_key = \"01ae2af99944b0b9f92fccf425aa51894ebbad0f4e8e42c66a71dcb999a3bd94ed\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1912869000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"01aa2976834459371b1cf7f476873dd091a0e364bd18abed8e77659b83fd892084\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 100\n[[accounts]]\npublic_key = \"0202BB1CDBEC959D918875610AECE2CD043ED8F1ED42461215812C5328592C7F4F58\"\nbalance = \"600000000000000000\"\n\n[[accounts]]\npublic_key = \"02036210C108F7E819A38AE05CC7B1DC39A2E6366F1404CE4C12E9C05B3832D811BB\"\nbalance = \"800000000000000000\"\n\n[[accounts]]\npublic_key = \"02035C47CCBEAA32040D6904B6DC163C3B546314C52B2A78583835F54A224AB365A4\"\nbalance = \"1000000000000000000\"\n\n[[accounts]]\npublic_key = \"0203A5C03D9F7D6885276D0FAA25D412ECC1369F6573856C321D99CAFC0DAC2A8C12\"\nbalance = \"1100000000000000000\"\n\n[[accounts]]\npublic_key = \"02024C5E3BA7B1DA49CDA950319AEC914CD3C720FBEC3DCF25AA4ADD631E28F70AA9\"\nbalance = \"2964329384000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01eb9c059b2624f475bb0726d8b2e382b025818dce3ec1f5ecf7f80ca708b6440e\"\nbalance = \"0\"\nvalidator_public_key = 
\"0190c434129ecbaeb34d33185ab6bf97c3c493fc50121a56a9ed8c4c52855b5ac1\"\ndelegated_amount = \"1000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01ebb0e23660d71a0fcd19ac6d96b9c0a95fa9081465667cb6e6626580e5804a5b\"\nbalance = \"0\"\nvalidator_public_key = \"014ec8bfba46dbd592c0b44dd356abffb203330ddeced26c7522656b9bff85e7bc\"\ndelegated_amount = \"200904000000000\"\n\n[[delegators]]\ndelegator_public_key = \"016b1ed3c0bbc59e853802a8fdf935df5cf68c775df2a4f5efba4fed634fa9884e\"\nbalance = \"0\"\nvalidator_public_key = \"0182f835993ce0d3596147429ea432b3a025580f458f50bbbaccbbe4c73f1f1113\"\ndelegated_amount = \"325500000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01ff815bba34957e2a1a575be558ae7bdabd709e6104a1a12f77b3f348e5978f89\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"331562000000000\"\n\n[[delegators]]\ndelegator_public_key = \"018639e0a75d9abcce90c643e240b8c9738ff52ea34429c3fdbe7a2660078b57ef\"\nbalance = \"0\"\nvalidator_public_key = \"0149fcd9cc0986ba04d77ad48c7def4860bf6952ccb075666e579549386fd2f143\"\ndelegated_amount = \"333333500000000\"\n\n[[delegators]]\ndelegator_public_key = \"015f04b56bcb8c4c4fcd3700f74a46d679d947ea1294887b6555a649291df827a0\"\nbalance = \"0\"\nvalidator_public_key = \"014382d46e2543ab2832c04936f8c205847040426abb56065bbf7b2f7e1d33f200\"\ndelegated_amount = \"333333500000000\"\n\n[[delegators]]\ndelegator_public_key = \"0167f6eb1ab61c9432aba0db31462b3f23dccd0dbf25b9d2f6d686286b354a4a4f\"\nbalance = \"0\"\nvalidator_public_key = \"01652d9fbd8dbb443af0122cd4347f4107e697306e5b90f93dbf959f7612e5e7d2\"\ndelegated_amount = \"374280000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01d571ce1c05f491ed8201196af1852ec70d3f56e71b494865e37994e20112d8e6\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = 
\"520622000000000\"\n\n[[delegators]]\ndelegator_public_key = \"012dd5b64d805a9811ea8653bf5aa51c4bcde5b95656127bd0056f9653ac7ce0ad\"\nbalance = \"0\"\nvalidator_public_key = \"010a8ac8d23e6c57fa340c552ddf9199d9cba9166ecc0daee640053ebfc6254610\"\ndelegated_amount = \"569402000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01b6d6b0f8483a7a870f148cfcf4f9fbde80103f771e1576b1cba17368f2d187fb\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"569402000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01fe2573caf5a3344c85f1ac2fb81bd6eaad225e160014c2a45aded7c7784940c8\"\nbalance = \"0\"\nvalidator_public_key = \"012dc55b77b2a9faf75dbaed15775ddfc48e60c4596608318c8b96b1900bdf1d5f\"\ndelegated_amount = \"569402000000000\"\n\n[[delegators]]\ndelegator_public_key = \"019893aaea11a3f0f2ee2963741f1dc6414e5af05f707d008e40f86288895e975c\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"569402000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01777ac4c7b5cd6697fb0e3163258db9bf05c25b8b5214c2b475f1015e60dcb148\"\nbalance = \"0\"\nvalidator_public_key = \"014382d46e2543ab2832c04936f8c205847040426abb56065bbf7b2f7e1d33f200\"\ndelegated_amount = \"592166000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01bbc0bcfb61b479c557d9e478fd590ec7aaf29807c9583884b31cb7565f94b445\"\nbalance = \"0\"\nvalidator_public_key = \"01ad71484b09749a49463d3af2a8a3addd71509a92f447e9b06cbddbf60b45cbf7\"\ndelegated_amount = \"601324000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01f67c12f92e8776ff20800af0c7836692540cc6dacad6aea4cf61e0cc8191e467\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"689764000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0122f0609630cf52066b0992bf460881224f3fa26015e859d378e86fef63285316\"\nbalance = 
\"0\"\nvalidator_public_key = \"0182f835993ce0d3596147429ea432b3a025580f458f50bbbaccbbe4c73f1f1113\"\ndelegated_amount = \"702736000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01372cbceb4f20bfaa2955e44b337d98e5b1d52bd2bce7766443e23359b1204c06\"\nbalance = \"0\"\nvalidator_public_key = \"01ee8b5ec54251a9f2f92edbbf328fcdb78449ae2cca2743d591f1b7e83b6d9049\"\ndelegated_amount = \"823609817600000\"\n\n[[delegators]]\ndelegator_public_key = \"012945e0cbac47660dd84d89d30fad046c5a41682fae34f15333c7b0256a9747ff\"\nbalance = \"0\"\nvalidator_public_key = \"01b4815d64e022127b117d25436e4b9a9d9d1338b0c6cb09db3995d4d3ac84d86b\"\ndelegated_amount = \"836069000000000\"\n\n[[delegators]]\ndelegator_public_key = \"013234c7da09119de9460f9b699b1a857730fe3ae80bef24d91910cea667b4e6b9\"\nbalance = \"0\"\nvalidator_public_key = \"0145316b84fee8735291f5a29206e5211c74ab828b0382bb475a4a6e799894ea11\"\ndelegated_amount = \"936995290800000\"\n\n[[delegators]]\ndelegator_public_key = \"0138320bc703eccf17e8129505cf1318709a2d36ff55a9e37114ca7ece8a1d52b7\"\nbalance = \"0\"\nvalidator_public_key = \"014382d46e2543ab2832c04936f8c205847040426abb56065bbf7b2f7e1d33f200\"\ndelegated_amount = \"941168989200000\"\n\n[[delegators]]\ndelegator_public_key = \"019b51d1b374644628d7741f6c0980654fbdace31208268b9db736d2f5e42cc83a\"\nbalance = \"0\"\nvalidator_public_key = \"01ba1dcfbe8dba48b88674bb007c000391c0ea36b5d80570c113a42a9823d702c2\"\ndelegated_amount = \"1026248000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0168be9a3ecd4ec3fc420705a810824f10e95a8156017f32ed1a0d3cecf6c8e29d\"\nbalance = \"0\"\nvalidator_public_key = \"01d904f8135553a4810487b0d754f9f1c8efb1ee91c96f36e92b9631120a7ece06\"\ndelegated_amount = \"1064988708400000\"\n\n[[delegators]]\ndelegator_public_key = \"010e7b14719461b8fdc67c9c4770acd4f1b1249ce18d758cc951708c2400fa721e\"\nbalance = \"0\"\nvalidator_public_key = \"017f7380736731efa408c323b38a13541be1c99a4aefb0d5b59c6a4aecac494105\"\ndelegated_amount = 
\"1077931000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01b04bbff56d65dc268e35bfe2932d4a3c365a4c584b0b966f46a9c297d940da04\"\nbalance = \"0\"\nvalidator_public_key = \"01269c84a9153623fc47288f5a2b1bd681fc7d01b1f6144626583a56d3ba8f7c09\"\ndelegated_amount = \"1162909000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0189cf6a5362829ee43e8e4d9510f386288e8a90d6e08baa0615b65614d398dafb\"\nbalance = \"0\"\nvalidator_public_key = \"0144118517b5ba7e7ca917d5a418c5fb8bf49e279427435d6597f59f8c5bf9ff1f\"\ndelegated_amount = \"1196729000000000\"\n\n[[delegators]]\ndelegator_public_key = \"014cfe52b8cd6ed8fc48cec019cb081b4f15cf1ff26f5c282c601a6e4332f3c27d\"\nbalance = \"0\"\nvalidator_public_key = \"01219550874603647a55b98844a7e41accc5c3076b6c1fbb30002e7f6031524fa2\"\ndelegated_amount = \"1410054000000000\"\n\n[[delegators]]\ndelegator_public_key = \"02038F968DFE84882E08330C961D4AAA60BAA4D58D512DE0CCBD83EB365E34ACAF5B\"\nbalance = \"0\"\nvalidator_public_key = \"01e61c8b8227afd8f7d4daece145546aa6775cf1c4ebfb6f3f56c18df558aed72d\"\ndelegated_amount = \"1473322000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0122cb4d0d9fdc1a3a984913832b5320ba56203cf8bbca0f2f020f1dbc400ef048\"\nbalance = \"0\"\nvalidator_public_key = \"01d2212cbecf229cd13deb7d2b650ed72cc109763398d95aa7743a1559e7eb4781\"\ndelegated_amount = \"1497566000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0113838fd13ecb46a06276780100f3643bcaf3abb7d5ad5aa1c7e48aea9470eb05\"\nbalance = \"0\"\nvalidator_public_key = \"013eb6775484f25ac4e126e93a350ef2bc259385da5141d331829af7f755b03844\"\ndelegated_amount = \"1621310000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01b4e17cd2556abb6bae2dc67fc8fbe3dda52451fc300308c2ef6d246569eb6a20\"\nbalance = \"0\"\nvalidator_public_key = \"01c2de71b99085b38f3ba0dece0c1e128de7fe69526beb66951b99f5c7272c3980\"\ndelegated_amount = \"1768864000000000\"\n\n[[delegators]]\ndelegator_public_key = 
\"012034668d7a5844f4fce1c3672c8d54daa81adb6d230a4aeeccaa8b369f0178fc\"\nbalance = \"0\"\nvalidator_public_key = \"01652d9fbd8dbb443af0122cd4347f4107e697306e5b90f93dbf959f7612e5e7d2\"\ndelegated_amount = \"1780596000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01b243a9ab89f5a2332fc505942f94f0ca26306fe162720dd6c32072f71d0798d2\"\nbalance = \"0\"\nvalidator_public_key = \"017f7380736731efa408c323b38a13541be1c99a4aefb0d5b59c6a4aecac494105\"\ndelegated_amount = \"1833864000000000\"\n\n[[delegators]]\ndelegator_public_key = \"019ecde420395f4a9b70be2d4db0ab914682e04705cc2d7d3d73d958c7e69bfff8\"\nbalance = \"0\"\nvalidator_public_key = \"01ee8b5ec54251a9f2f92edbbf328fcdb78449ae2cca2743d591f1b7e83b6d9049\"\ndelegated_amount = \"1866864000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0123bf1bd107d82bb42f4a92fc1ce19251f341802e1ecfa6ed88a3c7bbbdc302d1\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"1925841000000000\"\n\n[[delegators]]\ndelegator_public_key = \"012ff1f06f2125cbdaa5d0baa8bdd1a1073972d623968d600feb11435a672f4ab0\"\nbalance = \"0\"\nvalidator_public_key = \"01a849b574b01c775754db774f7243c4eae01f8919ba3d5c624282485af07b18ae\"\ndelegated_amount = \"2100764000000000\"\n\n[[delegators]]\ndelegator_public_key = \"010cba0e742dc4406477b3921a3f51d0a73d897be13b8236f4401e11f42dc678f4\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"2166666500000000\"\n\n[[delegators]]\ndelegator_public_key = \"015f0b5e0d025c16f39e35ed32e885c9f7f0a23d021d6909c333911eeb2311d190\"\nbalance = \"0\"\nvalidator_public_key = \"0149fcd9cc0986ba04d77ad48c7def4860bf6952ccb075666e579549386fd2f143\"\ndelegated_amount = \"2166666500000000\"\n\n[[delegators]]\ndelegator_public_key = \"01e838a3cf194a4eea5b2396e201595388625b324af017788bb51302b1da27a810\"\nbalance = \"0\"\nvalidator_public_key = 
\"01b32b134afdf8585b8c8181f81375a98c5ed12e145c9e6bfea33a55eeccf1fa22\"\ndelegated_amount = \"2166666500000000\"\n\n[[delegators]]\ndelegator_public_key = \"0131ba554e54c35fb9992f6ebdb290db6087264ad7d0687edb19b6dd998fc2e932\"\nbalance = \"0\"\nvalidator_public_key = \"01b32b134afdf8585b8c8181f81375a98c5ed12e145c9e6bfea33a55eeccf1fa22\"\ndelegated_amount = \"2166666500000000\"\n\n[[delegators]]\ndelegator_public_key = \"01f583e3dfda41ccbba0c51e12ef45eaf1bfd7d776d5cc482196a808ffe89da52f\"\nbalance = \"0\"\nvalidator_public_key = \"010a8ac8d23e6c57fa340c552ddf9199d9cba9166ecc0daee640053ebfc6254610\"\ndelegated_amount = \"2363562000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01128ddb51119f1df535cf3a763996344ab0cc79038faaee0aaaf098a078031ce6\"\nbalance = \"0\"\nvalidator_public_key = \"01026ca707c348ed8012ac6a1f28db031fadd6eb67203501a353b867a08c8b9a80\"\ndelegated_amount = \"2522693000000000\"\n\n[[delegators]]\ndelegator_public_key = \"013bf164b73b877e52118101751feb2b7b129ad159e91a4c00360c990594e9b81f\"\nbalance = \"0\"\nvalidator_public_key = \"01eedfd20f75528c50aae557d15dff5ca6379ca8401bceb8e969cd0cb1ea52ec7f\"\ndelegated_amount = \"2541914000000000\"\n\n[[delegators]]\ndelegator_public_key = \"016a0f6e2a18688f89b4262d048d57bce3df7aad70c1f5fae7be7daee02cb0edb1\"\nbalance = \"0\"\nvalidator_public_key = \"0167e08c3b05017d329444dc7d22518ba652cecb2c54669a69e5808ebcab25e42c\"\ndelegated_amount = \"2600000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01c90604bec2e48eae8e86fdebecd9cc3502bf6944fcf1ec08f071e3985e063430\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"2780822000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0130e7a05a136b5fb5c05ec3d78ab4fd872240ddb2b3d8c4448fab48d45b27efa9\"\nbalance = \"0\"\nvalidator_public_key = \"01c7de2f333695c4db43cce8c555f74540176071a9eeb8438eb335634b9729ee6b\"\ndelegated_amount = 
\"3189401194000000\"\n\n[[delegators]]\ndelegator_public_key = \"013cbe8ca475de38ac5ae85af0536308e21160a13986526d235c28e4796d3ddd99\"\nbalance = \"0\"\nvalidator_public_key = \"012dc55b77b2a9faf75dbaed15775ddfc48e60c4596608318c8b96b1900bdf1d5f\"\ndelegated_amount = \"3553406000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01bcd376f4dc285025624279ba8e17673cce6bfd02750ea09c982a337049b3cc9d\"\nbalance = \"0\"\nvalidator_public_key = \"013f4f5b2da0d7ed5cf78d5f153a5517ff203c0ed3570061476e793c81669e77d9\"\ndelegated_amount = \"3609904000000000\"\n\n[[delegators]]\ndelegator_public_key = \"013932752d32f3b41a9b98f3723553ab0b1fcf54909fa6c47168961e8e6e1bbc41\"\nbalance = \"0\"\nvalidator_public_key = \"013f4f5b2da0d7ed5cf78d5f153a5517ff203c0ed3570061476e793c81669e77d9\"\ndelegated_amount = \"3609904000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0133671964312ad012f760869601d4897b9f549177d1a98c8f210ee270f6b5fa7b\"\nbalance = \"0\"\nvalidator_public_key = \"01d904f8135553a4810487b0d754f9f1c8efb1ee91c96f36e92b9631120a7ece06\"\ndelegated_amount = \"3684493200000000\"\n\n[[delegators]]\ndelegator_public_key = \"01d0f3fa56e64675fab02f2245b5e0caa5503c53bc57af7649af5dd8aa36141f44\"\nbalance = \"0\"\nvalidator_public_key = \"01026ca707c348ed8012ac6a1f28db031fadd6eb67203501a353b867a08c8b9a80\"\ndelegated_amount = \"3684493200000000\"\n\n[[delegators]]\ndelegator_public_key = \"010f228ca392ae12a90c52d64dfbf1fc9fae8b55d4d721ba76452a8fc9166efb40\"\nbalance = \"0\"\nvalidator_public_key = \"014ec8bfba46dbd592c0b44dd356abffb203330ddeced26c7522656b9bff85e7bc\"\ndelegated_amount = \"3684493200000000\"\n\n[[delegators]]\ndelegator_public_key = \"01831d114210b0a134a5bc283634b8bb14ff2b4c4a226da3935fa0640686d78c37\"\nbalance = \"0\"\nvalidator_public_key = \"01c7de2f333695c4db43cce8c555f74540176071a9eeb8438eb335634b9729ee6b\"\ndelegated_amount = \"3684493200000000\"\n\n[[delegators]]\ndelegator_public_key = 
\"0122d1a7ba329a32a5a8fe829b58c28ea7d4e60d48323e08c2821b7afeda0f4c32\"\nbalance = \"0\"\nvalidator_public_key = \"012bac1d0ff9240ff0b7b06d555815640497861619ca12583ddef434885416e69b\"\ndelegated_amount = \"3684493200000000\"\n\n[[delegators]]\ndelegator_public_key = \"01a60e0885f4968dd3107e088b4cb5798af665859b769bc1aa86909a5b67f66a66\"\nbalance = \"0\"\nvalidator_public_key = \"01ba1dcfbe8dba48b88674bb007c000391c0ea36b5d80570c113a42a9823d702c2\"\ndelegated_amount = \"3752637000000000\"\n\n[[delegators]]\ndelegator_public_key = \"019470ef3d314f868e7ce50f5461129c3975ed45e3acab96282dbbd170e1b1a440\"\nbalance = \"0\"\nvalidator_public_key = \"01652d9fbd8dbb443af0122cd4347f4107e697306e5b90f93dbf959f7612e5e7d2\"\ndelegated_amount = \"4091774000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01b879d9c6c650841e4c74c4ba0f3cfe64d1abd5f5f59e741686199e59e1ecfd96\"\nbalance = \"0\"\nvalidator_public_key = \"0144118517b5ba7e7ca917d5a418c5fb8bf49e279427435d6597f59f8c5bf9ff1f\"\ndelegated_amount = \"4107852000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01d9c9c23bfebaf2f610e3a6d93c751550d4f1cfda2fe8a1d5e8e2f6b0402dad29\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"4171233000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01942a994f8bb903cdaa261482285a11acc68ed40a1e5fc65e146e4fe276f8af60\"\nbalance = \"0\"\nvalidator_public_key = \"0149fcd9cc0986ba04d77ad48c7def4860bf6952ccb075666e579549386fd2f143\"\ndelegated_amount = \"4184795000000000\"\n\n[[delegators]]\ndelegator_public_key = \"015f0dc0b5ff839ba1ce85e05190019a442a13c6e5be52721003d9b52e0b73af77\"\nbalance = \"0\"\nvalidator_public_key = \"0149fcd9cc0986ba04d77ad48c7def4860bf6952ccb075666e579549386fd2f143\"\ndelegated_amount = \"4333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01a68ccf92ac4d9d42b898d4dd8ddbacea4346934f81db54788f539831b8618136\"\nbalance = \"0\"\nvalidator_public_key = 
\"01ee8b5ec54251a9f2f92edbbf328fcdb78449ae2cca2743d591f1b7e83b6d9049\"\ndelegated_amount = \"4333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01ad831573d5d5458b1bd68bd08e40ff2369a4db6237e2fed887013da84f2c8b3f\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"4333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01dfa7caca2088155d24dc3c2448e2cc9a348ba18663f5317092cde5ec4502105f\"\nbalance = \"0\"\nvalidator_public_key = \"01026ca707c348ed8012ac6a1f28db031fadd6eb67203501a353b867a08c8b9a80\"\ndelegated_amount = \"4333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0202B974A04E7202FA7EC857E10FB05C90E42D65E3372EE88122DB2CF59092015CA9\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"4333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0145a996c9488cca5c9c43e857b896577651170c9f97081e7489c651766b07f0b7\"\nbalance = \"0\"\nvalidator_public_key = \"012bac1d0ff9240ff0b7b06d555815640497861619ca12583ddef434885416e69b\"\ndelegated_amount = \"4333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0159d9fc631ba08168180d4c53496aba704d9a12ec4c0f2d0ee93933064b9dc125\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"4333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"02024C11C976162D23618E1697449C3B59851411DC46309D5CFD3F3F1E9875EF668F\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"4333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01d12bf7dc4cdf030cbd145c85211382bace22e53b79c835ba6784a0eff149f7ef\"\nbalance = \"0\"\nvalidator_public_key = \"0182f835993ce0d3596147429ea432b3a025580f458f50bbbaccbbe4c73f1f1113\"\ndelegated_amount = 
\"4333333500000000\"\n\n[[delegators]]\ndelegator_public_key = \"0138a4adfe8a3c469ca5990deddda87edf6e46f69c3aa572dc609fc5c2443d9196\"\nbalance = \"0\"\nvalidator_public_key = \"010a8ac8d23e6c57fa340c552ddf9199d9cba9166ecc0daee640053ebfc6254610\"\ndelegated_amount = \"4333333500000000\"\n\n[[delegators]]\ndelegator_public_key = \"01fa0fa7b11fdf42aea514a88e3a2263187d2239cb8ede4bce3456689c023fc393\"\nbalance = \"0\"\nvalidator_public_key = \"017de9688caedd0718baed968179ddbe0b0532a8ef0a9a1cb9dfabe9b0f6016fa8\"\ndelegated_amount = \"4542658000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0115cc7743558b5da74cd37b1212da8bdc075c2499a33e1a394ae2ce3920f05d31\"\nbalance = \"0\"\nvalidator_public_key = \"0144118517b5ba7e7ca917d5a418c5fb8bf49e279427435d6597f59f8c5bf9ff1f\"\ndelegated_amount = \"4543137000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0192a12fc746753edb35078c241a2570bac512f636d3cb3fee055ad9d4c1ae82b9\"\nbalance = \"0\"\nvalidator_public_key = \"01652d9fbd8dbb443af0122cd4347f4107e697306e5b90f93dbf959f7612e5e7d2\"\ndelegated_amount = \"5872836000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0192dbcad8d815289b2faaf6c31549c3bb02453c16df87810bf02c944f06513f3b\"\nbalance = \"0\"\nvalidator_public_key = \"01765c9d153550285d2e6da2f4407ca8b6c5d2f94098e9794a02609df1068474ef\"\ndelegated_amount = \"6500000100000000\"\n\n[[delegators]]\ndelegator_public_key = \"01119bcbf29e296d53aabb24f412ba77b8c6d522d253ed49fe44b01e013813d462\"\nbalance = \"0\"\nvalidator_public_key = \"010a8ac8d23e6c57fa340c552ddf9199d9cba9166ecc0daee640053ebfc6254610\"\ndelegated_amount = \"6898630000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0203E1AFB65278CA78989411B095CA2CE59F4C3357E9B4C92A198FA304F8C7D4BF11\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"6908904000000000\"\n\n[[delegators]]\ndelegator_public_key = 
\"01e0574869b56bab125ed724490096029d2264dc091f21f6de9beb0b91c204fede\"\nbalance = \"0\"\nvalidator_public_key = \"0182f835993ce0d3596147429ea432b3a025580f458f50bbbaccbbe4c73f1f1113\"\ndelegated_amount = \"6929452000000000\"\n\n[[delegators]]\ndelegator_public_key = \"015bceda8667f573df0db5855339cff300f0ad9cc8f3cc8cc7bed7126759e4bd7c\"\nbalance = \"0\"\nvalidator_public_key = \"0149fcd9cc0986ba04d77ad48c7def4860bf6952ccb075666e579549386fd2f143\"\ndelegated_amount = \"6952055000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01de1d428201c4d37a2efb7b86e949e47dd557733da977299c2cb576fb68bb4bf5\"\nbalance = \"0\"\nvalidator_public_key = \"010a8ac8d23e6c57fa340c552ddf9199d9cba9166ecc0daee640053ebfc6254610\"\ndelegated_amount = \"6960274000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01b8fe7a2e409e49365873f8f25fb4e582b6954fff29cebf23a3ba621b7b94110e\"\nbalance = \"0\"\nvalidator_public_key = \"013f4f5b2da0d7ed5cf78d5f153a5517ff203c0ed3570061476e793c81669e77d9\"\ndelegated_amount = \"7219808000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01f3fc2d686758ad3692f90806496a3a7a1fa13bcc5df13b3c75c1ef5a15c9a0a8\"\nbalance = \"0\"\nvalidator_public_key = \"010a8ac8d23e6c57fa340c552ddf9199d9cba9166ecc0daee640053ebfc6254610\"\ndelegated_amount = \"7305479000000000\"\n\n[[delegators]]\ndelegator_public_key = \"016012fd412191dd06ebfdfa40b9d5f61aa028caea6de3d5bb8aef95ce943ece18\"\nbalance = \"0\"\nvalidator_public_key = \"01419478cc7a68037c553c0214d595cb6b432c71ef73ece0d7a5f98c5eb1ecb44a\"\ndelegated_amount = \"7703521000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01c4dcd4f6676ea296c013c9d2ce82e78b6eff0617f061d50626748a1d9196dcfb\"\nbalance = \"0\"\nvalidator_public_key = \"0167e08c3b05017d329444dc7d22518ba652cecb2c54669a69e5808ebcab25e42c\"\ndelegated_amount = \"7800000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01b7b1b5856758bd59ebb8beebccd6dbf4bc489628032921dd83e54c919b4d0a72\"\nbalance = \"0\"\nvalidator_public_key = 
\"0167e08c3b05017d329444dc7d22518ba652cecb2c54669a69e5808ebcab25e42c\"\ndelegated_amount = \"7800000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"012325dc6969af448e93fe5299571ae6b4f0ad87b0a51df9f34225b242116416d1\"\nbalance = \"0\"\nvalidator_public_key = \"0167e08c3b05017d329444dc7d22518ba652cecb2c54669a69e5808ebcab25e42c\"\ndelegated_amount = \"7800000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01b40c8de03d4a9ab171b69ff5d2ad5110ac6a80cbd7238b1725295d9e3973ab62\"\nbalance = \"0\"\nvalidator_public_key = \"014c2573f59c70775a41d9cb69b9e3146e6d51567d33a141423b5bf371967e5902\"\ndelegated_amount = \"7979415000000000\"\n\n[[delegators]]\ndelegator_public_key = \"010a6eb8216afcaa59f9202d8bb12e30caf0c46f6c0da08ced34471d80cdfde650\"\nbalance = \"0\"\nvalidator_public_key = \"012bac1d0ff9240ff0b7b06d555815640497861619ca12583ddef434885416e69b\"\ndelegated_amount = \"8125479000000000\"\n\n[[delegators]]\ndelegator_public_key = \"013ad9596f78cd673365d01737911b7dd703bd02c566d5b4688716b524c5f9863f\"\nbalance = \"0\"\nvalidator_public_key = \"012bac1d0ff9240ff0b7b06d555815640497861619ca12583ddef434885416e69b\"\ndelegated_amount = \"8566913000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0106c2df6fd5c11fd54b54cee7160a9230b222654c55ba80dd83e10f316397ed48\"\nbalance = \"0\"\nvalidator_public_key = \"0149fcd9cc0986ba04d77ad48c7def4860bf6952ccb075666e579549386fd2f143\"\ndelegated_amount = \"8586795000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01ed392301164a763bbebe7320fff4effbba4f88705f8a409df2246b638cc231d7\"\nbalance = \"0\"\nvalidator_public_key = \"01026ca707c348ed8012ac6a1f28db031fadd6eb67203501a353b867a08c8b9a80\"\ndelegated_amount = \"8666667000000000\"\n\n[[delegators]]\ndelegator_public_key = \"019426bf1f656fa65c8e1d8b514e788121c4b894cc060f6ce22c84989739356e4c\"\nbalance = \"0\"\nvalidator_public_key = \"01652d9fbd8dbb443af0122cd4347f4107e697306e5b90f93dbf959f7612e5e7d2\"\ndelegated_amount = 
\"8666667000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01baa42a20a89b7dab3a6f0232277d1c9d159bdddec18e2d5e67924605159bfd43\"\nbalance = \"0\"\nvalidator_public_key = \"012f5501236a3bd82d22ee7675e78174ee723565495fd0f43be8a011bfa6494687\"\ndelegated_amount = \"8667224000000000\"\n\n[[delegators]]\ndelegator_public_key = \"015f065acdced524ba07b125a3325565b28f702509d3d09bae001cee972a66d53a\"\nbalance = \"0\"\nvalidator_public_key = \"01faec72d138026edbd470d9ba1f6a05c5fabaa98da8bb41c8c92041d2f58337d2\"\ndelegated_amount = \"8667224000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01d64c582fa65d96eeda1631a4d02575b9c261603fc39005efdcca4a3d1fe12fda\"\nbalance = \"0\"\nvalidator_public_key = \"0149fcd9cc0986ba04d77ad48c7def4860bf6952ccb075666e579549386fd2f143\"\ndelegated_amount = \"8767679000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0191ccaf4a29012fc8b9eed1375580a17b5cfee696cbc5c91f5426f93beb4d6a85\"\nbalance = \"0\"\nvalidator_public_key = \"017b9a85b657e0a8c2e01bf2d80b6b2e6f8d8b4bc6d7c479f21e59dceea761710b\"\ndelegated_amount = \"9620685000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0147a1349d67565cfb8493a6ac02e27d9ea6c42760b03bca1c8eeb3056a0d75c1a\"\nbalance = \"0\"\nvalidator_public_key = \"01d904f8135553a4810487b0d754f9f1c8efb1ee91c96f36e92b9631120a7ece06\"\ndelegated_amount = \"9695479000000000\"\n\n[[delegators]]\ndelegator_public_key = \"017bc476fecb69244f011e63f642936dfb062b1735f4d20c918bbd457bed121927\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"9755323000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01a4873a5470e85db27b36de0064e7b4540f7a17dece44812591fd6fd36a424cc9\"\nbalance = \"0\"\nvalidator_public_key = \"0149fcd9cc0986ba04d77ad48c7def4860bf6952ccb075666e579549386fd2f143\"\ndelegated_amount = \"10326370000000000\"\n\n[[delegators]]\ndelegator_public_key = 
\"01006212e8028fc7b3a6816783d7d6d3bab4763dee3063a127ba7808d245c10897\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"10434247000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0202086CC55934061E263B6027B989EB461719443188618AE2AA5AB8CB2126650215\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"11100000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"020292C62C906FC3869E861E96342581FEF0A995B04125CAEB41FFEF1D72E8E38F9D\"\nbalance = \"0\"\nvalidator_public_key = \"012bac1d0ff9240ff0b7b06d555815640497861619ca12583ddef434885416e69b\"\ndelegated_amount = \"13000000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"020238AF667C991D21BC88E00AF9580C301B184BA97EA59A4D28D4C2FF44FD632756\"\nbalance = \"0\"\nvalidator_public_key = \"01419478cc7a68037c553c0214d595cb6b432c71ef73ece0d7a5f98c5eb1ecb44a\"\ndelegated_amount = \"13739726000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0203950BA427954BA8F1F12EDE9FB25CE5B7839979E0AA190EF77CAB96F06C51CA84\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"13739726000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0202CD2D46B5F4DF1B6FFFAEA82DC2A0E9022DDACAD549C728DC9023721D3CA4C057\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"13768493000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01e47ca5e1c3cb306ea859320e185860ee26edec4409ce8c47ebbfc770c04b52e3\"\nbalance = \"0\"\nvalidator_public_key = \"01765c9d153550285d2e6da2f4407ca8b6c5d2f94098e9794a02609df1068474ef\"\ndelegated_amount = \"13776712500000000\"\n\n[[delegators]]\ndelegator_public_key = \"0136258664950cc030dbdc68e430f6961bef95c6ab2d269dc776ef2c9fa7bbca32\"\nbalance = \"0\"\nvalidator_public_key = 
\"01026ca707c348ed8012ac6a1f28db031fadd6eb67203501a353b867a08c8b9a80\"\ndelegated_amount = \"13776712500000000\"\n\n[[delegators]]\ndelegator_public_key = \"020232EE968558C70952E472BD4AC47D5830F317E91A00A033B45FBAB2F7B75E0158\"\nbalance = \"0\"\nvalidator_public_key = \"018f84c6fc037284f189cc8cb49f89212ff434a5eb050e48cdd164ff3890fbff69\"\ndelegated_amount = \"13793151000000000\"\n\n[[delegators]]\ndelegator_public_key = \"017a030a8b66381b1c79d265667a4a9a8b4639a1fef9e62828644a37508d70e138\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"13854795000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0203C03E5418F35C87C1A90231E6D215012DFD641F665B0BFB229804880A9E1F29E5\"\nbalance = \"0\"\nvalidator_public_key = \"01419478cc7a68037c553c0214d595cb6b432c71ef73ece0d7a5f98c5eb1ecb44a\"\ndelegated_amount = \"13863014000000000\"\n\n[[delegators]]\ndelegator_public_key = \"02034ADD5EE30D7C425EE71FA17E2A45CB72479DC35CDDD0A9B9652FFAD95432CF15\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"13912329000000000\"\n\n[[delegators]]\ndelegator_public_key = \"012f912118b53471ed8dfcf01698eeedd0b4a4b31de9566016accfaeb9fc0c5ce1\"\nbalance = \"0\"\nvalidator_public_key = \"012bac1d0ff9240ff0b7b06d555815640497861619ca12583ddef434885416e69b\"\ndelegated_amount = \"14222260000000000\"\n\n[[delegators]]\ndelegator_public_key = \"017c6bdf6a7557fd8bbf9111acdce3bd563108a6397f4d016c36ef3f818cf3d64b\"\nbalance = \"0\"\nvalidator_public_key = \"01d904f8135553a4810487b0d754f9f1c8efb1ee91c96f36e92b9631120a7ece06\"\ndelegated_amount = \"14727850000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01cc9cb4323129c2181782312b98e479cfd96dc45b9c4d2089bd9a60fe319e3278\"\nbalance = \"0\"\nvalidator_public_key = \"010a8ac8d23e6c57fa340c552ddf9199d9cba9166ecc0daee640053ebfc6254610\"\ndelegated_amount = 
\"15166666900000000\"\n\n[[delegators]]\ndelegator_public_key = \"012f0507177cb1e490bb677e0432441acca28d6241a6224b71012baa42cf05a6dc\"\nbalance = \"0\"\nvalidator_public_key = \"015dfd4b3f997b1eb1a7292eb501845931b8aa9869988a5caa2be79ac4f5ff8a21\"\ndelegated_amount = \"17333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"016151a2c7c6ccab4ddfbb9a96cccb8213d84c4f7d556b99f04e8126825dd8ea2e\"\nbalance = \"0\"\nvalidator_public_key = \"01765c9d153550285d2e6da2f4407ca8b6c5d2f94098e9794a02609df1068474ef\"\ndelegated_amount = \"17333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01d2ebc5ea92af48f4b90df249a4d7d75f60c6790a5bf1a8cbbbc3f46cbb709d0f\"\nbalance = \"0\"\nvalidator_public_key = \"01026ca707c348ed8012ac6a1f28db031fadd6eb67203501a353b867a08c8b9a80\"\ndelegated_amount = \"17333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01cd75340a885776903ffc364d62b550922fe2d8f740cb36e0714ca8238b9811b4\"\nbalance = \"0\"\nvalidator_public_key = \"014c2573f59c70775a41d9cb69b9e3146e6d51567d33a141423b5bf371967e5902\"\ndelegated_amount = \"17333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01583bd62ec1d48d2551869bc152f913c1a65151f50d4364e96713b14156010521\"\nbalance = \"0\"\nvalidator_public_key = \"013eb6775484f25ac4e126e93a350ef2bc259385da5141d331829af7f755b03844\"\ndelegated_amount = \"17333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"020237D50DFE66C504A312FBAFFA1D51DEEB5914F6CECFE3A03FC4F836039779B970\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"17333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"02021650375BE7436B43A819FC422C300EF4A28798B1FCC31D48C4C0E4CEE7428F06\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"17333333000000000\"\n\n[[delegators]]\ndelegator_public_key = 
\"01356542750dc229efee77e2d2b61cb6b79f01ecf801445df47b538478807d8345\"\nbalance = \"0\"\nvalidator_public_key = \"01d904f8135553a4810487b0d754f9f1c8efb1ee91c96f36e92b9631120a7ece06\"\ndelegated_amount = \"20702055000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01a451ce6b1705f0e028f8ee24b92d2e0a20d8a6e97c9ea8c604bcf0de3bd2f563\"\nbalance = \"0\"\nvalidator_public_key = \"01b4815d64e022127b117d25436e4b9a9d9d1338b0c6cb09db3995d4d3ac84d86b\"\ndelegated_amount = \"20720548000000000\"\n\n[[delegators]]\ndelegator_public_key = \"020289E62A85C4569BE688DDC0B5B4FB9FCB7AC3F897DD2A97CD714F8F262E97CB66\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"21462192000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0202CE2C3FC6AD9283600EC50B10EE79D84F86E3BAFF1F9AA5A993175E037346ED60\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"21666667000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01454c9053822d7c1d294a5a776ba851b0cdac6a46eae4474be0f2dd661f26932c\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"21666667000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0202BE54DF25ACE6ACA0C17E932D04B904BA1968B8256A335C74D23095ED1925CE2C\"\nbalance = \"0\"\nvalidator_public_key = \"0182f835993ce0d3596147429ea432b3a025580f458f50bbbaccbbe4c73f1f1113\"\ndelegated_amount = \"21666667000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0202899C2942A8E5C43C5E9792D9BE75A786B04C0BE4BAADB39379E9902897003117\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"21666667000000000\"\n\n[[delegators]]\ndelegator_public_key = \"02034D66B512C22620E400842AFC21B11BCCD07F7076E897E534F4CB90E620FDAC9B\"\nbalance = \"0\"\nvalidator_public_key = 
\"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"21666667000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01798eed4fbf6b0de81c8c73054a21590c625b95306fb60668b36e65cfa1670132\"\nbalance = \"0\"\nvalidator_public_key = \"01765c9d153550285d2e6da2f4407ca8b6c5d2f94098e9794a02609df1068474ef\"\ndelegated_amount = \"26000000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"012d7449cde960bb5cc61c3ffc6303682eea04d93752a84cc61dfe40ebf995d1eb\"\nbalance = \"0\"\nvalidator_public_key = \"01b32b134afdf8585b8c8181f81375a98c5ed12e145c9e6bfea33a55eeccf1fa22\"\ndelegated_amount = \"26830137000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0203431B8CD5A135CBF15A538D6ACD3C4FC1F38207BE12D7AF609862FCC070B1E674\"\nbalance = \"0\"\nvalidator_public_key = \"010a8ac8d23e6c57fa340c552ddf9199d9cba9166ecc0daee640053ebfc6254610\"\ndelegated_amount = \"27588419226000000\"\n\n[[delegators]]\ndelegator_public_key = \"0203334DACD640BC716063D57B3F4332623843A87874A32C4BB7C3760DF23D4B57E5\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"27610959000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0203A8B1E98C42B4FC4A10D7B983D760D2C33FE8D626F2111AE99CC74777E9FB4363\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"27717808000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0202D2239D7F079EA9E9153C61641A895EF984D9AB15BC25591A61EBE5369E9B037C\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"27890411000000000\"\n\n[[delegators]]\ndelegator_public_key = \"02031AAF193C78712F1647D6A787C33D126DD3166461D48F99F9D3B42E240B040B4C\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = 
\"30333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0203A0FE0B5EF6EE7F63A53D6E13716C0BFAE5DE24AA1FD54A99AA7B2CB5A367A2AB\"\nbalance = \"0\"\nvalidator_public_key = \"017de9688caedd0718baed968179ddbe0b0532a8ef0a9a1cb9dfabe9b0f6016fa8\"\ndelegated_amount = \"30476344000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0139c59b850e9a8d26b4778918177deebfe1fcfbbcde715901914d84fdc9704e43\"\nbalance = \"0\"\nvalidator_public_key = \"010a8ac8d23e6c57fa340c552ddf9199d9cba9166ecc0daee640053ebfc6254610\"\ndelegated_amount = \"32164972000000000\"\n\n[[delegators]]\ndelegator_public_key = \"010aeee26a8dcd60bcb69cb00abeb290ee95363aa5e53510b206cfb446f6792413\"\nbalance = \"0\"\nvalidator_public_key = \"0144118517b5ba7e7ca917d5a418c5fb8bf49e279427435d6597f59f8c5bf9ff1f\"\ndelegated_amount = \"33162132000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01192b1f62f97d6b5edf84fcb113b14b875b0e9cc96d7d054d70098c383e5fbf14\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"34791096000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01045ac0fd84e85c852526fa58447fd36c1054cc67cde05992acf65411bb568d01\"\nbalance = \"0\"\nvalidator_public_key = \"012bac1d0ff9240ff0b7b06d555815640497861619ca12583ddef434885416e69b\"\ndelegated_amount = \"34791096000000000\"\n\n[[delegators]]\ndelegator_public_key = \"010cdf3c6f2ecf000b7b6adf053d2755d3eabb41a5529aa8c1e6b1f958dc8f0b49\"\nbalance = \"0\"\nvalidator_public_key = \"01652d9fbd8dbb443af0122cd4347f4107e697306e5b90f93dbf959f7612e5e7d2\"\ndelegated_amount = \"36527397000000000\"\n\n[[delegators]]\ndelegator_public_key = \"020218BBAA66CF1B8F7631B5887A1CC1C5F57FCEFD54D8399632EA5D84C029740720\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"37143395000000000\"\n\n[[delegators]]\ndelegator_public_key = 
\"016dd414ee063ec490d07aee039f7f65d51bf0e80d8f8d9265ad129a2ab56934ae\"\nbalance = \"0\"\nvalidator_public_key = \"012bac1d0ff9240ff0b7b06d555815640497861619ca12583ddef434885416e69b\"\ndelegated_amount = \"39866667000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0202B279F0FB3825C3B4B090DF06B5675FA59E6EF29D22C1F7FE1B8CAC8CB7CCFD12\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"39980061000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01431edff6e7748d0e1d7595168e9be134851b391fdbf808660a7cb2e4efc91cf8\"\nbalance = \"0\"\nvalidator_public_key = \"017d940644deeb8eea3d94f62cfa65a9bea4eb2e8ec9433104bb145d73fd39e98f\"\ndelegated_amount = \"41589041000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01c7e757c775939a61c977637d76ec699a4c6c5fc21697f91d67ee9f001df69297\"\nbalance = \"0\"\nvalidator_public_key = \"01ba1dcfbe8dba48b88674bb007c000391c0ea36b5d80570c113a42a9823d702c2\"\ndelegated_amount = \"42723288000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01b65597c2a70551546e8c99513a6c6f746fa09ca6d9f43b41a9202272d5f952bf\"\nbalance = \"0\"\nvalidator_public_key = \"0145316b84fee8735291f5a29206e5211c74ab828b0382bb475a4a6e799894ea11\"\ndelegated_amount = \"43333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"02021903CE3B15FB13B32E8E7AB7F4BEC39F8B341EA6C0C9670FD2C16B0591043F7A\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"43333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0202EFD4EE6001163101D035C6652E60C2E5C96129F17A6E4834101EC6E9CD57F002\"\nbalance = \"0\"\nvalidator_public_key = \"01419478cc7a68037c553c0214d595cb6b432c71ef73ece0d7a5f98c5eb1ecb44a\"\ndelegated_amount = \"43333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0202D30C2BE47CE9AE62E36F5A53225D6590667B6809EA5F6DD53C3083518BAC2A1F\"\nbalance = \"0\"\nvalidator_public_key = 
\"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"49800621774000000\"\n\n[[delegators]]\ndelegator_public_key = \"01a41493ada4d34760f669588c4812d9859334914feec7419d7f612043838a3e7a\"\nbalance = \"0\"\nvalidator_public_key = \"010a8ac8d23e6c57fa340c552ddf9199d9cba9166ecc0daee640053ebfc6254610\"\ndelegated_amount = \"65000000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"020284B66997B94DEDB322F34C6A29EC524561CBBF20BF1ECA67024C3E94E7F36ED5\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"65000000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01a79ba9e6ee83292a586213843d4471e1ca04de505885934696d9bb62b5823d2e\"\nbalance = \"0\"\nvalidator_public_key = \"0145316b84fee8735291f5a29206e5211c74ab828b0382bb475a4a6e799894ea11\"\ndelegated_amount = \"65000000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"020344840C1945419E68E5D30D7549F5C1D667869D4EAD5AA1C740DFE4B697E35369\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"70122374000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01f6324ce3c4c82b54a0cea605a11873d7d9a15be82686e39c10c1227418ec8e25\"\nbalance = \"0\"\nvalidator_public_key = \"01b4815d64e022127b117d25436e4b9a9d9d1338b0c6cb09db3995d4d3ac84d86b\"\ndelegated_amount = \"105020548000000000\"\n\n[[delegators]]\ndelegator_public_key = \"020346A5AE20EACADECF6C9E740C478D5A9FAD50757DA6F122560C1441328E0DED21\"\nbalance = \"0\"\nvalidator_public_key = \"011117189c666f81c5160cd610ee383dc9b2d0361f004934754d39752eedc64957\"\ndelegated_amount = \"111285969000000000\"\n\n[[delegators]]\ndelegator_public_key = \"020391DFD3E9B1673BBCE2EA44471E55D1B168769BA0315104AF43290CEFAA2EEEAC\"\nbalance = \"0\"\nvalidator_public_key = \"011117189c666f81c5160cd610ee383dc9b2d0361f004934754d39752eedc64957\"\ndelegated_amount = 
\"116026124000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01dba4402d0e87c7d224cbf5d67e50dfefc95d7333050d83af50b3e610a135dcb7\"\nbalance = \"0\"\nvalidator_public_key = \"012bac1d0ff9240ff0b7b06d555815640497861619ca12583ddef434885416e69b\"\ndelegated_amount = \"150297534000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01846c1a2bba6908a154b2070d973caf2641ff37180d33c1ed6147616d74a5fa0e\"\nbalance = \"0\"\nvalidator_public_key = \"0145316b84fee8735291f5a29206e5211c74ab828b0382bb475a4a6e799894ea11\"\ndelegated_amount = \"160793460000000000\"\n\n[[delegators]]\ndelegator_public_key = \"02021E54DB769A70BA0E54D2550D73A31B6168F3054A73CDAD30A3F4172C6E7903A9\"\nbalance = \"0\"\nvalidator_public_key = \"0163e03c3aa2b383f9d1b2f7c69498d339dcd1061059792ce51afda49135ff7876\"\ndelegated_amount = \"250000000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0203E81558ED17FF382273BC9006E3DA1409A85E149EE4A143FEBBF603088D7FBA1F\"\nbalance = \"0\"\nvalidator_public_key = \"01aa2976834459371b1cf7f476873dd091a0e364bd18abed8e77659b83fd892084\"\ndelegated_amount = \"250000000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"010b24b2974ac8dd9027a06be487c383732a5e4605d254fb017d9fce388d347e77\"\nbalance = \"0\"\nvalidator_public_key = \"0190c434129ecbaeb34d33185ab6bf97c3c493fc50121a56a9ed8c4c52855b5ac1\"\ndelegated_amount = \"453839640000000000\"\n\n\n"
  },
  {
    "path": "resources/mainnet/chainspec.toml",
    "content": "[protocol]\n# Protocol version.\nversion = '2.2.0'\n# Whether we need to clear latest blocks back to the switch block just before the activation point or not.\nhard_reset = true\n# This protocol version becomes active at this point.\n#\n# If it is a timestamp string, it represents the timestamp for the genesis block.  This is the beginning of era 0.  By\n# this time, a sufficient majority (> 50% + F/2 — see finality_threshold_fraction below) of validator nodes must be up\n# and running to start the blockchain.  This timestamp is also used in seeding the pseudo-random number generator used\n# in contract-runtime for computing genesis post-state hash.\n#\n# If it is an integer, it represents an era ID, meaning the protocol version becomes active at the start of this era.\nactivation_point = 21742\n\n[network]\n# Human readable name for convenience; the genesis_hash is the true identifier.  The name influences the genesis hash by\n# contributing to the seeding of the pseudo-random number generator used in contract-runtime for computing genesis\n# post-state hash.\nname = 'casper'\n# The maximum size of an acceptable networking message in bytes.  Any message larger than this will\n# be rejected at the networking level.\nmaximum_net_message_size = 25_165_824\n\n[core]\n# Era duration.\nera_duration = '120 minutes'\n# Minimum number of blocks per era.  An era will take longer than `era_duration` if that is necessary to reach the\n# minimum height.\nminimum_era_height = 100\n# Minimum difference between a block's and its child's timestamp.\nminimum_block_time = '8000 ms'\n# Number of slots available in validator auction.\nvalidator_slots = 100\n# A number between 0 and 1 representing the fault tolerance threshold as a fraction, used by the internal finalizer.\n# It is the fraction of validators that would need to equivocate to make two honest nodes see two conflicting blocks as\n# finalized: A higher value F makes it safer to rely on finalized blocks.  
It also makes it more difficult to finalize\n# blocks, however, and requires strictly more than (F + 1)/2 validators to be working correctly.\nfinality_threshold_fraction = [1, 3]\n# Protocol version from which nodes are required to hold strict finality signatures.\nstart_protocol_version_with_strict_finality_signatures_required = '1.5.0'\n# Which finality is required for legacy blocks. Options are 'Strict', 'Weak' and 'Any'.\n# Used to determine finality sufficiency for new joiners syncing blocks created\n# in a protocol version before\n# `start_protocol_version_with_strict_finality_signatures_required`.\nlegacy_required_finality = 'Any'\n# Number of eras before an auction actually defines the set of validators.  If you bond with a sufficient bid in era N,\n# you will be a validator in era N + auction_delay + 1.\nauction_delay = 1\n# The period after genesis during which a genesis validator's bid is locked.\nlocked_funds_period = '0 days'\n# The period in which genesis validator's bid is released over time after it's unlocked.\nvesting_schedule_period = '0 weeks'\n# Default number of eras that need to pass to be able to withdraw unbonded funds.\nunbonding_delay = 7\n# Round seigniorage rate represented as a fraction of the total supply.\n#\n# Annual issuance: 8%\n# Minimum block time: 8000 milliseconds\n# Ticks per year: 31536000000\n#\n# (1+0.08)^((8000)/31536000000)-1 is expressed as a fractional number below\n# Python:\n# from fractions import Fraction\n# Fraction((1 + 0.08)**((8000)/31536000000) - 1).limit_denominator(1000000000)\nround_seigniorage_rate = [11, 563427926]\n# Maximum number of associated keys for a single account.\nmax_associated_keys = 100\n# Maximum height of contract runtime call stack.\nmax_runtime_call_stack_height = 12\n# Minimum allowed delegation amount in motes\nminimum_delegation_amount = 500_000_000_000\n# Maximum allowed delegation amount in motes\nmaximum_delegation_amount = 1_000_000_000_000_000_000\n# Minimum bid amount allowed in 
motes. Withdrawing one's bid to an amount strictly less than\n# the value specified will be treated as a full unbond of a validator and their associated delegators\nminimum_bid_amount = 500_000_000_000\n# Global state prune batch size (0 = this feature is off)\nprune_batch_size = 0\n# Enables strict arguments checking when calling a contract; i.e. that all non-optional args are provided and of the correct `CLType`.\nstrict_argument_checking = false\n# Number of simultaneous peer requests.\nsimultaneous_peer_requests = 5\n# The consensus protocol to use. Options are \"Zug\" and \"Highway\".\nconsensus_protocol = 'Zug'\n# The maximum amount of delegators per validator.\nmax_delegators_per_validator = 1200\n# Minimum delegation rate validators can specify (0-100).\nminimum_delegation_rate = 0\n# The split in finality signature rewards between block producer and participating signers.\nfinders_fee = [1, 5]\n# The proportion of baseline rewards going to reward finality signatures specifically.\nfinality_signature_proportion = [95, 100]\n# Lookback interval indicating which past block we are looking at to reward.\nsignature_rewards_max_delay = 6\n# Allows transfers between accounts in the blockchain network.\n#\n# Setting this to false restricts normal accounts from sending tokens to other accounts, allowing transfers only to administrators.\n# Changing this option makes sense only on private chains.\nallow_unrestricted_transfers = true\n# Enables the auction entry points 'delegate' and 'add_bid'.\n#\n# Setting this to false makes sense only for private chains which don't need to auction new validator slots. 
These\n# auction entry points will return an error if called when this option is set to false.\nallow_auction_bids = true\n# If set to false, then consensus doesn't compute rewards and always uses 0.\ncompute_rewards = true\n# Defines how refunds of the unused portion of payment amounts are calculated and handled.\n#\n# Valid options are:\n#   'refund': a ratio of the unspent token is returned to the spender.\n#   'burn': a ratio of the unspent token is burned.\n#   'no_refund': no refunds are paid out; this is functionally equivalent to refund with 0% ratio.\n# This causes excess payment amounts to be sent to either a\n# pre-defined purse, or back to the sender.  The refunded amount is calculated as the given ratio of the payment amount\n# minus the execution costs.\nrefund_handling = { type = 'refund', refund_ratio = [75, 100] }\n# Defines how fees are handled.\n#\n# Valid options are:\n#   'no_fee': fees are eliminated.\n#   'pay_to_proposer': fees are paid to the block proposer\n#   'accumulate': fees are accumulated in a special purse and distributed at the end of each era evenly among all\n#                 administrator accounts\n#   'burn': fees are burned\nfee_handling = { type = 'burn' }\n# If a validator would recieve a validator credit, it cannot exceed this percentage of their total stake.\nvalidator_credit_cap = [1, 5]\n# Defines how pricing is handled.\n#\n# Valid options are:\n#   'payment_limited': senders of transaction self-specify how much they pay.\n#   'fixed': costs are fixed, per the cost table\n#   'prepaid': prepaid transaction (currently not supported)\npricing_handling = { type = 'payment_limited' }\n# Does the network allow pre-payment for future\n# execution? 
Currently not supported.\n#\nallow_prepaid = false\n# Defines how gas holds affect available balance calculations.\n#\n# Valid options are:\n#   'accrued': sum of full value of all non-expired holds.\n#   'amortized': sum of each hold is amortized over the time remaining until expiry.\n#\n# For instance, if 12 hours remained on a gas hold with a 24-hour `gas_hold_interval`,\n#   with accrued, the full hold amount would be applied\n#   with amortized, half the hold amount would be applied\ngas_hold_balance_handling = { type = 'accrued' }\n# Defines how long gas holds last.\n#\n# If fee_handling is set to 'no_fee', the system places a balance hold on the payer\n# equal to the value the fee would have been. Such balance holds expire after a time\n# interval has elapsed. This setting controls how long that interval is. The available\n# balance of a purse equals its total balance minus the held amount(s) of non-expired\n# holds (see gas_hold_balance_handling setting for details of how that is calculated).\n#\n# For instance, if gas_hold_interval is 24 hours and 100 gas is used from a purse,\n# a hold for 100 is placed on that purse and is considered when calculating total balance\n# for 24 hours starting from the block_time when the hold was placed.\ngas_hold_interval = '24 hours'\n# List of public keys of administrator accounts. Setting this option makes only on private chains which require\n# administrator accounts for regulatory reasons.\nadministrators = []\n# Flag that triggers a migration of all userland accounts and contracts present in global state to the addressable\n# entity in lazy manner.\n# If the flag is set to false then no accounts and contracts are migrated during a protocol upgrade;\n# i.e. 
all Account records will be present under Key::Account and Contracts and their associated ContractPackage\n# will be written underneath Key::Hash.\n# If the flag is set to true then accounts and contracts are migrated lazily; i.e on first use of the Account\n# and/or Contract as part of the execution of a Transaction. This means the Accounts/Contracts will be migrated\n# to their corresponding AddressableEntity and the NamedKeys for previous record and sepeareted and wrriten\n# as discrete top level records. For Contracts specifically the entrypoints are also written as discrete top\n# level records\n# Note: Enabling of the AddressableEntity feature is one-way; i.e once enabled as part of a protocol upgrade\n# the flag cannot be disabled in a future protocol upgrade.\nenable_addressable_entity = false\n# This value is used as the penalty payment amount, the lowest cost, and the minimum balance amount.\nbaseline_motes_amount = 2_500_000_000\n# Flag on whether ambiguous entity versions returns an execution error.\ntrap_on_ambiguous_entity_version = false\n# purse uref-5bd5c35f2897844b307b9efbb2d7f01f4d51a2ecea8d8b88abd4e3c3a4594693-007 belongs to\n# CA Ecosystem Sustainability account 020245b32289d807f3fcd5f9dbec94ff51ee57857f4b39a3b9d6fb99305317d18a32\nrewards_handling =  { type = 'sustain', ratio = [2,8], purse_address = \"uref-5bd5c35f2897844b307b9efbb2d7f01f4d51a2ecea8d8b88abd4e3c3a4594693-007\" }\n\n[highway]\n# Highway dynamically chooses its round length, between minimum_block_time and maximum_round_length.\nmaximum_round_length = '66 seconds'\n\n[transactions]\n# The duration after the transaction timestamp that it can be included in a block.\nmax_ttl = '2 hours'\n# The maximum number of approvals permitted in a single block.\nblock_max_approval_count = 2600\n# Maximum block size in bytes including transactions contained by the block.  
0 means unlimited.\nmax_block_size = 2_621_400\n# The upper limit of total gas of all transactions in a block.\nblock_gas_limit = 812_500_000_000\n# The minimum amount in motes for a valid native transfer.\nnative_transfer_minimum_motes = 2_500_000_000\n# The maximum value to which `transaction_acceptor.timestamp_leeway` can be set in the config.toml file.\nmax_timestamp_leeway = '5 seconds'\n\n# Configuration of the transaction runtime.\n[transactions.enabled_runtime]\nvm_casper_v1 = true\nvm_casper_v2 = false\n\n[transactions.v1]\n# The configuration settings for the lanes of transactions including both native and Wasm based interactions.\n# Currently the node supports two native interactions the mint and auction and have the reserved identifiers of 0 and 1\n# respectively\n# The remaining wasm based lanes specify the range of configuration settings for a given Wasm based transaction\n# within a given lane.\n# The maximum length in bytes of runtime args per V1 transaction.\n# [0] -> Transaction lane label (apart from the reserved native identifiers these are simply labels)\n# Note: For the given mainnet implementation we specially reserve the label 2 for install and upgrades and\n# the lane must be present and defined.\n# Different casper networks may not impose such a restriction.\n# [1] -> Max serialized length of the entire transaction in bytes for a given transaction in a certain lane\n# [2] -> Max args length size in bytes for a given transaction in a certain lane\n# [3] -> Transaction gas limit for a given transaction in a certain lane\n# [4] -> The maximum number of transactions the lane can contain\nnative_mint_lane = [0, 2048, 1024, 100_000_000, 325]\nnative_auction_lane = [1, 3096, 2048, 2_500_000_000, 325]\ninstall_upgrade_lane = [2, 750_000, 2048, 1_000_000_000_000, 1]\nwasm_lanes = [\n    [3, 750_000, 2048, 1_000_000_000_000, 1],\n    [4, 131_072, 1024, 100_000_000_000, 2],\n    [5, 65_536, 512, 5_000_000_000, 40]\n]\n\n[transactions.deploy]\n# The 
maximum number of Motes allowed to be spent during payment.  0 means unlimited.\nmax_payment_cost = '0'\n# The limit of length of serialized payment code arguments.\npayment_args_max_length = 1024\n# The limit of length of serialized session code arguments.\nsession_args_max_length = 1024\n\n[wasm.v1]\n# Amount of free memory (in 64kB pages) each contract can use for stack.\nmax_memory = 64\n# Max stack height (native WebAssembly stack limiter).\nmax_stack_height = 500\n\n[storage_costs]\n# Gas charged per byte stored in the global state.\ngas_per_byte = 1_117_587\n\n# For each opcode cost below there exists a static cost and a dynamic cost.\n# The static cost is a fixed cost for each opcode that is hardcoded and validated by benchmarks.\n[wasm.v1.opcode_costs]\n# Bit operations multiplier.\nbit = 105\n# Arithmetic add operations multiplier.\nadd = 105\n# Mul operations multiplier.\nmul = 105\n# Div operations multiplier.\ndiv = 105\n# Memory load operation multiplier.\nload = 105\n# Memory store operation multiplier.\nstore = 105\n# Const store operation multiplier.\nconst = 105\n# Local operations multiplier.\nlocal = 105\n# Global operations multiplier.\nglobal = 105\n# Integer operations multiplier.\ninteger_comparison = 105\n# Conversion operations multiplier.\nconversion = 105\n# Unreachable operation multiplier.\nunreachable = 105\n# Nop operation multiplier.\nnop = 105\n# Get current memory operation multiplier.\ncurrent_memory = 105\n# Grow memory cost, per page (64kb).\ngrow_memory = 900\n# Sign extension operations cost\nsign = 105\n\n# Control flow operations multiplier.\n[wasm.v1.opcode_costs.control_flow]\nblock = 255\nloop = 255\nif = 105\nelse = 105\nend = 105\nbr = 1665\nbr_if = 510\nreturn = 105\nselect = 105\ncall = 225\ncall_indirect = 270\ndrop = 105\n\n[wasm.v1.opcode_costs.control_flow.br_table]\n# Fixed cost per `br_table` opcode\ncost = 150\n# Size of target labels in the `br_table` opcode will be multiplied by 
`size_multiplier`\nsize_multiplier = 100\n\n# Host function declarations are located in smart_contracts/contract/src/ext_ffi.rs\n[wasm.v1.host_function_costs]\nadd = { cost = 5_800, arguments = [0, 0, 0, 0] }\nadd_associated_key = { cost = 1_200_000, arguments = [0, 0, 0] }\nadd_contract_version = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 0, 0] }\nadd_contract_version_with_message_topics = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 30_000, 0, 0] }\nadd_package_version_with_message_topics = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 30_000, 0, 0] }\nblake2b = { cost = 1_200_000, arguments = [0, 120_000, 0, 0] }\ncall_contract = { cost = 300_000_000, arguments = [0, 0, 0, 120_000, 0, 120_000, 0] }\ncall_versioned_contract = { cost = 300_000_000, arguments = [0, 0, 0, 0, 0, 120_000, 0, 120_000, 0] }\ncreate_contract_package_at_hash = { cost = 200, arguments = [0, 0] }\ncreate_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0] }\ncreate_purse = { cost = 2_500_000_000, arguments = [0, 0] }\ndisable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] }\nget_balance = { cost = 3_000_000, arguments = [0, 0, 0] }\nget_blocktime = { cost = 330, arguments = [0] }\nget_caller = { cost = 380, arguments = [0] }\nget_key = { cost = 2_000, arguments = [0, 440, 0, 0, 0] }\nget_main_purse = { cost = 1_300, arguments = [0] }\nget_named_arg = { cost = 200, arguments = [0, 120_000, 0, 120_000] }\nget_named_arg_size = { cost = 200, arguments = [0, 0, 0] }\nget_phase = { cost = 710, arguments = [0] }\nget_system_contract = { cost = 1_100, arguments = [0, 0, 0] }\nhas_key = { cost = 1_500, arguments = [0, 840] }\nis_valid_uref = { cost = 760, arguments = [0, 0] }\nload_named_keys = { cost = 42_000, arguments = [0, 0] }\nnew_uref = { cost = 17_000, arguments = [0, 0, 590] }\nrandom_bytes = { cost = 200, arguments = [0, 0] }\nprint = { cost = 20_000, arguments = [0, 4_600] }\nprovision_contract_user_group_uref = { cost = 
200, arguments = [0, 0, 0, 0, 0] }\nput_key = { cost = 100_000_000, arguments = [0, 120_000, 0, 120_000] }\nread_host_buffer = { cost = 3_500, arguments = [0, 310, 0] }\nread_value = { cost = 60_000, arguments = [0, 120_000, 0] }\ndictionary_get = { cost = 5_500, arguments = [0, 590, 0] }\nremove_associated_key = { cost = 4_200, arguments = [0, 0] }\nremove_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0] }\nremove_contract_user_group_urefs = { cost = 200, arguments = [0, 0, 0, 0, 0, 120_000] }\nremove_key = { cost = 61_000, arguments = [0, 3_200] }\nret = { cost = 23_000, arguments = [0, 420_000] }\nrevert = { cost = 500, arguments = [0] }\nset_action_threshold = { cost = 74_000, arguments = [0, 0] }\ntransfer_from_purse_to_account = { cost = 2_500_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] }\ntransfer_from_purse_to_purse = { cost = 82_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0] }\ntransfer_to_account = { cost = 2_500_000_000, arguments = [0, 0, 0, 0, 0, 0, 0] }\nupdate_associated_key = { cost = 4_200, arguments = [0, 0, 0] }\nwrite = { cost = 14_000, arguments = [0, 0, 0, 980] }\ndictionary_put = { cost = 9_500, arguments = [0, 1_800, 0, 520] }\nenable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] }\nmanage_message_topic = { cost = 200, arguments = [0, 30_000, 0, 0] }\nemit_message = { cost = 200, arguments = [0, 30_000, 0, 120_000] }\ngeneric_hash = { cost = 1_200_000, arguments = [0, 120_000, 0, 0, 0] }\ncost_increase_per_message = 50\nget_block_info = { cost = 330, arguments = [0, 0] }\nrecover_secp256k1 = { cost = 1_300_000, arguments = [0, 120_000, 0, 0, 0, 0] }\nverify_signature = { cost = 1_300_000, arguments = [0, 120_000, 0, 0, 0, 0] }\ncall_package_version = { cost = 300_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 120_000, 0, 120_000, 0] }\n\n[wasm.v2]\n# Amount of free memory each contract can use for stack.\nmax_memory = 64\n\n[wasm.v2.opcode_costs]\n# Bit operations multiplier.\nbit = 105\n# Arithmetic add operations 
multiplier.\nadd = 105\n# Mul operations multiplier.\nmul = 105\n# Div operations multiplier.\ndiv = 105\n# Memory load operation multiplier.\nload = 105\n# Memory store operation multiplier.\nstore = 105\n# Const store operation multiplier.\nconst = 105\n# Local operations multiplier.\nlocal = 105\n# Global operations multiplier.\nglobal = 105\n# Integer operations multiplier.\ninteger_comparison = 105\n# Conversion operations multiplier.\nconversion = 105\n# Unreachable operation multiplier.\nunreachable = 105\n# Nop operation multiplier.\nnop = 105\n# Get current memory operation multiplier.\ncurrent_memory = 105\n# Grow memory cost, per page (64kb).\ngrow_memory = 900\n# Sign extension operations cost\nsign = 105\n\n# Control flow operations multiplier.\n[wasm.v2.opcode_costs.control_flow]\nblock = 255\nloop = 255\nif = 105\nelse = 105\nend = 105\nbr = 1665\nbr_if = 510\nreturn = 105\nselect = 105\ncall = 225\ncall_indirect = 270\ndrop = 105\n\n[wasm.v2.opcode_costs.control_flow.br_table]\n# Fixed cost per `br_table` opcode\ncost = 150\n# Size of target labels in the `br_table` opcode will be multiplied by `size_multiplier`\nsize_multiplier = 100\n\n[wasm.v2.host_function_costs]\nread = { cost = 0, arguments = [0, 0, 0, 0, 0, 0] }\nwrite = { cost = 0, arguments = [0, 0, 0, 0, 0] }\nremove = { cost = 0, arguments = [0, 0, 0] }\ncopy_input = { cost = 0, arguments = [0, 0] }\nret = { cost = 0, arguments = [0, 0] }\ncreate = { cost = 0, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] }\ntransfer = { cost = 0, arguments = [0, 0, 0] }\nenv_balance = { cost = 0, arguments = [0, 0, 0, 0] }\nupgrade = { cost = 0, arguments = [0, 0, 0, 0, 0, 0] }\ncall = { cost = 0, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] }\nprint = { cost = 0, arguments = [0, 0] }\nemit = { cost = 0, arguments = [0, 0, 0, 0] }\nenv_info = { cost = 0, arguments = [0, 0] }\n\n[wasm.messages_limits]\nmax_topic_name_size = 256\nmax_topics_per_contract = 128\nmax_message_size = 1_024\n\n[system_costs]\n# Penalty 
charge for calling invalid entry point in a system contract.\nno_such_entrypoint = 2_500_000_000\n\n[system_costs.auction_costs]\nget_era_validators = 2_500_000_000\nread_seigniorage_recipients = 5_000_000_000\nadd_bid = 2_500_000_000\nwithdraw_bid = 2_500_000_000\ndelegate = 2_500_000_000\nundelegate = 2_500_000_000\nrun_auction = 2_500_000_000\nslash = 2_500_000_000\ndistribute = 2_500_000_000\nwithdraw_delegator_reward = 5_000_000_000\nwithdraw_validator_reward = 5_000_000_000\nread_era_id = 2_500_000_000\nactivate_bid = 2_500_000_000\nredelegate = 2_500_000_000\nchange_bid_public_key = 5_000_000_000\nadd_reservations = 2_500_000_000\ncancel_reservations = 2_500_000_000\n\n[system_costs.mint_costs]\nmint = 2_500_000_000\nreduce_total_supply = 2_500_000_000\ncreate = 2_500_000_000\nbalance = 100_000_000\nburn = 100_000_000\ntransfer = 100_000_000\nread_base_round_reward = 2_500_000_000\nmint_into_existing_purse = 2_500_000_000\n\n[system_costs.handle_payment_costs]\nget_payment_purse = 10_000\nset_refund_purse = 10_000\nget_refund_purse = 10_000\nfinalize_payment = 2_500_000_000\n\n[system_costs.standard_payment_costs]\npay = 10_000\n\n[vacancy]\n# The cost of a transaction is based on a multiplier. This allows for economic disincentives for misuse of the network.\n#\n# The network starts with a current_gas_price of min_gas_price.\n#\n# Each block has multiple limits (bytes, transactions, transfers, gas, etc.)\n# The utilization for a block is determined by the highest percentage utilization of each these limits.\n#\n# Ex: transfers limit is 650 and transactions limit is 20 (assume other limits are not a factor here)\n#     19 transactions -> 19/20 or 95%\n#     600 transfers -> 600/650 or 92.3%\n#     resulting block utilization is 95\n#\n# The utilization for an era is the average of all block utilizations. 
At the switch block, the dynamic gas_price is\n# adjusted with the following:\n#\n# If utilization was below the lower_threshold, current_gas_price is decremented by one if higher than min_gas_price.\n# If utilization falls between the thresholds, current_gas_price is not changed.\n# If utilization was above the upper_threshold, current_gas_price is incremented by one if lower than max_gas_price.\n#\n# The cost charged for the transaction is simply the gas_used * current_gas_price.\nupper_threshold = 90\nlower_threshold = 50\nmax_gas_price = 1\nmin_gas_price = 1\n"
  },
  {
    "path": "resources/mainnet/config-example.toml",
    "content": "# ================================\n# Configuration options for a node\n# ================================\n[node]\n\n# If set, use this hash as a trust anchor when joining an existing network.\n#trusted_hash = 'HEX-FORMATTED BLOCK HASH'\n\n# Historical sync behavior for this node. Options are:\n#  'ttl'      (node will attempt to acquire all block data to comply with time to live enforcement)\n#  'genesis'  (node will attempt to acquire all block data back to genesis)\n#  'nosync'   (node will only acquire blocks moving forward)\n#  'isolated' (node will initialize without peers and will not accept peers)\n#  'completeblock' (node will acquire complete block and shutdown)\n# note: the only two states allowed to switch to Validate reactor state are `genesis` and `ttl`.\n#       it is recommended for dedicated validator nodes to be in ttl mode to increase\n#       their ability to maintain maximal uptime...if a long-running genesis validator\n#       goes offline and comes back up while in genesis mode, it must backfill\n#       any gaps in its block awareness before resuming validation.\n#\n#       it is recommended for reporting non-validator nodes to be in genesis mode to\n#       enable support for queries at any block height.\n#\n#       it is recommended for non-validator working nodes (for dapp support, etc) to run in\n#       ttl or nosync mode (depending upon their specific data requirements).\n#\n#       thus for instance a node backing a block explorer would prefer genesis mode,\n#       while a node backing a dapp interested in very recent activity would prefer to run in nosync mode,\n#       and a node backing a dapp interested in auction activity or tracking trends would prefer to run in ttl mode.\n# note: as time goes on, the time to sync back to genesis takes progressively longer.\n# note: ttl is a chainspec configured behavior on a given network; consult the `max_ttl` chainspec setting\n#       (it is currently ~2 hours by default on 
production and production-like networks but subject to change).\n# note: `nosync` is incompatible with validator behavior; a nosync node is prevented from participating\n#        in consensus / switching to validate mode. it is primarily for lightweight nodes that are\n#        only interested in recent activity.\n# note: an isolated node will not connect to, sync with, or keep up with the network, but will respond to\n#       binary port, rest server, event server, and diagnostic port connections.\nsync_handling = 'ttl'\n\n# Idle time after which the syncing process is considered stalled.\nidle_tolerance = '20 minutes'\n\n# When the syncing process is considered stalled, it'll be retried up to `max_attempts` times.\nmax_attempts = 3\n\n# Default delay for the control events that have no dedicated delay requirements.\ncontrol_logic_default_delay = '1 second'\n\n# Flag which forces the node to resync all the blocks.\nforce_resync = false\n\n# A timeout for the ShutdownForUpgrade state, after which the node will upgrade even if not all\n# conditions are satisfied.\nshutdown_for_upgrade_timeout = '2 minutes'\n\n# Maximum time a node will wait for an upgrade to commit.\nupgrade_timeout = '30 seconds'\n\n# The node detects when it should do a controlled shutdown when it is in a detectably bad state\n# in order to avoid potentially catastrophic uncontrolled crashes. Generally, a node should be\n# allowed to shutdown, and if restarted that node will generally recover gracefully and resume\n# normal operation. However, actively validating nodes have subjective state in memory that is\n# lost on shutdown / restart and must be reacquired from other validating nodes on restart.\n# If all validating nodes shutdown in the middle of an era, social consensus is required to restart\n# the network. 
As a mitigation for that, the following config can be set to true on some validator\n# nodes to cause nodes that are supposed to be validators in the current era to ignore controlled\n# shutdown events and stay up. This allows them to act as sentinels for the consensus data for\n# other restarting nodes. This config is inert on non-validating nodes.\nprevent_validator_shutdown = false\n\n# =================================\n# Configuration options for logging\n# =================================\n[logging]\n\n# Output format.  Possible values are 'text' or 'json'.\nformat = 'json'\n\n# Colored output.  Has no effect if format = 'json'.\ncolor = false\n\n# Abbreviate module names in text output.  Has no effect if format = 'json'.\nabbreviate_modules = false\n\n\n# ===================================\n# Configuration options for consensus\n# ===================================\n[consensus]\n\n# Path (absolute, or relative to this config.toml) to validator's secret key file used to sign\n# consensus messages.\nsecret_key_path = '/etc/casper/validator_keys/secret_key.pem'\n\n# The maximum number of blocks by which execution is allowed to lag behind finalization.\n# If it is more than that, consensus will pause, and resume once the executor has caught up.\nmax_execution_delay = 6\n\n\n# =======================================\n# Configuration options for Zug consensus\n# =======================================\n[consensus.zug]\n\n# Request the latest protocol state from a random peer periodically, with this interval.\n# '0 seconds' means it is disabled and we never request the protocol state from a peer.\nsync_state_interval = '1 second'\n\n# Log inactive or faulty validators periodically, with this interval.\n# '0 seconds' means it is disabled and we never print the log message.\nlog_participation_interval = '1 minute'\n\n# The minimal proposal timeout. 
Validators wait this long for a proposal to receive a quorum of\n# echo messages, before they vote to make the round skippable and move on to the next proposer.\nproposal_timeout = '5 seconds'\n\n# The additional proposal delay that is still considered fast enough, in percent. This should\n# take into account variables like empty vs. full blocks, network traffic etc.\n# E.g. if proposing a full block while under heavy load takes 50% longer than an empty one\n# while idle this should be at least 50, meaning that the timeout is 50% longer than\n# necessary for a quorum of recent proposals, approximately.\nproposal_grace_period = 200\n\n# The average number of rounds after which the proposal timeout adapts by a factor of 2.\n# Note: It goes up faster than it goes down: it takes fewer rounds to double than to halve.\nproposal_timeout_inertia = 10\n\n# The maximum difference between validators' clocks we expect. Incoming proposals whose timestamp\n# lies in the future by more than that are rejected.\nclock_tolerance = '1 second'\n\n\n# ===========================================\n# Configuration options for Highway consensus\n# ===========================================\n[consensus.highway]\n\n# The duration for which incoming vertices with missing dependencies should be kept in a queue.\npending_vertex_timeout = '30 minutes'\n\n# Request the latest protocol state from a random peer periodically, with this interval.\n# '0 seconds' means it is disabled and we never request the protocol state from a peer.\nrequest_state_interval = '20 seconds'\n\n# Log inactive or faulty validators periodically, with this interval.\n# '0 seconds' means it is disabled and we never print the log message.\nlog_participation_interval = '1 minute'\n\n# Log the synchronizer state periodically, with this interval.\n# '0 seconds' means it is disabled and we never print the log message.\nlog_synchronizer_interval = '5 seconds'\n\n# Log the size of every incoming and outgoing serialized 
unit.\nlog_unit_sizes = false\n\n# The maximum number of peers we request the same vertex from in parallel.\nmax_requests_for_vertex = 5\n\n# The maximum number of dependencies we request per validator in a batch.\n# Limits requests per validator in panorama - in order to get a total number of\n# requests, multiply by # of validators.\nmax_request_batch_size = 20\n\n[consensus.highway.round_success_meter]\n# The number of most recent rounds we will be keeping track of.\nnum_rounds_to_consider = 40\n\n# The number of successful rounds that triggers us to slow down: With this many or fewer\n# successes per `num_rounds_to_consider`, we increase our round length.\nnum_rounds_slowdown = 10\n\n# The number of successful rounds that triggers us to speed up: With this many or more successes\n# per `num_rounds_to_consider`, we decrease our round length.\nnum_rounds_speedup = 32\n\n# We will try to accelerate (decrease our round length) every `acceleration_parameter` rounds if\n# we have few enough failures.\nacceleration_parameter = 40\n\n# The FTT, as a percentage (i.e. 
`acceleration_ftt = [1, 100]` means 1% of the validators' total weight), which\n# we will use for looking for a summit in order to determine a proposal's finality.\n# The required quorum in a summit we will look for to check if a round was successful is\n# determined by this FTT.\nacceleration_ftt = [1, 100]\n\n\n# ====================================\n# Configuration options for networking\n# ====================================\n[network]\n\n# The public address of the node.\n#\n# It must be publicly available in order to allow peers to connect to this node.\n# If the port is set to 0, the actual bound port will be substituted.\npublic_address = '<IP ADDRESS>:0'\n\n# Address to bind to for listening.\n# If port is set to 0, a random port will be used.\nbind_address = '0.0.0.0:35000'\n\n# Addresses to connect to in order to join the network.\n#\n# If not set, this node will not be able to attempt to connect to the network.  Instead it will\n# depend upon peers connecting to it.  This is normally only useful for the first node of the\n# network.\n#\n# Multiple addresses can be given and the node will attempt to connect to each, requiring at least\n# one connection.\nknown_addresses = ['51.81.106.54:35000','135.148.34.108:35000','135.148.169.178:35000','51.83.238.2:35000','142.4.215.112:35000']\n\n# Minimum number of fully-connected peers to consider network component initialized.\nmin_peers_for_initialization = 3\n\n# The interval between each fresh round of gossiping the node's public address.\ngossip_interval = '120 seconds'\n\n# Initial delay for starting address gossipping after the network starts. This should be slightly\n# more than the expected time required for initial connections to complete.\ninitial_gossip_delay = '5 seconds'\n\n# How long a connection is allowed to be stuck as pending before it is abandoned.\nmax_addr_pending_time = '1 minute'\n\n# Maximum time allowed for a connection handshake between two nodes to be completed. 
Connections\n# exceeding this threshold are considered unlikely to be healthy or even malicious and thus\n# terminated.\nhandshake_timeout = '20 seconds'\n\n# Maximum number of incoming connections per unique peer allowed. If the limit is hit, additional\n# connections will be rejected. A value of `0` means unlimited.\nmax_incoming_peer_connections = 3\n\n# The maximum total of upstream bandwidth in bytes per second allocated to non-validating peers.\n# A value of `0` means unlimited.\nmax_outgoing_byte_rate_non_validators = 6553600\n\n# The maximum allowed total impact of requests from non-validating peers per second answered.\n# A value of `0` means unlimited.\nmax_incoming_message_rate_non_validators = 3000\n\n# Maximum number of requests for data from a single peer that are allowed be buffered. A value of\n# `0` means unlimited.\nmax_in_flight_demands = 50\n\n# Version threshold to enable tarpit for.\n#\n# When set to a version (the value may be `null` to disable the feature), any peer that reports a\n# protocol version equal or below the threshold will be rejected only after holding open the\n# connection for a specific (`tarpit_duration`) amount of time.\n#\n# This option makes most sense to enable on known nodes with addresses where legacy nodes that are\n# still in operation are connecting to, as these older versions will only attempt to reconnect to\n# other nodes once they have exhausted their set of known nodes.\ntarpit_version_threshold = '1.2.1'\n\n# How long to hold connections to trapped legacy nodes.\ntarpit_duration = '10 minutes'\n\n# The probability [0.0, 1.0] of this node trapping a legacy node.\n#\n# Since older nodes will only reconnect if all their options are exhausted, it is sufficient for a\n# single known node to hold open a connection to prevent the node from reconnecting. 
This should be\n# set to `1/n` or higher, with `n` being the number of known nodes expected in the configuration of\n# legacy nodes running this software.\ntarpit_chance = 0.2\n\n# Minimum time a peer is kept on block list before being redeemed. The actual\n# timeout duration is calculated by selecting a random value between\n# <blocklist_retain_min_duration, blocklist_retain_max_duration>.\nblocklist_retain_min_duration = '2 minutes'\n\n# Maximum time a peer is kept on block list before being redeemed. The actual\n# timeout duration is calculated by selecting a random value between\n# <blocklist_retain_min_duration, blocklist_retain_max_duration>.\nblocklist_retain_max_duration = '10 minutes'\n\n# Identity of a node\n#\n# When this section is not specified, an identity will be generated when the node process starts with a self-signed certificate.\n# This option makes sense for some private chains where for security reasons joining new nodes is restricted.\n# [network.identity]\n# tls_certificate = \"node_cert.pem\"\n# secret_key = \"node.pem\"\n# ca_certificate = \"ca_cert.pem\"\n\n# Weights for impact estimation of incoming messages, used in combination with\n# `max_incoming_message_rate_non_validators`.\n#\n# Any weight set to 0 means that the category of traffic is exempt from throttling.\n[network.estimator_weights]\nconsensus = 0\nblock_gossip = 1\ntransaction_gossip = 0\nfinality_signature_gossip = 1\naddress_gossip = 0\nfinality_signature_broadcasts = 0\ntransaction_requests = 1\ntransaction_responses = 0\nlegacy_deploy_requests = 1\nlegacy_deploy_responses = 0\nblock_requests = 1\nblock_responses = 0\nblock_header_requests = 1\nblock_header_responses = 0\ntrie_requests = 1\ntrie_responses = 0\nfinality_signature_requests = 1\nfinality_signature_responses = 0\nsync_leap_requests = 1\nsync_leap_responses = 0\napprovals_hashes_requests = 1\napprovals_hashes_responses = 0\nexecution_results_requests = 1\nexecution_results_responses = 0\n\n# 
==================================================\n# Configuration options for the BinaryPort server\n# ==================================================\n[binary_port_server]\n\n# Flag which enables the BinaryPort server.\nenable_server = true\n\n# Listening address for BinaryPort server.\naddress = '0.0.0.0:7779'\n\n# Flag that enables the `AllValues` get request. Disabled by default, because it can potentially be abused to retrieve huge amounts of data and clog the node.\nallow_request_get_all_values = false\n\n# Flag that enables the `Trie` get request. Disabled by default, because it can potentially be abused to retrieve huge amounts of data and clog the node.\nallow_request_get_trie = false\n\n# Flag that enables the `TrySpeculativeExec` request. Disabled by default.\nallow_request_speculative_exec = false\n\n# Maximum size of a message in bytes.\nmax_message_size_bytes = 134_217_728\n\n# Maximum number of connections to the server.\nmax_connections = 5\n\n# The global max rate of requests (per second) before they are limited.\n# The implementation uses a sliding window algorithm.\nqps_limit = 110\n\n# Initial time given to a connection before it expires\ninitial_connection_lifetime = '10 seconds'\n\n#The amount of time which is given to a connection to extend its lifetime when a valid\n# [`Command::Get(GetRequest::Record)`] is sent to the node\nget_record_request_termination_delay = '0 seconds'\n\n#The amount of time which is given to a connection to extend its lifetime when a valid\n#[`Command::Get(GetRequest::Information)`] is sent to the node\nget_information_request_termination_delay = '5 seconds'\n\n#The amount of time which is given to a connection to extend its lifetime when a valid\n#[`Command::Get(GetRequest::State)`] is sent to the node\nget_state_request_termination_delay = '0 seconds'\n\n#The amount of time which is given to a connection to extend its lifetime when a valid\n#[`Command::Get(GetRequest::Trie)`] is sent to the 
node\nget_trie_request_termination_delay = '0 seconds'\n\n#The amount of time which is given to a connection to extend its lifetime when a valid\n#[`Command::TryAcceptTransaction`] is sent to the node\naccept_transaction_request_termination_delay = '24 seconds'\n\n#The amount of time which is given to a connection to extend its lifetime when a valid\n#[`Command::TrySpeculativeExec`] is sent to the node\nspeculative_exec_request_termination_delay = '0 seconds'\n\n\n# ==============================================\n# Configuration options for the REST HTTP server\n# ==============================================\n[rest_server]\n\n# Flag which enables the REST HTTP server.\nenable_server = true\n\n# Listening address for REST HTTP server.  If the port is set to 0, a random port will be used.\n#\n# If the specified port cannot be bound to, a random port will be tried instead.  If binding fails,\n# the REST HTTP server will not run, but the node will be otherwise unaffected.\n#\n# The actual bound address will be reported via a log line if logging is enabled.\naddress = '0.0.0.0:8888'\n\n# The global max rate of requests (per second) before they are limited.\n# Request will be delayed to the next 1 second bucket once limited.\nqps_limit = 100\n\n# Specifies which origin will be reported as allowed by REST server.\n#\n# If left empty, CORS will be disabled.\n# If set to '*', any origin is allowed.\n# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin).\ncors_origin = ''\n\n\n# ==========================================================\n# Configuration options for the SSE HTTP event stream server\n# ==========================================================\n[event_stream_server]\n\n# Flag which enables the SSE HTTP event stream server.\nenable_server = true\n\n# Listening address for SSE HTTP event stream server.  
If the port is set to 0, a random port will be used.\n#\n# If the specified port cannot be bound to, a random port will be tried instead.  If binding fails,\n# the SSE HTTP event stream server will not run, but the node will be otherwise unaffected.\n#\n# The actual bound address will be reported via a log line if logging is enabled.\naddress = '0.0.0.0:9999'\n\n# The number of event stream events to buffer.\nevent_stream_buffer_length = 5000\n\n# The maximum number of subscribers across all event streams the server will permit at any one time.\nmax_concurrent_subscribers = 100\n\n# Specifies which origin will be reported as allowed by event stream server.\n#\n# If left empty, CORS will be disabled.\n# If set to '*', any origin is allowed.\n# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin).\ncors_origin = ''\n\n# ===============================================\n# Configuration options for the storage component\n# ===============================================\n[storage]\n\n# Path (absolute, or relative to this config.toml) to the folder where any files created\n# or read by the storage component will exist. 
A subfolder named with the network name will be\n# automatically created and used for the storage component files.\n#\n# If the folder doesn't exist, it and any required parents will be created.\n#\n# If unset, the path must be supplied as an argument via the CLI.\npath = '/var/lib/casper/casper-node'\n\n# Maximum size of the database to use for the block store.\n#\n# The size should be a multiple of the OS page size.\n#\n# 483_183_820_800 == 450 GiB.\nmax_block_store_size = 483_183_820_800\n\n# Maximum size of the database to use for the deploy store.\n#\n# The size should be a multiple of the OS page size.\n#\n# 322_122_547_200 == 300 GiB.\nmax_deploy_store_size = 322_122_547_200\n\n# Maximum size of the database to use for the deploy metadata.\n#\n# The size should be a multiple of the OS page size.\n#\n# 322_122_547_200 == 300 GiB.\nmax_deploy_metadata_store_size = 322_122_547_200\n\n# Maximum size of the database to use for the state snapshots.\n#\n# The size should be a multiple of the OS page size.\n#\n# 10_737_418_240 == 10 GiB.\nmax_state_store_size = 10_737_418_240\n\n# Memory deduplication.\n#\n# If enabled, nodes will attempt to share loaded objects if possible.\nenable_mem_deduplication = true\n\n# Memory duplication garbage collection.\n#\n# Sets the frequency how often the memory pool cache is swept for free references.\n# For example, setting this value to 5 means that every 5th time something is put in the pool the cache is swept.\nmem_pool_prune_interval = 4096\n\n\n# ===================================\n# Configuration options for gossiping\n# ===================================\n[gossip]\n\n# Target number of peers to infect with a given piece of data.\ninfection_target = 3\n\n# The saturation limit as a percentage, with a maximum value of 99.  Used as a termination\n# condition.\n#\n# Example: assume the `infection_target` is 3, the `saturation_limit_percent` is 80, and we don't\n# manage to newly infect 3 peers.  
We will stop gossiping once we know of more than 15 holders\n# excluding us since 80% saturation would imply 3 new infections in 15 peers.\nsaturation_limit_percent = 80\n\n# The maximum duration for which to keep finished entries.\n#\n# The longer they are retained, the lower the likelihood of re-gossiping a piece of data.  However,\n# the longer they are retained, the larger the list of finished entries can grow.\nfinished_entry_duration = '1 minute'\n\n# The timeout duration for a single gossip request, i.e. for a single gossip message\n# sent from this node, it will be considered timed out if the expected response from that peer is\n# not received within this specified duration.\ngossip_request_timeout = '30 seconds'\n\n# The timeout duration for retrieving the remaining part(s) of newly-discovered data\n# from a peer which gossiped information about that data to this node.\nget_remainder_timeout = '5 seconds'\n\n# The timeout duration for a newly-received, gossiped item to be validated and stored by another\n# component before the gossiper abandons waiting to gossip the item onwards.\nvalidate_and_store_timeout = '1 minute'\n\n\n# ===============================================\n# Configuration options for the block accumulator\n# ===============================================\n[block_accumulator]\n\n# Block height difference threshold for starting to execute the blocks.\nattempt_execution_threshold = 6\n\n# Accepted time interval for inactivity in block accumulator.\ndead_air_interval = '3 minutes'\n\n# Time after which the block acceptors are considered old and can be purged.\npurge_interval = '1 minute'\n\n\n# ================================================\n# Configuration options for the block synchronizer\n# ================================================\n[block_synchronizer]\n\n# Maximum number of fetch-trie tasks to run in parallel during block synchronization.\nmax_parallel_trie_fetches = 5000\n\n# Time interval for the node to ask for refreshed 
peers.\npeer_refresh_interval = '90 seconds'\n\n# Time interval for the node to check what the block synchronizer needs to acquire next.\nneed_next_interval = '1 second'\n\n# Time interval for recurring disconnection of dishonest peers.\ndisconnect_dishonest_peers_interval = '10 seconds'\n\n# Time interval for resetting the latch in block builders.\nlatch_reset_interval = '5 seconds'\n\n\n# =============================================\n# Configuration options for the block validator\n# =============================================\n[block_validator]\n\n# Maximum number of completed entries to retain.\n#\n# A higher value can avoid creating needless validation work on an already-validated proposed\n# block, but comes at the cost of increased memory consumption.\nmax_completed_entries = 6\n\n\n# ==================================\n# Configuration options for fetchers\n# ==================================\n[fetcher]\n\n# The timeout duration for a single fetcher request, i.e. for a single fetcher message\n# sent from this node to another node, it will be considered timed out if the expected response from that peer is\n# not received within this specified duration.\nget_from_peer_timeout = '10 seconds'\n\n\n# ========================================================\n# Configuration options for the contract runtime component\n# ========================================================\n[contract_runtime]\n\n# Optional maximum size of the database to use for the global state store.\n#\n# If unset, defaults to 805,306,368,000 == 750 GiB.\n#\n# The size should be a multiple of the OS page size.\nmax_global_state_size = 2_089_072_132_096\n\n# Optional depth limit to use for global state queries.\n#\n# If unset, defaults to 5.\n#max_query_depth = 5\n\n# Enable manual synchronizing to disk.\n#\n# If unset, defaults to true.\n#enable_manual_sync = true\n\n\n# ==================================================\n# Configuration options for the transaction acceptor\n# 
==================================================\n[transaction_acceptor]\n\n# The leeway allowed when considering whether a transaction is future-dated or not.\n#\n# To accommodate minor clock drift, transactions whose timestamps are within `timestamp_leeway` in the\n# future are still acceptable.\n#\n# The maximum value to which `timestamp_leeway` can be set is defined by the chainspec setting\n# `transaction.max_timestamp_leeway`.\ntimestamp_leeway = '2 seconds'\n\n\n# ===========================================\n# Configuration options for the transaction buffer\n# ===========================================\n[transaction_buffer]\n\n# The interval of checking for expired transactions.\nexpiry_check_interval = '1 minute'\n\n\n# ==============================================\n# Configuration options for the diagnostics port\n# ==============================================\n[diagnostics_port]\n\n# If set, the diagnostics port will be available on a UNIX socket.\nenabled = false\n\n# Filename for the UNIX domain socket the diagnostics port listens on.\nsocket_path = \"debug.socket\"\n\n# The umask to set before creating the socket. A restrictive mask like `0o077` will cause the\n# socket to be only accessible by the user the node runs as. A more relaxed variant is `0o007`,\n# which allows for group access as well.\nsocket_umask = 0o077\n\n\n# =============================================\n# Configuration options for the upgrade watcher\n# =============================================\n[upgrade_watcher]\n\n# How often to scan file system for available upgrades.\nupgrade_check_interval = '30 seconds'\n"
  },
  {
    "path": "resources/mainnet/global_state.toml",
    "content": "# The below entry will write the value into an account purse balance.\n#\n# Key fd37afc9c5c70f04300e55977a07db92eada2f0b3538840752afb8cade90ed3b is the main purse\n# for CA Treasury 1 account 02033c97db0328743d0a3b7654751efab570caa6e6c011869f34e65560dc011a348c\n#\n# Value AAkAAAAIutchOy3mGUII is base64 encoding of 4763091162861000634 motes in U512 data type.\n# This represents 33% of the supply at block 6972606 (14433609569450460644)\n# plus 4942348218 motes - the current balance of this account.\n[[entries]]\nkey = \"balance-fd37afc9c5c70f04300e55977a07db92eada2f0b3538840752afb8cade90ed3b\"\nvalue = \"AAkAAAAIutchOy3mGUII\"\n"
  },
  {
    "path": "resources/production/accounts.toml",
    "content": "[[accounts]]\npublic_key = \"01ee8b5ec54251a9f2f92edbbf328fcdb78449ae2cca2743d591f1b7e83b6d9049\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 5\n[[accounts]]\npublic_key = \"01b4815d64e022127b117d25436e4b9a9d9d1338b0c6cb09db3995d4d3ac84d86b\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"010427c1d1227c9d2aafe8c06c6e6b276da8dcd8fd170ca848b8e3e8e1038a6dc8\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"835521000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"010a78eef78966a56a633c665411388f97f850609960d5d898f3992272b8d4bcca\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"118750000000000\"\ndelegation_rate = 9\n[[accounts]]\npublic_key = \"011907bc6fa90ccb8cacc150170b887b3dd97a96fc029b515365141d1e5d4f7983\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"836069000000000\"\ndelegation_rate = 9\n[[accounts]]\npublic_key = \"01ba1dcfbe8dba48b88674bb007c000391c0ea36b5d80570c113a42a9823d702c2\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"012f5501236a3bd82d22ee7675e78174ee723565495fd0f43be8a011bfa6494687\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"014382d46e2543ab2832c04936f8c205847040426abb56065bbf7b2f7e1d33f200\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 5\n[[accounts]]\npublic_key = \"01a849b574b01c775754db774f7243c4eae01f8919ba3d5c624282485af07b18ae\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"01765c9d153550285d2e6da2f4407ca8b6c5d2f94098e9794a02609df1068474ef\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = 
\"01e61c8b8227afd8f7d4daece145546aa6775cf1c4ebfb6f3f56c18df558aed72d\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"01c2de71b99085b38f3ba0dece0c1e128de7fe69526beb66951b99f5c7272c3980\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 7\n[[accounts]]\npublic_key = \"01c5594d384a35a7520f00643c054bc5f574d04fa88d73fb215614fea35178d9b2\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"017f7380736731efa408c323b38a13541be1c99a4aefb0d5b59c6a4aecac494105\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"01a6901408eda702a653805f50060bfe00d5e962747ee7133df64bd7bab50b4643\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1840864000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"012bac1d0ff9240ff0b7b06d555815640497861619ca12583ddef434885416e69b\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"018f84c6fc037284f189cc8cb49f89212ff434a5eb050e48cdd164ff3890fbff69\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 15\n[[accounts]]\npublic_key = \"013eb6775484f25ac4e126e93a350ef2bc259385da5141d331829af7f755b03844\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 8\n[[accounts]]\npublic_key = 
\"01652d9fbd8dbb443af0122cd4347f4107e697306e5b90f93dbf959f7612e5e7d2\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"01faec72d138026edbd470d9ba1f6a05c5fabaa98da8bb41c8c92041d2f58337d2\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"01026ca707c348ed8012ac6a1f28db031fadd6eb67203501a353b867a08c8b9a80\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 30\n[[accounts]]\npublic_key = \"01269c84a9153623fc47288f5a2b1bd681fc7d01b1f6144626583a56d3ba8f7c09\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 8\n[[accounts]]\npublic_key = \"01ad71484b09749a49463d3af2a8a3addd71509a92f447e9b06cbddbf60b45cbf7\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"011117189c666f81c5160cd610ee383dc9b2d0361f004934754d39752eedc64957\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"01419478cc7a68037c553c0214d595cb6b432c71ef73ece0d7a5f98c5eb1ecb44a\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 20\n[[accounts]]\npublic_key = \"0145316b84fee8735291f5a29206e5211c74ab828b0382bb475a4a6e799894ea11\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1014561000000000\"\ndelegation_rate = 7\n[[accounts]]\npublic_key = \"01219550874603647a55b98844a7e41accc5c3076b6c1fbb30002e7f6031524fa2\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 8\n[[accounts]]\npublic_key = \"01c7de2f333695c4db43cce8c555f74540176071a9eeb8438eb335634b9729ee6b\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"970728000000000\"\ndelegation_rate = 7\n[[accounts]]\npublic_key = 
\"01d904f8135553a4810487b0d754f9f1c8efb1ee91c96f36e92b9631120a7ece06\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 7\n[[accounts]]\npublic_key = \"010a8ac8d23e6c57fa340c552ddf9199d9cba9166ecc0daee640053ebfc6254610\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 8\n[[accounts]]\npublic_key = \"0182f835993ce0d3596147429ea432b3a025580f458f50bbbaccbbe4c73f1f1113\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"012dc55b77b2a9faf75dbaed15775ddfc48e60c4596608318c8b96b1900bdf1d5f\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"01d2212cbecf229cd13deb7d2b650ed72cc109763398d95aa7743a1559e7eb4781\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 8\n[[accounts]]\npublic_key = \"0144118517b5ba7e7ca917d5a418c5fb8bf49e279427435d6597f59f8c5bf9ff1f\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"0149fcd9cc0986ba04d77ad48c7def4860bf6952ccb075666e579549386fd2f143\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 7\n[[accounts]]\npublic_key = \"014ec8bfba46dbd592c0b44dd356abffb203330ddeced26c7522656b9bff85e7bc\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"20745205000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"0167e08c3b05017d329444dc7d22518ba652cecb2c54669a69e5808ebcab25e42c\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"01b32b134afdf8585b8c8181f81375a98c5ed12e145c9e6bfea33a55eeccf1fa22\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = 
\"017de9688caedd0718baed968179ddbe0b0532a8ef0a9a1cb9dfabe9b0f6016fa8\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 5\n[[accounts]]\npublic_key = \"01b1c48c69eeb5339ae43be062dee21c46d61346b9ee1f83d4e924000833c5a3e4\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1689877000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"017b9a85b657e0a8c2e01bf2d80b6b2e6f8d8b4bc6d7c479f21e59dceea761710b\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 7\n[[accounts]]\npublic_key = \"01eedfd20f75528c50aae557d15dff5ca6379ca8401bceb8e969cd0cb1ea52ec7f\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 100\n[[accounts]]\npublic_key = \"017d940644deeb8eea3d94f62cfa65a9bea4eb2e8ec9433104bb145d73fd39e98f\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 100\n[[accounts]]\npublic_key = \"0163e03c3aa2b383f9d1b2f7c69498d339dcd1061059792ce51afda49135ff7876\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 100\n[[accounts]]\npublic_key = \"015dfd4b3f997b1eb1a7292eb501845931b8aa9869988a5caa2be79ac4f5ff8a21\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 100\n[[accounts]]\npublic_key = \"013f4f5b2da0d7ed5cf78d5f153a5517ff203c0ed3570061476e793c81669e77d9\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 100\n[[accounts]]\npublic_key = \"0113442fd0dc052634fb42943e8ba095a404ea12cded84fbe4b1536ded94dab10f\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"666667000000000\"\ndelegation_rate = 100\n[[accounts]]\npublic_key = \"014c2573f59c70775a41d9cb69b9e3146e6d51567d33a141423b5bf371967e5902\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 100\n[[accounts]]\npublic_key = 
\"0190c434129ecbaeb34d33185ab6bf97c3c493fc50121a56a9ed8c4c52855b5ac1\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 100\n[[accounts]]\npublic_key = \"014b466f5c6c87bb1d2566d166120e320a724231374cd0775e0e347afed70a4745\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"015da78bc6315643fbcf57d87a50853c67d8b271b615f38f6538f78a844cc4501d\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"0142093cad632cc989b3e9952a4e571cab44d90a7bf9e3badd0d91c2dc2ead332a\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1278629000000000\"\ndelegation_rate = 5\n[[accounts]]\npublic_key = \"01ae2af99944b0b9f92fccf425aa51894ebbad0f4e8e42c66a71dcb999a3bd94ed\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1912869000000000\"\ndelegation_rate = 10\n[[accounts]]\npublic_key = \"01aa2976834459371b1cf7f476873dd091a0e364bd18abed8e77659b83fd892084\"\nbalance = \"0\"\n\n[accounts.validator]\nbonded_amount = \"1000000000\"\ndelegation_rate = 100\n[[accounts]]\npublic_key = \"0202BB1CDBEC959D918875610AECE2CD043ED8F1ED42461215812C5328592C7F4F58\"\nbalance = \"600000000000000000\"\n\n[[accounts]]\npublic_key = \"02036210C108F7E819A38AE05CC7B1DC39A2E6366F1404CE4C12E9C05B3832D811BB\"\nbalance = \"800000000000000000\"\n\n[[accounts]]\npublic_key = \"02035C47CCBEAA32040D6904B6DC163C3B546314C52B2A78583835F54A224AB365A4\"\nbalance = \"1000000000000000000\"\n\n[[accounts]]\npublic_key = \"0203A5C03D9F7D6885276D0FAA25D412ECC1369F6573856C321D99CAFC0DAC2A8C12\"\nbalance = \"1100000000000000000\"\n\n[[accounts]]\npublic_key = \"02024C5E3BA7B1DA49CDA950319AEC914CD3C720FBEC3DCF25AA4ADD631E28F70AA9\"\nbalance = \"2964329384000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01eb9c059b2624f475bb0726d8b2e382b025818dce3ec1f5ecf7f80ca708b6440e\"\nbalance = \"0\"\nvalidator_public_key = 
\"0190c434129ecbaeb34d33185ab6bf97c3c493fc50121a56a9ed8c4c52855b5ac1\"\ndelegated_amount = \"1000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01ebb0e23660d71a0fcd19ac6d96b9c0a95fa9081465667cb6e6626580e5804a5b\"\nbalance = \"0\"\nvalidator_public_key = \"014ec8bfba46dbd592c0b44dd356abffb203330ddeced26c7522656b9bff85e7bc\"\ndelegated_amount = \"200904000000000\"\n\n[[delegators]]\ndelegator_public_key = \"016b1ed3c0bbc59e853802a8fdf935df5cf68c775df2a4f5efba4fed634fa9884e\"\nbalance = \"0\"\nvalidator_public_key = \"0182f835993ce0d3596147429ea432b3a025580f458f50bbbaccbbe4c73f1f1113\"\ndelegated_amount = \"325500000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01ff815bba34957e2a1a575be558ae7bdabd709e6104a1a12f77b3f348e5978f89\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"331562000000000\"\n\n[[delegators]]\ndelegator_public_key = \"018639e0a75d9abcce90c643e240b8c9738ff52ea34429c3fdbe7a2660078b57ef\"\nbalance = \"0\"\nvalidator_public_key = \"0149fcd9cc0986ba04d77ad48c7def4860bf6952ccb075666e579549386fd2f143\"\ndelegated_amount = \"333333500000000\"\n\n[[delegators]]\ndelegator_public_key = \"015f04b56bcb8c4c4fcd3700f74a46d679d947ea1294887b6555a649291df827a0\"\nbalance = \"0\"\nvalidator_public_key = \"014382d46e2543ab2832c04936f8c205847040426abb56065bbf7b2f7e1d33f200\"\ndelegated_amount = \"333333500000000\"\n\n[[delegators]]\ndelegator_public_key = \"0167f6eb1ab61c9432aba0db31462b3f23dccd0dbf25b9d2f6d686286b354a4a4f\"\nbalance = \"0\"\nvalidator_public_key = \"01652d9fbd8dbb443af0122cd4347f4107e697306e5b90f93dbf959f7612e5e7d2\"\ndelegated_amount = \"374280000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01d571ce1c05f491ed8201196af1852ec70d3f56e71b494865e37994e20112d8e6\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = 
\"520622000000000\"\n\n[[delegators]]\ndelegator_public_key = \"012dd5b64d805a9811ea8653bf5aa51c4bcde5b95656127bd0056f9653ac7ce0ad\"\nbalance = \"0\"\nvalidator_public_key = \"010a8ac8d23e6c57fa340c552ddf9199d9cba9166ecc0daee640053ebfc6254610\"\ndelegated_amount = \"569402000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01b6d6b0f8483a7a870f148cfcf4f9fbde80103f771e1576b1cba17368f2d187fb\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"569402000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01fe2573caf5a3344c85f1ac2fb81bd6eaad225e160014c2a45aded7c7784940c8\"\nbalance = \"0\"\nvalidator_public_key = \"012dc55b77b2a9faf75dbaed15775ddfc48e60c4596608318c8b96b1900bdf1d5f\"\ndelegated_amount = \"569402000000000\"\n\n[[delegators]]\ndelegator_public_key = \"019893aaea11a3f0f2ee2963741f1dc6414e5af05f707d008e40f86288895e975c\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"569402000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01777ac4c7b5cd6697fb0e3163258db9bf05c25b8b5214c2b475f1015e60dcb148\"\nbalance = \"0\"\nvalidator_public_key = \"014382d46e2543ab2832c04936f8c205847040426abb56065bbf7b2f7e1d33f200\"\ndelegated_amount = \"592166000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01bbc0bcfb61b479c557d9e478fd590ec7aaf29807c9583884b31cb7565f94b445\"\nbalance = \"0\"\nvalidator_public_key = \"01ad71484b09749a49463d3af2a8a3addd71509a92f447e9b06cbddbf60b45cbf7\"\ndelegated_amount = \"601324000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01f67c12f92e8776ff20800af0c7836692540cc6dacad6aea4cf61e0cc8191e467\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"689764000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0122f0609630cf52066b0992bf460881224f3fa26015e859d378e86fef63285316\"\nbalance = 
\"0\"\nvalidator_public_key = \"0182f835993ce0d3596147429ea432b3a025580f458f50bbbaccbbe4c73f1f1113\"\ndelegated_amount = \"702736000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01372cbceb4f20bfaa2955e44b337d98e5b1d52bd2bce7766443e23359b1204c06\"\nbalance = \"0\"\nvalidator_public_key = \"01ee8b5ec54251a9f2f92edbbf328fcdb78449ae2cca2743d591f1b7e83b6d9049\"\ndelegated_amount = \"823609817600000\"\n\n[[delegators]]\ndelegator_public_key = \"012945e0cbac47660dd84d89d30fad046c5a41682fae34f15333c7b0256a9747ff\"\nbalance = \"0\"\nvalidator_public_key = \"01b4815d64e022127b117d25436e4b9a9d9d1338b0c6cb09db3995d4d3ac84d86b\"\ndelegated_amount = \"836069000000000\"\n\n[[delegators]]\ndelegator_public_key = \"013234c7da09119de9460f9b699b1a857730fe3ae80bef24d91910cea667b4e6b9\"\nbalance = \"0\"\nvalidator_public_key = \"0145316b84fee8735291f5a29206e5211c74ab828b0382bb475a4a6e799894ea11\"\ndelegated_amount = \"936995290800000\"\n\n[[delegators]]\ndelegator_public_key = \"0138320bc703eccf17e8129505cf1318709a2d36ff55a9e37114ca7ece8a1d52b7\"\nbalance = \"0\"\nvalidator_public_key = \"014382d46e2543ab2832c04936f8c205847040426abb56065bbf7b2f7e1d33f200\"\ndelegated_amount = \"941168989200000\"\n\n[[delegators]]\ndelegator_public_key = \"019b51d1b374644628d7741f6c0980654fbdace31208268b9db736d2f5e42cc83a\"\nbalance = \"0\"\nvalidator_public_key = \"01ba1dcfbe8dba48b88674bb007c000391c0ea36b5d80570c113a42a9823d702c2\"\ndelegated_amount = \"1026248000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0168be9a3ecd4ec3fc420705a810824f10e95a8156017f32ed1a0d3cecf6c8e29d\"\nbalance = \"0\"\nvalidator_public_key = \"01d904f8135553a4810487b0d754f9f1c8efb1ee91c96f36e92b9631120a7ece06\"\ndelegated_amount = \"1064988708400000\"\n\n[[delegators]]\ndelegator_public_key = \"010e7b14719461b8fdc67c9c4770acd4f1b1249ce18d758cc951708c2400fa721e\"\nbalance = \"0\"\nvalidator_public_key = \"017f7380736731efa408c323b38a13541be1c99a4aefb0d5b59c6a4aecac494105\"\ndelegated_amount = 
\"1077931000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01b04bbff56d65dc268e35bfe2932d4a3c365a4c584b0b966f46a9c297d940da04\"\nbalance = \"0\"\nvalidator_public_key = \"01269c84a9153623fc47288f5a2b1bd681fc7d01b1f6144626583a56d3ba8f7c09\"\ndelegated_amount = \"1162909000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0189cf6a5362829ee43e8e4d9510f386288e8a90d6e08baa0615b65614d398dafb\"\nbalance = \"0\"\nvalidator_public_key = \"0144118517b5ba7e7ca917d5a418c5fb8bf49e279427435d6597f59f8c5bf9ff1f\"\ndelegated_amount = \"1196729000000000\"\n\n[[delegators]]\ndelegator_public_key = \"014cfe52b8cd6ed8fc48cec019cb081b4f15cf1ff26f5c282c601a6e4332f3c27d\"\nbalance = \"0\"\nvalidator_public_key = \"01219550874603647a55b98844a7e41accc5c3076b6c1fbb30002e7f6031524fa2\"\ndelegated_amount = \"1410054000000000\"\n\n[[delegators]]\ndelegator_public_key = \"02038F968DFE84882E08330C961D4AAA60BAA4D58D512DE0CCBD83EB365E34ACAF5B\"\nbalance = \"0\"\nvalidator_public_key = \"01e61c8b8227afd8f7d4daece145546aa6775cf1c4ebfb6f3f56c18df558aed72d\"\ndelegated_amount = \"1473322000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0122cb4d0d9fdc1a3a984913832b5320ba56203cf8bbca0f2f020f1dbc400ef048\"\nbalance = \"0\"\nvalidator_public_key = \"01d2212cbecf229cd13deb7d2b650ed72cc109763398d95aa7743a1559e7eb4781\"\ndelegated_amount = \"1497566000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0113838fd13ecb46a06276780100f3643bcaf3abb7d5ad5aa1c7e48aea9470eb05\"\nbalance = \"0\"\nvalidator_public_key = \"013eb6775484f25ac4e126e93a350ef2bc259385da5141d331829af7f755b03844\"\ndelegated_amount = \"1621310000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01b4e17cd2556abb6bae2dc67fc8fbe3dda52451fc300308c2ef6d246569eb6a20\"\nbalance = \"0\"\nvalidator_public_key = \"01c2de71b99085b38f3ba0dece0c1e128de7fe69526beb66951b99f5c7272c3980\"\ndelegated_amount = \"1768864000000000\"\n\n[[delegators]]\ndelegator_public_key = 
\"012034668d7a5844f4fce1c3672c8d54daa81adb6d230a4aeeccaa8b369f0178fc\"\nbalance = \"0\"\nvalidator_public_key = \"01652d9fbd8dbb443af0122cd4347f4107e697306e5b90f93dbf959f7612e5e7d2\"\ndelegated_amount = \"1780596000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01b243a9ab89f5a2332fc505942f94f0ca26306fe162720dd6c32072f71d0798d2\"\nbalance = \"0\"\nvalidator_public_key = \"017f7380736731efa408c323b38a13541be1c99a4aefb0d5b59c6a4aecac494105\"\ndelegated_amount = \"1833864000000000\"\n\n[[delegators]]\ndelegator_public_key = \"019ecde420395f4a9b70be2d4db0ab914682e04705cc2d7d3d73d958c7e69bfff8\"\nbalance = \"0\"\nvalidator_public_key = \"01ee8b5ec54251a9f2f92edbbf328fcdb78449ae2cca2743d591f1b7e83b6d9049\"\ndelegated_amount = \"1866864000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0123bf1bd107d82bb42f4a92fc1ce19251f341802e1ecfa6ed88a3c7bbbdc302d1\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"1925841000000000\"\n\n[[delegators]]\ndelegator_public_key = \"012ff1f06f2125cbdaa5d0baa8bdd1a1073972d623968d600feb11435a672f4ab0\"\nbalance = \"0\"\nvalidator_public_key = \"01a849b574b01c775754db774f7243c4eae01f8919ba3d5c624282485af07b18ae\"\ndelegated_amount = \"2100764000000000\"\n\n[[delegators]]\ndelegator_public_key = \"010cba0e742dc4406477b3921a3f51d0a73d897be13b8236f4401e11f42dc678f4\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"2166666500000000\"\n\n[[delegators]]\ndelegator_public_key = \"015f0b5e0d025c16f39e35ed32e885c9f7f0a23d021d6909c333911eeb2311d190\"\nbalance = \"0\"\nvalidator_public_key = \"0149fcd9cc0986ba04d77ad48c7def4860bf6952ccb075666e579549386fd2f143\"\ndelegated_amount = \"2166666500000000\"\n\n[[delegators]]\ndelegator_public_key = \"01e838a3cf194a4eea5b2396e201595388625b324af017788bb51302b1da27a810\"\nbalance = \"0\"\nvalidator_public_key = 
\"01b32b134afdf8585b8c8181f81375a98c5ed12e145c9e6bfea33a55eeccf1fa22\"\ndelegated_amount = \"2166666500000000\"\n\n[[delegators]]\ndelegator_public_key = \"0131ba554e54c35fb9992f6ebdb290db6087264ad7d0687edb19b6dd998fc2e932\"\nbalance = \"0\"\nvalidator_public_key = \"01b32b134afdf8585b8c8181f81375a98c5ed12e145c9e6bfea33a55eeccf1fa22\"\ndelegated_amount = \"2166666500000000\"\n\n[[delegators]]\ndelegator_public_key = \"01f583e3dfda41ccbba0c51e12ef45eaf1bfd7d776d5cc482196a808ffe89da52f\"\nbalance = \"0\"\nvalidator_public_key = \"010a8ac8d23e6c57fa340c552ddf9199d9cba9166ecc0daee640053ebfc6254610\"\ndelegated_amount = \"2363562000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01128ddb51119f1df535cf3a763996344ab0cc79038faaee0aaaf098a078031ce6\"\nbalance = \"0\"\nvalidator_public_key = \"01026ca707c348ed8012ac6a1f28db031fadd6eb67203501a353b867a08c8b9a80\"\ndelegated_amount = \"2522693000000000\"\n\n[[delegators]]\ndelegator_public_key = \"013bf164b73b877e52118101751feb2b7b129ad159e91a4c00360c990594e9b81f\"\nbalance = \"0\"\nvalidator_public_key = \"01eedfd20f75528c50aae557d15dff5ca6379ca8401bceb8e969cd0cb1ea52ec7f\"\ndelegated_amount = \"2541914000000000\"\n\n[[delegators]]\ndelegator_public_key = \"016a0f6e2a18688f89b4262d048d57bce3df7aad70c1f5fae7be7daee02cb0edb1\"\nbalance = \"0\"\nvalidator_public_key = \"0167e08c3b05017d329444dc7d22518ba652cecb2c54669a69e5808ebcab25e42c\"\ndelegated_amount = \"2600000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01c90604bec2e48eae8e86fdebecd9cc3502bf6944fcf1ec08f071e3985e063430\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"2780822000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0130e7a05a136b5fb5c05ec3d78ab4fd872240ddb2b3d8c4448fab48d45b27efa9\"\nbalance = \"0\"\nvalidator_public_key = \"01c7de2f333695c4db43cce8c555f74540176071a9eeb8438eb335634b9729ee6b\"\ndelegated_amount = 
\"3189401194000000\"\n\n[[delegators]]\ndelegator_public_key = \"013cbe8ca475de38ac5ae85af0536308e21160a13986526d235c28e4796d3ddd99\"\nbalance = \"0\"\nvalidator_public_key = \"012dc55b77b2a9faf75dbaed15775ddfc48e60c4596608318c8b96b1900bdf1d5f\"\ndelegated_amount = \"3553406000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01bcd376f4dc285025624279ba8e17673cce6bfd02750ea09c982a337049b3cc9d\"\nbalance = \"0\"\nvalidator_public_key = \"013f4f5b2da0d7ed5cf78d5f153a5517ff203c0ed3570061476e793c81669e77d9\"\ndelegated_amount = \"3609904000000000\"\n\n[[delegators]]\ndelegator_public_key = \"013932752d32f3b41a9b98f3723553ab0b1fcf54909fa6c47168961e8e6e1bbc41\"\nbalance = \"0\"\nvalidator_public_key = \"013f4f5b2da0d7ed5cf78d5f153a5517ff203c0ed3570061476e793c81669e77d9\"\ndelegated_amount = \"3609904000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0133671964312ad012f760869601d4897b9f549177d1a98c8f210ee270f6b5fa7b\"\nbalance = \"0\"\nvalidator_public_key = \"01d904f8135553a4810487b0d754f9f1c8efb1ee91c96f36e92b9631120a7ece06\"\ndelegated_amount = \"3684493200000000\"\n\n[[delegators]]\ndelegator_public_key = \"01d0f3fa56e64675fab02f2245b5e0caa5503c53bc57af7649af5dd8aa36141f44\"\nbalance = \"0\"\nvalidator_public_key = \"01026ca707c348ed8012ac6a1f28db031fadd6eb67203501a353b867a08c8b9a80\"\ndelegated_amount = \"3684493200000000\"\n\n[[delegators]]\ndelegator_public_key = \"010f228ca392ae12a90c52d64dfbf1fc9fae8b55d4d721ba76452a8fc9166efb40\"\nbalance = \"0\"\nvalidator_public_key = \"014ec8bfba46dbd592c0b44dd356abffb203330ddeced26c7522656b9bff85e7bc\"\ndelegated_amount = \"3684493200000000\"\n\n[[delegators]]\ndelegator_public_key = \"01831d114210b0a134a5bc283634b8bb14ff2b4c4a226da3935fa0640686d78c37\"\nbalance = \"0\"\nvalidator_public_key = \"01c7de2f333695c4db43cce8c555f74540176071a9eeb8438eb335634b9729ee6b\"\ndelegated_amount = \"3684493200000000\"\n\n[[delegators]]\ndelegator_public_key = 
\"0122d1a7ba329a32a5a8fe829b58c28ea7d4e60d48323e08c2821b7afeda0f4c32\"\nbalance = \"0\"\nvalidator_public_key = \"012bac1d0ff9240ff0b7b06d555815640497861619ca12583ddef434885416e69b\"\ndelegated_amount = \"3684493200000000\"\n\n[[delegators]]\ndelegator_public_key = \"01a60e0885f4968dd3107e088b4cb5798af665859b769bc1aa86909a5b67f66a66\"\nbalance = \"0\"\nvalidator_public_key = \"01ba1dcfbe8dba48b88674bb007c000391c0ea36b5d80570c113a42a9823d702c2\"\ndelegated_amount = \"3752637000000000\"\n\n[[delegators]]\ndelegator_public_key = \"019470ef3d314f868e7ce50f5461129c3975ed45e3acab96282dbbd170e1b1a440\"\nbalance = \"0\"\nvalidator_public_key = \"01652d9fbd8dbb443af0122cd4347f4107e697306e5b90f93dbf959f7612e5e7d2\"\ndelegated_amount = \"4091774000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01b879d9c6c650841e4c74c4ba0f3cfe64d1abd5f5f59e741686199e59e1ecfd96\"\nbalance = \"0\"\nvalidator_public_key = \"0144118517b5ba7e7ca917d5a418c5fb8bf49e279427435d6597f59f8c5bf9ff1f\"\ndelegated_amount = \"4107852000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01d9c9c23bfebaf2f610e3a6d93c751550d4f1cfda2fe8a1d5e8e2f6b0402dad29\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"4171233000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01942a994f8bb903cdaa261482285a11acc68ed40a1e5fc65e146e4fe276f8af60\"\nbalance = \"0\"\nvalidator_public_key = \"0149fcd9cc0986ba04d77ad48c7def4860bf6952ccb075666e579549386fd2f143\"\ndelegated_amount = \"4184795000000000\"\n\n[[delegators]]\ndelegator_public_key = \"015f0dc0b5ff839ba1ce85e05190019a442a13c6e5be52721003d9b52e0b73af77\"\nbalance = \"0\"\nvalidator_public_key = \"0149fcd9cc0986ba04d77ad48c7def4860bf6952ccb075666e579549386fd2f143\"\ndelegated_amount = \"4333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01a68ccf92ac4d9d42b898d4dd8ddbacea4346934f81db54788f539831b8618136\"\nbalance = \"0\"\nvalidator_public_key = 
\"01ee8b5ec54251a9f2f92edbbf328fcdb78449ae2cca2743d591f1b7e83b6d9049\"\ndelegated_amount = \"4333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01ad831573d5d5458b1bd68bd08e40ff2369a4db6237e2fed887013da84f2c8b3f\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"4333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01dfa7caca2088155d24dc3c2448e2cc9a348ba18663f5317092cde5ec4502105f\"\nbalance = \"0\"\nvalidator_public_key = \"01026ca707c348ed8012ac6a1f28db031fadd6eb67203501a353b867a08c8b9a80\"\ndelegated_amount = \"4333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0202B974A04E7202FA7EC857E10FB05C90E42D65E3372EE88122DB2CF59092015CA9\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"4333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0145a996c9488cca5c9c43e857b896577651170c9f97081e7489c651766b07f0b7\"\nbalance = \"0\"\nvalidator_public_key = \"012bac1d0ff9240ff0b7b06d555815640497861619ca12583ddef434885416e69b\"\ndelegated_amount = \"4333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0159d9fc631ba08168180d4c53496aba704d9a12ec4c0f2d0ee93933064b9dc125\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"4333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"02024C11C976162D23618E1697449C3B59851411DC46309D5CFD3F3F1E9875EF668F\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"4333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01d12bf7dc4cdf030cbd145c85211382bace22e53b79c835ba6784a0eff149f7ef\"\nbalance = \"0\"\nvalidator_public_key = \"0182f835993ce0d3596147429ea432b3a025580f458f50bbbaccbbe4c73f1f1113\"\ndelegated_amount = 
\"4333333500000000\"\n\n[[delegators]]\ndelegator_public_key = \"0138a4adfe8a3c469ca5990deddda87edf6e46f69c3aa572dc609fc5c2443d9196\"\nbalance = \"0\"\nvalidator_public_key = \"010a8ac8d23e6c57fa340c552ddf9199d9cba9166ecc0daee640053ebfc6254610\"\ndelegated_amount = \"4333333500000000\"\n\n[[delegators]]\ndelegator_public_key = \"01fa0fa7b11fdf42aea514a88e3a2263187d2239cb8ede4bce3456689c023fc393\"\nbalance = \"0\"\nvalidator_public_key = \"017de9688caedd0718baed968179ddbe0b0532a8ef0a9a1cb9dfabe9b0f6016fa8\"\ndelegated_amount = \"4542658000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0115cc7743558b5da74cd37b1212da8bdc075c2499a33e1a394ae2ce3920f05d31\"\nbalance = \"0\"\nvalidator_public_key = \"0144118517b5ba7e7ca917d5a418c5fb8bf49e279427435d6597f59f8c5bf9ff1f\"\ndelegated_amount = \"4543137000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0192a12fc746753edb35078c241a2570bac512f636d3cb3fee055ad9d4c1ae82b9\"\nbalance = \"0\"\nvalidator_public_key = \"01652d9fbd8dbb443af0122cd4347f4107e697306e5b90f93dbf959f7612e5e7d2\"\ndelegated_amount = \"5872836000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0192dbcad8d815289b2faaf6c31549c3bb02453c16df87810bf02c944f06513f3b\"\nbalance = \"0\"\nvalidator_public_key = \"01765c9d153550285d2e6da2f4407ca8b6c5d2f94098e9794a02609df1068474ef\"\ndelegated_amount = \"6500000100000000\"\n\n[[delegators]]\ndelegator_public_key = \"01119bcbf29e296d53aabb24f412ba77b8c6d522d253ed49fe44b01e013813d462\"\nbalance = \"0\"\nvalidator_public_key = \"010a8ac8d23e6c57fa340c552ddf9199d9cba9166ecc0daee640053ebfc6254610\"\ndelegated_amount = \"6898630000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0203E1AFB65278CA78989411B095CA2CE59F4C3357E9B4C92A198FA304F8C7D4BF11\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"6908904000000000\"\n\n[[delegators]]\ndelegator_public_key = 
\"01e0574869b56bab125ed724490096029d2264dc091f21f6de9beb0b91c204fede\"\nbalance = \"0\"\nvalidator_public_key = \"0182f835993ce0d3596147429ea432b3a025580f458f50bbbaccbbe4c73f1f1113\"\ndelegated_amount = \"6929452000000000\"\n\n[[delegators]]\ndelegator_public_key = \"015bceda8667f573df0db5855339cff300f0ad9cc8f3cc8cc7bed7126759e4bd7c\"\nbalance = \"0\"\nvalidator_public_key = \"0149fcd9cc0986ba04d77ad48c7def4860bf6952ccb075666e579549386fd2f143\"\ndelegated_amount = \"6952055000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01de1d428201c4d37a2efb7b86e949e47dd557733da977299c2cb576fb68bb4bf5\"\nbalance = \"0\"\nvalidator_public_key = \"010a8ac8d23e6c57fa340c552ddf9199d9cba9166ecc0daee640053ebfc6254610\"\ndelegated_amount = \"6960274000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01b8fe7a2e409e49365873f8f25fb4e582b6954fff29cebf23a3ba621b7b94110e\"\nbalance = \"0\"\nvalidator_public_key = \"013f4f5b2da0d7ed5cf78d5f153a5517ff203c0ed3570061476e793c81669e77d9\"\ndelegated_amount = \"7219808000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01f3fc2d686758ad3692f90806496a3a7a1fa13bcc5df13b3c75c1ef5a15c9a0a8\"\nbalance = \"0\"\nvalidator_public_key = \"010a8ac8d23e6c57fa340c552ddf9199d9cba9166ecc0daee640053ebfc6254610\"\ndelegated_amount = \"7305479000000000\"\n\n[[delegators]]\ndelegator_public_key = \"016012fd412191dd06ebfdfa40b9d5f61aa028caea6de3d5bb8aef95ce943ece18\"\nbalance = \"0\"\nvalidator_public_key = \"01419478cc7a68037c553c0214d595cb6b432c71ef73ece0d7a5f98c5eb1ecb44a\"\ndelegated_amount = \"7703521000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01c4dcd4f6676ea296c013c9d2ce82e78b6eff0617f061d50626748a1d9196dcfb\"\nbalance = \"0\"\nvalidator_public_key = \"0167e08c3b05017d329444dc7d22518ba652cecb2c54669a69e5808ebcab25e42c\"\ndelegated_amount = \"7800000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01b7b1b5856758bd59ebb8beebccd6dbf4bc489628032921dd83e54c919b4d0a72\"\nbalance = \"0\"\nvalidator_public_key = 
\"0167e08c3b05017d329444dc7d22518ba652cecb2c54669a69e5808ebcab25e42c\"\ndelegated_amount = \"7800000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"012325dc6969af448e93fe5299571ae6b4f0ad87b0a51df9f34225b242116416d1\"\nbalance = \"0\"\nvalidator_public_key = \"0167e08c3b05017d329444dc7d22518ba652cecb2c54669a69e5808ebcab25e42c\"\ndelegated_amount = \"7800000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01b40c8de03d4a9ab171b69ff5d2ad5110ac6a80cbd7238b1725295d9e3973ab62\"\nbalance = \"0\"\nvalidator_public_key = \"014c2573f59c70775a41d9cb69b9e3146e6d51567d33a141423b5bf371967e5902\"\ndelegated_amount = \"7979415000000000\"\n\n[[delegators]]\ndelegator_public_key = \"010a6eb8216afcaa59f9202d8bb12e30caf0c46f6c0da08ced34471d80cdfde650\"\nbalance = \"0\"\nvalidator_public_key = \"012bac1d0ff9240ff0b7b06d555815640497861619ca12583ddef434885416e69b\"\ndelegated_amount = \"8125479000000000\"\n\n[[delegators]]\ndelegator_public_key = \"013ad9596f78cd673365d01737911b7dd703bd02c566d5b4688716b524c5f9863f\"\nbalance = \"0\"\nvalidator_public_key = \"012bac1d0ff9240ff0b7b06d555815640497861619ca12583ddef434885416e69b\"\ndelegated_amount = \"8566913000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0106c2df6fd5c11fd54b54cee7160a9230b222654c55ba80dd83e10f316397ed48\"\nbalance = \"0\"\nvalidator_public_key = \"0149fcd9cc0986ba04d77ad48c7def4860bf6952ccb075666e579549386fd2f143\"\ndelegated_amount = \"8586795000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01ed392301164a763bbebe7320fff4effbba4f88705f8a409df2246b638cc231d7\"\nbalance = \"0\"\nvalidator_public_key = \"01026ca707c348ed8012ac6a1f28db031fadd6eb67203501a353b867a08c8b9a80\"\ndelegated_amount = \"8666667000000000\"\n\n[[delegators]]\ndelegator_public_key = \"019426bf1f656fa65c8e1d8b514e788121c4b894cc060f6ce22c84989739356e4c\"\nbalance = \"0\"\nvalidator_public_key = \"01652d9fbd8dbb443af0122cd4347f4107e697306e5b90f93dbf959f7612e5e7d2\"\ndelegated_amount = 
\"8666667000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01baa42a20a89b7dab3a6f0232277d1c9d159bdddec18e2d5e67924605159bfd43\"\nbalance = \"0\"\nvalidator_public_key = \"012f5501236a3bd82d22ee7675e78174ee723565495fd0f43be8a011bfa6494687\"\ndelegated_amount = \"8667224000000000\"\n\n[[delegators]]\ndelegator_public_key = \"015f065acdced524ba07b125a3325565b28f702509d3d09bae001cee972a66d53a\"\nbalance = \"0\"\nvalidator_public_key = \"01faec72d138026edbd470d9ba1f6a05c5fabaa98da8bb41c8c92041d2f58337d2\"\ndelegated_amount = \"8667224000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01d64c582fa65d96eeda1631a4d02575b9c261603fc39005efdcca4a3d1fe12fda\"\nbalance = \"0\"\nvalidator_public_key = \"0149fcd9cc0986ba04d77ad48c7def4860bf6952ccb075666e579549386fd2f143\"\ndelegated_amount = \"8767679000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0191ccaf4a29012fc8b9eed1375580a17b5cfee696cbc5c91f5426f93beb4d6a85\"\nbalance = \"0\"\nvalidator_public_key = \"017b9a85b657e0a8c2e01bf2d80b6b2e6f8d8b4bc6d7c479f21e59dceea761710b\"\ndelegated_amount = \"9620685000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0147a1349d67565cfb8493a6ac02e27d9ea6c42760b03bca1c8eeb3056a0d75c1a\"\nbalance = \"0\"\nvalidator_public_key = \"01d904f8135553a4810487b0d754f9f1c8efb1ee91c96f36e92b9631120a7ece06\"\ndelegated_amount = \"9695479000000000\"\n\n[[delegators]]\ndelegator_public_key = \"017bc476fecb69244f011e63f642936dfb062b1735f4d20c918bbd457bed121927\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"9755323000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01a4873a5470e85db27b36de0064e7b4540f7a17dece44812591fd6fd36a424cc9\"\nbalance = \"0\"\nvalidator_public_key = \"0149fcd9cc0986ba04d77ad48c7def4860bf6952ccb075666e579549386fd2f143\"\ndelegated_amount = \"10326370000000000\"\n\n[[delegators]]\ndelegator_public_key = 
\"01006212e8028fc7b3a6816783d7d6d3bab4763dee3063a127ba7808d245c10897\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"10434247000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0202086CC55934061E263B6027B989EB461719443188618AE2AA5AB8CB2126650215\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"11100000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"020292C62C906FC3869E861E96342581FEF0A995B04125CAEB41FFEF1D72E8E38F9D\"\nbalance = \"0\"\nvalidator_public_key = \"012bac1d0ff9240ff0b7b06d555815640497861619ca12583ddef434885416e69b\"\ndelegated_amount = \"13000000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"020238AF667C991D21BC88E00AF9580C301B184BA97EA59A4D28D4C2FF44FD632756\"\nbalance = \"0\"\nvalidator_public_key = \"01419478cc7a68037c553c0214d595cb6b432c71ef73ece0d7a5f98c5eb1ecb44a\"\ndelegated_amount = \"13739726000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0203950BA427954BA8F1F12EDE9FB25CE5B7839979E0AA190EF77CAB96F06C51CA84\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"13739726000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0202CD2D46B5F4DF1B6FFFAEA82DC2A0E9022DDACAD549C728DC9023721D3CA4C057\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"13768493000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01e47ca5e1c3cb306ea859320e185860ee26edec4409ce8c47ebbfc770c04b52e3\"\nbalance = \"0\"\nvalidator_public_key = \"01765c9d153550285d2e6da2f4407ca8b6c5d2f94098e9794a02609df1068474ef\"\ndelegated_amount = \"13776712500000000\"\n\n[[delegators]]\ndelegator_public_key = \"0136258664950cc030dbdc68e430f6961bef95c6ab2d269dc776ef2c9fa7bbca32\"\nbalance = \"0\"\nvalidator_public_key = 
\"01026ca707c348ed8012ac6a1f28db031fadd6eb67203501a353b867a08c8b9a80\"\ndelegated_amount = \"13776712500000000\"\n\n[[delegators]]\ndelegator_public_key = \"020232EE968558C70952E472BD4AC47D5830F317E91A00A033B45FBAB2F7B75E0158\"\nbalance = \"0\"\nvalidator_public_key = \"018f84c6fc037284f189cc8cb49f89212ff434a5eb050e48cdd164ff3890fbff69\"\ndelegated_amount = \"13793151000000000\"\n\n[[delegators]]\ndelegator_public_key = \"017a030a8b66381b1c79d265667a4a9a8b4639a1fef9e62828644a37508d70e138\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"13854795000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0203C03E5418F35C87C1A90231E6D215012DFD641F665B0BFB229804880A9E1F29E5\"\nbalance = \"0\"\nvalidator_public_key = \"01419478cc7a68037c553c0214d595cb6b432c71ef73ece0d7a5f98c5eb1ecb44a\"\ndelegated_amount = \"13863014000000000\"\n\n[[delegators]]\ndelegator_public_key = \"02034ADD5EE30D7C425EE71FA17E2A45CB72479DC35CDDD0A9B9652FFAD95432CF15\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"13912329000000000\"\n\n[[delegators]]\ndelegator_public_key = \"012f912118b53471ed8dfcf01698eeedd0b4a4b31de9566016accfaeb9fc0c5ce1\"\nbalance = \"0\"\nvalidator_public_key = \"012bac1d0ff9240ff0b7b06d555815640497861619ca12583ddef434885416e69b\"\ndelegated_amount = \"14222260000000000\"\n\n[[delegators]]\ndelegator_public_key = \"017c6bdf6a7557fd8bbf9111acdce3bd563108a6397f4d016c36ef3f818cf3d64b\"\nbalance = \"0\"\nvalidator_public_key = \"01d904f8135553a4810487b0d754f9f1c8efb1ee91c96f36e92b9631120a7ece06\"\ndelegated_amount = \"14727850000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01cc9cb4323129c2181782312b98e479cfd96dc45b9c4d2089bd9a60fe319e3278\"\nbalance = \"0\"\nvalidator_public_key = \"010a8ac8d23e6c57fa340c552ddf9199d9cba9166ecc0daee640053ebfc6254610\"\ndelegated_amount = 
\"15166666900000000\"\n\n[[delegators]]\ndelegator_public_key = \"012f0507177cb1e490bb677e0432441acca28d6241a6224b71012baa42cf05a6dc\"\nbalance = \"0\"\nvalidator_public_key = \"015dfd4b3f997b1eb1a7292eb501845931b8aa9869988a5caa2be79ac4f5ff8a21\"\ndelegated_amount = \"17333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"016151a2c7c6ccab4ddfbb9a96cccb8213d84c4f7d556b99f04e8126825dd8ea2e\"\nbalance = \"0\"\nvalidator_public_key = \"01765c9d153550285d2e6da2f4407ca8b6c5d2f94098e9794a02609df1068474ef\"\ndelegated_amount = \"17333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01d2ebc5ea92af48f4b90df249a4d7d75f60c6790a5bf1a8cbbbc3f46cbb709d0f\"\nbalance = \"0\"\nvalidator_public_key = \"01026ca707c348ed8012ac6a1f28db031fadd6eb67203501a353b867a08c8b9a80\"\ndelegated_amount = \"17333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01cd75340a885776903ffc364d62b550922fe2d8f740cb36e0714ca8238b9811b4\"\nbalance = \"0\"\nvalidator_public_key = \"014c2573f59c70775a41d9cb69b9e3146e6d51567d33a141423b5bf371967e5902\"\ndelegated_amount = \"17333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01583bd62ec1d48d2551869bc152f913c1a65151f50d4364e96713b14156010521\"\nbalance = \"0\"\nvalidator_public_key = \"013eb6775484f25ac4e126e93a350ef2bc259385da5141d331829af7f755b03844\"\ndelegated_amount = \"17333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"020237D50DFE66C504A312FBAFFA1D51DEEB5914F6CECFE3A03FC4F836039779B970\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"17333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"02021650375BE7436B43A819FC422C300EF4A28798B1FCC31D48C4C0E4CEE7428F06\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"17333333000000000\"\n\n[[delegators]]\ndelegator_public_key = 
\"01356542750dc229efee77e2d2b61cb6b79f01ecf801445df47b538478807d8345\"\nbalance = \"0\"\nvalidator_public_key = \"01d904f8135553a4810487b0d754f9f1c8efb1ee91c96f36e92b9631120a7ece06\"\ndelegated_amount = \"20702055000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01a451ce6b1705f0e028f8ee24b92d2e0a20d8a6e97c9ea8c604bcf0de3bd2f563\"\nbalance = \"0\"\nvalidator_public_key = \"01b4815d64e022127b117d25436e4b9a9d9d1338b0c6cb09db3995d4d3ac84d86b\"\ndelegated_amount = \"20720548000000000\"\n\n[[delegators]]\ndelegator_public_key = \"020289E62A85C4569BE688DDC0B5B4FB9FCB7AC3F897DD2A97CD714F8F262E97CB66\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"21462192000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0202CE2C3FC6AD9283600EC50B10EE79D84F86E3BAFF1F9AA5A993175E037346ED60\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"21666667000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01454c9053822d7c1d294a5a776ba851b0cdac6a46eae4474be0f2dd661f26932c\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"21666667000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0202BE54DF25ACE6ACA0C17E932D04B904BA1968B8256A335C74D23095ED1925CE2C\"\nbalance = \"0\"\nvalidator_public_key = \"0182f835993ce0d3596147429ea432b3a025580f458f50bbbaccbbe4c73f1f1113\"\ndelegated_amount = \"21666667000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0202899C2942A8E5C43C5E9792D9BE75A786B04C0BE4BAADB39379E9902897003117\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"21666667000000000\"\n\n[[delegators]]\ndelegator_public_key = \"02034D66B512C22620E400842AFC21B11BCCD07F7076E897E534F4CB90E620FDAC9B\"\nbalance = \"0\"\nvalidator_public_key = 
\"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"21666667000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01798eed4fbf6b0de81c8c73054a21590c625b95306fb60668b36e65cfa1670132\"\nbalance = \"0\"\nvalidator_public_key = \"01765c9d153550285d2e6da2f4407ca8b6c5d2f94098e9794a02609df1068474ef\"\ndelegated_amount = \"26000000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"012d7449cde960bb5cc61c3ffc6303682eea04d93752a84cc61dfe40ebf995d1eb\"\nbalance = \"0\"\nvalidator_public_key = \"01b32b134afdf8585b8c8181f81375a98c5ed12e145c9e6bfea33a55eeccf1fa22\"\ndelegated_amount = \"26830137000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0203431B8CD5A135CBF15A538D6ACD3C4FC1F38207BE12D7AF609862FCC070B1E674\"\nbalance = \"0\"\nvalidator_public_key = \"010a8ac8d23e6c57fa340c552ddf9199d9cba9166ecc0daee640053ebfc6254610\"\ndelegated_amount = \"27588419226000000\"\n\n[[delegators]]\ndelegator_public_key = \"0203334DACD640BC716063D57B3F4332623843A87874A32C4BB7C3760DF23D4B57E5\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"27610959000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0203A8B1E98C42B4FC4A10D7B983D760D2C33FE8D626F2111AE99CC74777E9FB4363\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"27717808000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0202D2239D7F079EA9E9153C61641A895EF984D9AB15BC25591A61EBE5369E9B037C\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"27890411000000000\"\n\n[[delegators]]\ndelegator_public_key = \"02031AAF193C78712F1647D6A787C33D126DD3166461D48F99F9D3B42E240B040B4C\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = 
\"30333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0203A0FE0B5EF6EE7F63A53D6E13716C0BFAE5DE24AA1FD54A99AA7B2CB5A367A2AB\"\nbalance = \"0\"\nvalidator_public_key = \"017de9688caedd0718baed968179ddbe0b0532a8ef0a9a1cb9dfabe9b0f6016fa8\"\ndelegated_amount = \"30476344000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0139c59b850e9a8d26b4778918177deebfe1fcfbbcde715901914d84fdc9704e43\"\nbalance = \"0\"\nvalidator_public_key = \"010a8ac8d23e6c57fa340c552ddf9199d9cba9166ecc0daee640053ebfc6254610\"\ndelegated_amount = \"32164972000000000\"\n\n[[delegators]]\ndelegator_public_key = \"010aeee26a8dcd60bcb69cb00abeb290ee95363aa5e53510b206cfb446f6792413\"\nbalance = \"0\"\nvalidator_public_key = \"0144118517b5ba7e7ca917d5a418c5fb8bf49e279427435d6597f59f8c5bf9ff1f\"\ndelegated_amount = \"33162132000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01192b1f62f97d6b5edf84fcb113b14b875b0e9cc96d7d054d70098c383e5fbf14\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"34791096000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01045ac0fd84e85c852526fa58447fd36c1054cc67cde05992acf65411bb568d01\"\nbalance = \"0\"\nvalidator_public_key = \"012bac1d0ff9240ff0b7b06d555815640497861619ca12583ddef434885416e69b\"\ndelegated_amount = \"34791096000000000\"\n\n[[delegators]]\ndelegator_public_key = \"010cdf3c6f2ecf000b7b6adf053d2755d3eabb41a5529aa8c1e6b1f958dc8f0b49\"\nbalance = \"0\"\nvalidator_public_key = \"01652d9fbd8dbb443af0122cd4347f4107e697306e5b90f93dbf959f7612e5e7d2\"\ndelegated_amount = \"36527397000000000\"\n\n[[delegators]]\ndelegator_public_key = \"020218BBAA66CF1B8F7631B5887A1CC1C5F57FCEFD54D8399632EA5D84C029740720\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"37143395000000000\"\n\n[[delegators]]\ndelegator_public_key = 
\"016dd414ee063ec490d07aee039f7f65d51bf0e80d8f8d9265ad129a2ab56934ae\"\nbalance = \"0\"\nvalidator_public_key = \"012bac1d0ff9240ff0b7b06d555815640497861619ca12583ddef434885416e69b\"\ndelegated_amount = \"39866667000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0202B279F0FB3825C3B4B090DF06B5675FA59E6EF29D22C1F7FE1B8CAC8CB7CCFD12\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"39980061000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01431edff6e7748d0e1d7595168e9be134851b391fdbf808660a7cb2e4efc91cf8\"\nbalance = \"0\"\nvalidator_public_key = \"017d940644deeb8eea3d94f62cfa65a9bea4eb2e8ec9433104bb145d73fd39e98f\"\ndelegated_amount = \"41589041000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01c7e757c775939a61c977637d76ec699a4c6c5fc21697f91d67ee9f001df69297\"\nbalance = \"0\"\nvalidator_public_key = \"01ba1dcfbe8dba48b88674bb007c000391c0ea36b5d80570c113a42a9823d702c2\"\ndelegated_amount = \"42723288000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01b65597c2a70551546e8c99513a6c6f746fa09ca6d9f43b41a9202272d5f952bf\"\nbalance = \"0\"\nvalidator_public_key = \"0145316b84fee8735291f5a29206e5211c74ab828b0382bb475a4a6e799894ea11\"\ndelegated_amount = \"43333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"02021903CE3B15FB13B32E8E7AB7F4BEC39F8B341EA6C0C9670FD2C16B0591043F7A\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"43333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0202EFD4EE6001163101D035C6652E60C2E5C96129F17A6E4834101EC6E9CD57F002\"\nbalance = \"0\"\nvalidator_public_key = \"01419478cc7a68037c553c0214d595cb6b432c71ef73ece0d7a5f98c5eb1ecb44a\"\ndelegated_amount = \"43333333000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0202D30C2BE47CE9AE62E36F5A53225D6590667B6809EA5F6DD53C3083518BAC2A1F\"\nbalance = \"0\"\nvalidator_public_key = 
\"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"49800621774000000\"\n\n[[delegators]]\ndelegator_public_key = \"01a41493ada4d34760f669588c4812d9859334914feec7419d7f612043838a3e7a\"\nbalance = \"0\"\nvalidator_public_key = \"010a8ac8d23e6c57fa340c552ddf9199d9cba9166ecc0daee640053ebfc6254610\"\ndelegated_amount = \"65000000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"020284B66997B94DEDB322F34C6A29EC524561CBBF20BF1ECA67024C3E94E7F36ED5\"\nbalance = \"0\"\nvalidator_public_key = \"01717c1899762ffdbd12def897ac905f1debff38e8bafb081620cb6da5a6bb1f25\"\ndelegated_amount = \"65000000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01a79ba9e6ee83292a586213843d4471e1ca04de505885934696d9bb62b5823d2e\"\nbalance = \"0\"\nvalidator_public_key = \"0145316b84fee8735291f5a29206e5211c74ab828b0382bb475a4a6e799894ea11\"\ndelegated_amount = \"65000000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"020344840C1945419E68E5D30D7549F5C1D667869D4EAD5AA1C740DFE4B697E35369\"\nbalance = \"0\"\nvalidator_public_key = \"015fd964620f98e551065079e142840dac3fb25bd97a0d4722411cb439f9247d72\"\ndelegated_amount = \"70122374000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01f6324ce3c4c82b54a0cea605a11873d7d9a15be82686e39c10c1227418ec8e25\"\nbalance = \"0\"\nvalidator_public_key = \"01b4815d64e022127b117d25436e4b9a9d9d1338b0c6cb09db3995d4d3ac84d86b\"\ndelegated_amount = \"105020548000000000\"\n\n[[delegators]]\ndelegator_public_key = \"020346A5AE20EACADECF6C9E740C478D5A9FAD50757DA6F122560C1441328E0DED21\"\nbalance = \"0\"\nvalidator_public_key = \"011117189c666f81c5160cd610ee383dc9b2d0361f004934754d39752eedc64957\"\ndelegated_amount = \"111285969000000000\"\n\n[[delegators]]\ndelegator_public_key = \"020391DFD3E9B1673BBCE2EA44471E55D1B168769BA0315104AF43290CEFAA2EEEAC\"\nbalance = \"0\"\nvalidator_public_key = \"011117189c666f81c5160cd610ee383dc9b2d0361f004934754d39752eedc64957\"\ndelegated_amount = 
\"116026124000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01dba4402d0e87c7d224cbf5d67e50dfefc95d7333050d83af50b3e610a135dcb7\"\nbalance = \"0\"\nvalidator_public_key = \"012bac1d0ff9240ff0b7b06d555815640497861619ca12583ddef434885416e69b\"\ndelegated_amount = \"150297534000000000\"\n\n[[delegators]]\ndelegator_public_key = \"01846c1a2bba6908a154b2070d973caf2641ff37180d33c1ed6147616d74a5fa0e\"\nbalance = \"0\"\nvalidator_public_key = \"0145316b84fee8735291f5a29206e5211c74ab828b0382bb475a4a6e799894ea11\"\ndelegated_amount = \"160793460000000000\"\n\n[[delegators]]\ndelegator_public_key = \"02021E54DB769A70BA0E54D2550D73A31B6168F3054A73CDAD30A3F4172C6E7903A9\"\nbalance = \"0\"\nvalidator_public_key = \"0163e03c3aa2b383f9d1b2f7c69498d339dcd1061059792ce51afda49135ff7876\"\ndelegated_amount = \"250000000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"0203E81558ED17FF382273BC9006E3DA1409A85E149EE4A143FEBBF603088D7FBA1F\"\nbalance = \"0\"\nvalidator_public_key = \"01aa2976834459371b1cf7f476873dd091a0e364bd18abed8e77659b83fd892084\"\ndelegated_amount = \"250000000000000000\"\n\n[[delegators]]\ndelegator_public_key = \"010b24b2974ac8dd9027a06be487c383732a5e4605d254fb017d9fce388d347e77\"\nbalance = \"0\"\nvalidator_public_key = \"0190c434129ecbaeb34d33185ab6bf97c3c493fc50121a56a9ed8c4c52855b5ac1\"\ndelegated_amount = \"453839640000000000\"\n\n\n"
  },
  {
    "path": "resources/production/chainspec.toml",
    "content": "[protocol]\n# Protocol version.\nversion = '2.1.2'\n# Whether we need to clear latest blocks back to the switch block just before the activation point or not.\nhard_reset = true\n# This protocol version becomes active at this point.\n#\n# If it is a timestamp string, it represents the timestamp for the genesis block.  This is the beginning of era 0.  By\n# this time, a sufficient majority (> 50% + F/2 — see finality_threshold_fraction below) of validator nodes must be up\n# and running to start the blockchain.  This timestamp is also used in seeding the pseudo-random number generator used\n# in contract-runtime for computing genesis post-state hash.\n#\n# If it is an integer, it represents an era ID, meaning the protocol version becomes active at the start of this era.\nactivation_point = 19665\n\n[network]\n# Human readable name for convenience; the genesis_hash is the true identifier.  The name influences the genesis hash by\n# contributing to the seeding of the pseudo-random number generator used in contract-runtime for computing genesis\n# post-state hash.\nname = 'casper'\n# The maximum size of an acceptable networking message in bytes.  Any message larger than this will\n# be rejected at the networking level.\nmaximum_net_message_size = 25_165_824\n\n[core]\n# Era duration.\nera_duration = '120 minutes'\n# Minimum number of blocks per era.  An era will take longer than `era_duration` if that is necessary to reach the\n# minimum height.\nminimum_era_height = 100\n# Minimum difference between a block's and its child's timestamp.\nminimum_block_time = '8000 ms'\n# Number of slots available in validator auction.\nvalidator_slots = 100\n# A number between 0 and 1 representing the fault tolerance threshold as a fraction, used by the internal finalizer.\n# It is the fraction of validators that would need to equivocate to make two honest nodes see two conflicting blocks as\n# finalized: A higher value F makes it safer to rely on finalized blocks.  
It also makes it more difficult to finalize\n# blocks, however, and requires strictly more than (F + 1)/2 validators to be working correctly.\nfinality_threshold_fraction = [1, 3]\n# Protocol version from which nodes are required to hold strict finality signatures.\nstart_protocol_version_with_strict_finality_signatures_required = '1.5.0'\n# Which finality is required for legacy blocks. Options are 'Strict', 'Weak' and 'Any'.\n# Used to determine finality sufficiency for new joiners syncing blocks created\n# in a protocol version before\n# `start_protocol_version_with_strict_finality_signatures_required`.\nlegacy_required_finality = 'Strict'\n# Number of eras before an auction actually defines the set of validators.  If you bond with a sufficient bid in era N,\n# you will be a validator in era N + auction_delay + 1.\nauction_delay = 1\n# The period after genesis during which a genesis validator's bid is locked.\nlocked_funds_period = '0 days'\n# The period in which genesis validator's bid is released over time after it's unlocked.\nvesting_schedule_period = '0 weeks'\n# Default number of eras that need to pass to be able to withdraw unbonded funds.\nunbonding_delay = 7\n# Round seigniorage rate represented as a fraction of the total supply.\n#\n# Annual issuance: 8%\n# Minimum block time: 8000 milliseconds\n# Ticks per year: 31536000000\n#\n# (1+0.08)^((8000)/31536000000)-1 is expressed as a fractional number below\n# Python:\n# from fractions import Fraction\n# Fraction((1 + 0.08)**((8000)/31536000000) - 1).limit_denominator(1000000000)\nround_seigniorage_rate = [11, 563427926]\n# Maximum number of associated keys for a single account.\nmax_associated_keys = 100\n# Maximum height of contract runtime call stack.\nmax_runtime_call_stack_height = 12\n# Minimum allowed delegation amount in motes\nminimum_delegation_amount = 500_000_000_000\n# Maximum allowed delegation amount in motes\nmaximum_delegation_amount = 1_000_000_000_000_000_000\n# Minimum bid amount allowed 
in motes. Withdrawing one's bid to an amount strictly less than\n# the value specified will be treated as a full unbond of a validator and their associated delegators\nminimum_bid_amount = 500_000_000_000\n# Global state prune batch size (0 = this feature is off)\nprune_batch_size = 0\n# Enables strict arguments checking when calling a contract; i.e. that all non-optional args are provided and of the correct `CLType`.\nstrict_argument_checking = false\n# Number of simultaneous peer requests.\nsimultaneous_peer_requests = 5\n# The consensus protocol to use. Options are \"Zug\" and \"Highway\".\nconsensus_protocol = 'Zug'\n# The maximum amount of delegators per validator.\nmax_delegators_per_validator = 1200\n# Minimum delegation rate validators can specify (0-100).\nminimum_delegation_rate = 0\n# The split in finality signature rewards between block producer and participating signers.\nfinders_fee = [1, 5]\n# The proportion of baseline rewards going to reward finality signatures specifically.\nfinality_signature_proportion = [95, 100]\n# Lookback interval indicating which past block we are looking at to reward.\nsignature_rewards_max_delay = 6\n# Allows transfers between accounts in the blockchain network.\n#\n# Setting this to false restricts normal accounts from sending tokens to other accounts, allowing transfers only to administrators.\n# Changing this option makes sense only on private chains.\nallow_unrestricted_transfers = true\n# Enables the auction entry points 'delegate' and 'add_bid'.\n#\n# Setting this to false makes sense only for private chains which don't need to auction new validator slots. 
These\n# auction entry points will return an error if called when this option is set to false.\nallow_auction_bids = true\n# If set to false, then consensus doesn't compute rewards and always uses 0.\ncompute_rewards = true\n# Defines how refunds of the unused portion of payment amounts are calculated and handled.\n#\n# Valid options are:\n#   'refund': a ratio of the unspent token is returned to the spender.\n#   'burn': a ratio of the unspent token is burned.\n#   'no_refund': no refunds are paid out; this is functionally equivalent to refund with 0% ratio.\n# This causes excess payment amounts to be sent to either a\n# pre-defined purse, or back to the sender.  The refunded amount is calculated as the given ratio of the payment amount\n# minus the execution costs.\nrefund_handling = { type = 'refund', refund_ratio = [75, 100] }\n# Defines how fees are handled.\n#\n# Valid options are:\n#   'no_fee': fees are eliminated.\n#   'pay_to_proposer': fees are paid to the block proposer\n#   'accumulate': fees are accumulated in a special purse and distributed at the end of each era evenly among all\n#                 administrator accounts\n#   'burn': fees are burned\nfee_handling = { type = 'burn' }\n# If a validator would receive a validator credit, it cannot exceed this percentage of their total stake.\nvalidator_credit_cap = [1, 5]\n# Defines how pricing is handled.\n#\n# Valid options are:\n#   'payment_limited': senders of transaction self-specify how much they pay.\n#   'fixed': costs are fixed, per the cost table\n#   'prepaid': prepaid transaction (currently not supported)\npricing_handling = { type = 'payment_limited' }\n# Does the network allow pre-payment for future\n# execution? 
Currently not supported.\n#\nallow_prepaid = false\n# Defines how gas holds affect available balance calculations.\n#\n# Valid options are:\n#   'accrued': sum of full value of all non-expired holds.\n#   'amortized': sum of each hold is amortized over the time remaining until expiry.\n#\n# For instance, if 12 hours remained on a gas hold with a 24-hour `gas_hold_interval`,\n#   with accrued, the full hold amount would be applied\n#   with amortized, half the hold amount would be applied\ngas_hold_balance_handling = { type = 'accrued' }\n# Defines how long gas holds last.\n#\n# If fee_handling is set to 'no_fee', the system places a balance hold on the payer\n# equal to the value the fee would have been. Such balance holds expire after a time\n# interval has elapsed. This setting controls how long that interval is. The available\n# balance of a purse equals its total balance minus the held amount(s) of non-expired\n# holds (see gas_hold_balance_handling setting for details of how that is calculated).\n#\n# For instance, if gas_hold_interval is 24 hours and 100 gas is used from a purse,\n# a hold for 100 is placed on that purse and is considered when calculating total balance\n# for 24 hours starting from the block_time when the hold was placed.\ngas_hold_interval = '24 hours'\n# List of public keys of administrator accounts. Setting this option makes sense only on private chains which require\n# administrator accounts for regulatory reasons.\nadministrators = []\n# Flag that triggers a migration of all userland accounts and contracts present in global state to the addressable\n# entity in lazy manner.\n# If the flag is set to false then no accounts and contracts are migrated during a protocol upgrade;\n# i.e. 
all Account records will be present under Key::Account and Contracts and their associated ContractPackage\n# will be written underneath Key::Hash.\n# If the flag is set to true then accounts and contracts are migrated lazily; i.e. on first use of the Account\n# and/or Contract as part of the execution of a Transaction. This means the Accounts/Contracts will be migrated\n# to their corresponding AddressableEntity and the NamedKeys for the previous record are separated and written\n# as discrete top level records. For Contracts specifically the entrypoints are also written as discrete top\n# level records\n# Note: Enabling of the AddressableEntity feature is one-way; i.e. once enabled as part of a protocol upgrade\n# the flag cannot be disabled in a future protocol upgrade.\nenable_addressable_entity = false\n# This value is used as the penalty payment amount, the lowest cost, and the minimum balance amount.\nbaseline_motes_amount = 2_500_000_000\n# Flag on whether ambiguous entity versions returns an execution error.\ntrap_on_ambiguous_entity_version = false\nrewards_handling = { type = 'standard' }\n\n\n[highway]\n# Highway dynamically chooses its round length, between minimum_block_time and maximum_round_length.\nmaximum_round_length = '66 seconds'\n\n[transactions]\n# The duration after the transaction timestamp that it can be included in a block.\nmax_ttl = '2 hours'\n# The maximum number of approvals permitted in a single block.\nblock_max_approval_count = 2600\n# Maximum block size in bytes including transactions contained by the block.  
0 means unlimited.\nmax_block_size = 2_621_400\n# The upper limit of total gas of all transactions in a block.\nblock_gas_limit = 812_500_000_000\n# The minimum amount in motes for a valid native transfer.\nnative_transfer_minimum_motes = 2_500_000_000\n# The maximum value to which `transaction_acceptor.timestamp_leeway` can be set in the config.toml file.\nmax_timestamp_leeway = '5 seconds'\n\n# Configuration of the transaction runtime.\n[transactions.enabled_runtime]\nvm_casper_v1 = true\nvm_casper_v2 = false\n\n[transactions.v1]\n# The configuration settings for the lanes of transactions including both native and Wasm based interactions.\n# Currently the node supports two native interactions the mint and auction and have the reserved identifiers of 0 and 1\n# respectively\n# The remaining wasm based lanes specify the range of configuration settings for a given Wasm based transaction\n# within a given lane.\n# The maximum length in bytes of runtime args per V1 transaction.\n# [0] -> Transaction lane label (apart from the reserved native identifiers these are simply labels)\n# Note: For the given mainnet implementation we specially reserve the label 2 for install and upgrades and\n# the lane must be present and defined.\n# Different casper networks may not impose such a restriction.\n# [1] -> Max serialized length of the entire transaction in bytes for a given transaction in a certain lane\n# [2] -> Max args length size in bytes for a given transaction in a certain lane\n# [3] -> Transaction gas limit for a given transaction in a certain lane\n# [4] -> The maximum number of transactions the lane can contain\nnative_mint_lane = [0, 2048, 1024, 100_000_000, 325]\nnative_auction_lane = [1, 3096, 2048, 2_500_000_000, 325]\ninstall_upgrade_lane = [2, 750_000, 2048, 1_000_000_000_000, 1]\nwasm_lanes = [\n    [3, 750_000, 2048, 1_000_000_000_000, 1],\n    [4, 131_072, 1024, 100_000_000_000, 2],\n    [5, 65_536, 512, 5_000_000_000, 40]\n]\n\n[transactions.deploy]\n# The 
maximum number of Motes allowed to be spent during payment.  0 means unlimited.\nmax_payment_cost = '0'\n# The limit of length of serialized payment code arguments.\npayment_args_max_length = 1024\n# The limit of length of serialized session code arguments.\nsession_args_max_length = 1024\n\n[wasm.v1]\n# Amount of free memory (in 64kB pages) each contract can use for stack.\nmax_memory = 64\n# Max stack height (native WebAssembly stack limiter).\nmax_stack_height = 500\n\n[storage_costs]\n# Gas charged per byte stored in the global state.\ngas_per_byte = 1_117_587\n\n# For each opcode cost below there exists a static cost and a dynamic cost.\n# The static cost is a fixed cost for each opcode that is hardcoded and validated by benchmarks.\n[wasm.v1.opcode_costs]\n# Bit operations multiplier.\nbit = 105\n# Arithmetic add operations multiplier.\nadd = 105\n# Mul operations multiplier.\nmul = 105\n# Div operations multiplier.\ndiv = 105\n# Memory load operation multiplier.\nload = 105\n# Memory store operation multiplier.\nstore = 105\n# Const store operation multiplier.\nconst = 105\n# Local operations multiplier.\nlocal = 105\n# Global operations multiplier.\nglobal = 105\n# Integer operations multiplier.\ninteger_comparison = 105\n# Conversion operations multiplier.\nconversion = 105\n# Unreachable operation multiplier.\nunreachable = 105\n# Nop operation multiplier.\nnop = 105\n# Get current memory operation multiplier.\ncurrent_memory = 105\n# Grow memory cost, per page (64kb).\ngrow_memory = 900\n# Sign extension operations cost\nsign = 105\n\n# Control flow operations multiplier.\n[wasm.v1.opcode_costs.control_flow]\nblock = 255\nloop = 255\nif = 105\nelse = 105\nend = 105\nbr = 1665\nbr_if = 510\nreturn = 105\nselect = 105\ncall = 225\ncall_indirect = 270\ndrop = 105\n\n[wasm.v1.opcode_costs.control_flow.br_table]\n# Fixed cost per `br_table` opcode\ncost = 150\n# Size of target labels in the `br_table` opcode will be multiplied by 
`size_multiplier`\nsize_multiplier = 100\n\n# Host function declarations are located in smart_contracts/contract/src/ext_ffi.rs\n[wasm.v1.host_function_costs]\nadd = { cost = 5_800, arguments = [0, 0, 0, 0] }\nadd_associated_key = { cost = 1_200_000, arguments = [0, 0, 0] }\nadd_contract_version = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 0, 0] }\nadd_contract_version_with_message_topics = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 30_000, 0, 0] }\nadd_package_version_with_message_topics = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 30_000, 0, 0] }\nblake2b = { cost = 1_200_000, arguments = [0, 120_000, 0, 0] }\ncall_contract = { cost = 300_000_000, arguments = [0, 0, 0, 120_000, 0, 120_000, 0] }\ncall_versioned_contract = { cost = 300_000_000, arguments = [0, 0, 0, 0, 0, 120_000, 0, 120_000, 0] }\ncreate_contract_package_at_hash = { cost = 200, arguments = [0, 0] }\ncreate_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0] }\ncreate_purse = { cost = 2_500_000_000, arguments = [0, 0] }\ndisable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] }\nget_balance = { cost = 3_000_000, arguments = [0, 0, 0] }\nget_blocktime = { cost = 330, arguments = [0] }\nget_caller = { cost = 380, arguments = [0] }\nget_key = { cost = 2_000, arguments = [0, 440, 0, 0, 0] }\nget_main_purse = { cost = 1_300, arguments = [0] }\nget_named_arg = { cost = 200, arguments = [0, 120_000, 0, 120_000] }\nget_named_arg_size = { cost = 200, arguments = [0, 0, 0] }\nget_phase = { cost = 710, arguments = [0] }\nget_system_contract = { cost = 1_100, arguments = [0, 0, 0] }\nhas_key = { cost = 1_500, arguments = [0, 840] }\nis_valid_uref = { cost = 760, arguments = [0, 0] }\nload_named_keys = { cost = 42_000, arguments = [0, 0] }\nnew_uref = { cost = 17_000, arguments = [0, 0, 590] }\nrandom_bytes = { cost = 200, arguments = [0, 0] }\nprint = { cost = 20_000, arguments = [0, 4_600] }\nprovision_contract_user_group_uref = { cost = 
200, arguments = [0, 0, 0, 0, 0] }\nput_key = { cost = 100_000_000, arguments = [0, 120_000, 0, 120_000] }\nread_host_buffer = { cost = 3_500, arguments = [0, 310, 0] }\nread_value = { cost = 60_000, arguments = [0, 120_000, 0] }\ndictionary_get = { cost = 5_500, arguments = [0, 590, 0] }\nremove_associated_key = { cost = 4_200, arguments = [0, 0] }\nremove_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0] }\nremove_contract_user_group_urefs = { cost = 200, arguments = [0, 0, 0, 0, 0, 120_000] }\nremove_key = { cost = 61_000, arguments = [0, 3_200] }\nret = { cost = 23_000, arguments = [0, 420_000] }\nrevert = { cost = 500, arguments = [0] }\nset_action_threshold = { cost = 74_000, arguments = [0, 0] }\ntransfer_from_purse_to_account = { cost = 2_500_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] }\ntransfer_from_purse_to_purse = { cost = 82_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0] }\ntransfer_to_account = { cost = 2_500_000_000, arguments = [0, 0, 0, 0, 0, 0, 0] }\nupdate_associated_key = { cost = 4_200, arguments = [0, 0, 0] }\nwrite = { cost = 14_000, arguments = [0, 0, 0, 980] }\ndictionary_put = { cost = 9_500, arguments = [0, 1_800, 0, 520] }\nenable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] }\nmanage_message_topic = { cost = 200, arguments = [0, 30_000, 0, 0] }\nemit_message = { cost = 200, arguments = [0, 30_000, 0, 120_000] }\ngeneric_hash = { cost = 1_200_000, arguments = [0, 120_000, 0, 0, 0] }\ncost_increase_per_message = 50\nget_block_info = { cost = 330, arguments = [0, 0] }\nrecover_secp256k1 = { cost = 1_300_000, arguments = [0, 120_000, 0, 0, 0, 0] }\nverify_signature = { cost = 1_300_000, arguments = [0, 120_000, 0, 0, 0, 0] }\ncall_package_version = { cost = 300_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 120_000, 0, 120_000, 0] }\n\n[wasm.v2]\n# Amount of free memory each contract can use for stack.\nmax_memory = 64\n\n[wasm.v2.opcode_costs]\n# Bit operations multiplier.\nbit = 105\n# Arithmetic add operations 
multiplier.\nadd = 105\n# Mul operations multiplier.\nmul = 105\n# Div operations multiplier.\ndiv = 105\n# Memory load operation multiplier.\nload = 105\n# Memory store operation multiplier.\nstore = 105\n# Const store operation multiplier.\nconst = 105\n# Local operations multiplier.\nlocal = 105\n# Global operations multiplier.\nglobal = 105\n# Integer operations multiplier.\ninteger_comparison = 105\n# Conversion operations multiplier.\nconversion = 105\n# Unreachable operation multiplier.\nunreachable = 105\n# Nop operation multiplier.\nnop = 105\n# Get current memory operation multiplier.\ncurrent_memory = 105\n# Grow memory cost, per page (64kb).\ngrow_memory = 900\n# Sign extension operations cost\nsign = 105\n\n# Control flow operations multiplier.\n[wasm.v2.opcode_costs.control_flow]\nblock = 255\nloop = 255\nif = 105\nelse = 105\nend = 105\nbr = 1665\nbr_if = 510\nreturn = 105\nselect = 105\ncall = 225\ncall_indirect = 270\ndrop = 105\n\n[wasm.v2.opcode_costs.control_flow.br_table]\n# Fixed cost per `br_table` opcode\ncost = 150\n# Size of target labels in the `br_table` opcode will be multiplied by `size_multiplier`\nsize_multiplier = 100\n\n[wasm.v2.host_function_costs]\nread = { cost = 0, arguments = [0, 0, 0, 0, 0, 0] }\nwrite = { cost = 0, arguments = [0, 0, 0, 0, 0] }\nremove = { cost = 0, arguments = [0, 0, 0] }\ncopy_input = { cost = 0, arguments = [0, 0] }\nret = { cost = 0, arguments = [0, 0] }\ncreate = { cost = 0, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] }\ntransfer = { cost = 0, arguments = [0, 0, 0] }\nenv_balance = { cost = 0, arguments = [0, 0, 0, 0] }\nupgrade = { cost = 0, arguments = [0, 0, 0, 0, 0, 0] }\ncall = { cost = 0, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] }\nprint = { cost = 0, arguments = [0, 0] }\nemit = { cost = 0, arguments = [0, 0, 0, 0] }\nenv_info = { cost = 0, arguments = [0, 0] }\n\n[wasm.messages_limits]\nmax_topic_name_size = 256\nmax_topics_per_contract = 128\nmax_message_size = 1_024\n\n[system_costs]\n# Penalty 
charge for calling invalid entry point in a system contract.\nno_such_entrypoint = 2_500_000_000\n\n[system_costs.auction_costs]\nget_era_validators = 2_500_000_000\nread_seigniorage_recipients = 5_000_000_000\nadd_bid = 2_500_000_000\nwithdraw_bid = 2_500_000_000\ndelegate = 2_500_000_000\nundelegate = 2_500_000_000\nrun_auction = 2_500_000_000\nslash = 2_500_000_000\ndistribute = 2_500_000_000\nwithdraw_delegator_reward = 5_000_000_000\nwithdraw_validator_reward = 5_000_000_000\nread_era_id = 2_500_000_000\nactivate_bid = 2_500_000_000\nredelegate = 2_500_000_000\nchange_bid_public_key = 5_000_000_000\nadd_reservations = 2_500_000_000\ncancel_reservations = 2_500_000_000\n\n[system_costs.mint_costs]\nmint = 2_500_000_000\nreduce_total_supply = 2_500_000_000\ncreate = 2_500_000_000\nbalance = 100_000_000\nburn = 100_000_000\ntransfer = 100_000_000\nread_base_round_reward = 2_500_000_000\nmint_into_existing_purse = 2_500_000_000\n\n[system_costs.handle_payment_costs]\nget_payment_purse = 10_000\nset_refund_purse = 10_000\nget_refund_purse = 10_000\nfinalize_payment = 2_500_000_000\n\n[system_costs.standard_payment_costs]\npay = 10_000\n\n[vacancy]\n# The cost of a transaction is based on a multiplier. This allows for economic disincentives for misuse of the network.\n#\n# The network starts with a current_gas_price of min_gas_price.\n#\n# Each block has multiple limits (bytes, transactions, transfers, gas, etc.)\n# The utilization for a block is determined by the highest percentage utilization of each of these limits.\n#\n# Ex: transfers limit is 650 and transactions limit is 20 (assume other limits are not a factor here)\n#     19 transactions -> 19/20 or 95%\n#     600 transfers -> 600/650 or 92.3%\n#     resulting block utilization is 95\n#\n# The utilization for an era is the average of all block utilizations. 
At the switch block, the dynamic gas_price is\n# adjusted with the following:\n#\n# If utilization was below the lower_threshold, current_gas_price is decremented by one if higher than min_gas_price.\n# If utilization falls between the thresholds, current_gas_price is not changed.\n# If utilization was above the upper_threshold, current_gas_price is incremented by one if lower than max_gas_price.\n#\n# The cost charged for the transaction is simply the gas_used * current_gas_price.\nupper_threshold = 90\nlower_threshold = 50\nmax_gas_price = 1\nmin_gas_price = 1\n"
  },
  {
    "path": "resources/production/config-example.toml",
    "content": "# ================================\n# Configuration options for a node\n# ================================\n[node]\n\n# If set, use this hash as a trust anchor when joining an existing network.\n#trusted_hash = 'HEX-FORMATTED BLOCK HASH'\n\n# Historical sync behavior for this node. Options are:\n#  'ttl'      (node will attempt to acquire all block data to comply with time to live enforcement)\n#  'genesis'  (node will attempt to acquire all block data back to genesis)\n#  'nosync'   (node will only acquire blocks moving forward)\n#  'isolated' (node will initialize without peers and will not accept peers)\n#  'completeblock' (node will acquire complete block and shutdown)\n# note: the only two states allowed to switch to Validate reactor state are `genesis` and `ttl`.\n#       it is recommended for dedicated validator nodes to be in ttl mode to increase\n#       their ability to maintain maximal uptime...if a long-running genesis validator\n#       goes offline and comes back up while in genesis mode, it must backfill\n#       any gaps in its block awareness before resuming validation.\n#\n#       it is recommended for reporting non-validator nodes to be in genesis mode to\n#       enable support for queries at any block height.\n#\n#       it is recommended for non-validator working nodes (for dapp support, etc) to run in\n#       ttl or nosync mode (depending upon their specific data requirements).\n#\n#       thus for instance a node backing a block explorer would prefer genesis mode,\n#       while a node backing a dapp interested in very recent activity would prefer to run in nosync mode,\n#       and a node backing a dapp interested in auction activity or tracking trends would prefer to run in ttl mode.\n# note: as time goes on, the time to sync back to genesis takes progressively longer.\n# note: ttl is a chainspec configured behavior on a given network; consult the `max_ttl` chainspec setting\n#       (it is currently ~2 hours by default on 
production and production-like networks but subject to change).\n# note: `nosync` is incompatible with validator behavior; a nosync node is prevented from participating\n#        in consensus / switching to validate mode. it is primarily for lightweight nodes that are\n#        only interested in recent activity.\n# note: an isolated node will not connect to, sync with, or keep up with the network, but will respond to\n#       binary port, rest server, event server, and diagnostic port connections.\nsync_handling = 'ttl'\n\n# Idle time after which the syncing process is considered stalled.\nidle_tolerance = '20 minutes'\n\n# When the syncing process is considered stalled, it'll be retried up to `max_attempts` times.\nmax_attempts = 3\n\n# Default delay for the control events that have no dedicated delay requirements.\ncontrol_logic_default_delay = '1 second'\n\n# Flag which forces the node to resync all the blocks.\nforce_resync = false\n\n# A timeout for the ShutdownForUpgrade state, after which the node will upgrade even if not all\n# conditions are satisfied.\nshutdown_for_upgrade_timeout = '2 minutes'\n\n# Maximum time a node will wait for an upgrade to commit.\nupgrade_timeout = '30 seconds'\n\n# The node detects when it should do a controlled shutdown when it is in a detectably bad state\n# in order to avoid potentially catastrophic uncontrolled crashes. Generally, a node should be\n# allowed to shutdown, and if restarted that node will generally recover gracefully and resume\n# normal operation. However, actively validating nodes have subjective state in memory that is\n# lost on shutdown / restart and must be reacquired from other validating nodes on restart.\n# If all validating nodes shutdown in the middle of an era, social consensus is required to restart\n# the network. 
As a mitigation for that, the following config can be set to true on some validator\n# nodes to cause nodes that are supposed to be validators in the current era to ignore controlled\n# shutdown events and stay up. This allows them to act as sentinels for the consensus data for\n# other restarting nodes. This config is inert on non-validating nodes.\nprevent_validator_shutdown = false\n\n# =================================\n# Configuration options for logging\n# =================================\n[logging]\n\n# Output format.  Possible values are 'text' or 'json'.\nformat = 'json'\n\n# Colored output.  Has no effect if format = 'json'.\ncolor = false\n\n# Abbreviate module names in text output.  Has no effect if format = 'json'.\nabbreviate_modules = false\n\n\n# ===================================\n# Configuration options for consensus\n# ===================================\n[consensus]\n\n# Path (absolute, or relative to this config.toml) to validator's secret key file used to sign\n# consensus messages.\nsecret_key_path = '/etc/casper/validator_keys/secret_key.pem'\n\n# The maximum number of blocks by which execution is allowed to lag behind finalization.\n# If it is more than that, consensus will pause, and resume once the executor has caught up.\nmax_execution_delay = 6\n\n\n# =======================================\n# Configuration options for Zug consensus\n# =======================================\n[consensus.zug]\n\n# Request the latest protocol state from a random peer periodically, with this interval.\n# '0 seconds' means it is disabled and we never request the protocol state from a peer.\nsync_state_interval = '1 second'\n\n# Log inactive or faulty validators periodically, with this interval.\n# '0 seconds' means it is disabled and we never print the log message.\nlog_participation_interval = '1 minute'\n\n# The minimal proposal timeout. 
Validators wait this long for a proposal to receive a quorum of\n# echo messages, before they vote to make the round skippable and move on to the next proposer.\nproposal_timeout = '5 seconds'\n\n# The additional proposal delay that is still considered fast enough, in percent. This should\n# take into account variables like empty vs. full blocks, network traffic etc.\n# E.g. if proposing a full block while under heavy load takes 50% longer than an empty one\n# while idle this should be at least 50, meaning that the timeout is 50% longer than\n# necessary for a quorum of recent proposals, approximately.\nproposal_grace_period = 200\n\n# The average number of rounds after which the proposal timeout adapts by a factor of 2.\n# Note: It goes up faster than it goes down: it takes fewer rounds to double than to halve.\nproposal_timeout_inertia = 10\n\n# The maximum difference between validators' clocks we expect. Incoming proposals whose timestamp\n# lies in the future by more than that are rejected.\nclock_tolerance = '1 second'\n\n\n# ===========================================\n# Configuration options for Highway consensus\n# ===========================================\n[consensus.highway]\n\n# The duration for which incoming vertices with missing dependencies should be kept in a queue.\npending_vertex_timeout = '30 minutes'\n\n# Request the latest protocol state from a random peer periodically, with this interval.\n# '0 seconds' means it is disabled and we never request the protocol state from a peer.\nrequest_state_interval = '20 seconds'\n\n# Log inactive or faulty validators periodically, with this interval.\n# '0 seconds' means it is disabled and we never print the log message.\nlog_participation_interval = '1 minute'\n\n# Log the synchronizer state periodically, with this interval.\n# '0 seconds' means it is disabled and we never print the log message.\nlog_synchronizer_interval = '5 seconds'\n\n# Log the size of every incoming and outgoing serialized 
unit.\nlog_unit_sizes = false\n\n# The maximum number of peers we request the same vertex from in parallel.\nmax_requests_for_vertex = 5\n\n# The maximum number of dependencies we request per validator in a batch.\n# Limits requests per validator in panorama - in order to get a total number of\n# requests, multiply by # of validators.\nmax_request_batch_size = 20\n\n[consensus.highway.round_success_meter]\n# The number of most recent rounds we will be keeping track of.\nnum_rounds_to_consider = 40\n\n# The number of successful rounds that triggers us to slow down: With this many or fewer\n# successes per `num_rounds_to_consider`, we increase our round length.\nnum_rounds_slowdown = 10\n\n# The number of successful rounds that triggers us to speed up: With this many or more successes\n# per `num_rounds_to_consider`, we decrease our round length.\nnum_rounds_speedup = 32\n\n# We will try to accelerate (decrease our round length) every `acceleration_parameter` rounds if\n# we have few enough failures.\nacceleration_parameter = 40\n\n# The FTT, as a percentage (i.e. 
`acceleration_ftt = [1, 100]` means 1% of the validators' total weight), which\n# we will use for looking for a summit in order to determine a proposal's finality.\n# The required quorum in a summit we will look for to check if a round was successful is\n# determined by this FTT.\nacceleration_ftt = [1, 100]\n\n\n# ====================================\n# Configuration options for networking\n# ====================================\n[network]\n\n# The public address of the node.\n#\n# It must be publicly available in order to allow peers to connect to this node.\n# If the port is set to 0, the actual bound port will be substituted.\npublic_address = '<IP ADDRESS>:0'\n\n# Address to bind to for listening.\n# If port is set to 0, a random port will be used.\nbind_address = '0.0.0.0:35000'\n\n# Addresses to connect to in order to join the network.\n#\n# If not set, this node will not be able to attempt to connect to the network.  Instead it will\n# depend upon peers connecting to it.  This is normally only useful for the first node of the\n# network.\n#\n# Multiple addresses can be given and the node will attempt to connect to each, requiring at least\n# one connection.\nknown_addresses = ['168.119.137.143:35000', '47.251.14.254:35000', '47.242.53.164:35000', '46.101.61.107:35000', '47.88.87.63:35000', '35.152.42.229:35000', '206.189.47.102:35000', '134.209.243.124:35000', '148.251.190.103:35000', '167.172.32.44:35000', '165.22.252.48:35000', '18.219.70.138:35000', '3.225.191.9:35000', '3.221.194.62:35000', '101.36.120.117:35000', '54.151.24.120:35000', '148.251.135.60:35000', '18.188.103.230:35000', '54.215.53.35:35000', '88.99.95.7:35000', '99.81.225.72:35000', '52.207.122.179:35000', '3.135.134.105:35000', '62.171.135.101:35000', '139.162.132.144:35000', '63.33.251.206:35000', '135.181.165.110:35000', '135.181.134.57:35000', '94.130.107.198:35000', '54.180.220.20:35000', '188.40.83.254:35000', '157.90.131.121:35000', '134.209.110.11:35000', '168.119.69.6:35000', 
'45.76.251.225:35000', '168.119.209.31:35000', '31.7.207.16:35000', '209.145.60.74:35000', '54.252.66.23:35000', '134.209.16.172:35000', '178.238.235.196:35000', '18.217.20.213:35000', '3.14.161.135:35000', '3.12.207.193:35000', '3.12.207.193:35000']\n\n# Minimum number of fully-connected peers to consider network component initialized.\nmin_peers_for_initialization = 3\n\n# The interval between each fresh round of gossiping the node's public address.\ngossip_interval = '120 seconds'\n\n# Initial delay for starting address gossipping after the network starts. This should be slightly\n# more than the expected time required for initial connections to complete.\ninitial_gossip_delay = '5 seconds'\n\n# How long a connection is allowed to be stuck as pending before it is abandoned.\nmax_addr_pending_time = '1 minute'\n\n# Maximum time allowed for a connection handshake between two nodes to be completed. Connections\n# exceeding this threshold are considered unlikely to be healthy or even malicious and thus\n# terminated.\nhandshake_timeout = '20 seconds'\n\n# Maximum number of incoming connections per unique peer allowed. If the limit is hit, additional\n# connections will be rejected. A value of `0` means unlimited.\nmax_incoming_peer_connections = 3\n\n# The maximum total of upstream bandwidth in bytes per second allocated to non-validating peers.\n# A value of `0` means unlimited.\nmax_outgoing_byte_rate_non_validators = 6553600\n\n# The maximum allowed total impact of requests from non-validating peers per second answered.\n# A value of `0` means unlimited.\nmax_incoming_message_rate_non_validators = 3000\n\n# Maximum number of requests for data from a single peer that are allowed be buffered. 
A value of\n# `0` means unlimited.\nmax_in_flight_demands = 50\n\n# Version threshold to enable tarpit for.\n#\n# When set to a version (the value may be `null` to disable the feature), any peer that reports a\n# protocol version equal or below the threshold will be rejected only after holding open the\n# connection for a specific (`tarpit_duration`) amount of time.\n#\n# This option makes most sense to enable on known nodes with addresses where legacy nodes that are\n# still in operation are connecting to, as these older versions will only attempt to reconnect to\n# other nodes once they have exhausted their set of known nodes.\ntarpit_version_threshold = '1.2.1'\n\n# How long to hold connections to trapped legacy nodes.\ntarpit_duration = '10 minutes'\n\n# The probability [0.0, 1.0] of this node trapping a legacy node.\n#\n# Since older nodes will only reconnect if all their options are exhausted, it is sufficient for a\n# single known node to hold open a connection to prevent the node from reconnecting. This should be\n# set to `1/n` or higher, with `n` being the number of known nodes expected in the configuration of\n# legacy nodes running this software.\ntarpit_chance = 0.2\n\n# Minimum time a peer is kept on block list before being redeemed. The actual\n# timeout duration is calculated by selecting a random value between\n# <blocklist_retain_min_duration, blocklist_retain_max_duration>.\nblocklist_retain_min_duration = '2 minutes'\n\n# Maximum time a peer is kept on block list before being redeemed. 
The actual\n# timeout duration is calculated by selecting a random value between\n# <blocklist_retain_min_duration, blocklist_retain_max_duration>.\nblocklist_retain_max_duration = '10 minutes'\n\n# Identity of a node\n#\n# When this section is not specified, an identity will be generated when the node process starts with a self-signed certificate.\n# This option makes sense for some private chains where for security reasons joining new nodes is restricted.\n# [network.identity]\n# tls_certificate = \"node_cert.pem\"\n# secret_key = \"node.pem\"\n# ca_certificate = \"ca_cert.pem\"\n\n# Weights for impact estimation of incoming messages, used in combination with\n# `max_incoming_message_rate_non_validators`.\n#\n# Any weight set to 0 means that the category of traffic is exempt from throttling.\n[network.estimator_weights]\nconsensus = 0\nblock_gossip = 1\ntransaction_gossip = 0\nfinality_signature_gossip = 1\naddress_gossip = 0\nfinality_signature_broadcasts = 0\ntransaction_requests = 1\ntransaction_responses = 0\nlegacy_deploy_requests = 1\nlegacy_deploy_responses = 0\nblock_requests = 1\nblock_responses = 0\nblock_header_requests = 1\nblock_header_responses = 0\ntrie_requests = 1\ntrie_responses = 0\nfinality_signature_requests = 1\nfinality_signature_responses = 0\nsync_leap_requests = 1\nsync_leap_responses = 0\napprovals_hashes_requests = 1\napprovals_hashes_responses = 0\nexecution_results_requests = 1\nexecution_results_responses = 0\n\n# ==================================================\n# Configuration options for the BinaryPort server\n# ==================================================\n[binary_port_server]\n\n# Flag which enables the BinaryPort server.\nenable_server = true\n\n# Listening address for BinaryPort server.\naddress = '0.0.0.0:7779'\n\n# Flag that enables the `AllValues` get request. 
Disabled by default, because it can potentially be abused to retrieve huge amounts of data and clog the node.\nallow_request_get_all_values = false\n\n# Flag that enables the `Trie` get request. Disabled by default, because it can potentially be abused to retrieve huge amounts of data and clog the node.\nallow_request_get_trie = false\n\n# Flag that enables the `TrySpeculativeExec` request. Disabled by default.\nallow_request_speculative_exec = false\n\n# Maximum size of a message in bytes.\nmax_message_size_bytes = 134_217_728\n\n# Maximum number of connections to the server.\nmax_connections = 5\n\n# The global max rate of requests (per second) before they are limited.\n# The implementation uses a sliding window algorithm.\nqps_limit = 110\n\n# Initial time given to a connection before it expires\ninitial_connection_lifetime = '10 seconds'\n\n#The amount of time which is given to a connection to extend it's lifetime when a valid\n# [`Command::Get(GetRequest::Record)`] is sent to the node\nget_record_request_termination_delay = '0 seconds'\n\n#The amount of time which is given to a connection to extend it's lifetime when a valid\n#[`Command::Get(GetRequest::Information)`] is sent to the node\nget_information_request_termination_delay = '5 seconds'\n\n#The amount of time which is given to a connection to extend it's lifetime when a valid\n#[`Command::Get(GetRequest::State)`] is sent to the node\nget_state_request_termination_delay = '0 seconds'\n\n#The amount of time which is given to a connection to extend it's lifetime when a valid\n#[`Command::Get(GetRequest::Trie)`] is sent to the node\nget_trie_request_termination_delay = '0 seconds'\n\n#The amount of time which is given to a connection to extend it's lifetime when a valid\n#[`Command::TryAcceptTransaction`] is sent to the node\naccept_transaction_request_termination_delay = '24 seconds'\n\n#The amount of time which is given to a connection to extend it's lifetime when a valid\n#[`Command::TrySpeculativeExec`] 
is sent to the node\nspeculative_exec_request_termination_delay = '0 seconds'\n\n\n# ==============================================\n# Configuration options for the REST HTTP server\n# ==============================================\n[rest_server]\n\n# Flag which enables the REST HTTP server.\nenable_server = true\n\n# Listening address for REST HTTP server.  If the port is set to 0, a random port will be used.\n#\n# If the specified port cannot be bound to, a random port will be tried instead.  If binding fails,\n# the REST HTTP server will not run, but the node will be otherwise unaffected.\n#\n# The actual bound address will be reported via a log line if logging is enabled.\naddress = '0.0.0.0:8888'\n\n# The global max rate of requests (per second) before they are limited.\n# Request will be delayed to the next 1 second bucket once limited.\nqps_limit = 100\n\n# Specifies which origin will be reported as allowed by REST server.\n#\n# If left empty, CORS will be disabled.\n# If set to '*', any origin is allowed.\n# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin).\ncors_origin = ''\n\n\n# ==========================================================\n# Configuration options for the SSE HTTP event stream server\n# ==========================================================\n[event_stream_server]\n\n# Flag which enables the SSE HTTP event stream server.\nenable_server = true\n\n# Listening address for SSE HTTP event stream server.  If the port is set to 0, a random port will be used.\n#\n# If the specified port cannot be bound to, a random port will be tried instead.  
If binding fails,\n# the SSE HTTP event stream server will not run, but the node will be otherwise unaffected.\n#\n# The actual bound address will be reported via a log line if logging is enabled.\naddress = '0.0.0.0:9999'\n\n# The number of event stream events to buffer.\nevent_stream_buffer_length = 5000\n\n# The maximum number of subscribers across all event streams the server will permit at any one time.\nmax_concurrent_subscribers = 100\n\n# Specifies which origin will be reported as allowed by event stream server.\n#\n# If left empty, CORS will be disabled.\n# If set to '*', any origin is allowed.\n# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin).\ncors_origin = ''\n\n# ===============================================\n# Configuration options for the storage component\n# ===============================================\n[storage]\n\n# Path (absolute, or relative to this config.toml) to the folder where any files created\n# or read by the storage component will exist. 
A subfolder named with the network name will be\n# automatically created and used for the storage component files.\n#\n# If the folder doesn't exist, it and any required parents will be created.\n#\n# If unset, the path must be supplied as an argument via the CLI.\npath = '/var/lib/casper/casper-node'\n\n# Maximum size of the database to use for the block store.\n#\n# The size should be a multiple of the OS page size.\n#\n# 483_183_820_800 == 450 GiB.\nmax_block_store_size = 483_183_820_800\n\n# Maximum size of the database to use for the deploy store.\n#\n# The size should be a multiple of the OS page size.\n#\n# 322_122_547_200 == 300 GiB.\nmax_deploy_store_size = 322_122_547_200\n\n# Maximum size of the database to use for the deploy metadata.\n#\n# The size should be a multiple of the OS page size.\n#\n# 322_122_547_200 == 300 GiB.\nmax_deploy_metadata_store_size = 322_122_547_200\n\n# Maximum size of the database to use for the state snapshots.\n#\n# The size should be a multiple of the OS page size.\n#\n# 10_737_418_240 == 10 GiB.\nmax_state_store_size = 10_737_418_240\n\n# Memory deduplication.\n#\n# If enabled, nodes will attempt to share loaded objects if possible.\nenable_mem_deduplication = true\n\n# Memory duplication garbage collection.\n#\n# Sets the frequency how often the memory pool cache is swept for free references.\n# For example, setting this value to 5 means that every 5th time something is put in the pool the cache is swept.\nmem_pool_prune_interval = 4096\n\n\n# ===================================\n# Configuration options for gossiping\n# ===================================\n[gossip]\n\n# Target number of peers to infect with a given piece of data.\ninfection_target = 3\n\n# The saturation limit as a percentage, with a maximum value of 99.  Used as a termination\n# condition.\n#\n# Example: assume the `infection_target` is 3, the `saturation_limit_percent` is 80, and we don't\n# manage to newly infect 3 peers.  
We will stop gossiping once we know of more than 15 holders\n# excluding us since 80% saturation would imply 3 new infections in 15 peers.\nsaturation_limit_percent = 80\n\n# The maximum duration for which to keep finished entries.\n#\n# The longer they are retained, the lower the likelihood of re-gossiping a piece of data.  However,\n# the longer they are retained, the larger the list of finished entries can grow.\nfinished_entry_duration = '1 minute'\n\n# The timeout duration for a single gossip request, i.e. for a single gossip message\n# sent from this node, it will be considered timed out if the expected response from that peer is\n# not received within this specified duration.\ngossip_request_timeout = '30 seconds'\n\n# The timeout duration for retrieving the remaining part(s) of newly-discovered data\n# from a peer which gossiped information about that data to this node.\nget_remainder_timeout = '5 seconds'\n\n# The timeout duration for a newly-received, gossiped item to be validated and stored by another\n# component before the gossiper abandons waiting to gossip the item onwards.\nvalidate_and_store_timeout = '1 minute'\n\n\n# ===============================================\n# Configuration options for the block accumulator\n# ===============================================\n[block_accumulator]\n\n# Block height difference threshold for starting to execute the blocks.\nattempt_execution_threshold = 6\n\n# Accepted time interval for inactivity in block accumulator.\ndead_air_interval = '3 minutes'\n\n# Time after which the block acceptors are considered old and can be purged.\npurge_interval = '1 minute'\n\n\n# ================================================\n# Configuration options for the block synchronizer\n# ================================================\n[block_synchronizer]\n\n# Maximum number of fetch-trie tasks to run in parallel during block synchronization.\nmax_parallel_trie_fetches = 5000\n\n# Time interval for the node to ask for refreshed 
peers.\npeer_refresh_interval = '90 seconds'\n\n# Time interval for the node to check what the block synchronizer needs to acquire next.\nneed_next_interval = '1 second'\n\n# Time interval for recurring disconnection of dishonest peers.\ndisconnect_dishonest_peers_interval = '10 seconds'\n\n# Time interval for resetting the latch in block builders.\nlatch_reset_interval = '5 seconds'\n\n\n# =============================================\n# Configuration options for the block validator\n# =============================================\n[block_validator]\n\n# Maximum number of completed entries to retain.\n#\n# A higher value can avoid creating needless validation work on an already-validated proposed\n# block, but comes at the cost of increased memory consumption.\nmax_completed_entries = 6\n\n\n# ==================================\n# Configuration options for fetchers\n# ==================================\n[fetcher]\n\n# The timeout duration for a single fetcher request, i.e. for a single fetcher message\n# sent from this node to another node, it will be considered timed out if the expected response from that peer is\n# not received within this specified duration.\nget_from_peer_timeout = '10 seconds'\n\n\n# ========================================================\n# Configuration options for the contract runtime component\n# ========================================================\n[contract_runtime]\n\n# Optional maximum size of the database to use for the global state store.\n#\n# If unset, defaults to 805,306,368,000 == 750 GiB.\n#\n# The size should be a multiple of the OS page size.\nmax_global_state_size = 2_089_072_132_096\n\n# Optional depth limit to use for global state queries.\n#\n# If unset, defaults to 5.\n#max_query_depth = 5\n\n# Enable manual synchronizing to disk.\n#\n# If unset, defaults to true.\n#enable_manual_sync = true\n\n\n# ==================================================\n# Configuration options for the transaction acceptor\n# 
==================================================\n[transaction_acceptor]\n\n# The leeway allowed when considering whether a transaction is future-dated or not.\n#\n# To accommodate minor clock drift, transactions whose timestamps are within `timestamp_leeway` in the\n# future are still acceptable.\n#\n# The maximum value to which `timestamp_leeway` can be set is defined by the chainspec setting\n# `transaction.max_timestamp_leeway`.\ntimestamp_leeway = '2 seconds'\n\n\n# ===========================================\n# Configuration options for the transaction buffer\n# ===========================================\n[transaction_buffer]\n\n# The interval of checking for expired transactions.\nexpiry_check_interval = '1 minute'\n\n\n# ==============================================\n# Configuration options for the diagnostics port\n# ==============================================\n[diagnostics_port]\n\n# If set, the diagnostics port will be available on a UNIX socket.\nenabled = false\n\n# Filename for the UNIX domain socket the diagnostics port listens on.\nsocket_path = \"debug.socket\"\n\n# The umask to set before creating the socket. A restrictive mask like `0o077` will cause the\n# socket to be only accessible by the user the node runs as. A more relaxed variant is `0o007`,\n# which allows for group access as well.\nsocket_umask = 0o077\n\n\n# =============================================\n# Configuration options for the upgrade watcher\n# =============================================\n[upgrade_watcher]\n\n# How often to scan file system for available upgrades.\nupgrade_check_interval = '30 seconds'\n"
  },
  {
    "path": "resources/test/rest_schema_chainspec_bytes.json",
    "content": "{\n  \"$schema\": \"http://json-schema.org/draft-07/schema#\",\n  \"title\": \"GetChainspecResult\",\n  \"description\": \"Result for the \\\"info_get_chainspec\\\" RPC.\",\n  \"type\": \"object\",\n  \"required\": [\n    \"api_version\",\n    \"chainspec_bytes\"\n  ],\n  \"properties\": {\n    \"api_version\": {\n      \"description\": \"The RPC API version.\",\n      \"type\": \"string\"\n    },\n    \"chainspec_bytes\": {\n      \"description\": \"The chainspec file bytes.\",\n      \"allOf\": [\n        {\n          \"$ref\": \"#/definitions/ChainspecRawBytes\"\n        }\n      ]\n    }\n  },\n  \"definitions\": {\n    \"ChainspecRawBytes\": {\n      \"description\": \"The raw bytes of the chainspec.toml, genesis accounts.toml, and global_state.toml files.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"chainspec_bytes\"\n      ],\n      \"properties\": {\n        \"chainspec_bytes\": {\n          \"description\": \"Raw bytes of the current chainspec.toml file.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Bytes\"\n            }\n          ]\n        },\n        \"maybe_genesis_accounts_bytes\": {\n          \"description\": \"Raw bytes of the current genesis accounts.toml file.\",\n          \"anyOf\": [\n            {\n              \"$ref\": \"#/definitions/Bytes\"\n            },\n            {\n              \"type\": \"null\"\n            }\n          ]\n        },\n        \"maybe_global_state_bytes\": {\n          \"description\": \"Raw bytes of the current global_state.toml file.\",\n          \"anyOf\": [\n            {\n              \"$ref\": \"#/definitions/Bytes\"\n            },\n            {\n              \"type\": \"null\"\n            }\n          ]\n        }\n      }\n    },\n    \"Bytes\": {\n      \"description\": \"Hex-encoded bytes.\",\n      \"type\": \"string\"\n    }\n  }\n}"
  },
  {
    "path": "resources/test/rest_schema_status.json",
    "content": "{\n  \"$schema\": \"http://json-schema.org/draft-07/schema#\",\n  \"title\": \"GetStatusResult\",\n  \"description\": \"Result for \\\"info_get_status\\\" RPC response.\",\n  \"type\": \"object\",\n  \"required\": [\n    \"api_version\",\n    \"available_block_range\",\n    \"block_sync\",\n    \"build_version\",\n    \"chainspec_name\",\n    \"last_progress\",\n    \"peers\",\n    \"reactor_state\",\n    \"starting_state_root_hash\",\n    \"uptime\"\n  ],\n  \"properties\": {\n    \"peers\": {\n      \"description\": \"The node ID and network address of each connected peer.\",\n      \"allOf\": [\n        {\n          \"$ref\": \"#/definitions/Peers\"\n        }\n      ]\n    },\n    \"api_version\": {\n      \"description\": \"The RPC API version.\",\n      \"type\": \"string\"\n    },\n    \"build_version\": {\n      \"description\": \"The compiled node version.\",\n      \"type\": \"string\"\n    },\n    \"chainspec_name\": {\n      \"description\": \"The chainspec name.\",\n      \"type\": \"string\"\n    },\n    \"starting_state_root_hash\": {\n      \"description\": \"The state root hash of the lowest block in the available block range.\",\n      \"allOf\": [\n        {\n          \"$ref\": \"#/definitions/Digest\"\n        }\n      ]\n    },\n    \"last_added_block_info\": {\n      \"description\": \"The minimal info of the last block from the linear chain.\",\n      \"anyOf\": [\n        {\n          \"$ref\": \"#/definitions/MinimalBlockInfo\"\n        },\n        {\n          \"type\": \"null\"\n        }\n      ]\n    },\n    \"our_public_signing_key\": {\n      \"description\": \"Our public signing key.\",\n      \"anyOf\": [\n        {\n          \"$ref\": \"#/definitions/PublicKey\"\n        },\n        {\n          \"type\": \"null\"\n        }\n      ]\n    },\n    \"round_length\": {\n      \"description\": \"The next round length if this node is a validator.\",\n      \"anyOf\": [\n        {\n          \"$ref\": 
\"#/definitions/TimeDiff\"\n        },\n        {\n          \"type\": \"null\"\n        }\n      ]\n    },\n    \"next_upgrade\": {\n      \"description\": \"Information about the next scheduled upgrade.\",\n      \"anyOf\": [\n        {\n          \"$ref\": \"#/definitions/NextUpgrade\"\n        },\n        {\n          \"type\": \"null\"\n        }\n      ]\n    },\n    \"uptime\": {\n      \"description\": \"Time that passed since the node has started.\",\n      \"allOf\": [\n        {\n          \"$ref\": \"#/definitions/TimeDiff\"\n        }\n      ]\n    },\n    \"reactor_state\": {\n      \"description\": \"The current state of node reactor.\",\n      \"allOf\": [\n        {\n          \"$ref\": \"#/definitions/ReactorState\"\n        }\n      ]\n    },\n    \"last_progress\": {\n      \"description\": \"Timestamp of the last recorded progress in the reactor.\",\n      \"allOf\": [\n        {\n          \"$ref\": \"#/definitions/Timestamp\"\n        }\n      ]\n    },\n    \"available_block_range\": {\n      \"description\": \"The available block range in storage.\",\n      \"allOf\": [\n        {\n          \"$ref\": \"#/definitions/AvailableBlockRange\"\n        }\n      ]\n    },\n    \"block_sync\": {\n      \"description\": \"The status of the block synchronizer builders.\",\n      \"allOf\": [\n        {\n          \"$ref\": \"#/definitions/BlockSynchronizerStatus\"\n        }\n      ]\n    },\n    \"latest_switch_block_hash\": {\n      \"description\": \"The hash of the latest switch block.\",\n      \"anyOf\": [\n        {\n          \"$ref\": \"#/definitions/BlockHash\"\n        },\n        {\n          \"type\": \"null\"\n        }\n      ]\n    }\n  },\n  \"additionalProperties\": false,\n  \"definitions\": {\n    \"Peers\": {\n      \"description\": \"Map of peer IDs to network addresses.\",\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/definitions/PeerEntry\"\n      }\n    },\n    \"PeerEntry\": {\n      
\"description\": \"Node peer entry.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"address\",\n        \"node_id\"\n      ],\n      \"properties\": {\n        \"node_id\": {\n          \"description\": \"Node id.\",\n          \"type\": \"string\"\n        },\n        \"address\": {\n          \"description\": \"Node address.\",\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"Digest\": {\n      \"description\": \"Hex-encoded hash digest.\",\n      \"type\": \"string\"\n    },\n    \"MinimalBlockInfo\": {\n      \"description\": \"Minimal info of a `Block`.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"creator\",\n        \"era_id\",\n        \"hash\",\n        \"height\",\n        \"state_root_hash\",\n        \"timestamp\"\n      ],\n      \"properties\": {\n        \"hash\": {\n          \"$ref\": \"#/definitions/BlockHash\"\n        },\n        \"timestamp\": {\n          \"$ref\": \"#/definitions/Timestamp\"\n        },\n        \"era_id\": {\n          \"$ref\": \"#/definitions/EraId\"\n        },\n        \"height\": {\n          \"type\": \"integer\",\n          \"format\": \"uint64\",\n          \"minimum\": 0.0\n        },\n        \"state_root_hash\": {\n          \"$ref\": \"#/definitions/Digest\"\n        },\n        \"creator\": {\n          \"$ref\": \"#/definitions/PublicKey\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"BlockHash\": {\n      \"description\": \"Hex-encoded cryptographic hash of a block.\",\n      \"allOf\": [\n        {\n          \"$ref\": \"#/definitions/Digest\"\n        }\n      ]\n    },\n    \"Timestamp\": {\n      \"description\": \"Timestamp formatted as per RFC 3339\",\n      \"type\": \"string\"\n    },\n    \"EraId\": {\n      \"description\": \"Era ID newtype.\",\n      \"type\": \"integer\",\n      \"format\": \"uint64\",\n      \"minimum\": 0.0\n    },\n    \"PublicKey\": {\n      
\"description\": \"Hex-encoded cryptographic public key, including the algorithm tag prefix.\",\n      \"examples\": [\n        {\n          \"name\": \"SystemPublicKey\",\n          \"description\": \"A pseudo public key, used for example when the system proposes an immediate switch block after a network upgrade rather than a specific validator. Its hex-encoded value is always '00', as is the corresponding pseudo signature's\",\n          \"value\": \"00\"\n        },\n        {\n          \"name\": \"Ed25519PublicKey\",\n          \"description\": \"An Ed25519 public key. Its hex-encoded value begins '01' and is followed by 64 characters\",\n          \"value\": \"018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c\"\n        },\n        {\n          \"name\": \"Secp256k1PublicKey\",\n          \"description\": \"A secp256k1 public key. Its hex-encoded value begins '02' and is followed by 66 characters\",\n          \"value\": \"0203408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084\"\n        }\n      ],\n      \"type\": \"string\"\n    },\n    \"TimeDiff\": {\n      \"description\": \"Human-readable duration.\",\n      \"type\": \"string\"\n    },\n    \"NextUpgrade\": {\n      \"description\": \"Information about the next protocol upgrade.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"activation_point\",\n        \"protocol_version\"\n      ],\n      \"properties\": {\n        \"activation_point\": {\n          \"$ref\": \"#/definitions/ActivationPoint\"\n        },\n        \"protocol_version\": {\n          \"$ref\": \"#/definitions/ProtocolVersion\"\n        }\n      }\n    },\n    \"ActivationPoint\": {\n      \"description\": \"The first era to which the associated protocol version applies.\",\n      \"anyOf\": [\n        {\n          \"description\": \"Era id.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/EraId\"\n            }\n          ]\n        },\n        {\n      
    \"description\": \"Genesis timestamp.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Timestamp\"\n            }\n          ]\n        }\n      ]\n    },\n    \"ProtocolVersion\": {\n      \"description\": \"Casper Platform protocol version\",\n      \"type\": \"string\"\n    },\n    \"ReactorState\": {\n      \"description\": \"The state of the reactor.\",\n      \"oneOf\": [\n        {\n          \"description\": \"Get all components and reactor state set up on start.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Initialize\"\n          ]\n        },\n        {\n          \"description\": \"Orient to the network and attempt to catch up to tip.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"CatchUp\"\n          ]\n        },\n        {\n          \"description\": \"Running commit upgrade and creating immediate switch block.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Upgrading\"\n          ]\n        },\n        {\n          \"description\": \"Stay caught up with tip.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"KeepUp\"\n          ]\n        },\n        {\n          \"description\": \"Node is currently caught up and is an active validator.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Validate\"\n          ]\n        },\n        {\n          \"description\": \"Node should be shut down for upgrade.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"ShutdownForUpgrade\"\n          ]\n        },\n        {\n          \"description\": \"Node should shut down after catching up.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"ShutdownAfterCatchingUp\"\n          ]\n        }\n      ]\n    },\n    \"AvailableBlockRange\": {\n      \"description\": \"An unbroken, inclusive range of blocks.\",\n      \"type\": \"object\",\n      \"required\": [\n 
       \"high\",\n        \"low\"\n      ],\n      \"properties\": {\n        \"low\": {\n          \"description\": \"The inclusive lower bound of the range.\",\n          \"type\": \"integer\",\n          \"format\": \"uint64\",\n          \"minimum\": 0.0\n        },\n        \"high\": {\n          \"description\": \"The inclusive upper bound of the range.\",\n          \"type\": \"integer\",\n          \"format\": \"uint64\",\n          \"minimum\": 0.0\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"BlockSynchronizerStatus\": {\n      \"description\": \"The status of the block synchronizer.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"historical\": {\n          \"description\": \"The status of syncing a historical block, if any.\",\n          \"anyOf\": [\n            {\n              \"$ref\": \"#/definitions/BlockSyncStatus\"\n            },\n            {\n              \"type\": \"null\"\n            }\n          ]\n        },\n        \"forward\": {\n          \"description\": \"The status of syncing a forward block, if any.\",\n          \"anyOf\": [\n            {\n              \"$ref\": \"#/definitions/BlockSyncStatus\"\n            },\n            {\n              \"type\": \"null\"\n            }\n          ]\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"BlockSyncStatus\": {\n      \"description\": \"The status of syncing an individual block.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"acquisition_state\",\n        \"block_hash\"\n      ],\n      \"properties\": {\n        \"block_hash\": {\n          \"description\": \"The block hash.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/BlockHash\"\n            }\n          ]\n        },\n        \"block_height\": {\n          \"description\": \"The height of the block, if known.\",\n          \"type\": [\n            \"integer\",\n            \"null\"\n          ],\n   
       \"format\": \"uint64\",\n          \"minimum\": 0.0\n        },\n        \"acquisition_state\": {\n          \"description\": \"The state of acquisition of the data associated with the block.\",\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": false\n    }\n  }\n}"
  },
  {
    "path": "resources/test/rest_schema_validator_changes.json",
    "content": "{\n  \"$schema\": \"http://json-schema.org/draft-07/schema#\",\n  \"title\": \"GetValidatorChangesResult\",\n  \"description\": \"Result for the \\\"info_get_validator_changes\\\" RPC.\",\n  \"type\": \"object\",\n  \"required\": [\n    \"api_version\",\n    \"changes\"\n  ],\n  \"properties\": {\n    \"api_version\": {\n      \"description\": \"The RPC API version.\",\n      \"type\": \"string\"\n    },\n    \"changes\": {\n      \"description\": \"The validators' status changes.\",\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/definitions/JsonValidatorChanges\"\n      }\n    }\n  },\n  \"additionalProperties\": false,\n  \"definitions\": {\n    \"JsonValidatorChanges\": {\n      \"description\": \"The changes in a validator's status.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"public_key\",\n        \"status_changes\"\n      ],\n      \"properties\": {\n        \"public_key\": {\n          \"description\": \"The public key of the validator.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/PublicKey\"\n            }\n          ]\n        },\n        \"status_changes\": {\n          \"description\": \"The set of changes to the validator's status.\",\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/definitions/JsonValidatorStatusChange\"\n          }\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"PublicKey\": {\n      \"description\": \"Hex-encoded cryptographic public key, including the algorithm tag prefix.\",\n      \"examples\": [\n        {\n          \"name\": \"SystemPublicKey\",\n          \"description\": \"A pseudo public key, used for example when the system proposes an immediate switch block after a network upgrade rather than a specific validator. 
Its hex-encoded value is always '00', as is the corresponding pseudo signature's\",\n          \"value\": \"00\"\n        },\n        {\n          \"name\": \"Ed25519PublicKey\",\n          \"description\": \"An Ed25519 public key. Its hex-encoded value begins '01' and is followed by 64 characters\",\n          \"value\": \"018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c\"\n        },\n        {\n          \"name\": \"Secp256k1PublicKey\",\n          \"description\": \"A secp256k1 public key. Its hex-encoded value begins '02' and is followed by 66 characters\",\n          \"value\": \"0203408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084\"\n        }\n      ],\n      \"type\": \"string\"\n    },\n    \"JsonValidatorStatusChange\": {\n      \"description\": \"A single change to a validator's status in the given era.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"era_id\",\n        \"validator_change\"\n      ],\n      \"properties\": {\n        \"era_id\": {\n          \"description\": \"The era in which the change occurred.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/EraId\"\n            }\n          ]\n        },\n        \"validator_change\": {\n          \"description\": \"The change in validator status.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/ValidatorChange\"\n            }\n          ]\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"EraId\": {\n      \"description\": \"Era ID newtype.\",\n      \"type\": \"integer\",\n      \"format\": \"uint64\",\n      \"minimum\": 0.0\n    },\n    \"ValidatorChange\": {\n      \"description\": \"A change to a validator's status between two eras.\",\n      \"oneOf\": [\n        {\n          \"description\": \"The validator got newly added to the validator set.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Added\"\n          ]\n        
},\n        {\n          \"description\": \"The validator was removed from the validator set.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Removed\"\n          ]\n        },\n        {\n          \"description\": \"The validator was banned from this era.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Banned\"\n          ]\n        },\n        {\n          \"description\": \"The validator was excluded from proposing new blocks in this era.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"CannotPropose\"\n          ]\n        },\n        {\n          \"description\": \"We saw the validator misbehave in this era.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"SeenAsFaulty\"\n          ]\n        }\n      ]\n    }\n  }\n}"
  },
  {
    "path": "resources/test/sse_data_schema.json",
    "content": "{\n  \"$schema\": \"http://json-schema.org/draft-07/schema#\",\n  \"title\": \"SseData\",\n  \"description\": \"The \\\"data\\\" field of the events sent on the event stream to clients.\",\n  \"oneOf\": [\n    {\n      \"description\": \"The version of this node's API server.  This event will always be the first sent to a new client, and will have no associated event ID provided.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"ApiVersion\"\n      ],\n      \"properties\": {\n        \"ApiVersion\": {\n          \"$ref\": \"#/definitions/ProtocolVersion\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    {\n      \"description\": \"The given block has been added to the linear chain and stored locally.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"BlockAdded\"\n      ],\n      \"properties\": {\n        \"BlockAdded\": {\n          \"type\": \"object\",\n          \"required\": [\n            \"block\",\n            \"block_hash\"\n          ],\n          \"properties\": {\n            \"block_hash\": {\n              \"$ref\": \"#/definitions/BlockHash\"\n            },\n            \"block\": {\n              \"$ref\": \"#/definitions/Block\"\n            }\n          }\n        }\n      },\n      \"additionalProperties\": false\n    },\n    {\n      \"description\": \"The given transaction has been newly-accepted by this node.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"TransactionAccepted\"\n      ],\n      \"properties\": {\n        \"TransactionAccepted\": {\n          \"type\": \"object\",\n          \"required\": [\n            \"transaction\"\n          ],\n          \"properties\": {\n            \"transaction\": {\n              \"description\": \"a transaction\",\n              \"allOf\": [\n                {\n                  \"$ref\": \"#/definitions/Transaction\"\n                }\n              ]\n            }\n          }\n        }\n      },\n      
\"additionalProperties\": false\n    },\n    {\n      \"description\": \"The given transaction has been executed, committed and forms part of the given block.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"TransactionProcessed\"\n      ],\n      \"properties\": {\n        \"TransactionProcessed\": {\n          \"type\": \"object\",\n          \"required\": [\n            \"block_hash\",\n            \"execution_result\",\n            \"initiator_addr\",\n            \"messages\",\n            \"timestamp\",\n            \"transaction_hash\",\n            \"ttl\"\n          ],\n          \"properties\": {\n            \"transaction_hash\": {\n              \"$ref\": \"#/definitions/TransactionHash\"\n            },\n            \"initiator_addr\": {\n              \"$ref\": \"#/definitions/InitiatorAddr\"\n            },\n            \"timestamp\": {\n              \"$ref\": \"#/definitions/Timestamp\"\n            },\n            \"ttl\": {\n              \"$ref\": \"#/definitions/TimeDiff\"\n            },\n            \"block_hash\": {\n              \"$ref\": \"#/definitions/BlockHash\"\n            },\n            \"execution_result\": {\n              \"$ref\": \"#/definitions/ExecutionResult\"\n            },\n            \"messages\": {\n              \"type\": \"array\",\n              \"items\": {\n                \"$ref\": \"#/definitions/Message\"\n              }\n            }\n          }\n        }\n      },\n      \"additionalProperties\": false\n    },\n    {\n      \"description\": \"The given transaction has expired.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"TransactionExpired\"\n      ],\n      \"properties\": {\n        \"TransactionExpired\": {\n          \"type\": \"object\",\n          \"required\": [\n            \"transaction_hash\"\n          ],\n          \"properties\": {\n            \"transaction_hash\": {\n              \"$ref\": \"#/definitions/TransactionHash\"\n            }\n          
}\n        }\n      },\n      \"additionalProperties\": false\n    },\n    {\n      \"description\": \"Generic representation of validator's fault in an era.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"Fault\"\n      ],\n      \"properties\": {\n        \"Fault\": {\n          \"type\": \"object\",\n          \"required\": [\n            \"era_id\",\n            \"public_key\",\n            \"timestamp\"\n          ],\n          \"properties\": {\n            \"era_id\": {\n              \"$ref\": \"#/definitions/EraId\"\n            },\n            \"public_key\": {\n              \"$ref\": \"#/definitions/PublicKey\"\n            },\n            \"timestamp\": {\n              \"$ref\": \"#/definitions/Timestamp\"\n            }\n          }\n        }\n      },\n      \"additionalProperties\": false\n    },\n    {\n      \"description\": \"New finality signature received.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"FinalitySignature\"\n      ],\n      \"properties\": {\n        \"FinalitySignature\": {\n          \"$ref\": \"#/definitions/FinalitySignature\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    {\n      \"description\": \"The execution effects produced by a `StepRequest`.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"Step\"\n      ],\n      \"properties\": {\n        \"Step\": {\n          \"type\": \"object\",\n          \"required\": [\n            \"era_id\",\n            \"execution_effects\"\n          ],\n          \"properties\": {\n            \"era_id\": {\n              \"$ref\": \"#/definitions/EraId\"\n            },\n            \"execution_effects\": {\n              \"$ref\": \"#/definitions/Effects\"\n            }\n          }\n        }\n      },\n      \"additionalProperties\": false\n    },\n    {\n      \"description\": \"The node is about to shut down.\",\n      \"type\": \"string\",\n      \"enum\": [\n        \"Shutdown\"\n      ]\n    }\n  
],\n  \"definitions\": {\n    \"ProtocolVersion\": {\n      \"description\": \"Casper Platform protocol version\",\n      \"type\": \"string\"\n    },\n    \"BlockHash\": {\n      \"description\": \"Hex-encoded cryptographic hash of a block.\",\n      \"allOf\": [\n        {\n          \"$ref\": \"#/definitions/Digest\"\n        }\n      ]\n    },\n    \"Digest\": {\n      \"description\": \"Hex-encoded hash digest.\",\n      \"type\": \"string\"\n    },\n    \"Block\": {\n      \"description\": \"A block after execution.\",\n      \"oneOf\": [\n        {\n          \"description\": \"The legacy, initial version of the block.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Version1\"\n          ],\n          \"properties\": {\n            \"Version1\": {\n              \"$ref\": \"#/definitions/BlockV1\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"The version 2 of the block.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Version2\"\n          ],\n          \"properties\": {\n            \"Version2\": {\n              \"$ref\": \"#/definitions/BlockV2\"\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"BlockV1\": {\n      \"description\": \"A block after execution, with the resulting global state root hash. This is the core component of the Casper linear blockchain. 
Version 1.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"body\",\n        \"hash\",\n        \"header\"\n      ],\n      \"properties\": {\n        \"hash\": {\n          \"description\": \"The block hash identifying this block.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/BlockHash\"\n            }\n          ]\n        },\n        \"header\": {\n          \"description\": \"The header portion of the block.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/BlockHeaderV1\"\n            }\n          ]\n        },\n        \"body\": {\n          \"description\": \"The body portion of the block.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/BlockBodyV1\"\n            }\n          ]\n        }\n      }\n    },\n    \"BlockHeaderV1\": {\n      \"description\": \"The header portion of a block.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"accumulated_seed\",\n        \"body_hash\",\n        \"era_id\",\n        \"height\",\n        \"parent_hash\",\n        \"protocol_version\",\n        \"random_bit\",\n        \"state_root_hash\",\n        \"timestamp\"\n      ],\n      \"properties\": {\n        \"parent_hash\": {\n          \"description\": \"The parent block's hash.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/BlockHash\"\n            }\n          ]\n        },\n        \"state_root_hash\": {\n          \"description\": \"The root hash of global state after the deploys in this block have been executed.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Digest\"\n            }\n          ]\n        },\n        \"body_hash\": {\n          \"description\": \"The hash of the block's body.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Digest\"\n            }\n          ]\n        },\n        \"random_bit\": {\n          
\"description\": \"A random bit needed for initializing a future era.\",\n          \"type\": \"boolean\"\n        },\n        \"accumulated_seed\": {\n          \"description\": \"A seed needed for initializing a future era.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Digest\"\n            }\n          ]\n        },\n        \"era_end\": {\n          \"description\": \"The `EraEnd` of a block if it is a switch block.\",\n          \"anyOf\": [\n            {\n              \"$ref\": \"#/definitions/EraEndV1\"\n            },\n            {\n              \"type\": \"null\"\n            }\n          ]\n        },\n        \"timestamp\": {\n          \"description\": \"The timestamp from when the block was proposed.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Timestamp\"\n            }\n          ]\n        },\n        \"era_id\": {\n          \"description\": \"The era ID in which this block was created.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/EraId\"\n            }\n          ]\n        },\n        \"height\": {\n          \"description\": \"The height of this block, i.e. 
the number of ancestors.\",\n          \"type\": \"integer\",\n          \"format\": \"uint64\",\n          \"minimum\": 0.0\n        },\n        \"protocol_version\": {\n          \"description\": \"The protocol version of the network from when this block was created.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/ProtocolVersion\"\n            }\n          ]\n        }\n      }\n    },\n    \"EraEndV1\": {\n      \"description\": \"Information related to the end of an era, and validator weights for the following era.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"era_report\",\n        \"next_era_validator_weights\"\n      ],\n      \"properties\": {\n        \"era_report\": {\n          \"description\": \"Equivocation, reward and validator inactivity information.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/EraReport_for_PublicKey\"\n            }\n          ]\n        },\n        \"next_era_validator_weights\": {\n          \"description\": \"The validators for the upcoming era and their respective weights.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Array_of_ValidatorWeight\"\n            }\n          ]\n        }\n      }\n    },\n    \"EraReport_for_PublicKey\": {\n      \"description\": \"Equivocation, reward and validator inactivity information.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"equivocators\",\n        \"inactive_validators\",\n        \"rewards\"\n      ],\n      \"properties\": {\n        \"equivocators\": {\n          \"description\": \"The set of equivocators.\",\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/definitions/PublicKey\"\n          }\n        },\n        \"rewards\": {\n          \"description\": \"Rewards for finalization of earlier blocks.\",\n          \"allOf\": [\n            {\n              \"$ref\": 
\"#/definitions/Array_of_EraReward\"\n            }\n          ]\n        },\n        \"inactive_validators\": {\n          \"description\": \"Validators that haven't produced any unit during the era.\",\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/definitions/PublicKey\"\n          }\n        }\n      }\n    },\n    \"PublicKey\": {\n      \"description\": \"Hex-encoded cryptographic public key, including the algorithm tag prefix.\",\n      \"examples\": [\n        {\n          \"name\": \"SystemPublicKey\",\n          \"description\": \"A pseudo public key, used for example when the system proposes an immediate switch block after a network upgrade rather than a specific validator. Its hex-encoded value is always '00', as is the corresponding pseudo signature's\",\n          \"value\": \"00\"\n        },\n        {\n          \"name\": \"Ed25519PublicKey\",\n          \"description\": \"An Ed25519 public key. Its hex-encoded value begins '01' and is followed by 64 characters\",\n          \"value\": \"018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c\"\n        },\n        {\n          \"name\": \"Secp256k1PublicKey\",\n          \"description\": \"A secp256k1 public key. 
Its hex-encoded value begins '02' and is followed by 66 characters\",\n          \"value\": \"0203408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084\"\n        }\n      ],\n      \"type\": \"string\"\n    },\n    \"Array_of_EraReward\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/definitions/EraReward\"\n      }\n    },\n    \"EraReward\": {\n      \"description\": \"A validator's public key paired with a measure of the value of its contribution to consensus, as a fraction of the configured maximum block reward.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"amount\",\n        \"validator\"\n      ],\n      \"properties\": {\n        \"validator\": {\n          \"description\": \"The validator's public key.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/PublicKey\"\n            }\n          ]\n        },\n        \"amount\": {\n          \"description\": \"The reward amount.\",\n          \"type\": \"integer\",\n          \"format\": \"uint64\",\n          \"minimum\": 0.0\n        }\n      }\n    },\n    \"Array_of_ValidatorWeight\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/definitions/ValidatorWeight\"\n      }\n    },\n    \"ValidatorWeight\": {\n      \"description\": \"A validator's public key paired with its weight, i.e. 
the total number of motes staked by it and its delegators.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"validator\",\n        \"weight\"\n      ],\n      \"properties\": {\n        \"validator\": {\n          \"description\": \"The validator's public key.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/PublicKey\"\n            }\n          ]\n        },\n        \"weight\": {\n          \"description\": \"The validator's weight.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/U512\"\n            }\n          ]\n        }\n      }\n    },\n    \"U512\": {\n      \"description\": \"Decimal representation of a 512-bit integer.\",\n      \"type\": \"string\"\n    },\n    \"Timestamp\": {\n      \"description\": \"Timestamp formatted as per RFC 3339\",\n      \"type\": \"string\"\n    },\n    \"EraId\": {\n      \"description\": \"Era ID newtype.\",\n      \"type\": \"integer\",\n      \"format\": \"uint64\",\n      \"minimum\": 0.0\n    },\n    \"BlockBodyV1\": {\n      \"description\": \"The body portion of a block. 
Version 1.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"deploy_hashes\",\n        \"proposer\",\n        \"transfer_hashes\"\n      ],\n      \"properties\": {\n        \"proposer\": {\n          \"description\": \"The public key of the validator which proposed the block.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/PublicKey\"\n            }\n          ]\n        },\n        \"deploy_hashes\": {\n          \"description\": \"The deploy hashes of the non-transfer deploys within the block.\",\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/definitions/DeployHash\"\n          }\n        },\n        \"transfer_hashes\": {\n          \"description\": \"The deploy hashes of the transfers within the block.\",\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/definitions/DeployHash\"\n          }\n        }\n      }\n    },\n    \"DeployHash\": {\n      \"description\": \"Hex-encoded deploy hash.\",\n      \"allOf\": [\n        {\n          \"$ref\": \"#/definitions/Digest\"\n        }\n      ]\n    },\n    \"BlockV2\": {\n      \"description\": \"A block after execution, with the resulting global state root hash. This is the core component of the Casper linear blockchain. 
Version 2.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"body\",\n        \"hash\",\n        \"header\"\n      ],\n      \"properties\": {\n        \"hash\": {\n          \"description\": \"The block hash identifying this block.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/BlockHash\"\n            }\n          ]\n        },\n        \"header\": {\n          \"description\": \"The header portion of the block.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/BlockHeaderV2\"\n            }\n          ]\n        },\n        \"body\": {\n          \"description\": \"The body portion of the block.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/BlockBodyV2\"\n            }\n          ]\n        }\n      }\n    },\n    \"BlockHeaderV2\": {\n      \"description\": \"The header portion of a block.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"accumulated_seed\",\n        \"body_hash\",\n        \"current_gas_price\",\n        \"era_id\",\n        \"height\",\n        \"parent_hash\",\n        \"proposer\",\n        \"protocol_version\",\n        \"random_bit\",\n        \"state_root_hash\",\n        \"timestamp\"\n      ],\n      \"properties\": {\n        \"parent_hash\": {\n          \"description\": \"The parent block's hash.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/BlockHash\"\n            }\n          ]\n        },\n        \"state_root_hash\": {\n          \"description\": \"The root hash of global state after the deploys in this block have been executed.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Digest\"\n            }\n          ]\n        },\n        \"body_hash\": {\n          \"description\": \"The hash of the block's body.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Digest\"\n            }\n        
  ]\n        },\n        \"random_bit\": {\n          \"description\": \"A random bit needed for initializing a future era.\",\n          \"type\": \"boolean\"\n        },\n        \"accumulated_seed\": {\n          \"description\": \"A seed needed for initializing a future era.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Digest\"\n            }\n          ]\n        },\n        \"era_end\": {\n          \"description\": \"The `EraEnd` of a block if it is a switch block.\",\n          \"anyOf\": [\n            {\n              \"$ref\": \"#/definitions/EraEndV2\"\n            },\n            {\n              \"type\": \"null\"\n            }\n          ]\n        },\n        \"timestamp\": {\n          \"description\": \"The timestamp from when the block was proposed.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Timestamp\"\n            }\n          ]\n        },\n        \"era_id\": {\n          \"description\": \"The era ID in which this block was created.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/EraId\"\n            }\n          ]\n        },\n        \"height\": {\n          \"description\": \"The height of this block, i.e. 
the number of ancestors.\",\n          \"type\": \"integer\",\n          \"format\": \"uint64\",\n          \"minimum\": 0.0\n        },\n        \"protocol_version\": {\n          \"description\": \"The protocol version of the network from when this block was created.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/ProtocolVersion\"\n            }\n          ]\n        },\n        \"proposer\": {\n          \"description\": \"The public key of the validator which proposed the block.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/PublicKey\"\n            }\n          ]\n        },\n        \"current_gas_price\": {\n          \"description\": \"The gas price of the era\",\n          \"type\": \"integer\",\n          \"format\": \"uint8\",\n          \"minimum\": 0.0\n        },\n        \"last_switch_block_hash\": {\n          \"description\": \"The most recent switch block hash.\",\n          \"anyOf\": [\n            {\n              \"$ref\": \"#/definitions/BlockHash\"\n            },\n            {\n              \"type\": \"null\"\n            }\n          ]\n        }\n      }\n    },\n    \"EraEndV2\": {\n      \"description\": \"Information related to the end of an era, and validator weights for the following era.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"equivocators\",\n        \"inactive_validators\",\n        \"next_era_gas_price\",\n        \"next_era_validator_weights\",\n        \"rewards\"\n      ],\n      \"properties\": {\n        \"equivocators\": {\n          \"description\": \"The set of equivocators.\",\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/definitions/PublicKey\"\n          }\n        },\n        \"inactive_validators\": {\n          \"description\": \"Validators that haven't produced any unit during the era.\",\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": 
\"#/definitions/PublicKey\"\n          }\n        },\n        \"next_era_validator_weights\": {\n          \"description\": \"The validators for the upcoming era and their respective weights.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Array_of_ValidatorWeight\"\n            }\n          ]\n        },\n        \"rewards\": {\n          \"description\": \"The rewards distributed to the validators.\",\n          \"type\": \"object\",\n          \"additionalProperties\": {\n            \"type\": \"array\",\n            \"items\": {\n              \"$ref\": \"#/definitions/U512\"\n            }\n          }\n        },\n        \"next_era_gas_price\": {\n          \"type\": \"integer\",\n          \"format\": \"uint8\",\n          \"minimum\": 0.0\n        }\n      }\n    },\n    \"BlockBodyV2\": {\n      \"description\": \"The body portion of a block. Version 2.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"rewarded_signatures\",\n        \"transactions\"\n      ],\n      \"properties\": {\n        \"transactions\": {\n          \"description\": \"Map of transactions mapping categories to a list of transaction hashes.\",\n          \"type\": \"object\",\n          \"additionalProperties\": {\n            \"type\": \"array\",\n            \"items\": {\n              \"$ref\": \"#/definitions/TransactionHash\"\n            }\n          }\n        },\n        \"rewarded_signatures\": {\n          \"description\": \"List of identifiers for finality signatures for a particular past block.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/RewardedSignatures\"\n            }\n          ]\n        }\n      }\n    },\n    \"TransactionHash\": {\n      \"description\": \"A versioned wrapper for a transaction hash or deploy hash.\",\n      \"oneOf\": [\n        {\n          \"description\": \"A deploy hash.\",\n          \"type\": \"object\",\n          \"required\": [\n            
\"Deploy\"\n          ],\n          \"properties\": {\n            \"Deploy\": {\n              \"$ref\": \"#/definitions/DeployHash\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"A version 1 transaction hash.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Version1\"\n          ],\n          \"properties\": {\n            \"Version1\": {\n              \"$ref\": \"#/definitions/TransactionV1Hash\"\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"TransactionV1Hash\": {\n      \"description\": \"Hex-encoded TransactionV1 hash.\",\n      \"allOf\": [\n        {\n          \"$ref\": \"#/definitions/Digest\"\n        }\n      ]\n    },\n    \"RewardedSignatures\": {\n      \"description\": \"Describes finality signatures that will be rewarded in a block. Consists of a vector of `SingleBlockRewardedSignatures`, each of which describes signatures for a single ancestor block. 
The first entry represents the signatures for the parent block, the second for the parent of the parent, and so on.\",\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/definitions/SingleBlockRewardedSignatures\"\n      }\n    },\n    \"SingleBlockRewardedSignatures\": {\n      \"description\": \"List of identifiers for finality signatures for a particular past block.\\n\\nThat past block height is current_height - signature_rewards_max_delay, the latter being defined in the chainspec.\\n\\nWe need to wait for a few blocks to pass (`signature_rewards_max_delay`) to store the finality signers because we need a bit of time to get the block finality.\",\n      \"type\": \"array\",\n      \"items\": {\n        \"type\": \"integer\",\n        \"format\": \"uint8\",\n        \"minimum\": 0.0\n      }\n    },\n    \"Transaction\": {\n      \"description\": \"A versioned wrapper for a transaction or deploy.\",\n      \"oneOf\": [\n        {\n          \"description\": \"A deploy.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Deploy\"\n          ],\n          \"properties\": {\n            \"Deploy\": {\n              \"$ref\": \"#/definitions/Deploy\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"A version 1 transaction.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Version1\"\n          ],\n          \"properties\": {\n            \"Version1\": {\n              \"$ref\": \"#/definitions/TransactionV1\"\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"Deploy\": {\n      \"description\": \"A signed smart contract.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"approvals\",\n        \"hash\",\n        \"header\",\n        \"payment\",\n        \"session\"\n      ],\n      \"properties\": {\n        \"hash\": {\n          \"$ref\": 
\"#/definitions/DeployHash\"\n        },\n        \"header\": {\n          \"$ref\": \"#/definitions/DeployHeader\"\n        },\n        \"payment\": {\n          \"$ref\": \"#/definitions/ExecutableDeployItem\"\n        },\n        \"session\": {\n          \"$ref\": \"#/definitions/ExecutableDeployItem\"\n        },\n        \"approvals\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/definitions/Approval\"\n          },\n          \"uniqueItems\": true\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"DeployHeader\": {\n      \"description\": \"The header portion of a [`Deploy`].\",\n      \"type\": \"object\",\n      \"required\": [\n        \"account\",\n        \"body_hash\",\n        \"chain_name\",\n        \"dependencies\",\n        \"gas_price\",\n        \"timestamp\",\n        \"ttl\"\n      ],\n      \"properties\": {\n        \"account\": {\n          \"$ref\": \"#/definitions/PublicKey\"\n        },\n        \"timestamp\": {\n          \"$ref\": \"#/definitions/Timestamp\"\n        },\n        \"ttl\": {\n          \"$ref\": \"#/definitions/TimeDiff\"\n        },\n        \"gas_price\": {\n          \"type\": \"integer\",\n          \"format\": \"uint64\",\n          \"minimum\": 0.0\n        },\n        \"body_hash\": {\n          \"$ref\": \"#/definitions/Digest\"\n        },\n        \"dependencies\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/definitions/DeployHash\"\n          }\n        },\n        \"chain_name\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"TimeDiff\": {\n      \"description\": \"Human-readable duration.\",\n      \"type\": \"string\"\n    },\n    \"ExecutableDeployItem\": {\n      \"description\": \"The executable component of a [`Deploy`].\",\n      \"oneOf\": [\n        {\n          \"description\": \"Executable specified as raw bytes that represent Wasm 
code and an instance of [`RuntimeArgs`].\",\n          \"type\": \"object\",\n          \"required\": [\n            \"ModuleBytes\"\n          ],\n          \"properties\": {\n            \"ModuleBytes\": {\n              \"type\": \"object\",\n              \"required\": [\n                \"args\",\n                \"module_bytes\"\n              ],\n              \"properties\": {\n                \"module_bytes\": {\n                  \"description\": \"Hex-encoded raw Wasm bytes.\",\n                  \"allOf\": [\n                    {\n                      \"$ref\": \"#/definitions/Bytes\"\n                    }\n                  ]\n                },\n                \"args\": {\n                  \"description\": \"Runtime arguments.\",\n                  \"allOf\": [\n                    {\n                      \"$ref\": \"#/definitions/RuntimeArgs\"\n                    }\n                  ]\n                }\n              },\n              \"additionalProperties\": false\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Stored contract referenced by its [`AddressableEntityHash`], entry point and an instance of [`RuntimeArgs`].\",\n          \"type\": \"object\",\n          \"required\": [\n            \"StoredContractByHash\"\n          ],\n          \"properties\": {\n            \"StoredContractByHash\": {\n              \"type\": \"object\",\n              \"required\": [\n                \"args\",\n                \"entry_point\",\n                \"hash\"\n              ],\n              \"properties\": {\n                \"hash\": {\n                  \"description\": \"Hex-encoded contract hash.\",\n                  \"allOf\": [\n                    {\n                      \"$ref\": \"#/definitions/ContractHash\"\n                    }\n                  ]\n                },\n                \"entry_point\": {\n                  \"description\": \"Name of an 
entry point.\",\n                  \"type\": \"string\"\n                },\n                \"args\": {\n                  \"description\": \"Runtime arguments.\",\n                  \"allOf\": [\n                    {\n                      \"$ref\": \"#/definitions/RuntimeArgs\"\n                    }\n                  ]\n                }\n              },\n              \"additionalProperties\": false\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Stored contract referenced by a named key existing in the signer's account context, entry point and an instance of [`RuntimeArgs`].\",\n          \"type\": \"object\",\n          \"required\": [\n            \"StoredContractByName\"\n          ],\n          \"properties\": {\n            \"StoredContractByName\": {\n              \"type\": \"object\",\n              \"required\": [\n                \"args\",\n                \"entry_point\",\n                \"name\"\n              ],\n              \"properties\": {\n                \"name\": {\n                  \"description\": \"Named key.\",\n                  \"type\": \"string\"\n                },\n                \"entry_point\": {\n                  \"description\": \"Name of an entry point.\",\n                  \"type\": \"string\"\n                },\n                \"args\": {\n                  \"description\": \"Runtime arguments.\",\n                  \"allOf\": [\n                    {\n                      \"$ref\": \"#/definitions/RuntimeArgs\"\n                    }\n                  ]\n                }\n              },\n              \"additionalProperties\": false\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Stored versioned contract referenced by its [`PackageHash`], entry point and an instance of [`RuntimeArgs`].\",\n          \"type\": \"object\",\n          \"required\": [\n      
      \"StoredVersionedContractByHash\"\n          ],\n          \"properties\": {\n            \"StoredVersionedContractByHash\": {\n              \"type\": \"object\",\n              \"required\": [\n                \"args\",\n                \"entry_point\",\n                \"hash\"\n              ],\n              \"properties\": {\n                \"hash\": {\n                  \"description\": \"Hex-encoded contract package hash.\",\n                  \"allOf\": [\n                    {\n                      \"$ref\": \"#/definitions/ContractPackageHash\"\n                    }\n                  ]\n                },\n                \"version\": {\n                  \"description\": \"An optional version of the contract to call. It will default to the highest enabled version if no value is specified.\",\n                  \"type\": [\n                    \"integer\",\n                    \"null\"\n                  ],\n                  \"format\": \"uint32\",\n                  \"minimum\": 0.0\n                },\n                \"entry_point\": {\n                  \"description\": \"Entry point name.\",\n                  \"type\": \"string\"\n                },\n                \"args\": {\n                  \"description\": \"Runtime arguments.\",\n                  \"allOf\": [\n                    {\n                      \"$ref\": \"#/definitions/RuntimeArgs\"\n                    }\n                  ]\n                }\n              },\n              \"additionalProperties\": false\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Stored versioned contract referenced by a named key existing in the signer's account context, entry point and an instance of [`RuntimeArgs`].\",\n          \"type\": \"object\",\n          \"required\": [\n            \"StoredVersionedContractByName\"\n          ],\n          \"properties\": {\n            \"StoredVersionedContractByName\": 
{\n              \"type\": \"object\",\n              \"required\": [\n                \"args\",\n                \"entry_point\",\n                \"name\"\n              ],\n              \"properties\": {\n                \"name\": {\n                  \"description\": \"Named key.\",\n                  \"type\": \"string\"\n                },\n                \"version\": {\n                  \"description\": \"An optional version of the contract to call. It will default to the highest enabled version if no value is specified.\",\n                  \"type\": [\n                    \"integer\",\n                    \"null\"\n                  ],\n                  \"format\": \"uint32\",\n                  \"minimum\": 0.0\n                },\n                \"entry_point\": {\n                  \"description\": \"Entry point name.\",\n                  \"type\": \"string\"\n                },\n                \"args\": {\n                  \"description\": \"Runtime arguments.\",\n                  \"allOf\": [\n                    {\n                      \"$ref\": \"#/definitions/RuntimeArgs\"\n                    }\n                  ]\n                }\n              },\n              \"additionalProperties\": false\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"A native transfer which does not contain or reference a Wasm code.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Transfer\"\n          ],\n          \"properties\": {\n            \"Transfer\": {\n              \"type\": \"object\",\n              \"required\": [\n                \"args\"\n              ],\n              \"properties\": {\n                \"args\": {\n                  \"description\": \"Runtime arguments.\",\n                  \"allOf\": [\n                    {\n                      \"$ref\": \"#/definitions/RuntimeArgs\"\n                    }\n                  ]\n    
            }\n              },\n              \"additionalProperties\": false\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"Bytes\": {\n      \"description\": \"Hex-encoded bytes.\",\n      \"type\": \"string\"\n    },\n    \"RuntimeArgs\": {\n      \"description\": \"Represents a collection of arguments passed to a smart contract.\",\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/definitions/NamedArg\"\n      }\n    },\n    \"NamedArg\": {\n      \"description\": \"Named arguments to a contract.\",\n      \"type\": \"array\",\n      \"items\": [\n        {\n          \"type\": \"string\"\n        },\n        {\n          \"$ref\": \"#/definitions/CLValue\"\n        }\n      ],\n      \"maxItems\": 2,\n      \"minItems\": 2\n    },\n    \"CLValue\": {\n      \"description\": \"A Casper value, i.e. a value which can be stored and manipulated by smart contracts.\\n\\nIt holds the underlying data as a type-erased, serialized `Vec<u8>` and also holds the CLType of the underlying data as a separate member.\\n\\nThe `parsed` field, representing the original value, is a convenience only available when a CLValue is encoded to JSON, and can always be set to null if preferred.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"bytes\",\n        \"cl_type\"\n      ],\n      \"properties\": {\n        \"cl_type\": {\n          \"$ref\": \"#/definitions/CLType\"\n        },\n        \"bytes\": {\n          \"type\": \"string\"\n        },\n        \"parsed\": true\n      },\n      \"additionalProperties\": false\n    },\n    \"CLType\": {\n      \"description\": \"Casper types, i.e. 
types which can be stored and manipulated by smart contracts.\\n\\nProvides a description of the underlying data type of a [`CLValue`](crate::CLValue).\",\n      \"oneOf\": [\n        {\n          \"description\": \"`bool` primitive.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Bool\"\n          ]\n        },\n        {\n          \"description\": \"`i32` primitive.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"I32\"\n          ]\n        },\n        {\n          \"description\": \"`i64` primitive.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"I64\"\n          ]\n        },\n        {\n          \"description\": \"`u8` primitive.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"U8\"\n          ]\n        },\n        {\n          \"description\": \"`u32` primitive.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"U32\"\n          ]\n        },\n        {\n          \"description\": \"`u64` primitive.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"U64\"\n          ]\n        },\n        {\n          \"description\": \"[`U128`] large unsigned integer type.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"U128\"\n          ]\n        },\n        {\n          \"description\": \"[`U256`] large unsigned integer type.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"U256\"\n          ]\n        },\n        {\n          \"description\": \"[`U512`] large unsigned integer type.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"U512\"\n          ]\n        },\n        {\n          \"description\": \"`()` primitive.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Unit\"\n          ]\n        },\n        {\n          \"description\": \"`String` primitive.\",\n          \"type\": \"string\",\n          \"enum\": [\n        
    \"String\"\n          ]\n        },\n        {\n          \"description\": \"[`Key`] system type.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Key\"\n          ]\n        },\n        {\n          \"description\": \"[`URef`] system type.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"URef\"\n          ]\n        },\n        {\n          \"description\": \"[`PublicKey`](crate::PublicKey) system type.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"PublicKey\"\n          ]\n        },\n        {\n          \"description\": \"`Option` of a `CLType`.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Option\"\n          ],\n          \"properties\": {\n            \"Option\": {\n              \"$ref\": \"#/definitions/CLType\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Variable-length list of a single `CLType` (comparable to a `Vec`).\",\n          \"type\": \"object\",\n          \"required\": [\n            \"List\"\n          ],\n          \"properties\": {\n            \"List\": {\n              \"$ref\": \"#/definitions/CLType\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Fixed-length list of a single `CLType` (comparable to a Rust array).\",\n          \"type\": \"object\",\n          \"required\": [\n            \"ByteArray\"\n          ],\n          \"properties\": {\n            \"ByteArray\": {\n              \"type\": \"integer\",\n              \"format\": \"uint32\",\n              \"minimum\": 0.0\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"`Result` with `Ok` and `Err` variants of `CLType`s.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Result\"\n          ],\n          
\"properties\": {\n            \"Result\": {\n              \"type\": \"object\",\n              \"required\": [\n                \"err\",\n                \"ok\"\n              ],\n              \"properties\": {\n                \"ok\": {\n                  \"$ref\": \"#/definitions/CLType\"\n                },\n                \"err\": {\n                  \"$ref\": \"#/definitions/CLType\"\n                }\n              },\n              \"additionalProperties\": false\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Map with keys of a single `CLType` and values of a single `CLType`.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Map\"\n          ],\n          \"properties\": {\n            \"Map\": {\n              \"type\": \"object\",\n              \"required\": [\n                \"key\",\n                \"value\"\n              ],\n              \"properties\": {\n                \"key\": {\n                  \"$ref\": \"#/definitions/CLType\"\n                },\n                \"value\": {\n                  \"$ref\": \"#/definitions/CLType\"\n                }\n              },\n              \"additionalProperties\": false\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"1-ary tuple of a `CLType`.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Tuple1\"\n          ],\n          \"properties\": {\n            \"Tuple1\": {\n              \"type\": \"array\",\n              \"items\": {\n                \"$ref\": \"#/definitions/CLType\"\n              },\n              \"maxItems\": 1,\n              \"minItems\": 1\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"2-ary tuple of `CLType`s.\",\n          \"type\": \"object\",\n          \"required\": [\n            
\"Tuple2\"\n          ],\n          \"properties\": {\n            \"Tuple2\": {\n              \"type\": \"array\",\n              \"items\": {\n                \"$ref\": \"#/definitions/CLType\"\n              },\n              \"maxItems\": 2,\n              \"minItems\": 2\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"3-ary tuple of `CLType`s.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Tuple3\"\n          ],\n          \"properties\": {\n            \"Tuple3\": {\n              \"type\": \"array\",\n              \"items\": {\n                \"$ref\": \"#/definitions/CLType\"\n              },\n              \"maxItems\": 3,\n              \"minItems\": 3\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Unspecified type.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Any\"\n          ]\n        }\n      ]\n    },\n    \"ContractHash\": {\n      \"description\": \"The hash address of the contract\",\n      \"type\": \"string\"\n    },\n    \"ContractPackageHash\": {\n      \"description\": \"The hash address of the contract package\",\n      \"type\": \"string\"\n    },\n    \"Approval\": {\n      \"description\": \"A struct containing a signature of a transaction hash and the public key of the signer.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"signature\",\n        \"signer\"\n      ],\n      \"properties\": {\n        \"signer\": {\n          \"$ref\": \"#/definitions/PublicKey\"\n        },\n        \"signature\": {\n          \"$ref\": \"#/definitions/Signature\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"Signature\": {\n      \"description\": \"Hex-encoded cryptographic signature, including the algorithm tag prefix.\",\n      \"type\": \"string\"\n    },\n    \"TransactionV1\": {\n      
\"description\": \"A unit of work sent by a client to the network, which when executed can cause global state to be altered.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"approvals\",\n        \"hash\",\n        \"payload\"\n      ],\n      \"properties\": {\n        \"hash\": {\n          \"$ref\": \"#/definitions/TransactionV1Hash\"\n        },\n        \"payload\": {\n          \"$ref\": \"#/definitions/TransactionV1Payload\"\n        },\n        \"approvals\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/definitions/Approval\"\n          },\n          \"uniqueItems\": true\n        }\n      }\n    },\n    \"TransactionV1Payload\": {\n      \"description\": \"Internal payload of the transaction. The actual data over which the signing is done.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"chain_name\",\n        \"fields\",\n        \"initiator_addr\",\n        \"pricing_mode\",\n        \"timestamp\",\n        \"ttl\"\n      ],\n      \"properties\": {\n        \"initiator_addr\": {\n          \"$ref\": \"#/definitions/InitiatorAddr\"\n        },\n        \"timestamp\": {\n          \"$ref\": \"#/definitions/Timestamp\"\n        },\n        \"ttl\": {\n          \"$ref\": \"#/definitions/TimeDiff\"\n        },\n        \"chain_name\": {\n          \"type\": \"string\"\n        },\n        \"pricing_mode\": {\n          \"$ref\": \"#/definitions/PricingMode\"\n        },\n        \"fields\": {\n          \"type\": \"object\",\n          \"additionalProperties\": true\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"InitiatorAddr\": {\n      \"description\": \"The address of the initiator of a TransactionV1.\",\n      \"oneOf\": [\n        {\n          \"description\": \"The public key of the initiator.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"PublicKey\"\n          ],\n          \"properties\": {\n            \"PublicKey\": 
{\n              \"$ref\": \"#/definitions/PublicKey\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"The account hash derived from the public key of the initiator.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"AccountHash\"\n          ],\n          \"properties\": {\n            \"AccountHash\": {\n              \"$ref\": \"#/definitions/AccountHash\"\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"AccountHash\": {\n      \"description\": \"Account hash as a formatted string.\",\n      \"type\": \"string\"\n    },\n    \"PricingMode\": {\n      \"description\": \"Pricing mode of a Transaction.\",\n      \"oneOf\": [\n        {\n          \"description\": \"The original payment model, where the creator of the transaction specifies how much they will pay, at what gas price.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"PaymentLimited\"\n          ],\n          \"properties\": {\n            \"PaymentLimited\": {\n              \"type\": \"object\",\n              \"required\": [\n                \"gas_price_tolerance\",\n                \"payment_amount\",\n                \"standard_payment\"\n              ],\n              \"properties\": {\n                \"payment_amount\": {\n                  \"description\": \"User-specified payment amount.\",\n                  \"type\": \"integer\",\n                  \"format\": \"uint64\",\n                  \"minimum\": 0.0\n                },\n                \"gas_price_tolerance\": {\n                  \"description\": \"User-specified gas_price tolerance (minimum 1). 
This is interpreted to mean \\\"do not include this transaction in a block if the current gas price is greater than this number\\\"\",\n                  \"type\": \"integer\",\n                  \"format\": \"uint8\",\n                  \"minimum\": 0.0\n                },\n                \"standard_payment\": {\n                  \"description\": \"Standard payment.\",\n                  \"type\": \"boolean\"\n                }\n              },\n              \"additionalProperties\": false\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"The cost of the transaction is determined by the cost table, per the transaction category.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Fixed\"\n          ],\n          \"properties\": {\n            \"Fixed\": {\n              \"type\": \"object\",\n              \"required\": [\n                \"additional_computation_factor\",\n                \"gas_price_tolerance\"\n              ],\n              \"properties\": {\n                \"additional_computation_factor\": {\n                  \"description\": \"User-specified additional computation factor (minimum 0). If \\\"0\\\" is provided, no additional logic is applied to the computation limit. Each value above \\\"0\\\" tells the node that it needs to treat the transaction as if it uses more gas than it's serialized size indicates. Each \\\"1\\\" will increase the \\\"wasm lane\\\" size bucket for this transaction by 1. So if the size of the transaction indicates bucket \\\"0\\\" and \\\"additional_computation_factor = 2\\\", the transaction will be treated as a \\\"2\\\".\",\n                  \"type\": \"integer\",\n                  \"format\": \"uint8\",\n                  \"minimum\": 0.0\n                },\n                \"gas_price_tolerance\": {\n                  \"description\": \"User-specified gas_price tolerance (minimum 1). 
This is interpreted to mean \\\"do not include this transaction in a block if the current gas price is greater than this number\\\"\",\n                  \"type\": \"integer\",\n                  \"format\": \"uint8\",\n                  \"minimum\": 0.0\n                }\n              },\n              \"additionalProperties\": false\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"The payment for this transaction was previously paid, as proven by the receipt hash (this is for future use, not currently implemented).\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Prepaid\"\n          ],\n          \"properties\": {\n            \"Prepaid\": {\n              \"type\": \"object\",\n              \"required\": [\n                \"receipt\"\n              ],\n              \"properties\": {\n                \"receipt\": {\n                  \"description\": \"Pre-paid receipt.\",\n                  \"allOf\": [\n                    {\n                      \"$ref\": \"#/definitions/Digest\"\n                    }\n                  ]\n                }\n              },\n              \"additionalProperties\": false\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"ExecutionResult\": {\n      \"description\": \"The versioned result of executing a single deploy.\",\n      \"oneOf\": [\n        {\n          \"description\": \"Version 1 of execution result type.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Version1\"\n          ],\n          \"properties\": {\n            \"Version1\": {\n              \"$ref\": \"#/definitions/ExecutionResultV1\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Version 2 of execution result type.\",\n          \"type\": \"object\",\n          \"required\": [\n        
    \"Version2\"\n          ],\n          \"properties\": {\n            \"Version2\": {\n              \"$ref\": \"#/definitions/ExecutionResultV2\"\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"ExecutionResultV1\": {\n      \"description\": \"The result of executing a single deploy.\",\n      \"oneOf\": [\n        {\n          \"description\": \"The result of a failed execution.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Failure\"\n          ],\n          \"properties\": {\n            \"Failure\": {\n              \"type\": \"object\",\n              \"required\": [\n                \"cost\",\n                \"effect\",\n                \"error_message\",\n                \"transfers\"\n              ],\n              \"properties\": {\n                \"effect\": {\n                  \"description\": \"The effect of executing the deploy.\",\n                  \"allOf\": [\n                    {\n                      \"$ref\": \"#/definitions/ExecutionEffect\"\n                    }\n                  ]\n                },\n                \"transfers\": {\n                  \"description\": \"A record of version 1 Transfers performed while executing the deploy.\",\n                  \"type\": \"array\",\n                  \"items\": {\n                    \"$ref\": \"#/definitions/TransferAddr\"\n                  }\n                },\n                \"cost\": {\n                  \"description\": \"The cost of executing the deploy.\",\n                  \"allOf\": [\n                    {\n                      \"$ref\": \"#/definitions/U512\"\n                    }\n                  ]\n                },\n                \"error_message\": {\n                  \"description\": \"The error message associated with executing the deploy.\",\n                  \"type\": \"string\"\n                }\n              },\n              \"additionalProperties\": false\n 
           }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"The result of a successful execution.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Success\"\n          ],\n          \"properties\": {\n            \"Success\": {\n              \"type\": \"object\",\n              \"required\": [\n                \"cost\",\n                \"effect\",\n                \"transfers\"\n              ],\n              \"properties\": {\n                \"effect\": {\n                  \"description\": \"The effect of executing the deploy.\",\n                  \"allOf\": [\n                    {\n                      \"$ref\": \"#/definitions/ExecutionEffect\"\n                    }\n                  ]\n                },\n                \"transfers\": {\n                  \"description\": \"A record of Transfers performed while executing the deploy.\",\n                  \"type\": \"array\",\n                  \"items\": {\n                    \"$ref\": \"#/definitions/TransferAddr\"\n                  }\n                },\n                \"cost\": {\n                  \"description\": \"The cost of executing the deploy.\",\n                  \"allOf\": [\n                    {\n                      \"$ref\": \"#/definitions/U512\"\n                    }\n                  ]\n                }\n              },\n              \"additionalProperties\": false\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"ExecutionEffect\": {\n      \"description\": \"The sequence of execution transforms from a single deploy.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"operations\",\n        \"transforms\"\n      ],\n      \"properties\": {\n        \"operations\": {\n          \"description\": \"The resulting operations.\",\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": 
\"#/definitions/Operation\"\n          }\n        },\n        \"transforms\": {\n          \"description\": \"The sequence of execution transforms.\",\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/definitions/TransformV1\"\n          }\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"Operation\": {\n      \"description\": \"An operation performed while executing a deploy.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"key\",\n        \"kind\"\n      ],\n      \"properties\": {\n        \"key\": {\n          \"description\": \"The formatted string of the `Key`.\",\n          \"type\": \"string\"\n        },\n        \"kind\": {\n          \"description\": \"The type of operation.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/OpKind\"\n            }\n          ]\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"OpKind\": {\n      \"description\": \"The type of operation performed while executing a deploy.\",\n      \"oneOf\": [\n        {\n          \"description\": \"A read operation.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Read\"\n          ]\n        },\n        {\n          \"description\": \"A write operation.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Write\"\n          ]\n        },\n        {\n          \"description\": \"An addition.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Add\"\n          ]\n        },\n        {\n          \"description\": \"An operation which has no effect.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"NoOp\"\n          ]\n        },\n        {\n          \"description\": \"A prune operation.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Prune\"\n          ]\n        }\n      ]\n    },\n    \"TransformV1\": {\n      \"description\": \"A 
transformation performed while executing a deploy.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"key\",\n        \"transform\"\n      ],\n      \"properties\": {\n        \"key\": {\n          \"description\": \"The formatted string of the `Key`.\",\n          \"type\": \"string\"\n        },\n        \"transform\": {\n          \"description\": \"The transformation.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/TransformKindV1\"\n            }\n          ]\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"TransformKindV1\": {\n      \"description\": \"The actual transformation performed while executing a deploy.\",\n      \"oneOf\": [\n        {\n          \"description\": \"A transform having no effect.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Identity\"\n          ]\n        },\n        {\n          \"description\": \"Writes the given CLValue to global state.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"WriteCLValue\"\n          ],\n          \"properties\": {\n            \"WriteCLValue\": {\n              \"$ref\": \"#/definitions/CLValue\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Writes the given Account to global state.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"WriteAccount\"\n          ],\n          \"properties\": {\n            \"WriteAccount\": {\n              \"$ref\": \"#/definitions/AccountHash\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Writes a smart contract as Wasm to global state.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"WriteContractWasm\"\n          ]\n        },\n        {\n          \"description\": \"Writes a smart contract to global state.\",\n          \"type\": 
\"string\",\n          \"enum\": [\n            \"WriteContract\"\n          ]\n        },\n        {\n          \"description\": \"Writes a smart contract package to global state.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"WriteContractPackage\"\n          ]\n        },\n        {\n          \"description\": \"Writes the given DeployInfo to global state.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"WriteDeployInfo\"\n          ],\n          \"properties\": {\n            \"WriteDeployInfo\": {\n              \"$ref\": \"#/definitions/DeployInfo\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Writes the given EraInfo to global state.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"WriteEraInfo\"\n          ],\n          \"properties\": {\n            \"WriteEraInfo\": {\n              \"$ref\": \"#/definitions/EraInfo\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Writes the given version 1 Transfer to global state.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"WriteTransfer\"\n          ],\n          \"properties\": {\n            \"WriteTransfer\": {\n              \"$ref\": \"#/definitions/TransferV1\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Writes the given Bid to global state.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"WriteBid\"\n          ],\n          \"properties\": {\n            \"WriteBid\": {\n              \"$ref\": \"#/definitions/Bid\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Writes the given Withdraw to global state.\",\n          \"type\": \"object\",\n          
\"required\": [\n            \"WriteWithdraw\"\n          ],\n          \"properties\": {\n            \"WriteWithdraw\": {\n              \"type\": \"array\",\n              \"items\": {\n                \"$ref\": \"#/definitions/WithdrawPurse\"\n              }\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Adds the given `i32`.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"AddInt32\"\n          ],\n          \"properties\": {\n            \"AddInt32\": {\n              \"type\": \"integer\",\n              \"format\": \"int32\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Adds the given `u64`.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"AddUInt64\"\n          ],\n          \"properties\": {\n            \"AddUInt64\": {\n              \"type\": \"integer\",\n              \"format\": \"uint64\",\n              \"minimum\": 0.0\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Adds the given `U128`.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"AddUInt128\"\n          ],\n          \"properties\": {\n            \"AddUInt128\": {\n              \"$ref\": \"#/definitions/U128\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Adds the given `U256`.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"AddUInt256\"\n          ],\n          \"properties\": {\n            \"AddUInt256\": {\n              \"$ref\": \"#/definitions/U256\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Adds the given `U512`.\",\n          \"type\": \"object\",\n          \"required\": 
[\n            \"AddUInt512\"\n          ],\n          \"properties\": {\n            \"AddUInt512\": {\n              \"$ref\": \"#/definitions/U512\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Adds the given collection of named keys.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"AddKeys\"\n          ],\n          \"properties\": {\n            \"AddKeys\": {\n              \"type\": \"array\",\n              \"items\": {\n                \"$ref\": \"#/definitions/NamedKey\"\n              }\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"A failed transformation, containing an error message.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Failure\"\n          ],\n          \"properties\": {\n            \"Failure\": {\n              \"type\": \"string\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Writes the given Unbonding to global state.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"WriteUnbonding\"\n          ],\n          \"properties\": {\n            \"WriteUnbonding\": {\n              \"type\": \"array\",\n              \"items\": {\n                \"$ref\": \"#/definitions/UnbondingPurse\"\n              }\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Writes the addressable entity to global state.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"WriteAddressableEntity\"\n          ]\n        },\n        {\n          \"description\": \"Removes pathing to keyed value within global state. 
This is a form of soft delete; the underlying value remains in global state and is reachable from older global state root hashes where it was included in the hash up.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Prune\"\n          ],\n          \"properties\": {\n            \"Prune\": {\n              \"$ref\": \"#/definitions/Key\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Writes the given BidKind to global state.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"WriteBidKind\"\n          ],\n          \"properties\": {\n            \"WriteBidKind\": {\n              \"$ref\": \"#/definitions/BidKind\"\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"DeployInfo\": {\n      \"description\": \"Information relating to the given Deploy.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"deploy_hash\",\n        \"from\",\n        \"gas\",\n        \"source\",\n        \"transfers\"\n      ],\n      \"properties\": {\n        \"deploy_hash\": {\n          \"description\": \"Hex-encoded Deploy hash.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/DeployHash\"\n            }\n          ]\n        },\n        \"transfers\": {\n          \"description\": \"Version 1 transfers performed by the Deploy.\",\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/definitions/TransferAddr\"\n          }\n        },\n        \"from\": {\n          \"description\": \"Account identifier of the creator of the Deploy.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/AccountHash\"\n            }\n          ]\n        },\n        \"source\": {\n          \"description\": \"Source purse used for payment of the Deploy.\",\n          \"allOf\": [\n            {\n              
\"$ref\": \"#/definitions/URef\"\n            }\n          ]\n        },\n        \"gas\": {\n          \"description\": \"Gas cost of executing the Deploy.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/U512\"\n            }\n          ]\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"TransferAddr\": {\n      \"description\": \"Hex-encoded version 1 transfer address.\",\n      \"type\": \"string\"\n    },\n    \"URef\": {\n      \"description\": \"Hex-encoded, formatted URef.\",\n      \"type\": \"string\"\n    },\n    \"EraInfo\": {\n      \"description\": \"Auction metadata.  Intended to be recorded at each era.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"seigniorage_allocations\"\n      ],\n      \"properties\": {\n        \"seigniorage_allocations\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/definitions/SeigniorageAllocation\"\n          }\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"SeigniorageAllocation\": {\n      \"description\": \"Information about a seigniorage allocation\",\n      \"oneOf\": [\n        {\n          \"description\": \"Info about a seigniorage allocation for a validator\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Validator\"\n          ],\n          \"properties\": {\n            \"Validator\": {\n              \"type\": \"object\",\n              \"required\": [\n                \"amount\",\n                \"validator_public_key\"\n              ],\n              \"properties\": {\n                \"validator_public_key\": {\n                  \"description\": \"Validator's public key\",\n                  \"allOf\": [\n                    {\n                      \"$ref\": \"#/definitions/PublicKey\"\n                    }\n                  ]\n                },\n                \"amount\": {\n                  \"description\": \"Allocated 
amount\",\n                  \"allOf\": [\n                    {\n                      \"$ref\": \"#/definitions/U512\"\n                    }\n                  ]\n                }\n              },\n              \"additionalProperties\": false\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Info about a seigniorage allocation for a delegator\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Delegator\"\n          ],\n          \"properties\": {\n            \"Delegator\": {\n              \"type\": \"object\",\n              \"required\": [\n                \"amount\",\n                \"delegator_public_key\",\n                \"validator_public_key\"\n              ],\n              \"properties\": {\n                \"delegator_public_key\": {\n                  \"description\": \"Delegator's public key\",\n                  \"allOf\": [\n                    {\n                      \"$ref\": \"#/definitions/PublicKey\"\n                    }\n                  ]\n                },\n                \"validator_public_key\": {\n                  \"description\": \"Validator's public key\",\n                  \"allOf\": [\n                    {\n                      \"$ref\": \"#/definitions/PublicKey\"\n                    }\n                  ]\n                },\n                \"amount\": {\n                  \"description\": \"Allocated amount\",\n                  \"allOf\": [\n                    {\n                      \"$ref\": \"#/definitions/U512\"\n                    }\n                  ]\n                }\n              },\n              \"additionalProperties\": false\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Info about a seigniorage allocation for a delegator\",\n          \"type\": \"object\",\n          \"required\": [\n            
\"DelegatorKind\"\n          ],\n          \"properties\": {\n            \"DelegatorKind\": {\n              \"type\": \"object\",\n              \"required\": [\n                \"amount\",\n                \"delegator_kind\",\n                \"validator_public_key\"\n              ],\n              \"properties\": {\n                \"delegator_kind\": {\n                  \"description\": \"Delegator kind\",\n                  \"allOf\": [\n                    {\n                      \"$ref\": \"#/definitions/DelegatorKind\"\n                    }\n                  ]\n                },\n                \"validator_public_key\": {\n                  \"description\": \"Validator's public key\",\n                  \"allOf\": [\n                    {\n                      \"$ref\": \"#/definitions/PublicKey\"\n                    }\n                  ]\n                },\n                \"amount\": {\n                  \"description\": \"Allocated amount\",\n                  \"allOf\": [\n                    {\n                      \"$ref\": \"#/definitions/U512\"\n                    }\n                  ]\n                }\n              },\n              \"additionalProperties\": false\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"DelegatorKind\": {\n      \"description\": \"
Kinds of delegation bids.\",\n      \"oneOf\": [\n        {\n          \"description\": \"Delegation from public key.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"PublicKey\"\n          ],\n          \"properties\": {\n            \"PublicKey\": {\n              \"$ref\": \"#/definitions/PublicKey\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Delegation from purse.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Purse\"\n          ],\n          \"properties\": {\n            \"Purse\": {\n              \"type\": \"string\"\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"TransferV1\": {\n      \"description\": \"Represents a version 1 transfer from one purse to another.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"amount\",\n        \"deploy_hash\",\n        \"from\",\n        \"gas\",\n        \"source\",\n        \"target\"\n      ],\n      \"properties\": {\n        \"deploy_hash\": {\n          \"description\": \"Hex-encoded Deploy hash of Deploy that created the transfer.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/DeployHash\"\n            }\n          ]\n        },\n        \"from\": {\n          \"description\": \"Account from which transfer was executed\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/AccountHash\"\n            }\n          ]\n        },\n        \"to\": {\n          \"description\": \"Account to which funds are transferred\",\n          \"anyOf\": [\n            {\n              \"$ref\": \"#/definitions/AccountHash\"\n            },\n            {\n              \"type\": \"null\"\n            }\n          ]\n        },\n        \"source\": {\n          \"description\": \"Source purse\",\n          \"allOf\": [\n            {\n              
\"$ref\": \"#/definitions/URef\"\n            }\n          ]\n        },\n        \"target\": {\n          \"description\": \"Target purse\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/URef\"\n            }\n          ]\n        },\n        \"amount\": {\n          \"description\": \"Transfer amount\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/U512\"\n            }\n          ]\n        },\n        \"gas\": {\n          \"description\": \"Gas\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/U512\"\n            }\n          ]\n        },\n        \"id\": {\n          \"description\": \"User-defined id\",\n          \"type\": [\n            \"integer\",\n            \"null\"\n          ],\n          \"format\": \"uint64\",\n          \"minimum\": 0.0\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"Bid\": {\n      \"description\": \"An entry in the validator map.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"bonding_purse\",\n        \"delegation_rate\",\n        \"delegators\",\n        \"inactive\",\n        \"staked_amount\",\n        \"validator_public_key\"\n      ],\n      \"properties\": {\n        \"validator_public_key\": {\n          \"description\": \"Validator public key.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/PublicKey\"\n            }\n          ]\n        },\n        \"bonding_purse\": {\n          \"description\": \"The purse that was used for bonding.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/URef\"\n            }\n          ]\n        },\n        \"staked_amount\": {\n          \"description\": \"The amount of tokens staked by a validator (not including delegators).\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/U512\"\n            }\n          ]\n        },\n        
\"delegation_rate\": {\n          \"description\": \"Delegation rate.\",\n          \"type\": \"integer\",\n          \"format\": \"uint8\",\n          \"minimum\": 0.0\n        },\n        \"vesting_schedule\": {\n          \"description\": \"Vesting schedule for a genesis validator. `None` if non-genesis validator.\",\n          \"anyOf\": [\n            {\n              \"$ref\": \"#/definitions/VestingSchedule\"\n            },\n            {\n              \"type\": \"null\"\n            }\n          ]\n        },\n        \"delegators\": {\n          \"description\": \"This validator's delegators, indexed by their public keys.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Array_of_PublicKeyAndDelegator\"\n            }\n          ]\n        },\n        \"inactive\": {\n          \"description\": \"`true` if validator has been \\\"evicted\\\".\",\n          \"type\": \"boolean\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"VestingSchedule\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"initial_release_timestamp_millis\"\n      ],\n      \"properties\": {\n        \"initial_release_timestamp_millis\": {\n          \"type\": \"integer\",\n          \"format\": \"uint64\",\n          \"minimum\": 0.0\n        },\n        \"locked_amounts\": {\n          \"type\": [\n            \"array\",\n            \"null\"\n          ],\n          \"items\": {\n            \"$ref\": \"#/definitions/U512\"\n          },\n          \"maxItems\": 14,\n          \"minItems\": 14\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"Array_of_PublicKeyAndDelegator\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/definitions/PublicKeyAndDelegator\"\n      }\n    },\n    \"PublicKeyAndDelegator\": {\n      \"description\": \"A delegator associated with the given validator.\",\n      \"type\": \"object\",\n      \"required\": [\n        
\"delegator\",\n        \"delegator_public_key\"\n      ],\n      \"properties\": {\n        \"delegator_public_key\": {\n          \"description\": \"The public key of the delegator.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/PublicKey\"\n            }\n          ]\n        },\n        \"delegator\": {\n          \"description\": \"The delegator details.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Delegator\"\n            }\n          ]\n        }\n      }\n    },\n    \"Delegator\": {\n      \"description\": \"Represents a party delegating their stake to a validator (or \\\"delegatee\\\")\",\n      \"type\": \"object\",\n      \"required\": [\n        \"bonding_purse\",\n        \"delegator_public_key\",\n        \"staked_amount\",\n        \"validator_public_key\"\n      ],\n      \"properties\": {\n        \"delegator_public_key\": {\n          \"$ref\": \"#/definitions/PublicKey\"\n        },\n        \"staked_amount\": {\n          \"$ref\": \"#/definitions/U512\"\n        },\n        \"bonding_purse\": {\n          \"$ref\": \"#/definitions/URef\"\n        },\n        \"validator_public_key\": {\n          \"$ref\": \"#/definitions/PublicKey\"\n        },\n        \"vesting_schedule\": {\n          \"anyOf\": [\n            {\n              \"$ref\": \"#/definitions/VestingSchedule\"\n            },\n            {\n              \"type\": \"null\"\n            }\n          ]\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"WithdrawPurse\": {\n      \"description\": \"A withdraw purse, a legacy structure.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"amount\",\n        \"bonding_purse\",\n        \"era_of_creation\",\n        \"unbonder_public_key\",\n        \"validator_public_key\"\n      ],\n      \"properties\": {\n        \"bonding_purse\": {\n          \"description\": \"Bonding Purse\",\n          \"allOf\": [\n            {\n    
          \"$ref\": \"#/definitions/URef\"\n            }\n          ]\n        },\n        \"validator_public_key\": {\n          \"description\": \"Validators public key.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/PublicKey\"\n            }\n          ]\n        },\n        \"unbonder_public_key\": {\n          \"description\": \"Unbonders public key.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/PublicKey\"\n            }\n          ]\n        },\n        \"era_of_creation\": {\n          \"description\": \"Era in which this unbonding request was created.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/EraId\"\n            }\n          ]\n        },\n        \"amount\": {\n          \"description\": \"Unbonding Amount.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/U512\"\n            }\n          ]\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"U128\": {\n      \"description\": \"Decimal representation of a 128-bit integer.\",\n      \"type\": \"string\"\n    },\n    \"U256\": {\n      \"description\": \"Decimal representation of a 256-bit integer.\",\n      \"type\": \"string\"\n    },\n    \"NamedKey\": {\n      \"description\": \"A key with a name.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"key\",\n        \"name\"\n      ],\n      \"properties\": {\n        \"name\": {\n          \"description\": \"The name of the entry.\",\n          \"type\": \"string\"\n        },\n        \"key\": {\n          \"description\": \"The value of the entry: a casper `Key` type.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Key\"\n            }\n          ]\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"Key\": {\n      \"description\": \"The key as a formatted string, under which data (e.g. 
`CLValue`s, smart contracts, user accounts) are stored in global state.\",\n      \"type\": \"string\"\n    },\n    \"UnbondingPurse\": {\n      \"description\": \"Unbonding purse.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"amount\",\n        \"bonding_purse\",\n        \"era_of_creation\",\n        \"unbonder_public_key\",\n        \"validator_public_key\"\n      ],\n      \"properties\": {\n        \"bonding_purse\": {\n          \"description\": \"Bonding Purse\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/URef\"\n            }\n          ]\n        },\n        \"validator_public_key\": {\n          \"description\": \"Validators public key.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/PublicKey\"\n            }\n          ]\n        },\n        \"unbonder_public_key\": {\n          \"description\": \"Unbonders public key.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/PublicKey\"\n            }\n          ]\n        },\n        \"era_of_creation\": {\n          \"description\": \"Era in which this unbonding request was created.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/EraId\"\n            }\n          ]\n        },\n        \"amount\": {\n          \"description\": \"Unbonding Amount.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/U512\"\n            }\n          ]\n        },\n        \"new_validator\": {\n          \"description\": \"The validator public key to re-delegate to.\",\n          \"anyOf\": [\n            {\n              \"$ref\": \"#/definitions/PublicKey\"\n            },\n            {\n              \"type\": \"null\"\n            }\n          ]\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"BidKind\": {\n      \"description\": \"Auction bid variants.\",\n      \"oneOf\": [\n        {\n          
\"description\": \"A unified record indexed on validator data, with an embedded collection of all delegator bids assigned to that validator. The Unified variant is for legacy retrograde support, new instances will not be created going forward.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Unified\"\n          ],\n          \"properties\": {\n            \"Unified\": {\n              \"$ref\": \"#/definitions/Bid\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"A bid record containing only validator data.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Validator\"\n          ],\n          \"properties\": {\n            \"Validator\": {\n              \"$ref\": \"#/definitions/ValidatorBid\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"A bid record containing only delegator data.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Delegator\"\n          ],\n          \"properties\": {\n            \"Delegator\": {\n              \"$ref\": \"#/definitions/DelegatorBid\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"A bridge record pointing to a new `ValidatorBid` after the public key was changed.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Bridge\"\n          ],\n          \"properties\": {\n            \"Bridge\": {\n              \"$ref\": \"#/definitions/Bridge\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Credited amount.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Credit\"\n          ],\n          \"properties\": {\n            \"Credit\": {\n              \"$ref\": 
\"#/definitions/ValidatorCredit\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Reservation\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Reservation\"\n          ],\n          \"properties\": {\n            \"Reservation\": {\n              \"$ref\": \"#/definitions/Reservation\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Unbond\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Unbond\"\n          ],\n          \"properties\": {\n            \"Unbond\": {\n              \"$ref\": \"#/definitions/Unbond\"\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"ValidatorBid\": {\n      \"description\": \"An entry in the validator map.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"bonding_purse\",\n        \"delegation_rate\",\n        \"inactive\",\n        \"maximum_delegation_amount\",\n        \"minimum_delegation_amount\",\n        \"reserved_slots\",\n        \"staked_amount\",\n        \"validator_public_key\"\n      ],\n      \"properties\": {\n        \"validator_public_key\": {\n          \"description\": \"Validator public key\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/PublicKey\"\n            }\n          ]\n        },\n        \"bonding_purse\": {\n          \"description\": \"The purse that was used for bonding.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/URef\"\n            }\n          ]\n        },\n        \"staked_amount\": {\n          \"description\": \"The amount of tokens staked by a validator (not including delegators).\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/U512\"\n            }\n          ]\n        },\n        \"delegation_rate\": 
{\n          \"description\": \"Delegation rate\",\n          \"type\": \"integer\",\n          \"format\": \"uint8\",\n          \"minimum\": 0.0\n        },\n        \"vesting_schedule\": {\n          \"description\": \"Vesting schedule for a genesis validator. `None` if non-genesis validator.\",\n          \"anyOf\": [\n            {\n              \"$ref\": \"#/definitions/VestingSchedule\"\n            },\n            {\n              \"type\": \"null\"\n            }\n          ]\n        },\n        \"inactive\": {\n          \"description\": \"`true` if validator has been \\\"evicted\\\"\",\n          \"type\": \"boolean\"\n        },\n        \"minimum_delegation_amount\": {\n          \"description\": \"Minimum allowed delegation amount in motes\",\n          \"type\": \"integer\",\n          \"format\": \"uint64\",\n          \"minimum\": 0.0\n        },\n        \"maximum_delegation_amount\": {\n          \"description\": \"Maximum allowed delegation amount in motes\",\n          \"type\": \"integer\",\n          \"format\": \"uint64\",\n          \"minimum\": 0.0\n        },\n        \"reserved_slots\": {\n          \"description\": \"Slots reserved for specific delegators\",\n          \"type\": \"integer\",\n          \"format\": \"uint32\",\n          \"minimum\": 0.0\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"DelegatorBid\": {\n      \"description\": \"Represents a party delegating their stake to a validator (or \\\"delegatee\\\")\",\n      \"type\": \"object\",\n      \"required\": [\n        \"bonding_purse\",\n        \"delegator_kind\",\n        \"staked_amount\",\n        \"validator_public_key\"\n      ],\n      \"properties\": {\n        \"delegator_kind\": {\n          \"$ref\": \"#/definitions/DelegatorKind\"\n        },\n        \"staked_amount\": {\n          \"$ref\": \"#/definitions/U512\"\n        },\n        \"bonding_purse\": {\n          \"$ref\": \"#/definitions/URef\"\n        },\n        
\"validator_public_key\": {\n          \"$ref\": \"#/definitions/PublicKey\"\n        },\n        \"vesting_schedule\": {\n          \"anyOf\": [\n            {\n              \"$ref\": \"#/definitions/VestingSchedule\"\n            },\n            {\n              \"type\": \"null\"\n            }\n          ]\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"Bridge\": {\n      \"description\": \"A bridge record pointing to a new `ValidatorBid` after the public key was changed.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"era_id\",\n        \"new_validator_public_key\",\n        \"old_validator_public_key\"\n      ],\n      \"properties\": {\n        \"old_validator_public_key\": {\n          \"description\": \"Previous validator public key associated with the bid.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/PublicKey\"\n            }\n          ]\n        },\n        \"new_validator_public_key\": {\n          \"description\": \"New validator public key associated with the bid.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/PublicKey\"\n            }\n          ]\n        },\n        \"era_id\": {\n          \"description\": \"Era when bridge record was created.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/EraId\"\n            }\n          ]\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"ValidatorCredit\": {\n      \"description\": \"Validator credit record.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"amount\",\n        \"era_id\",\n        \"validator_public_key\"\n      ],\n      \"properties\": {\n        \"validator_public_key\": {\n          \"description\": \"Validator public key\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/PublicKey\"\n            }\n          ]\n        },\n        \"era_id\": {\n          
\"description\": \"The era id the credit was created.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/EraId\"\n            }\n          ]\n        },\n        \"amount\": {\n          \"description\": \"The credit amount.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/U512\"\n            }\n          ]\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"Reservation\": {\n      \"description\": \"Represents a validator reserving a slot for specific delegator\",\n      \"type\": \"object\",\n      \"required\": [\n        \"delegation_rate\",\n        \"delegator_kind\",\n        \"validator_public_key\"\n      ],\n      \"properties\": {\n        \"delegator_kind\": {\n          \"description\": \"Delegator kind.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/DelegatorKind\"\n            }\n          ]\n        },\n        \"validator_public_key\": {\n          \"description\": \"Validator public key.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/PublicKey\"\n            }\n          ]\n        },\n        \"delegation_rate\": {\n          \"description\": \"Individual delegation rate.\",\n          \"type\": \"integer\",\n          \"format\": \"uint8\",\n          \"minimum\": 0.0\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"Unbond\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"eras\",\n        \"unbond_kind\",\n        \"validator_public_key\"\n      ],\n      \"properties\": {\n        \"validator_public_key\": {\n          \"description\": \"Validators public key.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/PublicKey\"\n            }\n          ]\n        },\n        \"unbond_kind\": {\n          \"description\": \"Unbond kind.\",\n          \"allOf\": [\n            {\n              \"$ref\": 
\"#/definitions/UnbondKind\"\n            }\n          ]\n        },\n        \"eras\": {\n          \"description\": \"Unbond amounts per era.\",\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/definitions/UnbondEra\"\n          }\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"UnbondKind\": {\n      \"description\": \"Unbond variants.\",\n      \"oneOf\": [\n        {\n          \"type\": \"object\",\n          \"required\": [\n            \"Validator\"\n          ],\n          \"properties\": {\n            \"Validator\": {\n              \"$ref\": \"#/definitions/PublicKey\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"type\": \"object\",\n          \"required\": [\n            \"DelegatedPublicKey\"\n          ],\n          \"properties\": {\n            \"DelegatedPublicKey\": {\n              \"$ref\": \"#/definitions/PublicKey\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"type\": \"object\",\n          \"required\": [\n            \"DelegatedPurse\"\n          ],\n          \"properties\": {\n            \"DelegatedPurse\": {\n              \"type\": \"string\"\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"UnbondEra\": {\n      \"description\": \"Unbond amounts per era.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"amount\",\n        \"bonding_purse\",\n        \"era_of_creation\"\n      ],\n      \"properties\": {\n        \"bonding_purse\": {\n          \"description\": \"Bonding Purse\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/URef\"\n            }\n          ]\n        },\n        \"era_of_creation\": {\n          \"description\": \"Era in which this unbonding request was created.\",\n          \"allOf\": [\n            {\n              \"$ref\": 
\"#/definitions/EraId\"\n            }\n          ]\n        },\n        \"amount\": {\n          \"description\": \"Unbonding Amount.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/U512\"\n            }\n          ]\n        },\n        \"new_validator\": {\n          \"description\": \"The validator public key to re-delegate to.\",\n          \"anyOf\": [\n            {\n              \"$ref\": \"#/definitions/PublicKey\"\n            },\n            {\n              \"type\": \"null\"\n            }\n          ]\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"ExecutionResultV2\": {\n      \"description\": \"The result of executing a single transaction.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"consumed\",\n        \"cost\",\n        \"current_price\",\n        \"effects\",\n        \"initiator\",\n        \"limit\",\n        \"refund\",\n        \"size_estimate\",\n        \"transfers\"\n      ],\n      \"properties\": {\n        \"initiator\": {\n          \"description\": \"Who initiated this transaction.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/InitiatorAddr\"\n            }\n          ]\n        },\n        \"error_message\": {\n          \"description\": \"If there is no error message, this execution was processed successfully. If there is an error message, this execution failed to fully process for the stated reason.\",\n          \"type\": [\n            \"string\",\n            \"null\"\n          ]\n        },\n        \"current_price\": {\n          \"description\": \"The current gas price. I.e. 
how many motes are charged for each unit of computation.\",\n          \"type\": \"integer\",\n          \"format\": \"uint8\",\n          \"minimum\": 0.0\n        },\n        \"limit\": {\n          \"description\": \"The maximum allowed gas limit for this transaction\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Gas\"\n            }\n          ]\n        },\n        \"consumed\": {\n          \"description\": \"How much gas was consumed executing this transaction.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Gas\"\n            }\n          ]\n        },\n        \"cost\": {\n          \"description\": \"How much was paid for this transaction.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/U512\"\n            }\n          ]\n        },\n        \"refund\": {\n          \"description\": \"How much unconsumed gas was refunded (if any)?\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/U512\"\n            }\n          ]\n        },\n        \"transfers\": {\n          \"description\": \"A record of transfers performed while executing this transaction.\",\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/definitions/Transfer\"\n          }\n        },\n        \"size_estimate\": {\n          \"description\": \"The size estimate of the transaction\",\n          \"type\": \"integer\",\n          \"format\": \"uint64\",\n          \"minimum\": 0.0\n        },\n        \"effects\": {\n          \"description\": \"The effects of executing this transaction.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Effects\"\n            }\n          ]\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"Gas\": {\n      \"description\": \"The `Gas` struct represents a `U512` amount of gas.\",\n      \"allOf\": [\n        {\n          \"$ref\": 
\"#/definitions/U512\"\n        }\n      ]\n    },\n    \"Transfer\": {\n      \"description\": \"A versioned wrapper for a transfer.\",\n      \"oneOf\": [\n        {\n          \"description\": \"A version 1 transfer.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Version1\"\n          ],\n          \"properties\": {\n            \"Version1\": {\n              \"$ref\": \"#/definitions/TransferV1\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"A version 2 transfer.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Version2\"\n          ],\n          \"properties\": {\n            \"Version2\": {\n              \"$ref\": \"#/definitions/TransferV2\"\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"TransferV2\": {\n      \"description\": \"Represents a version 2 transfer from one purse to another.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"amount\",\n        \"from\",\n        \"gas\",\n        \"source\",\n        \"target\",\n        \"transaction_hash\"\n      ],\n      \"properties\": {\n        \"transaction_hash\": {\n          \"description\": \"Transaction that created the transfer.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/TransactionHash\"\n            }\n          ]\n        },\n        \"from\": {\n          \"description\": \"Entity from which transfer was executed.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/InitiatorAddr\"\n            }\n          ]\n        },\n        \"to\": {\n          \"description\": \"Account to which funds are transferred.\",\n          \"anyOf\": [\n            {\n              \"$ref\": \"#/definitions/AccountHash\"\n            },\n            {\n              \"type\": \"null\"\n            }\n          ]\n        },\n        
\"source\": {\n          \"description\": \"Source purse.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/URef\"\n            }\n          ]\n        },\n        \"target\": {\n          \"description\": \"Target purse.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/URef\"\n            }\n          ]\n        },\n        \"amount\": {\n          \"description\": \"Transfer amount.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/U512\"\n            }\n          ]\n        },\n        \"gas\": {\n          \"description\": \"Gas.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Gas\"\n            }\n          ]\n        },\n        \"id\": {\n          \"description\": \"User-defined ID.\",\n          \"type\": [\n            \"integer\",\n            \"null\"\n          ],\n          \"format\": \"uint64\",\n          \"minimum\": 0.0\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"Effects\": {\n      \"description\": \"A log of all transforms produced during execution.\",\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/definitions/TransformV2\"\n      }\n    },\n    \"TransformV2\": {\n      \"description\": \"A transformation performed while executing a deploy.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"key\",\n        \"kind\"\n      ],\n      \"properties\": {\n        \"key\": {\n          \"$ref\": \"#/definitions/Key\"\n        },\n        \"kind\": {\n          \"$ref\": \"#/definitions/TransformKindV2\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"TransformKindV2\": {\n      \"description\": \"Representation of a single transformation occurring during execution.\\n\\nNote that all arithmetic variants of `TransformKindV2` are commutative which means that a given collection of them can be executed in any order to produce 
the same end result.\",\n      \"oneOf\": [\n        {\n          \"description\": \"An identity transformation that does not modify a value in the global state.\\n\\nCreated as a result of reading from the global state.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Identity\"\n          ]\n        },\n        {\n          \"description\": \"Writes a new value in the global state.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Write\"\n          ],\n          \"properties\": {\n            \"Write\": {\n              \"$ref\": \"#/definitions/StoredValue\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"A wrapping addition of an `i32` to an existing numeric value (not necessarily an `i32`) in the global state.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"AddInt32\"\n          ],\n          \"properties\": {\n            \"AddInt32\": {\n              \"type\": \"integer\",\n              \"format\": \"int32\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"A wrapping addition of a `u64` to an existing numeric value (not necessarily an `u64`) in the global state.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"AddUInt64\"\n          ],\n          \"properties\": {\n            \"AddUInt64\": {\n              \"type\": \"integer\",\n              \"format\": \"uint64\",\n              \"minimum\": 0.0\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"A wrapping addition of a `U128` to an existing numeric value (not necessarily an `U128`) in the global state.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"AddUInt128\"\n          ],\n          \"properties\": {\n            \"AddUInt128\": {\n  
            \"$ref\": \"#/definitions/U128\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"A wrapping addition of a `U256` to an existing numeric value (not necessarily an `U256`) in the global state.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"AddUInt256\"\n          ],\n          \"properties\": {\n            \"AddUInt256\": {\n              \"$ref\": \"#/definitions/U256\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"A wrapping addition of a `U512` to an existing numeric value (not necessarily an `U512`) in the global state.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"AddUInt512\"\n          ],\n          \"properties\": {\n            \"AddUInt512\": {\n              \"$ref\": \"#/definitions/U512\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Adds new named keys to an existing entry in the global state.\\n\\nThis transform assumes that the existing stored value is either an Account or a Contract.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"AddKeys\"\n          ],\n          \"properties\": {\n            \"AddKeys\": {\n              \"$ref\": \"#/definitions/NamedKeys\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Removes the pathing to the global state entry of the specified key. 
The pruned element remains reachable from previously generated global state root hashes, but will not be included in the next generated global state root hash and subsequent state accumulated from it.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Prune\"\n          ],\n          \"properties\": {\n            \"Prune\": {\n              \"$ref\": \"#/definitions/Key\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Represents the case where applying a transform would cause an error.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Failure\"\n          ],\n          \"properties\": {\n            \"Failure\": {\n              \"$ref\": \"#/definitions/TransformError\"\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"StoredValue\": {\n      \"description\": \"A value stored in Global State.\",\n      \"oneOf\": [\n        {\n          \"description\": \"A CLValue.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"CLValue\"\n          ],\n          \"properties\": {\n            \"CLValue\": {\n              \"$ref\": \"#/definitions/CLValue\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"An account.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Account\"\n          ],\n          \"properties\": {\n            \"Account\": {\n              \"$ref\": \"#/definitions/Account\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Contract wasm.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"ContractWasm\"\n          ],\n          \"properties\": {\n            \"ContractWasm\": {\n              \"$ref\": \"#/definitions/ContractWasm\"\n      
      }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"A contract.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Contract\"\n          ],\n          \"properties\": {\n            \"Contract\": {\n              \"$ref\": \"#/definitions/Contract\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"A contract package.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"ContractPackage\"\n          ],\n          \"properties\": {\n            \"ContractPackage\": {\n              \"$ref\": \"#/definitions/ContractPackage\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"A version 1 transfer.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Transfer\"\n          ],\n          \"properties\": {\n            \"Transfer\": {\n              \"$ref\": \"#/definitions/TransferV1\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Info about a deploy.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"DeployInfo\"\n          ],\n          \"properties\": {\n            \"DeployInfo\": {\n              \"$ref\": \"#/definitions/DeployInfo\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Info about an era.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"EraInfo\"\n          ],\n          \"properties\": {\n            \"EraInfo\": {\n              \"$ref\": \"#/definitions/EraInfo\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Variant that stores [`Bid`].\",\n          \"type\": \"object\",\n      
    \"required\": [\n            \"Bid\"\n          ],\n          \"properties\": {\n            \"Bid\": {\n              \"$ref\": \"#/definitions/Bid\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Variant that stores withdraw information.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Withdraw\"\n          ],\n          \"properties\": {\n            \"Withdraw\": {\n              \"type\": \"array\",\n              \"items\": {\n                \"$ref\": \"#/definitions/WithdrawPurse\"\n              }\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Unbonding information.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Unbonding\"\n          ],\n          \"properties\": {\n            \"Unbonding\": {\n              \"type\": \"array\",\n              \"items\": {\n                \"$ref\": \"#/definitions/UnbondingPurse\"\n              }\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"An `AddressableEntity`.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"AddressableEntity\"\n          ],\n          \"properties\": {\n            \"AddressableEntity\": {\n              \"$ref\": \"#/definitions/AddressableEntity\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Variant that stores [`BidKind`].\",\n          \"type\": \"object\",\n          \"required\": [\n            \"BidKind\"\n          ],\n          \"properties\": {\n            \"BidKind\": {\n              \"$ref\": \"#/definitions/BidKind\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"A smart contract `Package`.\",\n          
\"type\": \"object\",\n          \"required\": [\n            \"SmartContract\"\n          ],\n          \"properties\": {\n            \"SmartContract\": {\n              \"$ref\": \"#/definitions/Package\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"A record of byte code.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"ByteCode\"\n          ],\n          \"properties\": {\n            \"ByteCode\": {\n              \"$ref\": \"#/definitions/ByteCode\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Variant that stores a message topic.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"MessageTopic\"\n          ],\n          \"properties\": {\n            \"MessageTopic\": {\n              \"$ref\": \"#/definitions/MessageTopicSummary\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Variant that stores a message digest.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Message\"\n          ],\n          \"properties\": {\n            \"Message\": {\n              \"$ref\": \"#/definitions/MessageChecksum\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"A NamedKey record.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"NamedKey\"\n          ],\n          \"properties\": {\n            \"NamedKey\": {\n              \"$ref\": \"#/definitions/NamedKeyValue\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"A prepayment record.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Prepayment\"\n          ],\n          \"properties\": {\n            
\"Prepayment\": {\n              \"$ref\": \"#/definitions/PrepaymentKind\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"An entrypoint record.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"EntryPoint\"\n          ],\n          \"properties\": {\n            \"EntryPoint\": {\n              \"$ref\": \"#/definitions/EntryPointValue\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Raw bytes. Similar to a [`crate::StoredValue::CLValue`] but does not incur overhead of a [`crate::CLValue`] and [`crate::CLType`].\",\n          \"type\": \"object\",\n          \"required\": [\n            \"RawBytes\"\n          ],\n          \"properties\": {\n            \"RawBytes\": {\n              \"type\": \"string\"\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"Account\": {\n      \"description\": \"Represents an Account in the global state.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"account_hash\",\n        \"action_thresholds\",\n        \"associated_keys\",\n        \"main_purse\",\n        \"named_keys\"\n      ],\n      \"properties\": {\n        \"account_hash\": {\n          \"$ref\": \"#/definitions/AccountHash\"\n        },\n        \"named_keys\": {\n          \"$ref\": \"#/definitions/NamedKeys\"\n        },\n        \"main_purse\": {\n          \"$ref\": \"#/definitions/URef\"\n        },\n        \"associated_keys\": {\n          \"$ref\": \"#/definitions/AccountAssociatedKeys\"\n        },\n        \"action_thresholds\": {\n          \"$ref\": \"#/definitions/AccountActionThresholds\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"NamedKeys\": {\n      \"description\": \"A collection of named keys.\",\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": 
\"#/definitions/NamedKey\"\n      }\n    },\n    \"AccountAssociatedKeys\": {\n      \"description\": \"A collection of weighted public keys (represented as account hashes) associated with an account.\",\n      \"allOf\": [\n        {\n          \"$ref\": \"#/definitions/Array_of_AssociatedKey\"\n        }\n      ]\n    },\n    \"Array_of_AssociatedKey\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/definitions/AssociatedKey\"\n      }\n    },\n    \"AssociatedKey\": {\n      \"description\": \"A weighted public key.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"account_hash\",\n        \"weight\"\n      ],\n      \"properties\": {\n        \"account_hash\": {\n          \"description\": \"The account hash of the public key.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/AccountHash\"\n            }\n          ]\n        },\n        \"weight\": {\n          \"description\": \"The weight assigned to the public key.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/AccountAssociatedKeyWeight\"\n            }\n          ]\n        }\n      }\n    },\n    \"AccountAssociatedKeyWeight\": {\n      \"description\": \"The weight associated with public keys in an account's associated keys.\",\n      \"type\": \"integer\",\n      \"format\": \"uint8\",\n      \"minimum\": 0.0\n    },\n    \"AccountActionThresholds\": {\n      \"description\": \"Thresholds that have to be met when executing an action of a certain type.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"deployment\",\n        \"key_management\"\n      ],\n      \"properties\": {\n        \"deployment\": {\n          \"description\": \"Threshold for deploy execution.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/AccountAssociatedKeyWeight\"\n            }\n          ]\n        },\n        \"key_management\": {\n          \"description\": 
\"Threshold for managing action threshold.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/AccountAssociatedKeyWeight\"\n            }\n          ]\n        }\n      }\n    },\n    \"ContractWasm\": {\n      \"description\": \"A container for contract's WASM bytes.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"bytes\"\n      ],\n      \"properties\": {\n        \"bytes\": {\n          \"$ref\": \"#/definitions/Bytes\"\n        }\n      }\n    },\n    \"Contract\": {\n      \"description\": \"Methods and type signatures supported by a contract.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"contract_package_hash\",\n        \"contract_wasm_hash\",\n        \"entry_points\",\n        \"named_keys\",\n        \"protocol_version\"\n      ],\n      \"properties\": {\n        \"contract_package_hash\": {\n          \"$ref\": \"#/definitions/ContractPackageHash\"\n        },\n        \"contract_wasm_hash\": {\n          \"$ref\": \"#/definitions/ContractWasmHash\"\n        },\n        \"named_keys\": {\n          \"$ref\": \"#/definitions/NamedKeys\"\n        },\n        \"entry_points\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/definitions/EntryPoint\"\n          }\n        },\n        \"protocol_version\": {\n          \"$ref\": \"#/definitions/ProtocolVersion\"\n        }\n      }\n    },\n    \"ContractWasmHash\": {\n      \"description\": \"The hash address of the contract wasm\",\n      \"type\": \"string\"\n    },\n    \"EntryPoint\": {\n      \"description\": \"Type signature of a method. 
Order of arguments matter since can be referenced by index as well as name.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"access\",\n        \"args\",\n        \"entry_point_type\",\n        \"name\",\n        \"ret\"\n      ],\n      \"properties\": {\n        \"name\": {\n          \"type\": \"string\"\n        },\n        \"args\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/definitions/Parameter\"\n          }\n        },\n        \"ret\": {\n          \"$ref\": \"#/definitions/CLType\"\n        },\n        \"access\": {\n          \"$ref\": \"#/definitions/EntryPointAccess\"\n        },\n        \"entry_point_type\": {\n          \"$ref\": \"#/definitions/EntryPointType\"\n        }\n      }\n    },\n    \"Parameter\": {\n      \"description\": \"Parameter to a method\",\n      \"type\": \"object\",\n      \"required\": [\n        \"cl_type\",\n        \"name\"\n      ],\n      \"properties\": {\n        \"name\": {\n          \"type\": \"string\"\n        },\n        \"cl_type\": {\n          \"$ref\": \"#/definitions/CLType\"\n        }\n      }\n    },\n    \"EntryPointAccess\": {\n      \"description\": \"Enum describing the possible access control options for a contract entry point (method).\",\n      \"oneOf\": [\n        {\n          \"description\": \"Anyone can call this method (no access controls).\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Public\"\n          ]\n        },\n        {\n          \"description\": \"Only users from the listed groups may call this method. 
Note: if the list is empty then this method is not callable from outside the contract.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Groups\"\n          ],\n          \"properties\": {\n            \"Groups\": {\n              \"type\": \"array\",\n              \"items\": {\n                \"$ref\": \"#/definitions/Group\"\n              }\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Can't be accessed directly but are kept in the derived wasm bytes.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Template\"\n          ]\n        }\n      ]\n    },\n    \"Group\": {\n      \"description\": \"A (labelled) \\\"user group\\\". Each method of a versioned contract may be associated with one or more user groups which are allowed to call it.\",\n      \"type\": \"string\"\n    },\n    \"EntryPointType\": {\n      \"description\": \"Context of method execution\\n\\nMost significant bit represents version i.e. - 0b0 -> 0.x/1.x (session & contracts) - 0b1 -> 2.x and later (introduced installer, utility entry points)\",\n      \"oneOf\": [\n        {\n          \"description\": \"Runs using the calling entity's context. In v1.x this was used for both \\\"session\\\" code run using the originating Account's context, and also for \\\"StoredSession\\\" code that ran in the caller's context. While this made systemic sense due to the way the runtime context nesting works, this dual usage was very confusing to most human beings.\\n\\nIn v2.x the renamed Caller variant is exclusively used for wasm run using the initiating account entity's context. 
Previously installed 1.x stored session code should continue to work as the binary value matches but we no longer allow such logic to be upgraded, nor do we allow new stored session to be installed.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Caller\"\n          ]\n        },\n        {\n          \"description\": \"Runs using the called entity's context.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Called\"\n          ]\n        },\n        {\n          \"description\": \"Extract a subset of bytecode and installs it as a new smart contract. Runs using the called entity's context.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Factory\"\n          ]\n        }\n      ]\n    },\n    \"ContractPackage\": {\n      \"description\": \"Contract definition, metadata, and security container.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"access_key\",\n        \"disabled_versions\",\n        \"groups\",\n        \"lock_status\",\n        \"versions\"\n      ],\n      \"properties\": {\n        \"access_key\": {\n          \"description\": \"Key used to add or disable versions\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/URef\"\n            }\n          ]\n        },\n        \"versions\": {\n          \"description\": \"All versions (enabled & disabled)\",\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/definitions/ContractVersion\"\n          }\n        },\n        \"disabled_versions\": {\n          \"description\": \"Disabled versions\",\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/definitions/ContractVersionKey\"\n          },\n          \"uniqueItems\": true\n        },\n        \"groups\": {\n          \"description\": \"Mapping maintaining the set of URefs associated with each \\\"user group\\\". 
This can be used to control access to methods in a particular version of the contract. A method is callable by any context which \\\"knows\\\" any of the URefs associated with the method's user group.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Array_of_NamedUserGroup\"\n            }\n          ]\n        },\n        \"lock_status\": {\n          \"description\": \"A flag that determines whether a contract is locked\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/ContractPackageStatus\"\n            }\n          ]\n        }\n      }\n    },\n    \"ContractVersion\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"contract_hash\",\n        \"contract_version\",\n        \"protocol_version_major\"\n      ],\n      \"properties\": {\n        \"protocol_version_major\": {\n          \"type\": \"integer\",\n          \"format\": \"uint32\",\n          \"minimum\": 0.0\n        },\n        \"contract_version\": {\n          \"type\": \"integer\",\n          \"format\": \"uint32\",\n          \"minimum\": 0.0\n        },\n        \"contract_hash\": {\n          \"$ref\": \"#/definitions/ContractHash\"\n        }\n      }\n    },\n    \"ContractVersionKey\": {\n      \"description\": \"Major element of `ProtocolVersion` combined with `ContractVersion`.\",\n      \"type\": \"array\",\n      \"items\": [\n        {\n          \"type\": \"integer\",\n          \"format\": \"uint32\",\n          \"minimum\": 0.0\n        },\n        {\n          \"type\": \"integer\",\n          \"format\": \"uint32\",\n          \"minimum\": 0.0\n        }\n      ],\n      \"maxItems\": 2,\n      \"minItems\": 2\n    },\n    \"Array_of_NamedUserGroup\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/definitions/NamedUserGroup\"\n      }\n    },\n    \"NamedUserGroup\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"group_name\",\n        \"group_users\"\n    
  ],\n      \"properties\": {\n        \"group_name\": {\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Group\"\n            }\n          ]\n        },\n        \"group_users\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/definitions/URef\"\n          },\n          \"uniqueItems\": true\n        }\n      }\n    },\n    \"ContractPackageStatus\": {\n      \"description\": \"A enum to determine the lock status of the contract package.\",\n      \"oneOf\": [\n        {\n          \"description\": \"The package is locked and cannot be versioned.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Locked\"\n          ]\n        },\n        {\n          \"description\": \"The package is unlocked and can be versioned.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Unlocked\"\n          ]\n        }\n      ]\n    },\n    \"AddressableEntity\": {\n      \"description\": \"Methods and type signatures supported by a contract.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"action_thresholds\",\n        \"associated_keys\",\n        \"byte_code_hash\",\n        \"entity_kind\",\n        \"main_purse\",\n        \"package_hash\",\n        \"protocol_version\"\n      ],\n      \"properties\": {\n        \"protocol_version\": {\n          \"$ref\": \"#/definitions/ProtocolVersion\"\n        },\n        \"entity_kind\": {\n          \"$ref\": \"#/definitions/EntityKind\"\n        },\n        \"package_hash\": {\n          \"$ref\": \"#/definitions/PackageHash\"\n        },\n        \"byte_code_hash\": {\n          \"$ref\": \"#/definitions/ByteCodeHash\"\n        },\n        \"main_purse\": {\n          \"$ref\": \"#/definitions/URef\"\n        },\n        \"associated_keys\": {\n          \"$ref\": \"#/definitions/EntityAssociatedKeys\"\n        },\n        \"action_thresholds\": {\n          \"$ref\": 
\"#/definitions/EntityActionThresholds\"\n        }\n      }\n    },\n    \"EntityKind\": {\n      \"description\": \"The type of Package.\",\n      \"oneOf\": [\n        {\n          \"description\": \"Package associated with a native contract implementation.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"System\"\n          ],\n          \"properties\": {\n            \"System\": {\n              \"$ref\": \"#/definitions/SystemEntityType\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Package associated with an Account hash.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Account\"\n          ],\n          \"properties\": {\n            \"Account\": {\n              \"$ref\": \"#/definitions/AccountHash\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Packages associated with Wasm stored on chain.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"SmartContract\"\n          ],\n          \"properties\": {\n            \"SmartContract\": {\n              \"$ref\": \"#/definitions/ContractRuntimeTag\"\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"SystemEntityType\": {\n      \"description\": \"System contract types.\\n\\nUsed by converting to a `u32` and passing as the `system_contract_index` argument of `ext_ffi::casper_get_system_contract()`.\",\n      \"oneOf\": [\n        {\n          \"description\": \"Mint contract.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Mint\"\n          ]\n        },\n        {\n          \"description\": \"Handle Payment contract.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"HandlePayment\"\n          ]\n        },\n        {\n          \"description\": \"Standard Payment 
contract.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"StandardPayment\"\n          ]\n        },\n        {\n          \"description\": \"Auction contract.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Auction\"\n          ]\n        }\n      ]\n    },\n    \"ContractRuntimeTag\": {\n      \"description\": \"Runtime used to execute a Transaction.\",\n      \"type\": \"string\",\n      \"enum\": [\n        \"VmCasperV1\",\n        \"VmCasperV2\"\n      ]\n    },\n    \"PackageHash\": {\n      \"description\": \"The hex-encoded address of the Package.\",\n      \"type\": \"string\"\n    },\n    \"ByteCodeHash\": {\n      \"description\": \"The hash address of the contract wasm\",\n      \"type\": \"string\"\n    },\n    \"EntityAssociatedKeys\": {\n      \"description\": \"A collection of weighted public keys (represented as account hashes) associated with an account.\",\n      \"allOf\": [\n        {\n          \"$ref\": \"#/definitions/Array_of_AssociatedKey\"\n        }\n      ]\n    },\n    \"EntityActionThresholds\": {\n      \"description\": \"Thresholds that have to be met when executing an action of a certain type.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"deployment\",\n        \"key_management\",\n        \"upgrade_management\"\n      ],\n      \"properties\": {\n        \"deployment\": {\n          \"description\": \"Threshold for deploy execution.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/EntityAssociatedKeyWeight\"\n            }\n          ]\n        },\n        \"upgrade_management\": {\n          \"description\": \"Threshold for upgrading contracts.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/EntityAssociatedKeyWeight\"\n            }\n          ]\n        },\n        \"key_management\": {\n          \"description\": \"Threshold for managing action threshold.\",\n          \"allOf\": [\n      
      {\n              \"$ref\": \"#/definitions/EntityAssociatedKeyWeight\"\n            }\n          ]\n        }\n      }\n    },\n    \"EntityAssociatedKeyWeight\": {\n      \"description\": \"The weight associated with public keys in an account's associated keys.\",\n      \"type\": \"integer\",\n      \"format\": \"uint8\",\n      \"minimum\": 0.0\n    },\n    \"Package\": {\n      \"description\": \"Entity definition, metadata, and security container.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"disabled_versions\",\n        \"groups\",\n        \"lock_status\",\n        \"versions\"\n      ],\n      \"properties\": {\n        \"versions\": {\n          \"description\": \"All versions (enabled & disabled).\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Array_of_EntityVersionAndEntityAddr\"\n            }\n          ]\n        },\n        \"disabled_versions\": {\n          \"description\": \"Collection of disabled entity versions. The runtime will not permit disabled entity versions to be executed.\",\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/definitions/EntityVersionKey\"\n          },\n          \"uniqueItems\": true\n        },\n        \"groups\": {\n          \"description\": \"Mapping maintaining the set of URefs associated with each \\\"user group\\\". This can be used to control access to methods in a particular version of the entity. 
A method is callable by any context which \\\"knows\\\" any of the URefs associated with the method's user group.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Array_of_NamedUserGroup\"\n            }\n          ]\n        },\n        \"lock_status\": {\n          \"description\": \"A flag that determines whether a entity is locked\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/PackageStatus\"\n            }\n          ]\n        }\n      }\n    },\n    \"Array_of_EntityVersionAndEntityAddr\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/definitions/EntityVersionAndEntityAddr\"\n      }\n    },\n    \"EntityVersionAndEntityAddr\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"entity_addr\",\n        \"entity_version_key\"\n      ],\n      \"properties\": {\n        \"entity_version_key\": {\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/EntityVersionKey\"\n            }\n          ]\n        },\n        \"entity_addr\": {\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/EntityAddr\"\n            }\n          ]\n        }\n      }\n    },\n    \"EntityVersionKey\": {\n      \"description\": \"Major element of `ProtocolVersion` combined with `EntityVersion`.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"entity_version\",\n        \"protocol_version_major\"\n      ],\n      \"properties\": {\n        \"protocol_version_major\": {\n          \"description\": \"Major element of `ProtocolVersion` a `ContractVersion` is compatible with.\",\n          \"type\": \"integer\",\n          \"format\": \"uint32\",\n          \"minimum\": 0.0\n        },\n        \"entity_version\": {\n          \"description\": \"Automatically incremented value for a contract version within a major `ProtocolVersion`.\",\n          \"type\": \"integer\",\n          \"format\": \"uint32\",\n       
   \"minimum\": 0.0\n        }\n      }\n    },\n    \"EntityAddr\": {\n      \"description\": \"The address for an AddressableEntity which contains the 32 bytes and tagging information.\",\n      \"anyOf\": [\n        {\n          \"description\": \"The address for a system entity account or contract.\",\n          \"type\": \"string\"\n        },\n        {\n          \"description\": \"The address of an entity that corresponds to an Account.\",\n          \"type\": \"string\"\n        },\n        {\n          \"description\": \"The address of an entity that corresponds to a Userland smart contract.\",\n          \"type\": \"string\"\n        }\n      ]\n    },\n    \"PackageStatus\": {\n      \"description\": \"A enum to determine the lock status of the package.\",\n      \"oneOf\": [\n        {\n          \"description\": \"The package is locked and cannot be versioned.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Locked\"\n          ]\n        },\n        {\n          \"description\": \"The package is unlocked and can be versioned.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Unlocked\"\n          ]\n        }\n      ]\n    },\n    \"ByteCode\": {\n      \"description\": \"A container for contract's Wasm bytes.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"bytes\",\n        \"kind\"\n      ],\n      \"properties\": {\n        \"kind\": {\n          \"$ref\": \"#/definitions/ByteCodeKind\"\n        },\n        \"bytes\": {\n          \"$ref\": \"#/definitions/Bytes\"\n        }\n      }\n    },\n    \"ByteCodeKind\": {\n      \"description\": \"The type of Byte code.\",\n      \"oneOf\": [\n        {\n          \"description\": \"Empty byte code.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Empty\"\n          ]\n        },\n        {\n          \"description\": \"Byte code to be executed with the version 1 Casper execution engine.\",\n          \"type\": 
\"string\",\n          \"enum\": [\n            \"V1CasperWasm\"\n          ]\n        },\n        {\n          \"description\": \"Byte code to be executed with the version 2 Casper execution engine.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"V2CasperWasm\"\n          ]\n        }\n      ]\n    },\n    \"MessageTopicSummary\": {\n      \"description\": \"Summary of a message topic that will be stored in global state.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"blocktime\",\n        \"message_count\",\n        \"topic_name\"\n      ],\n      \"properties\": {\n        \"message_count\": {\n          \"description\": \"Number of messages in this topic.\",\n          \"type\": \"integer\",\n          \"format\": \"uint32\",\n          \"minimum\": 0.0\n        },\n        \"blocktime\": {\n          \"description\": \"Block timestamp in which these messages were emitted.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/BlockTime\"\n            }\n          ]\n        },\n        \"topic_name\": {\n          \"description\": \"Name of the topic.\",\n          \"type\": \"string\"\n        }\n      }\n    },\n    \"BlockTime\": {\n      \"description\": \"A newtype wrapping a [`u64`] which represents the block time.\",\n      \"type\": \"integer\",\n      \"format\": \"uint64\",\n      \"minimum\": 0.0\n    },\n    \"MessageChecksum\": {\n      \"description\": \"Message checksum as a formatted string.\",\n      \"type\": \"string\"\n    },\n    \"NamedKeyValue\": {\n      \"description\": \"A NamedKey value.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"name\",\n        \"named_key\"\n      ],\n      \"properties\": {\n        \"named_key\": {\n          \"description\": \"The actual `Key` encoded as a CLValue.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/CLValue\"\n            }\n          ]\n        },\n        \"name\": {\n 
         \"description\": \"The name of the `Key` encoded as a CLValue.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/CLValue\"\n            }\n          ]\n        }\n      }\n    },\n    \"PrepaymentKind\": {\n      \"description\": \"Container for bytes recording location, type and data for a gas pre payment\",\n      \"type\": \"object\",\n      \"required\": [\n        \"prepayment_data\",\n        \"prepayment_kind\",\n        \"receipt\"\n      ],\n      \"properties\": {\n        \"receipt\": {\n          \"$ref\": \"#/definitions/Digest\"\n        },\n        \"prepayment_kind\": {\n          \"type\": \"integer\",\n          \"format\": \"uint8\",\n          \"minimum\": 0.0\n        },\n        \"prepayment_data\": {\n          \"$ref\": \"#/definitions/Bytes\"\n        }\n      }\n    },\n    \"EntryPointValue\": {\n      \"description\": \"The encaspulated representation of entrypoints.\",\n      \"oneOf\": [\n        {\n          \"description\": \"Entrypoints to be executed against the V1 Casper VM.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"V1CasperVm\"\n          ],\n          \"properties\": {\n            \"V1CasperVm\": {\n              \"$ref\": \"#/definitions/EntityEntryPoint\"\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"EntityEntryPoint\": {\n      \"description\": \"Type signature of a method. 
Order of arguments matter since can be referenced by index as well as name.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"access\",\n        \"args\",\n        \"entry_point_payment\",\n        \"entry_point_type\",\n        \"name\",\n        \"ret\"\n      ],\n      \"properties\": {\n        \"name\": {\n          \"type\": \"string\"\n        },\n        \"args\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/definitions/Parameter\"\n          }\n        },\n        \"ret\": {\n          \"$ref\": \"#/definitions/CLType\"\n        },\n        \"access\": {\n          \"$ref\": \"#/definitions/EntryPointAccess\"\n        },\n        \"entry_point_type\": {\n          \"$ref\": \"#/definitions/EntryPointType\"\n        },\n        \"entry_point_payment\": {\n          \"$ref\": \"#/definitions/EntryPointPayment\"\n        }\n      }\n    },\n    \"EntryPointPayment\": {\n      \"description\": \"An enum specifying who pays for the invocation and execution of the entrypoint.\",\n      \"oneOf\": [\n        {\n          \"description\": \"The caller must cover costs\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Caller\"\n          ]\n        },\n        {\n          \"description\": \"Will cover costs if directly invoked.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"DirectInvocationOnly\"\n          ]\n        },\n        {\n          \"description\": \"will cover costs to execute self including any subsequent invoked contracts\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"SelfOnward\"\n          ]\n        }\n      ]\n    },\n    \"TransformError\": {\n      \"description\": \"Error type for applying and combining transforms.\\n\\nA `TypeMismatch` occurs when a transform cannot be applied because the types are not compatible (e.g. 
trying to add a number to a string).\",\n      \"oneOf\": [\n        {\n          \"description\": \"Error while (de)serializing data.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Serialization\"\n          ],\n          \"properties\": {\n            \"Serialization\": {\n              \"$ref\": \"#/definitions/BytesreprError\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Type mismatch error.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"TypeMismatch\"\n          ],\n          \"properties\": {\n            \"TypeMismatch\": {\n              \"$ref\": \"#/definitions/TypeMismatch\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Type no longer supported.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Deprecated\"\n          ]\n        }\n      ]\n    },\n    \"BytesreprError\": {\n      \"description\": \"Serialization and deserialization errors.\",\n      \"oneOf\": [\n        {\n          \"description\": \"Early end of stream while deserializing.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"EarlyEndOfStream\"\n          ]\n        },\n        {\n          \"description\": \"Formatting error while deserializing.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"Formatting\"\n          ]\n        },\n        {\n          \"description\": \"Not all input bytes were consumed in [`deserialize`].\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"LeftOverBytes\"\n          ]\n        },\n        {\n          \"description\": \"Out of memory error.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"OutOfMemory\"\n          ]\n        },\n        {\n          \"description\": \"No serialized representation is available for a 
value.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"NotRepresentable\"\n          ]\n        },\n        {\n          \"description\": \"Exceeded a recursion depth limit.\",\n          \"type\": \"string\",\n          \"enum\": [\n            \"ExceededRecursionDepth\"\n          ]\n        }\n      ]\n    },\n    \"TypeMismatch\": {\n      \"description\": \"An error struct representing a type mismatch in [`StoredValue`](crate::StoredValue) operations.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"expected\",\n        \"found\"\n      ],\n      \"properties\": {\n        \"expected\": {\n          \"description\": \"The name of the expected type.\",\n          \"type\": \"string\"\n        },\n        \"found\": {\n          \"description\": \"The actual type found.\",\n          \"type\": \"string\"\n        }\n      }\n    },\n    \"Message\": {\n      \"description\": \"Message that was emitted by an addressable entity during execution.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"block_index\",\n        \"entity_addr\",\n        \"message\",\n        \"topic_index\",\n        \"topic_name\",\n        \"topic_name_hash\"\n      ],\n      \"properties\": {\n        \"entity_addr\": {\n          \"description\": \"The identity of the entity that produced the message.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/EntityAddr\"\n            }\n          ]\n        },\n        \"message\": {\n          \"description\": \"The payload of the message.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/MessagePayload\"\n            }\n          ]\n        },\n        \"topic_name\": {\n          \"description\": \"The name of the topic on which the message was emitted on.\",\n          \"type\": \"string\"\n        },\n        \"topic_name_hash\": {\n          \"description\": \"The hash of the name of the topic.\",\n          
\"allOf\": [\n            {\n              \"$ref\": \"#/definitions/TopicNameHash\"\n            }\n          ]\n        },\n        \"topic_index\": {\n          \"description\": \"Message index in the topic.\",\n          \"type\": \"integer\",\n          \"format\": \"uint32\",\n          \"minimum\": 0.0\n        },\n        \"block_index\": {\n          \"description\": \"Message index in the block.\",\n          \"type\": \"integer\",\n          \"format\": \"uint64\",\n          \"minimum\": 0.0\n        }\n      }\n    },\n    \"MessagePayload\": {\n      \"description\": \"The payload of the message emitted by an addressable entity during execution.\",\n      \"oneOf\": [\n        {\n          \"description\": \"Human readable string message.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"String\"\n          ],\n          \"properties\": {\n            \"String\": {\n              \"type\": \"string\"\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Message represented as raw bytes.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"Bytes\"\n          ],\n          \"properties\": {\n            \"Bytes\": {\n              \"$ref\": \"#/definitions/Bytes\"\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"TopicNameHash\": {\n      \"description\": \"The hash of the name of the message topic.\",\n      \"type\": \"string\"\n    },\n    \"FinalitySignature\": {\n      \"description\": \"A validator's signature of a block, confirming it is finalized.\",\n      \"oneOf\": [\n        {\n          \"description\": \"Version 1 of the finality signature.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"V1\"\n          ],\n          \"properties\": {\n            \"V1\": {\n              \"$ref\": \"#/definitions/FinalitySignatureV1\"\n            
}\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"description\": \"Version 2 of the finality signature.\",\n          \"type\": \"object\",\n          \"required\": [\n            \"V2\"\n          ],\n          \"properties\": {\n            \"V2\": {\n              \"$ref\": \"#/definitions/FinalitySignatureV2\"\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"FinalitySignatureV1\": {\n      \"description\": \"A validator's signature of a block, confirming it is finalized.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"block_hash\",\n        \"era_id\",\n        \"public_key\",\n        \"signature\"\n      ],\n      \"properties\": {\n        \"block_hash\": {\n          \"description\": \"The block hash of the associated block.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/BlockHash\"\n            }\n          ]\n        },\n        \"era_id\": {\n          \"description\": \"The era in which the associated block was created.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/EraId\"\n            }\n          ]\n        },\n        \"signature\": {\n          \"description\": \"The signature over the block hash of the associated block.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Signature\"\n            }\n          ]\n        },\n        \"public_key\": {\n          \"description\": \"The public key of the signing validator.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/PublicKey\"\n            }\n          ]\n        }\n      }\n    },\n    \"FinalitySignatureV2\": {\n      \"description\": \"A validator's signature of a block, confirming it is finalized.\",\n      \"type\": \"object\",\n      \"required\": [\n        \"block_hash\",\n        \"block_height\",\n        \"chain_name_hash\",\n       
 \"era_id\",\n        \"public_key\",\n        \"signature\"\n      ],\n      \"properties\": {\n        \"block_hash\": {\n          \"description\": \"The block hash of the associated block.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/BlockHash\"\n            }\n          ]\n        },\n        \"block_height\": {\n          \"description\": \"The height of the associated block.\",\n          \"type\": \"integer\",\n          \"format\": \"uint64\",\n          \"minimum\": 0.0\n        },\n        \"era_id\": {\n          \"description\": \"The era in which the associated block was created.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/EraId\"\n            }\n          ]\n        },\n        \"chain_name_hash\": {\n          \"description\": \"The hash of the chain name of the associated block.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/ChainNameDigest\"\n            }\n          ]\n        },\n        \"signature\": {\n          \"description\": \"The signature over the block hash of the associated block.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/Signature\"\n            }\n          ]\n        },\n        \"public_key\": {\n          \"description\": \"The public key of the signing validator.\",\n          \"allOf\": [\n            {\n              \"$ref\": \"#/definitions/PublicKey\"\n            }\n          ]\n        }\n      }\n    },\n    \"ChainNameDigest\": {\n      \"description\": \"Hex-encoded cryptographic hash of a chain name.\",\n      \"allOf\": [\n        {\n          \"$ref\": \"#/definitions/Digest\"\n        }\n      ]\n    }\n  }\n}"
  },
  {
    "path": "resources/test/storage/1.5.2/storage-1/storage_info.json",
    "content": "{\n  \"net_name\": \"casper-net-1\",\n  \"protocol_version\": \"1.5.2\",\n  \"block_range\": [\n    35,\n    94\n  ],\n  \"blocks\": {\n    \"a0d2b616b99812e49553852118012173bf190dcac8299be9d319773a7405e7ac\": {\n      \"height\": 86,\n      \"era\": 9,\n      \"approvals_hashes\": [],\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"a0d2b616b99812e49553852118012173bf190dcac8299be9d319773a7405e7ac\",\n          \"era_id\": 9,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"017bbb6f24b706b7a16c44dbe75c8fad2cd7701db361225ad296b06aa66853ece94804e8cecee76f61828ebad3422706b278da46845150265664091aaec62a890a\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01fc09409ba0e990d22eebb79bf1c85d27b2d145ee110541420a059497b9114fa17b8ab486cddbd7a927d638d61d8edff424284525638e14c0f76d3fcde225d10b\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"0120f0ca080c2237c1e04470057959ff3b165e2547d8821f857fafbb238073d5327d46b587136ccf8a4733d53f0996baa11153ad25cfb97ae3a6ffb50433254a0c\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"016c5d7bfdb97e82ed4e30c39c434cbd3458e33c35c5a31800820afee42efc35fb4f37d044adf049fc82db00773ad517fe411e58cac579b7a12143b39e4d30ec09\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"0154d4c6d16afb98ff1720369d741aa884a5ba785ff862a34050902e67a28d564e64b0ab26b47ab268c17a7c91853184d0b4184035faa99b4e5cab038e2f3dea0c\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"cb8148f82ff933b3653fbd63faf7c7c212bd34e1f7654364a56b129f73cdefaf\": {\n      \"height\": 46,\n      \"era\": 5,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"cb8148f82ff933b3653fbd63faf7c7c212bd34e1f7654364a56b129f73cdefaf\",\n          \"era_id\": 5,\n        
  \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01cb46f48eafdde8d963bd1c19e4a81935eb95865bb09613eb1994ff4fc92ab3de29acdfea42a7d3f03a7131164751512a6704fa7e9a05d4d0b3531943abfee20d\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01953d4b92fd808c0821865b29b20a52b04c1dd3857d2b12e9a33ad3baf2e12b7a9245a93b82043f96bf11f7b3dacf2176b992c74cd893bfd97a199647e50bc105\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01325850bdcc15a8b59d629d04c909f2864c1b24969ea1b6dc8c0c6d5e57b6fe1d7f92a58fd145cf076c291440807e89299a7687e8eb0c53aa0de846d7a6fc9505\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01f6affd41cedaa733f855f249804563a1b7fc061a163bf1d1b9aba04a31409f2a8b50ad947f52597f046f5a2135c978608b838f871898ae0db0f221e4faee4307\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"015fcd0f05be54b0df41151bd91b8e0eabb525b4808e9c96a166298dda9233cb229b85c5f5fd5e7f91af1d525c9bb6bf7114f36223893220770c31d32f9b206302\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"1021135eeec9fa9b32bf3e06b2689f44bb9ffdb34665542c2d117b0b0a9731fb\": {\n      \"height\": 78,\n      \"era\": 8,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"1021135eeec9fa9b32bf3e06b2689f44bb9ffdb34665542c2d117b0b0a9731fb\",\n          \"era_id\": 8,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01c1cb9ed59ea1eae441cdefc7307f451da29bde85d4f49c258edef8e4dac09209b1902aed3fb1a5321c83c883515337b010226f8682d88e61dcb7a55a1391e205\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01d404d4ab96399e749e6b4c5d3b334aa8084d7b48a7e5c80705cfb7aa8b47f4c2742200425f214ed7777c0fbd4d273c7a8656b79c2812731a206010e93fe17207\",\n            
\"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"0189356a822c83edfe5e417047a67b48ffe5c045d49fa1881b4b182a5c2e046f9d68eb3907c982215f7cceca592fe39a282250c17b689caddebb5d37e857a5db08\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01568a9c0f3a5f170bb9885a25920e894103bf178812696af7194e33d1a0c50d80e56ab2497019e32dbfb81b6cdb152e4eceac304667045010e375ec053a71f00a\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01754e08049770f2952fed60192b3b6a2a65829255585be04590ef460142a4d60ca677ffd14a1dc9257675e5a841965e08fef7f35c8f294d4ea3878f55128b3309\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"faf8913c012d445d4dc7af178d15aa8b041a3b45e63980995bd7a62da5d8673f\": {\n      \"height\": 80,\n      \"era\": 8,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"faf8913c012d445d4dc7af178d15aa8b041a3b45e63980995bd7a62da5d8673f\",\n          \"era_id\": 8,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"0178f5aca8508cea6c6f9b22b99e5ed66c183c3c9909ffa77d29e658b0310ed58a854a11fad7789c8a4e7ddc89a342443a411f3e152bf3cbce715604db636a2205\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"013ce1a805d847b5b949176c60a68dba2360536caf0583c5c20bedb7226c56c97cb5462c516b852ea801284f06005c77a4fbe3391712bfb6ab935bef6bd740610f\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"016e767114f25421c8856718447372737dfb8b967041fba6a263df12af41ec80d643c3897493757d2b7033768b02334eee786141ac128e82dae77b10d196f26407\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"013b74f945aab7f55c14d5bd5d1b72c6a901b909bb02ae96cf598ef56669d03adca45a0e841851988a2dcbe4d2425a24fc3746a5f8468245f31beaad032188ff0d\",\n            
\"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01fc10350304daba32973ad769db4f25c19236032df95b725ee8c024aaa811ec5d296a2a34043d769562bece9a193d40cceda9f5ccfbf0a707de1dd67b779bc106\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"2d8b530265d358bd58fcf0c9b61f21d2eb308113816839a86d28a6ab11831337\": {\n      \"height\": 58,\n      \"era\": 6,\n      \"approvals_hashes\": [\n        \"0064f9e5242f911736c124c436eb7ae425eb37d750cfc15612f5b3df2075a76d\",\n        \"9be78f45253429d11bce1db6c044d29b3e0b0fc6e8e0e98015bf9b8ed326c0d0\",\n        \"e6ecd44c2983fe9ac8f164589549d210bf67b7b74cddff4fbdf60fe42e41d7eb\",\n        \"f9e2354c4262da873c910bccb1230e35c29b822974d7e28ccd2362d2136e86f5\",\n        \"47a41934a6dc1003650c7277659747319ce39ba508c4f13ccdfd58d7ec48ebd7\",\n        \"96b0ae48e4fea3e2866e3c9a1475913ef54033fd46bdef328bce02874116bd5d\",\n        \"5da97f90027452baa240b38388a3d59c85f75bc6bac746c51b4b5631d74859c8\",\n        \"c046041fb4f3b5a56da32a5a2a753d99fa2b422d63c5d8bdaae1cfb72e18a071\",\n        \"660bcc38f3b43ceb9c25a0a17e0170c6e2755c3bcfa74104a5bf99b939738323\",\n        \"78985db9f0d30a91a6f3ca643888f33a09e2d5d58a45a84e5d09012535b962f3\"\n      ],\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"2d8b530265d358bd58fcf0c9b61f21d2eb308113816839a86d28a6ab11831337\",\n          \"era_id\": 6,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01986f42e383aa51382402f99edfdf4c43751229d4c29e8cd0cd3f47bad40baa2e1e4c5980f8408c8eda7cdd71ad41f2ae65cfc27dcbbf7697ad87499638da4709\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"0177d63b478b1bfef60caba68703cf28b925a87f5b541af969e641fdc198659f5a4c75dc452ee77a86d05851facdc121fd43dccc20508b741b9a04c95e0e9ebe06\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": 
\"019ec6d148d34914a34ec0159e49d5e48f7a64f3677b9d2895d9831ce83b9d412d81debfbd683e903fc16c365684398e2fcad892d862a524fac8e93e9f0d970a09\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01d80f7bcc382e4e777256d70d2f5bf3a57872c959f70c9724ed565472171e51e08bc0a27a561f4d3f99a8b1e54f8a4c5fc7ba82891471f69b6715000ee3bc870b\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"019b4a46d9af2f49c70e6969bf02c2a63f7d0e257255e09c7444fe281efa00351376d4962ae412dcac2b8532d372603075e6e38e51f834420b5255aa0224f0a101\"\n          }\n        }\n      },\n      \"deploy_hashes\": [\n        \"163025024c68c001b1a940c2e63ae4e4654adea462078841b2f66ff6c9e04074\",\n        \"1b9d7591e1e800a19a99647849decdb074925f270ea16540ec9b356de9721e4c\",\n        \"448bbee6f586414eccb05fb97d28f26e59a4bc542497b323bcad51d71af92d6e\",\n        \"a9af14844140bab502565a0e6cacbff0227b19e4209d3495c97e275e5b6a6dac\",\n        \"706050b93a7e9bc5a19fd5fab792eb5ef5a9854ec3181fbb054f5dc7d4ee170f\",\n        \"a97dc9782e2dce920821d08ee85f2853b5dbf97c57c9349772ed36c723eb3542\",\n        \"ef35f210444ef48eb4bea3ea58867037cbd4106af59cba36a533e20625c01523\",\n        \"660eeb2cb9fdd88ad3296de498e38b9b97291cccf19cfb1b5c477c2c94f87b65\",\n        \"82c80204bbf55182a6785e92d4ff59f500d9cf561aca305b25d7f95ef258f7bf\",\n        \"e55f6646b64c204ca7f55d244a8879e5ac1e982a049b1d577f80418274490249\"\n      ]\n    },\n    \"a7e8a96a2608a249a831fc57607507f7fe6873afcf0c8af8f61ff05f5e2c8394\": {\n      \"height\": 51,\n      \"era\": 5,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"a7e8a96a2608a249a831fc57607507f7fe6873afcf0c8af8f61ff05f5e2c8394\",\n          \"era_id\": 5,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": 
\"01735a315b9a6ce63741e84a34b9e27e2245235eb9184573b016b5b6eb7a0038a0badbc19e709eeecd9f7ece2b26b89f00655dc5be57032ca7fc4fb33e5f29b106\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"0141977992a0317989ad9b774c49a1cbf3751b402b6b32d2de48add0f8a863e61a2c1a711636a7bb609f79a830ac31cb770d0f3a2b83b186d98c1d8ac6c455ec07\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"019c8898f2368e6ce797ddf4d400526f43de91b66b6717fad9c305e3c5931e10dfad523bd154e7d6e6a3861847f4fa956746a3578f15b193ea621938acf8e4a509\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01ac6142980c8e91e26ac54634ba97390faa4501961d7a120b099cbf4184ae055ae2dc72352d371628fb84b02d1df20d1f191cacb84350941cbdf94873471a9a03\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"0195bd1a586ccc900162d525ddcf6649002979b15eff3972b7ba1d3ae1369b78d9c8226910dc911f2076a636cc3502514d3dc57ba21b12020a3867755a009bf10b\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"938b44fa8b62572a4820cf285f8aa3c695b03e2ebc1e6cba754523504f01831c\": {\n      \"height\": 84,\n      \"era\": 9,\n      \"approvals_hashes\": [],\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"938b44fa8b62572a4820cf285f8aa3c695b03e2ebc1e6cba754523504f01831c\",\n          \"era_id\": 9,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01f5e877256c046b04c8f52d4346c89801b29ab02e639259f9a0fed5a8e2dc3b3ef543914fa3592356ce9b2246982615708306592409ddc70808237fc926b09f0a\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"018b01cf26c7f550ce26f1ef5611d0df09da556d2611c3dcc1c4ddf5762da0c30e6374349c3206d9083fff4377537f2f79e97a7e266ddfdfe06c87ba7d326df70f\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": 
\"01ef1752923dedaf0a815ead66d49f2cf889fafb52542014d396a97e415768122b05cdff8f8ceb21680b350ada7004564b20a4d351fd26241c121e54f0a6c3b109\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"015068e11c1963cf5c06fc37589a348873b823228b9185146d04f9cda469578b41fe617a5ab442654437bbca398ccebf43290c1497d6f77e1b05bf166a64cb0c0c\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01eace705660fd13b1a10e6a0f03e14763acd1d4109397c07febd4a6f481ae73a26c30422cbf9418229820a9e2d5c35caac9cf2a05665e65de4fcf37a9caffef08\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"c01371740d9b2883c92b5c519469cae84f82f6057586f33735c58f6e0db40e79\": {\n      \"height\": 60,\n      \"era\": 6,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"c01371740d9b2883c92b5c519469cae84f82f6057586f33735c58f6e0db40e79\",\n          \"era_id\": 6,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"017230f32818eef81ef2596462de3d2a5f5cfc9d73e30c8ceb757750383e78a3da1bd5f887b71b81a862ec5a01b8e219b03df294817c873f608f3ae61b6ab93e09\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01ab097d5ca951b9fc1abc56956c98456b0c704a72fe8f94775b6e4a263db67a644547d12f28b66d3eb7b36582aaee9c8ddfbf9633885fb703aeb0a2e72be8280c\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"018e374c4989aee1e44574d7d22cf07cb776f7110c3f7c9b1bdae00051f2b8fb8dc7502011048979a55a0514d9be5e800f6cb24df250aa0a297a122b69d33d6f0f\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"0161529228a54d9decee38997411c5a8a2a027c2230e264a8ac58a01d6165d69a5a8ee36daa60ec380659a3d7655dc3d6846e621ad12a50a3959439130cad4150a\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": 
\"01ac20ebeb1438ab507eeee56d9475a8c9fd9254fc76c3852191f0c877be362e4a86fb832c35502feb570c02642deec0ae664d2308f9bb0c2be9166d3e3e42cb0a\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"021aaf989b2159ff7c551ccab2108dec6d2b846f0c9207e9cf0667b357be5659\": {\n      \"height\": 76,\n      \"era\": 8,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"021aaf989b2159ff7c551ccab2108dec6d2b846f0c9207e9cf0667b357be5659\",\n          \"era_id\": 8,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01b82b9d6a43f06068d4aa0a0d7e2e1f255766c5a6cb88508a2c7460acb652b98ee8a5f04418d39c412ed29be6cf4edfe503bc71557ee62bffa7184999c652440e\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01ac8e62376c5f50d2ea8bbb8af8eb482cf9e6e113d337ba9153416be5c4495e5d6c9a80be220ece312e2aec26cbfba6a3263ea3903f6b5f881d6d04d48ccf3b07\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"0119db4e2beb2724955ce91715cd71e949313c0f171bdb726eae621d60f9e27df2588a0cddc4ba368bd6264b31e50ef1bdcb8ec88a84ae6af6ab6ac00a10a0a800\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"011719bb72ac094fe584d06049b4392f2ed846365153ba656d808408b6c78234cb269e63b33f64fdab1554a3069cf3e86f394a24b805cc1c91db118e9d74cf9903\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01759a3353032a34b28413e46d7267119d6e9d7f82e8a4bf90823dc7750d84d47070b18adcede5f3bb799df1560b5b47d946265c0b703f3bc9f8e6322412101c04\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"4d050f8a53cb8e171c63a865bea6dfb25608c14c561c675fc09a863544e19415\": {\n      \"height\": 70,\n      \"era\": 7,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": 
\"4d050f8a53cb8e171c63a865bea6dfb25608c14c561c675fc09a863544e19415\",\n          \"era_id\": 7,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"015ab8f04b55a396f6e409dfaf6d228ff912a7eead71425961ea3c9bfd891056187a6ca0081947e87d58fa4cea6f99c4a1751c7638536752a12397a983049d320c\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"013a71c907808ff9c27f28af23703ff3add6f8a97b5c350c711a9c7a5d3d359ceb9afcce84a48ecd065fc8ac26b9afa4f92462472e3d9afb4a8666bcb924e83103\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"017666860f8f4f90d4381edb09fcce00e029a2cd3bafecbdd7525a485d3a864404b7a8c167fafc02d88d0050b7e78ae3793d33860adae5584906d3fad15155480f\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01acb4b4cda08418052eb283ec2cb16b950e6658fe0bda574575448005e6238f46cc266219e0bfa8d4590db4f2283438c596e20817e03145e5e7da6373a2f70e0f\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01fd7f47f38b47cb2731d85a2b11147a9520c69663fcec65d6303a485317d8ccc3e1a79402048cf1210956a02facc27bf1af18b6ce2aeeeb98e0d14c5f7900fc0e\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"bb75eabb9db28f69281325959e32149faeaeebbd122df7c5157c7f2b06f80ebe\": {\n      \"height\": 45,\n      \"era\": 5,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"bb75eabb9db28f69281325959e32149faeaeebbd122df7c5157c7f2b06f80ebe\",\n          \"era_id\": 5,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01ab2546144a84918f9020045a663b643c248e21fb5d15fac4e242f7c159938e6b7b3b604403ebe7a85f4dee4cb6c263c6c1900e29b3451a3c3d55df017c40d70e\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": 
\"01bc20e0dc59e54176b7da3351939e8dd8f4056dc1708505bc2a86be5dbf20459afcc16629aa3460ab8dbfd259a4fbd0d0a6e7b1bc72083cef12c790d119d3070a\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01e1e64bfc0685927602d2e1b46a6112a00c38316b82190518bb8302c5aeb3daf6c592e49c595e8f9f54d5d1269a201181edd34dfe400eaef3fc47d32e2b86a80c\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"014c63ade982bb75e47057ac48fd6b710f02fa174d9671e6cfb0fbfdd90e85d2e6e302e74ad3cba017a3e4fbfb9fcb17327548341b5cebde32ac1a80d5d6b3b001\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01baa09af7100c9b383668543ef9789a4989bdb0cb91a943cb605cfcbe33da7381e2f6441f503ec926fce6536ea4d8fa3469320133f73e2845c4fd0e7bc5cc5a09\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"d1d65bdbdc1fe4e7301ab4d921bc504772c3dbab47310be453bd606edf286efd\": {\n      \"height\": 54,\n      \"era\": 6,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"d1d65bdbdc1fe4e7301ab4d921bc504772c3dbab47310be453bd606edf286efd\",\n          \"era_id\": 6,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01ec296f2a7a748b81b4773fcb253e422cbfcfca3fd1cabe95104df7f942683e2c4ad86a9fc63c6f6c59c7b04ef95276b8574dc8c71a475f60109af51342492e0f\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"014e5cc193ce3eac79bfd2f2369bdaf7a6eb748a507a4e7c5fde0b1a6b6f05267a3df285f40d44c3c4753af2ab7c7f88cada326fb85c413f5ad506d29f8053a40d\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"0132b18719d4d9b64ca4e92af2e0fa47562af672e7c017e9256dfb784e31cc8195ef410fb31bf1a05adcf5f029e3b1bee6634c5c4f80d383afad09512156f7560b\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": 
\"0157d8d02e9f4f619bc0a8a95f3bc61cc6521b744263b31889f6da4fdb3ffadb033e57ced766e231d0bbbdc7e3ba9ef662ea1f14cbd85dea9f6f0f058e1d943607\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"013ae0332623ebd4c86ba3c087ef2c68e9ae08e851afd56557398d33cee5e39391ff8b879cdf815de544d6df7e703522e1006040880a9893a1c7f53e873e3c0f00\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"20855b7a61e36df78070a1b697b8b187b20efc49ecf450554283e384ff0e5902\": {\n      \"height\": 67,\n      \"era\": 7,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"20855b7a61e36df78070a1b697b8b187b20efc49ecf450554283e384ff0e5902\",\n          \"era_id\": 7,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01603fde1ea29deb3a5e35bc20c1544ffc5fbd8f58e6a0b4ad9a738ccae97274b023d6ea40cdc177f1ddfa8d10e0ec6c6645e8d854c36408e51b8229df48bfa50e\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01122abfa228c75a8b1bd71099678eacb2f8cc62cf95121ff058076bd0da5ccced23332e7426dad453a6305e44c1ca4c294a5086a1f0da267a21534bba51a8650d\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01bab63348e6be0f65c9af7d6b9ab92cca90779b9960818c908667e79508a065d87cf13ced784a43fc716bc8973e28a40d65a98c23cbe3ab3caedafd970bdf290d\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01768af65f974975528b23eae96a5f7739b806d32e56e6509057e9b0d96f089ad80d6e719d4367cafb6aeb5c8e05b5e1d211b52fde31092e04e363ac8242ba010e\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"012eae04c947c8c3335d5542c33da1f93a2202fdb03aeae04ce30d28393ad7f94f80a5d164f1370126b80b7a3d26cf213b5c0940a50cc6e2f0f58198231b02fa07\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    
\"ddb95006ea8d3293555af637f0fde53e5d14f6b8b9c61d1c433155af734d6498\": {\n      \"height\": 43,\n      \"era\": 5,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"ddb95006ea8d3293555af637f0fde53e5d14f6b8b9c61d1c433155af734d6498\",\n          \"era_id\": 5,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"014d867980ef65c933c3e4e6b50dfd2e83223530b5d6eac8650daf2079e00eda0fce2dc222c7d3fb50bac6098dd20b208bcf424ab88c2c3aa2d73d20ab228a3909\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01a2b3aaba8ed701ef46ec55e0b8c5e7b35a8f980497025238687b2129b80cb3198c3195c7d21913f72b234728a5e9698b073c0f0b26062df2d62161c8f41e9606\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"015dc3a199f4998b3edd8b32d8c432bd2efd149425781b5a6892e9a5d51ed9e9f23b4538c2685d8c304c3ff3e953b7b038b68ef950309bbc9e99391fee626bee0e\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"019885590c89a861bf6f05fd00b2821f940b8442a9a50eb808bd8d1149791ef76200680c8afa1e68889fcf47d180949949309a1ba3b15cc1c4d6a865f937e22a0b\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"018a70d0003eba0f4a99bc1d22113e0d25877e9ae0f090617458e9b069c93d3a27f003265517b04f984139d3d207c4e641e32289359c8bf1e5f0c256f65c29c005\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"42a656756c7b5bbfcdf32db19cf177361e9c0a6705a16a0d494e320175f09224\": {\n      \"height\": 53,\n      \"era\": 6,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"42a656756c7b5bbfcdf32db19cf177361e9c0a6705a16a0d494e320175f09224\",\n          \"era_id\": 6,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": 
\"0173857a991249ccb028f8cb1f52324b4bb5a67ef3a3a60f6e7df61086c5ea8956519a288133d024cd76ab9b0bcd3958f85b26001322f9714d3159e1288fea7d0b\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01a0f04275b62ae0a12982c9b86785f71d4567c6f44f6757b7dbbb9b932cf00c8ea6e4c3131b81380d29b5fd91e25695c77f198ce8c397c103695aca73faf97f0c\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01e2170db2b4270bbd43861eeaad31bc76c1b4099f3d6ec827b15575d65f6f933d6bcd2a303396ae0bb617632dc00a89698c63dc280f689943aaefdc5782111009\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01725c203217ba1e454dbbcca334604a1df68f416169fd566d1d0e8821a066e39098236b43cb1a77d8f3a1b447287400dcaf3d5beddcba4c8f2e2fe222c63feb0a\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01375999d63755c55b80bcd79c3647fa9e52f945b2824ce848e5094d439455765876c66b88b641c26622b649009ee41722746b26dd024d55bfcffc497ccc312304\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"4013cc057d7774fd5aaef3e8062d3cf3786cb094d7109ebfce2d5f86903ec736\": {\n      \"height\": 88,\n      \"era\": 9,\n      \"approvals_hashes\": [],\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"4013cc057d7774fd5aaef3e8062d3cf3786cb094d7109ebfce2d5f86903ec736\",\n          \"era_id\": 9,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"0160c30d9f4d76b1fa89f3f130afbd930953921931daae3dea5053adba02a9203b9542a586061c798be54d017bae4c51dcd675389a14ccdb00694236d599cdb507\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01bb81d1b641fc92acce550af000f764563fa553c71079aa66be8511d42da68892506c64257a3147427908bfc6398243c92856c8ee965c92e63924b7503f7a000f\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": 
\"01500a006a8fcf834465929d0f60f4f38ee2212f5f6b98c92afe2ff39329050e973ded4afff955f853301b12cb4e7c6c3cf174d904cefc8b1d704a11dc16ad3e0a\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"016c66db6b6b65e87bef11e4b8284b7f30a62f6080e0303fd79669f1e4a0be83a0133248062ec7fab1c166d0c7de33a5eec9093681fac72253d4422517bde88a0f\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"0164d80fe439397439efac06962f3233bdffd4bd2620af6eb42c34c7388d52d8e4b1f8499ff185a4fa58ebdc9b54d57a255d8bd903ed5c0d7f7bbaccb04dcbdd0b\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"1bbba85fdaa53289c5a5ef5a10e264aecfc23b2cf385883c38f0afeb3430bfc2\": {\n      \"height\": 52,\n      \"era\": 6,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"1bbba85fdaa53289c5a5ef5a10e264aecfc23b2cf385883c38f0afeb3430bfc2\",\n          \"era_id\": 6,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"0137c87dfc36d74d40e3454751acc46d95cbb8501cf892024ceee50455681a4368815d99a7d5a924ce8cf551be8c57d980e69d101a9559f305621aa733294ce200\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01d7ba75b022046120ca777a9bb07d6b12d5be56738f1e63a83f8d529c1e352adc3bd66ec2611e83895cfccbfe0526b088de6fc1f44dcb70c9f111126f89335903\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01f317117ff091da876d47f9e836e3e716725fd41a5ace99a91903807fe0424056aa0ba43aea4bfdd1ffb39b18fc3ed262b88a0b4c099d867d26060c956c03c404\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"0105c51cfa823470fe3527c6836295b04e97499a4ca9789b77c9cc8131136245311794d085f7226c362876f8c68a731c96fc9feaaebe511072ad5119c7e3293e0a\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": 
\"01ad671b04cd47e3a57dcd42d0be5a630b6e67f6f796998f6b72cb19f948911cf61ae4b0cc7283b683cd9f22edf5bbe6f3f84022cebb8d6a8c688ef23f9001690e\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"5d27c40448a47e4dea45466443471a7782c0a562124bf7a153ec54e90271f20c\": {\n      \"height\": 92,\n      \"era\": 9,\n      \"approvals_hashes\": [],\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"5d27c40448a47e4dea45466443471a7782c0a562124bf7a153ec54e90271f20c\",\n          \"era_id\": 9,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01fe941f1c097daf47b2713344f452bcfe348a6efde20bf12235f0da754973c1dcf1107151990b14597c9bcce46e2058b0867bf06e5b1a6d12e9855f645c99170f\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"013760f3af9afe14eaef1327b6d7d1c021e29fb9a6d17f9592fced143a1bda881d7a273c76174ad83105ea0e314cf46351e19dd9e8fe4f8e4b5e7815be38c0c90d\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01b10ae47b777786bb666a137ba4fd027ac31b8ca005eaa0ec5518346f24f1b5148e35f899593ad6a24132cff3a2fd1e1983eac6bb25e747067464dc2d93062205\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01894ec9959620928f5799d796c38ad1d2000f6e9a6f6dbcfd38351d6b6464ec21f76442f8774f1bf368914be0e967c2be4b991443eae3de3d5df60807dc826600\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"015027a55452e3ac79c60cf9a4baf581631351f7dd9761a52256fdec17c15c1a42901d40814fdc2379b79220c3995337d887048d3035549dc9ad14c631949cdb06\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"63a250af208974b970f7c2328bfab4aa3740023b43d29ca34012ea6afe585ca7\": {\n      \"height\": 47,\n      \"era\": 5,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": 
\"63a250af208974b970f7c2328bfab4aa3740023b43d29ca34012ea6afe585ca7\",\n          \"era_id\": 5,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01850a1fba8ea4a524cad7239db5a7de0360f899d54f5dea1cec06d8a3071eb95f49a9758deb9649118576405c323b0c95d8b736adf9102baed075a0d5bf623408\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"010055bd55638a89aa31b7e67cb0966bd6b13dd38677f9ba9cb4d5ff74061d4c9f792dc044df008d7dab9b6ef83f706d1bb6c70481e9b00ac16827c11a334e3a05\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"011a3864d9e55d38771119cb8bae3f645528bb5fe8231bf67c7b6bc6b7915c18227041a8307190e7250ebc95b20a2a645294484a4032c65380525f7f7258f38b0f\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01d141af946b2d80b91f0a3f42a599023713934329e15573b0df41e2fe7691049214780b23c72a75f54a9e04d4f6e11654c50475b195de1efd02c3ec62c875ea06\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01ceb94402ce8a411b40e7ed0be769cc2c56261965fdfea963a1c06ca6c6499e49fd706313c534740ad46add89f4b89b6bce3119733ab58b7ad7c7c6c56643b103\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"774bb86933d8150611299b63e4e8b6a3d38c074dd3867f4b66acfa91f903c9fc\": {\n      \"height\": 48,\n      \"era\": 5,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"774bb86933d8150611299b63e4e8b6a3d38c074dd3867f4b66acfa91f903c9fc\",\n          \"era_id\": 5,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"011f7e0806092aea798b88e1ba3d37064c95cbdc3859376f4fd547b13789af4f7f3dd9c986493fd54a79fdacadab767779e6d209111b54c73bfa7e3f6923bee102\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": 
\"01450fc312565737a0dfc1ac94ab7a38d778a7255233642448f3165b299e0b3710a855cb45156638de7c95c09b3c7d629813414fba8c944bec5a0cfcfa5753d90e\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01ccd464f69ef99cf714011427c62ceeb2e5ec46b03ca26b13298117377a9c5b17c79c498d44572cf4f911ee196a6560ec7d4652808268a92bc7fab706939f5d07\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"0178ad515b357cc1475759362c718fda98c774c962eeefb41147518193575f852cd62b48e3e4b5186db55b0a5db52f2502bd84f9f9007ba6f02e520ab67fcc130c\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01353b0a9e972ea1c5c644966a30d07c89ad11395ddd0a2dbe008c1fee8d6a8803a1cb66e50e90eb6fb774aebff644f49d9b28eb3c7958b66fc522ba5fec172e04\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"2d890cf5edce165cf1362bc54bb8e4b14e9a3a42f412980d9cf5f688ab7ed431\": {\n      \"height\": 87,\n      \"era\": 9,\n      \"approvals_hashes\": [],\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"2d890cf5edce165cf1362bc54bb8e4b14e9a3a42f412980d9cf5f688ab7ed431\",\n          \"era_id\": 9,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"018acfc82599e1947af1f45403226cfc3e27d11243bf372ef4ed99290bab7de2e2719f0df7427c73cc76c67f96279503f7a2cf28643d5907dff3c55369e96e1103\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01fbde4d5d3d57828919d518de6cab2650860a4aee8c651ea841e280233014c56a9bd01b43e2b7b04e8a6daf6b9994ca24b699a0feb62365263a51d7b6a3c6570b\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01177c37af6d7db73303f6b4bcb0f7cabb801e6e83520eba88511fc82dc92d28fb2e99af7f5787bf4131603ef92ef09806e96c710e322572566e33aae5e4706502\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": 
\"012c9cf741bca2447c8b31bae1f23c12d354c46bcde06114d7b5dfb138dfe497bf32aefe6547835c3bf5ab34203828433d3c49ed822ea8cab64f738fe298d51d0f\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01e788ff68add1ec4ba30ad2473a1e1914f49145069baf70b727ab3d85f1870196fc61347d4e763d41dbc4e84f958f7343149704b9c022893d4e043a51e536a30e\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"a7099f4d3a5735ec54e833b0dab9d2f38e02bfbbfd8bbf00d3d4f59bd62e1ba3\": {\n      \"height\": 50,\n      \"era\": 5,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"a7099f4d3a5735ec54e833b0dab9d2f38e02bfbbfd8bbf00d3d4f59bd62e1ba3\",\n          \"era_id\": 5,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01600ce47d211a19cd35d543e960351f55c36cea88d414500da0aebeefec27a829049cac0c52aae955b9d44dc8d94c3f984b379c06ff4365a062c96bf269238104\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01ba1784f771ef81b9d5240291e4ac119e080d779b1c1a6298405b26a30bac98330c08ebded884d38abf969fd9c31d4a85d9b0fce19a244bf68b3d9d8693cf3d0f\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01b18d572138b3617b3109d4a0023e8cd9f8f21b565daefcdc8a4b4f628a528af78543f46030e617510f8d922d5031d9b93574c0bacaec8929b1b0c8d3c5006a0d\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01100026f933d94a640238e4fd855ff863c3a38a7e2641deac6684aad30a5c3152236cd8a8111c310ac412a22dbcb62e02e24343b7567da67aadaf74a360d62f02\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"0118037852b59d1ec371b72d76cd8c5cdad48eb80c129da9b5a318a6ab8016d10db39d4d5348280bc2edab04562c5c900f316ddd60cf9a8a5863087e6054fc5c05\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    
\"cac38102cd15c1e21fea483838849d63d8b3510045f759a6b8a020e1286b1416\": {\n      \"height\": 79,\n      \"era\": 8,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"cac38102cd15c1e21fea483838849d63d8b3510045f759a6b8a020e1286b1416\",\n          \"era_id\": 8,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01853926a0fb42bcb00a01e4df9e7584e1db64363a4ef6d859e3a5d073e9164f819667201f920e9ecb39daf9b05ccb41ce6a829b12d50d800a042e0d1dc718f402\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01d881f0de6381836da827f110f01b7a17c34a62c588abb0c066fbfc687a5704418d778fb37527fd9bfa73ba76dca33f5f5ba966b087d340a9a792ed88182fe005\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01ca0ca0c7f0dade288f21449ab4bf242e227cd8165f60c21949198f8142d98284240d6134f26cdba9dde60aee3c3a017f71470fd0632f8283e25de82cbd3fa108\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"011a92c7a2c0daf8b15a71cfc8adc0401a5b071df843fd725a5bf66051cf02e3c8df4b60ae56dc091a2049e1a8f9e8cd51116f851264109efcc081c9d9b06bc400\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01940c4473f9845daff31c33ac9c3165110cecff7d58911816faa3643f2965f9e31f79995b475c501d69cae782bdfd1f1ed66b51ff64188de83a4fd3213a877e00\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"8813bc48aab46912eb829f5d8c4d0b1803c3be64e2fe6feab5d7b5a7c10b03fa\": {\n      \"height\": 42,\n      \"era\": 5,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"8813bc48aab46912eb829f5d8c4d0b1803c3be64e2fe6feab5d7b5a7c10b03fa\",\n          \"era_id\": 5,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": 
\"01c168ebc69fa7514b8c206ac889ba92877fe4223f1665360815dcca4ae958bb488bcc43d2401829555a0eee623732add264bdd36ce0c6356b492c024c3d64b905\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01ce66ce24e35a47e834f780f8cd1223bc1929e48bac474efc4a6c12adbbaa401656f7e8ed2df1806220a464979ac5c9b7fc4e4e2494d595b666941a2a40003b04\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01bb83f0fb82cda0243c0bad4e517dbaf97502e482eeb38bbd5655e71508983dc55078a7bd1738c342c20fe41932ee0e352a95de4923b87a5648ab823c5cd0a106\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01ea32550e0f59532718629577bac54e50e23124dc128dd30b93ce18cb13add5ba55c483b2bb56bb34aeded9c8f777451cc563d56d71370ef728180cad11cc5705\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"019d1e480814b691626f18759e9db54b66e594d2b6ad1d3870b6b1e487877e5d9426b87f68bb4d86e258f6decf30c32fb8896051c716726120a59a4e45656b0906\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"945da24b9d3ea9dcc6272a6b4bb5902f3d420950a24bab1cb8bfd83a2bb04431\": {\n      \"height\": 38,\n      \"era\": 4,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"945da24b9d3ea9dcc6272a6b4bb5902f3d420950a24bab1cb8bfd83a2bb04431\",\n          \"era_id\": 4,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01fc1b2e5e932457f0442aa46a1ab1217ae6f8a9d67526276cfd22515960b94e696a9be191fe41aca5f4e939c7c7ab9201ce09457d077da8bfd75bbd9b0d01e102\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01cee51ed263f70fb33d95490cd51838bc049c215c83067fa95bb345f0ffc1a0b2774ba4ed7c9f68e69010702f4007766b12a7fcb556acbf2fa2dcad165da6f608\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": 
\"01f4d6df04b1c154cc18213ad42e0e10990b0ddbecc100673f07ed0b09a27ae84e88d5d1b2b223bc6bd3ccb779d1c6096bc837f8ac40522c4773bbe916bc4b870f\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01482bd61421bd6a6728a3197c70de900ce34915796f515ac2782a41c6026bd9ade2d0ee9b8e3a48cc395236f48f043260dd2541ad1079be63aa557d639af0490b\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"012b1cdbea5b8c9325218264e82c6883aa18724de24b4c87fcbce8a34a240520db70e3d388b39cf01de6bd1511680a5f19ddf47bb2cad440b0cdd9adbda98ca907\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"f620ad616351b9dcb82ca5fb430db0d11aed4328e6be83382b27ff5157500b78\": {\n      \"height\": 64,\n      \"era\": 7,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"f620ad616351b9dcb82ca5fb430db0d11aed4328e6be83382b27ff5157500b78\",\n          \"era_id\": 7,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01138febb7d549d35448dfa542d7aec5da50ca025b9f4f503e875198ce097b67d3c1e43877fde83e74a50c6097c3e7e6eba4dcc6a60104cc9f998f8b2c2a557f07\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01d4982bc2d7ab129dd6192d6ea54292b1c783f08f46fae7c3c4a75344f220113693a44395e6d1ac76c2c41a5ff4f78084e94d5419413f8494eae22402afa6f503\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01ec9ee13ff765d09d6c490c2e52959a94f5b02398303610851e60205b3d07dee8c61e4f907ed5e7fe091fc4c0b557c2484e77dbce68d4d9a164d6ac239068ec08\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01f7961cef7d6c84414783e36e80e11c1944667b9b9eb68962203db212ecca702367e3a1554d450cbfe40b9fec97dfb8698d658f6cd20b6e792576ff2cd197b80d\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": 
\"014219be42f2b95b5110c6aa038a70b885fec984f5ad6ffc2a2611571c6685b46fcea00c3f2868492aa73b2e6b6129ddf84ea1f8dab889d69e433f888c7faeb10f\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"0665324472c593720b2a5cb9c81f2ec6a40401a7c22396addb1ddca4a8d56292\": {\n      \"height\": 37,\n      \"era\": 4,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"0665324472c593720b2a5cb9c81f2ec6a40401a7c22396addb1ddca4a8d56292\",\n          \"era_id\": 4,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01eff1532f2df9481408adf43453e5bb88823eb8f297a6b35001099225cd95035a18fcbe5e0508d6829245d036137edf8944465ae7803b772a741ae91876033c03\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01d94cb2df59468ded1a195e71ef8f024c0d783fa16e81e595f7e3c2476e29920159f549774ced5fec64cc1d015f832fc5d6a5cc963a5fc4d2cb5012a8313af204\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"0166d01aa11620ec909866f23f326c87104253c4e63a639ae174ae1aab3d1957bb3bff80dabd28f0e5259cd683fd4572bc71f7075fb49d621af04002bf6bf10d01\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01145e7bc9d3338f4cf8ee44043e0f3882fa5ef7dc9314d020ac032011a5dd578eec1414f0e1651c5cf752f1e04360bff3db8bf237c51a99622800941bafc4f60f\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"011d699445ef39cba40f288b4a90beea58b45d9320123abaf275f844393eb6c1a0123f75b55fa2a769d9faf39f99cb245cb96402221aa743f9d538195d8b78160f\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"b9cf69f0fa345fcc557db5d8db68268f2f6d35101c61f1098318fb26286d6c7c\": {\n      \"height\": 90,\n      \"era\": 9,\n      \"approvals_hashes\": [],\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": 
\"b9cf69f0fa345fcc557db5d8db68268f2f6d35101c61f1098318fb26286d6c7c\",\n          \"era_id\": 9,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01e2c28c8dcbb67c069370425d841593721d366f12193eef17b99bc4d34f151cb19b00079da647e040735bb08877636b2c960357c012a7806ac587ac79688fbd08\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01fb5b99658b031151fac010eeedabb53a1c1420832e536ce91d8b61ab34a237f764b90e6e51944de94c3c4a98591268997a53bbc85968bc14289ad5d9994e2e0b\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01057a3cd15979ebb7bb7d634e5ccacde8ddb4981ea2681754caf37ff045d97035220b5f3770db73b3a9fbf1f2e295a84ec5dac308f331bae6bbaed0ecfbff6407\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01dff3ef0b5d35b93efb64cdc90389be98731b23bbfb35b31b95c9d2f97f729599731bbe1cfb34c1bbc16dc3acd9cc0a97c2eb9b0936ffa77bbaaa77fe4bb16008\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01c3037327b46b9468a95494ad1a4b6d18f35021e4c908751a37cfb5fcea6b93048e875affc68338abb75f9ae83767d719505cc26bf7e385c8f430101f03214f01\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"7408c61539684ee7746e96dbb9f16345034152e8c651a73b04f26b9f2d360d15\": {\n      \"height\": 72,\n      \"era\": 8,\n      \"approvals_hashes\": [\n        \"033d6b795ef871489d1a18d3ca2b74c41e7db65f6940bb9dd65a9257f6a0b2b2\",\n        \"1b6c898189cb2b53ee452bd6d3e92bc744b4580ac9ea0eb5b4be28c7a9c3468c\",\n        \"f7e95f9c1f0736ec473d4cb0a274bf95f711f380465709ccf756371f17a05f86\",\n        \"7d55ce986588c8b189853b25db367464a02e1b79e32c67a92fdbeacbfcf7ec6e\",\n        \"80b04ad30cf4fdf9ac2a3e90bb10ca02330623f1d43a5738a77251b78d6b7e8f\",\n        \"97b64e2b22a94cabb6cd2da9d18e960b465bb2aeef5cceee6732297fc835fc35\",\n        
\"57e568c46020c01e47d75d35fd5be5bf4679017e390354f462ec6fb4f38df535\",\n        \"45e94f63fa6d799022387d572ec1111e8634e795a795c15e21b4f86386f5d4de\",\n        \"2292a74ef35c7047ba9dd5b8ff330636015625dcf9463e4ce13e7e7de87504c2\",\n        \"b256b2aca6af557b5738a8f52955667533ab6e3052b764604c45d223f933bde8\",\n        \"e3bb473e8848ee0ea8bced89e4e0856c9b0d39394c93868550c6035fb2e65092\",\n        \"93ea21227094da28071acc87909e99a5207cc9813b50e501f0d7768f492b8d36\",\n        \"c422832b16c800b9675ad5e5d50d78b2a53cc10b83cd22420583fada372a363e\",\n        \"2bb82ec1f0e804cfe500b1636592101a037cae9b041fe4ff21027bb7db0fb581\",\n        \"e56caeaf7e9ef61863344697c846a27f65adefb78e06ceaf9e173573fde12469\",\n        \"3ab38de4ec925f81a74028ff4772017138cbab0184f8d85b7a932be5c174d182\",\n        \"1a9003c6c109f152b878ed6ff6ad6c6d6c0f1ced6d28d2e77711ee8d9b244f2b\",\n        \"ca0ada18b435d0be2aaec004a049d09b3f7c51b18a840f75dcf50a5cab007c47\",\n        \"35624839bfa79935fdf31135bda492c149950d692c5fd04b247e0870b121e569\",\n        \"fbe260ce449087a7d19074be566722bd980504bb810b59c9b1edb7292b9bf8d6\"\n      ],\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"7408c61539684ee7746e96dbb9f16345034152e8c651a73b04f26b9f2d360d15\",\n          \"era_id\": 8,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01c4ad89a993e09c8e673098d63ac8ab280c71ee5a47247fa422c7696788ce26bdb781f3f9034978ff97845363efbe80ece80d37d19a4615b431015715d4088307\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01de890ce9f6248fa04bbc6ff75ed76fd75cfa33da54a423a985d8fb437dc7c1513cc0b07c1832c81dc08538a1bcef31be87d0590d1267b7866a0f087aa9c9fa0c\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01847e68664fa01e189075e8753d76c96fd998ae9c2bca94bdde83ac24c400ba75d7d75e3e9eecd14443bacafaae57de873f17cd19cc63cf1c2519942b6893d80d\",\n            
\"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01813c754eb967477728d33535f423516f88ea2bd5e4b63b215ba35a9e1111b38013616dce39acb9377c2b6d739c92067b70ce28dff54ce9245cd90b058364fa00\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"019d74e99365f3888cdf80ae1c8123afae2c8623b412eed9d6f7127c20a4cbc7d1d918efc8f32c65e5ed93133468b410baee70d72d00a33dd070fbd1d44c16bc01\"\n          }\n        }\n      },\n      \"deploy_hashes\": [\n        \"9ab82d1a1c70f7ae053c6c45157d40c6ac0d93d087d4f574d35edbe39dd483b0\",\n        \"b87c10c4ba0846b615f77ee42f50a10279a9ed4d810671623952aa8d81988234\",\n        \"a397110248532ae6749617a4bf1f26b3a0130ff03c97d3489d1d66163c169857\",\n        \"3306c637dcb597811449816c39a4d8293ce21edc60310b56cdabeaa6ba252bf8\",\n        \"159ffefef5d88b2cb99fcceb471a3d2f0c2910c7c9de2db2c1e9845b60766224\",\n        \"58223e9b91d3ec7b73453217adbff91ccc2d34d514105bf9889599d1e9122b21\",\n        \"ed4ce8a502ef432f8d66be72afd80fe1415f15e710e39d2b63971a6f737c2c4c\",\n        \"abf88aa99476275ca657eaec26264c8b441fc5e6a8682f234e73d63163ed2415\",\n        \"35216eb0d783e53accf6e74a118c85e9674c856c04d22330cbd8497913741507\",\n        \"888c0d7a5988cb47eb83e1e7c4fa58a8d229b58b61cf932cc8f0622c864e707c\",\n        \"2651cb3fb060926d9f5edadfec7a41c7ff71f29f9fb9548f1c3624f5351e9a6b\",\n        \"4821b3e2a53f31ccbe3645a8fc17aab6e6ec9e7dc38fbfbbc2e96194da443181\",\n        \"1c98f5f335f0cf980192d56548a80ffb7b8d8c30e60f82765bc1a7ed9b41542a\",\n        \"5eb79399b62d08320e321d73ceb50523327d26149c6728a8d8a1ff33dabe722a\",\n        \"d6926f2291b62c998cfd73ffc154d1e8259dbc3daa101fdaa24752e65d85027d\",\n        \"ab8bc98911a606aade80c3e24b854d971d52d9e20a12744e0f97dcf95fea9ac9\",\n        \"607559135a1b9c446b53446b3cff100b3ff0f6efb474019a09562b83b833bd1c\",\n        \"ebb11e7fb132d580752254a1166bcf9fe2569f4952c97227de8847eb456c094c\",\n        \"ece0da05da8a349d5fcf89552c24dea8f17f876697e272b2175790a11b0d8521\",\n        
\"e5f4a0ec09d7918c9871de663a677c2ad6bb88141e3825a5a8e57b4956a62d3a\"\n      ]\n    },\n    \"36f0f62d44b2471a6e2cfd45c0aa1d7eba737857a09735cee3252c65cc83217a\": {\n      \"height\": 57,\n      \"era\": 6,\n      \"approvals_hashes\": [\n        \"8311310da1eeed47708ec3b218eefe82627dca8ca04255d13d99c494805c9b96\",\n        \"10d4be0f65ac1ae7682c43ab9f873e6f6f9199c240746e7c61c7451b633dd678\",\n        \"a2a6a958006556addd9fd4bf15d6c04bb0de083bfa2acecf9aeb3e700a767d0b\",\n        \"0cdd390c6c003ea0e9e0266a08adfc789c91ebd948324500b60399c3efd255b0\",\n        \"9423d0418543b7942d9e27a3696c775363edd18ace6f65dbce465e869f1edc3a\",\n        \"2a2da83f3fb70d8f332e707602bad7e761629b9a1c4f88b5b3aed5a41b55c3ee\",\n        \"8d2fb469089235c04cc11987aec5cc9a6c60e53d81a91b7ecfd9ee6e6b257869\"\n      ],\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"36f0f62d44b2471a6e2cfd45c0aa1d7eba737857a09735cee3252c65cc83217a\",\n          \"era_id\": 6,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01a79f9284083f38c817015c8b6be19c621a424ae128a84d4ca795a94f019db74a7c1c656cf05881a5751f23a8bf98284b82c21d099608e2337b8529d0bee3e30b\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01c9bc50cd5bbbf9827a859ca1726af5f417dd03243971a6e2747f789a5149552eaefdcd8fc31ee9539bdea8df6a11a035af5e4a92b08cb5063db17f7c96fb2906\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"0134b3d37a02fd2f5d8071be3c7f96c4d9ed701c5b36b1f1bbd3c1ddcac5470c3d4eb53b49695ddc5e05e9e7f7177a183167b7795f565e5b2b69f29e71edecb006\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"0129b3ae0d98a771adba2d71b5289683cd07b032d5ea63962598eb5b46924cb9c40687553edf024f9cb0da7b7ce1c97a70224fc404d814523e1b8191f6001da10f\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": 
\"0177c18ff2a970d84e16d26b1310f4331fec9ce0adfeeb9151851bfde2a78e08f8e7f520031daab513ecdd57f53203c0a0e393190e01999dac20171b0c48ba1f0b\"\n          }\n        }\n      },\n      \"deploy_hashes\": [\n        \"2af0f120ccd04bead484b6053d87cb41930e650cfb8f2b7c2ba37c5f3f5de701\",\n        \"aa2ae8d13b0087c762dfc34cc5b3d0a30d04382a2fc707f259225915b342c09f\",\n        \"2a6c3ec008d34e9fc614bc6c264af5e3834a847d5690211a3393ffdfa4206a1b\",\n        \"975cd4ddf71c0fe4c81439e9b4fc09ace17fd644efd455b995c3445403b18cba\",\n        \"faaff0d5bb9e1b68616a7225b37840f87bba48b2257607033a96fccaa4e0401c\",\n        \"f1c728bb6a8217bcf4c41d0d415d08ffd043f2b15de2c949ddad4e072cf56c57\",\n        \"6942cff9e560c7d0864b812f30bea56e59e26be5136bc188197e1916fb3fab9d\"\n      ]\n    },\n    \"29936b2e3e9920b624bb1539840949fcfb21e27bf347f0433d73861da6f9d608\": {\n      \"height\": 81,\n      \"era\": 8,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"29936b2e3e9920b624bb1539840949fcfb21e27bf347f0433d73861da6f9d608\",\n          \"era_id\": 8,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"018ef77cb741d610411cf2e1d54ec63c63adc87449557c3adc200812e8d263c5b550a928dacbbad43738b0847d1acffcbafa75bc2ba352d2fee3a431d6e947eb00\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"018382f4d0d690c2efd8a6178cd35c1c60beec2a9a8e9acd1b296e90fc9c15f8cc26e6e9864bd5c1bb74533b6969876274dab437183a7640ec2eb0ac7776abc108\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"015c86bf7799ca7ed974fabcaba50a0a210fb393d09a0e259a82458adbb1c50e1450abf4483255e55b1850c2a5734263d7db68228903e8560a634483e4641aed01\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"010a1709e850f9878452e17427780a884ff4203a217d91c8e2b1d4dac5467eda84048433bf61c8094f747a62a219572308cc1cafd93ec13ef462e6711ae99ee600\",\n   
         \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"0137567bf30d37f7cfa272df1441867a714e1a1eb63e6a360a7db59e4e95b7ca37cd3343f20006364279f2942559bdb4448c161752b05c07e6806730dc9b2d3c06\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"c06cad6478773686dd1fb7d9658f9e91fadfa222464d0403ddea9f6430f93146\": {\n      \"height\": 56,\n      \"era\": 6,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"c06cad6478773686dd1fb7d9658f9e91fadfa222464d0403ddea9f6430f93146\",\n          \"era_id\": 6,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"0107c592e78c968908a5cd06395b0c2f4d866b72221f3aa301695d215c66e9a9970cfe4b6bda6b918d9771ebaee0dc57577dd6b6e58af77fab78bd718636b2bd0a\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01a849ed4d48ae86b377c419117d2db755be4f84fde5a0659cd925049f504b5180bb67a29eac411b2406a7ed95016bf46da619719dad187d2d0894593687bdab0e\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"018381440af0fcc446d7bf2edd6f059b9ae107c344b0c4e2f4c8df509a353f0363c91ec8607ed813a9cd4d03c5b14e16bd4788d008f04546713f70578898ef8904\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"013e79ba6d2de29b15a021ddc4f7eedea643d12a088cefb60e8b6bcad8fa7fc7da39264799efd94764aeb5d039f62d015a2c702a973c91d48c96da605c21a1900f\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01dd0c290c1c8a973856a80360e4913a027daca1bee0f6f58fd2049c9b4fa9ffd323f81a891701a1c5961f38a789ef049310e99593de0d5a6954a065d1e072ad00\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"81230f3a7a613e48bcbc2b0c4edfb4681e03eee92b096498452a13c0bdc8abfd\": {\n      \"height\": 63,\n      \"era\": 7,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        
\"V1\": {\n          \"block_hash\": \"81230f3a7a613e48bcbc2b0c4edfb4681e03eee92b096498452a13c0bdc8abfd\",\n          \"era_id\": 7,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01effc516754793d6a5c005a8174b127d7efdf49c9d3eb20996a5271a29284e7769096a8343e4e4d3004540c75b18332910c51e8a7b0c82ca8862175144ad98307\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"013035aa4e7638186e8bead9684406b51bbb85964c1666a11de3a6bdc72cde5494b4c10a94a06d94a5b3e4f8ebafd5c9363deee7909e5101e47f32cddde295b500\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01f34639e11f292ab50f4aa54760771dee89868c6a52cee726d5dc2f936ce77375276647dceefa45ad01dfd747541dab17bcf04114f1c03047443e8e167f66e005\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01b24a0876a2397467050e3d94ad046c40513237ec1dd73680dd8628773fcb07b2b1f5162b0451a6a5bae93dd9b068e4733098efad3c68fa6099d85e1e1f37e50a\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"0178d98ec1b7133e4a330744368f46864e14362f48abe874982017113df6254e0252fb12216820e4b895f200546e25d545d5361fae2081d29ab5a33a445ed53101\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"f3643655bbbcd415f479d538dd91b44cc22f2bdfa5ad46dac3b73f825fe80891\": {\n      \"height\": 55,\n      \"era\": 6,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"f3643655bbbcd415f479d538dd91b44cc22f2bdfa5ad46dac3b73f825fe80891\",\n          \"era_id\": 6,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"018fed3db8814f9995c64246d0ce76d48c588c13e7278f08ce3a389c954aa039b751f4680f822c2bb33b3ebc1629c417f77a24183c290e378ec0dc983c619a060f\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": 
\"01d8e641443bc14227e9113f8cb70406e42375f1a43acb324252742a9acd04aa08f825f4da9df9c2dc3d0a4ca81c9bdbb632cfe7211e147666e16542df824a3006\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"011a8608ace69f02910d465277af8567620b7041f72a1e7f9549493b2e7cfa3b78cbafe8fddd01d210021b3a992c07a1050b6f970db7d1a9ef56915b52cb3f8c0a\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01b3a798b231ee2ca7088e73b09dfdb8373b50fc82299d5cf84c56a3ff8838ee5a35e97c874c5b717f5cf6c264f89f45c2a28e75d19788e235dda343536f3ea509\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01badf5a3f14073def89b4b9bbc7f84082eaf40cac5cd004321962485e6050d3241a87eb85650de8cdaa9d02cbfc8a126728738638a10c8c640209c0590102d706\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"dc27efe32e93fa37926307465eaaecf460ab5d286f5f891995055b661619fa7f\": {\n      \"height\": 82,\n      \"era\": 9,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"dc27efe32e93fa37926307465eaaecf460ab5d286f5f891995055b661619fa7f\",\n          \"era_id\": 9,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01c2f572ada6b53c50a439ee9ddb5ae17100af1eb676e7a35f940bd2f25b0dd8b00e715c8db34ff573d067d6f446e552faa15895be3117337b9047fd4e0b4d2901\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"0163481871dc8147578d9c2ebafc2cfd69cd908b8c820dc9a1d2a318e55df7917cac5c855eaced0c72f1bca45aa45faca557395494c404d399fc1ac84303ff420a\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"013afb0d0731d39e793433e949478dcfd88eddf64410183cb5703895cec51f59015c4103e8f4ca8da1a8771e0e5bf62c8dcdb82d12a50ab016c14c5e0c3670f80b\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": 
\"0113f14644afc884a7b9149c7b83b3f69efb4c557bbc34140bcac7396768a81caa4b53fb3d483a8692e7d230ee1505a2d7984510d1b1eba3322e83ae4bbbc8cc0f\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01752cf4c4f4c2d1688b10727229575bba62ca400b6c56994d06c77c8b3a66190c1bf6356a2c0c3609b85ff419b839c3997287b7077e86215fe09fa158a5bf3501\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"50e9cffa88931fb44296172cbe2d2989a94ec2d5b2cbb80ab7364d4ce6ffd0a6\": {\n      \"height\": 94,\n      \"era\": 10,\n      \"approvals_hashes\": [],\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"50e9cffa88931fb44296172cbe2d2989a94ec2d5b2cbb80ab7364d4ce6ffd0a6\",\n          \"era_id\": 10,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01b1af9625ea93e953a48a2223dac3c9f5c014037c22d8de356a0f4337c325afa838614f54b575210602f01a08a4e87b52117f64fe9cb540fecb839dda05764101\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"018c5c0b7d36458ec0e4b6b7677104c91ddb6e29efb3d9c2df0ce945d835269f9635e6cbf8661fdd0332a5e085fcd56b092ed81ea03bc66ba761397d3fb14e0a08\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01ab069545602645cab96be41e6bf0d6925a209caee37cab1025f7cf9befe19cd2f676c14671fd2a230895cc23eb712acfbe04d6c94f27431915a0c8528f2a4402\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01a7082345c330f2a6b19b5f13f9e3ba18a00de5363c589ab3cd17512b9881cb160c6396d8eacb4fb8c8d000f2d8c7cea802cea79cbe2a057dcb76e0f60ec9940c\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"0143ff53b152977e00cc68ca3f1710d112c691295e28dd02fb025019a1a50bc4d0fc0723617085544d67d58153933dae9d02eb85e0f453065ca926c8d91f2edb0e\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    
\"18a51771a0a694e7602c53b4510c4b63b63086c926256a635f84e4ef017f800e\": {\n      \"height\": 85,\n      \"era\": 9,\n      \"approvals_hashes\": [],\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"18a51771a0a694e7602c53b4510c4b63b63086c926256a635f84e4ef017f800e\",\n          \"era_id\": 9,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"015038be26fff75bb8725e496f7de8d2c1f04e697f3e9fa9427df75f631574340c36ef605dc9cf1d27aafcdf42e9b5a9e03719a6ae86b3a6151d905988b1e61b04\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01c554f533d5ef7e9ba1432ef3350783f9e709a4c4414aac6a3f23d9ccf51f8893e4373147e54cf27619792d8dbc1403aafe12f3d8c06b821bfa7d9a94a12a8706\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"017db5d39ae7841c10f510cffb2945f9850287c30f3515f81fb49cb1488f3378cceb127af16c8f29d1e115521aa942f1eee0522dbd884c34ea9c95fba63d40520e\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"018a27a18d0020ee7ee8479d2964886dc4509cd4f3f7704b847d403c9c63d1fae4c78851f26b1e89e2f6c2222dd6c5c2fe62236d8063ddd065899e63dd5b93a10b\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"0116b448374a553ad4223c18a2cdb7f65905e7917e28c3c90cd53c8dacd330f6da62d0f9b8d9a86f15e1a1c0fa819ee849e6c5de3beb33ceea7f973b505c56480e\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"329b31a78a7df60a673962a768736c58f32df95a8d15c3ed64cfde1fd4d0c2d9\": {\n      \"height\": 77,\n      \"era\": 8,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"329b31a78a7df60a673962a768736c58f32df95a8d15c3ed64cfde1fd4d0c2d9\",\n          \"era_id\": 8,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": 
\"01268e33fc72093ef389abe40d03173f535fb2dfd7c2421f276e6ed79ef7fbc76b9f305f789175ca049c39ce2e8a27e0972858120e84ef9b3633df6681d329b70c\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01a01bb0ac13f5134863c1fd170dee15f712f5289e3f16ea33baba305a246aace391cc6cb3980282a09a464724302d8973ec00efd53bd623377ce793be21bb340c\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"012a931e60a1d7753eabe8ec2f60ee83606aff2892dd52dc1b756e093a54631a99484e6d9a45c72d0669528b1b84af123d2040b6ce79988433dfd74b14c033f103\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01f7645dbd75afccff1583fad3f534105d62ad2766d87ff3fc574994b5f16f4726ac851566545bde8db635fd5b4a86bb01a7f0e19123ef961e8082350ea3954b05\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01c6cc33a7555c4aa29fbd5a110a3ac238f2beee5f047a91a19c484a48ecd2fdb6b70370ab863c39be8474578d7a747bde4ace519f8a0f4dc824b2dd7c6dc8fd05\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"8846f414cfecd61e3a405cbf1aece1fe1c98fb55dbaa33480d2222158254bbdd\": {\n      \"height\": 59,\n      \"era\": 6,\n      \"approvals_hashes\": [\n        \"01b558bc601689ca51ebcfd32613794e08df5e7b15697c27aac2ba22ee9344ce\",\n        \"ea07919ed117b48a625f6b2139432eb08602f98755e7e9a2c5b98a0373674da0\",\n        \"3bb65a1cd0eefd75ee0ec864ad65d2bb6844391b645812887779f680a2097694\"\n      ],\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"8846f414cfecd61e3a405cbf1aece1fe1c98fb55dbaa33480d2222158254bbdd\",\n          \"era_id\": 6,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"014ded5a73f5a4ffc0ba7d074fd40a768ff3b6cad345b5c588b2811536d5cc653ea3964cbe8f281c4d99dedfcf1dcab08de4ebb98046b4011b941cdd4aa70a7702\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": 
\"01638a6ca05e18e467e070a652d8220835be3a3f61c1f06e40d316ebd97c3b59ca612264b26617ece9f0f00aa73ae2e31e0cc4e09ebc732cace053ce7cc359490b\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"018bcca0710e6298a14eee2b4d997d55d46c8cd8c048662073f3d10dda3ca5123b41c622205bccf42601c47fcd7594ee35d39170d5499b6ce129d953529fed9903\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"0143a0c296853f95679687b58a6d6e84e7dec9864428c5f689b0941f0965fd05fc196521d3e20879d156d80cd849d202f8d62c772272e989765290d28036751303\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01678054f97cb18785d59a7b4f5adafa916449e42f30759d90aa70dbb51bd5843d3395baf8cf9796cc4b0423d115d772d1e87b78e4e41340ec3f24fdff0b763607\"\n          }\n        }\n      },\n      \"deploy_hashes\": [\n        \"2533d10d8eaaae38d9b3b8870e17ed535434e4708a84e444c669299e84abde3a\",\n        \"4a743f9843488b64a3940da46870977917f6e58e32bf1dbdeec84f8c24a33116\",\n        \"2fb450c21452b99bb8a21bc3a71a9e3266f540cde8577f671b857c0edcda2827\"\n      ]\n    },\n    \"a62654b2f9a65e0591e38015b617c6fe37e3a50dba474223030d75129d4adaf6\": {\n      \"height\": 41,\n      \"era\": 4,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"a62654b2f9a65e0591e38015b617c6fe37e3a50dba474223030d75129d4adaf6\",\n          \"era_id\": 4,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01efe56d094104ede2a186932c26d733d512313a1e57866732ca4cdc554ef1b5c7b09a869f81ae1ab100f22d5facd4255642dadc0a08da4454b0034f3bcde6b308\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"018067e568f86370ec5cac5b2d229300d5b4629ef3ee3e3347a7cb5f33770ebbeec9a0ce415db388572b99f1f53fc5c256149561e088646b99140358d56e808f0b\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": 
\"015054092a62bed9e1ea285f35bc310fabf9d3f96c79ce5976a3e3735b83c05bd4fb41e02b80d15c7ff5f50f5cae79c177d067131698cae07310e69b4c9c553f09\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01a1145c92db74488261224c0db0d9427df46206c16e0b1fff899aac7b5125b4dce0e716e8aa4a6c6042feb3674ccf4f081318ca7488e2cda456c32d03ae11410d\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"011821e258815fe27b80d20c86ad666f9d1b0b6aedd5ccef1d94048b7a530b552e30e0b7497ec18ae0f1738602392be9d2f6f1128c2b0006309ce2f340eff2ad0b\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"18c1d3267d5cd2f311a501760704f262a12996a589623f7f73903ea28bc3b21f\": {\n      \"height\": 35,\n      \"era\": 4,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"18c1d3267d5cd2f311a501760704f262a12996a589623f7f73903ea28bc3b21f\",\n          \"era_id\": 4,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01b9eb76556d4919712a797c40dbb42446622cd6f10ee55af8d2f06e41fa72176bb60d32489dce6512586382d6ea39aa3d4fcef0570e63eb29db57d60e95acf309\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01aeb94eb2d7c54e7e533c89de372f97016dbdf6bae5d6baa3c19ce065f2ef353bedced4c0e5263e0c636b7aa2ac8a06a5402676830aa9683bf9200793c420bd03\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"013f8b80fa31c7a3db73852912ac31188c5660a327d6a370e2db6ee70fd569b56a3511178ac54b597b2cca47e5b268f449c7b84d19f186e7bc13909f0e305e4f06\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01f375e17bdda7fb9125c3a0d768f88846debf1af07e9ef2466ee94cf9db5e0af088d41a08cb6970e55d1a7c8167fa63ee12edea54c59fa8be9ad04e9b11c8ca07\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": 
\"01b97099e25cb2df1caf813236a7ae4d1dc64f1487ffbfa696fe7c1bf663001bda78e127df1f3b1838c0e73715b72f64f9152021a98d645bc297fb6442e45df101\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"f517a95ec6050156a8537bc4ab6b2b2a115dbf8583d92bff986a94ae6cb2ff09\": {\n      \"height\": 83,\n      \"era\": 9,\n      \"approvals_hashes\": [],\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"f517a95ec6050156a8537bc4ab6b2b2a115dbf8583d92bff986a94ae6cb2ff09\",\n          \"era_id\": 9,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"0156cca848f711896442a5e1a46526b0b0015f4edf74d3e6c332b3df999b98db476dfde8e3fc0dc8e0d66eb8aa22855c07b76b05c37980c743071c7c711445fd09\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01bda830c252d5ecf66653a5dd9a7f2a5fcf6e6778adabf66d106d302a109682898649dd9bb64c54fc6669392bfbbdc641921b2fe48c38f483c84de22892f78d0c\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"0149bbc7f26f0e5a76b0cd21131d3945c81e3fe8332b6279a0fa4d7e6d3f53ba2390f4a509dbf31b65a1876da9e0ac29a4e73cba637b24353934f026eae9d7fa04\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"015b962326367032127bd44a645fc0ca36307399c5fc433e7b386782170aaff64680133312cd870146a626643fed8a29ee3a6f814b9938ccf67ddf499adc51ca07\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01d3c8f430aacda4738b6d785ca0e83a8dfaa9ad7c940bbc8bd52938f0caf2629316581cf7c72e5499f160f9a33883d7d43258893a2b43abd0882a272e04ef160a\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"1a9bf2c6bcb799136040ee0dafd44b5eb126220e098161f82641aeb6ae85461e\": {\n      \"height\": 66,\n      \"era\": 7,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": 
\"1a9bf2c6bcb799136040ee0dafd44b5eb126220e098161f82641aeb6ae85461e\",\n          \"era_id\": 7,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01cf50f3a4e5ec2b55ff33c893669b243c7127208c30396cbefa7303b7eb46f466d9b282f3bd4d55c2a65acd5242eeadb35cea9c23adfb750a162631cfe293c50d\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01b3d4d4e7c7e52d8869d0ff74952621b665beebf7ad9e7783dfbcf0c24f586de55ede57086afd2551f60b0baba2b7d8b8cd06134bf12f464d592eda6c6b69bc03\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01e85321fdb3f9f38ce4b60e1f57d737ebaa608b7055d109a6556bcd0543f8ed828e68231d2fa178ce01b4e6ba5b72704ab486e0c4f3ef70f87eae337549db6c03\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01e4613eb3e6923f89c4e405145ce388320303acfa708813038f1a68c2ceeeba20138a3c3c2014243970b8ec88d265c30d459a8677fd14c338e7bb365ec7acf70d\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01cfb8a99e19d240745dcf9f27e8e0e2c35df90508b50d827165cf188e78b21a49694db6216eb438334686f803c2f4fd45ca64fd0edcd10e8a8f410bdfd11dcf01\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"b77cbbd252fa04e3685e3b22331670fcbe2215bd1dba06e2e49d9d9e4f055557\": {\n      \"height\": 40,\n      \"era\": 4,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"b77cbbd252fa04e3685e3b22331670fcbe2215bd1dba06e2e49d9d9e4f055557\",\n          \"era_id\": 4,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01c3ae7049e70e00ad85842d10b4b38e5da8abac966bf429cd8f8bed8a567b6f2a54668db968a4d0aa734e57d660341a68b14ecb482f4628d8c2121c432c41e909\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": 
\"018d6291632d14c7b0a1c55b9f5a41c99125d2437828bf36e1805b5247dad90a312e3aab3045393d1d5861a028461853a90fd96279eb7a09e8e77262a830e5ca0d\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01f195903943eb942244027e30431d523c886aad058edf781294f4860378aea569e7ba3c76df3b46db2439e21c05a7f1eb3e0fe21a54dff09789fea7cc16f2aa09\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"019e56d1429f3151d1eea1f113184e6a254880c14bd28dece3a9c5130dc7e53b867462fc8c948cb1c65749f5fb99d0eade89e37961a4ffd6f48570b872eb3eb80c\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01364daea728bc656324c9251559af5629667645c7b45c0189d653819a837f86ed1e1745622f581c589fadb138032b49fa4365a8c443b0f99695f99c8af552b106\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"89f42a9616f94d71f1c31589ab89ac38a600ec58fad8713fd3c28b8c1f4e7ec8\": {\n      \"height\": 74,\n      \"era\": 8,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"89f42a9616f94d71f1c31589ab89ac38a600ec58fad8713fd3c28b8c1f4e7ec8\",\n          \"era_id\": 8,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"011e94d090a77c80e296a516664a577558c58242504b77002e049cebacfe0bd9801f54efcee0521d4a8088ec23eb1c55ecc165d9438840bf0ac0d1d711544be603\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01423220f2b56273e6abb6fe5460bde496a7ebf12e998e0bf049d31324d899a5260195db7740a8f7c724ea65cf1114471dc393f380ce7231327e013db74fbb5102\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01050b83336eff034207c4f40164132059755162ed52d3959f573c8c456b66ed70e249a914b98099954034a3ad7373c8688e29f77164341037ece4cbd44fa5cc0b\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": 
\"015e98ea39d87ed3e310ad12282b2f59dd61d2668d9a22bdf97b9d58f1a2f15b8764ef0593642b4121602699ae87011398551f0db115e872e8a4c0b02a0c362b03\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01e5fbcd676f616e7b4b7ded48d651346361dd0e7eddad6ea78705d056068d089e5fec73439a075f5ddb26326012163159bb8aa5ae7bc12f89ff05875604b25f0a\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"96607840ea4a72f4cc942fec7a10a902e19ae1bf2513acced2787e972eb209f4\": {\n      \"height\": 93,\n      \"era\": 10,\n      \"approvals_hashes\": [],\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"96607840ea4a72f4cc942fec7a10a902e19ae1bf2513acced2787e972eb209f4\",\n          \"era_id\": 10,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"0172af335870196156ad1bfff98728b613bdb1c09a37a06ebacdf5fcce47bd763298bb0449141641793184418263c0d5c186e9f060bf897939edbf7e8860c3fe06\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01e0bdaa0d179740660606aa662eeabfbc5024ff5cac6b3a8df1bb66133fc6ab722ee61bfd70b50437de820103f7b0b0b0c0625658de2cd05c7c35d57d4c79bb02\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01fb60bd2cd3c17c2d1f2439b88571024d3ce70dfdd0aaad41ba7a6e5e29601ae555aa9720b865c3269638c009de6ab6b8814bed9a079a9f9b2b762c9c3becc507\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"015a909ae323a4ee11c86946bb760fd3f391a430861a9ed2f4baf97bbce48980f073d51bcd36fbeba42e1df0c12b21c0be0b98dfae00d7241ad1991205df474f06\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01d7ad431bee6078c51a13c278ffeff0118007c7c65d15a5db2d501d563227cac967b4b2ce5b707b16954a0187a5c15a06f975abc5afe78f30100e145fcb77610b\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    
\"846bda8fbf66660999eebe45678129278147be95efe5d919640ebe0eab6efabc\": {\n      \"height\": 36,\n      \"era\": 4,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"846bda8fbf66660999eebe45678129278147be95efe5d919640ebe0eab6efabc\",\n          \"era_id\": 4,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01fb136aa412c3b925ac255aa80b06d8bb3800bc89c7c1a349cb4b92f2153a9d1bec2d5b2c67be7f4c7fe767be9978801042ca7e0e0f474b700898d9209c83ad00\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"0124438488b90c154e048fc88d983ea130b04f56ca9e7153d9eebca7c0745c6f110e1a77cc5198754c35230adb02051caf7692083c8a7390830023d6abc9603201\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"0110dbfcf752496964d5b48518e5a135740e53f09aa6bad6ee42ba67771a314ed5ce502ec5275a5aa908cdb68e0a49428928f2bcff517e4c76a4d34df69ba34a05\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"016c4fcda8a74bc271ad1125a409b2e8b285334f1fc201c0a289878043b55ef36871099f9490493e34de35eb501aa377787fff971ba9c393e1c422becfad7b1a01\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"011fc6a840c70999ceb9a0ff937f5b0cd5dbfea15129c086b19fd77b01c28b702a810d6bbaed3b2780c25a3c00bafe6f14ecca250a3947112013438cd68805d800\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"aa88cfaee3c7993f8d30a0dbb6039d2d009b34725d747b7c639a29de810ab274\": {\n      \"height\": 44,\n      \"era\": 5,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"aa88cfaee3c7993f8d30a0dbb6039d2d009b34725d747b7c639a29de810ab274\",\n          \"era_id\": 5,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": 
\"018ac0475e0c87be0df6dcd945ad05ec8bbc57dc2dae6c464e66c760e2bdcc817315b98f14dc254b2890a7ca42dcbd31de1bae0d00187b55807fc9e894b64b860a\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01e72790907e1802bb1640bd4630a97d9c92dc8b478cff92e2b9d567b6fd83802e779c4c4d27a340079dc86aa3681e51b95b6428dc39534136fe419e1d8ebea303\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"018a4ddc2606619dc1be1cbf0b00fd30b06ac95e844e6822d1d970160aa0f83eab5b8a751d4ea6c5c4b67460246cf6dab88d878d102a0be81cf905aa0e4bd6b10b\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"017f349109f4e27147947ed81825d88cfd17ca95e9ea98b4eee836edf8502a5c4eb92ac6b29096c3664a31b3089090d0a1e6d2a4fd4f21ef64186fd7496d5b5f0c\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"016d5e2c01638072e25bfa793347fab8595b74f35bb8c8730c26b1ffaa39b1d684b6d0ed42267217b6a4d7b634d0f14db84476b8ad1572eb2fc110ecea6b71710d\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"6f386926f07ad93d0a9330d20ddea32f22596cb5c6ac96e81fcc32dc164c26a9\": {\n      \"height\": 73,\n      \"era\": 8,\n      \"approvals_hashes\": [\n        \"4997980abd95d62dcfb204e1bdbe897052417bf2608fa4b9558e5979d970aadf\",\n        \"c686ca14f7b5e3f161c1e62722adf980983bcef63364bfd8f2990ed510f0ebce\",\n        \"5c9387c7727ef45134fbf420c99bdb17dd0020b995589f781101b56e71a6a071\"\n      ],\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"6f386926f07ad93d0a9330d20ddea32f22596cb5c6ac96e81fcc32dc164c26a9\",\n          \"era_id\": 8,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"018ff5d691f249abbbecbb932e09f5b6875ddab8ffac2784ddf2c268e2a2ea1ceab1e35def2d81293227d91da1ffc2504fc2101711dccc47242c5aabab811dfc04\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": 
\"01edee50b7f072fc34dd6b337cbc25f31b38ad4b76609ca05d3e0ed464bfcce43eaf9ff5d5685bd20f6be191177fb3ef48ce892c89c16540e67da8face85702f00\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01f37a1537b47a5c258984acf00724d0166b09a7c282ecb5facb355aaab659d1c1d66aee63fb1dab6eb39667462aefbbcff2c037a9069eeee95d710fe603c96c0c\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01dd313aa8cf069742b71a7607052aaaed0eb0c5a1b853caf9d4cb4620eaf6922859c33745f27e206a032cc9b4dd8e58cb767c7fdda1533ead09f22a0e42560f01\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"012f6a17c7ce971c07ce80b3a9f145e90ff25162e1b87b75b9e3f7469dd353cef6ca7b4e7551d1a63453042bbd054306773a67ce3e406204cfec42338d5d7f4a05\"\n          }\n        }\n      },\n      \"deploy_hashes\": [\n        \"3f69510d52aaa21601d82c4d415856c15af50e9b250a04f6f038c427670fdcbf\",\n        \"5ae77f177af25fb12cee62c75261249c108151a02d91f4febcf5afee9c4c8140\",\n        \"00c5500f389bc2d1cba67c45d9f8132f2bc7fccdc0402ea42b73d76f71961a3e\"\n      ]\n    },\n    \"5bcd384899f8d5a0c05a73b8e77e7276084b06a0d18e8b8196bbc79886d36408\": {\n      \"height\": 69,\n      \"era\": 7,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"5bcd384899f8d5a0c05a73b8e77e7276084b06a0d18e8b8196bbc79886d36408\",\n          \"era_id\": 7,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01e5706817fe115d655ce3bdbcc39d3205ab630cb7de6022596156971d9e114990415a957060d43963df17147473ace84263eea39a4b24858967dc0a4bfaa4e105\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01dc75709ba51fb806fc2b5e58af0be5e3c7f324a4dae46a70145eac0e3fe6fda1420ba7f4c8de2d5e9782870e157fd0a663f3cbd465edeb66ce324dd030a9b60e\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": 
\"01f38c71752cece41707687655c25baa20da9312bbfc975f9006cfba58a1c7133228291347e7e1bc2188eac4332bca8dca7bbe3877bac4efee1d3a485bc59d5c05\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"019aae6cd225ef80c5bda0459cc7537b856b814b7eac74d9751072474534fc9b4179ef740d65b067e2329da274ea1adbb4f29964c83914600a51e6fe0ed5c5f004\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01c88ef5480217a1a3877a1b173867993134174ade09d47689b70903a8f7b373df9e9ccb79867f96db7b6312148fa2a975617cd3249c9da25bbaa8709803d3ea09\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"7438b956bfcde6a2d7ba7dfc16877f6a317350e6dfec7242c6213988770b1b36\": {\n      \"height\": 39,\n      \"era\": 4,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"7438b956bfcde6a2d7ba7dfc16877f6a317350e6dfec7242c6213988770b1b36\",\n          \"era_id\": 4,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"0109106bc12a79112245151aa5aa8d0f2c9c988cb2dc6d620288c58686e6a523372579b3dac27401e59765e0dc491fc0dedb0e3aa989bb0a2a8f50cb312fbfe507\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"013aac1eb740575433812580497dd9ce79a9d7c88233a75f8bfda086973539a91f47eba7f473c175af5b53945b5c7f717e52604fcd45d9fda02a4f3e381ce7ad07\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01d5644ed7210c70c4f43f48b309928e90142b460baca1cc1c37228120d97ccc5c63b057b75ff58c4bc97ed25358aefa563ac27aa4f390b28f172bce1be66c560a\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"010ea241ab144f62c7d527fc61bd95e32a2c3ab0a63a2356d1422c5ad99b49b83a76b7b771ebfedd3324436bd877b7f9753b5eee5ed8e1e667017d3c56484f400a\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": 
\"012bbb276ec8b5badbf2bc05517c12d7807db0aa5d01f133ebc33c4f1d783a5cbcbf51b6ca6355ae26d06e0166f02f42584ec46ba8f4368a519f492d2124e6d30d\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"327321d3af7a7c7ab1a71ba11bc72f144be0c87a09d92cce6adc4be37f7877e0\": {\n      \"height\": 68,\n      \"era\": 7,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"327321d3af7a7c7ab1a71ba11bc72f144be0c87a09d92cce6adc4be37f7877e0\",\n          \"era_id\": 7,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"0133a8c2e506191a0851cdbee9ba3723a5437786c313c1e2b29f7f6218babd36aa003dfc3ec8a9e579720f96bc6fe55e30c5d01e9fe2f6b3ed74d671437a41060a\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"0138afc27d809fd593127b76fc806750183f0ecf975b2cf8035357c129cbe4992afc333eeda56ee80a950df8537a581658e3a79e139dafcdee4514ce7014620801\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"019f96974fd00d6f42de44ec0fbae92a6b994a9ccb76e2dd6704faa99abfaadf2017ea44ee5e7da6f83d920828319052e99a0d8351223b6da084da425930e6af0e\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01f02c2a88fddda27a7f71248a2bd3660e38670079b51ef93df19a78dca7f1e0dfacb2dbd3b372393463673f57189185d9b36140f0695b3e1fffff0f7b195d4c07\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"016805140ea5322148483659d822673c4740c56d8af711049fc6fa5e612286847cef550dea8853b46e2cc73d3994f051ed86c18f65251fbbec2eb78e7a5d74660a\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"f50447de37b23c28f4133fc2cde2c772bce0af1e49d76690532b614af1d0e33c\": {\n      \"height\": 75,\n      \"era\": 8,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": 
\"f50447de37b23c28f4133fc2cde2c772bce0af1e49d76690532b614af1d0e33c\",\n          \"era_id\": 8,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01ff02b2ddda99b82fdf9be6215156af4eb61f8e43d0a4039969499352ac1eaee59ef9e435cb6a0128af12b11408e19f1377416c2851daab569f93ea09ec68ab03\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01a9baa6a882daa9f3b71ba60a833a67509f1f29234b2645db1fa611907690677d840691692c263cec187bf017755cdc3193fe85e8f4c2209633d02459c3b1e10b\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01be743da4a9a8185972a40e46b624aa9e979f50d8153bb82a19db559abfc25823cae65d9e1beb0d5399c9ba5c6f3902025d524e46bccb6799cd5fec4c83687f06\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01c08b376e860b67b7f1fd81e5f631d8d9b2fa12652153b4d4f7495d12875cd6ef5db500a4c899113b0fc3ba2c0c0f5d0044064b7d94c9c411abea58e72129450a\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"0199ec9093a7114f833fad696893d9ef4d2d98b11491b0fb7add9ae5b3801bc0d9caf471a3e4baf08e78563810f32da0615118c579261a057126abcf8ea447d10a\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"d08662b263ca4651b9bd64fbcd3f70f47b5c6ce9ee64115cc52b8b03852bd41f\": {\n      \"height\": 71,\n      \"era\": 7,\n      \"approvals_hashes\": [\n        \"2ef51ff9566e7eb1e1ea28274d96057d01036318ea5f2a6e02d859f7ef252b48\",\n        \"9e529633309b294838f942be44093904e042bcf7d0fb908ca2e2dc926370c654\",\n        \"04379f664120d7d140880706c6b3bac23211376e792e02343339b6e89c8fc524\"\n      ],\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"d08662b263ca4651b9bd64fbcd3f70f47b5c6ce9ee64115cc52b8b03852bd41f\",\n          \"era_id\": 7,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": 
\"016f901233dc957cb9cacd2d90007997afc8a7bd63d937e7690d69eefeb9d5eeb1505a201ff67aa4f4990f71cb2f8b0668b9b9b37d56e5141071c76e0a6bc55a0f\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"0152826f8a87e6209ccb6f19710e932c21fd1bfd1ce35a23eb5122c5128ddbc22987df7808e9ba183f0443fbbd7d492fb1416b07ec6ff464dea68f1c4c377f5903\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"012260ca9f54fc49cf678608f7b7182ed837054ee7845f981c3c246ca4bc309452727c727eef5cd8361002f8e83b1e527b947eacb78721259ed3760e4cb858c902\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"0140301844864cac3b64377783c7eeb72f5fcf25390d14aabc8395fc78eb42d5d392ffc7d782bdbaf630dac750cc41520a6ca7476e61bc00dc69e4e35967915c0f\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"0168af3aef7194e69365d59426af9febec01748061139f9e871e49f07ff8bb556c48bdd6b964b52a3e7c15f79e736cf26d2a112465c6f55b14282afc501e0d930e\"\n          }\n        }\n      },\n      \"deploy_hashes\": [\n        \"36916afec0f03d9c623d5bea8a75fcd524200171f86bffad870b76c4bbcb34a7\",\n        \"6a7ed752ef92764a64ebf88df0c44ed7d58211f89dd7bd6add801540b78e8785\",\n        \"d5825b9890a8ff70091f8aae7ef7cc50455c5689b7b0b1c9b37a4e04785e5805\"\n      ]\n    },\n    \"d760a41123785884162e45edfb1401603c60714c6cc79cad915387640b3c7c80\": {\n      \"height\": 89,\n      \"era\": 9,\n      \"approvals_hashes\": [],\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"d760a41123785884162e45edfb1401603c60714c6cc79cad915387640b3c7c80\",\n          \"era_id\": 9,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"014a69d3c75ca2cff5cc25aed853d355adbae83609646c26b19c6837a4905db46e15064751a4a2253ddb231ca1ae18c8f5b88dcd48dd7ea9385b0d9befd6f4e40b\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": 
\"01162d57c9602c3bc3387e1d01cfd9f112dc592ad7d763505962c57753e5f2bceeb8fe3766e17b0dcb9e059e127263c12415b08a75a7cafb03a1736c521c7b2e0d\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"013e252c82dd7f7deee6afa749a24cbd0792364d79354b1a1cc7b364a42d518d681ea15ca18a61dd90ae3cd2c3107996b1c59218c341ab400ad09ce54f12dae608\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"013c5fe5a70327570dc4f6a835ca353b95939a3e96678151d7e7d49bc7401feac63916c6d4a86dff1f97e5cc83e70fdafaef455cc0666bc73505b6e8a192c2f70d\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01e4cc1ec44a703b1a0b8a7b93f75ef0023f38c33e545b1263637470f8c2804c5d09bac20ae122dafca70a439353cc69f5e8518b9f8065bbd95371dcf97f892d02\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"8af440e30a8a0f3959f6504b19fe387a62d2b021c28862afbfd97707f6e1ef6c\": {\n      \"height\": 61,\n      \"era\": 6,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"8af440e30a8a0f3959f6504b19fe387a62d2b021c28862afbfd97707f6e1ef6c\",\n          \"era_id\": 6,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01a93fa7ed0fa5008c81ddca9dc760875a9bd84eba5c4ca561bb8e7434bb1a89b6d22fe129dd5bac3c68fa941e6d4dd0fc8f29bec15a0d78c9862db91b0a0b2e0f\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01407ba0fe0b7e70211bef8b39fd27f5edf4d2e3aff02194dbefcf9f734821255f3fb9cd0662f1192197375ca22c56499935e0fc5816df9df1bdefa67d62cdc00c\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"0140f7c24cd4ee18805778359f799451d5a4da6b70026a92144e60bacdc6b603b884d486fc2072e13616049f56974823fc5d1e07553d08fbda583e1118eccc2404\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": 
\"01ec88cba12e12a6de4e49e81cb5bbcafaae69b5a9e25991cc848700586920aa1aebe78a1545ef59929e37575d6211fdd9f660dbea6a692a669d82457b419f9802\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"0135ab7477aed57f31f7211ef9302e963a4b071346fe6956ad5518241f472b3fb3d469df0aae58b3c27e396775a908ea9a60ddb94c7bc3c0689f04fb3d65f92200\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"8952435b382f2e66462b4e04cc17b67b80a978bfef4a57b2fdc465839890f240\": {\n      \"height\": 65,\n      \"era\": 7,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"8952435b382f2e66462b4e04cc17b67b80a978bfef4a57b2fdc465839890f240\",\n          \"era_id\": 7,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"012a5ae2665f218cbe7e70e0a5a3da0e612b93eba6abb900a77279b98121ea26ebbf973e44e6dacfcf3c028d04c061ff31023f63af0b50705dd6b2d845a438e80a\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"018241745d90073634de1705af754d7227a4d37056077c4d2b136309feccd7faa0c76174a185b9a2b69cc1d03fff38debcb34bfeeb2088f6a2204eaac480748608\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01bfdce3909d4474e2cb2feea13de1d4597d1ad0cdee56713fe48aac8e2550267eff2032ff1a34b9454398f0c9685e10ee9bddff9ff2e6edb27dbb0dfd91df610e\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01cfbaba578f82a6f6f5bb3336b6d2c7288d6e704a5b1d92348a2ece3cbe1b52258424dfbf414905f415a9a10469b1d117e582c4bb97c19bdc4c5b4a6063468808\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01844e70fea67d5a715dc65d099fcd25e7bc60bf98f884bc416f735514596dba83d1fbceaebb29768ef1075d941cb66806f19634ccde8f777aa7408d319410fe0d\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    
\"ee09fa098a05f4fb6db77d700805389d4c099e09a7fe44cda0ca3baacf78c150\": {\n      \"height\": 62,\n      \"era\": 7,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"ee09fa098a05f4fb6db77d700805389d4c099e09a7fe44cda0ca3baacf78c150\",\n          \"era_id\": 7,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01bcab36324f166cfe835ac4e7e3b87798eba274c89aa27f89f694382f6aed169ec15b9c2326b9ac57ec2a020a6c75ebb258000a6a16f7fcb2a659f293c51b480a\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"01ad6e3cdbcba3ba61cb69d7fe0849765554cde2b6ab03ca1bd5efc4a87ad9347d131b55b15e312e768702625d9a7784ba3091e062d038a2fc7ece027b5d96740b\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"013520068954881e81331a299ad2143b6dfb3ce929e01f8f1ac2927a571fbcd28867cbaf9cb029571e59206b9f508644b9ef200ce0cd26eadfd62cd2767f213209\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"014fe6a703b2dd02d13b33fe43b3d8684ad71c98f543778aec5365c61a1ae0a0646bc29a52c268d2a3dcde9d7f61cd82a4ab4e3f8df94309a277b1b77536ba610e\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"0153e3f03cd865894151085dcbb843837e909bc9145196ee4334b1b40fcfb91c12a950fdb0fd56d56344d03eb921750da89bce51e15b3314103a2b6e7a17364e04\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"89af4615b4254df1d8af49becad7e998e4b296d1cb0cfea07524a0fd1589223b\": {\n      \"height\": 49,\n      \"era\": 5,\n      \"approvals_hashes\": null,\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"89af4615b4254df1d8af49becad7e998e4b296d1cb0cfea07524a0fd1589223b\",\n          \"era_id\": 5,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": 
\"015e07a579305638638cfe266b6e2e1f11d1bc8b07a615173f688cf9e7df0584b8265a689d722b52ed10bc7f0d75fa0e7c1036f35392fd3ea6439c40e241157203\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"0187f05e9224380f0f81fa77d36f74c36cd39eaa711b46bb4b4b8978ec5690adc1208052ba43b5755368817290615481a09d044da2b9872cad2fcc024392138a03\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": \"01115c591b8f77c142304e682b0517dac6128eb27bbcf83f849f9f77a67480410cf7948bd48f956fe18628224cf2bc319b50fb1916d979068990fc8892774dde09\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"01816d3e9cf8836ece45c7a54dd2372deffc416ef0af03f508f5fe210b78debb7196f01d28e5a92cc0c9b5919eecdb48d1ed87698ae1b210ca06a3596f2f6b320e\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"019cc260438112197ff50d777a2d4bf83920ecc317a293affe5cafbf6794bb09d9b32d6515cab233668d76d8efea5c53c018c75a82a73fc86f9c7add1e4ba82602\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    },\n    \"3de15c392cd63c17e53fcbdbb6ff544d86016bcee9b79e79f63f33ac14c489f9\": {\n      \"height\": 91,\n      \"era\": 9,\n      \"approvals_hashes\": [],\n      \"signatures\": {\n        \"V1\": {\n          \"block_hash\": \"3de15c392cd63c17e53fcbdbb6ff544d86016bcee9b79e79f63f33ac14c489f9\",\n          \"era_id\": 9,\n          \"proofs\": {\n            \"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92\": \"01f9c5bd4be0acf631e5bf9cfb1e9588aeef2fe0d6965d79b5496257dc06d4261e85e9ff0960a95d13313c268d2adb49b57bd77b34185d3636f22e89cb7fed3508\",\n            \"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c\": \"011561b7700db49c4d2c4915c7af51c8c018b29d5180b954db947b51c095915e322698dbdd035cf82aac82e3f415256eb728ccd5e1e9fce91517320f38bf4e8b05\",\n            \"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523\": 
\"017498f7a7c23d2205f47fe8cb1492bf7975ed80ad81e9b9e4e6de97de685cdeb7b0a422114a84b10e025d90f61327010b0c9644ec12a689dbb07fc1eccb340a0e\",\n            \"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4\": \"0156a5c0ef6d96b450d16248a65c68668852fc32429a859b8af29d81c9c28839f6ad2f2b872434cb6d6a6a447b21364f9b79d5026c44113d131152725a1dc6f10f\",\n            \"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca\": \"01fa32312fadb626abc4ff34692e05016d71b2cc5586332060c94cdacd1bd2060af1694f9cb42514b4816aea1ea6e644160bcf5b8bce67b505107e9220565ba901\"\n          }\n        }\n      },\n      \"deploy_hashes\": []\n    }\n  },\n  \"deploys\": [\n    \"2af0f120ccd04bead484b6053d87cb41930e650cfb8f2b7c2ba37c5f3f5de701\",\n    \"aa2ae8d13b0087c762dfc34cc5b3d0a30d04382a2fc707f259225915b342c09f\",\n    \"2a6c3ec008d34e9fc614bc6c264af5e3834a847d5690211a3393ffdfa4206a1b\",\n    \"975cd4ddf71c0fe4c81439e9b4fc09ace17fd644efd455b995c3445403b18cba\",\n    \"faaff0d5bb9e1b68616a7225b37840f87bba48b2257607033a96fccaa4e0401c\",\n    \"f1c728bb6a8217bcf4c41d0d415d08ffd043f2b15de2c949ddad4e072cf56c57\",\n    \"6942cff9e560c7d0864b812f30bea56e59e26be5136bc188197e1916fb3fab9d\",\n    \"163025024c68c001b1a940c2e63ae4e4654adea462078841b2f66ff6c9e04074\",\n    \"1b9d7591e1e800a19a99647849decdb074925f270ea16540ec9b356de9721e4c\",\n    \"448bbee6f586414eccb05fb97d28f26e59a4bc542497b323bcad51d71af92d6e\",\n    \"a9af14844140bab502565a0e6cacbff0227b19e4209d3495c97e275e5b6a6dac\",\n    \"706050b93a7e9bc5a19fd5fab792eb5ef5a9854ec3181fbb054f5dc7d4ee170f\",\n    \"a97dc9782e2dce920821d08ee85f2853b5dbf97c57c9349772ed36c723eb3542\",\n    \"ef35f210444ef48eb4bea3ea58867037cbd4106af59cba36a533e20625c01523\",\n    \"660eeb2cb9fdd88ad3296de498e38b9b97291cccf19cfb1b5c477c2c94f87b65\",\n    \"82c80204bbf55182a6785e92d4ff59f500d9cf561aca305b25d7f95ef258f7bf\",\n    \"e55f6646b64c204ca7f55d244a8879e5ac1e982a049b1d577f80418274490249\",\n    
\"2533d10d8eaaae38d9b3b8870e17ed535434e4708a84e444c669299e84abde3a\",\n    \"4a743f9843488b64a3940da46870977917f6e58e32bf1dbdeec84f8c24a33116\",\n    \"2fb450c21452b99bb8a21bc3a71a9e3266f540cde8577f671b857c0edcda2827\",\n    \"36916afec0f03d9c623d5bea8a75fcd524200171f86bffad870b76c4bbcb34a7\",\n    \"6a7ed752ef92764a64ebf88df0c44ed7d58211f89dd7bd6add801540b78e8785\",\n    \"d5825b9890a8ff70091f8aae7ef7cc50455c5689b7b0b1c9b37a4e04785e5805\",\n    \"9ab82d1a1c70f7ae053c6c45157d40c6ac0d93d087d4f574d35edbe39dd483b0\",\n    \"b87c10c4ba0846b615f77ee42f50a10279a9ed4d810671623952aa8d81988234\",\n    \"a397110248532ae6749617a4bf1f26b3a0130ff03c97d3489d1d66163c169857\",\n    \"3306c637dcb597811449816c39a4d8293ce21edc60310b56cdabeaa6ba252bf8\",\n    \"159ffefef5d88b2cb99fcceb471a3d2f0c2910c7c9de2db2c1e9845b60766224\",\n    \"58223e9b91d3ec7b73453217adbff91ccc2d34d514105bf9889599d1e9122b21\",\n    \"ed4ce8a502ef432f8d66be72afd80fe1415f15e710e39d2b63971a6f737c2c4c\",\n    \"abf88aa99476275ca657eaec26264c8b441fc5e6a8682f234e73d63163ed2415\",\n    \"35216eb0d783e53accf6e74a118c85e9674c856c04d22330cbd8497913741507\",\n    \"888c0d7a5988cb47eb83e1e7c4fa58a8d229b58b61cf932cc8f0622c864e707c\",\n    \"2651cb3fb060926d9f5edadfec7a41c7ff71f29f9fb9548f1c3624f5351e9a6b\",\n    \"4821b3e2a53f31ccbe3645a8fc17aab6e6ec9e7dc38fbfbbc2e96194da443181\",\n    \"1c98f5f335f0cf980192d56548a80ffb7b8d8c30e60f82765bc1a7ed9b41542a\",\n    \"5eb79399b62d08320e321d73ceb50523327d26149c6728a8d8a1ff33dabe722a\",\n    \"d6926f2291b62c998cfd73ffc154d1e8259dbc3daa101fdaa24752e65d85027d\",\n    \"ab8bc98911a606aade80c3e24b854d971d52d9e20a12744e0f97dcf95fea9ac9\",\n    \"607559135a1b9c446b53446b3cff100b3ff0f6efb474019a09562b83b833bd1c\",\n    \"ebb11e7fb132d580752254a1166bcf9fe2569f4952c97227de8847eb456c094c\",\n    \"ece0da05da8a349d5fcf89552c24dea8f17f876697e272b2175790a11b0d8521\",\n    \"e5f4a0ec09d7918c9871de663a677c2ad6bb88141e3825a5a8e57b4956a62d3a\",\n    
\"3f69510d52aaa21601d82c4d415856c15af50e9b250a04f6f038c427670fdcbf\",\n    \"5ae77f177af25fb12cee62c75261249c108151a02d91f4febcf5afee9c4c8140\",\n    \"00c5500f389bc2d1cba67c45d9f8132f2bc7fccdc0402ea42b73d76f71961a3e\"\n  ]\n}"
  },
  {
    "path": "resources/testnet/accounts.toml",
    "content": "[[accounts]]\npublic_key = \"018afa98ca4be12d613617f7339a2d576950a2f9a92102ca4d6508ee31b54d2c02\"\nbalance = \"9999995600000000000\"\n\n[[accounts]]\npublic_key = \"0106ca7c39cd272dbf21a86eeb3b36b7c26e2e9b94af64292419f7862936bca2ca\"\nbalance = \"100000000000\"\n\n[accounts.validator]\nbonded_amount = \"1000000000000\"\ndelegation_rate = 10\n\n[[accounts]]\npublic_key = \"017d96b9a63abcb61c870a4f55187a0a7ac24096bdb5fc585c12a686a4d892009e\"\nbalance = \"100000000000\"\n\n[accounts.validator]\nbonded_amount = \"1000000000000\"\ndelegation_rate = 10\n\n[[accounts]]\npublic_key = \"016f6ed70e4a5acec750dc087674e5de2ad7b6d9595945c4059c5ca1a47d4dd3ab\"\nbalance = \"1100000000000\"\n\n[[accounts]]\npublic_key = \"01fddc7e47ac36a240007eba368e417dd52d824026fb9baaa1454817993ce985ea\"\nbalance = \"1100000000000\"\n"
  },
  {
    "path": "resources/testnet/chainspec.toml",
    "content": "[protocol]\n# Protocol version.\nversion = '2.2.0'\n# Whether we need to clear latest blocks back to the switch block just before the activation point or not.\nhard_reset = true\n# This protocol version becomes active at this point.\n#\n# If it is a timestamp string, it represents the timestamp for the genesis block.  This is the beginning of era 0.  By\n# this time, a sufficient majority (> 50% + F/2 — see finality_threshold_fraction below) of validator nodes must be up\n# and running to start the blockchain.  This timestamp is also used in seeding the pseudo-random number generator used\n# in contract-runtime for computing genesis post-state hash.\n#\n# If it is an integer, it represents an era ID, meaning the protocol version becomes active at the start of this era.\nactivation_point = 21500\n\n[network]\n# Human readable name for convenience; the genesis_hash is the true identifier.  The name influences the genesis hash by\n# contributing to the seeding of the pseudo-random number generator used in contract-runtime for computing genesis\n# post-state hash.\nname = 'casper-test'\n# The maximum size of an acceptable networking message in bytes.  Any message larger than this will\n# be rejected at the networking level.\nmaximum_net_message_size = 25_165_824\n\n[core]\n# Era duration.\nera_duration = '120 minutes'\n# Minimum number of blocks per era.  An era will take longer than `era_duration` if that is necessary to reach the\n# minimum height.\nminimum_era_height = 100\n# Minimum difference between a block's and its child's timestamp.\nminimum_block_time = '8000 ms'\n# Number of slots available in validator auction.\nvalidator_slots = 100\n# A number between 0 and 1 representing the fault tolerance threshold as a fraction, used by the internal finalizer.\n# It is the fraction of validators that would need to equivocate to make two honest nodes see two conflicting blocks as\n# finalized: A higher value F makes it safer to rely on finalized blocks. 
 It also makes it more difficult to finalize\n# blocks, however, and requires strictly more than (F + 1)/2 validators to be working correctly.\nfinality_threshold_fraction = [1, 3]\n# Protocol version from which nodes are required to hold strict finality signatures.\nstart_protocol_version_with_strict_finality_signatures_required = '1.5.0'\n# Which finality is required for legacy blocks. Options are 'Strict', 'Weak' and 'Any'.\n# Used to determine finality sufficiency for new joiners syncing blocks created\n# in a protocol version before\n# `start_protocol_version_with_strict_finality_signatures_required`.\nlegacy_required_finality = 'Any'\n# Number of eras before an auction actually defines the set of validators.  If you bond with a sufficient bid in era N,\n# you will be a validator in era N + auction_delay + 1.\nauction_delay = 1\n# The period after genesis during which a genesis validator's bid is locked.\nlocked_funds_period = '0 days'\n# The period in which genesis validator's bid is released over time after it's unlocked.\nvesting_schedule_period = '0 weeks'\n# Default number of eras that need to pass to be able to withdraw unbonded funds.\nunbonding_delay = 7\n# Round seigniorage rate represented as a fraction of the total supply.\n#\n# Annual issuance: 0.25%\n# Minimum block time: 8000 milliseconds\n# Ticks per year: 31536000000\n#\n# (1+0.0025)^((8000)/31536000000)-1 is expressed as a fractional number below\n# Python:\n# from fractions import Fraction\n# Fraction((1 + 0.0025)**((8000)/31536000000) - 1).limit_denominator(1000000000)\nround_seigniorage_rate = [1, 1000000000]\n# Maximum number of associated keys for a single account.\nmax_associated_keys = 100\n# Maximum height of contract runtime call stack.\nmax_runtime_call_stack_height = 12\n# Minimum allowed delegation amount in motes\nminimum_delegation_amount = 500_000_000_000\n# Maximum allowed delegation amount in motes\nmaximum_delegation_amount = 1_000_000_000_000_000_000\n# Minimum bid amount 
allowed in motes. Withdrawing one's bid to an amount strictly less than\n# the value specified will be treated as a full unbond of a validator and their associated delegators\nminimum_bid_amount = 500_000_000_000\n# Global state prune batch size (0 = this feature is off)\nprune_batch_size = 0\n# Enables strict arguments checking when calling a contract; i.e. that all non-optional args are provided and of the correct `CLType`.\nstrict_argument_checking = false\n# Number of simultaneous peer requests.\nsimultaneous_peer_requests = 5\n# The consensus protocol to use. Options are \"Zug\" and \"Highway\".\nconsensus_protocol = 'Zug'\n# The maximum amount of delegators per validator.\nmax_delegators_per_validator = 1200\n# Minimum delegation rate validators can specify (0-100).\nminimum_delegation_rate = 0\n# The split in finality signature rewards between block producer and participating signers.\nfinders_fee = [1, 5]\n# The proportion of baseline rewards going to reward finality signatures specifically.\nfinality_signature_proportion = [95, 100]\n# Lookback interval indicating which past block we are looking at to reward.\nsignature_rewards_max_delay = 6\n# Allows transfers between accounts in the blockchain network.\n#\n# Setting this to false restricts normal accounts from sending tokens to other accounts, allowing transfers only to administrators.\n# Changing this option makes sense only on private chains.\nallow_unrestricted_transfers = true\n# Enables the auction entry points 'delegate' and 'add_bid'.\n#\n# Setting this to false makes sense only for private chains which don't need to auction new validator slots. 
These\n# auction entry points will return an error if called when this option is set to false.\nallow_auction_bids = true\n# If set to false, then consensus doesn't compute rewards and always uses 0.\ncompute_rewards = true\n# Defines how refunds of the unused portion of payment amounts are calculated and handled.\n#\n# Valid options are:\n#   'refund': a ratio of the unspent token is returned to the spender.\n#   'burn': a ratio of the unspent token is burned.\n#   'no_refund': no refunds are paid out; this is functionally equivalent to refund with 0% ratio.\n# This causes excess payment amounts to be sent to either a\n# pre-defined purse, or back to the sender.  The refunded amount is calculated as the given ratio of the payment amount\n# minus the execution costs.\nrefund_handling = { type = 'refund', refund_ratio = [75, 100] }\n# Defines how fees are handled.\n#\n# Valid options are:\n#   'no_fee': fees are eliminated.\n#   'pay_to_proposer': fees are paid to the block proposer\n#   'accumulate': fees are accumulated in a special purse and distributed at the end of each era evenly among all\n#                 administrator accounts\n#   'burn': fees are burned\nfee_handling = { type = 'burn' }\n# If a validator would receive a validator credit, it cannot exceed this percentage of their total stake.\nvalidator_credit_cap = [1, 5]\n# Defines how pricing is handled.\n#\n# Valid options are:\n#   'payment_limited': senders of transaction self-specify how much they pay.\n#   'fixed': costs are fixed, per the cost table\n#   'prepaid': prepaid transaction (currently not supported)\npricing_handling = { type = 'payment_limited' }\n# Does the network allow pre-payment for future\n# execution? 
Currently not supported.\n#\nallow_prepaid = false\n# Defines how gas holds affect available balance calculations.\n#\n# Valid options are:\n#   'accrued': sum of full value of all non-expired holds.\n#   'amortized': sum of each hold is amortized over the time remaining until expiry.\n#\n# For instance, if 12 hours remained on a gas hold with a 24-hour `gas_hold_interval`,\n#   with accrued, the full hold amount would be applied\n#   with amortized, half the hold amount would be applied\ngas_hold_balance_handling = { type = 'accrued' }\n# Defines how long gas holds last.\n#\n# If fee_handling is set to 'no_fee', the system places a balance hold on the payer\n# equal to the value the fee would have been. Such balance holds expire after a time\n# interval has elapsed. This setting controls how long that interval is. The available\n# balance of a purse equals its total balance minus the held amount(s) of non-expired\n# holds (see gas_hold_balance_handling setting for details of how that is calculated).\n#\n# For instance, if gas_hold_interval is 24 hours and 100 gas is used from a purse,\n# a hold for 100 is placed on that purse and is considered when calculating total balance\n# for 24 hours starting from the block_time when the hold was placed.\ngas_hold_interval = '24 hours'\n# List of public keys of administrator accounts. Setting this option makes sense only on private chains which require\n# administrator accounts for regulatory reasons.\nadministrators = []\n# Flag that triggers a migration of all userland accounts and contracts present in global state to the addressable\n# entity in a lazy manner.\n# If the flag is set to false then no accounts and contracts are migrated during a protocol upgrade;\n# i.e. 
all Account records will be present under Key::Account and Contracts and their associated ContractPackage\n# will be written underneath Key::Hash.\n# If the flag is set to true then accounts and contracts are migrated lazily; i.e. on first use of the Account\n# and/or Contract as part of the execution of a Transaction. This means the Accounts/Contracts will be migrated\n# to their corresponding AddressableEntity and the NamedKeys for the previous record are separated and written\n# as discrete top level records. For Contracts specifically the entrypoints are also written as discrete top\n# level records\n# Note: Enabling of the AddressableEntity feature is one-way; i.e. once enabled as part of a protocol upgrade\n# the flag cannot be disabled in a future protocol upgrade.\nenable_addressable_entity = false\n# This value is used as the penalty payment amount, the lowest cost, and the minimum balance amount.\nbaseline_motes_amount = 2_500_000_000\n# Flag on whether ambiguous entity versions return an execution error.\ntrap_on_ambiguous_entity_version = false\n# Controls how rewards are handled by the network\n# purse uref-b06a1ab0cfb52b5d4f9a08b68a5dbe78e999de0b0484c03e64f5c03897cf637b-007 belongs to \n# account 018afa98ca4be12d613617f7339a2d576950a2f9a92102ca4d6508ee31b54d2c02 (faucet account for testnet)\nrewards_handling = { type = 'sustain', ratio = [2,8], purse_address = \"uref-b06a1ab0cfb52b5d4f9a08b68a5dbe78e999de0b0484c03e64f5c03897cf637b-007\" }\n\n\n[highway]\n# Highway dynamically chooses its round length, between minimum_block_time and maximum_round_length.\nmaximum_round_length = '66 seconds'\n\n[transactions]\n# The duration after the transaction timestamp that it can be included in a block.\nmax_ttl = '2 hours'\n# The maximum number of approvals permitted in a single block.\nblock_max_approval_count = 2600\n# Maximum block size in bytes including transactions contained by the block.  
0 means unlimited.\nmax_block_size = 2_621_400\n# The upper limit of total gas of all transactions in a block.\nblock_gas_limit = 812_500_000_000\n# The minimum amount in motes for a valid native transfer.\nnative_transfer_minimum_motes = 2_500_000_000\n# The maximum value to which `transaction_acceptor.timestamp_leeway` can be set in the config.toml file.\nmax_timestamp_leeway = '5 seconds'\n\n# Configuration of the transaction runtime.\n[transactions.enabled_runtime]\nvm_casper_v1 = true\nvm_casper_v2 = false\n\n[transactions.v1]\n# The configuration settings for the lanes of transactions including both native and Wasm based interactions.\n# Currently the node supports two native interactions the mint and auction and have the reserved identifiers of 0 and 1\n# respectively\n# The remaining wasm based lanes specify the range of configuration settings for a given Wasm based transaction\n# within a given lane.\n# The maximum length in bytes of runtime args per V1 transaction.\n# [0] -> Transaction lane label (apart from the reserved native identifiers these are simply labels)\n# Note: For the given mainnet implementation we specially reserve the label 2 for install and upgrades and\n# the lane must be present and defined.\n# Different casper networks may not impose such a restriction.\n# [1] -> Max serialized length of the entire transaction in bytes for a given transaction in a certain lane\n# [2] -> Max args length size in bytes for a given transaction in a certain lane\n# [3] -> Transaction gas limit for a given transaction in a certain lane\n# [4] -> The maximum number of transactions the lane can contain\nnative_mint_lane = [0, 2048, 1024, 100_000_000, 325]\nnative_auction_lane = [1, 3096, 2048, 2_500_000_000, 325]\ninstall_upgrade_lane = [2, 750_000, 2048, 1_000_000_000_000, 1]\nwasm_lanes = [\n    [3, 750_000, 2048, 1_000_000_000_000, 1],\n    [4, 131_072, 1024, 100_000_000_000, 2],\n    [5, 65_536, 512, 5_000_000_000, 40]\n]\n\n[transactions.deploy]\n# The 
maximum number of Motes allowed to be spent during payment.  0 means unlimited.\nmax_payment_cost = '0'\n# The limit of length of serialized payment code arguments.\npayment_args_max_length = 1024\n# The limit of length of serialized session code arguments.\nsession_args_max_length = 1024\n\n[wasm.v1]\n# Amount of free memory (in 64kB pages) each contract can use for stack.\nmax_memory = 64\n# Max stack height (native WebAssembly stack limiter).\nmax_stack_height = 500\n\n[storage_costs]\n# Gas charged per byte stored in the global state.\ngas_per_byte = 1_117_587\n\n# For each opcode cost below there exists a static cost and a dynamic cost.\n# The static cost is a fixed cost for each opcode that is hardcoded and validated by benchmarks.\n[wasm.v1.opcode_costs]\n# Bit operations multiplier.\nbit = 105\n# Arithmetic add operations multiplier.\nadd = 105\n# Mul operations multiplier.\nmul = 105\n# Div operations multiplier.\ndiv = 105\n# Memory load operation multiplier.\nload = 105\n# Memory store operation multiplier.\nstore = 105\n# Const store operation multiplier.\nconst = 105\n# Local operations multiplier.\nlocal = 105\n# Global operations multiplier.\nglobal = 105\n# Integer operations multiplier.\ninteger_comparison = 105\n# Conversion operations multiplier.\nconversion = 105\n# Unreachable operation multiplier.\nunreachable = 105\n# Nop operation multiplier.\nnop = 105\n# Get current memory operation multiplier.\ncurrent_memory = 105\n# Grow memory cost, per page (64kb).\ngrow_memory = 900\n# Sign extension operations cost\nsign = 105\n\n# Control flow operations multiplier.\n[wasm.v1.opcode_costs.control_flow]\nblock = 255\nloop = 255\nif = 105\nelse = 105\nend = 105\nbr = 1665\nbr_if = 510\nreturn = 105\nselect = 105\ncall = 225\ncall_indirect = 270\ndrop = 105\n\n[wasm.v1.opcode_costs.control_flow.br_table]\n# Fixed cost per `br_table` opcode\ncost = 150\n# Size of target labels in the `br_table` opcode will be multiplied by 
`size_multiplier`\nsize_multiplier = 100\n\n# Host function declarations are located in smart_contracts/contract/src/ext_ffi.rs\n[wasm.v1.host_function_costs]\nadd = { cost = 5_800, arguments = [0, 0, 0, 0] }\nadd_associated_key = { cost = 1_200_000, arguments = [0, 0, 0] }\nadd_contract_version = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 0, 0] }\nadd_contract_version_with_message_topics = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 30_000, 0, 0] }\nadd_package_version_with_message_topics = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 30_000, 0, 0] }\nblake2b = { cost = 1_200_000, arguments = [0, 120_000, 0, 0] }\ncall_contract = { cost = 300_000_000, arguments = [0, 0, 0, 120_000, 0, 120_000, 0] }\ncall_versioned_contract = { cost = 300_000_000, arguments = [0, 0, 0, 0, 0, 120_000, 0, 120_000, 0] }\ncreate_contract_package_at_hash = { cost = 200, arguments = [0, 0] }\ncreate_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0] }\ncreate_purse = { cost = 2_500_000_000, arguments = [0, 0] }\ndisable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] }\nget_balance = { cost = 3_000_000, arguments = [0, 0, 0] }\nget_blocktime = { cost = 330, arguments = [0] }\nget_caller = { cost = 380, arguments = [0] }\nget_key = { cost = 2_000, arguments = [0, 440, 0, 0, 0] }\nget_main_purse = { cost = 1_300, arguments = [0] }\nget_named_arg = { cost = 200, arguments = [0, 120_000, 0, 120_000] }\nget_named_arg_size = { cost = 200, arguments = [0, 0, 0] }\nget_phase = { cost = 710, arguments = [0] }\nget_system_contract = { cost = 1_100, arguments = [0, 0, 0] }\nhas_key = { cost = 1_500, arguments = [0, 840] }\nis_valid_uref = { cost = 760, arguments = [0, 0] }\nload_named_keys = { cost = 42_000, arguments = [0, 0] }\nnew_uref = { cost = 17_000, arguments = [0, 0, 590] }\nrandom_bytes = { cost = 200, arguments = [0, 0] }\nprint = { cost = 20_000, arguments = [0, 4_600] }\nprovision_contract_user_group_uref = { cost = 
200, arguments = [0, 0, 0, 0, 0] }\nput_key = { cost = 100_000_000, arguments = [0, 120_000, 0, 120_000] }\nread_host_buffer = { cost = 3_500, arguments = [0, 310, 0] }\nread_value = { cost = 60_000, arguments = [0, 120_000, 0] }\ndictionary_get = { cost = 5_500, arguments = [0, 590, 0] }\nremove_associated_key = { cost = 4_200, arguments = [0, 0] }\nremove_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0] }\nremove_contract_user_group_urefs = { cost = 200, arguments = [0, 0, 0, 0, 0, 120_000] }\nremove_key = { cost = 61_000, arguments = [0, 3_200] }\nret = { cost = 23_000, arguments = [0, 420_000] }\nrevert = { cost = 500, arguments = [0] }\nset_action_threshold = { cost = 74_000, arguments = [0, 0] }\ntransfer_from_purse_to_account = { cost = 2_500_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] }\ntransfer_from_purse_to_purse = { cost = 82_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0] }\ntransfer_to_account = { cost = 2_500_000_000, arguments = [0, 0, 0, 0, 0, 0, 0] }\nupdate_associated_key = { cost = 4_200, arguments = [0, 0, 0] }\nwrite = { cost = 14_000, arguments = [0, 0, 0, 980] }\ndictionary_put = { cost = 9_500, arguments = [0, 1_800, 0, 520] }\nenable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] }\nmanage_message_topic = { cost = 200, arguments = [0, 30_000, 0, 0] }\nemit_message = { cost = 200, arguments = [0, 30_000, 0, 120_000] }\ngeneric_hash = { cost = 1_200_000, arguments = [0, 120_000, 0, 0, 0] }\ncost_increase_per_message = 50\nget_block_info = { cost = 330, arguments = [0, 0] }\nrecover_secp256k1 = { cost = 1_300_000, arguments = [0, 120_000, 0, 0, 0, 0] }\nverify_signature = { cost = 1_300_000, arguments = [0, 120_000, 0, 0, 0, 0] }\ncall_package_version = { cost = 300_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 120_000, 0, 120_000, 0] }\n\n[wasm.v2]\n# Amount of free memory each contract can use for stack.\nmax_memory = 64\n\n[wasm.v2.opcode_costs]\n# Bit operations multiplier.\nbit = 105\n# Arithmetic add operations 
multiplier.\nadd = 105\n# Mul operations multiplier.\nmul = 105\n# Div operations multiplier.\ndiv = 105\n# Memory load operation multiplier.\nload = 105\n# Memory store operation multiplier.\nstore = 105\n# Const store operation multiplier.\nconst = 105\n# Local operations multiplier.\nlocal = 105\n# Global operations multiplier.\nglobal = 105\n# Integer operations multiplier.\ninteger_comparison = 105\n# Conversion operations multiplier.\nconversion = 105\n# Unreachable operation multiplier.\nunreachable = 105\n# Nop operation multiplier.\nnop = 105\n# Get current memory operation multiplier.\ncurrent_memory = 105\n# Grow memory cost, per page (64kb).\ngrow_memory = 900\n# Sign extension operations cost\nsign = 105\n\n# Control flow operations multiplier.\n[wasm.v2.opcode_costs.control_flow]\nblock = 255\nloop = 255\nif = 105\nelse = 105\nend = 105\nbr = 1665\nbr_if = 510\nreturn = 105\nselect = 105\ncall = 225\ncall_indirect = 270\ndrop = 105\n\n[wasm.v2.opcode_costs.control_flow.br_table]\n# Fixed cost per `br_table` opcode\ncost = 150\n# Size of target labels in the `br_table` opcode will be multiplied by `size_multiplier`\nsize_multiplier = 100\n\n[wasm.v2.host_function_costs]\nread = { cost = 0, arguments = [0, 0, 0, 0, 0, 0] }\nwrite = { cost = 0, arguments = [0, 0, 0, 0, 0] }\nremove = { cost = 0, arguments = [0, 0, 0] }\ncopy_input = { cost = 0, arguments = [0, 0] }\nret = { cost = 0, arguments = [0, 0] }\ncreate = { cost = 0, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] }\ntransfer = { cost = 0, arguments = [0, 0, 0] }\nenv_balance = { cost = 0, arguments = [0, 0, 0, 0] }\nupgrade = { cost = 0, arguments = [0, 0, 0, 0, 0, 0] }\ncall = { cost = 0, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] }\nprint = { cost = 0, arguments = [0, 0] }\nemit = { cost = 0, arguments = [0, 0, 0, 0] }\nenv_info = { cost = 0, arguments = [0, 0] }\n\n[wasm.messages_limits]\nmax_topic_name_size = 256\nmax_topics_per_contract = 128\nmax_message_size = 1_024\n\n[system_costs]\n# Penalty 
charge for calling an invalid entry point in a system contract.\nno_such_entrypoint = 2_500_000_000\n\n[system_costs.auction_costs]\nget_era_validators = 2_500_000_000\nread_seigniorage_recipients = 5_000_000_000\nadd_bid = 2_500_000_000\nwithdraw_bid = 2_500_000_000\ndelegate = 2_500_000_000\nundelegate = 2_500_000_000\nrun_auction = 2_500_000_000\nslash = 2_500_000_000\ndistribute = 2_500_000_000\nwithdraw_delegator_reward = 5_000_000_000\nwithdraw_validator_reward = 5_000_000_000\nread_era_id = 2_500_000_000\nactivate_bid = 2_500_000_000\nredelegate = 2_500_000_000\nchange_bid_public_key = 5_000_000_000\nadd_reservations = 2_500_000_000\ncancel_reservations = 2_500_000_000\n\n[system_costs.mint_costs]\nmint = 2_500_000_000\nreduce_total_supply = 2_500_000_000\ncreate = 2_500_000_000\nbalance = 100_000_000\nburn = 100_000_000\ntransfer = 100_000_000\nread_base_round_reward = 2_500_000_000\nmint_into_existing_purse = 2_500_000_000\n\n[system_costs.handle_payment_costs]\nget_payment_purse = 10_000\nset_refund_purse = 10_000\nget_refund_purse = 10_000\nfinalize_payment = 2_500_000_000\n\n[system_costs.standard_payment_costs]\npay = 10_000\n\n[vacancy]\n# The cost of a transaction is based on a multiplier. This allows for economic disincentives for misuse of the network.\n#\n# The network starts with a current_gas_price of min_gas_price.\n#\n# Each block has multiple limits (bytes, transactions, transfers, gas, etc.)\n# The utilization for a block is determined by the highest percentage utilization of each of these limits.\n#\n# Ex: transfers limit is 650 and transactions limit is 20 (assume other limits are not a factor here)\n#     19 transactions -> 19/20 or 95%\n#     600 transfers -> 600/650 or 92.3%\n#     resulting block utilization is 95\n#\n# The utilization for an era is the average of all block utilizations. 
At the switch block, the dynamic gas_price is\n# adjusted with the following:\n#\n# If utilization was below the lower_threshold, current_gas_price is decremented by one if higher than min_gas_price.\n# If utilization falls between the thresholds, current_gas_price is not changed.\n# If utilization was above the upper_threshold, current_gas_price is incremented by one if lower than max_gas_price.\n#\n# The cost charged for the transaction is simply the gas_used * current_gas_price.\nupper_threshold = 90\nlower_threshold = 50\nmax_gas_price = 1\nmin_gas_price = 1\n"
  },
  {
    "path": "resources/testnet/config-example.toml",
    "content": "# ================================\n# Configuration options for a node\n# ================================\n[node]\n\n# If set, use this hash as a trust anchor when joining an existing network.\n#trusted_hash = 'HEX-FORMATTED BLOCK HASH'\n\n# Historical sync behavior for this node. Options are:\n#  'ttl'      (node will attempt to acquire all block data to comply with time to live enforcement)\n#  'genesis'  (node will attempt to acquire all block data back to genesis)\n#  'nosync'   (node will only acquire blocks moving forward)\n#  'isolated' (node will initialize without peers and will not accept peers)\n#  'completeblock' (node will acquire complete block and shutdown)\n# note: the only two states allowed to switch to Validate reactor state are `genesis` and `ttl`.\n#       it is recommended for dedicated validator nodes to be in ttl mode to increase\n#       their ability to maintain maximal uptime...if a long-running genesis validator\n#       goes offline and comes back up while in genesis mode, it must backfill\n#       any gaps in its block awareness before resuming validation.\n#\n#       it is recommended for reporting non-validator nodes to be in genesis mode to\n#       enable support for queries at any block height.\n#\n#       it is recommended for non-validator working nodes (for dapp support, etc) to run in\n#       ttl or nosync mode (depending upon their specific data requirements).\n#\n#       thus for instance a node backing a block explorer would prefer genesis mode,\n#       while a node backing a dapp interested in very recent activity would prefer to run in nosync mode,\n#       and a node backing a dapp interested in auction activity or tracking trends would prefer to run in ttl mode.\n# note: as time goes on, the time to sync back to genesis takes progressively longer.\n# note: ttl is a chainspec configured behavior on a given network; consult the `max_ttl` chainspec setting\n#       (it is currently ~2 hours by default on 
production and production-like networks but subject to change).\n# note: `nosync` is incompatible with validator behavior; a nosync node is prevented from participating\n#        in consensus / switching to validate mode. it is primarily for lightweight nodes that are\n#        only interested in recent activity.\n# note: an isolated node will not connect to, sync with, or keep up with the network, but will respond to\n#       binary port, rest server, event server, and diagnostic port connections.\nsync_handling = 'ttl'\n\n# Idle time after which the syncing process is considered stalled.\nidle_tolerance = '20 minutes'\n\n# When the syncing process is considered stalled, it'll be retried up to `max_attempts` times.\nmax_attempts = 3\n\n# Default delay for the control events that have no dedicated delay requirements.\ncontrol_logic_default_delay = '1 second'\n\n# Flag which forces the node to resync all the blocks.\nforce_resync = false\n\n# A timeout for the ShutdownForUpgrade state, after which the node will upgrade even if not all\n# conditions are satisfied.\nshutdown_for_upgrade_timeout = '2 minutes'\n\n# Maximum time a node will wait for an upgrade to commit.\nupgrade_timeout = '30 seconds'\n\n# The node detects when it should do a controlled shutdown when it is in a detectably bad state\n# in order to avoid potentially catastrophic uncontrolled crashes. Generally, a node should be\n# allowed to shutdown, and if restarted that node will generally recover gracefully and resume\n# normal operation. However, actively validating nodes have subjective state in memory that is\n# lost on shutdown / restart and must be reacquired from other validating nodes on restart.\n# If all validating nodes shutdown in the middle of an era, social consensus is required to restart\n# the network. 
As a mitigation for that, the following config can be set to true on some validator\n# nodes to cause nodes that are supposed to be validators in the current era to ignore controlled\n# shutdown events and stay up. This allows them to act as sentinels for the consensus data for\n# other restarting nodes. This config is inert on non-validating nodes.\nprevent_validator_shutdown = false\n\n# =================================\n# Configuration options for logging\n# =================================\n[logging]\n\n# Output format.  Possible values are 'text' or 'json'.\nformat = 'json'\n\n# Colored output.  Has no effect if format = 'json'.\ncolor = false\n\n# Abbreviate module names in text output.  Has no effect if format = 'json'.\nabbreviate_modules = false\n\n\n# ===================================\n# Configuration options for consensus\n# ===================================\n[consensus]\n\n# Path (absolute, or relative to this config.toml) to validator's secret key file used to sign\n# consensus messages.\nsecret_key_path = '/etc/casper/validator_keys/secret_key.pem'\n\n# The maximum number of blocks by which execution is allowed to lag behind finalization.\n# If it is more than that, consensus will pause, and resume once the executor has caught up.\nmax_execution_delay = 6\n\n\n# =======================================\n# Configuration options for Zug consensus\n# =======================================\n[consensus.zug]\n\n# Request the latest protocol state from a random peer periodically, with this interval.\n# '0 seconds' means it is disabled and we never request the protocol state from a peer.\nsync_state_interval = '1 second'\n\n# Log inactive or faulty validators periodically, with this interval.\n# '0 seconds' means it is disabled and we never print the log message.\nlog_participation_interval = '1 minute'\n\n# The minimal proposal timeout. 
Validators wait this long for a proposal to receive a quorum of\n# echo messages, before they vote to make the round skippable and move on to the next proposer.\nproposal_timeout = '5 seconds'\n\n# The additional proposal delay that is still considered fast enough, in percent. This should\n# take into account variables like empty vs. full blocks, network traffic etc.\n# E.g. if proposing a full block while under heavy load takes 50% longer than an empty one\n# while idle this should be at least 50, meaning that the timeout is 50% longer than\n# necessary for a quorum of recent proposals, approximately.\nproposal_grace_period = 200\n\n# The average number of rounds after which the proposal timeout adapts by a factor of 2.\n# Note: It goes up faster than it goes down: it takes fewer rounds to double than to halve.\nproposal_timeout_inertia = 10\n\n# The maximum difference between validators' clocks we expect. Incoming proposals whose timestamp\n# lies in the future by more than that are rejected.\nclock_tolerance = '1 second'\n\n\n# ===========================================\n# Configuration options for Highway consensus\n# ===========================================\n[consensus.highway]\n\n# The duration for which incoming vertices with missing dependencies should be kept in a queue.\npending_vertex_timeout = '30 minutes'\n\n# Request the latest protocol state from a random peer periodically, with this interval.\n# '0 seconds' means it is disabled and we never request the protocol state from a peer.\nrequest_state_interval = '20 seconds'\n\n# Log inactive or faulty validators periodically, with this interval.\n# '0 seconds' means it is disabled and we never print the log message.\nlog_participation_interval = '1 minute'\n\n# Log the synchronizer state periodically, with this interval.\n# '0 seconds' means it is disabled and we never print the log message.\nlog_synchronizer_interval = '5 seconds'\n\n# Log the size of every incoming and outgoing serialized 
unit.\nlog_unit_sizes = false\n\n# The maximum number of peers we request the same vertex from in parallel.\nmax_requests_for_vertex = 5\n\n# The maximum number of dependencies we request per validator in a batch.\n# Limits requests per validator in panorama - in order to get a total number of\n# requests, multiply by # of validators.\nmax_request_batch_size = 20\n\n[consensus.highway.round_success_meter]\n# The number of most recent rounds we will be keeping track of.\nnum_rounds_to_consider = 40\n\n# The number of successful rounds that triggers us to slow down: With this many or fewer\n# successes per `num_rounds_to_consider`, we increase our round length.\nnum_rounds_slowdown = 10\n\n# The number of successful rounds that triggers us to speed up: With this many or more successes\n# per `num_rounds_to_consider`, we decrease our round length.\nnum_rounds_speedup = 32\n\n# We will try to accelerate (decrease our round length) every `acceleration_parameter` rounds if\n# we have few enough failures.\nacceleration_parameter = 40\n\n# The FTT, as a percentage (i.e. 
`acceleration_ftt = [1, 100]` means 1% of the validators' total weight), which\n# we will use for looking for a summit in order to determine a proposal's finality.\n# The required quorum in a summit we will look for to check if a round was successful is\n# determined by this FTT.\nacceleration_ftt = [1, 100]\n\n\n# ====================================\n# Configuration options for networking\n# ====================================\n[network]\n\n# The public address of the node.\n#\n# It must be publicly available in order to allow peers to connect to this node.\n# If the port is set to 0, the actual bound port will be substituted.\npublic_address = '<IP ADDRESS>:0'\n\n# Address to bind to for listening.\n# If port is set to 0, a random port will be used.\nbind_address = '0.0.0.0:35000'\n\n# Addresses to connect to in order to join the network.\n#\n# If not set, this node will not be able to attempt to connect to the network.  Instead it will\n# depend upon peers connecting to it.  This is normally only useful for the first node of the\n# network.\n#\n# Multiple addresses can be given and the node will attempt to connect to each, requiring at least\n# one connection.\nknown_addresses = ['135.148.34.2:35000','135.148.34.29:35000','188.165.231.104:35000','49.12.126.24:35000']\n\n# Minimum number of fully-connected peers to consider network component initialized.\nmin_peers_for_initialization = 3\n\n# The interval between each fresh round of gossiping the node's public address.\ngossip_interval = '120 seconds'\n\n# Initial delay for starting address gossipping after the network starts. This should be slightly\n# more than the expected time required for initial connections to complete.\ninitial_gossip_delay = '5 seconds'\n\n# How long a connection is allowed to be stuck as pending before it is abandoned.\nmax_addr_pending_time = '1 minute'\n\n# Maximum time allowed for a connection handshake between two nodes to be completed. 
Connections\n# exceeding this threshold are considered unlikely to be healthy or even malicious and thus\n# terminated.\nhandshake_timeout = '20 seconds'\n\n# Maximum number of incoming connections per unique peer allowed. If the limit is hit, additional\n# connections will be rejected. A value of `0` means unlimited.\nmax_incoming_peer_connections = 3\n\n# The maximum total of upstream bandwidth in bytes per second allocated to non-validating peers.\n# A value of `0` means unlimited.\nmax_outgoing_byte_rate_non_validators = 6553600\n\n# The maximum allowed total impact of requests from non-validating peers per second answered.\n# A value of `0` means unlimited.\nmax_incoming_message_rate_non_validators = 3000\n\n# Maximum number of requests for data from a single peer that are allowed be buffered. A value of\n# `0` means unlimited.\nmax_in_flight_demands = 50\n\n# Version threshold to enable tarpit for.\n#\n# When set to a version (the value may be `null` to disable the feature), any peer that reports a\n# protocol version equal or below the threshold will be rejected only after holding open the\n# connection for a specific (`tarpit_duration`) amount of time.\n#\n# This option makes most sense to enable on known nodes with addresses where legacy nodes that are\n# still in operation are connecting to, as these older versions will only attempt to reconnect to\n# other nodes once they have exhausted their set of known nodes.\ntarpit_version_threshold = '1.2.1'\n\n# How long to hold connections to trapped legacy nodes.\ntarpit_duration = '10 minutes'\n\n# The probability [0.0, 1.0] of this node trapping a legacy node.\n#\n# Since older nodes will only reconnect if all their options are exhausted, it is sufficient for a\n# single known node to hold open a connection to prevent the node from reconnecting. 
This should be\n# set to `1/n` or higher, with `n` being the number of known nodes expected in the configuration of\n# legacy nodes running this software.\ntarpit_chance = 0.2\n\n# Minimum time a peer is kept on block list before being redeemed. The actual\n# timeout duration is calculated by selecting a random value between\n# <blocklist_retain_min_duration, blocklist_retain_max_duration>.\nblocklist_retain_min_duration = '2 minutes'\n\n# Maximum time a peer is kept on block list before being redeemed. The actual\n# timeout duration is calculated by selecting a random value between\n# <blocklist_retain_min_duration, blocklist_retain_max_duration>.\nblocklist_retain_max_duration = '10 minutes'\n\n# Identity of a node\n#\n# When this section is not specified, an identity will be generated when the node process starts with a self-signed certificate.\n# This option makes sense for some private chains where for security reasons joining new nodes is restricted.\n# [network.identity]\n# tls_certificate = \"node_cert.pem\"\n# secret_key = \"node.pem\"\n# ca_certificate = \"ca_cert.pem\"\n\n# Weights for impact estimation of incoming messages, used in combination with\n# `max_incoming_message_rate_non_validators`.\n#\n# Any weight set to 0 means that the category of traffic is exempt from throttling.\n[network.estimator_weights]\nconsensus = 0\nblock_gossip = 1\ntransaction_gossip = 0\nfinality_signature_gossip = 1\naddress_gossip = 0\nfinality_signature_broadcasts = 0\ntransaction_requests = 1\ntransaction_responses = 0\nlegacy_deploy_requests = 1\nlegacy_deploy_responses = 0\nblock_requests = 1\nblock_responses = 0\nblock_header_requests = 1\nblock_header_responses = 0\ntrie_requests = 1\ntrie_responses = 0\nfinality_signature_requests = 1\nfinality_signature_responses = 0\nsync_leap_requests = 1\nsync_leap_responses = 0\napprovals_hashes_requests = 1\napprovals_hashes_responses = 0\nexecution_results_requests = 1\nexecution_results_responses = 0\n\n# 
==================================================\n# Configuration options for the BinaryPort server\n# ==================================================\n[binary_port_server]\n\n# Flag which enables the BinaryPort server.\nenable_server = true\n\n# Listening address for BinaryPort server.\naddress = '0.0.0.0:7779'\n\n# Flag that enables the `AllValues` get request. Disabled by default, because it can potentially be abused to retrieve huge amounts of data and clog the node.\nallow_request_get_all_values = true\n\n# Flag that enables the `Trie` get request. Disabled by default, because it can potentially be abused to retrieve huge amounts of data and clog the node.\nallow_request_get_trie = false\n\n# Flag that enables the `TrySpeculativeExec` request. Disabled by default.\nallow_request_speculative_exec = false\n\n# Maximum size of a message in bytes.\nmax_message_size_bytes = 134_217_728\n\n# Maximum number of connections to the server.\nmax_connections = 5\n\n# The global max rate of requests (per second) before they are limited.\n# The implementation uses a sliding window algorithm.\nqps_limit = 110\n\n# Initial time given to a connection before it expires\ninitial_connection_lifetime = '10 seconds'\n\n#The amount of time which is given to a connection to extend its lifetime when a valid\n# [`Command::Get(GetRequest::Record)`] is sent to the node\nget_record_request_termination_delay = '0 seconds'\n\n#The amount of time which is given to a connection to extend its lifetime when a valid\n#[`Command::Get(GetRequest::Information)`] is sent to the node\nget_information_request_termination_delay = '5 seconds'\n\n#The amount of time which is given to a connection to extend its lifetime when a valid\n#[`Command::Get(GetRequest::State)`] is sent to the node\nget_state_request_termination_delay = '0 seconds'\n\n#The amount of time which is given to a connection to extend its lifetime when a valid\n#[`Command::Get(GetRequest::Trie)`] is sent to the 
node\nget_trie_request_termination_delay = '0 seconds'\n\n#The amount of time which is given to a connection to extend its lifetime when a valid\n#[`Command::TryAcceptTransaction`] is sent to the node\naccept_transaction_request_termination_delay = '24 seconds'\n\n#The amount of time which is given to a connection to extend its lifetime when a valid\n#[`Command::TrySpeculativeExec`] is sent to the node\nspeculative_exec_request_termination_delay = '0 seconds'\n\n\n# ==============================================\n# Configuration options for the REST HTTP server\n# ==============================================\n[rest_server]\n\n# Flag which enables the REST HTTP server.\nenable_server = true\n\n# Listening address for REST HTTP server.  If the port is set to 0, a random port will be used.\n#\n# If the specified port cannot be bound to, a random port will be tried instead.  If binding fails,\n# the REST HTTP server will not run, but the node will be otherwise unaffected.\n#\n# The actual bound address will be reported via a log line if logging is enabled.\naddress = '0.0.0.0:8888'\n\n# The global max rate of requests (per second) before they are limited.\n# Request will be delayed to the next 1 second bucket once limited.\nqps_limit = 100\n\n# Specifies which origin will be reported as allowed by REST server.\n#\n# If left empty, CORS will be disabled.\n# If set to '*', any origin is allowed.\n# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin).\ncors_origin = ''\n\n\n# ==========================================================\n# Configuration options for the SSE HTTP event stream server\n# ==========================================================\n[event_stream_server]\n\n# Flag which enables the SSE HTTP event stream server.\nenable_server = true\n\n# Listening address for SSE HTTP event stream server.  
If the port is set to 0, a random port will be used.\n#\n# If the specified port cannot be bound to, a random port will be tried instead.  If binding fails,\n# the SSE HTTP event stream server will not run, but the node will be otherwise unaffected.\n#\n# The actual bound address will be reported via a log line if logging is enabled.\naddress = '0.0.0.0:9999'\n\n# The number of event stream events to buffer.\nevent_stream_buffer_length = 5000\n\n# The maximum number of subscribers across all event streams the server will permit at any one time.\nmax_concurrent_subscribers = 100\n\n# Specifies which origin will be reported as allowed by event stream server.\n#\n# If left empty, CORS will be disabled.\n# If set to '*', any origin is allowed.\n# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin).\ncors_origin = ''\n\n# ===============================================\n# Configuration options for the storage component\n# ===============================================\n[storage]\n\n# Path (absolute, or relative to this config.toml) to the folder where any files created\n# or read by the storage component will exist. 
A subfolder named with the network name will be\n# automatically created and used for the storage component files.\n#\n# If the folder doesn't exist, it and any required parents will be created.\n#\n# If unset, the path must be supplied as an argument via the CLI.\npath = '/var/lib/casper/casper-node'\n\n# Maximum size of the database to use for the block store.\n#\n# The size should be a multiple of the OS page size.\n#\n# 483_183_820_800 == 450 GiB.\nmax_block_store_size = 483_183_820_800\n\n# Maximum size of the database to use for the deploy store.\n#\n# The size should be a multiple of the OS page size.\n#\n# 322_122_547_200 == 300 GiB.\nmax_deploy_store_size = 322_122_547_200\n\n# Maximum size of the database to use for the deploy metadata.\n#\n# The size should be a multiple of the OS page size.\n#\n# 322_122_547_200 == 300 GiB.\nmax_deploy_metadata_store_size = 322_122_547_200\n\n# Maximum size of the database to use for the state snapshots.\n#\n# The size should be a multiple of the OS page size.\n#\n# 10_737_418_240 == 10 GiB.\nmax_state_store_size = 10_737_418_240\n\n# Memory deduplication.\n#\n# If enabled, nodes will attempt to share loaded objects if possible.\nenable_mem_deduplication = true\n\n# Memory duplication garbage collection.\n#\n# Sets the frequency how often the memory pool cache is swept for free references.\n# For example, setting this value to 5 means that every 5th time something is put in the pool the cache is swept.\nmem_pool_prune_interval = 4096\n\n\n# ===================================\n# Configuration options for gossiping\n# ===================================\n[gossip]\n\n# Target number of peers to infect with a given piece of data.\ninfection_target = 3\n\n# The saturation limit as a percentage, with a maximum value of 99.  Used as a termination\n# condition.\n#\n# Example: assume the `infection_target` is 3, the `saturation_limit_percent` is 80, and we don't\n# manage to newly infect 3 peers.  
We will stop gossiping once we know of more than 15 holders\n# excluding us since 80% saturation would imply 3 new infections in 15 peers.\nsaturation_limit_percent = 80\n\n# The maximum duration for which to keep finished entries.\n#\n# The longer they are retained, the lower the likelihood of re-gossiping a piece of data.  However,\n# the longer they are retained, the larger the list of finished entries can grow.\nfinished_entry_duration = '1 minute'\n\n# The timeout duration for a single gossip request, i.e. for a single gossip message\n# sent from this node, it will be considered timed out if the expected response from that peer is\n# not received within this specified duration.\ngossip_request_timeout = '30 seconds'\n\n# The timeout duration for retrieving the remaining part(s) of newly-discovered data\n# from a peer which gossiped information about that data to this node.\nget_remainder_timeout = '5 seconds'\n\n# The timeout duration for a newly-received, gossiped item to be validated and stored by another\n# component before the gossiper abandons waiting to gossip the item onwards.\nvalidate_and_store_timeout = '1 minute'\n\n\n# ===============================================\n# Configuration options for the block accumulator\n# ===============================================\n[block_accumulator]\n\n# Block height difference threshold for starting to execute the blocks.\nattempt_execution_threshold = 6\n\n# Accepted time interval for inactivity in block accumulator.\ndead_air_interval = '3 minutes'\n\n# Time after which the block acceptors are considered old and can be purged.\npurge_interval = '1 minute'\n\n\n# ================================================\n# Configuration options for the block synchronizer\n# ================================================\n[block_synchronizer]\n\n# Maximum number of fetch-trie tasks to run in parallel during block synchronization.\nmax_parallel_trie_fetches = 5000\n\n# Time interval for the node to ask for refreshed 
peers.\npeer_refresh_interval = '90 seconds'\n\n# Time interval for the node to check what the block synchronizer needs to acquire next.\nneed_next_interval = '1 second'\n\n# Time interval for recurring disconnection of dishonest peers.\ndisconnect_dishonest_peers_interval = '10 seconds'\n\n# Time interval for resetting the latch in block builders.\nlatch_reset_interval = '5 seconds'\n\n\n# =============================================\n# Configuration options for the block validator\n# =============================================\n[block_validator]\n\n# Maximum number of completed entries to retain.\n#\n# A higher value can avoid creating needless validation work on an already-validated proposed\n# block, but comes at the cost of increased memory consumption.\nmax_completed_entries = 6\n\n\n# ==================================\n# Configuration options for fetchers\n# ==================================\n[fetcher]\n\n# The timeout duration for a single fetcher request, i.e. for a single fetcher message\n# sent from this node to another node, it will be considered timed out if the expected response from that peer is\n# not received within this specified duration.\nget_from_peer_timeout = '10 seconds'\n\n\n# ========================================================\n# Configuration options for the contract runtime component\n# ========================================================\n[contract_runtime]\n\n# Optional maximum size of the database to use for the global state store.\n#\n# If unset, defaults to 805,306,368,000 == 750 GiB.\n#\n# The size should be a multiple of the OS page size.\nmax_global_state_size = 2_089_072_132_096\n\n# Optional depth limit to use for global state queries.\n#\n# If unset, defaults to 5.\n#max_query_depth = 5\n\n# Enable manual synchronizing to disk.\n#\n# If unset, defaults to true.\n#enable_manual_sync = true\n\n\n# ==================================================\n# Configuration options for the transaction acceptor\n# 
==================================================\n[transaction_acceptor]\n\n# The leeway allowed when considering whether a transaction is future-dated or not.\n#\n# To accommodate minor clock drift, transactions whose timestamps are within `timestamp_leeway` in the\n# future are still acceptable.\n#\n# The maximum value to which `timestamp_leeway` can be set is defined by the chainspec setting\n# `transaction.max_timestamp_leeway`.\ntimestamp_leeway = '2 seconds'\n\n\n# ===========================================\n# Configuration options for the transaction buffer\n# ===========================================\n[transaction_buffer]\n\n# The interval of checking for expired transactions.\nexpiry_check_interval = '1 minute'\n\n\n# ==============================================\n# Configuration options for the diagnostics port\n# ==============================================\n[diagnostics_port]\n\n# If set, the diagnostics port will be available on a UNIX socket.\nenabled = false\n\n# Filename for the UNIX domain socket the diagnostics port listens on.\nsocket_path = \"debug.socket\"\n\n# The umask to set before creating the socket. A restrictive mask like `0o077` will cause the\n# socket to be only accessible by the user the node runs as. A more relaxed variant is `0o007`,\n# which allows for group access as well.\nsocket_umask = 0o077\n\n\n# =============================================\n# Configuration options for the upgrade watcher\n# =============================================\n[upgrade_watcher]\n\n# How often to scan file system for available upgrades.\nupgrade_check_interval = '30 seconds'\n"
  },
  {
    "path": "resources/testnet/global_state.toml",
    "content": "# The below entry will write the value into an account purse balance.\n#\n# Key f574ebcb676fb9fdee9f24b1d18a298616b08e64705aac773c7f8bf3bd213eb0 is the main purse\n# for account 016f1236ff250d958b7f7ded4a942bd79e201120949b283f7019da970299a34dce\n#\n# Value AAkAAAAIAJIsyddVti4I is base64 encoding of 3332312435249620480 motes in U512 data type.\n# This represents 33% of the total_supply at block 7163088 (10097916461362486077)\n# plus 3 CSPR (3000000000 motes) balance of this account at upgrade.\n[[entries]]\nkey = \"balance-f574ebcb676fb9fdee9f24b1d18a298616b08e64705aac773c7f8bf3bd213eb0\"\nvalue = \"AAkAAAAIAKarJ4HAPi4I\"\n"
  },
  {
    "path": "rust-toolchain.toml",
    "content": "[toolchain]\nchannel = \"1.85.1\"\n"
  },
  {
    "path": "rustfmt.toml",
    "content": "wrap_comments = true\ncomment_width = 100\nimports_granularity = \"Crate\"\nedition = \"2021\"\n"
  },
  {
    "path": "smart_contracts/contract/CHANGELOG.md",
    "content": "# Changelog\n\nAll notable changes to this project will be documented in this file.  The format is based on [Keep a Changelog].\n\n[comment]: <> (Added:      new features)\n[comment]: <> (Changed:    changes in existing functionality)\n[comment]: <> (Deprecated: soon-to-be removed features)\n[comment]: <> (Removed:    now removed features)\n[comment]: <> (Fixed:      any bug fixes)\n[comment]: <> (Security:   in case of vulnerabilities)\n\n\n\n## 4.0.0\n\n### Added\n* Add `storage::enable_contract_version` for enabling a specific version of a contract.\n\n\n\n## 3.0.0\n\n### Added\n* Support fetching the calling account's authorization keys via the new function `runtime::list_authorization_keys` which calls the new `ext_ffi::casper_load_authorization_keys`.\n* Support providing 32 random bytes via the new function `runtime::random_bytes` which calls the new `ext_ffi::casper_random_bytes`.\n* Add `storage::read_from_key` for reading a value under a given `Key`.\n* Add `storage::dictionary_read` for reading a value from a dictionary under a given `Key`, calling the new `ext_ffi::casper_dictionary_read`.\n* Add `storage::named_dictionary_put` for writing a named value to a named dictionary.\n* Add `storage::named_dictionary_get` for reading a named value from a named dictionary.\n\n### Changed\n* Update pinned version of Rust to `nightly-2022-08-03`.\n* Increase `DICTIONARY_ITEM_KEY_MAX_LENGTH` to 128.\n\n\n\n## 2.0.0\n\n### Changed\n* Update `casper-types` to v2.0.0 due to additional `Key` variant, requiring a major version bump here.\n\n\n\n## 1.4.4\n\n### Changed\n* Minor refactor of `system::create_purse()`.\n\n\n\n## [1.4.0] - 2021-10-04\n\n### Added\n* Add `no-std-helpers` feature, enabled by default, which provides no-std panic/oom handlers and a global allocator as a convenience.\n* Add new APIs for transferring tokens to the main purse associated with a public key: `transfer_to_public_key` and `transfer_from_purse_to_public_key`.\n\n### 
Deprecated\n* Feature `std` is deprecated as it is now a no-op, since there is no benefit to linking the std lib via this crate.\n\n\n\n## [1.3.0] - 2021-07-19\n\n### Changed\n* Update pinned version of Rust to `nightly-2021-06-17`.\n\n\n\n## [1.2.0] - 2021-05-28\n\n### Changed\n* Change to Apache 2.0 license.\n\n\n\n## [1.1.1] - 2021-04-19\n\nNo changes.\n\n\n\n## [1.1.0] - 2021-04-13 [YANKED]\n\nNo changes.\n\n\n\n## [1.0.1] - 2021-04-08\n\nNo changes.\n\n\n\n## [1.0.0] - 2021-03-30\n\n### Added\n* Initial release of smart contract API compatible with Casper mainnet.\n\n\n\n[Keep a Changelog]: https://keepachangelog.com/en/1.0.0\n[unreleased]: https://github.com/casper-network/casper-node/compare/v1.4.0...dev\n[1.4.0]: https://github.com/casper-network/casper-node/compare/v1.3.0...v1.4.0\n[1.3.0]: https://github.com/casper-network/casper-node/compare/v1.2.0...v1.3.0\n[1.2.0]: https://github.com/casper-network/casper-node/compare/v1.1.1...v1.2.0\n[1.1.1]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1\n[1.1.0]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1\n[1.0.1]: https://github.com/casper-network/casper-node/compare/v1.0.0...v1.0.1\n[1.0.0]: https://github.com/casper-network/casper-node/releases/tag/v1.0.0\n"
  },
  {
    "path": "smart_contracts/contract/Cargo.toml",
    "content": "[package]\nname = \"casper-contract\"\nversion = \"5.1.1\" # when updating, also update 'html_root_url' in lib.rs\nauthors = [\"Michał Papierski <michal@casper.network>\", \"Mateusz Górski <gorski.mateusz@protonmail.ch>\"]\nedition = \"2021\"\ndescription = \"A library for developing Casper network smart contracts.\"\nreadme = \"README.md\"\ndocumentation = \"https://docs.rs/casper-contract\"\nhomepage = \"https://casper.network\"\nrepository = \"https://github.com/casper-network/casper-node/tree/master/smart_contracts/contract\"\nlicense = \"Apache-2.0\"\n\n[dependencies]\ncasper-types = { version = \"7.0.0\", path = \"../../types\" }\nhex_fmt = \"0.3.0\"\nversion-sync = { version = \"0.9\", optional = true }\nwee_alloc = { version = \"0.4.5\", optional = true }\n\n[features]\ndefault = [\"no-std-helpers\"]\nno-std-helpers = [\"wee_alloc\"]\ntest-support = []\n# DEPRECATED - enabling `std` has no effect.\nstd = []\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n"
  },
  {
    "path": "smart_contracts/contract/README.md",
    "content": "# `casper-contract`\n\n[![LOGO](https://raw.githubusercontent.com/casper-network/casper-node/master/images/casper-association-logo-primary.svg)](https://casper.network/)\n\n[![Crates.io](https://img.shields.io/crates/v/casper-contract)](https://crates.io/crates/casper-contract)\n[![Documentation](https://docs.rs/casper-contract/badge.svg)](https://docs.rs/casper-contract)\n[![License](https://img.shields.io/badge/license-Apache-blue)](https://github.com/casper-network/casper-node/blob/master/LICENSE)\n\nA library for developing Casper network smart contracts.\n\n## no_std\n\nThe crate is `no_std`, but uses the `core` and `alloc` crates.  It is recommended to build Wasm smart contracts in a\n`no_std` environment as this generally yields smaller, and hence cheaper, binaries.\n\n## Compile-time features\n\n### `no-std-helpers`\n\nEnabled by default.\n\nGiven that the library is intended to be consumed by smart-contract binaries, and that in a `no_std` environment these\nwill all require to provide an [alloc error handler](https://github.com/rust-lang/rust/issues/51540) and an\n[eh_personality](https://doc.rust-lang.org/unstable-book/language-features/lang-items.html#more-about-the-language-items),\nthen this crate provides these when `no-std-helpers` is enabled.  This unfortunately requires the use of nightly Rust.\n\nFor further convenience, enabling this feature also provides a global allocator suitable for use in a `no_std`\nenvironment.\n\nIf you wish to use a different global allocator, or provide different panic/out-of-memory handlers, then add the\nfollowing to your Cargo.toml:\n\n```toml\ncasper-contract = { version = \"1\", default-features = false }\n```\n\n### `test-support`\n\nDisabled by default.\n\nTo help support smart contract debugging, enabling the `test-support` feature makes the function\n`contract_api::runtime::print(text: &str)` available.  
If the contract is being tested offchain using the\n`casper-engine-test-support` crate, then the contract can output text to the console for debugging.\n\n```toml\ncasper-contract = { version = \"1\", features = [\"test-support\"] }\n```\n\n## License\n\nLicensed under the [Apache License Version 2.0](https://github.com/casper-network/casper-node/blob/master/LICENSE).\n"
  },
  {
    "path": "smart_contracts/contract/src/contract_api/account.rs",
    "content": "//! Functions for managing accounts.\n\nuse alloc::vec::Vec;\nuse core::convert::TryFrom;\n\nuse casper_types::{\n    account::{\n        AccountHash, ActionType, AddKeyFailure, RemoveKeyFailure, SetThresholdFailure,\n        UpdateKeyFailure, Weight,\n    },\n    bytesrepr, URef, UREF_SERIALIZED_LENGTH,\n};\n\nuse super::to_ptr;\nuse crate::{contract_api, ext_ffi, unwrap_or_revert::UnwrapOrRevert};\n\n/// Retrieves the ID of the account's main purse.\npub fn get_main_purse() -> URef {\n    let dest_non_null_ptr = contract_api::alloc_bytes(UREF_SERIALIZED_LENGTH);\n    let bytes = unsafe {\n        ext_ffi::casper_get_main_purse(dest_non_null_ptr.as_ptr());\n        Vec::from_raw_parts(\n            dest_non_null_ptr.as_ptr(),\n            UREF_SERIALIZED_LENGTH,\n            UREF_SERIALIZED_LENGTH,\n        )\n    };\n    bytesrepr::deserialize(bytes).unwrap_or_revert()\n}\n\n/// Sets the given [`ActionType`]'s threshold to the provided value.\npub fn set_action_threshold(\n    action_type: ActionType,\n    threshold: Weight,\n) -> Result<(), SetThresholdFailure> {\n    let action_type = action_type as u32;\n    let threshold = threshold.value().into();\n    let result = unsafe { ext_ffi::casper_set_action_threshold(action_type, threshold) };\n    if result == 0 {\n        Ok(())\n    } else {\n        Err(SetThresholdFailure::try_from(result).unwrap_or_revert())\n    }\n}\n\n/// Adds the given [`AccountHash`] with associated [`Weight`] to the account's associated keys.\npub fn add_associated_key(account_hash: AccountHash, weight: Weight) -> Result<(), AddKeyFailure> {\n    let (account_hash_ptr, account_hash_size, _bytes) = to_ptr(account_hash);\n    // Cast of u8 (weight) into i32 is assumed to be always safe\n    let result = unsafe {\n        ext_ffi::casper_add_associated_key(\n            account_hash_ptr,\n            account_hash_size,\n            weight.value().into(),\n        )\n    };\n    if result == 0 {\n        Ok(())\n    } else 
{\n        Err(AddKeyFailure::try_from(result).unwrap_or_revert())\n    }\n}\n\n/// Removes the given [`AccountHash`] from the account's associated keys.\npub fn remove_associated_key(account_hash: AccountHash) -> Result<(), RemoveKeyFailure> {\n    let (account_hash_ptr, account_hash_size, _bytes) = to_ptr(account_hash);\n    let result =\n        unsafe { ext_ffi::casper_remove_associated_key(account_hash_ptr, account_hash_size) };\n    if result == 0 {\n        Ok(())\n    } else {\n        Err(RemoveKeyFailure::try_from(result).unwrap_or_revert())\n    }\n}\n\n/// Updates the [`Weight`] of the given [`AccountHash`] in the account's associated keys.\npub fn update_associated_key(\n    account_hash: AccountHash,\n    weight: Weight,\n) -> Result<(), UpdateKeyFailure> {\n    let (account_hash_ptr, account_hash_size, _bytes) = to_ptr(account_hash);\n    // Cast of u8 (weight) into i32 is assumed to be always safe\n    let result = unsafe {\n        ext_ffi::casper_update_associated_key(\n            account_hash_ptr,\n            account_hash_size,\n            weight.value().into(),\n        )\n    };\n    if result == 0 {\n        Ok(())\n    } else {\n        Err(UpdateKeyFailure::try_from(result).unwrap_or_revert())\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contract/src/contract_api/cryptography.rs",
    "content": "//! Functions with cryptographic utils.\n\nuse casper_types::{\n    api_error,\n    bytesrepr::{FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    ApiError, HashAlgorithm, PublicKey, Signature, BLAKE2B_DIGEST_LENGTH,\n};\n\nuse crate::{ext_ffi, unwrap_or_revert::UnwrapOrRevert};\n\n/// Computes digest hash, using provided algorithm type.\npub fn generic_hash<T: AsRef<[u8]>>(input: T, algo: HashAlgorithm) -> [u8; 32] {\n    let mut ret = [0; 32];\n\n    let result = unsafe {\n        ext_ffi::casper_generic_hash(\n            input.as_ref().as_ptr(),\n            input.as_ref().len(),\n            algo as u8,\n            ret.as_mut_ptr(),\n            BLAKE2B_DIGEST_LENGTH,\n        )\n    };\n    api_error::result_from(result).unwrap_or_revert();\n    ret\n}\n\n/// Attempts to recover a Secp256k1 [`PublicKey`] from a message and a signature over it.\npub fn recover_secp256k1<T: AsRef<[u8]>>(\n    data: T,\n    signature: &Signature,\n    recovery_id: u8,\n) -> Result<PublicKey, ApiError> {\n    let mut buffer = [0; U8_SERIALIZED_LENGTH + PublicKey::SECP256K1_LENGTH];\n    let signature_bytes = signature.to_bytes().unwrap_or_revert();\n\n    let result = unsafe {\n        ext_ffi::casper_recover_secp256k1(\n            data.as_ref().as_ptr(),\n            data.as_ref().len(),\n            signature_bytes.as_ptr(),\n            signature_bytes.len(),\n            buffer.as_mut_ptr(),\n            recovery_id,\n        )\n    };\n\n    PublicKey::from_bytes(&buffer)\n        .map(|(key, _)| key)\n        .map_err(|_| ApiError::from(result as u32))\n}\n\n/// Verifies the signature of the given message against the given public key.\npub fn verify_signature<T: AsRef<[u8]>>(\n    data: T,\n    signature: &Signature,\n    public_key: &PublicKey,\n) -> Result<(), ApiError> {\n    let signature_bytes = signature.to_bytes().unwrap_or_revert();\n    let public_key_bytes = public_key.to_bytes().unwrap_or_revert();\n\n    let result = unsafe {\n        
ext_ffi::casper_verify_signature(\n            data.as_ref().as_ptr(),\n            data.as_ref().len(),\n            signature_bytes.as_ptr(),\n            signature_bytes.len(),\n            public_key_bytes.as_ptr(),\n            public_key_bytes.len(),\n        )\n    };\n\n    api_error::result_from(result)\n}\n"
  },
  {
    "path": "smart_contracts/contract/src/contract_api/entity.rs",
    "content": "//! Functions for managing accounts.\n\nuse alloc::vec::Vec;\nuse core::convert::TryFrom;\n\nuse casper_types::{\n    account::{\n        AccountHash, AddKeyFailure, RemoveKeyFailure, SetThresholdFailure, UpdateKeyFailure,\n    },\n    addressable_entity::{ActionType, Weight},\n    bytesrepr, URef, UREF_SERIALIZED_LENGTH,\n};\n\nuse super::to_ptr;\nuse crate::{contract_api, ext_ffi, unwrap_or_revert::UnwrapOrRevert};\n\n/// Retrieves the ID of the account's main purse.\npub fn get_main_purse() -> URef {\n    let dest_non_null_ptr = contract_api::alloc_bytes(UREF_SERIALIZED_LENGTH);\n    let bytes = unsafe {\n        ext_ffi::casper_get_main_purse(dest_non_null_ptr.as_ptr());\n        Vec::from_raw_parts(\n            dest_non_null_ptr.as_ptr(),\n            UREF_SERIALIZED_LENGTH,\n            UREF_SERIALIZED_LENGTH,\n        )\n    };\n    bytesrepr::deserialize(bytes).unwrap_or_revert()\n}\n\n/// Sets the given [`ActionType`]'s threshold to the provided value.\npub fn set_action_threshold(\n    action_type: ActionType,\n    threshold: Weight,\n) -> Result<(), SetThresholdFailure> {\n    let action_type = action_type as u32;\n    let threshold = threshold.value().into();\n    let result = unsafe { ext_ffi::casper_set_action_threshold(action_type, threshold) };\n    if result == 0 {\n        Ok(())\n    } else {\n        Err(SetThresholdFailure::try_from(result).unwrap_or_revert())\n    }\n}\n\n/// Adds the given [`AccountHash`] with associated [`Weight`] to the account's associated keys.\npub fn add_associated_key(account_hash: AccountHash, weight: Weight) -> Result<(), AddKeyFailure> {\n    let (account_hash_ptr, account_hash_size, _bytes) = to_ptr(account_hash);\n    // Cast of u8 (weight) into i32 is assumed to be always safe\n    let result = unsafe {\n        ext_ffi::casper_add_associated_key(\n            account_hash_ptr,\n            account_hash_size,\n            weight.value().into(),\n        )\n    };\n    if result == 0 {\n        
Ok(())\n    } else {\n        Err(AddKeyFailure::try_from(result).unwrap_or_revert())\n    }\n}\n\n/// Removes the given [`AccountHash`] from the account's associated keys.\npub fn remove_associated_key(account_hash: AccountHash) -> Result<(), RemoveKeyFailure> {\n    let (account_hash_ptr, account_hash_size, _bytes) = to_ptr(account_hash);\n    let result =\n        unsafe { ext_ffi::casper_remove_associated_key(account_hash_ptr, account_hash_size) };\n    if result == 0 {\n        Ok(())\n    } else {\n        Err(RemoveKeyFailure::try_from(result).unwrap_or_revert())\n    }\n}\n\n/// Updates the [`Weight`] of the given [`AccountHash`] in the account's associated keys.\npub fn update_associated_key(\n    account_hash: AccountHash,\n    weight: Weight,\n) -> Result<(), UpdateKeyFailure> {\n    let (account_hash_ptr, account_hash_size, _bytes) = to_ptr(account_hash);\n    // Cast of u8 (weight) into i32 is assumed to be always safe\n    let result = unsafe {\n        ext_ffi::casper_update_associated_key(\n            account_hash_ptr,\n            account_hash_size,\n            weight.value().into(),\n        )\n    };\n    if result == 0 {\n        Ok(())\n    } else {\n        Err(UpdateKeyFailure::try_from(result).unwrap_or_revert())\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contract/src/contract_api/mod.rs",
    "content": "//! Contains support for writing smart contracts.\n\npub mod account;\npub mod cryptography;\npub mod entity;\npub mod runtime;\npub mod storage;\npub mod system;\n\nuse alloc::{\n    alloc::{alloc, Layout},\n    vec::Vec,\n};\nuse core::{mem, ptr::NonNull};\n\nuse casper_types::{bytesrepr::ToBytes, ApiError};\n\nuse crate::unwrap_or_revert::UnwrapOrRevert;\n\n/// Calculates size and alignment for an array of T.\nconst fn size_align_for_array<T>(n: usize) -> (usize, usize) {\n    (n * size_of::<T>(), mem::align_of::<T>())\n}\n\n/// Allocates bytes\npub fn alloc_bytes(n: usize) -> NonNull<u8> {\n    let (size, align) = size_align_for_array::<u8>(n);\n    // We treat allocated memory as raw bytes, that will be later passed to deserializer which also\n    // operates on raw bytes.\n    let layout = Layout::from_size_align(size, align)\n        .map_err(|_| ApiError::AllocLayout)\n        .unwrap_or_revert();\n    let raw_ptr = unsafe { alloc(layout) };\n    NonNull::new(raw_ptr)\n        .ok_or(ApiError::OutOfMemory)\n        .unwrap_or_revert()\n}\n\nfn to_ptr<T: ToBytes>(t: T) -> (*const u8, usize, Vec<u8>) {\n    let bytes = t.into_bytes().unwrap_or_revert();\n    let ptr = bytes.as_ptr();\n    let size = bytes.len();\n    (ptr, size, bytes)\n}\n\nfn dictionary_item_key_to_ptr(dictionary_item_key: &str) -> (*const u8, usize) {\n    let bytes = dictionary_item_key.as_bytes();\n    let ptr = bytes.as_ptr();\n    let size = bytes.len();\n    (ptr, size)\n}\n"
  },
  {
    "path": "smart_contracts/contract/src/contract_api/runtime.rs",
    "content": "//! Functions for interacting with the current runtime.\n\nuse alloc::{collections::BTreeSet, vec, vec::Vec};\nuse core::mem::MaybeUninit;\n\nuse casper_types::{\n    account::AccountHash,\n    api_error,\n    bytesrepr::{self, FromBytes, U64_SERIALIZED_LENGTH},\n    contract_messages::{MessagePayload, MessageTopicOperation},\n    contracts::{ContractHash, ContractPackageHash, ContractVersion, NamedKeys},\n    system::CallerInfo,\n    ApiError, BlockTime, CLTyped, CLValue, Digest, EntityVersion, HashAlgorithm, Key, Phase,\n    ProtocolVersion, RuntimeArgs, URef, BLAKE2B_DIGEST_LENGTH, BLOCKTIME_SERIALIZED_LENGTH,\n    PHASE_SERIALIZED_LENGTH,\n};\n\nuse crate::{contract_api, ext_ffi, unwrap_or_revert::UnwrapOrRevert};\n\n/// Number of random bytes returned from the `random_bytes()` function.\nconst RANDOM_BYTES_COUNT: usize = 32;\n\nconst ACCOUNT: u8 = 0;\n\n#[repr(u8)]\nenum CallerIndex {\n    Initiator = 0,\n    Immediate = 1,\n    FullStack = 2,\n}\n\n/// Returns the given [`CLValue`] to the host, terminating the currently running module.\n///\n/// Note this function is only relevant to contracts stored on chain which are invoked via\n/// [`call_contract`] and can thus return a value to their caller.  The return value of a directly\n/// deployed contract is never used.\npub fn ret(value: CLValue) -> ! {\n    let (ptr, size, _bytes) = contract_api::to_ptr(value);\n    unsafe {\n        ext_ffi::casper_ret(ptr, size);\n    }\n}\n\n/// Stops execution of a contract and reverts execution effects with a given [`ApiError`].\n///\n/// The provided `ApiError` is returned in the form of a numeric exit code to the caller via the\n/// deploy response.\npub fn revert<T: Into<ApiError>>(error: T) -> ! {\n    unsafe {\n        ext_ffi::casper_revert(error.into().into());\n    }\n}\n\n/// Calls the given stored contract, passing the given arguments to it.\n///\n/// If the stored contract calls [`ret`], then that value is returned from `call_contract`.  
If the\n/// stored contract calls [`revert`], then execution stops and `call_contract` doesn't return.\n/// Otherwise `call_contract` returns `()`.\npub fn call_contract<T: CLTyped + FromBytes>(\n    contract_hash: ContractHash,\n    entry_point_name: &str,\n    runtime_args: RuntimeArgs,\n) -> T {\n    let (contract_hash_ptr, contract_hash_size, _bytes1) = contract_api::to_ptr(contract_hash);\n    let (entry_point_name_ptr, entry_point_name_size, _bytes2) =\n        contract_api::to_ptr(entry_point_name);\n    let (runtime_args_ptr, runtime_args_size, _bytes3) = contract_api::to_ptr(runtime_args);\n\n    let bytes_written = {\n        let mut bytes_written = MaybeUninit::uninit();\n        let ret = unsafe {\n            ext_ffi::casper_call_contract(\n                contract_hash_ptr,\n                contract_hash_size,\n                entry_point_name_ptr,\n                entry_point_name_size,\n                runtime_args_ptr,\n                runtime_args_size,\n                bytes_written.as_mut_ptr(),\n            )\n        };\n        api_error::result_from(ret).unwrap_or_revert();\n        unsafe { bytes_written.assume_init() }\n    };\n    deserialize_contract_result(bytes_written)\n}\n\n/// Invokes the specified `entry_point_name` of stored logic at a specific `contract_package_hash`\n/// address, for the most current version of a contract package by default or a specific\n/// `contract_version` if one is provided, and passing the provided `runtime_args` to it\n///\n/// If the stored contract calls [`ret`], then that value is returned from\n/// `call_versioned_contract`.  If the stored contract calls [`revert`], then execution stops and\n/// `call_versioned_contract` doesn't return. 
Otherwise `call_versioned_contract` returns `()`.\npub fn call_versioned_contract<T: CLTyped + FromBytes>(\n    contract_package_hash: ContractPackageHash,\n    contract_version: Option<ContractVersion>,\n    entry_point_name: &str,\n    runtime_args: RuntimeArgs,\n) -> T {\n    let (contract_package_hash_ptr, contract_package_hash_size, _bytes1) =\n        contract_api::to_ptr(contract_package_hash);\n    let (contract_version_ptr, contract_version_size, _bytes2) =\n        contract_api::to_ptr(contract_version);\n    let (entry_point_name_ptr, entry_point_name_size, _bytes3) =\n        contract_api::to_ptr(entry_point_name);\n    let (runtime_args_ptr, runtime_args_size, _bytes4) = contract_api::to_ptr(runtime_args);\n\n    let bytes_written = {\n        let mut bytes_written = MaybeUninit::uninit();\n        let ret = unsafe {\n            ext_ffi::casper_call_versioned_contract(\n                contract_package_hash_ptr,\n                contract_package_hash_size,\n                contract_version_ptr,\n                contract_version_size,\n                entry_point_name_ptr,\n                entry_point_name_size,\n                runtime_args_ptr,\n                runtime_args_size,\n                bytes_written.as_mut_ptr(),\n            )\n        };\n        api_error::result_from(ret).unwrap_or_revert();\n        unsafe { bytes_written.assume_init() }\n    };\n    deserialize_contract_result(bytes_written)\n}\n\n/// Invokes the specified `entry_point_name` of stored logic at a specific `contract_package_hash`\n/// address, for a specific pair of `major_version` and `contract_version`\n/// and passing the provided `runtime_args` to it\n///\n/// If the stored contract calls [`ret`], then that value is returned from\n/// `call_package_version`.  If the stored contract calls [`revert`], then execution stops and\n/// `call_package_version` doesn't return. 
Otherwise `call_package_version` returns `()`.\npub fn call_package_version<T: CLTyped + FromBytes>(\n    contract_package_hash: ContractPackageHash,\n    major_version: Option<u32>,\n    contract_version: Option<EntityVersion>,\n    entry_point_name: &str,\n    runtime_args: RuntimeArgs,\n) -> T {\n    let (contract_package_hash_ptr, contract_package_hash_size, _bytes1) =\n        contract_api::to_ptr(contract_package_hash);\n    let (major_version_ptr, major_version_size, _bytes_5) = contract_api::to_ptr(major_version);\n    let (contract_version_ptr, contract_version_size, _bytes2) =\n        contract_api::to_ptr(contract_version);\n    let (entry_point_name_ptr, entry_point_name_size, _bytes3) =\n        contract_api::to_ptr(entry_point_name);\n    let (runtime_args_ptr, runtime_args_size, _bytes4) = contract_api::to_ptr(runtime_args);\n\n    let bytes_written = {\n        let mut bytes_written = MaybeUninit::uninit();\n        let ret = unsafe {\n            ext_ffi::casper_call_package_version(\n                contract_package_hash_ptr,\n                contract_package_hash_size,\n                major_version_ptr,\n                major_version_size,\n                contract_version_ptr,\n                contract_version_size,\n                entry_point_name_ptr,\n                entry_point_name_size,\n                runtime_args_ptr,\n                runtime_args_size,\n                bytes_written.as_mut_ptr(),\n            )\n        };\n        api_error::result_from(ret).unwrap_or_revert();\n        unsafe { bytes_written.assume_init() }\n    };\n    deserialize_contract_result(bytes_written)\n}\n\nfn deserialize_contract_result<T: CLTyped + FromBytes>(bytes_written: usize) -> T {\n    let serialized_result = if bytes_written == 0 {\n        // If no bytes were written, the host buffer hasn't been set and hence shouldn't be read.\n        vec![]\n    } else {\n        // NOTE: this is a copy of the contents of `read_host_buffer()`.  
Calling that directly from\n        // here causes several contracts to fail with a Wasmi `Unreachable` error.\n        let bytes_non_null_ptr = contract_api::alloc_bytes(bytes_written);\n        let mut dest: Vec<u8> = unsafe {\n            Vec::from_raw_parts(bytes_non_null_ptr.as_ptr(), bytes_written, bytes_written)\n        };\n        read_host_buffer_into(&mut dest).unwrap_or_revert();\n        dest\n    };\n\n    bytesrepr::deserialize(serialized_result).unwrap_or_revert()\n}\n\n/// Returns size in bytes of a given named argument passed to the host for the current module\n/// invocation.\n///\n/// This will return either Some with the size of argument if present, or None if given argument is\n/// not passed.\nfn get_named_arg_size(name: &str) -> Option<usize> {\n    let mut arg_size: usize = 0;\n    let ret = unsafe {\n        ext_ffi::casper_get_named_arg_size(\n            name.as_bytes().as_ptr(),\n            name.len(),\n            &mut arg_size as *mut usize,\n        )\n    };\n    match api_error::result_from(ret) {\n        Ok(_) => Some(arg_size),\n        Err(ApiError::MissingArgument) => None,\n        Err(e) => revert(e),\n    }\n}\n\n/// Returns given named argument passed to the host for the current module invocation.\n///\n/// Note that this is only relevant to contracts stored on-chain since a contract deployed directly\n/// is not invoked with any arguments.\npub fn get_named_arg<T: FromBytes>(name: &str) -> T {\n    let arg_size = get_named_arg_size(name).unwrap_or_revert_with(ApiError::MissingArgument);\n    let arg_bytes = if arg_size > 0 {\n        let res = {\n            let data_non_null_ptr = contract_api::alloc_bytes(arg_size);\n            let ret = unsafe {\n                ext_ffi::casper_get_named_arg(\n                    name.as_bytes().as_ptr(),\n                    name.len(),\n                    data_non_null_ptr.as_ptr(),\n                    arg_size,\n                )\n            };\n            let data =\n         
       unsafe { Vec::from_raw_parts(data_non_null_ptr.as_ptr(), arg_size, arg_size) };\n            api_error::result_from(ret).map(|_| data)\n        };\n        // Assumed to be safe as `get_named_arg_size` checks the argument already\n        res.unwrap_or_revert()\n    } else {\n        // Avoids allocation with 0 bytes and a call to get_named_arg\n        Vec::new()\n    };\n    bytesrepr::deserialize(arg_bytes).unwrap_or_revert_with(ApiError::InvalidArgument)\n}\n\n/// Returns given named argument passed to the host for the current module invocation.\n/// If the argument is not found, returns `None`.\n///\n/// Note that this is only relevant to contracts stored on-chain since a contract deployed directly\n/// is not invoked with any arguments.\npub fn try_get_named_arg<T: FromBytes>(name: &str) -> Option<T> {\n    let arg_size = get_named_arg_size(name)?;\n    let arg_bytes = if arg_size > 0 {\n        let res = {\n            let data_non_null_ptr = contract_api::alloc_bytes(arg_size);\n            let ret = unsafe {\n                ext_ffi::casper_get_named_arg(\n                    name.as_bytes().as_ptr(),\n                    name.len(),\n                    data_non_null_ptr.as_ptr(),\n                    arg_size,\n                )\n            };\n            let data =\n                unsafe { Vec::from_raw_parts(data_non_null_ptr.as_ptr(), arg_size, arg_size) };\n            api_error::result_from(ret).map(|_| data)\n        };\n        // Assumed to be safe as `get_named_arg_size` checks the argument already\n        res.unwrap_or_revert()\n    } else {\n        // Avoids allocation with 0 bytes and a call to get_named_arg\n        Vec::new()\n    };\n    bytesrepr::deserialize(arg_bytes).ok()\n}\n\n/// Returns the caller of the current context, i.e. 
the [`AccountHash`] of the account which made\n/// the deploy request.\npub fn get_caller() -> AccountHash {\n    let output_size = {\n        let mut output_size = MaybeUninit::uninit();\n        let ret = unsafe { ext_ffi::casper_get_caller(output_size.as_mut_ptr()) };\n        api_error::result_from(ret).unwrap_or_revert();\n        unsafe { output_size.assume_init() }\n    };\n    let buf = read_host_buffer(output_size).unwrap_or_revert();\n    bytesrepr::deserialize(buf).unwrap_or_revert()\n}\n\n/// Returns the current [`BlockTime`].\npub fn get_blocktime() -> BlockTime {\n    let dest_non_null_ptr = contract_api::alloc_bytes(BLOCKTIME_SERIALIZED_LENGTH);\n    let bytes = unsafe {\n        ext_ffi::casper_get_blocktime(dest_non_null_ptr.as_ptr());\n        Vec::from_raw_parts(\n            dest_non_null_ptr.as_ptr(),\n            BLOCKTIME_SERIALIZED_LENGTH,\n            BLOCKTIME_SERIALIZED_LENGTH,\n        )\n    };\n    bytesrepr::deserialize(bytes).unwrap_or_revert()\n}\n\n/// The default length of hashes such as account hash, state hash, hash addresses, etc.\npub const DEFAULT_HASH_LENGTH: u8 = 32;\n/// The default size of ProtocolVersion. 
It's 3×u32 (major, minor, patch), so 12 bytes.\npub const PROTOCOL_VERSION_LENGTH: u8 = 12;\n///The default size of the addressable entity flag.\npub const ADDRESSABLE_ENTITY_LENGTH: u8 = 1;\n/// Index for the block time field of block info.\npub const BLOCK_TIME_FIELD_IDX: u8 = 0;\n/// Index for the block height field of block info.\npub const BLOCK_HEIGHT_FIELD_IDX: u8 = 1;\n/// Index for the parent block hash field of block info.\npub const PARENT_BLOCK_HASH_FIELD_IDX: u8 = 2;\n/// Index for the state hash field of block info.\npub const STATE_HASH_FIELD_IDX: u8 = 3;\n/// Index for the protocol version field of block info.\npub const PROTOCOL_VERSION_FIELD_IDX: u8 = 4;\n/// Index for the addressable entity field of block info.\npub const ADDRESSABLE_ENTITY_FIELD_IDX: u8 = 5;\n\n/// Returns the block height.\npub fn get_block_height() -> u64 {\n    let dest_non_null_ptr = contract_api::alloc_bytes(U64_SERIALIZED_LENGTH);\n    let bytes = unsafe {\n        ext_ffi::casper_get_block_info(BLOCK_HEIGHT_FIELD_IDX, dest_non_null_ptr.as_ptr());\n        Vec::from_raw_parts(\n            dest_non_null_ptr.as_ptr(),\n            U64_SERIALIZED_LENGTH,\n            U64_SERIALIZED_LENGTH,\n        )\n    };\n    bytesrepr::deserialize(bytes).unwrap_or_revert()\n}\n\n/// Returns the parent block hash.\npub fn get_parent_block_hash() -> Digest {\n    let dest_non_null_ptr = contract_api::alloc_bytes(DEFAULT_HASH_LENGTH as usize);\n    let bytes = unsafe {\n        ext_ffi::casper_get_block_info(PARENT_BLOCK_HASH_FIELD_IDX, dest_non_null_ptr.as_ptr());\n        Vec::from_raw_parts(\n            dest_non_null_ptr.as_ptr(),\n            DEFAULT_HASH_LENGTH as usize,\n            DEFAULT_HASH_LENGTH as usize,\n        )\n    };\n    bytesrepr::deserialize(bytes).unwrap_or_revert()\n}\n\n/// Returns the state root hash.\npub fn get_state_hash() -> Digest {\n    let dest_non_null_ptr = contract_api::alloc_bytes(DEFAULT_HASH_LENGTH as usize);\n    let bytes = unsafe {\n        
ext_ffi::casper_get_block_info(STATE_HASH_FIELD_IDX, dest_non_null_ptr.as_ptr());\n        Vec::from_raw_parts(\n            dest_non_null_ptr.as_ptr(),\n            DEFAULT_HASH_LENGTH as usize,\n            DEFAULT_HASH_LENGTH as usize,\n        )\n    };\n    bytesrepr::deserialize(bytes).unwrap_or_revert()\n}\n\n/// Returns the protocol version.\npub fn get_protocol_version() -> ProtocolVersion {\n    let dest_non_null_ptr = contract_api::alloc_bytes(PROTOCOL_VERSION_LENGTH as usize);\n    let bytes = unsafe {\n        ext_ffi::casper_get_block_info(PROTOCOL_VERSION_FIELD_IDX, dest_non_null_ptr.as_ptr());\n        Vec::from_raw_parts(\n            dest_non_null_ptr.as_ptr(),\n            PROTOCOL_VERSION_LENGTH as usize,\n            PROTOCOL_VERSION_LENGTH as usize,\n        )\n    };\n    bytesrepr::deserialize(bytes).unwrap_or_revert()\n}\n\n/// Returns whether or not the addressable entity is turned on.\npub fn get_addressable_entity() -> bool {\n    let dest_non_null_ptr = contract_api::alloc_bytes(ADDRESSABLE_ENTITY_LENGTH as usize);\n    let bytes = unsafe {\n        ext_ffi::casper_get_block_info(ADDRESSABLE_ENTITY_FIELD_IDX, dest_non_null_ptr.as_ptr());\n        Vec::from_raw_parts(\n            dest_non_null_ptr.as_ptr(),\n            ADDRESSABLE_ENTITY_LENGTH as usize,\n            ADDRESSABLE_ENTITY_LENGTH as usize,\n        )\n    };\n    bytesrepr::deserialize(bytes).unwrap_or_revert()\n}\n\n/// Returns the current [`Phase`].\npub fn get_phase() -> Phase {\n    let dest_non_null_ptr = contract_api::alloc_bytes(PHASE_SERIALIZED_LENGTH);\n    unsafe { ext_ffi::casper_get_phase(dest_non_null_ptr.as_ptr()) };\n    let bytes = unsafe {\n        Vec::from_raw_parts(\n            dest_non_null_ptr.as_ptr(),\n            PHASE_SERIALIZED_LENGTH,\n            PHASE_SERIALIZED_LENGTH,\n        )\n    };\n    bytesrepr::deserialize(bytes).unwrap_or_revert()\n}\n\n/// Returns the requested named [`Key`] from the current context.\n///\n/// The current context 
is either the caller's account or a stored contract depending on whether the\n/// currently-executing module is a direct call or a sub-call respectively.\npub fn get_key(name: &str) -> Option<Key> {\n    let (name_ptr, name_size, _bytes) = contract_api::to_ptr(name);\n    let mut key_bytes = vec![0u8; Key::max_serialized_length()];\n    let mut total_bytes: usize = 0;\n    let ret = unsafe {\n        ext_ffi::casper_get_key(\n            name_ptr,\n            name_size,\n            key_bytes.as_mut_ptr(),\n            key_bytes.len(),\n            &mut total_bytes as *mut usize,\n        )\n    };\n    match api_error::result_from(ret) {\n        Ok(_) => {}\n        Err(ApiError::MissingKey) => return None,\n        Err(e) => revert(e),\n    }\n    key_bytes.truncate(total_bytes);\n    let key: Key = bytesrepr::deserialize(key_bytes).unwrap_or_revert();\n    Some(key)\n}\n\n/// Returns `true` if `name` exists in the current context's named keys.\n///\n/// The current context is either the caller's account or a stored contract depending on whether the\n/// currently-executing module is a direct call or a sub-call respectively.\npub fn has_key(name: &str) -> bool {\n    let (name_ptr, name_size, _bytes) = contract_api::to_ptr(name);\n    let result = unsafe { ext_ffi::casper_has_key(name_ptr, name_size) };\n    result == 0\n}\n\n/// Stores the given [`Key`] under `name` in the current context's named keys.\n///\n/// The current context is either the caller's account or a stored contract depending on whether the\n/// currently-executing module is a direct call or a sub-call respectively.\npub fn put_key(name: &str, key: Key) {\n    let (name_ptr, name_size, _bytes) = contract_api::to_ptr(name);\n    let (key_ptr, key_size, _bytes2) = contract_api::to_ptr(key);\n    unsafe { ext_ffi::casper_put_key(name_ptr, name_size, key_ptr, key_size) };\n}\n\n/// Removes the [`Key`] stored under `name` in the current context's named keys.\n///\n/// The current context is either 
the caller's account or a stored contract depending on whether the\n/// currently-executing module is a direct call or a sub-call respectively.\npub fn remove_key(name: &str) {\n    let (name_ptr, name_size, _bytes) = contract_api::to_ptr(name);\n    unsafe { ext_ffi::casper_remove_key(name_ptr, name_size) }\n}\n\n/// Returns the set of [`AccountHash`] from the calling account's context `authorization_keys`.\npub fn list_authorization_keys() -> BTreeSet<AccountHash> {\n    let (total_authorization_keys, result_size) = {\n        let mut authorization_keys = MaybeUninit::uninit();\n        let mut result_size = MaybeUninit::uninit();\n        let ret = unsafe {\n            ext_ffi::casper_load_authorization_keys(\n                authorization_keys.as_mut_ptr(),\n                result_size.as_mut_ptr(),\n            )\n        };\n        api_error::result_from(ret).unwrap_or_revert();\n        let total_authorization_keys = unsafe { authorization_keys.assume_init() };\n        let result_size = unsafe { result_size.assume_init() };\n        (total_authorization_keys, result_size)\n    };\n\n    if total_authorization_keys == 0 {\n        return BTreeSet::new();\n    }\n\n    let bytes = read_host_buffer(result_size).unwrap_or_revert();\n    bytesrepr::deserialize(bytes).unwrap_or_revert()\n}\n\n/// Returns the named keys of the current context.\n///\n/// The current context is either the caller's account or a stored contract depending on whether the\n/// currently-executing module is a direct call or a sub-call respectively.\npub fn list_named_keys() -> NamedKeys {\n    let (total_keys, result_size) = {\n        let mut total_keys = MaybeUninit::uninit();\n        let mut result_size = 0;\n        let ret = unsafe {\n            ext_ffi::casper_load_named_keys(total_keys.as_mut_ptr(), &mut result_size as *mut usize)\n        };\n        api_error::result_from(ret).unwrap_or_revert();\n        let total_keys = unsafe { total_keys.assume_init() };\n        
(total_keys, result_size)\n    };\n    if total_keys == 0 {\n        return NamedKeys::new();\n    }\n    let bytes = read_host_buffer(result_size).unwrap_or_revert();\n    bytesrepr::deserialize(bytes).unwrap_or_revert()\n}\n\n/// Validates uref against named keys.\npub fn is_valid_uref(uref: URef) -> bool {\n    let (uref_ptr, uref_size, _bytes) = contract_api::to_ptr(uref);\n    let result = unsafe { ext_ffi::casper_is_valid_uref(uref_ptr, uref_size) };\n    result != 0\n}\n\n/// Returns a 32-byte BLAKE2b digest\npub fn blake2b<T: AsRef<[u8]>>(input: T) -> [u8; BLAKE2B_DIGEST_LENGTH] {\n    let mut ret = [0; BLAKE2B_DIGEST_LENGTH];\n    let result = unsafe {\n        ext_ffi::casper_generic_hash(\n            input.as_ref().as_ptr(),\n            input.as_ref().len(),\n            HashAlgorithm::Blake2b as u8,\n            ret.as_mut_ptr(),\n            BLAKE2B_DIGEST_LENGTH,\n        )\n    };\n    api_error::result_from(result).unwrap_or_revert();\n    ret\n}\n\n/// Returns 32 pseudo random bytes.\npub fn random_bytes() -> [u8; RANDOM_BYTES_COUNT] {\n    let mut ret = [0; RANDOM_BYTES_COUNT];\n    let result = unsafe { ext_ffi::casper_random_bytes(ret.as_mut_ptr(), RANDOM_BYTES_COUNT) };\n    api_error::result_from(result).unwrap_or_revert();\n    ret\n}\n\nfn read_host_buffer_into(dest: &mut [u8]) -> Result<usize, ApiError> {\n    let mut bytes_written = MaybeUninit::uninit();\n    let ret = unsafe {\n        ext_ffi::casper_read_host_buffer(dest.as_mut_ptr(), dest.len(), bytes_written.as_mut_ptr())\n    };\n    // NOTE: When rewriting below expression as `result_from(ret).map(|_| unsafe { ... 
})`, and the\n    // caller ignores the return value, execution of the contract becomes unstable and ultimately\n    // leads to `Unreachable` error.\n    api_error::result_from(ret)?;\n    Ok(unsafe { bytes_written.assume_init() })\n}\n\npub(crate) fn read_host_buffer(size: usize) -> Result<Vec<u8>, ApiError> {\n    let mut dest: Vec<u8> = if size == 0 {\n        Vec::new()\n    } else {\n        let bytes_non_null_ptr = contract_api::alloc_bytes(size);\n        unsafe { Vec::from_raw_parts(bytes_non_null_ptr.as_ptr(), size, size) }\n    };\n    read_host_buffer_into(&mut dest)?;\n    Ok(dest)\n}\n\n/// Returns the call stack.\npub fn get_call_stack() -> Vec<CallerInfo> {\n    let (call_stack_len, result_size) = {\n        let mut call_stack_len: usize = 0;\n        let mut result_size: usize = 0;\n        let ret = unsafe {\n            ext_ffi::casper_load_caller_information(\n                CallerIndex::FullStack as u8,\n                &mut call_stack_len as *mut usize,\n                &mut result_size as *mut usize,\n            )\n        };\n        api_error::result_from(ret).unwrap_or_revert();\n        (call_stack_len, result_size)\n    };\n    if call_stack_len == 0 {\n        return Vec::new();\n    }\n    let bytes = read_host_buffer(result_size).unwrap_or_revert();\n    bytesrepr::deserialize(bytes).unwrap_or_revert()\n}\n\nfn get_initiator_or_immediate(action: u8) -> Result<CallerInfo, ApiError> {\n    let (call_stack_len, result_size) = {\n        let mut call_stack_len: usize = 0;\n        let mut result_size: usize = 0;\n        let ret = unsafe {\n            ext_ffi::casper_load_caller_information(\n                action,\n                &mut call_stack_len as *mut usize,\n                &mut result_size as *mut usize,\n            )\n        };\n        api_error::result_from(ret).unwrap_or_revert();\n        (call_stack_len, result_size)\n    };\n    if call_stack_len == 0 {\n        return Err(ApiError::InvalidCallerInfoRequest);\n    
}\n    let bytes = read_host_buffer(result_size).unwrap_or_revert();\n    let caller: Vec<CallerInfo> = bytesrepr::deserialize(bytes).unwrap_or_revert();\n\n    if caller.len() != 1 {\n        return Err(ApiError::Unhandled);\n    };\n    let first = caller.first().unwrap_or_revert().clone();\n    Ok(first)\n}\n\n/// Returns the call stack initiator\npub fn get_call_initiator() -> Result<AccountHash, ApiError> {\n    let caller = get_initiator_or_immediate(CallerIndex::Initiator as u8)?;\n    if caller.kind() != ACCOUNT {\n        return Err(ApiError::Unhandled);\n    };\n    if let Some(cl_value) = caller.get_field_by_index(ACCOUNT) {\n        let maybe_account_hash = cl_value\n            .to_t::<Option<AccountHash>>()\n            .map_err(|_| ApiError::CLTypeMismatch)?;\n        match maybe_account_hash {\n            Some(hash) => Ok(hash),\n            None => Err(ApiError::None),\n        }\n    } else {\n        Err(ApiError::PurseNotCreated)\n    }\n}\n\n/// Returns the immediate caller within the call stack.\npub fn get_immediate_caller() -> Result<CallerInfo, ApiError> {\n    get_initiator_or_immediate(CallerIndex::Immediate as u8)\n}\n\n/// Manages a message topic.\npub fn manage_message_topic(\n    topic_name: &str,\n    operation: MessageTopicOperation,\n) -> Result<(), ApiError> {\n    if topic_name.is_empty() {\n        return Err(ApiError::InvalidArgument);\n    }\n\n    let (operation_ptr, operation_size, _bytes) = contract_api::to_ptr(operation);\n    let result = unsafe {\n        ext_ffi::casper_manage_message_topic(\n            topic_name.as_ptr(),\n            topic_name.len(),\n            operation_ptr,\n            operation_size,\n        )\n    };\n    api_error::result_from(result)\n}\n\n/// Emits a message on a topic.\npub fn emit_message(topic_name: &str, message: &MessagePayload) -> Result<(), ApiError> {\n    if topic_name.is_empty() {\n        return Err(ApiError::InvalidArgument);\n    }\n\n    let (message_ptr, message_size, 
_bytes) = contract_api::to_ptr(message);\n\n    let result = unsafe {\n        ext_ffi::casper_emit_message(\n            topic_name.as_ptr(),\n            topic_name.len(),\n            message_ptr,\n            message_size,\n        )\n    };\n\n    api_error::result_from(result)\n}\n\n#[cfg(feature = \"test-support\")]\n/// Prints a debug message\npub fn print(text: &str) {\n    let (text_ptr, text_size, _bytes) = contract_api::to_ptr(text);\n    unsafe { ext_ffi::casper_print(text_ptr, text_size) }\n}\n"
  },
  {
    "path": "smart_contracts/contract/src/contract_api/storage.rs",
    "content": "//! Functions for accessing and mutating local and global state.\n\nuse alloc::{\n    collections::{BTreeMap, BTreeSet},\n    format,\n    string::String,\n    vec,\n    vec::Vec,\n};\nuse core::{convert::From, mem::MaybeUninit};\n\nuse casper_types::{\n    addressable_entity::EntryPoints,\n    api_error,\n    bytesrepr::{self, FromBytes, ToBytes},\n    contract_messages::MessageTopicOperation,\n    contracts::{ContractHash, ContractPackageHash, ContractVersion, NamedKeys},\n    AccessRights, ApiError, CLTyped, CLValue, EntityVersion, HashAddr, Key, URef,\n    DICTIONARY_ITEM_KEY_MAX_LENGTH, UREF_SERIALIZED_LENGTH,\n};\n\nuse crate::{\n    contract_api::{self, runtime, runtime::revert},\n    ext_ffi,\n    unwrap_or_revert::UnwrapOrRevert,\n};\n\n/// Reads value under `uref` in the global state.\npub fn read<T: CLTyped + FromBytes>(uref: URef) -> Result<Option<T>, bytesrepr::Error> {\n    let key: Key = uref.into();\n    read_from_key(key)\n}\n\n/// Reads value under `key` in the global state.\npub fn read_from_key<T: CLTyped + FromBytes>(key: Key) -> Result<Option<T>, bytesrepr::Error> {\n    let (key_ptr, key_size, _bytes) = contract_api::to_ptr(key);\n\n    let value_size = {\n        let mut value_size = MaybeUninit::uninit();\n        let ret = unsafe { ext_ffi::casper_read_value(key_ptr, key_size, value_size.as_mut_ptr()) };\n        match api_error::result_from(ret) {\n            Ok(_) => unsafe { value_size.assume_init() },\n            Err(ApiError::ValueNotFound) => return Ok(None),\n            Err(e) => runtime::revert(e),\n        }\n    };\n\n    let value_bytes = runtime::read_host_buffer(value_size).unwrap_or_revert();\n    Ok(Some(bytesrepr::deserialize(value_bytes)?))\n}\n\n/// Reads value under `uref` in the global state, reverts if value not found or is not `T`.\npub fn read_or_revert<T: CLTyped + FromBytes>(uref: URef) -> T {\n    read(uref)\n        .unwrap_or_revert_with(ApiError::Read)\n        
.unwrap_or_revert_with(ApiError::ValueNotFound)\n}\n\n/// Writes `value` under `uref` in the global state.\npub fn write<T: CLTyped + ToBytes>(uref: URef, value: T) {\n    let key = Key::from(uref);\n    let (key_ptr, key_size, _bytes1) = contract_api::to_ptr(key);\n\n    let cl_value = CLValue::from_t(value).unwrap_or_revert();\n    let (cl_value_ptr, cl_value_size, _bytes2) = contract_api::to_ptr(cl_value);\n\n    unsafe {\n        ext_ffi::casper_write(key_ptr, key_size, cl_value_ptr, cl_value_size);\n    }\n}\n\n/// Adds `value` to the one currently under `uref` in the global state.\npub fn add<T: CLTyped + ToBytes>(uref: URef, value: T) {\n    let key = Key::from(uref);\n    let (key_ptr, key_size, _bytes1) = contract_api::to_ptr(key);\n\n    let cl_value = CLValue::from_t(value).unwrap_or_revert();\n    let (cl_value_ptr, cl_value_size, _bytes2) = contract_api::to_ptr(cl_value);\n\n    unsafe {\n        // Could panic if `value` cannot be added to the given value in memory.\n        ext_ffi::casper_add(key_ptr, key_size, cl_value_ptr, cl_value_size);\n    }\n}\n\n/// Returns a new unforgeable pointer, where the value is initialized to `init`.\npub fn new_uref<T: CLTyped + ToBytes>(init: T) -> URef {\n    let uref_non_null_ptr = contract_api::alloc_bytes(UREF_SERIALIZED_LENGTH);\n    let cl_value = CLValue::from_t(init).unwrap_or_revert();\n    let (cl_value_ptr, cl_value_size, _cl_value_bytes) = contract_api::to_ptr(cl_value);\n    let bytes = unsafe {\n        ext_ffi::casper_new_uref(uref_non_null_ptr.as_ptr(), cl_value_ptr, cl_value_size); // URef has `READ_ADD_WRITE`\n        Vec::from_raw_parts(\n            uref_non_null_ptr.as_ptr(),\n            UREF_SERIALIZED_LENGTH,\n            UREF_SERIALIZED_LENGTH,\n        )\n    };\n    bytesrepr::deserialize(bytes).unwrap_or_revert()\n}\n\n/// Create a new contract stored under a Key::Hash at version 1. You may upgrade this contract in\n/// the future; if you want a contract that is locked (i.e. 
cannot be upgraded) call\n/// `new_locked_contract` instead.\n/// if `named_keys` is provided, puts all of the included named keys into the newly created\n///     contract version's named keys.\n/// if `hash_name` is provided, puts Key::Hash(contract_package_hash) into the\n///     installing account's named keys under `hash_name`.\n/// if `uref_name` is provided, puts Key::URef(access_uref) into the installing account's named\n///     keys under `uref_name`\npub fn new_contract(\n    entry_points: EntryPoints,\n    named_keys: Option<NamedKeys>,\n    hash_name: Option<String>,\n    uref_name: Option<String>,\n    message_topics: Option<BTreeMap<String, MessageTopicOperation>>,\n) -> (ContractHash, EntityVersion) {\n    create_contract(\n        entry_points,\n        named_keys,\n        hash_name,\n        uref_name,\n        message_topics,\n        false,\n    )\n}\n\n/// Create a locked contract stored under a Key::Hash, which can never be upgraded. This is an\n/// irreversible decision; for a contract that can be upgraded use `new_contract` instead.\n/// if `named_keys` is provided, puts all of the included named keys into the newly created\n///     contract version's named keys.\n/// if `hash_name` is provided, puts Key::Hash(contract_package_hash) into the\n///     installing account's named keys under `hash_name`.\n/// if `uref_name` is provided, puts Key::URef(access_uref) into the installing account's named\n///     keys under `uref_name`\npub fn new_locked_contract(\n    entry_points: EntryPoints,\n    named_keys: Option<NamedKeys>,\n    hash_name: Option<String>,\n    uref_name: Option<String>,\n    message_topics: Option<BTreeMap<String, MessageTopicOperation>>,\n) -> (ContractHash, EntityVersion) {\n    create_contract(\n        entry_points,\n        named_keys,\n        hash_name,\n        uref_name,\n        message_topics,\n        true,\n    )\n}\n\nfn create_contract(\n    entry_points: EntryPoints,\n    named_keys: Option<NamedKeys>,\n    
hash_name: Option<String>,\n    uref_name: Option<String>,\n    message_topics: Option<BTreeMap<String, MessageTopicOperation>>,\n    is_locked: bool,\n) -> (ContractHash, EntityVersion) {\n    let (contract_package_hash, access_uref) = create_contract_package(is_locked);\n\n    if let Some(hash_name) = hash_name {\n        runtime::put_key(&hash_name, Key::Hash(contract_package_hash.value()));\n    };\n\n    if let Some(uref_name) = uref_name {\n        runtime::put_key(&uref_name, access_uref.into());\n    };\n\n    let named_keys = named_keys.unwrap_or_default();\n\n    let message_topics = message_topics.unwrap_or_default();\n\n    add_contract_version(\n        contract_package_hash,\n        entry_points,\n        named_keys,\n        message_topics,\n    )\n}\n\n/// Create a new (versioned) contract stored under a Key::Hash. Initially there\n/// are no versions; a version must be added via `add_contract_version` before\n/// the contract can be executed.\npub fn create_contract_package_at_hash() -> (ContractPackageHash, URef) {\n    create_contract_package(false)\n}\n\nfn create_contract_package(is_locked: bool) -> (ContractPackageHash, URef) {\n    let mut hash_addr: HashAddr = ContractPackageHash::default().value();\n    let mut access_addr = [0u8; 32];\n    unsafe {\n        ext_ffi::casper_create_contract_package_at_hash(\n            hash_addr.as_mut_ptr(),\n            access_addr.as_mut_ptr(),\n            is_locked,\n        );\n    }\n    let contract_package_hash: ContractPackageHash = hash_addr.into();\n    let access_uref = URef::new(access_addr, AccessRights::READ_ADD_WRITE);\n\n    (contract_package_hash, access_uref)\n}\n\n/// Create a new \"user group\" for a (versioned) contract. User groups associate\n/// a set of URefs with a label. 
Entry points on a contract can be given a list of\n/// labels they accept and the runtime will check that a URef from at least one\n/// of the allowed groups is present in the caller's context before\n/// execution. This allows access control for entry_points of a contract. This\n/// function returns the list of new URefs created for the group (the list will\n/// contain `num_new_urefs` elements).\npub fn create_contract_user_group(\n    contract_package_hash: ContractPackageHash,\n    group_label: &str,\n    num_new_urefs: u8, // number of new urefs to populate the group with\n    existing_urefs: BTreeSet<URef>, // also include these existing urefs in the group\n) -> Result<Vec<URef>, ApiError> {\n    let (contract_package_hash_ptr, contract_package_hash_size, _bytes1) =\n        contract_api::to_ptr(contract_package_hash);\n    let (label_ptr, label_size, _bytes3) = contract_api::to_ptr(group_label);\n    let (existing_urefs_ptr, existing_urefs_size, _bytes4) = contract_api::to_ptr(existing_urefs);\n\n    let value_size = {\n        let mut output_size = MaybeUninit::uninit();\n        let ret = unsafe {\n            ext_ffi::casper_create_contract_user_group(\n                contract_package_hash_ptr,\n                contract_package_hash_size,\n                label_ptr,\n                label_size,\n                num_new_urefs,\n                existing_urefs_ptr,\n                existing_urefs_size,\n                output_size.as_mut_ptr(),\n            )\n        };\n        api_error::result_from(ret).unwrap_or_revert();\n        unsafe { output_size.assume_init() }\n    };\n\n    let value_bytes = runtime::read_host_buffer(value_size).unwrap_or_revert();\n    Ok(bytesrepr::deserialize(value_bytes).unwrap_or_revert())\n}\n\n/// Extends specified group with a new `URef`.\npub fn provision_contract_user_group_uref(\n    package_hash: ContractPackageHash,\n    label: &str,\n) -> Result<URef, ApiError> {\n    let (contract_package_hash_ptr, 
contract_package_hash_size, _bytes1) =\n        contract_api::to_ptr(package_hash);\n    let (label_ptr, label_size, _bytes2) = contract_api::to_ptr(label);\n    let value_size = {\n        let mut value_size = MaybeUninit::uninit();\n        let ret = unsafe {\n            ext_ffi::casper_provision_contract_user_group_uref(\n                contract_package_hash_ptr,\n                contract_package_hash_size,\n                label_ptr,\n                label_size,\n                value_size.as_mut_ptr(),\n            )\n        };\n        api_error::result_from(ret)?;\n        unsafe { value_size.assume_init() }\n    };\n    let value_bytes = runtime::read_host_buffer(value_size).unwrap_or_revert();\n    Ok(bytesrepr::deserialize(value_bytes).unwrap_or_revert())\n}\n\n/// Removes specified urefs from a named group.\npub fn remove_contract_user_group_urefs(\n    package_hash: ContractPackageHash,\n    label: &str,\n    urefs: BTreeSet<URef>,\n) -> Result<(), ApiError> {\n    let (contract_package_hash_ptr, contract_package_hash_size, _bytes1) =\n        contract_api::to_ptr(package_hash);\n    let (label_ptr, label_size, _bytes3) = contract_api::to_ptr(label);\n    let (urefs_ptr, urefs_size, _bytes4) = contract_api::to_ptr(urefs);\n    let ret = unsafe {\n        ext_ffi::casper_remove_contract_user_group_urefs(\n            contract_package_hash_ptr,\n            contract_package_hash_size,\n            label_ptr,\n            label_size,\n            urefs_ptr,\n            urefs_size,\n        )\n    };\n    api_error::result_from(ret)\n}\n\n/// Remove a named group from given contract.\npub fn remove_contract_user_group(\n    package_hash: ContractPackageHash,\n    label: &str,\n) -> Result<(), ApiError> {\n    let (contract_package_hash_ptr, contract_package_hash_size, _bytes1) =\n        contract_api::to_ptr(package_hash);\n    let (label_ptr, label_size, _bytes3) = contract_api::to_ptr(label);\n    let ret = unsafe {\n        
ext_ffi::casper_remove_contract_user_group(\n            contract_package_hash_ptr,\n            contract_package_hash_size,\n            label_ptr,\n            label_size,\n        )\n    };\n    api_error::result_from(ret)\n}\n\n/// Add version to existing Package.\npub fn add_contract_version(\n    package_hash: ContractPackageHash,\n    entry_points: EntryPoints,\n    named_keys: NamedKeys,\n    message_topics: BTreeMap<String, MessageTopicOperation>,\n) -> (ContractHash, EntityVersion) {\n    // Retain the underscore as Wasm transpiliation requires it.\n    let (package_hash_ptr, package_hash_size, _package_hash_bytes) =\n        contract_api::to_ptr(package_hash);\n    let (entry_points_ptr, entry_points_size, _entry_point_bytes) =\n        contract_api::to_ptr(entry_points);\n    let (named_keys_ptr, named_keys_size, _named_keys_bytes) = contract_api::to_ptr(named_keys);\n    let (message_topics_ptr, message_topics_size, _message_topics) =\n        contract_api::to_ptr(message_topics);\n\n    let mut output_ptr = vec![0u8; 32];\n    // let mut total_bytes: usize = 0;\n\n    let mut entity_version: ContractVersion = 0;\n\n    let ret = unsafe {\n        ext_ffi::casper_add_contract_version_with_message_topics(\n            package_hash_ptr,\n            package_hash_size,\n            &mut entity_version as *mut ContractVersion, // Fixed width\n            entry_points_ptr,\n            entry_points_size,\n            named_keys_ptr,\n            named_keys_size,\n            message_topics_ptr,\n            message_topics_size,\n            output_ptr.as_mut_ptr(),\n            output_ptr.len(),\n            // &mut total_bytes as *mut usize,\n        )\n    };\n    match api_error::result_from(ret) {\n        Ok(_) => {}\n        Err(e) => revert(e),\n    }\n    // output_ptr.truncate(32usize);\n    let entity_hash: ContractHash = match bytesrepr::deserialize(output_ptr) {\n        Ok(hash) => hash,\n        Err(err) => panic!(\"{}\", format!(\"{:?}\", 
err)),\n    };\n    (entity_hash, entity_version)\n}\n\n/// Disables a specific version of a contract within the contract package identified by\n/// `contract_package_hash`. Once disabled, the specified version will no longer be\n/// callable by `call_versioned_contract`. Please note that the contract must have been\n/// previously created using `create_contract` or `create_contract_package_at_hash`.\n///\n/// # Arguments\n///\n/// * `contract_package_hash` - The hash of the contract package containing the version to be\n///   disabled.\n/// * `contract_hash` - The hash of the specific contract version to be disabled.\n///\n/// # Errors\n///\n/// Returns a `Result` indicating success or an `ApiError` if the operation fails.\npub fn disable_contract_version(\n    contract_package_hash: ContractPackageHash,\n    contract_hash: ContractHash,\n) -> Result<(), ApiError> {\n    let (contract_package_hash_ptr, contract_package_hash_size, _bytes1) =\n        contract_api::to_ptr(contract_package_hash);\n    let (contract_hash_ptr, contract_hash_size, _bytes2) = contract_api::to_ptr(contract_hash);\n\n    let result = unsafe {\n        ext_ffi::casper_disable_contract_version(\n            contract_package_hash_ptr,\n            contract_package_hash_size,\n            contract_hash_ptr,\n            contract_hash_size,\n        )\n    };\n\n    api_error::result_from(result)\n}\n\n/// Enables a specific version of a contract from the contract package stored at the given hash.\n/// Once enabled, that version of the contract becomes callable again by `call_versioned_contract`.\n///\n/// # Arguments\n///\n/// * `contract_package_hash` - The hash of the contract package containing the desired version.\n/// * `contract_hash` - The hash of the specific contract version to be enabled.\n///\n/// # Errors\n///\n/// Returns a `Result` indicating success or an `ApiError` if the operation fails.\npub fn enable_contract_version(\n    contract_package_hash: ContractPackageHash,\n    
contract_hash: ContractHash,\n) -> Result<(), ApiError> {\n    let (contract_package_hash_ptr, contract_package_hash_size, _bytes1) =\n        contract_api::to_ptr(contract_package_hash);\n    let (contract_hash_ptr, contract_hash_size, _bytes2) = contract_api::to_ptr(contract_hash);\n\n    let result = unsafe {\n        ext_ffi::casper_enable_contract_version(\n            contract_package_hash_ptr,\n            contract_package_hash_size,\n            contract_hash_ptr,\n            contract_hash_size,\n        )\n    };\n\n    api_error::result_from(result)\n}\n\n/// Creates new [`URef`] that represents a seed for a dictionary partition of the global state and\n/// puts it under named keys.\npub fn new_dictionary(dictionary_name: &str) -> Result<URef, ApiError> {\n    if dictionary_name.is_empty() || runtime::has_key(dictionary_name) {\n        return Err(ApiError::InvalidArgument);\n    }\n\n    let value_size = {\n        let mut value_size = MaybeUninit::uninit();\n        let ret = unsafe { ext_ffi::casper_new_dictionary(value_size.as_mut_ptr()) };\n        api_error::result_from(ret)?;\n        unsafe { value_size.assume_init() }\n    };\n    let value_bytes = runtime::read_host_buffer(value_size).unwrap_or_revert();\n    let uref: URef = bytesrepr::deserialize(value_bytes).unwrap_or_revert();\n    runtime::put_key(dictionary_name, Key::from(uref));\n    Ok(uref)\n}\n\n/// Retrieve `value` stored under `dictionary_item_key` in the dictionary accessed by\n/// `dictionary_seed_uref`.\npub fn dictionary_get<V: CLTyped + FromBytes>(\n    dictionary_seed_uref: URef,\n    dictionary_item_key: &str,\n) -> Result<Option<V>, bytesrepr::Error> {\n    let (uref_ptr, uref_size, _bytes1) = contract_api::to_ptr(dictionary_seed_uref);\n    let (dictionary_item_key_ptr, dictionary_item_key_size) =\n        contract_api::dictionary_item_key_to_ptr(dictionary_item_key);\n\n    if dictionary_item_key_size > DICTIONARY_ITEM_KEY_MAX_LENGTH {\n        
revert(ApiError::DictionaryItemKeyExceedsLength)\n    }\n\n    let value_size = {\n        let mut value_size = MaybeUninit::uninit();\n        let ret = unsafe {\n            ext_ffi::casper_dictionary_get(\n                uref_ptr,\n                uref_size,\n                dictionary_item_key_ptr,\n                dictionary_item_key_size,\n                value_size.as_mut_ptr(),\n            )\n        };\n        match api_error::result_from(ret) {\n            Ok(_) => unsafe { value_size.assume_init() },\n            Err(ApiError::ValueNotFound) => return Ok(None),\n            Err(e) => runtime::revert(e),\n        }\n    };\n\n    let value_bytes = runtime::read_host_buffer(value_size).unwrap_or_revert();\n    Ok(Some(bytesrepr::deserialize(value_bytes)?))\n}\n\n/// Writes `value` under `dictionary_item_key` in the dictionary accessed by `dictionary_seed_uref`.\npub fn dictionary_put<V: CLTyped + ToBytes>(\n    dictionary_seed_uref: URef,\n    dictionary_item_key: &str,\n    value: V,\n) {\n    let (uref_ptr, uref_size, _bytes1) = contract_api::to_ptr(dictionary_seed_uref);\n    let (dictionary_item_key_ptr, dictionary_item_key_size) =\n        contract_api::dictionary_item_key_to_ptr(dictionary_item_key);\n\n    if dictionary_item_key_size > DICTIONARY_ITEM_KEY_MAX_LENGTH {\n        revert(ApiError::DictionaryItemKeyExceedsLength)\n    }\n\n    let cl_value = CLValue::from_t(value).unwrap_or_revert();\n    let (cl_value_ptr, cl_value_size, _bytes) = contract_api::to_ptr(cl_value);\n\n    let result = unsafe {\n        let ret = ext_ffi::casper_dictionary_put(\n            uref_ptr,\n            uref_size,\n            dictionary_item_key_ptr,\n            dictionary_item_key_size,\n            cl_value_ptr,\n            cl_value_size,\n        );\n        api_error::result_from(ret)\n    };\n\n    result.unwrap_or_revert()\n}\n\n/// Reads value under `dictionary_key` in the global state.\npub fn dictionary_read<T: CLTyped + FromBytes>(dictionary_key: 
Key) -> Result<Option<T>, ApiError> {\n    if !dictionary_key.is_dictionary_key() {\n        return Err(ApiError::UnexpectedKeyVariant);\n    }\n\n    let (key_ptr, key_size, _bytes) = contract_api::to_ptr(dictionary_key);\n\n    let value_size = {\n        let mut value_size = MaybeUninit::uninit();\n        let ret =\n            unsafe { ext_ffi::casper_dictionary_read(key_ptr, key_size, value_size.as_mut_ptr()) };\n        match api_error::result_from(ret) {\n            Ok(_) => unsafe { value_size.assume_init() },\n            Err(ApiError::ValueNotFound) => return Ok(None),\n            Err(e) => runtime::revert(e),\n        }\n    };\n\n    let value_bytes = runtime::read_host_buffer(value_size).unwrap_or_revert();\n    Ok(Some(bytesrepr::deserialize(value_bytes)?))\n}\n\nfn get_named_uref(name: &str) -> URef {\n    match runtime::get_key(name).unwrap_or_revert_with(ApiError::GetKey) {\n        Key::URef(uref) => uref,\n        _ => revert(ApiError::UnexpectedKeyVariant),\n    }\n}\n\n/// Gets a value out of a named dictionary.\npub fn named_dictionary_get<V: CLTyped + FromBytes>(\n    dictionary_name: &str,\n    dictionary_item_key: &str,\n) -> Result<Option<V>, bytesrepr::Error> {\n    dictionary_get(get_named_uref(dictionary_name), dictionary_item_key)\n}\n\n/// Writes a value in a named dictionary.\npub fn named_dictionary_put<V: CLTyped + ToBytes>(\n    dictionary_name: &str,\n    dictionary_item_key: &str,\n    value: V,\n) {\n    dictionary_put(get_named_uref(dictionary_name), dictionary_item_key, value)\n}\n"
  },
  {
    "path": "smart_contracts/contract/src/contract_api/system.rs",
    "content": "//! Functions for interacting with the system contracts.\n\nuse alloc::vec::Vec;\nuse core::mem::MaybeUninit;\n\nuse casper_types::{\n    account::AccountHash, api_error, bytesrepr, contracts::ContractHash, system::SystemEntityType,\n    ApiError, HashAddr, PublicKey, TransferResult, TransferredTo, URef, U512,\n    UREF_SERIALIZED_LENGTH,\n};\n\nuse crate::{\n    contract_api::{self, account, runtime},\n    ext_ffi,\n    unwrap_or_revert::UnwrapOrRevert,\n};\n\nfn get_system_contract(system_contract: SystemEntityType) -> ContractHash {\n    let system_contract_index = system_contract.into();\n    let contract_hash: ContractHash = {\n        let result = {\n            let mut hash_data_raw: HashAddr = ContractHash::default().value();\n            let value = unsafe {\n                ext_ffi::casper_get_system_contract(\n                    system_contract_index,\n                    hash_data_raw.as_mut_ptr(),\n                    hash_data_raw.len(),\n                )\n            };\n            api_error::result_from(value).map(|_| hash_data_raw)\n        };\n        // Revert for any possible error that happened on host side\n        #[allow(clippy::redundant_closure)] // false positive\n        let contract_hash_bytes = result.unwrap_or_else(|e| runtime::revert(e));\n        // Deserializes a valid URef passed from the host side\n        bytesrepr::deserialize(contract_hash_bytes.to_vec()).unwrap_or_revert()\n    };\n    contract_hash\n}\n\n/// Returns a read-only pointer to the Mint contract.\n///\n/// Any failure will trigger [`revert`](runtime::revert) with an appropriate [`ApiError`].\npub fn get_mint() -> ContractHash {\n    get_system_contract(SystemEntityType::Mint)\n}\n\n/// Returns a read-only pointer to the Handle Payment contract.\n///\n/// Any failure will trigger [`revert`](runtime::revert) with an appropriate [`ApiError`].\npub fn get_handle_payment() -> ContractHash {\n    
get_system_contract(SystemEntityType::HandlePayment)\n}\n\n/// Returns a read-only pointer to the Standard Payment contract.\n///\n/// Any failure will trigger [`revert`](runtime::revert) with an appropriate [`ApiError`].\npub fn get_standard_payment() -> ContractHash {\n    get_system_contract(SystemEntityType::StandardPayment)\n}\n\n/// Returns a read-only pointer to the Auction contract.\n///\n/// Any failure will trigger [`revert`](runtime::revert) with appropriate [`ApiError`].\npub fn get_auction() -> ContractHash {\n    get_system_contract(SystemEntityType::Auction)\n}\n\n/// Creates a new empty purse and returns its [`URef`].\npub fn create_purse() -> URef {\n    let purse_non_null_ptr = contract_api::alloc_bytes(UREF_SERIALIZED_LENGTH);\n    let ret = unsafe {\n        ext_ffi::casper_create_purse(purse_non_null_ptr.as_ptr(), UREF_SERIALIZED_LENGTH)\n    };\n    api_error::result_from(ret).unwrap_or_revert();\n    let bytes = unsafe {\n        Vec::from_raw_parts(\n            purse_non_null_ptr.as_ptr(),\n            UREF_SERIALIZED_LENGTH,\n            UREF_SERIALIZED_LENGTH,\n        )\n    };\n    bytesrepr::deserialize(bytes).unwrap_or_revert()\n}\n\n/// Returns the balance in motes of the given purse.\npub fn get_purse_balance(purse: URef) -> Option<U512> {\n    let (purse_ptr, purse_size, _bytes) = contract_api::to_ptr(purse);\n\n    let value_size = {\n        let mut output_size = MaybeUninit::uninit();\n        let ret =\n            unsafe { ext_ffi::casper_get_balance(purse_ptr, purse_size, output_size.as_mut_ptr()) };\n        match api_error::result_from(ret) {\n            Ok(_) => unsafe { output_size.assume_init() },\n            Err(ApiError::InvalidPurse) => return None,\n            Err(error) => runtime::revert(error),\n        }\n    };\n    let value_bytes = runtime::read_host_buffer(value_size).unwrap_or_revert();\n    let value: U512 = bytesrepr::deserialize(value_bytes).unwrap_or_revert();\n    Some(value)\n}\n\n/// Returns the 
balance in motes of the account's main purse.\npub fn get_balance() -> Option<U512> {\n    get_purse_balance(account::get_main_purse())\n}\n\n/// Transfers `amount` of motes from the default purse of the account to `target`\n/// account.  If `target` does not exist it will be created.\npub fn transfer_to_account(target: AccountHash, amount: U512, id: Option<u64>) -> TransferResult {\n    let (target_ptr, target_size, _bytes1) = contract_api::to_ptr(target);\n    let (amount_ptr, amount_size, _bytes2) = contract_api::to_ptr(amount);\n    let (id_ptr, id_size, _bytes3) = contract_api::to_ptr(id);\n    let mut maybe_result_value = MaybeUninit::uninit();\n\n    let return_code = unsafe {\n        ext_ffi::casper_transfer_to_account(\n            target_ptr,\n            target_size,\n            amount_ptr,\n            amount_size,\n            id_ptr,\n            id_size,\n            maybe_result_value.as_mut_ptr(),\n        )\n    };\n\n    // Propagate error (if any)\n    api_error::result_from(return_code)?;\n\n    // Return appropriate result if transfer was successful\n    let transferred_to_value = unsafe { maybe_result_value.assume_init() };\n    TransferredTo::result_from(transferred_to_value)\n}\n\n/// Transfers `amount` of motes from the main purse of the caller's account to the main purse of\n/// `target`.  If the account referenced by `target` does not exist, it will be created.\npub fn transfer_to_public_key(target: PublicKey, amount: U512, id: Option<u64>) -> TransferResult {\n    let target = AccountHash::from(&target);\n    transfer_to_account(target, amount, id)\n}\n\n/// Transfers `amount` of motes from `source` purse to `target` account.  
If `target` does not exist\n/// it will be created.\npub fn transfer_from_purse_to_account(\n    source: URef,\n    target: AccountHash,\n    amount: U512,\n    id: Option<u64>,\n) -> TransferResult {\n    let (source_ptr, source_size, _bytes1) = contract_api::to_ptr(source);\n    let (target_ptr, target_size, _bytes2) = contract_api::to_ptr(target);\n    let (amount_ptr, amount_size, _bytes3) = contract_api::to_ptr(amount);\n    let (id_ptr, id_size, _bytes4) = contract_api::to_ptr(id);\n\n    let mut maybe_result_value = MaybeUninit::uninit();\n    let return_code = unsafe {\n        ext_ffi::casper_transfer_from_purse_to_account(\n            source_ptr,\n            source_size,\n            target_ptr,\n            target_size,\n            amount_ptr,\n            amount_size,\n            id_ptr,\n            id_size,\n            maybe_result_value.as_mut_ptr(),\n        )\n    };\n\n    // Propagate error (if any)\n    api_error::result_from(return_code)?;\n\n    // Return appropriate result if transfer was successful\n    let transferred_to_value = unsafe { maybe_result_value.assume_init() };\n    TransferredTo::result_from(transferred_to_value)\n}\n\n/// Transfers `amount` of motes from `source` to the main purse of `target`.  If the account\n/// referenced by `target` does not exist, it will be created.\npub fn transfer_from_purse_to_public_key(\n    source: URef,\n    target: PublicKey,\n    amount: U512,\n    id: Option<u64>,\n) -> TransferResult {\n    let target = AccountHash::from(&target);\n    transfer_from_purse_to_account(source, target, amount, id)\n}\n\n/// Transfers `amount` of motes from `source` purse to `target` purse.  
If `target` does not exist\n/// the transfer fails.\npub fn transfer_from_purse_to_purse(\n    source: URef,\n    target: URef,\n    amount: U512,\n    id: Option<u64>,\n) -> Result<(), ApiError> {\n    let (source_ptr, source_size, _bytes1) = contract_api::to_ptr(source);\n    let (target_ptr, target_size, _bytes2) = contract_api::to_ptr(target);\n    let (amount_ptr, amount_size, _bytes3) = contract_api::to_ptr(amount);\n    let (id_ptr, id_size, _bytes4) = contract_api::to_ptr(id);\n    let result = unsafe {\n        ext_ffi::casper_transfer_from_purse_to_purse(\n            source_ptr,\n            source_size,\n            target_ptr,\n            target_size,\n            amount_ptr,\n            amount_size,\n            id_ptr,\n            id_size,\n        )\n    };\n    api_error::result_from(result)\n}\n"
  },
  {
    "path": "smart_contracts/contract/src/ext_ffi.rs",
    "content": "//! Contains low-level bindings for host-side (\"external\") functions.\n//!\n//! Generally should not be used directly.  See the [`contract_api`](crate::contract_api) for\n//! high-level bindings suitable for writing smart contracts.\n\n#[cfg(doc)]\nuse alloc::collections::BTreeMap;\n\nextern \"C\" {\n    /// The bytes in the span of wasm memory from `key_ptr` to `key_ptr + key_size` must correspond\n    /// to a valid global state key, otherwise the function will fail. If the key is de-serialized\n    /// successfully, then the result of the read is serialized and buffered in the runtime. This\n    /// result can be obtained via the [`casper_read_host_buffer`] function. Returns standard error\n    /// code.\n    ///\n    /// # Arguments\n    ///\n    /// * `key_ptr` - pointer (offset in wasm linear memory) to serialized form of the key to read\n    /// * `key_size` - size of the serialized key (in bytes)\n    /// * `output_size` - pointer to a value where host will write size of bytes read from given key\n    pub fn casper_read_value(key_ptr: *const u8, key_size: usize, output_size: *mut usize) -> i32;\n    /// This function writes the provided value (read via de-serializing the bytes\n    /// in wasm memory from offset `value_ptr` to `value_ptr + value_size`) under\n    /// the provided key (read via de-serializing the bytes in wasm memory from\n    /// offset `key_ptr` to `key_ptr + key_size`) in the global state. 
This\n    /// function will cause a `Trap` if the key or value fail to de-serialize or\n    /// if writing to that key is not permitted.\n    ///\n    /// # Arguments\n    ///\n    /// * `key_ptr` - pointer to bytes representing the key to write to\n    /// * `key_size` - size of the key (in bytes)\n    /// * `value_ptr` - pointer to bytes representing the value to write at the key\n    /// * `value_size` - size of the value (in bytes)\n    pub fn casper_write(\n        key_ptr: *const u8,\n        key_size: usize,\n        value_ptr: *const u8,\n        value_size: usize,\n    );\n    /// This function adds the provided value (read via de-serializing the bytes\n    /// in wasm memory from offset `value_ptr` to `value_ptr + value_size`) to the\n    /// current value under the provided key (read via de-serializing the bytes in\n    /// wasm memory from offset `key_ptr` to `key_ptr + key_size`) in the global\n    /// state. This function will cause a `Trap` if the key or value fail to\n    /// de-serialize or if adding to that key is not permitted, or no value\n    /// presently exists at that key.\n    ///\n    /// # Arguments\n    ///\n    /// * `key_ptr` - pointer to bytes representing the key to write to\n    /// * `key_size` - size of the key (in bytes)\n    /// * `value_ptr` - pointer to bytes representing the value to write at the key\n    /// * `value_size` - size of the value (in bytes)\n    pub fn casper_add(key_ptr: *const u8, key_size: usize, value_ptr: *const u8, value_size: usize);\n    /// This function causes the runtime to generate a new `URef`, with\n    /// the provided value stored under it in the global state. The new\n    /// `URef` is written (in serialized form) to the wasm linear\n    /// memory starting from the `key_ptr` offset. Note that data corruption is possible if not\n    /// enough memory is allocated for the `URef` at `key_ptr`. 
This\n    /// function will cause a `Trap` if the bytes in wasm memory from offset `value_ptr` to\n    /// `value_ptr + value_size` cannot be de-serialized into a `Value`.\n    ///\n    /// # Arguments\n    ///\n    /// * `key_ptr` - pointer to the offset in wasm memory where the new `URef` will be written\n    /// * `value_ptr` - pointer to bytes representing the value to write under the new `URef`\n    /// * `value_size` - size of the value (in bytes)\n    pub fn casper_new_uref(uref_ptr: *mut u8, value_ptr: *const u8, value_size: usize);\n    /// This function loads a set of authorized keys used to sign this deploy from the host.\n    /// The data will be available through the host buffer and can be copied to Wasm memory through\n    /// [`casper_read_host_buffer`].\n    ///\n    /// # Arguments\n    ///\n    /// * `total_keys`: number of authorization keys used to sign this deploy\n    /// * `result_size`: size of the data loaded in the host\n    pub fn casper_load_authorization_keys(total_keys: *mut usize, result_size: *mut usize) -> i32;\n    /// This function loads a set of named keys from the host. The data will be available through\n    /// the host buffer and can be copied to Wasm memory through [`casper_read_host_buffer`].\n    pub fn casper_load_named_keys(total_keys: *mut usize, result_size: *mut usize) -> i32;\n    /// This function causes a `Trap`, terminating the currently running module,\n    /// but first copies the bytes from `value_ptr` to `value_ptr + value_size` to\n    /// a buffer which is returned to the calling module (if this module was\n    /// invoked by [`casper_call_contract`] or [`casper_call_versioned_contract`]). Additionally,\n    /// the known `URef`s of the calling context are augmented with the\n    /// `URef`s de-serialized from wasm memory offset\n    /// `extra_urefs_ptr` to `extra_urefs_ptr + extra_urefs_size`. 
This function will cause a\n    /// `Trap` if the bytes at `extra_urefs_ptr` cannot be de-serialized as type `Vec<URef>`, or\n    /// if any of the extra `URef`s are invalid in the current\n    /// context.\n    ///\n    /// # Arguments\n    ///\n    /// * `value_ptr`: pointer to bytes representing the value to return to the caller\n    /// * `value_size`: size of the value (in bytes)\n    pub fn casper_ret(value_ptr: *const u8, value_size: usize) -> !;\n    /// Retrieves a key from the named keys by name and writes it to the output buffer.\n    pub fn casper_get_key(\n        name_ptr: *const u8,\n        name_size: usize,\n        output_ptr: *mut u8,\n        output_size: usize,\n        bytes_written_ptr: *mut usize,\n    ) -> i32;\n    /// This function checks if the key with the given name is present in the named keys.\n    pub fn casper_has_key(name_ptr: *const u8, name_size: usize) -> i32;\n    /// This function stores a key under the given name in the named keys.\n    pub fn casper_put_key(\n        name_ptr: *const u8,\n        name_size: usize,\n        key_ptr: *const u8,\n        key_size: usize,\n    );\n    /// This function removes a key with the given name from the named keys.\n    pub fn casper_remove_key(name_ptr: *const u8, name_size: usize);\n    /// This function causes a `Trap` which terminates the currently running\n    /// module. Additionally, it signals that the current entire phase of\n    /// execution of the deploy should be terminated as well, and that the effects\n    /// of the execution up to this point should be reverted. The error code\n    /// provided to this function will be included in the error message of the\n    /// deploy in the block in which it is included.\n    ///\n    /// # Arguments\n    ///\n    /// * `status` - error code of the revert\n    pub fn casper_revert(status: u32) -> !;\n    /// This function checks if all the keys contained in the given `Value` are\n    /// valid in the current context (i.e. 
the `Value` does not contain any forged\n    /// `URef`s). This function causes a `Trap` if the bytes in wasm\n    /// memory from offset `value_ptr` to `value_ptr + value_size` cannot be de-serialized as\n    /// type `Value`.\n    pub fn casper_is_valid_uref(uref_ptr: *const u8, uref_size: usize) -> i32;\n    /// This function attempts to add the given public key as an associated key to\n    /// the current account. Presently only 32-byte keys are supported; it is up\n    /// to the caller to ensure that the 32-bytes starting from offset\n    /// `public_key` represent the key they wish to add. Weights are internally\n    /// represented by a `u8`, this function will cause a `Trap` if the weight is\n    /// not between 0 and 255 inclusively. The result returned is a status code\n    /// for adding the key where 0 represents success, 1 means no more keys can be\n    /// added to this account (only 10 keys can be added), 2 means the key is\n    /// already associated (if you wish to change the weight of an associated key\n    /// then used [`casper_update_associated_key`]), and 3 means permission denied (this\n    /// could be because the function was called outside of session code or\n    /// because the key management threshold was not met by the keys authorizing\n    /// the deploy).\n    ///\n    /// Returns status code for adding the key, where 0 represents success and non-zero represents\n    /// failure.\n    ///\n    /// # Arguments\n    ///\n    /// * `public_key` - pointer to the bytes in wasm memory representing the public key to add,\n    ///   presently only 32-byte public keys are supported.\n    /// * `weight` - the weight to assign to this public key\n    pub fn casper_add_associated_key(\n        account_hash_ptr: *const u8,\n        account_hash_size: usize,\n        weight: i32,\n    ) -> i32;\n    /// This function attempts to remove the given public key from the associated\n    /// keys of the current account. 
Presently only 32-byte keys are supported; it\n    /// is up to the caller to ensure that the 32-bytes starting from offset\n    /// `public_key` represent the key they wish to remove. The result returned is\n    /// a status code for adding the key where 0 represents success, 1 means the\n    /// key was not associated to begin with, 2 means permission denied\n    /// (this could be because the function was called outside of session code or\n    /// because the key management threshold was not met by the keys authorizing\n    /// the deploy), and 3 means this key cannot be removed because otherwise it\n    /// would be impossible to meet either the deploy or key management\n    /// thresholds.\n    ///\n    /// Returns status code for adding the key, where 0 represents success and non-zero represents\n    /// failure.\n    ///\n    /// # Arguments\n    ///\n    /// * `public_key` - pointer to the bytes in wasm memory representing the public key to update,\n    ///   presently only 32-byte public keys are supported.\n    /// * `weight` - the weight to assign to this public key\n    pub fn casper_remove_associated_key(\n        account_hash_ptr: *const u8,\n        account_hash_size: usize,\n    ) -> i32;\n    /// This function attempts to update the given public key as an associated key\n    /// to the current account. Presently only 32-byte keys are supported; it is\n    /// up to the caller to ensure that the 32-bytes starting from offset\n    /// `public_key` represent the key they wish to add. Weights are internally\n    /// represented by a `u8`, this function will cause a `Trap` if the weight is\n    /// not between 0 and 255 inclusively. 
The result returned is a status code\n    /// for adding the key where 0 represents success, 1 means the key was not\n    /// associated to the account (to add a new key use `add_associated_key`), 2\n    /// means permission denied (this could be because the function was\n    /// called outside of session code or because the key management threshold was\n    /// not met by the keys authorizing the deploy), and 3 means this key cannot\n    /// be changed to the specified weight because then it would be impossible to\n    /// meet either the deploy or key management thresholds (you may wish to try\n    /// again with a higher weight or after lowering the action thresholds).\n    ///\n    /// # Arguments\n    ///\n    /// * `public_key` - pointer to the bytes in wasm memory representing the public key to update,\n    ///   presently only 32-byte public keys are supported.\n    /// * `weight` - the weight to assign to this public key\n    pub fn casper_update_associated_key(\n        account_hash_ptr: *const u8,\n        account_hash_size: usize,\n        weight: i32,\n    ) -> i32;\n    /// This function changes the threshold to perform the specified action. The\n    /// action index is interpreted as follows: 0 means deployment and 1 means key\n    /// management. Thresholds are represented internally as a `u8`, this function\n    /// will cause a `Trap` if the new threshold is not between 0 and 255\n    /// inclusively. 
The return value is a status code where 0 means success, 1\n    /// means the key management threshold cannot be set lower than the deploy\n    /// threshold, 2 means the deployment threshold cannot be set higher than the\n    /// key management threshold, 3 means permission denied (this could be because\n    /// the function was called outside of session code or because the key\n    /// management threshold was not met by the keys authorizing the deploy), and\n    /// 4 means the threshold would be set higher than the total weight of\n    /// associated keys (and therefore would be impossible to meet).\n    ///\n    /// # Arguments\n    ///\n    /// * `action` - index representing the action threshold to set\n    /// * `threshold` - new value of the threshold for performing this action\n    pub fn casper_set_action_threshold(permission_level: u32, threshold: u32) -> i32;\n    /// Returns the caller of the current context, i.e. the [`casper_types::account::AccountHash`]\n    /// of the account which made the transaction request. The value stored in the host\n    /// buffer is always 32-bytes in length.\n    ///\n    /// # Arguments\n    ///\n    /// * `output_size_ptr` - pointer to a value where the size of the account hash will be set.\n    pub fn casper_get_caller(output_size_ptr: *mut usize) -> i32;\n    /// This function gets the timestamp which will be in the block this deploy is\n    /// included in. The return value is always a 64-bit unsigned integer,\n    /// representing the number of milliseconds since the Unix epoch. It is up to\n    /// the caller to ensure there are 8 bytes allocated at `dest_ptr`, otherwise\n    /// data corruption in the wasm memory may occur.\n    ///\n    /// # Arguments\n    ///\n    /// * `dest_ptr` - pointer in wasm memory where to write the result\n    pub fn casper_get_blocktime(dest_ptr: *const u8);\n    /// This function uses the mint contract to create a new, empty purse. 
If the\n    /// call is successful then the `URef` (in serialized form) is written\n    /// to the indicated place in wasm memory. It is up to the caller to ensure at\n    /// least `purse_size` bytes are allocated at `purse_ptr`, otherwise\n    /// data corruption may occur. This function causes a `Trap` if\n    /// `purse_size` is not equal to 38.\n    ///\n    /// # Arguments\n    ///\n    /// * `purse_ptr` - pointer to position in wasm memory where to write the created `URef`\n    /// * `purse_size` - allocated size for the `URef`\n    pub fn casper_create_purse(purse_ptr: *const u8, purse_size: usize) -> i32;\n    /// This function uses the mint contract’s transfer function to transfer\n    /// tokens from the current account’s main purse to the main purse of the\n    /// target account. If the target account does not exist then it is\n    /// automatically created, and the tokens are transferred to the main purse of\n    /// the new account. The target is a serialized `PublicKey` (i.e. 36 bytes\n    /// where the first 4 bytes are the number `32` in little endian encoding, and\n    /// the remaining 32-bytes are the public key). The amount must be a\n    /// serialized 512-bit unsigned integer. This function causes a `Trap` if the\n    /// target cannot be de-serialized as a `PublicKey` or the amount cannot be\n    /// de-serialized into a `U512`. 
The return value indicated what occurred,\n    /// where 0 means a successful transfer to an existing account, 1 means a\n    /// successful transfer to a new account, and 2 means the transfer failed\n    /// (this could be because the current account’s main purse had insufficient\n    /// tokens or because the function was called outside of session code and so\n    /// does not have access to the account’s main purse).\n    ///\n    /// # Arguments\n    ///\n    /// * `target_ptr` - pointer in wasm memory to bytes representing the target account to transfer\n    ///   to\n    /// * `target_size` - size of the target (in bytes)\n    /// * `amount_ptr` - pointer in wasm memory to bytes representing the amount to transfer to the\n    ///   target account\n    /// * `amount_size` - size of the amount (in bytes)\n    /// * `id_ptr` - pointer in wasm memory to bytes representing the user-defined transaction id\n    /// * `id_size` - size of the id (in bytes)\n    /// * `result_ptr` - pointer in wasm memory to a value where `TransferredTo` value would be set\n    ///   on successful transfer.\n    pub fn casper_transfer_to_account(\n        target_ptr: *const u8,\n        target_size: usize,\n        amount_ptr: *const u8,\n        amount_size: usize,\n        id_ptr: *const u8,\n        id_size: usize,\n        result_ptr: *const i32,\n    ) -> i32;\n    /// This function uses the mint contract’s transfer function to transfer\n    /// tokens from the specified purse to the main purse of the target account.\n    /// If the target account does not exist then it is automatically created, and\n    /// the tokens are transferred to the main purse of the new account. The\n    /// source is a serialized `URef`.\n    /// The target is a serialized `PublicKey` (i.e. 36 bytes where the\n    /// first 4 bytes are the number `32` in little endian encoding, and the\n    /// remaining 32-bytes are the public key). The amount must be a serialized\n    /// 512-bit unsigned integer. 
This function causes a `Trap` if the source\n    /// cannot be de-serialized as a `URef`, or the target cannot be\n    /// de-serialized as a `PublicKey` or the amount cannot be de-serialized into\n    /// a `U512`. The return value indicated what occurred, where 0 means a\n    /// successful transfer to an existing account, 1 means a successful transfer\n    /// to a new account, and 2 means the transfer failed (this could be because\n    /// the source purse had insufficient tokens or because there was not valid\n    /// access to the source purse).\n    ///\n    /// # Arguments\n    ///\n    /// * `source_ptr` - pointer in wasm memory to bytes representing the source `URef` to transfer\n    ///   from\n    /// * `source_size` - size of the source `URef` (in bytes)\n    /// * `target_ptr` - pointer in wasm memory to bytes representing the target account to transfer\n    ///   to\n    /// * `target_size` - size of the target (in bytes)\n    /// * `amount_ptr` - pointer in wasm memory to bytes representing the amount to transfer to the\n    ///   target account\n    /// * `amount_size` - size of the amount (in bytes)\n    /// * `id_ptr` - pointer in wasm memory to bytes representing the user-defined transaction id\n    /// * `id_size` - size of the id (in bytes)\n    /// * `result_ptr` - pointer in wasm memory to a value where `TransferredTo` value would be set\n    ///   on successful transfer.\n    pub fn casper_transfer_from_purse_to_account(\n        source_ptr: *const u8,\n        source_size: usize,\n        target_ptr: *const u8,\n        target_size: usize,\n        amount_ptr: *const u8,\n        amount_size: usize,\n        id_ptr: *const u8,\n        id_size: usize,\n        result_ptr: *const i32,\n    ) -> i32;\n    /// This function uses the mint contract’s transfer function to transfer\n    /// tokens from the specified source purse to the specified target purse. 
If\n    /// the target account does not exist then it is automatically created, and\n    /// the tokens are transferred to the main purse of the new account. The\n    /// source is a serialized `URef`.\n    /// The target is also a serialized `URef`. The amount must be a\n    /// serialized 512-bit unsigned integer. This function causes a `Trap` if the\n    /// source or target cannot be de-serialized as a `URef` or the amount\n    /// cannot be de-serialized into a `U512`. The return value indicated what\n    /// occurred, where 0 means a successful transfer, 1 means the transfer\n    /// failed (this could be because the source purse had insufficient tokens or\n    /// because there was not valid access to the source purse or target purse).\n    ///\n    /// # Arguments\n    ///\n    /// * `source_ptr` - pointer in wasm memory to bytes representing the source `URef` to transfer\n    ///   from\n    /// * `source_size` - size of the source `URef` (in bytes)\n    /// * `target_ptr` - pointer in wasm memory to bytes representing the target `URef` to transfer\n    ///   to\n    /// * `target_size` - size of the target (in bytes)\n    /// * `amount_ptr` - pointer in wasm memory to bytes representing the amount to transfer to the\n    ///   target account\n    /// * `amount_size` - size of the amount (in bytes)\n    /// * `id_ptr` - pointer in wasm memory to bytes representing the user-defined transaction id\n    /// * `id_size` - size of the id (in bytes)\n    pub fn casper_transfer_from_purse_to_purse(\n        source_ptr: *const u8,\n        source_size: usize,\n        target_ptr: *const u8,\n        target_size: usize,\n        amount_ptr: *const u8,\n        amount_size: usize,\n        id_ptr: *const u8,\n        id_size: usize,\n    ) -> i32;\n    /// This function uses the mint contract's balance function to get the balance\n    /// of the specified purse. 
It causes a `Trap` if the bytes in wasm memory\n    /// from `purse_ptr` to `purse_ptr + purse_size` cannot be\n    /// de-serialized as a `URef`. The return value is the size of the\n    /// result in bytes. The result is copied to the host buffer and thus can be obtained\n    /// by any function which copies the buffer into wasm memory (e.g.\n    /// `get_read`). The result bytes are serialized from type `Option<U512>` and\n    /// should be interpreted as such.\n    ///\n    /// # Arguments\n    ///\n    /// * `purse_ptr` - pointer in wasm memory to the bytes representing the `URef` of the purse to\n    ///   get the balance of\n    /// * `purse_size` - size of the `URef` (in bytes)\n    pub fn casper_get_balance(\n        purse_ptr: *const u8,\n        purse_size: usize,\n        result_size: *mut usize,\n    ) -> i32;\n    /// This function writes bytes representing the current phase of the deploy\n    /// execution to the specified pointer. The size of the result is always one\n    /// byte, it is up to the caller to ensure one byte of memory is allocated at\n    /// `dest_ptr`, otherwise data corruption in the wasm memory could occur. 
The\n    /// one byte is interpreted as follows: 0 means a system phase (should never\n    /// be encountered by user deploys), 1 means the payment phase, 2 means the\n    /// session phase and 3 means the finalization phase (should never be\n    /// encountered by user code).\n    ///\n    /// # Arguments\n    ///\n    /// * `dest_ptr` - pointer to position in wasm memory to write the result\n    pub fn casper_get_phase(dest_ptr: *mut u8);\n    /// Retrieves a system contract by index and writes it to the destination pointer.\n    pub fn casper_get_system_contract(\n        system_contract_index: u32,\n        dest_ptr: *mut u8,\n        dest_size: usize,\n    ) -> i32;\n    /// Retrieves the main purse and writes it to the destination pointer.\n    pub fn casper_get_main_purse(dest_ptr: *mut u8);\n    /// This function copies the contents of the current runtime buffer into the\n    /// wasm memory, beginning at the provided offset. It is intended that this\n    /// function be called after a call to a function that uses host buffer. It is up to the caller\n    /// to ensure that the proper amount of memory is allocated for this write,\n    /// otherwise data corruption in the wasm memory may occur due to this call\n    /// overwriting some bytes unintentionally. The size of the data which will be\n    /// written is stored on the host. 
The bytes which are written are those corresponding to the\n    /// value returned by the called contract; it is up to the developer to know how to attempt\n    /// to interpret those bytes.\n    ///\n    /// # Arguments\n    ///\n    /// * `dest_ptr` - pointer (offset in wasm memory) to the location where the host buffer should\n    ///   be written\n    /// * `dest_size` - size of output buffer\n    /// * `bytes_written` - a pointer to a value where amount of bytes written will be set\n    pub fn casper_read_host_buffer(\n        dest_ptr: *mut u8,\n        dest_size: usize,\n        bytes_written: *mut usize,\n    ) -> i32;\n    /// Creates new contract package at hash. Returns both newly generated\n    /// [`casper_types::PackageHash`] and a [`casper_types::URef`] for further\n    /// modifying access.\n    pub fn casper_create_contract_package_at_hash(\n        hash_addr_ptr: *mut u8,\n        access_addr_ptr: *mut u8,\n        is_locked: bool,\n    );\n    /// Creates new named contract user group under a contract package.\n    ///\n    /// # Arguments\n    ///\n    /// * `contract_package_hash_ptr` - pointer to serialized contract package hash.\n    /// * `contract_package_hash_size` - size of contract package hash in serialized form.\n    /// * `label_ptr` - serialized group label\n    /// * `label_size` - size of serialized group label\n    /// * `num_new_urefs` - amount of new urefs to be provisioned by host\n    /// * `existing_urefs_ptr` - serialized list of existing [`casper_types::URef`]s\n    /// * `existing_urefs_size` - size of serialized list of  [`casper_types::URef`]s\n    /// * `output_size_ptr` - pointer to a value where a size of list of [`casper_types::URef`]s\n    ///   written to host buffer will be set.\n    pub fn casper_create_contract_user_group(\n        contract_package_hash_ptr: *const u8,\n        contract_package_hash_size: usize,\n        label_ptr: *const u8,\n        label_size: usize,\n        num_new_urefs: u8,\n        
existing_urefs_ptr: *const u8,\n        existing_urefs_size: usize,\n        output_size_ptr: *mut usize,\n    ) -> i32;\n    /// Adds new contract version to a contract package without message topics.\n    ///\n    /// # Arguments\n    ///\n    /// * `contract_package_hash_ptr` - pointer to serialized contract package hash.\n    /// * `contract_package_hash_size` - size of contract package hash in serialized form.\n    /// * `version_ptr` - output parameter where new version assigned by host is set\n    /// * `entry_points_ptr` - pointer to serialized [`casper_types::EntryPoints`]\n    /// * `entry_points_size` - size of serialized [`casper_types::EntryPoints`]\n    /// * `named_keys_ptr` - pointer to serialized [`casper_types::NamedKeys`]\n    /// * `named_keys_size` - size of serialized [`casper_types::NamedKeys`]\n    /// * `output_ptr` - pointer to a memory where host assigned contract hash is set to\n    /// * `output_size` - size of memory area that host can write to\n    /// * `bytes_written_ptr` - pointer to a value where host will set a number of bytes written to\n    ///   the `output_size` pointer\n    pub fn casper_add_contract_version(\n        contract_package_hash_ptr: *const u8,\n        contract_package_hash_size: usize,\n        version_ptr: *const u32,\n        entry_points_ptr: *const u8,\n        entry_points_size: usize,\n        named_keys_ptr: *const u8,\n        named_keys_size: usize,\n        output_ptr: *mut u8,\n        output_size: usize,\n        bytes_written_ptr: *mut usize,\n    ) -> i32;\n    /// Adds a new version to a contract package with message topics.\n    ///\n    /// # Arguments\n    ///\n    /// * `contract_package_hash_ptr` - pointer to serialized package hash.\n    /// * `contract_package_hash_size` - size of package hash in serialized form.\n    /// * `version_ptr` - output parameter where new version assigned by host is set\n    /// * `entry_points_ptr` - pointer to serialized [`casper_types::EntryPoints`]\n    /// * 
`entry_points_size` - size of serialized [`casper_types::EntryPoints`]\n    /// * `named_keys_ptr` - pointer to serialized [`casper_types::NamedKeys`]\n    /// * `named_keys_size` - size of serialized [`casper_types::NamedKeys`]\n    /// * `message_topics_ptr` - pointer to serialized BTreeMap<String, MessageTopicOperation>\n    ///   containing message topic names and the operation to be performed on each one.\n    /// * `message_topics_size` - size of serialized BTreeMap<String, MessageTopicOperation>\n    /// * `output_ptr` - pointer to a memory where host assigned contract hash is set to\n    /// * `output_size` - expected width of output (currently 32)\n    pub fn casper_add_contract_version_with_message_topics(\n        contract_package_hash_ptr: *const u8,\n        contract_package_hash_size: usize,\n        version_ptr: *const u32,\n        entry_points_ptr: *const u8,\n        entry_points_size: usize,\n        named_keys_ptr: *const u8,\n        named_keys_size: usize,\n        message_topics_ptr: *const u8,\n        message_topics_size: usize,\n        output_ptr: *mut u8,\n        output_size: usize,\n    ) -> i32;\n    /// Adds a new version to a package.\n    ///\n    /// # Arguments\n    ///\n    /// * `package_hash_ptr` - pointer to serialized package hash.\n    /// * `package_hash_size` - size of package hash in serialized form.\n    /// * `version_ptr` - output parameter where new version assigned by host is set\n    /// * `entry_points_ptr` - pointer to serialized [`casper_types::EntryPoints`]\n    /// * `entry_points_size` - size of serialized [`casper_types::EntryPoints`]\n    /// * `named_keys_ptr` - pointer to serialized [`casper_types::NamedKeys`]\n    /// * `named_keys_size` - size of serialized [`casper_types::NamedKeys`]\n    /// * `message_topics_ptr` - pointer to serialized BTreeMap<String, MessageTopicOperation>\n    ///   containing message topic names and the operation to be performed on each one.\n    /// * `message_topics_size` - 
size of serialized BTreeMap<String, MessageTopicOperation>\n    /// * `output_ptr` - pointer to a memory where host assigned contract hash is set to\n    /// * `output_size` - expected width of output (currently 32)\n    pub fn casper_add_package_version_with_message_topics(\n        package_hash_ptr: *const u8,\n        package_hash_size: usize,\n        version_ptr: *const u32,\n        entry_points_ptr: *const u8,\n        entry_points_size: usize,\n        named_keys_ptr: *const u8,\n        named_keys_size: usize,\n        message_topics_ptr: *const u8,\n        message_topics_size: usize,\n        output_ptr: *mut u8,\n        output_size: usize,\n    ) -> i32;\n    /// Disables contract in a contract package. Returns non-zero standard error for a failure,\n    /// otherwise a zero indicates success.\n    ///\n    /// # Arguments\n    ///\n    /// * `contract_package_hash_ptr` - pointer to serialized contract package hash.\n    /// * `contract_package_hash_size` - size of contract package hash in serialized form.\n    /// * `contract_hash_ptr` - pointer to serialized contract hash.\n    /// * `contract_hash_size` - size of contract hash in serialized form.\n    pub fn casper_disable_contract_version(\n        contract_package_hash_ptr: *const u8,\n        contract_package_hash_size: usize,\n        contract_hash_ptr: *const u8,\n        contract_hash_size: usize,\n    ) -> i32;\n    /// Calls a contract by its hash. Requires entry point name that has to be present on a\n    /// specified contract, and serialized named arguments. Returns a standard error code in\n    /// case of failure, otherwise a successful execution returns zero. 
Bytes returned from contract\n    /// execution are set to `result_size` pointer.\n    ///\n    /// # Arguments\n    /// * `contract_package_hash_ptr` - pointer to serialized contract package hash.\n    /// * `contract_package_hash_size` - size of contract package hash in serialized form.\n    /// * `entry_point_name_ptr` - pointer to serialized contract entry point name\n    /// * `entry_point_name_size` - size of serialized contract entry point name\n    /// * `runtime_args_ptr` - pointer to serialized runtime arguments\n    /// * `runtime_args_size` - size of serialized runtime arguments\n    /// * `result_size` - a pointer to a value which will be set to a size of bytes of called\n    ///   contract return value\n    pub fn casper_call_contract(\n        contract_hash_ptr: *const u8,\n        contract_hash_size: usize,\n        entry_point_name_ptr: *const u8,\n        entry_point_name_size: usize,\n        runtime_args_ptr: *const u8,\n        runtime_args_size: usize,\n        result_size: *mut usize,\n    ) -> i32;\n    /// Calls a contract by its package hash. Optionally accepts a serialized `Option<u32>` as a\n    /// version that for `None` case would call most recent version for given protocol version,\n    /// otherwise it selects a specific contract version. Requires an entry point name\n    /// registered in a given version of contract. Returns a standard error code in case of\n    /// failure, otherwise a successful execution returns zero. 
Bytes returned from contract\n    /// execution are set to `result_size` pointer\n    ///\n    /// # Arguments\n    ///\n    /// * `contract_package_hash_ptr` - pointer to serialized contract package hash.\n    /// * `contract_package_hash_size` - size of contract package hash in serialized form.\n    /// * `contract_version_ptr` - Contract package hash in a serialized form\n    /// * `contract_version_size` -\n    /// * `entry_point_name_ptr` -\n    /// * `entry_point_name_size` -\n    /// * `runtime_args_ptr` -\n    /// * `runtime_args_size` -\n    /// * `result_size` -\n    pub fn casper_call_versioned_contract(\n        contract_package_hash_ptr: *const u8,\n        contract_package_hash_size: usize,\n        contract_version_ptr: *const u8,\n        contract_version_size: usize,\n        entry_point_name_ptr: *const u8,\n        entry_point_name_size: usize,\n        runtime_args_ptr: *const u8,\n        runtime_args_size: usize,\n        result_size: *mut usize,\n    ) -> i32;\n    /// This function queries the host side to check for given named argument existence and returns\n    /// a size in bytes of given argument. Returns zero for success or non-zero value for\n    /// failure as described in standard error codes.\n    ///\n    /// # Arguments\n    ///\n    /// * `name_ptr` - pointer (offset in wasm memory) to the location where serialized argument\n    ///   name is present\n    /// * `name_size` - size of serialized bytes of argument name\n    /// * `dest_ptr` - pointer to the location where argument bytes will be copied from the host\n    ///   side\n    /// * `dest_size` - size of destination pointer\n    pub fn casper_get_named_arg_size(\n        name_ptr: *const u8,\n        name_size: usize,\n        dest_size: *mut usize,\n    ) -> i32;\n    /// This function copies the contents of the current runtime buffer into the\n    /// wasm memory, beginning at the provided offset. 
It is intended that this\n    /// function be called after a call to `load_arg`. It is up to the caller to\n    /// ensure that the proper amount of memory is allocated for this write,\n    /// otherwise data corruption in the wasm memory may occur due to this call\n    /// overwriting some bytes unintentionally. The size of the data which will be\n    /// written is returned from the `load_arg` call. The bytes which are written\n    /// are those corresponding to the provided argument; it is up to the\n    /// developer to know how to attempt to interpret those bytes.\n    ///\n    /// # Arguments\n    ///\n    /// * `name_ptr` - pointer (offset in wasm memory) to the location where serialized argument\n    ///   name is present\n    /// * `name_size` - size of serialized bytes of argument name\n    /// * `dest_ptr` - pointer to the location where argument bytes will be copied from the host\n    ///   side\n    /// * `dest_size` - size of destination pointer\n    pub fn casper_get_named_arg(\n        name_ptr: *const u8,\n        name_size: usize,\n        dest_ptr: *mut u8,\n        dest_size: usize,\n    ) -> i32;\n    /// Removes group from given contract package.\n    ///\n    /// # Arguments\n    ///\n    /// * `contract_package_hash_ptr` - pointer to serialized contract package hash.\n    /// * `contract_package_hash_size` - size of contract package hash in serialized form.\n    /// * `label_ptr` - serialized group label\n    /// * `label_size` - size of serialized group label\n    pub fn casper_remove_contract_user_group(\n        contract_package_hash_ptr: *const u8,\n        contract_package_hash_size: usize,\n        label_ptr: *const u8,\n        label_size: usize,\n    ) -> i32;\n    /// Requests host to provision additional [`casper_types::URef`] to a specified group\n    /// identified by its label. 
Returns standard error code for non-zero value, otherwise zero\n    /// indicated success.\n    ///\n    /// # Arguments\n    ///\n    /// * `contract_package_hash_ptr` - pointer to serialized contract package hash.\n    /// * `contract_package_hash_size` - size of contract package hash in serialized form.\n    /// * `label_ptr` - serialized group label\n    /// * `label_size` - size of serialized group label\n    /// * `value_size_ptr` - size of data written to a host buffer will be saved here\n    pub fn casper_provision_contract_user_group_uref(\n        contract_package_hash_ptr: *const u8,\n        contract_package_hash_size: usize,\n        label_ptr: *const u8,\n        label_size: usize,\n        value_size_ptr: *const usize,\n    ) -> i32;\n    /// Removes user group urefs. Accepts a contract package hash, label name of a group, and a list\n    /// of urefs that will be removed from the group.\n    ///\n    /// # Arguments\n    ///\n    /// * `contract_package_hash_ptr` - pointer to serialized contract package hash.\n    /// * `contract_package_hash_size` - size of contract package hash in serialized form.\n    /// * `label_ptr` - serialized group label\n    /// * `label_size` - size of serialized group label\n    /// * `urefs_ptr` - pointer to serialized list of urefs\n    /// * `urefs_size` - size of serialized list of urefs\n    pub fn casper_remove_contract_user_group_urefs(\n        contract_package_hash_ptr: *const u8,\n        contract_package_hash_size: usize,\n        label_ptr: *const u8,\n        label_size: usize,\n        urefs_ptr: *const u8,\n        urefs_size: usize,\n    ) -> i32;\n    /// Returns a 32-byte BLAKE2b hash digest from the given input bytes\n    ///\n    /// # Arguments\n    /// * `in_ptr` - pointer to bytes\n    /// * `in_size` - length of bytes\n    /// * `out_ptr` - pointer to the location where argument bytes will be copied from the host side\n    /// * `out_size` - size of output pointer\n    #[deprecated(note = 
\"Superseded by ext_ffi::casper_generic_hash\")]\n    pub fn casper_blake2b(\n        in_ptr: *const u8,\n        in_size: usize,\n        out_ptr: *mut u8,\n        out_size: usize,\n    ) -> i32;\n    /// Returns the elements on the call stack tracked by the runtime\n    ///\n    /// # Arguments\n    /// * `call_stack_len_ptr` - pointer to the length of the caller information.\n    /// * `result_size_ptr` - pointer to the size of the serialized caller information.\n    #[deprecated]\n    pub fn casper_load_call_stack(\n        call_stack_len_ptr: *mut usize,\n        result_size_ptr: *mut usize,\n    ) -> i32;\n    /// Prints data directly to standard output on the host.\n    ///\n    /// # Arguments\n    ///\n    /// * `text_ptr` - pointer to serialized text to print\n    /// * `text_size` - size of serialized text to print\n    #[cfg(feature = \"test-support\")]\n    pub fn casper_print(text_ptr: *const u8, text_size: usize);\n    /// Creates new URef that points to a dictionary partition of global state.\n    ///\n    /// # Arguments\n    ///\n    /// * `output_size` - pointer to a value where host will write size of bytes of created URef.\n    pub fn casper_new_dictionary(output_size_ptr: *mut usize) -> i32;\n    /// The bytes in wasm memory from offset `key_ptr` to `key_ptr + key_size`\n    /// will be used together with the current context’s seed to form a dictionary.\n    /// The value at that dictionary is read from the global state, serialized and\n    /// buffered in the runtime. 
This result can be obtained via the [`casper_read_host_buffer`]\n    /// function.\n    ///\n    /// # Arguments\n    ///\n    /// * `uref_ptr` - pointer to bytes representing the user-defined key\n    /// * `uref_size` - size of the key (in bytes)\n    /// * `key_bytes_ptr` - pointer to bytes representing the user-defined key\n    /// * `key_bytes_size` - size of the user-defined key\n    /// * `output_size` - pointer to a value where host will write size of bytes read from given key\n    pub fn casper_dictionary_get(\n        uref_ptr: *const u8,\n        uref_size: usize,\n        key_bytes_ptr: *const u8,\n        key_bytes_size: usize,\n        output_size: *mut usize,\n    ) -> i32;\n    /// The bytes in the span of wasm memory from `key_ptr` to `key_ptr + key_size` must correspond\n    /// to a valid global state dictionary key, otherwise the function will fail.\n    /// If the Key::Dictionary is de-serialized successfully, then the result of the read is\n    /// serialized and buffered in the runtime. This result can be obtained via the\n    /// [`casper_read_host_buffer`] function. Returns standard error code.\n    ///\n    /// # Arguments\n    ///\n    /// * `key_ptr` - pointer (offset in wasm linear memory) to serialized form of the\n    ///   Key::Dictionary to read\n    /// * `key_size` - size of the serialized Key::Dictionary (in bytes)\n    /// * `output_size` - pointer to a value where host will write size of bytes read from given key\n    pub fn casper_dictionary_read(\n        key_ptr: *const u8,\n        key_size: usize,\n        output_size: *mut usize,\n    ) -> i32;\n    /// The bytes in wasm memory from offset `key_ptr` to `key_ptr + key_size`\n    /// will be used together with the passed URef's seed to form a dictionary.\n    /// This function writes the provided value (read via de-serializing the bytes\n    /// in wasm memory from offset `value_ptr` to `value_ptr + value_size`) under\n    /// that dictionary in the global state. 
This function will cause a `Trap` if\n    /// the value fails to de-serialize.\n    ///\n    /// # Arguments\n    ///\n    /// * `uref_ptr` - pointer to bytes representing the user-defined key\n    /// * `uref_size` - size of the key (in bytes)\n    /// * `key_ptr` - pointer to bytes representing the user-defined key to write to\n    /// * `key_size` - size of the key (in bytes)\n    /// * `value_ptr` - pointer to bytes representing the value to write at the key\n    /// * `value_size` - size of the value (in bytes)\n    pub fn casper_dictionary_put(\n        uref_ptr: *const u8,\n        uref_size: usize,\n        key_ptr: *const u8,\n        key_size: usize,\n        value_ptr: *const u8,\n        value_size: usize,\n    ) -> i32;\n    /// Returns 32 pseudo random bytes.\n    ///\n    /// # Arguments\n    /// * `out_ptr` - pointer to the location where argument bytes will be copied from the host side\n    /// * `out_size` - size of output pointer\n    pub fn casper_random_bytes(out_ptr: *mut u8, out_size: usize) -> i32;\n    /// Enables contract in a contract package. 
Returns non-zero standard error for a failure,\n    /// otherwise a zero indicates success.\n    ///\n    /// # Arguments\n    ///\n    /// * `contract_package_hash_ptr` - pointer to serialized contract package hash.\n    /// * `contract_package_hash_size` - size of contract package hash in serialized form.\n    /// * `contract_hash_ptr` - pointer to serialized contract hash.\n    /// * `contract_hash_size` - size of contract hash in serialized form.\n    pub fn casper_enable_contract_version(\n        contract_package_hash_ptr: *const u8,\n        contract_package_hash_size: usize,\n        contract_hash_ptr: *const u8,\n        contract_hash_size: usize,\n    ) -> i32;\n    /// Manages a message topic.\n    ///\n    /// # Arguments\n    ///\n    /// * `topic_name_ptr` - pointer to the topic name UTF-8 string.\n    /// * `topic_name_size` - size of the serialized name string.\n    /// * `operation_ptr` - pointer to the management operation to be performed for the specified\n    ///   topic.\n    /// * `operation_ptr_size` - size of the operation.\n    pub fn casper_manage_message_topic(\n        topic_name_ptr: *const u8,\n        topic_name_size: usize,\n        operation_ptr: *const u8,\n        operation_size: usize,\n    ) -> i32;\n    /// Emits a new message on the specified topic.\n    ///\n    /// # Arguments\n    ///\n    /// * `topic_name_ptr` - pointer to the topic name UTF-8 string where the message will be\n    ///   emitted.\n    /// * `topic_name_size` - size of the serialized name string.\n    /// * `message_ptr` - pointer to the serialized message payload to be emitted.\n    /// * `message_size` - size of the serialized message payload.\n    pub fn casper_emit_message(\n        topic_name_ptr: *const u8,\n        topic_name_size: usize,\n        message_ptr: *const u8,\n        message_size: usize,\n    ) -> i32;\n\n    /// Returns information about the current call stack tracked by the runtime\n    /// based on an action\n    /// `0` => Initiator 
of the call chain\n    /// `1` => Immediate caller\n    /// `2` => The entire call stack\n    ///\n    /// # Arguments\n    /// `action`: u8 which encodes the information requested by the caller.\n    /// * `call_stack_len_ptr` - pointer to the length of the caller information.\n    /// * `result_size_ptr` - pointer to the size of the serialized caller information.\n    pub fn casper_load_caller_information(\n        action: u8,\n        call_stack_len_ptr: *mut usize,\n        result_size_ptr: *mut usize,\n    ) -> i32;\n\n    /// This function gets the requested field at `field_idx`. It is up to\n    /// the caller to ensure that the correct number of bytes for the field data\n    /// are allocated at `dest_ptr`, otherwise data corruption in the wasm memory may occur.\n    ///\n    /// # Arguments\n    ///\n    /// * `field_idx` - what info field is requested?\n    /// * 0 => block time (functionally equivalent to earlier get_blocktime ffi)\n    /// * 1 => block height\n    /// * 2 => parent block hash\n    /// * 3 => state hash\n    /// * 4 => current protocol version\n    /// * 5 => is addressable entity enabled\n    /// * `dest_ptr` => pointer in wasm memory where to write the result\n    pub fn casper_get_block_info(field_idx: u8, dest_ptr: *const u8);\n\n    /// Computes digest hash, using provided algorithm type.\n    ///\n    /// # Arguments\n    ///\n    /// * `in_ptr` - pointer to the location where argument bytes will be copied from the host side\n    /// * `in_size` - size of output pointer\n    /// * `hash_algo_type` - integer representation of HashAlgorithm enum variant\n    /// * `out_ptr` - pointer to the location where argument bytes will be copied to the host side\n    /// * `out_size` - size of output pointer\n    pub fn casper_generic_hash(\n        in_ptr: *const u8,\n        in_size: usize,\n        hash_algo_type: u8,\n        out_ptr: *const u8,\n        out_size: usize,\n    ) -> i32;\n\n    /// Recovers a Secp256k1 public key from a 
signed message\n    /// and a signature used in the process of signing.\n    ///\n    /// # Arguments\n    ///\n    /// * `message_ptr` - pointer to the signed data\n    /// * `message_size` - length of the signed data in bytes\n    /// * `signature_ptr` - pointer to byte-encoded signature\n    /// * `signature_size` - length of the byte-encoded signature\n    /// * `out_ptr` - pointer to a buffer of size PublicKey::SECP256K1_LENGTH which will be\n    ///   populated with the recovered key's bytes representation\n    /// * `recovery_id` - an integer value 0, 1, 2, or 3 used to select the correct public key from\n    ///   the signature:\n    ///   - Low bit (0/1): was the y-coordinate of the affine point resulting from the fixed-base\n    ///     multiplication 𝑘×𝑮 odd?\n    ///   - Hi bit (3/4): did the affine x-coordinate of 𝑘×𝑮 overflow the order of the scalar field,\n    ///     requiring a reduction when computing r?\n    pub fn casper_recover_secp256k1(\n        message_ptr: *const u8,\n        message_size: usize,\n        signature_ptr: *const u8,\n        signature_size: usize,\n        out_ptr: *const u8,\n        recovery_id: u8,\n    ) -> i32;\n\n    /// Verifies the signature of the given message against the given public key.\n    ///\n    /// # Arguments\n    ///\n    /// * `message_ptr` - pointer to the signed data\n    /// * `message_size` - length of the signed data in bytes\n    /// * `signature_ptr` - pointer to byte-encoded signature\n    /// * `signature_size` - length of the byte-encoded signature\n    /// * `public_key_ptr` - pointer to byte-encoded public key\n    /// * `public_key_size` - length of the byte-encoded public key\n    pub fn casper_verify_signature(\n        message_ptr: *const u8,\n        message_size: usize,\n        signature_ptr: *const u8,\n        signature_size: usize,\n        public_key_ptr: *const u8,\n        public_key_size: usize,\n    ) -> i32;\n    /// Calls a contract by its package hash. 
Requires both a major and contract version. Requires\n    /// an entry point name registered in a given version of contract. Returns a standard error\n    /// code in case of failure, otherwise a successful execution returns zero. Bytes returned\n    /// from contract execution are set to `result_size` pointer\n    ///\n    /// # Arguments\n    ///\n    /// * `contract_package_hash_ptr` - pointer to serialized contract package hash.\n    /// * `contract_package_hash_size` - size of contract package hash in serialized form.\n    /// * `contract_version_ptr` - Contract package hash in a serialized form\n    /// * `contract_version_size` -\n    /// * `entry_point_name_ptr` -\n    /// * `entry_point_name_size` -\n    /// * `runtime_args_ptr` -\n    /// * `runtime_args_size` -\n    /// * `result_size` -\n    pub fn casper_call_package_version(\n        contract_package_hash_ptr: *const u8,\n        contract_package_hash_size: usize,\n        major_version_ptr: *const u8,\n        major_version_size: usize,\n        contract_version_ptr: *const u8,\n        contract_version_size: usize,\n        entry_point_name_ptr: *const u8,\n        entry_point_name_size: usize,\n        runtime_args_ptr: *const u8,\n        runtime_args_size: usize,\n        result_size: *mut usize,\n    ) -> i32;\n}\n"
  },
  {
    "path": "smart_contracts/contract/src/lib.rs",
    "content": "//! A Rust library for writing smart contracts on the\n//! [Casper Platform](https://docs.casper.network/dapp-dev-guide).\n//!\n//! # `no_std`\n//!\n//! The library is `no_std`, but uses the `core` and `alloc` crates.\n//!\n//! # Example\n//!\n//! The following example contains session code which persists an integer value under an unforgeable\n//! reference.  It then stores the unforgeable reference under a name in context-local storage.\n//!\n//! # Writing Smart Contracts\n//!\n//! ```no_run\n//! #![no_std]\n//! #![no_main]\n//!\n//! use casper_contract::contract_api::{runtime, storage};\n//! use casper_types::{Key, URef};\n//!\n//! const KEY: &str = \"special_value\";\n//! const ARG_VALUE: &str = \"value\";\n//!\n//! fn store(value: i32) {\n//!     // Store `value` under a new unforgeable reference.\n//!     let value_ref: URef = storage::new_uref(value);\n//!\n//!     // Wrap the unforgeable reference in a value of type `Key`.\n//!     let value_key: Key = value_ref.into();\n//!\n//!     // Store this key under the name \"special_value\" in context-local storage.\n//!     runtime::put_key(KEY, value_key);\n//! }\n//!\n//! // All session code must have a `call` entrypoint.\n//! #[no_mangle]\n//! pub extern \"C\" fn call() {\n//!     // Get the optional first argument supplied to the argument.\n//!     let value: i32 = runtime::get_named_arg(ARG_VALUE);\n//!     store(value);\n//! }\n//! ```\n//!\n//! Support for writing smart contracts are contained in the [`contract_api`] module and its\n//! 
 submodules.\n\n#![cfg_attr(not(test), no_std)]\n#![cfg_attr(all(not(test), feature = \"no-std-helpers\"), allow(internal_features))]\n#![cfg_attr(\n    all(not(test), feature = \"no-std-helpers\"),\n    feature(alloc_error_handler, core_intrinsics, lang_items)\n)]\n#![doc(html_root_url = \"https://docs.rs/casper-contract/5.1.1\")]\n#![doc(\n    html_favicon_url = \"https://raw.githubusercontent.com/casper-network/casper-node/dev/images/Casper_Logo_Favicon_48.png\",\n    html_logo_url = \"https://raw.githubusercontent.com/casper-network/casper-node/dev/images/Casper_Logo_Favicon.png\"\n)]\n#![warn(missing_docs)]\n\nextern crate alloc;\n\npub mod contract_api;\npub mod ext_ffi;\n#[cfg(all(not(test), feature = \"no-std-helpers\", not(feature = \"std\")))]\nmod no_std_handlers;\npub mod unwrap_or_revert;\n\n/// An instance of [`WeeAlloc`](https://docs.rs/wee_alloc) which allows contracts built as `no_std`\n/// to avoid having to provide a global allocator themselves.\n#[cfg(all(not(test), feature = \"no-std-helpers\"))]\n#[global_allocator]\npub static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;\n"
  },
  {
    "path": "smart_contracts/contract/src/no_std_handlers.rs",
    "content": "//! Contains definitions for panic and allocation error handlers.\n\n/// A panic handler for use in a `no_std` environment which simply aborts the process.\n#[panic_handler]\n#[no_mangle]\npub fn panic(_info: &core::panic::PanicInfo) -> ! {\n    #[cfg(feature = \"test-support\")]\n    crate::contract_api::runtime::print(&alloc::format!(\"{_info}\"));\n    core::intrinsics::abort();\n}\n\n/// An out-of-memory allocation error handler for use in a `no_std` environment which simply aborts\n/// the process.\n#[alloc_error_handler]\n#[no_mangle]\npub fn oom(_: core::alloc::Layout) -> ! {\n    core::intrinsics::abort();\n}\n\n#[lang = \"eh_personality\"]\nextern \"C\" fn eh_personality() {}\n"
  },
  {
    "path": "smart_contracts/contract/src/unwrap_or_revert.rs",
    "content": "//! Home of [`UnwrapOrRevert`], a convenience trait for unwrapping values.\n\nuse casper_types::ApiError;\n\nuse crate::contract_api::runtime;\n\n/// A trait which provides syntactic sugar for unwrapping a type or calling\n/// [`runtime::revert`] if this fails.  It is implemented for `Result` and `Option`.\npub trait UnwrapOrRevert<T> {\n    /// Unwraps the value into its inner type or calls [`runtime::revert`] with a\n    /// predetermined error code on failure.\n    fn unwrap_or_revert(self) -> T;\n\n    /// Unwraps the value into its inner type or calls [`runtime::revert`] with the\n    /// provided `error` on failure.\n    fn unwrap_or_revert_with<E: Into<ApiError>>(self, error: E) -> T;\n}\n\nimpl<T, E: Into<ApiError>> UnwrapOrRevert<T> for Result<T, E> {\n    fn unwrap_or_revert(self) -> T {\n        self.unwrap_or_else(|error| runtime::revert(error.into()))\n    }\n\n    fn unwrap_or_revert_with<F: Into<ApiError>>(self, error: F) -> T {\n        self.unwrap_or_else(|_| runtime::revert(error.into()))\n    }\n}\n\nimpl<T> UnwrapOrRevert<T> for Option<T> {\n    fn unwrap_or_revert(self) -> T {\n        self.unwrap_or_else(|| runtime::revert(ApiError::None))\n    }\n\n    fn unwrap_or_revert_with<E: Into<ApiError>>(self, error: E) -> T {\n        self.unwrap_or_else(|| runtime::revert(error.into()))\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contract/tests/version_numbers.rs",
    "content": "#[cfg(feature = \"version-sync\")]\n#[test]\nfn test_html_root_url() {\n    version_sync::assert_html_root_url_updated!(\"src/lib.rs\");\n}\n"
  },
  {
    "path": "smart_contracts/contracts/.cargo/config.toml",
    "content": "[build]\ntarget = \"wasm32-unknown-unknown\"\n"
  },
  {
    "path": "smart_contracts/contracts/SRE/create-test-node-01/Cargo.toml",
    "content": "[package]\nname = \"create-test-node-01\"\nversion = \"0.1.0\"\nauthors = [\"Henry Till <henrytill@gmail.com>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"create_test_node_01\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncreate-test-node-shared = { path = \"../create-test-node-shared\" }\n"
  },
  {
    "path": "smart_contracts/contracts/SRE/create-test-node-01/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nconst NODE_01_ADDR: &[u8; 64] = b\"d853ee569a6cf4315a26cf1190f9b55003aae433bd732453b967742b883da0b2\";\nconst INITIAL_AMOUNT: u64 = 1_000_000;\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    create_test_node_shared::create_account(NODE_01_ADDR, INITIAL_AMOUNT)\n}\n"
  },
  {
    "path": "smart_contracts/contracts/SRE/create-test-node-02/Cargo.toml",
    "content": "[package]\nname = \"create-test-node-02\"\nversion = \"0.1.0\"\nauthors = [\"Henry Till <henrytill@gmail.com>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"create_test_node_02\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncreate-test-node-shared = { path = \"../create-test-node-shared\" }\n"
  },
  {
    "path": "smart_contracts/contracts/SRE/create-test-node-02/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nconst NODE_02_ADDR: &[u8; 64] = b\"4ee7ad9b21fd625481d0a94c618a15ab92503a7457e428a4dcd9dd6f100e979b\";\nconst INITIAL_AMOUNT: u64 = 1_000_000;\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    create_test_node_shared::create_account(NODE_02_ADDR, INITIAL_AMOUNT)\n}\n"
  },
  {
    "path": "smart_contracts/contracts/SRE/create-test-node-03/Cargo.toml",
    "content": "[package]\nname = \"create-test-node-03\"\nversion = \"0.1.0\"\nauthors = [\"Henry Till <henrytill@gmail.com>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"create_test_node_03\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncreate-test-node-shared = { path = \"../create-test-node-shared\" }\n"
  },
  {
    "path": "smart_contracts/contracts/SRE/create-test-node-03/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nconst NODE_03_ADDR: &[u8; 64] = b\"a3b2fd2971f2de5145d2342df38555ce97070a27ef7e74b63e08c482697308dd\";\nconst INITIAL_AMOUNT: u64 = 1_000_000;\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    create_test_node_shared::create_account(NODE_03_ADDR, INITIAL_AMOUNT)\n}\n"
  },
  {
    "path": "smart_contracts/contracts/SRE/create-test-node-shared/Cargo.toml",
    "content": "[package]\nname = \"create-test-node-shared\"\nversion = \"0.1.0\"\nauthors = [\"Henry Till <henrytill@gmail.com>\"]\nedition = \"2021\"\n\n[dependencies]\nbase16 = { version = \"0.2.1\", default-features = false }\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/SRE/create-test-node-shared/src/lib.rs",
    "content": "#![no_std]\n\nuse casper_contract::{\n    contract_api::{runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{account::AccountHash, ApiError, TransferredTo, U512};\n\n#[repr(u16)]\nenum Error {\n    AccountAlreadyExists = 10,\n    TransferFailed = 11,\n    FailedToParseAccountHash = 12,\n}\n\nimpl From<Error> for ApiError {\n    fn from(error: Error) -> Self {\n        ApiError::User(error as u16)\n    }\n}\n\nfn parse_account_hash(hex: &[u8]) -> AccountHash {\n    let mut buffer = [0u8; 32];\n    let bytes_written = base16::decode_slice(hex, &mut buffer)\n        .ok()\n        .unwrap_or_revert_with(Error::FailedToParseAccountHash);\n    if bytes_written != buffer.len() {\n        runtime::revert(Error::FailedToParseAccountHash)\n    }\n    AccountHash::new(buffer)\n}\n\npub fn create_account(account_addr: &[u8; 64], initial_amount: u64) {\n    let account_hash = parse_account_hash(account_addr);\n    let amount: U512 = U512::from(initial_amount);\n\n    match system::transfer_to_account(account_hash, amount, None)\n        .unwrap_or_revert_with(Error::TransferFailed)\n    {\n        TransferredTo::NewAccount => (),\n        TransferredTo::ExistingAccount => runtime::revert(Error::AccountAlreadyExists),\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/admin/disable-contract/Cargo.toml",
    "content": "[package]\nname = \"disable-contract\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"disable_contract\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/admin/disable-contract/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{AddressableEntityHash, PackageHash};\n\nconst ARG_CONTRACT_PACKAGE_HASH: &str = \"contract_package_hash\";\nconst ARG_CONTRACT_HASH: &str = \"contract_hash\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    // This contract can be run only by an administrator account.\n    let contract_package_hash: PackageHash = runtime::get_named_arg(ARG_CONTRACT_PACKAGE_HASH);\n    let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH);\n\n    storage::disable_contract_version(contract_package_hash.into(), contract_hash.into())\n        .unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/admin/enable-contract/Cargo.toml",
    "content": "[package]\nname = \"enable-contract\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"enable_contract\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/admin/enable-contract/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{AddressableEntityHash, PackageHash};\n\nconst ARG_CONTRACT_PACKAGE_HASH: &str = \"contract_package_hash\";\nconst ARG_CONTRACT_HASH: &str = \"contract_hash\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    // This contract can be run only by an administrator account.\n    let contract_package_hash: PackageHash = runtime::get_named_arg(ARG_CONTRACT_PACKAGE_HASH);\n    let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH);\n\n    storage::enable_contract_version(contract_package_hash.into(), contract_hash.into())\n        .unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/bench/create-accounts/Cargo.toml",
    "content": "[package]\nname = \"create-accounts\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@papierski.net>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"create_accounts\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/bench/create-accounts/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::vec::Vec;\n\nuse casper_contract::{\n    contract_api::{runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{account::AccountHash, ApiError, U512};\n\nconst ARG_ACCOUNTS: &str = \"accounts\";\nconst ARG_AMOUNT: &str = \"amount\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let accounts: Vec<AccountHash> = runtime::get_named_arg(ARG_ACCOUNTS);\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n    let seed_amount = amount / accounts.len();\n    for account_hash in accounts {\n        system::transfer_to_account(account_hash, seed_amount, None)\n            .unwrap_or_revert_with(ApiError::Transfer);\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/bench/create-purses/Cargo.toml",
    "content": "[package]\nname = \"create-purses\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@papierski.net>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"create_purses\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/bench/create-purses/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[macro_use]\nextern crate alloc;\n\nuse casper_contract::{\n    contract_api::{account, runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::U512;\n\nconst ARG_TOTAL_PURSES: &str = \"total_purses\";\nconst ARG_AMOUNT: &str = \"amount\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let total_purses: u64 = runtime::get_named_arg(ARG_TOTAL_PURSES);\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n    let seed_amount = amount / total_purses;\n\n    for i in 0..total_purses {\n        let new_purse = system::create_purse();\n        system::transfer_from_purse_to_purse(\n            account::get_main_purse(),\n            new_purse,\n            seed_amount,\n            None,\n        )\n        .unwrap_or_revert();\n\n        let name = format!(\"purse:{}\", i);\n        runtime::put_key(&name, new_purse.into());\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/bench/transfer-to-existing-account/Cargo.toml",
    "content": "[package]\nname = \"transfer-to-existing-account\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@papierski.net>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"transfer_to_existing_account\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/bench/transfer-to-existing-account/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::{\n    contract_api::{runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{account::AccountHash, ApiError, TransferredTo, U512};\n\nconst ARG_TARGET: &str = \"target\";\nconst ARG_AMOUNT: &str = \"amount\";\n\n#[repr(u16)]\nenum Error {\n    TransferredToNewAccount = 0,\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let account: AccountHash = runtime::get_named_arg(ARG_TARGET);\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n    let result = system::transfer_to_account(account, amount, None).unwrap_or_revert();\n    match result {\n        TransferredTo::ExistingAccount => {\n            // This is the expected result, as all accounts have to be initialized beforehand\n        }\n        TransferredTo::NewAccount => {\n            runtime::revert(ApiError::User(Error::TransferredToNewAccount as u16))\n        }\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/bench/transfer-to-purse/Cargo.toml",
    "content": "[package]\nname = \"transfer-to-purse\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@papierski.net>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"transfer_to_purse\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/bench/transfer-to-purse/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::{\n    contract_api::{account, runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{URef, U512};\n\nconst ARG_TARGET_PURSE: &str = \"target_purse\";\nconst ARG_AMOUNT: &str = \"amount\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let target_purse: URef = runtime::get_named_arg(ARG_TARGET_PURSE);\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n\n    let source_purse = account::get_main_purse();\n\n    system::transfer_from_purse_to_purse(source_purse, target_purse, amount, None)\n        .unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/client/activate-bid/Cargo.toml",
    "content": "[package]\nname = \"activate-bid\"\nversion = \"0.1.0\"\nauthors = [\"Henry Till <henrytill@gmail.com>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"activate_bid\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/client/activate-bid/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::contract_api::{runtime, system};\nuse casper_types::{runtime_args, system::auction, PublicKey};\n\nconst ARG_VALIDATOR: &str = \"validator\";\n\nfn activate_bid(public_key: PublicKey) {\n    let contract_hash = system::get_auction();\n    let args = runtime_args! {\n        auction::ARG_VALIDATOR => public_key,\n    };\n    runtime::call_contract::<()>(contract_hash, auction::METHOD_ACTIVATE_BID, args);\n}\n\n// Accepts a public key. Issues an activate-bid bid to the auction contract.\n#[no_mangle]\npub extern \"C\" fn call() {\n    let public_key: PublicKey = runtime::get_named_arg(ARG_VALIDATOR);\n    activate_bid(public_key);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/client/add-bid/Cargo.toml",
    "content": "[package]\nname = \"add-bid\"\nversion = \"0.1.0\"\nauthors = [\"Michael Birch <birchmd@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"add_bid\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/client/add-bid/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::vec::Vec;\n\nuse casper_contract::{\n    contract_api,\n    contract_api::{runtime, system},\n    ext_ffi,\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    api_error, bytesrepr, bytesrepr::FromBytes, runtime_args, system::auction, ApiError, PublicKey,\n    U512,\n};\n\nfn get_named_arg_size(name: &str) -> Option<usize> {\n    let mut arg_size: usize = 0;\n    let ret = unsafe {\n        ext_ffi::casper_get_named_arg_size(\n            name.as_bytes().as_ptr(),\n            name.len(),\n            &mut arg_size as *mut usize,\n        )\n    };\n    match api_error::result_from(ret) {\n        Ok(_) => Some(arg_size),\n        Err(ApiError::MissingArgument) => None,\n        Err(e) => runtime::revert(e),\n    }\n}\n\n// The optional here is literal and does not co-relate to an Option enum type.\n// If the argument has been provided it is accepted, and is then turned into a Some.\n// If the argument is not provided at all, then it is considered as None.\npub fn get_optional_named_args<T: FromBytes>(name: &str) -> Option<T> {\n    let arg_size = get_named_arg_size(name)?;\n    let arg_bytes = if arg_size > 0 {\n        let res = {\n            let data_non_null_ptr = contract_api::alloc_bytes(arg_size);\n            let ret = unsafe {\n                ext_ffi::casper_get_named_arg(\n                    name.as_bytes().as_ptr(),\n                    name.len(),\n                    data_non_null_ptr.as_ptr(),\n                    arg_size,\n                )\n            };\n            let data =\n                unsafe { Vec::from_raw_parts(data_non_null_ptr.as_ptr(), arg_size, arg_size) };\n            api_error::result_from(ret).map(|_| data)\n        };\n        // Assumed to be safe as `get_named_arg_size` checks the argument already\n        res.unwrap_or_revert()\n    } else {\n        // Avoids allocation with 0 bytes and a call to get_named_arg\n        
Vec::new()\n    };\n\n    bytesrepr::deserialize(arg_bytes).ok()\n}\n\nfn add_bid(\n    public_key: PublicKey,\n    bond_amount: U512,\n    delegation_rate: auction::DelegationRate,\n    minimum_delegation_amount: Option<u64>,\n    maximum_delegation_amount: Option<u64>,\n    reserved_slots: Option<u32>,\n) {\n    let contract_hash = system::get_auction();\n    let mut args = runtime_args! {\n        auction::ARG_PUBLIC_KEY => public_key,\n        auction::ARG_AMOUNT => bond_amount,\n        auction::ARG_DELEGATION_RATE => delegation_rate,\n    };\n    // Optional arguments\n    if let Some(minimum_delegation_amount) = minimum_delegation_amount {\n        let _ = args.insert(\n            auction::ARG_MINIMUM_DELEGATION_AMOUNT,\n            minimum_delegation_amount,\n        );\n    }\n    if let Some(maximum_delegation_amount) = maximum_delegation_amount {\n        let _ = args.insert(\n            auction::ARG_MAXIMUM_DELEGATION_AMOUNT,\n            maximum_delegation_amount,\n        );\n    }\n    if let Some(reserved_slots) = reserved_slots {\n        let _ = args.insert(auction::ARG_RESERVED_SLOTS, reserved_slots);\n    }\n    runtime::call_contract::<U512>(contract_hash, auction::METHOD_ADD_BID, args);\n}\n\n// Bidding contract.\n//\n// Accepts a public key, amount and a delegation rate.\n// Issues an add bid request to the auction contract.\n#[no_mangle]\npub extern \"C\" fn call() {\n    let public_key = runtime::get_named_arg(auction::ARG_PUBLIC_KEY);\n    let bond_amount = runtime::get_named_arg(auction::ARG_AMOUNT);\n    let delegation_rate = runtime::get_named_arg(auction::ARG_DELEGATION_RATE);\n\n    // Optional arguments\n    let minimum_delegation_amount = get_optional_named_args(auction::ARG_MINIMUM_DELEGATION_AMOUNT);\n    let maximum_delegation_amount = get_optional_named_args(auction::ARG_MAXIMUM_DELEGATION_AMOUNT);\n    let reserved_slots = get_optional_named_args(auction::ARG_RESERVED_SLOTS);\n\n    add_bid(\n        public_key,\n        
bond_amount,\n        delegation_rate,\n        minimum_delegation_amount,\n        maximum_delegation_amount,\n        reserved_slots,\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/client/add-reservations/Cargo.toml",
    "content": "[package]\nname = \"add-reservations\"\nversion = \"0.1.0\"\nauthors = [\"Jacek Chmielewski <jchmielewski@teonite.com>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"add_reservations\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/client/add-reservations/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::vec::Vec;\n\nuse casper_contract::contract_api::{runtime, system};\nuse casper_types::{\n    runtime_args,\n    system::auction::{self, Reservation},\n};\n\nfn add_reservations(reservations: Vec<Reservation>) {\n    let contract_hash = system::get_auction();\n    let args = runtime_args! {\n        auction::ARG_RESERVATIONS => reservations,\n    };\n    runtime::call_contract::<()>(contract_hash, auction::METHOD_ADD_RESERVATIONS, args);\n}\n\n// Add delegators to validator's reserved list.\n//\n// Accepts reservations.\n// Issues an add_reservations request to the auction contract.\n#[no_mangle]\npub extern \"C\" fn call() {\n    let reservations: Vec<Reservation> = runtime::get_named_arg(auction::ARG_RESERVATIONS);\n\n    add_reservations(reservations);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/client/burn/Cargo.toml",
    "content": "[package]\nname = \"burn\"\nversion = \"0.1.0\"\nauthors = [\"Igor Bunar <igor@casper.network>\", \"Jan Hoffmann <jan@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"burn\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/client/burn/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\nuse alloc::{string::String, vec::Vec};\n\nuse casper_contract::{\n    contract_api::{account, alloc_bytes, runtime, system},\n    ext_ffi,\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{api_error, bytesrepr, runtime_args, system::mint, ApiError, Key, URef, U512};\n\nconst ARG_PURSE_NAME: &str = \"purse_name\";\n\nfn burn(uref: URef, amount: U512) -> Result<(), mint::Error> {\n    let contract_hash = system::get_mint();\n    let args = runtime_args! {\n        mint::ARG_PURSE => uref,\n        mint::ARG_AMOUNT => amount,\n    };\n    runtime::call_contract(contract_hash, mint::METHOD_BURN, args)\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let purse_uref = match get_named_arg_option::<String>(ARG_PURSE_NAME) {\n        Some(name) => {\n            // if a key was provided and there is no value under it we revert\n            // to prevent user from accidentaly burning tokens from the main purse\n            // eg. 
if they make a typo\n            let Some(Key::URef(purse_uref)) = runtime::get_key(&name) else {\n                runtime::revert(ApiError::InvalidPurseName)\n            };\n            purse_uref\n        }\n        None => account::get_main_purse(),\n    };\n    let amount: U512 = runtime::get_named_arg(mint::ARG_AMOUNT);\n\n    burn(purse_uref, amount).unwrap_or_revert();\n}\n\nfn get_named_arg_size(name: &str) -> Option<usize> {\n    let mut arg_size: usize = 0;\n    let ret = unsafe {\n        ext_ffi::casper_get_named_arg_size(\n            name.as_bytes().as_ptr(),\n            name.len(),\n            &mut arg_size as *mut usize,\n        )\n    };\n    match api_error::result_from(ret) {\n        Ok(_) => Some(arg_size),\n        Err(ApiError::MissingArgument) => None,\n        Err(e) => runtime::revert(e),\n    }\n}\n\nfn get_named_arg_option<T: bytesrepr::FromBytes>(name: &str) -> Option<T> {\n    let arg_size = get_named_arg_size(name).unwrap_or_revert_with(ApiError::MissingArgument);\n    let arg_bytes = if arg_size > 0 {\n        let res = {\n            let data_non_null_ptr = alloc_bytes(arg_size);\n            let ret = unsafe {\n                ext_ffi::casper_get_named_arg(\n                    name.as_bytes().as_ptr(),\n                    name.len(),\n                    data_non_null_ptr.as_ptr(),\n                    arg_size,\n                )\n            };\n            let data =\n                unsafe { Vec::from_raw_parts(data_non_null_ptr.as_ptr(), arg_size, arg_size) };\n            if ret != 0 {\n                return None;\n            }\n            data\n        };\n        res\n    } else {\n        // Avoids allocation with 0 bytes and a call to get_named_arg\n        Vec::new()\n    };\n\n    let deserialized_data =\n        bytesrepr::deserialize(arg_bytes).unwrap_or_revert_with(ApiError::InvalidArgument);\n    Some(deserialized_data)\n}\n"
  },
  {
    "path": "smart_contracts/contracts/client/call-contract/Cargo.toml",
    "content": "[package]\nname = \"call-contract\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"call_contract\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/client/call-contract/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[macro_use]\nextern crate alloc;\n\nuse core::mem::MaybeUninit;\n\nuse alloc::string::String;\nuse casper_contract::{contract_api::runtime, ext_ffi, unwrap_or_revert::UnwrapOrRevert};\nuse casper_types::{\n    api_error,\n    bytesrepr::{self, Bytes, ToBytes},\n    AddressableEntityHash, ApiError, RuntimeArgs,\n};\n\nconst ARG_CONTRACT_HASH: &str = \"contract_hash\";\nconst ARG_ENTRYPOINT: &str = \"entrypoint\";\nconst ARG_ARGUMENTS: &str = \"arguments\";\n\n// Generic call contract contract.\n//\n// Accepts entrypoint name, and saves possible return value into URef stored in named keys.\n#[no_mangle]\npub extern \"C\" fn call() {\n    let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH);\n    let entrypoint: String = runtime::get_named_arg(ARG_ENTRYPOINT);\n    let arguments: RuntimeArgs = runtime::get_named_arg(ARG_ARGUMENTS);\n\n    let _result_bytes = call_contract_forward(entrypoint, contract_hash, arguments);\n}\n\nfn deserialize_contract_result(bytes_written: usize) -> Option<Bytes> {\n    if bytes_written == 0 {\n        // If no bytes were written, the host buffer hasn't been set and hence shouldn't be read.\n        None\n    } else {\n        // NOTE: this is a copy of the contents of `read_host_buffer()`.  
Calling that directly from\n        // here causes several contracts to fail with a Wasmi `Unreachable` error.\n        let mut dest = vec![0; bytes_written];\n        let real_size = read_host_buffer_into(&mut dest).unwrap_or_revert();\n        assert_eq!(dest.len(), real_size);\n\n        let bytes: Bytes = bytesrepr::deserialize_from_slice(&dest[..real_size]).unwrap_or_revert();\n\n        Some(bytes)\n    }\n}\n\nfn read_host_buffer_into(dest: &mut [u8]) -> Result<usize, ApiError> {\n    let mut bytes_written = MaybeUninit::uninit();\n    let ret = unsafe {\n        ext_ffi::casper_read_host_buffer(dest.as_mut_ptr(), dest.len(), bytes_written.as_mut_ptr())\n    };\n    api_error::result_from(ret)?;\n    Ok(unsafe { bytes_written.assume_init() })\n}\n\n/// Calls a contract and returns unwrapped [`CLValue`].\nfn call_contract_forward(\n    entrypoint: String,\n    contract_hash: AddressableEntityHash,\n    arguments: RuntimeArgs,\n) -> Option<Bytes> {\n    let entry_point_name: &str = &entrypoint;\n    let contract_hash_ptr = contract_hash.to_bytes().unwrap_or_revert();\n    let entry_point_name = entry_point_name.to_bytes().unwrap_or_revert();\n    let runtime_args_ptr = arguments.to_bytes().unwrap_or_revert();\n    let bytes_written = {\n        let mut bytes_written = MaybeUninit::uninit();\n        let ret = unsafe {\n            ext_ffi::casper_call_contract(\n                contract_hash_ptr.as_ptr(),\n                contract_hash_ptr.len(),\n                entry_point_name.as_ptr(),\n                entry_point_name.len(),\n                runtime_args_ptr.as_ptr(),\n                runtime_args_ptr.len(),\n                bytes_written.as_mut_ptr(),\n            )\n        };\n        api_error::result_from(ret).unwrap_or_revert();\n        unsafe { bytes_written.assume_init() }\n    };\n    deserialize_contract_result(bytes_written)\n}\n"
  },
  {
    "path": "smart_contracts/contracts/client/call-package-version-by-hash/Cargo.toml",
    "content": "[package]\nname = \"call-package-version-by-hash\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[[bin]]\nname = \"call_package_version_by_hash\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/client/call-package-version-by-hash/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\nuse casper_contract::contract_api::runtime;\nuse casper_types::{contracts::ContractPackageHash, runtime_args};\n\nconst ARG_PURSE_NAME: &str = \"purse_name\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let package_hash: ContractPackageHash = runtime::get_named_arg(\"contract_package_hash\");\n    let entity_version: Option<u32> = runtime::get_named_arg(\"version\");\n    let major_version: Option<u32> = runtime::get_named_arg(\"major_version\");\n    let entry_point_name: String = runtime::get_named_arg(\"entry_point\");\n    let purse_name: String = runtime::get_named_arg(ARG_PURSE_NAME);\n\n    runtime::call_package_version(\n        package_hash,\n        major_version,\n        entity_version,\n        &entry_point_name,\n        runtime_args! {\n            ARG_PURSE_NAME => purse_name\n        },\n    )\n}\n"
  },
  {
    "path": "smart_contracts/contracts/client/cancel-reservations/Cargo.toml",
    "content": "[package]\nname = \"cancel-reservations\"\nversion = \"0.1.0\"\nauthors = [\"Jacek Chmielewski <jchmielewski@teonite.com>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"cancel_reservations\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/client/cancel-reservations/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::vec::Vec;\n\nuse casper_contract::contract_api::{runtime, system};\nuse casper_types::{\n    runtime_args,\n    system::{auction, auction::DelegatorKind},\n    PublicKey,\n};\n\nfn cancel_reservations(validator: PublicKey, delegators: Vec<DelegatorKind>) {\n    let contract_hash = system::get_auction();\n    let args = runtime_args! {\n        auction::ARG_VALIDATOR => validator,\n        auction::ARG_DELEGATORS => delegators,\n    };\n    runtime::call_contract::<()>(contract_hash, auction::METHOD_CANCEL_RESERVATIONS, args);\n}\n\n// Remove delegators from validator's reserved list.\n//\n// Accepts delegators' and validator's public keys.\n// Issues a cancel_reservations request to the auction contract.\n#[no_mangle]\npub extern \"C\" fn call() {\n    let delegators = runtime::get_named_arg(auction::ARG_DELEGATORS);\n    let validator = runtime::get_named_arg(auction::ARG_VALIDATOR);\n\n    cancel_reservations(validator, delegators);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/client/change_bid_public_key/Cargo.toml",
    "content": "[package]\nname = \"change_bid_public_key\"\nversion = \"0.1.0\"\nauthors = [\"Maciej Wójcik <mwojcik@teonite.com>\"]\nedition = \"2018\"\n\n[[bin]]\nname = \"change_bid_public_key\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/client/change_bid_public_key/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse casper_contract::contract_api::{runtime, system};\nuse casper_types::{\n    runtime_args,\n    system::auction::{ARG_NEW_PUBLIC_KEY, ARG_PUBLIC_KEY, METHOD_CHANGE_BID_PUBLIC_KEY},\n    PublicKey,\n};\n\nfn change_bid_public_key(public_key: PublicKey, new_public_key: PublicKey) {\n    let contract_hash = system::get_auction();\n    let args = runtime_args! {\n        ARG_PUBLIC_KEY => public_key,\n        ARG_NEW_PUBLIC_KEY => new_public_key\n    };\n    runtime::call_contract::<()>(contract_hash, METHOD_CHANGE_BID_PUBLIC_KEY, args);\n}\n\n// Change validator bid public key.\n//\n// Accepts current bid's public key and new public key.\n// Updates existing validator bid and all related delegator bids with\n// the new public key.\n#[no_mangle]\npub extern \"C\" fn call() {\n    let public_key = runtime::get_named_arg(ARG_PUBLIC_KEY);\n    let new_public_key = runtime::get_named_arg(ARG_NEW_PUBLIC_KEY);\n    change_bid_public_key(public_key, new_public_key);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/client/delegate/Cargo.toml",
    "content": "[package]\nname = \"delegate\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"delegate\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/client/delegate/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse casper_contract::contract_api::{runtime, system};\nuse casper_types::{runtime_args, system::auction, PublicKey, U512};\n\nconst ARG_AMOUNT: &str = \"amount\";\n\nconst ARG_VALIDATOR: &str = \"validator\";\nconst ARG_DELEGATOR: &str = \"delegator\";\n\nfn delegate(delegator: PublicKey, validator: PublicKey, amount: U512) {\n    let contract_hash = system::get_auction();\n    let args = runtime_args! {\n        auction::ARG_DELEGATOR => delegator,\n        auction::ARG_VALIDATOR => validator,\n        auction::ARG_AMOUNT => amount,\n    };\n    runtime::call_contract::<U512>(contract_hash, auction::METHOD_DELEGATE, args);\n}\n\n// Delegate contract.\n//\n// Accepts a delegator's public key, validator's public key, amount and a delegation rate.\n// Issues an delegation request to the auction contract.\n#[no_mangle]\npub extern \"C\" fn call() {\n    let delegator = runtime::get_named_arg(ARG_DELEGATOR);\n    let validator = runtime::get_named_arg(ARG_VALIDATOR);\n    let amount = runtime::get_named_arg(ARG_AMOUNT);\n\n    delegate(delegator, validator, amount);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/client/disable-contract-by-contract-hash/Cargo.toml",
    "content": "[package]\nname = \"disable-contract-by-contract-hash\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"disable_contract_by_contract_hash\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n\n"
  },
  {
    "path": "smart_contracts/contracts/client/disable-contract-by-contract-hash/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::contracts::{ContractHash, ContractPackageHash};\n\nconst ARG_CONTRACT_PACKAGE_HASH: &str = \"contract_package_hash\";\nconst ARG_CONTRACT_HASH: &str = \"contract_hash\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    // This contract can be run only by an administrator account.\n    let contract_package_hash: ContractPackageHash =\n        runtime::get_named_arg(ARG_CONTRACT_PACKAGE_HASH);\n    let contract_hash: ContractHash = runtime::get_named_arg(ARG_CONTRACT_HASH);\n\n    storage::disable_contract_version(contract_package_hash, contract_hash).unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/client/named-purse-payment/Cargo.toml",
    "content": "[package]\nname = \"named-purse-payment\"\nversion = \"0.1.0\"\nauthors = [\"Ed Hastings <ed@gmail.com>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"named_purse_payment\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/client/named-purse-payment/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\n\nuse casper_contract::{\n    contract_api::{runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{runtime_args, ApiError, RuntimeArgs, URef, U512};\n\nconst GET_PAYMENT_PURSE: &str = \"get_payment_purse\";\nconst SET_REFUND_PURSE: &str = \"set_refund_purse\";\n\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_PURSE: &str = \"purse\";\nconst ARG_PURSE_NAME: &str = \"purse_name\";\n\n/// This logic is intended to be used as SESSION PAYMENT LOGIC\n/// Alternate payment logic that allows payment from a purse other than the executing [Account]'s\n/// main purse. A `Key::Uref` to the source purse must already exist in the executing context's\n/// named keys under the name passed in as the `purse_name` argument.\n#[no_mangle]\npub extern \"C\" fn call() {\n    // source purse uref by name (from current context's named keys)\n    let purse_uref = {\n        let purse_name: String = runtime::get_named_arg(ARG_PURSE_NAME);\n        runtime::get_key(&purse_name)\n            .unwrap_or_revert_with(ApiError::InvalidPurseName)\n            .into_uref()\n            .unwrap_or_revert_with(ApiError::InvalidPurse)\n    };\n\n    // amount to transfer from named purse to payment purse\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n\n    // handle payment contract\n    let handle_payment_hash = system::get_handle_payment();\n\n    // set refund purse to source purse\n    {\n        let contract_hash = handle_payment_hash;\n        let args = runtime_args! 
{\n            ARG_PURSE => purse_uref,\n        };\n        runtime::call_contract::<()>(contract_hash, SET_REFUND_PURSE, args);\n    }\n\n    // get payment purse for current execution\n    let payment_purse: URef = runtime::call_contract(\n        handle_payment_hash,\n        GET_PAYMENT_PURSE,\n        RuntimeArgs::default(),\n    );\n\n    // transfer amount from named purse to payment purse, which will be used to pay for execution\n    system::transfer_from_purse_to_purse(purse_uref, payment_purse, amount, None)\n        .unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/client/non-standard-payment/Cargo.toml",
    "content": "[package]\nname = \"non-standard-payment\"\nversion = \"0.1.0\"\nauthors = [\"Ed Hastings <ed@gmail.com>\", \"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"non_standard_payment\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/client/non-standard-payment/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::vec::Vec;\n\nuse casper_contract::{\n    contract_api::{self, account, runtime, system},\n    ext_ffi,\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    api_error,\n    bytesrepr::{self, FromBytes},\n    ApiError, RuntimeArgs, URef, U512,\n};\n\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_SOURCE_UREF: &str = \"source\";\nconst GET_PAYMENT_PURSE: &str = \"get_payment_purse\";\n\n/// This logic is intended to be used as SESSION PAYMENT LOGIC\n/// Alternate payment logic that allows payment from a purse other than the executing [Account]'s\n/// main purse. A `Key::Uref` to the source purse must already exist in the executing context's\n/// named keys under the name passed in as the `purse_name` argument.\n#[no_mangle]\npub extern \"C\" fn call() {\n    // source purse uref by name (from current context's named keys)\n    let purse_uref = {\n        match get_named_arg_if_exists(ARG_SOURCE_UREF) {\n            Some(purse_uref) => purse_uref,\n            None => account::get_main_purse(),\n        }\n    };\n\n    // handle payment contract\n    let handle_payment_contract_hash = system::get_handle_payment();\n\n    // get payment purse for current execution\n    let payment_purse: URef = runtime::call_contract(\n        handle_payment_contract_hash,\n        GET_PAYMENT_PURSE,\n        RuntimeArgs::default(),\n    );\n\n    // amount to transfer from named purse to payment purse\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n\n    // transfer amount from named purse to payment purse, which will be used to pay for execution\n    system::transfer_from_purse_to_purse(purse_uref, payment_purse, amount, None)\n        .unwrap_or_revert();\n}\n\nfn get_named_arg_if_exists<T: FromBytes>(name: &str) -> Option<T> {\n    let arg_size = {\n        let mut arg_size: usize = 0;\n        let ret = unsafe {\n            ext_ffi::casper_get_named_arg_size(\n               
 name.as_bytes().as_ptr(),\n                name.len(),\n                &mut arg_size as *mut usize,\n            )\n        };\n        match api_error::result_from(ret) {\n            Ok(_) => Some(arg_size),\n            Err(ApiError::MissingArgument) => None,\n            Err(e) => runtime::revert(e),\n        }\n    }?;\n    let arg_bytes = if arg_size > 0 {\n        let res = {\n            let data_non_null_ptr = contract_api::alloc_bytes(arg_size);\n            let ret = unsafe {\n                ext_ffi::casper_get_named_arg(\n                    name.as_bytes().as_ptr(),\n                    name.len(),\n                    data_non_null_ptr.as_ptr(),\n                    arg_size,\n                )\n            };\n            let data =\n                unsafe { Vec::from_raw_parts(data_non_null_ptr.as_ptr(), arg_size, arg_size) };\n            api_error::result_from(ret).map(|_| data)\n        };\n        // Assumed to be safe as `get_named_arg_size` checks the argument already\n        res.unwrap_or_revert()\n    } else {\n        // Avoids allocation with 0 bytes and a call to get_named_arg\n        Vec::new()\n    };\n    let value = bytesrepr::deserialize(arg_bytes).unwrap_or_revert_with(ApiError::InvalidArgument);\n    Some(value)\n}\n"
  },
  {
    "path": "smart_contracts/contracts/client/redelegate/Cargo.toml",
    "content": "[package]\nname = \"redelegate\"\nversion = \"0.1.0\"\nauthors = [\"Karan Dhareshwar <karan@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"redelegate\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/client/redelegate/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse casper_contract::contract_api::{runtime, system};\nuse casper_types::{runtime_args, system::auction, PublicKey, U512};\n\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_DELEGATOR: &str = \"delegator\";\nconst ARG_VALIDATOR: &str = \"validator\";\nconst ARG_NEW_VALIDATOR: &str = \"new_validator\";\n\nfn redelegate(delegator: PublicKey, validator: PublicKey, amount: U512, new_validator: PublicKey) {\n    let contract_hash = system::get_auction();\n    let args = runtime_args! {\n        auction::ARG_DELEGATOR => delegator,\n        auction::ARG_VALIDATOR => validator,\n        auction::ARG_AMOUNT => amount,\n        auction::ARG_NEW_VALIDATOR => new_validator\n    };\n    let _amount: U512 = runtime::call_contract(contract_hash, auction::METHOD_REDELEGATE, args);\n}\n\n// Redelegate contract.\n//\n// Accepts a delegator's public key, validator's public key to be undelegated,\n// a new_validator's public key to redelegate to and an amount\n// to withdraw (of type `U512`).\n#[no_mangle]\npub extern \"C\" fn call() {\n    let delegator = runtime::get_named_arg(ARG_DELEGATOR);\n    let validator = runtime::get_named_arg(ARG_VALIDATOR);\n    let amount = runtime::get_named_arg(ARG_AMOUNT);\n    let new_validator = runtime::get_named_arg(ARG_NEW_VALIDATOR);\n    redelegate(delegator, validator, amount, new_validator);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/client/revert/Cargo.toml",
    "content": "[package]\nname = \"revert\"\nversion = \"0.1.0\"\nauthors = [\"Henry Till <henrytill@gmail.com>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"revert\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/client/revert/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::contract_api::runtime;\nuse casper_types::ApiError;\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    runtime::revert(ApiError::User(100))\n}\n"
  },
  {
    "path": "smart_contracts/contracts/client/transfer-to-account/Cargo.toml",
    "content": "[package]\nname = \"transfer-to-account\"\nversion = \"0.1.0\"\nauthors = [\"Michael Birch <birchmd@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"transfer_to_account\"\npath = \"src/bin/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/client/transfer-to-account/src/bin/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    transfer_to_account::delegate();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/client/transfer-to-account/src/lib.rs",
    "content": "#![no_std]\n\nuse casper_contract::{\n    contract_api::{runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{account::AccountHash, U512};\n\nconst ARG_TARGET: &str = \"target\";\nconst ARG_AMOUNT: &str = \"amount\";\n\n/// Executes mote transfer to supplied account hash.\n/// Transfers the requested amount.\npub fn delegate() {\n    let account_hash: AccountHash = runtime::get_named_arg(ARG_TARGET);\n    let transfer_amount: u64 = runtime::get_named_arg(ARG_AMOUNT);\n    let u512_motes = U512::from(transfer_amount);\n    system::transfer_to_account(account_hash, u512_motes, None).unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/client/transfer-to-account-u512/Cargo.toml",
    "content": "[package]\nname = \"transfer-to-account-u512\"\nversion = \"0.1.0\"\nauthors = [\"Michael Birch <birchmd@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"transfer_to_account_u512\"\npath = \"src/bin/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/client/transfer-to-account-u512/src/bin/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    transfer_to_account_u512::delegate();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/client/transfer-to-account-u512/src/lib.rs",
    "content": "#![no_std]\n\nuse casper_contract::{\n    contract_api::{runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{account::AccountHash, U512};\n\nconst ARG_TARGET: &str = \"target\";\nconst ARG_AMOUNT: &str = \"amount\";\n\n/// Executes mote transfer to supplied account hash.\n/// Transfers the requested amount.\n#[no_mangle]\npub fn delegate() {\n    let account_hash: AccountHash = runtime::get_named_arg(ARG_TARGET);\n    let transfer_amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n    system::transfer_to_account(account_hash, transfer_amount, None).unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/client/transfer-to-named-purse/Cargo.toml",
    "content": "[package]\nname = \"transfer-to-named-purse\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"transfer_to_named_purse\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/client/transfer-to-named-purse/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\nuse casper_contract::{\n    contract_api::{account, runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{ApiError, Key, U512};\n\nconst ARG_PURSE_NAME: &str = \"purse_name\";\nconst ARG_AMOUNT: &str = \"amount\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let purse_name: String = runtime::get_named_arg(ARG_PURSE_NAME);\n\n    let purse_uref = match runtime::get_key(&purse_name) {\n        Some(Key::URef(uref)) => uref,\n        Some(_) => {\n            // Found a key but it is not a purse\n            runtime::revert(ApiError::UnexpectedKeyVariant);\n        }\n        None => {\n            // Creates new named purse\n            let new_purse = system::create_purse();\n            runtime::put_key(&purse_name, new_purse.into());\n            new_purse\n        }\n    };\n\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n\n    let source_purse = account::get_main_purse();\n\n    if !amount.is_zero() {\n        system::transfer_from_purse_to_purse(source_purse, purse_uref, amount, None)\n            .unwrap_or_revert();\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/client/transfer-to-public-key/Cargo.toml",
    "content": "[package]\nname = \"transfer-to-public-key\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"transfer_to_public_key\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/client/transfer-to-public-key/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::{\n    contract_api::{runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{PublicKey, U512};\n\nconst ARG_TARGET: &str = \"target\";\nconst ARG_AMOUNT: &str = \"amount\";\n\n/// Executes mote transfer to supplied account hash.\n/// Transfers the requested amount.\n#[no_mangle]\npub extern \"C\" fn call() {\n    let account_hash: PublicKey = runtime::get_named_arg(ARG_TARGET);\n    let transfer_amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n    system::transfer_to_public_key(account_hash, transfer_amount, None).unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/client/undelegate/Cargo.toml",
    "content": "[package]\nname = \"undelegate\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"undelegate\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/client/undelegate/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse casper_contract::contract_api::{runtime, system};\nuse casper_types::{runtime_args, system::auction, PublicKey, U512};\n\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_DELEGATOR: &str = \"delegator\";\nconst ARG_VALIDATOR: &str = \"validator\";\n\nfn undelegate(delegator: PublicKey, validator: PublicKey, amount: U512) {\n    let contract_hash = system::get_auction();\n    let args = runtime_args! {\n        auction::ARG_DELEGATOR => delegator,\n        auction::ARG_VALIDATOR => validator,\n        auction::ARG_AMOUNT => amount,\n    };\n    let _amount: U512 = runtime::call_contract(contract_hash, auction::METHOD_UNDELEGATE, args);\n}\n\n// Undelegate contract.\n//\n// Accepts a delegator's public key, validator's public key to be undelegated, and an amount\n// to withdraw (of type `U512`).\n#[no_mangle]\npub extern \"C\" fn call() {\n    let delegator = runtime::get_named_arg(ARG_DELEGATOR);\n    let validator = runtime::get_named_arg(ARG_VALIDATOR);\n    let amount = runtime::get_named_arg(ARG_AMOUNT);\n    undelegate(delegator, validator, amount);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/client/withdraw-bid/Cargo.toml",
    "content": "[package]\nname = \"withdraw-bid\"\nversion = \"0.1.0\"\nauthors = [\"Michael Birch <birchmd@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"withdraw_bid\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/client/withdraw-bid/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse casper_contract::contract_api::{runtime, system};\nuse casper_types::{runtime_args, system::auction, PublicKey, U512};\n\nconst ARG_PUBLIC_KEY: &str = \"public_key\";\nconst ARG_AMOUNT: &str = \"amount\";\n\nfn withdraw_bid(public_key: PublicKey, unbond_amount: U512) -> U512 {\n    let contract_hash = system::get_auction();\n    let args = runtime_args! {\n        auction::ARG_AMOUNT => unbond_amount,\n        auction::ARG_PUBLIC_KEY => public_key,\n    };\n    runtime::call_contract(contract_hash, auction::METHOD_WITHDRAW_BID, args)\n}\n\n// Withdraw bid contract.\n//\n// Accepts a public key to be removed, and an amount to withdraw (of type `U512`).\n// Saves the withdrawn funds in the account's context to keep track of the funds.\n#[no_mangle]\npub extern \"C\" fn call() {\n    let public_key = runtime::get_named_arg(ARG_PUBLIC_KEY);\n    let amount = runtime::get_named_arg(ARG_AMOUNT);\n    withdraw_bid(public_key, amount);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/explorer/faucet/Cargo.toml",
    "content": "[package]\nname = \"faucet\"\nversion = \"0.1.0\"\nauthors = [\"Mateusz Górski <gorski.mateusz@protonmail.ch>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"faucet\"\npath = \"src/bin/main.rs\"\ndoctest = false\ntest = false\nbench = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/explorer/faucet/README.md",
    "content": "# Faucet Contract\n\nThe Faucet is a contract that is designed to allow users to create a new account or to allow existing users to fund their account easily.\n\n## Install Session\nThe install session is responsible for creating the faucet's stored contract package and setting up a few named keys in the account used to perform the install. The installer performs the following actions.\n\n1. Takes the `id` parameter and uses it to keep track of the faucet being set up. As an example, if the operator installing the faucet passes `1337` as the `id` named argument to the installer session, it will create a named key called `faucet_1337` for the account used to call the install session.\n1. Calls the stored faucet's `init` entry point to initialize a new purse that will be used to fund the faucet. This purse is stored under the named keys of the account used to install the faucet. The `init` entrypoint also initializes the faucet's state and writes to its named keys.\n1. Funds the faucet's purse with the amount of motes declared in the `amount` runtime argument.\n\n\n## Set Variables\n\n> NOTE: Before the faucet can be called, the `set_variables` entrypoint must be called.\n>\nThis is a list of the required runtime arguments for calling `set_variables`\n* `available_amount: U512` - The total amount available for distribution each interval.\n* `distributions_per_interval: u64` - The maximum number of distributions to be made each interval.\n* `time_interval: u64` - The amount of time in milliseconds that must pass before the available amount is replenished.\n\n\nYou can adjust the faucet's distribution rate by modifying the variables. If the faucet has distributed a total of `available_amount` in one `time_interval`, then no more token will be available to distribute until `last_distribution_at + time_interval < blocktime`. 
However, the installer of this contract is not rate limited and may continue to distribute funds from the faucet freely.\n\n## Calling the Faucet\n\nThe faucet will calculate a distribution amount as a ratio of the available amount per interval to the max distribution amount per interval. As an example, if the installer sets the available amount per interval to `100_000_000` and the max distributions per interval to `2`, then when an existing user calls the faucet, `50_000_000` motes will be distributed to the caller. If a second user calls the faucet, they will also receive `50_000_000` motes. The remaining amount will now be `0` tokens. If a third user calls the faucet then they will not receive any token.\nAfter an interval passes after the last user was funded, the available amount will be replenished.\n\n`distributions_per_interval`, `available_amount`, `time_interval` and `max_distributions_per_interval`\nmust be set and must be a number greater than `0` for the contract to run properly.\nIf you try to invoke the contract before these variables are set, then you'll get an error.\n\n### Costs by Entry Point\n\n| feature                  | cost              |\n|--------------------------|-------------------|\n| faucet install           | `1492_30_872_143` |\n| faucet set variables     | `79_455_975`      |\n| faucet call by installer | `265_26_265_33`   |\n| faucet call by user      | `2_558_318_531`   |"
  },
  {
    "path": "smart_contracts/contracts/explorer/faucet/src/bin/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    faucet::delegate();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/explorer/faucet/src/lib.rs",
    "content": "#![no_std]\n\nextern crate alloc;\n\nuse alloc::{vec, vec::Vec};\nuse core::mem::MaybeUninit;\n\nuse casper_contract::{\n    contract_api::{self, runtime, storage, system},\n    ext_ffi,\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    account::AccountHash,\n    api_error,\n    bytesrepr::{self, FromBytes, ToBytes},\n    ApiError, BlockTime, CLTyped, Key, PublicKey, URef, U512,\n};\n\npub const ARG_AMOUNT: &str = \"amount\";\npub const ARG_TARGET: &str = \"target\";\npub const ARG_ID: &str = \"id\";\npub const ARG_TIME_INTERVAL: &str = \"time_interval\";\npub const ARG_AVAILABLE_AMOUNT: &str = \"available_amount\";\npub const ARG_DISTRIBUTIONS_PER_INTERVAL: &str = \"distributions_per_interval\";\npub const REMAINING_REQUESTS: &str = \"remaining_requests\";\npub const AVAILABLE_AMOUNT: &str = \"available_amount\";\npub const TIME_INTERVAL: &str = \"time_interval\";\npub const DISTRIBUTIONS_PER_INTERVAL: &str = \"distributions_per_interval\";\npub const LAST_DISTRIBUTION_TIME: &str = \"last_distribution_time\";\npub const FAUCET_PURSE: &str = \"faucet_purse\";\npub const INSTALLER: &str = \"installer\";\npub const TWO_HOURS_AS_MILLIS: u64 = 7_200_000;\npub const CONTRACT_NAME: &str = \"faucet\";\npub const HASH_KEY_NAME: &str = \"faucet_package\";\npub const ACCESS_KEY_NAME: &str = \"faucet_package_access\";\npub const CONTRACT_VERSION: &str = \"faucet_contract_version\";\npub const AUTHORIZED_ACCOUNT: &str = \"authorized_account\";\n\npub const ENTRY_POINT_FAUCET: &str = \"call_faucet\";\npub const ENTRY_POINT_INIT: &str = \"init\";\npub const ENTRY_POINT_SET_VARIABLES: &str = \"set_variables\";\npub const ENTRY_POINT_AUTHORIZE_TO: &str = \"authorize_to\";\n\n#[repr(u16)]\nenum FaucetError {\n    InvalidAccount = 1,\n    MissingInstaller = 2,\n    InvalidInstaller = 3,\n    InstallerDoesNotFundItself = 4,\n    MissingDistributionTime = 5,\n    InvalidDistributionTime = 6,\n    MissingAvailableAmount = 7,\n    
InvalidAvailableAmount = 8,\n    MissingTimeInterval = 9,\n    InvalidTimeInterval = 10,\n    MissingId = 11,\n    InvalidId = 12,\n    FailedToTransfer = 13,\n    FailedToGetArgBytes = 14,\n    MissingFaucetPurse = 15,\n    InvalidFaucetPurse = 16,\n    MissingRemainingRequests = 17,\n    InvalidRemainingRequests = 18,\n    MissingDistributionsPerInterval = 19,\n    InvalidDistributionsPerInterval = 20,\n    UnexpectedKeyVariant = 21,\n    MissingAuthorizedAccount = 22,\n    InvalidAuthorizedAccount = 23,\n    AuthorizedAccountDoesNotFundInstaller = 24,\n    FaucetCallByUserWithAuthorizedAccountSet = 25,\n}\n\nimpl From<FaucetError> for ApiError {\n    fn from(e: FaucetError) -> Self {\n        ApiError::User(e as u16)\n    }\n}\n\n#[no_mangle]\npub fn set_variables() {\n    let installer = get_account_hash_with_user_errors(\n        INSTALLER,\n        FaucetError::MissingInstaller,\n        FaucetError::InvalidInstaller,\n    );\n\n    if installer != runtime::get_caller() {\n        runtime::revert(FaucetError::InvalidAccount);\n    }\n\n    if let Some(new_time_interval) = get_optional_named_arg_with_user_errors::<u64>(\n        ARG_TIME_INTERVAL,\n        FaucetError::MissingTimeInterval,\n        FaucetError::InvalidTimeInterval,\n    ) {\n        let time_interval_uref = get_uref_with_user_errors(\n            TIME_INTERVAL,\n            FaucetError::MissingTimeInterval,\n            FaucetError::InvalidTimeInterval,\n        );\n        storage::write(time_interval_uref, new_time_interval);\n    }\n\n    if let Some(new_available_amount) = get_optional_named_arg_with_user_errors::<U512>(\n        ARG_AVAILABLE_AMOUNT,\n        FaucetError::MissingAvailableAmount,\n        FaucetError::InvalidAvailableAmount,\n    ) {\n        let available_amount_uref = get_uref_with_user_errors(\n            AVAILABLE_AMOUNT,\n            FaucetError::MissingAvailableAmount,\n            FaucetError::InvalidAvailableAmount,\n        );\n        
storage::write(available_amount_uref, new_available_amount);\n    }\n\n    if let Some(new_distributions_per_interval) = get_optional_named_arg_with_user_errors::<u64>(\n        ARG_DISTRIBUTIONS_PER_INTERVAL,\n        FaucetError::MissingDistributionsPerInterval,\n        FaucetError::InvalidDistributionsPerInterval,\n    ) {\n        let distributions_per_interval_uref = get_uref_with_user_errors(\n            DISTRIBUTIONS_PER_INTERVAL,\n            FaucetError::MissingDistributionsPerInterval,\n            FaucetError::InvalidDistributionsPerInterval,\n        );\n        let remaining_requests_uref = get_uref_with_user_errors(\n            REMAINING_REQUESTS,\n            FaucetError::MissingRemainingRequests,\n            FaucetError::InvalidRemainingRequests,\n        );\n\n        storage::write(\n            distributions_per_interval_uref,\n            new_distributions_per_interval,\n        );\n        // remaining requests == distributions per interval.\n        storage::write(\n            remaining_requests_uref,\n            U512::from(new_distributions_per_interval),\n        );\n    }\n}\n\n#[no_mangle]\npub fn authorize_to() {\n    let installer = get_account_hash_with_user_errors(\n        INSTALLER,\n        FaucetError::MissingInstaller,\n        FaucetError::InvalidInstaller,\n    );\n\n    if runtime::get_caller() != installer {\n        runtime::revert(FaucetError::InvalidAccount);\n    }\n\n    let authorized_account_public_key = get_optional_named_arg_with_user_errors::<PublicKey>(\n        ARG_TARGET,\n        FaucetError::MissingAuthorizedAccount,\n        FaucetError::InvalidAuthorizedAccount,\n    );\n\n    let authorized_account_uref = get_uref_with_user_errors(\n        AUTHORIZED_ACCOUNT,\n        FaucetError::MissingAuthorizedAccount,\n        FaucetError::InvalidAuthorizedAccount,\n    );\n\n    storage::write(authorized_account_uref, authorized_account_public_key);\n}\n\n#[no_mangle]\npub fn delegate() {\n    let id = 
get_optional_named_arg_with_user_errors(\n        ARG_ID,\n        FaucetError::MissingId,\n        FaucetError::InvalidId,\n    );\n\n    let caller = runtime::get_caller();\n    let installer = get_account_hash_with_user_errors(\n        INSTALLER,\n        FaucetError::MissingInstaller,\n        FaucetError::InvalidInstaller,\n    );\n\n    let authorized_account_uref = get_uref_with_user_errors(\n        AUTHORIZED_ACCOUNT,\n        FaucetError::MissingAuthorizedAccount,\n        FaucetError::InvalidAuthorizedAccount,\n    );\n\n    let maybe_authorized_account_public_key: Option<PublicKey> = read_with_user_errors(\n        authorized_account_uref,\n        FaucetError::MissingAuthorizedAccount,\n        FaucetError::InvalidAuthorizedAccount,\n    );\n\n    let maybe_authorized_account =\n        maybe_authorized_account_public_key.map(|pk| pk.to_account_hash());\n\n    let last_distribution_time_uref = get_uref_with_user_errors(\n        LAST_DISTRIBUTION_TIME,\n        FaucetError::MissingDistributionTime,\n        FaucetError::InvalidDistributionTime,\n    );\n\n    let last_distribution_time: u64 = read_with_user_errors(\n        last_distribution_time_uref,\n        FaucetError::MissingDistributionTime,\n        FaucetError::InvalidDistributionTime,\n    );\n\n    let time_interval_uref = get_uref_with_user_errors(\n        TIME_INTERVAL,\n        FaucetError::MissingTimeInterval,\n        FaucetError::InvalidTimeInterval,\n    );\n\n    let time_interval: u64 = read_with_user_errors(\n        time_interval_uref,\n        FaucetError::MissingTimeInterval,\n        FaucetError::InvalidTimeInterval,\n    );\n\n    let blocktime = runtime::get_blocktime();\n\n    if blocktime > BlockTime::new(last_distribution_time + time_interval) {\n        reset_remaining_requests();\n        set_last_distribution_time(blocktime);\n    }\n\n    if caller == installer {\n        let target: AccountHash = runtime::get_named_arg(ARG_TARGET);\n        // the authorized caller 
or the installer may pass an explicit amount.\n        // if they do not, the faucet can calculate an amount for them.\n        let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n\n        if target == installer {\n            runtime::revert(FaucetError::InstallerDoesNotFundItself);\n        }\n\n        transfer(target, amount, id);\n    } else if let Some(authorized_account) = maybe_authorized_account {\n        if caller == authorized_account {\n            let target: AccountHash = runtime::get_named_arg(ARG_TARGET);\n            let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n\n            if target == installer {\n                runtime::revert(FaucetError::AuthorizedAccountDoesNotFundInstaller);\n            }\n\n            transfer(target, amount, id);\n        } else {\n            runtime::revert(FaucetError::FaucetCallByUserWithAuthorizedAccountSet);\n        }\n    } else {\n        let amount = get_distribution_amount_rate_limited();\n\n        transfer(caller, amount, id);\n        decrease_remaining_requests();\n    }\n}\n\nfn transfer(target: AccountHash, amount: U512, id: Option<u64>) {\n    let faucet_purse = get_uref_with_user_errors(\n        FAUCET_PURSE,\n        FaucetError::MissingFaucetPurse,\n        FaucetError::InvalidFaucetPurse,\n    );\n\n    system::transfer_from_purse_to_account(faucet_purse, target, amount, id)\n        .unwrap_or_revert_with(FaucetError::FailedToTransfer);\n}\n\nfn get_distribution_amount_rate_limited() -> U512 {\n    let distributions_per_interval_uref = get_uref_with_user_errors(\n        DISTRIBUTIONS_PER_INTERVAL,\n        FaucetError::MissingDistributionsPerInterval,\n        FaucetError::InvalidDistributionsPerInterval,\n    );\n\n    let distributions_per_interval: u64 = read_with_user_errors(\n        distributions_per_interval_uref,\n        FaucetError::MissingDistributionsPerInterval,\n        FaucetError::InvalidDistributionsPerInterval,\n    );\n\n    if distributions_per_interval == 0 
{\n        return U512::zero();\n    }\n\n    let available_amount_uref = get_uref_with_user_errors(\n        AVAILABLE_AMOUNT,\n        FaucetError::MissingAvailableAmount,\n        FaucetError::InvalidAvailableAmount,\n    );\n\n    let available_amount: U512 = read_with_user_errors(\n        available_amount_uref,\n        FaucetError::MissingAvailableAmount,\n        FaucetError::InvalidAvailableAmount,\n    );\n\n    if available_amount.is_zero() {\n        return available_amount;\n    }\n\n    let remaining_requests_uref = get_uref_with_user_errors(\n        REMAINING_REQUESTS,\n        FaucetError::MissingRemainingRequests,\n        FaucetError::InvalidRemainingRequests,\n    );\n\n    let remaining_requests: U512 = read_with_user_errors(\n        remaining_requests_uref,\n        FaucetError::MissingRemainingRequests,\n        FaucetError::InvalidRemainingRequests,\n    );\n\n    if remaining_requests.is_zero() {\n        return remaining_requests;\n    }\n\n    available_amount / U512::from(distributions_per_interval)\n}\n\nfn reset_remaining_requests() {\n    let distributions_per_interval_uref = get_uref_with_user_errors(\n        DISTRIBUTIONS_PER_INTERVAL,\n        FaucetError::MissingDistributionsPerInterval,\n        FaucetError::InvalidDistributionsPerInterval,\n    );\n\n    let distributions_per_interval: u64 = read_with_user_errors(\n        distributions_per_interval_uref,\n        FaucetError::MissingDistributionsPerInterval,\n        FaucetError::InvalidDistributionsPerInterval,\n    );\n\n    let remaining_requests_uref = get_uref_with_user_errors(\n        REMAINING_REQUESTS,\n        FaucetError::MissingRemainingRequests,\n        FaucetError::InvalidRemainingRequests,\n    );\n\n    storage::write(\n        remaining_requests_uref,\n        U512::from(distributions_per_interval),\n    );\n}\n\nfn decrease_remaining_requests() -> U512 {\n    let remaining_requests_uref = get_uref_with_user_errors(\n        REMAINING_REQUESTS,\n        
FaucetError::MissingRemainingRequests,\n        FaucetError::InvalidRemainingRequests,\n    );\n\n    let remaining_requests: U512 = read_with_user_errors(\n        remaining_requests_uref,\n        FaucetError::MissingRemainingRequests,\n        FaucetError::InvalidRemainingRequests,\n    );\n\n    let new_remaining_requests = remaining_requests.saturating_sub(1.into());\n    storage::write(remaining_requests_uref, new_remaining_requests);\n\n    new_remaining_requests\n}\n\nfn set_last_distribution_time(t: BlockTime) {\n    let last_distribution_time_uref = get_uref_with_user_errors(\n        LAST_DISTRIBUTION_TIME,\n        FaucetError::MissingDistributionTime,\n        FaucetError::InvalidDistributionTime,\n    );\n\n    storage::write::<u64>(last_distribution_time_uref, t.into());\n}\n\nfn get_named_arg_size(name: &str) -> Option<usize> {\n    let mut arg_size: usize = 0;\n    let ret = unsafe {\n        ext_ffi::casper_get_named_arg_size(\n            name.as_bytes().as_ptr(),\n            name.len(),\n            &mut arg_size as *mut usize,\n        )\n    };\n    match api_error::result_from(ret) {\n        Ok(_) => Some(arg_size),\n        Err(ApiError::MissingArgument) => None,\n        Err(e) => runtime::revert(e),\n    }\n}\n\nfn get_optional_named_arg_with_user_errors<T: FromBytes>(\n    name: &str,\n    missing: FaucetError,\n    invalid: FaucetError,\n) -> Option<T> {\n    match get_named_arg_with_user_errors(name, missing, invalid) {\n        Ok(val) => val,\n        Err(err @ FaucetError::InvalidId) => runtime::revert(err),\n        Err(_) => None,\n    }\n}\n\nfn get_named_arg_with_user_errors<T: FromBytes>(\n    name: &str,\n    missing: FaucetError,\n    invalid: FaucetError,\n) -> Result<T, FaucetError> {\n    let arg_size = get_named_arg_size(name).ok_or(missing)?;\n    let arg_bytes = if arg_size > 0 {\n        let res = {\n            let data_non_null_ptr = contract_api::alloc_bytes(arg_size);\n            let ret = unsafe {\n              
  ext_ffi::casper_get_named_arg(\n                    name.as_bytes().as_ptr(),\n                    name.len(),\n                    data_non_null_ptr.as_ptr(),\n                    arg_size,\n                )\n            };\n            let data =\n                unsafe { Vec::from_raw_parts(data_non_null_ptr.as_ptr(), arg_size, arg_size) };\n            api_error::result_from(ret).map(|_| data)\n        };\n        // Assumed to be safe as `get_named_arg_size` checks the argument already\n        res.unwrap_or_revert_with(FaucetError::FailedToGetArgBytes)\n    } else {\n        // Avoids allocation with 0 bytes and a call to get_named_arg\n        Vec::new()\n    };\n\n    bytesrepr::deserialize(arg_bytes).map_err(|_| invalid)\n}\n\nfn get_account_hash_with_user_errors(\n    name: &str,\n    missing: FaucetError,\n    invalid: FaucetError,\n) -> AccountHash {\n    let key = get_key_with_user_errors(name, missing, invalid);\n    key.into_account()\n        .unwrap_or_revert_with(FaucetError::UnexpectedKeyVariant)\n}\n\nfn get_uref_with_user_errors(name: &str, missing: FaucetError, invalid: FaucetError) -> URef {\n    let key = get_key_with_user_errors(name, missing, invalid);\n    key.into_uref()\n        .unwrap_or_revert_with(FaucetError::UnexpectedKeyVariant)\n}\n\nfn get_key_with_user_errors(name: &str, missing: FaucetError, invalid: FaucetError) -> Key {\n    let (name_ptr, name_size, _bytes) = to_ptr(name);\n    let mut key_bytes = vec![0u8; Key::max_serialized_length()];\n    let mut total_bytes: usize = 0;\n    let ret = unsafe {\n        ext_ffi::casper_get_key(\n            name_ptr,\n            name_size,\n            key_bytes.as_mut_ptr(),\n            key_bytes.len(),\n            &mut total_bytes as *mut usize,\n        )\n    };\n    match api_error::result_from(ret) {\n        Ok(_) => {}\n        Err(ApiError::MissingKey) => runtime::revert(missing),\n        Err(e) => runtime::revert(e),\n    }\n    key_bytes.truncate(total_bytes);\n\n    
bytesrepr::deserialize(key_bytes).unwrap_or_revert_with(invalid)\n}\n\nfn read_with_user_errors<T: CLTyped + FromBytes>(\n    uref: URef,\n    missing: FaucetError,\n    invalid: FaucetError,\n) -> T {\n    let key: Key = uref.into();\n    let (key_ptr, key_size, _bytes) = to_ptr(key);\n\n    let value_size = {\n        let mut value_size = MaybeUninit::uninit();\n        let ret = unsafe { ext_ffi::casper_read_value(key_ptr, key_size, value_size.as_mut_ptr()) };\n        match api_error::result_from(ret) {\n            Ok(_) => unsafe { value_size.assume_init() },\n            Err(ApiError::ValueNotFound) => runtime::revert(missing),\n            Err(e) => runtime::revert(e),\n        }\n    };\n\n    let value_bytes = read_host_buffer(value_size).unwrap_or_revert();\n\n    bytesrepr::deserialize(value_bytes).unwrap_or_revert_with(invalid)\n}\n\nfn read_host_buffer_into(dest: &mut [u8]) -> Result<usize, ApiError> {\n    let mut bytes_written = MaybeUninit::uninit();\n    let ret = unsafe {\n        ext_ffi::casper_read_host_buffer(dest.as_mut_ptr(), dest.len(), bytes_written.as_mut_ptr())\n    };\n    // NOTE: When rewriting below expression as `result_from(ret).map(|_| unsafe { ... 
})`, and the\n    // caller ignores the return value, execution of the contract becomes unstable and ultimately\n    // leads to `Unreachable` error.\n    api_error::result_from(ret)?;\n    Ok(unsafe { bytes_written.assume_init() })\n}\n\nfn read_host_buffer(size: usize) -> Result<Vec<u8>, ApiError> {\n    let mut dest: Vec<u8> = if size == 0 {\n        Vec::new()\n    } else {\n        let bytes_non_null_ptr = contract_api::alloc_bytes(size);\n        unsafe { Vec::from_raw_parts(bytes_non_null_ptr.as_ptr(), size, size) }\n    };\n    read_host_buffer_into(&mut dest)?;\n    Ok(dest)\n}\n\nfn to_ptr<T: ToBytes>(t: T) -> (*const u8, usize, Vec<u8>) {\n    let bytes = t.into_bytes().unwrap_or_revert();\n    let ptr = bytes.as_ptr();\n    let size = bytes.len();\n    (ptr, size, bytes)\n}\n"
  },
  {
    "path": "smart_contracts/contracts/explorer/faucet-stored/Cargo.toml",
    "content": "[package]\nname = \"faucet-stored\"\nversion = \"0.1.0\"\nauthors = [\"Mateusz Górski <gorski.mateusz@protonmail.ch>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"faucet_stored\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\nfaucet = { path = \"../faucet\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/explorer/faucet-stored/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::{boxed::Box, format, string::ToString, vec};\n\nuse casper_contract::{\n    contract_api::{account, runtime, storage, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    contracts::NamedKeys, AddressableEntityHash, ApiError, CLType, EntityEntryPoint,\n    EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key, Parameter, PublicKey,\n    URef, U512,\n};\n\n#[repr(u16)]\nenum InstallerSessionError {\n    FailedToTransfer = 101,\n}\n\n#[no_mangle]\npub extern \"C\" fn call_faucet() {\n    faucet::delegate();\n}\n\nfn build_named_keys_and_purse() -> (NamedKeys, URef) {\n    let mut named_keys = NamedKeys::new();\n    let purse = system::create_purse();\n\n    // This session constructs a NamedKeys struct and later passes it to the\n    // storage::new_contract() function. This is simpler and more efficient than creating a custom\n    // \"init\" entry point for the stored contract in this case but it is not the best approach for\n    // every case. If you need to use the values that are stored under a new contract's named keys,\n    // you may store them under the named keys of the account that was used to deploy the session.\n    // However, this is not the best solution for every case, and there is another option.\n    //\n    // A custom \"init\" entrypoint would be useful for setting each required\n    // named key for the contract, but can only be called after the contract has been created\n    // using storage::new_contract(). 
Other entry points in the stored contract may require\n    // extra logic to check for the presence of, load and validate data stored under named keys.\n    // This would be useful in cases where a stored contract is creating another contract using\n    // storage::new_contract(), especially if values computed for initializing the new contract are\n    // also needed by the contract doing the initializing.\n    named_keys.insert(faucet::FAUCET_PURSE.to_string(), purse.into());\n\n    named_keys.insert(faucet::INSTALLER.to_string(), runtime::get_caller().into());\n    named_keys.insert(\n        faucet::TIME_INTERVAL.to_string(),\n        storage::new_uref(faucet::TWO_HOURS_AS_MILLIS).into(),\n    );\n    named_keys.insert(\n        faucet::LAST_DISTRIBUTION_TIME.to_string(),\n        storage::new_uref(0u64).into(),\n    );\n    named_keys.insert(\n        faucet::AVAILABLE_AMOUNT.to_string(),\n        storage::new_uref(U512::zero()).into(),\n    );\n    named_keys.insert(\n        faucet::REMAINING_REQUESTS.to_string(),\n        storage::new_uref(U512::zero()).into(),\n    );\n    named_keys.insert(\n        faucet::DISTRIBUTIONS_PER_INTERVAL.to_string(),\n        storage::new_uref(0u64).into(),\n    );\n\n    // The AUTHORIZED_ACCOUNT named key holds an optional public key. If the public key is set,\n    // the account referenced by this public key will be granted a special privilege as the only\n    // authorized caller of the faucet's ENTRY_POINT_FAUCET.\n    // Only the authorized account will be able to issue token distributions from the faucet. The\n    // authorized account should call the faucet in the same way that the installer would,\n    // passing faucet::ARG_AMOUNT, faucet::ARG_TARGET and faucet::ARG_ID runtime arguments.\n    //\n    // The AUTHORIZED_ACCOUNT and faucet installer account have different responsibilities. 
While\n    // both of them may issue token using the ENTRY_POINT_FAUCET, only the faucet installer may\n    // configure the contract through the ENTRY_POINT_SET_VARIABLES, and only the faucet installer\n    // may set an authorized account through the ENTRY_POINT_AUTHORIZE_TO. The AUTHORIZED_ACCOUNT's\n    // responsibility would be to determine to whom and what amount of token should be issued\n    // through the faucet contract.\n    //\n    // While the AUTHORIZED_ACCOUNT named key is set to None::<PublicKey>, the ENTRY_POINT_FAUCET\n    // will be publicly accessible and users may call ENTRY_POINT_FAUCET without a\n    // faucet::ARG_TARGET or faucet::ARG_AMOUNT. The contract will automatically issue them a\n    // computed amount of token.\n    //\n    // This enables the faucet contract to support a wider range of use cases, where in some cases\n    // the faucet installer does not want the ENTRY_POINT_FAUCET to be called directly by users for\n    // security reasons. Another case would be where this contract is deployed to a private Casper\n    // Network where all users are trusted to use the faucet to issue themselves token\n    // distributions responsibly.\n    named_keys.insert(\n        faucet::AUTHORIZED_ACCOUNT.to_string(),\n        storage::new_uref(None::<PublicKey>).into(),\n    );\n\n    (named_keys, purse)\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let id: u64 = runtime::get_named_arg(faucet::ARG_ID);\n\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n\n        let faucet = EntityEntryPoint::new(\n            faucet::ENTRY_POINT_FAUCET,\n            vec![\n                Parameter::new(faucet::ARG_ID, CLType::Option(Box::new(CLType::U64))),\n                Parameter::new(faucet::ARG_TARGET, CLType::PublicKey),\n                Parameter::new(faucet::ARG_AMOUNT, CLType::U512),\n            ],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n        
    EntryPointPayment::Caller,\n        );\n\n        let set_variables = EntityEntryPoint::new(\n            faucet::ENTRY_POINT_SET_VARIABLES,\n            vec![\n                Parameter::new(\n                    faucet::ARG_AVAILABLE_AMOUNT,\n                    CLType::Option(Box::new(CLType::U512)),\n                ),\n                Parameter::new(\n                    faucet::ARG_TIME_INTERVAL,\n                    CLType::Option(Box::new(CLType::U64)),\n                ),\n                Parameter::new(\n                    faucet::ARG_DISTRIBUTIONS_PER_INTERVAL,\n                    CLType::Option(Box::new(CLType::U64)),\n                ),\n            ],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n\n        let authorize_to = EntityEntryPoint::new(\n            faucet::ENTRY_POINT_AUTHORIZE_TO,\n            vec![Parameter::new(\n                faucet::ARG_TARGET,\n                CLType::Option(Box::new(CLType::PublicKey)),\n            )],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n\n        entry_points.add_entry_point(faucet);\n        entry_points.add_entry_point(set_variables);\n        entry_points.add_entry_point(authorize_to);\n\n        entry_points\n    };\n\n    // The installer will create the faucet purse and give it to the newly installed\n    // contract so that the installing account and the newly installed contract will\n    // have a handle on it via the shared purse URef.\n    //\n    // The faucet named keys include the faucet purse, these are the named keys that we pass to the\n    // faucet.\n    let (faucet_named_keys, faucet_purse) = build_named_keys_and_purse();\n\n    // This is where the contract package is created and the first version of the faucet contract is\n    // installed within it. 
The contract package hash for the created contract package will be\n    // stored in the installing account's named keys under the faucet::PACKAGE_HASH_KEY_NAME, this\n    // allows later usage via the installing account to easily refer to and access the contract\n    // package and thus all versions stored in it.\n    //\n    // The access URef for the contract package will also be stored in the installing account's\n    // named keys under faucet::ACCESS_KEY_NAME; this URef controls administrative access to the\n    // contract package which includes the ability to install new versions of the contract\n    // logic, administer group-based security (if any), and so on.\n    //\n    // The installing account may decide to grant this access uref to another account (not\n    // demonstrated here), which would allow that account equivalent full administrative control\n    // over the contract. This should only be done intentionally because it is not revocable.\n    let (contract_hash, contract_version) = storage::new_contract(\n        entry_points,\n        Some(faucet_named_keys),\n        Some(format!(\"{}_{}\", faucet::HASH_KEY_NAME, id)),\n        Some(format!(\"{}_{}\", faucet::ACCESS_KEY_NAME, id)),\n        None,\n    );\n\n    // As a convenience, a specific contract version can be referred to either by its contract hash\n    // or by the combination of the contract package hash and a contract version key. This comes\n    // down to developer preference. The contract package hash is a stable hash, so this may be\n    // preferable if you don't want to worry about contract hashes changing. Existing contracts'\n    // hashes can be stored under a URef under a named key and later used for calling the contract\n    // by hash. 
If you wanted to change the hash stored under the named key, your contract would\n    // have to have an entrypoint that would allow you to do so.\n    //\n    // Another option is to store a contract package hash that your contract depends on. When\n    // calling a contract using the package hash alone, the execution engine will find the latest\n    // contract version for you automatically. To avoid breaking changes, you may want to use a\n    // contract package hash and a contract version. That way, whenever the contract package\n    // that the calling contract depends on changes, the version can be updated through an\n    // entrypoint that allows an authorized caller to set named keys.\n    //\n    // In some cases it may be desireable to pass one or both of the contract package hash and\n    // version into contract or session code as a runtime argument. As an example, if a user\n    // regularly makes calls to a contract package via session code, they could have the session\n    // code take runtime arguments for one or both of the contract package hash and version. This\n    // way, if the contract package is updated, they could easily use the latest version without\n    // needing to edit their session code. The same technique could be applied to stored contracts\n    // that need to call other contracts by their contract package hash and version.\n\n    // Here we are saving newly created contracts hash, the contract package hash and contract\n    // version, and an access URef under the installer's named keys. 
It's important to note that\n    // you'll need the access URef if you ever want to modify the contract package in the future.\n    // It's also important to note that it will be impossible to reference any of these values again\n    // if they're not stored under named keys.\n    //\n    // These named keys all end with the \"id\" runtime argument that is passed into this session.\n    // This is keep separate instances of this faucet contract namespaced in case the installer\n    // wants to install multiple instances of the contract using the same account.\n    runtime::put_key(\n        &format!(\"{}_{}\", faucet::CONTRACT_VERSION, id),\n        storage::new_uref(contract_version).into(),\n    );\n    runtime::put_key(\n        &format!(\"{}_{}\", faucet::CONTRACT_NAME, id),\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n\n    // This is specifically for this installing account, which would allow one installing account\n    // to potentially have multiple faucet contract packages.\n    runtime::put_key(\n        &format!(\"{}_{}\", faucet::FAUCET_PURSE, id),\n        faucet_purse.into(),\n    );\n\n    let main_purse = account::get_main_purse();\n\n    // Initial funding amount. In other words, when the faucet contract is set up, this is its\n    // starting tokens transferred from the installing account's main purse as a one-time\n    // initialization.\n    let amount = runtime::get_named_arg(faucet::ARG_AMOUNT);\n\n    system::transfer_from_purse_to_purse(main_purse, faucet_purse, amount, Some(id))\n        .unwrap_or_revert_with(ApiError::User(\n            InstallerSessionError::FailedToTransfer as u16,\n        ));\n}\n"
  },
  {
    "path": "smart_contracts/contracts/nctl/nctl-dictionary/Cargo.toml",
    "content": "[package]\nname = \"nctl-dictionary\"\nversion = \"0.1.0\"\nedition = \"2018\"\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/nctl/nctl-dictionary/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[cfg(not(target_arch = \"wasm32\"))]\ncompile_error!(\"target arch should be wasm32: compile with '--target wasm32-unknown-unknown'\");\n\n// We need to explicitly import the std alloc crate and `alloc::string::String` as we're in a\n// `no_std` environment.\nextern crate alloc;\n\nuse casper_contract::{contract_api::storage, unwrap_or_revert::UnwrapOrRevert};\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let seed_uref = storage::new_dictionary(\"nctl_dictionary\").unwrap_or_revert();\n    storage::dictionary_put(seed_uref, \"foo\", 1u64);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/profiling/host-function-metrics/Cargo.toml",
    "content": "[package]\nname = \"host-function-metrics\"\nversion = \"0.1.0\"\nauthors = [\"Fraser Hutchison <fraser@casperlabs.io>\"]\nedition = \"2021\"\n\n[lib]\ncrate-type = [\"cdylib\"]\nbench = false\ndoctest = false\ntest = false\n\n[features]\ndefault = [\"casper-contract/test-support\", \"rand/small_rng\"]\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\nrand = { version = \"0.8.3\", default-features = false }\n"
  },
  {
    "path": "smart_contracts/contracts/profiling/host-function-metrics/src/lib.rs",
    "content": "#![no_std]\n\nextern crate alloc;\n\nuse alloc::{boxed::Box, collections::BTreeMap, string::String, vec, vec::Vec};\nuse core::iter;\n\nuse rand::{distributions::Alphanumeric, rngs::SmallRng, Rng, SeedableRng};\n\nuse casper_contract::{\n    contract_api::{account, runtime, storage, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    account::{AccountHash, ActionType, Weight},\n    bytesrepr::Bytes,\n    contracts::{ContractHash, ContractVersion, NamedKeys},\n    runtime_args, ApiError, BlockTime, CLType, CLValue, EntityEntryPoint, EntryPointAccess,\n    EntryPointPayment, EntryPointType, EntryPoints, Key, Parameter, Phase, U512,\n};\n\nconst MIN_FUNCTION_NAME_LENGTH: usize = 1;\nconst MAX_FUNCTION_NAME_LENGTH: usize = 100;\n\nconst NAMED_KEY_COUNT: usize = 100;\nconst MIN_NAMED_KEY_NAME_LENGTH: usize = 10;\nconst MAX_NAMED_KEY_NAME_LENGTH: usize = 100;\nconst VALUE_FOR_ADDITION_1: u64 = 1;\nconst VALUE_FOR_ADDITION_2: u64 = 2;\nconst TRANSFER_AMOUNT: u64 = 1_000_000;\n\nconst ARG_SEED: &str = \"seed\";\nconst ARG_OTHERS: &str = \"others\";\nconst ARG_BYTES: &str = \"bytes\";\n\n#[repr(u16)]\nenum Error {\n    GetCaller = 0,\n    GetBlockTime = 1,\n    GetPhase = 2,\n    HasKey = 3,\n    GetKey = 4,\n    NamedKeys = 5,\n    ReadOrRevert = 6,\n    IsValidURef = 7,\n    Transfer = 8,\n    Revert = 9,\n}\n\nimpl From<Error> for ApiError {\n    fn from(error: Error) -> ApiError {\n        ApiError::User(error as u16)\n    }\n}\n\nfn create_random_names(rng: &mut SmallRng) -> impl Iterator<Item = String> + '_ {\n    iter::repeat_with(move || {\n        let key_length: usize = rng.gen_range(MIN_NAMED_KEY_NAME_LENGTH..MAX_NAMED_KEY_NAME_LENGTH);\n        rng.sample_iter(&Alphanumeric)\n            .map(char::from)\n            .take(key_length)\n            .collect::<String>()\n    })\n    .take(NAMED_KEY_COUNT)\n}\n\nfn truncate_named_keys(named_keys: NamedKeys, rng: &mut SmallRng) -> NamedKeys {\n    let truncated_len = 
rng.gen_range(1..=named_keys.len());\n    let mut vec = named_keys.into_inner().into_iter().collect::<Vec<_>>();\n    vec.truncate(truncated_len);\n    NamedKeys::from(vec.into_iter().collect::<BTreeMap<_, _>>())\n}\n\n// Executes the named key functions from the `runtime` module and most of the functions from the\n// `storage` module.\nfn large_function() {\n    let seed: u64 = runtime::get_named_arg(ARG_SEED);\n    let random_bytes: Bytes = runtime::get_named_arg(ARG_BYTES);\n\n    let uref = storage::new_uref(random_bytes.clone());\n\n    let mut rng = SmallRng::seed_from_u64(seed);\n    let mut key_name = String::new();\n    for random_name in create_random_names(&mut rng) {\n        key_name = random_name;\n        runtime::put_key(&key_name, Key::from(uref));\n    }\n\n    if !runtime::has_key(&key_name) {\n        runtime::revert(Error::HasKey);\n    }\n\n    if runtime::get_key(&key_name) != Some(Key::from(uref)) {\n        runtime::revert(Error::GetKey);\n    }\n\n    runtime::remove_key(&key_name);\n\n    let named_keys = runtime::list_named_keys();\n    if named_keys.len() != NAMED_KEY_COUNT - 1 {\n        runtime::revert(Error::NamedKeys)\n    }\n\n    storage::write(uref, random_bytes.clone());\n    let retrieved_value: Bytes = storage::read_or_revert(uref);\n    if retrieved_value != random_bytes {\n        runtime::revert(Error::ReadOrRevert);\n    }\n\n    storage::write(uref, VALUE_FOR_ADDITION_1);\n    storage::add(uref, VALUE_FOR_ADDITION_2);\n\n    let keys_to_return = truncate_named_keys(named_keys, &mut rng);\n    runtime::ret(CLValue::from_t(keys_to_return).unwrap_or_revert());\n}\n\nfn small_function() {\n    if runtime::get_phase() != Phase::Session {\n        runtime::revert(Error::GetPhase);\n    }\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let seed: u64 = runtime::get_named_arg(ARG_SEED);\n    let (random_bytes, source_account, destination_account): (Bytes, AccountHash, AccountHash) =\n        
runtime::get_named_arg(ARG_OTHERS);\n    let random_bytes: Vec<u8> = random_bytes.into();\n\n    // ========== storage, execution and upgrading of contracts ====================================\n\n    // Store large function with no named keys, then execute it to get named keys returned.\n    let mut rng = SmallRng::seed_from_u64(seed);\n    let large_function_name: String =\n        \"l\".repeat(rng.gen_range(MIN_FUNCTION_NAME_LENGTH..=MAX_FUNCTION_NAME_LENGTH));\n\n    let entry_point_name = &large_function_name;\n    let runtime_args = runtime_args! {\n        ARG_SEED => seed,\n        ARG_BYTES => random_bytes.clone()\n    };\n\n    let (contract_hash, _contract_version) = store_function(entry_point_name, None);\n    let named_keys: NamedKeys =\n        runtime::call_contract(contract_hash, entry_point_name, runtime_args.clone());\n\n    let (contract_hash, _contract_version) =\n        store_function(entry_point_name, Some(named_keys.clone()));\n    // Store large function with 10 named keys, then execute it.\n    runtime::call_contract::<NamedKeys>(contract_hash, entry_point_name, runtime_args.clone());\n\n    // Small function\n    let small_function_name =\n        \"s\".repeat(rng.gen_range(MIN_FUNCTION_NAME_LENGTH..=MAX_FUNCTION_NAME_LENGTH));\n\n    let entry_point_name = &small_function_name;\n\n    // Store small function with no named keys, then execute it.\n    let (contract_hash, _contract_version) =\n        store_function(entry_point_name, Some(NamedKeys::new()));\n    runtime::call_contract::<()>(contract_hash, entry_point_name, runtime_args.clone());\n\n    let (contract_hash, _contract_version) = store_function(entry_point_name, Some(named_keys));\n    // Store small function with 10 named keys, then execute it.\n    runtime::call_contract::<()>(contract_hash, entry_point_name, runtime_args);\n\n    // ========== functions from `account` module ==================================================\n\n    let main_purse = 
account::get_main_purse();\n    account::set_action_threshold(ActionType::Deployment, Weight::new(1)).unwrap_or_revert();\n    account::add_associated_key(destination_account, Weight::new(1)).unwrap_or_revert();\n    account::update_associated_key(destination_account, Weight::new(1)).unwrap_or_revert();\n    account::remove_associated_key(destination_account).unwrap_or_revert();\n\n    // ========== functions from `system` module ===================================================\n\n    let _ = system::get_mint();\n\n    let new_purse = system::create_purse();\n\n    let transfer_amount = U512::from(TRANSFER_AMOUNT);\n    system::transfer_from_purse_to_purse(main_purse, new_purse, transfer_amount, None)\n        .unwrap_or_revert();\n\n    let balance = system::get_purse_balance(new_purse).unwrap_or_revert();\n    if balance != transfer_amount {\n        runtime::revert(Error::Transfer);\n    }\n\n    system::transfer_from_purse_to_account(new_purse, destination_account, transfer_amount, None)\n        .unwrap_or_revert();\n\n    system::transfer_to_account(destination_account, transfer_amount, None).unwrap_or_revert();\n\n    // ========== remaining functions from `runtime` module ========================================\n\n    if !runtime::is_valid_uref(main_purse) {\n        runtime::revert(Error::IsValidURef);\n    }\n\n    if runtime::get_blocktime() != BlockTime::new(0) {\n        runtime::revert(Error::GetBlockTime);\n    }\n\n    if runtime::get_caller() != source_account {\n        runtime::revert(Error::GetCaller);\n    }\n\n    runtime::print(&String::from_utf8_lossy(&random_bytes));\n\n    runtime::revert(Error::Revert);\n}\n\nfn store_function(\n    entry_point_name: &str,\n    named_keys: Option<NamedKeys>,\n) -> (ContractHash, ContractVersion) {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n\n        let entry_point = EntityEntryPoint::new(\n            entry_point_name,\n            vec![\n                
Parameter::new(ARG_SEED, CLType::U64),\n                Parameter::new(ARG_BYTES, CLType::List(Box::new(CLType::U8))),\n            ],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n\n        entry_points.add_entry_point(entry_point);\n\n        entry_points\n    };\n    storage::new_contract(entry_points, named_keys, None, None, None)\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn s() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn ss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn sss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn ssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn sssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn ssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn sssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn ssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn sssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn ssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn sssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn ssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn sssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn ssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn sssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn ssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn sssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" 
fn ssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn sssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn ssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn sssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn ssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn sssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn ssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn sssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn ssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn sssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn ssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\"\nfn ssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern\n\"C\" fn sssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub\nextern \"C\" fn ssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn sssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn ssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub\nextern \"C\" fn ssssssssssssssssssssssssssssssssssss() { small_function() 
}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn sssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn ssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub\nextern \"C\" fn ssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn sssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn ssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn ssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::\nskip]\n#[no_mangle]\npub extern \"C\" fn sssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn sssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn sssssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn sssssssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" 
fn\nssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn sssssssssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::\nskip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::\nskip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::\nskip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::\nskip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::\nskip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" 
fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" 
fn\nsssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {\n    
small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {\n    small_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss()\n{ small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss()\n{ small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss()\n{ small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss()\n{ small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss()\n{ small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nsssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss()\n{ small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss()\n{ small_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn l() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn ll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub 
extern \"C\" fn lll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn llll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn lllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn llllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn lllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn llllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn lllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn llllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn lllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn llllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn lllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn llllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn lllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn llllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn lllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn llllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn lllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn llllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn lllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn llllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn lllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn llllllllllllllllllllllll() { large_function() 
}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn lllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn llllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn lllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn llllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\"\nfn llllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern\n\"C\" fn lllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub\nextern \"C\" fn llllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn lllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn llllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub\nextern \"C\" fn llllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn lllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn llllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub\nextern \"C\" fn llllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn lllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn 
llllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn llllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::\nskip]\n#[no_mangle]\npub extern \"C\" fn lllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn lllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn lllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn lllllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn lllllllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn 
lllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::\nskip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::\nskip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::\nskip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::\nskip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::\nskip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() 
}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" 
fn\nlllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {\n    large_function()\n}\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" 
fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll()\n{ large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll()\n{ large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll()\n{ large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll()\n{ large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll()\n{ large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nlllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll()\n{ large_function() }\n\n#[rustfmt::skip]\n#[no_mangle]\npub extern \"C\" fn\nllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll()\n{ large_function() }\n"
  },
  {
    "path": "smart_contracts/contracts/profiling/state-initializer/Cargo.toml",
    "content": "[package]\nname = \"state-initializer\"\nversion = \"0.1.0\"\nauthors = [\"Fraser Hutchison <fraser@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"state_initializer\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/profiling/state-initializer/src/main.rs",
    "content": "//! Transfers the requested amount of motes to the first account and zero motes to the second\n//! account.\n#![no_std]\n#![no_main]\n\nuse casper_contract::contract_api::{runtime, system};\nuse casper_types::{account::AccountHash, ApiError, TransferredTo, U512};\n\nconst ARG_ACCOUNT1_ACCOUNT_HASH: &str = \"account_1_account_hash\";\nconst ARG_ACCOUNT1_AMOUNT: &str = \"account_1_amount\";\nconst ARG_ACCOUNT2_ACCOUNT_HASH: &str = \"account_2_account_hash\";\n\n#[repr(u16)]\nenum Error {\n    AccountAlreadyExists = 0,\n}\n\nfn create_account_with_amount(account: AccountHash, amount: U512) {\n    match system::transfer_to_account(account, amount, None) {\n        Ok(TransferredTo::NewAccount) => (),\n        Ok(TransferredTo::ExistingAccount) => {\n            runtime::revert(ApiError::User(Error::AccountAlreadyExists as u16))\n        }\n        Err(_) => runtime::revert(ApiError::Transfer),\n    }\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let account_hash1: AccountHash = runtime::get_named_arg(ARG_ACCOUNT1_ACCOUNT_HASH);\n    let amount: U512 = runtime::get_named_arg(ARG_ACCOUNT1_AMOUNT);\n    create_account_with_amount(account_hash1, amount);\n\n    let account_hash2: AccountHash = runtime::get_named_arg(ARG_ACCOUNT2_ACCOUNT_HASH);\n    create_account_with_amount(account_hash2, U512::zero());\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/add-associated-key/Cargo.toml",
    "content": "[package]\nname = \"add-associated-key\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"add_associated_key\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/add-associated-key/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::{\n    contract_api::{account, runtime},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::account::{AccountHash, Weight};\n\nconst ARG_ACCOUNT: &str = \"account\";\nconst ARG_WEIGHT: &str = \"weight\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let account: AccountHash = runtime::get_named_arg(ARG_ACCOUNT);\n    let weight: Weight = runtime::get_named_arg(ARG_WEIGHT);\n    account::add_associated_key(account, weight).unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/add-gas-subcall/Cargo.toml",
    "content": "[package]\nname = \"add-gas-subcall\"\nversion = \"0.1.0\"\nauthors = [\"Fraser Hutchison <fraser@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"add_gas_subcall\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/add-gas-subcall/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[macro_use]\nextern crate alloc;\n\nuse alloc::string::String;\n\nuse casper_contract::contract_api::{runtime, storage};\n\nuse casper_types::{\n    contracts::{ContractHash, ContractVersion},\n    runtime_args, ApiError, CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment,\n    EntryPointType, EntryPoints, Key, Parameter,\n};\n\nconst SUBCALL_NAME: &str = \"add_gas\";\nconst DATA_KEY: &str = \"data\";\nconst ADD_GAS_FROM_SESSION: &str = \"add-gas-from-session\";\nconst ADD_GAS_VIA_SUBCALL: &str = \"add-gas-via-subcall\";\n\nconst ARG_GAS_AMOUNT: &str = \"gas_amount\";\nconst ARG_METHOD_NAME: &str = \"method_name\";\n\n/// This should consume at least `amount * gas_per_byte + C` gas\n/// where C contains wasm overhead and host function calls.\nfn consume_at_least_gas_amount(amount: usize) {\n    if amount > 0 {\n        let data_uref = match runtime::get_key(DATA_KEY) {\n            Some(Key::URef(uref)) => uref,\n            Some(_key) => runtime::revert(ApiError::UnexpectedKeyVariant),\n            None => {\n                let uref = storage::new_uref(());\n                runtime::put_key(DATA_KEY, uref.into());\n                uref\n            }\n        };\n\n        let data = vec![0; amount];\n        storage::write(data_uref, data);\n    }\n}\n\n#[no_mangle]\npub extern \"C\" fn add_gas() {\n    let amount: u32 = runtime::get_named_arg(ARG_GAS_AMOUNT);\n\n    consume_at_least_gas_amount(amount as usize);\n}\n\nfn store() -> (ContractHash, ContractVersion) {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n        let entry_point = EntityEntryPoint::new(\n            SUBCALL_NAME,\n            vec![Parameter::new(ARG_GAS_AMOUNT, CLType::I32)],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n\n        entry_points.add_entry_point(entry_point);\n\n        
entry_points\n    };\n    storage::new_contract(entry_points, None, None, None, None)\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let amount: i32 = runtime::get_named_arg(ARG_GAS_AMOUNT);\n    let method_name: String = runtime::get_named_arg(ARG_METHOD_NAME);\n\n    match method_name.as_str() {\n        ADD_GAS_FROM_SESSION => consume_at_least_gas_amount(amount as usize),\n        ADD_GAS_VIA_SUBCALL => {\n            let (contract_hash, _contract_version) = store();\n            runtime::call_contract(\n                contract_hash,\n                SUBCALL_NAME,\n                runtime_args! { ARG_GAS_AMOUNT => amount, },\n            )\n        }\n        _ => runtime::revert(ApiError::InvalidArgument),\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/add-update-associated-key/Cargo.toml",
    "content": "[package]\nname = \"add-update-associated-key\"\nversion = \"0.1.0\"\nauthors = [\"Ed Hastings <ed@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"add_update_associated_key\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/add-update-associated-key/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::{\n    contract_api::{account, runtime},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::account::{AccountHash, Weight};\n\nconst INIT_WEIGHT: u8 = 1;\nconst MOD_WEIGHT: u8 = 2;\n\nconst ARG_ACCOUNT: &str = \"account\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let account: AccountHash = runtime::get_named_arg(ARG_ACCOUNT);\n\n    let weight1 = Weight::new(INIT_WEIGHT);\n    account::add_associated_key(account, weight1).unwrap_or_revert();\n\n    let weight2 = Weight::new(MOD_WEIGHT);\n    account::update_associated_key(account, weight2).unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/auction-bidding/Cargo.toml",
    "content": "[package]\nname = \"auction-bidding\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"auction_bidding\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/auction-bidding/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\n\nuse auction::{DelegationRate, METHOD_ADD_BID};\nuse casper_contract::{\n    contract_api::{account, runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\n\nuse casper_types::{\n    account::AccountHash, contracts::ContractHash, runtime_args, system::auction, ApiError,\n    PublicKey, U512,\n};\n\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_ENTRY_POINT: &str = \"entry_point\";\nconst ARG_ACCOUNT_HASH: &str = \"account_hash\";\nconst ARG_PUBLIC_KEY: &str = \"public_key\";\nconst TEST_BOND: &str = \"bond\";\nconst TEST_SEED_NEW_ACCOUNT: &str = \"seed_new_account\";\nconst DELEGATION_RATE: DelegationRate = 42;\n\n#[repr(u16)]\nenum Error {\n    UnableToSeedAccount,\n    UnknownCommand,\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let command: String = runtime::get_named_arg(ARG_ENTRY_POINT);\n\n    match command.as_str() {\n        TEST_BOND => bond(),\n        TEST_SEED_NEW_ACCOUNT => seed_new_account(),\n        _ => runtime::revert(ApiError::User(Error::UnknownCommand as u16)),\n    }\n}\n\nfn bond() {\n    let auction_contract_hash = system::get_auction();\n    let amount = runtime::get_named_arg(ARG_AMOUNT);\n    let public_key = runtime::get_named_arg(ARG_PUBLIC_KEY);\n    call_bond(auction_contract_hash, public_key, amount);\n}\n\nfn call_bond(auction: ContractHash, public_key: PublicKey, bond_amount: U512) {\n    let args = runtime_args! 
{\n        auction::ARG_PUBLIC_KEY => public_key,\n        auction::ARG_DELEGATION_RATE => DELEGATION_RATE,\n        auction::ARG_AMOUNT => bond_amount,\n    };\n\n    let _amount: U512 = runtime::call_contract(auction, METHOD_ADD_BID, args);\n}\n\nfn seed_new_account() {\n    let source = account::get_main_purse();\n    let target: AccountHash = runtime::get_named_arg(ARG_ACCOUNT_HASH);\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n    system::transfer_from_purse_to_account(source, target, amount, None)\n        .unwrap_or_revert_with(ApiError::User(Error::UnableToSeedAccount as u16));\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/auction-bids/Cargo.toml",
    "content": "[package]\nname = \"auction-bids\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"auction_bids\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/auction-bids/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\n\nuse casper_contract::contract_api::{runtime, system};\n\nuse casper_types::{\n    runtime_args,\n    system::auction::{\n        ARG_DELEGATION_RATE, ARG_DELEGATOR, ARG_ERA_END_TIMESTAMP_MILLIS, ARG_PUBLIC_KEY,\n        ARG_VALIDATOR, METHOD_ADD_BID, METHOD_DELEGATE, METHOD_DISTRIBUTE, METHOD_RUN_AUCTION,\n        METHOD_UNDELEGATE,\n    },\n    ApiError, PublicKey, U512,\n};\n\nconst ARG_ENTRY_POINT: &str = \"entry_point\";\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_DELEGATE: &str = \"delegate\";\nconst ARG_UNDELEGATE: &str = \"undelegate\";\nconst ARG_RUN_AUCTION: &str = \"run_auction\";\nconst ARG_ADD_BID: &str = \"add_bid\";\n\n#[repr(u16)]\nenum Error {\n    UnknownCommand,\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let command: String = runtime::get_named_arg(ARG_ENTRY_POINT);\n\n    match command.as_str() {\n        ARG_DELEGATE => {\n            delegate();\n        }\n        ARG_UNDELEGATE => {\n            undelegate();\n        }\n        ARG_RUN_AUCTION => run_auction(),\n        METHOD_DISTRIBUTE => distribute(),\n        ARG_ADD_BID => {\n            add_bid();\n        }\n        _ => runtime::revert(ApiError::User(Error::UnknownCommand as u16)),\n    };\n}\n\nfn delegate() -> U512 {\n    let auction = system::get_auction();\n    let delegator: PublicKey = runtime::get_named_arg(ARG_DELEGATOR);\n    let validator: PublicKey = runtime::get_named_arg(ARG_VALIDATOR);\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n    let args = runtime_args! 
{\n        ARG_DELEGATOR => delegator,\n        ARG_VALIDATOR => validator,\n        ARG_AMOUNT => amount,\n    };\n\n    runtime::call_contract(auction, METHOD_DELEGATE, args)\n}\n\nfn undelegate() -> U512 {\n    let auction = system::get_auction();\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n    let delegator: PublicKey = runtime::get_named_arg(ARG_DELEGATOR);\n    let validator: PublicKey = runtime::get_named_arg(ARG_VALIDATOR);\n\n    let args = runtime_args! {\n        ARG_AMOUNT => amount,\n        ARG_VALIDATOR => validator,\n        ARG_DELEGATOR => delegator,\n    };\n\n    runtime::call_contract(auction, METHOD_UNDELEGATE, args)\n}\n\nfn add_bid() -> U512 {\n    let auction = system::get_auction();\n    let validator: PublicKey = runtime::get_named_arg(ARG_PUBLIC_KEY);\n    let delegation_rate: u8 = runtime::get_named_arg(ARG_DELEGATION_RATE);\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n\n    let args = runtime_args! {\n        ARG_AMOUNT => amount,\n        ARG_PUBLIC_KEY => validator,\n        ARG_DELEGATION_RATE => delegation_rate,\n    };\n\n    runtime::call_contract(auction, METHOD_ADD_BID, args)\n}\n\nfn run_auction() {\n    let auction = system::get_auction();\n    let era_end_timestamp_millis: u64 = runtime::get_named_arg(ARG_ERA_END_TIMESTAMP_MILLIS);\n    let args = runtime_args! { ARG_ERA_END_TIMESTAMP_MILLIS => era_end_timestamp_millis };\n    runtime::call_contract::<()>(auction, METHOD_RUN_AUCTION, args);\n}\n\nfn distribute() {\n    let auction = system::get_auction();\n    let proposer: PublicKey = runtime::get_named_arg(ARG_VALIDATOR);\n    let args = runtime_args! {\n        ARG_VALIDATOR => proposer\n    };\n    runtime::call_contract::<()>(auction, METHOD_DISTRIBUTE, args);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/blake2b/Cargo.toml",
    "content": "[package]\nname = \"blake2b\"\nversion = \"0.8.0\"\nauthors = [\"Henry Till <henrytill@gmail.com>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"blake2b\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/blake2b/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::contract_api::{runtime, storage};\n\nconst INPUT_LENGTH: usize = 32;\n\nconst HASH_RESULT: &str = \"hash_result\";\n\nconst ARG_BYTES: &str = \"bytes\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let bytes: [u8; INPUT_LENGTH] = runtime::get_named_arg(ARG_BYTES);\n    let hash = runtime::blake2b(bytes);\n    let uref = storage::new_uref(hash);\n    runtime::put_key(HASH_RESULT, uref.into())\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/contract-context/Cargo.toml",
    "content": "[package]\nname = \"contract-context\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"contract_context\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/contract-context/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::{collections::BTreeMap, string::ToString, vec::Vec};\n\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    addressable_entity::{EntityEntryPoint, EntryPointAccess, EntryPointType, EntryPoints},\n    contracts::{ContractHash, ContractPackageHash, ContractVersion, NamedKeys},\n    runtime_args, AddressableEntityHash, CLType, EntryPointPayment, Key, ENTITY_INITIAL_VERSION,\n};\n\nconst PACKAGE_HASH_KEY: &str = \"package_hash_key\";\nconst PACKAGE_ACCESS_KEY: &str = \"package_access_key\";\nconst CONTRACT_HASH_KEY: &str = \"contract_hash_key\";\nconst CONTRACT_CODE: &str = \"contract_code_test\";\nconst SESSION_CODE: &str = \"session_code_test\";\nconst NEW_KEY: &str = \"new_key\";\nconst NAMED_KEY: &str = \"contract_named_key\";\nconst CONTRACT_VERSION: &str = \"contract_version\";\n\n#[no_mangle]\npub extern \"C\" fn session_code_test() {\n    assert!(runtime::get_key(PACKAGE_HASH_KEY).is_some());\n    assert!(runtime::get_key(PACKAGE_ACCESS_KEY).is_some());\n    assert!(runtime::get_key(NAMED_KEY).is_none());\n}\n\n#[no_mangle]\npub extern \"C\" fn contract_code_test() {\n    assert!(runtime::get_key(PACKAGE_HASH_KEY).is_none());\n    assert!(runtime::get_key(PACKAGE_ACCESS_KEY).is_none());\n    assert!(runtime::get_key(NAMED_KEY).is_some());\n}\n\n#[no_mangle]\npub extern \"C\" fn session_code_caller_as_session() {\n    let contract_package_hash = runtime::get_key(PACKAGE_HASH_KEY)\n        .expect(\"should have contract package key\")\n        .into_entity_hash_addr()\n        .unwrap_or_revert();\n\n    runtime::call_versioned_contract::<()>(\n        contract_package_hash.into(),\n        Some(ENTITY_INITIAL_VERSION),\n        SESSION_CODE,\n        runtime_args! 
{},\n    );\n}\n\n#[no_mangle]\npub extern \"C\" fn add_new_key() {\n    let uref = storage::new_uref(());\n    runtime::put_key(NEW_KEY, uref.into());\n}\n\n#[no_mangle]\npub extern \"C\" fn add_new_key_as_session() {\n    let contract_package_hash = runtime::get_key(PACKAGE_HASH_KEY)\n        .expect(\"should have package hash\")\n        .into_entity_hash_addr()\n        .unwrap_or_revert()\n        .into();\n\n    assert!(runtime::get_key(NEW_KEY).is_none());\n    runtime::call_versioned_contract::<()>(\n        contract_package_hash,\n        Some(ENTITY_INITIAL_VERSION),\n        \"add_new_key\",\n        runtime_args! {},\n    );\n    assert!(runtime::get_key(NEW_KEY).is_some());\n}\n\n#[no_mangle]\npub extern \"C\" fn session_code_caller_as_contract() {\n    let contract_package_key: Key = runtime::get_named_arg(PACKAGE_HASH_KEY);\n    let contract_package_hash = contract_package_key.into_package_hash().unwrap_or_revert();\n    runtime::call_versioned_contract::<()>(\n        contract_package_hash.into(),\n        Some(ENTITY_INITIAL_VERSION),\n        SESSION_CODE,\n        runtime_args! 
{},\n    );\n}\n\nfn create_entrypoints_1() -> EntryPoints {\n    let mut entry_points = EntryPoints::new();\n\n    let contract_code_test = EntityEntryPoint::new(\n        CONTRACT_CODE.to_string(),\n        Vec::new(),\n        CLType::I32,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(contract_code_test);\n\n    let session_code_caller_as_contract = EntityEntryPoint::new(\n        \"session_code_caller_as_contract\".to_string(),\n        Vec::new(),\n        CLType::I32,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(session_code_caller_as_contract);\n\n    entry_points\n}\n\nfn install_version_1(package_hash: ContractPackageHash) -> (ContractHash, ContractVersion) {\n    let contract_named_keys = {\n        let contract_variable = storage::new_uref(0);\n\n        let mut named_keys = NamedKeys::new();\n        named_keys.insert(\"contract_named_key\".to_string(), contract_variable.into());\n        named_keys\n    };\n\n    let entry_points = create_entrypoints_1();\n    storage::add_contract_version(\n        package_hash,\n        entry_points,\n        contract_named_keys,\n        BTreeMap::new(),\n    )\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    // Session contract\n    let (contract_package_hash, access_uref) = storage::create_contract_package_at_hash();\n\n    runtime::put_key(PACKAGE_HASH_KEY, contract_package_hash.into());\n    runtime::put_key(PACKAGE_ACCESS_KEY, access_uref.into());\n    let (contract_hash, contract_version) = install_version_1(contract_package_hash);\n    runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into());\n    runtime::put_key(\n        CONTRACT_HASH_KEY,\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/contract-funds/Cargo.toml",
    "content": "[package]\nname = \"contract-funds\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"contract_funds\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/contract-funds/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::{string::ToString, vec};\n\nuse casper_contract::{\n    contract_api::{runtime, storage, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\n\nuse casper_types::{\n    account::AccountHash,\n    addressable_entity::{EntityEntryPoint, EntryPointAccess, EntryPointType, Parameter},\n    contracts::NamedKeys,\n    AddressableEntityHash, CLTyped, CLValue, EntryPointPayment, EntryPoints, Key, URef,\n};\n\nconst GET_PAYMENT_PURSE_NAME: &str = \"get_payment_purse\";\nconst PACKAGE_HASH_KEY_NAME: &str = \"contract_own_funds\";\nconst HASH_KEY_NAME: &str = \"contract_own_funds_hash\";\nconst ACCESS_KEY_NAME: &str = \"contract_own_funds_access\";\nconst ARG_TARGET: &str = \"target\";\nconst CONTRACT_VERSION: &str = \"contract_version\";\nconst PAYMENT_PURSE_KEY: &str = \"payment_purse\";\n\n#[no_mangle]\npub extern \"C\" fn get_payment_purse() {\n    let purse_uref = runtime::get_key(PAYMENT_PURSE_KEY)\n        .and_then(Key::into_uref)\n        .unwrap_or_revert();\n\n    let attenuated_purse = purse_uref.into_add();\n\n    runtime::ret(CLValue::from_t(attenuated_purse).unwrap_or_revert());\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n\n        let faucet_entrypoint = EntityEntryPoint::new(\n            GET_PAYMENT_PURSE_NAME.to_string(),\n            vec![Parameter::new(ARG_TARGET, AccountHash::cl_type())],\n            URef::cl_type(),\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n\n        entry_points.add_entry_point(faucet_entrypoint);\n        entry_points\n    };\n\n    let named_keys = {\n        let faucet_funds = system::create_purse();\n\n        let mut named_keys = NamedKeys::new();\n        named_keys.insert(PAYMENT_PURSE_KEY.to_string(), faucet_funds.into());\n        named_keys\n    };\n\n    let (contract_hash, 
contract_version) = storage::new_contract(\n        entry_points,\n        Some(named_keys),\n        Some(PACKAGE_HASH_KEY_NAME.to_string()),\n        Some(ACCESS_KEY_NAME.to_string()),\n        None,\n    );\n    runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into());\n    runtime::put_key(\n        HASH_KEY_NAME,\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/contract-funds-call/Cargo.toml",
    "content": "[package]\nname = \"contract-funds-call\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"contract_funds_call\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/contract-funds-call/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse casper_contract::{\n    contract_api::{account, runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\n\nuse casper_types::{contracts::ContractHash, Key, RuntimeArgs, URef, U512};\n\nconst GET_PAYMENT_PURSE_NAME: &str = \"get_payment_purse\";\nconst HASH_KEY_NAME: &str = \"contract_own_funds_hash\";\nconst ARG_AMOUNT: &str = \"amount\";\n\nfn get_payment_purse() -> URef {\n    let contract_hash = get_entity_hash_name();\n    runtime::call_contract(\n        contract_hash,\n        GET_PAYMENT_PURSE_NAME,\n        RuntimeArgs::default(),\n    )\n}\n\nfn get_entity_hash_name() -> ContractHash {\n    runtime::get_key(HASH_KEY_NAME)\n        .and_then(Key::into_entity_hash_addr)\n        .map(ContractHash::new)\n        .unwrap_or_revert()\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n\n    let payment_purse = get_payment_purse();\n\n    system::transfer_from_purse_to_purse(account::get_main_purse(), payment_purse, amount, None)\n        .unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/contract-messages-emitter/Cargo.toml",
    "content": "[package]\nname = \"contract-messages-emitter\"\nversion = \"0.1.0\"\nauthors = [\"Alexandru Sardan <alexandru@casperlabs.io>\"]\nedition = \"2018\"\n\n[[bin]]\nname = \"contract_messages_emitter\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/contract-messages-emitter/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[macro_use]\nextern crate alloc;\n\nuse alloc::{\n    collections::BTreeMap,\n    string::{String, ToString},\n    vec::Vec,\n};\n\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\n\nuse casper_types::{\n    addressable_entity::{EntityEntryPoint, EntryPointAccess, EntryPointType, EntryPoints},\n    api_error::ApiError,\n    contract_messages::MessageTopicOperation,\n    contracts::NamedKeys,\n    CLType, CLTyped, EntryPointPayment, Parameter, RuntimeArgs,\n};\n\nconst ENTRY_POINT_INIT: &str = \"init\";\nconst ENTRY_POINT_EMIT_MESSAGE: &str = \"emit_message\";\nconst ENTRY_POINT_EMIT_MULTIPLE_MESSAGES: &str = \"emit_multiple_messages\";\nconst ENTRY_POINT_ADD_TOPIC: &str = \"add_topic\";\nconst MESSAGE_EMITTER_INITIALIZED: &str = \"message_emitter_initialized\";\nconst ARG_MESSAGE_SUFFIX_NAME: &str = \"message_suffix\";\nconst ARG_NUM_MESSAGES_TO_EMIT: &str = \"num_messages_to_emit\";\nconst ARG_REGISTER_DEFAULT_TOPIC_WITH_INIT: &str = \"register_default_topic_with_init\";\nconst ARG_TOPIC_NAME: &str = \"topic_name\";\nconst PACKAGE_HASH_KEY_NAME: &str = \"messages_emitter_package_hash\";\nconst ACCESS_KEY_NAME: &str = \"messages_emitter_access\";\n\npub const MESSAGE_EMITTER_GENERIC_TOPIC: &str = \"generic_messages\";\npub const MESSAGE_PREFIX: &str = \"generic message: \";\n\n#[no_mangle]\npub extern \"C\" fn emit_message() {\n    let suffix: String = runtime::get_named_arg(ARG_MESSAGE_SUFFIX_NAME);\n\n    runtime::emit_message(\n        MESSAGE_EMITTER_GENERIC_TOPIC,\n        &format!(\"{}{}\", MESSAGE_PREFIX, suffix).into(),\n    )\n    .unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn emit_multiple_messages() {\n    let num_messages: u32 = runtime::get_named_arg(ARG_NUM_MESSAGES_TO_EMIT);\n\n    for i in 0..num_messages {\n        runtime::emit_message(\n            MESSAGE_EMITTER_GENERIC_TOPIC,\n            &format!(\"{}{}\", MESSAGE_PREFIX, 
i).into(),\n        )\n        .unwrap_or_revert();\n    }\n}\n\n#[no_mangle]\npub extern \"C\" fn add_topic() {\n    let topic_name: String = runtime::get_named_arg(ARG_TOPIC_NAME);\n\n    runtime::manage_message_topic(topic_name.as_str(), MessageTopicOperation::Add)\n        .unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn init() {\n    if runtime::has_key(MESSAGE_EMITTER_INITIALIZED) {\n        runtime::revert(ApiError::User(0));\n    }\n\n    runtime::manage_message_topic(MESSAGE_EMITTER_GENERIC_TOPIC, MessageTopicOperation::Add)\n        .unwrap_or_revert();\n\n    runtime::put_key(MESSAGE_EMITTER_INITIALIZED, storage::new_uref(()).into());\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let register_topic_with_init: bool =\n        runtime::get_named_arg(ARG_REGISTER_DEFAULT_TOPIC_WITH_INIT);\n\n    let mut emitter_entry_points = EntryPoints::new();\n    emitter_entry_points.add_entry_point(EntityEntryPoint::new(\n        ENTRY_POINT_INIT,\n        Vec::new(),\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n    emitter_entry_points.add_entry_point(EntityEntryPoint::new(\n        ENTRY_POINT_EMIT_MESSAGE,\n        vec![Parameter::new(ARG_MESSAGE_SUFFIX_NAME, String::cl_type())],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n    emitter_entry_points.add_entry_point(EntityEntryPoint::new(\n        ENTRY_POINT_ADD_TOPIC,\n        vec![Parameter::new(ARG_TOPIC_NAME, String::cl_type())],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n    emitter_entry_points.add_entry_point(EntityEntryPoint::new(\n        ENTRY_POINT_EMIT_MULTIPLE_MESSAGES,\n        vec![Parameter::new(ARG_NUM_MESSAGES_TO_EMIT, u32::cl_type())],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        
EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n\n    if register_topic_with_init {\n        let (stored_contract_hash, _contract_version) = storage::new_contract(\n            emitter_entry_points,\n            Some(NamedKeys::new()),\n            Some(PACKAGE_HASH_KEY_NAME.into()),\n            Some(ACCESS_KEY_NAME.into()),\n            None,\n        );\n\n        // Call contract to initialize it and register the default topic.\n        runtime::call_contract::<()>(\n            stored_contract_hash,\n            ENTRY_POINT_INIT,\n            RuntimeArgs::default(),\n        );\n    } else {\n        let new_topics = BTreeMap::from([(\n            MESSAGE_EMITTER_GENERIC_TOPIC.to_string(),\n            MessageTopicOperation::Add,\n        )]);\n        // Register the default topic on contract creation and not through the initializer.\n        let (_stored_contract_hash, _contract_version) = storage::new_contract(\n            emitter_entry_points,\n            Some(NamedKeys::new()),\n            Some(PACKAGE_HASH_KEY_NAME.into()),\n            Some(ACCESS_KEY_NAME.into()),\n            Some(new_topics),\n        );\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/contract-messages-from-account/Cargo.toml",
    "content": "[package]\nname = \"contract-messages-from-account\"\nversion = \"0.1.0\"\nauthors = [\"Alexandru Sardan <alexandru@casperlabs.io>\"]\nedition = \"2018\"\n\n[[bin]]\nname = \"contract_messages_from_account\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/contract-messages-from-account/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse casper_contract::{contract_api::runtime, unwrap_or_revert::UnwrapOrRevert};\n\nuse casper_types::contract_messages::MessageTopicOperation;\n\nconst TOPIC_NAME: &str = \"messages_topic\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    runtime::manage_message_topic(TOPIC_NAME, MessageTopicOperation::Add).unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/contract-messages-upgrader/Cargo.toml",
    "content": "[package]\nname = \"contract-messages-upgrader\"\nversion = \"0.1.0\"\nauthors = [\"Alexandru Sardan <alexandru@casperlabs.io>\"]\nedition = \"2018\"\n\n[[bin]]\nname = \"contract_messages_upgrader\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\", features = [\"test-support\"] }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/contract-messages-upgrader/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[macro_use]\nextern crate alloc;\n\nuse alloc::{\n    collections::BTreeMap,\n    string::{String, ToString},\n    vec::Vec,\n};\n\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\n\nuse casper_types::{\n    addressable_entity::{EntityEntryPoint, EntryPointAccess, EntryPointType, EntryPoints},\n    api_error::ApiError,\n    contract_messages::MessageTopicOperation,\n    contracts::NamedKeys,\n    runtime_args, CLType, CLTyped, EntryPointPayment, PackageHash, Parameter, RuntimeArgs,\n};\n\nconst ENTRY_POINT_INIT: &str = \"init\";\nconst FIRST_VERSION_ENTRY_POINT_EMIT_MESSAGE: &str = \"emit_message\";\nconst ENTRY_POINT_EMIT_MESSAGE: &str = \"upgraded_emit_message\";\nconst ENTRY_POINT_EMIT_MESSAGE_FROM_EACH_VERSION: &str = \"emit_message_from_each_version\";\nconst UPGRADED_MESSAGE_EMITTER_INITIALIZED: &str = \"upgraded_message_emitter_initialized\";\nconst ARG_MESSAGE_SUFFIX_NAME: &str = \"message_suffix\";\nconst PACKAGE_HASH_KEY_NAME: &str = \"messages_emitter_package_hash\";\nconst ARG_REGISTER_DEFAULT_TOPIC_WITH_INIT: &str = \"register_default_topic_with_init\";\n\npub const MESSAGE_EMITTER_GENERIC_TOPIC: &str = \"new_topic_after_upgrade\";\npub const MESSAGE_PREFIX: &str = \"generic message: \";\n\n#[no_mangle]\npub extern \"C\" fn upgraded_emit_message() {\n    let suffix: String = runtime::get_named_arg(ARG_MESSAGE_SUFFIX_NAME);\n\n    runtime::emit_message(\n        MESSAGE_EMITTER_GENERIC_TOPIC,\n        &format!(\"{}{}\", MESSAGE_PREFIX, suffix).into(),\n    )\n    .unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn emit_message_from_each_version() {\n    let suffix: String = runtime::get_named_arg(ARG_MESSAGE_SUFFIX_NAME);\n\n    let contract_package_hash: PackageHash = runtime::get_key(PACKAGE_HASH_KEY_NAME)\n        .expect(\"should have contract package key\")\n        .into_package_addr()\n        .unwrap_or_revert()\n        .into();\n\n    
// Emit a message from this contract.\n    runtime::emit_message(\n        MESSAGE_EMITTER_GENERIC_TOPIC,\n        &\"emitting multiple messages\".into(),\n    )\n    .unwrap_or_revert();\n\n    // Call previous contract version which will emit a message.\n    runtime::call_package_version::<()>(\n        contract_package_hash.into(),\n        Some(2),\n        Some(1),\n        FIRST_VERSION_ENTRY_POINT_EMIT_MESSAGE,\n        runtime_args! {\n            ARG_MESSAGE_SUFFIX_NAME => suffix.clone(),\n        },\n    );\n\n    // Emit another message from this version.\n    runtime::emit_message(\n        MESSAGE_EMITTER_GENERIC_TOPIC,\n        &format!(\"{}{}\", MESSAGE_PREFIX, suffix).into(),\n    )\n    .unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn init() {\n    if runtime::has_key(UPGRADED_MESSAGE_EMITTER_INITIALIZED) {\n        runtime::revert(ApiError::User(0));\n    }\n\n    runtime::manage_message_topic(MESSAGE_EMITTER_GENERIC_TOPIC, MessageTopicOperation::Add)\n        .unwrap_or_revert();\n\n    runtime::put_key(\n        UPGRADED_MESSAGE_EMITTER_INITIALIZED,\n        storage::new_uref(()).into(),\n    );\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let register_topic_with_init: bool =\n        runtime::get_named_arg(ARG_REGISTER_DEFAULT_TOPIC_WITH_INIT);\n\n    let mut emitter_entry_points = EntryPoints::new();\n    emitter_entry_points.add_entry_point(EntityEntryPoint::new(\n        ENTRY_POINT_INIT,\n        Vec::new(),\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n    emitter_entry_points.add_entry_point(EntityEntryPoint::new(\n        ENTRY_POINT_EMIT_MESSAGE,\n        vec![Parameter::new(ARG_MESSAGE_SUFFIX_NAME, String::cl_type())],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n    emitter_entry_points.add_entry_point(EntityEntryPoint::new(\n        
ENTRY_POINT_EMIT_MESSAGE_FROM_EACH_VERSION,\n        vec![Parameter::new(ARG_MESSAGE_SUFFIX_NAME, String::cl_type())],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n\n    let message_emitter_package_hash: PackageHash = runtime::get_key(PACKAGE_HASH_KEY_NAME)\n        .unwrap_or_revert()\n        .into_package_addr()\n        .unwrap_or_revert()\n        .into();\n\n    let mut named_keys = NamedKeys::new();\n    named_keys.insert(\n        PACKAGE_HASH_KEY_NAME.into(),\n        message_emitter_package_hash.into(),\n    );\n\n    if register_topic_with_init {\n        let (contract_hash, _contract_version) = storage::add_contract_version(\n            message_emitter_package_hash.into(),\n            emitter_entry_points,\n            named_keys,\n            BTreeMap::new(),\n        );\n\n        // Call contract to initialize it\n        runtime::call_contract::<()>(contract_hash, ENTRY_POINT_INIT, RuntimeArgs::default());\n    } else {\n        let new_topics = BTreeMap::from([(\n            MESSAGE_EMITTER_GENERIC_TOPIC.to_string(),\n            MessageTopicOperation::Add,\n        )]);\n        let (_contract_hash, _contract_version) = storage::add_contract_version(\n            message_emitter_package_hash.into(),\n            emitter_entry_points,\n            named_keys,\n            new_topics,\n        );\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/counter-factory/Cargo.toml",
    "content": "[package]\nname = \"counter-factory\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2018\"\n\n[[bin]]\nname = \"counter_factory\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/counter-factory/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::{String, ToString};\n\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    addressable_entity::{EntityEntryPoint, EntryPoints, Parameters},\n    bytesrepr::FromBytes,\n    contracts::NamedKeys,\n    ApiError, CLType, CLTyped, EntryPointAccess, EntryPointPayment, EntryPointType, Key, URef,\n    U512,\n};\n\nconst ACCESS_KEY_NAME: &str = \"factory_access\";\nconst ARG_INITIAL_VALUE: &str = \"initial_value\";\nconst ARG_NAME: &str = \"name\";\nconst CONTRACT_FACTORY_DEFAULT_ENTRY_POINT: &str = \"contract_factory_default\";\nconst CONTRACT_FACTORY_ENTRY_POINT: &str = \"contract_factory\";\nconst CONTRACT_VERSION: &str = \"contract_version\";\nconst CURRENT_VALUE_KEY: &str = \"current_value\";\nconst DECREASE_ENTRY_POINT: &str = \"decrement\";\nconst HASH_KEY_NAME: &str = \"factory_hash\";\nconst INCREASE_ENTRY_POINT: &str = \"increment\";\nconst PACKAGE_HASH_KEY_NAME: &str = \"factory_package_hash\";\n\nfn get_named_uref(name: &str) -> Result<URef, ApiError> {\n    runtime::get_key(name)\n        .ok_or(ApiError::MissingKey)?\n        .into_uref()\n        .ok_or(ApiError::UnexpectedKeyVariant)\n}\n\nfn read_uref<T: CLTyped + FromBytes>(uref: URef) -> Result<T, ApiError> {\n    let value: T = storage::read(uref)?.ok_or(ApiError::ValueNotFound)?;\n    Ok(value)\n}\n\nfn modify_counter(func: impl FnOnce(U512) -> U512) -> Result<(), ApiError> {\n    let current_value_uref = get_named_uref(CURRENT_VALUE_KEY)?;\n    let value: U512 = read_uref(current_value_uref)?;\n    let new_value = func(value);\n    storage::write(current_value_uref, new_value);\n    Ok(())\n}\n\n#[no_mangle]\npub extern \"C\" fn increment() {\n    modify_counter(|value| value + U512::one()).unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn decrement() {\n    modify_counter(|value| value - 
U512::one()).unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn contract_factory() {\n    let name: String = runtime::get_named_arg(ARG_NAME);\n    let initial_value: U512 = runtime::get_named_arg(ARG_INITIAL_VALUE);\n    installer(name, initial_value);\n}\n\n#[no_mangle]\npub extern \"C\" fn contract_factory_default() {\n    let name: String = runtime::get_named_arg(ARG_NAME);\n    installer(name, U512::zero());\n}\n\nfn installer(name: String, initial_value: U512) {\n    let named_keys = {\n        let new_uref = storage::new_uref(initial_value);\n        let mut named_keys = NamedKeys::new();\n        named_keys.insert(CURRENT_VALUE_KEY.to_string(), new_uref.into());\n        named_keys\n    };\n\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n        let entry_point: EntityEntryPoint = EntityEntryPoint::new(\n            INCREASE_ENTRY_POINT.to_string(),\n            Parameters::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n        let entry_point: EntityEntryPoint = EntityEntryPoint::new(\n            DECREASE_ENTRY_POINT.to_string(),\n            Parameters::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n\n        entry_points\n    };\n\n    let (contract_hash, contract_version) = storage::new_contract(\n        entry_points,\n        Some(named_keys),\n        Some(PACKAGE_HASH_KEY_NAME.to_string()),\n        Some(ACCESS_KEY_NAME.to_string()),\n        None,\n    );\n\n    runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into());\n    runtime::put_key(&name, Key::Hash(contract_hash.value()));\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_points = 
{\n        let mut entry_points = EntryPoints::new();\n\n        let entry_point: EntityEntryPoint = EntityEntryPoint::new(\n            CONTRACT_FACTORY_ENTRY_POINT.to_string(),\n            Parameters::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Factory,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n        let entry_point: EntityEntryPoint = EntityEntryPoint::new(\n            CONTRACT_FACTORY_DEFAULT_ENTRY_POINT.to_string(),\n            Parameters::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Factory,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n        let entry_point: EntityEntryPoint = EntityEntryPoint::new(\n            INCREASE_ENTRY_POINT.to_string(),\n            Parameters::new(),\n            CLType::Unit,\n            EntryPointAccess::Template,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n        let entry_point: EntityEntryPoint = EntityEntryPoint::new(\n            DECREASE_ENTRY_POINT.to_string(),\n            Parameters::new(),\n            CLType::Unit,\n            EntryPointAccess::Template,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n\n        entry_points\n    };\n\n    let (contract_hash, contract_version) = storage::new_contract(\n        entry_points,\n        None,\n        Some(PACKAGE_HASH_KEY_NAME.to_string()),\n        Some(ACCESS_KEY_NAME.to_string()),\n        None,\n    );\n\n    runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into());\n    runtime::put_key(HASH_KEY_NAME, Key::Hash(contract_hash.value()));\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/create-purse-01/Cargo.toml",
    "content": "[package]\nname = \"create-purse-01\"\nversion = \"0.1.0\"\nauthors = [\"Henry Till <henrytill@gmail.com>\", \"Ed Hastings <ed@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"create_purse_01\"\npath = \"src/bin/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/create-purse-01/src/bin/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    create_purse_01::delegate()\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/create-purse-01/src/lib.rs",
    "content": "#![no_std]\n\nextern crate alloc;\n\nuse alloc::string::String;\n\nuse casper_contract::contract_api::{runtime, system};\n\nconst ARG_PURSE_NAME: &str = \"purse_name\";\n\npub fn delegate() {\n    let purse_name: String = runtime::get_named_arg(ARG_PURSE_NAME);\n    let purse = system::create_purse();\n    runtime::put_key(&purse_name, purse.into());\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/deserialize-error/Cargo.toml",
    "content": "[package]\nname = \"deserialize-error\"\nversion = \"0.1.0\"\nauthors = [\"Mateusz Górski <gorski.mateusz@protonmail.ch>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"deserialize_error\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/deserialize-error/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::vec::Vec;\n\nuse casper_contract::{self, contract_api::storage, unwrap_or_revert::UnwrapOrRevert};\nuse casper_types::{\n    addressable_entity::Parameters, api_error, bytesrepr::ToBytes, contracts::ContractHash, CLType,\n    EntityEntryPoint, EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints,\n};\n\n#[no_mangle]\npub extern \"C\" fn do_nothing() {\n    // A function that does nothing.\n    // This is used to just pass the checks in `call_contract` on the host side.\n}\n\n// Attacker copied to_ptr from `alloc_utils` as it was private\nfn to_ptr<T: ToBytes>(t: T) -> (*const u8, usize, Vec<u8>) {\n    let bytes = t.into_bytes().unwrap_or_revert();\n    let ptr = bytes.as_ptr();\n    let size = bytes.len();\n    (ptr, size, bytes)\n}\n\nmod malicious_ffi {\n    // Potential attacker has available every FFI for himself\n    extern \"C\" {\n        pub fn casper_call_contract(\n            contract_hash_ptr: *const u8,\n            contract_hash_size: usize,\n            entry_point_name_ptr: *const u8,\n            entry_point_name_size: usize,\n            runtime_args_ptr: *const u8,\n            runtime_args_size: usize,\n            result_size: *mut usize,\n        ) -> i32;\n    }\n}\n\n// This is half-baked runtime::call_contract with changed `extra_urefs`\n// parameter with a desired payload that's supposed to bring the node down.\npub fn my_call_contract(contract_hash: ContractHash, entry_point_name: &str) -> usize {\n    let (contract_hash_ptr, contract_hash_size, _bytes1) = to_ptr(contract_hash);\n\n    let entry_point_name = ToBytes::to_bytes(entry_point_name).unwrap();\n    let malicious_args = [255, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9];\n\n    {\n        let mut bytes_written = 0usize;\n        let ret = unsafe {\n            malicious_ffi::casper_call_contract(\n                contract_hash_ptr,\n                contract_hash_size,\n                
entry_point_name.as_ptr(),\n                entry_point_name.len(),\n                malicious_args.as_ptr(),\n                malicious_args.len(),\n                &mut bytes_written as *mut usize,\n            )\n        };\n        api_error::result_from(ret).unwrap_or_revert();\n        bytes_written\n    }\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n\n        let entry_point = EntityEntryPoint::new(\n            \"do_nothing\",\n            Parameters::default(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n\n        entry_points.add_entry_point(entry_point);\n\n        entry_points\n    };\n    let (contract_hash, _contract_version) =\n        storage::new_contract(entry_points, None, None, None, None);\n\n    my_call_contract(contract_hash, \"do_nothing\");\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/dictionary/Cargo.toml",
    "content": "[package]\nname = \"dictionary\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"dictionary\"\npath = \"src/bin/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[features]\ndefault = [\"casper-contract/default\"]\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\", default-features = false }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/dictionary/src/bin/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    dictionary::delegate()\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/dictionary/src/lib.rs",
    "content": "#![no_std]\n\nextern crate alloc;\n\nuse alloc::{\n    string::{String, ToString},\n    vec::Vec,\n};\nuse core::mem::MaybeUninit;\n\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    addressable_entity::EntityKindTag, api_error, bytesrepr::ToBytes, contracts::NamedKeys,\n    AccessRights, AddressableEntityHash, ApiError, CLType, CLValue, EntityEntryPoint,\n    EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key, URef,\n};\n\npub const DICTIONARY_NAME: &str = \"local\";\npub const DICTIONARY_PUT_KEY: &str = \"item_key\";\npub const HELLO_PREFIX: &str = \" Hello, \";\npub const WORLD_SUFFIX: &str = \"world!\";\npub const MODIFY_WRITE_ENTRYPOINT: &str = \"modify_write\";\npub const SHARE_RO_ENTRYPOINT: &str = \"share_ro\";\npub const SHARE_W_ENTRYPOINT: &str = \"share_w\";\npub const CONTRACT_HASH_NAME: &str = \"contract_hash\";\nconst CONTRACT_PACKAGE_HASH_NAME: &str = \"package_hash_name\";\npub const DEFAULT_DICTIONARY_NAME: &str = \"Default Key\";\npub const DEFAULT_DICTIONARY_VALUE: &str = \"Default Value\";\npub const DICTIONARY_REF: &str = \"new_dictionary\";\npub const MALICIOUS_KEY_NAME: &str = \"invalid dictionary name\";\npub const INVALID_PUT_DICTIONARY_ITEM_KEY_ENTRYPOINT: &str = \"invalid_put_dictionary_item_key\";\npub const INVALID_GET_DICTIONARY_ITEM_KEY_ENTRYPOINT: &str = \"invalid_get_dictionary_item_key\";\n\n#[no_mangle]\nfn modify_write() {\n    // Preserve for further modifications\n    let dictionary_seed_uref = match runtime::get_key(DICTIONARY_NAME) {\n        Some(key) => key.into_uref().unwrap_or_revert(),\n        None => runtime::revert(ApiError::GetKey),\n    };\n\n    // Appends \" Hello, world!\" to a [66; 32] dictionary with spaces trimmed.\n    // Two runs should yield value \"Hello, world! 
Hello, world!\" read from dictionary\n    let mut res: String = storage::dictionary_get(dictionary_seed_uref, DICTIONARY_PUT_KEY)\n        .unwrap_or_default()\n        .unwrap_or_default();\n\n    res.push_str(HELLO_PREFIX);\n    // Write \"Hello, \"\n    storage::dictionary_put(dictionary_seed_uref, DICTIONARY_PUT_KEY, res);\n\n    // Read (this should exercise cache)\n    let mut res: String = storage::dictionary_get(dictionary_seed_uref, DICTIONARY_PUT_KEY)\n        .unwrap_or_revert()\n        .unwrap_or_revert();\n    // Append\n    res.push_str(WORLD_SUFFIX);\n    // Write\n    storage::dictionary_put(\n        dictionary_seed_uref,\n        DICTIONARY_PUT_KEY,\n        res.trim().to_string(),\n    );\n}\n\nfn get_dictionary_seed_uref() -> URef {\n    let key = runtime::get_key(DICTIONARY_NAME).unwrap_or_revert();\n    key.into_uref().unwrap_or_revert()\n}\n\n#[no_mangle]\nfn share_ro() {\n    let uref_ro = get_dictionary_seed_uref().into_read();\n    runtime::ret(CLValue::from_t(uref_ro).unwrap_or_revert())\n}\n\n#[no_mangle]\nfn share_w() {\n    let uref_w = get_dictionary_seed_uref().into_write();\n    runtime::ret(CLValue::from_t(uref_w).unwrap_or_revert())\n}\n\nfn to_ptr<T: ToBytes>(t: T) -> (*const u8, usize, Vec<u8>) {\n    let bytes = t.into_bytes().unwrap_or_revert();\n    let ptr = bytes.as_ptr();\n    let size = bytes.len();\n    (ptr, size, bytes)\n}\n\n#[no_mangle]\nfn invalid_put_dictionary_item_key() {\n    let dictionary_seed_uref = get_dictionary_seed_uref();\n    let (uref_ptr, uref_size, _bytes1) = to_ptr(dictionary_seed_uref);\n\n    let bad_dictionary_item_key = alloc::vec![0, 159, 146, 150];\n    let bad_dictionary_item_key_ptr = bad_dictionary_item_key.as_ptr();\n    let bad_dictionary_item_key_size = bad_dictionary_item_key.len();\n\n    let cl_value = CLValue::unit();\n    let (cl_value_ptr, cl_value_size, _bytes) = to_ptr(cl_value);\n\n    let result = unsafe {\n        let ret = test_ffi::casper_dictionary_put(\n            
uref_ptr,\n            uref_size,\n            bad_dictionary_item_key_ptr,\n            bad_dictionary_item_key_size,\n            cl_value_ptr,\n            cl_value_size,\n        );\n        api_error::result_from(ret)\n    };\n\n    result.unwrap_or_revert()\n}\n\n#[no_mangle]\nfn invalid_get_dictionary_item_key() {\n    let dictionary_seed_uref = get_dictionary_seed_uref();\n    let (uref_ptr, uref_size, _bytes1) = to_ptr(dictionary_seed_uref);\n\n    let bad_dictionary_item_key = alloc::vec![0, 159, 146, 150];\n    let bad_dictionary_item_key_ptr = bad_dictionary_item_key.as_ptr();\n    let bad_dictionary_item_key_size = bad_dictionary_item_key.len();\n\n    let _value_size = {\n        let mut value_size = MaybeUninit::uninit();\n        let ret = unsafe {\n            test_ffi::casper_dictionary_get(\n                uref_ptr,\n                uref_size,\n                bad_dictionary_item_key_ptr,\n                bad_dictionary_item_key_size,\n                value_size.as_mut_ptr(),\n            )\n        };\n        match api_error::result_from(ret) {\n            Ok(_) => unsafe { value_size.assume_init() },\n            Err(e) => runtime::revert(e),\n        }\n    };\n}\n\nmod test_ffi {\n    extern \"C\" {\n        pub fn casper_dictionary_put(\n            uref_ptr: *const u8,\n            uref_size: usize,\n            key_ptr: *const u8,\n            key_size: usize,\n            value_ptr: *const u8,\n            value_size: usize,\n        ) -> i32;\n\n        pub fn casper_dictionary_get(\n            uref_ptr: *const u8,\n            uref_size: usize,\n            key_bytes_ptr: *const u8,\n            key_bytes_size: usize,\n            output_size: *mut usize,\n        ) -> i32;\n    }\n}\n\npub fn delegate() {\n    // Empty key name is invalid\n    assert!(storage::new_dictionary(\"\").is_err());\n    // Assert that we don't have this key yet\n    assert!(!runtime::has_key(MALICIOUS_KEY_NAME));\n    // Create and put a new dictionary in 
named keys\n    storage::new_dictionary(MALICIOUS_KEY_NAME).unwrap();\n    // Can't do it twice\n    assert!(storage::new_dictionary(MALICIOUS_KEY_NAME).is_err());\n\n    let mut entry_points = EntryPoints::new();\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        MODIFY_WRITE_ENTRYPOINT,\n        Vec::new(),\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        SHARE_RO_ENTRYPOINT,\n        Vec::new(),\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        SHARE_W_ENTRYPOINT,\n        Vec::new(),\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        INVALID_PUT_DICTIONARY_ITEM_KEY_ENTRYPOINT,\n        Vec::new(),\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        INVALID_GET_DICTIONARY_ITEM_KEY_ENTRYPOINT,\n        Vec::new(),\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n    let named_keys = {\n        let uref = {\n            let dictionary_uref = storage::new_dictionary(DICTIONARY_REF).unwrap_or_revert();\n            assert_eq!(\n                dictionary_uref.access_rights() & AccessRights::READ_ADD_WRITE,\n                AccessRights::READ_ADD_WRITE\n            );\n\n            storage::dictionary_put(\n                dictionary_uref,\n                DEFAULT_DICTIONARY_NAME,\n                DEFAULT_DICTIONARY_VALUE,\n            );\n            dictionary_uref\n   
     };\n        let mut named_keys = NamedKeys::new();\n        named_keys.insert(DICTIONARY_NAME.to_string(), uref.into());\n        named_keys\n    };\n\n    let (entity_hash, _version) = storage::new_contract(\n        entry_points,\n        Some(named_keys),\n        Some(CONTRACT_PACKAGE_HASH_NAME.to_string()),\n        None,\n        None,\n    );\n\n    let entity_key = Key::addressable_entity_key(\n        EntityKindTag::SmartContract,\n        AddressableEntityHash::new(entity_hash.value()),\n    );\n\n    runtime::put_key(CONTRACT_HASH_NAME, entity_key);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/dictionary-call/Cargo.toml",
    "content": "[package]\nname = \"dictionary-call\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"dictionary_call\"\npath = \"src/bin/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[features]\ndefault = [\"casper-contract/default\", \"dictionary/default\"]\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\", default-features = false }\ncasper-types = { path = \"../../../../types\" }\ndictionary = { path = \"../dictionary\", default-features = false }\n"
  },
  {
    "path": "smart_contracts/contracts/test/dictionary-call/src/bin/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::{String, ToString};\nuse core::str::FromStr;\n\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    bytesrepr::FromBytes, contracts::ContractHash, AddressableEntityHash, CLTyped, RuntimeArgs,\n    URef,\n};\n\nuse dictionary::{\n    DEFAULT_DICTIONARY_NAME, DEFAULT_DICTIONARY_VALUE, INVALID_GET_DICTIONARY_ITEM_KEY_ENTRYPOINT,\n    INVALID_PUT_DICTIONARY_ITEM_KEY_ENTRYPOINT,\n};\nuse dictionary_call::{\n    Operation, ARG_CONTRACT_HASH, ARG_FORGED_UREF, ARG_OPERATION, ARG_SHARE_UREF_ENTRYPOINT,\n    NEW_DICTIONARY_ITEM_KEY, NEW_DICTIONARY_VALUE,\n};\n\n/// Calls dictionary contract by hash as passed by `ARG_CONTRACT_HASH` argument and returns a\n/// single value.\nfn call_dictionary_contract<T: CLTyped + FromBytes>(entrypoint: &str) -> T {\n    let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH);\n    runtime::call_contract(contract_hash.into(), entrypoint, RuntimeArgs::default())\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let operation = {\n        let arg_operation: String = runtime::get_named_arg(ARG_OPERATION);\n        Operation::from_str(&arg_operation).unwrap_or_revert()\n    };\n\n    match operation {\n        Operation::Write => {\n            let entrypoint: String = runtime::get_named_arg(ARG_SHARE_UREF_ENTRYPOINT);\n            let uref = call_dictionary_contract(&entrypoint);\n            let value: String = NEW_DICTIONARY_VALUE.to_string();\n            storage::dictionary_put(uref, NEW_DICTIONARY_ITEM_KEY, value);\n        }\n        Operation::Read => {\n            let entrypoint: String = runtime::get_named_arg(ARG_SHARE_UREF_ENTRYPOINT);\n            let uref = call_dictionary_contract(&entrypoint);\n            let maybe_value =\n                storage::dictionary_get(uref, DEFAULT_DICTIONARY_NAME).unwrap_or_revert();\n            // 
Whether the value exists or not we're mostly interested in validation of access\n            // rights\n            let value: String = maybe_value.unwrap_or_default();\n            assert_eq!(value, DEFAULT_DICTIONARY_VALUE);\n        }\n        Operation::ForgedURefWrite => {\n            let uref: URef = runtime::get_named_arg(ARG_FORGED_UREF);\n            let value: String = NEW_DICTIONARY_VALUE.to_string();\n            storage::dictionary_put(uref, NEW_DICTIONARY_ITEM_KEY, value);\n        }\n        Operation::InvalidPutDictionaryItemKey => {\n            let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH);\n            runtime::call_contract(\n                contract_hash.into(),\n                INVALID_PUT_DICTIONARY_ITEM_KEY_ENTRYPOINT,\n                RuntimeArgs::default(),\n            )\n        }\n        Operation::InvalidGetDictionaryItemKey => {\n            let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH);\n            runtime::call_contract(\n                ContractHash::new(contract_hash.value()),\n                INVALID_GET_DICTIONARY_ITEM_KEY_ENTRYPOINT,\n                RuntimeArgs::default(),\n            )\n        }\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/dictionary-call/src/lib.rs",
    "content": "#![no_std]\n\nextern crate alloc;\n\nuse alloc::str::FromStr;\n\nuse casper_types::ApiError;\n\npub const ARG_OPERATION: &str = \"operation\";\npub const ARG_CONTRACT_HASH: &str = \"contract_hash\";\npub const OP_WRITE: &str = \"write\";\npub const OP_READ: &str = \"read\";\npub const OP_FORGED_UREF_WRITE: &str = \"forged_uref_write\";\npub const OP_INVALID_PUT_DICTIONARY_ITEM_KEY: &str = \"invalid_put_dictionary_item_key\";\npub const OP_INVALID_GET_DICTIONARY_ITEM_KEY: &str = \"invalid_get_dictionary_item_key\";\npub const NEW_DICTIONARY_ITEM_KEY: &str = \"New key\";\npub const NEW_DICTIONARY_VALUE: &str = \"New value\";\npub const ARG_SHARE_UREF_ENTRYPOINT: &str = \"share_uref_entrypoint\";\npub const ARG_FORGED_UREF: &str = \"forged_uref\";\n\n#[repr(u16)]\npub enum Error {\n    InvalidOperation,\n}\n\nimpl From<Error> for ApiError {\n    fn from(error: Error) -> Self {\n        ApiError::User(error as u16)\n    }\n}\n\npub enum Operation {\n    Write,\n    Read,\n    ForgedURefWrite,\n    InvalidPutDictionaryItemKey,\n    InvalidGetDictionaryItemKey,\n}\n\nimpl FromStr for Operation {\n    type Err = Error;\n\n    fn from_str(s: &str) -> Result<Self, Self::Err> {\n        if s == OP_WRITE {\n            Ok(Operation::Write)\n        } else if s == OP_READ {\n            Ok(Operation::Read)\n        } else if s == OP_FORGED_UREF_WRITE {\n            Ok(Operation::ForgedURefWrite)\n        } else if s == OP_INVALID_PUT_DICTIONARY_ITEM_KEY {\n            Ok(Operation::InvalidPutDictionaryItemKey)\n        } else if s == OP_INVALID_GET_DICTIONARY_ITEM_KEY {\n            Ok(Operation::InvalidGetDictionaryItemKey)\n        } else {\n            Err(Error::InvalidOperation)\n        }\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/dictionary-item-key-length/Cargo.toml",
    "content": "[package]\nname = \"dictionary-item-key-length\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[[bin]]\nname = \"dictionary-item-key-check\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/dictionary-item-key-length/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\n\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\n\nconst OVERSIZED_DICTIONARY_ITEM_KEY: &str = \"nZ1a27wa2MYty0KpPcl9WOYAFygPUWSqSTN5hyDi1MlfOk2RmykDdwM4HENeXEIUlnZ1a27wa2MYty0KpPcl9WOYAFygPUWSqSTN5hyDi1MlfOk2RmykDdwM4HENeXEIUl\";\nconst DICTIONARY_NAME: &str = \"dictionary-name\";\nconst DICTIONARY_VALUE: &str = \"dictionary-value\";\nconst DICTIONARY_OP: &str = \"dictionary-operation\";\nconst OP_PUT: &str = \"put\";\nconst OP_GET: &str = \"get\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let dictionary_uref = storage::new_dictionary(DICTIONARY_NAME).unwrap_or_revert();\n    let operation: String = runtime::get_named_arg(DICTIONARY_OP);\n    if operation == OP_GET {\n        let _ = storage::dictionary_get::<String>(dictionary_uref, OVERSIZED_DICTIONARY_ITEM_KEY);\n    } else if operation == OP_PUT {\n        storage::dictionary_put(\n            dictionary_uref,\n            OVERSIZED_DICTIONARY_ITEM_KEY,\n            DICTIONARY_VALUE,\n        );\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/dictionary-read/Cargo.toml",
    "content": "[package]\nname = \"dictionary-read\"\nversion = \"0.1.0\"\nedition = \"2018\"\n\n[[bin]]\nname = \"dictionary_read\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/dictionary-read/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::{String, ToString};\n\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{ApiError, Key};\n\nconst DICTIONARY_NAME: &str = \"dictionary-name\";\nconst DICTIONARY_ITEM_KEY: &str = \"dictionary-item-key\";\nconst DICTIONARY_VALUE: &str = \"dictionary-value\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let dictionary_seed_uref = storage::new_dictionary(DICTIONARY_NAME).unwrap_or_revert();\n    storage::dictionary_put(\n        dictionary_seed_uref,\n        DICTIONARY_ITEM_KEY,\n        DICTIONARY_VALUE.to_string(),\n    );\n    let dictionary_address_key =\n        Key::dictionary(dictionary_seed_uref, DICTIONARY_ITEM_KEY.as_bytes());\n    let value_via_read_address: String = storage::dictionary_read(dictionary_address_key)\n        .unwrap_or_revert()\n        .unwrap_or_revert();\n    let value_via_get: String = storage::dictionary_get(dictionary_seed_uref, DICTIONARY_ITEM_KEY)\n        .unwrap_or_revert()\n        .unwrap_or_revert();\n    if value_via_read_address != *DICTIONARY_VALUE {\n        runtime::revert(ApiError::User(16u16))\n    }\n    if value_via_get != value_via_read_address {\n        runtime::revert(ApiError::User(17u16))\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/do-nothing/Cargo.toml",
    "content": "[package]\nname = \"do-nothing\"\nversion = \"0.1.0\"\nauthors = [\"Michael Birch <birchmd@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"do_nothing\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/do-nothing/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::contract_api::runtime;\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let _named_keys = runtime::list_named_keys();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/do-nothing-stored/Cargo.toml",
    "content": "[package]\nname = \"do-nothing-stored\"\nversion = \"0.1.0\"\nauthors = [\"Ed Hastings <ed@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"do_nothing_stored\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/do-nothing-stored/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::contract_api::{runtime, storage};\nuse casper_types::{\n    addressable_entity::{EntityEntryPoint, EntryPoints, Parameters},\n    CLType, EntryPointAccess, EntryPointPayment, EntryPointType, Key,\n};\n\nconst ENTRY_FUNCTION_NAME: &str = \"delegate\";\nconst HASH_KEY_NAME: &str = \"do_nothing_hash\";\nconst PACKAGE_HASH_KEY_NAME: &str = \"do_nothing_package_hash\";\nconst ACCESS_KEY_NAME: &str = \"do_nothing_access\";\nconst CONTRACT_VERSION: &str = \"contract_version\";\n\n#[no_mangle]\npub extern \"C\" fn delegate() {\n    let _named_keys = runtime::list_named_keys();\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n        let entry_point = EntityEntryPoint::new(\n            ENTRY_FUNCTION_NAME,\n            Parameters::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n        entry_points\n    };\n\n    let (contract_hash, contract_version) = storage::new_contract(\n        entry_points,\n        None,\n        Some(PACKAGE_HASH_KEY_NAME.into()),\n        Some(ACCESS_KEY_NAME.into()),\n        None,\n    );\n\n    runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into());\n    runtime::put_key(HASH_KEY_NAME, Key::Hash(contract_hash.value()));\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/do-nothing-stored-caller/Cargo.toml",
    "content": "[package]\nname = \"do-nothing-stored-caller\"\nversion = \"0.1.0\"\nauthors = [\"Ed Hastings <ed@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"do_nothing_stored_caller\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/do-nothing-stored-caller/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\n\nuse casper_contract::contract_api::runtime;\nuse casper_types::{runtime_args, EntityVersion, PackageHash};\n\nconst ENTRY_FUNCTION_NAME: &str = \"delegate\";\nconst PURSE_NAME_ARG_NAME: &str = \"purse_name\";\nconst ARG_CONTRACT_PACKAGE: &str = \"contract_package\";\nconst ARG_NEW_PURSE_NAME: &str = \"new_purse_name\";\nconst ARG_MAJOR_VERSION: &str = \"major_version\";\nconst ARG_VERSION: &str = \"version\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let contract_package_hash: PackageHash = runtime::get_named_arg(ARG_CONTRACT_PACKAGE);\n    let new_purse_name: String = runtime::get_named_arg(ARG_NEW_PURSE_NAME);\n    let major_version: u32 = runtime::get_named_arg(ARG_MAJOR_VERSION);\n    let version_number: EntityVersion = runtime::get_named_arg(ARG_VERSION);\n\n    let runtime_args = runtime_args! {\n        PURSE_NAME_ARG_NAME => new_purse_name,\n    };\n\n    runtime::call_package_version(\n        contract_package_hash.into(),\n        Some(major_version),\n        Some(version_number),\n        ENTRY_FUNCTION_NAME,\n        runtime_args,\n    )\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/do-nothing-stored-upgrader/Cargo.toml",
    "content": "[package]\nname = \"do-nothing-stored-upgrader\"\nversion = \"0.1.0\"\nauthors = [\"Ed Hastings <ed@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"do_nothing_stored_upgrader\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncreate-purse-01 = { path = \"../create-purse-01\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/do-nothing-stored-upgrader/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[macro_use]\nextern crate alloc;\n\nuse alloc::{\n    collections::BTreeMap,\n    string::{String, ToString},\n};\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse core::convert::TryInto;\n\nuse casper_types::{\n    addressable_entity::{EntityEntryPoint, EntryPointAccess, EntryPointType, EntryPoints},\n    contracts::NamedKeys,\n    CLType, CLTyped, EntryPointPayment, Key, PackageHash, Parameter, URef,\n};\n\nconst ENTRY_FUNCTION_NAME: &str = \"delegate\";\nconst DO_NOTHING_PACKAGE_HASH_KEY_NAME: &str = \"do_nothing_package_hash\";\nconst DO_NOTHING_ACCESS_KEY_NAME: &str = \"do_nothing_access\";\nconst CONTRACT_VERSION: &str = \"contract_version\";\nconst ARG_PURSE_NAME: &str = \"purse_name\";\n\n#[no_mangle]\npub extern \"C\" fn delegate() {\n    let _named_keys = runtime::list_named_keys();\n    runtime::put_key(\"called_do_nothing_ver_2\", Key::Hash([1u8; 32]));\n    create_purse_01::delegate()\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n\n        let delegate = EntityEntryPoint::new(\n            ENTRY_FUNCTION_NAME.to_string(),\n            vec![Parameter::new(ARG_PURSE_NAME, String::cl_type())],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(delegate);\n\n        entry_points\n    };\n\n    let do_nothing_package_hash: PackageHash = runtime::get_key(DO_NOTHING_PACKAGE_HASH_KEY_NAME)\n        .unwrap_or_revert()\n        .into_hash_addr()\n        .unwrap_or_revert()\n        .into();\n\n    let _do_nothing_uref: URef = runtime::get_key(DO_NOTHING_ACCESS_KEY_NAME)\n        .unwrap_or_revert()\n        .try_into()\n        .unwrap_or_revert();\n\n    let (contract_hash, contract_version) = storage::add_contract_version(\n        
do_nothing_package_hash.into(),\n        entry_points,\n        NamedKeys::new(),\n        BTreeMap::new(),\n    );\n    runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into());\n    runtime::put_key(\"end of upgrade\", Key::Hash(contract_hash.value()));\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-1071-regression/Cargo.toml",
    "content": "[package]\nname = \"ee-1071-regression\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ee_1071_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-1071-regression/src/lib.rs",
    "content": "#![no_std]\n\nuse casper_contract::contract_api::{runtime, storage};\n\n#[no_mangle]\npub extern \"C\" fn new_uref() {\n    let new_uref = storage::new_uref(0);\n    runtime::put_key(&new_uref.to_formatted_string(), new_uref.into())\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-1071-regression/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::contract_api::{runtime, storage};\nuse casper_types::{\n    addressable_entity::Parameters, AddressableEntityHash, CLType, EntityEntryPoint,\n    EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key,\n};\n\nconst CONTRACT_HASH_NAME: &str = \"contract\";\n\nconst NEW_UREF: &str = \"new_uref\";\n\n// This import below somehow bypasses linkers ability to verify that ext_ffi's new_uref import has\n// different signature than the new_uref we're defining in a lib.\n#[allow(unused_imports)]\nuse ee_1071_regression::new_uref;\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n\n        let entry_point = EntityEntryPoint::new(\n            NEW_UREF,\n            Parameters::default(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n\n        entry_points.add_entry_point(entry_point);\n\n        entry_points\n    };\n    let (contract_hash, _contract_version) =\n        storage::new_contract(entry_points, None, None, None, None);\n\n    runtime::put_key(\n        CONTRACT_HASH_NAME,\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-1129-regression/Cargo.toml",
    "content": "[package]\nname = \"ee-1129-regression\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ee_1129_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-1129-regression/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::ToString;\n\nuse casper_contract::contract_api::{runtime, storage, system};\nuse casper_types::{\n    addressable_entity::Parameters, AddressableEntityHash, CLType, EntityEntryPoint,\n    EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key,\n};\n\nconst ENTRY_POINT_NAME: &str = \"create_purse\";\nconst CONTRACT_KEY: &str = \"contract\";\nconst ACCESS_KEY: &str = \"access\";\nconst CONTRACT_PACKAGE_KEY: &str = \"contract_package\";\n\n#[no_mangle]\npub extern \"C\" fn create_purse() {\n    // This should exercise common issues with unsafe providers in mint: new_uref, dictionary_put\n    // and put_key.\n    let _purse = system::create_purse();\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n\n        let entry_point = EntityEntryPoint::new(\n            ENTRY_POINT_NAME,\n            Parameters::default(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n\n        entry_points.add_entry_point(entry_point);\n\n        entry_points\n    };\n\n    let (contract_hash, _version) = storage::new_contract(\n        entry_points,\n        None,\n        Some(CONTRACT_PACKAGE_KEY.to_string()),\n        Some(ACCESS_KEY.to_string()),\n        None,\n    );\n\n    runtime::put_key(\n        CONTRACT_KEY,\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-1217-regression/Cargo.toml",
    "content": "[package]\nname = \"ee-1217-regression\"\nversion = \"0.1.0\"\nauthors = [\"Daniel Werner <dan.werner@gmail.com>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ee_1217_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-1217-regression/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::{string::ToString, vec};\n\n// casper_contract is required for it's [global_alloc] as well as handlers (such as panic_handler)\nuse casper_contract::contract_api::{runtime, storage, system};\nuse casper_types::{\n    runtime_args, system::auction, CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment,\n    EntryPointType, EntryPoints, PublicKey, U512,\n};\n\nconst PACKAGE_NAME: &str = \"call_auction\";\nconst PACKAGE_ACCESS_KEY_NAME: &str = \"call_auction_access\";\n\nconst METHOD_ADD_BID_CONTRACT_NAME: &str = \"add_bid_contract\";\nconst METHOD_ADD_BID_SESSION_NAME: &str = \"add_bid_session\";\nconst METHOD_WITHDRAW_BID_CONTRACT_NAME: &str = \"withdraw_bid_contract\";\nconst METHOD_WITHDRAW_BID_SESSION_NAME: &str = \"withdraw_bid_session\";\nconst METHOD_DELEGATE_CONTRACT_NAME: &str = \"delegate_contract\";\nconst METHOD_DELEGATE_SESSION_NAME: &str = \"delegate_session\";\nconst METHOD_UNDELEGATE_CONTRACT_NAME: &str = \"undelegate_contract\";\nconst METHOD_UNDELEGATE_SESSION_NAME: &str = \"undelegate_session\";\nconst METHOD_ACTIVATE_BID_CONTRACT_NAME: &str = \"activate_bid_contract\";\nconst METHOD_ACTIVATE_BID_SESSION_NAME: &str = \"activate_bid_session\";\n\nfn add_bid() {\n    let public_key: PublicKey = runtime::get_named_arg(auction::ARG_PUBLIC_KEY);\n    let auction = system::get_auction();\n    let args = runtime_args! 
{\n        auction::ARG_PUBLIC_KEY => public_key,\n        auction::ARG_AMOUNT => U512::from(2), // smaller amount results in Error::BondTooSmall\n        auction::ARG_DELEGATION_RATE => 42u8,\n    };\n    runtime::call_contract::<U512>(auction, auction::METHOD_ADD_BID, args);\n}\n\n#[no_mangle]\npub extern \"C\" fn add_bid_contract() {\n    add_bid()\n}\n\n#[no_mangle]\npub extern \"C\" fn add_bid_session() {\n    add_bid()\n}\n\npub fn withdraw_bid() {\n    let public_key: PublicKey = runtime::get_named_arg(auction::ARG_PUBLIC_KEY);\n    let auction = system::get_auction();\n    let args = runtime_args! {\n        auction::ARG_PUBLIC_KEY => public_key,\n        auction::ARG_AMOUNT => U512::one(),\n    };\n    runtime::call_contract::<U512>(auction, auction::METHOD_WITHDRAW_BID, args);\n}\n\n#[no_mangle]\npub extern \"C\" fn withdraw_bid_contract() {\n    withdraw_bid()\n}\n\n#[no_mangle]\npub extern \"C\" fn withdraw_bid_session() {\n    withdraw_bid()\n}\n\nfn activate_bid() {\n    let public_key: PublicKey = runtime::get_named_arg(auction::ARG_VALIDATOR);\n    let auction = system::get_auction();\n    let args = runtime_args! {\n        auction::ARG_VALIDATOR => public_key,\n    };\n    runtime::call_contract::<()>(auction, auction::METHOD_ACTIVATE_BID, args);\n}\n\n#[no_mangle]\npub extern \"C\" fn activate_bid_contract() {\n    activate_bid()\n}\n\n#[no_mangle]\npub extern \"C\" fn activate_bid_session() {\n    activate_bid()\n}\n\n#[no_mangle]\npub extern \"C\" fn delegate() {\n    let delegator_public_key: PublicKey = runtime::get_named_arg(auction::ARG_DELEGATOR);\n    let validator_public_key: PublicKey = runtime::get_named_arg(auction::ARG_VALIDATOR);\n    let auction = system::get_auction();\n    let args = runtime_args! 
{\n        auction::ARG_DELEGATOR => delegator_public_key,\n        auction::ARG_VALIDATOR => validator_public_key,\n        auction::ARG_AMOUNT => U512::one(),\n    };\n    runtime::call_contract::<U512>(auction, auction::METHOD_DELEGATE, args);\n}\n\n#[no_mangle]\npub extern \"C\" fn delegate_contract() {\n    delegate()\n}\n\n#[no_mangle]\npub extern \"C\" fn delegate_session() {\n    delegate()\n}\n\n#[no_mangle]\npub extern \"C\" fn undelegate() {\n    let delegator_public_key: PublicKey = runtime::get_named_arg(auction::ARG_DELEGATOR);\n    let validator_public_key: PublicKey = runtime::get_named_arg(auction::ARG_VALIDATOR);\n    let auction = system::get_auction();\n    let args = runtime_args! {\n        auction::ARG_DELEGATOR => delegator_public_key,\n        auction::ARG_VALIDATOR => validator_public_key,\n        auction::ARG_AMOUNT => U512::one(),\n    };\n    runtime::call_contract::<U512>(auction, auction::METHOD_UNDELEGATE, args);\n}\n\n#[no_mangle]\npub extern \"C\" fn undelegate_contract() {\n    undelegate()\n}\n\n#[no_mangle]\npub extern \"C\" fn undelegate_session() {\n    undelegate()\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n        let add_bid_session_entry_point = EntityEntryPoint::new(\n            METHOD_ADD_BID_SESSION_NAME.to_string(),\n            vec![],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        let add_bid_contract_entry_point = EntityEntryPoint::new(\n            METHOD_ADD_BID_CONTRACT_NAME.to_string(),\n            vec![],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        let withdraw_bid_session_entry_point = EntityEntryPoint::new(\n            METHOD_WITHDRAW_BID_SESSION_NAME.to_string(),\n            vec![],\n    
        CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        let withdraw_bid_contract_entry_point = EntityEntryPoint::new(\n            METHOD_WITHDRAW_BID_CONTRACT_NAME.to_string(),\n            vec![],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        let delegate_session_entry_point = EntityEntryPoint::new(\n            METHOD_DELEGATE_SESSION_NAME.to_string(),\n            vec![],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        let delegate_contract_entry_point = EntityEntryPoint::new(\n            METHOD_DELEGATE_CONTRACT_NAME.to_string(),\n            vec![],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        let undelegate_session_entry_point = EntityEntryPoint::new(\n            METHOD_UNDELEGATE_SESSION_NAME.to_string(),\n            vec![],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        let undelegate_contract_entry_point = EntityEntryPoint::new(\n            METHOD_UNDELEGATE_CONTRACT_NAME.to_string(),\n            vec![],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        let activate_bid_session_entry_point = EntityEntryPoint::new(\n            METHOD_ACTIVATE_BID_SESSION_NAME.to_string(),\n            vec![],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        let 
activate_bid_contract_entry_point = EntityEntryPoint::new(\n            METHOD_ACTIVATE_BID_CONTRACT_NAME.to_string(),\n            vec![],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(add_bid_session_entry_point);\n        entry_points.add_entry_point(add_bid_contract_entry_point);\n        entry_points.add_entry_point(withdraw_bid_session_entry_point);\n        entry_points.add_entry_point(withdraw_bid_contract_entry_point);\n        entry_points.add_entry_point(delegate_session_entry_point);\n        entry_points.add_entry_point(delegate_contract_entry_point);\n        entry_points.add_entry_point(undelegate_session_entry_point);\n        entry_points.add_entry_point(undelegate_contract_entry_point);\n        entry_points.add_entry_point(activate_bid_session_entry_point);\n        entry_points.add_entry_point(activate_bid_contract_entry_point);\n        entry_points\n    };\n\n    let (_contract_hash, _contract_version) = storage::new_contract(\n        entry_points,\n        None,\n        Some(PACKAGE_NAME.to_string()),\n        Some(PACKAGE_ACCESS_KEY_NAME.to_string()),\n        None,\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-1225-regression/Cargo.toml",
    "content": "[package]\nname = \"ee-1225-regression\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ee_1225_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-1225-regression/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::{\n    contract_api::{account, runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    runtime_args, system::handle_payment, ApiError, Phase, RuntimeArgs, URef, U512,\n};\n\nconst ARG_AMOUNT: &str = \"amount\";\n\n#[repr(u16)]\nenum Error {\n    InvalidPhase,\n}\n\nimpl From<Error> for ApiError {\n    fn from(e: Error) -> Self {\n        ApiError::User(e as u16)\n    }\n}\n\nfn get_payment_purse() -> URef {\n    runtime::call_contract(\n        system::get_handle_payment(),\n        handle_payment::METHOD_GET_PAYMENT_PURSE,\n        RuntimeArgs::default(),\n    )\n}\n\nfn set_refund_purse(new_refund_purse: URef) {\n    let args = runtime_args! {\n        handle_payment::ARG_PURSE => new_refund_purse,\n    };\n\n    runtime::call_contract(\n        system::get_handle_payment(),\n        handle_payment::METHOD_SET_REFUND_PURSE,\n        args,\n    )\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    if runtime::get_phase() != Phase::Payment {\n        runtime::revert(Error::InvalidPhase);\n    }\n\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n\n    // Attempt to get refund into a payment purse.\n    let payment_purse = get_payment_purse();\n    set_refund_purse(payment_purse);\n\n    // transfer amount from named purse to payment purse, which will be used to pay for execution\n    system::transfer_from_purse_to_purse(account::get_main_purse(), payment_purse, amount, None)\n        .unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-221-regression/Cargo.toml",
    "content": "[package]\nname = \"ee-221-regression\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@papierski.net>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ee_221_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-221-regression/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::contract_api::{runtime, storage};\nuse casper_types::Key;\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let res1 = runtime::get_key(\"nonexistinguref\");\n    assert!(res1.is_none());\n\n    let key = Key::URef(storage::new_uref(()));\n    runtime::put_key(\"nonexistinguref\", key);\n\n    let res2 = runtime::get_key(\"nonexistinguref\");\n\n    assert_eq!(res2, Some(key));\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-401-regression/Cargo.toml",
    "content": "[package]\nname = \"ee-401-regression\"\nversion = \"0.1.0\"\nauthors = [\"Henry Till <henrytill@gmail.com>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ee_401_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-401-regression/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\n\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    addressable_entity::Parameters, AddressableEntityHash, CLType, CLValue, EntityEntryPoint,\n    EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key, URef,\n};\n\nconst HELLO_EXT: &str = \"hello_ext\";\nconst CONTRACT_VERSION: &str = \"contract_version\";\n\n#[no_mangle]\npub extern \"C\" fn hello_ext() {\n    let test_string = String::from(\"Hello, world!\");\n    let test_uref: URef = storage::new_uref(test_string);\n    let return_value = CLValue::from_t(test_uref).unwrap_or_revert();\n    runtime::ret(return_value)\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n\n        let entry_point = EntityEntryPoint::new(\n            HELLO_EXT,\n            Parameters::new(),\n            CLType::URef,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n\n        entry_points.add_entry_point(entry_point);\n\n        entry_points\n    };\n    let (contract_hash, contract_version) =\n        storage::new_contract(entry_points, None, None, None, None);\n\n    runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into());\n    runtime::put_key(\n        HELLO_EXT,\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-401-regression-call/Cargo.toml",
    "content": "[package]\nname = \"ee-401-regression-call\"\nversion = \"0.1.0\"\nauthors = [\"Henry Till <henrytill@gmail.com>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ee_401_regression_call\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-401-regression-call/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::ToString;\n\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{AddressableEntityHash, ApiError, RuntimeArgs, URef};\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let contract_hash: AddressableEntityHash = runtime::get_key(\"hello_ext\")\n        .unwrap_or_revert_with(ApiError::GetKey)\n        .into_entity_hash_addr()\n        .unwrap_or_revert()\n        .into();\n\n    let result: URef =\n        runtime::call_contract(contract_hash.into(), \"hello_ext\", RuntimeArgs::default());\n\n    let value = storage::read(result);\n\n    assert_eq!(Ok(Some(\"Hello, world!\".to_string())), value);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-441-rng-state/Cargo.toml",
    "content": "[package]\nname = \"ee-441-rng-state\"\nversion = \"0.1.0\"\nauthors = [\"Henry Till <henrytill@gmail.com>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ee_441_rng_state\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-441-rng-state/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\n\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    addressable_entity::Parameters, CLType, CLValue, EntityEntryPoint, EntryPointAccess,\n    EntryPointPayment, EntryPointType, EntryPoints, Key, RuntimeArgs, URef, U512,\n};\n\nconst ARG_FLAG: &str = \"flag\";\n\n#[no_mangle]\npub extern \"C\" fn do_nothing() {\n    // Doesn't advance RNG of the runtime\n    runtime::ret(CLValue::from_t(\"Hello, world!\").unwrap_or_revert())\n}\n\n#[no_mangle]\npub extern \"C\" fn do_something() {\n    // Advances RNG of the runtime\n    let test_string = String::from(\"Hello, world!\");\n\n    let test_uref: URef = storage::new_uref(test_string);\n    let return_value = CLValue::from_t(test_uref).unwrap_or_revert();\n    runtime::ret(return_value)\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let flag: String = runtime::get_named_arg(ARG_FLAG);\n\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n\n        let do_nothing_entry_point = EntityEntryPoint::new(\n            \"do_nothing\",\n            Parameters::default(),\n            CLType::String,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n\n        entry_points.add_entry_point(do_nothing_entry_point);\n\n        let do_something_entry_point = EntityEntryPoint::new(\n            \"do_something\",\n            Parameters::default(),\n            CLType::URef,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n\n        entry_points.add_entry_point(do_something_entry_point);\n\n        entry_points\n    };\n    let (contract_hash, _contract_version) =\n        storage::new_contract(entry_points, None, None, None, None);\n\n    if flag == \"pass1\" {\n        // 
Two calls should forward the internal RNG. This pass is a baseline.\n        let uref1: URef = storage::new_uref(U512::from(0));\n        let uref2: URef = storage::new_uref(U512::from(1));\n        runtime::put_key(\"uref1\", Key::URef(uref1));\n        runtime::put_key(\"uref2\", Key::URef(uref2));\n    } else if flag == \"pass2\" {\n        let uref1: URef = storage::new_uref(U512::from(0));\n        runtime::put_key(\"uref1\", Key::URef(uref1));\n        // do_nothing doesn't do anything. It SHOULD not forward the internal RNG.\n        let result: String =\n            runtime::call_contract(contract_hash, \"do_nothing\", RuntimeArgs::default());\n        assert_eq!(result, \"Hello, world!\");\n        let uref2: URef = storage::new_uref(U512::from(1));\n        runtime::put_key(\"uref2\", Key::URef(uref2));\n    } else if flag == \"pass3\" {\n        let uref1: URef = storage::new_uref(U512::from(0));\n        runtime::put_key(\"uref1\", Key::URef(uref1));\n        // do_something returns a new uref, and it should forward the internal RNG.\n        let uref2: URef =\n            runtime::call_contract(contract_hash, \"do_something\", RuntimeArgs::default());\n        runtime::put_key(\"uref2\", Key::URef(uref2));\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-460-regression/Cargo.toml",
    "content": "[package]\nname = \"ee-460-regression\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@papierski.net>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ee_460_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-460-regression/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::contract_api::{runtime, system};\nuse casper_types::{account::AccountHash, system::mint, ApiError, U512};\n\nconst ARG_AMOUNT: &str = \"amount\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n    let account_hash = AccountHash::new([42; 32]);\n    let result = system::transfer_to_account(account_hash, amount, None);\n    let expected_error: ApiError = mint::Error::InsufficientFunds.into();\n    assert_eq!(result, Err(expected_error))\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-532-regression/Cargo.toml",
    "content": "[package]\nname = \"ee-532-regression\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ee_532_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-532-regression/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n// Required to bring `#[panic_handler]` from `contract::handlers` into scope.\n#![allow(unused_imports, clippy::single_component_path_imports)]\nuse casper_contract;\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    // Does nothing\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-536-regression/Cargo.toml",
    "content": "[package]\nname = \"ee-536-regression\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ee_536_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-536-regression/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::{\n    contract_api::{account, runtime},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    account::{\n        AccountHash, ActionType, RemoveKeyFailure, SetThresholdFailure, UpdateKeyFailure, Weight,\n    },\n    ApiError,\n};\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    // Starts with deployment=1, key_management=1\n    let key_1 = AccountHash::new([42; 32]);\n    let key_2 = AccountHash::new([43; 32]);\n\n    // Total keys weight = 11 (identity + new key's weight)\n    account::add_associated_key(key_1, Weight::new(10)).unwrap_or_revert();\n    account::add_associated_key(key_2, Weight::new(11)).unwrap_or_revert();\n\n    account::set_action_threshold(ActionType::KeyManagement, Weight::new(13)).unwrap_or_revert();\n    account::set_action_threshold(ActionType::Deployment, Weight::new(10)).unwrap_or_revert();\n\n    match account::remove_associated_key(key_2) {\n        Err(RemoveKeyFailure::ThresholdViolation) => {\n            // Shouldn't be able to remove key because key weight == 11 and\n            // removing would violate the constraint\n        }\n        Err(_) => runtime::revert(ApiError::User(300)),\n        Ok(_) => runtime::revert(ApiError::User(301)),\n    }\n\n    match account::set_action_threshold(ActionType::KeyManagement, Weight::new(255)) {\n        Err(SetThresholdFailure::InsufficientTotalWeight) => {\n            // Changing key management threshold to this value would lock down\n            // account for future operations\n        }\n        Err(_) => runtime::revert(ApiError::User(400)),\n        Ok(_) => runtime::revert(ApiError::User(401)),\n    }\n    // Key management threshold is 11, so changing threshold of key from 10 to 11\n    // would violate\n    match account::update_associated_key(key_2, Weight::new(1)) {\n        Err(UpdateKeyFailure::ThresholdViolation) => {\n            // Changing it would mean the total weight would be identity(1) 
+\n            // key_1(10) + key_2(1) < key_mgmt(13)\n        }\n        Err(_) => runtime::revert(ApiError::User(500)),\n        Ok(_) => runtime::revert(ApiError::User(501)),\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-539-regression/Cargo.toml",
    "content": "[package]\nname = \"ee-539-regression\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@papierski.net>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ee_539_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-539-regression/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::{\n    contract_api::{account, runtime},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::account::{AccountHash, ActionType, Weight};\n\nconst ARG_KEY_MANAGEMENT_THRESHOLD: &str = \"key_management_threshold\";\nconst ARG_DEPLOYMENT_THRESHOLD: &str = \"deployment_threshold\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    account::add_associated_key(AccountHash::new([123; 32]), Weight::new(254)).unwrap_or_revert();\n    let key_management_threshold: Weight = runtime::get_named_arg(ARG_KEY_MANAGEMENT_THRESHOLD);\n    let deployment_threshold: Weight = runtime::get_named_arg(ARG_DEPLOYMENT_THRESHOLD);\n\n    account::set_action_threshold(ActionType::KeyManagement, key_management_threshold)\n        .unwrap_or_revert();\n    account::set_action_threshold(ActionType::Deployment, deployment_threshold).unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-549-regression/Cargo.toml",
    "content": "[package]\nname = \"ee-549-regression\"\nversion = \"0.1.0\"\nauthors = [\"Michael Birch <birchmd@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ee_549_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-549-regression/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::contract_api::{runtime, system};\nuse casper_types::runtime_args;\n\nconst SET_REFUND_PURSE: &str = \"set_refund_purse\";\nconst ARG_PURSE: &str = \"purse\";\n\nfn malicious_revenue_stealing_contract() {\n    let contract_hash = system::get_handle_payment();\n\n    let args = runtime_args! {\n        ARG_PURSE => system::create_purse(),\n    };\n\n    runtime::call_contract::<()>(contract_hash, SET_REFUND_PURSE, args);\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    malicious_revenue_stealing_contract()\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-550-regression/Cargo.toml",
    "content": "[package]\nname = \"ee-550-regression\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@papierski.net>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ee_550_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-550-regression/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\n\nuse casper_contract::{\n    contract_api::{account, runtime},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    account::{AccountHash, ActionType, Weight},\n    ApiError,\n};\n\n#[repr(u16)]\nenum Error {\n    AddKey1 = 0,\n    AddKey2 = 1,\n    SetActionThreshold = 2,\n    RemoveKey = 3,\n    UpdateKey = 4,\n    UnknownPass = 5,\n}\n\nimpl From<Error> for ApiError {\n    fn from(error: Error) -> Self {\n        ApiError::User(error as u16)\n    }\n}\n\nconst KEY_1_ADDR: [u8; 32] = [100; 32];\nconst KEY_2_ADDR: [u8; 32] = [101; 32];\n\nconst ARG_PASS: &str = \"pass\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let pass: String = runtime::get_named_arg(ARG_PASS);\n    match pass.as_str() {\n        \"init_remove\" => {\n            account::add_associated_key(AccountHash::new(KEY_1_ADDR), Weight::new(2))\n                .unwrap_or_revert_with(Error::AddKey1);\n            account::add_associated_key(AccountHash::new(KEY_2_ADDR), Weight::new(255))\n                .unwrap_or_revert_with(Error::AddKey2);\n            account::set_action_threshold(ActionType::KeyManagement, Weight::new(254))\n                .unwrap_or_revert_with(Error::SetActionThreshold);\n        }\n        \"test_remove\" => {\n            // Deployed with two keys of weights 2 and 255 (total saturates at 255) to satisfy new\n            // threshold\n            account::remove_associated_key(AccountHash::new(KEY_1_ADDR))\n                .unwrap_or_revert_with(Error::RemoveKey);\n        }\n\n        \"init_update\" => {\n            account::add_associated_key(AccountHash::new(KEY_1_ADDR), Weight::new(3))\n                .unwrap_or_revert_with(Error::AddKey1);\n            account::add_associated_key(AccountHash::new(KEY_2_ADDR), Weight::new(255))\n                .unwrap_or_revert_with(Error::AddKey2);\n            account::set_action_threshold(ActionType::KeyManagement, 
Weight::new(254))\n                .unwrap_or_revert_with(Error::SetActionThreshold);\n        }\n        \"test_update\" => {\n            // Deployed with two keys of weights 3 and 255 (total saturates at 255) to satisfy new\n            // threshold\n            account::update_associated_key(AccountHash::new(KEY_1_ADDR), Weight::new(1))\n                .unwrap_or_revert_with(Error::UpdateKey);\n        }\n        _ => {\n            runtime::revert(Error::UnknownPass);\n        }\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-572-regression-create/Cargo.toml",
    "content": "[package]\nname = \"ee-572-regression-create\"\nversion = \"0.1.0\"\nauthors = [\"Henry Till <henrytill@gmail.com>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ee_572_regression_create\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-572-regression-create/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse core::convert::Into;\n\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    addressable_entity::Parameters, AccessRights, AddressableEntityHash, CLType, CLValue,\n    EntityEntryPoint, EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key, URef,\n};\n\nconst DATA: &str = \"data\";\nconst CONTRACT_NAME: &str = \"create\";\nconst CONTRACT_VERSION: &str = \"contract_version\";\n\n#[no_mangle]\npub extern \"C\" fn create() {\n    let reference: URef = storage::new_uref(DATA);\n    let read_only_reference: URef = URef::new(reference.addr(), AccessRights::READ);\n    let return_value = CLValue::from_t(read_only_reference).unwrap_or_revert();\n    runtime::ret(return_value)\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n\n        let entry_point = EntityEntryPoint::new(\n            \"create\",\n            Parameters::default(),\n            CLType::URef,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n\n        entry_points.add_entry_point(entry_point);\n\n        entry_points\n    };\n    let (contract_hash, contract_version) =\n        storage::new_contract(entry_points, None, None, None, None);\n    runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into());\n    runtime::put_key(\n        CONTRACT_NAME,\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-572-regression-escalate/Cargo.toml",
    "content": "[package]\nname = \"ee-572-regression-escalate\"\nversion = \"0.1.0\"\nauthors = [\"Henry Till <henrytill@gmail.com>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ee_572_regression_escalate\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-572-regression-escalate/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::contract_api::{runtime, storage};\nuse casper_types::{AccessRights, AddressableEntityHash, RuntimeArgs, URef};\n\nconst REPLACEMENT_DATA: &str = \"bawitdaba\";\nconst ARG_CONTRACT_HASH: &str = \"contract_hash\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH);\n\n    let reference: URef =\n        runtime::call_contract(contract_hash.into(), \"create\", RuntimeArgs::default());\n    let forged_reference: URef = URef::new(reference.addr(), AccessRights::READ_ADD_WRITE);\n    storage::write(forged_reference, REPLACEMENT_DATA)\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-584-regression/Cargo.toml",
    "content": "[package]\nname = \"ee-584-regression\"\nversion = \"0.1.0\"\nauthors = [\"Michael Birch <birchmd@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ee_584_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-584-regression/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\n\nuse casper_contract::contract_api::{runtime, storage};\nuse casper_types::ApiError;\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let _ = storage::new_uref(String::from(\"Hello, World!\"));\n    runtime::revert(ApiError::User(999))\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-597-regression/Cargo.toml",
    "content": "[package]\nname = \"ee-597-regression\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@papierski.net>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ee_597_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-597-regression/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse casper_contract::contract_api::{runtime, system};\nuse casper_types::{\n    contracts::ContractHash,\n    runtime_args,\n    system::auction::{self, DelegationRate},\n    PublicKey, SecretKey, U512,\n};\n\nconst DELEGATION_RATE: DelegationRate = 42;\n\nfn bond(contract_hash: ContractHash, bond_amount: U512) {\n    let valid_secret_key = SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap();\n    let valid_public_key = PublicKey::from(&valid_secret_key);\n\n    let runtime_args = runtime_args! {\n        auction::ARG_PUBLIC_KEY => valid_public_key,\n        auction::ARG_DELEGATION_RATE => DELEGATION_RATE,\n        auction::ARG_AMOUNT => bond_amount,\n    };\n    runtime::call_contract::<U512>(contract_hash, auction::METHOD_ADD_BID, runtime_args);\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    // bond amount == 0 should fail\n    bond(system::get_auction(), U512::from(0));\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-598-regression/Cargo.toml",
    "content": "[package]\nname = \"ee-598-regression\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ee_598_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-598-regression/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse auction::DelegationRate;\nuse casper_contract::contract_api::{runtime, system};\nuse casper_types::{contracts::ContractHash, runtime_args, system::auction, PublicKey, U512};\n\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_PUBLIC_KEY: &str = \"public_key\";\nconst DELEGATION_RATE: DelegationRate = 42;\n\nfn add_bid(contract_hash: ContractHash, public_key: PublicKey, bond_amount: U512) {\n    let runtime_args = runtime_args! {\n        auction::ARG_PUBLIC_KEY => public_key,\n        auction::ARG_DELEGATION_RATE => DELEGATION_RATE,\n        auction::ARG_AMOUNT => bond_amount,\n    };\n    runtime::call_contract::<U512>(contract_hash, auction::METHOD_ADD_BID, runtime_args);\n}\n\nfn withdraw_bid(contract_hash: ContractHash, public_key: PublicKey, unbond_amount: U512) -> U512 {\n    let args = runtime_args! {\n        auction::ARG_AMOUNT => unbond_amount,\n        auction::ARG_PUBLIC_KEY => public_key,\n    };\n    runtime::call_contract(contract_hash, auction::METHOD_WITHDRAW_BID, args)\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n    let public_key: PublicKey = runtime::get_named_arg(ARG_PUBLIC_KEY);\n    // unbond attempt for more than is staked should fail\n    let contract_hash = system::get_auction();\n    add_bid(contract_hash, public_key.clone(), amount);\n    withdraw_bid(contract_hash, public_key, amount + 1);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-599-regression/Cargo.toml",
    "content": "[package]\nname = \"ee-599-regression\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@papierski.net>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ee_599_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-599-regression/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n#![allow(unused_imports)]\n\nextern crate alloc;\n\nuse alloc::{collections::BTreeMap, string::String};\n\nuse casper_contract::{\n    contract_api::{account, runtime, storage, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    account::AccountHash, addressable_entity::Parameters, contracts::NamedKeys,\n    AddressableEntityHash, ApiError, CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment,\n    EntryPointType, EntryPoints, Key, RuntimeArgs, URef, U512,\n};\n\nconst DONATION_AMOUNT: u64 = 1;\n// Different name just to make sure any routine that deals with named keys coming from different\n// sources wouldn't overlap (if ever that's possible)\nconst DONATION_PURSE_COPY: &str = \"donation_purse_copy\";\nconst DONATION_PURSE: &str = \"donation_purse\";\nconst MAINTAINER: &str = \"maintainer\";\nconst METHOD_CALL: &str = \"call\";\nconst METHOD_INSTALL: &str = \"install\";\nconst TRANSFER_FROM_PURSE_TO_ACCOUNT: &str = \"transfer_from_purse_to_account_ext\";\nconst TRANSFER_FROM_PURSE_TO_PURSE: &str = \"transfer_from_purse_to_purse_ext\";\nconst TRANSFER_FUNDS_KEY: &str = \"transfer_funds\";\nconst TRANSFER_TO_ACCOUNT: &str = \"transfer_to_account_ext\";\n\nconst ARG_METHOD: &str = \"method\";\nconst ARG_CONTRACTKEY: &str = \"contract_key\";\nconst ARG_SUBCONTRACTMETHODFWD: &str = \"sub_contract_method_fwd\";\n\n#[repr(u16)]\nenum ContractError {\n    InvalidDelegateMethod = 0,\n}\n\nimpl From<ContractError> for ApiError {\n    fn from(error: ContractError) -> Self {\n        ApiError::User(error as u16)\n    }\n}\n\nfn get_maintainer_account_hash() -> Result<AccountHash, ApiError> {\n    // Obtain maintainer address from the contract's named keys\n    let maintainer_key = runtime::get_key(MAINTAINER).ok_or(ApiError::GetKey)?;\n    maintainer_key\n        .into_account()\n        .ok_or(ApiError::UnexpectedKeyVariant)\n}\n\nfn get_donation_purse() -> Result<URef, ApiError> {\n    let 
donation_key = runtime::get_key(DONATION_PURSE).ok_or(ApiError::GetKey)?;\n    donation_key\n        .into_uref()\n        .ok_or(ApiError::UnexpectedKeyVariant)\n}\n\n#[no_mangle]\npub extern \"C\" fn transfer_from_purse_to_purse_ext() {\n    // Donation box is the purse funds will be transferred into\n    let donation_purse = get_donation_purse().unwrap_or_revert();\n\n    let main_purse = account::get_main_purse();\n\n    system::transfer_from_purse_to_purse(\n        main_purse,\n        donation_purse,\n        U512::from(DONATION_AMOUNT),\n        None,\n    )\n    .unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn get_main_purse_ext() {}\n\n#[no_mangle]\npub extern \"C\" fn transfer_from_purse_to_account_ext() {\n    let main_purse = account::get_main_purse();\n    // This is the address of account which installed the contract\n    let maintainer_account_hash = get_maintainer_account_hash().unwrap_or_revert();\n    system::transfer_from_purse_to_account(\n        main_purse,\n        maintainer_account_hash,\n        U512::from(DONATION_AMOUNT),\n        None,\n    )\n    .unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn transfer_to_account_ext() {\n    // This is the address of account which installed the contract\n    let maintainer_account_hash = get_maintainer_account_hash().unwrap_or_revert();\n    system::transfer_to_account(maintainer_account_hash, U512::from(DONATION_AMOUNT), None)\n        .unwrap_or_revert();\n    let _main_purse = account::get_main_purse();\n}\n\n/// Registers a function and saves it in callers named keys\nfn delegate() -> Result<(), ApiError> {\n    let method: String = runtime::get_named_arg(ARG_METHOD);\n    match method.as_str() {\n        METHOD_INSTALL => {\n            // Create a purse that should be known to the contract regardless of the\n            // calling context still owned by the account that deploys the contract\n            let purse = system::create_purse();\n            let maintainer = 
runtime::get_caller();\n            // Keys below will make it possible to use within the called contract\n            let known_keys: NamedKeys = {\n                let mut keys = NamedKeys::new();\n                // \"donation_purse\" is the purse owner of the contract can transfer funds from\n                // callers\n                keys.insert(DONATION_PURSE.into(), purse.into());\n                // \"maintainer\" is the person who installed this contract\n                keys.insert(MAINTAINER.into(), Key::Account(maintainer));\n                keys\n            };\n            // Install the contract with associated owner-related keys\n            // let contract_ref = storage::store_function_at_hash(TRANSFER_FUNDS_EXT, known_keys);\n\n            let entry_points = {\n                let mut entry_points = EntryPoints::new();\n\n                let entry_point_1 = EntityEntryPoint::new(\n                    TRANSFER_FROM_PURSE_TO_ACCOUNT,\n                    Parameters::default(),\n                    CLType::Unit,\n                    EntryPointAccess::Public,\n                    EntryPointType::Called,\n                    EntryPointPayment::Caller,\n                );\n\n                entry_points.add_entry_point(entry_point_1);\n\n                let entry_point_2 = EntityEntryPoint::new(\n                    TRANSFER_TO_ACCOUNT,\n                    Parameters::default(),\n                    CLType::Unit,\n                    EntryPointAccess::Public,\n                    EntryPointType::Called,\n                    EntryPointPayment::Caller,\n                );\n\n                entry_points.add_entry_point(entry_point_2);\n\n                let entry_point_3 = EntityEntryPoint::new(\n                    TRANSFER_TO_ACCOUNT,\n                    Parameters::default(),\n                    CLType::Unit,\n                    EntryPointAccess::Public,\n                    EntryPointType::Called,\n                    EntryPointPayment::Caller,\n 
               );\n\n                entry_points.add_entry_point(entry_point_3);\n\n                let entry_point_4 = EntityEntryPoint::new(\n                    TRANSFER_FROM_PURSE_TO_PURSE,\n                    Parameters::default(),\n                    CLType::Unit,\n                    EntryPointAccess::Public,\n                    EntryPointType::Called,\n                    EntryPointPayment::Caller,\n                );\n\n                entry_points.add_entry_point(entry_point_4);\n\n                entry_points\n            };\n\n            let (contract_hash, _contract_version) =\n                storage::new_contract(entry_points, Some(known_keys), None, None, None);\n            runtime::put_key(\n                TRANSFER_FUNDS_KEY,\n                Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n            );\n            // For easy access in outside world here `donation` purse is also attached\n            // to the account\n            runtime::put_key(DONATION_PURSE_COPY, purse.into());\n        }\n        METHOD_CALL => {\n            // This comes from outside i.e. after deploying the contract, this key is queried,\n            // and then passed into the call\n            let contract_key: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACTKEY);\n\n            // This is a method that's gets forwarded into the sub contract\n            let subcontract_method: String = runtime::get_named_arg(ARG_SUBCONTRACTMETHODFWD);\n            runtime::call_contract::<()>(\n                contract_key.into(),\n                &subcontract_method,\n                RuntimeArgs::default(),\n            );\n        }\n        _ => return Err(ContractError::InvalidDelegateMethod.into()),\n    }\n    Ok(())\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    delegate().unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-601-regression/Cargo.toml",
    "content": "[package]\nname = \"ee-601-regression\"\nversion = \"0.1.0\"\nauthors = [\"Ed Hastings <ed@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ee_601_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-601-regression/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::{String, ToString};\n\nuse casper_contract::{\n    contract_api::{account, runtime, storage, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{ApiError, Phase, RuntimeArgs, URef, U512};\n\nconst GET_PAYMENT_PURSE: &str = \"get_payment_purse\";\nconst NEW_UREF_RESULT_UREF_NAME: &str = \"new_uref_result\";\nconst ARG_AMOUNT: &str = \"amount\";\n\n#[repr(u16)]\nenum Error {\n    InvalidPhase = 0,\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let phase = runtime::get_phase();\n    if phase == Phase::Payment {\n        let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n\n        let payment_purse: URef = runtime::call_contract(\n            system::get_handle_payment(),\n            GET_PAYMENT_PURSE,\n            RuntimeArgs::default(),\n        );\n\n        system::transfer_from_purse_to_purse(account::get_main_purse(), payment_purse, amount, None)\n            .unwrap_or_revert()\n    }\n\n    let value: Option<&str> = {\n        match phase {\n            Phase::Payment => Some(\"payment\"),\n            Phase::Session => Some(\"session\"),\n            _ => None,\n        }\n    };\n    let value = value.unwrap_or_revert_with(ApiError::User(Error::InvalidPhase as u16));\n    let result_key = storage::new_uref(value.to_string()).into();\n    let mut uref_name: String = NEW_UREF_RESULT_UREF_NAME.to_string();\n    uref_name.push('-');\n    uref_name.push_str(value);\n    runtime::put_key(&uref_name, result_key);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-771-regression/Cargo.toml",
    "content": "[package]\nname = \"ee-771-regression\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@papierski.net>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ee_771_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-771-regression/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::ToString;\n\nuse casper_contract::contract_api::{runtime, storage};\nuse casper_types::{\n    addressable_entity::Parameters,\n    contracts::{ContractHash, ContractVersion},\n    AddressableEntityHash, CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment,\n    EntryPointType, EntryPoints, Key, NamedKeys, RuntimeArgs,\n};\n\nconst ENTRY_POINT_NAME: &str = \"contract_ext\";\nconst CONTRACT_KEY: &str = \"contract\";\n\n#[no_mangle]\npub extern \"C\" fn contract_ext() {\n    match runtime::get_key(CONTRACT_KEY) {\n        Some(contract_key) => {\n            // Calls a stored contract if exists.\n            runtime::call_contract(\n                contract_key\n                    .into_entity_hash_addr()\n                    .expect(\"should be a hash\")\n                    .into(),\n                \"contract_ext\",\n                RuntimeArgs::default(),\n            )\n        }\n        None => {\n            // If given key doesn't exist it's the tail call, and an error is triggered.\n            let entry_points = {\n                let mut entry_points = EntryPoints::new();\n\n                let entry_point = EntityEntryPoint::new(\n                    \"functiondoesnotexist\",\n                    Parameters::default(),\n                    CLType::Unit,\n                    EntryPointAccess::Public,\n                    EntryPointType::Called,\n                    EntryPointPayment::Caller,\n                );\n\n                entry_points.add_entry_point(entry_point);\n\n                entry_points\n            };\n            storage::new_contract(entry_points, None, None, None, None);\n        }\n    }\n}\n\nfn store(named_keys: NamedKeys) -> (ContractHash, ContractVersion) {\n    // extern \"C\" fn call(named_keys: NamedKeys) {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n\n        let entry_point = 
EntityEntryPoint::new(\n            ENTRY_POINT_NAME,\n            Parameters::default(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n\n        entry_points.add_entry_point(entry_point);\n\n        entry_points\n    };\n    storage::new_contract(entry_points, Some(named_keys), None, None, None)\n}\n\nfn install() -> ContractHash {\n    let (contract_hash, _contract_version) = store(NamedKeys::new());\n\n    let mut keys = NamedKeys::new();\n    keys.insert(\n        CONTRACT_KEY.to_string(),\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n    let (contract_hash, _contract_version) = store(keys);\n\n    let mut keys_2 = NamedKeys::new();\n    keys_2.insert(\n        CONTRACT_KEY.to_string(),\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n    let (contract_hash, _contract_version) = store(keys_2);\n\n    runtime::put_key(\n        CONTRACT_KEY,\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n\n    contract_hash\n}\n\nfn dispatch(contract_hash: ContractHash) {\n    runtime::call_contract(contract_hash, \"contract_ext\", RuntimeArgs::default())\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let contract_key = install();\n    dispatch(contract_key)\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-966-regression/Cargo.toml",
    "content": "[package]\nname = \"ee-966-regression\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ee_966_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n"
  },
  {
    "path": "smart_contracts/contracts/test/ee-966-regression/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n#![allow(internal_features)]\n#![feature(lang_items)]\n\nextern crate core;\n\n#[cfg(target_arch = \"wasm32\")]\nuse core::arch::wasm32;\n\nconst MAX_MEMORY_PAGES: usize = 64;\nconst GROW_MARGIN: usize = 2;\n\nmod internal_ffi {\n    extern \"C\" {\n        pub fn casper_revert(status: u32) -> !;\n    }\n}\n\n#[repr(u32)]\npub enum ApiError {\n    OutOfMemory = 20,\n    Unhandled = 31,\n}\n\nfn revert(value: ApiError) -> ! {\n    unsafe {\n        internal_ffi::casper_revert(value as u32);\n    }\n}\n\n#[cfg(target_arch = \"wasm32\")]\nconst DEFAULT_MEMORY_INDEX: u32 = 0; // currently wasm spec supports only single memory\n\n#[cfg(target_arch = \"wasm32\")]\npub fn memory_size() -> usize {\n    wasm32::memory_size(DEFAULT_MEMORY_INDEX)\n}\n\n#[cfg(not(target_arch = \"wasm32\"))]\npub fn memory_size() -> usize {\n    revert(ApiError::Unhandled)\n}\n\n#[cfg(target_arch = \"wasm32\")]\npub fn memory_grow(new_pages: usize) {\n    let ptr = wasm32::memory_grow(DEFAULT_MEMORY_INDEX, new_pages);\n\n    if ptr == usize::MAX {\n        revert(ApiError::OutOfMemory);\n    }\n}\n\n#[cfg(not(target_arch = \"wasm32\"))]\npub fn memory_grow(_: usize) {\n    revert(ApiError::Unhandled)\n}\n\n#[panic_handler]\npub fn panic(_info: &::core::panic::PanicInfo) -> ! {\n    revert(ApiError::OutOfMemory)\n}\n\n#[lang = \"eh_personality\"]\nextern \"C\" fn eh_personality() {}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let initial_memory_pages = memory_size();\n\n    // Grow memory into exactly MAX_MEMORY_PAGES - GROW_MARGIN\n    memory_grow(MAX_MEMORY_PAGES - initial_memory_pages - GROW_MARGIN);\n    assert_eq!(memory_size(), MAX_MEMORY_PAGES - GROW_MARGIN);\n\n    // Now we are occupying exactly MAX_MEMORY_PAGES\n    memory_grow(GROW_MARGIN);\n    assert_eq!(memory_size(), MAX_MEMORY_PAGES);\n\n    // This will fail\n    memory_grow(1);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/endless-loop/Cargo.toml",
    "content": "[package]\nname = \"endless-loop\"\nversion = \"0.1.0\"\nauthors = [\"Henry Till <henrytill@gmail.com>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"endless_loop\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/endless-loop/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[macro_use]\nextern crate alloc;\n\nuse casper_contract::contract_api::{account, storage};\nuse casper_types::bytesrepr::Bytes;\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let uref = storage::new_uref(());\n    loop {\n        let _ = account::get_main_purse();\n        let data: Bytes = vec![0u8; 4096].into();\n        storage::write(uref, data);\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/endless-loop-with-effects/Cargo.toml",
    "content": "[package]\nname = \"endless-loop-with-effects\"\nversion = \"0.1.0\"\nauthors = [\"Alex Sardan <alexandru@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"endless_loop_with_effects\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/endless-loop-with-effects/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse casper_contract::contract_api::{account, runtime, storage};\nuse casper_types::Key;\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let mut data: u32 = 1;\n    let uref = storage::new_uref(data);\n    runtime::put_key(\"new_key\", Key::from(uref));\n    loop {\n        let _ = account::get_main_purse();\n        data += 1;\n        storage::write(uref, data);\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/expensive-calculation/Cargo.toml",
    "content": "[package]\nname = \"expensive-calculation\"\nversion = \"0.1.0\"\nauthors = [\"Bartłomiej Kamiński <bart@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"expensive_calculation\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/expensive-calculation/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse casper_contract::contract_api::{runtime, storage};\nuse casper_types::{\n    addressable_entity::Parameters, AddressableEntityHash, CLType, EntityEntryPoint,\n    EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key,\n};\n\nconst ENTRY_FUNCTION_NAME: &str = \"calculate\";\n\n#[no_mangle]\npub extern \"C\" fn calculate() -> u64 {\n    let large_prime: u64 = 0xffff_fffb;\n\n    let mut result: u64 = 42;\n    // calculate 42^4242 mod large_prime\n    for _ in 1..4242 {\n        result *= 42;\n        result %= large_prime;\n    }\n\n    result\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n        let entry_point = EntityEntryPoint::new(\n            ENTRY_FUNCTION_NAME,\n            Parameters::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n        entry_points\n    };\n\n    let (contract_hash, contract_version) =\n        storage::new_contract(entry_points, None, None, None, None);\n    runtime::put_key(\n        \"contract_version\",\n        storage::new_uref(contract_version).into(),\n    );\n    runtime::put_key(\n        \"expensive-calculation\",\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/finalize-payment/Cargo.toml",
    "content": "[package]\nname = \"finalize-payment\"\nversion = \"0.1.0\"\nauthors = [\"Michael Birch <birchmd@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"finalize_payment\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/finalize-payment/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\n\nuse casper_contract::{\n    contract_api::{account, runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{account::AccountHash, contracts::ContractHash, runtime_args, URef, U512};\n\npub const ARG_AMOUNT: &str = \"amount\";\npub const ARG_AMOUNT_SPENT: &str = \"amount_spent\";\npub const ARG_REFUND_FLAG: &str = \"refund\";\npub const ARG_PURSE: &str = \"purse\";\npub const ARG_ACCOUNT_KEY: &str = \"account\";\npub const ARG_PURSE_NAME: &str = \"purse_name\";\n\nfn set_refund_purse(contract_hash: ContractHash, purse: URef) {\n    runtime::call_contract(\n        contract_hash,\n        \"set_refund_purse\",\n        runtime_args! {\n            ARG_PURSE => purse,\n        },\n    )\n}\n\nfn get_payment_purse(contract_hash: ContractHash) -> URef {\n    runtime::call_contract(contract_hash, \"get_payment_purse\", runtime_args! {})\n}\n\nfn submit_payment(contract_hash: ContractHash, amount: U512) {\n    let payment_purse = get_payment_purse(contract_hash);\n    let main_purse = account::get_main_purse();\n    system::transfer_from_purse_to_purse(main_purse, payment_purse, amount, None).unwrap_or_revert()\n}\n\nfn finalize_payment(contract_hash: ContractHash, amount_spent: U512, account: AccountHash) {\n    runtime::call_contract(\n        contract_hash,\n        \"finalize_payment\",\n        runtime_args! 
{\n            ARG_AMOUNT => amount_spent,\n            ARG_ACCOUNT_KEY => account,\n        },\n    )\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let contract_hash = system::get_handle_payment();\n\n    let payment_amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n    let refund_purse_flag: u8 = runtime::get_named_arg(ARG_REFUND_FLAG);\n    let maybe_amount_spent: Option<U512> = runtime::get_named_arg(ARG_AMOUNT_SPENT);\n    let maybe_account: Option<AccountHash> = runtime::get_named_arg(ARG_ACCOUNT_KEY);\n    let purse_name: String = runtime::get_named_arg(ARG_PURSE_NAME);\n\n    submit_payment(contract_hash, payment_amount);\n\n    if refund_purse_flag != 0 {\n        let refund_purse = {\n            let stored_purse_key = runtime::get_key(&purse_name).unwrap_or_revert();\n            stored_purse_key.into_uref().unwrap_or_revert()\n        };\n        set_refund_purse(contract_hash, refund_purse);\n    }\n\n    if let (Some(amount_spent), Some(account)) = (maybe_amount_spent, maybe_account) {\n        finalize_payment(contract_hash, amount_spent, account);\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/generic-hash/Cargo.toml",
    "content": "[package]\nname = \"generic-hash\"\nversion = \"0.1.0\"\nauthors = [\"Igor Bunar <igor@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"generic_hash\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/generic-hash/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\nuse alloc::string::String;\n\nuse casper_contract::contract_api::{cryptography, runtime};\nuse casper_types::crypto::HashAlgorithm;\n\nconst ARG_ALGORITHM: &str = \"algorithm\";\nconst ARG_DATA: &str = \"data\";\nconst ARG_EXPECTED: &str = \"expected\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let data: String = runtime::get_named_arg(ARG_DATA);\n    let expected: [u8; 32] = runtime::get_named_arg(ARG_EXPECTED);\n    let algorithm_repr: u8 = runtime::get_named_arg(ARG_ALGORITHM);\n\n    let algorithm = HashAlgorithm::try_from(algorithm_repr).expect(\"Invalid enum repr\");\n    let hash = cryptography::generic_hash(data, algorithm);\n\n    assert_eq!(hash, expected, \"Hash mismatch\");\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/get-arg/Cargo.toml",
    "content": "[package]\nname = \"get-arg\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"get_arg\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/get-arg/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\n\nuse casper_contract::contract_api::runtime;\nuse casper_types::U512;\n\nconst ARG_VALUE0: &str = \"value0\";\nconst ARG_VALUE1: &str = \"value1\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let value0: String = runtime::get_named_arg(ARG_VALUE0);\n    assert_eq!(value0, \"Hello, world!\");\n\n    let value1: U512 = runtime::get_named_arg(ARG_VALUE1);\n    assert_eq!(value1, U512::from(42));\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/get-blockinfo/Cargo.toml",
    "content": "[package]\nname = \"get-blockinfo\"\nversion = \"0.1.0\"\nauthors = [\"Ed Hastings <ed@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"get_blockinfo\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/get-blockinfo/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::{\n    contract_api::{runtime, runtime::revert},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    bytesrepr::{Bytes, FromBytes},\n    ApiError, BlockTime, Digest, ProtocolVersion,\n};\n\nconst ARG_FIELD_IDX: &str = \"field_idx\";\nconst FIELD_IDX_BLOCK_TIME: u8 = 0;\nconst FIELD_IDX_BLOCK_HEIGHT: u8 = 1;\nconst FIELD_IDX_PARENT_BLOCK_HASH: u8 = 2;\nconst FIELD_IDX_STATE_HASH: u8 = 3;\nconst FIELD_IDX_PROTOCOL_VERSION: u8 = 4;\nconst FIELD_IDX_ADDRESSABLE_ENTITY: u8 = 5;\n\nconst CURRENT_UBOUND: u8 = FIELD_IDX_ADDRESSABLE_ENTITY;\nconst ARG_KNOWN_BLOCK_TIME: &str = \"known_block_time\";\nconst ARG_KNOWN_BLOCK_HEIGHT: &str = \"known_block_height\";\nconst ARG_KNOWN_BLOCK_PARENT_HASH: &str = \"known_block_parent_hash\";\nconst ARG_KNOWN_STATE_HASH: &str = \"known_state_hash\";\nconst ARG_KNOWN_PROTOCOL_VERSION: &str = \"known_protocol_version\";\nconst ARG_KNOWN_ADDRESSABLE_ENTITY: &str = \"known_addressable_entity\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let field_idx: u8 = runtime::get_named_arg(ARG_FIELD_IDX);\n    if field_idx > CURRENT_UBOUND {\n        revert(ApiError::Unhandled);\n    }\n    if field_idx == FIELD_IDX_BLOCK_TIME {\n        let expected = BlockTime::new(runtime::get_named_arg(ARG_KNOWN_BLOCK_TIME));\n        let actual: BlockTime = runtime::get_blocktime();\n        if expected != actual {\n            revert(ApiError::User(field_idx as u16));\n        }\n    }\n    if field_idx == FIELD_IDX_BLOCK_HEIGHT {\n        let expected: u64 = runtime::get_named_arg(ARG_KNOWN_BLOCK_HEIGHT);\n        let actual = runtime::get_block_height();\n        if expected != actual {\n            revert(ApiError::User(field_idx as u16));\n        }\n    }\n    if field_idx == FIELD_IDX_PARENT_BLOCK_HASH {\n        let bytes: Bytes = runtime::get_named_arg(ARG_KNOWN_BLOCK_PARENT_HASH);\n        let (expected, _rem) = Digest::from_bytes(bytes.inner_bytes())\n            
.unwrap_or_revert_with(ApiError::User(CURRENT_UBOUND as u16 + 1));\n        let actual = runtime::get_parent_block_hash();\n        if expected != actual {\n            revert(ApiError::User(field_idx as u16));\n        }\n    }\n    if field_idx == FIELD_IDX_STATE_HASH {\n        let bytes: Bytes = runtime::get_named_arg(ARG_KNOWN_STATE_HASH);\n        let (expected, _rem) = Digest::from_bytes(bytes.inner_bytes())\n            .unwrap_or_revert_with(ApiError::User(CURRENT_UBOUND as u16 + 2));\n        let actual = runtime::get_state_hash();\n        if expected != actual {\n            revert(ApiError::User(field_idx as u16));\n        }\n    }\n    if field_idx == FIELD_IDX_PROTOCOL_VERSION {\n        let bytes: Bytes = runtime::get_named_arg(ARG_KNOWN_PROTOCOL_VERSION);\n        let (expected, _rem) = ProtocolVersion::from_bytes(bytes.inner_bytes())\n            .unwrap_or_revert_with(ApiError::User(CURRENT_UBOUND as u16 + 3));\n        let actual = runtime::get_protocol_version();\n        if expected != actual {\n            revert(ApiError::User(field_idx as u16));\n        }\n    }\n    if field_idx == FIELD_IDX_ADDRESSABLE_ENTITY {\n        let bytes: Bytes = runtime::get_named_arg(ARG_KNOWN_ADDRESSABLE_ENTITY);\n        let (expected, _rem) = bool::from_bytes(bytes.inner_bytes())\n            .unwrap_or_revert_with(ApiError::User(CURRENT_UBOUND as u16 + 4));\n        let actual = runtime::get_addressable_entity();\n        if expected != actual {\n            revert(ApiError::User(field_idx as u16));\n        }\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/get-blocktime/Cargo.toml",
    "content": "[package]\nname = \"get-blocktime\"\nversion = \"0.1.0\"\nauthors = [\"Ed Hastings <ed@casper.network>, Henry Till <henrytill@gmail.com>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"get_blocktime\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/get-blocktime/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::contract_api::runtime;\nuse casper_types::BlockTime;\n\nconst ARG_KNOWN_BLOCK_TIME: &str = \"known_block_time\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let known_block_time: u64 = runtime::get_named_arg(ARG_KNOWN_BLOCK_TIME);\n    let actual_block_time: BlockTime = runtime::get_blocktime();\n\n    assert_eq!(\n        actual_block_time,\n        BlockTime::new(known_block_time),\n        \"actual block time not known block time\"\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/get-call-stack-call-recursive-subcall/Cargo.toml",
    "content": "[package]\nname = \"get-call-stack-call-recursive-subcall\"\nversion = \"0.1.0\"\nauthors = [\"Daniel Werner <dan.werner@gmail.com>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"get_call_stack_call_recursive_subcall\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\nget-call-stack-recursive-subcall = { path = \"../get-call-stack-recursive-subcall\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/get-call-stack-call-recursive-subcall/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::vec::Vec;\n\nuse casper_contract::contract_api::{runtime, storage};\nuse casper_types::{runtime_args, ApiError, Key, Phase, U512};\nuse get_call_stack_recursive_subcall::{standard_payment, Call, ContractAddress};\n\nconst ARG_CALLS: &str = \"calls\";\nconst ARG_CURRENT_DEPTH: &str = \"current_depth\";\nconst AMOUNT: &str = \"amount\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let calls: Vec<Call> = runtime::get_named_arg(ARG_CALLS);\n    let current_depth: u8 = runtime::get_named_arg(ARG_CURRENT_DEPTH);\n    let amount: U512 = runtime::get_named_arg(AMOUNT);\n    let calls_count = calls.len() as u8;\n\n    // The important bit\n    {\n        let call_stack = runtime::get_call_stack();\n        let name = alloc::format!(\"call_stack-{}\", current_depth);\n        let call_stack_at = storage::new_uref(call_stack);\n        runtime::put_key(&name, Key::URef(call_stack_at));\n    }\n\n    if current_depth == 0 && runtime::get_phase() == Phase::Payment {\n        standard_payment(amount);\n    }\n\n    if current_depth == calls_count {\n        return;\n    }\n\n    let args = runtime_args! 
{\n        ARG_CALLS => calls.clone(),\n        ARG_CURRENT_DEPTH => current_depth + 1,\n    };\n\n    match calls.get(current_depth as usize) {\n        Some(Call {\n            contract_address: ContractAddress::ContractPackageHash(contract_package_hash),\n            target_method,\n            ..\n        }) => {\n            runtime::call_versioned_contract::<()>(\n                *contract_package_hash,\n                None,\n                target_method,\n                args,\n            );\n        }\n        Some(Call {\n            contract_address: ContractAddress::ContractHash(contract_hash),\n            target_method,\n            ..\n        }) => {\n            runtime::call_contract::<()>(*contract_hash, target_method, args);\n        }\n        _ => runtime::revert(ApiError::User(0)),\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/get-call-stack-recursive-subcall/Cargo.toml",
    "content": "[package]\nname = \"get-call-stack-recursive-subcall\"\nversion = \"0.1.0\"\nauthors = [\"Daniel Werner <dan.werner@gmail.com>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"get_call_stack_recursive_subcall\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[features]\ndefault = [\"casper-contract/default\"]\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\", default-features = false }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/get-call-stack-recursive-subcall/src/lib.rs",
    "content": "#![no_std]\n\nextern crate alloc;\n\nuse alloc::{string::String, vec::Vec};\n\nuse casper_contract::{\n    contract_api::{account, runtime, storage, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    bytesrepr,\n    bytesrepr::{Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    contracts::{ContractHash, ContractPackageHash},\n    runtime_args, ApiError, CLType, CLTyped, EntryPointType, Key, Phase, RuntimeArgs, Tagged, URef,\n    U512,\n};\n\npub const CONTRACT_PACKAGE_NAME: &str = \"forwarder\";\npub const PACKAGE_ACCESS_KEY_NAME: &str = \"forwarder_access\";\npub const CONTRACT_NAME: &str = \"our_contract_name\";\n\npub const METHOD_FORWARDER_CONTRACT_NAME: &str = \"forwarder_contract\";\npub const METHOD_FORWARDER_SESSION_NAME: &str = \"forwarder_session\";\n\npub const ARG_CALLS: &str = \"calls\";\npub const ARG_CURRENT_DEPTH: &str = \"current_depth\";\n\nconst DEFAULT_PAYMENT: u64 = 1_500_000_000_000;\n\n#[repr(u8)]\nenum ContractAddressTag {\n    ContractHash = 0,\n    ContractPackageHash,\n}\n\n#[derive(Debug, Copy, Clone)]\npub enum ContractAddress {\n    ContractHash(ContractHash),\n    ContractPackageHash(ContractPackageHash),\n}\n\nimpl Tagged<u8> for ContractAddress {\n    fn tag(&self) -> u8 {\n        match self {\n            ContractAddress::ContractHash(_) => ContractAddressTag::ContractHash as u8,\n            ContractAddress::ContractPackageHash(_) => {\n                ContractAddressTag::ContractPackageHash as u8\n            }\n        }\n    }\n}\n\nimpl ToBytes for ContractAddress {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        result.push(self.tag());\n        match self {\n            ContractAddress::ContractHash(contract_hash) => {\n                result.append(&mut contract_hash.to_bytes()?)\n            }\n            ContractAddress::ContractPackageHash(contract_package_hash) => {\n                
result.append(&mut contract_package_hash.to_bytes()?)\n            }\n        }\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                ContractAddress::ContractHash(contract_hash) => contract_hash.serialized_length(),\n                ContractAddress::ContractPackageHash(contract_package_hash) => {\n                    contract_package_hash.serialized_length()\n                }\n            }\n    }\n}\n\nimpl FromBytes for ContractAddress {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?;\n        match tag {\n            tag if tag == ContractAddressTag::ContractHash as u8 => {\n                let (contract_hash, remainder) = ContractHash::from_bytes(remainder)?;\n                Ok((ContractAddress::ContractHash(contract_hash), remainder))\n            }\n            tag if tag == ContractAddressTag::ContractPackageHash as u8 => {\n                let (contract_package_hash, remainder) =\n                    ContractPackageHash::from_bytes(remainder)?;\n                Ok((\n                    ContractAddress::ContractPackageHash(contract_package_hash),\n                    remainder,\n                ))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n#[derive(Debug, Clone)]\npub struct Call {\n    pub contract_address: ContractAddress,\n    pub target_method: String,\n    pub entry_point_type: EntryPointType,\n}\n\nimpl ToBytes for Call {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        result.append(&mut self.contract_address.to_bytes()?);\n        result.append(&mut self.target_method.to_bytes()?);\n        result.append(&mut self.entry_point_type.to_bytes()?);\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        
self.contract_address.serialized_length()\n            + self.target_method.serialized_length()\n            + self.entry_point_type.serialized_length()\n    }\n}\n\nimpl FromBytes for Call {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (contract_address, remainder) = ContractAddress::from_bytes(bytes)?;\n        let (target_method, remainder) = String::from_bytes(remainder)?;\n        let (entry_point_type, remainder) = EntryPointType::from_bytes(remainder)?;\n        Ok((\n            Call {\n                contract_address,\n                target_method,\n                entry_point_type,\n            },\n            remainder,\n        ))\n    }\n}\n\nimpl CLTyped for Call {\n    fn cl_type() -> CLType {\n        CLType::Any\n    }\n}\n\npub fn standard_payment(amount: U512) {\n    const METHOD_GET_PAYMENT_PURSE: &str = \"get_payment_purse\";\n\n    let main_purse = account::get_main_purse();\n\n    let handle_payment_pointer = system::get_handle_payment();\n\n    let payment_purse: URef = runtime::call_contract(\n        handle_payment_pointer,\n        METHOD_GET_PAYMENT_PURSE,\n        RuntimeArgs::default(),\n    );\n\n    system::transfer_from_purse_to_purse(main_purse, payment_purse, amount, None).unwrap_or_revert()\n}\n\npub fn recurse() {\n    let calls: Vec<Call> = runtime::get_named_arg(ARG_CALLS);\n    let current_depth: u8 = runtime::get_named_arg(ARG_CURRENT_DEPTH);\n\n    // The important bit\n    {\n        let call_stack = runtime::get_call_stack();\n        let name = alloc::format!(\"call_stack-{}\", current_depth);\n        let call_stack_at = storage::new_uref(call_stack);\n        runtime::put_key(&name, Key::URef(call_stack_at));\n    }\n\n    if current_depth == 0 && runtime::get_phase() == Phase::Payment {\n        standard_payment(U512::from(DEFAULT_PAYMENT))\n    }\n\n    if current_depth == calls.len() as u8 {\n        return;\n    }\n\n    let args = runtime_args! 
{\n        ARG_CALLS => calls.clone(),\n        ARG_CURRENT_DEPTH => current_depth + 1u8,\n    };\n\n    match calls.get(current_depth as usize) {\n        Some(Call {\n            contract_address: ContractAddress::ContractPackageHash(contract_package_hash),\n            target_method,\n            ..\n        }) => {\n            runtime::call_versioned_contract::<()>(\n                *contract_package_hash,\n                None,\n                target_method,\n                args,\n            );\n        }\n        Some(Call {\n            contract_address: ContractAddress::ContractHash(contract_hash),\n            target_method,\n            ..\n        }) => {\n            runtime::call_contract::<()>(*contract_hash, target_method, args);\n        }\n        _ => runtime::revert(ApiError::User(0)),\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/get-call-stack-recursive-subcall/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::{boxed::Box, string::ToString, vec};\n\nuse casper_contract::contract_api::{runtime, storage};\nuse casper_types::{\n    CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints,\n    Key, Parameter,\n};\n\nuse get_call_stack_recursive_subcall::{\n    ARG_CALLS, ARG_CURRENT_DEPTH, CONTRACT_NAME, CONTRACT_PACKAGE_NAME,\n    METHOD_FORWARDER_CONTRACT_NAME, METHOD_FORWARDER_SESSION_NAME, PACKAGE_ACCESS_KEY_NAME,\n};\n\n#[no_mangle]\npub extern \"C\" fn forwarder_contract() {\n    get_call_stack_recursive_subcall::recurse()\n}\n\n#[no_mangle]\npub extern \"C\" fn forwarder_session() {\n    get_call_stack_recursive_subcall::recurse()\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n        let forwarder_contract_entry_point = EntityEntryPoint::new(\n            METHOD_FORWARDER_CONTRACT_NAME.to_string(),\n            vec![\n                Parameter::new(ARG_CALLS, CLType::List(Box::new(CLType::Any))),\n                Parameter::new(ARG_CURRENT_DEPTH, CLType::U8),\n            ],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        let forwarder_session_entry_point = EntityEntryPoint::new(\n            METHOD_FORWARDER_SESSION_NAME.to_string(),\n            vec![\n                Parameter::new(ARG_CALLS, CLType::List(Box::new(CLType::Any))),\n                Parameter::new(ARG_CURRENT_DEPTH, CLType::U8),\n            ],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Caller,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(forwarder_contract_entry_point);\n        entry_points.add_entry_point(forwarder_session_entry_point);\n        entry_points\n    };\n\n    let (contract_hash, 
_contract_version) = storage::new_contract(\n        entry_points,\n        None,\n        Some(CONTRACT_PACKAGE_NAME.to_string()),\n        Some(PACKAGE_ACCESS_KEY_NAME.to_string()),\n        None,\n    );\n\n    runtime::put_key(CONTRACT_NAME, Key::Hash(contract_hash.value()));\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/get-caller/Cargo.toml",
    "content": "[package]\nname = \"get-caller\"\nversion = \"0.1.0\"\nauthors = [\"Ed Hastings <ed@casper.network>, Henry Till <henrytill@gmail.com>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"get_caller\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/get-caller/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::contract_api::runtime;\nuse casper_types::account::AccountHash;\n\nconst ARG_ACCOUNT: &str = \"account\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let known_account_hash: AccountHash = runtime::get_named_arg(ARG_ACCOUNT);\n    let caller_account_hash: AccountHash = runtime::get_caller();\n    assert_eq!(\n        caller_account_hash, known_account_hash,\n        \"caller account hash was not known account hash\"\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/get-caller-subcall/Cargo.toml",
    "content": "[package]\nname = \"get-caller-subcall\"\nversion = \"0.1.0\"\nauthors = [\"Ed Hastings <ed@casper.network>, Henry Till <henrytill@gmail.com>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"get_caller_subcall\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/get-caller-subcall/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::{string::ToString, vec::Vec};\n\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    account::AccountHash, CLType, CLValue, EntityEntryPoint, EntryPointAccess, EntryPointPayment,\n    EntryPointType, EntryPoints, RuntimeArgs,\n};\n\nconst ENTRY_POINT_NAME: &str = \"get_caller_ext\";\nconst HASH_KEY_NAME: &str = \"caller_subcall\";\nconst ACCESS_KEY_NAME: &str = \"caller_subcall_access\";\nconst ARG_ACCOUNT: &str = \"account\";\n\n#[no_mangle]\npub extern \"C\" fn get_caller_ext() {\n    let caller_account_hash: AccountHash = runtime::get_caller();\n    runtime::ret(CLValue::from_t(caller_account_hash).unwrap_or_revert());\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let known_account_hash: AccountHash = runtime::get_named_arg(ARG_ACCOUNT);\n    let caller_account_hash: AccountHash = runtime::get_caller();\n    assert_eq!(\n        caller_account_hash, known_account_hash,\n        \"caller account hash was not known account hash\"\n    );\n\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n        // takes no args, ret's PublicKey\n        let entry_point = EntityEntryPoint::new(\n            ENTRY_POINT_NAME.to_string(),\n            Vec::new(),\n            CLType::ByteArray(32),\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n        entry_points\n    };\n\n    let (contract_hash, _contract_version) = storage::new_contract(\n        entry_points,\n        None,\n        Some(HASH_KEY_NAME.to_string()),\n        Some(ACCESS_KEY_NAME.to_string()),\n        None,\n    );\n\n    let subcall_account_hash: AccountHash =\n        runtime::call_contract(contract_hash, ENTRY_POINT_NAME, RuntimeArgs::default());\n    assert_eq!(\n        
subcall_account_hash, known_account_hash,\n        \"subcall account hash was not known account hash\"\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/get-payment-purse/Cargo.toml",
    "content": "[package]\nname = \"get-payment-purse\"\nversion = \"0.1.0\"\nauthors = [\"Michael Birch <birchmd@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"get_payment_purse\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/get-payment-purse/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::{\n    contract_api::{account, runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{ApiError, RuntimeArgs, URef, U512};\n\n#[repr(u16)]\nenum Error {\n    TransferFromSourceToPayment = 0,\n    TransferFromPaymentToSource,\n    GetBalance,\n    CheckBalance,\n}\n\nconst ARG_AMOUNT: &str = \"amount\";\nconst ENTRY_POINT_GET_PAYMENT_PURSE: &str = \"get_payment_purse\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    // amount passed to payment contract\n    let payment_fund: U512 = runtime::get_named_arg(ARG_AMOUNT);\n\n    let contract_hash = system::get_handle_payment();\n    let source_purse = account::get_main_purse();\n    let payment_amount: U512 = 100.into();\n    let payment_purse: URef = runtime::call_contract(\n        contract_hash,\n        ENTRY_POINT_GET_PAYMENT_PURSE,\n        RuntimeArgs::default(),\n    );\n\n    // can deposit\n    system::transfer_from_purse_to_purse(source_purse, payment_purse, payment_amount, None)\n        .unwrap_or_revert_with(ApiError::User(Error::TransferFromSourceToPayment as u16));\n\n    let payment_balance = system::get_purse_balance(payment_purse)\n        .unwrap_or_revert_with(ApiError::User(Error::GetBalance as u16));\n\n    if payment_balance.saturating_sub(payment_fund) != payment_amount {\n        runtime::revert(ApiError::User(Error::CheckBalance as u16))\n    }\n\n    // cannot withdraw\n    if system::transfer_from_purse_to_purse(payment_purse, source_purse, payment_amount, None)\n        .is_ok()\n    {\n        runtime::revert(ApiError::User(Error::TransferFromPaymentToSource as u16));\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/get-phase/Cargo.toml",
    "content": "[package]\nname = \"get-phase\"\nversion = \"0.1.0\"\nauthors = [\"Michael Birch <birchmd@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"get_phase\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/get-phase/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::contract_api::runtime;\nuse casper_types::Phase;\n\nconst ARG_PHASE: &str = \"phase\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let known_phase: Phase = runtime::get_named_arg(ARG_PHASE);\n    let get_phase = runtime::get_phase();\n    assert_eq!(\n        get_phase, known_phase,\n        \"get_phase did not return known_phase\"\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/get-phase-payment/Cargo.toml",
    "content": "[package]\nname = \"get-phase-payment\"\nversion = \"0.1.0\"\nauthors = [\"Michael Birch <birchmd@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"get_phase_payment\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/get-phase-payment/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::{\n    contract_api::{account, runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{Phase, RuntimeArgs, URef, U512};\n\nconst GET_PAYMENT_PURSE: &str = \"get_payment_purse\";\nconst ARG_PHASE: &str = \"phase\";\nconst ARG_AMOUNT: &str = \"amount\";\n\nfn standard_payment(amount: U512) {\n    let main_purse = account::get_main_purse();\n\n    let handle_payment_pointer = system::get_handle_payment();\n\n    let payment_purse: URef = runtime::call_contract(\n        handle_payment_pointer,\n        GET_PAYMENT_PURSE,\n        RuntimeArgs::default(),\n    );\n\n    system::transfer_from_purse_to_purse(main_purse, payment_purse, amount, None).unwrap_or_revert()\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let known_phase: Phase = runtime::get_named_arg(ARG_PHASE);\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n\n    let get_phase = runtime::get_phase();\n    assert_eq!(\n        get_phase, known_phase,\n        \"get_phase did not return known_phase\"\n    );\n\n    standard_payment(amount);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/gh-1470-regression/Cargo.toml",
    "content": "[package]\nname = \"gh-1470-regression\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"gh_1470_regression\"\npath = \"src/bin/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[features]\ndefault = [\"casper-contract/default\"]\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\", default-features = false }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/gh-1470-regression/src/bin/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[macro_use]\nextern crate alloc;\n\nuse alloc::collections::BTreeMap;\nuse casper_contract::contract_api::{runtime, storage};\n\nuse casper_types::{\n    contracts::NamedKeys, CLType, CLTyped, EntityEntryPoint, EntryPointAccess, EntryPointPayment,\n    EntryPointType, EntryPoints, Group, Key, Parameter,\n};\nuse gh_1470_regression::{\n    Arg1Type, Arg2Type, Arg3Type, Arg4Type, Arg5Type, ARG1, ARG2, ARG3, ARG4, ARG5,\n    CONTRACT_HASH_NAME, GROUP_LABEL, GROUP_UREF_NAME, PACKAGE_HASH_NAME,\n    RESTRICTED_DO_NOTHING_ENTRYPOINT, RESTRICTED_WITH_EXTRA_ARG_ENTRYPOINT,\n};\n\n#[no_mangle]\npub extern \"C\" fn restricted_do_nothing_contract() {\n    let _arg1: Arg1Type = runtime::get_named_arg(ARG1);\n    let _arg2: Arg2Type = runtime::get_named_arg(ARG2);\n\n    // ARG3 is defined in entrypoint but optional and might not be passed in all cases\n}\n\n#[no_mangle]\npub extern \"C\" fn restricted_with_extra_arg() {\n    let _arg1: Arg1Type = runtime::get_named_arg(ARG1);\n    let _arg2: Arg2Type = runtime::get_named_arg(ARG2);\n    let _arg3: Arg3Type = runtime::get_named_arg(ARG3);\n\n    // Those arguments are not present in entry point definition but are always passed by caller\n    let _arg4: Arg4Type = runtime::get_named_arg(ARG4);\n    let _arg5: Arg5Type = runtime::get_named_arg(ARG5);\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let (contract_package_hash, _access_uref) = storage::create_contract_package_at_hash();\n\n    let admin_group = storage::create_contract_user_group(\n        contract_package_hash,\n        GROUP_LABEL,\n        1,\n        Default::default(),\n    )\n    .unwrap();\n\n    runtime::put_key(GROUP_UREF_NAME, admin_group[0].into());\n\n    let mut entry_points = EntryPoints::new();\n\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        RESTRICTED_DO_NOTHING_ENTRYPOINT,\n        vec![\n            Parameter::new(ARG2, Arg2Type::cl_type()),\n            Parameter::new(ARG1, 
Arg1Type::cl_type()),\n            Parameter::new(ARG3, Arg3Type::cl_type()),\n        ],\n        CLType::Unit,\n        EntryPointAccess::Groups(vec![Group::new(GROUP_LABEL)]),\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        RESTRICTED_WITH_EXTRA_ARG_ENTRYPOINT,\n        vec![\n            Parameter::new(ARG3, Arg3Type::cl_type()),\n            Parameter::new(ARG2, Arg2Type::cl_type()),\n            Parameter::new(ARG1, Arg1Type::cl_type()),\n        ],\n        CLType::Unit,\n        EntryPointAccess::Groups(vec![Group::new(GROUP_LABEL)]),\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n\n    let named_keys = NamedKeys::new();\n\n    let (contract_hash, _) = storage::add_contract_version(\n        contract_package_hash,\n        entry_points,\n        named_keys,\n        BTreeMap::new(),\n    );\n\n    runtime::put_key(CONTRACT_HASH_NAME, Key::Hash(contract_hash.value()));\n    runtime::put_key(PACKAGE_HASH_NAME, contract_package_hash.into());\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/gh-1470-regression/src/lib.rs",
    "content": "#![no_std]\n\nextern crate alloc;\n\nuse alloc::string::String;\n\nuse casper_types::U512;\n\npub const GROUP_LABEL: &str = \"group_label\";\npub const GROUP_UREF_NAME: &str = \"group_uref\";\npub const CONTRACT_HASH_NAME: &str = \"contract_hash\";\npub const PACKAGE_HASH_NAME: &str = \"contract_package_hash\";\npub const RESTRICTED_DO_NOTHING_ENTRYPOINT: &str = \"restricted_do_nothing_contract\";\npub const RESTRICTED_WITH_EXTRA_ARG_ENTRYPOINT: &str = \"restricted_with_extra_arg\";\n\npub const ARG1: &str = \"arg1\";\npub type Arg1Type = String;\n\npub const ARG2: &str = \"arg2\";\npub type Arg2Type = U512;\n\npub const ARG3: &str = \"arg3\";\npub type Arg3Type = Option<u64>;\n\npub const ARG4: &str = \"arg4\";\npub type Arg4Type = bool;\n\npub const ARG5: &str = \"arg5\";\npub type Arg5Type = Option<bool>;\n"
  },
  {
    "path": "smart_contracts/contracts/test/gh-1470-regression-call/Cargo.toml",
    "content": "[package]\nname = \"gh-1470-regression-call\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"gh_1470_regression_call\"\npath = \"src/bin/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[features]\ndefault = [\"casper-contract/default\", \"gh-1470-regression/default\"]\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\", default-features = false }\ncasper-types = { path = \"../../../../types\" }\ngh-1470-regression = { path = \"../gh-1470-regression\", default-features = false }\n"
  },
  {
    "path": "smart_contracts/contracts/test/gh-1470-regression-call/src/bin/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\nuse core::str::FromStr;\nuse gh_1470_regression_call::{ARG_CONTRACT_HASH, ARG_CONTRACT_PACKAGE_HASH, ARG_TEST_METHOD};\n\nuse casper_contract::{contract_api::runtime, unwrap_or_revert::UnwrapOrRevert};\nuse casper_types::{runtime_args, AddressableEntityHash, PackageHash};\n\nuse gh_1470_regression_call::TestMethod;\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let test_method = {\n        let arg_test_method: String = runtime::get_named_arg(ARG_TEST_METHOD);\n        TestMethod::from_str(&arg_test_method).unwrap_or_revert()\n    };\n\n    let correct_runtime_args = runtime_args! {\n        gh_1470_regression::ARG3 => gh_1470_regression::Arg3Type::default(),\n        gh_1470_regression::ARG2 => gh_1470_regression::Arg2Type::default(),\n        gh_1470_regression::ARG1 => gh_1470_regression::Arg1Type::default(),\n    };\n\n    let no_runtime_args = runtime_args! {};\n\n    let type_mismatch_runtime_args = runtime_args! {\n        gh_1470_regression::ARG2 => gh_1470_regression::Arg1Type::default(),\n        gh_1470_regression::ARG3 => gh_1470_regression::Arg2Type::default(),\n        gh_1470_regression::ARG1 => gh_1470_regression::Arg3Type::default(),\n    };\n\n    let optional_type_mismatch_runtime_args = runtime_args! {\n        gh_1470_regression::ARG1 => gh_1470_regression::Arg1Type::default(),\n        gh_1470_regression::ARG2 => gh_1470_regression::Arg2Type::default(),\n        gh_1470_regression::ARG3 => gh_1470_regression::Arg4Type::default(),\n    };\n\n    let correct_without_optional_args = runtime_args! {\n        gh_1470_regression::ARG2 => gh_1470_regression::Arg2Type::default(),\n        gh_1470_regression::ARG1 => gh_1470_regression::Arg1Type::default(),\n    };\n\n    let extra_runtime_args = runtime_args! 
{\n        gh_1470_regression::ARG3 => gh_1470_regression::Arg3Type::default(),\n        gh_1470_regression::ARG2 => gh_1470_regression::Arg2Type::default(),\n        gh_1470_regression::ARG1 => gh_1470_regression::Arg1Type::default(),\n        gh_1470_regression::ARG4 => gh_1470_regression::Arg4Type::default(),\n        gh_1470_regression::ARG5 => gh_1470_regression::Arg5Type::default(),\n    };\n\n    assert_ne!(correct_runtime_args, optional_type_mismatch_runtime_args);\n\n    match test_method {\n        TestMethod::CallDoNothing => {\n            let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH);\n\n            runtime::call_contract::<()>(\n                contract_hash.into(),\n                gh_1470_regression::RESTRICTED_DO_NOTHING_ENTRYPOINT,\n                correct_runtime_args,\n            );\n        }\n        TestMethod::CallVersionedDoNothing => {\n            let contract_package_hash: PackageHash =\n                runtime::get_named_arg(ARG_CONTRACT_PACKAGE_HASH);\n\n            runtime::call_versioned_contract::<()>(\n                contract_package_hash.into(),\n                None,\n                gh_1470_regression::RESTRICTED_DO_NOTHING_ENTRYPOINT,\n                correct_runtime_args,\n            );\n        }\n        TestMethod::CallDoNothingNoArgs => {\n            let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH);\n\n            runtime::call_contract::<()>(\n                contract_hash.into(),\n                gh_1470_regression::RESTRICTED_DO_NOTHING_ENTRYPOINT,\n                no_runtime_args,\n            );\n        }\n        TestMethod::CallVersionedDoNothingNoArgs => {\n            let contract_package_hash: PackageHash =\n                runtime::get_named_arg(ARG_CONTRACT_PACKAGE_HASH);\n\n            runtime::call_versioned_contract::<()>(\n                contract_package_hash.into(),\n                None,\n                
gh_1470_regression::RESTRICTED_DO_NOTHING_ENTRYPOINT,\n                no_runtime_args,\n            );\n        }\n        TestMethod::CallDoNothingTypeMismatch => {\n            let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH);\n\n            runtime::call_contract::<()>(\n                contract_hash.into(),\n                gh_1470_regression::RESTRICTED_DO_NOTHING_ENTRYPOINT,\n                type_mismatch_runtime_args,\n            );\n        }\n\n        TestMethod::CallVersionedDoNothingTypeMismatch => {\n            let contract_package_hash: PackageHash =\n                runtime::get_named_arg(ARG_CONTRACT_PACKAGE_HASH);\n\n            runtime::call_versioned_contract::<()>(\n                contract_package_hash.into(),\n                None,\n                gh_1470_regression::RESTRICTED_DO_NOTHING_ENTRYPOINT,\n                type_mismatch_runtime_args,\n            );\n        }\n        TestMethod::CallDoNothingNoOptionals => {\n            let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH);\n\n            runtime::call_contract::<()>(\n                contract_hash.into(),\n                gh_1470_regression::RESTRICTED_DO_NOTHING_ENTRYPOINT,\n                correct_without_optional_args,\n            );\n        }\n        TestMethod::CallVersionedDoNothingNoOptionals => {\n            let contract_package_hash: PackageHash =\n                runtime::get_named_arg(ARG_CONTRACT_PACKAGE_HASH);\n\n            runtime::call_versioned_contract::<()>(\n                contract_package_hash.into(),\n                None,\n                gh_1470_regression::RESTRICTED_DO_NOTHING_ENTRYPOINT,\n                correct_without_optional_args,\n            );\n        }\n        TestMethod::CallDoNothingExtra => {\n            let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH);\n\n            runtime::call_contract::<()>(\n                
contract_hash.into(),\n                gh_1470_regression::RESTRICTED_WITH_EXTRA_ARG_ENTRYPOINT,\n                extra_runtime_args,\n            );\n        }\n        TestMethod::CallVersionedDoNothingExtra => {\n            let contract_package_hash: PackageHash =\n                runtime::get_named_arg(ARG_CONTRACT_PACKAGE_HASH);\n\n            runtime::call_versioned_contract::<()>(\n                contract_package_hash.into(),\n                None,\n                gh_1470_regression::RESTRICTED_WITH_EXTRA_ARG_ENTRYPOINT,\n                extra_runtime_args,\n            );\n        }\n        TestMethod::CallDoNothingOptionalTypeMismatch => {\n            let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH);\n\n            runtime::call_contract::<()>(\n                contract_hash.into(),\n                gh_1470_regression::RESTRICTED_DO_NOTHING_ENTRYPOINT,\n                optional_type_mismatch_runtime_args,\n            );\n        }\n        TestMethod::CallVersionedDoNothingOptionalTypeMismatch => {\n            let contract_package_hash: PackageHash =\n                runtime::get_named_arg(ARG_CONTRACT_PACKAGE_HASH);\n\n            runtime::call_versioned_contract::<()>(\n                contract_package_hash.into(),\n                None,\n                gh_1470_regression::RESTRICTED_DO_NOTHING_ENTRYPOINT,\n                optional_type_mismatch_runtime_args,\n            );\n        }\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/gh-1470-regression-call/src/lib.rs",
    "content": "#![no_std]\n\nuse core::str::FromStr;\n\nuse casper_types::ApiError;\n\npub const ARG_CONTRACT_HASH: &str = \"payment_contract\";\npub const ARG_CONTRACT_PACKAGE_HASH: &str = \"contract_package_hash\";\npub const ARG_TEST_METHOD: &str = \"test_method\";\n\n#[repr(u16)]\npub enum Error {\n    InvalidMethod = 0,\n}\n\nimpl From<Error> for ApiError {\n    fn from(error: Error) -> Self {\n        ApiError::User(error as u16)\n    }\n}\n\npub const METHOD_CALL_DO_NOTHING: &str = \"call_do_nothing\";\npub const METHOD_CALL_VERSIONED_DO_NOTHING: &str = \"call_versioned_do_nothing\";\n\npub const METHOD_CALL_DO_NOTHING_NO_ARGS: &str = \"call_do_nothing_no_args\";\npub const METHOD_CALL_VERSIONED_DO_NOTHING_NO_ARGS: &str = \"call_versioned_do_nothing_no_args\";\n\npub const METHOD_CALL_DO_NOTHING_TYPE_MISMATCH: &str = \"call_do_nothing_type_mismatch\";\npub const METHOD_CALL_VERSIONED_DO_NOTHING_TYPE_MISMATCH: &str =\n    \"call_versioned_do_nothing_type_mismatch\";\n\npub const METHOD_CALL_DO_NOTHING_OPTIONAL_TYPE_MISMATCH: &str =\n    \"call_do_nothing_optional_type_mismatch\";\npub const METHOD_CALL_VERSIONED_DO_NOTHING_OPTIONAL_TYPE_MISMATCH: &str =\n    \"call_versioned_do_nothing_optional_type_mismatch\";\n\npub const METHOD_CALL_DO_NOTHING_NO_OPTIONALS: &str = \"call_do_nothing_no_optionals\";\npub const METHOD_CALL_VERSIONED_DO_NOTHING_NO_OPTIONALS: &str =\n    \"call_versioned_do_nothing_no_optionals\";\n\npub const METHOD_CALL_DO_NOTHING_EXTRA: &str = \"call_do_nothing_extra\";\npub const METHOD_CALL_VERSIONED_DO_NOTHING_EXTRA: &str = \"call_versioned_do_nothing_extra\";\n\npub enum TestMethod {\n    CallDoNothing,\n    CallVersionedDoNothing,\n    CallDoNothingNoArgs,\n    CallVersionedDoNothingNoArgs,\n    CallDoNothingTypeMismatch,\n    CallVersionedDoNothingTypeMismatch,\n    CallDoNothingOptionalTypeMismatch,\n    CallVersionedDoNothingOptionalTypeMismatch,\n    CallDoNothingNoOptionals,\n    CallVersionedDoNothingNoOptionals,\n    
CallDoNothingExtra,\n    CallVersionedDoNothingExtra,\n}\n\nimpl FromStr for TestMethod {\n    type Err = Error;\n\n    fn from_str(s: &str) -> Result<Self, Self::Err> {\n        if s == METHOD_CALL_DO_NOTHING {\n            Ok(TestMethod::CallDoNothing)\n        } else if s == METHOD_CALL_VERSIONED_DO_NOTHING {\n            Ok(TestMethod::CallVersionedDoNothing)\n        } else if s == METHOD_CALL_DO_NOTHING_NO_ARGS {\n            Ok(TestMethod::CallDoNothingNoArgs)\n        } else if s == METHOD_CALL_VERSIONED_DO_NOTHING_NO_ARGS {\n            Ok(TestMethod::CallVersionedDoNothingNoArgs)\n        } else if s == METHOD_CALL_DO_NOTHING_TYPE_MISMATCH {\n            Ok(TestMethod::CallDoNothingTypeMismatch)\n        } else if s == METHOD_CALL_VERSIONED_DO_NOTHING_TYPE_MISMATCH {\n            Ok(TestMethod::CallVersionedDoNothingTypeMismatch)\n        } else if s == METHOD_CALL_DO_NOTHING_OPTIONAL_TYPE_MISMATCH {\n            Ok(TestMethod::CallDoNothingOptionalTypeMismatch)\n        } else if s == METHOD_CALL_VERSIONED_DO_NOTHING_OPTIONAL_TYPE_MISMATCH {\n            Ok(TestMethod::CallVersionedDoNothingOptionalTypeMismatch)\n        } else if s == METHOD_CALL_DO_NOTHING_NO_OPTIONALS {\n            Ok(TestMethod::CallDoNothingNoOptionals)\n        } else if s == METHOD_CALL_VERSIONED_DO_NOTHING_NO_OPTIONALS {\n            Ok(TestMethod::CallVersionedDoNothingNoOptionals)\n        } else if s == METHOD_CALL_DO_NOTHING_EXTRA {\n            Ok(TestMethod::CallDoNothingExtra)\n        } else if s == METHOD_CALL_VERSIONED_DO_NOTHING_EXTRA {\n            Ok(TestMethod::CallVersionedDoNothingExtra)\n        } else {\n            Err(Error::InvalidMethod)\n        }\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/gh-1688-regression/Cargo.toml",
    "content": "[package]\nname = \"gh-1688-regression\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"gh_1688_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/gh-1688-regression/src/main.rs",
    "content": "#![no_main]\n#![no_std]\n\nextern crate alloc;\n\nuse alloc::string::ToString;\nuse casper_contract::contract_api::{runtime, storage};\nuse casper_types::{\n    addressable_entity::Parameters, AddressableEntityHash, CLType, EntityEntryPoint,\n    EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key,\n};\n\nconst METHOD_PUT_KEY: &str = \"put_key\";\nconst NEW_KEY_NAME: &str = \"Hello\";\nconst NEW_KEY_VALUE: &str = \"World\";\nconst CONTRACT_PACKAGE_KEY: &str = \"contract_package\";\nconst CONTRACT_HASH_KEY: &str = \"contract_hash\";\n\n#[no_mangle]\nfn put_key() {\n    let value = storage::new_uref(NEW_KEY_VALUE);\n    runtime::put_key(NEW_KEY_NAME, value.into());\n}\n\n#[no_mangle]\nfn call() {\n    let mut entry_points = EntryPoints::new();\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        METHOD_PUT_KEY,\n        Parameters::new(),\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n\n    let (contract_hash, _version) = storage::new_contract(\n        entry_points,\n        None,\n        Some(CONTRACT_PACKAGE_KEY.to_string()),\n        None,\n        None,\n    );\n    runtime::put_key(\n        CONTRACT_HASH_KEY,\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/gh-2280-regression/Cargo.toml",
    "content": "[package]\nname = \"gh-2280-regression\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"gh_2280_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/gh-2280-regression/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::{string::ToString, vec};\n\nuse casper_contract::{\n    contract_api::{account, runtime, storage, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\n\nuse casper_types::{\n    account::AccountHash,\n    addressable_entity::{\n        EntityEntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Parameter,\n    },\n    AddressableEntityHash, CLType, CLTyped, EntryPointPayment, Key, NamedKeys, U512,\n};\n\nconst FAUCET_NAME: &str = \"faucet\";\nconst PACKAGE_HASH_KEY_NAME: &str = \"gh_2280\";\nconst HASH_KEY_NAME: &str = \"gh_2280_hash\";\nconst ACCESS_KEY_NAME: &str = \"gh_2280_access\";\nconst ARG_TARGET: &str = \"target\";\nconst CONTRACT_VERSION: &str = \"contract_version\";\nconst ARG_FAUCET_FUNDS: &str = \"faucet_initial_balance\";\nconst FAUCET_FUNDS_KEY: &str = \"faucet_funds\";\n\n#[no_mangle]\npub extern \"C\" fn faucet() {\n    let purse_uref = runtime::get_key(FAUCET_FUNDS_KEY)\n        .and_then(Key::into_uref)\n        .unwrap_or_revert();\n\n    let account_hash: AccountHash = runtime::get_named_arg(ARG_TARGET);\n    system::transfer_from_purse_to_account(purse_uref, account_hash, U512::from(1u64), None)\n        .unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n\n        let faucet_entrypoint = EntityEntryPoint::new(\n            FAUCET_NAME.to_string(),\n            vec![Parameter::new(ARG_TARGET, AccountHash::cl_type())],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n\n        entry_points.add_entry_point(faucet_entrypoint);\n        entry_points\n    };\n\n    let faucet_initial_balance: U512 = runtime::get_named_arg(ARG_FAUCET_FUNDS);\n\n    let named_keys = {\n        let faucet_funds = {\n            let purse = system::create_purse();\n\n            let id: 
Option<u64> = None;\n            system::transfer_from_purse_to_purse(\n                account::get_main_purse(),\n                purse,\n                faucet_initial_balance,\n                id,\n            )\n            .unwrap_or_revert();\n\n            purse\n        };\n\n        let mut named_keys = NamedKeys::new();\n\n        named_keys.insert(FAUCET_FUNDS_KEY.to_string(), faucet_funds.into());\n\n        named_keys\n    };\n\n    let (contract_hash, contract_version) = storage::new_contract(\n        entry_points,\n        Some(named_keys),\n        Some(PACKAGE_HASH_KEY_NAME.to_string()),\n        Some(ACCESS_KEY_NAME.to_string()),\n        None,\n    );\n    runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into());\n    runtime::put_key(\n        HASH_KEY_NAME,\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/gh-2280-regression-call/Cargo.toml",
    "content": "[package]\nname = \"gh-2280-regression-call\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"gh_2280_regression_call\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/gh-2280-regression-call/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::contract_api::runtime;\n\nuse casper_types::{\n    account::AccountHash, contracts::ContractHash, runtime_args, AddressableEntityHash,\n};\n\nconst FAUCET_NAME: &str = \"faucet\";\nconst ARG_TARGET: &str = \"target\";\nconst ARG_CONTRACT_HASH: &str = \"contract_hash\";\n\nfn call_faucet(contract_hash: ContractHash, target: AccountHash) {\n    let faucet_args = runtime_args! {\n        ARG_TARGET => target,\n    };\n    runtime::call_contract(contract_hash, FAUCET_NAME, faucet_args)\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH);\n    let target: AccountHash = runtime::get_named_arg(ARG_TARGET);\n\n    call_faucet(contract_hash.into(), target);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/gh-3097-regression/Cargo.toml",
    "content": "[package]\nname = \"gh-3097-regression\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"gh_3097_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/gh-3097-regression/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::collections::BTreeMap;\n\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    addressable_entity::Parameters, contracts::NamedKeys, CLType, EntityEntryPoint,\n    EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key,\n};\n\nconst CONTRACT_PACKAGE_HASH_KEY: &str = \"contract_package_hash\";\nconst DISABLED_CONTRACT_HASH_KEY: &str = \"disabled_contract_hash\";\nconst ENABLED_CONTRACT_HASH_KEY: &str = \"enabled_contract_hash\";\n\n#[no_mangle]\npub extern \"C\" fn do_something() {\n    let _ = runtime::list_authorization_keys();\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n\n        let do_something = EntityEntryPoint::new(\n            \"do_something\",\n            Parameters::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n\n        entry_points.add_entry_point(do_something);\n\n        entry_points\n    };\n\n    let (contract_package_hash, _access_key) = storage::create_contract_package_at_hash();\n\n    let (disabled_contract_hash, _version) = storage::add_contract_version(\n        contract_package_hash,\n        entry_points.clone(),\n        NamedKeys::new(),\n        BTreeMap::new(),\n    );\n\n    let (enabled_contract_hash, _version) = storage::add_contract_version(\n        contract_package_hash,\n        entry_points,\n        NamedKeys::new(),\n        BTreeMap::new(),\n    );\n\n    runtime::put_key(CONTRACT_PACKAGE_HASH_KEY, contract_package_hash.into());\n\n    runtime::put_key(\n        DISABLED_CONTRACT_HASH_KEY,\n        Key::Hash(disabled_contract_hash.value()),\n    );\n    runtime::put_key(\n        ENABLED_CONTRACT_HASH_KEY,\n        Key::Hash(enabled_contract_hash.value()),\n    
);\n\n    storage::disable_contract_version(contract_package_hash, disabled_contract_hash)\n        .unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/gh-3097-regression-call/Cargo.toml",
    "content": "[package]\nname = \"gh-3097-regression-call\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"gh_3097_regression_call\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/gh-3097-regression-call/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\nuse casper_contract::{contract_api::runtime, unwrap_or_revert::UnwrapOrRevert};\nuse casper_types::{\n    contracts::{ContractHash, ContractPackageHash, ContractVersion},\n    ApiError, RuntimeArgs,\n};\n\nconst CONTRACT_PACKAGE_HASH_KEY: &str = \"contract_package_hash\";\nconst DO_SOMETHING_ENTRYPOINT: &str = \"do_something\";\nconst ARG_METHOD: &str = \"method\";\nconst ARG_CONTRACT_HASH_KEY: &str = \"contract_hash_key\";\nconst ARG_MAJOR_VERSION: &str = \"major_version\";\nconst ARG_CONTRACT_VERSION: &str = \"contract_version\";\nconst METHOD_CALL_CONTRACT: &str = \"call_contract\";\nconst METHOD_CALL_VERSIONED_CONTRACT: &str = \"call_versioned_contract\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let method: String = runtime::get_named_arg(ARG_METHOD);\n    if method == METHOD_CALL_CONTRACT {\n        let contract_hash_key_name: String = runtime::get_named_arg(ARG_CONTRACT_HASH_KEY);\n        let contract_hash = runtime::get_key(&contract_hash_key_name)\n            .ok_or(ApiError::MissingKey)\n            .unwrap_or_revert()\n            .into_entity_hash_addr()\n            .ok_or(ApiError::UnexpectedKeyVariant)\n            .map(ContractHash::new)\n            .unwrap_or_revert();\n        runtime::call_contract::<()>(\n            contract_hash,\n            DO_SOMETHING_ENTRYPOINT,\n            RuntimeArgs::default(),\n        )\n    } else if method == METHOD_CALL_VERSIONED_CONTRACT {\n        let contract_package_hash = runtime::get_key(CONTRACT_PACKAGE_HASH_KEY)\n            .ok_or(ApiError::MissingKey)\n            .unwrap_or_revert()\n            .into_package_addr()\n            .ok_or(ApiError::UnexpectedKeyVariant)\n            .map(ContractPackageHash::new)\n            .unwrap_or_revert();\n\n        let major_version = runtime::get_named_arg(ARG_MAJOR_VERSION);\n        let contract_version =\n            
runtime::get_named_arg::<Option<ContractVersion>>(ARG_CONTRACT_VERSION);\n        match contract_version {\n            None => {\n                runtime::call_versioned_contract::<()>(\n                    contract_package_hash,\n                    None,\n                    DO_SOMETHING_ENTRYPOINT,\n                    RuntimeArgs::default(),\n                );\n            }\n            Some(contract_version) => {\n                runtime::call_package_version::<()>(\n                    contract_package_hash,\n                    Some(major_version),\n                    Some(contract_version),\n                    DO_SOMETHING_ENTRYPOINT,\n                    RuntimeArgs::default(),\n                );\n            }\n        }\n    } else {\n        runtime::revert(ApiError::User(0));\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/gh-4771-regression/Cargo.toml",
    "content": "[package]\nname = \"gh-4771-regression\"\nversion = \"0.1.0\"\nauthors = [\"Rafał Chabowski <rafal@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"gh_4771_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/gh-4771-regression/src/main.rs",
    "content": "#![no_main]\n#![no_std]\n\nextern crate alloc;\n\nuse alloc::string::ToString;\nuse casper_contract::contract_api::{runtime, storage};\nuse casper_types::{\n    addressable_entity::Parameters, AddressableEntityHash, CLType, EntityEntryPoint,\n    EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key,\n};\n\nconst METHOD_TEST_ENTRY_POINT: &str = \"test_entry_point\";\nconst NEW_KEY_NAME: &str = \"Hello\";\nconst NEW_KEY_VALUE: &str = \"World\";\nconst CONTRACT_PACKAGE_KEY: &str = \"contract_package\";\nconst CONTRACT_HASH_KEY: &str = \"contract_hash\";\n\n#[no_mangle]\nfn test_entry_point() {\n    let value = storage::new_uref(NEW_KEY_VALUE);\n    runtime::put_key(NEW_KEY_NAME, value.into());\n}\n\n#[no_mangle]\nfn call() {\n    let mut entry_points = EntryPoints::new();\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        METHOD_TEST_ENTRY_POINT,\n        Parameters::new(),\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n\n    let (contract_hash, _version) = storage::new_contract(\n        entry_points,\n        None,\n        Some(CONTRACT_PACKAGE_KEY.to_string()),\n        None,\n        None,\n    );\n    runtime::put_key(\n        CONTRACT_HASH_KEY,\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/gh-4898-regression/Cargo.toml",
    "content": "[package]\nname = \"gh-4898-regression\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[[bin]]\nname = \"gh_4898_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }"
  },
  {
    "path": "smart_contracts/contracts/test/gh-4898-regression/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\nuse alloc::string::String;\n\nuse casper_contract::contract_api::runtime;\nuse casper_types::Key;\n\nconst ARG_DATA: &str = \"data\";\n\n#[no_mangle]\nfn is_key(key_str: &str) -> bool {\n    Key::from_formatted_str(key_str).is_ok()\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let data: String = runtime::get_named_arg(ARG_DATA);\n\n    assert!(is_key(&data), \"Data should be a key\");\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/gh-5058-regression/Cargo.toml",
    "content": "[package]\nname = \"gh-5058-regression\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[[bin]]\nname = \"gh_5058_regression\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/gh-5058-regression/src/main.rs",
    "content": "#![no_main]\n#![no_std]\n\nextern crate alloc;\n\nuse casper_contract::{\n    contract_api::{account, runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\n\nuse casper_types::{\n    runtime_args, system::handle_payment, ApiError, Phase, RuntimeArgs, URef, U512,\n};\n\nconst ARG_AMOUNT: &str = \"amount\";\n\n#[repr(u16)]\nenum Error {\n    InvalidPhase,\n}\n\nimpl From<Error> for ApiError {\n    fn from(e: Error) -> Self {\n        ApiError::User(e as u16)\n    }\n}\n\nfn get_payment_purse() -> URef {\n    runtime::call_contract(\n        system::get_handle_payment(),\n        handle_payment::METHOD_GET_PAYMENT_PURSE,\n        RuntimeArgs::default(),\n    )\n}\n\nfn set_refund_purse(new_refund_purse: URef) {\n    let args = runtime_args! {\n        handle_payment::ARG_PURSE => new_refund_purse,\n    };\n\n    runtime::call_contract(\n        system::get_handle_payment(),\n        handle_payment::METHOD_SET_REFUND_PURSE,\n        args,\n    )\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    if runtime::get_phase() != Phase::Payment {\n        runtime::revert(Error::InvalidPhase);\n    }\n\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n    let payment_purse = get_payment_purse();\n    set_refund_purse(account::get_main_purse());\n\n    // transfer amount from named purse to payment purse, which will be used to pay for execution\n    system::transfer_from_purse_to_purse(account::get_main_purse(), payment_purse, amount, None)\n        .unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/groups/Cargo.toml",
    "content": "[package]\nname = \"groups\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"groups\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/groups/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[macro_use]\nextern crate alloc;\n\nuse alloc::{\n    collections::{BTreeMap, BTreeSet},\n    string::ToString,\n    vec::Vec,\n};\n\nuse casper_contract::{\n    contract_api::{account, runtime, storage, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    addressable_entity::{EntityEntryPoint, EntryPointAccess, EntryPointType, EntryPoints},\n    contracts::ContractPackageHash,\n    runtime_args,\n    system::{handle_payment, standard_payment},\n    CLType, CLTyped, EntryPointPayment, Key, NamedKeys, Parameter, RuntimeArgs, URef,\n    ENTITY_INITIAL_VERSION, U512,\n};\n\nconst PACKAGE_HASH_KEY: &str = \"package_hash_key\";\nconst PACKAGE_ACCESS_KEY: &str = \"package_access_key\";\nconst RESTRICTED_CONTRACT: &str = \"restricted_contract\";\nconst RESTRICTED_SESSION: &str = \"restricted_session\";\nconst RESTRICTED_SESSION_CALLER: &str = \"restricted_session_caller\";\nconst UNRESTRICTED_CONTRACT_CALLER: &str = \"unrestricted_contract_caller\";\nconst RESTRICTED_CONTRACT_CALLER_AS_SESSION: &str = \"restricted_contract_caller_as_session\";\nconst UNCALLABLE_SESSION: &str = \"uncallable_session\";\nconst UNCALLABLE_CONTRACT: &str = \"uncallable_contract\";\nconst CALL_RESTRICTED_ENTRY_POINTS: &str = \"call_restricted_entry_points\";\nconst RESTRICTED_STANDARD_PAYMENT: &str = \"restricted_standard_payment\";\nconst ARG_PACKAGE_HASH: &str = \"package_hash\";\n\n#[no_mangle]\npub extern \"C\" fn restricted_session() {}\n\n#[no_mangle]\npub extern \"C\" fn restricted_contract() {}\n\n#[no_mangle]\npub extern \"C\" fn restricted_session_caller() {\n    let package_hash: Key = runtime::get_named_arg(ARG_PACKAGE_HASH);\n    let contract_package_hash = package_hash\n        .into_entity_hash_addr()\n        .unwrap_or_revert()\n        .into();\n    runtime::call_versioned_contract(\n        contract_package_hash,\n        Some(ENTITY_INITIAL_VERSION),\n        RESTRICTED_SESSION,\n        runtime_args! 
{},\n    )\n}\n\nfn contract_caller() {\n    let package_hash: ContractPackageHash = runtime::get_named_arg(ARG_PACKAGE_HASH);\n    let contract_version = ENTITY_INITIAL_VERSION;\n    let runtime_args = runtime_args! {};\n    runtime::call_versioned_contract(\n        package_hash,\n        Some(contract_version),\n        RESTRICTED_CONTRACT,\n        runtime_args,\n    )\n}\n\n#[no_mangle]\npub extern \"C\" fn unrestricted_contract_caller() {\n    contract_caller();\n}\n\n#[no_mangle]\npub extern \"C\" fn restricted_contract_caller_as_session() {\n    contract_caller();\n}\n\n#[no_mangle]\npub extern \"C\" fn uncallable_session() {}\n\n#[no_mangle]\npub extern \"C\" fn uncallable_contract() {}\n\nfn get_payment_purse() -> URef {\n    runtime::call_contract(\n        system::get_handle_payment(),\n        handle_payment::METHOD_GET_PAYMENT_PURSE,\n        RuntimeArgs::default(),\n    )\n}\n\n#[no_mangle]\npub extern \"C\" fn restricted_standard_payment() {\n    let amount: U512 = runtime::get_named_arg(standard_payment::ARG_AMOUNT);\n\n    let payment_purse = get_payment_purse();\n    system::transfer_from_purse_to_purse(account::get_main_purse(), payment_purse, amount, None)\n        .unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn call_restricted_entry_points() {\n    // We're aggressively removing exports that aren't exposed through contract header so test\n    // ensures that those exports are still inside WASM.\n    uncallable_session();\n    uncallable_contract();\n}\n\nfn create_group(package_hash: ContractPackageHash) -> URef {\n    let new_uref_1 = storage::new_uref(());\n    runtime::put_key(\"saved_uref\", new_uref_1.into());\n\n    let mut existing_urefs = BTreeSet::new();\n    existing_urefs.insert(new_uref_1);\n\n    let new_urefs = storage::create_contract_user_group(package_hash, \"Group 1\", 1, existing_urefs)\n        .unwrap_or_revert();\n    assert_eq!(new_urefs.len(), 1);\n    new_urefs[0]\n}\n\n/// Restricted uref comes from 
creating a group and will be assigned to a smart contract\nfn create_entry_points_1() -> EntryPoints {\n    let mut entry_points = EntryPoints::new();\n    let restricted_session = EntityEntryPoint::new(\n        RESTRICTED_SESSION.to_string(),\n        Vec::new(),\n        CLType::I32,\n        EntryPointAccess::groups(&[\"Group 1\"]),\n        EntryPointType::Caller,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(restricted_session);\n\n    let restricted_contract = EntityEntryPoint::new(\n        RESTRICTED_CONTRACT.to_string(),\n        Vec::new(),\n        CLType::I32,\n        EntryPointAccess::groups(&[\"Group 1\"]),\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(restricted_contract);\n\n    let restricted_session_caller = EntityEntryPoint::new(\n        RESTRICTED_SESSION_CALLER.to_string(),\n        vec![Parameter::new(ARG_PACKAGE_HASH, CLType::Key)],\n        CLType::I32,\n        EntryPointAccess::Public,\n        EntryPointType::Caller,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(restricted_session_caller);\n\n    let restricted_contract = EntityEntryPoint::new(\n        RESTRICTED_CONTRACT.to_string(),\n        Vec::new(),\n        CLType::I32,\n        EntryPointAccess::groups(&[\"Group 1\"]),\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(restricted_contract);\n\n    let unrestricted_contract_caller = EntityEntryPoint::new(\n        UNRESTRICTED_CONTRACT_CALLER.to_string(),\n        Vec::new(),\n        CLType::I32,\n        // Made public because we've tested deploy level auth into a contract in\n        // RESTRICTED_CONTRACT entrypoint\n        EntryPointAccess::Public,\n        // NOTE: Public contract authorizes any contract call, because this contract has groups\n        // uref in its named keys\n        EntryPointType::Called,\n        
EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(unrestricted_contract_caller);\n\n    let unrestricted_contract_caller_as_session = EntityEntryPoint::new(\n        RESTRICTED_CONTRACT_CALLER_AS_SESSION.to_string(),\n        Vec::new(),\n        CLType::I32,\n        // Made public because we've tested deploy level auth into a contract in\n        // RESTRICTED_CONTRACT entrypoint\n        EntryPointAccess::Public,\n        // NOTE: Public contract authorizes any contract call, because this contract has groups\n        // uref in its named keys\n        EntryPointType::Caller,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(unrestricted_contract_caller_as_session);\n\n    let uncallable_session = EntityEntryPoint::new(\n        UNCALLABLE_SESSION.to_string(),\n        Vec::new(),\n        CLType::I32,\n        // Made public because we've tested deploy level auth into a contract in\n        // RESTRICTED_CONTRACT entrypoint\n        EntryPointAccess::groups(&[]),\n        // NOTE: Public contract authorizes any contract call, because this contract has groups\n        // uref in its named keys\n        EntryPointType::Caller,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(uncallable_session);\n\n    let uncallable_contract = EntityEntryPoint::new(\n        UNCALLABLE_CONTRACT.to_string(),\n        Vec::new(),\n        CLType::I32,\n        // Made public because we've tested deploy level auth into a contract in\n        // RESTRICTED_CONTRACT entrypoint\n        EntryPointAccess::groups(&[]),\n        // NOTE: Public contract authorizes any contract call, because this contract has groups\n        // uref in its named keys\n        EntryPointType::Caller,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(uncallable_contract);\n\n    // Directly calls entry_points that are protected with empty group of lists to verify that even\n    // though they're not 
callable externally, they're still visible in the WASM.\n    let call_restricted_entry_points = EntityEntryPoint::new(\n        CALL_RESTRICTED_ENTRY_POINTS.to_string(),\n        Vec::new(),\n        CLType::I32,\n        // Made public because we've tested deploy level auth into a contract in\n        // RESTRICTED_CONTRACT entrypoint\n        EntryPointAccess::Public,\n        // NOTE: Public contract authorizes any contract call, because this contract has groups\n        // uref in its named keys\n        EntryPointType::Caller,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(call_restricted_entry_points);\n\n    let restricted_standard_payment = EntityEntryPoint::new(\n        RESTRICTED_STANDARD_PAYMENT.to_string(),\n        vec![Parameter::new(\n            standard_payment::ARG_AMOUNT,\n            U512::cl_type(),\n        )],\n        CLType::Unit,\n        EntryPointAccess::groups(&[\"Group 1\"]),\n        EntryPointType::Caller,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(restricted_standard_payment);\n\n    entry_points\n}\n\nfn install_version_1(contract_package_hash: ContractPackageHash, restricted_uref: URef) {\n    let contract_named_keys = {\n        let contract_variable = storage::new_uref(0);\n\n        let mut named_keys = NamedKeys::new();\n        named_keys.insert(\"contract_named_key\".to_string(), contract_variable.into());\n        named_keys.insert(\"restricted_uref\".to_string(), restricted_uref.into());\n        named_keys\n    };\n\n    let entry_points = create_entry_points_1();\n    storage::add_contract_version(\n        contract_package_hash,\n        entry_points,\n        contract_named_keys,\n        BTreeMap::new(),\n    );\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    // Session contract\n    let (contract_package_hash, access_uref) = storage::create_contract_package_at_hash();\n\n    runtime::put_key(PACKAGE_HASH_KEY, contract_package_hash.into());\n    
runtime::put_key(PACKAGE_ACCESS_KEY, access_uref.into());\n\n    let restricted_uref = create_group(contract_package_hash);\n\n    install_version_1(contract_package_hash, restricted_uref);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/host-function-costs/Cargo.toml",
    "content": "[package]\nname = \"host-function-costs\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"host_function_costs\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/host-function-costs/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[macro_use]\nextern crate alloc;\n\nuse core::iter;\n\nuse alloc::{boxed::Box, collections::BTreeMap, string::String, vec::Vec};\n\nuse casper_contract::{\n    contract_api::{account, runtime, storage, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    account::{AccountHash, ActionType, Weight},\n    bytesrepr::Bytes,\n    contracts::NamedKeys,\n    runtime_args, ApiError, BlockTime, CLType, CLTyped, CLValue, EntityEntryPoint,\n    EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key, Parameter, Phase,\n    RuntimeArgs, U512,\n};\n\nconst DO_NOTHING_NAME: &str = \"do_nothing\";\nconst DO_SOMETHING_NAME: &str = \"do_something\";\nconst DO_HOST_FUNCTION_CALLS_NAME: &str = \"do_host_function_calls\";\nconst HASH_KEY_NAME: &str = \"contract_package\";\nconst CONTRACT_KEY_NAME: &str = \"contract\";\nconst CALLS_DO_NOTHING_LEVEL1_NAME: &str = \"calls_do_nothing_level1\";\nconst CALLS_DO_NOTHING_LEVEL2_NAME: &str = \"calls_do_nothing_level2\";\nconst TRANSFER_AMOUNT: u64 = 1_000_000;\nconst ARG_SOURCE_ACCOUNT: &str = \"source_account\";\nconst ARG_KEY_NAME: &str = \"seed\";\nconst ARG_BYTES: &str = \"bytes\";\nconst NAMED_KEY_COUNT: usize = 10;\nconst VALUE_FOR_ADDITION_1: u64 = 1;\nconst VALUE_FOR_ADDITION_2: u64 = 2;\nconst SHORT_FUNCTION_NAME_1: &str = \"s\";\nconst SHORT_FUNCTION_NAME_100: &str = \"sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss\";\nconst LONG_FUNCTION_NAME_1: &str = \"l\";\nconst LONG_FUNCTION_NAME_100: &str = \"llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll\";\nconst ARG_SIZE_FUNCTION_NAME: &str = \"arg_size_function\";\nconst ARG_SIZE_FUNCTION_CALL_1_NAME: &str = \"arg_size_function_call_1\";\nconst ARG_SIZE_FUNCTION_CALL_100_NAME: &str = \"arg_size_function_call_100\";\n\n// A destination account hash that does not necessarily must exists\nconst 
DESTINATION_ACCOUNT_HASH: AccountHash = AccountHash::new([0x0A; 32]);\n\n#[repr(u16)]\nenum Error {\n    GetCaller = 0,\n    GetBlockTime = 1,\n    GetPhase = 2,\n    HasKey = 3,\n    GetKey = 4,\n    NamedKeys = 5,\n    ReadOrRevert = 6,\n    IsValidURef = 7,\n    Transfer = 8,\n}\n\nimpl From<Error> for ApiError {\n    fn from(error: Error) -> ApiError {\n        ApiError::User(error as u16)\n    }\n}\n\n#[no_mangle]\npub extern \"C\" fn do_nothing() {}\n\n#[no_mangle]\npub extern \"C\" fn do_something() {\n    let _result = uses_opcodes();\n}\n\nfn uses_opcodes() -> Box<u64> {\n    let long_bytes = DO_NOTHING_NAME\n        .chars()\n        .chain(DO_SOMETHING_NAME.chars())\n        .chain(DO_HOST_FUNCTION_CALLS_NAME.chars())\n        .chain(HASH_KEY_NAME.chars())\n        .chain(CONTRACT_KEY_NAME.chars());\n\n    // Exercises various opcodes. Should cost more than \"do_nothing\".\n    let mut amount = Box::new(1);\n    for (i, c) in long_bytes.enumerate() {\n        *amount += c as u64;\n        *amount *= c as u64;\n        *amount ^= i as u64;\n        *amount |= i as u64;\n        *amount &= i as u64;\n    }\n\n    amount\n}\n\n#[no_mangle]\npub extern \"C\" fn small_function() {\n    if runtime::get_phase() != Phase::Session {\n        runtime::revert(Error::GetPhase);\n    }\n}\n\n#[no_mangle]\npub extern \"C\" fn s() {\n    small_function();\n}\n\n#[no_mangle]\npub extern \"C\" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss(\n) {\n    small_function();\n}\n\n#[no_mangle]\npub extern \"C\" fn l() {\n    account_function()\n}\n\n#[no_mangle]\npub extern \"C\" fn llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll(\n) {\n    account_function()\n}\n\n#[no_mangle]\npub extern \"C\" fn arg_size_function() {\n    let _bytes: Bytes = runtime::get_named_arg(ARG_BYTES);\n}\n\n// Executes the named key functions from the `runtime` module and most of the functions 
from the\n// `storage` module.\n#[no_mangle]\npub extern \"C\" fn storage_function() {\n    let key_name: String = runtime::get_named_arg(ARG_KEY_NAME);\n    let random_bytes: Bytes = runtime::get_named_arg(ARG_BYTES);\n\n    let uref = storage::new_uref(random_bytes.clone());\n\n    runtime::put_key(&key_name, Key::from(uref));\n\n    if !runtime::has_key(&key_name) {\n        runtime::revert(Error::HasKey);\n    }\n\n    if runtime::get_key(&key_name) != Some(Key::from(uref)) {\n        runtime::revert(Error::GetKey);\n    }\n\n    runtime::remove_key(&key_name);\n\n    let named_keys = runtime::list_named_keys();\n    if named_keys.len() != NAMED_KEY_COUNT - 1 {\n        runtime::revert(Error::NamedKeys)\n    }\n\n    storage::write(uref, random_bytes.clone());\n    let retrieved_value: Bytes = storage::read_or_revert(uref);\n    if retrieved_value != random_bytes {\n        runtime::revert(Error::ReadOrRevert);\n    }\n\n    storage::write(uref, VALUE_FOR_ADDITION_1);\n    storage::add(uref, VALUE_FOR_ADDITION_2);\n\n    let keys_to_return = runtime::list_named_keys();\n    runtime::ret(CLValue::from_t(keys_to_return).unwrap_or_revert());\n}\n\n#[no_mangle]\npub extern \"C\" fn account_function() {\n    let source_account: AccountHash = runtime::get_named_arg(ARG_SOURCE_ACCOUNT);\n\n    // ========== functions from `account` module ==================================================\n\n    let main_purse = account::get_main_purse();\n    account::set_action_threshold(ActionType::Deployment, Weight::new(1)).unwrap_or_revert();\n    account::add_associated_key(DESTINATION_ACCOUNT_HASH, Weight::new(1)).unwrap_or_revert();\n    account::update_associated_key(DESTINATION_ACCOUNT_HASH, Weight::new(1)).unwrap_or_revert();\n    account::remove_associated_key(DESTINATION_ACCOUNT_HASH).unwrap_or_revert();\n\n    // ========== functions from `system` module ===================================================\n\n    let _ = system::get_mint();\n\n    let new_purse = 
system::create_purse();\n\n    let transfer_amount = U512::from(TRANSFER_AMOUNT);\n    system::transfer_from_purse_to_purse(main_purse, new_purse, transfer_amount, None)\n        .unwrap_or_revert();\n\n    let balance = system::get_purse_balance(new_purse).unwrap_or_revert();\n    if balance != transfer_amount {\n        runtime::revert(Error::Transfer);\n    }\n\n    system::transfer_from_purse_to_account(\n        new_purse,\n        DESTINATION_ACCOUNT_HASH,\n        transfer_amount,\n        None,\n    )\n    .unwrap_or_revert();\n\n    // ========== remaining functions from `runtime` module ========================================\n\n    if !runtime::is_valid_uref(main_purse) {\n        runtime::revert(Error::IsValidURef);\n    }\n\n    if runtime::get_blocktime() != BlockTime::new(0) {\n        runtime::revert(Error::GetBlockTime);\n    }\n\n    if runtime::get_caller() != source_account {\n        runtime::revert(Error::GetCaller);\n    }\n}\n\n#[no_mangle]\npub extern \"C\" fn calls_do_nothing_level1() {\n    let contract_package_hash = runtime::get_key(HASH_KEY_NAME)\n        .and_then(Key::into_package_addr)\n        .expect(\"should have key\")\n        .into();\n    runtime::call_versioned_contract(\n        contract_package_hash,\n        None,\n        DO_NOTHING_NAME,\n        RuntimeArgs::default(),\n    )\n}\n\n#[no_mangle]\npub extern \"C\" fn calls_do_nothing_level2() {\n    let contract_package_hash = runtime::get_key(HASH_KEY_NAME)\n        .and_then(Key::into_package_addr)\n        .expect(\"should have key\")\n        .into();\n    runtime::call_versioned_contract(\n        contract_package_hash,\n        None,\n        CALLS_DO_NOTHING_LEVEL1_NAME,\n        RuntimeArgs::default(),\n    )\n}\n\nfn measure_arg_size(bytes: usize) {\n    let contract_package_hash = runtime::get_key(HASH_KEY_NAME)\n        .and_then(Key::into_package_addr)\n        .expect(\"should have key\")\n        .into();\n\n    let argument: Vec<u8> = iter::repeat_n(b'1', 
bytes).collect();\n\n    runtime::call_versioned_contract::<()>(\n        contract_package_hash,\n        None,\n        ARG_SIZE_FUNCTION_NAME,\n        runtime_args! {\n            ARG_BYTES => argument,\n        },\n    );\n}\n\n#[no_mangle]\npub extern \"C\" fn arg_size_function_call_1() {\n    measure_arg_size(0);\n}\n\n#[no_mangle]\npub extern \"C\" fn arg_size_function_call_100() {\n    measure_arg_size(100);\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n        let entry_point = EntityEntryPoint::new(\n            DO_NOTHING_NAME,\n            Vec::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n\n        let entry_point = EntityEntryPoint::new(\n            DO_SOMETHING_NAME,\n            Vec::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n\n        let entry_point = EntityEntryPoint::new(\n            CALLS_DO_NOTHING_LEVEL1_NAME,\n            Vec::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n        let entry_point = EntityEntryPoint::new(\n            CALLS_DO_NOTHING_LEVEL2_NAME,\n            Vec::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n\n        let entry_point = EntityEntryPoint::new(\n            SHORT_FUNCTION_NAME_1,\n            Vec::new(),\n            CLType::Unit,\n            
EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n        let entry_point = EntityEntryPoint::new(\n            SHORT_FUNCTION_NAME_100,\n            Vec::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n\n        let entry_point = EntityEntryPoint::new(\n            LONG_FUNCTION_NAME_1,\n            Vec::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n        let entry_point = EntityEntryPoint::new(\n            LONG_FUNCTION_NAME_100,\n            Vec::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n\n        let entry_point = EntityEntryPoint::new(\n            ARG_SIZE_FUNCTION_NAME,\n            vec![Parameter::new(ARG_BYTES, <Vec<u8>>::cl_type())],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n\n        let entry_point = EntityEntryPoint::new(\n            \"account_function\",\n            Vec::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n\n        let entry_point = EntityEntryPoint::new(\n            \"storage_function\",\n            Vec::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            
EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n\n        let entry_point = EntityEntryPoint::new(\n            ARG_SIZE_FUNCTION_CALL_1_NAME,\n            Vec::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n\n        let entry_point = EntityEntryPoint::new(\n            ARG_SIZE_FUNCTION_CALL_100_NAME,\n            Vec::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n\n        entry_points\n    };\n\n    let (contract_package_hash, _access_uref) = storage::create_contract_package_at_hash();\n\n    runtime::put_key(HASH_KEY_NAME, contract_package_hash.into());\n\n    let mut named_keys = NamedKeys::new();\n    named_keys.insert(HASH_KEY_NAME.into(), contract_package_hash.into());\n\n    let (contract_hash, _version) = storage::add_contract_version(\n        contract_package_hash,\n        entry_points,\n        named_keys,\n        BTreeMap::new(),\n    );\n    runtime::put_key(CONTRACT_KEY_NAME, Key::Hash(contract_hash.value()));\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/key-management-thresholds/Cargo.toml",
    "content": "[package]\nname = \"key-management-thresholds\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"key_management_thresholds\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/key-management-thresholds/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\n\nuse casper_contract::{\n    contract_api::{account, runtime},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    account::{\n        AccountHash, ActionType, AddKeyFailure, RemoveKeyFailure, SetThresholdFailure,\n        UpdateKeyFailure, Weight,\n    },\n    ApiError,\n};\n\nconst ARG_STAGE: &str = \"stage\";\n#[no_mangle]\npub extern \"C\" fn call() {\n    let stage: String = runtime::get_named_arg(ARG_STAGE);\n\n    if stage == \"init\" {\n        // executed with weight >= 1\n        account::add_associated_key(AccountHash::new([42; 32]), Weight::new(100))\n            .unwrap_or_revert();\n        // this key will be used to test permission denied when removing keys with low\n        // total weight\n        account::add_associated_key(AccountHash::new([43; 32]), Weight::new(1)).unwrap_or_revert();\n        account::add_associated_key(AccountHash::new([1; 32]), Weight::new(1)).unwrap_or_revert();\n        account::set_action_threshold(ActionType::KeyManagement, Weight::new(101))\n            .unwrap_or_revert();\n    } else if stage == \"test-permission-denied\" {\n        // Has to be executed with keys of total weight < 255\n        match account::add_associated_key(AccountHash::new([44; 32]), Weight::new(1)) {\n            Ok(_) => runtime::revert(ApiError::User(200)),\n            Err(AddKeyFailure::PermissionDenied) => {}\n            Err(_) => runtime::revert(ApiError::User(201)),\n        }\n\n        match account::update_associated_key(AccountHash::new([43; 32]), Weight::new(2)) {\n            Ok(_) => runtime::revert(ApiError::User(300)),\n            Err(UpdateKeyFailure::PermissionDenied) => {}\n            Err(_) => runtime::revert(ApiError::User(301)),\n        }\n        match account::remove_associated_key(AccountHash::new([43; 32])) {\n            Ok(_) => runtime::revert(ApiError::User(400)),\n            
Err(RemoveKeyFailure::PermissionDenied) => {}\n            Err(_) => runtime::revert(ApiError::User(401)),\n        }\n\n        match account::set_action_threshold(ActionType::KeyManagement, Weight::new(255)) {\n            Ok(_) => runtime::revert(ApiError::User(500)),\n            Err(SetThresholdFailure::PermissionDeniedError) => {}\n            Err(_) => runtime::revert(ApiError::User(501)),\n        }\n    } else if stage == \"test-key-mgmnt-succeed\" {\n        // Has to be executed with keys of total weight >= 254\n        account::add_associated_key(AccountHash::new([44; 32]), Weight::new(1)).unwrap_or_revert();\n        // Updates [43;32] key weight created in init stage\n        account::update_associated_key(AccountHash::new([44; 32]), Weight::new(2))\n            .unwrap_or_revert();\n        // Removes [43;32] key created in init stage\n        account::remove_associated_key(AccountHash::new([44; 32])).unwrap_or_revert();\n        // Sets action threshold\n        account::set_action_threshold(ActionType::KeyManagement, Weight::new(100))\n            .unwrap_or_revert();\n    } else {\n        runtime::revert(ApiError::User(1))\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/key-putter/Cargo.toml",
    "content": "[package]\nname = \"key-putter\"\nversion = \"0.1.0\"\nauthors = [\"CasperLabs <https://discord.com/invite/Q38s3Vh>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"key_putter\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/key-putter/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[cfg(not(target_arch = \"wasm32\"))]\ncompile_error!(\"target arch should be wasm32: compile with '--target wasm32-unknown-unknown'\");\n\n// This code imports necessary aspects of external crates that we will use in our contract code.\nextern crate alloc;\n// Importing Rust types.\nuse alloc::{\n    collections::btree_map::BTreeMap,\n    format,\n    string::{String, ToString},\n    vec::Vec,\n};\n// Importing aspects of the Casper platform.\nuse casper_contract::contract_api::{runtime, storage};\n// Importing specific Casper types.\nuse casper_types::{\n    addressable_entity::{EntityEntryPoint as EntryPoint, EntryPoints},\n    bytesrepr::FromBytes,\n    contracts::NamedKeys,\n    ApiError, CLType, CLTyped, EntryPointAccess, EntryPointPayment, EntryPointType, Key, URef,\n};\n/// Constants for the keys pointing to values stored in the account's named keys.\nconst CONTRACT_PACKAGE_NAME: &str = \"package_name\";\nconst CONTRACT_ACCESS_UREF: &str = \"access_uref\";\n\n/// Creating constants for the various contract entry points.\nconst ENTRY_POINT_PUT_KEY: &str = \"put_key\";\n\n/// Constants for the keys pointing to values stored in the contract's named keys.\nconst CONTRACT_VERSION_KEY: &str = \"version\";\nconst ALL_CONTRACTS_COUNTER: &str = \"all_contracts_counter\";\nconst CONTRACT_KEY: &str = \"key_putter\";\n\nconst KEY_PLACEHOLDER: &str = \"key_placeholder\";\n\n#[no_mangle]\nfn put_key() {\n    let named_keys = runtime::list_named_keys();\n    let mut number_of_matches = 0;\n    for key in named_keys.names() {\n        if key.to_string().starts_with(\"v_\") {\n            number_of_matches += 1;\n        }\n    }\n    let key = if number_of_matches <= 0 {\n        \"Contract not installed?\".to_string()\n    } else {\n        format!(\"v_{number_of_matches}\")\n    };\n    let value_to_store = match get_stored_value::<String>(&key) {\n        Some(value_to_store) => value_to_store,\n        None => 
format!(\"Nothing found under key {key}\"),\n    };\n    let value = storage::new_uref(value_to_store);\n    runtime::put_key(KEY_PLACEHOLDER, value.into());\n}\n\npub fn install(contract_version: u32) {\n    let mut named_keys = NamedKeys::new();\n    let key = format!(\"v_{contract_version}\");\n    let value = format!(\"key_putter_v{contract_version}\");\n    named_keys.insert(key, storage::new_uref(value).into());\n    // Create the entry points for this contract.\n    let mut entry_points = EntryPoints::new();\n\n    entry_points.add_entry_point(EntryPoint::new(\n        ENTRY_POINT_PUT_KEY,\n        Vec::new(),\n        CLType::I32,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n\n    // Create a new contract package that can be upgraded.\n    let (stored_contract_hash, contract_version) = storage::new_contract(\n        entry_points,\n        Some(named_keys),\n        Some(CONTRACT_PACKAGE_NAME.to_string()),\n        Some(CONTRACT_ACCESS_UREF.to_string()),\n        None,\n    );\n\n    // Store the contract version in the context's named keys.\n    let version_uref = storage::new_uref(contract_version);\n    runtime::put_key(CONTRACT_VERSION_KEY, version_uref.into());\n\n    // Create a named key for the contract hash.\n    runtime::put_key(CONTRACT_KEY, stored_contract_hash.into());\n\n    let all_contracts_counter_uref = storage::new_uref(contract_version);\n    runtime::put_key(ALL_CONTRACTS_COUNTER, all_contracts_counter_uref.into());\n}\n\npub fn upgrade(contract_version: u32) {\n    let package_key = runtime::get_key(CONTRACT_PACKAGE_NAME).unwrap();\n    let mut named_keys = NamedKeys::new();\n    let key = format!(\"v_{contract_version}\");\n    let value = format!(\"key_putter_v{contract_version}\");\n    named_keys.insert(key, storage::new_uref(value).into());\n    // Create the entry points for this contract.\n    let mut entry_points = EntryPoints::new();\n\n    
entry_points.add_entry_point(EntryPoint::new(\n        ENTRY_POINT_PUT_KEY,\n        Vec::new(),\n        CLType::I32,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n    let contract_package_hash = match package_key {\n        Key::Hash(hash_addr) => hash_addr,\n        _ => panic!(\"shouldn't happen\"),\n    };\n    let (contract_hash, updated_contract_version) = storage::add_contract_version(\n        contract_package_hash.into(),\n        entry_points,\n        named_keys,\n        BTreeMap::new(),\n    );\n    let version_uref = storage::new_uref(updated_contract_version);\n    runtime::put_key(CONTRACT_VERSION_KEY, version_uref.into());\n\n    // Create a named key for the contract hash.\n    runtime::put_key(CONTRACT_KEY, contract_hash.into());\n\n    let all_contracts_counter_uref = storage::new_uref(contract_version);\n    runtime::put_key(ALL_CONTRACTS_COUNTER, all_contracts_counter_uref.into());\n}\n\n/// Entry point that executes automatically when a caller installs the contract.\n#[no_mangle]\npub extern \"C\" fn call() {\n    let package_key = runtime::get_key(CONTRACT_PACKAGE_NAME);\n    if package_key.is_none() {\n        //install\n        install(1);\n    } else {\n        let all_contracts_counter = get_stored_value::<u32>(ALL_CONTRACTS_COUNTER).unwrap();\n        upgrade(all_contracts_counter + 1)\n    }\n}\n\n/// Reads value from a named key.\npub fn get_stored_value<T>(name: &str) -> Option<T>\nwhere\n    T: FromBytes + CLTyped,\n{\n    let uref = get_uref(name);\n    storage::read(uref).unwrap()\n}\n\n/// Gets [`URef`] under a name.\nfn get_uref(name: &str) -> URef {\n    let key = runtime::get_key(name).ok_or(ApiError::MissingKey).unwrap();\n    key.try_into().unwrap()\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/list-authorization-keys/Cargo.toml",
    "content": "[package]\nname = \"list-authorization-keys\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"list_authorization_keys\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/list-authorization-keys/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::collections::BTreeSet;\n\nuse casper_contract::contract_api::runtime;\nuse casper_types::{account::AccountHash, ApiError};\n\nconst ARG_EXPECTED_AUTHORIZATION_KEYS: &str = \"expected_authorization_keys\";\n\n#[repr(u16)]\nenum UserError {\n    AssertionFail = 0,\n}\n\nimpl From<UserError> for ApiError {\n    fn from(error: UserError) -> ApiError {\n        ApiError::User(error as u16)\n    }\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let expected_authorized_keys: BTreeSet<AccountHash> =\n        runtime::get_named_arg(ARG_EXPECTED_AUTHORIZATION_KEYS);\n\n    let actual_authorized_keys = runtime::list_authorization_keys();\n\n    if expected_authorized_keys != actual_authorized_keys {\n        runtime::revert(UserError::AssertionFail)\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/list-named-keys/Cargo.toml",
    "content": "[package]\nname = \"list-named-keys\"\nversion = \"0.1.0\"\nauthors = [\"Fraser Hutchison <fraser@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"list_named_keys\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/list-named-keys/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::{string::String, vec::Vec};\n\nuse casper_contract::contract_api::runtime;\nuse casper_types::contracts::NamedKeys;\n\nconst ARG_INITIAL_NAMED_KEYS: &str = \"initial_named_args\";\nconst ARG_NEW_NAMED_KEYS: &str = \"new_named_keys\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    // Account starts with two known named keys: mint uref & handle payment uref.\n    let expected_initial_named_keys: NamedKeys = runtime::get_named_arg(ARG_INITIAL_NAMED_KEYS);\n\n    let actual_named_keys = runtime::list_named_keys();\n    assert_eq!(expected_initial_named_keys, actual_named_keys);\n\n    // Add further named keys and assert that each is returned in `list_named_keys()`.\n    let new_named_keys: NamedKeys = runtime::get_named_arg(ARG_NEW_NAMED_KEYS);\n    let mut expected_named_keys = expected_initial_named_keys;\n\n    for (name, key) in new_named_keys.iter() {\n        runtime::put_key(name, *key);\n        assert!(expected_named_keys.insert(name.clone(), *key).is_none());\n        let actual_named_keys = runtime::list_named_keys();\n        assert_eq!(expected_named_keys, actual_named_keys);\n    }\n\n    // Remove all named keys and check that removed keys aren't returned in `list_named_keys()`.\n    let all_key_names: Vec<String> = expected_named_keys.names().cloned().collect();\n    for key in all_key_names {\n        runtime::remove_key(&key);\n        assert!(expected_named_keys.remove(&key).is_some());\n        let actual_named_keys = runtime::list_named_keys();\n        assert_eq!(expected_named_keys, actual_named_keys);\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/load-caller-info/Cargo.toml",
    "content": "[package]\nname = \"load-caller-info\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html\n\n[[bin]]\nname = \"load_caller_info\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/load-caller-info/src/main.rs",
    "content": "#![no_main]\n#![no_std]\n\nextern crate alloc;\n\nuse alloc::{string::ToString, vec};\n\nuse casper_contract::{\n    contract_api::{runtime, runtime::revert, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    AddressableEntityHash, ApiError, CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment,\n    EntryPointType, EntryPoints, Key,\n};\n\nconst PACKAGE_NAME: &str = \"load_caller_info_package\";\nconst CONTRACT_HASH: &str = \"load_caller_info_contract_hash\";\nconst PACKAGE_ACCESS_KEY: &str = \"package_access_key\";\n\n#[no_mangle]\npub extern \"C\" fn initiator() {\n    let initiator = runtime::get_call_initiator().unwrap_or_revert();\n    runtime::put_key(\"initiator\", Key::URef(storage::new_uref(initiator)))\n}\n\n#[no_mangle]\npub extern \"C\" fn get_immediate_caller() {\n    let initiator = runtime::get_immediate_caller().unwrap_or_revert();\n    runtime::put_key(\"immediate\", Key::URef(storage::new_uref(initiator)))\n}\n\n#[no_mangle]\npub extern \"C\" fn get_full_stack() {\n    let initiator = runtime::get_call_stack();\n    if initiator.is_empty() {\n        revert(ApiError::User(10))\n    }\n    runtime::put_key(\"full\", Key::URef(storage::new_uref(initiator)))\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n        let initiator_entry_point = EntityEntryPoint::new(\n            \"initiator\".to_string(),\n            vec![],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        let immediate_entry_point = EntityEntryPoint::new(\n            \"get_immediate_caller\".to_string(),\n            vec![],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        let full_stack_entry_point = EntityEntryPoint::new(\n   
         \"get_full_stack\".to_string(),\n            vec![],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(initiator_entry_point);\n        entry_points.add_entry_point(immediate_entry_point);\n        entry_points.add_entry_point(full_stack_entry_point);\n        entry_points\n    };\n\n    let (contract_hash, _contract_version) = storage::new_contract(\n        entry_points,\n        None,\n        Some(PACKAGE_NAME.to_string()),\n        Some(PACKAGE_ACCESS_KEY.to_string()),\n        None,\n    );\n\n    runtime::put_key(\n        CONTRACT_HASH,\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/main-purse/Cargo.toml",
    "content": "[package]\nname = \"main-purse\"\nversion = \"0.1.0\"\nauthors = [\"Ed Hastings <ed@casper.network>, Henry Till <henrytill@gmail.com>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"main_purse\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/main-purse/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::contract_api::{account, runtime};\nuse casper_types::{AccessRights, ApiError, URef};\n\nconst ARG_PURSE: &str = \"purse\";\n\n#[repr(u16)]\nenum Error {\n    MainPurseShouldNotBeWriteable = 1,\n    MainPurseShouldHaveReadAddRights = 2,\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let known_main_purse: URef = runtime::get_named_arg(ARG_PURSE);\n    let main_purse: URef = account::get_main_purse();\n    if known_main_purse.is_writeable() {\n        runtime::revert(ApiError::User(Error::MainPurseShouldNotBeWriteable as u16))\n    }\n    if main_purse.with_access_rights(AccessRights::READ_ADD) != known_main_purse {\n        runtime::revert(ApiError::User(\n            Error::MainPurseShouldHaveReadAddRights as u16,\n        ));\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/manage-groups/Cargo.toml",
    "content": "[package]\nname = \"manage-groups\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"manage_groups\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/manage-groups/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[macro_use]\nextern crate alloc;\n\nuse alloc::{\n    boxed::Box,\n    collections::{BTreeMap, BTreeSet},\n    string::{String, ToString},\n    vec::Vec,\n};\n\nuse core::{convert::TryInto, iter::FromIterator, mem::MaybeUninit};\n\nuse casper_contract::{\n    contract_api::{self, runtime, storage},\n    ext_ffi,\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    addressable_entity::{EntityEntryPoint, EntryPointAccess, EntryPointType, EntryPoints},\n    api_error,\n    bytesrepr::{self, ToBytes},\n    contracts::{ContractPackage, ContractPackageHash, NamedKeys},\n    ApiError, CLType, EntryPointPayment, Group, Key, Parameter, URef,\n};\n\nconst PACKAGE_HASH_KEY: &str = \"package_hash_key\";\nconst PACKAGE_ACCESS_KEY: &str = \"package_access_key\";\nconst CREATE_GROUP: &str = \"create_group\";\nconst REMOVE_GROUP: &str = \"remove_group\";\nconst EXTEND_GROUP_UREFS: &str = \"extend_group_urefs\";\nconst REMOVE_GROUP_UREFS: &str = \"remove_group_urefs\";\nconst GROUP_NAME_ARG: &str = \"group_name\";\nconst UREFS_ARG: &str = \"urefs\";\nconst TOTAL_NEW_UREFS_ARG: &str = \"total_new_urefs\";\nconst TOTAL_EXISTING_UREFS_ARG: &str = \"total_existing_urefs\";\nconst UREF_INDICES_ARG: &str = \"uref_indices\";\n\n#[no_mangle]\npub extern \"C\" fn create_group() {\n    let package_hash_key =\n        runtime::get_key(PACKAGE_HASH_KEY).unwrap_or_revert_with(ApiError::User(15));\n    let contract_package_hash = package_hash_key\n        .into_hash_addr()\n        .unwrap_or_revert_with(ApiError::User(16));\n    let group_name: String = runtime::get_named_arg(GROUP_NAME_ARG);\n    let total_urefs: u64 = runtime::get_named_arg(TOTAL_NEW_UREFS_ARG);\n    let total_existing_urefs: u64 = runtime::get_named_arg(TOTAL_EXISTING_UREFS_ARG);\n    let existing_urefs: Vec<URef> = (0..total_existing_urefs).map(storage::new_uref).collect();\n\n    storage::create_contract_user_group(\n        
ContractPackageHash::new(contract_package_hash),\n        &group_name,\n        total_urefs as u8,\n        BTreeSet::from_iter(existing_urefs),\n    )\n    .unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn remove_group() {\n    let package_hash_key =\n        runtime::get_key(PACKAGE_HASH_KEY).unwrap_or_revert_with(ApiError::User(15));\n    let contract_package_hash = package_hash_key\n        .into_hash_addr()\n        .unwrap_or_revert_with(ApiError::User(16));\n    let group_name: String = runtime::get_named_arg(GROUP_NAME_ARG);\n    storage::remove_contract_user_group(\n        ContractPackageHash::new(contract_package_hash),\n        &group_name,\n    )\n    .unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn extend_group_urefs() {\n    let package_hash_key =\n        runtime::get_key(PACKAGE_HASH_KEY).unwrap_or_revert_with(ApiError::User(15));\n    let contract_package_hash = package_hash_key\n        .into_hash_addr()\n        .unwrap_or_revert_with(ApiError::User(16));\n    let group_name: String = runtime::get_named_arg(GROUP_NAME_ARG);\n    let new_urefs_count: u64 = runtime::get_named_arg(TOTAL_NEW_UREFS_ARG);\n\n    // Provisions additional urefs inside group\n    for _ in 1..=new_urefs_count {\n        let _new_uref = storage::provision_contract_user_group_uref(\n            ContractPackageHash::new(contract_package_hash),\n            &group_name,\n        )\n        .unwrap_or_revert();\n    }\n}\n\nfn read_host_buffer_into(dest: &mut [u8]) -> Result<usize, ApiError> {\n    let mut bytes_written = MaybeUninit::uninit();\n    let ret = unsafe {\n        ext_ffi::casper_read_host_buffer(dest.as_mut_ptr(), dest.len(), bytes_written.as_mut_ptr())\n    };\n    // NOTE: When rewriting below expression as `result_from(ret).map(|_| unsafe { ... 
})`, and the\n    // caller ignores the return value, execution of the contract becomes unstable and ultimately\n    // leads to `Unreachable` error.\n    api_error::result_from(ret)?;\n    Ok(unsafe { bytes_written.assume_init() })\n}\n\nfn read_contract_package(\n    package_hash: ContractPackageHash,\n) -> Result<Option<ContractPackage>, ApiError> {\n    let key = Key::from(package_hash);\n    let (key_ptr, key_size, _bytes) = {\n        let bytes = key.into_bytes().unwrap_or_revert();\n        let ptr = bytes.as_ptr();\n        let size = bytes.len();\n        (ptr, size, bytes)\n    };\n\n    let value_size = {\n        let mut value_size = MaybeUninit::uninit();\n        let ret = unsafe { ext_ffi::casper_read_value(key_ptr, key_size, value_size.as_mut_ptr()) };\n        match api_error::result_from(ret) {\n            Ok(_) => unsafe { value_size.assume_init() },\n            Err(ApiError::ValueNotFound) => return Ok(None),\n            Err(e) => runtime::revert(e),\n        }\n    };\n\n    let value_bytes = {\n        let mut dest: Vec<u8> = if value_size == 0 {\n            Vec::new()\n        } else {\n            let bytes_non_null_ptr = contract_api::alloc_bytes(value_size);\n            unsafe { Vec::from_raw_parts(bytes_non_null_ptr.as_ptr(), value_size, value_size) }\n        };\n        read_host_buffer_into(&mut dest)?;\n        dest\n    };\n\n    Ok(Some(bytesrepr::deserialize(value_bytes)?))\n}\n\n#[no_mangle]\npub extern \"C\" fn remove_group_urefs() {\n    let package_hash_key =\n        runtime::get_key(PACKAGE_HASH_KEY).unwrap_or_revert_with(ApiError::User(15));\n    let contract_package_hash = package_hash_key\n        .into_hash_addr()\n        .unwrap_or_revert_with(ApiError::User(16));\n    let _package_access_key: URef = runtime::get_key(PACKAGE_ACCESS_KEY)\n        .unwrap_or_revert()\n        .try_into()\n        .unwrap();\n    let group_name: String = runtime::get_named_arg(GROUP_NAME_ARG);\n    let ordinals: Vec<u64> = 
runtime::get_named_arg(UREF_INDICES_ARG);\n\n    let contract_package: ContractPackage =\n        read_contract_package(ContractPackageHash::new(contract_package_hash))\n            .unwrap_or_revert()\n            .unwrap_or_revert();\n\n    let group_urefs = contract_package\n        .groups()\n        .get(&Group::new(\"Group 1\"))\n        .unwrap_or_revert();\n    let group_urefs_vec = Vec::from_iter(group_urefs);\n\n    let mut urefs_to_remove = BTreeSet::new();\n    for ordinal in ordinals {\n        urefs_to_remove.insert(\n            group_urefs_vec\n                .get(ordinal as usize)\n                .cloned()\n                .cloned()\n                .unwrap_or_revert(),\n        );\n    }\n\n    storage::remove_contract_user_group_urefs(\n        ContractPackageHash::new(contract_package_hash),\n        &group_name,\n        urefs_to_remove,\n    )\n    .unwrap_or_revert();\n}\n\n/// Restricted uref comes from creating a group and will be assigned to a smart contract\nfn create_entry_points_1() -> EntryPoints {\n    let mut entry_points = EntryPoints::new();\n    let restricted_session = EntityEntryPoint::new(\n        CREATE_GROUP.to_string(),\n        vec![\n            Parameter::new(GROUP_NAME_ARG, CLType::String),\n            Parameter::new(TOTAL_EXISTING_UREFS_ARG, CLType::U64),\n            Parameter::new(TOTAL_NEW_UREFS_ARG, CLType::U64),\n        ],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Caller,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(restricted_session);\n\n    let remove_group = EntityEntryPoint::new(\n        REMOVE_GROUP.to_string(),\n        vec![Parameter::new(GROUP_NAME_ARG, CLType::String)],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Caller,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(remove_group);\n\n    let entry_point_name = EXTEND_GROUP_UREFS.to_string();\n    let 
extend_group_urefs = EntityEntryPoint::new(\n        entry_point_name,\n        vec![\n            Parameter::new(GROUP_NAME_ARG, CLType::String),\n            Parameter::new(TOTAL_NEW_UREFS_ARG, CLType::U64),\n        ],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Caller,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(extend_group_urefs);\n\n    let entry_point_name = REMOVE_GROUP_UREFS.to_string();\n    let remove_group_urefs = EntityEntryPoint::new(\n        entry_point_name,\n        vec![\n            Parameter::new(GROUP_NAME_ARG, CLType::String),\n            Parameter::new(UREFS_ARG, CLType::List(Box::new(CLType::URef))),\n        ],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Caller,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(remove_group_urefs);\n    entry_points\n}\n\nfn install_version_1(package_hash: ContractPackageHash) {\n    let contract_named_keys = NamedKeys::new();\n\n    let entry_points = create_entry_points_1();\n    storage::add_contract_version(\n        package_hash,\n        entry_points,\n        contract_named_keys,\n        BTreeMap::new(),\n    );\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let (package_hash, access_uref) = storage::create_contract_package_at_hash();\n\n    runtime::put_key(PACKAGE_HASH_KEY, package_hash.into());\n    runtime::put_key(PACKAGE_ACCESS_KEY, access_uref.into());\n\n    install_version_1(package_hash);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/measure-gas-subcall/Cargo.toml",
    "content": "[package]\nname = \"measure-gas-subcall\"\nversion = \"0.1.0\"\nauthors = [\"Bartłomiej Kamiński <bart@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"measure_gas_subcall\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/measure-gas-subcall/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\n\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    addressable_entity::Parameters, contracts::ContractHash, ApiError, CLType, CLValue,\n    EntityEntryPoint, EntityVersion, EntryPointAccess, EntryPointPayment, EntryPointType,\n    EntryPoints, Phase, RuntimeArgs,\n};\n\nconst ARG_TARGET: &str = \"target_contract\";\nconst NOOP_EXT: &str = \"noop_ext\";\nconst GET_PHASE_EXT: &str = \"get_phase_ext\";\n\n#[repr(u16)]\nenum CustomError {\n    UnexpectedPhaseInline = 0,\n    UnexpectedPhaseSub = 1,\n}\n\n#[no_mangle]\npub extern \"C\" fn get_phase_ext() {\n    let phase = runtime::get_phase();\n    runtime::ret(CLValue::from_t(phase).unwrap_or_revert())\n}\n\n#[no_mangle]\npub extern \"C\" fn noop_ext() {\n    runtime::ret(CLValue::from_t(()).unwrap_or_revert())\n}\n\nfn store() -> (ContractHash, EntityVersion) {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n\n        let entry_point_1 = EntityEntryPoint::new(\n            NOOP_EXT,\n            Parameters::default(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n\n        entry_points.add_entry_point(entry_point_1);\n\n        let entry_point_2 = EntityEntryPoint::new(\n            GET_PHASE_EXT,\n            Parameters::default(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n\n        entry_points.add_entry_point(entry_point_2);\n\n        entry_points\n    };\n    storage::new_contract(entry_points, None, None, None, None)\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    const NOOP_EXT: &str = \"noop_ext\";\n    const GET_PHASE_EXT: &str = \"get_phase_ext\";\n\n    let method_name: String 
= runtime::get_named_arg(ARG_TARGET);\n    match method_name.as_str() {\n        \"no-subcall\" => {\n            let phase = runtime::get_phase();\n            if phase != Phase::Session {\n                runtime::revert(ApiError::User(CustomError::UnexpectedPhaseInline as u16))\n            }\n        }\n        \"do-nothing\" => {\n            let (reference, _contract_version) = store();\n            runtime::call_contract(reference, NOOP_EXT, RuntimeArgs::default())\n        }\n        \"do-something\" => {\n            let (reference, _contract_version) = store();\n            let phase: Phase =\n                runtime::call_contract(reference, GET_PHASE_EXT, RuntimeArgs::default());\n            if phase != Phase::Session {\n                runtime::revert(ApiError::User(CustomError::UnexpectedPhaseSub as u16))\n            }\n        }\n        _ => {}\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/mint-purse/Cargo.toml",
    "content": "[package]\nname = \"mint-purse\"\nversion = \"0.1.0\"\nauthors = [\"Michael Birch <birchmd@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"mint_purse\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/mint-purse/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::{\n    contract_api::{runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{runtime_args, system::mint, ApiError, URef, U512};\n\nconst METHOD_MINT: &str = \"mint\";\nconst METHOD_BALANCE: &str = \"balance\";\n\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_PURSE: &str = \"purse\";\n\n#[repr(u16)]\nenum Error {\n    BalanceNotFound = 0,\n    BalanceMismatch,\n}\n\nfn mint_purse(amount: U512) -> Result<URef, mint::Error> {\n    runtime::call_contract(\n        system::get_mint(),\n        METHOD_MINT,\n        runtime_args! {\n            ARG_AMOUNT => amount,\n        },\n    )\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let amount: U512 = 12345.into();\n    let new_purse = mint_purse(amount).unwrap_or_revert();\n\n    let mint = system::get_mint();\n\n    let balance: Option<U512> = runtime::call_contract(\n        mint,\n        METHOD_BALANCE,\n        runtime_args! {\n            ARG_PURSE => new_purse,\n        },\n    );\n\n    match balance {\n        None => runtime::revert(ApiError::User(Error::BalanceNotFound as u16)),\n        Some(balance) if balance == amount => (),\n        _ => runtime::revert(ApiError::User(Error::BalanceMismatch as u16)),\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/mint-transfer-proxy/Cargo.toml",
    "content": "[package]\nname = \"mint-transfer-proxy\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"mint_transfer_proxy\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/mint-transfer-proxy/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n// casper_contract is required for it's [global_alloc] as well as handlers (such as panic_handler)\nuse casper_contract::{\n    contract_api::{account, runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{account::AccountHash, runtime_args, system::mint, URef, U512};\n\nfn mint_transfer(\n    maybe_to: Option<AccountHash>,\n    source: URef,\n    target: URef,\n    amount: U512,\n    id: Option<u64>,\n) -> Result<(), mint::Error> {\n    let args = runtime_args! {\n        mint::ARG_TO => maybe_to,\n        mint::ARG_SOURCE => source,\n        mint::ARG_TARGET => target,\n        mint::ARG_AMOUNT => amount,\n        mint::ARG_ID => id,\n    };\n    let mint_hash = system::get_mint();\n    runtime::call_contract(mint_hash, mint::METHOD_TRANSFER, args)\n}\n\nfn delegate() {\n    let to: Option<AccountHash> = runtime::get_named_arg(\"to\");\n    let amount: U512 = runtime::get_named_arg(\"amount\");\n    let main_purse = account::get_main_purse();\n    let target_purse = main_purse;\n    let id: Option<u64> = None;\n    mint_transfer(to, main_purse, target_purse, amount, id).unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    delegate();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/multisig-authorization/Cargo.toml",
    "content": "[package]\nname = \"multisig-authorization\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"multisig_authorization\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/multisig-authorization/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::{collections::BTreeSet, string::ToString};\nuse casper_contract::contract_api::{runtime, storage};\nuse casper_types::{\n    account::AccountHash, addressable_entity::Parameters, AddressableEntityHash, ApiError, CLType,\n    EntityEntryPoint, EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key,\n};\n\nconst ROLE_A_KEYS: [AccountHash; 3] = [\n    AccountHash::new([1; 32]),\n    AccountHash::new([2; 32]),\n    AccountHash::new([3; 32]),\n];\nconst ROLE_B_KEYS: [AccountHash; 3] = [\n    AccountHash::new([4; 32]),\n    AccountHash::new([5; 32]),\n    AccountHash::new([6; 32]),\n];\n\nconst ACCESS_KEY: &str = \"access_key\";\nconst CONTRACT_KEY: &str = \"contract\";\nconst ENTRYPOINT_A: &str = \"entrypoint_a\";\nconst ENTRYPOINT_B: &str = \"entrypoint_b\";\nconst CONTRACT_PACKAGE_KEY: &str = \"contract_package\";\n\n#[repr(u16)]\nenum UserError {\n    /// Deploy was signed using a key that does not belong to a role.\n    PermissionDenied = 0,\n}\n\nimpl From<UserError> for ApiError {\n    fn from(user_error: UserError) -> Self {\n        ApiError::User(user_error as u16)\n    }\n}\n\n/// Checks if at least one of provided authorization keys belongs to a role defined as a slice of\n/// `AccountHash`es.\nfn has_role_access_to(role_keys: &[AccountHash]) -> bool {\n    let authorization_keys = runtime::list_authorization_keys();\n    let role_b_keys: BTreeSet<AccountHash> = role_keys.iter().copied().collect();\n    authorization_keys.intersection(&role_b_keys).count() > 0\n}\n\n#[no_mangle]\npub extern \"C\" fn entrypoint_a() {\n    if !has_role_access_to(&ROLE_A_KEYS) {\n        // None of the authorization keys used to sign this deploy matched ROLE_A\n        runtime::revert(UserError::PermissionDenied)\n    }\n\n    // Restricted code\n}\n\n#[no_mangle]\npub extern \"C\" fn entrypoint_b() {\n    if !has_role_access_to(&ROLE_B_KEYS) {\n        // None of the authorization keys 
used to sign this deploy matched ROLE_B\n        runtime::revert(UserError::PermissionDenied)\n    }\n\n    // Restricted code\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n\n        let entrypoint_a = EntityEntryPoint::new(\n            ENTRYPOINT_A,\n            Parameters::default(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n\n        let entrypoint_b = EntityEntryPoint::new(\n            ENTRYPOINT_B,\n            Parameters::default(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n\n        entry_points.add_entry_point(entrypoint_a);\n        entry_points.add_entry_point(entrypoint_b);\n\n        entry_points\n    };\n\n    let (contract_hash, _version) = storage::new_contract(\n        entry_points,\n        None,\n        Some(CONTRACT_PACKAGE_KEY.to_string()),\n        Some(ACCESS_KEY.to_string()),\n        None,\n    );\n\n    runtime::put_key(\n        CONTRACT_KEY,\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/named-dictionary-test/Cargo.toml",
    "content": "[package]\nname = \"named-dictionary-test\"\nversion = \"0.1.0\"\nauthors = [\"Luís Fernando Schultz Xavier da Silveira <luis@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"named-dictionary-test\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/named-dictionary-test/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\nuse alloc::{\n    collections::BTreeMap,\n    string::{String, ToString},\n    vec::Vec,\n};\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\n\ntype DictIndex = u8; // Must fit into `usize`.\ntype KeySeed = u8;\ntype Value = u8;\nconst DICTIONARY_NAMES: &[&str] = &[\n    \"the\", \"quick\", \"brown\", \"fox\", \"jumps\", \"over\", \"the_\", \"lazy\", \"dog\",\n];\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let puts: Vec<(DictIndex, KeySeed, Value)> = runtime::get_named_arg(\"puts\");\n\n    for name in DICTIONARY_NAMES {\n        let _ = storage::new_dictionary(name).unwrap_or_revert();\n    }\n\n    let mut maps: Vec<BTreeMap<String, Value>> = (0..DICTIONARY_NAMES.len())\n        .map(|_| BTreeMap::new())\n        .collect();\n    for (dict_index, key_seed, value) in puts {\n        let dict_index = dict_index as usize;\n        assert!(dict_index < DICTIONARY_NAMES.len());\n        let key = key_seed.to_string();\n        assert_eq!(\n            maps[dict_index].get(&key),\n            storage::named_dictionary_get(DICTIONARY_NAMES[dict_index], &key)\n                .unwrap_or_revert()\n                .as_ref()\n        );\n        storage::named_dictionary_put(DICTIONARY_NAMES[dict_index], &key, value);\n        maps[dict_index].insert(key, value);\n    }\n\n    for i in 0..DICTIONARY_NAMES.len() {\n        for (key, &value) in maps[i].iter() {\n            assert_eq!(\n                storage::named_dictionary_get(DICTIONARY_NAMES[i], key).unwrap_or_revert(),\n                Some(value)\n            );\n        }\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/named-keys/Cargo.toml",
    "content": "[package]\nname = \"named-keys\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"named_keys\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/named-keys/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::{String, ToString};\nuse core::convert::TryInto;\n\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{bytesrepr::ToBytes, ApiError, CLTyped, Key, U512};\n\nfn create_uref<T: CLTyped + ToBytes>(key_name: &str, value: T) {\n    let key: Key = storage::new_uref(value).into();\n    runtime::put_key(key_name, key);\n}\n\nconst COMMAND_CREATE_UREF1: &str = \"create-uref1\";\nconst COMMAND_CREATE_UREF2: &str = \"create-uref2\";\nconst COMMAND_REMOVE_UREF1: &str = \"remove-uref1\";\nconst COMMAND_REMOVE_UREF2: &str = \"remove-uref2\";\nconst COMMAND_TEST_READ_UREF1: &str = \"test-read-uref1\";\nconst COMMAND_TEST_READ_UREF2: &str = \"test-read-uref2\";\nconst COMMAND_INCREASE_UREF2: &str = \"increase-uref2\";\nconst COMMAND_OVERWRITE_UREF2: &str = \"overwrite-uref2\";\nconst ARG_COMMAND: &str = \"command\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let command: String = runtime::get_named_arg(ARG_COMMAND);\n\n    match command.as_str() {\n        COMMAND_CREATE_UREF1 => create_uref(\"hello-world\", String::from(\"Hello, world!\")),\n        COMMAND_CREATE_UREF2 => create_uref(\"big-value\", U512::max_value()),\n        COMMAND_REMOVE_UREF1 => runtime::remove_key(\"hello-world\"),\n        COMMAND_REMOVE_UREF2 => runtime::remove_key(\"big-value\"),\n        COMMAND_TEST_READ_UREF1 => {\n            // Read data hidden behind `URef1` uref\n            let hello_world: String = storage::read(\n                (*runtime::list_named_keys()\n                    .get(\"hello-world\")\n                    .expect(\"Unable to get hello-world\"))\n                .try_into()\n                .expect(\"Unable to convert to uref\"),\n            )\n            .expect(\"Unable to deserialize URef\")\n            .expect(\"Unable to find value\");\n            assert_eq!(hello_world, \"Hello, world!\");\n\n            
// Read data through dedicated FFI function\n            let uref1 = runtime::get_key(\"hello-world\").unwrap_or_revert();\n\n            let uref = uref1.try_into().unwrap_or_revert_with(ApiError::User(101));\n            let hello_world = storage::read(uref);\n            assert_eq!(hello_world, Ok(Some(\"Hello, world!\".to_string())));\n        }\n        COMMAND_TEST_READ_UREF2 => {\n            // Get the big value back\n            let big_value_key =\n                runtime::get_key(\"big-value\").unwrap_or_revert_with(ApiError::User(102));\n            let big_value_ref = big_value_key.try_into().unwrap_or_revert();\n            let big_value = storage::read(big_value_ref);\n            assert_eq!(big_value, Ok(Some(U512::max_value())));\n        }\n        COMMAND_INCREASE_UREF2 => {\n            // Get the big value back\n            let big_value_key =\n                runtime::get_key(\"big-value\").unwrap_or_revert_with(ApiError::User(102));\n            let big_value_ref = big_value_key.try_into().unwrap_or_revert();\n            // Increase by 1\n            storage::add(big_value_ref, U512::one());\n            let new_big_value = storage::read(big_value_ref);\n            assert_eq!(new_big_value, Ok(Some(U512::zero())));\n        }\n        COMMAND_OVERWRITE_UREF2 => {\n            // Get the big value back\n            let big_value_key =\n                runtime::get_key(\"big-value\").unwrap_or_revert_with(ApiError::User(102));\n            let big_value_ref = big_value_key.try_into().unwrap_or_revert();\n            // I can overwrite some data under the pointer\n            storage::write(big_value_ref, U512::from(123_456_789u64));\n            let new_value = storage::read(big_value_ref);\n            assert_eq!(new_value, Ok(Some(U512::from(123_456_789u64))));\n        }\n        _ => runtime::revert(ApiError::InvalidArgument),\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/named-keys-stored/Cargo.toml",
    "content": "[package]\nname = \"named-keys-stored\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"named_keys_stored\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/named-keys-stored/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::{collections::BTreeMap, string::ToString};\n\nuse casper_contract::{\n    self,\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    addressable_entity::Parameters, ApiError, CLType, EntityEntryPoint, EntryPointAccess,\n    EntryPointPayment, EntryPointType, EntryPoints, Key, NamedKeys, PackageHash, RuntimeArgs,\n};\n\nconst ENTRY_POINT_CONTRACT: &str = \"named_keys_contract\";\nconst ENTRY_POINT_CONTRACT_TO_CONTRACT: &str = \"named_keys_contract_to_contract\";\nconst ENTRY_POINT_SESSION_TO_SESSION: &str = \"named_keys_session_to_session\";\nconst ENTRY_POINT_SESSION: &str = \"named_keys_session\";\nconst CONTRACT_PACKAGE_HASH_NAME: &str = \"contract_package_stored\";\nconst CONTRACT_HASH_NAME: &str = \"contract_stored\";\nconst CONTRACT_VERSION: &str = \"contract_version\";\n\n#[repr(u16)]\nenum Error {\n    HasWrongNamedKeys,\n    FoundNamedKey1,\n    FoundNamedKey2,\n    FoundNamedKey3,\n    FoundNamedKey4,\n    UnexpectedContractValidURef,\n    UnexpectedAccountValidURef,\n}\n\nimpl From<Error> for ApiError {\n    fn from(error: Error) -> Self {\n        ApiError::User(error as u16)\n    }\n}\n\n#[no_mangle]\npub extern \"C\" fn named_keys_contract() {\n    if runtime::get_key(\"account_named_key_1\").is_some()\n        || runtime::get_key(\"account_named_key_2\").is_some()\n        || runtime::get_key(\"account_named_key_3\").is_some()\n        || runtime::get_key(\"account_named_key_4\").is_some()\n    {\n        runtime::revert(Error::HasWrongNamedKeys);\n    }\n\n    if runtime::get_key(\"named_key_1\").is_none() {\n        runtime::revert(Error::FoundNamedKey1);\n    }\n    if runtime::get_key(\"named_key_2\").is_none() {\n        runtime::revert(Error::FoundNamedKey2);\n    }\n    if runtime::get_key(\"named_key_3\").is_none() {\n        runtime::revert(Error::FoundNamedKey3);\n    }\n    let uref_key = 
runtime::get_key(\"named_key_4\").unwrap_or_revert_with(Error::FoundNamedKey4);\n    let uref = uref_key.into_uref().unwrap();\n    if !runtime::is_valid_uref(uref) {\n        runtime::revert(Error::UnexpectedContractValidURef);\n    }\n}\n\n#[no_mangle]\npub extern \"C\" fn named_keys_session() {\n    if runtime::get_key(\"named_key_1\").is_some()\n        || runtime::get_key(\"named_key_2\").is_some()\n        || runtime::get_key(\"named_key_3\").is_some()\n        || runtime::get_key(\"named_key_4\").is_some()\n    {\n        runtime::revert(Error::HasWrongNamedKeys);\n    }\n\n    if runtime::get_key(\"account_named_key_1\").is_none() {\n        runtime::revert(Error::FoundNamedKey1);\n    }\n    if runtime::get_key(\"account_named_key_2\").is_none() {\n        runtime::revert(Error::FoundNamedKey2);\n    }\n    if runtime::get_key(\"account_named_key_3\").is_none() {\n        runtime::revert(Error::FoundNamedKey3);\n    }\n    if runtime::get_key(\"account_named_key_4\").is_none() {\n        runtime::revert(Error::FoundNamedKey4);\n    }\n    let uref_key = runtime::get_key(\"account_named_key_4\")\n        .unwrap_or_revert_with(Error::UnexpectedContractValidURef);\n    let uref = uref_key.into_uref().unwrap();\n    if !runtime::is_valid_uref(uref) {\n        runtime::revert(Error::UnexpectedAccountValidURef);\n    }\n}\n\n#[no_mangle]\npub extern \"C\" fn named_keys_contract_to_contract() {\n    let package_hash = runtime::get_key(CONTRACT_PACKAGE_HASH_NAME)\n        .and_then(Key::into_package_addr)\n        .map(PackageHash::new)\n        .unwrap_or_revert();\n\n    runtime::call_versioned_contract::<()>(\n        package_hash.into(),\n        None,\n        ENTRY_POINT_CONTRACT,\n        RuntimeArgs::default(),\n    );\n}\n\n#[no_mangle]\npub extern \"C\" fn named_keys_session_to_session() {\n    let package_hash = runtime::get_key(CONTRACT_PACKAGE_HASH_NAME)\n        .and_then(Key::into_package_addr)\n        .map(PackageHash::new)\n        
.unwrap_or_revert();\n\n    runtime::call_versioned_contract::<()>(\n        package_hash.into(),\n        None,\n        ENTRY_POINT_SESSION,\n        RuntimeArgs::default(),\n    );\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    runtime::put_key(\"account_named_key_1\", Key::Hash([10; 32]));\n    runtime::put_key(\"account_named_key_2\", Key::Hash([11; 32]));\n    runtime::put_key(\"account_named_key_3\", Key::Hash([12; 32]));\n    let uref = storage::new_uref(());\n    runtime::put_key(\"account_named_key_4\", Key::from(uref));\n\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n        let contract_entrypoint = EntityEntryPoint::new(\n            ENTRY_POINT_CONTRACT.to_string(),\n            Parameters::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(contract_entrypoint);\n        let session_entrypoint = EntityEntryPoint::new(\n            ENTRY_POINT_SESSION.to_string(),\n            Parameters::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(session_entrypoint);\n        let contract_to_contract_entrypoint = EntityEntryPoint::new(\n            ENTRY_POINT_CONTRACT_TO_CONTRACT.to_string(),\n            Parameters::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(contract_to_contract_entrypoint);\n        let contract_to_contract_entrypoint = EntityEntryPoint::new(\n            ENTRY_POINT_SESSION_TO_SESSION.to_string(),\n            Parameters::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            
EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(contract_to_contract_entrypoint);\n        entry_points\n    };\n\n    let (contract_package_hash, _access) = storage::create_contract_package_at_hash();\n\n    let named_keys = {\n        let mut named_keys = NamedKeys::new();\n        named_keys.insert(\"named_key_1\".to_string(), Key::Hash([1; 32]));\n        named_keys.insert(\"named_key_2\".to_string(), Key::Hash([2; 32]));\n        named_keys.insert(\"named_key_3\".to_string(), Key::Hash([3; 32]));\n        let uref = storage::new_uref(());\n        named_keys.insert(\"named_key_4\".to_string(), Key::from(uref));\n        named_keys.insert(\n            CONTRACT_PACKAGE_HASH_NAME.to_string(),\n            Key::from(contract_package_hash),\n        );\n        named_keys\n    };\n\n    let (contract_hash, contract_version) = storage::add_contract_version(\n        contract_package_hash,\n        entry_points,\n        named_keys,\n        BTreeMap::new(),\n    );\n\n    runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into());\n    runtime::put_key(CONTRACT_PACKAGE_HASH_NAME, contract_package_hash.into());\n    runtime::put_key(CONTRACT_HASH_NAME, Key::Hash(contract_hash.value()));\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/named-keys-stored-call/Cargo.toml",
    "content": "[package]\nname = \"named-keys-stored-call\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"named_keys_stored_call\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/named-keys-stored-call/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\n\nuse casper_contract::{self, contract_api::runtime, unwrap_or_revert::UnwrapOrRevert};\nuse casper_types::{Key, RuntimeArgs};\n\nconst CONTRACT_HASH_NAME: &str = \"contract_stored\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let contract_hash = runtime::get_key(CONTRACT_HASH_NAME)\n        .and_then(Key::into_entity_hash)\n        .unwrap_or_revert();\n\n    let entry_point: String = runtime::get_named_arg(\"entry_point\");\n\n    runtime::call_contract::<()>(contract_hash.into(), &entry_point, RuntimeArgs::default());\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/new-named-uref/Cargo.toml",
    "content": "[package]\nname = \"new-named-uref\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"new_named_uref\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/new-named-uref/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\n\nuse casper_contract::contract_api::{runtime, storage};\n\nconst ARG_UREF_NAME: &str = \"uref_name\";\nconst INITIAL_DATA: &str = \"bawitdaba\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let uref_name: String = runtime::get_named_arg(ARG_UREF_NAME);\n    let uref = storage::new_uref(String::from(INITIAL_DATA));\n    runtime::put_key(&uref_name, uref.into());\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ordered-transforms/Cargo.toml",
    "content": "[package]\nname = \"ordered-transforms\"\nversion = \"0.1.0\"\nauthors = [\"Luís Fernando Schultz Xavier da Silveira <luis@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"ordered-transforms\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/ordered-transforms/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[macro_use]\nextern crate alloc;\n\nuse alloc::{string::ToString, vec::Vec};\n\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    contracts::NamedKeys, AddressableEntityHash, CLType, CLTyped, EntityEntryPoint,\n    EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key, Parameter, URef,\n};\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let mut entry_points = EntryPoints::new();\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        \"perform_operations\",\n        vec![Parameter::new(\n            \"operations\",\n            Vec::<(u8, u32, i32)>::cl_type(),\n        )],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n\n    let n: u32 = runtime::get_named_arg(\"n\");\n    let mut named_keys = NamedKeys::new();\n    for i in 0..n {\n        named_keys.insert(format!(\"uref-{}\", i), Key::URef(storage::new_uref(0_i32)));\n    }\n    named_keys.insert(\"n-urefs\".to_string(), Key::URef(storage::new_uref(n)));\n\n    let (contract_hash, _contract_version) =\n        storage::new_locked_contract(entry_points, Some(named_keys), None, None, None);\n    runtime::put_key(\n        \"ordered-transforms-contract-hash\",\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n}\n\n#[no_mangle]\npub extern \"C\" fn perform_operations() {\n    // List of operations to be performed by the contract.\n    // An operation is a tuple (t, i, v) where:\n    // * `t` is the operation type: 0 for reading, 1 for writing and 2 for adding;\n    // * `i` is the URef index;\n    // * `v` is the value to write or add (always zero for reads).\n    let operations: Vec<(u8, u32, i32)> = runtime::get_named_arg(\"operations\");\n    let n: u32 = storage::read(match runtime::get_key(\"n-urefs\").unwrap_or_revert() {\n   
     Key::URef(uref) => uref,\n        _ => panic!(\"Bad number of URefs.\"),\n    })\n    .unwrap_or_revert()\n    .unwrap_or_revert();\n    let urefs: Vec<URef> = (0..n)\n        .map(\n            |i| match runtime::get_key(&format!(\"uref-{}\", i)).unwrap_or_revert() {\n                Key::URef(uref) => uref,\n                _ => panic!(\"Bad URef.\"),\n            },\n        )\n        .collect();\n\n    for (t, i, v) in operations {\n        let uref = *urefs.get(i as usize).unwrap_or_revert();\n        match t {\n            0 => {\n                let _: Option<i32> = storage::read(uref).unwrap_or_revert();\n            }\n            1 => storage::write(uref, v),\n            2 => storage::add(uref, v),\n            _ => panic!(\"Bad transform type\"),\n        }\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/overwrite-uref-content/Cargo.toml",
    "content": "[package]\nname = \"overwrite-uref-content\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@papierski.net>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"overwrite_uref_content\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/overwrite-uref-content/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::contract_api::{runtime, storage};\nuse casper_types::{AccessRights, ApiError, URef};\n\nconst ARG_CONTRACT_UREF: &str = \"contract_uref\";\n\n#[repr(u16)]\nenum Error {\n    InvalidURefArg,\n}\n\nconst REPLACEMENT_DATA: &str = \"bawitdaba\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let uref: URef = runtime::get_named_arg(ARG_CONTRACT_UREF);\n\n    let is_valid = runtime::is_valid_uref(uref);\n    if !is_valid {\n        runtime::revert(ApiError::User(Error::InvalidURefArg as u16))\n    }\n\n    let forged_reference: URef = URef::new(uref.addr(), AccessRights::READ_ADD_WRITE);\n\n    storage::write(forged_reference, REPLACEMENT_DATA)\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/payment-purse-persist/Cargo.toml",
    "content": "[package]\nname = \"payment-purse-persist\"\nversion = \"0.1.0\"\nauthors = [\"Ed Hastings <ed@gmail.com>\", \"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"payment_purse_persist\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/payment-purse-persist/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\nuse casper_contract::contract_api::{runtime, runtime::put_key, system};\nuse casper_types::{contracts::ContractPackageHash, runtime_args, ApiError, RuntimeArgs, URef};\n\nconst GET_PAYMENT_PURSE: &str = \"get_payment_purse\";\nconst THIS_SHOULD_FAIL: &str = \"this_should_fail\";\n\nconst ARG_METHOD: &str = \"method\";\n\n/// This logic is intended to be used as SESSION PAYMENT LOGIC\n/// It gets the payment purse and attempts and attempts to persist it,\n/// which should fail.\n#[no_mangle]\npub extern \"C\" fn call() {\n    let method: String = runtime::get_named_arg(ARG_METHOD);\n\n    // handle payment contract\n    let handle_payment_contract_hash = system::get_handle_payment();\n\n    // get payment purse for current execution\n    let payment_purse: URef = runtime::call_contract(\n        handle_payment_contract_hash,\n        GET_PAYMENT_PURSE,\n        RuntimeArgs::default(),\n    );\n\n    if method == \"put_key\" {\n        // attempt to persist the payment purse, which should fail\n        put_key(THIS_SHOULD_FAIL, payment_purse.into());\n    } else if method == \"call_contract\" {\n        // attempt to call a contract with the payment purse, which should fail\n        let _payment_purse: URef = runtime::call_contract(\n            handle_payment_contract_hash,\n            GET_PAYMENT_PURSE,\n            runtime_args! {\n                \"payment_purse\" => payment_purse,\n            },\n        );\n\n        // should never reach here\n        runtime::revert(ApiError::User(1000));\n    } else if method == \"call_versioned_contract\" {\n        // attempt to call a versioned contract with the payment purse, which should fail\n        let _payment_purse: URef = runtime::call_versioned_contract(\n            ContractPackageHash::new(handle_payment_contract_hash.value()),\n            None, // Latest\n            GET_PAYMENT_PURSE,\n            runtime_args! 
{\n                \"payment_purse\" => payment_purse,\n            },\n        );\n\n        // should never reach here\n        runtime::revert(ApiError::User(1001));\n    } else {\n        runtime::revert(ApiError::User(2000));\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/purse-holder-stored/Cargo.toml",
    "content": "[package]\nname = \"purse-holder-stored\"\nversion = \"0.1.0\"\nauthors = [\"Ed Hastings <ed@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"purse_holder_stored\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/purse-holder-stored/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::{string::ToString, vec};\n\nuse alloc::string::String;\nuse casper_contract::{\n    self,\n    contract_api::{runtime, storage, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    AddressableEntityHash, CLType, CLValue, EntityEntryPoint, EntryPointAccess, EntryPointPayment,\n    EntryPointType, EntryPoints, Key, Parameter,\n};\n\npub const METHOD_ADD: &str = \"add\";\npub const METHOD_REMOVE: &str = \"remove\";\npub const METHOD_VERSION: &str = \"version\";\n\nconst ENTRY_POINT_ADD: &str = \"add_named_purse\";\nconst ENTRY_POINT_VERSION: &str = \"version\";\nconst HASH_KEY_NAME: &str = \"purse_holder\";\nconst ACCESS_KEY_NAME: &str = \"purse_holder_access\";\nconst ARG_PURSE: &str = \"purse_name\";\nconst ARG_IS_LOCKED: &str = \"is_locked\";\nconst VERSION: &str = \"1.0.0\";\nconst PURSE_HOLDER_STORED_CONTRACT_NAME: &str = \"purse_holder_stored\";\nconst CONTRACT_VERSION: &str = \"contract_version\";\n\n#[no_mangle]\npub extern \"C\" fn add_named_purse() {\n    let purse_name: String = runtime::get_named_arg(ARG_PURSE);\n    let purse = system::create_purse();\n    runtime::put_key(&purse_name, purse.into());\n}\n\n#[no_mangle]\npub extern \"C\" fn version() {\n    let ret = CLValue::from_t(VERSION).unwrap_or_revert();\n    runtime::ret(ret);\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let is_locked: bool = runtime::get_named_arg(ARG_IS_LOCKED);\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n        let add = EntityEntryPoint::new(\n            ENTRY_POINT_ADD.to_string(),\n            vec![Parameter::new(ARG_PURSE, CLType::String)],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(add);\n        let version = EntityEntryPoint::new(\n            ENTRY_POINT_VERSION.to_string(),\n   
         vec![],\n            CLType::String,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(version);\n        entry_points\n    };\n\n    let (contract_hash, contract_version) = if !is_locked {\n        storage::new_contract(\n            entry_points,\n            None,\n            Some(HASH_KEY_NAME.to_string()),\n            Some(ACCESS_KEY_NAME.to_string()),\n            None,\n        )\n    } else {\n        storage::new_locked_contract(\n            entry_points,\n            None,\n            Some(HASH_KEY_NAME.to_string()),\n            Some(ACCESS_KEY_NAME.to_string()),\n            None,\n        )\n    };\n\n    runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into());\n    runtime::put_key(\n        PURSE_HOLDER_STORED_CONTRACT_NAME,\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n    runtime::put_key(ENTRY_POINT_VERSION, storage::new_uref(VERSION).into());\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/purse-holder-stored-caller/Cargo.toml",
    "content": "[package]\nname = \"purse-holder-stored-caller\"\nversion = \"0.1.0\"\nauthors = [\"Ed Hastings <ed@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"purse_holder_stored_caller\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/purse-holder-stored-caller/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\n\nuse casper_contract::contract_api::{runtime, storage};\nuse casper_types::{runtime_args, AddressableEntityHash, RuntimeArgs};\n\nconst METHOD_VERSION: &str = \"version\";\nconst HASH_KEY_NAME: &str = \"purse_holder\";\nconst ENTRY_POINT_NAME: &str = \"entry_point\";\nconst PURSE_NAME: &str = \"purse_name\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_point_name: String = runtime::get_named_arg(ENTRY_POINT_NAME);\n\n    match entry_point_name.as_str() {\n        METHOD_VERSION => {\n            let contract_hash: AddressableEntityHash = runtime::get_named_arg(HASH_KEY_NAME);\n            let version: String = runtime::call_contract(\n                contract_hash.into(),\n                &entry_point_name,\n                RuntimeArgs::default(),\n            );\n            let version_key = storage::new_uref(version).into();\n            runtime::put_key(METHOD_VERSION, version_key);\n        }\n        _ => {\n            let contract_hash: AddressableEntityHash = runtime::get_named_arg(HASH_KEY_NAME);\n            let purse_name: String = runtime::get_named_arg(PURSE_NAME);\n\n            let args = runtime_args! {\n                PURSE_NAME => purse_name,\n            };\n            runtime::call_contract::<()>(contract_hash.into(), &entry_point_name, args);\n        }\n    };\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/purse-holder-stored-upgrader/Cargo.toml",
    "content": "[package]\nname = \"purse-holder-stored-upgrader\"\nversion = \"0.1.0\"\nauthors = [\"Ed Hastings <ed@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"purse_holder_stored_upgrader\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/purse-holder-stored-upgrader/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[macro_use]\nextern crate alloc;\n\nuse alloc::{collections::BTreeMap, string::String};\n\nuse casper_contract::{\n    contract_api::{runtime, storage, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    contracts::NamedKeys, CLType, CLValue, EntityEntryPoint, EntryPointAccess, EntryPointPayment,\n    EntryPointType, EntryPoints, Key, PackageHash, Parameter, URef,\n};\n\npub const METHOD_ADD: &str = \"add\";\npub const METHOD_REMOVE: &str = \"remove\";\npub const METHOD_VERSION: &str = \"version\";\npub const ARG_PURSE_NAME: &str = \"purse_name\";\npub const NEW_VERSION: &str = \"1.0.1\";\nconst VERSION: &str = \"version\";\nconst ACCESS_KEY_NAME: &str = \"purse_holder_access\";\nconst PURSE_HOLDER_STORED_CONTRACT_NAME: &str = \"purse_holder_stored\";\nconst ARG_CONTRACT_PACKAGE: &str = \"contract_package\";\nconst CONTRACT_VERSION: &str = \"contract_version\";\n\nfn purse_name() -> String {\n    runtime::get_named_arg(ARG_PURSE_NAME)\n}\n\n#[no_mangle]\npub extern \"C\" fn add() {\n    let purse_name = purse_name();\n    let purse = system::create_purse();\n    runtime::put_key(&purse_name, purse.into());\n}\n\n#[no_mangle]\npub extern \"C\" fn remove() {\n    let purse_name = purse_name();\n    runtime::remove_key(&purse_name);\n}\n\n#[no_mangle]\npub extern \"C\" fn version() {\n    runtime::ret(CLValue::from_t(VERSION).unwrap_or_revert())\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let contract_package: PackageHash = runtime::get_named_arg(ARG_CONTRACT_PACKAGE);\n    let _access_key: URef = runtime::get_key(ACCESS_KEY_NAME)\n        .expect(\"should have access key\")\n        .into_uref()\n        .expect(\"should be uref\");\n\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n        let add = EntityEntryPoint::new(\n            METHOD_ADD,\n            vec![Parameter::new(ARG_PURSE_NAME, CLType::String)],\n            CLType::Unit,\n            
EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(add);\n        let version = EntityEntryPoint::new(\n            METHOD_VERSION,\n            vec![],\n            CLType::String,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(version);\n\n        let remove = EntityEntryPoint::new(\n            METHOD_REMOVE,\n            vec![Parameter::new(ARG_PURSE_NAME, CLType::String)],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(remove);\n        entry_points\n    };\n    // this should overwrite the previous contract obj with the new contract obj at the same uref\n    let (new_contract_hash, new_contract_version) = storage::add_contract_version(\n        contract_package.into(),\n        entry_points,\n        NamedKeys::new(),\n        BTreeMap::new(),\n    );\n    runtime::put_key(\n        PURSE_HOLDER_STORED_CONTRACT_NAME,\n        Key::Hash(new_contract_hash.value()),\n    );\n    runtime::put_key(\n        CONTRACT_VERSION,\n        storage::new_uref(new_contract_version).into(),\n    );\n    // set new version\n    let version_key = storage::new_uref(NEW_VERSION).into();\n    runtime::put_key(VERSION, version_key);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/purse-holder-stored-upgrader-v2-2/Cargo.toml",
    "content": "[package]\nname = \"purse-holder-stored-upgrader-v2-2\"\nversion = \"0.1.0\"\nauthors = [\"Karan Dhareshwar <karan@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"purse_holder_stored_upgrader_v2_2\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/purse-holder-stored-upgrader-v2-2/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[macro_use]\nextern crate alloc;\n\nuse alloc::{collections::BTreeMap, string::String};\n\nuse casper_contract::{\n    contract_api::{runtime, storage, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    contracts::NamedKeys, CLType, CLValue, EntityEntryPoint, EntryPointAccess, EntryPointPayment,\n    EntryPointType, EntryPoints, Key, PackageHash, Parameter, URef,\n};\n\npub const METHOD_DELEGATE: &str = \"delegate\";\npub const METHOD_REMOVE: &str = \"remove\";\npub const METHOD_VERSION: &str = \"version\";\npub const ARG_PURSE_NAME: &str = \"purse_name\";\npub const NEW_VERSION: &str = \"1.0.1\";\nconst VERSION: &str = \"version\";\nconst ACCESS_KEY_NAME: &str = \"purse_holder_access\";\nconst PURSE_HOLDER_STORED_CONTRACT_NAME: &str = \"purse_holder_stored\";\nconst ARG_CONTRACT_PACKAGE: &str = \"contract_package\";\nconst CONTRACT_VERSION: &str = \"contract_version\";\n\nfn purse_name() -> String {\n    runtime::get_named_arg(ARG_PURSE_NAME)\n}\n\n#[no_mangle]\npub extern \"C\" fn delegate() {\n    let purse_name = purse_name();\n    let purse = system::create_purse();\n    runtime::put_key(&purse_name, purse.into());\n}\n\n#[no_mangle]\npub extern \"C\" fn remove() {\n    let purse_name = purse_name();\n    runtime::remove_key(&purse_name);\n}\n\n#[no_mangle]\npub extern \"C\" fn version() {\n    runtime::ret(CLValue::from_t(VERSION).unwrap_or_revert())\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let contract_package: PackageHash = runtime::get_named_arg(ARG_CONTRACT_PACKAGE);\n    let _access_key: URef = runtime::get_key(ACCESS_KEY_NAME)\n        .expect(\"should have access key\")\n        .into_uref()\n        .expect(\"should be uref\");\n\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n        let add = EntityEntryPoint::new(\n            METHOD_DELEGATE,\n            vec![Parameter::new(ARG_PURSE_NAME, CLType::String)],\n            
CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(add);\n        let version = EntityEntryPoint::new(\n            METHOD_VERSION,\n            vec![],\n            CLType::String,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(version);\n\n        let remove = EntityEntryPoint::new(\n            METHOD_REMOVE,\n            vec![Parameter::new(ARG_PURSE_NAME, CLType::String)],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(remove);\n        entry_points\n    };\n    // this should overwrite the previous contract obj with the new contract obj at the same uref\n    let (new_contract_hash, new_contract_version) = storage::add_contract_version(\n        contract_package.into(),\n        entry_points,\n        NamedKeys::new(),\n        BTreeMap::new(),\n    );\n    runtime::put_key(\n        PURSE_HOLDER_STORED_CONTRACT_NAME,\n        Key::Hash(new_contract_hash.value()),\n    );\n    runtime::put_key(\n        CONTRACT_VERSION,\n        storage::new_uref(new_contract_version).into(),\n    );\n    // set new version\n    let version_key = storage::new_uref(NEW_VERSION).into();\n    runtime::put_key(VERSION, version_key);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/random-bytes/Cargo.toml",
    "content": "[package]\nname = \"random-bytes\"\nversion = \"0.1.0\"\nauthors = [\"Rafał Chabowski <rafal@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"random_bytes\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/random-bytes/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::contract_api::{runtime, storage};\nuse casper_types::Phase;\n\nconst RANDOM_BYTES_RESULT: &str = \"random_bytes_result\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let get_phase = runtime::get_phase();\n    assert_ne!(\n        Phase::Payment,\n        get_phase,\n        \"should not be invoked in payment phase\"\n    );\n\n    let random_bytes = runtime::random_bytes();\n    let uref = storage::new_uref(random_bytes);\n    runtime::put_key(RANDOM_BYTES_RESULT, uref.into())\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/random-bytes-payment/Cargo.toml",
    "content": "[package]\nname = \"random-bytes-payment\"\nversion = \"0.1.0\"\nauthors = [\"Rafał Chabowski <rafal@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"random_bytes_payment\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/random-bytes-payment/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::{\n    contract_api::{account, runtime, storage, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{Phase, RuntimeArgs, URef, U512};\n\nconst GET_PAYMENT_PURSE: &str = \"get_payment_purse\";\nconst ARG_AMOUNT: &str = \"amount\";\n\nfn standard_payment(amount: U512) {\n    let main_purse = account::get_main_purse();\n\n    let handle_payment_pointer = system::get_handle_payment();\n\n    let payment_purse: URef = runtime::call_contract(\n        handle_payment_pointer,\n        GET_PAYMENT_PURSE,\n        RuntimeArgs::default(),\n    );\n\n    system::transfer_from_purse_to_purse(main_purse, payment_purse, amount, None).unwrap_or_revert()\n}\n\nconst RANDOM_BYTES_PAYMENT_RESULT: &str = \"random_bytes_payment_result\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let get_phase = runtime::get_phase();\n    assert_eq!(\n        Phase::Payment,\n        get_phase,\n        \"should only be invoked in payment phase\"\n    );\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n\n    let random_bytes = runtime::random_bytes();\n    let uref = storage::new_uref(random_bytes);\n    runtime::put_key(RANDOM_BYTES_PAYMENT_RESULT, uref.into());\n\n    standard_payment(amount);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/read-from-key/Cargo.toml",
    "content": "[package]\nname = \"read-from-key\"\nversion = \"0.1.0\"\nedition = \"2018\"\n\n[[bin]]\nname = \"read_from_key\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }"
  },
  {
    "path": "smart_contracts/contracts/test/read-from-key/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::{String, ToString};\n\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{ApiError, Key};\n\nconst DICTIONARY_NAME: &str = \"dictionary-name\";\nconst DICTIONARY_ITEM_KEY: &str = \"dictionary-item-key\";\nconst DICTIONARY_VALUE: &str = \"dictionary-value\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let dictionary_seed_uref = storage::new_dictionary(DICTIONARY_NAME).unwrap_or_revert();\n    storage::dictionary_put(\n        dictionary_seed_uref,\n        DICTIONARY_ITEM_KEY,\n        DICTIONARY_VALUE.to_string(),\n    );\n    let dictionary_address_key =\n        Key::dictionary(dictionary_seed_uref, DICTIONARY_ITEM_KEY.as_bytes());\n    let value_via_read = storage::read_from_key::<String>(dictionary_address_key)\n        .unwrap_or_revert()\n        .unwrap_or_revert();\n    let value_via_get: String = storage::dictionary_get(dictionary_seed_uref, DICTIONARY_ITEM_KEY)\n        .unwrap_or_revert()\n        .unwrap_or_revert();\n    if value_via_read != *DICTIONARY_VALUE {\n        runtime::revert(ApiError::User(16u16))\n    }\n    if value_via_get != value_via_read {\n        runtime::revert(ApiError::User(17u16))\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/recover-secp256k1/Cargo.toml",
    "content": "[package]\nname = \"recover-secp256k1\"\nversion = \"0.1.0\"\nauthors = [\"Igor Bunar <igor@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"recover_secp256k1\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/recover-secp256k1/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\nuse alloc::string::String;\nuse casper_contract::{\n    contract_api::{cryptography, runtime},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    bytesrepr::{Bytes, FromBytes},\n    PublicKey, Signature,\n};\n\nconst ARG_MESSAGE: &str = \"message\";\nconst ARG_SIGNATURE_BYTES: &str = \"signature_bytes\";\nconst ARG_RECOVERY_ID: &str = \"recovery_id\";\nconst ARG_EXPECTED: &str = \"expected\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let message: String = runtime::get_named_arg(ARG_MESSAGE);\n    let signature_bytes: Bytes = runtime::get_named_arg(ARG_SIGNATURE_BYTES);\n    let recovery_id: u8 = runtime::get_named_arg(ARG_RECOVERY_ID);\n    let expected: PublicKey = runtime::get_named_arg(ARG_EXPECTED);\n\n    let (signature, _) = Signature::from_bytes(&signature_bytes).unwrap();\n    let recovered_pk = cryptography::recover_secp256k1(message.as_bytes(), &signature, recovery_id)\n        .unwrap_or_revert();\n\n    assert_eq!(recovered_pk, expected, \"PublicKey mismatch\");\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/refund-purse/Cargo.toml",
    "content": "[package]\nname = \"refund-purse\"\nversion = \"0.1.0\"\nauthors = [\"Michael Birch <birchmd@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"refund_purse\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/refund-purse/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\n\nuse casper_contract::{\n    contract_api::{account, runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{contracts::ContractHash, runtime_args, ApiError, URef, U512};\n\n#[repr(u16)]\nenum Error {\n    ShouldNotExist = 0,\n    NotFound,\n    Invalid,\n    IncorrectAccessRights,\n}\n\npub const ARG_PURSE: &str = \"purse\";\nconst ARG_PAYMENT_AMOUNT: &str = \"payment_amount\";\nconst SET_REFUND_PURSE: &str = \"set_refund_purse\";\nconst GET_REFUND_PURSE: &str = \"get_refund_purse\";\nconst GET_PAYMENT_PURSE: &str = \"get_payment_purse\";\nconst ARG_PURSE_NAME_1: &str = \"purse_name_1\";\nconst ARG_PURSE_NAME_2: &str = \"purse_name_2\";\n\nfn set_refund_purse(contract_hash: ContractHash, p: &URef) {\n    runtime::call_contract(\n        contract_hash,\n        SET_REFUND_PURSE,\n        runtime_args! {\n            ARG_PURSE => *p,\n        },\n    )\n}\n\nfn get_refund_purse(handle_payment: ContractHash) -> Option<URef> {\n    runtime::call_contract(handle_payment, GET_REFUND_PURSE, runtime_args! {})\n}\n\nfn get_payment_purse(handle_payment: ContractHash) -> URef {\n    runtime::call_contract(handle_payment, GET_PAYMENT_PURSE, runtime_args! 
{})\n}\n\nfn submit_payment(handle_payment: ContractHash, amount: U512) {\n    let payment_purse = get_payment_purse(handle_payment);\n    let main_purse = account::get_main_purse();\n    system::transfer_from_purse_to_purse(main_purse, payment_purse, amount, None).unwrap_or_revert()\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let handle_payment = system::get_handle_payment();\n\n    let refund_purse_name_1: String = runtime::get_named_arg(ARG_PURSE_NAME_1);\n    let refund_purse_name_2: String = runtime::get_named_arg(ARG_PURSE_NAME_2);\n\n    let refund_purse_1 = runtime::get_key(&refund_purse_name_1)\n        .unwrap_or_revert()\n        .into_uref()\n        .unwrap_or_revert();\n    {\n        // get_refund_purse should return None before setting it\n        let refund_result = get_refund_purse(handle_payment);\n        if refund_result.is_some() {\n            runtime::revert(ApiError::User(Error::ShouldNotExist as u16));\n        }\n\n        // it should return Some(x) after calling set_refund_purse(x)\n        set_refund_purse(handle_payment, &refund_purse_1);\n        let refund_purse = match get_refund_purse(handle_payment) {\n            None => runtime::revert(ApiError::User(Error::NotFound as u16)),\n            Some(x) if x.addr() == refund_purse_1.addr() => x,\n            Some(_) => runtime::revert(ApiError::User(Error::Invalid as u16)),\n        };\n\n        // the returned purse should not have any access rights\n        if refund_purse.is_addable() || refund_purse.is_writeable() || refund_purse.is_readable() {\n            runtime::revert(ApiError::User(Error::IncorrectAccessRights as u16))\n        }\n    }\n    {\n        let refund_purse_2 = runtime::get_key(&refund_purse_name_2)\n            .unwrap_or_revert()\n            .into_uref()\n            .unwrap_or_revert();\n        // get_refund_purse should return correct value after setting a second time\n        set_refund_purse(handle_payment, &refund_purse_2);\n        match 
get_refund_purse(handle_payment) {\n            None => runtime::revert(ApiError::User(Error::NotFound as u16)),\n            Some(uref) if uref.addr() == refund_purse_2.addr() => (),\n            Some(_) => runtime::revert(ApiError::User(Error::Invalid as u16)),\n        }\n\n        let payment_amount: U512 = runtime::get_named_arg(ARG_PAYMENT_AMOUNT);\n        submit_payment(handle_payment, payment_amount);\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-20210707/Cargo.toml",
    "content": "[package]\nname = \"regression-20210707\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"regression_20210707\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-20210707/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[macro_use]\nextern crate alloc;\n\nuse alloc::string::ToString;\n\nuse casper_contract::{\n    contract_api::{account, runtime, storage, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    account::AccountHash,\n    contracts::NamedKeys,\n    runtime_args,\n    system::{handle_payment, mint},\n    AccessRights, AddressableEntityHash, CLType, CLTyped, EntityEntryPoint, EntryPointAccess,\n    EntryPointPayment, EntryPointType, EntryPoints, Key, Parameter, RuntimeArgs, URef, U512,\n};\n\nconst HARDCODED_UREF: URef = URef::new([42; 32], AccessRights::READ_ADD_WRITE);\n\nconst PACKAGE_HASH_NAME: &str = \"package_hash_name\";\nconst ACCESS_UREF_NAME: &str = \"uref_name\";\nconst CONTRACT_HASH_NAME: &str = \"contract_hash\";\n\nconst ARG_SOURCE: &str = \"source\";\nconst ARG_RECIPIENT: &str = \"recipient\";\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_TARGET: &str = \"target\";\n\nconst METHOD_SEND_TO_ACCOUNT: &str = \"send_to_account\";\nconst METHOD_SEND_TO_PURSE: &str = \"send_to_purse\";\nconst METHOD_HARDCODED_PURSE_SRC: &str = \"hardcoded_purse_src\";\nconst METHOD_STORED_PAYMENT: &str = \"stored_payment\";\nconst METHOD_HARDCODED_PAYMENT: &str = \"hardcoded_payment\";\n\npub fn get_payment_purse() -> URef {\n    runtime::call_contract(\n        system::get_handle_payment(),\n        handle_payment::METHOD_GET_PAYMENT_PURSE,\n        RuntimeArgs::default(),\n    )\n}\n\npub fn set_refund_purse(refund_purse: URef) {\n    let args = runtime_args! 
{\n        mint::ARG_PURSE => refund_purse,\n    };\n    runtime::call_contract(\n        system::get_handle_payment(),\n        handle_payment::METHOD_SET_REFUND_PURSE,\n        args,\n    )\n}\n\n#[no_mangle]\npub extern \"C\" fn send_to_account() {\n    let source = runtime::get_key(\"purse\")\n        .unwrap_or_revert()\n        .into_uref()\n        .unwrap_or_revert();\n    let recipient: AccountHash = runtime::get_named_arg(ARG_RECIPIENT);\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n\n    system::transfer_from_purse_to_account(source, recipient, amount, None).unwrap();\n}\n\n#[no_mangle]\npub extern \"C\" fn send_to_purse() {\n    let source = runtime::get_key(\"purse\")\n        .unwrap_or_revert()\n        .into_uref()\n        .unwrap_or_revert();\n    let target: URef = runtime::get_named_arg(ARG_TARGET);\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n\n    system::transfer_from_purse_to_purse(source, target, amount, None).unwrap();\n}\n\n#[no_mangle]\npub extern \"C\" fn hardcoded_purse_src() {\n    let source = HARDCODED_UREF;\n    let target = runtime::get_key(\"purse\")\n        .unwrap_or_revert()\n        .into_uref()\n        .unwrap_or_revert();\n\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n\n    system::transfer_from_purse_to_purse(source, target, amount, None).unwrap();\n}\n\n#[no_mangle]\npub extern \"C\" fn stored_payment() {\n    // Refund purse\n    let refund_purse: URef = runtime::get_key(\"purse\")\n        .unwrap_or_revert()\n        .into_uref()\n        .unwrap_or_revert();\n    // Who will be charged\n    let source: URef = runtime::get_named_arg(ARG_SOURCE);\n    // How much to pay for execution\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n\n    // set refund purse to specified purse\n    set_refund_purse(refund_purse);\n\n    // get payment purse for current execution\n    let payment_purse: URef = get_payment_purse();\n\n    // transfer amount from named purse to 
payment purse, which will be used to pay for execution\n    system::transfer_from_purse_to_purse(source, payment_purse, amount, None).unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn hardcoded_payment() {\n    // Refund purse\n    let refund_purse: URef = runtime::get_key(\"purse\")\n        .unwrap_or_revert()\n        .into_uref()\n        .unwrap_or_revert();\n    // Who will be charged\n    let source: URef = HARDCODED_UREF;\n    // How much to pay for execution\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n\n    // set refund purse to specified purse\n    set_refund_purse(refund_purse);\n\n    // get payment purse for current execution\n    let payment_purse: URef = get_payment_purse();\n\n    // transfer amount from named purse to payment purse, which will be used to pay for execution\n    system::transfer_from_purse_to_purse(source, payment_purse, amount, None).unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let mut entry_points = EntryPoints::new();\n\n    let send_to_account = EntityEntryPoint::new(\n        METHOD_SEND_TO_ACCOUNT,\n        vec![\n            Parameter::new(ARG_SOURCE, URef::cl_type()),\n            Parameter::new(ARG_RECIPIENT, AccountHash::cl_type()),\n            Parameter::new(ARG_AMOUNT, CLType::U512),\n        ],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    let send_to_purse = EntityEntryPoint::new(\n        METHOD_SEND_TO_PURSE,\n        vec![\n            Parameter::new(ARG_SOURCE, URef::cl_type()),\n            Parameter::new(ARG_TARGET, URef::cl_type()),\n            Parameter::new(ARG_AMOUNT, CLType::U512),\n        ],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    let hardcoded_src = EntityEntryPoint::new(\n        METHOD_HARDCODED_PURSE_SRC,\n        vec![\n            
Parameter::new(ARG_TARGET, URef::cl_type()),\n            Parameter::new(ARG_AMOUNT, CLType::U512),\n        ],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    let stored_payment = EntityEntryPoint::new(\n        METHOD_STORED_PAYMENT,\n        vec![\n            Parameter::new(ARG_SOURCE, URef::cl_type()),\n            Parameter::new(ARG_AMOUNT, CLType::U512),\n        ],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    let hardcoded_payment = EntityEntryPoint::new(\n        METHOD_HARDCODED_PAYMENT,\n        vec![Parameter::new(ARG_AMOUNT, CLType::U512)],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n\n    entry_points.add_entry_point(send_to_account);\n    entry_points.add_entry_point(send_to_purse);\n    entry_points.add_entry_point(hardcoded_src);\n    entry_points.add_entry_point(stored_payment);\n    entry_points.add_entry_point(hardcoded_payment);\n\n    let amount: U512 = runtime::get_named_arg(\"amount\");\n\n    let named_keys = {\n        let purse = system::create_purse();\n        system::transfer_from_purse_to_purse(account::get_main_purse(), purse, amount, None)\n            .unwrap_or_revert();\n\n        let mut named_keys = NamedKeys::new();\n        named_keys.insert(\"purse\".to_string(), purse.into());\n        named_keys\n    };\n\n    let (contract_hash, _version) = storage::new_contract(\n        entry_points,\n        Some(named_keys),\n        Some(PACKAGE_HASH_NAME.to_string()),\n        Some(ACCESS_UREF_NAME.to_string()),\n        None,\n    );\n    runtime::put_key(\n        CONTRACT_HASH_NAME,\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-20210831/Cargo.toml",
    "content": "[package]\nname = \"regression-20210831\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"regression_20210831\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-20210831/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[macro_use]\nextern crate alloc;\n\nuse alloc::{collections::BTreeMap, string::ToString};\n\nuse casper_contract::{\n    contract_api::{runtime, storage, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    bytesrepr::FromBytes,\n    contracts::{ContractPackageHash, NamedKeys},\n    runtime_args,\n    system::auction::{self, DelegationRate},\n    CLType, CLTyped, CLValue, EntityEntryPoint, EntryPointAccess, EntryPointPayment,\n    EntryPointType, EntryPoints, Key, Parameter, PublicKey, RuntimeArgs, U512,\n};\n\nconst METHOD_ADD_BID_PROXY_CALL_1: &str = \"add_bid_proxy_call_1\";\nconst METHOD_ADD_BID_PROXY_CALL: &str = \"add_bid_proxy_call\";\n\nconst METHOD_WITHDRAW_PROXY_CALL: &str = \"withdraw_proxy_call\";\nconst METHOD_WITHDRAW_PROXY_CALL_1: &str = \"withdraw_proxy_call_1\";\n\nconst METHOD_DELEGATE_PROXY_CALL: &str = \"delegate_proxy_call\";\nconst METHOD_DELEGATE_PROXY_CALL_1: &str = \"delegate_proxy_call_1\";\n\nconst METHOD_UNDELEGATE_PROXY_CALL: &str = \"undelegate_proxy_call\";\nconst METHOD_UNDELEGATE_PROXY_CALL_1: &str = \"undelegate_proxy_call_1\";\n\nconst METHOD_ACTIVATE_BID_CALL: &str = \"activate_bid_proxy_call\";\nconst METHOD_ACTIVATE_BID_CALL_1: &str = \"activate_bid_proxy_call_1\";\n\nconst PACKAGE_HASH_NAME: &str = \"package_hash_name\";\nconst ACCESS_UREF_NAME: &str = \"uref_name\";\nconst CONTRACT_HASH_NAME: &str = \"contract_hash\";\n\nfn forwarded_add_bid_args() -> RuntimeArgs {\n    let public_key: PublicKey = runtime::get_named_arg(auction::ARG_PUBLIC_KEY);\n    let delegation_rate: DelegationRate = runtime::get_named_arg(auction::ARG_DELEGATION_RATE);\n    let amount: U512 = runtime::get_named_arg(auction::ARG_AMOUNT);\n\n    runtime_args! 
{\n        auction::ARG_PUBLIC_KEY => public_key,\n        auction::ARG_DELEGATION_RATE => delegation_rate,\n        auction::ARG_AMOUNT => amount,\n    }\n}\n\nfn forwarded_withdraw_bid_args() -> RuntimeArgs {\n    let public_key: PublicKey = runtime::get_named_arg(auction::ARG_PUBLIC_KEY);\n    let amount: U512 = runtime::get_named_arg(auction::ARG_AMOUNT);\n\n    runtime_args! {\n        auction::ARG_PUBLIC_KEY => public_key,\n        auction::ARG_AMOUNT => amount,\n    }\n}\n\nfn forwarded_delegate_args() -> RuntimeArgs {\n    let delegator: PublicKey = runtime::get_named_arg(auction::ARG_DELEGATOR);\n    let validator: PublicKey = runtime::get_named_arg(auction::ARG_VALIDATOR);\n    let amount: U512 = runtime::get_named_arg(auction::ARG_AMOUNT);\n\n    runtime_args! {\n        auction::ARG_DELEGATOR => delegator,\n        auction::ARG_VALIDATOR => validator,\n        auction::ARG_AMOUNT => amount,\n    }\n}\n\nfn forwarded_undelegate_args() -> RuntimeArgs {\n    let delegator: PublicKey = runtime::get_named_arg(auction::ARG_DELEGATOR);\n    let validator: PublicKey = runtime::get_named_arg(auction::ARG_VALIDATOR);\n    let amount: U512 = runtime::get_named_arg(auction::ARG_AMOUNT);\n\n    runtime_args! {\n        auction::ARG_DELEGATOR => delegator,\n        auction::ARG_VALIDATOR => validator,\n        auction::ARG_AMOUNT => amount,\n    }\n}\n\nfn forwarded_activate_bid_args() -> RuntimeArgs {\n    let validator_public_key: PublicKey = runtime::get_named_arg(auction::ARG_VALIDATOR);\n\n    runtime_args! 
{\n        auction::ARG_VALIDATOR => validator_public_key,\n    }\n}\n\n#[no_mangle]\npub extern \"C\" fn withdraw_proxy_call_1() {\n    let auction_contract_hash = system::get_auction();\n\n    let withdraw_bid_args = forwarded_withdraw_bid_args();\n\n    let result: U512 = runtime::call_contract(\n        auction_contract_hash,\n        auction::METHOD_WITHDRAW_BID,\n        withdraw_bid_args,\n    );\n\n    runtime::ret(CLValue::from_t(result).unwrap_or_revert());\n}\n\nfn forward_call_to_this<T: CLTyped + FromBytes>(entry_point: &str, runtime_args: RuntimeArgs) -> T {\n    let this = runtime::get_key(PACKAGE_HASH_NAME)\n        .and_then(Key::into_package_addr)\n        .map(ContractPackageHash::new)\n        .unwrap_or_revert();\n    runtime::call_versioned_contract(this, None, entry_point, runtime_args)\n}\n\nfn call_auction<T: CLTyped + FromBytes>(entry_point: &str, args: RuntimeArgs) -> T {\n    runtime::call_contract(system::get_auction(), entry_point, args)\n}\n\n#[no_mangle]\npub extern \"C\" fn add_bid_proxy_call() {\n    forward_call_to_this(METHOD_ADD_BID_PROXY_CALL_1, forwarded_add_bid_args())\n}\n\n#[no_mangle]\npub extern \"C\" fn add_bid_proxy_call_1() {\n    let _result: U512 = call_auction(auction::METHOD_ADD_BID, forwarded_add_bid_args());\n}\n\n#[no_mangle]\npub extern \"C\" fn withdraw_proxy_call() {\n    let _result: U512 =\n        forward_call_to_this(METHOD_WITHDRAW_PROXY_CALL_1, forwarded_withdraw_bid_args());\n}\n\n#[no_mangle]\npub extern \"C\" fn delegate_proxy_call_1() {\n    let result: U512 = call_auction(auction::METHOD_DELEGATE, forwarded_delegate_args());\n    runtime::ret(CLValue::from_t(result).unwrap_or_revert());\n}\n\n#[no_mangle]\npub extern \"C\" fn delegate_proxy_call() {\n    let _result: U512 =\n        forward_call_to_this(METHOD_DELEGATE_PROXY_CALL_1, forwarded_delegate_args());\n}\n\n#[no_mangle]\npub extern \"C\" fn undelegate_proxy_call_1() {\n    let result: U512 = call_auction(auction::METHOD_UNDELEGATE, 
forwarded_undelegate_args());\n    runtime::ret(CLValue::from_t(result).unwrap_or_revert());\n}\n\n#[no_mangle]\npub extern \"C\" fn undelegate_proxy_call() {\n    let _result: U512 =\n        forward_call_to_this(METHOD_UNDELEGATE_PROXY_CALL_1, forwarded_undelegate_args());\n}\n\n#[no_mangle]\npub extern \"C\" fn activate_bid_proxy_call_1() {\n    call_auction::<()>(auction::METHOD_ACTIVATE_BID, forwarded_activate_bid_args());\n}\n\n#[no_mangle]\npub extern \"C\" fn activate_bid_proxy_call() {\n    forward_call_to_this(METHOD_ACTIVATE_BID_CALL_1, forwarded_activate_bid_args())\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let mut entry_points = EntryPoints::new();\n\n    let add_bid_proxy_call_1 = EntityEntryPoint::new(\n        METHOD_ADD_BID_PROXY_CALL_1,\n        vec![\n            Parameter::new(auction::ARG_PUBLIC_KEY, PublicKey::cl_type()),\n            Parameter::new(auction::ARG_DELEGATION_RATE, DelegationRate::cl_type()),\n            Parameter::new(auction::ARG_AMOUNT, U512::cl_type()),\n        ],\n        U512::cl_type(),\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(add_bid_proxy_call_1);\n\n    let add_bid_proxy_call = EntityEntryPoint::new(\n        METHOD_ADD_BID_PROXY_CALL,\n        vec![\n            Parameter::new(auction::ARG_PUBLIC_KEY, PublicKey::cl_type()),\n            Parameter::new(auction::ARG_DELEGATION_RATE, DelegationRate::cl_type()),\n            Parameter::new(auction::ARG_AMOUNT, U512::cl_type()),\n        ],\n        U512::cl_type(),\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(add_bid_proxy_call);\n\n    let withdraw_proxy_call_1 = EntityEntryPoint::new(\n        METHOD_WITHDRAW_PROXY_CALL_1,\n        vec![\n            Parameter::new(auction::ARG_PUBLIC_KEY, PublicKey::cl_type()),\n            
Parameter::new(auction::ARG_AMOUNT, U512::cl_type()),\n        ],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n\n    let withdraw_proxy_call = EntityEntryPoint::new(\n        METHOD_WITHDRAW_PROXY_CALL,\n        vec![\n            Parameter::new(auction::ARG_PUBLIC_KEY, PublicKey::cl_type()),\n            Parameter::new(auction::ARG_AMOUNT, U512::cl_type()),\n        ],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n\n    let delegate_proxy_call = EntityEntryPoint::new(\n        METHOD_DELEGATE_PROXY_CALL,\n        vec![\n            Parameter::new(auction::ARG_DELEGATOR, PublicKey::cl_type()),\n            Parameter::new(auction::ARG_VALIDATOR, PublicKey::cl_type()),\n            Parameter::new(auction::ARG_AMOUNT, U512::cl_type()),\n        ],\n        U512::cl_type(),\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n\n    let delegate_proxy_call_1 = EntityEntryPoint::new(\n        METHOD_DELEGATE_PROXY_CALL_1,\n        vec![\n            Parameter::new(auction::ARG_DELEGATOR, PublicKey::cl_type()),\n            Parameter::new(auction::ARG_VALIDATOR, PublicKey::cl_type()),\n            Parameter::new(auction::ARG_AMOUNT, U512::cl_type()),\n        ],\n        U512::cl_type(),\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n\n    let undelegate_proxy_call = EntityEntryPoint::new(\n        METHOD_UNDELEGATE_PROXY_CALL,\n        vec![\n            Parameter::new(auction::ARG_DELEGATOR, PublicKey::cl_type()),\n            Parameter::new(auction::ARG_VALIDATOR, PublicKey::cl_type()),\n            Parameter::new(auction::ARG_AMOUNT, U512::cl_type()),\n        ],\n        U512::cl_type(),\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        
EntryPointPayment::Caller,\n    );\n\n    let undelegate_proxy_call_1 = EntityEntryPoint::new(\n        METHOD_UNDELEGATE_PROXY_CALL_1,\n        vec![\n            Parameter::new(auction::ARG_DELEGATOR, PublicKey::cl_type()),\n            Parameter::new(auction::ARG_VALIDATOR, PublicKey::cl_type()),\n            Parameter::new(auction::ARG_AMOUNT, U512::cl_type()),\n        ],\n        U512::cl_type(),\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n\n    let activate_bid_proxy_call = EntityEntryPoint::new(\n        METHOD_ACTIVATE_BID_CALL,\n        vec![Parameter::new(auction::ARG_VALIDATOR, CLType::PublicKey)],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    let activate_bid_proxy_call_1 = EntityEntryPoint::new(\n        METHOD_ACTIVATE_BID_CALL_1,\n        vec![Parameter::new(auction::ARG_VALIDATOR, CLType::PublicKey)],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n\n    entry_points.add_entry_point(withdraw_proxy_call);\n    entry_points.add_entry_point(withdraw_proxy_call_1);\n\n    entry_points.add_entry_point(delegate_proxy_call);\n    entry_points.add_entry_point(delegate_proxy_call_1);\n\n    entry_points.add_entry_point(undelegate_proxy_call);\n    entry_points.add_entry_point(undelegate_proxy_call_1);\n\n    entry_points.add_entry_point(activate_bid_proxy_call);\n    entry_points.add_entry_point(activate_bid_proxy_call_1);\n\n    let (contract_package_hash, access_uref) = storage::create_contract_package_at_hash();\n\n    // runtime::put_key(PACKAGE_HASH_NAME, contract_package_hash);\n    runtime::put_key(ACCESS_UREF_NAME, access_uref.into());\n\n    let mut named_keys = NamedKeys::new();\n    named_keys.insert(PACKAGE_HASH_NAME.to_string(), contract_package_hash.into());\n\n    let (contract_hash, _version) = 
storage::add_contract_version(\n        contract_package_hash,\n        entry_points,\n        named_keys,\n        BTreeMap::new(),\n    );\n    runtime::put_key(CONTRACT_HASH_NAME, Key::Hash(contract_hash.value()));\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-20220204/Cargo.toml",
    "content": "[package]\nname = \"regression-20220204\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"regression_20220204\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-20220204/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[macro_use]\nextern crate alloc;\n\nuse alloc::{collections::BTreeMap, string::String};\nuse casper_contract::{\n    contract_api::{account, runtime, storage, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    account::AccountHash, contracts::NamedKeys, CLType, CLTyped, EntityEntryPoint,\n    EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key, Parameter, URef, U512,\n};\n\nconst TRANSFER_AS_CONTRACT: &str = \"transfer_as_contract\";\nconst NONTRIVIAL_ARG_AS_CONTRACT: &str = \"nontrivial_arg_as_contract\";\nconst ARG_PURSE: &str = \"purse\";\nconst PURSE_KEY: &str = \"purse\";\nconst CONTRACT_HASH_NAME: &str = \"regression-contract-hash\";\nconst PACKAGE_HASH_NAME: &str = \"package-contract-hash\";\n\ntype NonTrivialArg = BTreeMap<String, Key>;\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let (contract_package_hash, _access_uref) = storage::create_contract_package_at_hash();\n\n    runtime::put_key(PACKAGE_HASH_NAME, contract_package_hash.into());\n\n    let mut entry_points = EntryPoints::new();\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        TRANSFER_AS_CONTRACT,\n        vec![Parameter::new(ARG_PURSE, URef::cl_type())],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n\n    type NonTrivialArg = BTreeMap<String, Key>;\n\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        NONTRIVIAL_ARG_AS_CONTRACT,\n        vec![Parameter::new(ARG_PURSE, NonTrivialArg::cl_type())],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n\n    let named_keys = {\n        let mut named_keys = NamedKeys::new();\n        let purse = system::create_purse();\n        named_keys.insert(PURSE_KEY.into(), purse.into());\n        named_keys\n    };\n\n    let (contract_hash, _contract_version) = 
storage::add_contract_version(\n        contract_package_hash,\n        entry_points,\n        named_keys,\n        BTreeMap::new(),\n    );\n\n    runtime::put_key(CONTRACT_HASH_NAME, Key::Hash(contract_hash.value()));\n}\n\n#[no_mangle]\npub extern \"C\" fn transfer_as_contract() {\n    let source_purse: URef = runtime::get_named_arg(ARG_PURSE);\n    let target_purse = runtime::get_key(PURSE_KEY)\n        .unwrap_or_revert()\n        .into_uref()\n        .unwrap_or_revert();\n\n    assert!(\n        !source_purse.is_writeable(),\n        \"Host should modify write bits in passed main purse\"\n    );\n    assert!(runtime::is_valid_uref(source_purse));\n\n    let extended = source_purse.into_read_add_write();\n    assert!(!runtime::is_valid_uref(extended));\n\n    system::transfer_from_purse_to_purse(extended, target_purse, U512::one(), Some(42))\n        .unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn transfer_as_session() {\n    let source_purse: URef = runtime::get_named_arg(ARG_PURSE);\n\n    assert!(!source_purse.is_writeable());\n\n    assert!(runtime::is_valid_uref(source_purse));\n    let extended = source_purse.into_read_add_write();\n    assert!(runtime::is_valid_uref(extended));\n\n    system::transfer_from_purse_to_account(\n        extended,\n        AccountHash::new([0; 32]),\n        U512::one(),\n        Some(42),\n    )\n    .unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn transfer_main_purse_as_session() {\n    let source_purse: URef = account::get_main_purse();\n\n    assert!(runtime::is_valid_uref(source_purse));\n    let extended = source_purse.into_write();\n    assert!(runtime::is_valid_uref(extended));\n\n    system::transfer_from_purse_to_account(\n        extended,\n        AccountHash::new([0; 32]),\n        U512::one(),\n        Some(42),\n    )\n    .unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn nontrivial_arg_as_contract() {\n    let non_trivial_arg: NonTrivialArg = 
runtime::get_named_arg(ARG_PURSE);\n    let source_purse: URef = non_trivial_arg\n        .into_values()\n        .filter_map(Key::into_uref)\n        .next()\n        .unwrap();\n\n    let target_purse = runtime::get_key(PURSE_KEY)\n        .unwrap_or_revert()\n        .into_uref()\n        .unwrap_or_revert();\n\n    assert!(!source_purse.is_writeable());\n    assert!(runtime::is_valid_uref(source_purse));\n\n    let extended = source_purse.into_read_add_write();\n    assert!(!runtime::is_valid_uref(extended));\n\n    system::transfer_from_purse_to_purse(extended, target_purse, U512::one(), Some(42))\n        .unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-20220204-call/Cargo.toml",
    "content": "[package]\nname = \"regression-20220204-call\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"regression_20220204_call\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-20220204-call/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\n\nuse casper_contract::{\n    contract_api::{account, runtime},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{contracts::ContractHash, runtime_args, AccessRights};\n\nconst ARG_PURSE: &str = \"purse\";\nconst CONTRACT_HASH_NAME: &str = \"regression-contract-hash\";\nconst ARG_ENTRYPOINT: &str = \"entrypoint\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let new_access_rights: AccessRights = runtime::get_named_arg(\"new_access_rights\");\n\n    let entrypoint: String = runtime::get_named_arg(ARG_ENTRYPOINT);\n\n    let contract_hash_key = runtime::get_key(CONTRACT_HASH_NAME).unwrap_or_revert();\n\n    let contract_hash = contract_hash_key\n        .into_entity_hash_addr()\n        .map(ContractHash::new)\n        .unwrap_or_revert();\n\n    let main_purse_modified = account::get_main_purse().with_access_rights(new_access_rights);\n\n    runtime::call_contract::<()>(\n        contract_hash,\n        &entrypoint,\n        runtime_args! {\n            ARG_PURSE => main_purse_modified,\n        },\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-20220204-nontrivial/Cargo.toml",
    "content": "[package]\nname = \"regression-20220204-nontrivial\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"regression_20220204_nontrivial\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-20220204-nontrivial/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::{\n    collections::BTreeMap,\n    string::{String, ToString},\n};\n\nuse casper_contract::{\n    contract_api::{account, runtime},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{runtime_args, AccessRights, AddressableEntityHash, Key};\n\nconst ARG_PURSE: &str = \"purse\";\nconst CONTRACT_HASH_NAME: &str = \"regression-contract-hash\";\nconst ARG_ENTRYPOINT: &str = \"entrypoint\";\n\ntype NonTrivialArg = BTreeMap<String, Key>;\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let new_access_rights: AccessRights = runtime::get_named_arg(\"new_access_rights\");\n\n    let entrypoint: String = runtime::get_named_arg(ARG_ENTRYPOINT);\n\n    let contract_hash_key = runtime::get_key(CONTRACT_HASH_NAME).unwrap_or_revert();\n    let contract_hash = contract_hash_key\n        .into_entity_hash_addr()\n        .map(AddressableEntityHash::new)\n        .unwrap_or_revert();\n\n    let main_purse_modified = account::get_main_purse().with_access_rights(new_access_rights);\n\n    let mut nontrivial_arg = NonTrivialArg::new();\n    nontrivial_arg.insert(\"anything\".to_string(), Key::from(main_purse_modified));\n\n    runtime::call_contract::<()>(\n        contract_hash.into(),\n        &entrypoint,\n        runtime_args! {\n            ARG_PURSE => nontrivial_arg,\n        },\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-20220207/Cargo.toml",
    "content": "[package]\nname = \"regression-20220207\"\nversion = \"0.1.0\"\nauthors = [\"Mateusz Górski <gorski.mateusz@protonmail.ch>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"regression_20220207\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-20220207/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::{\n    contract_api::{self, account, runtime},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{account::AccountHash, URef, U512};\n\nconst ARG_TARGET: &str = \"target\";\nconst ARG_AMOUNT_TO_SEND: &str = \"amount_to_send\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let source_purse: URef = account::get_main_purse();\n    let amount_to_send: U512 = runtime::get_named_arg(ARG_AMOUNT_TO_SEND);\n    let target_account: AccountHash = runtime::get_named_arg(ARG_TARGET);\n\n    contract_api::system::transfer_from_purse_to_account(\n        source_purse,\n        target_account,\n        amount_to_send,\n        None,\n    )\n    .unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-20220208/Cargo.toml",
    "content": "[package]\nname = \"regression-20220208\"\nversion = \"0.1.0\"\nauthors = [\"Mateusz Górski <gorski.mateusz@protonmail.ch>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"regression_20220208\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-20220208/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::{\n    contract_api::{self, account, runtime},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{account::AccountHash, URef, U512};\n\nconst ARG_TARGET: &str = \"target\";\nconst ARG_AMOUNT_PART_1: &str = \"amount_part_1\";\nconst ARG_AMOUNT_PART_2: &str = \"amount_part_2\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let source_purse: URef = account::get_main_purse();\n    let amount_part_1: U512 = runtime::get_named_arg(ARG_AMOUNT_PART_1);\n    let amount_part_2: U512 = runtime::get_named_arg(ARG_AMOUNT_PART_2);\n    let target_account: AccountHash = runtime::get_named_arg(ARG_TARGET);\n\n    contract_api::system::transfer_from_purse_to_account(\n        source_purse,\n        target_account,\n        amount_part_1,\n        None,\n    )\n    .unwrap_or_revert();\n\n    contract_api::system::transfer_from_purse_to_account(\n        source_purse,\n        target_account,\n        amount_part_2,\n        None,\n    )\n    .unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-20220211/Cargo.toml",
    "content": "[package]\nname = \"regression-20220211\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"regression_20220211\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-20220211/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    addressable_entity::Parameters, AccessRights, AddressableEntityHash, CLType, CLValue,\n    EntityEntryPoint, EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key, URef,\n};\n\nconst RET_AS_CONTRACT: &str = \"ret_as_contract\";\nconst RET_AS_SESSION: &str = \"ret_as_session\";\nconst PUT_KEY_AS_SESSION: &str = \"put_key_as_session\";\nconst PUT_KEY_AS_CONTRACT: &str = \"put_key_as_contract\";\nconst READ_AS_SESSION: &str = \"read_as_session\";\nconst READ_AS_CONTRACT: &str = \"read_as_contract\";\nconst WRITE_AS_SESSION: &str = \"write_as_session\";\nconst WRITE_AS_CONTRACT: &str = \"write_as_contract\";\nconst ADD_AS_SESSION: &str = \"add_as_session\";\nconst ADD_AS_CONTRACT: &str = \"add_as_contract\";\nconst CONTRACT_HASH_NAME: &str = \"regression-contract-hash\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let mut entry_points = EntryPoints::new();\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        RET_AS_CONTRACT,\n        Parameters::new(),\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        RET_AS_SESSION,\n        Parameters::new(),\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        PUT_KEY_AS_SESSION,\n        Parameters::new(),\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        PUT_KEY_AS_CONTRACT,\n        Parameters::new(),\n        CLType::Unit,\n        EntryPointAccess::Public,\n        
EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        READ_AS_SESSION,\n        Parameters::new(),\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        READ_AS_CONTRACT,\n        Parameters::new(),\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        WRITE_AS_SESSION,\n        Parameters::new(),\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        WRITE_AS_CONTRACT,\n        Parameters::new(),\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        ADD_AS_SESSION,\n        Parameters::new(),\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        ADD_AS_CONTRACT,\n        Parameters::new(),\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n    let (contract_hash, _contract_version) =\n        storage::new_locked_contract(entry_points, None, None, None, None);\n\n    runtime::put_key(\n        CONTRACT_HASH_NAME,\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n}\n\n#[no_mangle]\npub extern \"C\" fn ret_as_contract() {\n    let uref = URef::default().into_read_add_write();\n    
runtime::ret(CLValue::from_t(uref).unwrap_or_revert())\n}\n\n#[no_mangle]\npub extern \"C\" fn ret_as_session() {\n    let uref = URef::default().with_access_rights(AccessRights::READ_ADD_WRITE);\n    runtime::ret(CLValue::from_t(uref).unwrap_or_revert());\n}\n\n#[no_mangle]\npub extern \"C\" fn write_as_contract() {\n    let uref = URef::default().into_read_add_write();\n    storage::write(uref, ());\n}\n\n#[no_mangle]\npub extern \"C\" fn write_as_session() {\n    let uref = URef::default().with_access_rights(AccessRights::READ_ADD_WRITE);\n    storage::write(uref, ());\n}\n\n#[no_mangle]\npub extern \"C\" fn read_as_contract() {\n    let uref = URef::default().into_read_add_write();\n    let _: Option<()> = storage::read(uref).unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn read_as_session() {\n    let uref = URef::default().with_access_rights(AccessRights::READ_ADD_WRITE);\n    let _: Option<()> = storage::read(uref).unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn put_key_as_contract() {\n    let uref = URef::default().into_read_add_write();\n    runtime::put_key(\"\", uref.into());\n}\n\n#[no_mangle]\npub extern \"C\" fn put_key_as_session() {\n    let uref = URef::default().with_access_rights(AccessRights::READ_ADD_WRITE);\n    runtime::put_key(\"\", uref.into());\n}\n\n#[no_mangle]\npub extern \"C\" fn add_as_contract() {\n    let uref = URef::default().into_read_add_write();\n    storage::write(uref, ());\n}\n\n#[no_mangle]\npub extern \"C\" fn add_as_session() {\n    let uref = URef::default().with_access_rights(AccessRights::READ_ADD_WRITE);\n    storage::write(uref, ());\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-20220211-call/Cargo.toml",
    "content": "[package]\nname = \"regression-20220211-call\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"regression_20220211_call\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-20220211-call/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\nuse casper_contract::{contract_api::runtime, unwrap_or_revert::UnwrapOrRevert};\nuse casper_types::{contracts::ContractHash, RuntimeArgs, URef};\n\nconst CONTRACT_HASH_NAME: &str = \"regression-contract-hash\";\nconst ARG_ENTRYPOINT: &str = \"entrypoint\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entrypoint: String = runtime::get_named_arg(ARG_ENTRYPOINT);\n    let contract_hash_key = runtime::get_key(CONTRACT_HASH_NAME).unwrap_or_revert();\n    let contract_hash = contract_hash_key\n        .into_entity_hash_addr()\n        .map(ContractHash::new)\n        .unwrap_or_revert();\n\n    let hardcoded_uref: URef =\n        runtime::call_contract(contract_hash, &entrypoint, RuntimeArgs::default());\n\n    assert!(!runtime::is_valid_uref(hardcoded_uref));\n    assert!(!hardcoded_uref.is_writeable(),);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-20220222/Cargo.toml",
    "content": "[package]\nname = \"regression-20220222\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"regression_20220222\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-20220222/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse casper_contract::{\n    contract_api::{account, runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{account::AccountHash, AccessRights, ApiError, URef, URefAddr, U512};\n\nconst ALICE_ADDR: AccountHash = AccountHash::new([42; 32]);\n\n#[repr(u16)]\nenum Error {\n    PurseDoesNotGrantImplicitAddAccess = 0,\n    TemporaryAddAccessPersists = 1,\n}\n\nimpl From<Error> for ApiError {\n    fn from(error: Error) -> Self {\n        ApiError::User(error as u16)\n    }\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let alice_purse_addr: URefAddr = runtime::get_named_arg(\"alice_purse_addr\");\n\n    let alice_purse = URef::new(alice_purse_addr, AccessRights::ADD);\n\n    if runtime::is_valid_uref(alice_purse) {\n        // Shouldn't be valid uref\n        runtime::revert(Error::PurseDoesNotGrantImplicitAddAccess);\n    }\n\n    let source = account::get_main_purse();\n\n    let _failsafe = system::transfer_from_purse_to_account(source, ALICE_ADDR, U512::one(), None)\n        .unwrap_or_revert();\n\n    if runtime::is_valid_uref(alice_purse) {\n        // Should not be escalated since add access was granted temporarily for transfer.\n        runtime::revert(Error::TemporaryAddAccessPersists);\n    }\n\n    // Should fail\n    runtime::put_key(\"put_key_with_add_should_fail\", alice_purse.into());\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-add-bid/Cargo.toml",
    "content": "[package]\nname = \"regression-add-bid\"\nversion = \"0.1.0\"\nauthors = [\"Karan Dhareshwar <karan@casper.network>\", \"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"regression_add_bid\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-add-bid/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse casper_contract::contract_api::{runtime, system};\nuse casper_types::{\n    runtime_args,\n    system::auction::{self, DelegationRate},\n    PublicKey, U512,\n};\n\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_DELEGATION_RATE: &str = \"delegation_rate\";\nconst ARG_PUBLIC_KEY: &str = \"public_key\";\n\nfn add_bid(public_key: PublicKey, bond_amount: U512, delegation_rate: DelegationRate) {\n    let contract_hash = system::get_auction();\n    let args = runtime_args! {\n        auction::ARG_PUBLIC_KEY => public_key,\n        auction::ARG_AMOUNT => bond_amount + U512::one(),\n        auction::ARG_DELEGATION_RATE => delegation_rate,\n    };\n    runtime::call_contract::<U512>(contract_hash, auction::METHOD_ADD_BID, args);\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let public_key = runtime::get_named_arg(ARG_PUBLIC_KEY);\n    let bond_amount = runtime::get_named_arg(ARG_AMOUNT);\n    let delegation_rate = runtime::get_named_arg(ARG_DELEGATION_RATE);\n\n    add_bid(public_key, bond_amount, delegation_rate);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-delegate/Cargo.toml",
    "content": "[package]\nname = \"regression-delegate\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"regression_delegate\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-delegate/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse casper_contract::contract_api::{runtime, system};\nuse casper_types::{runtime_args, system::auction, PublicKey, U512};\n\nconst ARG_AMOUNT: &str = \"amount\";\n\nconst ARG_VALIDATOR: &str = \"validator\";\nconst ARG_DELEGATOR: &str = \"delegator\";\n\nfn delegate(delegator: PublicKey, validator: PublicKey, amount: U512) {\n    let contract_hash = system::get_auction();\n    let args = runtime_args! {\n        auction::ARG_DELEGATOR => delegator,\n        auction::ARG_VALIDATOR => validator,\n        auction::ARG_AMOUNT => amount + U512::one(),\n    };\n    runtime::call_contract::<U512>(contract_hash, auction::METHOD_DELEGATE, args);\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let delegator = runtime::get_named_arg(ARG_DELEGATOR);\n    let validator = runtime::get_named_arg(ARG_VALIDATOR);\n    let amount = runtime::get_named_arg(ARG_AMOUNT);\n\n    delegate(delegator, validator, amount);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-payment/Cargo.toml",
    "content": "[package]\nname = \"regression-payment\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"regression_payment\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-payment/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse casper_contract::{\n    contract_api::{account, runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    system::{handle_payment, standard_payment},\n    RuntimeArgs, URef, U512,\n};\n\nfn pay(amount: U512) {\n    // amount to transfer from named purse to payment purse\n    let purse_uref = account::get_main_purse();\n\n    // handle payment contract\n    let handle_payment_contract_hash = system::get_handle_payment();\n\n    // get payment purse for current execution\n    let payment_purse: URef = runtime::call_contract(\n        handle_payment_contract_hash,\n        handle_payment::METHOD_GET_PAYMENT_PURSE,\n        RuntimeArgs::default(),\n    );\n\n    // transfer amount from named purse to payment purse, which will be used to pay for execution\n    system::transfer_from_purse_to_purse(purse_uref, payment_purse, amount + U512::one(), None)\n        .unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let amount: U512 = runtime::get_named_arg(standard_payment::ARG_AMOUNT);\n    pay(amount);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-transfer/Cargo.toml",
    "content": "[package]\nname = \"regression-transfer\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"regression_transfer\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression-transfer/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse casper_contract::{\n    contract_api::{account, runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{account::AccountHash, runtime_args, system::mint, URef, U512};\n\nfn call_mint_transfer(\n    to: Option<AccountHash>,\n    source: URef,\n    target: URef,\n    amount: U512,\n    id: Option<u64>,\n) -> Result<(), mint::Error> {\n    let args = runtime_args! {\n        mint::ARG_TO => to,\n        mint::ARG_SOURCE => source,\n        mint::ARG_TARGET => target,\n        mint::ARG_AMOUNT => amount + U512::one(),\n        mint::ARG_ID => id,\n    };\n    runtime::call_contract(system::get_mint(), mint::METHOD_TRANSFER, args)\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let to: Option<AccountHash> = runtime::get_named_arg(mint::ARG_TO);\n    let source: URef = account::get_main_purse();\n    let target: URef = runtime::get_named_arg(mint::ARG_TARGET);\n    let amount: U512 = runtime::get_named_arg(mint::ARG_AMOUNT);\n    let id: Option<u64> = runtime::get_named_arg(mint::ARG_ID);\n\n    call_mint_transfer(to, source, target, amount, id).unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression_20211110/Cargo.toml",
    "content": "[package]\nname = \"regression_20211110\"\nversion = \"0.1.0\"\nauthors = [\"Luís Fernando Schultz Xavier da Silveira <luis@casperlabs.io>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"regression_20211110\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression_20211110/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[macro_use]\nextern crate alloc;\n\nuse casper_contract::contract_api::{runtime, storage};\nuse casper_types::{\n    contracts::ContractHash, runtime_args, AddressableEntityHash, CLType, CLTyped,\n    EntityEntryPoint, EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key,\n    Parameter,\n};\n\nconst RECURSE_ENTRYPOINT: &str = \"recurse\";\nconst ARG_TARGET: &str = \"target\";\nconst CONTRACT_HASH_NAME: &str = \"regression-contract-hash\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let mut entry_points = EntryPoints::new();\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        RECURSE_ENTRYPOINT,\n        vec![Parameter::new(ARG_TARGET, AddressableEntityHash::cl_type())],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n\n    let (contract_hash, _contract_version) =\n        storage::new_locked_contract(entry_points, None, None, None, None);\n\n    runtime::put_key(\n        CONTRACT_HASH_NAME,\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n}\n\n#[no_mangle]\npub extern \"C\" fn recurse() {\n    let target: AddressableEntityHash = runtime::get_named_arg(ARG_TARGET);\n    runtime::call_contract(\n        ContractHash::new(target.value()),\n        RECURSE_ENTRYPOINT,\n        runtime_args! { ARG_TARGET => target },\n    )\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression_20220119/Cargo.toml",
    "content": "[package]\nname = \"regression_20220119\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"regression_20220119\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression_20220119/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[macro_use]\nextern crate alloc;\n\nuse casper_contract::ext_ffi;\nuse casper_types::{api_error, ApiError, UREF_SERIALIZED_LENGTH};\n\nfn custom_create_purse(buffer_size: usize) -> Result<(), ApiError> {\n    let big_purse = vec![0u8; buffer_size];\n    let ret = unsafe { ext_ffi::casper_create_purse(big_purse.as_ptr(), big_purse.len()) };\n    api_error::result_from(ret)\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    assert_eq!(custom_create_purse(1024), Ok(()));\n    assert_eq!(custom_create_purse(0), Err(ApiError::PurseNotCreated));\n    assert_eq!(custom_create_purse(3), Err(ApiError::PurseNotCreated));\n    assert_eq!(custom_create_purse(UREF_SERIALIZED_LENGTH), Ok(()));\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression_20240105/Cargo.toml",
    "content": "[package]\nname = \"regression_20240105\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\", features = [\"test-support\"] }\ncasper-types = { path = \"../../../../types\" }\n\n[[bin]]\nname = \"regression_20240105\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n"
  },
  {
    "path": "smart_contracts/contracts/test/regression_20240105/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::{\n    collections::{BTreeMap, BTreeSet},\n    format,\n    string::{String, ToString},\n    vec,\n    vec::Vec,\n};\nuse core::mem::MaybeUninit;\n\nuse casper_contract::{\n    contract_api,\n    contract_api::{account, runtime, storage, system},\n    ext_ffi,\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    account::{AccountHash, ActionType, Weight},\n    addressable_entity::MAX_GROUPS,\n    api_error,\n    bytesrepr::ToBytes,\n    contracts::{ContractHash, ContractPackageHash},\n    runtime_args, AccessRights, ApiError, CLType, CLValue, EntityEntryPoint, EntryPointAccess,\n    EntryPointPayment, EntryPointType, EntryPoints, EraId, Key, NamedKeys, Parameter,\n    TransferredTo, URef, U512,\n};\n\nconst NOOP: &str = \"noop\";\n\nfn to_ptr<T: ToBytes>(t: &T) -> (*const u8, usize, Vec<u8>) {\n    let bytes = t.to_bytes().unwrap_or_revert();\n    let ptr = bytes.as_ptr();\n    let size = bytes.len();\n    (ptr, size, bytes)\n}\n\n#[no_mangle]\nextern \"C\" fn noop() {}\n\nfn store_noop_contract(maybe_contract_pkg_hash: Option<ContractPackageHash>) -> ContractHash {\n    let mut entry_points = EntryPoints::new();\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        NOOP,\n        vec![],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Caller,\n        EntryPointPayment::Caller,\n    ));\n    match maybe_contract_pkg_hash {\n        Some(contract_pkg_hash) => {\n            let (contract_hash, _version) = storage::add_contract_version(\n                contract_pkg_hash,\n                entry_points,\n                NamedKeys::new(),\n                BTreeMap::new(),\n            );\n            contract_hash\n        }\n        None => {\n            let (contract_hash, _version) =\n                storage::new_contract(entry_points, None, None, None, None);\n            contract_hash\n        }\n    }\n}\n\nfn get_name() -> 
String {\n    let large_name: bool = runtime::get_named_arg(\"large_name\");\n    if large_name {\n        \"a\".repeat(10_000)\n    } else {\n        \"a\".to_string()\n    }\n}\n\nfn get_named_arg_size(name: &str) -> usize {\n    let mut arg_size: usize = 0;\n    let ret = unsafe {\n        ext_ffi::casper_get_named_arg_size(\n            name.as_bytes().as_ptr(),\n            name.len(),\n            &mut arg_size as *mut usize,\n        )\n    };\n    api_error::result_from(ret).unwrap_or_revert();\n    arg_size\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let fn_arg: String = runtime::get_named_arg(\"fn\");\n    match fn_arg.as_str() {\n        \"write\" => {\n            let len: u32 = runtime::get_named_arg(\"len\");\n            let uref = storage::new_uref(());\n            let key = Key::from(uref);\n            let (key_ptr, key_size, _bytes1) = to_ptr(&key);\n            let value = vec![u8::MAX; len as usize];\n            let cl_value = CLValue::from_t(value).unwrap_or_revert();\n            let (cl_value_ptr, cl_value_size, _bytes2) = to_ptr(&cl_value);\n            for _i in 0..u64::MAX {\n                unsafe {\n                    ext_ffi::casper_write(key_ptr, key_size, cl_value_ptr, cl_value_size);\n                }\n            }\n        }\n        \"read\" => {\n            let len: Option<u32> = runtime::get_named_arg(\"len\");\n            let key = match len {\n                Some(len) => {\n                    let key = Key::URef(storage::new_uref(()));\n                    let uref = storage::new_uref(());\n                    storage::write(uref, vec![u8::MAX; len as usize]);\n                    key\n                }\n                None => Key::Hash([0; 32]),\n            };\n            let key_bytes = key.into_bytes().unwrap();\n            let key_ptr = key_bytes.as_ptr();\n            let key_size = key_bytes.len();\n            let mut buffer = vec![0; len.unwrap_or_default() as usize];\n            for _i in 
0..u64::MAX {\n                let mut value_size = MaybeUninit::uninit();\n                let ret = unsafe {\n                    ext_ffi::casper_read_value(key_ptr, key_size, value_size.as_mut_ptr())\n                };\n                // If we actually read a value, we need to clear the host buffer before trying to\n                // read another value.\n                if len.is_some() {\n                    assert_eq!(ret, 0);\n                } else {\n                    assert_eq!(ret, u32::from(ApiError::ValueNotFound) as i32);\n                    continue;\n                }\n                unsafe {\n                    value_size.assume_init();\n                }\n                let mut bytes_written = MaybeUninit::uninit();\n                let ret = unsafe {\n                    ext_ffi::casper_read_host_buffer(\n                        buffer.as_mut_ptr(),\n                        buffer.len(),\n                        bytes_written.as_mut_ptr(),\n                    )\n                };\n                assert_eq!(ret, 0);\n            }\n        }\n        \"add\" => {\n            let large: bool = runtime::get_named_arg(\"large\");\n            if large {\n                let uref = storage::new_uref(U512::zero());\n                for _i in 0..u64::MAX {\n                    storage::add(uref, U512::MAX)\n                }\n            } else {\n                let uref = storage::new_uref(0_i32);\n                for _i in 0..u64::MAX {\n                    storage::add(uref, 1_i32)\n                }\n            }\n        }\n        \"new\" => {\n            let len: u32 = runtime::get_named_arg(\"len\");\n            for _i in 0..u64::MAX {\n                let _n = storage::new_uref(vec![u32::MAX; len as usize]);\n            }\n        }\n        \"call_contract\" => {\n            let args_len: u32 = runtime::get_named_arg(\"args_len\");\n            let args = runtime_args! 
{ \"a\" => vec![u8::MAX; args_len as usize] };\n            let contract_hash = store_noop_contract(None);\n            let (contract_hash_ptr, contract_hash_size, _bytes1) = to_ptr(&contract_hash);\n            let (entry_point_name_ptr, entry_point_name_size, _bytes2) = to_ptr(&NOOP);\n            let (runtime_args_ptr, runtime_args_size, _bytes3) = to_ptr(&args);\n            let mut bytes_written = MaybeUninit::uninit();\n            for _i in 0..u64::MAX {\n                let ret = unsafe {\n                    ext_ffi::casper_call_contract(\n                        contract_hash_ptr,\n                        contract_hash_size,\n                        entry_point_name_ptr,\n                        entry_point_name_size,\n                        runtime_args_ptr,\n                        runtime_args_size,\n                        bytes_written.as_mut_ptr(),\n                    )\n                };\n                api_error::result_from(ret).unwrap_or_revert();\n            }\n        }\n        \"get_key\" => {\n            let maybe_large_key: Option<bool> = runtime::get_named_arg(\"large_key\");\n            match maybe_large_key {\n                Some(large_key) => {\n                    let name = get_name();\n                    let key = if large_key {\n                        let uref = storage::new_uref(());\n                        Key::URef(uref)\n                    } else {\n                        Key::EraInfo(EraId::new(0))\n                    };\n                    runtime::put_key(&name, key);\n                    for _i in 0..u64::MAX {\n                        let _k = runtime::get_key(&name);\n                    }\n                }\n                None => {\n                    for i in 0..u64::MAX {\n                        let _k = runtime::get_key(i.to_string().as_str());\n                    }\n                }\n            }\n        }\n        \"has_key\" => {\n            let exists: bool = 
runtime::get_named_arg(\"key_exists\");\n            if exists {\n                let name = get_name();\n                runtime::put_key(&name, Key::EraInfo(EraId::new(0)));\n                for _i in 0..u64::MAX {\n                    let _b = runtime::has_key(&name);\n                }\n            } else {\n                for i in 0..u64::MAX {\n                    let _b = runtime::has_key(i.to_string().as_str());\n                }\n            }\n        }\n        \"put_key\" => {\n            let base_name = get_name();\n            let large_key: bool = runtime::get_named_arg(\"large_key\");\n            let key = if large_key {\n                let uref = storage::new_uref(());\n                Key::URef(uref)\n            } else {\n                Key::EraInfo(EraId::new(0))\n            };\n            let maybe_num_keys: Option<u32> = runtime::get_named_arg(\"num_keys\");\n            let num_keys = maybe_num_keys.unwrap_or(u32::MAX);\n            for i in 0..num_keys {\n                runtime::put_key(format!(\"{base_name}{i}\").as_str(), key);\n            }\n        }\n        \"is_valid_uref\" => {\n            let valid: bool = runtime::get_named_arg(\"valid\");\n            let uref = if valid {\n                storage::new_uref(())\n            } else {\n                URef::new([1; 32], AccessRights::default())\n            };\n            for _i in 0..u64::MAX {\n                let is_valid = runtime::is_valid_uref(uref);\n                assert_eq!(valid, is_valid);\n            }\n        }\n        \"add_associated_key\" => {\n            let remove_after_adding: bool = runtime::get_named_arg(\"remove_after_adding\");\n            let account_hash = AccountHash::new([1; 32]);\n            let weight = Weight::new(1);\n            for _i in 0..u64::MAX {\n                if remove_after_adding {\n                    account::add_associated_key(account_hash, weight).unwrap_or_revert();\n                    // Remove to avoid getting a 
duplicate key error on next iteration.\n                    account::remove_associated_key(account_hash).unwrap_or_revert();\n                } else {\n                    let _e = account::add_associated_key(account_hash, weight);\n                }\n            }\n        }\n        \"remove_associated_key\" => {\n            for _i in 0..u64::MAX {\n                account::remove_associated_key(AccountHash::new([1; 32])).unwrap_err();\n            }\n        }\n        \"update_associated_key\" => {\n            let exists: bool = runtime::get_named_arg(\"exists\");\n            let account_hash = AccountHash::new([1; 32]);\n            if exists {\n                account::add_associated_key(account_hash, Weight::new(1)).unwrap_or_revert();\n                for i in 0..u64::MAX {\n                    account::update_associated_key(account_hash, Weight::new(i as u8))\n                        .unwrap_or_revert();\n                }\n            } else {\n                for i in 0..u64::MAX {\n                    account::update_associated_key(account_hash, Weight::new(i as u8)).unwrap_err();\n                }\n            }\n        }\n        \"set_action_threshold\" => {\n            for _i in 0..u64::MAX {\n                account::set_action_threshold(ActionType::Deployment, Weight::new(1))\n                    .unwrap_or_revert();\n            }\n        }\n        \"load_named_keys\" => {\n            let num_keys: u32 = runtime::get_named_arg(\"num_keys\");\n            if num_keys == 0 {\n                for _i in 0..u64::MAX {\n                    assert!(runtime::list_named_keys().is_empty());\n                }\n                return;\n            }\n            // Where `num_keys` > 0, we should have put the required number of named keys in a\n            // previous execution via the `put_key` flow of this contract.\n            for _i in 0..u64::MAX {\n                assert_eq!(runtime::list_named_keys().len() as u32, num_keys);\n            
}\n        }\n        \"remove_key\" => {\n            let name = get_name();\n            for _i in 0..u64::MAX {\n                runtime::remove_key(&name)\n            }\n        }\n        \"get_caller\" => {\n            for _i in 0..u64::MAX {\n                let _c = runtime::get_caller();\n            }\n        }\n        \"get_blocktime\" => {\n            for _i in 0..u64::MAX {\n                let _b = runtime::get_blocktime();\n            }\n        }\n        \"create_purse\" => {\n            for _i in 0..u64::MAX {\n                let _u = system::create_purse();\n            }\n        }\n        \"transfer_to_account\" => {\n            let account_exists: bool = runtime::get_named_arg(\"account_exists\");\n            let amount = U512::one();\n            let id = Some(u64::MAX);\n            if account_exists {\n                let target = AccountHash::new([1; 32]);\n                let to = system::transfer_to_account(target, amount, id).unwrap_or_revert();\n                assert_eq!(to, TransferredTo::NewAccount);\n                for _i in 0..u64::MAX {\n                    let to = system::transfer_to_account(target, amount, id).unwrap_or_revert();\n                    assert_eq!(to, TransferredTo::ExistingAccount);\n                }\n            } else {\n                let mut array = [0_u8; 32];\n                for index in 0..32 {\n                    for i in 1..=u8::MAX {\n                        array[index] = i;\n                        let target = AccountHash::new(array);\n                        let to = system::transfer_to_account(target, amount, id).unwrap_or_revert();\n                        assert_eq!(to, TransferredTo::NewAccount);\n                    }\n                }\n            }\n        }\n        \"transfer_from_purse_to_account\" => {\n            let account_exists: bool = runtime::get_named_arg(\"account_exists\");\n            let source = account::get_main_purse();\n            let amount = 
U512::one();\n            let id = Some(u64::MAX);\n            if account_exists {\n                let target = AccountHash::new([1; 32]);\n                let to = system::transfer_to_account(target, amount, id).unwrap_or_revert();\n                assert_eq!(to, TransferredTo::NewAccount);\n                for _i in 0..u64::MAX {\n                    let to = system::transfer_from_purse_to_account(source, target, amount, id)\n                        .unwrap_or_revert();\n                    assert_eq!(to, TransferredTo::ExistingAccount);\n                }\n            } else {\n                let mut array = [0_u8; 32];\n                for index in 0..32 {\n                    for i in 1..=u8::MAX {\n                        array[index] = i;\n                        let target = AccountHash::new(array);\n                        let to = system::transfer_from_purse_to_account(source, target, amount, id)\n                            .unwrap_or_revert();\n                        assert_eq!(to, TransferredTo::NewAccount);\n                    }\n                }\n            }\n        }\n        \"transfer_from_purse_to_purse\" => {\n            let source = account::get_main_purse();\n            let target = system::create_purse();\n            let amount = U512::one();\n            let id = Some(u64::MAX);\n            system::transfer_from_purse_to_purse(source, target, amount, id).unwrap_or_revert();\n            for _i in 0..u64::MAX {\n                system::transfer_from_purse_to_purse(source, target, amount, id).unwrap_or_revert();\n            }\n        }\n        \"get_balance\" => {\n            let purse_exists: bool = runtime::get_named_arg(\"purse_exists\");\n            let uref = if purse_exists {\n                account::get_main_purse()\n            } else {\n                URef::new([1; 32], AccessRights::empty())\n            };\n            for _i in 0..u64::MAX {\n                let maybe_balance = system::get_purse_balance(uref);\n 
               assert_eq!(maybe_balance.is_some(), purse_exists);\n            }\n        }\n        \"get_phase\" => {\n            for _i in 0..u64::MAX {\n                let _p = runtime::get_phase();\n            }\n        }\n        \"get_system_contract\" => {\n            for _i in 0..u64::MAX {\n                let _h = system::get_mint();\n            }\n        }\n        \"get_main_purse\" => {\n            for _i in 0..u64::MAX {\n                let _u = account::get_main_purse();\n            }\n        }\n        \"read_host_buffer\" => {\n            // The case where the host buffer is repeatedly filled is covered in the `read`\n            // branch above.  All we do here is check repeatedly where `read_host_buffer` returns\n            // `HostBufferEmpty`.\n            let mut buffer = vec![0; 1];\n            let mut bytes_written = MaybeUninit::uninit();\n            for _i in 0..u64::MAX {\n                let ret = unsafe {\n                    ext_ffi::casper_read_host_buffer(\n                        buffer.as_mut_ptr(),\n                        buffer.len(),\n                        bytes_written.as_mut_ptr(),\n                    )\n                };\n                assert_eq!(ret, u32::from(ApiError::HostBufferEmpty) as i32);\n            }\n        }\n        \"create_contract_package_at_hash\" => {\n            for _i in 0..u64::MAX {\n                let _h = storage::create_contract_package_at_hash();\n            }\n        }\n        \"add_contract_version\" => {\n            let entry_points_len: u32 = runtime::get_named_arg(\"entry_points_len\");\n            let mut entry_points = EntryPoints::new();\n            for entry_point_index in 0..entry_points_len {\n                entry_points.add_entry_point(EntityEntryPoint::new(\n                    format!(\"function_{entry_point_index}\"),\n                    vec![Parameter::new(\"a\", CLType::PublicKey); 10],\n                    CLType::Unit,\n                    
EntryPointAccess::Public,\n                    EntryPointType::Caller,\n                    EntryPointPayment::Caller,\n                ));\n            }\n            let named_keys_len: u32 = runtime::get_named_arg(\"named_keys_len\");\n            let mut named_keys = NamedKeys::new();\n            for named_key_index in 0..named_keys_len {\n                let _ = named_keys.insert(named_key_index.to_string(), Key::Hash([1; 32]));\n            }\n            let (contract_pkg_hash, _uref) = storage::create_contract_package_at_hash();\n            for i in 1..u64::MAX {\n                let (_h, version) = storage::add_contract_version(\n                    contract_pkg_hash,\n                    entry_points.clone(),\n                    named_keys.clone(),\n                    BTreeMap::new(),\n                );\n                assert_eq!(version, i as u32);\n            }\n        }\n        \"disable_contract_version\" => {\n            let (contract_pkg_hash, _uref) = storage::create_contract_package_at_hash();\n            let (contract_hash, _version) = storage::add_contract_version(\n                contract_pkg_hash,\n                EntryPoints::new(),\n                NamedKeys::new(),\n                BTreeMap::new(),\n            );\n            for _i in 0..u64::MAX {\n                storage::disable_contract_version(contract_pkg_hash, contract_hash)\n                    .unwrap_or_revert();\n            }\n        }\n        \"call_versioned_contract\" => {\n            let args_len: u32 = runtime::get_named_arg(\"args_len\");\n            let args = runtime_args! 
{ \"a\" => vec![u8::MAX; args_len as usize] };\n            let (contract_pkg_hash, _uref) = storage::create_contract_package_at_hash();\n            let _ = store_noop_contract(Some(contract_pkg_hash));\n            let (contract_pkg_hash_ptr, contract_pkg_hash_size, _bytes1) =\n                to_ptr(&contract_pkg_hash);\n            let (contract_version_ptr, contract_version_size, _bytes2) = to_ptr(&Some(1_u32));\n            let (entry_point_name_ptr, entry_point_name_size, _bytes3) = to_ptr(&NOOP);\n            let (runtime_args_ptr, runtime_args_size, _bytes4) = to_ptr(&args);\n            let mut bytes_written = MaybeUninit::uninit();\n            for _i in 0..u64::MAX {\n                let ret = unsafe {\n                    ext_ffi::casper_call_versioned_contract(\n                        contract_pkg_hash_ptr,\n                        contract_pkg_hash_size,\n                        contract_version_ptr,\n                        contract_version_size,\n                        entry_point_name_ptr,\n                        entry_point_name_size,\n                        runtime_args_ptr,\n                        runtime_args_size,\n                        bytes_written.as_mut_ptr(),\n                    )\n                };\n                api_error::result_from(ret).unwrap_or_revert();\n            }\n        }\n        \"create_contract_user_group\" => {\n            let label_len: u32 = runtime::get_named_arg(\"label_len\");\n            assert!(label_len > 0);\n            let label_prefix: String = \"a\".repeat(label_len as usize - 1);\n            let num_new_urefs: u8 = runtime::get_named_arg(\"num_new_urefs\");\n            let num_existing_urefs: u8 = runtime::get_named_arg(\"num_existing_urefs\");\n            let mut existing_urefs = BTreeSet::new();\n            for _ in 0..num_existing_urefs {\n                existing_urefs.insert(storage::new_uref(()));\n            }\n            let (existing_urefs_ptr, existing_urefs_size, _bytes1) = 
to_ptr(&existing_urefs);\n            let (contract_pkg_hash, _uref) = storage::create_contract_package_at_hash();\n            let (contract_pkg_hash_ptr, contract_pkg_hash_size, _bytes2) =\n                to_ptr(&contract_pkg_hash);\n            let mut index = 0_u8;\n            let mut label = String::new();\n            let allow_exceeding_max_groups: bool =\n                runtime::get_named_arg(\"allow_exceeding_max_groups\");\n            let expect_failure = num_new_urefs == u8::MAX || allow_exceeding_max_groups;\n            let mut buffer = vec![0_u8; 5_000];\n            let mut output_size = MaybeUninit::uninit();\n            let mut bytes_written = MaybeUninit::uninit();\n            loop {\n                if index == MAX_GROUPS && !allow_exceeding_max_groups {\n                    // We need to remove the group to avoid hitting the `contracts::MAX_GROUPS`\n                    // limit (currently 10).\n                    let result = storage::remove_contract_user_group(contract_pkg_hash, &label);\n                    if !expect_failure {\n                        result.unwrap_or_revert();\n                    }\n                } else {\n                    label = format!(\"{label_prefix}{index}\");\n                    index += 1;\n                }\n                let (label_ptr, label_size, _bytes3) = to_ptr(&label);\n                let ret = unsafe {\n                    ext_ffi::casper_create_contract_user_group(\n                        contract_pkg_hash_ptr,\n                        contract_pkg_hash_size,\n                        label_ptr,\n                        label_size,\n                        num_new_urefs,\n                        existing_urefs_ptr,\n                        existing_urefs_size,\n                        output_size.as_mut_ptr(),\n                    )\n                };\n                if !expect_failure {\n                    api_error::result_from(ret).unwrap_or_revert();\n                    let ret = 
unsafe {\n                        ext_ffi::casper_read_host_buffer(\n                            buffer.as_mut_ptr(),\n                            buffer.len(),\n                            bytes_written.as_mut_ptr(),\n                        )\n                    };\n                    api_error::result_from(ret).unwrap_or_revert();\n                }\n            }\n        }\n        \"print\" => {\n            let num_chars: u32 = runtime::get_named_arg(\"num_chars\");\n            let value: String = \"a\".repeat(num_chars as usize);\n            for _i in 0..u64::MAX {\n                runtime::print(&value);\n            }\n        }\n        \"get_runtime_arg_size\" => {\n            let name = \"arg\";\n            for _i in 0..u64::MAX {\n                let _s = get_named_arg_size(name);\n            }\n        }\n        \"get_runtime_arg\" => {\n            let name = \"arg\";\n            let arg_size = get_named_arg_size(name);\n            let data_non_null_ptr = contract_api::alloc_bytes(arg_size);\n            for _i in 0..u64::MAX {\n                let ret = unsafe {\n                    ext_ffi::casper_get_named_arg(\n                        name.as_bytes().as_ptr(),\n                        name.len(),\n                        data_non_null_ptr.as_ptr(),\n                        arg_size,\n                    )\n                };\n                api_error::result_from(ret).unwrap_or_revert();\n            }\n        }\n        \"remove_contract_user_group\" => {\n            let (contract_pkg_hash, _uref) = storage::create_contract_package_at_hash();\n            for _i in 0..u64::MAX {\n                storage::remove_contract_user_group(contract_pkg_hash, \"a\").unwrap_err();\n            }\n        }\n        \"extend_contract_user_group_urefs\" => {\n            let allow_exceeding_max_urefs: bool =\n                runtime::get_named_arg(\"allow_exceeding_max_urefs\");\n            let (contract_pkg_hash, _uref) = 
storage::create_contract_package_at_hash();\n            let label = \"a\";\n            let _ =\n                storage::create_contract_user_group(contract_pkg_hash, label, 0, BTreeSet::new())\n                    .unwrap_or_revert();\n            for _i in 0..u64::MAX {\n                if allow_exceeding_max_urefs {\n                    let _r = storage::provision_contract_user_group_uref(contract_pkg_hash, label);\n                } else {\n                    let uref =\n                        storage::provision_contract_user_group_uref(contract_pkg_hash, label)\n                            .unwrap_or_revert();\n                    storage::remove_contract_user_group_urefs(\n                        contract_pkg_hash,\n                        label,\n                        BTreeSet::from_iter(Some(uref)),\n                    )\n                    .unwrap_or_revert();\n                }\n            }\n        }\n        \"remove_contract_user_group_urefs\" => {\n            // The success case is covered in `create_contract_user_group` above.  
We only test\n            // for unknown user groups here.\n            let (contract_pkg_hash, _uref) = storage::create_contract_package_at_hash();\n            for _i in 0..u64::MAX {\n                storage::remove_contract_user_group(contract_pkg_hash, \"a\").unwrap_err();\n            }\n        }\n        \"blake2b\" => {\n            let len: u32 = runtime::get_named_arg(\"len\");\n            let data = vec![1; len as usize];\n            for _i in 0..u64::MAX {\n                let _hash = runtime::blake2b(&data);\n            }\n        }\n        \"new_dictionary\" => {\n            let mut buffer = vec![0_u8; 33]; // bytesrepr-serialized length of URef\n            for _i in 0..u64::MAX {\n                let mut value_size = MaybeUninit::uninit();\n                let ret = unsafe { ext_ffi::casper_new_dictionary(value_size.as_mut_ptr()) };\n                api_error::result_from(ret).unwrap_or_revert();\n                assert_eq!(buffer.len(), unsafe { value_size.assume_init() });\n                let mut bytes_written = MaybeUninit::uninit();\n                let ret = unsafe {\n                    ext_ffi::casper_read_host_buffer(\n                        buffer.as_mut_ptr(),\n                        buffer.len(),\n                        bytes_written.as_mut_ptr(),\n                    )\n                };\n                assert_eq!(ret, 0);\n            }\n        }\n        \"dictionary_get\" => {\n            let name_len: u32 = runtime::get_named_arg(\"name_len\");\n            let name: String = \"a\".repeat(name_len as usize);\n            let value_len: u32 = runtime::get_named_arg(\"value_len\");\n            let value = vec![u8::MAX; value_len as usize];\n            let uref = storage::new_dictionary(\"a\").unwrap_or_revert();\n            storage::dictionary_put(uref, &name, value);\n\n            for _i in 0..u64::MAX {\n                let read_value: Vec<u8> = storage::dictionary_get(uref, &name)\n                    
.unwrap_or_revert()\n                    .unwrap_or_revert();\n                assert_eq!(read_value.len(), value_len as usize);\n            }\n        }\n        \"dictionary_put\" => {\n            let name_len: u32 = runtime::get_named_arg(\"name_len\");\n            let name: String = \"a\".repeat(name_len as usize);\n\n            let value_len: u32 = runtime::get_named_arg(\"value_len\");\n            let value = vec![u8::MAX; value_len as usize];\n\n            let uref = storage::new_dictionary(\"a\").unwrap_or_revert();\n            let (uref_ptr, uref_size, _bytes1) = to_ptr(&uref);\n\n            let (item_name_ptr, item_name_size, _bytes2) = to_ptr(&name);\n\n            let cl_value = CLValue::from_t(value).unwrap_or_revert();\n            let (cl_value_ptr, cl_value_size, _bytes3) = to_ptr(&cl_value);\n\n            for _i in 0..u64::MAX {\n                let ret = unsafe {\n                    ext_ffi::casper_dictionary_put(\n                        uref_ptr,\n                        uref_size,\n                        item_name_ptr,\n                        item_name_size,\n                        cl_value_ptr,\n                        cl_value_size,\n                    )\n                };\n                api_error::result_from(ret).unwrap_or_revert();\n            }\n        }\n        \"load_call_stack\" => {\n            for _i in 0..u64::MAX {\n                let call_stack = runtime::get_call_stack();\n                assert_eq!(call_stack.len(), 1);\n            }\n        }\n        \"load_authorization_keys\" => {\n            let setup: bool = runtime::get_named_arg(\"setup\");\n            if setup {\n                let weight = Weight::new(1);\n                for i in 1..100 {\n                    let account_hash = AccountHash::new([i; 32]);\n                    account::add_associated_key(account_hash, weight).unwrap_or_revert();\n                }\n            } else {\n                for _i in 0..u64::MAX {\n                 
   let _k = runtime::list_authorization_keys();\n                }\n            }\n        }\n        \"random_bytes\" => {\n            for _i in 0..u64::MAX {\n                let _n = runtime::random_bytes();\n            }\n        }\n        \"dictionary_read\" => {\n            let name_len: u32 = runtime::get_named_arg(\"name_len\");\n            let name: String = \"a\".repeat(name_len as usize);\n            let value_len: u32 = runtime::get_named_arg(\"value_len\");\n            let value = vec![u8::MAX; value_len as usize];\n            let uref = storage::new_dictionary(\"a\").unwrap_or_revert();\n            storage::dictionary_put(uref, &name, value);\n            let key = Key::dictionary(uref, name.as_bytes());\n\n            for _i in 0..u64::MAX {\n                let read_value: Vec<u8> = storage::dictionary_read(key)\n                    .unwrap_or_revert()\n                    .unwrap_or_revert();\n                assert_eq!(read_value.len(), value_len as usize);\n            }\n        }\n        \"enable_contract_version\" => {\n            let (contract_pkg_hash, _uref) = storage::create_contract_package_at_hash();\n            let (contract_hash, _version) = storage::add_contract_version(\n                contract_pkg_hash,\n                EntryPoints::new(),\n                NamedKeys::new(),\n                BTreeMap::new(),\n            );\n            for _i in 0..u64::MAX {\n                storage::enable_contract_version(contract_pkg_hash, contract_hash)\n                    .unwrap_or_revert();\n            }\n        }\n        _ => panic!(),\n    }\n}\n\n#[no_mangle]\nextern \"C\" fn function_0() {}\n#[no_mangle]\nextern \"C\" fn function_1() {}\n#[no_mangle]\nextern \"C\" fn function_2() {}\n#[no_mangle]\nextern \"C\" fn function_3() {}\n#[no_mangle]\nextern \"C\" fn function_4() {}\n#[no_mangle]\nextern \"C\" fn function_5() {}\n#[no_mangle]\nextern \"C\" fn function_6() {}\n#[no_mangle]\nextern \"C\" fn function_7() 
{}\n#[no_mangle]\nextern \"C\" fn function_8() {}\n#[no_mangle]\nextern \"C\" fn function_9() {}\n#[no_mangle]\nextern \"C\" fn function_10() {}\n#[no_mangle]\nextern \"C\" fn function_11() {}\n#[no_mangle]\nextern \"C\" fn function_12() {}\n#[no_mangle]\nextern \"C\" fn function_13() {}\n#[no_mangle]\nextern \"C\" fn function_14() {}\n#[no_mangle]\nextern \"C\" fn function_15() {}\n#[no_mangle]\nextern \"C\" fn function_16() {}\n#[no_mangle]\nextern \"C\" fn function_17() {}\n#[no_mangle]\nextern \"C\" fn function_18() {}\n#[no_mangle]\nextern \"C\" fn function_19() {}\n#[no_mangle]\nextern \"C\" fn function_20() {}\n#[no_mangle]\nextern \"C\" fn function_21() {}\n#[no_mangle]\nextern \"C\" fn function_22() {}\n#[no_mangle]\nextern \"C\" fn function_23() {}\n#[no_mangle]\nextern \"C\" fn function_24() {}\n#[no_mangle]\nextern \"C\" fn function_25() {}\n#[no_mangle]\nextern \"C\" fn function_26() {}\n#[no_mangle]\nextern \"C\" fn function_27() {}\n#[no_mangle]\nextern \"C\" fn function_28() {}\n#[no_mangle]\nextern \"C\" fn function_29() {}\n#[no_mangle]\nextern \"C\" fn function_30() {}\n#[no_mangle]\nextern \"C\" fn function_31() {}\n#[no_mangle]\nextern \"C\" fn function_32() {}\n#[no_mangle]\nextern \"C\" fn function_33() {}\n#[no_mangle]\nextern \"C\" fn function_34() {}\n#[no_mangle]\nextern \"C\" fn function_35() {}\n#[no_mangle]\nextern \"C\" fn function_36() {}\n#[no_mangle]\nextern \"C\" fn function_37() {}\n#[no_mangle]\nextern \"C\" fn function_38() {}\n#[no_mangle]\nextern \"C\" fn function_39() {}\n#[no_mangle]\nextern \"C\" fn function_40() {}\n#[no_mangle]\nextern \"C\" fn function_41() {}\n#[no_mangle]\nextern \"C\" fn function_42() {}\n#[no_mangle]\nextern \"C\" fn function_43() {}\n#[no_mangle]\nextern \"C\" fn function_44() {}\n#[no_mangle]\nextern \"C\" fn function_45() {}\n#[no_mangle]\nextern \"C\" fn function_46() {}\n#[no_mangle]\nextern \"C\" fn function_47() {}\n#[no_mangle]\nextern \"C\" fn function_48() {}\n#[no_mangle]\nextern \"C\" fn 
function_49() {}\n#[no_mangle]\nextern \"C\" fn function_50() {}\n#[no_mangle]\nextern \"C\" fn function_51() {}\n#[no_mangle]\nextern \"C\" fn function_52() {}\n#[no_mangle]\nextern \"C\" fn function_53() {}\n#[no_mangle]\nextern \"C\" fn function_54() {}\n#[no_mangle]\nextern \"C\" fn function_55() {}\n#[no_mangle]\nextern \"C\" fn function_56() {}\n#[no_mangle]\nextern \"C\" fn function_57() {}\n#[no_mangle]\nextern \"C\" fn function_58() {}\n#[no_mangle]\nextern \"C\" fn function_59() {}\n#[no_mangle]\nextern \"C\" fn function_60() {}\n#[no_mangle]\nextern \"C\" fn function_61() {}\n#[no_mangle]\nextern \"C\" fn function_62() {}\n#[no_mangle]\nextern \"C\" fn function_63() {}\n#[no_mangle]\nextern \"C\" fn function_64() {}\n#[no_mangle]\nextern \"C\" fn function_65() {}\n#[no_mangle]\nextern \"C\" fn function_66() {}\n#[no_mangle]\nextern \"C\" fn function_67() {}\n#[no_mangle]\nextern \"C\" fn function_68() {}\n#[no_mangle]\nextern \"C\" fn function_69() {}\n#[no_mangle]\nextern \"C\" fn function_70() {}\n#[no_mangle]\nextern \"C\" fn function_71() {}\n#[no_mangle]\nextern \"C\" fn function_72() {}\n#[no_mangle]\nextern \"C\" fn function_73() {}\n#[no_mangle]\nextern \"C\" fn function_74() {}\n#[no_mangle]\nextern \"C\" fn function_75() {}\n#[no_mangle]\nextern \"C\" fn function_76() {}\n#[no_mangle]\nextern \"C\" fn function_77() {}\n#[no_mangle]\nextern \"C\" fn function_78() {}\n#[no_mangle]\nextern \"C\" fn function_79() {}\n#[no_mangle]\nextern \"C\" fn function_80() {}\n#[no_mangle]\nextern \"C\" fn function_81() {}\n#[no_mangle]\nextern \"C\" fn function_82() {}\n#[no_mangle]\nextern \"C\" fn function_83() {}\n#[no_mangle]\nextern \"C\" fn function_84() {}\n#[no_mangle]\nextern \"C\" fn function_85() {}\n#[no_mangle]\nextern \"C\" fn function_86() {}\n#[no_mangle]\nextern \"C\" fn function_87() {}\n#[no_mangle]\nextern \"C\" fn function_88() {}\n#[no_mangle]\nextern \"C\" fn function_89() {}\n#[no_mangle]\nextern \"C\" fn function_90() 
{}\n#[no_mangle]\nextern \"C\" fn function_91() {}\n#[no_mangle]\nextern \"C\" fn function_92() {}\n#[no_mangle]\nextern \"C\" fn function_93() {}\n#[no_mangle]\nextern \"C\" fn function_94() {}\n#[no_mangle]\nextern \"C\" fn function_95() {}\n#[no_mangle]\nextern \"C\" fn function_96() {}\n#[no_mangle]\nextern \"C\" fn function_97() {}\n#[no_mangle]\nextern \"C\" fn function_98() {}\n#[no_mangle]\nextern \"C\" fn function_99() {}\n"
  },
  {
    "path": "smart_contracts/contracts/test/remove-associated-key/Cargo.toml",
    "content": "[package]\nname = \"remove-associated-key\"\nversion = \"0.1.0\"\nauthors = [\"Ed Hastings <ed@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"remove_associated_key\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/remove-associated-key/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::{\n    contract_api::{account, runtime},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::account::AccountHash;\n\nconst ARG_ACCOUNT: &str = \"account\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let account: AccountHash = runtime::get_named_arg(ARG_ACCOUNT);\n    account::remove_associated_key(account).unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/ret-uref/Cargo.toml",
    "content": "[package]\nname = \"ret-uref\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html\n[[bin]]\nname = \"ret_uref\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/ret-uref/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::{\n    string::{String, ToString},\n    vec,\n};\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    runtime_args, AddressableEntityHash, CLType, CLValue, EntityEntryPoint, EntryPointAccess,\n    EntryPointPayment, EntryPointType, EntryPoints, Key, Parameter, URef,\n};\n\nconst ACCESS_UREF: &str = \"access_uref\";\nconst PUT_UREF: &str = \"put_uref\";\nconst GET_UREF: &str = \"get_uref\";\nconst INSERT_UREF: &str = \"insert_uref\";\nconst HASH_KEY_NAME: &str = \"ret_uref_contract_hash\";\n\n#[no_mangle]\npub extern \"C\" fn put_uref() {\n    let access_uref: URef = runtime::get_named_arg(ACCESS_UREF);\n    runtime::put_key(ACCESS_UREF, access_uref.into());\n}\n\n#[no_mangle]\npub extern \"C\" fn get_uref() {\n    let uref = runtime::get_key(ACCESS_UREF)\n        .unwrap_or_revert()\n        .into_uref()\n        .unwrap_or_revert();\n    runtime::ret(CLValue::from_t(uref).unwrap_or_revert())\n}\n\n#[no_mangle]\npub extern \"C\" fn insert_uref() {\n    let contract_hash = runtime::get_named_arg(\"contract_hash\");\n    let uref_name: String = runtime::get_named_arg(\"name\");\n    let access_uref: URef = runtime::call_contract(contract_hash, GET_UREF, runtime_args! 
{});\n    runtime::put_key(&uref_name, access_uref.into());\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n        let put_uref_entrypoint = EntityEntryPoint::new(\n            PUT_UREF.to_string(),\n            vec![Parameter::new(ACCESS_UREF, CLType::URef)],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(put_uref_entrypoint);\n        let get_uref_entrypoint = EntityEntryPoint::new(\n            GET_UREF.to_string(),\n            vec![],\n            CLType::URef,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(get_uref_entrypoint);\n        let insert_uref_entrypoint = EntityEntryPoint::new(\n            INSERT_UREF.to_string(),\n            vec![Parameter::new(\"contract_hash\", CLType::ByteArray(32))],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Caller,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(insert_uref_entrypoint);\n        entry_points\n    };\n    let (contract_hash, _) = storage::new_contract(entry_points, None, None, None, None);\n    runtime::put_key(\n        HASH_KEY_NAME,\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/set-action-thresholds/Cargo.toml",
    "content": "[package]\nname = \"set-action-thresholds\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"set_action_thresholds\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/set-action-thresholds/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::{\n    contract_api::{account, runtime},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::account::{ActionType, Weight};\n\nconst ARG_KEY_MANAGEMENT_THRESHOLD: &str = \"key_management_threshold\";\nconst ARG_DEPLOY_THRESHOLD: &str = \"deploy_threshold\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let key_management_threshold: Weight = runtime::get_named_arg(ARG_KEY_MANAGEMENT_THRESHOLD);\n    let deploy_threshold: Weight = runtime::get_named_arg(ARG_DEPLOY_THRESHOLD);\n\n    if key_management_threshold != Weight::new(0) {\n        account::set_action_threshold(ActionType::KeyManagement, key_management_threshold)\n            .unwrap_or_revert()\n    }\n\n    if deploy_threshold != Weight::new(0) {\n        account::set_action_threshold(ActionType::Deployment, deploy_threshold).unwrap_or_revert()\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/staking/Cargo.toml",
    "content": "[package]\nname = \"staking\"\nversion = \"0.1.0\"\nauthors = [\"Ed Hastings <ed@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"staking\"\npath = \"src/bin/main.rs\"\ndoctest = false\ntest = false\nbench = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/staking/src/bin/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    staking::run();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/staking/src/lib.rs",
    "content": "#![no_std]\n\nextern crate alloc;\n\nuse alloc::{\n    string::{String, ToString},\n    vec,\n    vec::Vec,\n};\n\nuse casper_contract::{\n    contract_api::{\n        runtime::{self, revert},\n        storage::read_from_key,\n        system,\n    },\n    ext_ffi,\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    account::AccountHash,\n    api_error,\n    bytesrepr::{self, ToBytes},\n    runtime_args,\n    system::auction::{self, BidAddr, BidKind},\n    ApiError, CLValue, Key, PublicKey, URef, U512,\n};\n\npub const STAKING_ID: &str = \"staking_contract\";\n\npub const ARG_ACTION: &str = \"action\";\npub const ARG_AMOUNT: &str = \"amount\";\npub const ARG_VALIDATOR: &str = \"validator\";\npub const ARG_NEW_VALIDATOR: &str = \"new_validator\";\n\npub const STAKING_PURSE: &str = \"staking_purse\";\npub const INSTALLER: &str = \"installer\";\npub const CONTRACT_NAME: &str = \"staking\";\npub const HASH_KEY_NAME: &str = \"staking_package\";\npub const ACCESS_KEY_NAME: &str = \"staking_package_access\";\npub const CONTRACT_VERSION: &str = \"staking_contract_version\";\npub const ENTRY_POINT_RUN: &str = \"run\";\n\n#[repr(u16)]\nenum StakingError {\n    InvalidAccount = 1,\n    MissingInstaller = 2,\n    InvalidInstaller = 3,\n    MissingStakingPurse = 4,\n    InvalidStakingPurse = 5,\n    UnexpectedKeyVariant = 6,\n    UnexpectedAction = 7,\n    MissingValidator = 8,\n    MissingNewValidator = 9,\n}\n\nimpl From<StakingError> for ApiError {\n    fn from(e: StakingError) -> Self {\n        ApiError::User(e as u16)\n    }\n}\n\n#[no_mangle]\npub fn run() {\n    let caller = runtime::get_caller();\n    let installer = get_account_hash_with_user_errors(\n        INSTALLER,\n        StakingError::MissingInstaller,\n        StakingError::InvalidInstaller,\n    );\n\n    if caller != installer {\n        revert(ApiError::User(StakingError::InvalidAccount as u16));\n    }\n\n    let action: String = runtime::get_named_arg(ARG_ACTION);\n\n    
if action == *\"UNSTAKE\".to_string() {\n        unstake();\n    } else if action == *\"STAKE\".to_string() {\n        stake();\n    } else if action == *\"STAKE_ALL\".to_string() {\n        stake_all();\n    } else if action == *\"RESTAKE\".to_string() {\n        restake();\n    } else if action == *\"STAKED_AMOUNT\".to_string() {\n        read_staked_amount_gs();\n    } else {\n        revert(ApiError::User(StakingError::UnexpectedAction as u16));\n    }\n}\n\nfn unstake() {\n    let args = get_unstaking_args(false);\n    let contract_hash = system::get_auction();\n    runtime::call_contract::<U512>(contract_hash, auction::METHOD_UNDELEGATE, args);\n}\n\nfn restake() {\n    let args = get_unstaking_args(true);\n    let contract_hash = system::get_auction();\n    runtime::call_contract::<U512>(contract_hash, auction::METHOD_REDELEGATE, args);\n}\n\nfn stake() {\n    let staking_purse = get_uref_with_user_errors(\n        STAKING_PURSE,\n        StakingError::MissingStakingPurse,\n        StakingError::InvalidStakingPurse,\n    );\n    let validator: PublicKey = match runtime::try_get_named_arg(ARG_VALIDATOR) {\n        Some(validator_public_key) => validator_public_key,\n        None => revert(ApiError::User(StakingError::MissingValidator as u16)),\n    };\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n    let contract_hash = system::get_auction();\n    let args = runtime_args! 
{\n        auction::ARG_DELEGATOR_PURSE => staking_purse,\n        auction::ARG_VALIDATOR => validator,\n        auction::ARG_AMOUNT => amount,\n    };\n    runtime::call_contract::<U512>(contract_hash, auction::METHOD_DELEGATE, args);\n}\n\nfn stake_all() {\n    let staking_purse = get_uref_with_user_errors(\n        STAKING_PURSE,\n        StakingError::MissingStakingPurse,\n        StakingError::InvalidStakingPurse,\n    );\n    let validator: PublicKey = match runtime::try_get_named_arg(ARG_VALIDATOR) {\n        Some(validator_public_key) => validator_public_key,\n        None => revert(ApiError::User(StakingError::MissingValidator as u16)),\n    };\n    let amount: U512 = system::get_purse_balance(staking_purse).unwrap_or_revert();\n    let contract_hash = system::get_auction();\n    let args = runtime_args! {\n        auction::ARG_DELEGATOR_PURSE => staking_purse,\n        auction::ARG_VALIDATOR => validator,\n        auction::ARG_AMOUNT => amount,\n    };\n    runtime::call_contract::<U512>(contract_hash, auction::METHOD_DELEGATE, args);\n}\n\npub fn read_staked_amount_gs() {\n    let purse = get_uref_with_user_errors(\n        STAKING_PURSE,\n        StakingError::MissingStakingPurse,\n        StakingError::InvalidStakingPurse,\n    );\n\n    let validator = match runtime::try_get_named_arg::<PublicKey>(ARG_VALIDATOR) {\n        Some(validator_public_key) => validator_public_key,\n        None => revert(ApiError::User(StakingError::MissingValidator as u16)),\n    };\n\n    let key = Key::BidAddr(BidAddr::DelegatedPurse {\n        validator: validator.to_account_hash(),\n        delegator: purse.addr(),\n    });\n\n    let bid = read_from_key::<BidKind>(key);\n\n    let staked_amount = if let Ok(Some(BidKind::Delegator(delegator_bid))) = bid {\n        delegator_bid.staked_amount()\n    } else {\n        U512::zero()\n    };\n\n    runtime::ret(CLValue::from_t(staked_amount).unwrap_or_revert());\n}\n\nfn get_unstaking_args(is_restake: bool) -> 
casper_types::RuntimeArgs {\n    let staking_purse = get_uref_with_user_errors(\n        STAKING_PURSE,\n        StakingError::MissingStakingPurse,\n        StakingError::InvalidStakingPurse,\n    );\n    let validator: PublicKey = match runtime::try_get_named_arg(ARG_VALIDATOR) {\n        Some(validator_public_key) => validator_public_key,\n        None => revert(ApiError::User(StakingError::MissingValidator as u16)),\n    };\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n    if !is_restake {\n        return runtime_args! {\n            auction::ARG_DELEGATOR_PURSE => staking_purse,\n            auction::ARG_VALIDATOR => validator,\n            auction::ARG_AMOUNT => amount,\n        };\n    }\n\n    let new_validator: PublicKey = match runtime::try_get_named_arg(ARG_NEW_VALIDATOR) {\n        Some(validator_public_key) => validator_public_key,\n        None => revert(ApiError::User(StakingError::MissingNewValidator as u16)),\n    };\n\n    runtime_args! {\n        auction::ARG_DELEGATOR_PURSE => staking_purse,\n        auction::ARG_VALIDATOR => validator,\n        auction::ARG_NEW_VALIDATOR => new_validator,\n        auction::ARG_AMOUNT => amount,\n    }\n}\n\nfn get_account_hash_with_user_errors(\n    name: &str,\n    missing: StakingError,\n    invalid: StakingError,\n) -> AccountHash {\n    let key = get_key_with_user_errors(name, missing, invalid);\n    key.into_account()\n        .unwrap_or_revert_with(StakingError::UnexpectedKeyVariant)\n}\n\nfn get_uref_with_user_errors(name: &str, missing: StakingError, invalid: StakingError) -> URef {\n    let key = get_key_with_user_errors(name, missing, invalid);\n    key.into_uref()\n        .unwrap_or_revert_with(StakingError::UnexpectedKeyVariant)\n}\n\nfn get_key_with_user_errors(name: &str, missing: StakingError, invalid: StakingError) -> Key {\n    let (name_ptr, name_size, _bytes) = to_ptr(name);\n    let mut key_bytes = vec![0u8; Key::max_serialized_length()];\n    let mut total_bytes: usize = 0;\n 
   let ret = unsafe {\n        ext_ffi::casper_get_key(\n            name_ptr,\n            name_size,\n            key_bytes.as_mut_ptr(),\n            key_bytes.len(),\n            &mut total_bytes as *mut usize,\n        )\n    };\n    match api_error::result_from(ret) {\n        Ok(_) => {}\n        Err(ApiError::MissingKey) => revert(missing),\n        Err(e) => revert(e),\n    }\n    key_bytes.truncate(total_bytes);\n\n    bytesrepr::deserialize(key_bytes).unwrap_or_revert_with(invalid)\n}\n\nfn to_ptr<T: ToBytes>(t: T) -> (*const u8, usize, Vec<u8>) {\n    let bytes = t.into_bytes().unwrap_or_revert();\n    let ptr = bytes.as_ptr();\n    let size = bytes.len();\n    (ptr, size, bytes)\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/staking-stored/Cargo.toml",
    "content": "[package]\nname = \"staking-stored\"\nversion = \"0.1.0\"\nauthors = [\"Ed Hastings <ed@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"staking_stored\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\nstaking = { path = \"../staking\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/staking-stored/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::{string::ToString, vec};\n\nuse casper_contract::{\n    contract_api::{account, runtime, storage, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    contracts::NamedKeys, ApiError, CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment,\n    EntryPointType, EntryPoints, Key, Parameter, URef,\n};\n\n#[repr(u16)]\nenum InstallerSessionError {\n    FailedToTransfer = 101,\n}\n\n#[no_mangle]\npub extern \"C\" fn call_staking() {\n    staking::run();\n}\n\nfn build_named_keys_and_purse() -> (NamedKeys, URef) {\n    let mut named_keys = NamedKeys::new();\n    let purse = system::create_purse();\n\n    named_keys.insert(staking::STAKING_PURSE.to_string(), purse.into());\n    named_keys.insert(staking::INSTALLER.to_string(), runtime::get_caller().into());\n\n    (named_keys, purse)\n}\n\nfn entry_points() -> EntryPoints {\n    let mut entry_points = EntryPoints::new();\n\n    entry_points.add_entry_point(EntityEntryPoint::new(\n        staking::ENTRY_POINT_RUN,\n        vec![\n            Parameter::new(staking::ARG_ACTION, CLType::String),\n            Parameter::new(staking::ARG_AMOUNT, CLType::U512),\n            Parameter::new(staking::ARG_VALIDATOR, CLType::PublicKey),\n            Parameter::new(staking::ARG_NEW_VALIDATOR, CLType::PublicKey),\n        ],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n\n    entry_points\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_points = entry_points();\n\n    let (staking_named_keys, staking_purse) = build_named_keys_and_purse();\n\n    let (contract_hash, contract_version) = storage::new_contract(\n        entry_points,\n        Some(staking_named_keys),\n        Some(staking::HASH_KEY_NAME.to_string()),\n        Some(staking::ACCESS_KEY_NAME.to_string()),\n        None,\n    );\n\n    runtime::put_key(\n        
staking::CONTRACT_VERSION,\n        storage::new_uref(contract_version).into(),\n    );\n\n    runtime::put_key(staking::CONTRACT_NAME, Key::Hash(contract_hash.value()));\n\n    // Initial funding amount.\n    let amount = runtime::get_named_arg(staking::ARG_AMOUNT);\n    system::transfer_from_purse_to_purse(account::get_main_purse(), staking_purse, amount, None)\n        .unwrap_or_revert_with(ApiError::User(\n            InstallerSessionError::FailedToTransfer as u16,\n        ));\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/storage-costs/Cargo.toml",
    "content": "[package]\nname = \"storage-costs\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"storage_costs\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/storage-costs/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::{collections::BTreeMap, string::ToString, vec::Vec};\n\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    contracts::NamedKeys, CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment,\n    EntryPointType, EntryPoints, Key, U512,\n};\n\nconst WRITE_FUNCTION_SMALL_NAME: &str = \"write_function_small\";\nconst WRITE_FUNCTION_LARGE_NAME: &str = \"write_function_large\";\nconst ADD_FUNCTION_SMALL_NAME: &str = \"add_function_small\";\nconst ADD_FUNCTION_LARGE_NAME: &str = \"add_function_large\";\nconst WRITE_KEY_NAME: &str = \"write\";\nconst ADD_KEY_NAME: &str = \"add\";\nconst WRITE_SMALL_VALUE: &[u8] = b\"1\";\nconst WRITE_LARGE_VALUE: &[u8] = b\"1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111\";\nconst HASH_KEY_NAME: &str = \"contract_package\";\nconst CONTRACT_KEY_NAME: &str = \"contract\";\nconst ADD_SMALL_VALUE: u64 = 1;\nconst ADD_LARGE_VALUE: u64 = u64::MAX;\nconst NEW_UREF_FUNCTION: &str = \"new_uref_function\";\nconst PUT_KEY_FUNCTION: &str = \"put_key_function\";\nconst REMOVE_KEY_FUNCTION: &str = \"remove_key_function\";\nconst NEW_KEY_NAME: &str = \"new_key\";\nconst CREATE_CONTRACT_PACKAGE_AT_HASH_FUNCTION: &str = \"create_contract_package_at_hash_function\";\nconst CREATE_CONTRACT_USER_GROUP_FUNCTION_FUNCTION: &str = \"create_contract_user_group_function\";\nconst PROVISION_UREFS_FUNCTION: &str = \"provision_urefs_function\";\nconst ACCESS_KEY_NAME: &str = \"access_key\";\nconst REMOVE_CONTRACT_USER_GROUP_FUNCTION: &str = \"remove_contract_user_group_function\";\nconst LABEL_NAME: &str = \"Label\";\nconst NEW_UREF_SUBCALL_FUNCTION: &str = \"new_uref_subcall\";\n\n#[no_mangle]\npub extern \"C\" fn write_function_small() {\n    let uref = runtime::get_key(WRITE_KEY_NAME)\n        .and_then(Key::into_uref)\n        .unwrap_or_revert();\n    
storage::write(uref, WRITE_SMALL_VALUE.to_vec());\n}\n\n#[no_mangle]\npub extern \"C\" fn write_function_large() {\n    let uref = runtime::get_key(WRITE_KEY_NAME)\n        .and_then(Key::into_uref)\n        .unwrap_or_revert();\n    storage::write(uref, WRITE_LARGE_VALUE.to_vec());\n}\n\n#[no_mangle]\npub extern \"C\" fn add_function_small() {\n    let uref = runtime::get_key(ADD_KEY_NAME)\n        .and_then(Key::into_uref)\n        .unwrap_or_revert();\n    storage::add(uref, U512::from(ADD_SMALL_VALUE));\n}\n\n#[no_mangle]\npub extern \"C\" fn add_function_large() {\n    let uref = runtime::get_key(ADD_KEY_NAME)\n        .and_then(Key::into_uref)\n        .unwrap_or_revert();\n    storage::add(uref, U512::from(ADD_LARGE_VALUE));\n}\n\n#[no_mangle]\npub extern \"C\" fn new_uref_function() {\n    let _new_uref = storage::new_uref(0u64);\n}\n\n#[no_mangle]\npub extern \"C\" fn put_key_function() {\n    runtime::put_key(NEW_KEY_NAME, Key::Hash([0; 32]));\n}\n\n#[no_mangle]\npub extern \"C\" fn remove_key_function() {\n    runtime::remove_key(WRITE_KEY_NAME);\n}\n\n#[no_mangle]\npub extern \"C\" fn create_contract_package_at_hash_function() {\n    let (_contract_package_hash, _access_key) = storage::create_contract_package_at_hash();\n}\n\n#[no_mangle]\npub extern \"C\" fn create_contract_user_group_function() {\n    let contract_package_hash = runtime::get_key(CONTRACT_KEY_NAME)\n        .and_then(Key::into_package_hash)\n        .expect(\"should have package hash\");\n    let _result = storage::create_contract_user_group(\n        contract_package_hash.into(),\n        LABEL_NAME,\n        0,\n        Default::default(),\n    )\n    .unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn provision_urefs_function() {\n    let contract_package_hash = runtime::get_key(CONTRACT_KEY_NAME)\n        .and_then(Key::into_package_hash)\n        .expect(\"should have package hash\");\n    let _result =\n        
storage::provision_contract_user_group_uref(contract_package_hash.into(), LABEL_NAME)\n            .unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn remove_contract_user_group_function() {\n    let contract_package_hash = runtime::get_key(CONTRACT_KEY_NAME)\n        .and_then(Key::into_package_hash)\n        .expect(\"should have package hash\");\n    storage::remove_contract_user_group(contract_package_hash.into(), LABEL_NAME)\n        .unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn new_uref_subcall() {\n    let contract_package_hash = runtime::get_key(CONTRACT_KEY_NAME)\n        .and_then(Key::into_package_hash)\n        .expect(\"should have package hash\");\n    runtime::call_versioned_contract(\n        contract_package_hash.into(),\n        None,\n        NEW_UREF_FUNCTION,\n        Default::default(),\n    )\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n        let entry_point = EntityEntryPoint::new(\n            WRITE_FUNCTION_SMALL_NAME,\n            Vec::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n        let entry_point = EntityEntryPoint::new(\n            WRITE_FUNCTION_LARGE_NAME,\n            Vec::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n        let entry_point = EntityEntryPoint::new(\n            ADD_FUNCTION_SMALL_NAME,\n            Vec::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n        let entry_point = EntityEntryPoint::new(\n       
     ADD_FUNCTION_LARGE_NAME,\n            Vec::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n\n        let entry_point = EntityEntryPoint::new(\n            NEW_UREF_FUNCTION,\n            Vec::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n\n        let entry_point = EntityEntryPoint::new(\n            PUT_KEY_FUNCTION,\n            Vec::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n\n        let entry_point = EntityEntryPoint::new(\n            REMOVE_KEY_FUNCTION,\n            Vec::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n\n        let entry_point = EntityEntryPoint::new(\n            CREATE_CONTRACT_PACKAGE_AT_HASH_FUNCTION,\n            Vec::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n\n        let entry_point = EntityEntryPoint::new(\n            CREATE_CONTRACT_USER_GROUP_FUNCTION_FUNCTION,\n            Vec::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n\n        let entry_point = EntityEntryPoint::new(\n            PROVISION_UREFS_FUNCTION,\n            
Vec::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n\n        let entry_point = EntityEntryPoint::new(\n            REMOVE_CONTRACT_USER_GROUP_FUNCTION,\n            Vec::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n\n        let entry_point = EntityEntryPoint::new(\n            NEW_UREF_SUBCALL_FUNCTION,\n            Vec::new(),\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n\n        entry_points\n    };\n\n    let (contract_package_hash, access_uref) = storage::create_contract_package_at_hash();\n    runtime::put_key(HASH_KEY_NAME, contract_package_hash.into());\n\n    let named_keys = {\n        let mut named_keys = NamedKeys::new();\n\n        let uref_for_writing = storage::new_uref(());\n        named_keys.insert(WRITE_KEY_NAME.to_string(), uref_for_writing.into());\n\n        let uref_for_adding = storage::new_uref(U512::zero());\n        named_keys.insert(ADD_KEY_NAME.to_string(), uref_for_adding.into());\n\n        named_keys.insert(\n            CONTRACT_KEY_NAME.to_string(),\n            Key::SmartContract(contract_package_hash.value()),\n        );\n        named_keys.insert(ACCESS_KEY_NAME.to_string(), access_uref.into());\n\n        named_keys\n    };\n\n    let (contract_hash, _version) = storage::add_contract_version(\n        contract_package_hash,\n        entry_points,\n        named_keys,\n        BTreeMap::new(),\n    );\n    runtime::put_key(CONTRACT_KEY_NAME, Key::Hash(contract_hash.value()));\n    runtime::put_key(ACCESS_KEY_NAME, 
access_uref.into());\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/system-contract-hashes/Cargo.toml",
    "content": "[package]\nname = \"system-contract-hashes\"\nversion = \"0.1.0\"\nauthors = [\"Ed Hastings <ehastings@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"system_contract_hashes\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/system-contract-hashes/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::contract_api::{runtime, system};\nuse casper_types::system::{AUCTION, HANDLE_PAYMENT, MINT};\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    runtime::put_key(MINT, system::get_mint().into());\n    runtime::put_key(HANDLE_PAYMENT, system::get_handle_payment().into());\n    runtime::put_key(AUCTION, system::get_auction().into());\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/test-payment-stored/Cargo.toml",
    "content": "[package]\nname = \"test-payment-stored\"\nversion = \"0.1.0\"\nauthors = [\"Henry Till <henrytill@gmail.com>\", \"Ed Hastings <ed@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"test_payment_stored\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/test-payment-stored/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::{string::ToString, vec};\n\nuse casper_contract::{\n    contract_api::{account, runtime, storage, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    addressable_entity::{\n        EntityEntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Parameter,\n    },\n    system::standard_payment,\n    AddressableEntityHash, CLType, EntryPointPayment, Key, RuntimeArgs, URef, U512,\n};\n\nconst ENTRY_FUNCTION_NAME: &str = \"pay\";\nconst HASH_KEY_NAME: &str = \"test_payment_hash\";\nconst PACKAGE_HASH_KEY_NAME: &str = \"test_payment_package_hash\";\nconst ACCESS_KEY_NAME: &str = \"test_payment_access\";\nconst CONTRACT_VERSION: &str = \"contract_version\";\nconst GET_PAYMENT_PURSE: &str = \"get_payment_purse\";\n\n#[no_mangle]\npub extern \"C\" fn pay() {\n    // amount to transfer from named purse to payment purse\n    let amount: U512 = runtime::get_named_arg(standard_payment::ARG_AMOUNT);\n\n    let purse_uref = account::get_main_purse();\n\n    // handle payment contract\n    let handle_payment_contract_hash = system::get_handle_payment();\n\n    // get payment purse for current execution\n    let payment_purse: URef = runtime::call_contract(\n        handle_payment_contract_hash,\n        GET_PAYMENT_PURSE,\n        RuntimeArgs::default(),\n    );\n\n    // transfer amount from named purse to payment purse, which will be used to pay for execution\n    system::transfer_from_purse_to_purse(purse_uref, payment_purse, amount, None)\n        .unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n        let entry_point = EntityEntryPoint::new(\n            ENTRY_FUNCTION_NAME.to_string(),\n            vec![Parameter::new(standard_payment::ARG_AMOUNT, CLType::U512)],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n        
    EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n        entry_points\n    };\n    let (contract_hash, contract_version) = storage::new_contract(\n        entry_points,\n        None,\n        Some(PACKAGE_HASH_KEY_NAME.to_string()),\n        Some(ACCESS_KEY_NAME.to_string()),\n        None,\n    );\n    runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into());\n    runtime::put_key(\n        HASH_KEY_NAME,\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-main-purse-to-new-purse/Cargo.toml",
    "content": "[package]\nname = \"transfer-main-purse-to-new-purse\"\nversion = \"0.1.0\"\nauthors = [\"Ed Hastings <ed@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"transfer_main_purse_to_new_purse\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-main-purse-to-new-purse/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\n\nuse casper_contract::{\n    contract_api::{account, runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{URef, U512};\n\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_DESTINATION: &str = \"destination\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n    let destination_name: String = runtime::get_named_arg(ARG_DESTINATION);\n\n    let source: URef = account::get_main_purse();\n    let destination = system::create_purse();\n    system::transfer_from_purse_to_purse(source, destination, amount, None).unwrap_or_revert();\n    runtime::put_key(&destination_name, destination.into());\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-main-purse-to-two-purses/Cargo.toml",
    "content": "[package]\nname = \"transfer-main-purse-to-two-purses\"\nversion = \"0.1.0\"\nauthors = [\"Joe Sacher <joe.sacher@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"transfer_main_purse_to_two_purses\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-main-purse-to-two-purses/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\n\nuse casper_contract::{\n    contract_api::{account, runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{ApiError, URef, U512};\n\nconst DESTINATION_PURSE_ONE: &str = \"destination_purse_one\";\nconst DESTINATION_PURSE_TWO: &str = \"destination_purse_two\";\nconst TRANSFER_AMOUNT_ONE: &str = \"transfer_amount_one\";\nconst TRANSFER_AMOUNT_TWO: &str = \"transfer_amount_two\";\n\n#[repr(u16)]\nenum CustomError {\n    TransferToPurseOneFailed = 101,\n    TransferToPurseTwoFailed = 102,\n}\n\nfn get_or_create_purse(purse_name: &str) -> URef {\n    match runtime::get_key(purse_name) {\n        None => {\n            // Create and store purse if doesn't exist\n            let purse = system::create_purse();\n            runtime::put_key(purse_name, purse.into());\n            purse\n        }\n        Some(purse_key) => match purse_key.as_uref() {\n            Some(uref) => *uref,\n            None => runtime::revert(ApiError::UnexpectedKeyVariant),\n        },\n    }\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let main_purse: URef = account::get_main_purse();\n\n    let destination_purse_one_name: String = runtime::get_named_arg(DESTINATION_PURSE_ONE);\n\n    let destination_purse_one = get_or_create_purse(&destination_purse_one_name);\n\n    let destination_purse_two_name: String = runtime::get_named_arg(DESTINATION_PURSE_TWO);\n    let transfer_amount_one: U512 = runtime::get_named_arg(TRANSFER_AMOUNT_ONE);\n\n    let destination_purse_two = get_or_create_purse(&destination_purse_two_name);\n\n    let transfer_amount_two: U512 = runtime::get_named_arg(TRANSFER_AMOUNT_TWO);\n\n    system::transfer_from_purse_to_purse(\n        main_purse,\n        destination_purse_one,\n        transfer_amount_one,\n        None,\n    )\n    .unwrap_or_revert_with(ApiError::User(CustomError::TransferToPurseOneFailed as u16));\n    
system::transfer_from_purse_to_purse(\n        main_purse,\n        destination_purse_two,\n        transfer_amount_two,\n        None,\n    )\n    .unwrap_or_revert_with(ApiError::User(CustomError::TransferToPurseTwoFailed as u16));\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-purse-to-account/Cargo.toml",
    "content": "[package]\nname = \"transfer-purse-to-account\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"transfer_purse_to_account\"\npath = \"src/bin/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-purse-to-account/src/bin/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    transfer_purse_to_account::delegate();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-purse-to-account/src/lib.rs",
    "content": "#![no_std]\n\nuse casper_contract::{\n    contract_api::{account, runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{account::AccountHash, URef, U512};\n\nconst ARG_TARGET: &str = \"target\";\nconst ARG_AMOUNT: &str = \"amount\";\n\npub fn delegate() {\n    let source: URef = account::get_main_purse();\n    let target: AccountHash = runtime::get_named_arg(ARG_TARGET);\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n\n    let _transfer_result =\n        system::transfer_from_purse_to_account(source, target, amount, None).unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-purse-to-account-stored/Cargo.toml",
    "content": "[package]\nname = \"transfer-purse-to-account-stored\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\", \"Ed Hastings <ed@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"transfer_purse_to_account_stored\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\ntransfer-purse-to-account = { path = \"../transfer-purse-to-account\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-purse-to-account-stored/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::{string::ToString, vec};\n\nuse casper_contract::contract_api::{runtime, storage};\n\nuse casper_types::{\n    addressable_entity::{\n        EntityEntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Parameter,\n    },\n    AddressableEntityHash, CLType, EntryPointPayment, Key,\n};\n\nconst ENTRY_FUNCTION_NAME: &str = \"transfer\";\nconst PACKAGE_HASH_KEY_NAME: &str = \"transfer_purse_to_account\";\nconst HASH_KEY_NAME: &str = \"transfer_purse_to_account_hash\";\nconst ACCESS_KEY_NAME: &str = \"transfer_purse_to_account_access\";\nconst ARG_0_NAME: &str = \"target_account_addr\";\nconst ARG_1_NAME: &str = \"amount\";\nconst CONTRACT_VERSION: &str = \"contract_version\";\n\n#[no_mangle]\npub extern \"C\" fn transfer() {\n    transfer_purse_to_account::delegate();\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_points = {\n        let mut entry_points = EntryPoints::new();\n\n        let entry_point = EntityEntryPoint::new(\n            ENTRY_FUNCTION_NAME.to_string(),\n            vec![\n                Parameter::new(ARG_0_NAME, CLType::ByteArray(32)),\n                Parameter::new(ARG_1_NAME, CLType::U512),\n            ],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Caller,\n            EntryPointPayment::Caller,\n        );\n        entry_points.add_entry_point(entry_point);\n        entry_points\n    };\n\n    let (contract_hash, contract_version) = storage::new_contract(\n        entry_points,\n        None,\n        Some(PACKAGE_HASH_KEY_NAME.to_string()),\n        Some(ACCESS_KEY_NAME.to_string()),\n        None,\n    );\n    runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into());\n    runtime::put_key(\n        HASH_KEY_NAME,\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-purse-to-account-with-id/Cargo.toml",
    "content": "[package]\nname = \"transfer-purse-to-account-with-id\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"transfer_purse_to_account_with_id\"\npath = \"src/bin/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-purse-to-account-with-id/src/bin/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    transfer_purse_to_account_with_id::delegate();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-purse-to-account-with-id/src/lib.rs",
    "content": "#![no_std]\n\nextern crate alloc;\n\nuse alloc::format;\n\nuse casper_contract::{\n    contract_api::{account, runtime, storage, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{account::AccountHash, ApiError, Key, URef, U512};\n\nconst TRANSFER_RESULT_UREF_NAME: &str = \"transfer_result\";\nconst MAIN_PURSE_FINAL_BALANCE_UREF_NAME: &str = \"final_balance\";\n\nconst ARG_TARGET: &str = \"target\";\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_ID: &str = \"id\";\n\npub fn delegate() {\n    let source: URef = account::get_main_purse();\n    let target: AccountHash = runtime::get_named_arg(ARG_TARGET);\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n    let id: Option<u64> = runtime::get_named_arg(ARG_ID);\n\n    let transfer_result = system::transfer_from_purse_to_account(source, target, amount, id);\n\n    let final_balance =\n        system::get_purse_balance(source).unwrap_or_revert_with(ApiError::User(103));\n\n    let result = format!(\"{:?}\", transfer_result);\n\n    let result_uref: Key = storage::new_uref(result).into();\n    runtime::put_key(TRANSFER_RESULT_UREF_NAME, result_uref);\n    runtime::put_key(\n        MAIN_PURSE_FINAL_BALANCE_UREF_NAME,\n        storage::new_uref(final_balance).into(),\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-purse-to-accounts/Cargo.toml",
    "content": "[package]\nname = \"transfer-purse-to-accounts\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"transfer_purse_to_accounts\"\npath = \"src/bin/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-purse-to-accounts/src/bin/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::contract_api::account;\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let source = account::get_main_purse();\n    transfer_purse_to_accounts::delegate(source);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-purse-to-accounts/src/lib.rs",
    "content": "#![no_std]\n\nextern crate alloc;\n\nuse alloc::collections::BTreeMap;\n\nuse casper_contract::{\n    contract_api::{runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{account::AccountHash, URef, U512};\n\nconst ARG_TARGETS: &str = \"targets\";\n\npub fn delegate(source: URef) {\n    let targets: BTreeMap<AccountHash, (U512, Option<u64>)> = runtime::get_named_arg(ARG_TARGETS);\n\n    for (target, (amount, id)) in targets {\n        system::transfer_from_purse_to_account(source, target, amount, id).unwrap_or_revert();\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-purse-to-accounts-stored/Cargo.toml",
    "content": "[package]\nname = \"transfer-purse-to-accounts-stored\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\", \"Ed Hastings <ed@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"transfer_purse_to_accounts_stored\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\ntransfer-purse-to-accounts = { path = \"../transfer-purse-to-accounts\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-purse-to-accounts-stored/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::{collections::BTreeMap, string::ToString, vec};\n\nuse casper_contract::{\n    contract_api::{account, runtime, storage, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\n\nuse casper_types::{\n    account::AccountHash,\n    addressable_entity::{\n        EntityEntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Parameter,\n    },\n    contracts::NamedKeys,\n    AddressableEntityHash, CLType, CLTyped, EntryPointPayment, Key, U512,\n};\n\nconst ENTRY_FUNCTION_NAME: &str = \"transfer\";\n\nconst PACKAGE_HASH_KEY_NAME: &str = \"transfer_purse_to_accounts\";\nconst HASH_KEY_NAME: &str = \"transfer_purse_to_accounts_hash\";\nconst ACCESS_KEY_NAME: &str = \"transfer_purse_to_accounts_access\";\n\nconst ARG_AMOUNT: &str = \"amount\";\nconst ARG_SOURCE: &str = \"source\";\nconst ARG_TARGETS: &str = \"targets\";\n\nconst CONTRACT_VERSION: &str = \"contract_version\";\n\nconst PURSE_KEY_NAME: &str = \"purse\";\n\n#[no_mangle]\npub extern \"C\" fn transfer() {\n    let purse = runtime::get_key(PURSE_KEY_NAME)\n        .unwrap_or_revert()\n        .into_uref()\n        .unwrap_or_revert();\n    transfer_purse_to_accounts::delegate(purse);\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entry_points = {\n        let mut tmp = EntryPoints::new();\n        let entry_point = EntityEntryPoint::new(\n            ENTRY_FUNCTION_NAME.to_string(),\n            vec![\n                Parameter::new(ARG_SOURCE, CLType::URef),\n                Parameter::new(\n                    ARG_TARGETS,\n                    <BTreeMap<AccountHash, (U512, Option<u64>)>>::cl_type(),\n                ),\n            ],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        tmp.add_entry_point(entry_point);\n        tmp\n    };\n\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n\n    let 
named_keys = {\n        let purse = system::create_purse();\n        system::transfer_from_purse_to_purse(account::get_main_purse(), purse, amount, None)\n            .unwrap_or_revert();\n\n        let mut named_keys = NamedKeys::new();\n        named_keys.insert(PURSE_KEY_NAME.to_string(), purse.into());\n        named_keys\n    };\n\n    let (contract_hash, contract_version) = storage::new_contract(\n        entry_points,\n        Some(named_keys),\n        Some(PACKAGE_HASH_KEY_NAME.to_string()),\n        Some(ACCESS_KEY_NAME.to_string()),\n        None,\n    );\n\n    runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into());\n    runtime::put_key(\n        HASH_KEY_NAME,\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-purse-to-accounts-subcall/Cargo.toml",
    "content": "[package]\nname = \"transfer-purse-to-accounts-subcall\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"transfer_purse_to_accounts_subcall\"\npath = \"src/bin/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-purse-to-accounts-subcall/src/bin/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    transfer_purse_to_accounts_subcall::delegate();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-purse-to-accounts-subcall/src/lib.rs",
    "content": "#![no_std]\n\nextern crate alloc;\n\nuse alloc::collections::BTreeMap;\n\nuse casper_contract::{\n    contract_api::{account, runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{account::AccountHash, contracts::ContractHash, runtime_args, Key, URef, U512};\n\nconst ENTRYPOINT: &str = \"transfer\";\nconst ARG_SOURCE: &str = \"source\";\nconst ARG_TARGETS: &str = \"targets\";\n\nconst HASH_KEY_NAME: &str = \"transfer_purse_to_accounts_hash\";\n\npub fn delegate() {\n    let source: URef = account::get_main_purse();\n    let targets: BTreeMap<AccountHash, (U512, Option<u64>)> = runtime::get_named_arg(ARG_TARGETS);\n\n    for (target, (amount, id)) in &targets {\n        system::transfer_from_purse_to_account(source, *target, *amount, *id).unwrap_or_revert();\n    }\n\n    let contract_hash = runtime::get_key(HASH_KEY_NAME)\n        .and_then(Key::into_entity_hash)\n        .map(|e_hash| ContractHash::new(e_hash.value()))\n        .unwrap_or_revert();\n\n    runtime::call_contract(\n        contract_hash,\n        ENTRYPOINT,\n        runtime_args! {\n            ARG_SOURCE => source,\n            ARG_TARGETS => targets\n        },\n    )\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-purse-to-public-key/Cargo.toml",
    "content": "[package]\nname = \"transfer-purse-to-public-key\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"transfer_purse_to_public_key\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-purse-to-public-key/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::{\n    contract_api::{runtime, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{PublicKey, U512};\n\nconst ARG_TARGET: &str = \"target\";\nconst ARG_SOURCE_PURSE: &str = \"source_purse\";\nconst ARG_AMOUNT: &str = \"amount\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let source_purse = runtime::get_named_arg(ARG_SOURCE_PURSE);\n    let target: PublicKey = runtime::get_named_arg(ARG_TARGET);\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n\n    system::transfer_from_purse_to_public_key(source_purse, target, amount, None)\n        .unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-purse-to-purse/Cargo.toml",
    "content": "[package]\nname = \"transfer-purse-to-purse\"\nversion = \"0.1.0\"\nauthors = [\"Henry Till <henrytill@gmail.com>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"transfer_purse_to_purse\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/transfer-purse-to-purse/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::{format, string::String};\n\nuse casper_contract::{\n    contract_api::{account, runtime, storage, system},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{ApiError, Key, URef, U512};\n\nconst PURSE_MAIN: &str = \"purse:main\";\nconst PURSE_TRANSFER_RESULT: &str = \"purse_transfer_result\";\nconst MAIN_PURSE_BALANCE: &str = \"main_purse_balance\";\n\nconst ARG_SOURCE: &str = \"source\";\nconst ARG_TARGET: &str = \"target\";\nconst ARG_AMOUNT: &str = \"amount\";\n\n#[repr(u16)]\nenum CustomError {\n    InvalidSourcePurseKey = 103,\n    UnexpectedSourcePurseKeyVariant = 104,\n    InvalidDestinationPurseKey = 105,\n    UnexpectedDestinationPurseKeyVariant = 106,\n    UnableToGetBalance = 107,\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let main_purse: URef = account::get_main_purse();\n    // add or update `main_purse` if it doesn't exist already\n    runtime::put_key(PURSE_MAIN, Key::from(main_purse));\n\n    let src_purse_name: String = runtime::get_named_arg(ARG_SOURCE);\n\n    let src_purse_key = runtime::get_key(&src_purse_name)\n        .unwrap_or_revert_with(ApiError::User(CustomError::InvalidSourcePurseKey as u16));\n\n    let src_purse = match src_purse_key.as_uref() {\n        Some(uref) => uref,\n        None => runtime::revert(ApiError::User(\n            CustomError::UnexpectedSourcePurseKeyVariant as u16,\n        )),\n    };\n    let dst_purse_name: String = runtime::get_named_arg(ARG_TARGET);\n\n    let dst_purse = if !runtime::has_key(&dst_purse_name) {\n        // If `dst_purse_name` is not in known urefs list then create a new purse\n        let purse = system::create_purse();\n        // and save it in known urefs\n        runtime::put_key(&dst_purse_name, purse.into());\n        purse\n    } else {\n        let destination_purse_key = runtime::get_key(&dst_purse_name).unwrap_or_revert_with(\n            
ApiError::User(CustomError::InvalidDestinationPurseKey as u16),\n        );\n        match destination_purse_key.as_uref() {\n            Some(uref) => *uref,\n            None => runtime::revert(ApiError::User(\n                CustomError::UnexpectedDestinationPurseKeyVariant as u16,\n            )),\n        }\n    };\n    let amount: U512 = runtime::get_named_arg(ARG_AMOUNT);\n\n    let transfer_result = system::transfer_from_purse_to_purse(*src_purse, dst_purse, amount, None);\n\n    // Assert is done here\n    let final_balance = system::get_purse_balance(main_purse)\n        .unwrap_or_revert_with(ApiError::User(CustomError::UnableToGetBalance as u16));\n\n    let result = format!(\"{:?}\", transfer_result);\n    // Add new urefs\n    let result_key: Key = storage::new_uref(result).into();\n    runtime::put_key(PURSE_TRANSFER_RESULT, result_key);\n    runtime::put_key(MAIN_PURSE_BALANCE, storage::new_uref(final_balance).into());\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/update-associated-key/Cargo.toml",
    "content": "[package]\nname = \"update-associated-key\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"update_associated_key\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/update-associated-key/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nuse casper_contract::{\n    contract_api::{account, runtime},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::account::{AccountHash, Weight};\n\nconst ARG_ACCOUNT: &str = \"account\";\nconst ARG_WEIGHT: &str = \"weight\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let account: AccountHash = runtime::get_named_arg(ARG_ACCOUNT);\n    let weight: Weight = runtime::get_named_arg(ARG_WEIGHT);\n    account::update_associated_key(account, weight).unwrap_or_revert();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/upgrade-threshold/Cargo.toml",
    "content": "[package]\nname = \"upgrade-threshold\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html\n[[bin]]\nname = \"upgrade_threshold\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }"
  },
  {
    "path": "smart_contracts/contracts/test/upgrade-threshold/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::{string::ToString, vec};\nuse casper_contract::{\n    contract_api::{entity, runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    account::AccountHash,\n    addressable_entity::{ActionType, Weight},\n    AddressableEntityHash, CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment,\n    EntryPointType, EntryPoints, Key, Parameter,\n};\n\nconst ARG_ENTITY_ACCOUNT_HASH: &str = \"entity_account_hash\";\nconst ARG_KEY_WEIGHT: &str = \"key_weight\";\nconst ARG_NEW_UPGRADE_THRESHOLD: &str = \"new_threshold\";\n\nconst ENTRYPOINT_ADD_ASSOCIATED_KEY: &str = \"add_associated_key\";\nconst ENTRYPOINT_MANAGE_ACTION_THRESHOLD: &str = \"manage_action_threshold\";\n\nconst PACKAGE_HASH_KEY_NAME: &str = \"contract_package_hash\";\nconst ACCESS_UREF_NAME: &str = \"access_uref\";\nconst CONTRACT_HASH_NAME: &str = \"contract_hash_name\";\n\n#[no_mangle]\npub extern \"C\" fn add_associated_key() {\n    let entity_account_hash: AccountHash = runtime::get_named_arg(ARG_ENTITY_ACCOUNT_HASH);\n    let weight: u8 = runtime::get_named_arg(ARG_KEY_WEIGHT);\n    entity::add_associated_key(entity_account_hash, Weight::new(weight)).unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn manage_action_threshold() {\n    let new_threshold = runtime::get_named_arg::<u8>(ARG_NEW_UPGRADE_THRESHOLD);\n    entity::set_action_threshold(ActionType::UpgradeManagement, Weight::new(new_threshold))\n        .unwrap_or_revert()\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let entrypoints = {\n        let mut entrypoints = EntryPoints::new();\n        let add_associated_key_entry_point = EntityEntryPoint::new(\n            ENTRYPOINT_ADD_ASSOCIATED_KEY,\n            vec![\n                Parameter::new(ARG_ENTITY_ACCOUNT_HASH, CLType::ByteArray(32)),\n                Parameter::new(ARG_KEY_WEIGHT, CLType::U8),\n            ],\n            CLType::Unit,\n            
EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entrypoints.add_entry_point(add_associated_key_entry_point);\n        let manage_action_threshold_entrypoint = EntityEntryPoint::new(\n            ENTRYPOINT_MANAGE_ACTION_THRESHOLD,\n            vec![Parameter::new(ARG_NEW_UPGRADE_THRESHOLD, CLType::U8)],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entrypoints.add_entry_point(manage_action_threshold_entrypoint);\n        entrypoints\n    };\n    let (contract_hash, _) = storage::new_contract(\n        entrypoints,\n        None,\n        Some(PACKAGE_HASH_KEY_NAME.to_string()),\n        Some(ACCESS_UREF_NAME.to_string()),\n        None,\n    );\n    runtime::put_key(\n        CONTRACT_HASH_NAME,\n        Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())),\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/upgrade-threshold-upgrader/Cargo.toml",
    "content": "[package]\nname = \"upgrade-threshold-upgrader\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html\n\n[[bin]]\nname = \"upgrade_threshold_upgrader\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }"
  },
  {
    "path": "smart_contracts/contracts/test/upgrade-threshold-upgrader/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::{collections::BTreeMap, vec};\nuse casper_contract::{\n    contract_api::{entity, runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    account::AccountHash,\n    addressable_entity::{ActionType, Weight},\n    contracts::NamedKeys,\n    CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints,\n    Key, PackageHash, Parameter,\n};\n\nconst ARG_ENTITY_ACCOUNT_HASH: &str = \"entity_account_hash\";\nconst ARG_KEY_WEIGHT: &str = \"key_weight\";\nconst ARG_NEW_UPGRADE_THRESHOLD: &str = \"new_threshold\";\nconst ARG_CONTRACT_PACKAGE: &str = \"contract_package_hash\";\n\nconst ENTRYPOINT_ADD_ASSOCIATED_KEY: &str = \"add_associated_key\";\nconst ENTRYPOINT_MANAGE_ACTION_THRESHOLD: &str = \"manage_action_threshold\";\nconst ENTRYPOINT_REMOVE_ASSOCIATED_KEY: &str = \"remove_associated_key\";\n\nconst CONTRACT_HASH_NAME: &str = \"contract_hash_name\";\n\n#[no_mangle]\npub extern \"C\" fn add_associated_key() {\n    let entity_account_hash: AccountHash = runtime::get_named_arg(ARG_ENTITY_ACCOUNT_HASH);\n    let weight: u8 = runtime::get_named_arg(ARG_KEY_WEIGHT);\n    entity::add_associated_key(entity_account_hash, Weight::new(weight)).unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn manage_action_threshold() {\n    let new_threshold = runtime::get_named_arg(ARG_NEW_UPGRADE_THRESHOLD);\n    entity::set_action_threshold(ActionType::UpgradeManagement, new_threshold).unwrap_or_revert()\n}\n\n#[no_mangle]\npub extern \"C\" fn remove_associated_key() {\n    let entity_account_hash: AccountHash = runtime::get_named_arg(ARG_ENTITY_ACCOUNT_HASH);\n    entity::remove_associated_key(entity_account_hash).unwrap_or_revert();\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let contract_package: PackageHash = runtime::get_named_arg(ARG_CONTRACT_PACKAGE);\n    let entry_points = {\n        let mut entrypoints = EntryPoints::new();\n 
       let add_associated_key_entry_point = EntityEntryPoint::new(\n            ENTRYPOINT_ADD_ASSOCIATED_KEY,\n            vec![\n                Parameter::new(ARG_ENTITY_ACCOUNT_HASH, CLType::ByteArray(32)),\n                Parameter::new(ARG_KEY_WEIGHT, CLType::U8),\n            ],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entrypoints.add_entry_point(add_associated_key_entry_point);\n        let manage_action_threshold_entrypoint = EntityEntryPoint::new(\n            ENTRYPOINT_MANAGE_ACTION_THRESHOLD,\n            vec![Parameter::new(ARG_NEW_UPGRADE_THRESHOLD, CLType::U8)],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entrypoints.add_entry_point(manage_action_threshold_entrypoint);\n        let remove_associated_key_entry_point = EntityEntryPoint::new(\n            ENTRYPOINT_REMOVE_ASSOCIATED_KEY,\n            vec![Parameter::new(\n                ARG_ENTITY_ACCOUNT_HASH,\n                CLType::ByteArray(32),\n            )],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n        entrypoints.add_entry_point(remove_associated_key_entry_point);\n        entrypoints\n    };\n    // this should overwrite the previous contract obj with the new contract obj at the same uref\n    let (new_contract_hash, _new_contract_version) = storage::add_contract_version(\n        contract_package.into(),\n        entry_points,\n        NamedKeys::new(),\n        BTreeMap::new(),\n    );\n    runtime::put_key(CONTRACT_HASH_NAME, Key::Hash(new_contract_hash.value()));\n}\n"
  },
  {
    "path": "smart_contracts/contracts/test/verify-signature/Cargo.toml",
    "content": "[package]\nname = \"verify-signature\"\nversion = \"0.1.0\"\nauthors = [\"Igor Bunar <igor@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"verify_signature\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/test/verify-signature/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\nuse alloc::string::String;\nuse casper_contract::contract_api::{cryptography, runtime};\nuse casper_types::{\n    bytesrepr::{Bytes, FromBytes},\n    PublicKey, Signature,\n};\n\nconst ARG_MESSAGE: &str = \"message\";\nconst ARG_SIGNATURE_BYTES: &str = \"signature_bytes\";\nconst ARG_PUBLIC_KEY: &str = \"public_key\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    let message: String = runtime::get_named_arg(ARG_MESSAGE);\n    let signature_bytes: Bytes = runtime::get_named_arg(ARG_SIGNATURE_BYTES);\n    let public_key: PublicKey = runtime::get_named_arg(ARG_PUBLIC_KEY);\n\n    let (signature, _) = Signature::from_bytes(&signature_bytes).unwrap();\n    let verify = cryptography::verify_signature(message.as_bytes(), &signature, &public_key);\n\n    assert!(verify.is_ok());\n}\n"
  },
  {
    "path": "smart_contracts/contracts/tutorial/counter-installer/Cargo.toml",
    "content": "[package]\nname = \"counter-installer\"\nversion = \"0.1.0\"\nauthors = [\"Ed Hastings <ed@casper.network>\", \"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"counter_installer\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/tutorial/counter-installer/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[cfg(not(target_arch = \"wasm32\"))]\ncompile_error!(\"target arch should be wasm32: compile with '--target wasm32-unknown-unknown'\");\n\nextern crate alloc;\n\nuse alloc::{\n    string::{String, ToString},\n    vec::Vec,\n};\nuse casper_contract::{\n    contract_api::{runtime, storage},\n    unwrap_or_revert::UnwrapOrRevert,\n};\nuse casper_types::{\n    addressable_entity::{EntityEntryPoint, EntryPointAccess, EntryPointType, EntryPoints},\n    api_error::ApiError,\n    contracts::NamedKeys,\n    CLType, CLValue, EntryPointPayment, Key, URef,\n};\n\nconst COUNT_KEY: &str = \"count\";\nconst COUNTER_INC: &str = \"counter_inc\";\nconst COUNTER_GET: &str = \"counter_get\";\nconst COUNTER_KEY: &str = \"counter\";\nconst CONTRACT_VERSION_KEY: &str = \"version\";\n\n#[no_mangle]\npub extern \"C\" fn counter_inc() {\n    let uref: URef = runtime::get_key(COUNT_KEY)\n        .unwrap_or_revert_with(ApiError::MissingKey)\n        .into_uref()\n        .unwrap_or_revert_with(ApiError::UnexpectedKeyVariant);\n    storage::add(uref, 1);\n}\n\n#[no_mangle]\npub extern \"C\" fn counter_get() {\n    let uref: URef = runtime::get_key(COUNT_KEY)\n        .unwrap_or_revert_with(ApiError::MissingKey)\n        .into_uref()\n        .unwrap_or_revert_with(ApiError::UnexpectedKeyVariant);\n    let result: i32 = storage::read(uref)\n        .unwrap_or_revert_with(ApiError::Read)\n        .unwrap_or_revert_with(ApiError::ValueNotFound);\n    let typed_result = CLValue::from_t(result).unwrap_or_revert();\n    runtime::ret(typed_result);\n}\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    // Initialize counter to 0.\n    let counter_local_key = storage::new_uref(0_i32);\n\n    // Create initial named keys of the contract.\n    let mut counter_named_keys = NamedKeys::new();\n    let key_name = String::from(COUNT_KEY);\n    counter_named_keys.insert(key_name, counter_local_key.into());\n\n    // Create entry points to get the counter value and to 
increment the counter by 1.\n    let mut counter_entry_points = EntryPoints::new();\n    counter_entry_points.add_entry_point(EntityEntryPoint::new(\n        COUNTER_INC,\n        Vec::new(),\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n    counter_entry_points.add_entry_point(EntityEntryPoint::new(\n        COUNTER_GET,\n        Vec::new(),\n        CLType::I32,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    ));\n\n    let (stored_contract_hash, contract_version) = storage::new_contract(\n        counter_entry_points,\n        Some(counter_named_keys),\n        Some(\"counter_package_name\".to_string()),\n        Some(\"counter_access_uref\".to_string()),\n        None,\n    );\n\n    // To create a locked contract instead, use new_locked_contract and throw away the contract\n    // version returned\n    // let (stored_contract_hash, _) =\n    //     storage::new_locked_contract(counter_entry_points, Some(counter_named_keys), None, None);\n\n    // The current version of the contract will be reachable through named keys\n    let version_uref = storage::new_uref(contract_version);\n    runtime::put_key(CONTRACT_VERSION_KEY, version_uref.into());\n\n    // Hash of the installed contract will be reachable through named keys\n    runtime::put_key(COUNTER_KEY, Key::Hash(stored_contract_hash.value()));\n}\n"
  },
  {
    "path": "smart_contracts/contracts/tutorial/hello-world/Cargo.toml",
    "content": "[package]\nname = \"hello-world\"\nversion = \"0.1.0\"\nauthors = [\"darthsiroftardis <karan@casper.network>\", \"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"hello_world\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/tutorial/hello-world/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse alloc::string::String;\nuse casper_contract::contract_api::{runtime, storage};\nuse casper_types::{Key, URef};\n\nconst KEY: &str = \"special_value\";\nconst ARG_MESSAGE: &str = \"message\";\n\nfn store(value: String) {\n    // Store `value` under a new unforgeable reference.\n    let value_ref: URef = storage::new_uref(value);\n\n    // Wrap the unforgeable reference in a value of type `Key`.\n    let value_key: Key = value_ref.into();\n\n    // Store this key under the name \"special_value\" in context-local storage.\n    runtime::put_key(KEY, value_key);\n}\n\n// All session code must have a `call` entrypoint.\n#[no_mangle]\npub extern \"C\" fn call() {\n    // Get the optional first argument supplied to the argument.\n    let value: String = runtime::get_named_arg(ARG_MESSAGE);\n    store(value);\n}\n"
  },
  {
    "path": "smart_contracts/contracts/tutorial/increment-counter/Cargo.toml",
    "content": "[package]\nname = \"increment-counter\"\nversion = \"1.0.0\"\nauthors = [\"Maciej Zielinski\", \"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[[bin]]\nname = \"increment_counter\"\npath = \"src/main.rs\"\nbench = false\ndoctest = false\ntest = false\n\n[dependencies]\ncasper-contract = { path = \"../../../contract\" }\ncasper-types = { path = \"../../../../types\" }\n"
  },
  {
    "path": "smart_contracts/contracts/tutorial/increment-counter/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\n#[cfg(not(target_arch = \"wasm32\"))]\ncompile_error!(\"target arch should be wasm32: compile with '--target wasm32-unknown-unknown'\");\n\nextern crate alloc;\n\nuse casper_types::{ApiError, Key, RuntimeArgs};\n\nuse casper_contract::{contract_api::runtime, unwrap_or_revert::UnwrapOrRevert};\nuse casper_types::contracts::ContractHash;\n\nconst COUNTER_KEY: &str = \"counter\";\nconst COUNTER_INC: &str = \"counter_inc\";\nconst COUNTER_GET: &str = \"counter_get\";\n\n#[no_mangle]\npub extern \"C\" fn call() {\n    // Read the Counter smart contract's ContractHash.\n    let contract_hash = {\n        let counter_uref = runtime::get_key(COUNTER_KEY).unwrap_or_revert_with(ApiError::GetKey);\n        if let Key::Hash(hash) = counter_uref {\n            ContractHash::new(hash)\n        } else {\n            runtime::revert(ApiError::User(66));\n        }\n    };\n\n    // Call Counter to get the current value.\n    let current_counter_value: u32 =\n        runtime::call_contract(contract_hash, COUNTER_GET, RuntimeArgs::new());\n\n    // Call Counter to increment the value.\n    let _: () = runtime::call_contract(contract_hash, COUNTER_INC, RuntimeArgs::new());\n\n    // Call Counter to get the new value.\n    let new_counter_value: u32 =\n        runtime::call_contract(contract_hash, COUNTER_GET, RuntimeArgs::new());\n\n    // Expect counter to increment by one.\n    if new_counter_value - current_counter_value != 1u32 {\n        runtime::revert(ApiError::User(67));\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-cep18/Cargo.toml",
    "content": "[package]\nname = \"vm2-cep18\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html\n[lib]\ncrate-type = [\"cdylib\", \"rlib\"]\n\n[dependencies]\ncasper-contract-sdk = { path = \"../../../sdk\" }\n\n[target.'cfg(not(target_arch = \"wasm32\"))'.dependencies]\nserde_json = \"1\"\ncasper-contract-sdk = { path = \"../../../sdk\", features = [\"cli\"] }\n\n[dev-dependencies]\ncasper-contract-sdk-codegen = { path = \"../../../sdk_codegen\" }\n\n[build-dependencies]\ncasper-contract-sdk-codegen = { path = \"../../../sdk_codegen\" }\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-cep18/build.rs",
    "content": "// use std::{env, fs, path::Path};\n\n// use casper_contract_sdk_codegen::Codegen;\n\n// const SCHEMA: &str = include_str!(\"cep18_schema.json\");\n\nfn main() {\n    // Check if target arch is wasm32 and set link flags accordingly\n    if std::env::var(\"TARGET\").unwrap() == \"wasm32-unknown-unknown\" {\n        println!(\"cargo:rustc-link-arg=--import-memory\");\n        println!(\"cargo:rustc-link-arg=--export-table\");\n    }\n\n    // casper_contract_sdk::build_flags();\n\n    // let mut codegen = Codegen::from_str(SCHEMA).unwrap();\n    // let source = codegen.gen();\n\n    // let target_dir = env::var_os(\"OUT_DIR\").unwrap();\n    // let target_path = Path::new(&target_dir).join(\"cep18_schema.rs\");\n    // fs::write(&target_path, source).unwrap();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-cep18/src/lib.rs",
    "content": "use casper_contract_sdk::{\n    contrib::access_control::{AccessControl, AccessControlExt, AccessControlState},\n    prelude::*,\n    types::U256,\n};\n\nuse casper_contract_sdk::contrib::cep18::{\n    Burnable, BurnableExt, CEP18Ext, CEP18State, Mintable, MintableExt, ADMIN_ROLE, CEP18,\n};\n\n#[casper(contract_state)]\npub struct TokenContract {\n    state: CEP18State,\n    access_control: AccessControlState,\n}\n\nimpl Default for TokenContract {\n    fn default() -> Self {\n        panic!(\"nope\");\n    }\n    //\n}\n\n#[casper]\nimpl TokenContract {\n    #[casper(constructor)]\n    pub fn new(token_name: String) -> Self {\n        // TODO: If argument has same name as another entrypoint there's a compile error for some\n        // reason, so can't use \"name\"\n        let mut state = CEP18State::new(&token_name, \"Default symbol\", 8, U256::from(0u64));\n        state.enable_mint_burn = true;\n\n        let mut token = Self {\n            state,\n            access_control: AccessControlState::default(),\n        };\n\n        let caller = casper::get_caller();\n        token.grant_role(caller, ADMIN_ROLE);\n\n        // Give caller some tokens\n        token.mint(caller, U256::from(10_000u64)).expect(\"Mint\");\n\n        token\n    }\n\n    pub fn my_balance(&self) -> U256 {\n        CEP18::state(self)\n            .balances\n            .get(&casper::get_caller())\n            .unwrap_or_default()\n    }\n}\n\n#[casper(path = casper_contract_sdk::contrib::cep18)]\nimpl CEP18 for TokenContract {\n    fn state(&self) -> &CEP18State {\n        &self.state\n    }\n\n    fn state_mut(&mut self) -> &mut CEP18State {\n        &mut self.state\n    }\n}\n\n#[casper(path = casper_contract_sdk::contrib::access_control)]\nimpl AccessControl for TokenContract {\n    fn state(&self) -> &AccessControlState {\n        &self.access_control\n    }\n\n    fn state_mut(&mut self) -> &mut AccessControlState {\n        &mut self.access_control\n    
}\n}\n\n#[casper(path = casper_contract_sdk::contrib::cep18)]\nimpl Mintable for TokenContract {}\n\n#[casper(path = casper_contract_sdk::contrib::cep18)]\nimpl Burnable for TokenContract {}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use casper_contract_sdk::{\n        casper::{\n            self,\n            native::{\n                current_environment, dispatch_with, with_current_environment, Environment,\n                DEFAULT_ADDRESS,\n            },\n            Entity,\n        },\n        casper_executor_wasm_common::keyspace::Keyspace,\n        contrib::cep18::Cep18Error,\n        ContractHandle, ToCallData,\n    };\n\n    const ALICE: Entity = Entity::Account([1; 32]);\n    const BOB: Entity = Entity::Account([2; 32]);\n\n    #[test]\n    fn it_works() {\n        let stub = Environment::new(Default::default(), DEFAULT_ADDRESS);\n\n        let result = casper::native::dispatch_with(stub, || {\n            let mut contract = TokenContract::new(\"Foo Token\".to_string());\n\n            assert_eq!(contract.require_any_role(&[ADMIN_ROLE]), Ok(()));\n\n            assert_eq!(contract.name(), \"Foo Token\");\n            assert_eq!(contract.balance_of(ALICE), U256::from(0u64));\n            assert_eq!(contract.balance_of(BOB), U256::from(0u64));\n\n            contract.approve(BOB, U256::from(111u64)).unwrap();\n            assert_eq!(contract.balance_of(ALICE), U256::from(0u64));\n            contract.mint(ALICE, U256::from(1000u64)).unwrap();\n            assert_eq!(contract.balance_of(ALICE), U256::from(1000u64));\n\n            // Caller has 10k tokens mintes (coming from constructor)\n            assert_eq!(\n                contract.balance_of(casper::get_caller()),\n                U256::from(10_000u64)\n            );\n            assert_eq!(\n                contract.transfer(ALICE, U256::from(10_001u64)),\n                Err(Cep18Error::InsufficientBalance)\n            );\n            assert_eq!(contract.transfer(ALICE, 
U256::from(10_000u64)), Ok(()));\n        });\n        assert!(matches!(result, Ok(())));\n    }\n\n    #[test]\n    fn e2e() {\n        // let db = casper::native::Container::default();\n        // let env = Environment::new(db.clone(), DEFAULT_ADDRESS);\n\n        let result = casper::native::dispatch(move || {\n            assert_eq!(casper::get_caller(), DEFAULT_ADDRESS);\n\n            let constructor = TokenContractRef::new(\"Foo Token\".to_string());\n\n            // casper_call(address, value, selector!(\"nme\"), ());\n            let ctor_input_data = constructor.input_data();\n            let create_result = casper::create(\n                None,\n                0,\n                Some(constructor.entry_point()),\n                ctor_input_data.as_ref().map(|data| data.as_slice()),\n                None,\n            )\n            .expect(\"Should create\");\n\n            let new_env = with_current_environment(|env| env);\n            let new_env = new_env.smart_contract(Entity::Contract(create_result.contract_address));\n            dispatch_with(new_env, || {\n                // This is the caller of the contract\n                casper::read_into_vec(Keyspace::State)\n                    .expect(\"ok\")\n                    .expect(\"ok\");\n            })\n            .unwrap();\n\n            // assert_eq!(casper::get_caller(), DEFAULT_ADDRESS);\n\n            let cep18_handle =\n                ContractHandle::<TokenContractRef>::from_address(create_result.contract_address);\n\n            {\n                // As a builder that allows you to specify value to pass etc.\n                cep18_handle\n                    .build_call()\n                    .with_transferred_value(0)\n                    .call(|cep18| cep18.name())\n                    .expect(\"Should call\");\n            }\n\n            let name1: String = cep18_handle\n                .build_call()\n                .call(|cep18| cep18.name())\n                .expect(\"Should 
call\");\n\n            let name2: String = cep18_handle\n                .build_call()\n                .call(|cep18| cep18.name())\n                .expect(\"Should call\");\n\n            assert_eq!(name1, name2);\n            assert_eq!(name2, \"Foo Token\");\n            let symbol: String = cep18_handle\n                .build_call()\n                .call(|cep18| cep18.symbol())\n                .expect(\"Should call\");\n            assert_eq!(symbol, \"Default symbol\");\n\n            let alice_balance: U256 = cep18_handle\n                .build_call()\n                .call(|cep18| cep18.balance_of(ALICE))\n                .expect(\"Should call\");\n            assert_eq!(alice_balance, U256::from(0u64));\n\n            let bob_balance: U256 = cep18_handle\n                .build_call()\n                .call(|cep18| cep18.balance_of(BOB))\n                .expect(\"Should call\");\n            assert_eq!(bob_balance, U256::from(0u64));\n\n            let _mint_succeed: () = cep18_handle\n                .build_call()\n                .call(|cep18| cep18.mint(ALICE, U256::from(1000u64)))\n                .expect(\"Should succeed\")\n                .expect(\"Mint succeeded\");\n\n            let alice_balance_after: U256 = cep18_handle\n                .build_call()\n                .call(|cep18| cep18.balance_of(ALICE))\n                .expect(\"Should call\");\n            assert_eq!(alice_balance_after, U256::from(1000u64));\n\n            // Default account -> ALICE\n\n            let default_addr_balance: U256 = cep18_handle\n                .build_call()\n                .call(|cep18| cep18.balance_of(DEFAULT_ADDRESS))\n                .expect(\"Should call\");\n            assert_eq!(default_addr_balance, U256::from(10_000u64));\n\n            assert_eq!(\n                cep18_handle\n                    .build_call()\n                    .call(|cep18| cep18.transfer(ALICE, U256::from(10_001u64)))\n                    .expect(\"Should 
call\"),\n                Err(Cep18Error::InsufficientBalance)\n            );\n            assert_eq!(casper::get_caller(), DEFAULT_ADDRESS);\n\n            let alice_env = current_environment().session(ALICE);\n\n            casper::native::dispatch_with(alice_env, || {\n                assert_eq!(casper::get_caller(), ALICE);\n                assert_eq!(\n                    cep18_handle\n                        .call(|cep18| cep18.my_balance())\n                        .expect(\"Should call\"),\n                    U256::from(1000u64)\n                );\n                assert_eq!(\n                    cep18_handle\n                        .build_call()\n                        .call(|cep18| cep18.transfer(BOB, U256::from(1u64)))\n                        .expect(\"Should call\"),\n                    Ok(())\n                );\n            })\n            .expect(\"Success\");\n\n            let bob_balance = cep18_handle\n                .build_call()\n                .call(|cep18| cep18.balance_of(BOB))\n                .expect(\"Should call\");\n            assert_eq!(bob_balance, U256::from(1u64));\n\n            let alice_balance = cep18_handle\n                .build_call()\n                .call(|cep18| cep18.balance_of(ALICE))\n                .expect(\"Should call\");\n            assert_eq!(alice_balance, U256::from(999u64));\n        });\n\n        assert!(matches!(result, Ok(())));\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-cep18/tests/integration._rs",
    "content": "use casper_contract_sdk::{\n    host::native::{dispatch_with, with_stub, Stub},\n    Contract,\n};\nuse casper_contract_sdk_codegen::support::IntoResult;\nuse vm2_cep18::contract::CEP18;\n\nmod bindings {\n    include!(concat!(env!(\"OUT_DIR\"), \"/cep18_schema.rs\"));\n}\n\n#[test]\nfn foo() {\n    let stub = Stub::default();\n\n    let ret = dispatch_with(stub, || {\n        let client = bindings::CEP18Client::new::<CEP18>(\"Token Name\".to_string())\n            .expect(\"Constructor should work\");\n\n        // Calling the `transfer` entry point with the following arguments:\n        let transfer_call_result = client\n            .transfer([1; 32], 42)\n            .expect(\"Calling transfer entry point should work\");\n\n        assert!(!transfer_call_result.did_revert());\n\n        // Actual returned data, deserialized from the returned bytes.\n        let transfer_return_value = transfer_call_result.into_return_value();\n\n        assert_eq!(\n            transfer_return_value.clone(),\n            bindings::Result_____vm2_cep18__error__Cep18Error_::Err(\n                bindings::vm2_cep18__error__Cep18Error::InsufficientBalance(())\n            )\n        );\n\n        // Codegen can convert into standard Result type.\n        assert_eq!(\n            transfer_return_value.into_result(),\n            Err(bindings::vm2_cep18__error__Cep18Error::InsufficientBalance(\n                ()\n            ))\n        );\n    });\n\n    assert_eq!(ret, Ok(()));\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-cep18-caller/Cargo.toml",
    "content": "[package]\nname = \"vm2-cep18-caller\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html\n[lib]\ncrate-type = [\"cdylib\", \"rlib\"]\n\n[dependencies]\ncasper-contract-sdk = { path = \"../../../sdk\" }\nvm2-cep18 = { path = \"../vm2-cep18\" }\nborsh = { version = \"1.5\", features = [\"derive\"] }\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-cep18-caller/build.rs",
    "content": "// use std::{env, fs, path::Path};\n\n// use casper_contract_sdk_codegen::Codegen;\n\n// const SCHEMA: &str = include_str!(\"cep18_schema.json\");\n\nfn main() {\n    // Check if target arch is wasm32 and set link flags accordingly\n    if std::env::var(\"TARGET\").unwrap() == \"wasm32-unknown-unknown\" {\n        println!(\"cargo:rustc-link-arg=--import-memory\");\n        println!(\"cargo:rustc-link-arg=--export-table\");\n    }\n\n    // let mut codegen = Codegen::from_str(SCHEMA).unwrap();\n    // let source = codegen.gen();\n\n    // let target_dir = env::var_os(\"OUT_DIR\").unwrap();\n    // let target_path = Path::new(&target_dir).join(\"cep18_schema.rs\");\n    // fs::write(&target_path, source).unwrap();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-cep18-caller/src/lib.rs",
    "content": "#![cfg_attr(target_family = \"wasm\", no_main)]\n\npub mod exports {\n    use casper_contract_sdk::{\n        contrib::cep18::{CEP18Ext, MintableExt},\n        prelude::*,\n        types::{Address, U256},\n        ContractHandle,\n    };\n    use vm2_cep18::TokenContractRef;\n\n    #[casper(export)]\n    pub fn call(address: Address) -> String {\n        use casper_contract_sdk::casper::Entity;\n\n        log!(\"Hello {address:?}\");\n        let handle = ContractHandle::<TokenContractRef>::from_address(address);\n\n        // Mint tokens, then check the balance of the account that called this contract\n        handle\n            .call(|contract| contract.mint(Entity::Account([99; 32]), U256::from(100u64)))\n            .expect(\"Should call\")\n            .expect(\"Should mint\");\n\n        let balance_result = handle\n            .call(|contract| contract.balance_of(Entity::Account([99; 32])))\n            .expect(\"Should call\");\n\n        assert_eq!(balance_result, U256::from(100u64));\n\n        let name_result = handle\n            .call(|contract| contract.name())\n            .expect(\"Should call\");\n        log!(\"Name: {name_result:?}\");\n        let transfer_result = handle\n            .call(|contract| contract.transfer(Entity::Account([100; 32]), U256::from(100u64)))\n            .expect(\"Should call\");\n\n        log!(\"Transfer: {transfer_result:?}\");\n\n        log!(\"Success\");\n\n        name_result\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-flipper/Cargo.toml",
    "content": "[package]\nname = \"vm2-flipper\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html\n[lib]\ncrate-type = [\"cdylib\", \"rlib\"]\n\n[dependencies]\ncasper-contract-sdk = { path = \"../../../sdk\" }\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-flipper/build.rs",
    "content": "fn main() {\n    // Check if target arch is wasm32 and set link flags accordingly\n    if std::env::var(\"TARGET\").unwrap() == \"wasm32-unknown-unknown\" {\n        println!(\"cargo:rustc-link-arg=--import-memory\");\n        println!(\"cargo:rustc-link-arg=--export-table\");\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-flipper/src/lib.rs",
    "content": "#![cfg_attr(target_arch = \"wasm32\", no_main)]\n#![cfg_attr(target_arch = \"wasm32\", no_std)]\n\nuse casper_contract_sdk::prelude::*;\n\n/// This contract implements a simple flipper.\n#[casper(contract_state)]\npub struct Flipper {\n    /// The current state of the flipper.\n    value: bool,\n}\n\nimpl Default for Flipper {\n    fn default() -> Self {\n        panic!(\"Unable to instantiate contract without a constructor\");\n    }\n}\n\n#[casper]\nimpl Flipper {\n    #[casper(constructor)]\n    pub fn new(init_value: bool) -> Self {\n        Self { value: init_value }\n    }\n\n    #[casper(constructor)]\n    pub fn default() -> Self {\n        Self::new(Default::default())\n    }\n\n    pub fn flip(&mut self) {\n        self.value = !self.value;\n    }\n\n    pub fn get(&self) -> bool {\n        self.value\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_flipper() {\n        let mut flipper = Flipper::new(false);\n        assert_eq!(flipper.get(), false);\n        flipper.flip();\n        assert_eq!(flipper.get(), true);\n        flipper.flip();\n        assert_eq!(flipper.get(), false);\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-harness/Cargo.toml",
    "content": "[package]\nname = \"vm2-harness\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[lib]\ncrate-type = [\"cdylib\", \"rlib\"]\n\n[dependencies]\ncasper-executor-wasm-common = { path = \"../../../../executor/wasm_common\" }\ncasper-contract-macros = { path = \"../../../macros\" }\ncasper-contract-sdk = { path = \"../../../sdk\" }\nimpls = \"1\"\nthiserror = \"2\"\n\n[target.'cfg(not(target_arch = \"wasm32\"))'.dependencies]\nserde_json = \"1\"\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-harness/build.rs",
    "content": "fn main() {\n    // Check if target arch is wasm32 and set link flags accordingly\n    if std::env::var(\"TARGET\").unwrap() == \"wasm32-unknown-unknown\" {\n        println!(\"cargo:rustc-link-arg=--import-memory\");\n        println!(\"cargo:rustc-link-arg=--export-table\");\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-harness/src/contracts/harness.rs",
    "content": "use std::{\n    collections::{BTreeSet, HashMap, LinkedList},\n    ptr::NonNull,\n};\n\nuse casper_contract_macros::casper;\nuse casper_contract_sdk::{\n    casper::{self, Entity},\n    casper_executor_wasm_common::{\n        entry_point::{\n            ENTRY_POINT_PAYMENT_CALLER, ENTRY_POINT_PAYMENT_DIRECT_INVOCATION_ONLY,\n            ENTRY_POINT_PAYMENT_SELF_ONWARD,\n        },\n        error::CommonResult,\n        keyspace::Keyspace,\n    },\n    collections::Map,\n    log, revert,\n    types::CallError,\n    ContractHandle,\n};\n\nuse crate::traits::{DepositExt, DepositRef};\n\npub(crate) const INITIAL_GREETING: &str = \"This is initial data set from a constructor\";\npub(crate) const BALANCES_PREFIX: &str = \"b\";\n\n#[derive(Debug)]\n#[casper(contract_state)]\npub struct Harness {\n    counter: u64,\n    greeting: String,\n    address_inside_constructor: Option<Entity>,\n    balances: Map<Entity, u64>,\n    block_time: u64,\n}\n\n// #[casper(path = crate::traits)]\n// impl Fallback for Harness {\n//     fn fallback(&mut self) {\n//         // Called when no entrypoint is matched\n//         //\n//         // Is invoked when\n//         // a) user performs plan CSPR transfer (not a contract call)\n//         //   a.1) if there's no fallback entrypoint, the transfer will fail\n//         //   a.2) if there's fallback entrypoint, it will be called\n//         // b) user calls a contract with no matching entrypoint\n//         //   b.1) if there's no fallback entrypoint, the call will fail\n//         //   b.2) if there's fallback entrypoint, it will be called and user can\n\n//         log!(\n//             \"Harness received fallback entrypoint value={}\",\n//             host::get_value()\n//         );\n//     }\n// }\n\n#[derive(Debug, thiserror::Error, PartialEq)]\n#[casper]\npub enum CustomError {\n    #[error(\"foo\")]\n    Foo,\n    #[error(\"bar\")]\n    Bar = 42,\n    #[error(\"error with body {0}\")]\n    WithBody(String),\n    
#[error(\"error with named variant name={name}; age={age}\")]\n    Named { name: String, age: u64 },\n    #[error(\"transfer error {0}\")]\n    Transfer(String),\n    #[error(\"deposit error {0}\")]\n    Deposit(CallError),\n}\n\nimpl Default for Harness {\n    fn default() -> Self {\n        Self {\n            counter: 0,\n            greeting: \"Default value\".to_string(),\n            address_inside_constructor: None,\n            balances: Map::new(BALANCES_PREFIX),\n            block_time: 0,\n        }\n    }\n}\n\npub type Result2 = Result<(), CustomError>;\n\n#[casper]\nimpl Harness {\n    // #[casper(event)]\n    // type TestMessage;\n\n    #[casper(constructor)]\n    pub fn constructor_with_args(who: String) -> Self {\n        // Event::register();\n\n        log!(\"👋 Hello from constructor with args: {who}\");\n\n        assert_eq!(\n            casper::write(Keyspace::PaymentInfo(\"this does not exists\"), &[0]),\n            Err(CommonResult::NotFound)\n        );\n\n        {\n            for payment_info in [\n                ENTRY_POINT_PAYMENT_CALLER,\n                ENTRY_POINT_PAYMENT_DIRECT_INVOCATION_ONLY,\n                ENTRY_POINT_PAYMENT_SELF_ONWARD,\n            ] {\n                casper::write(Keyspace::PaymentInfo(\"counter\"), &[payment_info]).unwrap();\n\n                let mut buffer = [255; 1];\n                assert_eq!(\n                    casper::read(Keyspace::PaymentInfo(\"counter\"), |size| {\n                        assert_eq!(size, 1, \"Size should be 1\");\n                        NonNull::new(&mut buffer[0])\n                    }),\n                    Ok(Some(()))\n                );\n                assert_eq!(&buffer, &[payment_info]);\n            }\n\n            assert_eq!(\n                casper::write(Keyspace::PaymentInfo(\"counter\"), &[255, 255]),\n                Err(CommonResult::InvalidInput)\n            );\n        }\n\n        Self {\n            counter: 0,\n            greeting: 
format!(\"Hello, {who}!\"),\n            address_inside_constructor: Some(casper::get_caller()),\n            balances: Map::new(BALANCES_PREFIX),\n            block_time: casper::get_block_time(),\n        }\n    }\n\n    #[casper(constructor)]\n    pub fn failing_constructor(who: String) -> Self {\n        log!(\"👋 Hello from failing constructor with args: {who}\");\n        revert!();\n    }\n\n    #[casper(constructor)]\n    pub fn trapping_constructor() -> Self {\n        log!(\"👋 Hello from trapping constructor\");\n        // TODO: Storage doesn't fork as of yet, need to integrate casper-storage crate and leverage\n        // the tracking copy.\n        panic!(\"This will revert the execution of this constructor and won't create a new package\");\n    }\n\n    #[casper(constructor)]\n    pub fn initialize() -> Self {\n        log!(\"👋 Hello from constructor\");\n        Self {\n            counter: 0,\n            greeting: INITIAL_GREETING.to_string(),\n            address_inside_constructor: Some(casper::get_caller()),\n            balances: Map::new(BALANCES_PREFIX),\n            block_time: casper::get_block_time(),\n        }\n    }\n\n    #[casper(constructor, payable)]\n    pub fn payable_constructor() -> Self {\n        log!(\n            \"👋 Hello from payable constructor value={}\",\n            casper::transferred_value()\n        );\n        Self {\n            counter: 0,\n            greeting: INITIAL_GREETING.to_string(),\n            address_inside_constructor: Some(casper::get_caller()),\n            balances: Map::new(BALANCES_PREFIX),\n            block_time: casper::get_block_time(),\n        }\n    }\n\n    #[casper(constructor, payable)]\n    pub fn payable_failing_constructor() -> Self {\n        log!(\n            \"👋 Hello from payable failign constructor value={}\",\n            casper::transferred_value()\n        );\n        revert!();\n    }\n\n    #[casper(constructor, payable)]\n    pub fn payable_trapping_constructor() -> Self 
{\n        log!(\n            \"👋 Hello from payable trapping constructor value={}\",\n            casper::transferred_value()\n        );\n        panic!(\"This will revert the execution of this constructor and won't create a new package\")\n    }\n\n    pub fn get_greeting(&self) -> &str {\n        &self.greeting\n    }\n\n    pub fn increment_counter(&mut self) {\n        self.counter += 1;\n    }\n\n    pub fn counter(&self) -> u64 {\n        self.counter\n    }\n\n    pub fn set_greeting(&mut self, greeting: String) {\n        self.counter += 1;\n        log!(\"Saving greeting {}\", greeting);\n        self.greeting = greeting;\n    }\n\n    pub fn emit_unreachable_trap(&mut self) -> ! {\n        self.counter += 1;\n        panic!(\"unreachable\");\n    }\n\n    #[casper(revert_on_error)]\n    pub fn emit_revert_with_data(&mut self) -> Result<(), CustomError> {\n        // revert(code), ret(bytes)\n\n        // casper_return(flags, bytes) flags == 0, flags & FLAG_REVERT\n        log!(\"emit_revert_with_data state={:?}\", self);\n        log!(\n            \"Reverting with data before {counter}\",\n            counter = self.counter\n        );\n        self.counter += 1;\n        log!(\n            \"Reverting with data after {counter}\",\n            counter = self.counter\n        );\n        // Here we can't use revert!() macro, as it explicitly calls `return` and does not involve\n        // writing the state again.\n        Err(CustomError::Bar)\n    }\n\n    pub fn emit_revert_without_data(&mut self) -> ! 
{\n        self.counter += 1;\n        revert!()\n    }\n\n    pub fn get_address_inside_constructor(&self) -> Entity {\n        self.address_inside_constructor\n            .expect(\"Constructor was expected to be caller\")\n    }\n\n    #[casper(revert_on_error)]\n    pub fn should_revert_on_error(&self, flag: bool) -> Result2 {\n        if flag {\n            Err(CustomError::WithBody(\"Reverted\".into()))\n        } else {\n            Ok(())\n        }\n    }\n\n    #[allow(dead_code)]\n    fn private_function_that_should_not_be_exported(&self) {\n        log!(\"This function should not be callable from outside\");\n    }\n\n    pub(crate) fn restricted_function_that_should_be_part_of_manifest(&self) {\n        log!(\"This function should be callable from outside\");\n    }\n\n    pub fn entry_point_without_state() {\n        log!(\"This function does not require state\");\n    }\n\n    pub fn entry_point_without_state_with_args_and_output(mut arg: String) -> String {\n        log!(\"This function does not require state\");\n        arg.push_str(\"extra\");\n        arg\n    }\n\n    pub fn into_modified_greeting(mut self) -> String {\n        self.greeting.push_str(\"!\");\n        self.greeting\n    }\n\n    pub fn into_greeting(self) -> String {\n        self.greeting\n    }\n\n    #[casper(payable)]\n    pub fn payable_entrypoint(&mut self) -> Result<(), CustomError> {\n        log!(\n            \"This is a payable entrypoint value={}\",\n            casper::transferred_value()\n        );\n        Ok(())\n    }\n\n    // enum Error {\n    //     TooLow { expected: u64}\n    // }\n\n    // // #[casper(payable)]\n    // pub fn mint_wrapped_token(&mut self) -> Result<(), Error> {\n\n    //     if host::get_transferred_value() < EXPECTED_AMOUNT {\n    //         // abort!(\"This function is not payable\");\n    //         return Err(Error::TooLow { expected: EXPECTED_AMOUNT });\n    //         // abort!(\"This function is not payable\");\n    //         // 
panic_str\n    //         // abort!(\"\")\n    //     }\n\n    //     let transferred_value = host::get_transferred_value();\n    //     self.balances[sender] += transferred_value;\n    // }\n\n    #[casper(payable, revert_on_error)]\n    pub fn payable_failing_entrypoint(&self) -> Result<(), CustomError> {\n        log!(\n            \"This is a payable entrypoint with value={}\",\n            casper::transferred_value()\n        );\n        if casper::transferred_value() == 123 {\n            Err(CustomError::Foo)\n        } else {\n            Ok(())\n        }\n    }\n\n    #[casper(payable, revert_on_error)]\n    pub fn perform_token_deposit(&mut self, balance_before: u64) -> Result<(), CustomError> {\n        let caller = casper::get_caller();\n        let value = casper::transferred_value();\n\n        if dbg!(value) == 0 {\n            return Err(CustomError::WithBody(\n                \"Value should be greater than 0\".into(),\n            ));\n        }\n\n        assert_eq!(\n            balance_before\n                .checked_sub(value)\n                .unwrap_or_else(|| panic!(\"Balance before should be larger or equal to the value (caller={caller:?}, value={value})\")),\n            casper::get_balance_of(&caller),\n            \"Balance mismatch; token transfer should happen before a contract call\"\n        );\n\n        log!(\"Depositing {value} from {caller:?}\");\n        let current_balance = self.balances.get(&caller).unwrap_or(0);\n        self.balances.insert(&caller, &(current_balance + value));\n        Ok(())\n    }\n\n    #[casper(revert_on_error)]\n    pub fn withdraw(&mut self, balance_before: u64, amount: u64) -> Result<(), CustomError> {\n        let caller = casper::get_caller();\n        log!(\"Withdrawing {amount} into {caller:?}\");\n        let current_balance = self.balances.get(&caller).unwrap_or(0);\n        if current_balance < amount {\n            return Err(CustomError::WithBody(\"Insufficient balance\".into()));\n       
 }\n\n        match caller {\n            Entity::Account(account) => {\n                // if this fails, the transfer will be reverted and the state will be rolled back\n                match casper::transfer(&account, amount) {\n                    Ok(()) => {}\n                    Err(call_error) => {\n                        log!(\"Unable to perform a transfer: {call_error:?}\");\n                        return Err(CustomError::Transfer(call_error.to_string()));\n                    }\n                }\n            }\n            Entity::Contract(contract) => {\n                let result = ContractHandle::<DepositRef>::from_address(contract)\n                    .build_call()\n                    .with_transferred_value(amount)\n                    .try_call(|harness| harness.deposit());\n\n                match result {\n                    Ok(call_result) => {\n                        if let Err(call_error) = call_result.result {\n                            log!(\"CallResult: Unable to perform a transfer: {call_error:?}\");\n                            return Err(CustomError::Deposit(call_error));\n                        }\n                    }\n                    Err(call_error) => {\n                        log!(\"try_call: Unable to perform a transfer: {call_error:?}\");\n                        return Err(CustomError::Deposit(call_error));\n                    }\n                }\n\n                // if let Err(call_error) = result.unwrap().result {\n                //     log!(\"Unable to perform a transfer: {call_error:?}\");\n                //     return Err(CustomError::Deposit(call_error));\n                // }\n            }\n        }\n\n        // TODO: transfer should probably pass CallError (i.e. 
reverted means mint transfer failed\n        // with error, or something like that) return Err(CustomError::WithBody(\"Transfer\n        // failed\".into())); }\n\n        let balance_after = balance_before + amount;\n\n        assert_eq!(\n            casper::get_balance_of(&caller),\n            balance_after,\n            \"Balance should be updated after withdrawal\"\n        );\n\n        self.balances.insert(&caller, &(current_balance - amount));\n        Ok(())\n    }\n\n    pub fn balance(&self) -> u64 {\n        if casper::transferred_value() != 0 {\n            panic!(\"This function is not payable\");\n        }\n        let caller = casper::get_caller();\n        self.balances.get(&caller).unwrap_or(0)\n    }\n\n    pub fn new_method(\n        &self,\n        _arg1: i32,\n        _arg2: i64,\n        _arg3: u32,\n        _arg4: u64,\n        _arg5: u64,\n        _arg6: Vec<u64>,\n        _arg7: bool,\n        _arg8: i8,\n        _arg9: String,\n        _arg10: Vec<u8>,\n        _arg11: [i32; 5],\n        _arg12: Option<String>,\n        _arg13: Result<(), ()>,\n        _arg14: Box<i32>,\n        _arg15: String,\n        _arg16: i32,\n        _arg17: u64,\n        _arg18: (i32, i32),\n        _arg19: HashMap<String, i32>,\n        _arg20: BTreeSet<i32>,\n        _arg21: LinkedList<String>,\n        _arg22: String,\n        _arg23: u64,\n    ) {\n        log!(\"Nothing\");\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-harness/src/contracts/no_fallback.rs",
    "content": "use casper_contract_macros::casper;\nuse casper_contract_sdk::casper;\n\n/// A contract that can't receive tokens through a plain `fallback` method.\n#[derive(Default)]\n#[casper(contract_state)]\npub struct NoFallback {\n    initial_balance: u64,\n    received_balance: u64,\n}\n\n#[casper]\nimpl NoFallback {\n    #[casper(constructor)]\n    pub fn no_fallback_initialize() -> Self {\n        Self {\n            initial_balance: casper::transferred_value(),\n            received_balance: 0,\n        }\n    }\n\n    pub fn hello(&self) -> &str {\n        \"Hello, World!\"\n    }\n\n    #[casper(payable)]\n    pub fn receive_funds(&mut self) {\n        let value = casper::transferred_value();\n        self.received_balance += value;\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-harness/src/contracts/token_owner.rs",
    "content": "use casper_contract_sdk::prelude::*;\n\nuse casper_contract_macros::casper;\nuse casper_contract_sdk::{\n    casper::{self, Entity},\n    log, revert,\n    types::{Address, CallError},\n    ContractHandle,\n};\n\nuse crate::traits::{Deposit, DepositExt};\n\nuse super::harness::HarnessRef;\n\n#[derive(Debug, PartialEq)]\n#[casper]\npub enum TokenOwnerError {\n    CallError(CallError),\n    DepositError(String),\n    WithdrawError(String),\n}\n\nimpl From<CallError> for TokenOwnerError {\n    fn from(v: CallError) -> Self {\n        Self::CallError(v)\n    }\n}\n\npub type Data = Vec<u8>; // TODO: CasperABI does not support generic parameters and it fails to compile, we need to support\n                         // this in the macro\n\n#[casper]\n#[derive(Debug, Default, PartialEq)]\npub enum FallbackHandler {\n    /// Accept tokens and do nothing.\n    #[default]\n    AcceptTokens,\n    /// Reject tokens with revert.\n    RejectWithRevert,\n    /// Reject tokens with trap.\n    RejectWithTrap,\n    /// Reject tokens with a revert with data.\n    RejectWithData(Data),\n}\n\n#[derive(Default)]\n#[casper(contract_state)]\npub struct TokenOwnerContract {\n    initial_balance: u64,\n    received_tokens: u64,\n    fallback_handler: FallbackHandler,\n}\n\n#[casper]\nimpl TokenOwnerContract {\n    #[casper(constructor, payable)]\n    pub fn token_owner_initialize() -> Self {\n        Self {\n            initial_balance: casper::transferred_value(),\n            received_tokens: 0,\n            fallback_handler: FallbackHandler::AcceptTokens,\n        }\n    }\n\n    pub fn do_deposit(\n        &self,\n        self_address: Address,\n        contract_address: Address,\n        amount: u64,\n    ) -> Result<(), TokenOwnerError> {\n        let self_balance = casper::get_balance_of(&Entity::Contract(self_address));\n        let res = ContractHandle::<HarnessRef>::from_address(contract_address)\n            .build_call()\n            
.with_transferred_value(amount)\n            .call(|harness| harness.perform_token_deposit(self_balance))?;\n        match &res {\n            Ok(()) => log!(\"Token owner deposited {amount} to {contract_address:?}\"),\n            Err(e) => log!(\"Token owner failed to deposit {amount} to {contract_address:?}: {e:?}\"),\n        }\n        res.map_err(|error| TokenOwnerError::DepositError(error.to_string()))?;\n        Ok(())\n    }\n\n    pub fn do_withdraw(\n        &self,\n        self_address: Address,\n        contract_address: Address,\n        amount: u64,\n    ) -> Result<(), TokenOwnerError> {\n        let self_entity = Entity::Contract(self_address);\n        let self_balance = casper::get_balance_of(&self_entity);\n\n        let res = ContractHandle::<HarnessRef>::from_address(contract_address)\n            .build_call()\n            .call(|harness| {\n                // Be careful about re-entrancy here: we are calling a contract that can call back\n                // while we're still not done with this entry point. If &mut self is\n                // used, then the proc macro will save the state while the state was already saved\n                // at the end of `receive()` call. 
To protect against re-entrancy\n                // attacks, please use `&self` or `self`.\n                harness.withdraw(self_balance, amount)\n            });\n\n        let res = res?;\n\n        match &res {\n            Ok(()) => {\n                log!(\"Token owner withdrew {amount} from {contract_address:?}\");\n                assert_eq!(\n                    casper::get_balance_of(&self_entity),\n                    self_balance + amount,\n                    \"Balance should change\"\n                );\n            }\n            Err(e) => {\n                log!(\"Token owner failed to withdraw {amount} from {contract_address:?}: {e:?}\");\n                assert_eq!(\n                    casper::get_balance_of(&self_entity),\n                    self_balance,\n                    \"Balance should NOT change\"\n                );\n            }\n        }\n\n        res.map_err(|error| TokenOwnerError::WithdrawError(error.to_string()))?;\n        Ok(())\n    }\n\n    pub fn total_received_tokens(&self) -> u64 {\n        self.received_tokens\n    }\n\n    pub fn set_fallback_handler(&mut self, handler: FallbackHandler) {\n        self.fallback_handler = handler;\n    }\n}\n\n#[casper(path = crate::traits)]\nimpl Deposit for TokenOwnerContract {\n    fn deposit(&mut self) {\n        log!(\n            \"Received deposit with value = {} current handler is {:?}\",\n            casper::transferred_value(),\n            self.fallback_handler\n        );\n        match std::mem::replace(&mut self.fallback_handler, FallbackHandler::AcceptTokens) {\n            FallbackHandler::AcceptTokens => {\n                let value = casper::transferred_value();\n                log!(\n                    \"TokenOwnerContract received fallback entrypoint with value={}\",\n                    value\n                );\n                self.received_tokens += value;\n            }\n            FallbackHandler::RejectWithRevert => {\n                // This will cause a 
revert.\n                log!(\"TokenOwnerContract rejected with revert\");\n                revert!();\n            }\n            FallbackHandler::RejectWithTrap => {\n                // This will cause a trap.\n                unreachable!(\"its a trap\");\n            }\n            FallbackHandler::RejectWithData(data) => {\n                // This will cause a revert with data.\n                revert!(data);\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-harness/src/contracts.rs",
    "content": "pub mod harness;\npub mod no_fallback;\npub mod token_owner;\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-harness/src/lib.rs",
    "content": "#![cfg_attr(target_arch = \"wasm32\", no_main)]\n\npub mod contracts;\npub mod traits;\n\n#[cfg(test)]\nmod tests {\n\n    use casper_contract_sdk::casper::native::{self, dispatch, EntryPointKind};\n\n    use crate::contracts::harness::{Harness, HarnessRef, INITIAL_GREETING};\n\n    #[test]\n    fn test() {\n        dispatch(|| {\n            native::invoke_export_by_name(\"call\");\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn exports() {\n        let exports = native::ENTRY_POINTS\n            .into_iter()\n            .filter_map(|e| match e.kind {\n                EntryPointKind::SmartContract { .. } => None,\n                EntryPointKind::TraitImpl { .. } => None,\n                EntryPointKind::Function { name } => Some(name),\n            })\n            .collect::<Vec<_>>();\n        assert_eq!(exports, vec![\"call\"]);\n    }\n\n    #[test]\n    fn should_greet() {\n        let mut flipper = Harness::constructor_with_args(\"Hello\".into());\n        assert_eq!(flipper.get_greeting(), \"Hello\"); // TODO: Initializer\n        flipper.set_greeting(\"Hi\".into());\n        assert_eq!(flipper.get_greeting(), \"Hi\");\n    }\n\n    #[test]\n    fn unittest() {\n        dispatch(|| {\n            let mut foo = Harness::initialize();\n            assert_eq!(foo.get_greeting(), INITIAL_GREETING);\n            foo.set_greeting(\"New greeting\".to_string());\n            assert_eq!(foo.get_greeting(), \"New greeting\");\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn foo() {\n        assert_eq!(Harness::default().into_greeting(), \"Default value\");\n    }\n}\n\n#[cfg(not(target_arch = \"wasm32\"))]\nfn main() {\n    panic!(\"Execute \\\"cargo test\\\" to test the contract, \\\"cargo build\\\" to build it\");\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-harness/src/main.rs",
    "content": "#![cfg_attr(target_arch = \"wasm32\", no_main)]\n\npub mod contracts;\npub mod traits;\n\n#[macro_use]\nextern crate alloc;\n\nuse casper_contract_macros::casper;\nuse casper_contract_sdk::{\n    casper::{self, emit, emit_raw, Entity},\n    casper_executor_wasm_common::{error::CommonResult, keyspace::Keyspace},\n    log,\n    types::{Address, CallError},\n};\n\nuse contracts::token_owner::TokenOwnerContractRef;\n\n#[casper(message)]\npub struct TestMessage {\n    pub message: String,\n}\n\n#[derive(Default)]\nstruct Seed {\n    value: u64,\n}\n\nimpl Seed {\n    fn next_seed(&mut self) -> [u8; 32] {\n        let current_value = {\n            let mut value: [u8; 32] = Default::default();\n            value[32 - 8..].copy_from_slice(&self.value.to_be_bytes());\n            value\n        };\n        self.value += 1;\n        current_value\n    }\n}\n\nfn next_test(counter: &mut u32, name: &str) -> u32 {\n    let current = *counter;\n    log!(\"Test {}. Running test: {name}\", current);\n    *counter += 1;\n    current\n}\n\nfn perform_test(seed: &mut Seed, flipper_address: Address) {\n    use casper_contract_sdk::ContractBuilder;\n    use contracts::harness::{CustomError, INITIAL_GREETING};\n\n    use crate::contracts::{harness::HarnessRef, token_owner::FallbackHandler};\n\n    log!(\"calling create\");\n\n    let session_caller = casper::get_caller();\n    assert_ne!(session_caller, Entity::Account([0; 32]));\n\n    // Constructor without args\n    let mut counter = 1;\n\n    {\n        next_test(&mut counter, \"Traps and reverts\");\n\n        let contract_handle = ContractBuilder::<HarnessRef>::new()\n            .with_seed(&seed.next_seed())\n            .create(|| HarnessRef::initialize())\n            .expect(\"Should create\");\n        log!(\"success\");\n        log!(\"contract_address: {:?}\", contract_handle.contract_address());\n\n        // Verify that the address captured inside constructor is not the same as caller.\n        let 
greeting_result = contract_handle\n            .call(|harness| harness.get_greeting())\n            .expect(\"Should call\");\n        log!(\"Getting greeting: {greeting_result}\");\n        assert_eq!(greeting_result, INITIAL_GREETING);\n\n        let () = contract_handle\n            .call(|harness| harness.set_greeting(\"Foo\".into()))\n            .expect(\"Should call\");\n\n        log!(\"New greeting saved\");\n        let greeting_result = contract_handle\n            .call(|harness| harness.get_greeting())\n            .expect(\"Should call\");\n        assert_eq!(greeting_result, \"Foo\");\n\n        log!(\"Emitting unreachable trap\");\n\n        let call_result = contract_handle.call(|harness| harness.emit_unreachable_trap());\n        assert_eq!(call_result, Err(CallError::CalleeTrapped));\n\n        log!(\"Trap recovered\");\n\n        {\n            let counter_value_before = contract_handle\n                .call(|harness| harness.counter())\n                .expect(\"Should call\");\n\n            // increase counter\n            let () = contract_handle\n                .call(|harness| harness.increment_counter())\n                .expect(\"Should call\");\n\n            let counter_value_after = contract_handle\n                .call(|harness| harness.counter())\n                .expect(\"Should call\");\n\n            assert_eq!(counter_value_before + 1, counter_value_after);\n        }\n\n        {\n            let counter_value_before = contract_handle\n                .call(|harness| harness.counter())\n                .expect(\"Should call\");\n\n            let call_result = contract_handle\n                .try_call(|harness| harness.emit_revert_with_data())\n                .expect(\"Call succeed\");\n\n            assert_eq!(call_result.result, Err(CallError::CalleeReverted));\n            assert_eq!(call_result.into_result().unwrap(), Err(CustomError::Bar),);\n\n            let counter_value_after = contract_handle\n                
.call(|harness| harness.counter())\n                .expect(\"Should call\");\n\n            assert_eq!(counter_value_before, counter_value_after);\n        }\n\n        log!(\"Revert with data success\");\n\n        let call_result = contract_handle\n            .try_call(|harness| harness.emit_revert_without_data())\n            .expect(\"Call succeed\");\n        assert_eq!(call_result.result, Err(CallError::CalleeReverted));\n        assert_eq!(call_result.data, None);\n\n        log!(\"Revert without data success\");\n\n        let call_result = contract_handle\n            .try_call(|harness| harness.should_revert_on_error(false))\n            .expect(\"Call succeed\");\n        assert!(!call_result.did_revert());\n        assert_eq!(call_result.into_result().unwrap(), Ok(()));\n\n        log!(\"Revert on error success (ok case)\");\n\n        let call_result = contract_handle\n            .try_call(|harness| harness.should_revert_on_error(true))\n            .expect(\"Call succeed\");\n        assert!(call_result.did_revert());\n        assert_eq!(\n            call_result.into_result().unwrap(),\n            Err(CustomError::WithBody(\"Reverted\".to_string()))\n        );\n\n        log!(\"Revert on error success (err case)\");\n        // let should_revert_on_error: TypedCall<(bool,), Result<(), CustomError>> =\n        //     TypedCall::new(contract_address, selector!(\"should_revert_on_error\"));\n        // let result = should_revert_on_error.call((false,));\n        // assert!(!result.did_revert());\n\n        // let result = should_revert_on_error.call((true,));\n        // assert!(result.did_revert());\n        // assert_eq!(\n        //     result.into_return_value(),\n        //     Err(CustomError::WithBody(\"Reverted\".to_string()))\n        // );\n    }\n\n    // Constructor with args\n\n    {\n        next_test(&mut counter, \"Constructor with args\");\n\n        let contract_handle = ContractBuilder::<HarnessRef>::new()\n            
.with_seed(&seed.next_seed())\n            .create(|| HarnessRef::constructor_with_args(\"World\".into()))\n            .expect(\"Should create\");\n        log!(\"success 2\");\n        log!(\"contract_address: {:?}\", contract_handle.contract_address());\n\n        // Calling constructor twice should fail\n        let error = match contract_handle\n            .try_call(|_| HarnessRef::constructor_with_args(\"World\".into()))\n        {\n            Ok(_) => panic!(\"Constructor should fail to initialize already initialized contract\"),\n            Err(error) => error,\n        };\n        assert_eq!(error, CallError::CalleeTrapped);\n\n        let result = contract_handle\n            .call(|harness| harness.get_greeting())\n            .expect(\"Should call\");\n        assert_eq!(result, \"Hello, World!\".to_string(),);\n    }\n\n    {\n        next_test(&mut counter, \"Failing constructor\");\n\n        let error = match ContractBuilder::<HarnessRef>::new()\n            .with_seed(&seed.next_seed())\n            .create(|| HarnessRef::failing_constructor(\"World\".to_string()))\n        {\n            Ok(_) => panic!(\"Constructor that reverts should fail to create\"),\n            Err(error) => error,\n        };\n        assert_eq!(error, CallError::CalleeReverted);\n\n        let error = match ContractBuilder::<HarnessRef>::new()\n            .with_seed(&seed.next_seed())\n            .create(|| HarnessRef::trapping_constructor())\n        {\n            Ok(_) => panic!(\"Constructor that traps should fail to create\"),\n            Err(error) => error,\n        };\n        assert_eq!(error, CallError::CalleeTrapped);\n    }\n\n    //\n    // Check payable entrypoints\n    //\n\n    {\n        next_test(&mut counter, \"Checking payable entrypoints\");\n\n        let contract_handle = ContractBuilder::<HarnessRef>::new()\n            .with_transferred_value(1)\n            .with_seed(&seed.next_seed())\n            .create(|| 
HarnessRef::payable_constructor())\n            .expect(\"Should create\");\n\n        assert_eq!(contract_handle.balance(), 1);\n\n        log!(\"success 2\");\n        log!(\"contract_address: {:?}\", contract_handle.contract_address());\n\n        // Transferring 500 motes before payable entrypoint is executed\n\n        let result_1 = contract_handle\n            .build_call()\n            .with_transferred_value(500)\n            .call(|harness| harness.payable_entrypoint())\n            .expect(\"Should call\");\n        assert_eq!(result_1, Ok(()));\n\n        // Transferring 499 motes before payable entrypoint is executed\n\n        let result_2 = contract_handle\n            .build_call()\n            .with_transferred_value(499)\n            .call(|harness| harness.payable_entrypoint())\n            .expect(\"Should call\");\n        assert_eq!(result_2, Ok(()));\n\n        // Check balance after payable constructor and two successful calls\n        assert_eq!(contract_handle.balance(), 1 + 500 + 499);\n\n        let result_3 = contract_handle\n            .build_call()\n            .with_transferred_value(123)\n            .call(|harness| harness.payable_failing_entrypoint())\n            .expect(\"Should call\");\n        assert_eq!(result_3, Err(CustomError::Foo));\n        // Check balance after failed call, should be the same as before\n        assert_eq!(contract_handle.balance(), 1 + 500 + 499);\n    }\n\n    // Deposit and withdraw\n    // 1. wasm (caller = A, callee = B)\n    //   2. create (caller = B, callee = C)\n    //   3. call (caller = B, callee = C)\n    //     4. create (caller = C, callee = D)\n    //     5. 
call (caller = C, callee = D)\n\n    {\n        let current_test = next_test(&mut counter, \"Deposit and withdraw\");\n\n        let contract_handle = ContractBuilder::<HarnessRef>::new()\n            .with_transferred_value(0)\n            .with_seed(&seed.next_seed())\n            .create(|| HarnessRef::payable_constructor())\n            .expect(\"Should create\");\n\n        let caller = casper::get_caller();\n\n        {\n            next_test(\n                &mut counter,\n                &format!(\"{current_test} Depositing as an account\"),\n            );\n            let account_balance_1 = casper::get_balance_of(&caller);\n            contract_handle\n                .build_call()\n                .with_transferred_value(100)\n                .call(|harness| harness.perform_token_deposit(account_balance_1))\n                .expect(\"Should call\")\n                .expect(\"Should succeed\");\n            let account_balance_2 = casper::get_balance_of(&caller);\n            assert_eq!(account_balance_2, account_balance_1 - 100);\n\n            contract_handle\n                .build_call()\n                .with_transferred_value(25)\n                .call(|harness| harness.perform_token_deposit(account_balance_2))\n                .expect(\"Should call\")\n                .expect(\"Should succeed\");\n\n            let account_balance_after = casper::get_balance_of(&caller);\n            assert_eq!(account_balance_after, account_balance_1 - 125);\n        }\n\n        let current_contract_balance = contract_handle\n            .build_call()\n            .call(|harness| harness.balance())\n            .expect(\"Should call\");\n        assert_eq!(current_contract_balance, 100 + 25);\n\n        {\n            next_test(\n                &mut counter,\n                &format!(\"{current_test} Withdrawing as an account\"),\n            );\n            let account_balance_before = casper::get_balance_of(&caller);\n            contract_handle\n            
    .build_call()\n                .call(|harness| harness.withdraw(account_balance_before, 50))\n                .expect(\"Should call\")\n                .expect(\"Should succeed\");\n            let account_balance_after = casper::get_balance_of(&caller);\n            assert_ne!(account_balance_after, account_balance_before);\n            assert_eq!(account_balance_after, account_balance_before + 50);\n\n            let current_deposit_balance = contract_handle\n                .build_call()\n                .call(|harness| harness.balance())\n                .expect(\"Should call\");\n            assert_eq!(current_deposit_balance, 100 + 25 - 50);\n\n            assert_eq!(contract_handle.balance(), 100 + 25 - 50);\n        }\n    }\n\n    //\n    // Perform tests with a contract acting as an owner of funds deposited into other contract\n    //\n\n    {\n        next_test(\n            &mut counter,\n            \"Contract acts as owner of funds deposited into other contract\",\n        );\n\n        let caller = casper::get_caller();\n\n        let harness = ContractBuilder::<HarnessRef>::new()\n            .with_transferred_value(0)\n            .with_seed(&seed.next_seed())\n            .create(|| HarnessRef::constructor_with_args(\"Contract\".into()))\n            .expect(\"Should create\");\n\n        let initial_balance = 1000;\n\n        let token_owner = ContractBuilder::<TokenOwnerContractRef>::new()\n            .with_transferred_value(initial_balance)\n            .with_seed(&seed.next_seed())\n            .create(|| TokenOwnerContractRef::token_owner_initialize())\n            .expect(\"Should create\");\n        assert_eq!(token_owner.balance(), initial_balance);\n\n        // token owner contract performs a deposit into a harness contract through `deposit` payable\n        // entrypoint caller: no change\n        // token owner: -50\n        // harness: +50\n        {\n            next_test(&mut counter, \"Subtest 1\");\n            let 
caller_balance_before = casper::get_balance_of(&caller);\n            let token_owner_balance_before = token_owner.balance();\n            let harness_balance_before = harness.balance();\n\n            let initial_deposit = 500;\n\n            token_owner\n                .call(|contract| {\n                    contract.do_deposit(\n                        token_owner.contract_address(),\n                        harness.contract_address(),\n                        initial_deposit,\n                    )\n                })\n                .expect(\"Should call\")\n                .expect(\"Should succeed\");\n\n            assert_eq!(\n                casper::get_balance_of(&caller),\n                caller_balance_before,\n                \"Caller funds should not change\"\n            );\n            assert_eq!(\n                token_owner.balance(),\n                token_owner_balance_before - initial_deposit,\n                \"Token owner balance should decrease\"\n            );\n            assert_eq!(harness.balance(), harness_balance_before + initial_deposit);\n        }\n\n        // token owner contract performs a withdrawal from a harness contract through `withdraw`\n        // entrypoint caller: no change\n        // token owner: +50\n        // harness: -50\n        {\n            next_test(&mut counter, \"Subtest 2\");\n            let caller_balance_before = casper::get_balance_of(&caller);\n            let token_owner_balance_before = token_owner.balance();\n            let harness_balance_before = harness.balance();\n\n            token_owner\n                .call(|contract| {\n                    contract.do_withdraw(\n                        token_owner.contract_address(),\n                        harness.contract_address(),\n                        50,\n                    )\n                })\n                .expect(\"Should call\")\n                .expect(\"Should succeed\");\n\n            assert_eq!(\n                
casper::get_balance_of(&caller),\n                caller_balance_before,\n                \"Caller funds should not change\"\n            );\n            assert_eq!(\n                token_owner.balance(),\n                token_owner_balance_before + 50,\n                \"Token owner balance should increase\"\n            );\n            assert_eq!(harness.balance(), harness_balance_before - 50);\n            let total_received_tokens = token_owner\n                .call(|contract| contract.total_received_tokens())\n                .expect(\"Should call\");\n            assert_eq!(total_received_tokens, 50);\n        }\n\n        {\n            next_test(\n                &mut counter,\n                \"Token owner will revert inside fallback while plain transfer\",\n            );\n            {\n                let harness_balance_before = harness.balance();\n                token_owner\n                    .call(|contract| {\n                        contract.set_fallback_handler(FallbackHandler::RejectWithRevert)\n                    })\n                    .expect(\"Should call\");\n                let harness_balance_after = harness.balance();\n                assert_eq!(harness_balance_before, harness_balance_after);\n            }\n\n            {\n                let harness_balance_before = harness.balance();\n                let withdraw_result = token_owner\n                    .call(|contract| {\n                        contract.do_withdraw(\n                            token_owner.contract_address(),\n                            harness.contract_address(),\n                            50,\n                        )\n                    })\n                    .expect(\"Should call\");\n                let harness_balance_after = harness.balance();\n                assert_eq!(harness_balance_before, harness_balance_after);\n                assert_eq!(\n                    withdraw_result,\n                    Err(\n                        
crate::contracts::token_owner::TokenOwnerError::WithdrawError(\n                            \"deposit error callee reverted\".to_string()\n                        )\n                    )\n                );\n            }\n        }\n\n        {\n            next_test(\n                &mut counter,\n                \"Token owner will trap inside fallback while plain transfer\",\n            );\n            {\n                let harness_balance_before = harness.balance();\n                token_owner\n                    .call(|contract| contract.set_fallback_handler(FallbackHandler::RejectWithTrap))\n                    .expect(\"Should call\");\n                let harness_balance_after = harness.balance();\n                assert_eq!(harness_balance_before, harness_balance_after);\n            }\n\n            {\n                let harness_balance_before = harness.balance();\n                let withdraw_result = token_owner\n                    .call(|contract| {\n                        contract.do_withdraw(\n                            token_owner.contract_address(),\n                            harness.contract_address(),\n                            50,\n                        )\n                    })\n                    .expect(\"Should call\");\n                let harness_balance_after = harness.balance();\n                assert_eq!(harness_balance_before, harness_balance_after);\n                assert_eq!(\n                    withdraw_result,\n                    Err(\n                        crate::contracts::token_owner::TokenOwnerError::WithdrawError(\n                            \"deposit error callee trapped\".to_string()\n                        )\n                    )\n                );\n            }\n        }\n\n        {\n            next_test(\n                &mut counter,\n                \"Token owner will revert with data inside fallback while plain transfer\",\n            );\n            {\n                let 
harness_balance_before = harness.balance();\n                token_owner\n                    .call(|contract| {\n                        contract.set_fallback_handler(FallbackHandler::RejectWithData(vec![\n                            1, 2, 3, 4, 5,\n                        ]))\n                    })\n                    .expect(\"Should call\");\n                let harness_balance_after = harness.balance();\n                assert_eq!(harness_balance_before, harness_balance_after);\n            }\n\n            {\n                let harness_balance_before = harness.balance();\n                let withdraw_result = token_owner\n                    .call(|contract| {\n                        contract.do_withdraw(\n                            token_owner.contract_address(),\n                            harness.contract_address(),\n                            50,\n                        )\n                    })\n                    .expect(\"Should call\");\n                let harness_balance_after = harness.balance();\n                assert_eq!(harness_balance_before, harness_balance_after);\n                assert_eq!(\n                    withdraw_result,\n                    Err(\n                        crate::contracts::token_owner::TokenOwnerError::WithdrawError(\n                            \"deposit error callee reverted\".to_string()\n                        )\n                    )\n                );\n            }\n        }\n    }\n\n    // {\n    //     let _current_test = next_test(\n    //         &mut counter,\n    //         \"Plain transfer to a contract does not work without fallback\",\n    //     );\n    //     let flipper_address = Entity::Contract(flipper_address);\n\n    //     // assert_eq!(\n    //     //     host::casper_transfer(&flipper_address, 123),\n    //     //     Err(CallError::NotCallable)\n    //     // );\n    // }\n\n    {\n        let _current_test = next_test(\n            &mut counter,\n            \"Calling 
non-existing entrypoint does not crash\",\n        );\n        let (output, result) =\n            casper::casper_call(&flipper_address, 0, \"non_existing_entrypoint\", &[]);\n        assert_eq!(result, Err(CallError::NotCallable));\n        assert_eq!(output, None);\n    }\n\n    {\n        let _current_test = next_test(&mut counter, \"Message passing\");\n\n        for i in 0..10 {\n            assert_eq!(\n                emit(TestMessage {\n                    message: format!(\"Hello, world: {i}!\"),\n                }),\n                Ok(())\n            );\n        }\n\n        let small_topic_name = \"a\".repeat(32);\n        let large_topic_name = \"a\".repeat(257);\n        let large_payload_data = vec![0; 16384];\n\n        assert_eq!(\n            emit_raw(&large_topic_name, &[]),\n            Err(CommonResult::TopicTooLong)\n        );\n        assert_eq!(\n            emit_raw(&small_topic_name, &large_payload_data),\n            Err(CommonResult::PayloadTooLong)\n        );\n\n        for i in 0..127u64 {\n            assert_eq!(\n                emit_raw(&format!(\"Topic{i}\"), &i.to_be_bytes()),\n                Ok(()),\n                \"Emitting message with small payload failed\"\n            );\n        }\n\n        assert_eq!(\n            emit_raw(&format!(\"Topic128\"), &[128]),\n            Err(CommonResult::TooManyTopics),\n            \"Emitting message with small payload failed\"\n        );\n    }\n\n    {\n        next_test(&mut counter, \"Removing from global state\");\n        let key = [0, 1, 2, 3];\n        let value_1 = [4, 5, 6, 7];\n        let value_2 = [8, 9, 10, 11, 12, 13, 14, 15];\n        let keyspace = Keyspace::Context(&key);\n        // No value exists\n        assert_eq!(casper::read(keyspace, |_size| None), Ok(None));\n\n        // Write a value\n        casper::write(keyspace, &value_1).unwrap();\n        // Value exists\n        assert_eq!(casper::read_into_vec(keyspace), Ok(Some(value_1.to_vec())));\n        // 
Remove the value\n        casper::remove(keyspace).unwrap();\n        // No value exists\n        assert_eq!(casper::read_into_vec(keyspace), Ok(None));\n        // Removing again (aka removing non-existent key) should raise an error\n        assert_eq!(casper::remove(keyspace), Err(CommonResult::NotFound));\n        // Re-reading already purged value wouldn't be an issue\n        assert_eq!(casper::read_into_vec(keyspace), Ok(None));\n        // Write a new value under same key\n        casper::write(keyspace, &value_2).unwrap();\n        // New value exists\n        assert_eq!(casper::read_into_vec(keyspace), Ok(Some(value_2.to_vec())));\n\n        // Attempting to remove a definetely non-existent key should be an error\n        let keyspace = Keyspace::Context(b\"this key definetely does not exists\");\n        let result = casper::remove(keyspace);\n        assert_eq!(result, Err(CommonResult::NotFound));\n    }\n\n    log!(\"👋 Goodbye\");\n}\n\n#[casper(export)]\npub fn call(flipper_address: Address) {\n    let mut seed = Seed::default();\n    perform_test(&mut seed, flipper_address);\n}\n\n#[casper(export)]\npub fn yet_another_exported_function(arg1: u64, arg2: String) {\n    log!(\"Yet another exported function with args arg1={arg1} arg2={arg2}\");\n}\n\n#[cfg(test)]\nmod tests {\n    use casper::native::{dispatch_with, EntryPointKind, Environment, ENTRY_POINTS};\n    use casper_contract_sdk::casper::native::{self, dispatch};\n    use contracts::harness::{Harness, INITIAL_GREETING};\n\n    use super::*;\n    #[test]\n    fn can_call_exported_function() {\n        super::yet_another_exported_function(1234u64, \"Hello, world!\".to_string());\n\n        let input_data = casper_contract_sdk::serializers::borsh::to_vec(&(\n            4321u64,\n            \"!world, Hello\".to_string(),\n        ))\n        .unwrap();\n\n        dispatch_with(Environment::default().with_input_data(input_data), || {\n            
native::invoke_export_by_name(\"yet_another_exported_function\");\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn exports() {\n        assert!(ENTRY_POINTS\n            .iter()\n            .any(|export| export.kind == EntryPointKind::Function { name: \"call\" }));\n    }\n\n    #[test]\n    fn should_greet() {\n        let mut flipper = Harness::constructor_with_args(\"Hello\".into());\n        assert_eq!(flipper.get_greeting(), \"Hello\"); // TODO: Initializer\n        flipper.set_greeting(\"Hi\".into());\n        assert_eq!(flipper.get_greeting(), \"Hi\");\n    }\n\n    #[test]\n    fn unittest() {\n        dispatch(|| {\n            let mut foo = Harness::initialize();\n            assert_eq!(foo.get_greeting(), INITIAL_GREETING);\n            foo.set_greeting(\"New greeting\".to_string());\n            assert_eq!(foo.get_greeting(), \"New greeting\");\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn foo() {\n        assert_eq!(Harness::default().into_greeting(), \"Default value\");\n    }\n}\n\n#[cfg(not(target_arch = \"wasm32\"))]\nfn main() {\n    panic!(\"Execute \\\"cargo test\\\" to test the contract, \\\"cargo build\\\" to build it\");\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-harness/src/traits.rs",
    "content": "use casper_contract_macros::casper;\n\n/// Deposit interface for contracts to receive tokens.\n///\n/// Useful for contracts that need to receive tokens.\n#[casper]\npub trait Deposit {\n    /// Deposit tokens into the contract.\n    #[casper(payable)]\n    fn deposit(&mut self);\n}\n\n#[casper]\npub trait SupportsALotOfArguments {\n    fn very_long_list_of_arguments(\n        &mut self,\n        a0: u64,\n        a1: u64,\n        a2: u64,\n        a3: u64,\n        a4: String,\n        a5: String,\n        a6: u64,\n        a7: u64,\n        a8: u64,\n        a9: u64,\n        a10: u32,\n        a11: u16,\n        a12: String,\n        a13: bool,\n        a14: u32,\n        a15: Vec<String>,\n        a16: Vec<u64>,\n        a17: String,\n        a18: String,\n        a19: Option<String>,\n        a20: u64,\n        a21: u32,\n        a22: (u64, u32, u16, u8),\n        a23: (String, String, String, String, u64),\n    );\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-host/Cargo.toml",
    "content": "[package]\nname = \"vm2-host\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html\n[lib]\ncrate-type = [\"cdylib\", \"rlib\"]\n\n[dependencies]\ncasper-contract-macros = { path = \"../../../macros\" }\ncasper-contract-sdk = { path = \"../../../sdk\" }\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-host/build.rs",
    "content": "fn main() {\n    // Check if target arch is wasm32 and set link flags accordingly\n    if std::env::var(\"TARGET\").unwrap() == \"wasm32-unknown-unknown\" {\n        println!(\"cargo:rustc-link-arg=--import-memory\");\n        println!(\"cargo:rustc-link-arg=--export-table\");\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-host/src/lib.rs",
    "content": "#![cfg_attr(target_arch = \"wasm32\", no_main)]\n\nuse casper_contract_sdk::{\n    casper_executor_wasm_common::{flags::ReturnFlags, keyspace::Keyspace},\n    prelude::*,\n};\n\nconst CURRENT_VERSION: &str = \"v1\";\n\n// This contract is used to assert that calling host functions consumes gas.\n// It is by design that it does nothing other than calling appropriate host functions.\n\n// There is no need for these functions to actually do anything meaningful, and it's alright\n// if they short-circuit.\n\n#[casper(contract_state)]\npub struct MinHostWrapper;\n\nimpl Default for MinHostWrapper {\n    fn default() -> Self {\n        panic!(\"Unable to instantiate contract without a constructor\");\n    }\n}\n\n#[casper]\nimpl MinHostWrapper {\n    #[casper(constructor)]\n    pub fn new(with_host_fn_call: String) -> Self {\n        let ret = Self;\n        match with_host_fn_call.as_str() {\n            \"get_caller\" => {\n                ret.get_caller();\n            }\n            \"get_block_time\" => {\n                ret.get_block_time();\n            }\n            \"get_value\" => {\n                ret.get_transferred_value();\n            }\n            \"get_balance_of\" => {\n                ret.get_balance_of();\n            }\n            \"call\" => {\n                ret.call();\n            }\n            \"input\" => {\n                ret.input();\n            }\n            \"create\" => {\n                ret.create();\n            }\n            \"print\" => {\n                ret.print();\n            }\n            \"read\" => {\n                ret.read();\n            }\n            \"ret\" => {\n                ret.ret();\n            }\n            \"transfer\" => {\n                ret.transfer();\n            }\n            \"upgrade\" => {\n                ret.upgrade();\n            }\n            \"write\" => {\n                ret.write();\n            }\n            \"write_n_bytes\" => {\n                
ret.write();\n            }\n            _ => panic!(\"Unknown host function\"),\n        }\n        ret\n    }\n\n    #[casper(constructor)]\n    pub fn new_with_write(byte_count: u64) -> Self {\n        let ret = Self;\n        ret.write_n_bytes(byte_count);\n        ret\n    }\n\n    #[casper(constructor)]\n    pub fn default() -> Self {\n        Self\n    }\n\n    pub fn version(&self) -> &str {\n        CURRENT_VERSION\n    }\n\n    pub fn get_caller(&self) -> Entity {\n        casper::get_caller()\n    }\n\n    pub fn get_block_time(&self) -> u64 {\n        casper::get_block_time()\n    }\n\n    pub fn get_transferred_value(&self) -> u64 {\n        casper::transferred_value()\n    }\n\n    pub fn get_balance_of(&self) -> u64 {\n        casper::get_balance_of(&Entity::Account([0u8; 32]))\n    }\n\n    pub fn call(&self) {\n        casper::casper_call(&[0u8; 32], 0, \"\", &[]).1.ok();\n    }\n\n    pub fn input(&self) {\n        casper::copy_input();\n    }\n\n    pub fn create(&self) {\n        casper::create(None, 0, None, None, None).ok();\n    }\n\n    pub fn print(&self) {\n        casper::print(\"\");\n    }\n\n    pub fn read(&self) {\n        casper::read(Keyspace::Context(&[]), |_| None).ok();\n    }\n\n    pub fn ret(&self) {\n        casper::ret(ReturnFlags::empty(), None);\n    }\n\n    pub fn transfer(&self) {\n        casper::transfer(&[0; 32], 0).ok();\n    }\n\n    pub fn upgrade(&self) {\n        casper::upgrade(&[], None, None).ok();\n    }\n\n    pub fn write(&self) {\n        casper::write(Keyspace::Context(&[]), &[]).ok();\n    }\n\n    pub fn write_n_bytes(&self, n: u64) {\n        let buffer = vec![0; n as usize];\n        casper::write(Keyspace::Context(&[0]), &buffer).ok();\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-legacy-counter-proxy/Cargo.toml",
    "content": "[package]\nname = \"vm2-legacy-counter-proxy\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html\n[lib]\ncrate-type = [\"cdylib\", \"rlib\"]\n\n\n[dependencies]\ncasper-contract-macros = { path = \"../../../macros\" }\ncasper-contract-sdk = { path = \"../../../sdk\" }\n\n[target.'cfg(not(target_arch = \"wasm32\"))'.dependencies]\nserde_json = \"1\"\ncasper-contract-sdk = { path = \"../../../sdk\", features = [\"cli\"] }\n\n[dev-dependencies]\ncasper-contract-sdk-codegen = { path = \"../../../sdk_codegen\" }\n\n[build-dependencies]\ncasper-contract-sdk-codegen = { path = \"../../../sdk_codegen\" }\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-legacy-counter-proxy/build.rs",
    "content": "// use std::{env, fs, path::Path};\n\n// use casper_contract_sdk_codegen::Codegen;\n\n// const SCHEMA: &str = include_str!(\"cep18_schema.json\");\n\nfn main() {\n    // Check if target arch is wasm32 and set link flags accordingly\n    if std::env::var(\"TARGET\").unwrap() == \"wasm32-unknown-unknown\" {\n        println!(\"cargo:rustc-link-arg=--import-memory\");\n        println!(\"cargo:rustc-link-arg=--export-table\");\n    }\n\n    // casper_contract_sdk::build_flags();\n\n    // let mut codegen = Codegen::from_str(SCHEMA).unwrap();\n    // let source = codegen.gen();\n\n    // let target_dir = env::var_os(\"OUT_DIR\").unwrap();\n    // let target_path = Path::new(&target_dir).join(\"cep18_schema.rs\");\n    // fs::write(&target_path, source).unwrap();\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-legacy-counter-proxy/src/lib.rs",
    "content": "#![cfg_attr(target_arch = \"wasm32\", no_main)]\n#![cfg_attr(target_arch = \"wasm32\", no_std)]\n\nuse casper_contract_macros::{casper, PanicOnDefault};\nuse casper_contract_sdk::{casper, log, types::Address};\n\n/// This contract implements a simple LegacyCounterProxy.\n#[derive(PanicOnDefault)]\n#[casper(contract_state)]\npub struct LegacyCounterProxy {\n    /// Legacy address of the counter contract.\n    legacy_address: Address,\n}\n\nconst EMPTY_RUNTIME_ARGS: [u8; 4] = 0u32.to_le_bytes();\nconst CL_VALUE_UNIT_BYTES: [u8; 5] = [0, 0, 0, 0, 9];\n\n#[casper]\nimpl LegacyCounterProxy {\n    #[casper(constructor)]\n    pub fn new(legacy_address: Address) -> Self {\n        Self { legacy_address }\n    }\n\n    pub fn perform_test(&self) {\n        let (counter_get_result_1, host_error) =\n            casper::casper_call(&self.legacy_address, 0, \"counter_get\", &EMPTY_RUNTIME_ARGS);\n        log!(\"counter_get_result_before: {:?}\", counter_get_result_1);\n        let _ = host_error.expect(\"No error 1\");\n\n        let (inc_result_1, host_error) =\n            casper::casper_call(&self.legacy_address, 0, \"counter_inc\", &EMPTY_RUNTIME_ARGS);\n        log!(\"inc_result {:?}\", inc_result_1);\n        assert_eq!(inc_result_1, Some(CL_VALUE_UNIT_BYTES.to_vec()));\n        let _ = host_error.expect(\"No error 2\");\n\n        let (counter_get_result_2, host_error) =\n            casper::casper_call(&self.legacy_address, 0, \"counter_get\", &EMPTY_RUNTIME_ARGS);\n        let _ = host_error.expect(\"No error 3\");\n        log!(\"counter_get_result_after: {:?}\", counter_get_result_2);\n        assert_ne!(counter_get_result_1, counter_get_result_2);\n\n        let (inc_result_2, host_error) =\n            casper::casper_call(&self.legacy_address, 0, \"counter_inc\", &EMPTY_RUNTIME_ARGS);\n        log!(\"inc_result {:?}\", inc_result_2);\n        assert_eq!(inc_result_2, Some(CL_VALUE_UNIT_BYTES.to_vec()));\n        let _ = host_error.expect(\"No error 
4\");\n\n        let (counter_get_result_3, host_error) =\n            casper::casper_call(&self.legacy_address, 0, \"counter_get\", &EMPTY_RUNTIME_ARGS);\n        let _ = host_error.expect(\"No error 3\");\n        log!(\"counter_get_result_after: {:?}\", counter_get_result_3);\n        assert_ne!(counter_get_result_2, counter_get_result_3);\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-trait/Cargo.toml",
    "content": "[package]\nname = \"vm2-trait\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html\n[lib]\ncrate-type = [\"cdylib\", \"rlib\"]\n\n[dependencies]\ncasper-contract-macros = { path = \"../../../macros\" }\ncasper-contract-sdk = { path = \"../../../sdk\" }\nbase16 = \"0.2.1\"\n\n[target.'cfg(not(target_arch = \"wasm32\"))'.dependencies]\nserde_json = \"1\"\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-trait/build.rs",
    "content": "fn main() {\n    // Check if target arch is wasm32 and set link flags accordingly\n    if std::env::var(\"TARGET\").unwrap() == \"wasm32-unknown-unknown\" {\n        println!(\"cargo:rustc-link-arg=--import-memory\");\n        println!(\"cargo:rustc-link-arg=--export-table\");\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-trait/src/lib.rs",
    "content": "#![cfg_attr(target_arch = \"wasm32\", no_main)]\n#![cfg_attr(target_arch = \"wasm32\", no_std)]\n\nuse casper_contract_macros::{blake2b256, casper};\nuse casper_contract_sdk::{\n    casper,\n    contrib::{\n        access_control::{AccessControl, AccessControlExt, AccessControlState, Role},\n        ownable::{Ownable, OwnableError, OwnableExt, OwnableState},\n    },\n    log,\n    prelude::*,\n    ContractBuilder, ContractHandle,\n};\n\npub const GREET_RETURN_VALUE: u64 = 123456789;\n\n#[casper]\npub trait HasFallback {\n    #[casper(fallback)]\n    fn this_is_fallback_method(&self) {\n        log!(\"Fallback called with value={}\", casper::transferred_value());\n    }\n}\n\n#[casper]\npub trait Trait1 {\n    fn abstract_greet(&self);\n\n    fn greet(&self, who: String) -> u64 {\n        log!(\"Hello from greet, {who}!\");\n        GREET_RETURN_VALUE\n    }\n\n    fn adder(&self, lhs: u64, rhs: u64) -> u64;\n}\n\n#[casper]\n#[derive(Copy, Clone, Default)]\npub struct CounterState {\n    value: u64,\n}\n\n#[casper]\npub trait Counter {\n    fn increment(&mut self) {\n        log!(\"Incrementing!\");\n        self.counter_state_mut().value += 1;\n    }\n\n    fn decrement(&mut self) {\n        log!(\"Decrementing!\");\n        self.counter_state_mut().value -= 1;\n    }\n\n    fn get_counter_value(&self) -> u64 {\n        self.counter_state().value\n    }\n\n    fn get_counter_state(&self) -> CounterState {\n        self.counter_state().clone()\n    }\n\n    #[casper(private)]\n    fn counter_state(&self) -> &CounterState;\n\n    #[casper(private)]\n    fn counter_state_mut(&mut self) -> &mut CounterState;\n}\n\n#[casper(contract_state)]\n#[derive(Default)]\npub struct HasTraits {\n    counter_state: CounterState,\n    ownable_state: OwnableState,\n    access_control_state: AccessControlState,\n}\n\n#[casper]\nimpl Trait1 for HasTraits {\n    fn abstract_greet(&self) {\n        log!(\"Hello from abstract greet impl!\");\n    }\n\n    fn adder(&self, 
lhs: u64, rhs: u64) -> u64 {\n        lhs + rhs\n    }\n}\n\n#[casper]\nimpl HasFallback for HasTraits {}\n\n// Implementing traits does not require extra annotation as the trait dispatcher is generated at the\n// trait level.\n#[casper]\nimpl Counter for HasTraits {\n    fn counter_state_mut(&mut self) -> &mut CounterState {\n        &mut self.counter_state\n    }\n    fn counter_state(&self) -> &CounterState {\n        &self.counter_state\n    }\n}\n\n#[casper(path = casper_contract_sdk::contrib::ownable)]\nimpl Ownable for HasTraits {\n    fn state(&self) -> &OwnableState {\n        &self.ownable_state\n    }\n    fn state_mut(&mut self) -> &mut OwnableState {\n        &mut self.ownable_state\n    }\n}\n\n#[casper]\npub enum UserRole {\n    Admin,\n    User,\n}\n\nimpl Into<Role> for UserRole {\n    fn into(self) -> Role {\n        match self {\n            UserRole::Admin => blake2b256!(\"admin\"),\n            UserRole::User => blake2b256!(\"user\"),\n        }\n    }\n}\n\n#[casper(path = casper_contract_sdk::contrib::access_control)]\nimpl AccessControl for HasTraits {\n    fn state(&self) -> &AccessControlState {\n        &self.access_control_state\n    }\n    fn state_mut(&mut self) -> &mut AccessControlState {\n        &mut self.access_control_state\n    }\n}\n\n#[casper]\nimpl HasTraits {\n    #[casper(constructor)]\n    pub fn new(counter_value: u64) -> Self {\n        log!(\"Calling new constructor with value={counter_value}\");\n        Self {\n            counter_state: CounterState {\n                value: counter_value,\n            },\n            ownable_state: OwnableState::default(),\n            access_control_state: AccessControlState::default(),\n        }\n    }\n    pub fn foobar(&self) {\n        // Can extend contract that implements a trait to also call methods provided by a trait.\n        let counter_state = self.counter_state();\n        log!(\"Foobar! 
Counter value: {}\", counter_state.value);\n    }\n\n    pub fn only_for_owner(&mut self) -> Result<(), OwnableError> {\n        self.only_owner()?;\n        log!(\"Only for owner!\");\n        Ok(())\n    }\n}\n\n#[casper]\nimpl HasTraits {\n    pub fn multiple_impl_blocks_should_work() {\n        log!(\"Multiple impl blocks work!\");\n    }\n}\n\nfn perform_test() {\n    let contract_handle = ContractBuilder::<HasTraitsRef>::new()\n        .default_create()\n        .expect(\"should create contract\");\n\n    let trait1_handle =\n        ContractHandle::<Trait1Ref>::from_address(contract_handle.contract_address());\n    let counter_handle =\n        ContractHandle::<CounterRef>::from_address(contract_handle.contract_address());\n\n    {\n        let greet_result: u64 = contract_handle\n            .build_call()\n            .call(|has_traits| has_traits.greet(\"World\".into()))\n            .expect(\"Call as Trait1Ref\");\n        assert_eq!(greet_result, GREET_RETURN_VALUE);\n    }\n\n    {\n        let () = trait1_handle\n            .call(|trait1ref| trait1ref.abstract_greet())\n            .expect(\"Call as Trait1Ref\");\n    }\n\n    {\n        let result: u64 = contract_handle\n            .build_call()\n            .call(|trait1ref| trait1ref.adder(1111, 2222))\n            .expect(\"Call as Trait1Ref\");\n        assert_eq!(result, 1111 + 2222);\n    }\n\n    //\n    // Counter trait\n    //\n\n    {\n        let counter_value = counter_handle\n            .call(|counter| counter.get_counter_value())\n            .expect(\"Call\");\n        assert_eq!(counter_value, 0);\n\n        // call increase\n        let () = counter_handle\n            .call(|counter| counter.increment())\n            .expect(\"Call\");\n\n        // get value\n        let counter_value = counter_handle\n            .call(|counter| counter.get_counter_value())\n            .expect(\"Call\");\n\n        // check that the value increased\n        assert_eq!(counter_value, 1);\n\n     
   // call decrease\n        let () = counter_handle\n            .call(|counter| counter.decrement())\n            .expect(\"Call\");\n\n        // get value and compare the difference\n        let counter_value = counter_handle\n            .call(|counter| counter.get_counter_value())\n            .expect(\"Call\");\n        assert_eq!(counter_value, 0);\n    }\n}\n\n#[casper(export)]\npub fn call() {\n    log!(\"Hello\");\n    perform_test();\n    log!(\"🎉 Success\");\n}\n\n#[cfg(test)]\nmod tests {\n    use std::collections::BTreeSet;\n\n    use crate::{Counter, CounterExt, HasTraits, HasTraitsRef};\n\n    use casper_sdk::{\n        abi::{CasperABI, StructField},\n        abi_generator,\n        casper::native::{dispatch, dispatch_with, Environment},\n        casper_executor_wasm_common::flags::EntryPointFlags,\n        log,\n        schema::{SchemaEntryPoint, SchemaType},\n        ContractRef,\n    };\n\n    #[test]\n    fn unit_test() {\n        dispatch(|| {\n            let mut has_traits = HasTraits::default();\n            has_traits.increment();\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn trait_has_schema() {\n        // We can't attach methods to trait itself, but we can generate an \"${TRAIT}Ext\" struct and\n        // attach extra information to it. 
let schema = Trait1::schema();\n        let counter_schema = abi_generator::casper_collect_schema();\n\n        assert_eq!(\n            counter_schema.type_,\n            SchemaType::Contract {\n                state: \"vm2_trait::CounterState\".to_string(),\n            }\n        );\n\n        // Order of entry point definitions is not guaranteed.\n        assert_eq!(\n            BTreeSet::from_iter(counter_schema.entry_points.clone()),\n            BTreeSet::from_iter([\n                SchemaEntryPoint {\n                    name: \"get_counter_value\".to_string(),\n                    arguments: vec![],\n                    result: \"U64\".to_string(),\n                    flags: EntryPointFlags::empty()\n                },\n                SchemaEntryPoint {\n                    name: \"get_counter_state\".to_string(),\n                    arguments: vec![],\n                    result: \"vm2_trait::CounterState\".to_string(),\n                    flags: EntryPointFlags::empty()\n                },\n                SchemaEntryPoint {\n                    name: \"decrement\".to_string(),\n                    arguments: vec![],\n                    result: \"()\".to_string(),\n                    flags: EntryPointFlags::empty()\n                },\n                SchemaEntryPoint {\n                    name: \"increment\".to_string(),\n                    arguments: vec![],\n                    result: \"()\".to_string(),\n                    flags: EntryPointFlags::empty()\n                },\n            ])\n        );\n    }\n\n    #[test]\n    fn schema_has_traits() {\n        let schema = abi_generator::casper_collect_schema();\n\n        assert_eq!(\n            schema.type_,\n            SchemaType::Contract {\n                state: \"vm2_trait::HasTraits\".to_string()\n            }\n        );\n\n        assert!(\n            schema.entry_points.iter().any(|e| e.name == \"foobar\"),\n            \"Method inside impl block\"\n        );\n\n        
assert!(\n            schema.entry_points.iter().any(|e| e.name == \"increment\"),\n            \"Method inside Counter trait\"\n        );\n\n        let get_counter_state = schema\n            .entry_points\n            .iter()\n            .find(|e| e.name == \"get_counter_state\")\n            .unwrap();\n        let counter_state_def = schema\n            .definitions\n            .get(&get_counter_state.result)\n            .expect(\"Has counter state definition\");\n\n        let expected_definition = vec![StructField {\n            name: \"value\".to_string(),\n            decl: <u64>::declaration(),\n        }];\n        assert_eq!(\n            counter_state_def\n                .as_struct()\n                .expect(\"Counter State is struct\"),\n            expected_definition.as_slice()\n        );\n\n        assert!(\n            !schema\n                .entry_points\n                .iter()\n                .any(|e| e.name == \"counter_state\"),\n            \"Trait method marked as private\"\n        );\n        assert!(\n            !schema\n                .entry_points\n                .iter()\n                .any(|e| e.name == \"counter_state_mut\"),\n            \"Trait method marked as private\"\n        );\n\n        let fallback = schema\n            .entry_points\n            .iter()\n            .filter_map(|e| if e.name == \"fallback\" { Some(e) } else { None })\n            .next()\n            .expect(\"Fallback method present in schema\");\n\n        assert_eq!(fallback.flags, EntryPointFlags::FALLBACK);\n    }\n\n    #[test]\n    fn foo() {\n        let _ = dispatch_with(Environment::default(), || {\n            super::perform_test();\n        });\n\n        log!(\"Success\");\n    }\n\n    #[test]\n    fn bar() {\n        let inst = <HasTraitsRef as ContractRef>::new();\n        let _call_data = inst.get_counter_value();\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-upgradable/Cargo.toml",
    "content": "[package]\nname = \"vm2-upgradable\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html\n[lib]\ncrate-type = [\"cdylib\", \"rlib\"]\n\n[dependencies]\ncasper-contract-macros = { path = \"../../../macros\" }\ncasper-contract-sdk = { path = \"../../../sdk\" }\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-upgradable/build.rs",
    "content": "fn main() {\n    // Check if target arch is wasm32 and set link flags accordingly\n    if std::env::var(\"TARGET\").unwrap() == \"wasm32-unknown-unknown\" {\n        println!(\"cargo:rustc-link-arg=--import-memory\");\n        println!(\"cargo:rustc-link-arg=--export-table\");\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-upgradable/src/lib.rs",
    "content": "#![cfg_attr(target_arch = \"wasm32\", no_main)]\n\nuse casper_contract_macros::casper;\nuse casper_contract_sdk::{casper, casper::Entity, log, prelude::*};\n\nconst CURRENT_VERSION: &str = \"v1\";\n\n/// This contract implements a simple flipper.\n#[casper(contract_state)]\npub struct UpgradableContract {\n    /// The current state of the flipper.\n    value: u8,\n    /// The owner of the contract.\n    owner: Entity,\n}\n\nimpl Default for UpgradableContract {\n    fn default() -> Self {\n        panic!(\"Unable to instantiate contract without a constructor\");\n    }\n}\n\n// trait ContractPackage {\n//     fn versions: BTreeMap<>,\n\n// }\n\n#[casper]\nimpl UpgradableContract {\n    #[casper(constructor)]\n    pub fn new(initial_value: u8) -> Self {\n        let caller = casper::get_caller();\n        Self {\n            value: initial_value,\n            owner: caller,\n        }\n    }\n\n    #[casper(constructor)]\n    pub fn default() -> Self {\n        Self::new(Default::default())\n    }\n\n    pub fn increment(&mut self) {\n        self.value += 1;\n    }\n\n    pub fn get(&self) -> u8 {\n        self.value\n    }\n\n    pub fn version(&self) -> &str {\n        CURRENT_VERSION\n    }\n\n    // pub fn is_disabled(&self) {\n    //     self.disabled\n    // }\n\n    // pub fn do_something(&self) {\n    //     if self.disabled {\n    //         panic!(\"nope\")\n\n    //     }\n    // }\n\n    #[skip_arg_parsing]\n    pub fn perform_upgrade(&self, new_code: Vec<u8>) {\n        if casper::get_caller() != self.owner {\n            panic!(\"Only the owner can perform upgrades\");\n        }\n        log!(\"V1: starting upgrade process current value={}\", self.value);\n        log!(\"New code length: {}\", new_code.len());\n        log!(\"New code first 10 bytes: {:?}\", &new_code[..10]);\n        // TODO: Enforce valid wasm validation\n        casper::upgrade(&new_code, Some(\"migrate\"), None).unwrap();\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-upgradable-v2/Cargo.toml",
    "content": "[package]\nname = \"vm2-upgradable-v2\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html\n[lib]\ncrate-type = [\"cdylib\", \"rlib\"]\n\n[dependencies]\ncasper-contract-macros = { path = \"../../../macros\" }\ncasper-contract-sdk = { path = \"../../../sdk\" }\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-upgradable-v2/build.rs",
    "content": "fn main() {\n    // Check if target arch is wasm32 and set link flags accordingly\n    if std::env::var(\"TARGET\").unwrap() == \"wasm32-unknown-unknown\" {\n        println!(\"cargo:rustc-link-arg=--import-memory\");\n        println!(\"cargo:rustc-link-arg=--export-table\");\n    }\n}\n"
  },
  {
    "path": "smart_contracts/contracts/vm2/vm2-upgradable-v2/src/lib.rs",
    "content": "#![cfg_attr(target_arch = \"wasm32\", no_main)]\n#![cfg_attr(target_arch = \"wasm32\", no_std)]\n\nuse casper_contract_macros::casper;\nuse casper_contract_sdk::{\n    casper::{self, Entity},\n    log,\n    serializers::borsh::BorshDeserialize,\n};\n\nconst CURRENT_VERSION: &str = \"v2\";\n\n#[derive(BorshDeserialize, Debug)]\n#[borsh(crate = \"casper_contract_sdk::serializers::borsh\")]\npub struct UpgradableContractV1 {\n    /// The current state of the flipper.\n    value: u8,\n    /// The owner of the contract.\n    owner: Entity,\n}\n\nimpl Default for UpgradableContractV1 {\n    fn default() -> Self {\n        panic!(\"Unable to instantiate contract without a constructor\");\n    }\n}\n\n/// This contract implements a simple flipper.\n#[derive(Debug)]\n#[casper(contract_state)]\npub struct UpgradableContractV2 {\n    /// The current state of the flipper.\n    value: u64,\n    /// The owner of the contract.\n    owner: Entity,\n}\n\nimpl From<UpgradableContractV1> for UpgradableContractV2 {\n    fn from(old: UpgradableContractV1) -> Self {\n        Self {\n            value: old.value as u64,\n            owner: old.owner,\n        }\n    }\n}\n\nimpl Default for UpgradableContractV2 {\n    fn default() -> Self {\n        panic!(\"Unable to instantiate contract without a constructor\");\n    }\n}\n\n#[casper]\nimpl UpgradableContractV2 {\n    #[casper(constructor)]\n    pub fn new(initial_value: u64) -> Self {\n        let caller = casper::get_caller();\n        Self {\n            value: initial_value,\n            owner: caller,\n        }\n    }\n\n    #[casper(constructor)]\n    pub fn default() -> Self {\n        Self::new(Default::default())\n    }\n\n    pub fn increment(&mut self) {\n        self.increment_by(1);\n    }\n\n    pub fn increment_by(&mut self, value: u64) {\n        let old_value = self.value;\n        self.value = value.wrapping_add(value);\n        log!(\n            \"Incrementing value by {value} from {} to {}\",\n     
       old_value,\n            self.value\n        );\n    }\n\n    pub fn get(&self) -> u64 {\n        self.value\n    }\n\n    pub fn version(&self) -> &str {\n        CURRENT_VERSION\n    }\n\n    #[casper(ignore_state)]\n    pub fn migrate() {\n        log!(\"Reading old state...\");\n        let old_state: UpgradableContractV1 = casper::read_state().unwrap();\n        log!(\"Old state {old_state:?}\");\n        let new_state = UpgradableContractV2::from(old_state);\n        log!(\"Success! New state: {new_state:?}\");\n        casper::write_state(&new_state).unwrap();\n    }\n\n    #[casper(ignore_state)]\n    pub fn perform_upgrade() {\n        let new_code = casper::copy_input();\n        log!(\"V2: New code length: {}\", new_code.len());\n        log!(\"V2: New code first 10 bytes: {:?}\", &new_code[..10]);\n\n        let upgrade_result = casper::upgrade(&new_code, Some(\"migrate\"), None);\n        log!(\"{:?}\", upgrade_result);\n    }\n}\n"
  },
  {
    "path": "smart_contracts/macros/Cargo.toml",
    "content": "[package]\nname = \"casper-contract-macros\"\nversion = \"0.1.3\"\nedition = \"2021\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\ndescription = \"Casper contract macros package\"\ndocumentation = \"https://docs.rs/casper-contract-macros\"\nhomepage = \"https://casper.network\"\nrepository = \"https://github.com/casper-network/casper-node/tree/dev/smart_contracts/macros\"\nlicense = \"Apache-2.0\"\n\n[lib]\nproc-macro = true\n\n[dependencies]\nsyn = { version = \"2\", features = [\"full\", \"extra-traits\"] }\nquote = \"1\"\ncasper-executor-wasm-common = { version = \"0.1.3\", path = \"../../executor/wasm_common\" }\ncasper-contract-sdk-sys = { version = \"0.1.3\", path = \"../sdk_sys\" }\npaste = \"1\"\ndarling = \"0.20\"\nproc-macro2 = \"1.0\"\nstatic_assertions = \"1.1.0\"\n\nblake2-rfc = { version = \"0.2.18\", default-features = false, features = [\n    \"std\",\n] }\n\n[features]\ndefault = []\n__abi_generator = []\n__embed_schema = []\n"
  },
  {
    "path": "smart_contracts/macros/src/lib.rs",
    "content": "pub(crate) mod utils;\n\nextern crate proc_macro;\n\nuse darling::{ast, FromAttributes, FromMeta};\nuse proc_macro::TokenStream;\nuse proc_macro2::Span;\nuse quote::{format_ident, quote, ToTokens};\nuse syn::{\n    parse_macro_input, Fields, ItemEnum, ItemFn, ItemImpl, ItemStruct, ItemTrait, ItemUnion,\n    LitStr, Type,\n};\n\nuse casper_executor_wasm_common::flags::EntryPointFlags;\nconst CASPER_RESERVED_FALLBACK_EXPORT: &str = \"__casper_fallback\";\n\n#[derive(Debug, FromAttributes)]\n#[darling(attributes(casper))]\nstruct MethodAttribute {\n    #[darling(default)]\n    constructor: bool,\n    #[darling(default)]\n    ignore_state: bool,\n    #[darling(default)]\n    revert_on_error: bool,\n    /// Explicitly mark method as private so it's not externally callable.\n    #[darling(default)]\n    private: bool,\n    #[darling(default)]\n    payable: bool,\n    #[darling(default)]\n    fallback: bool,\n}\n\n#[derive(Debug, FromMeta)]\nstruct StructMeta {\n    #[darling(default)]\n    path: Option<syn::Path>,\n    /// Contract state is a special struct that is used to store the state of the contract.\n    #[darling(default)]\n    contract_state: bool,\n    /// Message is a special struct that is used to send messages to other contracts.\n    #[darling(default)]\n    message: bool,\n}\n\n#[derive(Debug, FromMeta)]\nstruct EnumMeta {\n    #[darling(default)]\n    path: Option<syn::Path>,\n}\n\n#[derive(Debug, FromMeta)]\nstruct TraitMeta {\n    path: Option<syn::Path>,\n    export: Option<bool>,\n}\n\n#[derive(Debug, FromMeta)]\nenum ItemFnMeta {\n    Export,\n}\n\n#[derive(Debug, FromMeta)]\nstruct ImplTraitForContractMeta {\n    /// Fully qualified path of the trait.\n    #[darling(default)]\n    path: Option<syn::Path>,\n    /// Does not produce Wasm exports for the entry points.\n    #[darling(default)]\n    compile_as_dependency: bool,\n}\n\nfn generate_call_data_return(output: &syn::ReturnType) -> proc_macro2::TokenStream {\n    match output {\n  
      syn::ReturnType::Default => {\n            quote! { () }\n        }\n        syn::ReturnType::Type(_, ty) => match ty.as_ref() {\n            Type::Never(_) => {\n                quote! { () }\n            }\n            Type::Reference(reference) => {\n                // ty.uses_lifetimes(options, lifetimes)\n                let mut new_ref = reference.clone();\n                new_ref.lifetime = Some(syn::Lifetime::new(\"'a\", Span::call_site()));\n                quote! { <<#new_ref as core::ops::Deref>::Target as casper_contract_sdk::prelude::borrow::ToOwned>::Owned }\n            }\n            _ => {\n                quote! { #ty }\n            }\n        },\n    }\n}\n\n#[proc_macro_attribute]\npub fn casper(attrs: TokenStream, item: TokenStream) -> TokenStream {\n    // let attrs: Meta = parse_macro_input!(attrs as Meta);\n    let attr_args = match ast::NestedMeta::parse_meta_list(attrs.into()) {\n        Ok(v) => v,\n        Err(e) => {\n            return TokenStream::from(e.to_compile_error());\n        }\n    };\n\n    let has_fallback_selector = false;\n\n    if let Ok(item_struct) = syn::parse::<ItemStruct>(item.clone()) {\n        let struct_meta = StructMeta::from_list(&attr_args).unwrap();\n        if struct_meta.message {\n            process_casper_message_for_struct(&item_struct, struct_meta)\n        } else if struct_meta.contract_state {\n            // #[casper(contract_state)]\n            process_casper_contract_state_for_struct(&item_struct, struct_meta)\n        } else {\n            // For any other struct that will be part of a schema\n            // #[casper]\n            let partial = generate_casper_state_for_struct(&item_struct, struct_meta);\n            quote! 
{\n                #partial\n            }\n            .into()\n        }\n    } else if let Ok(item_enum) = syn::parse::<ItemEnum>(item.clone()) {\n        let enum_meta = EnumMeta::from_list(&attr_args).unwrap();\n        let partial = generate_casper_state_for_enum(&item_enum, enum_meta);\n        quote! {\n            #partial\n        }\n        .into()\n    } else if let Ok(item_trait) = syn::parse::<ItemTrait>(item.clone()) {\n        let trait_meta = TraitMeta::from_list(&attr_args).unwrap();\n        casper_trait_definition(item_trait, trait_meta)\n    } else if let Ok(entry_points) = syn::parse::<ItemImpl>(item.clone()) {\n        if let Some((_not, trait_path, _for)) = entry_points.trait_.as_ref() {\n            let impl_meta = ImplTraitForContractMeta::from_list(&attr_args).unwrap();\n            generate_impl_trait_for_contract(&entry_points, trait_path, impl_meta)\n        } else {\n            generate_impl_for_contract(entry_points, has_fallback_selector)\n        }\n    } else if let Ok(func) = syn::parse::<ItemFn>(item.clone()) {\n        let func_meta = ItemFnMeta::from_list(&attr_args).unwrap();\n        match func_meta {\n            ItemFnMeta::Export => generate_export_function(&func),\n        }\n    } else {\n        let err = syn::Error::new(\n            Span::call_site(),\n            \"State attribute can only be applied to struct or enum\",\n        );\n        TokenStream::from(err.to_compile_error())\n    }\n}\n\nfn process_casper_message_for_struct(\n    item_struct: &ItemStruct,\n    struct_meta: StructMeta,\n) -> TokenStream {\n    let struct_name = &item_struct.ident;\n\n    let crate_path = match &struct_meta.path {\n        Some(path) => quote! { #path },\n        None => quote! 
{ casper_contract_sdk },\n    };\n\n    let borsh_path = {\n        let crate_path_str = match &struct_meta.path {\n            Some(path) => path.to_token_stream().to_string(),\n            None => \"casper_contract_sdk\".to_string(),\n        };\n        syn::LitStr::new(\n            &format!(\"{}::serializers::borsh\", crate_path_str),\n            Span::call_site(),\n        )\n    };\n\n    let maybe_derive_abi = get_maybe_derive_abi(crate_path.clone());\n\n    let maybe_abi_collectors;\n    let maybe_entrypoint_defs;\n\n    #[cfg(feature = \"__abi_generator\")]\n    {\n        maybe_abi_collectors = quote! {\n            const _: () = {\n                #[#crate_path::linkme::distributed_slice(#crate_path::abi_generator::ABI_COLLECTORS)]\n                #[linkme(crate = #crate_path::linkme)]\n                static COLLECTOR: fn(&mut #crate_path::abi::Definitions) = |defs| {\n                    defs.populate_one::<#struct_name>();\n                };\n            };\n        };\n\n        maybe_entrypoint_defs = quote! {\n            const _: () = {\n                #[#crate_path::linkme::distributed_slice(#crate_path::abi_generator::MESSAGES)]\n                #[linkme(crate = #crate_path::linkme)]\n                static MESSAGE: #crate_path::abi_generator::Message = #crate_path::abi_generator::Message {\n                    name: <#struct_name as #crate_path::Message>::TOPIC,\n                    decl: concat!(module_path!(), \"::\", stringify!(#struct_name)),\n                 };\n            };\n        }\n    }\n    #[cfg(not(feature = \"__abi_generator\"))]\n    {\n        maybe_abi_collectors = quote! {};\n        maybe_entrypoint_defs = quote! {};\n    }\n\n    quote! 
{\n        #[derive(#crate_path::serializers::borsh::BorshSerialize)]\n        #[borsh(crate = #borsh_path)]\n        #maybe_derive_abi\n        #item_struct\n\n        impl #crate_path::Message for #struct_name {\n            const TOPIC: &'static str = stringify!(#struct_name);\n\n            #[inline]\n            fn payload(&self) -> Vec<u8> {\n                #crate_path::serializers::borsh::to_vec(self).unwrap()\n            }\n        }\n\n        #maybe_abi_collectors\n        #maybe_entrypoint_defs\n\n    }\n    .into()\n}\n\nfn generate_export_function(func: &ItemFn) -> TokenStream {\n    let func_name = &func.sig.ident;\n    let mut arg_names = Vec::new();\n    let mut args_attrs = Vec::new();\n    for input in &func.sig.inputs {\n        let (name, ty) = match input {\n            syn::FnArg::Receiver(receiver) => {\n                todo!(\"{receiver:?}\")\n            }\n            syn::FnArg::Typed(typed) => match typed.pat.as_ref() {\n                syn::Pat::Ident(ident) => (&ident.ident, &typed.ty),\n                _ => todo!(\"export: other typed variant\"),\n            },\n        };\n        arg_names.push(name);\n        args_attrs.push(quote! {\n            #name: #ty\n        });\n    }\n    let _ctor_name = format_ident!(\"{func_name}_ctor\");\n\n    let exported_func_name = format_ident!(\"__casper_export_{func_name}\");\n    quote! 
{\n        #[export_name = stringify!(#func_name)]\n        #[no_mangle]\n        pub extern \"C\" fn #exported_func_name() {\n            #[cfg(target_arch = \"wasm32\")]\n            {\n                casper_contract_sdk::set_panic_hook();\n            }\n\n            #func\n\n            #[derive(casper_contract_sdk::serializers::borsh::BorshDeserialize)]\n            #[borsh(crate = \"casper_contract_sdk::serializers::borsh\")]\n            struct Arguments {\n                #(#args_attrs,)*\n            }\n            let input = casper_contract_sdk::prelude::casper::copy_input();\n            let args: Arguments = casper_contract_sdk::serializers::borsh::from_slice(&input).unwrap();\n            let _ret = #func_name(#(args.#arg_names,)*);\n        }\n\n        #[cfg(not(target_arch = \"wasm32\"))]\n        #func\n\n        #[cfg(not(target_arch = \"wasm32\"))]\n        const _: () = {\n            #[casper_contract_sdk::linkme::distributed_slice(casper_contract_sdk::casper::native::ENTRY_POINTS)]\n            #[linkme(crate = casper_contract_sdk::linkme)]\n            pub static EXPORTS: casper_contract_sdk::casper::native::EntryPoint = casper_contract_sdk::casper::native::EntryPoint {\n                kind: casper_contract_sdk::casper::native::EntryPointKind::Function { name: stringify!(#func_name) },\n                fptr: || { #exported_func_name(); },\n                module_path: module_path!(),\n                file: file!(),\n                line: line!(),\n            };\n        };\n    }.into()\n}\n\nfn generate_impl_for_contract(\n    mut entry_points: ItemImpl,\n    _has_fallback_selector: bool,\n) -> TokenStream {\n    #[cfg(feature = \"__abi_generator\")]\n    let mut populate_definitions_linkme = Vec::new();\n    let impl_trait = match entry_points.trait_.as_ref() {\n        Some((None, path, _for)) => Some(path),\n        Some((Some(_not), _path, _for)) => {\n            panic!(\"Exclamation mark not supported\");\n        }\n        None 
=> None,\n    };\n    let struct_name = match entry_points.self_ty.as_ref() {\n        Type::Path(ref path) => &path.path,\n\n        other => todo!(\"Unsupported {other:?}\"),\n    };\n    let defs = vec![quote! {}]; // TODO: Dummy element which may not be necessary but is used for expansion later\n    #[cfg(feature = \"__abi_generator\")]\n    let mut defs = defs;\n    #[cfg(feature = \"__abi_generator\")]\n    let mut defs_linkme = Vec::new();\n    let mut names = Vec::new();\n    let mut extern_entry_points = Vec::new();\n    let _abi_generator_entry_points = [quote! {}]; // TODO: Dummy element which may not be necessary but is used for expansion later\n    let mut manifest_entry_point_enum_variants = Vec::new();\n    let mut manifest_entry_point_enum_match_name = Vec::new();\n    let mut manifest_entry_point_input_data = Vec::new();\n    let mut extra_code = Vec::new();\n\n    for entry_point in &mut entry_points.items {\n        let mut populate_definitions = Vec::new();\n\n        let method_attribute;\n        let mut flag_value = EntryPointFlags::empty();\n\n        // let selector_value;\n\n        let func = match entry_point {\n            syn::ImplItem::Const(_) => todo!(\"Const\"),\n            syn::ImplItem::Fn(ref mut func) => {\n                let vis = &func.vis;\n                match vis {\n                    syn::Visibility::Public(_) => {}\n                    syn::Visibility::Inherited => {\n                        // As the doc says this \"usually means private\"\n                        continue;\n                    }\n                    syn::Visibility::Restricted(_restricted) => {}\n                }\n\n                // func.sig.re\n                let never_returns = match &func.sig.output {\n                    syn::ReturnType::Default => false,\n                    syn::ReturnType::Type(_, ty) => matches!(ty.as_ref(), Type::Never(_)),\n                };\n\n                method_attribute = 
MethodAttribute::from_attributes(&func.attrs).unwrap();\n\n                func.attrs.clear();\n\n                let func_name = func.sig.ident.clone();\n                if func_name.to_string().starts_with(\"__casper_\") {\n                    return TokenStream::from(\n                        syn::Error::new(\n                            Span::call_site(),\n                            \"Function names starting with '__casper_' are reserved\",\n                        )\n                        .to_compile_error(),\n                    );\n                }\n\n                let export_name = if method_attribute.fallback {\n                    format_ident!(\"{}\", CASPER_RESERVED_FALLBACK_EXPORT)\n                } else {\n                    format_ident!(\"{}\", &func_name)\n                };\n\n                names.push(func_name.clone());\n\n                let arg_names_and_types = func\n                    .sig\n                    .inputs\n                    .iter()\n                    .filter_map(|arg| match arg {\n                        syn::FnArg::Receiver(_) => None,\n                        syn::FnArg::Typed(typed) => match typed.pat.as_ref() {\n                            syn::Pat::Ident(ident) => Some((&ident.ident, &typed.ty)),\n                            _ => todo!(),\n                        },\n                    })\n                    .collect::<Vec<_>>();\n\n                let arg_names: Vec<_> =\n                    arg_names_and_types.iter().map(|(name, _ty)| name).collect();\n                let arg_types: Vec<_> = arg_names_and_types.iter().map(|(_name, ty)| ty).collect();\n                let arg_attrs: Vec<_> = arg_names_and_types\n                    .iter()\n                    .map(|(name, ty)| quote! 
{ #name: #ty })\n                    .collect();\n\n                // Entry point has &self or &mut self\n                let mut entry_point_requires_state: bool = false;\n\n                let handle_write_state = match func.sig.inputs.first() {\n                    Some(syn::FnArg::Receiver(receiver)) if receiver.mutability.is_some() => {\n                        entry_point_requires_state = true;\n\n                        if !never_returns && receiver.reference.is_some() {\n                            // &mut self does write updated state\n                            Some(quote! {\n                                casper_contract_sdk::casper::write_state(&instance).unwrap();\n                            })\n                        } else {\n                            // mut self does not write updated state as the\n                            // method call\n                            // will consume self and there's nothing to persist.\n                            None\n                        }\n                    }\n                    Some(syn::FnArg::Receiver(receiver)) if receiver.mutability.is_none() => {\n                        entry_point_requires_state = true;\n\n                        // &self does not write state\n                        None\n                    }\n                    Some(syn::FnArg::Receiver(receiver)) if receiver.lifetime().is_some() => {\n                        panic!(\"Lifetimes are currently not supported\");\n                    }\n                    Some(_) | None => {\n                        if !never_returns && method_attribute.constructor {\n                            Some(quote! 
{\n                                casper_contract_sdk::casper::write_state(&_ret).unwrap();\n                            })\n                        } else {\n                            None\n                        }\n                    }\n                };\n\n                let call_data_return_lifetime = if method_attribute.constructor {\n                    quote! {\n                        #struct_name\n                    }\n                } else {\n                    generate_call_data_return(&func.sig.output)\n                };\n                let _func_sig_output = match &func.sig.output {\n                    syn::ReturnType::Default => {\n                        quote! { () }\n                    }\n                    syn::ReturnType::Type(_, ty) => {\n                        quote! { #ty }\n                    }\n                };\n\n                let handle_ret = if never_returns {\n                    None\n                } else {\n                    match func.sig.output {\n                        syn::ReturnType::Default => {\n                            // Do not call casper_return if there is no return value\n                            None\n                        }\n                        _ if method_attribute.constructor => {\n                            // Constructor does not return serialized state but is expected to save\n                            // state, or explicitly revert.\n                            // TODO: Add support for Result<Self, Error> and revert_on_error if\n                            // possible.\n                            Some(quote! {\n                                let _ = flags; // hide the warning\n                            })\n                        }\n                        syn::ReturnType::Type(..) => {\n                            // There is a return value so call casper_return.\n                            Some(quote! 
{\n                                let ret_bytes = casper_contract_sdk::serializers::borsh::to_vec(&_ret).unwrap();\n                                casper_contract_sdk::casper::ret(flags, Some(&ret_bytes));\n                            })\n                        }\n                    }\n                };\n\n                assert_eq!(arg_names.len(), arg_types.len());\n\n                let mut prelude = Vec::new();\n\n                prelude.push(quote! {\n                    #[derive(casper_contract_sdk::serializers::borsh::BorshDeserialize)]\n                    #[borsh(crate = \"casper_contract_sdk::serializers::borsh\")]\n                    struct Arguments {\n                        #(#arg_attrs,)*\n                    }\n\n\n                    let input = casper_contract_sdk::prelude::casper::copy_input();\n                    let args: Arguments = casper_contract_sdk::serializers::borsh::from_slice(&input).unwrap();\n                });\n\n                if method_attribute.constructor {\n                    prelude.push(quote! {\n                        if casper_contract_sdk::casper::has_state().unwrap() {\n                            panic!(\"State of the contract is already present; unable to proceed with the constructor\");\n                        }\n                    });\n                }\n\n                if !method_attribute.payable {\n                    let panic_msg = format!(\n                        r#\"Entry point \"{func_name}\" is not payable and does not accept tokens\"#\n                    );\n                    prelude.push(quote! 
{\n                        if casper_contract_sdk::casper::transferred_value() != 0 {\n                            // TODO: Be precise and unambiguous about the error\n                            panic!(#panic_msg);\n                        }\n                    });\n                }\n\n                let handle_err = if !never_returns && method_attribute.revert_on_error {\n                    if let syn::ReturnType::Default = func.sig.output {\n                        panic!(\"Cannot revert on error if there is no return value\");\n                    }\n\n                    quote! {\n                        let _ret: &Result<_, _> = &_ret;\n                        if _ret.is_err() {\n                            flags |= casper_contract_sdk::casper_executor_wasm_common::flags::ReturnFlags::REVERT;\n                        }\n\n                    }\n                } else {\n                    quote! {}\n                };\n\n                let handle_call = if entry_point_requires_state {\n                    quote! {\n                        let mut instance: #struct_name = casper_contract_sdk::casper::read_state().unwrap();\n                        let _ret = instance.#func_name(#(args.#arg_names,)*);\n                    }\n                } else if method_attribute.constructor {\n                    quote! {\n                        let _ret = <#struct_name>::#func_name(#(args.#arg_names,)*);\n                    }\n                } else {\n                    quote! 
{\n                        let _ret = <#struct_name>::#func_name(#(args.#arg_names,)*);\n                    }\n                };\n                if method_attribute.constructor {\n                    flag_value |= EntryPointFlags::CONSTRUCTOR;\n                }\n\n                if method_attribute.fallback {\n                    flag_value |= EntryPointFlags::FALLBACK;\n                }\n\n                let _bits = flag_value.bits();\n\n                let extern_func_name = format_ident!(\"__casper_export_{func_name}\");\n\n                extern_entry_points.push(quote! {\n\n                    #[export_name = stringify!(#export_name)]\n                    #vis extern \"C\" fn #extern_func_name() {\n                        // Set panic hook (assumes std is enabled etc.)\n                        #[cfg(target_arch = \"wasm32\")]\n                        {\n                            casper_contract_sdk::set_panic_hook();\n                        }\n\n                        #(#prelude;)*\n\n                        let mut flags = casper_contract_sdk::casper_executor_wasm_common::flags::ReturnFlags::empty();\n\n                        #handle_call;\n\n                        #handle_err;\n\n                        #handle_write_state;\n\n                        #handle_ret;\n                    }\n\n                    #[cfg(not(target_arch = \"wasm32\"))]\n                    const _: () = {\n                        #[casper_contract_sdk::linkme::distributed_slice(casper_contract_sdk::casper::native::ENTRY_POINTS)]\n                        #[linkme(crate = casper_contract_sdk::linkme)]\n                        pub static EXPORTS: casper_contract_sdk::casper::native::EntryPoint = casper_contract_sdk::casper::native::EntryPoint {\n                            kind: casper_contract_sdk::casper::native::EntryPointKind::SmartContract { name: stringify!(#export_name), struct_name: stringify!(#struct_name) },\n                            fptr: || -> () { 
#extern_func_name(); },\n                            module_path: module_path!(),\n                            file: file!(),\n                            line: line!(),\n                        };\n                    };\n\n                });\n\n                manifest_entry_point_enum_variants.push(quote! {\n                    #func_name {\n                        #(#arg_names: #arg_types,)*\n                    }\n                });\n\n                manifest_entry_point_enum_match_name.push(quote! {\n                    #func_name\n                });\n\n                manifest_entry_point_input_data.push(quote! {\n                    Self::#func_name { #(#arg_names,)* } => {\n                        let into_tuple = (#(#arg_names,)*);\n                        into_tuple.serialize(writer)\n                    }\n                });\n\n                match entry_points.self_ty.as_ref() {\n                    Type::Path(ref path) => {\n                        let ident = syn::Ident::new(\n                            &format!(\"{}_{}\", path.path.get_ident().unwrap(), func_name),\n                            Span::call_site(),\n                        );\n\n                        let input_data_content = if arg_names.is_empty() {\n                            quote! {\n                                None\n                            }\n                        } else {\n                            quote! {\n                                Some(casper_contract_sdk::serializers::borsh::to_vec(&self).expect(\"Serialization to succeed\"))\n                            }\n                        };\n\n                        let self_ty =\n                            if method_attribute.constructor || method_attribute.ignore_state {\n                                None\n                            } else {\n                                Some(quote! 
{\n                                   &self,\n                                })\n                            };\n\n                        if !method_attribute.fallback {\n                            extra_code.push(quote! {\n                                        pub fn #func_name<'a>(#self_ty #(#arg_names: #arg_types,)*) -> impl casper_contract_sdk::ToCallData<Return<'a> = #call_data_return_lifetime> {\n                                            #[derive(casper_contract_sdk::serializers::borsh::BorshSerialize, PartialEq, Debug)]\n                                            #[borsh(crate = \"casper_contract_sdk::serializers::borsh\")]\n                                            struct #ident {\n                                                #(#arg_names: #arg_types,)*\n                                            }\n\n                                            impl casper_contract_sdk::ToCallData for #ident {\n                                                // const SELECTOR: vm_common::selector::Selector = vm_common::selector::Selector::new(#selector_value);\n\n                                                type Return<'a> = #call_data_return_lifetime;\n\n                                                fn entry_point(&self) -> &str { stringify!(#func_name) }\n\n                                                fn input_data(&self) -> Option<casper_contract_sdk::serializers::borsh::__private::maybestd::vec::Vec<u8>> {\n                                                    #input_data_content\n                                                }\n                                            }\n\n                                            #ident {\n                                                #(#arg_names,)*\n                                            }\n                                        }\n                                    });\n                        }\n                    }\n\n                    _ => todo!(\"Different self_ty currently 
unsupported\"),\n                }\n\n                func.clone()\n            }\n            syn::ImplItem::Type(_) => todo!(),\n            syn::ImplItem::Macro(_) => todo!(),\n            syn::ImplItem::Verbatim(_) => todo!(),\n            _ => todo!(),\n        };\n\n        let mut args = Vec::new();\n\n        for input in &func.sig.inputs {\n            let typed = match input {\n                syn::FnArg::Receiver(_receiver) => continue,\n                syn::FnArg::Typed(typed) => typed,\n            };\n            // typed\n            let name = match &typed.pat.as_ref() {\n                syn::Pat::Const(_) => todo!(\"Const\"),\n                syn::Pat::Ident(ident) => ident,\n                syn::Pat::Lit(_) => todo!(\"Lit\"),\n                syn::Pat::Macro(_) => todo!(\"Macro\"),\n                syn::Pat::Or(_) => todo!(\"Or\"),\n                syn::Pat::Paren(_) => todo!(\"Paren\"),\n                syn::Pat::Path(_) => todo!(\"Path\"),\n                syn::Pat::Range(_) => todo!(\"Range\"),\n                syn::Pat::Reference(_) => todo!(\"Reference\"),\n                syn::Pat::Rest(_) => todo!(\"Rest\"),\n                syn::Pat::Slice(_) => todo!(\"Slice\"),\n                syn::Pat::Struct(_) => todo!(\"Struct\"),\n                syn::Pat::Tuple(_) => todo!(\"Tuple\"),\n                syn::Pat::TupleStruct(_) => todo!(\"TupleStruct\"),\n                syn::Pat::Type(_) => todo!(\"Type\"),\n                syn::Pat::Verbatim(_) => todo!(\"Verbatim\"),\n                syn::Pat::Wild(_) => todo!(\"Wild\"),\n                _ => todo!(),\n            };\n            let ty = &typed.ty;\n\n            populate_definitions.push(quote! {\n                definitions.populate_one::<#ty>();\n            });\n\n            args.push(quote! 
{\n                casper_contract_sdk::schema::SchemaArgument {\n                    name: stringify!(#name).into(),\n                    decl: <#ty as casper_contract_sdk::abi::CasperABI>::declaration(),\n                }\n            });\n        }\n\n        #[cfg(feature = \"__abi_generator\")]\n        {\n            let bits = flag_value.bits();\n\n            let result = match &func.sig.output {\n                syn::ReturnType::Default => {\n                    populate_definitions.push(quote! {\n                        definitions.populate_one::<()>();\n                    });\n\n                    quote! { <() as casper_contract_sdk::abi::CasperABI>::declaration() }\n                }\n                syn::ReturnType::Type(_, ty) => match ty.as_ref() {\n                    Type::Never(_) => {\n                        populate_definitions.push(quote! {\n                            definitions.populate_one::<()>();\n                        });\n\n                        quote! { <() as casper_contract_sdk::abi::CasperABI>::declaration() }\n                    }\n                    _ => {\n                        populate_definitions.push(quote! {\n                            definitions.populate_one::<#ty>();\n                        });\n\n                        quote! { <#ty as casper_contract_sdk::abi::CasperABI>::declaration() }\n                    }\n                },\n            };\n\n            let func_name = &func.sig.ident;\n\n            let linkme_schema_entry_point_ident =\n                format_ident!(\"__casper_schema_entry_point_{func_name}\");\n\n            defs.push(quote! 
{\n                fn #linkme_schema_entry_point_ident() -> casper_contract_sdk::schema::SchemaEntryPoint {\n                    casper_contract_sdk::schema::SchemaEntryPoint {\n                        name: stringify!(#func_name).into(),\n                        arguments: vec![ #(#args,)* ],\n                        result: #result,\n                        flags: casper_contract_sdk::casper_executor_wasm_common::flags::EntryPointFlags::from_bits(#bits).unwrap(),\n                    }\n                }\n            });\n            defs_linkme.push(linkme_schema_entry_point_ident);\n\n            let linkme_abi_populate_defs_ident =\n                format_ident!(\"__casper_populate_definitions_{func_name}\");\n\n            defs.push(quote! {\n                fn #linkme_abi_populate_defs_ident(definitions: &mut casper_contract_sdk::abi::Definitions) {\n                    #(#populate_definitions)*;\n                }\n            });\n\n            populate_definitions_linkme.push(linkme_abi_populate_defs_ident);\n        }\n    }\n    // let entry_points_len = entry_points.len();\n    let st_name = struct_name.get_ident().unwrap();\n    let maybe_abi_collectors;\n    let maybe_entrypoint_defs;\n    #[cfg(feature = \"__abi_generator\")]\n    {\n        maybe_abi_collectors = quote! {\n            #(\n                const _: () = {\n                    #[casper_contract_sdk::linkme::distributed_slice(casper_contract_sdk::abi_generator::ABI_COLLECTORS)]\n                    #[linkme(crate = casper_contract_sdk::linkme)]\n                    static COLLECTOR: fn(&mut casper_contract_sdk::abi::Definitions) = <#struct_name>::#populate_definitions_linkme;\n                };\n            )*\n        };\n\n        maybe_entrypoint_defs = quote! 
{\n            #(\n\n                const _: () = {\n                    #[casper_contract_sdk::linkme::distributed_slice(casper_contract_sdk::abi_generator::ENTRYPOINTS)]\n                    #[linkme(crate = casper_contract_sdk::linkme)]\n                    static ENTRY_POINTS: fn() -> casper_contract_sdk::schema::SchemaEntryPoint = <#struct_name>::#defs_linkme;\n                };\n            )*\n        }\n    }\n    #[cfg(not(feature = \"__abi_generator\"))]\n    {\n        maybe_abi_collectors = quote! {};\n        maybe_entrypoint_defs = quote! {};\n    }\n    let handle_manifest = match impl_trait {\n        Some(_path) => {\n            // Do not generate a manifest if we're implementing a trait.\n            // The expectation is that you list the traits below under\n            // #[derive(Contract)] and the rest is handled by a macro\n            None\n        }\n        None => Some(quote! {\n\n            #[doc(hidden)]\n            impl #struct_name {\n                #(#defs)*\n            }\n\n            #maybe_abi_collectors\n\n            #maybe_entrypoint_defs\n            #(#extern_entry_points)*\n\n        }),\n    };\n    let ref_struct_name = format_ident!(\"{st_name}Ref\");\n\n    quote! {\n        #entry_points\n\n        #handle_manifest\n\n        impl #ref_struct_name {\n            #(#extra_code)*\n        }\n    }\n    .into()\n}\n\nfn generate_impl_trait_for_contract(\n    entry_points: &ItemImpl,\n    trait_path: &syn::Path,\n    impl_meta: ImplTraitForContractMeta,\n) -> TokenStream {\n    let self_ty = match entry_points.self_ty.as_ref() {\n        Type::Path(ref path) => &path.path,\n        other => todo!(\"Unsupported {other:?}\"),\n    };\n    let self_ty = quote! 
{ #self_ty };\n    let mut code = Vec::new();\n\n    let trait_name = trait_path\n        .segments\n        .last()\n        .expect(\"Expected non-empty path\")\n        .ident\n        .clone();\n\n    let path_to_macro = match &impl_meta.path {\n        Some(path) => quote! { #path },\n        None => {\n            quote! { self }\n        }\n    };\n\n    let path_to_crate: proc_macro2::TokenStream = match &impl_meta.path {\n        Some(path) => {\n            let crate_name = path\n                .segments\n                .first()\n                .expect(\"Expected non-empty path\")\n                .ident\n                .clone();\n\n            if crate_name == \"crate\" {\n                // This is local, can't refer by absolute path\n                quote! { #path }\n            } else {\n                quote! { #crate_name }\n            }\n        }\n        None => {\n            quote! { self }\n        }\n    };\n\n    let macro_name = format_ident!(\"enumerate_{trait_name}_symbols\");\n    let ref_trait = format_ident!(\"{}Ext\", trait_path.segments.last().unwrap().ident);\n    let ref_name = format_ident!(\"{}Ref\", self_ty.to_token_stream().to_string());\n\n    let visitor = if impl_meta.compile_as_dependency {\n        quote! {\n            const _: () = {\n                macro_rules! visitor {\n                    ($( $vis:vis $name:ident as $export_name:ident => $dispatch:ident , $schema:ident , )*) => {\n                        $(\n                            $vis fn $name() {\n                                #path_to_macro::$dispatch::<#self_ty>();\n                            }\n                        )*\n                    }\n                }\n\n                #path_to_crate::#macro_name!(visitor);\n            };\n        }\n    } else {\n        quote! {\n            const _: () = {\n                macro_rules! 
visitor {\n                    ($( $vis:vis $name:ident as $export_name:ident => $dispatch:ident , $schema:ident , )*) => {\n                        $(\n                            #[export_name = stringify!($export_name)]\n                            $vis extern \"C\" fn $name() {\n                                #path_to_macro::$dispatch::<#self_ty>();\n                            }\n\n                            #[cfg(not(target_arch = \"wasm32\"))]\n                            const _: () = {\n                                #[casper_contract_sdk::linkme::distributed_slice(casper_contract_sdk::casper::native::ENTRY_POINTS)]\n                                #[linkme(crate = casper_contract_sdk::linkme)]\n                                pub static EXPORTS: casper_contract_sdk::casper::native::EntryPoint = casper_contract_sdk::casper::native::EntryPoint {\n                                    kind: casper_contract_sdk::casper::native::EntryPointKind::TraitImpl { trait_name: stringify!(#trait_name), impl_name: stringify!(#self_ty), name: stringify!($export_name) },\n                                    fptr: || -> () { $name(); },\n                                    module_path: module_path!(),\n                                    file: file!(),\n                                    line: line!(),\n                                };\n                            };\n\n                            const _: () = {\n                                #[casper_contract_sdk::linkme::distributed_slice(casper_contract_sdk::abi_generator::ENTRYPOINTS)]\n                                #[linkme(crate = casper_contract_sdk::linkme)]\n                                static ENTRY_POINTS: fn() -> casper_contract_sdk::schema::SchemaEntryPoint = <#ref_name as #ref_trait>::$schema;\n                            };\n                        )*\n                    }\n                }\n\n                #path_to_crate::#macro_name!(visitor);\n            };\n        }\n    };\n\n    
code.push(visitor);\n\n    let ref_trait = format_ident!(\"{}Ext\", trait_path.require_ident().unwrap());\n\n    let ref_name = format_ident!(\"{self_ty}Ref\");\n\n    code.push(quote! {\n        impl #ref_trait for #ref_name {}\n    });\n\n    quote! {\n        #entry_points\n\n        #(#code)*\n    }\n    .into()\n}\n\nfn casper_trait_definition(mut item_trait: ItemTrait, trait_meta: TraitMeta) -> TokenStream {\n    let crate_path = match &trait_meta.path {\n        Some(path) => quote! { #path },\n        None => quote! { casper_contract_sdk },\n    };\n\n    let borsh_path = {\n        let crate_path_str = match &trait_meta.path {\n            Some(path) => path.to_token_stream().to_string(),\n            None => \"casper_contract_sdk\".to_string(),\n        };\n        syn::LitStr::new(\n            &format!(\"{}::serializers::borsh\", crate_path_str),\n            Span::call_site(),\n        )\n    };\n\n    let trait_name = &item_trait.ident;\n\n    let vis = &item_trait.vis;\n    let mut dispatch_functions = Vec::new();\n    // let mut dispatch_table = Vec::new();\n    let mut extra_code = Vec::new();\n    // let mut schema_entry_points = Vec::new();\n    let mut populate_definitions = Vec::new();\n    let mut macro_symbols = Vec::new();\n    for entry_point in &mut item_trait.items {\n        match entry_point {\n            syn::TraitItem::Const(_) => todo!(\"Const\"),\n            syn::TraitItem::Fn(func) => {\n                // let vis  =func.vis;\n                let method_attribute = MethodAttribute::from_attributes(&func.attrs).unwrap();\n                func.attrs.clear();\n\n                if method_attribute.private {\n                    continue;\n                }\n\n                let func_name = func.sig.ident.clone();\n                let func_name_str = func_name.to_string();\n\n                if func_name.to_string().starts_with(\"__casper_\") {\n                    return TokenStream::from(\n                        
syn::Error::new(\n                            Span::call_site(),\n                            \"Function names starting with '__casper_' are reserved\",\n                        )\n                        .to_compile_error(),\n                    );\n                }\n\n                let export_name = if method_attribute.fallback {\n                    CASPER_RESERVED_FALLBACK_EXPORT.to_string()\n                } else {\n                    format!(\"{}_{}\", trait_name, func_name_str)\n                };\n\n                let export_ident = format_ident!(\"{}\", &func_name_str);\n\n                let result = match &func.sig.output {\n                    syn::ReturnType::Default => {\n                        populate_definitions.push(quote! {\n                            definitions.populate_one::<()>();\n                        });\n\n                        quote! { <() as #crate_path::abi::CasperABI>::declaration() }\n                    }\n                    syn::ReturnType::Type(_, ty) => {\n                        if let Type::Never(_) = ty.as_ref() {\n                            populate_definitions.push(quote! {\n                                definitions.populate_one::<()>();\n                            });\n\n                            quote! { <() as #crate_path::abi::CasperABI>::declaration() }\n                        } else {\n                            populate_definitions.push(quote! {\n                                definitions.populate_one::<#ty>();\n                            });\n\n                            quote! 
{ <#ty as #crate_path::abi::CasperABI>::declaration() }\n                        }\n                    }\n                };\n\n                let call_data_return_lifetime = generate_call_data_return(&func.sig.output);\n\n                let dispatch_func_name = format_ident!(\"{trait_name}_{func_name}_dispatch\");\n\n                let arg_names_and_types = func\n                    .sig\n                    .inputs\n                    .iter()\n                    .filter_map(|arg| match arg {\n                        syn::FnArg::Receiver(_) => None,\n                        syn::FnArg::Typed(typed) => match typed.pat.as_ref() {\n                            syn::Pat::Ident(ident) => Some((&ident.ident, &typed.ty)),\n                            _ => todo!(),\n                        },\n                    })\n                    .collect::<Vec<_>>();\n\n                let arg_names: Vec<_> =\n                    arg_names_and_types.iter().map(|(name, _ty)| name).collect();\n                let arg_types: Vec<_> = arg_names_and_types.iter().map(|(_name, ty)| ty).collect();\n                // let mut arg_pairs: Vec\n                let args_attrs: Vec<_> = arg_names_and_types\n                    .iter()\n                    .map(|(name, ty)| {\n                        quote! {\n                            #name: #ty\n                        }\n                    })\n                    .collect();\n\n                let mut args = Vec::new();\n                for (name, ty) in &arg_names_and_types {\n                    populate_definitions.push(quote! {\n                        definitions.populate_one::<()>();\n                    });\n                    args.push(quote! 
{\n                        casper_contract_sdk::schema::SchemaArgument {\n                            name: stringify!(#name).into(),\n                            decl: <#ty as #crate_path::abi::CasperABI>::declaration(),\n                        }\n                    });\n                }\n\n                let flags = EntryPointFlags::empty();\n\n                let _flags = flags.bits();\n\n                let handle_dispatch = match func.sig.inputs.first() {\n                    Some(syn::FnArg::Receiver(_receiver)) => {\n                        assert!(\n                            !method_attribute.private,\n                            \"can't make dispatcher for private method\"\n                        );\n                        quote! {\n                            #vis extern \"C\" fn #dispatch_func_name<T>()\n                            where\n                                T: #trait_name\n                                    + #crate_path::serializers::borsh::BorshDeserialize\n                                    + #crate_path::serializers::borsh::BorshSerialize\n                                    + Default\n                            {\n                                #[derive(#crate_path::serializers::borsh::BorshDeserialize)]\n                                #[borsh(crate = #borsh_path)]\n                                struct Arguments {\n                                    #(#args_attrs,)*\n                                }\n\n                                let mut flags = #crate_path::casper_executor_wasm_common::flags::ReturnFlags::empty();\n                                let mut instance: T = #crate_path::casper::read_state().unwrap();\n                                let input = #crate_path::prelude::casper::copy_input();\n                                let args: Arguments = #crate_path::serializers::borsh::from_slice(&input).unwrap();\n\n                                let ret = instance.#func_name(#(args.#arg_names,)*);\n\n             
                   #crate_path::casper::write_state(&instance).unwrap();\n\n                                let ret_bytes = #crate_path::serializers::borsh::to_vec(&ret).unwrap();\n                                #crate_path::casper::ret(flags, Some(&ret_bytes));\n                            }\n                        }\n                    }\n\n                    None | Some(syn::FnArg::Typed(_)) => {\n                        assert!(\n                            !method_attribute.private,\n                            \"can't make dispatcher for private static method\"\n                        );\n                        quote! {\n                            #vis extern \"C\"  fn #dispatch_func_name<T: #trait_name>() {\n                                #[derive(#crate_path::serializers::borsh::BorshDeserialize)]\n                                #[borsh(crate = #borsh_path)]\n                                struct Arguments {\n                                    #(#args_attrs,)*\n                                }\n\n\n                                let input = #crate_path::prelude::casper::copy_input();\n                                let args: Arguments = #crate_path::serializers::borsh::from_slice(&input).unwrap();\n\n\n                                let _ret = <T as #trait_name>::#func_name(#(args.#arg_names,)*);\n                            }\n                        }\n                    }\n                };\n\n                let schema_helper_ident = format_ident!(\"__casper_schema_entry_point_{func_name}\");\n                extra_code.push(quote! 
{\n                    fn #schema_helper_ident () -> casper_contract_sdk::schema::SchemaEntryPoint {\n                        casper_contract_sdk::schema::SchemaEntryPoint {\n                            name: stringify!(#export_name).into(),\n                            arguments: vec![ #(#args,)* ],\n                            result: #result,\n                            flags: casper_contract_sdk::casper_executor_wasm_common::flags::EntryPointFlags::from_bits(#_flags).unwrap(),\n                        }\n                    }\n                });\n\n                macro_symbols.push(quote! {\n                    #vis #func_name as #export_ident => #dispatch_func_name , #schema_helper_ident\n                });\n\n                dispatch_functions.push(quote! { #handle_dispatch });\n\n                let input_data_content = if arg_names.is_empty() {\n                    quote! {\n                        None\n                    }\n                } else {\n                    quote! {\n                        Some(#crate_path::serializers::borsh::to_vec(&self).expect(\"Serialization to succeed\"))\n                    }\n                };\n                let self_ty = if method_attribute.constructor || method_attribute.ignore_state {\n                    None\n                } else {\n                    Some(quote! {\n                        self,\n                    })\n                };\n\n                let is_fallback = method_attribute.fallback;\n\n                if !is_fallback {\n                    let entry_point_lit = LitStr::new(&export_name, Span::call_site());\n                    extra_code.push(quote! 
{\n                        fn #func_name<'a>(#self_ty #(#arg_names: #arg_types,)*) -> impl #crate_path::ToCallData<Return<'a> = #call_data_return_lifetime> {\n                            #[derive(#crate_path::serializers::borsh::BorshSerialize)]\n                            #[borsh(crate = #borsh_path)]\n                            struct CallData {\n                                #(pub #arg_names: #arg_types,)*\n                            }\n\n                            impl #crate_path::ToCallData for CallData {\n                                // const SELECTOR: vm_common::selector::Selector = vm_common::selector::Selector::new(#selector_value);\n\n                                type Return<'a> = #call_data_return_lifetime;\n\n                                fn entry_point(&self) -> &str { #entry_point_lit }\n                                fn input_data(&self) -> Option<Vec<u8>> {\n                                    #input_data_content\n                                }\n                            }\n\n                            CallData {\n                                #(#arg_names,)*\n                            }\n                        }\n                    });\n                }\n            }\n            syn::TraitItem::Type(_) => {\n                return syn::Error::new(Span::call_site(), \"Unsupported generic associated types\")\n                    .to_compile_error()\n                    .into();\n            }\n            syn::TraitItem::Macro(_) => todo!(\"Macro\"),\n            syn::TraitItem::Verbatim(_) => todo!(\"Verbatim\"),\n            other => todo!(\"Other {other:?}\"),\n        }\n    }\n    let ref_struct = format_ident!(\"{trait_name}Ref\");\n    let ref_struct_trait = format_ident!(\"{trait_name}Ext\");\n\n    let macro_name = format_ident!(\"enumerate_{trait_name}_symbols\");\n\n    let maybe_exported_macro = if !trait_meta.export.unwrap_or(false) {\n        quote! 
{\n            #[allow(non_snake_case, unused_macros)]\n            macro_rules! #macro_name {\n                ($mac:ident) => {\n                    $mac! {\n                        #(#macro_symbols,)*\n                    }\n                }\n            }\n            pub(crate) use #macro_name;\n        }\n    } else {\n        quote! {\n            #[allow(non_snake_case, unused_macros)]\n            #[macro_export]\n            macro_rules! #macro_name {\n                ($mac:ident) => {\n                    $mac! {\n                        #(#macro_symbols,)*\n                    }\n                }\n            }\n        }\n    };\n\n    let extension_struct = quote! {\n        #vis trait #ref_struct_trait: Sized {\n            #(#extra_code)*\n        }\n\n        #vis struct #ref_struct;\n\n        impl #ref_struct {\n\n        }\n\n        #maybe_exported_macro\n\n        #(#dispatch_functions)*\n\n        // TODO: Rename Ext with Ref, since Ref struct can be pub(crate)'d\n        impl #ref_struct_trait for #ref_struct {}\n            impl #crate_path::ContractRef for #ref_struct {\n                fn new() -> Self {\n                    #ref_struct\n                }\n            }\n    };\n    quote! {\n        #item_trait\n\n        #extension_struct\n    }\n    .into()\n}\n\nfn generate_casper_state_for_struct(\n    item_struct: &ItemStruct,\n    struct_meta: StructMeta,\n) -> impl quote::ToTokens {\n    let crate_path = match &struct_meta.path {\n        Some(path) => quote! { #path },\n        None => quote! 
{ casper_contract_sdk },\n    };\n\n    let borsh_path = {\n        let crate_path_str = match &struct_meta.path {\n            Some(path) => path.to_token_stream().to_string(),\n            None => \"casper_contract_sdk\".to_string(),\n        };\n        syn::LitStr::new(\n            &format!(\"{}::serializers::borsh\", crate_path_str),\n            Span::call_site(),\n        )\n    };\n    let maybe_derive_abi = get_maybe_derive_abi(crate_path.clone());\n\n    quote! {\n        #[derive(#crate_path::serializers::borsh::BorshSerialize, #crate_path::serializers::borsh::BorshDeserialize)]\n        #[borsh(crate = #borsh_path)]\n        #maybe_derive_abi\n        #item_struct\n    }\n}\n\nfn generate_casper_state_for_enum(\n    item_enum: &ItemEnum,\n    enum_meta: EnumMeta,\n) -> impl quote::ToTokens {\n    let crate_path = match &enum_meta.path {\n        Some(path) => quote! { #path },\n        None => quote! { casper_contract_sdk },\n    };\n\n    let borsh_path = {\n        let crate_path_str = match &enum_meta.path {\n            Some(path) => path.to_token_stream().to_string(),\n            None => \"casper_contract_sdk\".to_string(),\n        };\n        syn::LitStr::new(\n            &format!(\"{}::serializers::borsh\", crate_path_str),\n            Span::call_site(),\n        )\n    };\n\n    let maybe_derive_abi = get_maybe_derive_abi(crate_path.clone());\n\n    quote! {\n        #[derive(#crate_path::serializers::borsh::BorshSerialize, #crate_path::serializers::borsh::BorshDeserialize)]\n        #[borsh(use_discriminant = true, crate = #borsh_path)]\n        #[repr(u32)]\n        #maybe_derive_abi\n        #item_enum\n    }\n}\n\nfn get_maybe_derive_abi(_crate_path: impl ToTokens) -> impl ToTokens {\n    #[cfg(feature = \"__abi_generator\")]\n    {\n        quote! {\n            #[derive(#_crate_path::macros::CasperABI)]\n        }\n    }\n\n    #[cfg(not(feature = \"__abi_generator\"))]\n    {\n        quote! 
{}\n    }\n}\n\nfn process_casper_contract_state_for_struct(\n    contract_struct: &ItemStruct,\n    struct_meta: StructMeta,\n) -> TokenStream {\n    let struct_name = &contract_struct.ident;\n    let ref_name = format_ident!(\"{struct_name}Ref\");\n    let vis = &contract_struct.vis;\n\n    let crate_path = match &struct_meta.path {\n        Some(path) => quote! { #path },\n        None => quote! { casper_contract_sdk },\n    };\n    let borsh_path = {\n        let crate_path_str = match &struct_meta.path {\n            Some(path) => path.to_token_stream().to_string(),\n            None => \"casper_contract_sdk\".to_string(),\n        };\n        syn::LitStr::new(\n            &format!(\"{}::serializers::borsh\", crate_path_str),\n            Span::call_site(),\n        )\n    };\n\n    let maybe_derive_abi = get_maybe_derive_abi(crate_path.clone());\n\n    // Optionally, generate a schema export if the appropriate flag\n    // is set.\n    let maybe_casper_schema = {\n        #[cfg(feature = \"__embed_schema\")]\n        quote! {\n            const SCHEMA: Option<&str> = option_env!(\"__CARGO_CASPER_INJECT_SCHEMA_MARKER\");\n\n            #[no_mangle]\n            pub extern \"C\" fn __casper_schema() {\n                use #crate_path::casper::ret;\n                use #crate_path::casper_executor_wasm_common::flags::ReturnFlags;\n                let bytes = SCHEMA.unwrap_or_default().as_bytes();\n                ret(ReturnFlags::empty(), Some(bytes));\n            }\n        }\n        #[cfg(not(feature = \"__embed_schema\"))]\n        quote! {}\n    };\n\n    quote! 
{\n        #[derive(#crate_path::serializers::borsh::BorshSerialize, #crate_path::serializers::borsh::BorshDeserialize)]\n        #[borsh(crate = #borsh_path)]\n        #maybe_derive_abi\n        #contract_struct\n\n        #vis struct #ref_name;\n\n        impl #crate_path::ContractRef for #ref_name {\n            fn new() -> Self {\n                #ref_name\n            }\n        }\n\n        #maybe_casper_schema\n    }\n    .into()\n}\n\n#[proc_macro_attribute]\npub fn entry_point(_attr: TokenStream, item: TokenStream) -> TokenStream {\n    let func = parse_macro_input!(item as ItemFn);\n\n    let vis = &func.vis;\n    let _sig = &func.sig;\n    let func_name = &func.sig.ident;\n\n    let block = &func.block;\n\n    let mut handle_args = Vec::new();\n    let mut params = Vec::new();\n\n    for arg in &func.sig.inputs {\n        let typed = match arg {\n            syn::FnArg::Receiver(_) => todo!(),\n            syn::FnArg::Typed(typed) => typed,\n        };\n\n        let name = match typed.pat.as_ref() {\n            syn::Pat::Ident(ident) => &ident.ident,\n            _ => todo!(),\n        };\n\n        let ty = &typed.ty;\n\n        let tok = quote! {\n            let #typed = casper_contract_sdk::get_named_arg(stringify!(#name)).expect(\"should get named arg\");\n        };\n        handle_args.push(tok);\n\n        let tok2 = quote! {\n            (stringify!(#name), <#ty>::cl_type())\n        };\n        params.push(tok2);\n    }\n\n    // let len = params.len();\n\n    let output = &func.sig.output;\n\n    // let const_tok =\n\n    let gen = quote! 
{\n        // const paste!(#func_name, _ENTRY_POINT): &str = #func_name;\n\n        #vis fn #func_name() {\n            #(#handle_args)*;\n\n            let closure = || #output {\n                #block\n            };\n\n            let result = closure();\n\n            // casper_contract_sdk::EntryPoint {\n            //     name: #func_name,\n            //     params: &[\n            //         #(#params,)*\n            //     ],\n            //     func: closure,\n            // }\n\n            result.expect(\"should work\")\n        }\n    };\n\n    println!(\"{gen}\");\n\n    // quote!(fn foo() {})\n    // item\n    gen.into()\n}\n\n// #[proc_macro_derive(CasperSchema, attributes(casper))]\n// pub fn derive_casper_schema(input: TokenStream) -> TokenStream {\n//     let contract = parse_macro_input!(input as DeriveInput);\n\n//     let contract_attributes = ContractAttributes::from_attributes(&contract.attrs).unwrap();\n\n//     let _data_struct = match &contract.data {\n//         Data::Struct(s) => s,\n//         Data::Enum(_) => todo!(\"Enum\"),\n//         Data::Union(_) => todo!(\"Union\"),\n//     };\n\n//     let name = &contract.ident;\n\n//     // let mut extra_code = Vec::new();\n//     // if let Some(traits) = contract_attributes.impl_traits {\n//     //     for path in traits.iter() {\n//     //         let ext_struct = format_ident!(\"{}Ref\", path.require_ident().unwrap());\n//     //         extra_code.push(quote! {\n//     //             {\n//     //                 let entry_points = <#ext_struct>::__casper_schema_entry_points();\n//     //                 schema.entry_points.extend(entry_points);\n//     //                 <#ext_struct>::__casper_populate_definitions(&mut schema.definitions);\n//     //             }\n//     //         });\n//     //     }\n\n//     //     let macro_name = format_ident!(\"enumerate_{path}_symbols\");\n\n//     //     extra_code.push(quote! 
{\n//     //         const _: () = {\n//     //             macro_rules! #macro_name {\n//     //                 ($mac:ident) => {\n//     //                     $mac! {\n//     //                         #(#extra_code)*\n//     //                     }\n//     //                 }\n//     //             }\n//     //         }\n//     //     })\n//     // }\n\n//     quote! {\n//         impl casper_contract_sdk::schema::CasperSchema for #name {\n//             fn schema() -> casper_contract_sdk::schema::Schema {\n//                 let mut schema = Self::__casper_schema();\n\n//                 // #(#extra_code)*;\n\n//                 schema\n//                 // schema.entry_points.ext\n//             }\n//         }\n//     }\n//     .into()\n// }\n\n#[proc_macro_derive(CasperABI, attributes(casper))]\npub fn derive_casper_abi(input: TokenStream) -> TokenStream {\n    let res = if let Ok(input) = syn::parse::<ItemStruct>(input.clone()) {\n        let mut populate_definitions = Vec::new();\n        let name = input.ident.clone();\n        let mut items = Vec::new();\n        for field in &input.fields {\n            match &field.ty {\n                Type::Path(path) => {\n                    for segment in &path.path.segments {\n                        let field_name = &field.ident;\n\n                        populate_definitions.push(quote! {\n                            definitions.populate_one::<#segment>();\n                        });\n\n                        items.push(quote! {\n                            casper_contract_sdk::abi::StructField {\n                                name: stringify!(#field_name).into(),\n                                decl: <#segment>::declaration(),\n                            }\n                        });\n                    }\n                }\n                other_ty => todo!(\"Unsupported type {other_ty:?}\"),\n            }\n        }\n\n        Ok(quote! 
{\n            impl casper_contract_sdk::abi::CasperABI for #name {\n                fn populate_definitions(definitions: &mut casper_contract_sdk::abi::Definitions) {\n                    #(#populate_definitions)*;\n                }\n\n                fn declaration() -> casper_contract_sdk::abi::Declaration {\n                    const DECL: &str = concat!(module_path!(), \"::\", stringify!(#name));\n                    DECL.into()\n                }\n\n                fn definition() -> casper_contract_sdk::abi::Definition {\n                    casper_contract_sdk::abi::Definition::Struct {\n                        items: vec![\n                            #(#items,)*\n                        ]\n                    }\n                }\n            }\n        })\n    } else if let Ok(input) = syn::parse::<ItemEnum>(input.clone()) {\n        // TODO: Check visibility\n        let name = input.ident.clone();\n\n        let mut all_definitions = Vec::new();\n        let mut all_variants = Vec::new();\n        let mut populate_definitions = Vec::new();\n        let mut has_unit_definition = false;\n\n        // populate_definitions.push(quote! {\n        //     definitions.populate_one::<#name>();\n        // });\n\n        all_definitions.push(quote! 
{\n            casper_contract_sdk::abi::Definition::Enum {\n                name: stringify!(#name).into(),\n            }\n        });\n\n        let mut current_discriminant = 0;\n\n        for variant in &input.variants {\n            if let Some(discriminant) = &variant.discriminant {\n                match &discriminant.1 {\n                    syn::Expr::Lit(lit) => match &lit.lit {\n                        syn::Lit::Int(int) => {\n                            current_discriminant = int.base10_parse::<u64>().unwrap();\n                        }\n                        _ => todo!(),\n                    },\n                    _ => todo!(),\n                }\n            }\n\n            let variant_name = &variant.ident;\n\n            let variant_decl = match &variant.fields {\n                Fields::Unit => {\n                    // NOTE: Generate an empty struct here for a definition.\n                    if !has_unit_definition {\n                        populate_definitions.push(quote! {\n                            definitions.populate_one::<()>();\n                        });\n                        has_unit_definition = true;\n                    }\n\n                    quote! {\n                        <()>::declaration()\n                    }\n                }\n                Fields::Named(named) => {\n                    let mut fields = Vec::new();\n\n                    let variant_name = format_ident!(\"{name}_{variant_name}\");\n\n                    for field in &named.named {\n                        let field_name = &field.ident;\n                        match &field.ty {\n                            Type::Path(path) => {\n                                populate_definitions.push(quote! {\n                                    definitions.populate_one::<#path>();\n                                });\n\n                                fields.push(quote! 
{\n                                    casper_contract_sdk::abi::StructField {\n                                        name: stringify!(#field_name).into(),\n                                        decl: <#path as casper_contract_sdk::abi::CasperABI>::declaration()\n                                    }\n                                });\n                            }\n                            other_ty => todo!(\"Unsupported type {other_ty:?}\"),\n                        }\n                    }\n\n                    populate_definitions.push(quote! {\n                        definitions.populate_custom(\n                            stringify!(#variant_name).into(),\n                            casper_contract_sdk::abi::Definition::Struct {\n                                items: vec![\n                                    #(#fields,)*\n                                ],\n                            });\n                    });\n\n                    quote! {\n                        stringify!(#variant_name).into()\n                    }\n                }\n                Fields::Unnamed(unnamed_fields) => {\n                    let mut fields = Vec::new();\n\n                    let variant_name = format_ident!(\"{name}_{variant_name}\");\n\n                    for field in &unnamed_fields.unnamed {\n                        match &field.ty {\n                            Type::Path(path) => {\n                                for segment in &path.path.segments {\n                                    let type_name = &segment.ident;\n                                    populate_definitions.push(quote! {\n                                        definitions.populate_one::<#type_name>();\n                                    });\n\n                                    fields.push(quote! 
{\n                                        <#type_name as casper_contract_sdk::abi::CasperABI>::declaration()\n                                    });\n                                }\n                            }\n                            other_ty => todo!(\"Unsupported type {other_ty:?}\"),\n                        }\n                    }\n\n                    populate_definitions.push(quote! {\n                        definitions.populate_custom(\n                            stringify!(#variant_name).into(),\n                            casper_contract_sdk::abi::Definition::Tuple {\n                                items: vec![\n                                    #(#fields,)*\n                                ],\n                            });\n                    });\n\n                    quote! {\n                        stringify!(#variant_name).into()\n                    }\n                }\n            };\n\n            all_variants.push(quote! {\n                casper_contract_sdk::abi::EnumVariant {\n                    name: stringify!(#variant_name).into(),\n                    discriminant: #current_discriminant,\n                    decl: #variant_decl,\n                }\n            });\n\n            current_discriminant += 1;\n        }\n\n        Ok(quote! 
{\n            impl casper_contract_sdk::abi::CasperABI for #name {\n                fn populate_definitions(definitions: &mut casper_contract_sdk::abi::Definitions) {\n                    #(#populate_definitions)*;\n                }\n\n                fn declaration() -> casper_contract_sdk::abi::Declaration {\n                    const DECL: &str = concat!(module_path!(), \"::\", stringify!(#name));\n                    DECL.into()\n                }\n\n                fn definition() -> casper_contract_sdk::abi::Definition {\n                    casper_contract_sdk::abi::Definition::Enum {\n                        items: vec![\n                            #(#all_variants,)*\n                        ],\n                    }\n                }\n            }\n        })\n    } else if syn::parse::<ItemUnion>(input).is_ok() {\n        Err(syn::Error::new(\n            Span::call_site(),\n            \"Borsh schema does not support unions yet.\",\n        ))\n    } else {\n        // Derive macros can only be defined on structs, enums, and unions.\n        unreachable!()\n    };\n    TokenStream::from(match res {\n        Ok(res) => res,\n        Err(err) => err.to_compile_error(),\n    })\n}\n\n#[proc_macro]\npub fn blake2b256(input: TokenStream) -> TokenStream {\n    let input = parse_macro_input!(input as LitStr);\n    let bytes = input.value();\n\n    let hash = utils::compute_blake2b256(bytes.as_bytes());\n\n    TokenStream::from(quote! {\n        [ #(#hash),* ]\n    })\n}\n\n#[proc_macro]\npub fn test(item: TokenStream) -> TokenStream {\n    let input = parse_macro_input!(item as ItemFn);\n    TokenStream::from(quote! 
{\n        #[test]\n        #input\n    })\n}\n\n/// `PanicOnDefault` generates implementation for `Default` trait that panics with the following\n/// message `The contract is not initialized` when `default()` is called.\n///\n/// This is to protect against default-initialization of contracts in a situation where no\n/// constructor is called, and an entrypoint is invoked before the contract is initialized.\n#[proc_macro_derive(PanicOnDefault)]\npub fn derive_no_default(item: TokenStream) -> TokenStream {\n    if let Ok(input) = syn::parse::<ItemStruct>(item) {\n        let name = &input.ident;\n        TokenStream::from(quote! {\n            impl ::core::default::Default for #name {\n                fn default() -> Self {\n                    panic!(\"The contract is not initialized\");\n                }\n            }\n        })\n    } else {\n        TokenStream::from(\n            syn::Error::new(\n                Span::call_site(),\n                \"PanicOnDefault can only be used on type declarations sections.\",\n            )\n            .to_compile_error(),\n        )\n    }\n}\n"
  },
  {
    "path": "smart_contracts/macros/src/utils.rs",
    "content": "pub(crate) fn compute_blake2b256(bytes: &[u8]) -> [u8; 32] {\n    let mut context = blake2_rfc::blake2b::Blake2b::new(32);\n    context.update(bytes);\n    context.finalize().as_bytes().try_into().unwrap()\n}\n"
  },
  {
    "path": "smart_contracts/rust-toolchain",
    "content": "nightly-2025-02-16"
  },
  {
    "path": "smart_contracts/sdk/Cargo.toml",
    "content": "[package]\nname = \"casper-contract-sdk\"\nversion = \"0.1.3\"\nedition = \"2021\"\ndescription = \"Casper contract sdk package\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\ndocumentation = \"https://docs.rs/casper-contract-sdk\"\nhomepage = \"https://casper.network\"\nrepository = \"https://github.com/casper-network/casper-node/tree/dev/smart_contracts/sdk\"\nlicense = \"Apache-2.0\"\n\n[dependencies]\nbase16 = \"0.2.1\"\nbitflags = \"2.9\"\nbnum = { version = \"0.13.0\", features = [\"borsh\", \"num-integer\", \"num-traits\"] }\nborsh = { version = \"1.5\", features = [\"derive\"] }\nbytes = \"1.10\"\ncasper-executor-wasm-common = { version = \"0.1.3\", path = \"../../executor/wasm_common\" }\ncasper-contract-macros = { version = \"0.1.3\", path = \"../macros\" }\ncasper-contract-sdk-sys = { version = \"0.1.3\", path = \"../sdk_sys\" }\ncfg-if = \"1.0.0\"\nclap = { version = \"4\", optional = true, features = [\"derive\"] }\nconst-fnv1a-hash = \"1.1.0\"\nimpl-trait-for-tuples = \"0.2.2\"\nserde = { version = \"1\", features = [\"derive\"] }\nserde_json = \"1\"\nthiserror = { version = \"2\", optional = true }\n\n[target.'cfg(not(target_arch = \"wasm32\"))'.dependencies]\nrand = \"0.8.5\"\nonce_cell = \"1.19.0\"\nlinkme = \"=0.3.29\"\n\n[features]\ndefault = [\"std\"]\nstd = []\n\ncli = [\"clap\", \"thiserror\"]\n__abi_generator = [\"casper-contract-macros/__abi_generator\"]\n__embed_schema = [\"casper-contract-macros/__embed_schema\"]\n"
  },
  {
    "path": "smart_contracts/sdk/src/abi.rs",
    "content": "use core::mem;\n\nuse crate::prelude::{\n    collections,\n    collections::{BTreeMap, BTreeSet, HashMap, LinkedList},\n    str::FromStr,\n};\nuse impl_trait_for_tuples::impl_for_tuples;\nuse serde::{Deserialize, Serialize};\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash, Clone)]\npub struct EnumVariant {\n    pub name: String,\n    pub discriminant: u64,\n    pub decl: Declaration,\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash, Clone)]\npub struct StructField {\n    pub name: String,\n    pub decl: Declaration,\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash, Clone)]\npub enum Primitive {\n    Char,\n    U8,\n    I8,\n    U16,\n    I16,\n    U32,\n    I32,\n    U64,\n    I64,\n    U128,\n    I128,\n    F32,\n    F64,\n    Bool,\n}\n\nimpl FromStr for Primitive {\n    type Err = &'static str;\n\n    fn from_str(s: &str) -> Result<Self, Self::Err> {\n        use Primitive::*;\n        match s {\n            \"Char\" => Ok(Char),\n            \"U8\" => Ok(U8),\n            \"I8\" => Ok(I8),\n            \"U16\" => Ok(U16),\n            \"I16\" => Ok(I16),\n            \"U32\" => Ok(U32),\n            \"I32\" => Ok(I32),\n            \"U64\" => Ok(U64),\n            \"I64\" => Ok(I64),\n            \"U128\" => Ok(U128),\n            \"I128\" => Ok(I128),\n            \"F32\" => Ok(F32),\n            \"F64\" => Ok(F64),\n            \"Bool\" => Ok(Bool),\n            _ => Err(\"Unknown primitive type\"),\n        }\n    }\n}\n\npub trait Keyable {\n    const PRIMITIVE: Primitive;\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone, Hash)]\n#[serde(tag = \"type\")]\npub enum Definition {\n    /// Primitive type.\n    ///\n    /// Examples: u64, i32, f32, bool, etc\n    Primitive(Primitive),\n    /// A mapping.\n    ///\n    /// Example Rust types: BTreeMap<K, V>.\n    Mapping {\n        key: Declaration,\n        value: 
Declaration,\n    },\n    /// Arbitrary sequence of values.\n    ///\n    /// Example Rust types: `Vec<T>`, `&[T]`, `[T; N]`, `Box<[T]>`\n    Sequence {\n        /// If length is known, then it specifies that this definition should be be represented as\n        /// an array of a fixed size.\n        decl: Declaration,\n    },\n    FixedSequence {\n        /// If length is known, then it specifies that this definition should be be represented as\n        /// an array of a fixed size.\n        length: u32, // None -> Vec<T> Some(N) [T; N]\n        decl: Declaration,\n    },\n    /// A tuple of multiple values of various types.\n    ///\n    /// Can be also used to represent a heterogeneous list.\n    Tuple {\n        items: Vec<Declaration>,\n    },\n    Enum {\n        items: Vec<EnumVariant>,\n    },\n    Struct {\n        items: Vec<StructField>,\n    },\n}\n\nimpl Definition {\n    pub fn unit() -> Self {\n        // Empty struct should be equivalent to `()` in Rust in other languages.\n        Definition::Tuple { items: Vec::new() }\n    }\n\n    pub fn as_struct(&self) -> Option<&[StructField]> {\n        if let Self::Struct { items } = self {\n            Some(items.as_slice())\n        } else {\n            None\n        }\n    }\n\n    pub fn as_enum(&self) -> Option<&[EnumVariant]> {\n        if let Self::Enum { items } = self {\n            Some(items.as_slice())\n        } else {\n            None\n        }\n    }\n\n    pub fn as_tuple(&self) -> Option<&[Declaration]> {\n        if let Self::Tuple { items } = self {\n            Some(items.as_slice())\n        } else {\n            None\n        }\n    }\n}\n\n#[derive(Default, Debug, Serialize, Deserialize, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]\npub struct Definitions(BTreeMap<Declaration, Definition>);\n\nimpl Definitions {\n    pub fn populate_one<T: CasperABI>(&mut self) {\n        T::populate_definitions(self);\n\n        let decl = T::declaration();\n        let def = T::definition();\n\n  
      self.populate_custom(decl, def);\n    }\n\n    pub fn populate_custom(&mut self, decl: Declaration, def: Definition) {\n        let previous = self.0.insert(decl.clone(), def.clone());\n        if previous.is_some() && previous != Some(def.clone()) {\n            panic!(\"Type {decl} has multiple definitions ({previous:?} != {def:?}).\");\n        }\n    }\n\n    pub fn iter(&self) -> impl Iterator<Item = (&Declaration, &Definition)> {\n        self.0.iter()\n    }\n\n    pub fn get(&self, decl: &str) -> Option<&Definition> {\n        self.0.get(decl)\n    }\n\n    pub fn first(&self) -> Option<(&Declaration, &Definition)> {\n        self.0.iter().next()\n    }\n\n    /// Returns true if the given declaration has a definition in this set.\n    pub fn has_definition(&self, decl: &Declaration) -> bool {\n        self.0.contains_key(decl)\n    }\n}\n\nimpl IntoIterator for Definitions {\n    type Item = (Declaration, Definition);\n    type IntoIter = collections::btree_map::IntoIter<Declaration, Definition>;\n\n    fn into_iter(self) -> Self::IntoIter {\n        self.0.into_iter()\n    }\n}\n\npub type Declaration = String;\n\npub trait CasperABI {\n    fn populate_definitions(definitions: &mut Definitions);\n    fn declaration() -> Declaration; // \"String\"\n    fn definition() -> Definition; // Sequence { Char }\n}\n\nimpl<T> CasperABI for &T\nwhere\n    T: CasperABI,\n{\n    fn populate_definitions(definitions: &mut Definitions) {\n        T::populate_definitions(definitions);\n    }\n\n    fn declaration() -> Declaration {\n        T::declaration()\n    }\n\n    fn definition() -> Definition {\n        T::definition()\n    }\n}\n\nimpl<T> CasperABI for Box<T>\nwhere\n    T: CasperABI,\n{\n    fn populate_definitions(definitions: &mut Definitions) {\n        T::populate_definitions(definitions);\n    }\n\n    fn declaration() -> Declaration {\n        T::declaration()\n    }\n\n    fn definition() -> Definition {\n        T::definition()\n    
}\n}\n\nmacro_rules! impl_abi_for_types {\n    // Accepts following syntax: impl_abi_for_types(u8, u16, u32, u64, String => \"string\", f32, f64)\n    ($($ty:ty $(=> $name:expr)?,)* ) => {\n        $(\n            impl_abi_for_types!(@impl $ty $(=> $name)?);\n        )*\n    };\n\n    (@impl $ty:ty ) => {\n       impl_abi_for_types!(@impl $ty => stringify!($ty));\n    };\n\n    (@impl $ty:ty => $def:expr ) => {\n        impl CasperABI for $ty {\n            fn populate_definitions(_definitions: &mut Definitions) {\n            }\n\n            fn declaration() -> Declaration {\n                stringify!($def).into()\n            }\n\n            fn definition() -> Definition {\n                use Primitive::*;\n                const PRIMITIVE: Primitive = $def;\n                Definition::Primitive(PRIMITIVE)\n            }\n        }\n\n        impl Keyable for $ty {\n            const PRIMITIVE: Primitive = {\n                use Primitive::*;\n                $def\n            };\n        }\n    };\n}\n\nimpl CasperABI for () {\n    fn populate_definitions(_definitions: &mut Definitions) {}\n\n    fn declaration() -> Declaration {\n        \"()\".into()\n    }\n\n    fn definition() -> Definition {\n        Definition::unit()\n    }\n}\n\nimpl_abi_for_types!(\n    char => Char,\n    bool => Bool,\n    u8 => U8,\n    u16 => U16,\n    u32 => U32,\n    u64 => U64,\n    u128 => U128,\n    i8 => I8,\n    i16 => I16,\n    i32 => I32,\n    i64 => I64,\n    f32 => F32,\n    f64 => F64,\n    i128 => I128,\n);\n\n#[impl_for_tuples(1, 12)]\nimpl CasperABI for Tuple {\n    fn populate_definitions(_definitions: &mut Definitions) {\n        for_tuples!( #( _definitions.populate_one::<Tuple>(); )* )\n    }\n\n    fn declaration() -> Declaration {\n        let items = <[_]>::into_vec(Box::new([for_tuples!( #( Tuple::declaration() ),* )]));\n        format!(\"({})\", items.join(\", \"))\n    }\n\n    fn definition() -> Definition {\n        let items = 
<[_]>::into_vec(Box::new([for_tuples!( #( Tuple::declaration() ),* )]));\n        Definition::Tuple { items }\n    }\n}\n\nimpl<T: CasperABI, E: CasperABI> CasperABI for Result<T, E> {\n    fn populate_definitions(definitions: &mut Definitions) {\n        definitions.populate_one::<T>();\n        definitions.populate_one::<E>();\n    }\n\n    fn declaration() -> Declaration {\n        let t_decl = T::declaration();\n        let e_decl = E::declaration();\n        format!(\"Result<{t_decl}, {e_decl}>\")\n    }\n\n    fn definition() -> Definition {\n        Definition::Enum {\n            items: vec![\n                EnumVariant {\n                    name: \"Ok\".into(),\n                    discriminant: 0,\n                    decl: T::declaration(),\n                },\n                EnumVariant {\n                    name: \"Err\".into(),\n                    discriminant: 1,\n                    decl: E::declaration(),\n                },\n            ],\n        }\n    }\n}\n\nimpl<T: CasperABI> CasperABI for Option<T> {\n    fn declaration() -> Declaration {\n        format!(\"Option<{}>\", T::declaration())\n    }\n    fn definition() -> Definition {\n        Definition::Enum {\n            items: vec![\n                EnumVariant {\n                    name: \"None\".into(),\n                    discriminant: 0,\n                    decl: <()>::declaration(),\n                },\n                EnumVariant {\n                    name: \"Some\".into(),\n                    discriminant: 1,\n                    decl: T::declaration(),\n                },\n            ],\n        }\n    }\n\n    fn populate_definitions(definitions: &mut Definitions) {\n        definitions.populate_one::<()>();\n        definitions.populate_one::<T>();\n    }\n}\n\nimpl<T: CasperABI> CasperABI for Vec<T> {\n    fn populate_definitions(definitions: &mut Definitions) {\n        definitions.populate_one::<T>();\n    }\n\n    fn declaration() -> Declaration {\n        
format!(\"Vec<{}>\", T::declaration())\n    }\n    fn definition() -> Definition {\n        Definition::Sequence {\n            decl: T::declaration(),\n        }\n    }\n}\n\nimpl<T: CasperABI, const N: usize> CasperABI for [T; N] {\n    fn populate_definitions(definitions: &mut Definitions) {\n        definitions.populate_one::<T>();\n    }\n\n    fn declaration() -> Declaration {\n        format!(\"[{}; {N}]\", T::declaration())\n    }\n    fn definition() -> Definition {\n        Definition::FixedSequence {\n            length: N.try_into().expect(\"N is too big\"),\n            decl: T::declaration(),\n        }\n    }\n}\n\nimpl<K: CasperABI, V: CasperABI> CasperABI for BTreeMap<K, V> {\n    fn populate_definitions(definitions: &mut Definitions) {\n        definitions.populate_one::<K>();\n        definitions.populate_one::<V>();\n    }\n\n    fn declaration() -> Declaration {\n        format!(\"BTreeMap<{}, {}>\", K::declaration(), V::declaration())\n    }\n\n    fn definition() -> Definition {\n        Definition::Mapping {\n            key: K::declaration(),\n            value: V::declaration(),\n        }\n    }\n}\n\nimpl<K: CasperABI, V: CasperABI> CasperABI for HashMap<K, V> {\n    fn populate_definitions(definitions: &mut Definitions) {\n        definitions.populate_one::<K>();\n        definitions.populate_one::<V>();\n    }\n\n    fn declaration() -> Declaration {\n        format!(\"HashMap<{}, {}>\", K::declaration(), V::declaration())\n    }\n\n    fn definition() -> Definition {\n        Definition::Mapping {\n            key: K::declaration(),\n            value: V::declaration(),\n        }\n    }\n}\n\nimpl CasperABI for String {\n    fn populate_definitions(_definitions: &mut Definitions) {}\n\n    fn declaration() -> Declaration {\n        \"String\".into()\n    }\n    fn definition() -> Definition {\n        Definition::Sequence {\n            decl: char::declaration(),\n        }\n    }\n}\n\nimpl CasperABI for str {\n    fn 
populate_definitions(_definitions: &mut Definitions) {}\n\n    fn declaration() -> Declaration {\n        \"String\".into()\n    }\n    fn definition() -> Definition {\n        Definition::Sequence {\n            decl: char::declaration(),\n        }\n    }\n}\n\nimpl CasperABI for &str {\n    fn populate_definitions(_definitions: &mut Definitions) {}\n\n    fn declaration() -> Declaration {\n        \"String\".into()\n    }\n\n    fn definition() -> Definition {\n        Definition::Sequence {\n            decl: char::declaration(),\n        }\n    }\n}\n\nimpl<T: CasperABI> CasperABI for LinkedList<T> {\n    fn populate_definitions(definitions: &mut Definitions) {\n        definitions.populate_one::<T>();\n    }\n\n    fn declaration() -> Declaration {\n        format!(\"LinkedList<{}>\", T::declaration())\n    }\n    fn definition() -> Definition {\n        Definition::Sequence {\n            decl: T::declaration(),\n        }\n    }\n}\n\nimpl<T: CasperABI> CasperABI for BTreeSet<T> {\n    fn populate_definitions(definitions: &mut Definitions) {\n        definitions.populate_one::<T>();\n    }\n\n    fn declaration() -> Declaration {\n        format!(\"BTreeSet<{}>\", T::declaration())\n    }\n    fn definition() -> Definition {\n        Definition::Sequence {\n            decl: T::declaration(),\n        }\n    }\n}\n\nimpl<const N: usize> CasperABI for bnum::BUint<N> {\n    fn populate_definitions(definitions: &mut Definitions) {\n        definitions.populate_one::<u64>();\n    }\n\n    fn declaration() -> Declaration {\n        let width_bytes: usize = mem::size_of::<bnum::BUint<N>>();\n        let width_bits: usize = width_bytes * 8;\n        format!(\"U{width_bits}\")\n    }\n\n    fn definition() -> Definition {\n        let length: u32 = N.try_into().expect(\"N is too big\");\n        Definition::FixedSequence {\n            length,\n            decl: u64::declaration(),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{\n        
abi::{CasperABI, Definition},\n        types::U256,\n    };\n\n    #[test]\n    fn u256_schema() {\n        assert_eq!(U256::declaration(), \"U256\");\n        assert_eq!(\n            U256::definition(),\n            Definition::FixedSequence {\n                length: 4,\n                decl: u64::declaration()\n            }\n        );\n\n        let mut value = U256::from(u128::MAX);\n        value += U256::from(1u64);\n        let bytes = borsh::to_vec(&value).unwrap();\n        // Ensure bnum's borsh serialize/deserialize is what we consider \"FixedSequence\"\n        let bytes_back: [u64; 4] = borsh::from_slice(&bytes).unwrap();\n        let value_back = U256::from_digits(bytes_back);\n        assert_eq!(value, value_back);\n    }\n}\n"
  },
  {
    "path": "smart_contracts/sdk/src/abi_generator.rs",
    "content": "use core::{mem, ptr::NonNull};\n\nuse crate::{\n    abi::{Declaration, Definitions},\n    linkme::distributed_slice,\n    schema::{Schema, SchemaMessage, SchemaType},\n};\n\n#[derive(Debug)]\npub struct Param {\n    pub name: &'static str,\n    pub decl: Declaration,\n}\n\n#[derive(Debug)]\npub struct EntryPoint {\n    pub name: &'static str,\n    pub params: &'static [&'static Param],\n    pub result_decl: Declaration,\n}\n\n#[derive(Debug, Clone)]\npub struct Message {\n    pub name: &'static str,\n    pub decl: &'static str,\n}\n\npub struct Manifest {\n    pub name: &'static str,\n    pub entry_points: &'static [EntryPoint],\n}\n\n/// All of the entry points generated by proc macro will be registered here.\n#[distributed_slice]\n#[linkme(crate = crate::linkme)]\npub static ENTRYPOINTS: [fn() -> crate::schema::SchemaEntryPoint] = [..];\n\n#[distributed_slice]\n#[linkme(crate = crate::linkme)]\npub static ABI_COLLECTORS: [fn(&mut crate::abi::Definitions)] = [..];\n\n#[distributed_slice]\n#[linkme(crate = crate::linkme)]\npub static MESSAGES: [Message] = [..];\n\npub fn casper_collect_schema() -> Schema {\n    // Collect definitions\n    let definitions = {\n        let mut definitions = Definitions::default();\n\n        for abi_collector in ABI_COLLECTORS {\n            abi_collector(&mut definitions);\n        }\n\n        definitions\n    };\n\n    // Collect messages\n    let messages = {\n        let mut messages = Vec::new();\n\n        for message in MESSAGES {\n            messages.push(SchemaMessage {\n                name: message.name.to_owned(),\n                decl: message.decl.to_owned(),\n            });\n        }\n\n        messages\n    };\n\n    // Collect entrypoints\n    let entry_points = {\n        let mut entry_points = Vec::new();\n        for entrypoint in ENTRYPOINTS {\n            entry_points.push(entrypoint());\n        }\n        entry_points\n    };\n\n    // Construct a schema object from the extracted 
information\n    Schema {\n        name: \"contract\".to_string(),\n        version: None,\n        type_: SchemaType::Contract {\n            state: \"Contract\".to_string(),\n        },\n        definitions,\n        entry_points,\n        messages,\n    }\n}\n\n/// This function is called by the host to collect the schema from the contract.\n///\n/// This is considered internal implementation detail and should not be used directly.\n/// Primary user of this API is `cargo-casper` tool that will use it to extract schema from the\n/// contract.\n///\n/// # Safety\n/// Pointer to json bytes passed to the callback is valid only within the scope of that function.\n#[export_name = \"__cargo_casper_collect_schema\"]\npub unsafe extern \"C\" fn cargo_casper_collect_schema(size_ptr: *mut u64) -> *mut u8 {\n    let schema = casper_collect_schema();\n    // Write the schema using the provided writer\n    let mut json_bytes = serde_json::to_vec(&schema).expect(\"Serialized schema\");\n    NonNull::new(size_ptr)\n        .expect(\"expected non-null ptr\")\n        .write(json_bytes.len().try_into().expect(\"usize to u64\"));\n    let ptr = json_bytes.as_mut_ptr();\n    mem::forget(json_bytes);\n    ptr\n}\n"
  },
  {
    "path": "smart_contracts/sdk/src/casper/native.rs",
    "content": "use std::{\n    cell::RefCell,\n    collections::{BTreeMap, BTreeSet, VecDeque},\n    convert::Infallible,\n    fmt,\n    panic::{self, UnwindSafe},\n    ptr::{self, NonNull},\n    slice,\n    sync::{Arc, RwLock},\n};\n\nuse crate::linkme::distributed_slice;\nuse bytes::Bytes;\nuse casper_executor_wasm_common::{\n    env_info::EnvInfo,\n    error::{\n        CALLEE_REVERTED, CALLEE_SUCCEEDED, CALLEE_TRAPPED, HOST_ERROR_INTERNAL,\n        HOST_ERROR_NOT_FOUND, HOST_ERROR_SUCCESS,\n    },\n    flags::ReturnFlags,\n};\n#[cfg(not(target_arch = \"wasm32\"))]\nuse rand::Rng;\n\nuse super::Entity;\nuse crate::types::Address;\n\n/// The kind of export that is being registered.\n///\n/// This is used to identify the type of export and its name.\n///\n/// Depending on the location of given function it may be registered as a:\n///\n/// * `SmartContract` (if it's part of a `impl Contract` block),\n/// * `TraitImpl` (if it's part of a `impl Trait for Contract` block),\n/// * `Function` (if it's a standalone function).\n///\n/// This is used to dispatch exports under native code i.e. 
you want to write a test that calls\n/// \"foobar\" regardless of location.\n#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]\npub enum EntryPointKind {\n    /// Smart contract.\n    ///\n    /// This is used to identify the smart contract and its name.\n    ///\n    /// The `struct_name` is the name of the smart contract that is being registered.\n    /// The `name` is the name of the function that is being registered.\n    SmartContract {\n        struct_name: &'static str,\n        name: &'static str,\n    },\n    /// Trait implementation.\n    ///\n    /// This is used to identify the trait implementation and its name.\n    ///\n    /// The `trait_name` is the name of the trait that is being implemented.\n    /// The `impl_name` is the name of the implementation.\n    /// The `name` is the name of the function that is being implemented.\n    TraitImpl {\n        trait_name: &'static str,\n        impl_name: &'static str,\n        name: &'static str,\n    },\n    /// Function export.\n    ///\n    /// This is used to identify the function export and its name.\n    ///\n    /// The `name` is the name of the function that is being exported.\n    Function { name: &'static str },\n}\n\nimpl EntryPointKind {\n    pub fn name(&self) -> &'static str {\n        match self {\n            EntryPointKind::SmartContract { name, .. }\n            | EntryPointKind::TraitImpl { name, .. 
}\n            | EntryPointKind::Function { name } => name,\n        }\n    }\n}\n\n/// Export is a structure that contains information about the exported function.\n///\n/// This is used to register the export and its name and physical location in the smart contract\n/// source code.\npub struct EntryPoint {\n    /// The kind of entry point that is being registered.\n    pub kind: EntryPointKind,\n    pub fptr: fn() -> (),\n    pub module_path: &'static str,\n    pub file: &'static str,\n    pub line: u32,\n}\n\n#[distributed_slice]\n#[linkme(crate = crate::linkme)]\npub static ENTRY_POINTS: [EntryPoint];\n\nimpl fmt::Debug for EntryPoint {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        let Self {\n            kind,\n            fptr: _,\n            module_path,\n            file,\n            line,\n        } = self;\n\n        f.debug_struct(\"Export\")\n            .field(\"kind\", kind)\n            .field(\"fptr\", &\"<fptr>\")\n            .field(\"module_path\", module_path)\n            .field(\"file\", file)\n            .field(\"line\", line)\n            .finish()\n    }\n}\n\n/// Invokes an export by its name.\n///\n/// This function is used to invoke an export by its name regardless of its location in the smart\n/// contract.\npub fn invoke_export_by_name(name: &str) {\n    let exports_by_name: Vec<_> = ENTRY_POINTS\n        .iter()\n        .filter(|export| export.kind.name() == name)\n        .collect();\n\n    assert_eq!(\n        exports_by_name.len(),\n        1,\n        \"Expected exactly one export {name} found, but got {exports_by_name:?}\"\n    );\n\n    (exports_by_name[0].fptr)();\n}\n\n#[derive(Debug)]\npub enum NativeTrap {\n    Return(ReturnFlags, Bytes),\n    Panic(Box<dyn std::any::Any + Send + 'static>),\n}\n\npub type Container = BTreeMap<u64, BTreeMap<Bytes, Bytes>>;\n\n#[derive(Clone, Debug)]\n#[allow(dead_code)]\npub struct NativeParam(pub(crate) String);\n\nimpl From<&casper_contract_sdk_sys::Param> 
for NativeParam {\n    fn from(val: &casper_contract_sdk_sys::Param) -> Self {\n        let name =\n            String::from_utf8_lossy(unsafe { slice::from_raw_parts(val.name_ptr, val.name_len) })\n                .into_owned();\n        NativeParam(name)\n    }\n}\n\n#[derive(Clone, Debug)]\npub struct Environment {\n    pub db: Arc<RwLock<Container>>,\n    contracts: Arc<RwLock<BTreeSet<Address>>>,\n    // input_data: Arc<RwLock<Option<Bytes>>>,\n    input_data: Option<Bytes>,\n    caller: Entity,\n    callee: Entity,\n}\n\nimpl Default for Environment {\n    fn default() -> Self {\n        Self {\n            db: Default::default(),\n            contracts: Default::default(),\n            input_data: Default::default(),\n            caller: DEFAULT_ADDRESS,\n            callee: DEFAULT_ADDRESS,\n        }\n    }\n}\n\npub const DEFAULT_ADDRESS: Entity = Entity::Account([42; 32]);\n\nimpl Environment {\n    #[must_use]\n    pub fn new(db: Container, caller: Entity) -> Self {\n        Self {\n            db: Arc::new(RwLock::new(db)),\n            contracts: Default::default(),\n            input_data: Default::default(),\n            caller,\n            callee: caller,\n        }\n    }\n\n    #[must_use]\n    pub fn with_caller(&self, caller: Entity) -> Self {\n        let mut env = self.clone();\n        env.caller = caller;\n        env\n    }\n\n    #[must_use]\n    pub fn smart_contract(&self, callee: Entity) -> Self {\n        let mut env = self.clone();\n        env.caller = self.callee;\n        env.callee = callee;\n        env\n    }\n\n    #[must_use]\n    pub fn session(&self, callee: Entity) -> Self {\n        let mut env = self.clone();\n        env.caller = callee;\n        env.callee = callee;\n        env\n    }\n\n    #[must_use]\n    pub fn with_callee(&self, callee: Entity) -> Self {\n        let mut env = self.clone();\n        env.callee = callee;\n        env\n    }\n\n    #[must_use]\n    pub fn with_input_data(&self, input_data: 
Vec<u8>) -> Self {\n        let mut env = self.clone();\n        env.input_data = Some(Bytes::from(input_data));\n        env\n    }\n}\n\nimpl Environment {\n    fn key_prefix(&self, key: &[u8]) -> Vec<u8> {\n        let entity = self.callee;\n\n        let mut bytes = Vec::new();\n        bytes.extend(entity.tag().to_le_bytes());\n        bytes.extend(entity.address());\n        bytes.extend(key);\n\n        bytes\n    }\n\n    fn casper_read(\n        &self,\n        key_space: u64,\n        key_ptr: *const u8,\n        key_size: usize,\n        info: *mut casper_contract_sdk_sys::ReadInfo,\n        alloc: extern \"C\" fn(usize, *mut core::ffi::c_void) -> *mut u8,\n        alloc_ctx: *const core::ffi::c_void,\n    ) -> Result<u32, NativeTrap> {\n        let key_bytes = unsafe { slice::from_raw_parts(key_ptr, key_size) };\n        let key_bytes = self.key_prefix(key_bytes);\n\n        let Ok(db) = self.db.read() else {\n            return Ok(HOST_ERROR_INTERNAL);\n        };\n\n        let value = match db.get(&key_space) {\n            Some(values) => values.get(key_bytes.as_slice()).cloned(),\n            None => return Ok(HOST_ERROR_NOT_FOUND),\n        };\n        match value {\n            Some(tagged_value) => {\n                let ptr = NonNull::new(alloc(tagged_value.len(), alloc_ctx as _));\n\n                if let Some(ptr) = ptr {\n                    unsafe {\n                        (*info).data = ptr.as_ptr();\n                        (*info).size = tagged_value.len();\n                    }\n\n                    unsafe {\n                        ptr::copy_nonoverlapping(\n                            tagged_value.as_ptr(),\n                            ptr.as_ptr(),\n                            tagged_value.len(),\n                        );\n                    }\n                }\n\n                Ok(HOST_ERROR_SUCCESS)\n            }\n            None => Ok(HOST_ERROR_NOT_FOUND),\n        }\n    }\n\n    fn casper_write(\n        &self,\n     
   key_space: u64,\n        key_ptr: *const u8,\n        key_size: usize,\n        value_ptr: *const u8,\n        value_size: usize,\n    ) -> Result<u32, NativeTrap> {\n        assert!(!key_ptr.is_null());\n        assert!(!value_ptr.is_null());\n        // let key_bytes = unsafe { slice::from_raw_parts(key_ptr, key_size) };\n        let key_bytes = unsafe { slice::from_raw_parts(key_ptr, key_size) }.to_owned();\n        let key_bytes = self.key_prefix(&key_bytes);\n\n        let value_bytes = unsafe { slice::from_raw_parts(value_ptr, value_size) };\n\n        let mut db = self.db.write().unwrap();\n        db.entry(key_space).or_default().insert(\n            Bytes::from(key_bytes.to_vec()),\n            Bytes::from(value_bytes.to_vec()),\n        );\n        Ok(HOST_ERROR_SUCCESS)\n    }\n\n    fn casper_remove(\n        &self,\n        key_space: u64,\n        key_ptr: *const u8,\n        key_size: usize,\n    ) -> Result<u32, NativeTrap> {\n        assert!(!key_ptr.is_null());\n        let key_bytes = unsafe { slice::from_raw_parts(key_ptr, key_size) };\n        let key_bytes = self.key_prefix(key_bytes);\n\n        let mut db = self.db.write().unwrap();\n        if let Some(values) = db.get_mut(&key_space) {\n            values.remove(key_bytes.as_slice());\n            Ok(HOST_ERROR_SUCCESS)\n        } else {\n            Ok(HOST_ERROR_NOT_FOUND)\n        }\n    }\n\n    fn casper_print(&self, msg_ptr: *const u8, msg_size: usize) -> Result<(), NativeTrap> {\n        let msg_bytes = unsafe { slice::from_raw_parts(msg_ptr, msg_size) };\n        let msg = std::str::from_utf8(msg_bytes).expect(\"Valid UTF-8 string\");\n        println!(\"💻 {msg}\");\n        Ok(())\n    }\n\n    fn casper_return(\n        &self,\n        flags: u32,\n        data_ptr: *const u8,\n        data_len: usize,\n    ) -> Result<Infallible, NativeTrap> {\n        let return_flags = ReturnFlags::from_bits_truncate(flags);\n        let data = if data_ptr.is_null() {\n            
Bytes::new()\n        } else {\n            Bytes::copy_from_slice(unsafe { slice::from_raw_parts(data_ptr, data_len) })\n        };\n        Err(NativeTrap::Return(return_flags, data))\n    }\n\n    fn casper_copy_input(\n        &self,\n        alloc: extern \"C\" fn(usize, *mut core::ffi::c_void) -> *mut u8,\n        alloc_ctx: *const core::ffi::c_void,\n    ) -> Result<*mut u8, NativeTrap> {\n        let input_data = self.input_data.clone();\n        let input_data = input_data.as_ref().cloned().unwrap_or_default();\n        let ptr = NonNull::new(alloc(input_data.len(), alloc_ctx as _));\n\n        match ptr {\n            Some(ptr) => {\n                if !input_data.is_empty() {\n                    unsafe {\n                        ptr::copy_nonoverlapping(\n                            input_data.as_ptr(),\n                            ptr.as_ptr(),\n                            input_data.len(),\n                        );\n                    }\n                }\n                Ok(unsafe { ptr.as_ptr().add(input_data.len()) })\n            }\n            None => Ok(ptr::null_mut()),\n        }\n    }\n\n    #[allow(clippy::too_many_arguments)]\n    fn casper_create(\n        &self,\n        code_ptr: *const u8,\n        code_size: usize,\n        transferred_value: u64,\n        constructor_ptr: *const u8,\n        constructor_size: usize,\n        input_ptr: *const u8,\n        input_size: usize,\n        seed_ptr: *const u8,\n        seed_size: usize,\n        result_ptr: *mut casper_contract_sdk_sys::CreateResult,\n    ) -> Result<u32, NativeTrap> {\n        // let manifest =\n        //     NonNull::new(manifest_ptr as *mut casper_contract_sdk_sys::Manifest).expect(\"Manifest\n        // instance\");\n        let code = if code_ptr.is_null() {\n            None\n        } else {\n            Some(unsafe { slice::from_raw_parts(code_ptr, code_size) })\n        };\n\n        if code.is_some() {\n            panic!(\"Supplying code is not supported yet 
in native mode\");\n        }\n\n        let constructor = if constructor_ptr.is_null() {\n            None\n        } else {\n            Some(unsafe { slice::from_raw_parts(constructor_ptr, constructor_size) })\n        };\n\n        let input_data = if input_ptr.is_null() {\n            None\n        } else {\n            Some(unsafe { slice::from_raw_parts(input_ptr, input_size) })\n        };\n\n        let _seed = if seed_ptr.is_null() {\n            None\n        } else {\n            Some(unsafe { slice::from_raw_parts(seed_ptr, seed_size) })\n        };\n\n        assert_eq!(\n            transferred_value, 0,\n            \"Creating new contracts with transferred value is not supported in native mode\"\n        );\n\n        let mut rng = rand::thread_rng();\n        let contract_address = rng.gen();\n        let package_address = rng.gen();\n\n        let mut result = NonNull::new(result_ptr).expect(\"Valid pointer\");\n        unsafe {\n            result.as_mut().contract_address = package_address;\n        }\n\n        let mut contracts = self.contracts.write().unwrap();\n        contracts.insert(contract_address);\n\n        if let Some(entry_point) = constructor {\n            let entry_point = ENTRY_POINTS\n                .iter()\n                .find(|export| export.kind.name().as_bytes() == entry_point)\n                .expect(\"Entry point exists\");\n\n            let mut stub = with_current_environment(|stub| stub);\n            stub.input_data = input_data.map(Bytes::copy_from_slice);\n\n            stub.caller = stub.callee;\n            stub.callee = Entity::Contract(package_address);\n\n            // stub.callee\n            // Call constructor, expect a trap\n            let result = dispatch_with(stub, || {\n                // TODO: Handle panic inside constructor\n                (entry_point.fptr)();\n            });\n\n            match result {\n                Ok(()) => {}\n                Err(NativeTrap::Return(flags, bytes)) 
=> {\n                    if flags.contains(ReturnFlags::REVERT) {\n                        todo!(\"Constructor returned with a revert flag\");\n                    }\n                    assert!(bytes.is_empty(), \"When returning from the constructor it is expected that no bytes are passed in a return function\");\n                }\n                Err(NativeTrap::Panic(_panic)) => {\n                    todo!();\n                }\n            }\n        }\n\n        Ok(HOST_ERROR_SUCCESS)\n    }\n\n    #[allow(clippy::too_many_arguments)]\n    fn casper_call(\n        &self,\n        address_ptr: *const u8,\n        address_size: usize,\n        transferred_value: u64,\n        entry_point_ptr: *const u8,\n        entry_point_size: usize,\n        input_ptr: *const u8,\n        input_size: usize,\n        alloc: extern \"C\" fn(usize, *mut core::ffi::c_void) -> *mut u8, /* For capturing output\n                                                                         * data */\n        alloc_ctx: *const core::ffi::c_void,\n    ) -> Result<u32, NativeTrap> {\n        let address = unsafe { slice::from_raw_parts(address_ptr, address_size) };\n        let input_data = unsafe { slice::from_raw_parts(input_ptr, input_size) };\n        let entry_point = {\n            let entry_point_ptr = NonNull::new(entry_point_ptr.cast_mut()).expect(\"Valid pointer\");\n            let entry_point =\n                unsafe { slice::from_raw_parts(entry_point_ptr.as_ptr(), entry_point_size) };\n            let entry_point = std::str::from_utf8(entry_point).expect(\"Valid UTF-8 string\");\n            entry_point.to_string()\n        };\n\n        assert_eq!(\n            transferred_value, 0,\n            \"Transferred value is not supported in native mode\"\n        );\n\n        let export = ENTRY_POINTS\n            .iter()\n            .find(|export|\n                matches!(export.kind, EntryPointKind::SmartContract { name, .. } | EntryPointKind::TraitImpl { name, .. 
}\n                    if name == entry_point)\n            )\n            .expect(\"Existing entry point\");\n\n        let mut new_stub = with_current_environment(|stub| stub.clone());\n        new_stub.input_data = Some(Bytes::copy_from_slice(input_data));\n        new_stub.caller = new_stub.callee;\n        new_stub.callee = Entity::Contract(address.try_into().expect(\"Size to match\"));\n\n        let ret = dispatch_with(new_stub, || {\n            // We need to convert any panic inside the entry point into a native trap. This probably\n            // should be done in a more configurable way.\n            dispatch_export_call(|| {\n                (export.fptr)();\n            })\n        });\n\n        let unfolded = match ret {\n            Ok(Ok(())) => Ok(()),\n            Ok(Err(error)) | Err(error) => Err(error),\n        };\n\n        match unfolded {\n            Ok(()) => Ok(CALLEE_SUCCEEDED),\n            Err(NativeTrap::Return(flags, bytes)) => {\n                let ptr = NonNull::new(alloc(bytes.len(), alloc_ctx.cast_mut()));\n                if let Some(output_ptr) = ptr {\n                    unsafe {\n                        ptr::copy_nonoverlapping(bytes.as_ptr(), output_ptr.as_ptr(), bytes.len());\n                    }\n                }\n\n                if flags.contains(ReturnFlags::REVERT) {\n                    Ok(CALLEE_REVERTED)\n                } else {\n                    Ok(CALLEE_SUCCEEDED)\n                }\n            }\n            Err(NativeTrap::Panic(panic)) => {\n                eprintln!(\"Panic {panic:?}\");\n                Ok(CALLEE_TRAPPED)\n            }\n        }\n    }\n\n    #[doc = r\"Obtain data from the blockchain environment of current wasm invocation.\n\nExample paths:\n\n* `env_read([CASPER_CALLER], 1, nullptr, &caller_addr)` -> read caller's address into\n  `caller_addr` memory.\n* `env_read([CASPER_CHAIN, BLOCK_HASH, 0], 3, nullptr, &block_hash)` -> read hash of the\n  current block into `block_hash` 
memory.\n* `env_read([CASPER_CHAIN, BLOCK_HASH, 5], 3, nullptr, &block_hash)` -> read hash of the 5th\n  block from the current one into `block_hash` memory.\n* `env_read([CASPER_AUTHORIZED_KEYS], 1, nullptr, &authorized_keys)` -> read list of\n  authorized keys into `authorized_keys` memory.\"]\n    fn casper_env_read(\n        &self,\n        _env_path: *const u64,\n        _env_path_size: usize,\n        _alloc: Option<extern \"C\" fn(usize, *mut core::ffi::c_void) -> *mut u8>,\n        _alloc_ctx: *const core::ffi::c_void,\n    ) -> Result<*mut u8, NativeTrap> {\n        todo!()\n    }\n\n    fn casper_env_info(&self, info_ptr: *const u8, info_size: u32) -> Result<u32, NativeTrap> {\n        assert_eq!(info_size as usize, size_of::<EnvInfo>());\n        let mut env_info = NonNull::new(info_ptr as *mut u8)\n            .expect(\"Valid ptr\")\n            .cast::<EnvInfo>();\n        let env_info = unsafe { env_info.as_mut() };\n        *env_info = EnvInfo {\n            block_time: 0,\n            transferred_value: 0,\n            caller_addr: *self.caller.address(),\n            caller_kind: self.caller.tag(),\n            callee_addr: *self.callee.address(),\n            callee_kind: self.callee.tag(),\n        };\n        Ok(HOST_ERROR_SUCCESS)\n    }\n}\n\nthread_local! 
{\n    pub(crate) static LAST_TRAP: RefCell<Option<NativeTrap>> = const { RefCell::new(None) };\n    static ENV_STACK: RefCell<VecDeque<Environment>> = RefCell::new(VecDeque::from_iter([\n        // Stack of environments has a default element so unit tests do not require extra effort.\n        // Environment::default()\n    ]));\n}\n\npub fn with_current_environment<T>(f: impl FnOnce(Environment) -> T) -> T {\n    ENV_STACK.with(|stack| {\n        let stub = {\n            let borrowed = stack.borrow();\n            let front = borrowed.front().expect(\"Stub exists\").clone();\n            front\n        };\n        f(stub)\n    })\n}\n\npub fn current_environment() -> Environment {\n    with_current_environment(|env| env)\n}\n\nfn handle_ret_with<T>(value: Result<T, NativeTrap>, ret: impl FnOnce() -> T) -> T {\n    match value {\n        Ok(result) => {\n            LAST_TRAP.with(|last_trap| last_trap.borrow_mut().take());\n            result\n        }\n        Err(trap) => {\n            let result = ret();\n            LAST_TRAP.with(|last_trap| last_trap.borrow_mut().replace(trap));\n            result\n        }\n    }\n}\n\nfn dispatch_export_call<F>(func: F) -> Result<(), NativeTrap>\nwhere\n    F: FnOnce() + Send + UnwindSafe,\n{\n    let call_result = panic::catch_unwind(|| {\n        func();\n    });\n    match call_result {\n        Ok(()) => {\n            let last_trap = LAST_TRAP.with(|last_trap| last_trap.borrow_mut().take());\n            match last_trap {\n                Some(last_trap) => Err(last_trap),\n                None => Ok(()),\n            }\n        }\n        Err(error) => Err(NativeTrap::Panic(error)),\n    }\n}\n\nfn handle_ret<T: Default>(value: Result<T, NativeTrap>) -> T {\n    handle_ret_with(value, || T::default())\n}\n\n/// Dispatches a function with a default environment.\npub fn dispatch<T>(f: impl FnOnce() -> T) -> Result<T, NativeTrap> {\n    dispatch_with(Environment::default(), f)\n}\n\n/// Dispatches a function with a 
given environment.\npub fn dispatch_with<T>(stub: Environment, f: impl FnOnce() -> T) -> Result<T, NativeTrap> {\n    ENV_STACK.with(|stack| {\n        let mut borrowed = stack.borrow_mut();\n        borrowed.push_front(stub);\n    });\n\n    // Clear previous trap (if present)\n    LAST_TRAP.with(|last_trap| last_trap.borrow_mut().take());\n\n    // Call a function\n    let result = f();\n\n    // Check if a trap was set and return it if so (otherwise return the result).\n    let last_trap = LAST_TRAP.with(|last_trap| last_trap.borrow_mut().take());\n\n    let result = if let Some(trap) = last_trap {\n        Err(trap)\n    } else {\n        Ok(result)\n    };\n\n    // Pop the stub from the stack\n    ENV_STACK.with(|stack| {\n        let mut borrowed = stack.borrow_mut();\n        borrowed.pop_front();\n    });\n\n    result\n}\n\nmod symbols {\n    // TODO: Figure out how to use for_each_host_function macro here and deal with never type in\n    // casper_return\n    #[no_mangle]\n    /// Read value from a storage available for caller's entity address.\n    pub extern \"C\" fn casper_read(\n        key_space: u64,\n        key_ptr: *const u8,\n        key_size: usize,\n        info: *mut ::casper_contract_sdk_sys::ReadInfo,\n        alloc: extern \"C\" fn(usize, *mut core::ffi::c_void) -> *mut u8,\n        alloc_ctx: *const core::ffi::c_void,\n    ) -> u32 {\n        let _name = \"casper_read\";\n        let _args = (&key_space, &key_ptr, &key_size, &info, &alloc, &alloc_ctx);\n        let _call_result = with_current_environment(|stub| {\n            stub.casper_read(key_space, key_ptr, key_size, info, alloc, alloc_ctx)\n        });\n        crate::casper::native::handle_ret(_call_result)\n    }\n\n    #[no_mangle]\n    pub extern \"C\" fn casper_write(\n        key_space: u64,\n        key_ptr: *const u8,\n        key_size: usize,\n        value_ptr: *const u8,\n        value_size: usize,\n    ) -> u32 {\n        let _name = \"casper_write\";\n        let _args 
= (&key_space, &key_ptr, &key_size, &value_ptr, &value_size);\n        let _call_result = with_current_environment(|stub| {\n            stub.casper_write(key_space, key_ptr, key_size, value_ptr, value_size)\n        });\n        crate::casper::native::handle_ret(_call_result)\n    }\n\n    #[no_mangle]\n    pub extern \"C\" fn casper_remove(key_space: u64, key_ptr: *const u8, key_size: usize) -> u32 {\n        let _name = \"casper_remove\";\n        let _args = (&key_space, &key_ptr, &key_size);\n        let _call_result =\n            with_current_environment(|stub| stub.casper_remove(key_space, key_ptr, key_size));\n        crate::casper::native::handle_ret(_call_result)\n    }\n\n    #[no_mangle]\n    pub extern \"C\" fn casper_print(msg_ptr: *const u8, msg_size: usize) {\n        let _name = \"casper_print\";\n        let _args = (&msg_ptr, &msg_size);\n        let _call_result = with_current_environment(|stub| stub.casper_print(msg_ptr, msg_size));\n        crate::casper::native::handle_ret(_call_result);\n    }\n\n    use casper_executor_wasm_common::error::HOST_ERROR_SUCCESS;\n\n    use crate::casper::native::LAST_TRAP;\n\n    #[no_mangle]\n    pub extern \"C\" fn casper_return(flags: u32, data_ptr: *const u8, data_len: usize) {\n        let _name = \"casper_return\";\n        let _args = (&flags, &data_ptr, &data_len);\n        let _call_result =\n            with_current_environment(|stub| stub.casper_return(flags, data_ptr, data_len));\n        let err = _call_result.unwrap_err(); // SAFE\n        LAST_TRAP.with(|last_trap| last_trap.borrow_mut().replace(err));\n    }\n\n    #[no_mangle]\n    pub extern \"C\" fn casper_copy_input(\n        alloc: extern \"C\" fn(usize, *mut core::ffi::c_void) -> *mut u8,\n        alloc_ctx: *const core::ffi::c_void,\n    ) -> *mut u8 {\n        let _name = \"casper_copy_input\";\n        let _args = (&alloc, &alloc_ctx);\n        let _call_result =\n            with_current_environment(|stub| 
stub.casper_copy_input(alloc, alloc_ctx));\n        crate::casper::native::handle_ret_with(_call_result, ptr::null_mut)\n    }\n\n    #[no_mangle]\n    pub extern \"C\" fn casper_create(\n        code_ptr: *const u8,\n        code_size: usize,\n        transferred_value: u64,\n        constructor_ptr: *const u8,\n        constructor_size: usize,\n        input_ptr: *const u8,\n        input_size: usize,\n        seed_ptr: *const u8,\n        seed_size: usize,\n        result_ptr: *mut casper_contract_sdk_sys::CreateResult,\n    ) -> u32 {\n        let _call_result = with_current_environment(|stub| {\n            stub.casper_create(\n                code_ptr,\n                code_size,\n                transferred_value,\n                constructor_ptr,\n                constructor_size,\n                input_ptr,\n                input_size,\n                seed_ptr,\n                seed_size,\n                result_ptr,\n            )\n        });\n        crate::casper::native::handle_ret(_call_result)\n    }\n\n    #[no_mangle]\n    pub extern \"C\" fn casper_call(\n        address_ptr: *const u8,\n        address_size: usize,\n        transferred_value: u64,\n        entry_point_ptr: *const u8,\n        entry_point_size: usize,\n        input_ptr: *const u8,\n        input_size: usize,\n        alloc: extern \"C\" fn(usize, *mut core::ffi::c_void) -> *mut u8, /* For capturing output\n                                                                         * data */\n        alloc_ctx: *const core::ffi::c_void,\n    ) -> u32 {\n        let _call_result = with_current_environment(|stub| {\n            stub.casper_call(\n                address_ptr,\n                address_size,\n                transferred_value,\n                entry_point_ptr,\n                entry_point_size,\n                input_ptr,\n                input_size,\n                alloc,\n                alloc_ctx,\n            )\n        });\n        
crate::casper::native::handle_ret(_call_result)\n    }\n\n    #[no_mangle]\n    pub extern \"C\" fn casper_upgrade(\n        _code_ptr: *const u8,\n        _code_size: usize,\n        _entry_point_ptr: *const u8,\n        _entry_point_size: usize,\n        _input_ptr: *const u8,\n        _input_size: usize,\n    ) -> u32 {\n        todo!()\n    }\n\n    use core::slice;\n    use std::ptr;\n\n    use super::with_current_environment;\n\n    #[no_mangle]\n    pub extern \"C\" fn casper_env_read(\n        env_path: *const u64,\n        env_path_size: usize,\n        alloc: Option<extern \"C\" fn(usize, *mut core::ffi::c_void) -> *mut u8>,\n        alloc_ctx: *const core::ffi::c_void,\n    ) -> *mut u8 {\n        let _name = \"casper_env_read\";\n        let _args = (&env_path, &env_path_size, &alloc, &alloc_ctx);\n        let _call_result = with_current_environment(|stub| {\n            stub.casper_env_read(env_path, env_path_size, alloc, alloc_ctx)\n        });\n        crate::casper::native::handle_ret_with(_call_result, ptr::null_mut)\n    }\n    #[no_mangle]\n    pub extern \"C\" fn casper_env_balance(\n        _entity_kind: u32,\n        _entity_addr_ptr: *const u8,\n        _entity_addr_len: usize,\n    ) -> u64 {\n        todo!()\n    }\n    #[no_mangle]\n    pub extern \"C\" fn casper_transfer(\n        _entity_kind: u32,\n        _entity_addr_ptr: *const u8,\n        _entity_addr_len: usize,\n        _amount: u64,\n    ) -> u32 {\n        todo!()\n    }\n    #[no_mangle]\n    pub extern \"C\" fn casper_emit(\n        topic_ptr: *const u8,\n        topic_size: usize,\n        data_ptr: *const u8,\n        data_size: usize,\n    ) -> u32 {\n        let topic = unsafe { slice::from_raw_parts(topic_ptr, topic_size) };\n        let data = unsafe { slice::from_raw_parts(data_ptr, data_size) };\n        let topic = std::str::from_utf8(topic).expect(\"Valid UTF-8 string\");\n        println!(\"Emitting event with topic: {topic:?} and data: {data:?}\");\n        
HOST_ERROR_SUCCESS\n    }\n\n    #[no_mangle]\n    pub extern \"C\" fn casper_env_info(info_ptr: *const u8, info_size: u32) -> u32 {\n        let ret = with_current_environment(|env| env.casper_env_info(info_ptr, info_size));\n        crate::casper::native::handle_ret(ret)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use casper_executor_wasm_common::keyspace::Keyspace;\n\n    use crate::casper;\n\n    use super::*;\n\n    #[test]\n    fn foo() {\n        dispatch(|| {\n            casper::print(\"Hello\");\n            casper::write(Keyspace::Context(b\"test\"), b\"value 1\").unwrap();\n\n            let change_context_1 =\n                with_current_environment(|stub| stub.smart_contract(Entity::Contract([1; 32])));\n\n            dispatch_with(change_context_1, || {\n                casper::write(Keyspace::Context(b\"test\"), b\"value 2\").unwrap();\n                casper::write(Keyspace::State, b\"state\").unwrap();\n            })\n            .unwrap();\n\n            let change_context_1 =\n                with_current_environment(|stub| stub.smart_contract(Entity::Contract([1; 32])));\n            dispatch_with(change_context_1, || {\n                assert_eq!(\n                    casper::read_into_vec(Keyspace::Context(b\"test\")),\n                    Ok(Some(b\"value 2\".to_vec()))\n                );\n                assert_eq!(\n                    casper::read_into_vec(Keyspace::State),\n                    Ok(Some(b\"state\".to_vec()))\n                );\n            })\n            .unwrap();\n\n            assert_eq!(casper::get_caller(), DEFAULT_ADDRESS);\n            assert_eq!(\n                casper::read_into_vec(Keyspace::Context(b\"test\")),\n                Ok(Some(b\"value 1\".to_vec()))\n            );\n        })\n        .unwrap();\n    }\n    #[test]\n    fn test() {\n        dispatch_with(Environment::default(), || {\n            let msg = \"Hello\";\n            let () = with_current_environment(|stub| 
stub.casper_print(msg.as_ptr(), msg.len()))\n                .expect(\"Ok\");\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn test_returns() {\n        dispatch_with(Environment::default(), || {\n            let _ = with_current_environment(|stub| stub.casper_return(0, ptr::null(), 0));\n        })\n        .unwrap();\n    }\n}\n"
  },
  {
    "path": "smart_contracts/sdk/src/casper.rs",
    "content": "#[cfg(all(not(target_arch = \"wasm32\"), feature = \"std\"))]\npub mod native;\n\nuse crate::{\n    abi::{CasperABI, EnumVariant},\n    prelude::{\n        ffi::c_void,\n        marker::PhantomData,\n        mem::MaybeUninit,\n        ptr::{self, NonNull},\n    },\n    reserve_vec_space,\n    serializers::borsh::{BorshDeserialize, BorshSerialize},\n    types::{Address, CallError},\n    Message, ToCallData,\n};\n\nuse casper_contract_sdk_sys::casper_env_info;\nuse casper_executor_wasm_common::{\n    env_info::EnvInfo,\n    error::{result_from_code, CommonResult, HOST_ERROR_SUCCESS},\n    flags::ReturnFlags,\n    keyspace::{Keyspace, KeyspaceTag},\n};\n\n/// Print a message.\n#[inline]\npub fn print(msg: &str) {\n    unsafe { casper_contract_sdk_sys::casper_print(msg.as_ptr(), msg.len()) };\n}\n\npub enum Alloc<F: FnOnce(usize) -> Option<ptr::NonNull<u8>>> {\n    Callback(F),\n    Static(ptr::NonNull<u8>),\n}\n\nextern \"C\" fn alloc_callback<F: FnOnce(usize) -> Option<ptr::NonNull<u8>>>(\n    len: usize,\n    ctx: *mut c_void,\n) -> *mut u8 {\n    let opt_closure = ctx.cast::<Option<F>>();\n    let allocated_ptr = unsafe { (*opt_closure).take().unwrap()(len) };\n    match allocated_ptr {\n        Some(ptr) => ptr.as_ptr(),\n        None => ptr::null_mut(),\n    }\n}\n\n/// Provided callback should ensure that it can provide a pointer that can store `size` bytes.\n/// Function returns last pointer after writing data, or None otherwise.\npub fn copy_input_into<F: FnOnce(usize) -> Option<ptr::NonNull<u8>>>(\n    alloc: Option<F>,\n) -> Option<NonNull<u8>> {\n    let ret = unsafe {\n        casper_contract_sdk_sys::casper_copy_input(\n            alloc_callback::<F>,\n            &alloc as *const _ as *mut c_void,\n        )\n    };\n    NonNull::<u8>::new(ret)\n}\n\n/// Copy input data into a vector.\npub fn copy_input() -> Vec<u8> {\n    let mut vec = Vec::new();\n    let last_ptr = copy_input_into(Some(|size| reserve_vec_space(&mut vec, size)));\n    
match last_ptr {\n        Some(_last_ptr) => vec,\n        None => {\n            // TODO: size of input was 0, we could properly deal with this case by not calling alloc\n            // cb if size==0\n            Vec::new()\n        }\n    }\n}\n\n/// Provided callback should ensure that it can provide a pointer that can store `size` bytes.\npub fn copy_input_to(dest: &mut [u8]) -> Option<&[u8]> {\n    let last_ptr = copy_input_into(Some(|size| {\n        if size > dest.len() {\n            None\n        } else {\n            // SAFETY: `dest` is guaranteed to be non-null and large enough to hold `size`\n            // bytes.\n            Some(unsafe { ptr::NonNull::new_unchecked(dest.as_mut_ptr()) })\n        }\n    }));\n\n    let end_ptr = last_ptr?;\n    let length = unsafe { end_ptr.as_ptr().offset_from(dest.as_mut_ptr()) };\n    let length: usize = length.try_into().unwrap();\n    Some(&dest[..length])\n}\n\n/// Return from the contract.\npub fn ret(flags: ReturnFlags, data: Option<&[u8]>) {\n    let (data_ptr, data_len) = match data {\n        Some(data) => (data.as_ptr(), data.len()),\n        None => (ptr::null(), 0),\n    };\n    unsafe { casper_contract_sdk_sys::casper_return(flags.bits(), data_ptr, data_len) };\n    #[cfg(target_arch = \"wasm32\")]\n    unreachable!()\n}\n\n/// Read from the global state.\npub fn read<F: FnOnce(usize) -> Option<ptr::NonNull<u8>>>(\n    key: Keyspace,\n    f: F,\n) -> Result<Option<()>, CommonResult> {\n    let (key_space, key_bytes) = match key {\n        Keyspace::State => (KeyspaceTag::State as u64, &[][..]),\n        Keyspace::Context(key_bytes) => (KeyspaceTag::Context as u64, key_bytes),\n        Keyspace::NamedKey(key_bytes) => (KeyspaceTag::NamedKey as u64, key_bytes.as_bytes()),\n        Keyspace::PaymentInfo(payload) => (KeyspaceTag::PaymentInfo as u64, payload.as_bytes()),\n    };\n\n    let mut info = casper_contract_sdk_sys::ReadInfo {\n        data: ptr::null(),\n        size: 0,\n    };\n\n    extern 
\"C\" fn alloc_cb<F: FnOnce(usize) -> Option<ptr::NonNull<u8>>>(\n        len: usize,\n        ctx: *mut c_void,\n    ) -> *mut u8 {\n        let opt_closure = ctx as *mut Option<F>;\n        let allocated_ptr = unsafe { (*opt_closure).take().unwrap()(len) };\n        match allocated_ptr {\n            Some(mut ptr) => unsafe { ptr.as_mut() },\n            None => ptr::null_mut(),\n        }\n    }\n\n    let ctx = &Some(f) as *const _ as *mut _;\n\n    let ret = unsafe {\n        casper_contract_sdk_sys::casper_read(\n            key_space,\n            key_bytes.as_ptr(),\n            key_bytes.len(),\n            &mut info as *mut casper_contract_sdk_sys::ReadInfo,\n            alloc_cb::<F>,\n            ctx,\n        )\n    };\n\n    match result_from_code(ret) {\n        Ok(()) => Ok(Some(())),\n        Err(CommonResult::NotFound) => Ok(None),\n        Err(err) => Err(err),\n    }\n}\n\n/// Write to the global state.\npub fn write(key: Keyspace, value: &[u8]) -> Result<(), CommonResult> {\n    let (key_space, key_bytes) = match key {\n        Keyspace::State => (KeyspaceTag::State as u64, &[][..]),\n        Keyspace::Context(key_bytes) => (KeyspaceTag::Context as u64, key_bytes),\n        Keyspace::NamedKey(key_bytes) => (KeyspaceTag::NamedKey as u64, key_bytes.as_bytes()),\n        Keyspace::PaymentInfo(payload) => (KeyspaceTag::PaymentInfo as u64, payload.as_bytes()),\n    };\n    let ret = unsafe {\n        casper_contract_sdk_sys::casper_write(\n            key_space,\n            key_bytes.as_ptr(),\n            key_bytes.len(),\n            value.as_ptr(),\n            value.len(),\n        )\n    };\n    result_from_code(ret)\n}\n\n/// Remove from the global state.\npub fn remove(key: Keyspace) -> Result<(), CommonResult> {\n    let (key_space, key_bytes) = match key {\n        Keyspace::State => (KeyspaceTag::State as u64, &[][..]),\n        Keyspace::Context(key_bytes) => (KeyspaceTag::Context as u64, key_bytes),\n        
Keyspace::NamedKey(key_bytes) => (KeyspaceTag::NamedKey as u64, key_bytes.as_bytes()),\n        Keyspace::PaymentInfo(payload) => (KeyspaceTag::PaymentInfo as u64, payload.as_bytes()),\n    };\n    let ret = unsafe {\n        casper_contract_sdk_sys::casper_remove(key_space, key_bytes.as_ptr(), key_bytes.len())\n    };\n    result_from_code(ret)\n}\n\n/// Create a new contract instance.\npub fn create(\n    code: Option<&[u8]>,\n    transferred_value: u64,\n    constructor: Option<&str>,\n    input_data: Option<&[u8]>,\n    seed: Option<&[u8; 32]>,\n) -> Result<casper_contract_sdk_sys::CreateResult, CallError> {\n    let (code_ptr, code_size): (*const u8, usize) = match code {\n        Some(code) => (code.as_ptr(), code.len()),\n        None => (ptr::null(), 0),\n    };\n\n    let mut result = MaybeUninit::uninit();\n\n    let call_error = unsafe {\n        casper_contract_sdk_sys::casper_create(\n            code_ptr,\n            code_size,\n            transferred_value,\n            constructor.map(|s| s.as_ptr()).unwrap_or(ptr::null()),\n            constructor.map(|s| s.len()).unwrap_or(0),\n            input_data.map(|s| s.as_ptr()).unwrap_or(ptr::null()),\n            input_data.map(|s| s.len()).unwrap_or(0),\n            seed.map(|s| s.as_ptr()).unwrap_or(ptr::null()),\n            seed.map(|s| s.len()).unwrap_or(0),\n            result.as_mut_ptr(),\n        )\n    };\n\n    if call_error == 0 {\n        let result = unsafe { result.assume_init() };\n        Ok(result)\n    } else {\n        Err(CallError::try_from(call_error).expect(\"Unexpected error code\"))\n    }\n}\n\npub(crate) fn call_into<F: FnOnce(usize) -> Option<ptr::NonNull<u8>>>(\n    address: &Address,\n    transferred_value: u64,\n    entry_point: &str,\n    input_data: &[u8],\n    alloc: Option<F>,\n) -> Result<(), CallError> {\n    let result_code = unsafe {\n        casper_contract_sdk_sys::casper_call(\n            address.as_ptr(),\n            address.len(),\n            
transferred_value,\n            entry_point.as_ptr(),\n            entry_point.len(),\n            input_data.as_ptr(),\n            input_data.len(),\n            alloc_callback::<F>,\n            &alloc as *const _ as *mut _,\n        )\n    };\n    call_result_from_code(result_code)\n}\n\nfn call_result_from_code(result_code: u32) -> Result<(), CallError> {\n    if result_code == HOST_ERROR_SUCCESS {\n        Ok(())\n    } else {\n        Err(CallError::try_from(result_code).expect(\"Unexpected error code\"))\n    }\n}\n\n/// Call a contract.\npub fn casper_call(\n    address: &Address,\n    transferred_value: u64,\n    entry_point: &str,\n    input_data: &[u8],\n) -> (Option<Vec<u8>>, Result<(), CallError>) {\n    let mut output = None;\n    let result_code = call_into(\n        address,\n        transferred_value,\n        entry_point,\n        input_data,\n        Some(|size| {\n            let mut vec = Vec::new();\n            reserve_vec_space(&mut vec, size);\n            let result = Some(unsafe { ptr::NonNull::new_unchecked(vec.as_mut_ptr()) });\n            output = Some(vec);\n            result\n        }),\n    );\n    (output, result_code)\n}\n\n/// Upgrade the contract.\npub fn upgrade(\n    code: &[u8],\n    entry_point: Option<&str>,\n    input_data: Option<&[u8]>,\n) -> Result<(), CallError> {\n    let code_ptr = code.as_ptr();\n    let code_size = code.len();\n    let entry_point_ptr = entry_point.map(str::as_ptr).unwrap_or(ptr::null());\n    let entry_point_size = entry_point.map(str::len).unwrap_or(0);\n    let input_ptr = input_data.map(|s| s.as_ptr()).unwrap_or(ptr::null());\n    let input_size = input_data.map(|s| s.len()).unwrap_or(0);\n\n    let result_code = unsafe {\n        casper_contract_sdk_sys::casper_upgrade(\n            code_ptr,\n            code_size,\n            entry_point_ptr,\n            entry_point_size,\n            input_ptr,\n            input_size,\n        )\n    };\n    match call_result_from_code(result_code) 
{\n        Ok(()) => Ok(()),\n        Err(err) => Err(err),\n    }\n}\n\n/// Read from the global state into a vector.\npub fn read_into_vec(key: Keyspace) -> Result<Option<Vec<u8>>, CommonResult> {\n    let mut vec = Vec::new();\n    let out = read(key, |size| reserve_vec_space(&mut vec, size))?.map(|()| vec);\n    Ok(out)\n}\n\n/// Read from the global state into a vector.\npub fn has_state() -> Result<bool, CommonResult> {\n    // TODO: Host side optimized `casper_exists` to check if given entry exists in the global state.\n    let mut vec = Vec::new();\n    let read_info = read(Keyspace::State, |size| reserve_vec_space(&mut vec, size))?;\n    match read_info {\n        Some(()) => Ok(true),\n        None => Ok(false),\n    }\n}\n\n/// Read state from the global state.\npub fn read_state<T: Default + BorshDeserialize>() -> Result<T, CommonResult> {\n    let mut vec = Vec::new();\n    let read_info = read(Keyspace::State, |size| reserve_vec_space(&mut vec, size))?;\n    match read_info {\n        Some(()) => Ok(borsh::from_slice(&vec).unwrap()),\n        None => Ok(T::default()),\n    }\n}\n\n/// Write state to the global state.\npub fn write_state<T: BorshSerialize>(state: &T) -> Result<(), CommonResult> {\n    let new_state = borsh::to_vec(state).unwrap();\n    write(Keyspace::State, &new_state)?;\n    Ok(())\n}\n\n#[derive(Debug)]\npub struct CallResult<T: ToCallData> {\n    pub data: Option<Vec<u8>>,\n    pub result: Result<(), CallError>,\n    pub marker: PhantomData<T>,\n}\n\nimpl<T: ToCallData> CallResult<T> {\n    pub fn into_result<'a>(self) -> Result<T::Return<'a>, CallError>\n    where\n        <T as ToCallData>::Return<'a>: BorshDeserialize,\n    {\n        match self.result {\n            Ok(()) | Err(CallError::CalleeReverted) => {\n                let data = self.data.unwrap_or_default();\n                Ok(borsh::from_slice(&data).unwrap())\n            }\n            Err(call_error) => Err(call_error),\n        }\n    }\n\n    pub fn 
did_revert(&self) -> bool {\n        self.result == Err(CallError::CalleeReverted)\n    }\n}\n\n/// Call a contract.\npub fn call<T: ToCallData>(\n    contract_address: &Address,\n    transferred_value: u64,\n    call_data: T,\n) -> Result<CallResult<T>, CallError> {\n    let input_data = call_data.input_data().unwrap_or_default();\n\n    let (maybe_data, result_code) = casper_call(\n        contract_address,\n        transferred_value,\n        call_data.entry_point(),\n        &input_data,\n    );\n    match result_code {\n        Ok(()) | Err(CallError::CalleeReverted) => Ok(CallResult::<T> {\n            data: maybe_data,\n            result: result_code,\n            marker: PhantomData,\n        }),\n        Err(error) => Err(error),\n    }\n}\n\n/// Get the environment info.\npub fn get_env_info() -> EnvInfo {\n    let ret = {\n        let mut info = MaybeUninit::<EnvInfo>::uninit();\n\n        let ret = unsafe { casper_env_info(info.as_mut_ptr().cast(), size_of::<EnvInfo>() as u32) };\n        result_from_code(ret).map(|()| {\n            // SAFETY: The size of `EnvInfo` is known and the pointer is valid.\n            unsafe { info.assume_init() }\n        })\n    };\n\n    match ret {\n        Ok(info) => info,\n        Err(err) => panic!(\"Failed to get environment info: {:?}\", err),\n    }\n}\n\n/// Get the caller.\n#[must_use]\npub fn get_caller() -> Entity {\n    let info = get_env_info();\n    Entity::from_parts(info.caller_kind, info.caller_addr).expect(\"Invalid caller kind\")\n}\n\n#[must_use]\npub fn get_callee() -> Entity {\n    let info = get_env_info();\n    Entity::from_parts(info.callee_kind, info.callee_addr).expect(\"Invalid callee kind\")\n}\n\n/// Enum representing either an account or a contract.\n#[derive(\n    BorshSerialize, BorshDeserialize, Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord,\n)]\npub enum Entity {\n    Account([u8; 32]),\n    Contract([u8; 32]),\n}\n\nimpl Entity {\n    /// Get the tag of the entity.\n    
#[must_use]\n    pub fn tag(&self) -> u32 {\n        match self {\n            Entity::Account(_) => 0,\n            Entity::Contract(_) => 1,\n        }\n    }\n\n    #[must_use]\n    pub fn from_parts(tag: u32, address: [u8; 32]) -> Option<Self> {\n        match tag {\n            0 => Some(Self::Account(address)),\n            1 => Some(Self::Contract(address)),\n            _ => None,\n        }\n    }\n\n    #[must_use]\n    pub fn address(&self) -> &Address {\n        match self {\n            Entity::Account(addr) | Entity::Contract(addr) => addr,\n        }\n    }\n}\n\nimpl CasperABI for Entity {\n    fn populate_definitions(definitions: &mut crate::abi::Definitions) {\n        definitions.populate_one::<[u8; 32]>();\n    }\n\n    fn declaration() -> crate::abi::Declaration {\n        \"Entity\".into()\n    }\n\n    fn definition() -> crate::abi::Definition {\n        crate::abi::Definition::Enum {\n            items: vec![\n                EnumVariant {\n                    name: \"Account\".into(),\n                    discriminant: 0,\n                    decl: <[u8; 32] as CasperABI>::declaration(),\n                },\n                EnumVariant {\n                    name: \"Contract\".into(),\n                    discriminant: 1,\n                    decl: <[u8; 32] as CasperABI>::declaration(),\n                },\n            ],\n        }\n    }\n}\n\n/// Get the balance of an account or contract.\n#[must_use]\npub fn get_balance_of(entity_kind: &Entity) -> u64 {\n    let (kind, addr) = match entity_kind {\n        Entity::Account(addr) => (0, addr),\n        Entity::Contract(addr) => (1, addr),\n    };\n    let mut output: MaybeUninit<u64> = MaybeUninit::uninit();\n    let ret = unsafe {\n        casper_contract_sdk_sys::casper_env_balance(\n            kind,\n            addr.as_ptr(),\n            addr.len(),\n            output.as_mut_ptr().cast(),\n        )\n    };\n    if ret == 1 {\n        unsafe { output.assume_init() }\n    } else {\n 
       0\n    }\n}\n\n/// Get the transferred token value passed to the contract.\n#[must_use]\npub fn transferred_value() -> u64 {\n    let info = get_env_info();\n    info.transferred_value\n}\n\n/// Transfer tokens from the current contract to another account or contract.\npub fn transfer(target_account: &Address, amount: u64) -> Result<(), CallError> {\n    let amount: *const c_void = &amount as *const _ as *const c_void;\n    let result_code = unsafe {\n        casper_contract_sdk_sys::casper_transfer(\n            target_account.as_ptr(),\n            target_account.len(),\n            amount,\n        )\n    };\n    call_result_from_code(result_code)\n}\n\n/// Get the current block time.\n#[inline]\npub fn get_block_time() -> u64 {\n    let info = get_env_info();\n    info.block_time\n}\n\n#[doc(hidden)]\npub fn emit_raw(topic: &str, payload: &[u8]) -> Result<(), CommonResult> {\n    let ret = unsafe {\n        casper_contract_sdk_sys::casper_emit(\n            topic.as_ptr(),\n            topic.len(),\n            payload.as_ptr(),\n            payload.len(),\n        )\n    };\n    result_from_code(ret)\n}\n\n/// Emit a message.\npub fn emit<M>(message: M) -> Result<(), CommonResult>\nwhere\n    M: Message,\n{\n    let topic = M::TOPIC;\n    let payload = message.payload();\n    emit_raw(topic, &payload)\n}\n"
  },
  {
    "path": "smart_contracts/sdk/src/cli/validation.rs",
    "content": "use thiserror::Error;\n\n#[derive(Debug, Error)]\n#[non_exhaustive]\npub enum Validation {\n    #[error(\"Contract does not have any entry points\")]\n    NoEntryPoints,\n}\n"
  },
  {
    "path": "smart_contracts/sdk/src/collections/iterable_map.rs",
    "content": "use core::marker::PhantomData;\n\nuse borsh::{BorshDeserialize, BorshSerialize};\nuse bytes::BufMut;\nuse casper_executor_wasm_common::keyspace::Keyspace;\nuse const_fnv1a_hash::fnv1a_hash_64;\n\nuse crate::casper::{self, read_into_vec};\n\n/// A pointer that uniquely identifies a value written into the map.\n#[derive(BorshSerialize, BorshDeserialize, Debug, Clone, Copy, PartialEq)]\npub struct IterableMapPtr {\n    /// The key hash\n    pub(crate) hash: u64,\n    /// In case of a collision, signifies the index of this element\n    /// in a bucket\n    pub(crate) index: u64,\n}\n\n/// Trait for types that can be used as keys in [IterableMap].\n/// Must produce a deterministic hash.\n///\n/// A blanket implementation is provided for all types that implement\n/// [BorshSerialize].\npub trait IterableMapHash: PartialEq + BorshSerialize + BorshDeserialize {\n    fn compute_hash(&self) -> u64 {\n        let mut bytes = Vec::new();\n        self.serialize(&mut bytes).unwrap();\n        fnv1a_hash_64(&bytes, None)\n    }\n}\n\n// No blanket IterableMapKey implementation. Explicit impls prevent conflicts with\n// user‑provided implementations; a blanket impl would forbid custom hashes.\nimpl IterableMapHash for u8 {}\nimpl IterableMapHash for u16 {}\nimpl IterableMapHash for u32 {}\nimpl IterableMapHash for u64 {}\nimpl IterableMapHash for u128 {}\nimpl IterableMapHash for i8 {}\nimpl IterableMapHash for i16 {}\nimpl IterableMapHash for i32 {}\nimpl IterableMapHash for i64 {}\nimpl IterableMapHash for i128 {}\nimpl IterableMapHash for String {}\n\n/// A map over global state that allows iteration. 
Each entry at key `K_n` stores `(K_{n}, V,\n/// K_{n-1})`, where `V` is the value and `K_{n-1}` is the key hash of the previous entry.\n///\n/// This creates a constant spatial overhead; every entry stores a pointer\n/// to the one inserted before it.\n///\n/// Enables iteration without a guaranteed ordering; updating an existing\n/// key does not affect position.\n///\n/// Under the hood, this is a singly-linked HashMap with linear probing for collision resolution.\n/// Supports full traversal, typically in reverse-insertion order.\n#[derive(BorshSerialize, BorshDeserialize, Debug, Clone)]\n#[borsh(crate = \"crate::serializers::borsh\")]\npub struct IterableMap<K, V> {\n    pub(crate) prefix: String,\n\n    // Keys are hashed to u128 internally, but K is preserved to enforce type safety.\n    // While this map could accept arbitrary u128 keys, requiring a concrete K prevents\n    // misuse and clarifies intent at the type level.\n    pub(crate) tail_key_hash: Option<IterableMapPtr>,\n    _marker: PhantomData<(K, V)>,\n}\n\n/// Single entry in `IterableMap`. 
Stores the value and the hash of the previous entry's key.\n#[derive(BorshSerialize, BorshDeserialize, Debug, Clone)]\n#[borsh(crate = \"crate::serializers::borsh\")]\npub struct IterableMapEntry<K, V> {\n    pub(crate) key: K,\n    pub(crate) value: Option<V>,\n    pub(crate) previous: Option<IterableMapPtr>,\n}\n\nimpl<K, V> IterableMap<K, V>\nwhere\n    K: IterableMapHash,\n    V: BorshSerialize + BorshDeserialize,\n{\n    /// Creates an empty [IterableMap] with the given prefix.\n    pub fn new<S: Into<String>>(prefix: S) -> Self {\n        Self {\n            prefix: prefix.into(),\n            tail_key_hash: None,\n            _marker: PhantomData,\n        }\n    }\n\n    /// Inserts a key-value pair into the map.\n    ///\n    /// If the map did not have this key present, `None` is returned.\n    ///\n    /// If the map did have this key present, the value is updated, and the old value is returned.\n    ///\n    /// This has an amortized complexity of O(1), with a worst-case of O(n) when running into\n    /// collisions.\n    pub fn insert(&mut self, key: K, value: V) -> Option<V> {\n        // Find an address we can write to\n        let (ptr, at_ptr) = self.get_writable_slot(&key);\n\n        // Either overwrite an existing entry, or create a new one.\n        let (entry_to_write, previous) = match at_ptr {\n            Some(mut entry) => {\n                if entry.value.is_none() {\n                    // Reuse tombstone as a new insertion\n                    entry.key = key;\n                    entry.previous = self.tail_key_hash;\n                    entry.value = Some(value);\n                    self.tail_key_hash = Some(ptr);\n                    (entry, None)\n                } else {\n                    // Overwrite an existing value\n                    let old = entry.value;\n                    entry.value = Some(value);\n                    (entry, old)\n                }\n            }\n            None => {\n                let entry = 
IterableMapEntry {\n                    key,\n                    value: Some(value),\n                    previous: self.tail_key_hash,\n                };\n\n                // Additionally, since this is a new entry, we need to update the tail\n                self.tail_key_hash = Some(ptr);\n\n                (entry, None)\n            }\n        };\n\n        // Write the entry and return previous value if it exists\n        let mut entry_bytes = Vec::new();\n        entry_to_write.serialize(&mut entry_bytes).unwrap();\n\n        let prefix = self.create_prefix_from_ptr(&ptr);\n        let keyspace = Keyspace::Context(&prefix);\n        casper::write(keyspace, &entry_bytes).unwrap();\n\n        previous\n    }\n\n    /// Returns a value corresponding to the key.\n    pub fn get(&self, key: &K) -> Option<V> {\n        // If a slot is writable, it implicitly belongs the key\n        let (_, at_ptr) = self.get_writable_slot(key);\n        at_ptr.and_then(|entry| entry.value)\n    }\n\n    /// Removes a key from the map. 
Returns the associated value if the key exists.\n    ///\n    /// Has a worst-case runtime of O(n).\n    pub fn remove(&mut self, key: &K) -> Option<V> {\n        // Find the entry for the key that we're about to remove.\n        let (to_remove_ptr, at_remove_ptr) = self.find_slot(key)?;\n\n        let to_remove_prefix = self.create_prefix_from_ptr(&to_remove_ptr);\n        let to_remove_context_key = Keyspace::Context(&to_remove_prefix);\n\n        // See if the removed entry is a part of a collision resolution chain\n        // by investigating its potential child.\n        let to_remove_ptr_child_prefix = self.create_prefix_from_ptr(&IterableMapPtr {\n            index: to_remove_ptr.index + 1,\n            ..to_remove_ptr\n        });\n        let to_remove_ptr_child_keyspace = Keyspace::Context(&to_remove_ptr_child_prefix);\n\n        if self.get_entry(to_remove_ptr_child_keyspace).is_some() {\n            // A child exists, so we need to retain this element to maintain\n            // collision resolution soundness. 
Instead of purging, mark as\n            // tombstone.\n            let tombstone = IterableMapEntry {\n                value: None,\n                ..at_remove_ptr\n            };\n\n            // Write the updated value\n            let mut entry_bytes = Vec::new();\n            tombstone.serialize(&mut entry_bytes).unwrap();\n            casper::write(to_remove_context_key, &entry_bytes).unwrap();\n        } else {\n            // There is no child, so we can safely purge this entry entirely.\n            casper::remove(to_remove_context_key).unwrap();\n        }\n\n        // Edge case when removing tail\n        if self.tail_key_hash == Some(to_remove_ptr) {\n            self.tail_key_hash = at_remove_ptr.previous;\n            return at_remove_ptr.value;\n        }\n\n        // Scan the map, find entry to remove, join adjacent entries\n        let mut current_hash = self.tail_key_hash;\n        while let Some(key) = current_hash {\n            let current_prefix = self.create_prefix_from_ptr(&key);\n            let current_context_key = Keyspace::Context(&current_prefix);\n            let mut current_entry = self.get_entry(current_context_key).unwrap();\n\n            // If there is no previous entry, then we've finished iterating.\n            //\n            // This shouldn't happen, as the outer logic prevents from running\n            // into such case, ie. 
we early exit if the entry to remove doesn't\n            // exist.\n            let Some(next_hash) = current_entry.previous else {\n                panic!(\"Unexpected end of IterableMap\");\n            };\n\n            // If the next entry is the one to be removed, repoint the current\n            // one to the one preceeding the one to remove.\n            if next_hash == to_remove_ptr {\n                // Advance current past the element to remove\n                current_entry.previous = at_remove_ptr.previous;\n\n                // Re-write the updated current entry\n                let mut entry_bytes = Vec::new();\n                current_entry.serialize(&mut entry_bytes).unwrap();\n                casper::write(current_context_key, &entry_bytes).unwrap();\n\n                return at_remove_ptr.value;\n            }\n\n            // Advance backwards\n            current_hash = current_entry.previous;\n        }\n\n        None\n    }\n\n    /// Clears the map, removing all key-value pairs.\n    pub fn clear(&mut self) {\n        for key in self.keys() {\n            let prefix = self.create_prefix_from_key(&key);\n            {\n                let key = Keyspace::Context(&prefix);\n                casper::remove(key).unwrap()\n            };\n        }\n\n        self.tail_key_hash = None;\n    }\n\n    /// Returns true if the map contains a value for the specified key.\n    pub fn contains_key(&self, key: &K) -> bool {\n        self.get(key).is_some()\n    }\n\n    /// Creates an iterator visiting all the values in arbitrary order.\n    pub fn keys(&self) -> impl Iterator<Item = K> + '_ {\n        self.iter().map(|(key, _)| key)\n    }\n\n    /// Creates an iterator visiting all the values in arbitrary order.\n    pub fn values(&self) -> impl Iterator<Item = V> + '_ {\n        self.iter().map(|(_, value)| value)\n    }\n\n    // Returns true if the map contains no elements.\n    pub fn is_empty(&self) -> bool {\n        
self.tail_key_hash.is_none()\n    }\n\n    /// Returns an iterator over the entries in the map.\n    ///\n    /// Traverses entries in reverse-insertion order.\n    /// Each item is a tuple of the hashed key and the value.\n    pub fn iter(&self) -> IterableMapIter<K, V> {\n        IterableMapIter {\n            prefix: &self.prefix,\n            current: self.tail_key_hash,\n            _marker: PhantomData,\n        }\n    }\n\n    /// Returns the number of entries in the map.\n    ///\n    /// This is an O(n) operation.\n    pub fn len(&self) -> usize {\n        self.iter().count()\n    }\n\n    /// Find the slot containing key, if any.\n    fn find_slot(&self, key: &K) -> Option<(IterableMapPtr, IterableMapEntry<K, V>)> {\n        let mut bucket_ptr = self.create_root_ptr_from_key(key);\n\n        // Probe until we find either an existing slot, a tombstone or empty space.\n        // This should rarely iterate more than once assuming a solid hashing algorithm.\n        loop {\n            let prefix = self.create_prefix_from_ptr(&bucket_ptr);\n            let keyspace = Keyspace::Context(&prefix);\n\n            if let Some(entry) = self.get_entry(keyspace) {\n                // Existing value, check if the keys match\n                if entry.key == *key && entry.value.is_some() {\n                    // We have found a slot where this key lives, return it\n                    return Some((bucket_ptr, entry));\n                } else {\n                    // We found a slot for this key hash, but either the keys mismatch,\n                    // or it's vacant, so we need to probe further.\n                    bucket_ptr.index += 1;\n                    continue;\n                }\n            } else {\n                // We've reached empty address space, so the slot doesn't actually exist.\n                return None;\n            }\n        }\n    }\n\n    /// Find the next slot we can safely write to. 
This is either a slot already owned and\n    /// assigned to the key, a vacant tombstone, or empty memory.\n    fn get_writable_slot(&self, key: &K) -> (IterableMapPtr, Option<IterableMapEntry<K, V>>) {\n        let mut bucket_ptr = self.create_root_ptr_from_key(key);\n\n        // Probe until we find either an existing slot, a tombstone or empty space.\n        // This should rarely iterate more than once assuming a solid hashing algorithm.\n        loop {\n            let prefix = self.create_prefix_from_ptr(&bucket_ptr);\n            let keyspace = Keyspace::Context(&prefix);\n\n            if let Some(entry) = self.get_entry(keyspace) {\n                // Existing value, check if the keys match\n                if entry.key == *key {\n                    // We have found an existing slot for that key, return it\n                    return (bucket_ptr, Some(entry));\n                } else if entry.value.is_none() {\n                    // If the value is None, then this is a tombstone, and we\n                    // can write over it.\n                    return (bucket_ptr, Some(entry));\n                } else {\n                    // We found a slot for this key hash, but the keys mismatch,\n                    // and it's not vacant, so this is a collision and we need to\n                    // probe further.\n                    bucket_ptr.index += 1;\n                    continue;\n                }\n            } else {\n                // We've reached empty address space, so we can write here\n                return (bucket_ptr, None);\n            }\n        }\n    }\n\n    fn get_entry(&self, keyspace: Keyspace) -> Option<IterableMapEntry<K, V>> {\n        match read_into_vec(keyspace) {\n            Ok(Some(vec)) => {\n                let entry: IterableMapEntry<K, V> = borsh::from_slice(&vec).unwrap();\n                Some(entry)\n            }\n            Ok(None) => None,\n            Err(_) => None,\n        }\n    }\n\n    fn 
create_prefix_from_key(&self, key: &K) -> Vec<u8> {\n        let ptr = self.create_root_ptr_from_key(key);\n        self.create_prefix_from_ptr(&ptr)\n    }\n\n    fn create_root_ptr_from_key(&self, key: &K) -> IterableMapPtr {\n        IterableMapPtr {\n            hash: key.compute_hash(),\n            index: 0,\n        }\n    }\n\n    fn create_prefix_from_ptr(&self, hash: &IterableMapPtr) -> Vec<u8> {\n        let mut context_key = Vec::new();\n        context_key.extend(self.prefix.as_bytes());\n        context_key.extend(b\"_\");\n        context_key.put_u64_le(hash.hash);\n        context_key.extend(b\"_\");\n        context_key.put_u64_le(hash.index);\n        context_key\n    }\n}\n\n/// Iterator over entries in an [`IterableMap`].\n///\n/// Traverses the map in reverse-insertion order, following the internal\n/// linked structure via hashed key references [`u128`].\n///\n/// Yields a tuple (K, V), where the key is the hashed\n/// representation of the original key. The original key type `K` is not recoverable.\n///\n/// Each iteration step deserializes a single entry from storage.\n///\n/// This iterator performs no allocation beyond internal buffers,\n/// and deserialization errors are treated as iteration termination.\npub struct IterableMapIter<'a, K, V> {\n    prefix: &'a str,\n    current: Option<IterableMapPtr>,\n    _marker: PhantomData<(K, V)>,\n}\n\nimpl<'a, K, V> IntoIterator for &'a IterableMap<K, V>\nwhere\n    K: BorshDeserialize,\n    V: BorshDeserialize,\n{\n    type Item = (K, V);\n    type IntoIter = IterableMapIter<'a, K, V>;\n\n    fn into_iter(self) -> Self::IntoIter {\n        IterableMapIter {\n            prefix: &self.prefix,\n            current: self.tail_key_hash,\n            _marker: PhantomData,\n        }\n    }\n}\n\nimpl<K, V> Iterator for IterableMapIter<'_, K, V>\nwhere\n    K: BorshDeserialize,\n    V: BorshDeserialize,\n{\n    type Item = (K, V);\n\n    fn next(&mut self) -> Option<Self::Item> {\n        let 
current_hash = self.current?;\n        let mut key_bytes = Vec::new();\n        key_bytes.extend(self.prefix.as_bytes());\n        key_bytes.extend(b\"_\");\n        key_bytes.put_u64_le(current_hash.hash);\n        key_bytes.extend(b\"_\");\n        key_bytes.put_u64_le(current_hash.index);\n\n        let context_key = Keyspace::Context(&key_bytes);\n\n        match read_into_vec(context_key) {\n            Ok(Some(vec)) => {\n                let entry: IterableMapEntry<K, V> = borsh::from_slice(&vec).unwrap();\n                self.current = entry.previous;\n                Some((\n                    entry.key,\n                    entry\n                        .value\n                        .expect(\"Tombstone values should be unlinked on removal\"),\n                ))\n            }\n            Ok(None) => None,\n            Err(_) => None,\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::casper::native::dispatch;\n\n    const TEST_MAP_PREFIX: &str = \"test_map\";\n\n    #[test]\n    fn insert_and_get() {\n        dispatch(|| {\n            let mut map = IterableMap::<u64, String>::new(TEST_MAP_PREFIX);\n            assert_eq!(map.len(), 0);\n\n            assert_eq!(map.get(&1), None);\n\n            map.insert(1, \"a\".to_string());\n            assert_eq!(map.len(), 1);\n\n            assert_eq!(map.get(&1), Some(\"a\".to_string()));\n\n            map.insert(2, \"b\".to_string());\n            assert_eq!(map.len(), 2);\n\n            assert_eq!(map.get(&2), Some(\"b\".to_string()));\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn overwrite_existing_key() {\n        dispatch(|| {\n            let mut map = IterableMap::<u64, String>::new(TEST_MAP_PREFIX);\n\n            assert_eq!(map.insert(1, \"a\".to_string()), None);\n            assert_eq!(map.insert(1, \"b\".to_string()), Some(\"a\".to_string()));\n            assert_eq!(map.get(&1), Some(\"b\".to_string()));\n        })\n        .unwrap();\n    
}\n\n    #[test]\n    fn remove_tail_entry() {\n        dispatch(|| {\n            let mut map = IterableMap::<u64, String>::new(TEST_MAP_PREFIX);\n            assert_eq!(map.len(), 0);\n            map.insert(1, \"a\".to_string());\n            assert_eq!(map.len(), 1);\n            map.insert(2, \"b\".to_string());\n            assert_eq!(map.len(), 2);\n            assert_eq!(map.remove(&2), Some(\"b\".to_string()));\n            assert_eq!(map.len(), 1);\n            assert_eq!(map.get(&2), None);\n            assert_eq!(map.get(&1), Some(\"a\".to_string()));\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn remove_middle_entry() {\n        dispatch(|| {\n            let mut map = IterableMap::<u64, String>::new(TEST_MAP_PREFIX);\n            assert_eq!(map.len(), 0);\n\n            map.insert(1, \"a\".to_string());\n            assert_eq!(map.len(), 1);\n\n            map.insert(2, \"b\".to_string());\n            assert_eq!(map.len(), 2);\n\n            map.insert(3, \"c\".to_string());\n            assert_eq!(map.len(), 3);\n\n            assert_eq!(map.remove(&2), Some(\"b\".to_string()));\n            assert_eq!(map.len(), 2);\n\n            assert_eq!(map.get(&2), None);\n            assert_eq!(map.get(&1), Some(\"a\".to_string()));\n            assert_eq!(map.get(&3), Some(\"c\".to_string()));\n\n            assert_eq!(map.len(), 2);\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn remove_nonexistent_key_does_nothing() {\n        dispatch(|| {\n            let mut map = IterableMap::<u64, String>::new(TEST_MAP_PREFIX);\n\n            map.insert(1, \"a\".to_string());\n\n            assert_eq!(map.remove(&999), None);\n            assert_eq!(map.get(&1), Some(\"a\".to_string()));\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn iterates_all_entries_in_reverse_insertion_order() {\n        dispatch(|| {\n            let mut map = IterableMap::<u64, String>::new(TEST_MAP_PREFIX);\n\n            map.insert(1, 
\"a\".to_string());\n            map.insert(2, \"b\".to_string());\n            map.insert(3, \"c\".to_string());\n\n            let values: Vec<_> = map.values().collect();\n            assert_eq!(\n                values,\n                vec![\"c\".to_string(), \"b\".to_string(), \"a\".to_string(),]\n            );\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn iteration_skips_deleted_entries() {\n        dispatch(|| {\n            let mut map = IterableMap::<u64, String>::new(TEST_MAP_PREFIX);\n\n            map.insert(1, \"a\".to_string());\n            map.insert(2, \"b\".to_string());\n            map.insert(3, \"c\".to_string());\n\n            map.remove(&2);\n\n            let values: Vec<_> = map.values().collect();\n            assert_eq!(values, vec![\"c\".to_string(), \"a\".to_string(),]);\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn empty_map_behaves_sanely() {\n        dispatch(|| {\n            let mut map = IterableMap::<u64, String>::new(TEST_MAP_PREFIX);\n\n            assert_eq!(map.get(&1), None);\n            assert_eq!(map.remove(&1), None);\n            assert_eq!(map.iter().count(), 0);\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn separate_maps_do_not_conflict() {\n        dispatch(|| {\n            let mut map1 = IterableMap::<u64, String>::new(\"map1\");\n            let mut map2 = IterableMap::<u64, String>::new(\"map2\");\n\n            map1.insert(1, \"a\".to_string());\n            map2.insert(1, \"b\".to_string());\n\n            assert_eq!(map1.get(&1), Some(\"a\".to_string()));\n            assert_eq!(map2.get(&1), Some(\"b\".to_string()));\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn insert_same_value_under_different_keys() {\n        dispatch(|| {\n            let mut map = IterableMap::<u64, String>::new(TEST_MAP_PREFIX);\n\n            map.insert(1, \"shared\".to_string());\n            map.insert(2, \"shared\".to_string());\n\n            
assert_eq!(map.get(&1), Some(\"shared\".to_string()));\n            assert_eq!(map.get(&2), Some(\"shared\".to_string()));\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn clear_removes_all_entries() {\n        dispatch(|| {\n            let mut map = IterableMap::<u64, String>::new(TEST_MAP_PREFIX);\n            map.insert(1, \"a\".to_string());\n            map.insert(2, \"b\".to_string());\n            map.clear();\n            assert!(map.is_empty());\n            assert_eq!(map.iter().count(), 0);\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn keys_returns_reverse_insertion_order() {\n        dispatch(|| {\n            let mut map = IterableMap::<u64, String>::new(TEST_MAP_PREFIX);\n            map.insert(1, \"a\".to_string());\n            map.insert(2, \"b\".to_string());\n            let hashes: Vec<_> = map.keys().collect();\n            assert_eq!(hashes, vec![2, 1]);\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn values_returns_values_in_reverse_insertion_order() {\n        dispatch(|| {\n            let mut map = IterableMap::<u64, String>::new(TEST_MAP_PREFIX);\n            map.insert(1, \"a\".to_string());\n            map.insert(2, \"b\".to_string());\n            let values: Vec<_> = map.values().collect();\n            assert_eq!(values, vec![\"b\".to_string(), \"a\".to_string()]);\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn contains_key_returns_correctly() {\n        dispatch(|| {\n            let mut map = IterableMap::<u64, String>::new(TEST_MAP_PREFIX);\n            assert!(!map.contains_key(&1));\n            map.insert(1, \"a\".to_string());\n            assert!(map.contains_key(&1));\n            map.remove(&1);\n            assert!(!map.contains_key(&1));\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn multiple_removals_and_insertions() {\n        dispatch(|| {\n            let mut map = IterableMap::<u64, String>::new(TEST_MAP_PREFIX);\n            map.insert(1, 
\"a\".to_string());\n            map.insert(2, \"b\".to_string());\n            map.insert(3, \"c\".to_string());\n            map.remove(&2);\n            assert_eq!(map.get(&2), None);\n            assert_eq!(map.get(&1), Some(\"a\".to_string()));\n            assert_eq!(map.get(&3), Some(\"c\".to_string()));\n\n            map.insert(4, \"d\".to_string());\n            let values: Vec<_> = map.values().collect();\n            assert_eq!(values, vec![\"d\", \"c\", \"a\"]);\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn struct_as_key() {\n        #[derive(BorshSerialize, BorshDeserialize, Debug, Clone, PartialEq, Eq)]\n        struct TestKey {\n            id: u64,\n            name: String,\n        }\n\n        impl IterableMapHash for TestKey {}\n\n        dispatch(|| {\n            let key1 = TestKey {\n                id: 1,\n                name: \"Key1\".to_string(),\n            };\n            let key2 = TestKey {\n                id: 2,\n                name: \"Key2\".to_string(),\n            };\n            let mut map = IterableMap::<TestKey, String>::new(TEST_MAP_PREFIX);\n\n            map.insert(key1.clone(), \"a\".to_string());\n            map.insert(key2.clone(), \"b\".to_string());\n\n            assert_eq!(map.get(&key1), Some(\"a\".to_string()));\n            assert_eq!(map.get(&key2), Some(\"b\".to_string()));\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn remove_middle_of_long_chain() {\n        dispatch(|| {\n            let mut map = IterableMap::<u64, String>::new(TEST_MAP_PREFIX);\n            map.insert(1, \"a\".to_string());\n            map.insert(2, \"b\".to_string());\n            map.insert(3, \"c\".to_string());\n            map.insert(4, \"d\".to_string());\n            map.insert(5, \"e\".to_string());\n\n            // The order is 5,4,3,2,1\n            map.remove(&3); // Remove the middle entry\n\n            let values: Vec<_> = map.values().collect();\n            assert_eq!(values, 
vec![\"e\", \"d\", \"b\", \"a\"]);\n\n            // Check that entry 4's previous is now 2's hash\n            let ptr4 = map.create_root_ptr_from_key(&4u64);\n            let prefix = map.create_prefix_from_ptr(&ptr4);\n            let entry = map.get_entry(Keyspace::Context(&prefix)).unwrap();\n            assert_eq!(entry.previous, Some(map.create_root_ptr_from_key(&2u64)));\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn insert_after_remove_updates_head() {\n        dispatch(|| {\n            let mut map = IterableMap::<u64, String>::new(TEST_MAP_PREFIX);\n            map.insert(1, \"a\".to_string());\n            map.insert(2, \"b\".to_string());\n            map.remove(&2);\n            map.insert(3, \"c\".to_string());\n\n            let values: Vec<_> = map.values().collect();\n            assert_eq!(values, vec![\"c\", \"a\"]);\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn reinsert_removed_key() {\n        dispatch(|| {\n            let mut map = IterableMap::<u64, String>::new(TEST_MAP_PREFIX);\n            map.insert(1, \"a\".to_string());\n            map.remove(&1);\n            map.insert(1, \"b\".to_string());\n\n            assert_eq!(map.get(&1), Some(\"b\".to_string()));\n            assert_eq!(map.iter().next().unwrap().1, \"b\".to_string());\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn iteration_reflects_modifications() {\n        dispatch(|| {\n            let mut map = IterableMap::<u64, String>::new(TEST_MAP_PREFIX);\n            map.insert(1, \"a\".to_string());\n            map.insert(2, \"b\".to_string());\n            let mut iter = map.iter();\n            assert_eq!(iter.next().unwrap().1, \"b\".to_string());\n\n            map.remove(&2);\n            map.insert(3, \"c\".to_string());\n            let values: Vec<_> = map.values().collect();\n            assert_eq!(values, vec![\"c\", \"a\"]);\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn unit_struct_as_key() {\n        
#[derive(BorshSerialize, BorshDeserialize, PartialEq)]\n        struct UnitKey;\n\n        impl IterableMapHash for UnitKey {}\n\n        dispatch(|| {\n            let mut map = IterableMap::<UnitKey, String>::new(TEST_MAP_PREFIX);\n            map.insert(UnitKey, \"value\".to_string());\n            assert_eq!(map.get(&UnitKey), Some(\"value\".to_string()));\n        })\n        .unwrap();\n    }\n\n    #[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]\n    struct CollidingKey(u64, u64);\n\n    impl IterableMapHash for CollidingKey {\n        fn compute_hash(&self) -> u64 {\n            let mut bytes = Vec::new();\n            // Only serialize first field for hash computation\n            self.0.serialize(&mut bytes).unwrap();\n            fnv1a_hash_64(&bytes, None)\n        }\n    }\n\n    #[test]\n    fn basic_collision_handling() {\n        dispatch(|| {\n            let mut map = IterableMap::<CollidingKey, String>::new(TEST_MAP_PREFIX);\n\n            // Both keys will have same hash but different actual keys\n            let k1 = CollidingKey(42, 1);\n            let k2 = CollidingKey(42, 2);\n\n            map.insert(k1.clone(), \"first\".to_string());\n            map.insert(k2.clone(), \"second\".to_string());\n\n            assert_eq!(map.get(&k1), Some(\"first\".to_string()));\n            assert_eq!(map.get(&k2), Some(\"second\".to_string()));\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn tombstone_handling() {\n        dispatch(|| {\n            let mut map = IterableMap::<CollidingKey, String>::new(TEST_MAP_PREFIX);\n\n            let k1 = CollidingKey(42, 1);\n            let k2 = CollidingKey(42, 2);\n            let k3 = CollidingKey(42, 3);\n\n            map.insert(k1.clone(), \"first\".to_string());\n            map.insert(k2.clone(), \"second\".to_string());\n            map.insert(k3.clone(), \"third\".to_string());\n\n            // Remove middle entry\n            assert_eq!(map.remove(&k2), 
Some(\"second\".to_string()));\n\n            // Verify tombstone state\n            let (_, entry) = map.get_writable_slot(&k2);\n            assert!(entry.unwrap().value.is_none());\n\n            // Verify chain integrity\n            let values: Vec<_> = map.values().collect();\n            assert_eq!(values, vec![\"third\", \"first\"]);\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn tombstone_reuse() {\n        dispatch(|| {\n            let mut map = IterableMap::<CollidingKey, String>::new(TEST_MAP_PREFIX);\n\n            let k1 = CollidingKey(42, 1);\n            let k2 = CollidingKey(42, 2);\n\n            map.insert(k1.clone(), \"first\".to_string());\n            map.insert(k2.clone(), \"second\".to_string());\n\n            // Removing k1 while k2 exists guarantees k1 turns into\n            // a tombstone\n            map.remove(&k1);\n\n            // Reinsert into tombstone slot\n            map.insert(k1.clone(), \"reused\".to_string());\n\n            assert_eq!(map.get(&k1), Some(\"reused\".to_string()));\n            assert_eq!(map.get(&k2), Some(\"second\".to_string()));\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn full_deletion_handling() {\n        dispatch(|| {\n            let mut map = IterableMap::<CollidingKey, String>::new(TEST_MAP_PREFIX);\n\n            let k1 = CollidingKey(42, 1);\n            map.insert(k1.clone(), \"lonely\".to_string());\n\n            assert_eq!(map.remove(&k1), Some(\"lonely\".to_string()));\n\n            // Verify complete removal\n            let (_, entry) = map.get_writable_slot(&k1);\n            assert!(entry.is_none());\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn collision_chain_iteration() {\n        dispatch(|| {\n            let mut map = IterableMap::<CollidingKey, String>::new(TEST_MAP_PREFIX);\n\n            let keys = [\n                CollidingKey(42, 1),\n                CollidingKey(42, 2),\n                CollidingKey(42, 3),\n            
];\n\n            for (i, k) in keys.iter().enumerate() {\n                map.insert(k.clone(), format!(\"value-{}\", i));\n            }\n\n            // Remove middle entry\n            map.remove(&keys[1]);\n\n            let values: Vec<_> = map.values().collect();\n            assert_eq!(values, vec![\"value-2\", \"value-0\"]);\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn complex_collision_chain() {\n        dispatch(|| {\n            let mut map = IterableMap::<CollidingKey, String>::new(TEST_MAP_PREFIX);\n\n            // Create 5 colliding keys\n            let keys: Vec<_> = (0..5).map(|i| CollidingKey(42, i)).collect();\n\n            // Insert all\n            for k in &keys {\n                map.insert(k.clone(), format!(\"{}\", k.1));\n            }\n\n            // Remove even indexes\n            for k in keys.iter().step_by(2) {\n                map.remove(k);\n            }\n\n            // Insert new values\n            map.insert(keys[0].clone(), \"reinserted\".to_string());\n            map.insert(CollidingKey(42, 5), \"new\".to_string());\n\n            // Verify final state\n            let expected = vec![\n                (\"new\".to_string(), 5),\n                (\"reinserted\".to_string(), 0),\n                (\"3\".to_string(), 3),\n                (\"1\".to_string(), 1),\n            ];\n\n            let results: Vec<_> = map.iter().map(|(k, v)| (v, k.1)).collect();\n\n            assert_eq!(results, expected);\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn cross_bucket_reference() {\n        dispatch(|| {\n            let mut map = IterableMap::<CollidingKey, String>::new(TEST_MAP_PREFIX);\n\n            // Create keys with different hashes but chained references\n            let k1 = CollidingKey(1, 0);\n            let k2 = CollidingKey(2, 0);\n            let k3 = CollidingKey(1, 1); // Collides with k1\n\n            map.insert(k1.clone(), \"first\".to_string());\n            
map.insert(k2.clone(), \"second\".to_string());\n            map.insert(k3.clone(), \"third\".to_string());\n\n            // Remove k2 which is referenced by k3\n            map.remove(&k2);\n\n            // Verify iteration skips removed entry\n            let values: Vec<_> = map.values().collect();\n            assert_eq!(values, vec![\"third\", \"first\"]);\n        })\n        .unwrap();\n    }\n}\n"
  },
  {
    "path": "smart_contracts/sdk/src/collections/iterable_set.rs",
    "content": "use borsh::{BorshDeserialize, BorshSerialize};\n\nuse super::{IterableMap, IterableMapHash};\n\n/// An iterable set backed by a map.\npub struct IterableSet<V> {\n    pub(crate) map: IterableMap<V, ()>,\n}\n\nimpl<V: IterableMapHash + BorshSerialize + BorshDeserialize + Clone> IterableSet<V> {\n    /// Creates an empty [IterableMap] with the given prefix.\n    pub fn new<S: Into<String>>(prefix: S) -> Self {\n        Self {\n            map: IterableMap::new(prefix),\n        }\n    }\n\n    /// Inserts a value into the set.\n    pub fn insert(&mut self, value: V) {\n        self.map.insert(value, ());\n    }\n\n    /// Removes a value from the set.\n    ///\n    /// Has a worst-case runtime of O(n).\n    pub fn remove(&mut self, value: &V) {\n        self.map.remove(value);\n    }\n\n    /// Returns true if the set contains a value.\n    pub fn contains(&self, value: &V) -> bool {\n        self.map.get(value).is_some()\n    }\n\n    /// Creates an iterator visiting all the values in arbitrary order.\n    pub fn iter(&self) -> impl Iterator<Item = V> + '_ {\n        self.map.iter().map(|(value, _)| value)\n    }\n\n    // Returns true if the set contains no elements.\n    pub fn is_empty(&self) -> bool {\n        self.map.is_empty()\n    }\n\n    /// Clears the set, removing all values.\n    pub fn clear(&mut self) {\n        self.map.clear();\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::casper::native::dispatch;\n    use borsh::{BorshDeserialize, BorshSerialize};\n\n    #[test]\n    fn basic_insert_contains() {\n        dispatch(|| {\n            let mut set = IterableSet::new(\"test_set\");\n            assert!(!set.contains(&1));\n\n            set.insert(1);\n            assert!(set.contains(&1));\n\n            set.insert(2);\n            assert!(set.contains(&2));\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn remove_elements() {\n        dispatch(|| {\n            let mut set = 
IterableSet::new(\"test_set\");\n            set.insert(1);\n            set.insert(2);\n\n            set.remove(&1);\n            assert!(!set.contains(&1));\n            assert!(set.contains(&2));\n\n            set.remove(&2);\n            assert!(set.is_empty());\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn iterator_order_and_contents() {\n        dispatch(|| {\n            let mut set = IterableSet::new(\"test_set\");\n            set.insert(1);\n            set.insert(2);\n            set.insert(3);\n\n            let mut items: Vec<_> = set.iter().collect();\n            items.sort();\n            assert_eq!(items, vec![1, 2, 3]);\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn clear_functionality() {\n        dispatch(|| {\n            let mut set = IterableSet::new(\"test_set\");\n            set.insert(1);\n            set.insert(2);\n\n            assert!(!set.is_empty());\n            set.clear();\n            assert!(set.is_empty());\n            assert_eq!(set.iter().count(), 0);\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn multiple_sets_independence() {\n        dispatch(|| {\n            let mut set1 = IterableSet::new(\"set1\");\n            let mut set2 = IterableSet::new(\"set2\");\n\n            set1.insert(1);\n            set2.insert(1);\n\n            assert!(set1.contains(&1));\n            assert!(set2.contains(&1));\n\n            set1.remove(&1);\n            assert!(!set1.contains(&1));\n            assert!(set2.contains(&1));\n        })\n        .unwrap();\n    }\n\n    #[derive(BorshSerialize, BorshDeserialize, Clone, Debug, PartialEq)]\n    struct TestStruct {\n        field1: u64,\n        field2: String,\n    }\n\n    impl IterableMapHash for TestStruct {}\n\n    #[test]\n    fn struct_values() {\n        dispatch(|| {\n            let val1 = TestStruct {\n                field1: 1,\n                field2: \"a\".to_string(),\n            };\n            let val2 = TestStruct {\n     
           field1: 2,\n                field2: \"b\".to_string(),\n            };\n\n            let mut set = IterableSet::new(\"test_set\");\n            set.insert(val1.clone());\n            set.insert(val2.clone());\n\n            assert!(set.contains(&val1));\n            assert!(set.contains(&val2));\n\n            let mut collected: Vec<_> = set.iter().collect();\n            collected.sort_by(|a, b| a.field1.cmp(&b.field1));\n            assert_eq!(collected, vec![val1, val2]);\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn duplicate_insertions() {\n        dispatch(|| {\n            let mut set = IterableSet::new(\"test_set\");\n            set.insert(1);\n            set.insert(1); // Should be no-op\n\n            assert_eq!(set.iter().count(), 1);\n            set.remove(&1);\n            assert!(set.is_empty());\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn empty_set_behavior() {\n        dispatch(|| {\n            let set = IterableSet::<u64>::new(\"test_set\");\n            assert!(set.is_empty());\n            assert_eq!(set.iter().count(), 0);\n\n            let mut set = set;\n            set.remove(&999); // Shouldn't panic\n            assert!(set.is_empty());\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn complex_operations_sequence() {\n        dispatch(|| {\n            let mut set = IterableSet::new(\"test_set\");\n            set.insert(1);\n            set.insert(2);\n            set.remove(&1);\n            set.insert(3);\n            set.clear();\n            set.insert(4);\n\n            let items: Vec<_> = set.iter().collect();\n            assert_eq!(items, vec![4]);\n        })\n        .unwrap();\n    }\n}\n"
  },
  {
    "path": "smart_contracts/sdk/src/collections/lookup_key.rs",
    "content": "use borsh::BorshSerialize;\n\npub trait LookupKey<'a>: Default {\n    type Output: AsRef<[u8]> + 'a;\n    fn lookup<T: BorshSerialize>(&self, prefix: &'a [u8], key: &T) -> Self::Output;\n}\n\npub trait LookupKeyOwned: for<'a> LookupKey<'a> {}\nimpl<T> LookupKeyOwned for T where T: for<'a> LookupKey<'a> {}\n\n#[derive(Default)]\npub struct Identity;\nimpl<'a> LookupKey<'a> for Identity {\n    type Output = &'a [u8];\n\n    #[inline(always)]\n    fn lookup<T: BorshSerialize>(&self, prefix: &'a [u8], _key: &T) -> Self::Output {\n        prefix\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn identity_should_work() {\n        let identity = Identity;\n        let prefix = b\"foo\";\n        let key = 123u64;\n        assert_eq!(identity.lookup(prefix, &key), prefix);\n    }\n}\n"
  },
  {
    "path": "smart_contracts/sdk/src/collections/map.rs",
    "content": "use crate::{\n    abi::{CasperABI, Declaration, Definition, StructField},\n    casper::{self, read_into_vec},\n    serializers::borsh::{BorshDeserialize, BorshSerialize},\n};\nuse casper_executor_wasm_common::keyspace::Keyspace;\nuse const_fnv1a_hash::fnv1a_hash_str_64;\n\nuse crate::prelude::marker::PhantomData;\n\n#[derive(BorshSerialize, BorshDeserialize, Debug, Clone)]\n#[borsh(crate = \"crate::serializers::borsh\")]\npub struct Map<K, V> {\n    pub(crate) name: String,\n    pub(crate) _marker: PhantomData<(K, V)>,\n}\n\n/// Computes the prefix for a given key.\n#[allow(dead_code)]\npub(crate) const fn compute_prefix(input: &str) -> [u8; 8] {\n    let hash = fnv1a_hash_str_64(input);\n    hash.to_le_bytes()\n}\n\nimpl<K, V> Map<K, V>\nwhere\n    K: BorshSerialize,\n    V: BorshSerialize + BorshDeserialize,\n{\n    pub fn new<S: Into<String>>(name: S) -> Self {\n        Self {\n            name: name.into(),\n            _marker: PhantomData,\n        }\n    }\n\n    pub fn insert(&mut self, key: &K, value: &V) {\n        let mut context_key = Vec::new();\n        context_key.extend(self.name.as_bytes());\n        // NOTE: We may want to create new keyspace for a hashed context element to avoid hashing in\n        // the wasm.\n        key.serialize(&mut context_key).unwrap();\n        let prefix = Keyspace::Context(&context_key);\n        casper::write(prefix, &borsh::to_vec(value).unwrap()).unwrap();\n    }\n\n    pub fn remove(&mut self, key: &K) {\n        let prefix_bytes = self.compute_prefix_for_key(key);\n        let prefix = Keyspace::Context(&prefix_bytes);\n        casper::remove(prefix).unwrap();\n    }\n\n    pub fn get(&self, key: &K) -> Option<V> {\n        let mut key_bytes = self.name.as_bytes().to_owned();\n        key.serialize(&mut key_bytes).unwrap();\n        let prefix = Keyspace::Context(&key_bytes);\n        read_into_vec(prefix)\n            .unwrap()\n            .map(|vec| borsh::from_slice(&vec).unwrap())\n    }\n\n   
 fn compute_prefix_for_key(&self, key: &K) -> Vec<u8> {\n        let mut context_key = Vec::new();\n        context_key.extend(self.name.as_bytes());\n        key.serialize(&mut context_key).unwrap();\n        context_key\n    }\n}\n\nimpl<K: CasperABI, V: CasperABI> CasperABI for Map<K, V> {\n    fn populate_definitions(definitions: &mut crate::abi::Definitions) {\n        definitions.populate_one::<K>();\n        definitions.populate_one::<V>();\n    }\n\n    fn declaration() -> Declaration {\n        format!(\"Map<{}, {}>\", K::declaration(), V::declaration())\n    }\n    #[inline]\n    fn definition() -> Definition {\n        Definition::Struct {\n            items: vec![StructField {\n                name: \"prefix\".into(),\n                decl: u64::declaration(),\n            }],\n        }\n    }\n}\n\n#[cfg(test)]\npub(crate) mod tests {\n    use super::*;\n\n    #[test]\n    fn test_compute_prefix() {\n        let prefix = compute_prefix(\"hello\");\n        assert_eq!(prefix.as_slice(), &[11, 189, 170, 128, 70, 216, 48, 164]);\n        let back = u64::from_le_bytes(prefix);\n        assert_eq!(fnv1a_hash_str_64(\"hello\"), back);\n    }\n\n    #[ignore]\n    #[test]\n    fn test_map() {\n        let mut map = Map::<u64, u64>::new(\"test\");\n        map.insert(&1, &2);\n        assert_eq!(map.get(&1), Some(2));\n        assert_eq!(map.get(&2), None);\n        map.insert(&2, &3);\n        assert_eq!(map.get(&1), Some(2));\n        assert_eq!(map.get(&2), Some(3));\n\n        let mut map = Map::<u64, u64>::new(\"test2\");\n        assert_eq!(map.get(&1), None);\n        map.insert(&1, &22);\n        assert_eq!(map.get(&1), Some(22));\n        assert_eq!(map.get(&2), None);\n        map.insert(&2, &33);\n        assert_eq!(map.get(&1), Some(22));\n        assert_eq!(map.get(&2), Some(33));\n    }\n}\n"
  },
  {
    "path": "smart_contracts/sdk/src/collections/set.rs",
    "content": "use crate::prelude::marker::PhantomData;\n\nuse crate::{casper, serializers::borsh::BorshSerialize};\nuse casper_executor_wasm_common::keyspace::Keyspace;\n\nuse super::lookup_key::{Identity, LookupKey, LookupKeyOwned};\n\n#[derive(Clone)]\npub struct Set<T, L = Identity>\nwhere\n    T: BorshSerialize,\n{\n    prefix: String,\n    lookup: L,\n    _marker: PhantomData<T>,\n}\n\nimpl<T, L> Set<T, L>\nwhere\n    T: BorshSerialize,\n    L: LookupKeyOwned,\n    for<'a> <L as LookupKey<'a>>::Output: AsRef<[u8]>,\n{\n    pub fn new(prefix: String) -> Self {\n        Self {\n            prefix,\n            lookup: L::default(),\n            _marker: PhantomData,\n        }\n    }\n\n    pub fn insert(&mut self, key: T) {\n        let lookup_key = self.lookup.lookup(self.prefix.as_bytes(), &key);\n        casper::write(Keyspace::Context(lookup_key.as_ref()), &[]).unwrap();\n    }\n\n    pub fn contains_key(&self, key: T) -> bool {\n        let lookup_key = self.lookup.lookup(self.prefix.as_bytes(), &key);\n        let entry = casper::read(Keyspace::Context(lookup_key.as_ref()), |_size| None).unwrap();\n        entry.is_some()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::prelude::*;\n\n    use crate::serializers::borsh::BorshSerialize;\n\n    use super::Set;\n\n    #[derive(BorshSerialize)]\n    #[borsh(crate = \"crate::serializers::borsh\")]\n    pub enum Flag {\n        A,\n        B,\n        C,\n    }\n\n    #[ignore]\n    #[test]\n    fn should_insert() {\n        let mut set: Set<Flag> = Set::new(\"Prefix\".to_string());\n\n        assert!(!set.contains_key(Flag::A));\n        assert!(!set.contains_key(Flag::B));\n        assert!(!set.contains_key(Flag::C));\n\n        set.insert(Flag::A);\n        assert!(set.contains_key(Flag::A));\n\n        set.insert(Flag::B);\n        assert!(set.contains_key(Flag::B));\n\n        set.insert(Flag::C);\n        assert!(set.contains_key(Flag::C));\n    }\n}\n"
  },
  {
    "path": "smart_contracts/sdk/src/collections/sorted_vector.rs",
    "content": "use crate::serializers::borsh::{BorshDeserialize, BorshSerialize};\n\nuse crate::abi::CasperABI;\n\nuse super::Vector;\n\n#[derive(BorshSerialize, BorshDeserialize, Debug, Clone)]\n#[borsh(crate = \"crate::serializers::borsh\")]\npub struct SortedVector<T: Ord> {\n    vector: Vector<T>,\n}\n\nimpl<T: Ord + CasperABI> CasperABI for SortedVector<T> {\n    fn populate_definitions(definitions: &mut crate::abi::Definitions) {\n        T::populate_definitions(definitions)\n    }\n\n    fn declaration() -> crate::abi::Declaration {\n        format!(\"SortedVector<{}>\", T::declaration())\n    }\n\n    fn definition() -> crate::abi::Definition {\n        crate::abi::Definition::Struct {\n            items: vec![\n                crate::abi::StructField {\n                    name: \"prefix\".into(),\n                    decl: String::declaration(),\n                },\n                crate::abi::StructField {\n                    name: \"length\".into(),\n                    decl: u64::declaration(),\n                },\n            ],\n        }\n    }\n}\n\nimpl<T> SortedVector<T>\nwhere\n    T: BorshSerialize + BorshDeserialize + Ord,\n{\n    pub fn new<S: Into<String>>(prefix: S) -> Self {\n        Self {\n            vector: Vector::new(prefix),\n        }\n    }\n\n    pub fn push(&mut self, value: T) {\n        let pos = self.vector.binary_search(&value).unwrap_or_else(|e| e);\n        self.vector.insert(pos, value);\n    }\n\n    pub fn remove(&mut self, index: u64) -> Option<T> {\n        self.vector.remove(index)\n    }\n\n    #[inline]\n    pub fn contains(&self, value: &T) -> bool {\n        self.vector.binary_search(value).is_ok()\n    }\n\n    #[inline(always)]\n    pub fn get(&self, index: u64) -> Option<T> {\n        self.vector.get(index)\n    }\n\n    #[inline(always)]\n    pub fn iter(&self) -> impl Iterator<Item = T> + '_ {\n        self.vector.iter()\n    }\n\n    #[inline(always)]\n    pub fn len(&self) -> u64 {\n        
self.vector.len()\n    }\n\n    #[inline(always)]\n    pub fn is_empty(&self) -> bool {\n        self.vector.is_empty()\n    }\n\n    #[inline(always)]\n    pub fn retain<F>(&mut self, f: F)\n    where\n        F: FnMut(&T) -> bool,\n    {\n        self.vector.retain(f);\n    }\n}\n\n#[cfg(all(test, feature = \"std\"))]\nmod tests {\n    use crate::casper::native::dispatch;\n\n    use super::*;\n\n    #[test]\n    fn test_sorted_vector() {\n        dispatch(|| {\n            let mut sorted_vector = SortedVector::new(\"sorted_vector\");\n\n            sorted_vector.push(2);\n            sorted_vector.push(1);\n            sorted_vector.push(3);\n            sorted_vector.push(0);\n            sorted_vector.push(0);\n            sorted_vector.push(3);\n\n            assert!(sorted_vector.contains(&0));\n            assert!(sorted_vector.contains(&2));\n            assert!(!sorted_vector.contains(&15));\n\n            let vec_1: Vec<_> = sorted_vector.iter().collect();\n            assert_eq!(vec_1, vec![0, 0, 1, 2, 3, 3]);\n\n            sorted_vector.remove(2);\n\n            let vec_2: Vec<_> = sorted_vector.iter().collect();\n            assert_eq!(vec_2, vec![0, 0, 2, 3, 3]);\n        })\n        .unwrap();\n    }\n}\n"
  },
  {
    "path": "smart_contracts/sdk/src/collections/vector.rs",
    "content": "use crate::{\n    abi::{CasperABI, Declaration, Definition, Definitions, StructField},\n    casper::{self, read_into_vec},\n    prelude::{cmp::Ordering, marker::PhantomData},\n    serializers::borsh::{BorshDeserialize, BorshSerialize},\n};\n\nuse casper_executor_wasm_common::keyspace::Keyspace;\n\n#[derive(BorshSerialize, BorshDeserialize, Debug, Clone)]\n#[borsh(crate = \"crate::serializers::borsh\")]\npub struct Vector<T> {\n    pub(crate) prefix: String,\n    pub(crate) length: u64,\n    pub(crate) _marker: PhantomData<T>,\n}\n\nimpl<T: CasperABI> CasperABI for Vector<T> {\n    fn populate_definitions(_definitions: &mut Definitions) {}\n\n    fn declaration() -> Declaration {\n        format!(\"Vector<{}>\", T::declaration())\n    }\n\n    fn definition() -> Definition {\n        Definition::Struct {\n            items: vec![\n                StructField {\n                    name: \"prefix\".into(),\n                    decl: String::declaration(),\n                },\n                StructField {\n                    name: \"length\".into(),\n                    decl: u64::declaration(),\n                },\n            ],\n        }\n    }\n}\n\nimpl<T> Vector<T>\nwhere\n    T: BorshSerialize + BorshDeserialize,\n{\n    /// Constructs a new, empty [`Vector<T>`].\n    ///\n    /// The vector header will not write itself to the GS, even if\n    /// values are pushed onto it later.\n    pub fn new<S: Into<String>>(prefix: S) -> Self {\n        Self {\n            prefix: prefix.into(),\n            length: 0,\n            _marker: PhantomData,\n        }\n    }\n\n    /// Appends an element to the back of a collection.\n    pub fn push(&mut self, value: T) {\n        let prefix_bytes = self.compute_prefix_bytes_for_index(self.length);\n        let prefix = Keyspace::Context(&prefix_bytes);\n        casper::write(prefix, &borsh::to_vec(&value).unwrap()).unwrap();\n        self.length += 1;\n    }\n\n    /// Removes the last element from a vector 
and returns it, or None if it is empty.\n    pub fn pop(&mut self) -> Option<T> {\n        if self.is_empty() {\n            return None;\n        }\n        self.swap_remove(self.len() - 1)\n    }\n\n    /// Returns true if the slice contains an element with the given value.\n    ///\n    /// This operation is O(n).\n    pub fn contains(&self, value: &T) -> bool\n    where\n        T: PartialEq,\n    {\n        self.iter().any(|v| v == *value)\n    }\n\n    /// Returns an element at index, deserialized.\n    pub fn get(&self, index: u64) -> Option<T> {\n        let prefix = self.compute_prefix_bytes_for_index(index);\n        let item_keyspace = Keyspace::Context(&prefix);\n        read_into_vec(item_keyspace)\n            .unwrap()\n            .map(|vec| borsh::from_slice(&vec).unwrap())\n    }\n\n    /// Returns an iterator over self, with elements deserialized.\n    pub fn iter(&self) -> impl Iterator<Item = T> + '_ {\n        (0..self.length).map(move |i| self.get(i).unwrap())\n    }\n\n    /// Inserts an element at position `index` within the vector, shifting all elements after it to\n    /// the right.\n    pub fn insert(&mut self, index: u64, value: T) {\n        assert!(index <= self.length, \"index out of bounds\");\n\n        // Shift elements to the right\n        for i in (index..self.length).rev() {\n            if let Some(src_value) = self.get(i) {\n                self.write(i + 1, src_value);\n            }\n        }\n\n        // Write the new value at the specified index\n        self.write(index, value);\n\n        self.length += 1;\n    }\n\n    /// Clears the vector, removing all values from the global state.\n    /// This is potentially expensive, as it requires an iteration over all elements to remove them\n    /// from the global state.\n    pub fn clear(&mut self) {\n        for i in 0..self.length {\n            let prefix_bytes = self.compute_prefix_bytes_for_index(i);\n            let item_keyspace = 
Keyspace::Context(&prefix_bytes);\n            casper::remove(item_keyspace).unwrap();\n        }\n        self.length = 0;\n    }\n\n    /// Returns the number of elements in the vector, also referred to as its ‘length’.\n    #[inline(always)]\n    pub fn len(&self) -> u64 {\n        self.length\n    }\n\n    /// Returns `true` if the vector contains no elements.\n    #[inline(always)]\n    pub fn is_empty(&self) -> bool {\n        self.length == 0\n    }\n\n    /// Binary searches this vector for a given element. If the vector is not sorted, the returned\n    /// result is unspecified and meaningless.\n    pub fn binary_search(&self, value: &T) -> Result<u64, u64>\n    where\n        T: Ord,\n    {\n        self.binary_search_by(|v| v.cmp(value))\n    }\n\n    /// Binary searches this slice with a comparator function.\n    ///\n    /// The comparator function should return an [Ordering] that indicates whether its argument is\n    /// `Less`, `Equal` or `Greater` the desired target. If the slice is not sorted or if the\n    /// comparator function does not implement an order consistent with the sort order of the\n    /// underlying slice, the returned result is unspecified and meaningless.\n    pub fn binary_search_by<F>(&self, mut f: F) -> Result<u64, u64>\n    where\n        F: FnMut(&T) -> Ordering,\n    {\n        // INVARIANTS:\n        // - 0 <= left <= left + size = right <= self.len()\n        // - f returns Less for everything in self[..left]\n        // - f returns Greater for everything in self[right..]\n        let mut size = self.len();\n        let mut left = 0;\n        let mut right = size;\n        while left < right {\n            let mid = left + size / 2;\n\n            // SAFETY: the while condition means `size` is strictly positive, so\n            // `size/2 < size`. 
Thus `left + size/2 < left + size`, which\n            // coupled with the `left + size <= self.len()` invariant means\n            // we have `left + size/2 < self.len()`, and this is in-bounds.\n            let cmp = f(&self.get(mid).unwrap());\n\n            // This control flow produces conditional moves, which results in\n            // fewer branches and instructions than if/else or matching on\n            // cmp::Ordering.\n            // This is x86 asm for u8: https://rust.godbolt.org/z/698eYffTx.\n            left = if cmp == Ordering::Less { mid + 1 } else { left };\n            right = if cmp == Ordering::Greater { mid } else { right };\n            if cmp == Ordering::Equal {\n                // SAFETY: same as the `get_unchecked` above\n                assert!(mid < self.len());\n                return Ok(mid);\n            }\n\n            size = right - left;\n        }\n\n        // SAFETY: directly true from the overall invariant.\n        // Note that this is `<=`, unlike the assume in the `Ok` path.\n        assert!(left <= self.len());\n        Err(left)\n    }\n\n    /// Removes the element at the specified index and returns it.\n    ///\n    /// Note: Because this shifts over the remaining elements, it has a\n    /// worst-case performance of O(n). 
If you don’t need the order of\n    /// elements to be preserved, use `swap_remove` instead.\n    pub fn remove(&mut self, index: u64) -> Option<T> {\n        if index >= self.length {\n            return None;\n        }\n\n        let value_to_remove = self.get(index).unwrap();\n\n        // Shift elements to the left\n        for i in index..(self.length - 1) {\n            if let Some(next_value) = self.get(i + 1) {\n                self.write(i, next_value);\n            }\n        }\n\n        // Remove the last element from storage\n        self.length -= 1;\n        casper::remove(Keyspace::Context(\n            &self.compute_prefix_bytes_for_index(self.length),\n        ))\n        .unwrap();\n\n        Some(value_to_remove)\n    }\n\n    /// Removes the element at the specified index and returns it.\n    ///\n    /// The removed element is replaced by the last element of the vector.\n    /// This does not preserve ordering of the remaining elements, but is O(1).\n    pub fn swap_remove(&mut self, index: u64) -> Option<T> {\n        if index >= self.length {\n            return None;\n        }\n\n        let value_to_remove = self.get(index).unwrap();\n        let last_value = self.get(self.len() - 1).unwrap();\n\n        if index != self.len() - 1 {\n            self.write(index, last_value);\n        }\n\n        self.length -= 1;\n        casper::remove(Keyspace::Context(\n            &self.compute_prefix_bytes_for_index(self.length),\n        ))\n        .unwrap();\n\n        Some(value_to_remove)\n    }\n\n    /// Retains only the elements specified by the predicate.\n    pub fn retain<F>(&mut self, mut f: F)\n    where\n        F: FnMut(&T) -> bool,\n    {\n        let mut i = 0;\n        while i < self.length {\n            if !f(&self.get(i).unwrap()) {\n                self.remove(i).unwrap();\n            } else {\n                i += 1;\n            }\n        }\n    }\n\n    #[inline(always)]\n    fn compute_prefix_bytes_for_index(&self, 
index: u64) -> Vec<u8> {\n        compute_prefix_bytes_for_index(&self.prefix, index)\n    }\n\n    fn write(&self, index: u64, value: T) {\n        let prefix_bytes = self.compute_prefix_bytes_for_index(index);\n        let prefix = Keyspace::Context(&prefix_bytes);\n        casper::write(prefix, &borsh::to_vec(&value).unwrap()).unwrap();\n    }\n}\n\nfn compute_prefix_bytes_for_index(prefix: &str, index: u64) -> Vec<u8> {\n    let mut prefix_bytes = prefix.as_bytes().to_owned();\n    prefix_bytes.extend(&index.to_le_bytes());\n    prefix_bytes\n}\n\n#[cfg(all(test, feature = \"std\"))]\npub(crate) mod tests {\n    use core::ptr::NonNull;\n\n    use self::casper::native::dispatch;\n\n    use super::*;\n\n    const TEST_VEC_PREFIX: &str = \"test_vector\";\n    type VecU64 = Vector<u64>;\n\n    fn get_vec_elements_from_storage(prefix: &str) -> Vec<u64> {\n        let mut values = Vec::new();\n        for idx in 0..64 {\n            let prefix = compute_prefix_bytes_for_index(prefix, idx);\n            let mut value: [u8; 8] = [0; 8];\n            let result = casper::read(Keyspace::Context(&prefix), |size| {\n                assert_eq!(size, 8);\n                NonNull::new(value.as_mut_ptr())\n            })\n            .unwrap();\n\n            if result.is_some() {\n                values.push(u64::from_le_bytes(value));\n            }\n        }\n        values\n    }\n\n    #[test]\n    fn should_not_panic_with_empty_vec() {\n        dispatch(|| {\n            let mut vec = VecU64::new(TEST_VEC_PREFIX);\n            assert_eq!(vec.len(), 0);\n            assert_eq!(vec.remove(0), None);\n            vec.retain(|_| false);\n            let _ = vec.binary_search(&123);\n            assert_eq!(\n                get_vec_elements_from_storage(TEST_VEC_PREFIX),\n                Vec::<u64>::new()\n            );\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn should_retain() {\n        dispatch(|| {\n            let mut vec = 
VecU64::new(TEST_VEC_PREFIX);\n\n            vec.push(1);\n            vec.push(2);\n            vec.push(3);\n            vec.push(4);\n            vec.push(5);\n\n            vec.retain(|v| *v % 2 == 0);\n\n            let vec: Vec<_> = vec.iter().collect();\n            assert_eq!(vec, vec![2, 4]);\n\n            assert_eq!(get_vec_elements_from_storage(TEST_VEC_PREFIX), vec![2, 4]);\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn test_vec() {\n        dispatch(|| {\n            let mut vec = VecU64::new(TEST_VEC_PREFIX);\n\n            assert!(vec.get(0).is_none());\n            vec.push(111);\n            assert_eq!(vec.get(0), Some(111));\n            vec.push(222);\n            assert_eq!(vec.get(1), Some(222));\n\n            vec.insert(0, 42);\n            vec.insert(0, 41);\n            vec.insert(1, 43);\n            vec.insert(5, 333);\n            vec.insert(5, 334);\n            assert_eq!(vec.remove(5), Some(334));\n            assert_eq!(vec.remove(55), None);\n\n            let mut iter = vec.iter();\n            assert_eq!(iter.next(), Some(41));\n            assert_eq!(iter.next(), Some(43));\n            assert_eq!(iter.next(), Some(42));\n            assert_eq!(iter.next(), Some(111));\n            assert_eq!(iter.next(), Some(222));\n            assert_eq!(iter.next(), Some(333));\n            assert_eq!(iter.next(), None);\n\n            {\n                let ser = borsh::to_vec(&vec).unwrap();\n                let deser: Vector<u64> = borsh::from_slice(&ser).unwrap();\n                let mut iter = deser.iter();\n                assert_eq!(iter.next(), Some(41));\n                assert_eq!(iter.next(), Some(43));\n                assert_eq!(iter.next(), Some(42));\n                assert_eq!(iter.next(), Some(111));\n                assert_eq!(iter.next(), Some(222));\n                assert_eq!(iter.next(), Some(333));\n                assert_eq!(iter.next(), None);\n            }\n\n            assert_eq!(\n                
get_vec_elements_from_storage(TEST_VEC_PREFIX),\n                vec![41, 43, 42, 111, 222, 333]\n            );\n\n            let vec2 = VecU64::new(\"test1\");\n            assert_eq!(vec2.get(0), None);\n\n            assert_eq!(get_vec_elements_from_storage(\"test1\"), Vec::<u64>::new());\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn test_pop() {\n        dispatch(|| {\n            let mut vec = VecU64::new(TEST_VEC_PREFIX);\n            assert_eq!(vec.pop(), None);\n            vec.push(1);\n            vec.push(2);\n            assert_eq!(vec.pop(), Some(2));\n            assert_eq!(vec.len(), 1);\n            assert_eq!(vec.pop(), Some(1));\n            assert!(vec.is_empty());\n\n            assert_eq!(\n                get_vec_elements_from_storage(TEST_VEC_PREFIX),\n                Vec::<u64>::new()\n            );\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn test_contains() {\n        dispatch(|| {\n            let mut vec = VecU64::new(TEST_VEC_PREFIX);\n            vec.push(1);\n            vec.push(2);\n            assert!(vec.contains(&1));\n            assert!(vec.contains(&2));\n            assert!(!vec.contains(&3));\n            vec.remove(0);\n            assert!(!vec.contains(&1));\n            assert_eq!(get_vec_elements_from_storage(TEST_VEC_PREFIX), vec![2]);\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn test_clear() {\n        dispatch(|| {\n            let mut vec = VecU64::new(TEST_VEC_PREFIX);\n            vec.push(1);\n            vec.push(2);\n            vec.clear();\n            assert_eq!(vec.len(), 0);\n            assert!(vec.is_empty());\n            assert_eq!(vec.get(0), None);\n            vec.push(3);\n            assert_eq!(vec.get(0), Some(3));\n\n            assert_eq!(get_vec_elements_from_storage(TEST_VEC_PREFIX), vec![3]);\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn test_binary_search() {\n        dispatch(|| {\n            let mut vec = 
VecU64::new(TEST_VEC_PREFIX);\n            vec.push(1);\n            vec.push(2);\n            vec.push(3);\n            vec.push(4);\n            vec.push(5);\n            assert_eq!(vec.binary_search(&3), Ok(2));\n            assert_eq!(vec.binary_search(&0), Err(0));\n            assert_eq!(vec.binary_search(&6), Err(5));\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn test_swap_remove() {\n        dispatch(|| {\n            let mut vec = VecU64::new(TEST_VEC_PREFIX);\n            vec.push(1);\n            vec.push(2);\n            vec.push(3);\n            vec.push(4);\n            assert_eq!(vec.swap_remove(1), Some(2));\n            assert_eq!(vec.iter().collect::<Vec<_>>(), vec![1, 4, 3]);\n            assert_eq!(vec.swap_remove(2), Some(3));\n            assert_eq!(vec.iter().collect::<Vec<_>>(), vec![1, 4]);\n\n            assert_eq!(get_vec_elements_from_storage(TEST_VEC_PREFIX), vec![1, 4]);\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn test_insert_at_len() {\n        dispatch(|| {\n            let mut vec = VecU64::new(TEST_VEC_PREFIX);\n            vec.push(1);\n            vec.insert(1, 2);\n            assert_eq!(vec.iter().collect::<Vec<_>>(), vec![1, 2]);\n            assert_eq!(get_vec_elements_from_storage(TEST_VEC_PREFIX), vec![1, 2]);\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn test_struct_elements() {\n        #[derive(BorshSerialize, BorshDeserialize, PartialEq, Debug)]\n        struct TestStruct {\n            field: u64,\n        }\n\n        dispatch(|| {\n            let mut vec = Vector::new(TEST_VEC_PREFIX);\n            vec.push(TestStruct { field: 1 });\n            vec.push(TestStruct { field: 2 });\n            assert_eq!(vec.get(1), Some(TestStruct { field: 2 }));\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn test_multiple_operations() {\n        dispatch(|| {\n            let mut vec = VecU64::new(TEST_VEC_PREFIX);\n            assert!(vec.is_empty());\n            
vec.push(1);\n            vec.insert(0, 2);\n            vec.push(3);\n            assert_eq!(vec.iter().collect::<Vec<_>>(), vec![2, 1, 3]);\n            assert_eq!(vec.swap_remove(0), Some(2));\n            assert_eq!(vec.iter().collect::<Vec<_>>(), vec![3, 1]);\n            assert_eq!(vec.pop(), Some(1));\n            assert_eq!(vec.get(0), Some(3));\n            vec.clear();\n            assert!(vec.is_empty());\n\n            assert_eq!(\n                get_vec_elements_from_storage(TEST_VEC_PREFIX),\n                Vec::<u64>::new()\n            );\n        })\n        .unwrap();\n    }\n\n    #[test]\n    fn test_remove_invalid_index() {\n        dispatch(|| {\n            let mut vec = VecU64::new(TEST_VEC_PREFIX);\n            vec.push(1);\n            assert_eq!(vec.remove(1), None);\n            assert_eq!(vec.remove(0), Some(1));\n            assert_eq!(vec.remove(0), None);\n        })\n        .unwrap();\n    }\n\n    #[test]\n    #[should_panic(expected = \"index out of bounds\")]\n    fn test_insert_out_of_bounds() {\n        dispatch(|| {\n            let mut vec = VecU64::new(TEST_VEC_PREFIX);\n            vec.insert(1, 1);\n        })\n        .unwrap();\n    }\n}\n"
  },
  {
    "path": "smart_contracts/sdk/src/collections.rs",
    "content": "mod lookup_key;\n\nmod iterable_map;\nmod iterable_set;\nmod map;\nmod set;\npub mod sorted_vector;\nmod vector;\n\npub use map::Map;\npub use set::Set;\npub use vector::Vector;\n\npub use iterable_map::{IterableMap, IterableMapHash, IterableMapIter, IterableMapPtr};\npub use iterable_set::IterableSet;\n"
  },
  {
    "path": "smart_contracts/sdk/src/contrib/access_control.rs",
    "content": "#[allow(unused_imports)]\nuse crate as casper_contract_sdk; // Workaround for absolute crate path in derive CasperABI macro\n\nuse casper_contract_macros::casper;\n\nuse crate::{\n    casper::{self, Entity},\n    collections::{sorted_vector::SortedVector, Map},\n};\n\n/// A role is a unique identifier for a specific permission or set of permissions.\n///\n/// You can use `blake2b256` macro to generate a unique identifier for a role at compile time.\npub type Role = [u8; 32];\n\n/// A role is a unique identifier for a specific permission or set of permissions.\nconst ROLES_PREFIX: &str = \"roles\";\n\n/// The state of the access control contract, which contains a mapping of entities to their roles.\n#[casper(path = \"crate\")]\npub struct AccessControlState {\n    roles: Map<Entity, SortedVector<Role>>,\n}\n\nimpl AccessControlState {\n    /// Creates a new instance of `AccessControlState`.\n    pub fn new() -> Self {\n        Self {\n            roles: Map::new(ROLES_PREFIX),\n        }\n    }\n}\n\nimpl Default for AccessControlState {\n    fn default() -> Self {\n        Self::new()\n    }\n}\n\n/// Represents the possible errors that can occur during access control operations.\n#[casper(path = \"crate\")]\n#[derive(PartialEq, Eq, Copy, Clone, Debug)]\npub enum AccessControlError {\n    /// The caller is not authorized to perform the action.\n    NotAuthorized,\n}\n\n/// The AccessControl trait provides a simple role-based access control mechanism.\n/// It allows for multiple roles to be assigned to an account, and provides functions to check,\n/// grant, and revoke roles.\n/// It also provides functions to check if the caller has a specific role or any of a set of roles.\n///\n/// The roles are stored in a `Map` where the key is the account address and the value is a\n/// `SortedVector` of roles.\n///\n/// None of these methods are turned into smart contract entry points, so they are not exposed\n/// accidentally.\n///\n/// The `AccessControl` 
trait is designed to be used with the `casper` macro, which generates\n/// the necessary boilerplate code for the contract.\n#[casper(path = \"crate\", export = true)]\npub trait AccessControl {\n    /// The state of the contract, which contains the roles.\n    #[casper(private)]\n    fn state(&self) -> &AccessControlState;\n    /// The mutable state of the contract, which allows modifying the roles.\n    #[casper(private)]\n    fn state_mut(&mut self) -> &mut AccessControlState;\n\n    /// Checks if the given account has the specified role.\n    #[casper(private)]\n    fn has_role(&self, entity: Entity, role: Role) -> bool {\n        match self.state().roles.get(&entity) {\n            Some(roles) => roles.contains(&role),\n            None => false,\n        }\n    }\n\n    #[casper(private)]\n    fn has_any_role(&self, entity: Entity, roles: &[Role]) -> bool {\n        match self.state().roles.get(&entity) {\n            Some(roles_vec) => roles_vec.iter().any(|r| roles.contains(&r)),\n            None => false,\n        }\n    }\n\n    /// Grants a role to an account. If the account already has the role, it does nothing.\n    #[casper(private)]\n    fn grant_role(&mut self, entity: Entity, role: Role) {\n        match self.state_mut().roles.get(&entity) {\n            Some(mut roles) => {\n                if roles.contains(&role) {\n                    return;\n                }\n                roles.push(role);\n            }\n            None => {\n                let mut roles = SortedVector::new(format!(\n                    \"{ROLES_PREFIX}-{:02x}{}\",\n                    entity.tag(),\n                    base16::encode_lower(&entity.address())\n                ));\n                roles.push(role);\n                self.state_mut().roles.insert(&entity, &roles);\n            }\n        }\n    }\n\n    /// Revokes a role from an account. 
If the account does not have the role, it does nothing.\n    #[casper(private)]\n    fn revoke_role(&mut self, entity: Entity, role: Role) {\n        if let Some(mut roles) = self.state_mut().roles.get(&entity) {\n            roles.retain(|r| r != &role);\n        }\n    }\n\n    /// Checks if the caller has the specified role and reverts if not.\n    #[casper(private)]\n    fn require_role(&self, role: Role) -> Result<(), AccessControlError> {\n        let caller = casper::get_caller();\n        if !self.has_role(caller, role) {\n            // Caller does not have specified role.\n            return Err(AccessControlError::NotAuthorized);\n        }\n        Ok(())\n    }\n\n    /// Checks if the caller has any of the specified roles and reverts if not.\n    #[casper(private)]\n    fn require_any_role(&self, roles: &[Role]) -> Result<(), AccessControlError> {\n        let caller = casper::get_caller();\n        if !self.has_any_role(caller, roles) {\n            // Caller does not have any of the specified roles.\n            return Err(AccessControlError::NotAuthorized);\n        }\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "smart_contracts/sdk/src/contrib/cep18.rs",
    "content": "//! CEP-18 token standard.\n//!\n//! This module implements the CEP-18 token standard, which is a fungible token standard\n//! for the Casper blockchain. It provides a set of functions and traits for creating, transferring,\n//! and managing fungible tokens.\n//!\n//! The CEP-18 standard is designed to be simple and efficient, allowing developers to easily\n//! create and manage fungible tokens on the Casper blockchain. It includes support for\n//! minting, burning, and transferring tokens, as well as managing allowances and balances.\n//!\n//! The standard also includes support for events, allowing developers to emit events\n//! when tokens are transferred, minted, or burned. This allows for easy tracking\n//! and monitoring of token activity on the blockchain.\n//!\n//! It only requires implementation of `CEP18` trait for your contract to receive already\n//! implemented entry points.\n//!\n//! # Example CEP18 token contract\n//!\n//! ```rust\n//! use casper_contract_sdk::prelude::*;\n//! use casper_contract_sdk::contrib::cep18::{CEP18, CEP18State, CEP18Ext, Mintable, Burnable};\n//! # use casper_contract_sdk::collections::Map;\n//! # use casper_contract_sdk::macros::casper;\n//! # use casper_contract_sdk::types::U256;\n//!\n//! #[casper(contract_state)]\n//! struct MyToken {\n//!    state: CEP18State,\n//! }\n//!\n//! impl Default for MyToken {\n//!   fn default() -> Self {\n//!     Self {\n//!       state: CEP18State::new(\"MyToken\", \"MTK\", 18, U256::from(10_000_000_000u64)),\n//!     }\n//!   }\n//! }\n//!\n//! #[casper]\n//! impl MyToken {\n//!   #[casper(constructor)]\n//!   pub fn new() -> Self {\n//!     let my_token = Self::default();\n//!     // Perform extra initialization if needed i.e. mint tokens, set genesis balance holders etc.\n//!     my_token\n//!   }\n//! }\n//!\n//! #[casper(path = casper_contract_sdk::contrib::cep18)]\n//! impl CEP18 for MyToken {\n//!   fn state(&self) -> &CEP18State {\n//!     &self.state\n//!   
}\n//!\n//!   fn state_mut(&mut self) -> &mut CEP18State {\n//!     &mut self.state\n//!   }\n//! }\n//! ```\nuse bnum::types::U256;\nuse borsh::{BorshDeserialize, BorshSerialize};\nuse casper_contract_macros::CasperABI;\n\nuse super::access_control::{AccessControl, AccessControlError, Role};\n#[allow(unused_imports)]\nuse crate as casper_contract_sdk;\nuse crate::{collections::Map, macros::blake2b256, prelude::*};\n\n/// While the code consuming this contract needs to define further error variants, it can\n/// return those via the `Error::User` variant or equivalently via the `ApiError::User`\n/// variant.\n#[derive(Debug, PartialEq, Eq, CasperABI, BorshSerialize, BorshDeserialize)]\n#[casper]\npub enum Cep18Error {\n    /// CEP-18 contract called from within an invalid context.\n    InvalidContext,\n    /// Spender does not have enough balance.\n    InsufficientBalance,\n    /// Spender does not have enough allowance approved.\n    InsufficientAllowance,\n    /// Operation would cause an integer overflow.\n    Overflow,\n    /// A required package hash was not specified.\n    PackageHashMissing,\n    /// The package hash specified does not represent a package.\n    PackageHashNotPackage,\n    /// An invalid event mode was specified.\n    InvalidEventsMode,\n    /// The event mode required was not specified.\n    MissingEventsMode,\n    /// An unknown error occurred.\n    Phantom,\n    /// Failed to read the runtime arguments provided.\n    FailedToGetArgBytes,\n    /// The caller does not have sufficient security access.\n    InsufficientRights,\n    /// The list of Admin accounts provided is invalid.\n    InvalidAdminList,\n    /// The list of accounts that can mint tokens is invalid.\n    InvalidMinterList,\n    /// The list of accounts with no access rights is invalid.\n    InvalidNoneList,\n    /// The flag to enable the mint and burn mode is invalid.\n    InvalidEnableMBFlag,\n    /// This contract instance cannot be initialized again.\n    
AlreadyInitialized,\n    ///  The mint and burn mode is disabled.\n    MintBurnDisabled,\n    CannotTargetSelfUser,\n    InvalidBurnTarget,\n}\n\nimpl From<AccessControlError> for Cep18Error {\n    fn from(error: AccessControlError) -> Self {\n        match error {\n            AccessControlError::NotAuthorized => Cep18Error::InsufficientRights,\n        }\n    }\n}\n\n#[casper(message, path = crate)]\npub struct Transfer {\n    pub from: Option<Entity>,\n    pub to: Entity,\n    pub amount: U256,\n}\n\n#[casper(message, path = crate)]\npub struct Approve {\n    pub owner: Entity,\n    pub spender: Entity,\n    pub amount: U256,\n}\n\npub const ADMIN_ROLE: Role = blake2b256!(\"admin\");\npub const MINTER_ROLE: Role = blake2b256!(\"minter\");\n\n#[casper(path = crate)]\npub struct CEP18State {\n    pub name: String,\n    pub symbol: String,\n    pub decimals: u8,\n    pub total_supply: U256,\n    pub balances: Map<Entity, U256>,\n    pub allowances: Map<(Entity, Entity), U256>,\n    pub enable_mint_burn: bool,\n}\n\nimpl CEP18State {\n    fn transfer_balance(\n        &mut self,\n        sender: &Entity,\n        recipient: &Entity,\n        amount: U256,\n    ) -> Result<(), Cep18Error> {\n        if amount.is_zero() {\n            return Ok(());\n        }\n\n        let sender_balance = self.balances.get(sender).unwrap_or_default();\n\n        let new_sender_balance = sender_balance\n            .checked_sub(amount)\n            .ok_or(Cep18Error::InsufficientBalance)?;\n\n        let recipient_balance = self.balances.get(recipient).unwrap_or_default();\n\n        let new_recipient_balance = recipient_balance\n            .checked_add(amount)\n            .ok_or(Cep18Error::Overflow)?;\n\n        self.balances.insert(sender, &new_sender_balance);\n        self.balances.insert(recipient, &new_recipient_balance);\n        Ok(())\n    }\n}\n\nimpl CEP18State {\n    pub fn new(name: &str, symbol: &str, decimals: u8, total_supply: U256) -> CEP18State {\n        
CEP18State {\n            name: name.to_string(),\n            symbol: symbol.to_string(),\n            decimals,\n            total_supply,\n            balances: Map::new(\"balances\"),\n            allowances: Map::new(\"allowances\"),\n            enable_mint_burn: false,\n        }\n    }\n}\n\n#[casper(path = crate, export = true)]\npub trait CEP18 {\n    #[casper(private)]\n    fn state(&self) -> &CEP18State;\n\n    #[casper(private)]\n    fn state_mut(&mut self) -> &mut CEP18State;\n\n    fn name(&self) -> &str {\n        &self.state().name\n    }\n\n    fn symbol(&self) -> &str {\n        &self.state().symbol\n    }\n\n    fn decimals(&self) -> u8 {\n        self.state().decimals\n    }\n\n    fn total_supply(&self) -> U256 {\n        self.state().total_supply\n    }\n\n    fn balance_of(&self, address: Entity) -> U256 {\n        self.state().balances.get(&address).unwrap_or_default()\n    }\n\n    fn allowance(&self, spender: Entity, owner: Entity) -> U256 {\n        self.state()\n            .allowances\n            .get(&(owner, spender))\n            .unwrap_or_default()\n    }\n\n    #[casper(revert_on_error)]\n    fn approve(&mut self, spender: Entity, amount: U256) -> Result<(), Cep18Error> {\n        let owner = casper::get_caller();\n        if owner == spender {\n            return Err(Cep18Error::CannotTargetSelfUser);\n        }\n        let lookup_key = (owner, spender);\n        self.state_mut().allowances.insert(&lookup_key, &amount);\n        casper::emit(Approve {\n            owner,\n            spender,\n            amount,\n        })\n        .expect(\"failed to emit message\");\n        Ok(())\n    }\n\n    #[casper(revert_on_error)]\n    fn decrease_allowance(&mut self, spender: Entity, amount: U256) -> Result<(), Cep18Error> {\n        let owner = casper::get_caller();\n        if owner == spender {\n            return Err(Cep18Error::CannotTargetSelfUser);\n        }\n        let lookup_key = (owner, spender);\n        let allowance = 
self.state().allowances.get(&lookup_key).unwrap_or_default();\n        let allowance = allowance.saturating_sub(amount);\n        self.state_mut().allowances.insert(&lookup_key, &allowance);\n        Ok(())\n    }\n\n    #[casper(revert_on_error)]\n    fn increase_allowance(&mut self, spender: Entity, amount: U256) -> Result<(), Cep18Error> {\n        let owner = casper::get_caller();\n        if owner == spender {\n            return Err(Cep18Error::CannotTargetSelfUser);\n        }\n        let lookup_key = (owner, spender);\n        let allowance = self.state().allowances.get(&lookup_key).unwrap_or_default();\n        let allowance = allowance.saturating_add(amount);\n        self.state_mut().allowances.insert(&lookup_key, &allowance);\n        Ok(())\n    }\n\n    #[casper(revert_on_error)]\n    fn transfer(&mut self, recipient: Entity, amount: U256) -> Result<(), Cep18Error> {\n        let sender = casper::get_caller();\n        if sender == recipient {\n            return Err(Cep18Error::CannotTargetSelfUser);\n        }\n        self.state_mut()\n            .transfer_balance(&sender, &recipient, amount)?;\n\n        // NOTE: This operation is fallible, although it's not expected to fail under any\n        // circumstances (number of topics per contract, payload size, topic size, number of\n        // messages etc. 
are all under control).\n        casper::emit(Transfer {\n            from: Some(sender),\n            to: recipient,\n            amount,\n        })\n        .expect(\"failed to emit message\");\n\n        Ok(())\n    }\n\n    #[casper(revert_on_error)]\n    fn transfer_from(\n        &mut self,\n        owner: Entity,\n        recipient: Entity,\n        amount: U256,\n    ) -> Result<(), Cep18Error> {\n        let spender = casper::get_caller();\n        if owner == recipient {\n            return Err(Cep18Error::CannotTargetSelfUser);\n        }\n\n        if amount.is_zero() {\n            return Ok(());\n        }\n\n        let spender_allowance = self\n            .state()\n            .allowances\n            .get(&(owner, spender))\n            .unwrap_or_default();\n        let new_spender_allowance = spender_allowance\n            .checked_sub(amount)\n            .ok_or(Cep18Error::InsufficientAllowance)?;\n\n        self.state_mut()\n            .transfer_balance(&owner, &recipient, amount)?;\n\n        self.state_mut()\n            .allowances\n            .insert(&(owner, spender), &new_spender_allowance);\n\n        casper::emit(Transfer {\n            from: Some(owner),\n            to: recipient,\n            amount,\n        })\n        .expect(\"failed to emit message\");\n\n        Ok(())\n    }\n}\n\n#[casper(path = crate, export = true)]\npub trait Mintable: CEP18 + AccessControl {\n    #[casper(revert_on_error)]\n    fn mint(&mut self, owner: Entity, amount: U256) -> Result<(), Cep18Error> {\n        if !CEP18::state(self).enable_mint_burn {\n            return Err(Cep18Error::MintBurnDisabled);\n        }\n\n        AccessControl::require_any_role(self, &[ADMIN_ROLE, MINTER_ROLE])?;\n\n        let balance = CEP18::state(self).balances.get(&owner).unwrap_or_default();\n        let new_balance = balance.checked_add(amount).ok_or(Cep18Error::Overflow)?;\n        CEP18::state_mut(self).balances.insert(&owner, &new_balance);\n        
CEP18::state_mut(self).total_supply = CEP18::state(self)\n            .total_supply\n            .checked_add(amount)\n            .ok_or(Cep18Error::Overflow)?;\n\n        casper::emit(Transfer {\n            from: None,\n            to: owner,\n            amount,\n        })\n        .expect(\"failed to emit message\");\n\n        Ok(())\n    }\n}\n\n#[casper(path = crate, export = true)]\npub trait Burnable: CEP18 {\n    #[casper(revert_on_error)]\n    fn burn(&mut self, owner: Entity, amount: U256) -> Result<(), Cep18Error> {\n        if !self.state().enable_mint_burn {\n            return Err(Cep18Error::MintBurnDisabled);\n        }\n\n        if owner != casper::get_caller() {\n            return Err(Cep18Error::InvalidBurnTarget);\n        }\n\n        let balance = self.state().balances.get(&owner).unwrap_or_default();\n        let new_balance = balance.checked_sub(amount).ok_or(Cep18Error::InsufficientBalance)?;\n        self.state_mut().balances.insert(&owner, &new_balance);\n        self.state_mut().total_supply = self\n            .state()\n            .total_supply\n            .checked_sub(amount)\n            .ok_or(Cep18Error::Overflow)?;\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "smart_contracts/sdk/src/contrib/ownable.rs",
    "content": "//! This module provides an implementation of the Ownable pattern for smart contracts.\n//!\n//! The Ownable pattern is a common design pattern in smart contracts that allows for\n//! a single owner to control the contract. This module provides a simple implementation\n//! of this pattern, allowing for ownership to be transferred or renounced.\nuse borsh::{BorshDeserialize, BorshSerialize};\nuse casper_contract_macros::CasperABI;\n\n#[allow(unused_imports)]\nuse crate as casper_contract_sdk;\nuse crate::{casper::Entity, macros::casper};\n\n/// The state of the Ownable contract, which contains the owner of the contract.\n#[casper(path = crate)]\npub struct OwnableState {\n    owner: Option<Entity>,\n}\n\nimpl Default for OwnableState {\n    fn default() -> Self {\n        Self {\n            owner: Some(crate::casper::get_caller()),\n        }\n    }\n}\n\n/// Represents the possible errors that can occur during ownership operations.\n#[derive(CasperABI, BorshSerialize, BorshDeserialize)]\n#[casper(path = crate)]\npub enum OwnableError {\n    /// The caller is not authorized to perform the action.\n    NotAuthorized,\n}\n\n/// The Ownable trait provides a simple ownership model for smart contracts.\n/// It allows for a single owner to be set, and provides functions to transfer or renounce\n/// ownership.\n#[casper(path = crate, export = true)]\npub trait Ownable {\n    #[casper(private)]\n    fn state(&self) -> &OwnableState;\n    #[casper(private)]\n    fn state_mut(&mut self) -> &mut OwnableState;\n\n    /// Checks if the caller is the owner of the contract.\n    ///\n    /// This function is used to restrict access to certain functions to only the owner.\n    #[casper(private)]\n    fn only_owner(&self) -> Result<(), OwnableError> {\n        let caller = crate::casper::get_caller();\n        match self.state().owner {\n            Some(owner) if caller != owner => {\n                return Err(OwnableError::NotAuthorized);\n            }\n         
   None => {\n                return Err(OwnableError::NotAuthorized);\n            }\n            Some(_owner) => {}\n        }\n        Ok(())\n    }\n\n    /// Transfers ownership of the contract to a new owner.\n    #[casper(revert_on_error)]\n    fn transfer_ownership(&mut self, new_owner: Entity) -> Result<(), OwnableError> {\n        self.only_owner()?;\n        self.state_mut().owner = Some(new_owner);\n        Ok(())\n    }\n\n    /// Returns the current owner of the contract.\n    fn owner(&self) -> Option<Entity> {\n        self.state().owner\n    }\n\n    /// Renounces ownership of the contract, making it no longer owned by any entity.\n    ///\n    /// This function can only be called by the current owner of the contract\n    /// once the contract is deployed. After calling this function, the contract\n    /// will no longer have an owner, and no entity will be able to call\n    /// functions that require ownership.\n    #[casper(revert_on_error)]\n    fn renounce_ownership(&mut self) -> Result<(), OwnableError> {\n        self.only_owner()?;\n        self.state_mut().owner = None;\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "smart_contracts/sdk/src/contrib/pausable.rs",
    "content": "//! This module provides a trait for pausable contracts.\n//!\n//! The `Pausable` trait allows contracts to be paused and unpaused, which can be useful\n//! in scenarios where the contract needs to be temporarily disabled for maintenance or\n//! security reasons. The trait provides methods to check the current pause state, as well\n//! as to pause and unpause the contract.\n//!\n//! The `Pausable` trait is designed to be used with the `casper` macro, which generates\n//! the necessary boilerplate code for the contract.\n//!\n//! For security reasons you may want to combine `AccessControl` or `Ownable` with\n//! this trait to ensure that only selected entities can manage the pause state.\nuse crate::{self as casper_contract_sdk, casper, casper::Entity, macros::casper};\n\n#[casper]\npub struct PausedState {\n    paused: bool,\n}\n\n#[casper(path = crate)]\npub enum PausableError {\n    EnforcedPause,\n    ExpectedPause,\n}\n\n/// The `Paused` event is emitted when the contract is paused.\n#[casper(message, path = crate)]\npub struct Paused {\n    entity: Entity,\n}\n\n/// The `Unpaused` event is emitted when the contract is unpaused.\n#[casper(message, path = crate)]\npub struct Unpaused {\n    entity: Entity,\n}\n\n/// Pausable is a trait that provides a simple way to pause and unpause a contract.\n#[casper(path = crate, export = true)]\npub trait Pausable {\n    /// The state of the contract, which contains the paused state.\n    #[casper(private)]\n    fn state(&self) -> &PausedState;\n    /// The mutable state of the contract, which allows modifying the paused state.\n    #[casper(private)]\n    fn state_mut(&mut self) -> &mut PausedState;\n\n    /// Checks if the contract is paused.\n    #[casper(private)]\n    fn paused(&self) -> bool {\n        self.state().paused\n    }\n\n    #[casper(private)]\n    fn pause(&mut self) -> Result<(), PausableError> {\n        self.enforce_unpaused()?;\n        self.state_mut().paused = true;\n        
casper::emit(Paused {\n            entity: casper::get_caller(),\n        })\n        .expect(\"Emit\");\n        Ok(())\n    }\n\n    #[casper(private)]\n    fn unpause(&mut self) -> Result<(), PausableError> {\n        self.enforce_paused()?;\n        self.state_mut().paused = false;\n        casper::emit(Unpaused {\n            entity: casper::get_caller(),\n        })\n        .expect(\"Emit\");\n        Ok(())\n    }\n\n    #[casper(private)]\n    fn enforce_paused(&self) -> Result<(), PausableError> {\n        if self.paused() {\n            Ok(())\n        } else {\n            Err(PausableError::ExpectedPause)\n        }\n    }\n\n    #[casper(private)]\n    fn enforce_unpaused(&self) -> Result<(), PausableError> {\n        if !self.paused() {\n            Ok(())\n        } else {\n            Err(PausableError::EnforcedPause)\n        }\n    }\n}\n"
  },
  {
    "path": "smart_contracts/sdk/src/contrib.rs",
    "content": "pub mod access_control;\npub mod cep18;\npub mod ownable;\npub mod pausable;\n"
  },
  {
    "path": "smart_contracts/sdk/src/lib.rs",
    "content": "#![cfg_attr(not(feature = \"std\"), no_std)]\n\n#[cfg(not(feature = \"std\"))]\n#[macro_use]\nextern crate alloc;\n\npub mod abi;\npub mod prelude;\npub mod serializers;\n#[cfg(not(target_arch = \"wasm32\"))]\npub use linkme;\n\n#[cfg(not(target_arch = \"wasm32\"))]\npub mod abi_generator;\npub mod casper;\npub mod collections;\npub mod contrib;\n#[cfg(feature = \"std\")]\npub mod schema;\npub mod types;\n\nuse crate::prelude::{marker::PhantomData, ptr::NonNull};\n\nuse crate::serializers::borsh::{BorshDeserialize, BorshSerialize};\nuse casper::{CallResult, Entity};\npub use casper_contract_macros as macros;\npub use casper_contract_sdk_sys as sys;\npub use casper_executor_wasm_common;\nuse types::{Address, CallError};\n\ncfg_if::cfg_if! {\n    if #[cfg(feature = \"std\")] {\n        #[inline]\n        pub fn set_panic_hook() {\n            static SET_HOOK: std::sync::Once = std::sync::Once::new();\n            SET_HOOK.call_once(|| {\n                std::panic::set_hook(Box::new(|panic_info| {\n                    let msg = panic_info.to_string();\n                    casper::print(&msg);\n                }));\n            });\n        }\n    }\n    else {\n        pub fn set_panic_hook() {\n            // TODO: What to do?\n        }\n    }\n}\n\npub fn reserve_vec_space(vec: &mut Vec<u8>, size: usize) -> Option<NonNull<u8>> {\n    if size == 0 {\n        None\n    } else {\n        *vec = Vec::with_capacity(size);\n        unsafe {\n            vec.set_len(size);\n        }\n        NonNull::new(vec.as_mut_ptr())\n    }\n}\n\npub trait ContractRef {\n    fn new() -> Self;\n}\n\npub trait ToCallData {\n    type Return<'a>;\n\n    fn entry_point(&self) -> &str;\n\n    fn input_data(&self) -> Option<crate::prelude::Vec<u8>>;\n}\n\n/// To derive this contract you have to use `#[casper]` macro on top of impl block.\n///\n/// This proc macro handles generation of a manifest.\npub trait Contract {\n    type Ref: ContractRef;\n\n    fn name() -> 
&'static str;\n    fn create<T: ToCallData>(\n        value: u64,\n        call_data: T,\n    ) -> Result<ContractHandle<Self::Ref>, CallError>;\n    fn default_create() -> Result<ContractHandle<Self::Ref>, CallError>;\n    fn upgrade<T: ToCallData>(code: Option<&[u8]>, call_data: T) -> Result<(), CallError>;\n}\n\n#[derive(Debug)]\npub enum Access {\n    Private,\n    Public,\n}\n\n// A println! like macro that calls `host::print` function.\n#[cfg(target_arch = \"wasm32\")]\n#[macro_export]\nmacro_rules! log {\n    ($($arg:tt)*) => ({\n        $crate::prelude::casper::print(&$crate::prelude::format!($($arg)*));\n    })\n}\n\n#[cfg(not(target_arch = \"wasm32\"))]\n#[macro_export]\nmacro_rules! log {\n    ($($arg:tt)*) => ({\n        eprintln!(\"📝 {}\", &$crate::prelude::format!($($arg)*));\n    })\n}\n\n#[macro_export]\nmacro_rules! revert {\n    () => {{\n        $crate::casper::ret(\n            $crate::casper_executor_wasm_common::flags::ReturnFlags::REVERT,\n            None,\n        );\n        unreachable!()\n    }};\n    ($arg:expr) => {{\n        let value = $arg;\n        let data =\n            $crate::serializers::borsh::to_vec(&value).expect(\"Revert value should serialize\");\n        $crate::casper::ret(\n            $crate::casper_executor_wasm_common::flags::ReturnFlags::REVERT,\n            Some(data.as_slice()),\n        );\n        #[allow(unreachable_code)]\n        value\n    }};\n}\n\npub trait UnwrapOrRevert<T> {\n    /// Unwraps the value into its inner type or calls [`crate::casper::ret`] with a\n    /// predetermined error code on failure.\n    fn unwrap_or_revert(self) -> T;\n}\n\nimpl<T, E> UnwrapOrRevert<T> for Result<T, E>\nwhere\n    E: BorshSerialize,\n{\n    fn unwrap_or_revert(self) -> T {\n        self.unwrap_or_else(|error| {\n            let error_data = borsh::to_vec(&error).expect(\"Revert value should serialize\");\n            casper::ret(\n                casper_executor_wasm_common::flags::ReturnFlags::REVERT,\n           
     Some(error_data.as_slice()),\n            );\n            unreachable!(\"Support for unwrap_or_revert\")\n        })\n    }\n}\n\n#[derive(Debug)]\npub struct ContractHandle<T: ContractRef> {\n    contract_address: Address,\n    marker: PhantomData<T>,\n}\n\nimpl<T: ContractRef> ContractHandle<T> {\n    #[must_use]\n    pub const fn from_address(contract_address: Address) -> Self {\n        ContractHandle {\n            contract_address,\n            marker: PhantomData,\n        }\n    }\n\n    pub fn build_call(&self) -> CallBuilder<T> {\n        CallBuilder {\n            address: self.contract_address,\n            marker: PhantomData,\n            transferred_value: None,\n        }\n    }\n\n    /// A shorthand form to call contracts with default settings.\n    #[inline]\n    pub fn call<'a, CallData: ToCallData>(\n        &self,\n        func: impl FnOnce(T) -> CallData,\n    ) -> Result<CallData::Return<'a>, CallError>\n    where\n        CallData::Return<'a>: BorshDeserialize,\n    {\n        self.build_call().call(func)\n    }\n\n    /// A shorthand form to call contracts with default settings.\n    #[inline]\n    pub fn try_call<CallData: ToCallData>(\n        &self,\n        func: impl FnOnce(T) -> CallData,\n    ) -> Result<CallResult<CallData>, CallError> {\n        self.build_call().try_call(func)\n    }\n\n    #[must_use]\n    pub fn contract_address(&self) -> Address {\n        self.contract_address\n    }\n\n    #[must_use]\n    pub fn entity(&self) -> Entity {\n        Entity::Contract(self.contract_address)\n    }\n\n    /// Returns the balance of the contract.\n    #[must_use]\n    pub fn balance(&self) -> u64 {\n        casper::get_balance_of(&Entity::Contract(self.contract_address))\n    }\n}\n\npub struct CallBuilder<T: ContractRef> {\n    address: Address,\n    transferred_value: Option<u64>,\n    marker: PhantomData<T>,\n}\n\nimpl<T: ContractRef> CallBuilder<T> {\n    #[must_use]\n    pub fn new(address: Address) -> Self {\n        
CallBuilder {\n            address,\n            transferred_value: None,\n            marker: PhantomData,\n        }\n    }\n\n    #[must_use]\n    pub fn with_transferred_value(mut self, transferred_value: u64) -> Self {\n        self.transferred_value = Some(transferred_value);\n        self\n    }\n\n    /// Casts the call builder to a different contract reference.\n    #[must_use]\n    pub fn cast<U: ContractRef>(self) -> CallBuilder<U> {\n        CallBuilder {\n            address: self.address,\n            transferred_value: self.transferred_value,\n            marker: PhantomData,\n        }\n    }\n\n    pub fn try_call<CallData: ToCallData>(\n        &self,\n        func: impl FnOnce(T) -> CallData,\n    ) -> Result<CallResult<CallData>, CallError> {\n        let inst = T::new();\n        let call_data = func(inst);\n        casper::call(\n            &self.address,\n            self.transferred_value.unwrap_or(0),\n            call_data,\n        )\n    }\n\n    pub fn call<'a, CallData: ToCallData>(\n        &self,\n        func: impl FnOnce(T) -> CallData,\n    ) -> Result<CallData::Return<'a>, CallError>\n    where\n        CallData::Return<'a>: BorshDeserialize,\n    {\n        let inst = T::new();\n        let call_data = func(inst);\n        let call_result = casper::call(\n            &self.address,\n            self.transferred_value.unwrap_or(0),\n            call_data,\n        )?;\n        call_result.into_result()\n    }\n}\n\npub struct ContractBuilder<'a, T: ContractRef> {\n    transferred_value: Option<u64>,\n    code: Option<&'a [u8]>,\n    seed: Option<&'a [u8; 32]>,\n    marker: PhantomData<T>,\n}\n\nimpl<T: ContractRef> Default for ContractBuilder<'_, T> {\n    fn default() -> Self {\n        Self::new()\n    }\n}\n\nimpl<'a, T: ContractRef> ContractBuilder<'a, T> {\n    #[must_use]\n    pub fn new() -> Self {\n        ContractBuilder {\n            transferred_value: None,\n            code: None,\n            seed: None,\n          
  marker: PhantomData,\n        }\n    }\n\n    #[must_use]\n    pub fn with_transferred_value(mut self, transferred_value: u64) -> Self {\n        self.transferred_value = Some(transferred_value);\n        self\n    }\n\n    #[must_use]\n    pub fn with_code(mut self, code: &'a [u8]) -> Self {\n        self.code = Some(code);\n        self\n    }\n\n    #[must_use]\n    pub fn with_seed(mut self, seed: &'a [u8; 32]) -> Self {\n        self.seed = Some(seed);\n        self\n    }\n\n    pub fn create<CallData: ToCallData>(\n        &self,\n        func: impl FnOnce() -> CallData,\n    ) -> Result<ContractHandle<T>, CallError>\n    where\n        CallData::Return<'a>: BorshDeserialize,\n    {\n        let value = self.transferred_value.unwrap_or(0);\n        let call_data = func();\n        let input_data = call_data.input_data();\n        let seed = self.seed;\n        let create_result = casper::create(\n            self.code,\n            value,\n            Some(call_data.entry_point()),\n            input_data.as_deref(),\n            seed,\n        )?;\n        Ok(ContractHandle::from_address(create_result.contract_address))\n    }\n\n    pub fn default_create(&self) -> Result<ContractHandle<T>, CallError> {\n        if self.transferred_value.is_some() {\n            panic!(\"Value should not be set for default create\");\n        }\n\n        let value = self.transferred_value.unwrap_or(0);\n        let seed = self.seed;\n        let create_result = casper::create(self.code, value, None, None, seed)?;\n        Ok(ContractHandle::from_address(create_result.contract_address))\n    }\n}\n\n/// Trait for converting a message data to a string.\npub trait Message: BorshSerialize {\n    const TOPIC: &'static str;\n    /// Converts the message data to a string.\n    fn payload(&self) -> Vec<u8>;\n}\n\n#[cfg(test)]\nmod tests {\n    #[test]\n    fn test_call_builder() {}\n}\n"
  },
  {
    "path": "smart_contracts/sdk/src/prelude.rs",
    "content": "use cfg_if::cfg_if;\n\ncfg_if! {\n    if #[cfg(feature = \"std\")] {\n        pub use ::std::{format, borrow, string, vec, boxed, fmt, str, marker, ffi, ptr, mem, cmp};\n\n        pub mod collections {\n            pub use ::std::collections::btree_map::{self, BTreeMap};\n            pub use ::std::collections::{linked_list::{self, LinkedList}};\n            pub use ::std::collections::{hash_map::{self, HashMap}};\n            pub use ::std::collections::{btree_set::{self, BTreeSet}};\n        }\n    }\n    else {\n        pub use ::alloc::{format, borrow, string, vec, boxed, fmt, str};\n\n        pub use ::core::{marker, ffi, ptr, mem, cmp};\n\n        pub mod collections {\n            pub use ::alloc::collections::btree_map::{self, BTreeMap};\n            pub use ::alloc::collections::{linked_list::{self, LinkedList}};\n            pub use ::alloc::collections::{hash_map::{self, HashMap}};\n            pub use ::alloc::collections::{btree_set::{self, BTreeSet}};\n        }\n    }\n}\n\npub use self::{\n    borrow::ToOwned,\n    boxed::Box,\n    string::{String, ToString},\n    vec::Vec,\n};\n\npub use crate::{\n    casper::{self, Entity},\n    log,\n    macros::{self, casper, PanicOnDefault},\n    revert,\n};\n\n#[cfg(test)]\nmod tests {\n\n    #[test]\n    fn test_format() {\n        assert_eq!(super::format!(\"Hello, {}!\", \"world\"), \"Hello, world!\");\n    }\n\n    #[test]\n    fn test_string() {\n        let s = super::String::from(\"hello\");\n        assert_eq!(s, \"hello\");\n    }\n\n    #[test]\n    #[allow(clippy::vec_init_then_push)]\n    fn test_vec() {\n        let mut v = super::Vec::new();\n        v.push(1);\n        v.push(2);\n        assert_eq!(v.len(), 2);\n        assert_eq!(v[0], 1);\n        assert_eq!(v[1], 2);\n    }\n}\n"
  },
  {
    "path": "smart_contracts/sdk/src/schema.rs",
    "content": "pub trait CasperSchema {\n    fn schema() -> Schema;\n}\n\nuse std::fmt::LowerHex;\n\nuse bitflags::Flags;\nuse casper_executor_wasm_common::flags::EntryPointFlags;\nuse serde::{Deserialize, Deserializer, Serialize, Serializer};\n\nuse crate::abi::{Declaration, Definitions};\n\npub fn serialize_bits<T, S>(data: &T, serializer: S) -> Result<S::Ok, S::Error>\nwhere\n    S: Serializer,\n    T: Flags,\n    T::Bits: Serialize,\n{\n    data.bits().serialize(serializer)\n}\n\npub fn deserialize_bits<'de, D, F>(deserializer: D) -> Result<F, D::Error>\nwhere\n    D: Deserializer<'de>,\n    F: Flags,\n    F::Bits: Deserialize<'de> + LowerHex,\n{\n    let raw: F::Bits = F::Bits::deserialize(deserializer)?;\n    F::from_bits(raw).ok_or(serde::de::Error::custom(format!(\n        \"Unexpected flags value 0x{raw:#08x}\"\n    )))\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone)]\npub struct SchemaArgument {\n    pub name: String,\n    pub decl: Declaration,\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone)]\npub struct SchemaEntryPoint {\n    pub name: String,\n    pub arguments: Vec<SchemaArgument>,\n    pub result: Declaration,\n    #[serde(\n        serialize_with = \"serialize_bits\",\n        deserialize_with = \"deserialize_bits\"\n    )]\n    pub flags: EntryPointFlags,\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone)]\n#[serde(tag = \"type\")]\npub enum SchemaType {\n    /// Contract schemas contain a state structure that we want to mark in the schema.\n    Contract { state: Declaration },\n    /// Schemas of interface type does not contain state.\n    Interface,\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone)]\npub struct SchemaMessage {\n    pub name: String,\n    pub decl: Declaration,\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone)]\npub struct Schema {\n    pub name: 
String,\n    pub version: Option<String>,\n    #[serde(rename = \"type\")]\n    pub type_: SchemaType,\n    pub definitions: Definitions,\n    pub entry_points: Vec<SchemaEntryPoint>,\n    pub messages: Vec<SchemaMessage>,\n}\n\n#[derive(Debug)]\npub struct EntryPoint<'a, F: Fn()> {\n    pub name: &'a str,\n    pub params: &'a [&'a str],\n    pub func: F,\n}\n\n#[cfg(not(target_family = \"wasm\"))]\nuse std::{cell::RefCell, collections::BTreeMap};\n\n#[cfg(not(target_family = \"wasm\"))]\nthread_local! {\n    pub static DISPATCHER: RefCell<BTreeMap<String, extern \"C\" fn()>> = RefCell::default();\n}\n\n// #[cfg(not(target_family = \"wasm\"))]\n// #[no_mangle]\n// pub unsafe fn register_func(name: &str, f: extern \"C\" fn() -> ()) {\n//     println!(\"registering function {}\", name);\n//     DISPATCHER.with(|foo| foo.borrow_mut().insert(name.to_string(), f));\n// }\n"
  },
  {
    "path": "smart_contracts/sdk/src/selector.rs",
    "content": ""
  },
  {
    "path": "smart_contracts/sdk/src/serializers.rs",
    "content": "pub use ::borsh;\n"
  },
  {
    "path": "smart_contracts/sdk/src/types.rs",
    "content": "use casper_executor_wasm_common::error::{\n    CALLEE_GAS_DEPLETED, CALLEE_NOT_CALLABLE, CALLEE_REVERTED, CALLEE_TRAPPED,\n};\n\nuse crate::{\n    abi::{CasperABI, Declaration, Definition, EnumVariant},\n    prelude::fmt,\n    serializers::borsh::{BorshDeserialize, BorshSerialize},\n};\n\npub type Address = [u8; 32];\npub use bnum::types::U256;\n\n// Keep in sync with [`casper_executor_wasm_common::error::CallError`].\n#[derive(Debug, Copy, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]\n#[borsh(crate = \"crate::serializers::borsh\")]\npub enum CallError {\n    CalleeReverted,\n    CalleeTrapped,\n    CalleeGasDepleted,\n    NotCallable,\n}\n\nimpl fmt::Display for CallError {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        match self {\n            CallError::CalleeReverted => write!(f, \"callee reverted\"),\n            CallError::CalleeTrapped => write!(f, \"callee trapped\"),\n            CallError::CalleeGasDepleted => write!(f, \"callee gas depleted\"),\n            CallError::NotCallable => write!(f, \"not callable\"),\n        }\n    }\n}\n\nimpl TryFrom<u32> for CallError {\n    type Error = ();\n\n    fn try_from(value: u32) -> Result<Self, Self::Error> {\n        match value {\n            CALLEE_REVERTED => Ok(Self::CalleeReverted),\n            CALLEE_TRAPPED => Ok(Self::CalleeTrapped),\n            CALLEE_GAS_DEPLETED => Ok(Self::CalleeGasDepleted),\n            CALLEE_NOT_CALLABLE => Ok(Self::NotCallable),\n            _ => Err(()),\n        }\n    }\n}\n\nimpl CasperABI for CallError {\n    fn populate_definitions(_definitions: &mut crate::abi::Definitions) {}\n\n    fn declaration() -> Declaration {\n        \"CallError\".into()\n    }\n\n    fn definition() -> Definition {\n        Definition::Enum {\n            items: vec![\n                EnumVariant {\n                    name: \"CalleeReverted\".into(),\n                    discriminant: 0,\n                    decl: <()>::declaration(),\n   
             },\n                EnumVariant {\n                    name: \"CalleeTrapped\".into(),\n                    discriminant: 1,\n                    decl: <()>::declaration(),\n                },\n                EnumVariant {\n                    name: \"CalleeGasDepleted\".into(),\n                    discriminant: 2,\n                    decl: <()>::declaration(),\n                },\n                EnumVariant {\n                    name: \"NotCallable\".into(),\n                    discriminant: 3,\n                    decl: <()>::declaration(),\n                },\n            ],\n        }\n    }\n}\n"
  },
  {
    "path": "smart_contracts/sdk_codegen/Cargo.toml",
    "content": "[package]\nname = \"casper-contract-sdk-codegen\"\nversion = \"0.1.3\"\nedition = \"2021\"\ndescription = \"Casper contract sdk codegen package\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\ndocumentation = \"https://docs.rs/casper-contract-sdk-codegen\"\nhomepage = \"https://casper.network\"\nrepository = \"https://github.com/casper-network/casper-node/tree/dev/smart_contracts/sdk_codegen\"\nlicense = \"Apache-2.0\"\n\n[dependencies]\ncasper-contract-sdk = { version = \"0.1.3\", path = \"../sdk\" }\ncodegen = \"0.2.0\"\nserde = { version = \"1\", features = [\"derive\"] }\nserde_json = { version = \"1\", features = [\"preserve_order\"] }\nindexmap = \"2.1.0\"\nsyn = \"2\"\n\n[dev-dependencies]\ntrybuild = \"1\"\ntempfile = \"3.2.0\"\nborsh = { version = \"1.5\", features = [\"derive\"] }\n"
  },
  {
    "path": "smart_contracts/sdk_codegen/src/lib.rs",
    "content": "pub mod support;\n\nuse casper_contract_sdk::{\n    abi::{Declaration, Definition, Primitive},\n    casper_executor_wasm_common::flags::EntryPointFlags,\n    schema::{Schema, SchemaType},\n};\nuse codegen::{Field, Scope, Type};\nuse indexmap::IndexMap;\nuse serde::{Deserialize, Serialize};\nuse std::{\n    collections::{BTreeMap, VecDeque},\n    iter,\n    str::FromStr,\n};\n\nconst DEFAULT_DERIVED_TRAITS: &[&str] = &[\n    \"Clone\",\n    \"Debug\",\n    \"PartialEq\",\n    \"Eq\",\n    \"PartialOrd\",\n    \"Ord\",\n    \"Hash\",\n    \"BorshSerialize\",\n    \"BorshDeserialize\",\n];\n\n/// Replaces characters that are not valid in Rust identifiers with underscores.\nfn slugify_type(input: &str) -> String {\n    let mut output = String::with_capacity(input.len());\n\n    for c in input.chars() {\n        if c.is_ascii_alphanumeric() {\n            output.push(c);\n        } else {\n            output.push('_');\n        }\n    }\n\n    output\n}\n\n#[derive(Debug, Deserialize, Serialize)]\nenum Specialized {\n    Result { ok: Declaration, err: Declaration },\n    Option { some: Declaration },\n}\n\n#[derive(Deserialize, Serialize)]\npub struct Codegen {\n    schema: Schema,\n    type_mapping: BTreeMap<Declaration, String>,\n    specialized_types: BTreeMap<Declaration, Specialized>,\n}\n\nimpl FromStr for Codegen {\n    type Err = serde_json::Error;\n\n    fn from_str(s: &str) -> Result<Self, Self::Err> {\n        let schema: Schema = serde_json::from_str(s)?;\n        Ok(Self::new(schema))\n    }\n}\n\nimpl Codegen {\n    pub fn new(schema: Schema) -> Self {\n        Self {\n            schema,\n            type_mapping: Default::default(),\n            specialized_types: Default::default(),\n        }\n    }\n\n    pub fn from_file(path: &str) -> Result<Self, std::io::Error> {\n        let file = std::fs::File::open(path)?;\n        let schema: Schema = serde_json::from_reader(file)?;\n        Ok(Self::new(schema))\n    }\n\n    pub fn gen(&mut 
self) -> String {\n        let mut scope = Scope::new();\n\n        scope.import(\"borsh\", \"self\");\n        scope.import(\"borsh\", \"BorshSerialize\");\n        scope.import(\"borsh\", \"BorshDeserialize\");\n        scope.import(\"casper_contract_sdk_codegen::support\", \"IntoResult\");\n        scope.import(\"casper_contract_sdk_codegen::support\", \"IntoOption\");\n        scope.import(\"casper_contract_sdk\", \"Selector\");\n        scope.import(\"casper_contract_sdk\", \"ToCallData\");\n\n        let _head = self\n            .schema\n            .definitions\n            .first()\n            .expect(\"No definitions found.\");\n\n        match &self.schema.type_ {\n            SchemaType::Contract { state } => {\n                if !self.schema.definitions.has_definition(state) {\n                    panic!(\n                        \"Missing state definition. Expected to find a definition for {}.\",\n                        &state\n                    )\n                };\n            }\n            SchemaType::Interface => {}\n        }\n\n        // Initialize a queue with the first definition\n        let mut queue = VecDeque::new();\n\n        // Create a set to keep track of processed definitions\n        let mut processed = std::collections::HashSet::new();\n\n        let mut graph: IndexMap<_, VecDeque<_>> = IndexMap::new();\n\n        for (def_index, (next_decl, next_def)) in self.schema.definitions.iter().enumerate() {\n            println!(\n                \"{def_index}. 
decl={decl}\",\n                def_index = def_index,\n                decl = next_decl\n            );\n\n            queue.push_back(next_decl);\n\n            while let Some(decl) = queue.pop_front() {\n                if processed.contains(decl) {\n                    continue;\n                }\n\n                processed.insert(decl);\n                graph.entry(next_decl).or_default().push_back(decl);\n                // graph.find\n\n                match Primitive::from_str(decl) {\n                    Ok(primitive) => {\n                        println!(\"Processing primitive type {primitive:?}\");\n                        continue;\n                    }\n                    Err(_) => {\n                        // Not a primitive type\n                    }\n                };\n\n                let def = self\n                    .schema\n                    .definitions\n                    .get(decl)\n                    .unwrap_or_else(|| panic!(\"Missing definition for {}\", decl));\n\n                // graph.entry(next_decl).or_default().push(decl);\n                // println!(\"Processing type {decl}\");\n\n                // Enqueue all unprocessed definitions that depend on the current definition\n                match def {\n                    Definition::Primitive(_primitive) => {\n                        continue;\n                    }\n                    Definition::Mapping { key, value } => {\n                        if !processed.contains(key) {\n                            queue.push_front(key);\n                            continue;\n                        }\n\n                        if !processed.contains(value) {\n                            queue.push_front(value);\n                            continue;\n                        }\n                    }\n                    Definition::Sequence { decl } => {\n                        queue.push_front(decl);\n                    }\n                    Definition::FixedSequence 
{ length: _, decl } => {\n                        if !processed.contains(decl) {\n                            queue.push_front(decl);\n                            continue;\n                        }\n                    }\n                    Definition::Tuple { items } => {\n                        for item in items {\n                            if !processed.contains(item) {\n                                queue.push_front(item);\n                                continue;\n                            }\n                        }\n\n                        // queue.push_front(decl);\n                    }\n                    Definition::Enum { items } => {\n                        for item in items {\n                            if !processed.contains(&item.decl) {\n                                queue.push_front(&item.decl);\n                                continue;\n                            }\n                        }\n                    }\n                    Definition::Struct { items } => {\n                        for item in items {\n                            if !processed.contains(&item.decl) {\n                                queue.push_front(&item.decl);\n                                continue;\n                            }\n                        }\n                    }\n                }\n            }\n\n            match next_def {\n                Definition::Primitive(_) => {}\n                Definition::Mapping { key, value } => {\n                    assert!(processed.contains(key));\n                    assert!(processed.contains(value));\n                }\n                Definition::Sequence { decl } => {\n                    assert!(processed.contains(decl));\n                }\n                Definition::FixedSequence { length: _, decl } => {\n                    assert!(processed.contains(decl));\n                }\n                Definition::Tuple { items } => {\n                    for item in items {\n               
         assert!(processed.contains(&item));\n                    }\n                }\n                Definition::Enum { items } => {\n                    for item in items {\n                        assert!(processed.contains(&item.decl));\n                    }\n                }\n                Definition::Struct { items } => {\n                    for item in items {\n                        assert!(processed.contains(&item.decl));\n                    }\n                }\n            }\n        }\n        dbg!(&graph);\n\n        let mut counter = iter::successors(Some(0usize), |prev| prev.checked_add(1));\n\n        for (_decl, deps) in graph {\n            for decl in deps.into_iter().rev() {\n                // println!(\"generate {decl}\");\n\n                let def = self\n                    .schema\n                    .definitions\n                    .get(decl)\n                    .cloned()\n                    .or_else(|| Primitive::from_str(decl).ok().map(Definition::Primitive))\n                    .unwrap_or_else(|| panic!(\"Missing definition for {}\", decl));\n\n                match def {\n                    Definition::Primitive(primitive) => {\n                        let (from, to) = match primitive {\n                            Primitive::Char => (\"Char\", \"char\"),\n                            Primitive::U8 => (\"U8\", \"u8\"),\n                            Primitive::I8 => (\"I8\", \"i8\"),\n                            Primitive::U16 => (\"U16\", \"u16\"),\n                            Primitive::I16 => (\"I16\", \"i16\"),\n                            Primitive::U32 => (\"U32\", \"u32\"),\n                            Primitive::I32 => (\"I32\", \"i32\"),\n                            Primitive::U64 => (\"U64\", \"u64\"),\n                            Primitive::I64 => (\"I64\", \"i64\"),\n                            Primitive::U128 => (\"U128\", \"u128\"),\n                            Primitive::I128 => (\"I128\", \"i128\"),\n       
                     Primitive::Bool => (\"Bool\", \"bool\"),\n                            Primitive::F32 => (\"F32\", \"f32\"),\n                            Primitive::F64 => (\"F64\", \"f64\"),\n                        };\n\n                        scope.new_type_alias(from, to).vis(\"pub\");\n                        self.type_mapping.insert(decl.to_string(), from.to_string());\n                    }\n                    Definition::Mapping { key: _, value: _ } => {\n                        // println!(\"Processing mapping type {key:?} -> {value:?}\");\n                        todo!()\n                    }\n                    Definition::Sequence { decl: seq_decl } => {\n                        println!(\"Processing sequence type {decl:?}\");\n                        if decl.as_str() == \"String\"\n                            && Primitive::from_str(&seq_decl) == Ok(Primitive::Char)\n                        {\n                            self.type_mapping\n                                .insert(\"String\".to_owned(), \"String\".to_owned());\n                        } else {\n                            let mapped_type = self\n                                .type_mapping\n                                .get(&seq_decl)\n                                .unwrap_or_else(|| panic!(\"Missing type mapping for {}\", seq_decl));\n                            let type_name =\n                                format!(\"Sequence{}_{seq_decl}\", counter.next().unwrap());\n                            scope.new_type_alias(&type_name, format!(\"Vec<{}>\", mapped_type));\n                            self.type_mapping.insert(decl.to_string(), type_name);\n                        }\n                    }\n                    Definition::FixedSequence {\n                        length,\n                        decl: fixed_seq_decl,\n                    } => {\n                        let mapped_type =\n                            
self.type_mapping.get(&fixed_seq_decl).unwrap_or_else(|| {\n                                panic!(\"Missing type mapping for {}\", fixed_seq_decl)\n                            });\n\n                        let type_name = format!(\n                            \"FixedSequence{}_{length}_{fixed_seq_decl}\",\n                            counter.next().unwrap()\n                        );\n                        scope.new_type_alias(&type_name, format!(\"[{}; {}]\", mapped_type, length));\n                        self.type_mapping.insert(decl.to_string(), type_name);\n                    }\n                    Definition::Tuple { items } => {\n                        if decl.as_str() == \"()\" && items.is_empty() {\n                            self.type_mapping.insert(\"()\".to_owned(), \"()\".to_owned());\n                            continue;\n                        }\n\n                        println!(\"Processing tuple type {items:?}\");\n                        let struct_name = slugify_type(decl);\n\n                        let r#struct = scope\n                            .new_struct(&struct_name)\n                            .doc(&format!(\"Declared as {decl}\"));\n\n                        for trait_name in DEFAULT_DERIVED_TRAITS {\n                            r#struct.derive(trait_name);\n                        }\n\n                        if items.is_empty() {\n                            r#struct.tuple_field(Type::new(\"()\"));\n                        } else {\n                            for item in items {\n                                let mapped_type = self\n                                    .type_mapping\n                                    .get(&item)\n                                    .unwrap_or_else(|| panic!(\"Missing type mapping for {}\", item));\n                                r#struct.tuple_field(mapped_type);\n                            }\n                        }\n\n                        
self.type_mapping.insert(decl.to_string(), struct_name);\n                    }\n                    Definition::Enum { items } => {\n                        println!(\"Processing enum type {decl} {items:?}\");\n\n                        let mut items: Vec<&casper_contract_sdk::abi::EnumVariant> =\n                            items.iter().collect();\n\n                        let mut specialized = None;\n\n                        if decl.starts_with(\"Result\")\n                            && items.len() == 2\n                            && items[0].name == \"Ok\"\n                            && items[1].name == \"Err\"\n                        {\n                            specialized = Some(Specialized::Result {\n                                ok: items[0].decl.clone(),\n                                err: items[1].decl.clone(),\n                            });\n\n                            // NOTE: Because we're not doing the standard library Result, and also\n                            // to simplify things we're using default impl of\n                            // BorshSerialize/BorshDeserialize, we have to flip the order of enums.\n                            // The standard library defines Result as Ok, Err, but the borsh impl\n                            // serializes Err as 0, and Ok as 1. 
So, by flipping the order we can\n                            // enforce byte for byte compatibility between our \"custom\" Result and a\n                            // real Result.\n                            items.reverse();\n                        }\n\n                        if decl.starts_with(\"Option\")\n                            && items.len() == 2\n                            && items[0].name == \"None\"\n                            && items[1].name == \"Some\"\n                        {\n                            specialized = Some(Specialized::Option {\n                                some: items[1].decl.clone(),\n                            });\n\n                            items.reverse();\n                        }\n\n                        let enum_name = slugify_type(decl);\n\n                        let r#enum = scope\n                            .new_enum(&enum_name)\n                            .vis(\"pub\")\n                            .doc(&format!(\"Declared as {decl}\"));\n\n                        for trait_name in DEFAULT_DERIVED_TRAITS {\n                            r#enum.derive(trait_name);\n                        }\n\n                        for item in &items {\n                            let variant = r#enum.new_variant(&item.name);\n\n                            let def = self.type_mapping.get(&item.decl).unwrap_or_else(|| {\n                                panic!(\"Missing type mapping for {}\", item.decl)\n                            });\n\n                            variant.tuple(def);\n                        }\n\n                        self.type_mapping\n                            .insert(decl.to_string(), enum_name.to_owned());\n\n                        match specialized {\n                            Some(Specialized::Result { ok, err }) => {\n                                let ok_type = self\n                                    .type_mapping\n                                    .get(&ok)\n                       
             .unwrap_or_else(|| panic!(\"Missing type mapping for {}\", ok));\n                                let err_type = self\n                                    .type_mapping\n                                    .get(&err)\n                                    .unwrap_or_else(|| panic!(\"Missing type mapping for {}\", err));\n\n                                let impl_block = scope\n                                    .new_impl(&enum_name)\n                                    .impl_trait(format!(\"IntoResult<{ok_type}, {err_type}>\"));\n\n                                let func = impl_block.new_fn(\"into_result\").arg_self().ret(\n                                    Type::new(format!(\n                                        \"Result<{ok_type}, {err_type}>\",\n                                        ok_type = ok_type,\n                                        err_type = err_type\n                                    )),\n                                );\n                                func.line(\"match self {\")\n                                    .line(format!(\"{enum_name}::Ok(ok) => Ok(ok),\"))\n                                    .line(format!(\"{enum_name}::Err(err) => Err(err),\"))\n                                    .line(\"}\");\n                            }\n                            Some(Specialized::Option { some }) => {\n                                let some_type = self.type_mapping.get(&some).unwrap_or_else(|| {\n                                    panic!(\"Missing type mapping for {}\", &some)\n                                });\n\n                                let impl_block = scope\n                                    .new_impl(&enum_name)\n                                    .impl_trait(format!(\"IntoOption<{some_type}>\"));\n\n                                let func = impl_block\n                                    .new_fn(\"into_option\")\n                                    .arg_self()\n                                    
.ret(Type::new(format!(\"Option<{some_type}>\",)));\n                                func.line(\"match self {\")\n                                    .line(format!(\"{enum_name}::None => None,\"))\n                                    .line(format!(\"{enum_name}::Some(some) => Some(some),\"))\n                                    .line(\"}\");\n                            }\n                            None => {}\n                        }\n                    }\n                    Definition::Struct { items } => {\n                        println!(\"Processing struct type {items:?}\");\n\n                        let type_name = slugify_type(decl);\n\n                        let r#struct = scope.new_struct(&type_name);\n\n                        for trait_name in DEFAULT_DERIVED_TRAITS {\n                            r#struct.derive(trait_name);\n                        }\n\n                        for item in items {\n                            let mapped_type =\n                                self.type_mapping.get(&item.decl).unwrap_or_else(|| {\n                                    panic!(\"Missing type mapping for {}\", item.decl)\n                                });\n                            let field = Field::new(&item.name, Type::new(mapped_type))\n                                .doc(format!(\"Declared as {}\", item.decl))\n                                .to_owned();\n\n                            r#struct.push_field(field);\n                        }\n                        self.type_mapping.insert(decl.to_string(), type_name);\n                    }\n                }\n            }\n        }\n\n        let struct_name = format!(\"{}Client\", self.schema.name);\n        let client = scope.new_struct(&struct_name).vis(\"pub\");\n\n        for trait_name in DEFAULT_DERIVED_TRAITS {\n            client.derive(trait_name);\n        }\n\n        let mut field = Field::new(\"address\", Type::new(\"[u8; 32]\"));\n        field.vis(\"pub\");\n\n        
client.push_field(field);\n\n        let client_impl = scope.new_impl(&struct_name);\n\n        for entry_point in &self.schema.entry_points {\n            let func = client_impl.new_fn(&entry_point.name);\n            func.vis(\"pub\");\n\n            let result_type = self\n                .type_mapping\n                .get(&entry_point.result)\n                .unwrap_or_else(|| panic!(\"Missing type mapping for {}\", entry_point.result));\n\n            if entry_point.flags.contains(EntryPointFlags::CONSTRUCTOR) {\n                func.ret(Type::new(format!(\n                    \"Result<{}, casper_contract_sdk::types::CallError>\",\n                    &struct_name\n                )))\n                .generic(\"C\")\n                .bound(\"C\", \"casper_contract_sdk::Contract\");\n            } else {\n                func.ret(Type::new(format!(\n                    \"Result<casper_contract_sdk::host::CallResult<{result_type}>, casper_contract_sdk::types::CallError>\"\n                )));\n                func.arg_ref_self();\n            }\n\n            for arg in &entry_point.arguments {\n                let mapped_type = self\n                    .type_mapping\n                    .get(&arg.decl)\n                    .unwrap_or_else(|| panic!(\"Missing type mapping for {}\", arg.decl));\n                let arg_ty = Type::new(mapped_type);\n                func.arg(&arg.name, arg_ty);\n            }\n\n            func.line(\"let value = 0; // TODO: Transferring values\");\n\n            let input_struct_name =\n                format!(\"{}_{}\", slugify_type(&self.schema.name), &entry_point.name);\n\n            if entry_point.arguments.is_empty() {\n                func.line(format!(r#\"let call_data = {input_struct_name};\"#));\n            } else {\n                func.line(format!(r#\"let call_data = {input_struct_name} {{ \"#));\n                for arg in &entry_point.arguments {\n                    func.line(format!(\"{},\", arg.name));\n   
             }\n                func.line(\"};\");\n            }\n\n            if entry_point.flags.contains(EntryPointFlags::CONSTRUCTOR) {\n                // if !entry_point.arguments.is_empty() {\n                //     func.line(r#\"let create_result = C::create(SELECTOR, Some(&input_data))?;\"#);\n                // } else {\n                func.line(r#\"let create_result = C::create(call_data)?;\"#);\n                // }\n\n                func.line(format!(\n                    r#\"let result = {struct_name} {{ address: create_result.contract_address }};\"#,\n                    struct_name = &struct_name\n                ));\n                func.line(\"Ok(result)\");\n                continue;\n            } else {\n                func.line(r#\"casper_contract_sdk::host::call(&self.address, value, call_data)\"#);\n            }\n        }\n\n        for entry_point in &self.schema.entry_points {\n            // Generate arg structure similar to what casper-contract-macros is doing\n            let struct_name = format!(\"{}_{}\", &self.schema.name, &entry_point.name);\n            let input_struct = scope.new_struct(&struct_name);\n\n            for trait_name in DEFAULT_DERIVED_TRAITS {\n                input_struct.derive(trait_name);\n            }\n\n            for argument in &entry_point.arguments {\n                let mapped_type = self.type_mapping.get(&argument.decl).unwrap_or_else(|| {\n                    panic!(\n                        \"Missing type mapping for {} when generating input arg {}\",\n                        argument.decl, &struct_name\n                    )\n                });\n                input_struct.push_field(Field::new(&argument.name, Type::new(mapped_type)));\n            }\n\n            let impl_block = scope.new_impl(&struct_name).impl_trait(\"ToCallData\");\n\n            let input_data_func = impl_block\n                .new_fn(\"input_data\")\n                .arg_ref_self()\n                
.ret(Type::new(\"Option<Vec<u8>>\"));\n\n            if entry_point.arguments.is_empty() {\n                input_data_func.line(r#\"None\"#);\n            } else {\n                input_data_func\n                        .line(r#\"let input_data = borsh::to_vec(&self).expect(\"Serialization to succeed\");\"#)\n                        .line(r#\"Some(input_data)\"#);\n            }\n        }\n\n        scope.to_string()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn should_slugify_complex_type() {\n        let input = \"Option<Result<(), vm2_cep18::error::Cep18Error>>\";\n        let expected = \"Option_Result_____vm2_cep18__error__Cep18Error__\";\n\n        assert_eq!(slugify_type(input), expected);\n    }\n}\n"
  },
  {
    "path": "smart_contracts/sdk_codegen/src/support.rs",
    "content": "//! Support library for generated code.\n\npub trait IntoResult<T, E> {\n    fn into_result(self) -> Result<T, E>;\n}\n\npub trait IntoOption<T> {\n    fn into_option(self) -> Option<T>;\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[derive(Debug, PartialEq, Eq)]\n    struct MyOk;\n    #[derive(Debug, PartialEq, Eq)]\n    struct MyErr;\n\n    #[derive(Debug, PartialEq, Eq)]\n\n    enum CustomResult {\n        Ok(MyOk),\n        Err(MyErr),\n    }\n\n    #[derive(Debug, PartialEq, Eq)]\n    enum CustomOption {\n        Some(MyOk),\n        None,\n    }\n\n    impl IntoResult<MyOk, MyErr> for CustomResult {\n        fn into_result(self) -> Result<MyOk, MyErr> {\n            match self {\n                CustomResult::Ok(ok) => Ok(ok),\n                CustomResult::Err(err) => Err(err),\n            }\n        }\n    }\n\n    impl IntoOption<MyOk> for CustomOption {\n        fn into_option(self) -> Option<MyOk> {\n            match self {\n                CustomOption::Some(value) => Some(value),\n                CustomOption::None => None,\n            }\n        }\n    }\n\n    #[test]\n    fn test_into_result() {\n        let ok = CustomResult::Ok(MyOk);\n        let err = CustomResult::Err(MyErr);\n\n        assert_eq!(ok.into_result(), Ok(MyOk));\n        assert_eq!(err.into_result(), Err(MyErr));\n    }\n\n    #[test]\n    fn test_into_option() {\n        let some = CustomOption::Some(MyOk);\n        let none = CustomOption::None;\n\n        assert_eq!(some.into_option(), Some(MyOk));\n        assert_eq!(none.into_option(), None);\n    }\n}\n"
  },
  {
    "path": "smart_contracts/sdk_codegen/tests/fixtures/cep18_schema.json",
    "content": "{\n  \"name\": \"TokenContract\",\n  \"version\": \"0.1.0\",\n  \"type\": {\n    \"type\": \"Contract\",\n    \"state\": \"vm2_cep18::contract::TokenContract\"\n  },\n  \"definitions\": {\n    \"()\": {\n      \"type\": \"Tuple\",\n      \"items\": []\n    },\n    \"([U8; 32], [U8; 32])\": {\n      \"type\": \"Tuple\",\n      \"items\": [\n        \"[U8; 32]\",\n        \"[U8; 32]\"\n      ]\n    },\n    \"Bool\": {\n      \"type\": \"Primitive\",\n      \"Bool\": null\n    },\n    \"Map<([U8; 32], [U8; 32]), U64>\": {\n      \"type\": \"Struct\",\n      \"items\": [\n        {\n          \"name\": \"prefix\",\n          \"decl\": \"U64\"\n        }\n      ]\n    },\n    \"Map<[U8; 32], U64>\": {\n      \"type\": \"Struct\",\n      \"items\": [\n        {\n          \"name\": \"prefix\",\n          \"decl\": \"U64\"\n        }\n      ]\n    },\n    \"Map<[U8; 32], vm2_cep18::security_badge::SecurityBadge>\": {\n      \"type\": \"Struct\",\n      \"items\": [\n        {\n          \"name\": \"prefix\",\n          \"decl\": \"U64\"\n        }\n      ]\n    },\n    \"Result<(), vm2_cep18::error::Cep18Error>\": {\n      \"type\": \"Enum\",\n      \"items\": [\n        {\n          \"name\": \"Ok\",\n          \"discriminant\": 0,\n          \"decl\": \"()\"\n        },\n        {\n          \"name\": \"Err\",\n          \"discriminant\": 1,\n          \"decl\": \"vm2_cep18::error::Cep18Error\"\n        }\n      ]\n    },\n    \"String\": {\n      \"type\": \"Sequence\",\n      \"decl\": \"Char\"\n    },\n    \"U64\": {\n      \"type\": \"Primitive\",\n      \"U64\": null\n    },\n    \"U8\": {\n      \"type\": \"Primitive\",\n      \"U8\": null\n    },\n    \"[U8; 32]\": {\n      \"type\": \"FixedSequence\",\n      \"length\": 32,\n      \"decl\": \"U8\"\n    },\n    \"vm2_cep18::contract::TokenContract\": {\n      \"type\": \"Struct\",\n      \"items\": [\n        {\n          \"name\": \"state\",\n          \"decl\": 
\"vm2_cep18::traits::CEP18State\"\n        }\n      ]\n    },\n    \"vm2_cep18::error::Cep18Error\": {\n      \"type\": \"Enum\",\n      \"items\": [\n        {\n          \"name\": \"InvalidContext\",\n          \"discriminant\": 0,\n          \"decl\": \"()\"\n        },\n        {\n          \"name\": \"InsufficientBalance\",\n          \"discriminant\": 1,\n          \"decl\": \"()\"\n        },\n        {\n          \"name\": \"InsufficientAllowance\",\n          \"discriminant\": 2,\n          \"decl\": \"()\"\n        },\n        {\n          \"name\": \"Overflow\",\n          \"discriminant\": 3,\n          \"decl\": \"()\"\n        },\n        {\n          \"name\": \"PackageHashMissing\",\n          \"discriminant\": 4,\n          \"decl\": \"()\"\n        },\n        {\n          \"name\": \"PackageHashNotPackage\",\n          \"discriminant\": 5,\n          \"decl\": \"()\"\n        },\n        {\n          \"name\": \"InvalidEventsMode\",\n          \"discriminant\": 6,\n          \"decl\": \"()\"\n        },\n        {\n          \"name\": \"MissingEventsMode\",\n          \"discriminant\": 7,\n          \"decl\": \"()\"\n        },\n        {\n          \"name\": \"Phantom\",\n          \"discriminant\": 8,\n          \"decl\": \"()\"\n        },\n        {\n          \"name\": \"FailedToGetArgBytes\",\n          \"discriminant\": 9,\n          \"decl\": \"()\"\n        },\n        {\n          \"name\": \"InsufficientRights\",\n          \"discriminant\": 10,\n          \"decl\": \"()\"\n        },\n        {\n          \"name\": \"InvalidAdminList\",\n          \"discriminant\": 11,\n          \"decl\": \"()\"\n        },\n        {\n          \"name\": \"InvalidMinterList\",\n          \"discriminant\": 12,\n          \"decl\": \"()\"\n        },\n        {\n          \"name\": \"InvalidNoneList\",\n          \"discriminant\": 13,\n          \"decl\": \"()\"\n        },\n        {\n          \"name\": \"InvalidEnableMBFlag\",\n          
\"discriminant\": 14,\n          \"decl\": \"()\"\n        },\n        {\n          \"name\": \"AlreadyInitialized\",\n          \"discriminant\": 15,\n          \"decl\": \"()\"\n        },\n        {\n          \"name\": \"MintBurnDisabled\",\n          \"discriminant\": 16,\n          \"decl\": \"()\"\n        },\n        {\n          \"name\": \"CannotTargetSelfUser\",\n          \"discriminant\": 17,\n          \"decl\": \"()\"\n        },\n        {\n          \"name\": \"InvalidBurnTarget\",\n          \"discriminant\": 18,\n          \"decl\": \"()\"\n        }\n      ]\n    },\n    \"vm2_cep18::security_badge::SecurityBadge\": {\n      \"type\": \"Enum\",\n      \"items\": [\n        {\n          \"name\": \"Admin\",\n          \"discriminant\": 0,\n          \"decl\": \"()\"\n        },\n        {\n          \"name\": \"Minter\",\n          \"discriminant\": 1,\n          \"decl\": \"()\"\n        },\n        {\n          \"name\": \"None\",\n          \"discriminant\": 2,\n          \"decl\": \"()\"\n        }\n      ]\n    },\n    \"vm2_cep18::traits::CEP18State\": {\n      \"type\": \"Struct\",\n      \"items\": [\n        {\n          \"name\": \"name\",\n          \"decl\": \"String\"\n        },\n        {\n          \"name\": \"symbol\",\n          \"decl\": \"String\"\n        },\n        {\n          \"name\": \"decimals\",\n          \"decl\": \"U8\"\n        },\n        {\n          \"name\": \"total_supply\",\n          \"decl\": \"U64\"\n        },\n        {\n          \"name\": \"balances\",\n          \"decl\": \"Map<[U8; 32], U64>\"\n        },\n        {\n          \"name\": \"allowances\",\n          \"decl\": \"Map<([U8; 32], [U8; 32]), U64>\"\n        },\n        {\n          \"name\": \"security_badges\",\n          \"decl\": \"Map<[U8; 32], vm2_cep18::security_badge::SecurityBadge>\"\n        },\n        {\n          \"name\": \"enable_mint_burn\",\n          \"decl\": \"Bool\"\n        }\n      ]\n    }\n  },\n  \"entry_points\": 
[\n    {\n      \"name\": \"new\",\n      \"arguments\": [\n        {\n          \"name\": \"token_name\",\n          \"decl\": \"String\"\n        }\n      ],\n      \"result\": \"vm2_cep18::contract::TokenContract\",\n      \"flags\": 1\n    },\n    {\n      \"name\": \"my_balance\",\n      \"arguments\": [],\n      \"result\": \"U64\",\n      \"flags\": 0\n    },\n    {\n      \"name\": \"name\",\n      \"arguments\": [],\n      \"result\": \"String\",\n      \"flags\": 0\n    },\n    {\n      \"name\": \"symbol\",\n      \"arguments\": [],\n      \"result\": \"String\",\n      \"flags\": 0\n    },\n    {\n      \"name\": \"decimals\",\n      \"arguments\": [],\n      \"result\": \"U8\",\n      \"flags\": 0\n    },\n    {\n      \"name\": \"total_supply\",\n      \"arguments\": [],\n      \"result\": \"U64\",\n      \"flags\": 0\n    },\n    {\n      \"name\": \"balance_of\",\n      \"arguments\": [\n        {\n          \"name\": \"address\",\n          \"decl\": \"[U8; 32]\"\n        }\n      ],\n      \"result\": \"U64\",\n      \"flags\": 0\n    },\n    {\n      \"name\": \"allowance\",\n      \"arguments\": [\n        {\n          \"name\": \"spender\",\n          \"decl\": \"[U8; 32]\"\n        },\n        {\n          \"name\": \"owner\",\n          \"decl\": \"[U8; 32]\"\n        }\n      ],\n      \"result\": \"()\",\n      \"flags\": 0\n    },\n    {\n      \"name\": \"approve\",\n      \"arguments\": [\n        {\n          \"name\": \"spender\",\n          \"decl\": \"[U8; 32]\"\n        },\n        {\n          \"name\": \"amount\",\n          \"decl\": \"U64\"\n        }\n      ],\n      \"result\": \"Result<(), vm2_cep18::error::Cep18Error>\",\n      \"flags\": 0\n    },\n    {\n      \"name\": \"decrease_allowance\",\n      \"arguments\": [\n        {\n          \"name\": \"spender\",\n          \"decl\": \"[U8; 32]\"\n        },\n        {\n          \"name\": \"amount\",\n          \"decl\": \"U64\"\n        }\n      ],\n      \"result\": 
\"Result<(), vm2_cep18::error::Cep18Error>\",\n      \"flags\": 0\n    },\n    {\n      \"name\": \"increase_allowance\",\n      \"arguments\": [\n        {\n          \"name\": \"spender\",\n          \"decl\": \"[U8; 32]\"\n        },\n        {\n          \"name\": \"amount\",\n          \"decl\": \"U64\"\n        }\n      ],\n      \"result\": \"Result<(), vm2_cep18::error::Cep18Error>\",\n      \"flags\": 0\n    },\n    {\n      \"name\": \"transfer\",\n      \"arguments\": [\n        {\n          \"name\": \"recipient\",\n          \"decl\": \"[U8; 32]\"\n        },\n        {\n          \"name\": \"amount\",\n          \"decl\": \"U64\"\n        }\n      ],\n      \"result\": \"Result<(), vm2_cep18::error::Cep18Error>\",\n      \"flags\": 0\n    },\n    {\n      \"name\": \"transfer_from\",\n      \"arguments\": [\n        {\n          \"name\": \"owner\",\n          \"decl\": \"[U8; 32]\"\n        },\n        {\n          \"name\": \"recipient\",\n          \"decl\": \"[U8; 32]\"\n        },\n        {\n          \"name\": \"amount\",\n          \"decl\": \"U64\"\n        }\n      ],\n      \"result\": \"Result<(), vm2_cep18::error::Cep18Error>\",\n      \"flags\": 0\n    },\n    {\n      \"name\": \"mint\",\n      \"arguments\": [\n        {\n          \"name\": \"owner\",\n          \"decl\": \"[U8; 32]\"\n        },\n        {\n          \"name\": \"amount\",\n          \"decl\": \"U64\"\n        }\n      ],\n      \"result\": \"Result<(), vm2_cep18::error::Cep18Error>\",\n      \"flags\": 0\n    },\n    {\n      \"name\": \"burn\",\n      \"arguments\": [\n        {\n          \"name\": \"owner\",\n          \"decl\": \"[U8; 32]\"\n        },\n        {\n          \"name\": \"amount\",\n          \"decl\": \"U64\"\n        }\n      ],\n      \"result\": \"Result<(), vm2_cep18::error::Cep18Error>\",\n      \"flags\": 0\n    }\n  ]\n}\n"
  },
  {
    "path": "smart_contracts/sdk_codegen/tests/fixtures/cep18_schema.rs",
    "content": "#![allow(dead_code, unused_variables, non_camel_case_types)]use borsh::{self, BorshSerialize, BorshDeserialize};\nuse casper_contract_sdk_codegen::support::{IntoResult, IntoOption};\nuse casper_contract_sdk::{Selector, ToCallData};\n\npub type U8 = u8;\ntype FixedSequence0_32_U8 = [U8; 32];\n/// Declared as ([U8; 32], [U8; 32])\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\nstruct __U8__32____U8__32__(FixedSequence0_32_U8, FixedSequence0_32_U8);\n\npub type Bool = bool;\npub type U64 = u64;\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\nstruct Map___U8__32____U8__32____U64_ {\n    /// Declared as U64\n    prefix: U64,\n}\n\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\nstruct Map__U8__32___U64_ {\n    /// Declared as U64\n    prefix: U64,\n}\n\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\nstruct Map__U8__32___vm2_cep18__security_badge__SecurityBadge_ {\n    /// Declared as U64\n    prefix: U64,\n}\n\n/// Declared as vm2_cep18::error::Cep18Error\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\npub enum vm2_cep18__error__Cep18Error {\n    InvalidContext(()),\n    InsufficientBalance(()),\n    InsufficientAllowance(()),\n    Overflow(()),\n    PackageHashMissing(()),\n    PackageHashNotPackage(()),\n    InvalidEventsMode(()),\n    MissingEventsMode(()),\n    Phantom(()),\n    FailedToGetArgBytes(()),\n    InsufficientRights(()),\n    InvalidAdminList(()),\n    InvalidMinterList(()),\n    InvalidNoneList(()),\n    InvalidEnableMBFlag(()),\n    AlreadyInitialized(()),\n    MintBurnDisabled(()),\n    CannotTargetSelfUser(()),\n    InvalidBurnTarget(()),\n}\n\n/// Declared as Result<(), vm2_cep18::error::Cep18Error>\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, 
BorshDeserialize)]\npub enum Result_____vm2_cep18__error__Cep18Error_ {\n    Err(vm2_cep18__error__Cep18Error),\n    Ok(()),\n}\n\nimpl IntoResult<(), vm2_cep18__error__Cep18Error> for Result_____vm2_cep18__error__Cep18Error_ {\n    fn into_result(self) -> Result<(), vm2_cep18__error__Cep18Error> {\n        match self {\n        Result_____vm2_cep18__error__Cep18Error_::Ok(ok) => Ok(ok),\n        Result_____vm2_cep18__error__Cep18Error_::Err(err) => Err(err),\n        }\n    }\n}\n\npub type Char = char;\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\nstruct vm2_cep18__traits__CEP18State {\n    /// Declared as String\n    name: String,\n    /// Declared as String\n    symbol: String,\n    /// Declared as U8\n    decimals: U8,\n    /// Declared as U64\n    total_supply: U64,\n    /// Declared as Map<[U8; 32], U64>\n    balances: Map__U8__32___U64_,\n    /// Declared as Map<([U8; 32], [U8; 32]), U64>\n    allowances: Map___U8__32____U8__32____U64_,\n    /// Declared as Map<[U8; 32], vm2_cep18::security_badge::SecurityBadge>\n    security_badges: Map__U8__32___vm2_cep18__security_badge__SecurityBadge_,\n    /// Declared as Bool\n    enable_mint_burn: Bool,\n}\n\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\nstruct vm2_cep18__contract__TokenContract {\n    /// Declared as vm2_cep18::traits::CEP18State\n    state: vm2_cep18__traits__CEP18State,\n}\n\n/// Declared as vm2_cep18::security_badge::SecurityBadge\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\npub enum vm2_cep18__security_badge__SecurityBadge {\n    Admin(()),\n    Minter(()),\n    None(()),\n}\n\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\npub struct TokenContractClient {\n    pub address: [u8; 32],\n}\n\nimpl TokenContractClient {\n    pub fn new<C>(token_name: String) -> Result<TokenContractClient, 
casper_contract_sdk::types::CallError>\n    where C: casper_contract_sdk::Contract,\n    {\n        const SELECTOR: Selector = Selector::new(2611912030);\n        let value = 0; // TODO: Transferring values\n        let call_data = TokenContract_new { \n        token_name,\n        };\n        let create_result = C::create(call_data)?;\n        let result = TokenContractClient { address: create_result.contract_address };\n        Ok(result)\n    }\n\n    pub fn my_balance(&self) -> Result<casper_contract_sdk::host::CallResult<U64>, casper_contract_sdk::types::CallError> {\n        const SELECTOR: Selector = Selector::new(926069361);\n        let value = 0; // TODO: Transferring values\n        let call_data = TokenContract_my_balance;\n        casper_contract_sdk::host::call(&self.address, value, call_data)\n    }\n\n    pub fn name(&self) -> Result<casper_contract_sdk::host::CallResult<String>, casper_contract_sdk::types::CallError> {\n        const SELECTOR: Selector = Selector::new(987428621);\n        let value = 0; // TODO: Transferring values\n        let call_data = TokenContract_name;\n        casper_contract_sdk::host::call(&self.address, value, call_data)\n    }\n\n    pub fn symbol(&self) -> Result<casper_contract_sdk::host::CallResult<String>, casper_contract_sdk::types::CallError> {\n        const SELECTOR: Selector = Selector::new(2614203198);\n        let value = 0; // TODO: Transferring values\n        let call_data = TokenContract_symbol;\n        casper_contract_sdk::host::call(&self.address, value, call_data)\n    }\n\n    pub fn decimals(&self) -> Result<casper_contract_sdk::host::CallResult<U8>, casper_contract_sdk::types::CallError> {\n        const SELECTOR: Selector = Selector::new(2176884103);\n        let value = 0; // TODO: Transferring values\n        let call_data = TokenContract_decimals;\n        casper_contract_sdk::host::call(&self.address, value, call_data)\n    }\n\n    pub fn total_supply(&self) -> 
Result<casper_contract_sdk::host::CallResult<U64>, casper_contract_sdk::types::CallError> {\n        const SELECTOR: Selector = Selector::new(3680728488);\n        let value = 0; // TODO: Transferring values\n        let call_data = TokenContract_total_supply;\n        casper_contract_sdk::host::call(&self.address, value, call_data)\n    }\n\n    pub fn balance_of(&self, address: FixedSequence0_32_U8) -> Result<casper_contract_sdk::host::CallResult<U64>, casper_contract_sdk::types::CallError> {\n        const SELECTOR: Selector = Selector::new(259349078);\n        let value = 0; // TODO: Transferring values\n        let call_data = TokenContract_balance_of { \n        address,\n        };\n        casper_contract_sdk::host::call(&self.address, value, call_data)\n    }\n\n    pub fn allowance(&self, spender: FixedSequence0_32_U8, owner: FixedSequence0_32_U8) -> Result<casper_contract_sdk::host::CallResult<()>, casper_contract_sdk::types::CallError> {\n        const SELECTOR: Selector = Selector::new(1778390622);\n        let value = 0; // TODO: Transferring values\n        let call_data = TokenContract_allowance { \n        spender,\n        owner,\n        };\n        casper_contract_sdk::host::call(&self.address, value, call_data)\n    }\n\n    pub fn approve(&self, spender: FixedSequence0_32_U8, amount: U64) -> Result<casper_contract_sdk::host::CallResult<Result_____vm2_cep18__error__Cep18Error_>, casper_contract_sdk::types::CallError> {\n        const SELECTOR: Selector = Selector::new(1746036384);\n        let value = 0; // TODO: Transferring values\n        let call_data = TokenContract_approve { \n        spender,\n        amount,\n        };\n        casper_contract_sdk::host::call(&self.address, value, call_data)\n    }\n\n    pub fn decrease_allowance(&self, spender: FixedSequence0_32_U8, amount: U64) -> Result<casper_contract_sdk::host::CallResult<Result_____vm2_cep18__error__Cep18Error_>, casper_contract_sdk::types::CallError> {\n        const SELECTOR: 
Selector = Selector::new(4187548633);\n        let value = 0; // TODO: Transferring values\n        let call_data = TokenContract_decrease_allowance { \n        spender,\n        amount,\n        };\n        casper_contract_sdk::host::call(&self.address, value, call_data)\n    }\n\n    pub fn increase_allowance(&self, spender: FixedSequence0_32_U8, amount: U64) -> Result<casper_contract_sdk::host::CallResult<Result_____vm2_cep18__error__Cep18Error_>, casper_contract_sdk::types::CallError> {\n        const SELECTOR: Selector = Selector::new(4115780642);\n        let value = 0; // TODO: Transferring values\n        let call_data = TokenContract_increase_allowance { \n        spender,\n        amount,\n        };\n        casper_contract_sdk::host::call(&self.address, value, call_data)\n    }\n\n    pub fn transfer(&self, recipient: FixedSequence0_32_U8, amount: U64) -> Result<casper_contract_sdk::host::CallResult<Result_____vm2_cep18__error__Cep18Error_>, casper_contract_sdk::types::CallError> {\n        const SELECTOR: Selector = Selector::new(2225167777);\n        let value = 0; // TODO: Transferring values\n        let call_data = TokenContract_transfer { \n        recipient,\n        amount,\n        };\n        casper_contract_sdk::host::call(&self.address, value, call_data)\n    }\n\n    pub fn transfer_from(&self, owner: FixedSequence0_32_U8, recipient: FixedSequence0_32_U8, amount: U64) -> Result<casper_contract_sdk::host::CallResult<Result_____vm2_cep18__error__Cep18Error_>, casper_contract_sdk::types::CallError> {\n        const SELECTOR: Selector = Selector::new(188313368);\n        let value = 0; // TODO: Transferring values\n        let call_data = TokenContract_transfer_from { \n        owner,\n        recipient,\n        amount,\n        };\n        casper_contract_sdk::host::call(&self.address, value, call_data)\n    }\n\n    pub fn mint(&self, owner: FixedSequence0_32_U8, amount: U64) -> 
Result<casper_contract_sdk::host::CallResult<Result_____vm2_cep18__error__Cep18Error_>, casper_contract_sdk::types::CallError> {\n        const SELECTOR: Selector = Selector::new(3487406754);\n        let value = 0; // TODO: Transferring values\n        let call_data = TokenContract_mint { \n        owner,\n        amount,\n        };\n        casper_contract_sdk::host::call(&self.address, value, call_data)\n    }\n\n    pub fn burn(&self, owner: FixedSequence0_32_U8, amount: U64) -> Result<casper_contract_sdk::host::CallResult<Result_____vm2_cep18__error__Cep18Error_>, casper_contract_sdk::types::CallError> {\n        const SELECTOR: Selector = Selector::new(2985279867);\n        let value = 0; // TODO: Transferring values\n        let call_data = TokenContract_burn { \n        owner,\n        amount,\n        };\n        casper_contract_sdk::host::call(&self.address, value, call_data)\n    }\n}\n\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\nstruct TokenContract_new {\n    token_name: String,\n}\n\nimpl ToCallData for TokenContract_new {\n     const SELECTOR: Selector = Selector::new(2611912030);\n    fn input_data(&self) -> Option<Vec<u8>> {\n        let input_data = borsh::to_vec(&self).expect(\"Serialization to succeed\");\n        Some(input_data)\n    }\n}\n\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\nstruct TokenContract_my_balance;\n\nimpl ToCallData for TokenContract_my_balance {\n     const SELECTOR: Selector = Selector::new(926069361);\n    fn input_data(&self) -> Option<Vec<u8>> {\n        None\n    }\n}\n\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\nstruct TokenContract_name;\n\nimpl ToCallData for TokenContract_name {\n     const SELECTOR: Selector = Selector::new(987428621);\n    fn input_data(&self) -> Option<Vec<u8>> {\n        None\n    }\n}\n\n#[derive(Clone, Debug, PartialEq, Eq, 
PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\nstruct TokenContract_symbol;\n\nimpl ToCallData for TokenContract_symbol {\n     const SELECTOR: Selector = Selector::new(2614203198);\n    fn input_data(&self) -> Option<Vec<u8>> {\n        None\n    }\n}\n\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\nstruct TokenContract_decimals;\n\nimpl ToCallData for TokenContract_decimals {\n     const SELECTOR: Selector = Selector::new(2176884103);\n    fn input_data(&self) -> Option<Vec<u8>> {\n        None\n    }\n}\n\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\nstruct TokenContract_total_supply;\n\nimpl ToCallData for TokenContract_total_supply {\n     const SELECTOR: Selector = Selector::new(3680728488);\n    fn input_data(&self) -> Option<Vec<u8>> {\n        None\n    }\n}\n\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\nstruct TokenContract_balance_of {\n    address: FixedSequence0_32_U8,\n}\n\nimpl ToCallData for TokenContract_balance_of {\n     const SELECTOR: Selector = Selector::new(259349078);\n    fn input_data(&self) -> Option<Vec<u8>> {\n        let input_data = borsh::to_vec(&self).expect(\"Serialization to succeed\");\n        Some(input_data)\n    }\n}\n\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\nstruct TokenContract_allowance {\n    spender: FixedSequence0_32_U8,\n    owner: FixedSequence0_32_U8,\n}\n\nimpl ToCallData for TokenContract_allowance {\n     const SELECTOR: Selector = Selector::new(1778390622);\n    fn input_data(&self) -> Option<Vec<u8>> {\n        let input_data = borsh::to_vec(&self).expect(\"Serialization to succeed\");\n        Some(input_data)\n    }\n}\n\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\nstruct TokenContract_approve {\n    spender: FixedSequence0_32_U8,\n    
amount: U64,\n}\n\nimpl ToCallData for TokenContract_approve {\n     const SELECTOR: Selector = Selector::new(1746036384);\n    fn input_data(&self) -> Option<Vec<u8>> {\n        let input_data = borsh::to_vec(&self).expect(\"Serialization to succeed\");\n        Some(input_data)\n    }\n}\n\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\nstruct TokenContract_decrease_allowance {\n    spender: FixedSequence0_32_U8,\n    amount: U64,\n}\n\nimpl ToCallData for TokenContract_decrease_allowance {\n     const SELECTOR: Selector = Selector::new(4187548633);\n    fn input_data(&self) -> Option<Vec<u8>> {\n        let input_data = borsh::to_vec(&self).expect(\"Serialization to succeed\");\n        Some(input_data)\n    }\n}\n\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\nstruct TokenContract_increase_allowance {\n    spender: FixedSequence0_32_U8,\n    amount: U64,\n}\n\nimpl ToCallData for TokenContract_increase_allowance {\n     const SELECTOR: Selector = Selector::new(4115780642);\n    fn input_data(&self) -> Option<Vec<u8>> {\n        let input_data = borsh::to_vec(&self).expect(\"Serialization to succeed\");\n        Some(input_data)\n    }\n}\n\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\nstruct TokenContract_transfer {\n    recipient: FixedSequence0_32_U8,\n    amount: U64,\n}\n\nimpl ToCallData for TokenContract_transfer {\n     const SELECTOR: Selector = Selector::new(2225167777);\n    fn input_data(&self) -> Option<Vec<u8>> {\n        let input_data = borsh::to_vec(&self).expect(\"Serialization to succeed\");\n        Some(input_data)\n    }\n}\n\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\nstruct TokenContract_transfer_from {\n    owner: FixedSequence0_32_U8,\n    recipient: FixedSequence0_32_U8,\n    amount: U64,\n}\n\nimpl ToCallData for 
TokenContract_transfer_from {\n     const SELECTOR: Selector = Selector::new(188313368);\n    fn input_data(&self) -> Option<Vec<u8>> {\n        let input_data = borsh::to_vec(&self).expect(\"Serialization to succeed\");\n        Some(input_data)\n    }\n}\n\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\nstruct TokenContract_mint {\n    owner: FixedSequence0_32_U8,\n    amount: U64,\n}\n\nimpl ToCallData for TokenContract_mint {\n     const SELECTOR: Selector = Selector::new(3487406754);\n    fn input_data(&self) -> Option<Vec<u8>> {\n        let input_data = borsh::to_vec(&self).expect(\"Serialization to succeed\");\n        Some(input_data)\n    }\n}\n\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)]\nstruct TokenContract_burn {\n    owner: FixedSequence0_32_U8,\n    amount: U64,\n}\n\nimpl ToCallData for TokenContract_burn {\n     const SELECTOR: Selector = Selector::new(2985279867);\n    fn input_data(&self) -> Option<Vec<u8>> {\n        let input_data = borsh::to_vec(&self).expect(\"Serialization to succeed\");\n        Some(input_data)\n    }\n}fn main() {}"
  },
  {
    "path": "smart_contracts/sdk_codegen/tests/test_build.rs",
    "content": "use std::{fs, io::Write, path::PathBuf, str::FromStr};\n\nuse casper_contract_sdk_codegen::Codegen;\n\nconst FIXTURE_1: &str = include_str!(\"fixtures/cep18_schema.json\");\n\nconst PROLOG: &str = \"#![allow(dead_code, unused_variables, non_camel_case_types)]\";\nconst EPILOG: &str = \"fn main() {}\";\n\n#[ignore = \"Not yet supported\"]\n#[test]\nfn it_works() -> Result<(), std::io::Error> {\n    let mut schema = Codegen::from_str(FIXTURE_1)?;\n    let mut code = schema.gen();\n    code.insert_str(0, PROLOG);\n\n    code += EPILOG;\n\n    let mut tmp = tempfile::Builder::new()\n        .prefix(\"cep18_schema\")\n        .suffix(\".rs\")\n        .tempfile()?;\n    tmp.write_all(code.as_bytes())?;\n\n    let path = PathBuf::from(env!(\"CARGO_MANIFEST_DIR\"))\n        .join(\"tests\")\n        .join(\"fixtures\")\n        .join(\"cep18_schema.rs\");\n    fs::write(path, code.as_bytes())?;\n    tmp.flush()?;\n    let t = trybuild::TestCases::new();\n    t.pass(tmp.path());\n    Ok(())\n}\n"
  },
  {
    "path": "smart_contracts/sdk_sys/Cargo.toml",
    "content": "[package]\nname = \"casper-contract-sdk-sys\"\nversion = \"0.1.3\"\nedition = \"2021\"\ndescription = \"Casper contract sdk sys package\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\ndocumentation = \"https://docs.rs/casper-contract-sdk-sys\"\nhomepage = \"https://casper.network\"\nrepository = \"https://github.com/casper-network/casper-node/tree/dev/smart_contracts/sdk_sys\"\nlicense = \"Apache-2.0\"\n\n[dependencies]\n"
  },
  {
    "path": "smart_contracts/sdk_sys/src/for_each_host_function.rs",
    "content": "#[macro_export]\nmacro_rules! for_each_host_function {\n    ($mac:ident) => {\n        $mac! {\n            #[doc = \"Read value from a storage available for caller's entity address.\"]\n            pub fn casper_read(\n                key_space: u64,\n                key_ptr: *const u8,\n                key_size: usize,\n                info: *mut $crate::ReadInfo,\n                alloc: extern \"C\" fn(usize, *mut core::ffi::c_void) -> *mut u8,\n                alloc_ctx: *const core::ffi::c_void,\n            ) -> u32;\n            pub fn casper_write(\n                key_space: u64,\n                key_ptr: *const u8,\n                key_size: usize,\n                value_ptr: *const u8,\n                value_size: usize,\n            ) -> u32;\n            pub fn casper_remove(\n                key_space: u64,\n                key_ptr: *const u8,\n                key_size: usize,\n            ) -> u32;\n            pub fn casper_print(msg_ptr: *const u8, msg_size: usize,);\n            pub fn casper_return(flags: u32, data_ptr: *const u8, data_len: usize,);\n            pub fn casper_copy_input(\n                alloc: extern \"C\" fn(usize, *mut core::ffi::c_void) -> *mut u8,\n                alloc_ctx: *const core::ffi::c_void,\n            ) -> *mut u8;\n            pub fn casper_create(\n                code_ptr: *const u8,\n                code_size: usize,\n                transferred_value: u64,\n                constructor_ptr: *const u8,\n                constructor_size: usize,\n                input_ptr: *const u8,\n                input_size: usize,\n                seed_ptr: *const u8,\n                seed_size: usize,\n                result_ptr: *mut $crate::CreateResult,\n            ) -> u32;\n\n            // We don't offer any special protection against smart contracts on the host side\n            pub fn casper_call(\n                address_ptr: *const u8,\n                address_size: usize,\n                
transferred_amount: u64,\n                entry_point_ptr: *const u8,\n                entry_point_size: usize,\n                input_ptr: *const u8,\n                input_size: usize,\n                alloc: extern \"C\" fn(usize, *mut core::ffi::c_void) -> *mut u8, // For capturing output data\n                alloc_ctx: *const core::ffi::c_void,\n            ) -> u32;\n            pub fn casper_upgrade(\n                code_ptr: *const u8,\n                code_size: usize,\n                entry_point_ptr: *const u8,\n                entry_point_size: usize,\n                input_ptr: *const u8,\n                input_size: usize,\n            ) -> u32;\n            #[doc = r\"Get balance of an entity by its address.\"]\n            pub fn casper_env_balance(entity_kind: u32, entity_addr_ptr: *const u8, entity_addr_len: usize, output_ptr: *mut core::ffi::c_void,) -> u32;\n            pub fn casper_env_info(info_ptr: *const u8, info_size: u32,) -> u32;\n            pub fn casper_transfer(entity_addr_ptr: *const u8, entity_addr_len: usize, amount: *const core::ffi::c_void,) -> u32;\n            pub fn casper_emit(topic_ptr: *const u8, topic_size: usize, payload_ptr: *const u8, payload_size: usize,) -> u32;\n        }\n    };\n}\n"
  },
  {
    "path": "smart_contracts/sdk_sys/src/lib.rs",
    "content": "pub mod for_each_host_function;\n\n#[repr(C)]\npub struct Param {\n    pub name_ptr: *const u8,\n    pub name_len: usize,\n}\n\n/// Signature of a function pointer that a host understands.\npub type Fptr = extern \"C\" fn() -> ();\n\n#[derive(Debug)]\n#[repr(C)]\npub struct ReadInfo {\n    pub data: *const u8,\n    /// Size in bytes.\n    pub size: usize,\n}\n\n#[repr(C)]\n#[derive(Debug)]\npub struct CreateResult {\n    pub contract_address: [u8; 32],\n}\n\n#[repr(C)]\n#[derive(Debug)]\npub struct UpgradeResult {\n    pub package_address: [u8; 32],\n    pub contract_address: [u8; 32],\n    pub version: u32,\n}\n\nmacro_rules! visit_host_function {\n    ( $( $(#[$cfg:meta])? $vis:vis fn $name:ident $(( $($arg:ident: $argty:ty,)* ))? $(-> $ret:ty)?;)+) => {\n        $(\n            $(#[$cfg])? $vis fn $name($($($arg: $argty,)*)?) $(-> $ret)?;\n        )*\n    }\n}\n\nextern \"C\" {\n    for_each_host_function!(visit_host_function);\n}\n\nmacro_rules! visit_host_function_name {\n    ( $( $(#[$cfg:meta])? $vis:vis fn $name:ident $(( $($arg:ident: $argty:ty,)* ))? $(-> $ret:ty)?;)+) => {\n        &[\n            $(\n                stringify!($name),\n            )*\n        ]\n    }\n}\n\npub const HOST_FUNCTIONS: &[&str] = for_each_host_function!(visit_host_function_name);\n\n#[cfg(test)]\nmod tests {\n    use std::collections::BTreeSet;\n\n    use crate::HOST_FUNCTIONS;\n\n    mod separate_module {\n        use crate::for_each_host_function;\n\n        macro_rules! visit_host_function {\n            ( $( $(#[$cfg:meta])? $vis:vis fn $name:ident $(( $($arg:ident: $argty:ty,)* ))? $(-> $ret:ty)?;)+) => {\n                $(\n                    #[allow(dead_code, unused_variables, clippy::too_many_arguments)]\n                    $(#[$cfg])? $vis fn $name($($($arg: $argty,)*)?) $(-> $ret)? 
{\n                        todo!(\"Called fn {}\", stringify!($name));\n                    }\n                )*\n            }\n        }\n        for_each_host_function!(visit_host_function);\n    }\n\n    #[test]\n    #[should_panic(expected = \"Called fn casper_print\")]\n    fn different_module() {\n        const MSG: &str = \"foobar\";\n        separate_module::casper_print(MSG.as_ptr(), MSG.len());\n    }\n\n    #[test]\n    fn all_host_functions() {\n        let host_functions = BTreeSet::from_iter(HOST_FUNCTIONS);\n        assert!(host_functions.contains(&\"casper_call\"));\n    }\n}\n"
  },
  {
    "path": "storage/CHANGELOG.md",
    "content": "# Changelog\n\nAll notable changes to this project will be documented in this file.  The format is based on [Keep a Changelog].\n\n[comment]: <> (Added:      new features)\n[comment]: <> (Changed:    changes in existing functionality)\n[comment]: <> (Deprecated: soon-to-be removed features)\n[comment]: <> (Removed:    now removed features)\n[comment]: <> (Fixed:      any bug fixes)\n[comment]: <> (Security:   in case of vulnerabilities)\n\n## 5.0.0\n\n### Added\n\n* Added a field `rewards` handling to Config in the `runtime_native` module_\n\n### Changed\n\n* Modified the behavior of the protocol upgrade logic to add a sustain purse to the mints named keys if the rewards\n  handling to sustain\n* Modified the behavior of the protocol upgrade logic to recalculate the total supply at the point of protocol upgrade\n* Modified the Genesis flow to support the rewards handling mode sustain in the Account/Contract model\n* Modified the auction logic to keep track of a minimum delegation rate for validators\n\n### Fixed\n\n* Fixed a bug introduced during protocol version 2.0 in the genesis logic that did not include delegator stakes towards\n  the total supply\n\n## 4.0.1\n\n### Changed\n* Changed the withdraw bid behavior to return an UnbondingAmountTooLarge error instead of forcing a unbonding of the valdiator's bid \n\n### Fixed\n* Fixed an issue in the storage create which allowed delegators to exceed the maximum limit set by the validator for the validator's bid\n\n## 4.0.0\n\n### Added\n* Added `maximum_delegation_amount` field to the runtime native config struct.\n\n### Fixed\n* Fixed an issue regarding incorrect setting of delegator min max limits on validator bids\n\n## 3.0.0\n\n### Changed\n* Update `casper-types` to v4.0.1, requiring a major version bump here.\n\n\n\n## 2.0.0\n\n### Added\n* Add `ChunkWithProof` to support chunking of large values, and associated Merkle-proofs of these.\n\n\n\n## 1.4.4\n\n### Changed\n* Update 
dependencies.\n\n\n\n## 1.4.0\n\n### Added\n* Initial release of crate providing `Digest` type and hashing methods, including the structs to handle proofs for chunks of data.\n\n\n\n[Keep a Changelog]: https://keepachangelog.com/en/1.0.0\n[unreleased]: https://github.com/casper-network/casper-node/tree/dev\n"
  },
  {
    "path": "storage/Cargo.toml",
    "content": "[package]\nname = \"casper-storage\"\nversion = \"5.0.0\"\nedition = \"2018\"\nauthors = [\"Ed Hastings <ed@casper.network>\"]\ndescription = \"Storage for a node on the Casper network.\"\nreadme = \"README.md\"\ndocumentation = \"https://docs.rs/casper-storage\"\nhomepage = \"https://casper.network\"\nrepository = \"https://github.com/casper-network/casper-node/tree/master/storage\"\nlicense = \"Apache-2.0\"\n\n[dependencies]\nbincode = \"1.3.1\"\ncasper-types = { version = \"7.0.0\", path = \"../types\", features = [\"datasize\", \"json-schema\", \"std\"] }\ndatasize = \"0.2.4\"\neither = \"1.8.1\"\nlmdb-rkv = \"0.14\"\nnum = { version = \"0.4.0\", default-features = false }\nnum-derive = { workspace = true }\nnum-rational = { version = \"0.4.0\", features = [\"serde\"] }\nnum-traits = { workspace = true }\nproptest = { version = \"1.0.0\", optional = true }\nserde = { version = \"1\", features = [\"derive\"] }\ntempfile = \"3.1.0\"\nthiserror = \"1.0.18\"\ntracing = \"0.1.18\"\nuuid = { version = \"0.8.1\", features = [\"serde\", \"v4\"] }\nlinked-hash-map = \"0.5.3\"\nonce_cell = \"1.18.0\"\nrand = \"0.8.3\"\nrand_chacha = \"0.3.0\"\nitertools = \"0.10.5\"\nparking_lot = \"0.12.1\"\n\n[dev-dependencies]\nassert_matches = \"1.3.0\"\nanyhow = \"1.0.33\"\ncasper-types = { path = \"../types\", features = [\"testing\"] }\nproptest = \"1.0.0\"\nrand = \"0.8.3\"\nserde_json = \"1\"\nbase16 = \"0.2.1\"\ncriterion = { version = \"0.5.1\", features = [\"html_reports\"] }\npprof = { version = \"0.14.0\", features = [\"flamegraph\", \"criterion\"] }\n\n[package.metadata.docs.rs]\nall-features = true\nrustc-args = [\"--cfg\", \"docsrs\"]\n\n[[bench]]\nname = \"global_state_key_write_bench\"\nharness = false\n"
  },
  {
    "path": "storage/README.md",
    "content": "# `casper-storage`\n\n[![LOGO](https://raw.githubusercontent.com/casper-network/casper-node/master/images/casper-association-logo-primary.svg)](https://casper.network/)\n\n[![Crates.io](https://img.shields.io/crates/v/casper-hashing)](https://crates.io/crates/casper-storage)\n[![Documentation](https://docs.rs/casper-hashing/badge.svg)](https://docs.rs/casper-storage)\n[![License](https://img.shields.io/badge/license-Apache-blue)](https://github.com/casper-network/casper-node/blob/master/LICENSE)\n\nA library providing storage functionality for Casper nodes.\n\n## License\n\nLicensed under the [Apache License Version 2.0](https://github.com/casper-network/casper-node/blob/master/LICENSE).\n"
  },
  {
    "path": "storage/benches/global_state_key_write_bench.rs",
    "content": "use std::time::{Duration, Instant};\n\nuse criterion::{criterion_group, criterion_main, Criterion, Throughput};\nuse pprof::criterion::{Output, PProfProfiler};\n\nuse casper_storage::global_state::{\n    error,\n    store::Store,\n    transaction_source::{lmdb::LmdbEnvironment, Transaction, TransactionSource},\n    trie::Trie,\n    trie_store::{\n        lmdb::LmdbTrieStore,\n        operations::{batch_write, WriteResult},\n    },\n};\nuse casper_types::{bytesrepr::ToBytes, testing::TestRng, Digest, Key};\nuse lmdb::{DatabaseFlags, RwTransaction};\nuse rand::Rng;\nuse tempfile::tempdir;\n\nuse casper_storage::global_state::trie_store::operations::write;\n\npub(crate) const DB_SIZE: usize = 8_520_428_800;\npub(crate) const MAX_READERS: u32 = 512;\n\nfn write_sequential(\n    trie_store: &LmdbTrieStore,\n    txn: &mut RwTransaction,\n    mut root_hash: Digest,\n    data: Vec<(Key, u32)>,\n) -> Digest {\n    for (key, value) in data.iter() {\n        let write_result =\n            write::<Key, u32, _, _, error::Error>(txn, trie_store, &root_hash, key, value).unwrap();\n        match write_result {\n            WriteResult::Written(hash) => {\n                root_hash = hash;\n            }\n            WriteResult::AlreadyExists => (),\n            WriteResult::RootNotFound => panic!(\"invalid root hash\"),\n        };\n    }\n    root_hash\n}\n\nfn create_empty_store() -> (LmdbEnvironment, LmdbTrieStore) {\n    let _temp_dir = tempdir().unwrap();\n    let environment = LmdbEnvironment::new(_temp_dir.path(), DB_SIZE, MAX_READERS, true).unwrap();\n    let store = LmdbTrieStore::new(&environment, None, DatabaseFlags::empty()).unwrap();\n\n    (environment, store)\n}\n\nfn store_empty_root(env: &LmdbEnvironment, store: &LmdbTrieStore) -> Digest {\n    let trie: Trie<Key, u32> = Trie::node(&[]);\n    let trie_bytes = trie.to_bytes().unwrap();\n    let hash = Digest::hash(trie_bytes);\n\n    let mut txn = env.create_read_write_txn().unwrap();\n    
store.put(&mut txn, &hash, &trie).unwrap();\n    txn.commit().unwrap();\n\n    hash\n}\n\nfn sequential_write_bench(c: &mut Criterion, rng: &mut TestRng) {\n    let mut sequential_write_group = c.benchmark_group(\"trie_store_sequential_write\");\n    for batch_size in [1000, 10_000] {\n        sequential_write_group.throughput(Throughput::Elements(batch_size as u64));\n\n        if batch_size > 150_000 {\n            // Reduce the sample size to allow faster runtime.\n            sequential_write_group.sample_size(30);\n        }\n\n        sequential_write_group.bench_function(format!(\"write_sequential_{}\", batch_size), |b| {\n            b.iter_custom(|iter| {\n                let mut total = Duration::default();\n                for _ in 0..iter {\n                    let (env, store) = create_empty_store();\n                    let root_hash = store_empty_root(&env, &store);\n                    let mut txn = env.create_read_write_txn().unwrap();\n                    let data: Vec<(Key, u32)> =\n                        (0u32..batch_size).map(|val| (rng.gen(), val)).collect();\n\n                    let start = Instant::now();\n                    write_sequential(&store, &mut txn, root_hash, data);\n                    total = total.checked_add(start.elapsed()).unwrap();\n                }\n\n                total\n            })\n        });\n    }\n    sequential_write_group.finish();\n}\n\nfn batch_write_with_empty_store(c: &mut Criterion, rng: &mut TestRng) {\n    let mut batch_write_group = c.benchmark_group(\"batch_write_with_empty_store\");\n\n    for batch_size in [1000, 10_000] {\n        batch_write_group.throughput(Throughput::Elements(batch_size as u64));\n\n        if batch_size > 150_000 {\n            // Reduce the sample size to allow faster runtime.\n            batch_write_group.sample_size(30);\n        }\n\n        batch_write_group.bench_function(format!(\"write_batch_{}\", batch_size), |b| {\n            b.iter_custom(|iter| {\n          
      let mut total = Duration::default();\n                for _ in 0..iter {\n                    let (environment, store) = create_empty_store();\n                    let root_hash = store_empty_root(&environment, &store);\n                    let mut txn = environment.create_read_write_txn().unwrap();\n                    let data: Vec<(Key, u32)> =\n                        (0u32..batch_size).map(|val| (rng.gen(), val)).collect();\n\n                    let start = Instant::now();\n                    let _ = batch_write::<Key, u32, _, _, _, error::Error>(\n                        &mut txn,\n                        &store,\n                        &root_hash,\n                        data.into_iter(),\n                    )\n                    .unwrap();\n                    total = total.checked_add(start.elapsed()).unwrap();\n                }\n\n                total\n            })\n        });\n    }\n    batch_write_group.finish();\n}\n\nfn batch_write_with_populated_store(c: &mut Criterion, rng: &mut TestRng) {\n    let mut batch_write_group = c.benchmark_group(\"batch_write_with_populated_store\");\n\n    for batch_size in [1000, 10_000] {\n        batch_write_group.throughput(Throughput::Elements(batch_size as u64));\n\n        if batch_size > 150_000 {\n            // Reduce the sample size to allow faster runtime.\n            batch_write_group.sample_size(30);\n        }\n\n        batch_write_group.bench_function(format!(\"write_batch_{}\", batch_size), |b| {\n            b.iter_custom(|iter| {\n                let mut total = Duration::default();\n                for _ in 0..iter {\n                    let (environment, store) = create_empty_store();\n                    let root_hash = store_empty_root(&environment, &store);\n                    let mut txn = environment.create_read_write_txn().unwrap();\n                    let initial_data: Vec<(Key, u32)> =\n                        (0u32..200).map(|val| (rng.gen(), val)).collect();\n\n        
            // Pre-populate trie store with some data.\n                    let root_hash = write_sequential(&store, &mut txn, root_hash, initial_data);\n\n                    // Create a cache backed up by the pre-populated store. Any already existing\n                    // nodes will be read-back into the cache.\n                    let data: Vec<(Key, u32)> =\n                        (0u32..batch_size).map(|val| (rng.gen(), val)).collect();\n\n                    let start = Instant::now();\n                    let _ = batch_write::<Key, u32, _, _, _, error::Error>(\n                        &mut txn,\n                        &store,\n                        &root_hash,\n                        data.into_iter(),\n                    )\n                    .unwrap();\n                    total = total.checked_add(start.elapsed()).unwrap();\n                }\n\n                total\n            })\n        });\n    }\n    batch_write_group.finish();\n}\n\nfn trie_store_batch_write_bench(c: &mut Criterion) {\n    let mut rng = TestRng::new();\n\n    sequential_write_bench(c, &mut rng);\n    batch_write_with_empty_store(c, &mut rng);\n    batch_write_with_populated_store(c, &mut rng);\n}\n\ncriterion_group! {\n  name = benches;\n  config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));\n  targets = trie_store_batch_write_bench\n}\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage/src/address_generator.rs",
    "content": "//! Generates unique 32-byte addresses.\nuse rand::{RngCore, SeedableRng};\nuse rand_chacha::ChaChaRng;\n\nuse casper_types::{AccessRights, Digest, Phase, URef};\n\n/// The length of an address.\npub const ADDRESS_LENGTH: usize = 32;\n\n/// Alias for an array of bytes that represents an address.\npub type Address = [u8; ADDRESS_LENGTH];\n\nconst SEED_LENGTH: usize = 32;\n\n/// An `AddressGenerator` generates `URef` addresses.\npub struct AddressGenerator(ChaChaRng);\n\nimpl AddressGenerator {\n    /// Creates an [`AddressGenerator`] from a 32-byte hash digest and [`Phase`].\n    pub fn new(hash: &[u8], phase: Phase) -> AddressGenerator {\n        AddressGeneratorBuilder::new()\n            .seed_with(hash)\n            .seed_with(&[phase as u8])\n            .build()\n    }\n\n    /// Creates a new [`Address`] by using an internal instance of PRNG.\n    pub fn create_address(&mut self) -> Address {\n        let mut buff = [0u8; ADDRESS_LENGTH];\n        self.0.fill_bytes(&mut buff);\n        buff\n    }\n\n    /// Creates a new [`Address`] by hashing an output from [`AddressGenerator::create_address`]\n    /// with a blake2b256.\n    pub fn new_hash_address(&mut self) -> Address {\n        Digest::hash(self.create_address()).value()\n    }\n\n    /// Creates a new [`URef`] with a new address generated.\n    pub fn new_uref(&mut self, access_rights: AccessRights) -> URef {\n        let addr = self.create_address();\n        URef::new(addr, access_rights)\n    }\n}\n\n/// A builder for [`AddressGenerator`].\n#[derive(Default)]\npub struct AddressGeneratorBuilder {\n    data: Vec<u8>,\n}\n\nimpl AddressGeneratorBuilder {\n    /// Creates a new builder.\n    pub fn new() -> Self {\n        Default::default()\n    }\n\n    /// Extends the seed with more data.\n    pub fn seed_with(mut self, bytes: &[u8]) -> Self {\n        self.data.extend(bytes);\n        self\n    }\n\n    /// Creates a new [`AddressGenerator`].\n    ///\n    /// This method hashes the 
seed bytes, and seeds the PRNG with it.\n    pub fn build(self) -> AddressGenerator {\n        let seed: [u8; SEED_LENGTH] = Digest::hash(self.data).value();\n        AddressGenerator(ChaChaRng::from_seed(seed))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use casper_types::Phase;\n\n    use super::AddressGenerator;\n\n    const DEPLOY_HASH_1: [u8; 32] = [1u8; 32];\n    const DEPLOY_HASH_2: [u8; 32] = [2u8; 32];\n\n    #[test]\n    fn should_generate_different_numbers_for_different_seeds() {\n        let mut ag_a = AddressGenerator::new(&DEPLOY_HASH_1, Phase::Session);\n        let mut ag_b = AddressGenerator::new(&DEPLOY_HASH_2, Phase::Session);\n        let random_a = ag_a.create_address();\n        let random_b = ag_b.create_address();\n\n        assert_ne!(random_a, random_b)\n    }\n\n    #[test]\n    fn should_generate_same_numbers_for_same_seed() {\n        let mut ag_a = AddressGenerator::new(&DEPLOY_HASH_1, Phase::Session);\n        let mut ag_b = AddressGenerator::new(&DEPLOY_HASH_1, Phase::Session);\n        let random_a = ag_a.create_address();\n        let random_b = ag_b.create_address();\n\n        assert_eq!(random_a, random_b)\n    }\n\n    #[test]\n    fn should_not_generate_same_numbers_for_different_phase() {\n        let mut ag_a = AddressGenerator::new(&DEPLOY_HASH_1, Phase::Payment);\n        let mut ag_b = AddressGenerator::new(&DEPLOY_HASH_1, Phase::Session);\n        let mut ag_c = AddressGenerator::new(&DEPLOY_HASH_1, Phase::FinalizePayment);\n        let random_a = ag_a.create_address();\n        let random_b = ag_b.create_address();\n        let random_c = ag_c.create_address();\n\n        assert_ne!(\n            random_a, random_b,\n            \"different phase should have different output\"\n        );\n\n        assert_ne!(\n            random_a, random_c,\n            \"different phase should have different output\"\n        );\n    }\n}\n"
  },
  {
    "path": "storage/src/block_store/block_provider.rs",
    "content": "use super::error::BlockStoreError;\n\n/// A block store that supports read/write operations consistently.\npub trait BlockStoreProvider {\n    /// Reader alias.\n    type Reader<'a>: BlockStoreTransaction\n    where\n        Self: 'a;\n    /// ReaderWriter alias.\n    type ReaderWriter<'a>: BlockStoreTransaction\n    where\n        Self: 'a;\n\n    /// Check out read only handle.\n    fn checkout_ro(&self) -> Result<Self::Reader<'_>, BlockStoreError>;\n    /// Check out read write handle.\n    fn checkout_rw(&mut self) -> Result<Self::ReaderWriter<'_>, BlockStoreError>;\n}\n\n/// Block store transaction.\npub trait BlockStoreTransaction {\n    /// Commit changes to the block store.\n    fn commit(self) -> Result<(), BlockStoreError>;\n\n    /// Roll back any temporary changes to the block store.\n    fn rollback(self);\n}\n\n/// Data reader definition.\npub trait DataReader<K, T> {\n    /// Read item at key.\n    fn read(&self, key: K) -> Result<Option<T>, BlockStoreError>;\n    /// Returns true if item exists at key, else false.\n    fn exists(&self, key: K) -> Result<bool, BlockStoreError>;\n}\n\n/// Data write definition.\npub trait DataWriter<K, T> {\n    /// Write item to store and return key.\n    fn write(&mut self, data: &T) -> Result<K, BlockStoreError>;\n    /// Delete item at key from store.\n    fn delete(&mut self, key: K) -> Result<(), BlockStoreError>;\n}\n"
  },
  {
    "path": "storage/src/block_store/error.rs",
    "content": "use casper_types::{BlockHash, EraId, TransactionHash};\nuse std::fmt::Debug;\nuse thiserror::Error;\n\n/// Block store error.\n#[derive(Debug, Error)]\npub enum BlockStoreError {\n    /// Found a duplicate block entry of the specified height.\n    #[error(\"duplicate entries for block at height {height}: {first} / {second}\")]\n    DuplicateBlock {\n        /// Height at which duplicate was found.\n        height: u64,\n        /// First block hash encountered at `height`.\n        first: BlockHash,\n        /// Second block hash encountered at `height`.\n        second: BlockHash,\n    },\n    /// Found a duplicate switch-block entry of the specified height.\n    #[error(\"duplicate entries for switch block at era id {era_id}: {first} / {second}\")]\n    DuplicateEraId {\n        /// Era ID at which duplicate was found.\n        era_id: EraId,\n        /// First block hash encountered at `era_id`.\n        first: BlockHash,\n        /// Second block hash encountered at `era_id`.\n        second: BlockHash,\n    },\n    /// Found a duplicate transaction entry.\n    #[error(\"duplicate entries for blocks for transaction {transaction_hash}: {first} / {second}\")]\n    DuplicateTransaction {\n        /// Transaction hash at which duplicate was found.\n        transaction_hash: TransactionHash,\n        /// First block hash encountered at `transaction_hash`.\n        first: BlockHash,\n        /// Second block hash encountered at `transaction_hash`.\n        second: BlockHash,\n    },\n    /// Internal error.\n    #[error(\"internal database error: {0}\")]\n    InternalStorage(Box<dyn std::error::Error + Send + Sync>),\n    /// The operation is unsupported.\n    #[error(\"unsupported operation\")]\n    UnsupportedOperation,\n}\n"
  },
  {
    "path": "storage/src/block_store/lmdb/indexed_lmdb_block_store.rs",
    "content": "use std::{\n    borrow::Cow,\n    collections::{btree_map, hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet},\n};\n\nuse super::{\n    lmdb_block_store::LmdbBlockStore, lmdb_ext::LmdbExtError, temp_map::TempMap, DbTableId,\n};\nuse datasize::DataSize;\nuse lmdb::{\n    Environment, RoTransaction, RwCursor, RwTransaction, Transaction as LmdbTransaction, WriteFlags,\n};\n\nuse tracing::info;\n\nuse super::versioned_databases::VersionedDatabases;\nuse crate::block_store::{\n    block_provider::{BlockStoreTransaction, DataReader, DataWriter},\n    types::{\n        ApprovalsHashes, BlockExecutionResults, BlockHashHeightAndEra, BlockHeight, BlockTransfers,\n        LatestSwitchBlock, StateStore, StateStoreKey, Tip, TransactionFinalizedApprovals,\n    },\n    BlockStoreError, BlockStoreProvider, DbRawBytesSpec,\n};\nuse casper_types::{\n    execution::ExecutionResult, Approval, Block, BlockBody, BlockHash, BlockHeader,\n    BlockSignatures, Digest, EraId, ProtocolVersion, Transaction, TransactionHash, Transfer,\n};\n\n/// Indexed lmdb block store.\n#[derive(DataSize, Debug)]\npub struct IndexedLmdbBlockStore {\n    /// Block store\n    block_store: LmdbBlockStore,\n    /// A map of block height to block ID.\n    block_height_index: BTreeMap<u64, BlockHash>,\n    /// A map of era ID to switch block ID.\n    switch_block_era_id_index: BTreeMap<EraId, BlockHash>,\n    /// A map of transaction hashes to hashes, heights and era IDs of blocks containing them.\n    transaction_hash_index: BTreeMap<TransactionHash, BlockHashHeightAndEra>,\n}\n\nimpl IndexedLmdbBlockStore {\n    fn get_reader(&self) -> Result<IndexedLmdbBlockStoreReadTransaction<'_>, BlockStoreError> {\n        let txn = self\n            .block_store\n            .env\n            .begin_ro_txn()\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n        Ok(IndexedLmdbBlockStoreReadTransaction {\n            txn,\n            block_store: self,\n        })\n    
}\n\n    /// Inserts the relevant entries to the index.\n    ///\n    /// If a duplicate entry is encountered, index is not updated and an error is returned.\n    fn insert_to_transaction_index(\n        transaction_hash_index: &mut BTreeMap<TransactionHash, BlockHashHeightAndEra>,\n        block_hash: BlockHash,\n        block_height: u64,\n        era_id: EraId,\n        transaction_hashes: Vec<TransactionHash>,\n    ) -> Result<(), BlockStoreError> {\n        if let Some(hash) = transaction_hashes.iter().find(|hash| {\n            transaction_hash_index\n                .get(hash)\n                .is_some_and(|old_details| old_details.block_hash != block_hash)\n        }) {\n            return Err(BlockStoreError::DuplicateTransaction {\n                transaction_hash: *hash,\n                first: transaction_hash_index[hash].block_hash,\n                second: block_hash,\n            });\n        }\n\n        for hash in transaction_hashes {\n            transaction_hash_index.insert(\n                hash,\n                BlockHashHeightAndEra::new(block_hash, block_height, era_id),\n            );\n        }\n\n        Ok(())\n    }\n\n    /// Inserts the relevant entries to the two indices.\n    ///\n    /// If a duplicate entry is encountered, neither index is updated and an error is returned.\n    pub(super) fn insert_to_block_header_indices(\n        block_height_index: &mut BTreeMap<u64, BlockHash>,\n        switch_block_era_id_index: &mut BTreeMap<EraId, BlockHash>,\n        block_header: &BlockHeader,\n    ) -> Result<(), BlockStoreError> {\n        let block_hash = block_header.block_hash();\n        if let Some(first) = block_height_index.get(&block_header.height()) {\n            if *first != block_hash {\n                return Err(BlockStoreError::DuplicateBlock {\n                    height: block_header.height(),\n                    first: *first,\n                    second: block_hash,\n                });\n            }\n        
}\n\n        if block_header.is_switch_block() {\n            match switch_block_era_id_index.entry(block_header.era_id()) {\n                btree_map::Entry::Vacant(entry) => {\n                    let _ = entry.insert(block_hash);\n                }\n                btree_map::Entry::Occupied(entry) => {\n                    if *entry.get() != block_hash {\n                        return Err(BlockStoreError::DuplicateEraId {\n                            era_id: block_header.era_id(),\n                            first: *entry.get(),\n                            second: block_hash,\n                        });\n                    }\n                }\n            }\n        }\n\n        let _ = block_height_index.insert(block_header.height(), block_hash);\n        Ok(())\n    }\n\n    /// Ctor.\n    pub fn new(\n        block_store: LmdbBlockStore,\n        hard_reset_to_start_of_era: Option<EraId>,\n        protocol_version: ProtocolVersion,\n    ) -> Result<IndexedLmdbBlockStore, BlockStoreError> {\n        // We now need to restore the block-height index. 
Log messages allow timing here.\n        info!(\"indexing block store\");\n        let mut block_height_index = BTreeMap::new();\n        let mut switch_block_era_id_index = BTreeMap::new();\n        let mut transaction_hash_index = BTreeMap::new();\n        let mut block_txn = block_store\n            .env\n            .begin_rw_txn()\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n\n        let mut deleted_block_hashes = HashSet::new();\n        // Map of all block body hashes, with their values representing whether to retain the\n        // corresponding block bodies or not.\n        let mut block_body_hashes = HashMap::new();\n        let mut deleted_transaction_hashes = HashSet::<TransactionHash>::new();\n\n        let mut init_fn =\n            |cursor: &mut RwCursor, block_header: BlockHeader| -> Result<(), BlockStoreError> {\n                let should_retain_block = match hard_reset_to_start_of_era {\n                    Some(invalid_era) => {\n                        // Retain blocks from eras before the hard reset era, and blocks after this\n                        // era if they are from the current protocol version (as otherwise a node\n                        // restart would purge them again, despite them being valid).\n                        block_header.era_id() < invalid_era\n                            || block_header.protocol_version() == protocol_version\n                    }\n                    None => true,\n                };\n\n                // If we don't already have the block body hash in the collection, insert it with\n                // the value `should_retain_block`.\n                //\n                // If there is an existing value, the updated value should be `false` iff the\n                // existing value and `should_retain_block` are both `false`.\n                // Otherwise the updated value should be `true`.\n                match block_body_hashes.entry(*block_header.body_hash()) 
{\n                    Entry::Vacant(entry) => {\n                        entry.insert(should_retain_block);\n                    }\n                    Entry::Occupied(entry) => {\n                        let value = entry.into_mut();\n                        *value = *value || should_retain_block;\n                    }\n                }\n\n                let body_txn = block_store\n                    .env\n                    .begin_ro_txn()\n                    .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n                let maybe_block_body = block_store\n                    .block_body_dbs\n                    .get(&body_txn, block_header.body_hash())\n                    .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n                if !should_retain_block {\n                    let _ = deleted_block_hashes.insert(block_header.block_hash());\n\n                    match &maybe_block_body {\n                        Some(BlockBody::V1(v1_body)) => deleted_transaction_hashes.extend(\n                            v1_body\n                                .deploy_and_transfer_hashes()\n                                .map(TransactionHash::from),\n                        ),\n                        Some(BlockBody::V2(v2_body)) => {\n                            let transactions = v2_body.all_transactions();\n                            deleted_transaction_hashes.extend(transactions)\n                        }\n                        None => (),\n                    }\n\n                    cursor\n                        .del(WriteFlags::empty())\n                        .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n                    return Ok(());\n                }\n\n                Self::insert_to_block_header_indices(\n                    &mut block_height_index,\n                    &mut switch_block_era_id_index,\n                    &block_header,\n                )?;\n\n                if 
let Some(block_body) = maybe_block_body {\n                    let transaction_hashes = match block_body {\n                        BlockBody::V1(v1) => v1\n                            .deploy_and_transfer_hashes()\n                            .map(TransactionHash::from)\n                            .collect(),\n                        BlockBody::V2(v2) => v2.all_transactions().copied().collect(),\n                    };\n                    Self::insert_to_transaction_index(\n                        &mut transaction_hash_index,\n                        block_header.block_hash(),\n                        block_header.height(),\n                        block_header.era_id(),\n                        transaction_hashes,\n                    )?;\n                }\n\n                Ok(())\n            };\n\n        block_store\n            .block_header_dbs\n            .for_each_value_in_current(&mut block_txn, &mut init_fn)?;\n        block_store\n            .block_header_dbs\n            .for_each_value_in_legacy(&mut block_txn, &mut init_fn)?;\n\n        info!(\"block store reindexing complete\");\n        block_txn\n            .commit()\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n\n        let deleted_block_body_hashes = block_body_hashes\n            .into_iter()\n            .filter_map(|(body_hash, retain)| (!retain).then_some(body_hash))\n            .collect();\n        initialize_block_body_dbs(\n            &block_store.env,\n            block_store.block_body_dbs,\n            deleted_block_body_hashes,\n        )?;\n        initialize_block_metadata_dbs(\n            &block_store.env,\n            block_store.block_metadata_dbs,\n            deleted_block_hashes,\n        )?;\n        initialize_execution_result_dbs(\n            &block_store.env,\n            block_store.execution_result_dbs,\n            deleted_transaction_hashes,\n        )\n        .map_err(|err| 
BlockStoreError::InternalStorage(Box::new(err)))?;\n\n        Ok(Self {\n            block_store,\n            block_height_index,\n            switch_block_era_id_index,\n            transaction_hash_index,\n        })\n    }\n}\n\n/// Purges stale entries from the block body databases.\nfn initialize_block_body_dbs(\n    env: &Environment,\n    block_body_dbs: VersionedDatabases<Digest, BlockBody>,\n    deleted_block_body_hashes: HashSet<Digest>,\n) -> Result<(), BlockStoreError> {\n    info!(\"initializing block body databases\");\n    let mut txn = env\n        .begin_rw_txn()\n        .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n    for body_hash in deleted_block_body_hashes {\n        block_body_dbs\n            .delete(&mut txn, &body_hash)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n    }\n    txn.commit()\n        .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n    info!(\"block body database initialized\");\n    Ok(())\n}\n\n/// Purges stale entries from the block metadata database.\nfn initialize_block_metadata_dbs(\n    env: &Environment,\n    block_metadata_dbs: VersionedDatabases<BlockHash, BlockSignatures>,\n    deleted_block_hashes: HashSet<BlockHash>,\n) -> Result<(), BlockStoreError> {\n    let block_count_to_be_deleted = deleted_block_hashes.len();\n    info!(\n        block_count_to_be_deleted,\n        \"initializing block metadata database\"\n    );\n    let mut txn = env\n        .begin_rw_txn()\n        .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n    for block_hash in deleted_block_hashes {\n        block_metadata_dbs\n            .delete(&mut txn, &block_hash)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?\n    }\n    txn.commit()\n        .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n    info!(\"block metadata database initialized\");\n    Ok(())\n}\n\n/// Purges stale entries from the 
execution result databases.\nfn initialize_execution_result_dbs(\n    env: &Environment,\n    execution_result_dbs: VersionedDatabases<TransactionHash, ExecutionResult>,\n    deleted_transaction_hashes: HashSet<TransactionHash>,\n) -> Result<(), LmdbExtError> {\n    let exec_results_count_to_be_deleted = deleted_transaction_hashes.len();\n    info!(\n        exec_results_count_to_be_deleted,\n        \"initializing execution result databases\"\n    );\n    let mut txn = env.begin_rw_txn()?;\n    for hash in deleted_transaction_hashes {\n        execution_result_dbs.delete(&mut txn, &hash)?;\n    }\n    txn.commit()?;\n    info!(\"execution result databases initialized\");\n    Ok(())\n}\n\npub struct IndexedLmdbBlockStoreRWTransaction<'t> {\n    txn: RwTransaction<'t>,\n    block_store: &'t LmdbBlockStore,\n    block_height_index: TempMap<'t, u64, BlockHash>,\n    switch_block_era_id_index: TempMap<'t, EraId, BlockHash>,\n    transaction_hash_index: TempMap<'t, TransactionHash, BlockHashHeightAndEra>,\n}\n\nimpl IndexedLmdbBlockStoreRWTransaction<'_> {\n    /// Check if the block height index can be updated.\n    fn should_update_block_height_index(\n        &self,\n        block_height: u64,\n        block_hash: &BlockHash,\n    ) -> Result<bool, BlockStoreError> {\n        if let Some(first) = self.block_height_index.get(&block_height) {\n            // There is a block in the index at this height\n            if first != *block_hash {\n                Err(BlockStoreError::DuplicateBlock {\n                    height: block_height,\n                    first,\n                    second: *block_hash,\n                })\n            } else {\n                // Same value already in index, no need to update it.\n                Ok(false)\n            }\n        } else {\n            // Value not in index, update.\n            Ok(true)\n        }\n    }\n\n    /// Check if the switch block index can be updated.\n    fn should_update_switch_block_index(\n        
&self,\n        block_header: &BlockHeader,\n    ) -> Result<bool, BlockStoreError> {\n        if block_header.is_switch_block() {\n            let era_id = block_header.era_id();\n            if let Some(entry) = self.switch_block_era_id_index.get(&era_id) {\n                let block_hash = block_header.block_hash();\n                if entry != block_hash {\n                    Err(BlockStoreError::DuplicateEraId {\n                        era_id,\n                        first: entry,\n                        second: block_hash,\n                    })\n                } else {\n                    // already in index, no need to update.\n                    Ok(false)\n                }\n            } else {\n                // not in the index, update.\n                Ok(true)\n            }\n        } else {\n            // not a switch block.\n            Ok(false)\n        }\n    }\n\n    // Check if the transaction hash index can be updated.\n    fn should_update_transaction_hash_index(\n        &self,\n        transaction_hashes: &[TransactionHash],\n        block_hash: &BlockHash,\n    ) -> Result<bool, BlockStoreError> {\n        if let Some(hash) = transaction_hashes.iter().find(|hash| {\n            self.transaction_hash_index\n                .get(hash)\n                .is_some_and(|old_details| old_details.block_hash != *block_hash)\n        }) {\n            return Err(BlockStoreError::DuplicateTransaction {\n                transaction_hash: *hash,\n                first: self.transaction_hash_index.get(hash).unwrap().block_hash,\n                second: *block_hash,\n            });\n        }\n        Ok(true)\n    }\n}\n\npub struct IndexedLmdbBlockStoreReadTransaction<'t> {\n    txn: RoTransaction<'t>,\n    block_store: &'t IndexedLmdbBlockStore,\n}\n\nenum LmdbBlockStoreIndex {\n    BlockHeight(IndexPosition<u64>),\n    SwitchBlockEraId(IndexPosition<EraId>),\n}\n\nenum IndexPosition<K> {\n    Tip,\n    Key(K),\n}\n\nenum DataType {\n    
Block,\n    BlockHeader,\n    ApprovalsHashes,\n    BlockSignatures,\n}\n\nimpl IndexedLmdbBlockStoreReadTransaction<'_> {\n    fn block_hash_from_index(&self, index: LmdbBlockStoreIndex) -> Option<&BlockHash> {\n        match index {\n            LmdbBlockStoreIndex::BlockHeight(position) => match position {\n                IndexPosition::Tip => self.block_store.block_height_index.values().last(),\n                IndexPosition::Key(height) => self.block_store.block_height_index.get(&height),\n            },\n            LmdbBlockStoreIndex::SwitchBlockEraId(position) => match position {\n                IndexPosition::Tip => self.block_store.switch_block_era_id_index.values().last(),\n                IndexPosition::Key(era_id) => {\n                    self.block_store.switch_block_era_id_index.get(&era_id)\n                }\n            },\n        }\n    }\n\n    fn read_block_indexed(\n        &self,\n        index: LmdbBlockStoreIndex,\n    ) -> Result<Option<Block>, BlockStoreError> {\n        self.block_hash_from_index(index)\n            .and_then(|block_hash| {\n                self.block_store\n                    .block_store\n                    .get_single_block(&self.txn, block_hash)\n                    .transpose()\n            })\n            .transpose()\n    }\n\n    fn read_block_header_indexed(\n        &self,\n        index: LmdbBlockStoreIndex,\n    ) -> Result<Option<BlockHeader>, BlockStoreError> {\n        self.block_hash_from_index(index)\n            .and_then(|block_hash| {\n                self.block_store\n                    .block_store\n                    .get_single_block_header(&self.txn, block_hash)\n                    .transpose()\n            })\n            .transpose()\n    }\n\n    fn read_block_signatures_indexed(\n        &self,\n        index: LmdbBlockStoreIndex,\n    ) -> Result<Option<BlockSignatures>, BlockStoreError> {\n        self.block_hash_from_index(index)\n            .and_then(|block_hash| {\n            
    self.block_store\n                    .block_store\n                    .get_block_signatures(&self.txn, block_hash)\n                    .transpose()\n            })\n            .transpose()\n    }\n\n    fn read_approvals_hashes_indexed(\n        &self,\n        index: LmdbBlockStoreIndex,\n    ) -> Result<Option<ApprovalsHashes>, BlockStoreError> {\n        self.block_hash_from_index(index)\n            .and_then(|block_hash| {\n                self.block_store\n                    .block_store\n                    .read_approvals_hashes(&self.txn, block_hash)\n                    .transpose()\n            })\n            .transpose()\n    }\n\n    fn contains_data_indexed(\n        &self,\n        index: LmdbBlockStoreIndex,\n        data_type: DataType,\n    ) -> Result<bool, BlockStoreError> {\n        self.block_hash_from_index(index)\n            .map_or(Ok(false), |block_hash| match data_type {\n                DataType::Block => self\n                    .block_store\n                    .block_store\n                    .block_exists(&self.txn, block_hash),\n                DataType::BlockHeader => self\n                    .block_store\n                    .block_store\n                    .block_header_exists(&self.txn, block_hash),\n                DataType::ApprovalsHashes => self\n                    .block_store\n                    .block_store\n                    .approvals_hashes_exist(&self.txn, block_hash),\n                DataType::BlockSignatures => self\n                    .block_store\n                    .block_store\n                    .block_signatures_exist(&self.txn, block_hash),\n            })\n    }\n}\n\nimpl BlockStoreTransaction for IndexedLmdbBlockStoreReadTransaction<'_> {\n    fn commit(self) -> Result<(), BlockStoreError> {\n        Ok(())\n    }\n\n    fn rollback(self) {\n        self.txn.abort();\n    }\n}\n\nimpl BlockStoreTransaction for IndexedLmdbBlockStoreRWTransaction<'_> {\n    fn commit(self) -> 
Result<(), BlockStoreError> {\n        self.txn\n            .commit()\n            .map_err(|e| BlockStoreError::InternalStorage(Box::new(LmdbExtError::from(e))))?;\n\n        self.block_height_index.commit();\n        self.switch_block_era_id_index.commit();\n        self.transaction_hash_index.commit();\n        Ok(())\n    }\n\n    fn rollback(self) {\n        self.txn.abort();\n    }\n}\n\nimpl BlockStoreProvider for IndexedLmdbBlockStore {\n    type Reader<'t> = IndexedLmdbBlockStoreReadTransaction<'t>;\n    type ReaderWriter<'t> = IndexedLmdbBlockStoreRWTransaction<'t>;\n\n    fn checkout_ro(&self) -> Result<Self::Reader<'_>, BlockStoreError> {\n        self.get_reader()\n    }\n\n    fn checkout_rw(&mut self) -> Result<Self::ReaderWriter<'_>, BlockStoreError> {\n        let txn = self\n            .block_store\n            .env\n            .begin_rw_txn()\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n\n        Ok(IndexedLmdbBlockStoreRWTransaction {\n            txn,\n            block_store: &self.block_store,\n            block_height_index: TempMap::new(&mut self.block_height_index),\n            switch_block_era_id_index: TempMap::new(&mut self.switch_block_era_id_index),\n            transaction_hash_index: TempMap::new(&mut self.transaction_hash_index),\n        })\n    }\n}\n\nimpl DataReader<BlockHash, Block> for IndexedLmdbBlockStoreReadTransaction<'_> {\n    fn read(&self, key: BlockHash) -> Result<Option<Block>, BlockStoreError> {\n        self.block_store\n            .block_store\n            .get_single_block(&self.txn, &key)\n    }\n\n    fn exists(&self, key: BlockHash) -> Result<bool, BlockStoreError> {\n        self.block_store.block_store.block_exists(&self.txn, &key)\n    }\n}\n\nimpl DataReader<BlockHash, BlockHeader> for IndexedLmdbBlockStoreReadTransaction<'_> {\n    fn read(&self, key: BlockHash) -> Result<Option<BlockHeader>, BlockStoreError> {\n        self.block_store\n            .block_store\n  
          .get_single_block_header(&self.txn, &key)\n    }\n\n    fn exists(&self, key: BlockHash) -> Result<bool, BlockStoreError> {\n        self.block_store\n            .block_store\n            .block_header_exists(&self.txn, &key)\n    }\n}\n\nimpl DataReader<BlockHash, ApprovalsHashes> for IndexedLmdbBlockStoreReadTransaction<'_> {\n    fn read(&self, key: BlockHash) -> Result<Option<ApprovalsHashes>, BlockStoreError> {\n        self.block_store\n            .block_store\n            .read_approvals_hashes(&self.txn, &key)\n    }\n\n    fn exists(&self, key: BlockHash) -> Result<bool, BlockStoreError> {\n        self.block_store\n            .block_store\n            .block_header_exists(&self.txn, &key)\n    }\n}\n\nimpl DataReader<BlockHash, BlockSignatures> for IndexedLmdbBlockStoreReadTransaction<'_> {\n    fn read(&self, key: BlockHash) -> Result<Option<BlockSignatures>, BlockStoreError> {\n        self.block_store\n            .block_store\n            .get_block_signatures(&self.txn, &key)\n    }\n\n    fn exists(&self, key: BlockHash) -> Result<bool, BlockStoreError> {\n        self.block_store\n            .block_store\n            .block_signatures_exist(&self.txn, &key)\n    }\n}\n\nimpl DataReader<BlockHeight, Block> for IndexedLmdbBlockStoreReadTransaction<'_> {\n    fn read(&self, key: BlockHeight) -> Result<Option<Block>, BlockStoreError> {\n        self.read_block_indexed(LmdbBlockStoreIndex::BlockHeight(IndexPosition::Key(key)))\n    }\n\n    fn exists(&self, key: BlockHeight) -> Result<bool, BlockStoreError> {\n        self.contains_data_indexed(\n            LmdbBlockStoreIndex::BlockHeight(IndexPosition::Key(key)),\n            DataType::Block,\n        )\n    }\n}\n\nimpl DataReader<BlockHeight, BlockHeader> for IndexedLmdbBlockStoreReadTransaction<'_> {\n    fn read(&self, key: BlockHeight) -> Result<Option<BlockHeader>, BlockStoreError> {\n        
self.read_block_header_indexed(LmdbBlockStoreIndex::BlockHeight(IndexPosition::Key(key)))\n    }\n\n    fn exists(&self, key: BlockHeight) -> Result<bool, BlockStoreError> {\n        self.contains_data_indexed(\n            LmdbBlockStoreIndex::BlockHeight(IndexPosition::Key(key)),\n            DataType::BlockHeader,\n        )\n    }\n}\n\nimpl DataReader<BlockHeight, ApprovalsHashes> for IndexedLmdbBlockStoreReadTransaction<'_> {\n    fn read(&self, key: BlockHeight) -> Result<Option<ApprovalsHashes>, BlockStoreError> {\n        self.read_approvals_hashes_indexed(LmdbBlockStoreIndex::BlockHeight(IndexPosition::Key(\n            key,\n        )))\n    }\n\n    fn exists(&self, key: BlockHeight) -> Result<bool, BlockStoreError> {\n        self.contains_data_indexed(\n            LmdbBlockStoreIndex::BlockHeight(IndexPosition::Key(key)),\n            DataType::ApprovalsHashes,\n        )\n    }\n}\n\nimpl DataReader<BlockHeight, BlockSignatures> for IndexedLmdbBlockStoreReadTransaction<'_> {\n    fn read(&self, key: BlockHeight) -> Result<Option<BlockSignatures>, BlockStoreError> {\n        self.read_block_signatures_indexed(LmdbBlockStoreIndex::BlockHeight(IndexPosition::Key(\n            key,\n        )))\n    }\n\n    fn exists(&self, key: BlockHeight) -> Result<bool, BlockStoreError> {\n        self.contains_data_indexed(\n            LmdbBlockStoreIndex::BlockHeight(IndexPosition::Key(key)),\n            DataType::BlockSignatures,\n        )\n    }\n}\n\n/// Retrieves single switch block by era ID by looking it up in the index and returning it.\nimpl DataReader<EraId, Block> for IndexedLmdbBlockStoreReadTransaction<'_> {\n    fn read(&self, key: EraId) -> Result<Option<Block>, BlockStoreError> {\n        self.read_block_indexed(LmdbBlockStoreIndex::SwitchBlockEraId(IndexPosition::Key(\n            key,\n        )))\n    }\n\n    fn exists(&self, key: EraId) -> Result<bool, BlockStoreError> {\n        self.contains_data_indexed(\n            
LmdbBlockStoreIndex::SwitchBlockEraId(IndexPosition::Key(key)),\n            DataType::Block,\n        )\n    }\n}\n\n/// Retrieves single switch block header by era ID by looking it up in the index and returning\n/// it.\nimpl DataReader<EraId, BlockHeader> for IndexedLmdbBlockStoreReadTransaction<'_> {\n    fn read(&self, key: EraId) -> Result<Option<BlockHeader>, BlockStoreError> {\n        self.read_block_header_indexed(LmdbBlockStoreIndex::SwitchBlockEraId(IndexPosition::Key(\n            key,\n        )))\n    }\n\n    fn exists(&self, key: EraId) -> Result<bool, BlockStoreError> {\n        self.contains_data_indexed(\n            LmdbBlockStoreIndex::SwitchBlockEraId(IndexPosition::Key(key)),\n            DataType::BlockHeader,\n        )\n    }\n}\n\nimpl DataReader<EraId, ApprovalsHashes> for IndexedLmdbBlockStoreReadTransaction<'_> {\n    fn read(&self, key: EraId) -> Result<Option<ApprovalsHashes>, BlockStoreError> {\n        self.read_approvals_hashes_indexed(LmdbBlockStoreIndex::SwitchBlockEraId(\n            IndexPosition::Key(key),\n        ))\n    }\n\n    fn exists(&self, key: EraId) -> Result<bool, BlockStoreError> {\n        self.contains_data_indexed(\n            LmdbBlockStoreIndex::SwitchBlockEraId(IndexPosition::Key(key)),\n            DataType::ApprovalsHashes,\n        )\n    }\n}\n\nimpl DataReader<EraId, BlockSignatures> for IndexedLmdbBlockStoreReadTransaction<'_> {\n    fn read(&self, key: EraId) -> Result<Option<BlockSignatures>, BlockStoreError> {\n        self.read_block_signatures_indexed(LmdbBlockStoreIndex::SwitchBlockEraId(\n            IndexPosition::Key(key),\n        ))\n    }\n\n    fn exists(&self, key: EraId) -> Result<bool, BlockStoreError> {\n        self.contains_data_indexed(\n            LmdbBlockStoreIndex::SwitchBlockEraId(IndexPosition::Key(key)),\n            DataType::BlockSignatures,\n        )\n    }\n}\n\nimpl DataReader<Tip, BlockHeader> for IndexedLmdbBlockStoreReadTransaction<'_> {\n    fn read(&self, _key: 
Tip) -> Result<Option<BlockHeader>, BlockStoreError> {\n        self.read_block_header_indexed(LmdbBlockStoreIndex::BlockHeight(IndexPosition::Tip))\n    }\n\n    fn exists(&self, _key: Tip) -> Result<bool, BlockStoreError> {\n        self.contains_data_indexed(\n            LmdbBlockStoreIndex::BlockHeight(IndexPosition::Tip),\n            DataType::BlockHeader,\n        )\n    }\n}\n\nimpl DataReader<Tip, Block> for IndexedLmdbBlockStoreReadTransaction<'_> {\n    fn read(&self, _key: Tip) -> Result<Option<Block>, BlockStoreError> {\n        self.read_block_indexed(LmdbBlockStoreIndex::BlockHeight(IndexPosition::Tip))\n    }\n\n    fn exists(&self, _key: Tip) -> Result<bool, BlockStoreError> {\n        self.contains_data_indexed(\n            LmdbBlockStoreIndex::BlockHeight(IndexPosition::Tip),\n            DataType::Block,\n        )\n    }\n}\n\nimpl DataReader<LatestSwitchBlock, BlockHeader> for IndexedLmdbBlockStoreReadTransaction<'_> {\n    fn read(&self, _key: LatestSwitchBlock) -> Result<Option<BlockHeader>, BlockStoreError> {\n        self.read_block_header_indexed(LmdbBlockStoreIndex::SwitchBlockEraId(IndexPosition::Tip))\n    }\n\n    fn exists(&self, _key: LatestSwitchBlock) -> Result<bool, BlockStoreError> {\n        self.contains_data_indexed(\n            LmdbBlockStoreIndex::SwitchBlockEraId(IndexPosition::Tip),\n            DataType::BlockHeader,\n        )\n    }\n}\n\nimpl DataReader<TransactionHash, BlockHashHeightAndEra>\n    for IndexedLmdbBlockStoreReadTransaction<'_>\n{\n    fn read(&self, key: TransactionHash) -> Result<Option<BlockHashHeightAndEra>, BlockStoreError> {\n        Ok(self.block_store.transaction_hash_index.get(&key).copied())\n    }\n\n    fn exists(&self, key: TransactionHash) -> Result<bool, BlockStoreError> {\n        Ok(self.block_store.transaction_hash_index.contains_key(&key))\n    }\n}\n\nimpl DataReader<TransactionHash, Transaction> for IndexedLmdbBlockStoreReadTransaction<'_> {\n    fn read(&self, key: 
TransactionHash) -> Result<Option<Transaction>, BlockStoreError> {\n        self.block_store\n            .block_store\n            .transaction_dbs\n            .get(&self.txn, &key)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    fn exists(&self, key: TransactionHash) -> Result<bool, BlockStoreError> {\n        self.block_store\n            .block_store\n            .transaction_exists(&self.txn, &key)\n    }\n}\n\nimpl DataReader<TransactionHash, BTreeSet<Approval>> for IndexedLmdbBlockStoreReadTransaction<'_> {\n    fn read(&self, key: TransactionHash) -> Result<Option<BTreeSet<Approval>>, BlockStoreError> {\n        self.block_store\n            .block_store\n            .finalized_transaction_approvals_dbs\n            .get(&self.txn, &key)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    fn exists(&self, key: TransactionHash) -> Result<bool, BlockStoreError> {\n        self.block_store\n            .block_store\n            .finalized_transaction_approvals_dbs\n            .exists(&self.txn, &key)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n}\n\nimpl DataReader<TransactionHash, ExecutionResult> for IndexedLmdbBlockStoreReadTransaction<'_> {\n    fn read(&self, key: TransactionHash) -> Result<Option<ExecutionResult>, BlockStoreError> {\n        self.block_store\n            .block_store\n            .execution_result_dbs\n            .get(&self.txn, &key)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    fn exists(&self, key: TransactionHash) -> Result<bool, BlockStoreError> {\n        self.block_store\n            .block_store\n            .execution_result_dbs\n            .exists(&self.txn, &key)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n}\n\nimpl DataReader<StateStoreKey, Vec<u8>> for IndexedLmdbBlockStoreReadTransaction<'_> {\n    fn read(&self, 
StateStoreKey(key): StateStoreKey) -> Result<Option<Vec<u8>>, BlockStoreError> {\n        self.block_store\n            .block_store\n            .read_state_store(&self.txn, &key)\n    }\n\n    fn exists(&self, StateStoreKey(key): StateStoreKey) -> Result<bool, BlockStoreError> {\n        self.block_store\n            .block_store\n            .state_store_key_exists(&self.txn, &key)\n    }\n}\n\nimpl DataReader<(DbTableId, Vec<u8>), DbRawBytesSpec> for IndexedLmdbBlockStoreReadTransaction<'_> {\n    fn read(\n        &self,\n        (id, key): (DbTableId, Vec<u8>),\n    ) -> Result<Option<DbRawBytesSpec>, BlockStoreError> {\n        if key.is_empty() {\n            return Ok(None);\n        }\n        let store = &self.block_store.block_store;\n        let res = match id {\n            DbTableId::BlockHeader => store.block_header_dbs.get_raw(&self.txn, &key),\n            DbTableId::BlockBody => store.block_body_dbs.get_raw(&self.txn, &key),\n            DbTableId::ApprovalsHashes => store.approvals_hashes_dbs.get_raw(&self.txn, &key),\n            DbTableId::BlockMetadata => store.block_metadata_dbs.get_raw(&self.txn, &key),\n            DbTableId::Transaction => store.transaction_dbs.get_raw(&self.txn, &key),\n            DbTableId::ExecutionResult => store.execution_result_dbs.get_raw(&self.txn, &key),\n            DbTableId::Transfer => store.transfer_dbs.get_raw(&self.txn, &key),\n            DbTableId::FinalizedTransactionApprovals => store\n                .finalized_transaction_approvals_dbs\n                .get_raw(&self.txn, &key),\n        };\n        res.map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    fn exists(&self, key: (DbTableId, Vec<u8>)) -> Result<bool, BlockStoreError> {\n        self.read(key).map(|res| res.is_some())\n    }\n}\n\nimpl DataWriter<BlockHash, Block> for IndexedLmdbBlockStoreRWTransaction<'_> {\n    /// Writes a block to storage.\n    ///\n    /// Returns `Ok(true)` if the block has been 
successfully written, `Ok(false)` if a part of it\n    /// couldn't be written because it already existed, and `Err(_)` if there was an error.\n    fn write(&mut self, data: &Block) -> Result<BlockHash, BlockStoreError> {\n        let block_header = data.clone_header();\n        let block_hash = data.hash();\n        let block_height = data.height();\n        let era_id = data.era_id();\n        let transaction_hashes: Vec<TransactionHash> = match &data {\n            Block::V1(v1) => v1\n                .deploy_and_transfer_hashes()\n                .map(TransactionHash::from)\n                .collect(),\n            Block::V2(v2) => v2.all_transactions().copied().collect(),\n        };\n\n        let update_height_index =\n            self.should_update_block_height_index(block_height, block_hash)?;\n        let update_switch_block_index = self.should_update_switch_block_index(&block_header)?;\n        let update_transaction_hash_index =\n            self.should_update_transaction_hash_index(&transaction_hashes, block_hash)?;\n\n        let key = self.block_store.write_block(&mut self.txn, data)?;\n\n        if update_height_index {\n            self.block_height_index.insert(block_height, *block_hash);\n        }\n\n        if update_switch_block_index {\n            self.switch_block_era_id_index.insert(era_id, *block_hash);\n        }\n\n        if update_transaction_hash_index {\n            for hash in transaction_hashes {\n                self.transaction_hash_index.insert(\n                    hash,\n                    BlockHashHeightAndEra::new(*block_hash, block_height, era_id),\n                );\n            }\n        }\n\n        Ok(key)\n    }\n\n    fn delete(&mut self, key: BlockHash) -> Result<(), BlockStoreError> {\n        let maybe_block = self.block_store.get_single_block(&self.txn, &key)?;\n\n        if let Some(block) = maybe_block {\n            let transaction_hashes: Vec<TransactionHash> = match &block {\n                Block::V1(v1) 
=> v1\n                    .deploy_and_transfer_hashes()\n                    .map(TransactionHash::from)\n                    .collect(),\n                Block::V2(v2) => v2.all_transactions().copied().collect(),\n            };\n\n            self.block_store.delete_block_header(&mut self.txn, &key)?;\n\n            /*\n            TODO: currently we don't delete the block body since other blocks may reference it.\n            self.block_store\n                .delete_block_body(&mut self.txn, block.body_hash())?;\n            */\n\n            self.block_height_index.remove(block.height());\n\n            if block.is_switch_block() {\n                self.switch_block_era_id_index.remove(block.era_id());\n            }\n\n            for hash in transaction_hashes {\n                self.transaction_hash_index.remove(hash);\n            }\n\n            self.block_store\n                .delete_finality_signatures(&mut self.txn, &key)?;\n        }\n        Ok(())\n    }\n}\n\nimpl DataWriter<BlockHash, ApprovalsHashes> for IndexedLmdbBlockStoreRWTransaction<'_> {\n    fn write(&mut self, data: &ApprovalsHashes) -> Result<BlockHash, BlockStoreError> {\n        self.block_store.write_approvals_hashes(&mut self.txn, data)\n    }\n\n    fn delete(&mut self, key: BlockHash) -> Result<(), BlockStoreError> {\n        self.block_store\n            .delete_approvals_hashes(&mut self.txn, &key)\n    }\n}\n\nimpl DataWriter<BlockHash, BlockSignatures> for IndexedLmdbBlockStoreRWTransaction<'_> {\n    fn write(&mut self, data: &BlockSignatures) -> Result<BlockHash, BlockStoreError> {\n        self.block_store\n            .write_finality_signatures(&mut self.txn, data)\n    }\n\n    fn delete(&mut self, key: BlockHash) -> Result<(), BlockStoreError> {\n        self.block_store\n            .delete_finality_signatures(&mut self.txn, &key)\n    }\n}\n\nimpl DataWriter<BlockHash, BlockHeader> for IndexedLmdbBlockStoreRWTransaction<'_> {\n    fn write(&mut self, data: 
&BlockHeader) -> Result<BlockHash, BlockStoreError> {\n        let block_hash = data.block_hash();\n        let block_height = data.height();\n        let era_id = data.era_id();\n\n        let update_height_index =\n            self.should_update_block_height_index(block_height, &block_hash)?;\n        let update_switch_block_index = self.should_update_switch_block_index(data)?;\n\n        let key = self.block_store.write_block_header(&mut self.txn, data)?;\n\n        if update_height_index {\n            self.block_height_index.insert(block_height, block_hash);\n        }\n\n        if update_switch_block_index {\n            self.switch_block_era_id_index.insert(era_id, block_hash);\n        }\n\n        Ok(key)\n    }\n\n    fn delete(&mut self, key: BlockHash) -> Result<(), BlockStoreError> {\n        let maybe_block_header = self.block_store.get_single_block_header(&self.txn, &key)?;\n\n        if let Some(block_header) = maybe_block_header {\n            self.block_store.delete_block_header(&mut self.txn, &key)?;\n\n            if block_header.is_switch_block() {\n                self.switch_block_era_id_index.remove(block_header.era_id());\n            }\n\n            self.block_height_index.remove(block_header.height());\n        }\n        Ok(())\n    }\n}\n\nimpl DataWriter<TransactionHash, Transaction> for IndexedLmdbBlockStoreRWTransaction<'_> {\n    fn write(&mut self, data: &Transaction) -> Result<TransactionHash, BlockStoreError> {\n        self.block_store.write_transaction(&mut self.txn, data)\n    }\n\n    fn delete(&mut self, key: TransactionHash) -> Result<(), BlockStoreError> {\n        self.block_store.delete_transaction(&mut self.txn, &key)\n    }\n}\n\nimpl DataWriter<TransactionHash, TransactionFinalizedApprovals>\n    for IndexedLmdbBlockStoreRWTransaction<'_>\n{\n    fn write(\n        &mut self,\n        data: &TransactionFinalizedApprovals,\n    ) -> Result<TransactionHash, BlockStoreError> {\n        self.block_store\n            
.finalized_transaction_approvals_dbs\n            .put(\n                &mut self.txn,\n                &data.transaction_hash,\n                &data.finalized_approvals,\n                true,\n            )\n            .map(|_| data.transaction_hash)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    fn delete(&mut self, key: TransactionHash) -> Result<(), BlockStoreError> {\n        self.block_store\n            .finalized_transaction_approvals_dbs\n            .delete(&mut self.txn, &key)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n}\n\nimpl DataWriter<BlockHashHeightAndEra, BlockExecutionResults>\n    for IndexedLmdbBlockStoreRWTransaction<'_>\n{\n    fn write(\n        &mut self,\n        data: &BlockExecutionResults,\n    ) -> Result<BlockHashHeightAndEra, BlockStoreError> {\n        let transaction_hashes: Vec<TransactionHash> = data.exec_results.keys().copied().collect();\n        let block_hash = data.block_info.block_hash;\n        let block_height = data.block_info.block_height;\n        let era_id = data.block_info.era_id;\n\n        let update_transaction_hash_index =\n            self.should_update_transaction_hash_index(&transaction_hashes, &block_hash)?;\n\n        let _ = self.block_store.write_execution_results(\n            &mut self.txn,\n            &block_hash,\n            data.exec_results.clone(),\n        )?;\n\n        if update_transaction_hash_index {\n            for hash in transaction_hashes {\n                self.transaction_hash_index.insert(\n                    hash,\n                    BlockHashHeightAndEra::new(block_hash, block_height, era_id),\n                );\n            }\n        }\n\n        Ok(data.block_info)\n    }\n\n    fn delete(&mut self, _key: BlockHashHeightAndEra) -> Result<(), BlockStoreError> {\n        Err(BlockStoreError::UnsupportedOperation)\n    }\n}\n\nimpl DataWriter<BlockHash, BlockTransfers> for 
IndexedLmdbBlockStoreRWTransaction<'_> {\n    fn write(&mut self, data: &BlockTransfers) -> Result<BlockHash, BlockStoreError> {\n        self.block_store\n            .write_transfers(&mut self.txn, &data.block_hash, &data.transfers)\n            .map(|_| data.block_hash)\n    }\n\n    fn delete(&mut self, key: BlockHash) -> Result<(), BlockStoreError> {\n        self.block_store.delete_transfers(&mut self.txn, &key)\n    }\n}\n\nimpl DataWriter<Cow<'static, [u8]>, StateStore> for IndexedLmdbBlockStoreRWTransaction<'_> {\n    fn write(&mut self, data: &StateStore) -> Result<Cow<'static, [u8]>, BlockStoreError> {\n        self.block_store\n            .write_state_store(&mut self.txn, data.key.clone(), &data.value)?;\n        Ok(data.key.clone())\n    }\n\n    fn delete(&mut self, key: Cow<'static, [u8]>) -> Result<(), BlockStoreError> {\n        self.block_store.delete_state_store(&mut self.txn, key)\n    }\n}\n\nimpl DataReader<TransactionHash, Transaction> for IndexedLmdbBlockStoreRWTransaction<'_> {\n    fn read(&self, query: TransactionHash) -> Result<Option<Transaction>, BlockStoreError> {\n        self.block_store\n            .transaction_dbs\n            .get(&self.txn, &query)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    fn exists(&self, query: TransactionHash) -> Result<bool, BlockStoreError> {\n        self.block_store.transaction_exists(&self.txn, &query)\n    }\n}\n\nimpl DataReader<BlockHash, BlockSignatures> for IndexedLmdbBlockStoreRWTransaction<'_> {\n    fn read(&self, key: BlockHash) -> Result<Option<BlockSignatures>, BlockStoreError> {\n        self.block_store.get_block_signatures(&self.txn, &key)\n    }\n\n    fn exists(&self, key: BlockHash) -> Result<bool, BlockStoreError> {\n        self.block_store.block_signatures_exist(&self.txn, &key)\n    }\n}\n\nimpl DataReader<TransactionHash, BTreeSet<Approval>> for IndexedLmdbBlockStoreRWTransaction<'_> {\n    fn read(&self, query: TransactionHash) -> 
Result<Option<BTreeSet<Approval>>, BlockStoreError> {\n        self.block_store\n            .finalized_transaction_approvals_dbs\n            .get(&self.txn, &query)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    fn exists(&self, query: TransactionHash) -> Result<bool, BlockStoreError> {\n        self.block_store\n            .finalized_transaction_approvals_dbs\n            .exists(&self.txn, &query)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n}\n\nimpl DataReader<BlockHash, Block> for IndexedLmdbBlockStoreRWTransaction<'_> {\n    fn read(&self, key: BlockHash) -> Result<Option<Block>, BlockStoreError> {\n        self.block_store.get_single_block(&self.txn, &key)\n    }\n\n    fn exists(&self, key: BlockHash) -> Result<bool, BlockStoreError> {\n        self.block_store.block_exists(&self.txn, &key)\n    }\n}\n\nimpl DataReader<BlockHash, BlockHeader> for IndexedLmdbBlockStoreRWTransaction<'_> {\n    fn read(&self, key: BlockHash) -> Result<Option<BlockHeader>, BlockStoreError> {\n        self.block_store.get_single_block_header(&self.txn, &key)\n    }\n\n    fn exists(&self, key: BlockHash) -> Result<bool, BlockStoreError> {\n        self.block_store.block_header_exists(&self.txn, &key)\n    }\n}\n\nimpl DataReader<TransactionHash, ExecutionResult> for IndexedLmdbBlockStoreRWTransaction<'_> {\n    fn read(&self, query: TransactionHash) -> Result<Option<ExecutionResult>, BlockStoreError> {\n        self.block_store\n            .execution_result_dbs\n            .get(&self.txn, &query)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    fn exists(&self, query: TransactionHash) -> Result<bool, BlockStoreError> {\n        self.block_store\n            .execution_result_dbs\n            .exists(&self.txn, &query)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n}\n\nimpl DataReader<BlockHash, Vec<Transfer>> 
for IndexedLmdbBlockStoreRWTransaction<'_> {\n    fn read(&self, key: BlockHash) -> Result<Option<Vec<Transfer>>, BlockStoreError> {\n        self.block_store.get_transfers(&self.txn, &key)\n    }\n\n    fn exists(&self, key: BlockHash) -> Result<bool, BlockStoreError> {\n        self.block_store.has_transfers(&self.txn, &key)\n    }\n}\n"
  },
  {
    "path": "storage/src/block_store/lmdb/lmdb_block_store.rs",
    "content": "use std::{\n    borrow::Cow,\n    collections::{BTreeSet, HashMap},\n    path::{Path, PathBuf},\n    sync::Arc,\n};\n\nuse datasize::DataSize;\nuse tracing::{debug, error};\n\nuse casper_types::{\n    execution::{execution_result_v1, ExecutionResult, ExecutionResultV1},\n    Approval, Block, BlockBody, BlockHash, BlockHeader, BlockSignatures, Digest, Transaction,\n    TransactionHash, Transfer,\n};\n\nuse super::{\n    lmdb_ext::{LmdbExtError, TransactionExt},\n    versioned_databases::VersionedDatabases,\n};\nuse crate::block_store::{\n    error::BlockStoreError,\n    types::{\n        ApprovalsHashes, BlockExecutionResults, BlockHashHeightAndEra, BlockTransfers, StateStore,\n        TransactionFinalizedApprovals, Transfers,\n    },\n    BlockStoreProvider, BlockStoreTransaction, DataReader, DataWriter,\n};\nuse lmdb::{\n    Database, DatabaseFlags, Environment, EnvironmentFlags, RoTransaction, RwTransaction,\n    Transaction as LmdbTransaction, WriteFlags,\n};\n\n/// Filename for the LMDB database created by the Storage component.\nconst STORAGE_DB_FILENAME: &str = \"storage.lmdb\";\n\n/// We can set this very low, as there is only a single reader/writer accessing the component at any\n/// one time.\nconst MAX_TRANSACTIONS: u32 = 5;\n\n/// Maximum number of allowed dbs.\nconst MAX_DB_COUNT: u32 = 17;\n\n/// OS-specific lmdb flags.\n#[cfg(not(target_os = \"macos\"))]\nconst OS_FLAGS: EnvironmentFlags = EnvironmentFlags::WRITE_MAP;\n\n/// OS-specific lmdb flags.\n///\n/// Mac OS X exhibits performance regressions when `WRITE_MAP` is used.\n#[cfg(target_os = \"macos\")]\nconst OS_FLAGS: EnvironmentFlags = EnvironmentFlags::empty();\n\n/// Lmdb block store.\n#[derive(DataSize, Debug)]\npub struct LmdbBlockStore {\n    /// Storage location.\n    root: PathBuf,\n    /// Environment holding LMDB databases.\n    #[data_size(skip)]\n    pub(super) env: Arc<Environment>,\n    /// The block header databases.\n    pub(super) block_header_dbs: 
VersionedDatabases<BlockHash, BlockHeader>,\n    /// The block body databases.\n    pub(super) block_body_dbs: VersionedDatabases<Digest, BlockBody>,\n    /// The approvals hashes databases.\n    pub(super) approvals_hashes_dbs: VersionedDatabases<BlockHash, ApprovalsHashes>,\n    /// The block metadata db.\n    pub(super) block_metadata_dbs: VersionedDatabases<BlockHash, BlockSignatures>,\n    /// The transaction databases.\n    pub(super) transaction_dbs: VersionedDatabases<TransactionHash, Transaction>,\n    /// Databases of `ExecutionResult`s indexed by transaction hash for current DB or by deploy\n    /// hash for legacy DB.\n    pub(super) execution_result_dbs: VersionedDatabases<TransactionHash, ExecutionResult>,\n    /// The transfer databases.\n    pub(super) transfer_dbs: VersionedDatabases<BlockHash, Transfers>,\n    /// The state storage database.\n    #[data_size(skip)]\n    state_store_db: Database,\n    /// The finalized transaction approvals databases.\n    pub(super) finalized_transaction_approvals_dbs:\n        VersionedDatabases<TransactionHash, BTreeSet<Approval>>,\n}\n\nimpl LmdbBlockStore {\n    /// Ctor.\n    pub fn new(root_path: &Path, total_size: usize) -> Result<Self, BlockStoreError> {\n        // Create the environment and databases.\n        let env = new_environment(total_size, root_path)?;\n\n        let block_header_dbs = VersionedDatabases::new(&env, \"block_header\", \"block_header_v2\")\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n        let block_body_dbs =\n            VersionedDatabases::<_, BlockBody>::new(&env, \"block_body\", \"block_body_v2\")\n                .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n        let block_metadata_dbs =\n            VersionedDatabases::new(&env, \"block_metadata\", \"block_metadata_v2\")\n                .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n        let transaction_dbs = VersionedDatabases::new(&env, 
\"deploys\", \"transactions\")\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n        let execution_result_dbs =\n            VersionedDatabases::new(&env, \"deploy_metadata\", \"execution_results\")\n                .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n        let transfer_dbs = VersionedDatabases::new(&env, \"transfer\", \"versioned_transfers\")\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n        let state_store_db = env\n            .create_db(Some(\"state_store\"), DatabaseFlags::empty())\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n\n        let finalized_transaction_approvals_dbs =\n            VersionedDatabases::new(&env, \"finalized_approvals\", \"versioned_finalized_approvals\")\n                .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n        let approvals_hashes_dbs =\n            VersionedDatabases::new(&env, \"approvals_hashes\", \"versioned_approvals_hashes\")\n                .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n\n        Ok(Self {\n            root: root_path.to_path_buf(),\n            env: Arc::new(env),\n            block_header_dbs,\n            block_body_dbs,\n            approvals_hashes_dbs,\n            block_metadata_dbs,\n            transaction_dbs,\n            execution_result_dbs,\n            transfer_dbs,\n            state_store_db,\n            finalized_transaction_approvals_dbs,\n        })\n    }\n\n    /// Write finality signatures.\n    pub fn write_finality_signatures(\n        &self,\n        txn: &mut RwTransaction,\n        signatures: &BlockSignatures,\n    ) -> Result<BlockHash, BlockStoreError> {\n        let block_hash = signatures.block_hash();\n        let _ = self\n            .block_metadata_dbs\n            .put(txn, block_hash, signatures, true)\n            .map_err(|err| 
BlockStoreError::InternalStorage(Box::new(err)))?;\n\n        Ok(*block_hash)\n    }\n\n    pub(crate) fn delete_finality_signatures(\n        &self,\n        txn: &mut RwTransaction,\n        block_hash: &BlockHash,\n    ) -> Result<(), BlockStoreError> {\n        self.block_metadata_dbs\n            .delete(txn, block_hash)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    pub(crate) fn transaction_exists<Tx: lmdb::Transaction>(\n        &self,\n        txn: &Tx,\n        transaction_hash: &TransactionHash,\n    ) -> Result<bool, BlockStoreError> {\n        self.transaction_dbs\n            .exists(txn, transaction_hash)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    /// Returns `true` if the given block's header and body are stored.\n    pub(crate) fn block_exists<Tx: lmdb::Transaction>(\n        &self,\n        txn: &Tx,\n        block_hash: &BlockHash,\n    ) -> Result<bool, BlockStoreError> {\n        let block_header = match self.get_single_block_header(txn, block_hash)? 
{\n            Some(block_header) => block_header,\n            None => {\n                return Ok(false);\n            }\n        };\n        self.block_body_dbs\n            .exists(txn, block_header.body_hash())\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    /// Returns `true` if the given block's header is stored.\n    pub(crate) fn block_header_exists<Tx: lmdb::Transaction>(\n        &self,\n        txn: &Tx,\n        block_hash: &BlockHash,\n    ) -> Result<bool, BlockStoreError> {\n        self.block_header_dbs\n            .exists(txn, block_hash)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    pub(crate) fn get_transfers<Tx: lmdb::Transaction>(\n        &self,\n        txn: &Tx,\n        block_hash: &BlockHash,\n    ) -> Result<Option<Vec<Transfer>>, BlockStoreError> {\n        Ok(self\n            .transfer_dbs\n            .get(txn, block_hash)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?\n            .map(Transfers::into_owned))\n    }\n\n    pub(crate) fn has_transfers<Tx: lmdb::Transaction>(\n        &self,\n        txn: &Tx,\n        block_hash: &BlockHash,\n    ) -> Result<bool, BlockStoreError> {\n        self.transfer_dbs\n            .exists(txn, block_hash)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    pub(crate) fn read_state_store<K: AsRef<[u8]>, Tx: lmdb::Transaction>(\n        &self,\n        txn: &Tx,\n        key: &K,\n    ) -> Result<Option<Vec<u8>>, BlockStoreError> {\n        let bytes = match txn.get(self.state_store_db, &key) {\n            Ok(slice) => Some(slice.to_owned()),\n            Err(lmdb::Error::NotFound) => None,\n            Err(err) => return Err(BlockStoreError::InternalStorage(Box::new(err))),\n        };\n        Ok(bytes)\n    }\n\n    /// Retrieves approvals hashes by block hash.\n    pub(crate) fn read_approvals_hashes<Tx: lmdb::Transaction>(\n        
&self,\n        txn: &Tx,\n        block_hash: &BlockHash,\n    ) -> Result<Option<ApprovalsHashes>, BlockStoreError> {\n        self.approvals_hashes_dbs\n            .get(txn, block_hash)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    pub(crate) fn approvals_hashes_exist<Tx: lmdb::Transaction>(\n        &self,\n        txn: &Tx,\n        block_hash: &BlockHash,\n    ) -> Result<bool, BlockStoreError> {\n        self.approvals_hashes_dbs\n            .exists(txn, block_hash)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    /// Put a single transaction into storage.\n    pub(crate) fn write_transaction(\n        &self,\n        txn: &mut RwTransaction,\n        transaction: &Transaction,\n    ) -> Result<TransactionHash, BlockStoreError> {\n        let transaction_hash = transaction.hash();\n        self.transaction_dbs\n            .put(txn, &transaction_hash, transaction, false)\n            .map(|_| transaction_hash)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    pub(crate) fn delete_transaction(\n        &self,\n        txn: &mut RwTransaction,\n        transaction_hash: &TransactionHash,\n    ) -> Result<(), BlockStoreError> {\n        self.transaction_dbs\n            .delete(txn, transaction_hash)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    pub(crate) fn write_transfers(\n        &self,\n        txn: &mut RwTransaction,\n        block_hash: &BlockHash,\n        transfers: &[Transfer],\n    ) -> Result<bool, BlockStoreError> {\n        self.transfer_dbs\n            .put(\n                txn,\n                block_hash,\n                &Transfers::from(transfers.to_owned()),\n                true,\n            )\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    pub(crate) fn delete_transfers(\n        &self,\n        txn: &mut 
RwTransaction,\n        block_hash: &BlockHash,\n    ) -> Result<(), BlockStoreError> {\n        self.transfer_dbs\n            .delete(txn, block_hash)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    /// Writes a key to the state storage database.\n    // See note below why `key` and `data` are not `&[u8]`s.\n    pub(crate) fn write_state_store(\n        &self,\n        txn: &mut RwTransaction,\n        key: Cow<'static, [u8]>,\n        data: &Vec<u8>,\n    ) -> Result<(), BlockStoreError> {\n        // Note: The interface of `lmdb` seems suboptimal: `&K` and `&V` could simply be `&[u8]` for\n        //       simplicity. At the very least it seems to be missing a `?Sized` trait bound. For\n        //       this reason, we need to use actual sized types in the function signature above.\n        txn.put(self.state_store_db, &key, data, WriteFlags::default())\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n\n        Ok(())\n    }\n\n    pub(crate) fn state_store_key_exists<K: AsRef<[u8]>, Tx: lmdb::Transaction>(\n        &self,\n        txn: &Tx,\n        key: &K,\n    ) -> Result<bool, BlockStoreError> {\n        txn.value_exists(self.state_store_db, &key)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    pub(crate) fn delete_state_store(\n        &self,\n        txn: &mut RwTransaction,\n        key: Cow<'static, [u8]>,\n    ) -> Result<(), BlockStoreError> {\n        txn.del(self.state_store_db, &key, None)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    /// Retrieves a single block header in a given transaction from storage.\n    pub(crate) fn get_single_block_header<Tx: LmdbTransaction>(\n        &self,\n        txn: &Tx,\n        block_hash: &BlockHash,\n    ) -> Result<Option<BlockHeader>, BlockStoreError> {\n        let block_header = match self\n            .block_header_dbs\n            .get(txn, 
block_hash)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?\n        {\n            Some(block_header) => block_header,\n            None => return Ok(None),\n        };\n        block_header.set_block_hash(*block_hash);\n        Ok(Some(block_header))\n    }\n\n    /// Retrieves block signatures for a block with a given block hash.\n    pub(crate) fn get_block_signatures<Tx: LmdbTransaction>(\n        &self,\n        txn: &Tx,\n        block_hash: &BlockHash,\n    ) -> Result<Option<BlockSignatures>, BlockStoreError> {\n        self.block_metadata_dbs\n            .get(txn, block_hash)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    pub(crate) fn block_signatures_exist<Tx: LmdbTransaction>(\n        &self,\n        txn: &Tx,\n        block_hash: &BlockHash,\n    ) -> Result<bool, BlockStoreError> {\n        self.block_metadata_dbs\n            .exists(txn, block_hash)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    /// Retrieves a single block from storage.\n    pub(crate) fn get_single_block<Tx: LmdbTransaction>(\n        &self,\n        txn: &Tx,\n        block_hash: &BlockHash,\n    ) -> Result<Option<Block>, BlockStoreError> {\n        let block_header: BlockHeader = match self.get_single_block_header(txn, block_hash)? {\n            Some(block_header) => block_header,\n            None => {\n                debug!(\n                    ?block_hash,\n                    \"get_single_block: missing block header for {}\", block_hash\n                );\n                return Ok(None);\n            }\n        };\n\n        let maybe_block_body = self\n            .block_body_dbs\n            .get(txn, block_header.body_hash())\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)));\n        let block_body = match maybe_block_body? 
{\n            Some(block_body) => block_body,\n            None => {\n                debug!(\n                    ?block_header,\n                    \"get_single_block: missing block body for {}\",\n                    block_header.block_hash()\n                );\n                return Ok(None);\n            }\n        };\n        let block = Block::new_from_header_and_body(block_header, block_body)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n        Ok(Some(block))\n    }\n\n    /// Writes a block to storage.\n    ///\n    /// Returns `Ok(true)` if the block has been successfully written, `Ok(false)` if a part of it\n    /// couldn't be written because it already existed, and `Err(_)` if there was an error.\n    pub(crate) fn write_block(\n        &self,\n        txn: &mut RwTransaction,\n        block: &Block,\n    ) -> Result<BlockHash, BlockStoreError> {\n        let block_hash = *block.hash();\n        let _ = self\n            .block_body_dbs\n            .put(txn, block.body_hash(), &block.clone_body(), true)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n\n        let block_header = block.clone_header();\n        let _ = self\n            .block_header_dbs\n            .put(txn, block.hash(), &block_header, true)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n\n        Ok(block_hash)\n    }\n\n    pub(crate) fn write_block_header(\n        &self,\n        txn: &mut RwTransaction,\n        block_header: &BlockHeader,\n    ) -> Result<BlockHash, BlockStoreError> {\n        let block_hash = block_header.block_hash();\n        self.block_header_dbs\n            .put(txn, &block_hash, block_header, true)\n            .map(|_| block_hash)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    pub(crate) fn delete_block_header(\n        &self,\n        txn: &mut RwTransaction,\n        block_hash: &BlockHash,\n    ) -> 
Result<(), BlockStoreError> {\n        self.block_header_dbs\n            .delete(txn, block_hash)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    pub(crate) fn delete_block_body(\n        &self,\n        txn: &mut RwTransaction,\n        block_body_hash: &Digest,\n    ) -> Result<(), BlockStoreError> {\n        self.block_body_dbs\n            .delete(txn, block_body_hash)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    /// Writes approvals hashes to storage.\n    pub(crate) fn write_approvals_hashes(\n        &self,\n        txn: &mut RwTransaction,\n        approvals_hashes: &ApprovalsHashes,\n    ) -> Result<BlockHash, BlockStoreError> {\n        let block_hash = approvals_hashes.block_hash();\n        let _ = self\n            .approvals_hashes_dbs\n            .put(txn, block_hash, approvals_hashes, true)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n        Ok(*block_hash)\n    }\n\n    pub(crate) fn delete_approvals_hashes(\n        &self,\n        txn: &mut RwTransaction,\n        block_hash: &BlockHash,\n    ) -> Result<(), BlockStoreError> {\n        self.approvals_hashes_dbs\n            .delete(txn, block_hash)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    pub(crate) fn write_execution_results(\n        &self,\n        txn: &mut RwTransaction,\n        block_hash: &BlockHash,\n        execution_results: HashMap<TransactionHash, ExecutionResult>,\n    ) -> Result<bool, BlockStoreError> {\n        let mut transfers: Vec<Transfer> = vec![];\n        for (transaction_hash, execution_result) in execution_results.into_iter() {\n            transfers.extend(successful_transfers(&execution_result));\n\n            let maybe_stored_execution_result: Option<ExecutionResult> = self\n                .checkout_ro()\n                .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?\n       
         .read(transaction_hash)?;\n\n            // If we have a previous execution result, we can continue if it is the same.\n            match maybe_stored_execution_result {\n                Some(stored_execution_result) if stored_execution_result == execution_result => {\n                    continue\n                }\n                Some(_) | None => (),\n            }\n\n            let was_written = self\n                .execution_result_dbs\n                .put(txn, &transaction_hash, &execution_result, true)\n                .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n\n            if !was_written {\n                error!(\n                    ?block_hash,\n                    ?transaction_hash,\n                    \"failed to write execution results\"\n                );\n                debug_assert!(was_written);\n            }\n        }\n\n        let was_written = self\n            .transfer_dbs\n            .put(txn, block_hash, &Transfers::from(transfers), true)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n        if !was_written {\n            error!(?block_hash, \"failed to write transfers\");\n            debug_assert!(was_written);\n        }\n        Ok(was_written)\n    }\n\n    pub(crate) fn delete_execution_results(\n        &self,\n        txn: &mut RwTransaction,\n        block_hash: &BlockHash,\n    ) -> Result<bool, BlockStoreError> {\n        let block = self.get_single_block(txn, block_hash)?;\n\n        if let Some(block) = block {\n            for txn_hash in block.all_transaction_hashes() {\n                self.execution_result_dbs\n                    .delete(txn, &txn_hash)\n                    .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n            }\n        }\n        Ok(true)\n    }\n}\n\npub(crate) fn new_environment(\n    total_size: usize,\n    root: &Path,\n) -> Result<Environment, BlockStoreError> {\n    Environment::new()\n        
.set_flags(\n            OS_FLAGS\n                // We manage our own directory.\n                | EnvironmentFlags::NO_SUB_DIR\n                // Disable thread local storage, strongly suggested for operation with tokio.\n                | EnvironmentFlags::NO_TLS\n                // Disable read-ahead. Our data is not stored/read in sequence that would benefit from the read-ahead.\n                | EnvironmentFlags::NO_READAHEAD,\n        )\n        .set_max_readers(MAX_TRANSACTIONS)\n        .set_max_dbs(MAX_DB_COUNT)\n        .set_map_size(total_size)\n        .open(&root.join(STORAGE_DB_FILENAME))\n        .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n}\n\n/// Returns all `Transform::WriteTransfer`s from the execution effects if this is an\n/// `ExecutionResult::Success`, or an empty `Vec` if `ExecutionResult::Failure`.\nfn successful_transfers(execution_result: &ExecutionResult) -> Vec<Transfer> {\n    let mut all_transfers: Vec<Transfer> = vec![];\n    match execution_result {\n        ExecutionResult::V1(ExecutionResultV1::Success { effect, .. }) => {\n            for transform_entry in &effect.transforms {\n                if let execution_result_v1::TransformKindV1::WriteTransfer(transfer_v1) =\n                    &transform_entry.transform\n                {\n                    all_transfers.push(Transfer::V1(transfer_v1.clone()));\n                }\n            }\n        }\n        ExecutionResult::V2(execution_result_v2) => {\n            if execution_result_v2.error_message.is_none() {\n                for transfer in &execution_result_v2.transfers {\n                    all_transfers.push(transfer.clone());\n                }\n            }\n            // else no-op: we only record transfers from successful executions.\n        }\n        ExecutionResult::V1(ExecutionResultV1::Failure { .. 
}) => {\n            // No-op: we only record transfers from successful executions.\n        }\n    }\n\n    all_transfers\n}\n\nimpl BlockStoreProvider for LmdbBlockStore {\n    type Reader<'t> = LmdbBlockStoreTransaction<'t, RoTransaction<'t>>;\n    type ReaderWriter<'t> = LmdbBlockStoreTransaction<'t, RwTransaction<'t>>;\n\n    fn checkout_ro(&self) -> Result<Self::Reader<'_>, BlockStoreError> {\n        let txn = self\n            .env\n            .begin_ro_txn()\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n        Ok(LmdbBlockStoreTransaction {\n            txn,\n            block_store: self,\n        })\n    }\n\n    fn checkout_rw(&mut self) -> Result<Self::ReaderWriter<'_>, BlockStoreError> {\n        let txn = self\n            .env\n            .begin_rw_txn()\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n\n        Ok(LmdbBlockStoreTransaction {\n            txn,\n            block_store: self,\n        })\n    }\n}\n\npub struct LmdbBlockStoreTransaction<'t, T>\nwhere\n    T: LmdbTransaction,\n{\n    txn: T,\n    block_store: &'t LmdbBlockStore,\n}\n\nimpl<T> BlockStoreTransaction for LmdbBlockStoreTransaction<'_, T>\nwhere\n    T: LmdbTransaction,\n{\n    fn commit(self) -> Result<(), BlockStoreError> {\n        self.txn\n            .commit()\n            .map_err(|e| BlockStoreError::InternalStorage(Box::new(LmdbExtError::from(e))))\n    }\n\n    fn rollback(self) {\n        self.txn.abort();\n    }\n}\n\nimpl<T> DataReader<BlockHash, Block> for LmdbBlockStoreTransaction<'_, T>\nwhere\n    T: LmdbTransaction,\n{\n    fn read(&self, key: BlockHash) -> Result<Option<Block>, BlockStoreError> {\n        self.block_store.get_single_block(&self.txn, &key)\n    }\n\n    fn exists(&self, key: BlockHash) -> Result<bool, BlockStoreError> {\n        self.block_store.block_exists(&self.txn, &key)\n    }\n}\n\nimpl<T> DataReader<BlockHash, BlockHeader> for LmdbBlockStoreTransaction<'_, 
T>\nwhere\n    T: LmdbTransaction,\n{\n    fn read(&self, key: BlockHash) -> Result<Option<BlockHeader>, BlockStoreError> {\n        self.block_store.get_single_block_header(&self.txn, &key)\n    }\n\n    fn exists(&self, key: BlockHash) -> Result<bool, BlockStoreError> {\n        self.block_store.block_header_exists(&self.txn, &key)\n    }\n}\n\nimpl<T> DataReader<BlockHash, ApprovalsHashes> for LmdbBlockStoreTransaction<'_, T>\nwhere\n    T: LmdbTransaction,\n{\n    fn read(&self, key: BlockHash) -> Result<Option<ApprovalsHashes>, BlockStoreError> {\n        self.block_store.read_approvals_hashes(&self.txn, &key)\n    }\n\n    fn exists(&self, key: BlockHash) -> Result<bool, BlockStoreError> {\n        self.block_store.block_header_exists(&self.txn, &key)\n    }\n}\n\nimpl<T> DataReader<BlockHash, BlockSignatures> for LmdbBlockStoreTransaction<'_, T>\nwhere\n    T: LmdbTransaction,\n{\n    fn read(&self, key: BlockHash) -> Result<Option<BlockSignatures>, BlockStoreError> {\n        self.block_store.get_block_signatures(&self.txn, &key)\n    }\n\n    fn exists(&self, key: BlockHash) -> Result<bool, BlockStoreError> {\n        self.block_store.block_signatures_exist(&self.txn, &key)\n    }\n}\n\nimpl<T> DataReader<TransactionHash, Transaction> for LmdbBlockStoreTransaction<'_, T>\nwhere\n    T: LmdbTransaction,\n{\n    fn read(&self, key: TransactionHash) -> Result<Option<Transaction>, BlockStoreError> {\n        self.block_store\n            .transaction_dbs\n            .get(&self.txn, &key)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    fn exists(&self, key: TransactionHash) -> Result<bool, BlockStoreError> {\n        self.block_store.transaction_exists(&self.txn, &key)\n    }\n}\n\nimpl<T> DataReader<TransactionHash, BTreeSet<Approval>> for LmdbBlockStoreTransaction<'_, T>\nwhere\n    T: LmdbTransaction,\n{\n    fn read(&self, key: TransactionHash) -> Result<Option<BTreeSet<Approval>>, BlockStoreError> {\n        
self.block_store\n            .finalized_transaction_approvals_dbs\n            .get(&self.txn, &key)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    fn exists(&self, key: TransactionHash) -> Result<bool, BlockStoreError> {\n        self.block_store\n            .finalized_transaction_approvals_dbs\n            .exists(&self.txn, &key)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n}\n\nimpl<T> DataReader<TransactionHash, ExecutionResult> for LmdbBlockStoreTransaction<'_, T>\nwhere\n    T: LmdbTransaction,\n{\n    fn read(&self, key: TransactionHash) -> Result<Option<ExecutionResult>, BlockStoreError> {\n        self.block_store\n            .execution_result_dbs\n            .get(&self.txn, &key)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    fn exists(&self, key: TransactionHash) -> Result<bool, BlockStoreError> {\n        self.block_store\n            .execution_result_dbs\n            .exists(&self.txn, &key)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n}\n\nimpl<T> DataReader<BlockHash, Vec<Transfer>> for LmdbBlockStoreTransaction<'_, T>\nwhere\n    T: LmdbTransaction,\n{\n    fn read(&self, key: BlockHash) -> Result<Option<Vec<Transfer>>, BlockStoreError> {\n        self.block_store.get_transfers(&self.txn, &key)\n    }\n\n    fn exists(&self, key: BlockHash) -> Result<bool, BlockStoreError> {\n        self.block_store.has_transfers(&self.txn, &key)\n    }\n}\n\nimpl<T, K> DataReader<K, Vec<u8>> for LmdbBlockStoreTransaction<'_, T>\nwhere\n    K: AsRef<[u8]>,\n    T: LmdbTransaction,\n{\n    fn read(&self, key: K) -> Result<Option<Vec<u8>>, BlockStoreError> {\n        self.block_store.read_state_store(&self.txn, &key)\n    }\n\n    fn exists(&self, key: K) -> Result<bool, BlockStoreError> {\n        self.block_store.state_store_key_exists(&self.txn, &key)\n    }\n}\n\nimpl<'t> 
DataWriter<BlockHash, Block> for LmdbBlockStoreTransaction<'t, RwTransaction<'t>> {\n    /// Writes a block to storage.\n    fn write(&mut self, data: &Block) -> Result<BlockHash, BlockStoreError> {\n        self.block_store.write_block(&mut self.txn, data)\n    }\n\n    fn delete(&mut self, key: BlockHash) -> Result<(), BlockStoreError> {\n        let maybe_block = self.block_store.get_single_block_header(&self.txn, &key)?;\n\n        if let Some(block_header) = maybe_block {\n            self.block_store.delete_block_header(&mut self.txn, &key)?;\n            self.block_store\n                .delete_block_body(&mut self.txn, block_header.body_hash())?;\n        }\n        Ok(())\n    }\n}\n\nimpl<'t> DataWriter<BlockHash, ApprovalsHashes>\n    for LmdbBlockStoreTransaction<'t, RwTransaction<'t>>\n{\n    fn write(&mut self, data: &ApprovalsHashes) -> Result<BlockHash, BlockStoreError> {\n        self.block_store.write_approvals_hashes(&mut self.txn, data)\n    }\n\n    fn delete(&mut self, key: BlockHash) -> Result<(), BlockStoreError> {\n        self.block_store\n            .delete_approvals_hashes(&mut self.txn, &key)\n    }\n}\n\nimpl<'t> DataWriter<BlockHash, BlockSignatures>\n    for LmdbBlockStoreTransaction<'t, RwTransaction<'t>>\n{\n    fn write(&mut self, data: &BlockSignatures) -> Result<BlockHash, BlockStoreError> {\n        self.block_store\n            .write_finality_signatures(&mut self.txn, data)\n    }\n\n    fn delete(&mut self, key: BlockHash) -> Result<(), BlockStoreError> {\n        self.block_store\n            .delete_finality_signatures(&mut self.txn, &key)\n    }\n}\n\nimpl<'t> DataWriter<BlockHash, BlockHeader> for LmdbBlockStoreTransaction<'t, RwTransaction<'t>> {\n    fn write(&mut self, data: &BlockHeader) -> Result<BlockHash, BlockStoreError> {\n        self.block_store.write_block_header(&mut self.txn, data)\n    }\n\n    fn delete(&mut self, key: BlockHash) -> Result<(), BlockStoreError> {\n        
self.block_store.delete_block_header(&mut self.txn, &key)\n    }\n}\n\nimpl<'t> DataWriter<TransactionHash, Transaction>\n    for LmdbBlockStoreTransaction<'t, RwTransaction<'t>>\n{\n    fn write(&mut self, data: &Transaction) -> Result<TransactionHash, BlockStoreError> {\n        self.block_store.write_transaction(&mut self.txn, data)\n    }\n\n    fn delete(&mut self, key: TransactionHash) -> Result<(), BlockStoreError> {\n        self.block_store.delete_transaction(&mut self.txn, &key)\n    }\n}\n\nimpl<'t> DataWriter<BlockHash, BlockTransfers>\n    for LmdbBlockStoreTransaction<'t, RwTransaction<'t>>\n{\n    fn write(&mut self, data: &BlockTransfers) -> Result<BlockHash, BlockStoreError> {\n        self.block_store\n            .write_transfers(&mut self.txn, &data.block_hash, &data.transfers)\n            .map(|_| data.block_hash)\n    }\n\n    fn delete(&mut self, key: BlockHash) -> Result<(), BlockStoreError> {\n        self.block_store.delete_transfers(&mut self.txn, &key)\n    }\n}\n\nimpl<'t> DataWriter<Cow<'static, [u8]>, StateStore>\n    for LmdbBlockStoreTransaction<'t, RwTransaction<'t>>\n{\n    fn write(&mut self, data: &StateStore) -> Result<Cow<'static, [u8]>, BlockStoreError> {\n        self.block_store\n            .write_state_store(&mut self.txn, data.key.clone(), &data.value)?;\n        Ok(data.key.clone())\n    }\n\n    fn delete(&mut self, key: Cow<'static, [u8]>) -> Result<(), BlockStoreError> {\n        self.block_store.delete_state_store(&mut self.txn, key)\n    }\n}\n\nimpl<'t> DataWriter<TransactionHash, TransactionFinalizedApprovals>\n    for LmdbBlockStoreTransaction<'t, RwTransaction<'t>>\n{\n    fn write(\n        &mut self,\n        data: &TransactionFinalizedApprovals,\n    ) -> Result<TransactionHash, BlockStoreError> {\n        self.block_store\n            .finalized_transaction_approvals_dbs\n            .put(\n                &mut self.txn,\n                &data.transaction_hash,\n                &data.finalized_approvals,\n 
               true,\n            )\n            .map(|_| data.transaction_hash)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n\n    fn delete(&mut self, key: TransactionHash) -> Result<(), BlockStoreError> {\n        self.block_store\n            .finalized_transaction_approvals_dbs\n            .delete(&mut self.txn, &key)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))\n    }\n}\n\nimpl<'t> DataWriter<BlockHashHeightAndEra, BlockExecutionResults>\n    for LmdbBlockStoreTransaction<'t, RwTransaction<'t>>\n{\n    fn write(\n        &mut self,\n        data: &BlockExecutionResults,\n    ) -> Result<BlockHashHeightAndEra, BlockStoreError> {\n        let block_hash = data.block_info.block_hash;\n\n        let _ = self.block_store.write_execution_results(\n            &mut self.txn,\n            &block_hash,\n            data.exec_results.clone(),\n        )?;\n\n        Ok(data.block_info)\n    }\n\n    fn delete(&mut self, key: BlockHashHeightAndEra) -> Result<(), BlockStoreError> {\n        let block_hash = key.block_hash;\n\n        let _ = self\n            .block_store\n            .delete_execution_results(&mut self.txn, &block_hash)?;\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "storage/src/block_store/lmdb/lmdb_ext.rs",
    "content": "//! LMDB extensions.\n//!\n//! Various traits and helper functions to extend the lower level LMDB functions. Unifies\n//! lower-level storage errors from lmdb and serialization issues.\n//!\n//! ## Serialization\n//!\n//! The module also centralizes settings and methods for serialization for all parts of storage.\n//!\n//! Serialization errors are unified into a generic, type erased `std` error to allow for easy\n//! interchange of the serialization format if desired.\n\nuse std::{any::TypeId, collections::BTreeSet};\n\nuse lmdb::{Database, RwTransaction, Transaction, WriteFlags};\nuse serde::de::DeserializeOwned;\n#[cfg(test)]\nuse serde::Serialize;\nuse thiserror::Error;\nuse tracing::{error, warn};\n\nuse crate::block_store::types::{ApprovalsHashes, DeployMetadataV1};\nuse casper_types::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    execution::ExecutionResult,\n    system::auction::UnbondingPurse,\n    Approval, BlockBody, BlockHeader, BlockSignatures, Deploy, DeployHash, Transfer,\n};\n\nconst UNBONDING_PURSE_V2_MAGIC_BYTES: &[u8] = &[121, 17, 133, 179, 91, 63, 69, 222];\n\n/// Error wrapper for lower-level storage errors.\n///\n/// Used to classify storage errors, allowing more accurate reporting on potential issues and\n/// crashes. 
Indicates how to proceed (clearing storage entirely or just restarting) in most cases.\n///\n/// Note that accessing a storage with an incompatible version of this software is also considered a\n/// case of corruption.\n#[derive(Debug, Error)]\npub enum LmdbExtError {\n    /// The internal database is corrupted and can probably not be salvaged.\n    #[error(\"internal storage corrupted: {0}\")]\n    LmdbCorrupted(lmdb::Error),\n    /// The data stored inside the internal database is corrupted or formatted wrong.\n    #[error(\"internal data corrupted: {0}\")]\n    DataCorrupted(Box<dyn std::error::Error + Send + Sync>),\n    /// A resource has been exhausted at runtime, restarting (potentially with different settings)\n    /// might fix the problem. Storage integrity is still intact.\n    #[error(\"storage exhausted resource (but still intact): {0}\")]\n    ResourceExhausted(lmdb::Error),\n    /// Error neither corruption nor resource exhaustion occurred, likely a programming error.\n    #[error(\"unknown LMDB or serialization error, likely from a bug: {0}\")]\n    Other(Box<dyn std::error::Error + Send + Sync>),\n}\n\n#[derive(Debug, Error)]\n#[error(\"{0}\")]\npub struct BytesreprError(pub bytesrepr::Error);\n\n// Classifies an `lmdb::Error` according to our scheme. 
This one of the rare cases where we accept a\n// blanked `From<>` implementation for error type conversion.\nimpl From<lmdb::Error> for LmdbExtError {\n    fn from(lmdb_error: lmdb::Error) -> Self {\n        match lmdb_error {\n            lmdb::Error::PageNotFound\n            | lmdb::Error::Corrupted\n            | lmdb::Error::Panic\n            | lmdb::Error::VersionMismatch\n            | lmdb::Error::Invalid\n            | lmdb::Error::Incompatible => LmdbExtError::LmdbCorrupted(lmdb_error),\n\n            lmdb::Error::MapFull\n            | lmdb::Error::DbsFull\n            | lmdb::Error::ReadersFull\n            | lmdb::Error::TlsFull\n            | lmdb::Error::TxnFull\n            | lmdb::Error::CursorFull\n            | lmdb::Error::PageFull\n            | lmdb::Error::MapResized => LmdbExtError::ResourceExhausted(lmdb_error),\n\n            lmdb::Error::NotFound\n            | lmdb::Error::BadRslot\n            | lmdb::Error::BadTxn\n            | lmdb::Error::BadValSize\n            | lmdb::Error::BadDbi\n            | lmdb::Error::KeyExist\n            | lmdb::Error::Other(_) => LmdbExtError::Other(Box::new(lmdb_error)),\n        }\n    }\n}\n\n/// Additional methods on transaction.\npub(super) trait TransactionExt {\n    /// Helper function to load a value from a database.\n    fn get_value<K: AsRef<[u8]>, V: 'static + DeserializeOwned>(\n        &self,\n        db: Database,\n        key: &K,\n    ) -> Result<Option<V>, LmdbExtError>;\n\n    /// Returns `true` if the given key has an entry in the given database.\n    fn value_exists<K: AsRef<[u8]>>(&self, db: Database, key: &K) -> Result<bool, LmdbExtError>;\n\n    /// Helper function to load a value from a database using the `bytesrepr` `ToBytes`/`FromBytes`\n    /// serialization.\n    fn get_value_bytesrepr<K: ToBytes + std::fmt::Display, V: FromBytes + 'static>(\n        &self,\n        db: Database,\n        key: &K,\n    ) -> Result<Option<V>, LmdbExtError>;\n\n    fn value_exists_bytesrepr<K: 
ToBytes>(\n        &self,\n        db: Database,\n        key: &K,\n    ) -> Result<bool, LmdbExtError>;\n}\n\n/// Additional methods on write transactions.\npub(super) trait WriteTransactionExt {\n    /// Helper function to write a value to a database.\n    ///\n    /// Returns `true` if the value has actually been written, `false` if the key already existed.\n    ///\n    /// Setting `overwrite` to true will cause the value to always be written instead.\n    #[cfg(test)]\n    fn put_value<K: AsRef<[u8]>, V: 'static + Serialize>(\n        &mut self,\n        db: Database,\n        key: &K,\n        value: &V,\n        overwrite: bool,\n    ) -> Result<bool, LmdbExtError>;\n\n    /// Helper function to write a value to a database using the `bytesrepr` `ToBytes`/`FromBytes`\n    /// serialization.\n    ///\n    /// Returns `true` if the value has actually been written, `false` if the key already existed.\n    ///\n    /// Setting `overwrite` to true will cause the value to always be written instead.\n    fn put_value_bytesrepr<K: ToBytes, V: ToBytes>(\n        &mut self,\n        db: Database,\n        key: &K,\n        value: &V,\n        overwrite: bool,\n    ) -> Result<bool, LmdbExtError>;\n}\n\nimpl<T> TransactionExt for T\nwhere\n    T: Transaction,\n{\n    #[inline]\n    fn get_value<K: AsRef<[u8]>, V: 'static + DeserializeOwned>(\n        &self,\n        db: Database,\n        key: &K,\n    ) -> Result<Option<V>, LmdbExtError> {\n        match self.get(db, key) {\n            // Deserialization failures are likely due to storage corruption.\n            Ok(raw) => deserialize_internal(raw),\n            Err(lmdb::Error::NotFound) => Ok(None),\n            Err(err) => Err(err.into()),\n        }\n    }\n\n    #[inline]\n    fn value_exists<K: AsRef<[u8]>>(&self, db: Database, key: &K) -> Result<bool, LmdbExtError> {\n        match self.get(db, key) {\n            Ok(_raw) => Ok(true),\n            Err(lmdb::Error::NotFound) => Ok(false),\n            Err(err) 
=> Err(err.into()),\n        }\n    }\n\n    #[inline]\n    fn get_value_bytesrepr<K: ToBytes + std::fmt::Display, V: FromBytes + 'static>(\n        &self,\n        db: Database,\n        key: &K,\n    ) -> Result<Option<V>, LmdbExtError> {\n        let serialized_key = serialize_bytesrepr(key)?;\n        match self.get(db, &serialized_key) {\n            // Deserialization failures are likely due to storage corruption.\n            Ok(raw) => match deserialize_bytesrepr(raw) {\n                Ok(ret) => Ok(Some(ret)),\n                Err(err) => {\n                    error!(%key, %err, raw_len = raw.len(), \"get_value_bytesrepr deserialization\");\n                    Err(err)\n                }\n            },\n            Err(lmdb::Error::NotFound) => Ok(None),\n            Err(err) => Err(err.into()),\n        }\n    }\n\n    #[inline]\n    fn value_exists_bytesrepr<K: ToBytes>(\n        &self,\n        db: Database,\n        key: &K,\n    ) -> Result<bool, LmdbExtError> {\n        let serialized_key = serialize_bytesrepr(key)?;\n        match self.get(db, &serialized_key) {\n            Ok(_raw) => Ok(true),\n            Err(lmdb::Error::NotFound) => Ok(false),\n            Err(err) => Err(err.into()),\n        }\n    }\n}\n\n/// Serializes `value` into the buffer.\n/// In case the `value` is of the `UnbondingPurse` type it uses the specialized\n/// function to provide compatibility with the legacy version of the `UnbondingPurse` struct.\n/// See [`serialize_unbonding_purse`] for more details.\n// TODO: Get rid of the 'static bound.\n#[cfg(test)]\npub(crate) fn serialize_internal<V: 'static + Serialize>(\n    value: &V,\n) -> Result<Vec<u8>, LmdbExtError> {\n    let buffer = if TypeId::of::<UnbondingPurse>() == TypeId::of::<V>() {\n        serialize_unbonding_purse(value)?\n    } else {\n        serialize(value)?\n    };\n    Ok(buffer)\n}\n\n/// Deserializes an object from the raw bytes.\n/// In case the expected object is of the `UnbondingPurse` type it 
uses the specialized\n/// function to provide compatibility with the legacy version of the `UnbondingPurse` struct.\n/// See [`deserialize_unbonding_purse`] for more details.\npub(crate) fn deserialize_internal<V: 'static + DeserializeOwned>(\n    raw: &[u8],\n) -> Result<Option<V>, LmdbExtError> {\n    if TypeId::of::<UnbondingPurse>() == TypeId::of::<V>() {\n        deserialize_unbonding_purse(raw).map(Some)\n    } else {\n        deserialize(raw).map(Some)\n    }\n}\n\nimpl WriteTransactionExt for RwTransaction<'_> {\n    #[cfg(test)]\n    fn put_value<K: AsRef<[u8]>, V: 'static + Serialize>(\n        &mut self,\n        db: Database,\n        key: &K,\n        value: &V,\n        overwrite: bool,\n    ) -> Result<bool, LmdbExtError> {\n        let buffer = serialize_internal(value)?;\n\n        let flags = if overwrite {\n            WriteFlags::empty()\n        } else {\n            WriteFlags::NO_OVERWRITE\n        };\n\n        match self.put(db, key, &buffer, flags) {\n            Ok(()) => Ok(true),\n            // If we did not add the value due to it already existing, just return `false`.\n            Err(lmdb::Error::KeyExist) => Ok(false),\n            Err(err) => Err(err.into()),\n        }\n    }\n\n    fn put_value_bytesrepr<K: ToBytes, V: ToBytes>(\n        &mut self,\n        db: Database,\n        key: &K,\n        value: &V,\n        overwrite: bool,\n    ) -> Result<bool, LmdbExtError> {\n        let serialized_key = serialize_bytesrepr(key)?;\n        let serialized_value = serialize_bytesrepr(value)?;\n\n        let flags = if overwrite {\n            WriteFlags::empty()\n        } else {\n            WriteFlags::NO_OVERWRITE\n        };\n\n        match self.put(db, &serialized_key, &serialized_value, flags) {\n            Ok(()) => Ok(true),\n            // If we did not add the value due to it already existing, just return `false`.\n            Err(lmdb::Error::KeyExist) => Ok(false),\n            Err(err) => Err(err.into()),\n        }\n  
  }\n}\n\n/// Deserializes from a buffer.\n#[inline(always)]\npub(super) fn deserialize<T: DeserializeOwned + 'static>(raw: &[u8]) -> Result<T, LmdbExtError> {\n    match bincode::deserialize(raw) {\n        Ok(value) => Ok(value),\n        Err(err) => {\n            // unfortunately, type_name is unstable\n            let type_name = {\n                if TypeId::of::<DeployMetadataV1>() == TypeId::of::<T>() {\n                    \"DeployMetadataV1\".to_string()\n                } else if TypeId::of::<BlockHeader>() == TypeId::of::<T>() {\n                    \"BlockHeader\".to_string()\n                } else if TypeId::of::<BlockBody>() == TypeId::of::<T>() {\n                    \"BlockBody\".to_string()\n                } else if TypeId::of::<BlockSignatures>() == TypeId::of::<T>() {\n                    \"BlockSignatures\".to_string()\n                } else if TypeId::of::<DeployHash>() == TypeId::of::<T>() {\n                    \"DeployHash\".to_string()\n                } else if TypeId::of::<Deploy>() == TypeId::of::<T>() {\n                    \"Deploy\".to_string()\n                } else if TypeId::of::<ApprovalsHashes>() == TypeId::of::<T>() {\n                    \"ApprovalsHashes\".to_string()\n                } else if TypeId::of::<BTreeSet<Approval>>() == TypeId::of::<T>() {\n                    \"BTreeSet<Approval>\".to_string()\n                } else if TypeId::of::<ExecutionResult>() == TypeId::of::<T>() {\n                    \"ExecutionResult\".to_string()\n                } else if TypeId::of::<Vec<Transfer>>() == TypeId::of::<T>() {\n                    \"Transfers\".to_string()\n                } else {\n                    format!(\"{:?}\", TypeId::of::<T>())\n                }\n            };\n            warn!(?err, ?raw, \"{}: bincode deserialization failed\", type_name);\n            Err(LmdbExtError::DataCorrupted(Box::new(err)))\n        }\n    }\n}\n\n/// Returns `true` if the specified bytes represent the legacy version of 
`UnbondingPurse`.\nfn is_legacy(raw: &[u8]) -> bool {\n    !raw.starts_with(UNBONDING_PURSE_V2_MAGIC_BYTES)\n}\n\n/// Deserializes `UnbondingPurse` from a buffer.\n/// To provide backward compatibility with the previous version of the `UnbondingPurse`,\n/// it checks if the raw bytes stream begins with \"magic bytes\". If yes, the magic bytes are\n/// stripped and the struct is deserialized as a new version. Otherwise, the raw bytes\n/// are treated as bytes representing the legacy `UnbondingPurse` and deserialized accordingly.\n/// In order for the latter scenario to work, the raw bytes stream is extended with\n/// bytes that represent the `None` serialized with `bincode` - these bytes simulate\n/// the existence of the `new_validator` field added to the `UnbondingPurse` struct.\npub(super) fn deserialize_unbonding_purse<T: DeserializeOwned + 'static>(\n    raw: &[u8],\n) -> Result<T, LmdbExtError> {\n    const BINCODE_ENCODED_NONE: [u8; 4] = [0; 4];\n    if is_legacy(raw) {\n        deserialize(&[raw, &BINCODE_ENCODED_NONE].concat())\n    } else {\n        deserialize(&raw[UNBONDING_PURSE_V2_MAGIC_BYTES.len()..])\n    }\n}\n\n/// Serializes into a buffer.\n#[cfg(test)]\n#[inline(always)]\npub(super) fn serialize<T: Serialize>(value: &T) -> Result<Vec<u8>, LmdbExtError> {\n    bincode::serialize(value).map_err(|err| LmdbExtError::Other(Box::new(err)))\n}\n\n/// Serializes `UnbondingPurse` into a buffer.\n/// To provide backward compatibility with the previous version of the `UnbondingPurse`,\n/// the serialized bytes are prefixed with the \"magic bytes\", which will be used by the\n/// deserialization routine to detect the version of the `UnbondingPurse` struct.\n#[cfg(test)]\n#[inline(always)]\npub(super) fn serialize_unbonding_purse<T: Serialize>(value: &T) -> Result<Vec<u8>, LmdbExtError> {\n    let mut serialized = UNBONDING_PURSE_V2_MAGIC_BYTES.to_vec();\n    serialized.extend(bincode::serialize(value).map_err(|err| LmdbExtError::Other(Box::new(err)))?);\n    
Ok(serialized)\n}\n\n/// Deserializes from a buffer.\n#[inline(always)]\npub(super) fn deserialize_bytesrepr<T: FromBytes + 'static>(raw: &[u8]) -> Result<T, LmdbExtError> {\n    match T::from_bytes(raw).map(|val| val.0) {\n        Ok(ret) => Ok(ret),\n        Err(err) => {\n            // unfortunately, type_name is unstable\n            let type_name = {\n                if TypeId::of::<DeployMetadataV1>() == TypeId::of::<T>() {\n                    \"DeployMetadataV1\".to_string()\n                } else if TypeId::of::<BlockHeader>() == TypeId::of::<T>() {\n                    \"BlockHeader\".to_string()\n                } else if TypeId::of::<BlockBody>() == TypeId::of::<T>() {\n                    \"BlockBody\".to_string()\n                } else if TypeId::of::<BlockSignatures>() == TypeId::of::<T>() {\n                    \"BlockSignatures\".to_string()\n                } else if TypeId::of::<DeployHash>() == TypeId::of::<T>() {\n                    \"DeployHash\".to_string()\n                } else if TypeId::of::<Deploy>() == TypeId::of::<T>() {\n                    \"Deploy\".to_string()\n                } else if TypeId::of::<ApprovalsHashes>() == TypeId::of::<T>() {\n                    \"ApprovalsHashes\".to_string()\n                } else if TypeId::of::<BTreeSet<Approval>>() == TypeId::of::<T>() {\n                    \"BTreeSet<Approval>\".to_string()\n                } else if TypeId::of::<ExecutionResult>() == TypeId::of::<T>() {\n                    \"ExecutionResult\".to_string()\n                } else if TypeId::of::<Vec<Transfer>>() == TypeId::of::<T>() {\n                    \"Transfers\".to_string()\n                } else {\n                    format!(\"{:?}\", TypeId::of::<T>())\n                }\n            };\n            error!(\"deserialize_bytesrepr failed to deserialize: {}\", type_name);\n            Err(LmdbExtError::DataCorrupted(Box::new(BytesreprError(err))))\n        }\n    }\n}\n\n/// Serializes into a 
buffer.\n#[inline(always)]\npub(super) fn serialize_bytesrepr<T: ToBytes>(value: &T) -> Result<Vec<u8>, LmdbExtError> {\n    value\n        .to_bytes()\n        .map_err(|err| LmdbExtError::Other(Box::new(BytesreprError(err))))\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::{AccessRights, EraId, PublicKey, SecretKey, URef, U512};\n\n    #[test]\n    fn should_read_legacy_unbonding_purse() {\n        // These bytes represent the `UnbondingPurse` struct with the `new_validator` field removed\n        // and serialized with `bincode`.\n        // In theory, we can generate these bytes by serializing the `WithdrawPurse`, but at some\n        // point, these two structs may diverge and it's a safe bet to rely on the bytes\n        // that are consistent with what we keep in the current storage.\n        const LEGACY_BYTES: &str = \"0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e07010000002000000000000000197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d610100000020000000000000004508a07aa941707f3eb2db94c8897a80b2c1197476b6de213ac273df7d86c4ffffffffffffffffff40feffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\";\n\n        let decoded = base16::decode(LEGACY_BYTES).expect(\"decode\");\n        let deserialized: UnbondingPurse = deserialize_internal(&decoded)\n            .expect(\"should deserialize w/o error\")\n            .expect(\"should be Some\");\n\n        // Make sure the new field is set to default.\n        assert_eq!(*deserialized.new_validator(), Option::default())\n    }\n\n    #[test]\n    fn unbonding_purse_serialization_roundtrip() {\n        let original = UnbondingPurse::new(\n            URef::new([14; 32], AccessRights::READ_ADD_WRITE),\n            {\n                let secret_key =\n                    SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap();\n                
PublicKey::from(&secret_key)\n            },\n            {\n                let secret_key =\n                    SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap();\n                PublicKey::from(&secret_key)\n            },\n            EraId::MAX,\n            U512::max_value() - 1,\n            Some({\n                let secret_key =\n                    SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]).unwrap();\n                PublicKey::from(&secret_key)\n            }),\n        );\n\n        let serialized = serialize_internal(&original).expect(\"serialization\");\n        let deserialized: UnbondingPurse = deserialize_internal(&serialized)\n            .expect(\"should deserialize w/o error\")\n            .expect(\"should be Some\");\n\n        assert_eq!(original, deserialized);\n\n        // Explicitly assert that the `new_validator` is not `None`\n        assert!(deserialized.new_validator().is_some())\n    }\n}\n"
  },
  {
    "path": "storage/src/block_store/lmdb/mod.rs",
    "content": "mod lmdb_ext;\nmod temp_map;\nmod versioned_databases;\n\nmod indexed_lmdb_block_store;\nmod lmdb_block_store;\n\nuse core::convert::TryFrom;\npub use indexed_lmdb_block_store::IndexedLmdbBlockStore;\npub use lmdb_block_store::LmdbBlockStore;\n\n#[cfg(test)]\nuse rand::Rng;\nuse serde::Serialize;\n\n#[cfg(test)]\nuse casper_types::testing::TestRng;\n\n/// An identifier of db tables.\n#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Serialize)]\n#[repr(u16)]\npub enum DbTableId {\n    /// Refers to `BlockHeader` db table.\n    BlockHeader = 0,\n    /// Refers to `BlockBody` db table.\n    BlockBody = 1,\n    /// Refers to `ApprovalsHashes` db table.\n    ApprovalsHashes = 2,\n    /// Refers to `BlockMetadata` db table.\n    BlockMetadata = 3,\n    /// Refers to `Transaction` db table.\n    Transaction = 4,\n    /// Refers to `ExecutionResult` db table.\n    ExecutionResult = 5,\n    /// Refers to `Transfer` db table.\n    Transfer = 6,\n    /// Refers to `FinalizedTransactionApprovals` db table.\n    FinalizedTransactionApprovals = 7,\n}\n\nimpl DbTableId {\n    /// Returns a random `DbTableId`.\n    #[cfg(test)]\n    pub fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..8) {\n            0 => DbTableId::BlockHeader,\n            1 => DbTableId::BlockBody,\n            2 => DbTableId::ApprovalsHashes,\n            3 => DbTableId::BlockMetadata,\n            4 => DbTableId::Transaction,\n            5 => DbTableId::ExecutionResult,\n            6 => DbTableId::Transfer,\n            7 => DbTableId::FinalizedTransactionApprovals,\n            _ => unreachable!(),\n        }\n    }\n}\n\nimpl TryFrom<u16> for DbTableId {\n    type Error = UnknownDbTableId;\n\n    fn try_from(value: u16) -> Result<Self, Self::Error> {\n        match value {\n            0 => Ok(DbTableId::BlockHeader),\n            1 => Ok(DbTableId::BlockBody),\n            2 => Ok(DbTableId::ApprovalsHashes),\n            3 => Ok(DbTableId::BlockMetadata),\n       
     4 => Ok(DbTableId::Transaction),\n            5 => Ok(DbTableId::ExecutionResult),\n            6 => Ok(DbTableId::Transfer),\n            7 => Ok(DbTableId::FinalizedTransactionApprovals),\n            _ => Err(UnknownDbTableId(value)),\n        }\n    }\n}\n\nimpl From<DbTableId> for u16 {\n    fn from(value: DbTableId) -> Self {\n        value as u16\n    }\n}\n\nimpl core::fmt::Display for DbTableId {\n    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {\n        match self {\n            DbTableId::BlockHeader => write!(f, \"BlockHeader\"),\n            DbTableId::BlockBody => write!(f, \"BlockBody\"),\n            DbTableId::ApprovalsHashes => write!(f, \"ApprovalsHashes\"),\n            DbTableId::BlockMetadata => write!(f, \"BlockMetadata\"),\n            DbTableId::Transaction => write!(f, \"Transaction\"),\n            DbTableId::ExecutionResult => write!(f, \"ExecutionResult\"),\n            DbTableId::Transfer => write!(f, \"Transfer\"),\n            DbTableId::FinalizedTransactionApprovals => write!(f, \"FinalizedTransactionApprovals\"),\n        }\n    }\n}\n\n/// Error returned when trying to convert a `u16` into a `DbTableId`.\n#[derive(Debug, PartialEq, Eq)]\npub struct UnknownDbTableId(u16);\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::testing::TestRng;\n\n    #[test]\n    fn tag_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = DbTableId::random(rng);\n        let tag = u16::from(val);\n        assert_eq!(DbTableId::try_from(tag), Ok(val));\n    }\n}\n"
  },
  {
    "path": "storage/src/block_store/lmdb/temp_map.rs",
    "content": "use std::collections::BTreeMap;\n\nenum EntryState<V> {\n    Deleted,\n    Occupied(V),\n}\n\n/// A wrapper over a BTreeMap that stores changes to the backing map only temporarily.\n/// The backing map will not be altered until the temporary changes are committed.\npub(crate) struct TempMap<'a, K, V: 'a> {\n    base_index: &'a mut BTreeMap<K, V>,\n    new_index: BTreeMap<K, EntryState<V>>,\n}\n\nimpl<'a, K, V> TempMap<'a, K, V>\nwhere\n    K: Ord,\n    V: 'a + Copy,\n{\n    /// Creates a new temporary map that is backed by a BTreeMap\n    pub(crate) fn new(base_index: &'a mut BTreeMap<K, V>) -> Self {\n        Self {\n            base_index,\n            new_index: BTreeMap::<K, EntryState<V>>::new(),\n        }\n    }\n\n    /// Reads the value contained in the map at the specified key.\n    pub(crate) fn get(&self, key: &K) -> Option<V> {\n        if let Some(state) = self.new_index.get(key) {\n            match state {\n                EntryState::Occupied(val) => Some(*val),\n                EntryState::Deleted => None,\n            }\n        } else {\n            self.base_index.get(key).copied()\n        }\n    }\n\n    /// Checks if a key exists in this map.\n    pub(crate) fn contains_key(&self, key: &K) -> bool {\n        if self.new_index.contains_key(key) {\n            true\n        } else {\n            self.base_index.contains_key(key)\n        }\n    }\n\n    /// Sets the value at the specified key index.\n    pub(crate) fn insert(&mut self, key: K, val: V) {\n        self.new_index.insert(key, EntryState::Occupied(val));\n    }\n\n    /// Removes the value from the map.\n    pub(crate) fn remove(&mut self, key: K) {\n        if self.contains_key(&key) {\n            self.new_index.insert(key, EntryState::Deleted);\n        }\n    }\n\n    /// Saves temporary changes to the backing map.\n    pub(crate) fn commit(self) {\n        for (key, val) in self.new_index {\n            match val {\n                EntryState::Occupied(val) => 
self.base_index.insert(key, val),\n                EntryState::Deleted => self.base_index.remove(&key),\n            };\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/block_store/lmdb/versioned_databases.rs",
    "content": "use datasize::DataSize;\nuse lmdb::{\n    Cursor, Database, DatabaseFlags, Environment, RwCursor, RwTransaction,\n    Transaction as LmdbTransaction,\n};\nuse serde::de::DeserializeOwned;\n#[cfg(test)]\nuse serde::Serialize;\nuse std::{collections::BTreeSet, marker::PhantomData};\nuse tracing::error;\n\nuse casper_types::{\n    bytesrepr::{FromBytes, ToBytes},\n    execution::ExecutionResult,\n    Approval, BlockBody, BlockBodyV1, BlockHash, BlockHeader, BlockHeaderV1, BlockSignatures,\n    BlockSignaturesV1, Deploy, DeployHash, Digest, Transaction, TransactionHash, TransferV1,\n};\n\nuse super::{\n    super::{\n        error::BlockStoreError,\n        types::{ApprovalsHashes, DeployMetadataV1, LegacyApprovalsHashes, Transfers},\n        DbRawBytesSpec,\n    },\n    lmdb_ext::{self, LmdbExtError, TransactionExt, WriteTransactionExt},\n};\n\npub(crate) trait VersionedKey: ToBytes {\n    type Legacy: AsRef<[u8]>;\n\n    fn legacy_key(&self) -> Option<&Self::Legacy>;\n}\n\npub(crate) trait VersionedValue: ToBytes + FromBytes {\n    type Legacy: 'static + DeserializeOwned + Into<Self>;\n}\n\nimpl VersionedKey for TransactionHash {\n    type Legacy = DeployHash;\n\n    fn legacy_key(&self) -> Option<&Self::Legacy> {\n        match self {\n            TransactionHash::Deploy(deploy_hash) => Some(deploy_hash),\n            TransactionHash::V1(_) => None,\n        }\n    }\n}\n\nimpl VersionedKey for BlockHash {\n    type Legacy = BlockHash;\n\n    fn legacy_key(&self) -> Option<&Self::Legacy> {\n        Some(self)\n    }\n}\n\nimpl VersionedKey for Digest {\n    type Legacy = Digest;\n\n    fn legacy_key(&self) -> Option<&Self::Legacy> {\n        Some(self)\n    }\n}\n\nimpl VersionedValue for Transaction {\n    type Legacy = Deploy;\n}\n\nimpl VersionedValue for BlockHeader {\n    type Legacy = BlockHeaderV1;\n}\n\nimpl VersionedValue for BlockBody {\n    type Legacy = BlockBodyV1;\n}\n\nimpl VersionedValue for ApprovalsHashes {\n    type Legacy = 
LegacyApprovalsHashes;\n}\n\nimpl VersionedValue for ExecutionResult {\n    type Legacy = DeployMetadataV1;\n}\n\nimpl VersionedValue for BTreeSet<Approval> {\n    type Legacy = BTreeSet<Approval>;\n}\n\nimpl VersionedValue for BlockSignatures {\n    type Legacy = BlockSignaturesV1;\n}\n\nimpl VersionedValue for Transfers {\n    type Legacy = Vec<TransferV1>;\n}\n\n/// A pair of databases, one holding the original legacy form of the data, and the other holding the\n/// new versioned, future-proof form of the data.\n///\n/// Specific entries should generally not be repeated - they will either be held in the legacy or\n/// the current DB, but not both.  Data is not migrated from legacy to current, but newly-stored\n/// data will always be written to the current DB, even if it is of the type `V::Legacy`.\n///\n/// Exceptions to this can occur if a pre-existing legacy entry is re-stored, in which case there\n/// will be a duplicated entry in the `legacy` and `current` DBs.  This should not be a common\n/// occurrence though.\n#[derive(Eq, PartialEq, DataSize, Debug)]\npub(crate) struct VersionedDatabases<K, V> {\n    /// Legacy form of the data, with the key as `K::Legacy` type (converted to bytes using\n    /// `AsRef<[u8]>`) and the value bincode-encoded.\n    #[data_size(skip)]\n    pub legacy: Database,\n    /// Current form of the data, with the key as `K` bytesrepr-encoded and the value as `V` also\n    /// bytesrepr-encoded.\n    #[data_size(skip)]\n    pub current: Database,\n    _phantom: PhantomData<(K, V)>,\n}\n\nimpl<K, V> Clone for VersionedDatabases<K, V> {\n    fn clone(&self) -> Self {\n        *self\n    }\n}\n\nimpl<K, V> Copy for VersionedDatabases<K, V> {}\n\nimpl<K, V> VersionedDatabases<K, V>\nwhere\n    K: VersionedKey + std::fmt::Display,\n    V: VersionedValue + 'static,\n{\n    pub(super) fn new(\n        env: &Environment,\n        legacy_name: &str,\n        current_name: &str,\n    ) -> Result<Self, lmdb::Error> {\n        
Ok(VersionedDatabases {\n            legacy: env.create_db(Some(legacy_name), DatabaseFlags::empty())?,\n            current: env.create_db(Some(current_name), DatabaseFlags::empty())?,\n            _phantom: PhantomData,\n        })\n    }\n\n    pub(super) fn put(\n        &self,\n        txn: &mut RwTransaction,\n        key: &K,\n        value: &V,\n        overwrite: bool,\n    ) -> Result<bool, LmdbExtError> {\n        txn.put_value_bytesrepr(self.current, key, value, overwrite)\n    }\n\n    pub(super) fn get<Tx: LmdbTransaction>(\n        &self,\n        txn: &Tx,\n        key: &K,\n    ) -> Result<Option<V>, LmdbExtError> {\n        match txn.get_value_bytesrepr(self.current, key) {\n            Ok(Some(value)) => return Ok(Some(value)),\n            Ok(None) => {\n                // check legacy db\n            }\n            Err(err) => {\n                error!(%err, \"versioned_database: failed to retrieve record from current db\");\n                return Err(err);\n            }\n        }\n\n        let legacy_key = match key.legacy_key() {\n            Some(key) => key,\n            None => return Ok(None),\n        };\n\n        Ok(txn\n            .get_value::<_, V::Legacy>(self.legacy, legacy_key)?\n            .map(Into::into))\n    }\n\n    pub(super) fn get_raw<Tx: LmdbTransaction>(\n        &self,\n        txn: &Tx,\n        key: &[u8],\n    ) -> Result<Option<DbRawBytesSpec>, LmdbExtError> {\n        if key.is_empty() {\n            return Ok(None);\n        }\n        let value = txn.get(self.current, &key);\n        match value {\n            Ok(raw_bytes) => Ok(Some(DbRawBytesSpec::new_current(raw_bytes))),\n            Err(lmdb::Error::NotFound) => {\n                let value = txn.get(self.legacy, &key);\n                match value {\n                    Ok(raw_bytes) => Ok(Some(DbRawBytesSpec::new_legacy(raw_bytes))),\n                    Err(lmdb::Error::NotFound) => Ok(None),\n                    Err(err) => Err(err.into()),\n     
           }\n            }\n            Err(err) => Err(err.into()),\n        }\n    }\n\n    pub(super) fn exists<Tx: LmdbTransaction>(\n        &self,\n        txn: &Tx,\n        key: &K,\n    ) -> Result<bool, LmdbExtError> {\n        if txn.value_exists_bytesrepr(self.current, key)? {\n            return Ok(true);\n        }\n\n        let legacy_key = match key.legacy_key() {\n            Some(key) => key,\n            None => return Ok(false),\n        };\n\n        txn.value_exists(self.legacy, legacy_key)\n    }\n\n    /// Deletes the value under `key` from both the current and legacy DBs.\n    ///\n    /// Returns `Ok` if the value is successfully deleted from either or both the DBs, or if the\n    /// value did not exist in either.\n    pub(super) fn delete(&self, txn: &mut RwTransaction, key: &K) -> Result<(), LmdbExtError> {\n        let serialized_key = lmdb_ext::serialize_bytesrepr(key)?;\n        let current_result = match txn.del(self.current, &serialized_key, None) {\n            Ok(_) | Err(lmdb::Error::NotFound) => Ok(()),\n            Err(error) => Err(error.into()),\n        };\n        // Avoid returning early for the case where `current_result` is Ok, since some\n        // `VersionedDatabases` could possibly have the same entry in both DBs.\n\n        let legacy_key = match key.legacy_key() {\n            Some(key) => key,\n            None => return current_result,\n        };\n\n        let legacy_result = match txn.del(self.legacy, legacy_key, None) {\n            Ok(_) | Err(lmdb::Error::NotFound) => Ok(()),\n            Err(error) => Err(error.into()),\n        };\n\n        match (current_result, legacy_result) {\n            (Err(error), _) => Err(error),\n            (_, Err(error)) => Err(error),\n            (Ok(_), Ok(_)) => Ok(()),\n        }\n    }\n\n    /// Iterates every row in the current database, deserializing the value and calling `f` with the\n    /// cursor and the parsed value.\n    pub(super) fn 
for_each_value_in_current<'a, F>(\n        &self,\n        txn: &'a mut RwTransaction,\n        f: &mut F,\n    ) -> Result<(), BlockStoreError>\n    where\n        F: FnMut(&mut RwCursor<'a>, V) -> Result<(), BlockStoreError>,\n    {\n        let mut cursor = txn\n            .open_rw_cursor(self.current)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n        for row in cursor.iter() {\n            let (_, raw_val) =\n                row.map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n            let value: V = lmdb_ext::deserialize_bytesrepr(raw_val)\n                .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n            f(&mut cursor, value)?;\n        }\n        Ok(())\n    }\n\n    /// Iterates every row in the legacy database, deserializing the value and calling `f` with the\n    /// cursor and the parsed value.\n    pub(super) fn for_each_value_in_legacy<'a, F>(\n        &self,\n        txn: &'a mut RwTransaction,\n        f: &mut F,\n    ) -> Result<(), BlockStoreError>\n    where\n        F: FnMut(&mut RwCursor<'a>, V) -> Result<(), BlockStoreError>,\n    {\n        let mut cursor = txn\n            .open_rw_cursor(self.legacy)\n            .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n        for row in cursor.iter() {\n            let (_, raw_val) =\n                row.map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n            let value: V::Legacy = lmdb_ext::deserialize(raw_val)\n                .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?;\n            f(&mut cursor, value.into())?;\n        }\n        Ok(())\n    }\n\n    /// Writes to the `legacy` database.\n    #[cfg(test)]\n    pub(super) fn put_legacy(\n        &self,\n        txn: &mut RwTransaction,\n        legacy_key: &K::Legacy,\n        legacy_value: &V::Legacy,\n        overwrite: bool,\n    ) -> bool\n    where\n        V::Legacy: Serialize,\n    {\n       
 txn.put_value(self.legacy, legacy_key, legacy_value, overwrite)\n            .expect(\"should put legacy value\")\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::block_store::lmdb::lmdb_block_store::new_environment;\n    use lmdb::WriteFlags;\n    use std::collections::HashMap;\n\n    use tempfile::TempDir;\n\n    use casper_types::testing::TestRng;\n\n    use super::*;\n\n    struct Fixture {\n        rng: TestRng,\n        env: Environment,\n        dbs: VersionedDatabases<TransactionHash, Transaction>,\n        random_transactions: HashMap<TransactionHash, Transaction>,\n        legacy_transactions: HashMap<DeployHash, Deploy>,\n        _data_dir: TempDir,\n    }\n\n    impl Fixture {\n        fn new() -> Fixture {\n            let rng = TestRng::new();\n            let data_dir = TempDir::new().expect(\"should create temp dir\");\n            let env = new_environment(1024 * 1024, data_dir.path()).unwrap();\n            let dbs = VersionedDatabases::new(&env, \"legacy\", \"current\").unwrap();\n            let mut fixture = Fixture {\n                rng,\n                env,\n                dbs,\n                random_transactions: HashMap::new(),\n                legacy_transactions: HashMap::new(),\n                _data_dir: data_dir,\n            };\n            for _ in 0..3 {\n                let transaction = Transaction::random(&mut fixture.rng);\n                assert!(fixture\n                    .random_transactions\n                    .insert(transaction.hash(), transaction)\n                    .is_none());\n                let deploy = Deploy::random(&mut fixture.rng);\n                assert!(fixture\n                    .legacy_transactions\n                    .insert(*deploy.hash(), deploy)\n                    .is_none());\n            }\n            fixture\n        }\n    }\n\n    #[test]\n    fn should_put() {\n        let fixture = Fixture::new();\n        let (transaction_hash, transaction) = 
fixture.random_transactions.iter().next().unwrap();\n\n        // Should return `true` on first `put`.\n        let mut txn = fixture.env.begin_rw_txn().unwrap();\n        assert!(fixture\n            .dbs\n            .put(&mut txn, transaction_hash, transaction, true)\n            .unwrap());\n\n        // Should return `false` on duplicate `put` if not set to overwrite.\n        assert!(!fixture\n            .dbs\n            .put(&mut txn, transaction_hash, transaction, false)\n            .unwrap());\n\n        // Should return `true` on duplicate `put` if set to overwrite.\n        assert!(fixture\n            .dbs\n            .put(&mut txn, transaction_hash, transaction, true)\n            .unwrap());\n    }\n\n    #[test]\n    fn should_get() {\n        let mut fixture = Fixture::new();\n        let (transaction_hash, transaction) = fixture.random_transactions.iter().next().unwrap();\n        let (deploy_hash, deploy) = fixture.legacy_transactions.iter().next().unwrap();\n\n        // Inject the deploy into the legacy DB and store the random transaction in the current DB.\n        let mut txn = fixture.env.begin_rw_txn().unwrap();\n        assert!(fixture.dbs.put_legacy(&mut txn, deploy_hash, deploy, true));\n        assert!(fixture\n            .dbs\n            .put(&mut txn, transaction_hash, transaction, true)\n            .unwrap());\n        txn.commit().unwrap();\n\n        // Should get the deploy.\n        let txn = fixture.env.begin_ro_txn().unwrap();\n        assert_eq!(\n            fixture\n                .dbs\n                .get(&txn, &TransactionHash::from(*deploy_hash))\n                .unwrap(),\n            Some(Transaction::from(deploy.clone()))\n        );\n\n        // Should get the random transaction.\n        assert_eq!(\n            fixture.dbs.get(&txn, transaction_hash).unwrap(),\n            Some(transaction.clone())\n        );\n\n        // Should return `Ok(None)` for non-existent data.\n        let random_hash = 
Transaction::random(&mut fixture.rng).hash();\n        assert!(fixture.dbs.get(&txn, &random_hash).unwrap().is_none());\n    }\n\n    #[test]\n    fn should_exist() {\n        let mut fixture = Fixture::new();\n        let (transaction_hash, transaction) = fixture.random_transactions.iter().next().unwrap();\n        let (deploy_hash, deploy) = fixture.legacy_transactions.iter().next().unwrap();\n\n        // Inject the deploy into the legacy DB and store the random transaction in the current DB.\n        let mut txn = fixture.env.begin_rw_txn().unwrap();\n        assert!(fixture.dbs.put_legacy(&mut txn, deploy_hash, deploy, true));\n        assert!(fixture\n            .dbs\n            .put(&mut txn, transaction_hash, transaction, true)\n            .unwrap());\n        txn.commit().unwrap();\n\n        // The deploy should exist.\n        let txn = fixture.env.begin_ro_txn().unwrap();\n        assert!(fixture\n            .dbs\n            .exists(&txn, &TransactionHash::from(*deploy_hash))\n            .unwrap());\n\n        // The random transaction should exist.\n        assert!(fixture.dbs.exists(&txn, transaction_hash).unwrap());\n\n        // Random data should not exist.\n        let random_hash = Transaction::random(&mut fixture.rng).hash();\n        assert!(!fixture.dbs.exists(&txn, &random_hash).unwrap());\n    }\n\n    #[test]\n    fn should_delete() {\n        let mut fixture = Fixture::new();\n        let (transaction_hash, transaction) = fixture.random_transactions.iter().next().unwrap();\n        let (deploy_hash, deploy) = fixture.legacy_transactions.iter().next().unwrap();\n\n        // Inject the deploy into the legacy DB and store the random transaction in the current DB.\n        let mut txn = fixture.env.begin_rw_txn().unwrap();\n        assert!(fixture.dbs.put_legacy(&mut txn, deploy_hash, deploy, true));\n        assert!(fixture\n            .dbs\n            .put(&mut txn, transaction_hash, transaction, true)\n            .unwrap());\n     
   // Also store the legacy deploy in the `current` DB.  While being an edge case, we still\n        // need to ensure that deleting removes both copies of the deploy.\n        assert!(fixture\n            .dbs\n            .put(\n                &mut txn,\n                &TransactionHash::from(*deploy_hash),\n                &Transaction::from(deploy.clone()),\n                true\n            )\n            .unwrap());\n        txn.commit().unwrap();\n\n        // Should delete the deploy.\n        let mut txn = fixture.env.begin_rw_txn().unwrap();\n        fixture\n            .dbs\n            .delete(&mut txn, &TransactionHash::from(*deploy_hash))\n            .unwrap();\n        assert!(!fixture\n            .dbs\n            .exists(&txn, &TransactionHash::from(*deploy_hash))\n            .unwrap());\n\n        // Should delete the random transaction.\n        fixture.dbs.delete(&mut txn, transaction_hash).unwrap();\n        assert!(!fixture.dbs.exists(&txn, transaction_hash).unwrap());\n\n        // Should report success when attempting to delete non-existent data.\n        let random_hash = Transaction::random(&mut fixture.rng).hash();\n        fixture.dbs.delete(&mut txn, &random_hash).unwrap();\n    }\n\n    #[test]\n    fn should_iterate_current() {\n        let fixture = Fixture::new();\n\n        // Store all random transactions.\n        let mut txn = fixture.env.begin_rw_txn().unwrap();\n        for (transaction_hash, transaction) in fixture.random_transactions.iter() {\n            assert!(fixture\n                .dbs\n                .put(&mut txn, transaction_hash, transaction, true)\n                .unwrap());\n        }\n        txn.commit().unwrap();\n\n        // Iterate `current`, deleting each cursor entry and gathering the visited values in a map.\n        let mut txn = fixture.env.begin_rw_txn().unwrap();\n        let mut visited = HashMap::new();\n        let mut visitor = |cursor: &mut RwCursor, transaction: Transaction| {\n         
   cursor.del(WriteFlags::empty()).unwrap();\n            let _ = visited.insert(transaction.hash(), transaction);\n            Ok(())\n        };\n        fixture\n            .dbs\n            .for_each_value_in_current(&mut txn, &mut visitor)\n            .unwrap();\n        txn.commit().unwrap();\n\n        // Ensure all values were visited and the DB doesn't contain them any more.\n        assert_eq!(visited, fixture.random_transactions);\n        let txn = fixture.env.begin_ro_txn().unwrap();\n        for transaction_hash in fixture.random_transactions.keys() {\n            assert!(!fixture.dbs.exists(&txn, transaction_hash).unwrap());\n        }\n\n        // Ensure a second run is a no-op.\n        let mut visitor = |_cursor: &mut RwCursor, _transaction: Transaction| {\n            panic!(\"should never get called\");\n        };\n        let mut txn = fixture.env.begin_rw_txn().unwrap();\n        fixture\n            .dbs\n            .for_each_value_in_current(&mut txn, &mut visitor)\n            .unwrap();\n    }\n\n    #[test]\n    fn should_iterate_legacy() {\n        let fixture = Fixture::new();\n\n        // Store all legacy transactions.\n        let mut txn = fixture.env.begin_rw_txn().unwrap();\n        for (deploy_hash, deploy) in fixture.legacy_transactions.iter() {\n            assert!(fixture.dbs.put_legacy(&mut txn, deploy_hash, deploy, true));\n        }\n        txn.commit().unwrap();\n\n        // Iterate `legacy`, deleting each cursor entry and gathering the visited values in a map.\n        let mut txn = fixture.env.begin_rw_txn().unwrap();\n        let mut visited = HashMap::new();\n        let mut visitor = |cursor: &mut RwCursor, transaction: Transaction| {\n            cursor.del(WriteFlags::empty()).unwrap();\n            match transaction {\n                Transaction::Deploy(deploy) => {\n                    let _ = visited.insert(*deploy.hash(), deploy);\n                }\n                Transaction::V1(_) => 
unreachable!(),\n            }\n            Ok(())\n        };\n        fixture\n            .dbs\n            .for_each_value_in_legacy(&mut txn, &mut visitor)\n            .unwrap();\n        txn.commit().unwrap();\n\n        // Ensure all values were visited and the DB doesn't contain them any more.\n        assert_eq!(visited, fixture.legacy_transactions);\n        let txn = fixture.env.begin_ro_txn().unwrap();\n        for deploy_hash in fixture.legacy_transactions.keys() {\n            assert!(!fixture\n                .dbs\n                .exists(&txn, &TransactionHash::from(*deploy_hash))\n                .unwrap());\n        }\n\n        // Ensure a second run is a no-op.\n        let mut visitor = |_cursor: &mut RwCursor, _transaction: Transaction| {\n            panic!(\"should never get called\");\n        };\n        let mut txn = fixture.env.begin_rw_txn().unwrap();\n        fixture\n            .dbs\n            .for_each_value_in_legacy(&mut txn, &mut visitor)\n            .unwrap();\n    }\n\n    #[test]\n    fn should_get_on_empty_key() {\n        let fixture = Fixture::new();\n        let txn = fixture.env.begin_ro_txn().unwrap();\n        let key = vec![];\n        let res = fixture.dbs.get_raw(&txn, &key);\n        assert!(matches!(res, Ok(None)));\n    }\n}\n"
  },
  {
    "path": "storage/src/block_store/mod.rs",
    "content": "mod block_provider;\nmod error;\n/// Block store lmdb logic.\npub mod lmdb;\n/// Block store types.\npub mod types;\n\npub use block_provider::{BlockStoreProvider, BlockStoreTransaction, DataReader, DataWriter};\npub use error::BlockStoreError;\n\n/// Stores raw bytes from the DB along with the flag indicating whether data come from legacy or\n/// current version of the DB.\n#[derive(Debug)]\npub struct DbRawBytesSpec {\n    is_legacy: bool,\n    raw_bytes: Vec<u8>,\n}\n\nimpl DbRawBytesSpec {\n    /// Creates a variant indicating that raw bytes are coming from the legacy database.\n    pub fn new_legacy(raw_bytes: &[u8]) -> Self {\n        Self {\n            is_legacy: true,\n            raw_bytes: raw_bytes.to_vec(),\n        }\n    }\n\n    /// Creates a variant indicating that raw bytes are coming from the current database.\n    pub fn new_current(raw_bytes: &[u8]) -> Self {\n        Self {\n            is_legacy: false,\n            raw_bytes: raw_bytes.to_vec(),\n        }\n    }\n\n    /// Is legacy?\n    pub fn is_legacy(&self) -> bool {\n        self.is_legacy\n    }\n\n    /// Raw bytes.\n    pub fn into_raw_bytes(self) -> Vec<u8> {\n        self.raw_bytes\n    }\n}\n"
  },
  {
    "path": "storage/src/block_store/types/approvals_hashes.rs",
    "content": "use std::{\n    collections::BTreeMap,\n    fmt::{self, Debug, Display, Formatter},\n};\n\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\nuse thiserror::Error;\nuse tracing::error;\n\nuse casper_types::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    global_state::TrieMerkleProof,\n    ApprovalsHash, Block, BlockHash, BlockV1, BlockV2, DeployId, Digest, Key, StoredValue,\n    TransactionId,\n};\n\nuse crate::global_state::trie_store::operations::compute_state_hash;\n\npub(crate) const APPROVALS_CHECKSUM_NAME: &str = \"approvals_checksum\";\n\n/// Returns the hash of the bytesrepr-encoded deploy_ids.\nfn compute_approvals_checksum(txn_ids: Vec<TransactionId>) -> Result<Digest, bytesrepr::Error> {\n    let bytes = txn_ids.into_bytes()?;\n    Ok(Digest::hash(bytes))\n}\n\n/// The data which is gossiped by validators to non-validators upon creation of a new block.\n#[derive(DataSize, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]\npub struct ApprovalsHashes {\n    /// Hash of the block that contains deploys that are relevant to the approvals.\n    block_hash: BlockHash,\n    /// The set of all deploys' finalized approvals' hashes.\n    approvals_hashes: Vec<ApprovalsHash>,\n    /// The Merkle proof of the checksum registry containing the checksum of the finalized\n    /// approvals.\n    #[data_size(skip)]\n    merkle_proof_approvals: TrieMerkleProof<Key, StoredValue>,\n}\n\nimpl ApprovalsHashes {\n    /// Ctor.\n    pub fn new(\n        block_hash: BlockHash,\n        approvals_hashes: Vec<ApprovalsHash>,\n        merkle_proof_approvals: TrieMerkleProof<Key, StoredValue>,\n    ) -> Self {\n        Self {\n            block_hash,\n            approvals_hashes,\n            merkle_proof_approvals,\n        }\n    }\n\n    /// Verify block.\n    pub fn verify(&self, block: &Block) -> Result<(), ApprovalsHashesValidationError> {\n        if *self.merkle_proof_approvals.key() != Key::ChecksumRegistry {\n            return 
Err(ApprovalsHashesValidationError::InvalidKeyType);\n        }\n\n        let proof_state_root_hash = compute_state_hash(&self.merkle_proof_approvals)\n            .map_err(ApprovalsHashesValidationError::TrieMerkleProof)?;\n\n        if proof_state_root_hash != *block.state_root_hash() {\n            return Err(ApprovalsHashesValidationError::StateRootHashMismatch {\n                proof_state_root_hash,\n                block_state_root_hash: *block.state_root_hash(),\n            });\n        }\n\n        let value_in_proof = self\n            .merkle_proof_approvals\n            .value()\n            .as_cl_value()\n            .and_then(|cl_value| cl_value.clone().into_t().ok())\n            .and_then(|registry: BTreeMap<String, Digest>| {\n                registry.get(APPROVALS_CHECKSUM_NAME).copied()\n            })\n            .ok_or(ApprovalsHashesValidationError::InvalidChecksumRegistry)?;\n\n        let computed_approvals_checksum = match block {\n            Block::V1(v1_block) => compute_legacy_approvals_checksum(self.deploy_ids(v1_block)?)?,\n            Block::V2(v2_block) => compute_approvals_checksum(self.transaction_ids(v2_block)?)\n                .map_err(ApprovalsHashesValidationError::ApprovalsChecksum)?,\n        };\n\n        if value_in_proof != computed_approvals_checksum {\n            return Err(ApprovalsHashesValidationError::ApprovalsChecksumMismatch {\n                computed_approvals_checksum,\n                value_in_proof,\n            });\n        }\n\n        Ok(())\n    }\n\n    /// Deploy ids.\n    pub(crate) fn deploy_ids(\n        &self,\n        v1_block: &BlockV1,\n    ) -> Result<Vec<DeployId>, ApprovalsHashesValidationError> {\n        let deploy_approvals_hashes = self.approvals_hashes.clone();\n        Ok(v1_block\n            .deploy_and_transfer_hashes()\n            .zip(deploy_approvals_hashes)\n            .map(|(deploy_hash, deploy_approvals_hash)| {\n                DeployId::new(*deploy_hash, 
deploy_approvals_hash)\n            })\n            .collect())\n    }\n\n    /// Transaction ids.\n    pub fn transaction_ids(\n        &self,\n        v2_block: &BlockV2,\n    ) -> Result<Vec<TransactionId>, ApprovalsHashesValidationError> {\n        v2_block\n            .all_transactions()\n            .zip(self.approvals_hashes.clone())\n            .map(|(txn_hash, txn_approvals_hash)| {\n                Ok(TransactionId::new(*txn_hash, txn_approvals_hash))\n            })\n            .collect()\n    }\n\n    /// Block hash.\n    pub fn block_hash(&self) -> &BlockHash {\n        &self.block_hash\n    }\n\n    /// Approvals hashes.\n    pub fn approvals_hashes(&self) -> Vec<ApprovalsHash> {\n        self.approvals_hashes.clone()\n    }\n}\n\nimpl Display for ApprovalsHashes {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"approvals hashes for {}\", self.block_hash())\n    }\n}\n\nimpl ToBytes for ApprovalsHashes {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.block_hash.write_bytes(writer)?;\n        self.approvals_hashes.write_bytes(writer)?;\n        self.merkle_proof_approvals.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.block_hash.serialized_length()\n            + self.approvals_hashes.serialized_length()\n            + self.merkle_proof_approvals.serialized_length()\n    }\n}\n\nimpl FromBytes for ApprovalsHashes {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (block_hash, remainder) = BlockHash::from_bytes(bytes)?;\n        let (approvals_hashes, remainder) = Vec::<ApprovalsHash>::from_bytes(remainder)?;\n        let (merkle_proof_approvals, remainder) =\n            TrieMerkleProof::<Key, 
StoredValue>::from_bytes(remainder)?;\n        Ok((\n            ApprovalsHashes {\n                block_hash,\n                approvals_hashes,\n                merkle_proof_approvals,\n            },\n            remainder,\n        ))\n    }\n}\n\n/// Returns the hash of the bytesrepr-encoded deploy_ids, as used until the `Block` enum became\n/// available.\npub(crate) fn compute_legacy_approvals_checksum(\n    deploy_ids: Vec<DeployId>,\n) -> Result<Digest, ApprovalsHashesValidationError> {\n    let bytes = deploy_ids\n        .into_bytes()\n        .map_err(ApprovalsHashesValidationError::ApprovalsChecksum)?;\n    Ok(Digest::hash(bytes))\n}\n\n/// An error that can arise when validating `ApprovalsHashes`.\n#[derive(Error, Debug, DataSize)]\n#[non_exhaustive]\npub enum ApprovalsHashesValidationError {\n    /// The key provided in the proof is not a `Key::ChecksumRegistry`.\n    #[error(\"key provided in proof is not a Key::ChecksumRegistry\")]\n    InvalidKeyType,\n\n    /// An error while computing the state root hash implied by the Merkle proof.\n    #[error(\"failed to compute state root hash implied by proof\")]\n    TrieMerkleProof(bytesrepr::Error),\n\n    /// The state root hash implied by the Merkle proof doesn't match that in the block.\n    #[error(\"state root hash implied by the Merkle proof doesn't match that in the block\")]\n    StateRootHashMismatch {\n        /// Proof state root hash.\n        proof_state_root_hash: Digest,\n        /// Block state root hash.\n        block_state_root_hash: Digest,\n    },\n\n    /// The value provided in the proof cannot be parsed to the checksum registry type.\n    #[error(\"value provided in the proof cannot be parsed to the checksum registry type\")]\n    InvalidChecksumRegistry,\n\n    /// An error while computing the checksum of the approvals.\n    #[error(\"failed to compute checksum of the approvals\")]\n    ApprovalsChecksum(bytesrepr::Error),\n\n    /// The approvals checksum provided doesn't match 
one calculated from the approvals.\n    #[error(\"provided approvals checksum doesn't match one calculated from the approvals\")]\n    ApprovalsChecksumMismatch {\n        /// Computed approvals checksum.\n        computed_approvals_checksum: Digest,\n        /// Value in proof.\n        value_in_proof: Digest,\n    },\n\n    /// Variant mismatch.\n    #[error(\"mismatch in variants: {0:?}\")]\n    #[data_size(skip)]\n    VariantMismatch(Box<dyn Debug + Send + Sync>),\n}\n\n/// Initial version of `ApprovalsHashes` prior to `casper-node` v2.0.0.\n#[derive(Deserialize)]\npub(crate) struct LegacyApprovalsHashes {\n    block_hash: BlockHash,\n    approvals_hashes: Vec<ApprovalsHash>,\n    merkle_proof_approvals: TrieMerkleProof<Key, StoredValue>,\n}\n\nimpl From<LegacyApprovalsHashes> for ApprovalsHashes {\n    fn from(\n        LegacyApprovalsHashes {\n            block_hash,\n            approvals_hashes,\n            merkle_proof_approvals,\n        }: LegacyApprovalsHashes,\n    ) -> Self {\n        ApprovalsHashes::new(block_hash, approvals_hashes, merkle_proof_approvals)\n    }\n}\n"
  },
  {
    "path": "storage/src/block_store/types/block_hash_height_and_era.rs",
    "content": "use datasize::DataSize;\n#[cfg(test)]\nuse rand::Rng;\n\n#[cfg(test)]\nuse casper_types::testing::TestRng;\nuse casper_types::{BlockHash, BlockHashAndHeight, EraId};\n\n/// Aggregates block identifying information.\n#[derive(Clone, Copy, Debug, DataSize)]\npub struct BlockHashHeightAndEra {\n    /// Block hash.\n    pub block_hash: BlockHash,\n    /// Block height.\n    pub block_height: u64,\n    /// EraId\n    pub era_id: EraId,\n}\n\nimpl BlockHashHeightAndEra {\n    /// Creates a new [`BlockHashHeightAndEra`] from parts.\n    pub fn new(block_hash: BlockHash, block_height: u64, era_id: EraId) -> Self {\n        BlockHashHeightAndEra {\n            block_hash,\n            block_height,\n            era_id,\n        }\n    }\n\n    /// Returns the block hash.\n    #[cfg(test)]\n    pub fn random(rng: &mut TestRng) -> Self {\n        Self {\n            block_hash: BlockHash::random(rng),\n            block_height: rng.gen(),\n            era_id: EraId::random(rng),\n        }\n    }\n}\n\nimpl From<BlockHashHeightAndEra> for BlockHashAndHeight {\n    fn from(bhhe: BlockHashHeightAndEra) -> Self {\n        BlockHashAndHeight::new(bhhe.block_hash, bhhe.block_height)\n    }\n}\n"
  },
  {
    "path": "storage/src/block_store/types/deploy_metadata_v1.rs",
    "content": "use std::collections::HashMap;\n\nuse serde::{Deserialize, Serialize};\n\nuse casper_types::{\n    execution::{ExecutionResult, ExecutionResultV1},\n    BlockHash,\n};\n\n/// Version 1 metadata related to a single deploy prior to `casper-node` v2.0.0.\n#[derive(Clone, Default, Serialize, Deserialize, Debug, PartialEq, Eq)]\npub(crate) struct DeployMetadataV1 {\n    /// The hash of the single block containing the related deploy, along with the results of\n    /// executing it.\n    ///\n    /// Due to reasons, this was implemented as a map, despite the guarantee that there will only\n    /// ever be a single entry.\n    pub(super) execution_results: HashMap<BlockHash, ExecutionResultV1>,\n}\n\nimpl From<DeployMetadataV1> for ExecutionResult {\n    fn from(v1_results: DeployMetadataV1) -> Self {\n        let v1_result = v1_results\n            .execution_results\n            .into_iter()\n            .next()\n            // Safe to unwrap as it's guaranteed to contain exactly one entry.\n            .expect(\"must be exactly one result\")\n            .1;\n        ExecutionResult::V1(v1_result)\n    }\n}\n"
  },
  {
    "path": "storage/src/block_store/types/mod.rs",
    "content": "mod approvals_hashes;\nmod block_hash_height_and_era;\nmod deploy_metadata_v1;\nmod transfers;\n\nuse std::{\n    borrow::Cow,\n    collections::{BTreeSet, HashMap},\n};\n\npub use approvals_hashes::{ApprovalsHashes, ApprovalsHashesValidationError};\npub use block_hash_height_and_era::BlockHashHeightAndEra;\nuse casper_types::{\n    execution::ExecutionResult, Approval, Block, BlockHash, BlockHeader, TransactionHash, Transfer,\n};\n\npub(crate) use approvals_hashes::LegacyApprovalsHashes;\npub(crate) use deploy_metadata_v1::DeployMetadataV1;\npub(in crate::block_store) use transfers::Transfers;\n\n/// Execution results.\npub type ExecutionResults = HashMap<TransactionHash, ExecutionResult>;\n\n/// Transaction finalized approvals.\npub struct TransactionFinalizedApprovals {\n    /// Transaction hash.\n    pub transaction_hash: TransactionHash,\n    /// Finalized approvals.\n    pub finalized_approvals: BTreeSet<Approval>,\n}\n\n/// Block execution results.\npub struct BlockExecutionResults {\n    /// Block info.\n    pub block_info: BlockHashHeightAndEra,\n    /// Execution results.\n    pub exec_results: ExecutionResults,\n}\n\n/// Block transfers.\npub struct BlockTransfers {\n    /// Block hash.\n    pub block_hash: BlockHash,\n    /// Transfers.\n    pub transfers: Vec<Transfer>,\n}\n\n/// State store.\npub struct StateStore {\n    /// Key.\n    pub key: Cow<'static, [u8]>,\n    /// Value.\n    pub value: Vec<u8>,\n}\n\n/// State store key.\npub struct StateStoreKey(pub(super) Cow<'static, [u8]>);\n\nimpl StateStoreKey {\n    /// Ctor.\n    pub fn new(key: Cow<'static, [u8]>) -> Self {\n        StateStoreKey(key)\n    }\n}\n\n/// Block tip anchor.\npub struct Tip;\n\n/// Latest switch block anchor.\npub struct LatestSwitchBlock;\n\n/// Block height.\npub type BlockHeight = u64;\n\n/// Switch block header alias.\npub type SwitchBlockHeader = BlockHeader;\n\n/// Switch block alias.\npub type SwitchBlock = Block;\n"
  },
  {
    "path": "storage/src/block_store/types/transfers.rs",
    "content": "use serde::{Deserialize, Serialize};\n\nuse casper_types::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    Transfer, TransferV1,\n};\n\n/// A wrapped `Vec<Transfer>`, used as the value type in the `transfer_dbs`.\n///\n/// It exists to allow the `impl From<Vec<TransferV1>>` to be written, making the type suitable for\n/// use as a parameter in a `VersionedDatabases`.\n#[derive(Clone, Serialize, Deserialize, Debug, Default, PartialEq, Eq)]\npub(in crate::block_store) struct Transfers(Vec<Transfer>);\n\nimpl Transfers {\n    pub(in crate::block_store) fn into_owned(self) -> Vec<Transfer> {\n        self.0\n    }\n}\n\nimpl From<Vec<TransferV1>> for Transfers {\n    fn from(v1_transfers: Vec<TransferV1>) -> Self {\n        Transfers(v1_transfers.into_iter().map(Transfer::V1).collect())\n    }\n}\n\nimpl From<Vec<Transfer>> for Transfers {\n    fn from(transfers: Vec<Transfer>) -> Self {\n        Transfers(transfers)\n    }\n}\n\nimpl ToBytes for Transfers {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.0.write_bytes(writer)\n    }\n}\n\nimpl FromBytes for Transfers {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        Vec::<Transfer>::from_bytes(bytes)\n            .map(|(transfers, remainder)| (Transfers(transfers), remainder))\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/addressable_entity.rs",
    "content": "use crate::tracking_copy::TrackingCopyError;\nuse casper_types::{AddressableEntity, Digest, Key};\n\n/// Represents a request to obtain an addressable entity.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct AddressableEntityRequest {\n    state_hash: Digest,\n    key: Key,\n}\n\nimpl AddressableEntityRequest {\n    /// Creates new request.\n    pub fn new(state_hash: Digest, key: Key) -> Self {\n        AddressableEntityRequest { state_hash, key }\n    }\n\n    /// Returns state root hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Returns key.\n    pub fn key(&self) -> Key {\n        self.key\n    }\n}\n\n/// Represents a result of a `addressable_entity` request.\n#[derive(Debug)]\npub enum AddressableEntityResult {\n    /// Invalid state root hash.\n    RootNotFound,\n    /// Value not found.\n    ValueNotFound(String),\n    /// Contains an addressable entity from global state.\n    Success {\n        /// An addressable entity.\n        entity: AddressableEntity,\n    },\n    /// Failure.\n    Failure(TrackingCopyError),\n}\n\nimpl AddressableEntityResult {\n    /// Returns wrapped addressable entity if this represents a successful query result.\n    pub fn into_option(self) -> Option<AddressableEntity> {\n        if let Self::Success { entity } = self {\n            Some(entity)\n        } else {\n            None\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/auction.rs",
    "content": "use std::collections::BTreeSet;\n\nuse serde::Serialize;\nuse thiserror::Error;\nuse tracing::error;\n\nuse casper_types::{\n    account::AccountHash,\n    bytesrepr::FromBytes,\n    execution::Effects,\n    system::{\n        auction,\n        auction::{DelegationRate, DelegatorKind, Reservation},\n    },\n    CLTyped, CLValue, CLValueError, Chainspec, Digest, InitiatorAddr, ProtocolVersion, PublicKey,\n    RuntimeArgs, TransactionEntryPoint, TransactionHash, Transfer, URefAddr, U512,\n};\n\nuse crate::{\n    system::runtime_native::Config as NativeRuntimeConfig, tracking_copy::TrackingCopyError,\n};\n\n/// An error returned when constructing an [`AuctionMethod`].\n#[derive(Clone, Eq, PartialEq, Error, Serialize, Debug)]\npub enum AuctionMethodError {\n    /// Provided entry point is not one of the Auction ones.\n    #[error(\"invalid entry point for auction: {0}\")]\n    InvalidEntryPoint(TransactionEntryPoint),\n    /// Required arg missing.\n    #[error(\"missing '{0}' arg\")]\n    MissingArg(String),\n    /// Failed to parse the given arg.\n    #[error(\"failed to parse '{arg}' arg: {error}\")]\n    CLValue {\n        /// The arg name.\n        arg: String,\n        /// The failure.\n        error: CLValueError,\n    },\n}\n\n/// Auction method to interact with.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub enum AuctionMethod {\n    /// Activate bid.\n    ActivateBid {\n        /// Validator public key (must match initiating address).\n        validator: PublicKey,\n    },\n    /// Add bid.\n    AddBid {\n        /// Validator public key (must match initiating address).\n        public_key: PublicKey,\n        /// Delegation rate for this validator bid.\n        delegation_rate: DelegationRate,\n        /// Bid amount.\n        amount: U512,\n        /// Minimum delegation amount for this validator bid.\n        /// if provided by the user is set to Some\n        minimum_delegation_amount: Option<u64>,\n        /// Maximum delegation amount for 
this validator bid.\n        /// if provided by the user is set to Some\n        maximum_delegation_amount: Option<u64>,\n        /// The minimum bid amount a validator must submit to have\n        /// their bid considered as valid.\n        minimum_bid_amount: u64,\n        /// Number of delegator slots which can be reserved for specific delegators\n        reserved_slots: u32,\n    },\n    /// Withdraw bid.\n    WithdrawBid {\n        /// Validator public key.\n        public_key: PublicKey,\n        /// Bid amount.\n        amount: U512,\n        /// The minimum bid amount a validator, if a validator reduces their stake\n        /// below this amount, then it is treated as a complete withdrawal.\n        minimum_bid_amount: u64,\n    },\n    /// Delegate to validator.\n    Delegate {\n        /// Delegator public key.\n        delegator: DelegatorKind,\n        /// Validator public key.\n        validator: PublicKey,\n        /// Delegation amount.\n        amount: U512,\n        /// Max delegators per validator.\n        max_delegators_per_validator: u32,\n    },\n    /// Undelegate from validator.\n    Undelegate {\n        /// Delegator public key.\n        delegator: DelegatorKind,\n        /// Validator public key.\n        validator: PublicKey,\n        /// Undelegation amount.\n        amount: U512,\n    },\n    /// Undelegate from validator and attempt delegation to new validator after unbonding delay\n    /// elapses.\n    Redelegate {\n        /// Delegator public key.\n        delegator: DelegatorKind,\n        /// Validator public key.\n        validator: PublicKey,\n        /// Redelegation amount.\n        amount: U512,\n        /// New validator public key.\n        new_validator: PublicKey,\n    },\n    /// Change the public key associated with a validator to a different public key.\n    ChangeBidPublicKey {\n        /// Current public key.\n        public_key: PublicKey,\n        /// New public key.\n        new_public_key: PublicKey,\n    },\n  
  /// Add delegator slot reservations.\n    AddReservations {\n        /// List of reservations.\n        reservations: Vec<Reservation>,\n    },\n    /// Remove delegator slot reservations for delegators with specified public keys.\n    CancelReservations {\n        /// Validator public key.\n        validator: PublicKey,\n        /// List of delegator public keys.\n        delegators: Vec<DelegatorKind>,\n        /// Max delegators per validator.\n        max_delegators_per_validator: u32,\n    },\n}\n\nimpl AuctionMethod {\n    /// Form auction method from parts.\n    pub fn from_parts(\n        entry_point: TransactionEntryPoint,\n        runtime_args: &RuntimeArgs,\n        chainspec: &Chainspec,\n    ) -> Result<Self, AuctionMethodError> {\n        match entry_point {\n            TransactionEntryPoint::Call\n            | TransactionEntryPoint::Custom(_)\n            | TransactionEntryPoint::Transfer\n            | TransactionEntryPoint::Burn => {\n                Err(AuctionMethodError::InvalidEntryPoint(entry_point))\n            }\n            TransactionEntryPoint::ActivateBid => Self::new_activate_bid(runtime_args),\n            TransactionEntryPoint::AddBid => {\n                Self::new_add_bid(runtime_args, chainspec.core_config.minimum_bid_amount)\n            }\n            TransactionEntryPoint::WithdrawBid => {\n                Self::new_withdraw_bid(runtime_args, chainspec.core_config.minimum_bid_amount)\n            }\n            TransactionEntryPoint::Delegate => Self::new_delegate(\n                runtime_args,\n                chainspec.core_config.max_delegators_per_validator,\n            ),\n            TransactionEntryPoint::Undelegate => Self::new_undelegate(runtime_args),\n            TransactionEntryPoint::Redelegate => Self::new_redelegate(runtime_args),\n            TransactionEntryPoint::ChangeBidPublicKey => {\n                Self::new_change_bid_public_key(runtime_args)\n            }\n            
TransactionEntryPoint::AddReservations => Self::new_add_reservations(runtime_args),\n            TransactionEntryPoint::CancelReservations => Self::new_cancel_reservations(\n                runtime_args,\n                chainspec.core_config.max_delegators_per_validator,\n            ),\n        }\n    }\n\n    fn new_activate_bid(runtime_args: &RuntimeArgs) -> Result<Self, AuctionMethodError> {\n        let validator = Self::get_named_argument(runtime_args, auction::ARG_VALIDATOR)?;\n        Ok(Self::ActivateBid { validator })\n    }\n\n    fn new_add_bid(\n        runtime_args: &RuntimeArgs,\n        global_minimum_bid_amount: u64,\n    ) -> Result<Self, AuctionMethodError> {\n        let public_key = Self::get_named_argument(runtime_args, auction::ARG_PUBLIC_KEY)?;\n        let delegation_rate = Self::get_named_argument(runtime_args, auction::ARG_DELEGATION_RATE)?;\n        let amount = Self::get_named_argument(runtime_args, auction::ARG_AMOUNT)?;\n        let minimum_delegation_amount =\n            Self::try_get_named_argument(runtime_args, auction::ARG_MINIMUM_DELEGATION_AMOUNT)?;\n        let maximum_delegation_amount =\n            Self::try_get_named_argument(runtime_args, auction::ARG_MAXIMUM_DELEGATION_AMOUNT)?;\n        let reserved_slots =\n            Self::get_named_argument(runtime_args, auction::ARG_RESERVED_SLOTS).unwrap_or(0);\n\n        Ok(Self::AddBid {\n            public_key,\n            delegation_rate,\n            amount,\n            minimum_delegation_amount,\n            maximum_delegation_amount,\n            minimum_bid_amount: global_minimum_bid_amount,\n            reserved_slots,\n        })\n    }\n\n    fn new_withdraw_bid(\n        runtime_args: &RuntimeArgs,\n        global_minimum_bid_amount: u64,\n    ) -> Result<Self, AuctionMethodError> {\n        let public_key = Self::get_named_argument(runtime_args, auction::ARG_PUBLIC_KEY)?;\n        let amount = Self::get_named_argument(runtime_args, auction::ARG_AMOUNT)?;\n        
Ok(Self::WithdrawBid {\n            public_key,\n            amount,\n            minimum_bid_amount: global_minimum_bid_amount,\n        })\n    }\n\n    fn new_delegate(\n        runtime_args: &RuntimeArgs,\n        max_delegators_per_validator: u32,\n    ) -> Result<Self, AuctionMethodError> {\n        let delegator = {\n            match Self::get_named_argument(runtime_args, auction::ARG_DELEGATOR) {\n                Ok(pk) => DelegatorKind::PublicKey(pk),\n                Err(_) => {\n                    let purse: URefAddr =\n                        Self::get_named_argument(runtime_args, auction::ARG_DELEGATOR_PURSE)?;\n                    DelegatorKind::Purse(purse)\n                }\n            }\n        };\n        let validator = Self::get_named_argument(runtime_args, auction::ARG_VALIDATOR)?;\n        let amount = Self::get_named_argument(runtime_args, auction::ARG_AMOUNT)?;\n\n        Ok(Self::Delegate {\n            delegator,\n            validator,\n            amount,\n            max_delegators_per_validator,\n        })\n    }\n\n    fn new_undelegate(runtime_args: &RuntimeArgs) -> Result<Self, AuctionMethodError> {\n        let delegator = {\n            match Self::get_named_argument(runtime_args, auction::ARG_DELEGATOR) {\n                Ok(pk) => DelegatorKind::PublicKey(pk),\n                Err(_) => {\n                    let purse: URefAddr =\n                        Self::get_named_argument(runtime_args, auction::ARG_DELEGATOR_PURSE)?;\n                    DelegatorKind::Purse(purse)\n                }\n            }\n        };\n        let validator = Self::get_named_argument(runtime_args, auction::ARG_VALIDATOR)?;\n        let amount = Self::get_named_argument(runtime_args, auction::ARG_AMOUNT)?;\n\n        Ok(Self::Undelegate {\n            delegator,\n            validator,\n            amount,\n        })\n    }\n\n    fn new_redelegate(runtime_args: &RuntimeArgs) -> Result<Self, AuctionMethodError> {\n        let delegator = 
{\n            match Self::get_named_argument(runtime_args, auction::ARG_DELEGATOR) {\n                Ok(pk) => DelegatorKind::PublicKey(pk),\n                Err(_) => {\n                    let purse: URefAddr =\n                        Self::get_named_argument(runtime_args, auction::ARG_DELEGATOR_PURSE)?;\n                    DelegatorKind::Purse(purse)\n                }\n            }\n        };\n        let validator = Self::get_named_argument(runtime_args, auction::ARG_VALIDATOR)?;\n        let amount = Self::get_named_argument(runtime_args, auction::ARG_AMOUNT)?;\n        let new_validator = Self::get_named_argument(runtime_args, auction::ARG_NEW_VALIDATOR)?;\n\n        Ok(Self::Redelegate {\n            delegator,\n            validator,\n            amount,\n            new_validator,\n        })\n    }\n\n    fn new_change_bid_public_key(runtime_args: &RuntimeArgs) -> Result<Self, AuctionMethodError> {\n        let public_key = Self::get_named_argument(runtime_args, auction::ARG_PUBLIC_KEY)?;\n        let new_public_key = Self::get_named_argument(runtime_args, auction::ARG_NEW_PUBLIC_KEY)?;\n\n        Ok(Self::ChangeBidPublicKey {\n            public_key,\n            new_public_key,\n        })\n    }\n\n    fn new_add_reservations(runtime_args: &RuntimeArgs) -> Result<Self, AuctionMethodError> {\n        let reservations = Self::get_named_argument(runtime_args, auction::ARG_RESERVATIONS)?;\n\n        Ok(Self::AddReservations { reservations })\n    }\n\n    fn new_cancel_reservations(\n        runtime_args: &RuntimeArgs,\n        max_delegators_per_validator: u32,\n    ) -> Result<Self, AuctionMethodError> {\n        let validator = Self::get_named_argument(runtime_args, auction::ARG_VALIDATOR)?;\n        let delegators = Self::get_named_argument(runtime_args, auction::ARG_DELEGATORS)?;\n\n        Ok(Self::CancelReservations {\n            validator,\n            delegators,\n            max_delegators_per_validator,\n        })\n    }\n\n    fn 
get_named_argument<T: FromBytes + CLTyped>(\n        args: &RuntimeArgs,\n        name: &str,\n    ) -> Result<T, AuctionMethodError> {\n        let arg: &CLValue = args\n            .get(name)\n            .ok_or_else(|| AuctionMethodError::MissingArg(name.to_string()))?;\n        arg.to_t().map_err(|error| AuctionMethodError::CLValue {\n            arg: name.to_string(),\n            error,\n        })\n    }\n\n    fn try_get_named_argument<T: FromBytes + CLTyped>(\n        args: &RuntimeArgs,\n        name: &str,\n    ) -> Result<Option<T>, AuctionMethodError> {\n        match args.get(name) {\n            Some(arg) => {\n                let arg = arg\n                    .clone()\n                    .into_t()\n                    .map_err(|error| AuctionMethodError::CLValue {\n                        arg: name.to_string(),\n                        error,\n                    })?;\n                Ok(Some(arg))\n            }\n            None => Ok(None),\n        }\n    }\n}\n\n/// Bidding request.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct BiddingRequest {\n    /// The runtime config.\n    pub(crate) config: NativeRuntimeConfig,\n    /// State root hash.\n    pub(crate) state_hash: Digest,\n    /// The protocol version.\n    pub(crate) protocol_version: ProtocolVersion,\n    /// The auction method.\n    pub(crate) auction_method: AuctionMethod,\n    /// Transaction hash.\n    pub(crate) transaction_hash: TransactionHash,\n    /// Base account.\n    pub(crate) initiator: InitiatorAddr,\n    /// List of authorizing accounts.\n    pub(crate) authorization_keys: BTreeSet<AccountHash>,\n}\n\nimpl BiddingRequest {\n    /// Creates new request instance with runtime args.\n    #[allow(clippy::too_many_arguments)]\n    pub fn new(\n        config: NativeRuntimeConfig,\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        transaction_hash: TransactionHash,\n        initiator: InitiatorAddr,\n        authorization_keys: 
BTreeSet<AccountHash>,\n        auction_method: AuctionMethod,\n    ) -> Self {\n        Self {\n            config,\n            state_hash,\n            protocol_version,\n            transaction_hash,\n            initiator,\n            authorization_keys,\n            auction_method,\n        }\n    }\n\n    /// Returns the config.\n    pub fn config(&self) -> &NativeRuntimeConfig {\n        &self.config\n    }\n\n    /// Returns the state hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Returns the protocol version.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n\n    /// Returns the auction method.\n    pub fn auction_method(&self) -> &AuctionMethod {\n        &self.auction_method\n    }\n\n    /// Returns the transaction hash.\n    pub fn transaction_hash(&self) -> TransactionHash {\n        self.transaction_hash\n    }\n\n    /// Returns the initiator.\n    pub fn initiator(&self) -> &InitiatorAddr {\n        &self.initiator\n    }\n\n    /// Returns the authorization keys.\n    pub fn authorization_keys(&self) -> &BTreeSet<AccountHash> {\n        &self.authorization_keys\n    }\n}\n\n/// Auction method ret.\n#[derive(Debug, Clone)]\npub enum AuctionMethodRet {\n    /// Unit.\n    Unit,\n    /// Updated amount.\n    UpdatedAmount(U512),\n}\n\n/// Bidding result.\n#[derive(Debug)]\npub enum BiddingResult {\n    /// Invalid state root hash.\n    RootNotFound,\n    /// Bidding request succeeded\n    Success {\n        /// Transfer records.\n        transfers: Vec<Transfer>,\n        /// Effects of bidding interaction.\n        effects: Effects,\n        /// The ret value, if any.\n        ret: AuctionMethodRet,\n    },\n    /// Bidding request failed.\n    Failure(TrackingCopyError),\n}\n\nimpl BiddingResult {\n    /// Is this a success.\n    pub fn is_success(&self) -> bool {\n        matches!(self, BiddingResult::Success { .. 
})\n    }\n\n    /// Effects.\n    pub fn effects(&self) -> Effects {\n        match self {\n            BiddingResult::RootNotFound | BiddingResult::Failure(_) => Effects::new(),\n            BiddingResult::Success { effects, .. } => effects.clone(),\n        }\n    }\n\n    /// Returns the tracking copy error if present.\n    pub fn maybe_error(&self) -> Option<TrackingCopyError> {\n        if let Self::Failure(tce) = self {\n            return Some(tce.clone());\n        }\n\n        None\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/balance.rs",
    "content": "//! Types for balance queries.\nuse casper_types::{\n    account::AccountHash,\n    global_state::TrieMerkleProof,\n    system::{\n        handle_payment::{ACCUMULATION_PURSE_KEY, PAYMENT_PURSE_KEY, REFUND_PURSE_KEY},\n        mint::BalanceHoldAddrTag,\n        HANDLE_PAYMENT,\n    },\n    AccessRights, BlockTime, Digest, EntityAddr, HoldBalanceHandling, InitiatorAddr, Key,\n    ProtocolVersion, PublicKey, StoredValue, TimeDiff, URef, URefAddr, U512,\n};\nuse itertools::Itertools;\nuse num_rational::Ratio;\nuse num_traits::CheckedMul;\nuse std::{\n    collections::{btree_map::Entry, BTreeMap},\n    fmt::{Display, Formatter},\n};\nuse tracing::error;\n\nuse crate::{\n    global_state::state::StateReader,\n    tracking_copy::{TrackingCopyEntityExt, TrackingCopyError, TrackingCopyExt},\n    TrackingCopy,\n};\n\n/// How to handle available balance inquiry?\n#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]\npub enum BalanceHandling {\n    /// Ignore balance holds.\n    #[default]\n    Total,\n    /// Adjust for balance holds (if any).\n    Available,\n}\n\n/// Merkle proof handling options.\n#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]\npub enum ProofHandling {\n    /// Do not attempt to provide proofs.\n    #[default]\n    NoProofs,\n    /// Provide proofs.\n    Proofs,\n}\n\n/// Represents a way to make a balance inquiry.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub enum BalanceIdentifier {\n    /// Use system refund purse (held by handle payment system contract).\n    Refund,\n    /// Use system payment purse (held by handle payment system contract).\n    Payment,\n    /// Use system accumulate purse (held by handle payment system contract).\n    Accumulate,\n    /// Use purse associated to specified uref.\n    Purse(URef),\n    /// Use main purse of entity derived from public key.\n    Public(PublicKey),\n    /// Use main purse of entity from account hash.\n    Account(AccountHash),\n    /// Use main purse of entity.\n    
Entity(EntityAddr),\n    /// Use purse at Key::Purse(URefAddr).\n    Internal(URefAddr),\n    /// Penalized account identifier.\n    PenalizedAccount(AccountHash),\n    /// Penalized payment identifier.\n    PenalizedPayment,\n}\n\nimpl BalanceIdentifier {\n    /// Returns underlying uref addr from balance identifier, if any.\n    pub fn as_purse_addr(&self) -> Option<URefAddr> {\n        match self {\n            BalanceIdentifier::Internal(addr) => Some(*addr),\n            BalanceIdentifier::Purse(uref) => Some(uref.addr()),\n            BalanceIdentifier::Public(_)\n            | BalanceIdentifier::Account(_)\n            | BalanceIdentifier::PenalizedAccount(_)\n            | BalanceIdentifier::PenalizedPayment\n            | BalanceIdentifier::Entity(_)\n            | BalanceIdentifier::Refund\n            | BalanceIdentifier::Payment\n            | BalanceIdentifier::Accumulate => None,\n        }\n    }\n\n    /// Return purse_uref, if able.\n    pub fn purse_uref<S>(\n        &self,\n        tc: &mut TrackingCopy<S>,\n        protocol_version: ProtocolVersion,\n    ) -> Result<URef, TrackingCopyError>\n    where\n        S: StateReader<Key, StoredValue, Error = crate::global_state::error::Error>,\n    {\n        let purse_uref = match self {\n            BalanceIdentifier::Internal(addr) => URef::new(*addr, AccessRights::READ),\n            BalanceIdentifier::Purse(purse_uref) => *purse_uref,\n            BalanceIdentifier::Public(public_key) => {\n                let account_hash = public_key.to_account_hash();\n                match tc.runtime_footprint_by_account_hash(protocol_version, account_hash) {\n                    Ok((_, entity)) => entity\n                        .main_purse()\n                        .ok_or(TrackingCopyError::Authorization)?,\n                    Err(tce) => return Err(tce),\n                }\n            }\n            BalanceIdentifier::Account(account_hash)\n            | BalanceIdentifier::PenalizedAccount(account_hash) 
=> {\n                match tc.runtime_footprint_by_account_hash(protocol_version, *account_hash) {\n                    Ok((_, entity)) => entity\n                        .main_purse()\n                        .ok_or(TrackingCopyError::Authorization)?,\n                    Err(tce) => return Err(tce),\n                }\n            }\n            BalanceIdentifier::Entity(entity_addr) => {\n                match tc.runtime_footprint_by_entity_addr(*entity_addr) {\n                    Ok(entity) => entity\n                        .main_purse()\n                        .ok_or(TrackingCopyError::Authorization)?,\n                    Err(tce) => return Err(tce),\n                }\n            }\n            BalanceIdentifier::Refund => {\n                self.get_system_purse(tc, HANDLE_PAYMENT, REFUND_PURSE_KEY)?\n            }\n            BalanceIdentifier::Payment | BalanceIdentifier::PenalizedPayment => {\n                self.get_system_purse(tc, HANDLE_PAYMENT, PAYMENT_PURSE_KEY)?\n            }\n            BalanceIdentifier::Accumulate => {\n                self.get_system_purse(tc, HANDLE_PAYMENT, ACCUMULATION_PURSE_KEY)?\n            }\n        };\n        Ok(purse_uref)\n    }\n\n    fn get_system_purse<S>(\n        &self,\n        tc: &mut TrackingCopy<S>,\n        system_contract_name: &str,\n        named_key_name: &str,\n    ) -> Result<URef, TrackingCopyError>\n    where\n        S: StateReader<Key, StoredValue, Error = crate::global_state::error::Error>,\n    {\n        let system_contract_registry = tc.get_system_entity_registry()?;\n\n        let entity_hash = system_contract_registry\n            .get(system_contract_name)\n            .ok_or_else(|| {\n                error!(\"Missing system handle payment contract hash\");\n                TrackingCopyError::MissingSystemContractHash(system_contract_name.to_string())\n            })?;\n\n        let named_keys = tc\n            
.runtime_footprint_by_entity_addr(EntityAddr::System(*entity_hash))?\n            .take_named_keys();\n\n        let named_key =\n            named_keys\n                .get(named_key_name)\n                .ok_or(TrackingCopyError::NamedKeyNotFound(\n                    named_key_name.to_string(),\n                ))?;\n        let uref = named_key\n            .as_uref()\n            .ok_or(TrackingCopyError::UnexpectedKeyVariant(*named_key))?;\n        Ok(*uref)\n    }\n\n    /// Is this balance identifier for penalty?\n    pub fn is_penalty(&self) -> bool {\n        matches!(\n            self,\n            BalanceIdentifier::PenalizedAccount(_) | BalanceIdentifier::PenalizedPayment\n        )\n    }\n}\n\nimpl Default for BalanceIdentifier {\n    fn default() -> Self {\n        BalanceIdentifier::Purse(URef::default())\n    }\n}\n\nimpl From<InitiatorAddr> for BalanceIdentifier {\n    fn from(value: InitiatorAddr) -> Self {\n        match value {\n            InitiatorAddr::PublicKey(public_key) => BalanceIdentifier::Public(public_key),\n            InitiatorAddr::AccountHash(account_hash) => BalanceIdentifier::Account(account_hash),\n        }\n    }\n}\n\n/// Processing hold balance handling.\n#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]\npub struct ProcessingHoldBalanceHandling {}\n\nimpl ProcessingHoldBalanceHandling {\n    /// Returns new instance.\n    pub fn new() -> Self {\n        ProcessingHoldBalanceHandling::default()\n    }\n\n    /// Returns handling.\n    pub fn handling(&self) -> HoldBalanceHandling {\n        HoldBalanceHandling::Accrued\n    }\n\n    /// Returns true if handling is amortized.\n    pub fn is_amortized(&self) -> bool {\n        false\n    }\n\n    /// Returns hold interval.\n    pub fn interval(&self) -> TimeDiff {\n        TimeDiff::default()\n    }\n}\n\nimpl From<(HoldBalanceHandling, u64)> for ProcessingHoldBalanceHandling {\n    fn from(_value: (HoldBalanceHandling, u64)) -> Self {\n        
ProcessingHoldBalanceHandling::default()\n    }\n}\n\n/// Gas hold balance handling.\n#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]\npub struct GasHoldBalanceHandling {\n    handling: HoldBalanceHandling,\n    interval: TimeDiff,\n}\n\nimpl GasHoldBalanceHandling {\n    /// Returns new instance.\n    pub fn new(handling: HoldBalanceHandling, interval: TimeDiff) -> Self {\n        GasHoldBalanceHandling { handling, interval }\n    }\n\n    /// Returns handling.\n    pub fn handling(&self) -> HoldBalanceHandling {\n        self.handling\n    }\n\n    /// Returns interval.\n    pub fn interval(&self) -> TimeDiff {\n        self.interval\n    }\n\n    /// Returns true if handling is amortized.\n    pub fn is_amortized(&self) -> bool {\n        matches!(self.handling, HoldBalanceHandling::Amortized)\n    }\n}\n\nimpl From<(HoldBalanceHandling, TimeDiff)> for GasHoldBalanceHandling {\n    fn from(value: (HoldBalanceHandling, TimeDiff)) -> Self {\n        GasHoldBalanceHandling {\n            handling: value.0,\n            interval: value.1,\n        }\n    }\n}\n\nimpl From<(HoldBalanceHandling, u64)> for GasHoldBalanceHandling {\n    fn from(value: (HoldBalanceHandling, u64)) -> Self {\n        GasHoldBalanceHandling {\n            handling: value.0,\n            interval: TimeDiff::from_millis(value.1),\n        }\n    }\n}\n\n/// Represents a balance request.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct BalanceRequest {\n    state_hash: Digest,\n    protocol_version: ProtocolVersion,\n    identifier: BalanceIdentifier,\n    balance_handling: BalanceHandling,\n    proof_handling: ProofHandling,\n}\n\nimpl BalanceRequest {\n    /// Creates a new [`BalanceRequest`].\n    pub fn new(\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        identifier: BalanceIdentifier,\n        balance_handling: BalanceHandling,\n        proof_handling: ProofHandling,\n    ) -> Self {\n        BalanceRequest {\n            state_hash,\n            
protocol_version,\n            identifier,\n            balance_handling,\n            proof_handling,\n        }\n    }\n\n    /// Creates a new [`BalanceRequest`].\n    pub fn from_purse(\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        purse_uref: URef,\n        balance_handling: BalanceHandling,\n        proof_handling: ProofHandling,\n    ) -> Self {\n        BalanceRequest {\n            state_hash,\n            protocol_version,\n            identifier: BalanceIdentifier::Purse(purse_uref),\n            balance_handling,\n            proof_handling,\n        }\n    }\n\n    /// Creates a new [`BalanceRequest`].\n    pub fn from_public_key(\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        public_key: PublicKey,\n        balance_handling: BalanceHandling,\n        proof_handling: ProofHandling,\n    ) -> Self {\n        BalanceRequest {\n            state_hash,\n            protocol_version,\n            identifier: BalanceIdentifier::Public(public_key),\n            balance_handling,\n            proof_handling,\n        }\n    }\n\n    /// Creates a new [`BalanceRequest`].\n    pub fn from_account_hash(\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        account_hash: AccountHash,\n        balance_handling: BalanceHandling,\n        proof_handling: ProofHandling,\n    ) -> Self {\n        BalanceRequest {\n            state_hash,\n            protocol_version,\n            identifier: BalanceIdentifier::Account(account_hash),\n            balance_handling,\n            proof_handling,\n        }\n    }\n\n    /// Creates a new [`BalanceRequest`].\n    pub fn from_entity_addr(\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        entity_addr: EntityAddr,\n        balance_handling: BalanceHandling,\n        proof_handling: ProofHandling,\n    ) -> Self {\n        BalanceRequest {\n            state_hash,\n            protocol_version,\n   
         identifier: BalanceIdentifier::Entity(entity_addr),\n            balance_handling,\n            proof_handling,\n        }\n    }\n\n    /// Creates a new [`BalanceRequest`].\n    pub fn from_internal(\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        balance_addr: URefAddr,\n        balance_handling: BalanceHandling,\n        proof_handling: ProofHandling,\n    ) -> Self {\n        BalanceRequest {\n            state_hash,\n            protocol_version,\n            identifier: BalanceIdentifier::Internal(balance_addr),\n            balance_handling,\n            proof_handling,\n        }\n    }\n\n    /// Returns a state hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Protocol version.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n\n    /// Returns the identifier [`BalanceIdentifier`].\n    pub fn identifier(&self) -> &BalanceIdentifier {\n        &self.identifier\n    }\n\n    /// Returns the block time.\n    pub fn balance_handling(&self) -> BalanceHandling {\n        self.balance_handling\n    }\n\n    /// Returns proof handling.\n    pub fn proof_handling(&self) -> ProofHandling {\n        self.proof_handling\n    }\n}\n\n/// Available balance checker.\npub trait AvailableBalanceChecker {\n    /// Calculate and return available balance.\n    fn available_balance(\n        &self,\n        block_time: BlockTime,\n        total_balance: U512,\n        gas_hold_balance_handling: GasHoldBalanceHandling,\n        processing_hold_balance_handling: ProcessingHoldBalanceHandling,\n    ) -> Result<U512, BalanceFailure> {\n        if self.is_empty() {\n            return Ok(total_balance);\n        }\n\n        let gas_held = match gas_hold_balance_handling.handling() {\n            HoldBalanceHandling::Accrued => self.accrued(BalanceHoldAddrTag::Gas),\n            HoldBalanceHandling::Amortized => {\n                let interval = 
gas_hold_balance_handling.interval();\n                self.amortization(BalanceHoldAddrTag::Gas, block_time, interval)?\n            }\n        };\n\n        let processing_held = match processing_hold_balance_handling.handling() {\n            HoldBalanceHandling::Accrued => self.accrued(BalanceHoldAddrTag::Processing),\n            HoldBalanceHandling::Amortized => {\n                let interval = processing_hold_balance_handling.interval();\n                self.amortization(BalanceHoldAddrTag::Processing, block_time, interval)?\n            }\n        };\n\n        let held = gas_held.saturating_add(processing_held);\n\n        if held > total_balance {\n            return Ok(U512::zero());\n        }\n\n        debug_assert!(\n            total_balance >= held,\n            \"it should not be possible to hold more than the total available\"\n        );\n        match total_balance.checked_sub(held) {\n            Some(available_balance) => Ok(available_balance),\n            None => {\n                error!(%held, %total_balance, \"held amount exceeds total balance, which should never occur.\");\n                Err(BalanceFailure::HeldExceedsTotal)\n            }\n        }\n    }\n\n    /// Calculates amortization.\n    fn amortization(\n        &self,\n        hold_kind: BalanceHoldAddrTag,\n        block_time: BlockTime,\n        interval: TimeDiff,\n    ) -> Result<U512, BalanceFailure> {\n        let mut held = U512::zero();\n        let block_time = block_time.value();\n        let interval = interval.millis();\n\n        for (hold_created_time, holds) in self.holds(hold_kind) {\n            let hold_created_time = hold_created_time.value();\n            if hold_created_time > block_time {\n                continue;\n            }\n            let expiry = hold_created_time.saturating_add(interval);\n            if block_time > expiry {\n                continue;\n            }\n            // total held amount\n            let held_ratio = 
Ratio::new_raw(\n                holds.values().copied().collect_vec().into_iter().sum(),\n                U512::one(),\n            );\n            // remaining time\n            let remaining_time = U512::from(expiry.saturating_sub(block_time));\n            // remaining time over total time\n            let ratio = Ratio::new_raw(remaining_time, U512::from(interval));\n            /*\n                EXAMPLE: 1000 held for 24 hours\n                if 1 hours has elapsed, held amount = 1000 * (23/24) == 958\n                if 2 hours has elapsed, held amount = 1000 * (22/24) == 916\n                ...\n                if 23 hours has elapsed, held amount    = 1000 * (1/24) == 41\n                if 23.50 hours has elapsed, held amount = 1000 * (1/48) == 20\n                if 23.75 hours has elapsed, held amount = 1000 * (1/96) == 10\n                                                (54000 ms / 5184000 ms)\n            */\n            match held_ratio.checked_mul(&ratio) {\n                Some(amortized) => held += amortized.to_integer(),\n                None => return Err(BalanceFailure::AmortizationFailure),\n            }\n        }\n        Ok(held)\n    }\n\n    /// Return accrued amount.\n    fn accrued(&self, hold_kind: BalanceHoldAddrTag) -> U512;\n\n    /// Return holds.\n    fn holds(&self, hold_kind: BalanceHoldAddrTag) -> BTreeMap<BlockTime, BalanceHolds>;\n\n    /// Return true if empty.\n    fn is_empty(&self) -> bool;\n}\n\n/// Balance holds with Merkle proofs.\npub type BalanceHolds = BTreeMap<BalanceHoldAddrTag, U512>;\n\nimpl AvailableBalanceChecker for BTreeMap<BlockTime, BalanceHolds> {\n    fn accrued(&self, hold_kind: BalanceHoldAddrTag) -> U512 {\n        self.values()\n            .filter_map(|holds| holds.get(&hold_kind).copied())\n            .collect_vec()\n            .into_iter()\n            .sum()\n    }\n\n    fn holds(&self, hold_kind: BalanceHoldAddrTag) -> BTreeMap<BlockTime, BalanceHolds> {\n        let mut ret = 
BTreeMap::new();\n        for (k, v) in self {\n            if let Some(hold) = v.get(&hold_kind) {\n                let mut inner = BTreeMap::new();\n                inner.insert(hold_kind, *hold);\n                ret.insert(*k, inner);\n            }\n        }\n        ret\n    }\n\n    fn is_empty(&self) -> bool {\n        self.is_empty()\n    }\n}\n\n/// Balance holds with Merkle proofs.\npub type BalanceHoldsWithProof =\n    BTreeMap<BalanceHoldAddrTag, (U512, TrieMerkleProof<Key, StoredValue>)>;\n\nimpl AvailableBalanceChecker for BTreeMap<BlockTime, BalanceHoldsWithProof> {\n    fn accrued(&self, hold_kind: BalanceHoldAddrTag) -> U512 {\n        self.values()\n            .filter_map(|holds| holds.get(&hold_kind))\n            .map(|(amount, _)| *amount)\n            .collect_vec()\n            .into_iter()\n            .sum()\n    }\n\n    fn holds(&self, hold_kind: BalanceHoldAddrTag) -> BTreeMap<BlockTime, BalanceHolds> {\n        let mut ret: BTreeMap<BlockTime, BalanceHolds> = BTreeMap::new();\n        for (block_time, holds_with_proof) in self {\n            let mut holds: BTreeMap<BalanceHoldAddrTag, U512> = BTreeMap::new();\n            for (addr, (held, _)) in holds_with_proof {\n                if addr == &hold_kind {\n                    match holds.entry(*addr) {\n                        Entry::Vacant(v) => v.insert(*held),\n                        Entry::Occupied(mut o) => &mut o.insert(*held),\n                    };\n                }\n            }\n            if !holds.is_empty() {\n                match ret.entry(*block_time) {\n                    Entry::Vacant(v) => v.insert(holds),\n                    Entry::Occupied(mut o) => &mut o.insert(holds),\n                };\n            }\n        }\n        ret\n    }\n\n    fn is_empty(&self) -> bool {\n        self.is_empty()\n    }\n}\n\n/// Proofs result.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub enum ProofsResult {\n    /// Not requested.\n    NotRequested {\n        /// Any 
time-relevant active holds on the balance, without proofs.\n        balance_holds: BTreeMap<BlockTime, BalanceHolds>,\n    },\n    /// Proofs.\n    Proofs {\n        /// A proof that the given value is present in the Merkle trie.\n        total_balance_proof: Box<TrieMerkleProof<Key, StoredValue>>,\n        /// Any time-relevant active holds on the balance, with proofs..\n        balance_holds: BTreeMap<BlockTime, BalanceHoldsWithProof>,\n    },\n}\n\nimpl ProofsResult {\n    /// Returns total balance proof, if any.\n    pub fn total_balance_proof(&self) -> Option<&TrieMerkleProof<Key, StoredValue>> {\n        match self {\n            ProofsResult::NotRequested { .. } => None,\n            ProofsResult::Proofs {\n                total_balance_proof,\n                ..\n            } => Some(total_balance_proof),\n        }\n    }\n\n    /// Returns balance holds, if any.\n    pub fn balance_holds_with_proof(&self) -> Option<&BTreeMap<BlockTime, BalanceHoldsWithProof>> {\n        match self {\n            ProofsResult::NotRequested { .. } => None,\n            ProofsResult::Proofs { balance_holds, .. } => Some(balance_holds),\n        }\n    }\n\n    /// Returns balance holds, if any.\n    pub fn balance_holds(&self) -> Option<&BTreeMap<BlockTime, BalanceHolds>> {\n        match self {\n            ProofsResult::NotRequested { balance_holds } => Some(balance_holds),\n            ProofsResult::Proofs { .. } => None,\n        }\n    }\n\n    /// Returns the total held amount.\n    pub fn total_held_amount(&self) -> U512 {\n        match self {\n            ProofsResult::NotRequested { balance_holds } => balance_holds\n                .values()\n                .flat_map(|holds| holds.values().copied())\n                .collect_vec()\n                .into_iter()\n                .sum(),\n            ProofsResult::Proofs { balance_holds, .. 
} => balance_holds\n                .values()\n                .flat_map(|holds| holds.values().map(|(v, _)| *v))\n                .collect_vec()\n                .into_iter()\n                .sum(),\n        }\n    }\n\n    /// Returns the available balance, calculated using imputed values.\n    #[allow(clippy::result_unit_err)]\n    pub fn available_balance(\n        &self,\n        block_time: BlockTime,\n        total_balance: U512,\n        gas_hold_balance_handling: GasHoldBalanceHandling,\n        processing_hold_balance_handling: ProcessingHoldBalanceHandling,\n    ) -> Result<U512, BalanceFailure> {\n        match self {\n            ProofsResult::NotRequested { balance_holds } => balance_holds.available_balance(\n                block_time,\n                total_balance,\n                gas_hold_balance_handling,\n                processing_hold_balance_handling,\n            ),\n            ProofsResult::Proofs { balance_holds, .. } => balance_holds.available_balance(\n                block_time,\n                total_balance,\n                gas_hold_balance_handling,\n                processing_hold_balance_handling,\n            ),\n        }\n    }\n}\n\n/// Balance failure.\n#[derive(Debug, Clone)]\npub enum BalanceFailure {\n    /// Failed to calculate amortization (checked multiplication).\n    AmortizationFailure,\n    /// Held amount exceeds total balance, which should never occur.\n    HeldExceedsTotal,\n}\n\nimpl Display for BalanceFailure {\n    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {\n        match self {\n            BalanceFailure::AmortizationFailure => {\n                write!(\n                    f,\n                    \"AmortizationFailure: failed to calculate amortization (checked multiplication).\"\n                )\n            }\n            BalanceFailure::HeldExceedsTotal => {\n                write!(\n                    f,\n                    \"HeldExceedsTotal: held amount exceeds total balance, 
which should never occur.\"\n                )\n            }\n        }\n    }\n}\n\n/// Result enum that represents all possible outcomes of a balance request.\n#[derive(Debug, Clone)]\npub enum BalanceResult {\n    /// Returned if a passed state root hash is not found.\n    RootNotFound,\n    /// A query returned a balance.\n    Success {\n        /// The purse address.\n        purse_addr: URefAddr,\n        /// The purses total balance, not considering holds.\n        total_balance: U512,\n        /// The available balance (total balance - sum of all active holds).\n        available_balance: U512,\n        /// Proofs result.\n        proofs_result: ProofsResult,\n    },\n    /// Failure.\n    Failure(TrackingCopyError),\n}\n\nimpl BalanceResult {\n    /// Returns the purse address for a [`BalanceResult::Success`] variant.\n    pub fn purse_addr(&self) -> Option<URefAddr> {\n        match self {\n            BalanceResult::Success { purse_addr, .. } => Some(*purse_addr),\n            _ => None,\n        }\n    }\n\n    /// Returns the total balance for a [`BalanceResult::Success`] variant.\n    pub fn total_balance(&self) -> Option<&U512> {\n        match self {\n            BalanceResult::Success { total_balance, .. } => Some(total_balance),\n            _ => None,\n        }\n    }\n\n    /// Returns the available balance for a [`BalanceResult::Success`] variant.\n    pub fn available_balance(&self) -> Option<&U512> {\n        match self {\n            BalanceResult::Success {\n                available_balance, ..\n            } => Some(available_balance),\n            _ => None,\n        }\n    }\n\n    /// Returns the Merkle proofs, if any.\n    pub fn proofs_result(self) -> Option<ProofsResult> {\n        match self {\n            BalanceResult::Success { proofs_result, .. 
} => Some(proofs_result),\n            _ => None,\n        }\n    }\n\n    /// Is the available balance sufficient to cover the cost?\n    pub fn is_sufficient(&self, cost: U512) -> bool {\n        match self {\n            BalanceResult::RootNotFound | BalanceResult::Failure(_) => false,\n            BalanceResult::Success {\n                available_balance, ..\n            } => available_balance >= &cost,\n        }\n    }\n\n    /// Was the balance request successful?\n    pub fn is_success(&self) -> bool {\n        match self {\n            BalanceResult::RootNotFound | BalanceResult::Failure(_) => false,\n            BalanceResult::Success { .. } => true,\n        }\n    }\n\n    /// Tracking copy error, if any.\n    pub fn error(&self) -> Option<&TrackingCopyError> {\n        match self {\n            BalanceResult::RootNotFound | BalanceResult::Success { .. } => None,\n            BalanceResult::Failure(err) => Some(err),\n        }\n    }\n}\n\nimpl From<TrackingCopyError> for BalanceResult {\n    fn from(tce: TrackingCopyError) -> Self {\n        BalanceResult::Failure(tce)\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/balance_hold.rs",
    "content": "use crate::{\n    data_access_layer::{balance::BalanceFailure, BalanceIdentifier},\n    tracking_copy::TrackingCopyError,\n};\nuse casper_types::{\n    account::AccountHash,\n    execution::Effects,\n    system::mint::{BalanceHoldAddr, BalanceHoldAddrTag},\n    Digest, ProtocolVersion, StoredValue, U512,\n};\nuse std::fmt::{Display, Formatter};\nuse thiserror::Error;\n\n/// Balance hold kind.\n#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]\npub enum BalanceHoldKind {\n    /// All balance holds.\n    #[default]\n    All,\n    /// Selection of a specific kind of balance.\n    Tag(BalanceHoldAddrTag),\n}\n\nimpl BalanceHoldKind {\n    /// Returns true of imputed tag applies to instance.\n    pub fn matches(&self, balance_hold_addr_tag: BalanceHoldAddrTag) -> bool {\n        match self {\n            BalanceHoldKind::All => true,\n            BalanceHoldKind::Tag(tag) => tag == &balance_hold_addr_tag,\n        }\n    }\n}\n\n/// Balance hold mode.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub enum BalanceHoldMode {\n    /// Balance hold request.\n    Hold {\n        /// Balance identifier.\n        identifier: BalanceIdentifier,\n        /// Hold amount.\n        hold_amount: U512,\n        /// How should insufficient balance be handled.\n        insufficient_handling: InsufficientBalanceHandling,\n    },\n    /// Clear balance holds.\n    Clear {\n        /// Identifier of balance to be cleared of holds.\n        identifier: BalanceIdentifier,\n    },\n}\n\nimpl Default for BalanceHoldMode {\n    fn default() -> Self {\n        BalanceHoldMode::Hold {\n            insufficient_handling: InsufficientBalanceHandling::HoldRemaining,\n            hold_amount: U512::zero(),\n            identifier: BalanceIdentifier::Account(AccountHash::default()),\n        }\n    }\n}\n\n/// How to handle available balance is less than hold amount?\n#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]\npub enum InsufficientBalanceHandling {\n    /// Hold however 
much balance remains.\n    #[default]\n    HoldRemaining,\n    /// No operation. Aka, do not place a hold.\n    Noop,\n}\n\n/// Balance hold request.\n#[derive(Debug, Clone, PartialEq, Eq, Default)]\npub struct BalanceHoldRequest {\n    state_hash: Digest,\n    protocol_version: ProtocolVersion,\n    hold_kind: BalanceHoldKind,\n    hold_mode: BalanceHoldMode,\n}\n\nimpl BalanceHoldRequest {\n    /// Creates a new [`BalanceHoldRequest`] for adding a gas balance hold.\n    #[allow(clippy::too_many_arguments)]\n    pub fn new_gas_hold(\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        identifier: BalanceIdentifier,\n        hold_amount: U512,\n        insufficient_handling: InsufficientBalanceHandling,\n    ) -> Self {\n        let hold_kind = BalanceHoldKind::Tag(BalanceHoldAddrTag::Gas);\n        let hold_mode = BalanceHoldMode::Hold {\n            identifier,\n            hold_amount,\n            insufficient_handling,\n        };\n        BalanceHoldRequest {\n            state_hash,\n            protocol_version,\n            hold_kind,\n            hold_mode,\n        }\n    }\n\n    /// Creates a new [`BalanceHoldRequest`] for adding a processing balance hold.\n    #[allow(clippy::too_many_arguments)]\n    pub fn new_processing_hold(\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        identifier: BalanceIdentifier,\n        hold_amount: U512,\n        insufficient_handling: InsufficientBalanceHandling,\n    ) -> Self {\n        let hold_kind = BalanceHoldKind::Tag(BalanceHoldAddrTag::Processing);\n        let hold_mode = BalanceHoldMode::Hold {\n            identifier,\n            hold_amount,\n            insufficient_handling,\n        };\n        BalanceHoldRequest {\n            state_hash,\n            protocol_version,\n            hold_kind,\n            hold_mode,\n        }\n    }\n\n    /// Creates a new [`BalanceHoldRequest`] for clearing holds.\n    pub fn new_clear(\n        
state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        hold_kind: BalanceHoldKind,\n        identifier: BalanceIdentifier,\n    ) -> Self {\n        let hold_mode = BalanceHoldMode::Clear { identifier };\n        BalanceHoldRequest {\n            state_hash,\n            protocol_version,\n            hold_kind,\n            hold_mode,\n        }\n    }\n\n    /// Returns a state hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Protocol version.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n\n    /// Balance hold kind.\n    pub fn balance_hold_kind(&self) -> BalanceHoldKind {\n        self.hold_kind\n    }\n\n    /// Balance hold mode.\n    pub fn balance_hold_mode(&self) -> BalanceHoldMode {\n        self.hold_mode.clone()\n    }\n}\n\n/// Possible balance hold errors.\n#[derive(Error, Debug, Clone)]\n#[non_exhaustive]\npub enum BalanceHoldError {\n    /// Tracking copy error.\n    TrackingCopy(TrackingCopyError),\n    /// Balance error.\n    Balance(BalanceFailure),\n    /// Insufficient balance error.\n    InsufficientBalance {\n        /// Remaining balance error.\n        remaining_balance: U512,\n    },\n    /// Unexpected wildcard variant error.\n    UnexpectedWildcardVariant, // programmer error,\n    /// Unexpected hold value error.\n    UnexpectedHoldValue(StoredValue),\n}\n\nimpl From<BalanceFailure> for BalanceHoldError {\n    fn from(be: BalanceFailure) -> Self {\n        BalanceHoldError::Balance(be)\n    }\n}\n\nimpl From<TrackingCopyError> for BalanceHoldError {\n    fn from(tce: TrackingCopyError) -> Self {\n        BalanceHoldError::TrackingCopy(tce)\n    }\n}\n\nimpl Display for BalanceHoldError {\n    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {\n        match self {\n            BalanceHoldError::TrackingCopy(err) => {\n                write!(f, \"TrackingCopy: {:?}\", err)\n            }\n            
BalanceHoldError::InsufficientBalance { remaining_balance } => {\n                write!(f, \"InsufficientBalance: {}\", remaining_balance)\n            }\n            BalanceHoldError::UnexpectedWildcardVariant => {\n                write!(\n                    f,\n                    \"UnexpectedWildcardVariant: unsupported use of BalanceHoldKind::All\"\n                )\n            }\n            BalanceHoldError::Balance(be) => Display::fmt(be, f),\n            BalanceHoldError::UnexpectedHoldValue(value) => {\n                write!(f, \"Found an unexpected hold value in storage: {:?}\", value,)\n            }\n        }\n    }\n}\n\n/// Result enum that represents all possible outcomes of a balance hold request.\n#[derive(Debug)]\npub enum BalanceHoldResult {\n    /// Returned if a passed state root hash is not found.\n    RootNotFound,\n    /// Returned if global state does not have an entry for block time.\n    BlockTimeNotFound,\n    /// Balance hold successfully placed.\n    Success {\n        /// Hold addresses, if any.\n        holds: Option<Vec<BalanceHoldAddr>>,\n        /// Purse total balance.\n        total_balance: Box<U512>,\n        /// Purse available balance after hold placed.\n        available_balance: Box<U512>,\n        /// How much were we supposed to hold?\n        hold: Box<U512>,\n        /// How much did we actually hold?\n        held: Box<U512>,\n        /// Effects of balance interaction.\n        effects: Box<Effects>,\n    },\n    /// Failed to place balance hold.\n    Failure(BalanceHoldError),\n}\n\nimpl BalanceHoldResult {\n    /// Success ctor.\n    pub fn success(\n        holds: Option<Vec<BalanceHoldAddr>>,\n        total_balance: U512,\n        available_balance: U512,\n        hold: U512,\n        held: U512,\n        effects: Effects,\n    ) -> Self {\n        BalanceHoldResult::Success {\n            holds,\n            total_balance: Box::new(total_balance),\n            available_balance: 
Box::new(available_balance),\n            hold: Box::new(hold),\n            held: Box::new(held),\n            effects: Box::new(effects),\n        }\n    }\n\n    /// Returns the total balance for a [`BalanceHoldResult::Success`] variant.\n    pub fn total_balance(&self) -> Option<&U512> {\n        match self {\n            BalanceHoldResult::Success { total_balance, .. } => Some(total_balance),\n            _ => None,\n        }\n    }\n\n    /// Returns the available balance for a [`BalanceHoldResult::Success`] variant.\n    pub fn available_balance(&self) -> Option<&U512> {\n        match self {\n            BalanceHoldResult::Success {\n                available_balance, ..\n            } => Some(available_balance),\n            _ => None,\n        }\n    }\n\n    /// Returns the held amount for a [`BalanceHoldResult::Success`] variant.\n    pub fn held(&self) -> Option<&U512> {\n        match self {\n            BalanceHoldResult::Success { held, .. } => Some(held),\n            _ => None,\n        }\n    }\n\n    /// Hold address, if any.\n    pub fn holds(&self) -> Option<Vec<BalanceHoldAddr>> {\n        match self {\n            BalanceHoldResult::RootNotFound\n            | BalanceHoldResult::BlockTimeNotFound\n            | BalanceHoldResult::Failure(_) => None,\n            BalanceHoldResult::Success { holds, .. } => holds.clone(),\n        }\n    }\n\n    /// Does this result contain any hold addresses?\n    pub fn has_holds(&self) -> bool {\n        match self.holds() {\n            None => false,\n            Some(holds) => !holds.is_empty(),\n        }\n    }\n\n    /// Was the hold fully covered?\n    pub fn is_fully_covered(&self) -> bool {\n        match self {\n            BalanceHoldResult::RootNotFound\n            | BalanceHoldResult::BlockTimeNotFound\n            | BalanceHoldResult::Failure(_) => false,\n            BalanceHoldResult::Success { hold, held, .. 
} => hold == held,\n        }\n    }\n\n    /// Was the hold successful?\n    pub fn is_success(&self) -> bool {\n        matches!(self, BalanceHoldResult::Success { .. })\n    }\n\n    /// Was the root not found?\n    pub fn is_root_not_found(&self) -> bool {\n        matches!(self, BalanceHoldResult::RootNotFound)\n    }\n\n    /// The effects, if any.\n    pub fn effects(&self) -> Effects {\n        match self {\n            BalanceHoldResult::RootNotFound\n            | BalanceHoldResult::BlockTimeNotFound\n            | BalanceHoldResult::Failure(_) => Effects::new(),\n            BalanceHoldResult::Success { effects, .. } => *effects.clone(),\n        }\n    }\n\n    /// Error message.\n    pub fn error_message(&self) -> Option<String> {\n        let msg = match self {\n            BalanceHoldResult::Success { hold, held, .. } => {\n                if hold == held {\n                    return None;\n                } else {\n                    format!(\n                        \"insufficient balance to cover hold amount: {}, held remaining amount: {}\",\n                        hold, held\n                    )\n                }\n            }\n            BalanceHoldResult::RootNotFound => \"root not found\".to_string(),\n            BalanceHoldResult::BlockTimeNotFound => \"block time not found\".to_string(),\n            BalanceHoldResult::Failure(bhe) => {\n                format!(\"{:?}\", bhe)\n            }\n        };\n        Some(msg)\n    }\n}\n\nimpl From<BalanceFailure> for BalanceHoldResult {\n    fn from(be: BalanceFailure) -> Self {\n        BalanceHoldResult::Failure(be.into())\n    }\n}\n\nimpl From<TrackingCopyError> for BalanceHoldResult {\n    fn from(tce: TrackingCopyError) -> Self {\n        BalanceHoldResult::Failure(tce.into())\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/balance_identifier_purse.rs",
    "content": "use crate::{data_access_layer::BalanceIdentifier, tracking_copy::TrackingCopyError};\nuse casper_types::{Digest, ProtocolVersion, URefAddr};\n\n/// Represents a balance identifier purse request.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct BalanceIdentifierPurseRequest {\n    state_hash: Digest,\n    protocol_version: ProtocolVersion,\n    identifier: BalanceIdentifier,\n}\n\nimpl BalanceIdentifierPurseRequest {\n    /// Creates a new [`BalanceIdentifierPurseRequest`].\n    pub fn new(\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        identifier: BalanceIdentifier,\n    ) -> Self {\n        BalanceIdentifierPurseRequest {\n            state_hash,\n            protocol_version,\n            identifier,\n        }\n    }\n\n    /// Returns a state hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Protocol version.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n\n    /// Returns the identifier [`BalanceIdentifier`].\n    pub fn identifier(&self) -> &BalanceIdentifier {\n        &self.identifier\n    }\n}\n\n/// Result enum that represents all possible outcomes of a balance request.\n#[derive(Debug, Clone)]\npub enum BalanceIdentifierPurseResult {\n    /// Returned if a passed state root hash is not found.\n    RootNotFound,\n    /// A query returned a balance.\n    Success {\n        /// The purse address.\n        purse_addr: URefAddr,\n    },\n    /// Failure.\n    Failure(TrackingCopyError),\n}\n\nimpl BalanceIdentifierPurseResult {\n    /// Returns the purse address for a [`BalanceIdentifierPurseResult::Success`] variant.\n    pub fn purse_addr(&self) -> Option<URefAddr> {\n        match self {\n            BalanceIdentifierPurseResult::Success { purse_addr, .. 
} => Some(*purse_addr),\n            _ => None,\n        }\n    }\n\n    /// Was the balance request successful?\n    pub fn is_success(&self) -> bool {\n        match self {\n            BalanceIdentifierPurseResult::RootNotFound\n            | BalanceIdentifierPurseResult::Failure(_) => false,\n            BalanceIdentifierPurseResult::Success { .. } => true,\n        }\n    }\n\n    /// Tracking copy error, if any.\n    pub fn error(&self) -> Option<&TrackingCopyError> {\n        match self {\n            BalanceIdentifierPurseResult::RootNotFound\n            | BalanceIdentifierPurseResult::Success { .. } => None,\n            BalanceIdentifierPurseResult::Failure(err) => Some(err),\n        }\n    }\n}\n\nimpl From<TrackingCopyError> for BalanceIdentifierPurseResult {\n    fn from(tce: TrackingCopyError) -> Self {\n        BalanceIdentifierPurseResult::Failure(tce)\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/bids.rs",
    "content": "//! Support for obtaining current bids from the auction system.\nuse crate::tracking_copy::TrackingCopyError;\n\nuse casper_types::{system::auction::BidKind, Digest};\n\n/// Represents a request to obtain current bids in the auction system.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct BidsRequest {\n    state_hash: Digest,\n}\n\nimpl BidsRequest {\n    /// Creates new request.\n    pub fn new(state_hash: Digest) -> Self {\n        BidsRequest { state_hash }\n    }\n\n    /// Returns state root hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n}\n\n/// Represents a result of a `get_bids` request.\n#[derive(Debug)]\npub enum BidsResult {\n    /// Invalid state root hash.\n    RootNotFound,\n    /// Contains current bids returned from the global state.\n    Success {\n        /// Current bids.\n        bids: Vec<BidKind>,\n    },\n    /// Failure.\n    Failure(TrackingCopyError),\n}\n\nimpl BidsResult {\n    /// Returns wrapped [`Vec<BidKind>`] if this represents a successful query result.\n    pub fn into_option(self) -> Option<Vec<BidKind>> {\n        if let Self::Success { bids } = self {\n            Some(bids)\n        } else {\n            None\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/block_global.rs",
    "content": "use crate::tracking_copy::TrackingCopyError;\nuse casper_types::{execution::Effects, BlockTime, Digest, ProtocolVersion};\nuse std::fmt::{Display, Formatter};\nuse thiserror::Error;\n\n/// Block global kind.\n#[derive(Debug, Copy, Clone, PartialEq, Eq)]\npub enum BlockGlobalKind {\n    /// Block time.\n    BlockTime(BlockTime),\n    /// Message count.\n    MessageCount(u64),\n    /// Protocol version.\n    ProtocolVersion(ProtocolVersion),\n    /// Addressable entity flag.\n    AddressableEntity(bool),\n}\n\nimpl Default for BlockGlobalKind {\n    fn default() -> Self {\n        BlockGlobalKind::BlockTime(BlockTime::default())\n    }\n}\n\n/// Block global request.\n#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]\npub struct BlockGlobalRequest {\n    state_hash: Digest,\n    protocol_version: ProtocolVersion,\n    block_global_kind: BlockGlobalKind,\n}\n\nimpl BlockGlobalRequest {\n    /// Returns block time setting request.\n    pub fn block_time(\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        block_time: BlockTime,\n    ) -> Self {\n        let block_global_kind = BlockGlobalKind::BlockTime(block_time);\n        BlockGlobalRequest {\n            state_hash,\n            protocol_version,\n            block_global_kind,\n        }\n    }\n\n    /// Returns protocol version setting request.\n    pub fn set_protocol_version(state_hash: Digest, protocol_version: ProtocolVersion) -> Self {\n        let block_global_kind = BlockGlobalKind::ProtocolVersion(protocol_version);\n        BlockGlobalRequest {\n            state_hash,\n            protocol_version,\n            block_global_kind,\n        }\n    }\n\n    /// Returns addressable entity flag setting request.\n    pub fn set_addressable_entity(\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        addressable_entity: bool,\n    ) -> Self {\n        let block_global_kind = BlockGlobalKind::AddressableEntity(addressable_entity);\n 
       BlockGlobalRequest {\n            state_hash,\n            protocol_version,\n            block_global_kind,\n        }\n    }\n\n    /// Returns state hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Returns protocol version.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n\n    /// Returns block global kind.\n    pub fn block_global_kind(&self) -> BlockGlobalKind {\n        self.block_global_kind\n    }\n}\n\n/// Block global result.\n#[derive(Error, Debug, Clone)]\npub enum BlockGlobalResult {\n    /// Returned if a passed state root hash is not found.\n    RootNotFound,\n    /// Failed to store block global data.\n    Failure(TrackingCopyError),\n    /// Successfully stored block global data.\n    Success {\n        /// State hash after data committed to the global state.\n        post_state_hash: Digest,\n        /// The effects of putting the data to global state.\n        effects: Box<Effects>,\n    },\n}\n\nimpl Display for BlockGlobalResult {\n    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {\n        match self {\n            BlockGlobalResult::RootNotFound => f.write_str(\"root not found\"),\n            BlockGlobalResult::Failure(tce) => {\n                write!(f, \"failed {}\", tce)\n            }\n            BlockGlobalResult::Success { .. } => f.write_str(\"success\"),\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/block_rewards.rs",
    "content": "use std::collections::BTreeMap;\n\nuse thiserror::Error;\n\nuse casper_types::{\n    execution::Effects, system::auction::Error as AuctionError, BlockTime, Digest, ProtocolVersion,\n    PublicKey, U512,\n};\n\nuse crate::{\n    system::{runtime_native::Config, transfer::TransferError},\n    tracking_copy::TrackingCopyError,\n};\n\n/// Block rewards request.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct BlockRewardsRequest {\n    config: Config,\n    state_hash: Digest,\n    protocol_version: ProtocolVersion,\n    rewards: BTreeMap<PublicKey, Vec<U512>>,\n    block_time: BlockTime,\n}\n\nimpl BlockRewardsRequest {\n    /// Ctor.\n    pub fn new(\n        config: Config,\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        block_time: BlockTime,\n        rewards: BTreeMap<PublicKey, Vec<U512>>,\n    ) -> Self {\n        BlockRewardsRequest {\n            config,\n            state_hash,\n            protocol_version,\n            rewards,\n            block_time,\n        }\n    }\n\n    /// Returns config.\n    pub fn config(&self) -> &Config {\n        &self.config\n    }\n\n    /// Returns state_hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Returns protocol_version.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n\n    /// Returns rewards.\n    pub fn rewards(&self) -> &BTreeMap<PublicKey, Vec<U512>> {\n        &self.rewards\n    }\n\n    /// Returns block time.\n    pub fn block_time(&self) -> BlockTime {\n        self.block_time\n    }\n}\n\n/// Block rewards error.\n#[derive(Clone, Error, Debug)]\npub enum BlockRewardsError {\n    /// Undistributed rewards error.\n    #[error(\"Undistributed rewards\")]\n    UndistributedRewards,\n    /// Tracking copy error.\n    #[error(transparent)]\n    TrackingCopy(TrackingCopyError),\n    /// Registry entry not found error.\n    #[error(\"Registry entry not found: {0}\")]\n    
RegistryEntryNotFound(String),\n    /// Transfer error.\n    #[error(transparent)]\n    Transfer(TransferError),\n    /// Auction error.\n    #[error(\"Auction error: {0}\")]\n    Auction(AuctionError),\n}\n\n/// Block reward result.\n#[derive(Debug, Clone)]\npub enum BlockRewardsResult {\n    /// Root not found in global state.\n    RootNotFound,\n    /// Block rewards failure error.\n    Failure(BlockRewardsError),\n    /// Success result.\n    Success {\n        /// State hash after distribution outcome is committed to the global state.\n        post_state_hash: Digest,\n        /// Effects of the distribution process.\n        effects: Effects,\n    },\n}\n\nimpl BlockRewardsResult {\n    /// Returns true if successful, else false.\n    pub fn is_success(&self) -> bool {\n        matches!(self, BlockRewardsResult::Success { .. })\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/contract.rs",
    "content": "use crate::tracking_copy::TrackingCopyError;\nuse casper_types::{Contract, Digest, Key};\n\n/// Represents a request to obtain contract.\npub struct ContractRequest {\n    state_hash: Digest,\n    key: Key,\n}\n\nimpl ContractRequest {\n    /// ctor\n    pub fn new(state_hash: Digest, key: Key) -> Self {\n        ContractRequest { state_hash, key }\n    }\n\n    /// Returns key.\n    pub fn key(&self) -> Key {\n        self.key\n    }\n    /// Returns state root hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n}\n\n/// Represents a result of a `contract` request.\n#[derive(Debug)]\npub enum ContractResult {\n    /// Invalid state root hash.\n    RootNotFound,\n    /// Value not found.\n    ValueNotFound(String),\n    /// This variant will be returned if the contract was found.\n    Success {\n        /// A contract.\n        contract: Contract,\n    },\n    /// Failure result.\n    Failure(TrackingCopyError),\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/entry_points.rs",
    "content": "use crate::tracking_copy::TrackingCopyError;\nuse casper_types::{Digest, EntryPointValue, HashAddr};\n\n/// Represents a request to obtain entry point.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct EntryPointRequest {\n    state_hash: Digest,\n    entry_point_name: String,\n    contract_hash: HashAddr,\n}\n\nimpl EntryPointRequest {\n    /// ctor\n    pub fn new(state_hash: Digest, entry_point_name: String, contract_hash: HashAddr) -> Self {\n        EntryPointRequest {\n            state_hash,\n            entry_point_name,\n            contract_hash,\n        }\n    }\n\n    /// Returns state root hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Returns entry_point_name.\n    pub fn entry_point_name(&self) -> &str {\n        &self.entry_point_name\n    }\n\n    /// Returns contract_hash.\n    pub fn contract_hash(&self) -> HashAddr {\n        self.contract_hash\n    }\n}\n\nimpl From<EntryPointExistsRequest> for EntryPointRequest {\n    fn from(value: EntryPointExistsRequest) -> Self {\n        EntryPointRequest {\n            state_hash: value.state_hash,\n            entry_point_name: value.entry_point_name,\n            contract_hash: value.contract_hash,\n        }\n    }\n}\n\n/// Represents a result of a `entry_point` request.\n#[derive(Debug)]\npub enum EntryPointResult {\n    /// Invalid state root hash.\n    RootNotFound,\n    /// Value not found.\n    ValueNotFound(String),\n    /// Contains an addressable entity from global state.\n    Success {\n        /// An addressable entity.\n        entry_point: EntryPointValue,\n    },\n    /// Failure result.\n    Failure(TrackingCopyError),\n}\n\n/// Represents a request to check entry point existence.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct EntryPointExistsRequest {\n    state_hash: Digest,\n    entry_point_name: String,\n    contract_hash: HashAddr,\n}\n\nimpl EntryPointExistsRequest {\n    /// ctor\n    pub fn new(state_hash: 
Digest, entry_point_name: String, contract_hash: HashAddr) -> Self {\n        EntryPointExistsRequest {\n            state_hash,\n            entry_point_name,\n            contract_hash,\n        }\n    }\n\n    /// Returns state root hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Returns entry_point_name.\n    pub fn entry_point_name(&self) -> &str {\n        &self.entry_point_name\n    }\n\n    /// Returns contract_hash.\n    pub fn contract_hash(&self) -> HashAddr {\n        self.contract_hash\n    }\n}\n\n/// Represents a result of `entry_point_exists` request.\n#[derive(Debug)]\npub enum EntryPointExistsResult {\n    /// Invalid state root hash.\n    RootNotFound,\n    /// Value not found.\n    ValueNotFound(String),\n    /// This variant will be returned if the entry point was found.\n    Success,\n    /// Failure result.\n    Failure(TrackingCopyError),\n}\n\nimpl EntryPointExistsResult {\n    /// Returns `true` if the result is `Success`.\n    pub fn is_success(self) -> bool {\n        matches!(self, Self::Success { .. })\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/era_validators.rs",
    "content": "//! Support for querying era validators.\n\nuse crate::tracking_copy::TrackingCopyError;\nuse casper_types::{system::auction::EraValidators, Digest};\nuse std::fmt::{Display, Formatter};\n\n/// Request for era validators.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct EraValidatorsRequest {\n    state_hash: Digest,\n}\n\nimpl EraValidatorsRequest {\n    /// Constructs a new EraValidatorsRequest.\n    pub fn new(state_hash: Digest) -> Self {\n        EraValidatorsRequest { state_hash }\n    }\n\n    /// Get the state hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n}\n\n/// Result enum that represents all possible outcomes of a era validators request.\n#[derive(Debug)]\npub enum EraValidatorsResult {\n    /// Returned if auction is not found. This is a catastrophic outcome.\n    AuctionNotFound,\n    /// Returned if a passed state root hash is not found. This is recoverable.\n    RootNotFound,\n    /// Value not found. This is not erroneous if the record does not exist.\n    ValueNotFound(String),\n    /// There is no systemic issue, but the query itself errored.\n    Failure(TrackingCopyError),\n    /// The query succeeded.\n    Success {\n        /// Era Validators.\n        era_validators: EraValidators,\n    },\n}\n\nimpl EraValidatorsResult {\n    /// Returns true if success.\n    pub fn is_success(&self) -> bool {\n        matches!(self, EraValidatorsResult::Success { .. 
})\n    }\n\n    /// Takes era validators.\n    pub fn take_era_validators(self) -> Option<EraValidators> {\n        match self {\n            EraValidatorsResult::AuctionNotFound\n            | EraValidatorsResult::RootNotFound\n            | EraValidatorsResult::ValueNotFound(_)\n            | EraValidatorsResult::Failure(_) => None,\n            EraValidatorsResult::Success { era_validators } => Some(era_validators),\n        }\n    }\n}\n\nimpl Display for EraValidatorsResult {\n    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {\n        match self {\n            EraValidatorsResult::AuctionNotFound => write!(f, \"system auction not found\"),\n            EraValidatorsResult::RootNotFound => write!(f, \"state root not found\"),\n            EraValidatorsResult::ValueNotFound(msg) => write!(f, \"value not found: {}\", msg),\n            EraValidatorsResult::Failure(tce) => write!(f, \"{}\", tce),\n            EraValidatorsResult::Success { .. } => {\n                write!(f, \"success\")\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/execution_results_checksum.rs",
    "content": "use crate::tracking_copy::TrackingCopyError;\nuse casper_types::Digest;\n\n/// Execution results checksum literal.\npub const EXECUTION_RESULTS_CHECKSUM_NAME: &str = \"execution_results_checksum\";\n\n/// Represents a request to obtain current execution results checksum.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct ExecutionResultsChecksumRequest {\n    state_hash: Digest,\n}\n\nimpl ExecutionResultsChecksumRequest {\n    /// Creates new request.\n    pub fn new(state_hash: Digest) -> Self {\n        ExecutionResultsChecksumRequest { state_hash }\n    }\n\n    /// Returns state root hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n}\n\n/// Represents a result of a `execution_results_checksum` request.\n#[derive(Debug)]\npub enum ExecutionResultsChecksumResult {\n    /// Invalid state root hash.\n    RootNotFound,\n    /// Returned if system registry is not found.\n    RegistryNotFound,\n    /// Returned if checksum is not found.\n    ChecksumNotFound,\n    /// Contains current checksum returned from the global state.\n    Success {\n        /// Current checksum.\n        checksum: Digest,\n    },\n    /// Error occurred.\n    Failure(TrackingCopyError),\n}\n\nimpl ExecutionResultsChecksumResult {\n    /// Returns a Result matching the original api for this functionality.\n    pub fn as_legacy(&self) -> Result<Option<Digest>, TrackingCopyError> {\n        match self {\n            ExecutionResultsChecksumResult::RootNotFound\n            | ExecutionResultsChecksumResult::RegistryNotFound\n            | ExecutionResultsChecksumResult::ChecksumNotFound => Ok(None),\n            ExecutionResultsChecksumResult::Success { checksum } => Ok(Some(*checksum)),\n            ExecutionResultsChecksumResult::Failure(err) => Err(err.clone()),\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/fee.rs",
    "content": "use std::collections::BTreeSet;\nuse thiserror::Error;\n\nuse crate::system::{\n    runtime_native::{Config as NativeRuntimeConfig, TransferConfig},\n    transfer::TransferError,\n};\nuse casper_types::{\n    account::AccountHash, execution::Effects, BlockTime, Digest, FeeHandling, ProtocolVersion,\n    Transfer,\n};\n\nuse crate::tracking_copy::TrackingCopyError;\n\n/// Fee request.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct FeeRequest {\n    config: NativeRuntimeConfig,\n    state_hash: Digest,\n    protocol_version: ProtocolVersion,\n    block_time: BlockTime,\n}\n\nimpl FeeRequest {\n    /// Ctor.\n    pub fn new(\n        config: NativeRuntimeConfig,\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        block_time: BlockTime,\n    ) -> Self {\n        FeeRequest {\n            config,\n            state_hash,\n            protocol_version,\n            block_time,\n        }\n    }\n\n    /// Returns config.\n    pub fn config(&self) -> &NativeRuntimeConfig {\n        &self.config\n    }\n\n    /// Returns state_hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Returns protocol_version.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n\n    /// Returns fee handling setting.\n    pub fn fee_handling(&self) -> &FeeHandling {\n        self.config.fee_handling()\n    }\n\n    /// Returns block time.\n    pub fn block_time(&self) -> BlockTime {\n        self.block_time\n    }\n\n    /// Returns administrative accounts, if any.\n    pub fn administrative_accounts(&self) -> Option<&BTreeSet<AccountHash>> {\n        match self.config.transfer_config() {\n            TransferConfig::Administered {\n                administrative_accounts,\n                ..\n            } => Some(administrative_accounts),\n            TransferConfig::Unadministered => None,\n        }\n    }\n\n    /// Should we attempt to distribute fees?\n    
pub fn should_distribute_fees(&self) -> bool {\n        // we only distribute if chainspec FeeHandling == Accumulate\n        // and if there are administrative accounts to receive the fees.\n        // the various public networks do not use this option.\n        if !self.fee_handling().is_accumulate() {\n            return false;\n        }\n\n        matches!(\n            self.config.transfer_config(),\n            TransferConfig::Administered { .. }\n        )\n    }\n}\n\n/// Fee error.\n#[derive(Clone, Error, Debug)]\npub enum FeeError {\n    /// No fees distributed error.\n    #[error(\"Undistributed fees\")]\n    NoFeesDistributed,\n    /// Tracking copy error.\n    #[error(transparent)]\n    TrackingCopy(TrackingCopyError),\n    /// Registry entry not found.\n    #[error(\"Registry entry not found: {0}\")]\n    RegistryEntryNotFound(String),\n    /// Transfer error.\n    #[error(transparent)]\n    Transfer(TransferError),\n    /// Named keys not found.\n    #[error(\"Named keys not found\")]\n    NamedKeysNotFound,\n    /// Administrative accounts not found.\n    #[error(\"Administrative accounts not found\")]\n    AdministrativeAccountsNotFound,\n}\n\n/// Fee result.\n#[derive(Debug, Clone)]\npub enum FeeResult {\n    /// Root not found in global state.\n    RootNotFound,\n    /// Failure result.\n    Failure(FeeError),\n    /// Success result.\n    Success {\n        /// List of transfers that happened during execution.\n        transfers: Vec<Transfer>,\n        /// State hash after fee distribution outcome is committed to the global state.\n        post_state_hash: Digest,\n        /// Effects of the fee distribution process.\n        effects: Effects,\n    },\n}\n\nimpl FeeResult {\n    /// Returns true if successful, else false.\n    pub fn is_success(&self) -> bool {\n        matches!(self, FeeResult::Success { .. })\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/flush.rs",
    "content": "use crate::global_state::error::Error as GlobalStateError;\n\n/// Request to flush state.\npub struct FlushRequest {}\n\nimpl FlushRequest {\n    /// Returns a new instance of FlushRequest.\n    pub fn new() -> Self {\n        FlushRequest {}\n    }\n}\n\nimpl Default for FlushRequest {\n    fn default() -> Self {\n        FlushRequest::new()\n    }\n}\n\n/// Represents a result of a `flush` request.\npub enum FlushResult {\n    /// Manual sync is disabled in config settings.\n    ManualSyncDisabled,\n    /// Successfully flushed.\n    Success,\n    /// Failed to flush.\n    Failure(GlobalStateError),\n}\n\nimpl FlushResult {\n    /// Flush succeeded\n    pub fn flushed(&self) -> bool {\n        matches!(self, FlushResult::Success)\n    }\n\n    /// Transforms flush result to global state error, if relevant.\n    pub fn as_error(self) -> Result<(), GlobalStateError> {\n        match self {\n            FlushResult::ManualSyncDisabled | FlushResult::Success => Ok(()),\n            FlushResult::Failure(gse) => Err(gse),\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/forced_undelegate.rs",
    "content": "use casper_types::{\n    execution::Effects, system::auction::Error as AuctionError, BlockTime, Digest, ProtocolVersion,\n};\nuse thiserror::Error;\n\nuse crate::{\n    system::{runtime_native::Config, transfer::TransferError},\n    tracking_copy::TrackingCopyError,\n};\n\n/// Forced undelegate request.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct ForcedUndelegateRequest {\n    config: Config,\n    state_hash: Digest,\n    protocol_version: ProtocolVersion,\n    block_time: BlockTime,\n}\n\nimpl ForcedUndelegateRequest {\n    /// Ctor.\n    pub fn new(\n        config: Config,\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        block_time: BlockTime,\n    ) -> Self {\n        Self {\n            config,\n            state_hash,\n            protocol_version,\n            block_time,\n        }\n    }\n\n    /// Returns config.\n    pub fn config(&self) -> &Config {\n        &self.config\n    }\n\n    /// Returns state_hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Returns protocol_version.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n\n    /// Returns block time.\n    pub fn block_time(&self) -> BlockTime {\n        self.block_time\n    }\n}\n\n/// Forced undelegation error.\n#[derive(Clone, Error, Debug)]\npub enum ForcedUndelegateError {\n    /// Tracking copy error.\n    #[error(transparent)]\n    TrackingCopy(TrackingCopyError),\n    /// Registry entry not found error.\n    #[error(\"Registry entry not found: {0}\")]\n    RegistryEntryNotFound(String),\n    /// Transfer error.\n    #[error(transparent)]\n    Transfer(TransferError),\n    /// Auction error.\n    #[error(\"Auction error: {0}\")]\n    Auction(AuctionError),\n}\n\n/// Forced undelegation result.\n#[derive(Debug, Clone)]\npub enum ForcedUndelegateResult {\n    /// Root hash not found in global state.\n    RootNotFound,\n    /// Forced undelegation 
failed.\n    Failure(ForcedUndelegateError),\n    /// Forced undelegation succeeded.\n    Success {\n        /// State hash after distribution outcome is committed to the global state.\n        post_state_hash: Digest,\n        /// Effects of the distribution process.\n        effects: Effects,\n    },\n}\n\nimpl ForcedUndelegateResult {\n    /// Returns true if successful, else false.\n    pub fn is_success(&self) -> bool {\n        matches!(self, Self::Success { .. })\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/genesis.rs",
    "content": "use num_rational::Ratio;\n#[cfg(test)]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\n\nuse casper_types::{\n    execution::Effects, ChainspecRegistry, Digest, GenesisAccount, GenesisConfig, GenesisValidator,\n    ProtocolVersion, PublicKey,\n};\n\nuse crate::system::genesis::GenesisError;\n\n/// Represents a configuration of a genesis process.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct GenesisRequest {\n    chainspec_hash: Digest,\n    protocol_version: ProtocolVersion,\n    config: GenesisConfig,\n    chainspec_registry: ChainspecRegistry,\n}\n\nimpl GenesisRequest {\n    /// Creates a new genesis config object.\n    pub fn new(\n        chainspec_hash: Digest,\n        protocol_version: ProtocolVersion,\n        config: GenesisConfig,\n        chainspec_registry: ChainspecRegistry,\n    ) -> Self {\n        GenesisRequest {\n            chainspec_hash,\n            protocol_version,\n            config,\n            chainspec_registry,\n        }\n    }\n\n    /// Set enable entity.\n    pub fn set_enable_entity(&mut self, enable: bool) {\n        self.config.set_enable_entity(enable);\n    }\n\n    /// Push genesis validator.\n    pub fn push_genesis_account(&mut self, genesis_account: GenesisAccount) {\n        self.config.push_account(genesis_account);\n    }\n\n    /// Push genesis validator.\n    pub fn push_genesis_validator(\n        &mut self,\n        public_key: &PublicKey,\n        genesis_validator: GenesisValidator,\n    ) {\n        self.config\n            .push_genesis_validator(public_key, genesis_validator);\n    }\n\n    /// Returns chainspec_hash.\n    pub fn chainspec_hash(&self) -> Digest {\n        self.chainspec_hash\n    }\n\n    /// Returns protocol version.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n\n    /// Returns configuration details of the genesis process.\n    pub fn config(&self) -> &GenesisConfig {\n        &self.config\n   
 }\n\n    /// Returns chainspec registry.\n    pub fn chainspec_registry(&self) -> &ChainspecRegistry {\n        &self.chainspec_registry\n    }\n\n    /// Push a rewards ratio into the genesis request.\n    pub fn push_rewards_ratio(&mut self, rewards_ratio: Ratio<u64>) {\n        self.config.push_rewards_ratio(rewards_ratio)\n    }\n}\n\n#[cfg(test)]\nimpl Distribution<GenesisRequest> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> GenesisRequest {\n        let input: [u8; 32] = rng.gen();\n        let chainspec_hash = Digest::hash(input);\n        let protocol_version = ProtocolVersion::from_parts(rng.gen(), rng.gen(), rng.gen());\n        let config = rng.gen();\n\n        let chainspec_file_bytes: [u8; 10] = rng.gen();\n        let genesis_account_file_bytes: [u8; 15] = rng.gen();\n        let chainspec_registry =\n            ChainspecRegistry::new_with_genesis(&chainspec_file_bytes, &genesis_account_file_bytes);\n        GenesisRequest::new(chainspec_hash, protocol_version, config, chainspec_registry)\n    }\n}\n\n/// Represents a result of a `genesis` request.\n#[derive(Debug, Clone)]\npub enum GenesisResult {\n    /// Genesis fatal.\n    Fatal(String),\n    /// Genesis failure.\n    Failure(GenesisError),\n    /// Genesis success.\n    Success {\n        /// State hash after genesis is committed to the global state.\n        post_state_hash: Digest,\n        /// Effects of genesis.\n        effects: Effects,\n    },\n}\n\nimpl GenesisResult {\n    /// Is success.\n    pub fn is_success(&self) -> bool {\n        matches!(self, GenesisResult::Success { .. 
})\n    }\n\n    /// Returns a Result matching the original api for this functionality.\n    pub fn as_legacy(self) -> Result<(Digest, Effects), Box<GenesisError>> {\n        match self {\n            GenesisResult::Fatal(_) => Err(Box::new(GenesisError::StateUninitialized)),\n            GenesisResult::Failure(err) => Err(Box::new(err)),\n            GenesisResult::Success {\n                post_state_hash,\n                effects,\n            } => Ok((post_state_hash, effects)),\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/handle_fee.rs",
    "content": "use crate::{\n    data_access_layer::BalanceIdentifier, system::runtime_native::Config as NativeRuntimeConfig,\n    tracking_copy::TrackingCopyError,\n};\nuse casper_types::{\n    execution::Effects, Digest, EraId, InitiatorAddr, ProtocolVersion, PublicKey, TransactionHash,\n    Transfer, U512,\n};\n\n/// Handle fee mode.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub enum HandleFeeMode {\n    /// Pay the fee.\n    Pay {\n        /// Initiator.\n        initiator_addr: Box<InitiatorAddr>,\n        /// Source.\n        source: Box<BalanceIdentifier>,\n        /// Target.\n        target: Box<BalanceIdentifier>,\n        /// Amount.\n        amount: U512,\n    },\n    /// Burn the fee.\n    Burn {\n        /// Source.\n        source: BalanceIdentifier,\n        /// Amount.\n        amount: Option<U512>,\n    },\n    /// Validator credit (used in no fee mode).\n    Credit {\n        /// Validator.\n        validator: Box<PublicKey>,\n        /// Amount.\n        amount: U512,\n        /// EraId.\n        era_id: EraId,\n    },\n}\n\nimpl HandleFeeMode {\n    /// Ctor for Pay mode.\n    pub fn pay(\n        initiator_addr: Box<InitiatorAddr>,\n        source: BalanceIdentifier,\n        target: BalanceIdentifier,\n        amount: U512,\n    ) -> Self {\n        HandleFeeMode::Pay {\n            initiator_addr,\n            source: Box::new(source),\n            target: Box::new(target),\n            amount,\n        }\n    }\n\n    /// What source should be used to burn from, and how much?\n    /// If amount is None or greater than the available balance, the full available balance\n    /// will be burned. 
If amount is less than available balance, only that much will be\n    /// burned leaving a remaining balance.\n    pub fn burn(source: BalanceIdentifier, amount: Option<U512>) -> Self {\n        HandleFeeMode::Burn { source, amount }\n    }\n\n    /// Applies a staking credit to the imputed proposer for the imputed amount at the end\n    /// of the current era when the auction process is executed.\n    pub fn credit(validator: Box<PublicKey>, amount: U512, era_id: EraId) -> Self {\n        HandleFeeMode::Credit {\n            validator,\n            amount,\n            era_id,\n        }\n    }\n}\n\n/// Handle fee request.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct HandleFeeRequest {\n    /// The runtime config.\n    pub(crate) config: NativeRuntimeConfig,\n    /// State root hash.\n    pub(crate) state_hash: Digest,\n    /// The protocol version.\n    pub(crate) protocol_version: ProtocolVersion,\n    /// Transaction hash.\n    pub(crate) transaction_hash: TransactionHash,\n    /// Handle fee mode.\n    pub(crate) handle_fee_mode: HandleFeeMode,\n}\n\nimpl HandleFeeRequest {\n    /// Creates new request instance with runtime args.\n    #[allow(clippy::too_many_arguments)]\n    pub fn new(\n        config: NativeRuntimeConfig,\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        transaction_hash: TransactionHash,\n        handle_fee_mode: HandleFeeMode,\n    ) -> Self {\n        Self {\n            config,\n            state_hash,\n            protocol_version,\n            transaction_hash,\n            handle_fee_mode,\n        }\n    }\n\n    /// Returns config.\n    pub fn config(&self) -> &NativeRuntimeConfig {\n        &self.config\n    }\n\n    /// Returns state hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Returns handle protocol version.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n\n    /// Returns handle transaction 
hash.\n    pub fn transaction_hash(&self) -> TransactionHash {\n        self.transaction_hash\n    }\n\n    /// Returns handle fee mode.\n    pub fn handle_fee_mode(&self) -> &HandleFeeMode {\n        &self.handle_fee_mode\n    }\n}\n\n/// Result enum that represents all possible outcomes of a handle  request.\n#[derive(Debug)]\npub enum HandleFeeResult {\n    /// Invalid state root hash.\n    RootNotFound,\n    /// Handle request succeeded.\n    Success {\n        /// Transfers.\n        transfers: Vec<Transfer>,\n        /// Handle fee effects.\n        effects: Effects,\n    },\n    /// Handle  request failed.\n    Failure(TrackingCopyError),\n}\n\nimpl HandleFeeResult {\n    /// The effects, if any.\n    pub fn effects(&self) -> Effects {\n        match self {\n            HandleFeeResult::RootNotFound | HandleFeeResult::Failure(_) => Effects::new(),\n            HandleFeeResult::Success { effects, .. } => effects.clone(),\n        }\n    }\n\n    /// The error message, if any.\n    pub fn error_message(&self) -> Option<String> {\n        match self {\n            HandleFeeResult::RootNotFound => Some(\"root not found\".to_string()),\n            HandleFeeResult::Failure(tce) => Some(format!(\"{}\", tce)),\n            HandleFeeResult::Success { .. } => None,\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/handle_refund.rs",
    "content": "use crate::{\n    data_access_layer::BalanceIdentifier, system::runtime_native::Config as NativeRuntimeConfig,\n    tracking_copy::TrackingCopyError,\n};\nuse casper_types::{\n    execution::Effects, Digest, InitiatorAddr, Phase, ProtocolVersion, TransactionHash, Transfer,\n    U512,\n};\nuse num_rational::Ratio;\n\n/// Selects refund operation.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub enum HandleRefundMode {\n    /// This variant will cause the refund amount to be calculated and then burned.\n    Burn {\n        /// Refund limit.\n        limit: U512,\n        /// Refund cost.\n        cost: U512,\n        /// Refund consumed.\n        consumed: U512,\n        /// Refund gas price.\n        gas_price: u8,\n        /// Refund source.\n        source: Box<BalanceIdentifier>,\n        /// Refund ratio.\n        ratio: Ratio<u64>,\n        /// Available.\n        available: U512,\n    },\n    /// This variant will cause the refund amount to be calculated and the refund to be executed.\n    Refund {\n        /// Refund initiator.\n        initiator_addr: Box<InitiatorAddr>,\n        /// Refund limit.\n        limit: U512,\n        /// Refund cost.\n        cost: U512,\n        /// Refund consumed.\n        consumed: U512,\n        /// Refund gas price.\n        gas_price: u8,\n        /// Refund ratio.\n        ratio: Ratio<u64>,\n        /// Refund source.\n        source: Box<BalanceIdentifier>,\n        /// Target for refund.\n        target: Box<BalanceIdentifier>,\n        /// Available.\n        available: U512,\n    },\n    /// This variant handles the edge case of custom payment plus no fee plus no refund.\n    /// This ultimately turns into a hold on the initiator, but it takes extra steps to get there\n    /// because the payment has already been fully processed up front and must first be unwound.\n    RefundNoFeeCustomPayment {\n        /// Refund initiator.\n        initiator_addr: Box<InitiatorAddr>,\n        /// Refund limit.\n        
limit: U512,\n        /// Refund cost.\n        cost: U512,\n        /// Refund gas price.\n        gas_price: u8,\n    },\n    /// This variant only calculates and returns the refund amount. It does not\n    /// execute a refund.\n    CalculateAmount {\n        /// Refund limit.\n        limit: U512,\n        /// Refund cost.\n        cost: U512,\n        /// Refund consumed.\n        consumed: U512,\n        /// Refund gas price.\n        gas_price: u8,\n        /// Refund ratio.\n        ratio: Ratio<u64>,\n        /// Available.\n        available: U512,\n    },\n    /// This variant will cause the refund purse tracked by handle_payment to be set.\n    SetRefundPurse {\n        /// Target for refund, which will receive any refunded token while set.\n        target: Box<BalanceIdentifier>,\n    },\n    /// This variant will cause the refund purse tracked by handle_payment to be cleared.\n    ClearRefundPurse,\n}\n\nimpl HandleRefundMode {\n    /// Returns the appropriate phase for the mode.\n    pub fn phase(&self) -> Phase {\n        match self {\n            HandleRefundMode::Burn { .. }\n            | HandleRefundMode::Refund { .. }\n            | HandleRefundMode::RefundNoFeeCustomPayment { .. }\n            | HandleRefundMode::CalculateAmount { .. } => Phase::FinalizePayment,\n\n            HandleRefundMode::ClearRefundPurse | HandleRefundMode::SetRefundPurse { .. 
} => {\n                Phase::Payment\n            }\n        }\n    }\n}\n\n/// Handle refund request.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct HandleRefundRequest {\n    /// The runtime config.\n    pub(crate) config: NativeRuntimeConfig,\n    /// State root hash.\n    pub(crate) state_hash: Digest,\n    /// The protocol version.\n    pub(crate) protocol_version: ProtocolVersion,\n    /// Transaction hash.\n    pub(crate) transaction_hash: TransactionHash,\n    /// Refund handling.\n    pub(crate) refund_mode: HandleRefundMode,\n}\n\nimpl HandleRefundRequest {\n    /// Creates a new instance.\n    pub fn new(\n        config: NativeRuntimeConfig,\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        transaction_hash: TransactionHash,\n        refund_mode: HandleRefundMode,\n    ) -> Self {\n        HandleRefundRequest {\n            config,\n            state_hash,\n            protocol_version,\n            transaction_hash,\n            refund_mode,\n        }\n    }\n\n    /// Returns a reference to the config.\n    pub fn config(&self) -> &NativeRuntimeConfig {\n        &self.config\n    }\n\n    /// Returns the state hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Returns the protocol version.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n\n    /// Returns the transaction hash.\n    pub fn transaction_hash(&self) -> TransactionHash {\n        self.transaction_hash\n    }\n\n    /// Returns the refund mode.\n    pub fn refund_mode(&self) -> &HandleRefundMode {\n        &self.refund_mode\n    }\n}\n\n/// Handle refund result.\n#[derive(Debug)]\npub enum HandleRefundResult {\n    /// Invalid state root hash.\n    RootNotFound,\n    /// Handle refund request succeeded.\n    Success {\n        /// Transfers.\n        transfers: Vec<Transfer>,\n        /// The effects.\n        effects: Effects,\n        /// The amount, if any.\n     
   amount: Option<U512>,\n    },\n    /// Invalid phase selected (programmer error).\n    InvalidPhase,\n    /// Handle refund request failed.\n    Failure(TrackingCopyError),\n}\n\nimpl HandleRefundResult {\n    /// The effects, if any.\n    pub fn effects(&self) -> Effects {\n        match self {\n            HandleRefundResult::RootNotFound\n            | HandleRefundResult::InvalidPhase\n            | HandleRefundResult::Failure(_) => Effects::new(),\n            HandleRefundResult::Success { effects, .. } => effects.clone(),\n        }\n    }\n\n    /// The refund amount.\n    pub fn refund_amount(&self) -> U512 {\n        match self {\n            HandleRefundResult::RootNotFound\n            | HandleRefundResult::InvalidPhase\n            | HandleRefundResult::Failure(_) => U512::zero(),\n            HandleRefundResult::Success {\n                amount: refund_amount,\n                ..\n            } => refund_amount.unwrap_or(U512::zero()),\n        }\n    }\n\n    /// The error message, if any.\n    pub fn error_message(&self) -> Option<String> {\n        match self {\n            HandleRefundResult::RootNotFound => Some(\"root not found\".to_string()),\n            HandleRefundResult::InvalidPhase => Some(\"invalid phase selected\".to_string()),\n            HandleRefundResult::Failure(tce) => Some(format!(\"{}\", tce)),\n            HandleRefundResult::Success { .. } => None,\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/key_prefix.rs",
    "content": "use casper_types::{\n    account::AccountHash,\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    contract_messages::TopicNameHash,\n    system::{auction::BidAddrTag, mint::BalanceHoldAddrTag},\n    EntityAddr, KeyTag, URefAddr,\n};\n\n/// Key prefixes used for querying the global state.\n#[derive(Debug, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)]\npub enum KeyPrefix {\n    /// Retrieves all delegator bid addresses for a given validator.\n    DelegatorBidAddrsByValidator(AccountHash),\n    /// Retrieves all entries for a given hash addr.\n    MessageEntriesByEntity(EntityAddr),\n    /// Retrieves all messages for a given hash addr and topic.\n    MessagesByEntityAndTopic(EntityAddr, TopicNameHash),\n    /// Retrieves all named keys for a given entity.\n    NamedKeysByEntity(EntityAddr),\n    /// Retrieves all gas balance holds for a given purse.\n    GasBalanceHoldsByPurse(URefAddr),\n    /// Retrieves all processing balance holds for a given purse.\n    ProcessingBalanceHoldsByPurse(URefAddr),\n    /// Retrieves all V1 entry points for a given entity.\n    EntryPointsV1ByEntity(EntityAddr),\n    /// Retrieves all V2 entry points for a given entity.\n    EntryPointsV2ByEntity(EntityAddr),\n}\n\nimpl ToBytes for KeyPrefix {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::unchecked_allocate_buffer(self);\n        self.write_bytes(&mut result)?;\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                KeyPrefix::DelegatorBidAddrsByValidator(validator) => {\n                    U8_SERIALIZED_LENGTH + validator.serialized_length()\n                }\n                KeyPrefix::MessageEntriesByEntity(hash_addr) => hash_addr.serialized_length(),\n                KeyPrefix::MessagesByEntityAndTopic(hash_addr, topic) => {\n                    hash_addr.serialized_length() + 
topic.serialized_length()\n                }\n                KeyPrefix::NamedKeysByEntity(entity) => entity.serialized_length(),\n                KeyPrefix::GasBalanceHoldsByPurse(uref) => {\n                    U8_SERIALIZED_LENGTH + uref.serialized_length()\n                }\n                KeyPrefix::ProcessingBalanceHoldsByPurse(uref) => {\n                    U8_SERIALIZED_LENGTH + uref.serialized_length()\n                }\n                KeyPrefix::EntryPointsV1ByEntity(entity) => {\n                    U8_SERIALIZED_LENGTH + entity.serialized_length()\n                }\n                KeyPrefix::EntryPointsV2ByEntity(entity) => {\n                    U8_SERIALIZED_LENGTH + entity.serialized_length()\n                }\n            }\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            KeyPrefix::DelegatorBidAddrsByValidator(validator) => {\n                writer.push(KeyTag::BidAddr as u8);\n                writer.push(BidAddrTag::DelegatedAccount as u8);\n                validator.write_bytes(writer)?;\n            }\n            KeyPrefix::MessageEntriesByEntity(hash_addr) => {\n                writer.push(KeyTag::Message as u8);\n                hash_addr.write_bytes(writer)?;\n            }\n            KeyPrefix::MessagesByEntityAndTopic(hash_addr, topic) => {\n                writer.push(KeyTag::Message as u8);\n                hash_addr.write_bytes(writer)?;\n                topic.write_bytes(writer)?;\n            }\n            KeyPrefix::NamedKeysByEntity(entity) => {\n                writer.push(KeyTag::NamedKey as u8);\n                entity.write_bytes(writer)?;\n            }\n            KeyPrefix::GasBalanceHoldsByPurse(uref) => {\n                writer.push(KeyTag::BalanceHold as u8);\n                writer.push(BalanceHoldAddrTag::Gas as u8);\n                uref.write_bytes(writer)?;\n            }\n            
KeyPrefix::ProcessingBalanceHoldsByPurse(uref) => {\n                writer.push(KeyTag::BalanceHold as u8);\n                writer.push(BalanceHoldAddrTag::Processing as u8);\n                uref.write_bytes(writer)?;\n            }\n            KeyPrefix::EntryPointsV1ByEntity(entity) => {\n                writer.push(KeyTag::EntryPoint as u8);\n                writer.push(0);\n                entity.write_bytes(writer)?;\n            }\n            KeyPrefix::EntryPointsV2ByEntity(entity) => {\n                writer.push(KeyTag::EntryPoint as u8);\n                writer.push(1);\n                entity.write_bytes(writer)?;\n            }\n        }\n        Ok(())\n    }\n}\n\nimpl FromBytes for KeyPrefix {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        let result = match tag {\n            tag if tag == KeyTag::BidAddr as u8 => {\n                let (bid_addr_tag, remainder) = u8::from_bytes(remainder)?;\n                match bid_addr_tag {\n                    tag if tag == BidAddrTag::DelegatedAccount as u8 => {\n                        let (validator, remainder) = AccountHash::from_bytes(remainder)?;\n                        (\n                            KeyPrefix::DelegatorBidAddrsByValidator(validator),\n                            remainder,\n                        )\n                    }\n                    _ => return Err(bytesrepr::Error::Formatting),\n                }\n            }\n            tag if tag == KeyTag::Message as u8 => {\n                let (hash_addr, remainder) = EntityAddr::from_bytes(remainder)?;\n                if remainder.is_empty() {\n                    (KeyPrefix::MessageEntriesByEntity(hash_addr), remainder)\n                } else {\n                    let (topic, remainder) = TopicNameHash::from_bytes(remainder)?;\n                    (\n                        KeyPrefix::MessagesByEntityAndTopic(hash_addr, 
topic),\n                        remainder,\n                    )\n                }\n            }\n            tag if tag == KeyTag::NamedKey as u8 => {\n                let (entity, remainder) = EntityAddr::from_bytes(remainder)?;\n                (KeyPrefix::NamedKeysByEntity(entity), remainder)\n            }\n            tag if tag == KeyTag::BalanceHold as u8 => {\n                let (balance_hold_addr_tag, remainder) = u8::from_bytes(remainder)?;\n                let (uref, remainder) = URefAddr::from_bytes(remainder)?;\n                match balance_hold_addr_tag {\n                    tag if tag == BalanceHoldAddrTag::Gas as u8 => {\n                        (KeyPrefix::GasBalanceHoldsByPurse(uref), remainder)\n                    }\n                    tag if tag == BalanceHoldAddrTag::Processing as u8 => {\n                        (KeyPrefix::ProcessingBalanceHoldsByPurse(uref), remainder)\n                    }\n                    _ => return Err(bytesrepr::Error::Formatting),\n                }\n            }\n            tag if tag == KeyTag::EntryPoint as u8 => {\n                let (entry_point_type, remainder) = u8::from_bytes(remainder)?;\n                let (entity, remainder) = EntityAddr::from_bytes(remainder)?;\n                match entry_point_type {\n                    0 => (KeyPrefix::EntryPointsV1ByEntity(entity), remainder),\n                    1 => (KeyPrefix::EntryPointsV2ByEntity(entity), remainder),\n                    _ => return Err(bytesrepr::Error::Formatting),\n                }\n            }\n            _ => return Err(bytesrepr::Error::Formatting),\n        };\n        Ok(result)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use casper_types::testing::TestRng;\n    use rand::Rng;\n\n    use casper_types::{\n        addressable_entity::NamedKeyAddr,\n        contract_messages::MessageAddr,\n        gens::{account_hash_arb, entity_addr_arb, topic_name_hash_arb, u8_slice_32},\n        system::{auction::BidAddr, 
mint::BalanceHoldAddr},\n        BlockTime, EntryPointAddr, Key,\n    };\n\n    use super::*;\n    use proptest::prelude::*;\n\n    pub fn key_prefix_arb() -> impl Strategy<Value = KeyPrefix> {\n        prop_oneof![\n            account_hash_arb().prop_map(KeyPrefix::DelegatorBidAddrsByValidator),\n            entity_addr_arb().prop_map(KeyPrefix::MessageEntriesByEntity),\n            (entity_addr_arb(), topic_name_hash_arb()).prop_map(|(entity_addr, topic)| {\n                KeyPrefix::MessagesByEntityAndTopic(entity_addr, topic)\n            }),\n            entity_addr_arb().prop_map(KeyPrefix::NamedKeysByEntity),\n            u8_slice_32().prop_map(KeyPrefix::GasBalanceHoldsByPurse),\n            u8_slice_32().prop_map(KeyPrefix::ProcessingBalanceHoldsByPurse),\n            entity_addr_arb().prop_map(KeyPrefix::EntryPointsV1ByEntity),\n            entity_addr_arb().prop_map(KeyPrefix::EntryPointsV2ByEntity),\n        ]\n    }\n\n    proptest! {\n        #[test]\n        fn bytesrepr_roundtrip(key_prefix in key_prefix_arb()) {\n            bytesrepr::test_serialization_roundtrip(&key_prefix);\n        }\n    }\n\n    #[test]\n    fn key_serializer_compat() {\n        // This test ensures that the `KeyPrefix` deserializer is compatible with the `Key`\n        // serializer. 
Combined with the `bytesrepr_roundtrip` test, this ensures that\n        // `KeyPrefix` is binary compatible with `Key`.\n\n        let rng = &mut TestRng::new();\n\n        let hash1 = rng.gen();\n        let hash2 = rng.gen();\n\n        for (key, prefix) in [\n            (\n                Key::BidAddr(BidAddr::new_delegator_account_addr((hash1, hash2))),\n                KeyPrefix::DelegatorBidAddrsByValidator(AccountHash::new(hash1)),\n            ),\n            (\n                Key::Message(MessageAddr::new_message_addr(\n                    EntityAddr::SmartContract(hash1),\n                    TopicNameHash::new(hash2),\n                    0,\n                )),\n                KeyPrefix::MessagesByEntityAndTopic(\n                    EntityAddr::SmartContract(hash1),\n                    TopicNameHash::new(hash2),\n                ),\n            ),\n            (\n                Key::NamedKey(NamedKeyAddr::new_named_key_entry(\n                    EntityAddr::Account(hash1),\n                    hash2,\n                )),\n                KeyPrefix::NamedKeysByEntity(EntityAddr::Account(hash1)),\n            ),\n            (\n                Key::BalanceHold(BalanceHoldAddr::new_gas(hash1, BlockTime::new(0))),\n                KeyPrefix::GasBalanceHoldsByPurse(hash1),\n            ),\n            (\n                Key::BalanceHold(BalanceHoldAddr::new_processing(hash1, BlockTime::new(0))),\n                KeyPrefix::ProcessingBalanceHoldsByPurse(hash1),\n            ),\n            (\n                Key::EntryPoint(\n                    EntryPointAddr::new_v1_entry_point_addr(EntityAddr::Account(hash1), \"name\")\n                        .expect(\"should create entry point\"),\n                ),\n                KeyPrefix::EntryPointsV1ByEntity(EntityAddr::Account(hash1)),\n            ),\n        ] {\n            let key_bytes = key.to_bytes().expect(\"should serialize key\");\n            let (parsed_key_prefix, remainder) =\n              
  KeyPrefix::from_bytes(&key_bytes).expect(\"should deserialize key prefix\");\n            assert_eq!(parsed_key_prefix, prefix, \"key: {:?}\", key);\n            assert!(!remainder.is_empty(), \"key: {:?}\", key);\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/message_topics.rs",
    "content": "use casper_types::{addressable_entity::MessageTopics, Digest, EntityAddr};\n\nuse crate::tracking_copy::TrackingCopyError;\n\n/// Request for a message topics.\npub struct MessageTopicsRequest {\n    state_hash: Digest,\n    entity_addr: EntityAddr,\n}\n\nimpl MessageTopicsRequest {\n    /// Creates new request object.\n    pub fn new(state_hash: Digest, entity_addr: EntityAddr) -> Self {\n        Self {\n            state_hash,\n            entity_addr,\n        }\n    }\n\n    /// Returns state root hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Returns the hash addr.\n    pub fn entity_addr(&self) -> EntityAddr {\n        self.entity_addr\n    }\n}\n\n/// Result of a global state query request.\n#[derive(Debug)]\npub enum MessageTopicsResult {\n    /// Invalid state root hash.\n    RootNotFound,\n    /// Successful query.\n    Success {\n        /// Stored value under a path.\n        message_topics: MessageTopics,\n    },\n    /// Tracking Copy Error\n    Failure(TrackingCopyError),\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/mint.rs",
    "content": "use std::collections::BTreeSet;\n\nuse crate::{\n    data_access_layer::BalanceIdentifier,\n    system::{\n        burn::{BurnArgs, BurnError},\n        runtime_native::{Config as NativeRuntimeConfig, TransferConfig},\n        transfer::{TransferArgs, TransferError},\n    },\n    tracking_copy::TrackingCopyCache,\n};\nuse casper_types::{\n    account::AccountHash, execution::Effects, Digest, InitiatorAddr, ProtocolVersion, RuntimeArgs,\n    TransactionHash, Transfer, U512,\n};\n\n/// Transfer arguments using balance identifiers.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct BalanceIdentifierTransferArgs {\n    to: Option<AccountHash>,\n    source: BalanceIdentifier,\n    target: BalanceIdentifier,\n    amount: U512,\n    arg_id: Option<u64>,\n}\n\nimpl BalanceIdentifierTransferArgs {\n    /// Ctor.\n    pub fn new(\n        to: Option<AccountHash>,\n        source: BalanceIdentifier,\n        target: BalanceIdentifier,\n        amount: U512,\n        arg_id: Option<u64>,\n    ) -> Self {\n        BalanceIdentifierTransferArgs {\n            to,\n            source,\n            target,\n            amount,\n            arg_id,\n        }\n    }\n\n    /// Get to.\n    pub fn to(&self) -> Option<AccountHash> {\n        self.to\n    }\n\n    /// Get source.\n    pub fn source(&self) -> &BalanceIdentifier {\n        &self.source\n    }\n\n    /// Get target.\n    pub fn target(&self) -> &BalanceIdentifier {\n        &self.target\n    }\n\n    /// Get amount.\n    pub fn amount(&self) -> U512 {\n        self.amount\n    }\n\n    /// Get arg_id.\n    pub fn arg_id(&self) -> Option<u64> {\n        self.arg_id\n    }\n}\n\n/// Transfer details.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub enum TransferRequestArgs {\n    /// Provides opaque arguments in runtime format.\n    Raw(RuntimeArgs),\n    /// Provides explicit structured args.\n    Explicit(TransferArgs),\n    /// Provides support for transfers using balance identifiers.\n    /// The source 
and target purses will get resolved on usage.\n    Indirect(Box<BalanceIdentifierTransferArgs>),\n}\n\n/// Request for motes transfer.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct TransferRequest {\n    /// Config.\n    config: NativeRuntimeConfig,\n    /// State root hash.\n    state_hash: Digest,\n    /// Protocol version.\n    protocol_version: ProtocolVersion,\n    /// Transaction hash.\n    transaction_hash: TransactionHash,\n    /// Base account.\n    initiator: InitiatorAddr,\n    /// List of authorizing accounts.\n    authorization_keys: BTreeSet<AccountHash>,\n    /// Args.\n    args: TransferRequestArgs,\n}\n\nimpl TransferRequest {\n    /// Creates new request object.\n    #[allow(clippy::too_many_arguments)]\n    pub fn new(\n        config: NativeRuntimeConfig,\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        transaction_hash: TransactionHash,\n        initiator: InitiatorAddr,\n        authorization_keys: BTreeSet<AccountHash>,\n        args: TransferArgs,\n    ) -> Self {\n        let args = TransferRequestArgs::Explicit(args);\n        Self {\n            config,\n            state_hash,\n            protocol_version,\n            transaction_hash,\n            initiator,\n            authorization_keys,\n            args,\n        }\n    }\n\n    /// Creates new request instance with runtime args.\n    #[allow(clippy::too_many_arguments)]\n    pub fn with_runtime_args(\n        config: NativeRuntimeConfig,\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        transaction_hash: TransactionHash,\n        initiator: InitiatorAddr,\n        authorization_keys: BTreeSet<AccountHash>,\n        args: RuntimeArgs,\n    ) -> Self {\n        let args = TransferRequestArgs::Raw(args);\n        Self {\n            config,\n            state_hash,\n            protocol_version,\n            transaction_hash,\n            initiator,\n            authorization_keys,\n            args,\n        
}\n    }\n\n    /// Creates new request object using balance identifiers.\n    #[allow(clippy::too_many_arguments)]\n    pub fn new_indirect(\n        config: NativeRuntimeConfig,\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        transaction_hash: TransactionHash,\n        initiator: InitiatorAddr,\n        authorization_keys: BTreeSet<AccountHash>,\n        args: BalanceIdentifierTransferArgs,\n    ) -> Self {\n        let args = TransferRequestArgs::Indirect(Box::new(args));\n        Self {\n            config,\n            state_hash,\n            protocol_version,\n            transaction_hash,\n            initiator,\n            authorization_keys,\n            args,\n        }\n    }\n\n    /// Returns a reference to the runtime config.\n    pub fn config(&self) -> &NativeRuntimeConfig {\n        &self.config\n    }\n\n    /// Returns a reference to the transfer config.\n    pub fn transfer_config(&self) -> &TransferConfig {\n        self.config.transfer_config()\n    }\n\n    /// Returns state root hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Returns initiator.\n    pub fn initiator(&self) -> &InitiatorAddr {\n        &self.initiator\n    }\n\n    /// Returns authorization keys.\n    pub fn authorization_keys(&self) -> &BTreeSet<AccountHash> {\n        &self.authorization_keys\n    }\n\n    /// Returns protocol version.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n\n    /// Returns transaction hash.\n    pub fn transaction_hash(&self) -> TransactionHash {\n        self.transaction_hash\n    }\n\n    /// Returns transfer args.\n    pub fn args(&self) -> &TransferRequestArgs {\n        &self.args\n    }\n\n    /// Into args.\n    pub fn into_args(self) -> TransferRequestArgs {\n        self.args\n    }\n\n    /// Used by `WasmTestBuilder` to set the appropriate state root hash and transfer config before\n    /// executing the 
transfer.\n    #[doc(hidden)]\n    pub fn set_state_hash_and_config(&mut self, state_hash: Digest, config: NativeRuntimeConfig) {\n        self.state_hash = state_hash;\n        self.config = config;\n    }\n}\n\n/// Transfer result.\n#[derive(Debug, Clone)]\npub enum TransferResult {\n    /// Invalid state root hash.\n    RootNotFound,\n    /// Transfer succeeded\n    Success {\n        /// List of transfers that happened during execution.\n        transfers: Vec<Transfer>,\n        /// Effects of transfer.\n        effects: Effects,\n        /// Cached tracking copy operations.\n        cache: TrackingCopyCache,\n    },\n    /// Transfer failed\n    Failure(TransferError),\n}\n\nimpl TransferResult {\n    /// Returns the effects, if any.\n    pub fn effects(&self) -> Effects {\n        match self {\n            TransferResult::RootNotFound | TransferResult::Failure(_) => Effects::new(),\n            TransferResult::Success { effects, .. } => effects.clone(),\n        }\n    }\n\n    /// Returns transfers.\n    pub fn transfers(&self) -> Vec<Transfer> {\n        match self {\n            TransferResult::RootNotFound | TransferResult::Failure(_) => vec![],\n            TransferResult::Success { transfers, .. 
} => transfers.clone(),\n        }\n    }\n\n    /// Returns transfer error, if any.\n    pub fn error(&self) -> Option<TransferError> {\n        if let Self::Failure(error) = self {\n            Some(error.clone())\n        } else {\n            None\n        }\n    }\n}\n\n/// Burn details.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub enum BurnRequestArgs {\n    /// Provides opaque arguments in runtime format.\n    Raw(RuntimeArgs),\n    /// Provides explicit structured args.\n    Explicit(BurnArgs),\n}\n\n/// Request for motes burn.\npub struct BurnRequest {\n    /// Config.\n    config: NativeRuntimeConfig,\n    /// State root hash.\n    state_hash: Digest,\n    /// Protocol version.\n    protocol_version: ProtocolVersion,\n    /// Transaction hash.\n    transaction_hash: TransactionHash,\n    /// Base account.\n    initiator: InitiatorAddr,\n    /// List of authorizing accounts.\n    authorization_keys: BTreeSet<AccountHash>,\n    /// Args.\n    args: BurnRequestArgs,\n}\n\nimpl BurnRequest {\n    /// Creates new request object.\n    #[allow(clippy::too_many_arguments)]\n    pub fn new(\n        config: NativeRuntimeConfig,\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        transaction_hash: TransactionHash,\n        initiator: InitiatorAddr,\n        authorization_keys: BTreeSet<AccountHash>,\n        args: BurnArgs,\n    ) -> Self {\n        let args = BurnRequestArgs::Explicit(args);\n        Self {\n            config,\n            state_hash,\n            protocol_version,\n            transaction_hash,\n            initiator,\n            authorization_keys,\n            args,\n        }\n    }\n\n    /// Creates new request instance with runtime args.\n    #[allow(clippy::too_many_arguments)]\n    pub fn with_runtime_args(\n        config: NativeRuntimeConfig,\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        transaction_hash: TransactionHash,\n        initiator: InitiatorAddr,\n        
authorization_keys: BTreeSet<AccountHash>,\n        args: RuntimeArgs,\n    ) -> Self {\n        let args = BurnRequestArgs::Raw(args);\n        Self {\n            config,\n            state_hash,\n            protocol_version,\n            transaction_hash,\n            initiator,\n            authorization_keys,\n            args,\n        }\n    }\n\n    /// Returns a reference to the runtime config.\n    pub fn config(&self) -> &NativeRuntimeConfig {\n        &self.config\n    }\n\n    /// Returns state root hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Returns initiator.\n    pub fn initiator(&self) -> &InitiatorAddr {\n        &self.initiator\n    }\n\n    /// Returns authorization keys.\n    pub fn authorization_keys(&self) -> &BTreeSet<AccountHash> {\n        &self.authorization_keys\n    }\n\n    /// Returns protocol version.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n\n    /// Returns transaction hash.\n    pub fn transaction_hash(&self) -> TransactionHash {\n        self.transaction_hash\n    }\n\n    /// Returns transfer args.\n    pub fn args(&self) -> &BurnRequestArgs {\n        &self.args\n    }\n\n    /// Into args.\n    pub fn into_args(self) -> BurnRequestArgs {\n        self.args\n    }\n\n    /// Used by `WasmTestBuilder` to set the appropriate state root hash and runtime config before\n    /// executing the burn.\n    #[doc(hidden)]\n    pub fn set_state_hash_and_config(&mut self, state_hash: Digest, config: NativeRuntimeConfig) {\n        self.state_hash = state_hash;\n        self.config = config;\n    }\n}\n\n/// Burn result.\n#[derive(Debug, Clone)]\npub enum BurnResult {\n    /// Invalid state root hash.\n    RootNotFound,\n    /// Transfer succeeded\n    Success {\n        /// Effects of transfer.\n        effects: Effects,\n        /// Cached tracking copy operations.\n        cache: TrackingCopyCache,\n    },\n    /// Burn failed\n    
Failure(BurnError),\n}\n\nimpl BurnResult {\n    /// Returns the effects, if any.\n    pub fn effects(&self) -> Effects {\n        match self {\n            BurnResult::RootNotFound | BurnResult::Failure(_) => Effects::new(),\n            BurnResult::Success { effects, .. } => effects.clone(),\n        }\n    }\n\n    /// Returns burn error, if any.\n    pub fn error(&self) -> Option<BurnError> {\n        if let Self::Failure(error) = self {\n            Some(error.clone())\n        } else {\n            None\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/prefixed_values.rs",
    "content": "//! Support for obtaining all values with a given key prefix.\nuse crate::{tracking_copy::TrackingCopyError, KeyPrefix};\nuse casper_types::{Digest, StoredValue};\n\n/// Represents a request to obtain all values with a given key prefix.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct PrefixedValuesRequest {\n    state_hash: Digest,\n    key_prefix: KeyPrefix,\n}\n\nimpl PrefixedValuesRequest {\n    /// Creates new request.\n    pub fn new(state_hash: Digest, key_prefix: KeyPrefix) -> Self {\n        Self {\n            state_hash,\n            key_prefix,\n        }\n    }\n\n    /// Returns state root hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Returns key prefix.\n    pub fn key_prefix(&self) -> &KeyPrefix {\n        &self.key_prefix\n    }\n}\n\n/// Represents a result of a `items_by_prefix` request.\n#[derive(Debug)]\npub enum PrefixedValuesResult {\n    /// Invalid state root hash.\n    RootNotFound,\n    /// Contains values returned from the global state.\n    Success {\n        /// The requested prefix.\n        key_prefix: KeyPrefix,\n        /// Current values.\n        values: Vec<StoredValue>,\n    },\n    /// Failure.\n    Failure(TrackingCopyError),\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/protocol_upgrade.rs",
    "content": "use casper_types::{execution::Effects, Digest, ProtocolUpgradeConfig};\n\nuse crate::system::protocol_upgrade::ProtocolUpgradeError;\n\n/// Request to upgrade the protocol.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct ProtocolUpgradeRequest {\n    config: ProtocolUpgradeConfig,\n}\n\nimpl ProtocolUpgradeRequest {\n    /// Creates a new instance of ProtocolUpgradeRequest.\n    pub fn new(config: ProtocolUpgradeConfig) -> Self {\n        ProtocolUpgradeRequest { config }\n    }\n\n    /// Get the protocol upgrade config.\n    pub fn config(&self) -> &ProtocolUpgradeConfig {\n        &self.config\n    }\n\n    /// Get the pre_state_hash to apply protocol upgrade to.\n    pub fn pre_state_hash(&self) -> Digest {\n        self.config.pre_state_hash()\n    }\n}\n\n/// Response to attempt to upgrade the protocol.\n#[derive(Debug, Clone)]\npub enum ProtocolUpgradeResult {\n    /// Global state root not found.\n    RootNotFound,\n    /// Protocol upgraded successfully.\n    Success {\n        /// State hash after protocol upgrade is committed to the global state.\n        post_state_hash: Digest,\n        /// Effects of protocol upgrade.\n        effects: Effects,\n    },\n    /// Failed to upgrade protocol.\n    Failure(ProtocolUpgradeError),\n}\n\nimpl ProtocolUpgradeResult {\n    /// Is success.\n    pub fn is_success(&self) -> bool {\n        matches!(self, ProtocolUpgradeResult::Success { .. })\n    }\n\n    /// Is an error\n    pub fn is_err(&self) -> bool {\n        match self {\n            ProtocolUpgradeResult::RootNotFound | ProtocolUpgradeResult::Failure(_) => true,\n            ProtocolUpgradeResult::Success { .. } => false,\n        }\n    }\n}\n\nimpl From<ProtocolUpgradeError> for ProtocolUpgradeResult {\n    fn from(err: ProtocolUpgradeError) -> Self {\n        ProtocolUpgradeResult::Failure(err)\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/prune.rs",
    "content": "//! Support for pruning leaf nodes from the merkle trie.\nuse crate::{\n    global_state::trie_store::operations::TriePruneResult, tracking_copy::TrackingCopyError,\n};\nuse casper_types::{execution::Effects, Digest, Key};\n\n/// Represents the configuration of a prune operation.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct PruneRequest {\n    state_hash: Digest,\n    keys_to_prune: Vec<Key>,\n}\n\nimpl PruneRequest {\n    /// Create new prune config.\n    pub fn new(state_hash: Digest, keys_to_prune: Vec<Key>) -> Self {\n        PruneRequest {\n            state_hash,\n            keys_to_prune,\n        }\n    }\n\n    /// Returns the current state root state hash\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Returns the list of keys to delete.\n    pub fn keys_to_prune(&self) -> &[Key] {\n        &self.keys_to_prune\n    }\n}\n\n/// The result of performing a prune.\n#[derive(Debug, Clone)]\npub enum PruneResult {\n    /// Root not found.\n    RootNotFound,\n    /// Key does not exists.\n    MissingKey,\n    /// Failed to prune.\n    Failure(TrackingCopyError),\n    /// New state root hash generated after elements were pruned.\n    Success {\n        /// State root hash.\n        post_state_hash: Digest,\n        /// Effects of executing a step request.\n        effects: Effects,\n    },\n}\n\nimpl From<TriePruneResult> for PruneResult {\n    fn from(value: TriePruneResult) -> Self {\n        match value {\n            TriePruneResult::Pruned(post_state_hash) => PruneResult::Success {\n                post_state_hash,\n                effects: Effects::default(),\n            },\n            TriePruneResult::MissingKey => PruneResult::MissingKey,\n            TriePruneResult::RootNotFound => PruneResult::RootNotFound,\n            TriePruneResult::Failure(gse) => PruneResult::Failure(TrackingCopyError::Storage(gse)),\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/query.rs",
    "content": "//! Support for global state queries.\nuse casper_types::{global_state::TrieMerkleProof, Digest, Key, StoredValue};\n\nuse crate::tracking_copy::{TrackingCopyError, TrackingCopyQueryResult};\n\n/// Request for a global state query.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct QueryRequest {\n    state_hash: Digest,\n    key: Key,\n    path: Vec<String>,\n}\n\nimpl QueryRequest {\n    /// Creates new request object.\n    pub fn new(state_hash: Digest, key: Key, path: Vec<String>) -> Self {\n        QueryRequest {\n            state_hash,\n            key,\n            path,\n        }\n    }\n\n    /// Returns state root hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Returns a key.\n    pub fn key(&self) -> Key {\n        self.key\n    }\n\n    /// Returns a query path.\n    pub fn path(&self) -> &[String] {\n        &self.path\n    }\n}\n\nimpl From<TrackingCopyQueryResult> for QueryResult {\n    fn from(tracking_copy_query_result: TrackingCopyQueryResult) -> Self {\n        match tracking_copy_query_result {\n            TrackingCopyQueryResult::ValueNotFound(message) => QueryResult::ValueNotFound(message),\n            TrackingCopyQueryResult::CircularReference(message) => {\n                QueryResult::Failure(TrackingCopyError::CircularReference(message))\n            }\n            TrackingCopyQueryResult::Success { value, proofs } => {\n                let value = Box::new(value);\n                QueryResult::Success { value, proofs }\n            }\n            TrackingCopyQueryResult::DepthLimit { depth } => {\n                QueryResult::Failure(TrackingCopyError::QueryDepthLimit { depth })\n            }\n            TrackingCopyQueryResult::RootNotFound => QueryResult::RootNotFound,\n        }\n    }\n}\n\n/// Result of a global state query request.\n#[derive(Debug)]\npub enum QueryResult {\n    /// Invalid state root hash.\n    RootNotFound,\n    /// Value not found.\n    
ValueNotFound(String),\n    /// Successful query.\n    Success {\n        /// Stored value under a path.\n        value: Box<StoredValue>,\n        /// Merkle proof of the query.\n        proofs: Vec<TrieMerkleProof<Key, StoredValue>>,\n    },\n    /// Tracking Copy Error\n    Failure(TrackingCopyError),\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/round_seigniorage.rs",
    "content": "use crate::tracking_copy::TrackingCopyError;\nuse casper_types::{Digest, ProtocolVersion, U512};\nuse num_rational::Ratio;\n\n/// Request to get the current round seigniorage rate.\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub struct RoundSeigniorageRateRequest {\n    state_hash: Digest,\n    protocol_version: ProtocolVersion,\n}\n\nimpl RoundSeigniorageRateRequest {\n    /// Create instance of RoundSeigniorageRateRequest.\n    pub fn new(state_hash: Digest, protocol_version: ProtocolVersion) -> Self {\n        RoundSeigniorageRateRequest {\n            state_hash,\n            protocol_version,\n        }\n    }\n\n    /// Returns state root hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Returns the protocol version.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n}\n\n/// Represents a result of a `round_seigniorage_rate` request.\n#[derive(Debug)]\npub enum RoundSeigniorageRateResult {\n    /// Invalid state root hash.\n    RootNotFound,\n    /// The mint is not found.\n    MintNotFound,\n    /// Value not found.\n    ValueNotFound(String),\n    /// The round seigniorage rate at the specified state hash.\n    Success {\n        /// The current rate.\n        rate: Ratio<U512>,\n    },\n    /// Failure.\n    Failure(TrackingCopyError),\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/seigniorage_recipients.rs",
    "content": "//! Support for querying seigniorage recipients.\n\nuse crate::tracking_copy::TrackingCopyError;\nuse casper_types::{system::auction::SeigniorageRecipientsSnapshot, Digest};\nuse num_rational::Ratio;\nuse std::fmt::{Display, Formatter};\n\n/// Request for seigniorage recipients.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct SeigniorageRecipientsRequest {\n    state_hash: Digest,\n}\n\nimpl SeigniorageRecipientsRequest {\n    /// Constructs a new SeigniorageRecipientsRequest.\n    pub fn new(state_hash: Digest) -> Self {\n        SeigniorageRecipientsRequest { state_hash }\n    }\n\n    /// Get the state hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n}\n\n/// Result enum that represents all possible outcomes of a seignorage recipients request.\n#[derive(Debug)]\npub enum SeigniorageRecipientsResult {\n    /// Returned if auction is not found. This is a catastrophic outcome.\n    AuctionNotFound,\n    /// Returned if a passed state root hash is not found. This is recoverable.\n    RootNotFound,\n    /// Value not found. This is not erroneous if the record does not exist.\n    ValueNotFound(String),\n    /// There is no systemic issue, but the query itself errored.\n    Failure(TrackingCopyError),\n    /// The query succeeded.\n    Success {\n        /// Seigniorage recipients.\n        seigniorage_recipients: SeigniorageRecipientsSnapshot,\n        /// The rewards ratio for the given snapshot\n        rewards_ratio: Ratio<u64>,\n    },\n}\n\nimpl SeigniorageRecipientsResult {\n    /// Returns true if success.\n    pub fn is_success(&self) -> bool {\n        matches!(self, SeigniorageRecipientsResult::Success { .. 
})\n    }\n\n    /// Takes seigniorage recipients.\n    pub fn into_option(self) -> Option<SeigniorageRecipientsSnapshot> {\n        match self {\n            SeigniorageRecipientsResult::AuctionNotFound\n            | SeigniorageRecipientsResult::RootNotFound\n            | SeigniorageRecipientsResult::ValueNotFound(_)\n            | SeigniorageRecipientsResult::Failure(_) => None,\n            SeigniorageRecipientsResult::Success {\n                seigniorage_recipients,\n                ..\n            } => Some(seigniorage_recipients),\n        }\n    }\n}\n\nimpl Display for SeigniorageRecipientsResult {\n    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {\n        match self {\n            SeigniorageRecipientsResult::AuctionNotFound => write!(f, \"system auction not found\"),\n            SeigniorageRecipientsResult::RootNotFound => write!(f, \"state root not found\"),\n            SeigniorageRecipientsResult::ValueNotFound(msg) => {\n                write!(f, \"value not found: {}\", msg)\n            }\n            SeigniorageRecipientsResult::Failure(tce) => write!(f, \"{}\", tce),\n            SeigniorageRecipientsResult::Success { .. } => {\n                write!(f, \"success\")\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/step.rs",
    "content": "//! Support for a step method.\n//!\n//! A step request executes auction code, slashes validators, evicts validators and distributes\n//! rewards.\n\nuse std::vec::Vec;\nuse thiserror::Error;\n\nuse casper_types::{execution::Effects, CLValueError, Digest, EraId, ProtocolVersion, PublicKey};\n\nuse crate::{\n    global_state::error::Error as GlobalStateError,\n    system::runtime_native::{Config, TransferConfig},\n    tracking_copy::TrackingCopyError,\n};\n\n/// The definition of a slash item.\n#[derive(Debug, Clone)]\npub struct SlashItem {\n    /// The public key of the validator that will be slashed.\n    pub validator_id: PublicKey,\n}\n\nimpl SlashItem {\n    /// Creates a new slash item.\n    pub fn new(validator_id: PublicKey) -> Self {\n        Self { validator_id }\n    }\n}\n\n/// The definition of a reward item.\n#[derive(Debug, Clone)]\npub struct RewardItem {\n    /// The public key of the validator that will be rewarded.\n    pub validator_id: PublicKey,\n    /// Amount of motes that will be distributed as rewards.\n    pub value: u64,\n}\n\nimpl RewardItem {\n    /// Creates new reward item.\n    pub fn new(validator_id: PublicKey, value: u64) -> Self {\n        Self {\n            validator_id,\n            value,\n        }\n    }\n}\n\n/// The definition of an evict item.\n#[derive(Debug, Clone)]\npub struct EvictItem {\n    /// The public key of the validator that will be evicted.\n    pub validator_id: PublicKey,\n}\n\nimpl EvictItem {\n    /// Creates new evict item.\n    pub fn new(validator_id: PublicKey) -> Self {\n        Self { validator_id }\n    }\n}\n\n/// Representation of a step request.\n#[derive(Debug)]\npub struct StepRequest {\n    /// Config\n    config: Config,\n\n    /// State root hash.\n    state_hash: Digest,\n\n    /// Protocol version for this request.\n    protocol_version: ProtocolVersion,\n    /// List of validators to be slashed.\n    ///\n    /// A slashed validator is removed from the next validator 
set.\n    slash_items: Vec<SlashItem>,\n    /// List of validators to be evicted.\n    ///\n    /// Compared to a slashing, evictions are deactivating a given validator, but his stake is\n    /// unchanged. A further re-activation is possible.\n    evict_items: Vec<EvictItem>,\n    /// Specifies which era validators will be returned based on `next_era_id`.\n    ///\n    /// Intended use is to always specify the current era id + 1 which will return computed era at\n    /// the end of this step request.\n    next_era_id: EraId,\n\n    /// Timestamp in milliseconds representing end of the current era.\n    era_end_timestamp_millis: u64,\n}\n\nimpl StepRequest {\n    /// Creates new step request.\n    #[allow(clippy::too_many_arguments)]\n    pub fn new(\n        config: Config,\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        slash_items: Vec<SlashItem>,\n        evict_items: Vec<EvictItem>,\n        next_era_id: EraId,\n        era_end_timestamp_millis: u64,\n    ) -> Self {\n        Self {\n            config,\n            state_hash,\n            protocol_version,\n            slash_items,\n            evict_items,\n            next_era_id,\n            era_end_timestamp_millis,\n        }\n    }\n\n    /// Returns the config.\n    pub fn config(&self) -> &Config {\n        &self.config\n    }\n\n    /// Returns the transfer config.\n    pub fn transfer_config(&self) -> TransferConfig {\n        self.config.transfer_config().clone()\n    }\n\n    /// Returns list of slashed validators.\n    pub fn slashed_validators(&self) -> Vec<PublicKey> {\n        self.slash_items\n            .iter()\n            .map(|si| si.validator_id.clone())\n            .collect()\n    }\n\n    /// Returns pre_state_hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Returns protocol_version.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n\n    /// Returns 
slash_items.\n    pub fn slash_items(&self) -> &Vec<SlashItem> {\n        &self.slash_items\n    }\n\n    /// Returns evict_items.\n    pub fn evict_items(&self) -> &Vec<EvictItem> {\n        &self.evict_items\n    }\n    /// Returns next_era_id.\n    pub fn next_era_id(&self) -> EraId {\n        self.next_era_id\n    }\n\n    /// Returns era_end_timestamp_millis.\n    pub fn era_end_timestamp_millis(&self) -> u64 {\n        self.era_end_timestamp_millis\n    }\n}\n\n/// Representation of all possible failures of a step request.\n#[derive(Clone, Error, Debug)]\npub enum StepError {\n    /// Error using the auction contract.\n    #[error(\"Auction error\")]\n    Auction,\n    /// Error executing a slashing operation.\n    #[error(\"Slashing error\")]\n    SlashingError,\n    /// Tracking copy error.\n    #[error(\"{0}\")]\n    TrackingCopy(TrackingCopyError),\n    /// Failed to find auction contract.\n    #[error(\"Auction not found\")]\n    AuctionNotFound,\n    /// Failed to find mint contract.\n    #[error(\"Mint not found\")]\n    MintNotFound,\n}\n\nimpl From<TrackingCopyError> for StepError {\n    fn from(tce: TrackingCopyError) -> Self {\n        Self::TrackingCopy(tce)\n    }\n}\n\nimpl From<GlobalStateError> for StepError {\n    fn from(gse: GlobalStateError) -> Self {\n        Self::TrackingCopy(TrackingCopyError::Storage(gse))\n    }\n}\n\nimpl From<CLValueError> for StepError {\n    fn from(cve: CLValueError) -> Self {\n        StepError::TrackingCopy(TrackingCopyError::CLValue(cve))\n    }\n}\n\n/// Outcome of running step process.\n#[derive(Debug)]\npub enum StepResult {\n    /// Global state root not found.\n    RootNotFound,\n    /// Step process ran successfully.\n    Success {\n        /// State hash after step outcome is committed to the global state.\n        post_state_hash: Digest,\n        /// Effects of the step process.\n        effects: Effects,\n    },\n    /// Failed to execute step.\n    Failure(StepError),\n}\n\nimpl StepResult {\n    
/// Returns if step is successful.\n    pub fn is_success(&self) -> bool {\n        matches!(self, StepResult::Success { .. })\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/system_entity_registry.rs",
    "content": "use crate::tracking_copy::TrackingCopyError;\nuse casper_types::{\n    system::{AUCTION, HANDLE_PAYMENT, MINT},\n    Digest, Key, ProtocolVersion, SystemHashRegistry,\n};\n\n/// Used to specify is the requestor wants the registry itself or a named entry within it.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub enum SystemEntityRegistrySelector {\n    /// Requests all system entity entries.\n    All,\n    /// Requests system entity by name.\n    ByName(String),\n}\n\nimpl SystemEntityRegistrySelector {\n    /// Create instance asking for the entire registry.\n    pub fn all() -> Self {\n        SystemEntityRegistrySelector::All\n    }\n\n    /// Create instance asking for mint.\n    pub fn mint() -> Self {\n        SystemEntityRegistrySelector::ByName(MINT.to_string())\n    }\n\n    /// Create instance asking for auction.\n    pub fn auction() -> Self {\n        SystemEntityRegistrySelector::ByName(AUCTION.to_string())\n    }\n\n    /// Create instance asking for handle payment.\n    pub fn handle_payment() -> Self {\n        SystemEntityRegistrySelector::ByName(HANDLE_PAYMENT.to_string())\n    }\n\n    /// Name of selected entity, if any.\n    pub fn name(&self) -> Option<String> {\n        match self {\n            SystemEntityRegistrySelector::All => None,\n            SystemEntityRegistrySelector::ByName(name) => Some(name.clone()),\n        }\n    }\n}\n\n/// Represents a request to obtain the system entity registry or an entry within it.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct SystemEntityRegistryRequest {\n    /// State root hash.\n    state_hash: Digest,\n    /// Protocol version.\n    protocol_version: ProtocolVersion,\n    /// Selector.\n    selector: SystemEntityRegistrySelector,\n    enable_addressable_entity: bool,\n}\n\nimpl SystemEntityRegistryRequest {\n    /// Create new request.\n    pub fn new(\n        state_hash: Digest,\n        protocol_version: ProtocolVersion,\n        selector: SystemEntityRegistrySelector,\n       
 enable_addressable_entity: bool,\n    ) -> Self {\n        SystemEntityRegistryRequest {\n            state_hash,\n            protocol_version,\n            selector,\n            enable_addressable_entity,\n        }\n    }\n\n    /// Returns the state hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Returns the current selector.\n    pub fn selector(&self) -> &SystemEntityRegistrySelector {\n        &self.selector\n    }\n\n    /// Protocol version.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n\n    /// Enable the addressable entity and migrate accounts/contracts to entities.\n    pub fn enable_addressable_entity(&self) -> bool {\n        self.enable_addressable_entity\n    }\n}\n\n/// The payload of a successful request.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub enum SystemEntityRegistryPayload {\n    /// All registry entries.\n    All(SystemHashRegistry),\n    /// Specific system entity registry entry.\n    EntityKey(Key),\n}\n\n/// The result of a system entity registry request.\n#[derive(Debug)]\npub enum SystemEntityRegistryResult {\n    /// Invalid state root hash.\n    RootNotFound,\n    /// The system contract registry was not found. This is a valid outcome\n    /// on older networks, which did not have the system contract registry prior\n    /// to protocol version 1.4\n    SystemEntityRegistryNotFound,\n    /// The named entity was not found in the registry.\n    NamedEntityNotFound(String),\n    /// Successful request.\n    Success {\n        /// What was asked for.\n        selected: SystemEntityRegistrySelector,\n        /// The payload asked for.\n        payload: SystemEntityRegistryPayload,\n    },\n    /// Failed to get requested data.\n    Failure(TrackingCopyError),\n}\n\nimpl SystemEntityRegistryResult {\n    /// Is success.\n    pub fn is_success(&self) -> bool {\n        matches!(self, SystemEntityRegistryResult::Success { .. 
})\n    }\n\n    /// As registry payload.\n    pub fn as_registry_payload(&self) -> Result<SystemEntityRegistryPayload, String> {\n        match self {\n            SystemEntityRegistryResult::RootNotFound => Err(\"Root not found\".to_string()),\n            SystemEntityRegistryResult::SystemEntityRegistryNotFound => {\n                Err(\"System entity registry not found\".to_string())\n            }\n            SystemEntityRegistryResult::NamedEntityNotFound(name) => {\n                Err(format!(\"Named entity not found: {:?}\", name))\n            }\n            SystemEntityRegistryResult::Failure(tce) => Err(format!(\"{:?}\", tce)),\n            SystemEntityRegistryResult::Success { payload, .. } => Ok(payload.clone()),\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/tagged_values.rs",
    "content": "//! Support for obtaining all values under the given key tag.\nuse crate::tracking_copy::TrackingCopyError;\nuse casper_types::{Digest, KeyTag, StoredValue};\n\n/// Tagged values selector.\n#[derive(Debug, Copy, Clone, PartialEq, Eq)]\npub enum TaggedValuesSelection {\n    /// All values under the specified key tag.\n    All(KeyTag),\n}\n\n/// Represents a request to obtain all values under the given key tag.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct TaggedValuesRequest {\n    state_hash: Digest,\n    selection: TaggedValuesSelection,\n}\n\nimpl TaggedValuesRequest {\n    /// Creates new request.\n    pub fn new(state_hash: Digest, selection: TaggedValuesSelection) -> Self {\n        Self {\n            state_hash,\n            selection,\n        }\n    }\n\n    /// Returns state root hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Returns key tag.\n    pub fn key_tag(&self) -> KeyTag {\n        match self.selection {\n            TaggedValuesSelection::All(key_tag) => key_tag,\n        }\n    }\n\n    /// Returns selection criteria.\n    pub fn selection(&self) -> TaggedValuesSelection {\n        self.selection\n    }\n}\n\n/// Represents a result of a `get_all_values` request.\n#[derive(Debug)]\npub enum TaggedValuesResult {\n    /// Invalid state root hash.\n    RootNotFound,\n    /// Contains values returned from the global state.\n    Success {\n        /// The requested selection.\n        selection: TaggedValuesSelection,\n        /// Current values.\n        values: Vec<StoredValue>,\n    },\n    /// Tagged value failure.\n    Failure(TrackingCopyError),\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/total_supply.rs",
    "content": "use crate::tracking_copy::TrackingCopyError;\nuse casper_types::{Digest, ProtocolVersion, U512};\n\n/// Request for total supply.\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub struct TotalSupplyRequest {\n    state_hash: Digest,\n    protocol_version: ProtocolVersion,\n}\n\nimpl TotalSupplyRequest {\n    /// Creates an instance of TotalSupplyRequest.\n    pub fn new(state_hash: Digest, protocol_version: ProtocolVersion) -> Self {\n        TotalSupplyRequest {\n            state_hash,\n            protocol_version,\n        }\n    }\n\n    /// Returns state root hash.\n    pub fn state_hash(&self) -> Digest {\n        self.state_hash\n    }\n\n    /// Returns the protocol version.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n}\n\n/// Represents a result of a `total_supply` request.\n#[derive(Debug)]\npub enum TotalSupplyResult {\n    /// Invalid state root hash.\n    RootNotFound,\n    /// The mint is not found.\n    MintNotFound,\n    /// Value not found.\n    ValueNotFound(String),\n    /// The total supply at the specified state hash.\n    Success {\n        /// The total supply in motes.\n        total_supply: U512,\n    },\n    /// Failed to get total supply.\n    Failure(TrackingCopyError),\n}\n"
  },
  {
    "path": "storage/src/data_access_layer/trie.rs",
    "content": "use casper_types::Digest;\n\nuse crate::global_state::{error::Error as GlobalStateError, trie::TrieRaw};\n\n/// Request for a trie element.\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub struct TrieRequest {\n    trie_key: Digest,\n    chunk_id: Option<u64>,\n}\n\nimpl TrieRequest {\n    /// Creates an instance of TrieRequest.\n    pub fn new(trie_key: Digest, chunk_id: Option<u64>) -> Self {\n        TrieRequest { trie_key, chunk_id }\n    }\n\n    /// Trie key.\n    pub fn trie_key(&self) -> Digest {\n        self.trie_key\n    }\n\n    /// Chunk id.\n    pub fn chunk_id(&self) -> Option<u64> {\n        self.chunk_id\n    }\n\n    /// Has chunk id.\n    pub fn has_chunk_id(&self) -> bool {\n        self.chunk_id.is_some()\n    }\n}\n\n/// A trie element.\n#[derive(Debug)]\npub enum TrieElement {\n    /// Raw bytes.\n    Raw(TrieRaw),\n    /// Chunk.\n    Chunked(TrieRaw, u64),\n}\n\n/// Represents a result of a `trie` request.\n#[derive(Debug)]\npub enum TrieResult {\n    /// Value not found.\n    ValueNotFound(String),\n    /// The trie element at the specified key.\n    Success {\n        /// A trie element.\n        element: TrieElement,\n    },\n    /// Failed to get the trie element.\n    Failure(GlobalStateError),\n}\n\nimpl TrieResult {\n    /// Transform trie result to raw state.\n    pub fn into_raw(self) -> Result<Option<TrieRaw>, GlobalStateError> {\n        match self {\n            TrieResult::ValueNotFound(_) => Ok(None),\n            TrieResult::Success { element } => match element {\n                TrieElement::Raw(raw) | TrieElement::Chunked(raw, _) => Ok(Some(raw)),\n            },\n            TrieResult::Failure(err) => Err(err),\n        }\n    }\n}\n\n/// Request for a trie element to be persisted.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct PutTrieRequest {\n    raw: TrieRaw,\n}\n\nimpl PutTrieRequest {\n    /// Creates an instance of PutTrieRequest.\n    pub fn new(raw: TrieRaw) -> Self {\n        
PutTrieRequest { raw }\n    }\n\n    /// The raw bytes of the trie element.\n    pub fn raw(&self) -> &TrieRaw {\n        &self.raw\n    }\n\n    /// Take raw trie value.\n    pub fn take_raw(self) -> TrieRaw {\n        self.raw\n    }\n}\n\n/// Represents a result of a `put_trie` request.\n#[derive(Debug)]\npub enum PutTrieResult {\n    /// The trie element is persisted.\n    Success {\n        /// The hash of the persisted trie element.\n        hash: Digest,\n    },\n    /// Failed to persist the trie element.\n    Failure(GlobalStateError),\n}\n\nimpl PutTrieResult {\n    /// Returns a Result matching the original api for this functionality.\n    pub fn as_legacy(&self) -> Result<Digest, GlobalStateError> {\n        match self {\n            PutTrieResult::Success { hash } => Ok(*hash),\n            PutTrieResult::Failure(err) => Err(err.clone()),\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/data_access_layer.rs",
    "content": "use crate::global_state::{\n    error::Error as GlobalStateError,\n    state::{CommitProvider, StateProvider},\n};\nuse casper_types::{execution::Effects, Digest};\n\nuse crate::tracking_copy::TrackingCopy;\n\nmod addressable_entity;\n/// Auction provider.\npub mod auction;\n/// Balance provider.\npub mod balance;\nmod balance_hold;\nmod balance_identifier_purse;\n/// Bids provider.\npub mod bids;\nmod block_global;\n/// Block rewards provider.\npub mod block_rewards;\nmod contract;\nmod entry_points;\n/// Era validators provider.\npub mod era_validators;\nmod execution_results_checksum;\nmod fee;\nmod flush;\n/// Forced undelegate provider.\npub mod forced_undelegate;\nmod genesis;\n/// Handle fee provider.\npub mod handle_fee;\nmod handle_refund;\nmod key_prefix;\n/// Message topics.\npub mod message_topics;\n/// Mint provider.\npub mod mint;\n/// Prefixed values provider.\npub mod prefixed_values;\nmod protocol_upgrade;\n/// Prune provider.\npub mod prune;\n/// Query provider.\npub mod query;\nmod round_seigniorage;\nmod seigniorage_recipients;\n/// Step provider.\npub mod step;\nmod system_entity_registry;\n/// Tagged values provider.\npub mod tagged_values;\nmod total_supply;\nmod trie;\n\npub use addressable_entity::{AddressableEntityRequest, AddressableEntityResult};\npub use auction::{AuctionMethod, BiddingRequest, BiddingResult};\npub use balance::{\n    BalanceHolds, BalanceHoldsWithProof, BalanceIdentifier, BalanceRequest, BalanceResult,\n    GasHoldBalanceHandling, ProofHandling, ProofsResult,\n};\npub use balance_hold::{\n    BalanceHoldError, BalanceHoldKind, BalanceHoldMode, BalanceHoldRequest, BalanceHoldResult,\n    InsufficientBalanceHandling,\n};\npub use balance_identifier_purse::{BalanceIdentifierPurseRequest, BalanceIdentifierPurseResult};\npub use bids::{BidsRequest, BidsResult};\npub use block_global::{BlockGlobalKind, BlockGlobalRequest, BlockGlobalResult};\npub use block_rewards::{BlockRewardsError, BlockRewardsRequest, 
BlockRewardsResult};\npub use contract::{ContractRequest, ContractResult};\npub use entry_points::{\n    EntryPointExistsRequest, EntryPointExistsResult, EntryPointRequest, EntryPointResult,\n};\npub use era_validators::{EraValidatorsRequest, EraValidatorsResult};\npub use execution_results_checksum::{\n    ExecutionResultsChecksumRequest, ExecutionResultsChecksumResult,\n    EXECUTION_RESULTS_CHECKSUM_NAME,\n};\npub use fee::{FeeError, FeeRequest, FeeResult};\npub use flush::{FlushRequest, FlushResult};\npub use genesis::{GenesisRequest, GenesisResult};\npub use handle_fee::{HandleFeeMode, HandleFeeRequest, HandleFeeResult};\npub use handle_refund::{HandleRefundMode, HandleRefundRequest, HandleRefundResult};\npub use key_prefix::KeyPrefix;\npub use message_topics::{MessageTopicsRequest, MessageTopicsResult};\npub use mint::{TransferRequest, TransferResult};\npub use protocol_upgrade::{ProtocolUpgradeRequest, ProtocolUpgradeResult};\npub use prune::{PruneRequest, PruneResult};\npub use query::{QueryRequest, QueryResult};\npub use round_seigniorage::{RoundSeigniorageRateRequest, RoundSeigniorageRateResult};\npub use seigniorage_recipients::{SeigniorageRecipientsRequest, SeigniorageRecipientsResult};\npub use step::{EvictItem, RewardItem, SlashItem, StepError, StepRequest, StepResult};\npub use system_entity_registry::{\n    SystemEntityRegistryPayload, SystemEntityRegistryRequest, SystemEntityRegistryResult,\n    SystemEntityRegistrySelector,\n};\npub use total_supply::{TotalSupplyRequest, TotalSupplyResult};\npub use trie::{PutTrieRequest, PutTrieResult, TrieElement, TrieRequest, TrieResult};\n\n/// Anchor struct for block store functionality.\n#[derive(Default, Copy, Clone)]\npub struct BlockStore(());\n\nimpl BlockStore {\n    /// Ctor.\n    pub fn new() -> Self {\n        BlockStore(())\n    }\n}\n\n/// Data access layer.\n#[derive(Copy, Clone)]\npub struct DataAccessLayer<S> {\n    /// Block store instance.\n    pub block_store: BlockStore,\n    /// Memoized 
state.\n    pub state: S,\n    /// Max query depth.\n    pub max_query_depth: u64,\n    /// Enable the addressable entity capability.\n    pub enable_addressable_entity: bool,\n}\n\nimpl<S> DataAccessLayer<S> {\n    /// Returns reference to current state of the data access layer.\n    pub fn state(&self) -> &S {\n        &self.state\n    }\n}\n\nimpl<S> CommitProvider for DataAccessLayer<S>\nwhere\n    S: CommitProvider,\n{\n    fn commit_effects(\n        &self,\n        state_hash: Digest,\n        effects: Effects,\n    ) -> Result<Digest, GlobalStateError> {\n        self.state.commit_effects(state_hash, effects)\n    }\n\n    fn commit_values(\n        &self,\n        state_hash: Digest,\n        values_to_write: Vec<(casper_types::Key, casper_types::StoredValue)>,\n        keys_to_prune: std::collections::BTreeSet<casper_types::Key>,\n    ) -> Result<Digest, GlobalStateError> {\n        self.state\n            .commit_values(state_hash, values_to_write, keys_to_prune)\n    }\n}\n\nimpl<S> StateProvider for DataAccessLayer<S>\nwhere\n    S: StateProvider,\n{\n    type Reader = S::Reader;\n\n    fn flush(&self, request: FlushRequest) -> FlushResult {\n        self.state.flush(request)\n    }\n\n    fn empty_root(&self) -> Digest {\n        self.state.empty_root()\n    }\n\n    fn tracking_copy(\n        &self,\n        hash: Digest,\n    ) -> Result<Option<TrackingCopy<S::Reader>>, GlobalStateError> {\n        match self.state.checkout(hash)? 
{\n            Some(reader) => Ok(Some(TrackingCopy::new(\n                reader,\n                self.max_query_depth,\n                self.enable_addressable_entity,\n            ))),\n            None => Ok(None),\n        }\n    }\n\n    fn checkout(&self, state_hash: Digest) -> Result<Option<Self::Reader>, GlobalStateError> {\n        self.state.checkout(state_hash)\n    }\n\n    fn trie(&self, request: TrieRequest) -> TrieResult {\n        self.state.trie(request)\n    }\n\n    fn put_trie(&self, request: PutTrieRequest) -> PutTrieResult {\n        self.state.put_trie(request)\n    }\n\n    fn missing_children(&self, trie_raw: &[u8]) -> Result<Vec<Digest>, GlobalStateError> {\n        self.state.missing_children(trie_raw)\n    }\n\n    fn enable_entity(&self) -> bool {\n        self.state.enable_entity()\n    }\n}\n"
  },
  {
    "path": "storage/src/global_state/error.rs",
    "content": "use std::sync;\n\nuse thiserror::Error;\n\nuse casper_types::{bytesrepr, Digest, Key};\n\nuse crate::global_state::{state::CommitError, trie::TrieRaw};\n\nuse super::trie_store::TrieStoreCacheError;\n\n/// Error enum representing possible errors in global state interactions.\n#[derive(Debug, Clone, Error, PartialEq, Eq)]\n#[non_exhaustive]\npub enum Error {\n    /// LMDB error returned from underlying `lmdb` crate.\n    #[error(transparent)]\n    Lmdb(#[from] lmdb::Error),\n\n    /// (De)serialization error.\n    #[error(\"{0}\")]\n    BytesRepr(#[from] bytesrepr::Error),\n\n    /// Concurrency error.\n    #[error(\"Another thread panicked while holding a lock\")]\n    Poison,\n\n    /// Error committing to execution engine.\n    #[error(transparent)]\n    Commit(#[from] CommitError),\n\n    /// Invalid state root hash.\n    #[error(\"RootNotFound\")]\n    RootNotFound,\n\n    /// Failed to put a trie node into global state because some of its children were missing.\n    #[error(\"Failed to put a trie into global state because some of its children were missing\")]\n    MissingTrieNodeChildren(Digest, TrieRaw, Vec<Digest>),\n\n    /// Failed to prune listed keys.\n    #[error(\"Pruning attempt failed.\")]\n    FailedToPrune(Vec<Key>),\n\n    /// Cannot provide proofs over working state in a cache (programmer error).\n    #[error(\"Attempt to generate proofs using non-empty cache.\")]\n    CannotProvideProofsOverCachedData,\n\n    /// Encountered a cache error.\n    #[error(\"Cache error\")]\n    CacheError(#[from] TrieStoreCacheError),\n}\n\nimpl<T> From<sync::PoisonError<T>> for Error {\n    fn from(_error: sync::PoisonError<T>) -> Self {\n        Error::Poison\n    }\n}\n"
  },
  {
    "path": "storage/src/global_state/state/lmdb.rs",
    "content": "use itertools::Itertools;\nuse std::{ops::Deref, sync::Arc};\nuse tracing::{error, warn};\n\nuse lmdb::{DatabaseFlags, RwTransaction};\n\nuse tempfile::TempDir;\n\nuse casper_types::{\n    execution::{Effects, TransformKindV2, TransformV2},\n    global_state::TrieMerkleProof,\n    Digest, Key, StoredValue,\n};\n\nuse super::CommitError;\nuse crate::{\n    data_access_layer::{\n        DataAccessLayer, FlushRequest, FlushResult, PutTrieRequest, PutTrieResult, TrieElement,\n        TrieRequest, TrieResult,\n    },\n    global_state::{\n        error::Error as GlobalStateError,\n        state::{\n            commit, put_stored_values, scratch::ScratchGlobalState, CommitProvider,\n            ScratchProvider, StateProvider, StateReader,\n        },\n        store::Store,\n        transaction_source::{lmdb::LmdbEnvironment, Transaction, TransactionSource},\n        trie::{operations::create_hashed_empty_trie, Trie, TrieRaw},\n        trie_store::{\n            lmdb::{LmdbTrieStore, ScratchTrieStore},\n            operations::{\n                keys_with_prefix, missing_children, prune, put_trie, read, read_with_proof,\n                ReadResult, TriePruneResult,\n            },\n        },\n        DEFAULT_ENABLE_ENTITY, DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_MAX_READERS,\n    },\n    tracking_copy::TrackingCopy,\n};\n\n/// Global state implemented against LMDB as a backing data store.\npub struct LmdbGlobalState {\n    /// Environment for LMDB.\n    pub(crate) environment: Arc<LmdbEnvironment>,\n    /// Trie store held within LMDB.\n    pub(crate) trie_store: Arc<LmdbTrieStore>,\n    /// Empty root hash used for a new trie.\n    pub(crate) empty_root_hash: Digest,\n    /// Max query depth\n    pub max_query_depth: u64,\n    /// Enable the addressable entity and migrate accounts/contracts to entities.\n    pub enable_entity: bool,\n}\n\n/// Represents a \"view\" of global state at a particular root hash.\npub struct LmdbGlobalStateView 
{\n    /// Environment for LMDB.\n    pub(crate) environment: Arc<LmdbEnvironment>,\n    /// Trie store held within LMDB.\n    pub(crate) store: Arc<LmdbTrieStore>,\n    /// Root hash of this \"view\".\n    pub(crate) root_hash: Digest,\n}\n\nimpl LmdbGlobalState {\n    /// Creates an empty state from an existing environment and trie_store.\n    pub fn empty(\n        environment: Arc<LmdbEnvironment>,\n        trie_store: Arc<LmdbTrieStore>,\n        max_query_depth: u64,\n        enable_entity: bool,\n    ) -> Result<Self, GlobalStateError> {\n        let root_hash: Digest = {\n            let (root_hash, root) = compute_empty_root_hash()?;\n            let mut txn = environment.create_read_write_txn()?;\n            trie_store.put(&mut txn, &root_hash, &root)?;\n            txn.commit()?;\n            environment.env().sync(true)?;\n            root_hash\n        };\n        Ok(LmdbGlobalState::new(\n            environment,\n            trie_store,\n            root_hash,\n            max_query_depth,\n            enable_entity,\n        ))\n    }\n\n    /// Creates a state from an existing environment, store, and root_hash.\n    /// Intended to be used for testing.\n    pub fn new(\n        environment: Arc<LmdbEnvironment>,\n        trie_store: Arc<LmdbTrieStore>,\n        empty_root_hash: Digest,\n        max_query_depth: u64,\n        enable_entity: bool,\n    ) -> Self {\n        LmdbGlobalState {\n            environment,\n            trie_store,\n            empty_root_hash,\n            max_query_depth,\n            enable_entity,\n        }\n    }\n\n    /// Creates an in-memory cache for changes written.\n    pub fn create_scratch(&self) -> ScratchGlobalState {\n        ScratchGlobalState::new(\n            Arc::clone(&self.environment),\n            Arc::clone(&self.trie_store),\n            self.empty_root_hash,\n            self.max_query_depth,\n            self.enable_entity,\n        )\n    }\n\n    /// Gets a scratch trie store.\n    pub(crate) 
fn get_scratch_store(&self) -> ScratchTrieStore {\n        ScratchTrieStore::new(Arc::clone(&self.trie_store), Arc::clone(&self.environment))\n    }\n\n    /// Write stored values to LMDB.\n    pub fn put_stored_values(\n        &self,\n        prestate_hash: Digest,\n        stored_values: Vec<(Key, StoredValue)>,\n    ) -> Result<Digest, GlobalStateError> {\n        let scratch_trie = self.get_scratch_store();\n        let new_state_root = put_stored_values::<_, _, GlobalStateError>(\n            &scratch_trie,\n            &scratch_trie,\n            prestate_hash,\n            stored_values,\n        )?;\n        scratch_trie.write_root_to_db(new_state_root)?;\n        Ok(new_state_root)\n    }\n\n    /// Get a reference to the lmdb global state's environment.\n    #[must_use]\n    pub fn environment(&self) -> &LmdbEnvironment {\n        &self.environment\n    }\n\n    /// Get a reference to the lmdb global state's trie store.\n    #[must_use]\n    pub fn trie_store(&self) -> &LmdbTrieStore {\n        &self.trie_store\n    }\n\n    /// Returns an initial, empty root hash of the underlying trie.\n    pub fn empty_state_root_hash(&self) -> Digest {\n        self.empty_root_hash\n    }\n}\n\nfn compute_empty_root_hash() -> Result<(Digest, Trie<Key, StoredValue>), GlobalStateError> {\n    let (root_hash, root) = create_hashed_empty_trie::<Key, StoredValue>()?;\n    Ok((root_hash, root))\n}\n\nimpl StateReader<Key, StoredValue> for LmdbGlobalStateView {\n    type Error = GlobalStateError;\n\n    fn read(&self, key: &Key) -> Result<Option<StoredValue>, Self::Error> {\n        let txn = self.environment.create_read_txn()?;\n        let ret = match read::<Key, StoredValue, lmdb::RoTransaction, LmdbTrieStore, Self::Error>(\n            &txn,\n            self.store.deref(),\n            &self.root_hash,\n            key,\n        )? 
{\n            ReadResult::Found(value) => Some(value),\n            ReadResult::NotFound => None,\n            ReadResult::RootNotFound => panic!(\"LmdbGlobalState has invalid root\"),\n        };\n        txn.commit()?;\n        Ok(ret)\n    }\n\n    fn read_with_proof(\n        &self,\n        key: &Key,\n    ) -> Result<Option<TrieMerkleProof<Key, StoredValue>>, Self::Error> {\n        let txn = self.environment.create_read_txn()?;\n        let ret = match read_with_proof::<\n            Key,\n            StoredValue,\n            lmdb::RoTransaction,\n            LmdbTrieStore,\n            Self::Error,\n        >(&txn, self.store.deref(), &self.root_hash, key)?\n        {\n            ReadResult::Found(value) => Some(value),\n            ReadResult::NotFound => None,\n            ReadResult::RootNotFound => panic!(\"LmdbGlobalState has invalid root\"),\n        };\n        txn.commit()?;\n        Ok(ret)\n    }\n\n    fn keys_with_prefix(&self, prefix: &[u8]) -> Result<Vec<Key>, Self::Error> {\n        let txn = self.environment.create_read_txn()?;\n        let keys_iter = keys_with_prefix::<Key, StoredValue, _, _>(\n            &txn,\n            self.store.deref(),\n            &self.root_hash,\n            prefix,\n        );\n        let mut ret = Vec::new();\n        for result in keys_iter {\n            match result {\n                Ok(key) => ret.push(key),\n                Err(error) => return Err(error),\n            }\n        }\n        txn.commit()?;\n        Ok(ret)\n    }\n}\n\nimpl CommitProvider for LmdbGlobalState {\n    fn commit_effects(\n        &self,\n        prestate_hash: Digest,\n        effects: Effects,\n    ) -> Result<Digest, GlobalStateError> {\n        commit::<LmdbEnvironment, LmdbTrieStore, GlobalStateError>(\n            &self.environment,\n            &self.trie_store,\n            prestate_hash,\n            effects,\n        )\n    }\n\n    fn commit_values(\n        &self,\n        prestate_hash: Digest,\n        
values_to_write: Vec<(Key, StoredValue)>,\n        keys_to_prune: std::collections::BTreeSet<Key>,\n    ) -> Result<Digest, GlobalStateError> {\n        let post_write_hash = put_stored_values::<LmdbEnvironment, LmdbTrieStore, GlobalStateError>(\n            &self.environment,\n            &self.trie_store,\n            prestate_hash,\n            values_to_write,\n        )?;\n\n        let mut txn = self.environment.create_read_write_txn()?;\n\n        let maybe_root: Option<Trie<Key, StoredValue>> =\n            self.trie_store.get(&txn, &post_write_hash)?;\n\n        if maybe_root.is_none() {\n            return Err(CommitError::RootNotFound(post_write_hash).into());\n        };\n\n        let mut state_hash = post_write_hash;\n\n        for key in keys_to_prune.into_iter() {\n            let prune_result = prune::<Key, StoredValue, _, LmdbTrieStore, GlobalStateError>(\n                &mut txn,\n                &self.trie_store,\n                &state_hash,\n                &key,\n            )?;\n\n            match prune_result {\n                TriePruneResult::Pruned(root_hash) => {\n                    state_hash = root_hash;\n                }\n                TriePruneResult::MissingKey => {\n                    warn!(\"commit: pruning attempt failed for {}\", key);\n                }\n                TriePruneResult::RootNotFound => {\n                    error!(?state_hash, ?key, \"commit: root not found\");\n                    return Err(CommitError::WriteRootNotFound(state_hash).into());\n                }\n                TriePruneResult::Failure(gse) => {\n                    return Err(gse);\n                }\n            }\n        }\n\n        txn.commit()?;\n\n        Ok(state_hash)\n    }\n}\n\nimpl StateProvider for LmdbGlobalState {\n    type Reader = LmdbGlobalStateView;\n\n    fn flush(&self, _: FlushRequest) -> FlushResult {\n        if self.environment.is_manual_sync_enabled() {\n            match self.environment.sync() {\n         
       Ok(_) => FlushResult::Success,\n                Err(err) => FlushResult::Failure(err.into()),\n            }\n        } else {\n            FlushResult::ManualSyncDisabled\n        }\n    }\n\n    fn checkout(&self, state_hash: Digest) -> Result<Option<Self::Reader>, GlobalStateError> {\n        let txn = self.environment.create_read_txn()?;\n        let maybe_root: Option<Trie<Key, StoredValue>> = self.trie_store.get(&txn, &state_hash)?;\n        let maybe_state = maybe_root.map(|_| LmdbGlobalStateView {\n            environment: Arc::clone(&self.environment),\n            store: Arc::clone(&self.trie_store),\n            root_hash: state_hash,\n        });\n        txn.commit()?;\n        Ok(maybe_state)\n    }\n\n    fn tracking_copy(\n        &self,\n        hash: Digest,\n    ) -> Result<Option<TrackingCopy<Self::Reader>>, GlobalStateError> {\n        match self.checkout(hash)? {\n            Some(reader) => Ok(Some(TrackingCopy::new(\n                reader,\n                self.max_query_depth,\n                self.enable_entity,\n            ))),\n            None => Ok(None),\n        }\n    }\n\n    fn empty_root(&self) -> Digest {\n        self.empty_root_hash\n    }\n\n    fn trie(&self, request: TrieRequest) -> TrieResult {\n        let key = request.trie_key();\n        let txn = match self.environment.create_read_txn() {\n            Ok(ro) => ro,\n            Err(err) => return TrieResult::Failure(err.into()),\n        };\n        let raw = match Store::<Digest, Trie<Digest, StoredValue>>::get_raw(\n            &*self.trie_store,\n            &txn,\n            &key,\n        ) {\n            Ok(Some(bytes)) => TrieRaw::new(bytes),\n            Ok(None) => {\n                return TrieResult::ValueNotFound(key.to_string());\n            }\n            Err(err) => {\n                return TrieResult::Failure(err);\n            }\n        };\n        match txn.commit() {\n            Ok(_) => match request.chunk_id() {\n                
Some(chunk_id) => TrieResult::Success {\n                    element: TrieElement::Chunked(raw, chunk_id),\n                },\n                None => TrieResult::Success {\n                    element: TrieElement::Raw(raw),\n                },\n            },\n            Err(err) => TrieResult::Failure(err.into()),\n        }\n    }\n\n    /// Persists a trie element.\n    fn put_trie(&self, request: PutTrieRequest) -> PutTrieResult {\n        // We only allow bottom-up persistence of trie elements.\n        // Thus we do not persist the element unless we already have all of its descendants\n        // persisted. It is safer to throw away the element and rely on a follow up attempt\n        // to reacquire it later than to allow it to be persisted which would allow runtime\n        // access to acquire a root hash that is missing one or more children which will\n        // result in undefined behavior if a process attempts to access elements below that\n        // root which are not held locally.\n        let bytes = request.raw().inner();\n        match self.missing_children(bytes) {\n            Ok(missing_children) => {\n                if !missing_children.is_empty() {\n                    let hash = Digest::hash_into_chunks_if_necessary(bytes);\n                    return PutTrieResult::Failure(GlobalStateError::MissingTrieNodeChildren(\n                        hash,\n                        request.take_raw(),\n                        missing_children,\n                    ));\n                }\n            }\n            Err(err) => return PutTrieResult::Failure(err),\n        };\n\n        match self.environment.create_read_write_txn() {\n            Ok(mut txn) => {\n                match put_trie::<Key, StoredValue, RwTransaction, LmdbTrieStore, GlobalStateError>(\n                    &mut txn,\n                    &self.trie_store,\n                    bytes,\n                ) {\n                    Ok(hash) => match txn.commit() {\n                
        Ok(_) => PutTrieResult::Success { hash },\n                        Err(err) => PutTrieResult::Failure(err.into()),\n                    },\n                    Err(err) => PutTrieResult::Failure(err),\n                }\n            }\n            Err(err) => PutTrieResult::Failure(err.into()),\n        }\n    }\n\n    /// Finds all of the keys of missing directly descendant `Trie<K,V>` values.\n    fn missing_children(&self, trie_raw: &[u8]) -> Result<Vec<Digest>, GlobalStateError> {\n        let txn = self.environment.create_read_txn()?;\n        let missing_hashes = missing_children::<\n            Key,\n            StoredValue,\n            lmdb::RoTransaction,\n            LmdbTrieStore,\n            GlobalStateError,\n        >(&txn, self.trie_store.deref(), trie_raw)?;\n        txn.commit()?;\n        Ok(missing_hashes)\n    }\n\n    fn enable_entity(&self) -> bool {\n        self.enable_entity\n    }\n}\n\nimpl ScratchProvider for DataAccessLayer<LmdbGlobalState> {\n    /// Provide a local cached-only version of engine-state.\n    fn get_scratch_global_state(&self) -> ScratchGlobalState {\n        self.state().create_scratch()\n    }\n\n    /// Writes state cached in an `EngineState<ScratchEngineState>` to LMDB.\n    fn write_scratch_to_db(\n        &self,\n        state_root_hash: Digest,\n        scratch_global_state: ScratchGlobalState,\n    ) -> Result<Digest, GlobalStateError> {\n        let (stored_values, keys_to_prune) = scratch_global_state.into_inner();\n        let post_state_hash = self\n            .state()\n            .put_stored_values(state_root_hash, stored_values)?;\n        if keys_to_prune.is_empty() {\n            return Ok(post_state_hash);\n        }\n        let prune_keys = keys_to_prune.iter().cloned().collect_vec();\n        match self.prune_keys(post_state_hash, &prune_keys) {\n            TriePruneResult::Pruned(post_state_hash) => Ok(post_state_hash),\n            TriePruneResult::MissingKey => 
Err(GlobalStateError::FailedToPrune(prune_keys)),\n            TriePruneResult::RootNotFound => Err(GlobalStateError::RootNotFound),\n            TriePruneResult::Failure(gse) => Err(gse),\n        }\n    }\n\n    /// Prune keys.\n    fn prune_keys(&self, mut state_root_hash: Digest, keys: &[Key]) -> TriePruneResult {\n        let scratch_trie_store = self.state().get_scratch_store();\n\n        let mut txn = match scratch_trie_store.create_read_write_txn() {\n            Ok(scratch) => scratch,\n            Err(gse) => return TriePruneResult::Failure(gse),\n        };\n\n        for key in keys {\n            let prune_results = prune::<Key, StoredValue, _, _, GlobalStateError>(\n                &mut txn,\n                &scratch_trie_store,\n                &state_root_hash,\n                key,\n            );\n            match prune_results {\n                Ok(TriePruneResult::Pruned(new_root)) => {\n                    state_root_hash = new_root;\n                }\n                Ok(TriePruneResult::MissingKey) => continue, // idempotent outcome\n                Ok(other) => return other,\n                Err(gse) => return TriePruneResult::Failure(gse),\n            }\n        }\n\n        if let Err(gse) = txn.commit() {\n            return TriePruneResult::Failure(gse);\n        }\n\n        if let Err(gse) = scratch_trie_store.write_root_to_db(state_root_hash) {\n            TriePruneResult::Failure(gse)\n        } else {\n            TriePruneResult::Pruned(state_root_hash)\n        }\n    }\n}\n\n/// Creates prepopulated LMDB global state instance that stores data in a temporary directory. 
As\n/// soon as the `TempDir` instance is dropped all the data stored will be removed from the disk as\n/// well.\npub fn make_temporary_global_state(\n    initial_data: impl IntoIterator<Item = (Key, StoredValue)>,\n) -> (LmdbGlobalState, Digest, TempDir) {\n    let tempdir = tempfile::tempdir().expect(\"should create tempdir\");\n\n    let lmdb_global_state = {\n        let lmdb_environment = LmdbEnvironment::new(\n            tempdir.path(),\n            DEFAULT_MAX_DB_SIZE,\n            DEFAULT_MAX_READERS,\n            false,\n        )\n        .expect(\"should create lmdb environment\");\n        let lmdb_trie_store = LmdbTrieStore::new(&lmdb_environment, None, DatabaseFlags::default())\n            .expect(\"should create lmdb trie store\");\n        LmdbGlobalState::empty(\n            Arc::new(lmdb_environment),\n            Arc::new(lmdb_trie_store),\n            DEFAULT_MAX_QUERY_DEPTH,\n            DEFAULT_ENABLE_ENTITY,\n        )\n        .expect(\"should create lmdb global state\")\n    };\n\n    let mut root_hash = lmdb_global_state.empty_root_hash;\n\n    let mut effects = Effects::new();\n\n    for (key, stored_value) in initial_data {\n        let transform = TransformV2::new(key.normalize(), TransformKindV2::Write(stored_value));\n        effects.push(transform);\n    }\n\n    root_hash = lmdb_global_state\n        .commit_effects(root_hash, effects)\n        .expect(\"Creation of account should be a success.\");\n\n    (lmdb_global_state, root_hash, tempdir)\n}\n\n#[cfg(test)]\nmod tests {\n    use casper_types::{account::AccountHash, execution::TransformKindV2, CLValue, Digest};\n\n    use crate::global_state::state::scratch::tests::TestPair;\n\n    use super::*;\n\n    fn create_test_pairs() -> Vec<(Key, StoredValue)> {\n        vec![\n            (\n                Key::Account(AccountHash::new([1_u8; 32])),\n                StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()),\n            ),\n            (\n                
Key::Account(AccountHash::new([2_u8; 32])),\n                StoredValue::CLValue(CLValue::from_t(2_i32).unwrap()),\n            ),\n        ]\n    }\n\n    fn create_test_pairs_updated() -> [TestPair; 3] {\n        [\n            TestPair {\n                key: Key::Account(AccountHash::new([1u8; 32])),\n                value: StoredValue::CLValue(CLValue::from_t(\"one\".to_string()).unwrap()),\n            },\n            TestPair {\n                key: Key::Account(AccountHash::new([2u8; 32])),\n                value: StoredValue::CLValue(CLValue::from_t(\"two\".to_string()).unwrap()),\n            },\n            TestPair {\n                key: Key::Account(AccountHash::new([3u8; 32])),\n                value: StoredValue::CLValue(CLValue::from_t(3_i32).unwrap()),\n            },\n        ]\n    }\n\n    #[test]\n    fn reads_from_a_checkout_return_expected_values() {\n        let test_pairs = create_test_pairs();\n        let (state, root_hash, _tempdir) = make_temporary_global_state(test_pairs.clone());\n        let checkout = state.checkout(root_hash).unwrap().unwrap();\n        for (key, value) in test_pairs {\n            assert_eq!(Some(value), checkout.read(&key).unwrap());\n        }\n    }\n\n    #[test]\n    fn checkout_fails_if_unknown_hash_is_given() {\n        let (state, _, _tempdir) = make_temporary_global_state(create_test_pairs());\n        let fake_hash: Digest = Digest::hash([1u8; 32]);\n        let result = state.checkout(fake_hash).unwrap();\n        assert!(result.is_none());\n    }\n\n    #[test]\n    fn commit_updates_state() {\n        let test_pairs_updated = create_test_pairs_updated();\n\n        let (state, root_hash, _tempdir) = make_temporary_global_state(create_test_pairs());\n\n        let effects = {\n            let mut tmp = Effects::new();\n            for TestPair { key, value } in &test_pairs_updated {\n                let transform = TransformV2::new(*key, TransformKindV2::Write(value.clone()));\n                
tmp.push(transform);\n            }\n            tmp\n        };\n\n        let updated_hash = state.commit_effects(root_hash, effects).unwrap();\n\n        let updated_checkout = state.checkout(updated_hash).unwrap().unwrap();\n\n        for TestPair { key, value } in test_pairs_updated.iter().cloned() {\n            assert_eq!(Some(value), updated_checkout.read(&key).unwrap());\n        }\n    }\n\n    #[test]\n    fn commit_updates_state_and_original_state_stays_intact() {\n        let test_pairs_updated = create_test_pairs_updated();\n\n        let (state, root_hash, _tempdir) = make_temporary_global_state(create_test_pairs());\n\n        let effects = {\n            let mut tmp = Effects::new();\n            for TestPair { key, value } in &test_pairs_updated {\n                let transform = TransformV2::new(*key, TransformKindV2::Write(value.clone()));\n                tmp.push(transform);\n            }\n            tmp\n        };\n\n        let updated_hash = state.commit_effects(root_hash, effects).unwrap();\n\n        let updated_checkout = state.checkout(updated_hash).unwrap().unwrap();\n        for TestPair { key, value } in test_pairs_updated.iter().cloned() {\n            assert_eq!(Some(value), updated_checkout.read(&key).unwrap());\n        }\n\n        let original_checkout = state.checkout(root_hash).unwrap().unwrap();\n        for (key, value) in create_test_pairs().iter().cloned() {\n            assert_eq!(Some(value), original_checkout.read(&key).unwrap());\n        }\n        assert_eq!(\n            None,\n            original_checkout.read(&test_pairs_updated[2].key).unwrap()\n        );\n    }\n}\n"
  },
  {
    "path": "storage/src/global_state/state/mod.rs",
    "content": "//! Global state.\n\n/// Lmdb implementation of global state.\npub mod lmdb;\n\n/// Lmdb implementation of global state with cache.\npub mod scratch;\n\nuse num_rational::Ratio;\nuse parking_lot::RwLock;\nuse std::{\n    cell::RefCell,\n    collections::{BTreeMap, BTreeSet},\n    convert::TryFrom,\n    rc::Rc,\n    sync::Arc,\n};\n\nuse tracing::{debug, error, info, warn};\n\nuse casper_types::{\n    account::AccountHash,\n    bytesrepr::{self, Bytes, ToBytes},\n    contracts::NamedKeys,\n    execution::{Effects, TransformError, TransformInstruction, TransformKindV2, TransformV2},\n    global_state::TrieMerkleProof,\n    system::{\n        self,\n        auction::{\n            SeigniorageRecipientsSnapshot, ERA_END_TIMESTAMP_MILLIS_KEY, ERA_ID_KEY,\n            SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY,\n        },\n        mint::{\n            BalanceHoldAddr, BalanceHoldAddrTag, ARG_AMOUNT, MINT_SUSTAIN_PURSE_KEY,\n            ROUND_SEIGNIORAGE_RATE_KEY, TOTAL_SUPPLY_KEY,\n        },\n        AUCTION, HANDLE_PAYMENT, MINT,\n    },\n    Account, AddressableEntity, BlockGlobalAddr, CLValue, Digest, EntityAddr, EntityEntryPoint,\n    EntryPointAddr, EntryPointValue, HoldsEpoch, Key, KeyTag, Phase, PublicKey, RuntimeArgs,\n    StoredValue, SystemHashRegistry, REWARDS_HANDLING_RATIO_TAG, U512,\n};\n\n#[cfg(test)]\npub use self::lmdb::make_temporary_global_state;\n\nuse super::trie_store::{operations::batch_write, TrieStoreCacheError};\nuse crate::{\n    data_access_layer::{\n        auction::{AuctionMethodRet, BiddingRequest, BiddingResult},\n        balance::BalanceHandling,\n        era_validators::EraValidatorsResult,\n        handle_fee::{HandleFeeMode, HandleFeeRequest, HandleFeeResult},\n        mint::{\n            BurnRequest, BurnRequestArgs, BurnResult, TransferRequest, TransferRequestArgs,\n            TransferResult,\n        },\n        prefixed_values::{PrefixedValuesRequest, 
PrefixedValuesResult},\n        tagged_values::{TaggedValuesRequest, TaggedValuesResult},\n        AddressableEntityRequest, AddressableEntityResult, AuctionMethod, BalanceHoldError,\n        BalanceHoldKind, BalanceHoldMode, BalanceHoldRequest, BalanceHoldResult, BalanceIdentifier,\n        BalanceIdentifierPurseRequest, BalanceIdentifierPurseResult, BalanceRequest, BalanceResult,\n        BidsRequest, BidsResult, BlockGlobalKind, BlockGlobalRequest, BlockGlobalResult,\n        BlockRewardsError, BlockRewardsRequest, BlockRewardsResult, ContractRequest,\n        ContractResult, EntryPointExistsRequest, EntryPointExistsResult, EntryPointRequest,\n        EntryPointResult, EraValidatorsRequest, ExecutionResultsChecksumRequest,\n        ExecutionResultsChecksumResult, FeeError, FeeRequest, FeeResult, FlushRequest, FlushResult,\n        GenesisRequest, GenesisResult, HandleRefundMode, HandleRefundRequest, HandleRefundResult,\n        InsufficientBalanceHandling, MessageTopicsRequest, MessageTopicsResult, ProofHandling,\n        ProofsResult, ProtocolUpgradeRequest, ProtocolUpgradeResult, PruneRequest, PruneResult,\n        PutTrieRequest, PutTrieResult, QueryRequest, QueryResult, RoundSeigniorageRateRequest,\n        RoundSeigniorageRateResult, SeigniorageRecipientsRequest, SeigniorageRecipientsResult,\n        StepError, StepRequest, StepResult, SystemEntityRegistryPayload,\n        SystemEntityRegistryRequest, SystemEntityRegistryResult, SystemEntityRegistrySelector,\n        TotalSupplyRequest, TotalSupplyResult, TrieRequest, TrieResult,\n        EXECUTION_RESULTS_CHECKSUM_NAME,\n    },\n    global_state::{\n        error::Error as GlobalStateError,\n        state::scratch::ScratchGlobalState,\n        transaction_source::{Transaction, TransactionSource},\n        trie::Trie,\n        trie_store::{\n            operations::{prune, read, write, ReadResult, TriePruneResult, WriteResult},\n            TrieStore,\n        },\n    },\n    system::{\n        
auction::{self, Auction},\n        burn::{BurnError, BurnRuntimeArgsBuilder},\n        genesis::{GenesisError, GenesisInstaller},\n        handle_payment::HandlePayment,\n        mint::Mint,\n        protocol_upgrade::{ProtocolUpgradeError, ProtocolUpgrader},\n        runtime_native::{Id, RuntimeNative},\n        transfer::{TransferArgs, TransferError, TransferRuntimeArgsBuilder, TransferTargetMode},\n    },\n    tracking_copy::{TrackingCopy, TrackingCopyEntityExt, TrackingCopyError, TrackingCopyExt},\n    AddressGenerator,\n};\n\n/// A trait expressing the reading of state. This trait is used to abstract the underlying store.\npub trait StateReader<K = Key, V = StoredValue>: Sized + Send + Sync {\n    /// An error which occurs when reading state\n    type Error;\n\n    /// Returns the state value from the corresponding key\n    fn read(&self, key: &K) -> Result<Option<V>, Self::Error>;\n\n    /// Returns the merkle proof of the state value from the corresponding key\n    fn read_with_proof(&self, key: &K) -> Result<Option<TrieMerkleProof<K, V>>, Self::Error>;\n\n    /// Returns the keys in the trie matching `prefix`.\n    fn keys_with_prefix(&self, prefix: &[u8]) -> Result<Vec<K>, Self::Error>;\n}\n\n/// An error emitted by the execution engine on commit\n#[derive(Clone, Debug, thiserror::Error, Eq, PartialEq)]\npub enum CommitError {\n    /// Root not found.\n    #[error(\"Root not found: {0:?}\")]\n    RootNotFound(Digest),\n    /// Root not found while attempting to read.\n    #[error(\"Root not found while attempting to read: {0:?}\")]\n    ReadRootNotFound(Digest),\n    /// Root not found while attempting to write.\n    #[error(\"Root not found while writing: {0:?}\")]\n    WriteRootNotFound(Digest),\n    /// Key not found.\n    #[error(\"Key not found: {0}\")]\n    KeyNotFound(Key),\n    /// Transform error.\n    #[error(transparent)]\n    TransformError(TransformError),\n    /// Trie not found while attempting to validate cache write.\n    #[error(\"Trie 
not found in cache {0}\")]\n    TrieNotFoundInCache(Digest),\n}\n\n/// Scratch provider.\npub trait ScratchProvider: CommitProvider {\n    /// Get scratch state to db.\n    fn get_scratch_global_state(&self) -> ScratchGlobalState;\n    /// Write scratch state to db.\n    fn write_scratch_to_db(\n        &self,\n        state_root_hash: Digest,\n        scratch_global_state: ScratchGlobalState,\n    ) -> Result<Digest, GlobalStateError>;\n    /// Prune items for imputed keys.\n    fn prune_keys(&self, state_root_hash: Digest, keys: &[Key]) -> TriePruneResult;\n}\n\n/// Provides `commit` method.\npub trait CommitProvider: StateProvider {\n    /// Applies changes and returns a new post state hash.\n    /// block_hash is used for computing a deterministic and unique keys.\n    fn commit_effects(\n        &self,\n        state_hash: Digest,\n        effects: Effects,\n    ) -> Result<Digest, GlobalStateError>;\n\n    /// Commit values to global state.\n    fn commit_values(\n        &self,\n        state_hash: Digest,\n        values_to_write: Vec<(Key, StoredValue)>,\n        keys_to_prune: BTreeSet<Key>,\n    ) -> Result<Digest, GlobalStateError>;\n\n    /// Runs and commits the genesis process, once per network.\n    fn genesis(&self, request: GenesisRequest) -> GenesisResult {\n        let initial_root = self.empty_root();\n        let tc = match self.tracking_copy(initial_root) {\n            Ok(Some(tc)) => Rc::new(RefCell::new(tc)),\n            Ok(None) => return GenesisResult::Fatal(\"state uninitialized\".to_string()),\n            Err(err) => {\n                return GenesisResult::Failure(GenesisError::TrackingCopy(\n                    TrackingCopyError::Storage(err),\n                ));\n            }\n        };\n        let chainspec_hash = request.chainspec_hash();\n        let protocol_version = request.protocol_version();\n        let config = request.config();\n\n        let mut genesis_installer: GenesisInstaller<Self> =\n            
GenesisInstaller::new(chainspec_hash, protocol_version, config.clone(), tc);\n\n        let chainspec_registry = request.chainspec_registry();\n        if let Err(gen_err) = genesis_installer.install(chainspec_registry.clone()) {\n            return GenesisResult::Failure(*gen_err);\n        }\n\n        let effects = genesis_installer.finalize();\n        match self.commit_effects(initial_root, effects.clone()) {\n            Ok(post_state_hash) => GenesisResult::Success {\n                post_state_hash,\n                effects,\n            },\n            Err(err) => {\n                GenesisResult::Failure(GenesisError::TrackingCopy(TrackingCopyError::Storage(err)))\n            }\n        }\n    }\n\n    /// Runs and commits the protocol upgrade process.\n    fn protocol_upgrade(&self, request: ProtocolUpgradeRequest) -> ProtocolUpgradeResult {\n        let pre_state_hash = request.pre_state_hash();\n        let tc = match self.tracking_copy(pre_state_hash) {\n            Ok(Some(tc)) => tc,\n            Ok(None) => return ProtocolUpgradeResult::RootNotFound,\n            Err(err) => {\n                return ProtocolUpgradeResult::Failure(ProtocolUpgradeError::TrackingCopy(\n                    TrackingCopyError::Storage(err),\n                ));\n            }\n        };\n\n        let protocol_upgrader: ProtocolUpgrader<Self> =\n            ProtocolUpgrader::new(request.config().clone(), pre_state_hash, tc);\n\n        let post_upgrade_tc = match protocol_upgrader.upgrade(pre_state_hash) {\n            Err(e) => return e.into(),\n            Ok(tc) => tc,\n        };\n\n        let (writes, prunes, effects) = post_upgrade_tc.destructure();\n\n        // commit\n        match self.commit_values(pre_state_hash, writes, prunes) {\n            Ok(post_state_hash) => ProtocolUpgradeResult::Success {\n                post_state_hash,\n                effects,\n            },\n            Err(err) => 
ProtocolUpgradeResult::Failure(ProtocolUpgradeError::TrackingCopy(\n                TrackingCopyError::Storage(err),\n            )),\n        }\n    }\n\n    /// Safely prune specified keys from global state, using a tracking copy.\n    fn prune(&self, request: PruneRequest) -> PruneResult {\n        let pre_state_hash = request.state_hash();\n        let tc = match self.tracking_copy(pre_state_hash) {\n            Ok(Some(tc)) => Rc::new(RefCell::new(tc)),\n            Ok(None) => return PruneResult::RootNotFound,\n            Err(err) => return PruneResult::Failure(TrackingCopyError::Storage(err)),\n        };\n\n        let keys_to_delete = request.keys_to_prune();\n        if keys_to_delete.is_empty() {\n            // effectively a noop\n            return PruneResult::Success {\n                post_state_hash: pre_state_hash,\n                effects: Effects::default(),\n            };\n        }\n\n        for key in keys_to_delete {\n            tc.borrow_mut().prune(*key)\n        }\n\n        let effects = tc.borrow().effects();\n\n        match self.commit_effects(pre_state_hash, effects.clone()) {\n            Ok(post_state_hash) => PruneResult::Success {\n                post_state_hash,\n                effects,\n            },\n            Err(tce) => PruneResult::Failure(tce.into()),\n        }\n    }\n\n    /// Step auction state at era end.\n    fn step(&self, request: StepRequest) -> StepResult {\n        let state_hash = request.state_hash();\n        let tc = match self.tracking_copy(state_hash) {\n            Ok(Some(tc)) => Rc::new(RefCell::new(tc)),\n            Ok(None) => return StepResult::RootNotFound,\n            Err(err) => {\n                return StepResult::Failure(StepError::TrackingCopy(TrackingCopyError::Storage(\n                    err,\n                )));\n            }\n        };\n        let protocol_version = request.protocol_version();\n\n        let seed = {\n            // seeds address generator w/ 
era_end_timestamp_millis\n            let mut bytes = match request.era_end_timestamp_millis().into_bytes() {\n                Ok(bytes) => bytes,\n                Err(bre) => {\n                    return StepResult::Failure(StepError::TrackingCopy(\n                        TrackingCopyError::BytesRepr(bre),\n                    ));\n                }\n            };\n            match &mut protocol_version.into_bytes() {\n                Ok(next) => bytes.append(next),\n                Err(bre) => {\n                    return StepResult::Failure(StepError::TrackingCopy(\n                        TrackingCopyError::BytesRepr(*bre),\n                    ));\n                }\n            };\n            match &mut request.next_era_id().into_bytes() {\n                Ok(next) => bytes.append(next),\n                Err(bre) => {\n                    return StepResult::Failure(StepError::TrackingCopy(\n                        TrackingCopyError::BytesRepr(*bre),\n                    ));\n                }\n            };\n\n            Id::Seed(bytes)\n        };\n\n        let config = request.config();\n        // this runtime uses the system's context\n        let phase = Phase::Session;\n        let address_generator = AddressGenerator::new(&seed.seed(), phase);\n        let mut runtime = match RuntimeNative::new_system_runtime(\n            config.clone(),\n            protocol_version,\n            seed,\n            Arc::new(RwLock::new(address_generator)),\n            Rc::clone(&tc),\n            phase,\n        ) {\n            Ok(rt) => rt,\n            Err(tce) => return StepResult::Failure(StepError::TrackingCopy(tce)),\n        };\n\n        let slashed_validators: Vec<PublicKey> = request.slashed_validators();\n        if !slashed_validators.is_empty() {\n            if let Err(err) = runtime.slash(slashed_validators) {\n                error!(\"{}\", err);\n                return StepResult::Failure(StepError::SlashingError);\n            }\n        
}\n\n        let era_end_timestamp_millis = request.era_end_timestamp_millis();\n        let evicted_validators = request\n            .evict_items()\n            .iter()\n            .map(|item| item.validator_id.clone())\n            .collect::<Vec<PublicKey>>();\n        let max_delegators_per_validator = config.max_delegators_per_validator();\n        let include_credits = config.include_credits();\n        let credit_cap = config.credit_cap();\n        let minimum_bid_amount = config.minimum_bid_amount();\n\n        if let Err(err) = runtime.run_auction(\n            era_end_timestamp_millis,\n            evicted_validators,\n            max_delegators_per_validator,\n            include_credits,\n            credit_cap,\n            minimum_bid_amount,\n        ) {\n            error!(\"{}\", err);\n            return StepResult::Failure(StepError::Auction);\n        }\n\n        let effects = tc.borrow().effects();\n\n        match self.commit_effects(state_hash, effects.clone()) {\n            Ok(post_state_hash) => StepResult::Success {\n                post_state_hash,\n                effects,\n            },\n            Err(gse) => StepResult::Failure(gse.into()),\n        }\n    }\n\n    /// Distribute block rewards.\n    fn distribute_block_rewards(&self, request: BlockRewardsRequest) -> BlockRewardsResult {\n        let state_hash = request.state_hash();\n        let rewards = request.rewards();\n        if rewards.is_empty() {\n            info!(\"rewards are empty\");\n            // if there are no rewards to distribute, this is effectively a noop\n            return BlockRewardsResult::Success {\n                post_state_hash: state_hash,\n                effects: Effects::new(),\n            };\n        }\n\n        let tc = match self.tracking_copy(state_hash) {\n            Ok(Some(tc)) => Rc::new(RefCell::new(tc)),\n            Ok(None) => return BlockRewardsResult::RootNotFound,\n            Err(err) => {\n                return 
BlockRewardsResult::Failure(BlockRewardsError::TrackingCopy(\n                    TrackingCopyError::Storage(err),\n                ));\n            }\n        };\n\n        let config = request.config();\n        let protocol_version = request.protocol_version();\n        let seed = {\n            let mut bytes = match request.block_time().into_bytes() {\n                Ok(bytes) => bytes,\n                Err(bre) => {\n                    return BlockRewardsResult::Failure(BlockRewardsError::TrackingCopy(\n                        TrackingCopyError::BytesRepr(bre),\n                    ));\n                }\n            };\n            match &mut protocol_version.into_bytes() {\n                Ok(next) => bytes.append(next),\n                Err(bre) => {\n                    return BlockRewardsResult::Failure(BlockRewardsError::TrackingCopy(\n                        TrackingCopyError::BytesRepr(*bre),\n                    ));\n                }\n            };\n\n            Id::Seed(bytes)\n        };\n\n        // this runtime uses the system's context\n        let phase = Phase::Session;\n        let address_generator = AddressGenerator::new(&seed.seed(), phase);\n\n        let mut runtime = match RuntimeNative::new_system_runtime(\n            config.clone(),\n            protocol_version,\n            seed,\n            Arc::new(RwLock::new(address_generator)),\n            Rc::clone(&tc),\n            phase,\n        ) {\n            Ok(rt) => rt,\n            Err(tce) => {\n                return BlockRewardsResult::Failure(BlockRewardsError::TrackingCopy(tce));\n            }\n        };\n\n        let rewards_handling = request.config().rewards_handling();\n        let sustain_purse = match runtime\n            .runtime_footprint()\n            .named_keys()\n            .get(MINT_SUSTAIN_PURSE_KEY)\n        {\n            Some(Key::URef(uref)) => Some(*uref),\n            Some(_) | None => None,\n        };\n\n        if let Err(auction_error) =\n   
         runtime.distribute(rewards.clone(), sustain_purse, rewards_handling)\n        {\n            error!(\n                \"distribute block rewards failed due to auction error {:?}\",\n                auction_error\n            );\n            return BlockRewardsResult::Failure(BlockRewardsError::Auction(auction_error));\n        } else {\n            debug!(\"rewards distribution complete\");\n        }\n\n        let effects = tc.borrow().effects();\n\n        match self.commit_effects(state_hash, effects.clone()) {\n            Ok(post_state_hash) => {\n                debug!(\"reward distribution committed\");\n                BlockRewardsResult::Success {\n                    post_state_hash,\n                    effects,\n                }\n            }\n            Err(gse) => BlockRewardsResult::Failure(BlockRewardsError::TrackingCopy(\n                TrackingCopyError::Storage(gse),\n            )),\n        }\n    }\n\n    /// Distribute fees, if relevant to the chainspec configured behavior.\n    fn distribute_fees(&self, request: FeeRequest) -> FeeResult {\n        let state_hash = request.state_hash();\n        if !request.should_distribute_fees() {\n            // effectively noop\n            return FeeResult::Success {\n                post_state_hash: state_hash,\n                effects: Effects::new(),\n                transfers: vec![],\n            };\n        }\n\n        let tc = match self.tracking_copy(state_hash) {\n            Ok(Some(tracking_copy)) => Rc::new(RefCell::new(tracking_copy)),\n            Ok(None) => return FeeResult::RootNotFound,\n            Err(gse) => {\n                return FeeResult::Failure(FeeError::TrackingCopy(TrackingCopyError::Storage(gse)));\n            }\n        };\n\n        let config = request.config();\n        let protocol_version = request.protocol_version();\n        let seed = {\n            let mut bytes = match request.block_time().into_bytes() {\n                Ok(bytes) => bytes,\n    
            Err(bre) => {\n                    return FeeResult::Failure(FeeError::TrackingCopy(\n                        TrackingCopyError::BytesRepr(bre),\n                    ));\n                }\n            };\n            match &mut protocol_version.into_bytes() {\n                Ok(next) => bytes.append(next),\n                Err(bre) => {\n                    return FeeResult::Failure(FeeError::TrackingCopy(\n                        TrackingCopyError::BytesRepr(*bre),\n                    ));\n                }\n            };\n\n            Id::Seed(bytes)\n        };\n\n        // this runtime uses the system's context\n        let phase = Phase::System;\n        let address_generator = AddressGenerator::new(&seed.seed(), phase);\n        let mut runtime = match RuntimeNative::new_system_runtime(\n            config.clone(),\n            protocol_version,\n            seed,\n            Arc::new(RwLock::new(address_generator)),\n            Rc::clone(&tc),\n            phase,\n        ) {\n            Ok(rt) => rt,\n            Err(tce) => {\n                return FeeResult::Failure(FeeError::TrackingCopy(tce));\n            }\n        };\n\n        let source = BalanceIdentifier::Accumulate;\n        let source_purse = match source.purse_uref(&mut tc.borrow_mut(), protocol_version) {\n            Ok(value) => value,\n            Err(tce) => return FeeResult::Failure(FeeError::TrackingCopy(tce)),\n        };\n        // amount = None will distribute the full current balance of the accumulation purse\n        let result = runtime.distribute_accumulated_fees(source_purse, None);\n\n        match result {\n            Ok(_) => {\n                let effects = tc.borrow_mut().effects();\n                let transfers = runtime.into_transfers();\n                let post_state_hash = match self.commit_effects(state_hash, effects.clone()) {\n                    Ok(post_state_hash) => post_state_hash,\n                    Err(gse) => {\n                     
   return FeeResult::Failure(FeeError::TrackingCopy(\n                            TrackingCopyError::Storage(gse),\n                        ));\n                    }\n                };\n                FeeResult::Success {\n                    effects,\n                    transfers,\n                    post_state_hash,\n                }\n            }\n            Err(hpe) => FeeResult::Failure(FeeError::TrackingCopy(\n                TrackingCopyError::SystemContract(system::Error::HandlePayment(hpe)),\n            )),\n        }\n    }\n\n    /// Gets block global data.\n    fn block_global(&self, request: BlockGlobalRequest) -> BlockGlobalResult {\n        let state_hash = request.state_hash();\n        let tc = match self.tracking_copy(state_hash) {\n            Ok(Some(tracking_copy)) => Rc::new(RefCell::new(tracking_copy)),\n            Ok(None) => return BlockGlobalResult::RootNotFound,\n            Err(gse) => return BlockGlobalResult::Failure(TrackingCopyError::Storage(gse)),\n        };\n\n        // match request\n        match request.block_global_kind() {\n            BlockGlobalKind::BlockTime(block_time) => {\n                let cl_value =\n                    match CLValue::from_t(block_time.value()).map_err(TrackingCopyError::CLValue) {\n                        Ok(cl_value) => cl_value,\n                        Err(tce) => {\n                            return BlockGlobalResult::Failure(tce);\n                        }\n                    };\n                tc.borrow_mut().write(\n                    Key::BlockGlobal(BlockGlobalAddr::BlockTime),\n                    StoredValue::CLValue(cl_value),\n                );\n            }\n            BlockGlobalKind::MessageCount(count) => {\n                let cl_value = match CLValue::from_t(count).map_err(TrackingCopyError::CLValue) {\n                    Ok(cl_value) => cl_value,\n                    Err(tce) => {\n                        return BlockGlobalResult::Failure(tce);\n             
       }\n                };\n                tc.borrow_mut().write(\n                    Key::BlockGlobal(BlockGlobalAddr::MessageCount),\n                    StoredValue::CLValue(cl_value),\n                );\n            }\n            BlockGlobalKind::ProtocolVersion(protocol_version) => {\n                let cl_value = match CLValue::from_t(protocol_version.destructure())\n                    .map_err(TrackingCopyError::CLValue)\n                {\n                    Ok(cl_value) => cl_value,\n                    Err(tce) => {\n                        return BlockGlobalResult::Failure(tce);\n                    }\n                };\n                tc.borrow_mut().write(\n                    Key::BlockGlobal(BlockGlobalAddr::ProtocolVersion),\n                    StoredValue::CLValue(cl_value),\n                );\n            }\n            BlockGlobalKind::AddressableEntity(addressable_entity) => {\n                let cl_value =\n                    match CLValue::from_t(addressable_entity).map_err(TrackingCopyError::CLValue) {\n                        Ok(cl_value) => cl_value,\n                        Err(tce) => {\n                            return BlockGlobalResult::Failure(tce);\n                        }\n                    };\n                tc.borrow_mut().write(\n                    Key::BlockGlobal(BlockGlobalAddr::AddressableEntity),\n                    StoredValue::CLValue(cl_value),\n                );\n            }\n        }\n\n        let effects = tc.borrow_mut().effects();\n\n        let post_state_hash = match self.commit_effects(state_hash, effects.clone()) {\n            Ok(post_state_hash) => post_state_hash,\n            Err(gse) => return BlockGlobalResult::Failure(TrackingCopyError::Storage(gse)),\n        };\n\n        BlockGlobalResult::Success {\n            post_state_hash,\n            effects: Box::new(effects),\n        }\n    }\n}\n\n/// A trait expressing operations over the trie.\npub trait StateProvider: Send + 
Sync + Sized {\n    /// Associated reader type for `StateProvider`.\n    type Reader: StateReader<Key, StoredValue, Error = GlobalStateError>;\n\n    /// Flush the state provider.\n    fn flush(&self, request: FlushRequest) -> FlushResult;\n\n    /// Returns an empty root hash.\n    fn empty_root(&self) -> Digest;\n\n    /// Get a tracking copy.\n    fn tracking_copy(\n        &self,\n        state_hash: Digest,\n    ) -> Result<Option<TrackingCopy<Self::Reader>>, GlobalStateError>;\n\n    /// Checkouts a slice of initial state using root state hash.\n    fn checkout(&self, state_hash: Digest) -> Result<Option<Self::Reader>, GlobalStateError>;\n\n    /// Query state.\n    fn query(&self, request: QueryRequest) -> QueryResult {\n        match self.tracking_copy(request.state_hash()) {\n            Ok(Some(tc)) => match tc.query(request.key(), request.path()) {\n                Ok(ret) => ret.into(),\n                Err(err) => QueryResult::Failure(err),\n            },\n            Ok(None) => QueryResult::RootNotFound,\n            Err(err) => QueryResult::Failure(TrackingCopyError::Storage(err)),\n        }\n    }\n\n    /// Message topics request.\n    fn message_topics(&self, message_topics_request: MessageTopicsRequest) -> MessageTopicsResult {\n        let tc = match self.tracking_copy(message_topics_request.state_hash()) {\n            Ok(Some(tracking_copy)) => tracking_copy,\n            Ok(None) => return MessageTopicsResult::RootNotFound,\n            Err(err) => return MessageTopicsResult::Failure(err.into()),\n        };\n\n        match tc.get_message_topics(message_topics_request.entity_addr()) {\n            Ok(message_topics) => MessageTopicsResult::Success { message_topics },\n            Err(tce) => MessageTopicsResult::Failure(tce),\n        }\n    }\n\n    /// Provides the underlying addr for the imputed balance identifier.\n    fn balance_purse(\n        &self,\n        request: BalanceIdentifierPurseRequest,\n    ) -> 
BalanceIdentifierPurseResult {\n        let mut tc = match self.tracking_copy(request.state_hash()) {\n            Ok(Some(tracking_copy)) => tracking_copy,\n            Ok(None) => return BalanceIdentifierPurseResult::RootNotFound,\n            Err(err) => return TrackingCopyError::Storage(err).into(),\n        };\n        let balance_identifier = request.identifier();\n        let protocol_version = request.protocol_version();\n        match balance_identifier.purse_uref(&mut tc, protocol_version) {\n            Ok(uref) => BalanceIdentifierPurseResult::Success {\n                purse_addr: uref.addr(),\n            },\n            Err(tce) => BalanceIdentifierPurseResult::Failure(tce),\n        }\n    }\n\n    /// Balance inquiry.\n    fn balance(&self, request: BalanceRequest) -> BalanceResult {\n        let mut tc = match self.tracking_copy(request.state_hash()) {\n            Ok(Some(tracking_copy)) => tracking_copy,\n            Ok(None) => return BalanceResult::RootNotFound,\n            Err(err) => return TrackingCopyError::Storage(err).into(),\n        };\n        let protocol_version = request.protocol_version();\n        let balance_identifier = request.identifier();\n        let purse_key = match balance_identifier.purse_uref(&mut tc, protocol_version) {\n            Ok(value) => value.into(),\n            Err(tce) => return tce.into(),\n        };\n        let (purse_balance_key, purse_addr) = match tc.get_purse_balance_key(purse_key) {\n            Ok(key @ Key::Balance(addr)) => (key, addr),\n            Ok(key) => return TrackingCopyError::UnexpectedKeyVariant(key).into(),\n            Err(tce) => return tce.into(),\n        };\n\n        let (total_balance, proofs_result) = match request.proof_handling() {\n            ProofHandling::NoProofs => {\n                let total_balance = match tc.read(&purse_balance_key) {\n                    Ok(Some(StoredValue::CLValue(cl_value))) => match cl_value.into_t::<U512>() {\n                        
Ok(val) => val,\n                        Err(cve) => return TrackingCopyError::CLValue(cve).into(),\n                    },\n                    Ok(Some(_)) => return TrackingCopyError::UnexpectedStoredValueVariant.into(),\n                    Ok(None) => return TrackingCopyError::KeyNotFound(purse_balance_key).into(),\n                    Err(tce) => return tce.into(),\n                };\n                let balance_holds = match request.balance_handling() {\n                    BalanceHandling::Total => BTreeMap::new(),\n                    BalanceHandling::Available => {\n                        match tc.get_balance_hold_config(BalanceHoldAddrTag::Gas) {\n                            Ok(Some((block_time, _, interval))) => {\n                                match tc.get_balance_holds(purse_addr, block_time, interval) {\n                                    Ok(holds) => holds,\n                                    Err(tce) => return tce.into(),\n                                }\n                            }\n                            Ok(None) => BTreeMap::new(),\n                            Err(tce) => return tce.into(),\n                        }\n                    }\n                };\n                (total_balance, ProofsResult::NotRequested { balance_holds })\n            }\n            ProofHandling::Proofs => {\n                let (total_balance, total_balance_proof) =\n                    match tc.get_total_balance_with_proof(purse_balance_key) {\n                        Ok((balance, proof)) => (balance, Box::new(proof)),\n                        Err(tce) => return tce.into(),\n                    };\n\n                let balance_holds = match request.balance_handling() {\n                    BalanceHandling::Total => BTreeMap::new(),\n                    BalanceHandling::Available => {\n                        match tc.get_balance_holds_with_proof(purse_addr) {\n                            Ok(holds) => holds,\n                            Err(tce) 
=> return tce.into(),\n                        }\n                    }\n                };\n\n                (\n                    total_balance,\n                    ProofsResult::Proofs {\n                        total_balance_proof,\n                        balance_holds,\n                    },\n                )\n            }\n        };\n\n        let (block_time, gas_hold_handling) = match tc\n            .get_balance_hold_config(BalanceHoldAddrTag::Gas)\n        {\n            Ok(Some((block_time, handling, interval))) => (block_time, (handling, interval).into()),\n            Ok(None) => {\n                return BalanceResult::Success {\n                    purse_addr,\n                    total_balance,\n                    available_balance: total_balance,\n                    proofs_result,\n                };\n            }\n            Err(tce) => return tce.into(),\n        };\n\n        let processing_hold_handling =\n            match tc.get_balance_hold_config(BalanceHoldAddrTag::Processing) {\n                Ok(Some((_, handling, interval))) => (handling, interval).into(),\n                Ok(None) => {\n                    return BalanceResult::Success {\n                        purse_addr,\n                        total_balance,\n                        available_balance: total_balance,\n                        proofs_result,\n                    };\n                }\n                Err(tce) => return tce.into(),\n            };\n\n        let available_balance = match &proofs_result.available_balance(\n            block_time,\n            total_balance,\n            gas_hold_handling,\n            processing_hold_handling,\n        ) {\n            Ok(available_balance) => *available_balance,\n            Err(be) => return BalanceResult::Failure(TrackingCopyError::Balance(be.clone())),\n        };\n\n        BalanceResult::Success {\n            purse_addr,\n            total_balance,\n            available_balance,\n            
proofs_result,\n        }\n    }\n\n    /// Balance hold.\n    fn balance_hold(&self, request: BalanceHoldRequest) -> BalanceHoldResult {\n        let mut tc = match self.tracking_copy(request.state_hash()) {\n            Ok(Some(tracking_copy)) => tracking_copy,\n            Ok(None) => return BalanceHoldResult::RootNotFound,\n            Err(err) => {\n                return BalanceHoldResult::Failure(BalanceHoldError::TrackingCopy(\n                    TrackingCopyError::Storage(err),\n                ));\n            }\n        };\n        let hold_mode = request.balance_hold_mode();\n        match hold_mode {\n            BalanceHoldMode::Hold {\n                identifier,\n                hold_amount,\n                insufficient_handling,\n            } => {\n                let block_time = match tc.get_block_time() {\n                    Ok(Some(block_time)) => block_time,\n                    Ok(None) => return BalanceHoldResult::BlockTimeNotFound,\n                    Err(tce) => return tce.into(),\n                };\n                let tag = match request.balance_hold_kind() {\n                    BalanceHoldKind::All => {\n                        return BalanceHoldResult::Failure(\n                            BalanceHoldError::UnexpectedWildcardVariant,\n                        );\n                    }\n                    BalanceHoldKind::Tag(tag) => tag,\n                };\n                let balance_request = BalanceRequest::new(\n                    request.state_hash(),\n                    request.protocol_version(),\n                    identifier,\n                    BalanceHandling::Available,\n                    ProofHandling::NoProofs,\n                );\n                let balance_result = self.balance(balance_request);\n                let (total_balance, remaining_balance, purse_addr) = match balance_result {\n                    BalanceResult::RootNotFound => return BalanceHoldResult::RootNotFound,\n                    
BalanceResult::Failure(be) => return be.into(),\n                    BalanceResult::Success {\n                        total_balance,\n                        available_balance,\n                        purse_addr,\n                        ..\n                    } => (total_balance, available_balance, purse_addr),\n                };\n\n                let held_amount = {\n                    if remaining_balance >= hold_amount {\n                        // the purse has sufficient balance to fully cover the hold\n                        hold_amount\n                    } else if insufficient_handling == InsufficientBalanceHandling::Noop {\n                        // the purse has insufficient balance and the insufficient\n                        // balance handling mode is noop, so get out\n                        return BalanceHoldResult::Failure(BalanceHoldError::InsufficientBalance {\n                            remaining_balance,\n                        });\n                    } else {\n                        // currently this is always the default HoldRemaining variant.\n                        // the purse holder has insufficient balance to cover the hold,\n                        // but the system will put a hold on whatever balance remains.\n                        // this is basically punitive to block an edge case resource consumption\n                        // attack whereby a malicious purse holder drains a balance to not-zero\n                        // but not-enough-to-cover-holds and then spams a bunch of transactions\n                        // knowing that they will fail due to insufficient funds, but only\n                        // after making the system do the work of processing the balance\n                        // check without penalty to themselves.\n                        remaining_balance\n                    }\n                };\n\n                let balance_hold_addr = match tag {\n                    BalanceHoldAddrTag::Gas 
=> BalanceHoldAddr::Gas {\n                        purse_addr,\n                        block_time,\n                    },\n                    BalanceHoldAddrTag::Processing => BalanceHoldAddr::Processing {\n                        purse_addr,\n                        block_time,\n                    },\n                };\n\n                let hold_key = Key::BalanceHold(balance_hold_addr);\n                let hold_value = match tc.get(&hold_key) {\n                    Ok(Some(StoredValue::CLValue(cl_value))) => {\n                        // There was a previous hold on this balance. We need to add the new hold to\n                        // the old one.\n                        match cl_value.clone().into_t::<U512>() {\n                            Ok(prev_hold) => prev_hold.saturating_add(held_amount),\n                            Err(cve) => {\n                                return BalanceHoldResult::Failure(BalanceHoldError::TrackingCopy(\n                                    TrackingCopyError::CLValue(cve),\n                                ));\n                            }\n                        }\n                    }\n                    Ok(Some(other_value_variant)) => {\n                        return BalanceHoldResult::Failure(BalanceHoldError::UnexpectedHoldValue(\n                            other_value_variant,\n                        ))\n                    }\n                    Ok(None) => held_amount, // There was no previous hold.\n                    Err(tce) => {\n                        return BalanceHoldResult::Failure(BalanceHoldError::TrackingCopy(tce));\n                    }\n                };\n\n                let hold_cl_value = match CLValue::from_t(hold_value) {\n                    Ok(cl_value) => cl_value,\n                    Err(cve) => {\n                        return BalanceHoldResult::Failure(BalanceHoldError::TrackingCopy(\n                            TrackingCopyError::CLValue(cve),\n                        ));\n   
                 }\n                };\n                tc.write(hold_key, StoredValue::CLValue(hold_cl_value));\n                let holds = vec![balance_hold_addr];\n\n                let available_balance = remaining_balance.saturating_sub(held_amount);\n                let effects = tc.effects();\n                BalanceHoldResult::success(\n                    Some(holds),\n                    total_balance,\n                    available_balance,\n                    hold_amount,\n                    held_amount,\n                    effects,\n                )\n            }\n            BalanceHoldMode::Clear { identifier } => {\n                let purse_addr = match identifier.purse_uref(&mut tc, request.protocol_version()) {\n                    Ok(source_purse) => source_purse.addr(),\n                    Err(tce) => {\n                        return BalanceHoldResult::Failure(BalanceHoldError::TrackingCopy(tce));\n                    }\n                };\n\n                {\n                    // clear holds\n                    let hold_kind = request.balance_hold_kind();\n                    let mut filter = vec![];\n                    let tag = BalanceHoldAddrTag::Processing;\n                    if hold_kind.matches(tag) {\n                        let (block_time, interval) = match tc.get_balance_hold_config(tag) {\n                            Ok(Some((block_time, _, interval))) => (block_time, interval),\n                            Ok(None) => {\n                                return BalanceHoldResult::BlockTimeNotFound;\n                            }\n                            Err(tce) => {\n                                return BalanceHoldResult::Failure(BalanceHoldError::TrackingCopy(\n                                    tce,\n                                ));\n                            }\n                        };\n                        filter.push((tag, HoldsEpoch::from_millis(block_time.value(), interval)));\n                 
   }\n                    let tag = BalanceHoldAddrTag::Gas;\n                    if hold_kind.matches(tag) {\n                        let (block_time, interval) = match tc.get_balance_hold_config(tag) {\n                            Ok(Some((block_time, _, interval))) => (block_time, interval),\n                            Ok(None) => {\n                                return BalanceHoldResult::BlockTimeNotFound;\n                            }\n                            Err(tce) => {\n                                return BalanceHoldResult::Failure(BalanceHoldError::TrackingCopy(\n                                    tce,\n                                ));\n                            }\n                        };\n                        filter.push((tag, HoldsEpoch::from_millis(block_time.value(), interval)));\n                    }\n                    if let Err(tce) = tc.clear_expired_balance_holds(purse_addr, filter) {\n                        return BalanceHoldResult::Failure(BalanceHoldError::TrackingCopy(tce));\n                    }\n                }\n\n                // get updated balance\n                let balance_result = self.balance(BalanceRequest::new(\n                    request.state_hash(),\n                    request.protocol_version(),\n                    identifier,\n                    BalanceHandling::Available,\n                    ProofHandling::NoProofs,\n                ));\n                let (total_balance, available_balance) = match balance_result {\n                    BalanceResult::RootNotFound => return BalanceHoldResult::RootNotFound,\n                    BalanceResult::Failure(be) => return be.into(),\n                    BalanceResult::Success {\n                        total_balance,\n                        available_balance,\n                        ..\n                    } => (total_balance, available_balance),\n                };\n                // note that hold & held in this context does not refer to 
remaining holds,\n                // but rather to the requested hold amount and the resulting held amount for\n                // this execution. as calls to this variant clears holds and does not create\n                // new holds, hold & held are zero and no new hold address exists.\n                let new_hold_addr = None;\n                let hold = U512::zero();\n                let held = U512::zero();\n                let effects = tc.effects();\n                BalanceHoldResult::success(\n                    new_hold_addr,\n                    total_balance,\n                    available_balance,\n                    hold,\n                    held,\n                    effects,\n                )\n            }\n        }\n    }\n\n    /// Get the requested era validators.\n    fn era_validators(&self, request: EraValidatorsRequest) -> EraValidatorsResult {\n        match self.seigniorage_recipients(SeigniorageRecipientsRequest::new(request.state_hash())) {\n            SeigniorageRecipientsResult::RootNotFound => EraValidatorsResult::RootNotFound,\n            SeigniorageRecipientsResult::Failure(err) => EraValidatorsResult::Failure(err),\n            SeigniorageRecipientsResult::ValueNotFound(msg) => {\n                EraValidatorsResult::ValueNotFound(msg)\n            }\n            SeigniorageRecipientsResult::AuctionNotFound => EraValidatorsResult::AuctionNotFound,\n            SeigniorageRecipientsResult::Success {\n                seigniorage_recipients,\n                ..\n            } => {\n                let era_validators = match seigniorage_recipients {\n                    SeigniorageRecipientsSnapshot::V1(snapshot) => {\n                        auction::detail::era_validators_from_legacy_snapshot(snapshot)\n                    }\n                    SeigniorageRecipientsSnapshot::V2(snapshot) => {\n                        auction::detail::era_validators_from_snapshot(snapshot)\n                    }\n                };\n            
    EraValidatorsResult::Success { era_validators }\n            }\n        }\n    }\n\n    /// Get the requested seigniorage recipients.\n    fn seigniorage_recipients(\n        &self,\n        request: SeigniorageRecipientsRequest,\n    ) -> SeigniorageRecipientsResult {\n        let state_hash = request.state_hash();\n        let tc = match self.tracking_copy(state_hash) {\n            Ok(Some(tc)) => tc,\n            Ok(None) => return SeigniorageRecipientsResult::RootNotFound,\n            Err(err) => {\n                return SeigniorageRecipientsResult::Failure(TrackingCopyError::Storage(err))\n            }\n        };\n        let scr = match tc.get_system_entity_registry() {\n            Ok(scr) => scr,\n            Err(err) => return SeigniorageRecipientsResult::Failure(err),\n        };\n        let enable_addressable_entity = tc.enable_addressable_entity();\n        match get_snapshot_data(self, &scr, state_hash, enable_addressable_entity) {\n            not_found @ SeigniorageRecipientsResult::ValueNotFound(_) => {\n                if enable_addressable_entity {\n                    //There is a chance that, when looking for systemic data, we could be using a\n                    // state root hash from before the AddressableEntity\n                    // migration boundary. In such a case, we should attempt to look up the data\n                    // under the Account/Contract model instead; e.g. 
Key::Hash instead of\n                    // Key::AddressableEntity\n                    match get_snapshot_data(self, &scr, state_hash, false) {\n                        SeigniorageRecipientsResult::ValueNotFound(_) => not_found,\n                        other => other,\n                    }\n                } else {\n                    not_found\n                }\n            }\n            other => other,\n        }\n    }\n\n    /// Gets the bids.\n    fn bids(&self, request: BidsRequest) -> BidsResult {\n        let state_hash = request.state_hash();\n        let mut tc = match self.tracking_copy(state_hash) {\n            Ok(Some(tc)) => tc,\n            Ok(None) => return BidsResult::RootNotFound,\n            Err(err) => return BidsResult::Failure(TrackingCopyError::Storage(err)),\n        };\n\n        let bid_keys = match tc.get_keys(&KeyTag::BidAddr) {\n            Ok(ret) => ret,\n            Err(err) => return BidsResult::Failure(err),\n        };\n\n        let mut bids = vec![];\n        for key in bid_keys.iter() {\n            match tc.get(key) {\n                Ok(ret) => match ret {\n                    Some(StoredValue::BidKind(bid_kind)) => {\n                        if !bids.contains(&bid_kind) {\n                            bids.push(bid_kind);\n                        }\n                    }\n                    Some(_) => {\n                        return BidsResult::Failure(\n                            TrackingCopyError::UnexpectedStoredValueVariant,\n                        );\n                    }\n                    None => return BidsResult::Failure(TrackingCopyError::MissingBid(*key)),\n                },\n                Err(error) => return BidsResult::Failure(error),\n            }\n        }\n        BidsResult::Success { bids }\n    }\n\n    /// Direct auction interaction for all variations of bid management.\n    fn bidding(\n        &self,\n        BiddingRequest {\n            config,\n            state_hash,\n         
   protocol_version,\n            auction_method,\n            transaction_hash,\n            initiator,\n            authorization_keys,\n        }: BiddingRequest,\n    ) -> BiddingResult {\n        let tc = match self.tracking_copy(state_hash) {\n            Ok(Some(tc)) => Rc::new(RefCell::new(tc)),\n            Ok(None) => return BiddingResult::RootNotFound,\n            Err(err) => return BiddingResult::Failure(TrackingCopyError::Storage(err)),\n        };\n\n        let source_account_hash = initiator.account_hash();\n        let (entity_addr, mut footprint, mut entity_access_rights) = match tc\n            .borrow_mut()\n            .authorized_runtime_footprint_with_access_rights(\n                protocol_version,\n                source_account_hash,\n                &authorization_keys,\n                &BTreeSet::default(),\n            ) {\n            Ok(ret) => ret,\n            Err(tce) => {\n                return BiddingResult::Failure(tce);\n            }\n        };\n        let entity_key = Key::AddressableEntity(entity_addr);\n\n        // extend named keys with era end timestamp\n        match tc\n            .borrow_mut()\n            .system_contract_named_key(AUCTION, ERA_END_TIMESTAMP_MILLIS_KEY)\n        {\n            Ok(Some(k)) => {\n                match k.as_uref() {\n                    Some(uref) => entity_access_rights.extend(&[*uref]),\n                    None => {\n                        return BiddingResult::Failure(TrackingCopyError::UnexpectedKeyVariant(k));\n                    }\n                }\n                footprint.insert_into_named_keys(ERA_END_TIMESTAMP_MILLIS_KEY.into(), k);\n            }\n            Ok(None) => {\n                return BiddingResult::Failure(TrackingCopyError::NamedKeyNotFound(\n                    ERA_END_TIMESTAMP_MILLIS_KEY.into(),\n                ));\n            }\n            Err(tce) => {\n                return BiddingResult::Failure(tce);\n            }\n        };\n        // 
extend named keys with era id\n        match tc\n            .borrow_mut()\n            .system_contract_named_key(AUCTION, ERA_ID_KEY)\n        {\n            Ok(Some(k)) => {\n                match k.as_uref() {\n                    Some(uref) => entity_access_rights.extend(&[*uref]),\n                    None => {\n                        return BiddingResult::Failure(TrackingCopyError::UnexpectedKeyVariant(k));\n                    }\n                }\n                footprint.insert_into_named_keys(ERA_ID_KEY.into(), k);\n            }\n            Ok(None) => {\n                return BiddingResult::Failure(TrackingCopyError::NamedKeyNotFound(\n                    ERA_ID_KEY.into(),\n                ));\n            }\n            Err(tce) => {\n                return BiddingResult::Failure(tce);\n            }\n        };\n\n        let phase = Phase::Session;\n        let id = Id::Transaction(transaction_hash);\n        let address_generator = AddressGenerator::new(&id.seed(), phase);\n        let max_delegators_per_validator = config.max_delegators_per_validator();\n        let minimum_bid_amount = config.minimum_bid_amount();\n\n        let global_minimum_delegation_limit = config.global_minimum_delegation_amount();\n        let global_maximum_delegation_limit = config.global_maximum_delegation_amount();\n        let mut runtime = RuntimeNative::new(\n            config,\n            protocol_version,\n            id,\n            Arc::new(RwLock::new(address_generator)),\n            Rc::clone(&tc),\n            source_account_hash,\n            entity_key,\n            footprint,\n            entity_access_rights,\n            U512::MAX,\n            phase,\n        );\n\n        let result = match auction_method {\n            AuctionMethod::ActivateBid { validator } => runtime\n                .activate_bid(validator, minimum_bid_amount)\n                .map(|_| AuctionMethodRet::Unit)\n                .map_err(|auc_err| {\n                    
TrackingCopyError::SystemContract(system::Error::Auction(auc_err))\n                }),\n            AuctionMethod::AddBid {\n                public_key,\n                delegation_rate,\n                amount,\n                minimum_delegation_amount,\n                maximum_delegation_amount,\n                minimum_bid_amount,\n                reserved_slots,\n            } => runtime\n                .get_minimum_delegation_rate()\n                .and_then(|minimum_delegation_rate| {\n                    runtime\n                        .add_bid(\n                            public_key,\n                            delegation_rate,\n                            amount,\n                            minimum_delegation_amount,\n                            maximum_delegation_amount,\n                            minimum_bid_amount,\n                            max_delegators_per_validator,\n                            reserved_slots,\n                            global_minimum_delegation_limit,\n                            global_maximum_delegation_limit,\n                            minimum_delegation_rate,\n                        )\n                        .map(AuctionMethodRet::UpdatedAmount)\n                        .map_err(TrackingCopyError::Api)\n                }),\n            AuctionMethod::WithdrawBid {\n                public_key,\n                amount,\n                minimum_bid_amount,\n            } => runtime\n                .withdraw_bid(public_key, amount, minimum_bid_amount)\n                .map(AuctionMethodRet::UpdatedAmount)\n                .map_err(|auc_err| {\n                    TrackingCopyError::SystemContract(system::Error::Auction(auc_err))\n                }),\n            AuctionMethod::Delegate {\n                delegator,\n                validator,\n                amount,\n                max_delegators_per_validator,\n            } => runtime\n                .delegate(delegator, validator, amount, 
max_delegators_per_validator)\n                .map(AuctionMethodRet::UpdatedAmount)\n                .map_err(TrackingCopyError::Api),\n            AuctionMethod::Undelegate {\n                delegator,\n                validator,\n                amount,\n            } => runtime\n                .undelegate(delegator, validator, amount)\n                .map(AuctionMethodRet::UpdatedAmount)\n                .map_err(|auc_err| {\n                    TrackingCopyError::SystemContract(system::Error::Auction(auc_err))\n                }),\n            AuctionMethod::Redelegate {\n                delegator,\n                validator,\n                amount,\n                new_validator,\n            } => runtime\n                .redelegate(delegator, validator, amount, new_validator)\n                .map(AuctionMethodRet::UpdatedAmount)\n                .map_err(|auc_err| {\n                    TrackingCopyError::SystemContract(system::Error::Auction(auc_err))\n                }),\n            AuctionMethod::ChangeBidPublicKey {\n                public_key,\n                new_public_key,\n            } => runtime\n                .change_bid_public_key(public_key, new_public_key)\n                .map(|_| AuctionMethodRet::Unit)\n                .map_err(|auc_err| {\n                    TrackingCopyError::SystemContract(system::Error::Auction(auc_err))\n                }),\n            AuctionMethod::AddReservations { reservations } => runtime\n                .get_minimum_delegation_rate()\n                .and_then(|minimum_delegation_rate| {\n                    runtime\n                        .add_reservations(reservations, minimum_delegation_rate)\n                        .map(|_| AuctionMethodRet::Unit)\n                        .map_err(|auc_err| {\n                            TrackingCopyError::SystemContract(system::Error::Auction(auc_err))\n                        })\n                }),\n            AuctionMethod::CancelReservations {\n            
    validator,\n                delegators,\n                max_delegators_per_validator,\n            } => runtime\n                .cancel_reservations(validator, delegators, max_delegators_per_validator)\n                .map(|_| AuctionMethodRet::Unit)\n                .map_err(|auc_err| {\n                    TrackingCopyError::SystemContract(system::Error::Auction(auc_err))\n                }),\n        };\n\n        let transfers = runtime.into_transfers();\n        let effects = tc.borrow_mut().effects();\n\n        match result {\n            Ok(ret) => BiddingResult::Success {\n                ret,\n                effects,\n                transfers,\n            },\n            Err(tce) => BiddingResult::Failure(tce),\n        }\n    }\n\n    /// Handle refund.\n    fn handle_refund(\n        &self,\n        HandleRefundRequest {\n            config,\n            state_hash,\n            protocol_version,\n            transaction_hash,\n            refund_mode,\n        }: HandleRefundRequest,\n    ) -> HandleRefundResult {\n        let tc = match self.tracking_copy(state_hash) {\n            Ok(Some(tc)) => Rc::new(RefCell::new(tc)),\n            Ok(None) => return HandleRefundResult::RootNotFound,\n            Err(err) => return HandleRefundResult::Failure(TrackingCopyError::Storage(err)),\n        };\n\n        let id = Id::Transaction(transaction_hash);\n        let phase = refund_mode.phase();\n        let address_generator = Arc::new(RwLock::new(AddressGenerator::new(&id.seed(), phase)));\n        let mut runtime = match phase {\n            Phase::FinalizePayment => {\n                // this runtime uses the system's context\n                match RuntimeNative::new_system_runtime(\n                    config,\n                    protocol_version,\n                    id,\n                    address_generator,\n                    Rc::clone(&tc),\n                    phase,\n                ) {\n                    Ok(rt) => rt,\n             
       Err(tce) => {\n                        return HandleRefundResult::Failure(tce);\n                    }\n                }\n            }\n            Phase::Payment => {\n                // this runtime uses the handle payment contract's context\n                match RuntimeNative::new_system_contract_runtime(\n                    config,\n                    protocol_version,\n                    id,\n                    address_generator,\n                    Rc::clone(&tc),\n                    phase,\n                    HANDLE_PAYMENT,\n                ) {\n                    Ok(rt) => rt,\n                    Err(tce) => {\n                        return HandleRefundResult::Failure(tce);\n                    }\n                }\n            }\n            Phase::System | Phase::Session => return HandleRefundResult::InvalidPhase,\n        };\n\n        let result = match refund_mode {\n            HandleRefundMode::CalculateAmount {\n                limit,\n                cost,\n                gas_price,\n                consumed,\n                ratio,\n                available,\n            } => {\n                let (numer, denom) = ratio.into();\n                let ratio = Ratio::new_raw(U512::from(numer), U512::from(denom));\n                let refund_amount = match runtime.calculate_overpayment_and_fee(\n                    limit, gas_price, cost, consumed, ratio, available,\n                ) {\n                    Ok((refund, _)) => Some(refund),\n                    Err(hpe) => {\n                        return HandleRefundResult::Failure(TrackingCopyError::SystemContract(\n                            system::Error::HandlePayment(hpe),\n                        ));\n                    }\n                };\n                Ok(refund_amount)\n            }\n            HandleRefundMode::Refund {\n                initiator_addr,\n                limit,\n                cost,\n                gas_price,\n                consumed,\n       
         ratio,\n                source,\n                target,\n                available,\n            } => {\n                let source_purse = match source.purse_uref(&mut tc.borrow_mut(), protocol_version) {\n                    Ok(value) => value,\n                    Err(tce) => return HandleRefundResult::Failure(tce),\n                };\n                let (numer, denom) = ratio.into();\n                let ratio = Ratio::new_raw(U512::from(numer), U512::from(denom));\n                let refund_amount = match runtime.calculate_overpayment_and_fee(\n                    limit, gas_price, cost, consumed, ratio, available,\n                ) {\n                    Ok((refund, _)) => refund,\n                    Err(hpe) => {\n                        return HandleRefundResult::Failure(TrackingCopyError::SystemContract(\n                            system::Error::HandlePayment(hpe),\n                        ));\n                    }\n                };\n                let target_purse = match target.purse_uref(&mut tc.borrow_mut(), protocol_version) {\n                    Ok(value) => value,\n                    Err(tce) => return HandleRefundResult::Failure(tce),\n                };\n                // pay amount from source to target\n                match runtime\n                    .transfer(\n                        Some(initiator_addr.account_hash()),\n                        source_purse,\n                        target_purse,\n                        refund_amount,\n                        None,\n                    )\n                    .map_err(|mint_err| {\n                        TrackingCopyError::SystemContract(system::Error::Mint(mint_err))\n                    }) {\n                    Ok(_) => Ok(Some(refund_amount)),\n                    Err(err) => Err(err),\n                }\n            }\n            HandleRefundMode::RefundNoFeeCustomPayment {\n                initiator_addr,\n                limit,\n                cost,\n       
         gas_price,\n            } => {\n                let balance_result = self.balance(BalanceRequest::new(\n                    state_hash,\n                    protocol_version,\n                    BalanceIdentifier::Payment,\n                    BalanceHandling::Available,\n                    ProofHandling::NoProofs,\n                ));\n                let available_balance = match balance_result {\n                    BalanceResult::RootNotFound => {\n                        return HandleRefundResult::RootNotFound;\n                    }\n                    BalanceResult::Failure(tce) => {\n                        return HandleRefundResult::Failure(tce);\n                    }\n                    BalanceResult::Success {\n                        available_balance, ..\n                    } => available_balance,\n                };\n\n                let consumed = U512::zero();\n                let ratio = Ratio::new_raw(U512::one(), U512::one());\n\n                let refund_amount = match runtime.calculate_overpayment_and_fee(\n                    limit,\n                    gas_price,\n                    cost,\n                    consumed,\n                    ratio,\n                    available_balance,\n                ) {\n                    Ok((refund, _)) => refund,\n                    Err(hpe) => {\n                        return HandleRefundResult::Failure(TrackingCopyError::SystemContract(\n                            system::Error::HandlePayment(hpe),\n                        ));\n                    }\n                };\n                let source_purse = match BalanceIdentifier::Payment\n                    .purse_uref(&mut tc.borrow_mut(), protocol_version)\n                {\n                    Ok(value) => value,\n                    Err(tce) => return HandleRefundResult::Failure(tce),\n                };\n                let target_purse = match BalanceIdentifier::Refund\n                    .purse_uref(&mut tc.borrow_mut(), 
protocol_version)\n                {\n                    Ok(value) => value,\n                    Err(tce) => return HandleRefundResult::Failure(tce),\n                };\n                match runtime\n                    .transfer(\n                        Some(initiator_addr.account_hash()),\n                        source_purse,\n                        target_purse,\n                        refund_amount,\n                        None,\n                    )\n                    .map_err(|mint_err| {\n                        TrackingCopyError::SystemContract(system::Error::Mint(mint_err))\n                    }) {\n                    Ok(_) => Ok(Some(U512::zero())), // return 0 in this mode\n                    Err(err) => Err(err),\n                }\n            }\n            HandleRefundMode::Burn {\n                limit,\n                gas_price,\n                cost,\n                consumed,\n                source,\n                ratio,\n                available,\n            } => {\n                let source_purse = match source.purse_uref(&mut tc.borrow_mut(), protocol_version) {\n                    Ok(value) => value,\n                    Err(tce) => return HandleRefundResult::Failure(tce),\n                };\n                let (numer, denom) = ratio.into();\n                let ratio = Ratio::new_raw(U512::from(numer), U512::from(denom));\n                let burn_amount = match runtime.calculate_overpayment_and_fee(\n                    limit, gas_price, cost, consumed, ratio, available,\n                ) {\n                    Ok((amount, _)) => Some(amount),\n                    Err(hpe) => {\n                        return HandleRefundResult::Failure(TrackingCopyError::SystemContract(\n                            system::Error::HandlePayment(hpe),\n                        ));\n                    }\n                };\n                match runtime.payment_burn(source_purse, burn_amount) {\n                    Ok(_) => 
Ok(burn_amount),\n                    Err(hpe) => Err(TrackingCopyError::SystemContract(\n                        system::Error::HandlePayment(hpe),\n                    )),\n                }\n            }\n            HandleRefundMode::SetRefundPurse { target } => {\n                let target_purse = match target.purse_uref(&mut tc.borrow_mut(), protocol_version) {\n                    Ok(value) => value,\n                    Err(tce) => return HandleRefundResult::Failure(tce),\n                };\n                match runtime.set_refund_purse(target_purse) {\n                    Ok(_) => Ok(None),\n                    Err(hpe) => Err(TrackingCopyError::SystemContract(\n                        system::Error::HandlePayment(hpe),\n                    )),\n                }\n            }\n            HandleRefundMode::ClearRefundPurse => match runtime.clear_refund_purse() {\n                Ok(_) => Ok(None),\n                Err(hpe) => Err(TrackingCopyError::SystemContract(\n                    system::Error::HandlePayment(hpe),\n                )),\n            },\n        };\n\n        let effects = tc.borrow_mut().effects();\n        let transfers = runtime.into_transfers();\n\n        match result {\n            Ok(amount) => HandleRefundResult::Success {\n                transfers,\n                effects,\n                amount,\n            },\n            Err(tce) => HandleRefundResult::Failure(tce),\n        }\n    }\n\n    /// Handle payment.\n    fn handle_fee(\n        &self,\n        HandleFeeRequest {\n            config,\n            state_hash,\n            protocol_version,\n            transaction_hash,\n            handle_fee_mode,\n        }: HandleFeeRequest,\n    ) -> HandleFeeResult {\n        let tc = match self.tracking_copy(state_hash) {\n            Ok(Some(tc)) => Rc::new(RefCell::new(tc)),\n            Ok(None) => return HandleFeeResult::RootNotFound,\n            Err(err) => return 
HandleFeeResult::Failure(TrackingCopyError::Storage(err)),\n        };\n\n        // this runtime uses the system's context\n\n        let id = Id::Transaction(transaction_hash);\n        let phase = Phase::FinalizePayment;\n        let address_generator = AddressGenerator::new(&id.seed(), phase);\n\n        let mut runtime = match RuntimeNative::new_system_runtime(\n            config,\n            protocol_version,\n            id,\n            Arc::new(RwLock::new(address_generator)),\n            Rc::clone(&tc),\n            phase,\n        ) {\n            Ok(rt) => rt,\n            Err(tce) => {\n                return HandleFeeResult::Failure(tce);\n            }\n        };\n\n        let result = match handle_fee_mode {\n            HandleFeeMode::Credit {\n                validator,\n                amount,\n                era_id,\n            } => runtime\n                .write_validator_credit(*validator, era_id, amount)\n                .map(|_| ())\n                .map_err(|auction_error| {\n                    TrackingCopyError::SystemContract(system::Error::Auction(auction_error))\n                }),\n            HandleFeeMode::Pay {\n                initiator_addr,\n                amount,\n                source,\n                target,\n            } => {\n                let source_purse = match source.purse_uref(&mut tc.borrow_mut(), protocol_version) {\n                    Ok(value) => value,\n                    Err(tce) => return HandleFeeResult::Failure(tce),\n                };\n                let target_purse = match target.purse_uref(&mut tc.borrow_mut(), protocol_version) {\n                    Ok(value) => value,\n                    Err(tce) => return HandleFeeResult::Failure(tce),\n                };\n                runtime\n                    .transfer(\n                        Some(initiator_addr.account_hash()),\n                        source_purse,\n                        target_purse,\n                        amount,\n 
                       None,\n                    )\n                    .map_err(|mint_err| {\n                        TrackingCopyError::SystemContract(system::Error::Mint(mint_err))\n                    })\n            }\n            HandleFeeMode::Burn { source, amount } => {\n                let source_purse = match source.purse_uref(&mut tc.borrow_mut(), protocol_version) {\n                    Ok(value) => value,\n                    Err(tce) => return HandleFeeResult::Failure(tce),\n                };\n                runtime\n                    .payment_burn(source_purse, amount)\n                    .map_err(|handle_payment_error| {\n                        TrackingCopyError::SystemContract(system::Error::HandlePayment(\n                            handle_payment_error,\n                        ))\n                    })\n            }\n        };\n\n        let effects = tc.borrow_mut().effects();\n        let transfers = runtime.into_transfers();\n\n        match result {\n            Ok(_) => HandleFeeResult::Success { transfers, effects },\n            Err(tce) => HandleFeeResult::Failure(tce),\n        }\n    }\n\n    /// Gets the execution result checksum.\n    fn execution_result_checksum(\n        &self,\n        request: ExecutionResultsChecksumRequest,\n    ) -> ExecutionResultsChecksumResult {\n        let state_hash = request.state_hash();\n        let mut tc = match self.tracking_copy(state_hash) {\n            Ok(Some(tc)) => tc,\n            Ok(None) => return ExecutionResultsChecksumResult::RootNotFound,\n            Err(err) => {\n                return ExecutionResultsChecksumResult::Failure(TrackingCopyError::Storage(err));\n            }\n        };\n        match tc.get_checksum_registry() {\n            Ok(Some(registry)) => match registry.get(EXECUTION_RESULTS_CHECKSUM_NAME) {\n                Some(checksum) => ExecutionResultsChecksumResult::Success {\n                    checksum: *checksum,\n                },\n                
None => ExecutionResultsChecksumResult::ChecksumNotFound,\n            },\n            Ok(None) => ExecutionResultsChecksumResult::RegistryNotFound,\n            Err(err) => ExecutionResultsChecksumResult::Failure(err),\n        }\n    }\n\n    /// Gets an addressable entity.\n    fn addressable_entity(&self, request: AddressableEntityRequest) -> AddressableEntityResult {\n        let key = request.key();\n        let query_key = match key {\n            Key::Account(_) => {\n                let query_request = QueryRequest::new(request.state_hash(), key, vec![]);\n                match self.query(query_request) {\n                    QueryResult::RootNotFound => return AddressableEntityResult::RootNotFound,\n                    QueryResult::ValueNotFound(msg) => {\n                        return AddressableEntityResult::ValueNotFound(msg);\n                    }\n                    QueryResult::Failure(err) => return AddressableEntityResult::Failure(err),\n                    QueryResult::Success { value, .. 
} => {\n                        if let StoredValue::Account(account) = *value {\n                            // legacy account that has not been migrated\n                            let entity = AddressableEntity::from(account);\n                            return AddressableEntityResult::Success { entity };\n                        }\n                        if let StoredValue::CLValue(cl_value) = &*value {\n                            // the corresponding entity key should be under the account's key\n                            match cl_value.clone().into_t::<Key>() {\n                                Ok(entity_key @ Key::AddressableEntity(_)) => entity_key,\n                                Ok(invalid_key) => {\n                                    warn!(\n                                        %key,\n                                        %invalid_key,\n                                        type_name = %value.type_name(),\n                                        \"expected a Key::AddressableEntity to be stored under account hash\"\n                                    );\n                                    return AddressableEntityResult::Failure(\n                                        TrackingCopyError::UnexpectedStoredValueVariant,\n                                    );\n                                }\n                                Err(error) => {\n                                    error!(%key, %error, \"expected a CLValue::Key to be stored under account hash\");\n                                    return AddressableEntityResult::Failure(\n                                        TrackingCopyError::CLValue(error),\n                                    );\n                                }\n                            }\n                        } else {\n                            warn!(\n                                %key,\n                                type_name = %value.type_name(),\n                                \"expected a CLValue::Key 
or Account to be stored under account hash\"\n                            );\n                            return AddressableEntityResult::Failure(\n                                TrackingCopyError::UnexpectedStoredValueVariant,\n                            );\n                        }\n                    }\n                }\n            }\n            Key::Hash(contract_hash) => {\n                let query_request = QueryRequest::new(request.state_hash(), key, vec![]);\n                match self.query(query_request) {\n                    QueryResult::RootNotFound => return AddressableEntityResult::RootNotFound,\n                    QueryResult::ValueNotFound(msg) => {\n                        return AddressableEntityResult::ValueNotFound(msg);\n                    }\n                    QueryResult::Failure(err) => return AddressableEntityResult::Failure(err),\n                    QueryResult::Success { value, .. } => {\n                        if let StoredValue::Contract(contract) = *value {\n                            // legacy contract that has not been migrated\n                            let entity = AddressableEntity::from(contract);\n                            return AddressableEntityResult::Success { entity };\n                        }\n                        Key::AddressableEntity(EntityAddr::SmartContract(contract_hash))\n                    }\n                }\n            }\n            Key::AddressableEntity(_) => key,\n            _ => {\n                return AddressableEntityResult::Failure(TrackingCopyError::UnexpectedKeyVariant(\n                    key,\n                ));\n            }\n        };\n\n        let query_request = QueryRequest::new(request.state_hash(), query_key, vec![]);\n        match self.query(query_request) {\n            QueryResult::RootNotFound => AddressableEntityResult::RootNotFound,\n            QueryResult::ValueNotFound(msg) => AddressableEntityResult::ValueNotFound(msg),\n            
QueryResult::Success { value, .. } => {\n                let entity = match value.as_addressable_entity() {\n                    Some(entity) => entity.clone(),\n                    None => {\n                        return AddressableEntityResult::Failure(\n                            TrackingCopyError::UnexpectedStoredValueVariant,\n                        );\n                    }\n                };\n                AddressableEntityResult::Success { entity }\n            }\n            QueryResult::Failure(err) => AddressableEntityResult::Failure(err),\n        }\n    }\n\n    /// Returns the system entity registry or the key for a system entity registered within it.\n    fn system_entity_registry(\n        &self,\n        request: SystemEntityRegistryRequest,\n    ) -> SystemEntityRegistryResult {\n        let state_hash = request.state_hash();\n        let tc = match self.tracking_copy(state_hash) {\n            Ok(Some(tc)) => tc,\n            Ok(None) => return SystemEntityRegistryResult::RootNotFound,\n            Err(err) => {\n                return SystemEntityRegistryResult::Failure(TrackingCopyError::Storage(err));\n            }\n        };\n\n        let reg = match tc.get_system_entity_registry() {\n            Ok(reg) => reg,\n            Err(tce) => {\n                return SystemEntityRegistryResult::Failure(tce);\n            }\n        };\n\n        let selector = request.selector();\n        match selector {\n            SystemEntityRegistrySelector::All => SystemEntityRegistryResult::Success {\n                selected: selector.clone(),\n                payload: SystemEntityRegistryPayload::All(reg),\n            },\n            SystemEntityRegistrySelector::ByName(name) => match reg.get(name).copied() {\n                Some(entity_hash) => {\n                    let key = if !request.enable_addressable_entity() {\n                        Key::Hash(entity_hash)\n                    } else {\n                        
Key::AddressableEntity(EntityAddr::System(entity_hash))\n                    };\n                    SystemEntityRegistryResult::Success {\n                        selected: selector.clone(),\n                        payload: SystemEntityRegistryPayload::EntityKey(key),\n                    }\n                }\n                None => {\n                    error!(\"unexpected query failure; mint not found\");\n                    SystemEntityRegistryResult::NamedEntityNotFound(name.clone())\n                }\n            },\n        }\n    }\n\n    /// Gets an entry point value.\n    fn entry_point(&self, request: EntryPointRequest) -> EntryPointResult {\n        let state_root_hash = request.state_hash();\n        let contract_hash = request.contract_hash();\n        let entry_point_name = request.entry_point_name();\n        match EntryPointAddr::new_v1_entry_point_addr(\n            EntityAddr::SmartContract(contract_hash),\n            entry_point_name,\n        ) {\n            Ok(entry_point_addr) => {\n                let key = Key::EntryPoint(entry_point_addr);\n                let query_request = QueryRequest::new(request.state_hash(), key, vec![]);\n                //We first check if the entry point exists as a stand alone 2.x entity\n                match self.query(query_request) {\n                    QueryResult::RootNotFound => EntryPointResult::RootNotFound,\n                    QueryResult::ValueNotFound(query_result_not_found_msg) => {\n                        //If the entry point was not found as a 2.x entity, we check if it exists\n                        // as part of a 1.x contract\n                        let contract_key = Key::Hash(contract_hash);\n                        let contract_request = ContractRequest::new(state_root_hash, contract_key);\n                        match self.contract(contract_request) {\n                            ContractResult::Failure(tce) => EntryPointResult::Failure(tce),\n                            
ContractResult::ValueNotFound(_) => {\n                                EntryPointResult::ValueNotFound(query_result_not_found_msg)\n                            }\n                            ContractResult::RootNotFound => EntryPointResult::RootNotFound,\n                            ContractResult::Success { contract } => {\n                                match contract.entry_points().get(entry_point_name) {\n                                    Some(contract_entry_point) => EntryPointResult::Success {\n                                        entry_point: EntryPointValue::V1CasperVm(\n                                            EntityEntryPoint::from(contract_entry_point),\n                                        ),\n                                    },\n                                    None => {\n                                        EntryPointResult::ValueNotFound(query_result_not_found_msg)\n                                    }\n                                }\n                            }\n                        }\n                    }\n                    QueryResult::Failure(tce) => EntryPointResult::Failure(tce),\n                    QueryResult::Success { value, .. 
} => {\n                        if let StoredValue::EntryPoint(entry_point) = *value {\n                            EntryPointResult::Success { entry_point }\n                        } else {\n                            error!(\"Expected to get entry point value received other variant\");\n                            EntryPointResult::Failure(\n                                TrackingCopyError::UnexpectedStoredValueVariant,\n                            )\n                        }\n                    }\n                }\n            }\n            Err(_) => EntryPointResult::Failure(\n                //TODO maybe we can have a better error type here\n                TrackingCopyError::ValueNotFound(\"Entry point not found\".to_string()),\n            ),\n        }\n    }\n\n    /// Gets a contract value.\n    fn contract(&self, request: ContractRequest) -> ContractResult {\n        let query_request = QueryRequest::new(request.state_hash(), request.key(), vec![]);\n\n        match self.query(query_request) {\n            QueryResult::RootNotFound => ContractResult::RootNotFound,\n            QueryResult::ValueNotFound(msg) => ContractResult::ValueNotFound(msg),\n            QueryResult::Failure(tce) => ContractResult::Failure(tce),\n            QueryResult::Success { value, .. 
} => {\n                if let StoredValue::Contract(contract) = *value {\n                    ContractResult::Success { contract }\n                } else {\n                    error!(\"Expected to get contract value received other variant\");\n                    ContractResult::Failure(TrackingCopyError::UnexpectedStoredValueVariant)\n                }\n            }\n        }\n    }\n\n    /// Gets an entry point value.\n    fn entry_point_exists(&self, request: EntryPointExistsRequest) -> EntryPointExistsResult {\n        match self.entry_point(request.into()) {\n            EntryPointResult::RootNotFound => EntryPointExistsResult::RootNotFound,\n            EntryPointResult::ValueNotFound(msg) => EntryPointExistsResult::ValueNotFound(msg),\n            EntryPointResult::Success { .. } => EntryPointExistsResult::Success,\n            EntryPointResult::Failure(error) => EntryPointExistsResult::Failure(error),\n        }\n    }\n\n    /// Gets total supply.\n    fn total_supply(&self, request: TotalSupplyRequest) -> TotalSupplyResult {\n        let state_hash = request.state_hash();\n        let tc = match self.tracking_copy(state_hash) {\n            Ok(Some(tc)) => tc,\n            Ok(None) => return TotalSupplyResult::RootNotFound,\n            Err(err) => return TotalSupplyResult::Failure(TrackingCopyError::Storage(err)),\n        };\n        let scr = match tc.get_system_entity_registry() {\n            Ok(scr) => scr,\n            Err(err) => return TotalSupplyResult::Failure(err),\n        };\n        let enable_addressable_entity = tc.enable_addressable_entity();\n        match get_total_supply_data(self, &scr, state_hash, enable_addressable_entity) {\n            not_found @ TotalSupplyResult::ValueNotFound(_) => {\n                if enable_addressable_entity {\n                    //There is a chance that, when looking for systemic data, we could be using a\n                    // state root hash from before the AddressableEntity\n                   
 // migration boundary. In such a case, we should attempt to look up the data\n                    // under the Account/Contract model instead; e.g. Key::Hash instead of\n                    // Key::AddressableEntity\n                    match get_total_supply_data(self, &scr, state_hash, false) {\n                        TotalSupplyResult::ValueNotFound(_) => not_found,\n                        other => other,\n                    }\n                } else {\n                    not_found\n                }\n            }\n            other => other,\n        }\n    }\n\n    /// Gets the current round seigniorage rate.\n    fn round_seigniorage_rate(\n        &self,\n        request: RoundSeigniorageRateRequest,\n    ) -> RoundSeigniorageRateResult {\n        let state_hash = request.state_hash();\n        let tc = match self.tracking_copy(state_hash) {\n            Ok(Some(tc)) => tc,\n            Ok(None) => return RoundSeigniorageRateResult::RootNotFound,\n            Err(err) => {\n                return RoundSeigniorageRateResult::Failure(TrackingCopyError::Storage(err));\n            }\n        };\n        let scr = match tc.get_system_entity_registry() {\n            Ok(scr) => scr,\n            Err(err) => return RoundSeigniorageRateResult::Failure(err),\n        };\n        let enable_addressable_entity = tc.enable_addressable_entity();\n        match get_round_seigniorage_rate_data(self, &scr, state_hash, enable_addressable_entity) {\n            not_found @ RoundSeigniorageRateResult::ValueNotFound(_) => {\n                if enable_addressable_entity {\n                    //There is a chance that, when looking for systemic data, we could be using a\n                    // state root hash from before the AddressableEntity\n                    // migration boundary. In such a case, we should attempt to look up the data\n                    // under the Account/Contract model instead; e.g. 
Key::Hash instead of\n                    // Key::AddressableEntity\n                    match get_round_seigniorage_rate_data(self, &scr, state_hash, false) {\n                        RoundSeigniorageRateResult::ValueNotFound(_) => not_found,\n                        other => other,\n                    }\n                } else {\n                    not_found\n                }\n            }\n            other => other,\n        }\n    }\n\n    /// Direct transfer.\n    fn transfer(&self, request: TransferRequest) -> TransferResult {\n        let state_hash = request.state_hash();\n        let tc = match self.tracking_copy(state_hash) {\n            Ok(Some(tc)) => Rc::new(RefCell::new(tc)),\n            Ok(None) => return TransferResult::RootNotFound,\n            Err(err) => {\n                return TransferResult::Failure(TransferError::TrackingCopy(\n                    TrackingCopyError::Storage(err),\n                ));\n            }\n        };\n\n        let source_account_hash = request.initiator().account_hash();\n        let protocol_version = request.protocol_version();\n        if let Err(tce) = tc\n            .borrow_mut()\n            .migrate_account(source_account_hash, protocol_version)\n        {\n            return TransferResult::Failure(tce.into());\n        }\n\n        let authorization_keys = request.authorization_keys();\n\n        let config = request.config();\n        let transfer_config = config.transfer_config();\n        let administrative_accounts = transfer_config.administrative_accounts();\n\n        let runtime_args = match request.args() {\n            TransferRequestArgs::Raw(runtime_args) => runtime_args.clone(),\n            TransferRequestArgs::Explicit(transfer_args) => {\n                match RuntimeArgs::try_from(*transfer_args) {\n                    Ok(runtime_args) => runtime_args,\n                    Err(cve) => return TransferResult::Failure(TransferError::CLValue(cve)),\n                }\n            }\n  
          TransferRequestArgs::Indirect(bita) => {\n                let source_uref = match bita\n                    .source()\n                    .purse_uref(&mut tc.borrow_mut(), protocol_version)\n                {\n                    Ok(source_uref) => source_uref,\n                    Err(tce) => return TransferResult::Failure(TransferError::TrackingCopy(tce)),\n                };\n                let target_uref = match bita\n                    .target()\n                    .purse_uref(&mut tc.borrow_mut(), protocol_version)\n                {\n                    Ok(target_uref) => target_uref,\n                    Err(tce) => return TransferResult::Failure(TransferError::TrackingCopy(tce)),\n                };\n                let transfer_args = TransferArgs::new(\n                    bita.to(),\n                    source_uref,\n                    target_uref,\n                    bita.amount(),\n                    bita.arg_id(),\n                );\n                match RuntimeArgs::try_from(transfer_args) {\n                    Ok(runtime_args) => runtime_args,\n                    Err(cve) => return TransferResult::Failure(TransferError::CLValue(cve)),\n                }\n            }\n        };\n\n        let remaining_spending_limit = match runtime_args.try_get_number(ARG_AMOUNT) {\n            Ok(amount) => amount,\n            Err(cve) => {\n                debug!(\"failed to derive remaining_spending_limit\");\n                return TransferResult::Failure(TransferError::CLValue(cve));\n            }\n        };\n\n        let mut runtime_args_builder = TransferRuntimeArgsBuilder::new(runtime_args);\n\n        let transfer_target_mode = match runtime_args_builder\n            .resolve_transfer_target_mode(protocol_version, Rc::clone(&tc))\n        {\n            Ok(transfer_target_mode) => transfer_target_mode,\n            Err(error) => return TransferResult::Failure(error),\n        };\n\n        // On some private networks, transfers 
are restricted.\n        // This means that they must either the source or target are an admin account.\n        // This behavior is not used on public networks.\n        if transfer_config.enforce_transfer_restrictions(&source_account_hash) {\n            // if the source is an admin, enforce_transfer_restrictions == false\n            // if the source is not an admin, enforce_transfer_restrictions == true,\n            // and we must check to see if the target is an admin.\n            // if the target is also not an admin, this transfer is not permitted.\n            match transfer_target_mode.target_account_hash() {\n                Some(target_account_hash) => {\n                    let is_target_system_account =\n                        target_account_hash == PublicKey::System.to_account_hash();\n                    let is_target_administrator =\n                        transfer_config.is_administrator(&target_account_hash);\n                    if !(is_target_system_account || is_target_administrator) {\n                        // Transferring from normal account to a purse doesn't work.\n                        return TransferResult::Failure(TransferError::RestrictedTransferAttempted);\n                    }\n                }\n                None => {\n                    // can't allow this transfer because we are not sure if the target is an admin.\n                    return TransferResult::Failure(TransferError::UnableToVerifyTargetIsAdmin);\n                }\n            }\n        }\n\n        let (entity_addr, runtime_footprint, entity_access_rights) = match tc\n            .borrow_mut()\n            .authorized_runtime_footprint_with_access_rights(\n                protocol_version,\n                source_account_hash,\n                authorization_keys,\n                &administrative_accounts,\n            ) {\n            Ok(ret) => ret,\n            Err(tce) => {\n                return 
TransferResult::Failure(TransferError::TrackingCopy(tce));\n            }\n        };\n        let entity_key = if config.enable_addressable_entity() {\n            Key::AddressableEntity(entity_addr)\n        } else {\n            match entity_addr {\n                EntityAddr::System(hash) | EntityAddr::SmartContract(hash) => Key::Hash(hash),\n                EntityAddr::Account(hash) => Key::Account(AccountHash::new(hash)),\n            }\n        };\n        let id = Id::Transaction(request.transaction_hash());\n        let phase = Phase::Session;\n        let address_generator = AddressGenerator::new(&id.seed(), phase);\n        // IMPORTANT: this runtime _must_ use the payer's context.\n        let mut runtime = RuntimeNative::new(\n            config.clone(),\n            protocol_version,\n            id,\n            Arc::new(RwLock::new(address_generator)),\n            Rc::clone(&tc),\n            source_account_hash,\n            entity_key,\n            runtime_footprint.clone(),\n            entity_access_rights,\n            remaining_spending_limit,\n            phase,\n        );\n\n        match transfer_target_mode {\n            TransferTargetMode::ExistingAccount { .. } | TransferTargetMode::PurseExists { .. 
} => {\n                // Noop\n            }\n            TransferTargetMode::CreateAccount(account_hash) => {\n                let main_purse = match runtime.mint(U512::zero()) {\n                    Ok(uref) => uref,\n                    Err(mint_error) => {\n                        return TransferResult::Failure(TransferError::Mint(mint_error));\n                    }\n                };\n\n                let account = Account::create(account_hash, NamedKeys::new(), main_purse);\n                if let Err(tce) = tc\n                    .borrow_mut()\n                    .create_addressable_entity_from_account(account, protocol_version)\n                {\n                    return TransferResult::Failure(tce.into());\n                }\n            }\n        }\n        let transfer_args = match runtime_args_builder.build(\n            &runtime_footprint,\n            protocol_version,\n            Rc::clone(&tc),\n        ) {\n            Ok(transfer_args) => transfer_args,\n            Err(error) => return TransferResult::Failure(error),\n        };\n        if let Err(mint_error) = runtime.transfer(\n            transfer_args.to(),\n            transfer_args.source(),\n            transfer_args.target(),\n            transfer_args.amount(),\n            transfer_args.arg_id(),\n        ) {\n            return TransferResult::Failure(TransferError::Mint(mint_error));\n        }\n\n        let transfers = runtime.into_transfers();\n\n        let effects = tc.borrow_mut().effects();\n        let cache = tc.borrow_mut().cache();\n\n        TransferResult::Success {\n            transfers,\n            effects,\n            cache,\n        }\n    }\n\n    /// Direct burn.\n    fn burn(&self, request: BurnRequest) -> BurnResult {\n        let state_hash = request.state_hash();\n        let tc = match self.tracking_copy(state_hash) {\n            Ok(Some(tc)) => Rc::new(RefCell::new(tc)),\n            Ok(None) => return BurnResult::RootNotFound,\n            
Err(err) => {\n                return BurnResult::Failure(BurnError::TrackingCopy(TrackingCopyError::Storage(\n                    err,\n                )));\n            }\n        };\n\n        let source_account_hash = request.initiator().account_hash();\n        let protocol_version = request.protocol_version();\n        if let Err(tce) = tc\n            .borrow_mut()\n            .migrate_account(source_account_hash, protocol_version)\n        {\n            return BurnResult::Failure(tce.into());\n        }\n\n        let authorization_keys = request.authorization_keys();\n\n        let config = request.config();\n\n        let runtime_args = match request.args() {\n            BurnRequestArgs::Raw(runtime_args) => runtime_args.clone(),\n            BurnRequestArgs::Explicit(transfer_args) => {\n                match RuntimeArgs::try_from(*transfer_args) {\n                    Ok(runtime_args) => runtime_args,\n                    Err(cve) => return BurnResult::Failure(BurnError::CLValue(cve)),\n                }\n            }\n        };\n\n        let runtime_args_builder = BurnRuntimeArgsBuilder::new(runtime_args);\n\n        let (entity_addr, mut footprint, mut entity_access_rights) = match tc\n            .borrow_mut()\n            .authorized_runtime_footprint_with_access_rights(\n                protocol_version,\n                source_account_hash,\n                authorization_keys,\n                &BTreeSet::default(),\n            ) {\n            Ok(ret) => ret,\n            Err(tce) => {\n                return BurnResult::Failure(BurnError::TrackingCopy(tce));\n            }\n        };\n        let entity_key = if config.enable_addressable_entity() {\n            Key::AddressableEntity(entity_addr)\n        } else {\n            match entity_addr {\n                EntityAddr::System(hash) | EntityAddr::SmartContract(hash) => Key::Hash(hash),\n                EntityAddr::Account(hash) => Key::Account(AccountHash::new(hash)),\n            
}\n        };\n\n        // extend named keys with total supply\n        match tc\n            .borrow_mut()\n            .system_contract_named_key(MINT, TOTAL_SUPPLY_KEY)\n        {\n            Ok(Some(k)) => {\n                match k.as_uref() {\n                    Some(uref) => entity_access_rights.extend(&[*uref]),\n                    None => {\n                        return BurnResult::Failure(BurnError::TrackingCopy(\n                            TrackingCopyError::UnexpectedKeyVariant(k),\n                        ));\n                    }\n                }\n                footprint.insert_into_named_keys(TOTAL_SUPPLY_KEY.into(), k);\n            }\n            Ok(None) => {\n                return BurnResult::Failure(BurnError::TrackingCopy(\n                    TrackingCopyError::NamedKeyNotFound(TOTAL_SUPPLY_KEY.into()),\n                ));\n            }\n            Err(tce) => {\n                return BurnResult::Failure(BurnError::TrackingCopy(tce));\n            }\n        };\n        let id = Id::Transaction(request.transaction_hash());\n        let phase = Phase::Session;\n        let address_generator = AddressGenerator::new(&id.seed(), phase);\n        let burn_args = match runtime_args_builder.build(&footprint, Rc::clone(&tc)) {\n            Ok(burn_args) => burn_args,\n            Err(error) => return BurnResult::Failure(error),\n        };\n\n        // IMPORTANT: this runtime _must_ use the payer's context.\n        let mut runtime = RuntimeNative::new(\n            config.clone(),\n            protocol_version,\n            id,\n            Arc::new(RwLock::new(address_generator)),\n            Rc::clone(&tc),\n            source_account_hash,\n            entity_key,\n            footprint.clone(),\n            entity_access_rights,\n            burn_args.amount(),\n            phase,\n        );\n\n        if let Err(mint_error) = runtime.burn(burn_args.source(), burn_args.amount()) {\n            return 
BurnResult::Failure(BurnError::Mint(mint_error));\n        }\n\n        let effects = tc.borrow_mut().effects();\n        let cache = tc.borrow_mut().cache();\n\n        BurnResult::Success { effects, cache }\n    }\n\n    /// Gets all values under a given key tag.\n    fn tagged_values(&self, request: TaggedValuesRequest) -> TaggedValuesResult {\n        let state_hash = request.state_hash();\n        let mut tc = match self.tracking_copy(state_hash) {\n            Ok(Some(tc)) => tc,\n            Ok(None) => return TaggedValuesResult::RootNotFound,\n            Err(gse) => return TaggedValuesResult::Failure(TrackingCopyError::Storage(gse)),\n        };\n\n        let key_tag = request.key_tag();\n        let keys = match tc.get_keys(&key_tag) {\n            Ok(keys) => keys,\n            Err(tce) => return TaggedValuesResult::Failure(tce),\n        };\n\n        let mut values = vec![];\n        for key in keys {\n            match tc.get(&key) {\n                Ok(Some(value)) => {\n                    values.push(value);\n                }\n                Ok(None) => {}\n                Err(error) => return TaggedValuesResult::Failure(error),\n            }\n        }\n\n        TaggedValuesResult::Success {\n            values,\n            selection: request.selection(),\n        }\n    }\n\n    /// Gets all values under a given key prefix.\n    /// Currently, this ignores the cache and only provides values from the trie.\n    fn prefixed_values(&self, request: PrefixedValuesRequest) -> PrefixedValuesResult {\n        let mut tc = match self.tracking_copy(request.state_hash()) {\n            Ok(Some(tc)) => tc,\n            Ok(None) => return PrefixedValuesResult::RootNotFound,\n            Err(err) => return PrefixedValuesResult::Failure(TrackingCopyError::Storage(err)),\n        };\n        match tc.get_keys_by_prefix(request.key_prefix()) {\n            Ok(keys) => {\n                let mut values = Vec::with_capacity(keys.len());\n                for 
key in keys {\n                    match tc.get(&key) {\n                        Ok(Some(value)) => values.push(value),\n                        Ok(None) => {}\n                        Err(error) => return PrefixedValuesResult::Failure(error),\n                    }\n                }\n                PrefixedValuesResult::Success {\n                    values,\n                    key_prefix: request.key_prefix().clone(),\n                }\n            }\n            Err(error) => PrefixedValuesResult::Failure(error),\n        }\n    }\n\n    /// Reads a `Trie` from the state if it is present\n    fn trie(&self, request: TrieRequest) -> TrieResult;\n\n    /// Persists a trie element.\n    fn put_trie(&self, request: PutTrieRequest) -> PutTrieResult;\n\n    /// Finds all the children of `trie_raw` which aren't present in the state.\n    fn missing_children(&self, trie_raw: &[u8]) -> Result<Vec<Digest>, GlobalStateError>;\n\n    /// Gets the value of enable entity flag.\n    fn enable_entity(&self) -> bool;\n}\n\nfn get_round_seigniorage_rate_data<T: StateProvider>(\n    state_provider: &T,\n    scr: &SystemHashRegistry,\n    state_hash: Digest,\n    enable_addressable_entity: bool,\n) -> RoundSeigniorageRateResult {\n    let query_request = match scr.get(MINT).copied() {\n        Some(mint_hash) => {\n            let key = if !enable_addressable_entity {\n                Key::Hash(mint_hash)\n            } else {\n                Key::AddressableEntity(EntityAddr::System(mint_hash))\n            };\n            QueryRequest::new(\n                state_hash,\n                key,\n                vec![ROUND_SEIGNIORAGE_RATE_KEY.to_string()],\n            )\n        }\n        None => {\n            error!(\"unexpected query failure; mint not found\");\n            return RoundSeigniorageRateResult::MintNotFound;\n        }\n    };\n\n    match state_provider.query(query_request) {\n        QueryResult::RootNotFound => RoundSeigniorageRateResult::RootNotFound,\n    
    QueryResult::ValueNotFound(msg) => RoundSeigniorageRateResult::ValueNotFound(msg),\n        QueryResult::Failure(tce) => RoundSeigniorageRateResult::Failure(tce),\n        QueryResult::Success { value, proofs: _ } => {\n            let cl_value = match value.into_cl_value() {\n                Some(cl_value) => cl_value,\n                None => {\n                    error!(\"unexpected query failure; total supply is not a CLValue\");\n                    return RoundSeigniorageRateResult::Failure(\n                        TrackingCopyError::UnexpectedStoredValueVariant,\n                    );\n                }\n            };\n\n            match cl_value.into_t() {\n                Ok(rate) => RoundSeigniorageRateResult::Success { rate },\n                Err(cve) => RoundSeigniorageRateResult::Failure(TrackingCopyError::CLValue(cve)),\n            }\n        }\n    }\n}\n\nfn get_total_supply_data<T: StateProvider>(\n    state_provider: &T,\n    scr: &SystemHashRegistry,\n    state_hash: Digest,\n    enable_addressable_entity: bool,\n) -> TotalSupplyResult {\n    let query_request = match scr.get(MINT).copied() {\n        Some(mint_hash) => {\n            let key = if !enable_addressable_entity {\n                Key::Hash(mint_hash)\n            } else {\n                Key::AddressableEntity(EntityAddr::System(mint_hash))\n            };\n            QueryRequest::new(state_hash, key, vec![TOTAL_SUPPLY_KEY.to_string()])\n        }\n        None => {\n            error!(\"unexpected query failure; mint not found\");\n            return TotalSupplyResult::MintNotFound;\n        }\n    };\n    match state_provider.query(query_request) {\n        QueryResult::RootNotFound => TotalSupplyResult::RootNotFound,\n        QueryResult::ValueNotFound(msg) => TotalSupplyResult::ValueNotFound(msg),\n        QueryResult::Failure(tce) => TotalSupplyResult::Failure(tce),\n        QueryResult::Success { value, proofs: _ } => {\n            let cl_value = match 
value.into_cl_value() {\n                Some(cl_value) => cl_value,\n                None => {\n                    error!(\"unexpected query failure; total supply is not a CLValue\");\n                    return TotalSupplyResult::Failure(\n                        TrackingCopyError::UnexpectedStoredValueVariant,\n                    );\n                }\n            };\n\n            match cl_value.into_t() {\n                Ok(total_supply) => TotalSupplyResult::Success { total_supply },\n                Err(cve) => TotalSupplyResult::Failure(TrackingCopyError::CLValue(cve)),\n            }\n        }\n    }\n}\n\nfn get_snapshot_data<T: StateProvider>(\n    state_provider: &T,\n    scr: &SystemHashRegistry,\n    state_hash: Digest,\n    enable_addressable_entity: bool,\n) -> SeigniorageRecipientsResult {\n    let (snapshot_query_request, snapshot_version_query_request) =\n        match build_query_requests(scr, state_hash, enable_addressable_entity) {\n            Ok(res) => res,\n            Err(res) => return res,\n        };\n\n    // check if snapshot version flag is present\n    let snapshot_version: Option<u8> =\n        match query_snapshot_version(state_provider, snapshot_version_query_request) {\n            Ok(value) => value,\n            Err(value) => return value,\n        };\n\n    let snapshot = match query_snapshot(state_provider, snapshot_version, snapshot_query_request) {\n        Ok(snapshot) => snapshot,\n        Err(value) => return value,\n    };\n\n    let query_request = QueryRequest::new(state_hash, Key::RewardsHandling, vec![]);\n    let rewards_ratio = match state_provider.query(query_request) {\n        QueryResult::RootNotFound => return SeigniorageRecipientsResult::RootNotFound,\n        QueryResult::ValueNotFound(_) => Ratio::new(0, 1),\n        QueryResult::Success { value, .. 
} => {\n            if let StoredValue::CLValue(cl_value) = *value {\n                match cl_value.to_t::<BTreeMap<u8, Bytes>>() {\n                    Ok(rewards_handling) => {\n                        match rewards_handling.get(&REWARDS_HANDLING_RATIO_TAG) {\n                            Some(bytes) => {\n                                let ratio =\n                                    match casper_types::bytesrepr::FromBytes::from_bytes(bytes) {\n                                        Ok((ratio, _)) => ratio,\n                                        Err(_) => Ratio::new(0, 1),\n                                    };\n\n                                ratio\n                            }\n                            None => Ratio::new(0, 1),\n                        }\n                    }\n                    Err(_) => Ratio::new(0, 1),\n                }\n            } else {\n                Ratio::new(0, 1)\n            }\n        }\n        QueryResult::Failure(tce) => return SeigniorageRecipientsResult::Failure(tce),\n    };\n\n    SeigniorageRecipientsResult::Success {\n        seigniorage_recipients: snapshot,\n        rewards_ratio,\n    }\n}\n\nfn query_snapshot<T: StateProvider>(\n    state_provider: &T,\n    snapshot_version: Option<u8>,\n    snapshot_query_request: QueryRequest,\n) -> Result<SeigniorageRecipientsSnapshot, SeigniorageRecipientsResult> {\n    match state_provider.query(snapshot_query_request) {\n        QueryResult::RootNotFound => Err(SeigniorageRecipientsResult::RootNotFound),\n        QueryResult::Failure(error) => {\n            error!(?error, \"unexpected tracking copy error\");\n            Err(SeigniorageRecipientsResult::Failure(error))\n        }\n        QueryResult::ValueNotFound(msg) => {\n            error!(%msg, \"value not found\");\n            Err(SeigniorageRecipientsResult::ValueNotFound(msg))\n        }\n        QueryResult::Success { value, proofs: _ } => {\n            let cl_value = match value.into_cl_value() 
{\n                Some(snapshot_cl_value) => snapshot_cl_value,\n                None => {\n                    error!(\"unexpected query failure; seigniorage recipients snapshot is not a CLValue\");\n                    return Err(SeigniorageRecipientsResult::Failure(\n                        TrackingCopyError::UnexpectedStoredValueVariant,\n                    ));\n                }\n            };\n\n            match snapshot_version {\n                Some(_) => {\n                    let snapshot = match cl_value.into_t() {\n                        Ok(snapshot) => snapshot,\n                        Err(cve) => {\n                            error!(\"Failed to convert snapshot from CLValue\");\n                            return Err(SeigniorageRecipientsResult::Failure(\n                                TrackingCopyError::CLValue(cve),\n                            ));\n                        }\n                    };\n                    Ok(SeigniorageRecipientsSnapshot::V2(snapshot))\n                }\n                None => {\n                    let snapshot = match cl_value.into_t() {\n                        Ok(snapshot) => snapshot,\n                        Err(cve) => {\n                            error!(\"Failed to convert snapshot from CLValue\");\n                            return Err(SeigniorageRecipientsResult::Failure(\n                                TrackingCopyError::CLValue(cve),\n                            ));\n                        }\n                    };\n                    Ok(SeigniorageRecipientsSnapshot::V1(snapshot))\n                }\n            }\n        }\n    }\n}\n\nfn query_snapshot_version<T: StateProvider>(\n    state_provider: &T,\n    snapshot_version_query_request: QueryRequest,\n) -> Result<Option<u8>, SeigniorageRecipientsResult> {\n    match state_provider.query(snapshot_version_query_request) {\n        QueryResult::RootNotFound => Err(SeigniorageRecipientsResult::RootNotFound),\n        
QueryResult::Failure(error) => {\n            error!(?error, \"unexpected tracking copy error\");\n            Err(SeigniorageRecipientsResult::Failure(error))\n        }\n        QueryResult::ValueNotFound(_msg) => Ok(None),\n        QueryResult::Success { value, proofs: _ } => {\n            let cl_value = match value.into_cl_value() {\n                Some(snapshot_version_cl_value) => snapshot_version_cl_value,\n                None => {\n                    error!(\"unexpected query failure; seigniorage recipients snapshot version is not a CLValue\");\n                    return Err(SeigniorageRecipientsResult::Failure(\n                        TrackingCopyError::UnexpectedStoredValueVariant,\n                    ));\n                }\n            };\n            match cl_value.into_t() {\n                Ok(snapshot_version) => Ok(Some(snapshot_version)),\n                Err(cve) => Err(SeigniorageRecipientsResult::Failure(\n                    TrackingCopyError::CLValue(cve),\n                )),\n            }\n        }\n    }\n}\n\nfn build_query_requests(\n    scr: &SystemHashRegistry,\n    state_hash: Digest,\n    enable_addressable_entity: bool,\n) -> Result<(QueryRequest, QueryRequest), SeigniorageRecipientsResult> {\n    match scr.get(AUCTION).copied() {\n        Some(auction_hash) => {\n            let key = if !enable_addressable_entity {\n                Key::Hash(auction_hash)\n            } else {\n                Key::AddressableEntity(EntityAddr::System(auction_hash))\n            };\n            Ok((\n                QueryRequest::new(\n                    state_hash,\n                    key,\n                    vec![SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY.to_string()],\n                ),\n                QueryRequest::new(\n                    state_hash,\n                    key,\n                    vec![SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY.to_string()],\n                ),\n            ))\n        }\n        None => 
Err(SeigniorageRecipientsResult::AuctionNotFound),\n    }\n}\n\n/// Write multiple key/stored value pairs to the store in a single rw transaction.\npub fn put_stored_values<'a, R, S, E>(\n    environment: &'a R,\n    store: &S,\n    prestate_hash: Digest,\n    stored_values: Vec<(Key, StoredValue)>,\n) -> Result<Digest, E>\nwhere\n    R: TransactionSource<'a, Handle = S::Handle>,\n    S: TrieStore<Key, StoredValue>,\n    S::Error: From<R::Error>,\n    E: From<R::Error>\n        + From<S::Error>\n        + From<bytesrepr::Error>\n        + From<CommitError>\n        + From<TrieStoreCacheError>,\n{\n    let mut txn = environment.create_read_write_txn()?;\n    let state_root = prestate_hash;\n    let maybe_root: Option<Trie<Key, StoredValue>> = store.get(&txn, &state_root)?;\n    if maybe_root.is_none() {\n        return Err(CommitError::RootNotFound(prestate_hash).into());\n    };\n\n    let state_root =\n        batch_write::<_, _, _, _, _, E>(&mut txn, store, &state_root, stored_values.into_iter())?;\n    txn.commit()?;\n    Ok(state_root)\n}\n\n/// Commit `effects` to the store.\npub fn commit<'a, R, S, E>(\n    environment: &'a R,\n    store: &S,\n    prestate_hash: Digest,\n    effects: Effects,\n) -> Result<Digest, E>\nwhere\n    R: TransactionSource<'a, Handle = S::Handle>,\n    S: TrieStore<Key, StoredValue>,\n    S::Error: From<R::Error>,\n    E: From<R::Error>\n        + From<S::Error>\n        + From<bytesrepr::Error>\n        + From<CommitError>\n        + From<GlobalStateError>, /* even tho E is currently always GSE, this is required to\n                                   * satisfy the compiler */\n{\n    let mut txn = environment.create_read_write_txn()?;\n    let mut state_root = prestate_hash;\n\n    let maybe_root: Option<Trie<Key, StoredValue>> = store.get(&txn, &state_root)?;\n\n    if maybe_root.is_none() {\n        return Err(CommitError::RootNotFound(prestate_hash).into());\n    };\n\n    for (key, kind) in 
effects.value().into_iter().map(TransformV2::destructure) {\n        let read_result = read::<_, _, _, _, E>(&txn, store, &state_root, &key)?;\n\n        let instruction = match (read_result, kind) {\n            (_, TransformKindV2::Identity) => {\n                // effectively a noop.\n                continue;\n            }\n            (ReadResult::NotFound, TransformKindV2::Write(new_value)) => {\n                TransformInstruction::store(new_value)\n            }\n            (ReadResult::NotFound, TransformKindV2::Prune(key)) => {\n                // effectively a noop.\n                debug!(\n                    ?state_root,\n                    ?key,\n                    \"commit: attempt to prune nonexistent record; this may happen if a key is both added and pruned in the same commit.\"\n                );\n                continue;\n            }\n            (ReadResult::NotFound, transform_kind) => {\n                error!(\n                    ?state_root,\n                    ?key,\n                    ?transform_kind,\n                    \"commit: key not found while attempting to apply transform\"\n                );\n                return Err(CommitError::KeyNotFound(key).into());\n            }\n            (ReadResult::Found(current_value), transform_kind) => {\n                match transform_kind.apply(current_value) {\n                    Ok(instruction) => instruction,\n                    Err(err) => {\n                        error!(\n                            ?state_root,\n                            ?key,\n                            ?err,\n                            \"commit: key found, but could not apply transform\"\n                        );\n                        return Err(CommitError::TransformError(err).into());\n                    }\n                }\n            }\n            (ReadResult::RootNotFound, transform_kind) => {\n                error!(\n                    ?state_root,\n                    ?key,\n  
                  ?transform_kind,\n                    \"commit: failed to read state root while processing transform\"\n                );\n                return Err(CommitError::ReadRootNotFound(state_root).into());\n            }\n        };\n\n        match instruction {\n            TransformInstruction::Store(value) => {\n                let write_result =\n                    write::<_, _, _, _, E>(&mut txn, store, &state_root, &key, &value)?;\n\n                match write_result {\n                    WriteResult::Written(root_hash) => {\n                        state_root = root_hash;\n                    }\n                    WriteResult::AlreadyExists => (),\n                    WriteResult::RootNotFound => {\n                        error!(?state_root, ?key, ?value, \"commit: root not found\");\n                        return Err(CommitError::WriteRootNotFound(state_root).into());\n                    }\n                }\n            }\n            TransformInstruction::Prune(key) => {\n                let prune_result = prune::<_, _, _, _, E>(&mut txn, store, &state_root, &key)?;\n\n                match prune_result {\n                    TriePruneResult::Pruned(root_hash) => {\n                        state_root = root_hash;\n                    }\n                    TriePruneResult::MissingKey => {\n                        warn!(\"commit: pruning attempt failed for {}\", key);\n                    }\n                    TriePruneResult::RootNotFound => {\n                        error!(?state_root, ?key, \"commit: root not found\");\n                        return Err(CommitError::WriteRootNotFound(state_root).into());\n                    }\n                    TriePruneResult::Failure(gse) => {\n                        return Err(gse.into()); // currently this is always reflexive\n                    }\n                }\n            }\n        }\n    }\n\n    txn.commit()?;\n\n    Ok(state_root)\n}\n"
  },
  {
    "path": "storage/src/global_state/state/scratch.rs",
    "content": "use lmdb::RwTransaction;\nuse std::{\n    collections::{BTreeMap, BTreeSet, HashMap, VecDeque},\n    mem,\n    ops::Deref,\n    sync::{Arc, RwLock},\n};\n\nuse tracing::{debug, error};\n\nuse casper_types::{\n    bytesrepr::{self, ToBytes},\n    execution::{Effects, TransformInstruction, TransformKindV2, TransformV2},\n    global_state::TrieMerkleProof,\n    Digest, Key, StoredValue,\n};\n\nuse crate::{\n    data_access_layer::{\n        FlushRequest, FlushResult, PutTrieRequest, PutTrieResult, TrieElement, TrieRequest,\n        TrieResult,\n    },\n    global_state::{\n        error::Error as GlobalStateError,\n        state::{CommitError, CommitProvider, StateProvider, StateReader},\n        store::Store,\n        transaction_source::{lmdb::LmdbEnvironment, Transaction, TransactionSource},\n        trie::{Trie, TrieRaw},\n        trie_store::{\n            lmdb::LmdbTrieStore,\n            operations::{\n                keys_with_prefix, missing_children, put_trie, read, read_with_proof, ReadResult,\n            },\n        },\n    },\n};\n\nuse crate::tracking_copy::TrackingCopy;\n\ntype SharedCache = Arc<RwLock<Cache>>;\n\nstruct Cache {\n    cached_values: HashMap<Key, (bool, StoredValue)>,\n    pruned: BTreeSet<Key>,\n    cached_keys: CacheTrie<Key>,\n}\n\nstruct CacheTrieNode<T> {\n    children: BTreeMap<u8, CacheTrieNode<T>>,\n    value: Option<T>,\n}\n\nimpl<T> CacheTrieNode<T> {\n    fn new() -> Self {\n        CacheTrieNode {\n            children: BTreeMap::new(),\n            value: None,\n        }\n    }\n\n    fn remove(&mut self, bytes: &[u8], depth: usize) -> bool {\n        if depth == bytes.len() {\n            if self.value.is_some() {\n                self.value = None;\n                return self.children.is_empty();\n            }\n            return false;\n        }\n\n        if let Some(child_node) = self.children.get_mut(&bytes[depth]) {\n            if child_node.remove(bytes, depth + 1) {\n                
self.children.remove(&bytes[depth]);\n                return self.value.is_none() && self.children.is_empty();\n            }\n        }\n        false\n    }\n}\n\nstruct CacheTrie<T: Copy> {\n    root: CacheTrieNode<T>,\n}\n\nimpl<T: Copy> CacheTrie<T> {\n    fn new() -> Self {\n        CacheTrie {\n            root: CacheTrieNode::new(),\n        }\n    }\n\n    fn insert(&mut self, key_bytes: &[u8], key: T) {\n        let mut current_node = &mut self.root;\n        for &byte in key_bytes {\n            current_node = current_node\n                .children\n                .entry(byte)\n                .or_insert(CacheTrieNode::new());\n        }\n        current_node.value = Some(key);\n    }\n\n    fn keys_with_prefix(&self, prefix: &[u8]) -> Vec<T> {\n        let mut current_node = &self.root;\n        let mut result = Vec::new();\n\n        for &byte in prefix {\n            match current_node.children.get(&byte) {\n                Some(node) => current_node = node,\n                None => return result,\n            }\n        }\n\n        self.collect_keys(current_node, &mut result);\n        result\n    }\n\n    fn collect_keys(&self, start_node: &CacheTrieNode<T>, result: &mut Vec<T>) {\n        let mut stack = VecDeque::new();\n        stack.push_back(start_node);\n\n        while let Some(node) = stack.pop_back() {\n            if let Some(key) = node.value {\n                result.push(key);\n            }\n\n            for child_node in node.children.values() {\n                stack.push_back(child_node);\n            }\n        }\n    }\n\n    fn remove(&mut self, key_bytes: &[u8]) -> bool {\n        self.root.remove(key_bytes, 0)\n    }\n}\n\nimpl Cache {\n    fn new() -> Self {\n        Cache {\n            cached_values: HashMap::new(),\n            pruned: BTreeSet::new(),\n            cached_keys: CacheTrie::new(),\n        }\n    }\n\n    /// Returns true if the pruned and cached values are both empty.\n    pub fn is_empty(&self) -> bool 
{\n        self.cached_values.is_empty() && self.pruned.is_empty()\n    }\n\n    fn insert_write(&mut self, key: Key, value: StoredValue) -> Result<(), bytesrepr::Error> {\n        self.pruned.remove(&key);\n        if self.cached_values.insert(key, (true, value)).is_none() {\n            let key_bytes = key.to_bytes()?;\n            self.cached_keys.insert(&key_bytes, key);\n        };\n        Ok(())\n    }\n\n    fn insert_read(&mut self, key: Key, value: StoredValue) -> Result<(), bytesrepr::Error> {\n        let key_bytes = key.to_bytes()?;\n        self.cached_keys.insert(&key_bytes, key);\n        self.cached_values.entry(key).or_insert((false, value));\n        Ok(())\n    }\n\n    fn prune(&mut self, key: Key) -> Result<(), bytesrepr::Error> {\n        self.cached_values.remove(&key);\n        self.cached_keys.remove(&key.to_bytes()?);\n        self.pruned.insert(key);\n        Ok(())\n    }\n\n    fn get(&self, key: &Key) -> Option<&StoredValue> {\n        if self.pruned.contains(key) {\n            return None;\n        }\n        self.cached_values.get(key).map(|(_dirty, value)| value)\n    }\n\n    /// Consumes self and returns only written values as values that were only read must be filtered\n    /// out to prevent unnecessary writes.\n    fn into_dirty_writes(self) -> (Vec<(Key, StoredValue)>, BTreeSet<Key>) {\n        let stored_values: Vec<(Key, StoredValue)> = self\n            .cached_keys\n            .keys_with_prefix(&[])\n            .into_iter()\n            .filter_map(|key| {\n                self.cached_values.get(&key).and_then(|(dirty, value)| {\n                    if *dirty {\n                        Some((key, value.clone()))\n                    } else {\n                        None\n                    }\n                })\n            })\n            .collect();\n        let keys_to_prune = self.pruned;\n\n        debug!(\n            \"Cache::into_dirty_writes prune_count: {} store_count: {}\",\n            
keys_to_prune.len(),\n            stored_values.len()\n        );\n        (stored_values, keys_to_prune)\n    }\n}\n\n/// Global state implemented against LMDB as a backing data store.\npub struct ScratchGlobalState {\n    /// Underlying, cached stored values.\n    cache: SharedCache,\n    /// Environment for LMDB.\n    pub(crate) environment: Arc<LmdbEnvironment>,\n    /// Trie store held within LMDB.\n    pub(crate) trie_store: Arc<LmdbTrieStore>,\n    /// Empty root hash used for a new trie.\n    pub(crate) empty_root_hash: Digest,\n    /// Max query depth\n    pub max_query_depth: u64,\n    /// Enable the addressable entity and migrate accounts/contracts to entities.\n    pub enable_addressable_entity: bool,\n}\n\n/// Represents a \"view\" of global state at a particular root hash.\npub struct ScratchGlobalStateView {\n    cache: SharedCache,\n    /// Environment for LMDB.\n    pub(crate) environment: Arc<LmdbEnvironment>,\n    /// Trie store held within LMDB.\n    pub(crate) trie_store: Arc<LmdbTrieStore>,\n    /// Root hash of this \"view\".\n    pub(crate) root_hash: Digest,\n}\n\nimpl ScratchGlobalStateView {\n    /// Returns true if the pruned and cached values are both empty.\n    pub fn is_empty(&self) -> bool {\n        self.cache.read().unwrap().is_empty()\n    }\n}\n\nimpl ScratchGlobalState {\n    /// Creates a state from an existing environment, store, and root_hash.\n    /// Intended to be used for testing.\n    pub fn new(\n        environment: Arc<LmdbEnvironment>,\n        trie_store: Arc<LmdbTrieStore>,\n        empty_root_hash: Digest,\n        max_query_depth: u64,\n        enable_entity: bool,\n    ) -> Self {\n        ScratchGlobalState {\n            cache: Arc::new(RwLock::new(Cache::new())),\n            environment,\n            trie_store,\n            empty_root_hash,\n            max_query_depth,\n            enable_addressable_entity: enable_entity,\n        }\n    }\n\n    /// Consume self and return inner cache.\n    pub fn 
into_inner(self) -> (Vec<(Key, StoredValue)>, BTreeSet<Key>) {\n        let cache = mem::replace(&mut *self.cache.write().unwrap(), Cache::new());\n        cache.into_dirty_writes()\n    }\n}\n\nimpl StateReader<Key, StoredValue> for ScratchGlobalStateView {\n    type Error = GlobalStateError;\n\n    fn read(&self, key: &Key) -> Result<Option<StoredValue>, Self::Error> {\n        {\n            let cache = self.cache.read().unwrap();\n            if cache.pruned.contains(key) {\n                return Ok(None);\n            }\n            if let Some(value) = cache.get(key) {\n                return Ok(Some(value.clone()));\n            }\n        }\n        let txn = self.environment.create_read_txn()?;\n        let ret = match read::<Key, StoredValue, lmdb::RoTransaction, LmdbTrieStore, Self::Error>(\n            &txn,\n            self.trie_store.deref(),\n            &self.root_hash,\n            key,\n        )? {\n            ReadResult::Found(value) => {\n                self.cache\n                    .write()\n                    .expect(\"poisoned scratch cache lock\")\n                    .insert_read(*key, value.clone())?;\n                Some(value)\n            }\n            ReadResult::NotFound => None,\n            ReadResult::RootNotFound => panic!(\"ScratchGlobalState has invalid root\"),\n        };\n        txn.commit()?;\n        Ok(ret)\n    }\n\n    fn read_with_proof(\n        &self,\n        key: &Key,\n    ) -> Result<Option<TrieMerkleProof<Key, StoredValue>>, Self::Error> {\n        // if self.cache.is_empty() proceed else error\n        if !self.is_empty() {\n            return Err(Self::Error::CannotProvideProofsOverCachedData);\n        }\n\n        let txn = self.environment.create_read_txn()?;\n        let ret = match read_with_proof::<\n            Key,\n            StoredValue,\n            lmdb::RoTransaction,\n            LmdbTrieStore,\n            Self::Error,\n        >(&txn, self.trie_store.deref(), &self.root_hash, key)?\n 
       {\n            ReadResult::Found(value) => Some(value),\n            ReadResult::NotFound => None,\n            ReadResult::RootNotFound => panic!(\"LmdbWithCacheGlobalState has invalid root\"),\n        };\n        txn.commit()?;\n        Ok(ret)\n    }\n\n    fn keys_with_prefix(&self, prefix: &[u8]) -> Result<Vec<Key>, Self::Error> {\n        let mut ret = Vec::new();\n        let cache = self.cache.read().expect(\"poisoned scratch cache mutex\");\n        let cached_keys = cache.cached_keys.keys_with_prefix(prefix);\n        ret.extend(cached_keys);\n\n        let txn = self.environment.create_read_txn()?;\n        let keys_iter = keys_with_prefix::<Key, StoredValue, _, _>(\n            &txn,\n            self.trie_store.deref(),\n            &self.root_hash,\n            prefix,\n        );\n        for result in keys_iter {\n            match result {\n                Ok(key) => {\n                    // If the key is pruned then we won't return it. If the key is already cached,\n                    // then it would have been picked up by the code above so we don't add it again\n                    // to avoid duplicates.\n                    if !cache.pruned.contains(&key) && !cache.cached_values.contains_key(&key) {\n                        ret.push(key);\n                    }\n                }\n                Err(error) => return Err(error),\n            }\n        }\n        txn.commit()?;\n        Ok(ret)\n    }\n}\n\nimpl CommitProvider for ScratchGlobalState {\n    /// State hash returned is the one provided, as we do not write to lmdb with this kind of global\n    /// state. 
Note that the state hash is NOT used, and simply passed back to the caller.\n    fn commit_effects(\n        &self,\n        state_hash: Digest,\n        effects: Effects,\n    ) -> Result<Digest, GlobalStateError> {\n        let txn = self.environment.create_read_txn()?;\n        for (key, kind) in effects.value().into_iter().map(TransformV2::destructure) {\n            let cached_value = self.cache.read().unwrap().get(&key).cloned();\n            let instruction = match (cached_value, kind) {\n                (_, TransformKindV2::Identity) => {\n                    // effectively a noop.\n                    continue;\n                }\n                (None, TransformKindV2::Write(new_value)) => TransformInstruction::store(new_value),\n                (None, transform_kind) => {\n                    // It might be the case that for `Add*` operations we don't have the previous\n                    // value in cache yet.\n                    match read::<\n                        Key,\n                        StoredValue,\n                        lmdb::RoTransaction,\n                        LmdbTrieStore,\n                        GlobalStateError,\n                    >(&txn, self.trie_store.deref(), &state_hash, &key)?\n                    {\n                        ReadResult::Found(current_value) => {\n                            match transform_kind.apply(current_value.clone()) {\n                                Ok(instruction) => instruction,\n                                Err(err) => {\n                                    error!(?key, ?err, \"Key found, but could not apply transform\");\n                                    return Err(CommitError::TransformError(err).into());\n                                }\n                            }\n                        }\n                        ReadResult::NotFound => {\n                            error!(\n                                ?key,\n                                ?transform_kind,\n              
                  \"Key not found while attempting to apply transform\"\n                            );\n                            return Err(CommitError::KeyNotFound(key).into());\n                        }\n                        ReadResult::RootNotFound => {\n                            error!(root_hash=?state_hash, \"root not found\");\n                            return Err(CommitError::ReadRootNotFound(state_hash).into());\n                        }\n                    }\n                }\n                (Some(current_value), transform_kind) => {\n                    match transform_kind.apply(current_value) {\n                        Ok(instruction) => instruction,\n                        Err(err) => {\n                            error!(?key, ?err, \"Key found, but could not apply transform\");\n                            return Err(CommitError::TransformError(err).into());\n                        }\n                    }\n                }\n            };\n            let mut cache = self.cache.write().unwrap();\n            match instruction {\n                TransformInstruction::Store(value) => {\n                    cache.insert_write(key, value)?;\n                }\n                TransformInstruction::Prune(key) => {\n                    cache.prune(key)?;\n                }\n            }\n        }\n        txn.commit()?;\n        Ok(state_hash)\n    }\n\n    fn commit_values(\n        &self,\n        state_hash: Digest,\n        write_values: Vec<(Key, StoredValue)>,\n        prune_keys: BTreeSet<Key>,\n    ) -> Result<Digest, GlobalStateError> {\n        let mut cache = self.cache.write().unwrap();\n        for (key, value) in write_values {\n            cache.insert_write(key, value)?;\n        }\n\n        for key_to_prune in prune_keys {\n            cache.prune(key_to_prune)?;\n        }\n\n        Ok(state_hash)\n    }\n}\n\nimpl StateProvider for ScratchGlobalState {\n    type Reader = ScratchGlobalStateView;\n\n    fn 
flush(&self, _: FlushRequest) -> FlushResult {\n        if self.environment.is_manual_sync_enabled() {\n            match self.environment.sync() {\n                Ok(_) => FlushResult::Success,\n                Err(err) => FlushResult::Failure(err.into()),\n            }\n        } else {\n            FlushResult::ManualSyncDisabled\n        }\n    }\n\n    fn empty_root(&self) -> Digest {\n        self.empty_root_hash\n    }\n\n    fn tracking_copy(\n        &self,\n        hash: Digest,\n    ) -> Result<Option<TrackingCopy<Self::Reader>>, GlobalStateError> {\n        match self.checkout(hash)? {\n            Some(tc) => Ok(Some(TrackingCopy::new(\n                tc,\n                self.max_query_depth,\n                self.enable_addressable_entity,\n            ))),\n            None => Ok(None),\n        }\n    }\n\n    fn checkout(&self, state_hash: Digest) -> Result<Option<Self::Reader>, GlobalStateError> {\n        let txn = self.environment.create_read_txn()?;\n        let maybe_root: Option<Trie<Key, StoredValue>> = self.trie_store.get(&txn, &state_hash)?;\n        let maybe_state = maybe_root.map(|_| ScratchGlobalStateView {\n            cache: Arc::clone(&self.cache),\n            environment: Arc::clone(&self.environment),\n            trie_store: Arc::clone(&self.trie_store),\n            root_hash: state_hash,\n        });\n        txn.commit()?;\n        Ok(maybe_state)\n    }\n\n    fn trie(&self, request: TrieRequest) -> TrieResult {\n        let key = request.trie_key();\n        let txn = match self.environment.create_read_txn() {\n            Ok(ro) => ro,\n            Err(err) => return TrieResult::Failure(err.into()),\n        };\n        let raw = match Store::<Digest, Trie<Digest, StoredValue>>::get_raw(\n            &*self.trie_store,\n            &txn,\n            &key,\n        ) {\n            Ok(Some(bytes)) => TrieRaw::new(bytes),\n            Ok(None) => {\n                return TrieResult::ValueNotFound(key.to_string());\n    
        }\n            Err(err) => {\n                return TrieResult::Failure(err);\n            }\n        };\n        match txn.commit() {\n            Ok(_) => match request.chunk_id() {\n                Some(chunk_id) => TrieResult::Success {\n                    element: TrieElement::Chunked(raw, chunk_id),\n                },\n                None => TrieResult::Success {\n                    element: TrieElement::Raw(raw),\n                },\n            },\n            Err(err) => TrieResult::Failure(err.into()),\n        }\n    }\n\n    /// Persists a trie element.\n    fn put_trie(&self, request: PutTrieRequest) -> PutTrieResult {\n        // We only allow bottom-up persistence of trie elements.\n        // Thus we do not persist the element unless we already have all of its descendants\n        // persisted. It is safer to throw away the element and rely on a follow up attempt\n        // to reacquire it later than to allow it to be persisted which would allow runtime\n        // access to acquire a root hash that is missing one or more children which will\n        // result in undefined behavior if a process attempts to access elements below that\n        // root which are not held locally.\n        let bytes = request.raw().inner();\n        match self.missing_children(bytes) {\n            Ok(missing_children) => {\n                if !missing_children.is_empty() {\n                    let hash = Digest::hash_into_chunks_if_necessary(bytes);\n                    return PutTrieResult::Failure(GlobalStateError::MissingTrieNodeChildren(\n                        hash,\n                        request.take_raw(),\n                        missing_children,\n                    ));\n                }\n            }\n            Err(err) => return PutTrieResult::Failure(err),\n        };\n\n        match self.environment.create_read_write_txn() {\n            Ok(mut txn) => {\n                match put_trie::<Key, StoredValue, RwTransaction, 
LmdbTrieStore, GlobalStateError>(\n                    &mut txn,\n                    &self.trie_store,\n                    bytes,\n                ) {\n                    Ok(hash) => match txn.commit() {\n                        Ok(_) => PutTrieResult::Success { hash },\n                        Err(err) => PutTrieResult::Failure(err.into()),\n                    },\n                    Err(err) => PutTrieResult::Failure(err),\n                }\n            }\n            Err(err) => PutTrieResult::Failure(err.into()),\n        }\n    }\n\n    /// Finds all of the keys of missing directly descendant `Trie<K,V>` values\n    fn missing_children(&self, trie_raw: &[u8]) -> Result<Vec<Digest>, GlobalStateError> {\n        let txn = self.environment.create_read_txn()?;\n        let missing_descendants = missing_children::<\n            Key,\n            StoredValue,\n            lmdb::RoTransaction,\n            LmdbTrieStore,\n            GlobalStateError,\n        >(&txn, self.trie_store.deref(), trie_raw)?;\n        txn.commit()?;\n        Ok(missing_descendants)\n    }\n\n    fn enable_entity(&self) -> bool {\n        self.enable_addressable_entity\n    }\n}\n\n#[cfg(test)]\npub(crate) mod tests {\n    use lmdb::DatabaseFlags;\n    use tempfile::tempdir;\n\n    use casper_types::{\n        account::AccountHash,\n        execution::{Effects, TransformKindV2, TransformV2},\n        CLValue, Digest,\n    };\n\n    use super::*;\n    use crate::global_state::{\n        state::{lmdb::LmdbGlobalState, CommitProvider},\n        trie_store::operations::{write, WriteResult},\n    };\n\n    #[cfg(test)]\n    use crate::global_state::{DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS};\n\n    #[derive(Debug, Clone)]\n    pub(crate) struct TestPair {\n        pub key: Key,\n        pub value: StoredValue,\n    }\n\n    pub(crate) fn create_test_pairs() -> [TestPair; 2] {\n        [\n            TestPair {\n                key: Key::Account(AccountHash::new([1_u8; 32])),\n              
  value: StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()),\n            },\n            TestPair {\n                key: Key::Account(AccountHash::new([2_u8; 32])),\n                value: StoredValue::CLValue(CLValue::from_t(2_i32).unwrap()),\n            },\n        ]\n    }\n\n    pub(crate) fn create_test_pairs_updated() -> [TestPair; 3] {\n        [\n            TestPair {\n                key: Key::Account(AccountHash::new([1u8; 32])),\n                value: StoredValue::CLValue(CLValue::from_t(\"one\".to_string()).unwrap()),\n            },\n            TestPair {\n                key: Key::Account(AccountHash::new([2u8; 32])),\n                value: StoredValue::CLValue(CLValue::from_t(\"two\".to_string()).unwrap()),\n            },\n            TestPair {\n                key: Key::Account(AccountHash::new([3u8; 32])),\n                value: StoredValue::CLValue(CLValue::from_t(3_i32).unwrap()),\n            },\n        ]\n    }\n\n    pub(crate) fn create_test_transforms() -> Effects {\n        let mut effects = Effects::new();\n        let transform = TransformV2::new(\n            Key::Account(AccountHash::new([3u8; 32])),\n            TransformKindV2::Write(StoredValue::CLValue(CLValue::from_t(\"one\").unwrap())),\n        );\n        effects.push(transform);\n        effects\n    }\n\n    pub(crate) struct TestState {\n        state: LmdbGlobalState,\n        root_hash: Digest,\n    }\n\n    #[cfg(test)]\n    pub(crate) fn create_test_state() -> TestState {\n        let temp_dir = tempdir().unwrap();\n        let environment = Arc::new(\n            LmdbEnvironment::new(\n                temp_dir.path(),\n                DEFAULT_MAX_DB_SIZE,\n                DEFAULT_MAX_READERS,\n                true,\n            )\n            .unwrap(),\n        );\n        let trie_store =\n            Arc::new(LmdbTrieStore::new(&environment, None, DatabaseFlags::empty()).unwrap());\n\n        let state = LmdbGlobalState::empty(\n            
environment,\n            trie_store,\n            crate::global_state::DEFAULT_MAX_QUERY_DEPTH,\n            crate::global_state::DEFAULT_ENABLE_ENTITY,\n        )\n        .unwrap();\n        let mut current_root = state.empty_root_hash;\n        {\n            let mut txn = state.environment.create_read_write_txn().unwrap();\n\n            for TestPair { key, value } in &create_test_pairs() {\n                match write::<_, _, _, LmdbTrieStore, GlobalStateError>(\n                    &mut txn,\n                    &state.trie_store,\n                    &current_root,\n                    key,\n                    value,\n                )\n                .unwrap()\n                {\n                    WriteResult::Written(root_hash) => {\n                        current_root = root_hash;\n                    }\n                    WriteResult::AlreadyExists => (),\n                    WriteResult::RootNotFound => {\n                        panic!(\"LmdbWithCacheGlobalState has invalid root\")\n                    }\n                }\n            }\n\n            txn.commit().unwrap();\n        }\n        TestState {\n            state,\n            root_hash: current_root,\n        }\n    }\n\n    #[test]\n    fn commit_updates_state() {\n        let test_pairs_updated = create_test_pairs_updated();\n\n        let TestState { state, root_hash } = create_test_state();\n\n        let scratch = state.create_scratch();\n\n        let effects = {\n            let mut tmp = Effects::new();\n            for TestPair { key, value } in &test_pairs_updated {\n                let transform = TransformV2::new(*key, TransformKindV2::Write(value.to_owned()));\n                tmp.push(transform);\n            }\n            tmp\n        };\n\n        let scratch_root_hash = scratch.commit_effects(root_hash, effects.clone()).unwrap();\n\n        assert_eq!(\n            scratch_root_hash, root_hash,\n            \"ScratchGlobalState should not modify the state root, as 
it does no hashing\"\n        );\n\n        let lmdb_hash = state.commit_effects(root_hash, effects).unwrap();\n        let updated_checkout = state.checkout(lmdb_hash).unwrap().unwrap();\n\n        let all_keys = updated_checkout.keys_with_prefix(&[]).unwrap();\n\n        let (stored_values, _) = scratch.into_inner();\n        assert_eq!(all_keys.len(), stored_values.len());\n\n        for key in all_keys {\n            assert_eq!(\n                stored_values\n                    .iter()\n                    .find(|(k, _)| k == &key)\n                    .unwrap()\n                    .1\n                    .clone(),\n                updated_checkout.read(&key).unwrap().unwrap()\n            );\n        }\n\n        for TestPair { key, value } in test_pairs_updated.iter().cloned() {\n            assert_eq!(Some(value), updated_checkout.read(&key).unwrap());\n        }\n    }\n\n    #[test]\n    fn commit_updates_state_with_add() {\n        let test_pairs_updated = create_test_pairs_updated();\n\n        // create two lmdb instances, with a scratch instance on the first\n        let TestState { state, root_hash } = create_test_state();\n        let TestState {\n            state: state2,\n            root_hash: state_2_root_hash,\n        } = create_test_state();\n\n        let scratch = state.create_scratch();\n\n        let effects = {\n            let mut tmp = Effects::new();\n            for TestPair { key, value } in &test_pairs_updated {\n                let transform = TransformV2::new(*key, TransformKindV2::Write(value.to_owned()));\n                tmp.push(transform);\n            }\n            tmp\n        };\n\n        // Commit effects to both databases.\n        scratch.commit_effects(root_hash, effects.clone()).unwrap();\n        let updated_hash = state2.commit_effects(state_2_root_hash, effects).unwrap();\n\n        // Create add transforms as well\n        let add_effects = create_test_transforms();\n        scratch\n            
.commit_effects(root_hash, add_effects.clone())\n            .unwrap();\n        let updated_hash = state2.commit_effects(updated_hash, add_effects).unwrap();\n\n        let scratch_checkout = scratch.checkout(root_hash).unwrap().unwrap();\n        let updated_checkout = state2.checkout(updated_hash).unwrap().unwrap();\n        let all_keys = updated_checkout.keys_with_prefix(&[]).unwrap();\n\n        // Check that cache matches the contents of the second instance of lmdb\n        for key in all_keys {\n            assert_eq!(\n                scratch_checkout.read(&key).unwrap().as_ref(),\n                updated_checkout.read(&key).unwrap().as_ref()\n            );\n        }\n    }\n\n    #[test]\n    fn commit_updates_state_and_original_state_stays_intact() {\n        let test_pairs_updated = create_test_pairs_updated();\n\n        let TestState {\n            state, root_hash, ..\n        } = create_test_state();\n\n        let scratch = state.create_scratch();\n\n        let effects = {\n            let mut tmp = Effects::new();\n            for TestPair { key, value } in &test_pairs_updated {\n                let transform = TransformV2::new(*key, TransformKindV2::Write(value.to_owned()));\n                tmp.push(transform);\n            }\n            tmp\n        };\n\n        let updated_hash = scratch.commit_effects(root_hash, effects).unwrap();\n\n        let updated_checkout = scratch.checkout(updated_hash).unwrap().unwrap();\n        for TestPair { key, value } in test_pairs_updated.iter().cloned() {\n            assert_eq!(\n                Some(value),\n                updated_checkout.read(&key).unwrap(),\n                \"ScratchGlobalState should not yet be written to the underlying lmdb state\"\n            );\n        }\n\n        let original_checkout = state.checkout(root_hash).unwrap().unwrap();\n        for TestPair { key, value } in create_test_pairs().iter().cloned() {\n            assert_eq!(Some(value), 
original_checkout.read(&key).unwrap());\n        }\n        assert_eq!(\n            None,\n            original_checkout.read(&test_pairs_updated[2].key).unwrap()\n        );\n    }\n\n    #[test]\n    fn cache_trie_basic_insert_get() {\n        let mut trie = CacheTrie::new();\n        let key_hello = Key::Hash(*b\"hello...........................\");\n        let key_world = Key::Hash(*b\"world...........................\");\n        let key_hey = Key::Hash(*b\"hey.............................\");\n\n        trie.insert(b\"hello\", key_hello);\n        trie.insert(b\"world\", key_world);\n        trie.insert(b\"hey\", key_hey);\n\n        assert_eq!(trie.keys_with_prefix(b\"he\"), vec![key_hey, key_hello]);\n        assert_eq!(trie.keys_with_prefix(b\"wo\"), vec![key_world]);\n    }\n\n    #[test]\n    fn cache_trie_overlapping_prefix() {\n        let mut trie = CacheTrie::new();\n        let key_apple = Key::Hash(*b\"apple...........................\");\n        let key_app = Key::Hash(*b\"app.............................\");\n        let key_apron = Key::Hash(*b\"apron...........................\");\n\n        trie.insert(b\"apple\", key_apple);\n        trie.insert(b\"app\", key_app);\n        trie.insert(b\"apron\", key_apron);\n\n        assert_eq!(\n            trie.keys_with_prefix(b\"ap\"),\n            vec![key_apron, key_app, key_apple]\n        );\n        assert_eq!(trie.keys_with_prefix(b\"app\"), vec![key_app, key_apple]);\n    }\n\n    #[test]\n    fn cache_trie_leaf_removal() {\n        let mut trie = CacheTrie::new();\n        let key_cat = Key::Hash(*b\"cat.............................\");\n        let key_category = Key::Hash(*b\"category........................\");\n\n        trie.insert(b\"cat\", key_cat);\n        trie.insert(b\"category\", key_category);\n\n        trie.remove(b\"category\");\n        assert_eq!(trie.keys_with_prefix(b\"ca\"), vec![key_cat]);\n    }\n\n    #[test]\n    fn cache_trie_internal_node_removal() {\n        let 
mut trie = CacheTrie::new();\n        let key_be = Key::Hash(*b\"be..............................\");\n        let key_berry = Key::Hash(*b\"berry...........................\");\n\n        trie.insert(b\"be\", key_be);\n        trie.insert(b\"berry\", key_berry);\n\n        trie.remove(b\"be\");\n        assert_eq!(trie.keys_with_prefix(b\"be\"), vec![key_berry]);\n    }\n\n    #[test]\n    fn cache_trie_non_existent_prefix() {\n        let mut trie = CacheTrie::new();\n\n        let key_apple = Key::Hash(*b\"apple...........................\");\n        let key_mango = Key::Hash(*b\"mango...........................\");\n\n        trie.insert(b\"apple\", key_apple);\n        trie.insert(b\"mango\", key_mango);\n\n        assert_eq!(trie.keys_with_prefix(b\"b\"), Vec::<Key>::new());\n    }\n\n    #[test]\n    fn cache_trie_empty_trie_search() {\n        let trie = CacheTrie::<Key>::new();\n\n        assert_eq!(trie.keys_with_prefix(b\"\"), Vec::<Key>::new());\n    }\n\n    #[test]\n    fn cache_trie_empty_prefix_search_all_keys() {\n        let mut trie = CacheTrie::new();\n        let key_hello = Key::Hash(*b\"hello...........................\");\n        let key_world = Key::Hash(*b\"world...........................\");\n        let key_hey = Key::Hash(*b\"hey.............................\");\n\n        trie.insert(b\"hello\", key_hello);\n        trie.insert(b\"world\", key_world);\n        trie.insert(b\"hey\", key_hey);\n\n        assert_eq!(\n            trie.keys_with_prefix(b\"\"),\n            vec![key_world, key_hey, key_hello]\n        );\n    }\n}\n"
  },
  {
    "path": "storage/src/global_state/store/mod.rs",
    "content": "mod store_ext;\n#[cfg(test)]\npub(crate) mod tests;\n\nuse std::borrow::Cow;\n\nuse casper_types::bytesrepr::{self, Bytes, FromBytes, ToBytes};\n\npub use self::store_ext::StoreExt;\nuse crate::global_state::transaction_source::{Readable, Writable};\n\n/// Store is responsible for abstracting `get` and `put` operations over the underlying store\n/// specified by its associated `Handle` type.\npub trait Store<K, V> {\n    /// Errors possible from this store.\n    type Error: From<bytesrepr::Error>;\n\n    /// Underlying store type.\n    type Handle;\n\n    /// `handle` returns the underlying store.\n    fn handle(&self) -> Self::Handle;\n\n    /// Deserialize a value.\n    #[inline]\n    fn deserialize_value(&self, bytes: &[u8]) -> Result<V, bytesrepr::Error>\n    where\n        V: FromBytes,\n    {\n        bytesrepr::deserialize_from_slice(bytes)\n    }\n\n    /// Serialize a value.\n    #[inline]\n    fn serialize_value(&self, value: &V) -> Result<Vec<u8>, bytesrepr::Error>\n    where\n        V: ToBytes,\n    {\n        value.to_bytes()\n    }\n\n    /// Returns an optional value (may exist or not) as read through a transaction, or an error\n    /// of the associated `Self::Error` variety.\n    fn get<T>(&self, txn: &T, key: &K) -> Result<Option<V>, Self::Error>\n    where\n        T: Readable<Handle = Self::Handle>,\n        K: AsRef<[u8]>,\n        V: FromBytes,\n        Self::Error: From<T::Error>,\n    {\n        let raw = self.get_raw(txn, key)?;\n        match raw {\n            Some(bytes) => {\n                let value = self.deserialize_value(&bytes)?;\n                Ok(Some(value))\n            }\n            None => Ok(None),\n        }\n    }\n\n    /// Returns an optional value (may exist or not) as read through a transaction, or an error\n    /// of the associated `Self::Error` variety.\n    fn get_raw<T>(&self, txn: &T, key: &K) -> Result<Option<Bytes>, Self::Error>\n    where\n        T: Readable<Handle = Self::Handle>,\n       
 K: AsRef<[u8]>,\n        Self::Error: From<T::Error>,\n    {\n        let handle = self.handle();\n        Ok(txn.read(handle, key.as_ref())?)\n    }\n\n    /// Puts a `value` into the store at `key` within a transaction, potentially returning an\n    /// error of type `Self::Error` if that fails.\n    fn put<T>(&self, txn: &mut T, key: &K, value: &V) -> Result<(), Self::Error>\n    where\n        T: Writable<Handle = Self::Handle>,\n        K: AsRef<[u8]>,\n        V: ToBytes,\n        Self::Error: From<T::Error>,\n    {\n        let serialized_value = self.serialize_value(value)?;\n        self.put_raw(txn, key, Cow::from(serialized_value))\n    }\n\n    /// Puts a raw `value` into the store at `key` within a transaction, potentially returning an\n    /// error of type `Self::Error` if that fails.\n    ///\n    /// This accepts a [`Cow`] object as a value to allow different implementations to choose if\n    /// they want to use owned value (i.e. put it in a cache without cloning) or the raw bytes\n    /// (write it into a persistent store).\n    fn put_raw<T>(\n        &self,\n        txn: &mut T,\n        key: &K,\n        value_bytes: Cow<'_, [u8]>,\n    ) -> Result<(), Self::Error>\n    where\n        T: Writable<Handle = Self::Handle>,\n        K: AsRef<[u8]>,\n        Self::Error: From<T::Error>,\n    {\n        let handle = self.handle();\n        txn.write(handle, key.as_ref(), &value_bytes)\n            .map_err(Into::into)\n    }\n}\n"
  },
  {
    "path": "storage/src/global_state/store/store_ext.rs",
    "content": "//! Extension traits for store.\n\nuse casper_types::bytesrepr::{FromBytes, ToBytes};\n\nuse crate::global_state::{\n    store::Store,\n    transaction_source::{Readable, Writable},\n};\n\n/// Extension trait for Store.\npub trait StoreExt<K, V>: Store<K, V> {\n    /// Returns multiple optional values (each may exist or not) from the store in one transaction.\n    fn get_many<'a, T>(\n        &self,\n        txn: &T,\n        keys: impl Iterator<Item = &'a K>,\n    ) -> Result<Vec<Option<V>>, Self::Error>\n    where\n        T: Readable<Handle = Self::Handle>,\n        K: AsRef<[u8]> + 'a,\n        V: FromBytes,\n        Self::Error: From<T::Error>,\n    {\n        let mut ret: Vec<Option<V>> = Vec::new();\n        for key in keys {\n            let result = self.get(txn, key)?;\n            ret.push(result)\n        }\n        Ok(ret)\n    }\n\n    /// Puts multiple key/value pairs into the store in one transaction, potentially returning an\n    /// error of type `Self::Error` if that fails.\n    fn put_many<'a, T>(\n        &self,\n        txn: &mut T,\n        pairs: impl Iterator<Item = (&'a K, &'a V)>,\n    ) -> Result<(), Self::Error>\n    where\n        T: Writable<Handle = Self::Handle>,\n        K: AsRef<[u8]> + 'a,\n        V: ToBytes + 'a,\n        Self::Error: From<T::Error>,\n    {\n        for (key, value) in pairs {\n            self.put(txn, key, value)?;\n        }\n        Ok(())\n    }\n}\n\nimpl<K, V, T: Store<K, V>> StoreExt<K, V> for T {}\n"
  },
  {
    "path": "storage/src/global_state/store/tests.rs",
    "content": "use std::collections::BTreeMap;\n\nuse casper_types::bytesrepr::{FromBytes, ToBytes};\n\nuse crate::global_state::{\n    store::{Store, StoreExt},\n    transaction_source::{Transaction, TransactionSource},\n};\n\n// should be moved to the `store` module\nfn roundtrip<'a, K, V, X, S>(\n    transaction_source: &'a X,\n    store: &S,\n    items: &BTreeMap<K, V>,\n) -> Result<Vec<Option<V>>, S::Error>\nwhere\n    K: AsRef<[u8]>,\n    V: ToBytes + FromBytes,\n    X: TransactionSource<'a, Handle = S::Handle>,\n    S: Store<K, V>,\n    S::Error: From<X::Error>,\n{\n    let mut txn: X::ReadWriteTransaction = transaction_source.create_read_write_txn()?;\n    store.put_many(&mut txn, items.iter())?;\n    let result = store.get_many(&txn, items.keys());\n    txn.commit()?;\n    result\n}\n\n// should be moved to the `store` module\npub fn roundtrip_succeeds<'a, K, V, X, S>(\n    transaction_source: &'a X,\n    store: &S,\n    items: BTreeMap<K, V>,\n) -> Result<bool, S::Error>\nwhere\n    K: AsRef<[u8]>,\n    V: ToBytes + FromBytes + Clone + PartialEq,\n    X: TransactionSource<'a, Handle = S::Handle>,\n    S: Store<K, V>,\n    S::Error: From<X::Error>,\n{\n    let maybe_values: Vec<Option<V>> = roundtrip(transaction_source, store, &items)?;\n    let values = match maybe_values.into_iter().collect::<Option<Vec<V>>>() {\n        Some(values) => values,\n        None => return Ok(false),\n    };\n    Ok(Iterator::eq(items.values(), values.iter()))\n}\n"
  },
  {
    "path": "storage/src/global_state/transaction_source/lmdb.rs",
    "content": "use std::path::Path;\n\nuse casper_types::bytesrepr::Bytes;\nuse lmdb::{\n    self, Database, Environment, EnvironmentFlags, RoTransaction, RwTransaction, WriteFlags,\n};\n\nuse crate::global_state::{\n    error,\n    transaction_source::{Readable, Transaction, TransactionSource, Writable},\n    trie_store::lmdb::ScratchTrieStore,\n    MAX_DBS,\n};\n\n/// Filename for the LMDB database created by the EE.\nconst EE_DB_FILENAME: &str = \"data.lmdb\";\n\nimpl Transaction for ScratchTrieStore {\n    type Error = error::Error;\n    type Handle = ScratchTrieStore;\n    fn commit(self) -> Result<(), Self::Error> {\n        // NO OP as scratch doesn't use transactions.\n        Ok(())\n    }\n}\n\nimpl Readable for ScratchTrieStore {\n    fn read(&self, handle: Self::Handle, key: &[u8]) -> Result<Option<Bytes>, Self::Error> {\n        let txn = self.env.create_read_txn()?;\n        match lmdb::Transaction::get(&txn, handle.store.get_db(), &key) {\n            Ok(bytes) => Ok(Some(Bytes::from(bytes))),\n            Err(lmdb::Error::NotFound) => Ok(None),\n            Err(e) => Err(error::Error::Lmdb(e)),\n        }\n    }\n}\n\nimpl Writable for ScratchTrieStore {\n    fn write(&mut self, handle: Self::Handle, key: &[u8], value: &[u8]) -> Result<(), Self::Error> {\n        let mut txn = self.env.create_read_write_txn()?;\n        txn.put(handle.store.get_db(), &key, &value, WriteFlags::empty())\n            .map_err(error::Error::Lmdb)?;\n        Ok(())\n    }\n}\n\nimpl<'a> TransactionSource<'a> for ScratchTrieStore {\n    type Error = error::Error;\n    type Handle = ScratchTrieStore;\n    type ReadTransaction = ScratchTrieStore;\n    type ReadWriteTransaction = ScratchTrieStore;\n    fn create_read_txn(&'a self) -> Result<Self::ReadTransaction, Self::Error> {\n        Ok(self.clone())\n    }\n\n    fn create_read_write_txn(&'a self) -> Result<Self::ReadWriteTransaction, Self::Error> {\n        Ok(self.clone())\n    }\n}\n\nimpl Transaction for 
RoTransaction<'_> {\n    type Error = lmdb::Error;\n\n    type Handle = Database;\n\n    fn commit(self) -> Result<(), Self::Error> {\n        lmdb::Transaction::commit(self)\n    }\n}\n\nimpl Readable for RoTransaction<'_> {\n    fn read(&self, handle: Self::Handle, key: &[u8]) -> Result<Option<Bytes>, Self::Error> {\n        match lmdb::Transaction::get(self, handle, &key) {\n            Ok(bytes) => Ok(Some(Bytes::from(bytes))),\n            Err(lmdb::Error::NotFound) => Ok(None),\n            Err(e) => Err(e),\n        }\n    }\n}\n\nimpl<'a> Transaction for RwTransaction<'a> {\n    type Error = lmdb::Error;\n\n    type Handle = Database;\n\n    fn commit(self) -> Result<(), Self::Error> {\n        <RwTransaction<'a> as lmdb::Transaction>::commit(self)\n    }\n}\n\nimpl Readable for RwTransaction<'_> {\n    fn read(&self, handle: Self::Handle, key: &[u8]) -> Result<Option<Bytes>, Self::Error> {\n        match lmdb::Transaction::get(self, handle, &key) {\n            Ok(bytes) => Ok(Some(Bytes::from(bytes))),\n            Err(lmdb::Error::NotFound) => Ok(None),\n            Err(e) => Err(e),\n        }\n    }\n}\n\nimpl Writable for RwTransaction<'_> {\n    fn write(&mut self, handle: Self::Handle, key: &[u8], value: &[u8]) -> Result<(), Self::Error> {\n        self.put(handle, &key, &value, WriteFlags::empty())\n    }\n}\n\n/// The environment for an LMDB-backed trie store.\n///\n/// Wraps [`lmdb::Environment`].\n#[derive(Debug)]\npub struct LmdbEnvironment {\n    env: Environment,\n    manual_sync_enabled: bool,\n}\n\nimpl LmdbEnvironment {\n    /// Constructor for `LmdbEnvironment`.\n    pub fn new<P: AsRef<Path>>(\n        path: P,\n        map_size: usize,\n        max_readers: u32,\n        manual_sync_enabled: bool,\n    ) -> Result<Self, error::Error> {\n        let lmdb_flags = if manual_sync_enabled {\n            // These options require that we manually call sync on the environment for the EE.\n            EnvironmentFlags::NO_SUB_DIR\n               
 | EnvironmentFlags::NO_READAHEAD\n                | EnvironmentFlags::MAP_ASYNC\n                | EnvironmentFlags::WRITE_MAP\n                | EnvironmentFlags::NO_META_SYNC\n        } else {\n            EnvironmentFlags::NO_SUB_DIR | EnvironmentFlags::NO_READAHEAD\n        };\n\n        let env = Environment::new()\n            // Set the flag to manage our own directory like in the storage component.\n            .set_flags(lmdb_flags)\n            .set_max_dbs(MAX_DBS)\n            .set_map_size(map_size)\n            .set_max_readers(max_readers)\n            .open(&path.as_ref().join(EE_DB_FILENAME))?;\n        Ok(LmdbEnvironment {\n            env,\n            manual_sync_enabled,\n        })\n    }\n\n    /// Returns a reference to the wrapped `Environment`.\n    pub fn env(&self) -> &Environment {\n        &self.env\n    }\n\n    /// Returns if this environment was constructed with manual synchronization enabled.\n    pub fn is_manual_sync_enabled(&self) -> bool {\n        self.manual_sync_enabled\n    }\n\n    /// Manually synchronize LMDB to disk.\n    pub fn sync(&self) -> Result<(), lmdb::Error> {\n        self.env.sync(true)\n    }\n}\n\nimpl<'a> TransactionSource<'a> for LmdbEnvironment {\n    type Error = lmdb::Error;\n\n    type Handle = Database;\n\n    type ReadTransaction = RoTransaction<'a>;\n\n    type ReadWriteTransaction = RwTransaction<'a>;\n\n    fn create_read_txn(&'a self) -> Result<RoTransaction<'a>, Self::Error> {\n        self.env.begin_ro_txn()\n    }\n\n    fn create_read_write_txn(&'a self) -> Result<RwTransaction<'a>, Self::Error> {\n        self.env.begin_rw_txn()\n    }\n}\n"
  },
  {
    "path": "storage/src/global_state/transaction_source/mod.rs",
    "content": "use casper_types::bytesrepr::Bytes;\n\n/// LMDB implementation of transaction source.\npub mod lmdb;\n\n/// A transaction which can be committed or aborted.\npub trait Transaction: Sized {\n    /// An error which can occur while reading or writing during a transaction,\n    /// or committing the transaction.\n    type Error;\n\n    /// An entity which is being read from or written to during a transaction.\n    type Handle;\n\n    /// Commits the transaction.\n    fn commit(self) -> Result<(), Self::Error>;\n\n    /// Aborts the transaction.\n    ///\n    /// Any pending operations will not be saved.\n    fn abort(self) {\n        unimplemented!(\"Abort operations should be performed in Drop implementations.\")\n    }\n}\n\n/// A transaction with the capability to read from a given [`Handle`](Transaction::Handle).\npub trait Readable: Transaction {\n    /// Returns the value from the corresponding key from a given [`Transaction::Handle`].\n    fn read(&self, handle: Self::Handle, key: &[u8]) -> Result<Option<Bytes>, Self::Error>;\n}\n\n/// A transaction with the capability to write to a given [`Handle`](Transaction::Handle).\npub trait Writable: Transaction {\n    /// Inserts a key-value pair into a given [`Transaction::Handle`].\n    fn write(&mut self, handle: Self::Handle, key: &[u8], value: &[u8]) -> Result<(), Self::Error>;\n}\n\n/// A source of transactions e.g. 
values that implement [`Readable`]\n/// and/or [`Writable`].\npub trait TransactionSource<'a> {\n    /// An error which can occur while creating a read or read-write\n    /// transaction.\n    type Error;\n\n    /// An entity which is being read from or written to during a transaction.\n    type Handle;\n\n    /// Represents the type of read transactions.\n    type ReadTransaction: Readable<Error = Self::Error, Handle = Self::Handle>;\n\n    /// Represents the type of read-write transactions.\n    type ReadWriteTransaction: Readable<Error = Self::Error, Handle = Self::Handle>\n        + Writable<Error = Self::Error, Handle = Self::Handle>;\n\n    /// Creates a read transaction.\n    fn create_read_txn(&'a self) -> Result<Self::ReadTransaction, Self::Error>;\n\n    /// Creates a read-write transaction.\n    fn create_read_write_txn(&'a self) -> Result<Self::ReadWriteTransaction, Self::Error>;\n}\n"
  },
  {
    "path": "storage/src/global_state/trie/gens.rs",
    "content": "//! Generators for trie related types.\nuse proptest::{collection::vec, option, prelude::*};\n\nuse casper_types::{\n    gens::{key_arb, stored_value_arb, trie_pointer_arb},\n    Key, StoredValue,\n};\n\nuse super::{Pointer, PointerBlock, Trie};\n\n/// Generates a trie pointer block.\npub fn trie_pointer_block_arb() -> impl Strategy<Value = PointerBlock> {\n    vec(option::of(trie_pointer_arb()), 256).prop_map(|vec| {\n        let mut ret: [Option<Pointer>; 256] = [Default::default(); 256];\n        ret.clone_from_slice(vec.as_slice());\n        ret.into()\n    })\n}\n\n/// Generates a trie leaf.\npub fn trie_leaf_arb() -> impl Strategy<Value = Trie<Key, StoredValue>> {\n    (key_arb(), stored_value_arb()).prop_map(|(key, value)| Trie::Leaf { key, value })\n}\n\n/// Generates a trie node with a single child.\npub fn trie_extension_arb() -> impl Strategy<Value = Trie<Key, StoredValue>> {\n    (vec(any::<u8>(), 0..32), trie_pointer_arb())\n        .prop_map(|(affix, pointer)| Trie::extension(affix, pointer))\n}\n\n/// Generates a trie node with multiple children.\npub fn trie_node_arb() -> impl Strategy<Value = Trie<Key, StoredValue>> {\n    trie_pointer_block_arb().prop_map(|pointer_block| Trie::Node {\n        pointer_block: Box::new(pointer_block),\n    })\n}\n"
  },
  {
    "path": "storage/src/global_state/trie/mod.rs",
    "content": "//! Core types for a Merkle Trie\n\nuse std::{\n    convert::{TryFrom, TryInto},\n    fmt::{self, Debug, Display, Formatter},\n    iter::Flatten,\n    mem::MaybeUninit,\n    slice,\n};\n\nuse datasize::DataSize;\nuse num_derive::{FromPrimitive, ToPrimitive};\nuse num_traits::{FromPrimitive, ToPrimitive};\nuse serde::{\n    de::{self, MapAccess, Visitor},\n    ser::SerializeMap,\n    Deserialize, Deserializer, Serialize, Serializer,\n};\n\nuse casper_types::{\n    bytesrepr::{self, Bytes, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    global_state::Pointer,\n    Digest,\n};\n\n#[cfg(test)]\npub mod gens;\n\n#[cfg(test)]\nmod tests;\n\npub(crate) const USIZE_EXCEEDS_U8: &str = \"usize exceeds u8\";\npub(crate) const RADIX: usize = 256;\n\n/// A parent is represented as a pair of a child index and a node or extension.\npub type Parents<K, V> = Vec<(u8, Trie<K, V>)>;\n\n/// Type alias for values under pointer blocks.\npub type PointerBlockValue = Option<Pointer>;\n\n/// Type alias for arrays of pointer block values.\npub type PointerBlockArray = [PointerBlockValue; RADIX];\n\n/// Represents the underlying structure of a node in a Merkle Trie\n#[derive(Copy, Clone)]\npub struct PointerBlock(PointerBlockArray);\n\nimpl Serialize for PointerBlock {\n    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>\n    where\n        S: Serializer,\n    {\n        // We are going to use the sparse representation of pointer blocks\n        // non-None entries and their indices will be output\n\n        // Create the sequence serializer, reserving the necessary number of slots\n        let elements_count = self.0.iter().filter(|element| element.is_some()).count();\n        let mut map = serializer.serialize_map(Some(elements_count))?;\n\n        // Store the non-None entries with their indices\n        for (index, maybe_pointer_block) in self.0.iter().enumerate() {\n            if let Some(pointer_block_value) = maybe_pointer_block {\n                
map.serialize_entry(&(index as u8), pointer_block_value)?;\n            }\n        }\n        map.end()\n    }\n}\n\nimpl<'de> Deserialize<'de> for PointerBlock {\n    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>\n    where\n        D: Deserializer<'de>,\n    {\n        struct PointerBlockDeserializer;\n\n        impl<'de> Visitor<'de> for PointerBlockDeserializer {\n            type Value = PointerBlock;\n\n            fn expecting(&self, formatter: &mut Formatter) -> fmt::Result {\n                formatter.write_str(\"sparse representation of a PointerBlock\")\n            }\n\n            fn visit_map<M>(self, mut access: M) -> Result<Self::Value, M::Error>\n            where\n                M: MapAccess<'de>,\n            {\n                let mut pointer_block = PointerBlock::new();\n\n                // Unpack the sparse representation\n                while let Some((index, pointer_block_value)) = access.next_entry::<u8, Pointer>()? {\n                    let element = pointer_block.0.get_mut(usize::from(index)).ok_or_else(|| {\n                        de::Error::custom(format!(\"invalid index {} in pointer block value\", index))\n                    })?;\n                    *element = Some(pointer_block_value);\n                }\n\n                Ok(pointer_block)\n            }\n        }\n        deserializer.deserialize_map(PointerBlockDeserializer)\n    }\n}\n\nimpl PointerBlock {\n    /// No-arg constructor for `PointerBlock`. 
Delegates to `Default::default()`.\n    pub fn new() -> Self {\n        Default::default()\n    }\n\n    /// Constructs a `PointerBlock` from a slice of indexed `Pointer`s.\n    pub fn from_indexed_pointers(indexed_pointers: &[(u8, Pointer)]) -> Self {\n        let mut ret = PointerBlock::new();\n        for (idx, ptr) in indexed_pointers.iter() {\n            ret[*idx as usize] = Some(*ptr);\n        }\n        ret\n    }\n\n    /// Deconstructs a `PointerBlock` into an iterator of indexed `Pointer`s.\n    pub fn as_indexed_pointers(&self) -> impl Iterator<Item = (u8, Pointer)> + '_ {\n        self.0\n            .iter()\n            .enumerate()\n            .filter_map(|(index, maybe_pointer)| {\n                maybe_pointer\n                    .map(|value| (index.try_into().expect(USIZE_EXCEEDS_U8), value.to_owned()))\n            })\n    }\n\n    /// Gets the count of children for this `PointerBlock`.\n    pub fn child_count(&self) -> usize {\n        self.as_indexed_pointers().count()\n    }\n}\n\nimpl From<PointerBlockArray> for PointerBlock {\n    fn from(src: PointerBlockArray) -> Self {\n        PointerBlock(src)\n    }\n}\n\nimpl PartialEq for PointerBlock {\n    #[inline]\n    fn eq(&self, other: &PointerBlock) -> bool {\n        self.0[..] 
== other.0[..]\n    }\n}\n\nimpl Eq for PointerBlock {}\n\nimpl Default for PointerBlock {\n    fn default() -> Self {\n        PointerBlock([Default::default(); RADIX])\n    }\n}\n\nimpl ToBytes for PointerBlock {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        for pointer in self.0.iter() {\n            result.append(&mut pointer.to_bytes()?);\n        }\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.iter().map(ToBytes::serialized_length).sum()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        for pointer in self.0.iter() {\n            pointer.write_bytes(writer)?;\n        }\n        Ok(())\n    }\n}\n\nimpl FromBytes for PointerBlock {\n    fn from_bytes(mut bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let pointer_block_array = {\n            // With MaybeUninit here we can avoid default initialization of result array below.\n            let mut result: MaybeUninit<PointerBlockArray> = MaybeUninit::uninit();\n            let result_ptr = result.as_mut_ptr() as *mut PointerBlockValue;\n            for i in 0..RADIX {\n                let (t, remainder) = match FromBytes::from_bytes(bytes) {\n                    Ok(success) => success,\n                    Err(error) => {\n                        for j in 0..i {\n                            unsafe { result_ptr.add(j).drop_in_place() }\n                        }\n                        return Err(error);\n                    }\n                };\n                unsafe { result_ptr.add(i).write(t) };\n                bytes = remainder;\n            }\n            unsafe { result.assume_init() }\n        };\n        Ok((PointerBlock(pointer_block_array), bytes))\n    }\n}\n\nimpl core::ops::Index<usize> for PointerBlock {\n    type Output = PointerBlockValue;\n\n    #[inline]\n    fn index(&self, index: usize) -> 
&Self::Output {\n        let PointerBlock(dat) = self;\n        &dat[index]\n    }\n}\n\nimpl core::ops::IndexMut<usize> for PointerBlock {\n    #[inline]\n    fn index_mut(&mut self, index: usize) -> &mut Self::Output {\n        let PointerBlock(dat) = self;\n        &mut dat[index]\n    }\n}\n\nimpl core::ops::Index<core::ops::Range<usize>> for PointerBlock {\n    type Output = [PointerBlockValue];\n\n    #[inline]\n    fn index(&self, index: core::ops::Range<usize>) -> &[PointerBlockValue] {\n        let PointerBlock(dat) = self;\n        &dat[index]\n    }\n}\n\nimpl core::ops::Index<core::ops::RangeTo<usize>> for PointerBlock {\n    type Output = [PointerBlockValue];\n\n    #[inline]\n    fn index(&self, index: core::ops::RangeTo<usize>) -> &[PointerBlockValue] {\n        let PointerBlock(dat) = self;\n        &dat[index]\n    }\n}\n\nimpl core::ops::Index<core::ops::RangeFrom<usize>> for PointerBlock {\n    type Output = [PointerBlockValue];\n\n    #[inline]\n    fn index(&self, index: core::ops::RangeFrom<usize>) -> &[PointerBlockValue] {\n        let PointerBlock(dat) = self;\n        &dat[index]\n    }\n}\n\nimpl core::ops::Index<core::ops::RangeFull> for PointerBlock {\n    type Output = [PointerBlockValue];\n\n    #[inline]\n    fn index(&self, index: core::ops::RangeFull) -> &[PointerBlockValue] {\n        let PointerBlock(dat) = self;\n        &dat[index]\n    }\n}\n\nimpl ::std::fmt::Debug for PointerBlock {\n    #[allow(clippy::assertions_on_constants)]\n    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {\n        assert!(RADIX > 1, \"RADIX must be > 1\");\n        write!(f, \"{}([\", stringify!(PointerBlock))?;\n        write!(f, \"{:?}\", self.0[0])?;\n        for item in self.0[1..].iter() {\n            write!(f, \", {:?}\", item)?;\n        }\n        write!(f, \"])\")\n    }\n}\n\n/// Newtype representing a trie node in its raw form without deserializing into `Trie`.\n#[derive(Debug, Clone, Eq, PartialEq, Serialize, 
Deserialize, DataSize)]\npub struct TrieRaw(Bytes);\n\nimpl TrieRaw {\n    /// Constructs an instance of [`TrieRaw`].\n    pub fn new(bytes: Bytes) -> Self {\n        TrieRaw(bytes)\n    }\n\n    /// Consumes self and returns inner bytes.\n    pub fn into_inner(self) -> Bytes {\n        self.0\n    }\n\n    /// Returns a reference inner bytes.\n    pub fn inner(&self) -> &Bytes {\n        &self.0\n    }\n\n    /// Returns a hash of the inner bytes.\n    pub fn hash(&self) -> Digest {\n        Digest::hash_into_chunks_if_necessary(self.inner())\n    }\n}\n\nimpl ToBytes for TrieRaw {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n}\n\nimpl FromBytes for TrieRaw {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (bytes, rem) = Bytes::from_bytes(bytes)?;\n        Ok((TrieRaw(bytes), rem))\n    }\n}\n\n/// Represents all possible serialization tags for a [`Trie`] enum.\n#[derive(Debug, Copy, Clone, PartialEq, Eq, FromPrimitive, ToPrimitive)]\n#[repr(u8)]\npub(crate) enum TrieTag {\n    /// Represents a tag for a [`Trie::Leaf`] variant.\n    Leaf = 0,\n    /// Represents a tag for a [`Trie::Node`] variant.\n    Node = 1,\n    /// Represents a tag for a [`Trie::Extension`] variant.\n    Extension = 2,\n}\n\nimpl From<TrieTag> for u8 {\n    fn from(value: TrieTag) -> Self {\n        TrieTag::to_u8(&value).unwrap() // SAFETY: TrieTag is represented as u8.\n    }\n}\n\n/// Represents a Merkle Trie.\n#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]\npub enum Trie<K, V> {\n    /// Trie leaf.\n    Leaf {\n        /// Leaf key.\n        key: K,\n        /// Leaf value.\n        value: V,\n    },\n    /// Trie node.\n    Node {\n        /// Node pointer block.\n        pointer_block: Box<PointerBlock>,\n    },\n    /// Trie extension node.\n    Extension {\n        /// Extension node 
affix bytes.\n        affix: Bytes,\n        /// Extension node pointer.\n        pointer: Pointer,\n    },\n}\n\nimpl<K, V> Display for Trie<K, V>\nwhere\n    K: Debug,\n    V: Debug,\n{\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        write!(f, \"{:?}\", self)\n    }\n}\n\nimpl<K, V> Trie<K, V> {\n    fn tag(&self) -> TrieTag {\n        match self {\n            Trie::Leaf { .. } => TrieTag::Leaf,\n            Trie::Node { .. } => TrieTag::Node,\n            Trie::Extension { .. } => TrieTag::Extension,\n        }\n    }\n\n    /// Tag type for current trie element.\n    pub fn tag_type(&self) -> String {\n        match self {\n            Trie::Leaf { .. } => \"Leaf\".to_string(),\n            Trie::Node { .. } => \"Node\".to_string(),\n            Trie::Extension { .. } => \"Extension\".to_string(),\n        }\n    }\n\n    /// Constructs a [`Trie::Leaf`] from a given key and value.\n    pub fn leaf(key: K, value: V) -> Self {\n        Trie::Leaf { key, value }\n    }\n\n    /// Constructs a [`Trie::Node`] from a given slice of indexed pointers.\n    pub fn node(indexed_pointers: &[(u8, Pointer)]) -> Self {\n        let pointer_block = PointerBlock::from_indexed_pointers(indexed_pointers);\n        let pointer_block = Box::new(pointer_block);\n        Trie::Node { pointer_block }\n    }\n\n    /// Constructs a [`Trie::Extension`] from a given affix and pointer.\n    pub fn extension(affix: Vec<u8>, pointer: Pointer) -> Self {\n        Trie::Extension {\n            affix: affix.into(),\n            pointer,\n        }\n    }\n\n    /// Gets a reference to the root key of this Trie.\n    pub fn key(&self) -> Option<&K> {\n        match self {\n            Trie::Leaf { key, .. 
} => Some(key),\n            _ => None,\n        }\n    }\n\n    /// Returns the hash of this Trie.\n    pub fn trie_hash(&self) -> Result<Digest, bytesrepr::Error>\n    where\n        Self: ToBytes,\n    {\n        self.to_bytes()\n            .map(|bytes| Digest::hash_into_chunks_if_necessary(&bytes))\n    }\n\n    /// Returns bytes representation of this Trie and the hash over those bytes.\n    pub fn trie_hash_and_bytes(&self) -> Result<(Digest, Vec<u8>), bytesrepr::Error>\n    where\n        Self: ToBytes,\n    {\n        self.to_bytes()\n            .map(|bytes| (Digest::hash_into_chunks_if_necessary(&bytes), bytes))\n    }\n\n    /// Returns a pointer block, if possible.\n    pub fn as_pointer_block(&self) -> Option<&PointerBlock> {\n        if let Self::Node { pointer_block } = self {\n            Some(pointer_block.as_ref())\n        } else {\n            None\n        }\n    }\n\n    /// Returns an iterator over descendants of the trie.\n    pub fn iter_children(&self) -> DescendantsIterator {\n        match self {\n            Trie::<K, V>::Leaf { .. } => DescendantsIterator::ZeroOrOne(None),\n            Trie::Node { pointer_block } => DescendantsIterator::PointerBlock {\n                iter: pointer_block.0.iter().flatten(),\n            },\n            Trie::Extension { pointer, .. 
} => {\n                DescendantsIterator::ZeroOrOne(Some(pointer.into_hash()))\n            }\n        }\n    }\n}\n\n/// Bytes representation of a `Trie` that is a `Trie::Leaf` variant.\n/// The bytes for this trie leaf also include the `Trie::Tag`.\n#[derive(Debug, Clone, PartialEq)]\npub(crate) struct TrieLeafBytes(Bytes);\n\nimpl TrieLeafBytes {\n    pub(crate) fn bytes(&self) -> &Bytes {\n        &self.0\n    }\n\n    pub(crate) fn try_deserialize_leaf_key<K: FromBytes>(\n        &self,\n    ) -> Result<(K, &[u8]), bytesrepr::Error> {\n        let (tag_byte, rem) = u8::from_bytes(&self.0)?;\n        let tag = TrieTag::from_u8(tag_byte).ok_or(bytesrepr::Error::Formatting)?;\n        assert_eq!(\n            tag,\n            TrieTag::Leaf,\n            \"Unexpected layout for trie leaf bytes. Expected `TrieTag::Leaf` but got {:?}\",\n            tag\n        );\n        K::from_bytes(rem)\n    }\n}\n\nimpl From<&[u8]> for TrieLeafBytes {\n    fn from(value: &[u8]) -> Self {\n        Self(value.into())\n    }\n}\n\nimpl From<Vec<u8>> for TrieLeafBytes {\n    fn from(value: Vec<u8>) -> Self {\n        Self(value.into())\n    }\n}\n\n/// Like `Trie` but does not deserialize the leaf when constructed.\n#[derive(Debug, Clone, PartialEq)]\npub(crate) enum LazilyDeserializedTrie {\n    /// Serialized trie leaf bytes\n    Leaf(TrieLeafBytes),\n    /// Trie node.\n    Node { pointer_block: Box<PointerBlock> },\n    /// Trie extension node.\n    Extension { affix: Bytes, pointer: Pointer },\n}\n\nimpl LazilyDeserializedTrie {\n    pub(crate) fn iter_children(&self) -> DescendantsIterator {\n        match self {\n            LazilyDeserializedTrie::Leaf(_) => {\n                // Leaf bytes does not have any children\n                DescendantsIterator::ZeroOrOne(None)\n            }\n            LazilyDeserializedTrie::Node { pointer_block } => DescendantsIterator::PointerBlock {\n                iter: pointer_block.0.iter().flatten(),\n            },\n            
LazilyDeserializedTrie::Extension { pointer, .. } => {\n                DescendantsIterator::ZeroOrOne(Some(pointer.into_hash()))\n            }\n        }\n    }\n}\n\nimpl FromBytes for LazilyDeserializedTrie {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag_byte, rem) = u8::from_bytes(bytes)?;\n        let tag = TrieTag::from_u8(tag_byte).ok_or(bytesrepr::Error::Formatting)?;\n        match tag {\n            TrieTag::Leaf => Ok((LazilyDeserializedTrie::Leaf(bytes.into()), &[])),\n            TrieTag::Node => {\n                let (pointer_block, rem) = PointerBlock::from_bytes(rem)?;\n                Ok((\n                    LazilyDeserializedTrie::Node {\n                        pointer_block: Box::new(pointer_block),\n                    },\n                    rem,\n                ))\n            }\n            TrieTag::Extension => {\n                let (affix, rem) = FromBytes::from_bytes(rem)?;\n                let (pointer, rem) = Pointer::from_bytes(rem)?;\n                Ok((LazilyDeserializedTrie::Extension { affix, pointer }, rem))\n            }\n        }\n    }\n}\n\nimpl<K, V> TryFrom<Trie<K, V>> for LazilyDeserializedTrie\nwhere\n    K: ToBytes,\n    V: ToBytes,\n{\n    type Error = bytesrepr::Error;\n\n    fn try_from(value: Trie<K, V>) -> Result<Self, Self::Error> {\n        match value {\n            Trie::Leaf { .. 
} => {\n                let serialized_bytes = ToBytes::to_bytes(&value)?;\n                Ok(LazilyDeserializedTrie::Leaf(serialized_bytes.into()))\n            }\n            Trie::Node { pointer_block } => Ok(LazilyDeserializedTrie::Node { pointer_block }),\n            Trie::Extension { affix, pointer } => {\n                Ok(LazilyDeserializedTrie::Extension { affix, pointer })\n            }\n        }\n    }\n}\n\n/// An iterator over the descendants of a trie node.\npub enum DescendantsIterator<'a> {\n    /// A leaf (zero descendants) or extension (one descendant) being iterated.\n    ZeroOrOne(Option<Digest>),\n    /// A pointer block being iterated.\n    PointerBlock {\n        /// An iterator over the non-None entries of the `PointerBlock`.\n        iter: Flatten<slice::Iter<'a, Option<Pointer>>>,\n    },\n}\n\nimpl Iterator for DescendantsIterator<'_> {\n    type Item = Digest;\n\n    fn next(&mut self) -> Option<Self::Item> {\n        match *self {\n            DescendantsIterator::ZeroOrOne(ref mut maybe_digest) => maybe_digest.take(),\n            DescendantsIterator::PointerBlock { ref mut iter } => {\n                iter.next().map(|pointer| *pointer.hash())\n            }\n        }\n    }\n}\n\nimpl<K, V> ToBytes for Trie<K, V>\nwhere\n    K: ToBytes,\n    V: ToBytes,\n{\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut ret = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut ret)?;\n        Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                Trie::Leaf { key, value } => key.serialized_length() + value.serialized_length(),\n                Trie::Node { pointer_block } => pointer_block.serialized_length(),\n                Trie::Extension { affix, pointer } => {\n                    affix.serialized_length() + pointer.serialized_length()\n                }\n            }\n    }\n\n    fn write_bytes(&self, writer: 
&mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        // NOTE: When changing this make sure all partial deserializers that are referencing\n        // `LazyTrieLeaf` are also updated.\n        writer.push(u8::from(self.tag()));\n        match self {\n            Trie::Leaf { key, value } => {\n                key.write_bytes(writer)?;\n                value.write_bytes(writer)?;\n            }\n            Trie::Node { pointer_block } => pointer_block.write_bytes(writer)?,\n            Trie::Extension { affix, pointer } => {\n                affix.write_bytes(writer)?;\n                pointer.write_bytes(writer)?;\n            }\n        }\n        Ok(())\n    }\n}\n\nimpl<K: FromBytes, V: FromBytes> FromBytes for Trie<K, V> {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag_byte, rem) = u8::from_bytes(bytes)?;\n        let tag = TrieTag::from_u8(tag_byte).ok_or(bytesrepr::Error::Formatting)?;\n        match tag {\n            TrieTag::Leaf => {\n                let (key, rem) = K::from_bytes(rem)?;\n                let (value, rem) = V::from_bytes(rem)?;\n                Ok((Trie::Leaf { key, value }, rem))\n            }\n            TrieTag::Node => {\n                let (pointer_block, rem) = PointerBlock::from_bytes(rem)?;\n                Ok((\n                    Trie::Node {\n                        pointer_block: Box::new(pointer_block),\n                    },\n                    rem,\n                ))\n            }\n            TrieTag::Extension => {\n                let (affix, rem) = FromBytes::from_bytes(rem)?;\n                let (pointer, rem) = Pointer::from_bytes(rem)?;\n                Ok((Trie::Extension { affix, pointer }, rem))\n            }\n        }\n    }\n}\n\nimpl<K: FromBytes, V: FromBytes> TryFrom<LazilyDeserializedTrie> for Trie<K, V> {\n    type Error = bytesrepr::Error;\n\n    fn try_from(value: LazilyDeserializedTrie) -> Result<Self, Self::Error> {\n        match value {\n  
          LazilyDeserializedTrie::Leaf(leaf_bytes) => {\n                let (key, value_bytes) = leaf_bytes.try_deserialize_leaf_key()?;\n                let value = bytesrepr::deserialize_from_slice(value_bytes)?;\n                Ok(Self::Leaf { key, value })\n            }\n            LazilyDeserializedTrie::Node { pointer_block } => Ok(Self::Node { pointer_block }),\n            LazilyDeserializedTrie::Extension { affix, pointer } => {\n                Ok(Self::Extension { affix, pointer })\n            }\n        }\n    }\n}\n\npub(crate) mod operations {\n    use casper_types::{\n        bytesrepr::{self, ToBytes},\n        Digest,\n    };\n\n    use crate::global_state::trie::Trie;\n\n    /// Creates a tuple containing an empty root hash and an empty root (a node\n    /// with an empty pointer block)\n    pub fn create_hashed_empty_trie<K: ToBytes, V: ToBytes>(\n    ) -> Result<(Digest, Trie<K, V>), bytesrepr::Error> {\n        let root: Trie<K, V> = Trie::Node {\n            pointer_block: Default::default(),\n        };\n        let root_bytes: Vec<u8> = root.to_bytes()?;\n        Ok((Digest::hash(root_bytes), root))\n    }\n}\n"
  },
  {
    "path": "storage/src/global_state/trie/tests.rs",
    "content": "#[test]\nfn radix_is_256() {\n    assert_eq!(\n        super::RADIX,\n        256,\n        \"Changing RADIX alone might cause things to break\"\n    );\n}\n\nmod pointer_block {\n    use casper_types::U256;\n\n    use crate::global_state::trie::*;\n\n    /// A defense against changes to [`RADIX`](history::trie::RADIX).\n    #[test]\n    fn debug_formatter_succeeds() {\n        let _ = format!(\"{:?}\", PointerBlock::new());\n    }\n\n    #[test]\n    fn assignment_and_indexing() {\n        let test_hash = Digest::hash(b\"TrieTrieAgain\");\n        let leaf_pointer = Some(Pointer::LeafPointer(test_hash));\n        let mut pointer_block = PointerBlock::new();\n        pointer_block[0] = leaf_pointer;\n        pointer_block[RADIX - 1] = leaf_pointer;\n        assert_eq!(leaf_pointer, pointer_block[0]);\n        assert_eq!(leaf_pointer, pointer_block[RADIX - 1]);\n        assert_eq!(None, pointer_block[1]);\n        assert_eq!(None, pointer_block[RADIX - 2]);\n    }\n\n    #[test]\n    #[should_panic]\n    fn assignment_off_end() {\n        let test_hash = Digest::hash(b\"TrieTrieAgain\");\n        let leaf_pointer = Some(Pointer::LeafPointer(test_hash));\n        let mut pointer_block = PointerBlock::new();\n        pointer_block[RADIX] = leaf_pointer;\n    }\n\n    #[test]\n    #[should_panic]\n    fn indexing_off_end() {\n        let pointer_block = PointerBlock::new();\n        let _val = pointer_block[RADIX];\n    }\n\n    #[test]\n    fn trie_node_descendants_iterator() {\n        fn digest_from_value<T: Into<U256>>(value: T) -> Digest {\n            let mut value_bytes = [0; Digest::LENGTH];\n            let u256: U256 = value.into();\n            u256.to_big_endian(&mut value_bytes);\n            Digest::from(value_bytes)\n        }\n\n        let pointers: Vec<_> = (0..=255u8)\n            .rev()\n            .filter_map(|index| {\n                let hash = digest_from_value(index);\n                if index % 3 == 0 {\n                    
Some((index, Pointer::NodePointer(hash)))\n                } else if index % 3 == 1 {\n                    Some((index, Pointer::LeafPointer(hash)))\n                } else if index % 3 == 2 {\n                    None\n                } else {\n                    unreachable!()\n                }\n            })\n            .collect();\n\n        let trie = Trie::<(), ()>::Node {\n            pointer_block: Box::new(PointerBlock::from_indexed_pointers(pointers.as_slice())),\n        };\n        let mut descendants = trie.iter_children();\n        let hashes: Vec<Digest> = descendants.by_ref().collect();\n        assert_eq!(\n            hashes,\n            pointers\n                .into_iter()\n                .rev() // reverse again for correct order\n                .map(|(_idx, pointer)| *pointer.hash())\n                .collect::<Vec<Digest>>()\n        );\n\n        assert_eq!(descendants.next(), None);\n        assert_eq!(descendants.next(), None);\n    }\n}\n\nmod proptests {\n    use std::convert::TryInto;\n\n    use proptest::prelude::*;\n\n    use casper_types::{\n        bytesrepr::{self, deserialize_from_slice, FromBytes, ToBytes},\n        gens::{all_keys_arb, blake2b_hash_arb, trie_pointer_arb},\n        Digest, Key, StoredValue,\n    };\n\n    use crate::global_state::trie::{gens::*, LazilyDeserializedTrie, PointerBlock, Trie};\n\n    fn test_trie_roundtrip_to_lazy_trie<K, V>(trie: &Trie<K, V>)\n    where\n        K: ToBytes + FromBytes + PartialEq + std::fmt::Debug + Clone,\n        V: ToBytes + FromBytes + PartialEq + std::fmt::Debug + Clone,\n    {\n        let serialized = ToBytes::to_bytes(trie).expect(\"Unable to serialize data\");\n\n        let expected_lazy_trie_leaf: LazilyDeserializedTrie = (*trie)\n            .clone()\n            .try_into()\n            .expect(\"Cannot convert Trie<K, V> to LazilyDeserializedTrie\");\n\n        let deserialized_from_slice: LazilyDeserializedTrie =\n            
deserialize_from_slice(&serialized).expect(\"Unable to deserialize data\");\n        assert_eq!(expected_lazy_trie_leaf, deserialized_from_slice);\n        assert_eq!(\n            *trie,\n            deserialized_from_slice\n                .clone()\n                .try_into()\n                .expect(\"Expected to be able to convert LazilyDeserializedTrie to Trie<K, V>\")\n        );\n        if let LazilyDeserializedTrie::Leaf(leaf_bytes) = deserialized_from_slice {\n            let (key, _) = leaf_bytes\n                .try_deserialize_leaf_key::<K>()\n                .expect(\"Should have been able to deserialize key\");\n            assert_eq!(key, *trie.key().unwrap());\n        };\n\n        let deserialized: LazilyDeserializedTrie =\n            bytesrepr::deserialize(serialized).expect(\"Unable to deserialize data\");\n        assert_eq!(expected_lazy_trie_leaf, deserialized);\n        assert_eq!(\n            *trie,\n            deserialized\n                .clone()\n                .try_into()\n                .expect(\"Expected to be able to convert LazilyDeserializedTrie to Trie<K, V>\")\n        );\n        if let LazilyDeserializedTrie::Leaf(leaf_bytes) = deserialized {\n            let (key, _) = leaf_bytes\n                .try_deserialize_leaf_key::<K>()\n                .expect(\"Should have been able to deserialize key\");\n            assert_eq!(key, *trie.key().unwrap());\n        };\n    }\n\n    proptest! 
{\n        #[test]\n        fn roundtrip_blake2b_hash(hash in blake2b_hash_arb()) {\n            bytesrepr::test_serialization_roundtrip(&hash);\n        }\n\n        #[test]\n        fn roundtrip_trie_pointer(pointer in trie_pointer_arb()) {\n            bytesrepr::test_serialization_roundtrip(&pointer);\n        }\n\n        #[test]\n        fn roundtrip_trie_pointer_block(pointer_block in trie_pointer_block_arb()) {\n            bytesrepr::test_serialization_roundtrip(&pointer_block);\n        }\n\n        #[test]\n        fn bytesrepr_roundtrip_trie_leaf(trie_leaf in trie_leaf_arb()) {\n            bytesrepr::test_serialization_roundtrip(&trie_leaf);\n        }\n\n        #[test]\n        fn bytesrepr_roundtrip_trie_leaf_to_lazy_trie(trie_leaf in trie_leaf_arb()) {\n            test_trie_roundtrip_to_lazy_trie(&trie_leaf)\n        }\n\n        #[test]\n        fn bytesrepr_roundtrip_trie_extension_to_lazy_trie(trie_extension in trie_extension_arb()) {\n            test_trie_roundtrip_to_lazy_trie(&trie_extension)\n        }\n\n        #[test]\n        fn bytesrepr_roundtrip_trie_node_to_lazy_trie(trie_node in trie_node_arb()) {\n            test_trie_roundtrip_to_lazy_trie(&trie_node);\n        }\n\n        #[test]\n        fn bytesrepr_roundtrip_trie_extension(trie_extension in trie_extension_arb()) {\n            bytesrepr::test_serialization_roundtrip(&trie_extension);\n        }\n\n        #[test]\n        fn bytesrepr_roundtrip_trie_node(trie_node in trie_node_arb()) {\n            bytesrepr::test_serialization_roundtrip(&trie_node);\n        }\n\n        #[test]\n        fn roundtrip_key(key in all_keys_arb()) {\n            bytesrepr::test_serialization_roundtrip(&key);\n        }\n\n        #[test]\n        fn serde_roundtrip_trie_pointer_block(pointer_block in trie_pointer_block_arb()) {\n             let json_str = serde_json::to_string(&pointer_block)?;\n             let deserialized_pointer_block: PointerBlock = serde_json::from_str(&json_str)?;\n   
          assert_eq!(pointer_block, deserialized_pointer_block)\n        }\n\n        #[test]\n        fn serde_roundtrip_trie_leaf(trie_leaf in trie_leaf_arb()) {\n             let json_str = serde_json::to_string(&trie_leaf)?;\n             let deserialized_trie: Trie<Key, StoredValue> = serde_json::from_str(&json_str)?;\n             assert_eq!(trie_leaf, deserialized_trie)\n        }\n\n        #[test]\n        fn serde_roundtrip_trie_node(trie_node in trie_node_arb()) {\n             let json_str = serde_json::to_string(&trie_node)?;\n             let deserialized_trie: Trie<Key, StoredValue> = serde_json::from_str(&json_str)?;\n             assert_eq!(trie_node, deserialized_trie)\n        }\n\n        #[test]\n        fn serde_roundtrip_trie_extension(trie_extension in trie_extension_arb()) {\n             let json_str = serde_json::to_string(&trie_extension)?;\n             let deserialized_trie: Trie<Key, StoredValue> = serde_json::from_str(&json_str)?;\n             assert_eq!(trie_extension, deserialized_trie)\n        }\n\n        #[test]\n        fn bincode_roundtrip_trie_leaf(trie_leaf in trie_leaf_arb()) {\n           let bincode_bytes = bincode::serialize(&trie_leaf)?;\n           let deserialized_trie = bincode::deserialize(&bincode_bytes)?;\n           assert_eq!(trie_leaf, deserialized_trie)\n        }\n\n        #[test]\n        fn bincode_roundtrip_trie_node(trie_node in trie_node_arb()) {\n           let bincode_bytes = bincode::serialize(&trie_node)?;\n           let deserialized_trie = bincode::deserialize(&bincode_bytes)?;\n           assert_eq!(trie_node, deserialized_trie)\n        }\n\n        #[test]\n        fn bincode_roundtrip_trie_extension(trie_extension in trie_extension_arb()) {\n           let bincode_bytes = bincode::serialize(&trie_extension)?;\n           let deserialized_trie = bincode::deserialize(&bincode_bytes)?;\n           assert_eq!(trie_extension, deserialized_trie)\n        }\n\n        #[test]\n        fn 
bincode_roundtrip_trie_pointer_block(pointer_block in trie_pointer_block_arb()) {\n             let bincode_bytes = bincode::serialize(&pointer_block)?;\n             let deserialized_pointer_block = bincode::deserialize(&bincode_bytes)?;\n             assert_eq!(pointer_block, deserialized_pointer_block)\n        }\n\n        #[test]\n        fn bincode_roundtrip_key(key in all_keys_arb()) {\n             let bincode_bytes = bincode::serialize(&key)?;\n             let deserialized_key = bincode::deserialize(&bincode_bytes)?;\n             prop_assert_eq!(key, deserialized_key)\n        }\n\n        #[test]\n        fn serde_roundtrip_key(key in all_keys_arb()) {\n             let json_str = serde_json::to_string(&key)?;\n             let deserialized_key = serde_json::from_str(&json_str)?;\n             assert_eq!(key, deserialized_key)\n        }\n\n        #[test]\n        fn iter_children_trie_leaf(trie_leaf in trie_leaf_arb()) {\n            assert!(trie_leaf.iter_children().next().is_none());\n        }\n\n        #[test]\n        fn iter_children_trie_extension(trie_extension in trie_extension_arb()) {\n            let children = if let Trie::Extension { pointer, .. } = trie_extension {\n                vec![*pointer.hash()]\n            } else {\n                unreachable!()\n            };\n            assert_eq!(children, trie_extension.iter_children().collect::<Vec<Digest>>());\n        }\n\n        #[test]\n        fn iter_children_trie_node(trie_node in trie_node_arb()) {\n            let children: Vec<Digest> = trie_node.as_pointer_block().unwrap()\n                    .as_indexed_pointers()\n                    .map(|(_index, ptr)| *ptr.hash())\n                    .collect();\n            assert_eq!(children, trie_node.iter_children().collect::<Vec<Digest>>());\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/global_state/trie_store/cache/mod.rs",
    "content": "use std::borrow::Cow;\n\nuse casper_types::{\n    bytesrepr::{self, Bytes, FromBytes, ToBytes},\n    Digest, Pointer,\n};\n\nuse crate::global_state::{\n    transaction_source::{Readable, Writable},\n    trie::{PointerBlock, Trie, RADIX},\n};\n\nuse super::{operations::common_prefix, TrieStore};\n\n#[derive(Clone, Debug, thiserror::Error, Eq, PartialEq)]\npub enum CacheError {\n    /// Root not found.\n    #[error(\"Root not found: {0:?}\")]\n    RootNotFound(Digest),\n}\n\n// Pointer used by the cache to determine if the node is stored or is loaded in memory.\n#[derive(Debug, Clone, PartialEq, Eq)]\nenum CachePointer<K, V> {\n    InMem(TrieCacheNode<K, V>),\n    Stored(Pointer),\n}\n\nimpl<K, V> CachePointer<K, V> {\n    /// Loads the node in memory from the specified store if it's not already loaded.\n    /// Returns an error if the node can't be found in the store.\n    fn load_from_store<T, S, E>(&mut self, txn: &T, store: &S) -> Result<(), E>\n    where\n        K: FromBytes,\n        V: FromBytes,\n        T: Readable<Handle = S::Handle>,\n        S: TrieStore<K, V>,\n        S::Error: From<T::Error>,\n        E: From<S::Error> + From<bytesrepr::Error> + From<CacheError>,\n    {\n        if let CachePointer::Stored(pointer) = self {\n            let Some(stored_node) = store.get(txn, pointer.hash())? else {\n                return Err(CacheError::RootNotFound(pointer.into_hash()).into());\n            };\n            let trie_cache_node = stored_node.into();\n            *self = CachePointer::InMem(trie_cache_node);\n        }\n        Ok(())\n    }\n}\n\n/// A node representation used by the cache. 
This follows the Trie implementation for easy\n/// conversion.\n#[derive(Debug, Clone, PartialEq, Eq)]\nenum TrieCacheNode<K, V> {\n    Leaf {\n        key: K,\n        value: V,\n    },\n    Branch {\n        pointer_block: Vec<Option<CachePointer<K, V>>>,\n    },\n    Extension {\n        affix: Bytes,\n        pointer: Box<CachePointer<K, V>>,\n    },\n}\n\nimpl<K, V> From<Trie<K, V>> for TrieCacheNode<K, V> {\n    fn from(node: Trie<K, V>) -> Self {\n        match node {\n            Trie::Leaf { key, value } => Self::Leaf { key, value },\n            Trie::Node { pointer_block } => {\n                let mut new_pointer_block = Vec::with_capacity(RADIX);\n                for i in 0..RADIX {\n                    new_pointer_block.push(pointer_block[i].map(|ptr| CachePointer::Stored(ptr)));\n                }\n                Self::Branch {\n                    pointer_block: new_pointer_block,\n                }\n            }\n            Trie::Extension { affix, pointer } => Self::Extension {\n                affix,\n                pointer: Box::new(CachePointer::Stored(pointer)),\n            },\n        }\n    }\n}\n\n// An in-memory cache for Trie nodes that is backed up by a store.\npub struct TrieCache<'a, K, V, S> {\n    root: TrieCacheNode<K, V>,\n    store: &'a S,\n}\n\nimpl<'a, K, V, S> TrieCache<'a, K, V, S>\nwhere\n    K: ToBytes + FromBytes + Clone + Eq,\n    V: ToBytes + FromBytes + Clone + Eq,\n    S: TrieStore<K, V> + 'a,\n{\n    pub fn new<T, E>(txn: &T, store: &'a S, root: &Digest) -> Result<Self, E>\n    where\n        T: Readable<Handle = S::Handle>,\n        S::Error: From<T::Error>,\n        E: From<S::Error> + From<bytesrepr::Error> + From<CacheError>,\n    {\n        match store.get(txn, root)? 
{\n            Some(node) => Ok(Self {\n                root: node.into(),\n                store,\n            }),\n            None => Err(CacheError::RootNotFound(*root).into()),\n        }\n    }\n\n    pub fn insert<T, E>(&mut self, key: K, value: V, txn: &T) -> Result<(), E>\n    where\n        T: Readable<Handle = S::Handle>,\n        S::Error: From<T::Error>,\n        E: From<S::Error> + From<bytesrepr::Error> + From<CacheError>,\n    {\n        let path: Vec<u8> = key.to_bytes()?;\n\n        let mut depth: usize = 0;\n        let mut current = &mut self.root;\n\n        while depth < path.len() {\n            match current {\n                TrieCacheNode::Branch { pointer_block } => {\n                    let index: usize = {\n                        assert!(depth < path.len(), \"depth must be < {}\", path.len());\n                        path[depth].into()\n                    };\n\n                    let pointer = &mut pointer_block[index];\n                    if let Some(next) = pointer {\n                        if depth == path.len() - 1 {\n                            let leaf = TrieCacheNode::Leaf { key, value };\n                            *next = CachePointer::InMem(leaf);\n                            return Ok(());\n                        } else {\n                            depth += 1;\n\n                            next.load_from_store::<_, _, E>(txn, self.store)?;\n                            if let CachePointer::InMem(next) = next {\n                                current = next;\n                            } else {\n                                unreachable!(\"Stored pointer should have been converted\");\n                            }\n                        }\n                    } else {\n                        let leaf = TrieCacheNode::Leaf { key, value };\n                        let _ = std::mem::replace(pointer, Some(CachePointer::InMem(leaf)));\n                        return Ok(());\n                    }\n                
}\n                TrieCacheNode::Leaf {\n                    key: old_key,\n                    value: old_value,\n                } => {\n                    if *old_key == key {\n                        *old_value = value;\n                    } else {\n                        let mut pointer_block = Vec::with_capacity(RADIX);\n                        pointer_block.resize_with(RADIX, || None::<CachePointer<K, V>>);\n                        let old_key_bytes = old_key.to_bytes()?;\n\n                        let shared_path = common_prefix(&old_key_bytes, &path);\n\n                        let existing_idx = old_key_bytes[shared_path.len()] as usize;\n                        pointer_block[existing_idx] =\n                            Some(CachePointer::InMem(TrieCacheNode::Leaf {\n                                key: old_key.clone(),\n                                value: old_value.clone(),\n                            }));\n\n                        let new_idx = path[shared_path.len()] as usize;\n                        pointer_block[new_idx] =\n                            Some(CachePointer::InMem(TrieCacheNode::Leaf { key, value }));\n\n                        let new_affix = { &shared_path[depth..] 
};\n                        *current = if !new_affix.is_empty() {\n                            TrieCacheNode::Extension {\n                                affix: Bytes::from(new_affix),\n                                pointer: Box::new(CachePointer::InMem(TrieCacheNode::Branch {\n                                    pointer_block,\n                                })),\n                            }\n                        } else {\n                            TrieCacheNode::Branch { pointer_block }\n                        };\n                    }\n                    return Ok(());\n                }\n                TrieCacheNode::Extension { affix, ref pointer }\n                    if path.len() < depth + affix.len()\n                        || affix.as_ref() != &path[depth..depth + affix.len()] =>\n                {\n                    // We might be trying to store a key that is shorter than the keys that are\n                    // already stored. In this case, we would need to split this extension.\n                    // We also need to split this extension if the affix changes.\n\n                    // Is there something common between the new key and the old key?\n                    let shared_prefix = common_prefix(affix, &path[depth..]);\n\n                    // Need to split the node at the byte that is different.\n                    let mut pointer_block = Vec::with_capacity(RADIX);\n                    pointer_block.resize_with(RADIX, || None::<CachePointer<K, V>>);\n\n                    // Add the new key under a leaf where the paths diverge.\n                    pointer_block[path[depth + shared_prefix.len()] as usize] =\n                        Some(CachePointer::InMem(TrieCacheNode::Leaf { key, value }));\n\n                    let post_branch_affix = &affix[shared_prefix.len() + 1..];\n                    if !post_branch_affix.is_empty() {\n                        let post_extension = TrieCacheNode::Extension {\n                         
   affix: Bytes::from(post_branch_affix),\n                            pointer: pointer.clone(),\n                        };\n                        let existing_idx = affix[shared_prefix.len()] as usize;\n                        pointer_block[existing_idx] = Some(CachePointer::InMem(post_extension));\n                    } else {\n                        let existing_idx = affix[shared_prefix.len()] as usize;\n                        pointer_block[existing_idx] = Some(*pointer.clone());\n                    }\n\n                    let new_branch = TrieCacheNode::Branch { pointer_block };\n                    let next = if !shared_prefix.is_empty() {\n                        // Create an extension node with the common part\n                        TrieCacheNode::Extension {\n                            affix: Bytes::from(shared_prefix),\n                            pointer: Box::new(CachePointer::InMem(new_branch)),\n                        }\n                    } else {\n                        new_branch\n                    };\n\n                    *current = next;\n                    return Ok(());\n                }\n                TrieCacheNode::Extension {\n                    affix,\n                    ref mut pointer,\n                } => {\n                    depth += affix.len();\n                    pointer.load_from_store::<_, _, E>(txn, self.store)?;\n                    if let CachePointer::InMem(next) = pointer.as_mut() {\n                        current = next;\n                    } else {\n                        unreachable!(\"Stored pointer should have been converted\");\n                    }\n                }\n            }\n        }\n        Ok(())\n    }\n\n    fn traverse_and_store<T, E>(\n        node: TrieCacheNode<K, V>,\n        txn: &mut T,\n        store: &S,\n    ) -> Result<Pointer, E>\n    where\n        T: Readable<Handle = S::Handle> + Writable<Handle = S::Handle>,\n        S::Error: From<T::Error>,\n        E: 
From<S::Error> + From<bytesrepr::Error> + From<CacheError>,\n    {\n        match node {\n            TrieCacheNode::Leaf { key, value } => {\n                let trie_leaf = Trie::leaf(key, value);\n                let (hash, trie_bytes) = trie_leaf.trie_hash_and_bytes()?;\n                store.put_raw(txn, &hash, Cow::from(trie_bytes))?;\n                Ok(Pointer::LeafPointer(hash))\n            }\n            TrieCacheNode::Branch { mut pointer_block } => {\n                let mut trie_pointer_block = PointerBlock::new();\n                for i in 0..RADIX {\n                    trie_pointer_block[i] = Option::take(&mut pointer_block[i])\n                        .map(|child| match child {\n                            CachePointer::InMem(in_mem_child) => {\n                                Self::traverse_and_store::<_, E>(in_mem_child, txn, store)\n                            }\n                            CachePointer::Stored(ptr) => Ok(ptr),\n                        })\n                        .transpose()?;\n                }\n\n                let trie_node = Trie::<K, V>::Node {\n                    pointer_block: Box::new(trie_pointer_block),\n                };\n                let (hash, trie_bytes) = trie_node.trie_hash_and_bytes()?;\n                store.put_raw(txn, &hash, Cow::from(trie_bytes))?;\n                Ok(Pointer::NodePointer(hash))\n            }\n            TrieCacheNode::Extension { pointer, affix } => {\n                let pointer = match *pointer {\n                    CachePointer::InMem(in_mem_ptr) => {\n                        Self::traverse_and_store::<_, E>(in_mem_ptr, txn, store)\n                    }\n                    CachePointer::Stored(ptr) => Ok(ptr),\n                }?;\n\n                let trie_extension = Trie::<K, V>::extension(affix.to_vec(), pointer);\n                let (hash, trie_bytes) = trie_extension.trie_hash_and_bytes()?;\n                store.put_raw(txn, &hash, Cow::from(trie_bytes))?;\n        
        Ok(Pointer::NodePointer(hash))\n            }\n        }\n    }\n\n    pub fn store_cache<T, E>(self, txn: &mut T) -> Result<Digest, E>\n    where\n        T: Readable<Handle = S::Handle> + Writable<Handle = S::Handle>,\n        S::Error: From<T::Error>,\n        E: From<S::Error> + From<bytesrepr::Error> + From<CacheError>,\n    {\n        Self::traverse_and_store::<_, E>(self.root, txn, self.store)\n            .map(|root_pointer| root_pointer.into_hash())\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    impl<K, V, S> TrieCache<'_, K, V, S>\n    where\n        K: ToBytes + FromBytes + Clone + Eq,\n        V: ToBytes + FromBytes + Clone + Eq,\n        S: TrieStore<K, V>,\n    {\n        fn traverse(node: TrieCacheNode<K, V>) -> Pointer {\n            match node {\n                TrieCacheNode::Leaf { key, value } => {\n                    // Process the leaf node\n                    let trie_leaf = Trie::leaf(key, value);\n                    let hash = trie_leaf.trie_hash().unwrap();\n                    Pointer::LeafPointer(hash)\n                }\n                TrieCacheNode::Branch { mut pointer_block } => {\n                    let mut trie_pointer_block = PointerBlock::new();\n                    for i in 0..RADIX {\n                        trie_pointer_block[i] = Option::take(pointer_block.get_mut(i).unwrap())\n                            .map(|child| match child {\n                                CachePointer::InMem(in_mem_child) => Self::traverse(in_mem_child),\n                                CachePointer::Stored(ptr) => ptr,\n                            });\n                    }\n\n                    let trie_node = Trie::<K, V>::Node {\n                        pointer_block: Box::new(trie_pointer_block),\n                    };\n                    let hash = trie_node.trie_hash().unwrap();\n                    Pointer::NodePointer(hash)\n                }\n                TrieCacheNode::Extension { pointer, affix } => 
{\n                    let pointer = match *pointer {\n                        CachePointer::InMem(in_mem_ptr) => Self::traverse(in_mem_ptr),\n                        CachePointer::Stored(ptr) => ptr,\n                    };\n\n                    let trie_extension = Trie::<K, V>::extension(affix.to_vec(), pointer);\n                    let hash = trie_extension.trie_hash().unwrap();\n                    Pointer::NodePointer(hash)\n                }\n            }\n        }\n\n        pub fn calculate_root_hash(self) -> Digest {\n            Self::traverse(self.root).into_hash()\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/global_state/trie_store/lmdb.rs",
    "content": "//! An LMDB-backed trie store.\n//!\n//! # Usage\n//!\n//! ```\n//! use casper_storage::global_state::store::Store;\n//! use casper_storage::global_state::transaction_source::{Transaction, TransactionSource};\n//! use casper_storage::global_state::transaction_source::lmdb::LmdbEnvironment;\n//! use casper_storage::global_state::trie::{PointerBlock, Trie};\n//! use casper_storage::global_state::trie_store::lmdb::LmdbTrieStore;\n//! use casper_types::Digest;\n//! use casper_types::global_state::Pointer;\n//! use casper_types::bytesrepr::{ToBytes, Bytes};\n//! use lmdb::DatabaseFlags;\n//! use tempfile::tempdir;\n//!\n//! // Create some leaves\n//! let leaf_1 = Trie::Leaf { key: Bytes::from([0u8, 0, 0].as_slice()), value: Bytes::from(b\"val_1\".as_slice()) };\n//! let leaf_2 = Trie::Leaf { key: Bytes::from([1u8, 0, 0].as_slice()), value: Bytes::from(b\"val_2\".as_slice()) };\n//!\n//! // Get their hashes\n//! let leaf_1_hash = Digest::hash(&leaf_1.to_bytes().unwrap());\n//! let leaf_2_hash = Digest::hash(&leaf_2.to_bytes().unwrap());\n//!\n//! // Create a node\n//! let node: Trie<Bytes, Bytes> = {\n//!     let mut pointer_block = PointerBlock::new();\n//!     pointer_block[0] = Some(Pointer::LeafPointer(leaf_1_hash));\n//!     pointer_block[1] = Some(Pointer::LeafPointer(leaf_2_hash));\n//!     let pointer_block = Box::new(pointer_block);\n//!     Trie::Node { pointer_block }\n//! };\n//!\n//! // Get its hash\n//! let node_hash = Digest::hash(&node.to_bytes().unwrap());\n//!\n//! // Create the environment and the store. For both the in-memory and\n//! // LMDB-backed implementations, the environment is the source of\n//! // transactions.\n//! let tmp_dir = tempdir().unwrap();\n//! let map_size = 4096 * 2560;  // map size should be a multiple of OS page size\n//! let max_readers = 512;\n//! let env = LmdbEnvironment::new(&tmp_dir.path().to_path_buf(), map_size, max_readers, true).unwrap();\n//! 
let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap();\n//!\n//! // First let's create a read-write transaction, persist the values, but\n//! // forget to commit the transaction.\n//! {\n//!     // Create a read-write transaction\n//!     let mut txn = env.create_read_write_txn().unwrap();\n//!\n//!     // Put the values in the store\n//!     store.put(&mut txn, &leaf_1_hash, &leaf_1).unwrap();\n//!     store.put(&mut txn, &leaf_2_hash, &leaf_2).unwrap();\n//!     store.put(&mut txn, &node_hash, &node).unwrap();\n//!\n//!     // Here we forget to commit the transaction before it goes out of scope\n//! }\n//!\n//! // Now let's check to see if the values were stored\n//! {\n//!     // Create a read transaction\n//!     let txn = env.create_read_txn().unwrap();\n//!\n//!     // Observe that nothing has been persisted to the store\n//!     for hash in [&leaf_1_hash, &leaf_2_hash, &node_hash].iter() {\n//!         // We need to use a type annotation here to help the compiler choose\n//!         // a suitable FromBytes instance\n//!         let maybe_trie: Option<Trie<Bytes, Bytes>> = store.get(&txn, hash).unwrap();\n//!         assert!(maybe_trie.is_none());\n//!     }\n//!\n//!     // Commit the read transaction.  Not strictly necessary, but better to be hygienic.\n//!     txn.commit().unwrap();\n//! }\n//!\n//! // Now let's try that again, remembering to commit the transaction this time\n//! {\n//!     // Create a read-write transaction\n//!     let mut txn = env.create_read_write_txn().unwrap();\n//!\n//!     // Put the values in the store\n//!     store.put(&mut txn, &leaf_1_hash, &leaf_1).unwrap();\n//!     store.put(&mut txn, &leaf_2_hash, &leaf_2).unwrap();\n//!     store.put(&mut txn, &node_hash, &node).unwrap();\n//!\n//!     // Commit the transaction.\n//!     txn.commit().unwrap();\n//! }\n//!\n//! // Now let's check to see if the values were stored again\n//! {\n//!     // Create a read transaction\n//!     
let txn = env.create_read_txn().unwrap();\n//!\n//!     // Get the values in the store\n//!     assert_eq!(Some(leaf_1), store.get(&txn, &leaf_1_hash).unwrap());\n//!     assert_eq!(Some(leaf_2), store.get(&txn, &leaf_2_hash).unwrap());\n//!     assert_eq!(Some(node), store.get(&txn, &node_hash).unwrap());\n//!\n//!     // Commit the read transaction.\n//!     txn.commit().unwrap();\n//! }\n//!\n//! tmp_dir.close().unwrap();\n//! ```\nuse std::{\n    borrow::Cow,\n    collections::{hash_map::Entry, HashMap},\n    sync::{Arc, Mutex},\n};\n\nuse lmdb::{Database, DatabaseFlags, Transaction};\n\nuse casper_types::{\n    bytesrepr::{self, Bytes, ToBytes},\n    Digest, Key, StoredValue,\n};\n\nuse crate::global_state::{\n    error,\n    state::CommitError,\n    store::Store,\n    transaction_source::{lmdb::LmdbEnvironment, Readable, TransactionSource, Writable},\n    trie::{LazilyDeserializedTrie, Trie},\n    trie_store::{self, TrieStore},\n};\n\n/// An LMDB-backed trie store.\n///\n/// Wraps [`lmdb::Database`].\n#[derive(Debug, Clone)]\npub struct LmdbTrieStore {\n    db: Database,\n}\n\nimpl LmdbTrieStore {\n    /// Constructor for new `LmdbTrieStore`.\n    pub fn new(\n        env: &LmdbEnvironment,\n        maybe_name: Option<&str>,\n        flags: DatabaseFlags,\n    ) -> Result<Self, error::Error> {\n        let name = Self::name(maybe_name);\n        let db = env.env().create_db(Some(&name), flags)?;\n        Ok(LmdbTrieStore { db })\n    }\n\n    /// Constructor for `LmdbTrieStore` which opens an existing lmdb store file.\n    pub fn open(env: &LmdbEnvironment, maybe_name: Option<&str>) -> Result<Self, error::Error> {\n        let name = Self::name(maybe_name);\n        let db = env.env().open_db(Some(&name))?;\n        Ok(LmdbTrieStore { db })\n    }\n\n    fn name(maybe_name: Option<&str>) -> String {\n        maybe_name\n            .map(|name| format!(\"{}-{}\", trie_store::NAME, name))\n            .unwrap_or_else(|| String::from(trie_store::NAME))\n    
}\n\n    /// Get a handle to the underlying database.\n    pub fn get_db(&self) -> Database {\n        self.db\n    }\n}\n\nimpl<K, V> Store<Digest, Trie<K, V>> for LmdbTrieStore {\n    type Error = error::Error;\n\n    type Handle = Database;\n\n    fn handle(&self) -> Self::Handle {\n        self.db\n    }\n}\n\nimpl<K, V> TrieStore<K, V> for LmdbTrieStore {}\n\n/// Cache used by the scratch trie.  The keys represent the hash of the trie being cached.  The\n/// values represent:  1) A boolean, where `false` means the trie was _not_ written and `true` means\n/// it was 2) A deserialized trie\npub(crate) type Cache = Arc<Mutex<HashMap<Digest, (bool, Bytes)>>>;\n\n/// Cached version of the trie store.\n#[derive(Clone)]\npub(crate) struct ScratchTrieStore {\n    pub(crate) cache: Cache,\n    pub(crate) store: Arc<LmdbTrieStore>,\n    pub(crate) env: Arc<LmdbEnvironment>,\n}\n\nimpl ScratchTrieStore {\n    /// Creates a new ScratchTrieStore.\n    pub fn new(store: Arc<LmdbTrieStore>, env: Arc<LmdbEnvironment>) -> Self {\n        Self {\n            store,\n            env,\n            cache: Default::default(),\n        }\n    }\n\n    /// Writes only tries which are both under the given `state_root` and dirty to the underlying\n    /// db.\n    pub fn write_root_to_db(self, state_root: Digest) -> Result<(), error::Error> {\n        let cache = &*self.cache.lock().map_err(|_| error::Error::Poison)?;\n        if !cache.contains_key(&state_root) {\n            return Err(CommitError::TrieNotFoundInCache(state_root).into());\n        }\n\n        let mut tries_to_write = vec![state_root];\n        let mut txn = self.env.create_read_write_txn()?;\n\n        while let Some(trie_hash) = tries_to_write.pop() {\n            let trie_bytes = if let Some((true, trie_bytes)) = cache.get(&trie_hash) {\n                trie_bytes\n            } else {\n                // We don't have this trie in the scratch store or it's not dirty - do nothing.\n                continue;\n      
      };\n\n            let lazy_trie: LazilyDeserializedTrie = bytesrepr::deserialize_from_slice(trie_bytes)?;\n            tries_to_write.extend(lazy_trie.iter_children());\n\n            Store::<Digest, Trie<Key, StoredValue>>::put_raw(\n                &*self.store,\n                &mut txn,\n                &trie_hash,\n                Cow::Borrowed(trie_bytes),\n            )?;\n        }\n\n        txn.commit()?;\n        Ok(())\n    }\n}\n\nimpl Store<Digest, Trie<Key, StoredValue>> for ScratchTrieStore {\n    type Error = error::Error;\n\n    type Handle = ScratchTrieStore;\n\n    fn handle(&self) -> Self::Handle {\n        self.clone()\n    }\n\n    fn get<T>(&self, txn: &T, key: &Digest) -> Result<Option<Trie<Key, StoredValue>>, Self::Error>\n    where\n        T: Readable<Handle = Self::Handle>,\n        Digest: ToBytes,\n        Trie<Key, StoredValue>: bytesrepr::FromBytes,\n        Self::Error: From<T::Error>,\n    {\n        match self.get_raw(txn, key)? {\n            None => Ok(None),\n            Some(value_bytes) => {\n                let value = bytesrepr::deserialize(value_bytes.into())?;\n                Ok(Some(value))\n            }\n        }\n    }\n\n    fn get_raw<T>(&self, txn: &T, key: &Digest) -> Result<Option<Bytes>, Self::Error>\n    where\n        T: Readable<Handle = Self::Handle>,\n        Digest: AsRef<[u8]>,\n        Self::Error: From<T::Error>,\n    {\n        let mut store = self.cache.lock().map_err(|_| error::Error::Poison)?;\n\n        let maybe_trie = store.get(key);\n\n        match maybe_trie {\n            Some((_, trie_bytes)) => Ok(Some(trie_bytes.clone())),\n            None => {\n                let handle = self.handle();\n                match txn.read(handle, key.as_ref())? 
{\n                    Some(trie_bytes) => {\n                        match store.entry(*key) {\n                            Entry::Occupied(_) => {}\n                            Entry::Vacant(v) => {\n                                v.insert((false, trie_bytes.clone()));\n                            }\n                        }\n                        Ok(Some(trie_bytes))\n                    }\n                    None => Ok(None),\n                }\n            }\n        }\n    }\n\n    fn put<T>(\n        &self,\n        txn: &mut T,\n        key: &Digest,\n        value: &Trie<Key, StoredValue>,\n    ) -> Result<(), Self::Error>\n    where\n        T: Writable<Handle = Self::Handle>,\n        Trie<Key, StoredValue>: ToBytes,\n        Self::Error: From<T::Error>,\n    {\n        self.put_raw(txn, key, Cow::Owned(value.to_bytes()?))\n    }\n\n    fn put_raw<T>(\n        &self,\n        _txn: &mut T,\n        key: &Digest,\n        value_bytes: Cow<'_, [u8]>,\n    ) -> Result<(), Self::Error>\n    where\n        T: Writable<Handle = Self::Handle>,\n        Self::Error: From<T::Error>,\n    {\n        self.cache\n            .lock()\n            .map_err(|_| error::Error::Poison)?\n            .insert(*key, (true, Bytes::from(value_bytes.into_owned())));\n        Ok(())\n    }\n}\n\nimpl TrieStore<Key, StoredValue> for ScratchTrieStore {}\n"
  },
  {
    "path": "storage/src/global_state/trie_store/mod.rs",
    "content": "//! A store for persisting `Trie` values at their hashes.\n//!\n//! See the [lmdb](lmdb/index.html#usage) modules for usage examples.\npub mod lmdb;\n/// Trie store operational logic.\npub mod operations;\n\n// An in-mem cache backed up by a store that is used to optimize batch writes.\nmod cache;\n\npub(crate) use cache::CacheError as TrieStoreCacheError;\n\n#[cfg(test)]\nmod tests;\n\nuse casper_types::Digest;\n\nuse crate::global_state::{store::Store, trie::Trie};\n\nconst NAME: &str = \"TRIE_STORE\";\n\n/// An entity which persists [`Trie`] values at their hashes.\npub trait TrieStore<K, V>: Store<Digest, Trie<K, V>> {}\n"
  },
  {
    "path": "storage/src/global_state/trie_store/operations/mod.rs",
    "content": "pub(crate) mod store_wrappers;\n#[cfg(test)]\nmod tests;\n\n#[cfg(test)]\nuse std::collections::HashSet;\nuse std::{borrow::Cow, cmp, collections::VecDeque, convert::TryInto, mem};\n\nuse num_traits::FromPrimitive;\nuse tracing::{error, warn};\n\nuse casper_types::{\n    bytesrepr::{self, Bytes, FromBytes, ToBytes},\n    global_state::{Pointer, TrieMerkleProof, TrieMerkleProofStep},\n    Digest,\n};\n\nuse crate::global_state::{\n    error::Error as GlobalStateError,\n    store::Store,\n    transaction_source::{Readable, Writable},\n    trie::{LazilyDeserializedTrie, Parents, PointerBlock, Trie, TrieTag, RADIX, USIZE_EXCEEDS_U8},\n    trie_store::TrieStore,\n};\n\nuse self::store_wrappers::NonDeserializingStore;\n\nuse super::{cache::TrieCache, TrieStoreCacheError};\n\n/// Result of attemptint to read a record from the trie store.\n#[allow(clippy::enum_variant_names)]\n#[derive(Debug, PartialEq, Eq)]\npub enum ReadResult<V> {\n    /// Requested item found in trie store.\n    Found(V),\n    /// Requested item not found in trie store.\n    NotFound,\n    /// Root hash not found in trie store.\n    RootNotFound,\n}\n\nimpl<V> ReadResult<V> {\n    /// Returns `true` if the result is [`ReadResult::Found`].\n    #[cfg(test)]\n    pub fn is_found(&self) -> bool {\n        matches!(self, ReadResult::Found(_))\n    }\n}\n\n/// Returns a value from the corresponding key at a given root in a given store\npub fn read<K, V, T, S, E>(txn: &T, store: &S, root: &Digest, key: &K) -> Result<ReadResult<V>, E>\nwhere\n    K: ToBytes + FromBytes + Eq + std::fmt::Debug,\n    V: ToBytes + FromBytes,\n    T: Readable<Handle = S::Handle>,\n    S: TrieStore<K, V>,\n    S::Error: From<T::Error>,\n    E: From<S::Error> + From<bytesrepr::Error>,\n{\n    let path: Vec<u8> = key.to_bytes()?;\n\n    let store = store_wrappers::OnceDeserializingStore::new(store);\n\n    let mut depth: usize = 0;\n    let mut current: Trie<K, V> = match store.get(txn, root)? 
{\n        Some(root) => root,\n        None => return Ok(ReadResult::RootNotFound),\n    };\n\n    loop {\n        match current {\n            Trie::Leaf {\n                key: leaf_key,\n                value: leaf_value,\n            } => {\n                let result = if *key == leaf_key {\n                    ReadResult::Found(leaf_value)\n                } else {\n                    // Keys may not match in the case of a compressed path from\n                    // a Node directly to a Leaf\n                    ReadResult::NotFound\n                };\n                return Ok(result);\n            }\n            Trie::Node { pointer_block } => {\n                let index: usize = {\n                    assert!(depth < path.len(), \"depth must be < {}\", path.len());\n                    path[depth].into()\n                };\n                let maybe_pointer: Option<Pointer> = {\n                    assert!(index < RADIX, \"key length must be < {}\", RADIX);\n                    pointer_block[index]\n                };\n\n                match maybe_pointer {\n                    Some(pointer) => match store.get(txn, pointer.hash()) {\n                        Ok(Some(next)) => {\n                            depth += 1;\n                            current = next;\n                        }\n                        Ok(None) => {\n                            warn!(\n                                \"No trie value at key: {:?} (reading from key: {:?})\",\n                                pointer.hash(),\n                                key\n                            );\n                            return Ok(ReadResult::NotFound);\n                        }\n                        Err(error) => {\n                            return Err(error.into());\n                        }\n                    },\n                    None => {\n                        return Ok(ReadResult::NotFound);\n                    }\n                }\n            }\n         
   Trie::Extension { affix, pointer } => {\n                let sub_path = &path[depth..depth + affix.len()];\n                if sub_path == affix.as_slice() {\n                    match store.get(txn, pointer.hash())? {\n                        Some(next) => {\n                            depth += affix.len();\n                            current = next;\n                        }\n                        None => {\n                            warn!(\n                                \"No trie value at key: {:?} (reading from key: {:?})\",\n                                pointer.hash(),\n                                key\n                            );\n                            return Ok(ReadResult::NotFound);\n                        }\n                    }\n                } else {\n                    return Ok(ReadResult::NotFound);\n                }\n            }\n        }\n    }\n}\n\n/// Same as [`read`], except that a [`TrieMerkleProof`] is generated and returned along with the key\n/// and the value given the root and store.\npub fn read_with_proof<K, V, T, S, E>(\n    txn: &T,\n    store: &S,\n    root: &Digest,\n    key: &K,\n) -> Result<ReadResult<TrieMerkleProof<K, V>>, E>\nwhere\n    K: ToBytes + FromBytes + Eq + std::fmt::Debug,\n    V: ToBytes + FromBytes,\n    T: Readable<Handle = S::Handle>,\n    S: TrieStore<K, V>,\n    S::Error: From<T::Error>,\n    E: From<S::Error> + From<bytesrepr::Error>,\n{\n    let mut proof_steps = VecDeque::new();\n    let path: Vec<u8> = key.to_bytes()?;\n\n    let mut depth: usize = 0;\n    let mut current: Trie<K, V> = match store.get(txn, root)? 
{\n        Some(root) => root,\n        None => return Ok(ReadResult::RootNotFound),\n    };\n    loop {\n        match current {\n            Trie::Leaf {\n                key: leaf_key,\n                value,\n            } => {\n                if *key != leaf_key {\n                    return Ok(ReadResult::NotFound);\n                }\n                let key = leaf_key;\n                return Ok(ReadResult::Found(TrieMerkleProof::new(\n                    key,\n                    value,\n                    proof_steps,\n                )));\n            }\n            Trie::Node { pointer_block } => {\n                let hole_index: usize = {\n                    assert!(depth < path.len(), \"depth must be < {}\", path.len());\n                    path[depth].into()\n                };\n                let pointer: Pointer = {\n                    assert!(hole_index < RADIX, \"key length must be < {}\", RADIX);\n                    match pointer_block[hole_index] {\n                        Some(pointer) => pointer,\n                        None => return Ok(ReadResult::NotFound),\n                    }\n                };\n                let indexed_pointers_with_hole = pointer_block\n                    .as_indexed_pointers()\n                    .filter(|(index, _)| *index as usize != hole_index)\n                    .collect();\n                let next = match store.get(txn, pointer.hash())? 
{\n                    Some(next) => next,\n                    None => {\n                        warn!(\n                            \"No trie value at key: {:?} (reading from path: {:?})\",\n                            pointer.hash(),\n                            path\n                        );\n                        return Ok(ReadResult::NotFound);\n                    }\n                };\n                depth += 1;\n                current = next;\n                let hole_index: u8 = hole_index.try_into().expect(USIZE_EXCEEDS_U8);\n                proof_steps.push_front(TrieMerkleProofStep::node(\n                    hole_index,\n                    indexed_pointers_with_hole,\n                ));\n            }\n            Trie::Extension { affix, pointer } => {\n                let sub_path = &path[depth..depth + affix.len()];\n                if sub_path != affix.as_slice() {\n                    return Ok(ReadResult::NotFound);\n                };\n\n                let next = match store.get(txn, pointer.hash())? 
{\n                    Some(next) => next,\n                    None => {\n                        warn!(\n                            \"No trie value at key: {:?} (reading from path: {:?})\",\n                            pointer.hash(),\n                            path\n                        );\n                        return Ok(ReadResult::NotFound);\n                    }\n                };\n                depth += affix.len();\n                current = next;\n                proof_steps.push_front(TrieMerkleProofStep::extension(affix.into()));\n            }\n        }\n    }\n}\n\n/// Given a serialized trie, find any children that are referenced but not present in the database.\npub fn missing_children<K, V, T, S, E>(\n    txn: &T,\n    store: &S,\n    trie_raw: &[u8],\n) -> Result<Vec<Digest>, E>\nwhere\n    K: ToBytes + FromBytes + Eq + std::fmt::Debug,\n    V: ToBytes + FromBytes + std::fmt::Debug,\n    T: Readable<Handle = S::Handle>,\n    S: TrieStore<K, V>,\n    S::Error: From<T::Error>,\n    E: From<S::Error> + From<bytesrepr::Error>,\n{\n    // Optimization: Don't deserialize leaves as they have no descendants.\n    if let Some(TrieTag::Leaf) = trie_raw.first().copied().and_then(TrieTag::from_u8) {\n        return Ok(vec![]);\n    }\n\n    // Parse the trie, handling errors gracefully.\n    let trie = match bytesrepr::deserialize_from_slice(trie_raw) {\n        Ok(trie) => trie,\n        Err(err) => {\n            error!(?err, \"unable to parse trie\");\n            return Err(err.into());\n        }\n    };\n\n    let is_present = |trie_key| matches!(store.get_raw(txn, &trie_key), Ok(Some(_)));\n\n    Ok(match trie {\n        // Should be unreachable due to checking the first byte as a shortcut above.\n        Trie::<K, V>::Leaf { .. 
} => {\n            error!(\"did not expect to see a trie leaf in `missing_children` after shortcut\");\n            vec![]\n        }\n        // If we hit a pointer block, queue up all of the nodes it points to\n        Trie::Node { pointer_block } => pointer_block\n            .as_indexed_pointers()\n            .map(|(_, pointer)| *pointer.hash())\n            .filter(|pointer_hash| !is_present(*pointer_hash))\n            .collect(),\n        // If we hit an extension block, add its pointer to the queue\n        Trie::Extension { pointer, .. } => {\n            let trie_key = pointer.into_hash();\n            if is_present(trie_key) {\n                vec![]\n            } else {\n                vec![trie_key]\n            }\n        }\n    })\n}\n\nstruct TrieScanRaw<K, V> {\n    tip: LazilyDeserializedTrie,\n    parents: Parents<K, V>,\n}\n\nimpl<K, V> TrieScanRaw<K, V> {\n    fn new(tip: LazilyDeserializedTrie, parents: Parents<K, V>) -> Self {\n        TrieScanRaw { tip, parents }\n    }\n}\n\n/// Returns a [`TrieScanRaw`] from the given key at a given root in a given store.\n/// A scan consists of the deepest trie variant found at that key, a.k.a. the\n/// \"tip\", along the with the parents of that variant. Parents are ordered by\n/// their depth from the root (shallow to deep). 
The tip is not parsed.\nfn scan_raw<K, V, T, S, E>(\n    txn: &T,\n    store: &NonDeserializingStore<K, V, S>,\n    key_bytes: &[u8],\n    root_bytes: Bytes,\n) -> Result<TrieScanRaw<K, V>, E>\nwhere\n    K: ToBytes + FromBytes + Clone,\n    V: ToBytes + FromBytes + Clone,\n    T: Readable<Handle = S::Handle>,\n    S: TrieStore<K, V>,\n    S::Error: From<T::Error>,\n    E: From<S::Error> + From<bytesrepr::Error>,\n{\n    let path = key_bytes;\n\n    let mut current = root_bytes;\n    let mut depth: usize = 0;\n    let mut acc: Parents<K, V> = Vec::new();\n\n    loop {\n        let maybe_trie_leaf = bytesrepr::deserialize_from_slice(&current)?;\n        match maybe_trie_leaf {\n            leaf_bytes @ LazilyDeserializedTrie::Leaf(_) => {\n                return Ok(TrieScanRaw::new(leaf_bytes, acc))\n            }\n            LazilyDeserializedTrie::Node { pointer_block } => {\n                let index = {\n                    assert!(depth < path.len(), \"depth must be < {}\", path.len());\n                    path[depth]\n                };\n                let maybe_pointer: Option<Pointer> = {\n                    let index: usize = index.into();\n                    assert!(index < RADIX, \"index must be < {}\", RADIX);\n                    pointer_block[index]\n                };\n                let pointer = match maybe_pointer {\n                    Some(pointer) => pointer,\n                    None => {\n                        return Ok(TrieScanRaw::new(\n                            LazilyDeserializedTrie::Node { pointer_block },\n                            acc,\n                        ));\n                    }\n                };\n                match store.get_raw(txn, pointer.hash())? 
{\n                    Some(next) => {\n                        current = next;\n                        depth += 1;\n                        acc.push((index, Trie::Node { pointer_block }))\n                    }\n                    None => {\n                        panic!(\n                            \"No trie value at key: {:?} (reading from path: {:?})\",\n                            pointer.hash(),\n                            path\n                        );\n                    }\n                }\n            }\n            LazilyDeserializedTrie::Extension { affix, pointer } => {\n                if path.len() < depth + affix.len() {\n                    // We might be trying to store a key that is shorter than the keys that are\n                    // already stored. In this case, we would need to split this extension.\n                    return Ok(TrieScanRaw::new(\n                        LazilyDeserializedTrie::Extension { affix, pointer },\n                        acc,\n                    ));\n                }\n                let sub_path = &path[depth..depth + affix.len()];\n                if sub_path != affix.as_slice() {\n                    return Ok(TrieScanRaw::new(\n                        LazilyDeserializedTrie::Extension { affix, pointer },\n                        acc,\n                    ));\n                }\n                match store.get_raw(txn, pointer.hash())? 
{\n                    Some(next) => {\n                        let index = {\n                            assert!(depth < path.len(), \"depth must be < {}\", path.len());\n                            path[depth]\n                        };\n                        current = next;\n                        depth += affix.len();\n                        acc.push((index, Trie::extension(affix.into(), pointer)))\n                    }\n                    None => {\n                        panic!(\n                            \"No trie value at key: {:?} (reading from path: {:?})\",\n                            pointer.hash(),\n                            path\n                        );\n                    }\n                }\n            }\n        }\n    }\n}\n\n/// Result of attempting to prune an item from the trie store.\n#[derive(Debug, PartialEq, Eq)]\npub enum TriePruneResult {\n    /// Successfully pruned item from trie store.\n    Pruned(Digest),\n    /// Requested key not found in trie store.\n    MissingKey,\n    /// Root hash not found in trie store.\n    RootNotFound,\n    /// Prune failure.\n    Failure(GlobalStateError),\n}\n\n/// Delete provided key from a global state so it is not reachable from a resulting state root hash.\npub(crate) fn prune<K, V, T, S, E>(\n    txn: &mut T,\n    store: &S,\n    root: &Digest,\n    keys_to_prune: &K,\n) -> Result<TriePruneResult, E>\nwhere\n    K: ToBytes + FromBytes + Clone + PartialEq + std::fmt::Debug,\n    V: ToBytes + FromBytes + Clone,\n    T: Readable<Handle = S::Handle> + Writable<Handle = S::Handle>,\n    S: TrieStore<K, V>,\n    S::Error: From<T::Error>,\n    E: From<S::Error> + From<bytesrepr::Error>,\n{\n    let store = store_wrappers::NonDeserializingStore::new(store);\n    let root_trie_bytes = match store.get_raw(txn, root)? 
{\n        None => return Ok(TriePruneResult::RootNotFound),\n        Some(root_trie) => root_trie,\n    };\n\n    let key_bytes = keys_to_prune.to_bytes()?;\n    let TrieScanRaw { tip, mut parents } =\n        scan_raw::<_, _, _, _, E>(txn, &store, &key_bytes, root_trie_bytes)?;\n\n    // Check that tip is a leaf\n    match tip {\n        LazilyDeserializedTrie::Leaf(leaf_bytes)\n            if {\n                // Partially deserialize a key of a leaf node to ensure that we can only continue if\n                // the key matches what we're looking for.\n                // _rem contains bytes of serialized V, but we don't need to inspect it.\n                let (key, _rem) = leaf_bytes.try_deserialize_leaf_key::<K>()?;\n                key == *keys_to_prune\n            } => {}\n        _ => return Ok(TriePruneResult::MissingKey),\n    }\n\n    let mut new_elements: Vec<(Digest, Trie<K, V>)> = Vec::new();\n\n    while let Some((idx, parent)) = parents.pop() {\n        match (new_elements.last_mut(), parent) {\n            (_, Trie::Leaf { .. }) => panic!(\"Should not find leaf\"),\n            (None, Trie::Extension { .. }) => panic!(\"Extension node should never end in leaf\"),\n            (Some((_, Trie::Leaf { .. 
})), _) => panic!(\"New elements should never contain a leaf\"),\n            // The parent is the node which pointed to the leaf we deleted, and that leaf had\n            // multiple siblings.\n            (None, Trie::Node { mut pointer_block }) if pointer_block.child_count() > 2 => {\n                let trie_node: Trie<K, V> = {\n                    pointer_block[idx as usize] = None;\n                    Trie::Node { pointer_block }\n                };\n                let trie_key = trie_node.trie_hash()?;\n                new_elements.push((trie_key, trie_node))\n            }\n            // The parent is the node which pointed to the leaf we deleted, and that leaf had one or\n            // zero siblings.\n            (None, Trie::Node { mut pointer_block }) => {\n                let (sibling_idx, sibling_pointer) = match pointer_block\n                    .as_indexed_pointers()\n                    .find(|(jdx, _)| idx != *jdx)\n                {\n                    // There are zero siblings.  Elsewhere we maintain the invariant that only the\n                    // root node can contain a single leaf.  Therefore the parent is the root node.\n                    // The resulting output is just the empty node and nothing else.\n                    None => {\n                        let trie_node = Trie::Node {\n                            pointer_block: Box::new(PointerBlock::new()),\n                        };\n                        let trie_key = trie_node.trie_hash()?;\n                        new_elements.push((trie_key, trie_node));\n                        break;\n                    }\n                    Some((sibling_idx, pointer)) => (sibling_idx, pointer),\n                };\n                // There is one sibling.\n                match (sibling_pointer, parents.pop()) {\n                    (_, Some((_, Trie::Leaf { .. }))) => panic!(\"Should not have leaf in scan\"),\n                    // There is no grandparent.  
Therefore the parent is the root node.  Output the\n                    // root node with the index zeroed out.\n                    (_, None) => {\n                        pointer_block[idx as usize] = None;\n                        let trie_node = Trie::Node { pointer_block };\n                        let trie_key = trie_node.trie_hash()?;\n                        new_elements.push((trie_key, trie_node));\n                        break;\n                    }\n                    // The sibling is a leaf and the grandparent is a node.  Reseat the single leaf\n                    // sibling into the grandparent.\n                    (Pointer::LeafPointer(..), Some((idx, Trie::Node { mut pointer_block }))) => {\n                        pointer_block[idx as usize] = Some(sibling_pointer);\n                        let trie_node = Trie::Node { pointer_block };\n                        let trie_key = trie_node.trie_hash()?;\n                        new_elements.push((trie_key, trie_node))\n                    }\n                    // The sibling is a leaf and the grandparent is an extension.\n                    (Pointer::LeafPointer(..), Some((_, Trie::Extension { .. }))) => {\n                        match parents.pop() {\n                            None => panic!(\"Root node cannot be an extension node\"),\n                            Some((_, Trie::Leaf { .. })) => panic!(\"Should not find leaf\"),\n                            Some((_, Trie::Extension { .. })) => {\n                                panic!(\"Extension cannot extend to an extension\")\n                            }\n                            // The great-grandparent is a node. 
Reseat the single leaf sibling into\n                            // the position the grandparent was in.\n                            Some((idx, Trie::Node { mut pointer_block })) => {\n                                pointer_block[idx as usize] = Some(sibling_pointer);\n                                let trie_node = Trie::Node { pointer_block };\n                                let trie_key = trie_node.trie_hash()?;\n                                new_elements.push((trie_key, trie_node))\n                            }\n                        }\n                    }\n                    // The single sibling is a node or an extension, and a grandparent exists.\n                    // Therefore the parent is not the root\n                    (Pointer::NodePointer(sibling_trie_key), Some((idx, grandparent))) => {\n                        // Push the grandparent back onto the parents so it may be processed later.\n                        parents.push((idx, grandparent));\n                        // Elsewhere we maintain the invariant that all trie keys have corresponding\n                        // trie values.\n                        let sibling_trie = store\n                            .get(txn, &sibling_trie_key)?\n                            .expect(\"should have sibling\");\n                        match sibling_trie {\n                            Trie::Leaf { .. } => {\n                                panic!(\"Node pointer should not point to leaf\")\n                            }\n                            // The single sibling is a node, and there exists a grandparent.\n                            // Therefore the parent is not the root.  We output an extension to\n                            // replace the parent, with a single byte corresponding to the sibling\n                            // index.  
In the next loop iteration, we will handle the case where\n                            // this extension might need to be combined with a grandparent\n                            // extension.\n                            Trie::Node { .. } => {\n                                let new_extension: Trie<K, V> =\n                                    Trie::extension(vec![sibling_idx], sibling_pointer);\n                                let trie_key = new_extension.trie_hash()?;\n                                new_elements.push((trie_key, new_extension))\n                            }\n                            // The single sibling is an extension.  We output an extension to\n                            // replace the parent, prepending the\n                            // sibling index to the sibling's affix.  In\n                            // the next loop iteration, we will handle the case where this extension\n                            // might need to be combined with a grandparent extension.\n                            Trie::Extension {\n                                affix: extension_affix,\n                                pointer,\n                            } => {\n                                let mut new_affix = vec![sibling_idx];\n                                new_affix.extend(Vec::<u8>::from(extension_affix));\n                                let new_extension: Trie<K, V> = Trie::extension(new_affix, pointer);\n                                let trie_key = new_extension.trie_hash()?;\n                                new_elements.push((trie_key, new_extension))\n                            }\n                        }\n                    }\n                }\n            }\n            // The parent is a pointer block, and we are propagating a node or extension upwards.\n            // It is impossible to propagate a leaf upwards.  
Reseat the thing we are propagating\n            // into the parent.\n            (Some((trie_key, _)), Trie::Node { mut pointer_block }) => {\n                let trie_node: Trie<K, V> = {\n                    pointer_block[idx as usize] = Some(Pointer::NodePointer(*trie_key));\n                    Trie::Node { pointer_block }\n                };\n                let trie_key = trie_node.trie_hash()?;\n                new_elements.push((trie_key, trie_node))\n            }\n            // The parent is an extension, and we are outputting an extension.  Prepend the parent\n            // affix to affix of the output extension, mutating the output in place.  This is the\n            // only mutate-in-place.\n            (\n                Some((\n                    trie_key,\n                    Trie::Extension {\n                        affix: child_affix,\n                        pointer,\n                    },\n                )),\n                Trie::Extension { affix, .. },\n            ) => {\n                let mut new_affix: Vec<u8> = affix.into();\n                new_affix.extend_from_slice(child_affix.as_slice());\n                *child_affix = new_affix.into();\n                *trie_key = {\n                    let new_extension: Trie<K, V> =\n                        Trie::extension(child_affix.to_owned().into(), pointer.to_owned());\n                    new_extension.trie_hash()?\n                }\n            }\n            // The parent is an extension and the new element is a pointer block.  The next element\n            // we add will be an extension to the pointer block we are going to add.\n            (Some((trie_key, Trie::Node { .. })), Trie::Extension { affix, .. 
}) => {\n                let pointer = Pointer::NodePointer(*trie_key);\n                let trie_extension = Trie::Extension { affix, pointer };\n                let trie_key = trie_extension.trie_hash()?;\n                new_elements.push((trie_key, trie_extension))\n            }\n        }\n    }\n    for (hash, element) in new_elements.iter() {\n        store.put(txn, hash, element)?;\n    }\n    // The hash of the final trie in the new elements is the new root\n    let new_root = new_elements\n        .pop()\n        .map(|(hash, _)| hash)\n        .unwrap_or_else(|| root.to_owned());\n\n    Ok(TriePruneResult::Pruned(new_root))\n}\n\n#[allow(clippy::type_complexity)]\nfn rehash<K, V>(\n    mut tip: Trie<K, V>,\n    parents: Parents<K, V>,\n) -> Result<Vec<(Digest, Trie<K, V>)>, bytesrepr::Error>\nwhere\n    K: ToBytes + Clone,\n    V: ToBytes + Clone,\n{\n    let mut ret: Vec<(Digest, Trie<K, V>)> = Vec::new();\n    let mut tip_hash = tip.trie_hash()?;\n    ret.push((tip_hash, tip.to_owned()));\n\n    for (index, parent) in parents.into_iter().rev() {\n        match parent {\n            Trie::Leaf { .. } => {\n                panic!(\"parents should not contain any leaves\");\n            }\n            Trie::Node { mut pointer_block } => {\n                tip = {\n                    let pointer = match tip {\n                        Trie::Leaf { .. } => Pointer::LeafPointer(tip_hash),\n                        Trie::Node { .. } => Pointer::NodePointer(tip_hash),\n                        Trie::Extension { .. 
} => Pointer::NodePointer(tip_hash),\n                    };\n                    pointer_block[index.into()] = Some(pointer);\n                    Trie::Node { pointer_block }\n                };\n                tip_hash = tip.trie_hash()?;\n                ret.push((tip_hash, tip.to_owned()))\n            }\n            Trie::Extension { affix, pointer } => {\n                tip = {\n                    let pointer = pointer.update(tip_hash);\n                    Trie::Extension { affix, pointer }\n                };\n                tip_hash = tip.trie_hash()?;\n                ret.push((tip_hash, tip.to_owned()))\n            }\n        }\n    }\n    Ok(ret)\n}\n\npub(super) fn common_prefix<A: Eq + Clone>(ls: &[A], rs: &[A]) -> Vec<A> {\n    ls.iter()\n        .zip(rs.iter())\n        .take_while(|(l, r)| l == r)\n        .map(|(l, _)| l.to_owned())\n        .collect()\n}\n\nfn get_parents_path<K, V>(parents: &[(u8, Trie<K, V>)]) -> Vec<u8> {\n    let mut ret = Vec::new();\n    for (index, element) in parents.iter() {\n        if let Trie::Extension { affix, .. } = element {\n            ret.extend(affix);\n        } else {\n            ret.push(index.to_owned());\n        }\n    }\n    ret\n}\n\n/// Takes a path to a leaf, that leaf's parent node, and the parents of that\n/// node, and adds the node to the parents.\n///\n/// This function will panic if the path to the leaf and the path to its\n/// parent node do not share a common prefix.\nfn add_node_to_parents<K, V>(\n    path_to_leaf: &[u8],\n    new_parent_node: Trie<K, V>,\n    mut parents: Parents<K, V>,\n) -> Parents<K, V>\nwhere\n    K: ToBytes,\n    V: ToBytes,\n{\n    // TODO: add is_node() method to Trie\n    match new_parent_node {\n        Trie::Node { .. 
} => (),\n        _ => panic!(\"new_parent must be a node\"),\n    }\n    // The current depth will be the length of the path to the new parent node.\n    let depth: usize = {\n        // Get the path to this node\n        let path_to_node: Vec<u8> = get_parents_path(&parents);\n        // Check that the path to the node is a prefix of the current path\n        let current_path = common_prefix(path_to_leaf, &path_to_node);\n        assert_eq!(current_path, path_to_node);\n        // Get the length\n        path_to_node.len()\n    };\n    // Index path by current depth;\n    let index = {\n        assert!(\n            depth < path_to_leaf.len(),\n            \"depth must be < {}\",\n            path_to_leaf.len()\n        );\n        path_to_leaf[depth]\n    };\n    // Add node to parents, along with index to modify\n    parents.push((index, new_parent_node));\n    parents\n}\n\n/// Takes paths to a new leaf and an existing leaf that share a common prefix,\n/// along with the parents of the existing leaf. Creates a new node (adding a\n/// possible parent extension for it to parents) which contains the existing\n/// leaf.  Returns the new node and parents, so that they can be used by\n/// [`add_node_to_parents`].\n#[allow(clippy::type_complexity)]\nfn reparent_leaf<K, V>(\n    new_leaf_path: &[u8],\n    existing_leaf_path: &[u8],\n    parents: Parents<K, V>,\n) -> Result<(Trie<K, V>, Parents<K, V>), bytesrepr::Error>\nwhere\n    K: ToBytes,\n    V: ToBytes,\n{\n    let mut parents = parents;\n    let (child_index, parent) = parents.pop().expect(\"parents should not be empty\");\n    let pointer_block = match parent {\n        Trie::Node { pointer_block } => pointer_block,\n        _ => panic!(\"A leaf should have a node for its parent\"),\n    };\n    // Get the path that the new leaf and existing leaf share\n    let shared_path = common_prefix(new_leaf_path, existing_leaf_path);\n    // Assemble a new node to hold the existing leaf. 
The new leaf will\n    // be added later during the add_parent_node and rehash phase.\n    let new_node = {\n        let index = existing_leaf_path[shared_path.len()];\n        let existing_leaf_pointer =\n            pointer_block[<usize>::from(child_index)].expect(\"parent has lost the existing leaf\");\n        Trie::node(&[(index, existing_leaf_pointer)])\n    };\n    // Re-add the parent node to parents\n    parents.push((child_index, Trie::Node { pointer_block }));\n    // Create an affix for a possible extension node\n    let affix = {\n        let parents_path = get_parents_path(&parents);\n        &shared_path[parents_path.len()..]\n    };\n    // If the affix is non-empty, create an extension node and add it\n    // to parents.\n    if !affix.is_empty() {\n        let new_node_hash = new_node.trie_hash()?;\n        let new_extension = Trie::extension(affix.to_vec(), Pointer::NodePointer(new_node_hash));\n        parents.push((child_index, new_extension));\n    }\n    Ok((new_node, parents))\n}\n\nstruct SplitResult<K, V> {\n    new_node: Trie<K, V>,\n    parents: Parents<K, V>,\n    maybe_hashed_child_extension: Option<(Digest, Trie<K, V>)>,\n}\n\n/// Takes a path to a new leaf, an existing extension that leaf collides with,\n/// and the parents of that extension.  Creates a new node and possible parent\n/// and child extensions.  The node pointer contained in the existing extension\n/// is repositioned in the new node or the possible child extension.  The\n/// possible parent extension is added to parents.  
Returns the new node,\n/// parents, and the possible child extension (paired with its hash).\n/// The new node and parents can be used by [`add_node_to_parents`], and the\n/// new hashed child extension can be added to the list of new trie elements.\nfn split_extension<K, V>(\n    new_leaf_path: &[u8],\n    existing_extension: Trie<K, V>,\n    mut parents: Parents<K, V>,\n) -> Result<SplitResult<K, V>, bytesrepr::Error>\nwhere\n    K: ToBytes + Clone,\n    V: ToBytes + Clone,\n{\n    // TODO: add is_extension() method to Trie\n    let (affix, pointer) = match existing_extension {\n        Trie::Extension { affix, pointer } => (affix, pointer),\n        _ => panic!(\"existing_extension must be an extension\"),\n    };\n    let parents_path = get_parents_path(&parents);\n    // Get the path to the existing extension node\n    let existing_extension_path: Vec<u8> =\n        parents_path.iter().chain(affix.iter()).cloned().collect();\n    // Get the path that the new leaf and existing leaf share\n    let shared_path = common_prefix(new_leaf_path, &existing_extension_path);\n    // Create an affix for a possible parent extension above the new\n    // node.\n    let parent_extension_affix = shared_path[parents_path.len()..].to_vec();\n    // Create an affix for a possible child extension between the new\n    // node and the node that the existing extension pointed to.\n    let child_extension_affix = affix[parent_extension_affix.len() + 1..].to_vec();\n    // Create a child extension (paired with its hash) if necessary\n    let maybe_hashed_child_extension: Option<(Digest, Trie<K, V>)> =\n        if child_extension_affix.is_empty() {\n            None\n        } else {\n            let child_extension = Trie::extension(child_extension_affix.to_vec(), pointer);\n            let child_extension_hash = child_extension.trie_hash()?;\n            Some((child_extension_hash, child_extension))\n        };\n    // Assemble a new node.\n    let new_node: Trie<K, V> = {\n        
let index = existing_extension_path[shared_path.len()];\n        let pointer = maybe_hashed_child_extension\n            .to_owned()\n            .map_or(pointer, |(hash, _)| Pointer::NodePointer(hash));\n        Trie::node(&[(index, pointer)])\n    };\n    // Create a parent extension if necessary\n    if !parent_extension_affix.is_empty() {\n        let new_node_hash = new_node.trie_hash()?;\n        let parent_extension = Trie::extension(\n            parent_extension_affix.to_vec(),\n            Pointer::NodePointer(new_node_hash),\n        );\n        parents.push((parent_extension_affix[0], parent_extension));\n    }\n    Ok(SplitResult {\n        new_node,\n        parents,\n        maybe_hashed_child_extension,\n    })\n}\n\n/// Result of attemptint to write to trie store.\n#[derive(Debug, PartialEq, Eq)]\npub enum WriteResult {\n    /// Record written to trie store.\n    Written(Digest),\n    /// Record already exists in trie store.\n    AlreadyExists,\n    /// Requested global state root hash does not exist in trie store.\n    RootNotFound,\n}\n\n/// Write to trie store.\npub fn write<K, V, T, S, E>(\n    txn: &mut T,\n    store: &S,\n    root: &Digest,\n    key: &K,\n    value: &V,\n) -> Result<WriteResult, E>\nwhere\n    K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug,\n    V: ToBytes + FromBytes + Clone + Eq,\n    T: Readable<Handle = S::Handle> + Writable<Handle = S::Handle>,\n    S: TrieStore<K, V>,\n    S::Error: From<T::Error>,\n    E: From<S::Error> + From<bytesrepr::Error>,\n{\n    let store = store_wrappers::NonDeserializingStore::new(store);\n    match store.get_raw(txn, root)? 
{\n        None => Ok(WriteResult::RootNotFound),\n        Some(current_root_bytes) => {\n            let new_leaf = Trie::Leaf {\n                key: key.to_owned(),\n                value: value.to_owned(),\n            };\n            let path: Vec<u8> = key.to_bytes()?;\n            let TrieScanRaw { tip, parents } =\n                scan_raw::<K, V, T, S, E>(txn, &store, &path, current_root_bytes)?;\n            let new_elements: Vec<(Digest, Trie<K, V>)> = match tip {\n                LazilyDeserializedTrie::Leaf(leaf_bytes) => {\n                    let (existing_leaf_key, existing_value_bytes) =\n                        leaf_bytes.try_deserialize_leaf_key()?;\n\n                    if key != &existing_leaf_key {\n                        // If the \"tip\" is an existing leaf with a different key than\n                        // the new leaf, then we are in a situation where the new leaf\n                        // shares some common prefix with the existing leaf.\n                        let existing_leaf_path = existing_leaf_key.to_bytes()?;\n                        let (new_node, parents) =\n                            reparent_leaf(&path, &existing_leaf_path, parents)?;\n                        let parents = add_node_to_parents(&path, new_node, parents);\n                        rehash(new_leaf, parents)?\n                    } else {\n                        let new_value_bytes = value.to_bytes()?;\n                        if new_value_bytes != existing_value_bytes {\n                            // If the \"tip\" is an existing leaf with the same key as the\n                            // new leaf, but the existing leaf and new leaf have different\n                            // values, then we are in the situation where we are \"updating\"\n                            // an existing leaf.\n                            rehash(new_leaf, parents)?\n                        } else {\n                            // Both key and values are the same.\n          
                  // If the \"tip\" is the same as the new leaf, then the leaf\n                            // is already in the Trie.\n                            Vec::new()\n                        }\n                    }\n                }\n                // If the \"tip\" is an existing node, then we can add a pointer\n                // to the new leaf to the node's pointer block.\n                node @ LazilyDeserializedTrie::Node { .. } => {\n                    let parents = add_node_to_parents(&path, node.try_into()?, parents);\n                    rehash(new_leaf, parents)?\n                }\n                // If the \"tip\" is an extension node, then we must modify or\n                // replace it, adding a node where necessary.\n                extension @ LazilyDeserializedTrie::Extension { .. } => {\n                    let SplitResult {\n                        new_node,\n                        parents,\n                        maybe_hashed_child_extension,\n                    } = split_extension(&path, extension.try_into()?, parents)?;\n                    let parents = add_node_to_parents(&path, new_node, parents);\n                    if let Some(hashed_extension) = maybe_hashed_child_extension {\n                        let mut ret = vec![hashed_extension];\n                        ret.extend(rehash(new_leaf, parents)?);\n                        ret\n                    } else {\n                        rehash(new_leaf, parents)?\n                    }\n                }\n            };\n            if new_elements.is_empty() {\n                return Ok(WriteResult::AlreadyExists);\n            }\n            let mut root_hash = root.to_owned();\n            for (hash, element) in new_elements.iter() {\n                store.put(txn, hash, element)?;\n                root_hash = *hash;\n            }\n            Ok(WriteResult::Written(root_hash))\n        }\n    }\n}\n\n/// Batch write to trie store.\npub fn batch_write<K, V, I, T, S, 
E>(\n    txn: &mut T,\n    store: &S,\n    root: &Digest,\n    values: I,\n) -> Result<Digest, E>\nwhere\n    K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug,\n    V: ToBytes + FromBytes + Clone + Eq,\n    I: Iterator<Item = (K, V)>,\n    T: Readable<Handle = S::Handle> + Writable<Handle = S::Handle>,\n    S: TrieStore<K, V>,\n    S::Error: From<T::Error>,\n    E: From<S::Error> + From<bytesrepr::Error> + From<TrieStoreCacheError>,\n{\n    let mut cache = TrieCache::<K, V, _>::new::<_, E>(txn, store, root)?;\n\n    for (key, value) in values {\n        cache.insert::<_, E>(key, value, txn)?;\n    }\n    cache.store_cache::<_, E>(txn)\n}\n\n/// Puts a trie pointer block, extension node or leaf into the trie.\npub fn put_trie<K, V, T, S, E>(txn: &mut T, store: &S, trie_bytes: &[u8]) -> Result<Digest, E>\nwhere\n    K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug,\n    V: ToBytes + FromBytes + Clone + Eq,\n    T: Readable<Handle = S::Handle> + Writable<Handle = S::Handle>,\n    S: TrieStore<K, V>,\n    S::Error: From<T::Error>,\n    E: From<S::Error> + From<bytesrepr::Error>,\n{\n    let trie_hash = Digest::hash_into_chunks_if_necessary(trie_bytes);\n    store.put_raw(txn, &trie_hash, Cow::from(trie_bytes))?;\n    Ok(trie_hash)\n}\n\nenum KeysIteratorState<K, V, S: TrieStore<K, V>> {\n    /// Iterate normally\n    Ok,\n    /// Return the error and stop iterating\n    #[allow(dead_code)] // Return variant alone is used in testing.\n    ReturnError(S::Error),\n    /// Already failed, only return None\n    Failed,\n}\n\nstruct VisitedTrieNode {\n    trie: LazilyDeserializedTrie,\n    maybe_index: Option<usize>,\n    path: Vec<u8>,\n}\n\n/// Iterator for trie store keys.\npub struct KeysIterator<'a, 'b, K, V, T, S: TrieStore<K, V>> {\n    initial_descend: VecDeque<u8>,\n    visited: Vec<VisitedTrieNode>,\n    store: NonDeserializingStore<'a, K, V, S>,\n    txn: &'b T,\n    state: KeysIteratorState<K, V, S>,\n}\n\nimpl<K, V, T, S> Iterator for KeysIterator<'_, 
'_, K, V, T, S>\nwhere\n    K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug,\n    V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug,\n    T: Readable<Handle = S::Handle>,\n    S: TrieStore<K, V>,\n    S::Error: From<T::Error> + From<bytesrepr::Error>,\n{\n    type Item = Result<K, S::Error>;\n\n    fn next(&mut self) -> Option<Self::Item> {\n        match mem::replace(&mut self.state, KeysIteratorState::Ok) {\n            KeysIteratorState::Ok => (),\n            KeysIteratorState::ReturnError(e) => {\n                self.state = KeysIteratorState::Failed;\n                return Some(Err(e));\n            }\n            KeysIteratorState::Failed => {\n                return None;\n            }\n        }\n        while let Some(VisitedTrieNode {\n            trie,\n            maybe_index,\n            mut path,\n        }) = self.visited.pop()\n        {\n            let mut maybe_next_trie: Option<LazilyDeserializedTrie> = None;\n\n            match trie {\n                LazilyDeserializedTrie::Leaf(leaf_bytes) => {\n                    let leaf_bytes = leaf_bytes.bytes();\n                    if leaf_bytes.is_empty() {\n                        self.state = KeysIteratorState::Failed;\n                        return Some(Err(bytesrepr::Error::Formatting.into()));\n                    }\n\n                    let key_bytes = &leaf_bytes[1..]; // Skip `Trie::Leaf` tag\n                    debug_assert!(\n                        key_bytes.starts_with(&path),\n                        \"Expected key bytes to start with the current path\"\n                    );\n\n                    // only return the leaf if it matches the initial descend path\n                    path.extend(&self.initial_descend);\n                    if key_bytes.starts_with(&path) {\n                        // Only deserializes K when we're absolutely sure the path matches.\n                        let (key, _stored_value): (K, _) = match K::from_bytes(key_bytes) {\n                
            Ok(key) => key,\n                            Err(error) => {\n                                self.state = KeysIteratorState::Failed;\n                                return Some(Err(error.into()));\n                            }\n                        };\n                        return Some(Ok(key));\n                    }\n                }\n                LazilyDeserializedTrie::Node { ref pointer_block } => {\n                    // if we are still initially descending (and initial_descend is not empty), take\n                    // the first index we should descend to, otherwise take maybe_index from the\n                    // visited stack\n                    let mut index: usize = self\n                        .initial_descend\n                        .front()\n                        .map(|i| *i as usize)\n                        .or(maybe_index)\n                        .unwrap_or_default();\n                    while index < RADIX {\n                        if let Some(ref pointer) = pointer_block[index] {\n                            maybe_next_trie = {\n                                match self.store.get_raw(self.txn, pointer.hash()) {\n                                    Ok(Some(trie_bytes)) => {\n                                        match bytesrepr::deserialize_from_slice(&trie_bytes) {\n                                            Ok(lazy_trie) => Some(lazy_trie),\n                                            Err(error) => {\n                                                self.state = KeysIteratorState::Failed;\n                                                return Some(Err(error.into()));\n                                            }\n                                        }\n                                    }\n                                    Ok(None) => None,\n                                    Err(error) => {\n                                        self.state = KeysIteratorState::Failed;\n                            
            return Some(Err(error));\n                                    }\n                                }\n                            };\n                            debug_assert!(\n                                maybe_next_trie.is_some(),\n                                \"Trie at the pointer is expected to exist\"\n                            );\n                            if self.initial_descend.pop_front().is_none() {\n                                self.visited.push(VisitedTrieNode {\n                                    trie,\n                                    maybe_index: Some(index + 1),\n                                    path: path.clone(),\n                                });\n                            }\n                            path.push(index as u8);\n                            break;\n                        }\n                        // only continue the loop if we are not initially descending;\n                        // if we are descending and we land here, it means that there is no subtrie\n                        // along the descend path and we will return no results\n                        if !self.initial_descend.is_empty() {\n                            break;\n                        }\n                        index += 1;\n                    }\n                }\n                LazilyDeserializedTrie::Extension { affix, pointer } => {\n                    let descend_len = cmp::min(self.initial_descend.len(), affix.len());\n                    let check_prefix = self\n                        .initial_descend\n                        .drain(..descend_len)\n                        .collect::<Vec<_>>();\n                    // if we are initially descending, we only want to continue if the affix\n                    // matches the descend path\n                    // if we are not, the check_prefix will be empty, so we will enter the if\n                    // anyway\n                    if affix.starts_with(&check_prefix) 
{\n                        maybe_next_trie = match self.store.get_raw(self.txn, pointer.hash()) {\n                            Ok(Some(trie_bytes)) => {\n                                match bytesrepr::deserialize_from_slice(&trie_bytes) {\n                                    Ok(lazy_trie) => Some(lazy_trie),\n                                    Err(error) => {\n                                        self.state = KeysIteratorState::Failed;\n                                        return Some(Err(error.into()));\n                                    }\n                                }\n                            }\n                            Ok(None) => None,\n                            Err(e) => {\n                                self.state = KeysIteratorState::Failed;\n                                return Some(Err(e));\n                            }\n                        };\n                        debug_assert!(\n                            matches!(&maybe_next_trie, Some(LazilyDeserializedTrie::Node { .. 
}),),\n                            \"Expected a LazilyDeserializedTrie::Node but received {:?}\",\n                            maybe_next_trie\n                        );\n                        path.extend(affix);\n                    }\n                }\n            }\n\n            if let Some(next_trie) = maybe_next_trie {\n                self.visited.push(VisitedTrieNode {\n                    trie: next_trie,\n                    maybe_index: None,\n                    path,\n                });\n            }\n        }\n        None\n    }\n}\n\n/// Returns the iterator over the keys in the subtrie matching `prefix`.\n///\n/// The root should be the apex of the trie.\npub fn keys_with_prefix<'a, 'b, K, V, T, S>(\n    txn: &'b T,\n    store: &'a S,\n    root: &Digest,\n    prefix: &[u8],\n) -> KeysIterator<'a, 'b, K, V, T, S>\nwhere\n    K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug,\n    V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug,\n    T: Readable<Handle = S::Handle>,\n    S: TrieStore<K, V>,\n    S::Error: From<T::Error>,\n{\n    let store = store_wrappers::NonDeserializingStore::new(store);\n    let (visited, init_state): (Vec<VisitedTrieNode>, _) = match store.get_raw(txn, root) {\n        Ok(None) => (vec![], KeysIteratorState::Ok),\n        Err(e) => (vec![], KeysIteratorState::ReturnError(e)),\n        Ok(Some(current_root_bytes)) => match bytesrepr::deserialize_from_slice(current_root_bytes)\n        {\n            Ok(lazy_trie) => {\n                let visited = vec![VisitedTrieNode {\n                    trie: lazy_trie,\n                    maybe_index: None,\n                    path: vec![],\n                }];\n                let init_state = KeysIteratorState::Ok;\n\n                (visited, init_state)\n            }\n            Err(error) => (vec![], KeysIteratorState::ReturnError(error.into())),\n        },\n    };\n\n    KeysIterator {\n        initial_descend: prefix.iter().cloned().collect(),\n        
visited,\n        store,\n        txn,\n        state: init_state,\n    }\n}\n\n/// Returns the iterator over the keys at a given root hash.\n///\n/// The root should be the apex of the trie.\n#[cfg(test)]\npub fn keys<'a, 'b, K, V, T, S>(\n    txn: &'b T,\n    store: &'a S,\n    root: &Digest,\n) -> KeysIterator<'a, 'b, K, V, T, S>\nwhere\n    K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug,\n    V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug,\n    T: Readable<Handle = S::Handle>,\n    S: TrieStore<K, V>,\n    S::Error: From<T::Error>,\n{\n    keys_with_prefix(txn, store, root, &[])\n}\n\n/// Checks the integrity of the trie store.\n#[cfg(test)]\npub fn check_integrity<K, V, T, S, E>(\n    txn: &T,\n    store: &S,\n    trie_keys_to_visit: Vec<Digest>,\n) -> Result<(), E>\nwhere\n    K: ToBytes + FromBytes + Eq + std::fmt::Debug,\n    V: ToBytes + FromBytes + std::fmt::Debug,\n    T: Readable<Handle = S::Handle>,\n    S: TrieStore<K, V>,\n    S::Error: From<T::Error>,\n    E: From<S::Error> + From<bytesrepr::Error>,\n{\n    for state_root in &trie_keys_to_visit {\n        match store.get(txn, state_root)? {\n            Some(Trie::Node { .. 
}) => {}\n            other => panic!(\n                \"Should have a pointer block node as state root but received {:?} instead\",\n                other\n            ),\n        }\n    }\n    let mut trie_keys_to_visit: Vec<(Vec<u8>, Digest)> = trie_keys_to_visit\n        .iter()\n        .map(|blake2b_hash| (Vec::new(), *blake2b_hash))\n        .collect();\n    let mut visited = HashSet::new();\n    while let Some((mut path, trie_key)) = trie_keys_to_visit.pop() {\n        if !visited.insert(trie_key) {\n            continue;\n        }\n        let maybe_retrieved_trie: Option<Trie<K, V>> = store.get(txn, &trie_key)?;\n        if let Some(trie_value) = &maybe_retrieved_trie {\n            let hash_of_trie_value = {\n                let node_bytes = trie_value.to_bytes()?;\n                Digest::hash(&node_bytes)\n            };\n            if trie_key != hash_of_trie_value {\n                panic!(\n                    \"Trie key {:?} has corrupted value {:?} (hash of value is {:?})\",\n                    trie_key, trie_value, hash_of_trie_value\n                );\n            }\n        }\n        match maybe_retrieved_trie {\n            // If we can't find the trie_key; it is missing and we'll return it\n            None => {\n                panic!(\"Missing trie key: {:?}\", trie_key)\n            }\n            // If we could retrieve the node and it is a leaf, the search can move on\n            Some(Trie::Leaf { key, .. }) => {\n                let key_bytes = key.to_bytes()?;\n                if !key_bytes.starts_with(&path) {\n                    panic!(\n                                \"Trie key {:?} belongs to a leaf with a corrupted affix. 
Key bytes: {:?}, Path: {:?}.\",\n                                trie_key, key_bytes, path\n                            );\n                }\n            }\n            // If we hit a pointer block, queue up all of the nodes it points to\n            Some(Trie::Node { pointer_block }) => {\n                for (byte, pointer) in pointer_block.as_indexed_pointers() {\n                    let mut new_path = path.clone();\n                    new_path.push(byte);\n                    match pointer {\n                        Pointer::LeafPointer(descendant_leaf_trie_key) => {\n                            trie_keys_to_visit.push((new_path, descendant_leaf_trie_key))\n                        }\n                        Pointer::NodePointer(descendant_node_trie_key) => {\n                            trie_keys_to_visit.push((new_path, descendant_node_trie_key))\n                        }\n                    }\n                }\n            }\n            // If we hit an extension block, add its pointer to the queue\n            Some(Trie::Extension { pointer, affix }) => {\n                path.extend_from_slice(affix.as_slice());\n                trie_keys_to_visit.push((path, pointer.into_hash()))\n            }\n        }\n    }\n    Ok(())\n}\n\n/// Recomputes a state root hash from a [`TrieMerkleProof`].\n/// This is done in the following steps:\n///\n/// 1. Using [`TrieMerkleProof::key`] and [`TrieMerkleProof::value`], construct a [`Trie::Leaf`] and\n///    compute a hash for that leaf.\n///\n/// 2. We then iterate over [`TrieMerkleProof::proof_steps`] left to right, using the hash from the\n///    previous step combined with the next step to compute a new hash.\n///\n/// 3. 
When there are no more steps, we return the final hash we have computed.\n///\n/// The steps in this function reflect `operations::rehash`.\npub fn compute_state_hash<K, V>(proof: &TrieMerkleProof<K, V>) -> Result<Digest, bytesrepr::Error>\nwhere\n    K: ToBytes + Copy + Clone,\n    V: ToBytes + Clone,\n{\n    let mut hash = {\n        let leaf = Trie::leaf(proof.key(), proof.value().to_owned());\n        leaf.trie_hash()?\n    };\n\n    for (proof_step_index, proof_step) in proof.proof_steps().iter().enumerate() {\n        let pointer = if proof_step_index == 0 {\n            Pointer::LeafPointer(hash)\n        } else {\n            Pointer::NodePointer(hash)\n        };\n        let proof_step_bytes = match proof_step {\n            TrieMerkleProofStep::Node {\n                hole_index,\n                indexed_pointers_with_hole,\n            } => {\n                let hole_index = *hole_index;\n                assert!(hole_index as usize <= RADIX, \"hole_index exceeded RADIX\");\n                let mut indexed_pointers = indexed_pointers_with_hole.to_owned();\n                indexed_pointers.push((hole_index, pointer));\n                Trie::<K, V>::node(&indexed_pointers).to_bytes()?\n            }\n            TrieMerkleProofStep::Extension { affix } => {\n                Trie::<K, V>::extension(affix.clone().into(), pointer).to_bytes()?\n            }\n        };\n        hash = Digest::hash(&proof_step_bytes);\n    }\n    Ok(hash)\n}\n"
  },
  {
    "path": "storage/src/global_state/trie_store/operations/store_wrappers.rs",
    "content": "use std::marker::PhantomData;\n#[cfg(debug_assertions)]\nuse std::{\n    collections::HashSet,\n    sync::{Arc, Mutex},\n};\n\nuse casper_types::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    Digest,\n};\n\nuse crate::global_state::{\n    store::Store,\n    transaction_source::{Readable, Writable},\n    trie::Trie,\n    trie_store::TrieStore,\n};\n\n/// A [`TrieStore`] wrapper that panics in debug mode whenever an attempt to deserialize [`V`] is\n/// made, otherwise it behaves as a [`TrieStore`].\n///\n/// To ensure this wrapper has zero overhead, a debug assertion is used.\npub(crate) struct NonDeserializingStore<'a, K, V, S>(&'a S, PhantomData<*const (K, V)>)\nwhere\n    S: TrieStore<K, V>;\n\nimpl<'a, K, V, S> NonDeserializingStore<'a, K, V, S>\nwhere\n    S: TrieStore<K, V>,\n{\n    pub(crate) fn new(store: &'a S) -> Self {\n        Self(store, PhantomData)\n    }\n}\n\nimpl<K, V, S> Store<Digest, Trie<K, V>> for NonDeserializingStore<'_, K, V, S>\nwhere\n    S: TrieStore<K, V>,\n{\n    type Error = S::Error;\n\n    type Handle = S::Handle;\n\n    #[inline]\n    fn handle(&self) -> Self::Handle {\n        self.0.handle()\n    }\n\n    #[inline]\n    fn deserialize_value(&self, bytes: &[u8]) -> Result<Trie<K, V>, bytesrepr::Error>\n    where\n        Trie<K, V>: FromBytes,\n    {\n        #[cfg(debug_assertions)]\n        {\n            let trie: Trie<K, V> = self.0.deserialize_value(bytes)?;\n            if let Trie::Leaf { .. 
} = trie {\n                panic!(\"Tried to deserialize a value but expected no deserialization to happen.\")\n            }\n            Ok(trie)\n        }\n        #[cfg(not(debug_assertions))]\n        {\n            self.0.deserialize_value(bytes)\n        }\n    }\n\n    #[inline]\n    fn serialize_value(&self, value: &Trie<K, V>) -> Result<Vec<u8>, bytesrepr::Error>\n    where\n        Trie<K, V>: ToBytes,\n    {\n        self.0.serialize_value(value)\n    }\n\n    #[inline]\n    fn get<T>(&self, txn: &T, key: &Digest) -> Result<Option<Trie<K, V>>, Self::Error>\n    where\n        T: Readable<Handle = Self::Handle>,\n        Digest: AsRef<[u8]>,\n        Trie<K, V>: FromBytes,\n        Self::Error: From<T::Error>,\n    {\n        self.0.get(txn, key)\n    }\n\n    #[inline]\n    fn get_raw<T>(&self, txn: &T, key: &Digest) -> Result<Option<bytesrepr::Bytes>, Self::Error>\n    where\n        T: Readable<Handle = Self::Handle>,\n        Digest: AsRef<[u8]>,\n        Self::Error: From<T::Error>,\n    {\n        self.0.get_raw(txn, key)\n    }\n\n    #[inline]\n    fn put<T>(&self, txn: &mut T, key: &Digest, value: &Trie<K, V>) -> Result<(), Self::Error>\n    where\n        T: Writable<Handle = Self::Handle>,\n        Digest: AsRef<[u8]>,\n        Trie<K, V>: ToBytes,\n        Self::Error: From<T::Error>,\n    {\n        self.0.put(txn, key, value)\n    }\n\n    #[inline]\n    fn put_raw<T>(\n        &self,\n        txn: &mut T,\n        key: &Digest,\n        value_bytes: std::borrow::Cow<'_, [u8]>,\n    ) -> Result<(), Self::Error>\n    where\n        T: Writable<Handle = Self::Handle>,\n        Digest: AsRef<[u8]>,\n        Self::Error: From<T::Error>,\n    {\n        self.0.put_raw(txn, key, value_bytes)\n    }\n}\n\npub(crate) struct OnceDeserializingStore<'a, K: ToBytes, V: ToBytes, S: TrieStore<K, V>> {\n    store: &'a S,\n    #[cfg(debug_assertions)]\n    deserialize_tracking: Arc<Mutex<HashSet<Digest>>>,\n    _marker: PhantomData<*const (K, 
V)>,\n}\n\nimpl<'a, K, V, S> OnceDeserializingStore<'a, K, V, S>\nwhere\n    K: ToBytes,\n    V: ToBytes,\n    S: TrieStore<K, V>,\n{\n    pub(crate) fn new(store: &'a S) -> Self {\n        Self {\n            store,\n            #[cfg(debug_assertions)]\n            deserialize_tracking: Arc::new(Mutex::new(HashSet::new())),\n            _marker: PhantomData,\n        }\n    }\n}\n\nimpl<K, V, S> Store<Digest, Trie<K, V>> for OnceDeserializingStore<'_, K, V, S>\nwhere\n    K: ToBytes,\n    V: ToBytes,\n    S: TrieStore<K, V>,\n{\n    type Error = S::Error;\n\n    type Handle = S::Handle;\n\n    #[inline]\n    fn handle(&self) -> Self::Handle {\n        self.store.handle()\n    }\n\n    #[inline]\n    fn deserialize_value(&self, bytes: &[u8]) -> Result<Trie<K, V>, bytesrepr::Error>\n    where\n        Trie<K, V>: FromBytes,\n    {\n        #[cfg(debug_assertions)]\n        {\n            let trie: Trie<K, V> = self.store.deserialize_value(bytes)?;\n            if let Trie::Leaf { .. } = trie {\n                let trie_hash = trie.trie_hash()?;\n                let mut tracking = self.deserialize_tracking.lock().expect(\"Poisoned lock\");\n                if tracking.get(&trie_hash).is_some() {\n                    panic!(\"Tried to deserialize a value more than once.\");\n                } else {\n                    tracking.insert(trie_hash);\n                }\n            }\n            Ok(trie)\n        }\n        #[cfg(not(debug_assertions))]\n        {\n            self.store.deserialize_value(bytes)\n        }\n    }\n\n    #[inline]\n    fn serialize_value(&self, value: &Trie<K, V>) -> Result<Vec<u8>, bytesrepr::Error>\n    where\n        Trie<K, V>: ToBytes,\n    {\n        self.store.serialize_value(value)\n    }\n\n    #[inline]\n    fn get<T>(&self, txn: &T, key: &Digest) -> Result<Option<Trie<K, V>>, Self::Error>\n    where\n        T: Readable<Handle = Self::Handle>,\n        Digest: AsRef<[u8]>,\n        Trie<K, V>: FromBytes,\n        Self::Error: 
From<T::Error>,\n    {\n        self.store.get(txn, key)\n    }\n\n    #[inline]\n    fn get_raw<T>(&self, txn: &T, key: &Digest) -> Result<Option<bytesrepr::Bytes>, Self::Error>\n    where\n        T: Readable<Handle = Self::Handle>,\n        Digest: AsRef<[u8]>,\n        Self::Error: From<T::Error>,\n    {\n        self.store.get_raw(txn, key)\n    }\n\n    #[inline]\n    fn put<T>(&self, txn: &mut T, key: &Digest, value: &Trie<K, V>) -> Result<(), Self::Error>\n    where\n        T: Writable<Handle = Self::Handle>,\n        Digest: AsRef<[u8]>,\n        Trie<K, V>: ToBytes,\n        Self::Error: From<T::Error>,\n    {\n        self.store.put(txn, key, value)\n    }\n\n    #[inline]\n    fn put_raw<T>(\n        &self,\n        txn: &mut T,\n        key: &Digest,\n        value_bytes: std::borrow::Cow<'_, [u8]>,\n    ) -> Result<(), Self::Error>\n    where\n        T: Writable<Handle = Self::Handle>,\n        Digest: AsRef<[u8]>,\n        Self::Error: From<T::Error>,\n    {\n        self.store.put_raw(txn, key, value_bytes)\n    }\n}\n"
  },
  {
    "path": "storage/src/global_state/trie_store/operations/tests/bytesrepr_utils.rs",
    "content": "use casper_types::bytesrepr::{self, FromBytes, ToBytes};\n\n#[derive(PartialEq, Eq, Debug, Clone)]\npub(crate) struct PanickingFromBytes<T>(T);\n\nimpl<T> PanickingFromBytes<T> {\n    pub(crate) fn new(inner: T) -> PanickingFromBytes<T> {\n        PanickingFromBytes(inner)\n    }\n}\n\nimpl<T> FromBytes for PanickingFromBytes<T>\nwhere\n    T: FromBytes,\n{\n    fn from_bytes(_: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        unreachable!(\"This type is expected to never deserialize.\");\n    }\n}\n\nimpl<T> ToBytes for PanickingFromBytes<T>\nwhere\n    T: ToBytes,\n{\n    fn into_bytes(self) -> Result<Vec<u8>, bytesrepr::Error>\n    where\n        Self: Sized,\n    {\n        self.0.into_bytes()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.0.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n}\n"
  },
  {
    "path": "storage/src/global_state/trie_store/operations/tests/ee_699.rs",
    "content": "use proptest::{arbitrary, array, collection, prop_oneof, strategy::Strategy};\n\nuse casper_types::{\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    gens, Digest, URef,\n};\n\nuse super::{HashedTrie, TestValue};\nuse crate::global_state::trie::Trie;\n\npub const BASIC_LENGTH: usize = 4;\npub const SIMILAR_LENGTH: usize = 4;\npub const FANCY_LENGTH: usize = 5;\npub const LONG_LENGTH: usize = 8;\n\nconst PUBLIC_KEY_BASIC_ID: u8 = 0;\nconst PUBLIC_KEY_SIMILAR_ID: u8 = 1;\nconst PUBLIC_KEY_FANCY_ID: u8 = 2;\nconst PUBLIC_KEY_LONG_ID: u8 = 3;\n\npub const KEY_HASH_LENGTH: usize = 32;\n\nconst KEY_ACCOUNT_ID: u8 = 0;\nconst KEY_HASH_ID: u8 = 1;\nconst KEY_UREF_ID: u8 = 2;\n\nmacro_rules! make_array_newtype {\n    ($name:ident, $ty:ty, $len:expr) => {\n        pub struct $name([$ty; $len]);\n\n        impl $name {\n            pub fn new(source: [$ty; $len]) -> Self {\n                $name(source)\n            }\n\n            pub fn into_inner(self) -> [$ty; $len] {\n                self.0\n            }\n        }\n\n        // impl Clone for $name {\n        //     fn clone(&self) -> $name {\n        //         let &$name(ref dat) = self;\n        //         $name(dat.clone())\n        //     }\n        // }\n\n        impl Clone for $name {\n            fn clone(&self) -> $name {\n                *self\n            }\n        }\n\n        impl Copy for $name {}\n\n        impl PartialEq for $name {\n            fn eq(&self, other: &$name) -> bool {\n                &self[..] 
== &other[..]\n            }\n        }\n\n        impl Eq for $name {}\n\n        impl PartialOrd for $name {\n            fn partial_cmp(&self, other: &$name) -> Option<core::cmp::Ordering> {\n                Some(self.cmp(other))\n            }\n        }\n\n        impl Ord for $name {\n            fn cmp(&self, other: &$name) -> core::cmp::Ordering {\n                self.0.cmp(&other.0)\n            }\n        }\n\n        impl core::ops::Index<usize> for $name {\n            type Output = $ty;\n\n            fn index(&self, index: usize) -> &$ty {\n                let &$name(ref dat) = self;\n                &dat[index]\n            }\n        }\n\n        impl core::ops::Index<core::ops::Range<usize>> for $name {\n            type Output = [$ty];\n\n            fn index(&self, index: core::ops::Range<usize>) -> &[$ty] {\n                let &$name(ref dat) = self;\n                &dat[index]\n            }\n        }\n\n        impl core::ops::Index<core::ops::RangeTo<usize>> for $name {\n            type Output = [$ty];\n\n            fn index(&self, index: core::ops::RangeTo<usize>) -> &[$ty] {\n                let &$name(ref dat) = self;\n                &dat[index]\n            }\n        }\n\n        impl core::ops::Index<core::ops::RangeFrom<usize>> for $name {\n            type Output = [$ty];\n\n            fn index(&self, index: core::ops::RangeFrom<usize>) -> &[$ty] {\n                let &$name(ref dat) = self;\n                &dat[index]\n            }\n        }\n\n        impl core::ops::Index<core::ops::RangeFull> for $name {\n            type Output = [$ty];\n\n            fn index(&self, _: core::ops::RangeFull) -> &[$ty] {\n                let &$name(ref dat) = self;\n                &dat[..]\n            }\n        }\n\n        impl core::fmt::Debug for $name {\n            fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {\n                write!(f, \"{}([\", stringify!($name))?;\n                
write!(f, \"{:?}\", self.0[0])?;\n                for item in self.0[1..].iter() {\n                    write!(f, \", {:?}\", item)?;\n                }\n                write!(f, \"])\")\n            }\n        }\n\n        #[allow(unused_qualifications)]\n        impl bytesrepr::ToBytes for $name {\n            fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n                self.0.to_bytes()\n            }\n\n            fn serialized_length(&self) -> usize {\n                self.0.serialized_length()\n            }\n        }\n\n        #[allow(unused_qualifications)]\n        impl bytesrepr::FromBytes for $name {\n            fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n                let (dat, rem) = <[$ty; $len]>::from_bytes(bytes)?;\n                Ok(($name(dat), rem))\n            }\n        }\n    };\n}\n\nmake_array_newtype!(Basic, u8, BASIC_LENGTH);\nmake_array_newtype!(Similar, u8, SIMILAR_LENGTH);\nmake_array_newtype!(Fancy, u8, FANCY_LENGTH);\nmake_array_newtype!(Long, u8, LONG_LENGTH);\n\nmacro_rules! impl_distribution_for_array_newtype {\n    ($name:ident, $ty:ty, $len:expr) => {\n        impl rand::distributions::Distribution<$name> for rand::distributions::Standard {\n            fn sample<R: rand::Rng + ?Sized>(&self, rng: &mut R) -> $name {\n                let mut dat = [0u8; $len];\n                rng.fill_bytes(dat.as_mut());\n                $name(dat)\n            }\n        }\n    };\n}\n\nimpl_distribution_for_array_newtype!(Basic, u8, BASIC_LENGTH);\nimpl_distribution_for_array_newtype!(Similar, u8, SIMILAR_LENGTH);\nimpl_distribution_for_array_newtype!(Fancy, u8, FANCY_LENGTH);\nimpl_distribution_for_array_newtype!(Long, u8, LONG_LENGTH);\n\nmacro_rules! 
make_array_newtype_arb {\n    ($name:ident, $ty:ty, $len:expr, $fn_name:ident) => {\n        fn $fn_name() -> impl Strategy<Value = $name> {\n            collection::vec(arbitrary::any::<$ty>(), $len).prop_map(|values| {\n                let mut dat = [0u8; $len];\n                dat.copy_from_slice(values.as_slice());\n                $name(dat)\n            })\n        }\n    };\n}\n\nmake_array_newtype_arb!(Basic, u8, BASIC_LENGTH, basic_arb);\nmake_array_newtype_arb!(Similar, u8, SIMILAR_LENGTH, similar_arb);\nmake_array_newtype_arb!(Fancy, u8, FANCY_LENGTH, fancy_arb);\nmake_array_newtype_arb!(Long, u8, LONG_LENGTH, long_arb);\n\n#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]\npub enum PublicKey {\n    Basic(Basic),\n    Similar(Similar),\n    Fancy(Fancy),\n    Long(Long),\n}\n\nimpl ToBytes for PublicKey {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut ret = bytesrepr::allocate_buffer(self)?;\n        match self {\n            PublicKey::Basic(key) => {\n                ret.push(PUBLIC_KEY_BASIC_ID);\n                ret.extend(key.to_bytes()?)\n            }\n            PublicKey::Similar(key) => {\n                ret.push(PUBLIC_KEY_SIMILAR_ID);\n                ret.extend(key.to_bytes()?)\n            }\n            PublicKey::Fancy(key) => {\n                ret.push(PUBLIC_KEY_FANCY_ID);\n                ret.extend(key.to_bytes()?)\n            }\n            PublicKey::Long(key) => {\n                ret.push(PUBLIC_KEY_LONG_ID);\n                ret.extend(key.to_bytes()?)\n            }\n        };\n        Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                PublicKey::Basic(key) => key.serialized_length(),\n                PublicKey::Similar(key) => key.serialized_length(),\n                PublicKey::Fancy(key) => key.serialized_length(),\n                PublicKey::Long(key) => key.serialized_length(),\n            
}\n    }\n}\n\nimpl FromBytes for PublicKey {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (id, rem): (u8, &[u8]) = FromBytes::from_bytes(bytes)?;\n        match id {\n            PUBLIC_KEY_BASIC_ID => {\n                let (key, rem): (Basic, &[u8]) = FromBytes::from_bytes(rem)?;\n                Ok((PublicKey::Basic(key), rem))\n            }\n            PUBLIC_KEY_SIMILAR_ID => {\n                let (key, rem): (Similar, &[u8]) = FromBytes::from_bytes(rem)?;\n                Ok((PublicKey::Similar(key), rem))\n            }\n            PUBLIC_KEY_FANCY_ID => {\n                let (key, rem): (Fancy, &[u8]) = FromBytes::from_bytes(rem)?;\n                Ok((PublicKey::Fancy(key), rem))\n            }\n            PUBLIC_KEY_LONG_ID => {\n                let (key, rem): (Long, &[u8]) = FromBytes::from_bytes(rem)?;\n                Ok((PublicKey::Long(key), rem))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\nfn public_key_arb() -> impl Strategy<Value = PublicKey> {\n    prop_oneof![\n        basic_arb().prop_map(PublicKey::Basic),\n        similar_arb().prop_map(PublicKey::Similar),\n        fancy_arb().prop_map(PublicKey::Fancy),\n        long_arb().prop_map(PublicKey::Long)\n    ]\n}\n\n#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]\npub enum TestKey {\n    Account(PublicKey),\n    Hash([u8; KEY_HASH_LENGTH]),\n    URef(URef),\n}\n\nimpl ToBytes for TestKey {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut ret = Vec::with_capacity(self.serialized_length());\n        match self {\n            TestKey::Account(public_key) => {\n                ret.push(KEY_ACCOUNT_ID);\n                ret.extend(&public_key.to_bytes()?)\n            }\n            TestKey::Hash(hash) => {\n                ret.push(KEY_HASH_ID);\n                ret.extend(&hash.to_bytes()?)\n            }\n            TestKey::URef(uref) => {\n           
     ret.push(KEY_UREF_ID);\n                ret.extend(&uref.to_bytes()?)\n            }\n        }\n        Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                TestKey::Account(public_key) => public_key.serialized_length(),\n                TestKey::Hash(hash) => hash.serialized_length(),\n                TestKey::URef(uref) => uref.serialized_length(),\n            }\n    }\n}\n\nimpl FromBytes for TestKey {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (id, rem): (u8, &[u8]) = FromBytes::from_bytes(bytes)?;\n        match id {\n            KEY_ACCOUNT_ID => {\n                let (public_key, rem): (PublicKey, &[u8]) = FromBytes::from_bytes(rem)?;\n                Ok((TestKey::Account(public_key), rem))\n            }\n            KEY_HASH_ID => {\n                let (hash, rem): ([u8; KEY_HASH_LENGTH], &[u8]) = FromBytes::from_bytes(rem)?;\n                Ok((TestKey::Hash(hash), rem))\n            }\n            KEY_UREF_ID => {\n                let (uref, rem): (URef, &[u8]) = FromBytes::from_bytes(rem)?;\n                Ok((TestKey::URef(uref), rem))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\nfn test_key_arb() -> impl Strategy<Value = TestKey> {\n    prop_oneof![\n        public_key_arb().prop_map(TestKey::Account),\n        gens::u8_slice_32().prop_map(TestKey::Hash),\n        gens::uref_arb().prop_map(TestKey::URef),\n    ]\n}\n\n#[allow(clippy::unnecessary_operation)]\nmod basics {\n    use proptest::proptest;\n\n    use super::*;\n\n    #[test]\n    fn random_key_generation_works_as_expected() {\n        use rand::Rng;\n        let mut rng = rand::thread_rng();\n        let a: Basic = rng.gen();\n        let b: Basic = rng.gen();\n        assert_ne!(a, b)\n    }\n\n    proptest! 
{\n        #[test]\n        fn key_should_roundtrip(key in test_key_arb()) {\n            bytesrepr::test_serialization_roundtrip(&key)\n        }\n    }\n}\n\ntype TestTrie = Trie<TestKey, TestValue>;\n\nconst TEST_LEAVES_LENGTH: usize = 6;\n\n/// Keys have been chosen deliberately and the `create_` functions below depend\n/// on these exact definitions.  Values are arbitrary.\nconst TEST_LEAVES: [TestTrie; TEST_LEAVES_LENGTH] = [\n    Trie::Leaf {\n        key: TestKey::Account(PublicKey::Basic(Basic([0u8, 0, 0, 0]))),\n        value: TestValue(*b\"value0\"),\n    },\n    Trie::Leaf {\n        key: TestKey::Account(PublicKey::Basic(Basic([0u8, 0, 0, 1]))),\n        value: TestValue(*b\"value1\"),\n    },\n    Trie::Leaf {\n        key: TestKey::Account(PublicKey::Similar(Similar([0u8, 0, 0, 1]))),\n        value: TestValue(*b\"value3\"),\n    },\n    Trie::Leaf {\n        key: TestKey::Account(PublicKey::Fancy(Fancy([0u8, 0, 0, 1, 0]))),\n        value: TestValue(*b\"value4\"),\n    },\n    Trie::Leaf {\n        key: TestKey::Account(PublicKey::Long(Long([0u8, 0, 0, 1, 0, 0, 0, 0]))),\n        value: TestValue(*b\"value5\"),\n    },\n    Trie::Leaf {\n        key: TestKey::Hash([0u8; 32]),\n        value: TestValue(*b\"value6\"),\n    },\n];\n\nfn create_0_leaf_trie() -> Result<(Digest, Vec<HashedTrie<TestKey, TestValue>>), bytesrepr::Error> {\n    let root = HashedTrie::new(Trie::node(&[]))?;\n\n    let root_hash: Digest = root.hash;\n\n    let parents: Vec<HashedTrie<TestKey, TestValue>> = vec![root];\n\n    let tries: Vec<HashedTrie<TestKey, TestValue>> = {\n        let mut ret = Vec::new();\n        ret.extend(parents);\n        ret\n    };\n\n    Ok((root_hash, tries))\n}\n\nmod empty_tries {\n    use super::*;\n    use crate::global_state::{\n        error,\n        trie_store::operations::tests::{self, LmdbTestContext},\n    };\n\n    #[test]\n    fn lmdb_writes_to_n_leaf_empty_trie_had_expected_results() {\n        let (root_hash, tries) = 
create_0_leaf_trie().unwrap();\n        let context = LmdbTestContext::new(&tries).unwrap();\n        let initial_states = vec![root_hash];\n\n        let _states = tests::writes_to_n_leaf_empty_trie_had_expected_results::<\n            _,\n            _,\n            _,\n            _,\n            _,\n            _,\n            error::Error,\n        >(\n            &context.environment,\n            &context.environment,\n            &context.store,\n            &context.store,\n            &initial_states,\n            &TEST_LEAVES,\n        )\n        .unwrap();\n    }\n}\n\nmod proptests {\n    use std::ops::RangeInclusive;\n\n    use proptest::{collection::vec, proptest};\n\n    use super::*;\n    use crate::global_state::{\n        error::{self},\n        trie_store::operations::tests::{self, LmdbTestContext},\n    };\n\n    const DEFAULT_MIN_LENGTH: usize = 0;\n    const DEFAULT_MAX_LENGTH: usize = 100;\n\n    fn get_range() -> RangeInclusive<usize> {\n        let start = option_env!(\"CL_TRIE_TEST_VECTOR_MIN_LENGTH\")\n            .and_then(|s| str::parse::<usize>(s).ok())\n            .unwrap_or(DEFAULT_MIN_LENGTH);\n        let end = option_env!(\"CL_TRIE_TEST_VECTOR_MAX_LENGTH\")\n            .and_then(|s| str::parse::<usize>(s).ok())\n            .unwrap_or(DEFAULT_MAX_LENGTH);\n        RangeInclusive::new(start, end)\n    }\n\n    fn lmdb_roundtrip_succeeds(pairs: &[(TestKey, TestValue)]) -> bool {\n        let (root_hash, tries) = create_0_leaf_trie().unwrap();\n        let context = LmdbTestContext::new(&tries).unwrap();\n        let mut states_to_check = vec![];\n\n        let root_hashes = tests::write_pairs::<_, _, _, _, error::Error>(\n            &context.environment,\n            &context.store,\n            &root_hash,\n            pairs,\n        )\n        .unwrap();\n\n        states_to_check.extend(root_hashes);\n\n        tests::check_pairs::<_, _, _, _, error::Error>(\n            &context.environment,\n            &context.store,\n   
         &states_to_check,\n            pairs,\n        )\n        .unwrap()\n    }\n\n    fn test_value_arb() -> impl Strategy<Value = TestValue> {\n        array::uniform6(arbitrary::any::<u8>()).prop_map(TestValue)\n    }\n\n    proptest! {\n        #[test]\n        fn prop_lmdb_roundtrip_succeeds(inputs in vec((test_key_arb(), test_value_arb()), get_range())) {\n            assert!(lmdb_roundtrip_succeeds(&inputs));\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/global_state/trie_store/operations/tests/keys.rs",
    "content": "mod partial_tries {\n    use crate::global_state::{\n        transaction_source::{Transaction, TransactionSource},\n        trie::Trie,\n        trie_store::operations::{\n            self,\n            tests::{\n                bytesrepr_utils::PanickingFromBytes, LmdbTestContext, TestKey, TestValue,\n                TEST_LEAVES, TEST_TRIE_GENERATORS,\n            },\n        },\n    };\n\n    #[test]\n    fn lmdb_keys_from_n_leaf_partial_trie_had_expected_results() {\n        for (num_leaves, generator) in TEST_TRIE_GENERATORS.iter().enumerate() {\n            let (root_hash, tries) = generator().unwrap();\n            let context = LmdbTestContext::new(&tries).unwrap();\n            let test_leaves = TEST_LEAVES;\n            let (used, _) = test_leaves.split_at(num_leaves);\n\n            let expected = {\n                let mut tmp = used\n                    .iter()\n                    .filter_map(Trie::key)\n                    .cloned()\n                    .collect::<Vec<TestKey>>();\n                tmp.sort();\n                tmp\n            };\n            let actual = {\n                let txn = context.environment.create_read_txn().unwrap();\n                let mut tmp = operations::keys::<TestKey, PanickingFromBytes<TestValue>, _, _>(\n                    &txn,\n                    &context.store,\n                    &root_hash,\n                )\n                .filter_map(Result::ok)\n                .collect::<Vec<TestKey>>();\n                txn.commit().unwrap();\n                tmp.sort();\n                tmp\n            };\n            assert_eq!(actual, expected);\n        }\n    }\n}\n\nmod full_tries {\n    use casper_types::Digest;\n\n    use crate::global_state::{\n        transaction_source::{Transaction, TransactionSource},\n        trie::Trie,\n        trie_store::operations::{\n            self,\n            tests::{\n                bytesrepr_utils::PanickingFromBytes, LmdbTestContext, TestKey, 
TestValue,\n                EMPTY_HASHED_TEST_TRIES, TEST_LEAVES, TEST_TRIE_GENERATORS,\n            },\n        },\n    };\n\n    #[test]\n    fn lmdb_keys_from_n_leaf_full_trie_had_expected_results() {\n        let context = LmdbTestContext::new(EMPTY_HASHED_TEST_TRIES).unwrap();\n        let mut states: Vec<Digest> = Vec::new();\n\n        for (state_index, generator) in TEST_TRIE_GENERATORS.iter().enumerate() {\n            let (root_hash, tries) = generator().unwrap();\n            context.update(&tries).unwrap();\n            states.push(root_hash);\n\n            for (num_leaves, state) in states[..state_index].iter().enumerate() {\n                let test_leaves = TEST_LEAVES;\n                let (used, _unused) = test_leaves.split_at(num_leaves);\n\n                let expected = {\n                    let mut tmp = used\n                        .iter()\n                        .filter_map(Trie::key)\n                        .cloned()\n                        .collect::<Vec<TestKey>>();\n                    tmp.sort();\n                    tmp\n                };\n                let actual = {\n                    let txn = context.environment.create_read_txn().unwrap();\n                    let mut tmp = operations::keys::<TestKey, PanickingFromBytes<TestValue>, _, _>(\n                        &txn,\n                        &context.store,\n                        state,\n                    )\n                    .filter_map(Result::ok)\n                    .collect::<Vec<TestKey>>();\n                    txn.commit().unwrap();\n                    tmp.sort();\n                    tmp\n                };\n                assert_eq!(actual, expected);\n            }\n        }\n    }\n}\n\n#[cfg(debug_assertions)]\nmod keys_iterator {\n    use casper_types::{bytesrepr, global_state::Pointer, Digest};\n\n    use crate::global_state::{\n        transaction_source::TransactionSource,\n        trie::Trie,\n        trie_store::operations::{\n            
self,\n            tests::{\n                bytesrepr_utils::PanickingFromBytes, hash_test_tries, HashedTestTrie, HashedTrie,\n                LmdbTestContext, TestKey, TestValue, TEST_LEAVES,\n            },\n        },\n    };\n\n    fn create_invalid_extension_trie() -> Result<(Digest, Vec<HashedTestTrie>), bytesrepr::Error> {\n        let leaves = hash_test_tries(&TEST_LEAVES[2..3])?;\n        let ext_1 = HashedTrie::new(Trie::extension(\n            vec![0u8, 0],\n            Pointer::NodePointer(leaves[0].hash),\n        ))?;\n\n        let root = HashedTrie::new(Trie::node(&[(0, Pointer::NodePointer(ext_1.hash))]))?;\n        let root_hash = root.hash;\n\n        let tries = vec![root, ext_1, leaves[0].clone()];\n\n        Ok((root_hash, tries))\n    }\n\n    fn create_invalid_path_trie() -> Result<(Digest, Vec<HashedTestTrie>), bytesrepr::Error> {\n        let leaves = hash_test_tries(&TEST_LEAVES[..1])?;\n\n        let root = HashedTrie::new(Trie::node(&[(1, Pointer::NodePointer(leaves[0].hash))]))?;\n        let root_hash = root.hash;\n\n        let tries = vec![root, leaves[0].clone()];\n\n        Ok((root_hash, tries))\n    }\n\n    fn create_invalid_hash_trie() -> Result<(Digest, Vec<HashedTestTrie>), bytesrepr::Error> {\n        let leaves = hash_test_tries(&TEST_LEAVES[..2])?;\n\n        let root = HashedTrie::new(Trie::node(&[(0, Pointer::NodePointer(leaves[1].hash))]))?;\n        let root_hash = root.hash;\n\n        let tries = vec![root, leaves[0].clone()];\n\n        Ok((root_hash, tries))\n    }\n\n    macro_rules! 
return_on_err {\n        ($x:expr) => {\n            match $x {\n                Ok(result) => result,\n                Err(_) => {\n                    return; // we expect the test to panic, so this will cause a test failure\n                }\n            }\n        };\n    }\n\n    fn test_trie(root_hash: Digest, tries: Vec<HashedTestTrie>) {\n        let context = return_on_err!(LmdbTestContext::new(&tries));\n        let txn = return_on_err!(context.environment.create_read_txn());\n        let _tmp = operations::keys::<TestKey, PanickingFromBytes<TestValue>, _, _>(\n            &txn,\n            &context.store,\n            &root_hash,\n        )\n        .collect::<Vec<_>>();\n    }\n\n    #[test]\n    #[should_panic = \"Expected a LazilyDeserializedTrie::Node but received\"]\n    fn should_panic_on_leaf_after_extension() {\n        let (root_hash, tries) = return_on_err!(create_invalid_extension_trie());\n        test_trie(root_hash, tries);\n    }\n\n    #[test]\n    #[should_panic = \"Expected key bytes to start with the current path\"]\n    fn should_panic_when_key_not_matching_path() {\n        let (root_hash, tries) = return_on_err!(create_invalid_path_trie());\n        test_trie(root_hash, tries);\n    }\n\n    #[test]\n    #[should_panic = \"Trie at the pointer is expected to exist\"]\n    fn should_panic_on_pointer_to_nonexisting_hash() {\n        let (root_hash, tries) = return_on_err!(create_invalid_hash_trie());\n        test_trie(root_hash, tries);\n    }\n}\n\nmod keys_with_prefix_iterator {\n    use crate::global_state::{\n        transaction_source::TransactionSource,\n        trie::Trie,\n        trie_store::operations::{\n            self,\n            tests::{\n                bytesrepr_utils::PanickingFromBytes, create_6_leaf_trie, LmdbTestContext, TestKey,\n                TestValue, TEST_LEAVES,\n            },\n        },\n    };\n\n    fn expected_keys(prefix: &[u8]) -> Vec<TestKey> {\n        let mut tmp = TEST_LEAVES\n            
.iter()\n            .filter_map(Trie::key)\n            .filter(|key| key.0.starts_with(prefix))\n            .cloned()\n            .collect::<Vec<TestKey>>();\n        tmp.sort();\n        tmp\n    }\n\n    fn test_prefix(prefix: &[u8]) {\n        let (root_hash, tries) = create_6_leaf_trie().expect(\"should create a trie\");\n        let context = LmdbTestContext::new(&tries).expect(\"should create a new context\");\n        let txn = context\n            .environment\n            .create_read_txn()\n            .expect(\"should create a read txn\");\n        let expected = expected_keys(prefix);\n        let mut actual =\n            operations::keys_with_prefix::<TestKey, PanickingFromBytes<TestValue>, _, _>(\n                &txn,\n                &context.store,\n                &root_hash,\n                prefix,\n            )\n            .filter_map(Result::ok)\n            .collect::<Vec<_>>();\n        actual.sort();\n        assert_eq!(expected, actual);\n    }\n\n    #[test]\n    fn test_prefixes() {\n        test_prefix(&[]); // 6 leaves\n        test_prefix(&[0]); // 6 leaves\n        test_prefix(&[0, 1]); // 1 leaf\n        test_prefix(&[0, 1, 0]); // 1 leaf\n        test_prefix(&[0, 1, 1]); // 0 leaves\n        test_prefix(&[0, 0]); // 5 leaves\n        test_prefix(&[0, 0, 1]); // 0 leaves\n        test_prefix(&[0, 0, 2]); // 1 leaf\n        test_prefix(&[0, 0, 0, 0]); // 3 leaves, prefix points to an Extension\n        test_prefix(&[0, 0, 0, 0, 0]); // 3 leaves\n        test_prefix(&[0, 0, 0, 0, 0, 0]); // 2 leaves\n        test_prefix(&[0, 0, 0, 0, 0, 0, 1]); // 1 leaf\n    }\n}\n"
  },
  {
    "path": "storage/src/global_state/trie_store/operations/tests/mod.rs",
    "content": "pub(crate) mod bytesrepr_utils;\nmod ee_699;\nmod keys;\nmod proptests;\nmod prune;\nmod read;\nmod scan;\nmod synchronize;\nmod write;\n\nuse std::{convert, ops::Not};\n\nuse lmdb::DatabaseFlags;\nuse tempfile::{tempdir, TempDir};\n\nuse casper_types::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    global_state::{Pointer, TrieMerkleProof},\n    Digest,\n};\n\nuse crate::global_state::{\n    error,\n    transaction_source::{lmdb::LmdbEnvironment, Readable, Transaction, TransactionSource},\n    trie::Trie,\n    trie_store::{\n        lmdb::LmdbTrieStore,\n        operations::{self, read, read_with_proof, write, ReadResult, WriteResult},\n        TrieStore,\n    },\n    DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS,\n};\n\nuse super::compute_state_hash;\n\nuse self::bytesrepr_utils::PanickingFromBytes;\n\nconst TEST_KEY_LENGTH: usize = 7;\n\n/// A short key type for tests.\n#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]\nstruct TestKey([u8; TEST_KEY_LENGTH]);\n\nimpl ToBytes for TestKey {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        Ok(self.0.to_vec())\n    }\n\n    fn serialized_length(&self) -> usize {\n        TEST_KEY_LENGTH\n    }\n}\n\nimpl FromBytes for TestKey {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (key, rem) = bytes.split_at(TEST_KEY_LENGTH);\n        let mut ret = [0u8; TEST_KEY_LENGTH];\n        ret.copy_from_slice(key);\n        Ok((TestKey(ret), rem))\n    }\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]\nenum VariableAddr {\n    Empty,\n    LegacyAddr([u8; TEST_KEY_LENGTH]),\n}\n\npub enum VariableAddrTag {\n    Empty = 0,\n    LegacyTestKey = 1,\n}\n\nimpl ToBytes for VariableAddr {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        1 + 
match self {\n            Self::Empty => 0,\n            Self::LegacyAddr(_) => TEST_KEY_LENGTH,\n        }\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            Self::Empty => writer.push(VariableAddrTag::Empty as u8),\n            Self::LegacyAddr(addr) => {\n                writer.push(VariableAddrTag::LegacyTestKey as u8);\n                writer.extend(addr.to_bytes()?);\n            }\n        }\n        Ok(())\n    }\n}\n\nimpl FromBytes for VariableAddr {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?;\n        match tag {\n            tag if tag == VariableAddrTag::Empty as u8 => Ok((VariableAddr::Empty, remainder)),\n            tag if tag == VariableAddrTag::LegacyTestKey as u8 => {\n                let (key, rem) = remainder.split_at(TEST_KEY_LENGTH);\n                let mut ret = [0u8; TEST_KEY_LENGTH];\n                ret.copy_from_slice(key);\n                Ok((VariableAddr::LegacyAddr(ret), rem))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]\nenum MultiVariantTestKey {\n    VariableSizedKey(VariableAddr),\n}\n\nconst VARIABLE_SIZE_KEY_TAG: u8 = 1;\n\nimpl ToBytes for MultiVariantTestKey {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        1 + match self {\n            Self::VariableSizedKey(addr) => addr.serialized_length(),\n        }\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            Self::VariableSizedKey(addr) => {\n                writer.push(VARIABLE_SIZE_KEY_TAG);\n                
writer.extend(addr.to_bytes()?);\n            }\n        }\n        Ok(())\n    }\n}\n\nimpl FromBytes for MultiVariantTestKey {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?;\n        match tag {\n            VARIABLE_SIZE_KEY_TAG => {\n                let (addr, rem) = FromBytes::from_bytes(remainder)?;\n                Ok((MultiVariantTestKey::VariableSizedKey(addr), rem))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\nconst TEST_VAL_LENGTH: usize = 6;\n\n/// A short value type for tests.\n#[derive(Debug, Copy, Clone, PartialEq, Eq)]\nstruct TestValue([u8; TEST_VAL_LENGTH]);\n\nimpl ToBytes for TestValue {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        Ok(self.0.to_vec())\n    }\n\n    fn serialized_length(&self) -> usize {\n        TEST_VAL_LENGTH\n    }\n}\n\nimpl FromBytes for TestValue {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (key, rem) = bytes.split_at(TEST_VAL_LENGTH);\n        let mut ret = [0u8; TEST_VAL_LENGTH];\n        ret.copy_from_slice(key);\n\n        Ok((TestValue(ret), rem))\n    }\n}\n\ntype TestTrie = Trie<TestKey, TestValue>;\n\ntype HashedTestTrie = HashedTrie<TestKey, TestValue>;\n\n/// A pairing of a trie element and its hash.\n#[derive(Debug, Clone, PartialEq, Eq)]\nstruct HashedTrie<K, V> {\n    hash: Digest,\n    trie: Trie<K, V>,\n}\n\nimpl<K: ToBytes, V: ToBytes> HashedTrie<K, V> {\n    pub fn new(trie: Trie<K, V>) -> Result<Self, bytesrepr::Error> {\n        let trie_bytes = trie.to_bytes()?;\n        let hash = Digest::hash(trie_bytes);\n        Ok(HashedTrie { hash, trie })\n    }\n}\n\nconst EMPTY_HASHED_TEST_TRIES: &[HashedTestTrie] = &[];\n\nconst TEST_LEAVES_LENGTH: usize = 6;\n\n/// Keys have been chosen deliberately and the `create_` functions below depend\n/// on these exact definitions.  
Values are arbitrary.\nconst TEST_LEAVES: [TestTrie; TEST_LEAVES_LENGTH] = [\n    Trie::Leaf {\n        key: TestKey([0u8, 0, 0, 0, 0, 0, 0]),\n        value: TestValue(*b\"value0\"),\n    },\n    Trie::Leaf {\n        key: TestKey([0u8, 0, 0, 0, 0, 0, 1]),\n        value: TestValue(*b\"value1\"),\n    },\n    Trie::Leaf {\n        key: TestKey([0u8, 0, 0, 2, 0, 0, 0]),\n        value: TestValue(*b\"value2\"),\n    },\n    Trie::Leaf {\n        key: TestKey([0u8, 0, 0, 0, 0, 255, 0]),\n        value: TestValue(*b\"value3\"),\n    },\n    Trie::Leaf {\n        key: TestKey([0u8, 1, 0, 0, 0, 0, 0]),\n        value: TestValue(*b\"value4\"),\n    },\n    Trie::Leaf {\n        key: TestKey([0u8, 0, 2, 0, 0, 0, 0]),\n        value: TestValue(*b\"value5\"),\n    },\n];\n\nconst TEST_LEAVES_UPDATED: [TestTrie; TEST_LEAVES_LENGTH] = [\n    Trie::Leaf {\n        key: TestKey([0u8, 0, 0, 0, 0, 0, 0]),\n        value: TestValue(*b\"valueA\"),\n    },\n    Trie::Leaf {\n        key: TestKey([0u8, 0, 0, 0, 0, 0, 1]),\n        value: TestValue(*b\"valueB\"),\n    },\n    Trie::Leaf {\n        key: TestKey([0u8, 0, 0, 2, 0, 0, 0]),\n        value: TestValue(*b\"valueC\"),\n    },\n    Trie::Leaf {\n        key: TestKey([0u8, 0, 0, 0, 0, 255, 0]),\n        value: TestValue(*b\"valueD\"),\n    },\n    Trie::Leaf {\n        key: TestKey([0u8, 1, 0, 0, 0, 0, 0]),\n        value: TestValue(*b\"valueE\"),\n    },\n    Trie::Leaf {\n        key: TestKey([0u8, 0, 2, 0, 0, 0, 0]),\n        value: TestValue(*b\"valueF\"),\n    },\n];\n\nconst TEST_LEAVES_NON_COLLIDING: [TestTrie; TEST_LEAVES_LENGTH] = [\n    Trie::Leaf {\n        key: TestKey([0u8, 0, 0, 0, 0, 0, 0]),\n        value: TestValue(*b\"valueA\"),\n    },\n    Trie::Leaf {\n        key: TestKey([1u8, 0, 0, 0, 0, 0, 0]),\n        value: TestValue(*b\"valueB\"),\n    },\n    Trie::Leaf {\n        key: TestKey([2u8, 0, 0, 0, 0, 0, 0]),\n        value: TestValue(*b\"valueC\"),\n    },\n    Trie::Leaf {\n        key: TestKey([3u8, 0, 
0, 0, 0, 0, 0]),\n        value: TestValue(*b\"valueD\"),\n    },\n    Trie::Leaf {\n        key: TestKey([4u8, 0, 0, 0, 0, 0, 0]),\n        value: TestValue(*b\"valueE\"),\n    },\n    Trie::Leaf {\n        key: TestKey([5u8, 0, 0, 0, 0, 0, 0]),\n        value: TestValue(*b\"valueF\"),\n    },\n];\n\nconst TEST_LEAVES_ADJACENTS: [TestTrie; TEST_LEAVES_LENGTH] = [\n    Trie::Leaf {\n        key: TestKey([0u8, 0, 0, 0, 0, 0, 2]),\n        value: TestValue(*b\"valueA\"),\n    },\n    Trie::Leaf {\n        key: TestKey([0u8, 0, 0, 0, 0, 0, 3]),\n        value: TestValue(*b\"valueB\"),\n    },\n    Trie::Leaf {\n        key: TestKey([0u8, 0, 0, 3, 0, 0, 0]),\n        value: TestValue(*b\"valueC\"),\n    },\n    Trie::Leaf {\n        key: TestKey([0u8, 0, 0, 0, 0, 1, 0]),\n        value: TestValue(*b\"valueD\"),\n    },\n    Trie::Leaf {\n        key: TestKey([0u8, 2, 0, 0, 0, 0, 0]),\n        value: TestValue(*b\"valueE\"),\n    },\n    Trie::Leaf {\n        key: TestKey([0u8, 0, 3, 0, 0, 0, 0]),\n        value: TestValue(*b\"valueF\"),\n    },\n];\n\ntype TrieGenerator<K, V> = fn() -> Result<(Digest, Vec<HashedTrie<K, V>>), bytesrepr::Error>;\n\nconst TEST_TRIE_GENERATORS_LENGTH: usize = 7;\n\nconst TEST_TRIE_GENERATORS: [TrieGenerator<TestKey, TestValue>; TEST_TRIE_GENERATORS_LENGTH] = [\n    create_0_leaf_trie,\n    create_1_leaf_trie,\n    create_2_leaf_trie,\n    create_3_leaf_trie,\n    create_4_leaf_trie,\n    create_5_leaf_trie,\n    create_6_leaf_trie,\n];\n\nfn hash_test_tries(tries: &[TestTrie]) -> Result<Vec<HashedTestTrie>, bytesrepr::Error> {\n    tries\n        .iter()\n        .map(|trie| HashedTestTrie::new(trie.to_owned()))\n        .collect()\n}\n\nfn create_0_leaf_trie() -> Result<(Digest, Vec<HashedTestTrie>), bytesrepr::Error> {\n    let root = HashedTrie::new(Trie::node(&[]))?;\n\n    let root_hash: Digest = root.hash;\n\n    let parents: Vec<HashedTestTrie> = vec![root];\n\n    let tries: Vec<HashedTestTrie> = {\n        let mut ret = 
Vec::new();\n        ret.extend(parents);\n        ret\n    };\n\n    Ok((root_hash, tries))\n}\n\nfn create_empty_trie<K, V>() -> Result<(Digest, Vec<HashedTrie<K, V>>), bytesrepr::Error>\nwhere\n    K: ToBytes,\n    V: ToBytes,\n{\n    let root_node = HashedTrie::<K, V>::new(Trie::node(&[]))?;\n    let root_hash = root_node.hash;\n    let tries = vec![root_node];\n\n    Ok((root_hash, tries))\n}\n\nfn create_1_leaf_trie() -> Result<(Digest, Vec<HashedTestTrie>), bytesrepr::Error> {\n    let leaves = hash_test_tries(&TEST_LEAVES[..1])?;\n\n    let root = HashedTrie::new(Trie::node(&[(0, Pointer::LeafPointer(leaves[0].hash))]))?;\n\n    let root_hash: Digest = root.hash;\n\n    let parents: Vec<HashedTestTrie> = vec![root];\n\n    let tries: Vec<HashedTestTrie> = {\n        let mut ret = Vec::new();\n        ret.extend(leaves);\n        ret.extend(parents);\n        ret\n    };\n\n    Ok((root_hash, tries))\n}\n\nfn create_2_leaf_trie() -> Result<(Digest, Vec<HashedTestTrie>), bytesrepr::Error> {\n    let leaves = hash_test_tries(&TEST_LEAVES[..2])?;\n\n    let node = HashedTrie::new(Trie::node(&[\n        (0, Pointer::LeafPointer(leaves[0].hash)),\n        (1, Pointer::LeafPointer(leaves[1].hash)),\n    ]))?;\n\n    let ext = HashedTrie::new(Trie::extension(\n        vec![0u8, 0, 0, 0, 0],\n        Pointer::NodePointer(node.hash),\n    ))?;\n\n    let root = HashedTrie::new(Trie::node(&[(0, Pointer::NodePointer(ext.hash))]))?;\n\n    let root_hash = root.hash;\n\n    let parents: Vec<HashedTestTrie> = vec![root, ext, node];\n\n    let tries: Vec<HashedTestTrie> = {\n        let mut ret = Vec::new();\n        ret.extend(leaves);\n        ret.extend(parents);\n        ret\n    };\n\n    Ok((root_hash, tries))\n}\n\nfn create_3_leaf_trie() -> Result<(Digest, Vec<HashedTestTrie>), bytesrepr::Error> {\n    let leaves = hash_test_tries(&TEST_LEAVES[..3])?;\n\n    let node_1 = HashedTrie::new(Trie::node(&[\n        (0, Pointer::LeafPointer(leaves[0].hash)),\n        (1, 
Pointer::LeafPointer(leaves[1].hash)),\n    ]))?;\n\n    let ext_1 = HashedTrie::new(Trie::extension(\n        vec![0u8, 0],\n        Pointer::NodePointer(node_1.hash),\n    ))?;\n\n    let node_2 = HashedTrie::new(Trie::node(&[\n        (0, Pointer::NodePointer(ext_1.hash)),\n        (2, Pointer::LeafPointer(leaves[2].hash)),\n    ]))?;\n\n    let ext_2 = HashedTrie::new(Trie::extension(\n        vec![0u8, 0],\n        Pointer::NodePointer(node_2.hash),\n    ))?;\n\n    let root = HashedTrie::new(Trie::node(&[(0, Pointer::NodePointer(ext_2.hash))]))?;\n\n    let root_hash = root.hash;\n\n    let parents: Vec<HashedTestTrie> = vec![root, ext_2, node_2, ext_1, node_1];\n\n    let tries: Vec<HashedTestTrie> = {\n        let mut ret = Vec::new();\n        ret.extend(leaves);\n        ret.extend(parents);\n        ret\n    };\n\n    Ok((root_hash, tries))\n}\n\nfn create_4_leaf_trie() -> Result<(Digest, Vec<HashedTestTrie>), bytesrepr::Error> {\n    let leaves = hash_test_tries(&TEST_LEAVES[..4])?;\n\n    let node_1 = HashedTrie::new(Trie::node(&[\n        (0, Pointer::LeafPointer(leaves[0].hash)),\n        (1, Pointer::LeafPointer(leaves[1].hash)),\n    ]))?;\n\n    let node_2 = HashedTrie::new(Trie::node(&[\n        (0, Pointer::NodePointer(node_1.hash)),\n        (255, Pointer::LeafPointer(leaves[3].hash)),\n    ]))?;\n\n    let ext_1 = HashedTrie::new(Trie::extension(\n        vec![0u8],\n        Pointer::NodePointer(node_2.hash),\n    ))?;\n\n    let node_3 = HashedTrie::new(Trie::node(&[\n        (0, Pointer::NodePointer(ext_1.hash)),\n        (2, Pointer::LeafPointer(leaves[2].hash)),\n    ]))?;\n\n    let ext_2 = HashedTrie::new(Trie::extension(\n        vec![0u8, 0],\n        Pointer::NodePointer(node_3.hash),\n    ))?;\n\n    let root = HashedTrie::new(Trie::node(&[(0, Pointer::NodePointer(ext_2.hash))]))?;\n\n    let root_hash = root.hash;\n\n    let parents: Vec<HashedTestTrie> = vec![root, ext_2, node_3, ext_1, node_2, node_1];\n\n    let tries: 
Vec<HashedTestTrie> = {\n        let mut ret = Vec::new();\n        ret.extend(leaves);\n        ret.extend(parents);\n        ret\n    };\n\n    Ok((root_hash, tries))\n}\n\nfn create_5_leaf_trie() -> Result<(Digest, Vec<HashedTestTrie>), bytesrepr::Error> {\n    let leaves = hash_test_tries(&TEST_LEAVES[..5])?;\n\n    let node_1 = HashedTrie::new(Trie::node(&[\n        (0, Pointer::LeafPointer(leaves[0].hash)),\n        (1, Pointer::LeafPointer(leaves[1].hash)),\n    ]))?;\n\n    let node_2 = HashedTrie::new(Trie::node(&[\n        (0, Pointer::NodePointer(node_1.hash)),\n        (255, Pointer::LeafPointer(leaves[3].hash)),\n    ]))?;\n\n    let ext_1 = HashedTrie::new(Trie::extension(\n        vec![0u8],\n        Pointer::NodePointer(node_2.hash),\n    ))?;\n\n    let node_3 = HashedTrie::new(Trie::node(&[\n        (0, Pointer::NodePointer(ext_1.hash)),\n        (2, Pointer::LeafPointer(leaves[2].hash)),\n    ]))?;\n\n    let ext_2 = HashedTrie::new(Trie::extension(\n        vec![0u8],\n        Pointer::NodePointer(node_3.hash),\n    ))?;\n\n    let node_4 = HashedTrie::new(Trie::node(&[\n        (0, Pointer::NodePointer(ext_2.hash)),\n        (1, Pointer::LeafPointer(leaves[4].hash)),\n    ]))?;\n\n    let root = HashedTrie::new(Trie::node(&[(0, Pointer::NodePointer(node_4.hash))]))?;\n\n    let root_hash = root.hash;\n\n    let parents: Vec<HashedTestTrie> = vec![root, node_4, ext_2, node_3, ext_1, node_2, node_1];\n\n    let tries: Vec<HashedTestTrie> = {\n        let mut ret = Vec::new();\n        ret.extend(leaves);\n        ret.extend(parents);\n        ret\n    };\n\n    Ok((root_hash, tries))\n}\n\nfn create_6_leaf_trie() -> Result<(Digest, Vec<HashedTestTrie>), bytesrepr::Error> {\n    let leaves = hash_test_tries(&TEST_LEAVES)?;\n\n    let node_1 = HashedTrie::new(Trie::node(&[\n        (0, Pointer::LeafPointer(leaves[0].hash)),\n        (1, Pointer::LeafPointer(leaves[1].hash)),\n    ]))?;\n\n    let node_2 = HashedTrie::new(Trie::node(&[\n        (0, 
Pointer::NodePointer(node_1.hash)),\n        (255, Pointer::LeafPointer(leaves[3].hash)),\n    ]))?;\n\n    let ext = HashedTrie::new(Trie::extension(\n        vec![0u8],\n        Pointer::NodePointer(node_2.hash),\n    ))?;\n\n    let node_3 = HashedTrie::new(Trie::node(&[\n        (0, Pointer::NodePointer(ext.hash)),\n        (2, Pointer::LeafPointer(leaves[2].hash)),\n    ]))?;\n\n    let node_4 = HashedTrie::new(Trie::node(&[\n        (0, Pointer::NodePointer(node_3.hash)),\n        (2, Pointer::LeafPointer(leaves[5].hash)),\n    ]))?;\n\n    let node_5 = HashedTrie::new(Trie::node(&[\n        (0, Pointer::NodePointer(node_4.hash)),\n        (1, Pointer::LeafPointer(leaves[4].hash)),\n    ]))?;\n\n    let root = HashedTrie::new(Trie::node(&[(0, Pointer::NodePointer(node_5.hash))]))?;\n\n    let root_hash = root.hash;\n\n    let parents: Vec<HashedTestTrie> = vec![root, node_5, node_4, node_3, ext, node_2, node_1];\n\n    let tries: Vec<HashedTestTrie> = {\n        let mut ret = Vec::new();\n        ret.extend(leaves);\n        ret.extend(parents);\n        ret\n    };\n\n    Ok((root_hash, tries))\n}\n\nfn put_tries<'a, K, V, R, S, E>(\n    environment: &'a R,\n    store: &S,\n    tries: &[HashedTrie<K, V>],\n) -> Result<(), E>\nwhere\n    K: ToBytes,\n    V: ToBytes,\n    R: TransactionSource<'a, Handle = S::Handle>,\n    S: TrieStore<K, V>,\n    S::Error: From<R::Error>,\n    E: From<R::Error> + From<S::Error> + From<bytesrepr::Error>,\n{\n    if tries.is_empty() {\n        return Ok(());\n    }\n    let mut txn = environment.create_read_write_txn()?;\n    for HashedTrie { hash, trie } in tries.iter() {\n        store.put(&mut txn, hash, trie)?;\n    }\n    txn.commit()?;\n    Ok(())\n}\n\n// A context for holding lmdb-based test resources\nstruct LmdbTestContext {\n    _temp_dir: TempDir,\n    environment: LmdbEnvironment,\n    store: LmdbTrieStore,\n}\n\nimpl LmdbTestContext {\n    fn new<K, V>(tries: &[HashedTrie<K, V>]) -> anyhow::Result<Self>\n    
where\n        K: FromBytes + ToBytes,\n        V: FromBytes + ToBytes,\n    {\n        let _temp_dir = tempdir()?;\n        let environment = LmdbEnvironment::new(\n            _temp_dir.path(),\n            DEFAULT_MAX_DB_SIZE,\n            DEFAULT_MAX_READERS,\n            true,\n        )?;\n        let store = LmdbTrieStore::new(&environment, None, DatabaseFlags::empty())?;\n        put_tries::<_, _, _, _, error::Error>(&environment, &store, tries)?;\n        Ok(LmdbTestContext {\n            _temp_dir,\n            environment,\n            store,\n        })\n    }\n\n    fn update<K, V>(&self, tries: &[HashedTrie<K, V>]) -> anyhow::Result<()>\n    where\n        K: ToBytes,\n        V: ToBytes,\n    {\n        put_tries::<_, _, _, _, error::Error>(&self.environment, &self.store, tries)?;\n        Ok(())\n    }\n}\n\nfn check_leaves_exist<K, V, T, S, E>(\n    txn: &T,\n    store: &S,\n    root: &Digest,\n    leaves: &[Trie<K, V>],\n) -> Result<Vec<bool>, E>\nwhere\n    K: ToBytes + FromBytes + Eq + std::fmt::Debug,\n    V: ToBytes + FromBytes + Eq + Copy,\n    T: Readable<Handle = S::Handle>,\n    S: TrieStore<K, V>,\n    S::Error: From<T::Error>,\n    E: From<S::Error> + From<bytesrepr::Error>,\n{\n    let mut ret = Vec::new();\n\n    for leaf in leaves {\n        if let Trie::Leaf { key, value } = leaf {\n            let maybe_value: ReadResult<V> = read::<_, _, _, _, E>(txn, store, root, key)?;\n            if let ReadResult::Found(value_found) = maybe_value {\n                ret.push(*value == value_found);\n            }\n        } else {\n            panic!(\"leaves should only contain leaves\")\n        }\n    }\n    Ok(ret)\n}\n\n/// For a given vector of leaves check the merkle proofs exist and are correct\nfn check_merkle_proofs<K, V, T, S, E>(\n    txn: &T,\n    store: &S,\n    root: &Digest,\n    leaves: &[Trie<K, V>],\n) -> Result<Vec<bool>, E>\nwhere\n    K: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy,\n    V: ToBytes + FromBytes + Eq + 
Copy,\n    T: Readable<Handle = S::Handle>,\n    S: TrieStore<K, V>,\n    S::Error: From<T::Error>,\n    E: From<S::Error> + From<bytesrepr::Error>,\n{\n    let mut ret = Vec::new();\n\n    for leaf in leaves {\n        if let Trie::Leaf { key, value } = leaf {\n            let maybe_proof: ReadResult<TrieMerkleProof<K, V>> =\n                read_with_proof::<_, _, _, _, E>(txn, store, root, key)?;\n            match maybe_proof {\n                ReadResult::Found(proof) => {\n                    let hash = compute_state_hash(&proof)?;\n                    ret.push(hash == *root && proof.value() == value);\n                }\n                ReadResult::NotFound => {\n                    ret.push(false);\n                }\n                ReadResult::RootNotFound => panic!(\"Root not found!\"),\n            };\n        } else {\n            panic!(\"leaves should only contain leaves\")\n        }\n    }\n    Ok(ret)\n}\n\nfn check_keys<K, V, T, S>(txn: &T, store: &S, root: &Digest, leaves: &[Trie<K, V>]) -> bool\nwhere\n    K: ToBytes + FromBytes + Eq + std::fmt::Debug + Clone + Ord,\n    V: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy,\n    T: Readable<Handle = S::Handle>,\n    S: TrieStore<K, V>,\n    S::Error: From<T::Error>,\n{\n    let expected = {\n        let mut tmp = leaves\n            .iter()\n            .filter_map(Trie::key)\n            .cloned()\n            .collect::<Vec<K>>();\n        tmp.sort();\n        tmp\n    };\n    let actual = {\n        let mut tmp = operations::keys::<_, _, _, _>(txn, store, root)\n            .filter_map(Result::ok)\n            .collect::<Vec<K>>();\n        tmp.sort();\n        tmp\n    };\n    expected == actual\n}\n\nfn check_leaves<'a, K, V, R, S, E>(\n    environment: &'a R,\n    store: &S,\n    root: &Digest,\n    present: &[Trie<K, V>],\n    absent: &[Trie<K, V>],\n) -> Result<(), E>\nwhere\n    K: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy + Clone + Ord,\n    V: ToBytes + FromBytes + Eq + 
std::fmt::Debug + Copy,\n    R: TransactionSource<'a, Handle = S::Handle>,\n    S: TrieStore<K, V>,\n    S::Error: From<R::Error>,\n    E: From<R::Error> + From<S::Error> + From<bytesrepr::Error>,\n{\n    let txn: R::ReadTransaction = environment.create_read_txn()?;\n\n    assert!(\n        check_leaves_exist::<_, _, _, _, E>(&txn, store, root, present)?\n            .into_iter()\n            .all(convert::identity)\n    );\n\n    assert!(\n        check_merkle_proofs::<_, _, _, _, E>(&txn, store, root, present)?\n            .into_iter()\n            .all(convert::identity)\n    );\n\n    assert!(\n        check_leaves_exist::<_, _, _, _, E>(&txn, store, root, absent)?\n            .into_iter()\n            .all(bool::not)\n    );\n\n    assert!(\n        check_merkle_proofs::<_, _, _, _, E>(&txn, store, root, absent)?\n            .into_iter()\n            .all(bool::not)\n    );\n\n    assert!(check_keys::<_, _, _, _>(&txn, store, root, present,));\n\n    txn.commit()?;\n    Ok(())\n}\n\nfn write_leaves<'a, K, V, R, S, E>(\n    environment: &'a R,\n    store: &S,\n    root_hash: &Digest,\n    leaves: &[Trie<K, V>],\n) -> Result<Vec<WriteResult>, E>\nwhere\n    K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug,\n    V: ToBytes + FromBytes + Clone + Eq,\n    R: TransactionSource<'a, Handle = S::Handle>,\n    S: TrieStore<K, PanickingFromBytes<V>>,\n    S::Error: From<R::Error>,\n    E: From<R::Error> + From<S::Error> + From<bytesrepr::Error>,\n{\n    let mut results = Vec::new();\n    if leaves.is_empty() {\n        return Ok(results);\n    }\n    let mut root_hash = root_hash.to_owned();\n    let mut txn: R::ReadWriteTransaction = environment.create_read_write_txn()?;\n\n    for leaf in leaves.iter() {\n        if let Trie::Leaf { key, value } = leaf {\n            let new_value = PanickingFromBytes::new(value.clone());\n            let write_result = write::<K, PanickingFromBytes<V>, _, _, E>(\n                &mut txn, store, &root_hash, key, &new_value,\n  
          )?;\n            match write_result {\n                WriteResult::Written(hash) => {\n                    root_hash = hash;\n                }\n                WriteResult::AlreadyExists => (),\n                WriteResult::RootNotFound => panic!(\"write_leaves given an invalid root\"),\n            };\n            results.push(write_result);\n        } else {\n            panic!(\"leaves should contain only leaves\");\n        }\n    }\n    txn.commit()?;\n    Ok(results)\n}\n\nfn check_pairs_proofs<'a, K, V, R, S, E>(\n    environment: &'a R,\n    store: &S,\n    root_hashes: &[Digest],\n    pairs: &[(K, V)],\n) -> Result<bool, E>\nwhere\n    K: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy + Clone + Ord,\n    V: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy,\n    R: TransactionSource<'a, Handle = S::Handle>,\n    S: TrieStore<K, V>,\n    S::Error: From<R::Error>,\n    E: From<R::Error> + From<S::Error> + From<bytesrepr::Error>,\n{\n    let txn = environment.create_read_txn()?;\n    for (index, root_hash) in root_hashes.iter().enumerate() {\n        for (key, value) in &pairs[..=index] {\n            let maybe_proof = read_with_proof::<_, _, _, _, E>(&txn, store, root_hash, key)?;\n            match maybe_proof {\n                ReadResult::Found(proof) => {\n                    let hash = compute_state_hash(&proof)?;\n                    if hash != *root_hash || proof.value() != value {\n                        return Ok(false);\n                    }\n                }\n                ReadResult::NotFound => return Ok(false),\n                ReadResult::RootNotFound => panic!(\"Root not found!\"),\n            };\n        }\n    }\n    Ok(true)\n}\n\nfn check_pairs<'a, K, V, R, S, E>(\n    environment: &'a R,\n    store: &S,\n    root_hashes: &[Digest],\n    pairs: &[(K, V)],\n) -> Result<bool, E>\nwhere\n    K: ToBytes + FromBytes + Eq + std::fmt::Debug + Clone + Ord,\n    V: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy,\n    R: 
TransactionSource<'a, Handle = S::Handle>,\n    S: TrieStore<K, V>,\n    S::Error: From<R::Error>,\n    E: From<R::Error> + From<S::Error> + From<bytesrepr::Error>,\n{\n    let txn: R::ReadTransaction = environment.create_read_txn()?;\n    for (index, root_hash) in root_hashes.iter().enumerate() {\n        for (key, value) in &pairs[..=index] {\n            let result = read::<_, _, _, _, E>(&txn, store, root_hash, key)?;\n            if ReadResult::Found(*value) != result {\n                return Ok(false);\n            }\n        }\n        let expected = {\n            let mut tmp = pairs[..=index]\n                .iter()\n                .map(|(k, _)| k)\n                .cloned()\n                .collect::<Vec<K>>();\n            tmp.sort();\n            tmp\n        };\n        let actual = {\n            let mut tmp = operations::keys::<_, _, _, _>(&txn, store, root_hash)\n                .filter_map(Result::ok)\n                .collect::<Vec<K>>();\n            tmp.sort();\n            tmp\n        };\n        if expected != actual {\n            return Ok(false);\n        }\n    }\n    Ok(true)\n}\n\nfn write_pairs<'a, K, V, R, S, E>(\n    environment: &'a R,\n    store: &S,\n    root_hash: &Digest,\n    pairs: &[(K, V)],\n) -> Result<Vec<Digest>, E>\nwhere\n    K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug,\n    V: ToBytes + FromBytes + Clone + Eq,\n    R: TransactionSource<'a, Handle = S::Handle>,\n    S: TrieStore<K, PanickingFromBytes<V>>,\n    S::Error: From<R::Error>,\n    E: From<R::Error> + From<S::Error> + From<bytesrepr::Error>,\n{\n    let mut results = Vec::new();\n    if pairs.is_empty() {\n        return Ok(results);\n    }\n    let mut root_hash = root_hash.to_owned();\n    let mut txn = environment.create_read_write_txn()?;\n\n    for (key, value) in pairs.iter() {\n        let new_val = PanickingFromBytes::new(value.clone());\n        match write::<K, PanickingFromBytes<V>, _, _, E>(\n            &mut txn, store, &root_hash, 
key, &new_val,\n        )? {\n            WriteResult::Written(hash) => {\n                root_hash = hash;\n            }\n            WriteResult::AlreadyExists => (),\n            WriteResult::RootNotFound => panic!(\"write_leaves given an invalid root\"),\n        };\n        results.push(root_hash);\n    }\n    txn.commit()?;\n    Ok(results)\n}\n\nfn writes_to_n_leaf_empty_trie_had_expected_results<'a, K, V, R, WR, S, WS, E>(\n    environment: &'a R,\n    writable_environment: &'a WR,\n    store: &S,\n    writable_store: &WS,\n    states: &[Digest],\n    test_leaves: &[Trie<K, V>],\n) -> Result<Vec<Digest>, E>\nwhere\n    K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug + Copy + Ord,\n    V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug + Copy,\n    R: TransactionSource<'a, Handle = S::Handle>,\n    WR: TransactionSource<'a, Handle = WS::Handle>,\n    S: TrieStore<K, V>,\n    WS: TrieStore<K, PanickingFromBytes<V>>,\n    S::Error: From<R::Error>,\n    WS::Error: From<WR::Error>,\n    E: From<R::Error> + From<S::Error> + From<bytesrepr::Error> + From<WR::Error> + From<WS::Error>,\n{\n    let mut states = states.to_vec();\n\n    // Write set of leaves to the trie\n    let hashes = write_leaves::<_, _, _, _, E>(\n        writable_environment,\n        writable_store,\n        states.last().unwrap(),\n        test_leaves,\n    )?\n    .into_iter()\n    .map(|result| match result {\n        WriteResult::Written(root_hash) => root_hash,\n        _ => panic!(\"write_leaves resulted in non-write\"),\n    })\n    .collect::<Vec<Digest>>();\n\n    states.extend(hashes);\n\n    // Check that the expected set of leaves is in the trie at every\n    // state, and that the set of other leaves is not.\n    for (num_leaves, state) in states.iter().enumerate() {\n        let (used, unused) = test_leaves.split_at(num_leaves);\n        check_leaves::<_, _, _, _, E>(environment, store, state, used, unused)?;\n    }\n\n    Ok(states)\n}\n"
  },
  {
    "path": "storage/src/global_state/trie_store/operations/tests/proptests.rs",
    "content": "use std::ops::RangeInclusive;\n\nuse proptest::{\n    array,\n    collection::vec,\n    prelude::{any, proptest, Strategy},\n};\n\nuse super::*;\n\nconst DEFAULT_MIN_LENGTH: usize = 0;\n\nconst DEFAULT_MAX_LENGTH: usize = 100;\n\nfn get_range() -> RangeInclusive<usize> {\n    let start = option_env!(\"CL_TRIE_TEST_VECTOR_MIN_LENGTH\")\n        .and_then(|s| str::parse::<usize>(s).ok())\n        .unwrap_or(DEFAULT_MIN_LENGTH);\n    let end = option_env!(\"CL_TRIE_TEST_VECTOR_MAX_LENGTH\")\n        .and_then(|s| str::parse::<usize>(s).ok())\n        .unwrap_or(DEFAULT_MAX_LENGTH);\n    RangeInclusive::new(start, end)\n}\n\nfn lmdb_roundtrip_succeeds(pairs: &[(TestKey, TestValue)]) -> bool {\n    let (root_hash, tries) = TEST_TRIE_GENERATORS[0]().unwrap();\n    let context = LmdbTestContext::new(&tries).unwrap();\n    let mut states_to_check = vec![];\n\n    let root_hashes = write_pairs::<_, _, _, _, error::Error>(\n        &context.environment,\n        &context.store,\n        &root_hash,\n        pairs,\n    )\n    .unwrap();\n\n    states_to_check.extend(root_hashes);\n\n    check_pairs::<_, _, _, _, error::Error>(\n        &context.environment,\n        &context.store,\n        &states_to_check,\n        pairs,\n    )\n    .unwrap();\n\n    check_pairs_proofs::<_, _, _, _, error::Error>(\n        &context.environment,\n        &context.store,\n        &states_to_check,\n        pairs,\n    )\n    .unwrap()\n}\n\nfn test_key_arb() -> impl Strategy<Value = TestKey> {\n    array::uniform7(any::<u8>()).prop_map(TestKey)\n}\n\nfn test_value_arb() -> impl Strategy<Value = TestValue> {\n    array::uniform6(any::<u8>()).prop_map(TestValue)\n}\n\nproptest! {\n    #[test]\n    fn prop_lmdb_roundtrip_succeeds(inputs in vec((test_key_arb(), test_value_arb()), get_range())) {\n        assert!(lmdb_roundtrip_succeeds(&inputs));\n    }\n}\n"
  },
  {
    "path": "storage/src/global_state/trie_store/operations/tests/prune.rs",
    "content": "use super::*;\nuse crate::global_state::trie_store::operations::TriePruneResult;\n\nfn checked_prune<'a, K, V, R, WR, S, WS, E>(\n    environment: &'a R,\n    write_environment: &'a WR,\n    store: &S,\n    write_store: &WS,\n    root: &Digest,\n    key_to_prune: &K,\n) -> Result<TriePruneResult, E>\nwhere\n    K: ToBytes + FromBytes + Clone + std::fmt::Debug + Eq,\n    V: ToBytes + FromBytes + Clone + std::fmt::Debug,\n    R: TransactionSource<'a, Handle = S::Handle>,\n    WR: TransactionSource<'a, Handle = WS::Handle>,\n    S: TrieStore<K, V>,\n    WS: TrieStore<K, PanickingFromBytes<V>>,\n    S::Error: From<R::Error>,\n    WS::Error: From<WR::Error>,\n    E: From<S::Error> + From<WS::Error> + From<R::Error> + From<WR::Error> + From<bytesrepr::Error>,\n{\n    let mut txn = write_environment.create_read_write_txn()?;\n    let prune_result = operations::prune::<K, PanickingFromBytes<V>, _, WS, E>(\n        &mut txn,\n        write_store,\n        root,\n        key_to_prune,\n    );\n    txn.commit()?;\n    let prune_result = prune_result?;\n    let rtxn = environment.create_read_write_txn()?;\n    if let TriePruneResult::Pruned(new_root) = prune_result {\n        operations::check_integrity::<K, V, _, S, E>(&rtxn, store, vec![new_root])?;\n    }\n    rtxn.commit()?;\n    Ok(prune_result)\n}\n\nmod partial_tries {\n    use super::*;\n    use crate::global_state::trie_store::operations::TriePruneResult;\n\n    #[allow(clippy::too_many_arguments)]\n    fn prune_from_partial_trie_had_expected_results<'a, K, V, R, WR, S, WS, E>(\n        environment: &'a R,\n        write_environment: &'a WR,\n        store: &S,\n        write_store: &WS,\n        root: &Digest,\n        key_to_prune: &K,\n        expected_root_after_prune: &Digest,\n        expected_tries_after_prune: &[HashedTrie<K, V>],\n    ) -> Result<(), E>\n    where\n        K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug,\n        V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug,\n   
     R: TransactionSource<'a, Handle = S::Handle>,\n        WR: TransactionSource<'a, Handle = WS::Handle>,\n        S: TrieStore<K, V>,\n        WS: TrieStore<K, PanickingFromBytes<V>>,\n        S::Error: From<R::Error>,\n        WS::Error: From<WR::Error>,\n        E: From<R::Error>\n            + From<S::Error>\n            + From<WR::Error>\n            + From<WS::Error>\n            + From<bytesrepr::Error>,\n    {\n        let rtxn = environment.create_read_txn()?;\n        // The assert below only works with partial tries\n        assert_eq!(store.get(&rtxn, expected_root_after_prune)?, None);\n        rtxn.commit()?;\n        let root_after_prune = match checked_prune::<K, V, _, _, _, _, E>(\n            environment,\n            write_environment,\n            store,\n            write_store,\n            root,\n            key_to_prune,\n        )? {\n            TriePruneResult::Pruned(root_after_prune) => root_after_prune,\n            TriePruneResult::MissingKey => panic!(\"key did not exist\"),\n            TriePruneResult::RootNotFound => panic!(\"root should be found\"),\n            TriePruneResult::Failure(err) => panic!(\"{:?}\", err),\n        };\n        assert_eq!(root_after_prune, *expected_root_after_prune);\n        let rtxn = environment.create_read_txn()?;\n        for HashedTrie { hash, trie } in expected_tries_after_prune {\n            assert_eq!(store.get(&rtxn, hash)?, Some(trie.clone()));\n        }\n        rtxn.commit()?;\n        Ok(())\n    }\n\n    #[test]\n    fn lmdb_prune_from_partial_trie_had_expected_results() {\n        for i in 0..TEST_LEAVES_LENGTH {\n            let (initial_root_hash, initial_tries) = TEST_TRIE_GENERATORS[i + 1]().unwrap();\n            let (updated_root_hash, updated_tries) = TEST_TRIE_GENERATORS[i]().unwrap();\n            let key_to_prune = &TEST_LEAVES[i];\n            let context = LmdbTestContext::new(&initial_tries).unwrap();\n\n            prune_from_partial_trie_had_expected_results::<\n      
          TestKey,\n                TestValue,\n                _,\n                _,\n                _,\n                _,\n                error::Error,\n            >(\n                &context.environment,\n                &context.environment,\n                &context.store,\n                &context.store,\n                &initial_root_hash,\n                key_to_prune.key().unwrap(),\n                &updated_root_hash,\n                updated_tries.as_slice(),\n            )\n            .unwrap();\n        }\n    }\n\n    fn prune_non_existent_key_from_partial_trie_should_return_does_not_exist<\n        'a,\n        K,\n        V,\n        R,\n        WR,\n        S,\n        WS,\n        E,\n    >(\n        environment: &'a R,\n        write_environment: &'a WR,\n        store: &S,\n        write_store: &WS,\n        root: &Digest,\n        key_to_prune: &K,\n    ) -> Result<(), E>\n    where\n        K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug,\n        V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug,\n        R: TransactionSource<'a, Handle = S::Handle>,\n        WR: TransactionSource<'a, Handle = WS::Handle>,\n        S: TrieStore<K, V>,\n        WS: TrieStore<K, PanickingFromBytes<V>>,\n        S::Error: From<R::Error>,\n        WS::Error: From<WR::Error>,\n        E: From<R::Error>\n            + From<S::Error>\n            + From<bytesrepr::Error>\n            + From<WR::Error>\n            + From<WS::Error>,\n    {\n        match checked_prune::<K, _, _, _, _, _, E>(\n            environment,\n            write_environment,\n            store,\n            write_store,\n            root,\n            key_to_prune,\n        )? 
{\n            TriePruneResult::Pruned(_) => panic!(\"should not prune\"),\n            TriePruneResult::MissingKey => Ok(()),\n            TriePruneResult::RootNotFound => panic!(\"root should be found\"),\n            TriePruneResult::Failure(err) => panic!(\"{:?}\", err),\n        }\n    }\n\n    #[test]\n    fn lmdb_prune_non_existent_key_from_partial_trie_should_return_does_not_exist() {\n        for i in 0..TEST_LEAVES_LENGTH {\n            let (initial_root_hash, initial_tries) = TEST_TRIE_GENERATORS[i]().unwrap();\n            let key_to_prune = &TEST_LEAVES_ADJACENTS[i];\n            let context = LmdbTestContext::new(&initial_tries).unwrap();\n\n            prune_non_existent_key_from_partial_trie_should_return_does_not_exist::<\n                TestKey,\n                TestValue,\n                _,\n                _,\n                _,\n                _,\n                error::Error,\n            >(\n                &context.environment,\n                &context.environment,\n                &context.store,\n                &context.store,\n                &initial_root_hash,\n                key_to_prune.key().unwrap(),\n            )\n            .unwrap();\n        }\n    }\n}\n\nmod full_tries {\n    use super::*;\n    use std::ops::RangeInclusive;\n\n    use proptest::{collection, prelude::*};\n\n    use casper_types::{\n        bytesrepr::{self, FromBytes, ToBytes},\n        gens::{colliding_key_arb, stored_value_arb},\n        Digest, Key, StoredValue,\n    };\n\n    use crate::global_state::{\n        error,\n        transaction_source::TransactionSource,\n        trie_store::{\n            operations::{\n                prune,\n                tests::{LmdbTestContext, TestKey, TestValue, TEST_TRIE_GENERATORS},\n                write, TriePruneResult, WriteResult,\n            },\n            TrieStore,\n        },\n    };\n\n    fn serially_insert_and_prune<'a, K, V, R, S, E>(\n        environment: &'a R,\n        store: &S,\n        
root: &Digest,\n        pairs: &[(K, V)],\n    ) -> Result<(), E>\n    where\n        K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug,\n        V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug,\n        R: TransactionSource<'a, Handle = S::Handle>,\n        S: TrieStore<K, PanickingFromBytes<V>>,\n        S::Error: From<R::Error>,\n        E: From<R::Error> + From<S::Error> + From<bytesrepr::Error>,\n    {\n        let mut txn: R::ReadWriteTransaction = environment.create_read_write_txn()?;\n        let mut roots = Vec::new();\n        // Insert the key-value pairs, keeping track of the roots as we go\n        for (key, value) in pairs {\n            let new_value = PanickingFromBytes::new(value.clone());\n            if let WriteResult::Written(new_root) = write::<K, PanickingFromBytes<V>, _, _, E>(\n                &mut txn,\n                store,\n                roots.last().unwrap_or(root),\n                key,\n                &new_value,\n            )? {\n                roots.push(new_root);\n            } else {\n                panic!(\"Could not write pair\")\n            }\n        }\n        // Delete the key-value pairs, checking the resulting roots as we go\n        let mut current_root = roots.pop().unwrap_or_else(|| root.to_owned());\n        for (key, _value) in pairs.iter().rev() {\n            let prune_result =\n                prune::<K, PanickingFromBytes<V>, _, _, E>(&mut txn, store, &current_root, key);\n            if let TriePruneResult::Pruned(new_root) = prune_result? 
{\n                current_root = roots.pop().unwrap_or_else(|| root.to_owned());\n                assert_eq!(new_root, current_root);\n            } else {\n                panic!(\"Could not prune\")\n            }\n        }\n        Ok(())\n    }\n\n    #[test]\n    fn lmdb_serially_insert_and_prune() {\n        let (empty_root_hash, empty_trie) = TEST_TRIE_GENERATORS[0]().unwrap();\n        let context = LmdbTestContext::new(&empty_trie).unwrap();\n\n        serially_insert_and_prune::<TestKey, TestValue, _, _, error::Error>(\n            &context.environment,\n            &context.store,\n            &empty_root_hash,\n            &[\n                (TestKey([1u8; 7]), TestValue([1u8; 6])),\n                (TestKey([0u8; 7]), TestValue([0u8; 6])),\n                (TestKey([0u8, 1, 1, 1, 1, 1, 1]), TestValue([2u8; 6])),\n                (TestKey([2u8; 7]), TestValue([2u8; 6])),\n            ],\n        )\n        .unwrap();\n    }\n\n    const INTERLEAVED_INSERT_AND_PRUNE_TEST_LEAVES_1: [(TestKey, TestValue); 3] = [\n        (TestKey([1u8; 7]), TestValue([1u8; 6])),\n        (TestKey([0u8; 7]), TestValue([0u8; 6])),\n        (TestKey([0u8, 1, 1, 1, 1, 1, 1]), TestValue([2u8; 6])),\n    ];\n\n    const INTERLEAVED_PRUNE_TEST_KEYS_1: [TestKey; 1] = [TestKey([1u8; 7])];\n\n    fn interleaved_insert_and_prune<'a, K, V, R, S, E>(\n        environment: &'a R,\n        store: &S,\n        root: &Digest,\n        pairs_to_insert: &[(K, V)],\n        keys_to_prune: &[K],\n    ) -> Result<(), E>\n    where\n        K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug,\n        V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug,\n        R: TransactionSource<'a, Handle = S::Handle>,\n        S: TrieStore<K, PanickingFromBytes<V>>,\n        S::Error: From<R::Error>,\n        E: From<R::Error> + From<S::Error> + From<bytesrepr::Error>,\n    {\n        let mut txn: R::ReadWriteTransaction = environment.create_read_write_txn()?;\n        let mut expected_root = 
*root;\n        // Insert the key-value pairs, keeping track of the roots as we go\n        for (key, value) in pairs_to_insert.iter() {\n            let new_value = PanickingFromBytes::new(value.clone());\n            if let WriteResult::Written(new_root) = write::<K, PanickingFromBytes<V>, _, _, E>(\n                &mut txn,\n                store,\n                &expected_root,\n                key,\n                &new_value,\n            )? {\n                expected_root = new_root;\n            } else {\n                panic!(\"Could not write pair\")\n            }\n        }\n        for key in keys_to_prune.iter() {\n            let prune_result =\n                prune::<K, PanickingFromBytes<V>, _, _, E>(&mut txn, store, &expected_root, key);\n            match prune_result? {\n                TriePruneResult::Pruned(new_root) => {\n                    expected_root = new_root;\n                }\n                TriePruneResult::MissingKey => {}\n                TriePruneResult::RootNotFound => panic!(\"should find root\"),\n                TriePruneResult::Failure(err) => panic!(\"{:?}\", err),\n            }\n        }\n\n        let pairs_to_insert_less_pruned: Vec<(K, V)> = pairs_to_insert\n            .iter()\n            .rev()\n            .filter(|&(key, _value)| !keys_to_prune.contains(key))\n            .cloned()\n            .collect();\n\n        let mut actual_root = *root;\n        for (key, value) in pairs_to_insert_less_pruned.iter() {\n            let new_value = PanickingFromBytes::new(value.clone());\n            if let WriteResult::Written(new_root) = write::<K, PanickingFromBytes<V>, _, _, E>(\n                &mut txn,\n                store,\n                &actual_root,\n                key,\n                &new_value,\n            )? 
{\n                actual_root = new_root;\n            } else {\n                panic!(\"Could not write pair\")\n            }\n        }\n\n        assert_eq!(expected_root, actual_root, \"Expected did not match actual\");\n\n        Ok(())\n    }\n\n    #[test]\n    fn lmdb_interleaved_insert_and_prune() {\n        let (empty_root_hash, empty_trie) = TEST_TRIE_GENERATORS[0]().unwrap();\n        let context = LmdbTestContext::new(&empty_trie).unwrap();\n\n        interleaved_insert_and_prune::<TestKey, TestValue, _, _, error::Error>(\n            &context.environment,\n            &context.store,\n            &empty_root_hash,\n            &INTERLEAVED_INSERT_AND_PRUNE_TEST_LEAVES_1,\n            &INTERLEAVED_PRUNE_TEST_KEYS_1,\n        )\n        .unwrap();\n    }\n\n    const DEFAULT_MIN_LENGTH: usize = 1;\n\n    const DEFAULT_MAX_LENGTH: usize = 6;\n\n    fn get_range() -> RangeInclusive<usize> {\n        let start = option_env!(\"CL_TRIE_TEST_VECTOR_MIN_LENGTH\")\n            .and_then(|s| str::parse::<usize>(s).ok())\n            .unwrap_or(DEFAULT_MIN_LENGTH);\n        let end = option_env!(\"CL_TRIE_TEST_VECTOR_MAX_LENGTH\")\n            .and_then(|s| str::parse::<usize>(s).ok())\n            .unwrap_or(DEFAULT_MAX_LENGTH);\n        RangeInclusive::new(start, end)\n    }\n\n    proptest! 
{\n        #[test]\n        fn prop_lmdb_interleaved_insert_and_prune(\n            pairs_to_insert in collection::vec((colliding_key_arb(), stored_value_arb()), get_range())\n        ) {\n            let (empty_root_hash, empty_trie) = TEST_TRIE_GENERATORS[0]().unwrap();\n            let context = LmdbTestContext::new(&empty_trie).unwrap();\n\n            let keys_to_prune = {\n                let mut tmp = Vec::new();\n                for i in (0..pairs_to_insert.len()).step_by(2) {\n                    tmp.push(pairs_to_insert[i].0)\n                }\n                tmp\n            };\n\n            interleaved_insert_and_prune::<Key, StoredValue, _, _, error::Error>(\n                &context.environment,\n                &context.store,\n                &empty_root_hash,\n                &pairs_to_insert,\n                &keys_to_prune,\n            )\n            .unwrap();\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/global_state/trie_store/operations/tests/read.rs",
    "content": "//! This module contains tests for [`StateReader::read`].\n//!\n//! Our primary goal here is to test this functionality in isolation.\n//! Therefore, we manually construct test tries from a well-known set of\n//! leaves called [`TEST_LEAVES`](super::TEST_LEAVES), each of which represents a value we are\n//! trying to store in the trie at a given key.\n//!\n//! We use two strategies for testing.  See the [`partial_tries`] and\n//! [`full_tries`] modules for more info.\n\nuse super::*;\nuse crate::global_state::error;\n\nmod partial_tries {\n    //! Here we construct 6 separate \"partial\" tries, increasing in size\n    //! from 0 to 5 leaves.  Each of these tries contains no past history,\n    //! only a single a root to read from.  The tests check that we can read\n    //! only the expected set of leaves from the trie from this single root.\n\n    use super::*;\n\n    #[test]\n    fn lmdb_reads_from_n_leaf_partial_trie_had_expected_results() {\n        for (num_leaves, generator) in TEST_TRIE_GENERATORS.iter().enumerate() {\n            let (root_hash, tries) = generator().unwrap();\n            let context = LmdbTestContext::new(&tries).unwrap();\n            let test_leaves = TEST_LEAVES;\n            let (used, unused) = test_leaves.split_at(num_leaves);\n\n            check_leaves::<_, _, _, _, error::Error>(\n                &context.environment,\n                &context.store,\n                &root_hash,\n                used,\n                unused,\n            )\n            .unwrap();\n        }\n    }\n}\n\nmod full_tries {\n    //! Here we construct a series of 6 \"full\" tries, increasing in size\n    //! from 0 to 5 leaves.  Each trie contains the history from preceding\n    //! tries in this series, and past history can be read from the roots of\n    //! each preceding trie.  The tests check that we can read only the\n    //! expected set of leaves from the trie at the current root and all past\n    //! 
roots.\n\n    use super::*;\n\n    #[test]\n    fn lmdb_reads_from_n_leaf_full_trie_had_expected_results() {\n        let context = LmdbTestContext::new(EMPTY_HASHED_TEST_TRIES).unwrap();\n        let mut states: Vec<Digest> = Vec::new();\n\n        for (state_index, generator) in TEST_TRIE_GENERATORS.iter().enumerate() {\n            let (root_hash, tries) = generator().unwrap();\n            context.update(&tries).unwrap();\n            states.push(root_hash);\n\n            for (num_leaves, state) in states[..state_index].iter().enumerate() {\n                let test_leaves = TEST_LEAVES;\n                let (used, unused) = test_leaves.split_at(num_leaves);\n                check_leaves::<_, _, _, _, error::Error>(\n                    &context.environment,\n                    &context.store,\n                    state,\n                    used,\n                    unused,\n                )\n                .unwrap();\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/global_state/trie_store/operations/tests/scan.rs",
    "content": "use casper_types::Digest;\nuse convert::TryInto;\n\nuse super::*;\nuse crate::global_state::{\n    error,\n    trie::LazilyDeserializedTrie,\n    trie_store::operations::{scan_raw, store_wrappers, TrieScanRaw},\n};\n\nfn check_scan<'a, R, S, E>(\n    environment: &'a R,\n    store: &S,\n    root_hash: &Digest,\n    key: &[u8],\n) -> Result<(), E>\nwhere\n    R: TransactionSource<'a, Handle = S::Handle>,\n    S: TrieStore<TestKey, TestValue>,\n    S::Error: From<R::Error> + std::fmt::Debug,\n    E: From<R::Error> + From<S::Error> + From<bytesrepr::Error>,\n{\n    let txn: R::ReadTransaction = environment.create_read_txn()?;\n    let root = store\n        .get(&txn, root_hash)?\n        .expect(\"check_scan received an invalid root hash\");\n    let root_bytes = root.to_bytes()?;\n    let store = store_wrappers::NonDeserializingStore::new(store);\n    let TrieScanRaw { mut tip, parents } = scan_raw::<TestKey, TestValue, R::ReadTransaction, S, E>(\n        &txn,\n        &store,\n        key,\n        root_bytes.into(),\n    )?;\n\n    for (index, parent) in parents.into_iter().rev() {\n        let expected_tip_hash = {\n            match tip {\n                LazilyDeserializedTrie::Leaf(leaf_bytes) => Digest::hash(leaf_bytes.bytes()),\n                node @ LazilyDeserializedTrie::Node { .. }\n                | node @ LazilyDeserializedTrie::Extension { .. } => {\n                    let tip_bytes = TryInto::<Trie<TestKey, TestValue>>::try_into(node)?\n                        .to_bytes()\n                        .unwrap();\n                    Digest::hash(&tip_bytes)\n                }\n            }\n        };\n        match parent {\n            Trie::Leaf { .. 
} => panic!(\"parents should not contain any leaves\"),\n            Trie::Node { pointer_block } => {\n                let pointer_tip_hash = pointer_block[<usize>::from(index)].map(|ptr| *ptr.hash());\n                assert_eq!(Some(expected_tip_hash), pointer_tip_hash);\n                tip = LazilyDeserializedTrie::Node { pointer_block };\n            }\n            Trie::Extension { affix, pointer } => {\n                let pointer_tip_hash = pointer.hash().to_owned();\n                assert_eq!(expected_tip_hash, pointer_tip_hash);\n                tip = LazilyDeserializedTrie::Extension { affix, pointer };\n            }\n        }\n    }\n\n    assert!(\n        matches!(\n            tip,\n            LazilyDeserializedTrie::Node { .. } | LazilyDeserializedTrie::Extension { .. },\n        ),\n        \"Unexpected leaf found\"\n    );\n    assert_eq!(root, tip.try_into()?);\n    txn.commit()?;\n    Ok(())\n}\n\nmod partial_tries {\n    use super::*;\n\n    #[test]\n    fn lmdb_scans_from_n_leaf_partial_trie_had_expected_results() {\n        for generator in &TEST_TRIE_GENERATORS {\n            let (root_hash, tries) = generator().unwrap();\n            let context = LmdbTestContext::new(&tries).unwrap();\n\n            for leaf in TEST_LEAVES.iter() {\n                let leaf_bytes = leaf.to_bytes().unwrap();\n                check_scan::<_, _, error::Error>(\n                    &context.environment,\n                    &context.store,\n                    &root_hash,\n                    &leaf_bytes,\n                )\n                .unwrap()\n            }\n        }\n    }\n}\n\nmod full_tries {\n    use super::*;\n\n    #[test]\n    fn lmdb_scans_from_n_leaf_full_trie_had_expected_results() {\n        let context = LmdbTestContext::new(EMPTY_HASHED_TEST_TRIES).unwrap();\n        let mut states: Vec<Digest> = Vec::new();\n\n        for (state_index, generator) in TEST_TRIE_GENERATORS.iter().enumerate() {\n            let (root_hash, tries) = 
generator().unwrap();\n            context.update(&tries).unwrap();\n            states.push(root_hash);\n\n            for state in &states[..state_index] {\n                for leaf in TEST_LEAVES.iter() {\n                    let leaf_bytes = leaf.to_bytes().unwrap();\n                    check_scan::<_, _, error::Error>(\n                        &context.environment,\n                        &context.store,\n                        state,\n                        &leaf_bytes,\n                    )\n                    .unwrap()\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/global_state/trie_store/operations/tests/synchronize.rs",
    "content": "use std::{borrow::Cow, collections::HashSet};\n\nuse num_traits::FromPrimitive;\n\nuse casper_types::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    global_state::Pointer,\n    Digest,\n};\n\nuse crate::global_state::{\n    error,\n    transaction_source::{Readable, Transaction, TransactionSource},\n    trie::{Trie, TrieTag},\n    trie_store::{\n        operations::{\n            self,\n            tests::{LmdbTestContext, TestKey, TestValue},\n            ReadResult,\n        },\n        TrieStore,\n    },\n};\n\n/// Given a root hash, find any trie keys that are descendant from it that are referenced but not\n/// present in the database.\n// TODO: We only need to check one trie key at a time\nfn missing_trie_keys<K, V, T, S, E>(\n    txn: &T,\n    store: &S,\n    mut trie_keys_to_visit: Vec<Digest>,\n    known_complete: &HashSet<Digest>,\n) -> Result<Vec<Digest>, E>\nwhere\n    K: ToBytes + FromBytes + Eq + std::fmt::Debug,\n    V: ToBytes + FromBytes + std::fmt::Debug,\n    T: Readable<Handle = S::Handle>,\n    S: TrieStore<K, V>,\n    S::Error: From<T::Error>,\n    E: From<S::Error> + From<bytesrepr::Error>,\n{\n    let mut missing_descendants = Vec::new();\n    let mut visited = HashSet::new();\n    while let Some(trie_key) = trie_keys_to_visit.pop() {\n        if !visited.insert(trie_key) {\n            continue;\n        }\n\n        if known_complete.contains(&trie_key) {\n            // Skip because we know there are no missing descendants.\n            continue;\n        }\n\n        let retrieved_trie_bytes = match store.get_raw(txn, &trie_key)? 
{\n            Some(bytes) => bytes,\n            None => {\n                // No entry under this trie key.\n                missing_descendants.push(trie_key);\n                continue;\n            }\n        };\n\n        // Optimization: Don't deserialize leaves as they have no descendants.\n        if let Some(TrieTag::Leaf) = retrieved_trie_bytes\n            .first()\n            .copied()\n            .and_then(TrieTag::from_u8)\n        {\n            continue;\n        }\n\n        // Parse the trie, handling errors gracefully.\n        let retrieved_trie = match bytesrepr::deserialize_from_slice(retrieved_trie_bytes) {\n            Ok(retrieved_trie) => retrieved_trie,\n            // Couldn't parse; treat as missing and continue.\n            Err(err) => {\n                tracing::error!(?err, \"unable to parse trie\");\n                missing_descendants.push(trie_key);\n                continue;\n            }\n        };\n\n        match retrieved_trie {\n            // Should be unreachable due to checking the first byte as a shortcut above.\n            Trie::<K, V>::Leaf { .. 
} => {\n                tracing::error!(\n                    \"did not expect to see a trie leaf in `missing_trie_keys` after shortcut\"\n                );\n            }\n            // If we hit a pointer block, queue up all of the nodes it points to\n            Trie::Node { pointer_block } => {\n                for (_, pointer) in pointer_block.as_indexed_pointers() {\n                    match pointer {\n                        Pointer::LeafPointer(descendant_leaf_trie_key) => {\n                            trie_keys_to_visit.push(descendant_leaf_trie_key)\n                        }\n                        Pointer::NodePointer(descendant_node_trie_key) => {\n                            trie_keys_to_visit.push(descendant_node_trie_key)\n                        }\n                    }\n                }\n            }\n            // If we hit an extension block, add its pointer to the queue\n            Trie::Extension { pointer, .. } => trie_keys_to_visit.push(pointer.into_hash()),\n        }\n    }\n    Ok(missing_descendants)\n}\n\nfn copy_state<'a, K, V, R, S, E>(\n    source_environment: &'a R,\n    source_store: &S,\n    target_environment: &'a R,\n    target_store: &S,\n    root: &Digest,\n) -> Result<(), E>\nwhere\n    K: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy + Clone + Ord,\n    V: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy,\n    R: TransactionSource<'a, Handle = S::Handle>,\n    S: TrieStore<K, V>,\n    S::Error: From<R::Error>,\n    E: From<R::Error> + From<S::Error> + From<bytesrepr::Error>,\n{\n    // Make sure no missing nodes in source\n    {\n        let txn: R::ReadTransaction = source_environment.create_read_txn()?;\n        let missing_from_source = missing_trie_keys::<_, _, _, _, E>(\n            &txn,\n            source_store,\n            vec![root.to_owned()],\n            &Default::default(),\n        )?;\n        assert_eq!(missing_from_source, Vec::new());\n        txn.commit()?;\n    }\n\n    // Copy source to 
target\n    {\n        let source_txn: R::ReadTransaction = source_environment.create_read_txn()?;\n        let mut target_txn: R::ReadWriteTransaction = target_environment.create_read_write_txn()?;\n        // Copy source to destination\n        let mut queue = vec![root.to_owned()];\n        while let Some(trie_key) = queue.pop() {\n            let trie_bytes_to_insert = source_store\n                .get_raw(&source_txn, &trie_key)?\n                .expect(\"should have trie\");\n            target_store.put_raw(\n                &mut target_txn,\n                &trie_key,\n                Cow::from(&*trie_bytes_to_insert),\n            )?;\n\n            // Now that we've added in `trie_to_insert`, queue up its children\n            let new_keys = missing_trie_keys::<_, _, _, _, E>(\n                &target_txn,\n                target_store,\n                vec![trie_key],\n                &Default::default(),\n            )?;\n\n            queue.extend(new_keys);\n        }\n        source_txn.commit()?;\n        target_txn.commit()?;\n    }\n\n    // After the copying process above there should be no missing entries in the target\n    {\n        let target_txn: R::ReadWriteTransaction = target_environment.create_read_write_txn()?;\n        let missing_from_target = missing_trie_keys::<_, _, _, _, E>(\n            &target_txn,\n            target_store,\n            vec![root.to_owned()],\n            &Default::default(),\n        )?;\n        assert_eq!(missing_from_target, Vec::new());\n        target_txn.commit()?;\n    }\n\n    // Make sure all of the target keys under the root hash are in the source\n    {\n        let source_txn: R::ReadTransaction = source_environment.create_read_txn()?;\n        let target_txn: R::ReadTransaction = target_environment.create_read_txn()?;\n        let target_keys = operations::keys::<_, _, _, _>(&target_txn, target_store, root)\n            .collect::<Result<Vec<K>, S::Error>>()?;\n        for key in target_keys {\n 
           let maybe_value: ReadResult<V> =\n                operations::read::<_, _, _, _, E>(&source_txn, source_store, root, &key)?;\n            assert!(maybe_value.is_found())\n        }\n        source_txn.commit()?;\n        target_txn.commit()?;\n    }\n\n    // Make sure all of the source keys under the root hash are in the target\n    {\n        let source_txn: R::ReadTransaction = source_environment.create_read_txn()?;\n        let target_txn: R::ReadTransaction = target_environment.create_read_txn()?;\n        let source_keys = operations::keys::<_, _, _, _>(&source_txn, source_store, root)\n            .collect::<Result<Vec<K>, S::Error>>()?;\n        for key in source_keys {\n            let maybe_value: ReadResult<V> =\n                operations::read::<_, _, _, _, E>(&target_txn, target_store, root, &key)?;\n            assert!(maybe_value.is_found())\n        }\n        source_txn.commit()?;\n        target_txn.commit()?;\n    }\n\n    Ok(())\n}\n\n#[test]\nfn lmdb_copy_state() {\n    let (root_hash, tries) = super::create_6_leaf_trie().unwrap();\n    let source = LmdbTestContext::new(&tries).unwrap();\n    let target = LmdbTestContext::new::<TestKey, TestValue>(&[]).unwrap();\n\n    copy_state::<TestKey, TestValue, _, _, error::Error>(\n        &source.environment,\n        &source.store,\n        &target.environment,\n        &target.store,\n        &root_hash,\n    )\n    .unwrap();\n}\n"
  },
  {
    "path": "storage/src/global_state/trie_store/operations/tests/write.rs",
    "content": "use super::*;\n\nmod empty_tries {\n    use super::*;\n\n    #[test]\n    fn lmdb_non_colliding_writes_to_n_leaf_empty_trie_had_expected_results() {\n        for num_leaves in 1..=TEST_LEAVES_LENGTH {\n            let (root_hash, tries) = TEST_TRIE_GENERATORS[0]().unwrap();\n            let context = LmdbTestContext::new(&tries).unwrap();\n            let initial_states = vec![root_hash];\n\n            writes_to_n_leaf_empty_trie_had_expected_results::<_, _, _, _, _, _, error::Error>(\n                &context.environment,\n                &context.environment,\n                &context.store,\n                &context.store,\n                &initial_states,\n                &TEST_LEAVES_NON_COLLIDING[..num_leaves],\n            )\n            .unwrap();\n        }\n    }\n\n    #[test]\n    fn lmdb_writes_to_n_leaf_empty_trie_had_expected_results() {\n        for num_leaves in 1..=TEST_LEAVES_LENGTH {\n            let (root_hash, tries) = TEST_TRIE_GENERATORS[0]().unwrap();\n            let context = LmdbTestContext::new(&tries).unwrap();\n            let initial_states = vec![root_hash];\n\n            writes_to_n_leaf_empty_trie_had_expected_results::<_, _, _, _, _, _, error::Error>(\n                &context.environment,\n                &context.environment,\n                &context.store,\n                &context.store,\n                &initial_states,\n                &TEST_LEAVES[..num_leaves],\n            )\n            .unwrap();\n        }\n    }\n}\n\nmod partial_tries {\n    use super::*;\n\n    fn noop_writes_to_n_leaf_partial_trie_had_expected_results<'a, R, WR, S, WS, E>(\n        environment: &'a R,\n        write_environment: &'a WR,\n        store: &S,\n        writable_store: &WS,\n        states: &[Digest],\n        num_leaves: usize,\n    ) -> Result<(), E>\n    where\n        R: TransactionSource<'a, Handle = S::Handle>,\n        WR: TransactionSource<'a, Handle = WS::Handle>,\n        S: TrieStore<TestKey, TestValue>,\n 
       WS: TrieStore<TestKey, PanickingFromBytes<TestValue>>,\n        S::Error: From<R::Error>,\n        WS::Error: From<WR::Error>,\n        E: From<R::Error>\n            + From<S::Error>\n            + From<bytesrepr::Error>\n            + From<WR::Error>\n            + From<WS::Error>,\n    {\n        // Check that the expected set of leaves is in the trie\n        check_leaves::<_, _, _, _, E>(\n            environment,\n            store,\n            &states[0],\n            &TEST_LEAVES[..num_leaves],\n            &[],\n        )?;\n\n        // Rewrite that set of leaves\n        let write_results = write_leaves::<TestKey, _, WR, WS, E>(\n            write_environment,\n            writable_store,\n            &states[0],\n            &TEST_LEAVES[..num_leaves],\n        )?;\n\n        assert!(write_results\n            .iter()\n            .all(|result| *result == WriteResult::AlreadyExists));\n\n        // Check that the expected set of leaves is in the trie\n        check_leaves::<_, _, _, _, E>(\n            environment,\n            store,\n            &states[0],\n            &TEST_LEAVES[..num_leaves],\n            &[],\n        )\n    }\n\n    #[test]\n    fn lmdb_noop_writes_to_n_leaf_partial_trie_had_expected_results() {\n        for (num_leaves, generator) in TEST_TRIE_GENERATORS.iter().enumerate() {\n            let (root_hash, tries) = generator().unwrap();\n            let context = LmdbTestContext::new(&tries).unwrap();\n            let states = vec![root_hash];\n\n            noop_writes_to_n_leaf_partial_trie_had_expected_results::<_, _, _, _, error::Error>(\n                &context.environment,\n                &context.environment,\n                &context.store,\n                &context.store,\n                &states,\n                num_leaves,\n            )\n            .unwrap();\n        }\n    }\n\n    fn update_writes_to_n_leaf_partial_trie_had_expected_results<'a, R, WR, S, WS, E>(\n        environment: &'a R,\n        
write_environment: &'a WR,\n        store: &S,\n        writable_store: &WS,\n        states: &[Digest],\n        num_leaves: usize,\n    ) -> Result<(), E>\n    where\n        R: TransactionSource<'a, Handle = S::Handle>,\n        WR: TransactionSource<'a, Handle = WS::Handle>,\n        S: TrieStore<TestKey, TestValue>,\n        WS: TrieStore<TestKey, PanickingFromBytes<TestValue>>,\n        S::Error: From<R::Error>,\n        WS::Error: From<WR::Error>,\n        E: From<R::Error>\n            + From<S::Error>\n            + From<bytesrepr::Error>\n            + From<WR::Error>\n            + From<WS::Error>,\n    {\n        let mut states = states.to_owned();\n\n        // Check that the expected set of leaves is in the trie\n        check_leaves::<_, _, _, _, E>(\n            environment,\n            store,\n            &states[0],\n            &TEST_LEAVES[..num_leaves],\n            &[],\n        )?;\n\n        // Update and check leaves\n        for (n, leaf) in TEST_LEAVES_UPDATED[..num_leaves].iter().enumerate() {\n            let expected_leaves: Vec<TestTrie> = {\n                let n = n + 1;\n                TEST_LEAVES_UPDATED[..n]\n                    .iter()\n                    .chain(&TEST_LEAVES[n..num_leaves])\n                    .map(ToOwned::to_owned)\n                    .collect()\n            };\n\n            let root_hash = {\n                let current_root = states.last().unwrap();\n                let results = write_leaves::<_, _, _, _, E>(\n                    write_environment,\n                    writable_store,\n                    current_root,\n                    &[leaf.to_owned()],\n                )?;\n                assert_eq!(1, results.len());\n                match results[0] {\n                    WriteResult::Written(root_hash) => root_hash,\n                    _ => panic!(\"value not written\"),\n                }\n            };\n\n            states.push(root_hash);\n\n            // Check that the expected set 
of leaves is in the trie\n            check_leaves::<_, _, _, _, E>(\n                environment,\n                store,\n                states.last().unwrap(),\n                &expected_leaves,\n                &[],\n            )?;\n        }\n\n        Ok(())\n    }\n\n    #[test]\n    fn lmdb_update_writes_to_n_leaf_partial_trie_had_expected_results() {\n        for (num_leaves, generator) in TEST_TRIE_GENERATORS.iter().enumerate() {\n            let (root_hash, tries) = generator().unwrap();\n            let context = LmdbTestContext::new(&tries).unwrap();\n            let initial_states = vec![root_hash];\n\n            update_writes_to_n_leaf_partial_trie_had_expected_results::<_, _, _, _, error::Error>(\n                &context.environment,\n                &context.environment,\n                &context.store,\n                &context.store,\n                &initial_states,\n                num_leaves,\n            )\n            .unwrap()\n        }\n    }\n}\n\nmod full_tries {\n    use super::*;\n\n    fn noop_writes_to_n_leaf_full_trie_had_expected_results<'a, R, WR, S, WS, E>(\n        environment: &'a R,\n        write_environment: &'a WR,\n        store: &S,\n        write_store: &WS,\n        states: &[Digest],\n        index: usize,\n    ) -> Result<(), E>\n    where\n        R: TransactionSource<'a, Handle = S::Handle>,\n        WR: TransactionSource<'a, Handle = WS::Handle>,\n        S: TrieStore<TestKey, TestValue>,\n        WS: TrieStore<TestKey, PanickingFromBytes<TestValue>>,\n        S::Error: From<R::Error>,\n        WS::Error: From<WR::Error>,\n        E: From<R::Error>\n            + From<S::Error>\n            + From<bytesrepr::Error>\n            + From<WR::Error>\n            + From<WS::Error>,\n    {\n        // Check that the expected set of leaves is in the trie at every state reference\n        for (num_leaves, state) in states[..index].iter().enumerate() {\n            check_leaves::<_, _, _, _, E>(\n                
environment,\n                store,\n                state,\n                &TEST_LEAVES[..num_leaves],\n                &[],\n            )?;\n        }\n\n        // Rewrite that set of leaves\n        let write_results = write_leaves::<_, _, _, _, E>(\n            write_environment,\n            write_store,\n            states.last().unwrap(),\n            &TEST_LEAVES[..index],\n        )?;\n\n        assert!(write_results\n            .iter()\n            .all(|result| *result == WriteResult::AlreadyExists));\n\n        // Check that the expected set of leaves is in the trie at every state reference\n        for (num_leaves, state) in states[..index].iter().enumerate() {\n            check_leaves::<_, _, _, _, E>(\n                environment,\n                store,\n                state,\n                &TEST_LEAVES[..num_leaves],\n                &[],\n            )?\n        }\n\n        Ok(())\n    }\n\n    #[test]\n    fn lmdb_noop_writes_to_n_leaf_full_trie_had_expected_results() {\n        let context = LmdbTestContext::new(EMPTY_HASHED_TEST_TRIES).unwrap();\n        let mut states: Vec<Digest> = Vec::new();\n\n        for (index, generator) in TEST_TRIE_GENERATORS.iter().enumerate() {\n            let (root_hash, tries) = generator().unwrap();\n            context.update(&tries).unwrap();\n            states.push(root_hash);\n\n            noop_writes_to_n_leaf_full_trie_had_expected_results::<_, _, _, _, error::Error>(\n                &context.environment,\n                &context.environment,\n                &context.store,\n                &context.store,\n                &states,\n                index,\n            )\n            .unwrap();\n        }\n    }\n\n    fn update_writes_to_n_leaf_full_trie_had_expected_results<'a, R, WR, S, WS, E>(\n        environment: &'a R,\n        write_environment: &'a WR,\n        store: &S,\n        write_store: &WS,\n        states: &[Digest],\n        num_leaves: usize,\n    ) -> Result<(), E>\n    
where\n        R: TransactionSource<'a, Handle = S::Handle>,\n        WR: TransactionSource<'a, Handle = WS::Handle>,\n        S: TrieStore<TestKey, TestValue>,\n        WS: TrieStore<TestKey, PanickingFromBytes<TestValue>>,\n        S::Error: From<R::Error>,\n        WS::Error: From<WR::Error>,\n        E: From<R::Error>\n            + From<S::Error>\n            + From<bytesrepr::Error>\n            + From<WR::Error>\n            + From<WS::Error>,\n    {\n        let mut states = states.to_vec();\n\n        // Check that the expected set of leaves is in the trie at every state reference\n        for (state_index, state) in states.iter().enumerate() {\n            check_leaves::<_, _, _, _, E>(\n                environment,\n                store,\n                state,\n                &TEST_LEAVES[..state_index],\n                &[],\n            )?;\n        }\n\n        // Write set of leaves to the trie\n        let hashes = write_leaves::<_, _, _, _, E>(\n            write_environment,\n            write_store,\n            states.last().unwrap(),\n            &TEST_LEAVES_UPDATED[..num_leaves],\n        )?\n        .iter()\n        .map(|result| match result {\n            WriteResult::Written(root_hash) => *root_hash,\n            _ => panic!(\"write_leaves resulted in non-write\"),\n        })\n        .collect::<Vec<Digest>>();\n\n        states.extend(hashes);\n\n        let expected: Vec<Vec<TestTrie>> = {\n            let mut ret = vec![vec![]];\n            if num_leaves > 0 {\n                for i in 1..=num_leaves {\n                    ret.push(TEST_LEAVES[..i].to_vec())\n                }\n                for i in 1..=num_leaves {\n                    ret.push(\n                        TEST_LEAVES[i..num_leaves]\n                            .iter()\n                            .chain(&TEST_LEAVES_UPDATED[..i])\n                            .map(ToOwned::to_owned)\n                            .collect::<Vec<TestTrie>>(),\n                    
)\n                }\n            }\n            ret\n        };\n\n        assert_eq!(states.len(), expected.len());\n\n        // Check that the expected set of leaves is in the trie at every state reference\n        for (state_index, state) in states.iter().enumerate() {\n            check_leaves::<_, _, _, _, E>(environment, store, state, &expected[state_index], &[])?;\n        }\n\n        Ok(())\n    }\n\n    #[test]\n    fn lmdb_update_writes_to_n_leaf_full_trie_had_expected_results() {\n        let context = LmdbTestContext::new(EMPTY_HASHED_TEST_TRIES).unwrap();\n        let mut states: Vec<Digest> = Vec::new();\n\n        for (num_leaves, generator) in TEST_TRIE_GENERATORS.iter().enumerate() {\n            let (root_hash, tries) = generator().unwrap();\n            context.update(&tries).unwrap();\n            states.push(root_hash);\n\n            update_writes_to_n_leaf_full_trie_had_expected_results::<_, _, _, _, error::Error>(\n                &context.environment,\n                &context.environment,\n                &context.store,\n                &context.store,\n                &states,\n                num_leaves,\n            )\n            .unwrap()\n        }\n    }\n\n    fn node_writes_to_5_leaf_full_trie_had_expected_results<'a, R, WR, S, WS, E>(\n        environment: &'a R,\n        write_environment: &'a WR,\n        store: &S,\n        write_store: &WS,\n        states: &[Digest],\n    ) -> Result<(), E>\n    where\n        R: TransactionSource<'a, Handle = S::Handle>,\n        WR: TransactionSource<'a, Handle = WS::Handle>,\n        S: TrieStore<TestKey, TestValue>,\n        WS: TrieStore<TestKey, PanickingFromBytes<TestValue>>,\n        S::Error: From<R::Error>,\n        WS::Error: From<WR::Error>,\n        E: From<R::Error>\n            + From<S::Error>\n            + From<bytesrepr::Error>\n            + From<WR::Error>\n            + From<WS::Error>,\n    {\n        let mut states = states.to_vec();\n        let num_leaves = 
TEST_LEAVES_LENGTH;\n\n        // Check that the expected set of leaves is in the trie at every state reference\n        for (state_index, state) in states.iter().enumerate() {\n            check_leaves::<_, _, _, _, E>(\n                environment,\n                store,\n                state,\n                &TEST_LEAVES[..state_index],\n                &[],\n            )?;\n        }\n\n        // Write set of leaves to the trie\n        let hashes = write_leaves::<_, _, _, _, E>(\n            write_environment,\n            write_store,\n            states.last().unwrap(),\n            &TEST_LEAVES_ADJACENTS,\n        )?\n        .iter()\n        .map(|result| match result {\n            WriteResult::Written(root_hash) => *root_hash,\n            _ => panic!(\"write_leaves resulted in non-write\"),\n        })\n        .collect::<Vec<Digest>>();\n\n        states.extend(hashes);\n\n        let expected: Vec<Vec<TestTrie>> = {\n            let mut ret = vec![vec![]];\n            if num_leaves > 0 {\n                for i in 1..=num_leaves {\n                    ret.push(TEST_LEAVES[..i].to_vec())\n                }\n                for i in 1..=num_leaves {\n                    ret.push(\n                        TEST_LEAVES\n                            .iter()\n                            .chain(&TEST_LEAVES_ADJACENTS[..i])\n                            .map(ToOwned::to_owned)\n                            .collect::<Vec<TestTrie>>(),\n                    )\n                }\n            }\n            ret\n        };\n\n        assert_eq!(states.len(), expected.len());\n\n        // Check that the expected set of leaves is in the trie at every state reference\n        for (state_index, state) in states.iter().enumerate() {\n            check_leaves::<_, _, _, _, E>(environment, store, state, &expected[state_index], &[])?;\n        }\n        Ok(())\n    }\n\n    #[test]\n    fn lmdb_node_writes_to_5_leaf_full_trie_had_expected_results() {\n        let 
context = LmdbTestContext::new(EMPTY_HASHED_TEST_TRIES).unwrap();\n        let mut states: Vec<Digest> = Vec::new();\n\n        for generator in &TEST_TRIE_GENERATORS {\n            let (root_hash, tries) = generator().unwrap();\n            context.update(&tries).unwrap();\n            states.push(root_hash);\n        }\n\n        node_writes_to_5_leaf_full_trie_had_expected_results::<_, _, _, _, error::Error>(\n            &context.environment,\n            &context.environment,\n            &context.store,\n            &context.store,\n            &states,\n        )\n        .unwrap()\n    }\n}\n\nmod variable_sized_keys {\n    use super::*;\n\n    fn assert_write_result(result: WriteResult) -> Option<Digest> {\n        match result {\n            WriteResult::Written(root_hash) => Some(root_hash),\n            WriteResult::AlreadyExists => None,\n            WriteResult::RootNotFound => panic!(\"Root not found while attempting write\"),\n        }\n    }\n\n    #[test]\n    fn write_variable_len_keys() {\n        let (root_hash, tries) = create_empty_trie::<MultiVariantTestKey, u32>().unwrap();\n\n        let context = LmdbTestContext::new(&tries).unwrap();\n        let mut txn = context.environment.create_read_write_txn().unwrap();\n\n        let test_key_1 =\n            MultiVariantTestKey::VariableSizedKey(VariableAddr::LegacyAddr(*b\"caab6ff\"));\n        let root_hash = assert_write_result(\n            write::<MultiVariantTestKey, _, _, _, error::Error>(\n                &mut txn,\n                &context.store,\n                &root_hash,\n                &test_key_1,\n                &1u32,\n            )\n            .unwrap(),\n        )\n        .expect(\"Expected new root hash after write\");\n\n        let test_key_2 =\n            MultiVariantTestKey::VariableSizedKey(VariableAddr::LegacyAddr(*b\"caabb74\"));\n        let root_hash = assert_write_result(\n            write::<MultiVariantTestKey, _, _, _, error::Error>(\n                &mut 
txn,\n                &context.store,\n                &root_hash,\n                &test_key_2,\n                &2u32,\n            )\n            .unwrap(),\n        )\n        .expect(\"Expected new root hash after write\");\n\n        let test_key_3 = MultiVariantTestKey::VariableSizedKey(VariableAddr::Empty);\n        let _ = assert_write_result(\n            write::<MultiVariantTestKey, _, _, _, error::Error>(\n                &mut txn,\n                &context.store,\n                &root_hash,\n                &test_key_3,\n                &3u32,\n            )\n            .unwrap(),\n        )\n        .expect(\"Expected new root hash after write\");\n    }\n}\n\nmod batch_write_with_random_keys {\n    use crate::global_state::trie_store::cache::TrieCache;\n\n    use super::*;\n\n    use casper_types::{testing::TestRng, Key};\n    use rand::Rng;\n\n    #[test]\n    fn compare_random_keys_seq_write_with_batch_cache_write() {\n        let mut rng = TestRng::new();\n\n        for _ in 0..100 {\n            let (mut seq_write_root_hash, tries) = create_empty_trie::<Key, u32>().unwrap();\n            let context = LmdbTestContext::new(&tries).unwrap();\n            let mut txn = context.environment.create_read_write_txn().unwrap();\n\n            // Create some random keys and values.\n            let data: Vec<(Key, u32)> = (0u32..4000).map(|val| (rng.gen(), val)).collect();\n\n            // Write all the keys sequentially to the store\n            for (key, value) in data.iter() {\n                let write_result = write::<Key, u32, _, _, error::Error>(\n                    &mut txn,\n                    &context.store,\n                    &seq_write_root_hash,\n                    key,\n                    value,\n                )\n                .unwrap();\n                match write_result {\n                    WriteResult::Written(hash) => {\n                        seq_write_root_hash = hash; // Update the state root hash; we'll use it to\n    
                                                // compare with the cache root hash.\n                    }\n                    WriteResult::AlreadyExists => (),\n                    WriteResult::RootNotFound => panic!(\"write_leaves given an invalid root\"),\n                };\n            }\n\n            // Create an empty store that backs up the cache.\n            let (cache_root_hash, tries) = create_empty_trie::<Key, u32>().unwrap();\n            let context = LmdbTestContext::new(&tries).unwrap();\n            let mut txn = context.environment.create_read_write_txn().unwrap();\n\n            let mut trie_cache = TrieCache::<Key, u32, _>::new::<_, error::Error>(\n                &txn,\n                &context.store,\n                &cache_root_hash,\n            )\n            .unwrap();\n            for (key, value) in data.iter() {\n                trie_cache\n                    .insert::<_, error::Error>(*key, *value, &txn)\n                    .unwrap();\n            }\n\n            let cache_root_hash = trie_cache.store_cache::<_, error::Error>(&mut txn).unwrap();\n\n            if seq_write_root_hash != cache_root_hash {\n                println!(\"Root Hash is: {:?}\", seq_write_root_hash);\n                println!(\"Cache root Hash is: {:?}\", cache_root_hash);\n                println!(\"Faulty keys: \");\n\n                for (key, _) in data.iter() {\n                    println!(\"{}\", key.to_formatted_string());\n                }\n                panic!(\"ROOT hash mismatch\");\n            }\n        }\n    }\n\n    #[test]\n    fn compare_random_keys_write_with_cache_and_readback() {\n        let mut rng = TestRng::new();\n\n        // create a store\n        let (mut root_hash, tries) = create_empty_trie::<Key, u32>().unwrap();\n        let context = LmdbTestContext::new(&tries).unwrap();\n        let mut txn = context.environment.create_read_write_txn().unwrap();\n\n        // Create initial keys and values.\n        let 
initial_keys: Vec<(Key, u32)> = (0u32..1000).map(|val| (rng.gen(), val)).collect();\n\n        // Store these keys and values using sequential write;\n        for (key, value) in initial_keys.iter() {\n            let write_result = write::<Key, u32, _, _, error::Error>(\n                &mut txn,\n                &context.store,\n                &root_hash,\n                key,\n                value,\n            )\n            .unwrap();\n            match write_result {\n                WriteResult::Written(hash) => {\n                    root_hash = hash;\n                }\n                WriteResult::AlreadyExists => (),\n                WriteResult::RootNotFound => panic!(\"write_leaves given an invalid root\"),\n            };\n        }\n\n        // Create some test data.\n        let data: Vec<(Key, u32)> = (0u32..1000).map(|val| (rng.gen(), val)).collect();\n\n        // Create a cache backed up by the store that has the initial data.\n        let mut trie_cache =\n            TrieCache::<Key, u32, _>::new::<_, error::Error>(&txn, &context.store, &root_hash)\n                .unwrap();\n\n        // Insert the test data into the cache.\n        for (key, value) in data.iter() {\n            trie_cache\n                .insert::<_, error::Error>(*key, *value, &txn)\n                .unwrap();\n        }\n\n        // Get the generated root hash\n        let cache_root_hash = trie_cache.calculate_root_hash();\n\n        // now write the same keys to the store one by one and check if we get the same root hash.\n        let mut seq_write_root_hash = root_hash;\n        for (key, value) in data.iter() {\n            let write_result = write::<Key, u32, _, _, error::Error>(\n                &mut txn,\n                &context.store,\n                &seq_write_root_hash,\n                key,\n                value,\n            )\n            .unwrap();\n            match write_result {\n                WriteResult::Written(hash) => {\n                    
seq_write_root_hash = hash;\n                }\n                WriteResult::AlreadyExists => (),\n                WriteResult::RootNotFound => panic!(\"write_leaves given an invalid root\"),\n            };\n        }\n\n        assert_eq!(cache_root_hash, seq_write_root_hash);\n    }\n}\n"
  },
  {
    "path": "storage/src/global_state/trie_store/tests/concurrent.rs",
    "content": "use std::{\n    sync::{Arc, Barrier},\n    thread,\n};\n\nuse casper_types::bytesrepr::Bytes;\nuse tempfile::tempdir;\n\nuse super::TestData;\nuse crate::global_state::{\n    store::Store,\n    transaction_source::{lmdb::LmdbEnvironment, Transaction, TransactionSource},\n    trie::Trie,\n    trie_store::lmdb::LmdbTrieStore,\n    DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS,\n};\n\n#[test]\nfn lmdb_writer_mutex_does_not_collide_with_readers() {\n    let dir = tempdir().unwrap();\n    let env = Arc::new(\n        LmdbEnvironment::new(dir.path(), DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS, true).unwrap(),\n    );\n    let store = Arc::new(LmdbTrieStore::new(&env, None, Default::default()).unwrap());\n    let num_threads = 10;\n    let barrier = Arc::new(Barrier::new(num_threads + 1));\n    let mut handles = Vec::new();\n    let TestData(ref leaf_1_hash, ref leaf_1) = &super::create_data()[0..1][0];\n\n    for _ in 0..num_threads {\n        let reader_env = env.clone();\n        let reader_store = store.clone();\n        let reader_barrier = barrier.clone();\n        let leaf_1_hash = *leaf_1_hash;\n        #[allow(clippy::clone_on_copy)]\n        let leaf_1 = leaf_1.clone();\n\n        handles.push(thread::spawn(move || {\n            {\n                let txn = reader_env.create_read_txn().unwrap();\n                let result: Option<Trie<Bytes, Bytes>> =\n                    reader_store.get(&txn, &leaf_1_hash).unwrap();\n                assert_eq!(result, None);\n                txn.commit().unwrap();\n            }\n            // wait for other reader threads to read and the main thread to\n            // take a read-write transaction\n            reader_barrier.wait();\n            // wait for main thread to put and commit\n            reader_barrier.wait();\n            {\n                let txn = reader_env.create_read_txn().unwrap();\n                let result: Option<Trie<Bytes, Bytes>> =\n                    reader_store.get(&txn, 
&leaf_1_hash).unwrap();\n                txn.commit().unwrap();\n                result.unwrap() == leaf_1\n            }\n        }));\n    }\n\n    let mut txn = env.create_read_write_txn().unwrap();\n    // wait for reader threads to read\n    barrier.wait();\n    store.put(&mut txn, leaf_1_hash, leaf_1).unwrap();\n    txn.commit().unwrap();\n    // sync with reader threads\n    barrier.wait();\n\n    assert!(handles.into_iter().all(|b| b.join().unwrap()))\n}\n"
  },
  {
    "path": "storage/src/global_state/trie_store/tests/mod.rs",
    "content": "mod concurrent;\nmod proptests;\nmod simple;\n\nuse casper_types::{\n    bytesrepr::{Bytes, ToBytes},\n    global_state::Pointer,\n    Digest,\n};\n\nuse crate::global_state::trie::{PointerBlock, Trie};\n\n#[derive(Clone)]\nstruct TestData<K, V>(Digest, Trie<K, V>);\n\nimpl<'a, K, V> From<&'a TestData<K, V>> for (&'a Digest, &'a Trie<K, V>) {\n    fn from(test_data: &'a TestData<K, V>) -> Self {\n        (&test_data.0, &test_data.1)\n    }\n}\n\nfn create_data() -> Vec<TestData<Bytes, Bytes>> {\n    let leaf_1 = Trie::Leaf {\n        key: Bytes::from(vec![0u8, 0, 0]),\n        value: Bytes::from(b\"val_1\".to_vec()),\n    };\n    let leaf_2 = Trie::Leaf {\n        key: Bytes::from(vec![1u8, 0, 0]),\n        value: Bytes::from(b\"val_2\".to_vec()),\n    };\n    let leaf_3 = Trie::Leaf {\n        key: Bytes::from(vec![1u8, 0, 1]),\n        value: Bytes::from(b\"val_3\".to_vec()),\n    };\n\n    let leaf_1_hash = Digest::hash(leaf_1.to_bytes().unwrap());\n    let leaf_2_hash = Digest::hash(leaf_2.to_bytes().unwrap());\n    let leaf_3_hash = Digest::hash(leaf_3.to_bytes().unwrap());\n\n    let node_2: Trie<Bytes, Bytes> = {\n        let mut pointer_block = PointerBlock::new();\n        pointer_block[0] = Some(Pointer::LeafPointer(leaf_2_hash));\n        pointer_block[1] = Some(Pointer::LeafPointer(leaf_3_hash));\n        let pointer_block = Box::new(pointer_block);\n        Trie::Node { pointer_block }\n    };\n\n    let node_2_hash = Digest::hash(node_2.to_bytes().unwrap());\n\n    let ext_node: Trie<Bytes, Bytes> = {\n        let affix = vec![1u8, 0];\n        let pointer = Pointer::NodePointer(node_2_hash);\n        Trie::extension(affix, pointer)\n    };\n\n    let ext_node_hash = Digest::hash(ext_node.to_bytes().unwrap());\n\n    let node_1: Trie<Bytes, Bytes> = {\n        let mut pointer_block = PointerBlock::new();\n        pointer_block[0] = Some(Pointer::LeafPointer(leaf_1_hash));\n        pointer_block[1] = 
Some(Pointer::NodePointer(ext_node_hash));\n        let pointer_block = Box::new(pointer_block);\n        Trie::Node { pointer_block }\n    };\n\n    let node_1_hash = Digest::hash(node_1.to_bytes().unwrap());\n\n    vec![\n        TestData(leaf_1_hash, leaf_1),\n        TestData(leaf_2_hash, leaf_2),\n        TestData(leaf_3_hash, leaf_3),\n        TestData(node_1_hash, node_1),\n        TestData(node_2_hash, node_2),\n        TestData(ext_node_hash, ext_node),\n    ]\n}\n"
  },
  {
    "path": "storage/src/global_state/trie_store/tests/proptests.rs",
    "content": "use std::{collections::BTreeMap, ops::RangeInclusive};\n\nuse lmdb::DatabaseFlags;\nuse proptest::{collection::vec, prelude::proptest};\nuse tempfile::tempdir;\n\nuse casper_types::{bytesrepr::ToBytes, Digest, Key, StoredValue};\n\nuse crate::global_state::{\n    store::tests as store_tests,\n    trie::{\n        gens::{trie_extension_arb, trie_leaf_arb, trie_node_arb},\n        Trie,\n    },\n    DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS,\n};\n\nconst DEFAULT_MIN_LENGTH: usize = 1;\nconst DEFAULT_MAX_LENGTH: usize = 4;\n\nfn get_range() -> RangeInclusive<usize> {\n    let start = option_env!(\"CL_TRIE_STORE_TEST_VECTOR_MIN_LENGTH\")\n        .and_then(|s| str::parse::<usize>(s).ok())\n        .unwrap_or(DEFAULT_MIN_LENGTH);\n    let end = option_env!(\"CL_TRIE_STORE_TEST_VECTOR_MAX_LENGTH\")\n        .and_then(|s| str::parse::<usize>(s).ok())\n        .unwrap_or(DEFAULT_MAX_LENGTH);\n    RangeInclusive::new(start, end)\n}\n\nfn lmdb_roundtrip_succeeds(inputs: Vec<Trie<Key, StoredValue>>) -> bool {\n    use crate::global_state::{\n        transaction_source::lmdb::LmdbEnvironment, trie_store::lmdb::LmdbTrieStore,\n    };\n\n    let tmp_dir = tempdir().unwrap();\n    let env = LmdbEnvironment::new(\n        tmp_dir.path(),\n        DEFAULT_MAX_DB_SIZE,\n        DEFAULT_MAX_READERS,\n        true,\n    )\n    .unwrap();\n    let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap();\n\n    let inputs: BTreeMap<Digest, Trie<Key, StoredValue>> = inputs\n        .into_iter()\n        .map(|trie| (Digest::hash(trie.to_bytes().unwrap()), trie))\n        .collect();\n\n    let ret = store_tests::roundtrip_succeeds(&env, &store, inputs).unwrap();\n    tmp_dir.close().unwrap();\n    ret\n}\n\nproptest! 
{\n    #[test]\n    fn prop_lmdb_roundtrip_succeeds_leaf(v in vec(trie_leaf_arb(), get_range())) {\n        assert!(lmdb_roundtrip_succeeds(v))\n    }\n\n    #[test]\n    fn prop_lmdb_roundtrip_succeeds_node(v in vec(trie_node_arb(), get_range())) {\n        assert!(lmdb_roundtrip_succeeds(v))\n    }\n\n    #[test]\n    fn prop_lmdb_roundtrip_succeeds_extension(v in vec(trie_extension_arb(), get_range())) {\n        assert!(lmdb_roundtrip_succeeds(v))\n    }\n}\n"
  },
  {
    "path": "storage/src/global_state/trie_store/tests/simple.rs",
    "content": "use lmdb::DatabaseFlags;\nuse tempfile::tempdir;\n\nuse casper_types::bytesrepr::{self, Bytes, FromBytes, ToBytes};\n\nuse super::TestData;\nuse crate::global_state::{\n    error,\n    store::StoreExt,\n    transaction_source::{lmdb::LmdbEnvironment, Transaction, TransactionSource},\n    trie::Trie,\n    trie_store::{lmdb::LmdbTrieStore, TrieStore},\n    DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS,\n};\n\nfn put_succeeds<'a, K, V, S, X, E>(\n    store: &S,\n    transaction_source: &'a X,\n    items: &[TestData<K, V>],\n) -> Result<(), E>\nwhere\n    K: ToBytes,\n    V: ToBytes,\n    S: TrieStore<K, V>,\n    X: TransactionSource<'a, Handle = S::Handle>,\n    S::Error: From<X::Error>,\n    E: From<S::Error> + From<X::Error>,\n{\n    let mut txn: X::ReadWriteTransaction = transaction_source.create_read_write_txn()?;\n    let items = items.iter().map(Into::into);\n    store.put_many(&mut txn, items)?;\n    txn.commit()?;\n    Ok(())\n}\n\n#[test]\nfn lmdb_put_succeeds() {\n    let tmp_dir = tempdir().unwrap();\n    let env = LmdbEnvironment::new(\n        tmp_dir.path(),\n        DEFAULT_MAX_DB_SIZE,\n        DEFAULT_MAX_READERS,\n        true,\n    )\n    .unwrap();\n    let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap();\n    let data = &super::create_data()[0..1];\n\n    assert!(put_succeeds::<_, _, _, _, error::Error>(&store, &env, data).is_ok());\n\n    tmp_dir.close().unwrap();\n}\n\nfn put_get_succeeds<'a, K, V, S, X, E>(\n    store: &S,\n    transaction_source: &'a X,\n    items: &[TestData<K, V>],\n) -> Result<Vec<Option<Trie<K, V>>>, E>\nwhere\n    K: ToBytes + FromBytes,\n    V: ToBytes + FromBytes,\n    S: TrieStore<K, V>,\n    X: TransactionSource<'a, Handle = S::Handle>,\n    S::Error: From<X::Error>,\n    E: From<S::Error> + From<X::Error>,\n{\n    let mut txn: X::ReadWriteTransaction = transaction_source.create_read_write_txn()?;\n    let items = items.iter().map(Into::into);\n    store.put_many(&mut txn, 
items.clone())?;\n    let keys = items.map(|(k, _)| k);\n    let ret = store.get_many(&txn, keys)?;\n    txn.commit()?;\n    Ok(ret)\n}\n\n#[test]\nfn lmdb_put_get_succeeds() {\n    let tmp_dir = tempdir().unwrap();\n    let env = LmdbEnvironment::new(\n        tmp_dir.path(),\n        DEFAULT_MAX_DB_SIZE,\n        DEFAULT_MAX_READERS,\n        true,\n    )\n    .unwrap();\n    let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap();\n    let data = &super::create_data()[0..1];\n\n    let expected: Vec<Trie<Bytes, Bytes>> = data.iter().cloned().map(|TestData(_, v)| v).collect();\n\n    assert_eq!(\n        expected,\n        put_get_succeeds::<_, _, _, _, error::Error>(&store, &env, data)\n            .expect(\"put_get_succeeds failed\")\n            .into_iter()\n            .collect::<Option<Vec<Trie<Bytes, Bytes>>>>()\n            .expect(\"one of the outputs was empty\")\n    );\n\n    tmp_dir.close().unwrap();\n}\n\n#[test]\nfn lmdb_put_get_many_succeeds() {\n    let tmp_dir = tempdir().unwrap();\n    let env = LmdbEnvironment::new(\n        tmp_dir.path(),\n        DEFAULT_MAX_DB_SIZE,\n        DEFAULT_MAX_READERS,\n        true,\n    )\n    .unwrap();\n    let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap();\n    let data = super::create_data();\n\n    let expected: Vec<Trie<Bytes, Bytes>> = data.iter().cloned().map(|TestData(_, v)| v).collect();\n\n    assert_eq!(\n        expected,\n        put_get_succeeds::<_, _, _, _, error::Error>(&store, &env, &data)\n            .expect(\"put_get failed\")\n            .into_iter()\n            .collect::<Option<Vec<Trie<Bytes, Bytes>>>>()\n            .expect(\"one of the outputs was empty\")\n    );\n\n    tmp_dir.close().unwrap();\n}\n\nfn uncommitted_read_write_txn_does_not_persist<'a, K, V, S, X, E>(\n    store: &S,\n    transaction_source: &'a X,\n    items: &[TestData<K, V>],\n) -> Result<Vec<Option<Trie<K, V>>>, E>\nwhere\n    K: ToBytes + FromBytes,\n    V: ToBytes 
+ FromBytes,\n    S: TrieStore<K, V>,\n    X: TransactionSource<'a, Handle = S::Handle>,\n    S::Error: From<X::Error>,\n    E: From<S::Error> + From<X::Error>,\n{\n    {\n        let mut txn: X::ReadWriteTransaction = transaction_source.create_read_write_txn()?;\n        let items = items.iter().map(Into::into);\n        store.put_many(&mut txn, items)?;\n    }\n    {\n        let txn: X::ReadTransaction = transaction_source.create_read_txn()?;\n        let keys = items.iter().map(|TestData(k, _)| k);\n        let ret = store.get_many(&txn, keys)?;\n        txn.commit()?;\n        Ok(ret)\n    }\n}\n\n#[test]\nfn lmdb_uncommitted_read_write_txn_does_not_persist() {\n    let tmp_dir = tempdir().unwrap();\n    let env = LmdbEnvironment::new(\n        tmp_dir.path(),\n        DEFAULT_MAX_DB_SIZE,\n        DEFAULT_MAX_READERS,\n        true,\n    )\n    .unwrap();\n    let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap();\n    let data = super::create_data();\n\n    assert_eq!(\n        None,\n        uncommitted_read_write_txn_does_not_persist::<_, _, _, _, error::Error>(\n            &store, &env, &data,\n        )\n        .expect(\"uncommitted_read_write_txn_does_not_persist failed\")\n        .into_iter()\n        .collect::<Option<Vec<Trie<Bytes, Bytes>>>>()\n    );\n\n    tmp_dir.close().unwrap();\n}\n\nfn read_write_transaction_does_not_block_read_transaction<'a, X, E>(\n    transaction_source: &'a X,\n) -> Result<(), E>\nwhere\n    X: TransactionSource<'a>,\n    E: From<X::Error>,\n{\n    let read_write_txn = transaction_source.create_read_write_txn()?;\n    let read_txn = transaction_source.create_read_txn()?;\n    read_write_txn.commit()?;\n    read_txn.commit()?;\n    Ok(())\n}\n\n#[test]\nfn lmdb_read_write_transaction_does_not_block_read_transaction() {\n    let dir = tempdir().unwrap();\n    let env =\n        LmdbEnvironment::new(dir.path(), DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS, true).unwrap();\n\n    
assert!(read_write_transaction_does_not_block_read_transaction::<_, error::Error>(&env).is_ok())\n}\n\nfn reads_are_isolated<'a, S, X, E>(store: &S, env: &'a X) -> Result<(), E>\nwhere\n    S: TrieStore<Bytes, Bytes>,\n    X: TransactionSource<'a, Handle = S::Handle>,\n    S::Error: From<X::Error>,\n    E: From<S::Error> + From<X::Error> + From<bytesrepr::Error>,\n{\n    let TestData(leaf_1_hash, leaf_1) = &super::create_data()[0..1][0];\n\n    {\n        let read_txn_1 = env.create_read_txn()?;\n        let result = store.get(&read_txn_1, leaf_1_hash)?;\n        assert_eq!(result, None);\n\n        {\n            let mut write_txn = env.create_read_write_txn()?;\n            store.put(&mut write_txn, leaf_1_hash, leaf_1)?;\n            write_txn.commit()?;\n        }\n\n        let result = store.get(&read_txn_1, leaf_1_hash)?;\n        read_txn_1.commit()?;\n        assert_eq!(result, None);\n    }\n\n    {\n        let read_txn_2 = env.create_read_txn()?;\n        let result = store.get(&read_txn_2, leaf_1_hash)?;\n        read_txn_2.commit()?;\n        assert_eq!(result, Some(leaf_1.to_owned()));\n    }\n\n    Ok(())\n}\n\n#[test]\nfn lmdb_reads_are_isolated() {\n    let dir = tempdir().unwrap();\n    let env =\n        LmdbEnvironment::new(dir.path(), DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS, true).unwrap();\n    let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap();\n\n    assert!(reads_are_isolated::<_, _, error::Error>(&store, &env).is_ok())\n}\n\nfn reads_are_isolated_2<'a, S, X, E>(store: &S, env: &'a X) -> Result<(), E>\nwhere\n    S: TrieStore<Bytes, Bytes>,\n    X: TransactionSource<'a, Handle = S::Handle>,\n    S::Error: From<X::Error>,\n    E: From<S::Error> + From<X::Error> + From<bytesrepr::Error>,\n{\n    let data = super::create_data();\n    let TestData(ref leaf_1_hash, ref leaf_1) = data[0];\n    let TestData(ref leaf_2_hash, ref leaf_2) = data[1];\n\n    {\n        let mut write_txn = env.create_read_write_txn()?;\n       
 store.put(&mut write_txn, leaf_1_hash, leaf_1)?;\n        write_txn.commit()?;\n    }\n\n    {\n        let read_txn_1 = env.create_read_txn()?;\n        {\n            let mut write_txn = env.create_read_write_txn()?;\n            store.put(&mut write_txn, leaf_2_hash, leaf_2)?;\n            write_txn.commit()?;\n        }\n        let result = store.get(&read_txn_1, leaf_1_hash)?;\n        read_txn_1.commit()?;\n        assert_eq!(result, Some(leaf_1.to_owned()));\n    }\n\n    {\n        let read_txn_2 = env.create_read_txn()?;\n        let result = store.get(&read_txn_2, leaf_2_hash)?;\n        read_txn_2.commit()?;\n        assert_eq!(result, Some(leaf_2.to_owned()));\n    }\n\n    Ok(())\n}\n\n#[test]\nfn lmdb_reads_are_isolated_2() {\n    let dir = tempdir().unwrap();\n    let env =\n        LmdbEnvironment::new(dir.path(), DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS, true).unwrap();\n    let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap();\n\n    assert!(reads_are_isolated_2::<_, _, error::Error>(&store, &env).is_ok())\n}\n\nfn dbs_are_isolated<'a, S, X, E>(env: &'a X, store_a: &S, store_b: &S) -> Result<(), E>\nwhere\n    S: TrieStore<Bytes, Bytes>,\n    X: TransactionSource<'a, Handle = S::Handle>,\n    S::Error: From<X::Error>,\n    E: From<S::Error> + From<X::Error> + From<bytesrepr::Error>,\n{\n    let data = super::create_data();\n    let TestData(ref leaf_1_hash, ref leaf_1) = data[0];\n    let TestData(ref leaf_2_hash, ref leaf_2) = data[1];\n\n    {\n        let mut write_txn = env.create_read_write_txn()?;\n        store_a.put(&mut write_txn, leaf_1_hash, leaf_1)?;\n        write_txn.commit()?;\n    }\n\n    {\n        let mut write_txn = env.create_read_write_txn()?;\n        store_b.put(&mut write_txn, leaf_2_hash, leaf_2)?;\n        write_txn.commit()?;\n    }\n\n    {\n        let read_txn = env.create_read_txn()?;\n        let result = store_a.get(&read_txn, leaf_1_hash)?;\n        assert_eq!(result, 
Some(leaf_1.to_owned()));\n        let result = store_a.get(&read_txn, leaf_2_hash)?;\n        assert_eq!(result, None);\n        read_txn.commit()?;\n    }\n\n    {\n        let read_txn = env.create_read_txn()?;\n        let result = store_b.get(&read_txn, leaf_1_hash)?;\n        assert_eq!(result, None);\n        let result = store_b.get(&read_txn, leaf_2_hash)?;\n        assert_eq!(result, Some(leaf_2.to_owned()));\n        read_txn.commit()?;\n    }\n\n    Ok(())\n}\n\n#[test]\nfn lmdb_dbs_are_isolated() {\n    let dir = tempdir().unwrap();\n    let env =\n        LmdbEnvironment::new(dir.path(), DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS, true).unwrap();\n    let store_a = LmdbTrieStore::new(&env, Some(\"a\"), DatabaseFlags::empty()).unwrap();\n    let store_b = LmdbTrieStore::new(&env, Some(\"b\"), DatabaseFlags::empty()).unwrap();\n\n    assert!(dbs_are_isolated::<_, _, error::Error>(&env, &store_a, &store_b).is_ok())\n}\n\nfn transactions_can_be_used_across_sub_databases<'a, S, X, E>(\n    env: &'a X,\n    store_a: &S,\n    store_b: &S,\n) -> Result<(), E>\nwhere\n    S: TrieStore<Bytes, Bytes>,\n    X: TransactionSource<'a, Handle = S::Handle>,\n    S::Error: From<X::Error>,\n    E: From<S::Error> + From<X::Error> + From<bytesrepr::Error>,\n{\n    let data = super::create_data();\n    let TestData(ref leaf_1_hash, ref leaf_1) = data[0];\n    let TestData(ref leaf_2_hash, ref leaf_2) = data[1];\n\n    {\n        let mut write_txn = env.create_read_write_txn()?;\n        store_a.put(&mut write_txn, leaf_1_hash, leaf_1)?;\n        store_b.put(&mut write_txn, leaf_2_hash, leaf_2)?;\n        write_txn.commit()?;\n    }\n\n    {\n        let read_txn = env.create_read_txn()?;\n        let result = store_a.get(&read_txn, leaf_1_hash)?;\n        assert_eq!(result, Some(leaf_1.to_owned()));\n        let result = store_b.get(&read_txn, leaf_2_hash)?;\n        assert_eq!(result, Some(leaf_2.to_owned()));\n        read_txn.commit()?;\n    }\n\n    
Ok(())\n}\n\n#[test]\nfn lmdb_transactions_can_be_used_across_sub_databases() {\n    let dir = tempdir().unwrap();\n    let env =\n        LmdbEnvironment::new(dir.path(), DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS, true).unwrap();\n    let store_a = LmdbTrieStore::new(&env, Some(\"a\"), DatabaseFlags::empty()).unwrap();\n    let store_b = LmdbTrieStore::new(&env, Some(\"b\"), DatabaseFlags::empty()).unwrap();\n\n    assert!(\n        transactions_can_be_used_across_sub_databases::<_, _, error::Error>(\n            &env, &store_a, &store_b,\n        )\n        .is_ok()\n    )\n}\n\nfn uncommitted_transactions_across_sub_databases_do_not_persist<'a, S, X, E>(\n    env: &'a X,\n    store_a: &S,\n    store_b: &S,\n) -> Result<(), E>\nwhere\n    S: TrieStore<Bytes, Bytes>,\n    X: TransactionSource<'a, Handle = S::Handle>,\n    S::Error: From<X::Error>,\n    E: From<S::Error> + From<X::Error> + From<bytesrepr::Error>,\n{\n    let data = super::create_data();\n    let TestData(ref leaf_1_hash, ref leaf_1) = data[0];\n    let TestData(ref leaf_2_hash, ref leaf_2) = data[1];\n\n    {\n        let mut write_txn = env.create_read_write_txn()?;\n        store_a.put(&mut write_txn, leaf_1_hash, leaf_1)?;\n        store_b.put(&mut write_txn, leaf_2_hash, leaf_2)?;\n    }\n\n    {\n        let read_txn = env.create_read_txn()?;\n        let result = store_a.get(&read_txn, leaf_1_hash)?;\n        assert_eq!(result, None);\n        let result = store_b.get(&read_txn, leaf_2_hash)?;\n        assert_eq!(result, None);\n        read_txn.commit()?;\n    }\n\n    Ok(())\n}\n\n#[test]\nfn lmdb_uncommitted_transactions_across_sub_databases_do_not_persist() {\n    let dir = tempdir().unwrap();\n    let env =\n        LmdbEnvironment::new(dir.path(), DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS, true).unwrap();\n    let store_a = LmdbTrieStore::new(&env, Some(\"a\"), DatabaseFlags::empty()).unwrap();\n    let store_b = LmdbTrieStore::new(&env, Some(\"b\"), DatabaseFlags::empty()).unwrap();\n\n  
  assert!(\n        uncommitted_transactions_across_sub_databases_do_not_persist::<_, _, error::Error>(\n            &env, &store_a, &store_b,\n        )\n        .is_ok()\n    )\n}\n"
  },
  {
    "path": "storage/src/global_state.rs",
    "content": "/// Storage errors.\npub mod error;\n/// Global State.\npub mod state;\n/// Store module.\npub mod store;\n/// Transaction Source.\npub mod transaction_source;\n/// Merkle Trie implementation.\npub mod trie;\n/// Merkle Trie storage.\npub mod trie_store;\n\nconst MAX_DBS: u32 = 2;\n\npub(crate) const DEFAULT_MAX_DB_SIZE: usize = 52_428_800; // 50 MiB\n\npub(crate) const DEFAULT_MAX_READERS: u32 = 512;\n\npub(crate) const DEFAULT_MAX_QUERY_DEPTH: u64 = 5;\n\n/// The global state reader.\npub trait GlobalStateReader:\n    state::StateReader<casper_types::Key, casper_types::StoredValue, Error = error::Error>\n{\n}\n\nimpl<R: state::StateReader<casper_types::Key, casper_types::StoredValue, Error = error::Error>>\n    GlobalStateReader for R\n{\n}\n\npub(crate) const DEFAULT_ENABLE_ENTITY: bool = false;\n"
  },
  {
    "path": "storage/src/lib.rs",
    "content": "//! Storage for a node on the Casper network.\n\n#![doc(html_root_url = \"https://docs.rs/casper-storage/5.0.0\")]\n#![doc(\n    html_favicon_url = \"https://raw.githubusercontent.com/casper-network/casper-node/master/images/CasperLabs_Logo_Favicon_RGB_50px.png\",\n    html_logo_url = \"https://raw.githubusercontent.com/casper-network/casper-node/master/images/CasperLabs_Logo_Symbol_RGB.png\"\n)]\n#![cfg_attr(docsrs, feature(doc_auto_cfg))]\n#![warn(missing_docs)]\n\n/// Address generator logic.\npub mod address_generator;\n/// Block store logic.\npub mod block_store;\n/// Data access layer logic.\npub mod data_access_layer;\n/// Global state logic.\npub mod global_state;\n/// Storage layer logic.\npub mod system;\n/// Tracking copy.\npub mod tracking_copy;\n\npub use address_generator::{AddressGenerator, AddressGeneratorBuilder};\npub use data_access_layer::KeyPrefix;\n#[cfg(test)]\npub use tracking_copy::new_temporary_tracking_copy;\npub use tracking_copy::TrackingCopy;\n\npub use block_store::{\n    lmdb::{DbTableId, UnknownDbTableId},\n    DbRawBytesSpec,\n};\n"
  },
  {
    "path": "storage/src/system/auction/auction_native.rs",
    "content": "use crate::{\n    global_state::{error::Error as GlobalStateError, state::StateReader},\n    system::{\n        auction::{\n            providers::{AccountProvider, MintProvider, RuntimeProvider, StorageProvider},\n            Auction,\n        },\n        mint::Mint,\n        runtime_native::RuntimeNative,\n    },\n    tracking_copy::{TrackingCopyEntityExt, TrackingCopyError},\n};\nuse casper_types::{\n    account::AccountHash,\n    bytesrepr::{FromBytes, ToBytes},\n    system::{\n        auction::{BidAddr, BidKind, EraInfo, Error, Unbond, UnbondEra, UnbondKind},\n        mint,\n    },\n    AccessRights, CLTyped, CLValue, Key, KeyTag, PublicKey, StoredValue, URef, U512,\n};\nuse std::collections::BTreeSet;\nuse tracing::{debug, error};\n\nimpl<S> RuntimeProvider for RuntimeNative<S>\nwhere\n    S: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn get_caller(&self) -> AccountHash {\n        self.address()\n    }\n\n    fn is_allowed_session_caller(&self, account_hash: &AccountHash) -> bool {\n        if self.get_caller() == PublicKey::System.to_account_hash() {\n            return true;\n        }\n\n        account_hash == &self.address()\n    }\n\n    fn is_valid_uref(&self, uref: URef) -> bool {\n        self.access_rights().has_access_rights_to_uref(&uref)\n    }\n\n    fn named_keys_get(&self, name: &str) -> Option<Key> {\n        self.named_keys().get(name).cloned()\n    }\n\n    fn get_keys(&mut self, key_tag: &KeyTag) -> Result<BTreeSet<Key>, Error> {\n        self.tracking_copy()\n            .borrow_mut()\n            .get_keys(key_tag)\n            .map_err(|error| {\n                error!(%key_tag, \"RuntimeProvider::get_keys: {:?}\", error);\n                Error::Storage\n            })\n    }\n\n    fn get_keys_by_prefix(&mut self, prefix: &[u8]) -> Result<Vec<Key>, Error> {\n        self.tracking_copy()\n            .borrow_mut()\n            .reader()\n            .keys_with_prefix(prefix)\n            
.map_err(|error| {\n                error!(\"RuntimeProvider::get_keys_by_prefix: {:?}\", error);\n                Error::Storage\n            })\n    }\n\n    fn delegator_count(&mut self, bid_addr: &BidAddr) -> Result<usize, Error> {\n        let delegated_accounts = {\n            let prefix = bid_addr.delegated_account_prefix()?;\n            let keys = self.get_keys_by_prefix(&prefix).map_err(|err| {\n                error!(\"RuntimeProvider::delegator_count {:?}\", err);\n                Error::Storage\n            })?;\n            keys.len()\n        };\n        let delegated_purses = {\n            let prefix = bid_addr.delegated_purse_prefix()?;\n            let keys = self.get_keys_by_prefix(&prefix).map_err(|err| {\n                error!(\"RuntimeProvider::delegator_count {:?}\", err);\n                Error::Storage\n            })?;\n            keys.len()\n        };\n        Ok(delegated_accounts.saturating_add(delegated_purses))\n    }\n\n    fn reservation_count(&mut self, bid_addr: &BidAddr) -> Result<usize, Error> {\n        let reserved_accounts = {\n            let reservation_prefix = bid_addr.reserved_account_prefix()?;\n            let reservation_keys = self\n                .get_keys_by_prefix(&reservation_prefix)\n                .map_err(|err| {\n                    error!(\"RuntimeProvider::reservation_count {:?}\", err);\n                    Error::Storage\n                })?;\n            reservation_keys.len()\n        };\n        let reserved_purses = {\n            let reservation_prefix = bid_addr.reserved_purse_prefix()?;\n            let reservation_keys = self\n                .get_keys_by_prefix(&reservation_prefix)\n                .map_err(|err| {\n                    error!(\"RuntimeProvider::reservation_count {:?}\", err);\n                    Error::Storage\n                })?;\n            reservation_keys.len()\n        };\n        Ok(reserved_accounts.saturating_add(reserved_purses))\n    }\n\n    fn 
used_reservation_count(&mut self, bid_addr: &BidAddr) -> Result<usize, Error> {\n        let reservation_account_prefix = bid_addr.reserved_account_prefix()?;\n        let reservation_purse_prefix = bid_addr.reserved_purse_prefix()?;\n\n        let mut reservation_keys = self\n            .get_keys_by_prefix(&reservation_account_prefix)\n            .map_err(|exec_error| {\n                error!(\"RuntimeProvider::reservation_count {:?}\", exec_error);\n                <Option<Error>>::from(exec_error).unwrap_or(Error::Storage)\n            })?;\n\n        let more = self\n            .get_keys_by_prefix(&reservation_purse_prefix)\n            .map_err(|exec_error| {\n                error!(\"RuntimeProvider::reservation_count {:?}\", exec_error);\n                <Option<Error>>::from(exec_error).unwrap_or(Error::Storage)\n            })?;\n\n        reservation_keys.extend(more);\n\n        let mut used = 0;\n        for reservation_key in reservation_keys {\n            if let Key::BidAddr(BidAddr::ReservedDelegationAccount {\n                validator,\n                delegator,\n            }) = reservation_key\n            {\n                let key_to_check = Key::BidAddr(BidAddr::DelegatedAccount {\n                    validator,\n                    delegator,\n                });\n                if let Ok(Some(_)) = self.read_bid(&key_to_check) {\n                    used += 1;\n                }\n            }\n            if let Key::BidAddr(BidAddr::ReservedDelegationPurse {\n                validator,\n                delegator,\n            }) = reservation_key\n            {\n                let key_to_check = Key::BidAddr(BidAddr::DelegatedPurse {\n                    validator,\n                    delegator,\n                });\n                if let Ok(Some(_)) = self.read_bid(&key_to_check) {\n                    used += 1;\n                }\n            }\n        }\n        Ok(used)\n    }\n\n    fn vesting_schedule_period_millis(&self) 
-> u64 {\n        self.vesting_schedule_period_millis()\n    }\n\n    fn allow_auction_bids(&self) -> bool {\n        self.allow_auction_bids()\n    }\n\n    fn should_compute_rewards(&self) -> bool {\n        self.compute_rewards()\n    }\n}\n\nimpl<S> StorageProvider for RuntimeNative<S>\nwhere\n    S: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn read<T: FromBytes + CLTyped>(&mut self, uref: URef) -> Result<Option<T>, Error> {\n        // check access rights on uref\n        if !self.access_rights().has_access_rights_to_uref(&uref) {\n            return Err(Error::ForgedReference);\n        }\n        let key = &Key::URef(uref);\n        let stored_value = match self.tracking_copy().borrow_mut().read(key) {\n            Ok(Some(stored_value)) => stored_value,\n            Ok(None) => return Ok(None),\n            Err(_) => return Err(Error::Storage),\n        };\n        // by convention, we only store CLValues under Key::URef\n        if let StoredValue::CLValue(value) = stored_value {\n            // Only CLTyped instances should be stored as a CLValue.\n            let value = CLValue::into_t(value).map_err(|_| Error::CLValue)?;\n            Ok(Some(value))\n        } else {\n            Err(Error::CLValue)\n        }\n    }\n\n    fn write<T: ToBytes + CLTyped>(&mut self, uref: URef, value: T) -> Result<(), Error> {\n        let cl_value = CLValue::from_t(value).map_err(|_| Error::CLValue)?;\n        // is the uref writeable?\n        if !uref.is_writeable() {\n            error!(\"uref not writeable {}\", uref);\n            return Err(Error::Storage);\n        }\n        // check access rights on uref\n        if !self.access_rights().has_access_rights_to_uref(&uref) {\n            return Err(Error::ForgedReference);\n        }\n        self.tracking_copy()\n            .borrow_mut()\n            .write(Key::URef(uref), StoredValue::CLValue(cl_value));\n        Ok(())\n    }\n\n    fn read_bid(&mut self, key: &Key) -> 
Result<Option<BidKind>, Error> {\n        match self.tracking_copy().borrow_mut().read(key) {\n            Ok(Some(StoredValue::BidKind(bid_kind))) => Ok(Some(bid_kind)),\n            Ok(Some(_)) => {\n                error!(\"StorageProvider::read_bid: unexpected StoredValue variant\");\n                Err(Error::Storage)\n            }\n            Ok(None) => Ok(None),\n            Err(TrackingCopyError::BytesRepr(_)) => Err(Error::Serialization),\n            Err(err) => {\n                error!(\"StorageProvider::read_bid: {:?}\", err);\n                Err(Error::Storage)\n            }\n        }\n    }\n\n    fn write_bid(&mut self, key: Key, bid_kind: BidKind) -> Result<(), Error> {\n        let stored_value = StoredValue::BidKind(bid_kind);\n\n        // Charge for amount as measured by serialized length\n        // let bytes_count = stored_value.serialized_length();\n        // self.charge_gas_storage(bytes_count)?;\n\n        self.tracking_copy().borrow_mut().write(key, stored_value);\n        Ok(())\n    }\n\n    fn read_unbond(&mut self, bid_addr: BidAddr) -> Result<Option<Unbond>, Error> {\n        match self\n            .tracking_copy()\n            .borrow_mut()\n            .read(&Key::BidAddr(bid_addr))\n        {\n            Ok(Some(StoredValue::BidKind(BidKind::Unbond(unbond)))) => Ok(Some(*unbond)),\n            Ok(Some(_)) => {\n                error!(\"StorageProvider::read_unbonds: unexpected StoredValue variant\");\n                Err(Error::Storage)\n            }\n            Ok(None) => Ok(None),\n            Err(TrackingCopyError::BytesRepr(_)) => Err(Error::Serialization),\n            Err(err) => {\n                error!(\"StorageProvider::read_unbonds: {:?}\", err);\n                Err(Error::Storage)\n            }\n        }\n    }\n\n    fn write_unbond(&mut self, bid_addr: BidAddr, unbond: Option<Unbond>) -> Result<(), Error> {\n        let unbond_key = Key::BidAddr(bid_addr);\n        match unbond {\n            
Some(unbond) => {\n                self.tracking_copy().borrow_mut().write(\n                    unbond_key,\n                    StoredValue::BidKind(BidKind::Unbond(Box::new(unbond))),\n                );\n            }\n            None => {\n                self.tracking_copy().borrow_mut().prune(unbond_key);\n            }\n        }\n        Ok(())\n    }\n\n    fn record_era_info(&mut self, era_info: EraInfo) -> Result<(), Error> {\n        if self.get_caller() != PublicKey::System.to_account_hash() {\n            return Err(Error::InvalidContext);\n        }\n        self.tracking_copy()\n            .borrow_mut()\n            .write(Key::EraSummary, StoredValue::EraInfo(era_info));\n        Ok(())\n    }\n\n    fn prune_bid(&mut self, bid_addr: BidAddr) {\n        self.tracking_copy().borrow_mut().prune(bid_addr.into());\n    }\n}\n\nimpl<S> MintProvider for RuntimeNative<S>\nwhere\n    S: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn unbond(&mut self, unbond_kind: &UnbondKind, unbond_era: &UnbondEra) -> Result<(), Error> {\n        let (purse, maybe_account_hash) = match unbond_kind {\n            UnbondKind::Validator(pk) | UnbondKind::DelegatedPublicKey(pk) => {\n                let account_hash = pk.to_account_hash();\n                // Do a migration if the account hasn't been migrated yet. 
This is just a read if it\n                // has been migrated already.\n                self.tracking_copy()\n                    .borrow_mut()\n                    .migrate_account(account_hash, self.protocol_version())\n                    .map_err(|error| {\n                        error!(\n                            \"MintProvider::unbond: couldn't migrate account: {:?}\",\n                            error\n                        );\n                        Error::Storage\n                    })?;\n\n                let maybe_value = self\n                    .tracking_copy()\n                    .borrow_mut()\n                    .read(&Key::Account(account_hash))\n                    .map_err(|error| {\n                        error!(\"MintProvider::unbond: {:?}\", error);\n                        Error::Storage\n                    })?;\n\n                match maybe_value {\n                    Some(StoredValue::Account(account)) => {\n                        (account.main_purse(), Some(account_hash))\n                    }\n                    Some(StoredValue::CLValue(cl_value)) => {\n                        let entity_key: Key = cl_value.into_t().map_err(|_| Error::CLValue)?;\n                        let maybe_value = self\n                            .tracking_copy()\n                            .borrow_mut()\n                            .read(&entity_key)\n                            .map_err(|error| {\n                                error!(\"MintProvider::unbond: {:?}\", error);\n                                Error::Storage\n                            })?;\n                        match maybe_value {\n                            Some(StoredValue::AddressableEntity(entity)) => {\n                                (entity.main_purse(), Some(account_hash))\n                            }\n                            Some(_cl_value) => return Err(Error::CLValue),\n                            None => return Err(Error::InvalidPublicKey),\n             
           }\n                    }\n                    Some(_cl_value) => return Err(Error::CLValue),\n                    None => return Err(Error::InvalidPublicKey),\n                }\n            }\n            UnbondKind::DelegatedPurse(addr) => {\n                let purse = URef::new(*addr, AccessRights::READ_ADD_WRITE);\n                match self.balance(purse) {\n                    Ok(Some(_)) => (purse, None),\n                    Ok(None) => return Err(Error::MissingPurse),\n                    Err(err) => {\n                        error!(\"MintProvider::unbond: {:?}\", err);\n                        return Err(Error::Unbonding);\n                    }\n                }\n            }\n        };\n\n        self.mint_transfer_direct(\n            maybe_account_hash,\n            *unbond_era.bonding_purse(),\n            purse,\n            *unbond_era.amount(),\n            None,\n        )\n        .map_err(|_| Error::Transfer)?\n        .map_err(|_| Error::Transfer)?;\n        Ok(())\n    }\n\n    fn mint_transfer_direct(\n        &mut self,\n        to: Option<AccountHash>,\n        source: URef,\n        target: URef,\n        amount: U512,\n        id: Option<u64>,\n    ) -> Result<Result<(), mint::Error>, Error> {\n        let addr = if let Some(uref) = self.runtime_footprint().main_purse() {\n            uref.addr()\n        } else {\n            return Err(Error::InvalidContext);\n        };\n        if !(addr == source.addr() || self.get_caller() == PublicKey::System.to_account_hash()) {\n            return Err(Error::InvalidCaller);\n        }\n\n        // let gas_counter = self.gas_counter();\n        self.extend_access_rights(&[source, target.into_add()]);\n\n        match self.transfer(to, source, target, amount, id) {\n            Ok(ret) => {\n                // self.set_gas_counter(gas_counter);\n                Ok(Ok(ret))\n            }\n            Err(err) => {\n                error!(\"{}\", err);\n                
Err(Error::Transfer)\n            }\n        }\n    }\n\n    fn mint_into_existing_purse(\n        &mut self,\n        amount: U512,\n        existing_purse: URef,\n    ) -> Result<(), Error> {\n        if self.get_caller() != PublicKey::System.to_account_hash() {\n            return Err(Error::InvalidCaller);\n        }\n\n        match <Self as Mint>::mint_into_existing_purse(self, existing_purse, amount) {\n            Ok(ret) => Ok(ret),\n            Err(err) => {\n                error!(\"{}\", err);\n                Err(Error::MintError)\n            }\n        }\n    }\n\n    fn create_purse(&mut self) -> Result<URef, Error> {\n        let initial_balance = U512::zero();\n        match <Self as Mint>::mint(self, initial_balance) {\n            Ok(ret) => Ok(ret),\n            Err(err) => {\n                error!(\"{}\", err);\n                Err(Error::CreatePurseFailed)\n            }\n        }\n    }\n\n    fn available_balance(&mut self, purse: URef) -> Result<Option<U512>, Error> {\n        match <Self as Mint>::balance(self, purse) {\n            Ok(ret) => Ok(ret),\n            Err(err) => {\n                error!(\"{}\", err);\n                Err(Error::GetBalance)\n            }\n        }\n    }\n\n    fn read_base_round_reward(&mut self) -> Result<U512, Error> {\n        match <Self as Mint>::read_base_round_reward(self) {\n            Ok(ret) => Ok(ret),\n            Err(err) => {\n                error!(\"{}\", err);\n                Err(Error::MissingValue)\n            }\n        }\n    }\n\n    fn mint(&mut self, amount: U512) -> Result<URef, Error> {\n        match <Self as Mint>::mint(self, amount) {\n            Ok(ret) => Ok(ret),\n            Err(err) => {\n                error!(\"{}\", err);\n                Err(Error::MintReward)\n            }\n        }\n    }\n\n    fn reduce_total_supply(&mut self, amount: U512) -> Result<(), Error> {\n        match <Self as Mint>::reduce_total_supply(self, amount) {\n            Ok(ret) => 
Ok(ret),\n            Err(err) => {\n                error!(\"{}\", err);\n                Err(Error::MintReduceTotalSupply)\n            }\n        }\n    }\n}\n\nimpl<S> AccountProvider for RuntimeNative<S>\nwhere\n    S: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn get_main_purse(&self) -> Result<URef, Error> {\n        // NOTE: this is used by the system and is not (and should not be made to be) accessible\n        // from userland.\n        match self.runtime_footprint().main_purse() {\n            None => {\n                debug!(\"runtime_native attempt to access non-existent main purse\");\n                Err(Error::InvalidContext)\n            }\n            Some(purse) => Ok(purse),\n        }\n    }\n\n    /// Set main purse.\n    fn set_main_purse(&mut self, purse: URef) {\n        self.runtime_footprint_mut().set_main_purse(purse);\n    }\n}\n\nimpl<S> Auction for RuntimeNative<S> where S: StateReader<Key, StoredValue, Error = GlobalStateError>\n{}\n"
  },
  {
    "path": "storage/src/system/auction/detail.rs",
    "content": "use std::{collections::BTreeMap, convert::TryInto, ops::Mul};\n\nuse super::{\n    Auction, EraValidators, MintProvider, RuntimeProvider, StorageProvider, ValidatorWeights,\n};\nuse casper_types::{\n    bytesrepr::{FromBytes, ToBytes},\n    system::auction::{\n        BidAddr, BidAddrTag, BidKind, DelegatorBid, DelegatorBids, DelegatorKind, Error,\n        Reservation, Reservations, SeigniorageRecipient, SeigniorageRecipientV2,\n        SeigniorageRecipientsSnapshot, SeigniorageRecipientsSnapshotV1,\n        SeigniorageRecipientsSnapshotV2, SeigniorageRecipientsV2, Unbond, UnbondEra, UnbondKind,\n        ValidatorBid, ValidatorBids, ValidatorCredit, ValidatorCredits, WeightsBreakout,\n        AUCTION_DELAY_KEY, DELEGATION_RATE_DENOMINATOR, ERA_END_TIMESTAMP_MILLIS_KEY, ERA_ID_KEY,\n        SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, UNBONDING_DELAY_KEY, VALIDATOR_SLOTS_KEY,\n    },\n    AccessRights, ApiError, CLTyped, EraId, Key, KeyTag, PublicKey, URef, U512,\n};\nuse num_rational::Ratio;\nuse num_traits::{CheckedMul, CheckedSub};\nuse tracing::{debug, error, warn};\n\n/// Maximum length of bridge records chain.\n/// Used when looking for the most recent bid record to avoid unbounded computations.\nconst MAX_BRIDGE_CHAIN_LENGTH: u64 = 20;\n\nfn read_from<P, T>(provider: &mut P, name: &str) -> Result<T, Error>\nwhere\n    P: StorageProvider + RuntimeProvider + ?Sized,\n    T: FromBytes + CLTyped,\n{\n    let key = match provider.named_keys_get(name) {\n        None => {\n            error!(\"auction missing named key {:?}\", name);\n            return Err(Error::MissingKey);\n        }\n        Some(key) => key,\n    };\n    let uref = key.into_uref().ok_or(Error::InvalidKeyVariant)?;\n    let value: T = provider.read(uref)?.ok_or(Error::MissingValue)?;\n    Ok(value)\n}\n\nfn write_to<P, T>(provider: &mut P, name: &str, value: T) -> Result<(), Error>\nwhere\n    P: StorageProvider + RuntimeProvider + ?Sized,\n    T: ToBytes + CLTyped,\n{\n    let key = 
provider.named_keys_get(name).ok_or(Error::MissingKey)?;\n    let uref = key.into_uref().ok_or(Error::InvalidKeyVariant)?;\n    provider.write(uref, value)\n}\n\n/// Aggregated bid data for a Validator.\n#[derive(Debug, Default)]\npub struct ValidatorBidsDetail {\n    validator_bids: ValidatorBids,\n    validator_credits: ValidatorCredits,\n    delegator_bids: DelegatorBids,\n    reservations: Reservations,\n}\n\nimpl ValidatorBidsDetail {\n    /// Ctor.\n    pub fn new() -> Self {\n        ValidatorBidsDetail {\n            validator_bids: BTreeMap::new(),\n            validator_credits: BTreeMap::new(),\n            delegator_bids: BTreeMap::new(),\n            reservations: BTreeMap::new(),\n        }\n    }\n\n    /// Inserts a validator bid.\n    pub fn insert_bid(\n        &mut self,\n        validator: PublicKey,\n        validator_bid: Box<ValidatorBid>,\n        delegators: Vec<Box<DelegatorBid>>,\n        reservations: Vec<Box<Reservation>>,\n    ) -> Option<Box<ValidatorBid>> {\n        self.delegator_bids.insert(validator.clone(), delegators);\n        self.reservations.insert(validator.clone(), reservations);\n        self.validator_bids.insert(validator, validator_bid)\n    }\n\n    /// Inserts a validator credit.\n    pub fn insert_credit(\n        &mut self,\n        validator: PublicKey,\n        era_id: EraId,\n        validator_credit: Box<ValidatorCredit>,\n    ) {\n        let credits = &mut self.validator_credits;\n\n        credits\n            .entry(validator.clone())\n            .and_modify(|inner| {\n                inner\n                    .entry(era_id)\n                    .and_modify(|_| {\n                        warn!(\n                            ?validator,\n                            ?era_id,\n                            \"multiple validator credit entries in same era\"\n                        )\n                    })\n                    .or_insert(validator_credit.clone());\n            })\n            .or_insert_with(|| 
{\n                let mut inner = BTreeMap::new();\n                inner.insert(era_id, validator_credit);\n                inner\n            });\n    }\n\n    /// Get validator weights.\n    #[allow(clippy::too_many_arguments)]\n    pub fn validator_weights_breakout(\n        &mut self,\n        era_ending: EraId,\n        era_end_timestamp_millis: u64,\n        vesting_schedule_period_millis: u64,\n        minimum_bid_amount: u64,\n        include_credits: bool,\n        credits_cap: Ratio<U512>,\n    ) -> Result<WeightsBreakout, Error> {\n        let mut ret = WeightsBreakout::new();\n        let min_bid = minimum_bid_amount.into();\n        for (validator_public_key, bid) in self\n            .validator_bids\n            .iter()\n            .filter(|(_, v)| !v.inactive() && !v.staked_amount() >= U512::one())\n        {\n            let mut staked_amount = bid.staked_amount();\n            let meets_minimum = staked_amount >= min_bid;\n            if let Some(delegators) = self.delegator_bids.get(validator_public_key) {\n                staked_amount = staked_amount\n                    .checked_add(delegators.iter().map(|d| d.staked_amount()).sum())\n                    .ok_or(Error::InvalidAmount)?;\n            }\n\n            let credit_amount = self.credit_amount(\n                validator_public_key,\n                era_ending,\n                staked_amount,\n                include_credits,\n                credits_cap,\n            );\n            let total = staked_amount.saturating_add(credit_amount);\n\n            let locked = bid.is_locked_with_vesting_schedule(\n                era_end_timestamp_millis,\n                vesting_schedule_period_millis,\n            );\n\n            ret.register(validator_public_key.clone(), total, locked, meets_minimum);\n        }\n\n        Ok(ret)\n    }\n\n    fn credit_amount(\n        &self,\n        validator_public_key: &PublicKey,\n        era_ending: EraId,\n        staked_amount: U512,\n        
include_credit: bool,\n        cap: Ratio<U512>,\n    ) -> U512 {\n        if !include_credit {\n            return U512::zero();\n        }\n\n        if let Some(inner) = self.validator_credits.get(validator_public_key) {\n            if let Some(credit) = inner.get(&era_ending) {\n                let capped = Ratio::new_raw(staked_amount, U512::one())\n                    .mul(cap)\n                    .to_integer();\n                let credit_amount = credit.amount();\n                return credit_amount.min(capped);\n            }\n        }\n\n        U512::zero()\n    }\n\n    #[allow(unused)]\n    pub(crate) fn validator_bids(&self) -> &ValidatorBids {\n        &self.validator_bids\n    }\n\n    pub(crate) fn validator_bids_mut(&mut self) -> &mut ValidatorBids {\n        &mut self.validator_bids\n    }\n\n    /// Select winners for auction.\n    #[allow(clippy::too_many_arguments)]\n    pub fn pick_winners(\n        &mut self,\n        era_id: EraId,\n        validator_slots: usize,\n        minimum_bid_amount: u64,\n        include_credits: bool,\n        credit_cap: Ratio<U512>,\n        era_end_timestamp_millis: u64,\n        vesting_schedule_period_millis: u64,\n    ) -> Result<ValidatorWeights, ApiError> {\n        // as a safety mechanism, if we would fall below 75% of the expected\n        // validator count by enforcing minimum bid, allow bids with less\n        // that min bid up to fill to 75% of the expected count\n        let threshold = Ratio::new(3, 4)\n            .mul(Ratio::new(validator_slots, 1))\n            .to_integer();\n        let breakout = self.validator_weights_breakout(\n            era_id,\n            era_end_timestamp_millis,\n            vesting_schedule_period_millis,\n            minimum_bid_amount,\n            include_credits,\n            credit_cap,\n        )?;\n        let ret = breakout.take(validator_slots, threshold);\n        Ok(ret)\n    }\n\n    /// Consume self into in underlying collections.\n    pub fn 
destructure(self) -> (ValidatorBids, ValidatorCredits, DelegatorBids, Reservations) {\n        (\n            self.validator_bids,\n            self.validator_credits,\n            self.delegator_bids,\n            self.reservations,\n        )\n    }\n}\n\n/// Prunes away all validator credits for the imputed era, which should be the era ending.\n///\n/// This is intended to be called at the end of an era, after calculating validator weights.\npub fn prune_validator_credits<P>(\n    provider: &mut P,\n    era_ending: EraId,\n    validator_credits: &ValidatorCredits,\n) where\n    P: StorageProvider + RuntimeProvider + ?Sized,\n{\n    for (validator_public_key, inner) in validator_credits {\n        if inner.contains_key(&era_ending) {\n            provider.prune_bid(BidAddr::new_credit(validator_public_key, era_ending))\n        }\n    }\n}\n\n/// Returns the imputed validator bids.\npub fn get_validator_bids<P>(provider: &mut P, era_id: EraId) -> Result<ValidatorBidsDetail, Error>\nwhere\n    P: StorageProvider + RuntimeProvider + ?Sized,\n{\n    let bids_keys = provider.get_keys(&KeyTag::BidAddr)?;\n\n    let mut ret = ValidatorBidsDetail::new();\n\n    for key in bids_keys {\n        match provider.read_bid(&key)? 
{\n            Some(BidKind::Validator(validator_bid)) => {\n                let validator_public_key = validator_bid.validator_public_key();\n                let delegator_bids = delegators(provider, validator_public_key)?;\n                let reservations = reservations(provider, validator_public_key)?;\n                ret.insert_bid(\n                    validator_public_key.clone(),\n                    validator_bid,\n                    delegator_bids,\n                    reservations,\n                );\n            }\n            Some(BidKind::Credit(credit)) => {\n                ret.insert_credit(credit.validator_public_key().clone(), era_id, credit);\n            }\n            Some(_) => {\n                // noop\n            }\n            None => return Err(Error::ValidatorNotFound),\n        };\n    }\n\n    Ok(ret)\n}\n\n/// Sets the imputed validator bids.\npub fn set_validator_bids<P>(provider: &mut P, validators: ValidatorBids) -> Result<(), Error>\nwhere\n    P: StorageProvider + RuntimeProvider + ?Sized,\n{\n    for (validator_public_key, validator_bid) in validators.into_iter() {\n        let bid_addr = BidAddr::from(validator_public_key.clone());\n        provider.write_bid(bid_addr.into(), BidKind::Validator(validator_bid))?;\n    }\n    Ok(())\n}\n\n/// Returns the unbonding purses.\npub fn get_unbonding_purses<P>(provider: &mut P) -> Result<BTreeMap<BidAddr, Unbond>, Error>\nwhere\n    P: StorageProvider + RuntimeProvider + ?Sized,\n{\n    let prefix = vec![KeyTag::BidAddr as u8, BidAddrTag::UnbondAccount as u8];\n\n    let unbond_keys = provider.get_keys_by_prefix(&prefix)?;\n\n    let mut ret = BTreeMap::new();\n\n    for key in unbond_keys {\n        if let Key::BidAddr(bid_addr) = key {\n            match provider.read_bid(&key) {\n                Ok(Some(BidKind::Unbond(unbonds))) => {\n                    ret.insert(bid_addr, *unbonds);\n                }\n                Ok(Some(_)) => {\n                    warn!(\"unexpected 
BidKind variant {:?}\", key);\n                }\n                Ok(None) => {\n                    warn!(\"expected unbond record {:?}\", key);\n                }\n                Err(err) => {\n                    error!(\"{} {}\", key, err);\n                }\n            }\n        }\n    }\n\n    let prefix = vec![KeyTag::BidAddr as u8, BidAddrTag::UnbondPurse as u8];\n\n    let unbond_keys = provider.get_keys_by_prefix(&prefix)?;\n    for key in unbond_keys {\n        if let Key::BidAddr(bid_addr) = key {\n            match provider.read_bid(&key) {\n                Ok(Some(BidKind::Unbond(unbonds))) => {\n                    ret.insert(bid_addr, *unbonds);\n                }\n                Ok(Some(_)) => {\n                    warn!(\"unexpected BidKind variant {:?}\", key)\n                }\n                Ok(None) => {\n                    warn!(\"expected unbond record {:?}\", key)\n                }\n                Err(err) => {\n                    error!(\"{} {}\", key, err);\n                }\n            }\n        }\n    }\n\n    Ok(ret)\n}\n\n/// Returns the era id.\npub fn get_era_id<P>(provider: &mut P) -> Result<EraId, Error>\nwhere\n    P: StorageProvider + RuntimeProvider + ?Sized,\n{\n    read_from(provider, ERA_ID_KEY)\n}\n\n/// Sets the era id.\npub fn set_era_id<P>(provider: &mut P, era_id: EraId) -> Result<(), Error>\nwhere\n    P: StorageProvider + RuntimeProvider + ?Sized,\n{\n    write_to(provider, ERA_ID_KEY, era_id)\n}\n\n/// Returns the era end timestamp.\npub fn get_era_end_timestamp_millis<P>(provider: &mut P) -> Result<u64, Error>\nwhere\n    P: StorageProvider + RuntimeProvider + ?Sized,\n{\n    read_from(provider, ERA_END_TIMESTAMP_MILLIS_KEY)\n}\n\n/// Sets the era end timestamp.\npub fn set_era_end_timestamp_millis<P>(\n    provider: &mut P,\n    era_end_timestamp_millis: u64,\n) -> Result<(), Error>\nwhere\n    P: StorageProvider + RuntimeProvider + ?Sized,\n{\n    write_to(\n        provider,\n        
ERA_END_TIMESTAMP_MILLIS_KEY,\n        era_end_timestamp_millis,\n    )\n}\n\n/// Returns seigniorage recipients snapshot.\npub fn get_seigniorage_recipients_snapshot<P>(\n    provider: &mut P,\n) -> Result<SeigniorageRecipientsSnapshotV2, Error>\nwhere\n    P: StorageProvider + RuntimeProvider + ?Sized,\n{\n    read_from(provider, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY)\n}\n\n/// Returns seigniorage recipients snapshot in legacy format.\npub fn get_legacy_seigniorage_recipients_snapshot<P>(\n    provider: &mut P,\n) -> Result<SeigniorageRecipientsSnapshotV1, Error>\nwhere\n    P: StorageProvider + RuntimeProvider + ?Sized,\n{\n    read_from(provider, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY)\n}\n\n/// Sets the setigniorage recipients snapshot.\npub fn set_seigniorage_recipients_snapshot<P>(\n    provider: &mut P,\n    snapshot: SeigniorageRecipientsSnapshotV2,\n) -> Result<(), Error>\nwhere\n    P: StorageProvider + RuntimeProvider + ?Sized,\n{\n    write_to(provider, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, snapshot)\n}\n\n/// Returns the number of validator slots.\npub fn get_validator_slots<P>(provider: &mut P) -> Result<usize, Error>\nwhere\n    P: StorageProvider + RuntimeProvider + ?Sized,\n{\n    let validator_slots: u32 = match read_from(provider, VALIDATOR_SLOTS_KEY) {\n        Ok(ret) => ret,\n        Err(err) => {\n            error!(\"Failed to find VALIDATOR_SLOTS_KEY {}\", err);\n            return Err(err);\n        }\n    };\n    let validator_slots = validator_slots\n        .try_into()\n        .map_err(|_| Error::InvalidValidatorSlotsValue)?;\n    Ok(validator_slots)\n}\n\n/// Returns auction delay.\npub fn get_auction_delay<P>(provider: &mut P) -> Result<u64, Error>\nwhere\n    P: StorageProvider + RuntimeProvider + ?Sized,\n{\n    let auction_delay: u64 = match read_from(provider, AUCTION_DELAY_KEY) {\n        Ok(ret) => ret,\n        Err(err) => {\n            error!(\"Failed to find AUCTION_DELAY_KEY {}\", err);\n            return Err(err);\n        
}\n    };\n    Ok(auction_delay)\n}\n\nfn get_unbonding_delay<P>(provider: &mut P) -> Result<u64, Error>\nwhere\n    P: StorageProvider + RuntimeProvider + ?Sized,\n{\n    read_from(provider, UNBONDING_DELAY_KEY)\n}\n\n/// Iterates over unbonding entries and checks if a locked amount can be paid already if\n/// a specific era is reached.\n///\n/// This function can be called by the system only.\npub fn process_unbond_requests<P: Auction>(\n    provider: &mut P,\n    max_delegators_per_validator: u32,\n) -> Result<(), ApiError> {\n    if provider.get_caller() != PublicKey::System.to_account_hash() {\n        return Err(Error::InvalidCaller.into());\n    }\n\n    let current_era_id = provider.read_era_id()?;\n\n    let unbonding_delay = get_unbonding_delay(provider)?;\n\n    let unbonds = get_unbonding_purses(provider)?;\n\n    for (bid_addr, unbond) in unbonds {\n        let unbond_kind = &unbond.unbond_kind().clone();\n        let (retained, expired) = unbond.expired(current_era_id, unbonding_delay);\n        if let Some(unbonded) = expired {\n            for unbond_era in unbonded {\n                if unbond_kind.is_validator() {\n                    provider.unbond(unbond_kind, &unbond_era).map_err(|err| {\n                        error!(?err, \"error unbonding purse\");\n                        ApiError::from(Error::TransferToUnbondingPurse)\n                    })?;\n                    continue;\n                }\n                let redelegation_result = handle_redelegation(\n                    provider,\n                    unbond_kind,\n                    &unbond_era,\n                    max_delegators_per_validator,\n                )\n                .inspect_err(|err| {\n                    error!(?err, ?unbond_kind, ?unbond_era, \"error processing unbond\");\n                })?;\n\n                match redelegation_result {\n                    UnbondRedelegationOutcome::SuccessfullyRedelegated => {\n                        // noop; on successful 
re-delegation, no actual unbond occurs\n                    }\n                    uro @ UnbondRedelegationOutcome::NonexistantRedelegationTarget\n                    | uro @ UnbondRedelegationOutcome::DelegationAmountBelowCap\n                    | uro @ UnbondRedelegationOutcome::DelegationAmountAboveCap\n                    | uro @ UnbondRedelegationOutcome::RedelegationTargetHasNoVacancy\n                    | uro @ UnbondRedelegationOutcome::RedelegationTargetIsUnstaked\n                    | uro @ UnbondRedelegationOutcome::Withdrawal => {\n                        // Move funds from bid purse to unbonding purse\n                        provider.unbond(unbond_kind, &unbond_era).map_err(|err| {\n                            error!(?err, ?uro, \"error unbonding purse\");\n                            ApiError::from(Error::TransferToUnbondingPurse)\n                        })?\n                    }\n                }\n            }\n        }\n        if retained.eras().is_empty() {\n            provider.write_unbond(bid_addr, None)?;\n        } else {\n            provider.write_unbond(bid_addr, Some(retained))?;\n        }\n    }\n    Ok(())\n}\n\n/// Creates a new purse in unbonding_purses given a validator's key, amount, and a destination\n/// unbonding purse. 
Returns the amount of motes remaining in the validator's bid purse.\npub fn create_unbonding_purse<P: Auction>(\n    provider: &mut P,\n    validator_public_key: PublicKey,\n    unbond_kind: UnbondKind,\n    bonding_purse: URef,\n    amount: U512,\n    new_validator: Option<PublicKey>,\n) -> Result<(), Error> {\n    if provider\n        .available_balance(bonding_purse)?\n        .unwrap_or_default()\n        < amount\n    {\n        return Err(Error::UnbondTooLarge);\n    }\n\n    let era_of_creation = provider.read_era_id()?;\n\n    let bid_addr = match &unbond_kind {\n        UnbondKind::Validator(_) => {\n            let account_hash = validator_public_key.to_account_hash();\n            BidAddr::UnbondAccount {\n                validator: account_hash,\n                unbonder: account_hash,\n            }\n        }\n        UnbondKind::DelegatedPublicKey(pk) => BidAddr::UnbondAccount {\n            validator: validator_public_key.to_account_hash(),\n            unbonder: pk.to_account_hash(),\n        },\n        UnbondKind::DelegatedPurse(addr) => BidAddr::UnbondPurse {\n            validator: validator_public_key.to_account_hash(),\n            unbonder: *addr,\n        },\n    };\n\n    let unbond_era = UnbondEra::new(bonding_purse, era_of_creation, amount, new_validator);\n\n    let unbond = match provider.read_unbond(bid_addr)? 
{\n        Some(unbond) => {\n            let mut eras = unbond.take_eras();\n            eras.push(unbond_era);\n            Unbond::new(validator_public_key, unbond_kind, eras)\n        }\n        None => Unbond::new(validator_public_key, unbond_kind, vec![unbond_era]),\n    };\n\n    provider.write_unbond(bid_addr, Some(unbond))?;\n\n    Ok(())\n}\n\n/// Reward distribution target variants.\n#[derive(Debug)]\npub enum DistributeTarget {\n    /// Validator bid.\n    Validator(Box<ValidatorBid>),\n    /// Bridged validator bid.\n    BridgedValidator {\n        /// Requested bid addr.\n        requested_validator_bid_addr: BidAddr,\n        /// The current bid addr for the bridged validator.\n        current_validator_bid_addr: BidAddr,\n        /// All chained bid addrs.\n        bridged_validator_addrs: Vec<BidAddr>,\n        /// Validator bid.\n        validator_bid: Box<ValidatorBid>,\n    },\n    /// Delegator bid.\n    Delegator(Box<DelegatorBid>),\n    /// Unbond record.\n    Unbond(Box<Unbond>),\n}\n\nimpl DistributeTarget {\n    /// Returns the bonding purse for this instance.\n    pub fn bonding_purse(&self) -> Result<URef, Error> {\n        match self {\n            DistributeTarget::Validator(vb) => Ok(*vb.bonding_purse()),\n            DistributeTarget::BridgedValidator { validator_bid, .. 
} => {\n                Ok(*validator_bid.bonding_purse())\n            }\n            DistributeTarget::Delegator(db) => Ok(*db.bonding_purse()),\n            DistributeTarget::Unbond(unbond) => match unbond.target_unbond_era() {\n                Some(unbond_era) => Ok(*unbond_era.bonding_purse()),\n                None => Err(Error::MissingPurse),\n            },\n        }\n    }\n}\n\n/// Returns most recent validator public key if public key has been changed\n/// or the validator has withdrawn their bid completely.\npub fn get_distribution_target<P: RuntimeProvider + StorageProvider>(\n    provider: &mut P,\n    bid_addr: BidAddr,\n) -> Result<DistributeTarget, Error> {\n    let mut bridged_addrs = vec![];\n    let mut current_validator_bid_addr = bid_addr;\n    for _ in 0..MAX_BRIDGE_CHAIN_LENGTH {\n        match provider.read_bid(&current_validator_bid_addr.into())? {\n            Some(BidKind::Validator(validator_bid)) => {\n                if !bridged_addrs.is_empty() {\n                    return Ok(DistributeTarget::BridgedValidator {\n                        requested_validator_bid_addr: bid_addr,\n                        current_validator_bid_addr,\n                        bridged_validator_addrs: bridged_addrs,\n                        validator_bid,\n                    });\n                }\n                return Ok(DistributeTarget::Validator(validator_bid));\n            }\n            Some(BidKind::Delegator(delegator_bid)) => {\n                return Ok(DistributeTarget::Delegator(delegator_bid));\n            }\n            Some(BidKind::Unbond(unbond)) => {\n                return Ok(DistributeTarget::Unbond(unbond));\n            }\n            Some(BidKind::Bridge(bridge)) => {\n                current_validator_bid_addr =\n                    BidAddr::from(bridge.new_validator_public_key().clone());\n                bridged_addrs.push(current_validator_bid_addr);\n            }\n            None => {\n                // in the case of 
missing validator or delegator bids, check unbonds\n                if let BidAddr::Validator(account_hash) = bid_addr {\n                    let validator_unbond_key = BidAddr::UnbondAccount {\n                        validator: account_hash,\n                        unbonder: account_hash,\n                    }\n                    .into();\n                    if let Some(BidKind::Unbond(unbond)) =\n                        provider.read_bid(&validator_unbond_key)?\n                    {\n                        return Ok(DistributeTarget::Unbond(unbond));\n                    }\n                    return Err(Error::ValidatorNotFound);\n                }\n\n                if let BidAddr::DelegatedAccount {\n                    validator,\n                    delegator,\n                } = bid_addr\n                {\n                    let delegator_unbond_key = BidAddr::UnbondAccount {\n                        validator,\n                        unbonder: delegator,\n                    }\n                    .into();\n                    if let Some(BidKind::Unbond(unbond)) =\n                        provider.read_bid(&delegator_unbond_key)?\n                    {\n                        return Ok(DistributeTarget::Unbond(unbond));\n                    }\n                    return Err(Error::DelegatorNotFound);\n                }\n\n                if let BidAddr::DelegatedPurse {\n                    validator,\n                    delegator,\n                } = bid_addr\n                {\n                    let delegator_unbond_key = BidAddr::UnbondPurse {\n                        validator,\n                        unbonder: delegator,\n                    }\n                    .into();\n                    if let Some(BidKind::Unbond(unbond)) =\n                        provider.read_bid(&delegator_unbond_key)?\n                    {\n                        return Ok(DistributeTarget::Unbond(unbond));\n                    }\n                    
return Err(Error::DelegatorNotFound);\n                }\n\n                break;\n            }\n            _ => {\n                break;\n            }\n        };\n    }\n    Err(Error::BridgeRecordChainTooLong)\n}\n\n#[derive(Debug)]\nenum UnbondRedelegationOutcome {\n    Withdrawal,\n    SuccessfullyRedelegated,\n    NonexistantRedelegationTarget,\n    RedelegationTargetHasNoVacancy,\n    RedelegationTargetIsUnstaked,\n    DelegationAmountBelowCap,\n    DelegationAmountAboveCap,\n}\n\nfn handle_redelegation<P>(\n    provider: &mut P,\n    unbond_kind: &UnbondKind,\n    unbond_era: &UnbondEra,\n    max_delegators_per_validator: u32,\n) -> Result<UnbondRedelegationOutcome, ApiError>\nwhere\n    P: StorageProvider + MintProvider + RuntimeProvider,\n{\n    let delegator_kind = match unbond_kind {\n        UnbondKind::Validator(_) => {\n            return Err(ApiError::AuctionError(Error::UnexpectedUnbondVariant as u8))\n        }\n        UnbondKind::DelegatedPublicKey(pk) => DelegatorKind::PublicKey(pk.clone()),\n        UnbondKind::DelegatedPurse(addr) => DelegatorKind::Purse(*addr),\n    };\n\n    let redelegation_target_public_key = match unbond_era.new_validator() {\n        Some(public_key) => {\n            // get updated key if `ValidatorBid` public key was changed\n            let validator_bid_addr = BidAddr::from(public_key.clone());\n            match read_current_validator_bid(provider, validator_bid_addr.into()) {\n                Ok(validator_bid) => validator_bid.validator_public_key().clone(),\n                Err(err) => {\n                    error!(?err, ?unbond_era, redelegate_to=?public_key, \"error redelegating\");\n                    return Ok(UnbondRedelegationOutcome::NonexistantRedelegationTarget);\n                }\n            }\n        }\n        None => return Ok(UnbondRedelegationOutcome::Withdrawal),\n    };\n\n    let redelegation = handle_delegation(\n        provider,\n        delegator_kind,\n        
redelegation_target_public_key,\n        *unbond_era.bonding_purse(),\n        *unbond_era.amount(),\n        max_delegators_per_validator,\n    );\n    match redelegation {\n        Ok(_) => Ok(UnbondRedelegationOutcome::SuccessfullyRedelegated),\n        Err(ApiError::AuctionError(err)) if err == Error::BondTooSmall as u8 => {\n            Ok(UnbondRedelegationOutcome::RedelegationTargetIsUnstaked)\n        }\n        Err(ApiError::AuctionError(err)) if err == Error::DelegationAmountTooSmall as u8 => {\n            Ok(UnbondRedelegationOutcome::DelegationAmountBelowCap)\n        }\n        Err(ApiError::AuctionError(err)) if err == Error::DelegationAmountTooLarge as u8 => {\n            Ok(UnbondRedelegationOutcome::DelegationAmountAboveCap)\n        }\n        Err(ApiError::AuctionError(err)) if err == Error::ValidatorNotFound as u8 => {\n            Ok(UnbondRedelegationOutcome::NonexistantRedelegationTarget)\n        }\n        Err(ApiError::AuctionError(err)) if err == Error::ExceededDelegatorSizeLimit as u8 => {\n            Ok(UnbondRedelegationOutcome::RedelegationTargetHasNoVacancy)\n        }\n        Err(err) => Err(err),\n    }\n}\n\n/// Checks if a reservation for a given delegator exists.\nfn has_reservation<P>(\n    provider: &mut P,\n    delegator_kind: &DelegatorKind,\n    validator: &PublicKey,\n) -> Result<bool, Error>\nwhere\n    P: RuntimeProvider + StorageProvider + ?Sized,\n{\n    let reservation_bid_key = match delegator_kind {\n        DelegatorKind::PublicKey(pk) => BidAddr::ReservedDelegationAccount {\n            validator: validator.to_account_hash(),\n            delegator: pk.to_account_hash(),\n        },\n        DelegatorKind::Purse(addr) => BidAddr::ReservedDelegationPurse {\n            validator: validator.to_account_hash(),\n            delegator: *addr,\n        },\n    }\n    .into();\n    if let Some(BidKind::Reservation(_)) = provider.read_bid(&reservation_bid_key)? 
{\n        Ok(true)\n    } else {\n        Ok(false)\n    }\n}\n\n/// If specified validator exists, and if validator is not yet at max delegators count, processes\n/// delegation. For a new delegation a delegator bid record will be created to track the delegation,\n/// otherwise the existing tracking record will be updated.\n#[allow(clippy::too_many_arguments)]\npub fn handle_delegation<P>(\n    provider: &mut P,\n    delegator_kind: DelegatorKind,\n    validator_public_key: PublicKey,\n    source: URef,\n    amount: U512,\n    max_delegators_per_validator: u32,\n) -> Result<U512, ApiError>\nwhere\n    P: StorageProvider + MintProvider + RuntimeProvider,\n{\n    if amount.is_zero() {\n        return Err(Error::BondTooSmall.into());\n    }\n\n    let validator_bid_addr = BidAddr::from(validator_public_key.clone());\n    // is there such a validator?\n    let validator_bid = read_validator_bid(provider, &validator_bid_addr.into())?;\n\n    // is there already a record for this delegator?\n    let delegator_bid_key =\n        BidAddr::new_delegator_kind(&validator_public_key, &delegator_kind).into();\n\n    let (target, delegator_bid) = if let Some(BidKind::Delegator(mut delegator_bid)) =\n        provider.read_bid(&delegator_bid_key)?\n    {\n        let current_stake = delegator_bid.staked_amount();\n        let total_stake = amount.saturating_add(current_stake);\n        let validator_max = U512::from(validator_bid.maximum_delegation_amount());\n        if total_stake > validator_max {\n            // Fill up the delegator stake upto the maximum limit and only transfer the difference\n            // required.\n            return Err(Error::DelegationAmountTooLarge.into());\n        };\n        delegator_bid.increase_stake(amount)?;\n        (*delegator_bid.bonding_purse(), delegator_bid)\n    } else {\n        // Early checks for a new delegator entry into a validators set.\n        if amount < U512::from(validator_bid.minimum_delegation_amount()) {\n            
return Err(Error::DelegationAmountTooSmall.into());\n        }\n        if amount > U512::from(validator_bid.maximum_delegation_amount()) {\n            return Err(Error::DelegationAmountTooLarge.into());\n        }\n        // is this validator over the delegator limit\n        // or is there a reservation for given delegator public key?\n        let delegator_count = provider.delegator_count(&validator_bid_addr)?;\n        let reserved_slots_count = validator_bid.reserved_slots();\n        let reservation_count = provider.reservation_count(&validator_bid_addr)?;\n        let has_reservation = has_reservation(provider, &delegator_kind, &validator_public_key)?;\n        if delegator_count >= (max_delegators_per_validator - reserved_slots_count) as usize\n            && !has_reservation\n        {\n            warn!(\n                %delegator_count, %max_delegators_per_validator, %reservation_count, %has_reservation,\n                \"delegator_count {}, max_delegators_per_validator {}, reservation_count {}, has_reservation {}\",\n                delegator_count, max_delegators_per_validator, reservation_count, has_reservation\n            );\n            return Err(Error::ExceededDelegatorSizeLimit.into());\n        }\n\n        let bonding_purse = provider.create_purse()?;\n        let delegator_bid =\n            DelegatorBid::unlocked(delegator_kind, amount, bonding_purse, validator_public_key);\n        (bonding_purse, Box::new(delegator_bid))\n    };\n\n    // transfer token to bonding purse\n    provider\n        .mint_transfer_direct(\n            Some(PublicKey::System.to_account_hash()),\n            source,\n            target,\n            amount,\n            None,\n        )\n        .map_err(|_| Error::TransferToDelegatorPurse)?\n        .map_err(|mint_error| {\n            // Propagate mint contract's error that occured during execution of transfer\n            // entrypoint. 
This will improve UX in case of (for example)\n            // unapproved spending limit error.\n            ApiError::from(mint_error)\n        })?;\n\n    let updated_amount = delegator_bid.staked_amount();\n    provider.write_bid(delegator_bid_key, BidKind::Delegator(delegator_bid))?;\n\n    Ok(updated_amount)\n}\n\n/// If specified validator exists, and if validator is not yet at max reservations count, processes\n/// reservation. For a new reservation a bid record will be created to track the reservation,\n/// otherwise the existing tracking record will be updated.\n#[allow(clippy::too_many_arguments)]\npub fn handle_add_reservation<P>(\n    provider: &mut P,\n    reservation: Reservation,\n    minimum_delegation_rate: u8,\n) -> Result<(), Error>\nwhere\n    P: StorageProvider + MintProvider + RuntimeProvider,\n{\n    let delegation_rate = *reservation.delegation_rate();\n\n    // validate specified delegation rate\n    if reservation.delegation_rate() > &DELEGATION_RATE_DENOMINATOR {\n        return Err(Error::DelegationRateTooLarge);\n    }\n\n    if delegation_rate < minimum_delegation_rate {\n        return Err(Error::DelegationRateTooSmall);\n    }\n\n    // is there such a validator?\n    let validator_bid_addr = BidAddr::from(reservation.validator_public_key().clone());\n    let bid = read_validator_bid(provider, &validator_bid_addr.into())?;\n\n    let reservation_bid_key = match reservation.delegator_kind() {\n        DelegatorKind::PublicKey(pk) => BidAddr::ReservedDelegationAccount {\n            validator: reservation.validator_public_key().to_account_hash(),\n            delegator: pk.to_account_hash(),\n        },\n        DelegatorKind::Purse(addr) => BidAddr::ReservedDelegationPurse {\n            validator: reservation.validator_public_key().to_account_hash(),\n            delegator: *addr,\n        },\n    }\n    .into();\n    if provider.read_bid(&reservation_bid_key)?.is_none() {\n        // ensure reservation list has capacity to create a 
new reservation\n        let reservation_count = provider.reservation_count(&validator_bid_addr)?;\n        let reserved_slots = bid.reserved_slots() as usize;\n        if reservation_count >= reserved_slots {\n            warn!(\n                %reservation_count, %reserved_slots,\n                \"reservation_count {}, reserved_slots {}\",\n                reservation_count, reserved_slots\n            );\n            return Err(Error::ExceededReservationsLimit);\n        }\n    };\n\n    provider.write_bid(\n        reservation_bid_key,\n        BidKind::Reservation(Box::new(reservation)),\n    )?;\n\n    Ok(())\n}\n\n/// Attempts to remove a reservation if one exists. If not it returns an error.\n///\n/// If there is already a delegator bid associated with a given reservation it validates that\n/// there are free public slots available. If not, it returns an error since the delegator\n/// cannot be \"downgraded\".\npub fn handle_cancel_reservation<P>(\n    provider: &mut P,\n    validator: PublicKey,\n    delegator_kind: DelegatorKind,\n    max_delegators_per_validator: u32,\n) -> Result<(), Error>\nwhere\n    P: StorageProvider + MintProvider + RuntimeProvider,\n{\n    // is there such a validator?\n    let validator_bid_addr = BidAddr::from(validator.clone());\n    let validator_bid = read_validator_bid(provider, &validator_bid_addr.into())?;\n    let validator = validator.to_account_hash();\n\n    // is there a reservation for this delegator?\n    let (reservation_bid_addr, delegator_bid_addr) = match delegator_kind {\n        DelegatorKind::PublicKey(pk) => {\n            let delegator_account_hash = pk.to_account_hash();\n            (\n                BidAddr::ReservedDelegationAccount {\n                    validator,\n                    delegator: delegator_account_hash,\n                },\n                BidAddr::DelegatedAccount {\n                    validator,\n                    delegator: delegator_account_hash,\n                },\n         
   )\n        }\n        DelegatorKind::Purse(addr) => (\n            BidAddr::ReservedDelegationPurse {\n                validator,\n                delegator: addr,\n            },\n            BidAddr::DelegatedPurse {\n                validator,\n                delegator: addr,\n            },\n        ),\n    };\n\n    if provider.read_bid(&reservation_bid_addr.into())?.is_none() {\n        return Err(Error::ReservationNotFound);\n    }\n\n    // is there such a delegator?\n    if read_delegator_bid(provider, &delegator_bid_addr.into()).is_ok() {\n        // is there a free public slot\n        let reserved_slots = validator_bid.reserved_slots();\n        let delegator_count = provider.delegator_count(&validator_bid_addr)?;\n        let used_reservation_count = provider.used_reservation_count(&validator_bid_addr)?;\n        let normal_delegators = delegator_count.saturating_sub(used_reservation_count);\n        let public_slots = max_delegators_per_validator.saturating_sub(reserved_slots);\n\n        // cannot \"downgrade\" a delegator if there are no free public slots available\n        if public_slots == normal_delegators as u32 {\n            return Err(Error::ExceededDelegatorSizeLimit);\n        }\n    }\n\n    provider.prune_bid(reservation_bid_addr);\n    Ok(())\n}\n\n/// Returns validator bid by key.\npub fn read_validator_bid<P>(provider: &mut P, bid_key: &Key) -> Result<Box<ValidatorBid>, Error>\nwhere\n    P: StorageProvider + ?Sized,\n{\n    if !bid_key.is_bid_addr_key() {\n        return Err(Error::InvalidKeyVariant);\n    }\n    if let Some(BidKind::Validator(validator_bid)) = provider.read_bid(bid_key)? 
{\n        Ok(validator_bid)\n    } else {\n        Err(Error::ValidatorNotFound)\n    }\n}\n\n/// Returns current `ValidatorBid` in case the public key was changed.\npub fn read_current_validator_bid<P>(\n    provider: &mut P,\n    mut bid_key: Key,\n) -> Result<Box<ValidatorBid>, Error>\nwhere\n    P: StorageProvider + ?Sized,\n{\n    if !bid_key.is_bid_addr_key() {\n        return Err(Error::InvalidKeyVariant);\n    }\n\n    for _ in 0..MAX_BRIDGE_CHAIN_LENGTH {\n        match provider.read_bid(&bid_key)? {\n            Some(BidKind::Validator(validator_bid)) => return Ok(validator_bid),\n            Some(BidKind::Bridge(bridge)) => {\n                debug!(\n                    ?bid_key,\n                    ?bridge,\n                    \"read_current_validator_bid: bridge found\"\n                );\n                let validator_bid_addr = BidAddr::from(bridge.new_validator_public_key().clone());\n                bid_key = validator_bid_addr.into();\n            }\n            _ => break,\n        }\n    }\n    Err(Error::ValidatorNotFound)\n}\n\n/// Returns all delegator bids for imputed validator.\npub fn read_delegator_bids<P>(\n    provider: &mut P,\n    validator_public_key: &PublicKey,\n) -> Result<Vec<DelegatorBid>, Error>\nwhere\n    P: RuntimeProvider + StorageProvider + ?Sized,\n{\n    let mut ret = vec![];\n    let bid_addr = BidAddr::from(validator_public_key.clone());\n    let mut delegator_bid_keys = provider.get_keys_by_prefix(\n        &bid_addr\n            .delegated_account_prefix()\n            .map_err(|_| Error::Serialization)?,\n    )?;\n    delegator_bid_keys.extend(\n        provider.get_keys_by_prefix(\n            &bid_addr\n                .delegated_purse_prefix()\n                .map_err(|_| Error::Serialization)?,\n        )?,\n    );\n    for delegator_bid_key in delegator_bid_keys {\n        let delegator_bid = read_delegator_bid(provider, &delegator_bid_key)?;\n        ret.push(*delegator_bid);\n    }\n\n    
Ok(ret)\n}\n\n/// Returns delegator bid by key.\npub fn read_delegator_bid<P>(provider: &mut P, bid_key: &Key) -> Result<Box<DelegatorBid>, Error>\nwhere\n    P: RuntimeProvider + ?Sized + StorageProvider,\n{\n    if !bid_key.is_bid_addr_key() {\n        return Err(Error::InvalidKeyVariant);\n    }\n    if let Some(BidKind::Delegator(delegator_bid)) = provider.read_bid(bid_key)? {\n        Ok(delegator_bid)\n    } else {\n        Err(Error::DelegatorNotFound)\n    }\n}\n\n/// Returns all delegator slot reservations for given validator.\npub fn read_reservation_bids<P>(\n    provider: &mut P,\n    validator_public_key: &PublicKey,\n) -> Result<Vec<Reservation>, Error>\nwhere\n    P: RuntimeProvider + StorageProvider + ?Sized,\n{\n    let mut ret = vec![];\n    let bid_addr = BidAddr::from(validator_public_key.clone());\n    let mut reservation_bid_keys = provider.get_keys_by_prefix(\n        &bid_addr\n            .reserved_account_prefix()\n            .map_err(|_| Error::Serialization)?,\n    )?;\n    reservation_bid_keys.extend(\n        provider.get_keys_by_prefix(\n            &bid_addr\n                .reserved_purse_prefix()\n                .map_err(|_| Error::Serialization)?,\n        )?,\n    );\n    for reservation_bid_key in reservation_bid_keys {\n        let reservation_bid = read_reservation_bid(provider, &reservation_bid_key)?;\n        ret.push(*reservation_bid);\n    }\n\n    Ok(ret)\n}\n\n/// Returns delegator slot reservation bid by key.\npub fn read_reservation_bid<P>(provider: &mut P, bid_key: &Key) -> Result<Box<Reservation>, Error>\nwhere\n    P: RuntimeProvider + ?Sized + StorageProvider,\n{\n    if !bid_key.is_bid_addr_key() {\n        return Err(Error::InvalidKeyVariant);\n    }\n    if let Some(BidKind::Reservation(reservation_bid)) = provider.read_bid(bid_key)? 
{\n        Ok(reservation_bid)\n    } else {\n        Err(Error::ReservationNotFound)\n    }\n}\n\n/// Applies seigniorage recipient changes.\npub fn seigniorage_recipients(\n    validator_weights: &ValidatorWeights,\n    validator_bids: &ValidatorBids,\n    delegator_bids: &DelegatorBids,\n    reservations: &Reservations,\n) -> Result<SeigniorageRecipientsV2, Error> {\n    let mut recipients = SeigniorageRecipientsV2::new();\n    for (validator_public_key, validator_total_weight) in validator_weights {\n        // check if validator bid exists before processing.\n        let validator_bid = validator_bids\n            .get(validator_public_key)\n            .ok_or(Error::ValidatorNotFound)?;\n        // calculate delegator portion(s), if any\n        let mut delegators_weight = U512::zero();\n        let mut delegators_stake = BTreeMap::new();\n        if let Some(delegators) = delegator_bids.get(validator_public_key) {\n            for delegator_bid in delegators {\n                if delegator_bid.staked_amount().is_zero() {\n                    continue;\n                }\n                let delegator_staked_amount = delegator_bid.staked_amount();\n                delegators_weight = delegators_weight.saturating_add(delegator_staked_amount);\n                let delegator_kind = delegator_bid.delegator_kind();\n                delegators_stake.insert(delegator_kind.clone(), delegator_staked_amount);\n            }\n        }\n\n        let mut reservation_delegation_rates = BTreeMap::new();\n        if let Some(reservations) = reservations.get(validator_public_key) {\n            for reservation in reservations {\n                reservation_delegation_rates.insert(\n                    reservation.delegator_kind().clone(),\n                    *reservation.delegation_rate(),\n                );\n            }\n        }\n\n        // determine validator's personal stake (total weight - sum of delegators weight)\n        let validator_stake = 
validator_total_weight.saturating_sub(delegators_weight);\n        let seigniorage_recipient = SeigniorageRecipientV2::new(\n            validator_stake,\n            *validator_bid.delegation_rate(),\n            delegators_stake,\n            reservation_delegation_rates,\n        );\n        recipients.insert(validator_public_key.clone(), seigniorage_recipient);\n    }\n    Ok(recipients)\n}\n\n/// Returns the era validators from a snapshot.\n///\n/// This is `pub` as it is used not just in the relevant auction entry point, but also by the\n/// engine state while directly querying for the era validators.\npub fn era_validators_from_snapshot(snapshot: SeigniorageRecipientsSnapshotV2) -> EraValidators {\n    snapshot\n        .into_iter()\n        .map(|(era_id, recipients)| {\n            let validator_weights = recipients\n                .into_iter()\n                .filter_map(|(public_key, bid)| bid.total_stake().map(|stake| (public_key, stake)))\n                .collect::<ValidatorWeights>();\n            (era_id, validator_weights)\n        })\n        .collect()\n}\n\n/// Returns the era validators from a legacy snapshot.\npub(crate) fn era_validators_from_legacy_snapshot(\n    snapshot: SeigniorageRecipientsSnapshotV1,\n) -> EraValidators {\n    snapshot\n        .into_iter()\n        .map(|(era_id, recipients)| {\n            let validator_weights = recipients\n                .into_iter()\n                .filter_map(|(public_key, bid)| bid.total_stake().map(|stake| (public_key, stake)))\n                .collect::<ValidatorWeights>();\n            (era_id, validator_weights)\n        })\n        .collect()\n}\n\n/// Initializes the vesting schedule of provided bid if the provided timestamp is greater than\n/// or equal to the bid's initial release timestamp and the bid is owned by a genesis\n/// validator.\n///\n/// Returns `true` if the provided bid's vesting schedule was initialized.\npub fn process_with_vesting_schedule<P>(\n    provider: &mut 
P,\n    validator_bid: &mut ValidatorBid,\n    timestamp_millis: u64,\n    vesting_schedule_period_millis: u64,\n) -> Result<bool, Error>\nwhere\n    P: StorageProvider + RuntimeProvider + ?Sized,\n{\n    let validator_public_key = validator_bid.validator_public_key().clone();\n\n    let delegator_bids = read_delegator_bids(provider, &validator_public_key)?;\n    for mut delegator_bid in delegator_bids {\n        let delegator_staked_amount = delegator_bid.staked_amount();\n        let delegator_vesting_schedule = match delegator_bid.vesting_schedule_mut() {\n            Some(vesting_schedule) => vesting_schedule,\n            None => continue,\n        };\n        if timestamp_millis < delegator_vesting_schedule.initial_release_timestamp_millis() {\n            continue;\n        }\n        if delegator_vesting_schedule\n            .initialize_with_schedule(delegator_staked_amount, vesting_schedule_period_millis)\n        {\n            let delegator_bid_key = delegator_bid.bid_addr().into();\n            provider.write_bid(\n                delegator_bid_key,\n                BidKind::Delegator(Box::new(delegator_bid)),\n            )?;\n        }\n    }\n\n    let validator_staked_amount = validator_bid.staked_amount();\n    let validator_vesting_schedule = match validator_bid.vesting_schedule_mut() {\n        Some(vesting_schedule) => vesting_schedule,\n        None => return Ok(false),\n    };\n    if timestamp_millis < validator_vesting_schedule.initial_release_timestamp_millis() {\n        Ok(false)\n    } else {\n        Ok(validator_vesting_schedule\n            .initialize_with_schedule(validator_staked_amount, vesting_schedule_period_millis))\n    }\n}\n\n/// Returns all delegators for imputed validator.\npub fn delegators<P>(\n    provider: &mut P,\n    validator_public_key: &PublicKey,\n) -> Result<Vec<Box<DelegatorBid>>, Error>\nwhere\n    P: RuntimeProvider + ?Sized + StorageProvider,\n{\n    let mut ret = vec![];\n    let bid_addr = 
BidAddr::from(validator_public_key.clone());\n    let mut delegator_bid_keys = provider.get_keys_by_prefix(\n        &bid_addr\n            .delegated_account_prefix()\n            .map_err(|_| Error::Serialization)?,\n    )?;\n    delegator_bid_keys.extend(\n        provider.get_keys_by_prefix(\n            &bid_addr\n                .delegated_purse_prefix()\n                .map_err(|_| Error::Serialization)?,\n        )?,\n    );\n\n    for delegator_bid_key in delegator_bid_keys {\n        let delegator = read_delegator_bid(provider, &delegator_bid_key)?;\n        ret.push(delegator);\n    }\n\n    Ok(ret)\n}\n\n/// Returns all delegator slot reservations for given validator.\npub fn reservations<P>(\n    provider: &mut P,\n    validator_public_key: &PublicKey,\n) -> Result<Vec<Box<Reservation>>, Error>\nwhere\n    P: RuntimeProvider + ?Sized + StorageProvider,\n{\n    let mut ret = vec![];\n    let bid_addr = BidAddr::from(validator_public_key.clone());\n    let mut reservation_bid_keys = provider.get_keys_by_prefix(\n        &bid_addr\n            .reserved_account_prefix()\n            .map_err(|_| Error::Serialization)?,\n    )?;\n    reservation_bid_keys.extend(\n        provider.get_keys_by_prefix(\n            &bid_addr\n                .reserved_purse_prefix()\n                .map_err(|_| Error::Serialization)?,\n        )?,\n    );\n\n    for reservation_bid_key in reservation_bid_keys {\n        let reservation = read_reservation_bid(provider, &reservation_bid_key)?;\n        ret.push(reservation);\n    }\n\n    Ok(ret)\n}\n\n/// Handles forced unbonding of delegators when a validator raises the min or lowers the max amount\n/// they allow delegators to stake with them.\npub fn process_updated_delegator_stake_boundaries<P: Auction>(\n    provider: &mut P,\n    validator_bid: &mut ValidatorBid,\n    minimum_delegation_amount: u64,\n    maximum_delegation_amount: u64,\n) -> Result<(), Error> {\n    let era_end_timestamp_millis = 
get_era_end_timestamp_millis(provider)?;\n    if validator_bid.is_locked(era_end_timestamp_millis) {\n        // cannot increase the min or decrease the max while vesting is locked\n        // as this could result in vested delegators being forcibly unbonded, thus\n        // prematurely allowing liquidity on a network still in its vesting period.\n        return Err(Error::VestingLockout);\n    }\n\n    let previous_minimum = validator_bid.minimum_delegation_amount();\n    let previous_maximum = validator_bid.maximum_delegation_amount();\n\n    // set updated delegation amount range\n    validator_bid\n        .set_delegation_amount_boundaries(minimum_delegation_amount, maximum_delegation_amount);\n\n    // check modified delegation bookends\n    let raised_min = previous_minimum < minimum_delegation_amount;\n    let lowered_max = previous_maximum > maximum_delegation_amount;\n    if !raised_min && !lowered_max {\n        return Ok(());\n    }\n\n    let validator_public_key = validator_bid.validator_public_key();\n    let min_delegation = minimum_delegation_amount.into();\n    let max_delegation = maximum_delegation_amount.into();\n    let delegators = read_delegator_bids(provider, validator_public_key)?;\n    for mut delegator in delegators {\n        let delegator_staked_amount = delegator.staked_amount();\n        let unbond_amount = if delegator_staked_amount < min_delegation {\n            // fully unbond the staked amount as it is below the min\n            delegator_staked_amount\n        } else if delegator_staked_amount > max_delegation {\n            // partially unbond the staked amount to not exceed the max\n            delegator_staked_amount.saturating_sub(max_delegation)\n        } else {\n            // nothing to unbond\n            U512::zero()\n        };\n        // skip delegators within the range\n        if unbond_amount.is_zero() {\n            continue;\n        }\n\n        let unbond_kind = delegator.unbond_kind();\n        
create_unbonding_purse(\n            provider,\n            validator_public_key.clone(),\n            unbond_kind,\n            *delegator.bonding_purse(),\n            unbond_amount,\n            None,\n        )?;\n\n        let updated_stake = match delegator.decrease_stake(unbond_amount, era_end_timestamp_millis)\n        {\n            Ok(updated_stake) => updated_stake,\n            // Work around the case when the locked amounts table has yet to be\n            // initialized (likely pre-90 day mark).\n            Err(Error::DelegatorFundsLocked) => continue,\n            Err(err) => return Err(err),\n        };\n\n        let delegator_bid_addr = delegator.bid_addr();\n        if updated_stake.is_zero() {\n            debug!(\"pruning delegator bid {delegator_bid_addr}\");\n            provider.prune_bid(delegator_bid_addr);\n        } else {\n            debug!(\n                \"forced undelegation for {delegator_bid_addr} reducing {delegator_staked_amount} by {unbond_amount} to {updated_stake}\",\n            );\n            provider.write_bid(\n                delegator_bid_addr.into(),\n                BidKind::Delegator(Box::new(delegator)),\n            )?;\n        }\n    }\n    Ok(())\n}\n\n/// Handles an attempt by a validator to lower the number of delegator reserve slots\n/// they allow. An attempt to lower the number below the current count of occupied reservations\n/// will fail. 
An attempt to increase the number above the global allowed maximum of a given\n/// network will also fail.\npub fn process_updated_delegator_reservation_slots<P: Auction>(\n    provider: &mut P,\n    validator_bid: &mut ValidatorBid,\n    max_delegators_per_validator: u32,\n    reserved_slots: u32,\n) -> Result<(), Error> {\n    if reserved_slots == validator_bid.reserved_slots() {\n        return Ok(());\n    }\n\n    let validator_public_key = validator_bid.validator_public_key();\n\n    let validator_bid_addr = BidAddr::from(validator_public_key.clone());\n    // cannot reserve fewer slots than there are reservations\n    let reservation_count = provider.reservation_count(&validator_bid_addr)?;\n    if reserved_slots < reservation_count as u32 {\n        return Err(Error::ReservationSlotsCountTooSmall);\n    }\n\n    // cannot reserve more slots than there are free delegator slots\n    let max_reserved_slots = {\n        let used_reservation_count = provider.used_reservation_count(&validator_bid_addr)?;\n        let delegator_count = provider.delegator_count(&validator_bid_addr)?;\n        let normal_delegators = delegator_count.saturating_sub(used_reservation_count) as u32;\n        max_delegators_per_validator.saturating_sub(normal_delegators)\n    };\n    if reserved_slots > max_reserved_slots {\n        return Err(Error::ExceededReservationSlotsLimit);\n    }\n    validator_bid.with_reserved_slots(reserved_slots);\n    Ok(())\n}\n\n/// Processes undelegation with optional redelegation target.\npub fn process_undelegation<P: Auction>(\n    provider: &mut P,\n    delegator_kind: DelegatorKind,\n    validator_public_key: PublicKey,\n    amount: U512,\n    new_validator: Option<PublicKey>,\n) -> Result<U512, Error> {\n    match &delegator_kind {\n        DelegatorKind::PublicKey(pk) => {\n            let account_hash = pk.to_account_hash();\n            if !provider.is_allowed_session_caller(&account_hash) {\n                return Err(Error::InvalidContext);\n  
          }\n        }\n        DelegatorKind::Purse(addr) => {\n            let uref = URef::new(*addr, AccessRights::WRITE);\n            if !provider.is_valid_uref(uref) {\n                return Err(Error::InvalidContext);\n            }\n        }\n    }\n\n    let new_validator_public_key = {\n        // check redelegation target for existence\n        if let Some(new_validator_public_key) = new_validator {\n            let new_validator_bid_key = BidAddr::from(new_validator_public_key.clone()).into();\n            match read_validator_bid(provider, &new_validator_bid_key) {\n                Err(Error::ValidatorNotFound) => return Err(Error::RedelegationValidatorNotFound),\n                Err(err) => return Err(err),\n                Ok(_) => Some(new_validator_public_key),\n            }\n        } else {\n            None\n        }\n    };\n\n    let validator_bid_key = BidAddr::from(validator_public_key.clone()).into();\n    let validator_bid = read_validator_bid(provider, &validator_bid_key)?;\n\n    let delegator_bid_addr = BidAddr::new_delegator_kind(&validator_public_key, &delegator_kind);\n    let mut delegator_bid = read_delegator_bid(provider, &delegator_bid_addr.into())?;\n\n    let bonding_purse = *delegator_bid.bonding_purse();\n    let initial_staked_amount = delegator_bid.staked_amount();\n    let (unbonding_amount, updated_stake) = {\n        let era_end_timestamp_millis = get_era_end_timestamp_millis(provider)?;\n\n        // cannot unbond more than you have\n        let unbonding_amount = U512::min(amount, initial_staked_amount);\n        let rem = delegator_bid.decrease_stake(unbonding_amount, era_end_timestamp_millis)?;\n        if rem < validator_bid.minimum_delegation_amount().into() {\n            // if the remaining stake is less than the validator's min delegation amount\n            // unbond all the delegator's stake\n            let zeroed = delegator_bid.decrease_stake(rem, era_end_timestamp_millis)?;\n            
(initial_staked_amount, zeroed)\n        } else {\n            (unbonding_amount, rem)\n        }\n    };\n\n    if updated_stake.is_zero() {\n        debug!(\"pruning delegator bid {}\", delegator_bid_addr);\n        provider.prune_bid(delegator_bid_addr);\n    } else {\n        provider.write_bid(delegator_bid_addr.into(), BidKind::Delegator(delegator_bid))?;\n    }\n\n    if !unbonding_amount.is_zero() {\n        let unbond_kind = delegator_kind.into();\n\n        create_unbonding_purse(\n            provider,\n            validator_public_key,\n            unbond_kind,\n            bonding_purse,\n            unbonding_amount,\n            new_validator_public_key,\n        )?;\n\n        debug!(\n            \"undelegation for {delegator_bid_addr} reducing {initial_staked_amount} by {unbonding_amount} to {updated_stake}\"\n        );\n    }\n\n    Ok(updated_stake)\n}\n\n/// Retrieves the total reward for a given validator or delegator in a given era.\npub fn reward(\n    validator: &PublicKey,\n    delegator: Option<&DelegatorKind>,\n    era_id: EraId,\n    rewards: &[U512],\n    seigniorage_recipients_snapshot: &SeigniorageRecipientsSnapshot,\n    rewards_ratio: Ratio<u64>,\n) -> Result<Option<U512>, Error> {\n    let rewards_ratio_as_u512 = Ratio::new(\n        U512::from(*rewards_ratio.numer()),\n        U512::from(*rewards_ratio.denom()),\n    );\n    let validator_rewards = match rewards_per_validator(\n        validator,\n        era_id,\n        rewards,\n        seigniorage_recipients_snapshot,\n        rewards_ratio_as_u512,\n    ) {\n        Ok(rewards) => rewards,\n        Err(Error::ValidatorNotFound) => return Ok(None),\n        Err(Error::MissingSeigniorageRecipients) => return Ok(None),\n        Err(err) => return Err(err),\n    };\n\n    let reward = validator_rewards\n        .into_iter()\n        .map(|reward_info| {\n            if let Some(delegator) = delegator {\n                reward_info\n                    .delegator_rewards\n       
             .get(delegator)\n                    .copied()\n                    .unwrap_or_default()\n            } else {\n                reward_info.validator_reward\n            }\n        })\n        .sum();\n\n    Ok(Some(reward))\n}\n\n/// Calculates the reward for a given validator for a given era.\npub(crate) fn rewards_per_validator(\n    validator: &PublicKey,\n    era_id: EraId,\n    rewards: &[U512],\n    seigniorage_recipients_snapshot: &SeigniorageRecipientsSnapshot,\n    rewards_ratio: Ratio<U512>,\n) -> Result<Vec<RewardsPerValidator>, Error> {\n    let mut results = Vec::with_capacity(rewards.len());\n\n    for (reward_amount, eras_back) in rewards\n        .iter()\n        .enumerate()\n        .map(move |(i, &amount)| (amount, i as u64))\n        // do not process zero amounts, unless they are for the current era (we still want to\n        // record zero allocations for the current validators in EraInfo)\n        .filter(|(amount, eras_back)| !amount.is_zero() || *eras_back == 0)\n    {\n        let factor = { Ratio::new(U512::from(100), U512::from(100)) - rewards_ratio };\n        let total_reward = Ratio::from(reward_amount) * factor;\n        let rewarded_era = era_id\n            .checked_sub(eras_back)\n            .ok_or(Error::MissingSeigniorageRecipients)?;\n\n        // try to find validator in seigniorage snapshot\n        let maybe_seigniorage_recipient = match seigniorage_recipients_snapshot {\n            SeigniorageRecipientsSnapshot::V1(snapshot) => snapshot\n                .get(&rewarded_era)\n                .ok_or(Error::MissingSeigniorageRecipients)?\n                .get(validator)\n                .cloned()\n                .map(SeigniorageRecipient::V1),\n            SeigniorageRecipientsSnapshot::V2(snapshot) => snapshot\n                .get(&rewarded_era)\n                .ok_or(Error::MissingSeigniorageRecipients)?\n                .get(validator)\n                .cloned()\n                
.map(SeigniorageRecipient::V2),\n        };\n\n        let Some(recipient) = maybe_seigniorage_recipient else {\n            // We couldn't find the validator. If the reward amount is zero, we don't care -\n            // the validator wasn't supposed to be rewarded in this era, anyway. Otherwise,\n            // return an error.\n            if reward_amount.is_zero() {\n                continue;\n            } else {\n                return Err(Error::ValidatorNotFound);\n            }\n        };\n\n        let total_stake = recipient.total_stake().ok_or(Error::ArithmeticOverflow)?;\n\n        if total_stake.is_zero() {\n            // The validator has completely unbonded. We can't compute the delegators' part (as\n            // their stakes are also zero), so we just give the whole reward to the validator.\n            // When used from `distribute`, we will mint the reward into their bonding purse\n            // and increase their unbond request by the corresponding amount.\n\n            results.push(RewardsPerValidator {\n                validator_reward: total_reward.to_integer(),\n                delegator_rewards: BTreeMap::new(),\n            });\n            continue;\n        }\n\n        let delegator_total_stake: U512 = recipient\n            .delegator_total_stake()\n            .ok_or(Error::ArithmeticOverflow)?;\n\n        // calculate part of reward to be distributed to delegators before commission\n        let base_delegators_part: Ratio<U512> = {\n            let reward_multiplier: Ratio<U512> = Ratio::new(delegator_total_stake, total_stake);\n            total_reward\n                .checked_mul(&reward_multiplier)\n                .ok_or(Error::ArithmeticOverflow)?\n        };\n\n        let default = BTreeMap::new();\n        let reservation_delegation_rates =\n            recipient.reservation_delegation_rates().unwrap_or(&default);\n        // calculate commission and final reward for each delegator\n        let mut delegator_rewards: 
BTreeMap<DelegatorKind, U512> = BTreeMap::new();\n        for (delegator_kind, delegator_stake) in recipient.delegator_stake().iter() {\n            let reward_multiplier = Ratio::new(*delegator_stake, delegator_total_stake);\n            let base_reward = base_delegators_part * reward_multiplier;\n            let delegation_rate = *reservation_delegation_rates\n                .get(delegator_kind)\n                .unwrap_or(recipient.delegation_rate());\n            let commission_rate = Ratio::new(\n                U512::from(delegation_rate),\n                U512::from(DELEGATION_RATE_DENOMINATOR),\n            );\n            let commission: Ratio<U512> = base_reward\n                .checked_mul(&commission_rate)\n                .ok_or(Error::ArithmeticOverflow)?;\n            let reward = base_reward\n                .checked_sub(&commission)\n                .ok_or(Error::ArithmeticOverflow)?;\n            delegator_rewards.insert(delegator_kind.clone(), reward.to_integer());\n        }\n\n        let total_delegator_payout: U512 =\n            delegator_rewards.iter().map(|(_, &amount)| amount).sum();\n\n        let validator_reward = { total_reward - Ratio::from(total_delegator_payout) }.to_integer();\n\n        results.push(RewardsPerValidator {\n            validator_reward,\n            delegator_rewards,\n        });\n    }\n    Ok(results)\n}\n\n/// Aggregated rewards data for a validator.\n#[derive(Debug, Default)]\npub struct RewardsPerValidator {\n    validator_reward: U512,\n    delegator_rewards: BTreeMap<DelegatorKind, U512>,\n}\n\nimpl RewardsPerValidator {\n    /// The validator reward amount.\n    pub fn validator_reward(&self) -> U512 {\n        self.validator_reward\n    }\n\n    /// The rewards for this validator's delegators.\n    pub fn delegator_rewards(&self) -> &BTreeMap<DelegatorKind, U512> {\n        &self.delegator_rewards\n    }\n\n    /// The rewards for this validator's delegators.\n    pub fn take_delegator_rewards(self) -> 
BTreeMap<DelegatorKind, U512> {\n        self.delegator_rewards\n    }\n}\n"
  },
  {
    "path": "storage/src/system/auction/providers.rs",
    "content": "use std::collections::BTreeSet;\n\nuse casper_types::{\n    account::AccountHash,\n    bytesrepr::{FromBytes, ToBytes},\n    system::{\n        auction::{BidAddr, BidKind, EraInfo, Error, Unbond, UnbondEra, UnbondKind},\n        mint,\n    },\n    CLTyped, Key, KeyTag, URef, U512,\n};\n\n/// Provider of runtime host functionality.\npub trait RuntimeProvider {\n    /// This method should return the caller of the current context.\n    fn get_caller(&self) -> AccountHash;\n\n    /// Checks if account_hash matches the active session's account.\n    fn is_allowed_session_caller(&self, account_hash: &AccountHash) -> bool;\n\n    /// Checks if uref is in access rights.\n    fn is_valid_uref(&self, uref: URef) -> bool;\n\n    /// Gets named key under a `name`.\n    fn named_keys_get(&self, name: &str) -> Option<Key>;\n\n    /// Gets keys in a given keyspace\n    fn get_keys(&mut self, key_tag: &KeyTag) -> Result<BTreeSet<Key>, Error>;\n\n    /// Gets keys by prefix.\n    fn get_keys_by_prefix(&mut self, prefix: &[u8]) -> Result<Vec<Key>, Error>;\n\n    /// Returns the current number of delegators for this validator.\n    fn delegator_count(&mut self, bid_addr: &BidAddr) -> Result<usize, Error>;\n\n    /// Returns number of reservations for this validator.\n    fn reservation_count(&mut self, bid_addr: &BidAddr) -> Result<usize, Error>;\n\n    /// Returns number of reservations for which a delegator bid exists.\n    fn used_reservation_count(&mut self, bid_addr: &BidAddr) -> Result<usize, Error>;\n\n    /// Returns vesting schedule period.\n    fn vesting_schedule_period_millis(&self) -> u64;\n\n    /// Check if auction bids are allowed.\n    fn allow_auction_bids(&self) -> bool;\n\n    /// Check if auction should compute rewards.\n    fn should_compute_rewards(&self) -> bool;\n}\n\n/// Provides functionality of a contract storage.\npub trait StorageProvider {\n    /// Reads data from [`URef`].\n    fn read<T: FromBytes + CLTyped>(&mut self, uref: URef) -> 
Result<Option<T>, Error>;\n\n    /// Writes data to [`URef].\n    fn write<T: ToBytes + CLTyped>(&mut self, uref: URef, value: T) -> Result<(), Error>;\n\n    /// Reads [`casper_types::system::auction::Bid`] at account hash derived from given public key\n    fn read_bid(&mut self, key: &Key) -> Result<Option<BidKind>, Error>;\n\n    /// Writes given [`BidKind`] at given key.\n    fn write_bid(&mut self, key: Key, bid_kind: BidKind) -> Result<(), Error>;\n\n    /// Reads [`Unbond`]s at bid address.\n    fn read_unbond(&mut self, bid_addr: BidAddr) -> Result<Option<Unbond>, Error>;\n\n    /// Writes given [`Unbond`] if some, else prunes if none at bid address.\n    fn write_unbond(&mut self, bid_addr: BidAddr, unbond: Option<Unbond>) -> Result<(), Error>;\n\n    /// Records era info.\n    fn record_era_info(&mut self, era_info: EraInfo) -> Result<(), Error>;\n\n    /// Prunes a given bid at [`BidAddr`].\n    fn prune_bid(&mut self, bid_addr: BidAddr);\n}\n\n/// Provides an access to mint.\npub trait MintProvider {\n    /// Returns successfully unbonded stake to origin account.\n    fn unbond(&mut self, unbond_kind: &UnbondKind, unbond_era: &UnbondEra) -> Result<(), Error>;\n\n    /// Allows optimized auction and mint interaction.\n    /// Intended to be used only by system contracts to manage staked purses.\n    fn mint_transfer_direct(\n        &mut self,\n        to: Option<AccountHash>,\n        source: URef,\n        target: URef,\n        amount: U512,\n        id: Option<u64>,\n    ) -> Result<Result<(), mint::Error>, Error>;\n\n    /// Mint `amount` new token into `existing_purse`.\n    /// Returns unit on success, otherwise an error.\n    fn mint_into_existing_purse(&mut self, amount: U512, existing_purse: URef)\n        -> Result<(), Error>;\n\n    /// Creates new purse.\n    fn create_purse(&mut self) -> Result<URef, Error>;\n\n    /// Gets purse balance.\n    fn available_balance(&mut self, purse: URef) -> Result<Option<U512>, Error>;\n\n    /// Reads the 
base round reward.\n    fn read_base_round_reward(&mut self) -> Result<U512, Error>;\n\n    /// Mints new token with given `initial_balance` balance. Returns new purse on success,\n    /// otherwise an error.\n    fn mint(&mut self, amount: U512) -> Result<URef, Error>;\n\n    /// Reduce total supply by `amount`. Returns unit on success, otherwise\n    /// an error.\n    fn reduce_total_supply(&mut self, amount: U512) -> Result<(), Error>;\n}\n\n/// Provider of an account related functionality.\npub trait AccountProvider {\n    /// Get currently executing account's purse.\n    fn get_main_purse(&self) -> Result<URef, Error>;\n\n    /// Set main purse.\n    fn set_main_purse(&mut self, purse: URef);\n}\n"
  },
  {
    "path": "storage/src/system/auction.rs",
    "content": "mod auction_native;\n/// Auction business logic.\npub mod detail;\n/// System logic providers.\npub mod providers;\n\nuse itertools::Itertools;\nuse num_rational::Ratio;\nuse std::collections::BTreeMap;\nuse tracing::{debug, error, warn};\n\nuse self::providers::{AccountProvider, MintProvider, RuntimeProvider, StorageProvider};\nuse crate::system::auction::detail::{\n    process_undelegation, process_updated_delegator_reservation_slots,\n    process_updated_delegator_stake_boundaries, process_with_vesting_schedule, read_delegator_bids,\n    read_validator_bid, rewards_per_validator, seigniorage_recipients, DistributeTarget,\n};\nuse casper_types::{\n    account::AccountHash,\n    system::auction::{\n        BidAddr, BidKind, Bridge, DelegationRate, DelegatorKind, EraInfo, EraValidators, Error,\n        Reservation, SeigniorageAllocation, SeigniorageRecipientsSnapshot, SeigniorageRecipientsV2,\n        UnbondEra, UnbondKind, ValidatorBid, ValidatorCredit, ValidatorWeights,\n        DELEGATION_RATE_DENOMINATOR,\n    },\n    AccessRights, ApiError, EraId, Key, PublicKey, RewardsHandling, URef, U512,\n};\n\n/// Bonding auction contract interface\npub trait Auction:\n    StorageProvider + RuntimeProvider + MintProvider + AccountProvider + Sized\n{\n    /// Returns active validators and auction winners for a number of future eras determined by the\n    /// configured auction_delay.\n    fn get_era_validators(&mut self) -> Result<EraValidators, Error> {\n        let snapshot = detail::get_seigniorage_recipients_snapshot(self)?;\n        let era_validators = detail::era_validators_from_snapshot(snapshot);\n        Ok(era_validators)\n    }\n\n    /// Returns validators in era_validators, mapped to their bids or founding stakes, delegation\n    /// rates and lists of delegators together with their delegated quantities from delegators.\n    /// This function is publicly accessible, but intended for system use by the Handle Payment\n    /// contract, because 
this data is necessary for distributing seigniorage.\n    fn read_seigniorage_recipients(&mut self) -> Result<SeigniorageRecipientsV2, Error> {\n        // `era_validators` are assumed to be computed already by calling \"run_auction\" entrypoint.\n        let era_index = detail::get_era_id(self)?;\n        let mut seigniorage_recipients_snapshot =\n            detail::get_seigniorage_recipients_snapshot(self)?;\n        let seigniorage_recipients = seigniorage_recipients_snapshot\n            .remove(&era_index)\n            .ok_or(Error::MissingSeigniorageRecipients)?;\n        Ok(seigniorage_recipients)\n    }\n\n    /// This entry point adds or modifies an entry in the `Key::Bid` section of the global state and\n    /// creates (or tops off) a bid purse. Post genesis, any new call on this entry point causes a\n    /// non-founding validator in the system to exist.\n    ///\n    /// The logic works for both founding and non-founding validators, making it possible to adjust\n    /// their delegation rate and increase their stakes.\n    ///\n    /// A validator with its bid inactive due to slashing can activate its bid again by increasing\n    /// its stake.\n    ///\n    /// Validators cannot create a bid with 0 amount, and the delegation rate can't exceed\n    /// [`DELEGATION_RATE_DENOMINATOR`].\n    ///\n    /// Returns a [`U512`] value indicating total amount of tokens staked for given `public_key`.\n    #[allow(clippy::too_many_arguments)]\n    fn add_bid(\n        &mut self,\n        public_key: PublicKey,\n        delegation_rate: DelegationRate,\n        amount: U512,\n        minimum_delegation_amount: Option<u64>,\n        maximum_delegation_amount: Option<u64>,\n        minimum_bid_amount: u64,\n        max_delegators_per_validator: u32,\n        reserved_slots: u32,\n        global_minimum_delegation_amount: u64,\n        global_maximum_delegation_amount: u64,\n        minimum_delegation_rate: u8,\n    ) -> Result<U512, ApiError> {\n        if 
!self.allow_auction_bids() {\n            // The validator set may be closed on some side chains,\n            // which is configured by disabling bids.\n            return Err(Error::AuctionBidsDisabled.into());\n        }\n\n        if amount == U512::zero() {\n            return Err(Error::BondTooSmall.into());\n        }\n\n        if delegation_rate < minimum_delegation_rate {\n            return Err(Error::DelegationRateTooSmall.into());\n        }\n\n        if delegation_rate > DELEGATION_RATE_DENOMINATOR {\n            return Err(Error::DelegationRateTooLarge.into());\n        }\n\n        if reserved_slots > max_delegators_per_validator {\n            return Err(Error::ExceededReservationSlotsLimit.into());\n        }\n\n        let provided_account_hash = AccountHash::from(&public_key);\n\n        if !self.is_allowed_session_caller(&provided_account_hash) {\n            return Err(Error::InvalidContext.into());\n        }\n\n        if let Some(minimum_delegation_amount) = minimum_delegation_amount {\n            if minimum_delegation_amount < global_minimum_delegation_amount {\n                return Err(ApiError::InvalidDelegationAmountLimits);\n            }\n        }\n\n        if let Some(maximum_delegation_amount) = maximum_delegation_amount {\n            if maximum_delegation_amount > global_maximum_delegation_amount {\n                return Err(ApiError::InvalidDelegationAmountLimits);\n            }\n        }\n\n        let validator_bid_key = BidAddr::from(public_key.clone()).into();\n        let (target, validator_bid) = if let Some(BidKind::Validator(mut validator_bid)) =\n            self.read_bid(&validator_bid_key)?\n        {\n            let updated_stake = validator_bid.increase_stake(amount)?;\n            if updated_stake < U512::from(minimum_bid_amount) {\n                return Err(Error::BondTooSmall.into());\n            }\n            // idempotent\n            validator_bid.activate();\n\n            let 
minimum_delegation_amount =\n                minimum_delegation_amount.unwrap_or(validator_bid.minimum_delegation_amount());\n            let maximum_delegation_amount =\n                maximum_delegation_amount.unwrap_or(validator_bid.maximum_delegation_amount());\n\n            if maximum_delegation_amount < minimum_delegation_amount {\n                return Err(ApiError::InvalidDelegationAmountLimits);\n            }\n\n            validator_bid.with_delegation_rate(delegation_rate);\n            process_updated_delegator_stake_boundaries(\n                self,\n                &mut validator_bid,\n                minimum_delegation_amount,\n                maximum_delegation_amount,\n            )?;\n            process_updated_delegator_reservation_slots(\n                self,\n                &mut validator_bid,\n                max_delegators_per_validator,\n                reserved_slots,\n            )?;\n            (*validator_bid.bonding_purse(), validator_bid)\n        } else {\n            if amount < U512::from(minimum_bid_amount) {\n                return Err(Error::BondTooSmall.into());\n            }\n            let minimum_delegation_amount =\n                minimum_delegation_amount.unwrap_or(global_minimum_delegation_amount);\n            let maximum_delegation_amount =\n                maximum_delegation_amount.unwrap_or(global_maximum_delegation_amount);\n\n            if maximum_delegation_amount < minimum_delegation_amount {\n                return Err(ApiError::InvalidDelegationAmountLimits);\n            }\n\n            // create new validator bid\n            let bonding_purse = self.create_purse()?;\n            let validator_bid = ValidatorBid::unlocked(\n                public_key,\n                bonding_purse,\n                amount,\n                delegation_rate,\n                minimum_delegation_amount,\n                maximum_delegation_amount,\n                reserved_slots,\n            );\n            
(bonding_purse, Box::new(validator_bid))\n        };\n\n        let source = self.get_main_purse()?;\n        self.mint_transfer_direct(\n            Some(PublicKey::System.to_account_hash()),\n            source,\n            target,\n            amount,\n            None,\n        )\n        .map_err(|_| Error::TransferToBidPurse)?\n        .map_err(|mint_error| {\n            // Propagate mint contract's error that occurred during execution of transfer\n            // entrypoint. This will improve UX in case of (for example)\n            // unapproved spending limit error.\n            ApiError::from(mint_error)\n        })?;\n\n        let updated_amount = validator_bid.staked_amount();\n        self.write_bid(validator_bid_key, BidKind::Validator(validator_bid))?;\n        Ok(updated_amount)\n    }\n\n    /// Unbonds aka reduces stake by specified amount, adding an entry to the unbonding queue.\n    /// For a genesis validator, this is subject to vesting if applicable to a given network.\n    ///\n    /// If this bid stake is reduced to 0, any delegators to this bid will be undelegated, with\n    /// entries made to the unbonding queue for each of them for their full delegated amount.\n    /// Additionally, this bid record will be pruned away from the next calculated root hash.\n    ///\n    /// An attempt to reduce stake by more than is staked will instead 0 the stake.\n    ///\n    /// The function returns the remaining staked amount (we allow partial unbonding).\n    fn withdraw_bid(\n        &mut self,\n        public_key: PublicKey,\n        amount: U512,\n        minimum_bid_amount: u64,\n    ) -> Result<U512, Error> {\n        let provided_account_hash = AccountHash::from(&public_key);\n\n        if !self.is_allowed_session_caller(&provided_account_hash) {\n            return Err(Error::InvalidContext);\n        }\n\n        let validator_bid_addr = BidAddr::from(public_key.clone());\n        let validator_bid_key = validator_bid_addr.into();\n        
let mut validator_bid = read_validator_bid(self, &validator_bid_key)?;\n        let staked_amount = validator_bid.staked_amount();\n\n        if amount > staked_amount {\n            // An attempt to unbond more than is staked results in an error.\n            // We've gone back and forth on this behavior.\n            // * In 1.x it was an error.\n            // * In 2.0 it was changed to interpret it as \"up to amount\" and not error, by request.\n            // * In 2.1 it is restored to the original 1.x behavior, also by request.\n            return Err(Error::UnbondTooLarge);\n        }\n\n        let era_end_timestamp_millis = detail::get_era_end_timestamp_millis(self)?;\n        let updated_stake = validator_bid.decrease_stake(amount, era_end_timestamp_millis)?;\n\n        debug!(\n            \"withdrawing bid for {validator_bid_addr} reducing {staked_amount} by {amount} to {updated_stake}\",\n        );\n        // if validator stake is less than minimum_bid_amount, unbond fully and prune validator bid\n        if updated_stake < U512::from(minimum_bid_amount) {\n            // create unbonding purse for full validator stake\n            detail::create_unbonding_purse(\n                self,\n                public_key.clone(),\n                UnbondKind::Validator(public_key.clone()), // validator is the unbonder\n                *validator_bid.bonding_purse(),\n                staked_amount,\n                None,\n            )?;\n            // Unbond all delegators and zero them out\n            let delegators = read_delegator_bids(self, &public_key)?;\n            for mut delegator in delegators {\n                let unbond_kind = delegator.unbond_kind();\n                detail::create_unbonding_purse(\n                    self,\n                    public_key.clone(),\n                    unbond_kind,\n                    *delegator.bonding_purse(),\n                    delegator.staked_amount(),\n                    None,\n                )?;\n  
              delegator.decrease_stake(delegator.staked_amount(), era_end_timestamp_millis)?;\n\n                let delegator_bid_addr = delegator.bid_addr();\n                debug!(\"pruning delegator bid {}\", delegator_bid_addr);\n                self.prune_bid(delegator_bid_addr)\n            }\n            debug!(\"pruning validator bid {}\", validator_bid_addr);\n            self.prune_bid(validator_bid_addr);\n        } else {\n            // create unbonding purse for the unbonding amount\n            detail::create_unbonding_purse(\n                self,\n                public_key.clone(),\n                UnbondKind::Validator(public_key.clone()), // validator is the unbonder\n                *validator_bid.bonding_purse(),\n                amount,\n                None,\n            )?;\n            self.write_bid(validator_bid_key, BidKind::Validator(validator_bid))?;\n        }\n\n        Ok(updated_stake)\n    }\n\n    /// Adds a new delegator to delegators or increases its current stake. 
If the target validator\n    /// is missing, the function call returns an error and does nothing.\n    ///\n    /// The function transfers motes from the source purse to the delegator's bonding purse.\n    ///\n    /// This entry point returns the number of tokens currently delegated to a given validator.\n    fn delegate(\n        &mut self,\n        delegator_kind: DelegatorKind,\n        validator_public_key: PublicKey,\n        amount: U512,\n        max_delegators_per_validator: u32,\n    ) -> Result<U512, ApiError> {\n        if !self.allow_auction_bids() {\n            // The auction process can be disabled on a given network.\n            return Err(Error::AuctionBidsDisabled.into());\n        }\n\n        let source = match &delegator_kind {\n            DelegatorKind::PublicKey(pk) => {\n                let account_hash = pk.to_account_hash();\n                if !self.is_allowed_session_caller(&account_hash) {\n                    return Err(Error::InvalidContext.into());\n                }\n                self.get_main_purse()?\n            }\n            DelegatorKind::Purse(addr) => {\n                let uref = URef::new(*addr, AccessRights::WRITE);\n                if !self.is_valid_uref(uref) {\n                    return Err(Error::InvalidContext.into());\n                }\n                uref\n            }\n        };\n\n        detail::handle_delegation(\n            self,\n            delegator_kind,\n            validator_public_key,\n            source,\n            amount,\n            max_delegators_per_validator,\n        )\n    }\n\n    /// Unbonds aka reduces stake by specified amount, adding an entry to the unbonding queue\n    ///\n    /// The arguments are the delegator's key, the validator's key, and the amount.\n    ///\n    /// Returns the remaining staked amount (we allow partial unbonding).\n    fn undelegate(\n        &mut self,\n        delegator_kind: DelegatorKind,\n        validator_public_key: PublicKey,\n        
amount: U512,\n    ) -> Result<U512, Error> {\n        let redelegate_target = None;\n        process_undelegation(\n            self,\n            delegator_kind,\n            validator_public_key,\n            amount,\n            redelegate_target,\n        )\n    }\n\n    /// Unbonds aka reduces stake by specified amount, adding an entry to the unbonding queue,\n    /// which when processed will attempt to re-delegate the stake to the specified new validator.\n    /// If this is not possible at that future point in time, the unbonded stake will instead\n    /// downgrade to a standard undelegate operation automatically (the unbonded stake is\n    /// returned to the associated purse).\n    ///\n    /// This is a quality of life / convenience method, allowing a delegator to indicate they\n    /// would like some or all of their stake moved away from a validator to a different validator\n    /// with a single transaction, instead of requiring them to send an unbonding transaction\n    /// to unbond from the first validator and then wait a number of eras equal to the unbonding\n    /// delay and then send a second transaction to bond to the second validator.\n    ///\n    /// The arguments are the delegator's key, the existing validator's key, the amount,\n    /// and the new validator's key.\n    ///\n    /// Returns the remaining staked amount (we allow partial unbonding).\n    fn redelegate(\n        &mut self,\n        delegator_kind: DelegatorKind,\n        validator_public_key: PublicKey,\n        amount: U512,\n        new_validator: PublicKey,\n    ) -> Result<U512, Error> {\n        let redelegate_target = Some(new_validator);\n        process_undelegation(\n            self,\n            delegator_kind,\n            validator_public_key,\n            amount,\n            redelegate_target,\n        )\n    }\n\n    /// Adds new reservations for a given validator with specified delegator public keys\n    /// and delegation rates. 
If during adding reservations configured number of reserved\n    /// delegator slots is exceeded it returns an error.\n    ///\n    /// If given reservation exists already and the delegation rate was changed it's updated.\n    fn add_reservations(\n        &mut self,\n        reservations: Vec<Reservation>,\n        minimum_delegation_rate: u8,\n    ) -> Result<(), Error> {\n        if !self.allow_auction_bids() {\n            // The auction process can be disabled on a given network.\n            return Err(Error::AuctionBidsDisabled);\n        }\n        for reservation in reservations {\n            if !self\n                .is_allowed_session_caller(&AccountHash::from(reservation.validator_public_key()))\n            {\n                return Err(Error::InvalidContext);\n            }\n\n            detail::handle_add_reservation(self, reservation, minimum_delegation_rate)?;\n        }\n        Ok(())\n    }\n\n    /// Removes reservations for given delegator public keys. If a reservation for one of the keys\n    /// does not exist it returns an error.\n    fn cancel_reservations(\n        &mut self,\n        validator: PublicKey,\n        delegators: Vec<DelegatorKind>,\n        max_delegators_per_validator: u32,\n    ) -> Result<(), Error> {\n        if !self.is_allowed_session_caller(&AccountHash::from(&validator)) {\n            return Err(Error::InvalidContext);\n        }\n\n        for delegator in delegators {\n            detail::handle_cancel_reservation(\n                self,\n                validator.clone(),\n                delegator.clone(),\n                max_delegators_per_validator,\n            )?;\n        }\n        Ok(())\n    }\n\n    /// Slashes each validator.\n    ///\n    /// This can be only invoked through a system call.\n    fn slash(&mut self, validator_public_keys: Vec<PublicKey>) -> Result<(), Error> {\n        fn slash_unbonds(unbond_eras: Vec<UnbondEra>) -> U512 {\n            let mut burned_amount = U512::zero();\n       
     for unbond_era in unbond_eras {\n                burned_amount += *unbond_era.amount();\n            }\n            burned_amount\n        }\n\n        if self.get_caller() != PublicKey::System.to_account_hash() {\n            return Err(Error::InvalidCaller);\n        }\n\n        let mut burned_amount: U512 = U512::zero();\n\n        for validator_public_key in validator_public_keys {\n            let validator_bid_addr = BidAddr::from(validator_public_key.clone());\n            // Burn stake, deactivate\n            if let Some(BidKind::Validator(validator_bid)) =\n                self.read_bid(&validator_bid_addr.into())?\n            {\n                burned_amount += validator_bid.staked_amount();\n                self.prune_bid(validator_bid_addr);\n\n                // Also slash delegator stakes when deactivating validator bid.\n                let delegator_keys = {\n                    let mut ret =\n                        self.get_keys_by_prefix(&validator_bid_addr.delegated_account_prefix()?)?;\n                    ret.extend(\n                        self.get_keys_by_prefix(&validator_bid_addr.delegated_purse_prefix()?)?,\n                    );\n                    ret\n                };\n\n                for delegator_key in delegator_keys {\n                    if let Some(BidKind::Delegator(delegator_bid)) =\n                        self.read_bid(&delegator_key)?\n                    {\n                        burned_amount += delegator_bid.staked_amount();\n                        let delegator_bid_addr = delegator_bid.bid_addr();\n                        self.prune_bid(delegator_bid_addr);\n\n                        // Also slash delegator unbonds.\n                        let delegator_unbond_addr = match delegator_bid.delegator_kind() {\n                            DelegatorKind::PublicKey(pk) => BidAddr::UnbondAccount {\n                                validator: validator_public_key.to_account_hash(),\n                               
 unbonder: pk.to_account_hash(),\n                            },\n                            DelegatorKind::Purse(addr) => BidAddr::UnbondPurse {\n                                validator: validator_public_key.to_account_hash(),\n                                unbonder: *addr,\n                            },\n                        };\n\n                        match self.read_unbond(delegator_unbond_addr)? {\n                            Some(unbond) => {\n                                let burned = slash_unbonds(unbond.take_eras());\n\n                                burned_amount += burned;\n                                self.write_unbond(delegator_unbond_addr, None)?;\n                            }\n                            None => {\n                                continue;\n                            }\n                        }\n                    }\n                }\n            }\n\n            // get rid of any staked token in the unbonding queue\n            let validator_unbond_addr = BidAddr::UnbondAccount {\n                validator: validator_public_key.to_account_hash(),\n                unbonder: validator_public_key.to_account_hash(),\n            };\n            match self.read_unbond(validator_unbond_addr)? 
{\n                Some(unbond) => {\n                    let burned = slash_unbonds(unbond.take_eras());\n                    burned_amount += burned;\n                    self.write_unbond(validator_unbond_addr, None)?;\n                }\n                None => {\n                    continue;\n                }\n            }\n        }\n\n        self.reduce_total_supply(burned_amount)?;\n\n        Ok(())\n    }\n\n    /// Takes active_bids and delegators to construct a list of validators' total bids (their own\n    /// added to their delegators') ordered by size from largest to smallest, then takes the top N\n    /// (number of auction slots) bidders and replaces era_validators with these.\n    ///\n    /// Accessed by: node\n    fn run_auction(\n        &mut self,\n        era_end_timestamp_millis: u64,\n        evicted_validators: Vec<PublicKey>,\n        max_delegators_per_validator: u32,\n        include_credits: bool,\n        credit_cap: Ratio<U512>,\n        minimum_bid_amount: u64,\n    ) -> Result<(), ApiError> {\n        debug!(\"run_auction called\");\n\n        if self.get_caller() != PublicKey::System.to_account_hash() {\n            return Err(Error::InvalidCaller.into());\n        }\n\n        let vesting_schedule_period_millis = self.vesting_schedule_period_millis();\n        let validator_slots = detail::get_validator_slots(self)?;\n        let auction_delay = detail::get_auction_delay(self)?;\n        // We have to store auction_delay future eras, one current era and one past era (for\n        // rewards calculations).\n        let snapshot_size = auction_delay as usize + 2;\n        let mut era_id: EraId = detail::get_era_id(self)?;\n\n        // Process unbond requests\n        debug!(\"processing unbond requests\");\n        detail::process_unbond_requests(self, max_delegators_per_validator)?;\n        debug!(\"processing unbond request successful\");\n\n        let mut validator_bids_detail = detail::get_validator_bids(self, 
era_id)?;\n\n        // Process bids\n        let mut bids_modified = false;\n        for (validator_public_key, validator_bid) in\n            validator_bids_detail.validator_bids_mut().iter_mut()\n        {\n            if process_with_vesting_schedule(\n                self,\n                validator_bid,\n                era_end_timestamp_millis,\n                self.vesting_schedule_period_millis(),\n            )? {\n                bids_modified = true;\n            }\n\n            if evicted_validators.contains(validator_public_key) {\n                validator_bid.deactivate();\n                bids_modified = true;\n            }\n        }\n\n        let winners = validator_bids_detail.pick_winners(\n            era_id,\n            validator_slots,\n            minimum_bid_amount,\n            include_credits,\n            credit_cap,\n            era_end_timestamp_millis,\n            vesting_schedule_period_millis,\n        )?;\n\n        let (validator_bids, validator_credits, delegator_bids, reservations) =\n            validator_bids_detail.destructure();\n\n        // call prune BEFORE incrementing the era\n        detail::prune_validator_credits(self, era_id, &validator_credits);\n\n        // Increment era\n        era_id = era_id.checked_add(1).ok_or(Error::ArithmeticOverflow)?;\n\n        let delayed_era = era_id\n            .checked_add(auction_delay)\n            .ok_or(Error::ArithmeticOverflow)?;\n\n        // Update seigniorage recipients for current era\n        {\n            let mut snapshot = detail::get_seigniorage_recipients_snapshot(self)?;\n            let recipients =\n                seigniorage_recipients(&winners, &validator_bids, &delegator_bids, &reservations)?;\n            let previous_recipients = snapshot.insert(delayed_era, recipients);\n            assert!(previous_recipients.is_none());\n\n            let snapshot = snapshot.into_iter().rev().take(snapshot_size).collect();\n            
detail::set_seigniorage_recipients_snapshot(self, snapshot)?;\n        }\n\n        detail::set_era_id(self, era_id)?;\n        detail::set_era_end_timestamp_millis(self, era_end_timestamp_millis)?;\n\n        if bids_modified {\n            detail::set_validator_bids(self, validator_bids)?;\n        }\n\n        debug!(\"run_auction successful\");\n\n        Ok(())\n    }\n\n    /// Mint and distribute seigniorage rewards to validators and their delegators,\n    /// according to `reward_factors` returned by the consensus component.\n    // TODO: rework EraInfo and other related structs, methods, etc. to report correct era-end\n    // totals of per-block rewards\n    fn distribute(\n        &mut self,\n        rewards: BTreeMap<PublicKey, Vec<U512>>,\n        sustain_purse: Option<URef>,\n        rewards_handling: RewardsHandling,\n    ) -> Result<(), Error> {\n        if self.get_caller() != PublicKey::System.to_account_hash() {\n            error!(\"invalid caller to auction distribute\");\n            return Err(Error::InvalidCaller);\n        }\n\n        let total = {\n            let mut ret = U512::zero();\n            for rewards_vec in rewards.values() {\n                for reward in rewards_vec {\n                    ret += *reward\n                }\n            }\n\n            ret\n        };\n        let total = Ratio::new(total, U512::one());\n        let sustain_ratio = match rewards_handling {\n            RewardsHandling::Standard => Ratio::new(U512::zero(), U512::one()),\n            RewardsHandling::Sustain { ratio, .. } => {\n                let numerator = U512::from(*ratio.numer());\n                let denom = U512::from(*ratio.denom());\n\n                Ratio::new(numerator, denom)\n            }\n        };\n\n        let share = (sustain_ratio * total).to_integer();\n\n        match (rewards_handling, sustain_purse) {\n            (RewardsHandling::Sustain { .. 
}, Some(sustain_purse)) => {\n                self.mint_into_existing_purse(share, sustain_purse)?;\n            }\n            (RewardsHandling::Sustain { .. }, None) => return Err(Error::MintReward),\n            (RewardsHandling::Standard, _) => {}\n        }\n\n        debug!(\"reading seigniorage recipients snapshot\");\n        let seigniorage_recipients_snapshot = detail::get_seigniorage_recipients_snapshot(self)?;\n        let current_era_id = detail::get_era_id(self)?;\n\n        let mut era_info = EraInfo::new();\n        let seigniorage_allocations = era_info.seigniorage_allocations_mut();\n\n        debug!(rewards_set_size = rewards.len(), \"processing rewards\");\n        for item in rewards\n            .into_iter()\n            .filter(|(key, _amounts)| key != &PublicKey::System)\n            .map(|(proposer, amounts)| {\n                rewards_per_validator(\n                    &proposer,\n                    current_era_id,\n                    &amounts,\n                    &SeigniorageRecipientsSnapshot::V2(seigniorage_recipients_snapshot.clone()),\n                    sustain_ratio,\n                )\n                .map(|infos| infos.into_iter().map(move |info| (proposer.clone(), info)))\n            })\n            .flatten_ok()\n        {\n            let (validator_public_key, reward_info) = item?;\n\n            let validator_bid_addr = BidAddr::Validator(validator_public_key.to_account_hash());\n            let mut maybe_bridged_validator_addrs: Option<Vec<BidAddr>> = None;\n            let validator_reward_amount = reward_info.validator_reward();\n            let (validator_bonding_purse, min_del, max_del) =\n                match detail::get_distribution_target(self, validator_bid_addr) {\n                    Ok(target) => match target {\n                        DistributeTarget::Validator(mut validator_bid) => {\n                            debug!(?validator_public_key, \"validator payout starting \");\n                            
let validator_bonding_purse = *validator_bid.bonding_purse();\n                            validator_bid.increase_stake(validator_reward_amount)?;\n\n                            self.write_bid(\n                                validator_bid_addr.into(),\n                                BidKind::Validator(validator_bid.clone()),\n                            )?;\n                            (\n                                validator_bonding_purse,\n                                validator_bid.minimum_delegation_amount().into(),\n                                validator_bid.maximum_delegation_amount().into(),\n                            )\n                        }\n                        DistributeTarget::BridgedValidator {\n                            requested_validator_bid_addr: _requested_validator_bid_addr,\n                            current_validator_bid_addr,\n                            bridged_validator_addrs,\n                            mut validator_bid,\n                        } => {\n                            debug!(?validator_public_key, \"bridged validator payout starting \");\n                            maybe_bridged_validator_addrs = Some(bridged_validator_addrs); // <-- important\n                            let validator_bonding_purse = *validator_bid.bonding_purse();\n                            validator_bid.increase_stake(validator_reward_amount)?;\n\n                            self.write_bid(\n                                current_validator_bid_addr.into(),\n                                BidKind::Validator(validator_bid.clone()),\n                            )?;\n                            (\n                                validator_bonding_purse,\n                                validator_bid.minimum_delegation_amount().into(),\n                                validator_bid.maximum_delegation_amount().into(),\n                            )\n                        }\n                        DistributeTarget::Unbond(unbond) 
=> match unbond.target_unbond_era() {\n                            Some(mut unbond_era) => {\n                                let account_hash = validator_public_key.to_account_hash();\n                                let unbond_addr = BidAddr::UnbondAccount {\n                                    validator: account_hash,\n                                    unbonder: account_hash,\n                                };\n                                let validator_bonding_purse = *unbond_era.bonding_purse();\n                                let new_amount =\n                                    unbond_era.amount().saturating_add(validator_reward_amount);\n                                unbond_era.with_amount(new_amount);\n                                self.write_unbond(unbond_addr, Some(*unbond.clone()))?;\n                                (validator_bonding_purse, U512::MAX, U512::MAX)\n                            }\n                            None => {\n                                warn!(\n                                    ?validator_public_key,\n                                    \"neither validator bid or unbond found\"\n                                );\n                                continue;\n                            }\n                        },\n                        DistributeTarget::Delegator(_) => {\n                            return Err(Error::UnexpectedBidVariant);\n                        }\n                    },\n                    Err(Error::BridgeRecordChainTooLong) => {\n                        warn!(?validator_public_key, \"bridge record chain too long\");\n                        continue;\n                    }\n                    Err(err) => return Err(err),\n                };\n\n            self.mint_into_existing_purse(validator_reward_amount, validator_bonding_purse)?;\n            seigniorage_allocations.push(SeigniorageAllocation::validator(\n                validator_public_key.clone(),\n                
validator_reward_amount,\n            ));\n            debug!(?validator_public_key, \"validator payout finished\");\n\n            debug!(?validator_public_key, \"delegator payouts for validator\");\n            let mut undelegates = vec![];\n            let mut prunes = vec![];\n            for (delegator_kind, delegator_reward) in reward_info.take_delegator_rewards() {\n                let mut delegator_bid_addrs = Vec::with_capacity(2);\n                if let Some(bridged_validator_addrs) = &maybe_bridged_validator_addrs {\n                    for bridged_addr in bridged_validator_addrs {\n                        delegator_bid_addrs.push(BidAddr::new_delegator_kind_relaxed(\n                            bridged_addr.validator_account_hash(),\n                            &delegator_kind,\n                        ))\n                    }\n                }\n                delegator_bid_addrs.push(BidAddr::new_delegator_kind_relaxed(\n                    validator_bid_addr.validator_account_hash(),\n                    &delegator_kind,\n                ));\n                let mut maybe_delegator_bonding_purse: Option<URef> = None;\n                for delegator_bid_addr in delegator_bid_addrs {\n                    if delegator_reward.is_zero() {\n                        maybe_delegator_bonding_purse = None;\n                        break; // if there is no reward to give, no need to continue looking\n                    } else {\n                        let delegator_bid_key = delegator_bid_addr.into();\n                        match detail::get_distribution_target(self, delegator_bid_addr) {\n                            Ok(target) => match target {\n                                DistributeTarget::Delegator(mut delegator_bid) => {\n                                    let delegator_bonding_purse = *delegator_bid.bonding_purse();\n                                    let increased_stake =\n                                        
delegator_bid.increase_stake(delegator_reward)?;\n                                    if increased_stake < min_del {\n                                        // update the bid initially, but register for unbond and\n                                        // prune\n                                        undelegates.push((\n                                            delegator_kind.clone(),\n                                            validator_public_key.clone(),\n                                            increased_stake,\n                                        ));\n                                        prunes.push(delegator_bid_addr);\n                                    } else if increased_stake > max_del {\n                                        // update the bid initially, but register overage for unbond\n                                        let unbond_amount = increased_stake.saturating_sub(max_del);\n                                        if !unbond_amount.is_zero() {\n                                            undelegates.push((\n                                                delegator_kind.clone(),\n                                                validator_public_key.clone(),\n                                                unbond_amount,\n                                            ));\n                                        }\n                                    }\n                                    self.write_bid(\n                                        delegator_bid_key,\n                                        BidKind::Delegator(delegator_bid),\n                                    )?;\n                                    maybe_delegator_bonding_purse = Some(delegator_bonding_purse);\n                                    break;\n                                }\n                                DistributeTarget::Unbond(mut unbond) => {\n                                    match unbond.target_unbond_era_mut() {\n                            
            Some(unbond_era) => {\n                                            let unbond_addr = BidAddr::new_delegator_unbond_relaxed(\n                                                delegator_bid_addr.validator_account_hash(),\n                                                &delegator_kind,\n                                            );\n                                            let delegator_bonding_purse =\n                                                *unbond_era.bonding_purse();\n                                            let new_amount = unbond_era\n                                                .amount()\n                                                .saturating_add(delegator_reward);\n\n                                            unbond_era.with_amount(new_amount);\n                                            self.write_unbond(unbond_addr, Some(*unbond.clone()))?;\n                                            maybe_delegator_bonding_purse =\n                                                Some(delegator_bonding_purse);\n                                            break;\n                                        }\n                                        None => {\n                                            debug!(\n                                                ?delegator_bid_key,\n                                                \"neither delegator bid or unbond found\"\n                                            );\n                                            // keep looking\n                                        }\n                                    }\n                                }\n                                DistributeTarget::Validator(_)\n                                | DistributeTarget::BridgedValidator { .. 
} => {\n                                    return Err(Error::UnexpectedBidVariant)\n                                }\n                            },\n                            Err(Error::DelegatorNotFound) => {\n                                debug!(\n                                    ?validator_public_key,\n                                    ?delegator_bid_addr,\n                                    \"delegator bid not found\"\n                                );\n                                // keep looking\n                            }\n                            Err(err) => return Err(err),\n                        }\n                    }\n                }\n\n                // we include 0 allocations for explicitness\n                let allocation = SeigniorageAllocation::delegator_kind(\n                    delegator_kind,\n                    validator_public_key.clone(),\n                    delegator_reward,\n                );\n                seigniorage_allocations.push(allocation);\n                if let Some(delegator_bonding_purse) = maybe_delegator_bonding_purse {\n                    self.mint_into_existing_purse(delegator_reward, delegator_bonding_purse)?;\n                }\n            }\n\n            for (kind, pk, unbond_amount) in undelegates {\n                debug!(?kind, ?pk, ?unbond_amount, \"unbonding delegator\");\n                self.undelegate(kind, pk, unbond_amount)?;\n            }\n\n            for bid_addr in prunes {\n                debug!(?bid_addr, \"pruning bid\");\n                self.prune_bid(bid_addr);\n            }\n\n            debug!(\n                ?validator_public_key,\n                delegator_set_size = seigniorage_allocations.len(),\n                \"delegator payout finished\"\n            );\n\n            debug!(\n                ?validator_public_key,\n                \"rewards minted into recipient purses\"\n            );\n        }\n\n        // record allocations for this era 
for reporting purposes.\n        self.record_era_info(era_info)?;\n\n        Ok(())\n    }\n\n    /// Reads current era id.\n    fn read_era_id(&mut self) -> Result<EraId, Error> {\n        detail::get_era_id(self)\n    }\n\n    /// Activates a given validator's bid.  To be used when a validator has been marked as inactive\n    /// by consensus (aka \"evicted\").\n    fn activate_bid(&mut self, validator: PublicKey, minimum_bid: u64) -> Result<(), Error> {\n        let provided_account_hash = AccountHash::from(&validator);\n\n        if !self.is_allowed_session_caller(&provided_account_hash) {\n            return Err(Error::InvalidContext);\n        }\n\n        let key = BidAddr::from(validator).into();\n        if let Some(BidKind::Validator(mut validator_bid)) = self.read_bid(&key)? {\n            if validator_bid.staked_amount() >= minimum_bid.into() {\n                validator_bid.activate();\n                self.write_bid(key, BidKind::Validator(validator_bid))?;\n                Ok(())\n            } else {\n                Err(Error::BondTooSmall)\n            }\n        } else {\n            Err(Error::ValidatorNotFound)\n        }\n    }\n\n    /// Updates a `ValidatorBid` and all related delegator bids to use a new public key.\n    ///\n    /// This in effect \"transfers\" a validator bid along with its stake and all delegators\n    /// from one public key to another.\n    /// This method can only be called by the account associated with the current `ValidatorBid`.\n    ///\n    /// The arguments are the existing bid's 'validator_public_key' and the new public key.\n    fn change_bid_public_key(\n        &mut self,\n        public_key: PublicKey,\n        new_public_key: PublicKey,\n    ) -> Result<(), Error> {\n        let validator_account_hash = AccountHash::from(&public_key);\n\n        // check that the caller is the current bid's owner\n        if !self.is_allowed_session_caller(&validator_account_hash) {\n            return 
Err(Error::InvalidContext);\n        }\n\n        // verify that a bid for given public key exists\n        let validator_bid_addr = BidAddr::from(public_key.clone());\n        let mut validator_bid = read_validator_bid(self, &validator_bid_addr.into())?;\n\n        // verify that a bid for the new key does not exist yet\n        let new_validator_bid_addr = BidAddr::from(new_public_key.clone());\n        if self.read_bid(&new_validator_bid_addr.into())?.is_some() {\n            return Err(Error::ValidatorBidExistsAlready);\n        }\n\n        debug!(\"changing validator bid {validator_bid_addr} public key from {public_key} to {new_public_key}\");\n\n        // store new validator bid\n        validator_bid.with_validator_public_key(new_public_key.clone());\n        self.write_bid(\n            new_validator_bid_addr.into(),\n            BidKind::Validator(validator_bid),\n        )?;\n\n        // store bridge record in place of old validator bid\n        let bridge = Bridge::new(\n            public_key.clone(),\n            new_public_key.clone(),\n            self.read_era_id()?,\n        );\n        // write a bridge record under the old account hash, allowing forward pathing\n        // i.e. given an older account hash find the replacement account hash\n        self.write_bid(\n            validator_bid_addr.into(),\n            BidKind::Bridge(Box::new(bridge.clone())),\n        )?;\n        // write a bridge record under the new account hash, allowing reverse pathing\n        // i.e. 
given a newer account hash find the previous account hash\n        let rev_addr = BidAddr::new_validator_rev_addr_from_public_key(new_public_key.clone());\n        self.write_bid(rev_addr.into(), BidKind::Bridge(Box::new(bridge)))?;\n\n        debug!(\"transferring delegator bids from validator bid {validator_bid_addr} to {new_validator_bid_addr}\");\n        let delegators = read_delegator_bids(self, &public_key)?;\n        for mut delegator in delegators {\n            let delegator_bid_addr =\n                BidAddr::new_delegator_kind(&public_key, delegator.delegator_kind());\n\n            delegator.with_validator_public_key(new_public_key.clone());\n            let new_delegator_bid_addr =\n                BidAddr::new_delegator_kind(&new_public_key, delegator.delegator_kind());\n\n            self.write_bid(\n                new_delegator_bid_addr.into(),\n                BidKind::Delegator(Box::from(delegator)),\n            )?;\n\n            debug!(\"pruning delegator bid {delegator_bid_addr}\");\n            self.prune_bid(delegator_bid_addr);\n        }\n\n        Ok(())\n    }\n\n    /// Writes a validator credit record.\n    fn write_validator_credit(\n        &mut self,\n        validator: PublicKey,\n        era_id: EraId,\n        amount: U512,\n    ) -> Result<Option<BidAddr>, Error> {\n        // only the system may use this method\n        if self.get_caller() != PublicKey::System.to_account_hash() {\n            error!(\"invalid caller to auction validator_credit\");\n            return Err(Error::InvalidCaller);\n        }\n\n        // is imputed public key associated with a validator bid record?\n        let bid_addr = BidAddr::new_from_public_keys(&validator, None);\n        let key = Key::BidAddr(bid_addr);\n        let _ = match self.read_bid(&key)? 
{\n            Some(bid_kind) => bid_kind,\n            None => {\n                warn!(\n                    ?key,\n                    ?era_id,\n                    ?amount,\n                    \"attempt to add a validator credit to a non-existent validator\"\n                );\n                return Ok(None);\n            }\n        };\n\n        // if amount is zero, noop\n        if amount.is_zero() {\n            return Ok(None);\n        }\n\n        // write credit record\n        let credit_addr = BidAddr::new_credit(&validator, era_id);\n        let credit_key = Key::BidAddr(credit_addr);\n        let credit_bid = match self.read_bid(&credit_key)? {\n            Some(BidKind::Credit(mut existing_credit)) => {\n                existing_credit.increase(amount);\n                existing_credit\n            }\n            Some(_) => return Err(Error::UnexpectedBidVariant),\n            None => Box::new(ValidatorCredit::new(validator, era_id, amount)),\n        };\n\n        self.write_bid(credit_key, BidKind::Credit(credit_bid))\n            .map(|_| Some(credit_addr))\n    }\n}\n"
  },
  {
    "path": "storage/src/system/burn.rs",
    "content": "use std::{cell::RefCell, convert::TryFrom, rc::Rc};\nuse thiserror::Error;\n\nuse casper_types::{\n    bytesrepr::FromBytes,\n    system::{mint, mint::Error as MintError},\n    AccessRights, CLType, CLTyped, CLValue, CLValueError, Key, RuntimeArgs, RuntimeFootprint,\n    StoredValue, StoredValueTypeMismatch, URef, U512,\n};\n\nuse crate::{\n    global_state::{error::Error as GlobalStateError, state::StateReader},\n    tracking_copy::{TrackingCopy, TrackingCopyError, TrackingCopyExt},\n};\n\n/// Burn error.\n#[derive(Clone, Error, Debug)]\npub enum BurnError {\n    /// Invalid key variant.\n    #[error(\"Invalid key {0}\")]\n    UnexpectedKeyVariant(Key),\n    /// Type mismatch error.\n    #[error(\"{}\", _0)]\n    TypeMismatch(StoredValueTypeMismatch),\n    /// Forged reference error.\n    #[error(\"Forged reference: {}\", _0)]\n    ForgedReference(URef),\n    /// Invalid access.\n    #[error(\"Invalid access rights: {}\", required)]\n    InvalidAccess {\n        /// Required access rights of the operation.\n        required: AccessRights,\n    },\n    /// Error converting a CLValue.\n    #[error(\"{0}\")]\n    CLValue(CLValueError),\n    /// Invalid purse.\n    #[error(\"Invalid purse\")]\n    InvalidPurse,\n    /// Invalid argument.\n    #[error(\"Invalid argument\")]\n    InvalidArgument,\n    /// Missing argument.\n    #[error(\"Missing argument\")]\n    MissingArgument,\n    /// Invalid purse.\n    #[error(\"Attempt to transfer amount 0\")]\n    AttemptToBurnZero,\n    /// Invalid operation.\n    #[error(\"Invalid operation\")]\n    InvalidOperation,\n    /// Disallowed transfer attempt (private chain).\n    #[error(\"Either the source or the target must be an admin (private chain).\")]\n    RestrictedBurnAttempted,\n    /// Could not determine if target is an admin (private chain).\n    #[error(\"Unable to determine if the target of a transfer is an admin\")]\n    UnableToVerifyTargetIsAdmin,\n    /// Tracking copy error.\n    
#[error(\"{0}\")]\n    TrackingCopy(TrackingCopyError),\n    /// Mint error.\n    #[error(\"{0}\")]\n    Mint(MintError),\n}\n\nimpl From<GlobalStateError> for BurnError {\n    fn from(gse: GlobalStateError) -> Self {\n        BurnError::TrackingCopy(TrackingCopyError::Storage(gse))\n    }\n}\n\nimpl From<TrackingCopyError> for BurnError {\n    fn from(tce: TrackingCopyError) -> Self {\n        BurnError::TrackingCopy(tce)\n    }\n}\n\n/// Mint's burn arguments.\n///\n/// A struct has a benefit of static typing, which is helpful while resolving the arguments.\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub struct BurnArgs {\n    source: URef,\n    amount: U512,\n}\n\nimpl BurnArgs {\n    /// Creates new transfer arguments.\n    pub fn new(source: URef, amount: U512) -> Self {\n        Self { source, amount }\n    }\n\n    /// Returns `source` field.\n    pub fn source(&self) -> URef {\n        self.source\n    }\n\n    /// Returns `amount` field.\n    pub fn amount(&self) -> U512 {\n        self.amount\n    }\n}\n\nimpl TryFrom<BurnArgs> for RuntimeArgs {\n    type Error = CLValueError;\n\n    fn try_from(burn_args: BurnArgs) -> Result<Self, Self::Error> {\n        let mut runtime_args = RuntimeArgs::new();\n\n        runtime_args.insert(mint::ARG_SOURCE, burn_args.source)?;\n        runtime_args.insert(mint::ARG_AMOUNT, burn_args.amount)?;\n\n        Ok(runtime_args)\n    }\n}\n\n/// State of a builder of a `BurnArgs`.\n///\n/// Purpose of this builder is to resolve native burn args into BurnTargetMode and a\n/// [`BurnArgs`] instance to execute actual token burn on the mint contract.\n#[derive(Clone, Debug, PartialEq, Eq)]\npub struct BurnRuntimeArgsBuilder {\n    inner: RuntimeArgs,\n}\n\nimpl BurnRuntimeArgsBuilder {\n    /// Creates new burn args builder.\n    ///\n    /// Takes an incoming runtime args that represents native burn's arguments.\n    pub fn new(imputed_runtime_args: RuntimeArgs) -> BurnRuntimeArgsBuilder {\n        BurnRuntimeArgsBuilder {\n  
          inner: imputed_runtime_args,\n        }\n    }\n\n    /// Checks if a purse exists.\n    fn purse_exists<R>(&self, uref: URef, tracking_copy: Rc<RefCell<TrackingCopy<R>>>) -> bool\n    where\n        R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n    {\n        let key = match tracking_copy\n            .borrow_mut()\n            .get_purse_balance_key(uref.into())\n        {\n            Ok(key) => key,\n            Err(_) => return false,\n        };\n        tracking_copy\n            .borrow_mut()\n            .get_available_balance(key)\n            .is_ok()\n    }\n\n    /// Resolves the source purse of the burn.\n    ///\n    /// User can optionally pass a \"source\" argument which should refer to an [`URef`] existing in\n    /// user's named keys. When the \"source\" argument is missing then user's main purse is assumed.\n    ///\n    /// Returns resolved [`URef`].\n    fn resolve_source_uref<R>(\n        &self,\n        account: &RuntimeFootprint,\n        tracking_copy: Rc<RefCell<TrackingCopy<R>>>,\n    ) -> Result<URef, BurnError>\n    where\n        R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n    {\n        let imputed_runtime_args = &self.inner;\n        let arg_name = mint::ARG_SOURCE;\n        let uref = match imputed_runtime_args.get(arg_name) {\n            Some(cl_value) if *cl_value.cl_type() == CLType::URef => {\n                self.map_cl_value::<URef>(cl_value)?\n            }\n            Some(cl_value) if *cl_value.cl_type() == CLType::Option(CLType::URef.into()) => {\n                let Some(uref): Option<URef> = self.map_cl_value(cl_value)? 
else {\n                    return account.main_purse().ok_or(BurnError::InvalidOperation);\n                };\n                uref\n            }\n            Some(_) => return Err(BurnError::InvalidArgument),\n            None => return account.main_purse().ok_or(BurnError::InvalidOperation), /* if no source purse passed use account\n                                                                                     * main purse */\n        };\n        if account\n            .main_purse()\n            .ok_or(BurnError::InvalidOperation)?\n            .addr()\n            == uref.addr()\n        {\n            return Ok(uref);\n        }\n\n        let normalized_uref = Key::URef(uref).normalize();\n        let maybe_named_key = account\n            .named_keys()\n            .keys()\n            .find(|&named_key| named_key.normalize() == normalized_uref);\n\n        match maybe_named_key {\n            Some(Key::URef(found_uref)) => {\n                if found_uref.is_writeable() {\n                    // it is a URef and caller has access but is it a purse URef?\n                    if !self.purse_exists(found_uref.to_owned(), tracking_copy) {\n                        return Err(BurnError::InvalidPurse);\n                    }\n\n                    Ok(uref)\n                } else {\n                    Err(BurnError::InvalidAccess {\n                        required: AccessRights::WRITE,\n                    })\n                }\n            }\n            Some(key) => Err(BurnError::TypeMismatch(StoredValueTypeMismatch::new(\n                \"Key::URef\".to_string(),\n                key.type_string(),\n            ))),\n            None => Err(BurnError::ForgedReference(uref)),\n        }\n    }\n\n    /// Resolves amount.\n    ///\n    /// User has to specify \"amount\" argument that could be either a [`U512`] or a u64.\n    fn resolve_amount(&self) -> Result<U512, BurnError> {\n        let imputed_runtime_args = &self.inner;\n\n        let amount = 
match imputed_runtime_args.get(mint::ARG_AMOUNT) {\n            Some(amount_value) if *amount_value.cl_type() == CLType::U512 => {\n                self.map_cl_value(amount_value)?\n            }\n            Some(amount_value) if *amount_value.cl_type() == CLType::U64 => {\n                let amount: u64 = self.map_cl_value(amount_value)?;\n                U512::from(amount)\n            }\n            Some(_) => return Err(BurnError::InvalidArgument),\n            None => return Err(BurnError::MissingArgument),\n        };\n\n        if amount.is_zero() {\n            return Err(BurnError::AttemptToBurnZero);\n        }\n\n        Ok(amount)\n    }\n\n    /// Creates new [`BurnArgs`] instance.\n    pub fn build<R>(\n        self,\n        from: &RuntimeFootprint,\n        tracking_copy: Rc<RefCell<TrackingCopy<R>>>,\n    ) -> Result<BurnArgs, BurnError>\n    where\n        R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n    {\n        let source = self.resolve_source_uref(from, Rc::clone(&tracking_copy))?;\n        let amount = self.resolve_amount()?;\n        Ok(BurnArgs { source, amount })\n    }\n\n    fn map_cl_value<T: CLTyped + FromBytes>(&self, cl_value: &CLValue) -> Result<T, BurnError> {\n        cl_value.clone().into_t().map_err(BurnError::CLValue)\n    }\n}\n"
  },
  {
    "path": "storage/src/system/error.rs",
    "content": "use casper_types::account::AccountHash;\n\n/// Implementation level errors for system contract providers\n#[derive(Debug)]\npub enum ProviderError {\n    /// System contract registry.\n    SystemEntityRegistry,\n    /// Account hash.\n    AccountHash(AccountHash),\n}\n"
  },
  {
    "path": "storage/src/system/genesis/account_contract_installer.rs",
    "content": "use itertools::Itertools;\nuse num_rational::Ratio;\nuse num_traits::Zero;\nuse rand::Rng;\nuse std::{\n    cell::RefCell,\n    collections::{BTreeMap, BTreeSet},\n    rc::Rc,\n};\n\nuse crate::{\n    global_state::state::StateProvider,\n    system::{\n        genesis::{GenesisError, DEFAULT_ADDRESS, NO_WASM},\n        protocol_upgrade::ProtocolUpgradeError,\n    },\n    tracking_copy::AddResult,\n    AddressGenerator, TrackingCopy,\n};\nuse casper_types::{\n    account::AccountHash,\n    addressable_entity::{\n        ActionThresholds, EntityKindTag, MessageTopics, NamedKeyAddr, NamedKeyValue,\n    },\n    bytesrepr::{Bytes, ToBytes},\n    contracts::{\n        ContractHash, ContractPackage, ContractPackageHash, ContractPackageStatus,\n        ContractVersions, DisabledVersions, NamedKeys,\n    },\n    execution::Effects,\n    system::{\n        auction::{\n            self, BidAddr, BidKind, DelegationRate, Delegator, DelegatorBid, DelegatorKind,\n            SeigniorageRecipient, SeigniorageRecipientV2, SeigniorageRecipients,\n            SeigniorageRecipientsSnapshot, SeigniorageRecipientsSnapshotV2,\n            SeigniorageRecipientsV2, Staking, ValidatorBid, AUCTION_DELAY_KEY,\n            DEFAULT_SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION, DELEGATION_RATE_DENOMINATOR,\n            ERA_END_TIMESTAMP_MILLIS_KEY, ERA_ID_KEY, INITIAL_ERA_END_TIMESTAMP_MILLIS,\n            INITIAL_ERA_ID, LOCKED_FUNDS_PERIOD_KEY, MINIMUM_DELEGATION_RATE_KEY,\n            SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY,\n            UNBONDING_DELAY_KEY, VALIDATOR_SLOTS_KEY,\n        },\n        handle_payment::{self, ACCUMULATION_PURSE_KEY},\n        mint::{\n            self, ARG_ROUND_SEIGNIORAGE_RATE, MINT_GAS_HOLD_HANDLING_KEY,\n            MINT_GAS_HOLD_INTERVAL_KEY, MINT_SUSTAIN_PURSE_KEY, ROUND_SEIGNIORAGE_RATE_KEY,\n            TOTAL_SUPPLY_KEY,\n        },\n        standard_payment, SystemEntityType, AUCTION, HANDLE_PAYMENT, 
MINT, STANDARD_PAYMENT,\n    },\n    AccessRights, Account, AddressableEntity, AddressableEntityHash, AdministratorAccount,\n    BlockGlobalAddr, ByteCode, ByteCodeAddr, ByteCodeHash, ByteCodeKind, CLValue,\n    ChainspecRegistry, Contract, ContractWasm, ContractWasmHash, Digest, EntityAddr, EntityKind,\n    EntityVersions, EntryPointAddr, EntryPointValue, EntryPoints, EraId, GenesisAccount,\n    GenesisConfig, Groups, HashAddr, Key, Motes, Package, PackageHash, PackageStatus, Phase,\n    ProtocolVersion, PublicKey, RewardsHandling, StoredValue, SystemHashRegistry, URef,\n    REWARDS_HANDLING_RATIO_TAG, U512,\n};\n\npub struct AccountContractInstaller<S>\nwhere\n    S: StateProvider,\n{\n    protocol_version: ProtocolVersion,\n    config: GenesisConfig,\n    address_generator: Rc<RefCell<AddressGenerator>>,\n    tracking_copy: Rc<RefCell<TrackingCopy<<S as StateProvider>::Reader>>>,\n}\n\nimpl<S> AccountContractInstaller<S>\nwhere\n    S: StateProvider,\n{\n    pub(crate) fn new(\n        genesis_config_hash: Digest,\n        protocol_version: ProtocolVersion,\n        config: GenesisConfig,\n        tracking_copy: Rc<RefCell<TrackingCopy<<S as StateProvider>::Reader>>>,\n    ) -> Self {\n        let phase = Phase::System;\n        let genesis_config_hash_bytes = genesis_config_hash.as_ref();\n\n        let address_generator = {\n            let generator = AddressGenerator::new(genesis_config_hash_bytes, phase);\n            Rc::new(RefCell::new(generator))\n        };\n\n        AccountContractInstaller {\n            protocol_version,\n            address_generator,\n            tracking_copy,\n            config,\n        }\n    }\n\n    pub(crate) fn finalize(self) -> Effects {\n        self.tracking_copy.borrow().effects()\n    }\n\n    fn create_mint(&mut self) -> Result<(Key, Key), Box<GenesisError>> {\n        let round_seigniorage_rate_uref =\n            {\n                let round_seigniorage_rate_uref = self\n                    .address_generator\n   
                 .borrow_mut()\n                    .new_uref(AccessRights::READ_ADD_WRITE);\n\n                let (round_seigniorage_rate_numer, round_seigniorage_rate_denom) =\n                    self.config.round_seigniorage_rate().into();\n                let round_seigniorage_rate: Ratio<U512> = Ratio::new(\n                    round_seigniorage_rate_numer.into(),\n                    round_seigniorage_rate_denom.into(),\n                );\n\n                self.tracking_copy.borrow_mut().write(\n                    round_seigniorage_rate_uref.into(),\n                    StoredValue::CLValue(CLValue::from_t(round_seigniorage_rate).map_err(\n                        |_| GenesisError::CLValue(ARG_ROUND_SEIGNIORAGE_RATE.to_string()),\n                    )?),\n                );\n                round_seigniorage_rate_uref\n            };\n\n        let total_supply_uref = {\n            let total_supply_uref = self\n                .address_generator\n                .borrow_mut()\n                .new_uref(AccessRights::READ_ADD_WRITE);\n\n            self.tracking_copy.borrow_mut().write(\n                total_supply_uref.into(),\n                StoredValue::CLValue(\n                    CLValue::from_t(U512::zero())\n                        .map_err(|_| GenesisError::CLValue(TOTAL_SUPPLY_KEY.to_string()))?,\n                ),\n            );\n            total_supply_uref\n        };\n\n        let gas_hold_handling_uref =\n            {\n                let gas_hold_handling = self.config.gas_hold_balance_handling().tag();\n                let gas_hold_handling_uref = self\n                    .address_generator\n                    .borrow_mut()\n                    .new_uref(AccessRights::READ_ADD_WRITE);\n\n                self.tracking_copy.borrow_mut().write(\n                    gas_hold_handling_uref.into(),\n                    StoredValue::CLValue(CLValue::from_t(gas_hold_handling).map_err(|_| {\n                        
GenesisError::CLValue(MINT_GAS_HOLD_HANDLING_KEY.to_string())\n                    })?),\n                );\n                gas_hold_handling_uref\n            };\n\n        let gas_hold_interval_uref =\n            {\n                let gas_hold_interval = self.config.gas_hold_interval_millis();\n                let gas_hold_interval_uref = self\n                    .address_generator\n                    .borrow_mut()\n                    .new_uref(AccessRights::READ_ADD_WRITE);\n\n                self.tracking_copy.borrow_mut().write(\n                    gas_hold_interval_uref.into(),\n                    StoredValue::CLValue(CLValue::from_t(gas_hold_interval).map_err(|_| {\n                        GenesisError::CLValue(MINT_GAS_HOLD_INTERVAL_KEY.to_string())\n                    })?),\n                );\n                gas_hold_interval_uref\n            };\n\n        let named_keys = {\n            let mut named_keys = NamedKeys::new();\n            named_keys.insert(\n                ROUND_SEIGNIORAGE_RATE_KEY.to_string(),\n                round_seigniorage_rate_uref.into(),\n            );\n            named_keys.insert(TOTAL_SUPPLY_KEY.to_string(), total_supply_uref.into());\n            named_keys.insert(\n                MINT_GAS_HOLD_HANDLING_KEY.to_string(),\n                gas_hold_handling_uref.into(),\n            );\n            named_keys.insert(\n                MINT_GAS_HOLD_INTERVAL_KEY.to_string(),\n                gas_hold_interval_uref.into(),\n            );\n            named_keys\n        };\n\n        let entry_points = mint::mint_entry_points();\n\n        let access_key = self\n            .address_generator\n            .borrow_mut()\n            .new_uref(AccessRights::READ_ADD_WRITE);\n\n        let (_, mint_hash) = self.store_contract(access_key, named_keys, entry_points);\n\n        {\n            // Insert a partial registry into global state.\n            // This allows for default values to be accessible when the 
remaining system contracts\n            // call the `call_host_mint` function during their creation.\n            let mut partial_registry = BTreeMap::<String, HashAddr>::new();\n            partial_registry.insert(MINT.to_string(), mint_hash.value());\n            partial_registry.insert(HANDLE_PAYMENT.to_string(), DEFAULT_ADDRESS);\n            let cl_registry = CLValue::from_t(partial_registry)\n                .map_err(|error| GenesisError::CLValue(error.to_string()))?;\n            self.tracking_copy\n                .borrow_mut()\n                .write(Key::SystemEntityRegistry, StoredValue::CLValue(cl_registry));\n        }\n\n        Ok((total_supply_uref.into(), Key::Hash(mint_hash.value())))\n    }\n\n    fn create_handle_payment(\n        &self,\n        handle_payment_payment_purse: URef,\n    ) -> Result<HashAddr, Box<GenesisError>> {\n        let named_keys = {\n            let mut named_keys = NamedKeys::new();\n            let named_key = Key::URef(handle_payment_payment_purse);\n            named_keys.insert(handle_payment::PAYMENT_PURSE_KEY.to_string(), named_key);\n\n            // This purse is used only in FeeHandling::Accumulate setting.\n            let rewards_purse_uref = self.create_purse(U512::zero())?;\n\n            named_keys.insert(\n                ACCUMULATION_PURSE_KEY.to_string(),\n                rewards_purse_uref.into(),\n            );\n\n            named_keys\n        };\n\n        let entry_points = handle_payment::handle_payment_entry_points();\n\n        let access_key = self\n            .address_generator\n            .borrow_mut()\n            .new_uref(AccessRights::READ_ADD_WRITE);\n\n        let (_, handle_payment_hash) = self.store_contract(access_key, named_keys, entry_points);\n\n        self.store_system_contract(HANDLE_PAYMENT, handle_payment_hash)?;\n\n        Ok(handle_payment_hash.value())\n    }\n\n    fn create_auction(&self, total_supply_key: Key) -> Result<HashAddr, Box<GenesisError>> {\n        let 
locked_funds_period_millis = self.config.locked_funds_period_millis();\n        let auction_delay: u64 = self.config.auction_delay();\n        let genesis_timestamp_millis: u64 = self.config.genesis_timestamp_millis();\n        let minimum_delegation_rate = self.config.minimum_delegation_rate();\n\n        let mut named_keys = NamedKeys::new();\n\n        let cl_value = CLValue::from_t(minimum_delegation_rate)\n            .map_err(|cl_error| GenesisError::CLValue(cl_error.to_string()))?;\n        let minimum_delegation_rate_uref = self\n            .address_generator\n            .borrow_mut()\n            .new_uref(AccessRights::READ_ADD_WRITE);\n        self.tracking_copy.borrow_mut().write(\n            minimum_delegation_rate_uref.into(),\n            StoredValue::CLValue(cl_value),\n        );\n        named_keys.insert(\n            MINIMUM_DELEGATION_RATE_KEY.into(),\n            minimum_delegation_rate_uref.into(),\n        );\n\n        let genesis_validators: Vec<_> = self.config.get_bonded_validators().collect();\n        if (self.config.validator_slots() as usize) < genesis_validators.len() {\n            return Err(GenesisError::InvalidValidatorSlots {\n                validators: genesis_validators.len(),\n                validator_slots: self.config.validator_slots(),\n            }\n            .into());\n        }\n\n        let genesis_delegators: Vec<_> = self.config.get_bonded_delegators().collect();\n\n        // Make sure all delegators have corresponding genesis validator entries\n        for (validator_public_key, delegator_public_key, _, delegated_amount) in\n            genesis_delegators.iter()\n        {\n            if *delegated_amount == &Motes::zero() {\n                return Err(GenesisError::InvalidDelegatedAmount {\n                    public_key: (*delegator_public_key).clone(),\n                }\n                .into());\n            }\n\n            let orphan_condition = genesis_validators.iter().find(|genesis_validator| 
{\n                genesis_validator.public_key() == (*validator_public_key).clone()\n            });\n\n            if orphan_condition.is_none() {\n                return Err(GenesisError::OrphanedDelegator {\n                    validator_public_key: (*validator_public_key).clone(),\n                    delegator_public_key: (*delegator_public_key).clone(),\n                }\n                .into());\n            }\n        }\n\n        let mut total_staked_amount = U512::zero();\n\n        let staked = {\n            let mut staked: Staking = BTreeMap::new();\n\n            for genesis_validator in genesis_validators {\n                let public_key = genesis_validator.public_key();\n                let mut delegators = BTreeMap::new();\n\n                let staked_amount = genesis_validator.staked_amount().value();\n                if staked_amount.is_zero() {\n                    return Err(GenesisError::InvalidBondAmount { public_key }.into());\n                }\n\n                let delegation_rate = genesis_validator.delegation_rate();\n                if delegation_rate > DELEGATION_RATE_DENOMINATOR {\n                    return Err(GenesisError::InvalidDelegationRate {\n                        public_key,\n                        delegation_rate,\n                    }\n                    .into());\n                }\n                debug_assert_ne!(public_key, PublicKey::System);\n\n                total_staked_amount += staked_amount;\n\n                let purse_uref = self.create_purse(staked_amount)?;\n                let release_timestamp_millis =\n                    genesis_timestamp_millis + locked_funds_period_millis;\n                let validator_bid = {\n                    let bid = ValidatorBid::locked(\n                        public_key.clone(),\n                        purse_uref,\n                        staked_amount,\n                        delegation_rate,\n                        release_timestamp_millis,\n                 
       0,\n                        u64::MAX,\n                        0,\n                    );\n\n                    // Set up delegator entries attached to genesis validators\n                    for (\n                        validator_public_key,\n                        delegator_public_key,\n                        _delegator_balance,\n                        delegator_delegated_amount,\n                    ) in genesis_delegators.iter()\n                    {\n                        if (*validator_public_key).clone() == public_key.clone() {\n                            total_staked_amount += delegator_delegated_amount.value();\n\n                            let purse_uref =\n                                self.create_purse(delegator_delegated_amount.value())?;\n\n                            let delegator_kind: DelegatorKind =\n                                DelegatorKind::PublicKey((*delegator_public_key).clone());\n                            let delegator = DelegatorBid::locked(\n                                delegator_kind.clone(),\n                                delegator_delegated_amount.value(),\n                                purse_uref,\n                                (*validator_public_key).clone(),\n                                release_timestamp_millis,\n                            );\n\n                            if delegators.insert(delegator_kind, delegator).is_some() {\n                                return Err(GenesisError::DuplicatedDelegatorEntry {\n                                    validator_public_key: (*validator_public_key).clone(),\n                                    delegator_public_key: (*delegator_public_key).clone(),\n                                }\n                                .into());\n                            }\n                        }\n                    }\n\n                    bid\n                };\n\n                staked.insert(public_key, (validator_bid, delegators));\n            }\n       
     staked\n        };\n\n        let _ = self.tracking_copy.borrow_mut().add(\n            total_supply_key,\n            StoredValue::CLValue(\n                CLValue::from_t(total_staked_amount)\n                    .map_err(|_| GenesisError::CLValue(TOTAL_SUPPLY_KEY.to_string()))?,\n            ),\n        );\n\n        let initial_seigniorage_recipients =\n            self.initial_seigniorage_recipients(&staked, auction_delay);\n\n        let era_id_uref = self\n            .address_generator\n            .borrow_mut()\n            .new_uref(AccessRights::READ_ADD_WRITE);\n        self.tracking_copy.borrow_mut().write(\n            era_id_uref.into(),\n            StoredValue::CLValue(\n                CLValue::from_t(INITIAL_ERA_ID)\n                    .map_err(|_| GenesisError::CLValue(ERA_ID_KEY.to_string()))?,\n            ),\n        );\n        named_keys.insert(ERA_ID_KEY.into(), era_id_uref.into());\n\n        let era_end_timestamp_millis_uref = self\n            .address_generator\n            .borrow_mut()\n            .new_uref(AccessRights::READ_ADD_WRITE);\n        self.tracking_copy.borrow_mut().write(\n            era_end_timestamp_millis_uref.into(),\n            StoredValue::CLValue(\n                CLValue::from_t(INITIAL_ERA_END_TIMESTAMP_MILLIS)\n                    .map_err(|_| GenesisError::CLValue(ERA_END_TIMESTAMP_MILLIS_KEY.to_string()))?,\n            ),\n        );\n        named_keys.insert(\n            ERA_END_TIMESTAMP_MILLIS_KEY.into(),\n            era_end_timestamp_millis_uref.into(),\n        );\n\n        let initial_seigniorage_recipients_uref = self\n            .address_generator\n            .borrow_mut()\n            .new_uref(AccessRights::READ_ADD_WRITE);\n        self.tracking_copy.borrow_mut().write(\n            initial_seigniorage_recipients_uref.into(),\n            StoredValue::CLValue(CLValue::from_t(initial_seigniorage_recipients).map_err(\n                |_| 
GenesisError::CLValue(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY.to_string()),\n            )?),\n        );\n        named_keys.insert(\n            SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY.into(),\n            initial_seigniorage_recipients_uref.into(),\n        );\n\n        // initialize snapshot version flag\n        let initial_seigniorage_recipients_version_uref = self\n            .address_generator\n            .borrow_mut()\n            .new_uref(AccessRights::READ_ADD_WRITE);\n        self.tracking_copy.borrow_mut().write(\n            initial_seigniorage_recipients_version_uref.into(),\n            StoredValue::CLValue(\n                CLValue::from_t(DEFAULT_SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION).map_err(|_| {\n                    GenesisError::CLValue(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY.to_string())\n                })?,\n            ),\n        );\n\n        named_keys.insert(\n            SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY.into(),\n            initial_seigniorage_recipients_version_uref.into(),\n        );\n\n        // store all delegator and validator bids\n        for (validator_public_key, (validator_bid, delegators)) in staked {\n            for (delegator_kind, delegator_bid) in delegators {\n                let delegator_bid_key = Key::BidAddr(BidAddr::new_delegator_kind(\n                    &validator_public_key.clone(),\n                    &delegator_kind,\n                ));\n                self.tracking_copy.borrow_mut().write(\n                    delegator_bid_key,\n                    StoredValue::BidKind(BidKind::Delegator(Box::new(delegator_bid))),\n                );\n            }\n            let validator_bid_key = Key::BidAddr(BidAddr::from(validator_public_key.clone()));\n            self.tracking_copy.borrow_mut().write(\n                validator_bid_key,\n                StoredValue::BidKind(BidKind::Validator(Box::new(validator_bid))),\n            );\n        }\n\n        let validator_slots = 
self.config.validator_slots();\n        let validator_slots_uref = self\n            .address_generator\n            .borrow_mut()\n            .new_uref(AccessRights::READ_ADD_WRITE);\n        self.tracking_copy.borrow_mut().write(\n            validator_slots_uref.into(),\n            StoredValue::CLValue(\n                CLValue::from_t(validator_slots)\n                    .map_err(|_| GenesisError::CLValue(VALIDATOR_SLOTS_KEY.to_string()))?,\n            ),\n        );\n        named_keys.insert(VALIDATOR_SLOTS_KEY.into(), validator_slots_uref.into());\n\n        let auction_delay_uref = self\n            .address_generator\n            .borrow_mut()\n            .new_uref(AccessRights::READ_ADD_WRITE);\n        self.tracking_copy.borrow_mut().write(\n            auction_delay_uref.into(),\n            StoredValue::CLValue(\n                CLValue::from_t(auction_delay)\n                    .map_err(|_| GenesisError::CLValue(AUCTION_DELAY_KEY.to_string()))?,\n            ),\n        );\n        named_keys.insert(AUCTION_DELAY_KEY.into(), auction_delay_uref.into());\n\n        let locked_funds_period_uref = self\n            .address_generator\n            .borrow_mut()\n            .new_uref(AccessRights::READ_ADD_WRITE);\n        self.tracking_copy.borrow_mut().write(\n            locked_funds_period_uref.into(),\n            StoredValue::CLValue(\n                CLValue::from_t(locked_funds_period_millis)\n                    .map_err(|_| GenesisError::CLValue(LOCKED_FUNDS_PERIOD_KEY.to_string()))?,\n            ),\n        );\n        named_keys.insert(\n            LOCKED_FUNDS_PERIOD_KEY.into(),\n            locked_funds_period_uref.into(),\n        );\n\n        let unbonding_delay = self.config.unbonding_delay();\n        let unbonding_delay_uref = self\n            .address_generator\n            .borrow_mut()\n            .new_uref(AccessRights::READ_ADD_WRITE);\n        self.tracking_copy.borrow_mut().write(\n            
unbonding_delay_uref.into(),\n            StoredValue::CLValue(\n                CLValue::from_t(unbonding_delay)\n                    .map_err(|_| GenesisError::CLValue(UNBONDING_DELAY_KEY.to_string()))?,\n            ),\n        );\n        named_keys.insert(UNBONDING_DELAY_KEY.into(), unbonding_delay_uref.into());\n\n        let entry_points = auction::auction_entry_points();\n\n        let access_key = self\n            .address_generator\n            .borrow_mut()\n            .new_uref(AccessRights::READ_ADD_WRITE);\n\n        let (_, auction_hash) = self.store_contract(access_key, named_keys, entry_points);\n\n        self.store_system_contract(AUCTION, auction_hash)?;\n\n        Ok(auction_hash.value())\n    }\n\n    pub(crate) fn create_accounts(\n        &self,\n        total_supply_key: Key,\n        payment_purse_uref: URef,\n    ) -> Result<Option<URef>, Box<GenesisError>> {\n        let accounts = {\n            let mut ret: Vec<GenesisAccount> = self.config.accounts_iter().cloned().collect();\n            let system_account = GenesisAccount::system();\n            ret.push(system_account);\n            ret\n        };\n\n        let mut administrative_accounts = self.config.administrative_accounts().peekable();\n\n        if administrative_accounts.peek().is_some()\n            && administrative_accounts\n                .duplicates_by(|admin| admin.public_key())\n                .next()\n                .is_some()\n        {\n            // Ensure no duplicate administrator accounts are specified as this might raise errors\n            // during genesis process when administrator accounts are added to associated keys.\n            return Err(GenesisError::DuplicatedAdministratorEntry.into());\n        }\n\n        let mut total_supply = U512::zero();\n        let mut sustain_purse = None;\n\n        for account in accounts {\n            let account_hash = account.account_hash();\n            let main_purse = match account {\n                
GenesisAccount::System\n                    if self.config.administrative_accounts().next().is_some() =>\n                {\n                    payment_purse_uref\n                }\n                _ => self.create_purse(account.balance().value())?,\n            };\n\n            if self.config.rewards_ratio().is_some() && account.is_sustain_account() {\n                let cl_value = {\n                    let mut ret: BTreeMap<u8, Bytes> = BTreeMap::new();\n                    let ratio_as_bytes = self\n                        .config\n                        .rewards_ratio()\n                        .ok_or(Box::new(GenesisError::CLValue(\n                            \"could not serialize rewards ratio\".to_string(),\n                        )))?\n                        .to_bytes()\n                        .map_err(|err| Box::new(GenesisError::Bytesrepr(err)))?;\n\n                    ret.insert(REWARDS_HANDLING_RATIO_TAG, ratio_as_bytes.into());\n                    CLValue::from_t(ret)\n                }\n                .map_err(|cl_err| Box::new(GenesisError::CLValue(cl_err.to_string())))?;\n\n                self.tracking_copy\n                    .borrow_mut()\n                    .write(Key::RewardsHandling, StoredValue::CLValue(cl_value));\n\n                sustain_purse = Some(main_purse)\n            }\n\n            let key = Key::Account(account_hash);\n            let stored_value = StoredValue::Account(Account::create(\n                account_hash,\n                Default::default(),\n                main_purse,\n            ));\n\n            self.tracking_copy.borrow_mut().write(key, stored_value);\n\n            total_supply += account.balance().value();\n        }\n\n        self.tracking_copy.borrow_mut().write(\n            total_supply_key,\n            StoredValue::CLValue(\n                CLValue::from_t(total_supply)\n                    .map_err(|_| GenesisError::CLValue(TOTAL_SUPPLY_KEY.to_string()))?,\n            ),\n        
);\n\n        Ok(sustain_purse)\n    }\n\n    fn initial_seigniorage_recipients(\n        &self,\n        staked: &Staking,\n        auction_delay: u64,\n    ) -> BTreeMap<EraId, SeigniorageRecipientsV2> {\n        let initial_snapshot_range = INITIAL_ERA_ID.iter_inclusive(auction_delay);\n\n        let mut seigniorage_recipients = SeigniorageRecipientsV2::new();\n        for (validator_public_key, (validator_bid, delegators)) in staked {\n            let mut delegator_stake = BTreeMap::new();\n            for (k, v) in delegators {\n                delegator_stake.insert(k.clone(), v.staked_amount());\n            }\n            let recipient = SeigniorageRecipientV2::new(\n                validator_bid.staked_amount(),\n                *validator_bid.delegation_rate(),\n                delegator_stake,\n                BTreeMap::new(),\n            );\n            seigniorage_recipients.insert(validator_public_key.clone(), recipient);\n        }\n\n        let mut initial_seigniorage_recipients = SeigniorageRecipientsSnapshotV2::new();\n        for era_id in initial_snapshot_range {\n            initial_seigniorage_recipients.insert(era_id, seigniorage_recipients.clone());\n        }\n        initial_seigniorage_recipients\n    }\n\n    fn create_purse(&self, amount: U512) -> Result<URef, Box<GenesisError>> {\n        let purse_addr = self.address_generator.borrow_mut().create_address();\n\n        let balance_cl_value =\n            CLValue::from_t(amount).map_err(|error| GenesisError::CLValue(error.to_string()))?;\n        self.tracking_copy.borrow_mut().write(\n            Key::Balance(purse_addr),\n            StoredValue::CLValue(balance_cl_value),\n        );\n\n        let purse_cl_value = CLValue::unit();\n        let purse_uref = URef::new(purse_addr, AccessRights::READ_ADD_WRITE);\n        self.tracking_copy\n            .borrow_mut()\n            .write(Key::URef(purse_uref), StoredValue::CLValue(purse_cl_value));\n\n        Ok(purse_uref)\n    }\n\n   
 fn store_contract(\n        &self,\n        access_key: URef,\n        named_keys: NamedKeys,\n        entry_points: EntryPoints,\n    ) -> (ContractPackageHash, ContractHash) {\n        let protocol_version = self.protocol_version;\n        let contract_wasm_hash =\n            ContractWasmHash::new(self.address_generator.borrow_mut().new_hash_address());\n        let contract_hash =\n            ContractHash::new(self.address_generator.borrow_mut().new_hash_address());\n        let contract_package_hash =\n            ContractPackageHash::new(self.address_generator.borrow_mut().new_hash_address());\n\n        let contract_wasm = ContractWasm::new(vec![]);\n        let contract = Contract::new(\n            contract_package_hash,\n            contract_wasm_hash,\n            named_keys,\n            entry_points.into(),\n            protocol_version,\n        );\n\n        // Genesis contracts can be versioned contracts.\n        let contract_package = {\n            let mut contract_package = ContractPackage::new(\n                access_key,\n                ContractVersions::default(),\n                DisabledVersions::default(),\n                Groups::default(),\n                ContractPackageStatus::default(),\n            );\n            contract_package.insert_contract_version(protocol_version.value().major, contract_hash);\n            contract_package\n        };\n\n        self.tracking_copy.borrow_mut().write(\n            contract_wasm_hash.into(),\n            StoredValue::ContractWasm(contract_wasm),\n        );\n        self.tracking_copy\n            .borrow_mut()\n            .write(contract_hash.into(), StoredValue::Contract(contract));\n        self.tracking_copy.borrow_mut().write(\n            contract_package_hash.into(),\n            StoredValue::ContractPackage(contract_package),\n        );\n\n        (contract_package_hash, contract_hash)\n    }\n\n    fn store_system_contract(\n        &self,\n        contract_name: &str,\n        
contract_hash: ContractHash,\n    ) -> Result<(), Box<GenesisError>> {\n        let partial_cl_registry = self\n            .tracking_copy\n            .borrow_mut()\n            .read(&Key::SystemEntityRegistry)\n            .map_err(|_| GenesisError::FailedToCreateSystemRegistry)?\n            .ok_or_else(|| {\n                GenesisError::CLValue(\"failed to convert registry as stored value\".to_string())\n            })?\n            .as_cl_value()\n            .ok_or_else(|| GenesisError::CLValue(\"failed to convert to CLValue\".to_string()))?\n            .to_owned();\n        let mut partial_registry = CLValue::into_t::<SystemHashRegistry>(partial_cl_registry)\n            .map_err(|error| GenesisError::CLValue(error.to_string()))?;\n        partial_registry.insert(contract_name.to_string(), contract_hash.value());\n        let cl_registry = CLValue::from_t(partial_registry)\n            .map_err(|error| GenesisError::CLValue(error.to_string()))?;\n        self.tracking_copy\n            .borrow_mut()\n            .write(Key::SystemEntityRegistry, StoredValue::CLValue(cl_registry));\n        Ok(())\n    }\n\n    fn store_chainspec_registry(\n        &self,\n        chainspec_registry: ChainspecRegistry,\n    ) -> Result<(), Box<GenesisError>> {\n        if chainspec_registry.genesis_accounts_raw_hash().is_none() {\n            return Err(GenesisError::MissingChainspecRegistryEntry.into());\n        }\n        let cl_value_registry = CLValue::from_t(chainspec_registry)\n            .map_err(|error| GenesisError::CLValue(error.to_string()))?;\n\n        self.tracking_copy.borrow_mut().write(\n            Key::ChainspecRegistry,\n            StoredValue::CLValue(cl_value_registry),\n        );\n        Ok(())\n    }\n\n    /// Writes a tracking record to global state for block time / genesis timestamp.\n    fn store_block_time(&self) -> Result<(), Box<GenesisError>> {\n        let cl_value = CLValue::from_t(self.config.genesis_timestamp_millis())\n            
.map_err(|error| GenesisError::CLValue(error.to_string()))?;\n\n        self.tracking_copy.borrow_mut().write(\n            Key::BlockGlobal(BlockGlobalAddr::BlockTime),\n            StoredValue::CLValue(cl_value),\n        );\n        Ok(())\n    }\n\n    pub(crate) fn handle_sustain_purse(\n        &mut self,\n        sustain_purse: Option<URef>,\n        mint_key: Key,\n    ) -> Result<(), Box<GenesisError>> {\n        if sustain_purse.is_none() {\n            return Ok(());\n        }\n\n        // This is safe because we early exit on the none case\n        let sustain_purse = sustain_purse.unwrap();\n        let named_key_value = StoredValue::CLValue(\n            CLValue::from_t((MINT_SUSTAIN_PURSE_KEY.to_string(), Key::URef(sustain_purse)))\n                .map_err(|cl_error| Box::new(GenesisError::CLValue(cl_error.to_string())))?,\n        );\n\n        match self\n            .tracking_copy\n            .borrow_mut()\n            .add(mint_key, named_key_value)\n        {\n            Err(storage_error) => Err(Box::new(GenesisError::TrackingCopy(storage_error))),\n            Ok(AddResult::Success) => Ok(()),\n            Ok(AddResult::KeyNotFound(_)) => Err(Box::new(GenesisError::InvalidMintKey)),\n            Ok(AddResult::TypeMismatch(_)) | Ok(AddResult::Transform(_)) => Err(Box::new(\n                GenesisError::CLValue(\"Unable to add sustain purse\".to_string()),\n            )),\n            Ok(AddResult::Serialization(error)) => Err(Box::new(GenesisError::Bytesrepr(error))),\n        }\n    }\n\n    /// Performs a complete system installation.\n    pub(crate) fn install(\n        &mut self,\n        chainspec_registry: ChainspecRegistry,\n    ) -> Result<(), Box<GenesisError>> {\n        // self.setup_system_account()?;\n        // Create mint\n        let (total_supply_key, mint_key) = self.create_mint()?;\n\n        let payment_purse_uref = self.create_purse(U512::zero())?;\n\n        // Create all genesis accounts\n        let sustain_purse 
= self.create_accounts(total_supply_key, payment_purse_uref)?;\n\n        self.handle_sustain_purse(sustain_purse, mint_key)?;\n\n        // Create the auction and setup the stake of all genesis validators.\n        self.create_auction(total_supply_key)?;\n\n        // Create handle payment\n        self.create_handle_payment(payment_purse_uref)?;\n\n        // Write chainspec registry.\n        self.store_chainspec_registry(chainspec_registry)?;\n\n        // Write block time to global state\n        self.store_block_time()?;\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "storage/src/system/genesis/entity_installer.rs",
    "content": "use itertools::Itertools;\nuse num_rational::Ratio;\nuse num_traits::Zero;\nuse rand::Rng;\nuse std::{\n    cell::RefCell,\n    collections::{BTreeMap, BTreeSet},\n    rc::Rc,\n};\n\nuse crate::{\n    global_state::state::StateProvider,\n    system::genesis::{GenesisError, DEFAULT_ADDRESS, NO_WASM},\n    AddressGenerator, TrackingCopy,\n};\nuse casper_types::{\n    addressable_entity::{\n        ActionThresholds, EntityKindTag, MessageTopics, NamedKeyAddr, NamedKeyValue,\n    },\n    contracts::NamedKeys,\n    execution::Effects,\n    system::{\n        auction,\n        auction::{\n            BidAddr, BidKind, DelegatorBid, DelegatorKind, SeigniorageRecipient,\n            SeigniorageRecipientV2, SeigniorageRecipients, SeigniorageRecipientsSnapshot,\n            SeigniorageRecipientsSnapshotV2, SeigniorageRecipientsV2, Staking, ValidatorBid,\n            AUCTION_DELAY_KEY, DEFAULT_SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION,\n            DELEGATION_RATE_DENOMINATOR, ERA_END_TIMESTAMP_MILLIS_KEY, ERA_ID_KEY,\n            INITIAL_ERA_END_TIMESTAMP_MILLIS, INITIAL_ERA_ID, LOCKED_FUNDS_PERIOD_KEY,\n            MINIMUM_DELEGATION_RATE_KEY, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY,\n            SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY, UNBONDING_DELAY_KEY, VALIDATOR_SLOTS_KEY,\n        },\n        handle_payment,\n        handle_payment::ACCUMULATION_PURSE_KEY,\n        mint,\n        mint::{\n            ARG_ROUND_SEIGNIORAGE_RATE, MINT_GAS_HOLD_HANDLING_KEY, MINT_GAS_HOLD_INTERVAL_KEY,\n            ROUND_SEIGNIORAGE_RATE_KEY, TOTAL_SUPPLY_KEY,\n        },\n        SystemEntityType, AUCTION, HANDLE_PAYMENT, MINT,\n    },\n    AccessRights, AddressableEntity, AddressableEntityHash, AdministratorAccount, BlockGlobalAddr,\n    ByteCode, ByteCodeAddr, ByteCodeHash, ByteCodeKind, CLValue, ChainspecRegistry, Digest,\n    EntityAddr, EntityKind, EntityVersions, EntryPointAddr, EntryPointValue, EntryPoints, EraId,\n    GenesisAccount, GenesisConfig, Groups, 
HashAddr, Key, Motes, Package, PackageHash,\n    PackageStatus, Phase, ProtocolVersion, PublicKey, StoredValue, SystemHashRegistry, Tagged,\n    URef, U512,\n};\n\npub struct EntityGenesisInstaller<S>\nwhere\n    S: StateProvider,\n{\n    protocol_version: ProtocolVersion,\n    config: GenesisConfig,\n    address_generator: Rc<RefCell<AddressGenerator>>,\n    tracking_copy: Rc<RefCell<TrackingCopy<<S as StateProvider>::Reader>>>,\n}\n\nimpl<S> EntityGenesisInstaller<S>\nwhere\n    S: StateProvider,\n{\n    pub fn new(\n        genesis_config_hash: Digest,\n        protocol_version: ProtocolVersion,\n        config: GenesisConfig,\n        tracking_copy: Rc<RefCell<TrackingCopy<<S as StateProvider>::Reader>>>,\n    ) -> Self {\n        let phase = Phase::System;\n        let genesis_config_hash_bytes = genesis_config_hash.as_ref();\n\n        let address_generator = {\n            let generator = AddressGenerator::new(genesis_config_hash_bytes, phase);\n            Rc::new(RefCell::new(generator))\n        };\n\n        EntityGenesisInstaller {\n            protocol_version,\n            config,\n            address_generator,\n            tracking_copy,\n        }\n    }\n\n    pub fn finalize(self) -> Effects {\n        self.tracking_copy.borrow().effects()\n    }\n\n    fn setup_system_account(&mut self) -> Result<(), Box<GenesisError>> {\n        let system_account_addr = PublicKey::System.to_account_hash();\n\n        self.store_addressable_entity(\n            EntityKind::Account(system_account_addr),\n            NO_WASM,\n            None,\n            None,\n            self.create_purse(U512::zero())?,\n        )?;\n\n        Ok(())\n    }\n\n    fn create_mint(&mut self) -> Result<Key, Box<GenesisError>> {\n        let round_seigniorage_rate_uref =\n            {\n                let round_seigniorage_rate_uref = self\n                    .address_generator\n                    .borrow_mut()\n                    
.new_uref(AccessRights::READ_ADD_WRITE);\n\n                let (round_seigniorage_rate_numer, round_seigniorage_rate_denom) =\n                    self.config.round_seigniorage_rate().into();\n                let round_seigniorage_rate: Ratio<U512> = Ratio::new(\n                    round_seigniorage_rate_numer.into(),\n                    round_seigniorage_rate_denom.into(),\n                );\n\n                self.tracking_copy.borrow_mut().write(\n                    round_seigniorage_rate_uref.into(),\n                    StoredValue::CLValue(CLValue::from_t(round_seigniorage_rate).map_err(\n                        |_| GenesisError::CLValue(ARG_ROUND_SEIGNIORAGE_RATE.to_string()),\n                    )?),\n                );\n                round_seigniorage_rate_uref\n            };\n\n        let total_supply_uref = {\n            let total_supply_uref = self\n                .address_generator\n                .borrow_mut()\n                .new_uref(AccessRights::READ_ADD_WRITE);\n\n            self.tracking_copy.borrow_mut().write(\n                total_supply_uref.into(),\n                StoredValue::CLValue(\n                    CLValue::from_t(U512::zero())\n                        .map_err(|_| GenesisError::CLValue(TOTAL_SUPPLY_KEY.to_string()))?,\n                ),\n            );\n            total_supply_uref\n        };\n\n        let gas_hold_handling_uref =\n            {\n                let gas_hold_handling = self.config.gas_hold_balance_handling().tag();\n                let gas_hold_handling_uref = self\n                    .address_generator\n                    .borrow_mut()\n                    .new_uref(AccessRights::READ_ADD_WRITE);\n\n                self.tracking_copy.borrow_mut().write(\n                    gas_hold_handling_uref.into(),\n                    StoredValue::CLValue(CLValue::from_t(gas_hold_handling).map_err(|_| {\n                        GenesisError::CLValue(MINT_GAS_HOLD_HANDLING_KEY.to_string())\n            
        })?),\n                );\n                gas_hold_handling_uref\n            };\n\n        let gas_hold_interval_uref =\n            {\n                let gas_hold_interval = self.config.gas_hold_interval_millis();\n                let gas_hold_interval_uref = self\n                    .address_generator\n                    .borrow_mut()\n                    .new_uref(AccessRights::READ_ADD_WRITE);\n\n                self.tracking_copy.borrow_mut().write(\n                    gas_hold_interval_uref.into(),\n                    StoredValue::CLValue(CLValue::from_t(gas_hold_interval).map_err(|_| {\n                        GenesisError::CLValue(MINT_GAS_HOLD_INTERVAL_KEY.to_string())\n                    })?),\n                );\n                gas_hold_interval_uref\n            };\n\n        let named_keys = {\n            let mut named_keys = NamedKeys::new();\n            named_keys.insert(\n                ROUND_SEIGNIORAGE_RATE_KEY.to_string(),\n                round_seigniorage_rate_uref.into(),\n            );\n            named_keys.insert(TOTAL_SUPPLY_KEY.to_string(), total_supply_uref.into());\n            named_keys.insert(\n                MINT_GAS_HOLD_HANDLING_KEY.to_string(),\n                gas_hold_handling_uref.into(),\n            );\n            named_keys.insert(\n                MINT_GAS_HOLD_INTERVAL_KEY.to_string(),\n                gas_hold_interval_uref.into(),\n            );\n            named_keys\n        };\n\n        let entry_points = mint::mint_entry_points();\n\n        let contract_hash = self.store_system_contract(\n            named_keys,\n            entry_points,\n            EntityKind::System(SystemEntityType::Mint),\n        )?;\n\n        {\n            // Insert a partial registry into global state.\n            // This allows for default values to be accessible when the remaining system contracts\n            // call the `call_host_mint` function during their creation.\n            let mut partial_registry 
= BTreeMap::<String, AddressableEntityHash>::new();\n            partial_registry.insert(MINT.to_string(), contract_hash);\n            partial_registry.insert(HANDLE_PAYMENT.to_string(), DEFAULT_ADDRESS.into());\n            let cl_registry = CLValue::from_t(partial_registry)\n                .map_err(|error| GenesisError::CLValue(error.to_string()))?;\n            self.tracking_copy\n                .borrow_mut()\n                .write(Key::SystemEntityRegistry, StoredValue::CLValue(cl_registry));\n        }\n\n        Ok(total_supply_uref.into())\n    }\n\n    fn create_handle_payment(&self) -> Result<HashAddr, Box<GenesisError>> {\n        let handle_payment_payment_purse = self.create_purse(U512::zero())?;\n        let named_keys = {\n            let mut named_keys = NamedKeys::new();\n            let named_key = Key::URef(handle_payment_payment_purse);\n            named_keys.insert(handle_payment::PAYMENT_PURSE_KEY.to_string(), named_key);\n\n            // This purse is used only in FeeHandling::Accumulate setting.\n            let accumulation_purse_uref = self.create_purse(U512::zero())?;\n            named_keys.insert(\n                ACCUMULATION_PURSE_KEY.to_string(),\n                accumulation_purse_uref.into(),\n            );\n            named_keys\n        };\n\n        let entry_points = handle_payment::handle_payment_entry_points();\n\n        let contract_hash = self.store_system_contract(\n            named_keys,\n            entry_points,\n            EntityKind::System(SystemEntityType::HandlePayment),\n        )?;\n\n        self.store_system_entity_registry(HANDLE_PAYMENT, contract_hash.value())?;\n\n        Ok(contract_hash.value())\n    }\n\n    fn create_auction(&self, total_supply_key: Key) -> Result<HashAddr, Box<GenesisError>> {\n        let locked_funds_period_millis = self.config.locked_funds_period_millis();\n        let auction_delay: u64 = self.config.auction_delay();\n        let genesis_timestamp_millis: u64 = 
self.config.genesis_timestamp_millis();\n        let minimum_delegation_rate = self.config.minimum_delegation_rate();\n\n        let mut named_keys = NamedKeys::new();\n\n        let minimum_delegation_rate_uref = self\n            .address_generator\n            .borrow_mut()\n            .new_uref(AccessRights::READ_ADD_WRITE);\n        let cl_value = CLValue::from_t(minimum_delegation_rate)\n            .map_err(|cl_error| GenesisError::CLValue(cl_error.to_string()))?;\n        self.tracking_copy.borrow_mut().write(\n            minimum_delegation_rate_uref.into(),\n            StoredValue::CLValue(cl_value),\n        );\n        named_keys.insert(\n            MINIMUM_DELEGATION_RATE_KEY.into(),\n            minimum_delegation_rate_uref.into(),\n        );\n\n        let genesis_validators: Vec<_> = self.config.get_bonded_validators().collect();\n        if (self.config.validator_slots() as usize) < genesis_validators.len() {\n            return Err(GenesisError::InvalidValidatorSlots {\n                validators: genesis_validators.len(),\n                validator_slots: self.config.validator_slots(),\n            }\n            .into());\n        }\n\n        let genesis_delegators: Vec<_> = self.config.get_bonded_delegators().collect();\n\n        // Make sure all delegators have corresponding genesis validator entries\n        for (validator_public_key, delegator_public_key, _, delegated_amount) in\n            genesis_delegators.iter()\n        {\n            if *delegated_amount == &Motes::zero() {\n                return Err(GenesisError::InvalidDelegatedAmount {\n                    public_key: (*delegator_public_key).clone(),\n                }\n                .into());\n            }\n\n            let orphan_condition = genesis_validators.iter().find(|genesis_validator| {\n                genesis_validator.public_key() == (*validator_public_key).clone()\n            });\n\n            if orphan_condition.is_none() {\n                return 
Err(GenesisError::OrphanedDelegator {\n                    validator_public_key: (*validator_public_key).clone(),\n                    delegator_public_key: (*delegator_public_key).clone(),\n                }\n                .into());\n            }\n        }\n\n        let mut total_staked_amount = U512::zero();\n\n        let staked = {\n            let mut staked: Staking = BTreeMap::new();\n\n            for genesis_validator in genesis_validators {\n                let public_key = genesis_validator.public_key();\n                let mut delegators = BTreeMap::new();\n\n                let staked_amount = genesis_validator.staked_amount().value();\n                if staked_amount.is_zero() {\n                    return Err(GenesisError::InvalidBondAmount { public_key }.into());\n                }\n\n                let delegation_rate = genesis_validator.delegation_rate();\n                if delegation_rate > DELEGATION_RATE_DENOMINATOR {\n                    return Err(GenesisError::InvalidDelegationRate {\n                        public_key,\n                        delegation_rate,\n                    }\n                    .into());\n                }\n                debug_assert_ne!(public_key, PublicKey::System);\n\n                total_staked_amount += staked_amount;\n\n                let purse_uref = self.create_purse(staked_amount)?;\n                let release_timestamp_millis =\n                    genesis_timestamp_millis + locked_funds_period_millis;\n                let validator_bid = {\n                    let bid = ValidatorBid::locked(\n                        public_key.clone(),\n                        purse_uref,\n                        staked_amount,\n                        delegation_rate,\n                        release_timestamp_millis,\n                        0,\n                        u64::MAX,\n                        0,\n                    );\n\n                    // Set up delegator entries attached to genesis 
validators\n                    for (\n                        validator_public_key,\n                        delegator_public_key,\n                        _delegator_balance,\n                        delegator_delegated_amount,\n                    ) in genesis_delegators.iter()\n                    {\n                        if (*validator_public_key).clone() == public_key.clone() {\n                            total_staked_amount += delegator_delegated_amount.value();\n                            let purse_uref =\n                                self.create_purse(delegator_delegated_amount.value())?;\n\n                            let delegator_kind: DelegatorKind =\n                                (*delegator_public_key).clone().into();\n                            let delegator = DelegatorBid::locked(\n                                delegator_kind.clone(),\n                                delegator_delegated_amount.value(),\n                                purse_uref,\n                                (*validator_public_key).clone(),\n                                release_timestamp_millis,\n                            );\n\n                            if delegators.insert(delegator_kind, delegator).is_some() {\n                                return Err(GenesisError::DuplicatedDelegatorEntry {\n                                    validator_public_key: (*validator_public_key).clone(),\n                                    delegator_public_key: (*delegator_public_key).clone(),\n                                }\n                                .into());\n                            }\n                        }\n                    }\n\n                    bid\n                };\n\n                staked.insert(public_key, (validator_bid, delegators));\n            }\n            staked\n        };\n\n        let _ = self.tracking_copy.borrow_mut().add(\n            total_supply_key,\n            StoredValue::CLValue(\n                
CLValue::from_t(total_staked_amount)\n                    .map_err(|_| GenesisError::CLValue(TOTAL_SUPPLY_KEY.to_string()))?,\n            ),\n        );\n\n        let initial_seigniorage_recipients =\n            self.initial_seigniorage_recipients(&staked, auction_delay);\n\n        let era_id_uref = self\n            .address_generator\n            .borrow_mut()\n            .new_uref(AccessRights::READ_ADD_WRITE);\n        self.tracking_copy.borrow_mut().write(\n            era_id_uref.into(),\n            StoredValue::CLValue(\n                CLValue::from_t(INITIAL_ERA_ID)\n                    .map_err(|_| GenesisError::CLValue(ERA_ID_KEY.to_string()))?,\n            ),\n        );\n        named_keys.insert(ERA_ID_KEY.into(), era_id_uref.into());\n\n        let era_end_timestamp_millis_uref = self\n            .address_generator\n            .borrow_mut()\n            .new_uref(AccessRights::READ_ADD_WRITE);\n        self.tracking_copy.borrow_mut().write(\n            era_end_timestamp_millis_uref.into(),\n            StoredValue::CLValue(\n                CLValue::from_t(INITIAL_ERA_END_TIMESTAMP_MILLIS)\n                    .map_err(|_| GenesisError::CLValue(ERA_END_TIMESTAMP_MILLIS_KEY.to_string()))?,\n            ),\n        );\n        named_keys.insert(\n            ERA_END_TIMESTAMP_MILLIS_KEY.into(),\n            era_end_timestamp_millis_uref.into(),\n        );\n\n        let initial_seigniorage_recipients_uref = self\n            .address_generator\n            .borrow_mut()\n            .new_uref(AccessRights::READ_ADD_WRITE);\n        self.tracking_copy.borrow_mut().write(\n            initial_seigniorage_recipients_uref.into(),\n            StoredValue::CLValue(CLValue::from_t(initial_seigniorage_recipients).map_err(\n                |_| GenesisError::CLValue(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY.to_string()),\n            )?),\n        );\n        named_keys.insert(\n            SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY.into(),\n            
initial_seigniorage_recipients_uref.into(),\n        );\n\n        // initialize snapshot version flag\n        let initial_seigniorage_recipients_version_uref = self\n            .address_generator\n            .borrow_mut()\n            .new_uref(AccessRights::READ_ADD_WRITE);\n        self.tracking_copy.borrow_mut().write(\n            initial_seigniorage_recipients_version_uref.into(),\n            StoredValue::CLValue(\n                CLValue::from_t(DEFAULT_SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION).map_err(|_| {\n                    GenesisError::CLValue(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY.to_string())\n                })?,\n            ),\n        );\n\n        named_keys.insert(\n            SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY.into(),\n            initial_seigniorage_recipients_version_uref.into(),\n        );\n\n        // store all delegator and validator bids\n        for (validator_public_key, (validator_bid, delegators)) in staked {\n            for (delegator_kind, delegator_bid) in delegators {\n                let delegator_bid_key = Key::BidAddr(BidAddr::new_delegator_kind(\n                    &validator_public_key,\n                    &delegator_kind,\n                ));\n                self.tracking_copy.borrow_mut().write(\n                    delegator_bid_key,\n                    StoredValue::BidKind(BidKind::Delegator(Box::new(delegator_bid))),\n                );\n            }\n            let validator_bid_key = Key::BidAddr(BidAddr::from(validator_public_key.clone()));\n            self.tracking_copy.borrow_mut().write(\n                validator_bid_key,\n                StoredValue::BidKind(BidKind::Validator(Box::new(validator_bid))),\n            );\n        }\n\n        let validator_slots = self.config.validator_slots();\n        let validator_slots_uref = self\n            .address_generator\n            .borrow_mut()\n            .new_uref(AccessRights::READ_ADD_WRITE);\n        
self.tracking_copy.borrow_mut().write(\n            validator_slots_uref.into(),\n            StoredValue::CLValue(\n                CLValue::from_t(validator_slots)\n                    .map_err(|_| GenesisError::CLValue(VALIDATOR_SLOTS_KEY.to_string()))?,\n            ),\n        );\n        named_keys.insert(VALIDATOR_SLOTS_KEY.into(), validator_slots_uref.into());\n\n        let auction_delay_uref = self\n            .address_generator\n            .borrow_mut()\n            .new_uref(AccessRights::READ_ADD_WRITE);\n        self.tracking_copy.borrow_mut().write(\n            auction_delay_uref.into(),\n            StoredValue::CLValue(\n                CLValue::from_t(auction_delay)\n                    .map_err(|_| GenesisError::CLValue(AUCTION_DELAY_KEY.to_string()))?,\n            ),\n        );\n        named_keys.insert(AUCTION_DELAY_KEY.into(), auction_delay_uref.into());\n\n        let locked_funds_period_uref = self\n            .address_generator\n            .borrow_mut()\n            .new_uref(AccessRights::READ_ADD_WRITE);\n        self.tracking_copy.borrow_mut().write(\n            locked_funds_period_uref.into(),\n            StoredValue::CLValue(\n                CLValue::from_t(locked_funds_period_millis)\n                    .map_err(|_| GenesisError::CLValue(LOCKED_FUNDS_PERIOD_KEY.to_string()))?,\n            ),\n        );\n        named_keys.insert(\n            LOCKED_FUNDS_PERIOD_KEY.into(),\n            locked_funds_period_uref.into(),\n        );\n\n        let unbonding_delay = self.config.unbonding_delay();\n        let unbonding_delay_uref = self\n            .address_generator\n            .borrow_mut()\n            .new_uref(AccessRights::READ_ADD_WRITE);\n        self.tracking_copy.borrow_mut().write(\n            unbonding_delay_uref.into(),\n            StoredValue::CLValue(\n                CLValue::from_t(unbonding_delay)\n                    .map_err(|_| GenesisError::CLValue(UNBONDING_DELAY_KEY.to_string()))?,\n            
),\n        );\n        named_keys.insert(UNBONDING_DELAY_KEY.into(), unbonding_delay_uref.into());\n\n        let entry_points = auction::auction_entry_points();\n\n        let contract_hash = self.store_system_contract(\n            named_keys,\n            entry_points,\n            EntityKind::System(SystemEntityType::Auction),\n        )?;\n\n        self.store_system_entity_registry(AUCTION, contract_hash.value())?;\n\n        Ok(contract_hash.value())\n    }\n\n    pub fn create_accounts(&self, total_supply_key: Key) -> Result<(), Box<GenesisError>> {\n        let accounts = {\n            let mut ret: Vec<GenesisAccount> = self.config.accounts_iter().cloned().collect();\n            let system_account = GenesisAccount::system();\n            ret.push(system_account);\n            ret\n        };\n\n        let mut administrative_accounts = self.config.administrative_accounts().peekable();\n\n        if administrative_accounts.peek().is_some()\n            && administrative_accounts\n                .duplicates_by(|admin| admin.public_key())\n                .next()\n                .is_some()\n        {\n            // Ensure no duplicate administrator accounts are specified as this might raise errors\n            // during genesis process when administrator accounts are added to associated keys.\n            return Err(GenesisError::DuplicatedAdministratorEntry.into());\n        }\n\n        let mut total_supply = U512::zero();\n\n        for account in accounts {\n            let account_starting_balance = account.balance().value();\n\n            let main_purse = self.create_purse(account_starting_balance)?;\n\n            self.store_addressable_entity(\n                EntityKind::Account(account.account_hash()),\n                NO_WASM,\n                None,\n                None,\n                main_purse,\n            )?;\n\n            total_supply += account_starting_balance;\n        }\n\n        self.tracking_copy.borrow_mut().write(\n        
    total_supply_key,\n            StoredValue::CLValue(\n                CLValue::from_t(total_supply)\n                    .map_err(|_| GenesisError::CLValue(TOTAL_SUPPLY_KEY.to_string()))?,\n            ),\n        );\n\n        Ok(())\n    }\n\n    fn initial_seigniorage_recipients(\n        &self,\n        staked: &Staking,\n        auction_delay: u64,\n    ) -> BTreeMap<EraId, SeigniorageRecipientsV2> {\n        let initial_snapshot_range = INITIAL_ERA_ID.iter_inclusive(auction_delay);\n\n        let mut seigniorage_recipients = SeigniorageRecipientsV2::new();\n        for (validator_public_key, (validator_bid, delegators)) in staked {\n            let mut delegator_stake = BTreeMap::new();\n            for (k, v) in delegators {\n                delegator_stake.insert(k.clone(), v.staked_amount());\n            }\n            let recipient = SeigniorageRecipientV2::new(\n                validator_bid.staked_amount(),\n                *validator_bid.delegation_rate(),\n                delegator_stake,\n                BTreeMap::new(),\n            );\n            seigniorage_recipients.insert(validator_public_key.clone(), recipient);\n        }\n\n        let mut initial_seigniorage_recipients = SeigniorageRecipientsSnapshotV2::new();\n        for era_id in initial_snapshot_range {\n            initial_seigniorage_recipients.insert(era_id, seigniorage_recipients.clone());\n        }\n        initial_seigniorage_recipients\n    }\n\n    fn create_purse(&self, amount: U512) -> Result<URef, Box<GenesisError>> {\n        let purse_addr = self.address_generator.borrow_mut().create_address();\n\n        let balance_cl_value =\n            CLValue::from_t(amount).map_err(|error| GenesisError::CLValue(error.to_string()))?;\n        self.tracking_copy.borrow_mut().write(\n            Key::Balance(purse_addr),\n            StoredValue::CLValue(balance_cl_value),\n        );\n\n        let purse_cl_value = CLValue::unit();\n        let purse_uref = URef::new(purse_addr, 
AccessRights::READ_ADD_WRITE);\n        self.tracking_copy\n            .borrow_mut()\n            .write(Key::URef(purse_uref), StoredValue::CLValue(purse_cl_value));\n\n        Ok(purse_uref)\n    }\n\n    fn store_system_contract(\n        &self,\n        named_keys: NamedKeys,\n        entry_points: EntryPoints,\n        contract_package_kind: EntityKind,\n    ) -> Result<AddressableEntityHash, Box<GenesisError>> {\n        self.store_addressable_entity(\n            contract_package_kind,\n            NO_WASM,\n            Some(named_keys),\n            Some(entry_points),\n            self.create_purse(U512::zero())?,\n        )\n    }\n\n    fn store_addressable_entity(\n        &self,\n        entity_kind: EntityKind,\n        no_wasm: bool,\n        maybe_named_keys: Option<NamedKeys>,\n        maybe_entry_points: Option<EntryPoints>,\n        main_purse: URef,\n    ) -> Result<AddressableEntityHash, Box<GenesisError>> {\n        let protocol_version = self.protocol_version;\n        let byte_code_hash = if no_wasm {\n            ByteCodeHash::new(DEFAULT_ADDRESS)\n        } else {\n            ByteCodeHash::new(self.address_generator.borrow_mut().new_hash_address())\n        };\n\n        let entity_hash = match entity_kind {\n            EntityKind::System(_) | EntityKind::SmartContract(_) => {\n                AddressableEntityHash::new(self.address_generator.borrow_mut().new_hash_address())\n            }\n            EntityKind::Account(account_hash) => {\n                if entity_kind.is_system_account() {\n                    let entity_hash_addr = PublicKey::System.to_account_hash().value();\n                    AddressableEntityHash::new(entity_hash_addr)\n                } else {\n                    AddressableEntityHash::new(account_hash.value())\n                }\n            }\n        };\n\n        let entity_addr = match entity_kind.tag() {\n            EntityKindTag::System => EntityAddr::new_system(entity_hash.value()),\n            
EntityKindTag::Account => EntityAddr::new_account(entity_hash.value()),\n            EntityKindTag::SmartContract => EntityAddr::new_smart_contract(entity_hash.value()),\n        };\n\n        let package_hash = PackageHash::new(self.address_generator.borrow_mut().new_hash_address());\n\n        let byte_code = ByteCode::new(ByteCodeKind::Empty, vec![]);\n        let associated_keys = entity_kind.associated_keys();\n        let maybe_account_hash = entity_kind.maybe_account_hash();\n        let named_keys = maybe_named_keys.unwrap_or_default();\n\n        self.store_system_contract_named_keys(entity_hash, named_keys)?;\n        if let Some(entry_point) = maybe_entry_points {\n            self.store_system_entry_points(entity_hash, entry_point)?;\n        }\n\n        let entity = AddressableEntity::new(\n            package_hash,\n            byte_code_hash,\n            protocol_version,\n            main_purse,\n            associated_keys,\n            ActionThresholds::default(),\n            entity_kind,\n        );\n\n        // Genesis contracts can be versioned contracts.\n        let contract_package = {\n            let mut package = Package::new(\n                EntityVersions::new(),\n                BTreeSet::default(),\n                Groups::default(),\n                PackageStatus::default(),\n            );\n            package.insert_entity_version(protocol_version.value().major, entity_addr);\n            package\n        };\n\n        let byte_code_key = Key::ByteCode(ByteCodeAddr::Empty);\n\n        self.tracking_copy\n            .borrow_mut()\n            .write(byte_code_key, StoredValue::ByteCode(byte_code));\n\n        let entity_key: Key = entity_addr.into();\n\n        self.tracking_copy\n            .borrow_mut()\n            .write(entity_key, StoredValue::AddressableEntity(entity));\n\n        self.tracking_copy.borrow_mut().write(\n            package_hash.into(),\n            StoredValue::SmartContract(contract_package),\n        
);\n\n        if let Some(account_hash) = maybe_account_hash {\n            let entity_by_account = CLValue::from_t(entity_key)\n                .map_err(|error| GenesisError::CLValue(error.to_string()))?;\n\n            self.tracking_copy.borrow_mut().write(\n                Key::Account(account_hash),\n                StoredValue::CLValue(entity_by_account),\n            );\n        }\n\n        Ok(entity_hash)\n    }\n\n    fn store_system_contract_named_keys(\n        &self,\n        contract_hash: AddressableEntityHash,\n        named_keys: NamedKeys,\n    ) -> Result<(), Box<GenesisError>> {\n        let entity_addr = EntityAddr::new_system(contract_hash.value());\n\n        for (string, key) in named_keys.iter() {\n            let named_key_entry = NamedKeyAddr::new_from_string(entity_addr, string.clone())\n                .map_err(GenesisError::Bytesrepr)?;\n\n            let named_key_value = NamedKeyValue::from_concrete_values(*key, string.clone())\n                .map_err(|error| GenesisError::CLValue(error.to_string()))?;\n\n            let entry_key = Key::NamedKey(named_key_entry);\n\n            self.tracking_copy\n                .borrow_mut()\n                .write(entry_key, StoredValue::NamedKey(named_key_value));\n        }\n\n        Ok(())\n    }\n\n    fn store_system_entry_points(\n        &self,\n        contract_hash: AddressableEntityHash,\n        entry_points: EntryPoints,\n    ) -> Result<(), Box<GenesisError>> {\n        let entity_addr = EntityAddr::new_system(contract_hash.value());\n\n        for entry_point in entry_points.take_entry_points() {\n            let entry_point_addr =\n                EntryPointAddr::new_v1_entry_point_addr(entity_addr, entry_point.name())\n                    .map_err(GenesisError::Bytesrepr)?;\n            self.tracking_copy.borrow_mut().write(\n                Key::EntryPoint(entry_point_addr),\n                StoredValue::EntryPoint(EntryPointValue::V1CasperVm(entry_point)),\n            )\n     
   }\n\n        Ok(())\n    }\n\n    fn store_system_entity_registry(\n        &self,\n        contract_name: &str,\n        contract_hash: HashAddr,\n    ) -> Result<(), Box<GenesisError>> {\n        let partial_cl_registry = self\n            .tracking_copy\n            .borrow_mut()\n            .read(&Key::SystemEntityRegistry)\n            .map_err(|_| GenesisError::FailedToCreateSystemRegistry)?\n            .ok_or_else(|| {\n                GenesisError::CLValue(\"failed to convert registry as stored value\".to_string())\n            })?\n            .into_cl_value()\n            .ok_or_else(|| GenesisError::CLValue(\"failed to convert to CLValue\".to_string()))?;\n        let mut partial_registry = CLValue::into_t::<SystemHashRegistry>(partial_cl_registry)\n            .map_err(|error| GenesisError::CLValue(error.to_string()))?;\n        partial_registry.insert(contract_name.to_string(), contract_hash);\n        let cl_registry = CLValue::from_t(partial_registry)\n            .map_err(|error| GenesisError::CLValue(error.to_string()))?;\n        self.tracking_copy\n            .borrow_mut()\n            .write(Key::SystemEntityRegistry, StoredValue::CLValue(cl_registry));\n        Ok(())\n    }\n\n    fn store_chainspec_registry(\n        &self,\n        chainspec_registry: ChainspecRegistry,\n    ) -> Result<(), Box<GenesisError>> {\n        if chainspec_registry.genesis_accounts_raw_hash().is_none() {\n            return Err(GenesisError::MissingChainspecRegistryEntry.into());\n        }\n        let cl_value_registry = CLValue::from_t(chainspec_registry)\n            .map_err(|error| GenesisError::CLValue(error.to_string()))?;\n\n        self.tracking_copy.borrow_mut().write(\n            Key::ChainspecRegistry,\n            StoredValue::CLValue(cl_value_registry),\n        );\n        Ok(())\n    }\n\n    /// Writes a tracking record to global state for block time / genesis timestamp.\n    fn store_block_time(&self) -> Result<(), Box<GenesisError>> {\n   
     let cl_value = CLValue::from_t(self.config.genesis_timestamp_millis())\n            .map_err(|error| GenesisError::CLValue(error.to_string()))?;\n\n        self.tracking_copy.borrow_mut().write(\n            Key::BlockGlobal(BlockGlobalAddr::BlockTime),\n            StoredValue::CLValue(cl_value),\n        );\n        Ok(())\n    }\n\n    /// Performs a complete system installation.\n    pub fn install(\n        &mut self,\n        chainspec_registry: ChainspecRegistry,\n    ) -> Result<(), Box<GenesisError>> {\n        // Setup system account\n        self.setup_system_account()?;\n\n        // Create mint\n        let total_supply_key = self.create_mint()?;\n\n        // Create all genesis accounts\n        self.create_accounts(total_supply_key)?;\n\n        // Create the auction and setup the stake of all genesis validators.\n        self.create_auction(total_supply_key)?;\n\n        // Create handle payment\n        self.create_handle_payment()?;\n\n        // Write chainspec registry.\n        self.store_chainspec_registry(chainspec_registry)?;\n\n        // Write block time to global state\n        self.store_block_time()?;\n\n        Ok(())\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::AsymmetricType;\n    use rand::RngCore;\n\n    use casper_types::{bytesrepr, SecretKey};\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let mut rng = rand::thread_rng();\n        let genesis_account: GenesisAccount = rng.gen();\n        bytesrepr::test_serialization_roundtrip(&genesis_account);\n    }\n\n    #[test]\n    fn system_account_bytesrepr_roundtrip() {\n        let genesis_account = GenesisAccount::system();\n\n        bytesrepr::test_serialization_roundtrip(&genesis_account);\n    }\n\n    #[test]\n    fn genesis_account_bytesrepr_roundtrip() {\n        let mut rng = rand::thread_rng();\n        let mut bytes = [0u8; 32];\n        rng.fill_bytes(&mut bytes[..]);\n        let secret_key = 
SecretKey::ed25519_from_bytes(bytes).unwrap();\n        let public_key: PublicKey = PublicKey::from(&secret_key);\n\n        let genesis_account_1 = GenesisAccount::account(public_key.clone(), Motes::new(100), None);\n\n        bytesrepr::test_serialization_roundtrip(&genesis_account_1);\n\n        let genesis_account_2 =\n            GenesisAccount::account(public_key, Motes::new(100), Some(rng.gen()));\n\n        bytesrepr::test_serialization_roundtrip(&genesis_account_2);\n    }\n\n    #[test]\n    fn delegator_bytesrepr_roundtrip() {\n        let mut rng = rand::thread_rng();\n        let mut validator_bytes = [0u8; 32];\n        let mut delegator_bytes = [0u8; 32];\n        rng.fill_bytes(&mut validator_bytes[..]);\n        rng.fill_bytes(&mut delegator_bytes[..]);\n        let validator_secret_key = SecretKey::ed25519_from_bytes(validator_bytes).unwrap();\n        let delegator_secret_key = SecretKey::ed25519_from_bytes(delegator_bytes).unwrap();\n\n        let validator_public_key = PublicKey::from(&validator_secret_key);\n        let delegator_public_key = PublicKey::from(&delegator_secret_key);\n\n        let genesis_account = GenesisAccount::delegator(\n            validator_public_key,\n            delegator_public_key,\n            Motes::new(100),\n            Motes::zero(),\n        );\n\n        bytesrepr::test_serialization_roundtrip(&genesis_account);\n    }\n\n    #[test]\n    fn administrator_account_bytesrepr_roundtrip() {\n        let administrator_account = AdministratorAccount::new(\n            PublicKey::ed25519_from_bytes([123u8; 32]).unwrap(),\n            Motes::new(U512::MAX),\n        );\n        bytesrepr::test_serialization_roundtrip(&administrator_account);\n    }\n}\n"
  },
  {
    "path": "storage/src/system/genesis.rs",
    "content": "//! Support for a genesis process.\n#![allow(unused_imports)]\n\nuse std::{\n    cell::RefCell,\n    collections::{BTreeMap, BTreeSet},\n    fmt, iter,\n    rc::Rc,\n};\n\nuse itertools::Itertools;\nuse num::Zero;\nuse num_rational::Ratio;\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\nuse serde::{Deserialize, Serialize};\n\nuse casper_types::{\n    addressable_entity::{\n        ActionThresholds, EntityKind, EntityKindTag, MessageTopics, NamedKeyAddr, NamedKeyValue,\n    },\n    bytesrepr,\n    contracts::NamedKeys,\n    execution::Effects,\n    system::{\n        auction::{\n            self, BidAddr, BidKind, DelegationRate, Delegator, SeigniorageRecipientV2,\n            SeigniorageRecipients, SeigniorageRecipientsSnapshot, SeigniorageRecipientsSnapshotV2,\n            SeigniorageRecipientsV2, Staking, ValidatorBid, AUCTION_DELAY_KEY,\n            DEFAULT_SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION, DELEGATION_RATE_DENOMINATOR,\n            ERA_END_TIMESTAMP_MILLIS_KEY, ERA_ID_KEY, INITIAL_ERA_END_TIMESTAMP_MILLIS,\n            INITIAL_ERA_ID, LOCKED_FUNDS_PERIOD_KEY, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY,\n            SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY, UNBONDING_DELAY_KEY, VALIDATOR_SLOTS_KEY,\n        },\n        handle_payment::{self, ACCUMULATION_PURSE_KEY},\n        mint::{\n            self, ARG_ROUND_SEIGNIORAGE_RATE, MINT_GAS_HOLD_HANDLING_KEY,\n            MINT_GAS_HOLD_INTERVAL_KEY, ROUND_SEIGNIORAGE_RATE_KEY, TOTAL_SUPPLY_KEY,\n        },\n        SystemEntityType, AUCTION, HANDLE_PAYMENT, MINT,\n    },\n    AccessRights, AddressableEntity, AddressableEntityHash, AdministratorAccount, BlockGlobalAddr,\n    BlockTime, ByteCode, ByteCodeAddr, ByteCodeHash, ByteCodeKind, CLValue, Chainspec,\n    ChainspecRegistry, Digest, EntityAddr, EntityVersions, EntryPointAddr, EntryPointValue,\n    EntryPoints, EraId, FeeHandling, GenesisAccount, GenesisConfig, Groups, HashAddr, Key, Motes,\n    Package, PackageHash, 
PackageStatus, Phase, ProtocolVersion, PublicKey, RefundHandling,\n    StoredValue, SystemConfig, SystemHashRegistry, Tagged, TimeDiff, URef, WasmConfig, U512,\n};\n\nuse crate::{\n    global_state::state::StateProvider,\n    system::genesis::{\n        account_contract_installer::AccountContractInstaller,\n        entity_installer::EntityGenesisInstaller,\n    },\n    tracking_copy::{TrackingCopy, TrackingCopyError},\n    AddressGenerator,\n};\n\nmod account_contract_installer;\nmod entity_installer;\n\nconst DEFAULT_ADDRESS: [u8; 32] = [0; 32];\n\nconst NO_WASM: bool = true;\n\n/// Error returned as a result of a failed genesis process.\n#[derive(Clone, Debug)]\npub enum GenesisError {\n    /// Error creating a runtime.\n    StateUninitialized,\n    /// Error obtaining the mint's contract key.\n    InvalidMintKey,\n    /// Missing mint contract.\n    MissingMintContract,\n    /// Unexpected stored value variant.\n    UnexpectedStoredValue,\n    /// Error executing the mint system contract.\n    MintError(mint::Error),\n    /// Error converting a [`CLValue`] to a concrete type.\n    CLValue(String),\n    /// Specified validator does not exist among the genesis accounts.\n    OrphanedDelegator {\n        /// Validator's public key.\n        validator_public_key: PublicKey,\n        /// Delegator's public key.\n        delegator_public_key: PublicKey,\n    },\n    /// Duplicated delegator entry found for a given validator.\n    DuplicatedDelegatorEntry {\n        /// Validator's public key.\n        validator_public_key: PublicKey,\n        /// Delegator's public key.\n        delegator_public_key: PublicKey,\n    },\n    /// Delegation rate outside the allowed range.\n    InvalidDelegationRate {\n        /// Delegator's public key.\n        public_key: PublicKey,\n        /// Invalid delegation rate specified in the genesis account entry.\n        delegation_rate: DelegationRate,\n    },\n    /// Invalid bond amount in a genesis account.\n    InvalidBondAmount {\n  
      /// Validator's public key.\n        public_key: PublicKey,\n    },\n    /// Invalid delegated amount in a genesis account.\n    InvalidDelegatedAmount {\n        /// Delegator's public key.\n        public_key: PublicKey,\n    },\n    /// Failed to create system registry.\n    FailedToCreateSystemRegistry,\n    /// Missing system contract hash.\n    MissingSystemContractHash(String),\n    /// Invalid number of validator slots configured.\n    InvalidValidatorSlots {\n        /// Number of validators in the genesis config.\n        validators: usize,\n        /// Number of validator slots specified.\n        validator_slots: u32,\n    },\n    /// The chainspec registry is missing a required entry.\n    MissingChainspecRegistryEntry,\n    /// Duplicated administrator entry.\n    ///\n    /// This error can occur only on some private chains.\n    DuplicatedAdministratorEntry,\n    /// A bytesrepr Error.\n    Bytesrepr(bytesrepr::Error),\n    /// Genesis process requires initial accounts.\n    MissingGenesisAccounts,\n    /// A tracking copy error.\n    TrackingCopy(TrackingCopyError),\n}\n\nimpl fmt::Display for GenesisError {\n    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {\n        write!(f, \"GenesisError: {:?}\", self)\n    }\n}\n\n/// State for genesis installer.\npub enum GenesisInstaller<S>\nwhere\n    S: StateProvider,\n{\n    /// Install genesis using the Accounts/Contracts model.\n    AccountContract(AccountContractInstaller<S>),\n    /// Install genesis using the Addressable Entity model.\n    Entity(EntityGenesisInstaller<S>),\n}\nimpl<S> GenesisInstaller<S>\nwhere\n    S: StateProvider,\n{\n    /// Ctor.\n    pub fn new(\n        genesis_config_hash: Digest,\n        protocol_version: ProtocolVersion,\n        config: GenesisConfig,\n        tracking_copy: Rc<RefCell<TrackingCopy<<S as StateProvider>::Reader>>>,\n    ) -> Self {\n        if config.enable_entity() {\n            
GenesisInstaller::Entity(EntityGenesisInstaller::new(\n                genesis_config_hash,\n                protocol_version,\n                config,\n                tracking_copy,\n            ))\n        } else {\n            GenesisInstaller::AccountContract(AccountContractInstaller::new(\n                genesis_config_hash,\n                protocol_version,\n                config,\n                tracking_copy,\n            ))\n        }\n    }\n\n    /// Finalize genesis.\n    pub fn finalize(self) -> Effects {\n        match self {\n            GenesisInstaller::AccountContract(installer) => installer.finalize(),\n            GenesisInstaller::Entity(installer) => installer.finalize(),\n        }\n    }\n\n    /// Performs a complete system installation.\n    pub fn install(\n        &mut self,\n        chainspec_registry: ChainspecRegistry,\n    ) -> Result<(), Box<GenesisError>> {\n        match self {\n            GenesisInstaller::AccountContract(installer) => installer.install(chainspec_registry),\n            GenesisInstaller::Entity(installer) => installer.install(chainspec_registry),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use casper_types::AsymmetricType;\n    use rand::RngCore;\n\n    use casper_types::{bytesrepr, SecretKey};\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let mut rng = rand::thread_rng();\n        let genesis_account: GenesisAccount = rng.gen();\n        bytesrepr::test_serialization_roundtrip(&genesis_account);\n    }\n\n    #[test]\n    fn system_account_bytesrepr_roundtrip() {\n        let genesis_account = GenesisAccount::system();\n\n        bytesrepr::test_serialization_roundtrip(&genesis_account);\n    }\n\n    #[test]\n    fn genesis_account_bytesrepr_roundtrip() {\n        let mut rng = rand::thread_rng();\n        let mut bytes = [0u8; 32];\n        rng.fill_bytes(&mut bytes[..]);\n        let secret_key = SecretKey::ed25519_from_bytes(bytes).unwrap();\n        let 
public_key: PublicKey = PublicKey::from(&secret_key);\n\n        let genesis_account_1 = GenesisAccount::account(public_key.clone(), Motes::new(100), None);\n\n        bytesrepr::test_serialization_roundtrip(&genesis_account_1);\n\n        let genesis_account_2 =\n            GenesisAccount::account(public_key, Motes::new(100), Some(rng.gen()));\n\n        bytesrepr::test_serialization_roundtrip(&genesis_account_2);\n    }\n\n    #[test]\n    fn delegator_bytesrepr_roundtrip() {\n        let mut rng = rand::thread_rng();\n        let mut validator_bytes = [0u8; 32];\n        let mut delegator_bytes = [0u8; 32];\n        rng.fill_bytes(&mut validator_bytes[..]);\n        rng.fill_bytes(&mut delegator_bytes[..]);\n        let validator_secret_key = SecretKey::ed25519_from_bytes(validator_bytes).unwrap();\n        let delegator_secret_key = SecretKey::ed25519_from_bytes(delegator_bytes).unwrap();\n\n        let validator_public_key = PublicKey::from(&validator_secret_key);\n        let delegator_public_key = PublicKey::from(&delegator_secret_key);\n\n        let genesis_account = GenesisAccount::delegator(\n            validator_public_key,\n            delegator_public_key,\n            Motes::new(100),\n            Motes::zero(),\n        );\n\n        bytesrepr::test_serialization_roundtrip(&genesis_account);\n    }\n\n    #[test]\n    fn administrator_account_bytesrepr_roundtrip() {\n        let administrator_account = AdministratorAccount::new(\n            PublicKey::ed25519_from_bytes([123u8; 32]).unwrap(),\n            Motes::new(U512::MAX),\n        );\n        bytesrepr::test_serialization_roundtrip(&administrator_account);\n    }\n}\n"
  },
  {
    "path": "storage/src/system/handle_payment/handle_payment_native.rs",
    "content": "use crate::{\n    global_state::{error::Error as GlobalStateError, state::StateReader},\n    system::{\n        handle_payment::{\n            mint_provider::MintProvider, runtime_provider::RuntimeProvider,\n            storage_provider::StorageProvider, HandlePayment,\n        },\n        mint::Mint,\n        runtime_native::RuntimeNative,\n    },\n    tracking_copy::TrackingCopyEntityExt,\n};\nuse casper_types::{\n    account::AccountHash,\n    addressable_entity::{NamedKeyAddr, NamedKeyValue},\n    system::handle_payment::Error,\n    AccessRights, CLValue, FeeHandling, GrantedAccess, Key, Phase, RefundHandling, StoredValue,\n    TransferredTo, URef, U512,\n};\nuse std::collections::BTreeSet;\nuse tracing::error;\n\nimpl<S> MintProvider for RuntimeNative<S>\nwhere\n    S: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn transfer_purse_to_account(\n        &mut self,\n        source: URef,\n        target: AccountHash,\n        amount: U512,\n    ) -> Result<TransferredTo, Error> {\n        let target_key = Key::Account(target);\n        let target_uref = match self.tracking_copy().borrow_mut().read(&target_key) {\n            Ok(Some(StoredValue::CLValue(cl_value))) => {\n                let entity_key = CLValue::into_t::<Key>(cl_value)\n                    .map_err(|_| Error::FailedTransferToAccountPurse)?;\n                // get entity\n                let target_uref = {\n                    if let Ok(Some(StoredValue::AddressableEntity(entity))) =\n                        self.tracking_copy().borrow_mut().read(&entity_key)\n                    {\n                        entity.main_purse_add_only()\n                    } else {\n                        return Err(Error::Transfer);\n                    }\n                };\n                target_uref\n            } // entity exists\n            Ok(Some(StoredValue::Account(account))) => {\n                if self.config().enable_addressable_entity() {\n                    
self.tracking_copy()\n                        .borrow_mut()\n                        .migrate_account(target, self.protocol_version())\n                        .map_err(|_| Error::Transfer)?;\n                }\n\n                account.main_purse_add_only()\n            }\n            Ok(_) | Err(_) => return Err(Error::Transfer),\n        };\n\n        // source and target are the same, noop\n        if source.with_access_rights(AccessRights::ADD) == target_uref {\n            return Ok(TransferredTo::ExistingAccount);\n        }\n\n        // Temporarily grant ADD access to target if it is not already present.\n        let granted_access = self.access_rights_mut().grant_access(target_uref);\n\n        let transfered = self\n            .transfer_purse_to_purse(source, target_uref, amount)\n            .is_ok();\n\n        // if ADD access was temporarily granted, remove it.\n        if let GrantedAccess::Granted {\n            uref_addr,\n            newly_granted_access_rights,\n        } = granted_access\n        {\n            self.access_rights_mut()\n                .remove_access(uref_addr, newly_granted_access_rights)\n        }\n\n        if transfered {\n            Ok(TransferredTo::ExistingAccount)\n        } else {\n            Err(Error::Transfer)\n        }\n    }\n\n    fn transfer_purse_to_purse(\n        &mut self,\n        source: URef,\n        target: URef,\n        amount: U512,\n    ) -> Result<(), Error> {\n        // system purses do not have holds on them\n        match self.transfer(None, source, target, amount, None) {\n            Ok(ret) => Ok(ret),\n            Err(err) => {\n                error!(\"{}\", err);\n                Err(Error::Transfer)\n            }\n        }\n    }\n\n    fn available_balance(&mut self, purse: URef) -> Result<Option<U512>, Error> {\n        match <Self as Mint>::balance(self, purse) {\n            Ok(ret) => Ok(ret),\n            Err(err) => {\n                error!(\"{}\", err);\n                
Err(Error::GetBalance)\n            }\n        }\n    }\n\n    fn reduce_total_supply(&mut self, amount: U512) -> Result<(), Error> {\n        match <Self as Mint>::reduce_total_supply(self, amount) {\n            Ok(ret) => Ok(ret),\n            Err(err) => {\n                error!(\"{}\", err);\n                Err(Error::ReduceTotalSupply)\n            }\n        }\n    }\n}\n\nimpl<S> RuntimeProvider for RuntimeNative<S>\nwhere\n    S: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn get_key(&mut self, name: &str) -> Option<Key> {\n        self.named_keys().get(name).cloned()\n    }\n\n    fn put_key(&mut self, name: &str, key: Key) -> Result<(), Error> {\n        let name = name.to_string();\n        match self.context_key() {\n            Key::Account(_) | Key::Hash(_) => {\n                let name: String = name.clone();\n                let value = CLValue::from_t((name.clone(), key)).map_err(|_| Error::PutKey)?;\n                let named_key_value = StoredValue::CLValue(value);\n                self.tracking_copy()\n                    .borrow_mut()\n                    .add(*self.context_key(), named_key_value)\n                    .map_err(|_| Error::PutKey)?;\n                self.named_keys_mut().insert(name, key);\n                Ok(())\n            }\n            Key::AddressableEntity(entity_addr) => {\n                let named_key_value = StoredValue::NamedKey(\n                    NamedKeyValue::from_concrete_values(key, name.clone())\n                        .map_err(|_| Error::PutKey)?,\n                );\n                let named_key_addr = NamedKeyAddr::new_from_string(*entity_addr, name.clone())\n                    .map_err(|_| Error::PutKey)?;\n                let named_key = Key::NamedKey(named_key_addr);\n                // write to both tracking copy and in-mem named keys cache\n                self.tracking_copy()\n                    .borrow_mut()\n                    .write(named_key, named_key_value);\n      
          self.named_keys_mut().insert(name, key);\n                Ok(())\n            }\n            _ => Err(Error::UnexpectedKeyVariant),\n        }\n    }\n\n    fn remove_key(&mut self, name: &str) -> Result<(), Error> {\n        self.named_keys_mut().remove(name);\n        match self.context_key() {\n            Key::AddressableEntity(entity_addr) => {\n                let named_key_addr = NamedKeyAddr::new_from_string(*entity_addr, name.to_string())\n                    .map_err(|_| Error::RemoveKey)?;\n                let key = Key::NamedKey(named_key_addr);\n                let value = self\n                    .tracking_copy()\n                    .borrow_mut()\n                    .read(&key)\n                    .map_err(|_| Error::RemoveKey)?;\n                if let Some(StoredValue::NamedKey(_)) = value {\n                    self.tracking_copy().borrow_mut().prune(key);\n                }\n            }\n            Key::Hash(_) => {\n                let mut contract = self\n                    .tracking_copy()\n                    .borrow_mut()\n                    .read(self.context_key())\n                    .map_err(|_| Error::RemoveKey)?\n                    .ok_or(Error::RemoveKey)?\n                    .as_contract()\n                    .ok_or(Error::RemoveKey)?\n                    .clone();\n\n                if contract.remove_named_key(name).is_none() {\n                    return Ok(());\n                }\n\n                self.tracking_copy()\n                    .borrow_mut()\n                    .write(*self.context_key(), StoredValue::Contract(contract))\n            }\n            Key::Account(_) => {\n                let account = {\n                    let mut account = match self\n                        .tracking_copy()\n                        .borrow_mut()\n                        .read(self.context_key())\n                        .map_err(|_| Error::RemoveKey)?\n                    {\n                        
Some(StoredValue::Account(account)) => account,\n                        Some(_) | None => return Err(Error::UnexpectedKeyVariant),\n                    };\n                    account.named_keys_mut().remove(name);\n                    account\n                };\n                self.tracking_copy()\n                    .borrow_mut()\n                    .write(*self.context_key(), StoredValue::Account(account));\n            }\n            _ => return Err(Error::UnexpectedKeyVariant),\n        }\n\n        Ok(())\n    }\n\n    fn get_phase(&self) -> Phase {\n        self.phase()\n    }\n\n    fn get_caller(&self) -> AccountHash {\n        self.address()\n    }\n\n    fn refund_handling(&self) -> RefundHandling {\n        *self.config().refund_handling()\n    }\n\n    fn fee_handling(&self) -> FeeHandling {\n        *self.config().fee_handling()\n    }\n\n    fn administrative_accounts(&self) -> BTreeSet<AccountHash> {\n        self.transfer_config().administrative_accounts()\n    }\n}\n\nimpl<S> StorageProvider for RuntimeNative<S>\nwhere\n    S: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn write_balance(&mut self, purse_uref: URef, amount: U512) -> Result<(), Error> {\n        let cl_value = CLValue::from_t(amount).map_err(|_| Error::Storage)?;\n        self.tracking_copy().borrow_mut().write(\n            Key::Balance(purse_uref.addr()),\n            StoredValue::CLValue(cl_value),\n        );\n        Ok(())\n    }\n}\n\nimpl<S> HandlePayment for RuntimeNative<S> where\n    S: StateReader<Key, StoredValue, Error = GlobalStateError>\n{\n}\n"
  },
  {
    "path": "storage/src/system/handle_payment/internal.rs",
    "content": "use super::{\n    mint_provider::MintProvider, runtime_provider::RuntimeProvider,\n    storage_provider::StorageProvider,\n};\nuse casper_types::{\n    system::handle_payment::{Error, PAYMENT_PURSE_KEY, REFUND_PURSE_KEY},\n    Key, Phase, URef, U512,\n};\nuse num::CheckedMul;\nuse num_rational::Ratio;\nuse num_traits::Zero;\n\n/// Returns the purse for accepting payment for transactions.\npub fn get_payment_purse<R: RuntimeProvider>(runtime_provider: &mut R) -> Result<URef, Error> {\n    match runtime_provider.get_key(PAYMENT_PURSE_KEY) {\n        Some(Key::URef(uref)) => Ok(uref),\n        Some(_) => Err(Error::PaymentPurseKeyUnexpectedType),\n        None => Err(Error::PaymentPurseNotFound),\n    }\n}\n\n/// Sets the purse where refunds (excess funds not spent to pay for computation) will be sent.\n/// Note that if this function is never called, the default location is the main purse of the\n/// deployer's account.\npub fn set_refund<R: RuntimeProvider>(runtime_provider: &mut R, purse: URef) -> Result<(), Error> {\n    if let Phase::Payment = runtime_provider.get_phase() {\n        runtime_provider.put_key(REFUND_PURSE_KEY, Key::URef(purse))?;\n        return Ok(());\n    }\n    Err(Error::SetRefundPurseCalledOutsidePayment)\n}\n\n/// Returns the currently set refund purse.\npub fn get_refund_purse<R: RuntimeProvider>(\n    runtime_provider: &mut R,\n) -> Result<Option<URef>, Error> {\n    match runtime_provider.get_key(REFUND_PURSE_KEY) {\n        Some(Key::URef(uref)) => Ok(Some(uref)),\n        Some(_) => Err(Error::RefundPurseKeyUnexpectedType),\n        None => Ok(None),\n    }\n}\n\n/// Returns tuple where 1st element is the portion of unspent payment (if any), and the 2nd element\n/// is the fee (if any).\n///\n/// # Note\n///\n/// Any dust amounts are added to the fee.\npub fn calculate_overpayment_and_fee(\n    limit: U512,\n    gas_price: u8,\n    cost: U512,\n    consumed: U512,\n    available_balance: U512,\n    refund_ratio: 
Ratio<U512>,\n) -> Result<(U512, U512), Error> {\n    /*\n        cost is limit * price,  unused = limit - consumed\n        base refund is unused * price\n            refund rate is a percentage ranging from 0% to 100%\n            actual refund = base refund * refund rate\n            i.e. if rate == 100%, actual refund == base refund\n                 if rate = 0%, actual refund = 0 (and we can skip refund processing)\n        EXAMPLE 1\n        limit = 500, consumed = 450, price = 2, refund rate = 100%\n        cost = limit * price == 1000\n        unused = limit - consumed == 50\n        base refund = unused * price == 100\n        actual refund = base refund * refund rate == 100\n\n        EXAMPLE 2\n        limit = 5000, consumed = 0, price = 5, refund rate = 50%\n        cost = limit * price == 25000\n        unused = limit - consumed == 5000\n        base refund = unused * price == 25000\n        actual refund = base refund * refund rate == 12500\n\n        Complicating factors:\n            if the source purse does not have enough to cover the cost, their available balance is taken\n                and there is no refund\n            if the refund rate is 0%, there is no refund (although it would be bizarre for a network to\n                run with RefundHandling turned on but with a 0% rate, they are technically independent\n                settings and thus the logic must account for the possibility)\n            cost might be higher than limit * price if additional costs have been incurred.\n                as the refund calculation is based on paid for but unused gas, such additional costs\n                are not subject to refund. 
This is handled by this logic correctly, but tests over logic\n                that incurs any additional costs need to use actual discrete variables for each value\n                and not assume limit * price == cost\n    */\n    if limit > available_balance && available_balance <= cost {\n        return Ok((U512::zero(), available_balance));\n    }\n    if available_balance < cost {\n        return Ok((U512::zero(), available_balance));\n    }\n    if refund_ratio.is_zero() {\n        return Ok((U512::zero(), cost));\n    }\n    let unspent = limit.saturating_sub(consumed);\n\n    if unspent == U512::zero() {\n        return Ok((U512::zero(), cost));\n    }\n    let base_refund = unspent * gas_price;\n\n    let adjusted_refund = Ratio::from(base_refund)\n        .checked_mul(&refund_ratio)\n        .ok_or(Error::ArithmeticOverflow)?\n        .to_integer();\n\n    let fee = cost\n        .checked_sub(adjusted_refund)\n        .ok_or(Error::ArithmeticOverflow)?;\n\n    Ok((adjusted_refund, fee))\n}\n\npub fn payment_burn<P: MintProvider + RuntimeProvider + StorageProvider>(\n    provider: &mut P,\n    purse: URef,\n    amount: Option<U512>,\n) -> Result<(), Error> {\n    let available_balance = match provider.available_balance(purse)? 
{\n        Some(balance) => balance,\n        None => return Err(Error::PaymentPurseBalanceNotFound),\n    };\n    let burn_amount = amount.unwrap_or(available_balance);\n    if burn_amount.is_zero() {\n        // nothing to burn == noop\n        return Ok(());\n    }\n    // Reduce the source purse and total supply by the refund amount\n    let adjusted_balance = available_balance\n        .checked_sub(burn_amount)\n        .ok_or(Error::ArithmeticOverflow)?;\n    provider.write_balance(purse, adjusted_balance)?;\n    provider.reduce_total_supply(burn_amount)?;\n    Ok(())\n}\n\n/// This function distributes the fees according to the fee handling config.\n///\n/// NOTE: If a network is not configured for fee accumulation, this method will error if called.\npub fn distribute_accumulated_fees<P>(\n    provider: &mut P,\n    source_uref: URef,\n    amount: Option<U512>,\n) -> Result<(), Error>\nwhere\n    P: RuntimeProvider + MintProvider,\n{\n    let fee_handling = provider.fee_handling();\n    if !fee_handling.is_accumulate() {\n        return Err(Error::IncompatiblePaymentSettings);\n    }\n\n    let administrative_accounts = provider.administrative_accounts();\n    let reward_recipients = U512::from(administrative_accounts.len());\n\n    let distribute_amount = match amount {\n        Some(amount) => amount,\n        None => provider.available_balance(source_uref)?.unwrap_or_default(),\n    };\n\n    if distribute_amount.is_zero() {\n        return Ok(());\n    }\n\n    let portion = distribute_amount\n        .checked_div(reward_recipients)\n        .unwrap_or_else(U512::zero);\n\n    if !portion.is_zero() {\n        for target in administrative_accounts {\n            provider.transfer_purse_to_account(source_uref, target, portion)?;\n        }\n    }\n\n    Ok(())\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    // both burn and refund use the same basic calculation for\n    // overpayment / unspent vs fee...the only difference is\n    // what is done 
with the overage _after_ the calculation\n    // refund returns it to payer, while burn destroys it\n\n    #[test]\n    fn should_calculate_expected_amounts() {\n        let limit = U512::from(6u64);\n        let gas_price = 1;\n        let cost = limit;\n        let consumed = U512::from(3u64);\n        let available = U512::from(10u64);\n\n        let (overpay, fee) = calculate_overpayment_and_fee(\n            limit,\n            gas_price,\n            cost,\n            consumed,\n            available,\n            Ratio::new_raw(U512::from(1), U512::from(1)),\n        )\n        .unwrap();\n\n        let unspent = limit.saturating_sub(consumed);\n        let expected = unspent;\n        assert_eq!(expected, overpay, \"overpay\");\n        let expected_fee = consumed;\n        assert_eq!(expected_fee, fee, \"fee\");\n    }\n\n    #[test]\n    fn should_handle_straight_percentages() {\n        let limit = U512::from(100u64);\n        let gas_price = 1;\n        let cost = limit;\n        let consumed = U512::from(50u64);\n        let available = U512::from(1000u64);\n        let denom = 100;\n\n        for numer in 0..=denom {\n            let refund_ratio = Ratio::new_raw(U512::from(numer), U512::from(denom));\n            let (overpay, fee) = calculate_overpayment_and_fee(\n                limit,\n                gas_price,\n                cost,\n                consumed,\n                available,\n                refund_ratio,\n            )\n            .unwrap();\n\n            let unspent = limit.saturating_sub(consumed);\n            let expected = Ratio::from(unspent)\n                .checked_mul(&refund_ratio)\n                .ok_or(Error::ArithmeticOverflow)\n                .expect(\"should math\")\n                .to_integer();\n            assert_eq!(expected, overpay, \"overpay\");\n            let expected_fee = limit - expected;\n            assert_eq!(expected_fee, fee, \"fee\");\n        }\n    }\n\n    #[test]\n    fn 
should_roll_over_dust() {\n        let limit = U512::from(6u64);\n        let gas_price = 1;\n        let cost = limit;\n        let consumed = U512::from(3u64);\n        let available = U512::from(10u64);\n\n        for percentage in 0..=100 {\n            let refund_ratio = Ratio::new_raw(U512::from(percentage), U512::from(100));\n\n            let (overpay, fee) = calculate_overpayment_and_fee(\n                limit,\n                gas_price,\n                cost,\n                consumed,\n                available,\n                refund_ratio,\n            )\n            .expect(\"should have overpay and fee\");\n\n            let a = Ratio::from(overpay);\n            let b = Ratio::from(fee);\n\n            assert_eq!(a + b, Ratio::from(cost), \"{}\", percentage);\n        }\n    }\n\n    #[test]\n    fn should_take_all_of_insufficient_balance() {\n        let limit = U512::from(6u64);\n        let gas_price = 1;\n        let cost = limit;\n        let consumed = U512::from(3u64);\n        let available = U512::from(5u64);\n\n        let (overpay, fee) = calculate_overpayment_and_fee(\n            limit,\n            gas_price,\n            cost,\n            consumed,\n            available,\n            Ratio::new_raw(U512::from(1), U512::from(1)),\n        )\n        .unwrap();\n\n        assert_eq!(U512::zero(), overpay, \"overpay\");\n        let expected = available;\n        assert_eq!(expected, fee, \"fee\");\n    }\n\n    #[test]\n    fn should_handle_non_1_gas_price() {\n        let limit = U512::from(6u64);\n        let gas_price = 2;\n        let cost = limit * gas_price;\n        let consumed = U512::from(3u64);\n        let available = U512::from(12u64);\n\n        let (overpay, fee) = calculate_overpayment_and_fee(\n            limit,\n            gas_price,\n            cost,\n            consumed,\n            available,\n            Ratio::new_raw(U512::from(1), U512::from(1)),\n        )\n        .unwrap();\n\n        let unspent = 
limit.saturating_sub(consumed);\n        let expected = unspent * gas_price;\n        assert_eq!(expected, overpay, \"overpay\");\n        let expected_fee = consumed * gas_price;\n        assert_eq!(expected_fee, fee, \"fee\");\n    }\n\n    #[test]\n    fn should_handle_extra_cost() {\n        let limit = U512::from(6u64);\n        let gas_price = 2;\n        let extra_cost = U512::from(1u64);\n        let cost = limit * gas_price + extra_cost;\n        let consumed = U512::from(3u64);\n        let available = U512::from(21u64);\n\n        let (overpay, fee) = calculate_overpayment_and_fee(\n            limit,\n            gas_price,\n            cost,\n            consumed,\n            available,\n            Ratio::new_raw(U512::from(1), U512::from(1)),\n        )\n        .unwrap();\n\n        let unspent = limit.saturating_sub(consumed);\n        let expected = unspent * gas_price;\n        assert_eq!(expected, overpay, \"overpay\");\n        let expected_fee = consumed * gas_price + extra_cost;\n        assert_eq!(expected_fee, fee, \"fee\");\n    }\n}\n"
  },
  {
    "path": "storage/src/system/handle_payment/mint_provider.rs",
    "content": "use casper_types::{\n    account::AccountHash, system::handle_payment::Error, TransferredTo, URef, U512,\n};\n\n/// Provides an access to mint.\npub trait MintProvider {\n    /// Transfer `amount` from `source` purse to a `target` account.\n    /// Note: the source should always be a system purse of some kind,\n    /// such as the payment purse or an accumulator purse.\n    /// The target should be the recipient of a refund or a reward\n    fn transfer_purse_to_account(\n        &mut self,\n        source: URef,\n        target: AccountHash,\n        amount: U512,\n    ) -> Result<TransferredTo, Error>;\n\n    /// Transfer `amount` from `source` purse to a `target` purse.\n    /// Note: the source should always be a system purse of some kind,\n    /// such as the payment purse or an accumulator purse.\n    /// The target should be the recipient of a refund or a reward\n    fn transfer_purse_to_purse(\n        &mut self,\n        source: URef,\n        target: URef,\n        amount: U512,\n    ) -> Result<(), Error>;\n\n    /// Checks balance of a `purse`. Returns `None` if given purse does not exist.\n    fn available_balance(&mut self, purse: URef) -> Result<Option<U512>, Error>;\n\n    /// Reduce total supply by `amount`.\n    fn reduce_total_supply(&mut self, amount: U512) -> Result<(), Error>;\n}\n"
  },
  {
    "path": "storage/src/system/handle_payment/runtime_provider.rs",
    "content": "use std::collections::BTreeSet;\n\nuse casper_types::{\n    account::AccountHash, system::handle_payment::Error, FeeHandling, Key, Phase, RefundHandling,\n};\n\n/// Provider of runtime host functionality.\npub trait RuntimeProvider {\n    /// Get named key under a `name`.\n    fn get_key(&mut self, name: &str) -> Option<Key>;\n\n    /// Put key under a `name`.\n    fn put_key(&mut self, name: &str, key: Key) -> Result<(), Error>;\n\n    /// Remove a named key by `name`.\n    fn remove_key(&mut self, name: &str) -> Result<(), Error>;\n\n    /// Get current execution phase.\n    fn get_phase(&self) -> Phase;\n\n    /// Get caller.\n    fn get_caller(&self) -> AccountHash;\n\n    /// Get refund handling.\n    fn refund_handling(&self) -> RefundHandling;\n\n    /// Returns fee handling value.\n    fn fee_handling(&self) -> FeeHandling;\n\n    /// Returns list of administrative accounts.\n    fn administrative_accounts(&self) -> BTreeSet<AccountHash>;\n}\n"
  },
  {
    "path": "storage/src/system/handle_payment/storage_provider.rs",
    "content": "use casper_types::{URef, U512};\n\nuse crate::system::handle_payment::Error;\n\n/// Provider of storage functionality.\npub trait StorageProvider {\n    /// Write new balance.\n    fn write_balance(&mut self, purse_uref: URef, amount: U512) -> Result<(), Error>;\n}\n"
  },
  {
    "path": "storage/src/system/handle_payment.rs",
    "content": "mod handle_payment_native;\nmod internal;\n/// Provides mint logic for handle payment processing.\npub mod mint_provider;\n/// Provides runtime logic for handle payment processing.\npub mod runtime_provider;\n/// Provides storage logic for handle payment processing.\npub mod storage_provider;\n\nuse casper_types::{\n    system::handle_payment::{Error, REFUND_PURSE_KEY},\n    AccessRights, PublicKey, URef, U512,\n};\nuse num_rational::Ratio;\nuse tracing::error;\n\nuse crate::system::handle_payment::{\n    mint_provider::MintProvider, runtime_provider::RuntimeProvider,\n    storage_provider::StorageProvider,\n};\n\n/// Handle payment functionality implementation.\npub trait HandlePayment: MintProvider + RuntimeProvider + StorageProvider + Sized {\n    /// Get payment purse.\n    fn get_payment_purse(&mut self) -> Result<URef, Error> {\n        let purse = internal::get_payment_purse(self)?;\n        // Limit the access rights so only balance query and deposit are allowed.\n        Ok(URef::new(purse.addr(), AccessRights::READ_ADD))\n    }\n\n    /// Set refund purse.\n    fn set_refund_purse(&mut self, purse: URef) -> Result<(), Error> {\n        // make sure the passed uref is actually a purse...\n        // if it has a balance it is a purse and if not it isn't\n        let _balance = self.available_balance(purse)?;\n        internal::set_refund(self, purse)\n    }\n\n    /// Get refund purse.\n    fn get_refund_purse(&mut self) -> Result<Option<URef>, Error> {\n        // We purposely choose to remove the access rights so that we do not\n        // accidentally give rights for a purse to some contract that is not\n        // supposed to have it.\n        let maybe_purse = internal::get_refund_purse(self)?;\n        Ok(maybe_purse.map(|p| p.remove_access_rights()))\n    }\n\n    /// Clear refund purse.\n    fn clear_refund_purse(&mut self) -> Result<(), Error> {\n        if self.get_caller() != PublicKey::System.to_account_hash() {\n            
error!(\"invalid caller to clear refund purse\");\n            return Err(Error::InvalidCaller);\n        }\n\n        self.remove_key(REFUND_PURSE_KEY)\n    }\n\n    /// Calculate overpayment and fees (if any) for payment finalization.\n    #[allow(clippy::too_many_arguments)]\n    fn calculate_overpayment_and_fee(\n        &mut self,\n        limit: U512,\n        gas_price: u8,\n        cost: U512,\n        consumed: U512,\n        refund_ratio: Ratio<U512>,\n        available_balance: U512,\n    ) -> Result<(U512, U512), Error> {\n        if self.get_caller() != PublicKey::System.to_account_hash() {\n            error!(\"invalid caller to calculate overpayment and fee\");\n            return Err(Error::InvalidCaller);\n        }\n\n        internal::calculate_overpayment_and_fee(\n            limit,\n            gas_price,\n            cost,\n            consumed,\n            available_balance,\n            refund_ratio,\n        )\n    }\n\n    /// Distribute fees from an accumulation purse.\n    fn distribute_accumulated_fees(\n        &mut self,\n        source_uref: URef,\n        amount: Option<U512>,\n    ) -> Result<(), Error> {\n        if self.get_caller() != PublicKey::System.to_account_hash() {\n            error!(\"invalid caller to distribute accumulated fee\");\n            return Err(Error::InvalidCaller);\n        }\n\n        internal::distribute_accumulated_fees(self, source_uref, amount)\n    }\n\n    /// Burns the imputed amount from the imputed purse.\n    fn payment_burn(&mut self, source_uref: URef, amount: Option<U512>) -> Result<(), Error> {\n        if self.get_caller() != PublicKey::System.to_account_hash() {\n            error!(\"invalid caller to payment burn\");\n            return Err(Error::InvalidCaller);\n        }\n\n        internal::payment_burn(self, source_uref, amount)\n    }\n}\n"
  },
  {
    "path": "storage/src/system/mint/detail.rs",
    "content": "use casper_types::{\n    system::{\n        mint,\n        mint::{Error, TOTAL_SUPPLY_KEY},\n    },\n    Key, U512,\n};\n\nuse crate::system::mint::Mint;\n\n// Please do not expose this to the user!\npub(crate) fn reduce_total_supply_unsafe<P>(mint: &mut P, amount: U512) -> Result<(), mint::Error>\nwhere\n    P: Mint + ?Sized,\n{\n    if amount.is_zero() {\n        return Ok(()); // no change to supply\n    }\n\n    // get total supply or error\n    let total_supply_uref = match mint.get_key(TOTAL_SUPPLY_KEY) {\n        Some(Key::URef(uref)) => uref,\n        Some(_) => return Err(Error::MissingKey),\n        None => return Err(Error::MissingKey),\n    };\n    let total_supply: U512 = mint\n        .read(total_supply_uref)?\n        .ok_or(Error::TotalSupplyNotFound)?;\n\n    // decrease total supply\n    let reduced_total_supply = total_supply\n        .checked_sub(amount)\n        .ok_or(Error::ArithmeticOverflow)?;\n\n    // update total supply\n    mint.write_amount(total_supply_uref, reduced_total_supply)?;\n\n    Ok(())\n}\n"
  },
  {
    "path": "storage/src/system/mint/mint_native.rs",
    "content": "use tracing::error;\n\nuse crate::{\n    global_state::{error::Error as GlobalStateError, state::StateReader},\n    system::{\n        error::ProviderError,\n        mint::{\n            runtime_provider::RuntimeProvider, storage_provider::StorageProvider,\n            system_provider::SystemProvider, Mint,\n        },\n        runtime_native::{Id, RuntimeNative},\n    },\n    tracking_copy::{TrackingCopyEntityExt, TrackingCopyExt},\n};\nuse casper_types::{\n    account::AccountHash,\n    bytesrepr::{FromBytes, ToBytes},\n    system::{mint::Error, Caller},\n    AccessRights, CLTyped, CLValue, Gas, InitiatorAddr, Key, Phase, PublicKey, RuntimeFootprint,\n    StoredValue, SystemHashRegistry, Transfer, TransferV2, URef, U512,\n};\n\nimpl<S> RuntimeProvider for RuntimeNative<S>\nwhere\n    S: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn get_caller(&self) -> AccountHash {\n        self.address()\n    }\n\n    fn get_immediate_caller(&self) -> Option<Caller> {\n        let caller = Caller::Initiator {\n            account_hash: PublicKey::System.to_account_hash(),\n        };\n        Some(caller)\n    }\n\n    fn is_called_from_standard_payment(&self) -> bool {\n        self.phase() == Phase::Payment\n    }\n\n    fn get_system_entity_registry(&self) -> Result<SystemHashRegistry, ProviderError> {\n        self.tracking_copy()\n            .borrow_mut()\n            .get_system_entity_registry()\n            .map_err(|tce| {\n                error!(%tce, \"unable to obtain system entity registry during transfer\");\n                ProviderError::SystemEntityRegistry\n            })\n    }\n\n    fn runtime_footprint_by_account_hash(\n        &mut self,\n        account_hash: AccountHash,\n    ) -> Result<Option<RuntimeFootprint>, ProviderError> {\n        match self\n            .tracking_copy()\n            .borrow_mut()\n            .runtime_footprint_by_account_hash(self.protocol_version(), account_hash)\n        {\n            
Ok((_, footprint)) => Ok(Some(footprint)),\n            Err(tce) => {\n                error!(%tce, \"error reading addressable entity by account hash\");\n                Err(ProviderError::AccountHash(account_hash))\n            }\n        }\n    }\n\n    fn get_phase(&self) -> Phase {\n        self.phase()\n    }\n\n    fn get_key(&self, name: &str) -> Option<Key> {\n        self.named_keys().get(name).cloned()\n    }\n\n    fn get_approved_spending_limit(&self) -> U512 {\n        self.remaining_spending_limit()\n    }\n\n    fn sub_approved_spending_limit(&mut self, amount: U512) {\n        if let Some(remaining) = self.remaining_spending_limit().checked_sub(amount) {\n            self.set_remaining_spending_limit(remaining);\n        } else {\n            error!(\n                limit = %self.remaining_spending_limit(),\n                spent = %amount,\n                \"exceeded main purse spending limit\"\n            );\n            self.set_remaining_spending_limit(U512::zero());\n        }\n    }\n\n    fn get_main_purse(&self) -> Option<URef> {\n        self.runtime_footprint().main_purse()\n    }\n\n    fn is_administrator(&self, account_hash: &AccountHash) -> bool {\n        self.transfer_config().is_administrator(account_hash)\n    }\n\n    fn allow_unrestricted_transfers(&self) -> bool {\n        self.transfer_config().allow_unrestricted_transfers()\n    }\n\n    fn is_valid_uref(&self, uref: &URef) -> bool {\n        self.access_rights().has_access_rights_to_uref(uref)\n    }\n}\n\nimpl<S> StorageProvider for RuntimeNative<S>\nwhere\n    S: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn new_uref<T: CLTyped + ToBytes>(&mut self, value: T) -> Result<URef, Error> {\n        let cl_value: CLValue = CLValue::from_t(value).map_err(|_| Error::CLValue)?;\n        let uref = self\n            .address_generator()\n            .write()\n            .new_uref(AccessRights::READ_ADD_WRITE);\n        self.extend_access_rights(&[uref]);\n   
     // we are creating this key now, thus we know it is a Key::URef and we grant the creator\n        // full permissions on it, thus we do not need to do validate key / validate uref access\n        // before storing it.\n        self.tracking_copy()\n            .borrow_mut()\n            .write(Key::URef(uref), StoredValue::CLValue(cl_value));\n        Ok(uref)\n    }\n\n    fn read<T: CLTyped + FromBytes>(&mut self, uref: URef) -> Result<Option<T>, Error> {\n        // check access rights on uref\n        if !self.access_rights().has_access_rights_to_uref(&uref) {\n            return Err(Error::ForgedReference);\n        }\n        let key = &Key::URef(uref);\n        let stored_value = match self.tracking_copy().borrow_mut().read(key) {\n            Ok(Some(stored_value)) => stored_value,\n            Ok(None) => return Ok(None),\n            Err(_) => return Err(Error::Storage),\n        };\n        // by convention, we only store CLValues under Key::URef\n        if let StoredValue::CLValue(value) = stored_value {\n            // Only CLTyped instances should be stored as a CLValue.\n            let value = CLValue::into_t(value).map_err(|_| Error::CLValue)?;\n            Ok(Some(value))\n        } else {\n            Err(Error::CLValue)\n        }\n    }\n\n    fn write_amount(&mut self, uref: URef, amount: U512) -> Result<(), Error> {\n        let cl_value = CLValue::from_t(amount).map_err(|_| Error::CLValue)?;\n        // is the uref writeable?\n        if !uref.is_writeable() {\n            return Err(Error::Storage);\n        }\n        // check access rights on uref\n        if !self.access_rights().has_access_rights_to_uref(&uref) {\n            return Err(Error::ForgedReference);\n        }\n        self.tracking_copy()\n            .borrow_mut()\n            .write(Key::URef(uref), StoredValue::CLValue(cl_value));\n        Ok(())\n    }\n\n    fn add<T: CLTyped + ToBytes>(&mut self, uref: URef, value: T) -> Result<(), Error> {\n        let cl_value 
= CLValue::from_t(value).map_err(|_| Error::CLValue)?;\n        self.tracking_copy()\n            .borrow_mut()\n            .add(Key::URef(uref), StoredValue::CLValue(cl_value))\n            .map_err(|_| Error::Storage)?;\n        Ok(())\n    }\n\n    fn total_balance(&mut self, purse: URef) -> Result<U512, Error> {\n        match self\n            .tracking_copy()\n            .borrow_mut()\n            .get_total_balance(purse.into())\n        {\n            Ok(total) => Ok(total.value()),\n            Err(err) => {\n                error!(?err, \"mint native total_balance\");\n                dbg!(&err);\n                Err(Error::Storage)\n            }\n        }\n    }\n\n    fn available_balance(&mut self, purse: URef) -> Result<Option<U512>, Error> {\n        match self\n            .tracking_copy()\n            .borrow_mut()\n            .get_available_balance(Key::Balance(purse.addr()))\n        {\n            Ok(motes) => Ok(Some(motes.value())),\n            Err(err) => {\n                error!(?err, \"mint native available_balance\");\n                Err(Error::Storage)\n            }\n        }\n    }\n\n    fn write_balance(&mut self, uref: URef, balance: U512) -> Result<(), Error> {\n        let cl_value = CLValue::from_t(balance).map_err(|_| Error::CLValue)?;\n        self.tracking_copy()\n            .borrow_mut()\n            .write(Key::Balance(uref.addr()), StoredValue::CLValue(cl_value));\n        Ok(())\n    }\n\n    fn add_balance(&mut self, uref: URef, value: U512) -> Result<(), Error> {\n        let cl_value = CLValue::from_t(value).map_err(|_| Error::CLValue)?;\n        self.tracking_copy()\n            .borrow_mut()\n            .add(Key::Balance(uref.addr()), StoredValue::CLValue(cl_value))\n            .map_err(|_| Error::Storage)?;\n        Ok(())\n    }\n}\n\nimpl<S> SystemProvider for RuntimeNative<S>\nwhere\n    S: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn record_transfer(\n        &mut self,\n        
maybe_to: Option<AccountHash>,\n        source: URef,\n        target: URef,\n        amount: U512,\n        id: Option<u64>,\n    ) -> Result<(), Error> {\n        if self.phase() != Phase::Session {\n            return Ok(());\n        }\n        let txn_hash = match self.id() {\n            Id::Transaction(txn_hash) => *txn_hash,\n            // we don't write transfer records for systemic transfers (step, fees, rewards, etc)\n            // so return Ok and move on.\n            Id::Seed(_) => return Ok(()),\n        };\n        let from = InitiatorAddr::AccountHash(self.get_caller());\n        let fee = Gas::from(self.native_transfer_cost());\n        let transfer = Transfer::V2(TransferV2::new(\n            txn_hash, from, maybe_to, source, target, amount, fee, id,\n        ));\n\n        self.push_transfer(transfer);\n\n        Ok(())\n    }\n}\n\nimpl<S> Mint for RuntimeNative<S>\nwhere\n    S: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    fn purse_exists(&mut self, uref: URef) -> Result<bool, Error> {\n        let key = Key::Balance(uref.addr());\n        match self\n            .tracking_copy()\n            .borrow_mut()\n            .read(&key)\n            .map_err(|_| Error::Storage)?\n        {\n            Some(StoredValue::CLValue(value)) => Ok(*value.cl_type() == U512::cl_type()),\n            Some(_non_cl_value) => Err(Error::CLValue),\n            None => Ok(false),\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/system/mint/runtime_provider.rs",
    "content": "use crate::system::error::ProviderError;\nuse casper_types::{\n    account::AccountHash, system::Caller, Key, Phase, RuntimeFootprint, SystemHashRegistry, URef,\n    U512,\n};\n\n/// Provider of runtime host functionality.\npub trait RuntimeProvider {\n    /// This method should return the caller of the current context.\n    fn get_caller(&self) -> AccountHash;\n\n    /// This method should return the immediate caller of the current context.\n    fn get_immediate_caller(&self) -> Option<Caller>;\n\n    /// Is the caller standard payment logic?\n    fn is_called_from_standard_payment(&self) -> bool;\n\n    /// Get system entity registry.\n    fn get_system_entity_registry(&self) -> Result<SystemHashRegistry, ProviderError>;\n\n    /// Read addressable entity by account hash.\n    fn runtime_footprint_by_account_hash(\n        &mut self,\n        account_hash: AccountHash,\n    ) -> Result<Option<RuntimeFootprint>, ProviderError>;\n\n    /// Gets execution phase\n    fn get_phase(&self) -> Phase;\n\n    /// This method should handle obtaining a given named [`Key`] under a `name`.\n    fn get_key(&self, name: &str) -> Option<Key>;\n\n    /// Returns approved CSPR spending limit.\n    fn get_approved_spending_limit(&self) -> U512;\n\n    /// Signal to host that `amount` of tokens has been transferred.\n    fn sub_approved_spending_limit(&mut self, amount: U512);\n\n    /// Returns main purse of the sender account.\n    fn get_main_purse(&self) -> Option<URef>;\n\n    /// Returns `true` if the account hash belongs to an administrator account, otherwise `false`.\n    fn is_administrator(&self, account_hash: &AccountHash) -> bool;\n\n    /// Checks if users can perform unrestricted transfers. This option is valid only for private\n    /// chains.\n    fn allow_unrestricted_transfers(&self) -> bool;\n\n    /// Validate URef against context access rights.\n    fn is_valid_uref(&self, uref: &URef) -> bool;\n}\n"
  },
  {
    "path": "storage/src/system/mint/storage_provider.rs",
    "content": "use casper_types::{\n    bytesrepr::{FromBytes, ToBytes},\n    system::mint::Error,\n    CLTyped, URef, U512,\n};\n\n/// Provides functionality of a contract storage.\npub trait StorageProvider {\n    /// Create new [`URef`].\n    fn new_uref<T: CLTyped + ToBytes>(&mut self, init: T) -> Result<URef, Error>;\n\n    /// Read data from [`URef`].\n    fn read<T: CLTyped + FromBytes>(&mut self, uref: URef) -> Result<Option<T>, Error>;\n\n    /// Write a [`U512`] amount under a [`URef`].\n    fn write_amount(&mut self, uref: URef, amount: U512) -> Result<(), Error>;\n\n    /// Add data to a [`URef`].\n    fn add<T: CLTyped + ToBytes>(&mut self, uref: URef, value: T) -> Result<(), Error>;\n\n    /// Read total balance.\n    fn total_balance(&mut self, uref: URef) -> Result<U512, Error>;\n\n    /// Read balance.\n    fn available_balance(&mut self, uref: URef) -> Result<Option<U512>, Error>;\n\n    /// Write balance.\n    fn write_balance(&mut self, uref: URef, balance: U512) -> Result<(), Error>;\n\n    /// Add amount to an existing balance.\n    fn add_balance(&mut self, uref: URef, value: U512) -> Result<(), Error>;\n}\n"
  },
  {
    "path": "storage/src/system/mint/system_provider.rs",
    "content": "use casper_types::{account::AccountHash, system::mint::Error, URef, U512};\n\n/// Provides functionality of a system module.\npub trait SystemProvider {\n    /// Records a transfer.\n    fn record_transfer(\n        &mut self,\n        maybe_to: Option<AccountHash>,\n        source: URef,\n        target: URef,\n        amount: U512,\n        id: Option<u64>,\n    ) -> Result<(), Error>;\n}\n"
  },
  {
    "path": "storage/src/system/mint.rs",
    "content": "pub(crate) mod detail;\n/// Provides native mint processing.\nmod mint_native;\n/// Provides runtime logic for mint processing.\npub mod runtime_provider;\n/// Provides storage logic for mint processing.\npub mod storage_provider;\n/// Provides system logic for mint processing.\npub mod system_provider;\n\nuse num_rational::Ratio;\nuse num_traits::CheckedMul;\n\nuse casper_types::{\n    account::AccountHash,\n    system::{\n        mint::{Error, ROUND_SEIGNIORAGE_RATE_KEY, TOTAL_SUPPLY_KEY},\n        Caller,\n    },\n    Key, PublicKey, URef, U512,\n};\n\nuse crate::system::mint::{\n    runtime_provider::RuntimeProvider, storage_provider::StorageProvider,\n    system_provider::SystemProvider,\n};\n\n/// Mint trait.\npub trait Mint: RuntimeProvider + StorageProvider + SystemProvider {\n    /// Mint new token with given `initial_balance` balance. Returns new purse on success, otherwise\n    /// an error.\n    fn mint(&mut self, initial_balance: U512) -> Result<URef, Error> {\n        let caller = self.get_caller();\n        let is_empty_purse = initial_balance.is_zero();\n        if !is_empty_purse && caller != PublicKey::System.to_account_hash() {\n            return Err(Error::InvalidNonEmptyPurseCreation);\n        }\n\n        let purse_uref: URef = self.new_uref(())?;\n        self.write_balance(purse_uref, initial_balance)?;\n\n        if !is_empty_purse {\n            // get total supply uref if exists, otherwise error\n            let total_supply_uref = match self.get_key(TOTAL_SUPPLY_KEY) {\n                None => {\n                    // total supply URef should exist due to genesis\n                    return Err(Error::TotalSupplyNotFound);\n                }\n                Some(Key::URef(uref)) => uref,\n                Some(_) => return Err(Error::MissingKey),\n            };\n            // increase total supply\n            self.add(total_supply_uref, initial_balance)?;\n        }\n\n        Ok(purse_uref)\n    }\n\n    /// Burns 
native tokens.\n    fn burn(&mut self, purse: URef, amount: U512) -> Result<(), Error> {\n        if !purse.is_writeable() {\n            return Err(Error::InvalidAccessRights);\n        }\n        if !self.is_valid_uref(&purse) {\n            return Err(Error::ForgedReference);\n        }\n\n        let source_available_balance: U512 = match self.balance(purse)? {\n            Some(source_balance) => source_balance,\n            None => return Err(Error::PurseNotFound),\n        };\n\n        let new_balance = source_available_balance\n            .checked_sub(amount)\n            .unwrap_or_else(U512::zero);\n        // change balance\n        self.write_balance(purse, new_balance)?;\n        // reduce total supply AFTER changing balance in case changing balance errors\n        let burned_amount = source_available_balance.saturating_sub(new_balance);\n        detail::reduce_total_supply_unsafe(self, burned_amount)\n    }\n\n    /// Reduce total supply by `amount`. Returns unit on success, otherwise\n    /// an error.\n    fn reduce_total_supply(&mut self, amount: U512) -> Result<(), Error> {\n        // only system may reduce total supply\n        let caller = self.get_caller();\n        if caller != PublicKey::System.to_account_hash() {\n            return Err(Error::InvalidTotalSupplyReductionAttempt);\n        }\n\n        detail::reduce_total_supply_unsafe(self, amount)\n    }\n\n    /// Read balance of given `purse`.\n    fn balance(&mut self, purse: URef) -> Result<Option<U512>, Error> {\n        match self.available_balance(purse)? 
{\n            some @ Some(_) => Ok(some),\n            None => Err(Error::PurseNotFound),\n        }\n    }\n\n    /// Transfers `amount` of tokens from `source` purse to a `target` purse.\n    fn transfer(\n        &mut self,\n        maybe_to: Option<AccountHash>,\n        source: URef,\n        target: URef,\n        amount: U512,\n        id: Option<u64>,\n    ) -> Result<(), Error> {\n        if !self.allow_unrestricted_transfers() {\n            let registry = self\n                .get_system_entity_registry()\n                .map_err(|_| Error::UnableToGetSystemRegistry)?;\n            let immediate_caller = self.get_immediate_caller();\n            match immediate_caller {\n                Some(Caller::Entity { entity_addr, .. })\n                    if registry.exists(&entity_addr.value()) =>\n                {\n                    // System contract calling a mint is fine (i.e. standard payment calling mint's\n                    // transfer)\n                }\n\n                Some(Caller::Initiator { account_hash: _ })\n                    if self.is_called_from_standard_payment() =>\n                {\n                    // Standard payment acts as a session without separate stack frame and calls\n                    // into mint's transfer.\n                }\n\n                Some(Caller::Initiator { account_hash })\n                    if account_hash == PublicKey::System.to_account_hash() =>\n                {\n                    // System calls a session code.\n                }\n\n                Some(Caller::Initiator { account_hash }) => {\n                    // For example: a session using transfer host functions, or calling the mint's\n                    // entrypoint directly\n                    let is_source_admin = self.is_administrator(&account_hash);\n                    match maybe_to {\n                        Some(to) => {\n                            let maybe_account = self.runtime_footprint_by_account_hash(to);\n\n       
                     match maybe_account {\n                                Ok(Some(runtime_footprint)) => {\n                                    // This can happen when user tries to transfer funds by\n                                    // calling mint\n                                    // directly but tries to specify wrong account hash.\n                                    let addr = if let Some(uref) = runtime_footprint.main_purse() {\n                                        uref.addr()\n                                    } else {\n                                        return Err(Error::InvalidContext);\n                                    };\n\n                                    if addr != target.addr() {\n                                        return Err(Error::DisabledUnrestrictedTransfers);\n                                    }\n                                    let is_target_system_account =\n                                        to == PublicKey::System.to_account_hash();\n                                    let is_target_administrator = self.is_administrator(&to);\n                                    if !(is_source_admin\n                                        || is_target_system_account\n                                        || is_target_administrator)\n                                    {\n                                        return Err(Error::DisabledUnrestrictedTransfers);\n                                    }\n                                }\n                                Ok(None) => {\n                                    // `to` is specified, but no new account is persisted\n                                    // yet. 
Only\n                                    // administrators can do that and it is also validated\n                                    // at the host function level.\n                                    if !is_source_admin {\n                                        return Err(Error::DisabledUnrestrictedTransfers);\n                                    }\n                                }\n                                Err(_) => {\n                                    return Err(Error::Storage);\n                                }\n                            }\n                        }\n                        None => {\n                            if !is_source_admin {\n                                return Err(Error::DisabledUnrestrictedTransfers);\n                            }\n                        }\n                    }\n                }\n\n                Some(Caller::Entity {\n                    package_hash: _,\n                    entity_addr: _,\n                }) => {\n                    if self.get_caller() != PublicKey::System.to_account_hash()\n                        && !self.is_administrator(&self.get_caller())\n                    {\n                        return Err(Error::DisabledUnrestrictedTransfers);\n                    }\n                }\n\n                Some(Caller::SmartContract {\n                    contract_package_hash: _,\n                    contract_hash: _,\n                }) => {\n                    if self.get_caller() != PublicKey::System.to_account_hash()\n                        && !self.is_administrator(&self.get_caller())\n                    {\n                        return Err(Error::DisabledUnrestrictedTransfers);\n                    }\n                }\n\n                None => {\n                    // There's always an immediate caller, but we should return something.\n                    return Err(Error::DisabledUnrestrictedTransfers);\n                }\n            }\n        }\n\n        if 
!source.is_writeable() || !target.is_addable() {\n            // TODO: I don't think we should enforce is addable on the target\n            // Unlike other uses of URefs (such as a counter), in this context the value represents\n            // a deposit of token. Generally, deposit of a desirable resource is permissive.\n            return Err(Error::InvalidAccessRights);\n        }\n        let source_available_balance: U512 = match self.available_balance(source)? {\n            Some(source_balance) => source_balance,\n            None => return Err(Error::SourceNotFound),\n        };\n        if amount > source_available_balance {\n            // NOTE: we use AVAILABLE balance to check sufficient funds\n            return Err(Error::InsufficientFunds);\n        }\n        let source_total_balance = self.total_balance(source)?;\n        if source_available_balance > source_total_balance {\n            panic!(\"available balance can never be greater than total balance\");\n        }\n        if self.available_balance(target)?.is_none() {\n            return Err(Error::DestNotFound);\n        }\n        let addr = match self.get_main_purse() {\n            None => return Err(Error::InvalidURef),\n            Some(uref) => uref.addr(),\n        };\n        if self.get_caller() != PublicKey::System.to_account_hash() && addr == source.addr() {\n            if amount > self.get_approved_spending_limit() {\n                return Err(Error::UnapprovedSpendingAmount);\n            }\n            self.sub_approved_spending_limit(amount);\n        }\n\n        // NOTE: we use TOTAL balance to determine new balance\n        let new_balance = source_total_balance.saturating_sub(amount);\n        self.write_balance(source, new_balance)?;\n        self.add_balance(target, amount)?;\n        self.record_transfer(maybe_to, source, target, amount, id)?;\n        Ok(())\n    }\n\n    /// Retrieves the base round reward.\n    fn read_base_round_reward(&mut self) -> Result<U512, 
Error> {\n        let total_supply_uref = match self.get_key(TOTAL_SUPPLY_KEY) {\n            Some(Key::URef(uref)) => uref,\n            Some(_) => return Err(Error::MissingKey),\n            None => return Err(Error::MissingKey),\n        };\n        let total_supply: U512 = self\n            .read(total_supply_uref)?\n            .ok_or(Error::TotalSupplyNotFound)?;\n\n        let round_seigniorage_rate_uref = match self.get_key(ROUND_SEIGNIORAGE_RATE_KEY) {\n            Some(Key::URef(uref)) => uref,\n            Some(_) => return Err(Error::MissingKey),\n            None => return Err(Error::MissingKey),\n        };\n        let round_seigniorage_rate: Ratio<U512> = self\n            .read(round_seigniorage_rate_uref)?\n            .ok_or(Error::TotalSupplyNotFound)?;\n\n        round_seigniorage_rate\n            .checked_mul(&Ratio::from(total_supply))\n            .map(|ratio| ratio.to_integer())\n            .ok_or(Error::ArithmeticOverflow)\n    }\n\n    /// Mint `amount` new token into `existing_purse`.\n    /// Returns unit on success, otherwise an error.\n    fn mint_into_existing_purse(\n        &mut self,\n        existing_purse: URef,\n        amount: U512,\n    ) -> Result<(), Error> {\n        let caller = self.get_caller();\n        if caller != PublicKey::System.to_account_hash() {\n            return Err(Error::InvalidContext);\n        }\n        if amount.is_zero() {\n            // treat as noop\n            return Ok(());\n        }\n        if !self.purse_exists(existing_purse)? 
{\n            return Err(Error::PurseNotFound);\n        }\n        self.add_balance(existing_purse, amount)?;\n        // get total supply uref if exists, otherwise error.\n        let total_supply_uref = match self.get_key(TOTAL_SUPPLY_KEY) {\n            None => {\n                // total supply URef should exist due to genesis\n                // which obviously must have been called\n                // before new rewards are minted at the end of an era\n                return Err(Error::TotalSupplyNotFound);\n            }\n            Some(Key::URef(uref)) => uref,\n            Some(_) => return Err(Error::MissingKey),\n        };\n        // increase total supply\n        self.add(total_supply_uref, amount)?;\n        Ok(())\n    }\n\n    /// Check if a purse exists.\n    fn purse_exists(&mut self, uref: URef) -> Result<bool, Error>;\n}\n"
  },
  {
    "path": "storage/src/system/protocol_upgrade.rs",
    "content": "//! Support for applying upgrades on the execution engine.\nuse num_rational::Ratio;\nuse std::{\n    cell::RefCell,\n    collections::{BTreeMap, BTreeSet},\n    rc::Rc,\n};\n\nuse thiserror::Error;\nuse tracing::{debug, error, info, warn};\n\nuse casper_types::{\n    addressable_entity::{\n        ActionThresholds, AssociatedKeys, EntityKind, NamedKeyAddr, NamedKeyValue, Weight,\n    },\n    bytesrepr::{self, Bytes, ToBytes},\n    contracts::{ContractHash, ContractPackageStatus, NamedKeys},\n    system::{\n        auction::{\n            BidAddr, BidAddrTag, BidKind, DelegatorBid, DelegatorKind,\n            SeigniorageRecipientsSnapshotV1, SeigniorageRecipientsSnapshotV2,\n            SeigniorageRecipientsV2, Unbond, UnbondEra, UnbondKind, ValidatorBid,\n            AUCTION_DELAY_KEY, DEFAULT_SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION,\n            ERA_END_TIMESTAMP_MILLIS_KEY, ERA_ID_KEY, LOCKED_FUNDS_PERIOD_KEY,\n            MINIMUM_DELEGATION_RATE_KEY, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY,\n            SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY, UNBONDING_DELAY_KEY, VALIDATOR_SLOTS_KEY,\n        },\n        handle_payment::{ACCUMULATION_PURSE_KEY, PAYMENT_PURSE_KEY},\n        mint::{\n            MINT_GAS_HOLD_HANDLING_KEY, MINT_GAS_HOLD_INTERVAL_KEY, MINT_SUSTAIN_PURSE_KEY,\n            ROUND_SEIGNIORAGE_RATE_KEY, TOTAL_SUPPLY_KEY,\n        },\n        SystemEntityType, AUCTION, HANDLE_PAYMENT, MINT,\n    },\n    AccessRights, AddressableEntity, AddressableEntityHash, ByteCode, ByteCodeAddr, ByteCodeHash,\n    ByteCodeKind, CLValue, CLValueError, Contract, Digest, EntityAddr, EntityVersionKey,\n    EntityVersions, EntryPointAddr, EntryPointValue, EntryPoints, EraId, FeeHandling, Groups,\n    HashAddr, Key, KeyTag, Motes, Package, PackageHash, PackageStatus, Phase,\n    ProtocolUpgradeConfig, ProtocolVersion, PublicKey, RewardsHandling, StoredValue,\n    SystemHashRegistry, URef, REWARDS_HANDLING_RATIO_TAG, U512,\n};\n\nuse crate::{\n    
global_state::state::StateProvider,\n    tracking_copy::{AddResult, TrackingCopy, TrackingCopyEntityExt, TrackingCopyExt},\n    AddressGenerator,\n};\n\nconst NO_CARRY_FORWARD: bool = false;\nconst CARRY_FORWARD: bool = true;\n\n/// Represents outcomes of a failed protocol upgrade.\n#[derive(Clone, Error, Debug)]\npub enum ProtocolUpgradeError {\n    /// Protocol version used in the deploy is invalid.\n    #[error(\"Invalid protocol version: {0}\")]\n    InvalidProtocolVersion(ProtocolVersion),\n    /// Error validating a protocol upgrade config.\n    #[error(\"Invalid upgrade config\")]\n    InvalidUpgradeConfig,\n    /// Unable to retrieve a system contract.\n    #[error(\"Unable to retrieve system contract: {0}\")]\n    UnableToRetrieveSystemContract(String),\n    /// Unable to retrieve a system contract package.\n    #[error(\"Unable to retrieve system contract package: {0}\")]\n    UnableToRetrieveSystemContractPackage(String),\n    /// Unable to disable previous version of a system contract.\n    #[error(\"Failed to disable previous version of system contract: {0}\")]\n    FailedToDisablePreviousVersion(String),\n    /// (De)serialization error.\n    #[error(\"Bytesrepr error: {0}\")]\n    Bytesrepr(String),\n    /// Failed to create system entity registry.\n    #[error(\"Failed to insert system entity registry\")]\n    FailedToCreateSystemRegistry,\n    /// Found unexpected variant of a key.\n    #[error(\"Unexpected key variant\")]\n    UnexpectedKeyVariant,\n    /// Found unexpected variant of a stored value.\n    #[error(\"Unexpected stored value variant\")]\n    UnexpectedStoredValueVariant,\n    /// Failed to convert into a CLValue.\n    #[error(\"{0}\")]\n    CLValue(String),\n    /// Missing system contract hash.\n    #[error(\"Missing system contract hash: {0}\")]\n    MissingSystemEntityHash(String),\n    /// Tracking copy error.\n    #[error(\"{0}\")]\n    TrackingCopy(crate::tracking_copy::TrackingCopyError),\n}\n\nimpl From<CLValueError> for 
ProtocolUpgradeError {\n    fn from(v: CLValueError) -> Self {\n        Self::CLValue(v.to_string())\n    }\n}\n\nimpl From<crate::tracking_copy::TrackingCopyError> for ProtocolUpgradeError {\n    fn from(err: crate::tracking_copy::TrackingCopyError) -> Self {\n        ProtocolUpgradeError::TrackingCopy(err)\n    }\n}\n\nimpl From<bytesrepr::Error> for ProtocolUpgradeError {\n    fn from(error: bytesrepr::Error) -> Self {\n        ProtocolUpgradeError::Bytesrepr(error.to_string())\n    }\n}\n\n/// Addresses for system entities.\npub struct SystemHashAddresses {\n    mint: HashAddr,\n    auction: HashAddr,\n    handle_payment: HashAddr,\n}\n\nimpl SystemHashAddresses {\n    /// Creates a new instance of system entity addresses.\n    pub fn new(mint: HashAddr, auction: HashAddr, handle_payment: HashAddr) -> Self {\n        SystemHashAddresses {\n            mint,\n            auction,\n            handle_payment,\n        }\n    }\n\n    /// Mint address.\n    pub fn mint(&self) -> HashAddr {\n        self.mint\n    }\n\n    /// Auction address.\n    pub fn auction(&self) -> HashAddr {\n        self.auction\n    }\n\n    /// Handle payment address.\n    pub fn handle_payment(&self) -> HashAddr {\n        self.handle_payment\n    }\n}\n\n/// The system upgrader deals with conducting an actual protocol upgrade.\npub struct ProtocolUpgrader<S>\nwhere\n    S: StateProvider,\n{\n    config: ProtocolUpgradeConfig,\n    address_generator: Rc<RefCell<AddressGenerator>>,\n    tracking_copy: TrackingCopy<<S as StateProvider>::Reader>,\n}\n\nimpl<S> ProtocolUpgrader<S>\nwhere\n    S: StateProvider,\n{\n    /// Creates new system upgrader instance.\n    pub fn new(\n        config: ProtocolUpgradeConfig,\n        protocol_upgrade_config_hash: Digest,\n        tracking_copy: TrackingCopy<<S as StateProvider>::Reader>,\n    ) -> Self {\n        let phase = Phase::System;\n        let protocol_upgrade_config_hash_bytes = protocol_upgrade_config_hash.as_ref();\n\n        let 
address_generator = {\n            let generator = AddressGenerator::new(protocol_upgrade_config_hash_bytes, phase);\n            Rc::new(RefCell::new(generator))\n        };\n        ProtocolUpgrader {\n            config,\n            address_generator,\n            tracking_copy,\n        }\n    }\n\n    /// Apply a protocol upgrade.\n    pub fn upgrade(\n        mut self,\n        pre_state_hash: Digest,\n    ) -> Result<TrackingCopy<<S as StateProvider>::Reader>, ProtocolUpgradeError> {\n        self.check_next_protocol_version_validity()?;\n        self.handle_global_state_updates();\n        let system_entity_addresses = self.handle_system_hashes()?;\n\n        if self.config.enable_addressable_entity() {\n            self.migrate_system_account(pre_state_hash)?;\n            self.create_accumulation_purse_if_required(\n                &system_entity_addresses.handle_payment(),\n                self.config.fee_handling(),\n            )?;\n            self.migrate_or_refresh_system_entities(&system_entity_addresses)?;\n        } else {\n            self.create_accumulation_purse_if_required_by_contract(\n                &system_entity_addresses.handle_payment(),\n                self.config.fee_handling(),\n            )?;\n            self.refresh_system_contracts(&system_entity_addresses)?;\n        }\n\n        self.handle_payment_purse_check(\n            system_entity_addresses.handle_payment(),\n            system_entity_addresses.mint(),\n        )?;\n        self.handle_new_gas_hold_config(system_entity_addresses.mint())?;\n        self.handle_new_validator_slots(system_entity_addresses.auction())?;\n        self.handle_new_auction_delay(system_entity_addresses.auction())?;\n        self.handle_new_locked_funds_period_millis(system_entity_addresses.auction())?;\n        self.handle_new_unbonding_delay(system_entity_addresses.auction())?;\n        self.handle_new_round_seigniorage_rate(system_entity_addresses.mint())?;\n        
self.handle_unbonds_migration()?;\n        self.handle_bids_migration(\n            self.config.validator_minimum_bid_amount(),\n            self.config.minimum_delegation_amount(),\n            self.config.maximum_delegation_amount(),\n            system_entity_addresses.auction(),\n        )?;\n        self.handle_era_info_migration()?;\n        self.handle_seignorage_snapshot_migration(system_entity_addresses.auction())?;\n        self.handle_total_supply_calc(system_entity_addresses.mint())?;\n        self.handle_rewards_handling(system_entity_addresses.mint())?;\n        self.handle_minimum_delegation_rate(system_entity_addresses.auction())?;\n\n        Ok(self.tracking_copy)\n    }\n\n    /// Determine if the next protocol version is a legitimate semver progression.\n    pub fn check_next_protocol_version_validity(&self) -> Result<(), ProtocolUpgradeError> {\n        debug!(\"check next protocol version validity\");\n        let current_protocol_version = self.config.current_protocol_version();\n        let new_protocol_version = self.config.new_protocol_version();\n\n        let upgrade_check_result =\n            current_protocol_version.check_next_version(&new_protocol_version);\n\n        if upgrade_check_result.is_invalid() {\n            Err(ProtocolUpgradeError::InvalidProtocolVersion(\n                new_protocol_version,\n            ))\n        } else {\n            Ok(())\n        }\n    }\n\n    fn system_hash_registry(&self) -> Result<SystemHashRegistry, ProtocolUpgradeError> {\n        debug!(\"system entity registry\");\n        let registry = if let Ok(registry) = self.tracking_copy.get_system_entity_registry() {\n            registry\n        } else {\n            // Check the upgrade config for the registry\n            let upgrade_registry = self\n                .config\n                .global_state_update()\n                .get(&Key::SystemEntityRegistry)\n                .ok_or_else(|| {\n                    error!(\"Registry is 
absent in upgrade config\");\n                    ProtocolUpgradeError::FailedToCreateSystemRegistry\n                })?\n                .to_owned();\n            if let StoredValue::CLValue(cl_registry) = upgrade_registry {\n                CLValue::into_t::<SystemHashRegistry>(cl_registry).map_err(|error| {\n                    let error_msg = format!(\"Conversion to system registry failed: {:?}\", error);\n                    error!(\"{}\", error_msg);\n                    ProtocolUpgradeError::Bytesrepr(error_msg)\n                })?\n            } else {\n                error!(\"Failed to create registry as StoreValue in upgrade config is not CLValue\");\n                return Err(ProtocolUpgradeError::FailedToCreateSystemRegistry);\n            }\n        };\n        Ok(registry)\n    }\n\n    /// Handle system entities.\n    pub fn handle_system_hashes(&mut self) -> Result<SystemHashAddresses, ProtocolUpgradeError> {\n        debug!(\"handle system entities\");\n        let mut registry = self.system_hash_registry()?;\n\n        let mint = *registry.get(MINT).ok_or_else(|| {\n            error!(\"Missing system mint entity hash\");\n            ProtocolUpgradeError::MissingSystemEntityHash(MINT.to_string())\n        })?;\n        let auction = *registry.get(AUCTION).ok_or_else(|| {\n            error!(\"Missing system auction entity hash\");\n            ProtocolUpgradeError::MissingSystemEntityHash(AUCTION.to_string())\n        })?;\n        let handle_payment = *registry.get(HANDLE_PAYMENT).ok_or_else(|| {\n            error!(\"Missing system handle payment entity hash\");\n            ProtocolUpgradeError::MissingSystemEntityHash(HANDLE_PAYMENT.to_string())\n        })?;\n        if let Some(standard_payment_hash) = registry.remove_standard_payment() {\n            // Write the chainspec registry to global state\n            let cl_value_chainspec_registry = CLValue::from_t(registry)\n                .map_err(|error| 
ProtocolUpgradeError::Bytesrepr(error.to_string()))?;\n\n            self.tracking_copy.write(\n                Key::SystemEntityRegistry,\n                StoredValue::CLValue(cl_value_chainspec_registry),\n            );\n\n            // Prune away standard payment from global state.\n            self.tracking_copy.prune(Key::Hash(standard_payment_hash));\n        };\n\n        // Write the chainspec registry to global state\n        let cl_value_chainspec_registry = CLValue::from_t(self.config.chainspec_registry().clone())\n            .map_err(|error| ProtocolUpgradeError::Bytesrepr(error.to_string()))?;\n\n        self.tracking_copy.write(\n            Key::ChainspecRegistry,\n            StoredValue::CLValue(cl_value_chainspec_registry),\n        );\n\n        let system_hash_addresses = SystemHashAddresses::new(mint, auction, handle_payment);\n\n        Ok(system_hash_addresses)\n    }\n\n    /// Bump major version and/or update the entry points for system contracts.\n    pub fn migrate_or_refresh_system_entities(\n        &mut self,\n        system_entity_addresses: &SystemHashAddresses,\n    ) -> Result<(), ProtocolUpgradeError> {\n        debug!(\"refresh system contracts\");\n        self.migrate_or_refresh_system_entity_entry_points(\n            system_entity_addresses.mint(),\n            SystemEntityType::Mint,\n        )?;\n        self.migrate_or_refresh_system_entity_entry_points(\n            system_entity_addresses.auction(),\n            SystemEntityType::Auction,\n        )?;\n        self.migrate_or_refresh_system_entity_entry_points(\n            system_entity_addresses.handle_payment(),\n            SystemEntityType::HandlePayment,\n        )?;\n\n        Ok(())\n    }\n\n    /// Bump major version and/or update the entry points for system contracts.\n    pub fn refresh_system_contracts(\n        &mut self,\n        system_entity_addresses: &SystemHashAddresses,\n    ) -> Result<(), ProtocolUpgradeError> {\n        
self.refresh_system_contract_entry_points(\n            system_entity_addresses.mint(),\n            SystemEntityType::Mint,\n        )?;\n        self.refresh_system_contract_entry_points(\n            system_entity_addresses.auction(),\n            SystemEntityType::Auction,\n        )?;\n        self.refresh_system_contract_entry_points(\n            system_entity_addresses.handle_payment(),\n            SystemEntityType::HandlePayment,\n        )?;\n\n        Ok(())\n    }\n\n    /// Refresh the system contracts with an updated set of entry points,\n    /// and bump the contract version at a major version upgrade.\n    fn migrate_or_refresh_system_entity_entry_points(\n        &mut self,\n        hash_addr: HashAddr,\n        system_entity_type: SystemEntityType,\n    ) -> Result<(), ProtocolUpgradeError> {\n        debug!(%system_entity_type, \"refresh system contract entry points\");\n        let entity_name = system_entity_type.entity_name();\n\n        let (mut entity, maybe_named_keys, must_carry_forward) =\n            match self.retrieve_system_entity(hash_addr, system_entity_type) {\n                Ok(ret) => ret,\n                Err(err) => {\n                    error!(\"{:?}\", err);\n                    return Err(err);\n                }\n            };\n\n        let mut package =\n            self.retrieve_system_package(entity.package_hash(), system_entity_type)?;\n\n        let entity_hash = AddressableEntityHash::new(hash_addr);\n        let entity_addr = EntityAddr::new_system(entity_hash.value());\n        package.disable_entity_version(entity_addr).map_err(|_| {\n            ProtocolUpgradeError::FailedToDisablePreviousVersion(entity_name.to_string())\n        })?;\n\n        entity.set_protocol_version(self.config.new_protocol_version());\n\n        let new_entity = AddressableEntity::new(\n            entity.package_hash(),\n            ByteCodeHash::default(),\n            self.config.new_protocol_version(),\n            
URef::default(),\n            AssociatedKeys::default(),\n            ActionThresholds::default(),\n            EntityKind::System(system_entity_type),\n        );\n\n        let byte_code_key = Key::byte_code_key(ByteCodeAddr::Empty);\n        let byte_code = ByteCode::new(ByteCodeKind::Empty, vec![]);\n\n        self.tracking_copy\n            .write(byte_code_key, StoredValue::ByteCode(byte_code));\n\n        let entity_key = new_entity.entity_key(entity_hash);\n\n        self.tracking_copy\n            .write(entity_key, StoredValue::AddressableEntity(new_entity));\n\n        if let Some(named_keys) = maybe_named_keys {\n            for (string, key) in named_keys.into_inner().into_iter() {\n                let entry_addr = NamedKeyAddr::new_from_string(entity_addr, string.clone())\n                    .map_err(|err| ProtocolUpgradeError::Bytesrepr(err.to_string()))?;\n\n                let entry_key = Key::NamedKey(entry_addr);\n\n                let named_key_value = NamedKeyValue::from_concrete_values(key, string)\n                    .map_err(|error| ProtocolUpgradeError::CLValue(error.to_string()))?;\n\n                self.tracking_copy\n                    .write(entry_key, StoredValue::NamedKey(named_key_value));\n            }\n        }\n\n        let entry_points = system_entity_type.entry_points();\n\n        for entry_point in entry_points.take_entry_points() {\n            let entry_point_addr =\n                EntryPointAddr::new_v1_entry_point_addr(entity_addr, entry_point.name())\n                    .map_err(|error| ProtocolUpgradeError::Bytesrepr(error.to_string()))?;\n            self.tracking_copy.write(\n                Key::EntryPoint(entry_point_addr),\n                StoredValue::EntryPoint(EntryPointValue::V1CasperVm(entry_point)),\n            );\n        }\n\n        package.insert_entity_version(\n            self.config.new_protocol_version().value().major,\n            entity_addr,\n        );\n\n        
self.tracking_copy.write(\n            Key::SmartContract(entity.package_hash().value()),\n            StoredValue::SmartContract(package),\n        );\n\n        if must_carry_forward {\n            // carry forward\n            let package_key = Key::SmartContract(entity.package_hash().value());\n            let uref = URef::default();\n            let indirection = CLValue::from_t((package_key, uref))\n                .map_err(|cl_error| ProtocolUpgradeError::CLValue(cl_error.to_string()))?;\n\n            self.tracking_copy.write(\n                Key::Hash(entity.package_hash().value()),\n                StoredValue::CLValue(indirection),\n            );\n\n            let contract_wasm_key = Key::Hash(entity.byte_code_hash().value());\n            let contract_wasm_indirection = CLValue::from_t(Key::ByteCode(ByteCodeAddr::Empty))\n                .map_err(|cl_error| ProtocolUpgradeError::CLValue(cl_error.to_string()))?;\n            self.tracking_copy.write(\n                contract_wasm_key,\n                StoredValue::CLValue(contract_wasm_indirection),\n            );\n\n            let contract_indirection = CLValue::from_t(Key::AddressableEntity(entity_addr))\n                .map_err(|cl_error| ProtocolUpgradeError::CLValue(cl_error.to_string()))?;\n\n            self.tracking_copy.write(\n                Key::Hash(entity_addr.value()),\n                StoredValue::CLValue(contract_indirection),\n            )\n        }\n\n        Ok(())\n    }\n\n    fn retrieve_system_package(\n        &mut self,\n        package_hash: PackageHash,\n        system_contract_type: SystemEntityType,\n    ) -> Result<Package, ProtocolUpgradeError> {\n        debug!(%system_contract_type, \"retrieve system package\");\n        if let Some(StoredValue::SmartContract(system_entity)) = self\n            .tracking_copy\n            .read(&Key::SmartContract(package_hash.value()))\n            .map_err(|_| {\n                
ProtocolUpgradeError::UnableToRetrieveSystemContractPackage(\n                    system_contract_type.to_string(),\n                )\n            })?\n        {\n            return Ok(system_entity);\n        }\n\n        if let Some(StoredValue::ContractPackage(contract_package)) = self\n            .tracking_copy\n            .read(&Key::Hash(package_hash.value()))\n            .map_err(|_| {\n                ProtocolUpgradeError::UnableToRetrieveSystemContractPackage(\n                    system_contract_type.to_string(),\n                )\n            })?\n        {\n            let versions: BTreeMap<EntityVersionKey, EntityAddr> = contract_package\n                .versions()\n                .iter()\n                .map(|(version, contract_hash)| {\n                    let entity_version = EntityVersionKey::new(2, version.contract_version());\n                    let entity_hash = EntityAddr::System(contract_hash.value());\n                    (entity_version, entity_hash)\n                })\n                .collect();\n\n            let disabled_versions = contract_package\n                .disabled_versions()\n                .iter()\n                .map(|contract_versions| {\n                    EntityVersionKey::new(\n                        contract_versions.protocol_version_major(),\n                        contract_versions.contract_version(),\n                    )\n                })\n                .collect();\n\n            let lock_status = if contract_package.lock_status() == ContractPackageStatus::Locked {\n                PackageStatus::Locked\n            } else {\n                PackageStatus::Unlocked\n            };\n\n            let groups = contract_package.take_groups();\n            return Ok(Package::new(\n                versions.into(),\n                disabled_versions,\n                groups,\n                lock_status,\n            ));\n        }\n\n        
Err(ProtocolUpgradeError::UnableToRetrieveSystemContractPackage(\n            system_contract_type.to_string(),\n        ))\n    }\n\n    fn retrieve_system_entity(\n        &mut self,\n        hash_addr: HashAddr,\n        system_contract_type: SystemEntityType,\n    ) -> Result<(AddressableEntity, Option<NamedKeys>, bool), ProtocolUpgradeError> {\n        debug!(%system_contract_type, \"retrieve system entity\");\n        if let Some(StoredValue::Contract(system_contract)) = self\n            .tracking_copy\n            .read(&Key::Hash(hash_addr))\n            .map_err(|_| {\n                ProtocolUpgradeError::UnableToRetrieveSystemContract(\n                    system_contract_type.to_string(),\n                )\n            })?\n        {\n            let named_keys = system_contract.named_keys().clone();\n            return Ok((system_contract.into(), Some(named_keys), CARRY_FORWARD));\n        }\n\n        if let Some(StoredValue::AddressableEntity(system_entity)) = self\n            .tracking_copy\n            .read(&Key::AddressableEntity(EntityAddr::new_system(hash_addr)))\n            .map_err(|_| {\n                ProtocolUpgradeError::UnableToRetrieveSystemContract(\n                    system_contract_type.to_string(),\n                )\n            })?\n        {\n            return Ok((system_entity, None, NO_CARRY_FORWARD));\n        }\n\n        Err(ProtocolUpgradeError::UnableToRetrieveSystemContract(\n            system_contract_type.to_string(),\n        ))\n    }\n\n    /// Refresh the system contracts with an updated set of entry points,\n    /// and bump the contract version at a major version upgrade.\n    fn refresh_system_contract_entry_points(\n        &mut self,\n        contract_hash: HashAddr,\n        system_entity_type: SystemEntityType,\n    ) -> Result<(), ProtocolUpgradeError> {\n        let contract_name = system_entity_type.entity_name();\n        let entry_points = system_entity_type.entry_points();\n\n        let mut 
contract = if let StoredValue::Contract(contract) = self\n            .tracking_copy\n            .read(&Key::Hash(contract_hash))\n            .map_err(|_| {\n                ProtocolUpgradeError::UnableToRetrieveSystemContract(contract_name.to_string())\n            })?\n            .ok_or_else(|| {\n                ProtocolUpgradeError::UnableToRetrieveSystemContract(contract_name.to_string())\n            })? {\n            contract\n        } else {\n            return Err(ProtocolUpgradeError::UnableToRetrieveSystemContract(\n                contract_name,\n            ));\n        };\n\n        let is_major_bump = self\n            .config\n            .current_protocol_version()\n            .check_next_version(&self.config.new_protocol_version())\n            .is_major_version();\n\n        let contract_entry_points: EntryPoints = contract.entry_points().clone().into();\n        let entry_points_unchanged = contract_entry_points == entry_points;\n        if entry_points_unchanged && !is_major_bump {\n            // We don't need to do anything if entry points are unchanged, or there's no major\n            // version bump.\n            return Ok(());\n        }\n\n        let contract_package_key = Key::Hash(contract.contract_package_hash().value());\n\n        let mut contract_package = if let StoredValue::ContractPackage(contract_package) = self\n            .tracking_copy\n            .read(&contract_package_key)\n            .map_err(|_| {\n                ProtocolUpgradeError::UnableToRetrieveSystemContractPackage(\n                    contract_name.to_string(),\n                )\n            })?\n            .ok_or_else(|| {\n                ProtocolUpgradeError::UnableToRetrieveSystemContractPackage(\n                    contract_name.to_string(),\n                )\n            })? 
{\n            contract_package\n        } else {\n            return Err(ProtocolUpgradeError::UnableToRetrieveSystemContractPackage(\n                contract_name,\n            ));\n        };\n\n        contract.set_protocol_version(self.config.new_protocol_version());\n\n        let new_contract = Contract::new(\n            contract.contract_package_hash(),\n            contract.contract_wasm_hash(),\n            contract.named_keys().clone(),\n            entry_points.into(),\n            self.config.new_protocol_version(),\n        );\n        self.tracking_copy.write(\n            Key::Hash(contract_hash),\n            StoredValue::Contract(new_contract),\n        );\n\n        contract_package.insert_contract_version(\n            self.config.new_protocol_version().value().major,\n            ContractHash::new(contract_hash),\n        );\n\n        self.tracking_copy.write(\n            contract_package_key,\n            StoredValue::ContractPackage(contract_package),\n        );\n\n        Ok(())\n    }\n\n    /// Migrate the system account to addressable entity if necessary.\n    pub fn migrate_system_account(\n        &mut self,\n        pre_state_hash: Digest,\n    ) -> Result<(), ProtocolUpgradeError> {\n        debug!(\"migrate system account\");\n        let mut address_generator = AddressGenerator::new(pre_state_hash.as_ref(), Phase::System);\n\n        let account_hash = PublicKey::System.to_account_hash();\n\n        let main_purse = {\n            let purse_addr = address_generator.new_hash_address();\n            let balance_cl_value = CLValue::from_t(U512::zero())\n                .map_err(|error| ProtocolUpgradeError::CLValue(error.to_string()))?;\n\n            self.tracking_copy.write(\n                Key::Balance(purse_addr),\n                StoredValue::CLValue(balance_cl_value),\n            );\n\n            let purse_cl_value = CLValue::unit();\n            let purse_uref = URef::new(purse_addr, AccessRights::READ_ADD_WRITE);\n\n    
        self.tracking_copy\n                .write(Key::URef(purse_uref), StoredValue::CLValue(purse_cl_value));\n            purse_uref\n        };\n\n        let associated_keys = AssociatedKeys::new(account_hash, Weight::new(1));\n        let byte_code_hash = ByteCodeHash::default();\n        let entity_hash = AddressableEntityHash::new(PublicKey::System.to_account_hash().value());\n        let package_hash = PackageHash::new(address_generator.new_hash_address());\n\n        let byte_code = ByteCode::new(ByteCodeKind::Empty, vec![]);\n\n        let system_account_entity = AddressableEntity::new(\n            package_hash,\n            byte_code_hash,\n            self.config.new_protocol_version(),\n            main_purse,\n            associated_keys,\n            ActionThresholds::default(),\n            EntityKind::Account(account_hash),\n        );\n\n        let package = {\n            let mut package = Package::new(\n                EntityVersions::default(),\n                BTreeSet::default(),\n                Groups::default(),\n                PackageStatus::default(),\n            );\n            package.insert_entity_version(\n                self.config.new_protocol_version().value().major,\n                EntityAddr::Account(entity_hash.value()),\n            );\n            package\n        };\n\n        let byte_code_key = Key::ByteCode(ByteCodeAddr::Empty);\n        self.tracking_copy\n            .write(byte_code_key, StoredValue::ByteCode(byte_code));\n\n        let entity_key = system_account_entity.entity_key(entity_hash);\n\n        self.tracking_copy.write(\n            entity_key,\n            StoredValue::AddressableEntity(system_account_entity),\n        );\n\n        self.tracking_copy\n            .write(package_hash.into(), StoredValue::SmartContract(package));\n\n        let contract_by_account = CLValue::from_t(entity_key)\n            .map_err(|error| ProtocolUpgradeError::CLValue(error.to_string()))?;\n\n        
self.tracking_copy.write(\n            Key::Account(account_hash),\n            StoredValue::CLValue(contract_by_account),\n        );\n\n        Ok(())\n    }\n\n    /// Creates an accumulation purse in the handle payment system contract if its not present.\n    ///\n    /// This can happen on older networks that did not have support for [`FeeHandling::Accumulate`]\n    /// at the genesis. In such cases we have to check the state of handle payment contract and\n    /// create an accumulation purse.\n    pub fn create_accumulation_purse_if_required(\n        &mut self,\n        handle_payment_hash: &HashAddr,\n        fee_handling: FeeHandling,\n    ) -> Result<(), ProtocolUpgradeError> {\n        debug!(?fee_handling, \"create accumulation purse if required\");\n        match fee_handling {\n            FeeHandling::PayToProposer | FeeHandling::Burn => return Ok(()),\n            FeeHandling::Accumulate | FeeHandling::NoFee => {}\n        }\n        let mut address_generator = {\n            let seed_bytes = (\n                self.config.current_protocol_version(),\n                self.config.new_protocol_version(),\n            )\n                .to_bytes()?;\n            let phase = Phase::System;\n            AddressGenerator::new(&seed_bytes, phase)\n        };\n        let system_contract = SystemEntityType::HandlePayment;\n\n        let (addressable_entity, maybe_named_keys, _) =\n            self.retrieve_system_entity(*handle_payment_hash, system_contract)?;\n\n        let entity_addr = EntityAddr::new_system(*handle_payment_hash);\n\n        if let Some(named_keys) = maybe_named_keys {\n            for (string, key) in named_keys.into_inner().into_iter() {\n                let entry_addr = NamedKeyAddr::new_from_string(entity_addr, string.clone())\n                    .map_err(|err| ProtocolUpgradeError::Bytesrepr(err.to_string()))?;\n\n                let named_key_value = NamedKeyValue::from_concrete_values(key, string)\n                    
.map_err(|error| ProtocolUpgradeError::CLValue(error.to_string()))?;\n\n                let entry_key = Key::NamedKey(entry_addr);\n\n                self.tracking_copy\n                    .write(entry_key, StoredValue::NamedKey(named_key_value));\n            }\n        }\n\n        let named_key_addr =\n            NamedKeyAddr::new_from_string(entity_addr, ACCUMULATION_PURSE_KEY.to_string())\n                .map_err(|err| ProtocolUpgradeError::Bytesrepr(err.to_string()))?;\n\n        let requries_accumulation_purse = self\n            .tracking_copy\n            .read(&Key::NamedKey(named_key_addr))\n            .map_err(|_| ProtocolUpgradeError::UnexpectedStoredValueVariant)?\n            .is_none();\n\n        if requries_accumulation_purse {\n            let purse_uref = address_generator.new_uref(AccessRights::READ_ADD_WRITE);\n            let balance_clvalue = CLValue::from_t(U512::zero())?;\n            self.tracking_copy.write(\n                Key::Balance(purse_uref.addr()),\n                StoredValue::CLValue(balance_clvalue),\n            );\n\n            let purse_key = Key::URef(purse_uref);\n\n            self.tracking_copy\n                .write(purse_key, StoredValue::CLValue(CLValue::unit()));\n\n            let purse =\n                NamedKeyValue::from_concrete_values(purse_key, ACCUMULATION_PURSE_KEY.to_string())\n                    .map_err(|cl_error| ProtocolUpgradeError::CLValue(cl_error.to_string()))?;\n\n            self.tracking_copy\n                .write(Key::NamedKey(named_key_addr), StoredValue::NamedKey(purse));\n\n            let entity_key = Key::AddressableEntity(EntityAddr::System(*handle_payment_hash));\n\n            self.tracking_copy.write(\n                entity_key,\n                StoredValue::AddressableEntity(addressable_entity),\n            );\n        }\n\n        Ok(())\n    }\n\n    /// Creates an accumulation purse in the handle payment system contract if its not present.\n    ///\n    /// This can 
happen on older networks that did not have support for [`FeeHandling::Accumulate`]\n    /// at the genesis. In such cases we have to check the state of handle payment contract and\n    /// create an accumulation purse.\n    pub fn create_accumulation_purse_if_required_by_contract(\n        &mut self,\n        handle_payment_hash: &HashAddr,\n        fee_handling: FeeHandling,\n    ) -> Result<(), ProtocolUpgradeError> {\n        match fee_handling {\n            FeeHandling::PayToProposer | FeeHandling::Burn => return Ok(()),\n            FeeHandling::Accumulate | FeeHandling::NoFee => {}\n        }\n\n        let mut address_generator = {\n            let seed_bytes = (\n                self.config.current_protocol_version(),\n                self.config.new_protocol_version(),\n            )\n                .to_bytes()?;\n\n            let phase = Phase::System;\n\n            AddressGenerator::new(&seed_bytes, phase)\n        };\n\n        let system_contract = SystemEntityType::HandlePayment;\n        let contract_name = system_contract.entity_name();\n        let mut contract = if let StoredValue::Contract(contract) = self\n            .tracking_copy\n            .read(&Key::Hash(*handle_payment_hash))\n            .map_err(|_| {\n                ProtocolUpgradeError::UnableToRetrieveSystemContract(contract_name.to_string())\n            })?\n            .ok_or_else(|| {\n                ProtocolUpgradeError::UnableToRetrieveSystemContract(contract_name.to_string())\n            })? 
{\n            contract\n        } else {\n            return Err(ProtocolUpgradeError::UnableToRetrieveSystemContract(\n                contract_name,\n            ));\n        };\n\n        if !contract.named_keys().contains(ACCUMULATION_PURSE_KEY) {\n            let purse_uref = address_generator.new_uref(AccessRights::READ_ADD_WRITE);\n            let balance_clvalue = CLValue::from_t(U512::zero())?;\n            self.tracking_copy.write(\n                Key::Balance(purse_uref.addr()),\n                StoredValue::CLValue(balance_clvalue),\n            );\n            self.tracking_copy\n                .write(Key::URef(purse_uref), StoredValue::CLValue(CLValue::unit()));\n\n            let mut new_named_keys = NamedKeys::new();\n            new_named_keys.insert(ACCUMULATION_PURSE_KEY.into(), Key::from(purse_uref));\n            contract.named_keys_append(new_named_keys);\n\n            self.tracking_copy.write(\n                Key::Hash(*handle_payment_hash),\n                StoredValue::Contract(contract),\n            );\n        }\n\n        Ok(())\n    }\n\n    fn get_named_keys(\n        &mut self,\n        contract_hash: HashAddr,\n    ) -> Result<NamedKeys, ProtocolUpgradeError> {\n        if self.config.enable_addressable_entity() {\n            let named_keys = self\n                .tracking_copy\n                .get_named_keys(EntityAddr::System(contract_hash))?;\n            Ok(named_keys)\n        } else {\n            let named_keys = self\n                .tracking_copy\n                .read(&Key::Hash(contract_hash))?\n                .ok_or_else(|| {\n                    ProtocolUpgradeError::UnableToRetrieveSystemContract(format!(\n                        \"{:?}\",\n                        contract_hash\n                    ))\n                })?\n                .as_contract()\n                .map(|contract| contract.named_keys().clone())\n                .ok_or(ProtocolUpgradeError::UnexpectedStoredValueVariant)?;\n\n            
Ok(named_keys)\n        }\n    }\n\n    /// Check payment purse balance.\n    pub fn handle_payment_purse_check(\n        &mut self,\n        handle_payment: HashAddr,\n        mint: HashAddr,\n    ) -> Result<(), ProtocolUpgradeError> {\n        let payment_named_keys = self.get_named_keys(handle_payment)?;\n        let payment_purse_key = payment_named_keys\n            .get(PAYMENT_PURSE_KEY)\n            .expect(\"payment purse key must exist in handle payment contract's named keys\");\n        let balance = self\n            .tracking_copy\n            .get_total_balance(*payment_purse_key)\n            .expect(\"must be able to get payment purse balance\");\n        if balance <= Motes::zero() {\n            return Ok(());\n        }\n        warn!(\"payment purse had remaining balance at upgrade {}\", balance);\n        let balance_key = {\n            let uref_addr = payment_purse_key\n                .as_uref()\n                .expect(\"payment purse key must be uref.\")\n                .addr();\n            Key::Balance(uref_addr)\n        };\n\n        let mint_named_keys = self.get_named_keys(mint)?;\n        let total_supply_key = mint_named_keys\n            .get(TOTAL_SUPPLY_KEY)\n            .expect(\"total supply key must exist in mint contract's named keys\");\n\n        let stored_value = self\n            .tracking_copy\n            .read(total_supply_key)\n            .expect(\"must be able to read total supply\")\n            .expect(\"total supply must have a value\");\n\n        // by convention, we only store CLValues under Key::URef\n        if let StoredValue::CLValue(value) = stored_value {\n            // Only CLTyped instances should be stored as a CLValue.\n            let total_supply: U512 =\n                CLValue::into_t(value).expect(\"total supply must have expected type.\");\n\n            let new_total_supply = total_supply.saturating_sub(balance.value());\n            info!(\n                \"adjusting total supply from 
{} to {}\",\n                total_supply, new_total_supply\n            );\n            let cl_value = CLValue::from_t(new_total_supply)\n                .expect(\"new total supply must convert to CLValue.\");\n            self.tracking_copy\n                .write(*total_supply_key, StoredValue::CLValue(cl_value));\n            info!(\n                \"adjusting payment purse balance from {} to {}\",\n                balance.value(),\n                U512::zero()\n            );\n            let cl_value = CLValue::from_t(U512::zero()).expect(\"zero must convert to CLValue.\");\n            self.tracking_copy\n                .write(balance_key, StoredValue::CLValue(cl_value));\n            Ok(())\n        } else {\n            Err(ProtocolUpgradeError::CLValue(\n                \"failure to retrieve total supply\".to_string(),\n            ))\n        }\n    }\n\n    /// Upsert gas hold interval to mint named keys.\n    pub fn handle_new_gas_hold_config(\n        &mut self,\n        mint: HashAddr,\n    ) -> Result<(), ProtocolUpgradeError> {\n        if self.config.new_gas_hold_handling().is_none()\n            && self.config.new_gas_hold_interval().is_none()\n        {\n            return Ok(());\n        }\n\n        let mint_addr = EntityAddr::System(mint);\n        let named_keys = self.get_named_keys(mint)?;\n\n        if let Some(new_gas_hold_handling) = self.config.new_gas_hold_handling() {\n            debug!(%new_gas_hold_handling, \"handle new gas hold handling\");\n            let stored_value =\n                StoredValue::CLValue(CLValue::from_t(new_gas_hold_handling.tag()).map_err(\n                    |_| ProtocolUpgradeError::Bytesrepr(\"new_gas_hold_handling\".to_string()),\n                )?);\n\n            self.system_uref(\n                mint_addr,\n                MINT_GAS_HOLD_HANDLING_KEY,\n                &named_keys,\n                stored_value,\n            )?;\n        }\n\n        if let Some(new_gas_hold_interval) = 
self.config.new_gas_hold_interval() {\n            debug!(%new_gas_hold_interval, \"handle new gas hold interval\");\n            let stored_value =\n                StoredValue::CLValue(CLValue::from_t(new_gas_hold_interval).map_err(|_| {\n                    ProtocolUpgradeError::Bytesrepr(\"new_gas_hold_interval\".to_string())\n                })?);\n\n            self.system_uref(\n                mint_addr,\n                MINT_GAS_HOLD_INTERVAL_KEY,\n                &named_keys,\n                stored_value,\n            )?;\n        }\n        Ok(())\n    }\n\n    fn system_uref(\n        &mut self,\n        entity_addr: EntityAddr,\n        name: &str,\n        named_keys: &NamedKeys,\n        stored_value: StoredValue,\n    ) -> Result<(), ProtocolUpgradeError> {\n        let uref = {\n            match named_keys.get(name) {\n                Some(key) => match key.as_uref() {\n                    Some(uref) => *uref,\n                    None => {\n                        return Err(ProtocolUpgradeError::UnexpectedKeyVariant);\n                    }\n                },\n                None => self\n                    .address_generator\n                    .borrow_mut()\n                    .new_uref(AccessRights::READ_ADD_WRITE),\n            }\n        };\n        self.tracking_copy\n            .upsert_uref_to_named_keys(entity_addr, name, named_keys, uref, stored_value)\n            .map_err(ProtocolUpgradeError::TrackingCopy)\n    }\n\n    /// Handle new validator slots.\n    pub fn handle_new_validator_slots(\n        &mut self,\n        auction: HashAddr,\n    ) -> Result<(), ProtocolUpgradeError> {\n        if let Some(new_validator_slots) = self.config.new_validator_slots() {\n            debug!(%new_validator_slots, \"handle new validator slots\");\n            // if new total validator slots is provided, update auction contract state\n            let auction_named_keys = self.get_named_keys(auction)?;\n\n            let validator_slots_key 
= auction_named_keys\n                .get(VALIDATOR_SLOTS_KEY)\n                .expect(\"validator_slots key must exist in auction contract's named keys\");\n            let value =\n                StoredValue::CLValue(CLValue::from_t(new_validator_slots).map_err(|_| {\n                    ProtocolUpgradeError::Bytesrepr(\"new_validator_slots\".to_string())\n                })?);\n            self.tracking_copy.write(*validator_slots_key, value);\n        }\n        Ok(())\n    }\n\n    /// Applies the necessary changes if a new auction delay is part of the upgrade.\n    pub fn handle_new_auction_delay(\n        &mut self,\n        auction: HashAddr,\n    ) -> Result<(), ProtocolUpgradeError> {\n        if let Some(new_auction_delay) = self.config.new_auction_delay() {\n            debug!(%new_auction_delay, \"handle new auction delay\");\n            let auction_named_keys = self.get_named_keys(auction)?;\n\n            let auction_delay_key = auction_named_keys\n                .get(AUCTION_DELAY_KEY)\n                .expect(\"auction_delay key must exist in auction contract's named keys\");\n            let value =\n                StoredValue::CLValue(CLValue::from_t(new_auction_delay).map_err(|_| {\n                    ProtocolUpgradeError::Bytesrepr(\"new_auction_delay\".to_string())\n                })?);\n            self.tracking_copy.write(*auction_delay_key, value);\n        }\n        Ok(())\n    }\n\n    /// Applies the necessary changes if a new locked funds period is part of the upgrade.\n    pub fn handle_new_locked_funds_period_millis(\n        &mut self,\n        auction: HashAddr,\n    ) -> Result<(), ProtocolUpgradeError> {\n        if let Some(new_locked_funds_period) = self.config.new_locked_funds_period_millis() {\n            debug!(%new_locked_funds_period,\"handle new locked funds period millis\");\n\n            let auction_named_keys = self.get_named_keys(auction)?;\n\n            let locked_funds_period_key = auction_named_keys\n    
            .get(LOCKED_FUNDS_PERIOD_KEY)\n                .expect(\"locked_funds_period key must exist in auction contract's named keys\");\n            let value =\n                StoredValue::CLValue(CLValue::from_t(new_locked_funds_period).map_err(|_| {\n                    ProtocolUpgradeError::Bytesrepr(\"new_locked_funds_period\".to_string())\n                })?);\n            self.tracking_copy.write(*locked_funds_period_key, value);\n        }\n        Ok(())\n    }\n\n    /// Applies the necessary changes if a new unbonding delay is part of the upgrade.\n    pub fn handle_new_unbonding_delay(\n        &mut self,\n        auction: HashAddr,\n    ) -> Result<(), ProtocolUpgradeError> {\n        // We insert the new unbonding delay once the purses to be paid out have been transformed\n        // based on the previous unbonding delay.\n        if let Some(new_unbonding_delay) = self.config.new_unbonding_delay() {\n            debug!(%new_unbonding_delay,\"handle new unbonding delay\");\n\n            let auction_named_keys = self.get_named_keys(auction)?;\n\n            let unbonding_delay_key = auction_named_keys\n                .get(UNBONDING_DELAY_KEY)\n                .expect(\"unbonding_delay key must exist in auction contract's named keys\");\n            let value =\n                StoredValue::CLValue(CLValue::from_t(new_unbonding_delay).map_err(|_| {\n                    ProtocolUpgradeError::Bytesrepr(\"new_unbonding_delay\".to_string())\n                })?);\n            self.tracking_copy.write(*unbonding_delay_key, value);\n        }\n        Ok(())\n    }\n\n    /// Applies the necessary changes if a new round seigniorage rate is part of the upgrade.\n    pub fn handle_new_round_seigniorage_rate(\n        &mut self,\n        mint: HashAddr,\n    ) -> Result<(), ProtocolUpgradeError> {\n        if let Some(new_round_seigniorage_rate) = self.config.new_round_seigniorage_rate() {\n            debug!(%new_round_seigniorage_rate,\"handle new 
round seigniorage rate\");\n            let new_round_seigniorage_rate: Ratio<U512> = {\n                let (numer, denom) = new_round_seigniorage_rate.into();\n                Ratio::new(numer.into(), denom.into())\n            };\n\n            let mint_named_keys = self.get_named_keys(mint)?;\n\n            let locked_funds_period_key = mint_named_keys\n                .get(ROUND_SEIGNIORAGE_RATE_KEY)\n                .expect(\"round_seigniorage_rate key must exist in mint contract's named keys\");\n            let value = StoredValue::CLValue(CLValue::from_t(new_round_seigniorage_rate).map_err(\n                |_| ProtocolUpgradeError::Bytesrepr(\"new_round_seigniorage_rate\".to_string()),\n            )?);\n            self.tracking_copy.write(*locked_funds_period_key, value);\n        }\n        Ok(())\n    }\n\n    /// Handle unbonds migration.\n    pub fn handle_unbonds_migration(&mut self) -> Result<(), ProtocolUpgradeError> {\n        debug!(\"handle unbonds migration\");\n        let tc = &mut self.tracking_copy;\n        let existing_keys = match tc.get_keys(&KeyTag::Unbond) {\n            Ok(keys) => keys,\n            Err(err) => return Err(ProtocolUpgradeError::TrackingCopy(err)),\n        };\n        for key in existing_keys {\n            if let Some(StoredValue::Unbonding(unbonding_purses)) =\n                tc.get(&key).map_err(Into::<ProtocolUpgradeError>::into)?\n            {\n                // prune away the original record, we don't need it anymore\n                tc.prune(key);\n\n                // re-write records under Key::BidAddr , StoredValue::BidKind\n                for unbonding_purse in unbonding_purses {\n                    let validator = unbonding_purse.validator_public_key();\n                    let unbonder = unbonding_purse.unbonder_public_key();\n                    let new_key = Key::BidAddr(BidAddr::UnbondAccount {\n                        validator: validator.to_account_hash(),\n                        unbonder: 
unbonder.to_account_hash(),\n                    });\n                    let unbond = Box::new(Unbond::from(unbonding_purse));\n                    let unbond_bid_kind = BidKind::Unbond(unbond.clone());\n                    if !unbond.eras().is_empty() {\n                        tc.write(new_key, StoredValue::BidKind(unbond_bid_kind));\n                    }\n                }\n            }\n        }\n\n        Ok(())\n    }\n\n    /// Handle bids migration.\n    pub fn handle_bids_migration(\n        &mut self,\n        validator_minimum: u64,\n        validator_delegation_minimum: u64,\n        validator_delegation_maximum: u64,\n        auction_hash: HashAddr,\n    ) -> Result<(), ProtocolUpgradeError> {\n        if validator_delegation_maximum < validator_delegation_minimum {\n            return Err(ProtocolUpgradeError::InvalidUpgradeConfig);\n        }\n        debug!(\"handle bids migration\");\n        let tc = &mut self.tracking_copy;\n        let existing_bid_keys = match tc.get_keys(&KeyTag::Bid) {\n            Ok(keys) => keys,\n            Err(err) => return Err(ProtocolUpgradeError::TrackingCopy(err)),\n        };\n        for key in existing_bid_keys {\n            if let Some(StoredValue::Bid(existing_bid)) =\n                tc.get(&key).map_err(Into::<ProtocolUpgradeError>::into)?\n            {\n                // prune away the original record, we don't need it anymore\n                tc.prune(key);\n\n                if existing_bid.staked_amount().is_zero() {\n                    // the previous logic enforces unbonding all delegators of\n                    // a validator that reduced their personal stake to 0 (and we have\n                    // various existent tests that prove this), thus there is no need\n                    // to handle the complicated hypothetical case of one or more\n                    // delegator stakes being > 0 if the validator stake is 0.\n                    //\n                    // tl;dr this is a 
\"zombie\" bid and we don't need to continue\n                    // carrying it forward at tip.\n                    continue;\n                }\n\n                let validator_public_key = existing_bid.validator_public_key();\n                let validator_bid_addr = BidAddr::from(validator_public_key.clone());\n                let validator_bid = {\n                    let validator_bid = ValidatorBid::from(*existing_bid.clone());\n                    let inactive = validator_bid.staked_amount() < U512::from(validator_minimum);\n                    validator_bid\n                        .with_inactive(inactive)\n                        .with_min_max_delegation_amount(\n                            validator_delegation_maximum,\n                            validator_delegation_minimum,\n                        )\n                };\n                tc.write(\n                    validator_bid_addr.into(),\n                    StoredValue::BidKind(BidKind::Validator(Box::new(validator_bid))),\n                );\n\n                let delegators = existing_bid.delegators().clone();\n                for (_, delegator) in delegators {\n                    let delegator_bid_addr = BidAddr::new_delegator_kind(\n                        validator_public_key,\n                        &DelegatorKind::PublicKey(delegator.delegator_public_key().clone()),\n                    );\n                    // the previous code was removing a delegator bid from the embedded\n                    // collection within their validator's bid when the delegator fully\n                    // unstaked, so technically we don't need to check for 0 balance here.\n                    // However, since it is low effort to check, doing it just to be sure.\n                    if !delegator.staked_amount().is_zero() {\n                        tc.write(\n                            delegator_bid_addr.into(),\n                            
StoredValue::BidKind(BidKind::Delegator(Box::new(DelegatorBid::from(\n                                delegator,\n                            )))),\n                        );\n                    }\n                }\n            }\n        }\n\n        let validator_bid_keys = tc\n            .get_by_byte_prefix(&[KeyTag::BidAddr as u8, BidAddrTag::Validator as u8])\n            .map_err(|_| ProtocolUpgradeError::UnexpectedKeyVariant)?;\n        for validator_bid_key in validator_bid_keys {\n            if let Some(StoredValue::BidKind(BidKind::Validator(validator_bid))) = tc\n                .get(&validator_bid_key)\n                .map_err(Into::<ProtocolUpgradeError>::into)?\n            {\n                let is_bid_inactive = validator_bid.inactive();\n                let has_less_than_validator_minimum =\n                    validator_bid.staked_amount() < U512::from(validator_minimum);\n                if !is_bid_inactive && has_less_than_validator_minimum {\n                    let inactive_bid = validator_bid.with_inactive(true);\n                    info!(\"marking bid inactive {validator_bid_key}\");\n                    tc.write(\n                        validator_bid_key,\n                        StoredValue::BidKind(BidKind::Validator(Box::new(inactive_bid))),\n                    );\n                    continue;\n                }\n\n                let validator_delegation_maximum =\n                    U512::from(validator_bid.maximum_delegation_amount());\n                let validator_delegation_minimum =\n                    U512::from(validator_bid.minimum_delegation_amount());\n\n                // Correct accounts over the max\n                {\n                    let validator_bid_addr = *validator_bid_key\n                        .as_bid_addr()\n                        .ok_or(ProtocolUpgradeError::UnexpectedKeyVariant)?;\n\n                    let prefix = validator_bid_addr\n                        .delegated_account_prefix()\n       
                 .map_err(|_| ProtocolUpgradeError::UnexpectedKeyVariant)?;\n                    let mut delegated_account_keys = tc\n                        .get_by_byte_prefix(&prefix)\n                        .map_err(|_| ProtocolUpgradeError::UnexpectedKeyVariant)?;\n\n                    let prefix = validator_bid_addr\n                        .delegated_purse_prefix()\n                        .map_err(|_| ProtocolUpgradeError::UnexpectedKeyVariant)?;\n                    let mut delegated_purse_keys = tc\n                        .get_by_byte_prefix(&prefix)\n                        .map_err(|_| ProtocolUpgradeError::UnexpectedKeyVariant)?;\n\n                    delegated_account_keys.append(&mut delegated_purse_keys);\n\n                    let mut delegators = vec![];\n                    for delegator_key in delegated_account_keys {\n                        if let Some(StoredValue::BidKind(BidKind::Delegator(delegator_bid))) = tc\n                            .get(&delegator_key)\n                            .map_err(|_| ProtocolUpgradeError::UnexpectedKeyVariant)?\n                        {\n                            delegators.push(*delegator_bid.clone())\n                        }\n                    }\n\n                    for mut delegator in delegators {\n                        let delegator_staked_amount = delegator.staked_amount();\n                        let unbond_amount =\n                            if delegator_staked_amount < validator_delegation_minimum {\n                                // fully unbond the staked amount as it is below the min\n                                delegator_staked_amount\n                            } else if delegator_staked_amount > validator_delegation_maximum {\n                                // partially unbond the staked amount to not exceed the max\n                                delegator_staked_amount.saturating_sub(validator_delegation_maximum)\n                            } else {\n              
                  // nothing to unbond\n                                U512::zero()\n                            };\n                        // skip delegators within the range\n                        if unbond_amount.is_zero() {\n                            continue;\n                        }\n\n                        let unbond_kind = delegator.unbond_kind();\n\n                        let auction_named_keys =\n                            tc.get_named_keys(EntityAddr::System(auction_hash))?;\n                        let era_end = {\n                            let key = auction_named_keys\n                                .get(ERA_END_TIMESTAMP_MILLIS_KEY)\n                                .expect(\"era end key must exist in mint contract's named keys\");\n\n                            tc.read(key)\n                                .map_err(ProtocolUpgradeError::TrackingCopy)?\n                                .ok_or(ProtocolUpgradeError::UnexpectedKeyVariant)?\n                                .as_cl_value()\n                                .ok_or(ProtocolUpgradeError::UnexpectedStoredValueVariant)?\n                                .to_t::<u64>()\n                                .map_err(|err| ProtocolUpgradeError::CLValue(err.to_string()))?\n                        };\n\n                        let current_era = {\n                            let key = auction_named_keys\n                                .get(ERA_ID_KEY)\n                                .expect(\"era end key must exist in mint contract's named keys\");\n\n                            tc.read(key)\n                                .map_err(ProtocolUpgradeError::TrackingCopy)?\n                                .ok_or(ProtocolUpgradeError::UnexpectedKeyVariant)?\n                                .as_cl_value()\n                                .ok_or(ProtocolUpgradeError::UnexpectedStoredValueVariant)?\n                                .to_t::<EraId>()\n                                .map_err(|err| 
ProtocolUpgradeError::CLValue(err.to_string()))?\n                        };\n\n                        let validator_public_key = validator_bid.validator_public_key().clone();\n                        let bid_addr = match &unbond_kind {\n                            UnbondKind::Validator(_) => continue,\n                            UnbondKind::DelegatedPublicKey(pk) => BidAddr::UnbondAccount {\n                                validator: validator_public_key.to_account_hash(),\n                                unbonder: pk.to_account_hash(),\n                            },\n                            UnbondKind::DelegatedPurse(addr) => BidAddr::UnbondPurse {\n                                validator: validator_public_key.to_account_hash(),\n                                unbonder: *addr,\n                            },\n                        };\n\n                        let bonding_purse = *delegator.bonding_purse();\n                        let unbond_era =\n                            UnbondEra::new(bonding_purse, current_era, unbond_amount, None);\n\n                        let unbond = match tc\n                            .read(&Key::BidAddr(bid_addr))\n                            .map_err(ProtocolUpgradeError::TrackingCopy)?\n                        {\n                            Some(StoredValue::BidKind(BidKind::Unbond(unbond))) => {\n                                let mut eras = unbond.take_eras();\n                                eras.push(unbond_era);\n                                Unbond::new(validator_public_key, unbond_kind, eras)\n                            }\n                            Some(_) => continue,\n                            None => {\n                                Unbond::new(validator_public_key, unbond_kind, vec![unbond_era])\n                            }\n                        };\n\n                        tc.write(\n                            Key::BidAddr(bid_addr),\n                            
StoredValue::BidKind(BidKind::Unbond(Box::new(unbond))),\n                        );\n\n                        let updated_stake = match delegator.decrease_stake(unbond_amount, era_end) {\n                            Ok(updated_stake) => updated_stake,\n                            Err(error) => {\n                                error!(\"could not decrease stake for validator; {:?}\", error);\n                                continue;\n                            }\n                        };\n\n                        let delegator_bid_addr = delegator.bid_addr();\n                        if updated_stake.is_zero() {\n                            debug!(\"pruning delegator bid {delegator_bid_addr}\");\n                            tc.prune(Key::BidAddr(delegator_bid_addr));\n                        } else {\n                            debug!(\n                \"forced undelegation for {delegator_bid_addr} reducing {delegator_staked_amount} by {unbond_amount} to {updated_stake}\",\n            );\n                            tc.write(\n                                Key::BidAddr(delegator_bid_addr),\n                                StoredValue::BidKind(BidKind::Delegator(Box::new(delegator))),\n                            );\n                        }\n                    }\n                };\n            }\n        }\n\n        Ok(())\n    }\n\n    /// Handle era info migration.\n    pub fn handle_era_info_migration(&mut self) -> Result<(), ProtocolUpgradeError> {\n        // EraInfo migration\n        if let Some(activation_point) = self.config.activation_point() {\n            // The highest stored era is the immediate predecessor of the activation point.\n            let highest_era_info_id = activation_point.saturating_sub(1);\n            let highest_era_info_key = Key::EraInfo(highest_era_info_id);\n\n            let get_result = self\n                .tracking_copy\n                .get(&highest_era_info_key)\n                
.map_err(ProtocolUpgradeError::TrackingCopy)?;\n\n            match get_result {\n                Some(stored_value @ StoredValue::EraInfo(_)) => {\n                    self.tracking_copy.write(Key::EraSummary, stored_value);\n                }\n                Some(other_stored_value) => {\n                    // This should not happen as we only write EraInfo variants.\n                    error!(stored_value_type_name=%other_stored_value.type_name(),\n                        \"EraInfo key contains unexpected StoredValue variant\");\n                    return Err(ProtocolUpgradeError::UnexpectedStoredValueVariant);\n                }\n                None => {\n                    // Can't find key\n                    // Most likely this chain did not yet run an auction, or recently completed a\n                    // prune\n                }\n            };\n        }\n        Ok(())\n    }\n\n    /// Handle seignorage snapshot migration to new version.\n    pub fn handle_seignorage_snapshot_migration(\n        &mut self,\n        auction: HashAddr,\n    ) -> Result<(), ProtocolUpgradeError> {\n        let auction_named_keys = self.get_named_keys(auction)?;\n        let maybe_snapshot_version_key =\n            auction_named_keys.get(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY);\n        let snapshot_key = auction_named_keys\n            .get(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY)\n            .expect(\"snapshot key should already exist\");\n\n        // if version flag does not exist yet, set it and migrate snapshot\n        if maybe_snapshot_version_key.is_none() {\n            let auction_addr = EntityAddr::new_system(auction);\n\n            // add new snapshot version named key\n            let stored_value = StoredValue::CLValue(CLValue::from_t(\n                DEFAULT_SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION,\n            )?);\n            self.system_uref(\n                auction_addr,\n                SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY,\n      
          &auction_named_keys,\n                stored_value,\n            )?;\n\n            // read legacy snapshot\n            if let Some(snapshot_stored_value) = self.tracking_copy.read(snapshot_key)? {\n                let snapshot_cl_value = match snapshot_stored_value.into_cl_value() {\n                    Some(cl_value) => cl_value,\n                    None => {\n                        error!(\"seigniorage recipients snapshot is not a CLValue\");\n                        return Err(ProtocolUpgradeError::CLValue(\n                            \"seigniorage recipients snapshot is not a CLValue\".to_string(),\n                        ));\n                    }\n                };\n\n                let legacy_snapshot: SeigniorageRecipientsSnapshotV1 =\n                    snapshot_cl_value.into_t()?;\n\n                let mut new_snapshot = SeigniorageRecipientsSnapshotV2::default();\n                for (era_id, recipients) in legacy_snapshot.into_iter() {\n                    let mut new_recipients = SeigniorageRecipientsV2::default();\n                    for (pubkey, recipient) in recipients {\n                        new_recipients.insert(pubkey, recipient.into());\n                    }\n                    new_snapshot.insert(era_id, new_recipients);\n                }\n\n                // store new snapshot\n                self.tracking_copy.write(\n                    *snapshot_key,\n                    StoredValue::CLValue(CLValue::from_t(new_snapshot)?),\n                );\n            };\n        }\n\n        Ok(())\n    }\n\n    /// Handle total supply calculation.\n    pub fn handle_total_supply_calc(&mut self, mint: HashAddr) -> Result<(), ProtocolUpgradeError> {\n        debug!(\"handle total supply calculation\");\n        let mint_named_keys = self.get_named_keys(mint)?;\n        let tc = &mut self.tracking_copy;\n        let total_supply_key = mint_named_keys\n            .get(TOTAL_SUPPLY_KEY)\n            .expect(\"total supply key 
must exist in mint contract's named keys\");\n\n        let total_supply = match tc.read(total_supply_key) {\n            Ok(Some(StoredValue::CLValue(cl_value))) => match cl_value.into_t::<U512>() {\n                Ok(total_supply) => total_supply,\n                Err(cve) => {\n                    warn!(\"total_supply {} not a U512; {}\", total_supply_key, cve);\n                    return Err(ProtocolUpgradeError::CLValue(\n                        \"total supply is not U512\".to_string(),\n                    ));\n                }\n            },\n            Ok(Some(_)) => {\n                error!(\"total supply is unexpected stored value type\");\n                return Err(ProtocolUpgradeError::CLValue(\n                    \"total supply is unexpected stored value type\".to_string(),\n                ));\n            }\n            Ok(None) => {\n                error!(\"total supply missing\");\n                return Err(ProtocolUpgradeError::CLValue(\n                    \"total supply missing\".to_string(),\n                ));\n            }\n            Err(err) => {\n                error!(\"failure to retrieve total supply: {}\", err);\n                return Err(ProtocolUpgradeError::CLValue(\n                    \"failure to retrieve total supply\".to_string(),\n                ));\n            }\n        };\n\n        let balance_keys = match tc.get_keys(&KeyTag::Balance) {\n            Ok(keys) => keys,\n            Err(err) => return Err(ProtocolUpgradeError::TrackingCopy(err)),\n        };\n\n        let mut running_balance = U512::zero();\n        for balance_key in balance_keys {\n            if let Some(StoredValue::CLValue(cl_value)) = tc\n                .get(&balance_key)\n                .map_err(Into::<ProtocolUpgradeError>::into)?\n            {\n                // need to shuck CLValue wrapper and get at interior value.\n                match cl_value.into_t::<U512>() {\n                    Ok(balance) => {\n                       
 running_balance += balance;\n                    }\n                    Err(cve) => {\n                        warn!(\"balance of {} not a U512; {}\", balance_key, cve);\n                    }\n                }\n            } else {\n                // this should be unreachable. if it is reached the options are halt & catch fire,\n                // or log and keep going. currently opting to log and keep going.\n                error!(\"failed to find balance value for {}\", balance_key);\n            }\n        }\n\n        // compare stored total supply with calculated total\n        // if same, no op\n        if total_supply != running_balance {\n            warn!(\n                \"adjusting total supply from {} to {}\",\n                total_supply, running_balance\n            );\n\n            let cl_value = CLValue::from_t(running_balance)\n                .expect(\"new total supply must convert to CLValue.\");\n\n            self.tracking_copy\n                .write(*total_supply_key, StoredValue::CLValue(cl_value));\n        } else {\n            debug!(\"total supply match\");\n        }\n\n        Ok(())\n    }\n\n    /// Write or prune away the rewards handling entry in GS.\n    pub fn handle_rewards_handling(&mut self, mint: HashAddr) -> Result<(), ProtocolUpgradeError> {\n        let rewards_handling = self.config.rewards_handling();\n        let rewards_handling_key = self\n            .tracking_copy\n            .read(&Key::RewardsHandling)\n            .map_err(ProtocolUpgradeError::TrackingCopy)?;\n\n        match rewards_handling {\n            RewardsHandling::Standard => {\n                if let Some(StoredValue::CLValue(_)) = rewards_handling_key {\n                    self.tracking_copy.prune(Key::RewardsHandling);\n                }\n            }\n            RewardsHandling::Sustain {\n                ratio,\n                purse_address,\n            } => {\n                let sustain_purse = 
URef::from_formatted_str(&purse_address).map_err(|_| {\n                    ProtocolUpgradeError::CLValue(\"unable to create sustain purse\".to_string())\n                })?;\n\n                let value = StoredValue::CLValue(\n                    CLValue::from_t((MINT_SUSTAIN_PURSE_KEY.to_string(), Key::URef(sustain_purse)))\n                        .map_err(|_| {\n                            ProtocolUpgradeError::Bytesrepr(\"sustain purse\".to_string())\n                        })?,\n                );\n\n                let mint_key = if self.config.enable_addressable_entity() {\n                    Key::AddressableEntity(EntityAddr::System(mint))\n                } else {\n                    Key::Hash(mint)\n                };\n                match self.tracking_copy.add(mint_key, value) {\n                    Ok(AddResult::Success) => {\n                        info!(\"Successfully added sustain purse to mint named keys\")\n                    }\n                    Ok(_) | Err(_) => {\n                        return Err(ProtocolUpgradeError::CLValue(\n                            \"Unable to add sustain purse\".to_string(),\n                        ))\n                    }\n                };\n\n                let rewards_ratio: Bytes = ratio\n                    .to_bytes()\n                    .map_err(|err| ProtocolUpgradeError::Bytesrepr(err.to_string()))?\n                    .into();\n                let rewards_handling_map = {\n                    let mut ret = BTreeMap::new();\n                    ret.insert(REWARDS_HANDLING_RATIO_TAG, rewards_ratio);\n                    CLValue::from_t(ret)\n                        .map_err(|cl| ProtocolUpgradeError::CLValue(cl.to_string()))?\n                };\n                self.tracking_copy.write(\n                    Key::RewardsHandling,\n                    StoredValue::CLValue(rewards_handling_map),\n                );\n            }\n        }\n\n        Ok(())\n    }\n\n    /// Handle global state 
updates.\n    pub fn handle_global_state_updates(&mut self) {\n        debug!(\"handle global state updates\");\n        for (key, value) in self.config.global_state_update() {\n            self.tracking_copy.write(*key, value.clone());\n        }\n    }\n\n    /// Handle setting up minimum_delegation_rate\n    pub fn handle_minimum_delegation_rate(\n        &mut self,\n        auction: HashAddr,\n    ) -> Result<(), ProtocolUpgradeError> {\n        let minimum_delegation_rate = self.config.new_minimum_delegation_rate().unwrap_or(0);\n        let named_keys = self.get_named_keys(auction)?;\n        let cl_value = CLValue::from_t(minimum_delegation_rate)\n            .map_err(|cl_error| ProtocolUpgradeError::CLValue(cl_error.to_string()))?;\n        let stored_value = StoredValue::CLValue(cl_value);\n        let auction_addr = EntityAddr::System(auction);\n        self.system_uref(\n            auction_addr,\n            MINIMUM_DELEGATION_RATE_KEY,\n            &named_keys,\n            stored_value,\n        )?;\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "storage/src/system/runtime_native.rs",
    "content": "use crate::{\n    global_state::{error::Error as GlobalStateReader, state::StateReader},\n    tracking_copy::{TrackingCopyEntityExt, TrackingCopyError, TrackingCopyExt},\n    AddressGenerator, TrackingCopy,\n};\nuse casper_types::{\n    account::AccountHash,\n    contracts::NamedKeys,\n    system::{auction::MINIMUM_DELEGATION_RATE_KEY, AUCTION},\n    Chainspec, ContextAccessRights, EntityAddr, FeeHandling, Key, Phase, ProtocolVersion,\n    PublicKey, RefundHandling, RewardsHandling, RuntimeFootprint, StoredValue, TransactionHash,\n    Transfer, URef, U512,\n};\nuse num_rational::Ratio;\nuse parking_lot::RwLock;\nuse std::{cell::RefCell, collections::BTreeSet, rc::Rc, sync::Arc};\nuse tracing::error;\n\n/// Configuration settings.\n#[derive(Debug, Clone, PartialEq, Eq, Default)]\npub struct Config {\n    transfer_config: TransferConfig,\n    fee_handling: FeeHandling,\n    refund_handling: RefundHandling,\n    vesting_schedule_period_millis: u64,\n    allow_auction_bids: bool,\n    compute_rewards: bool,\n    max_delegators_per_validator: u32,\n    minimum_bid_amount: u64,\n    minimum_delegation_amount: u64,\n    maximum_delegation_amount: u64,\n    balance_hold_interval: u64,\n    include_credits: bool,\n    credit_cap: Ratio<U512>,\n    enable_addressable_entity: bool,\n    native_transfer_cost: u32,\n    rewards_handling: RewardsHandling,\n}\n\nimpl Config {\n    /// Ctor.\n    #[allow(clippy::too_many_arguments)]\n    pub const fn new(\n        transfer_config: TransferConfig,\n        fee_handling: FeeHandling,\n        refund_handling: RefundHandling,\n        vesting_schedule_period_millis: u64,\n        allow_auction_bids: bool,\n        compute_rewards: bool,\n        max_delegators_per_validator: u32,\n        minimum_bid_amount: u64,\n        minimum_delegation_amount: u64,\n        maximum_delegation_amount: u64,\n        balance_hold_interval: u64,\n        include_credits: bool,\n        credit_cap: Ratio<U512>,\n        
enable_addressable_entity: bool,\n        native_transfer_cost: u32,\n        rewards_handling: RewardsHandling,\n    ) -> Self {\n        Config {\n            transfer_config,\n            fee_handling,\n            refund_handling,\n            vesting_schedule_period_millis,\n            allow_auction_bids,\n            compute_rewards,\n            max_delegators_per_validator,\n            minimum_bid_amount,\n            minimum_delegation_amount,\n            maximum_delegation_amount,\n            balance_hold_interval,\n            include_credits,\n            credit_cap,\n            enable_addressable_entity,\n            native_transfer_cost,\n            rewards_handling,\n        }\n    }\n\n    /// Ctor from chainspec.\n    pub fn from_chainspec(chainspec: &Chainspec) -> Self {\n        let transfer_config = TransferConfig::from_chainspec(chainspec);\n        let fee_handling = chainspec.core_config.fee_handling;\n        let refund_handling = chainspec.core_config.refund_handling;\n        let vesting_schedule_period_millis = chainspec.core_config.vesting_schedule_period.millis();\n        let allow_auction_bids = chainspec.core_config.allow_auction_bids;\n        let compute_rewards = chainspec.core_config.compute_rewards;\n        let max_delegators_per_validator = chainspec.core_config.max_delegators_per_validator;\n        let minimum_bid_amount = chainspec.core_config.minimum_bid_amount;\n        let minimum_delegation_amount = chainspec.core_config.minimum_delegation_amount;\n        let maximum_delegation_amount = chainspec.core_config.maximum_delegation_amount;\n        let balance_hold_interval = chainspec.core_config.gas_hold_interval.millis();\n        let include_credits = chainspec.core_config.fee_handling == FeeHandling::NoFee;\n        let credit_cap = Ratio::new_raw(\n            U512::from(*chainspec.core_config.validator_credit_cap.numer()),\n            U512::from(*chainspec.core_config.validator_credit_cap.denom()),\n        
);\n        let enable_addressable_entity = chainspec.core_config.enable_addressable_entity;\n        let native_transfer_cost = chainspec.system_costs_config.mint_costs().transfer;\n        let rewards_handling = chainspec.core_config.rewards_handling.clone();\n        Config::new(\n            transfer_config,\n            fee_handling,\n            refund_handling,\n            vesting_schedule_period_millis,\n            allow_auction_bids,\n            compute_rewards,\n            max_delegators_per_validator,\n            minimum_bid_amount,\n            minimum_delegation_amount,\n            maximum_delegation_amount,\n            balance_hold_interval,\n            include_credits,\n            credit_cap,\n            enable_addressable_entity,\n            native_transfer_cost,\n            rewards_handling,\n        )\n    }\n\n    /// Returns transfer config.\n    pub fn transfer_config(&self) -> &TransferConfig {\n        &self.transfer_config\n    }\n\n    /// Returns fee handling setting.\n    pub fn fee_handling(&self) -> &FeeHandling {\n        &self.fee_handling\n    }\n\n    /// Returns refund handling setting.\n    pub fn refund_handling(&self) -> &RefundHandling {\n        &self.refund_handling\n    }\n\n    /// Returns vesting schedule period millis setting.\n    pub fn vesting_schedule_period_millis(&self) -> u64 {\n        self.vesting_schedule_period_millis\n    }\n\n    /// Returns if auction bids are allowed.\n    pub fn allow_auction_bids(&self) -> bool {\n        self.allow_auction_bids\n    }\n\n    /// Returns if rewards should be computed.\n    pub fn compute_rewards(&self) -> bool {\n        self.compute_rewards\n    }\n\n    /// Returns max delegators per validator setting.\n    pub fn max_delegators_per_validator(&self) -> u32 {\n        self.max_delegators_per_validator\n    }\n\n    /// Returns minimum bid amount setting.\n    pub fn minimum_bid_amount(&self) -> u64 {\n        self.minimum_bid_amount\n    }\n\n    /// Returns 
the global minimum delegation amount setting.\n    pub fn global_minimum_delegation_amount(&self) -> u64 {\n        self.minimum_delegation_amount\n    }\n\n    /// Returns the global maximum delegation amount setting.\n    pub fn global_maximum_delegation_amount(&self) -> u64 {\n        self.maximum_delegation_amount\n    }\n\n    /// Returns balance hold interval setting.\n    pub fn balance_hold_interval(&self) -> u64 {\n        self.balance_hold_interval\n    }\n\n    /// Returns include credit setting.\n    pub fn include_credits(&self) -> bool {\n        self.include_credits\n    }\n\n    /// Returns validator credit cap setting.\n    pub fn credit_cap(&self) -> Ratio<U512> {\n        self.credit_cap\n    }\n\n    /// Enable the addressable entity and migrate accounts/contracts to entities.\n    pub fn enable_addressable_entity(&self) -> bool {\n        self.enable_addressable_entity\n    }\n\n    /// Rewards handling for the runtime native config.\n    pub fn rewards_handling(&self) -> RewardsHandling {\n        self.rewards_handling.clone()\n    }\n\n    /// Changes the transfer config.\n    pub fn set_transfer_config(self, transfer_config: TransferConfig) -> Self {\n        Config {\n            transfer_config,\n            fee_handling: self.fee_handling,\n            refund_handling: self.refund_handling,\n            vesting_schedule_period_millis: self.vesting_schedule_period_millis,\n            max_delegators_per_validator: self.max_delegators_per_validator,\n            allow_auction_bids: self.allow_auction_bids,\n            minimum_bid_amount: self.minimum_bid_amount,\n            minimum_delegation_amount: self.minimum_delegation_amount,\n            maximum_delegation_amount: self.maximum_delegation_amount,\n            compute_rewards: self.compute_rewards,\n            balance_hold_interval: self.balance_hold_interval,\n            include_credits: self.include_credits,\n            credit_cap: self.credit_cap,\n            
enable_addressable_entity: self.enable_addressable_entity,\n            native_transfer_cost: self.native_transfer_cost,\n            rewards_handling: self.rewards_handling,\n        }\n    }\n}\n\n/// Configuration for transfer.\n#[derive(Debug, Clone, PartialEq, Eq, Default)]\npub enum TransferConfig {\n    /// Transfers are affected by the existence of administrative_accounts. This is a\n    /// behavior specific to private or managed chains, not a public chain.\n    Administered {\n        /// Retrusn the set of account hashes for all administrators.\n        administrative_accounts: BTreeSet<AccountHash>,\n        /// If true, transfers are unrestricted.\n        /// If false, the source and / or target of a transfer must be an administrative account.\n        allow_unrestricted_transfers: bool,\n    },\n    /// Transfers are not affected by the existence of administrative_accounts (the standard\n    /// behavior).\n    #[default]\n    Unadministered,\n}\n\nimpl TransferConfig {\n    /// Returns a new instance.\n    pub fn new(\n        administrative_accounts: BTreeSet<AccountHash>,\n        allow_unrestricted_transfers: bool,\n    ) -> Self {\n        if administrative_accounts.is_empty() && allow_unrestricted_transfers {\n            TransferConfig::Unadministered\n        } else {\n            TransferConfig::Administered {\n                administrative_accounts,\n                allow_unrestricted_transfers,\n            }\n        }\n    }\n\n    /// New instance from chainspec.\n    pub fn from_chainspec(chainspec: &Chainspec) -> Self {\n        let administrative_accounts: BTreeSet<AccountHash> = chainspec\n            .core_config\n            .administrators\n            .iter()\n            .map(|x| x.to_account_hash())\n            .collect();\n        let allow_unrestricted_transfers = chainspec.core_config.allow_unrestricted_transfers;\n        if administrative_accounts.is_empty() && allow_unrestricted_transfers {\n            
TransferConfig::Unadministered\n        } else {\n            TransferConfig::Administered {\n                administrative_accounts,\n                allow_unrestricted_transfers,\n            }\n        }\n    }\n\n    /// Does account hash belong to an administrative account?\n    pub fn is_administrator(&self, account_hash: &AccountHash) -> bool {\n        match self {\n            TransferConfig::Administered {\n                administrative_accounts,\n                ..\n            } => administrative_accounts.contains(account_hash),\n            TransferConfig::Unadministered => false,\n        }\n    }\n\n    /// Administrative accounts, if any.\n    pub fn administrative_accounts(&self) -> BTreeSet<AccountHash> {\n        match self {\n            TransferConfig::Administered {\n                administrative_accounts,\n                ..\n            } => administrative_accounts.clone(),\n            TransferConfig::Unadministered => BTreeSet::default(),\n        }\n    }\n\n    /// Allow unrestricted transfers.\n    pub fn allow_unrestricted_transfers(&self) -> bool {\n        match self {\n            TransferConfig::Administered {\n                allow_unrestricted_transfers,\n                ..\n            } => *allow_unrestricted_transfers,\n            TransferConfig::Unadministered => true,\n        }\n    }\n\n    /// Restricted transfer should be enforced.\n    pub fn enforce_transfer_restrictions(&self, account_hash: &AccountHash) -> bool {\n        !self.allow_unrestricted_transfers() && !self.is_administrator(account_hash)\n    }\n}\n\n/// Id for runtime processing.\npub enum Id {\n    /// Hash of current transaction.\n    Transaction(TransactionHash),\n    /// An arbitrary set of bytes to be used as a seed value.\n    Seed(Vec<u8>),\n}\n\nimpl Id {\n    /// Ctor for id enum.\n    pub fn seed(&self) -> Vec<u8> {\n        match self {\n            Id::Transaction(hash) => hash.digest().into_vec(),\n            Id::Seed(bytes) => 
bytes.clone(),\n        }\n    }\n}\n\n/// State held by an instance of runtime native.\npub struct RuntimeNative<S> {\n    config: Config,\n\n    id: Id,\n    address_generator: Arc<RwLock<AddressGenerator>>,\n    protocol_version: ProtocolVersion,\n\n    tracking_copy: Rc<RefCell<TrackingCopy<S>>>,\n    address: AccountHash,\n    context_key: Key,\n    runtime_footprint: RuntimeFootprint,\n    access_rights: ContextAccessRights,\n    remaining_spending_limit: U512,\n    transfers: Vec<Transfer>,\n    phase: Phase,\n}\n\nimpl<S> RuntimeNative<S>\nwhere\n    S: StateReader<Key, StoredValue, Error = GlobalStateReader>,\n{\n    /// Ctor.\n    #[allow(clippy::too_many_arguments)]\n    pub fn new(\n        config: Config,\n        protocol_version: ProtocolVersion,\n        id: Id,\n        address_generator: Arc<RwLock<AddressGenerator>>,\n        tracking_copy: Rc<RefCell<TrackingCopy<S>>>,\n        address: AccountHash,\n        context_key: Key,\n        runtime_footprint: RuntimeFootprint,\n        access_rights: ContextAccessRights,\n        remaining_spending_limit: U512,\n        phase: Phase,\n    ) -> Self {\n        let transfers = vec![];\n        RuntimeNative {\n            config,\n\n            id,\n            address_generator,\n            protocol_version,\n\n            tracking_copy,\n            address,\n            context_key,\n            runtime_footprint,\n            access_rights,\n            remaining_spending_limit,\n            transfers,\n            phase,\n        }\n    }\n\n    /// Creates a runtime with elevated permissions for systemic behaviors.\n    pub fn new_system_runtime(\n        config: Config,\n        protocol_version: ProtocolVersion,\n        id: Id,\n        address_generator: Arc<RwLock<AddressGenerator>>,\n        tracking_copy: Rc<RefCell<TrackingCopy<S>>>,\n        phase: Phase,\n    ) -> Result<Self, TrackingCopyError> {\n        let transfers = vec![];\n        let (entity_addr, runtime_footprint, 
access_rights) = tracking_copy\n            .borrow_mut()\n            .system_entity_runtime_footprint(protocol_version)?;\n        let address = PublicKey::System.to_account_hash();\n        let context_key = if config.enable_addressable_entity {\n            Key::AddressableEntity(entity_addr)\n        } else {\n            Key::Hash(entity_addr.value())\n        };\n        let remaining_spending_limit = U512::MAX; // system has no spending limit\n        Ok(RuntimeNative {\n            config,\n            id,\n            address_generator,\n            protocol_version,\n\n            tracking_copy,\n            address,\n            context_key,\n            runtime_footprint,\n            access_rights,\n            remaining_spending_limit,\n            transfers,\n            phase,\n        })\n    }\n\n    /// Creates a runtime context for a system contract.\n    pub fn new_system_contract_runtime(\n        config: Config,\n        protocol_version: ProtocolVersion,\n        id: Id,\n        address_generator: Arc<RwLock<AddressGenerator>>,\n        tracking_copy: Rc<RefCell<TrackingCopy<S>>>,\n        phase: Phase,\n        name: &str,\n    ) -> Result<Self, TrackingCopyError> {\n        let transfers = vec![];\n\n        let system_entity_registry = tracking_copy.borrow().get_system_entity_registry()?;\n        let hash = match system_entity_registry.get(name).copied() {\n            Some(hash) => hash,\n            None => {\n                error!(\"unexpected failure; system contract {} not found\", name);\n                return Err(TrackingCopyError::MissingSystemContractHash(\n                    name.to_string(),\n                ));\n            }\n        };\n        let context_key = if config.enable_addressable_entity {\n            Key::AddressableEntity(EntityAddr::System(hash))\n        } else {\n            Key::Hash(hash)\n        };\n        let runtime_footprint = tracking_copy\n            .borrow_mut()\n            
.runtime_footprint_by_hash_addr(hash)?;\n        let access_rights = runtime_footprint.extract_access_rights(hash);\n        let address = PublicKey::System.to_account_hash();\n        let remaining_spending_limit = U512::MAX; // system has no spending limit\n        Ok(RuntimeNative {\n            config,\n            id,\n            address_generator,\n            protocol_version,\n\n            tracking_copy,\n            address,\n            context_key,\n            runtime_footprint,\n            access_rights,\n            remaining_spending_limit,\n            transfers,\n            phase,\n        })\n    }\n\n    /// Returns mutable reference to address generator.\n    pub fn address_generator(&mut self) -> Arc<RwLock<AddressGenerator>> {\n        Arc::clone(&self.address_generator)\n    }\n\n    /// Returns reference to config.\n    pub fn config(&self) -> &Config {\n        &self.config\n    }\n\n    /// Returns reference to transfer config.\n    pub fn transfer_config(&self) -> &TransferConfig {\n        &self.config.transfer_config\n    }\n\n    /// Returns protocol version.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n\n    /// Returns handle to tracking copy.\n    pub fn tracking_copy(&self) -> Rc<RefCell<TrackingCopy<S>>> {\n        Rc::clone(&self.tracking_copy)\n    }\n\n    /// Returns account hash being used by this instance.\n    pub fn address(&self) -> AccountHash {\n        self.address\n    }\n\n    /// Changes the account hash being used by this instance.\n    pub fn with_address(&mut self, account_hash: AccountHash) {\n        self.address = account_hash;\n    }\n\n    /// Returns the context key being used by this instance.\n    pub fn context_key(&self) -> &Key {\n        &self.context_key\n    }\n\n    /// Returns a reference to the runtime footprint used by this instance.\n    pub fn runtime_footprint(&self) -> &RuntimeFootprint {\n        &self.runtime_footprint\n    }\n\n    
/// Returns the addressable entity being used by this instance.\n    pub fn runtime_footprint_mut(&mut self) -> &mut RuntimeFootprint {\n        &mut self.runtime_footprint\n    }\n\n    /// Changes the addressable entity being used by this instance.\n    pub fn with_addressable_entity(&mut self, runtime_footprint: RuntimeFootprint) {\n        self.runtime_footprint = runtime_footprint;\n    }\n\n    /// Returns a reference to the named keys being used by this instance.\n    pub fn named_keys(&self) -> &NamedKeys {\n        self.runtime_footprint().named_keys()\n    }\n\n    /// Returns a mutable reference to the named keys being used by this instance.\n    pub fn named_keys_mut(&mut self) -> &mut NamedKeys {\n        self.runtime_footprint.named_keys_mut()\n    }\n\n    /// Returns a reference to the access rights being used by this instance.\n    pub fn access_rights(&self) -> &ContextAccessRights {\n        &self.access_rights\n    }\n\n    /// Returns a mutable reference to the access rights being used by this instance.\n    pub fn access_rights_mut(&mut self) -> &mut ContextAccessRights {\n        &mut self.access_rights\n    }\n\n    /// Extends the access rights being used by this instance.\n    pub fn extend_access_rights(&mut self, urefs: &[URef]) {\n        self.access_rights.extend(urefs)\n    }\n\n    /// Returns the remaining spending limit.\n    pub fn remaining_spending_limit(&self) -> U512 {\n        self.remaining_spending_limit\n    }\n\n    /// Set remaining spending limit.\n    pub fn set_remaining_spending_limit(&mut self, remaining: U512) {\n        self.remaining_spending_limit = remaining;\n    }\n\n    /// Get references to transfers.\n    pub fn transfers(&self) -> &Vec<Transfer> {\n        &self.transfers\n    }\n\n    /// Push transfer instance.\n    pub fn push_transfer(&mut self, transfer: Transfer) {\n        self.transfers.push(transfer);\n    }\n\n    /// Get id.\n    pub fn id(&self) -> &Id {\n        &self.id\n    }\n\n    /// Get 
phase.\n    pub fn phase(&self) -> Phase {\n        self.phase\n    }\n\n    /// Vesting schedule period in milliseconds.\n    pub fn vesting_schedule_period_millis(&self) -> u64 {\n        self.config.vesting_schedule_period_millis\n    }\n\n    /// Are auction bids allowed?\n    pub fn allow_auction_bids(&self) -> bool {\n        self.config.allow_auction_bids\n    }\n\n    /// Are rewards computed?\n    pub fn compute_rewards(&self) -> bool {\n        self.config.compute_rewards\n    }\n\n    /// Extracts transfer items.\n    pub fn into_transfers(self) -> Vec<Transfer> {\n        self.transfers\n    }\n\n    pub(crate) fn native_transfer_cost(&self) -> u32 {\n        self.config.native_transfer_cost\n    }\n\n    pub(crate) fn get_minimum_delegation_rate(&self) -> Result<u8, TrackingCopyError> {\n        let mut borrow_mut = self.tracking_copy.borrow_mut();\n        let key = borrow_mut\n            .system_contract_named_key(AUCTION, MINIMUM_DELEGATION_RATE_KEY)?\n            .ok_or(TrackingCopyError::NamedKeyNotFound(\n                MINIMUM_DELEGATION_RATE_KEY.to_string(),\n            ))?;\n        let stored_value = borrow_mut\n            .read(&key)?\n            .ok_or(TrackingCopyError::ValueNotFound(\n                MINIMUM_DELEGATION_RATE_KEY.to_string(),\n            ))?;\n        if let StoredValue::CLValue(cl_value) = stored_value {\n            let minimum_delegation_rate: u8 =\n                cl_value.into_t().map_err(TrackingCopyError::CLValue)?;\n            Ok(minimum_delegation_rate)\n        } else {\n            Err(TrackingCopyError::UnexpectedStoredValueVariant)\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/system/standard_payment/account_provider.rs",
    "content": ""
  },
  {
    "path": "storage/src/system/standard_payment/handle_payment_provider.rs",
    "content": ""
  },
  {
    "path": "storage/src/system/standard_payment/mint_provider.rs",
    "content": ""
  },
  {
    "path": "storage/src/system/standard_payment.rs",
    "content": "\n"
  },
  {
    "path": "storage/src/system/transfer.rs",
    "content": "use std::{cell::RefCell, convert::TryFrom, rc::Rc};\nuse thiserror::Error;\n\nuse casper_types::{\n    account::AccountHash,\n    bytesrepr::FromBytes,\n    system::{mint, mint::Error as MintError},\n    AccessRights, CLType, CLTyped, CLValue, CLValueError, Key, ProtocolVersion, RuntimeArgs,\n    RuntimeFootprint, StoredValue, StoredValueTypeMismatch, URef, U512,\n};\n\nuse crate::{\n    global_state::{error::Error as GlobalStateError, state::StateReader},\n    tracking_copy::{TrackingCopy, TrackingCopyEntityExt, TrackingCopyError, TrackingCopyExt},\n};\n\n/// Transfer error.\n#[derive(Clone, Error, Debug)]\npub enum TransferError {\n    /// Invalid key variant.\n    #[error(\"Invalid key {0}\")]\n    UnexpectedKeyVariant(Key),\n    /// Type mismatch error.\n    #[error(\"{}\", _0)]\n    TypeMismatch(StoredValueTypeMismatch),\n    /// Forged reference error.\n    #[error(\"Forged reference: {}\", _0)]\n    ForgedReference(URef),\n    /// Invalid access.\n    #[error(\"Invalid access rights: {}\", required)]\n    InvalidAccess {\n        /// Required access rights of the operation.\n        required: AccessRights,\n    },\n    /// Error converting a CLValue.\n    #[error(\"{0}\")]\n    CLValue(CLValueError),\n    /// Invalid purse.\n    #[error(\"Invalid purse\")]\n    InvalidPurse,\n    /// Invalid argument.\n    #[error(\"Invalid argument\")]\n    InvalidArgument,\n    /// Missing argument.\n    #[error(\"Missing argument\")]\n    MissingArgument,\n    /// Invalid purse.\n    #[error(\"Attempt to transfer amount 0\")]\n    AttemptToTransferZero,\n    /// Invalid operation.\n    #[error(\"Invalid operation\")]\n    InvalidOperation,\n    /// Disallowed transfer attempt (private chain).\n    #[error(\"Either the source or the target must be an admin (private chain).\")]\n    RestrictedTransferAttempted,\n    /// Could not determine if target is an admin (private chain).\n    #[error(\"Unable to determine if the target of a transfer is an admin\")]\n  
  UnableToVerifyTargetIsAdmin,\n    /// Tracking copy error.\n    #[error(\"{0}\")]\n    TrackingCopy(TrackingCopyError),\n    /// Mint error.\n    #[error(\"{0}\")]\n    Mint(MintError),\n}\n\nimpl From<GlobalStateError> for TransferError {\n    fn from(gse: GlobalStateError) -> Self {\n        TransferError::TrackingCopy(TrackingCopyError::Storage(gse))\n    }\n}\n\nimpl From<TrackingCopyError> for TransferError {\n    fn from(tce: TrackingCopyError) -> Self {\n        TransferError::TrackingCopy(tce)\n    }\n}\n\n/// A target mode indicates if a native transfer's arguments will resolve to an existing purse, or\n/// will have to create a new account first.\n#[derive(Copy, Clone, Debug, PartialEq)]\npub enum TransferTargetMode {\n    /// Native transfer arguments resolved into a transfer to an existing account.\n    ExistingAccount {\n        /// Existing account hash.\n        target_account_hash: AccountHash,\n        /// Main purse of a resolved account.\n        main_purse: URef,\n    },\n    /// Native transfer arguments resolved into a transfer to a purse.\n    PurseExists {\n        /// Target account hash (if known).\n        target_account_hash: Option<AccountHash>,\n        /// Purse.\n        purse_uref: URef,\n    },\n    /// Native transfer arguments resolved into a transfer to a new account.\n    CreateAccount(AccountHash),\n}\n\nimpl TransferTargetMode {\n    /// Target account hash, if any.\n    pub fn target_account_hash(&self) -> Option<AccountHash> {\n        match self {\n            TransferTargetMode::PurseExists {\n                target_account_hash,\n                ..\n            } => *target_account_hash,\n            TransferTargetMode::ExistingAccount {\n                target_account_hash,\n                ..\n            } => Some(*target_account_hash),\n            TransferTargetMode::CreateAccount(target_account_hash) => Some(*target_account_hash),\n        }\n    }\n}\n\n/// Mint's transfer arguments.\n///\n/// A struct has a 
benefit of static typing, which is helpful while resolving the arguments.\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub struct TransferArgs {\n    to: Option<AccountHash>,\n    source: URef,\n    target: URef,\n    amount: U512,\n    arg_id: Option<u64>,\n}\n\nimpl TransferArgs {\n    /// Creates new transfer arguments.\n    pub fn new(\n        to: Option<AccountHash>,\n        source: URef,\n        target: URef,\n        amount: U512,\n        arg_id: Option<u64>,\n    ) -> Self {\n        Self {\n            to,\n            source,\n            target,\n            amount,\n            arg_id,\n        }\n    }\n\n    /// Returns `to` field.\n    pub fn to(&self) -> Option<AccountHash> {\n        self.to\n    }\n\n    /// Returns `source` field.\n    pub fn source(&self) -> URef {\n        self.source\n    }\n\n    /// Returns `target` field.\n    pub fn target(&self) -> URef {\n        self.target\n    }\n\n    /// Returns `amount` field.\n    pub fn amount(&self) -> U512 {\n        self.amount\n    }\n\n    /// Returns `arg_id` field.\n    pub fn arg_id(&self) -> Option<u64> {\n        self.arg_id\n    }\n}\n\nimpl TryFrom<TransferArgs> for RuntimeArgs {\n    type Error = CLValueError;\n\n    fn try_from(transfer_args: TransferArgs) -> Result<Self, Self::Error> {\n        let mut runtime_args = RuntimeArgs::new();\n\n        runtime_args.insert(mint::ARG_TO, transfer_args.to)?;\n        runtime_args.insert(mint::ARG_SOURCE, transfer_args.source)?;\n        runtime_args.insert(mint::ARG_TARGET, transfer_args.target)?;\n        runtime_args.insert(mint::ARG_AMOUNT, transfer_args.amount)?;\n        runtime_args.insert(mint::ARG_ID, transfer_args.arg_id)?;\n\n        Ok(runtime_args)\n    }\n}\n\n/// State of a builder of a `TransferArgs`.\n///\n/// Purpose of this builder is to resolve native transfer args into [`TransferTargetMode`] and a\n/// [`TransferArgs`] instance to execute actual token transfer on the mint contract.\n#[derive(Clone, Debug, 
PartialEq, Eq)]\npub struct TransferRuntimeArgsBuilder {\n    inner: RuntimeArgs,\n}\n\nimpl TransferRuntimeArgsBuilder {\n    /// Creates new transfer args builder.\n    ///\n    /// Takes an incoming runtime args that represents native transfer's arguments.\n    pub fn new(imputed_runtime_args: RuntimeArgs) -> TransferRuntimeArgsBuilder {\n        TransferRuntimeArgsBuilder {\n            inner: imputed_runtime_args,\n        }\n    }\n\n    /// Checks if a purse exists.\n    fn purse_exists<R>(&self, uref: URef, tracking_copy: Rc<RefCell<TrackingCopy<R>>>) -> bool\n    where\n        R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n    {\n        let key = match tracking_copy\n            .borrow_mut()\n            .get_purse_balance_key(uref.into())\n        {\n            Ok(key) => key,\n            Err(_) => return false,\n        };\n        tracking_copy\n            .borrow_mut()\n            .get_available_balance(key)\n            .is_ok()\n    }\n\n    /// Resolves the source purse of the transfer.\n    ///\n    /// User can optionally pass a \"source\" argument which should refer to an [`URef`] existing in\n    /// user's named keys. 
When the \"source\" argument is missing then user's main purse is assumed.\n    ///\n    /// Returns resolved [`URef`].\n    fn resolve_source_uref<R>(\n        &self,\n        account: &RuntimeFootprint,\n        tracking_copy: Rc<RefCell<TrackingCopy<R>>>,\n    ) -> Result<URef, TransferError>\n    where\n        R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n    {\n        let imputed_runtime_args = &self.inner;\n        let arg_name = mint::ARG_SOURCE;\n        let uref = match imputed_runtime_args.get(arg_name) {\n            Some(cl_value) if *cl_value.cl_type() == CLType::URef => {\n                self.map_cl_value::<URef>(cl_value)?\n            }\n            Some(cl_value) if *cl_value.cl_type() == CLType::Option(CLType::URef.into()) => {\n                let Some(uref): Option<URef> = self.map_cl_value(cl_value)? else {\n                    return account.main_purse().ok_or(TransferError::InvalidOperation);\n                };\n                uref\n            }\n            Some(_) => return Err(TransferError::InvalidArgument),\n            None => return account.main_purse().ok_or(TransferError::InvalidOperation), /* if no source purse passed use account\n                                                                                         * main purse */\n        };\n        if account\n            .main_purse()\n            .ok_or(TransferError::InvalidOperation)?\n            .addr()\n            == uref.addr()\n        {\n            return Ok(uref);\n        }\n\n        let normalized_uref = Key::URef(uref).normalize();\n        let maybe_named_key = account\n            .named_keys()\n            .keys()\n            .find(|&named_key| named_key.normalize() == normalized_uref);\n\n        match maybe_named_key {\n            Some(Key::URef(found_uref)) => {\n                if found_uref.is_writeable() {\n                    // it is a URef and caller has access but is it a purse URef?\n                    if 
!self.purse_exists(found_uref.to_owned(), tracking_copy) {\n                        return Err(TransferError::InvalidPurse);\n                    }\n\n                    Ok(uref)\n                } else {\n                    Err(TransferError::InvalidAccess {\n                        required: AccessRights::WRITE,\n                    })\n                }\n            }\n            Some(key) => Err(TransferError::TypeMismatch(StoredValueTypeMismatch::new(\n                \"Key::URef\".to_string(),\n                key.type_string(),\n            ))),\n            None => Err(TransferError::ForgedReference(uref)),\n        }\n    }\n\n    /// Resolves a transfer target mode.\n    ///\n    /// User has to specify a \"target\" argument which must be one of the following types:\n    ///   * an existing purse [`URef`]\n    ///   * a 32-byte array, interpreted as an account hash\n    ///   * a [`Key::Account`], from which the account hash is extracted\n    ///   * a [`casper_types::PublicKey`], which is converted to an account hash\n    ///\n    /// If the \"target\" account hash is not existing, then a special variant is returned that\n    /// indicates that the system has to create new account first.\n    ///\n    /// Returns [`TransferTargetMode`] with a resolved variant.\n    pub fn resolve_transfer_target_mode<R>(\n        &mut self,\n        protocol_version: ProtocolVersion,\n        tracking_copy: Rc<RefCell<TrackingCopy<R>>>,\n    ) -> Result<TransferTargetMode, TransferError>\n    where\n        R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n    {\n        let imputed_runtime_args = &self.inner;\n        let to_name = mint::ARG_TO;\n\n        let target_account_hash = match imputed_runtime_args.get(to_name) {\n            Some(cl_value)\n                if *cl_value.cl_type() == CLType::Option(Box::new(CLType::ByteArray(32))) =>\n            {\n                let to: Option<AccountHash> = self.map_cl_value(cl_value)?;\n                to\n   
         }\n            Some(_) | None => None,\n        };\n\n        let target_name = mint::ARG_TARGET;\n        let account_hash = match imputed_runtime_args.get(target_name) {\n            Some(cl_value) if *cl_value.cl_type() == CLType::URef => {\n                let purse_uref = self.map_cl_value(cl_value)?;\n\n                if !self.purse_exists(purse_uref, tracking_copy) {\n                    return Err(TransferError::InvalidPurse);\n                }\n\n                return Ok(TransferTargetMode::PurseExists {\n                    purse_uref,\n                    target_account_hash,\n                });\n            }\n            Some(cl_value) if *cl_value.cl_type() == CLType::ByteArray(32) => {\n                self.map_cl_value(cl_value)?\n            }\n            Some(cl_value) if *cl_value.cl_type() == CLType::Key => {\n                let account_key: Key = self.map_cl_value(cl_value)?;\n                let account_hash: AccountHash = account_key\n                    .into_account()\n                    .ok_or(TransferError::UnexpectedKeyVariant(account_key))?;\n                account_hash\n            }\n            Some(cl_value) if *cl_value.cl_type() == CLType::PublicKey => {\n                let public_key = self.map_cl_value(cl_value)?;\n                AccountHash::from(&public_key)\n            }\n            Some(_) => return Err(TransferError::InvalidArgument),\n            None => return Err(TransferError::MissingArgument),\n        };\n\n        match tracking_copy\n            .borrow_mut()\n            .runtime_footprint_by_account_hash(protocol_version, account_hash)\n        {\n            Ok((_, entity)) => {\n                let main_purse_addable = entity\n                    .main_purse()\n                    .ok_or(TransferError::InvalidPurse)?\n                    .with_access_rights(AccessRights::ADD);\n                Ok(TransferTargetMode::ExistingAccount {\n                    target_account_hash: account_hash,\n  
                  main_purse: main_purse_addable,\n                })\n            }\n            Err(_) => Ok(TransferTargetMode::CreateAccount(account_hash)),\n        }\n    }\n\n    /// Resolves amount.\n    ///\n    /// User has to specify \"amount\" argument that could be either a [`U512`] or a u64.\n    fn resolve_amount(&self) -> Result<U512, TransferError> {\n        let imputed_runtime_args = &self.inner;\n\n        let amount = match imputed_runtime_args.get(mint::ARG_AMOUNT) {\n            Some(amount_value) if *amount_value.cl_type() == CLType::U512 => {\n                self.map_cl_value(amount_value)?\n            }\n            Some(amount_value) if *amount_value.cl_type() == CLType::U64 => {\n                let amount: u64 = self.map_cl_value(amount_value)?;\n                U512::from(amount)\n            }\n            Some(_) => return Err(TransferError::InvalidArgument),\n            None => return Err(TransferError::MissingArgument),\n        };\n\n        if amount.is_zero() {\n            return Err(TransferError::AttemptToTransferZero);\n        }\n\n        Ok(amount)\n    }\n\n    fn resolve_id(&self) -> Result<Option<u64>, TransferError> {\n        let id: Option<u64> = if let Some(id_value) = self.inner.get(mint::ARG_ID) {\n            self.map_cl_value(id_value)?\n        } else {\n            None\n        };\n        Ok(id)\n    }\n\n    /// Creates new [`TransferArgs`] instance.\n    pub fn build<R>(\n        mut self,\n        from: &RuntimeFootprint,\n        protocol_version: ProtocolVersion,\n        tracking_copy: Rc<RefCell<TrackingCopy<R>>>,\n    ) -> Result<TransferArgs, TransferError>\n    where\n        R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n    {\n        let (to, target) = match self\n            .resolve_transfer_target_mode(protocol_version, Rc::clone(&tracking_copy))?\n        {\n            TransferTargetMode::ExistingAccount {\n                main_purse: purse_uref,\n                
target_account_hash: target_account,\n            } => (Some(target_account), purse_uref),\n            TransferTargetMode::PurseExists {\n                target_account_hash,\n                purse_uref,\n            } => (target_account_hash, purse_uref),\n            TransferTargetMode::CreateAccount(_) => {\n                // Method \"build()\" is called after `resolve_transfer_target_mode` is first called\n                // and handled by creating a new account. Calling `resolve_transfer_target_mode`\n                // for the second time should never return `CreateAccount` variant.\n                return Err(TransferError::InvalidOperation);\n            }\n        };\n\n        let source = self.resolve_source_uref(from, Rc::clone(&tracking_copy))?;\n\n        if source.addr() == target.addr() {\n            return Err(TransferError::InvalidPurse);\n        }\n\n        let amount = self.resolve_amount()?;\n\n        let arg_id = self.resolve_id()?;\n\n        Ok(TransferArgs {\n            to,\n            source,\n            target,\n            amount,\n            arg_id,\n        })\n    }\n\n    fn map_cl_value<T: CLTyped + FromBytes>(&self, cl_value: &CLValue) -> Result<T, TransferError> {\n        cl_value.clone().into_t().map_err(TransferError::CLValue)\n    }\n}\n"
  },
  {
    "path": "storage/src/system.rs",
    "content": "/// Auction logic.\npub mod auction;\n/// Burn logic.\npub mod burn;\n/// Error definition.\npub mod error;\n/// Genesis logic.\npub mod genesis;\n/// Handle payment logic.\npub mod handle_payment;\n/// Mint logic.\npub mod mint;\n/// Protocol upgrade logic.\npub mod protocol_upgrade;\n/// Runtime native logic.\npub mod runtime_native;\n/// Standard payment logic.\npub mod standard_payment;\n/// Transfer logic.\npub mod transfer;\n"
  },
  {
    "path": "storage/src/tracking_copy/byte_size.rs",
    "content": "use casper_types::{account::Account, bytesrepr::ToBytes, ByteCode, Key, StoredValue};\n\n/// Returns byte size of the element - both heap size and stack size.\npub trait ByteSize {\n    fn byte_size(&self) -> usize;\n}\n\nimpl ByteSize for Key {\n    fn byte_size(&self) -> usize {\n        size_of::<Self>() + self.heap_size()\n    }\n}\n\nimpl ByteSize for String {\n    fn byte_size(&self) -> usize {\n        size_of::<Self>() + self.heap_size()\n    }\n}\n\nimpl ByteSize for StoredValue {\n    fn byte_size(&self) -> usize {\n        size_of::<Self>()\n            + match self {\n                StoredValue::CLValue(cl_value) => cl_value.serialized_length(),\n                StoredValue::Account(account) => account.serialized_length(),\n                StoredValue::ContractWasm(contract_wasm) => contract_wasm.serialized_length(),\n                StoredValue::ContractPackage(contract_package) => {\n                    contract_package.serialized_length()\n                }\n                StoredValue::Contract(contract) => contract.serialized_length(),\n                StoredValue::AddressableEntity(contract_header) => {\n                    contract_header.serialized_length()\n                }\n                StoredValue::SmartContract(package) => package.serialized_length(),\n                StoredValue::DeployInfo(deploy_info) => deploy_info.serialized_length(),\n                StoredValue::Transfer(transfer_v1) => transfer_v1.serialized_length(),\n                StoredValue::EraInfo(era_info) => era_info.serialized_length(),\n                StoredValue::Bid(bid) => bid.serialized_length(),\n                StoredValue::BidKind(bid_kind) => bid_kind.serialized_length(),\n                StoredValue::Withdraw(withdraw_purses) => withdraw_purses.serialized_length(),\n                StoredValue::Unbonding(unbonding_purses) => unbonding_purses.serialized_length(),\n                StoredValue::ByteCode(byte_code) => 
byte_code.serialized_length(),\n                StoredValue::MessageTopic(message_topic_summary) => {\n                    message_topic_summary.serialized_length()\n                }\n                StoredValue::Message(message_summary) => message_summary.serialized_length(),\n                StoredValue::NamedKey(named_key) => named_key.serialized_length(),\n                StoredValue::Prepayment(prepayment_kind) => prepayment_kind.serialized_length(),\n                StoredValue::EntryPoint(entry_point) => entry_point.serialized_length(),\n                StoredValue::RawBytes(raw_bytes) => raw_bytes.serialized_length(),\n            }\n    }\n}\n\n/// Returns heap size of the value.\n/// Note it's different from [ByteSize] that returns both heap and stack size.\npub trait HeapSizeOf {\n    fn heap_size(&self) -> usize;\n}\n\nimpl HeapSizeOf for Key {\n    fn heap_size(&self) -> usize {\n        0\n    }\n}\n\n// TODO: contract has other fields (re a bunch) that are not repr here...on purpose?\nimpl HeapSizeOf for Account {\n    fn heap_size(&self) -> usize {\n        // NOTE: We're ignoring size of the tree's nodes.\n        self.named_keys()\n            .iter()\n            .fold(0, |sum, (k, v)| sum + k.heap_size() + v.heap_size())\n    }\n}\n\n// TODO: contract has other fields (re protocol version) that are not repr here...on purpose?\nimpl HeapSizeOf for ByteCode {\n    fn heap_size(&self) -> usize {\n        self.bytes().len()\n    }\n}\n\nimpl<T: HeapSizeOf> ByteSize for [T] {\n    fn byte_size(&self) -> usize {\n        self.iter()\n            .fold(0, |sum, el| sum + size_of::<T>() + el.heap_size())\n    }\n}\n\nimpl HeapSizeOf for String {\n    fn heap_size(&self) -> usize {\n        self.capacity()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::ByteSize;\n\n    fn assert_byte_size<T: ByteSize>(el: T, expected: usize) {\n        assert_eq!(el.byte_size(), expected)\n    }\n\n    #[test]\n    fn byte_size_of_string() {\n        
assert_byte_size(\"Hello\".to_owned(), 5 + size_of::<String>())\n    }\n}\n"
  },
  {
    "path": "storage/src/tracking_copy/error.rs",
    "content": "use thiserror::Error;\n\nuse crate::data_access_layer::balance::BalanceFailure;\nuse casper_types::{\n    account::{AddKeyFailure, RemoveKeyFailure, SetThresholdFailure, UpdateKeyFailure},\n    bytesrepr, system, ApiError, CLType, CLValueError, Key, StoredValueTypeMismatch,\n};\n\n/// Possible tracking copy errors.\n#[derive(Error, Debug, Clone)]\n#[non_exhaustive]\npub enum Error {\n    /// Storage error.\n    #[error(\"Storage error: {}\", _0)]\n    Storage(crate::global_state::error::Error),\n    /// Failed to (de)serialize bytes.\n    #[error(\"Serialization error: {}\", _0)]\n    BytesRepr(bytesrepr::Error),\n    /// Unable to find named key.\n    #[error(\"Named key {} not found\", _0)]\n    NamedKeyNotFound(String),\n    /// Unable to find a key.\n    #[error(\"Key {} not found\", _0)]\n    KeyNotFound(Key),\n    /// Unable to find an account.\n    #[error(\"Account {:?} not found\", _0)]\n    AccountNotFound(Key),\n    /// Type mismatch error.\n    #[error(\"{}\", _0)]\n    TypeMismatch(StoredValueTypeMismatch),\n    /// ApiError.\n    #[error(\"{}\", _0)]\n    Api(ApiError),\n    /// Error adding an associated key.\n    #[error(\"{}\", _0)]\n    AddKeyFailure(AddKeyFailure),\n    /// Error removing an associated key.\n    #[error(\"{}\", _0)]\n    RemoveKeyFailure(RemoveKeyFailure),\n    /// Error updating an associated key.\n    #[error(\"{}\", _0)]\n    UpdateKeyFailure(UpdateKeyFailure),\n    /// Error setting threshold on associated key.\n    #[error(\"{}\", _0)]\n    SetThresholdFailure(SetThresholdFailure),\n    /// Error executing system contract.\n    #[error(\"{}\", _0)]\n    SystemContract(system::Error),\n    /// Weight of all used associated keys does not meet account's deploy threshold.\n    #[error(\"Deployment authorization failure\")]\n    DeploymentAuthorizationFailure,\n    /// Error converting a CLValue.\n    #[error(\"{0}\")]\n    CLValue(CLValueError),\n    /// Unexpected variant of a stored value.\n    
#[error(\"Unexpected variant of a stored value\")]\n    UnexpectedStoredValueVariant,\n    /// Missing system contract hash.\n    #[error(\"Missing system contract hash: {0}\")]\n    MissingSystemContractHash(String),\n    /// Invalid key\n    #[error(\"Invalid key {0}\")]\n    UnexpectedKeyVariant(Key),\n    /// Circular reference error.\n    #[error(\"Query attempted a circular reference: {0}\")]\n    CircularReference(String),\n    /// Depth limit reached.\n    #[error(\"Query exceeded depth limit: {depth}\")]\n    QueryDepthLimit {\n        /// Current depth limit.\n        depth: u64,\n    },\n    /// Missing bid.\n    #[error(\"Missing bid: {0}\")]\n    MissingBid(Key),\n    /// Not authorized.\n    #[error(\"Authorization error\")]\n    Authorization,\n    /// The value wasn't found.\n    #[error(\"Value not found\")]\n    ValueNotFound(String),\n    /// Balance calculation failure.\n    #[error(\"Balance calculation failure\")]\n    Balance(BalanceFailure),\n    /// Unable to find a contract.\n    #[error(\"Contract {:?} not found\", _0)]\n    ContractNotFound(Key),\n    #[error(\"flag\")]\n    /// Attempted to fetch an entity or an associated record\n    AddressableEntityDisable,\n}\n\nimpl Error {\n    /// Returns new type mismatch error.\n    pub fn type_mismatch(expected: CLType, found: CLType) -> Error {\n        Error::TypeMismatch(StoredValueTypeMismatch::new(\n            format!(\"{:?}\", expected),\n            format!(\"{:?}\", found),\n        ))\n    }\n}\n\nimpl From<bytesrepr::Error> for Error {\n    fn from(e: bytesrepr::Error) -> Self {\n        Error::BytesRepr(e)\n    }\n}\n\nimpl From<AddKeyFailure> for Error {\n    fn from(err: AddKeyFailure) -> Self {\n        Error::AddKeyFailure(err)\n    }\n}\n\nimpl From<RemoveKeyFailure> for Error {\n    fn from(err: RemoveKeyFailure) -> Self {\n        Error::RemoveKeyFailure(err)\n    }\n}\n\nimpl From<UpdateKeyFailure> for Error {\n    fn from(err: UpdateKeyFailure) -> Self {\n        
Error::UpdateKeyFailure(err)\n    }\n}\n\nimpl From<SetThresholdFailure> for Error {\n    fn from(err: SetThresholdFailure) -> Self {\n        Error::SetThresholdFailure(err)\n    }\n}\n\nimpl From<CLValueError> for Error {\n    fn from(e: CLValueError) -> Self {\n        Error::CLValue(e)\n    }\n}\n\nimpl From<crate::global_state::error::Error> for Error {\n    fn from(gse: crate::global_state::error::Error) -> Self {\n        Error::Storage(gse)\n    }\n}\n"
  },
  {
    "path": "storage/src/tracking_copy/ext.rs",
    "content": "use std::{\n    collections::{btree_map::Entry, BTreeMap},\n    convert::TryInto,\n};\nuse tracing::{error, warn};\n\nuse crate::{\n    data_access_layer::balance::{\n        AvailableBalanceChecker, BalanceHolds, BalanceHoldsWithProof, ProcessingHoldBalanceHandling,\n    },\n    global_state::{error::Error as GlobalStateError, state::StateReader},\n    tracking_copy::{TrackingCopy, TrackingCopyEntityExt, TrackingCopyError},\n    KeyPrefix,\n};\nuse casper_types::{\n    account::AccountHash,\n    addressable_entity::MessageTopics,\n    bytesrepr::ToBytes,\n    contract_messages::TopicNameHash,\n    contracts::{ContractHash, NamedKeys},\n    global_state::TrieMerkleProof,\n    system::{\n        mint::{\n            BalanceHoldAddr, BalanceHoldAddrTag, MINT_GAS_HOLD_HANDLING_KEY,\n            MINT_GAS_HOLD_INTERVAL_KEY,\n        },\n        MINT,\n    },\n    BlockGlobalAddr, BlockTime, ByteCode, ByteCodeAddr, ByteCodeHash, CLValue, ChecksumRegistry,\n    Contract, EntityAddr, EntryPoints, HashAddr, HoldBalanceHandling, HoldsEpoch, Key, Motes,\n    Package, StoredValue, StoredValueTypeMismatch, SystemHashRegistry, URef, URefAddr, U512,\n};\n\n/// Higher-level operations on the state via a `TrackingCopy`.\npub trait TrackingCopyExt<R> {\n    /// The type for the returned errors.\n    type Error;\n\n    /// Reads the entity key for a given account hash.\n    fn read_account_key(&mut self, account_hash: AccountHash) -> Result<Key, Self::Error>;\n\n    /// Returns block time associated with checked out root hash.\n    fn get_block_time(&self) -> Result<Option<BlockTime>, Self::Error>;\n\n    /// Returns balance hold configuration settings for imputed kind of balance hold.\n    fn get_balance_hold_config(\n        &self,\n        hold_kind: BalanceHoldAddrTag,\n    ) -> Result<Option<(BlockTime, HoldBalanceHandling, u64)>, Self::Error>;\n\n    /// Gets the purse balance key for a given purse.\n    fn get_purse_balance_key(&self, purse_key: Key) -> 
Result<Key, Self::Error>;\n\n    /// Gets the balance hold keys for the imputed purse (if any).\n    fn get_balance_hold_addresses(\n        &self,\n        purse_addr: URefAddr,\n    ) -> Result<Vec<BalanceHoldAddr>, Self::Error>;\n\n    /// Returns total balance.\n    fn get_total_balance(&self, key: Key) -> Result<Motes, Self::Error>;\n\n    /// Returns the available balance, considering any holds from holds_epoch to now.\n    fn get_available_balance(&mut self, balance_key: Key) -> Result<Motes, Self::Error>;\n\n    /// Gets the purse balance key for a given purse and provides a Merkle proof.\n    fn get_purse_balance_key_with_proof(\n        &self,\n        purse_key: Key,\n    ) -> Result<(Key, TrieMerkleProof<Key, StoredValue>), Self::Error>;\n\n    /// Gets the balance at a given balance key and provides a Merkle proof.\n    fn get_total_balance_with_proof(\n        &self,\n        balance_key: Key,\n    ) -> Result<(U512, TrieMerkleProof<Key, StoredValue>), Self::Error>;\n\n    /// Clear expired balance holds.\n    fn clear_expired_balance_holds(\n        &mut self,\n        purse_addr: URefAddr,\n        filter: Vec<(BalanceHoldAddrTag, HoldsEpoch)>,\n    ) -> Result<(), Self::Error>;\n\n    /// Gets the balance holds for a given balance, without Merkle proofs.\n    fn get_balance_holds(\n        &mut self,\n        purse_addr: URefAddr,\n        block_time: BlockTime,\n        interval: u64,\n    ) -> Result<BTreeMap<BlockTime, BalanceHolds>, Self::Error>;\n\n    /// Gets the balance holds for a given balance, with Merkle proofs.\n    fn get_balance_holds_with_proof(\n        &self,\n        purse_addr: URefAddr,\n    ) -> Result<BTreeMap<BlockTime, BalanceHoldsWithProof>, Self::Error>;\n\n    /// Returns the collection of message topics (if any) for a given HashAddr.\n    fn get_message_topics(&self, entity_addr: EntityAddr) -> Result<MessageTopics, Self::Error>;\n\n    /// Returns the collection of named keys for a given AddressableEntity.\n    fn 
get_named_keys(&self, entity_addr: EntityAddr) -> Result<NamedKeys, Self::Error>;\n\n    /// Returns the collection of entry points for a given AddresableEntity.\n    fn get_v1_entry_points(&self, entity_addr: EntityAddr) -> Result<EntryPoints, Self::Error>;\n\n    /// Gets a package by hash.\n    fn get_package(&mut self, package_hash: HashAddr) -> Result<Package, Self::Error>;\n\n    /// Get a Contract record.\n    fn get_contract(&mut self, contract_hash: ContractHash) -> Result<Contract, Self::Error>;\n\n    /// Gets the system entity registry.\n    fn get_system_entity_registry(&self) -> Result<SystemHashRegistry, Self::Error>;\n\n    /// Gets the system checksum registry.\n    fn get_checksum_registry(&mut self) -> Result<Option<ChecksumRegistry>, Self::Error>;\n\n    /// Gets byte code by hash.\n    fn get_byte_code(&mut self, byte_code_hash: ByteCodeHash) -> Result<ByteCode, Self::Error>;\n}\n\nimpl<R> TrackingCopyExt<R> for TrackingCopy<R>\nwhere\n    R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    type Error = TrackingCopyError;\n\n    fn read_account_key(&mut self, account_hash: AccountHash) -> Result<Key, Self::Error> {\n        let account_key = Key::Account(account_hash);\n        match self.read(&account_key)? {\n            Some(StoredValue::CLValue(cl_value)) => Ok(CLValue::into_t(cl_value)?),\n            Some(other) => Err(TrackingCopyError::TypeMismatch(\n                StoredValueTypeMismatch::new(\"Account\".to_string(), other.type_name()),\n            )),\n            None => Err(TrackingCopyError::KeyNotFound(account_key)),\n        }\n    }\n\n    fn get_block_time(&self) -> Result<Option<BlockTime>, Self::Error> {\n        match self.read(&Key::BlockGlobal(BlockGlobalAddr::BlockTime))? 
{\n            None => Ok(None),\n            Some(StoredValue::CLValue(cl_value)) => {\n                let block_time = cl_value.into_t().map_err(Self::Error::CLValue)?;\n                Ok(Some(BlockTime::new(block_time)))\n            }\n            Some(unexpected) => {\n                warn!(?unexpected, \"block time stored as unexpected value type\");\n                Err(Self::Error::UnexpectedStoredValueVariant)\n            }\n        }\n    }\n\n    fn get_balance_hold_config(\n        &self,\n        hold_kind: BalanceHoldAddrTag,\n    ) -> Result<Option<(BlockTime, HoldBalanceHandling, u64)>, Self::Error> {\n        let block_time = match self.get_block_time()? {\n            None => return Ok(None),\n            Some(block_time) => block_time,\n        };\n        let (handling_key, interval_key) = match hold_kind {\n            BalanceHoldAddrTag::Processing => {\n                return Ok(Some((block_time, HoldBalanceHandling::Accrued, 0)));\n            }\n            BalanceHoldAddrTag::Gas => (MINT_GAS_HOLD_HANDLING_KEY, MINT_GAS_HOLD_INTERVAL_KEY),\n        };\n\n        let system_contract_registry = self.get_system_entity_registry()?;\n\n        let entity_hash = *system_contract_registry.get(MINT).ok_or_else(|| {\n            error!(\"Missing system mint contract hash\");\n            TrackingCopyError::MissingSystemContractHash(MINT.to_string())\n        })?;\n\n        let named_keys = self.get_named_keys(EntityAddr::System(entity_hash))?;\n\n        // get the handling\n        let handling = {\n            let named_key =\n                named_keys\n                    .get(handling_key)\n                    .ok_or(TrackingCopyError::NamedKeyNotFound(\n                        handling_key.to_string(),\n                    ))?;\n            let _uref = named_key\n                .as_uref()\n                .ok_or(TrackingCopyError::UnexpectedKeyVariant(*named_key))?;\n\n            match self.read(&named_key.normalize()) {\n               
 Ok(Some(StoredValue::CLValue(cl_value))) => {\n                    let handling_tag = cl_value.into_t().map_err(TrackingCopyError::CLValue)?;\n                    HoldBalanceHandling::from_tag(handling_tag).map_err(|_| {\n                        TrackingCopyError::ValueNotFound(\n                            \"No hold balance handling variant matches stored tag\".to_string(),\n                        )\n                    })?\n                }\n                Ok(Some(unexpected)) => {\n                    warn!(\n                        ?unexpected,\n                        \"hold balance handling unexpected stored value variant\"\n                    );\n                    return Err(TrackingCopyError::UnexpectedStoredValueVariant);\n                }\n                Ok(None) => {\n                    error!(\"hold balance handling missing from gs\");\n                    return Err(TrackingCopyError::ValueNotFound(handling_key.to_string()));\n                }\n                Err(gse) => {\n                    error!(?gse, \"hold balance handling read error\");\n                    return Err(TrackingCopyError::Storage(gse));\n                }\n            }\n        };\n\n        // get the interval.\n        let interval = {\n            let named_key =\n                named_keys\n                    .get(interval_key)\n                    .ok_or(TrackingCopyError::NamedKeyNotFound(\n                        interval_key.to_string(),\n                    ))?;\n            let _uref = named_key\n                .as_uref()\n                .ok_or(TrackingCopyError::UnexpectedKeyVariant(*named_key))?;\n\n            match self.read(&named_key.normalize()) {\n                Ok(Some(StoredValue::CLValue(cl_value))) => {\n                    cl_value.into_t().map_err(TrackingCopyError::CLValue)?\n                }\n                Ok(Some(unexpected)) => {\n                    warn!(\n                        ?unexpected,\n                        \"hold 
balance interval unexpected stored value variant\"\n                    );\n                    return Err(TrackingCopyError::UnexpectedStoredValueVariant);\n                }\n                Ok(None) => {\n                    error!(\"hold balance interval missing from gs\");\n                    return Err(TrackingCopyError::ValueNotFound(handling_key.to_string()));\n                }\n                Err(gse) => return Err(TrackingCopyError::Storage(gse)),\n            }\n        };\n\n        Ok(Some((block_time, handling, interval)))\n    }\n\n    fn get_purse_balance_key(&self, purse_key: Key) -> Result<Key, Self::Error> {\n        let balance_key: URef = purse_key\n            .into_uref()\n            .ok_or(TrackingCopyError::UnexpectedKeyVariant(purse_key))?;\n        Ok(Key::Balance(balance_key.addr()))\n    }\n\n    fn get_balance_hold_addresses(\n        &self,\n        purse_addr: URefAddr,\n    ) -> Result<Vec<BalanceHoldAddr>, Self::Error> {\n        let tagged_keys = {\n            let mut ret: Vec<BalanceHoldAddr> = vec![];\n            let gas_prefix = KeyPrefix::GasBalanceHoldsByPurse(purse_addr).to_bytes()?;\n            for key in self.keys_with_prefix(&gas_prefix)? {\n                let addr = key\n                    .as_balance_hold()\n                    .ok_or(Self::Error::UnexpectedKeyVariant(key))?;\n                ret.push(*addr);\n            }\n            let processing_prefix =\n                KeyPrefix::ProcessingBalanceHoldsByPurse(purse_addr).to_bytes()?;\n            for key in self.keys_with_prefix(&processing_prefix)? 
{\n                let addr = key\n                    .as_balance_hold()\n                    .ok_or(Self::Error::UnexpectedKeyVariant(key))?;\n                ret.push(*addr);\n            }\n            ret\n        };\n        Ok(tagged_keys)\n    }\n\n    fn get_total_balance(&self, key: Key) -> Result<Motes, Self::Error> {\n        let key = {\n            if let Key::URef(uref) = key {\n                Key::Balance(uref.addr())\n            } else {\n                key\n            }\n        };\n        if let Key::Balance(_) = key {\n            let stored_value: StoredValue = self\n                .read(&key)?\n                .ok_or(TrackingCopyError::KeyNotFound(key))?;\n            let cl_value: CLValue = stored_value\n                .try_into()\n                .map_err(TrackingCopyError::TypeMismatch)?;\n            let total_balance = cl_value.into_t::<U512>()?;\n            Ok(Motes::new(total_balance))\n        } else {\n            Err(Self::Error::UnexpectedKeyVariant(key))\n        }\n    }\n\n    fn get_available_balance(&mut self, key: Key) -> Result<Motes, Self::Error> {\n        let purse_addr = {\n            if let Key::URef(uref) = key {\n                uref.addr()\n            } else if let Key::Balance(uref_addr) = key {\n                uref_addr\n            } else {\n                return Err(Self::Error::UnexpectedKeyVariant(key));\n            }\n        };\n\n        let total_balance = self.get_total_balance(Key::Balance(purse_addr))?.value();\n        let (block_time, handling, interval) =\n            match self.get_balance_hold_config(BalanceHoldAddrTag::Gas)? 
{\n                None => {\n                    // if there is no hold config at this root hash, holds are not a thing\n                    // and available balance = total balance\n                    return Ok(Motes::new(total_balance));\n                }\n                Some((block_time, handling, interval)) => (block_time, handling, interval),\n            };\n\n        let balance_holds = self.get_balance_holds(purse_addr, block_time, interval)?;\n        let gas_handling = (handling, interval).into();\n        let processing_handling = ProcessingHoldBalanceHandling::new();\n        match balance_holds.available_balance(\n            block_time,\n            total_balance,\n            gas_handling,\n            processing_handling,\n        ) {\n            Ok(balance) => Ok(Motes::new(balance)),\n            Err(balance_error) => Err(Self::Error::Balance(balance_error)),\n        }\n    }\n\n    fn get_purse_balance_key_with_proof(\n        &self,\n        purse_key: Key,\n    ) -> Result<(Key, TrieMerkleProof<Key, StoredValue>), Self::Error> {\n        let balance_key: Key = purse_key\n            .uref_to_hash()\n            .ok_or(TrackingCopyError::UnexpectedKeyVariant(purse_key))?;\n        let proof: TrieMerkleProof<Key, StoredValue> = self\n            .read_with_proof(&balance_key)?\n            .ok_or(TrackingCopyError::KeyNotFound(purse_key))?;\n        let stored_value_ref: &StoredValue = proof.value();\n        let cl_value: CLValue = stored_value_ref\n            .to_owned()\n            .try_into()\n            .map_err(TrackingCopyError::TypeMismatch)?;\n        let balance_key: Key = cl_value.into_t()?;\n        Ok((balance_key, proof))\n    }\n\n    fn get_total_balance_with_proof(\n        &self,\n        key: Key,\n    ) -> Result<(U512, TrieMerkleProof<Key, StoredValue>), Self::Error> {\n        let key = {\n            if let Key::URef(uref) = key {\n                Key::Balance(uref.addr())\n            } else {\n                
key\n            }\n        };\n        if let Key::Balance(_) = key {\n            let proof: TrieMerkleProof<Key, StoredValue> = self\n                .read_with_proof(&key.normalize())?\n                .ok_or(TrackingCopyError::KeyNotFound(key))?;\n            let cl_value: CLValue = proof\n                .value()\n                .to_owned()\n                .try_into()\n                .map_err(TrackingCopyError::TypeMismatch)?;\n            let balance = cl_value.into_t()?;\n            Ok((balance, proof))\n        } else {\n            Err(Self::Error::UnexpectedKeyVariant(key))\n        }\n    }\n\n    fn clear_expired_balance_holds(\n        &mut self,\n        purse_addr: URefAddr,\n        filter: Vec<(BalanceHoldAddrTag, HoldsEpoch)>,\n    ) -> Result<(), Self::Error> {\n        for (tag, holds_epoch) in filter {\n            let prefix = match tag {\n                BalanceHoldAddrTag::Gas => KeyPrefix::GasBalanceHoldsByPurse(purse_addr),\n                BalanceHoldAddrTag::Processing => {\n                    KeyPrefix::ProcessingBalanceHoldsByPurse(purse_addr)\n                }\n            };\n            let immut: &_ = self;\n            let hold_keys = immut.keys_with_prefix(&prefix.to_bytes()?)?;\n            for hold_key in hold_keys {\n                let balance_hold_addr = hold_key\n                    .as_balance_hold()\n                    .ok_or(Self::Error::UnexpectedKeyVariant(hold_key))?;\n                let hold_block_time = balance_hold_addr.block_time();\n                if let Some(earliest_relevant_timestamp) = holds_epoch.value() {\n                    if hold_block_time.value() > earliest_relevant_timestamp {\n                        // skip still relevant holds\n                        //  the expectation is that holds are cleared after balance checks,\n                        //  and before payment settlement; if that ordering changes in the\n                        //  future this strategy should be reevaluated to 
determine if it\n                        //  remains correct.\n                        continue;\n                    }\n                }\n                // prune away holds with a timestamp newer than epoch timestamp\n                //  including holds with a timestamp == epoch timestamp\n                self.prune(hold_key)\n            }\n        }\n        Ok(())\n    }\n\n    fn get_balance_holds(\n        &mut self,\n        purse_addr: URefAddr,\n        block_time: BlockTime,\n        interval: u64,\n    ) -> Result<BTreeMap<BlockTime, BalanceHolds>, Self::Error> {\n        // NOTE: currently there are two kinds of holds, gas and processing.\n        // Processing holds only effect one block to prevent double spend and are always\n        // cleared at the end of processing each transaction. Gas holds persist for some\n        // interval, over many blocks and eras. Thus, using the holds_epoch for gas holds\n        // during transaction execution also picks up processing holds and call sites of\n        // this method currently pass the holds epoch for gas holds. 
This works fine for\n        // now, but if one or more other kinds of holds with differing periods are added\n        // in the future, this logic will need to be tweaked to take get the holds epoch\n        // for each hold kind and process each kind discretely in order and collate the\n        // non-expired hold total at the end.\n        let mut ret: BTreeMap<BlockTime, BalanceHolds> = BTreeMap::new();\n        let holds_epoch = { HoldsEpoch::from_millis(block_time.value(), interval) };\n        let holds = self.get_balance_hold_addresses(purse_addr)?;\n        for balance_hold_addr in holds {\n            let block_time = balance_hold_addr.block_time();\n            if let Some(timestamp) = holds_epoch.value() {\n                if block_time.value() < timestamp {\n                    // skip holds older than the interval\n                    //  don't skip holds with a timestamp >= epoch timestamp\n                    continue;\n                }\n            }\n            let hold_key: Key = balance_hold_addr.into();\n            let hold_amount = match self.read(&hold_key) {\n                Ok(Some(StoredValue::CLValue(cl_value))) => match cl_value.into_t::<U512>() {\n                    Ok(val) => val,\n                    Err(cve) => return Err(Self::Error::CLValue(cve)),\n                },\n                Ok(Some(_)) => return Err(Self::Error::UnexpectedStoredValueVariant),\n                Ok(None) => return Err(Self::Error::KeyNotFound(hold_key)),\n                Err(tce) => return Err(tce),\n            };\n            match ret.entry(block_time) {\n                Entry::Vacant(entry) => {\n                    let mut inner = BTreeMap::new();\n                    inner.insert(balance_hold_addr.tag(), hold_amount);\n                    entry.insert(inner);\n                }\n                Entry::Occupied(mut occupied_entry) => {\n                    let inner = occupied_entry.get_mut();\n                    match 
inner.entry(balance_hold_addr.tag()) {\n                        Entry::Vacant(entry) => {\n                            entry.insert(hold_amount);\n                        }\n                        Entry::Occupied(_) => {\n                            unreachable!(\n                                \"there should be only one entry per (block_time, hold kind)\"\n                            );\n                        }\n                    }\n                }\n            }\n        }\n        Ok(ret)\n    }\n\n    fn get_balance_holds_with_proof(\n        &self,\n        purse_addr: URefAddr,\n    ) -> Result<BTreeMap<BlockTime, BalanceHoldsWithProof>, Self::Error> {\n        // NOTE: currently there are two kinds of holds, gas and processing.\n        // Processing holds only effect one block to prevent double spend and are always\n        // cleared at the end of processing each transaction. Gas holds persist for some\n        // interval, over many blocks and eras. Thus, using the holds_epoch for gas holds\n        // during transaction execution also picks up processing holds and call sites of\n        // this method currently pass the holds epoch for gas holds. This works fine for\n        // now, but if one or more other kinds of holds with differing periods are added\n        // in the future, this logic will need to be tweaked to take get the holds epoch\n        // for each hold kind and process each kind discretely in order and collate the\n        // non-expired hold total at the end.\n        let mut ret: BTreeMap<BlockTime, BalanceHoldsWithProof> = BTreeMap::new();\n        let (block_time, interval) = match self.get_balance_hold_config(BalanceHoldAddrTag::Gas)? 
{\n            Some((block_time, _, interval)) => (block_time.value(), interval),\n            None => {\n                // if there is no holds config at this root hash, there can't be any holds\n                return Ok(ret);\n            }\n        };\n        let holds_epoch = { HoldsEpoch::from_millis(block_time, interval) };\n        let holds = self.get_balance_hold_addresses(purse_addr)?;\n        for balance_hold_addr in holds {\n            let block_time = balance_hold_addr.block_time();\n            if let Some(timestamp) = holds_epoch.value() {\n                if block_time.value() < timestamp {\n                    // skip holds older than the interval\n                    //  don't skip holds with a timestamp >= epoch timestamp\n                    continue;\n                }\n            }\n            let hold_key: Key = balance_hold_addr.into();\n            let proof: TrieMerkleProof<Key, StoredValue> = self\n                .read_with_proof(&hold_key.normalize())?\n                .ok_or(TrackingCopyError::KeyNotFound(hold_key))?;\n            let cl_value: CLValue = proof\n                .value()\n                .to_owned()\n                .try_into()\n                .map_err(TrackingCopyError::TypeMismatch)?;\n            let hold_amount = cl_value.into_t()?;\n            match ret.entry(block_time) {\n                Entry::Vacant(entry) => {\n                    let mut inner = BTreeMap::new();\n                    inner.insert(balance_hold_addr.tag(), (hold_amount, proof));\n                    entry.insert(inner);\n                }\n                Entry::Occupied(mut occupied_entry) => {\n                    let inner = occupied_entry.get_mut();\n                    match inner.entry(balance_hold_addr.tag()) {\n                        Entry::Vacant(entry) => {\n                            entry.insert((hold_amount, proof));\n                        }\n                        Entry::Occupied(_) => {\n                            
unreachable!(\n                                \"there should be only one entry per (block_time, hold kind)\"\n                            );\n                        }\n                    }\n                }\n            }\n        }\n        Ok(ret)\n    }\n\n    fn get_message_topics(&self, hash_addr: EntityAddr) -> Result<MessageTopics, Self::Error> {\n        let keys = self.get_keys_by_prefix(&KeyPrefix::MessageEntriesByEntity(hash_addr))?;\n\n        let mut topics: BTreeMap<String, TopicNameHash> = BTreeMap::new();\n\n        for entry_key in &keys {\n            if let Some(topic_name_hash) = entry_key.as_message_topic_name_hash() {\n                match self.read(entry_key)?.as_ref() {\n                    Some(StoredValue::Message(_)) => {\n                        continue;\n                    }\n                    Some(StoredValue::MessageTopic(summary)) => {\n                        topics.insert(summary.topic_name().to_owned(), topic_name_hash);\n                    }\n                    Some(other) => {\n                        return Err(TrackingCopyError::TypeMismatch(\n                            StoredValueTypeMismatch::new(\n                                \"MessageTopic\".to_string(),\n                                other.type_name(),\n                            ),\n                        ));\n                    }\n                    None => match self.cache.reads_cached.get(entry_key) {\n                        Some(StoredValue::Message(_)) => {\n                            continue;\n                        }\n                        Some(StoredValue::MessageTopic(summary)) => {\n                            topics.insert(summary.topic_name().to_owned(), topic_name_hash);\n                        }\n                        Some(_) | None => {\n                            return Err(TrackingCopyError::KeyNotFound(*entry_key));\n                        }\n                    },\n                };\n            }\n        }\n\n        
Ok(MessageTopics::from(topics))\n    }\n\n    fn get_named_keys(&self, entity_addr: EntityAddr) -> Result<NamedKeys, Self::Error> {\n        Ok(self\n            .runtime_footprint_by_entity_addr(entity_addr)?\n            .take_named_keys())\n    }\n\n    fn get_v1_entry_points(&self, entity_addr: EntityAddr) -> Result<EntryPoints, Self::Error> {\n        Ok(self\n            .runtime_footprint_by_entity_addr(entity_addr)?\n            .entry_points()\n            .clone())\n    }\n\n    fn get_package(&mut self, hash_addr: HashAddr) -> Result<Package, Self::Error> {\n        let key = Key::Hash(hash_addr);\n        match self.read(&key)? {\n            Some(StoredValue::ContractPackage(contract_package)) => Ok(contract_package.into()),\n            Some(_) | None => match self.read(&Key::SmartContract(hash_addr))? {\n                Some(StoredValue::SmartContract(package)) => Ok(package),\n                Some(other) => Err(TrackingCopyError::TypeMismatch(\n                    StoredValueTypeMismatch::new(\n                        \"Package or CLValue\".to_string(),\n                        other.type_name(),\n                    ),\n                )),\n                None => Err(Self::Error::ValueNotFound(key.to_formatted_string())),\n            },\n        }\n    }\n\n    fn get_contract(&mut self, contract_hash: ContractHash) -> Result<Contract, Self::Error> {\n        let key = Key::Hash(contract_hash.value());\n        match self.read(&key)? {\n            Some(StoredValue::Contract(contract)) => Ok(contract),\n            Some(other) => Err(Self::Error::TypeMismatch(StoredValueTypeMismatch::new(\n                \"Contract\".to_string(),\n                other.type_name(),\n            ))),\n            None => Err(Self::Error::ValueNotFound(key.to_formatted_string())),\n        }\n    }\n\n    fn get_system_entity_registry(&self) -> Result<SystemHashRegistry, Self::Error> {\n        match self.read(&Key::SystemEntityRegistry)? 
{\n            Some(StoredValue::CLValue(registry)) => {\n                let registry: SystemHashRegistry =\n                    CLValue::into_t(registry).map_err(Self::Error::from)?;\n                Ok(registry)\n            }\n            Some(other) => Err(TrackingCopyError::TypeMismatch(\n                StoredValueTypeMismatch::new(\"CLValue\".to_string(), other.type_name()),\n            )),\n            None => Err(TrackingCopyError::KeyNotFound(Key::SystemEntityRegistry)),\n        }\n    }\n\n    fn get_checksum_registry(&mut self) -> Result<Option<ChecksumRegistry>, Self::Error> {\n        match self.get(&Key::ChecksumRegistry)? {\n            Some(StoredValue::CLValue(registry)) => {\n                let registry: ChecksumRegistry =\n                    CLValue::into_t(registry).map_err(Self::Error::from)?;\n                Ok(Some(registry))\n            }\n            Some(other) => Err(TrackingCopyError::TypeMismatch(\n                StoredValueTypeMismatch::new(\"CLValue\".to_string(), other.type_name()),\n            )),\n            None => Ok(None),\n        }\n    }\n\n    fn get_byte_code(&mut self, byte_code_hash: ByteCodeHash) -> Result<ByteCode, Self::Error> {\n        let key = Key::ByteCode(ByteCodeAddr::V1CasperWasm(byte_code_hash.value()));\n        match self.get(&key)? {\n            Some(StoredValue::ByteCode(byte_code)) => Ok(byte_code),\n            Some(other) => Err(TrackingCopyError::TypeMismatch(\n                StoredValueTypeMismatch::new(\"ContractWasm\".to_string(), other.type_name()),\n            )),\n            None => Err(TrackingCopyError::KeyNotFound(key)),\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/tracking_copy/ext_entity.rs",
    "content": "use std::collections::BTreeSet;\nuse tracing::{debug, error};\n\nuse casper_types::{\n    account::AccountHash,\n    addressable_entity::{ActionThresholds, AssociatedKeys, NamedKeyAddr, NamedKeyValue, Weight},\n    contracts::{ContractHash, NamedKeys},\n    system::{\n        handle_payment::ACCUMULATION_PURSE_KEY, SystemEntityType, AUCTION, HANDLE_PAYMENT, MINT,\n    },\n    AccessRights, Account, AddressableEntity, AddressableEntityHash, ByteCode, ByteCodeAddr,\n    ByteCodeHash, CLValue, ContextAccessRights, ContractRuntimeTag, EntityAddr, EntityKind,\n    EntityVersions, EntryPointAddr, EntryPointValue, EntryPoints, Groups, HashAddr, Key, Package,\n    PackageHash, PackageStatus, Phase, ProtocolVersion, PublicKey, RuntimeFootprint, StoredValue,\n    StoredValueTypeMismatch, URef, U512,\n};\n\nuse crate::{\n    global_state::{error::Error as GlobalStateError, state::StateReader},\n    tracking_copy::{TrackingCopy, TrackingCopyError, TrackingCopyExt},\n    AddressGenerator, KeyPrefix,\n};\n\n/// Fees purse handling.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub enum FeesPurseHandling {\n    /// Transfer fees to proposer.\n    ToProposer(AccountHash),\n    /// Transfer all fees to a system-wide accumulation purse, for future disbursement.\n    Accumulate,\n    /// Burn all fees from specified purse.\n    Burn(URef),\n    /// No fees are charged.\n    None(URef),\n}\n\n/// Higher-level operations on the state via a `TrackingCopy`.\npub trait TrackingCopyEntityExt<R> {\n    /// The type for the returned errors.\n    type Error;\n\n    /// Gets a runtime information by entity_addr.\n    fn runtime_footprint_by_entity_addr(\n        &self,\n        entity_addr: EntityAddr,\n    ) -> Result<RuntimeFootprint, Self::Error>;\n\n    /// Gets a runtime information by hash_addr.\n    fn runtime_footprint_by_hash_addr(\n        &mut self,\n        hash_addr: HashAddr,\n    ) -> Result<RuntimeFootprint, Self::Error>;\n\n    /// Gets a runtime information by 
account hash.\n    fn runtime_footprint_by_account_hash(\n        &mut self,\n        protocol_version: ProtocolVersion,\n        account_hash: AccountHash,\n    ) -> Result<(EntityAddr, RuntimeFootprint), Self::Error>;\n\n    /// Get runtime information for an account if authorized, else error.\n    fn authorized_runtime_footprint_by_account(\n        &mut self,\n        protocol_version: ProtocolVersion,\n        account_hash: AccountHash,\n        authorization_keys: &BTreeSet<AccountHash>,\n        administrative_accounts: &BTreeSet<AccountHash>,\n    ) -> Result<(RuntimeFootprint, EntityAddr), Self::Error>;\n\n    /// Returns runtime information and access rights if authorized, else error.\n    fn authorized_runtime_footprint_with_access_rights(\n        &mut self,\n        protocol_version: ProtocolVersion,\n        initiating_address: AccountHash,\n        authorization_keys: &BTreeSet<AccountHash>,\n        administrative_accounts: &BTreeSet<AccountHash>,\n    ) -> Result<(EntityAddr, RuntimeFootprint, ContextAccessRights), TrackingCopyError>;\n\n    /// Returns runtime information for systemic functionality.\n    fn system_entity_runtime_footprint(\n        &mut self,\n        protocol_version: ProtocolVersion,\n    ) -> Result<(EntityAddr, RuntimeFootprint, ContextAccessRights), TrackingCopyError>;\n\n    /// Migrate the NamedKeys for a entity.\n    fn migrate_named_keys(\n        &mut self,\n        entity_addr: EntityAddr,\n        named_keys: NamedKeys,\n    ) -> Result<(), Self::Error>;\n\n    /// Migrate entry points from and older structure to top level entries.\n    fn migrate_entry_points(\n        &mut self,\n        entity_addr: EntityAddr,\n        entry_points: EntryPoints,\n    ) -> Result<(), Self::Error>;\n\n    /// Upsert uref value to global state and imputed entity's named keys.\n    fn upsert_uref_to_named_keys(\n        &mut self,\n        entity_addr: EntityAddr,\n        name: &str,\n        named_keys: &NamedKeys,\n        uref: 
URef,\n        stored_value: StoredValue,\n    ) -> Result<(), Self::Error>;\n\n    /// Migrate Account to AddressableEntity.\n    fn migrate_account(\n        &mut self,\n        account_hash: AccountHash,\n        protocol_version: ProtocolVersion,\n    ) -> Result<(), Self::Error>;\n\n    /// Create an addressable entity to receive transfer.\n    fn create_new_addressable_entity_on_transfer(\n        &mut self,\n        account_hash: AccountHash,\n        main_purse: URef,\n        protocol_version: ProtocolVersion,\n    ) -> Result<(), Self::Error>;\n\n    /// Create an addressable entity instance using the field data of an account instance.\n    fn create_addressable_entity_from_account(\n        &mut self,\n        account: Account,\n        protocol_version: ProtocolVersion,\n    ) -> Result<(), Self::Error>;\n\n    /// Migrate ContractPackage to Package.\n    fn migrate_package(\n        &mut self,\n        contract_package_key: Key,\n        protocol_version: ProtocolVersion,\n    ) -> Result<(), Self::Error>;\n\n    /// Returns fee purse.\n    fn fees_purse(\n        &mut self,\n        protocol_version: ProtocolVersion,\n        fees_purse_handling: FeesPurseHandling,\n    ) -> Result<URef, TrackingCopyError>;\n\n    /// Returns named key from selected system contract.\n    fn system_contract_named_key(\n        &mut self,\n        system_contract_name: &str,\n        name: &str,\n    ) -> Result<Option<Key>, Self::Error>;\n}\n\nimpl<R> TrackingCopyEntityExt<R> for TrackingCopy<R>\nwhere\n    R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    type Error = TrackingCopyError;\n\n    fn runtime_footprint_by_entity_addr(\n        &self,\n        entity_addr: EntityAddr,\n    ) -> Result<RuntimeFootprint, Self::Error> {\n        let entity_key = match entity_addr {\n            EntityAddr::Account(account_addr) => {\n                let account_key = Key::Account(AccountHash::new(account_addr));\n                match self.read(&account_key)? 
{\n                    Some(StoredValue::Account(account)) => {\n                        return Ok(RuntimeFootprint::new_account_footprint(account))\n                    }\n                    Some(StoredValue::CLValue(cl_value)) => cl_value.to_t::<Key>()?,\n                    Some(other) => {\n                        return Err(TrackingCopyError::TypeMismatch(\n                            StoredValueTypeMismatch::new(\n                                \"Account or Key\".to_string(),\n                                other.type_name(),\n                            ),\n                        ))\n                    }\n                    None => return Err(TrackingCopyError::KeyNotFound(account_key)),\n                }\n            }\n            EntityAddr::SmartContract(addr) | EntityAddr::System(addr) => {\n                let contract_key = Key::Hash(addr);\n                match self.read(&contract_key)? {\n                    Some(StoredValue::Contract(contract)) => {\n                        let contract_hash = ContractHash::new(entity_addr.value());\n                        let maybe_system_entity_type = {\n                            let mut ret = None;\n                            let registry = self.get_system_entity_registry()?;\n                            for (name, hash) in registry.inner().into_iter() {\n                                if hash == entity_addr.value() {\n                                    match name.as_ref() {\n                                        MINT => ret = Some(SystemEntityType::Mint),\n                                        AUCTION => ret = Some(SystemEntityType::Auction),\n                                        HANDLE_PAYMENT => {\n                                            ret = Some(SystemEntityType::HandlePayment)\n                                        }\n                                        _ => continue,\n                                    }\n                                }\n                            }\n\n 
                           ret\n                        };\n\n                        return Ok(RuntimeFootprint::new_contract_footprint(\n                            contract_hash,\n                            contract,\n                            maybe_system_entity_type,\n                        ));\n                    }\n                    Some(StoredValue::CLValue(cl_value)) => cl_value.to_t::<Key>()?,\n                    Some(_) | None => Key::AddressableEntity(entity_addr),\n                }\n            }\n        };\n\n        match self.read(&entity_key)? {\n            Some(StoredValue::AddressableEntity(entity)) => {\n                let named_keys = {\n                    let keys =\n                        self.get_keys_by_prefix(&KeyPrefix::NamedKeysByEntity(entity_addr))?;\n\n                    let mut named_keys = NamedKeys::new();\n\n                    for entry_key in &keys {\n                        match self.read(entry_key)? {\n                            Some(StoredValue::NamedKey(named_key)) => {\n                                let key =\n                                    named_key.get_key().map_err(TrackingCopyError::CLValue)?;\n                                let name =\n                                    named_key.get_name().map_err(TrackingCopyError::CLValue)?;\n                                named_keys.insert(name, key);\n                            }\n                            Some(other) => {\n                                return Err(TrackingCopyError::TypeMismatch(\n                                    StoredValueTypeMismatch::new(\n                                        \"CLValue\".to_string(),\n                                        other.type_name(),\n                                    ),\n                                ));\n                            }\n                            None => match self.cache.reads_cached.get(entry_key) {\n                                
Some(StoredValue::NamedKey(named_key_value)) => {\n                                    let key = named_key_value\n                                        .get_key()\n                                        .map_err(TrackingCopyError::CLValue)?;\n                                    let name = named_key_value\n                                        .get_name()\n                                        .map_err(TrackingCopyError::CLValue)?;\n                                    named_keys.insert(name, key);\n                                }\n                                Some(_) | None => {\n                                    return Err(TrackingCopyError::KeyNotFound(*entry_key));\n                                }\n                            },\n                        };\n                    }\n\n                    named_keys\n                };\n                let entry_points = {\n                    let keys =\n                        self.get_keys_by_prefix(&KeyPrefix::EntryPointsV1ByEntity(entity_addr))?;\n\n                    let mut entry_points_v1 = EntryPoints::new();\n\n                    for entry_point_key in keys.iter() {\n                        match self.read(entry_point_key)? 
{\n                            Some(StoredValue::EntryPoint(EntryPointValue::V1CasperVm(\n                                entry_point,\n                            ))) => entry_points_v1.add_entry_point(entry_point),\n                            Some(other) => {\n                                return Err(TrackingCopyError::TypeMismatch(\n                                    StoredValueTypeMismatch::new(\n                                        \"EntryPointsV1\".to_string(),\n                                        other.type_name(),\n                                    ),\n                                ));\n                            }\n                            None => match self.cache.reads_cached.get(entry_point_key) {\n                                Some(StoredValue::EntryPoint(EntryPointValue::V1CasperVm(\n                                    entry_point,\n                                ))) => entry_points_v1.add_entry_point(entry_point.to_owned()),\n                                Some(other) => {\n                                    return Err(TrackingCopyError::TypeMismatch(\n                                        StoredValueTypeMismatch::new(\n                                            \"EntryPointsV1\".to_string(),\n                                            other.type_name(),\n                                        ),\n                                    ));\n                                }\n                                None => {\n                                    return Err(TrackingCopyError::KeyNotFound(*entry_point_key));\n                                }\n                            },\n                        }\n                    }\n\n                    entry_points_v1\n                };\n                Ok(RuntimeFootprint::new_entity_footprint(\n                    entity_addr,\n                    entity,\n                    named_keys,\n                    entry_points,\n                ))\n            }\n            
Some(other) => Err(TrackingCopyError::TypeMismatch(\n                StoredValueTypeMismatch::new(\"AddressableEntity\".to_string(), other.type_name()),\n            )),\n            None => Err(TrackingCopyError::KeyNotFound(entity_key)),\n        }\n    }\n\n    fn runtime_footprint_by_hash_addr(\n        &mut self,\n        hash_addr: HashAddr,\n    ) -> Result<RuntimeFootprint, Self::Error> {\n        let entity_addr = if self.get_system_entity_registry()?.exists(&hash_addr) {\n            EntityAddr::new_system(hash_addr)\n        } else {\n            EntityAddr::new_smart_contract(hash_addr)\n        };\n\n        self.runtime_footprint_by_entity_addr(entity_addr)\n    }\n\n    fn runtime_footprint_by_account_hash(\n        &mut self,\n        protocol_version: ProtocolVersion,\n        account_hash: AccountHash,\n    ) -> Result<(EntityAddr, RuntimeFootprint), Self::Error> {\n        let account_key = Key::Account(account_hash);\n\n        let entity_addr = match self.get(&account_key)? 
{\n            Some(StoredValue::Account(account)) => {\n                if self.enable_addressable_entity {\n                    self.create_addressable_entity_from_account(account.clone(), protocol_version)?;\n                }\n\n                let footprint = RuntimeFootprint::new_account_footprint(account);\n                let entity_addr = EntityAddr::new_account(account_hash.value());\n                return Ok((entity_addr, footprint));\n            }\n\n            Some(StoredValue::CLValue(contract_key_as_cl_value)) => {\n                let key = CLValue::into_t::<Key>(contract_key_as_cl_value)?;\n                if let Key::AddressableEntity(addr) = key {\n                    addr\n                } else {\n                    return Err(Self::Error::UnexpectedKeyVariant(key));\n                }\n            }\n            Some(other) => {\n                return Err(TrackingCopyError::TypeMismatch(\n                    StoredValueTypeMismatch::new(\"Key\".to_string(), other.type_name()),\n                ));\n            }\n            None => return Err(TrackingCopyError::KeyNotFound(account_key)),\n        };\n\n        match self.get(&Key::AddressableEntity(entity_addr))? 
{\n            Some(StoredValue::AddressableEntity(entity)) => {\n                let named_keys = self.get_named_keys(entity_addr)?;\n                let entry_points = self.get_v1_entry_points(entity_addr)?;\n                let runtime_footprint = RuntimeFootprint::new_entity_footprint(\n                    entity_addr,\n                    entity,\n                    named_keys,\n                    entry_points,\n                );\n                Ok((entity_addr, runtime_footprint))\n            }\n            Some(other) => Err(TrackingCopyError::TypeMismatch(\n                StoredValueTypeMismatch::new(\"AddressableEntity\".to_string(), other.type_name()),\n            )),\n            None => Err(TrackingCopyError::KeyNotFound(Key::AddressableEntity(\n                entity_addr,\n            ))),\n        }\n    }\n\n    fn authorized_runtime_footprint_by_account(\n        &mut self,\n        protocol_version: ProtocolVersion,\n        account_hash: AccountHash,\n        authorization_keys: &BTreeSet<AccountHash>,\n        administrative_accounts: &BTreeSet<AccountHash>,\n    ) -> Result<(RuntimeFootprint, EntityAddr), Self::Error> {\n        let (entity_addr, footprint) =\n            self.runtime_footprint_by_account_hash(protocol_version, account_hash)?;\n\n        if !administrative_accounts.is_empty()\n            && administrative_accounts\n                .intersection(authorization_keys)\n                .next()\n                .is_some()\n        {\n            // Exit early if there's at least a single signature coming from an admin.\n            return Ok((footprint, entity_addr));\n        }\n\n        // Authorize using provided authorization keys\n        if !footprint.can_authorize(authorization_keys) {\n            return Err(Self::Error::Authorization);\n        }\n\n        // Check total key weight against deploy threshold\n        if !footprint.can_deploy_with(authorization_keys) {\n            return 
Err(Self::Error::DeploymentAuthorizationFailure);\n        }\n\n        Ok((footprint, entity_addr))\n    }\n\n    fn authorized_runtime_footprint_with_access_rights(\n        &mut self,\n        protocol_version: ProtocolVersion,\n        initiating_address: AccountHash,\n        authorization_keys: &BTreeSet<AccountHash>,\n        administrative_accounts: &BTreeSet<AccountHash>,\n    ) -> Result<(EntityAddr, RuntimeFootprint, ContextAccessRights), TrackingCopyError> {\n        if initiating_address == PublicKey::System.to_account_hash() {\n            return self.system_entity_runtime_footprint(protocol_version);\n        }\n\n        let (footprint, entity_addr) = self.authorized_runtime_footprint_by_account(\n            protocol_version,\n            initiating_address,\n            authorization_keys,\n            administrative_accounts,\n        )?;\n        let access_rights = footprint.extract_access_rights(entity_addr.value());\n        Ok((entity_addr, footprint, access_rights))\n    }\n\n    fn system_entity_runtime_footprint(\n        &mut self,\n        protocol_version: ProtocolVersion,\n    ) -> Result<(EntityAddr, RuntimeFootprint, ContextAccessRights), TrackingCopyError> {\n        let system_account_hash = PublicKey::System.to_account_hash();\n        let (system_entity_addr, mut system_entity) =\n            self.runtime_footprint_by_account_hash(protocol_version, system_account_hash)?;\n\n        let system_entity_registry = self.get_system_entity_registry()?;\n\n        let (auction_named_keys, mut auction_access_rights) = {\n            let auction_hash = match system_entity_registry.get(AUCTION).copied() {\n                Some(auction_hash) => auction_hash,\n                None => {\n                    error!(\"unexpected failure; auction not found\");\n                    return Err(TrackingCopyError::MissingSystemContractHash(\n                        AUCTION.to_string(),\n                    ));\n                }\n            };\n    
        let auction = self.runtime_footprint_by_hash_addr(auction_hash)?;\n            let auction_access_rights = auction.extract_access_rights(auction_hash);\n            (auction.take_named_keys(), auction_access_rights)\n        };\n        let (mint_named_keys, mint_access_rights) = {\n            let mint_hash = match system_entity_registry.get(MINT).copied() {\n                Some(mint_hash) => mint_hash,\n                None => {\n                    error!(\"unexpected failure; mint not found\");\n                    return Err(TrackingCopyError::MissingSystemContractHash(\n                        MINT.to_string(),\n                    ));\n                }\n            };\n            let mint = self.runtime_footprint_by_hash_addr(mint_hash)?;\n            let mint_access_rights = mint.extract_access_rights(mint_hash);\n            (mint.take_named_keys(), mint_access_rights)\n        };\n\n        let (payment_named_keys, payment_access_rights) = {\n            let payment_hash = match system_entity_registry.get(HANDLE_PAYMENT).copied() {\n                Some(payment_hash) => payment_hash,\n                None => {\n                    error!(\"unexpected failure; handle payment not found\");\n                    return Err(TrackingCopyError::MissingSystemContractHash(\n                        HANDLE_PAYMENT.to_string(),\n                    ));\n                }\n            };\n            let payment = self.runtime_footprint_by_hash_addr(payment_hash)?;\n            let payment_access_rights = payment.extract_access_rights(payment_hash);\n            (payment.take_named_keys(), payment_access_rights)\n        };\n\n        // the auction calls the mint for total supply behavior, so extending the context to include\n        // mint named keys & access rights\n        system_entity.named_keys_mut().append(auction_named_keys);\n        system_entity.named_keys_mut().append(mint_named_keys);\n        
system_entity.named_keys_mut().append(payment_named_keys);\n\n        auction_access_rights.extend_access_rights(mint_access_rights.take_access_rights());\n        auction_access_rights.extend_access_rights(payment_access_rights.take_access_rights());\n\n        Ok((system_entity_addr, system_entity, auction_access_rights))\n    }\n\n    fn migrate_named_keys(\n        &mut self,\n        entity_addr: EntityAddr,\n        named_keys: NamedKeys,\n    ) -> Result<(), Self::Error> {\n        if !self.enable_addressable_entity {\n            return Err(Self::Error::AddressableEntityDisable);\n        }\n\n        for (string, key) in named_keys.into_inner().into_iter() {\n            let entry_addr = NamedKeyAddr::new_from_string(entity_addr, string.clone())?;\n            let named_key_value =\n                StoredValue::NamedKey(NamedKeyValue::from_concrete_values(key, string.clone())?);\n            let entry_key = Key::NamedKey(entry_addr);\n            self.write(entry_key, named_key_value)\n        }\n\n        Ok(())\n    }\n\n    fn migrate_entry_points(\n        &mut self,\n        entity_addr: EntityAddr,\n        entry_points: EntryPoints,\n    ) -> Result<(), Self::Error> {\n        if !self.enable_addressable_entity {\n            return Err(Self::Error::AddressableEntityDisable);\n        }\n\n        if entry_points.is_empty() {\n            return Ok(());\n        }\n        for entry_point in entry_points.take_entry_points().into_iter() {\n            let entry_point_addr =\n                EntryPointAddr::new_v1_entry_point_addr(entity_addr, entry_point.name())?;\n            let entry_point_value =\n                StoredValue::EntryPoint(EntryPointValue::V1CasperVm(entry_point));\n            self.write(Key::EntryPoint(entry_point_addr), entry_point_value)\n        }\n\n        Ok(())\n    }\n\n    fn upsert_uref_to_named_keys(\n        &mut self,\n        entity_addr: EntityAddr,\n        name: &str,\n        named_keys: &NamedKeys,\n        
uref: URef,\n        stored_value: StoredValue,\n    ) -> Result<(), Self::Error> {\n        match named_keys.get(name) {\n            Some(key) => {\n                if let Key::URef(_) = key {\n                    self.write(*key, stored_value);\n                } else {\n                    return Err(Self::Error::UnexpectedKeyVariant(*key));\n                }\n            }\n            None => {\n                let uref_key = Key::URef(uref).normalize();\n                self.write(uref_key, stored_value);\n\n                if self.enable_addressable_entity {\n                    let entry_value = {\n                        let named_key_value =\n                            NamedKeyValue::from_concrete_values(uref_key, name.to_string())\n                                .map_err(Self::Error::CLValue)?;\n                        StoredValue::NamedKey(named_key_value)\n                    };\n                    let entry_key = {\n                        let named_key_entry =\n                            NamedKeyAddr::new_from_string(entity_addr, name.to_string())\n                                .map_err(Self::Error::BytesRepr)?;\n                        Key::NamedKey(named_key_entry)\n                    };\n\n                    self.write(entry_key, entry_value);\n                } else {\n                    let named_key_value = StoredValue::CLValue(CLValue::from_t((name, uref_key))?);\n                    let base_key = match entity_addr {\n                        EntityAddr::System(hash_addr) | EntityAddr::SmartContract(hash_addr) => {\n                            Key::Hash(hash_addr)\n                        }\n                        EntityAddr::Account(addr) => Key::Account(AccountHash::new(addr)),\n                    };\n                    self.add(base_key, named_key_value)?;\n                }\n            }\n        };\n        Ok(())\n    }\n\n    fn migrate_account(\n        &mut self,\n        account_hash: AccountHash,\n        
protocol_version: ProtocolVersion,\n    ) -> Result<(), Self::Error> {\n        if !self.enable_addressable_entity {\n            debug!(\"ae is not enabled, skipping migration\");\n            return Ok(());\n        }\n        let key = Key::Account(account_hash);\n        let maybe_stored_value = self.read(&key)?;\n\n        match maybe_stored_value {\n            Some(StoredValue::Account(account)) => {\n                self.create_addressable_entity_from_account(account, protocol_version)\n            }\n            Some(StoredValue::CLValue(_)) => Ok(()),\n            // This means the Account does not exist, which we consider to be\n            // an authorization error. As used by the node, this type of deploy\n            // will have already been filtered out, but for other EE use cases\n            // and testing it is reachable.\n            Some(_) => Err(Self::Error::UnexpectedStoredValueVariant),\n            None => Err(Self::Error::AccountNotFound(key)),\n        }\n    }\n\n    fn create_new_addressable_entity_on_transfer(\n        &mut self,\n        account_hash: AccountHash,\n        main_purse: URef,\n        protocol_version: ProtocolVersion,\n    ) -> Result<(), Self::Error> {\n        let mut generator = AddressGenerator::new(main_purse.addr().as_ref(), Phase::System);\n\n        let byte_code_hash = ByteCodeHash::default();\n        let entity_hash = AddressableEntityHash::new(account_hash.value());\n        let package_hash = PackageHash::new(generator.new_hash_address());\n\n        let associated_keys = AssociatedKeys::new(account_hash, Weight::new(1));\n\n        let action_thresholds: ActionThresholds = Default::default();\n\n        let entity = AddressableEntity::new(\n            package_hash,\n            byte_code_hash,\n            protocol_version,\n            main_purse,\n            associated_keys,\n            action_thresholds,\n            EntityKind::Account(account_hash),\n        );\n\n        let entity_addr = 
EntityAddr::new_account(entity_hash.value());\n        let package = {\n            let mut package = Package::new(\n                EntityVersions::default(),\n                BTreeSet::default(),\n                Groups::default(),\n                PackageStatus::Locked,\n            );\n            package.insert_entity_version(protocol_version.value().major, entity_addr);\n            package\n        };\n\n        let entity_key = Key::AddressableEntity(entity_addr);\n\n        self.write(entity_key, entity.into());\n        self.write(package_hash.into(), package.into());\n        let contract_by_account = match CLValue::from_t(entity_key) {\n            Ok(cl_value) => cl_value,\n            Err(err) => return Err(Self::Error::CLValue(err)),\n        };\n\n        self.write(\n            Key::Account(account_hash),\n            StoredValue::CLValue(contract_by_account),\n        );\n        Ok(())\n    }\n\n    fn create_addressable_entity_from_account(\n        &mut self,\n        account: Account,\n        protocol_version: ProtocolVersion,\n    ) -> Result<(), Self::Error> {\n        let account_hash = account.account_hash();\n        if !self.enable_addressable_entity {\n            self.write(Key::Account(account_hash), StoredValue::Account(account));\n            return Ok(());\n        }\n\n        // carry forward the account hash to allow reverse lookup\n        let entity_hash = AddressableEntityHash::new(account_hash.value());\n        let entity_addr = EntityAddr::new_account(entity_hash.value());\n\n        // migrate named keys -- if this fails there is no reason to proceed further.\n        let named_keys = account.named_keys().clone();\n        self.migrate_named_keys(entity_addr, named_keys)?;\n\n        // write package first\n        let package_hash = {\n            let mut generator =\n                AddressGenerator::new(account.main_purse().addr().as_ref(), Phase::System);\n\n            let package_hash = 
PackageHash::new(generator.new_hash_address());\n\n            let mut package = Package::new(\n                EntityVersions::default(),\n                BTreeSet::default(),\n                Groups::default(),\n                PackageStatus::Locked,\n            );\n            package.insert_entity_version(protocol_version.value().major, entity_addr);\n            self.write(package_hash.into(), package.into());\n            package_hash\n        };\n\n        // write entity after package\n        {\n            // currently, addressable entities of account kind are not permitted to have bytecode\n            // however, we intend to revisit this and potentially allow it in a future release\n            // as a replacement for stored session.\n            let byte_code_hash = ByteCodeHash::default();\n\n            let action_thresholds = {\n                let account_threshold = account.action_thresholds().clone();\n                ActionThresholds::new(\n                    Weight::new(account_threshold.deployment.value()),\n                    Weight::new(1u8),\n                    Weight::new(account_threshold.key_management.value()),\n                )\n                .map_err(Self::Error::SetThresholdFailure)?\n            };\n\n            let associated_keys = AssociatedKeys::from(account.associated_keys().clone());\n\n            let entity = AddressableEntity::new(\n                package_hash,\n                byte_code_hash,\n                protocol_version,\n                account.main_purse(),\n                associated_keys,\n                action_thresholds,\n                EntityKind::Account(account_hash),\n            );\n            let entity_key = entity.entity_key(entity_hash);\n            let contract_by_account = match CLValue::from_t(entity_key) {\n                Ok(cl_value) => cl_value,\n                Err(err) => return Err(Self::Error::CLValue(err)),\n            };\n\n            self.write(entity_key, 
entity.into());\n            self.write(\n                Key::Account(account_hash),\n                StoredValue::CLValue(contract_by_account),\n            );\n        }\n\n        Ok(())\n    }\n\n    fn migrate_package(\n        &mut self,\n        legacy_package_key: Key,\n        protocol_version: ProtocolVersion,\n    ) -> Result<(), Self::Error> {\n        if !self.enable_addressable_entity {\n            return Err(Self::Error::AddressableEntityDisable);\n        }\n\n        let legacy_package = match self.read(&legacy_package_key)? {\n            Some(StoredValue::ContractPackage(legacy_package)) => legacy_package,\n            Some(_) | None => {\n                return Err(Self::Error::ValueNotFound(format!(\n                    \"contract package not found {}\",\n                    legacy_package_key\n                )));\n            }\n        };\n\n        let legacy_versions = legacy_package.versions().clone();\n        let access_uref = legacy_package.access_key();\n        let mut generator = AddressGenerator::new(access_uref.addr().as_ref(), Phase::System);\n\n        let package: Package = legacy_package.into();\n\n        for (_, contract_hash) in legacy_versions.into_iter() {\n            let contract = match self.read(&Key::Hash(contract_hash.value()))? 
{\n                Some(StoredValue::Contract(legacy_contract)) => legacy_contract,\n                Some(_) | None => {\n                    return Err(Self::Error::ValueNotFound(format!(\n                        \"contract not found {}\",\n                        contract_hash\n                    )));\n                }\n            };\n\n            let purse = generator.new_uref(AccessRights::all());\n            let cl_value: CLValue = CLValue::from_t(()).map_err(Self::Error::CLValue)?;\n            self.write(Key::URef(purse), StoredValue::CLValue(cl_value));\n\n            let balance_value: CLValue =\n                CLValue::from_t(U512::zero()).map_err(Self::Error::CLValue)?;\n            self.write(\n                Key::Balance(purse.addr()),\n                StoredValue::CLValue(balance_value),\n            );\n\n            let contract_addr = EntityAddr::new_smart_contract(contract_hash.value());\n\n            let contract_wasm_hash = contract.contract_wasm_hash();\n\n            let updated_entity = AddressableEntity::new(\n                PackageHash::new(contract.contract_package_hash().value()),\n                ByteCodeHash::new(contract_wasm_hash.value()),\n                protocol_version,\n                purse,\n                AssociatedKeys::default(),\n                ActionThresholds::default(),\n                EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1),\n            );\n\n            let entry_points = contract.entry_points().clone();\n            let named_keys = contract.take_named_keys();\n\n            self.migrate_named_keys(contract_addr, named_keys)?;\n            self.migrate_entry_points(contract_addr, entry_points.into())?;\n\n            let maybe_previous_wasm = self\n                .read(&Key::Hash(contract_wasm_hash.value()))?\n                .and_then(|stored_value| stored_value.into_contract_wasm());\n\n            match maybe_previous_wasm {\n                None => {\n                    return 
Err(Self::Error::ValueNotFound(format!(\n                        \"{}\",\n                        contract_wasm_hash\n                    )));\n                }\n                Some(contract_wasm) => {\n                    let byte_code_key = Key::byte_code_key(ByteCodeAddr::new_wasm_addr(\n                        updated_entity.byte_code_addr(),\n                    ));\n                    let byte_code_cl_value = match CLValue::from_t(byte_code_key) {\n                        Ok(cl_value) => cl_value,\n                        Err(err) => return Err(Self::Error::CLValue(err)),\n                    };\n                    self.write(\n                        Key::Hash(updated_entity.byte_code_addr()),\n                        StoredValue::CLValue(byte_code_cl_value),\n                    );\n\n                    let byte_code: ByteCode = contract_wasm.into();\n                    self.write(byte_code_key, StoredValue::ByteCode(byte_code));\n                }\n            }\n\n            let entity_hash = AddressableEntityHash::new(contract_hash.value());\n            let entity_key = Key::contract_entity_key(entity_hash);\n            let indirection = match CLValue::from_t(entity_key) {\n                Ok(cl_value) => cl_value,\n                Err(err) => return Err(Self::Error::CLValue(err)),\n            };\n            self.write(\n                Key::Hash(contract_hash.value()),\n                StoredValue::CLValue(indirection),\n            );\n\n            self.write(entity_key, StoredValue::AddressableEntity(updated_entity));\n        }\n\n        let package_key = Key::SmartContract(\n            legacy_package_key\n                .into_hash_addr()\n                .ok_or(Self::Error::UnexpectedKeyVariant(legacy_package_key))?,\n        );\n\n        let access_key_value =\n            CLValue::from_t((package_key, access_uref)).map_err(Self::Error::CLValue)?;\n        self.write(legacy_package_key, StoredValue::CLValue(access_key_value));\n     
   self.write(package_key, StoredValue::SmartContract(package));\n        Ok(())\n    }\n\n    fn fees_purse(\n        &mut self,\n        protocol_version: ProtocolVersion,\n        fees_purse_handling: FeesPurseHandling,\n    ) -> Result<URef, TrackingCopyError> {\n        let fee_handling = fees_purse_handling;\n        match fee_handling {\n            FeesPurseHandling::None(uref) => Ok(uref),\n            FeesPurseHandling::ToProposer(proposer) => {\n                let (_, entity) =\n                    self.runtime_footprint_by_account_hash(protocol_version, proposer)?;\n                Ok(entity\n                    .main_purse()\n                    .ok_or(TrackingCopyError::AddressableEntityDisable)?)\n            }\n            FeesPurseHandling::Accumulate => {\n                let registry = self.get_system_entity_registry()?;\n                let entity_addr = {\n                    let hash = match registry.get(HANDLE_PAYMENT) {\n                        Some(hash) => hash,\n                        None => {\n                            return Err(TrackingCopyError::MissingSystemContractHash(\n                                HANDLE_PAYMENT.to_string(),\n                            ));\n                        }\n                    };\n                    EntityAddr::new_system(*hash)\n                };\n\n                let named_keys = self.get_named_keys(entity_addr)?;\n\n                let accumulation_purse_uref = match named_keys.get(ACCUMULATION_PURSE_KEY) {\n                    Some(Key::URef(accumulation_purse)) => *accumulation_purse,\n                    Some(_) | None => {\n                        error!(\n                            \"fee handling is configured to accumulate but handle payment does not \\\n                            have accumulation purse\"\n                        );\n                        return Err(TrackingCopyError::NamedKeyNotFound(\n                            ACCUMULATION_PURSE_KEY.to_string(),\n            
            ));\n                    }\n                };\n\n                Ok(accumulation_purse_uref)\n            }\n            FeesPurseHandling::Burn(uref) => Ok(uref),\n        }\n    }\n\n    fn system_contract_named_key(\n        &mut self,\n        system_contract_name: &str,\n        name: &str,\n    ) -> Result<Option<Key>, Self::Error> {\n        let system_entity_registry = self.get_system_entity_registry()?;\n        let hash = match system_entity_registry.get(system_contract_name).copied() {\n            Some(hash) => hash,\n            None => {\n                error!(\n                    \"unexpected failure; system contract {} not found\",\n                    system_contract_name\n                );\n                return Err(TrackingCopyError::MissingSystemContractHash(\n                    system_contract_name.to_string(),\n                ));\n            }\n        };\n        let runtime_footprint = self.runtime_footprint_by_hash_addr(hash)?;\n        Ok(runtime_footprint.take_named_keys().get(name).copied())\n    }\n}\n"
  },
  {
    "path": "storage/src/tracking_copy/meter.rs",
    "content": "use std::{collections::BTreeSet, fmt::Debug};\n\n/// Trait for measuring \"size\" of key-value pairs.\npub trait Meter<K, V>: Copy + Default + Debug {\n    fn measure(&self, k: &K, v: &V) -> usize;\n\n    fn measure_keys(&self, keys: &BTreeSet<K>) -> usize;\n}\n\npub mod heap_meter {\n    use std::collections::BTreeSet;\n\n    use crate::tracking_copy::byte_size::ByteSize;\n\n    #[derive(Copy, Clone, Default, Debug)]\n    pub struct HeapSize;\n\n    impl<K: ByteSize, V: ByteSize> super::Meter<K, V> for HeapSize {\n        fn measure(&self, _: &K, v: &V) -> usize {\n            size_of::<V>() + v.byte_size()\n        }\n\n        fn measure_keys(&self, keys: &BTreeSet<K>) -> usize {\n            let mut total: usize = 0;\n            for key in keys {\n                total += key.byte_size();\n            }\n            total\n        }\n    }\n}\n\n#[cfg(test)]\npub mod count_meter {\n    use std::collections::BTreeSet;\n\n    #[derive(Clone, Copy, Debug, Default)]\n    pub struct Count;\n\n    impl<K, V> super::Meter<K, V> for Count {\n        fn measure(&self, _k: &K, _v: &V) -> usize {\n            1\n        }\n\n        fn measure_keys(&self, _keys: &BTreeSet<K>) -> usize {\n            unimplemented!()\n        }\n    }\n}\n"
  },
  {
    "path": "storage/src/tracking_copy/mod.rs",
    "content": "//! This module defines the `TrackingCopy` - a utility that caches operations on the state, so that\n//! the underlying state remains unmodified, but it can be interacted with as if the modifications\n//! were applied on it.\nmod byte_size;\nmod error;\nmod ext;\nmod ext_entity;\nmod meter;\n#[cfg(test)]\nmod tests;\n\nuse std::{\n    borrow::Borrow,\n    collections::{BTreeMap, BTreeSet, HashSet, VecDeque},\n    convert::{From, TryInto},\n    fmt::Debug,\n    sync::Arc,\n};\n\nuse linked_hash_map::LinkedHashMap;\nuse thiserror::Error;\nuse tracing::error;\n\nuse crate::{\n    global_state::{\n        error::Error as GlobalStateError, state::StateReader,\n        trie_store::operations::compute_state_hash, DEFAULT_MAX_QUERY_DEPTH,\n    },\n    KeyPrefix,\n};\nuse casper_types::{\n    addressable_entity::NamedKeyAddr,\n    bytesrepr::{self, ToBytes},\n    contract_messages::{Message, Messages},\n    contracts::NamedKeys,\n    execution::{Effects, TransformError, TransformInstruction, TransformKindV2, TransformV2},\n    global_state::TrieMerkleProof,\n    handle_stored_dictionary_value, BlockGlobalAddr, CLType, CLValue, CLValueError, Digest, Key,\n    KeyTag, StoredValue, StoredValueTypeMismatch, U512,\n};\n\nuse self::meter::{heap_meter::HeapSize, Meter};\npub use self::{\n    error::Error as TrackingCopyError,\n    ext::TrackingCopyExt,\n    ext_entity::{FeesPurseHandling, TrackingCopyEntityExt},\n};\n\n/// Result of a query on a `TrackingCopy`.\n#[derive(Debug)]\n#[allow(clippy::large_enum_variant)]\npub enum TrackingCopyQueryResult {\n    /// Invalid state root hash.\n    RootNotFound,\n    /// The value wasn't found.\n    ValueNotFound(String),\n    /// A circular reference was found in the state while traversing it.\n    CircularReference(String),\n    /// The query reached the depth limit.\n    DepthLimit {\n        /// The depth reached.\n        depth: u64,\n    },\n    /// The query was successful.\n    Success {\n        /// The value read 
from the state.\n        value: StoredValue,\n        /// Merkle proofs for the value.\n        proofs: Vec<TrieMerkleProof<Key, StoredValue>>,\n    },\n}\n\nimpl TrackingCopyQueryResult {\n    /// Is this a successful query?\n    pub fn is_success(&self) -> bool {\n        matches!(self, TrackingCopyQueryResult::Success { .. })\n    }\n\n    /// As result.\n    pub fn into_result(self) -> Result<StoredValue, TrackingCopyError> {\n        match self {\n            TrackingCopyQueryResult::RootNotFound => {\n                Err(TrackingCopyError::Storage(Error::RootNotFound))\n            }\n            TrackingCopyQueryResult::ValueNotFound(msg) => {\n                Err(TrackingCopyError::ValueNotFound(msg))\n            }\n            TrackingCopyQueryResult::CircularReference(msg) => {\n                Err(TrackingCopyError::CircularReference(msg))\n            }\n            TrackingCopyQueryResult::DepthLimit { depth } => {\n                Err(TrackingCopyError::QueryDepthLimit { depth })\n            }\n            TrackingCopyQueryResult::Success { value, .. 
} => Ok(value),\n        }\n    }\n}\n\n/// Result of a cache lookup in a tracking copy.\n#[derive(Debug, PartialEq, Eq)]\npub enum CacheEntry<'a> {\n    /// This key was explicitly pruned in the tracking copy.\n    Pruned,\n    /// The cache has no local knowledge of this key.\n    NotFound,\n    /// The cache contains a locally visible value for this key.\n    Exists(&'a StoredValue),\n}\n\n/// Struct containing state relating to a given query.\nstruct Query {\n    /// The key from where the search starts.\n    base_key: Key,\n    /// A collection of normalized keys which have been visited during the search.\n    visited_keys: HashSet<Key>,\n    /// The key currently being processed.\n    current_key: Key,\n    /// Path components which have not yet been followed, held in the same order in which they were\n    /// provided to the `query()` call.\n    unvisited_names: VecDeque<String>,\n    /// Path components which have been followed, held in the same order in which they were\n    /// provided to the `query()` call.\n    visited_names: Vec<String>,\n    /// Current depth of the query.\n    depth: u64,\n}\n\nimpl Query {\n    fn new(base_key: Key, path: &[String]) -> Self {\n        Query {\n            base_key,\n            current_key: base_key.normalize(),\n            unvisited_names: path.iter().cloned().collect(),\n            visited_names: Vec::new(),\n            visited_keys: HashSet::new(),\n            depth: 0,\n        }\n    }\n\n    /// Panics if `unvisited_names` is empty.\n    fn next_name(&mut self) -> Option<&String> {\n        let next_name = self.unvisited_names.pop_front()?;\n        self.visited_names.push(next_name);\n        self.visited_names.last()\n    }\n\n    fn navigate(&mut self, key: Key) {\n        self.current_key = key.normalize();\n        self.depth += 1;\n    }\n\n    fn navigate_for_named_key(&mut self, named_key: Key) {\n        if let Key::NamedKey(_) = &named_key {\n            self.current_key = 
named_key.normalize();\n        }\n    }\n\n    fn into_not_found_result(self, msg_prefix: &str) -> TrackingCopyQueryResult {\n        let msg = format!(\"{} at path: {}\", msg_prefix, self.current_path());\n        TrackingCopyQueryResult::ValueNotFound(msg)\n    }\n\n    fn into_circular_ref_result(self) -> TrackingCopyQueryResult {\n        let msg = format!(\n            \"{:?} has formed a circular reference at path: {}\",\n            self.current_key,\n            self.current_path()\n        );\n        TrackingCopyQueryResult::CircularReference(msg)\n    }\n\n    fn into_depth_limit_result(self) -> TrackingCopyQueryResult {\n        TrackingCopyQueryResult::DepthLimit { depth: self.depth }\n    }\n\n    fn current_path(&self) -> String {\n        let mut path = format!(\"{:?}\", self.base_key);\n        for name in &self.visited_names {\n            path.push('/');\n            path.push_str(name);\n        }\n        path\n    }\n}\n\n/// Keeps track of already accessed keys.\n/// We deliberately separate cached Reads from cached mutations\n/// because we want to invalidate Reads' cache so it doesn't grow too fast.\n#[derive(Clone, Debug)]\npub struct GenericTrackingCopyCache<M: Copy + Debug> {\n    max_cache_size: usize,\n    current_cache_size: usize,\n    reads_cached: LinkedHashMap<Key, StoredValue>,\n    muts_cached: BTreeMap<KeyWithByteRepr, StoredValue>,\n    prunes_cached: BTreeSet<Key>,\n    meter: M,\n}\n\nimpl<M: Meter<Key, StoredValue> + Copy + Default> GenericTrackingCopyCache<M> {\n    /// Creates instance of `TrackingCopyCache` with specified `max_cache_size`,\n    /// above which least-recently-used elements of the cache are invalidated.\n    /// Measurements of elements' \"size\" is done with the usage of `Meter`\n    /// instance.\n    pub fn new(max_cache_size: usize, meter: M) -> GenericTrackingCopyCache<M> {\n        GenericTrackingCopyCache {\n            max_cache_size,\n            current_cache_size: 0,\n            reads_cached: 
LinkedHashMap::new(),\n            muts_cached: BTreeMap::new(),\n            prunes_cached: BTreeSet::new(),\n            meter,\n        }\n    }\n\n    /// Creates instance of `TrackingCopyCache` with specified `max_cache_size`, above which\n    /// least-recently-used elements of the cache are invalidated. Measurements of elements' \"size\"\n    /// is done with the usage of default `Meter` instance.\n    pub fn new_default(max_cache_size: usize) -> GenericTrackingCopyCache<M> {\n        GenericTrackingCopyCache::new(max_cache_size, M::default())\n    }\n\n    /// Inserts `key` and `value` pair to Read cache.\n    pub fn insert_read(&mut self, key: Key, value: StoredValue) {\n        let element_size = Meter::measure(&self.meter, &key, &value);\n        self.reads_cached.insert(key, value);\n        self.current_cache_size += element_size;\n        while self.current_cache_size > self.max_cache_size {\n            match self.reads_cached.pop_front() {\n                Some((k, v)) => {\n                    let element_size = Meter::measure(&self.meter, &k, &v);\n                    self.current_cache_size -= element_size;\n                }\n                None => break,\n            }\n        }\n    }\n\n    /// Inserts `key` and `value` pair to Write/Add cache.\n    pub fn insert_write(&mut self, key: Key, value: StoredValue) {\n        let kb = KeyWithByteRepr::new(key);\n        self.prunes_cached.remove(&key);\n        self.muts_cached.insert(kb, value);\n    }\n\n    /// Inserts `key` and `value` pair to Write/Add cache.\n    pub fn insert_prune(&mut self, key: Key) {\n        let kb = KeyWithByteRepr::new(key);\n        self.muts_cached.remove(&kb);\n        self.prunes_cached.insert(key);\n    }\n\n    /// Gets value from `key` in the cache.\n    pub fn get<'a>(&'a mut self, key: &Key) -> CacheEntry<'a> {\n        if self.prunes_cached.contains(key) {\n            // the item is marked for pruning and therefore\n            // is no longer 
accessible.\n            return CacheEntry::Pruned;\n        }\n        let kb = KeyWithByteRepr::new(*key);\n        if let Some(value) = self.muts_cached.get(&kb) {\n            return CacheEntry::Exists(value);\n        };\n\n        match self.reads_cached.get_refresh(key).map(|v| &*v) {\n            Some(value) => CacheEntry::Exists(value),\n            None => CacheEntry::NotFound,\n        }\n    }\n\n    /// Get cached items by prefix.\n    fn get_muts_cached_by_byte_prefix(&self, prefix: &[u8]) -> Vec<Key> {\n        self.muts_cached\n            .range(prefix.to_vec()..)\n            .take_while(|(key, _)| key.starts_with(prefix))\n            .map(|(key, _)| key.to_key())\n            .collect()\n    }\n\n    /// Does the prune cache contain key.\n    pub fn is_pruned(&self, key: &Key) -> bool {\n        self.prunes_cached.contains(key)\n    }\n\n    pub(self) fn into_muts(self) -> (BTreeMap<KeyWithByteRepr, StoredValue>, BTreeSet<Key>) {\n        (self.muts_cached, self.prunes_cached)\n    }\n}\n\n/// A helper type for `TrackingCopyCache` that allows convenient storage and access\n/// to keys as bytes.\n/// Its equality and ordering is based on the byte representation of the key.\n#[derive(Debug, Clone)]\nstruct KeyWithByteRepr(Key, Vec<u8>);\n\nimpl KeyWithByteRepr {\n    #[inline]\n    fn new(key: Key) -> Self {\n        let bytes = key.to_bytes().expect(\"should always serialize a Key\");\n        KeyWithByteRepr(key, bytes)\n    }\n\n    #[inline]\n    fn starts_with(&self, prefix: &[u8]) -> bool {\n        self.1.starts_with(prefix)\n    }\n\n    #[inline]\n    fn to_key(&self) -> Key {\n        self.0\n    }\n}\n\nimpl Borrow<Vec<u8>> for KeyWithByteRepr {\n    #[inline]\n    fn borrow(&self) -> &Vec<u8> {\n        &self.1\n    }\n}\n\nimpl PartialEq for KeyWithByteRepr {\n    #[inline]\n    fn eq(&self, other: &Self) -> bool {\n        self.1 == other.1\n    }\n}\n\nimpl Eq for KeyWithByteRepr {}\n\nimpl PartialOrd for KeyWithByteRepr {\n    
#[inline]\n    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {\n        Some(self.cmp(other))\n    }\n}\n\nimpl Ord for KeyWithByteRepr {\n    #[inline]\n    fn cmp(&self, other: &Self) -> std::cmp::Ordering {\n        self.1.cmp(&other.1)\n    }\n}\n\n/// An alias for a `TrackingCopyCache` with `HeapSize` as the meter.\npub type TrackingCopyCache = GenericTrackingCopyCache<HeapSize>;\n\n/// An interface for the global state that caches all operations (reads and writes) instead of\n/// applying them directly to the state. This way the state remains unmodified, while the user can\n/// interact with it as if it was being modified in real time.\n#[derive(Clone)]\npub struct TrackingCopy<R> {\n    reader: Arc<R>,\n    cache: TrackingCopyCache,\n    effects: Effects,\n    max_query_depth: u64,\n    messages: Messages,\n    enable_addressable_entity: bool,\n}\n\n/// Result of executing an \"add\" operation on a value in the state.\n#[derive(Debug)]\npub enum AddResult {\n    /// The operation was successful.\n    Success,\n    /// The key was not found.\n    KeyNotFound(Key),\n    /// There was a type mismatch between the stored value and the value being added.\n    TypeMismatch(StoredValueTypeMismatch),\n    /// Serialization error.\n    Serialization(bytesrepr::Error),\n    /// Transform error.\n    Transform(TransformError),\n}\n\nimpl From<CLValueError> for AddResult {\n    fn from(error: CLValueError) -> Self {\n        match error {\n            CLValueError::Serialization(error) => AddResult::Serialization(error),\n            CLValueError::Type(type_mismatch) => {\n                let expected = format!(\"{:?}\", type_mismatch.expected);\n                let found = format!(\"{:?}\", type_mismatch.found);\n                AddResult::TypeMismatch(StoredValueTypeMismatch::new(expected, found))\n            }\n        }\n    }\n}\n\n/// A helper type for `TrackingCopy` that represents a key-value pair.\npub type TrackingCopyParts = 
(TrackingCopyCache, Effects, Messages);\n\nimpl<R: StateReader<Key, StoredValue>> TrackingCopy<R>\nwhere\n    R: StateReader<Key, StoredValue, Error = GlobalStateError>,\n{\n    /// Creates a new `TrackingCopy` using the `reader` as the interface to the state.\n    pub fn new(\n        reader: R,\n        max_query_depth: u64,\n        enable_addressable_entity: bool,\n    ) -> TrackingCopy<R> {\n        TrackingCopy {\n            reader: Arc::new(reader),\n            // TODO: Should `max_cache_size` be a fraction of wasm memory limit?\n            cache: GenericTrackingCopyCache::new(1024 * 16, HeapSize),\n            effects: Effects::new(),\n            max_query_depth,\n            messages: Vec::new(),\n            enable_addressable_entity,\n        }\n    }\n\n    /// Returns the `reader` used to access the state.\n    pub fn reader(&self) -> &R {\n        &self.reader\n    }\n\n    /// Returns a shared reference to the `reader` used to access the state.\n    pub fn shared_reader(&self) -> Arc<R> {\n        Arc::clone(&self.reader)\n    }\n\n    /// Creates a new `TrackingCopy` using the `reader` as the interface to the state.\n    /// Returns a new `TrackingCopy` instance that is a snapshot of the current state, allowing\n    /// further changes to be made.\n    ///\n    /// This method creates a new `TrackingCopy` using the current instance (including its\n    /// mutations) as the base state to read against. Mutations made to the new `TrackingCopy`\n    /// will not impact the original instance.\n    ///\n    /// Note: Currently, there is no `join` or `merge` function to bring changes from a fork back to\n    /// the main `TrackingCopy`. 
Therefore, forking should be done repeatedly, which is\n    /// suboptimal and will be improved in the future.\n    pub fn fork(&self) -> TrackingCopy<&TrackingCopy<R>> {\n        TrackingCopy::new(self, self.max_query_depth, self.enable_addressable_entity)\n    }\n\n    /// Returns a new `TrackingCopy` instance that is a snapshot of the current state, allowing\n    /// further changes to be made.\n    ///\n    /// This method creates a new `TrackingCopy` using the current instance (including its\n    /// mutations) as the base state to read against. Mutations made to the new `TrackingCopy`\n    /// will not impact the original instance.\n    ///\n    /// Note: Currently, there is no `join` or `merge` function to bring changes from a fork back to\n    /// the main `TrackingCopy`. This method is an alternative to the `fork` method and is\n    /// provided for clarity and consistency in naming.\n    pub fn fork2(&self) -> Self {\n        TrackingCopy {\n            reader: Arc::clone(&self.reader),\n            cache: self.cache.clone(),\n            effects: self.effects.clone(),\n            max_query_depth: self.max_query_depth,\n            messages: self.messages.clone(),\n            enable_addressable_entity: self.enable_addressable_entity,\n        }\n    }\n\n    /// Applies the changes to the state.\n    ///\n    /// This is a low-level function that should be used only by the execution engine. The purpose\n    /// of this function is to apply the changes to the state from a forked tracking copy. 
Once\n    /// caller decides that the changes are valid, they can be applied to the state and the\n    /// processing can resume.\n    pub fn apply_changes(\n        &mut self,\n        effects: Effects,\n        cache: TrackingCopyCache,\n        messages: Messages,\n    ) {\n        self.effects = effects;\n        self.cache = cache;\n        self.messages = messages;\n    }\n\n    /// Returns a copy of the execution effects cached by this instance.\n    pub fn effects(&self) -> Effects {\n        self.effects.clone()\n    }\n\n    /// Returns copy of cache.\n    pub fn cache(&self) -> TrackingCopyCache {\n        self.cache.clone()\n    }\n\n    /// Destructure cached entries.\n    pub fn destructure(self) -> (Vec<(Key, StoredValue)>, BTreeSet<Key>, Effects) {\n        let (writes, prunes) = self.cache.into_muts();\n        let writes: Vec<(Key, StoredValue)> = writes.into_iter().map(|(k, v)| (k.0, v)).collect();\n\n        (writes, prunes, self.effects)\n    }\n\n    /// Enable the addressable entity and migrate accounts/contracts to entities.\n    pub fn enable_addressable_entity(&self) -> bool {\n        self.enable_addressable_entity\n    }\n\n    /// Get record by key.\n    pub fn get(&mut self, key: &Key) -> Result<Option<StoredValue>, TrackingCopyError> {\n        match self.cache.get(key) {\n            CacheEntry::Exists(value) => return Ok(Some(value.to_owned())),\n            CacheEntry::Pruned => return Ok(None),\n            CacheEntry::NotFound => {}\n        }\n        match self.reader.read(key) {\n            Ok(ret) => {\n                if let Some(value) = ret {\n                    self.cache.insert_read(*key, value.to_owned());\n                    Ok(Some(value))\n                } else {\n                    Ok(None)\n                }\n            }\n            Err(err) => Err(TrackingCopyError::Storage(err)),\n        }\n    }\n\n    /// Gets the set of keys in the state whose tag is `key_tag`.\n    pub fn get_keys(&self, key_tag: 
&KeyTag) -> Result<BTreeSet<Key>, TrackingCopyError> {\n        self.get_by_byte_prefix(&[*key_tag as u8])\n    }\n\n    /// Get keys by prefix.\n    pub fn get_keys_by_prefix(\n        &self,\n        key_prefix: &KeyPrefix,\n    ) -> Result<BTreeSet<Key>, TrackingCopyError> {\n        let byte_prefix = key_prefix\n            .to_bytes()\n            .map_err(TrackingCopyError::BytesRepr)?;\n        self.get_by_byte_prefix(&byte_prefix)\n    }\n\n    /// Gets the set of keys in the state by a byte prefix.\n    pub(crate) fn get_by_byte_prefix(\n        &self,\n        byte_prefix: &[u8],\n    ) -> Result<BTreeSet<Key>, TrackingCopyError> {\n        let ret = self.keys_with_prefix(byte_prefix)?.into_iter().collect();\n        Ok(ret)\n    }\n\n    /// Reads the value stored under `key`.\n    pub fn read(&mut self, key: &Key) -> Result<Option<StoredValue>, TrackingCopyError> {\n        let normalized_key = key.normalize();\n        if let Some(value) = self.get(&normalized_key)? {\n            self.effects\n                .push(TransformV2::new(normalized_key, TransformKindV2::Identity));\n            Ok(Some(value))\n        } else {\n            Ok(None)\n        }\n    }\n\n    /// Reads the first value stored under the keys in `keys`.\n    pub fn read_first(&mut self, keys: &[&Key]) -> Result<Option<StoredValue>, TrackingCopyError> {\n        for key in keys {\n            if let Some(value) = self.read(key)? {\n                return Ok(Some(value));\n            }\n        }\n        Ok(None)\n    }\n\n    /// Writes `value` under `key`. 
Note that the written value is only cached.\n    pub fn write(&mut self, key: Key, value: StoredValue) {\n        let normalized_key = key.normalize();\n        self.cache.insert_write(normalized_key, value.clone());\n        let transform = TransformV2::new(normalized_key, TransformKindV2::Write(value));\n        self.effects.push(transform);\n    }\n\n    /// Caches the emitted message and writes the message topic summary under the specified key.\n    ///\n    /// This function does not check the types for the key and the value so the caller should\n    /// correctly set the type. The `message_topic_key` should be of the `Key::MessageTopic`\n    /// variant and the `message_topic_summary` should be of the `StoredValue::Message` variant.\n    #[allow(clippy::too_many_arguments)]\n    pub fn emit_message(\n        &mut self,\n        message_topic_key: Key,\n        message_topic_summary: StoredValue,\n        message_key: Key,\n        message_value: StoredValue,\n        block_message_count_value: StoredValue,\n        message: Message,\n    ) {\n        self.write(message_key, message_value);\n        self.write(message_topic_key, message_topic_summary);\n        self.write(\n            Key::BlockGlobal(BlockGlobalAddr::MessageCount),\n            block_message_count_value,\n        );\n        self.messages.push(message);\n    }\n\n    /// Prunes a `key`.\n    pub fn prune(&mut self, key: Key) {\n        let normalized_key = key.normalize();\n        self.cache.insert_prune(normalized_key);\n        self.effects.push(TransformV2::new(\n            normalized_key,\n            TransformKindV2::Prune(key),\n        ));\n    }\n\n    /// Ok(None) represents missing key to which we want to \"add\" some value.\n    /// Ok(Some(unit)) represents successful operation.\n    /// Err(error) is reserved for unexpected errors when accessing global\n    /// state.\n    pub fn add(&mut self, key: Key, value: StoredValue) -> Result<AddResult, TrackingCopyError> {\n        
let normalized_key = key.normalize();\n        let current_value = match self.get(&normalized_key)? {\n            None => return Ok(AddResult::KeyNotFound(normalized_key)),\n            Some(current_value) => current_value,\n        };\n\n        let type_name = value.type_name();\n        let mismatch = || {\n            Ok(AddResult::TypeMismatch(StoredValueTypeMismatch::new(\n                \"I32, U64, U128, U256, U512 or (String, Key) tuple\".to_string(),\n                type_name,\n            )))\n        };\n\n        let transform_kind = match value {\n            StoredValue::CLValue(cl_value) => match *cl_value.cl_type() {\n                CLType::I32 => match cl_value.into_t() {\n                    Ok(value) => TransformKindV2::AddInt32(value),\n                    Err(error) => return Ok(AddResult::from(error)),\n                },\n                CLType::U64 => match cl_value.into_t() {\n                    Ok(value) => TransformKindV2::AddUInt64(value),\n                    Err(error) => return Ok(AddResult::from(error)),\n                },\n                CLType::U128 => match cl_value.into_t() {\n                    Ok(value) => TransformKindV2::AddUInt128(value),\n                    Err(error) => return Ok(AddResult::from(error)),\n                },\n                CLType::U256 => match cl_value.into_t() {\n                    Ok(value) => TransformKindV2::AddUInt256(value),\n                    Err(error) => return Ok(AddResult::from(error)),\n                },\n                CLType::U512 => match cl_value.into_t() {\n                    Ok(value) => TransformKindV2::AddUInt512(value),\n                    Err(error) => return Ok(AddResult::from(error)),\n                },\n                _ => {\n                    if *cl_value.cl_type() == casper_types::named_key_type() {\n                        match cl_value.into_t() {\n                            Ok((name, key)) => {\n                                let mut named_keys = 
NamedKeys::new();\n                                named_keys.insert(name, key);\n                                TransformKindV2::AddKeys(named_keys)\n                            }\n                            Err(error) => return Ok(AddResult::from(error)),\n                        }\n                    } else {\n                        return mismatch();\n                    }\n                }\n            },\n            _ => return mismatch(),\n        };\n\n        match transform_kind.clone().apply(current_value) {\n            Ok(TransformInstruction::Store(new_value)) => {\n                self.cache.insert_write(normalized_key, new_value);\n                self.effects\n                    .push(TransformV2::new(normalized_key, transform_kind));\n                Ok(AddResult::Success)\n            }\n            Ok(TransformInstruction::Prune(key)) => {\n                self.cache.insert_prune(normalized_key);\n                self.effects.push(TransformV2::new(\n                    normalized_key,\n                    TransformKindV2::Prune(key),\n                ));\n                Ok(AddResult::Success)\n            }\n            Err(TransformError::TypeMismatch(type_mismatch)) => {\n                Ok(AddResult::TypeMismatch(type_mismatch))\n            }\n            Err(TransformError::Serialization(error)) => Ok(AddResult::Serialization(error)),\n            Err(transform_error) => Ok(AddResult::Transform(transform_error)),\n        }\n    }\n\n    /// Returns a copy of the messages cached by this instance.\n    pub fn messages(&self) -> Messages {\n        self.messages.clone()\n    }\n\n    /// Calling `query()` avoids calling into `self.cache`, so this will not return any values\n    /// written or mutated in this `TrackingCopy` via previous calls to `write()` or `add()`, since\n    /// these updates are only held in `self.cache`.\n    ///\n    /// The intent is that `query()` is only used to satisfy `QueryRequest`s made to the server.\n    
/// Other EE internal use cases should call `read()` or `get()` in order to retrieve cached\n    /// values.\n    pub fn query(\n        &self,\n        base_key: Key,\n        path: &[String],\n    ) -> Result<TrackingCopyQueryResult, TrackingCopyError> {\n        let mut query = Query::new(base_key, path);\n\n        let mut proofs = Vec::new();\n\n        loop {\n            if query.depth >= self.max_query_depth {\n                return Ok(query.into_depth_limit_result());\n            }\n\n            if !query.visited_keys.insert(query.current_key) {\n                return Ok(query.into_circular_ref_result());\n            }\n\n            let stored_value = match self.reader.read_with_proof(&query.current_key)? {\n                None => {\n                    return Ok(query.into_not_found_result(\"Failed to find base key\"));\n                }\n                Some(stored_value) => stored_value,\n            };\n\n            let value = stored_value.value().to_owned();\n\n            // Following code does a patching on the `StoredValue` to get an inner\n            // `DictionaryValue` for dictionaries only.\n            let value = match handle_stored_dictionary_value(query.current_key, value) {\n                Ok(patched_stored_value) => patched_stored_value,\n                Err(error) => {\n                    return Ok(query.into_not_found_result(&format!(\n                        \"Failed to retrieve dictionary value: {}\",\n                        error\n                    )));\n                }\n            };\n\n            proofs.push(stored_value);\n\n            if query.unvisited_names.is_empty() && !query.current_key.is_named_key() {\n                return Ok(TrackingCopyQueryResult::Success { value, proofs });\n            }\n\n            let stored_value: &StoredValue = proofs\n                .last()\n                .map(|r| r.value())\n                .expect(\"but we just pushed\");\n\n            match stored_value {\n        
        StoredValue::Account(account) => {\n                    let mut maybe_msg_prefix: Option<String> = None;\n                    if let Some(name) = query.next_name() {\n                        if let Some(key) = account.named_keys().get(name) {\n                            query.navigate(*key);\n                        } else {\n                            maybe_msg_prefix = Some(format!(\"Name {} not found in Account\", name));\n                        }\n                    } else {\n                        maybe_msg_prefix = Some(\"All names visited\".to_string());\n                    }\n                    if let Some(msg_prefix) = maybe_msg_prefix {\n                        return Ok(query.into_not_found_result(&msg_prefix));\n                    }\n                }\n                StoredValue::Contract(contract) => {\n                    let mut maybe_msg_prefix: Option<String> = None;\n                    if let Some(name) = query.next_name() {\n                        if let Some(key) = contract.named_keys().get(name) {\n                            query.navigate(*key);\n                        } else {\n                            maybe_msg_prefix = Some(format!(\"Name {} not found in Contract\", name));\n                        }\n                    } else {\n                        maybe_msg_prefix = Some(\"All names visited\".to_string());\n                    }\n                    if let Some(msg_prefix) = maybe_msg_prefix {\n                        return Ok(query.into_not_found_result(&msg_prefix));\n                    }\n                }\n                StoredValue::AddressableEntity(_) => {\n                    let current_key = query.current_key;\n                    let mut maybe_msg_prefix: Option<String> = None;\n                    if let Some(name) = query.next_name() {\n                        if let Key::AddressableEntity(addr) = current_key {\n                            let named_key_addr =\n                                
match NamedKeyAddr::new_from_string(addr, name.clone()) {\n                                    Ok(named_key_addr) => Key::NamedKey(named_key_addr),\n                                    Err(error) => {\n                                        let msg_prefix = format!(\"{}\", error);\n                                        return Ok(query.into_not_found_result(&msg_prefix));\n                                    }\n                                };\n                            query.navigate_for_named_key(named_key_addr);\n                        } else {\n                            maybe_msg_prefix = Some(\"Invalid base key\".to_string());\n                        }\n                    } else {\n                        maybe_msg_prefix = Some(\"All names visited\".to_string());\n                    }\n                    if let Some(msg_prefix) = maybe_msg_prefix {\n                        return Ok(query.into_not_found_result(&msg_prefix));\n                    }\n                }\n                StoredValue::NamedKey(named_key_value) => {\n                    match query.visited_names.last() {\n                        Some(expected_name) => match named_key_value.get_name() {\n                            Ok(actual_name) => {\n                                if &actual_name != expected_name {\n                                    return Ok(query.into_not_found_result(\n                                        \"Queried and retrieved names do not match\",\n                                    ));\n                                } else if let Ok(key) = named_key_value.get_key() {\n                                    query.navigate(key)\n                                } else {\n                                    return Ok(query\n                                        .into_not_found_result(\"Failed to parse CLValue as Key\"));\n                                }\n                            }\n                            Err(_) => {\n                               
 return Ok(query\n                                    .into_not_found_result(\"Failed to parse CLValue as String\"));\n                            }\n                        },\n                        None if path.is_empty() => {\n                            return Ok(TrackingCopyQueryResult::Success { value, proofs });\n                        }\n                        None => return Ok(query.into_not_found_result(\"No visited names\")),\n                    }\n                }\n                StoredValue::CLValue(cl_value) if cl_value.cl_type() == &CLType::Key => {\n                    if let Ok(key) = cl_value.to_owned().into_t::<Key>() {\n                        query.navigate(key);\n                    } else {\n                        return Ok(query.into_not_found_result(\"Failed to parse CLValue as Key\"));\n                    }\n                }\n                StoredValue::CLValue(cl_value) => {\n                    let msg_prefix = format!(\n                        \"Query cannot continue as {:?} is not an account, contract nor key to \\\n                        such.  
Value found\",\n                        cl_value\n                    );\n                    return Ok(query.into_not_found_result(&msg_prefix));\n                }\n                StoredValue::ContractWasm(_) => {\n                    return Ok(query.into_not_found_result(\"ContractWasm value found.\"));\n                }\n                StoredValue::ContractPackage(_) => {\n                    return Ok(query.into_not_found_result(\"ContractPackage value found.\"));\n                }\n                StoredValue::SmartContract(_) => {\n                    return Ok(query.into_not_found_result(\"Package value found.\"));\n                }\n                StoredValue::ByteCode(_) => {\n                    return Ok(query.into_not_found_result(\"ByteCode value found.\"));\n                }\n                StoredValue::Transfer(_) => {\n                    return Ok(query.into_not_found_result(\"Legacy Transfer value found.\"));\n                }\n                StoredValue::DeployInfo(_) => {\n                    return Ok(query.into_not_found_result(\"DeployInfo value found.\"));\n                }\n                StoredValue::EraInfo(_) => {\n                    return Ok(query.into_not_found_result(\"EraInfo value found.\"));\n                }\n                StoredValue::Bid(_) => {\n                    return Ok(query.into_not_found_result(\"Bid value found.\"));\n                }\n                StoredValue::BidKind(_) => {\n                    return Ok(query.into_not_found_result(\"BidKind value found.\"));\n                }\n                StoredValue::Withdraw(_) => {\n                    return Ok(query.into_not_found_result(\"WithdrawPurses value found.\"));\n                }\n                StoredValue::Unbonding(_) => {\n                    return Ok(query.into_not_found_result(\"Unbonding value found.\"));\n                }\n                StoredValue::MessageTopic(_) => {\n                    return 
Ok(query.into_not_found_result(\"MessageTopic value found.\"));\n                }\n                StoredValue::Message(_) => {\n                    return Ok(query.into_not_found_result(\"Message value found.\"));\n                }\n                StoredValue::EntryPoint(_) => {\n                    return Ok(query.into_not_found_result(\"EntryPoint value found.\"));\n                }\n                StoredValue::Prepayment(_) => {\n                    return Ok(query.into_not_found_result(\"Prepayment value found.\"))\n                }\n                StoredValue::RawBytes(_) => {\n                    return Ok(query.into_not_found_result(\"RawBytes value found.\"));\n                }\n            }\n        }\n    }\n}\n\n/// The purpose of this implementation is to allow a \"snapshot\" mechanism for\n/// TrackingCopy. The state of a TrackingCopy (including the effects of\n/// any transforms it has accumulated) can be read using an immutable\n/// reference to that TrackingCopy via this trait implementation. See\n/// `TrackingCopy::fork` for more information.\nimpl<R: StateReader<Key, StoredValue>> StateReader<Key, StoredValue> for &TrackingCopy<R> {\n    type Error = R::Error;\n\n    fn read(&self, key: &Key) -> Result<Option<StoredValue>, Self::Error> {\n        let kb = KeyWithByteRepr::new(*key);\n        if let Some(value) = self.cache.muts_cached.get(&kb) {\n            return Ok(Some(value.to_owned()));\n        }\n        if let Some(value) = self.reader.read(key)? 
{\n            Ok(Some(value))\n        } else {\n            Ok(None)\n        }\n    }\n\n    fn read_with_proof(\n        &self,\n        key: &Key,\n    ) -> Result<Option<TrieMerkleProof<Key, StoredValue>>, Self::Error> {\n        self.reader.read_with_proof(key)\n    }\n\n    fn keys_with_prefix(&self, byte_prefix: &[u8]) -> Result<Vec<Key>, Self::Error> {\n        let keys = self.reader.keys_with_prefix(byte_prefix)?;\n\n        let ret = keys\n            .into_iter()\n            // don't include keys marked for pruning\n            .filter(|key| !self.cache.is_pruned(key))\n            // there may be newly inserted keys which have not been committed yet\n            .chain(self.cache.get_muts_cached_by_byte_prefix(byte_prefix))\n            .collect();\n        Ok(ret)\n    }\n}\n\n/// Error conditions of a proof validation.\n#[derive(Error, Debug, PartialEq, Eq)]\npub enum ValidationError {\n    /// The path should not have a different length than the proof less one.\n    #[error(\"The path should not have a different length than the proof less one.\")]\n    PathLengthDifferentThanProofLessOne,\n\n    /// The provided key does not match the key in the proof.\n    #[error(\"The provided key does not match the key in the proof.\")]\n    UnexpectedKey,\n\n    /// The provided value does not match the value in the proof.\n    #[error(\"The provided value does not match the value in the proof.\")]\n    UnexpectedValue,\n\n    /// The proof hash is invalid.\n    #[error(\"The proof hash is invalid.\")]\n    InvalidProofHash,\n\n    /// The path went cold.\n    #[error(\"The path went cold.\")]\n    PathCold,\n\n    /// (De)serialization error.\n    #[error(\"Serialization error: {0}\")]\n    BytesRepr(bytesrepr::Error),\n\n    /// Key is not a URef.\n    #[error(\"Key is not a URef\")]\n    KeyIsNotAURef(Key),\n\n    /// Error converting a stored value to a [`Key`].\n    #[error(\"Failed to convert stored value to key\")]\n    ValueToCLValueConversion,\n\n    
/// CLValue conversion error.\n    #[error(\"{0}\")]\n    CLValueError(CLValueError),\n}\n\nimpl From<CLValueError> for ValidationError {\n    fn from(err: CLValueError) -> Self {\n        ValidationError::CLValueError(err)\n    }\n}\n\nimpl From<bytesrepr::Error> for ValidationError {\n    fn from(error: bytesrepr::Error) -> Self {\n        Self::BytesRepr(error)\n    }\n}\n\n/// Validates proof of the query.\n///\n/// Returns [`ValidationError`] for any of\npub fn validate_query_proof(\n    hash: &Digest,\n    proofs: &[TrieMerkleProof<Key, StoredValue>],\n    expected_first_key: &Key,\n    path: &[String],\n    expected_value: &StoredValue,\n) -> Result<(), ValidationError> {\n    if proofs.len() != path.len() + 1 {\n        return Err(ValidationError::PathLengthDifferentThanProofLessOne);\n    }\n\n    let mut proofs_iter = proofs.iter();\n    let first_proof = match proofs_iter.next() {\n        Some(proof) => proof,\n        None => {\n            return Err(ValidationError::PathLengthDifferentThanProofLessOne);\n        }\n    };\n\n    let mut path_components_iter = path.iter();\n\n    if first_proof.key() != &expected_first_key.normalize() {\n        return Err(ValidationError::UnexpectedKey);\n    }\n\n    if hash != &compute_state_hash(first_proof)? 
{\n        return Err(ValidationError::InvalidProofHash);\n    }\n\n    let mut proof_value = first_proof.value();\n\n    for proof in proofs_iter {\n        let named_keys = match proof_value {\n            StoredValue::Account(account) => account.named_keys(),\n            StoredValue::Contract(contract) => contract.named_keys(),\n            _ => return Err(ValidationError::PathCold),\n        };\n\n        let path_component = match path_components_iter.next() {\n            Some(path_component) => path_component,\n            None => return Err(ValidationError::PathCold),\n        };\n\n        let key = match named_keys.get(path_component) {\n            Some(key) => key,\n            None => return Err(ValidationError::PathCold),\n        };\n\n        if proof.key() != &key.normalize() {\n            return Err(ValidationError::UnexpectedKey);\n        }\n\n        if hash != &compute_state_hash(proof)? {\n            return Err(ValidationError::InvalidProofHash);\n        }\n\n        proof_value = proof.value();\n    }\n\n    if proof_value != expected_value {\n        return Err(ValidationError::UnexpectedValue);\n    }\n\n    Ok(())\n}\n\n/// Validates proof of the query.\n///\n/// Returns [`ValidationError`] for any of\npub fn validate_query_merkle_proof(\n    hash: &Digest,\n    proofs: &[TrieMerkleProof<Key, StoredValue>],\n    expected_key_trace: &[Key],\n    expected_value: &StoredValue,\n) -> Result<(), ValidationError> {\n    let expected_len = expected_key_trace.len();\n    if proofs.len() != expected_len {\n        return Err(ValidationError::PathLengthDifferentThanProofLessOne);\n    }\n\n    let proof_keys: Vec<Key> = proofs.iter().map(|proof| *proof.key()).collect();\n\n    if !expected_key_trace.eq(&proof_keys) {\n        return Err(ValidationError::UnexpectedKey);\n    }\n\n    if expected_value != proofs[expected_len - 1].value() {\n        return Err(ValidationError::UnexpectedValue);\n    }\n\n    let mut proofs_iter = 
proofs.iter();\n\n    let first_proof = match proofs_iter.next() {\n        Some(proof) => proof,\n        None => return Err(ValidationError::PathLengthDifferentThanProofLessOne),\n    };\n\n    if hash != &compute_state_hash(first_proof)? {\n        return Err(ValidationError::InvalidProofHash);\n    }\n\n    Ok(())\n}\n\n/// Validates a proof of a balance request.\npub fn validate_balance_proof(\n    hash: &Digest,\n    balance_proof: &TrieMerkleProof<Key, StoredValue>,\n    expected_purse_key: Key,\n    expected_motes: &U512,\n) -> Result<(), ValidationError> {\n    let expected_balance_key = expected_purse_key\n        .into_uref()\n        .map(|uref| Key::Balance(uref.addr()))\n        .ok_or_else(|| ValidationError::KeyIsNotAURef(expected_purse_key.to_owned()))?;\n\n    if balance_proof.key() != &expected_balance_key.normalize() {\n        return Err(ValidationError::UnexpectedKey);\n    }\n\n    if hash != &compute_state_hash(balance_proof)? {\n        return Err(ValidationError::InvalidProofHash);\n    }\n\n    let balance_proof_stored_value = balance_proof.value().to_owned();\n\n    let balance_proof_clvalue: CLValue = balance_proof_stored_value\n        .try_into()\n        .map_err(|_| ValidationError::ValueToCLValueConversion)?;\n\n    let balance_motes: U512 = balance_proof_clvalue.into_t()?;\n\n    if expected_motes != &balance_motes {\n        return Err(ValidationError::UnexpectedValue);\n    }\n\n    Ok(())\n}\n\nuse crate::global_state::{\n    error::Error,\n    state::{\n        lmdb::{make_temporary_global_state, LmdbGlobalStateView},\n        StateProvider,\n    },\n};\nuse tempfile::TempDir;\n\n/// Creates a temp global state with initial state and checks out a tracking copy on it.\npub fn new_temporary_tracking_copy(\n    initial_data: impl IntoIterator<Item = (Key, StoredValue)>,\n    max_query_depth: Option<u64>,\n    enable_addressable_entity: bool,\n) -> (TrackingCopy<LmdbGlobalStateView>, TempDir) {\n    let (global_state, 
state_root_hash, tempdir) = make_temporary_global_state(initial_data);\n\n    let reader = global_state\n        .checkout(state_root_hash)\n        .expect(\"Checkout should not throw errors.\")\n        .expect(\"Root hash should exist.\");\n\n    let query_depth = max_query_depth.unwrap_or(DEFAULT_MAX_QUERY_DEPTH);\n\n    (\n        TrackingCopy::new(reader, query_depth, enable_addressable_entity),\n        tempdir,\n    )\n}\n"
  },
  {
    "path": "storage/src/tracking_copy/tests.rs",
    "content": "use std::{\n    collections::BTreeSet,\n    iter::FromIterator,\n    sync::{Arc, RwLock},\n};\n\nuse assert_matches::assert_matches;\n\nuse casper_types::{\n    account::AccountHash,\n    addressable_entity::{\n        ActionThresholds, AddressableEntityHash, AssociatedKeys, NamedKeyAddr, NamedKeyValue,\n        Weight,\n    },\n    contract_messages::Messages,\n    contracts::{EntryPoints as ContractEntryPoints, NamedKeys},\n    execution::{Effects, TransformKindV2, TransformV2},\n    gens::*,\n    global_state::TrieMerkleProof,\n    handle_stored_dictionary_value, AccessRights, AddressableEntity, ByteCodeHash, CLValue,\n    CLValueDictionary, CLValueError, ContractRuntimeTag, EntityAddr, EntityKind, HashAddr, Key,\n    KeyTag, PackageHash, ProtocolVersion, StoredValue, URef, U256, U512, UREF_ADDR_LENGTH,\n};\n\nuse super::{\n    meter::count_meter::Count, CacheEntry, GenericTrackingCopyCache, TrackingCopyError,\n    TrackingCopyQueryResult,\n};\nuse crate::{\n    global_state::state::{self, StateProvider, StateReader},\n    tracking_copy::{self, TrackingCopy},\n};\n\nuse crate::global_state::{DEFAULT_ENABLE_ENTITY, DEFAULT_MAX_QUERY_DEPTH};\nuse casper_types::contracts::ContractHash;\nuse proptest::proptest;\n\nstruct CountingDb {\n    count: Arc<RwLock<i32>>,\n    value: Option<StoredValue>,\n}\n\nimpl CountingDb {\n    fn new(counter: Arc<RwLock<i32>>) -> CountingDb {\n        CountingDb {\n            count: counter,\n            value: None,\n        }\n    }\n}\n\nimpl StateReader<Key, StoredValue> for CountingDb {\n    type Error = crate::global_state::error::Error;\n    fn read(&self, _key: &Key) -> Result<Option<StoredValue>, Self::Error> {\n        let count = *self.count.read().unwrap();\n        let value = match self.value {\n            Some(ref v) => v.clone(),\n            None => StoredValue::CLValue(CLValue::from_t(count).unwrap()),\n        };\n        *self.count.write().unwrap() = count + 1;\n        Ok(Some(value))\n    }\n\n  
  fn read_with_proof(\n        &self,\n        _key: &Key,\n    ) -> Result<Option<TrieMerkleProof<Key, StoredValue>>, Self::Error> {\n        Ok(None)\n    }\n\n    fn keys_with_prefix(&self, _prefix: &[u8]) -> Result<Vec<Key>, Self::Error> {\n        Ok(Vec::new())\n    }\n}\n\nfn effects(transform_keys_and_kinds: Vec<(Key, TransformKindV2)>) -> Effects {\n    let mut effects = Effects::new();\n    for (key, kind) in transform_keys_and_kinds {\n        effects.push(TransformV2::new(key, kind));\n    }\n    effects\n}\n\n#[test]\nfn tracking_copy_new() {\n    let counter = Arc::new(RwLock::new(0));\n    let db = CountingDb::new(counter);\n    let tc = TrackingCopy::new(db, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n\n    assert!(tc.effects.is_empty());\n}\n\n#[test]\nfn tracking_copy_caching() {\n    let counter = Arc::new(RwLock::new(0));\n    let db = CountingDb::new(Arc::clone(&counter));\n    let mut tc = TrackingCopy::new(db, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n    let k = Key::Hash([0u8; 32]);\n\n    let zero = StoredValue::CLValue(CLValue::from_t(0_i32).unwrap());\n    // first read\n    let value = tc.read(&k).unwrap().unwrap();\n    assert_eq!(value, zero);\n\n    // second read; should use cache instead\n    // of going back to the DB\n    let value = tc.read(&k).unwrap().unwrap();\n    let db_value = *counter.read().unwrap();\n    assert_eq!(value, zero);\n    assert_eq!(db_value, 1);\n}\n\n#[test]\nfn tracking_copy_read() {\n    let counter = Arc::new(RwLock::new(0));\n    let db = CountingDb::new(Arc::clone(&counter));\n    let mut tc = TrackingCopy::new(db, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n    let k = Key::Hash([0u8; 32]);\n\n    let zero = StoredValue::CLValue(CLValue::from_t(0_i32).unwrap());\n    let value = tc.read(&k).unwrap().unwrap();\n    // value read correctly\n    assert_eq!(value, zero);\n    // Reading does produce an identity transform.\n    assert_eq!(tc.effects, effects(vec![(k, 
TransformKindV2::Identity)]));\n}\n\n#[test]\nfn tracking_copy_write() {\n    let counter = Arc::new(RwLock::new(0));\n    let db = CountingDb::new(Arc::clone(&counter));\n    let mut tc = TrackingCopy::new(db, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n    let k = Key::Hash([0u8; 32]);\n\n    let one = StoredValue::CLValue(CLValue::from_t(1_i32).unwrap());\n    let two = StoredValue::CLValue(CLValue::from_t(2_i32).unwrap());\n\n    // writing should work\n    tc.write(k, one.clone());\n    // write does not need to query the DB\n    let db_value = *counter.read().unwrap();\n    assert_eq!(db_value, 0);\n    // Writing creates a write transform.\n    assert_eq!(\n        tc.effects,\n        effects(vec![(k, TransformKindV2::Write(one.clone()))])\n    );\n\n    // writing again should update the values\n    tc.write(k, two.clone());\n    let db_value = *counter.read().unwrap();\n    assert_eq!(db_value, 0);\n    assert_eq!(\n        tc.effects,\n        effects(vec![\n            (k, TransformKindV2::Write(one)),\n            (k, TransformKindV2::Write(two)),\n        ])\n    );\n}\n\n#[test]\nfn tracking_copy_add_i32() {\n    let counter = Arc::new(RwLock::new(0));\n    let db = CountingDb::new(counter);\n    let mut tc = TrackingCopy::new(db, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n    let k = Key::Hash([0u8; 32]);\n\n    let three = StoredValue::CLValue(CLValue::from_t(3_i32).unwrap());\n\n    // adding should work\n    let add = tc.add(k, three.clone());\n    assert_matches!(add, Ok(_));\n\n    // Adding creates an add transform.\n    assert_eq!(tc.effects, effects(vec![(k, TransformKindV2::AddInt32(3))]));\n\n    // adding again should update the values\n    let add = tc.add(k, three);\n    assert_matches!(add, Ok(_));\n    assert_eq!(\n        tc.effects,\n        effects(vec![(k, TransformKindV2::AddInt32(3)); 2])\n    );\n}\n\n#[test]\nfn tracking_copy_rw() {\n    let counter = Arc::new(RwLock::new(0));\n    let db = 
CountingDb::new(counter);\n    let mut tc = TrackingCopy::new(db, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n    let k = Key::Hash([0u8; 32]);\n\n    // reading then writing should update the op\n    let value = StoredValue::CLValue(CLValue::from_t(3_i32).unwrap());\n    let _ = tc.read(&k);\n    tc.write(k, value.clone());\n    assert_eq!(\n        tc.effects,\n        effects(vec![\n            (k, TransformKindV2::Identity),\n            (k, TransformKindV2::Write(value)),\n        ])\n    );\n}\n\n#[test]\nfn tracking_copy_ra() {\n    let counter = Arc::new(RwLock::new(0));\n    let db = CountingDb::new(counter);\n    let mut tc = TrackingCopy::new(db, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n    let k = Key::Hash([0u8; 32]);\n\n    // reading then adding should update the op\n    let value = StoredValue::CLValue(CLValue::from_t(3_i32).unwrap());\n    let _ = tc.read(&k);\n    let _ = tc.add(k, value);\n    assert_eq!(\n        tc.effects,\n        effects(vec![\n            (k, TransformKindV2::Identity),\n            (k, TransformKindV2::AddInt32(3)),\n        ])\n    );\n}\n\n#[test]\nfn tracking_copy_aw() {\n    let counter = Arc::new(RwLock::new(0));\n    let db = CountingDb::new(counter);\n    let mut tc = TrackingCopy::new(db, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n    let k = Key::Hash([0u8; 32]);\n\n    // adding then writing should update the op\n    let value = StoredValue::CLValue(CLValue::from_t(3_i32).unwrap());\n    let write_value = StoredValue::CLValue(CLValue::from_t(7_i32).unwrap());\n    let _ = tc.add(k, value);\n    tc.write(k, write_value.clone());\n    assert_eq!(\n        tc.effects,\n        effects(vec![\n            (k, TransformKindV2::AddInt32(3)),\n            (k, TransformKindV2::Write(write_value)),\n        ])\n    );\n}\n\n#[test]\nfn should_return_value_not_found() {\n    let (gs, root_hash, _tempdir) = state::lmdb::make_temporary_global_state([]);\n    let view = 
gs.checkout(root_hash).unwrap().unwrap();\n\n    let missing_key = Key::Dictionary([2u8; 32]);\n    let empty_path = Vec::new();\n    let tc = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n    let result = tc.query(missing_key, &empty_path);\n    assert_matches!(result, Ok(TrackingCopyQueryResult::ValueNotFound(_)));\n}\n\n#[test]\nfn should_find_existing_entry() {\n    let foo_key = Key::URef(URef::default());\n    let foo_val = CLValue::from_t(\"test\").expect(\"should get cl_value from string\");\n    let stored_val = StoredValue::CLValue(foo_val);\n\n    // seed gs w/ entry as a testing convenience\n    let (gs, root_hash, _tempdir) =\n        state::lmdb::make_temporary_global_state([(foo_key, stored_val.clone())]);\n\n    let view = gs.checkout(root_hash).unwrap().unwrap();\n    let tc = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n    let empty_path = Vec::new();\n    let query_result = tc.query(foo_key, &empty_path);\n    if let Ok(TrackingCopyQueryResult::Success { value, .. 
}) = query_result {\n        assert_eq!(stored_val, value);\n    } else {\n        panic!(\"Query failed when it should not have!\");\n    }\n}\n\n#[test]\nfn should_query_empty_path() {\n    let dictionary_key = Key::Dictionary([1u8; 32]);\n    let cl_value = CLValue::from_t(\"test\").expect(\"should get cl_value from string\");\n    let seed_uref = URef::default();\n    let dictionary_item_key_bytes = \"dict_name\".as_bytes();\n    let dictionary_value = CLValueDictionary::new(\n        cl_value,\n        seed_uref.addr().to_vec(),\n        dictionary_item_key_bytes.to_vec(),\n    );\n    let stored_value = StoredValue::CLValue(\n        CLValue::from_t(dictionary_value).expect(\"should get cl_value from dictionary_value\"),\n    );\n\n    // seed gs w/ entry as a testing convenience\n    let (gs, root_hash, _tempdir) =\n        state::lmdb::make_temporary_global_state([(dictionary_key, stored_value.clone())]);\n\n    let view = gs.checkout(root_hash).unwrap().unwrap();\n    let tc = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n    let empty_path = Vec::new();\n    let query_result = tc.query(dictionary_key, &empty_path);\n    let dictionary_stored_value = handle_stored_dictionary_value(dictionary_key, stored_value)\n        .expect(\"should get dictionary stored value\");\n    if let Ok(TrackingCopyQueryResult::Success { value, .. 
}) = query_result {\n        assert_eq!(dictionary_stored_value, value);\n    } else {\n        panic!(\"Query failed when it should not have!\");\n    }\n}\n\n#[test]\nfn should_traverse_contract_pathing() {\n    let account_hash = AccountHash::new([0u8; 32]);\n    let account_key = Key::Account(account_hash);\n    let account =\n        casper_types::account::Account::create(account_hash, NamedKeys::default(), URef::default());\n    let stored_account = StoredValue::Account(account);\n\n    let account_alias = \"account_alias\".to_string();\n    let contract_named_keys = {\n        let mut named_keys = NamedKeys::new();\n        named_keys.insert(account_alias.clone(), account_key);\n        named_keys\n    };\n    let contract = casper_types::contracts::Contract::new(\n        [2; 32].into(),\n        [3; 32].into(),\n        contract_named_keys,\n        ContractEntryPoints::new(),\n        ProtocolVersion::V1_0_0,\n    );\n    let contract_hash = ContractHash::default();\n    let contract_key = Key::Hash(contract_hash.value());\n    let stored_contract = StoredValue::Contract(contract);\n\n    let (gs, root_hash, _tempdir) = state::lmdb::make_temporary_global_state([\n        (account_key, stored_account.clone()),\n        (contract_key, stored_contract),\n    ]);\n    let view = gs.checkout(root_hash).unwrap().unwrap();\n    let tc = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n    let path = vec![account_alias];\n    if let Ok(TrackingCopyQueryResult::Success { value, .. 
}) = tc.query(contract_key, &path) {\n        assert_eq!(value, stored_account, \"should find expected account\");\n    } else {\n        panic!(\"Query failed when it should not have!\");\n    }\n}\n\n#[test]\nfn should_traverse_account_pathing() {\n    let contract = casper_types::contracts::Contract::new(\n        [2; 32].into(),\n        [3; 32].into(),\n        NamedKeys::default(),\n        ContractEntryPoints::new(),\n        ProtocolVersion::V1_0_0,\n    );\n    let contract_hash = ContractHash::default();\n    let contract_key = Key::Hash(contract_hash.value());\n    let stored_contract = StoredValue::Contract(contract);\n\n    let account_hash = AccountHash::new([0u8; 32]);\n    let account_key = Key::Account(account_hash);\n    let contract_alias = \"contract_alias\".to_string();\n    let account_named_keys = {\n        let mut named_keys = NamedKeys::new();\n        named_keys.insert(contract_alias.clone(), contract_key);\n        named_keys\n    };\n    let account =\n        casper_types::account::Account::create(account_hash, account_named_keys, URef::default());\n    let stored_account = StoredValue::Account(account);\n\n    let (gs, root_hash, _tempdir) = state::lmdb::make_temporary_global_state([\n        (account_key, stored_account),\n        (contract_key, stored_contract.clone()),\n    ]);\n    let view = gs.checkout(root_hash).unwrap().unwrap();\n    let tc = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n    let path = vec![contract_alias];\n    if let Ok(TrackingCopyQueryResult::Success { value, .. 
}) = tc.query(account_key, &path) {\n        assert_eq!(value, stored_contract, \"should find expected contract\");\n    } else {\n        panic!(\"Query failed when it should not have!\");\n    }\n}\n\n#[test]\nfn should_traverse_all_paths() {\n    let contract_hash = ContractHash::default();\n    let contract_key = Key::Hash(contract_hash.value());\n    let contract_alias = \"contract_alias\".to_string();\n    let account_hash = AccountHash::new([0u8; 32]);\n    let account_key = Key::Account(account_hash);\n    let account_alias = \"account_alias\".to_string();\n\n    let some_inner = \"test\";\n    let (misc_uref_key, misc_stored_value) = {\n        (\n            Key::URef(URef::new([4u8; UREF_ADDR_LENGTH], AccessRights::all())),\n            StoredValue::CLValue(\n                CLValue::from_t(some_inner).expect(\"should get cl_value from string\"),\n            ),\n        )\n    };\n    let misc_alias = \"some_alias\".to_string();\n\n    let stored_contract = {\n        let contract_named_keys = {\n            let mut named_keys = NamedKeys::new();\n            named_keys.insert(account_alias.clone(), account_key);\n            named_keys.insert(misc_alias.clone(), misc_uref_key);\n            named_keys\n        };\n        let contract = casper_types::contracts::Contract::new(\n            [2; 32].into(),\n            [3; 32].into(),\n            contract_named_keys,\n            ContractEntryPoints::new(),\n            ProtocolVersion::V1_0_0,\n        );\n        StoredValue::Contract(contract)\n    };\n\n    let stored_account = {\n        let account_named_keys = {\n            let mut named_keys = NamedKeys::new();\n            named_keys.insert(contract_alias.clone(), contract_key);\n            named_keys.insert(misc_alias.clone(), misc_uref_key);\n            named_keys\n        };\n        let account = casper_types::account::Account::create(\n            account_hash,\n            account_named_keys,\n            URef::default(),\n        );\n 
       StoredValue::Account(account)\n    };\n\n    let (gs, root_hash, _tempdir) = state::lmdb::make_temporary_global_state([\n        (account_key, stored_account.clone()),\n        (contract_key, stored_contract.clone()),\n        (misc_uref_key, misc_stored_value.clone()),\n    ]);\n    let view = gs.checkout(root_hash).unwrap().unwrap();\n    let tc = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n\n    fn unpack(\n        result: Result<TrackingCopyQueryResult, TrackingCopyError>,\n        err_msg: String,\n    ) -> StoredValue {\n        if let Ok(TrackingCopyQueryResult::Success { value, .. }) = result {\n            value\n        } else {\n            panic!(\"{}\", err_msg);\n        }\n    }\n\n    let expected_contract = unpack(\n        tc.query(account_key, &[contract_alias.clone()]),\n        \"contract should exist\".to_string(),\n    );\n    assert_eq!(\n        expected_contract, stored_contract,\n        \"unexpected stored value\"\n    );\n\n    // from account, traverse to contract then to misc val\n    let expected_account_contract_misc = unpack(\n        tc.query(\n            account_key,\n            &[contract_alias, misc_alias.clone()], // <-- path magic here\n        ),\n        \"misc value should exist via account to contract\".to_string(),\n    );\n    assert_eq!(\n        expected_account_contract_misc, misc_stored_value,\n        \"unexpected stored value\"\n    );\n\n    let expected_account = unpack(\n        tc.query(contract_key, &[account_alias.clone()]),\n        \"account should exist\".to_string(),\n    );\n    assert_eq!(expected_account, stored_account, \"unexpected stored value\");\n\n    // from contract, traverse to account then to misc val\n    let expected_contract_account_misc = unpack(\n        tc.query(\n            contract_key,\n            &[account_alias, misc_alias.clone()], // <-- path magic here\n        ),\n        \"misc value should exist via contract to 
account\".to_string(),\n    );\n    assert_eq!(\n        expected_contract_account_misc, misc_stored_value,\n        \"unexpected stored value\"\n    );\n\n    let expected_value = unpack(\n        tc.query(misc_uref_key, &[]),\n        \"misc value should exist\".to_string(),\n    );\n    assert_eq!(expected_value, misc_stored_value, \"unexpected stored value\");\n\n    let expected_account_misc = unpack(\n        tc.query(account_key, &[misc_alias.clone()]),\n        \"misc value should exist via account\".to_string(),\n    );\n    assert_eq!(\n        expected_account_misc, misc_stored_value,\n        \"unexpected stored value\"\n    );\n\n    let expected_contract_misc = unpack(\n        tc.query(contract_key, &[misc_alias]),\n        \"misc value should exist via contract\".to_string(),\n    );\n    assert_eq!(\n        expected_contract_misc, misc_stored_value,\n        \"unexpected stored value\"\n    );\n}\n\nfn handle_stored_value_into(\n    key: Key,\n    stored_value: StoredValue,\n) -> Result<StoredValue, CLValueError> {\n    match (key, stored_value) {\n        (Key::Dictionary(_), StoredValue::CLValue(cl_value)) => {\n            let wrapped_dictionary_value =\n                CLValueDictionary::new(cl_value, vec![0; 32], vec![255; 32]);\n            let wrapped_cl_value = CLValue::from_t(wrapped_dictionary_value)?;\n            Ok(StoredValue::CLValue(wrapped_cl_value))\n        }\n        (_, stored_value) => Ok(stored_value),\n    }\n}\n\nproptest! 
{\n    #[test]\n    fn query_contract_state(\n        k in key_arb(), // key state is stored at\n        v in stored_value_arb(), // value in contract state\n        name in \"\\\\PC*\", // human-readable name for state\n        missing_name in \"\\\\PC*\",\n        hash in u8_slice_32(), // hash for contract key\n    ) {\n        let mut named_keys = NamedKeys::new();\n        named_keys.insert(name.clone(), k);\n        let contract =\n            StoredValue::AddressableEntity(AddressableEntity::new(\n            [2; 32].into(),\n            [3; 32].into(),\n            ProtocolVersion::V1_0_0,\n            URef::default(),\n            AssociatedKeys::default(),\n            ActionThresholds::default(),\n            EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1)\n        ));\n        let contract_key = Key::AddressableEntity(EntityAddr::SmartContract(hash));\n\n        let value = handle_stored_value_into(k, v.clone()).unwrap();\n\n        let named_key = Key::NamedKey( NamedKeyAddr::new_from_string(EntityAddr::SmartContract(hash), name.clone()).unwrap());\n        let named_value = StoredValue::NamedKey(NamedKeyValue::from_concrete_values(k, name.clone()).unwrap());\n\n        let (gs, root_hash, _tempdir) = state::lmdb::make_temporary_global_state(\n            [(k, value), (named_key, named_value) ,(contract_key, contract)]\n        );\n        let view = gs.checkout(root_hash).unwrap().unwrap();\n        let tc = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n        let path = vec!(name.clone());\n        if let Ok(TrackingCopyQueryResult::Success { value, .. 
}) = tc.query( contract_key, &path) {\n            assert_eq!(v, value);\n        } else {\n            panic!(\"Query failed when it should not have!\");\n        }\n\n        if missing_name != name {\n            let result = tc.query(contract_key, &[missing_name]);\n            assert_matches!(result, Ok(TrackingCopyQueryResult::ValueNotFound(_)));\n        }\n    }\n\n    #[test]\n    fn query_account_state(\n        k in key_arb(), // key state is stored at\n        v in stored_value_arb(), // value in account state\n        name in \"\\\\PC*\", // human-readable name for state\n        missing_name in \"\\\\PC*\",\n        pk in account_hash_arb(), // account hash\n        address in account_hash_arb(), // address for account hash\n    ) {\n        let purse = URef::new([0u8; 32], AccessRights::READ_ADD_WRITE);\n        let associated_keys = AssociatedKeys::new(pk, Weight::new(1));\n        let entity = AddressableEntity::new(\n            PackageHash::new([1u8;32]),\n            ByteCodeHash::default(),\n            ProtocolVersion::V1_0_0,\n            purse,\n            associated_keys,\n            ActionThresholds::default(),\n            EntityKind::Account(address)\n        );\n\n        let account_key = Key::AddressableEntity(EntityAddr::Account([9;32]));\n        let value = handle_stored_value_into(k, v.clone()).unwrap();\n\n        let named_key = Key::NamedKey( NamedKeyAddr::new_from_string(EntityAddr::Account([9;32]), name.clone()).unwrap());\n        let named_value = StoredValue::NamedKey(NamedKeyValue::from_concrete_values(k, name.clone()).unwrap());\n\n        let (gs, root_hash, _tempdir) = state::lmdb::make_temporary_global_state(\n            [(k, value), (named_key, named_value),(account_key, entity.into())],\n        );\n        let view = gs.checkout(root_hash).unwrap().unwrap();\n        let tc = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n        let path = vec!(name.clone());\n        if let 
Ok(TrackingCopyQueryResult::Success { value, .. }) = tc.query(account_key, &path) {\n            assert_eq!(v, value);\n        } else {\n            panic!(\"Query failed when it should not have!\");\n        }\n\n        if missing_name != name {\n            let result = tc.query( account_key, &[missing_name]);\n            assert_matches!(result, Ok(TrackingCopyQueryResult::ValueNotFound(_)));\n        }\n    }\n\n    #[test]\n    fn query_path(\n        k in key_arb(), // key state is stored at\n        v in stored_value_arb(), // value in contract state\n        state_name in \"\\\\PC*\", // human-readable name for state\n        _pk in account_hash_arb(), // account hash\n        hash in u8_slice_32(), // hash for contract key\n    ) {\n        // create contract which knows about value\n        let mut contract_named_keys = NamedKeys::new();\n        contract_named_keys.insert(state_name.clone(), k);\n        let contract =\n            StoredValue::AddressableEntity(AddressableEntity::new(\n            [2; 32].into(),\n            [3; 32].into(),\n            ProtocolVersion::V1_0_0,\n            URef::default(),\n            AssociatedKeys::default(),\n            ActionThresholds::default(),\n            EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1)\n        ));\n        let contract_key = Key::AddressableEntity(EntityAddr::SmartContract(hash));\n        let contract_named_key = NamedKeyAddr::new_from_string(EntityAddr::SmartContract(hash), state_name.clone())\n         .unwrap();\n\n        let contract_value = NamedKeyValue::from_concrete_values(k, state_name.clone()).unwrap();\n\n        let value = handle_stored_value_into(k, v.clone()).unwrap();\n\n        let (gs, root_hash, _tempdir) = state::lmdb::make_temporary_global_state([\n            (k, value),\n            (contract_key, contract),\n            (Key::NamedKey(contract_named_key), StoredValue::NamedKey(contract_value))\n        ]);\n        let view = 
gs.checkout(root_hash).unwrap().unwrap();\n        let tc = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n        let path = vec!(state_name);\n\n        let results =  tc.query( contract_key, &path);\n        if let Ok(TrackingCopyQueryResult::Success { value, .. }) = results {\n            assert_eq!(v, value);\n        } else {\n            panic!(\"Query failed when it should not have!\");\n        }\n    }\n}\n\n#[test]\nfn cache_reads_invalidation() {\n    let mut tc_cache = GenericTrackingCopyCache::new(2, Count);\n    let (k1, v1) = (\n        Key::Hash([1u8; 32]),\n        StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()),\n    );\n    let (k2, v2) = (\n        Key::Hash([2u8; 32]),\n        StoredValue::CLValue(CLValue::from_t(2_i32).unwrap()),\n    );\n    let (k3, v3) = (\n        Key::Hash([3u8; 32]),\n        StoredValue::CLValue(CLValue::from_t(3_i32).unwrap()),\n    );\n    tc_cache.insert_read(k1, v1);\n    tc_cache.insert_read(k2, v2.clone());\n    tc_cache.insert_read(k3, v3.clone());\n    assert_eq!(tc_cache.get(&k1), CacheEntry::NotFound); // first entry should be invalidated\n    assert_eq!(tc_cache.get(&k2), CacheEntry::Exists(&v2)); // k2 and k3 should be there\n    assert_eq!(tc_cache.get(&k3), CacheEntry::Exists(&v3));\n}\n\n#[test]\nfn cache_writes_not_invalidated() {\n    let mut tc_cache = GenericTrackingCopyCache::new(2, Count);\n    let (k1, v1) = (\n        Key::Hash([1u8; 32]),\n        StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()),\n    );\n    let (k2, v2) = (\n        Key::Hash([2u8; 32]),\n        StoredValue::CLValue(CLValue::from_t(2_i32).unwrap()),\n    );\n    let (k3, v3) = (\n        Key::Hash([3u8; 32]),\n        StoredValue::CLValue(CLValue::from_t(3_i32).unwrap()),\n    );\n    tc_cache.insert_write(k1, v1.clone());\n    tc_cache.insert_read(k2, v2.clone());\n    tc_cache.insert_read(k3, v3.clone());\n    // Writes are not subject to cache invalidation\n    
assert_eq!(tc_cache.get(&k1), CacheEntry::Exists(&v1));\n    assert_eq!(tc_cache.get(&k2), CacheEntry::Exists(&v2)); // k2 and k3 should be there\n    assert_eq!(tc_cache.get(&k3), CacheEntry::Exists(&v3));\n}\n\n#[test]\nfn query_for_circular_references_should_fail() {\n    // create self-referential key\n    let cl_value_key = Key::URef(URef::new([255; 32], AccessRights::READ));\n    let cl_value = StoredValue::CLValue(CLValue::from_t(cl_value_key).unwrap());\n    let key_name = \"key\".to_string();\n\n    // create contract with this self-referential key in its named keys, and also a key referring to\n    // itself in its named keys.\n    let contract_key = Key::AddressableEntity(EntityAddr::SmartContract([1; 32]));\n    let contract_name = \"contract\".to_string();\n    let mut named_keys = NamedKeys::new();\n    named_keys.insert(key_name.clone(), cl_value_key);\n    named_keys.insert(contract_name.clone(), contract_key);\n    let contract = StoredValue::AddressableEntity(AddressableEntity::new(\n        [2; 32].into(),\n        [3; 32].into(),\n        ProtocolVersion::V1_0_0,\n        URef::default(),\n        AssociatedKeys::default(),\n        ActionThresholds::default(),\n        EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1),\n    ));\n\n    let name_key_cl_value = Key::NamedKey(\n        NamedKeyAddr::new_from_string(EntityAddr::SmartContract([1; 32]), \"key\".to_string())\n            .unwrap(),\n    );\n    let key_value = StoredValue::NamedKey(\n        NamedKeyValue::from_concrete_values(cl_value_key, \"key\".to_string()).unwrap(),\n    );\n\n    let name_key_contract = Key::NamedKey(\n        NamedKeyAddr::new_from_string(EntityAddr::SmartContract([1; 32]), \"contract\".to_string())\n            .unwrap(),\n    );\n    let key_value_contract = StoredValue::NamedKey(\n        NamedKeyValue::from_concrete_values(contract_key, \"contract\".to_string()).unwrap(),\n    );\n\n    let (global_state, root_hash, _tempdir) = 
state::lmdb::make_temporary_global_state([\n        (cl_value_key, cl_value),\n        (contract_key, contract),\n        (name_key_cl_value, key_value),\n        (name_key_contract, key_value_contract),\n    ]);\n    let view = global_state.checkout(root_hash).unwrap().unwrap();\n    let tracking_copy = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n\n    // query for the self-referential key (second path element of arbitrary value required to cause\n    // iteration _into_ the self-referential key)\n    let path = vec![key_name, String::new()];\n    if let Ok(TrackingCopyQueryResult::CircularReference(msg)) =\n        tracking_copy.query(contract_key, &path)\n    {\n        let expected_path_msg = format!(\"at path: {:?}/{}\", contract_key, path[0]);\n        assert!(msg.contains(&expected_path_msg));\n    } else {\n        panic!(\"Query didn't fail with a circular reference error\");\n    }\n\n    // query for itself in its own named keys\n    let path = vec![contract_name];\n    if let Ok(TrackingCopyQueryResult::CircularReference(msg)) =\n        tracking_copy.query(contract_key, &path)\n    {\n        let expected_path_msg = format!(\"at path: {:?}/{}\", contract_key, path[0]);\n        assert!(msg.contains(&expected_path_msg));\n    } else {\n        panic!(\"Query didn't fail with a circular reference error\");\n    }\n}\n\n#[test]\nfn validate_query_proof_should_work() {\n    let a_e_key = Key::AddressableEntity(EntityAddr::Account([30; 32]));\n    let a_e = StoredValue::AddressableEntity(AddressableEntity::new(\n        PackageHash::new([20; 32]),\n        ByteCodeHash::default(),\n        ProtocolVersion::V1_0_0,\n        URef::default(),\n        AssociatedKeys::new(AccountHash::new([3; 32]), Weight::new(1)),\n        ActionThresholds::default(),\n        EntityKind::Account(AccountHash::new([3; 32])),\n    ));\n\n    let c_e_key = Key::AddressableEntity(EntityAddr::SmartContract([5; 32]));\n    let c_e = 
StoredValue::AddressableEntity(AddressableEntity::new(\n        [2; 32].into(),\n        [3; 32].into(),\n        ProtocolVersion::V1_0_0,\n        URef::default(),\n        AssociatedKeys::default(),\n        ActionThresholds::default(),\n        EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1),\n    ));\n\n    let c_nk = \"abc\".to_string();\n\n    let (nk, nkv) = {\n        let entity_addr = if let Key::AddressableEntity(addr) = a_e_key {\n            addr\n        } else {\n            panic!(\"unexpected key variant\");\n        };\n        let named_key_addr = NamedKeyAddr::new_from_string(entity_addr, c_nk.clone())\n            .expect(\"must create named key entry\");\n        (\n            Key::NamedKey(named_key_addr),\n            StoredValue::NamedKey(\n                NamedKeyValue::from_concrete_values(c_e_key, c_nk.clone()).unwrap(),\n            ),\n        )\n    };\n\n    let initial_data = vec![(a_e_key, a_e), (c_e_key, c_e.clone()), (nk, nkv)];\n\n    // persist them\n    let (global_state, root_hash, _tempdir) =\n        state::lmdb::make_temporary_global_state(initial_data);\n\n    let view = global_state\n        .checkout(root_hash)\n        .expect(\"should checkout\")\n        .expect(\"should have view\");\n\n    let tracking_copy = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n\n    let path = &[c_nk];\n\n    let result = tracking_copy.query(a_e_key, path).expect(\"should query\");\n\n    let proofs = if let TrackingCopyQueryResult::Success { proofs, .. 
} = result {\n        proofs\n    } else {\n        panic!(\"query was not successful: {:?}\", result)\n    };\n\n    let expected_key_trace = &[a_e_key, nk, c_e_key];\n\n    // Happy path\n    tracking_copy::validate_query_merkle_proof(&root_hash, &proofs, expected_key_trace, &c_e)\n        .expect(\"should validate\");\n}\n\n#[test]\nfn get_keys_should_return_keys_in_the_account_keyspace() {\n    // account 1\n    let account_1_hash = AccountHash::new([1; 32]);\n\n    let account_cl_value = CLValue::from_t(AddressableEntityHash::new([20; 32])).unwrap();\n    let account_1_value = StoredValue::CLValue(account_cl_value);\n    let account_1_key = Key::Account(account_1_hash);\n\n    // account 2\n    let account_2_hash = AccountHash::new([2; 32]);\n\n    let fake_account_cl_value = CLValue::from_t(AddressableEntityHash::new([21; 32])).unwrap();\n    let account_2_value = StoredValue::CLValue(fake_account_cl_value);\n    let account_2_key = Key::Account(account_2_hash);\n\n    // random value\n    let cl_value = CLValue::from_t(U512::zero()).expect(\"should convert\");\n    let uref_value = StoredValue::CLValue(cl_value);\n    let uref_key = Key::URef(URef::new([8; 32], AccessRights::READ_ADD_WRITE));\n\n    // persist them\n    let (global_state, root_hash, _tempdir) = state::lmdb::make_temporary_global_state([\n        (account_1_key, account_1_value),\n        (account_2_key, account_2_value),\n        (uref_key, uref_value),\n    ]);\n\n    let view = global_state\n        .checkout(root_hash)\n        .expect(\"should checkout\")\n        .expect(\"should have view\");\n\n    let tracking_copy = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n\n    let key_set = tracking_copy.get_keys(&KeyTag::Account).unwrap();\n\n    assert_eq!(key_set.len(), 2);\n    assert!(key_set.contains(&account_1_key));\n    assert!(key_set.contains(&account_2_key));\n    assert!(!key_set.contains(&uref_key));\n}\n\n#[test]\nfn 
get_keys_should_return_keys_in_the_uref_keyspace() {\n    // account\n    let account_hash = AccountHash::new([1; 32]);\n\n    let account_cl_value = CLValue::from_t(AddressableEntityHash::new([20; 32])).unwrap();\n    let account_value = StoredValue::CLValue(account_cl_value);\n    let account_key = Key::Account(account_hash);\n\n    // random value 1\n    let cl_value = CLValue::from_t(U512::zero()).expect(\"should convert\");\n    let uref_1_value = StoredValue::CLValue(cl_value);\n    let uref_1_key = Key::URef(URef::new([8; 32], AccessRights::READ_ADD_WRITE));\n\n    // random value 2\n    let cl_value = CLValue::from_t(U512::one()).expect(\"should convert\");\n    let uref_2_value = StoredValue::CLValue(cl_value);\n    let uref_2_key = Key::URef(URef::new([9; 32], AccessRights::READ_ADD_WRITE));\n\n    // persist them\n    let (global_state, root_hash, _tempdir) = state::lmdb::make_temporary_global_state([\n        (account_key, account_value),\n        (uref_1_key, uref_1_value),\n        (uref_2_key, uref_2_value),\n    ]);\n\n    let view = global_state\n        .checkout(root_hash)\n        .expect(\"should checkout\")\n        .expect(\"should have view\");\n\n    let mut tracking_copy = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n\n    let key_set = tracking_copy.get_keys(&KeyTag::URef).unwrap();\n\n    assert_eq!(key_set.len(), 2);\n    assert!(key_set.contains(&uref_1_key.normalize()));\n    assert!(key_set.contains(&uref_2_key.normalize()));\n    assert!(!key_set.contains(&account_key));\n\n    // random value 3\n    let cl_value = CLValue::from_t(U512::from(2)).expect(\"should convert\");\n    let uref_3_value = StoredValue::CLValue(cl_value);\n    let uref_3_key = Key::URef(URef::new([10; 32], AccessRights::READ_ADD_WRITE));\n    tracking_copy.write(uref_3_key, uref_3_value);\n\n    let key_set = tracking_copy.get_keys(&KeyTag::URef).unwrap();\n\n    assert_eq!(key_set.len(), 3);\n    
assert!(key_set.contains(&uref_1_key.normalize()));\n    assert!(key_set.contains(&uref_2_key.normalize()));\n    assert!(key_set.contains(&uref_3_key.normalize()));\n    assert!(!key_set.contains(&account_key));\n}\n\n#[test]\nfn get_keys_should_handle_reads_from_empty_trie() {\n    let (global_state, root_hash, _tempdir) = state::lmdb::make_temporary_global_state([]);\n\n    let view = global_state\n        .checkout(root_hash)\n        .expect(\"should checkout\")\n        .expect(\"should have view\");\n\n    let mut tracking_copy = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n\n    let key_set = tracking_copy.get_keys(&KeyTag::URef).unwrap();\n\n    assert_eq!(key_set.len(), 0);\n    assert!(key_set.is_empty());\n\n    // persist random value 1\n    let cl_value = CLValue::from_t(U512::zero()).expect(\"should convert\");\n    let uref_1_value = StoredValue::CLValue(cl_value);\n    let uref_1_key = Key::URef(URef::new([8; 32], AccessRights::READ_ADD_WRITE));\n    tracking_copy.write(uref_1_key, uref_1_value);\n\n    let key_set = tracking_copy.get_keys(&KeyTag::URef).unwrap();\n\n    assert_eq!(key_set.len(), 1);\n    assert!(key_set.contains(&uref_1_key.normalize()));\n\n    // persist random value 2\n    let cl_value = CLValue::from_t(U512::one()).expect(\"should convert\");\n    let uref_2_value = StoredValue::CLValue(cl_value);\n    let uref_2_key = Key::URef(URef::new([9; 32], AccessRights::READ_ADD_WRITE));\n    tracking_copy.write(uref_2_key, uref_2_value);\n\n    let key_set = tracking_copy.get_keys(&KeyTag::URef).unwrap();\n\n    assert_eq!(key_set.len(), 2);\n    assert!(key_set.contains(&uref_1_key.normalize()));\n    assert!(key_set.contains(&uref_2_key.normalize()));\n\n    // persist account\n    let account_hash = AccountHash::new([1; 32]);\n\n    let account_value = CLValue::from_t(AddressableEntityHash::new([10; 32])).unwrap();\n    let account_value = StoredValue::CLValue(account_value);\n    let account_key = 
Key::Account(account_hash);\n    tracking_copy.write(account_key, account_value);\n\n    assert_eq!(key_set.len(), 2);\n    assert!(key_set.contains(&uref_1_key.normalize()));\n    assert!(key_set.contains(&uref_2_key.normalize()));\n    assert!(!key_set.contains(&account_key));\n\n    // persist random value 3\n    let cl_value = CLValue::from_t(U512::from(2)).expect(\"should convert\");\n    let uref_3_value = StoredValue::CLValue(cl_value);\n    let uref_3_key = Key::URef(URef::new([10; 32], AccessRights::READ_ADD_WRITE));\n    tracking_copy.write(uref_3_key, uref_3_value);\n\n    let key_set = tracking_copy.get_keys(&KeyTag::URef).unwrap();\n\n    assert_eq!(key_set.len(), 3);\n    assert!(key_set.contains(&uref_1_key.normalize()));\n    assert!(key_set.contains(&uref_2_key.normalize()));\n    assert!(key_set.contains(&uref_3_key.normalize()));\n    assert!(!key_set.contains(&account_key));\n}\n\nfn val_to_hashaddr<T: Into<U256>>(value: T) -> HashAddr {\n    let mut addr = HashAddr::default();\n    value.into().to_big_endian(&mut addr);\n    addr\n}\n\n#[test]\nfn query_with_large_depth_with_fixed_path_should_fail() {\n    let mut pairs = Vec::new();\n    let mut contract_keys = Vec::new();\n    let mut path = Vec::new();\n\n    const WASM_OFFSET: u64 = 1_000_000;\n    const PACKAGE_OFFSET: u64 = 1_000;\n\n    // create a long chain of contract at address X with a named key that points to a contract X+1\n    // which has a size that exceeds configured max query depth.\n    for value in 1..=DEFAULT_MAX_QUERY_DEPTH {\n        let contract_addr = EntityAddr::SmartContract(val_to_hashaddr(value));\n        let contract_key = Key::AddressableEntity(contract_addr);\n        let next_contract_key =\n            Key::AddressableEntity(EntityAddr::SmartContract(val_to_hashaddr(value + 1)));\n        let contract_name = format!(\"contract{}\", value);\n\n        let named_key =\n            NamedKeyAddr::new_from_string(contract_addr, contract_name.clone()).unwrap();\n\n 
       let named_key_value =\n            NamedKeyValue::from_concrete_values(next_contract_key, contract_name.clone()).unwrap();\n\n        pairs.push((\n            Key::NamedKey(named_key),\n            StoredValue::NamedKey(named_key_value),\n        ));\n\n        let contract = StoredValue::AddressableEntity(AddressableEntity::new(\n            val_to_hashaddr(PACKAGE_OFFSET + value).into(),\n            val_to_hashaddr(WASM_OFFSET + value).into(),\n            ProtocolVersion::V1_0_0,\n            URef::default(),\n            AssociatedKeys::default(),\n            ActionThresholds::default(),\n            EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1),\n        ));\n        pairs.push((contract_key, contract));\n        contract_keys.push(contract_key);\n        path.push(contract_name.clone());\n    }\n\n    let (global_state, root_hash, _tempdir) = state::lmdb::make_temporary_global_state(pairs);\n\n    let view = global_state.checkout(root_hash).unwrap().unwrap();\n    let tracking_copy = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n\n    let contract_key = contract_keys[0];\n    let result = tracking_copy.query(contract_key, &path);\n\n    assert!(\n        matches!(result, Ok(TrackingCopyQueryResult::DepthLimit {\n        depth\n    }) if depth == DEFAULT_MAX_QUERY_DEPTH),\n        \"{:?}\",\n        result\n    );\n}\n\n#[test]\nfn query_with_large_depth_with_urefs_should_fail() {\n    let mut pairs = Vec::new();\n    let mut uref_keys = Vec::new();\n\n    const WASM_OFFSET: u64 = 1_000_000;\n    const PACKAGE_OFFSET: u64 = 1_000;\n    let root_key_name = \"key\".to_string();\n\n    // create a long chain of urefs at address X with a uref that points to a uref X+1\n    // which has a size that exceeds configured max query depth.\n    for value in 1..=DEFAULT_MAX_QUERY_DEPTH {\n        let uref_addr = val_to_hashaddr(value);\n        let uref = Key::URef(URef::new(uref_addr, AccessRights::READ));\n\n        let 
next_uref_addr = val_to_hashaddr(value + 1);\n        let next_uref = Key::URef(URef::new(next_uref_addr, AccessRights::READ));\n        let next_cl_value = StoredValue::CLValue(CLValue::from_t(next_uref).unwrap());\n\n        pairs.push((uref, next_cl_value));\n        uref_keys.push(uref);\n    }\n\n    let contract_addr = EntityAddr::SmartContract([0; 32]);\n\n    let named_key = NamedKeyAddr::new_from_string(contract_addr, root_key_name.clone()).unwrap();\n\n    let named_key_value =\n        NamedKeyValue::from_concrete_values(uref_keys[0], root_key_name.clone()).unwrap();\n\n    pairs.push((\n        Key::NamedKey(named_key),\n        StoredValue::NamedKey(named_key_value),\n    ));\n\n    let contract = StoredValue::AddressableEntity(AddressableEntity::new(\n        val_to_hashaddr(PACKAGE_OFFSET).into(),\n        val_to_hashaddr(WASM_OFFSET).into(),\n        ProtocolVersion::V1_0_0,\n        URef::default(),\n        AssociatedKeys::default(),\n        ActionThresholds::default(),\n        EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1),\n    ));\n    let contract_key = Key::AddressableEntity(contract_addr);\n    pairs.push((contract_key, contract));\n\n    let (global_state, root_hash, _tempdir) = state::lmdb::make_temporary_global_state(pairs);\n\n    let view = global_state.checkout(root_hash).unwrap().unwrap();\n    let tracking_copy = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n\n    // query for the beginning of a long chain of urefs\n    // (second path element of arbitrary value required to cause iteration _into_ the nested key)\n    let path = vec![root_key_name, String::new()];\n    let result = tracking_copy.query(contract_key, &path);\n\n    assert!(\n        matches!(result, Ok(TrackingCopyQueryResult::DepthLimit {\n        depth\n    }) if depth == DEFAULT_MAX_QUERY_DEPTH),\n        \"{:?}\",\n        result\n    );\n}\n\n#[test]\nfn add_should_work() {\n    let mut pairs = Vec::new();\n    let key = 
Key::URef(URef::default());\n    let initial_value = CLValue::from_t(1_i32).unwrap();\n    pairs.push((key, StoredValue::CLValue(initial_value)));\n\n    let (global_state, root_hash, _tempdir) = state::lmdb::make_temporary_global_state(pairs);\n\n    let (effects, cache) = {\n        let view = global_state.checkout(root_hash).unwrap().unwrap();\n        let mut tracking_copy =\n            TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n        assert!(\n            matches!(tracking_copy.get(&key), Ok(Some(StoredValue::CLValue(initial_value))) if initial_value.clone().into_t::<i32>().unwrap() == 1)\n        );\n        tracking_copy\n            .add(key, StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()))\n            .unwrap();\n        assert!(\n            matches!(tracking_copy.get(&key), Ok(Some(StoredValue::CLValue(initial_value))) if initial_value.clone().into_t::<i32>().unwrap() == 2)\n        );\n        (tracking_copy.effects(), tracking_copy.cache())\n    };\n\n    let view = global_state.checkout(root_hash).unwrap().unwrap();\n    let mut tc = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n    assert!(\n        matches!(tc.get(&key), Ok(Some(StoredValue::CLValue(initial_value))) if initial_value.clone().into_t::<i32>().unwrap() == 1)\n    );\n    tc.apply_changes(effects, cache, Messages::new());\n    assert!(\n        matches!(tc.get(&key), Ok(Some(StoredValue::CLValue(initial_value))) if initial_value.clone().into_t::<i32>().unwrap() == 2)\n    );\n}\n\n#[test]\nfn tracking_copy_get_should_not_return_value_if_pruned() {\n    let mut pairs = Vec::new();\n    let key = Key::URef(URef::default());\n    let initial_value = CLValue::from_t(1_i32).unwrap();\n    pairs.push((key, StoredValue::CLValue(initial_value)));\n\n    let (global_state, root_hash, _tempdir) = state::lmdb::make_temporary_global_state(pairs);\n    let view = global_state.checkout(root_hash).unwrap().unwrap();\n    let mut tc = 
TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n\n    assert!(\n        matches!(tc.get(&key), Ok(Some(StoredValue::CLValue(initial_value))) if initial_value.clone().into_t::<i32>().unwrap() == 1)\n    );\n    tc.prune(key);\n    assert!(matches!(tc.get(&key), Ok(None)));\n\n    let secondary_value = CLValue::from_t(2_i32).unwrap();\n    tc.write(key, StoredValue::CLValue(secondary_value));\n    assert!(\n        matches!(tc.get(&key), Ok(Some(StoredValue::CLValue(initial_value))) if initial_value.clone().into_t::<i32>().unwrap() == 2)\n    );\n}\n\n#[test]\nfn tracking_copy_keys_with_prefix_should_include_pruning_of_uncommited_keys() {\n    let mut pairs = Vec::new();\n    let key = Key::Hash([1; 32]);\n    let initial_value = CLValue::from_t(1_i32).unwrap();\n    pairs.push((key, StoredValue::CLValue(initial_value)));\n\n    let (global_state, root_hash, _tempdir) = state::lmdb::make_temporary_global_state(pairs);\n    let view = global_state.checkout(root_hash).unwrap().unwrap();\n    let mut tc = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY);\n\n    assert!(\n        matches!(tc.get(&key), Ok(Some(StoredValue::CLValue(initial_value))) if initial_value.clone().into_t::<i32>().unwrap() == 1)\n    );\n\n    let key_2 = Key::Hash([2; 32]);\n    let secondary_value = CLValue::from_t(2_i32).unwrap();\n    tc.write(key_2, StoredValue::CLValue(secondary_value));\n    assert_eq!(\n        tc.get_by_byte_prefix(&[KeyTag::Hash as u8]).unwrap(),\n        BTreeSet::from_iter(vec![key, key_2])\n    );\n\n    tc.prune(key_2);\n    assert_eq!(\n        tc.get_by_byte_prefix(&[KeyTag::Hash as u8]).unwrap(),\n        BTreeSet::from_iter(vec![key])\n    );\n}\n"
  },
  {
    "path": "types/CHANGELOG.md",
    "content": "# Changelog\n\nAll notable changes to this project will be documented in this file. The format is based on [Keep a Changelog].\n\n[comment]: <> (Added: new features)\n[comment]: <> (Changed: changes in existing functionality)\n[comment]: <> (Deprecated: soon-to-be removed features)\n[comment]: <> (Removed: now removed features)\n[comment]: <> (Fixed: any bug fixes)\n[comment]: <> (Security: in case of vulnerabilities)\n\n## casper-types 7.0.0\n\n### Added\n\n- Added new enum `RewardsHandling` with two variants `Standard` and `Sustain`\n- Added `RewardsHandling` to the `CoreConfig` as the field `rewards_handling`\n- Added `RewardsHandling` to the `ProtocolUpgradeConfig` as the field `rewards_handling`\n- Added new field `minimum_delegation_rate` to the `CoreConfig` struct\n- Added new variant `Key::RewardsHandling` to the `Key` enum\n- Added new variant `GenesisAccount::Sustain` to the `GenesisAccount` enum\n- Added new constant `MINT_SUSTAIN_PURSE_KEY`\n\n## casper-types 6.0.0\n\n### Added\n\n- TransactionInvocationTarget::ByPackageHash::protocol_version_major field\n- TransactionInvocationTarget::ByPackageName::protocol_version_major field\n- New variant PackageIdentifier::HashWithVersion\n- New variant PackageIdentifier::NameWithVersion\n\n## casper-types 5.0.0\n\n### Added\n\n- enum EntityKind\n- enum addressable_entity::EntityKindTag\n- enum EntityAddr\n- struct addressable_entity::NamedKeyAddr\n- struct addressable_entity::NamedKeyValue\n- struct addressable_entity::MessageTopics\n- enum addressable_entity::MessageTopicError\n- struct AddressableEntity\n- struct addressable_entity::ActionThresholds\n- enum addressable_entity::ActionType\n- struct addressable_entity::AssociatedKeys\n- struct contract::EntryPoint\n- enum EntryPointType\n- enum EntryPointPayment\n- struct EntryPoint\n- enum EntryPointAddr\n- enum EntryPointValue\n- enum addressable_entity::FromAccountHashStrError\n- enum addressable_entity::SetThresholdFailure\n- struct 
addressable_entity::TryFromSliceForAccountHashError\n- struct addressable_entity::NamedKeys\n- struct BlockV1\n- struct BlockBodyV1\n- struct BlockV2\n- struct BlockHeaderV2\n- struct BlockBodyV2\n- struct ChainNameDigest\n- enum EraEnd\n- struct EraEndV1\n- struct EraEndV2\n- struct EraReport\n- enum FinalitySignature\n- struct FinalitySignatureV1\n- struct FinalitySignatureV2\n- struct FinalitySignatureId\n- struct JsonBlockWithSignatures\n- struct RewardedSignatures\n- struct SingleBlockRewardedSignatures\n- enum Rewards\n- struct BlockWithSignatures\n- enum BlockHeaderWithSignaturesValidationError\n- struct BlockHeaderWithSignatures\n- enum BlockValidationError (moved from casper-node)\n- enum Block (don't confuse with previous `Block` struct, see `Changed` section for details)\n- enum BlockHeader (don't confuse with previous `BlockHeader` struct, see `Changed` section for details)\n- struct HoldsEpoch\n- struct addressable_entity::TryFromSliceForContractHashError\n- enum addressable_entity::FromStrError\n- enum contract_messages::FromStrError\n- enum ByteCodeAddr\n- struct ByteCodeHash\n- enum ByteCodeKind\n- struct ByteCode\n- struct Chainspec\n- struct AccountsConfig\n- struct AccountConfig\n- struct DelegatorConfig\n- struct GenesisValidator\n- struct AdministratorAccount\n- enum GenesisAccount\n- struct ValidatorConfig\n- enum ActivationPoint\n- struct ChainspecRawBytes\n- struct CoreConfig\n- enum ConsensusProtocolName\n- enum LegacyRequiredFinality\n- enum FeeHandling\n- struct GenesisConfig\n- struct GlobalStateUpdateConfig\n- struct GlobalStateUpdate\n- enum GlobalStateUpdateError\n- struct HighwayConfig\n- enum HoldBalanceHandling\n- struct NetworkConfig\n- struct NextUpgrade\n- enum PricingHandling\n- struct ProtocolConfig\n- enum RefundHandling\n- struct TransactionConfig\n- struct TransactionLimitsDefinition\n- struct TransactionV1Config\n- struct ProtocolUpgradeConfig\n- struct VacancyConfig\n- struct AuctionCosts\n- struct ChainspecRegistry\n- 
struct HandlePaymentCosts\n- struct HostFunctionCosts\n- struct MessageLimits\n- struct MintCosts\n- struct BrTableCost\n- struct ControlFlowCosts\n- struct OpcodeCosts\n- struct StandardPaymentCosts\n- struct StorageCosts\n- struct SystemConfig\n- struct WasmConfig\n- struct WasmV1Config\n- struct ChecksumRegistry\n- struct SystemEntityRegistry\n- struct contract_messages::MessageAddr\n- type contract_messages::Messages\n- struct contract_messages::MessageChecksum\n- enum contract_messages::MessagePayload\n- struct contract_messages::Message\n- struct contract_messages::TopicNameHash\n- struct contract_messages::MessageTopicSummary\n- struct Contract\n- struct EntryPoints\n- struct Digest\n- struct DigestError\n- struct ChunkWithProof\n- enum MerkleConstructionError\n- enum MerkleVerificationError\n- struct IndexedMerkleProof\n- struct DisplayIter\n- struct execution::Effects;\n- enum execution::ExecutionResult (not to be confused with previous `ExecutionResult`, see `Changed` section for details)\n- struct execution::ExecutionResultV2\n- struct execution::TransformV2\n- struct execution::TransformError\n- struct execution::TransformInstruction\n- struct execution::TransformKindV2\n- struct execution::PaymentInfo\n- enum global_state::TrieMerkleProofStep\n- enum global_state::TrieMerkleProof\n- struct Pointer\n- trait GasLimited\n- enum AddressableEntityIdentifier\n- struct Approval\n- struct ApprovalsHash\n- enum InvalidDeploy\n- enum DeployDecodeFromJsonError\n- struct ExecutableDeployItem,\n- enum ExecutableDeployItemIdentifier\n- struct ExecutionInfo\n- enum InitiatorAddr,\n- enum InvalidTransaction,\n- enum InvalidTransactionV1\n- enum PackageIdentifier\n- enum PricingMode\n- enum PricingModeError\n- enum Transaction\n- enum TransactionEntryPoint,\n- enum TransactionHash\n- struct TransactionId\n- enum TransactionInvocationTarget\n- enum TransactionRuntime\n- enum TransactionScheduling\n- enum TransactionTarget\n- struct TransactionV1,\n- struct 
TransactionV1Payload,\n- struct TransactionV1Hash\n- enum TransactionV1DecodeFromJsonError\n- enum TransactionV1Error\n- struct TransactionV1ExcessiveSizeError\n- enum TransferTarget\n- struct TransferV2\n- enum ValidatorChange\n- type contracts::ProtocolVersionMajor\n- type EntityVersion\n- struct EntityVersionKey\n- struct EntityVersions\n- struct PackageHash\n- enum PackageStatus\n- struct Package\n- struct PeerEntry\n- struct Peers\n- enum system::auction::BidAddr\n- enum system::auction::BidAddrTag\n- enum system::auction::BidKind\n- enum system::auction::BidKindTag\n- enum system::auction::Bridge\n- enum system::auction::Reservation\n- enum system::auction::ValidatorBid;\n- enum system::auction::ValidatorBids\n- enum system::auction::DelegatorBids\n- enum system::auction::ValidatorCredits\n- enum system::auction::Staking\n- trait system::auction::BidsExt\n- enum system::auction::Error has new variants: ForgedReference, MissingPurse, ValidatorBidExistsAlready,BridgeRecordChainTooLong,UnexpectedBidVariant, DelegationAmountTooLarge\n- enum system::CallerTag\n- enum system::Caller\n- enum system::handle_payment::Error\n- enum system::handle_payment::Error has new variants IncompatiblePaymentSettings, UnexpectedKeyVariant\n- enum system::mint::BalanceHoldAddrTag\n- enum system::mint::Error has new variant: ForgedReference\n- enum system::reservation::ReservationKind\n- method CLValue::to_t\n- function handle_stored_dictionary_value\n- methods in ContractWasm: `new` and `take_bytes`\n- method `lock_status` in struct ContractPackage\n- function bytesrepr::allocate_buffer_for_size(expected_size: usize) -> Result<Vec<u8>, Error>\n- Enum EntryPointAccess has new variant `Template` added\n\n### Changed\n\n- pub enum ApiError has new variants: MessageTopicAlreadyRegistered, MaxTopicsNumberExceeded, MaxTopicNameSizeExceeded, MessageTopicNotRegistered, MessageTopicFull, MessageTooLarge, 
MaxMessagesPerBlockExceeded,NotAllowedToAddContractVersion,InvalidDelegationAmountLimits,InvalidCallerInfoRequest\n- struct AuctionState#bids is now a BTreeMap<PublicKey, Bid> instead of Vec<JsonBids>. This field is still serialized as an array. Due to this change the elements of the array will have more fields than before (added `validator_public_key`, `vesting_schedule`).\n- Variants of enum EntryPointType changed\n- Struct Parameter moved from contracts to addressable_entity::entry_points\n- struct EraId has new methods `iter_range_inclusive`, `increment`\n- struct ExecutionEffect moved to module execution::execution_result_v1\n- enum OpKind moved to module execution::execution_result_v1\n- struct Operation moved to module execution::execution_result_v1\n- enum Transform changed name to TransformKindV1, moved to module execution::execution_result_v1 and has new variants (WriteAddressableEntity, Prune, WriteBidKind)\n- enum ExecutionResult changed name to ExecutionResultV1, moved to module execution::execution_result_v1\n- struct TransformEntry changed name to TransformV1 and moved to module execution::execution_result_v1\n- moved NamedKey to module execution::execution_result_v1\n- KeyTag::SystemContractRegistry variant changed name to KeyTag::SystemEntityRegistry\n- variants for KeyTag enum: BidAddr = 15, Package = 16, AddressableEntity = 17, ByteCode = 18, Message = 19, NamedKey = 20, BlockGlobal = 21, BalanceHold = 22, EntryPoint = 23,\n- enum Key::SystemContractRegistry changed name to Key::SystemEntityRegistry\n- variants for enum Key: BidAddr, Package, AddressableEntity, ByteCode, Message, NamedKey, BlockGlobal, BalanceHold, EntryPoint,\n- struct ExcessiveSizeError changed name to DeployExcessiveSizeError\n- struct Transfer changed name to TransferV1\n- enum GlobalStateIdentifier\n- enum StoredValue has new variants: Transfer, AddressableEntity, BidKind, Package, ByteCode, MessageTopic, Message, NamedKey,Reservation,EntryPoint,\n- enum 
system::SystemContractType changed name to system::SystemEntityType\n- enum system::handle_payment::Error variant SystemFunctionCalledByUserAccount changed to InvalidCaller\n- struct EntryPoint has a new field `entry_point_payment`\n- struct BlockHeader was renamed to BlockHeaderV1 and used as a variant in enum BlockHeader\n- struct Block was renamed to BlockV1 and used as a variant in enum Block\n- Gas::from_motes now takes `u8` instead of `u64` as second parameter\n\n### Removed\n\n- type Groups (there is now a struct with that name)\n- type EntryPointsMap\n- type NamedKeys\n- methods `groups_mut`, `add_group`, `lookup_contract_hash`, `is_version_enabled`, `is_contract_enabled`, `insert_contract_version`, `disable_contract_version`, `enable_contract_version`, `enabled_versions`, `remove_group`, `next_contract_version_for`, `current_contract_version`, `current_contract_hash` in struct ContractPackage\n\n## [Unreleased] (node 1.5.4)\n\n### Changed\n\n- Remove filesystem I/O functionality from the `std` feature, and gated this behind a new feature `std-fs-io` which depends upon `std`.\n\n## 4.0.1\n\n### Added\n\n- Add a new `SyncHandling` enum, which allows a node to opt out of historical sync.\n\n### Changed\n\n- Update `k256` to version 0.13.1.\n\n### Removed\n\n- Remove `ExecutionResult::successful_transfers`.\n\n### Security\n\n- Update `ed25519-dalek` to version 2.0.0 as mitigation for [RUSTSEC-2022-0093](https://rustsec.org/advisories/RUSTSEC-2022-0093)\n\n## 3.0.0\n\n### Added\n\n- Add new `bytesrepr::Error::NotRepresentable` error variant that represents values that are not representable by the serialization format.\n- Add new `Key::Unbond` key variant under which the new unbonding information (to support redelegation) is written.\n- Add new `Key::ChainspecRegistry` key variant under which the `ChainspecRegistry` is written.\n- Add new `Key::ChecksumRegistry` key variant under which a registry of checksums for a given block is written. 
There are two checksums in the registry, one for the execution results and the other for the approvals of all deploys in the block.\n- Add new `StoredValue::Unbonding` variant to support redelegating.\n- Add a new type `WithdrawPurses` which is meant to represent `UnbondingPurses` as they exist in current live networks.\n\n### Changed\n\n- Extend `UnbondingPurse` to take a new field `new_validator` which represents the validator to whom tokens will be re-delegated.\n- Increase `DICTIONARY_ITEM_KEY_MAX_LENGTH` to 128.\n- Change prefix of formatted string representation of `ContractPackageHash` from \"contract-package-wasm\" to \"contract-package-\". Parsing from the old format is still supported.\n- Apply `#[non_exhaustive]` to error enums.\n- Change Debug output of `DeployHash` to hex-encoded string rather than a list of integers.\n\n### Fixed\n\n- Fix some integer casts, where failure is now detected and reported via new error variant `NotRepresentable`.\n\n## 2.0.0\n\n### Fixed\n\n- Republish v1.6.0 as v2.0.0 due to missed breaking change in API (addition of new variant to `Key`).\n\n## 1.6.0 [YANKED]\n\n### Added\n\n- Extend asymmetric key functionality, available via feature `std` (moved from `casper-nodes` crate).\n- Provide `Timestamp` and `TimeDiff` types for time operations, with extended functionality available via feature `std` (moved from `casper-nodes` crate).\n- Provide test-only functionality, in particular a seedable RNG `TestRng` which outputs its seed on test failure. 
Available via a new feature `testing`.\n- Add new `Key::EraSummary` key variant under which the era summary info is written on each switch block execution.\n\n### Deprecated\n\n- Deprecate `gens` feature: its functionality is included in the new `testing` feature.\n\n## 1.5.0\n\n### Added\n\n- Provide types and functionality to support improved access control inside execution engine.\n- Provide `CLTyped` impl for `ContractPackage` to allow it to be passed into contracts.\n\n### Fixed\n\n- Limit parsing of CLTyped objects to a maximum of 50 types deep.\n\n## 1.4.6 - 2021-12-29\n\n### Changed\n\n- Disable checksummed-hex encoding, but leave checksummed-hex decoding in place.\n\n## 1.4.5 - 2021-12-06\n\n### Added\n\n- Add function to `auction::MintProvider` trait to support minting into an existing purse.\n\n### Changed\n\n- Change checksummed hex implementation to use 32 byte rather than 64 byte blake2b digests.\n\n## [1.4.4] - 2021-11-18\n\n### Fixed\n\n- Revert the accidental change to the `std` feature causing a broken build when this feature is enabled.\n\n## [1.4.3] - 2021-11-17 [YANKED]\n\n## [1.4.2] - 2021-11-13 [YANKED]\n\n### Added\n\n- Add checksummed hex encoding following a scheme similar to [EIP-55](https://eips.ethereum.org/EIPS/eip-55).\n\n## [1.4.1] - 2021-10-23\n\nNo changes.\n\n## [1.4.0] - 2021-10-21 [YANKED]\n\n### Added\n\n- Add `json-schema` feature, disabled by default, to enable many types to be used to produce JSON-schema data.\n- Add implicit `datasize` feature, disabled by default, to enable many types to derive the `DataSize` trait.\n- Add `StoredValue` types to this crate.\n\n### Changed\n\n- Support building and testing using stable Rust.\n- Allow longer hex string to be presented in `json` files. 
Current maximum is increased from 100 to 150 characters.\n- Improve documentation and `Debug` impls for `ApiError`.\n\n### Deprecated\n\n- Feature `std` is deprecated as it is now a no-op, since there is no benefit to linking the std lib via this crate.\n\n## [1.3.0] - 2021-07-19\n\n### Changed\n\n- Restrict summarization when JSON pretty-printing to contiguous long hex strings.\n- Update pinned version of Rust to `nightly-2021-06-17`.\n\n### Removed\n\n- Remove ability to clone `SecretKey`s.\n\n## [1.2.0] - 2021-05-27\n\n### Changed\n\n- Change to Apache 2.0 license.\n- Return a `Result` from the constructor of `SecretKey` rather than potentially panicking.\n- Improve `Key` error reporting and tests.\n\n### Fixed\n\n- Fix `Key` deserialization.\n\n## [1.1.1] - 2021-04-19\n\nNo changes.\n\n## [1.1.0] - 2021-04-13 [YANKED]\n\nNo changes.\n\n## [1.0.1] - 2021-04-08\n\nNo changes.\n\n## [1.0.0] - 2021-03-30\n\n### Added\n\n- Initial release of types for use by software compatible with Casper mainnet.\n\n[Keep a Changelog]: https://keepachangelog.com/en/1.0.0\n[unreleased]: https://github.com/casper-network/casper-node/compare/24fc4027a...dev\n[1.4.3]: https://github.com/casper-network/casper-node/compare/2be27b3f5...24fc4027a\n[1.4.2]: https://github.com/casper-network/casper-node/compare/v1.4.1...2be27b3f5\n[1.4.1]: https://github.com/casper-network/casper-node/compare/v1.4.0...v1.4.1\n[1.4.0]: https://github.com/casper-network/casper-node/compare/v1.3.0...v1.4.0\n[1.3.0]: https://github.com/casper-network/casper-node/compare/v1.2.0...v1.3.0\n[1.2.0]: https://github.com/casper-network/casper-node/compare/v1.1.1...v1.2.0\n[1.1.1]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1\n[1.1.0]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1\n[1.0.1]: https://github.com/casper-network/casper-node/compare/v1.0.0...v1.0.1\n[1.0.0]: https://github.com/casper-network/casper-node/releases/tag/v1.0.0\n"
  },
  {
    "path": "types/Cargo.toml",
    "content": "[package]\nname = \"casper-types\"\nversion = \"7.0.0\" # when updating, also update 'html_root_url' in lib.rs\nauthors = [\"Ed Hastings <ed@casper.network>\"]\nedition = \"2021\"\ndescription = \"Types shared by many casper crates for use on the Casper network.\"\nreadme = \"README.md\"\ndocumentation = \"https://docs.rs/casper-types\"\nhomepage = \"https://casper.network\"\nrepository = \"https://github.com/casper-network/casper-node/tree/master/types\"\nlicense = \"Apache-2.0\"\n\n[dependencies]\nbase16 = { version = \"0.2.1\", default-features = false, features = [\"alloc\"] }\nbase64 = { version = \"0.13.0\", default-features = false }\nbitflags = \"1\"\nbincode = { version = \"1.3.1\", optional = true }\nblake2 = { version = \"0.9.0\", default-features = false }\ndatasize = { version = \"0.2.15\", optional = true }\nderp = { version = \"0.0.14\", optional = true }\ned25519-dalek = { version = \"2.1.1\", default-features = false, features = [\"alloc\", \"zeroize\"] }\ngetrandom = { version = \"0.2.0\", features = [\"rdrand\", \"js\"], optional = true }\nhex = { version = \"0.4.2\", default-features = false, features = [\"alloc\"] }\nhex_fmt = \"0.3.0\"\nhumantime = { version = \"2\", optional = true }\nitertools = { version = \"0.10.3\", default-features = false }\nlibc = { version = \"0.2.146\", optional = true, default-features = false }\nk256 = { version = \"0.13.4\", default-features = false, features = [\"ecdsa\", \"sha256\"] }\nnum = { version = \"0.4.0\", default-features = false, features = [\"alloc\"] }\nnum-derive = { version = \"0.4.2\", default-features = false }\nnum-integer = { version = \"0.1.42\", default-features = false }\nnum-rational = { version = \"0.4.0\", default-features = false, features = [\"serde\"] }\nnum-traits = { version = \"0.2.19\", default-features = false }\nonce_cell = { version = \"1.5.2\", optional = true }\npem = { version = \"0.8.1\", optional = true }\nproptest = { version = \"1.0.0\", optional = true 
}\nproptest-derive = { version = \"0.5.1\", optional = true }\nrand = { version = \"0.8.3\", default-features = false, features = [\"small_rng\"] }\nrand_pcg = { version = \"0.3.0\", optional = true }\nschemars = { version = \"0.8.21\", features = [\"preserve_order\"], optional = true }\nserde-map-to-array = \"1.1.0\"\nserde = { version = \"1\", default-features = false, features = [\"alloc\", \"derive\"] }\nserde_bytes = { version = \"0.11.5\", default-features = false, features = [\"alloc\"] }\nserde_json = { version = \"1.0.59\", default-features = false, features = [\"alloc\"] }\nstrum = { version = \"0.27\", features = [\"derive\"], optional = true }\nthiserror = { version = \"1\", optional = true }\ntracing = { version = \"0.1.37\", default-features = false }\nuint = { version = \"0.9.0\", default-features = false }\nuntrusted = { version = \"0.7.1\", optional = true }\nderive_more = \"0.99.17\"\nversion-sync = { version = \"0.9\", optional = true }\n\n[dev-dependencies]\nbase16 = { version = \"0.2.1\", features = [\"std\"] }\nbincode = \"1.3.1\"\ncriterion = \"0.5.1\"\nderp = \"0.0.14\"\ngetrandom = \"0.2.0\"\nhumantime = \"2\"\nonce_cell = \"1.5.2\"\nopenssl = \"0.10.70\"\npem = \"0.8.1\"\nproptest = \"1.0.0\"\nproptest-derive = \"0.5.1\"\nproptest-attr-macro = \"1.0.0\"\nrand = \"0.8.3\"\nrand_pcg = \"0.3.0\"\nserde_json = \"1\"\nserde_test = \"1\"\nstrum = { version = \"0.27\", features = [\"derive\"] }\ntempfile = \"3.4.0\"\nthiserror = \"1\"\nuntrusted = \"0.7.1\"\n#  add explicit dependency to resolve RUSTSEC-2024-0421\nurl = \"2.5.4\"\n\n[features]\njson-schema = [\"once_cell\", \"schemars\", \"serde-map-to-array/json-schema\"]\ntesting = [\"proptest\", \"proptest-derive\", \"rand/default\", \"rand_pcg\", \"strum\", \"bincode\", \"thiserror\", \"getrandom\", \"derp\"]\n# Includes a restricted set of std lib functionality suitable for usage e.g. 
in a JS environment when compiled to Wasm.\nstd = [\"base16/std\", \"derp\", \"getrandom/std\", \"humantime\", \"itertools/use_std\", \"libc\", \"once_cell\", \"pem\", \"serde_json/preserve_order\", \"thiserror\", \"untrusted\"]\n# Includes a complete set of std lib functionality, including filesystem I/O operations.\nstd-fs-io = [\"std\"]\n# DEPRECATED - use \"testing\" instead of \"gens\".\ngens = [\"testing\"]\nversion-sync = [\"dep:version-sync\"]\n\n[[bench]]\nname = \"bytesrepr_bench\"\nharness = false\nrequired-features = [\"testing\"]\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n"
  },
  {
    "path": "types/README.md",
    "content": "# `casper-types`\n\n[![LOGO](https://raw.githubusercontent.com/casper-network/casper-node/master/images/casper-association-logo-primary.svg)](https://casper.network/)\n\n[![Crates.io](https://img.shields.io/crates/v/casper-types)](https://crates.io/crates/casper-types)\n[![Documentation](https://docs.rs/casper-types/badge.svg)](https://docs.rs/casper-types)\n[![License](https://img.shields.io/badge/license-Apache-blue)](https://github.com/casper-network/casper-node/blob/master/LICENSE)\n\nTypes shared by many casper crates for use on the Casper network.\n\n## `no_std`\n\nThe crate is `no_std` (using the `core` and `alloc` crates) unless any of the following features are enabled:\n\n* `json-schema` to enable many types to be used to produce JSON-schema data via the [`schemars`](https://crates.io/crates/schemars) crate\n* `datasize` to enable many types to derive the [`DataSize`](https://github.com/casper-network/datasize-rs) trait\n* `gens` to enable many types to be produced in accordance with [`proptest`](https://crates.io/crates/proptest) usage for consumption within dependee crates' property testing suites\n\n## License\n\nLicensed under the [Apache License Version 2.0](https://github.com/casper-network/casper-node/blob/master/LICENSE).\n"
  },
  {
    "path": "types/benches/bytesrepr_bench.rs",
    "content": "use std::{\n    collections::{BTreeMap, BTreeSet},\n    iter,\n};\n\nuse criterion::{black_box, criterion_group, criterion_main, Bencher, Criterion};\n\nuse casper_types::{\n    account::AccountHash,\n    addressable_entity::{ActionThresholds, AddressableEntity, AssociatedKeys, EntityKind},\n    bytesrepr::{self, Bytes, FromBytes, ToBytes},\n    system::auction::{\n        Bid, BidKind, Delegator, DelegatorBid, DelegatorKind, EraInfo, SeigniorageAllocation,\n        ValidatorBid,\n    },\n    AccessRights, ByteCodeHash, CLTyped, CLValue, ContractRuntimeTag, DeployHash, DeployInfo,\n    EntityAddr, EntityVersionKey, EntityVersions, Gas, Group, Groups, InitiatorAddr, Key, Package,\n    PackageHash, PackageStatus, ProtocolVersion, PublicKey, SecretKey, TransactionHash,\n    TransactionV1Hash, TransferAddr, TransferV2, URef, KEY_HASH_LENGTH, TRANSFER_ADDR_LENGTH, U128,\n    U256, U512, UREF_ADDR_LENGTH,\n};\n\nstatic KB: usize = 1024;\nstatic BATCH: usize = 4 * KB;\n\nconst TEST_I32: i32 = 123_456_789;\nconst TEST_U128: U128 = U128([123_456_789, 0]);\nconst TEST_U256: U256 = U256([123_456_789, 0, 0, 0]);\nconst TEST_U512: U512 = U512([123_456_789, 0, 0, 0, 0, 0, 0, 0]);\nconst TEST_STR_1: &str = \"String One\";\nconst TEST_STR_2: &str = \"String Two\";\n\nfn prepare_vector(size: usize) -> Vec<i32> {\n    (0..size as i32).collect()\n}\n\nfn serialize_vector_of_i32s(b: &mut Bencher) {\n    let data = prepare_vector(black_box(BATCH));\n    b.iter(|| data.to_bytes());\n}\n\nfn deserialize_vector_of_i32s(b: &mut Bencher) {\n    let data = prepare_vector(black_box(BATCH)).to_bytes().unwrap();\n    b.iter(|| {\n        let (res, _rem): (Vec<i32>, _) = FromBytes::from_bytes(&data).unwrap();\n        res\n    });\n}\n\nfn serialize_vector_of_u8(b: &mut Bencher) {\n    // 0, 1, ... 
254, 255, 0, 1, ...\n    let data: Bytes = prepare_vector(BATCH)\n        .into_iter()\n        .map(|value| value as u8)\n        .collect();\n    b.iter(|| ToBytes::to_bytes(black_box(&data)));\n}\n\nfn deserialize_vector_of_u8(b: &mut Bencher) {\n    // 0, 1, ... 254, 255, 0, 1, ...\n    let data: Vec<u8> = prepare_vector(BATCH)\n        .into_iter()\n        .map(|value| value as u8)\n        .collect::<Bytes>()\n        .to_bytes()\n        .unwrap();\n    b.iter(|| Bytes::from_bytes(black_box(&data)))\n}\n\nfn serialize_u8(b: &mut Bencher) {\n    b.iter(|| ToBytes::to_bytes(black_box(&129u8)));\n}\n\nfn deserialize_u8(b: &mut Bencher) {\n    b.iter(|| u8::from_bytes(black_box(&[129u8])));\n}\n\nfn serialize_i32(b: &mut Bencher) {\n    b.iter(|| ToBytes::to_bytes(black_box(&1_816_142_132i32)));\n}\n\nfn deserialize_i32(b: &mut Bencher) {\n    b.iter(|| i32::from_bytes(black_box(&[0x34, 0x21, 0x40, 0x6c])));\n}\n\nfn serialize_u64(b: &mut Bencher) {\n    b.iter(|| ToBytes::to_bytes(black_box(&14_157_907_845_468_752_670u64)));\n}\n\nfn deserialize_u64(b: &mut Bencher) {\n    b.iter(|| u64::from_bytes(black_box(&[0x1e, 0x8b, 0xe1, 0x73, 0x2c, 0xfe, 0x7a, 0xc4])));\n}\n\nfn serialize_some_u64(b: &mut Bencher) {\n    let data = Some(14_157_907_845_468_752_670u64);\n\n    b.iter(|| ToBytes::to_bytes(black_box(&data)));\n}\n\nfn deserialize_some_u64(b: &mut Bencher) {\n    let data = Some(14_157_907_845_468_752_670u64);\n    let data = data.to_bytes().unwrap();\n\n    b.iter(|| Option::<u64>::from_bytes(&data));\n}\n\nfn serialize_none_u64(b: &mut Bencher) {\n    let data: Option<u64> = None;\n\n    b.iter(|| ToBytes::to_bytes(black_box(&data)));\n}\n\nfn deserialize_ok_u64(b: &mut Bencher) {\n    let data: Option<u64> = None;\n    let data = data.to_bytes().unwrap();\n    b.iter(|| Option::<u64>::from_bytes(&data));\n}\n\nfn make_test_vec_of_vec8() -> Vec<Bytes> {\n    (0..4)\n        .map(|_v| {\n            // 0, 1, 2, ..., 254, 255\n            let inner_vec = 
iter::repeat_with(|| 0..255u8)\n                .flatten()\n                // 4 times to create 4x 1024 bytes\n                .take(4)\n                .collect::<Vec<_>>();\n            Bytes::from(inner_vec)\n        })\n        .collect()\n}\n\nfn serialize_vector_of_vector_of_u8(b: &mut Bencher) {\n    let data = make_test_vec_of_vec8();\n    b.iter(|| data.to_bytes());\n}\n\nfn deserialize_vector_of_vector_of_u8(b: &mut Bencher) {\n    let data = make_test_vec_of_vec8().to_bytes().unwrap();\n    b.iter(|| Vec::<Bytes>::from_bytes(black_box(&data)));\n}\n\nfn serialize_tree_map(b: &mut Bencher) {\n    let data = {\n        let mut res = BTreeMap::new();\n        res.insert(\"asdf\".to_string(), \"zxcv\".to_string());\n        res.insert(\"qwer\".to_string(), \"rewq\".to_string());\n        res.insert(\"1234\".to_string(), \"5678\".to_string());\n        res\n    };\n\n    b.iter(|| ToBytes::to_bytes(black_box(&data)));\n}\n\nfn deserialize_treemap(b: &mut Bencher) {\n    let data = {\n        let mut res = BTreeMap::new();\n        res.insert(\"asdf\".to_string(), \"zxcv\".to_string());\n        res.insert(\"qwer\".to_string(), \"rewq\".to_string());\n        res.insert(\"1234\".to_string(), \"5678\".to_string());\n        res\n    };\n    let data = data.to_bytes().unwrap();\n    b.iter(|| BTreeMap::<String, String>::from_bytes(black_box(&data)));\n}\n\nfn serialize_string(b: &mut Bencher) {\n    let lorem = \"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\";\n    let data = lorem.to_string();\n    b.iter(|| ToBytes::to_bytes(black_box(&data)));\n}\n\nfn deserialize_string(b: &mut Bencher) {\n    let lorem = \"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\";\n    let data = lorem.to_bytes().unwrap();\n    b.iter(|| String::from_bytes(&data));\n}\n\nfn serialize_vec_of_string(b: &mut Bencher) {\n    let 
lorem = \"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\".to_string();\n    let array_of_lorem: Vec<String> = lorem.split(' ').map(Into::into).collect();\n    let data = array_of_lorem;\n    b.iter(|| ToBytes::to_bytes(black_box(&data)));\n}\n\nfn deserialize_vec_of_string(b: &mut Bencher) {\n    let lorem = \"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\".to_string();\n    let array_of_lorem: Vec<String> = lorem.split(' ').map(Into::into).collect();\n    let data = array_of_lorem.to_bytes().unwrap();\n\n    b.iter(|| Vec::<String>::from_bytes(&data));\n}\n\nfn serialize_unit(b: &mut Bencher) {\n    b.iter(|| ToBytes::to_bytes(black_box(&())))\n}\n\nfn deserialize_unit(b: &mut Bencher) {\n    let data = ().to_bytes().unwrap();\n\n    b.iter(|| <()>::from_bytes(&data))\n}\n\nfn serialize_key_account(b: &mut Bencher) {\n    let account = Key::Account(AccountHash::new([0u8; 32]));\n\n    b.iter(|| ToBytes::to_bytes(black_box(&account)))\n}\n\nfn deserialize_key_account(b: &mut Bencher) {\n    let account = Key::Account(AccountHash::new([0u8; 32]));\n    let account_bytes = account.to_bytes().unwrap();\n\n    b.iter(|| Key::from_bytes(black_box(&account_bytes)))\n}\n\nfn serialize_key_hash(b: &mut Bencher) {\n    let hash = Key::Hash([0u8; 32]);\n    b.iter(|| ToBytes::to_bytes(black_box(&hash)))\n}\n\nfn deserialize_key_hash(b: &mut Bencher) {\n    let hash = Key::Hash([0u8; 32]);\n    let hash_bytes = hash.to_bytes().unwrap();\n\n    b.iter(|| Key::from_bytes(black_box(&hash_bytes)))\n}\n\nfn serialize_key_uref(b: &mut Bencher) {\n    let uref = Key::URef(URef::new([0u8; 32], AccessRights::ADD_WRITE));\n    b.iter(|| ToBytes::to_bytes(black_box(&uref)))\n}\n\nfn deserialize_key_uref(b: &mut Bencher) {\n    let uref = Key::URef(URef::new([0u8; 32], AccessRights::ADD_WRITE));\n    let uref_bytes = 
uref.to_bytes().unwrap();\n\n    b.iter(|| Key::from_bytes(black_box(&uref_bytes)))\n}\n\nfn serialize_vec_of_keys(b: &mut Bencher) {\n    let keys: Vec<Key> = (0..32)\n        .map(|i| Key::URef(URef::new([i; 32], AccessRights::ADD_WRITE)))\n        .collect();\n    b.iter(|| ToBytes::to_bytes(black_box(&keys)))\n}\n\nfn deserialize_vec_of_keys(b: &mut Bencher) {\n    let keys: Vec<Key> = (0..32)\n        .map(|i| Key::URef(URef::new([i; 32], AccessRights::ADD_WRITE)))\n        .collect();\n    let keys_bytes = keys.to_bytes().unwrap();\n    b.iter(|| Vec::<Key>::from_bytes(black_box(&keys_bytes)));\n}\n\nfn serialize_access_rights_read(b: &mut Bencher) {\n    b.iter(|| AccessRights::READ.to_bytes());\n}\n\nfn deserialize_access_rights_read(b: &mut Bencher) {\n    let data = AccessRights::READ.to_bytes().unwrap();\n    b.iter(|| AccessRights::from_bytes(&data));\n}\n\nfn serialize_access_rights_write(b: &mut Bencher) {\n    b.iter(|| AccessRights::WRITE.to_bytes());\n}\n\nfn deserialize_access_rights_write(b: &mut Bencher) {\n    let data = AccessRights::WRITE.to_bytes().unwrap();\n    b.iter(|| AccessRights::from_bytes(&data));\n}\n\nfn serialize_access_rights_add(b: &mut Bencher) {\n    b.iter(|| AccessRights::ADD.to_bytes());\n}\n\nfn deserialize_access_rights_add(b: &mut Bencher) {\n    let data = AccessRights::ADD.to_bytes().unwrap();\n    b.iter(|| AccessRights::from_bytes(&data));\n}\n\nfn serialize_access_rights_read_add(b: &mut Bencher) {\n    b.iter(|| AccessRights::READ_ADD.to_bytes());\n}\n\nfn deserialize_access_rights_read_add(b: &mut Bencher) {\n    let data = AccessRights::READ_ADD.to_bytes().unwrap();\n    b.iter(|| AccessRights::from_bytes(&data));\n}\n\nfn serialize_access_rights_read_write(b: &mut Bencher) {\n    b.iter(|| AccessRights::READ_WRITE.to_bytes());\n}\n\nfn deserialize_access_rights_read_write(b: &mut Bencher) {\n    let data = AccessRights::READ_WRITE.to_bytes().unwrap();\n    b.iter(|| AccessRights::from_bytes(&data));\n}\n\nfn 
serialize_access_rights_add_write(b: &mut Bencher) {\n    b.iter(|| AccessRights::ADD_WRITE.to_bytes());\n}\n\nfn deserialize_access_rights_add_write(b: &mut Bencher) {\n    let data = AccessRights::ADD_WRITE.to_bytes().unwrap();\n    b.iter(|| AccessRights::from_bytes(&data));\n}\n\nfn serialize_cl_value<T: CLTyped + ToBytes>(raw_value: T) -> Vec<u8> {\n    CLValue::from_t(raw_value)\n        .expect(\"should create CLValue\")\n        .to_bytes()\n        .expect(\"should serialize CLValue\")\n}\n\nfn benchmark_deserialization<T: CLTyped + ToBytes + FromBytes>(b: &mut Bencher, raw_value: T) {\n    let serialized_value = serialize_cl_value(raw_value);\n    b.iter(|| {\n        let cl_value: CLValue = bytesrepr::deserialize_from_slice(&serialized_value).unwrap();\n        let _raw_value: T = cl_value.into_t().unwrap();\n    });\n}\n\nfn serialize_cl_value_int32(b: &mut Bencher) {\n    b.iter(|| serialize_cl_value(TEST_I32));\n}\n\nfn deserialize_cl_value_int32(b: &mut Bencher) {\n    benchmark_deserialization(b, TEST_I32);\n}\n\nfn serialize_cl_value_uint128(b: &mut Bencher) {\n    b.iter(|| serialize_cl_value(TEST_U128));\n}\n\nfn deserialize_cl_value_uint128(b: &mut Bencher) {\n    benchmark_deserialization(b, TEST_U128);\n}\n\nfn serialize_cl_value_uint256(b: &mut Bencher) {\n    b.iter(|| serialize_cl_value(TEST_U256));\n}\n\nfn deserialize_cl_value_uint256(b: &mut Bencher) {\n    benchmark_deserialization(b, TEST_U256);\n}\n\nfn serialize_cl_value_uint512(b: &mut Bencher) {\n    b.iter(|| serialize_cl_value(TEST_U512));\n}\n\nfn deserialize_cl_value_uint512(b: &mut Bencher) {\n    benchmark_deserialization(b, TEST_U512);\n}\n\nfn serialize_cl_value_bytearray(b: &mut Bencher) {\n    b.iter_with_setup(\n        || {\n            let vec: Vec<u8> = (0..255).collect();\n            Bytes::from(vec)\n        },\n        serialize_cl_value,\n    );\n}\n\nfn deserialize_cl_value_bytearray(b: &mut Bencher) {\n    let vec = (0..255).collect::<Vec<u8>>();\n    let 
bytes: Bytes = vec.into();\n    benchmark_deserialization(b, bytes);\n}\n\nfn serialize_cl_value_listint32(b: &mut Bencher) {\n    b.iter(|| serialize_cl_value((0..1024).collect::<Vec<i32>>()));\n}\n\nfn deserialize_cl_value_listint32(b: &mut Bencher) {\n    benchmark_deserialization(b, (0..1024).collect::<Vec<i32>>());\n}\n\nfn serialize_cl_value_string(b: &mut Bencher) {\n    b.iter(|| serialize_cl_value(TEST_STR_1.to_string()));\n}\n\nfn deserialize_cl_value_string(b: &mut Bencher) {\n    benchmark_deserialization(b, TEST_STR_1.to_string());\n}\n\nfn serialize_cl_value_liststring(b: &mut Bencher) {\n    b.iter(|| serialize_cl_value(vec![TEST_STR_1.to_string(), TEST_STR_2.to_string()]));\n}\n\nfn deserialize_cl_value_liststring(b: &mut Bencher) {\n    benchmark_deserialization(b, vec![TEST_STR_1.to_string(), TEST_STR_2.to_string()]);\n}\n\nfn serialize_cl_value_namedkey(b: &mut Bencher) {\n    b.iter(|| {\n        serialize_cl_value((\n            TEST_STR_1.to_string(),\n            Key::Account(AccountHash::new([0xffu8; 32])),\n        ))\n    });\n}\n\nfn deserialize_cl_value_namedkey(b: &mut Bencher) {\n    benchmark_deserialization(\n        b,\n        (\n            TEST_STR_1.to_string(),\n            Key::Account(AccountHash::new([0xffu8; 32])),\n        ),\n    );\n}\n\nfn serialize_u128(b: &mut Bencher) {\n    let num_u128 = U128::default();\n    b.iter(|| ToBytes::to_bytes(black_box(&num_u128)))\n}\n\nfn deserialize_u128(b: &mut Bencher) {\n    let num_u128 = U128::default();\n    let num_u128_bytes = num_u128.to_bytes().unwrap();\n\n    b.iter(|| U128::from_bytes(black_box(&num_u128_bytes)))\n}\n\nfn serialize_u256(b: &mut Bencher) {\n    let num_u256 = U256::default();\n    b.iter(|| ToBytes::to_bytes(black_box(&num_u256)))\n}\n\nfn deserialize_u256(b: &mut Bencher) {\n    let num_u256 = U256::default();\n    let num_u256_bytes = num_u256.to_bytes().unwrap();\n\n    b.iter(|| U256::from_bytes(black_box(&num_u256_bytes)))\n}\n\nfn serialize_u512(b: 
&mut Bencher) {\n    let num_u512 = U512::default();\n    b.iter(|| ToBytes::to_bytes(black_box(&num_u512)))\n}\n\nfn deserialize_u512(b: &mut Bencher) {\n    let num_u512 = U512::default();\n    let num_u512_bytes = num_u512.to_bytes().unwrap();\n\n    b.iter(|| U512::from_bytes(black_box(&num_u512_bytes)))\n}\n\nfn serialize_contract(b: &mut Bencher) {\n    let contract = sample_contract();\n    b.iter(|| ToBytes::to_bytes(black_box(&contract)));\n}\n\nfn deserialize_contract(b: &mut Bencher) {\n    let contract = sample_contract();\n    let contract_bytes = AddressableEntity::to_bytes(&contract).unwrap();\n    b.iter(|| AddressableEntity::from_bytes(black_box(&contract_bytes)).unwrap());\n}\n\nfn sample_contract() -> AddressableEntity {\n    AddressableEntity::new(\n        PackageHash::default(),\n        ByteCodeHash::default(),\n        ProtocolVersion::default(),\n        URef::default(),\n        AssociatedKeys::default(),\n        ActionThresholds::default(),\n        EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1),\n    )\n}\n\nfn contract_version_key_fn(i: u8) -> EntityVersionKey {\n    EntityVersionKey::new(i as u32, i as u32)\n}\n\nfn contract_hash_fn(i: u8) -> EntityAddr {\n    EntityAddr::SmartContract([i; KEY_HASH_LENGTH])\n}\n\nfn sample_map<K: Ord, V, FK, FV>(key_fn: FK, value_fn: FV, count: u8) -> BTreeMap<K, V>\nwhere\n    FK: Fn(u8) -> K,\n    FV: Fn(u8) -> V,\n{\n    (0..count)\n        .map(|i| {\n            let key = key_fn(i);\n            let value = value_fn(i);\n            (key, value)\n        })\n        .collect()\n}\n\nfn sample_set<K: Ord, F>(fun: F, count: u8) -> BTreeSet<K>\nwhere\n    F: Fn(u8) -> K,\n{\n    (0..count).map(fun).collect()\n}\n\nfn sample_group(i: u8) -> Group {\n    Group::new(format!(\"group-{}\", i))\n}\n\nfn sample_uref(i: u8) -> URef {\n    URef::new([i; UREF_ADDR_LENGTH], AccessRights::all())\n}\n\nfn sample_contract_package(\n    contract_versions_len: u8,\n    disabled_versions_len: u8,\n    
groups_len: u8,\n) -> Package {\n    let versions = EntityVersions::from(sample_map(\n        contract_version_key_fn,\n        contract_hash_fn,\n        contract_versions_len,\n    ));\n    let disabled_versions = sample_set(contract_version_key_fn, disabled_versions_len);\n    let groups = Groups::from(sample_map(\n        sample_group,\n        |_| sample_set(sample_uref, 3),\n        groups_len,\n    ));\n\n    Package::new(versions, disabled_versions, groups, PackageStatus::Locked)\n}\n\nfn serialize_contract_package(b: &mut Bencher) {\n    let contract = sample_contract_package(5, 1, 5);\n    b.iter(|| Package::to_bytes(black_box(&contract)));\n}\n\nfn deserialize_contract_package(b: &mut Bencher) {\n    let contract_package = sample_contract_package(5, 1, 5);\n    let contract_bytes = Package::to_bytes(&contract_package).unwrap();\n    b.iter(|| Package::from_bytes(black_box(&contract_bytes)).unwrap());\n}\n\nfn u32_to_pk(i: u32) -> PublicKey {\n    let mut sk_bytes = [0u8; 32];\n    U256::from(i).to_big_endian(&mut sk_bytes);\n    let sk = SecretKey::ed25519_from_bytes(sk_bytes).unwrap();\n    PublicKey::from(&sk)\n}\n\nfn sample_delegators(delegators_len: u32) -> Vec<Delegator> {\n    (0..delegators_len)\n        .map(|i| {\n            let delegator_pk = u32_to_pk(i);\n            let staked_amount = U512::from_dec_str(\"123123123123123\").unwrap();\n            let bonding_purse = URef::default();\n            let validator_pk = u32_to_pk(i);\n            Delegator::unlocked(delegator_pk, staked_amount, bonding_purse, validator_pk)\n        })\n        .collect()\n}\n\nfn sample_delegator_bids(delegators_len: u32) -> Vec<DelegatorBid> {\n    (0..delegators_len)\n        .map(|i| {\n            let delegator_pk = u32_to_pk(i);\n            let staked_amount = U512::from_dec_str(\"123123123123123\").unwrap();\n            let bonding_purse = URef::default();\n            let validator_pk = u32_to_pk(i);\n            DelegatorBid::unlocked(\n               
 delegator_pk.into(),\n                staked_amount,\n                bonding_purse,\n                validator_pk,\n            )\n        })\n        .collect()\n}\n\nfn sample_bid(delegators_len: u32) -> Bid {\n    let validator_public_key = PublicKey::System;\n    let bonding_purse = URef::default();\n    let staked_amount = U512::from_dec_str(\"123123123123123\").unwrap();\n    let delegation_rate = 10u8;\n    let mut bid = Bid::unlocked(\n        validator_public_key,\n        bonding_purse,\n        staked_amount,\n        delegation_rate,\n    );\n    let new_delegators = sample_delegators(delegators_len);\n\n    let curr_delegators = bid.delegators_mut();\n    for delegator in new_delegators.into_iter() {\n        assert!(curr_delegators\n            .insert(delegator.delegator_public_key().clone(), delegator)\n            .is_none());\n    }\n    bid\n}\n\nfn serialize_bid(delegators_len: u32, b: &mut Bencher) {\n    let bid = sample_bid(delegators_len);\n    b.iter(|| Bid::to_bytes(black_box(&bid)));\n}\nfn serialize_delegation_bid(delegators_len: u32, b: &mut Bencher) {\n    let bids = sample_delegator_bids(delegators_len);\n    for bid in bids {\n        b.iter(|| BidKind::to_bytes(black_box(&BidKind::Delegator(Box::new(bid.clone())))));\n    }\n}\n\nfn sample_validator_bid() -> BidKind {\n    let validator_public_key = PublicKey::System;\n    let bonding_purse = URef::default();\n    let staked_amount = U512::from_dec_str(\"123123123123123\").unwrap();\n    let delegation_rate = 10u8;\n    BidKind::Validator(Box::new(ValidatorBid::unlocked(\n        validator_public_key,\n        bonding_purse,\n        staked_amount,\n        delegation_rate,\n        0,\n        0,\n        0,\n    )))\n}\n\nfn serialize_validator_bid(b: &mut Bencher) {\n    let bid = sample_validator_bid();\n    b.iter(|| BidKind::to_bytes(black_box(&bid)));\n}\n\nfn deserialize_bid(delegators_len: u32, b: &mut Bencher) {\n    let bid = sample_bid(delegators_len);\n    let 
bid_bytes = Bid::to_bytes(&bid).unwrap();\n    b.iter(|| Bid::from_bytes(black_box(&bid_bytes)));\n}\n\nfn sample_transfer() -> TransferV2 {\n    TransferV2::new(\n        TransactionHash::V1(TransactionV1Hash::default()),\n        InitiatorAddr::AccountHash(AccountHash::default()),\n        None,\n        URef::default(),\n        URef::default(),\n        U512::MAX,\n        Gas::new(U512::from_dec_str(\"123123123123\").unwrap()),\n        Some(1u64),\n    )\n}\n\nfn serialize_transfer(b: &mut Bencher) {\n    let transfer = sample_transfer();\n    b.iter(|| TransferV2::to_bytes(&transfer));\n}\n\nfn deserialize_transfer(b: &mut Bencher) {\n    let transfer = sample_transfer();\n    let transfer_bytes = transfer.to_bytes().unwrap();\n    b.iter(|| TransferV2::from_bytes(&transfer_bytes));\n}\n\nfn sample_deploy_info(transfer_len: u16) -> DeployInfo {\n    let transfers = (0..transfer_len)\n        .map(|i| {\n            let mut tmp = [0u8; TRANSFER_ADDR_LENGTH];\n            U256::from(i).to_little_endian(&mut tmp);\n            TransferAddr::new(tmp)\n        })\n        .collect::<Vec<_>>();\n    DeployInfo::new(\n        DeployHash::default(),\n        &transfers,\n        AccountHash::default(),\n        URef::default(),\n        U512::MAX,\n    )\n}\n\nfn serialize_deploy_info(b: &mut Bencher) {\n    let deploy_info = sample_deploy_info(1000);\n    b.iter(|| DeployInfo::to_bytes(&deploy_info));\n}\n\nfn deserialize_deploy_info(b: &mut Bencher) {\n    let deploy_info = sample_deploy_info(1000);\n    let deploy_bytes = deploy_info.to_bytes().unwrap();\n    b.iter(|| DeployInfo::from_bytes(&deploy_bytes));\n}\n\nfn sample_era_info(delegators_len: u32) -> EraInfo {\n    let mut base = EraInfo::new();\n    let delegations = (0..delegators_len).map(|i| {\n        let pk = u32_to_pk(i);\n        SeigniorageAllocation::delegator_kind(DelegatorKind::PublicKey(pk.clone()), pk, U512::MAX)\n    });\n    base.seigniorage_allocations_mut().extend(delegations);\n    
base\n}\n\nfn serialize_era_info(delegators_len: u32, b: &mut Bencher) {\n    let era_info = sample_era_info(delegators_len);\n    b.iter(|| EraInfo::to_bytes(&era_info));\n}\n\nfn deserialize_era_info(delegators_len: u32, b: &mut Bencher) {\n    let era_info = sample_era_info(delegators_len);\n    let era_info_bytes = era_info.to_bytes().unwrap();\n    b.iter(|| EraInfo::from_bytes(&era_info_bytes));\n}\n\nfn bytesrepr_bench(c: &mut Criterion) {\n    c.bench_function(\"serialize_vector_of_i32s\", serialize_vector_of_i32s);\n    c.bench_function(\"deserialize_vector_of_i32s\", deserialize_vector_of_i32s);\n    c.bench_function(\"serialize_vector_of_u8\", serialize_vector_of_u8);\n    c.bench_function(\"deserialize_vector_of_u8\", deserialize_vector_of_u8);\n    c.bench_function(\"serialize_u8\", serialize_u8);\n    c.bench_function(\"deserialize_u8\", deserialize_u8);\n    c.bench_function(\"serialize_i32\", serialize_i32);\n    c.bench_function(\"deserialize_i32\", deserialize_i32);\n    c.bench_function(\"serialize_u64\", serialize_u64);\n    c.bench_function(\"deserialize_u64\", deserialize_u64);\n    c.bench_function(\"serialize_some_u64\", serialize_some_u64);\n    c.bench_function(\"deserialize_some_u64\", deserialize_some_u64);\n    c.bench_function(\"serialize_none_u64\", serialize_none_u64);\n    c.bench_function(\"deserialize_ok_u64\", deserialize_ok_u64);\n    c.bench_function(\n        \"serialize_vector_of_vector_of_u8\",\n        serialize_vector_of_vector_of_u8,\n    );\n    c.bench_function(\n        \"deserialize_vector_of_vector_of_u8\",\n        deserialize_vector_of_vector_of_u8,\n    );\n    c.bench_function(\"serialize_tree_map\", serialize_tree_map);\n    c.bench_function(\"deserialize_treemap\", deserialize_treemap);\n    c.bench_function(\"serialize_string\", serialize_string);\n    c.bench_function(\"deserialize_string\", deserialize_string);\n    c.bench_function(\"serialize_vec_of_string\", serialize_vec_of_string);\n    
c.bench_function(\"deserialize_vec_of_string\", deserialize_vec_of_string);\n    c.bench_function(\"serialize_unit\", serialize_unit);\n    c.bench_function(\"deserialize_unit\", deserialize_unit);\n    c.bench_function(\"serialize_key_account\", serialize_key_account);\n    c.bench_function(\"deserialize_key_account\", deserialize_key_account);\n    c.bench_function(\"serialize_key_hash\", serialize_key_hash);\n    c.bench_function(\"deserialize_key_hash\", deserialize_key_hash);\n    c.bench_function(\"serialize_key_uref\", serialize_key_uref);\n    c.bench_function(\"deserialize_key_uref\", deserialize_key_uref);\n    c.bench_function(\"serialize_vec_of_keys\", serialize_vec_of_keys);\n    c.bench_function(\"deserialize_vec_of_keys\", deserialize_vec_of_keys);\n    c.bench_function(\"serialize_access_rights_read\", serialize_access_rights_read);\n    c.bench_function(\n        \"deserialize_access_rights_read\",\n        deserialize_access_rights_read,\n    );\n    c.bench_function(\n        \"serialize_access_rights_write\",\n        serialize_access_rights_write,\n    );\n    c.bench_function(\n        \"deserialize_access_rights_write\",\n        deserialize_access_rights_write,\n    );\n    c.bench_function(\"serialize_access_rights_add\", serialize_access_rights_add);\n    c.bench_function(\n        \"deserialize_access_rights_add\",\n        deserialize_access_rights_add,\n    );\n    c.bench_function(\n        \"serialize_access_rights_read_add\",\n        serialize_access_rights_read_add,\n    );\n    c.bench_function(\n        \"deserialize_access_rights_read_add\",\n        deserialize_access_rights_read_add,\n    );\n    c.bench_function(\n        \"serialize_access_rights_read_write\",\n        serialize_access_rights_read_write,\n    );\n    c.bench_function(\n        \"deserialize_access_rights_read_write\",\n        deserialize_access_rights_read_write,\n    );\n    c.bench_function(\n        \"serialize_access_rights_add_write\",\n        
serialize_access_rights_add_write,\n    );\n    c.bench_function(\n        \"deserialize_access_rights_add_write\",\n        deserialize_access_rights_add_write,\n    );\n    c.bench_function(\"serialize_cl_value_int32\", serialize_cl_value_int32);\n    c.bench_function(\"deserialize_cl_value_int32\", deserialize_cl_value_int32);\n    c.bench_function(\"serialize_cl_value_uint128\", serialize_cl_value_uint128);\n    c.bench_function(\"deserialize_cl_value_uint128\", deserialize_cl_value_uint128);\n    c.bench_function(\"serialize_cl_value_uint256\", serialize_cl_value_uint256);\n    c.bench_function(\"deserialize_cl_value_uint256\", deserialize_cl_value_uint256);\n    c.bench_function(\"serialize_cl_value_uint512\", serialize_cl_value_uint512);\n    c.bench_function(\"deserialize_cl_value_uint512\", deserialize_cl_value_uint512);\n    c.bench_function(\"serialize_cl_value_bytearray\", serialize_cl_value_bytearray);\n    c.bench_function(\n        \"deserialize_cl_value_bytearray\",\n        deserialize_cl_value_bytearray,\n    );\n    c.bench_function(\"serialize_cl_value_listint32\", serialize_cl_value_listint32);\n    c.bench_function(\n        \"deserialize_cl_value_listint32\",\n        deserialize_cl_value_listint32,\n    );\n    c.bench_function(\"serialize_cl_value_string\", serialize_cl_value_string);\n    c.bench_function(\"deserialize_cl_value_string\", deserialize_cl_value_string);\n    c.bench_function(\n        \"serialize_cl_value_liststring\",\n        serialize_cl_value_liststring,\n    );\n    c.bench_function(\n        \"deserialize_cl_value_liststring\",\n        deserialize_cl_value_liststring,\n    );\n    c.bench_function(\"serialize_cl_value_namedkey\", serialize_cl_value_namedkey);\n    c.bench_function(\n        \"deserialize_cl_value_namedkey\",\n        deserialize_cl_value_namedkey,\n    );\n    c.bench_function(\"serialize_u128\", serialize_u128);\n    c.bench_function(\"deserialize_u128\", deserialize_u128);\n    
c.bench_function(\"serialize_u256\", serialize_u256);\n    c.bench_function(\"deserialize_u256\", deserialize_u256);\n    c.bench_function(\"serialize_u512\", serialize_u512);\n    c.bench_function(\"deserialize_u512\", deserialize_u512);\n    // c.bench_function(\"bytesrepr::serialize_account\", serialize_account);\n    // c.bench_function(\"bytesrepr::deserialize_account\", deserialize_account);\n    c.bench_function(\"bytesrepr::serialize_contract\", serialize_contract);\n    c.bench_function(\"bytesrepr::deserialize_contract\", deserialize_contract);\n    c.bench_function(\n        \"bytesrepr::serialize_contract_package\",\n        serialize_contract_package,\n    );\n    c.bench_function(\n        \"bytesrepr::deserialize_contract_package\",\n        deserialize_contract_package,\n    );\n    c.bench_function(\n        \"bytesrepr::serialize_validator_bid\",\n        serialize_validator_bid,\n    );\n    c.bench_function(\"bytesrepr::serialize_delegation_bid\", |b| {\n        serialize_delegation_bid(10, b)\n    });\n    c.bench_function(\"bytesrepr::serialize_bid_small\", |b| serialize_bid(10, b));\n    c.bench_function(\"bytesrepr::serialize_bid_medium\", |b| serialize_bid(100, b));\n    c.bench_function(\"bytesrepr::serialize_bid_big\", |b| serialize_bid(1000, b));\n    c.bench_function(\"bytesrepr::deserialize_bid_small\", |b| {\n        deserialize_bid(10, b)\n    });\n    c.bench_function(\"bytesrepr::deserialize_bid_medium\", |b| {\n        deserialize_bid(100, b)\n    });\n    c.bench_function(\"bytesrepr::deserialize_bid_big\", |b| {\n        deserialize_bid(1000, b)\n    });\n    c.bench_function(\"bytesrepr::serialize_transfer\", serialize_transfer);\n    c.bench_function(\"bytesrepr::deserialize_transfer\", deserialize_transfer);\n    c.bench_function(\"bytesrepr::serialize_deploy_info\", serialize_deploy_info);\n    c.bench_function(\n        \"bytesrepr::deserialize_deploy_info\",\n        deserialize_deploy_info,\n    );\n    
c.bench_function(\"bytesrepr::serialize_era_info\", |b| {\n        serialize_era_info(500, b)\n    });\n    c.bench_function(\"bytesrepr::deserialize_era_info\", |b| {\n        deserialize_era_info(500, b)\n    });\n}\n\ncriterion_group!(benches, bytesrepr_bench);\ncriterion_main!(benches);\n"
  },
  {
    "path": "types/proptest-regressions/stored_value.txt",
    "content": "# Seeds for failure cases proptest has generated in the past. It is\n# automatically read and these particular cases re-run before any\n# novel cases are generated.\n#\n# It is recommended to check this file in to source control so that\n# everyone who runs the test benefits from these saved cases.\ncc 451b981c778518acba99daaa2eb7b621b1882430c7a665ed85ce06446732117e # shrinks to v = RawBytes([])\n"
  },
  {
    "path": "types/src/access_rights.rs",
    "content": "use alloc::{\n    collections::{btree_map::Entry, BTreeMap},\n    vec::Vec,\n};\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\nuse serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer};\n\nuse crate::{bytesrepr, HashAddr, URef, URefAddr};\npub use private::AccessRights;\n\n/// The number of bytes in a serialized [`AccessRights`].\npub const ACCESS_RIGHTS_SERIALIZED_LENGTH: usize = 1;\n\n// Module exists only to restrict the scope of the following `#allow`.\n#[allow(clippy::bad_bit_mask)]\nmod private {\n    use bitflags::bitflags;\n    #[cfg(feature = \"datasize\")]\n    use datasize::DataSize;\n\n    bitflags! {\n        /// A struct which behaves like a set of bitflags to define access rights associated with a\n        /// [`URef`](crate::URef).\n        #[allow(clippy::derived_hash_with_manual_eq)]\n        #[cfg_attr(feature = \"datasize\", derive(DataSize))]\n        pub struct AccessRights: u8 {\n            /// No permissions\n            const NONE = 0;\n            /// Permission to read the value under the associated `URef`.\n            const READ  = 0b001;\n            /// Permission to write a value under the associated `URef`.\n            const WRITE = 0b010;\n            /// Permission to add to the value under the associated `URef`.\n            const ADD   = 0b100;\n            /// Permission to read or add to the value under the associated `URef`.\n            const READ_ADD       = Self::READ.bits | Self::ADD.bits;\n            /// Permission to read or write the value under the associated `URef`.\n            const READ_WRITE     = Self::READ.bits | Self::WRITE.bits;\n            /// Permission to add to, or write the value under the associated `URef`.\n            const ADD_WRITE      = Self::ADD.bits  | Self::WRITE.bits;\n            /// Permission to read, add to, or write the value 
under the associated `URef`.\n            const READ_ADD_WRITE = Self::READ.bits | Self::ADD.bits | Self::WRITE.bits;\n        }\n    }\n}\n\nimpl Default for AccessRights {\n    fn default() -> Self {\n        AccessRights::NONE\n    }\n}\n\nimpl AccessRights {\n    /// Returns `true` if the `READ` flag is set.\n    pub fn is_readable(self) -> bool {\n        self & AccessRights::READ == AccessRights::READ\n    }\n\n    /// Returns `true` if the `WRITE` flag is set.\n    pub fn is_writeable(self) -> bool {\n        self & AccessRights::WRITE == AccessRights::WRITE\n    }\n\n    /// Returns `true` if the `ADD` flag is set.\n    pub fn is_addable(self) -> bool {\n        self & AccessRights::ADD == AccessRights::ADD\n    }\n\n    /// Returns `true` if no flags are set.\n    pub fn is_none(self) -> bool {\n        self == AccessRights::NONE\n    }\n}\n\nimpl Display for AccessRights {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        match *self {\n            AccessRights::NONE => write!(f, \"NONE\"),\n            AccessRights::READ => write!(f, \"READ\"),\n            AccessRights::WRITE => write!(f, \"WRITE\"),\n            AccessRights::ADD => write!(f, \"ADD\"),\n            AccessRights::READ_ADD => write!(f, \"READ_ADD\"),\n            AccessRights::READ_WRITE => write!(f, \"READ_WRITE\"),\n            AccessRights::ADD_WRITE => write!(f, \"ADD_WRITE\"),\n            AccessRights::READ_ADD_WRITE => write!(f, \"READ_ADD_WRITE\"),\n            _ => write!(f, \"UNKNOWN\"),\n        }\n    }\n}\n\nimpl bytesrepr::ToBytes for AccessRights {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.bits().to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        ACCESS_RIGHTS_SERIALIZED_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        writer.push(self.bits());\n        Ok(())\n    }\n}\n\nimpl bytesrepr::FromBytes for AccessRights {\n    fn from_bytes(bytes: 
&[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (id, rem) = u8::from_bytes(bytes)?;\n        match AccessRights::from_bits(id) {\n            Some(rights) => Ok((rights, rem)),\n            None => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\nimpl Serialize for AccessRights {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        self.bits().serialize(serializer)\n    }\n}\n\nimpl<'de> Deserialize<'de> for AccessRights {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        let bits = u8::deserialize(deserializer)?;\n        AccessRights::from_bits(bits).ok_or_else(|| SerdeError::custom(\"invalid bits\"))\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<AccessRights> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> AccessRights {\n        let mut result = AccessRights::NONE;\n        if rng.gen() {\n            result |= AccessRights::READ;\n        }\n        if rng.gen() {\n            result |= AccessRights::WRITE;\n        }\n        if rng.gen() {\n            result |= AccessRights::ADD;\n        }\n        result\n    }\n}\n\n/// Used to indicate if a granted [`URef`] was already held by the context.\n#[derive(Debug, PartialEq, Eq)]\npub enum GrantedAccess {\n    /// No new set of access rights were granted.\n    PreExisting,\n    /// A new set of access rights were granted.\n    Granted {\n        /// The address of the URef.\n        uref_addr: URefAddr,\n        /// The set of the newly granted access rights.\n        newly_granted_access_rights: AccessRights,\n    },\n}\n\n/// Access rights for a given runtime context.\n#[derive(Debug, PartialEq, Eq)]\npub struct ContextAccessRights {\n    hash_addr: HashAddr,\n    access_rights: BTreeMap<URefAddr, AccessRights>,\n}\n\nimpl ContextAccessRights {\n    /// Creates a new instance of access rights from an iterator of URefs merging any duplicates,\n    /// 
taking the union of their rights.\n    pub fn new<T: IntoIterator<Item = URef>>(hash_addr: HashAddr, uref_iter: T) -> Self {\n        let mut context_access_rights = ContextAccessRights {\n            hash_addr,\n            access_rights: BTreeMap::new(),\n        };\n        context_access_rights.do_extend(uref_iter);\n        context_access_rights\n    }\n\n    /// Extend context access rights with access rights.\n    pub fn extend_access_rights(&mut self, access_rights: BTreeMap<URefAddr, AccessRights>) {\n        for (uref_addr, access_rights) in access_rights {\n            match self.access_rights.entry(uref_addr) {\n                Entry::Occupied(rights) => {\n                    *rights.into_mut() = rights.get().union(access_rights);\n                }\n                Entry::Vacant(rights) => {\n                    rights.insert(access_rights);\n                }\n            }\n        }\n    }\n\n    /// Returns the current context key.\n    pub fn context_key(&self) -> HashAddr {\n        self.hash_addr\n    }\n\n    /// Extends the current access rights from a given set of URefs.\n    pub fn extend(&mut self, urefs: &[URef]) {\n        self.do_extend(urefs.iter().copied())\n    }\n\n    /// Extends the current access rights from a given set of URefs.\n    fn do_extend<T: IntoIterator<Item = URef>>(&mut self, uref_iter: T) {\n        for uref in uref_iter {\n            match self.access_rights.entry(uref.addr()) {\n                Entry::Occupied(rights) => {\n                    *rights.into_mut() = rights.get().union(uref.access_rights());\n                }\n                Entry::Vacant(rights) => {\n                    rights.insert(uref.access_rights());\n                }\n            }\n        }\n    }\n\n    /// Checks whether given uref has enough access rights.\n    pub fn has_access_rights_to_uref(&self, uref: &URef) -> bool {\n        if let Some(known_rights) = self.access_rights.get(&uref.addr()) {\n            let rights_to_check = 
uref.access_rights();\n            known_rights.contains(rights_to_check)\n        } else {\n            // URef is not known\n            false\n        }\n    }\n\n    /// Returns a reference to the map of access rights.\n    pub fn access_rights(&self) -> &BTreeMap<URefAddr, AccessRights> {\n        &self.access_rights\n    }\n\n    /// Consume into access rights.\n    pub fn take_access_rights(self) -> BTreeMap<URefAddr, AccessRights> {\n        self.access_rights\n    }\n\n    /// Grants access to a [`URef`]; unless access was pre-existing.\n    pub fn grant_access(&mut self, uref: URef) -> GrantedAccess {\n        match self.access_rights.entry(uref.addr()) {\n            Entry::Occupied(existing_rights) => {\n                let newly_granted_access_rights =\n                    uref.access_rights().difference(*existing_rights.get());\n                *existing_rights.into_mut() = existing_rights.get().union(uref.access_rights());\n                if newly_granted_access_rights.is_none() {\n                    GrantedAccess::PreExisting\n                } else {\n                    GrantedAccess::Granted {\n                        uref_addr: uref.addr(),\n                        newly_granted_access_rights,\n                    }\n                }\n            }\n            Entry::Vacant(rights) => {\n                rights.insert(uref.access_rights());\n                GrantedAccess::Granted {\n                    uref_addr: uref.addr(),\n                    newly_granted_access_rights: uref.access_rights(),\n                }\n            }\n        }\n    }\n\n    /// Remove access for a given `URef`.\n    pub fn remove_access(&mut self, uref_addr: URefAddr, access_rights: AccessRights) {\n        if let Some(current_access_rights) = self.access_rights.get_mut(&uref_addr) {\n            current_access_rights.remove(access_rights)\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::UREF_ADDR_LENGTH;\n\n    const 
ENTITY_HASH: HashAddr = [1u8; 32];\n    const UREF_ADDRESS: [u8; UREF_ADDR_LENGTH] = [1; UREF_ADDR_LENGTH];\n    const UREF_NO_PERMISSIONS: URef = URef::new(UREF_ADDRESS, AccessRights::empty());\n    const UREF_READ: URef = URef::new(UREF_ADDRESS, AccessRights::READ);\n    const UREF_ADD: URef = URef::new(UREF_ADDRESS, AccessRights::ADD);\n    const UREF_WRITE: URef = URef::new(UREF_ADDRESS, AccessRights::WRITE);\n    const UREF_READ_ADD: URef = URef::new(UREF_ADDRESS, AccessRights::READ_ADD);\n    const UREF_READ_ADD_WRITE: URef = URef::new(UREF_ADDRESS, AccessRights::READ_ADD_WRITE);\n\n    fn test_readable(right: AccessRights, is_true: bool) {\n        assert_eq!(right.is_readable(), is_true)\n    }\n\n    #[test]\n    fn test_is_readable() {\n        test_readable(AccessRights::READ, true);\n        test_readable(AccessRights::READ_ADD, true);\n        test_readable(AccessRights::READ_WRITE, true);\n        test_readable(AccessRights::READ_ADD_WRITE, true);\n        test_readable(AccessRights::ADD, false);\n        test_readable(AccessRights::ADD_WRITE, false);\n        test_readable(AccessRights::WRITE, false);\n    }\n\n    fn test_writable(right: AccessRights, is_true: bool) {\n        assert_eq!(right.is_writeable(), is_true)\n    }\n\n    #[test]\n    fn test_is_writable() {\n        test_writable(AccessRights::WRITE, true);\n        test_writable(AccessRights::READ_WRITE, true);\n        test_writable(AccessRights::ADD_WRITE, true);\n        test_writable(AccessRights::READ, false);\n        test_writable(AccessRights::ADD, false);\n        test_writable(AccessRights::READ_ADD, false);\n        test_writable(AccessRights::READ_ADD_WRITE, true);\n    }\n\n    fn test_addable(right: AccessRights, is_true: bool) {\n        assert_eq!(right.is_addable(), is_true)\n    }\n\n    #[test]\n    fn test_is_addable() {\n        test_addable(AccessRights::ADD, true);\n        test_addable(AccessRights::READ_ADD, true);\n        test_addable(AccessRights::READ_WRITE, 
false);\n        test_addable(AccessRights::ADD_WRITE, true);\n        test_addable(AccessRights::READ, false);\n        test_addable(AccessRights::WRITE, false);\n        test_addable(AccessRights::READ_ADD_WRITE, true);\n    }\n\n    #[test]\n    fn should_check_has_access_rights_to_uref() {\n        let context_rights = ContextAccessRights::new(ENTITY_HASH, vec![UREF_READ_ADD]);\n        assert!(context_rights.has_access_rights_to_uref(&UREF_READ_ADD));\n        assert!(context_rights.has_access_rights_to_uref(&UREF_READ));\n        assert!(context_rights.has_access_rights_to_uref(&UREF_ADD));\n        assert!(context_rights.has_access_rights_to_uref(&UREF_NO_PERMISSIONS));\n    }\n\n    #[test]\n    fn should_check_does_not_have_access_rights_to_uref() {\n        let context_rights = ContextAccessRights::new(ENTITY_HASH, vec![UREF_READ_ADD]);\n        assert!(!context_rights.has_access_rights_to_uref(&UREF_READ_ADD_WRITE));\n        assert!(!context_rights\n            .has_access_rights_to_uref(&URef::new([2; UREF_ADDR_LENGTH], AccessRights::empty())));\n    }\n\n    #[test]\n    fn should_extend_access_rights() {\n        // Start with uref with no permissions.\n        let mut context_rights = ContextAccessRights::new(ENTITY_HASH, vec![UREF_NO_PERMISSIONS]);\n        let mut expected_rights = BTreeMap::new();\n        expected_rights.insert(UREF_ADDRESS, AccessRights::empty());\n        assert_eq!(context_rights.access_rights, expected_rights);\n\n        // Extend with a READ_ADD: should merge to single READ_ADD.\n        context_rights.extend(&[UREF_READ_ADD]);\n        *expected_rights.get_mut(&UREF_ADDRESS).unwrap() = AccessRights::READ_ADD;\n        assert_eq!(context_rights.access_rights, expected_rights);\n\n        // Extend with a READ: should have no observable effect.\n        context_rights.extend(&[UREF_READ]);\n        assert_eq!(context_rights.access_rights, expected_rights);\n\n        // Extend with a WRITE: should merge to single 
READ_ADD_WRITE.\n        context_rights.extend(&[UREF_WRITE]);\n        *expected_rights.get_mut(&UREF_ADDRESS).unwrap() = AccessRights::READ_ADD_WRITE;\n        assert_eq!(context_rights.access_rights, expected_rights);\n    }\n\n    #[test]\n    fn should_perform_union_of_access_rights_in_new() {\n        let context_rights =\n            ContextAccessRights::new(ENTITY_HASH, vec![UREF_NO_PERMISSIONS, UREF_READ, UREF_ADD]);\n\n        // Expect the three discrete URefs' rights to be unioned into READ_ADD.\n        let mut expected_rights = BTreeMap::new();\n        expected_rights.insert(UREF_ADDRESS, AccessRights::READ_ADD);\n        assert_eq!(context_rights.access_rights, expected_rights);\n    }\n\n    #[test]\n    fn should_grant_access_rights() {\n        let mut context_rights = ContextAccessRights::new(ENTITY_HASH, vec![UREF_READ_ADD]);\n        let granted_access = context_rights.grant_access(UREF_READ);\n        assert_eq!(granted_access, GrantedAccess::PreExisting);\n        let granted_access = context_rights.grant_access(UREF_READ_ADD_WRITE);\n        assert_eq!(\n            granted_access,\n            GrantedAccess::Granted {\n                uref_addr: UREF_ADDRESS,\n                newly_granted_access_rights: AccessRights::WRITE\n            }\n        );\n        assert!(context_rights.has_access_rights_to_uref(&UREF_READ_ADD_WRITE));\n        let new_uref = URef::new([3; 32], AccessRights::all());\n        let granted_access = context_rights.grant_access(new_uref);\n        assert_eq!(\n            granted_access,\n            GrantedAccess::Granted {\n                uref_addr: new_uref.addr(),\n                newly_granted_access_rights: AccessRights::all()\n            }\n        );\n        assert!(context_rights.has_access_rights_to_uref(&new_uref));\n    }\n\n    #[test]\n    fn should_remove_access_rights() {\n        let mut context_rights = ContextAccessRights::new(ENTITY_HASH, vec![UREF_READ_ADD_WRITE]);\n        
assert!(context_rights.has_access_rights_to_uref(&UREF_READ_ADD_WRITE));\n\n        // Strip write access from the context rights.\n        context_rights.remove_access(UREF_ADDRESS, AccessRights::WRITE);\n        assert!(\n            !context_rights.has_access_rights_to_uref(&UREF_READ_ADD_WRITE),\n            \"Write access should have been removed\"\n        );\n\n        // Strip the access again to ensure that the bit is not flipped back.\n        context_rights.remove_access(UREF_ADDRESS, AccessRights::WRITE);\n        assert!(\n            !context_rights.has_access_rights_to_uref(&UREF_READ_ADD_WRITE),\n            \"Write access should not have been granted back\"\n        );\n        assert!(\n            context_rights.has_access_rights_to_uref(&UREF_READ_ADD),\n            \"Read and add access should be preserved.\"\n        );\n\n        // Strip both read and add access from the context rights.\n        context_rights.remove_access(UREF_ADDRESS, AccessRights::READ_ADD);\n        assert!(\n            !context_rights.has_access_rights_to_uref(&UREF_READ_ADD),\n            \"Read and add access should have been removed\"\n        );\n        assert!(\n            context_rights.has_access_rights_to_uref(&UREF_NO_PERMISSIONS),\n            \"The access rights should be empty\"\n        );\n    }\n}\n"
  },
  {
    "path": "types/src/account/account_hash.rs",
    "content": "use alloc::{string::String, vec::Vec};\nuse core::{\n    convert::{From, TryFrom},\n    fmt::{Debug, Display, Formatter},\n};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer};\n\nuse crate::{\n    addressable_entity::FromStrError,\n    bytesrepr::{Error, FromBytes, ToBytes},\n    checksummed_hex, crypto, CLType, CLTyped, PublicKey, BLAKE2B_DIGEST_LENGTH,\n};\n\n/// The length in bytes of a [`AccountHash`].\npub const ACCOUNT_HASH_LENGTH: usize = 32;\n/// The prefix applied to the hex-encoded `AccountHash` to produce a formatted string\n/// representation.\npub const ACCOUNT_HASH_FORMATTED_STRING_PREFIX: &str = \"account-hash-\";\n\n/// A newtype wrapping an array which contains the raw bytes of\n/// the AccountHash, a hash of Public Key and Algorithm\n#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"Account hash as a formatted string.\")\n)]\npub struct AccountHash(\n    #[cfg_attr(feature = \"json-schema\", schemars(skip, with = \"String\"))]\n    pub  [u8; ACCOUNT_HASH_LENGTH],\n);\n\nimpl AccountHash {\n    /// Constructs a new `AccountHash` instance from the raw bytes of an Public Key Account Hash.\n    pub const fn new(value: [u8; ACCOUNT_HASH_LENGTH]) -> AccountHash {\n        AccountHash(value)\n    }\n\n    /// Returns the raw bytes of the account hash as an array.\n    pub fn value(&self) -> [u8; ACCOUNT_HASH_LENGTH] {\n        self.0\n    }\n\n    /// Returns the raw bytes of the account hash as a `slice`.\n    pub fn as_bytes(&self) -> &[u8] {\n        &self.0\n    }\n\n    /// Formats the 
`AccountHash` for users getting and putting.\n    pub fn to_formatted_string(self) -> String {\n        format!(\n            \"{}{}\",\n            ACCOUNT_HASH_FORMATTED_STRING_PREFIX,\n            base16::encode_lower(&self.0),\n        )\n    }\n\n    /// Hexadecimal representation of the hash.\n    pub fn to_hex_string(&self) -> String {\n        base16::encode_lower(&self.0)\n    }\n\n    /// Parses a string formatted as per `Self::to_formatted_string()` into an `AccountHash`.\n    pub fn from_formatted_str(input: &str) -> Result<Self, FromStrError> {\n        let remainder = input\n            .strip_prefix(ACCOUNT_HASH_FORMATTED_STRING_PREFIX)\n            .ok_or(FromStrError::InvalidPrefix)?;\n        let bytes =\n            <[u8; ACCOUNT_HASH_LENGTH]>::try_from(checksummed_hex::decode(remainder)?.as_ref())?;\n        Ok(AccountHash(bytes))\n    }\n\n    /// Parses a `PublicKey` and outputs the corresponding account hash.\n    pub fn from_public_key(\n        public_key: &PublicKey,\n        blake2b_hash_fn: impl Fn(Vec<u8>) -> [u8; BLAKE2B_DIGEST_LENGTH],\n    ) -> Self {\n        const SYSTEM_LOWERCASE: &str = \"system\";\n        const ED25519_LOWERCASE: &str = \"ed25519\";\n        const SECP256K1_LOWERCASE: &str = \"secp256k1\";\n\n        let algorithm_name = match public_key {\n            PublicKey::System => SYSTEM_LOWERCASE,\n            PublicKey::Ed25519(_) => ED25519_LOWERCASE,\n            PublicKey::Secp256k1(_) => SECP256K1_LOWERCASE,\n        };\n        let public_key_bytes: Vec<u8> = public_key.into();\n\n        // Prepare preimage based on the public key parameters.\n        let preimage = {\n            let mut data = Vec::with_capacity(algorithm_name.len() + public_key_bytes.len() + 1);\n            data.extend(algorithm_name.as_bytes());\n            data.push(0);\n            data.extend(public_key_bytes);\n            data\n        };\n        // Hash the preimage data using blake2b256 and return it.\n        let digest = 
blake2b_hash_fn(preimage);\n        Self::new(digest)\n    }\n}\n\nimpl Serialize for AccountHash {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            self.to_formatted_string().serialize(serializer)\n        } else {\n            self.0.serialize(serializer)\n        }\n    }\n}\n\nimpl<'de> Deserialize<'de> for AccountHash {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        if deserializer.is_human_readable() {\n            let formatted_string = String::deserialize(deserializer)?;\n            AccountHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom)\n        } else {\n            let bytes = <[u8; ACCOUNT_HASH_LENGTH]>::deserialize(deserializer)?;\n            Ok(AccountHash(bytes))\n        }\n    }\n}\n\nimpl TryFrom<&[u8]> for AccountHash {\n    type Error = TryFromSliceForAccountHashError;\n\n    fn try_from(bytes: &[u8]) -> Result<Self, TryFromSliceForAccountHashError> {\n        <[u8; ACCOUNT_HASH_LENGTH]>::try_from(bytes)\n            .map(AccountHash::new)\n            .map_err(|_| TryFromSliceForAccountHashError(()))\n    }\n}\n\nimpl TryFrom<&alloc::vec::Vec<u8>> for AccountHash {\n    type Error = TryFromSliceForAccountHashError;\n\n    fn try_from(bytes: &Vec<u8>) -> Result<Self, Self::Error> {\n        <[u8; ACCOUNT_HASH_LENGTH]>::try_from(bytes as &[u8])\n            .map(AccountHash::new)\n            .map_err(|_| TryFromSliceForAccountHashError(()))\n    }\n}\n\nimpl From<&PublicKey> for AccountHash {\n    fn from(public_key: &PublicKey) -> Self {\n        AccountHash::from_public_key(public_key, crypto::blake2b)\n    }\n}\n\nimpl Display for AccountHash {\n    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {\n        write!(f, \"{}\", base16::encode_lower(&self.0))\n    }\n}\n\nimpl Debug for AccountHash {\n    fn fmt(&self, f: &mut Formatter) -> core::fmt::Result {\n        
write!(f, \"AccountHash({})\", base16::encode_lower(&self.0))\n    }\n}\n\nimpl CLTyped for AccountHash {\n    fn cl_type() -> CLType {\n        CLType::ByteArray(ACCOUNT_HASH_LENGTH as u32)\n    }\n}\n\nimpl ToBytes for AccountHash {\n    #[inline(always)]\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        self.0.to_bytes()\n    }\n\n    #[inline(always)]\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n\n    #[inline(always)]\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        writer.extend_from_slice(&self.0);\n        Ok(())\n    }\n}\n\nimpl FromBytes for AccountHash {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (bytes, rem) = FromBytes::from_bytes(bytes)?;\n        Ok((AccountHash::new(bytes), rem))\n    }\n}\n\nimpl AsRef<[u8]> for AccountHash {\n    fn as_ref(&self) -> &[u8] {\n        self.0.as_ref()\n    }\n}\n\n/// Associated error type of `TryFrom<&[u8]>` for [`AccountHash`].\n#[derive(Debug)]\npub struct TryFromSliceForAccountHashError(());\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<AccountHash> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> AccountHash {\n        AccountHash::new(rng.gen())\n    }\n}\n"
  },
  {
    "path": "types/src/account/action_thresholds.rs",
    "content": "//! This module contains types and functions for managing action thresholds.\n\nuse alloc::vec::Vec;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    account::{ActionType, SetThresholdFailure, Weight},\n    addressable_entity::WEIGHT_SERIALIZED_LENGTH,\n    bytesrepr::{self, Error, FromBytes, ToBytes},\n};\n\n/// Thresholds that have to be met when executing an action of a certain type.\n#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[cfg_attr(feature = \"json-schema\", schemars(rename = \"AccountActionThresholds\"))]\npub struct ActionThresholds {\n    /// Threshold for deploy execution.\n    pub deployment: Weight,\n    /// Threshold for managing action threshold.\n    pub key_management: Weight,\n}\n\nimpl ActionThresholds {\n    /// Creates new ActionThresholds object with provided weights\n    ///\n    /// Requires deployment threshold to be lower than or equal to\n    /// key management threshold.\n    pub fn new(\n        deployment: Weight,\n        key_management: Weight,\n    ) -> Result<ActionThresholds, SetThresholdFailure> {\n        if deployment > key_management {\n            return Err(SetThresholdFailure::DeploymentThreshold);\n        }\n        Ok(ActionThresholds {\n            deployment,\n            key_management,\n        })\n    }\n    /// Sets new threshold for [ActionType::Deployment].\n    /// Should return an error if setting new threshold for `action_type` breaks\n    /// one of the invariants. 
Currently, invariant is that\n    /// `ActionType::Deployment` threshold shouldn't be higher than any\n    /// other, which should be checked both when increasing `Deployment`\n    /// threshold and decreasing the other.\n    pub fn set_deployment_threshold(\n        &mut self,\n        new_threshold: Weight,\n    ) -> Result<(), SetThresholdFailure> {\n        if new_threshold > self.key_management {\n            Err(SetThresholdFailure::DeploymentThreshold)\n        } else {\n            self.deployment = new_threshold;\n            Ok(())\n        }\n    }\n\n    /// Sets new threshold for [ActionType::KeyManagement].\n    pub fn set_key_management_threshold(\n        &mut self,\n        new_threshold: Weight,\n    ) -> Result<(), SetThresholdFailure> {\n        if self.deployment > new_threshold {\n            Err(SetThresholdFailure::KeyManagementThreshold)\n        } else {\n            self.key_management = new_threshold;\n            Ok(())\n        }\n    }\n\n    /// Returns the deployment action threshold.\n    pub fn deployment(&self) -> &Weight {\n        &self.deployment\n    }\n\n    /// Returns key management action threshold.\n    pub fn key_management(&self) -> &Weight {\n        &self.key_management\n    }\n\n    /// Unified function that takes an action type, and changes appropriate\n    /// threshold defined by the [ActionType] variants.\n    pub fn set_threshold(\n        &mut self,\n        action_type: ActionType,\n        new_threshold: Weight,\n    ) -> Result<(), SetThresholdFailure> {\n        match action_type {\n            ActionType::Deployment => self.set_deployment_threshold(new_threshold),\n            ActionType::KeyManagement => self.set_key_management_threshold(new_threshold),\n        }\n    }\n}\n\nimpl Default for ActionThresholds {\n    fn default() -> Self {\n        ActionThresholds {\n            deployment: Weight::new(1),\n            key_management: Weight::new(1),\n        }\n    }\n}\n\nimpl ToBytes for 
ActionThresholds {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut result = bytesrepr::unchecked_allocate_buffer(self);\n        result.append(&mut self.deployment.to_bytes()?);\n        result.append(&mut self.key_management.to_bytes()?);\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        2 * WEIGHT_SERIALIZED_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.deployment().write_bytes(writer)?;\n        self.key_management().write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for ActionThresholds {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (deployment, rem) = Weight::from_bytes(bytes)?;\n        let (key_management, rem) = Weight::from_bytes(rem)?;\n        let ret = ActionThresholds {\n            deployment,\n            key_management,\n        };\n        Ok((ret, rem))\n    }\n}\n\n#[doc(hidden)]\n#[cfg(any(feature = \"testing\", feature = \"gens\", test))]\npub mod gens {\n    use proptest::prelude::*;\n\n    use super::ActionThresholds;\n\n    pub fn account_action_thresholds_arb() -> impl Strategy<Value = ActionThresholds> {\n        Just(Default::default())\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn should_create_new_action_thresholds() {\n        let action_thresholds = ActionThresholds::new(Weight::new(1), Weight::new(42)).unwrap();\n        assert_eq!(*action_thresholds.deployment(), Weight::new(1));\n        assert_eq!(*action_thresholds.key_management(), Weight::new(42));\n    }\n\n    #[test]\n    fn should_not_create_action_thresholds_with_invalid_deployment_threshold() {\n        // deployment cant be greater than key management\n        assert!(ActionThresholds::new(Weight::new(5), Weight::new(1)).is_err());\n    }\n\n    #[test]\n    fn serialization_roundtrip() {\n        let action_thresholds = ActionThresholds::new(Weight::new(1), 
Weight::new(42)).unwrap();\n        bytesrepr::test_serialization_roundtrip(&action_thresholds);\n    }\n}\n"
  },
  {
    "path": "types/src/account/action_type.rs",
    "content": "use core::convert::TryFrom;\n\nuse crate::addressable_entity::TryFromIntError;\n\n/// The various types of action which can be performed in the context of a given account.\n#[repr(u32)]\npub enum ActionType {\n    /// Represents performing a deploy.\n    Deployment = 0,\n    /// Represents changing the associated keys (i.e. map of [`AccountHash`](super::AccountHash)s\n    /// to [`Weight`](super::Weight)s) or action thresholds (i.e. the total\n    /// [`Weight`](super::Weight)s of signing [`AccountHash`](super::AccountHash)s required to\n    /// perform various actions).\n    KeyManagement = 1,\n}\n\n// This conversion is not intended to be used by third party crates.\n#[doc(hidden)]\nimpl TryFrom<u32> for ActionType {\n    type Error = TryFromIntError;\n\n    fn try_from(value: u32) -> Result<Self, Self::Error> {\n        // This doesn't use `num_derive` traits such as FromPrimitive and ToPrimitive\n        // that helps to automatically create `from_u32` and `to_u32`. This approach\n        // gives better control over generated code.\n        match value {\n            d if d == ActionType::Deployment as u32 => Ok(ActionType::Deployment),\n            d if d == ActionType::KeyManagement as u32 => Ok(ActionType::KeyManagement),\n            _ => Err(TryFromIntError(())),\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/account/associated_keys.rs",
    "content": "//! This module contains types and functions for working with keys associated with an account.\n\nuse alloc::{\n    collections::{btree_map::Entry, BTreeMap, BTreeSet},\n    vec::Vec,\n};\nuse core::{\n    fmt,\n    fmt::{Display, Formatter},\n};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n#[cfg(feature = \"json-schema\")]\nuse serde_map_to_array::KeyValueJsonSchema;\nuse serde_map_to_array::{BTreeMapToArray, KeyValueLabels};\n\nuse crate::{\n    account::{AccountHash, TryFromIntError, Weight},\n    bytesrepr::{self, FromBytes, ToBytes},\n};\n\n/// Errors that can occur while adding a new [`AccountHash`] to an account's associated keys map.\n#[derive(PartialEq, Eq, Debug, Copy, Clone)]\n#[repr(i32)]\n#[non_exhaustive]\npub enum AddKeyFailure {\n    /// There are already maximum [`AccountHash`]s associated with the given account.\n    MaxKeysLimit = 1,\n    /// The given [`AccountHash`] is already associated with the given account.\n    DuplicateKey = 2,\n    /// Caller doesn't have sufficient permissions to associate a new [`AccountHash`] with the\n    /// given account.\n    PermissionDenied = 3,\n}\n\nimpl Display for AddKeyFailure {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            AddKeyFailure::MaxKeysLimit => formatter.write_str(\n                \"Unable to add new associated key because maximum amount of keys is reached\",\n            ),\n            AddKeyFailure::DuplicateKey => formatter\n                .write_str(\"Unable to add new associated key because given key already exists\"),\n            AddKeyFailure::PermissionDenied => formatter\n                .write_str(\"Unable to add new associated key due to insufficient permissions\"),\n        }\n    }\n}\n\n// This conversion is not intended to be used by third party crates.\n#[doc(hidden)]\nimpl TryFrom<i32> for 
AddKeyFailure {\n    type Error = TryFromIntError;\n\n    fn try_from(value: i32) -> Result<Self, Self::Error> {\n        match value {\n            d if d == AddKeyFailure::MaxKeysLimit as i32 => Ok(AddKeyFailure::MaxKeysLimit),\n            d if d == AddKeyFailure::DuplicateKey as i32 => Ok(AddKeyFailure::DuplicateKey),\n            d if d == AddKeyFailure::PermissionDenied as i32 => Ok(AddKeyFailure::PermissionDenied),\n            _ => Err(TryFromIntError(())),\n        }\n    }\n}\n\n/// Errors that can occur while removing a [`AccountHash`] from an account's associated keys map.\n#[derive(Debug, Eq, PartialEq, Copy, Clone)]\n#[repr(i32)]\n#[non_exhaustive]\npub enum RemoveKeyFailure {\n    /// The given [`AccountHash`] is not associated with the given account.\n    MissingKey = 1,\n    /// Caller doesn't have sufficient permissions to remove an associated [`AccountHash`] from the\n    /// given account.\n    PermissionDenied = 2,\n    /// Removing the given associated [`AccountHash`] would cause the total weight of all remaining\n    /// `AccountHash`s to fall below one of the action thresholds for the given account.\n    ThresholdViolation = 3,\n}\n\nimpl Display for RemoveKeyFailure {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            RemoveKeyFailure::MissingKey => {\n                formatter.write_str(\"Unable to remove a key that does not exist\")\n            }\n            RemoveKeyFailure::PermissionDenied => formatter\n                .write_str(\"Unable to remove associated key due to insufficient permissions\"),\n            RemoveKeyFailure::ThresholdViolation => formatter.write_str(\n                \"Unable to remove a key which would violate action threshold constraints\",\n            ),\n        }\n    }\n}\n\n// This conversion is not intended to be used by third party crates.\n#[doc(hidden)]\nimpl TryFrom<i32> for RemoveKeyFailure {\n    type Error = TryFromIntError;\n\n    fn try_from(value: 
i32) -> Result<Self, Self::Error> {\n        match value {\n            d if d == RemoveKeyFailure::MissingKey as i32 => Ok(RemoveKeyFailure::MissingKey),\n            d if d == RemoveKeyFailure::PermissionDenied as i32 => {\n                Ok(RemoveKeyFailure::PermissionDenied)\n            }\n            d if d == RemoveKeyFailure::ThresholdViolation as i32 => {\n                Ok(RemoveKeyFailure::ThresholdViolation)\n            }\n            _ => Err(TryFromIntError(())),\n        }\n    }\n}\n\n/// Errors that can occur while updating the [`crate::addressable_entity::Weight`] of a\n/// [`AccountHash`] in an account's associated keys map.\n#[derive(PartialEq, Eq, Debug, Copy, Clone)]\n#[repr(i32)]\n#[non_exhaustive]\npub enum UpdateKeyFailure {\n    /// The given [`AccountHash`] is not associated with the given account.\n    MissingKey = 1,\n    /// Caller doesn't have sufficient permissions to update an associated [`AccountHash`] from the\n    /// given account.\n    PermissionDenied = 2,\n    /// Updating the [`crate::addressable_entity::Weight`] of the given associated [`AccountHash`]\n    /// would cause the total weight of all `AccountHash`s to fall below one of the action\n    /// thresholds for the given account.\n    ThresholdViolation = 3,\n}\n\nimpl Display for UpdateKeyFailure {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            UpdateKeyFailure::MissingKey => formatter.write_str(\n                \"Unable to update the value under an associated key that does not exist\",\n            ),\n            UpdateKeyFailure::PermissionDenied => formatter\n                .write_str(\"Unable to update associated key due to insufficient permissions\"),\n            UpdateKeyFailure::ThresholdViolation => formatter.write_str(\n                \"Unable to update weight that would fall below any of action thresholds\",\n            ),\n        }\n    }\n}\n\n// This conversion is not intended to be used by third 
party crates.\n#[doc(hidden)]\nimpl TryFrom<i32> for UpdateKeyFailure {\n    type Error = TryFromIntError;\n\n    fn try_from(value: i32) -> Result<Self, Self::Error> {\n        match value {\n            d if d == UpdateKeyFailure::MissingKey as i32 => Ok(UpdateKeyFailure::MissingKey),\n            d if d == UpdateKeyFailure::PermissionDenied as i32 => {\n                Ok(UpdateKeyFailure::PermissionDenied)\n            }\n            d if d == UpdateKeyFailure::ThresholdViolation as i32 => {\n                Ok(UpdateKeyFailure::ThresholdViolation)\n            }\n            _ => Err(TryFromIntError(())),\n        }\n    }\n}\n\n/// A collection of weighted public keys (represented as account hashes) associated with an account.\n#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[cfg_attr(feature = \"json-schema\", schemars(rename = \"AccountAssociatedKeys\"))]\n#[serde(deny_unknown_fields)]\n#[rustfmt::skip]\npub struct AssociatedKeys(\n    #[serde(with = \"BTreeMapToArray::<AccountHash, Weight, Labels>\")]\n    BTreeMap<AccountHash, Weight>,\n);\n\nimpl AssociatedKeys {\n    /// Constructs a new AssociatedKeys.\n    pub fn new(key: AccountHash, weight: Weight) -> AssociatedKeys {\n        let mut bt: BTreeMap<AccountHash, Weight> = BTreeMap::new();\n        bt.insert(key, weight);\n        AssociatedKeys(bt)\n    }\n\n    /// Adds a new AssociatedKey to the set.\n    ///\n    /// Returns true if added successfully, false otherwise.\n    pub fn add_key(&mut self, key: AccountHash, weight: Weight) -> Result<(), AddKeyFailure> {\n        match self.0.entry(key) {\n            Entry::Vacant(entry) => {\n                entry.insert(weight);\n            }\n            Entry::Occupied(_) => return Err(AddKeyFailure::DuplicateKey),\n        }\n        Ok(())\n    }\n\n    /// Removes key from the associated keys 
set.\n    /// Returns true if value was found in the set prior to the removal, false\n    /// otherwise.\n    pub fn remove_key(&mut self, key: &AccountHash) -> Result<(), RemoveKeyFailure> {\n        self.0\n            .remove(key)\n            .map(|_| ())\n            .ok_or(RemoveKeyFailure::MissingKey)\n    }\n\n    /// Adds new AssociatedKey to the set.\n    /// Returns true if added successfully, false otherwise.\n    pub fn update_key(&mut self, key: AccountHash, weight: Weight) -> Result<(), UpdateKeyFailure> {\n        match self.0.entry(key) {\n            Entry::Vacant(_) => {\n                return Err(UpdateKeyFailure::MissingKey);\n            }\n            Entry::Occupied(mut entry) => {\n                *entry.get_mut() = weight;\n            }\n        }\n        Ok(())\n    }\n\n    /// Returns the weight of an account hash.\n    pub fn get(&self, key: &AccountHash) -> Option<&Weight> {\n        self.0.get(key)\n    }\n\n    /// Returns `true` if a given key exists.\n    pub fn contains_key(&self, key: &AccountHash) -> bool {\n        self.0.contains_key(key)\n    }\n\n    /// Returns an iterator over the account hash and the weights.\n    pub fn iter(&self) -> impl Iterator<Item = (&AccountHash, &Weight)> {\n        self.0.iter()\n    }\n\n    /// Returns the count of the associated keys.\n    pub fn len(&self) -> usize {\n        self.0.len()\n    }\n\n    /// Returns `true` if the associated keys are empty.\n    pub fn is_empty(&self) -> bool {\n        self.0.is_empty()\n    }\n\n    /// Helper method that calculates weight for keys that comes from any\n    /// source.\n    ///\n    /// This method is not concerned about uniqueness of the passed iterable.\n    /// Uniqueness is determined based on the input collection properties,\n    /// which is either BTreeSet (in [`AssociatedKeys::calculate_keys_weight`])\n    /// or BTreeMap (in [`AssociatedKeys::total_keys_weight`]).\n    fn calculate_any_keys_weight<'a>(&self, keys: impl 
Iterator<Item = &'a AccountHash>) -> Weight {\n        let total = keys\n            .filter_map(|key| self.0.get(key))\n            .fold(0u8, |acc, w| acc.saturating_add(w.value()));\n\n        Weight::new(total)\n    }\n\n    /// Calculates total weight of authorization keys provided by an argument\n    pub fn calculate_keys_weight(&self, authorization_keys: &BTreeSet<AccountHash>) -> Weight {\n        self.calculate_any_keys_weight(authorization_keys.iter())\n    }\n\n    /// Calculates total weight of all authorization keys\n    pub fn total_keys_weight(&self) -> Weight {\n        self.calculate_any_keys_weight(self.0.keys())\n    }\n\n    /// Calculates total weight of all authorization keys excluding a given key\n    pub fn total_keys_weight_excluding(&self, account_hash: AccountHash) -> Weight {\n        self.calculate_any_keys_weight(self.0.keys().filter(|&&element| element != account_hash))\n    }\n}\n\nimpl From<BTreeMap<AccountHash, Weight>> for AssociatedKeys {\n    fn from(associated_keys: BTreeMap<AccountHash, Weight>) -> Self {\n        Self(associated_keys)\n    }\n}\n\nimpl From<AssociatedKeys> for BTreeMap<AccountHash, Weight> {\n    fn from(associated_keys: AssociatedKeys) -> Self {\n        associated_keys.0\n    }\n}\n\nimpl ToBytes for AssociatedKeys {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.0.write_bytes(writer)\n    }\n}\n\nimpl FromBytes for AssociatedKeys {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (associated_keys, rem) = FromBytes::from_bytes(bytes)?;\n        Ok((AssociatedKeys(associated_keys), rem))\n    }\n}\n\nstruct Labels;\n\nimpl KeyValueLabels for Labels {\n    const KEY: &'static str = \"account_hash\";\n    const VALUE: &'static str = 
\"weight\";\n}\n\n#[cfg(feature = \"json-schema\")]\nimpl KeyValueJsonSchema for Labels {\n    const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some(\"AssociatedKey\");\n    const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> = Some(\"A weighted public key.\");\n    const JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> =\n        Some(\"The account hash of the public key.\");\n    const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> =\n        Some(\"The weight assigned to the public key.\");\n}\n\n#[doc(hidden)]\n#[cfg(any(feature = \"testing\", feature = \"gens\", test))]\npub mod gens {\n    use proptest::prelude::*;\n\n    use crate::gens::{account_hash_arb, account_weight_arb};\n\n    use super::AssociatedKeys;\n\n    pub fn account_associated_keys_arb() -> impl Strategy<Value = AssociatedKeys> {\n        proptest::collection::btree_map(account_hash_arb(), account_weight_arb(), 10).prop_map(\n            |keys| {\n                let mut associated_keys = AssociatedKeys::default();\n                keys.into_iter().for_each(|(k, v)| {\n                    associated_keys.add_key(k, v).unwrap();\n                });\n                associated_keys\n            },\n        )\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::{collections::BTreeSet, iter::FromIterator};\n\n    use crate::{\n        account::{AccountHash, Weight, ACCOUNT_HASH_LENGTH},\n        bytesrepr,\n    };\n\n    use super::*;\n\n    #[test]\n    fn associated_keys_add() {\n        let mut keys =\n            AssociatedKeys::new(AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]), Weight::new(1));\n        let new_pk = AccountHash::new([1u8; ACCOUNT_HASH_LENGTH]);\n        let new_pk_weight = Weight::new(2);\n        assert!(keys.add_key(new_pk, new_pk_weight).is_ok());\n        assert_eq!(keys.get(&new_pk), Some(&new_pk_weight))\n    }\n\n    #[test]\n    fn associated_keys_add_duplicate() {\n        let pk = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]);\n        let weight = 
Weight::new(1);\n        let mut keys = AssociatedKeys::new(pk, weight);\n        assert_eq!(\n            keys.add_key(pk, Weight::new(10)),\n            Err(AddKeyFailure::DuplicateKey)\n        );\n        assert_eq!(keys.get(&pk), Some(&weight));\n    }\n\n    #[test]\n    fn associated_keys_remove() {\n        let pk = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]);\n        let weight = Weight::new(1);\n        let mut keys = AssociatedKeys::new(pk, weight);\n        assert!(keys.remove_key(&pk).is_ok());\n        assert!(keys\n            .remove_key(&AccountHash::new([1u8; ACCOUNT_HASH_LENGTH]))\n            .is_err());\n    }\n\n    #[test]\n    fn associated_keys_update() {\n        let pk1 = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]);\n        let pk2 = AccountHash::new([1u8; ACCOUNT_HASH_LENGTH]);\n        let weight = Weight::new(1);\n        let mut keys = AssociatedKeys::new(pk1, weight);\n        assert!(matches!(\n            keys.update_key(pk2, Weight::new(2))\n                .expect_err(\"should get error\"),\n            UpdateKeyFailure::MissingKey\n        ));\n        keys.add_key(pk2, Weight::new(1)).unwrap();\n        assert_eq!(keys.get(&pk2), Some(&Weight::new(1)));\n        keys.update_key(pk2, Weight::new(2)).unwrap();\n        assert_eq!(keys.get(&pk2), Some(&Weight::new(2)));\n    }\n\n    #[test]\n    fn associated_keys_calculate_keys_once() {\n        let key_1 = AccountHash::new([0; 32]);\n        let key_2 = AccountHash::new([1; 32]);\n        let key_3 = AccountHash::new([2; 32]);\n        let mut keys = AssociatedKeys::default();\n\n        keys.add_key(key_2, Weight::new(2))\n            .expect(\"should add key_1\");\n        keys.add_key(key_1, Weight::new(1))\n            .expect(\"should add key_1\");\n        keys.add_key(key_3, Weight::new(3))\n            .expect(\"should add key_1\");\n\n        assert_eq!(\n            keys.calculate_keys_weight(&BTreeSet::from_iter(vec![\n                key_1, key_2, key_3, key_1, 
key_2, key_3,\n            ])),\n            Weight::new(1 + 2 + 3)\n        );\n    }\n\n    #[test]\n    fn associated_keys_total_weight() {\n        let associated_keys = {\n            let mut res = AssociatedKeys::new(AccountHash::new([1u8; 32]), Weight::new(1));\n            res.add_key(AccountHash::new([2u8; 32]), Weight::new(11))\n                .expect(\"should add key 1\");\n            res.add_key(AccountHash::new([3u8; 32]), Weight::new(12))\n                .expect(\"should add key 2\");\n            res.add_key(AccountHash::new([4u8; 32]), Weight::new(13))\n                .expect(\"should add key 3\");\n            res\n        };\n        assert_eq!(\n            associated_keys.total_keys_weight(),\n            Weight::new(1 + 11 + 12 + 13)\n        );\n    }\n\n    #[test]\n    fn associated_keys_total_weight_excluding() {\n        let identity_key = AccountHash::new([1u8; 32]);\n        let identity_key_weight = Weight::new(1);\n\n        let key_1 = AccountHash::new([2u8; 32]);\n        let key_1_weight = Weight::new(11);\n\n        let key_2 = AccountHash::new([3u8; 32]);\n        let key_2_weight = Weight::new(12);\n\n        let key_3 = AccountHash::new([4u8; 32]);\n        let key_3_weight = Weight::new(13);\n\n        let associated_keys = {\n            let mut res = AssociatedKeys::new(identity_key, identity_key_weight);\n            res.add_key(key_1, key_1_weight).expect(\"should add key 1\");\n            res.add_key(key_2, key_2_weight).expect(\"should add key 2\");\n            res.add_key(key_3, key_3_weight).expect(\"should add key 3\");\n            res\n        };\n        assert_eq!(\n            associated_keys.total_keys_weight_excluding(key_2),\n            Weight::new(identity_key_weight.value() + key_1_weight.value() + key_3_weight.value())\n        );\n    }\n\n    #[test]\n    fn overflowing_keys_weight() {\n        let identity_key = AccountHash::new([1u8; 32]);\n        let key_1 = AccountHash::new([2u8; 32]);\n        
let key_2 = AccountHash::new([3u8; 32]);\n        let key_3 = AccountHash::new([4u8; 32]);\n\n        let identity_key_weight = Weight::new(250);\n        let weight_1 = Weight::new(1);\n        let weight_2 = Weight::new(2);\n        let weight_3 = Weight::new(3);\n\n        let saturated_weight = Weight::new(u8::MAX);\n\n        let associated_keys = {\n            let mut res = AssociatedKeys::new(identity_key, identity_key_weight);\n\n            res.add_key(key_1, weight_1).expect(\"should add key 1\");\n            res.add_key(key_2, weight_2).expect(\"should add key 2\");\n            res.add_key(key_3, weight_3).expect(\"should add key 3\");\n            res\n        };\n\n        assert_eq!(\n            associated_keys.calculate_keys_weight(&BTreeSet::from_iter(vec![\n                identity_key, // 250\n                key_1,        // 251\n                key_2,        // 253\n                key_3,        // 256 - error\n            ])),\n            saturated_weight,\n        );\n    }\n\n    #[test]\n    fn serialization_roundtrip() {\n        let mut keys = AssociatedKeys::default();\n        keys.add_key(AccountHash::new([1; 32]), Weight::new(1))\n            .unwrap();\n        keys.add_key(AccountHash::new([2; 32]), Weight::new(2))\n            .unwrap();\n        keys.add_key(AccountHash::new([3; 32]), Weight::new(3))\n            .unwrap();\n        bytesrepr::test_serialization_roundtrip(&keys);\n    }\n}\n"
  },
  {
    "path": "types/src/account/error.rs",
    "content": "use core::{\n    array::TryFromSliceError,\n    fmt::{self, Display, Formatter},\n};\n\n// This error type is not intended to be used by third party crates.\n#[doc(hidden)]\n#[derive(Debug, Eq, PartialEq)]\npub struct TryFromIntError(pub(super) ());\n\n/// Error returned when decoding an `AccountHash` from a formatted string.\n#[derive(Debug)]\n#[non_exhaustive]\npub enum FromStrError {\n    /// The prefix is invalid.\n    InvalidPrefix,\n    /// The hash is not valid hex.\n    Hex(base16::DecodeError),\n    /// The hash is the wrong length.\n    Hash(TryFromSliceError),\n}\n\nimpl From<base16::DecodeError> for FromStrError {\n    fn from(error: base16::DecodeError) -> Self {\n        FromStrError::Hex(error)\n    }\n}\n\nimpl From<TryFromSliceError> for FromStrError {\n    fn from(error: TryFromSliceError) -> Self {\n        FromStrError::Hash(error)\n    }\n}\n\nimpl Display for FromStrError {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        match self {\n            FromStrError::InvalidPrefix => write!(f, \"prefix is not 'account-hash-'\"),\n            FromStrError::Hex(error) => {\n                write!(f, \"failed to decode address portion from hex: {}\", error)\n            }\n            FromStrError::Hash(error) => write!(f, \"address portion is wrong length: {}\", error),\n        }\n    }\n}\n\n/// Errors that can occur while changing action thresholds (i.e. 
the total\n/// [`Weight`](super::Weight)s of signing [`AccountHash`](super::AccountHash)s required to perform\n/// various actions) on an account.\n#[repr(i32)]\n#[derive(Debug, PartialEq, Eq, Copy, Clone)]\n#[non_exhaustive]\npub enum SetThresholdFailure {\n    /// Setting the key-management threshold to a value lower than the deployment threshold is\n    /// disallowed.\n    KeyManagementThreshold = 1,\n    /// Setting the deployment threshold to a value greater than any other threshold is disallowed.\n    DeploymentThreshold = 2,\n    /// Caller doesn't have sufficient permissions to set new thresholds.\n    PermissionDeniedError = 3,\n    /// Setting a threshold to a value greater than the total weight of associated keys is\n    /// disallowed.\n    InsufficientTotalWeight = 4,\n}\n\n// This conversion is not intended to be used by third party crates.\n#[doc(hidden)]\nimpl TryFrom<i32> for SetThresholdFailure {\n    type Error = TryFromIntError;\n\n    fn try_from(value: i32) -> Result<Self, Self::Error> {\n        match value {\n            d if d == SetThresholdFailure::KeyManagementThreshold as i32 => {\n                Ok(SetThresholdFailure::KeyManagementThreshold)\n            }\n            d if d == SetThresholdFailure::DeploymentThreshold as i32 => {\n                Ok(SetThresholdFailure::DeploymentThreshold)\n            }\n            d if d == SetThresholdFailure::PermissionDeniedError as i32 => {\n                Ok(SetThresholdFailure::PermissionDeniedError)\n            }\n            d if d == SetThresholdFailure::InsufficientTotalWeight as i32 => {\n                Ok(SetThresholdFailure::InsufficientTotalWeight)\n            }\n            _ => Err(TryFromIntError(())),\n        }\n    }\n}\n\nimpl Display for SetThresholdFailure {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            SetThresholdFailure::KeyManagementThreshold => formatter\n                .write_str(\"New threshold should be 
greater than or equal to deployment threshold\"),\n            SetThresholdFailure::DeploymentThreshold => formatter.write_str(\n                \"New threshold should be lower than or equal to key management threshold\",\n            ),\n            SetThresholdFailure::PermissionDeniedError => formatter\n                .write_str(\"Unable to set action threshold due to insufficient permissions\"),\n            SetThresholdFailure::InsufficientTotalWeight => formatter.write_str(\n                \"New threshold should be lower or equal than total weight of associated keys\",\n            ),\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/account/weight.rs",
    "content": "use alloc::vec::Vec;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\n\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    CLType, CLTyped,\n};\n\n/// The number of bytes in a serialized [`Weight`].\npub const WEIGHT_SERIALIZED_LENGTH: usize = U8_SERIALIZED_LENGTH;\n\n/// The weight associated with public keys in an account's associated keys.\n#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    schemars(rename = \"AccountAssociatedKeyWeight\")\n)]\npub struct Weight(u8);\n\nimpl Weight {\n    /// Maximum possible weight.\n    pub const MAX: Weight = Weight(u8::MAX);\n\n    /// Constructs a new `Weight`.\n    pub const fn new(weight: u8) -> Weight {\n        Weight(weight)\n    }\n\n    /// Returns the value of `self` as a `u8`.\n    pub fn value(self) -> u8 {\n        self.0\n    }\n}\n\nimpl ToBytes for Weight {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        WEIGHT_SERIALIZED_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        writer.push(self.0);\n        Ok(())\n    }\n}\n\nimpl FromBytes for Weight {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (byte, rem) = u8::from_bytes(bytes)?;\n        Ok((Weight::new(byte), rem))\n    }\n}\n\nimpl CLTyped for Weight {\n    fn cl_type() -> CLType {\n        CLType::U8\n    }\n}\n"
  },
  {
    "path": "types/src/account.rs",
    "content": "//! Contains types and constants associated with user accounts.\n\nmod account_hash;\npub mod action_thresholds;\npub mod action_type;\npub mod associated_keys;\nmod error;\nmod weight;\n\nuse serde::{Deserialize, Serialize};\n\nuse alloc::{collections::BTreeSet, vec::Vec};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse once_cell::sync::Lazy;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\n\npub use self::{\n    account_hash::{AccountHash, ACCOUNT_HASH_FORMATTED_STRING_PREFIX, ACCOUNT_HASH_LENGTH},\n    action_thresholds::ActionThresholds,\n    action_type::ActionType,\n    associated_keys::{AddKeyFailure, AssociatedKeys, RemoveKeyFailure, UpdateKeyFailure},\n    error::{FromStrError, SetThresholdFailure, TryFromIntError},\n    weight::Weight,\n};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    contracts::NamedKeys,\n    AccessRights, Key, URef,\n};\n#[cfg(feature = \"json-schema\")]\nuse crate::{PublicKey, SecretKey};\n\n#[cfg(feature = \"json-schema\")]\nstatic ACCOUNT: Lazy<Account> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([0; 32]).unwrap();\n    let account_hash = PublicKey::from(&secret_key).to_account_hash();\n    let main_purse = URef::from_formatted_str(\n        \"uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007\",\n    )\n    .unwrap();\n    let mut named_keys = NamedKeys::new();\n    named_keys.insert(\"main_purse\".to_string(), Key::URef(main_purse));\n    let weight = Weight::new(1);\n    let associated_keys = AssociatedKeys::new(account_hash, weight);\n    let action_thresholds = ActionThresholds::new(weight, weight).unwrap();\n    Account {\n        account_hash,\n        named_keys,\n        main_purse,\n        associated_keys,\n        action_thresholds,\n    }\n});\n\n/// Represents an Account in the global state.\n#[derive(PartialEq, Eq, Clone, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature 
= \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct Account {\n    account_hash: AccountHash,\n    named_keys: NamedKeys,\n    main_purse: URef,\n    associated_keys: AssociatedKeys,\n    action_thresholds: ActionThresholds,\n}\n\nimpl Account {\n    /// Creates a new account.\n    pub fn new(\n        account_hash: AccountHash,\n        named_keys: NamedKeys,\n        main_purse: URef,\n        associated_keys: AssociatedKeys,\n        action_thresholds: ActionThresholds,\n    ) -> Self {\n        Account {\n            account_hash,\n            named_keys,\n            main_purse,\n            associated_keys,\n            action_thresholds,\n        }\n    }\n\n    /// An Account constructor with presets for associated_keys and action_thresholds.\n    ///\n    /// An account created with this method is valid and can be used as the target of a transaction.\n    /// It will be created with an [`AssociatedKeys`] with a [`Weight`] of 1, and a default\n    /// [`ActionThresholds`].\n    pub fn create(account: AccountHash, named_keys: NamedKeys, main_purse: URef) -> Self {\n        let associated_keys = AssociatedKeys::new(account, Weight::new(1));\n\n        let action_thresholds: ActionThresholds = Default::default();\n        Account::new(\n            account,\n            named_keys,\n            main_purse,\n            associated_keys,\n            action_thresholds,\n        )\n    }\n\n    /// Appends named keys to an account's named_keys field.\n    pub fn named_keys_append(&mut self, keys: NamedKeys) {\n        self.named_keys.append(keys);\n    }\n\n    /// Returns named keys.\n    pub fn named_keys(&self) -> &NamedKeys {\n        &self.named_keys\n    }\n\n    /// Returns a mutable reference to named keys.\n    pub fn named_keys_mut(&mut self) -> &mut NamedKeys {\n        &mut self.named_keys\n    }\n\n    /// Removes the key under the given name from named keys.\n    
pub fn remove_named_key(&mut self, name: &str) -> Option<Key> {\n        self.named_keys.remove(name)\n    }\n\n    /// Returns account hash.\n    pub fn account_hash(&self) -> AccountHash {\n        self.account_hash\n    }\n\n    /// Returns main purse.\n    pub fn main_purse(&self) -> URef {\n        self.main_purse\n    }\n\n    /// Returns an [`AccessRights::ADD`]-only version of the main purse's [`URef`].\n    pub fn main_purse_add_only(&self) -> URef {\n        URef::new(self.main_purse.addr(), AccessRights::ADD)\n    }\n\n    /// Returns associated keys.\n    pub fn associated_keys(&self) -> &AssociatedKeys {\n        &self.associated_keys\n    }\n\n    /// Returns action thresholds.\n    pub fn action_thresholds(&self) -> &ActionThresholds {\n        &self.action_thresholds\n    }\n\n    /// Adds an associated key to an account.\n    pub fn add_associated_key(\n        &mut self,\n        account_hash: AccountHash,\n        weight: Weight,\n    ) -> Result<(), AddKeyFailure> {\n        self.associated_keys.add_key(account_hash, weight)\n    }\n\n    /// Checks if removing given key would properly satisfy thresholds.\n    fn can_remove_key(&self, account_hash: AccountHash) -> bool {\n        let total_weight_without = self\n            .associated_keys\n            .total_keys_weight_excluding(account_hash);\n\n        // Returns true if the total weight calculated without given public key would be greater or\n        // equal to all of the thresholds.\n        total_weight_without >= *self.action_thresholds().deployment()\n            && total_weight_without >= *self.action_thresholds().key_management()\n    }\n\n    /// Checks if adding a weight to a sum of all weights excluding the given key would make the\n    /// resulting value to fall below any of the thresholds on account.\n    fn can_update_key(&self, account_hash: AccountHash, weight: Weight) -> bool {\n        // Calculates total weight of all keys excluding the given key\n        let 
total_weight = self\n            .associated_keys\n            .total_keys_weight_excluding(account_hash);\n\n        // Safely calculate new weight by adding the updated weight\n        let new_weight = total_weight.value().saturating_add(weight.value());\n\n        // Returns true if the new weight would be greater or equal to all of\n        // the thresholds.\n        new_weight >= self.action_thresholds().deployment().value()\n            && new_weight >= self.action_thresholds().key_management().value()\n    }\n\n    /// Removes an associated key from an account.\n    ///\n    /// Verifies that removing the key will not cause the remaining weight to fall below any action\n    /// thresholds.\n    pub fn remove_associated_key(\n        &mut self,\n        account_hash: AccountHash,\n    ) -> Result<(), RemoveKeyFailure> {\n        if self.associated_keys.contains_key(&account_hash) {\n            // Check if removing this weight would fall below thresholds\n            if !self.can_remove_key(account_hash) {\n                return Err(RemoveKeyFailure::ThresholdViolation);\n            }\n        }\n        self.associated_keys.remove_key(&account_hash)\n    }\n\n    /// Updates an associated key.\n    ///\n    /// Returns an error if the update would result in a violation of the key management thresholds.\n    pub fn update_associated_key(\n        &mut self,\n        account_hash: AccountHash,\n        weight: Weight,\n    ) -> Result<(), UpdateKeyFailure> {\n        if let Some(current_weight) = self.associated_keys.get(&account_hash) {\n            if weight < *current_weight {\n                // New weight is smaller than current weight\n                if !self.can_update_key(account_hash, weight) {\n                    return Err(UpdateKeyFailure::ThresholdViolation);\n                }\n            }\n        }\n        self.associated_keys.update_key(account_hash, weight)\n    }\n\n    /// Sets a new action threshold for a given action type for the 
account without checking against\n    /// the total weight of the associated keys.\n    ///\n    /// This should only be called when authorized by an administrator account.\n    ///\n    /// Returns an error if setting the action would cause the `ActionType::Deployment` threshold to\n    /// be greater than any of the other action types.\n    pub fn set_action_threshold_unchecked(\n        &mut self,\n        action_type: ActionType,\n        threshold: Weight,\n    ) -> Result<(), SetThresholdFailure> {\n        self.action_thresholds.set_threshold(action_type, threshold)\n    }\n\n    /// Sets a new action threshold for a given action type for the account.\n    ///\n    /// Returns an error if the new action threshold weight is greater than the total weight of the\n    /// account's associated keys.\n    pub fn set_action_threshold(\n        &mut self,\n        action_type: ActionType,\n        weight: Weight,\n    ) -> Result<(), SetThresholdFailure> {\n        // Verify if new threshold weight exceeds total weight of all associated\n        // keys.\n        self.can_set_threshold(weight)?;\n        // Set new weight for given action\n        self.action_thresholds.set_threshold(action_type, weight)\n    }\n\n    /// Verifies if user can set action threshold.\n    pub fn can_set_threshold(&self, new_threshold: Weight) -> Result<(), SetThresholdFailure> {\n        let total_weight = self.associated_keys.total_keys_weight();\n        if new_threshold > total_weight {\n            return Err(SetThresholdFailure::InsufficientTotalWeight);\n        }\n        Ok(())\n    }\n\n    /// Checks whether all authorization keys are associated with this account.\n    pub fn can_authorize(&self, authorization_keys: &BTreeSet<AccountHash>) -> bool {\n        !authorization_keys.is_empty()\n            && authorization_keys\n                .iter()\n                .all(|e| self.associated_keys.contains_key(e))\n    }\n\n    /// Checks whether the sum of the weights of all 
authorization keys is\n    /// greater or equal to deploy threshold.\n    pub fn can_deploy_with(&self, authorization_keys: &BTreeSet<AccountHash>) -> bool {\n        let total_weight = self\n            .associated_keys\n            .calculate_keys_weight(authorization_keys);\n\n        total_weight >= *self.action_thresholds().deployment()\n    }\n\n    /// Checks whether the sum of the weights of all authorization keys is\n    /// greater or equal to key management threshold.\n    pub fn can_manage_keys_with(&self, authorization_keys: &BTreeSet<AccountHash>) -> bool {\n        let total_weight = self\n            .associated_keys\n            .calculate_keys_weight(authorization_keys);\n\n        total_weight >= *self.action_thresholds().key_management()\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    #[cfg(feature = \"json-schema\")]\n    pub fn example() -> &'static Self {\n        &ACCOUNT\n    }\n}\n\nimpl ToBytes for Account {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        self.account_hash().write_bytes(&mut result)?;\n        self.named_keys().write_bytes(&mut result)?;\n        self.main_purse.write_bytes(&mut result)?;\n        self.associated_keys().write_bytes(&mut result)?;\n        self.action_thresholds().write_bytes(&mut result)?;\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.account_hash.serialized_length()\n            + self.named_keys.serialized_length()\n            + self.main_purse.serialized_length()\n            + self.associated_keys.serialized_length()\n            + self.action_thresholds.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.account_hash().write_bytes(writer)?;\n        self.named_keys().write_bytes(writer)?;\n        self.main_purse().write_bytes(writer)?;\n        
self.associated_keys().write_bytes(writer)?;\n        self.action_thresholds().write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for Account {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (account_hash, rem) = AccountHash::from_bytes(bytes)?;\n        let (named_keys, rem) = NamedKeys::from_bytes(rem)?;\n        let (main_purse, rem) = URef::from_bytes(rem)?;\n        let (associated_keys, rem) = AssociatedKeys::from_bytes(rem)?;\n        let (action_thresholds, rem) = ActionThresholds::from_bytes(rem)?;\n        Ok((\n            Account {\n                account_hash,\n                named_keys,\n                main_purse,\n                associated_keys,\n                action_thresholds,\n            },\n            rem,\n        ))\n    }\n}\n\n#[doc(hidden)]\n#[cfg(any(feature = \"testing\", feature = \"gens\", test))]\npub mod gens {\n    use proptest::prelude::*;\n\n    use crate::{\n        account::{associated_keys::gens::account_associated_keys_arb, Account, Weight},\n        gens::{account_hash_arb, named_keys_arb, uref_arb},\n    };\n\n    use super::action_thresholds::gens::account_action_thresholds_arb;\n\n    prop_compose! 
{\n        pub fn account_arb()(\n            account_hash in account_hash_arb(),\n            urefs in named_keys_arb(3),\n            purse in uref_arb(),\n            thresholds in account_action_thresholds_arb(),\n            mut associated_keys in account_associated_keys_arb(),\n        ) -> Account {\n                associated_keys.add_key(account_hash, Weight::new(1)).unwrap();\n                Account::new(\n                    account_hash,\n                    urefs,\n                    purse,\n                    associated_keys,\n                    thresholds,\n                )\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{\n        account::{\n            Account, AccountHash, ActionThresholds, ActionType, AssociatedKeys, RemoveKeyFailure,\n            TryFromIntError, UpdateKeyFailure, Weight,\n        },\n        contracts::NamedKeys,\n        AccessRights, URef,\n    };\n    use std::{collections::BTreeSet, convert::TryFrom, iter::FromIterator, vec::Vec};\n\n    use super::*;\n\n    #[test]\n    fn account_hash_from_slice() {\n        let bytes: Vec<u8> = (0..32).collect();\n        let account_hash = AccountHash::try_from(&bytes[..]).expect(\n            \"should create account\nhash\",\n        );\n        assert_eq!(&bytes, &account_hash.as_bytes());\n    }\n\n    #[test]\n    fn account_hash_from_slice_too_small() {\n        let _account_hash =\n            AccountHash::try_from(&[0u8; 31][..]).expect_err(\"should not create account hash\");\n    }\n\n    #[test]\n    fn account_hash_from_slice_too_big() {\n        let _account_hash =\n            AccountHash::try_from(&[0u8; 33][..]).expect_err(\"should not create account hash\");\n    }\n\n    #[test]\n    fn try_from_i32_for_set_threshold_failure() {\n        let max_valid_value_for_variant = SetThresholdFailure::InsufficientTotalWeight as i32;\n        assert_eq!(\n            Err(TryFromIntError(())),\n            
SetThresholdFailure::try_from(max_valid_value_for_variant + 1),\n            \"Did you forget to update `SetThresholdFailure::try_from` for a new variant of \\\n                   `SetThresholdFailure`, or `max_valid_value_for_variant` in this test?\"\n        );\n    }\n\n    #[test]\n    fn try_from_i32_for_add_key_failure() {\n        let max_valid_value_for_variant = AddKeyFailure::PermissionDenied as i32;\n        assert_eq!(\n            Err(TryFromIntError(())),\n            AddKeyFailure::try_from(max_valid_value_for_variant + 1),\n            \"Did you forget to update `AddKeyFailure::try_from` for a new variant of \\\n                   `AddKeyFailure`, or `max_valid_value_for_variant` in this test?\"\n        );\n    }\n\n    #[test]\n    fn try_from_i32_for_remove_key_failure() {\n        let max_valid_value_for_variant = RemoveKeyFailure::ThresholdViolation as i32;\n        assert_eq!(\n            Err(TryFromIntError(())),\n            RemoveKeyFailure::try_from(max_valid_value_for_variant + 1),\n            \"Did you forget to update `RemoveKeyFailure::try_from` for a new variant of \\\n                   `RemoveKeyFailure`, or `max_valid_value_for_variant` in this test?\"\n        );\n    }\n\n    #[test]\n    fn try_from_i32_for_update_key_failure() {\n        let max_valid_value_for_variant = UpdateKeyFailure::ThresholdViolation as i32;\n        assert_eq!(\n            Err(TryFromIntError(())),\n            UpdateKeyFailure::try_from(max_valid_value_for_variant + 1),\n            \"Did you forget to update `UpdateKeyFailure::try_from` for a new variant of \\\n                   `UpdateKeyFailure`, or `max_valid_value_for_variant` in this test?\"\n        );\n    }\n\n    #[test]\n    fn account_hash_from_str() {\n        let account_hash = AccountHash([3; 32]);\n        let encoded = account_hash.to_formatted_string();\n        let decoded = AccountHash::from_formatted_str(&encoded).unwrap();\n        assert_eq!(account_hash, decoded);\n\n        
let invalid_prefix =\n            \"accounthash-0000000000000000000000000000000000000000000000000000000000000000\";\n        assert!(AccountHash::from_formatted_str(invalid_prefix).is_err());\n\n        let invalid_prefix =\n            \"account-hash0000000000000000000000000000000000000000000000000000000000000000\";\n        assert!(AccountHash::from_formatted_str(invalid_prefix).is_err());\n\n        let short_addr =\n            \"account-hash-00000000000000000000000000000000000000000000000000000000000000\";\n        assert!(AccountHash::from_formatted_str(short_addr).is_err());\n\n        let long_addr =\n            \"account-hash-000000000000000000000000000000000000000000000000000000000000000000\";\n        assert!(AccountHash::from_formatted_str(long_addr).is_err());\n\n        let invalid_hex =\n            \"account-hash-000000000000000000000000000000000000000000000000000000000000000g\";\n        assert!(AccountHash::from_formatted_str(invalid_hex).is_err());\n    }\n\n    #[test]\n    fn account_hash_serde_roundtrip() {\n        let account_hash = AccountHash([255; 32]);\n        let serialized = bincode::serialize(&account_hash).unwrap();\n        let decoded = bincode::deserialize(&serialized).unwrap();\n        assert_eq!(account_hash, decoded);\n    }\n\n    #[test]\n    fn account_hash_json_roundtrip() {\n        let account_hash = AccountHash([255; 32]);\n        let json_string = serde_json::to_string_pretty(&account_hash).unwrap();\n        let decoded = serde_json::from_str(&json_string).unwrap();\n        assert_eq!(account_hash, decoded);\n    }\n\n    #[test]\n    fn associated_keys_can_authorize_keys() {\n        let key_1 = AccountHash::new([0; 32]);\n        let key_2 = AccountHash::new([1; 32]);\n        let key_3 = AccountHash::new([2; 32]);\n        let mut keys = AssociatedKeys::default();\n\n        keys.add_key(key_2, Weight::new(2))\n            .expect(\"should add key_1\");\n        keys.add_key(key_1, Weight::new(1))\n            
.expect(\"should add key_1\");\n        keys.add_key(key_3, Weight::new(3))\n            .expect(\"should add key_1\");\n\n        let account = Account::new(\n            AccountHash::new([0u8; 32]),\n            NamedKeys::new(),\n            URef::new([0u8; 32], AccessRights::READ_ADD_WRITE),\n            keys,\n            // deploy: 33 (3*11)\n            ActionThresholds::new(Weight::new(33), Weight::new(48))\n                .expect(\"should create thresholds\"),\n        );\n\n        assert!(account.can_authorize(&BTreeSet::from_iter(vec![key_3, key_2, key_1])));\n        assert!(account.can_authorize(&BTreeSet::from_iter(vec![key_1, key_3, key_2])));\n\n        assert!(account.can_authorize(&BTreeSet::from_iter(vec![key_1, key_2])));\n        assert!(account.can_authorize(&BTreeSet::from_iter(vec![key_1])));\n\n        assert!(!account.can_authorize(&BTreeSet::from_iter(vec![\n            key_1,\n            key_2,\n            AccountHash::new([42; 32])\n        ])));\n        assert!(!account.can_authorize(&BTreeSet::from_iter(vec![\n            AccountHash::new([42; 32]),\n            key_1,\n            key_2\n        ])));\n        assert!(!account.can_authorize(&BTreeSet::from_iter(vec![\n            AccountHash::new([43; 32]),\n            AccountHash::new([44; 32]),\n            AccountHash::new([42; 32])\n        ])));\n        assert!(!account.can_authorize(&BTreeSet::new()));\n    }\n\n    #[test]\n    fn account_can_deploy_with() {\n        let associated_keys = {\n            let mut res = AssociatedKeys::new(AccountHash::new([1u8; 32]), Weight::new(1));\n            res.add_key(AccountHash::new([2u8; 32]), Weight::new(11))\n                .expect(\"should add key 1\");\n            res.add_key(AccountHash::new([3u8; 32]), Weight::new(11))\n                .expect(\"should add key 2\");\n            res.add_key(AccountHash::new([4u8; 32]), Weight::new(11))\n                .expect(\"should add key 3\");\n            res\n        };\n        
let account = Account::new(\n            AccountHash::new([0u8; 32]),\n            NamedKeys::new(),\n            URef::new([0u8; 32], AccessRights::READ_ADD_WRITE),\n            associated_keys,\n            // deploy: 33 (3*11)\n            ActionThresholds::new(Weight::new(33), Weight::new(48))\n                .expect(\"should create thresholds\"),\n        );\n\n        // sum: 22, required 33 - can't deploy\n        assert!(!account.can_deploy_with(&BTreeSet::from_iter(vec![\n            AccountHash::new([3u8; 32]),\n            AccountHash::new([2u8; 32]),\n        ])));\n\n        // sum: 33, required 33 - can deploy\n        assert!(account.can_deploy_with(&BTreeSet::from_iter(vec![\n            AccountHash::new([4u8; 32]),\n            AccountHash::new([3u8; 32]),\n            AccountHash::new([2u8; 32]),\n        ])));\n\n        // sum: 34, required 33 - can deploy\n        assert!(account.can_deploy_with(&BTreeSet::from_iter(vec![\n            AccountHash::new([2u8; 32]),\n            AccountHash::new([1u8; 32]),\n            AccountHash::new([4u8; 32]),\n            AccountHash::new([3u8; 32]),\n        ])));\n    }\n\n    #[test]\n    fn account_can_manage_keys_with() {\n        let associated_keys = {\n            let mut res = AssociatedKeys::new(AccountHash::new([1u8; 32]), Weight::new(1));\n            res.add_key(AccountHash::new([2u8; 32]), Weight::new(11))\n                .expect(\"should add key 1\");\n            res.add_key(AccountHash::new([3u8; 32]), Weight::new(11))\n                .expect(\"should add key 2\");\n            res.add_key(AccountHash::new([4u8; 32]), Weight::new(11))\n                .expect(\"should add key 3\");\n            res\n        };\n        let account = Account::new(\n            AccountHash::new([0u8; 32]),\n            NamedKeys::new(),\n            URef::new([0u8; 32], AccessRights::READ_ADD_WRITE),\n            associated_keys,\n            // deploy: 33 (3*11)\n            
ActionThresholds::new(Weight::new(11), Weight::new(33))\n                .expect(\"should create thresholds\"),\n        );\n\n        // sum: 22, required 33 - can't manage\n        assert!(!account.can_manage_keys_with(&BTreeSet::from_iter(vec![\n            AccountHash::new([3u8; 32]),\n            AccountHash::new([2u8; 32]),\n        ])));\n\n        // sum: 33, required 33 - can manage\n        assert!(account.can_manage_keys_with(&BTreeSet::from_iter(vec![\n            AccountHash::new([4u8; 32]),\n            AccountHash::new([3u8; 32]),\n            AccountHash::new([2u8; 32]),\n        ])));\n\n        // sum: 34, required 33 - can manage\n        assert!(account.can_manage_keys_with(&BTreeSet::from_iter(vec![\n            AccountHash::new([2u8; 32]),\n            AccountHash::new([1u8; 32]),\n            AccountHash::new([4u8; 32]),\n            AccountHash::new([3u8; 32]),\n        ])));\n    }\n\n    #[test]\n    fn set_action_threshold_higher_than_total_weight() {\n        let identity_key = AccountHash::new([1u8; 32]);\n        let key_1 = AccountHash::new([2u8; 32]);\n        let key_2 = AccountHash::new([3u8; 32]);\n        let key_3 = AccountHash::new([4u8; 32]);\n        let associated_keys = {\n            let mut res = AssociatedKeys::new(identity_key, Weight::new(1));\n            res.add_key(key_1, Weight::new(2))\n                .expect(\"should add key 1\");\n            res.add_key(key_2, Weight::new(3))\n                .expect(\"should add key 2\");\n            res.add_key(key_3, Weight::new(4))\n                .expect(\"should add key 3\");\n            res\n        };\n        let mut account = Account::new(\n            AccountHash::new([0u8; 32]),\n            NamedKeys::new(),\n            URef::new([0u8; 32], AccessRights::READ_ADD_WRITE),\n            associated_keys,\n            // deploy: 33 (3*11)\n            ActionThresholds::new(Weight::new(33), Weight::new(48))\n                .expect(\"should create thresholds\"),\n   
     );\n\n        assert_eq!(\n            account\n                .set_action_threshold(ActionType::Deployment, Weight::new(1 + 2 + 3 + 4 + 1))\n                .unwrap_err(),\n            SetThresholdFailure::InsufficientTotalWeight,\n        );\n        assert_eq!(\n            account\n                .set_action_threshold(ActionType::Deployment, Weight::new(1 + 2 + 3 + 4 + 245))\n                .unwrap_err(),\n            SetThresholdFailure::InsufficientTotalWeight,\n        )\n    }\n\n    #[test]\n    fn remove_key_would_violate_action_thresholds() {\n        let identity_key = AccountHash::new([1u8; 32]);\n        let key_1 = AccountHash::new([2u8; 32]);\n        let key_2 = AccountHash::new([3u8; 32]);\n        let key_3 = AccountHash::new([4u8; 32]);\n        let associated_keys = {\n            let mut res = AssociatedKeys::new(identity_key, Weight::new(1));\n            res.add_key(key_1, Weight::new(2))\n                .expect(\"should add key 1\");\n            res.add_key(key_2, Weight::new(3))\n                .expect(\"should add key 2\");\n            res.add_key(key_3, Weight::new(4))\n                .expect(\"should add key 3\");\n            res\n        };\n        let mut account = Account::new(\n            AccountHash::new([0u8; 32]),\n            NamedKeys::new(),\n            URef::new([0u8; 32], AccessRights::READ_ADD_WRITE),\n            associated_keys,\n            // deploy: 33 (3*11)\n            ActionThresholds::new(Weight::new(1 + 2 + 3 + 4), Weight::new(1 + 2 + 3 + 4 + 5))\n                .expect(\"should create thresholds\"),\n        );\n\n        assert_eq!(\n            account.remove_associated_key(key_3).unwrap_err(),\n            RemoveKeyFailure::ThresholdViolation,\n        )\n    }\n\n    #[test]\n    fn updating_key_would_violate_action_thresholds() {\n        let identity_key = AccountHash::new([1u8; 32]);\n        let identity_key_weight = Weight::new(1);\n        let key_1 = AccountHash::new([2u8; 32]);\n    
    let key_1_weight = Weight::new(2);\n        let key_2 = AccountHash::new([3u8; 32]);\n        let key_2_weight = Weight::new(3);\n        let key_3 = AccountHash::new([4u8; 32]);\n        let key_3_weight = Weight::new(4);\n        let associated_keys = {\n            let mut res = AssociatedKeys::new(identity_key, identity_key_weight);\n            res.add_key(key_1, key_1_weight).expect(\"should add key 1\");\n            res.add_key(key_2, key_2_weight).expect(\"should add key 2\");\n            res.add_key(key_3, key_3_weight).expect(\"should add key 3\");\n            // 1 + 2 + 3 + 4\n            res\n        };\n\n        let deployment_threshold = Weight::new(\n            identity_key_weight.value()\n                + key_1_weight.value()\n                + key_2_weight.value()\n                + key_3_weight.value(),\n        );\n        let key_management_threshold = Weight::new(deployment_threshold.value() + 1);\n        let mut account = Account::new(\n            identity_key,\n            NamedKeys::new(),\n            URef::new([0u8; 32], AccessRights::READ_ADD_WRITE),\n            associated_keys,\n            // deploy: 33 (3*11)\n            ActionThresholds::new(deployment_threshold, key_management_threshold)\n                .expect(\"should create thresholds\"),\n        );\n\n        // Decreases by 3\n        assert_eq!(\n            account\n                .clone()\n                .update_associated_key(key_3, Weight::new(1))\n                .unwrap_err(),\n            UpdateKeyFailure::ThresholdViolation,\n        );\n\n        // increase total weight (12)\n        account\n            .update_associated_key(identity_key, Weight::new(3))\n            .unwrap();\n\n        // variant a) decrease total weight by 1 (total 11)\n        account\n            .clone()\n            .update_associated_key(key_3, Weight::new(3))\n            .unwrap();\n        // variant b) decrease total weight by 3 (total 9) - fail\n        assert_eq!(\n  
          account\n                .update_associated_key(key_3, Weight::new(1))\n                .unwrap_err(),\n            UpdateKeyFailure::ThresholdViolation\n        );\n    }\n\n    #[test]\n    fn overflowing_should_allow_removal() {\n        let identity_key = AccountHash::new([42; 32]);\n        let key_1 = AccountHash::new([2u8; 32]);\n        let key_2 = AccountHash::new([3u8; 32]);\n\n        let associated_keys = {\n            // Identity\n            let mut res = AssociatedKeys::new(identity_key, Weight::new(1));\n\n            // Spare key\n            res.add_key(key_1, Weight::new(2))\n                .expect(\"should add key 1\");\n            // Big key\n            res.add_key(key_2, Weight::new(255))\n                .expect(\"should add key 2\");\n\n            res\n        };\n\n        let mut account = Account::new(\n            identity_key,\n            NamedKeys::new(),\n            URef::new([0u8; 32], AccessRights::READ_ADD_WRITE),\n            associated_keys,\n            ActionThresholds::new(Weight::new(1), Weight::new(254))\n                .expect(\"should create thresholds\"),\n        );\n\n        account.remove_associated_key(key_1).expect(\"should work\")\n    }\n\n    #[test]\n    fn overflowing_should_allow_updating() {\n        let identity_key = AccountHash::new([1; 32]);\n        let identity_key_weight = Weight::new(1);\n        let key_1 = AccountHash::new([2u8; 32]);\n        let key_1_weight = Weight::new(3);\n        let key_2 = AccountHash::new([3u8; 32]);\n        let key_2_weight = Weight::new(255);\n        let deployment_threshold = Weight::new(1);\n        let key_management_threshold = Weight::new(254);\n\n        let associated_keys = {\n            // Identity\n            let mut res = AssociatedKeys::new(identity_key, identity_key_weight);\n\n            // Spare key\n            res.add_key(key_1, key_1_weight).expect(\"should add key 1\");\n            // Big key\n            res.add_key(key_2, 
key_2_weight).expect(\"should add key 2\");\n\n            res\n        };\n\n        let mut account = Account::new(\n            identity_key,\n            NamedKeys::new(),\n            URef::new([0u8; 32], AccessRights::READ_ADD_WRITE),\n            associated_keys,\n            ActionThresholds::new(deployment_threshold, key_management_threshold)\n                .expect(\"should create thresholds\"),\n        );\n\n        // decrease so total weight would be changed from 1 + 3 + 255 to 1 + 1 + 255\n        account\n            .update_associated_key(key_1, Weight::new(1))\n            .expect(\"should work\");\n    }\n}\n\n#[cfg(test)]\nmod proptests {\n    use proptest::prelude::*;\n\n    use crate::bytesrepr;\n\n    use super::*;\n\n    proptest! {\n        #[test]\n        fn test_value_account(acct in gens::account_arb()) {\n            bytesrepr::test_serialization_roundtrip(&acct);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/addressable_entity/action_thresholds.rs",
    "content": "//! This module contains types and functions for managing action thresholds.\n\nuse alloc::vec::Vec;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    account::{ActionThresholds as AccountActionThresholds, SetThresholdFailure},\n    addressable_entity::{ActionType, Weight, WEIGHT_SERIALIZED_LENGTH},\n    bytesrepr::{self, Error, FromBytes, ToBytes},\n};\n\n/// Thresholds that have to be met when executing an action of a certain type.\n#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[cfg_attr(feature = \"json-schema\", schemars(rename = \"EntityActionThresholds\"))]\npub struct ActionThresholds {\n    /// Threshold for deploy execution.\n    pub deployment: Weight,\n    /// Threshold for upgrading contracts.\n    pub upgrade_management: Weight,\n    /// Threshold for managing action threshold.\n    pub key_management: Weight,\n}\n\nimpl ActionThresholds {\n    /// Creates new ActionThresholds object with provided weights\n    ///\n    /// Requires deployment threshold to be lower than or equal to\n    /// key management threshold.\n    pub fn new(\n        deployment: Weight,\n        upgrade_management: Weight,\n        key_management: Weight,\n    ) -> Result<ActionThresholds, SetThresholdFailure> {\n        if deployment > key_management {\n            return Err(SetThresholdFailure::DeploymentThreshold);\n        }\n        Ok(ActionThresholds {\n            deployment,\n            upgrade_management,\n            key_management,\n        })\n    }\n    /// Sets new threshold for [ActionType::Deployment].\n    /// Should return an error if setting new threshold for `action_type` breaks\n    /// one of the invariants. 
Currently, invariant is that\n    /// `ActionType::Deployment` threshold shouldn't be higher than any\n    /// other, which should be checked both when increasing `Deployment`\n    /// threshold and decreasing the other.\n    pub fn set_deployment_threshold(\n        &mut self,\n        new_threshold: Weight,\n    ) -> Result<(), SetThresholdFailure> {\n        if new_threshold > self.key_management {\n            Err(SetThresholdFailure::DeploymentThreshold)\n        } else {\n            self.deployment = new_threshold;\n            Ok(())\n        }\n    }\n\n    /// Sets new threshold for [ActionType::KeyManagement].\n    pub fn set_key_management_threshold(\n        &mut self,\n        new_threshold: Weight,\n    ) -> Result<(), SetThresholdFailure> {\n        if self.deployment > new_threshold {\n            Err(SetThresholdFailure::KeyManagementThreshold)\n        } else {\n            self.key_management = new_threshold;\n            Ok(())\n        }\n    }\n\n    /// Sets new threshold for [ActionType::UpgradeManagement].\n    pub fn set_upgrade_management_threshold(\n        &mut self,\n        upgrade_management: Weight,\n    ) -> Result<(), SetThresholdFailure> {\n        self.upgrade_management = upgrade_management;\n        Ok(())\n    }\n\n    /// Returns the deployment action threshold.\n    pub fn deployment(&self) -> &Weight {\n        &self.deployment\n    }\n\n    /// Returns key management action threshold.\n    pub fn key_management(&self) -> &Weight {\n        &self.key_management\n    }\n\n    /// Returns the upgrade management action threshold.\n    pub fn upgrade_management(&self) -> &Weight {\n        &self.upgrade_management\n    }\n\n    /// Unified function that takes an action type, and changes appropriate\n    /// threshold defined by the [ActionType] variants.\n    pub fn set_threshold(\n        &mut self,\n        action_type: ActionType,\n        new_threshold: Weight,\n    ) -> Result<(), SetThresholdFailure> {\n        match 
action_type {\n            ActionType::Deployment => self.set_deployment_threshold(new_threshold),\n            ActionType::KeyManagement => self.set_key_management_threshold(new_threshold),\n            ActionType::UpgradeManagement => self.set_upgrade_management_threshold(new_threshold),\n        }\n    }\n}\n\nimpl Default for ActionThresholds {\n    fn default() -> Self {\n        ActionThresholds {\n            deployment: Weight::new(1),\n            upgrade_management: Weight::new(1),\n            key_management: Weight::new(1),\n        }\n    }\n}\n\nimpl From<AccountActionThresholds> for ActionThresholds {\n    fn from(value: AccountActionThresholds) -> Self {\n        Self {\n            deployment: Weight::new(value.deployment.value()),\n            key_management: Weight::new(value.key_management.value()),\n            upgrade_management: Weight::new(1),\n        }\n    }\n}\n\nimpl ToBytes for ActionThresholds {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut result = bytesrepr::unchecked_allocate_buffer(self);\n        result.append(&mut self.deployment.to_bytes()?);\n        result.append(&mut self.upgrade_management.to_bytes()?);\n        result.append(&mut self.key_management.to_bytes()?);\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        3 * WEIGHT_SERIALIZED_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.deployment().write_bytes(writer)?;\n        self.upgrade_management().write_bytes(writer)?;\n        self.key_management().write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for ActionThresholds {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (deployment, rem) = Weight::from_bytes(bytes)?;\n        let (upgrade_management, rem) = Weight::from_bytes(rem)?;\n        let (key_management, rem) = Weight::from_bytes(rem)?;\n        let ret = ActionThresholds {\n            deployment,\n 
           upgrade_management,\n            key_management,\n        };\n        Ok((ret, rem))\n    }\n}\n\n#[doc(hidden)]\n#[cfg(any(feature = \"testing\", feature = \"gens\", test))]\npub mod gens {\n    use proptest::prelude::*;\n\n    use super::ActionThresholds;\n\n    pub fn action_thresholds_arb() -> impl Strategy<Value = ActionThresholds> {\n        Just(Default::default())\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn should_create_new_action_thresholds() {\n        let action_thresholds =\n            ActionThresholds::new(Weight::new(1), Weight::new(1), Weight::new(42)).unwrap();\n        assert_eq!(*action_thresholds.deployment(), Weight::new(1));\n        assert_eq!(*action_thresholds.upgrade_management(), Weight::new(1));\n        assert_eq!(*action_thresholds.key_management(), Weight::new(42));\n    }\n\n    #[test]\n    fn should_not_create_action_thresholds_with_invalid_deployment_threshold() {\n        // deployment cant be greater than key management\n        assert!(ActionThresholds::new(Weight::new(5), Weight::new(1), Weight::new(1)).is_err());\n    }\n\n    #[test]\n    fn serialization_roundtrip() {\n        let action_thresholds =\n            ActionThresholds::new(Weight::new(1), Weight::new(1), Weight::new(42)).unwrap();\n        bytesrepr::test_serialization_roundtrip(&action_thresholds);\n    }\n}\n"
  },
  {
    "path": "types/src/addressable_entity/action_type.rs",
    "content": "use core::convert::TryFrom;\n\nuse super::TryFromIntError;\n\n/// The various types of action which can be performed in the context of a given account.\n#[repr(u32)]\npub enum ActionType {\n    /// Represents performing a deploy.\n    Deployment = 0,\n    /// Represents changing the associated keys (i.e. map of [`AccountHash`](super::AccountHash)s\n    /// to [`Weight`](super::Weight)s) or action thresholds (i.e. the total\n    /// [`Weight`](super::Weight)s of signing [`AccountHash`](super::AccountHash)s required to\n    /// perform various actions).\n    KeyManagement = 1,\n    /// Represents changing the associated keys (i.e. map of [`AccountHash`](super::AccountHash)s\n    /// to [`Weight`](super::Weight)s) or action thresholds (i.e. the total\n    /// [`Weight`](super::Weight)s of signing [`AccountHash`](super::AccountHash)s required to\n    /// upgrade the addressable entity.\n    UpgradeManagement = 2,\n}\n\n// This conversion is not intended to be used by third party crates.\n#[doc(hidden)]\nimpl TryFrom<u32> for ActionType {\n    type Error = TryFromIntError;\n\n    fn try_from(value: u32) -> Result<Self, Self::Error> {\n        // This doesn't use `num_derive` traits such as FromPrimitive and ToPrimitive\n        // that helps to automatically create `from_u32` and `to_u32`. This approach\n        // gives better control over generated code.\n        match value {\n            d if d == ActionType::Deployment as u32 => Ok(ActionType::Deployment),\n            d if d == ActionType::KeyManagement as u32 => Ok(ActionType::KeyManagement),\n            d if d == ActionType::UpgradeManagement as u32 => Ok(ActionType::UpgradeManagement),\n            _ => Err(TryFromIntError(())),\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/addressable_entity/associated_keys.rs",
    "content": "//! This module contains types and functions for working with keys associated with an account.\n\nuse alloc::{\n    collections::{btree_map::Entry, BTreeMap, BTreeSet},\n    vec::Vec,\n};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n#[cfg(feature = \"json-schema\")]\nuse serde_map_to_array::KeyValueJsonSchema;\nuse serde_map_to_array::{BTreeMapToArray, KeyValueLabels};\n\nuse crate::{\n    account::{\n        AccountHash, AddKeyFailure, AssociatedKeys as AccountAssociatedKeys, RemoveKeyFailure,\n        UpdateKeyFailure,\n    },\n    addressable_entity::Weight,\n    bytesrepr::{self, FromBytes, ToBytes},\n};\n\n/// A collection of weighted public keys (represented as account hashes) associated with an account.\n#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[cfg_attr(feature = \"json-schema\", schemars(rename = \"EntityAssociatedKeys\"))]\n#[serde(deny_unknown_fields)]\n#[rustfmt::skip]\npub struct AssociatedKeys(\n    #[serde(with = \"BTreeMapToArray::<AccountHash, Weight, Labels>\")]\n    BTreeMap<AccountHash, Weight>,\n);\n\nimpl AssociatedKeys {\n    /// Constructs a new AssociatedKeys.\n    pub fn new(key: AccountHash, weight: Weight) -> AssociatedKeys {\n        let mut bt: BTreeMap<AccountHash, Weight> = BTreeMap::new();\n        bt.insert(key, weight);\n        AssociatedKeys(bt)\n    }\n\n    /// Adds a new AssociatedKey to the set.\n    ///\n    /// Returns true if added successfully, false otherwise.\n    pub fn add_key(&mut self, key: AccountHash, weight: Weight) -> Result<(), AddKeyFailure> {\n        match self.0.entry(key) {\n            Entry::Vacant(entry) => {\n                entry.insert(weight);\n            }\n            Entry::Occupied(_) => return 
Err(AddKeyFailure::DuplicateKey),\n        }\n        Ok(())\n    }\n\n    /// Removes key from the associated keys set.\n    /// Returns true if value was found in the set prior to the removal, false\n    /// otherwise.\n    pub fn remove_key(&mut self, key: &AccountHash) -> Result<(), RemoveKeyFailure> {\n        self.0\n            .remove(key)\n            .map(|_| ())\n            .ok_or(RemoveKeyFailure::MissingKey)\n    }\n\n    /// Adds new AssociatedKey to the set.\n    /// Returns true if added successfully, false otherwise.\n    pub fn update_key(&mut self, key: AccountHash, weight: Weight) -> Result<(), UpdateKeyFailure> {\n        match self.0.entry(key) {\n            Entry::Vacant(_) => {\n                return Err(UpdateKeyFailure::MissingKey);\n            }\n            Entry::Occupied(mut entry) => {\n                *entry.get_mut() = weight;\n            }\n        }\n        Ok(())\n    }\n\n    /// Returns the weight of an account hash.\n    pub fn get(&self, key: &AccountHash) -> Option<&Weight> {\n        self.0.get(key)\n    }\n\n    /// Returns `true` if a given key exists.\n    pub fn contains_key(&self, key: &AccountHash) -> bool {\n        self.0.contains_key(key)\n    }\n\n    /// Returns an iterator over the account hash and the weights.\n    pub fn iter(&self) -> impl Iterator<Item = (&AccountHash, &Weight)> {\n        self.0.iter()\n    }\n\n    /// Returns the count of the associated keys.\n    pub fn len(&self) -> usize {\n        self.0.len()\n    }\n\n    /// Returns `true` if the associated keys are empty.\n    pub fn is_empty(&self) -> bool {\n        self.0.is_empty()\n    }\n\n    /// Helper method that calculates weight for keys that comes from any\n    /// source.\n    ///\n    /// This method is not concerned about uniqueness of the passed iterable.\n    /// Uniqueness is determined based on the input collection properties,\n    /// which is either BTreeSet (in [`AssociatedKeys::calculate_keys_weight`])\n    /// or 
BTreeMap (in [`AssociatedKeys::total_keys_weight`]).\n    fn calculate_any_keys_weight<'a>(&self, keys: impl Iterator<Item = &'a AccountHash>) -> Weight {\n        let total = keys\n            .filter_map(|key| self.0.get(key))\n            .fold(0u8, |acc, w| acc.saturating_add(w.value()));\n\n        Weight::new(total)\n    }\n\n    /// Calculates total weight of authorization keys provided by an argument\n    pub fn calculate_keys_weight(&self, authorization_keys: &BTreeSet<AccountHash>) -> Weight {\n        self.calculate_any_keys_weight(authorization_keys.iter())\n    }\n\n    /// Calculates total weight of all authorization keys\n    pub fn total_keys_weight(&self) -> Weight {\n        self.calculate_any_keys_weight(self.0.keys())\n    }\n\n    /// Calculates total weight of all authorization keys excluding a given key\n    pub fn total_keys_weight_excluding(&self, account_hash: AccountHash) -> Weight {\n        self.calculate_any_keys_weight(self.0.keys().filter(|&&element| element != account_hash))\n    }\n\n    pub fn empty_keys() -> Self {\n        AssociatedKeys(BTreeMap::new())\n    }\n}\n\nimpl From<BTreeMap<AccountHash, Weight>> for AssociatedKeys {\n    fn from(associated_keys: BTreeMap<AccountHash, Weight>) -> Self {\n        Self(associated_keys)\n    }\n}\n\nimpl ToBytes for AssociatedKeys {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.0.write_bytes(writer)\n    }\n}\n\nimpl FromBytes for AssociatedKeys {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (associated_keys, rem) = FromBytes::from_bytes(bytes)?;\n        Ok((AssociatedKeys(associated_keys), rem))\n    }\n}\n\nimpl From<AccountAssociatedKeys> for AssociatedKeys {\n    fn from(value: AccountAssociatedKeys) -> 
Self {\n        let mut associated_keys = AssociatedKeys::default();\n        for (account_hash, weight) in value.iter() {\n            associated_keys\n                .0\n                .insert(*account_hash, Weight::new(weight.value()));\n        }\n        associated_keys\n    }\n}\n\nstruct Labels;\n\nimpl KeyValueLabels for Labels {\n    const KEY: &'static str = \"account_hash\";\n    const VALUE: &'static str = \"weight\";\n}\n\n#[cfg(feature = \"json-schema\")]\nimpl KeyValueJsonSchema for Labels {\n    const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some(\"AssociatedKey\");\n    const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> = Some(\"A weighted public key.\");\n    const JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> =\n        Some(\"The account hash of the public key.\");\n    const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> =\n        Some(\"The weight assigned to the public key.\");\n}\n\n#[doc(hidden)]\n#[cfg(any(feature = \"testing\", feature = \"gens\", test))]\npub mod gens {\n    use proptest::prelude::*;\n\n    use crate::gens::{account_hash_arb, weight_arb};\n\n    use super::AssociatedKeys;\n\n    pub fn associated_keys_arb() -> impl Strategy<Value = AssociatedKeys> {\n        proptest::collection::btree_map(account_hash_arb(), weight_arb(), 10).prop_map(|keys| {\n            let mut associated_keys = AssociatedKeys::default();\n            keys.into_iter().for_each(|(k, v)| {\n                associated_keys.add_key(k, v).unwrap();\n            });\n            associated_keys\n        })\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::{collections::BTreeSet, iter::FromIterator};\n\n    use crate::{\n        account::{AccountHash, AddKeyFailure, ACCOUNT_HASH_LENGTH},\n        addressable_entity::Weight,\n        bytesrepr,\n    };\n\n    use super::*;\n\n    #[test]\n    fn associated_keys_add() {\n        let mut keys =\n            AssociatedKeys::new(AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]), 
Weight::new(1));\n        let new_pk = AccountHash::new([1u8; ACCOUNT_HASH_LENGTH]);\n        let new_pk_weight = Weight::new(2);\n        assert!(keys.add_key(new_pk, new_pk_weight).is_ok());\n        assert_eq!(keys.get(&new_pk), Some(&new_pk_weight))\n    }\n\n    #[test]\n    fn associated_keys_add_duplicate() {\n        let pk = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]);\n        let weight = Weight::new(1);\n        let mut keys = AssociatedKeys::new(pk, weight);\n        assert_eq!(\n            keys.add_key(pk, Weight::new(10)),\n            Err(AddKeyFailure::DuplicateKey)\n        );\n        assert_eq!(keys.get(&pk), Some(&weight));\n    }\n\n    #[test]\n    fn associated_keys_remove() {\n        let pk = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]);\n        let weight = Weight::new(1);\n        let mut keys = AssociatedKeys::new(pk, weight);\n        assert!(keys.remove_key(&pk).is_ok());\n        assert!(keys\n            .remove_key(&AccountHash::new([1u8; ACCOUNT_HASH_LENGTH]))\n            .is_err());\n    }\n\n    #[test]\n    fn associated_keys_update() {\n        let pk1 = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]);\n        let pk2 = AccountHash::new([1u8; ACCOUNT_HASH_LENGTH]);\n        let weight = Weight::new(1);\n        let mut keys = AssociatedKeys::new(pk1, weight);\n        assert!(matches!(\n            keys.update_key(pk2, Weight::new(2))\n                .expect_err(\"should get error\"),\n            UpdateKeyFailure::MissingKey\n        ));\n        keys.add_key(pk2, Weight::new(1)).unwrap();\n        assert_eq!(keys.get(&pk2), Some(&Weight::new(1)));\n        keys.update_key(pk2, Weight::new(2)).unwrap();\n        assert_eq!(keys.get(&pk2), Some(&Weight::new(2)));\n    }\n\n    #[test]\n    fn associated_keys_calculate_keys_once() {\n        let key_1 = AccountHash::new([0; 32]);\n        let key_2 = AccountHash::new([1; 32]);\n        let key_3 = AccountHash::new([2; 32]);\n        let mut keys = 
AssociatedKeys::default();\n\n        keys.add_key(key_2, Weight::new(2))\n            .expect(\"should add key_1\");\n        keys.add_key(key_1, Weight::new(1))\n            .expect(\"should add key_1\");\n        keys.add_key(key_3, Weight::new(3))\n            .expect(\"should add key_1\");\n\n        assert_eq!(\n            keys.calculate_keys_weight(&BTreeSet::from_iter(vec![\n                key_1, key_2, key_3, key_1, key_2, key_3,\n            ])),\n            Weight::new(1 + 2 + 3)\n        );\n    }\n\n    #[test]\n    fn associated_keys_total_weight() {\n        let associated_keys = {\n            let mut res = AssociatedKeys::new(AccountHash::new([1u8; 32]), Weight::new(1));\n            res.add_key(AccountHash::new([2u8; 32]), Weight::new(11))\n                .expect(\"should add key 1\");\n            res.add_key(AccountHash::new([3u8; 32]), Weight::new(12))\n                .expect(\"should add key 2\");\n            res.add_key(AccountHash::new([4u8; 32]), Weight::new(13))\n                .expect(\"should add key 3\");\n            res\n        };\n        assert_eq!(\n            associated_keys.total_keys_weight(),\n            Weight::new(1 + 11 + 12 + 13)\n        );\n    }\n\n    #[test]\n    fn associated_keys_total_weight_excluding() {\n        let identity_key = AccountHash::new([1u8; 32]);\n        let identity_key_weight = Weight::new(1);\n\n        let key_1 = AccountHash::new([2u8; 32]);\n        let key_1_weight = Weight::new(11);\n\n        let key_2 = AccountHash::new([3u8; 32]);\n        let key_2_weight = Weight::new(12);\n\n        let key_3 = AccountHash::new([4u8; 32]);\n        let key_3_weight = Weight::new(13);\n\n        let associated_keys = {\n            let mut res = AssociatedKeys::new(identity_key, identity_key_weight);\n            res.add_key(key_1, key_1_weight).expect(\"should add key 1\");\n            res.add_key(key_2, key_2_weight).expect(\"should add key 2\");\n            res.add_key(key_3, 
key_3_weight).expect(\"should add key 3\");\n            res\n        };\n        assert_eq!(\n            associated_keys.total_keys_weight_excluding(key_2),\n            Weight::new(identity_key_weight.value() + key_1_weight.value() + key_3_weight.value())\n        );\n    }\n\n    #[test]\n    fn overflowing_keys_weight() {\n        let identity_key = AccountHash::new([1u8; 32]);\n        let key_1 = AccountHash::new([2u8; 32]);\n        let key_2 = AccountHash::new([3u8; 32]);\n        let key_3 = AccountHash::new([4u8; 32]);\n\n        let identity_key_weight = Weight::new(250);\n        let weight_1 = Weight::new(1);\n        let weight_2 = Weight::new(2);\n        let weight_3 = Weight::new(3);\n\n        let saturated_weight = Weight::new(u8::MAX);\n\n        let associated_keys = {\n            let mut res = AssociatedKeys::new(identity_key, identity_key_weight);\n\n            res.add_key(key_1, weight_1).expect(\"should add key 1\");\n            res.add_key(key_2, weight_2).expect(\"should add key 2\");\n            res.add_key(key_3, weight_3).expect(\"should add key 3\");\n            res\n        };\n\n        assert_eq!(\n            associated_keys.calculate_keys_weight(&BTreeSet::from_iter(vec![\n                identity_key, // 250\n                key_1,        // 251\n                key_2,        // 253\n                key_3,        // 256 - error\n            ])),\n            saturated_weight,\n        );\n    }\n\n    #[test]\n    fn serialization_roundtrip() {\n        let mut keys = AssociatedKeys::default();\n        keys.add_key(AccountHash::new([1; 32]), Weight::new(1))\n            .unwrap();\n        keys.add_key(AccountHash::new([2; 32]), Weight::new(2))\n            .unwrap();\n        keys.add_key(AccountHash::new([3; 32]), Weight::new(3))\n            .unwrap();\n        bytesrepr::test_serialization_roundtrip(&keys);\n    }\n}\n"
  },
  {
    "path": "types/src/addressable_entity/entry_points.rs",
    "content": "use core::fmt::{Debug, Display, Formatter};\n\nuse alloc::{\n    collections::BTreeMap,\n    format,\n    string::{String, ToString},\n    vec::Vec,\n};\n\nuse blake2::{\n    digest::{Update, VariableOutput},\n    VarBlake2b,\n};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse num_derive::FromPrimitive;\nuse num_traits::FromPrimitive;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    addressable_entity::FromStrError,\n    bytesrepr,\n    bytesrepr::{Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    checksummed_hex, CLType, EntityAddr, Group, HashAddr, BLAKE2B_DIGEST_LENGTH, KEY_HASH_LENGTH,\n};\n\nconst V1_ENTRY_POINT_TAG: u8 = 0;\n\nconst V1_ENTRY_POINT_PREFIX: &str = \"entry-point-v1-\";\n\n/// Context of method execution\n///\n/// Most significant bit represents version i.e.\n/// - 0b0 -> 0.x/1.x (session & contracts)\n/// - 0b1 -> 2.x and later (introduced installer, utility entry points)\n#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, FromPrimitive)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub enum EntryPointType {\n    /// Runs using the calling entity's context.\n    /// In v1.x this was used for both \"session\" code run using the originating\n    /// Account's context, and also for \"StoredSession\" code that ran in the\n    /// caller's context. While this made systemic sense due to the way the runtime\n    /// context nesting works, this dual usage was very confusing to most human beings.\n    ///\n    /// In v2.x the renamed Caller variant is exclusively used for wasm run using the initiating\n    /// account entity's context. 
Previously installed 1.x stored session code should\n    /// continue to work as the binary value matches but we no longer allow such logic\n    /// to be upgraded, nor do we allow new stored session to be installed.\n    Caller = 0b00000000,\n    /// Runs using the called entity's context.\n    Called = 0b00000001,\n    /// Extract a subset of bytecode and installs it as a new smart contract.\n    /// Runs using the called entity's context.\n    Factory = 0b10000000,\n}\n\nimpl EntryPointType {\n    /// Checks if entry point type is introduced before 2.0.\n    ///\n    /// This method checks if there is a bit pattern for entry point types introduced in 2.0.\n    ///\n    /// If this bit is missing, that means given entry point type was defined in pre-2.0 world.\n    pub fn is_legacy_pattern(&self) -> bool {\n        (*self as u8) & 0b10000000 == 0\n    }\n\n    /// Get the bit pattern.\n    pub fn bits(self) -> u8 {\n        self as u8\n    }\n\n    /// Returns true if entry point type is invalid for the context.\n    pub fn is_invalid_context(&self) -> bool {\n        match self {\n            EntryPointType::Caller => true,\n            EntryPointType::Called | EntryPointType::Factory => false,\n        }\n    }\n}\n\nimpl ToBytes for EntryPointType {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.bits().to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        1\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        writer.push(self.bits());\n        Ok(())\n    }\n}\n\nimpl FromBytes for EntryPointType {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (value, bytes) = u8::from_bytes(bytes)?;\n        let entry_point_type =\n            EntryPointType::from_u8(value).ok_or(bytesrepr::Error::Formatting)?;\n        Ok((entry_point_type, bytes))\n    }\n}\n\n/// Default name for an entry point.\npub const DEFAULT_ENTRY_POINT_NAME: 
&str = \"call\";\n\n/// Collection of entry point parameters.\npub type Parameters = Vec<Parameter>;\n\n/// An enum specifying who pays for the invocation and execution of the entrypoint.\n#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[repr(u8)]\npub enum EntryPointPayment {\n    /// The caller must cover costs\n    Caller = 0,\n    /// Will cover costs if directly invoked.\n    DirectInvocationOnly = 1,\n    /// will cover costs to execute self including any subsequent invoked contracts\n    SelfOnward = 2,\n}\n\nimpl EntryPointPayment {\n    /// Contract will pay if directly invoked.\n    pub fn will_pay_direct_invocation(&self) -> bool {\n        match self {\n            EntryPointPayment::Caller => false,\n            EntryPointPayment::DirectInvocationOnly | EntryPointPayment::SelfOnward => true,\n        }\n    }\n}\n\nimpl ToBytes for EntryPointPayment {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut result = bytesrepr::unchecked_allocate_buffer(self);\n        self.write_bytes(&mut result)?;\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        writer.push(*self as u8);\n        Ok(())\n    }\n}\n\nimpl FromBytes for EntryPointPayment {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (id, rem) = u8::from_bytes(bytes)?;\n        let tag = match id {\n            tag if tag == EntryPointPayment::Caller as u8 => EntryPointPayment::Caller,\n            tag if tag == EntryPointPayment::DirectInvocationOnly as u8 => {\n                EntryPointPayment::DirectInvocationOnly\n            }\n            tag if tag == EntryPointPayment::SelfOnward as u8 => EntryPointPayment::SelfOnward,\n            _ => return Err(Error::Formatting),\n   
     };\n        Ok((tag, rem))\n    }\n}\n\n/// Type signature of a method. Order of arguments matter since can be\n/// referenced by index as well as name.\n#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct EntityEntryPoint {\n    name: String,\n    args: Parameters, // one argument vec![Parameter::new(\"chunked\", CLType::Any)]\n    ret: CLType,\n    access: EntryPointAccess,\n    entry_point_type: EntryPointType,\n    entry_point_payment: EntryPointPayment,\n}\n\nimpl From<EntityEntryPoint>\n    for (\n        String,\n        Parameters,\n        CLType,\n        EntryPointAccess,\n        EntryPointType,\n        EntryPointPayment,\n    )\n{\n    fn from(entry_point: EntityEntryPoint) -> Self {\n        (\n            entry_point.name,\n            entry_point.args,\n            entry_point.ret,\n            entry_point.access,\n            entry_point.entry_point_type,\n            entry_point.entry_point_payment,\n        )\n    }\n}\n\nimpl EntityEntryPoint {\n    /// `EntryPoint` constructor.\n    pub fn new<T: Into<String>>(\n        name: T,\n        args: Parameters,\n        ret: CLType,\n        access: EntryPointAccess,\n        entry_point_type: EntryPointType,\n        entry_point_payment: EntryPointPayment,\n    ) -> Self {\n        EntityEntryPoint {\n            name: name.into(),\n            args,\n            ret,\n            access,\n            entry_point_type,\n            entry_point_payment,\n        }\n    }\n\n    /// Create a default [`EntityEntryPoint`] with specified name.\n    pub fn default_with_name<T: Into<String>>(name: T) -> Self {\n        EntityEntryPoint {\n            name: name.into(),\n            ..Default::default()\n        }\n    }\n\n    /// Get name.\n    pub fn name(&self) -> &str {\n        &self.name\n    }\n\n    /// Get access enum.\n    pub fn access(&self) -> 
&EntryPointAccess {\n        &self.access\n    }\n\n    /// Get the arguments for this method.\n    pub fn args(&self) -> &[Parameter] {\n        self.args.as_slice()\n    }\n\n    /// Get the return type.\n    pub fn ret(&self) -> &CLType {\n        &self.ret\n    }\n\n    /// Obtains entry point\n    pub fn entry_point_type(&self) -> EntryPointType {\n        self.entry_point_type\n    }\n\n    /// Obtains entry point payment\n    pub fn entry_point_payment(&self) -> EntryPointPayment {\n        self.entry_point_payment\n    }\n}\n\nimpl Default for EntityEntryPoint {\n    /// constructor for a public session `EntryPoint` that takes no args and returns `Unit`\n    fn default() -> Self {\n        EntityEntryPoint {\n            name: DEFAULT_ENTRY_POINT_NAME.to_string(),\n            args: Vec::new(),\n            ret: CLType::Unit,\n            access: EntryPointAccess::Public,\n            entry_point_type: EntryPointType::Caller,\n            entry_point_payment: EntryPointPayment::Caller,\n        }\n    }\n}\n\nimpl ToBytes for EntityEntryPoint {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.name.serialized_length()\n            + self.args.serialized_length()\n            + self.ret.serialized_length()\n            + self.access.serialized_length()\n            + self.entry_point_type.serialized_length()\n            + self.entry_point_payment.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.name.write_bytes(writer)?;\n        self.args.write_bytes(writer)?;\n        self.ret.append_bytes(writer)?;\n        self.access.write_bytes(writer)?;\n        self.entry_point_type.write_bytes(writer)?;\n        self.entry_point_payment.write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl 
FromBytes for EntityEntryPoint {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (name, bytes) = String::from_bytes(bytes)?;\n        let (args, bytes) = Vec::<Parameter>::from_bytes(bytes)?;\n        let (ret, bytes) = CLType::from_bytes(bytes)?;\n        let (access, bytes) = EntryPointAccess::from_bytes(bytes)?;\n        let (entry_point_type, bytes) = EntryPointType::from_bytes(bytes)?;\n        let (entry_point_payment, bytes) = EntryPointPayment::from_bytes(bytes)?;\n\n        Ok((\n            EntityEntryPoint {\n                name,\n                args,\n                ret,\n                access,\n                entry_point_type,\n                entry_point_payment,\n            },\n            bytes,\n        ))\n    }\n}\n\n/// Enum describing the possible access control options for a contract entry\n/// point (method).\n#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub enum EntryPointAccess {\n    /// Anyone can call this method (no access controls).\n    Public,\n    /// Only users from the listed groups may call this method. 
Note: if the\n    /// list is empty then this method is not callable from outside the\n    /// contract.\n    Groups(Vec<Group>),\n    /// Can't be accessed directly but are kept in the derived wasm bytes.\n    Template,\n}\n\nconst ENTRYPOINTACCESS_PUBLIC_TAG: u8 = 1;\nconst ENTRYPOINTACCESS_GROUPS_TAG: u8 = 2;\nconst ENTRYPOINTACCESS_ABSTRACT_TAG: u8 = 3;\n\nimpl EntryPointAccess {\n    /// Constructor for access granted to only listed groups.\n    pub fn groups(labels: &[&str]) -> Self {\n        let list: Vec<Group> = labels\n            .iter()\n            .map(|s| Group::new(String::from(*s)))\n            .collect();\n        EntryPointAccess::Groups(list)\n    }\n}\n\nimpl ToBytes for EntryPointAccess {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n\n        match self {\n            EntryPointAccess::Public => {\n                result.push(ENTRYPOINTACCESS_PUBLIC_TAG);\n            }\n            EntryPointAccess::Groups(groups) => {\n                result.push(ENTRYPOINTACCESS_GROUPS_TAG);\n                result.append(&mut groups.to_bytes()?);\n            }\n            EntryPointAccess::Template => {\n                result.push(ENTRYPOINTACCESS_ABSTRACT_TAG);\n            }\n        }\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        match self {\n            EntryPointAccess::Public => 1,\n            EntryPointAccess::Groups(groups) => 1 + groups.serialized_length(),\n            EntryPointAccess::Template => 1,\n        }\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            EntryPointAccess::Public => {\n                writer.push(ENTRYPOINTACCESS_PUBLIC_TAG);\n            }\n            EntryPointAccess::Groups(groups) => {\n                writer.push(ENTRYPOINTACCESS_GROUPS_TAG);\n                groups.write_bytes(writer)?;\n            }\n            
EntryPointAccess::Template => {\n                writer.push(ENTRYPOINTACCESS_ABSTRACT_TAG);\n            }\n        }\n        Ok(())\n    }\n}\n\nimpl FromBytes for EntryPointAccess {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, bytes) = u8::from_bytes(bytes)?;\n\n        match tag {\n            ENTRYPOINTACCESS_PUBLIC_TAG => Ok((EntryPointAccess::Public, bytes)),\n            ENTRYPOINTACCESS_GROUPS_TAG => {\n                let (groups, bytes) = Vec::<Group>::from_bytes(bytes)?;\n                let result = EntryPointAccess::Groups(groups);\n                Ok((result, bytes))\n            }\n            ENTRYPOINTACCESS_ABSTRACT_TAG => Ok((EntryPointAccess::Template, bytes)),\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n/// Parameter to a method\n#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct Parameter {\n    name: String,\n    cl_type: CLType,\n}\n\nimpl Parameter {\n    /// `Parameter` constructor.\n    pub fn new<T: Into<String>>(name: T, cl_type: CLType) -> Self {\n        Parameter {\n            name: name.into(),\n            cl_type,\n        }\n    }\n\n    /// Get the type of this argument.\n    pub fn cl_type(&self) -> &CLType {\n        &self.cl_type\n    }\n\n    /// Get a reference to the parameter's name.\n    pub fn name(&self) -> &str {\n        &self.name\n    }\n}\n\nimpl From<Parameter> for (String, CLType) {\n    fn from(parameter: Parameter) -> Self {\n        (parameter.name, parameter.cl_type)\n    }\n}\n\nimpl ToBytes for Parameter {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = ToBytes::to_bytes(&self.name)?;\n        self.cl_type.append_bytes(&mut result)?;\n\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        
ToBytes::serialized_length(&self.name) + self.cl_type.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.name.write_bytes(writer)?;\n        self.cl_type.append_bytes(writer)\n    }\n}\n\nimpl FromBytes for Parameter {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (name, bytes) = String::from_bytes(bytes)?;\n        let (cl_type, bytes) = CLType::from_bytes(bytes)?;\n\n        Ok((Parameter { name, cl_type }, bytes))\n    }\n}\n\n/// Collection of named entry points.\n#[derive(Clone, PartialEq, Eq, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct EntryPoints(BTreeMap<String, EntityEntryPoint>);\n\nimpl ToBytes for EntryPoints {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.0.write_bytes(writer)\n    }\n}\n\nimpl FromBytes for EntryPoints {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (entry_points_map, remainder) =\n            BTreeMap::<String, EntityEntryPoint>::from_bytes(bytes)?;\n        Ok((EntryPoints(entry_points_map), remainder))\n    }\n}\n\nimpl Default for EntryPoints {\n    fn default() -> Self {\n        let mut entry_points = EntryPoints::new();\n        let entry_point = EntityEntryPoint::default();\n        entry_points.add_entry_point(entry_point);\n        entry_points\n    }\n}\n\nimpl EntryPoints {\n    /// Constructs a new, empty `EntryPoints`.\n    pub const fn new() -> EntryPoints {\n        EntryPoints(BTreeMap::<String, EntityEntryPoint>::new())\n    }\n\n    /// Constructs a new `EntryPoints` with a single entry for the default `EntryPoint`.\n    pub fn new_with_default_entry_point() -> Self {\n        let mut 
entry_points = EntryPoints::new();\n        let entry_point = EntityEntryPoint::default();\n        entry_points.add_entry_point(entry_point);\n        entry_points\n    }\n\n    /// Adds new [`EntityEntryPoint`].\n    pub fn add_entry_point(&mut self, entry_point: EntityEntryPoint) {\n        self.0.insert(entry_point.name().to_string(), entry_point);\n    }\n\n    /// Checks if given [`EntityEntryPoint`] exists.\n    pub fn has_entry_point(&self, entry_point_name: &str) -> bool {\n        self.0.contains_key(entry_point_name)\n    }\n\n    /// Gets an existing [`EntityEntryPoint`] by its name.\n    pub fn get(&self, entry_point_name: &str) -> Option<&EntityEntryPoint> {\n        self.0.get(entry_point_name)\n    }\n\n    /// Returns iterator for existing entry point names.\n    pub fn keys(&self) -> impl Iterator<Item = &String> {\n        self.0.keys()\n    }\n\n    /// Takes all entry points.\n    pub fn take_entry_points(self) -> Vec<EntityEntryPoint> {\n        self.0.into_values().collect()\n    }\n\n    /// Returns the length of the entry points\n    pub fn len(&self) -> usize {\n        self.0.len()\n    }\n\n    /// Checks if the `EntryPoints` is empty.\n    pub fn is_empty(&self) -> bool {\n        self.0.is_empty()\n    }\n\n    /// Checks if any of the entry points are of the type Session.\n    pub fn contains_stored_session(&self) -> bool {\n        self.0\n            .values()\n            .any(|entry_point| entry_point.entry_point_type == EntryPointType::Caller)\n    }\n}\n\nimpl From<Vec<EntityEntryPoint>> for EntryPoints {\n    fn from(entry_points: Vec<EntityEntryPoint>) -> EntryPoints {\n        let entries = entry_points\n            .into_iter()\n            .map(|entry_point| (String::from(entry_point.name()), entry_point))\n            .collect();\n        EntryPoints(entries)\n    }\n}\n\n/// The entry point address.\n#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", 
derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub enum EntryPointAddr {\n    /// The address for a V1 Entrypoint.\n    VmCasperV1 {\n        /// The addr of the entity.\n        entity_addr: EntityAddr,\n        /// The 32 byte hash of the name of the entry point\n        name_bytes: [u8; KEY_HASH_LENGTH],\n    },\n}\n\nimpl EntryPointAddr {\n    /// Returns a `VmCasperV1` variant of the entry point address.\n    pub fn new_v1_entry_point_addr(\n        entity_addr: EntityAddr,\n        name: &str,\n    ) -> Result<Self, bytesrepr::Error> {\n        let bytes = name.to_bytes()?;\n        let mut hasher = {\n            match VarBlake2b::new(BLAKE2B_DIGEST_LENGTH) {\n                Ok(hasher) => hasher,\n                Err(_) => return Err(bytesrepr::Error::Formatting),\n            }\n        };\n        hasher.update(bytes);\n        // NOTE: Assumed safe as size of `HashAddr` equals to the output provided by hasher.\n        let mut name_bytes = HashAddr::default();\n        hasher.finalize_variable(|hash| name_bytes.clone_from_slice(hash));\n        Ok(Self::VmCasperV1 {\n            entity_addr,\n            name_bytes,\n        })\n    }\n\n    /// Returns the encapsulated [`EntityAddr`].\n    pub fn entity_addr(&self) -> EntityAddr {\n        match self {\n            EntryPointAddr::VmCasperV1 { entity_addr, .. 
} => *entity_addr,\n        }\n    }\n\n    /// Returns the formatted String representation of the [`EntryPointAddr`].\n    pub fn to_formatted_string(&self) -> String {\n        format!(\"{}\", self)\n    }\n\n    /// Returns the address from the formatted string.\n    pub fn from_formatted_str(input: &str) -> Result<Self, FromStrError> {\n        if let Some(entry_point_v1) = input.strip_prefix(V1_ENTRY_POINT_PREFIX) {\n            if let Some((entity_addr_str, string_bytes_str)) = entry_point_v1.rsplit_once('-') {\n                let entity_addr = EntityAddr::from_formatted_str(entity_addr_str)?;\n                let string_bytes =\n                    checksummed_hex::decode(string_bytes_str).map_err(FromStrError::Hex)?;\n                let (name_bytes, _) =\n                    FromBytes::from_vec(string_bytes).map_err(FromStrError::BytesRepr)?;\n                return Ok(Self::VmCasperV1 {\n                    entity_addr,\n                    name_bytes,\n                });\n            }\n        }\n\n        Err(FromStrError::InvalidPrefix)\n    }\n}\n\nimpl ToBytes for EntryPointAddr {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        match self {\n            EntryPointAddr::VmCasperV1 {\n                entity_addr,\n                name_bytes: named_bytes,\n            } => {\n                buffer.insert(0, V1_ENTRY_POINT_TAG);\n                buffer.append(&mut entity_addr.to_bytes()?);\n                buffer.append(&mut named_bytes.to_bytes()?);\n            }\n        }\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                EntryPointAddr::VmCasperV1 {\n                    entity_addr,\n                    name_bytes: named_bytes,\n                } => entity_addr.serialized_length() + named_bytes.serialized_length(),\n            }\n    }\n}\n\nimpl FromBytes for EntryPointAddr 
{\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (tag, bytes) = u8::from_bytes(bytes)?;\n        match tag {\n            V1_ENTRY_POINT_TAG => {\n                let (entity_addr, bytes) = EntityAddr::from_bytes(bytes)?;\n                let (name_bytes, bytes) = FromBytes::from_bytes(bytes)?;\n                Ok((\n                    Self::VmCasperV1 {\n                        entity_addr,\n                        name_bytes,\n                    },\n                    bytes,\n                ))\n            }\n            _ => Err(Error::Formatting),\n        }\n    }\n}\n\nimpl Display for EntryPointAddr {\n    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {\n        match self {\n            EntryPointAddr::VmCasperV1 {\n                entity_addr,\n                name_bytes,\n            } => {\n                write!(\n                    f,\n                    \"{}{}-{}\",\n                    V1_ENTRY_POINT_PREFIX,\n                    entity_addr,\n                    base16::encode_lower(name_bytes)\n                )\n            }\n        }\n    }\n}\n\nimpl Debug for EntryPointAddr {\n    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {\n        match self {\n            EntryPointAddr::VmCasperV1 {\n                entity_addr,\n                name_bytes,\n            } => {\n                write!(f, \"EntryPointAddr({:?}-{:?})\", entity_addr, name_bytes)\n            }\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<EntryPointAddr> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> EntryPointAddr {\n        EntryPointAddr::VmCasperV1 {\n            entity_addr: rng.gen(),\n            name_bytes: rng.gen(),\n        }\n    }\n}\n\n/// The encaspulated representation of entrypoints.\n#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = 
\"json-schema\", derive(JsonSchema))]\npub enum EntryPointValue {\n    /// Entrypoints to be executed against the V1 Casper VM.\n    V1CasperVm(EntityEntryPoint),\n}\n\nimpl EntryPointValue {\n    /// Returns [`EntryPointValue::V1CasperVm`] variant.\n    pub fn new_v1_entry_point_value(entry_point: EntityEntryPoint) -> Self {\n        Self::V1CasperVm(entry_point)\n    }\n\n    /// Entry point will cover payment if directly invoked.\n    pub fn will_pay_direct_invocation(&self) -> bool {\n        match self {\n            EntryPointValue::V1CasperVm(ep) => ep.entry_point_payment.will_pay_direct_invocation(),\n        }\n    }\n}\n\nimpl ToBytes for EntryPointValue {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                EntryPointValue::V1CasperVm(entry_point) => entry_point.serialized_length(),\n            }\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        match self {\n            EntryPointValue::V1CasperVm(entry_point) => {\n                writer.push(V1_ENTRY_POINT_TAG);\n                entry_point.write_bytes(writer)?;\n            }\n        }\n        Ok(())\n    }\n}\n\nimpl FromBytes for EntryPointValue {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            V1_ENTRY_POINT_TAG => {\n                let (entry_point, remainder) = EntityEntryPoint::from_bytes(remainder)?;\n                Ok((Self::V1CasperVm(entry_point), remainder))\n            }\n            _ => Err(Error::Formatting),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn entry_point_type_serialization_roundtrip() {\n        let vm1 = EntryPointAddr::VmCasperV1 
{\n            entity_addr: EntityAddr::new_smart_contract([42; 32]),\n            name_bytes: [99; 32],\n        };\n        bytesrepr::test_serialization_roundtrip(&vm1);\n    }\n}\n"
  },
  {
    "path": "types/src/addressable_entity/error.rs",
    "content": "use core::{\n    array::TryFromSliceError,\n    fmt::{self, Display, Formatter},\n};\n\n// This error type is not intended to be used by third party crates.\n#[doc(hidden)]\n#[derive(Debug, Eq, PartialEq)]\npub struct TryFromIntError(pub ());\n\n/// Error returned when decoding an `AccountHash` from a formatted string.\n#[derive(Debug)]\n#[non_exhaustive]\npub enum FromAccountHashStrError {\n    /// The prefix is invalid.\n    InvalidPrefix,\n    /// The hash is not valid hex.\n    Hex(base16::DecodeError),\n    /// The hash is the wrong length.\n    Hash(TryFromSliceError),\n}\n\nimpl From<base16::DecodeError> for FromAccountHashStrError {\n    fn from(error: base16::DecodeError) -> Self {\n        FromAccountHashStrError::Hex(error)\n    }\n}\n\nimpl From<TryFromSliceError> for FromAccountHashStrError {\n    fn from(error: TryFromSliceError) -> Self {\n        FromAccountHashStrError::Hash(error)\n    }\n}\n\nimpl Display for FromAccountHashStrError {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        match self {\n            FromAccountHashStrError::InvalidPrefix => write!(f, \"prefix is not 'account-hash-'\"),\n            FromAccountHashStrError::Hex(error) => {\n                write!(f, \"failed to decode address portion from hex: {}\", error)\n            }\n            FromAccountHashStrError::Hash(error) => {\n                write!(f, \"address portion is wrong length: {}\", error)\n            }\n        }\n    }\n}\n\n/// Associated error type of `TryFrom<&[u8]>` for [`AccountHash`](super::AccountHash).\n#[derive(Debug)]\npub struct TryFromSliceForAccountHashError(());\n"
  },
  {
    "path": "types/src/addressable_entity/named_keys.rs",
    "content": "use alloc::{collections::BTreeMap, string::String, vec::Vec};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\nuse serde_map_to_array::{BTreeMapToArray, KeyValueLabels};\n\n#[cfg(feature = \"json-schema\")]\nuse crate::execution::execution_result_v1::NamedKey;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    CLType, CLTyped, Key,\n};\n\n/// A collection of named keys.\n#[derive(Clone, Eq, PartialEq, Default, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\n#[rustfmt::skip]\npub struct NamedKeys(\n    #[serde(with = \"BTreeMapToArray::<String, Key, Labels>\")]\n    #[cfg_attr(feature = \"json-schema\", schemars(with = \"Vec<NamedKey>\"))]\n    BTreeMap<String, Key>,\n);\n\nimpl NamedKeys {\n    /// Constructs a new, empty `NamedKeys`.\n    pub const fn new() -> Self {\n        NamedKeys(BTreeMap::new())\n    }\n\n    /// Consumes `self`, returning the wrapped map.\n    pub fn into_inner(self) -> BTreeMap<String, Key> {\n        self.0\n    }\n\n    /// Inserts a named key.\n    ///\n    /// If the map did not have this name present, `None` is returned.  
If the map did have this\n    /// name present, the `Key` is updated, and the old `Key` is returned.\n    pub fn insert(&mut self, name: String, key: Key) -> Option<Key> {\n        self.0.insert(name, key)\n    }\n\n    /// Moves all elements from `other` into `self`.\n    pub fn append(&mut self, mut other: Self) {\n        self.0.append(&mut other.0)\n    }\n\n    /// Removes a named `Key`, returning the `Key` if it existed in the collection.\n    pub fn remove(&mut self, name: &str) -> Option<Key> {\n        self.0.remove(name)\n    }\n\n    /// Returns a reference to the `Key` under the given `name` if any.\n    pub fn get(&self, name: &str) -> Option<&Key> {\n        self.0.get(name)\n    }\n\n    /// Returns `true` if the named `Key` exists in the collection.\n    pub fn contains(&self, name: &str) -> bool {\n        self.0.contains_key(name)\n    }\n\n    /// Returns an iterator over the names.\n    pub fn names(&self) -> impl Iterator<Item = &String> {\n        self.0.keys()\n    }\n\n    /// Returns an iterator over the `Key`s (i.e. the map's values).\n    pub fn keys(&self) -> impl Iterator<Item = &Key> {\n        self.0.values()\n    }\n\n    /// Returns a mutable iterator over the `Key`s (i.e. 
the map's values).\n    pub fn keys_mut(&mut self) -> impl Iterator<Item = &mut Key> {\n        self.0.values_mut()\n    }\n\n    /// Returns an iterator over the name-key pairs.\n    pub fn iter(&self) -> impl Iterator<Item = (&String, &Key)> {\n        self.0.iter()\n    }\n\n    /// Returns the number of named `Key`s.\n    pub fn len(&self) -> usize {\n        self.0.len()\n    }\n\n    /// Returns `true` if there are no named `Key`s.\n    pub fn is_empty(&self) -> bool {\n        self.0.is_empty()\n    }\n}\n\nimpl From<BTreeMap<String, Key>> for NamedKeys {\n    fn from(value: BTreeMap<String, Key>) -> Self {\n        NamedKeys(value)\n    }\n}\n\nimpl ToBytes for NamedKeys {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.0.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n}\n\nimpl FromBytes for NamedKeys {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (named_keys, remainder) = BTreeMap::<String, Key>::from_bytes(bytes)?;\n        Ok((NamedKeys(named_keys), remainder))\n    }\n}\n\nimpl CLTyped for NamedKeys {\n    fn cl_type() -> CLType {\n        BTreeMap::<String, Key>::cl_type()\n    }\n}\n\nstruct Labels;\n\nimpl KeyValueLabels for Labels {\n    const KEY: &'static str = \"name\";\n    const VALUE: &'static str = \"key\";\n}\n\n#[cfg(test)]\nmod tests {\n    use rand::Rng;\n\n    use super::*;\n    use crate::testing::TestRng;\n\n    /// `NamedKeys` was previously (pre node v2.0.0) just an alias for `BTreeMap<String, Key>`.\n    /// Check if we serialize as the old form, that can deserialize to the new.\n    #[test]\n    fn should_be_backwards_compatible() {\n        let rng = &mut TestRng::new();\n       
 let mut named_keys = NamedKeys::new();\n        assert!(named_keys.insert(\"a\".to_string(), rng.gen()).is_none());\n        assert!(named_keys.insert(\"bb\".to_string(), rng.gen()).is_none());\n        assert!(named_keys.insert(\"ccc\".to_string(), rng.gen()).is_none());\n\n        let serialized_old = bincode::serialize(&named_keys.0).unwrap();\n        let parsed_new = bincode::deserialize(&serialized_old).unwrap();\n        assert_eq!(named_keys, parsed_new);\n\n        let serialized_old = bytesrepr::serialize(&named_keys.0).unwrap();\n        let parsed_new = bytesrepr::deserialize(serialized_old).unwrap();\n        assert_eq!(named_keys, parsed_new);\n    }\n\n    #[test]\n    fn should_match_field_names() {\n        // this test was written to ensure that the schema generated by schemars matches the serde\n        // encoding, both are configured using attributes and they can get out of sync\n        let mut named_keys = NamedKeys::new();\n        named_keys.insert(\"key\".to_string(), Key::Hash([0u8; 32]));\n        assert_eq!(\n            serde_json::to_value(&named_keys).expect(\"should serialize\"),\n            serde_json::json!([{\n                        Labels::KEY: \"key\",\n                        Labels::VALUE: \"hash-0000000000000000000000000000000000000000000000000000000000000000\"\n            }])\n        );\n    }\n}\n"
  },
  {
    "path": "types/src/addressable_entity/weight.rs",
    "content": "use alloc::vec::Vec;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\n\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    CLType, CLTyped,\n};\n\n/// The number of bytes in a serialized [`Weight`].\npub const WEIGHT_SERIALIZED_LENGTH: usize = U8_SERIALIZED_LENGTH;\n\n/// The weight associated with public keys in an account's associated keys.\n#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    schemars(rename = \"EntityAssociatedKeyWeight\")\n)]\npub struct Weight(u8);\n\nimpl Weight {\n    /// Constructs a new `Weight`.\n    pub const fn new(weight: u8) -> Weight {\n        Weight(weight)\n    }\n\n    /// Returns the value of `self` as a `u8`.\n    pub fn value(self) -> u8 {\n        self.0\n    }\n}\n\nimpl ToBytes for Weight {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        WEIGHT_SERIALIZED_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        writer.push(self.0);\n        Ok(())\n    }\n}\n\nimpl FromBytes for Weight {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (byte, rem) = u8::from_bytes(bytes)?;\n        Ok((Weight::new(byte), rem))\n    }\n}\n\nimpl CLTyped for Weight {\n    fn cl_type() -> CLType {\n        CLType::U8\n    }\n}\n"
  },
  {
    "path": "types/src/addressable_entity.rs",
    "content": "//! Data types for supporting contract headers feature.\n// TODO - remove once schemars stops causing warning.\n#![allow(clippy::field_reassign_with_default)]\n\npub mod action_thresholds;\nmod action_type;\npub mod associated_keys;\nmod entry_points;\nmod error;\n//mod named_keys;\nmod weight;\n\nuse alloc::{\n    collections::{btree_map::Entry, BTreeMap, BTreeSet},\n    format,\n    string::{String, ToString},\n    vec::Vec,\n};\nuse blake2::{\n    digest::{Update, VariableOutput},\n    VarBlake2b,\n};\nuse core::{\n    array::TryFromSliceError,\n    convert::{TryFrom, TryInto},\n    fmt::{self, Debug, Display, Formatter},\n    iter,\n};\n\n#[cfg(feature = \"json-schema\")]\nuse crate::SecretKey;\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse once_cell::sync::Lazy;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer};\n#[cfg(feature = \"json-schema\")]\nuse serde_map_to_array::KeyValueJsonSchema;\nuse serde_map_to_array::{BTreeMapToArray, KeyValueLabels};\n\npub use self::{\n    action_thresholds::ActionThresholds,\n    action_type::ActionType,\n    associated_keys::AssociatedKeys,\n    entry_points::{\n        EntityEntryPoint, EntryPointAccess, EntryPointAddr, EntryPointPayment, EntryPointType,\n        EntryPointValue, EntryPoints, Parameter, Parameters, DEFAULT_ENTRY_POINT_NAME,\n    },\n    error::{FromAccountHashStrError, TryFromIntError, TryFromSliceForAccountHashError},\n    weight::{Weight, WEIGHT_SERIALIZED_LENGTH},\n};\nuse crate::{\n    account::{\n        Account, AccountHash, AddKeyFailure, RemoveKeyFailure, SetThresholdFailure,\n        UpdateKeyFailure,\n    },\n    byte_code::ByteCodeHash,\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    
checksummed_hex,\n    contract_messages::TopicNameHash,\n    contracts::{Contract, ContractHash},\n    system::SystemEntityType,\n    uref::{self, URef},\n    AccessRights, ApiError, CLType, CLTyped, CLValue, CLValueError, ContextAccessRights, HashAddr,\n    Key, NamedKeys, PackageHash, ProtocolVersion, PublicKey, Tagged, BLAKE2B_DIGEST_LENGTH,\n    KEY_HASH_LENGTH,\n};\n\n/// Maximum number of distinct user groups.\npub const MAX_GROUPS: u8 = 10;\n/// Maximum number of URefs which can be assigned across all user groups.\npub const MAX_TOTAL_UREFS: usize = 100;\n\n/// The prefix applied to the hex-encoded `Addressable Entity` to produce a formatted string\n/// representation.\npub const ADDRESSABLE_ENTITY_STRING_PREFIX: &str = \"addressable-entity-\";\n/// The prefix applied to the hex-encoded `Entity` to produce a formatted string\n/// representation.\npub const ENTITY_PREFIX: &str = \"entity-\";\n/// The prefix applied to the hex-encoded `Account` to produce a formatted string\n/// representation.\npub const ACCOUNT_ENTITY_PREFIX: &str = \"account-\";\n/// The prefix applied to the hex-encoded `Smart contract` to produce a formatted string\n/// representation.\npub const CONTRACT_ENTITY_PREFIX: &str = \"contract-\";\n/// The prefix applied to the hex-encoded `System entity account or contract` to produce a formatted\n///  string representation.\npub const SYSTEM_ENTITY_PREFIX: &str = \"system-\";\n/// The prefix applied to the hex-encoded `Named Key` to produce a formatted string\n/// representation.\npub const NAMED_KEY_PREFIX: &str = \"named-key-\";\n\n/// Set of errors which may happen when working with contract headers.\n#[derive(Debug, PartialEq, Eq)]\n#[repr(u8)]\n#[non_exhaustive]\npub enum Error {\n    /// Attempt to override an existing or previously existing version with a\n    /// new header (this is not allowed to ensure immutability of a given\n    /// version).\n    /// ```\n    /// # use casper_types::addressable_entity::Error;\n    /// 
assert_eq!(1, Error::PreviouslyUsedVersion as u8);\n    /// ```\n    PreviouslyUsedVersion = 1,\n    /// Attempted to disable a contract that does not exist.\n    /// ```\n    /// # use casper_types::addressable_entity::Error;\n    /// assert_eq!(2, Error::EntityNotFound as u8);\n    /// ```\n    EntityNotFound = 2,\n    /// Attempted to create a user group which already exists (use the update\n    /// function to change an existing user group).\n    /// ```\n    /// # use casper_types::addressable_entity::Error;\n    /// assert_eq!(3, Error::GroupAlreadyExists as u8);\n    /// ```\n    GroupAlreadyExists = 3,\n    /// Attempted to add a new user group which exceeds the allowed maximum\n    /// number of groups.\n    /// ```\n    /// # use casper_types::addressable_entity::Error;\n    /// assert_eq!(4, Error::MaxGroupsExceeded as u8);\n    /// ```\n    MaxGroupsExceeded = 4,\n    /// Attempted to add a new URef to a group, which resulted in the total\n    /// number of URefs across all user groups to exceed the allowed maximum.\n    /// ```\n    /// # use casper_types::addressable_entity::Error;\n    /// assert_eq!(5, Error::MaxTotalURefsExceeded as u8);\n    /// ```\n    MaxTotalURefsExceeded = 5,\n    /// Attempted to remove a URef from a group, which does not exist in the\n    /// group.\n    /// ```\n    /// # use casper_types::addressable_entity::Error;\n    /// assert_eq!(6, Error::GroupDoesNotExist as u8);\n    /// ```\n    GroupDoesNotExist = 6,\n    /// Attempted to remove unknown URef from the group.\n    /// ```\n    /// # use casper_types::addressable_entity::Error;\n    /// assert_eq!(7, Error::UnableToRemoveURef as u8);\n    /// ```\n    UnableToRemoveURef = 7,\n    /// Group is in use by at least one active contract.\n    /// ```\n    /// # use casper_types::addressable_entity::Error;\n    /// assert_eq!(8, Error::GroupInUse as u8);\n    /// ```\n    GroupInUse = 8,\n    /// URef already exists in given group.\n    /// ```\n    /// # use 
casper_types::addressable_entity::Error;\n    /// assert_eq!(9, Error::URefAlreadyExists as u8);\n    /// ```\n    URefAlreadyExists = 9,\n}\n\nimpl TryFrom<u8> for Error {\n    type Error = ();\n\n    fn try_from(value: u8) -> Result<Self, Self::Error> {\n        let error = match value {\n            v if v == Self::PreviouslyUsedVersion as u8 => Self::PreviouslyUsedVersion,\n            v if v == Self::EntityNotFound as u8 => Self::EntityNotFound,\n            v if v == Self::GroupAlreadyExists as u8 => Self::GroupAlreadyExists,\n            v if v == Self::MaxGroupsExceeded as u8 => Self::MaxGroupsExceeded,\n            v if v == Self::MaxTotalURefsExceeded as u8 => Self::MaxTotalURefsExceeded,\n            v if v == Self::GroupDoesNotExist as u8 => Self::GroupDoesNotExist,\n            v if v == Self::UnableToRemoveURef as u8 => Self::UnableToRemoveURef,\n            v if v == Self::GroupInUse as u8 => Self::GroupInUse,\n            v if v == Self::URefAlreadyExists as u8 => Self::URefAlreadyExists,\n            _ => return Err(()),\n        };\n        Ok(error)\n    }\n}\n\n/// Associated error type of `TryFrom<&[u8]>` for `ContractHash`.\n#[derive(Debug)]\npub struct TryFromSliceForContractHashError(());\n\nimpl Display for TryFromSliceForContractHashError {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        write!(f, \"failed to retrieve from slice\")\n    }\n}\n\n/// An error from parsing a formatted contract string\n#[derive(Debug)]\n#[non_exhaustive]\npub enum FromStrError {\n    /// Invalid formatted string prefix.\n    InvalidPrefix,\n    /// Error when decoding a hex string\n    Hex(base16::DecodeError),\n    /// Error when parsing an account\n    Account(TryFromSliceForAccountHashError),\n    /// Error when parsing the hash.\n    Hash(TryFromSliceError),\n    /// Error when parsing an uref.\n    URef(uref::FromStrError),\n    /// Error parsing from bytes.\n    BytesRepr(bytesrepr::Error),\n}\n\nimpl From<base16::DecodeError> for 
FromStrError {\n    fn from(error: base16::DecodeError) -> Self {\n        FromStrError::Hex(error)\n    }\n}\n\nimpl From<TryFromSliceError> for FromStrError {\n    fn from(error: TryFromSliceError) -> Self {\n        FromStrError::Hash(error)\n    }\n}\n\nimpl From<uref::FromStrError> for FromStrError {\n    fn from(error: uref::FromStrError) -> Self {\n        FromStrError::URef(error)\n    }\n}\n\nimpl Display for FromStrError {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        match self {\n            FromStrError::InvalidPrefix => write!(f, \"invalid prefix\"),\n            FromStrError::Hex(error) => write!(f, \"decode from hex: {}\", error),\n            FromStrError::Hash(error) => write!(f, \"hash from string error: {}\", error),\n            FromStrError::URef(error) => write!(f, \"uref from string error: {:?}\", error),\n            FromStrError::Account(error) => {\n                write!(f, \"account hash from string error: {:?}\", error)\n            }\n            FromStrError::BytesRepr(error) => {\n                write!(f, \"bytesrepr error: {:?}\", error)\n            }\n        }\n    }\n}\n\n/// A newtype wrapping a `HashAddr` which references an [`AddressableEntity`] in the global state.\n#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"The hex-encoded address of the addressable entity.\")\n)]\npub struct AddressableEntityHash(\n    #[cfg_attr(feature = \"json-schema\", schemars(skip, with = \"String\"))] HashAddr,\n);\n\nimpl AddressableEntityHash {\n    /// Constructs a new `AddressableEntityHash` from the raw bytes of the contract hash.\n    pub const fn new(value: HashAddr) -> AddressableEntityHash {\n        AddressableEntityHash(value)\n    }\n\n    /// Get the entity addr for this entity hash from the corresponding entity.\n    pub fn 
entity_addr(&self, entity: AddressableEntity) -> EntityAddr {\n        entity.entity_addr(*self)\n    }\n\n    /// Returns the raw bytes of the contract hash as an array.\n    pub fn value(&self) -> HashAddr {\n        self.0\n    }\n\n    /// Returns the raw bytes of the contract hash as a `slice`.\n    pub fn as_bytes(&self) -> &[u8] {\n        &self.0\n    }\n\n    /// Formats the `AddressableEntityHash` for users getting and putting.\n    pub fn to_formatted_string(self) -> String {\n        format!(\n            \"{}{}\",\n            ADDRESSABLE_ENTITY_STRING_PREFIX,\n            base16::encode_lower(&self.0),\n        )\n    }\n\n    /// Hexadecimal representation of the hash.\n    pub fn to_hex_string(&self) -> String {\n        base16::encode_lower(&self.0)\n    }\n\n    /// Parses a string formatted as per `Self::to_formatted_string()` into a\n    /// `AddressableEntityHash`.\n    pub fn from_formatted_str(input: &str) -> Result<Self, FromStrError> {\n        let remainder = input\n            .strip_prefix(ADDRESSABLE_ENTITY_STRING_PREFIX)\n            .ok_or(FromStrError::InvalidPrefix)?;\n        let bytes = HashAddr::try_from(checksummed_hex::decode(remainder)?.as_ref())?;\n        Ok(AddressableEntityHash(bytes))\n    }\n}\n\nimpl From<ContractHash> for AddressableEntityHash {\n    fn from(contract_hash: ContractHash) -> Self {\n        AddressableEntityHash::new(contract_hash.value())\n    }\n}\n\nimpl Display for AddressableEntityHash {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"{}\", base16::encode_lower(&self.0))\n    }\n}\n\nimpl Debug for AddressableEntityHash {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        write!(\n            f,\n            \"AddressableEntityHash({})\",\n            base16::encode_lower(&self.0)\n        )\n    }\n}\n\nimpl CLTyped for AddressableEntityHash {\n    fn cl_type() -> CLType {\n        CLType::ByteArray(KEY_HASH_LENGTH as u32)\n    }\n}\n\nimpl ToBytes for 
AddressableEntityHash {\n    #[inline(always)]\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    #[inline(always)]\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n\n    #[inline(always)]\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        writer.extend_from_slice(&self.0);\n        Ok(())\n    }\n}\n\nimpl FromBytes for AddressableEntityHash {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (bytes, rem) = FromBytes::from_bytes(bytes)?;\n        Ok((AddressableEntityHash::new(bytes), rem))\n    }\n}\n\nimpl From<[u8; 32]> for AddressableEntityHash {\n    fn from(bytes: [u8; 32]) -> Self {\n        AddressableEntityHash(bytes)\n    }\n}\n\nimpl TryFrom<Key> for AddressableEntityHash {\n    type Error = ApiError;\n\n    fn try_from(value: Key) -> Result<Self, Self::Error> {\n        if let Key::AddressableEntity(entity_addr) = value {\n            Ok(AddressableEntityHash::new(entity_addr.value()))\n        } else {\n            Err(ApiError::Formatting)\n        }\n    }\n}\n\nimpl Serialize for AddressableEntityHash {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            self.to_formatted_string().serialize(serializer)\n        } else {\n            self.0.serialize(serializer)\n        }\n    }\n}\n\nimpl<'de> Deserialize<'de> for AddressableEntityHash {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        if deserializer.is_human_readable() {\n            let formatted_string = String::deserialize(deserializer)?;\n            AddressableEntityHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom)\n        } else {\n            let bytes = HashAddr::deserialize(deserializer)?;\n            Ok(AddressableEntityHash(bytes))\n        }\n    
}\n}\n\nimpl AsRef<[u8]> for AddressableEntityHash {\n    fn as_ref(&self) -> &[u8] {\n        self.0.as_ref()\n    }\n}\n\nimpl TryFrom<&[u8]> for AddressableEntityHash {\n    type Error = TryFromSliceForContractHashError;\n\n    fn try_from(bytes: &[u8]) -> Result<Self, TryFromSliceForContractHashError> {\n        HashAddr::try_from(bytes)\n            .map(AddressableEntityHash::new)\n            .map_err(|_| TryFromSliceForContractHashError(()))\n    }\n}\n\nimpl TryFrom<&Vec<u8>> for AddressableEntityHash {\n    type Error = TryFromSliceForContractHashError;\n\n    fn try_from(bytes: &Vec<u8>) -> Result<Self, Self::Error> {\n        HashAddr::try_from(bytes as &[u8])\n            .map(AddressableEntityHash::new)\n            .map_err(|_| TryFromSliceForContractHashError(()))\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<AddressableEntityHash> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> AddressableEntityHash {\n        AddressableEntityHash(rng.gen())\n    }\n}\n\n/// Tag for the variants of [`EntityKind`].\n#[derive(Debug, Copy, Clone, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[repr(u8)]\npub enum EntityKindTag {\n    /// `EntityKind::System` variant.\n    System = 0,\n    /// `EntityKind::Account` variant.\n    Account = 1,\n    /// `EntityKind::SmartContract` variant.\n    SmartContract = 2,\n}\n\nimpl TryFrom<u8> for EntityKindTag {\n    type Error = bytesrepr::Error;\n\n    fn try_from(value: u8) -> Result<Self, Self::Error> {\n        match value {\n            0 => Ok(EntityKindTag::System),\n            1 => Ok(EntityKindTag::Account),\n            2 => Ok(EntityKindTag::SmartContract),\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\nimpl ToBytes for EntityKindTag {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        (*self as u8).to_bytes()\n    }\n\n    fn 
serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        (*self as u8).write_bytes(writer)\n    }\n}\n\nimpl FromBytes for EntityKindTag {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (entity_kind_tag, remainder) = u8::from_bytes(bytes)?;\n        Ok((entity_kind_tag.try_into()?, remainder))\n    }\n}\n\nimpl Display for EntityKindTag {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            EntityKindTag::System => {\n                write!(f, \"system\")\n            }\n            EntityKindTag::Account => {\n                write!(f, \"account\")\n            }\n            EntityKindTag::SmartContract => {\n                write!(f, \"contract\")\n            }\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<EntityKindTag> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> EntityKindTag {\n        match rng.gen_range(0..=2) {\n            0 => EntityKindTag::System,\n            1 => EntityKindTag::Account,\n            2 => EntityKindTag::SmartContract,\n            _ => unreachable!(),\n        }\n    }\n}\n\n#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"Runtime used to execute a Transaction.\")\n)]\n//Default is needed only in testing to meet EnumIter needs\n#[cfg_attr(any(feature = \"testing\", test), derive(Default))]\n#[serde(deny_unknown_fields)]\n#[repr(u8)]\npub enum ContractRuntimeTag {\n    #[cfg_attr(any(feature = \"testing\", test), default)]\n    VmCasperV1,\n    VmCasperV2,\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<ContractRuntimeTag> for Standard {\n    fn sample<R: Rng + 
?Sized>(&self, rng: &mut R) -> ContractRuntimeTag {\n        match rng.gen_range(0..=1) {\n            0 => ContractRuntimeTag::VmCasperV1,\n            1 => ContractRuntimeTag::VmCasperV2,\n            _ => unreachable!(),\n        }\n    }\n}\n\nimpl ToBytes for ContractRuntimeTag {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        (*self as u8).to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        (*self as u8).write_bytes(writer)\n    }\n}\n\nimpl FromBytes for ContractRuntimeTag {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        if tag == ContractRuntimeTag::VmCasperV1 as u8 {\n            Ok((ContractRuntimeTag::VmCasperV1, remainder))\n        } else if tag == ContractRuntimeTag::VmCasperV2 as u8 {\n            Ok((ContractRuntimeTag::VmCasperV2, remainder))\n        } else {\n            Err(bytesrepr::Error::Formatting)\n        }\n    }\n}\n\nimpl Display for ContractRuntimeTag {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            ContractRuntimeTag::VmCasperV1 => write!(f, \"vm-casper-v1\"),\n            ContractRuntimeTag::VmCasperV2 => write!(f, \"vm-casper-v2\"),\n        }\n    }\n}\nimpl ContractRuntimeTag {\n    /// Returns the tag of the [`ContractRuntimeTag`].\n    pub fn tag(&self) -> u8 {\n        *self as u8\n    }\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n/// The type of Package.\npub enum EntityKind {\n    /// Package associated with a native contract implementation.\n    System(SystemEntityType),\n    /// Package associated with an Account hash.\n    Account(AccountHash),\n    
/// Packages associated with Wasm stored on chain.\n    SmartContract(ContractRuntimeTag),\n}\n\nimpl EntityKind {\n    /// Returns the Account hash associated with a Package based on the package kind.\n    pub fn maybe_account_hash(&self) -> Option<AccountHash> {\n        match self {\n            Self::Account(account_hash) => Some(*account_hash),\n            Self::SmartContract(_) | Self::System(_) => None,\n        }\n    }\n\n    /// Returns the associated key set based on the Account hash set in the package kind.\n    pub fn associated_keys(&self) -> AssociatedKeys {\n        match self {\n            Self::Account(account_hash) => AssociatedKeys::new(*account_hash, Weight::new(1)),\n            Self::SmartContract(_) | Self::System(_) => AssociatedKeys::default(),\n        }\n    }\n\n    /// Returns if the current package is either a system contract or the system entity.\n    pub fn is_system(&self) -> bool {\n        matches!(self, Self::System(_))\n    }\n\n    /// Returns if the current package is the system mint.\n    pub fn is_system_mint(&self) -> bool {\n        matches!(self, Self::System(SystemEntityType::Mint))\n    }\n\n    /// Returns if the current package is the system auction.\n    pub fn is_system_auction(&self) -> bool {\n        matches!(self, Self::System(SystemEntityType::Auction))\n    }\n\n    /// Returns if the current package is associated with the system addressable entity.\n    pub fn is_system_account(&self) -> bool {\n        match self {\n            Self::Account(account_hash) => {\n                if *account_hash == PublicKey::System.to_account_hash() {\n                    return true;\n                }\n                false\n            }\n            _ => false,\n        }\n    }\n}\n\nimpl Tagged<EntityKindTag> for EntityKind {\n    fn tag(&self) -> EntityKindTag {\n        match self {\n            EntityKind::System(_) => EntityKindTag::System,\n            EntityKind::Account(_) => EntityKindTag::Account,\n          
  EntityKind::SmartContract(_) => EntityKindTag::SmartContract,\n        }\n    }\n}\n\nimpl Tagged<u8> for EntityKind {\n    fn tag(&self) -> u8 {\n        let package_kind_tag: EntityKindTag = self.tag();\n        package_kind_tag as u8\n    }\n}\n\nimpl ToBytes for EntityKind {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                EntityKind::SmartContract(transaction_runtime) => {\n                    transaction_runtime.serialized_length()\n                }\n                EntityKind::System(system_entity_type) => system_entity_type.serialized_length(),\n                EntityKind::Account(account_hash) => account_hash.serialized_length(),\n            }\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            EntityKind::SmartContract(transaction_runtime) => {\n                writer.push(self.tag());\n                transaction_runtime.write_bytes(writer)\n            }\n            EntityKind::System(system_entity_type) => {\n                writer.push(self.tag());\n                system_entity_type.write_bytes(writer)\n            }\n            EntityKind::Account(account_hash) => {\n                writer.push(self.tag());\n                account_hash.write_bytes(writer)\n            }\n        }\n    }\n}\n\nimpl FromBytes for EntityKind {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = EntityKindTag::from_bytes(bytes)?;\n        match tag {\n            EntityKindTag::System => {\n                let (entity_type, remainder) = SystemEntityType::from_bytes(remainder)?;\n                Ok((EntityKind::System(entity_type), remainder))\n            }\n            
EntityKindTag::Account => {\n                let (account_hash, remainder) = AccountHash::from_bytes(remainder)?;\n                Ok((EntityKind::Account(account_hash), remainder))\n            }\n            EntityKindTag::SmartContract => {\n                let (transaction_runtime, remainder) = FromBytes::from_bytes(remainder)?;\n                Ok((EntityKind::SmartContract(transaction_runtime), remainder))\n            }\n        }\n    }\n}\n\nimpl Display for EntityKind {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            EntityKind::System(system_entity) => {\n                write!(f, \"system-entity-kind({})\", system_entity)\n            }\n            EntityKind::Account(account_hash) => {\n                write!(f, \"account-entity-kind({})\", account_hash)\n            }\n            EntityKind::SmartContract(transaction_runtime) => {\n                write!(f, \"smart-contract-entity-kind({})\", transaction_runtime)\n            }\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<EntityKind> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> EntityKind {\n        match rng.gen_range(0..=2) {\n            0 => EntityKind::System(rng.gen()),\n            1 => EntityKind::Account(rng.gen()),\n            2 => EntityKind::SmartContract(rng.gen()),\n            _ => unreachable!(),\n        }\n    }\n}\n\n/// The address for an AddressableEntity which contains the 32 bytes and tagging information.\n#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema), schemars(untagged))]\npub enum EntityAddr {\n    /// The address for a system entity account or contract.\n    System(#[cfg_attr(feature = \"json-schema\", schemars(skip, with = \"String\"))] HashAddr),\n    /// The address of an entity that corresponds to an Account.\n    
Account(#[cfg_attr(feature = \"json-schema\", schemars(skip, with = \"String\"))] HashAddr),\n    /// The address of an entity that corresponds to a Userland smart contract.\n    SmartContract(#[cfg_attr(feature = \"json-schema\", schemars(skip, with = \"String\"))] HashAddr),\n}\n\nimpl EntityAddr {\n    /// The length in bytes of an `EntityAddr`.\n    pub const LENGTH: usize = U8_SERIALIZED_LENGTH + KEY_HASH_LENGTH;\n\n    /// Constructs a new `EntityAddr` for a system entity.\n    pub const fn new_system(hash_addr: HashAddr) -> Self {\n        Self::System(hash_addr)\n    }\n\n    /// Constructs a new `EntityAddr` for an Account entity.\n    pub const fn new_account(hash_addr: HashAddr) -> Self {\n        Self::Account(hash_addr)\n    }\n\n    /// Constructs a new `EntityAddr` for a smart contract.\n    pub const fn new_smart_contract(hash_addr: HashAddr) -> Self {\n        Self::SmartContract(hash_addr)\n    }\n\n    /// Constructs a new `EntityAddr` based on the supplied kind.\n    pub fn new_of_kind(entity_kind: EntityKind, hash_addr: HashAddr) -> Self {\n        match entity_kind {\n            EntityKind::System(_) => Self::new_system(hash_addr),\n            EntityKind::Account(_) => Self::new_account(hash_addr),\n            EntityKind::SmartContract(_) => Self::new_smart_contract(hash_addr),\n        }\n    }\n\n    /// Returns the tag of the [`EntityAddr`].\n    pub fn tag(&self) -> EntityKindTag {\n        match self {\n            EntityAddr::System(_) => EntityKindTag::System,\n            EntityAddr::Account(_) => EntityKindTag::Account,\n            EntityAddr::SmartContract(_) => EntityKindTag::SmartContract,\n        }\n    }\n\n    /// Is this a system entity address?\n    pub fn is_system(&self) -> bool {\n        self.tag() == EntityKindTag::System\n            || self.value() == PublicKey::System.to_account_hash().value()\n    }\n\n    /// Is this a contract entity address?\n    pub fn is_contract(&self) -> bool {\n        self.tag() == 
EntityKindTag::SmartContract\n    }\n\n    /// Is this an account entity address?\n    pub fn is_account(&self) -> bool {\n        self.tag() == EntityKindTag::Account\n    }\n\n    /// Returns the 32 bytes of the [`EntityAddr`].\n    pub fn value(&self) -> HashAddr {\n        match self {\n            EntityAddr::System(hash_addr)\n            | EntityAddr::Account(hash_addr)\n            | EntityAddr::SmartContract(hash_addr) => *hash_addr,\n        }\n    }\n\n    /// Returns the formatted String representation of the [`EntityAddr`].\n    pub fn to_formatted_string(&self) -> String {\n        match self {\n            EntityAddr::System(addr) => {\n                format!(\n                    \"{}{}{}\",\n                    ENTITY_PREFIX,\n                    SYSTEM_ENTITY_PREFIX,\n                    base16::encode_lower(addr)\n                )\n            }\n            EntityAddr::Account(addr) => {\n                format!(\n                    \"{}{}{}\",\n                    ENTITY_PREFIX,\n                    ACCOUNT_ENTITY_PREFIX,\n                    base16::encode_lower(addr)\n                )\n            }\n            EntityAddr::SmartContract(addr) => {\n                format!(\n                    \"{}{}{}\",\n                    ENTITY_PREFIX,\n                    CONTRACT_ENTITY_PREFIX,\n                    base16::encode_lower(addr)\n                )\n            }\n        }\n    }\n\n    /// Constructs an [`EntityAddr`] from a formatted String.\n    pub fn from_formatted_str(input: &str) -> Result<Self, FromStrError> {\n        if let Some(entity) = input.strip_prefix(ENTITY_PREFIX) {\n            let (addr_str, tag) = if let Some(str) = entity.strip_prefix(SYSTEM_ENTITY_PREFIX) {\n                (str, EntityKindTag::System)\n            } else if let Some(str) = entity.strip_prefix(ACCOUNT_ENTITY_PREFIX) {\n                (str, EntityKindTag::Account)\n            } else if let Some(str) = entity.strip_prefix(CONTRACT_ENTITY_PREFIX) 
{\n                (str, EntityKindTag::SmartContract)\n            } else {\n                return Err(FromStrError::InvalidPrefix);\n            };\n            let addr = checksummed_hex::decode(addr_str).map_err(FromStrError::Hex)?;\n            let hash_addr = HashAddr::try_from(addr.as_ref()).map_err(FromStrError::Hash)?;\n            let entity_addr = match tag {\n                EntityKindTag::System => EntityAddr::new_system(hash_addr),\n                EntityKindTag::Account => EntityAddr::new_account(hash_addr),\n                EntityKindTag::SmartContract => EntityAddr::new_smart_contract(hash_addr),\n            };\n\n            return Ok(entity_addr);\n        }\n\n        Err(FromStrError::InvalidPrefix)\n    }\n\n    pub fn into_smart_contract(&self) -> Option<[u8; 32]> {\n        match self {\n            EntityAddr::SmartContract(addr) => Some(*addr),\n            _ => None,\n        }\n    }\n}\n\nimpl ToBytes for EntityAddr {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        EntityAddr::LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            EntityAddr::System(addr) => {\n                EntityKindTag::System.write_bytes(writer)?;\n                addr.write_bytes(writer)\n            }\n            EntityAddr::Account(addr) => {\n                EntityKindTag::Account.write_bytes(writer)?;\n                addr.write_bytes(writer)\n            }\n            EntityAddr::SmartContract(addr) => {\n                EntityKindTag::SmartContract.write_bytes(writer)?;\n                addr.write_bytes(writer)\n            }\n        }\n    }\n}\n\nimpl FromBytes for EntityAddr {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, 
remainder) = EntityKindTag::from_bytes(bytes)?;\n        let (addr, remainder) = HashAddr::from_bytes(remainder)?;\n        let entity_addr = match tag {\n            EntityKindTag::System => EntityAddr::System(addr),\n            EntityKindTag::Account => EntityAddr::Account(addr),\n            EntityKindTag::SmartContract => EntityAddr::SmartContract(addr),\n        };\n        Ok((entity_addr, remainder))\n    }\n}\n\nimpl CLTyped for EntityAddr {\n    fn cl_type() -> CLType {\n        CLType::Any\n    }\n}\n\nimpl From<EntityAddr> for AddressableEntityHash {\n    fn from(entity_addr: EntityAddr) -> Self {\n        AddressableEntityHash::new(entity_addr.value())\n    }\n}\n\nimpl Display for EntityAddr {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        f.write_str(&self.to_formatted_string())\n    }\n}\n\nimpl Debug for EntityAddr {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            EntityAddr::System(hash_addr) => {\n                write!(f, \"EntityAddr::System({})\", base16::encode_lower(hash_addr))\n            }\n            EntityAddr::Account(hash_addr) => {\n                write!(\n                    f,\n                    \"EntityAddr::Account({})\",\n                    base16::encode_lower(hash_addr)\n                )\n            }\n            EntityAddr::SmartContract(hash_addr) => {\n                write!(\n                    f,\n                    \"EntityAddr::SmartContract({})\",\n                    base16::encode_lower(hash_addr)\n                )\n            }\n        }\n    }\n}\n\nimpl Serialize for EntityAddr {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            self.to_formatted_string().serialize(serializer)\n        } else {\n            let (tag, value): (EntityKindTag, HashAddr) = (self.tag(), self.value());\n            (tag, value).serialize(serializer)\n        }\n    
}\n}\n\nimpl<'de> Deserialize<'de> for EntityAddr {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        if deserializer.is_human_readable() {\n            let formatted_string = String::deserialize(deserializer)?;\n            Self::from_formatted_str(&formatted_string).map_err(SerdeError::custom)\n        } else {\n            let (tag, addr) = <(EntityKindTag, HashAddr)>::deserialize(deserializer)?;\n            match tag {\n                EntityKindTag::System => Ok(EntityAddr::new_system(addr)),\n                EntityKindTag::Account => Ok(EntityAddr::new_account(addr)),\n                EntityKindTag::SmartContract => Ok(EntityAddr::new_smart_contract(addr)),\n            }\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<EntityAddr> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> EntityAddr {\n        match rng.gen_range(0..=2) {\n            0 => EntityAddr::System(rng.gen()),\n            1 => EntityAddr::Account(rng.gen()),\n            2 => EntityAddr::SmartContract(rng.gen()),\n            _ => unreachable!(),\n        }\n    }\n}\n\n/// A NamedKey address.\n#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct NamedKeyAddr {\n    /// The address of the entity.\n    base_addr: EntityAddr,\n    /// The bytes of the name.\n    string_bytes: [u8; KEY_HASH_LENGTH],\n}\n\nimpl NamedKeyAddr {\n    /// The length in bytes of a [`NamedKeyAddr`].\n    pub const NAMED_KEY_ADDR_BASE_LENGTH: usize = 1 + EntityAddr::LENGTH;\n\n    /// Constructs a new [`NamedKeyAddr`] based on the supplied bytes.\n    pub const fn new_named_key_entry(\n        entity_addr: EntityAddr,\n        string_bytes: [u8; KEY_HASH_LENGTH],\n    ) -> Self {\n        Self {\n            base_addr: entity_addr,\n            string_bytes,\n  
      }\n    }\n\n    /// Constructs a new [`NamedKeyAddr`] based on string name.\n    /// Will fail if the string cannot be serialized.\n    pub fn new_from_string(\n        entity_addr: EntityAddr,\n        entry: String,\n    ) -> Result<Self, bytesrepr::Error> {\n        let bytes = entry.to_bytes()?;\n        let mut hasher = {\n            match VarBlake2b::new(BLAKE2B_DIGEST_LENGTH) {\n                Ok(hasher) => hasher,\n                Err(_) => return Err(bytesrepr::Error::Formatting),\n            }\n        };\n        hasher.update(bytes);\n        // NOTE: Assumed safe as size of `HashAddr` equals to the output provided by hasher.\n        let mut string_bytes = HashAddr::default();\n        hasher.finalize_variable(|hash| string_bytes.clone_from_slice(hash));\n        Ok(Self::new_named_key_entry(entity_addr, string_bytes))\n    }\n\n    /// Returns the encapsulated [`EntityAddr`].\n    pub fn entity_addr(&self) -> EntityAddr {\n        self.base_addr\n    }\n\n    /// Returns the formatted String representation of the [`NamedKeyAddr`].\n    pub fn to_formatted_string(&self) -> String {\n        format!(\"{}\", self)\n    }\n\n    /// Constructs a [`NamedKeyAddr`] from a formatted string.\n    pub fn from_formatted_str(input: &str) -> Result<Self, FromStrError> {\n        if let Some(named_key) = input.strip_prefix(NAMED_KEY_PREFIX) {\n            if let Some((entity_addr_str, string_bytes_str)) = named_key.rsplit_once('-') {\n                let entity_addr = EntityAddr::from_formatted_str(entity_addr_str)?;\n                let string_bytes =\n                    checksummed_hex::decode(string_bytes_str).map_err(FromStrError::Hex)?;\n                let (string_bytes, _) =\n                    FromBytes::from_vec(string_bytes).map_err(FromStrError::BytesRepr)?;\n                return Ok(Self::new_named_key_entry(entity_addr, string_bytes));\n            };\n        }\n\n        Err(FromStrError::InvalidPrefix)\n    }\n}\n\nimpl Default for 
NamedKeyAddr {\n    fn default() -> Self {\n        NamedKeyAddr {\n            base_addr: EntityAddr::System(HashAddr::default()),\n            string_bytes: Default::default(),\n        }\n    }\n}\n\nimpl ToBytes for NamedKeyAddr {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        buffer.append(&mut self.base_addr.to_bytes()?);\n        buffer.append(&mut self.string_bytes.to_bytes()?);\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.base_addr.serialized_length() + self.string_bytes.serialized_length()\n    }\n}\n\nimpl FromBytes for NamedKeyAddr {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (base_addr, remainder) = EntityAddr::from_bytes(bytes)?;\n        let (string_bytes, remainder) = FromBytes::from_bytes(remainder)?;\n        Ok((\n            Self {\n                base_addr,\n                string_bytes,\n            },\n            remainder,\n        ))\n    }\n}\n\nimpl Display for NamedKeyAddr {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            f,\n            \"{}{}-{}\",\n            NAMED_KEY_PREFIX,\n            self.base_addr,\n            base16::encode_lower(&self.string_bytes)\n        )\n    }\n}\n\nimpl Debug for NamedKeyAddr {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            f,\n            \"NamedKeyAddr({:?}-{:?})\",\n            self.base_addr,\n            base16::encode_lower(&self.string_bytes)\n        )\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<NamedKeyAddr> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> NamedKeyAddr {\n        NamedKeyAddr {\n            base_addr: rng.gen(),\n            string_bytes: rng.gen(),\n        }\n    }\n}\n\n/// A NamedKey value.\n#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, 
Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct NamedKeyValue {\n    /// The actual `Key` encoded as a CLValue.\n    named_key: CLValue,\n    /// The name of the `Key` encoded as a CLValue.\n    name: CLValue,\n}\n\nimpl NamedKeyValue {\n    /// Constructs a new [`NamedKeyValue`].\n    pub fn new(key: CLValue, name: CLValue) -> Self {\n        Self {\n            named_key: key,\n            name,\n        }\n    }\n\n    /// Constructs a new [`NamedKeyValue`] from its [`Key`] and [`String`].\n    pub fn from_concrete_values(named_key: Key, name: String) -> Result<Self, CLValueError> {\n        let key_cl_value = CLValue::from_t(named_key)?;\n        let string_cl_value = CLValue::from_t(name)?;\n        Ok(Self::new(key_cl_value, string_cl_value))\n    }\n\n    /// Returns the [`Key`] as a CLValue.\n    pub fn get_key_as_cl_value(&self) -> &CLValue {\n        &self.named_key\n    }\n\n    /// Returns the [`String`] as a CLValue.\n    pub fn get_name_as_cl_value(&self) -> &CLValue {\n        &self.name\n    }\n\n    /// Returns the concrete `Key` value\n    pub fn get_key(&self) -> Result<Key, CLValueError> {\n        self.named_key.clone().into_t::<Key>()\n    }\n\n    /// Returns the concrete `String` value\n    pub fn get_name(&self) -> Result<String, CLValueError> {\n        self.name.clone().into_t::<String>()\n    }\n}\n\nimpl ToBytes for NamedKeyValue {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        buffer.append(&mut self.named_key.to_bytes()?);\n        buffer.append(&mut self.name.to_bytes()?);\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.named_key.serialized_length() + self.name.serialized_length()\n    }\n}\n\nimpl FromBytes for NamedKeyValue {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let 
(named_key, remainder) = CLValue::from_bytes(bytes)?;\n        let (name, remainder) = CLValue::from_bytes(remainder)?;\n        Ok((Self { named_key, name }, remainder))\n    }\n}\n\n/// Collection of named message topics.\n#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug, Default)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(transparent, deny_unknown_fields)]\npub struct MessageTopics(\n    #[serde(with = \"BTreeMapToArray::<String, TopicNameHash, MessageTopicLabels>\")]\n    BTreeMap<String, TopicNameHash>,\n);\n\nimpl ToBytes for MessageTopics {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.0.write_bytes(writer)\n    }\n}\n\nimpl FromBytes for MessageTopics {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (message_topics_map, remainder) = BTreeMap::<String, TopicNameHash>::from_bytes(bytes)?;\n        Ok((MessageTopics(message_topics_map), remainder))\n    }\n}\n\nimpl MessageTopics {\n    /// Adds new message topic by topic name.\n    pub fn add_topic(\n        &mut self,\n        topic_name: &str,\n        topic_name_hash: TopicNameHash,\n    ) -> Result<(), MessageTopicError> {\n        match self.0.entry(topic_name.to_string()) {\n            Entry::Vacant(entry) => {\n                entry.insert(topic_name_hash);\n                Ok(())\n            }\n            Entry::Occupied(_) => Err(MessageTopicError::DuplicateTopic),\n        }\n    }\n\n    /// Checks if given topic name exists.\n    pub fn has_topic(&self, topic_name: &str) -> bool {\n        self.0.contains_key(topic_name)\n    }\n\n    /// Gets the topic hash from the collection by its topic name.\n    pub fn get(&self, 
topic_name: &str) -> Option<&TopicNameHash> {\n        self.0.get(topic_name)\n    }\n\n    /// Returns the length of the message topics.\n    pub fn len(&self) -> usize {\n        self.0.len()\n    }\n\n    /// Returns true if no message topics are registered.\n    pub fn is_empty(&self) -> bool {\n        self.0.is_empty()\n    }\n\n    /// Returns an iterator over the topic name and its hash.\n    pub fn iter(&self) -> impl Iterator<Item = (&String, &TopicNameHash)> {\n        self.0.iter()\n    }\n}\n\nstruct MessageTopicLabels;\n\nimpl KeyValueLabels for MessageTopicLabels {\n    const KEY: &'static str = \"topic_name\";\n    const VALUE: &'static str = \"topic_name_hash\";\n}\n\n#[cfg(feature = \"json-schema\")]\nimpl KeyValueJsonSchema for MessageTopicLabels {\n    const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some(\"MessageTopic\");\n}\n\nimpl From<BTreeMap<String, TopicNameHash>> for MessageTopics {\n    fn from(topics: BTreeMap<String, TopicNameHash>) -> MessageTopics {\n        MessageTopics(topics)\n    }\n}\n\n/// Errors that can occur while adding a new topic.\n#[derive(PartialEq, Eq, Debug, Clone)]\n#[non_exhaustive]\npub enum MessageTopicError {\n    /// Topic already exists.\n    DuplicateTopic,\n    /// Maximum number of topics exceeded.\n    MaxTopicsExceeded,\n    /// Topic name size exceeded.\n    TopicNameSizeExceeded,\n}\n\n#[cfg(feature = \"json-schema\")]\nstatic ADDRESSABLE_ENTITY: Lazy<AddressableEntity> = Lazy::new(|| {\n    let secret_key = SecretKey::ed25519_from_bytes([0; 32]).unwrap();\n    let account_hash = PublicKey::from(&secret_key).to_account_hash();\n    let package_hash = PackageHash::new([0; 32]);\n    let byte_code_hash = ByteCodeHash::new([0; 32]);\n    let main_purse = URef::from_formatted_str(\n        \"uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007\",\n    )\n    .unwrap();\n    let weight = Weight::new(1);\n    let associated_keys = AssociatedKeys::new(account_hash, weight);\n    let 
action_thresholds = ActionThresholds::new(weight, weight, weight).unwrap();\n    let protocol_version = ProtocolVersion::from_parts(2, 0, 0);\n    AddressableEntity {\n        protocol_version,\n        entity_kind: EntityKind::Account(account_hash),\n        package_hash,\n        byte_code_hash,\n        main_purse,\n        associated_keys,\n        action_thresholds,\n    }\n});\n\n/// The address for an AddressableEntity which contains the 32 bytes and tagging information.\npub type ContractAddress = PackageHash;\n\n/// Methods and type signatures supported by a contract.\n#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct AddressableEntity {\n    protocol_version: ProtocolVersion,\n    entity_kind: EntityKind,\n    package_hash: PackageHash,\n    byte_code_hash: ByteCodeHash,\n    main_purse: URef,\n\n    associated_keys: AssociatedKeys,\n    action_thresholds: ActionThresholds,\n}\n\nimpl From<AddressableEntity>\n    for (\n        PackageHash,\n        ByteCodeHash,\n        ProtocolVersion,\n        URef,\n        AssociatedKeys,\n        ActionThresholds,\n    )\n{\n    fn from(entity: AddressableEntity) -> Self {\n        (\n            entity.package_hash,\n            entity.byte_code_hash,\n            entity.protocol_version,\n            entity.main_purse,\n            entity.associated_keys,\n            entity.action_thresholds,\n        )\n    }\n}\n\nimpl AddressableEntity {\n    /// `AddressableEntity` constructor.\n    #[allow(clippy::too_many_arguments)]\n    pub fn new(\n        package_hash: PackageHash,\n        byte_code_hash: ByteCodeHash,\n        protocol_version: ProtocolVersion,\n        main_purse: URef,\n        associated_keys: AssociatedKeys,\n        action_thresholds: ActionThresholds,\n        entity_kind: EntityKind,\n    ) -> Self {\n        AddressableEntity {\n            
package_hash,\n            byte_code_hash,\n            protocol_version,\n            main_purse,\n            action_thresholds,\n            associated_keys,\n            entity_kind,\n        }\n    }\n\n    /// Get the entity addr for this entity from the corresponding hash.\n    pub fn entity_addr(&self, entity_hash: AddressableEntityHash) -> EntityAddr {\n        let hash_addr = entity_hash.value();\n        match self.entity_kind {\n            EntityKind::System(_) => EntityAddr::new_system(hash_addr),\n            EntityKind::Account(_) => EntityAddr::new_account(hash_addr),\n            EntityKind::SmartContract(_) => EntityAddr::new_smart_contract(hash_addr),\n        }\n    }\n\n    pub fn entity_kind(&self) -> EntityKind {\n        self.entity_kind\n    }\n\n    /// Hash for accessing contract package\n    pub fn package_hash(&self) -> PackageHash {\n        self.package_hash\n    }\n\n    /// Hash for accessing contract WASM\n    pub fn byte_code_hash(&self) -> ByteCodeHash {\n        self.byte_code_hash\n    }\n\n    /// Get the protocol version this header is targeting.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n\n    /// Returns main purse.\n    pub fn main_purse(&self) -> URef {\n        self.main_purse\n    }\n\n    /// Returns an [`AccessRights::ADD`]-only version of the main purse's [`URef`].\n    pub fn main_purse_add_only(&self) -> URef {\n        URef::new(self.main_purse.addr(), AccessRights::ADD)\n    }\n\n    /// Returns associated keys.\n    pub fn associated_keys(&self) -> &AssociatedKeys {\n        &self.associated_keys\n    }\n\n    /// Returns action thresholds.\n    pub fn action_thresholds(&self) -> &ActionThresholds {\n        &self.action_thresholds\n    }\n\n    /// Adds an associated key to an addressable entity.\n    pub fn add_associated_key(\n        &mut self,\n        account_hash: AccountHash,\n        weight: Weight,\n    ) -> Result<(), AddKeyFailure> {\n        
self.associated_keys.add_key(account_hash, weight)\n    }\n\n    /// Checks if removing given key would properly satisfy thresholds.\n    fn can_remove_key(&self, account_hash: AccountHash) -> bool {\n        let total_weight_without = self\n            .associated_keys\n            .total_keys_weight_excluding(account_hash);\n\n        // Returns true if the total weight calculated without given public key would be greater or\n        // equal to all of the thresholds.\n        total_weight_without >= *self.action_thresholds().deployment()\n            && total_weight_without >= *self.action_thresholds().key_management()\n    }\n\n    /// Checks if adding a weight to a sum of all weights excluding the given key would make the\n    /// resulting value to fall below any of the thresholds on account.\n    fn can_update_key(&self, account_hash: AccountHash, weight: Weight) -> bool {\n        // Calculates total weight of all keys excluding the given key\n        let total_weight = self\n            .associated_keys\n            .total_keys_weight_excluding(account_hash);\n\n        // Safely calculate new weight by adding the updated weight\n        let new_weight = total_weight.value().saturating_add(weight.value());\n\n        // Returns true if the new weight would be greater or equal to all of\n        // the thresholds.\n        new_weight >= self.action_thresholds().deployment().value()\n            && new_weight >= self.action_thresholds().key_management().value()\n    }\n\n    /// Removes an associated key from an addressable entity.\n    ///\n    /// Verifies that removing the key will not cause the remaining weight to fall below any action\n    /// thresholds.\n    pub fn remove_associated_key(\n        &mut self,\n        account_hash: AccountHash,\n    ) -> Result<(), RemoveKeyFailure> {\n        if self.associated_keys.contains_key(&account_hash) {\n            // Check if removing this weight would fall below thresholds\n            if 
!self.can_remove_key(account_hash) {\n                return Err(RemoveKeyFailure::ThresholdViolation);\n            }\n        }\n        self.associated_keys.remove_key(&account_hash)\n    }\n\n    /// Updates an associated key.\n    ///\n    /// Returns an error if the update would result in a violation of the key management thresholds.\n    pub fn update_associated_key(\n        &mut self,\n        account_hash: AccountHash,\n        weight: Weight,\n    ) -> Result<(), UpdateKeyFailure> {\n        if let Some(current_weight) = self.associated_keys.get(&account_hash) {\n            if weight < *current_weight {\n                // New weight is smaller than current weight\n                if !self.can_update_key(account_hash, weight) {\n                    return Err(UpdateKeyFailure::ThresholdViolation);\n                }\n            }\n        }\n        self.associated_keys.update_key(account_hash, weight)\n    }\n\n    /// Sets new action threshold for a given action type for the addressable entity.\n    ///\n    /// Returns an error if the new action threshold weight is greater than the total weight of the\n    /// account's associated keys.\n    pub fn set_action_threshold(\n        &mut self,\n        action_type: ActionType,\n        weight: Weight,\n    ) -> Result<(), SetThresholdFailure> {\n        // Verify if new threshold weight exceeds total weight of all associated\n        // keys.\n        self.can_set_threshold(weight)?;\n        // Set new weight for given action\n        self.action_thresholds.set_threshold(action_type, weight)\n    }\n\n    /// Sets a new action threshold for a given action type for the account without checking against\n    /// the total weight of the associated keys.\n    ///\n    /// This should only be called when authorized by an administrator account.\n    ///\n    /// Returns an error if setting the action would cause the `ActionType::Deployment` threshold to\n    /// be greater than any of the other action 
types.\n    pub fn set_action_threshold_unchecked(\n        &mut self,\n        action_type: ActionType,\n        threshold: Weight,\n    ) -> Result<(), SetThresholdFailure> {\n        self.action_thresholds.set_threshold(action_type, threshold)\n    }\n\n    /// Verifies if user can set action threshold.\n    pub fn can_set_threshold(&self, new_threshold: Weight) -> Result<(), SetThresholdFailure> {\n        let total_weight = self.associated_keys.total_keys_weight();\n        if new_threshold > total_weight {\n            return Err(SetThresholdFailure::InsufficientTotalWeight);\n        }\n        Ok(())\n    }\n\n    /// Checks whether all authorization keys are associated with this addressable entity.\n    pub fn can_authorize(&self, authorization_keys: &BTreeSet<AccountHash>) -> bool {\n        !authorization_keys.is_empty()\n            && authorization_keys\n                .iter()\n                .any(|e| self.associated_keys.contains_key(e))\n    }\n\n    /// Checks whether the sum of the weights of all authorization keys is\n    /// greater or equal to deploy threshold.\n    pub fn can_deploy_with(&self, authorization_keys: &BTreeSet<AccountHash>) -> bool {\n        let total_weight = self\n            .associated_keys\n            .calculate_keys_weight(authorization_keys);\n\n        total_weight >= *self.action_thresholds().deployment()\n    }\n\n    /// Checks whether the sum of the weights of all authorization keys is\n    /// greater or equal to key management threshold.\n    pub fn can_manage_keys_with(&self, authorization_keys: &BTreeSet<AccountHash>) -> bool {\n        let total_weight = self\n            .associated_keys\n            .calculate_keys_weight(authorization_keys);\n\n        total_weight >= *self.action_thresholds().key_management()\n    }\n\n    /// Checks whether the sum of the weights of all authorization keys is\n    /// greater or equal to upgrade management threshold.\n    pub fn can_upgrade_with(&self, authorization_keys: 
&BTreeSet<AccountHash>) -> bool {\n        let total_weight = self\n            .associated_keys\n            .calculate_keys_weight(authorization_keys);\n\n        total_weight >= *self.action_thresholds().upgrade_management()\n    }\n\n    /// Addr for accessing wasm bytes\n    pub fn byte_code_addr(&self) -> HashAddr {\n        self.byte_code_hash.value()\n    }\n\n    /// Set protocol_version.\n    pub fn set_protocol_version(&mut self, protocol_version: ProtocolVersion) {\n        self.protocol_version = protocol_version;\n    }\n\n    /// Determines if `AddressableEntity` is compatible with a given `ProtocolVersion`.\n    pub fn is_compatible_protocol_version(&self, protocol_version: ProtocolVersion) -> bool {\n        let entity_protocol_version = self.protocol_version.value();\n        let context_protocol_version = protocol_version.value();\n        if entity_protocol_version.major == context_protocol_version.major {\n            return true;\n        }\n        if entity_protocol_version.major == 1 && context_protocol_version.major == 2 {\n            // the 1.x model has been deprecated but is still supported until 3.0.0\n            return true;\n        }\n        false\n    }\n\n    /// Returns the kind of `AddressableEntity`.\n    pub fn kind(&self) -> EntityKind {\n        self.entity_kind\n    }\n\n    /// Is this an account?\n    pub fn is_account_kind(&self) -> bool {\n        matches!(self.entity_kind, EntityKind::Account(_))\n    }\n\n    /// Key for the addressable entity\n    pub fn entity_key(&self, entity_hash: AddressableEntityHash) -> Key {\n        match self.entity_kind {\n            EntityKind::System(_) => {\n                Key::addressable_entity_key(EntityKindTag::System, entity_hash)\n            }\n            EntityKind::Account(_) => {\n                Key::addressable_entity_key(EntityKindTag::Account, entity_hash)\n            }\n            EntityKind::SmartContract(_) => {\n                
Key::addressable_entity_key(EntityKindTag::SmartContract, entity_hash)\n            }\n        }\n    }\n\n    /// Extracts the access rights from the named keys of the addressable entity.\n    pub fn extract_access_rights(\n        &self,\n        entity_hash: AddressableEntityHash,\n        named_keys: &NamedKeys,\n    ) -> ContextAccessRights {\n        let urefs_iter = named_keys\n            .keys()\n            .filter_map(|key| key.as_uref().copied())\n            .chain(iter::once(self.main_purse));\n        ContextAccessRights::new(entity_hash.value(), urefs_iter)\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    #[cfg(feature = \"json-schema\")]\n    pub fn example() -> &'static Self {\n        &ADDRESSABLE_ENTITY\n    }\n}\n\nimpl ToBytes for AddressableEntity {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        self.package_hash().write_bytes(&mut result)?;\n        self.byte_code_hash().write_bytes(&mut result)?;\n        self.protocol_version().write_bytes(&mut result)?;\n        self.main_purse().write_bytes(&mut result)?;\n        self.associated_keys().write_bytes(&mut result)?;\n        self.action_thresholds().write_bytes(&mut result)?;\n        self.kind().write_bytes(&mut result)?;\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        ToBytes::serialized_length(&self.package_hash)\n            + ToBytes::serialized_length(&self.byte_code_hash)\n            + ToBytes::serialized_length(&self.protocol_version)\n            + ToBytes::serialized_length(&self.main_purse)\n            + ToBytes::serialized_length(&self.associated_keys)\n            + ToBytes::serialized_length(&self.action_thresholds)\n            + ToBytes::serialized_length(&self.entity_kind)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        
self.package_hash().write_bytes(writer)?;\n        self.byte_code_hash().write_bytes(writer)?;\n        self.protocol_version().write_bytes(writer)?;\n        self.main_purse().write_bytes(writer)?;\n        self.associated_keys().write_bytes(writer)?;\n        self.action_thresholds().write_bytes(writer)?;\n        self.kind().write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for AddressableEntity {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (package_hash, bytes) = PackageHash::from_bytes(bytes)?;\n        let (byte_code_hash, bytes) = ByteCodeHash::from_bytes(bytes)?;\n        let (protocol_version, bytes) = ProtocolVersion::from_bytes(bytes)?;\n        let (main_purse, bytes) = URef::from_bytes(bytes)?;\n        let (associated_keys, bytes) = AssociatedKeys::from_bytes(bytes)?;\n        let (action_thresholds, bytes) = ActionThresholds::from_bytes(bytes)?;\n        let (entity_kind, bytes) = EntityKind::from_bytes(bytes)?;\n        Ok((\n            AddressableEntity {\n                package_hash,\n                byte_code_hash,\n                protocol_version,\n                main_purse,\n                associated_keys,\n                action_thresholds,\n                entity_kind,\n            },\n            bytes,\n        ))\n    }\n}\n\nimpl Default for AddressableEntity {\n    fn default() -> Self {\n        AddressableEntity {\n            byte_code_hash: [0; KEY_HASH_LENGTH].into(),\n            package_hash: [0; KEY_HASH_LENGTH].into(),\n            protocol_version: ProtocolVersion::V1_0_0,\n            main_purse: URef::default(),\n            action_thresholds: ActionThresholds::default(),\n            associated_keys: AssociatedKeys::default(),\n            entity_kind: EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1),\n        }\n    }\n}\n\nimpl From<Contract> for AddressableEntity {\n    fn from(value: Contract) -> Self {\n        AddressableEntity::new(\n         
   PackageHash::new(value.contract_package_hash().value()),\n            ByteCodeHash::new(value.contract_wasm_hash().value()),\n            value.protocol_version(),\n            URef::default(),\n            AssociatedKeys::default(),\n            ActionThresholds::default(),\n            EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1),\n        )\n    }\n}\n\nimpl From<Account> for AddressableEntity {\n    fn from(value: Account) -> Self {\n        AddressableEntity::new(\n            PackageHash::default(),\n            ByteCodeHash::new([0u8; 32]),\n            ProtocolVersion::default(),\n            value.main_purse(),\n            value.associated_keys().clone().into(),\n            value.action_thresholds().clone().into(),\n            EntityKind::Account(value.account_hash()),\n        )\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::{AccessRights, URef, UREF_ADDR_LENGTH};\n\n    #[cfg(feature = \"json-schema\")]\n    use schemars::{gen::SchemaGenerator, schema::InstanceType};\n\n    #[test]\n    fn entity_hash_from_slice() {\n        let bytes: Vec<u8> = (0..32).collect();\n        let entity_hash = HashAddr::try_from(&bytes[..]).expect(\"should create contract hash\");\n        let entity_hash = AddressableEntityHash::new(entity_hash);\n        assert_eq!(&bytes, &entity_hash.as_bytes());\n    }\n\n    #[test]\n    fn entity_hash_from_str() {\n        let entity_hash = AddressableEntityHash([3; 32]);\n        let encoded = entity_hash.to_formatted_string();\n        let decoded = AddressableEntityHash::from_formatted_str(&encoded).unwrap();\n        assert_eq!(entity_hash, decoded);\n\n        let invalid_prefix =\n            \"addressable-entity--0000000000000000000000000000000000000000000000000000000000000000\";\n        assert!(AddressableEntityHash::from_formatted_str(invalid_prefix).is_err());\n\n        let short_addr =\n            
\"addressable-entity-00000000000000000000000000000000000000000000000000000000000000\";\n        assert!(AddressableEntityHash::from_formatted_str(short_addr).is_err());\n\n        let long_addr =\n            \"addressable-entity-000000000000000000000000000000000000000000000000000000000000000000\";\n        assert!(AddressableEntityHash::from_formatted_str(long_addr).is_err());\n\n        let invalid_hex =\n            \"addressable-entity-000000000000000000000000000000000000000000000000000000000000000g\";\n        assert!(AddressableEntityHash::from_formatted_str(invalid_hex).is_err());\n    }\n\n    #[test]\n    fn named_key_addr_from_str() {\n        let named_key_addr =\n            NamedKeyAddr::new_named_key_entry(EntityAddr::new_smart_contract([3; 32]), [4; 32]);\n        let encoded = named_key_addr.to_formatted_string();\n        let decoded = NamedKeyAddr::from_formatted_str(&encoded).unwrap();\n        assert_eq!(named_key_addr, decoded);\n    }\n\n    #[test]\n    fn entity_hash_serde_roundtrip() {\n        let entity_hash = AddressableEntityHash([255; 32]);\n        let serialized = bincode::serialize(&entity_hash).unwrap();\n        let deserialized = bincode::deserialize(&serialized).unwrap();\n        assert_eq!(entity_hash, deserialized)\n    }\n\n    #[test]\n    fn entity_hash_json_roundtrip() {\n        let entity_hash = AddressableEntityHash([255; 32]);\n        let json_string = serde_json::to_string_pretty(&entity_hash).unwrap();\n        let decoded = serde_json::from_str(&json_string).unwrap();\n        assert_eq!(entity_hash, decoded)\n    }\n\n    #[test]\n    fn entity_addr_formatted_string_roundtrip() {\n        let entity_addr = EntityAddr::Account([5; 32]);\n        let encoded = entity_addr.to_formatted_string();\n        let decoded = EntityAddr::from_formatted_str(&encoded).expect(\"must get entity addr\");\n        assert_eq!(decoded, entity_addr);\n\n        let entity_addr = EntityAddr::SmartContract([5; 32]);\n        let 
encoded = entity_addr.to_formatted_string();\n        let decoded = EntityAddr::from_formatted_str(&encoded).expect(\"must get entity addr\");\n        assert_eq!(decoded, entity_addr);\n\n        let entity_addr = EntityAddr::System([5; 32]);\n        let encoded = entity_addr.to_formatted_string();\n        let decoded = EntityAddr::from_formatted_str(&encoded).expect(\"must get entity addr\");\n        assert_eq!(decoded, entity_addr);\n    }\n\n    #[test]\n    fn entity_addr_serialization_roundtrip() {\n        for addr in [\n            EntityAddr::new_system([1; 32]),\n            EntityAddr::new_account([1; 32]),\n            EntityAddr::new_smart_contract([1; 32]),\n        ] {\n            bytesrepr::test_serialization_roundtrip(&addr);\n        }\n    }\n\n    #[test]\n    fn entity_addr_serde_roundtrip() {\n        for addr in [\n            EntityAddr::new_system([1; 32]),\n            EntityAddr::new_account([1; 32]),\n            EntityAddr::new_smart_contract([1; 32]),\n        ] {\n            let serialized = bincode::serialize(&addr).unwrap();\n            let deserialized = bincode::deserialize(&serialized).unwrap();\n            assert_eq!(addr, deserialized)\n        }\n    }\n\n    #[test]\n    fn entity_addr_json_roundtrip() {\n        for addr in [\n            EntityAddr::new_system([1; 32]),\n            EntityAddr::new_account([1; 32]),\n            EntityAddr::new_smart_contract([1; 32]),\n        ] {\n            let json_string = serde_json::to_string_pretty(&addr).unwrap();\n            let decoded = serde_json::from_str(&json_string).unwrap();\n            assert_eq!(addr, decoded)\n        }\n    }\n\n    #[cfg(feature = \"json-schema\")]\n    #[test]\n    fn entity_addr_schema() {\n        let mut gen = SchemaGenerator::default();\n        let any_of = EntityAddr::json_schema(&mut gen)\n            .into_object()\n            .subschemas\n            .expect(\"should have subschemas\")\n            .any_of\n            
.expect(\"should have any_of\");\n        for elem in any_of {\n            let schema = elem\n                .into_object()\n                .instance_type\n                .expect(\"should have instance type\");\n            assert!(schema.contains(&InstanceType::String), \"{:?}\", schema);\n        }\n    }\n\n    #[test]\n    fn should_extract_access_rights() {\n        const MAIN_PURSE: URef = URef::new([2; 32], AccessRights::READ_ADD_WRITE);\n\n        let entity_hash = AddressableEntityHash([255; 32]);\n        let uref = URef::new([84; UREF_ADDR_LENGTH], AccessRights::READ_ADD);\n        let uref_r = URef::new([42; UREF_ADDR_LENGTH], AccessRights::READ);\n        let uref_a = URef::new([42; UREF_ADDR_LENGTH], AccessRights::ADD);\n        let uref_w = URef::new([42; UREF_ADDR_LENGTH], AccessRights::WRITE);\n        let mut named_keys = NamedKeys::new();\n        named_keys.insert(\"a\".to_string(), Key::URef(uref_r));\n        named_keys.insert(\"b\".to_string(), Key::URef(uref_a));\n        named_keys.insert(\"c\".to_string(), Key::URef(uref_w));\n        named_keys.insert(\"d\".to_string(), Key::URef(uref));\n        let associated_keys = AssociatedKeys::new(AccountHash::new([254; 32]), Weight::new(1));\n        let contract = AddressableEntity::new(\n            PackageHash::new([254; 32]),\n            ByteCodeHash::new([253; 32]),\n            ProtocolVersion::V1_0_0,\n            MAIN_PURSE,\n            associated_keys,\n            ActionThresholds::new(Weight::new(1), Weight::new(1), Weight::new(1))\n                .expect(\"should create thresholds\"),\n            EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1),\n        );\n        let access_rights = contract.extract_access_rights(entity_hash, &named_keys);\n        let expected_uref = URef::new([42; UREF_ADDR_LENGTH], AccessRights::READ_ADD_WRITE);\n        assert!(\n            access_rights.has_access_rights_to_uref(&uref),\n            \"urefs in named keys should be included in 
access rights\"\n        );\n        assert!(\n            access_rights.has_access_rights_to_uref(&expected_uref),\n            \"multiple access right bits to the same uref should coalesce\"\n        );\n    }\n}\n\n#[cfg(test)]\nmod prop_tests {\n    use proptest::prelude::*;\n\n    use crate::{bytesrepr, gens};\n\n    proptest! {\n        #[test]\n        fn test_value_contract(contract in gens::addressable_entity_arb()) {\n            bytesrepr::test_serialization_roundtrip(&contract);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/api_error.rs",
    "content": "//! Contains [`ApiError`] and associated helper functions.\n\nuse core::{\n    convert::TryFrom,\n    fmt::{self, Debug, Formatter},\n};\n\nuse crate::{\n    account::{\n        AddKeyFailure, RemoveKeyFailure, SetThresholdFailure, TryFromIntError, UpdateKeyFailure,\n    },\n    addressable_entity::{self, MessageTopicError, TryFromSliceForAccountHashError},\n    bytesrepr, contracts,\n    system::{auction, handle_payment, mint},\n    CLValueError,\n};\n\n/// All `Error` variants defined in this library other than `Error::User` will convert to a `u32`\n/// value less than or equal to `RESERVED_ERROR_MAX`.\nconst RESERVED_ERROR_MAX: u32 = u16::MAX as u32; // 0..=65535\n\n/// Handle Payment errors will have this value added to them when being converted to a `u32`.\nconst POS_ERROR_OFFSET: u32 = RESERVED_ERROR_MAX - u8::MAX as u32; // 65280..=65535\n\n/// Mint errors will have this value added to them when being converted to a `u32`.\nconst MINT_ERROR_OFFSET: u32 = (POS_ERROR_OFFSET - 1) - u8::MAX as u32; // 65024..=65279\n\n/// Contract header errors will have this value added to them when being converted to a `u32`.\nconst HEADER_ERROR_OFFSET: u32 = (MINT_ERROR_OFFSET - 1) - u8::MAX as u32; // 64768..=65023\n\n/// Contract header errors will have this value added to them when being converted to a `u32`.\nconst AUCTION_ERROR_OFFSET: u32 = (HEADER_ERROR_OFFSET - 1) - u8::MAX as u32; // 64512..=64767\n\n/// Minimum value of user error's inclusive range.\nconst USER_ERROR_MIN: u32 = RESERVED_ERROR_MAX + 1;\n\n/// Maximum value of user error's inclusive range.\nconst USER_ERROR_MAX: u32 = 2 * RESERVED_ERROR_MAX + 1;\n\n/// Minimum value of Mint error's inclusive range.\nconst MINT_ERROR_MIN: u32 = MINT_ERROR_OFFSET;\n\n/// Maximum value of Mint error's inclusive range.\nconst MINT_ERROR_MAX: u32 = POS_ERROR_OFFSET - 1;\n\n/// Minimum value of Handle Payment error's inclusive range.\nconst HP_ERROR_MIN: u32 = POS_ERROR_OFFSET;\n\n/// Maximum value of Handle 
Payment error's inclusive range.\nconst HP_ERROR_MAX: u32 = RESERVED_ERROR_MAX;\n\n/// Minimum value of contract header error's inclusive range.\nconst HEADER_ERROR_MIN: u32 = HEADER_ERROR_OFFSET;\n\n/// Maximum value of contract header error's inclusive range.\nconst HEADER_ERROR_MAX: u32 = HEADER_ERROR_OFFSET + u8::MAX as u32;\n\n/// Minimum value of an auction contract error's inclusive range.\nconst AUCTION_ERROR_MIN: u32 = AUCTION_ERROR_OFFSET;\n\n/// Maximum value of an auction contract error's inclusive range.\nconst AUCTION_ERROR_MAX: u32 = AUCTION_ERROR_OFFSET + u8::MAX as u32;\n\n/// Errors which can be encountered while running a smart contract.\n///\n/// An `ApiError` can be converted to a `u32` in order to be passed via the execution engine's\n/// `ext_ffi::casper_revert()` function.  This means the information each variant can convey is\n/// limited.\n///\n/// The variants are split into numeric ranges as follows:\n///\n/// | Inclusive range | Variant(s)                                                      |\n/// | ----------------| ----------------------------------------------------------------|\n/// | [1, 64511]      | all except reserved system contract error ranges defined below. 
|\n/// | [64512, 64767]  | `Auction`                                                       |\n/// | [64768, 65023]  | `ContractHeader`                                                |\n/// | [65024, 65279]  | `Mint`                                                          |\n/// | [65280, 65535]  | `HandlePayment`                                                 |\n/// | [65536, 131071] | `User`                                                          |\n///\n/// Users can specify a C-style enum and implement `From` to ease usage of\n/// `casper_contract::runtime::revert()`, e.g.\n/// ```\n/// use casper_types::ApiError;\n///\n/// #[repr(u16)]\n/// enum FailureCode {\n///     Zero = 0,  // 65,536 as an ApiError::User\n///     One,       // 65,537 as an ApiError::User\n///     Two        // 65,538 as an ApiError::User\n/// }\n///\n/// impl From<FailureCode> for ApiError {\n///     fn from(code: FailureCode) -> Self {\n///         ApiError::User(code as u16)\n///     }\n/// }\n///\n/// assert_eq!(ApiError::User(1), FailureCode::One.into());\n/// assert_eq!(65_536, u32::from(ApiError::from(FailureCode::Zero)));\n/// assert_eq!(65_538, u32::from(ApiError::from(FailureCode::Two)));\n/// ```\n#[derive(Copy, Clone, PartialEq, Eq)]\n#[non_exhaustive]\npub enum ApiError {\n    /// Optional data was unexpectedly `None`.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(1), ApiError::None);\n    /// ```\n    None,\n    /// Specified argument not provided.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(2), ApiError::MissingArgument);\n    /// ```\n    MissingArgument,\n    /// Argument not of correct type.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(3), ApiError::InvalidArgument);\n    /// ```\n    InvalidArgument,\n    /// Failed to deserialize a value.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(4), 
ApiError::Deserialize);\n    /// ```\n    Deserialize,\n    /// `casper_contract::storage::read()` returned an error.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(5), ApiError::Read);\n    /// ```\n    Read,\n    /// The given key returned a `None` value.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(6), ApiError::ValueNotFound);\n    /// ```\n    ValueNotFound,\n    /// Failed to find a specified contract.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(7), ApiError::ContractNotFound);\n    /// ```\n    ContractNotFound,\n    /// A call to `casper_contract::runtime::get_key()` returned a failure.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(8), ApiError::GetKey);\n    /// ```\n    GetKey,\n    /// The [`Key`](crate::Key) variant was not as expected.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(9), ApiError::UnexpectedKeyVariant);\n    /// ```\n    UnexpectedKeyVariant,\n    /// Unsupported contract discovery variant.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(10), ApiError::UnexpectedContractRefVariant);\n    /// ```\n    UnexpectedContractRefVariant,\n    /// Invalid purse name given.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(11), ApiError::InvalidPurseName);\n    /// ```\n    InvalidPurseName,\n    /// Invalid purse retrieved.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(12), ApiError::InvalidPurse);\n    /// ```\n    InvalidPurse,\n    /// Failed to upgrade contract at [`URef`](crate::URef).\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(13), ApiError::UpgradeContractAtURef);\n    /// ```\n    UpgradeContractAtURef,\n    /// Failed to transfer motes.\n    /// ```\n    /// # use 
casper_types::ApiError;\n    /// assert_eq!(ApiError::from(14), ApiError::Transfer);\n    /// ```\n    Transfer,\n    /// The given [`URef`](crate::URef) has no access rights.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(15), ApiError::NoAccessRights);\n    /// ```\n    NoAccessRights,\n    /// A given type could not be constructed from a [`CLValue`](crate::CLValue).\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(16), ApiError::CLTypeMismatch);\n    /// ```\n    CLTypeMismatch,\n    /// Early end of stream while deserializing.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(17), ApiError::EarlyEndOfStream);\n    /// ```\n    EarlyEndOfStream,\n    /// Formatting error while deserializing.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(18), ApiError::Formatting);\n    /// ```\n    Formatting,\n    /// Not all input bytes were consumed in [`deserialize`](crate::bytesrepr::deserialize).\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(19), ApiError::LeftOverBytes);\n    /// ```\n    LeftOverBytes,\n    /// Out of memory error.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(20), ApiError::OutOfMemory);\n    /// ```\n    OutOfMemory,\n    /// There are already maximum [`AccountHash`](crate::account::AccountHash)s associated with the\n    /// given account.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(21), ApiError::MaxKeysLimit);\n    /// ```\n    MaxKeysLimit,\n    /// The given [`AccountHash`](crate::account::AccountHash) is already associated with the given\n    /// account.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(22), ApiError::DuplicateKey);\n    /// ```\n    DuplicateKey,\n    /// Caller doesn't have sufficient permissions to perform the 
given action.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(23), ApiError::PermissionDenied);\n    /// ```\n    PermissionDenied,\n    /// The given [`AccountHash`](crate::account::AccountHash) is not associated with the given\n    /// account.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(24), ApiError::MissingKey);\n    /// ```\n    MissingKey,\n    /// Removing/updating the given associated [`AccountHash`](crate::account::AccountHash) would\n    /// cause the total [`Weight`](addressable_entity::Weight) of all remaining `AccountHash`s to\n    /// fall below one of the action thresholds for the given account.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(25), ApiError::ThresholdViolation);\n    /// ```\n    ThresholdViolation,\n    /// Setting the key-management threshold to a value lower than the deployment threshold is\n    /// disallowed.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(26), ApiError::KeyManagementThreshold);\n    /// ```\n    KeyManagementThreshold,\n    /// Setting the deployment threshold to a value greater than any other threshold is disallowed.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(27), ApiError::DeploymentThreshold);\n    /// ```\n    DeploymentThreshold,\n    /// Setting a threshold to a value greater than the total weight of associated keys is\n    /// disallowed.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(28), ApiError::InsufficientTotalWeight);\n    /// ```\n    InsufficientTotalWeight,\n    /// The given `u32` doesn't map to a [`SystemContractType`](crate::system::SystemEntityType).\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(29), ApiError::InvalidSystemContract);\n    /// ```\n    InvalidSystemContract,\n    /// Failed to create a new 
purse.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(30), ApiError::PurseNotCreated);\n    /// ```\n    PurseNotCreated,\n    /// An unhandled value, likely representing a bug in the code.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(31), ApiError::Unhandled);\n    /// ```\n    Unhandled,\n    /// The provided buffer is too small to complete an operation.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(32), ApiError::BufferTooSmall);\n    /// ```\n    BufferTooSmall,\n    /// No data available in the host buffer.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(33), ApiError::HostBufferEmpty);\n    /// ```\n    HostBufferEmpty,\n    /// The host buffer has been set to a value and should be consumed first by a read operation.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(34), ApiError::HostBufferFull);\n    /// ```\n    HostBufferFull,\n    /// Could not lay out an array in memory\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(35), ApiError::AllocLayout);\n    /// ```\n    AllocLayout,\n    /// The `dictionary_item_key` length exceeds the maximum length.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(36), ApiError::DictionaryItemKeyExceedsLength);\n    /// ```\n    DictionaryItemKeyExceedsLength,\n    /// The `dictionary_item_key` is invalid.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(37), ApiError::InvalidDictionaryItemKey);\n    /// ```\n    InvalidDictionaryItemKey,\n    /// Unable to retrieve the requested system contract hash.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(38), ApiError::MissingSystemContractHash);\n    /// ```\n    MissingSystemContractHash,\n    /// Exceeded a recursion depth limit.\n 
   /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(39), ApiError::ExceededRecursionDepth);\n    /// ```\n    ExceededRecursionDepth,\n    /// Attempt to serialize a value that does not have a serialized representation.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(40), ApiError::NonRepresentableSerialization);\n    /// ```\n    NonRepresentableSerialization,\n    /// Error specific to Auction contract. See\n    /// [casper_types::system::auction::Error](crate::system::auction::Error).\n    /// ```\n    /// # use casper_types::ApiError;\n    /// for code in 64512..=64767 {\n    ///     assert!(matches!(ApiError::from(code), ApiError::AuctionError(_auction_error)));\n    /// }\n    /// ```\n    AuctionError(u8),\n    /// Contract header errors. See\n    /// [casper_types::contracts::Error](crate::addressable_entity::Error).\n    ///\n    /// ```\n    /// # use casper_types::ApiError;\n    /// for code in 64768..=65023 {\n    ///     assert!(matches!(ApiError::from(code), ApiError::ContractHeader(_contract_header_error)));\n    /// }\n    /// ```\n    ContractHeader(u8),\n    /// Error specific to Mint contract. See\n    /// [casper_types::system::mint::Error](crate::system::mint::Error).\n    /// ```\n    /// # use casper_types::ApiError;\n    /// for code in 65024..=65279 {\n    ///     assert!(matches!(ApiError::from(code), ApiError::Mint(_mint_error)));\n    /// }\n    /// ```\n    Mint(u8),\n    /// Error specific to Handle Payment contract. See\n    /// [casper_types::system::handle_payment](crate::system::handle_payment::Error).\n    /// ```\n    /// # use casper_types::ApiError;\n    /// for code in 65280..=65535 {\n    ///     assert!(matches!(ApiError::from(code), ApiError::HandlePayment(_handle_payment_error)));\n    /// }\n    /// ```\n    HandlePayment(u8),\n    /// User-specified error code.  
The internal `u16` value is added to `u16::MAX as u32 + 1` when\n    /// an `Error::User` is converted to a `u32`.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// for code in 65536..131071 {\n    ///     assert!(matches!(ApiError::from(code), ApiError::User(_)));\n    /// }\n    /// ```\n    User(u16),\n    /// The message topic is already registered.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(41), ApiError::MessageTopicAlreadyRegistered);\n    /// ```\n    MessageTopicAlreadyRegistered,\n    /// The maximum number of allowed message topics was exceeded.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(42), ApiError::MaxTopicsNumberExceeded);\n    /// ```\n    MaxTopicsNumberExceeded,\n    /// The maximum size for the topic name was exceeded.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(43), ApiError::MaxTopicNameSizeExceeded);\n    /// ```\n    MaxTopicNameSizeExceeded,\n    /// The message topic is not registered.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(44), ApiError::MessageTopicNotRegistered);\n    /// ```\n    MessageTopicNotRegistered,\n    /// The message topic is full and cannot accept new messages.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(45), ApiError::MessageTopicFull);\n    /// ```\n    MessageTopicFull,\n    /// The message topic is full and cannot accept new messages.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(46), ApiError::MessageTooLarge);\n    /// ```\n    MessageTooLarge,\n    /// The maximum number of messages emitted per block was exceeded when trying to emit a\n    /// message.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(47), ApiError::MaxMessagesPerBlockExceeded);\n    /// ```\n    MaxMessagesPerBlockExceeded,\n    /// Attempt to 
call FFI function `casper_add_contract_version()` from a transaction not defined\n    /// as an installer/upgrader.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(48), ApiError::NotAllowedToAddContractVersion);\n    /// ```\n    NotAllowedToAddContractVersion,\n    /// Invalid delegation amount limits.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(49), ApiError::InvalidDelegationAmountLimits);\n    /// ```\n    InvalidDelegationAmountLimits,\n    /// Invalid action for caller information.\n    /// ```\n    /// # use casper_types::ApiError;\n    /// assert_eq!(ApiError::from(50), ApiError::InvalidCallerInfoRequest);\n    /// ```\n    InvalidCallerInfoRequest,\n}\n\nimpl From<bytesrepr::Error> for ApiError {\n    fn from(error: bytesrepr::Error) -> Self {\n        match error {\n            bytesrepr::Error::EarlyEndOfStream => ApiError::EarlyEndOfStream,\n            bytesrepr::Error::Formatting => ApiError::Formatting,\n            bytesrepr::Error::LeftOverBytes => ApiError::LeftOverBytes,\n            bytesrepr::Error::OutOfMemory => ApiError::OutOfMemory,\n            bytesrepr::Error::NotRepresentable => ApiError::NonRepresentableSerialization,\n            bytesrepr::Error::ExceededRecursionDepth => ApiError::ExceededRecursionDepth,\n        }\n    }\n}\n\nimpl From<AddKeyFailure> for ApiError {\n    fn from(error: AddKeyFailure) -> Self {\n        match error {\n            AddKeyFailure::MaxKeysLimit => ApiError::MaxKeysLimit,\n            AddKeyFailure::DuplicateKey => ApiError::DuplicateKey,\n            AddKeyFailure::PermissionDenied => ApiError::PermissionDenied,\n        }\n    }\n}\n\nimpl From<UpdateKeyFailure> for ApiError {\n    fn from(error: UpdateKeyFailure) -> Self {\n        match error {\n            UpdateKeyFailure::MissingKey => ApiError::MissingKey,\n            UpdateKeyFailure::PermissionDenied => ApiError::PermissionDenied,\n            
UpdateKeyFailure::ThresholdViolation => ApiError::ThresholdViolation,\n        }\n    }\n}\n\nimpl From<RemoveKeyFailure> for ApiError {\n    fn from(error: RemoveKeyFailure) -> Self {\n        match error {\n            RemoveKeyFailure::MissingKey => ApiError::MissingKey,\n            RemoveKeyFailure::PermissionDenied => ApiError::PermissionDenied,\n            RemoveKeyFailure::ThresholdViolation => ApiError::ThresholdViolation,\n        }\n    }\n}\n\nimpl From<SetThresholdFailure> for ApiError {\n    fn from(error: SetThresholdFailure) -> Self {\n        match error {\n            SetThresholdFailure::KeyManagementThreshold => ApiError::KeyManagementThreshold,\n            SetThresholdFailure::DeploymentThreshold => ApiError::DeploymentThreshold,\n            SetThresholdFailure::PermissionDeniedError => ApiError::PermissionDenied,\n            SetThresholdFailure::InsufficientTotalWeight => ApiError::InsufficientTotalWeight,\n        }\n    }\n}\n\nimpl From<CLValueError> for ApiError {\n    fn from(error: CLValueError) -> Self {\n        match error {\n            CLValueError::Serialization(bytesrepr_error) => bytesrepr_error.into(),\n            CLValueError::Type(_) => ApiError::CLTypeMismatch,\n        }\n    }\n}\n\nimpl From<addressable_entity::Error> for ApiError {\n    fn from(error: addressable_entity::Error) -> Self {\n        ApiError::ContractHeader(error as u8)\n    }\n}\n\nimpl From<contracts::Error> for ApiError {\n    fn from(error: contracts::Error) -> Self {\n        ApiError::ContractHeader(error as u8)\n    }\n}\n\nimpl From<auction::Error> for ApiError {\n    fn from(error: auction::Error) -> Self {\n        ApiError::AuctionError(error as u8)\n    }\n}\n\n// This conversion is not intended to be used by third party crates.\n#[doc(hidden)]\nimpl From<TryFromIntError> for ApiError {\n    fn from(_error: TryFromIntError) -> Self {\n        ApiError::Unhandled\n    }\n}\n\nimpl From<TryFromSliceForAccountHashError> for ApiError {\n    fn 
from(_error: TryFromSliceForAccountHashError) -> Self {\n        ApiError::Deserialize\n    }\n}\n\nimpl From<mint::Error> for ApiError {\n    fn from(error: mint::Error) -> Self {\n        ApiError::Mint(error as u8)\n    }\n}\n\nimpl From<handle_payment::Error> for ApiError {\n    fn from(error: handle_payment::Error) -> Self {\n        ApiError::HandlePayment(error as u8)\n    }\n}\n\nimpl From<MessageTopicError> for ApiError {\n    fn from(error: MessageTopicError) -> Self {\n        match error {\n            MessageTopicError::DuplicateTopic => ApiError::MessageTopicAlreadyRegistered,\n            MessageTopicError::MaxTopicsExceeded => ApiError::MaxTopicsNumberExceeded,\n            MessageTopicError::TopicNameSizeExceeded => ApiError::MaxTopicNameSizeExceeded,\n        }\n    }\n}\n\nimpl From<ApiError> for u32 {\n    fn from(error: ApiError) -> Self {\n        match error {\n            ApiError::None => 1,\n            ApiError::MissingArgument => 2,\n            ApiError::InvalidArgument => 3,\n            ApiError::Deserialize => 4,\n            ApiError::Read => 5,\n            ApiError::ValueNotFound => 6,\n            ApiError::ContractNotFound => 7,\n            ApiError::GetKey => 8,\n            ApiError::UnexpectedKeyVariant => 9,\n            ApiError::UnexpectedContractRefVariant => 10,\n            ApiError::InvalidPurseName => 11,\n            ApiError::InvalidPurse => 12,\n            ApiError::UpgradeContractAtURef => 13,\n            ApiError::Transfer => 14,\n            ApiError::NoAccessRights => 15,\n            ApiError::CLTypeMismatch => 16,\n            ApiError::EarlyEndOfStream => 17,\n            ApiError::Formatting => 18,\n            ApiError::LeftOverBytes => 19,\n            ApiError::OutOfMemory => 20,\n            ApiError::MaxKeysLimit => 21,\n            ApiError::DuplicateKey => 22,\n            ApiError::PermissionDenied => 23,\n            ApiError::MissingKey => 24,\n            ApiError::ThresholdViolation => 25,\n  
          ApiError::KeyManagementThreshold => 26,\n            ApiError::DeploymentThreshold => 27,\n            ApiError::InsufficientTotalWeight => 28,\n            ApiError::InvalidSystemContract => 29,\n            ApiError::PurseNotCreated => 30,\n            ApiError::Unhandled => 31,\n            ApiError::BufferTooSmall => 32,\n            ApiError::HostBufferEmpty => 33,\n            ApiError::HostBufferFull => 34,\n            ApiError::AllocLayout => 35,\n            ApiError::DictionaryItemKeyExceedsLength => 36,\n            ApiError::InvalidDictionaryItemKey => 37,\n            ApiError::MissingSystemContractHash => 38,\n            ApiError::ExceededRecursionDepth => 39,\n            ApiError::NonRepresentableSerialization => 40,\n            ApiError::MessageTopicAlreadyRegistered => 41,\n            ApiError::MaxTopicsNumberExceeded => 42,\n            ApiError::MaxTopicNameSizeExceeded => 43,\n            ApiError::MessageTopicNotRegistered => 44,\n            ApiError::MessageTopicFull => 45,\n            ApiError::MessageTooLarge => 46,\n            ApiError::MaxMessagesPerBlockExceeded => 47,\n            ApiError::NotAllowedToAddContractVersion => 48,\n            ApiError::InvalidDelegationAmountLimits => 49,\n            ApiError::InvalidCallerInfoRequest => 50,\n            ApiError::AuctionError(value) => AUCTION_ERROR_OFFSET + u32::from(value),\n            ApiError::ContractHeader(value) => HEADER_ERROR_OFFSET + u32::from(value),\n            ApiError::Mint(value) => MINT_ERROR_OFFSET + u32::from(value),\n            ApiError::HandlePayment(value) => POS_ERROR_OFFSET + u32::from(value),\n            ApiError::User(value) => RESERVED_ERROR_MAX + 1 + u32::from(value),\n        }\n    }\n}\n\nimpl From<u32> for ApiError {\n    fn from(value: u32) -> ApiError {\n        match value {\n            1 => ApiError::None,\n            2 => ApiError::MissingArgument,\n            3 => ApiError::InvalidArgument,\n            4 => 
ApiError::Deserialize,\n            5 => ApiError::Read,\n            6 => ApiError::ValueNotFound,\n            7 => ApiError::ContractNotFound,\n            8 => ApiError::GetKey,\n            9 => ApiError::UnexpectedKeyVariant,\n            10 => ApiError::UnexpectedContractRefVariant,\n            11 => ApiError::InvalidPurseName,\n            12 => ApiError::InvalidPurse,\n            13 => ApiError::UpgradeContractAtURef,\n            14 => ApiError::Transfer,\n            15 => ApiError::NoAccessRights,\n            16 => ApiError::CLTypeMismatch,\n            17 => ApiError::EarlyEndOfStream,\n            18 => ApiError::Formatting,\n            19 => ApiError::LeftOverBytes,\n            20 => ApiError::OutOfMemory,\n            21 => ApiError::MaxKeysLimit,\n            22 => ApiError::DuplicateKey,\n            23 => ApiError::PermissionDenied,\n            24 => ApiError::MissingKey,\n            25 => ApiError::ThresholdViolation,\n            26 => ApiError::KeyManagementThreshold,\n            27 => ApiError::DeploymentThreshold,\n            28 => ApiError::InsufficientTotalWeight,\n            29 => ApiError::InvalidSystemContract,\n            30 => ApiError::PurseNotCreated,\n            31 => ApiError::Unhandled,\n            32 => ApiError::BufferTooSmall,\n            33 => ApiError::HostBufferEmpty,\n            34 => ApiError::HostBufferFull,\n            35 => ApiError::AllocLayout,\n            36 => ApiError::DictionaryItemKeyExceedsLength,\n            37 => ApiError::InvalidDictionaryItemKey,\n            38 => ApiError::MissingSystemContractHash,\n            39 => ApiError::ExceededRecursionDepth,\n            40 => ApiError::NonRepresentableSerialization,\n            41 => ApiError::MessageTopicAlreadyRegistered,\n            42 => ApiError::MaxTopicsNumberExceeded,\n            43 => ApiError::MaxTopicNameSizeExceeded,\n            44 => ApiError::MessageTopicNotRegistered,\n            45 => ApiError::MessageTopicFull,\n          
  46 => ApiError::MessageTooLarge,\n            47 => ApiError::MaxMessagesPerBlockExceeded,\n            48 => ApiError::NotAllowedToAddContractVersion,\n            49 => ApiError::InvalidDelegationAmountLimits,\n            50 => ApiError::InvalidCallerInfoRequest,\n            USER_ERROR_MIN..=USER_ERROR_MAX => ApiError::User(value as u16),\n            HP_ERROR_MIN..=HP_ERROR_MAX => ApiError::HandlePayment(value as u8),\n            MINT_ERROR_MIN..=MINT_ERROR_MAX => ApiError::Mint(value as u8),\n            HEADER_ERROR_MIN..=HEADER_ERROR_MAX => ApiError::ContractHeader(value as u8),\n            AUCTION_ERROR_MIN..=AUCTION_ERROR_MAX => ApiError::AuctionError(value as u8),\n            _ => ApiError::Unhandled,\n        }\n    }\n}\n\nimpl Debug for ApiError {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        match self {\n            ApiError::None => write!(f, \"ApiError::None\")?,\n            ApiError::MissingArgument => write!(f, \"ApiError::MissingArgument\")?,\n            ApiError::InvalidArgument => write!(f, \"ApiError::InvalidArgument\")?,\n            ApiError::Deserialize => write!(f, \"ApiError::Deserialize\")?,\n            ApiError::Read => write!(f, \"ApiError::Read\")?,\n            ApiError::ValueNotFound => write!(f, \"ApiError::ValueNotFound\")?,\n            ApiError::ContractNotFound => write!(f, \"ApiError::ContractNotFound\")?,\n            ApiError::GetKey => write!(f, \"ApiError::GetKey\")?,\n            ApiError::UnexpectedKeyVariant => write!(f, \"ApiError::UnexpectedKeyVariant\")?,\n            ApiError::UnexpectedContractRefVariant => {\n                write!(f, \"ApiError::UnexpectedContractRefVariant\")?\n            }\n            ApiError::InvalidPurseName => write!(f, \"ApiError::InvalidPurseName\")?,\n            ApiError::InvalidPurse => write!(f, \"ApiError::InvalidPurse\")?,\n            ApiError::UpgradeContractAtURef => write!(f, \"ApiError::UpgradeContractAtURef\")?,\n            ApiError::Transfer => 
write!(f, \"ApiError::Transfer\")?,\n            ApiError::NoAccessRights => write!(f, \"ApiError::NoAccessRights\")?,\n            ApiError::CLTypeMismatch => write!(f, \"ApiError::CLTypeMismatch\")?,\n            ApiError::EarlyEndOfStream => write!(f, \"ApiError::EarlyEndOfStream\")?,\n            ApiError::Formatting => write!(f, \"ApiError::Formatting\")?,\n            ApiError::LeftOverBytes => write!(f, \"ApiError::LeftOverBytes\")?,\n            ApiError::OutOfMemory => write!(f, \"ApiError::OutOfMemory\")?,\n            ApiError::MaxKeysLimit => write!(f, \"ApiError::MaxKeysLimit\")?,\n            ApiError::DuplicateKey => write!(f, \"ApiError::DuplicateKey\")?,\n            ApiError::PermissionDenied => write!(f, \"ApiError::PermissionDenied\")?,\n            ApiError::MissingKey => write!(f, \"ApiError::MissingKey\")?,\n            ApiError::ThresholdViolation => write!(f, \"ApiError::ThresholdViolation\")?,\n            ApiError::KeyManagementThreshold => write!(f, \"ApiError::KeyManagementThreshold\")?,\n            ApiError::DeploymentThreshold => write!(f, \"ApiError::DeploymentThreshold\")?,\n            ApiError::InsufficientTotalWeight => write!(f, \"ApiError::InsufficientTotalWeight\")?,\n            ApiError::InvalidSystemContract => write!(f, \"ApiError::InvalidSystemContract\")?,\n            ApiError::PurseNotCreated => write!(f, \"ApiError::PurseNotCreated\")?,\n            ApiError::Unhandled => write!(f, \"ApiError::Unhandled\")?,\n            ApiError::BufferTooSmall => write!(f, \"ApiError::BufferTooSmall\")?,\n            ApiError::HostBufferEmpty => write!(f, \"ApiError::HostBufferEmpty\")?,\n            ApiError::HostBufferFull => write!(f, \"ApiError::HostBufferFull\")?,\n            ApiError::AllocLayout => write!(f, \"ApiError::AllocLayout\")?,\n            ApiError::DictionaryItemKeyExceedsLength => {\n                write!(f, \"ApiError::DictionaryItemKeyTooLarge\")?\n            }\n            ApiError::InvalidDictionaryItemKey 
=> write!(f, \"ApiError::InvalidDictionaryItemKey\")?,\n            ApiError::MissingSystemContractHash => write!(f, \"ApiError::MissingContractHash\")?,\n            ApiError::NonRepresentableSerialization => {\n                write!(f, \"ApiError::NonRepresentableSerialization\")?\n            }\n            ApiError::MessageTopicAlreadyRegistered => {\n                write!(f, \"ApiError::MessageTopicAlreadyRegistered\")?\n            }\n            ApiError::MaxTopicsNumberExceeded => write!(f, \"ApiError::MaxTopicsNumberExceeded\")?,\n            ApiError::MaxTopicNameSizeExceeded => write!(f, \"ApiError::MaxTopicNameSizeExceeded\")?,\n            ApiError::MessageTopicNotRegistered => {\n                write!(f, \"ApiError::MessageTopicNotRegistered\")?\n            }\n            ApiError::MessageTopicFull => write!(f, \"ApiError::MessageTopicFull\")?,\n            ApiError::MessageTooLarge => write!(f, \"ApiError::MessageTooLarge\")?,\n            ApiError::MaxMessagesPerBlockExceeded => {\n                write!(f, \"ApiError::MaxMessagesPerBlockExceeded\")?\n            }\n            ApiError::NotAllowedToAddContractVersion => {\n                write!(f, \"ApiError::NotAllowedToAddContractVersion\")?\n            }\n            ApiError::InvalidDelegationAmountLimits => {\n                write!(f, \"ApiError::InvalidDelegationAmountLimits\")?\n            }\n            ApiError::InvalidCallerInfoRequest => write!(f, \"ApiError::InvalidCallerInfoRequest\")?,\n            ApiError::ExceededRecursionDepth => write!(f, \"ApiError::ExceededRecursionDepth\")?,\n            ApiError::AuctionError(value) => write!(\n                f,\n                \"ApiError::AuctionError({:?})\",\n                auction::Error::try_from(*value).map_err(|_err| fmt::Error)?\n            )?,\n            ApiError::ContractHeader(value) => write!(\n                f,\n                \"ApiError::ContractHeader({:?})\",\n                
addressable_entity::Error::try_from(*value).map_err(|_err| fmt::Error)?\n            )?,\n            ApiError::Mint(value) => write!(\n                f,\n                \"ApiError::Mint({:?})\",\n                mint::Error::try_from(*value).map_err(|_err| fmt::Error)?\n            )?,\n            ApiError::HandlePayment(value) => write!(\n                f,\n                \"ApiError::HandlePayment({:?})\",\n                handle_payment::Error::try_from(*value).map_err(|_err| fmt::Error)?\n            )?,\n            ApiError::User(value) => write!(f, \"ApiError::User({})\", value)?,\n        }\n        write!(f, \" [{}]\", u32::from(*self))\n    }\n}\n\nimpl fmt::Display for ApiError {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        match self {\n            ApiError::User(value) => write!(f, \"User error: {}\", value),\n            ApiError::ContractHeader(value) => write!(f, \"Contract header error: {}\", value),\n            ApiError::Mint(value) => write!(f, \"Mint error: {}\", value),\n            ApiError::HandlePayment(value) => write!(f, \"Handle Payment error: {}\", value),\n            _ => <Self as Debug>::fmt(self, f),\n        }\n    }\n}\n\n// This function is not intended to be used by third party crates.\n#[doc(hidden)]\npub fn i32_from<T>(result: Result<(), T>) -> i32\nwhere\n    ApiError: From<T>,\n{\n    match result {\n        Ok(()) => 0,\n        Err(error) => {\n            let api_error = ApiError::from(error);\n            u32::from(api_error) as i32\n        }\n    }\n}\n\n/// Converts an `i32` to a `Result<(), ApiError>`, where `0` represents `Ok(())`, and all other\n/// inputs are mapped to `Err(ApiError::<variant>)`.  
The full list of mappings can be found in the\n/// [docs for `ApiError`](ApiError#mappings).\npub fn result_from(value: i32) -> Result<(), ApiError> {\n    match value {\n        0 => Ok(()),\n        _ => Err(ApiError::from(value as u32)),\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    fn round_trip(result: Result<(), ApiError>) {\n        let code = i32_from(result);\n        assert_eq!(result, result_from(code));\n    }\n\n    #[test]\n    fn error_values() {\n        assert_eq!(65_024_u32, u32::from(ApiError::Mint(0))); // MINT_ERROR_OFFSET == 65,024\n        assert_eq!(65_279_u32, u32::from(ApiError::Mint(u8::MAX)));\n        assert_eq!(65_280_u32, u32::from(ApiError::HandlePayment(0))); // POS_ERROR_OFFSET == 65,280\n        assert_eq!(65_535_u32, u32::from(ApiError::HandlePayment(u8::MAX)));\n        assert_eq!(65_536_u32, u32::from(ApiError::User(0))); // u16::MAX + 1\n        assert_eq!(131_071_u32, u32::from(ApiError::User(u16::MAX))); // 2 * u16::MAX + 1\n    }\n\n    #[test]\n    fn error_descriptions_getkey() {\n        assert_eq!(\"ApiError::GetKey [8]\", &format!(\"{:?}\", ApiError::GetKey));\n        assert_eq!(\"ApiError::GetKey [8]\", &format!(\"{}\", ApiError::GetKey));\n    }\n\n    #[test]\n    fn error_descriptions_contract_header() {\n        assert_eq!(\n            \"ApiError::ContractHeader(PreviouslyUsedVersion) [64769]\",\n            &format!(\n                \"{:?}\",\n                ApiError::ContractHeader(addressable_entity::Error::PreviouslyUsedVersion as u8)\n            )\n        );\n        assert_eq!(\n            \"Contract header error: 0\",\n            &format!(\"{}\", ApiError::ContractHeader(0))\n        );\n        assert_eq!(\n            \"Contract header error: 255\",\n            &format!(\"{}\", ApiError::ContractHeader(u8::MAX))\n        );\n    }\n\n    #[test]\n    fn error_descriptions_mint() {\n        assert_eq!(\n            \"ApiError::Mint(InsufficientFunds) [65024]\",\n            
&format!(\"{:?}\", ApiError::Mint(0))\n        );\n        assert_eq!(\"Mint error: 0\", &format!(\"{}\", ApiError::Mint(0)));\n        assert_eq!(\"Mint error: 255\", &format!(\"{}\", ApiError::Mint(u8::MAX)));\n    }\n\n    #[test]\n    fn error_descriptions_handle_payment() {\n        assert_eq!(\n            \"ApiError::HandlePayment(NotBonded) [65280]\",\n            &format!(\n                \"{:?}\",\n                ApiError::HandlePayment(handle_payment::Error::NotBonded as u8)\n            )\n        );\n    }\n\n    #[test]\n    fn error_descriptions_handle_payment_display() {\n        assert_eq!(\n            \"Handle Payment error: 0\",\n            &format!(\n                \"{}\",\n                ApiError::HandlePayment(handle_payment::Error::NotBonded as u8)\n            )\n        );\n    }\n\n    #[test]\n    fn error_descriptions_user_errors() {\n        assert_eq!(\n            \"ApiError::User(0) [65536]\",\n            &format!(\"{:?}\", ApiError::User(0))\n        );\n\n        assert_eq!(\"User error: 0\", &format!(\"{}\", ApiError::User(0)));\n        assert_eq!(\n            \"ApiError::User(65535) [131071]\",\n            &format!(\"{:?}\", ApiError::User(u16::MAX))\n        );\n        assert_eq!(\n            \"User error: 65535\",\n            &format!(\"{}\", ApiError::User(u16::MAX))\n        );\n    }\n\n    #[test]\n    fn error_edge_cases() {\n        assert_eq!(Err(ApiError::Unhandled), result_from(i32::MAX));\n        assert_eq!(\n            Err(ApiError::ContractHeader(255)),\n            result_from(MINT_ERROR_OFFSET as i32 - 1)\n        );\n        assert_eq!(Err(ApiError::Unhandled), result_from(-1));\n        assert_eq!(Err(ApiError::Unhandled), result_from(i32::MIN));\n    }\n\n    #[test]\n    fn error_round_trips() {\n        round_trip(Ok(()));\n        round_trip(Err(ApiError::None));\n        round_trip(Err(ApiError::MissingArgument));\n        round_trip(Err(ApiError::InvalidArgument));\n        
round_trip(Err(ApiError::Deserialize));\n        round_trip(Err(ApiError::Read));\n        round_trip(Err(ApiError::ValueNotFound));\n        round_trip(Err(ApiError::ContractNotFound));\n        round_trip(Err(ApiError::GetKey));\n        round_trip(Err(ApiError::UnexpectedKeyVariant));\n        round_trip(Err(ApiError::UnexpectedContractRefVariant));\n        round_trip(Err(ApiError::InvalidPurseName));\n        round_trip(Err(ApiError::InvalidPurse));\n        round_trip(Err(ApiError::UpgradeContractAtURef));\n        round_trip(Err(ApiError::Transfer));\n        round_trip(Err(ApiError::NoAccessRights));\n        round_trip(Err(ApiError::CLTypeMismatch));\n        round_trip(Err(ApiError::EarlyEndOfStream));\n        round_trip(Err(ApiError::Formatting));\n        round_trip(Err(ApiError::LeftOverBytes));\n        round_trip(Err(ApiError::OutOfMemory));\n        round_trip(Err(ApiError::MaxKeysLimit));\n        round_trip(Err(ApiError::DuplicateKey));\n        round_trip(Err(ApiError::PermissionDenied));\n        round_trip(Err(ApiError::MissingKey));\n        round_trip(Err(ApiError::ThresholdViolation));\n        round_trip(Err(ApiError::KeyManagementThreshold));\n        round_trip(Err(ApiError::DeploymentThreshold));\n        round_trip(Err(ApiError::InsufficientTotalWeight));\n        round_trip(Err(ApiError::InvalidSystemContract));\n        round_trip(Err(ApiError::PurseNotCreated));\n        round_trip(Err(ApiError::Unhandled));\n        round_trip(Err(ApiError::BufferTooSmall));\n        round_trip(Err(ApiError::HostBufferEmpty));\n        round_trip(Err(ApiError::HostBufferFull));\n        round_trip(Err(ApiError::AllocLayout));\n        round_trip(Err(ApiError::NonRepresentableSerialization));\n        round_trip(Err(ApiError::ContractHeader(0)));\n        round_trip(Err(ApiError::ContractHeader(u8::MAX)));\n        round_trip(Err(ApiError::Mint(0)));\n        round_trip(Err(ApiError::Mint(u8::MAX)));\n        
round_trip(Err(ApiError::HandlePayment(0)));\n        round_trip(Err(ApiError::HandlePayment(u8::MAX)));\n        round_trip(Err(ApiError::User(0)));\n        round_trip(Err(ApiError::User(u16::MAX)));\n        round_trip(Err(ApiError::AuctionError(0)));\n        round_trip(Err(ApiError::AuctionError(u8::MAX)));\n        round_trip(Err(ApiError::MessageTopicAlreadyRegistered));\n        round_trip(Err(ApiError::MaxTopicsNumberExceeded));\n        round_trip(Err(ApiError::MaxTopicNameSizeExceeded));\n        round_trip(Err(ApiError::MessageTopicNotRegistered));\n        round_trip(Err(ApiError::MessageTopicFull));\n        round_trip(Err(ApiError::MessageTooLarge));\n        round_trip(Err(ApiError::NotAllowedToAddContractVersion));\n        round_trip(Err(ApiError::InvalidDelegationAmountLimits));\n    }\n}\n"
  },
  {
    "path": "types/src/auction_state.rs",
    "content": "#![allow(deprecated)]\n\nuse alloc::{\n    collections::{btree_map::Entry, BTreeMap},\n    vec::Vec,\n};\n#[cfg(feature = \"json-schema\")]\nuse once_cell::sync::Lazy;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n#[cfg(feature = \"json-schema\")]\nuse serde_map_to_array::KeyValueJsonSchema;\nuse serde_map_to_array::{BTreeMapToArray, KeyValueLabels};\n\nuse crate::{\n    system::auction::{\n        Bid, BidKind, DelegatorBid, DelegatorKind, EraValidators, Staking, ValidatorBid,\n    },\n    Digest, EraId, PublicKey, U512,\n};\n\n#[cfg(feature = \"json-schema\")]\nstatic ERA_VALIDATORS: Lazy<EraValidators> = Lazy::new(|| {\n    use crate::SecretKey;\n\n    let secret_key_1 = SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap();\n    let public_key_1 = PublicKey::from(&secret_key_1);\n\n    let mut validator_weights = BTreeMap::new();\n    validator_weights.insert(public_key_1, U512::from(10));\n\n    let mut era_validators = BTreeMap::new();\n    era_validators.insert(EraId::from(10u64), validator_weights);\n\n    era_validators\n});\n\n#[cfg(feature = \"json-schema\")]\nstatic AUCTION_INFO: Lazy<AuctionState> = Lazy::new(|| {\n    use crate::{system::auction::DelegationRate, AccessRights, SecretKey, URef};\n    use num_traits::Zero;\n\n    let state_root_hash = Digest::from([11; Digest::LENGTH]);\n    let validator_secret_key =\n        SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap();\n    let validator_public_key = PublicKey::from(&validator_secret_key);\n\n    let mut bids = vec![];\n    let validator_bid = ValidatorBid::unlocked(\n        validator_public_key.clone(),\n        URef::new([250; 32], AccessRights::READ_ADD_WRITE),\n        U512::from(20),\n        DelegationRate::zero(),\n        0,\n        u64::MAX,\n        0,\n    );\n    bids.push(BidKind::Validator(Box::new(validator_bid)));\n\n    let delegator_secret_key =\n        
SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap();\n    let delegator_public_key = PublicKey::from(&delegator_secret_key);\n    let delegator_bid = DelegatorBid::unlocked(\n        delegator_public_key.into(),\n        U512::from(10),\n        URef::new([251; 32], AccessRights::READ_ADD_WRITE),\n        validator_public_key,\n    );\n    bids.push(BidKind::Delegator(Box::new(delegator_bid)));\n\n    let height: u64 = 10;\n    let era_validators = ERA_VALIDATORS.clone();\n    AuctionState::new(state_root_hash, height, era_validators, bids)\n});\n\n/// A validator's weight.\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\n#[deprecated(since = \"5.0.0\")]\npub struct JsonValidatorWeights {\n    public_key: PublicKey,\n    weight: U512,\n}\n\n/// The validators for the given era.\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\n#[deprecated(since = \"5.0.0\")]\npub struct JsonEraValidators {\n    era_id: EraId,\n    validator_weights: Vec<JsonValidatorWeights>,\n}\n\n/// Data structure summarizing auction contract data.\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\n#[deprecated(since = \"5.0.0\")]\npub struct AuctionState {\n    /// Global state hash.\n    pub state_root_hash: Digest,\n    /// Block height.\n    pub block_height: u64,\n    /// Era validators.\n    pub era_validators: Vec<JsonEraValidators>,\n    /// All bids.\n    #[serde(with = \"BTreeMapToArray::<PublicKey, Bid, BidLabels>\")]\n    bids: BTreeMap<PublicKey, Bid>,\n}\n\nimpl AuctionState {\n    /// Create new instance of `AuctionState`\n    /// this logic will retrofit new data into old structure if applicable (it's a lossy\n    /// conversion).\n    pub fn 
new(\n        state_root_hash: Digest,\n        block_height: u64,\n        era_validators: EraValidators,\n        bids: Vec<BidKind>,\n    ) -> Self {\n        let mut json_era_validators: Vec<JsonEraValidators> = Vec::new();\n        for (era_id, validator_weights) in era_validators.iter() {\n            let mut json_validator_weights: Vec<JsonValidatorWeights> = Vec::new();\n            for (public_key, weight) in validator_weights.iter() {\n                json_validator_weights.push(JsonValidatorWeights {\n                    public_key: public_key.clone(),\n                    weight: *weight,\n                });\n            }\n            json_era_validators.push(JsonEraValidators {\n                era_id: *era_id,\n                validator_weights: json_validator_weights,\n            });\n        }\n\n        let staking = {\n            let mut staking: Staking = BTreeMap::new();\n            for bid_kind in bids.iter().filter(|x| x.is_unified()) {\n                if let BidKind::Unified(bid) = bid_kind {\n                    let public_key = bid.validator_public_key().clone();\n                    let validator_bid = ValidatorBid::unlocked(\n                        bid.validator_public_key().clone(),\n                        *bid.bonding_purse(),\n                        *bid.staked_amount(),\n                        *bid.delegation_rate(),\n                        0,\n                        u64::MAX,\n                        0,\n                    );\n                    let mut delegators: BTreeMap<DelegatorKind, DelegatorBid> = BTreeMap::new();\n                    for (delegator_public_key, delegator) in bid.delegators() {\n                        delegators.insert(\n                            DelegatorKind::PublicKey(delegator_public_key.clone()),\n                            DelegatorBid::from(delegator.clone()),\n                        );\n                    }\n                    staking.insert(public_key, (validator_bid, 
delegators));\n                }\n            }\n\n            for bid_kind in bids.iter().filter(|x| x.is_validator()) {\n                if let BidKind::Validator(validator_bid) = bid_kind {\n                    let public_key = validator_bid.validator_public_key().clone();\n                    staking.insert(public_key, (*validator_bid.clone(), BTreeMap::new()));\n                }\n            }\n\n            for bid_kind in bids.iter().filter(|x| x.is_delegator()) {\n                if let BidKind::Delegator(delegator_bid) = bid_kind {\n                    let validator_public_key = delegator_bid.validator_public_key().clone();\n                    if let Entry::Occupied(mut occupant) =\n                        staking.entry(validator_public_key.clone())\n                    {\n                        let (_, delegators) = occupant.get_mut();\n                        delegators.insert(\n                            delegator_bid.delegator_kind().clone(),\n                            *delegator_bid.clone(),\n                        );\n                    }\n                }\n            }\n            staking\n        };\n\n        let mut bids: BTreeMap<PublicKey, Bid> = BTreeMap::new();\n        for (public_key, (validator_bid, delegators)) in staking {\n            let bid = Bid::from_non_unified(validator_bid, delegators);\n            bids.insert(public_key, bid);\n        }\n\n        AuctionState {\n            state_root_hash,\n            block_height,\n            era_validators: json_era_validators,\n            bids,\n        }\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    #[cfg(feature = \"json-schema\")]\n    pub fn example() -> &'static Self {\n        &AUCTION_INFO\n    }\n}\n\nstruct BidLabels;\n\nimpl KeyValueLabels for BidLabels {\n    const KEY: &'static str = \"public_key\";\n    const VALUE: &'static str = \"bid\";\n}\n\n#[cfg(feature = \"json-schema\")]\nimpl KeyValueJsonSchema 
for BidLabels {\n    const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some(\"PublicKeyAndBid\");\n    const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> =\n        Some(\"A bid associated with the given public key.\");\n    const JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> = Some(\"The public key of the bidder.\");\n    const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> = Some(\"The bid details.\");\n}\n"
  },
  {
    "path": "types/src/block/available_block_range.rs",
    "content": "use core::fmt::{self, Display, Formatter};\n\nuse alloc::vec::Vec;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::bytesrepr::{self, FromBytes, ToBytes};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n\n/// An unbroken, inclusive range of blocks.\n#[derive(Copy, Clone, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct AvailableBlockRange {\n    /// The inclusive lower bound of the range.\n    low: u64,\n    /// The inclusive upper bound of the range.\n    high: u64,\n}\n\nimpl AvailableBlockRange {\n    /// An `AvailableRange` of [0, 0].\n    pub const RANGE_0_0: AvailableBlockRange = AvailableBlockRange { low: 0, high: 0 };\n\n    /// Constructs a new `AvailableBlockRange` with the given limits.\n    pub fn new(low: u64, high: u64) -> Self {\n        assert!(\n            low <= high,\n            \"cannot construct available block range with low > high\"\n        );\n        AvailableBlockRange { low, high }\n    }\n\n    /// Returns `true` if `height` is within the range.\n    pub fn contains(&self, height: u64) -> bool {\n        height >= self.low && height <= self.high\n    }\n\n    /// Returns the low value.\n    pub fn low(&self) -> u64 {\n        self.low\n    }\n\n    /// Returns the high value.\n    pub fn high(&self) -> u64 {\n        self.high\n    }\n\n    /// Random.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        let low = rng.gen::<u16>() as u64;\n        let high = low + rng.gen::<u16>() as u64;\n        Self { low, high }\n    }\n}\n\nimpl Display for AvailableBlockRange {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            formatter,\n            \"available 
block range [{}, {}]\",\n            self.low, self.high\n        )\n    }\n}\n\nimpl ToBytes for AvailableBlockRange {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.low.write_bytes(writer)?;\n        self.high.write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.low.serialized_length() + self.high.serialized_length()\n    }\n}\n\nimpl FromBytes for AvailableBlockRange {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (low, remainder) = u64::from_bytes(bytes)?;\n        let (high, remainder) = u64::from_bytes(remainder)?;\n        Ok((AvailableBlockRange { low, high }, remainder))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::testing::TestRng;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = AvailableBlockRange::random(rng);\n        bytesrepr::test_serialization_roundtrip(&val);\n    }\n}\n"
  },
  {
    "path": "types/src/block/block_body/block_body_v1.rs",
    "content": "use alloc::vec::Vec;\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"once_cell\", test))]\nuse once_cell::sync::OnceCell;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    DeployHash, Digest, PublicKey,\n};\n\n/// The body portion of a block. Version 1.\n#[derive(Clone, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct BlockBodyV1 {\n    /// The public key of the validator which proposed the block.\n    pub(super) proposer: PublicKey,\n    /// The deploy hashes of the non-transfer deploys within the block.\n    pub(super) deploy_hashes: Vec<DeployHash>,\n    /// The deploy hashes of the transfers within the block.\n    pub(super) transfer_hashes: Vec<DeployHash>,\n    #[serde(skip)]\n    #[cfg_attr(\n        all(any(feature = \"once_cell\", test), feature = \"datasize\"),\n        data_size(skip)\n    )]\n    #[cfg(any(feature = \"once_cell\", test))]\n    pub(super) hash: OnceCell<Digest>,\n}\n\nimpl BlockBodyV1 {\n    /// Constructs a new `BlockBody`.\n    pub(crate) fn new(\n        proposer: PublicKey,\n        deploy_hashes: Vec<DeployHash>,\n        transfer_hashes: Vec<DeployHash>,\n    ) -> Self {\n        BlockBodyV1 {\n            proposer,\n            deploy_hashes,\n            transfer_hashes,\n            #[cfg(any(feature = \"once_cell\", test))]\n            hash: OnceCell::new(),\n        }\n    }\n\n    /// Returns the public key of the validator which proposed the block.\n    pub fn proposer(&self) -> &PublicKey {\n        &self.proposer\n    }\n\n    /// Returns the deploy hashes of the non-transfer deploys within the block.\n    pub fn deploy_hashes(&self) -> &[DeployHash] {\n        &self.deploy_hashes\n    }\n\n    /// 
Returns the deploy hashes of the transfers within the block.\n    pub fn transfer_hashes(&self) -> &[DeployHash] {\n        &self.transfer_hashes\n    }\n\n    /// Returns the deploy and transfer hashes in the order in which they were executed.\n    pub fn deploy_and_transfer_hashes(&self) -> impl Iterator<Item = &DeployHash> {\n        self.deploy_hashes()\n            .iter()\n            .chain(self.transfer_hashes().iter())\n    }\n\n    /// Returns the body hash, i.e. the hash of the body's serialized bytes.\n    pub fn hash(&self) -> Digest {\n        #[cfg(any(feature = \"once_cell\", test))]\n        return *self.hash.get_or_init(|| self.compute_hash());\n\n        #[cfg(not(any(feature = \"once_cell\", test)))]\n        self.compute_hash()\n    }\n\n    fn compute_hash(&self) -> Digest {\n        let serialized_body = self\n            .to_bytes()\n            .unwrap_or_else(|error| panic!(\"should serialize block body: {}\", error));\n        Digest::hash(serialized_body)\n    }\n}\n\nimpl PartialEq for BlockBodyV1 {\n    fn eq(&self, other: &BlockBodyV1) -> bool {\n        // Destructure to make sure we don't accidentally omit fields.\n        #[cfg(any(feature = \"once_cell\", test))]\n        let BlockBodyV1 {\n            proposer,\n            deploy_hashes,\n            transfer_hashes,\n            hash: _,\n        } = self;\n        #[cfg(not(any(feature = \"once_cell\", test)))]\n        let BlockBodyV1 {\n            proposer,\n            deploy_hashes,\n            transfer_hashes,\n        } = self;\n        *proposer == other.proposer\n            && *deploy_hashes == other.deploy_hashes\n            && *transfer_hashes == other.transfer_hashes\n    }\n}\n\nimpl Display for BlockBodyV1 {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(\n            formatter,\n            \"block body proposed by {}, {} deploys, {} transfers\",\n            self.proposer,\n            self.deploy_hashes.len(),\n            
self.transfer_hashes.len()\n        )\n    }\n}\n\nimpl ToBytes for BlockBodyV1 {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.proposer.write_bytes(writer)?;\n        self.deploy_hashes.write_bytes(writer)?;\n        self.transfer_hashes.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.proposer.serialized_length()\n            + self.deploy_hashes.serialized_length()\n            + self.transfer_hashes.serialized_length()\n    }\n}\n\nimpl FromBytes for BlockBodyV1 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (proposer, bytes) = PublicKey::from_bytes(bytes)?;\n        let (deploy_hashes, bytes) = Vec::<DeployHash>::from_bytes(bytes)?;\n        let (transfer_hashes, bytes) = Vec::<DeployHash>::from_bytes(bytes)?;\n        let body = BlockBodyV1 {\n            proposer,\n            deploy_hashes,\n            transfer_hashes,\n            #[cfg(any(feature = \"once_cell\", test))]\n            hash: OnceCell::new(),\n        };\n        Ok((body, bytes))\n    }\n}\n"
  },
  {
    "path": "types/src/block/block_body/block_body_v2.rs",
    "content": "use alloc::{collections::BTreeMap, vec::Vec};\nuse core::fmt::{self, Display, Formatter};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"once_cell\", test))]\nuse once_cell::sync::OnceCell;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    block::RewardedSignatures,\n    bytesrepr::{self, FromBytes, ToBytes},\n    Digest, TransactionHash, AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID, LARGE_WASM_LANE_ID,\n    MEDIUM_WASM_LANE_ID, MINT_LANE_ID, SMALL_WASM_LANE_ID,\n};\n\n/// The body portion of a block. Version 2.\n#[derive(Clone, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct BlockBodyV2 {\n    /// Map of transactions mapping categories to a list of transaction hashes.\n    pub(super) transactions: BTreeMap<u8, Vec<TransactionHash>>,\n    /// List of identifiers for finality signatures for a particular past block.\n    pub(super) rewarded_signatures: RewardedSignatures,\n    #[serde(skip)]\n    #[cfg_attr(\n        all(any(feature = \"once_cell\", test), feature = \"datasize\"),\n        data_size(skip)\n    )]\n    #[cfg(any(feature = \"once_cell\", test))]\n    pub(super) hash: OnceCell<Digest>,\n}\n\nimpl BlockBodyV2 {\n    /// Constructs a new `BlockBodyV2`.\n    pub(crate) fn new(\n        transactions: BTreeMap<u8, Vec<TransactionHash>>,\n        rewarded_signatures: RewardedSignatures,\n    ) -> Self {\n        BlockBodyV2 {\n            transactions,\n            rewarded_signatures,\n            #[cfg(any(feature = \"once_cell\", test))]\n            hash: OnceCell::new(),\n        }\n    }\n\n    /// Returns the hashes of the transactions within the block filtered by lane_id.\n    pub fn transaction_by_lane(&self, lane_id: u8) -> impl Iterator<Item = TransactionHash> {\n        match self.transactions.get(&lane_id) {\n           
 Some(transactions) => transactions.to_vec(),\n            None => vec![],\n        }\n        .into_iter()\n    }\n\n    /// Returns the hashes of the mint transactions within the block.\n    pub fn mint(&self) -> impl Iterator<Item = TransactionHash> {\n        self.transaction_by_lane(MINT_LANE_ID)\n    }\n\n    /// Returns the hashes of the auction transactions within the block.\n    pub fn auction(&self) -> impl Iterator<Item = TransactionHash> {\n        self.transaction_by_lane(AUCTION_LANE_ID)\n    }\n\n    /// Returns the hashes of the installer/upgrader transactions within the block.\n    pub fn install_upgrade(&self) -> impl Iterator<Item = TransactionHash> {\n        self.transaction_by_lane(INSTALL_UPGRADE_LANE_ID)\n    }\n\n    /// Returns the hashes of the transactions filtered by lane id within the block.\n    pub fn transactions_by_lane_id(&self, lane_id: u8) -> impl Iterator<Item = TransactionHash> {\n        self.transaction_by_lane(lane_id)\n    }\n\n    /// Returns a reference to the collection of mapped transactions.\n    pub fn transactions(&self) -> &BTreeMap<u8, Vec<TransactionHash>> {\n        &self.transactions\n    }\n\n    /// Returns all of the transaction hashes in the order in which they were executed.\n    pub fn all_transactions(&self) -> impl Iterator<Item = &TransactionHash> {\n        self.transactions.values().flatten()\n    }\n\n    /// Returns the body hash, i.e. 
the hash of the body's serialized bytes.\n    pub fn hash(&self) -> Digest {\n        #[cfg(any(feature = \"once_cell\", test))]\n        return *self.hash.get_or_init(|| self.compute_hash());\n\n        #[cfg(not(any(feature = \"once_cell\", test)))]\n        self.compute_hash()\n    }\n\n    fn compute_hash(&self) -> Digest {\n        let serialized_body = self\n            .to_bytes()\n            .unwrap_or_else(|error| panic!(\"should serialize block body: {}\", error));\n        Digest::hash(serialized_body)\n    }\n\n    /// Return the list of identifiers for finality signatures for a particular past block.\n    pub fn rewarded_signatures(&self) -> &RewardedSignatures {\n        &self.rewarded_signatures\n    }\n}\n\nimpl PartialEq for BlockBodyV2 {\n    fn eq(&self, other: &BlockBodyV2) -> bool {\n        // Destructure to make sure we don't accidentally omit fields.\n        #[cfg(any(feature = \"once_cell\", test))]\n        let BlockBodyV2 {\n            transactions,\n            rewarded_signatures,\n            hash: _,\n        } = self;\n        #[cfg(not(any(feature = \"once_cell\", test)))]\n        let BlockBodyV2 {\n            transactions,\n            rewarded_signatures,\n        } = self;\n        *transactions == other.transactions && *rewarded_signatures == other.rewarded_signatures\n    }\n}\n\nimpl Display for BlockBodyV2 {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(\n            formatter,\n            \"block body, {} mint, {} auction, {} install_upgrade, {} large wasm, {} medium wasm, {} small wasm\",\n            self.mint().count(),\n            self.auction().count(),\n            self.install_upgrade().count(),\n            self.transaction_by_lane(LARGE_WASM_LANE_ID).count(),\n            self.transaction_by_lane(MEDIUM_WASM_LANE_ID).count(),\n            self.transaction_by_lane(SMALL_WASM_LANE_ID).count(),\n        )\n    }\n}\n\nimpl ToBytes for BlockBodyV2 {\n    fn write_bytes(&self, 
writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.transactions.write_bytes(writer)?;\n        self.rewarded_signatures.write_bytes(writer)?;\n        Ok(())\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.transactions.serialized_length() + self.rewarded_signatures.serialized_length()\n    }\n}\n\nimpl FromBytes for BlockBodyV2 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (transactions, bytes) = FromBytes::from_bytes(bytes)?;\n        let (rewarded_signatures, bytes) = RewardedSignatures::from_bytes(bytes)?;\n        let body = BlockBodyV2 {\n            transactions,\n            rewarded_signatures,\n            #[cfg(any(feature = \"once_cell\", test))]\n            hash: OnceCell::new(),\n        };\n        Ok((body, bytes))\n    }\n}\n"
  },
  {
    "path": "types/src/block/block_body.rs",
    "content": "mod block_body_v1;\nmod block_body_v2;\n\npub use block_body_v1::BlockBodyV1;\npub use block_body_v2::BlockBodyV2;\n\nuse alloc::vec::Vec;\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse crate::bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH};\n\nconst TAG_LENGTH: usize = U8_SERIALIZED_LENGTH;\n\n/// Tag for block body v1.\npub const BLOCK_BODY_V1_TAG: u8 = 0;\n/// Tag for block body v2.\npub const BLOCK_BODY_V2_TAG: u8 = 1;\n\n/// The versioned body portion of a block. It encapsulates different variants of the BlockBody\n/// struct.\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(any(feature = \"testing\", test), derive(PartialEq))]\n#[derive(Clone, Serialize, Deserialize, Debug)]\n#[allow(clippy::large_enum_variant)]\npub enum BlockBody {\n    /// The legacy, initial version of the body portion of a block.\n    #[serde(rename = \"Version1\")]\n    V1(BlockBodyV1),\n    /// The version 2 of the body portion of a block, which includes the\n    /// `past_finality_signatures`.\n    #[serde(rename = \"Version2\")]\n    V2(BlockBodyV2),\n}\n\nimpl Display for BlockBody {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            BlockBody::V1(v1) => Display::fmt(&v1, formatter),\n            BlockBody::V2(v2) => Display::fmt(&v2, formatter),\n        }\n    }\n}\n\nimpl From<BlockBodyV1> for BlockBody {\n    fn from(body: BlockBodyV1) -> Self {\n        BlockBody::V1(body)\n    }\n}\n\nimpl From<&BlockBodyV2> for BlockBody {\n    fn from(body: &BlockBodyV2) -> Self {\n        BlockBody::V2(body.clone())\n    }\n}\n\nimpl ToBytes for BlockBody {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        match self {\n            BlockBody::V1(v1) => {\n                buffer.insert(0, BLOCK_BODY_V1_TAG);\n           
     buffer.extend(v1.to_bytes()?);\n            }\n            BlockBody::V2(v2) => {\n                buffer.insert(0, BLOCK_BODY_V2_TAG);\n                buffer.extend(v2.to_bytes()?);\n            }\n        }\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        TAG_LENGTH\n            + match self {\n                BlockBody::V1(v1) => v1.serialized_length(),\n                BlockBody::V2(v2) => v2.serialized_length(),\n            }\n    }\n}\n\nimpl FromBytes for BlockBody {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            BLOCK_BODY_V1_TAG => {\n                let (body, remainder): (BlockBodyV1, _) = FromBytes::from_bytes(remainder)?;\n                Ok((Self::V1(body), remainder))\n            }\n            BLOCK_BODY_V2_TAG => {\n                let (body, remainder): (BlockBodyV2, _) = FromBytes::from_bytes(remainder)?;\n                Ok((Self::V2(body), remainder))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{bytesrepr, testing::TestRng, TestBlockBuilder, TestBlockV1Builder};\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let block_body_v1 = TestBlockV1Builder::new().build_versioned(rng).clone_body();\n        bytesrepr::test_serialization_roundtrip(&block_body_v1);\n\n        let block_body_v2 = TestBlockBuilder::new().build_versioned(rng).clone_body();\n        bytesrepr::test_serialization_roundtrip(&block_body_v2);\n    }\n}\n"
  },
  {
    "path": "types/src/block/block_global.rs",
    "content": "use alloc::{\n    string::{String, ToString},\n    vec::Vec,\n};\n\nuse crate::{\n    bytesrepr,\n    bytesrepr::{FromBytes, ToBytes},\n    checksummed_hex,\n    key::FromStrError,\n    Key,\n};\n\nuse core::{\n    convert::TryFrom,\n    fmt::{Debug, Display, Formatter},\n};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::distributions::{Distribution, Standard};\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nconst BLOCK_TIME_TAG: u8 = 0;\nconst MESSAGE_COUNT_TAG: u8 = 1;\nconst PROTOCOL_VERSION_TAG: u8 = 2;\nconst ADDRESSABLE_ENTITY_TAG: u8 = 3;\n\n/// Serialization tag for BlockGlobalAddr variants.\n#[derive(\n    Debug, Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize,\n)]\n#[repr(u8)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub enum BlockGlobalAddrTag {\n    #[default]\n    /// Tag for block time variant.\n    BlockTime = BLOCK_TIME_TAG,\n    /// Tag for processing variant.\n    MessageCount = MESSAGE_COUNT_TAG,\n    /// Tag for protocol version variant.\n    ProtocolVersion = PROTOCOL_VERSION_TAG,\n    /// Tag for addressable entity variant.\n    AddressableEntity = ADDRESSABLE_ENTITY_TAG,\n}\n\nimpl BlockGlobalAddrTag {\n    /// The length in bytes of a [`BlockGlobalAddrTag`].\n    pub const BLOCK_GLOBAL_ADDR_TAG_LENGTH: usize = 1;\n\n    /// Attempts to map `BalanceHoldAddrTag` from a u8.\n    pub fn try_from_u8(value: u8) -> Option<Self> {\n        // TryFrom requires std, so doing this instead.\n        if value == BLOCK_TIME_TAG {\n            return Some(BlockGlobalAddrTag::BlockTime);\n        }\n        if value == MESSAGE_COUNT_TAG {\n            return Some(BlockGlobalAddrTag::MessageCount);\n        }\n        if value == PROTOCOL_VERSION_TAG {\n         
   return Some(BlockGlobalAddrTag::ProtocolVersion);\n        }\n        if value == ADDRESSABLE_ENTITY_TAG {\n            return Some(BlockGlobalAddrTag::AddressableEntity);\n        }\n        None\n    }\n}\n\nimpl Display for BlockGlobalAddrTag {\n    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {\n        let tag = match self {\n            BlockGlobalAddrTag::BlockTime => BLOCK_TIME_TAG,\n            BlockGlobalAddrTag::MessageCount => MESSAGE_COUNT_TAG,\n            BlockGlobalAddrTag::ProtocolVersion => PROTOCOL_VERSION_TAG,\n            BlockGlobalAddrTag::AddressableEntity => ADDRESSABLE_ENTITY_TAG,\n        };\n        write!(f, \"{}\", base16::encode_lower(&[tag]))\n    }\n}\n\nimpl ToBytes for BlockGlobalAddrTag {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        Self::BLOCK_GLOBAL_ADDR_TAG_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        writer.push(*self as u8);\n        Ok(())\n    }\n}\n\nimpl FromBytes for BlockGlobalAddrTag {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        if let Some((byte, rem)) = bytes.split_first() {\n            let tag = BlockGlobalAddrTag::try_from_u8(*byte).ok_or(bytesrepr::Error::Formatting)?;\n            Ok((tag, rem))\n        } else {\n            Err(bytesrepr::Error::Formatting)\n        }\n    }\n}\n\n/// Address for singleton values associated to specific block. 
These are values which are\n/// calculated or set during the execution of a block such as the block timestamp, or the\n/// total count of messages emitted during the execution of the block, and so on.\n#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize, Default)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub enum BlockGlobalAddr {\n    /// Block time variant\n    #[default]\n    BlockTime,\n    /// Message count variant.\n    MessageCount,\n    /// Protocol version.\n    ProtocolVersion,\n    /// Addressable entity.\n    AddressableEntity,\n}\n\nimpl BlockGlobalAddr {\n    /// The length in bytes of a [`BlockGlobalAddr`].\n    pub const BLOCK_GLOBAL_ADDR_LENGTH: usize = BlockGlobalAddrTag::BLOCK_GLOBAL_ADDR_TAG_LENGTH;\n\n    /// How long is be the serialized value for this instance.\n    pub fn serialized_length(&self) -> usize {\n        Self::BLOCK_GLOBAL_ADDR_LENGTH\n    }\n\n    /// Returns the tag of this instance.\n    pub fn tag(&self) -> BlockGlobalAddrTag {\n        match self {\n            BlockGlobalAddr::MessageCount => BlockGlobalAddrTag::MessageCount,\n            BlockGlobalAddr::BlockTime => BlockGlobalAddrTag::BlockTime,\n            BlockGlobalAddr::ProtocolVersion => BlockGlobalAddrTag::ProtocolVersion,\n            BlockGlobalAddr::AddressableEntity => BlockGlobalAddrTag::AddressableEntity,\n        }\n    }\n\n    /// To formatted string.\n    pub fn to_formatted_string(self) -> String {\n        match self {\n            BlockGlobalAddr::BlockTime => base16::encode_lower(&BLOCK_TIME_TAG.to_le_bytes()),\n            BlockGlobalAddr::MessageCount => base16::encode_lower(&MESSAGE_COUNT_TAG.to_le_bytes()),\n            BlockGlobalAddr::ProtocolVersion => {\n                base16::encode_lower(&PROTOCOL_VERSION_TAG.to_le_bytes())\n            }\n            BlockGlobalAddr::AddressableEntity => {\n                
base16::encode_lower(&ADDRESSABLE_ENTITY_TAG.to_le_bytes())\n            }\n        }\n    }\n\n    /// From formatted string.\n    pub fn from_formatted_string(hex: &str) -> Result<Self, FromStrError> {\n        let bytes = checksummed_hex::decode(hex)\n            .map_err(|error| FromStrError::BlockGlobal(error.to_string()))?;\n        if bytes.is_empty() {\n            return Err(FromStrError::BlockGlobal(\n                \"bytes should not be 0 len\".to_string(),\n            ));\n        }\n        let tag_bytes = <[u8; BlockGlobalAddrTag::BLOCK_GLOBAL_ADDR_TAG_LENGTH]>::try_from(\n            bytes[0..BlockGlobalAddrTag::BLOCK_GLOBAL_ADDR_TAG_LENGTH].as_ref(),\n        )\n        .map_err(|err| FromStrError::BlockGlobal(err.to_string()))?;\n        let tag = <u8>::from_le_bytes(tag_bytes);\n        let tag = BlockGlobalAddrTag::try_from_u8(tag).ok_or_else(|| {\n            FromStrError::BlockGlobal(\"failed to parse block global addr tag\".to_string())\n        })?;\n\n        // if more tags are added, extend the below logic to handle every case.\n        match tag {\n            BlockGlobalAddrTag::BlockTime => Ok(BlockGlobalAddr::BlockTime),\n            BlockGlobalAddrTag::MessageCount => Ok(BlockGlobalAddr::MessageCount),\n            BlockGlobalAddrTag::ProtocolVersion => Ok(BlockGlobalAddr::ProtocolVersion),\n            BlockGlobalAddrTag::AddressableEntity => Ok(BlockGlobalAddr::AddressableEntity),\n        }\n    }\n}\n\nimpl ToBytes for BlockGlobalAddr {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        buffer.push(self.tag() as u8);\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.serialized_length()\n    }\n}\n\nimpl FromBytes for BlockGlobalAddr {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?;\n        match tag {\n     
       tag if tag == BlockGlobalAddrTag::BlockTime as u8 => {\n                Ok((BlockGlobalAddr::BlockTime, remainder))\n            }\n            tag if tag == BlockGlobalAddrTag::MessageCount as u8 => {\n                Ok((BlockGlobalAddr::MessageCount, remainder))\n            }\n            tag if tag == BlockGlobalAddrTag::ProtocolVersion as u8 => {\n                Ok((BlockGlobalAddr::ProtocolVersion, remainder))\n            }\n            tag if tag == BlockGlobalAddrTag::AddressableEntity as u8 => {\n                Ok((BlockGlobalAddr::AddressableEntity, remainder))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\nimpl From<BlockGlobalAddr> for Key {\n    fn from(block_global_addr: BlockGlobalAddr) -> Self {\n        Key::BlockGlobal(block_global_addr)\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl TryFrom<Key> for BlockGlobalAddr {\n    type Error = ();\n\n    fn try_from(value: Key) -> Result<Self, Self::Error> {\n        if let Key::BlockGlobal(block_global_addr) = value {\n            Ok(block_global_addr)\n        } else {\n            Err(())\n        }\n    }\n}\n\nimpl Display for BlockGlobalAddr {\n    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {\n        let tag = self.tag();\n        write!(f, \"{}\", tag,)\n    }\n}\n\nimpl Debug for BlockGlobalAddr {\n    fn fmt(&self, f: &mut Formatter) -> core::fmt::Result {\n        match self {\n            BlockGlobalAddr::BlockTime => write!(f, \"BlockTime\",),\n            BlockGlobalAddr::MessageCount => write!(f, \"MessageCount\",),\n            BlockGlobalAddr::ProtocolVersion => write!(f, \"ProtocolVersion\"),\n            BlockGlobalAddr::AddressableEntity => write!(f, \"AddressableEntity\"),\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<BlockGlobalAddr> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BlockGlobalAddr {\n        match 
rng.gen_range(BLOCK_TIME_TAG..=ADDRESSABLE_ENTITY_TAG) {\n            BLOCK_TIME_TAG => BlockGlobalAddr::BlockTime,\n            MESSAGE_COUNT_TAG => BlockGlobalAddr::MessageCount,\n            PROTOCOL_VERSION_TAG => BlockGlobalAddr::ProtocolVersion,\n            ADDRESSABLE_ENTITY_TAG => BlockGlobalAddr::AddressableEntity,\n            _ => unreachable!(),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{block::block_global::BlockGlobalAddr, bytesrepr};\n\n    #[test]\n    fn serialization_roundtrip() {\n        let addr = BlockGlobalAddr::BlockTime;\n        bytesrepr::test_serialization_roundtrip(&addr);\n        let addr = BlockGlobalAddr::MessageCount;\n        bytesrepr::test_serialization_roundtrip(&addr);\n        let addr = BlockGlobalAddr::ProtocolVersion;\n        bytesrepr::test_serialization_roundtrip(&addr);\n        let addr = BlockGlobalAddr::AddressableEntity;\n        bytesrepr::test_serialization_roundtrip(&addr);\n    }\n}\n\n#[cfg(test)]\nmod prop_test_gas {\n    use proptest::prelude::*;\n\n    use crate::{bytesrepr, gens};\n\n    proptest! {\n        #[test]\n        fn test_variant_gas(addr in gens::balance_hold_addr_arb()) {\n            bytesrepr::test_serialization_roundtrip(&addr);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/block/block_hash.rs",
    "content": "use alloc::{string::String, vec::Vec};\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse once_cell::sync::Lazy;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(doc)]\nuse super::Block;\n#[cfg(doc)]\nuse super::BlockV2;\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    Digest,\n};\n\n#[cfg(feature = \"json-schema\")]\nstatic BLOCK_HASH: Lazy<BlockHash> =\n    Lazy::new(|| BlockHash::new(Digest::from([7; BlockHash::LENGTH])));\n\n/// The cryptographic hash of a [`Block`].\n#[derive(\n    Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, Default,\n)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"Hex-encoded cryptographic hash of a block.\")\n)]\n#[serde(deny_unknown_fields)]\npub struct BlockHash(Digest);\n\nimpl BlockHash {\n    /// The number of bytes in a `BlockHash` digest.\n    pub const LENGTH: usize = Digest::LENGTH;\n\n    /// Constructs a new `BlockHash`.\n    pub fn new(hash: Digest) -> Self {\n        BlockHash(hash)\n    }\n\n    /// Returns the wrapped inner digest.\n    pub fn inner(&self) -> &Digest {\n        &self.0\n    }\n\n    /// Hexadecimal representation of the hash.\n    pub fn to_hex_string(&self) -> String {\n        base16::encode_lower(self.inner())\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    #[cfg(feature = \"json-schema\")]\n    pub fn example() -> &'static Self {\n        &BLOCK_HASH\n    }\n\n    /// Returns a new `DeployHash` directly initialized with the provided bytes; no hashing is done.\n    #[cfg(any(feature = \"testing\", 
test))]\n    pub const fn from_raw(raw_digest: [u8; Self::LENGTH]) -> Self {\n        BlockHash(Digest::from_raw(raw_digest))\n    }\n\n    /// Returns a random `DeployHash`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        let hash = rng.gen::<[u8; Self::LENGTH]>().into();\n        BlockHash(hash)\n    }\n}\n\nimpl From<Digest> for BlockHash {\n    fn from(digest: Digest) -> Self {\n        Self(digest)\n    }\n}\n\nimpl From<BlockHash> for Digest {\n    fn from(block_hash: BlockHash) -> Self {\n        block_hash.0\n    }\n}\n\nimpl Display for BlockHash {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(formatter, \"block-hash({})\", self.0)\n    }\n}\n\nimpl AsRef<[u8]> for BlockHash {\n    fn as_ref(&self) -> &[u8] {\n        self.0.as_ref()\n    }\n}\n\nimpl ToBytes for BlockHash {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.0.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n}\n\nimpl FromBytes for BlockHash {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        Digest::from_bytes(bytes).map(|(inner, remainder)| (BlockHash(inner), remainder))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        let hash = BlockHash::random(rng);\n        bytesrepr::test_serialization_roundtrip(&hash);\n    }\n}\n"
  },
  {
    "path": "types/src/block/block_hash_and_height.rs",
    "content": "use core::fmt::{self, Display, Formatter};\n\nuse alloc::vec::Vec;\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse super::BlockHash;\n#[cfg(doc)]\nuse super::BlockV2;\nuse crate::bytesrepr::{self, FromBytes, ToBytes};\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\n\n/// The block hash and height of a given block.\n#[derive(\n    Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, Default,\n)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct BlockHashAndHeight {\n    /// The hash of the block.\n    block_hash: BlockHash,\n    /// The height of the block.\n    block_height: u64,\n}\n\nimpl BlockHashAndHeight {\n    /// Constructs a new `BlockHashAndHeight`.\n    pub fn new(block_hash: BlockHash, block_height: u64) -> Self {\n        Self {\n            block_hash,\n            block_height,\n        }\n    }\n\n    /// Returns the hash of the block.\n    pub fn block_hash(&self) -> &BlockHash {\n        &self.block_hash\n    }\n\n    /// Returns the height of the block.\n    pub fn block_height(&self) -> u64 {\n        self.block_height\n    }\n\n    /// Returns a random `BlockHashAndHeight`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        Self {\n            block_hash: BlockHash::random(rng),\n            block_height: rng.gen(),\n        }\n    }\n}\n\nimpl Display for BlockHashAndHeight {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            formatter,\n            \"{}, height {} \",\n            self.block_hash, self.block_height\n        )\n    }\n}\n\nimpl ToBytes for BlockHashAndHeight {\n    fn to_bytes(&self) -> Result<Vec<u8>, 
bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.block_hash.write_bytes(writer)?;\n        self.block_height.write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.block_hash.serialized_length() + self.block_height.serialized_length()\n    }\n}\n\nimpl FromBytes for BlockHashAndHeight {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (block_hash, remainder) = BlockHash::from_bytes(bytes)?;\n        let (block_height, remainder) = u64::from_bytes(remainder)?;\n        Ok((\n            BlockHashAndHeight {\n                block_hash,\n                block_height,\n            },\n            remainder,\n        ))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::testing::TestRng;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = BlockHashAndHeight::random(rng);\n        bytesrepr::test_serialization_roundtrip(&val);\n    }\n}\n"
  },
  {
    "path": "types/src/block/block_header/block_header_v1.rs",
    "content": "use alloc::{collections::BTreeMap, vec::Vec};\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse once_cell::sync::Lazy;\n#[cfg(any(feature = \"once_cell\", test))]\nuse once_cell::sync::OnceCell;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\n#[cfg(any(feature = \"std\", test))]\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    block::{BlockHash, EraEndV1},\n    bytesrepr::{self, FromBytes, ToBytes},\n    Digest, EraId, ProtocolVersion, PublicKey, Timestamp, U512,\n};\n#[cfg(feature = \"std\")]\nuse crate::{ActivationPoint, ProtocolConfig};\n\n#[cfg(feature = \"json-schema\")]\nstatic BLOCK_HEADER_V1: Lazy<BlockHeaderV1> = Lazy::new(|| {\n    let parent_hash = BlockHash::new(Digest::from([7; Digest::LENGTH]));\n    let state_root_hash = Digest::from([8; Digest::LENGTH]);\n    let random_bit = true;\n    let era_end = Some(EraEndV1::example().clone());\n    let timestamp = *Timestamp::example();\n    let era_id = EraId::from(1);\n    let height: u64 = 10;\n    let protocol_version = ProtocolVersion::V1_0_0;\n    let accumulated_seed = Digest::hash_pair(Digest::from([9; Digest::LENGTH]), [random_bit as u8]);\n    let body_hash = Digest::from([5; Digest::LENGTH]);\n    BlockHeaderV1::new(\n        parent_hash,\n        state_root_hash,\n        body_hash,\n        random_bit,\n        accumulated_seed,\n        era_end,\n        timestamp,\n        era_id,\n        height,\n        protocol_version,\n        #[cfg(any(feature = \"once_cell\", test))]\n        OnceCell::new(),\n    )\n});\n\n/// The header portion of a block.\n#[derive(Clone, Debug, Eq)]\n#[cfg_attr(any(feature = \"std\", test), derive(Serialize, Deserialize))]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct BlockHeaderV1 {\n    /// The parent block's hash.\n    pub(super) parent_hash: 
BlockHash,\n    /// The root hash of global state after the deploys in this block have been executed.\n    pub(super) state_root_hash: Digest,\n    /// The hash of the block's body.\n    pub(super) body_hash: Digest,\n    /// A random bit needed for initializing a future era.\n    pub(super) random_bit: bool,\n    /// A seed needed for initializing a future era.\n    pub(super) accumulated_seed: Digest,\n    /// The `EraEnd` of a block if it is a switch block.\n    pub(super) era_end: Option<EraEndV1>,\n    /// The timestamp from when the block was proposed.\n    pub(super) timestamp: Timestamp,\n    /// The era ID in which this block was created.\n    pub(super) era_id: EraId,\n    /// The height of this block, i.e. the number of ancestors.\n    pub(super) height: u64,\n    /// The protocol version of the network from when this block was created.\n    pub(super) protocol_version: ProtocolVersion,\n    #[cfg_attr(any(all(feature = \"std\", feature = \"once_cell\"), test), serde(skip))]\n    #[cfg_attr(\n        all(any(feature = \"once_cell\", test), feature = \"datasize\"),\n        data_size(skip)\n    )]\n    #[cfg(any(feature = \"once_cell\", test))]\n    pub(super) block_hash: OnceCell<BlockHash>,\n}\n\nimpl BlockHeaderV1 {\n    /// Returns the hash of this block header.\n    pub fn block_hash(&self) -> BlockHash {\n        #[cfg(any(feature = \"once_cell\", test))]\n        return *self.block_hash.get_or_init(|| self.compute_block_hash());\n\n        #[cfg(not(any(feature = \"once_cell\", test)))]\n        self.compute_block_hash()\n    }\n\n    /// Returns the parent block's hash.\n    pub fn parent_hash(&self) -> &BlockHash {\n        &self.parent_hash\n    }\n\n    /// Returns the root hash of global state after the deploys in this block have been executed.\n    pub fn state_root_hash(&self) -> &Digest {\n        &self.state_root_hash\n    }\n\n    /// Returns the hash of the block's body.\n    pub fn body_hash(&self) -> &Digest {\n        
&self.body_hash\n    }\n\n    /// Returns a random bit needed for initializing a future era.\n    pub fn random_bit(&self) -> bool {\n        self.random_bit\n    }\n\n    /// Returns a seed needed for initializing a future era.\n    pub fn accumulated_seed(&self) -> &Digest {\n        &self.accumulated_seed\n    }\n\n    /// Returns the `EraEnd` of a block if it is a switch block.\n    pub fn era_end(&self) -> Option<&EraEndV1> {\n        self.era_end.as_ref()\n    }\n\n    /// Returns the timestamp from when the block was proposed.\n    pub fn timestamp(&self) -> Timestamp {\n        self.timestamp\n    }\n\n    /// Returns the era ID in which this block was created.\n    pub fn era_id(&self) -> EraId {\n        self.era_id\n    }\n\n    /// Returns the era ID in which the next block would be created (i.e. this block's era ID, or\n    /// its successor if this is a switch block).\n    pub fn next_block_era_id(&self) -> EraId {\n        if self.era_end.is_some() {\n            self.era_id.successor()\n        } else {\n            self.era_id\n        }\n    }\n\n    /// Returns the height of this block, i.e. the number of ancestors.\n    pub fn height(&self) -> u64 {\n        self.height\n    }\n\n    /// Returns the protocol version of the network from when this block was created.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n\n    /// Returns `true` if this block is the last one in the current era.\n    pub fn is_switch_block(&self) -> bool {\n        self.era_end.is_some()\n    }\n\n    /// Returns the validators for the upcoming era and their respective weights (if this is a\n    /// switch block).\n    pub fn next_era_validator_weights(&self) -> Option<&BTreeMap<PublicKey, U512>> {\n        self.era_end\n            .as_ref()\n            .map(|era_end| era_end.next_era_validator_weights())\n    }\n\n    /// Returns `true` if this block is the Genesis block, i.e. 
has height 0 and era 0.\n    pub fn is_genesis(&self) -> bool {\n        self.era_id().is_genesis() && self.height() == 0\n    }\n\n    /// Returns `true` if this block belongs to the last block before the upgrade to the\n    /// current protocol version.\n    #[cfg(feature = \"std\")]\n    pub fn is_last_block_before_activation(&self, protocol_config: &ProtocolConfig) -> bool {\n        protocol_config.version > self.protocol_version\n            && self.is_switch_block()\n            && ActivationPoint::EraId(self.next_block_era_id()) == protocol_config.activation_point\n    }\n\n    pub(crate) fn compute_block_hash(&self) -> BlockHash {\n        let serialized_header = self\n            .to_bytes()\n            .unwrap_or_else(|error| panic!(\"should serialize block header: {}\", error));\n        BlockHash::new(Digest::hash(serialized_header))\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    #[allow(clippy::too_many_arguments)]\n    pub fn new(\n        parent_hash: BlockHash,\n        state_root_hash: Digest,\n        body_hash: Digest,\n        random_bit: bool,\n        accumulated_seed: Digest,\n        era_end: Option<EraEndV1>,\n        timestamp: Timestamp,\n        era_id: EraId,\n        height: u64,\n        protocol_version: ProtocolVersion,\n        #[cfg(any(feature = \"once_cell\", test))] block_hash: OnceCell<BlockHash>,\n    ) -> Self {\n        BlockHeaderV1 {\n            parent_hash,\n            state_root_hash,\n            body_hash,\n            random_bit,\n            accumulated_seed,\n            era_end,\n            timestamp,\n            era_id,\n            height,\n            protocol_version,\n            #[cfg(any(feature = \"once_cell\", test))]\n            block_hash,\n        }\n    }\n\n    // This method is not intended to be used by third party crates.\n    //\n    // Sets the block hash without recomputing it. 
Must only be called with the correct hash.\n    #[doc(hidden)]\n    #[cfg(any(feature = \"once_cell\", test))]\n    pub fn set_block_hash(&self, block_hash: BlockHash) {\n        self.block_hash.get_or_init(|| block_hash);\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    #[cfg(feature = \"json-schema\")]\n    pub fn example() -> &'static Self {\n        &BLOCK_HEADER_V1\n    }\n\n    #[cfg(test)]\n    pub(crate) fn set_body_hash(&mut self, new_body_hash: Digest) {\n        self.body_hash = new_body_hash;\n    }\n}\n\nimpl PartialEq for BlockHeaderV1 {\n    fn eq(&self, other: &BlockHeaderV1) -> bool {\n        // Destructure to make sure we don't accidentally omit fields.\n        #[cfg(any(feature = \"once_cell\", test))]\n        let BlockHeaderV1 {\n            parent_hash,\n            state_root_hash,\n            body_hash,\n            random_bit,\n            accumulated_seed,\n            era_end,\n            timestamp,\n            era_id,\n            height,\n            protocol_version,\n            block_hash: _,\n        } = self;\n        #[cfg(not(any(feature = \"once_cell\", test)))]\n        let BlockHeaderV1 {\n            parent_hash,\n            state_root_hash,\n            body_hash,\n            random_bit,\n            accumulated_seed,\n            era_end,\n            timestamp,\n            era_id,\n            height,\n            protocol_version,\n        } = self;\n        *parent_hash == other.parent_hash\n            && *state_root_hash == other.state_root_hash\n            && *body_hash == other.body_hash\n            && *random_bit == other.random_bit\n            && *accumulated_seed == other.accumulated_seed\n            && *era_end == other.era_end\n            && *timestamp == other.timestamp\n            && *era_id == other.era_id\n            && *height == other.height\n            && *protocol_version == other.protocol_version\n    }\n}\n\nimpl Display for 
BlockHeaderV1 {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(\n            formatter,\n            \"block header #{}, {}, timestamp {}, {}, parent {}, post-state hash {}, body hash {}, \\\n            random bit {}, protocol version: {}\",\n            self.height,\n            self.block_hash(),\n            self.timestamp,\n            self.era_id,\n            self.parent_hash.inner(),\n            self.state_root_hash,\n            self.body_hash,\n            self.random_bit,\n            self.protocol_version,\n        )?;\n        if let Some(era_end) = &self.era_end {\n            write!(formatter, \", era_end: {}\", era_end)?;\n        }\n        Ok(())\n    }\n}\n\nimpl ToBytes for BlockHeaderV1 {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.parent_hash.write_bytes(writer)?;\n        self.state_root_hash.write_bytes(writer)?;\n        self.body_hash.write_bytes(writer)?;\n        self.random_bit.write_bytes(writer)?;\n        self.accumulated_seed.write_bytes(writer)?;\n        self.era_end.write_bytes(writer)?;\n        self.timestamp.write_bytes(writer)?;\n        self.era_id.write_bytes(writer)?;\n        self.height.write_bytes(writer)?;\n        self.protocol_version.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.parent_hash.serialized_length()\n            + self.state_root_hash.serialized_length()\n            + self.body_hash.serialized_length()\n            + self.random_bit.serialized_length()\n            + self.accumulated_seed.serialized_length()\n            + self.era_end.serialized_length()\n            + self.timestamp.serialized_length()\n            + self.era_id.serialized_length()\n            + 
self.height.serialized_length()\n            + self.protocol_version.serialized_length()\n    }\n}\n\nimpl FromBytes for BlockHeaderV1 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (parent_hash, remainder) = BlockHash::from_bytes(bytes)?;\n        let (state_root_hash, remainder) = Digest::from_bytes(remainder)?;\n        let (body_hash, remainder) = Digest::from_bytes(remainder)?;\n        let (random_bit, remainder) = bool::from_bytes(remainder)?;\n        let (accumulated_seed, remainder) = Digest::from_bytes(remainder)?;\n        let (era_end, remainder) = Option::from_bytes(remainder)?;\n        let (timestamp, remainder) = Timestamp::from_bytes(remainder)?;\n        let (era_id, remainder) = EraId::from_bytes(remainder)?;\n        let (height, remainder) = u64::from_bytes(remainder)?;\n        let (protocol_version, remainder) = ProtocolVersion::from_bytes(remainder)?;\n        let block_header = BlockHeaderV1 {\n            parent_hash,\n            state_root_hash,\n            body_hash,\n            random_bit,\n            accumulated_seed,\n            era_end,\n            timestamp,\n            era_id,\n            height,\n            protocol_version,\n            #[cfg(any(feature = \"once_cell\", test))]\n            block_hash: OnceCell::new(),\n        };\n        Ok((block_header, remainder))\n    }\n}\n"
  },
  {
    "path": "types/src/block/block_header/block_header_v2.rs",
    "content": "use alloc::{collections::BTreeMap, vec::Vec};\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse once_cell::sync::Lazy;\n#[cfg(any(feature = \"once_cell\", test))]\nuse once_cell::sync::OnceCell;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\n#[cfg(any(feature = \"std\", test))]\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    BlockHash, Digest, EraEndV2, EraId, ProtocolVersion, PublicKey, Timestamp, U512,\n};\n#[cfg(feature = \"std\")]\nuse crate::{ActivationPoint, ProtocolConfig};\n\n#[cfg(feature = \"json-schema\")]\nstatic BLOCK_HEADER_V2: Lazy<BlockHeaderV2> = Lazy::new(|| {\n    let parent_hash = BlockHash::new(Digest::from([7; Digest::LENGTH]));\n    let state_root_hash = Digest::from([8; Digest::LENGTH]);\n    let random_bit = true;\n    let era_end = Some(EraEndV2::example().clone());\n    let timestamp = *Timestamp::example();\n    let era_id = EraId::from(1);\n    let height: u64 = 10;\n    let current_gas_price: u8 = 1;\n    let protocol_version = ProtocolVersion::V1_0_0;\n    let accumulated_seed = Digest::hash_pair(Digest::from([9; Digest::LENGTH]), [random_bit as u8]);\n    let body_hash = Digest::from([5; Digest::LENGTH]);\n    let proposer = PublicKey::example().clone();\n    let last_switch_block_hash = BlockHash::new(Digest::from([9; Digest::LENGTH]));\n    BlockHeaderV2::new(\n        parent_hash,\n        state_root_hash,\n        body_hash,\n        random_bit,\n        accumulated_seed,\n        era_end,\n        timestamp,\n        era_id,\n        height,\n        protocol_version,\n        proposer,\n        current_gas_price,\n        Some(last_switch_block_hash),\n        #[cfg(any(feature = \"once_cell\", test))]\n        OnceCell::new(),\n    )\n});\n\n/// The header portion of a block.\n#[derive(Clone, Debug, Eq)]\n#[cfg_attr(any(feature = \"std\", test), 
derive(Serialize, Deserialize))]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct BlockHeaderV2 {\n    /// The parent block's hash.\n    pub(super) parent_hash: BlockHash,\n    /// The root hash of global state after the deploys in this block have been executed.\n    pub(super) state_root_hash: Digest,\n    /// The hash of the block's body.\n    pub(super) body_hash: Digest,\n    /// A random bit needed for initializing a future era.\n    pub(super) random_bit: bool,\n    /// A seed needed for initializing a future era.\n    pub(super) accumulated_seed: Digest,\n    /// The `EraEnd` of a block if it is a switch block.\n    pub(super) era_end: Option<EraEndV2>,\n    /// The timestamp from when the block was proposed.\n    pub(super) timestamp: Timestamp,\n    /// The era ID in which this block was created.\n    pub(super) era_id: EraId,\n    /// The height of this block, i.e. the number of ancestors.\n    pub(super) height: u64,\n    /// The protocol version of the network from when this block was created.\n    pub(super) protocol_version: ProtocolVersion,\n    /// The public key of the validator which proposed the block.\n    pub(super) proposer: PublicKey,\n    /// The gas price of the era\n    pub(super) current_gas_price: u8,\n    /// The most recent switch block hash.\n    pub(super) last_switch_block_hash: Option<BlockHash>,\n    #[cfg_attr(any(all(feature = \"std\", feature = \"once_cell\"), test), serde(skip))]\n    #[cfg_attr(\n        all(any(feature = \"once_cell\", test), feature = \"datasize\"),\n        data_size(skip)\n    )]\n    #[cfg(any(feature = \"once_cell\", test))]\n    pub(super) block_hash: OnceCell<BlockHash>,\n}\n\nimpl BlockHeaderV2 {\n    /// Returns the hash of this block header.\n    pub fn block_hash(&self) -> BlockHash {\n        #[cfg(any(feature = \"once_cell\", test))]\n        return *self.block_hash.get_or_init(|| self.compute_block_hash());\n\n        
#[cfg(not(any(feature = \"once_cell\", test)))]\n        self.compute_block_hash()\n    }\n\n    /// Returns the parent block's hash.\n    pub fn parent_hash(&self) -> &BlockHash {\n        &self.parent_hash\n    }\n\n    /// Returns the root hash of global state after the deploys in this block have been executed.\n    pub fn state_root_hash(&self) -> &Digest {\n        &self.state_root_hash\n    }\n\n    /// Returns the hash of the block's body.\n    pub fn body_hash(&self) -> &Digest {\n        &self.body_hash\n    }\n\n    /// Returns a random bit needed for initializing a future era.\n    pub fn random_bit(&self) -> bool {\n        self.random_bit\n    }\n\n    /// Returns a seed needed for initializing a future era.\n    pub fn accumulated_seed(&self) -> &Digest {\n        &self.accumulated_seed\n    }\n\n    /// Returns the `EraEnd` of a block if it is a switch block.\n    pub fn era_end(&self) -> Option<&EraEndV2> {\n        self.era_end.as_ref()\n    }\n\n    /// Returns the timestamp from when the block was proposed.\n    pub fn timestamp(&self) -> Timestamp {\n        self.timestamp\n    }\n\n    /// Returns the era ID in which this block was created.\n    pub fn era_id(&self) -> EraId {\n        self.era_id\n    }\n\n    /// Returns the era ID in which the next block would be created (i.e. this block's era ID, or\n    /// its successor if this is a switch block).\n    pub fn next_block_era_id(&self) -> EraId {\n        if self.era_end.is_some() {\n            self.era_id.successor()\n        } else {\n            self.era_id\n        }\n    }\n\n    /// Returns the height of this block, i.e. 
the number of ancestors.\n    pub fn height(&self) -> u64 {\n        self.height\n    }\n\n    /// Returns the protocol version of the network from when this block was created.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_version\n    }\n\n    /// Returns `true` if this block is the last one in the current era.\n    pub fn is_switch_block(&self) -> bool {\n        self.era_end.is_some()\n    }\n\n    /// Returns the public key of the validator which proposed the block.\n    pub fn proposer(&self) -> &PublicKey {\n        &self.proposer\n    }\n\n    /// Returns the validators for the upcoming era and their respective weights (if this is a\n    /// switch block).\n    pub fn next_era_validator_weights(&self) -> Option<&BTreeMap<PublicKey, U512>> {\n        self.era_end\n            .as_ref()\n            .map(|era_end| era_end.next_era_validator_weights())\n    }\n\n    /// Returns `true` if this block is the Genesis block, i.e. has height 0 and era 0.\n    pub fn is_genesis(&self) -> bool {\n        self.era_id().is_genesis() && self.height() == 0\n    }\n\n    /// Returns the gas price for the given block.\n    pub fn current_gas_price(&self) -> u8 {\n        self.current_gas_price\n    }\n\n    /// Returns the hash for the last relevant switch block.\n    pub fn last_switch_block_hash(&self) -> Option<BlockHash> {\n        self.last_switch_block_hash\n    }\n\n    /// Returns `true` if this block belongs to the last block before the upgrade to the\n    /// current protocol version.\n    #[cfg(feature = \"std\")]\n    pub fn is_last_block_before_activation(&self, protocol_config: &ProtocolConfig) -> bool {\n        protocol_config.version > self.protocol_version\n            && self.is_switch_block()\n            && ActivationPoint::EraId(self.next_block_era_id()) == protocol_config.activation_point\n    }\n\n    pub(crate) fn compute_block_hash(&self) -> BlockHash {\n        let serialized_header = self\n            
.to_bytes()\n            .unwrap_or_else(|error| panic!(\"should serialize block header: {}\", error));\n        BlockHash::new(Digest::hash(serialized_header))\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    #[allow(clippy::too_many_arguments)]\n    pub fn new(\n        parent_hash: BlockHash,\n        state_root_hash: Digest,\n        body_hash: Digest,\n        random_bit: bool,\n        accumulated_seed: Digest,\n        era_end: Option<EraEndV2>,\n        timestamp: Timestamp,\n        era_id: EraId,\n        height: u64,\n        protocol_version: ProtocolVersion,\n        proposer: PublicKey,\n        current_gas_price: u8,\n        last_switch_block_hash: Option<BlockHash>,\n        #[cfg(any(feature = \"once_cell\", test))] block_hash: OnceCell<BlockHash>,\n    ) -> Self {\n        BlockHeaderV2 {\n            parent_hash,\n            state_root_hash,\n            body_hash,\n            random_bit,\n            accumulated_seed,\n            era_end,\n            timestamp,\n            era_id,\n            height,\n            protocol_version,\n            proposer,\n            current_gas_price,\n            last_switch_block_hash,\n            #[cfg(any(feature = \"once_cell\", test))]\n            block_hash,\n        }\n    }\n\n    // This method is not intended to be used by third party crates.\n    //\n    // Sets the block hash without recomputing it. 
Must only be called with the correct hash.\n    #[doc(hidden)]\n    #[cfg(any(feature = \"once_cell\", test))]\n    pub fn set_block_hash(&self, block_hash: BlockHash) {\n        self.block_hash.get_or_init(|| block_hash);\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    #[cfg(feature = \"json-schema\")]\n    pub fn example() -> &'static Self {\n        &BLOCK_HEADER_V2\n    }\n\n    #[cfg(test)]\n    pub(crate) fn set_body_hash(&mut self, new_body_hash: Digest) {\n        self.body_hash = new_body_hash;\n    }\n}\n\nimpl PartialEq for BlockHeaderV2 {\n    fn eq(&self, other: &BlockHeaderV2) -> bool {\n        // Destructure to make sure we don't accidentally omit fields.\n        #[cfg(any(feature = \"once_cell\", test))]\n        let BlockHeaderV2 {\n            parent_hash,\n            state_root_hash,\n            body_hash,\n            random_bit,\n            accumulated_seed,\n            era_end,\n            timestamp,\n            era_id,\n            height,\n            protocol_version,\n            proposer,\n            current_gas_price,\n            last_switch_block_hash,\n            block_hash: _,\n        } = self;\n        #[cfg(not(any(feature = \"once_cell\", test)))]\n        let BlockHeaderV2 {\n            parent_hash,\n            state_root_hash,\n            body_hash,\n            random_bit,\n            accumulated_seed,\n            era_end,\n            timestamp,\n            era_id,\n            height,\n            protocol_version,\n            proposer,\n            current_gas_price,\n            last_switch_block_hash,\n        } = self;\n        *parent_hash == other.parent_hash\n            && *state_root_hash == other.state_root_hash\n            && *body_hash == other.body_hash\n            && *random_bit == other.random_bit\n            && *accumulated_seed == other.accumulated_seed\n            && *era_end == other.era_end\n            && *timestamp == 
other.timestamp\n            && *era_id == other.era_id\n            && *height == other.height\n            && *protocol_version == other.protocol_version\n            && *proposer == other.proposer\n            && *current_gas_price == other.current_gas_price\n            && *last_switch_block_hash == other.last_switch_block_hash\n    }\n}\n\nimpl Display for BlockHeaderV2 {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(\n            formatter,\n            \"block header #{}, {}, timestamp {}, {}, parent {}, post-state hash {}, body hash {}, \\\n            random bit {}, protocol version: {}, proposed by {}, current_gas_price: {}\",\n            self.height,\n            self.block_hash(),\n            self.timestamp,\n            self.era_id,\n            self.parent_hash.inner(),\n            self.state_root_hash,\n            self.body_hash,\n            self.random_bit,\n            self.protocol_version,\n            self.proposer,\n            self.current_gas_price,\n        )?;\n        if let Some(last_switch_block_hash) = &self.last_switch_block_hash {\n            write!(\n                formatter,\n                \", last_switch_block_hash: {}\",\n                last_switch_block_hash\n            )?;\n        }\n        if let Some(era_end) = &self.era_end {\n            write!(formatter, \", era_end: {}\", era_end)?;\n        }\n        Ok(())\n    }\n}\n\nimpl ToBytes for BlockHeaderV2 {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.parent_hash.write_bytes(writer)?;\n        self.state_root_hash.write_bytes(writer)?;\n        self.body_hash.write_bytes(writer)?;\n        self.random_bit.write_bytes(writer)?;\n        self.accumulated_seed.write_bytes(writer)?;\n        self.era_end.write_bytes(writer)?;\n        self.timestamp.write_bytes(writer)?;\n        self.era_id.write_bytes(writer)?;\n        self.height.write_bytes(writer)?;\n        
self.protocol_version.write_bytes(writer)?;\n        self.proposer.write_bytes(writer)?;\n        self.current_gas_price.write_bytes(writer)?;\n        self.last_switch_block_hash.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.parent_hash.serialized_length()\n            + self.state_root_hash.serialized_length()\n            + self.body_hash.serialized_length()\n            + self.random_bit.serialized_length()\n            + self.accumulated_seed.serialized_length()\n            + self.era_end.serialized_length()\n            + self.timestamp.serialized_length()\n            + self.era_id.serialized_length()\n            + self.height.serialized_length()\n            + self.protocol_version.serialized_length()\n            + self.proposer.serialized_length()\n            + self.current_gas_price.serialized_length()\n            + self.last_switch_block_hash.serialized_length()\n    }\n}\n\nimpl FromBytes for BlockHeaderV2 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (parent_hash, remainder) = BlockHash::from_bytes(bytes)?;\n        let (state_root_hash, remainder) = Digest::from_bytes(remainder)?;\n        let (body_hash, remainder) = Digest::from_bytes(remainder)?;\n        let (random_bit, remainder) = bool::from_bytes(remainder)?;\n        let (accumulated_seed, remainder) = Digest::from_bytes(remainder)?;\n        let (era_end, remainder) = Option::from_bytes(remainder)?;\n        let (timestamp, remainder) = Timestamp::from_bytes(remainder)?;\n        let (era_id, remainder) = EraId::from_bytes(remainder)?;\n        let (height, remainder) = u64::from_bytes(remainder)?;\n        let (protocol_version, remainder) = ProtocolVersion::from_bytes(remainder)?;\n        let (proposer, 
remainder) = PublicKey::from_bytes(remainder)?;\n        let (current_gas_price, remainder) = u8::from_bytes(remainder)?;\n        let (last_switch_block_hash, remainder) = Option::from_bytes(remainder)?;\n        let block_header = BlockHeaderV2 {\n            parent_hash,\n            state_root_hash,\n            body_hash,\n            random_bit,\n            accumulated_seed,\n            era_end,\n            timestamp,\n            era_id,\n            height,\n            protocol_version,\n            proposer,\n            current_gas_price,\n            last_switch_block_hash,\n            #[cfg(any(feature = \"once_cell\", test))]\n            block_hash: OnceCell::new(),\n        };\n        Ok((block_header, remainder))\n    }\n}\n"
  },
  {
    "path": "types/src/block/block_header.rs",
    "content": "mod block_header_v1;\nmod block_header_v2;\n\npub use block_header_v1::BlockHeaderV1;\npub use block_header_v2::BlockHeaderV2;\n\nuse alloc::{collections::BTreeMap, vec::Vec};\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"std\")]\nuse crate::ProtocolConfig;\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\n#[cfg(any(feature = \"std\", test))]\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    BlockHash, Digest, EraEnd, EraId, ProtocolVersion, PublicKey, Timestamp, U512,\n};\n\nconst TAG_LENGTH: usize = U8_SERIALIZED_LENGTH;\n\n/// Tag for block header v1.\npub const BLOCK_HEADER_V1_TAG: u8 = 0;\n/// Tag for block header v2.\npub const BLOCK_HEADER_V2_TAG: u8 = 1;\n\n/// The versioned header portion of a block. It encapsulates different variants of the BlockHeader\n/// struct.\n#[derive(Clone, Debug, Eq, PartialEq)]\n#[cfg_attr(any(feature = \"std\", test), derive(Serialize, Deserialize))]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[allow(clippy::large_enum_variant)]\npub enum BlockHeader {\n    /// The legacy, initial version of the header portion of a block.\n    #[cfg_attr(any(feature = \"std\", test), serde(rename = \"Version1\"))]\n    V1(BlockHeaderV1),\n    /// The version 2 of the header portion of a block.\n    #[cfg_attr(any(feature = \"std\", test), serde(rename = \"Version2\"))]\n    V2(BlockHeaderV2),\n}\n\nimpl BlockHeader {\n    /// Returns the hash of this block header.\n    pub fn block_hash(&self) -> BlockHash {\n        match self {\n            BlockHeader::V1(v1) => v1.block_hash(),\n            BlockHeader::V2(v2) => v2.block_hash(),\n        }\n    }\n\n    /// Returns the parent block's hash.\n    pub fn parent_hash(&self) -> &BlockHash {\n        match self {\n            BlockHeader::V1(v1) 
=> v1.parent_hash(),\n            BlockHeader::V2(v2) => v2.parent_hash(),\n        }\n    }\n\n    /// Returns the root hash of global state after the deploys in this block have been executed.\n    pub fn state_root_hash(&self) -> &Digest {\n        match self {\n            BlockHeader::V1(v1) => v1.state_root_hash(),\n            BlockHeader::V2(v2) => v2.state_root_hash(),\n        }\n    }\n\n    /// Returns the hash of the block's body.\n    pub fn body_hash(&self) -> &Digest {\n        match self {\n            BlockHeader::V1(v1) => v1.body_hash(),\n            BlockHeader::V2(v2) => v2.body_hash(),\n        }\n    }\n\n    /// Returns a random bit needed for initializing a future era.\n    pub fn random_bit(&self) -> bool {\n        match self {\n            BlockHeader::V1(v1) => v1.random_bit(),\n            BlockHeader::V2(v2) => v2.random_bit(),\n        }\n    }\n\n    /// Returns a seed needed for initializing a future era.\n    pub fn accumulated_seed(&self) -> &Digest {\n        match self {\n            BlockHeader::V1(v1) => v1.accumulated_seed(),\n            BlockHeader::V2(v2) => v2.accumulated_seed(),\n        }\n    }\n\n    /// Returns the `EraEnd` of a block if it is a switch block.\n    pub fn clone_era_end(&self) -> Option<EraEnd> {\n        match self {\n            BlockHeader::V1(v1) => v1.era_end().map(|ee| ee.clone().into()),\n            BlockHeader::V2(v2) => v2.era_end().map(|ee| ee.clone().into()),\n        }\n    }\n\n    /// Returns equivocators if the header is of a switch block.\n    pub fn maybe_equivocators(&self) -> Option<&[PublicKey]> {\n        match self {\n            BlockHeader::V1(v1) => v1.era_end().map(|ee| ee.equivocators()),\n            BlockHeader::V2(v2) => v2.era_end().map(|ee| ee.equivocators()),\n        }\n    }\n\n    /// Returns equivocators if the header is of a switch block.\n    pub fn maybe_inactive_validators(&self) -> Option<&[PublicKey]> {\n        match self {\n            BlockHeader::V1(v1) 
=> v1.era_end().map(|ee| ee.inactive_validators()),\n            BlockHeader::V2(v2) => v2.era_end().map(|ee| ee.inactive_validators()),\n        }\n    }\n\n    /// Returns the timestamp from when the block was proposed.\n    pub fn timestamp(&self) -> Timestamp {\n        match self {\n            BlockHeader::V1(v1) => v1.timestamp(),\n            BlockHeader::V2(v2) => v2.timestamp(),\n        }\n    }\n\n    /// Returns the era ID in which this block was created.\n    pub fn era_id(&self) -> EraId {\n        match self {\n            BlockHeader::V1(v1) => v1.era_id(),\n            BlockHeader::V2(v2) => v2.era_id(),\n        }\n    }\n\n    /// Returns the era ID in which the next block would be created (i.e. this block's era ID, or\n    /// its successor if this is a switch block).\n    pub fn next_block_era_id(&self) -> EraId {\n        match self {\n            BlockHeader::V1(v1) => v1.next_block_era_id(),\n            BlockHeader::V2(v2) => v2.next_block_era_id(),\n        }\n    }\n\n    /// Returns the height of this block, i.e. 
the number of ancestors.\n    pub fn height(&self) -> u64 {\n        match self {\n            BlockHeader::V1(v1) => v1.height(),\n            BlockHeader::V2(v2) => v2.height(),\n        }\n    }\n\n    /// Returns the protocol version of the network from when this block was created.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        match self {\n            BlockHeader::V1(v1) => v1.protocol_version(),\n            BlockHeader::V2(v2) => v2.protocol_version(),\n        }\n    }\n\n    /// Returns `true` if this block is the last one in the current era.\n    pub fn is_switch_block(&self) -> bool {\n        match self {\n            BlockHeader::V1(v1) => v1.is_switch_block(),\n            BlockHeader::V2(v2) => v2.is_switch_block(),\n        }\n    }\n\n    /// Returns the validators for the upcoming era and their respective weights (if this is a\n    /// switch block).\n    pub fn next_era_validator_weights(&self) -> Option<&BTreeMap<PublicKey, U512>> {\n        match self {\n            BlockHeader::V1(v1) => v1.next_era_validator_weights(),\n            BlockHeader::V2(v2) => v2.next_era_validator_weights(),\n        }\n    }\n\n    /// Returns `true` if this block is the Genesis block, i.e. 
has height 0 and era 0.\n    pub fn is_genesis(&self) -> bool {\n        match self {\n            BlockHeader::V1(v1) => v1.is_genesis(),\n            BlockHeader::V2(v2) => v2.is_genesis(),\n        }\n    }\n\n    /// Returns `true` if this block belongs to the last block before the upgrade to the\n    /// current protocol version.\n    #[cfg(feature = \"std\")]\n    pub fn is_last_block_before_activation(&self, protocol_config: &ProtocolConfig) -> bool {\n        match self {\n            BlockHeader::V1(v1) => v1.is_last_block_before_activation(protocol_config),\n            BlockHeader::V2(v2) => v2.is_last_block_before_activation(protocol_config),\n        }\n    }\n\n    // This method is not intended to be used by third party crates.\n    //\n    // Sets the block hash without recomputing it. Must only be called with the correct hash.\n    #[doc(hidden)]\n    #[cfg(any(feature = \"once_cell\", test))]\n    pub fn set_block_hash(&self, block_hash: BlockHash) {\n        match self {\n            BlockHeader::V1(v1) => v1.set_block_hash(block_hash),\n            BlockHeader::V2(v2) => v2.set_block_hash(block_hash),\n        }\n    }\n}\n\nimpl Display for BlockHeader {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            BlockHeader::V1(v1) => Display::fmt(&v1, formatter),\n            BlockHeader::V2(v2) => Display::fmt(&v2, formatter),\n        }\n    }\n}\n\nimpl From<BlockHeaderV1> for BlockHeader {\n    fn from(header: BlockHeaderV1) -> Self {\n        BlockHeader::V1(header)\n    }\n}\n\nimpl From<BlockHeaderV2> for BlockHeader {\n    fn from(header: BlockHeaderV2) -> Self {\n        BlockHeader::V2(header)\n    }\n}\n\nimpl ToBytes for BlockHeader {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        match self {\n            BlockHeader::V1(v1) => {\n                buffer.insert(0, BLOCK_HEADER_V1_TAG);\n                
buffer.extend(v1.to_bytes()?);\n            }\n            BlockHeader::V2(v2) => {\n                buffer.insert(0, BLOCK_HEADER_V2_TAG);\n                buffer.extend(v2.to_bytes()?);\n            }\n        }\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        TAG_LENGTH\n            + match self {\n                BlockHeader::V1(v1) => v1.serialized_length(),\n                BlockHeader::V2(v2) => v2.serialized_length(),\n            }\n    }\n}\n\nimpl FromBytes for BlockHeader {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            BLOCK_HEADER_V1_TAG => {\n                let (header, remainder): (BlockHeaderV1, _) = FromBytes::from_bytes(remainder)?;\n                Ok((Self::V1(header), remainder))\n            }\n            BLOCK_HEADER_V2_TAG => {\n                let (header, remainder): (BlockHeaderV2, _) = FromBytes::from_bytes(remainder)?;\n                Ok((Self::V2(header), remainder))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{bytesrepr, testing::TestRng, TestBlockBuilder, TestBlockV1Builder};\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let block_header_v1 = TestBlockV1Builder::new()\n            .build_versioned(rng)\n            .clone_header();\n        bytesrepr::test_serialization_roundtrip(&block_header_v1);\n\n        let block_header_v2 = TestBlockBuilder::new().build_versioned(rng).clone_header();\n        bytesrepr::test_serialization_roundtrip(&block_header_v2);\n    }\n}\n"
  },
  {
    "path": "types/src/block/block_header_with_signatures.rs",
    "content": "use core::fmt::{self, Display, Formatter};\n#[cfg(feature = \"std\")]\nuse std::error::Error as StdError;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"std\", test))]\nuse serde::{Deserialize, Serialize};\n\nuse super::{BlockHash, BlockHeader, BlockSignatures};\nuse crate::EraId;\n#[cfg(doc)]\nuse crate::Signature;\n\n/// An error which can result from validating a [`BlockHeaderWithSignatures`].\n#[derive(Copy, Clone, Eq, PartialEq, Debug)]\n#[non_exhaustive]\npub enum BlockHeaderWithSignaturesValidationError {\n    /// Mismatch between block hash in [`BlockHeader`] and [`BlockSignatures`].\n    BlockHashMismatch {\n        /// The block hash in the `BlockHeader`.\n        block_hash_in_header: BlockHash,\n        /// The block hash in the `BlockSignatures`.\n        block_hash_in_signatures: BlockHash,\n    },\n    /// Mismatch between era ID in [`BlockHeader`] and [`BlockSignatures`].\n    EraIdMismatch {\n        /// The era ID in the `BlockHeader`.\n        era_id_in_header: EraId,\n        /// The era ID in the `BlockSignatures`.\n        era_id_in_signatures: EraId,\n    },\n}\n\nimpl Display for BlockHeaderWithSignaturesValidationError {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            BlockHeaderWithSignaturesValidationError::BlockHashMismatch {\n                block_hash_in_header: expected,\n                block_hash_in_signatures: actual,\n            } => {\n                write!(\n                    formatter,\n                    \"block hash mismatch - header: {expected}, signatures: {actual}\",\n                )\n            }\n            BlockHeaderWithSignaturesValidationError::EraIdMismatch {\n                era_id_in_header: expected,\n                era_id_in_signatures: actual,\n            } => {\n                write!(\n                    formatter,\n                    \"era id mismatch - header: {expected}, signatures: 
{actual}\",\n                )\n            }\n        }\n    }\n}\n\n#[cfg(feature = \"std\")]\nimpl StdError for BlockHeaderWithSignaturesValidationError {}\n\n/// A block header and collection of signatures of a given block.\n#[derive(Clone, Eq, PartialEq, Debug)]\n#[cfg_attr(any(feature = \"std\", test), derive(Serialize, Deserialize))]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct BlockHeaderWithSignatures {\n    block_header: BlockHeader,\n    block_signatures: BlockSignatures,\n}\n\nimpl BlockHeaderWithSignatures {\n    /// Returns a new `BlockHeaderWithSignatures`.\n    pub fn new(block_header: BlockHeader, block_signatures: BlockSignatures) -> Self {\n        BlockHeaderWithSignatures {\n            block_header,\n            block_signatures,\n        }\n    }\n\n    /// Returns the block header.\n    pub fn block_header(&self) -> &BlockHeader {\n        &self.block_header\n    }\n\n    /// Returns the block signatures.\n    pub fn block_signatures(&self) -> &BlockSignatures {\n        &self.block_signatures\n    }\n\n    /// Returns `Ok` if and only if the block hash and era ID in the `BlockHeader` are identical to\n    /// those in the `BlockSignatures`.\n    ///\n    /// Note that no cryptographic verification of the contained signatures is performed.  
For this,\n    /// see [`BlockSignatures::is_verified`].\n    pub fn is_valid(&self) -> Result<(), BlockHeaderWithSignaturesValidationError> {\n        if self.block_header.block_hash() != *self.block_signatures.block_hash() {\n            return Err(\n                BlockHeaderWithSignaturesValidationError::BlockHashMismatch {\n                    block_hash_in_header: self.block_header.block_hash(),\n                    block_hash_in_signatures: *self.block_signatures.block_hash(),\n                },\n            );\n        }\n        if self.block_header.era_id() != self.block_signatures.era_id() {\n            return Err(BlockHeaderWithSignaturesValidationError::EraIdMismatch {\n                era_id_in_header: self.block_header.era_id(),\n                era_id_in_signatures: self.block_signatures.era_id(),\n            });\n        }\n        Ok(())\n    }\n\n    /// Sets the era ID contained in `block_signatures` to its max value, rendering it and hence\n    /// `self` invalid (assuming the relevant era ID for this `BlockHeaderWithSignatures` wasn't\n    /// already the max value).\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn invalidate_era(&mut self) {\n        self.block_signatures.invalidate_era()\n    }\n\n    /// Replaces the signature field of the last `block_signatures` entry with the `System` variant\n    /// of [`crate::crypto::Signature`], rendering that entry invalid.\n    ///\n    /// Note that [`Self::is_valid`] will be unaffected by this as it only checks for equality in\n    /// the block hash and era ID of the header and signatures; no cryptographic verification is\n    /// performed.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn invalidate_last_signature(&mut self) {\n        self.block_signatures.invalidate_last_signature()\n    }\n}\n\nimpl Display for BlockHeaderWithSignatures {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        write!(f, \"{}, and {}\", self.block_header, self.block_signatures)\n 
   }\n}\n"
  },
  {
    "path": "types/src/block/block_identifier.rs",
    "content": "use alloc::vec::Vec;\nuse core::num::ParseIntError;\n\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    BlockHash, Digest, DigestError,\n};\n\nconst HASH_TAG: u8 = 0;\nconst HEIGHT_TAG: u8 = 1;\n\n/// Identifier for possible ways to retrieve a block.\n#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub enum BlockIdentifier {\n    /// Identify and retrieve the block with its hash.\n    Hash(BlockHash),\n    /// Identify and retrieve the block with its height.\n    Height(u64),\n}\n\nimpl BlockIdentifier {\n    /// Random.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..1) {\n            0 => Self::Hash(BlockHash::random(rng)),\n            1 => Self::Height(rng.gen()),\n            _ => panic!(),\n        }\n    }\n}\n\nimpl FromBytes for BlockIdentifier {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        match bytes.split_first() {\n            Some((&HASH_TAG, rem)) => {\n                let (hash, rem) = FromBytes::from_bytes(rem)?;\n                Ok((BlockIdentifier::Hash(hash), rem))\n            }\n            Some((&HEIGHT_TAG, rem)) => {\n                let (height, rem) = FromBytes::from_bytes(rem)?;\n                Ok((BlockIdentifier::Height(height), rem))\n            }\n            Some(_) | None => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\nimpl ToBytes for BlockIdentifier {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut 
buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            BlockIdentifier::Hash(hash) => {\n                writer.push(HASH_TAG);\n                hash.write_bytes(writer)?;\n            }\n            BlockIdentifier::Height(height) => {\n                writer.push(HEIGHT_TAG);\n                height.write_bytes(writer)?;\n            }\n        }\n        Ok(())\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                BlockIdentifier::Hash(hash) => hash.serialized_length(),\n                BlockIdentifier::Height(height) => height.serialized_length(),\n            }\n    }\n}\n\nimpl core::str::FromStr for BlockIdentifier {\n    type Err = ParseBlockIdentifierError;\n\n    fn from_str(maybe_block_identifier: &str) -> Result<Self, Self::Err> {\n        if maybe_block_identifier.is_empty() {\n            return Err(ParseBlockIdentifierError::EmptyString);\n        }\n\n        if maybe_block_identifier.len() == (Digest::LENGTH * 2) {\n            let hash = Digest::from_hex(maybe_block_identifier)\n                .map_err(ParseBlockIdentifierError::FromHexError)?;\n            Ok(BlockIdentifier::Hash(BlockHash::new(hash)))\n        } else {\n            let height = maybe_block_identifier\n                .parse()\n                .map_err(ParseBlockIdentifierError::ParseIntError)?;\n            Ok(BlockIdentifier::Height(height))\n        }\n    }\n}\n\n/// Represents errors that can arise when parsing a [`BlockIdentifier`].\n#[derive(Debug)]\n#[cfg_attr(feature = \"std\", derive(thiserror::Error))]\npub enum ParseBlockIdentifierError {\n    /// String was empty.\n    #[cfg_attr(\n        feature = \"std\",\n        error(\"Empty string is not a valid block identifier.\")\n    )]\n    EmptyString,\n    /// Couldn't parse a height value.\n    #[cfg_attr(feature = \"std\", error(\"Unable to 
parse height from string. {0}\"))]\n    ParseIntError(ParseIntError),\n    /// Couldn't parse a blake2bhash.\n    #[cfg_attr(feature = \"std\", error(\"Unable to parse digest from string. {0}\"))]\n    FromHexError(DigestError),\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::testing::TestRng;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = BlockIdentifier::random(rng);\n        bytesrepr::test_serialization_roundtrip(&val);\n    }\n}\n"
  },
  {
    "path": "types/src/block/block_signatures/block_signatures_v1.rs",
    "content": "use alloc::{collections::BTreeMap, vec::Vec};\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(any(feature = \"std\", test))]\nuse serde::{Deserialize, Serialize};\n\nuse super::BlockHash;\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    crypto, EraId, FinalitySignatureV1, PublicKey, Signature,\n};\n\n/// A collection of signatures for a single block, along with the associated block's hash and era\n/// ID.\n#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)]\n#[cfg_attr(any(feature = \"std\", test), derive(Serialize, Deserialize))]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct BlockSignaturesV1 {\n    /// The block hash.\n    pub(super) block_hash: BlockHash,\n    /// The era ID in which this block was created.\n    pub(super) era_id: EraId,\n    /// The proofs of the block, i.e. 
a collection of validators' signatures of the block hash.\n    pub(super) proofs: BTreeMap<PublicKey, Signature>,\n}\n\nimpl BlockSignaturesV1 {\n    /// Constructs a new `BlockSignaturesV1`.\n    pub fn new(block_hash: BlockHash, era_id: EraId) -> Self {\n        BlockSignaturesV1 {\n            block_hash,\n            era_id,\n            proofs: BTreeMap::new(),\n        }\n    }\n\n    /// Returns the block hash of the associated block.\n    pub fn block_hash(&self) -> &BlockHash {\n        &self.block_hash\n    }\n\n    /// Returns the era id of the associated block.\n    pub fn era_id(&self) -> EraId {\n        self.era_id\n    }\n\n    /// Returns the finality signature associated with the given public key, if available.\n    pub fn finality_signature(&self, public_key: &PublicKey) -> Option<FinalitySignatureV1> {\n        self.proofs\n            .get(public_key)\n            .map(|signature| FinalitySignatureV1 {\n                block_hash: self.block_hash,\n                era_id: self.era_id,\n                signature: *signature,\n                public_key: public_key.clone(),\n                #[cfg(any(feature = \"once_cell\", test))]\n                is_verified: Default::default(),\n            })\n    }\n\n    /// Returns `true` if there is a signature associated with the given public key.\n    pub fn has_finality_signature(&self, public_key: &PublicKey) -> bool {\n        self.proofs.contains_key(public_key)\n    }\n\n    /// Returns an iterator over all the signatures.\n    pub fn finality_signatures(&self) -> impl Iterator<Item = FinalitySignatureV1> + '_ {\n        self.proofs\n            .iter()\n            .map(move |(public_key, signature)| FinalitySignatureV1 {\n                block_hash: self.block_hash,\n                era_id: self.era_id,\n                signature: *signature,\n                public_key: public_key.clone(),\n                #[cfg(any(feature = \"once_cell\", test))]\n                is_verified: 
Default::default(),\n            })\n    }\n\n    /// Returns an iterator over all the validator public keys.\n    pub fn signers(&self) -> impl Iterator<Item = &'_ PublicKey> + '_ {\n        self.proofs.keys()\n    }\n\n    /// Returns the number of signatures in the collection.\n    pub fn len(&self) -> usize {\n        self.proofs.len()\n    }\n\n    /// Returns `true` if there are no signatures in the collection.\n    pub fn is_empty(&self) -> bool {\n        self.proofs.is_empty()\n    }\n\n    /// Inserts a new signature.\n    pub fn insert_signature(&mut self, public_key: PublicKey, signature: Signature) {\n        let _ = self.proofs.insert(public_key, signature);\n    }\n\n    /// Returns `Ok` if and only if all the signatures are cryptographically valid.\n    pub fn is_verified(&self) -> Result<(), crypto::Error> {\n        for (public_key, signature) in self.proofs.iter() {\n            let signature = FinalitySignatureV1 {\n                block_hash: self.block_hash,\n                era_id: self.era_id,\n                signature: *signature,\n                public_key: public_key.clone(),\n                #[cfg(any(feature = \"once_cell\", test))]\n                is_verified: Default::default(),\n            };\n            signature.is_verified()?;\n        }\n        Ok(())\n    }\n\n    /// Returns a random `BlockSignaturesV1`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        let block_hash = BlockHash::random(rng);\n        let era_id = EraId::random(rng);\n        let proofs = (0..rng.gen_range(0..10))\n            .map(|_| {\n                let public_key = PublicKey::random(rng);\n                let bytes = std::array::from_fn(|_| rng.gen());\n                let signature = Signature::ed25519(bytes).unwrap();\n                (public_key, signature)\n            })\n            .collect();\n        Self {\n            block_hash,\n            era_id,\n            proofs,\n        }\n   
 }\n}\n\nimpl Display for BlockSignaturesV1 {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(\n            formatter,\n            \"block signatures for {} in {} with {} proofs\",\n            self.block_hash,\n            self.era_id,\n            self.proofs.len()\n        )\n    }\n}\n\nimpl ToBytes for BlockSignaturesV1 {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buf = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buf)?;\n        Ok(buf)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.block_hash.write_bytes(writer)?;\n        self.era_id.write_bytes(writer)?;\n        self.proofs.write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.block_hash.serialized_length()\n            + self.era_id.serialized_length()\n            + self.proofs.serialized_length()\n    }\n}\n\nimpl FromBytes for BlockSignaturesV1 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (block_hash, remainder) = BlockHash::from_bytes(bytes)?;\n        let (era_id, remainder) = EraId::from_bytes(remainder)?;\n        let (proofs, remainder) = BTreeMap::<PublicKey, Signature>::from_bytes(remainder)?;\n        Ok((\n            Self {\n                block_hash,\n                era_id,\n                proofs,\n            },\n            remainder,\n        ))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        let hash = BlockSignaturesV1::random(rng);\n        bytesrepr::test_serialization_roundtrip(&hash);\n    }\n}\n"
  },
  {
    "path": "types/src/block/block_signatures/block_signatures_v2.rs",
    "content": "use alloc::{collections::BTreeMap, vec::Vec};\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(any(feature = \"std\", test))]\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    crypto, BlockHash, ChainNameDigest, EraId, FinalitySignatureV2, PublicKey, Signature,\n};\n\n/// A collection of signatures for a single block, along with the associated block's hash and era\n/// ID.\n#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)]\n#[cfg_attr(any(feature = \"std\", test), derive(Serialize, Deserialize))]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct BlockSignaturesV2 {\n    /// The block hash.\n    pub(super) block_hash: BlockHash,\n    /// The block height.\n    pub(super) block_height: u64,\n    /// The era ID in which this block was created.\n    pub(super) era_id: EraId,\n    /// The hash of the chain name of the associated block.\n    pub(super) chain_name_hash: ChainNameDigest,\n    /// The proofs of the block, i.e. 
a collection of validators' signatures of the block hash.\n    pub(super) proofs: BTreeMap<PublicKey, Signature>,\n}\n\nimpl BlockSignaturesV2 {\n    /// Constructs a new `BlockSignaturesV2`.\n    pub fn new(\n        block_hash: BlockHash,\n        block_height: u64,\n        era_id: EraId,\n        chain_name_hash: ChainNameDigest,\n    ) -> Self {\n        BlockSignaturesV2 {\n            block_hash,\n            block_height,\n            era_id,\n            chain_name_hash,\n            proofs: BTreeMap::new(),\n        }\n    }\n\n    /// Returns the block hash of the associated block.\n    pub fn block_hash(&self) -> &BlockHash {\n        &self.block_hash\n    }\n\n    /// Returns the block height of the associated block.\n    pub fn block_height(&self) -> u64 {\n        self.block_height\n    }\n\n    /// Returns the era id of the associated block.\n    pub fn era_id(&self) -> EraId {\n        self.era_id\n    }\n\n    /// Returns the chain name hash of the associated block.\n    pub fn chain_name_hash(&self) -> ChainNameDigest {\n        self.chain_name_hash\n    }\n\n    /// Returns the finality signature associated with the given public key, if available.\n    pub fn finality_signature(&self, public_key: &PublicKey) -> Option<FinalitySignatureV2> {\n        self.proofs\n            .get(public_key)\n            .map(|signature| FinalitySignatureV2 {\n                block_hash: self.block_hash,\n                block_height: self.block_height,\n                era_id: self.era_id,\n                chain_name_hash: self.chain_name_hash,\n                signature: *signature,\n                public_key: public_key.clone(),\n                #[cfg(any(feature = \"once_cell\", test))]\n                is_verified: Default::default(),\n            })\n    }\n\n    /// Returns `true` if there is a signature associated with the given public key.\n    pub fn has_finality_signature(&self, public_key: &PublicKey) -> bool {\n        
self.proofs.contains_key(public_key)\n    }\n\n    /// Returns an iterator over all the signatures.\n    pub fn finality_signatures(&self) -> impl Iterator<Item = FinalitySignatureV2> + '_ {\n        self.proofs\n            .iter()\n            .map(move |(public_key, signature)| FinalitySignatureV2 {\n                block_hash: self.block_hash,\n                block_height: self.block_height,\n                era_id: self.era_id,\n                chain_name_hash: self.chain_name_hash,\n                signature: *signature,\n                public_key: public_key.clone(),\n                #[cfg(any(feature = \"once_cell\", test))]\n                is_verified: Default::default(),\n            })\n    }\n\n    /// Returns an iterator over all the validator public keys.\n    pub fn signers(&self) -> impl Iterator<Item = &'_ PublicKey> + '_ {\n        self.proofs.keys()\n    }\n\n    /// Returns the number of signatures in the collection.\n    pub fn len(&self) -> usize {\n        self.proofs.len()\n    }\n\n    /// Returns `true` if there are no signatures in the collection.\n    pub fn is_empty(&self) -> bool {\n        self.proofs.is_empty()\n    }\n\n    /// Inserts a new signature.\n    pub fn insert_signature(&mut self, public_key: PublicKey, signature: Signature) {\n        let _ = self.proofs.insert(public_key, signature);\n    }\n\n    /// Returns `Ok` if and only if all the signatures are cryptographically valid.\n    pub fn is_verified(&self) -> Result<(), crypto::Error> {\n        for (public_key, signature) in self.proofs.iter() {\n            let signature = FinalitySignatureV2 {\n                block_hash: self.block_hash,\n                block_height: self.block_height,\n                era_id: self.era_id,\n                chain_name_hash: self.chain_name_hash,\n                signature: *signature,\n                public_key: public_key.clone(),\n                #[cfg(any(feature = \"once_cell\", test))]\n                is_verified: 
Default::default(),\n            };\n            signature.is_verified()?;\n        }\n        Ok(())\n    }\n\n    /// Returns a random `BlockSignaturesV2`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        let block_hash = BlockHash::random(rng);\n        let block_height = rng.gen();\n        let era_id = EraId::random(rng);\n        let chain_name_hash = ChainNameDigest::random(rng);\n        let proofs = (0..rng.gen_range(0..10))\n            .map(|_| {\n                let public_key = PublicKey::random(rng);\n                let bytes = std::array::from_fn(|_| rng.gen());\n                let signature = Signature::ed25519(bytes).unwrap();\n                (public_key, signature)\n            })\n            .collect();\n        Self {\n            block_hash,\n            block_height,\n            era_id,\n            chain_name_hash,\n            proofs,\n        }\n    }\n}\n\nimpl Display for BlockSignaturesV2 {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(\n            formatter,\n            \"block signatures for {} in {} with {} proofs\",\n            self.block_hash,\n            self.era_id,\n            self.proofs.len()\n        )\n    }\n}\n\nimpl ToBytes for BlockSignaturesV2 {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buf = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buf)?;\n        Ok(buf)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.block_hash.write_bytes(writer)?;\n        self.block_height.write_bytes(writer)?;\n        self.era_id.write_bytes(writer)?;\n        self.chain_name_hash.write_bytes(writer)?;\n        self.proofs.write_bytes(writer)?;\n        Ok(())\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.block_hash.serialized_length()\n            + self.block_height.serialized_length()\n            + 
self.era_id.serialized_length()\n            + self.chain_name_hash.serialized_length()\n            + self.proofs.serialized_length()\n    }\n}\n\nimpl FromBytes for BlockSignaturesV2 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (block_hash, remainder) = BlockHash::from_bytes(bytes)?;\n        let (block_height, remainder) = u64::from_bytes(remainder)?;\n        let (era_id, remainder) = EraId::from_bytes(remainder)?;\n        let (chain_name_hash, remainder) = ChainNameDigest::from_bytes(remainder)?;\n        let (proofs, remainder) = BTreeMap::<PublicKey, Signature>::from_bytes(remainder)?;\n        Ok((\n            Self {\n                block_hash,\n                block_height,\n                era_id,\n                chain_name_hash,\n                proofs,\n            },\n            remainder,\n        ))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        let hash = BlockSignaturesV2::random(rng);\n        bytesrepr::test_serialization_roundtrip(&hash);\n    }\n}\n"
  },
  {
    "path": "types/src/block/block_signatures.rs",
    "content": "mod block_signatures_v1;\nmod block_signatures_v2;\n\npub use block_signatures_v1::BlockSignaturesV1;\npub use block_signatures_v2::BlockSignaturesV2;\n\nuse alloc::{collections::BTreeMap, vec::Vec};\nuse core::{\n    fmt::{self, Display, Formatter},\n    hash::Hash,\n};\nuse itertools::Either;\n#[cfg(feature = \"std\")]\nuse std::error::Error as StdError;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(any(feature = \"std\", test))]\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    crypto, BlockHash, ChainNameDigest, EraId, FinalitySignature, PublicKey, Signature,\n};\n\nconst TAG_LENGTH: usize = U8_SERIALIZED_LENGTH;\n\n/// Tag for block signatures v1.\npub const BLOCK_SIGNATURES_V1_TAG: u8 = 0;\n/// Tag for block signatures v2.\npub const BLOCK_SIGNATURES_V2_TAG: u8 = 1;\n\n/// A collection of signatures for a single block, along with the associated block's hash and era\n/// ID.\n#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)]\n#[cfg_attr(any(feature = \"std\", test), derive(Serialize, Deserialize))]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub enum BlockSignatures {\n    /// Version 1 of the block signatures.\n    V1(BlockSignaturesV1),\n    /// Version 2 of the block signatures.\n    V2(BlockSignaturesV2),\n}\n\nimpl BlockSignatures {\n    /// Returns the block hash of the associated block.\n    pub fn block_hash(&self) -> &BlockHash {\n        match self {\n            BlockSignatures::V1(block_signatures) => block_signatures.block_hash(),\n            BlockSignatures::V2(block_signatures) => block_signatures.block_hash(),\n        }\n    }\n\n    /// Returns the era id of the associated block.\n    pub fn era_id(&self) -> EraId {\n        match self {\n            
BlockSignatures::V1(block_signatures) => block_signatures.era_id(),\n            BlockSignatures::V2(block_signatures) => block_signatures.era_id(),\n        }\n    }\n\n    /// Returns the finality signature associated with the given public key, if available.\n    pub fn finality_signature(&self, public_key: &PublicKey) -> Option<FinalitySignature> {\n        match self {\n            BlockSignatures::V1(block_signatures) => block_signatures\n                .finality_signature(public_key)\n                .map(FinalitySignature::V1),\n            BlockSignatures::V2(block_signatures) => block_signatures\n                .finality_signature(public_key)\n                .map(FinalitySignature::V2),\n        }\n    }\n\n    /// Returns `true` if there is a signature associated with the given public key.\n    pub fn has_finality_signature(&self, public_key: &PublicKey) -> bool {\n        match self {\n            BlockSignatures::V1(block_signatures) => {\n                block_signatures.has_finality_signature(public_key)\n            }\n            BlockSignatures::V2(block_signatures) => {\n                block_signatures.has_finality_signature(public_key)\n            }\n        }\n    }\n\n    /// Returns an iterator over all the signatures.\n    pub fn finality_signatures(&self) -> impl Iterator<Item = FinalitySignature> + '_ {\n        match self {\n            BlockSignatures::V1(block_signatures) => Either::Left(\n                block_signatures\n                    .finality_signatures()\n                    .map(FinalitySignature::V1),\n            ),\n            BlockSignatures::V2(block_signatures) => Either::Right(\n                block_signatures\n                    .finality_signatures()\n                    .map(FinalitySignature::V2),\n            ),\n        }\n    }\n\n    /// Returns an `BTreeMap` of public keys to signatures.\n    pub fn proofs(&self) -> &BTreeMap<PublicKey, Signature> {\n        match self {\n            
BlockSignatures::V1(block_signatures) => &block_signatures.proofs,\n            BlockSignatures::V2(block_signatures) => &block_signatures.proofs,\n        }\n    }\n\n    /// Returns an iterator over all the validator public keys.\n    pub fn signers(&self) -> impl Iterator<Item = &'_ PublicKey> + '_ {\n        match self {\n            BlockSignatures::V1(block_signatures) => Either::Left(block_signatures.signers()),\n            BlockSignatures::V2(block_signatures) => Either::Right(block_signatures.signers()),\n        }\n    }\n\n    /// Returns the number of signatures in the collection.\n    pub fn len(&self) -> usize {\n        match self {\n            BlockSignatures::V1(block_signatures) => block_signatures.len(),\n            BlockSignatures::V2(block_signatures) => block_signatures.len(),\n        }\n    }\n\n    /// Returns `true` if there are no signatures in the collection.\n    pub fn is_empty(&self) -> bool {\n        match self {\n            BlockSignatures::V1(block_signatures) => block_signatures.is_empty(),\n            BlockSignatures::V2(block_signatures) => block_signatures.is_empty(),\n        }\n    }\n\n    /// Merges the collection of signatures in `other` into `self`.\n    ///\n    /// Returns an error if the block hashes, block heights, era IDs, or chain name hashes do not\n    /// match.\n    pub fn merge(&mut self, mut other: Self) -> Result<(), BlockSignaturesMergeError> {\n        if self.block_hash() != other.block_hash() {\n            return Err(BlockSignaturesMergeError::BlockHashMismatch {\n                self_hash: *self.block_hash(),\n                other_hash: *other.block_hash(),\n            });\n        }\n\n        if self.era_id() != other.era_id() {\n            return Err(BlockSignaturesMergeError::EraIdMismatch {\n                self_era_id: self.era_id(),\n                other_era_id: other.era_id(),\n            });\n        }\n\n        match (self, &mut other) {\n            (BlockSignatures::V1(self_), 
BlockSignatures::V1(other)) => {\n                self_.proofs.append(&mut other.proofs);\n            }\n            (BlockSignatures::V2(self_), BlockSignatures::V2(other)) => {\n                if self_.block_height != other.block_height {\n                    return Err(BlockSignaturesMergeError::BlockHeightMismatch {\n                        self_height: self_.block_height,\n                        other_height: other.block_height,\n                    });\n                }\n\n                if self_.chain_name_hash != other.chain_name_hash {\n                    return Err(BlockSignaturesMergeError::ChainNameHashMismatch {\n                        self_chain_name_hash: self_.chain_name_hash,\n                        other_chain_name_hash: other.chain_name_hash,\n                    });\n                }\n\n                self_.proofs.append(&mut other.proofs);\n            }\n            _ => return Err(BlockSignaturesMergeError::VersionMismatch),\n        }\n\n        Ok(())\n    }\n\n    /// Returns `Ok` if and only if all the signatures are cryptographically valid.\n    pub fn is_verified(&self) -> Result<(), crypto::Error> {\n        match self {\n            BlockSignatures::V1(block_signatures) => block_signatures.is_verified(),\n            BlockSignatures::V2(block_signatures) => block_signatures.is_verified(),\n        }\n    }\n\n    /// Converts self into a `BTreeMap` of public keys to signatures.\n    pub fn into_proofs(self) -> BTreeMap<PublicKey, Signature> {\n        match self {\n            BlockSignatures::V1(block_signatures) => block_signatures.proofs,\n            BlockSignatures::V2(block_signatures) => block_signatures.proofs,\n        }\n    }\n\n    /// Inserts a new signature.\n    pub fn insert_signature(&mut self, public_key: PublicKey, signature: Signature) {\n        match self {\n            BlockSignatures::V1(block_signatures) => {\n                block_signatures.insert_signature(public_key, signature)\n            }\n   
         BlockSignatures::V2(block_signatures) => {\n                block_signatures.insert_signature(public_key, signature)\n            }\n        }\n    }\n\n    /// Removes a signature corresponding to the specified key.\n    pub fn remove_signature(&mut self, public_key: &PublicKey) -> Option<Signature> {\n        match self {\n            BlockSignatures::V1(block_signatures) => block_signatures.proofs.remove(public_key),\n            BlockSignatures::V2(block_signatures) => block_signatures.proofs.remove(public_key),\n        }\n    }\n\n    /// Sets the era ID to its max value, rendering it and hence `self` invalid (assuming the\n    /// relevant era ID for this `BlockHeaderWithSignatures` wasn't already the max value).\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn invalidate_era(&mut self) {\n        match self {\n            BlockSignatures::V1(block_signatures) => block_signatures.era_id = EraId::new(u64::MAX),\n            BlockSignatures::V2(block_signatures) => block_signatures.era_id = EraId::new(u64::MAX),\n        }\n    }\n\n    /// Replaces the signature field of the last `proofs` entry with the `System` variant\n    /// of [`Signature`], rendering that entry invalid.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn invalidate_last_signature(&mut self) {\n        let proofs = match self {\n            BlockSignatures::V1(block_signatures) => &mut block_signatures.proofs,\n            BlockSignatures::V2(block_signatures) => &mut block_signatures.proofs,\n        };\n        let last_proof = proofs\n            .last_entry()\n            .expect(\"should have at least one signature\");\n        *last_proof.into_mut() = Signature::System;\n    }\n\n    /// Returns a random `BlockSignatures`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        if rng.gen() {\n            BlockSignatures::V1(BlockSignaturesV1::random(rng))\n        } else {\n            
BlockSignatures::V2(BlockSignaturesV2::random(rng))\n        }\n    }\n}\n\nimpl Display for BlockSignatures {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            BlockSignatures::V1(block_signatures) => write!(formatter, \"{}\", block_signatures),\n            BlockSignatures::V2(block_signatures) => write!(formatter, \"{}\", block_signatures),\n        }\n    }\n}\n\nimpl From<BlockSignaturesV1> for BlockSignatures {\n    fn from(block_signatures: BlockSignaturesV1) -> Self {\n        BlockSignatures::V1(block_signatures)\n    }\n}\n\nimpl From<BlockSignaturesV2> for BlockSignatures {\n    fn from(block_signatures: BlockSignaturesV2) -> Self {\n        BlockSignatures::V2(block_signatures)\n    }\n}\n\nimpl ToBytes for BlockSignatures {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buf = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buf)?;\n        Ok(buf)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            BlockSignatures::V1(block_signatures) => {\n                writer.push(BLOCK_SIGNATURES_V1_TAG);\n                block_signatures.write_bytes(writer)?;\n            }\n            BlockSignatures::V2(block_signatures) => {\n                writer.push(BLOCK_SIGNATURES_V2_TAG);\n                block_signatures.write_bytes(writer)?;\n            }\n        }\n        Ok(())\n    }\n\n    fn serialized_length(&self) -> usize {\n        TAG_LENGTH\n            + match self {\n                BlockSignatures::V1(block_signatures) => block_signatures.serialized_length(),\n                BlockSignatures::V2(block_signatures) => block_signatures.serialized_length(),\n            }\n    }\n}\n\nimpl FromBytes for BlockSignatures {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n          
  BLOCK_SIGNATURES_V1_TAG => {\n                let (block_signatures, remainder) = BlockSignaturesV1::from_bytes(remainder)?;\n                Ok((BlockSignatures::V1(block_signatures), remainder))\n            }\n            BLOCK_SIGNATURES_V2_TAG => {\n                let (block_signatures, remainder) = BlockSignaturesV2::from_bytes(remainder)?;\n                Ok((BlockSignatures::V2(block_signatures), remainder))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n/// An error returned during an attempt to merge two incompatible [`BlockSignaturesV1`].\n#[derive(Copy, Clone, Eq, PartialEq, Debug)]\n#[non_exhaustive]\npub enum BlockSignaturesMergeError {\n    /// A mismatch between block hashes.\n    BlockHashMismatch {\n        /// The `self` hash.\n        self_hash: BlockHash,\n        /// The `other` hash.\n        other_hash: BlockHash,\n    },\n    /// A mismatch between block heights.\n    BlockHeightMismatch {\n        /// The `self` height.\n        self_height: u64,\n        /// The `other` height.\n        other_height: u64,\n    },\n    /// A mismatch between era IDs.\n    EraIdMismatch {\n        /// The `self` era ID.\n        self_era_id: EraId,\n        /// The `other` era ID.\n        other_era_id: EraId,\n    },\n    /// A mismatch between chain name hashes.\n    ChainNameHashMismatch {\n        /// The `self` chain name hash.\n        self_chain_name_hash: ChainNameDigest,\n        /// The `other` chain name hash.\n        other_chain_name_hash: ChainNameDigest,\n    },\n    /// A mismatch between the versions of the block signatures.\n    VersionMismatch,\n}\n\nimpl Display for BlockSignaturesMergeError {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            BlockSignaturesMergeError::BlockHashMismatch {\n                self_hash,\n                other_hash,\n            } => {\n                write!(\n                    formatter,\n                    
\"mismatch between block hashes while merging block signatures - self: {}, \\\n                    other: {}\",\n                    self_hash, other_hash\n                )\n            }\n            BlockSignaturesMergeError::BlockHeightMismatch {\n                self_height,\n                other_height,\n            } => {\n                write!(\n                    formatter,\n                    \"mismatch between block heights while merging block signatures - self: {}, \\\n                    other: {}\",\n                    self_height, other_height\n                )\n            }\n            BlockSignaturesMergeError::EraIdMismatch {\n                self_era_id,\n                other_era_id,\n            } => {\n                write!(\n                    formatter,\n                    \"mismatch between era ids while merging block signatures - self: {}, other: \\\n                    {}\",\n                    self_era_id, other_era_id\n                )\n            }\n            BlockSignaturesMergeError::ChainNameHashMismatch {\n                self_chain_name_hash,\n                other_chain_name_hash,\n            } => {\n                write!(\n                    formatter,\n                    \"mismatch between chain name hashes while merging block signatures - self: {}, \\\n                    other: {}\",\n                    self_chain_name_hash, other_chain_name_hash\n                )\n            }\n            BlockSignaturesMergeError::VersionMismatch => {\n                write!(\n                    formatter,\n                    \"mismatch between versions of block signatures while merging\"\n                )\n            }\n        }\n    }\n}\n\n#[cfg(feature = \"std\")]\nimpl StdError for BlockSignaturesMergeError {}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        let hash = BlockSignatures::random(rng);\n        
bytesrepr::test_serialization_roundtrip(&hash);\n    }\n}\n"
  },
  {
    "path": "types/src/block/block_sync_status.rs",
    "content": "use alloc::{string::String, vec::Vec};\n#[cfg(feature = \"json-schema\")]\nuse once_cell::sync::Lazy;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    BlockHash,\n};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n\n#[cfg(feature = \"json-schema\")]\nstatic BLOCK_SYNCHRONIZER_STATUS: Lazy<BlockSynchronizerStatus> = Lazy::new(|| {\n    use crate::Digest;\n\n    BlockSynchronizerStatus::new(\n        Some(BlockSyncStatus {\n            block_hash: BlockHash::new(\n                Digest::from_hex(\n                    \"16ddf28e2b3d2e17f4cef36f8b58827eca917af225d139b0c77df3b4a67dc55e\",\n                )\n                .unwrap(),\n            ),\n            block_height: Some(40),\n            acquisition_state: \"have strict finality(40) for: block hash 16dd..c55e\".to_string(),\n        }),\n        Some(BlockSyncStatus {\n            block_hash: BlockHash::new(\n                Digest::from_hex(\n                    \"59907b1e32a9158169c4d89d9ce5ac9164fc31240bfcfb0969227ece06d74983\",\n                )\n                .unwrap(),\n            ),\n            block_height: Some(6701),\n            acquisition_state: \"have block body(6701) for: block hash 5990..4983\".to_string(),\n        }),\n    )\n});\n\n/// The status of syncing an individual block.\n#[derive(Clone, Default, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct BlockSyncStatus {\n    /// The block hash.\n    block_hash: BlockHash,\n    /// The height of the block, if known.\n    block_height: Option<u64>,\n    /// The state of acquisition of the data associated with the block.\n    acquisition_state: String,\n}\n\nimpl BlockSyncStatus {\n    /// Constructs a new 
`BlockSyncStatus`.\n    pub fn new(\n        block_hash: BlockHash,\n        block_height: Option<u64>,\n        acquisition_state: String,\n    ) -> Self {\n        Self {\n            block_hash,\n            block_height,\n            acquisition_state,\n        }\n    }\n\n    /// Random.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        Self {\n            block_hash: BlockHash::random(rng),\n            block_height: rng.gen::<bool>().then_some(rng.gen()),\n            acquisition_state: rng.random_string(10..20),\n        }\n    }\n}\n\nimpl ToBytes for BlockSyncStatus {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.block_hash.write_bytes(writer)?;\n        self.block_height.write_bytes(writer)?;\n        self.acquisition_state.write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.block_hash.serialized_length()\n            + self.block_height.serialized_length()\n            + self.acquisition_state.serialized_length()\n    }\n}\n\nimpl FromBytes for BlockSyncStatus {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (block_hash, remainder) = BlockHash::from_bytes(bytes)?;\n        let (block_height, remainder) = Option::<u64>::from_bytes(remainder)?;\n        let (acquisition_state, remainder) = String::from_bytes(remainder)?;\n        Ok((\n            BlockSyncStatus {\n                block_hash,\n                block_height,\n                acquisition_state,\n            },\n            remainder,\n        ))\n    }\n}\n\n/// The status of the block synchronizer.\n#[derive(Clone, Default, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"json-schema\", 
derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct BlockSynchronizerStatus {\n    /// The status of syncing a historical block, if any.\n    historical: Option<BlockSyncStatus>,\n    /// The status of syncing a forward block, if any.\n    forward: Option<BlockSyncStatus>,\n}\n\nimpl BlockSynchronizerStatus {\n    /// Constructs a new `BlockSynchronizerStatus`.\n    pub fn new(historical: Option<BlockSyncStatus>, forward: Option<BlockSyncStatus>) -> Self {\n        Self {\n            historical,\n            forward,\n        }\n    }\n\n    /// Returns an example `BlockSynchronizerStatus`.\n    #[cfg(feature = \"json-schema\")]\n    pub fn example() -> &'static Self {\n        &BLOCK_SYNCHRONIZER_STATUS\n    }\n\n    /// Random.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        let historical = rng.gen::<bool>().then_some(BlockSyncStatus::random(rng));\n        let forward = rng.gen::<bool>().then_some(BlockSyncStatus::random(rng));\n        Self {\n            historical,\n            forward,\n        }\n    }\n\n    /// Returns status of the historical block sync.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn historical(&self) -> &Option<BlockSyncStatus> {\n        &self.historical\n    }\n\n    /// Returns status of the forward block sync.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn forward(&self) -> &Option<BlockSyncStatus> {\n        &self.forward\n    }\n}\n\nimpl ToBytes for BlockSynchronizerStatus {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.historical.write_bytes(writer)?;\n        self.forward.write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.historical.serialized_length() + 
self.forward.serialized_length()\n    }\n}\n\nimpl FromBytes for BlockSynchronizerStatus {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (historical, remainder) = Option::<BlockSyncStatus>::from_bytes(bytes)?;\n        let (forward, remainder) = Option::<BlockSyncStatus>::from_bytes(remainder)?;\n        Ok((\n            BlockSynchronizerStatus {\n                historical,\n                forward,\n            },\n            remainder,\n        ))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::testing::TestRng;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = BlockSyncStatus::random(rng);\n        bytesrepr::test_serialization_roundtrip(&val);\n    }\n}\n"
  },
  {
    "path": "types/src/block/block_v1.rs",
    "content": "#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\nuse alloc::collections::BTreeMap;\nuse alloc::{boxed::Box, vec::Vec};\nuse core::fmt::{self, Display, Formatter};\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\nuse core::iter;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\n#[cfg(any(feature = \"std\", test))]\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"once_cell\", test))]\nuse once_cell::sync::OnceCell;\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\nuse rand::Rng;\n\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\nuse crate::U512;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    Block, BlockBodyV1, BlockHash, BlockHeaderV1, BlockValidationError, DeployHash, Digest,\n    EraEndV1, EraId, ProtocolVersion, PublicKey, Timestamp,\n};\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\nuse crate::{testing::TestRng, EraReport};\n\n/// A block after execution, with the resulting global state root hash. This is the core component\n/// of the Casper linear blockchain. 
Version 1.\n#[cfg_attr(any(feature = \"std\", test), derive(Serialize, Deserialize))]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[derive(Clone, Eq, PartialEq, Debug)]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct BlockV1 {\n    /// The block hash identifying this block.\n    pub(super) hash: BlockHash,\n    /// The header portion of the block.\n    pub(super) header: BlockHeaderV1,\n    /// The body portion of the block.\n    pub(super) body: BlockBodyV1,\n}\n\nimpl BlockV1 {\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    #[allow(clippy::too_many_arguments)]\n    pub fn new(\n        parent_hash: BlockHash,\n        parent_seed: Digest,\n        state_root_hash: Digest,\n        random_bit: bool,\n        era_end: Option<EraEndV1>,\n        timestamp: Timestamp,\n        era_id: EraId,\n        height: u64,\n        protocol_version: ProtocolVersion,\n        proposer: PublicKey,\n        deploy_hashes: Vec<DeployHash>,\n        transfer_hashes: Vec<DeployHash>,\n    ) -> Self {\n        let body = BlockBodyV1::new(proposer, deploy_hashes, transfer_hashes);\n        let body_hash = body.hash();\n        let accumulated_seed = Digest::hash_pair(parent_seed, [random_bit as u8]);\n        let header = BlockHeaderV1::new(\n            parent_hash,\n            state_root_hash,\n            body_hash,\n            random_bit,\n            accumulated_seed,\n            era_end,\n            timestamp,\n            era_id,\n            height,\n            protocol_version,\n            #[cfg(any(feature = \"once_cell\", test))]\n            OnceCell::new(),\n        );\n        Self::new_from_header_and_body(header, body)\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    pub fn new_from_header_and_body(header: BlockHeaderV1, body: BlockBodyV1) -> Self {\n        let hash = header.block_hash();\n        BlockV1 { hash, header, body 
}\n    }\n\n    /// Returns the `BlockHash` identifying this block.\n    pub fn hash(&self) -> &BlockHash {\n        &self.hash\n    }\n\n    /// Returns the block's header.\n    pub fn header(&self) -> &BlockHeaderV1 {\n        &self.header\n    }\n\n    /// Returns the block's header, consuming `self`.\n    pub fn take_header(self) -> BlockHeaderV1 {\n        self.header\n    }\n\n    /// Returns the block's body.\n    pub fn body(&self) -> &BlockBodyV1 {\n        &self.body\n    }\n\n    /// Returns the block's body, consuming `self`.\n    pub fn take_body(self) -> BlockBodyV1 {\n        self.body\n    }\n\n    /// Returns the parent block's hash.\n    pub fn parent_hash(&self) -> &BlockHash {\n        self.header.parent_hash()\n    }\n\n    /// Returns the root hash of global state after the deploys in this block have been executed.\n    pub fn state_root_hash(&self) -> &Digest {\n        self.header.state_root_hash()\n    }\n\n    /// Returns the hash of the block's body.\n    pub fn body_hash(&self) -> &Digest {\n        self.header.body_hash()\n    }\n\n    /// Returns a random bit needed for initializing a future era.\n    pub fn random_bit(&self) -> bool {\n        self.header.random_bit()\n    }\n\n    /// Returns a seed needed for initializing a future era.\n    pub fn accumulated_seed(&self) -> &Digest {\n        self.header.accumulated_seed()\n    }\n\n    /// Returns the `EraEnd` of a block if it is a switch block.\n    pub fn era_end(&self) -> Option<&EraEndV1> {\n        self.header.era_end()\n    }\n\n    /// Returns the timestamp from when the block was proposed.\n    pub fn timestamp(&self) -> Timestamp {\n        self.header.timestamp()\n    }\n\n    /// Returns the era ID in which this block was created.\n    pub fn era_id(&self) -> EraId {\n        self.header.era_id()\n    }\n\n    /// Returns the height of this block, i.e. 
the number of ancestors.\n    pub fn height(&self) -> u64 {\n        self.header.height()\n    }\n\n    /// Returns the protocol version of the network from when this block was created.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.header.protocol_version()\n    }\n\n    /// Returns `true` if this block is the last one in the current era.\n    pub fn is_switch_block(&self) -> bool {\n        self.header.is_switch_block()\n    }\n\n    /// Returns `true` if this block is the Genesis block, i.e. has height 0 and era 0.\n    pub fn is_genesis(&self) -> bool {\n        self.header.is_genesis()\n    }\n\n    /// Returns the public key of the validator which proposed the block.\n    pub fn proposer(&self) -> &PublicKey {\n        self.body.proposer()\n    }\n\n    /// Returns the deploy hashes within the block.\n    pub fn deploy_hashes(&self) -> &[DeployHash] {\n        self.body.deploy_hashes()\n    }\n\n    /// Returns the transfer hashes within the block.\n    pub fn transfer_hashes(&self) -> &[DeployHash] {\n        self.body.transfer_hashes()\n    }\n\n    /// Returns the deploy and transfer hashes in the order in which they were executed.\n    pub fn deploy_and_transfer_hashes(&self) -> impl Iterator<Item = &DeployHash> {\n        self.deploy_hashes()\n            .iter()\n            .chain(self.transfer_hashes().iter())\n    }\n\n    /// Returns `Ok` if and only if the block's provided block hash and body hash are identical to\n    /// those generated by hashing the appropriate input data.\n    pub fn verify(&self) -> Result<(), BlockValidationError> {\n        let actual_block_header_hash = self.header().block_hash();\n        if *self.hash() != actual_block_header_hash {\n            return Err(BlockValidationError::UnexpectedBlockHash {\n                block: Box::new(Block::V1(self.clone())),\n                actual_block_hash: actual_block_header_hash,\n            });\n        }\n\n        let actual_block_body_hash = 
self.body.hash();\n        if *self.header.body_hash() != actual_block_body_hash {\n            return Err(BlockValidationError::UnexpectedBodyHash {\n                block: Box::new(Block::V1(self.clone())),\n                actual_block_body_hash,\n            });\n        }\n\n        Ok(())\n    }\n\n    /// Returns a random block, but using the provided values.\n    ///\n    /// If `deploy_hashes_iter` is empty, a few random deploy hashes will be added to the\n    /// `deploy_hashes` and `transfer_hashes` fields of the body.  Otherwise, the provided deploy\n    /// hashes will populate the `deploy_hashes` field and `transfer_hashes` will be empty.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_specifics<I: IntoIterator<Item = DeployHash>>(\n        rng: &mut TestRng,\n        era_id: EraId,\n        height: u64,\n        protocol_version: ProtocolVersion,\n        is_switch: bool,\n        deploy_hashes_iter: I,\n    ) -> Self {\n        let parent_hash = BlockHash::random(rng);\n        let parent_seed = Digest::random(rng);\n        let state_root_hash = Digest::random(rng);\n        let random_bit = rng.gen();\n        let era_end = is_switch.then(|| {\n            let mut next_era_validator_weights = BTreeMap::new();\n            for i in 1_u64..6 {\n                let _ = next_era_validator_weights.insert(PublicKey::random(rng), U512::from(i));\n            }\n            EraEndV1::new(EraReport::random(rng), next_era_validator_weights)\n        });\n        let timestamp = Timestamp::now();\n        let proposer = PublicKey::random(rng);\n        let mut deploy_hashes: Vec<DeployHash> = deploy_hashes_iter.into_iter().collect();\n        let mut transfer_hashes: Vec<DeployHash> = vec![];\n        if deploy_hashes.is_empty() {\n            let count = rng.gen_range(0..6);\n            deploy_hashes = iter::repeat_with(|| DeployHash::random(rng))\n                .take(count)\n                .collect();\n     
       let count = rng.gen_range(0..6);\n            transfer_hashes = iter::repeat_with(|| DeployHash::random(rng))\n                .take(count)\n                .collect();\n        }\n\n        BlockV1::new(\n            parent_hash,\n            parent_seed,\n            state_root_hash,\n            random_bit,\n            era_end,\n            timestamp,\n            era_id,\n            height,\n            protocol_version,\n            proposer,\n            deploy_hashes,\n            transfer_hashes,\n        )\n    }\n}\n\nimpl Display for BlockV1 {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            formatter,\n            \"executed block #{}, {}, timestamp {}, {}, parent {}, post-state hash {}, body hash \\\n            {}, random bit {}, protocol version: {}\",\n            self.height(),\n            self.hash(),\n            self.timestamp(),\n            self.era_id(),\n            self.parent_hash().inner(),\n            self.state_root_hash(),\n            self.body_hash(),\n            self.random_bit(),\n            self.protocol_version()\n        )?;\n        if let Some(era_end) = self.era_end() {\n            write!(formatter, \", era_end: {}\", era_end)?;\n        }\n        Ok(())\n    }\n}\n\nimpl ToBytes for BlockV1 {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.hash.write_bytes(writer)?;\n        self.header.write_bytes(writer)?;\n        self.body.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.hash.serialized_length()\n            + self.header.serialized_length()\n            + self.body.serialized_length()\n    }\n}\n\nimpl FromBytes for BlockV1 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), 
bytesrepr::Error> {\n        let (hash, remainder) = BlockHash::from_bytes(bytes)?;\n        let (header, remainder) = BlockHeaderV1::from_bytes(remainder)?;\n        let (body, remainder) = BlockBodyV1::from_bytes(remainder)?;\n        let block = BlockV1 { hash, header, body };\n        Ok((block, remainder))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{Block, TestBlockV1Builder};\n\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        let block = TestBlockV1Builder::new().build(rng);\n        bytesrepr::test_serialization_roundtrip(&block);\n    }\n\n    #[test]\n    fn block_check_bad_body_hash_sad_path() {\n        let rng = &mut TestRng::new();\n\n        let mut block = TestBlockV1Builder::new().build(rng);\n        let bogus_block_body_hash = Digest::hash([0xde, 0xad, 0xbe, 0xef]);\n        block.header.set_body_hash(bogus_block_body_hash);\n        block.hash = block.header.block_hash();\n\n        let expected_error = BlockValidationError::UnexpectedBodyHash {\n            block: Box::new(Block::V1(block.clone())),\n            actual_block_body_hash: block.body.hash(),\n        };\n        assert_eq!(block.verify(), Err(expected_error));\n    }\n\n    #[test]\n    fn block_check_bad_block_hash_sad_path() {\n        let rng = &mut TestRng::new();\n\n        let mut block = TestBlockV1Builder::new().build(rng);\n        let bogus_block_hash = BlockHash::from(Digest::hash([0xde, 0xad, 0xbe, 0xef]));\n        block.hash = bogus_block_hash;\n\n        let expected_error = BlockValidationError::UnexpectedBlockHash {\n            block: Box::new(Block::V1(block.clone())),\n            actual_block_hash: block.header.block_hash(),\n        };\n        assert_eq!(block.verify(), Err(expected_error));\n    }\n}\n"
  },
  {
    "path": "types/src/block/block_v2.rs",
    "content": "use alloc::{boxed::Box, collections::BTreeMap, vec::Vec};\n\nuse core::{\n    convert::TryFrom,\n    fmt::{self, Display, Formatter},\n};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse once_cell::sync::Lazy;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\n#[cfg(any(feature = \"std\", test))]\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"once_cell\", test))]\nuse once_cell::sync::OnceCell;\n\nuse super::{Block, BlockBodyV2, BlockConversionError, RewardedSignatures};\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    transaction::TransactionHash,\n    BlockHash, BlockHeaderV2, BlockValidationError, Digest, EraEndV2, EraId, ProtocolVersion,\n    PublicKey, Timestamp,\n};\n#[cfg(feature = \"json-schema\")]\nuse crate::{TransactionV1Hash, AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID};\n\n#[cfg(feature = \"json-schema\")]\nstatic BLOCK_V2: Lazy<BlockV2> = Lazy::new(|| {\n    let parent_hash = BlockHash::new(Digest::from([7; Digest::LENGTH]));\n    let parent_seed = Digest::from([9; Digest::LENGTH]);\n    let state_root_hash = Digest::from([8; Digest::LENGTH]);\n    let random_bit = true;\n    let era_end = Some(EraEndV2::example().clone());\n    let timestamp = *Timestamp::example();\n    let era_id = EraId::from(1);\n    let height = 10;\n    let protocol_version = ProtocolVersion::V1_0_0;\n    let secret_key = crate::SecretKey::example();\n    let proposer = PublicKey::from(secret_key);\n    let mint_hashes = vec![TransactionHash::V1(TransactionV1Hash::new(Digest::from(\n        [20; Digest::LENGTH],\n    )))];\n    let auction_hashes = vec![TransactionHash::V1(TransactionV1Hash::new(Digest::from(\n        [21; Digest::LENGTH],\n    )))];\n    let installer_upgrader_hashes = vec![TransactionHash::V1(TransactionV1Hash::new(\n        Digest::from([22; 
Digest::LENGTH]),\n    ))];\n    let transactions = {\n        let mut ret = BTreeMap::new();\n        ret.insert(MINT_LANE_ID, mint_hashes);\n        ret.insert(AUCTION_LANE_ID, auction_hashes);\n        ret.insert(INSTALL_UPGRADE_LANE_ID, installer_upgrader_hashes);\n        ret\n    };\n    let rewarded_signatures = RewardedSignatures::default();\n    let current_gas_price = 1u8;\n    let last_switch_block_hash = BlockHash::new(Digest::from([10; Digest::LENGTH]));\n    BlockV2::new(\n        parent_hash,\n        parent_seed,\n        state_root_hash,\n        random_bit,\n        era_end,\n        timestamp,\n        era_id,\n        height,\n        protocol_version,\n        proposer,\n        transactions,\n        rewarded_signatures,\n        current_gas_price,\n        Some(last_switch_block_hash),\n    )\n});\n\n/// A block after execution, with the resulting global state root hash. This is the core component\n/// of the Casper linear blockchain. Version 2.\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(any(feature = \"std\", test), derive(Serialize, Deserialize))]\n#[derive(Clone, Debug, PartialEq, Eq)]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct BlockV2 {\n    /// The block hash identifying this block.\n    pub(super) hash: BlockHash,\n    /// The header portion of the block.\n    pub(super) header: BlockHeaderV2,\n    /// The body portion of the block.\n    pub(super) body: BlockBodyV2,\n}\n\nimpl BlockV2 {\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    #[allow(clippy::too_many_arguments)]\n    pub fn new(\n        parent_hash: BlockHash,\n        parent_seed: Digest,\n        state_root_hash: Digest,\n        random_bit: bool,\n        era_end: Option<EraEndV2>,\n        timestamp: Timestamp,\n        era_id: EraId,\n        height: u64,\n        protocol_version: ProtocolVersion,\n        proposer: PublicKey,\n        transactions: BTreeMap<u8, 
Vec<TransactionHash>>,\n        rewarded_signatures: RewardedSignatures,\n        current_gas_price: u8,\n        last_switch_block_hash: Option<BlockHash>,\n    ) -> Self {\n        let body = BlockBodyV2::new(transactions, rewarded_signatures);\n        let body_hash = body.hash();\n        let accumulated_seed = Digest::hash_pair(parent_seed, [random_bit as u8]);\n        let header = BlockHeaderV2::new(\n            parent_hash,\n            state_root_hash,\n            body_hash,\n            random_bit,\n            accumulated_seed,\n            era_end,\n            timestamp,\n            era_id,\n            height,\n            protocol_version,\n            proposer,\n            current_gas_price,\n            last_switch_block_hash,\n            #[cfg(any(feature = \"once_cell\", test))]\n            OnceCell::new(),\n        );\n        Self::new_from_header_and_body(header, body)\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    pub fn new_from_header_and_body(header: BlockHeaderV2, body: BlockBodyV2) -> Self {\n        let hash = header.block_hash();\n        BlockV2 { hash, header, body }\n    }\n\n    /// Returns the `BlockHash` identifying this block.\n    pub fn hash(&self) -> &BlockHash {\n        &self.hash\n    }\n\n    /// Returns the block's header.\n    pub fn header(&self) -> &BlockHeaderV2 {\n        &self.header\n    }\n\n    /// Returns the block's header, consuming `self`.\n    pub fn take_header(self) -> BlockHeaderV2 {\n        self.header\n    }\n\n    /// Returns the block's body.\n    pub fn body(&self) -> &BlockBodyV2 {\n        &self.body\n    }\n\n    /// Returns the block's body, consuming `self`.\n    pub fn take_body(self) -> BlockBodyV2 {\n        self.body\n    }\n\n    /// Returns the parent block's hash.\n    pub fn parent_hash(&self) -> &BlockHash {\n        self.header.parent_hash()\n    }\n\n    /// Returns the root hash of global state after the deploys in this 
block have been executed.\n    pub fn state_root_hash(&self) -> &Digest {\n        self.header.state_root_hash()\n    }\n\n    /// Returns the hash of the block's body.\n    pub fn body_hash(&self) -> &Digest {\n        self.header.body_hash()\n    }\n\n    /// Returns a random bit needed for initializing a future era.\n    pub fn random_bit(&self) -> bool {\n        self.header.random_bit()\n    }\n\n    /// Returns a seed needed for initializing a future era.\n    pub fn accumulated_seed(&self) -> &Digest {\n        self.header.accumulated_seed()\n    }\n\n    /// Returns the `EraEnd` of a block if it is a switch block.\n    pub fn era_end(&self) -> Option<&EraEndV2> {\n        self.header.era_end()\n    }\n\n    /// Returns the timestamp from when the block was proposed.\n    pub fn timestamp(&self) -> Timestamp {\n        self.header.timestamp()\n    }\n\n    /// Returns the era ID in which this block was created.\n    pub fn era_id(&self) -> EraId {\n        self.header.era_id()\n    }\n\n    /// Returns the height of this block, i.e. the number of ancestors.\n    pub fn height(&self) -> u64 {\n        self.header.height()\n    }\n\n    /// Returns the protocol version of the network from when this block was created.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.header.protocol_version()\n    }\n\n    /// Returns `true` if this block is the last one in the current era.\n    pub fn is_switch_block(&self) -> bool {\n        self.header.is_switch_block()\n    }\n\n    /// Returns `true` if this block is the Genesis block, i.e. 
has height 0 and era 0.\n    pub fn is_genesis(&self) -> bool {\n        self.header.is_genesis()\n    }\n\n    /// Returns the public key of the validator which proposed the block.\n    pub fn proposer(&self) -> &PublicKey {\n        self.header.proposer()\n    }\n\n    /// List of identifiers for finality signatures for a particular past block.\n    pub fn rewarded_signatures(&self) -> &RewardedSignatures {\n        self.body.rewarded_signatures()\n    }\n\n    /// Returns the hashes of the transfer transactions within the block.\n    pub fn mint(&self) -> impl Iterator<Item = TransactionHash> {\n        self.body.mint()\n    }\n\n    /// Returns the hashes of the non-transfer, native transactions within the block.\n    pub fn auction(&self) -> impl Iterator<Item = TransactionHash> {\n        self.body.auction()\n    }\n\n    /// Returns the hashes of the install/upgrade wasm transactions within the block.\n    pub fn install_upgrade(&self) -> impl Iterator<Item = TransactionHash> {\n        self.body.install_upgrade()\n    }\n\n    /// Returns the hashes of the transactions filtered by lane id within the block.\n    pub fn transactions_by_lane_id(&self, lane_id: u8) -> impl Iterator<Item = TransactionHash> {\n        self.body.transaction_by_lane(lane_id)\n    }\n\n    /// Returns all of the transaction hashes in the order in which they were executed.\n    pub fn all_transactions(&self) -> impl Iterator<Item = &TransactionHash> {\n        self.body.all_transactions()\n    }\n\n    /// Returns a reference to the collection of mapped transactions.\n    pub fn transactions(&self) -> &BTreeMap<u8, Vec<TransactionHash>> {\n        self.body.transactions()\n    }\n\n    /// Returns the last relevant switch block hash.\n    pub fn last_switch_block_hash(&self) -> Option<BlockHash> {\n        self.header.last_switch_block_hash()\n    }\n\n    /// Returns `Ok` if and only if the block's provided block hash and body hash are identical to\n    /// those generated by 
hashing the appropriate input data.\n    pub fn verify(&self) -> Result<(), BlockValidationError> {\n        let actual_block_header_hash = self.header().block_hash();\n        if *self.hash() != actual_block_header_hash {\n            return Err(BlockValidationError::UnexpectedBlockHash {\n                block: Box::new(Block::V2(self.clone())),\n                actual_block_hash: actual_block_header_hash,\n            });\n        }\n\n        let actual_block_body_hash = self.body.hash();\n        if *self.header.body_hash() != actual_block_body_hash {\n            return Err(BlockValidationError::UnexpectedBodyHash {\n                block: Box::new(Block::V2(self.clone())),\n                actual_block_body_hash,\n            });\n        }\n\n        Ok(())\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    #[cfg(feature = \"json-schema\")]\n    pub fn example() -> &'static Self {\n        &BLOCK_V2\n    }\n\n    /// Makes the block invalid, for testing purpose.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn make_invalid(self, rng: &mut TestRng) -> Self {\n        let block = BlockV2 {\n            hash: BlockHash::random(rng),\n            ..self\n        };\n\n        assert!(block.verify().is_err());\n        block\n    }\n}\n\nimpl Display for BlockV2 {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            formatter,\n            \"executed block #{}, {}, timestamp {}, {}, parent {}, post-state hash {}, body hash \\\n            {}, random bit {}, protocol version: {}\",\n            self.height(),\n            self.hash(),\n            self.timestamp(),\n            self.era_id(),\n            self.parent_hash().inner(),\n            self.state_root_hash(),\n            self.body_hash(),\n            self.random_bit(),\n            self.protocol_version()\n        )?;\n        if let Some(era_end) = self.era_end() {\n       
     write!(formatter, \", era_end: {}\", era_end)?;\n        }\n        Ok(())\n    }\n}\n\nimpl ToBytes for BlockV2 {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.hash.write_bytes(writer)?;\n        self.header.write_bytes(writer)?;\n        self.body.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.hash.serialized_length()\n            + self.header.serialized_length()\n            + self.body.serialized_length()\n    }\n}\n\nimpl FromBytes for BlockV2 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (hash, remainder) = BlockHash::from_bytes(bytes)?;\n        let (header, remainder) = BlockHeaderV2::from_bytes(remainder)?;\n        let (body, remainder) = BlockBodyV2::from_bytes(remainder)?;\n        let block = BlockV2 { hash, header, body };\n        Ok((block, remainder))\n    }\n}\n\nimpl TryFrom<Block> for BlockV2 {\n    type Error = BlockConversionError;\n\n    fn try_from(value: Block) -> Result<BlockV2, BlockConversionError> {\n        match value {\n            Block::V2(v2) => Ok(v2),\n            _ => Err(BlockConversionError::DifferentVersion {\n                expected_version: 2,\n            }),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::TestBlockBuilder;\n\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        let block = TestBlockBuilder::new().build(rng);\n        bytesrepr::test_serialization_roundtrip(&block);\n    }\n\n    #[test]\n    fn block_check_bad_body_hash_sad_path() {\n        let rng = &mut TestRng::new();\n\n        let mut block = TestBlockBuilder::new().build(rng);\n        let bogus_block_body_hash = Digest::hash([0xde, 
0xad, 0xbe, 0xef]);\n        block.header.set_body_hash(bogus_block_body_hash);\n        block.hash = block.header.block_hash();\n\n        let expected_error = BlockValidationError::UnexpectedBodyHash {\n            block: Box::new(Block::V2(block.clone())),\n            actual_block_body_hash: block.body.hash(),\n        };\n        assert_eq!(block.verify(), Err(expected_error));\n    }\n\n    #[test]\n    fn block_check_bad_block_hash_sad_path() {\n        let rng = &mut TestRng::new();\n\n        let mut block = TestBlockBuilder::new().build(rng);\n        let bogus_block_hash = BlockHash::from(Digest::hash([0xde, 0xad, 0xbe, 0xef]));\n        block.hash = bogus_block_hash;\n\n        let expected_error = BlockValidationError::UnexpectedBlockHash {\n            block: Box::new(Block::V2(block.clone())),\n            actual_block_hash: block.header.block_hash(),\n        };\n        assert_eq!(block.verify(), Err(expected_error));\n    }\n}\n"
  },
  {
    "path": "types/src/block/block_with_signatures.rs",
    "content": "use alloc::vec::Vec;\nuse core::fmt::{self, Display, Formatter};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    Block, BlockSignatures,\n};\n#[cfg(any(feature = \"std\", feature = \"json-schema\", test))]\nuse serde::{Deserialize, Serialize};\n\n/// A block and signatures for that block.\n#[derive(Clone, Debug, PartialEq, Eq)]\n#[cfg_attr(\n    any(feature = \"std\", feature = \"json-schema\", test),\n    derive(Serialize, Deserialize)\n)]\npub struct BlockWithSignatures {\n    /// Block.\n    pub(crate) block: Block,\n    // The signatures of the block.\n    pub(crate) block_signatures: BlockSignatures,\n}\n\nimpl BlockWithSignatures {\n    /// Creates a new `BlockWithSignatures`.\n    pub fn new(block: Block, block_signatures: BlockSignatures) -> Self {\n        Self {\n            block,\n            block_signatures,\n        }\n    }\n\n    /// Returns the inner block.\n    pub fn block(&self) -> &Block {\n        &self.block\n    }\n\n    /// Returns the block signatures.\n    pub fn block_signatures(&self) -> &BlockSignatures {\n        &self.block_signatures\n    }\n\n    /// Converts `self` into the block and signatures.\n    pub fn into_inner(self) -> (Block, BlockSignatures) {\n        (self.block, self.block_signatures)\n    }\n}\n\nimpl FromBytes for BlockWithSignatures {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (block, bytes) = FromBytes::from_bytes(bytes)?;\n        let (block_signatures, bytes) = FromBytes::from_bytes(bytes)?;\n        Ok((BlockWithSignatures::new(block, block_signatures), bytes))\n    }\n}\n\nimpl ToBytes for BlockWithSignatures {\n    fn to_bytes(&self) -> Result<Vec<u8>, crate::bytesrepr::Error> {\n        let mut buf = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buf)?;\n        Ok(buf)\n    }\n\n    fn write_bytes(&self, bytes: &mut Vec<u8>) -> Result<(), crate::bytesrepr::Error> {\n        self.block.write_bytes(bytes)?;\n  
      self.block_signatures.write_bytes(bytes)?;\n        Ok(())\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.block.serialized_length() + self.block_signatures.serialized_length()\n    }\n}\n\nimpl Display for BlockWithSignatures {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            f,\n            \"block #{}, {}, with {} block signatures\",\n            self.block.height(),\n            self.block.hash(),\n            self.block_signatures.len()\n        )\n    }\n}\n"
  },
  {
    "path": "types/src/block/chain_name_digest.rs",
    "content": "use core::fmt::{self, Display, Formatter};\n\nuse alloc::vec::Vec;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    Digest,\n};\n\n/// A cryptographic hash of a chain name.\n#[derive(\n    Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, Default,\n)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"Hex-encoded cryptographic hash of a chain name.\")\n)]\n#[serde(deny_unknown_fields)]\npub struct ChainNameDigest(Digest);\n\nimpl ChainNameDigest {\n    /// The number of bytes in a `ChainNameDigest` digest.\n    pub const LENGTH: usize = Digest::LENGTH;\n\n    /// Constructs a new `ChainNameDigest` from the given chain name.\n    pub fn from_chain_name(name: &str) -> Self {\n        ChainNameDigest(Digest::hash(name.as_bytes()))\n    }\n\n    /// Returns the wrapped inner digest.\n    pub fn inner(&self) -> &Digest {\n        &self.0\n    }\n\n    /// Returns a new `ChainNameDigest` directly initialized with the provided `Digest`;\n    /// no hashing is done.\n    #[cfg(any(feature = \"testing\", test))]\n    pub const fn from_digest(digest: Digest) -> Self {\n        ChainNameDigest(digest)\n    }\n\n    /// Returns a random `ChainNameDigest`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        let hash = rng.gen::<[u8; Digest::LENGTH]>().into();\n        ChainNameDigest(hash)\n    }\n}\n\nimpl Display for ChainNameDigest {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        write!(f, \"chain-name-hash({})\", self.0)\n    }\n}\n\nimpl ToBytes for 
ChainNameDigest {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.0.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n}\n\nimpl FromBytes for ChainNameDigest {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        Digest::from_bytes(bytes).map(|(inner, remainder)| (Self(inner), remainder))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        let hash = ChainNameDigest::random(rng);\n        bytesrepr::test_serialization_roundtrip(&hash);\n    }\n}\n"
  },
  {
    "path": "types/src/block/era_end/era_end_v1/era_report.rs",
    "content": "use alloc::{collections::BTreeMap, vec::Vec};\nuse core::fmt::{self, Display, Formatter};\n#[cfg(any(feature = \"testing\", test))]\nuse core::iter;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse once_cell::sync::Lazy;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n#[cfg(feature = \"json-schema\")]\nuse serde_map_to_array::KeyValueJsonSchema;\nuse serde_map_to_array::{BTreeMapToArray, KeyValueLabels};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\n#[cfg(feature = \"json-schema\")]\nuse crate::SecretKey;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    Digest, DisplayIter, PublicKey,\n};\n\n#[cfg(feature = \"json-schema\")]\nstatic ERA_REPORT: Lazy<EraReport<PublicKey>> = Lazy::new(|| {\n    let secret_key_1 = SecretKey::ed25519_from_bytes([0; 32]).unwrap();\n    let public_key_1 = PublicKey::from(&secret_key_1);\n    let equivocators = vec![public_key_1];\n\n    let secret_key_3 = SecretKey::ed25519_from_bytes([2; 32]).unwrap();\n    let public_key_3 = PublicKey::from(&secret_key_3);\n    let inactive_validators = vec![public_key_3];\n\n    let rewards = BTreeMap::new();\n\n    EraReport {\n        equivocators,\n        rewards,\n        inactive_validators,\n    }\n});\n\n/// Equivocation, reward and validator inactivity information.\n///\n/// `VID` represents validator ID type, generally [`PublicKey`].\n#[derive(Clone, Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(bound(\n    serialize = \"VID: Ord + Serialize\",\n    deserialize = \"VID: Ord + Deserialize<'de>\",\n))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    schemars(description = \"Equivocation, reward and validator inactivity information.\")\n)]\npub struct EraReport<VID> {\n    /// The set of 
equivocators.\n    pub(super) equivocators: Vec<VID>,\n    /// Rewards for finalization of earlier blocks.\n    #[serde(with = \"BTreeMapToArray::<VID, u64, EraRewardsLabels>\")]\n    pub(super) rewards: BTreeMap<VID, u64>,\n    /// Validators that haven't produced any unit during the era.\n    pub(super) inactive_validators: Vec<VID>,\n}\n\nimpl<VID> EraReport<VID> {\n    /// Constructs a new `EraReport`.\n    pub fn new(\n        equivocators: Vec<VID>,\n        rewards: BTreeMap<VID, u64>,\n        inactive_validators: Vec<VID>,\n    ) -> Self {\n        EraReport {\n            equivocators,\n            rewards,\n            inactive_validators,\n        }\n    }\n\n    /// Returns the set of equivocators.\n    pub fn equivocators(&self) -> &[VID] {\n        &self.equivocators\n    }\n\n    /// Returns rewards for finalization of earlier blocks.\n    ///\n    /// This is a measure of the value of each validator's contribution to consensus, in\n    /// fractions of the configured maximum block reward.\n    pub fn rewards(&self) -> &BTreeMap<VID, u64> {\n        &self.rewards\n    }\n\n    /// Returns validators that haven't produced any unit during the era.\n    pub fn inactive_validators(&self) -> &[VID] {\n        &self.inactive_validators\n    }\n\n    /// Returns a cryptographic hash of the `EraReport`.\n    pub fn hash(&self) -> Digest\n    where\n        VID: ToBytes,\n    {\n        // Helper function to hash slice of validators\n        fn hash_slice_of_validators<VID>(slice_of_validators: &[VID]) -> Digest\n        where\n            VID: ToBytes,\n        {\n            Digest::hash_merkle_tree(slice_of_validators.iter().map(|validator| {\n                Digest::hash(validator.to_bytes().expect(\"Could not serialize validator\"))\n            }))\n        }\n\n        // Pattern match here leverages compiler to ensure every field is accounted for\n        let EraReport {\n            equivocators,\n            inactive_validators,\n            
rewards,\n        } = self;\n\n        let hashed_equivocators = hash_slice_of_validators(equivocators);\n        let hashed_inactive_validators = hash_slice_of_validators(inactive_validators);\n        let hashed_rewards = Digest::hash_btree_map(rewards).expect(\"Could not hash rewards\");\n\n        Digest::hash_slice_rfold(&[\n            hashed_equivocators,\n            hashed_rewards,\n            hashed_inactive_validators,\n        ])\n    }\n}\n\nimpl<VID: Ord> Default for EraReport<VID> {\n    fn default() -> Self {\n        EraReport {\n            equivocators: vec![],\n            rewards: BTreeMap::new(),\n            inactive_validators: vec![],\n        }\n    }\n}\n\nimpl<VID: Display> Display for EraReport<VID> {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        let slashings = DisplayIter::new(&self.equivocators);\n        let rewards = DisplayIter::new(\n            self.rewards\n                .iter()\n                .map(|(public_key, amount)| format!(\"{}: {}\", public_key, amount)),\n        );\n        write!(f, \"era end: slash {}, reward {}\", slashings, rewards)\n    }\n}\n\nimpl<VID: ToBytes> ToBytes for EraReport<VID> {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.equivocators.write_bytes(writer)?;\n        self.rewards.write_bytes(writer)?;\n        self.inactive_validators.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.equivocators.serialized_length()\n            + self.rewards.serialized_length()\n            + self.inactive_validators.serialized_length()\n    }\n}\n\nimpl<VID: FromBytes + Ord> FromBytes for EraReport<VID> {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (equivocators, 
remainder) = Vec::<VID>::from_bytes(bytes)?;\n        let (rewards, remainder) = BTreeMap::<VID, u64>::from_bytes(remainder)?;\n        let (inactive_validators, remainder) = Vec::<VID>::from_bytes(remainder)?;\n        let era_report = EraReport {\n            equivocators,\n            rewards,\n            inactive_validators,\n        };\n        Ok((era_report, remainder))\n    }\n}\n\nimpl EraReport<PublicKey> {\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    #[cfg(feature = \"json-schema\")]\n    pub fn example() -> &'static Self {\n        &ERA_REPORT\n    }\n\n    /// Returns a random `EraReport`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        use rand::Rng;\n\n        let equivocators_count = rng.gen_range(0..5);\n        let rewards_count = rng.gen_range(0..5);\n        let inactive_count = rng.gen_range(0..5);\n        let equivocators = iter::repeat_with(|| PublicKey::random(rng))\n            .take(equivocators_count)\n            .collect();\n        let rewards = iter::repeat_with(|| {\n            let pub_key = PublicKey::random(rng);\n            let reward = rng.gen_range(1..(1_000_000_000 + 1));\n            (pub_key, reward)\n        })\n        .take(rewards_count)\n        .collect();\n        let inactive_validators = iter::repeat_with(|| PublicKey::random(rng))\n            .take(inactive_count)\n            .collect();\n        EraReport::new(equivocators, rewards, inactive_validators)\n    }\n}\n\nstruct EraRewardsLabels;\n\nimpl KeyValueLabels for EraRewardsLabels {\n    const KEY: &'static str = \"validator\";\n    const VALUE: &'static str = \"amount\";\n}\n\n#[cfg(feature = \"json-schema\")]\nimpl KeyValueJsonSchema for EraRewardsLabels {\n    const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some(\"EraReward\");\n    const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> = Some(\n        \"A validator's public key paired with a 
measure of the value of its \\\n        contribution to consensus, as a fraction of the configured maximum block reward.\",\n    );\n    const JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> = Some(\"The validator's public key.\");\n    const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> = Some(\"The reward amount.\");\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        let era_report = EraReport::random(rng);\n        bytesrepr::test_serialization_roundtrip(&era_report);\n    }\n}\n"
  },
  {
    "path": "types/src/block/era_end/era_end_v1.rs",
    "content": "mod era_report;\n\nuse alloc::{collections::BTreeMap, vec::Vec};\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse once_cell::sync::Lazy;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n#[cfg(feature = \"json-schema\")]\nuse serde_map_to_array::KeyValueJsonSchema;\nuse serde_map_to_array::{BTreeMapToArray, KeyValueLabels};\n\n#[cfg(feature = \"json-schema\")]\nuse crate::SecretKey;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    PublicKey, U512,\n};\npub use era_report::EraReport;\n\n#[cfg(feature = \"json-schema\")]\nstatic ERA_END_V1: Lazy<EraEndV1> = Lazy::new(|| {\n    let secret_key_1 = SecretKey::ed25519_from_bytes([0; 32]).unwrap();\n    let public_key_1 = PublicKey::from(&secret_key_1);\n    let next_era_validator_weights = {\n        let mut next_era_validator_weights: BTreeMap<PublicKey, U512> = BTreeMap::new();\n        next_era_validator_weights.insert(public_key_1, U512::from(123));\n        next_era_validator_weights.insert(\n            PublicKey::from(\n                &SecretKey::ed25519_from_bytes([5u8; SecretKey::ED25519_LENGTH]).unwrap(),\n            ),\n            U512::from(456),\n        );\n        next_era_validator_weights.insert(\n            PublicKey::from(\n                &SecretKey::ed25519_from_bytes([6u8; SecretKey::ED25519_LENGTH]).unwrap(),\n            ),\n            U512::from(789),\n        );\n        next_era_validator_weights\n    };\n\n    let era_report = EraReport::example().clone();\n    EraEndV1::new(era_report, next_era_validator_weights)\n});\n\n/// Information related to the end of an era, and validator weights for the following era.\n#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub 
struct EraEndV1 {\n    /// Equivocation, reward and validator inactivity information.\n    pub(super) era_report: EraReport<PublicKey>,\n    /// The validators for the upcoming era and their respective weights.\n    #[serde(with = \"BTreeMapToArray::<PublicKey, U512, NextEraValidatorLabels>\")]\n    pub(super) next_era_validator_weights: BTreeMap<PublicKey, U512>,\n}\n\nimpl EraEndV1 {\n    /// Returns equivocation, reward and validator inactivity information.\n    pub fn era_report(&self) -> &EraReport<PublicKey> {\n        &self.era_report\n    }\n\n    /// Returns the set of equivocators.\n    pub fn equivocators(&self) -> &[PublicKey] {\n        self.era_report.equivocators()\n    }\n\n    /// Returns the validators that haven't produced any unit during the era.\n    pub fn inactive_validators(&self) -> &[PublicKey] {\n        self.era_report.inactive_validators()\n    }\n\n    /// Returns rewards for finalization of earlier blocks.\n    pub fn rewards(&self) -> &BTreeMap<PublicKey, u64> {\n        self.era_report.rewards()\n    }\n\n    /// Returns the validators for the upcoming era and their respective weights.\n    pub fn next_era_validator_weights(&self) -> &BTreeMap<PublicKey, U512> {\n        &self.next_era_validator_weights\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    pub fn new(\n        era_report: EraReport<PublicKey>,\n        next_era_validator_weights: BTreeMap<PublicKey, U512>,\n    ) -> Self {\n        EraEndV1 {\n            era_report,\n            next_era_validator_weights,\n        }\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    #[cfg(feature = \"json-schema\")]\n    pub fn example() -> &'static Self {\n        &ERA_END_V1\n    }\n}\n\nimpl ToBytes for EraEndV1 {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.era_report.write_bytes(writer)?;\n        
self.next_era_validator_weights.write_bytes(writer)?;\n\n        Ok(())\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.era_report.serialized_length() + self.next_era_validator_weights.serialized_length()\n    }\n}\n\nimpl FromBytes for EraEndV1 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (era_report, remainder) = EraReport::<PublicKey>::from_bytes(bytes)?;\n        let (next_era_validator_weights, remainder) =\n            BTreeMap::<PublicKey, U512>::from_bytes(remainder)?;\n        let era_end = EraEndV1 {\n            era_report,\n            next_era_validator_weights,\n        };\n        Ok((era_end, remainder))\n    }\n}\n\nimpl Display for EraEndV1 {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(formatter, \"era end: {} \", self.era_report)\n    }\n}\n\nstruct NextEraValidatorLabels;\n\nimpl KeyValueLabels for NextEraValidatorLabels {\n    const KEY: &'static str = \"validator\";\n    const VALUE: &'static str = \"weight\";\n}\n\n#[cfg(feature = \"json-schema\")]\nimpl KeyValueJsonSchema for NextEraValidatorLabels {\n    const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some(\"ValidatorWeight\");\n    const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> = Some(\n        \"A validator's public key paired with its weight, i.e. the total number of \\\n        motes staked by it and its delegators.\",\n    );\n    const JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> = Some(\"The validator's public key.\");\n    const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> = Some(\"The validator's weight.\");\n}\n"
  },
  {
    "path": "types/src/block/era_end/era_end_v2.rs",
    "content": "use alloc::{collections::BTreeMap, vec::Vec};\nuse core::fmt;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse once_cell::sync::Lazy;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n#[cfg(feature = \"json-schema\")]\nuse serde_map_to_array::KeyValueJsonSchema;\nuse serde_map_to_array::{BTreeMapToArray, KeyValueLabels};\n\n#[cfg(feature = \"json-schema\")]\nuse crate::SecretKey;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    DisplayIter, PublicKey, U512,\n};\n\n#[cfg(feature = \"json-schema\")]\nstatic ERA_END_V2: Lazy<EraEndV2> = Lazy::new(|| {\n    let secret_key_1 = SecretKey::ed25519_from_bytes([0; 32]).unwrap();\n    let public_key_1 = PublicKey::from(&secret_key_1);\n    let secret_key_3 = SecretKey::ed25519_from_bytes([2; 32]).unwrap();\n    let public_key_3 = PublicKey::from(&secret_key_3);\n\n    let equivocators = vec![public_key_1.clone()];\n    let inactive_validators = vec![public_key_3];\n    let next_era_validator_weights = {\n        let mut next_era_validator_weights: BTreeMap<PublicKey, U512> = BTreeMap::new();\n        next_era_validator_weights.insert(public_key_1, U512::from(123));\n        next_era_validator_weights.insert(\n            PublicKey::from(\n                &SecretKey::ed25519_from_bytes([5u8; SecretKey::ED25519_LENGTH]).unwrap(),\n            ),\n            U512::from(456),\n        );\n        next_era_validator_weights.insert(\n            PublicKey::from(\n                &SecretKey::ed25519_from_bytes([6u8; SecretKey::ED25519_LENGTH]).unwrap(),\n            ),\n            U512::from(789),\n        );\n        next_era_validator_weights\n    };\n    let rewards = Default::default();\n\n    EraEndV2::new(\n        equivocators,\n        inactive_validators,\n        next_era_validator_weights,\n        rewards,\n        1u8,\n    )\n});\n\n/// Information related to the end of an era, and 
validator weights for the following era.\n#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct EraEndV2 {\n    /// The set of equivocators.\n    pub(super) equivocators: Vec<PublicKey>,\n    /// Validators that haven't produced any unit during the era.\n    pub(super) inactive_validators: Vec<PublicKey>,\n    /// The validators for the upcoming era and their respective weights.\n    #[serde(with = \"BTreeMapToArray::<PublicKey, U512, NextEraValidatorLabels>\")]\n    pub(super) next_era_validator_weights: BTreeMap<PublicKey, U512>,\n    /// The rewards distributed to the validators.\n    pub(super) rewards: BTreeMap<PublicKey, Vec<U512>>,\n    pub(super) next_era_gas_price: u8,\n}\n\nimpl EraEndV2 {\n    /// Returns the set of equivocators.\n    pub fn equivocators(&self) -> &[PublicKey] {\n        &self.equivocators\n    }\n\n    /// Returns the validators that haven't produced any unit during the era.\n    pub fn inactive_validators(&self) -> &[PublicKey] {\n        &self.inactive_validators\n    }\n\n    /// Returns the validators for the upcoming era and their respective weights.\n    pub fn next_era_validator_weights(&self) -> &BTreeMap<PublicKey, U512> {\n        &self.next_era_validator_weights\n    }\n\n    /// Returns the rewards distributed to the validators.\n    pub fn rewards(&self) -> &BTreeMap<PublicKey, Vec<U512>> {\n        &self.rewards\n    }\n\n    /// Returns the next era gas price.\n    pub fn next_era_gas_price(&self) -> u8 {\n        self.next_era_gas_price\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    pub fn new(\n        equivocators: Vec<PublicKey>,\n        inactive_validators: Vec<PublicKey>,\n        next_era_validator_weights: BTreeMap<PublicKey, U512>,\n        rewards: BTreeMap<PublicKey, Vec<U512>>,\n        
next_era_gas_price: u8,\n    ) -> Self {\n        EraEndV2 {\n            equivocators,\n            inactive_validators,\n            next_era_validator_weights,\n            rewards,\n            next_era_gas_price,\n        }\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    #[cfg(feature = \"json-schema\")]\n    pub fn example() -> &'static Self {\n        &ERA_END_V2\n    }\n\n    /// Returns a random `EraEndV2`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut crate::testing::TestRng) -> Self {\n        use rand::Rng;\n\n        let equivocators_count = rng.gen_range(0..5);\n        let inactive_count = rng.gen_range(0..5);\n        let next_era_validator_weights_count = rng.gen_range(0..5);\n        let rewards_count = rng.gen_range(0..5);\n\n        let equivocators = core::iter::repeat_with(|| PublicKey::random(rng))\n            .take(equivocators_count)\n            .collect();\n\n        let inactive_validators = core::iter::repeat_with(|| PublicKey::random(rng))\n            .take(inactive_count)\n            .collect();\n\n        let next_era_validator_weights = core::iter::repeat_with(|| {\n            let pub_key = PublicKey::random(rng);\n            let reward = rng.gen_range(1..=1_000_000_000);\n            (pub_key, U512::from(reward))\n        })\n        .take(next_era_validator_weights_count)\n        .collect();\n\n        let rewards = core::iter::repeat_with(|| {\n            let pub_key = PublicKey::random(rng);\n            let mut rewards = vec![U512::from(rng.gen_range(1..=1_000_000_000 + 1))];\n            if rng.gen_bool(0.2) {\n                rewards.push(U512::from(rng.gen_range(1..=1_000_000_000 + 1)));\n            };\n            (pub_key, rewards)\n        })\n        .take(rewards_count)\n        .collect();\n\n        Self::new(\n            equivocators,\n            inactive_validators,\n            next_era_validator_weights,\n            
rewards,\n            1u8,\n        )\n    }\n}\n\nimpl ToBytes for EraEndV2 {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        let EraEndV2 {\n            equivocators,\n            inactive_validators,\n            next_era_validator_weights,\n            rewards,\n            next_era_gas_price,\n        } = self;\n\n        equivocators.write_bytes(writer)?;\n        inactive_validators.write_bytes(writer)?;\n        next_era_validator_weights.write_bytes(writer)?;\n        rewards.write_bytes(writer)?;\n        next_era_gas_price.write_bytes(writer)?;\n\n        Ok(())\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        let EraEndV2 {\n            equivocators,\n            inactive_validators,\n            next_era_validator_weights,\n            rewards,\n            next_era_gas_price,\n        } = self;\n\n        equivocators.serialized_length()\n            + inactive_validators.serialized_length()\n            + next_era_validator_weights.serialized_length()\n            + rewards.serialized_length()\n            + next_era_gas_price.serialized_length()\n    }\n}\n\nimpl FromBytes for EraEndV2 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (equivocators, bytes) = Vec::from_bytes(bytes)?;\n        let (inactive_validators, bytes) = Vec::from_bytes(bytes)?;\n        let (next_era_validator_weights, bytes) = BTreeMap::from_bytes(bytes)?;\n        let (rewards, bytes) = BTreeMap::from_bytes(bytes)?;\n        let (next_era_gas_price, bytes) = u8::from_bytes(bytes)?;\n        let era_end = EraEndV2 {\n            equivocators,\n            inactive_validators,\n            next_era_validator_weights,\n            rewards,\n            next_era_gas_price,\n        };\n\n   
     Ok((era_end, bytes))\n    }\n}\n\nimpl fmt::Display for EraEndV2 {\n    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {\n        let slashings = DisplayIter::new(&self.equivocators);\n        let rewards = DisplayIter::new(\n            self.rewards\n                .iter()\n                .map(|(public_key, amounts)| format!(\"{}: {:?}\", public_key, amounts)),\n        );\n\n        write!(\n            formatter,\n            \"era end: slash {}, reward {}\",\n            slashings, rewards\n        )\n    }\n}\n\nstruct NextEraValidatorLabels;\n\nimpl KeyValueLabels for NextEraValidatorLabels {\n    const KEY: &'static str = \"validator\";\n    const VALUE: &'static str = \"weight\";\n}\n\n#[cfg(feature = \"json-schema\")]\nimpl KeyValueJsonSchema for NextEraValidatorLabels {\n    const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some(\"ValidatorWeight\");\n    const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> = Some(\n        \"A validator's public key paired with its weight, i.e. the total number of \\\n        motes staked by it and its delegators.\",\n    );\n    const JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> = Some(\"The validator's public key.\");\n    const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> = Some(\"The validator's weight.\");\n}\n"
  },
  {
    "path": "types/src/block/era_end.rs",
    "content": "mod era_end_v1;\nmod era_end_v2;\n\nuse alloc::{collections::BTreeMap, vec::Vec};\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    PublicKey, Rewards, U512,\n};\npub use era_end_v1::{EraEndV1, EraReport};\npub use era_end_v2::EraEndV2;\n\nconst TAG_LENGTH: usize = U8_SERIALIZED_LENGTH;\n\n/// Tag for block body v1.\npub const ERA_END_V1_TAG: u8 = 0;\n/// Tag for block body v2.\npub const ERA_END_V2_TAG: u8 = 1;\n\n/// The versioned era end of a block, storing the data for a switch block.\n/// It encapsulates different variants of the EraEnd struct.\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(any(feature = \"testing\", test), derive(PartialEq))]\n#[derive(Clone, Hash, Serialize, Deserialize, Debug)]\npub enum EraEnd {\n    /// The legacy, initial version of the body portion of a block.\n    V1(EraEndV1),\n    /// The version 2 of the body portion of a block, which includes the\n    /// `past_finality_signatures`.\n    V2(EraEndV2),\n}\n\nimpl EraEnd {\n    /// Returns the equivocators.\n    pub fn equivocators(&self) -> &[PublicKey] {\n        match self {\n            EraEnd::V1(v1) => v1.equivocators(),\n            EraEnd::V2(v2) => v2.equivocators(),\n        }\n    }\n\n    /// Returns the inactive validators.\n    pub fn inactive_validators(&self) -> &[PublicKey] {\n        match self {\n            EraEnd::V1(v1) => v1.inactive_validators(),\n            EraEnd::V2(v2) => v2.inactive_validators(),\n        }\n    }\n\n    /// Returns the weights of validators in the upcoming era.\n    pub fn next_era_validator_weights(&self) -> &BTreeMap<PublicKey, U512> {\n        match self {\n            EraEnd::V1(v1) => v1.next_era_validator_weights(),\n            EraEnd::V2(v2) => v2.next_era_validator_weights(),\n        }\n    }\n\n    /// Returns the 
rewards.\n    pub fn rewards(&self) -> Rewards {\n        match self {\n            EraEnd::V1(v1) => Rewards::V1(v1.rewards()),\n            EraEnd::V2(v2) => Rewards::V2(v2.rewards()),\n        }\n    }\n}\n\nimpl Display for EraEnd {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            EraEnd::V1(v1) => Display::fmt(&v1, formatter),\n            EraEnd::V2(v2) => Display::fmt(&v2, formatter),\n        }\n    }\n}\n\nimpl From<EraEndV1> for EraEnd {\n    fn from(era_end: EraEndV1) -> Self {\n        EraEnd::V1(era_end)\n    }\n}\n\nimpl From<EraEndV2> for EraEnd {\n    fn from(era_end: EraEndV2) -> Self {\n        EraEnd::V2(era_end)\n    }\n}\n\nimpl ToBytes for EraEnd {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        match self {\n            EraEnd::V1(v1) => {\n                buffer.insert(0, ERA_END_V1_TAG);\n                buffer.extend(v1.to_bytes()?);\n            }\n            EraEnd::V2(v2) => {\n                buffer.insert(0, ERA_END_V2_TAG);\n                buffer.extend(v2.to_bytes()?);\n            }\n        }\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        TAG_LENGTH\n            + match self {\n                EraEnd::V1(v1) => v1.serialized_length(),\n                EraEnd::V2(v2) => v2.serialized_length(),\n            }\n    }\n}\n\nimpl FromBytes for EraEnd {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            ERA_END_V1_TAG => {\n                let (body, remainder): (EraEndV1, _) = FromBytes::from_bytes(remainder)?;\n                Ok((Self::V1(body), remainder))\n            }\n            ERA_END_V2_TAG => {\n                let (body, remainder): (EraEndV2, _) = FromBytes::from_bytes(remainder)?;\n                Ok((Self::V2(body), remainder))\n         
   }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/block/finality_signature/finality_signature_v1.rs",
    "content": "use alloc::vec::Vec;\nuse core::{\n    cmp::Ordering,\n    fmt::{self, Display, Formatter},\n    hash::{Hash, Hasher},\n};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"once_cell\", test))]\nuse once_cell::sync::OnceCell;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{crypto, BlockHash, EraId, PublicKey, SecretKey, Signature};\n\n/// A validator's signature of a block, confirming it is finalized.\n///\n/// Clients and joining nodes should wait until the signers' combined weight exceeds the fault\n/// tolerance threshold before accepting the block as finalized.\n#[derive(Clone, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"A validator's signature of a block, confirming it is finalized.\")\n)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct FinalitySignatureV1 {\n    /// The block hash of the associated block.\n    pub(crate) block_hash: BlockHash,\n    /// The era in which the associated block was created.\n    pub(crate) era_id: EraId,\n    /// The signature over the block hash of the associated block.\n    pub(crate) signature: Signature,\n    /// The public key of the signing validator.\n    pub(crate) public_key: PublicKey,\n    #[serde(skip)]\n    #[cfg_attr(\n        all(any(feature = \"once_cell\", test), feature = \"datasize\"),\n        data_size(skip)\n    )]\n    #[cfg(any(feature = \"once_cell\", test))]\n    pub(crate) is_verified: OnceCell<Result<(), crypto::Error>>,\n}\n\nimpl FinalitySignatureV1 {\n    /// Constructs a new `FinalitySignatureV1`.\n    pub fn create(block_hash: BlockHash, era_id: EraId, secret_key: &SecretKey) -> Self {\n        let bytes = Self::bytes_to_sign(&block_hash, era_id);\n        let public_key = PublicKey::from(secret_key);\n    
    let signature = crypto::sign(bytes, secret_key, &public_key);\n        FinalitySignatureV1 {\n            block_hash,\n            era_id,\n            signature,\n            public_key,\n            #[cfg(any(feature = \"once_cell\", test))]\n            is_verified: OnceCell::with_value(Ok(())),\n        }\n    }\n\n    /// Returns the block hash of the associated block.\n    pub fn block_hash(&self) -> &BlockHash {\n        &self.block_hash\n    }\n\n    /// Returns the era in which the associated block was created.\n    pub fn era_id(&self) -> EraId {\n        self.era_id\n    }\n\n    /// Returns the signature over the block hash of the associated block.\n    pub fn signature(&self) -> &Signature {\n        &self.signature\n    }\n\n    /// Returns the public key of the signing validator.\n    pub fn public_key(&self) -> &PublicKey {\n        &self.public_key\n    }\n\n    /// Returns `Ok` if the signature is cryptographically valid.\n    pub fn is_verified(&self) -> Result<(), crypto::Error> {\n        #[cfg(any(feature = \"once_cell\", test))]\n        return self.is_verified.get_or_init(|| self.verify()).clone();\n\n        #[cfg(not(any(feature = \"once_cell\", test)))]\n        self.verify()\n    }\n\n    /// Constructs a new `FinalitySignatureV1`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn new(\n        block_hash: BlockHash,\n        era_id: EraId,\n        signature: Signature,\n        public_key: PublicKey,\n    ) -> Self {\n        FinalitySignatureV1 {\n            block_hash,\n            era_id,\n            signature,\n            public_key,\n            #[cfg(any(feature = \"once_cell\", test))]\n            is_verified: OnceCell::new(),\n        }\n    }\n\n    /// Returns a random `FinalitySignatureV1`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        FinalitySignatureV1::random_for_block(BlockHash::random(rng), EraId::random(rng), rng)\n    }\n\n    /// Returns a 
random `FinalitySignatureV1` for the provided `block_hash` and `era_id`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random_for_block(block_hash: BlockHash, era_id: EraId, rng: &mut TestRng) -> Self {\n        let secret_key = SecretKey::random(rng);\n        FinalitySignatureV1::create(block_hash, era_id, &secret_key)\n    }\n\n    fn bytes_to_sign(block_hash: &BlockHash, era_id: EraId) -> Vec<u8> {\n        let mut bytes = block_hash.inner().into_vec();\n        bytes.extend_from_slice(&era_id.to_le_bytes());\n        bytes\n    }\n\n    fn verify(&self) -> Result<(), crypto::Error> {\n        let bytes = Self::bytes_to_sign(&self.block_hash, self.era_id);\n        crypto::verify(bytes, &self.signature, &self.public_key)\n    }\n}\n\nimpl Hash for FinalitySignatureV1 {\n    fn hash<H: Hasher>(&self, state: &mut H) {\n        // Ensure we initialize self.is_verified field.\n        let is_verified = self.is_verified().is_ok();\n        // Destructure to make sure we don't accidentally omit fields.\n        #[cfg(any(feature = \"once_cell\", test))]\n        let FinalitySignatureV1 {\n            block_hash,\n            era_id,\n            signature,\n            public_key,\n            is_verified: _,\n        } = self;\n        #[cfg(not(any(feature = \"once_cell\", test)))]\n        let FinalitySignatureV1 {\n            block_hash,\n            era_id,\n            signature,\n            public_key,\n        } = self;\n        block_hash.hash(state);\n        era_id.hash(state);\n        signature.hash(state);\n        public_key.hash(state);\n        is_verified.hash(state);\n    }\n}\n\nimpl PartialEq for FinalitySignatureV1 {\n    fn eq(&self, other: &FinalitySignatureV1) -> bool {\n        // Ensure we initialize self.is_verified field.\n        let is_verified = self.is_verified().is_ok();\n        // Destructure to make sure we don't accidentally omit fields.\n        #[cfg(any(feature = \"once_cell\", test))]\n        let 
FinalitySignatureV1 {\n            block_hash,\n            era_id,\n            signature,\n            public_key,\n            is_verified: _,\n        } = self;\n        #[cfg(not(any(feature = \"once_cell\", test)))]\n        let FinalitySignatureV1 {\n            block_hash,\n            era_id,\n            signature,\n            public_key,\n        } = self;\n        *block_hash == other.block_hash\n            && *era_id == other.era_id\n            && *signature == other.signature\n            && *public_key == other.public_key\n            && is_verified == other.is_verified().is_ok()\n    }\n}\n\nimpl Ord for FinalitySignatureV1 {\n    fn cmp(&self, other: &FinalitySignatureV1) -> Ordering {\n        // Ensure we initialize self.is_verified field.\n        let is_verified = self.is_verified().is_ok();\n        // Destructure to make sure we don't accidentally omit fields.\n        #[cfg(any(feature = \"once_cell\", test))]\n        let FinalitySignatureV1 {\n            block_hash,\n            era_id,\n            signature,\n            public_key,\n            is_verified: _,\n        } = self;\n        #[cfg(not(any(feature = \"once_cell\", test)))]\n        let FinalitySignatureV1 {\n            block_hash,\n            era_id,\n            signature,\n            public_key,\n        } = self;\n        block_hash\n            .cmp(&other.block_hash)\n            .then_with(|| era_id.cmp(&other.era_id))\n            .then_with(|| signature.cmp(&other.signature))\n            .then_with(|| public_key.cmp(&other.public_key))\n            .then_with(|| is_verified.cmp(&other.is_verified().is_ok()))\n    }\n}\n\nimpl PartialOrd for FinalitySignatureV1 {\n    fn partial_cmp(&self, other: &FinalitySignatureV1) -> Option<Ordering> {\n        Some(self.cmp(other))\n    }\n}\n\nimpl Display for FinalitySignatureV1 {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            formatter,\n            \"finality signature 
for {}, from {}\",\n            self.block_hash, self.public_key\n        )\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::TestBlockBuilder;\n\n    #[test]\n    fn finality_signature() {\n        let rng = &mut TestRng::new();\n        let block = TestBlockBuilder::new().build(rng);\n        // Signature should be over both block hash and era id.\n        let secret_key = SecretKey::random(rng);\n        let public_key = PublicKey::from(&secret_key);\n        let era_id = EraId::from(1);\n        let finality_signature = FinalitySignatureV1::create(*block.hash(), era_id, &secret_key);\n        finality_signature.is_verified().unwrap();\n        let signature = finality_signature.signature;\n        // Verify that signature includes era id.\n        let invalid_finality_signature = FinalitySignatureV1 {\n            block_hash: *block.hash(),\n            era_id: EraId::from(2),\n            signature,\n            public_key,\n            is_verified: OnceCell::new(),\n        };\n        // Test should fail b/c `signature` is over `era_id=1` and here we're using `era_id=2`.\n        assert!(invalid_finality_signature.is_verified().is_err());\n    }\n}\n"
  },
  {
    "path": "types/src/block/finality_signature/finality_signature_v2.rs",
    "content": "use alloc::vec::Vec;\nuse core::{\n    cmp::Ordering,\n    fmt::{self, Display, Formatter},\n    hash::{Hash, Hasher},\n};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"once_cell\", test))]\nuse once_cell::sync::OnceCell;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{crypto, BlockHash, ChainNameDigest, EraId, PublicKey, SecretKey, Signature};\n\n/// A validator's signature of a block, confirming it is finalized.\n///\n/// Clients and joining nodes should wait until the signers' combined weight exceeds the fault\n/// tolerance threshold before accepting the block as finalized.\n#[derive(Clone, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"A validator's signature of a block, confirming it is finalized.\")\n)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct FinalitySignatureV2 {\n    /// The block hash of the associated block.\n    pub(crate) block_hash: BlockHash,\n    /// The height of the associated block.\n    pub(crate) block_height: u64,\n    /// The era in which the associated block was created.\n    pub(crate) era_id: EraId,\n    /// The hash of the chain name of the associated block.\n    pub(crate) chain_name_hash: ChainNameDigest,\n    /// The signature over the block hash of the associated block.\n    pub(crate) signature: Signature,\n    /// The public key of the signing validator.\n    pub(crate) public_key: PublicKey,\n    #[serde(skip)]\n    #[cfg_attr(\n        all(any(feature = \"once_cell\", test), feature = \"datasize\"),\n        data_size(skip)\n    )]\n    #[cfg(any(feature = \"once_cell\", test))]\n    pub(crate) is_verified: OnceCell<Result<(), crypto::Error>>,\n}\n\nimpl 
FinalitySignatureV2 {\n    /// Constructs a new `FinalitySignatureV2`.\n    pub fn create(\n        block_hash: BlockHash,\n        block_height: u64,\n        era_id: EraId,\n        chain_name_hash: ChainNameDigest,\n        secret_key: &SecretKey,\n    ) -> Self {\n        let bytes = Self::bytes_to_sign(block_hash, block_height, era_id, chain_name_hash);\n        let public_key = PublicKey::from(secret_key);\n        let signature = crypto::sign(bytes, secret_key, &public_key);\n        FinalitySignatureV2 {\n            block_hash,\n            block_height,\n            era_id,\n            chain_name_hash,\n            signature,\n            public_key,\n            #[cfg(any(feature = \"once_cell\", test))]\n            is_verified: OnceCell::with_value(Ok(())),\n        }\n    }\n\n    /// Returns the block hash of the associated block.\n    pub fn block_hash(&self) -> &BlockHash {\n        &self.block_hash\n    }\n\n    /// Returns the height of the associated block.\n    pub fn block_height(&self) -> u64 {\n        self.block_height\n    }\n\n    /// Returns the era in which the associated block was created.\n    pub fn era_id(&self) -> EraId {\n        self.era_id\n    }\n\n    /// Returns the hash of the chain name of the associated block.\n    pub fn chain_name_hash(&self) -> ChainNameDigest {\n        self.chain_name_hash\n    }\n\n    /// Returns the signature over the block hash of the associated block.\n    pub fn signature(&self) -> &Signature {\n        &self.signature\n    }\n\n    /// Returns the public key of the signing validator.\n    pub fn public_key(&self) -> &PublicKey {\n        &self.public_key\n    }\n\n    /// Returns `Ok` if the signature is cryptographically valid.\n    pub fn is_verified(&self) -> Result<(), crypto::Error> {\n        #[cfg(any(feature = \"once_cell\", test))]\n        return self.is_verified.get_or_init(|| self.verify()).clone();\n\n        #[cfg(not(any(feature = \"once_cell\", test)))]\n        self.verify()\n 
   }\n\n    /// Constructs a new `FinalitySignatureV2`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn new(\n        block_hash: BlockHash,\n        block_height: u64,\n        era_id: EraId,\n        chain_name_hash: ChainNameDigest,\n        signature: Signature,\n        public_key: PublicKey,\n    ) -> Self {\n        FinalitySignatureV2 {\n            block_hash,\n            block_height,\n            era_id,\n            chain_name_hash,\n            signature,\n            public_key,\n            #[cfg(any(feature = \"once_cell\", test))]\n            is_verified: OnceCell::new(),\n        }\n    }\n\n    /// Returns a random `FinalitySignatureV2`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        FinalitySignatureV2::random_for_block(\n            BlockHash::random(rng),\n            rng.gen(),\n            EraId::random(rng),\n            ChainNameDigest::random(rng),\n            rng,\n        )\n    }\n\n    /// Returns a random `FinalitySignatureV2` for the provided `block_hash`, `block_height`,\n    /// `era_id`, and `chain_name_hash`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random_for_block(\n        block_hash: BlockHash,\n        block_height: u64,\n        era_id: EraId,\n        chain_name_hash: ChainNameDigest,\n        rng: &mut TestRng,\n    ) -> Self {\n        let secret_key = SecretKey::random(rng);\n        FinalitySignatureV2::create(\n            block_hash,\n            block_height,\n            era_id,\n            chain_name_hash,\n            &secret_key,\n        )\n    }\n\n    fn bytes_to_sign(\n        block_hash: BlockHash,\n        block_height: u64,\n        era_id: EraId,\n        chain_name_hash: ChainNameDigest,\n    ) -> Vec<u8> {\n        let mut bytes = block_hash.inner().into_vec();\n        bytes.extend_from_slice(&block_height.to_le_bytes());\n        bytes.extend_from_slice(&era_id.to_le_bytes());\n        
bytes.extend_from_slice(chain_name_hash.inner().as_ref());\n        bytes\n    }\n\n    fn verify(&self) -> Result<(), crypto::Error> {\n        let bytes = Self::bytes_to_sign(\n            self.block_hash,\n            self.block_height,\n            self.era_id,\n            self.chain_name_hash,\n        );\n        crypto::verify(bytes, &self.signature, &self.public_key)\n    }\n}\n\nimpl Hash for FinalitySignatureV2 {\n    fn hash<H: Hasher>(&self, state: &mut H) {\n        // Ensure we initialize self.is_verified field.\n        let is_verified = self.is_verified().is_ok();\n        // Destructure to make sure we don't accidentally omit fields.\n        #[cfg(any(feature = \"once_cell\", test))]\n        let FinalitySignatureV2 {\n            block_hash,\n            block_height,\n            era_id,\n            chain_name_hash,\n            signature,\n            public_key,\n            is_verified: _,\n        } = self;\n        #[cfg(not(any(feature = \"once_cell\", test)))]\n        let FinalitySignatureV2 {\n            block_hash,\n            block_height,\n            era_id,\n            chain_name_hash,\n            signature,\n            public_key,\n        } = self;\n        block_hash.hash(state);\n        block_height.hash(state);\n        era_id.hash(state);\n        chain_name_hash.hash(state);\n        signature.hash(state);\n        public_key.hash(state);\n        is_verified.hash(state);\n    }\n}\n\nimpl PartialEq for FinalitySignatureV2 {\n    fn eq(&self, other: &FinalitySignatureV2) -> bool {\n        // Ensure we initialize self.is_verified field.\n        let is_verified = self.is_verified().is_ok();\n        // Destructure to make sure we don't accidentally omit fields.\n        #[cfg(any(feature = \"once_cell\", test))]\n        let FinalitySignatureV2 {\n            block_hash,\n            block_height,\n            era_id,\n            chain_name_hash,\n            signature,\n            public_key,\n            
is_verified: _,\n        } = self;\n        #[cfg(not(any(feature = \"once_cell\", test)))]\n        let FinalitySignatureV2 {\n            block_hash,\n            block_height,\n            era_id,\n            chain_name_hash,\n            signature,\n            public_key,\n        } = self;\n        *block_hash == other.block_hash\n            && *block_height == other.block_height\n            && *era_id == other.era_id\n            && *chain_name_hash == other.chain_name_hash\n            && *signature == other.signature\n            && *public_key == other.public_key\n            && is_verified == other.is_verified().is_ok()\n    }\n}\n\nimpl Ord for FinalitySignatureV2 {\n    fn cmp(&self, other: &FinalitySignatureV2) -> Ordering {\n        // Ensure we initialize self.is_verified field.\n        let is_verified = self.is_verified().is_ok();\n        // Destructure to make sure we don't accidentally omit fields.\n        #[cfg(any(feature = \"once_cell\", test))]\n        let FinalitySignatureV2 {\n            block_hash,\n            block_height,\n            era_id,\n            chain_name_hash,\n            signature,\n            public_key,\n            is_verified: _,\n        } = self;\n        #[cfg(not(any(feature = \"once_cell\", test)))]\n        let FinalitySignatureV2 {\n            block_hash,\n            block_height,\n            era_id,\n            chain_name_hash,\n            signature,\n            public_key,\n        } = self;\n        block_hash\n            .cmp(&other.block_hash)\n            .then_with(|| block_height.cmp(&other.block_height))\n            .then_with(|| era_id.cmp(&other.era_id))\n            .then_with(|| chain_name_hash.cmp(&other.chain_name_hash))\n            .then_with(|| signature.cmp(&other.signature))\n            .then_with(|| public_key.cmp(&other.public_key))\n            .then_with(|| is_verified.cmp(&other.is_verified().is_ok()))\n    }\n}\n\nimpl PartialOrd for FinalitySignatureV2 {\n    fn 
partial_cmp(&self, other: &FinalitySignatureV2) -> Option<Ordering> {\n        Some(self.cmp(other))\n    }\n}\n\nimpl Display for FinalitySignatureV2 {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            formatter,\n            \"finality signature for {}, from {}\",\n            self.block_hash, self.public_key\n        )\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::TestBlockBuilder;\n\n    #[test]\n    fn finality_signature() {\n        let rng = &mut TestRng::new();\n        let block = TestBlockBuilder::new().build(rng);\n        // Signature should be over block hash, block height, era id and chain name hash.\n        let secret_key = SecretKey::random(rng);\n        let era_id = EraId::from(1);\n        let chain_name_hash = ChainNameDigest::from_chain_name(\"example\");\n        let finality_signature = FinalitySignatureV2::create(\n            *block.hash(),\n            block.height(),\n            era_id,\n            chain_name_hash,\n            &secret_key,\n        );\n        finality_signature\n            .is_verified()\n            .expect(\"should have verified\");\n        // Verify that changing era causes verification to fail.\n        let invalid_finality_signature = FinalitySignatureV2 {\n            era_id: EraId::from(2),\n            is_verified: OnceCell::new(),\n            ..finality_signature.clone()\n        };\n        assert!(invalid_finality_signature.is_verified().is_err());\n        // Verify that changing block height causes verification to fail.\n        let invalid_finality_signature = FinalitySignatureV2 {\n            block_height: block.height() + 1,\n            is_verified: OnceCell::new(),\n            ..finality_signature.clone()\n        };\n        assert!(invalid_finality_signature.is_verified().is_err());\n        // Verify that changing chain name hash causes verification to fail.\n        let invalid_finality_signature = 
FinalitySignatureV2 {\n            chain_name_hash: ChainNameDigest::from_chain_name(\"different\"),\n            is_verified: OnceCell::new(),\n            ..finality_signature\n        };\n        assert!(invalid_finality_signature.is_verified().is_err());\n    }\n}\n"
  },
  {
    "path": "types/src/block/finality_signature.rs",
    "content": "mod finality_signature_v1;\nmod finality_signature_v2;\n\npub use finality_signature_v1::FinalitySignatureV1;\npub use finality_signature_v2::FinalitySignatureV2;\n\nuse core::{\n    fmt::{self, Display, Formatter},\n    hash::Hash,\n};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{crypto, BlockHash, EraId, PublicKey, Signature};\n#[cfg(any(feature = \"testing\", test))]\nuse crate::{testing::TestRng, ChainNameDigest};\n\n/// A validator's signature of a block, confirming it is finalized.\n///\n/// Clients and joining nodes should wait until the signers' combined weight exceeds the fault\n/// tolerance threshold before accepting the block as finalized.\n#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Debug)]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"A validator's signature of a block, confirming it is finalized.\")\n)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub enum FinalitySignature {\n    /// Version 1 of the finality signature.\n    V1(FinalitySignatureV1),\n    /// Version 2 of the finality signature.\n    V2(FinalitySignatureV2),\n}\n\nimpl FinalitySignature {\n    /// Returns the block hash of the associated block.\n    pub fn block_hash(&self) -> &BlockHash {\n        match self {\n            FinalitySignature::V1(fs) => fs.block_hash(),\n            FinalitySignature::V2(fs) => fs.block_hash(),\n        }\n    }\n\n    /// Returns the era in which the associated block was created.\n    pub fn era_id(&self) -> EraId {\n        match self {\n            FinalitySignature::V1(fs) => fs.era_id(),\n            FinalitySignature::V2(fs) => fs.era_id(),\n        }\n    }\n\n    /// Returns the public key of the signing validator.\n    pub fn public_key(&self) -> 
&PublicKey {\n        match self {\n            FinalitySignature::V1(fs) => fs.public_key(),\n            FinalitySignature::V2(fs) => fs.public_key(),\n        }\n    }\n\n    /// Returns the signature over the block hash of the associated block.\n    pub fn signature(&self) -> &Signature {\n        match self {\n            FinalitySignature::V1(fs) => fs.signature(),\n            FinalitySignature::V2(fs) => fs.signature(),\n        }\n    }\n\n    /// Returns `Ok` if the signature is cryptographically valid.\n    pub fn is_verified(&self) -> Result<(), crypto::Error> {\n        match self {\n            FinalitySignature::V1(fs) => fs.is_verified(),\n            FinalitySignature::V2(fs) => fs.is_verified(),\n        }\n    }\n\n    /// Returns a random `FinalitySignature`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        let block_hash = BlockHash::random(rng);\n        let block_height = rng.gen();\n        let era_id = EraId::random(rng);\n        let chain_name_hash = ChainNameDigest::random(rng);\n        Self::random_for_block(block_hash, block_height, era_id, chain_name_hash, rng)\n    }\n\n    /// Returns a random `FinalitySignature` for the provided `block_hash` and `era_id`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random_for_block(\n        block_hash: BlockHash,\n        block_height: u64,\n        era_id: EraId,\n        chain_name_hash: ChainNameDigest,\n        rng: &mut TestRng,\n    ) -> Self {\n        if rng.gen_bool(0.5) {\n            FinalitySignature::V1(FinalitySignatureV1::random_for_block(\n                block_hash, era_id, rng,\n            ))\n        } else {\n            FinalitySignature::V2(FinalitySignatureV2::random_for_block(\n                block_hash,\n                block_height,\n                era_id,\n                chain_name_hash,\n                rng,\n            ))\n        }\n    }\n}\n\nimpl From<FinalitySignatureV1> for 
FinalitySignature {\n    fn from(fs: FinalitySignatureV1) -> Self {\n        FinalitySignature::V1(fs)\n    }\n}\n\nimpl From<FinalitySignatureV2> for FinalitySignature {\n    fn from(fs: FinalitySignatureV2) -> Self {\n        FinalitySignature::V2(fs)\n    }\n}\n\nimpl Display for FinalitySignature {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        match self {\n            FinalitySignature::V1(fs) => write!(f, \"{}\", fs),\n            FinalitySignature::V2(fs) => write!(f, \"{}\", fs),\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/block/finality_signature_id.rs",
    "content": "use core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse super::BlockHash;\n#[cfg(doc)]\nuse super::FinalitySignature;\nuse crate::{EraId, PublicKey};\n\n/// An identifier for a [`FinalitySignature`].\n#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct FinalitySignatureId {\n    block_hash: BlockHash,\n    era_id: EraId,\n    public_key: PublicKey,\n}\n\nimpl FinalitySignatureId {\n    /// Returns a new `FinalitySignatureId`.\n    pub fn new(block_hash: BlockHash, era_id: EraId, public_key: PublicKey) -> Self {\n        FinalitySignatureId {\n            block_hash,\n            era_id,\n            public_key,\n        }\n    }\n\n    /// Returns the block hash of the associated block.\n    pub fn block_hash(&self) -> &BlockHash {\n        &self.block_hash\n    }\n\n    /// Returns the era in which the associated block was created.\n    pub fn era_id(&self) -> EraId {\n        self.era_id\n    }\n\n    /// Returns the public key of the signing validator.\n    pub fn public_key(&self) -> &PublicKey {\n        &self.public_key\n    }\n}\n\nimpl Display for FinalitySignatureId {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            formatter,\n            \"finality signature id for {}, from {}\",\n            self.block_hash, self.public_key\n        )\n    }\n}\n"
  },
  {
    "path": "types/src/block/json_compatibility/json_block_with_signatures.rs",
    "content": "use alloc::collections::BTreeMap;\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse once_cell::sync::Lazy;\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\nuse serde_map_to_array::{BTreeMapToArray, KeyValueJsonSchema, KeyValueLabels};\n\nuse crate::{crypto, Block, BlockSignatures, BlockV2, PublicKey, SecretKey, Signature};\n\n#[cfg(feature = \"json-schema\")]\nstatic JSON_BLOCK_WITH_SIGNATURES: Lazy<JsonBlockWithSignatures> = Lazy::new(|| {\n    let block = BlockV2::example().clone();\n    let secret_key = SecretKey::example();\n    let public_key = PublicKey::from(secret_key);\n    let signature = crypto::sign(block.hash.inner(), secret_key, &public_key);\n    let mut proofs = BTreeMap::new();\n    proofs.insert(public_key, signature);\n\n    JsonBlockWithSignatures {\n        block: block.into(),\n        proofs,\n    }\n});\n\n/// A JSON-friendly representation of a block and the signatures for that block.\n#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct JsonBlockWithSignatures {\n    /// The block.\n    pub block: Block,\n    /// The proofs of the block, i.e. 
a collection of validators' signatures of the block hash.\n    #[serde(with = \"BTreeMapToArray::<PublicKey, Signature, BlockProofLabels>\")]\n    pub proofs: BTreeMap<PublicKey, Signature>,\n}\n\nimpl JsonBlockWithSignatures {\n    /// Constructs a new `JsonBlock`.\n    pub fn new(block: Block, maybe_signatures: Option<BlockSignatures>) -> Self {\n        let proofs = maybe_signatures\n            .map(|signatures| signatures.into_proofs())\n            .unwrap_or_default();\n\n        JsonBlockWithSignatures { block, proofs }\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    pub fn example() -> &'static Self {\n        &JSON_BLOCK_WITH_SIGNATURES\n    }\n}\nstruct BlockProofLabels;\n\nimpl KeyValueLabels for BlockProofLabels {\n    const KEY: &'static str = \"public_key\";\n    const VALUE: &'static str = \"signature\";\n}\n\nimpl KeyValueJsonSchema for BlockProofLabels {\n    const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some(\"BlockProof\");\n    const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> = Some(\n        \"A validator's public key paired with a corresponding signature of a given block hash.\",\n    );\n    const JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> = Some(\"The validator's public key.\");\n    const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> = Some(\"The validator's signature.\");\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{\n        testing::TestRng, BlockSignaturesV1, BlockSignaturesV2, ChainNameDigest, TestBlockBuilder,\n    };\n\n    use super::*;\n\n    #[test]\n    fn block_to_and_from_json_block_with_signatures_v1() {\n        let rng = &mut TestRng::new();\n        let block: Block = TestBlockBuilder::new().build(rng).into();\n        let empty_signatures =\n            BlockSignatures::V1(BlockSignaturesV1::new(*block.hash(), block.era_id()));\n        let json_block = JsonBlockWithSignatures::new(block.clone(), Some(empty_signatures));\n        let 
recovered_block = Block::from(json_block);\n        assert_eq!(block, recovered_block);\n    }\n\n    #[test]\n    fn block_to_and_from_json_block_with_signatures_v2() {\n        let rng = &mut TestRng::new();\n        let block: Block = TestBlockBuilder::new().build(rng).into();\n        let empty_signatures = BlockSignatures::V2(BlockSignaturesV2::new(\n            *block.hash(),\n            block.height(),\n            block.era_id(),\n            ChainNameDigest::random(rng),\n        ));\n        let json_block = JsonBlockWithSignatures::new(block.clone(), Some(empty_signatures));\n        let recovered_block = Block::from(json_block);\n        assert_eq!(block, recovered_block);\n    }\n\n    #[test]\n    fn json_block_roundtrip() {\n        let rng = &mut TestRng::new();\n        let block: Block = TestBlockBuilder::new().build(rng).into();\n        let json_string = serde_json::to_string_pretty(&block).unwrap();\n        let decoded = serde_json::from_str(&json_string).unwrap();\n        assert_eq!(block, decoded);\n    }\n}\n"
  },
  {
    "path": "types/src/block/json_compatibility.rs",
    "content": "//! This module provides types primarily to support converting instances of `BTreeMap<K, V>` into\n//! `Vec<(K, V)>` or similar, in order to allow these types to be able to be converted to and from\n//! JSON, and to allow for the production of a static schema for them.\n\n#![cfg(all(feature = \"std\", feature = \"json-schema\"))]\nmod json_block_with_signatures;\n\npub use json_block_with_signatures::JsonBlockWithSignatures;\n"
  },
  {
    "path": "types/src/block/rewarded_signatures.rs",
    "content": "use alloc::{collections::BTreeSet, vec::Vec};\n\nuse crate::{\n    bytesrepr::{self, Bytes, FromBytes, ToBytes},\n    PublicKey,\n};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\n\nuse serde::{Deserialize, Serialize};\nuse tracing::error;\n\n/// Describes finality signatures that will be rewarded in a block. Consists of a vector of\n/// `SingleBlockRewardedSignatures`, each of which describes signatures for a single ancestor\n/// block. The first entry represents the signatures for the parent block, the second for the\n/// parent of the parent, and so on.\n#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Default, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct RewardedSignatures(Vec<SingleBlockRewardedSignatures>);\n\n/// List of identifiers for finality signatures for a particular past block.\n///\n/// That past block height is current_height - signature_rewards_max_delay, the latter being defined\n/// in the chainspec.\n///\n/// We need to wait for a few blocks to pass (`signature_rewards_max_delay`) to store the finality\n/// signers because we need a bit of time to get the block finality.\n#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Default, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct SingleBlockRewardedSignatures(Vec<u8>);\n\nimpl SingleBlockRewardedSignatures {\n    /// Creates a new set of recorded finality signaures from the era's validators +\n    /// the list of validators which signed.\n    pub fn from_validator_set<'a>(\n        public_keys: &BTreeSet<PublicKey>,\n        all_validators: impl IntoIterator<Item = &'a PublicKey>,\n    ) -> Self {\n        // Take the validators list\n        // Replace the ones who signed with 1 
and the ones who didn't with 0\n        // Pack everything into bytes\n        let result = Self::pack(\n            all_validators\n                .into_iter()\n                .map(|key| u8::from(public_keys.contains(key))),\n        );\n\n        let included_count: u32 = result.0.iter().map(|c| c.count_ones()).sum();\n        if included_count as usize != public_keys.len() {\n            error!(\n                included_count,\n                expected_count = public_keys.len(),\n                \"error creating past finality signatures from validator set\"\n            );\n        }\n\n        result\n    }\n\n    /// Gets the list of validators which signed from a set of recorded finality signaures (`self`)\n    /// + the era's validators.\n    pub fn to_validator_set(\n        &self,\n        all_validators: impl IntoIterator<Item = PublicKey>,\n    ) -> BTreeSet<PublicKey> {\n        self.unpack()\n            .zip(all_validators)\n            .filter_map(|(active, validator)| (active != 0).then_some(validator))\n            .collect()\n    }\n\n    /// Packs the bits to bytes, to create a `PastFinalitySignature`\n    /// from an iterator of bits.\n    ///\n    /// If a value is neither 1 nor 0, it is interpreted as a 1.\n    #[doc(hidden)]\n    pub fn pack(bits: impl Iterator<Item = u8>) -> Self {\n        //use itertools::Itertools;\n\n        fn set_bit_at(value: u8, position: usize) -> u8 {\n            // Sanitize the value (must be 0 or 1):\n            let value = u8::from(value != 0);\n\n            value << (7 - position)\n        }\n\n        let inner = chunks_8(bits)\n            .map(|bits_chunk| {\n                bits_chunk\n                    .enumerate()\n                    .fold(0, |acc, (pos, value)| acc | set_bit_at(value, pos))\n            })\n            .collect();\n\n        SingleBlockRewardedSignatures(inner)\n    }\n\n    /// Unpacks the bytes to bits,\n    /// to get a human readable representation of 
`PastFinalitySignature`.\n    #[doc(hidden)]\n    pub fn unpack(&self) -> impl Iterator<Item = u8> + '_ {\n        // Returns the bit at the given position (0 or 1):\n        fn bit_at(byte: u8, position: u8) -> u8 {\n            (byte & (0b1000_0000 >> position)) >> (7 - position)\n        }\n\n        self.0\n            .iter()\n            .flat_map(|&byte| (0..8).map(move |i| bit_at(byte, i)))\n    }\n\n    /// Calculates the set difference of two instances of `SingleBlockRewardedSignatures`.\n    #[doc(hidden)]\n    pub fn difference(mut self, other: &SingleBlockRewardedSignatures) -> Self {\n        for (self_byte, other_byte) in self.0.iter_mut().zip(other.0.iter()) {\n            *self_byte &= !other_byte;\n        }\n        self\n    }\n\n    /// Calculates the set intersection of two instances of `SingleBlockRewardedSignatures`.\n    pub fn intersection(mut self, other: &SingleBlockRewardedSignatures) -> Self {\n        self.0 = self\n            .0\n            .iter()\n            .zip(other.0.iter())\n            .map(|(a, b)| *a & *b)\n            .collect();\n        self\n    }\n\n    /// Returns `true` if the set contains at least one signature.\n    pub fn has_some(&self) -> bool {\n        self.0.iter().any(|byte| *byte != 0)\n    }\n}\n\nimpl ToBytes for SingleBlockRewardedSignatures {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        buffer.extend(Bytes::from(self.0.as_ref()).to_bytes()?);\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n}\n\nimpl FromBytes for SingleBlockRewardedSignatures {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (inner, rest) = Bytes::from_bytes(bytes)?;\n        Ok((SingleBlockRewardedSignatures(inner.into()), rest))\n    }\n}\n\nimpl RewardedSignatures {\n    /// Creates a new instance of `RewardedSignatures`.\n    pub fn 
new<I: IntoIterator<Item = SingleBlockRewardedSignatures>>(\n        single_block_signatures: I,\n    ) -> Self {\n        Self(single_block_signatures.into_iter().collect())\n    }\n\n    /// Creates an instance of `RewardedSignatures` based on its unpacked (one byte per validator)\n    /// representation.\n    pub fn pack(unpacked: Vec<Vec<u8>>) -> Self {\n        Self(\n            unpacked\n                .into_iter()\n                .map(|single_block_signatures| {\n                    SingleBlockRewardedSignatures::pack(single_block_signatures.into_iter())\n                })\n                .collect(),\n        )\n    }\n\n    /// Creates an unpacked (one byte per validator) representation of the finality signatures to\n    /// be rewarded in this block.\n    pub fn unpack(&self) -> Vec<Vec<u8>> {\n        self.0\n            .iter()\n            .map(|single_block_signatures| single_block_signatures.unpack().collect())\n            .collect()\n    }\n\n    /// Returns this instance of `RewardedSignatures` with `num_blocks` of empty signatures\n    /// prepended.\n    pub fn left_padded(self, num_blocks: usize) -> Self {\n        Self(\n            core::iter::repeat_with(SingleBlockRewardedSignatures::default)\n                .take(num_blocks)\n                .chain(self.0)\n                .collect(),\n        )\n    }\n\n    /// Calculates the set difference between two instances of `RewardedSignatures`.\n    pub fn difference(self, other: &RewardedSignatures) -> Self {\n        Self(\n            self.0\n                .into_iter()\n                .zip(other.0.iter())\n                .map(|(single_block_signatures, other_block_signatures)| {\n                    single_block_signatures.difference(other_block_signatures)\n                })\n                .collect(),\n        )\n    }\n\n    /// Calculates the set intersection between two instances of `RewardedSignatures`.\n    pub fn intersection(&self, other: &RewardedSignatures) -> Self {\n   
     Self(\n            self.0\n                .iter()\n                .zip(other.0.iter())\n                .map(|(single_block_signatures, other_block_signatures)| {\n                    single_block_signatures\n                        .clone()\n                        .intersection(other_block_signatures)\n                })\n                .collect(),\n        )\n    }\n\n    /// Iterates over the `SingleBlockRewardedSignatures` for each rewarded block.\n    pub fn iter(&self) -> impl Iterator<Item = &SingleBlockRewardedSignatures> {\n        self.0.iter()\n    }\n\n    /// Iterates over the `SingleBlockRewardedSignatures`, yielding the signatures together with\n    /// the block height for each entry. `block_height` is the height of the block that contains\n    /// this instance of `RewardedSignatures`.\n    pub fn iter_with_height(\n        &self,\n        block_height: u64,\n    ) -> impl Iterator<Item = (u64, &SingleBlockRewardedSignatures)> {\n        self.0.iter().enumerate().map(move |(rel_height, sbrs)| {\n            (\n                block_height\n                    .saturating_sub(rel_height as u64)\n                    .saturating_sub(1),\n                sbrs,\n            )\n        })\n    }\n\n    /// Returns `true` if there is at least one cited signature.\n    pub fn has_some(&self) -> bool {\n        self.0.iter().any(|signatures| signatures.has_some())\n    }\n}\n\npub(crate) static EMPTY: RewardedSignatures = RewardedSignatures(Vec::new());\n\nimpl ToBytes for RewardedSignatures {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n}\n\nimpl FromBytes for RewardedSignatures {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        Vec::<SingleBlockRewardedSignatures>::from_bytes(bytes)\n            .map(|(inner, rest)| (RewardedSignatures(inner), rest))\n    }\n}\n\n/// 
Chunks an iterator over `u8`s into pieces of maximum size of 8.\nfn chunks_8(bits: impl Iterator<Item = u8>) -> impl Iterator<Item = impl Iterator<Item = u8>> {\n    struct Chunks<B>(B);\n\n    struct Chunk {\n        values: [u8; 8],\n        index: usize,\n        max: usize,\n    }\n\n    impl<B> Iterator for Chunks<B>\n    where\n        B: Iterator<Item = u8>,\n    {\n        type Item = Chunk;\n\n        fn next(&mut self) -> Option<Self::Item> {\n            let mut values = [0; 8];\n            let max = core::iter::zip(&mut values, &mut self.0)\n                .map(|(array_slot, value)| *array_slot = value)\n                .count();\n\n            (max != 0).then_some(Chunk {\n                values,\n                max,\n                index: 0,\n            })\n        }\n    }\n\n    impl Iterator for Chunk {\n        type Item = u8;\n\n        fn next(&mut self) -> Option<Self::Item> {\n            if self.index < self.max {\n                let n = self.values.get(self.index).cloned();\n                self.index += 1;\n                n\n            } else {\n                None\n            }\n        }\n    }\n\n    Chunks(bits)\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl SingleBlockRewardedSignatures {\n    /// Returns random data.\n    pub fn random(rng: &mut crate::testing::TestRng, n_validators: usize) -> Self {\n        let mut bytes = vec![0; (n_validators + 7) / 8];\n\n        rand::RngCore::fill_bytes(rng, bytes.as_mut());\n\n        SingleBlockRewardedSignatures(bytes)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::{chunks_8, SingleBlockRewardedSignatures};\n    use crate::{\n        bytesrepr::{FromBytes, ToBytes},\n        testing::TestRng,\n        PublicKey,\n    };\n    use rand::{seq::IteratorRandom, Rng};\n    use std::collections::BTreeSet;\n\n    #[test]\n    fn empty_signatures() {\n        let rng = &mut TestRng::new();\n        let validators: Vec<_> = std::iter::repeat_with(|| PublicKey::random(rng))\n    
        .take(7)\n            .collect();\n        let original_signed = BTreeSet::new();\n\n        let past_finality_signatures =\n            SingleBlockRewardedSignatures::from_validator_set(&original_signed, validators.iter());\n\n        assert_eq!(past_finality_signatures.0, &[0]);\n\n        let signed = past_finality_signatures.to_validator_set(validators);\n\n        assert_eq!(original_signed, signed);\n    }\n\n    #[test]\n    fn from_and_to_methods_match_in_a_simple_case() {\n        let rng = &mut TestRng::new();\n        let validators: Vec<_> = std::iter::repeat_with(|| PublicKey::random(rng))\n            .take(11)\n            .collect();\n        let signed = {\n            let mut signed = BTreeSet::new();\n            signed.insert(validators[2].clone());\n            signed.insert(validators[5].clone());\n            signed.insert(validators[6].clone());\n            signed.insert(validators[8].clone());\n            signed.insert(validators[10].clone());\n            signed\n        };\n\n        let past_finality_signatures =\n            SingleBlockRewardedSignatures::from_validator_set(&signed, validators.iter());\n\n        assert_eq!(past_finality_signatures.0, &[0b0010_0110, 0b1010_0000]);\n\n        let signed_ = past_finality_signatures.to_validator_set(validators);\n\n        assert_eq!(signed, signed_);\n    }\n\n    #[test]\n    fn simple_serialization_roundtrip() {\n        let data = SingleBlockRewardedSignatures(vec![1, 2, 3, 4, 5]);\n\n        let serialized = data.to_bytes().unwrap();\n        assert_eq!(serialized.len(), data.0.len() + 4);\n        assert_eq!(data.serialized_length(), data.0.len() + 4);\n\n        let (deserialized, rest) = SingleBlockRewardedSignatures::from_bytes(&serialized).unwrap();\n\n        assert_eq!(data, deserialized);\n        assert_eq!(rest, &[0u8; 0]);\n    }\n\n    #[test]\n    fn serialization_roundtrip_of_empty_data() {\n        let data = SingleBlockRewardedSignatures::default();\n\n       
 let serialized = data.to_bytes().unwrap();\n        assert_eq!(serialized, &[0; 4]);\n        assert_eq!(data.serialized_length(), 4);\n\n        let (deserialized, rest) = SingleBlockRewardedSignatures::from_bytes(&serialized).unwrap();\n\n        assert_eq!(data, deserialized);\n        assert_eq!(rest, &[0u8; 0]);\n    }\n\n    #[test]\n    fn serialization_roundtrip_of_random_data() {\n        let rng = &mut TestRng::new();\n        let n_validators = rng.gen_range(50..200);\n        let all_validators: BTreeSet<_> = std::iter::repeat_with(|| PublicKey::random(rng))\n            .take(n_validators)\n            .collect();\n        let n_to_sign = rng.gen_range(0..all_validators.len());\n        let public_keys = all_validators\n            .iter()\n            .cloned()\n            .choose_multiple(rng, n_to_sign)\n            .into_iter()\n            .collect();\n\n        let past_finality_signatures =\n            SingleBlockRewardedSignatures::from_validator_set(&public_keys, all_validators.iter());\n\n        let serialized = past_finality_signatures.to_bytes().unwrap();\n        let (deserialized, rest) = SingleBlockRewardedSignatures::from_bytes(&serialized).unwrap();\n\n        assert_eq!(public_keys, deserialized.to_validator_set(all_validators));\n        assert_eq!(rest, &[0u8; 0]);\n    }\n\n    #[test]\n    fn chunk_iterator() {\n        fn v(maybe_chunk: Option<impl Iterator<Item = u8>>) -> Option<Vec<u8>> {\n            maybe_chunk.map(itertools::Itertools::collect_vec)\n        }\n\n        // Empty chunks:\n\n        let mut chunks = chunks_8(IntoIterator::into_iter([]));\n\n        assert_eq!(v(chunks.next()), None);\n\n        // Exact size chunk:\n\n        let mut chunks = chunks_8(IntoIterator::into_iter([10, 11, 12, 13, 14, 15, 16, 17]));\n\n        assert_eq!(v(chunks.next()), Some(vec![10, 11, 12, 13, 14, 15, 16, 17]));\n        assert_eq!(v(chunks.next()), None);\n\n        // Chunks with a remainder:\n\n        let mut chunks = 
chunks_8(IntoIterator::into_iter([\n            10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,\n        ]));\n\n        assert_eq!(v(chunks.next()), Some(vec![10, 11, 12, 13, 14, 15, 16, 17]));\n        assert_eq!(v(chunks.next()), Some(vec![18, 19, 20, 21, 22, 23, 24, 25]));\n        assert_eq!(v(chunks.next()), Some(vec![26]));\n    }\n}\n"
  },
  {
    "path": "types/src/block/rewards.rs",
    "content": "use alloc::{collections::BTreeMap, vec::Vec};\n\nuse crate::{PublicKey, U512};\n\n/// Rewards distributed to validators.\n#[derive(Debug)]\npub enum Rewards<'a> {\n    /// Rewards for version 1, associate a ratio to each validator.\n    V1(&'a BTreeMap<PublicKey, u64>),\n    /// Rewards for version 1, associate a tokens amount to each validator.\n    V2(&'a BTreeMap<PublicKey, Vec<U512>>),\n}\n"
  },
  {
    "path": "types/src/block/test_block_builder/test_block_v1_builder.rs",
    "content": "use std::iter;\n\nuse rand::Rng;\n\nuse crate::{testing::TestRng, Block, EraEndV1};\n\nuse crate::{\n    system::auction::ValidatorWeights, BlockHash, BlockV1, Deploy, Digest, EraId, EraReport,\n    ProtocolVersion, PublicKey, Timestamp, U512,\n};\n\n/// A helper to build the blocks with various properties required for tests.\npub struct TestBlockV1Builder {\n    parent_hash: Option<BlockHash>,\n    state_root_hash: Option<Digest>,\n    timestamp: Option<Timestamp>,\n    era: Option<EraId>,\n    height: Option<u64>,\n    protocol_version: ProtocolVersion,\n    deploys: Vec<Deploy>,\n    is_switch: Option<bool>,\n    validator_weights: Option<ValidatorWeights>,\n}\n\nimpl Default for TestBlockV1Builder {\n    fn default() -> Self {\n        Self {\n            parent_hash: None,\n            state_root_hash: None,\n            timestamp: None,\n            era: None,\n            height: None,\n            protocol_version: ProtocolVersion::V1_0_0,\n            deploys: Vec::new(),\n            is_switch: None,\n            validator_weights: None,\n        }\n    }\n}\n\nimpl TestBlockV1Builder {\n    /// Creates new `TestBlockBuilder`.\n    pub fn new() -> Self {\n        Self::default()\n    }\n\n    /// Sets the parent hash for the block.\n    pub fn parent_hash(self, parent_hash: BlockHash) -> Self {\n        Self {\n            parent_hash: Some(parent_hash),\n            ..self\n        }\n    }\n\n    /// Sets the state root hash for the block.\n    pub fn state_root_hash(self, state_root_hash: Digest) -> Self {\n        Self {\n            state_root_hash: Some(state_root_hash),\n            ..self\n        }\n    }\n\n    /// Sets the timestamp for the block.\n    pub fn timestamp(self, timestamp: Timestamp) -> Self {\n        Self {\n            timestamp: Some(timestamp),\n            ..self\n        }\n    }\n\n    /// Sets the era for the block\n    pub fn era(self, era: impl Into<EraId>) -> Self {\n        Self {\n            era: 
Some(era.into()),\n            ..self\n        }\n    }\n\n    /// Sets the height for the block.\n    pub fn height(self, height: u64) -> Self {\n        Self {\n            height: Some(height),\n            ..self\n        }\n    }\n\n    /// Sets the protocol version for the block.\n    pub fn protocol_version(self, protocol_version: ProtocolVersion) -> Self {\n        Self {\n            protocol_version,\n            ..self\n        }\n    }\n\n    /// Associates the given deploys with the created block.\n    pub fn deploys<'a, I: IntoIterator<Item = &'a Deploy>>(self, deploys_iter: I) -> Self {\n        Self {\n            deploys: deploys_iter.into_iter().cloned().collect(),\n            ..self\n        }\n    }\n\n    /// Associates a number of random deploys with the created block.\n    pub fn random_deploys(mut self, count: usize, rng: &mut TestRng) -> Self {\n        self.deploys = iter::repeat(())\n            .take(count)\n            .map(|_| Deploy::random(rng))\n            .collect();\n        self\n    }\n\n    /// Allows setting the created block to be switch block or not.\n    pub fn switch_block(self, is_switch: bool) -> Self {\n        Self {\n            is_switch: Some(is_switch),\n            ..self\n        }\n    }\n\n    /// Sets the validator weights for the block.\n    pub fn validator_weights(self, validator_weights: ValidatorWeights) -> Self {\n        Self {\n            validator_weights: Some(validator_weights),\n            ..self\n        }\n    }\n\n    /// Builds the block.\n    pub fn build(self, rng: &mut TestRng) -> BlockV1 {\n        let Self {\n            parent_hash,\n            state_root_hash,\n            timestamp,\n            era,\n            height,\n            protocol_version,\n            deploys,\n            is_switch,\n            validator_weights,\n        } = self;\n\n        let parent_hash = parent_hash.unwrap_or_else(|| BlockHash::new(rng.gen()));\n        let parent_seed = Digest::random(rng);\n  
      let state_root_hash = state_root_hash.unwrap_or_else(|| rng.gen());\n        let random_bit = rng.gen();\n        let is_switch = is_switch.unwrap_or_else(|| rng.gen_bool(0.1));\n        let era_end = is_switch.then(|| {\n            let next_era_validator_weights = validator_weights.unwrap_or_else(|| {\n                (1..6)\n                    .map(|i| (PublicKey::random(rng), U512::from(i)))\n                    .take(6)\n                    .collect()\n            });\n            EraEndV1::new(EraReport::random(rng), next_era_validator_weights)\n        });\n        let timestamp = timestamp.unwrap_or_else(Timestamp::now);\n        let era_id = era.unwrap_or(EraId::random(rng));\n        let height = height.unwrap_or_else(|| era_id.value() * 10 + rng.gen_range(0..10));\n        let proposer = PublicKey::random(rng);\n        let deploy_hashes = deploys.iter().map(|deploy| *deploy.hash()).collect();\n        let transfer_hashes = vec![];\n\n        BlockV1::new(\n            parent_hash,\n            parent_seed,\n            state_root_hash,\n            random_bit,\n            era_end,\n            timestamp,\n            era_id,\n            height,\n            protocol_version,\n            proposer,\n            deploy_hashes,\n            transfer_hashes,\n        )\n    }\n\n    /// Builds the block as a versioned block.\n    pub fn build_versioned(self, rng: &mut TestRng) -> Block {\n        self.build(rng).into()\n    }\n}\n"
  },
  {
    "path": "types/src/block/test_block_builder/test_block_v2_builder.rs",
    "content": "use std::iter;\n\nuse alloc::collections::BTreeMap;\nuse rand::Rng;\n\nuse crate::{\n    system::auction::ValidatorWeights, testing::TestRng, Block, BlockHash, BlockV2, Digest,\n    EraEndV2, EraId, ProtocolVersion, PublicKey, RewardedSignatures, Timestamp, Transaction,\n    TransactionEntryPoint, TransactionTarget, AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID,\n    LARGE_WASM_LANE_ID, MEDIUM_WASM_LANE_ID, MINT_LANE_ID, SMALL_WASM_LANE_ID, U512,\n};\n\n/// A helper to build the blocks with various properties required for tests.\npub struct TestBlockV2Builder {\n    parent_hash: Option<BlockHash>,\n    state_root_hash: Option<Digest>,\n    timestamp: Option<Timestamp>,\n    era: Option<EraId>,\n    height: Option<u64>,\n    proposer: Option<PublicKey>,\n    protocol_version: ProtocolVersion,\n    txns: Vec<Transaction>,\n    is_switch: Option<bool>,\n    validator_weights: Option<ValidatorWeights>,\n    rewarded_signatures: Option<RewardedSignatures>,\n}\n\nimpl Default for TestBlockV2Builder {\n    fn default() -> Self {\n        Self {\n            parent_hash: None,\n            state_root_hash: None,\n            timestamp: None,\n            era: None,\n            height: None,\n            proposer: None,\n            protocol_version: ProtocolVersion::V1_0_0,\n            txns: Vec::new(),\n            is_switch: None,\n            validator_weights: None,\n            rewarded_signatures: None,\n        }\n    }\n}\n\nimpl TestBlockV2Builder {\n    /// Creates new `TestBlockBuilder`.\n    pub fn new() -> Self {\n        Self::default()\n    }\n\n    /// Sets the parent hash for the block.\n    pub fn parent_hash(self, parent_hash: BlockHash) -> Self {\n        Self {\n            parent_hash: Some(parent_hash),\n            ..self\n        }\n    }\n\n    /// Sets the state root hash for the block.\n    pub fn state_root_hash(self, state_root_hash: Digest) -> Self {\n        Self {\n            state_root_hash: Some(state_root_hash),\n           
 ..self\n        }\n    }\n\n    /// Sets the timestamp for the block.\n    pub fn timestamp(self, timestamp: Timestamp) -> Self {\n        Self {\n            timestamp: Some(timestamp),\n            ..self\n        }\n    }\n\n    /// Sets the era for the block\n    pub fn era(self, era: impl Into<EraId>) -> Self {\n        Self {\n            era: Some(era.into()),\n            ..self\n        }\n    }\n\n    /// Sets the height for the block.\n    pub fn height(self, height: u64) -> Self {\n        Self {\n            height: Some(height),\n            ..self\n        }\n    }\n\n    /// Sets the block proposer.\n    pub fn proposer(self, proposer: PublicKey) -> Self {\n        Self {\n            proposer: Some(proposer),\n            ..self\n        }\n    }\n\n    /// Sets the protocol version for the block.\n    pub fn protocol_version(self, protocol_version: ProtocolVersion) -> Self {\n        Self {\n            protocol_version,\n            ..self\n        }\n    }\n\n    /// Associates the given transactions with the created block.\n    pub fn transactions<'a, I: IntoIterator<Item = &'a Transaction>>(self, txns_iter: I) -> Self {\n        Self {\n            txns: txns_iter.into_iter().cloned().collect(),\n            ..self\n        }\n    }\n\n    /// Sets the height for the block.\n    pub fn rewarded_signatures(self, rewarded_signatures: RewardedSignatures) -> Self {\n        Self {\n            rewarded_signatures: Some(rewarded_signatures),\n            ..self\n        }\n    }\n\n    /// Associates a number of random transactions with the created block.\n    pub fn random_transactions(mut self, count: usize, rng: &mut TestRng) -> Self {\n        self.txns = iter::repeat_with(|| Transaction::random(rng))\n            .take(count)\n            .collect();\n        self\n    }\n\n    /// Allows setting the created block to be switch block or not.\n    pub fn switch_block(self, is_switch: bool) -> Self {\n        Self {\n            is_switch: 
Some(is_switch),\n            ..self\n        }\n    }\n\n    /// Sets the validator weights for the block.\n    pub fn validator_weights(self, validator_weights: ValidatorWeights) -> Self {\n        Self {\n            validator_weights: Some(validator_weights),\n            ..self\n        }\n    }\n\n    /// Builds the block.\n    pub fn build(self, rng: &mut TestRng) -> BlockV2 {\n        let Self {\n            parent_hash,\n            state_root_hash,\n            timestamp,\n            era,\n            height,\n            proposer,\n            protocol_version,\n            txns,\n            is_switch,\n            validator_weights,\n            rewarded_signatures,\n        } = self;\n\n        let parent_hash = parent_hash.unwrap_or_else(|| BlockHash::new(rng.gen()));\n        let parent_seed = Digest::random(rng);\n        let state_root_hash = state_root_hash.unwrap_or_else(|| rng.gen());\n        let random_bit = rng.gen();\n        let is_switch = is_switch.unwrap_or_else(|| rng.gen_bool(0.1));\n        let era_end = is_switch.then(|| gen_era_end_v2(rng, validator_weights));\n        let timestamp = timestamp.unwrap_or_else(Timestamp::now);\n        let era_id = era.unwrap_or(EraId::random(rng));\n        let height = height.unwrap_or_else(|| era_id.value() * 10 + rng.gen_range(0..10));\n        let proposer = proposer.unwrap_or_else(|| PublicKey::random(rng));\n\n        let mut mint_hashes = vec![];\n        let mut auction_hashes = vec![];\n        let mut install_upgrade_hashes = vec![];\n        let mut large_hashes = vec![];\n        let mut medium_hashes = vec![];\n        let mut small_hashes = vec![];\n        for txn in txns {\n            let txn_hash = txn.hash();\n            let lane_id = match txn {\n                Transaction::Deploy(deploy) => {\n                    if deploy.is_transfer() {\n                        MINT_LANE_ID\n                    } else {\n                        LARGE_WASM_LANE_ID\n                    }\n   
             }\n                Transaction::V1(transaction_v1) => {\n                    let entry_point = transaction_v1.get_transaction_entry_point().unwrap();\n                    let target = transaction_v1.get_transaction_target().unwrap();\n                    simplified_calculate_transaction_lane_from_values(&entry_point, &target)\n                }\n            };\n            match lane_id {\n                MINT_LANE_ID => mint_hashes.push(txn_hash),\n                AUCTION_LANE_ID => auction_hashes.push(txn_hash),\n                INSTALL_UPGRADE_LANE_ID => install_upgrade_hashes.push(txn_hash),\n                LARGE_WASM_LANE_ID => large_hashes.push(txn_hash),\n                MEDIUM_WASM_LANE_ID => medium_hashes.push(txn_hash),\n                SMALL_WASM_LANE_ID => small_hashes.push(txn_hash),\n                _ => panic!(\"Invalid lane id\"),\n            }\n        }\n        let transactions = {\n            let mut ret = BTreeMap::new();\n            ret.insert(MINT_LANE_ID, mint_hashes);\n            ret.insert(AUCTION_LANE_ID, auction_hashes);\n            ret.insert(INSTALL_UPGRADE_LANE_ID, install_upgrade_hashes);\n            ret.insert(LARGE_WASM_LANE_ID, large_hashes);\n            ret.insert(MEDIUM_WASM_LANE_ID, medium_hashes);\n            ret.insert(SMALL_WASM_LANE_ID, small_hashes);\n            ret\n        };\n        let rewarded_signatures = rewarded_signatures.unwrap_or_default();\n        let current_gas_price: u8 = 1;\n        let last_switch_block_hash = BlockHash::new(Digest::from([8; Digest::LENGTH]));\n        BlockV2::new(\n            parent_hash,\n            parent_seed,\n            state_root_hash,\n            random_bit,\n            era_end,\n            timestamp,\n            era_id,\n            height,\n            protocol_version,\n            proposer,\n            transactions,\n            rewarded_signatures,\n            current_gas_price,\n            Some(last_switch_block_hash),\n        )\n    }\n\n 
   /// Builds the block as a versioned block.\n    pub fn build_versioned(self, rng: &mut TestRng) -> Block {\n        self.build(rng).into()\n    }\n\n    /// Builds a block that is invalid.\n    pub fn build_invalid(self, rng: &mut TestRng) -> BlockV2 {\n        self.build(rng).make_invalid(rng)\n    }\n}\n\n// A simplified way of calculating transaction lanes. It doesn't take\n// into consideration the size of the transaction against the chainspec\n// and doesn't take `additional_computation_factor` into consideration.\n// This is only used for tests purposes.\nfn simplified_calculate_transaction_lane_from_values(\n    entry_point: &TransactionEntryPoint,\n    target: &TransactionTarget,\n) -> u8 {\n    match target {\n        TransactionTarget::Native => match entry_point {\n            TransactionEntryPoint::Transfer | TransactionEntryPoint::Burn => MINT_LANE_ID,\n            TransactionEntryPoint::AddBid\n            | TransactionEntryPoint::WithdrawBid\n            | TransactionEntryPoint::Delegate\n            | TransactionEntryPoint::Undelegate\n            | TransactionEntryPoint::Redelegate\n            | TransactionEntryPoint::ActivateBid\n            | TransactionEntryPoint::ChangeBidPublicKey\n            | TransactionEntryPoint::AddReservations\n            | TransactionEntryPoint::CancelReservations => AUCTION_LANE_ID,\n            TransactionEntryPoint::Call => panic!(\"EntryPointCannotBeCall\"),\n            TransactionEntryPoint::Custom(_) => panic!(\"EntryPointCannotBeCustom\"),\n        },\n        TransactionTarget::Stored { .. 
} => match entry_point {\n            TransactionEntryPoint::Custom(_) => LARGE_WASM_LANE_ID,\n            TransactionEntryPoint::Call\n            | TransactionEntryPoint::Transfer\n            | TransactionEntryPoint::Burn\n            | TransactionEntryPoint::AddBid\n            | TransactionEntryPoint::WithdrawBid\n            | TransactionEntryPoint::Delegate\n            | TransactionEntryPoint::Undelegate\n            | TransactionEntryPoint::Redelegate\n            | TransactionEntryPoint::ActivateBid\n            | TransactionEntryPoint::ChangeBidPublicKey\n            | TransactionEntryPoint::AddReservations\n            | TransactionEntryPoint::CancelReservations => {\n                panic!(\"EntryPointMustBeCustom\")\n            }\n        },\n        TransactionTarget::Session {\n            is_install_upgrade, ..\n        } => match entry_point {\n            TransactionEntryPoint::Call => {\n                if *is_install_upgrade {\n                    INSTALL_UPGRADE_LANE_ID\n                } else {\n                    LARGE_WASM_LANE_ID\n                }\n            }\n            TransactionEntryPoint::Custom(_)\n            | TransactionEntryPoint::Transfer\n            | TransactionEntryPoint::Burn\n            | TransactionEntryPoint::AddBid\n            | TransactionEntryPoint::WithdrawBid\n            | TransactionEntryPoint::Delegate\n            | TransactionEntryPoint::Undelegate\n            | TransactionEntryPoint::Redelegate\n            | TransactionEntryPoint::ActivateBid\n            | TransactionEntryPoint::ChangeBidPublicKey\n            | TransactionEntryPoint::AddReservations\n            | TransactionEntryPoint::CancelReservations => {\n                panic!(\"EntryPointMustBeCall\")\n            }\n        },\n    }\n}\n\nfn gen_era_end_v2(\n    rng: &mut TestRng,\n    validator_weights: Option<BTreeMap<PublicKey, U512>>,\n) -> EraEndV2 {\n    let equivocators_count = rng.gen_range(0..5);\n    let rewards_count = 
rng.gen_range(0..5);\n    let inactive_count = rng.gen_range(0..5);\n    let next_era_validator_weights = validator_weights.unwrap_or_else(|| {\n        (1..6)\n            .map(|i| (PublicKey::random(rng), U512::from(i)))\n            .take(6)\n            .collect()\n    });\n    let equivocators = iter::repeat_with(|| PublicKey::random(rng))\n        .take(equivocators_count)\n        .collect();\n    let rewards = iter::repeat_with(|| {\n        let pub_key = PublicKey::random(rng);\n        let mut rewards = vec![U512::from(rng.gen_range(1..=1_000_000_000 + 1))];\n        if rng.gen_bool(0.2) {\n            rewards.push(U512::from(rng.gen_range(1..=1_000_000_000 + 1)));\n        };\n        (pub_key, rewards)\n    })\n    .take(rewards_count)\n    .collect();\n    let inactive_validators = iter::repeat_with(|| PublicKey::random(rng))\n        .take(inactive_count)\n        .collect();\n\n    EraEndV2::new(\n        equivocators,\n        inactive_validators,\n        next_era_validator_weights,\n        rewards,\n        1u8,\n    )\n}\n"
  },
  {
    "path": "types/src/block/test_block_builder.rs",
    "content": "mod test_block_v1_builder;\nmod test_block_v2_builder;\n\npub use test_block_v1_builder::TestBlockV1Builder;\npub use test_block_v2_builder::TestBlockV2Builder as TestBlockBuilder;\n"
  },
  {
    "path": "types/src/block.rs",
    "content": "mod available_block_range;\nmod block_body;\nmod block_global;\nmod block_hash;\nmod block_hash_and_height;\nmod block_header;\nmod block_header_with_signatures;\nmod block_identifier;\nmod block_signatures;\nmod block_sync_status;\nmod block_v1;\nmod block_v2;\nmod block_with_signatures;\nmod chain_name_digest;\nmod era_end;\nmod finality_signature;\nmod finality_signature_id;\nmod json_compatibility;\nmod rewarded_signatures;\nmod rewards;\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\nmod test_block_builder;\n\nuse alloc::{boxed::Box, vec::Vec};\nuse core::fmt::{self, Display, Formatter};\nuse itertools::Either;\n#[cfg(feature = \"json-schema\")]\nuse once_cell::sync::Lazy;\n#[cfg(feature = \"std\")]\nuse std::error::Error as StdError;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"std\")]\nuse num_rational::Ratio;\n\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\n\n#[cfg(feature = \"std\")]\nuse crate::TransactionConfig;\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    transaction::TransactionHash,\n    Digest, EraId, ProtocolVersion, PublicKey, Timestamp,\n};\npub use available_block_range::AvailableBlockRange;\npub use block_body::{BlockBody, BlockBodyV1, BlockBodyV2};\npub use block_global::{BlockGlobalAddr, BlockGlobalAddrTag};\npub use block_hash::BlockHash;\npub use block_hash_and_height::BlockHashAndHeight;\npub use block_header::{BlockHeader, BlockHeaderV1, BlockHeaderV2};\npub use block_header_with_signatures::{\n    BlockHeaderWithSignatures, BlockHeaderWithSignaturesValidationError,\n};\npub use block_identifier::BlockIdentifier;\npub use block_signatures::{\n    BlockSignatures, BlockSignaturesMergeError, BlockSignaturesV1, BlockSignaturesV2,\n};\npub use block_sync_status::{BlockSyncStatus, BlockSynchronizerStatus};\npub use block_v1::BlockV1;\npub use block_v2::BlockV2;\npub use block_with_signatures::BlockWithSignatures;\npub 
use chain_name_digest::ChainNameDigest;\npub use era_end::{EraEnd, EraEndV1, EraEndV2, EraReport};\npub use finality_signature::{FinalitySignature, FinalitySignatureV1, FinalitySignatureV2};\npub use finality_signature_id::FinalitySignatureId;\n#[cfg(all(feature = \"std\", feature = \"json-schema\"))]\npub use json_compatibility::JsonBlockWithSignatures;\npub use rewarded_signatures::{RewardedSignatures, SingleBlockRewardedSignatures};\npub use rewards::Rewards;\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\npub use test_block_builder::{TestBlockBuilder, TestBlockV1Builder};\n\n#[cfg(feature = \"json-schema\")]\nstatic BLOCK: Lazy<Block> = Lazy::new(|| BlockV2::example().into());\n\n/// An error that can arise when validating a block's cryptographic integrity using its hashes.\n#[derive(Clone, Eq, PartialEq, Debug)]\n#[cfg_attr(any(feature = \"std\", test), derive(serde::Serialize))]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[non_exhaustive]\npub enum BlockValidationError {\n    /// Problem serializing some of a block's data into bytes.\n    Bytesrepr(bytesrepr::Error),\n    /// The provided block's hash is not the same as the actual hash of the block.\n    UnexpectedBlockHash {\n        /// The block with the incorrect block hash.\n        block: Box<Block>,\n        /// The actual hash of the block.\n        actual_block_hash: BlockHash,\n    },\n    /// The body hash in the header is not the same as the actual hash of the body of the block.\n    UnexpectedBodyHash {\n        /// The block with the header containing the incorrect block body hash.\n        block: Box<Block>,\n        /// The actual hash of the block's body.\n        actual_block_body_hash: Digest,\n    },\n    /// The header version does not match the body version.\n    IncompatibleVersions,\n}\n\nimpl Display for BlockValidationError {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            
BlockValidationError::Bytesrepr(error) => {\n                write!(formatter, \"error validating block: {}\", error)\n            }\n            BlockValidationError::UnexpectedBlockHash {\n                block,\n                actual_block_hash,\n            } => {\n                write!(\n                    formatter,\n                    \"block has incorrect block hash - actual block hash: {:?}, block: {:?}\",\n                    actual_block_hash, block\n                )\n            }\n            BlockValidationError::UnexpectedBodyHash {\n                block,\n                actual_block_body_hash,\n            } => {\n                write!(\n                    formatter,\n                    \"block header has incorrect body hash - actual body hash: {:?}, block: {:?}\",\n                    actual_block_body_hash, block\n                )\n            }\n            BlockValidationError::IncompatibleVersions => {\n                write!(formatter, \"block body and header versions do not match\")\n            }\n        }\n    }\n}\n\nimpl From<bytesrepr::Error> for BlockValidationError {\n    fn from(error: bytesrepr::Error) -> Self {\n        BlockValidationError::Bytesrepr(error)\n    }\n}\n\n#[cfg(feature = \"std\")]\nimpl StdError for BlockValidationError {\n    fn source(&self) -> Option<&(dyn StdError + 'static)> {\n        match self {\n            BlockValidationError::Bytesrepr(error) => Some(error),\n            BlockValidationError::UnexpectedBlockHash { .. }\n            | BlockValidationError::UnexpectedBodyHash { .. 
}\n            | BlockValidationError::IncompatibleVersions => None,\n        }\n    }\n}\n\n#[derive(Clone, Copy, PartialEq, Eq, Debug)]\npub enum BlockConversionError {\n    DifferentVersion { expected_version: u8 },\n}\n\n#[cfg(feature = \"std\")]\nimpl Display for BlockConversionError {\n    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {\n        match self {\n            BlockConversionError::DifferentVersion { expected_version } => {\n                write!(\n                    f,\n                    \"Could not convert a block to the expected version {}\",\n                    expected_version\n                )\n            }\n        }\n    }\n}\n\nconst TAG_LENGTH: usize = U8_SERIALIZED_LENGTH;\n\n/// Tag for block body v1.\nconst BLOCK_V1_TAG: u8 = 0;\n/// Tag for block body v2.\nconst BLOCK_V2_TAG: u8 = 1;\n\n/// A block after execution.\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    any(feature = \"std\", feature = \"json-schema\", test),\n    derive(serde::Serialize, serde::Deserialize)\n)]\n#[derive(Clone, Debug, PartialEq, Eq)]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub enum Block {\n    /// The legacy, initial version of the block.\n    #[cfg_attr(\n        any(feature = \"std\", feature = \"json-schema\", test),\n        serde(rename = \"Version1\")\n    )]\n    V1(BlockV1),\n    /// The version 2 of the block.\n    #[cfg_attr(\n        any(feature = \"std\", feature = \"json-schema\", test),\n        serde(rename = \"Version2\")\n    )]\n    V2(BlockV2),\n}\n\nimpl Block {\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    pub fn new_from_header_and_body(\n        block_header: BlockHeader,\n        block_body: BlockBody,\n    ) -> Result<Self, Box<BlockValidationError>> {\n        let hash = block_header.block_hash();\n        let block = match (block_body, block_header) {\n            (BlockBody::V1(body), BlockHeader::V1(header)) => {\n    
            Ok(Block::V1(BlockV1 { hash, header, body }))\n            }\n            (BlockBody::V2(body), BlockHeader::V2(header)) => {\n                Ok(Block::V2(BlockV2 { hash, header, body }))\n            }\n            _ => Err(BlockValidationError::IncompatibleVersions),\n        }?;\n\n        block.verify()?;\n        Ok(block)\n    }\n\n    /// Clones the header, put it in the versioning enum, and returns it.\n    pub fn clone_header(&self) -> BlockHeader {\n        match self {\n            Block::V1(v1) => BlockHeader::V1(v1.header().clone()),\n            Block::V2(v2) => BlockHeader::V2(v2.header().clone()),\n        }\n    }\n\n    /// Returns the block's header, consuming `self`.\n    pub fn take_header(self) -> BlockHeader {\n        match self {\n            Block::V1(v1) => BlockHeader::V1(v1.take_header()),\n            Block::V2(v2) => BlockHeader::V2(v2.take_header()),\n        }\n    }\n\n    /// Returns the timestamp from when the block was proposed.\n    pub fn timestamp(&self) -> Timestamp {\n        match self {\n            Block::V1(v1) => v1.header.timestamp(),\n            Block::V2(v2) => v2.header.timestamp(),\n        }\n    }\n\n    /// Returns the protocol version of the network from when this block was created.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        match self {\n            Block::V1(v1) => v1.header.protocol_version(),\n            Block::V2(v2) => v2.header.protocol_version(),\n        }\n    }\n\n    /// The hash of this block's header.\n    pub fn hash(&self) -> &BlockHash {\n        match self {\n            Block::V1(v1) => v1.hash(),\n            Block::V2(v2) => v2.hash(),\n        }\n    }\n\n    /// Returns the hash of the block's body.\n    pub fn body_hash(&self) -> &Digest {\n        match self {\n            Block::V1(v1) => v1.header().body_hash(),\n            Block::V2(v2) => v2.header().body_hash(),\n        }\n    }\n\n    /// Returns a random bit needed for initializing a 
future era.\n    pub fn random_bit(&self) -> bool {\n        match self {\n            Block::V1(v1) => v1.header().random_bit(),\n            Block::V2(v2) => v2.header().random_bit(),\n        }\n    }\n\n    /// Returns a seed needed for initializing a future era.\n    pub fn accumulated_seed(&self) -> &Digest {\n        match self {\n            Block::V1(v1) => v1.accumulated_seed(),\n            Block::V2(v2) => v2.accumulated_seed(),\n        }\n    }\n\n    /// Returns the parent block's hash.\n    pub fn parent_hash(&self) -> &BlockHash {\n        match self {\n            Block::V1(v1) => v1.parent_hash(),\n            Block::V2(v2) => v2.parent_hash(),\n        }\n    }\n\n    /// Returns the public key of the validator which proposed the block.\n    pub fn proposer(&self) -> &PublicKey {\n        match self {\n            Block::V1(v1) => v1.proposer(),\n            Block::V2(v2) => v2.proposer(),\n        }\n    }\n\n    /// Clone the body and wrap is up in the versioned `Body`.\n    pub fn clone_body(&self) -> BlockBody {\n        match self {\n            Block::V1(v1) => BlockBody::V1(v1.body().clone()),\n            Block::V2(v2) => BlockBody::V2(v2.body().clone()),\n        }\n    }\n\n    /// Returns the block's body, consuming `self`.\n    pub fn take_body(self) -> BlockBody {\n        match self {\n            Block::V1(v1) => BlockBody::V1(v1.take_body()),\n            Block::V2(v2) => BlockBody::V2(v2.take_body()),\n        }\n    }\n\n    /// Check the integrity of a block by hashing its body and header\n    pub fn verify(&self) -> Result<(), BlockValidationError> {\n        match self {\n            Block::V1(v1) => v1.verify(),\n            Block::V2(v2) => v2.verify(),\n        }\n    }\n\n    /// Returns the height of this block, i.e. 
the number of ancestors.\n    pub fn height(&self) -> u64 {\n        match self {\n            Block::V1(v1) => v1.header.height(),\n            Block::V2(v2) => v2.header.height(),\n        }\n    }\n\n    /// Returns the era ID in which this block was created.\n    pub fn era_id(&self) -> EraId {\n        match self {\n            Block::V1(v1) => v1.era_id(),\n            Block::V2(v2) => v2.era_id(),\n        }\n    }\n\n    /// Clones the era end, put it in the versioning enum, and returns it.\n    pub fn clone_era_end(&self) -> Option<EraEnd> {\n        match self {\n            Block::V1(v1) => v1.header().era_end().cloned().map(EraEnd::V1),\n            Block::V2(v2) => v2.header().era_end().cloned().map(EraEnd::V2),\n        }\n    }\n\n    /// Returns `true` if this block is the last one in the current era.\n    pub fn is_switch_block(&self) -> bool {\n        match self {\n            Block::V1(v1) => v1.header.is_switch_block(),\n            Block::V2(v2) => v2.header.is_switch_block(),\n        }\n    }\n\n    /// Returns `true` if this block is the first block of the chain, the genesis block.\n    pub fn is_genesis(&self) -> bool {\n        match self {\n            Block::V1(v1) => v1.header.is_genesis(),\n            Block::V2(v2) => v2.header.is_genesis(),\n        }\n    }\n\n    /// Returns the root hash of global state after the deploys in this block have been executed.\n    pub fn state_root_hash(&self) -> &Digest {\n        match self {\n            Block::V1(v1) => v1.header.state_root_hash(),\n            Block::V2(v2) => v2.header.state_root_hash(),\n        }\n    }\n\n    /// List of identifiers for finality signatures for a particular past block.\n    pub fn rewarded_signatures(&self) -> &RewardedSignatures {\n        match self {\n            Block::V1(_v1) => &rewarded_signatures::EMPTY,\n            Block::V2(v2) => v2.body.rewarded_signatures(),\n        }\n    }\n\n    /// Return the gas price for V2 block header.\n    pub fn 
maybe_current_gas_price(&self) -> Option<u8> {\n        match self {\n            Block::V1(_) => None,\n            Block::V2(v2) => Some(v2.header().current_gas_price()),\n        }\n    }\n\n    /// Returns the count of transactions within a block.\n    pub fn transaction_count(&self) -> u64 {\n        match self {\n            Block::V1(block) => {\n                (block.body.deploy_hashes().len() + block.body.transfer_hashes().len()) as u64\n            }\n            Block::V2(block_v2) => block_v2.all_transactions().count() as u64,\n        }\n    }\n\n    /// Returns a list of all transaction hashes in a block.\n    pub fn all_transaction_hashes(&self) -> impl Iterator<Item = TransactionHash> + '_ {\n        match self {\n            Block::V1(block) => Either::Left(\n                block\n                    .body\n                    .deploy_and_transfer_hashes()\n                    .map(TransactionHash::from),\n            ),\n            Block::V2(block_v2) => Either::Right(block_v2.all_transactions().copied()),\n        }\n    }\n\n    /// Returns the utilization of the block against a given chainspec.\n    #[cfg(feature = \"std\")]\n    pub fn block_utilization(&self, transaction_config: TransactionConfig) -> u64 {\n        match self {\n            Block::V1(_) => {\n                // We shouldnt be tracking this for legacy blocks\n                0\n            }\n            Block::V2(block_v2) => {\n                let has_hit_slot_limt = self.has_hit_slot_capacity(transaction_config.clone());\n                let per_block_capacity = transaction_config\n                    .transaction_v1_config\n                    .get_max_block_count();\n\n                if has_hit_slot_limt {\n                    100u64\n                } else {\n                    let num = block_v2.all_transactions().count() as u64;\n                    Ratio::new(num * 100, per_block_capacity).to_integer()\n                }\n            }\n        }\n    }\n\n    
/// Returns true if the block has reached capacity in any of its transaction limit.\n    #[cfg(feature = \"std\")]\n    pub fn has_hit_slot_capacity(&self, transaction_config: TransactionConfig) -> bool {\n        match self {\n            Block::V1(_) => false,\n            Block::V2(block_v2) => {\n                let mint_count = block_v2.mint().count();\n                if mint_count as u64\n                    >= transaction_config\n                        .transaction_v1_config\n                        .native_mint_lane\n                        .max_transaction_count()\n                {\n                    return true;\n                }\n\n                let auction_count = block_v2.auction().count();\n                if auction_count as u64\n                    >= transaction_config\n                        .transaction_v1_config\n                        .native_auction_lane\n                        .max_transaction_count()\n                {\n                    return true;\n                }\n\n                let install_upgrade_count = block_v2.install_upgrade().count();\n                if install_upgrade_count as u64\n                    >= transaction_config\n                        .transaction_v1_config\n                        .install_upgrade_lane\n                        .max_transaction_count()\n                {\n                    return true;\n                }\n\n                for (lane_id, transactions) in block_v2.body.transactions() {\n                    let transaction_count = transactions.len();\n                    if *lane_id < 2 {\n                        continue;\n                    };\n                    let max_transaction_count = transaction_config\n                        .transaction_v1_config\n                        .get_max_transaction_count(*lane_id);\n\n                    if transaction_count as u64 >= max_transaction_count {\n                        return true;\n                    }\n                }\n     
           false\n            }\n        }\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    #[cfg(feature = \"json-schema\")]\n    pub fn example() -> &'static Self {\n        &BLOCK\n    }\n}\n\nimpl Display for Block {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            formatter,\n            \"executed block #{}, {}, timestamp {}, {}, parent {}, post-state hash {}, body hash \\\n            {}, random bit {}, protocol version: {}\",\n            self.height(),\n            self.hash(),\n            self.timestamp(),\n            self.era_id(),\n            self.parent_hash().inner(),\n            self.state_root_hash(),\n            self.body_hash(),\n            self.random_bit(),\n            self.protocol_version()\n        )?;\n        if let Some(era_end) = self.clone_era_end() {\n            write!(formatter, \", era_end: {}\", era_end)?;\n        }\n        Ok(())\n    }\n}\n\nimpl ToBytes for Block {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        match self {\n            Block::V1(v1) => {\n                buffer.insert(0, BLOCK_V1_TAG);\n                buffer.extend(v1.to_bytes()?);\n            }\n            Block::V2(v2) => {\n                buffer.insert(0, BLOCK_V2_TAG);\n                buffer.extend(v2.to_bytes()?);\n            }\n        }\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        TAG_LENGTH\n            + match self {\n                Block::V1(v1) => v1.serialized_length(),\n                Block::V2(v2) => v2.serialized_length(),\n            }\n    }\n}\n\nimpl FromBytes for Block {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            BLOCK_V1_TAG => {\n                let (body, remainder): (BlockV1, 
_) = FromBytes::from_bytes(remainder)?;\n                Ok((Self::V1(body), remainder))\n            }\n            BLOCK_V2_TAG => {\n                let (body, remainder): (BlockV2, _) = FromBytes::from_bytes(remainder)?;\n                Ok((Self::V2(body), remainder))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\nimpl From<&BlockV2> for Block {\n    fn from(block: &BlockV2) -> Self {\n        Block::V2(block.clone())\n    }\n}\n\nimpl From<BlockV2> for Block {\n    fn from(block: BlockV2) -> Self {\n        Block::V2(block)\n    }\n}\n\nimpl From<&BlockV1> for Block {\n    fn from(block: &BlockV1) -> Self {\n        Block::V1(block.clone())\n    }\n}\n\nimpl From<BlockV1> for Block {\n    fn from(block: BlockV1) -> Self {\n        Block::V1(block)\n    }\n}\n\n#[cfg(all(feature = \"std\", feature = \"json-schema\"))]\nimpl From<JsonBlockWithSignatures> for Block {\n    fn from(block_with_signatures: JsonBlockWithSignatures) -> Self {\n        block_with_signatures.block\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{bytesrepr, testing::TestRng};\n\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        let block_v1 = TestBlockV1Builder::new().build(rng);\n        let block = Block::V1(block_v1);\n        bytesrepr::test_serialization_roundtrip(&block);\n\n        let block_v2 = TestBlockBuilder::new().build(rng);\n        let block = Block::V2(block_v2);\n        bytesrepr::test_serialization_roundtrip(&block);\n    }\n}\n"
  },
  {
    "path": "types/src/block_time.rs",
    "content": "use alloc::vec::Vec;\n\nuse crate::{\n    bytesrepr::{Error, FromBytes, ToBytes, U64_SERIALIZED_LENGTH},\n    CLType, CLTyped, TimeDiff, Timestamp,\n};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n/// The number of bytes in a serialized [`BlockTime`].\npub const BLOCKTIME_SERIALIZED_LENGTH: usize = U64_SERIALIZED_LENGTH;\n\n/// Holds epoch type.\n#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]\npub struct HoldsEpoch(Option<u64>);\n\nimpl HoldsEpoch {\n    /// No epoch is applicable.\n    pub const NOT_APPLICABLE: HoldsEpoch = HoldsEpoch(None);\n\n    /// Instance from block time.\n    pub fn from_block_time(block_time: BlockTime, hold_internal: TimeDiff) -> Self {\n        HoldsEpoch(Some(\n            block_time.value().saturating_sub(hold_internal.millis()),\n        ))\n    }\n\n    /// Instance from timestamp.\n    pub fn from_timestamp(timestamp: Timestamp, hold_internal: TimeDiff) -> Self {\n        HoldsEpoch(Some(\n            timestamp.millis().saturating_sub(hold_internal.millis()),\n        ))\n    }\n\n    /// Instance from milliseconds.\n    pub fn from_millis(timestamp_millis: u64, hold_internal_millis: u64) -> Self {\n        HoldsEpoch(Some(timestamp_millis.saturating_sub(hold_internal_millis)))\n    }\n\n    /// Returns the inner value.\n    pub fn value(&self) -> Option<u64> {\n        self.0\n    }\n}\n\n/// A newtype wrapping a [`u64`] which represents the block time.\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[derive(\n    Clone, Copy, Default, Debug, PartialEq, Eq, Ord, PartialOrd, Hash, Serialize, Deserialize,\n)]\npub struct BlockTime(u64);\n\nimpl BlockTime {\n    /// Constructs a `BlockTime`.\n    pub const fn new(value: u64) -> Self {\n        BlockTime(value)\n    }\n\n    /// Saturating integer subtraction. 
Computes `self - other`, saturating at `0` instead of\n    /// overflowing.\n    #[must_use]\n    pub fn saturating_sub(self, other: BlockTime) -> Self {\n        BlockTime(self.0.saturating_sub(other.0))\n    }\n\n    /// Returns inner value.\n    pub fn value(&self) -> u64 {\n        self.0\n    }\n}\n\nimpl From<BlockTime> for u64 {\n    fn from(blocktime: BlockTime) -> Self {\n        blocktime.0\n    }\n}\n\nimpl From<BlockTime> for Timestamp {\n    fn from(value: BlockTime) -> Self {\n        Timestamp::from(value.0)\n    }\n}\n\nimpl From<u64> for BlockTime {\n    fn from(value: u64) -> Self {\n        BlockTime(value)\n    }\n}\n\nimpl From<Timestamp> for BlockTime {\n    fn from(value: Timestamp) -> Self {\n        BlockTime(value.millis())\n    }\n}\n\nimpl ToBytes for BlockTime {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        BLOCKTIME_SERIALIZED_LENGTH\n    }\n}\n\nimpl FromBytes for BlockTime {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (time, rem) = FromBytes::from_bytes(bytes)?;\n        Ok((BlockTime::new(time), rem))\n    }\n}\n\nimpl CLTyped for BlockTime {\n    fn cl_type() -> CLType {\n        CLType::U64\n    }\n}\n"
  },
  {
    "path": "types/src/byte_code.rs",
    "content": "use alloc::{format, string::String, vec::Vec};\nuse core::{\n    array::TryFromSliceError,\n    convert::TryFrom,\n    fmt::{self, Debug, Display, Formatter},\n};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\n#[cfg(feature = \"json-schema\")]\nuse schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema};\nuse serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer};\n\nuse crate::{\n    addressable_entity, bytesrepr,\n    bytesrepr::{Bytes, Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    checksummed_hex, uref, CLType, CLTyped, HashAddr,\n};\n\nconst BYTE_CODE_MAX_DISPLAY_LEN: usize = 16;\nconst KEY_HASH_LENGTH: usize = 32;\nconst WASM_STRING_PREFIX: &str = \"byte-code-\";\n\nconst BYTE_CODE_PREFIX: &str = \"byte-code-\";\nconst V1_WASM_PREFIX: &str = \"v1-wasm-\";\nconst V2_WASM_PREFIX: &str = \"v2-wasm-\";\nconst EMPTY_PREFIX: &str = \"empty-\";\n\n/// Associated error type of `TryFrom<&[u8]>` for `ByteCodeHash`.\n#[derive(Debug)]\npub struct TryFromSliceForContractHashError(());\n\n#[derive(Debug)]\n#[non_exhaustive]\npub enum FromStrError {\n    InvalidPrefix,\n    Hex(base16::DecodeError),\n    Hash(TryFromSliceError),\n    AccountHash(addressable_entity::FromAccountHashStrError),\n    URef(uref::FromStrError),\n}\n\nimpl From<base16::DecodeError> for FromStrError {\n    fn from(error: base16::DecodeError) -> Self {\n        FromStrError::Hex(error)\n    }\n}\n\nimpl From<TryFromSliceError> for FromStrError {\n    fn from(error: TryFromSliceError) -> Self {\n        FromStrError::Hash(error)\n    }\n}\n\nimpl From<addressable_entity::FromAccountHashStrError> for FromStrError {\n    fn from(error: addressable_entity::FromAccountHashStrError) -> Self {\n        FromStrError::AccountHash(error)\n    }\n}\n\nimpl From<uref::FromStrError> for FromStrError {\n    fn from(error: 
uref::FromStrError) -> Self {\n        FromStrError::URef(error)\n    }\n}\n\nimpl Display for FromStrError {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        match self {\n            FromStrError::InvalidPrefix => write!(f, \"invalid prefix\"),\n            FromStrError::Hex(error) => write!(f, \"decode from hex: {}\", error),\n            FromStrError::Hash(error) => write!(f, \"hash from string error: {}\", error),\n            FromStrError::AccountHash(error) => {\n                write!(f, \"account hash from string error: {:?}\", error)\n            }\n            FromStrError::URef(error) => write!(f, \"uref from string error: {:?}\", error),\n        }\n    }\n}\n\n/// An address for ByteCode records stored in global state.\n#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub enum ByteCodeAddr {\n    /// An address for byte code to be executed against the V1 Casper execution engine.\n    V1CasperWasm(HashAddr),\n    /// An address for byte code to be executed against the V2 Casper execution engine.\n    V2CasperWasm(HashAddr),\n    /// An empty byte code record\n    Empty,\n}\n\nimpl ByteCodeAddr {\n    /// Constructs a new Byte code address for Wasm.\n    pub const fn new_wasm_addr(hash_addr: HashAddr) -> Self {\n        Self::V1CasperWasm(hash_addr)\n    }\n\n    /// Returns the tag of the byte code address.\n    pub fn tag(&self) -> ByteCodeKind {\n        match self {\n            Self::Empty => ByteCodeKind::Empty,\n            Self::V1CasperWasm(_) => ByteCodeKind::V1CasperWasm,\n            Self::V2CasperWasm(_) => ByteCodeKind::V2CasperWasm,\n        }\n    }\n\n    /// Formats the `ByteCodeAddr` for users getting and putting.\n    pub fn to_formatted_string(&self) -> String {\n        format!(\"{}\", self)\n    }\n\n    /// Parses a string formatted as per 
`Self::to_formatted_string()` into a\n    /// `ByteCodeAddr`.\n    pub fn from_formatted_string(input: &str) -> Result<Self, FromStrError> {\n        if let Some(byte_code) = input.strip_prefix(BYTE_CODE_PREFIX) {\n            let (addr_str, tag) = if let Some(str) = byte_code.strip_prefix(EMPTY_PREFIX) {\n                (str, ByteCodeKind::Empty)\n            } else if let Some(str) = byte_code.strip_prefix(V1_WASM_PREFIX) {\n                (str, ByteCodeKind::V1CasperWasm)\n            } else if let Some(str) = byte_code.strip_prefix(V2_WASM_PREFIX) {\n                (str, ByteCodeKind::V2CasperWasm)\n            } else {\n                return Err(FromStrError::InvalidPrefix);\n            };\n            let addr = checksummed_hex::decode(addr_str).map_err(FromStrError::Hex)?;\n            let byte_code_addr = HashAddr::try_from(addr.as_ref()).map_err(FromStrError::Hash)?;\n            return match tag {\n                ByteCodeKind::V1CasperWasm => Ok(ByteCodeAddr::V1CasperWasm(byte_code_addr)),\n                ByteCodeKind::V2CasperWasm => Ok(ByteCodeAddr::V2CasperWasm(byte_code_addr)),\n                ByteCodeKind::Empty => Ok(ByteCodeAddr::Empty),\n            };\n        }\n\n        Err(FromStrError::InvalidPrefix)\n    }\n}\n\nimpl ToBytes for ByteCodeAddr {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                Self::Empty => 0,\n                Self::V1CasperWasm(_) => KEY_HASH_LENGTH,\n                Self::V2CasperWasm(_) => KEY_HASH_LENGTH,\n            }\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        match self {\n            Self::Empty => writer.push(self.tag() as u8),\n            Self::V1CasperWasm(addr) => {\n                
writer.push(self.tag() as u8);\n                writer.extend(addr.to_bytes()?);\n            }\n            Self::V2CasperWasm(addr) => {\n                writer.push(self.tag() as u8);\n                writer.extend(addr.to_bytes()?);\n            }\n        }\n        Ok(())\n    }\n}\n\nimpl FromBytes for ByteCodeAddr {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (tag, remainder): (ByteCodeKind, &[u8]) = FromBytes::from_bytes(bytes)?;\n        match tag {\n            ByteCodeKind::Empty => Ok((ByteCodeAddr::Empty, remainder)),\n            ByteCodeKind::V1CasperWasm => {\n                let (addr, remainder) = HashAddr::from_bytes(remainder)?;\n                Ok((ByteCodeAddr::new_wasm_addr(addr), remainder))\n            }\n            ByteCodeKind::V2CasperWasm => {\n                let (addr, remainder) = HashAddr::from_bytes(remainder)?;\n                Ok((ByteCodeAddr::V2CasperWasm(addr), remainder))\n            }\n        }\n    }\n}\n\nimpl Display for ByteCodeAddr {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            ByteCodeAddr::V1CasperWasm(addr) => {\n                write!(\n                    f,\n                    \"{}{}{}\",\n                    BYTE_CODE_PREFIX,\n                    V1_WASM_PREFIX,\n                    base16::encode_lower(&addr)\n                )\n            }\n            ByteCodeAddr::V2CasperWasm(addr) => {\n                write!(\n                    f,\n                    \"{}{}{}\",\n                    BYTE_CODE_PREFIX,\n                    V2_WASM_PREFIX,\n                    base16::encode_lower(&addr)\n                )\n            }\n            ByteCodeAddr::Empty => {\n                write!(\n                    f,\n                    \"{}{}{}\",\n                    BYTE_CODE_PREFIX,\n                    EMPTY_PREFIX,\n                    base16::encode_lower(&[0u8; 32])\n                )\n            }\n        }\n    
}\n}\n\nimpl Debug for ByteCodeAddr {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            ByteCodeAddr::V1CasperWasm(addr) => {\n                write!(f, \"ByteCodeAddr::V1CasperWasm({:?})\", addr)\n            }\n            ByteCodeAddr::V2CasperWasm(addr) => {\n                write!(f, \"ByteCodeAddr::V2CasperWasm({:?})\", addr)\n            }\n            ByteCodeAddr::Empty => {\n                write!(f, \"ByteCodeAddr::Empty\")\n            }\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<ByteCodeAddr> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> ByteCodeAddr {\n        match rng.gen_range(0..=2) {\n            0 => ByteCodeAddr::Empty,\n            1 => ByteCodeAddr::V1CasperWasm(rng.gen()),\n            2 => ByteCodeAddr::V2CasperWasm(rng.gen()),\n            _ => unreachable!(),\n        }\n    }\n}\n\n/// A newtype wrapping a `HashAddr` which is the raw bytes of\n/// the ByteCodeHash\n#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct ByteCodeHash(HashAddr);\n\nimpl ByteCodeHash {\n    /// Constructs a new `ByteCodeHash` from the raw bytes of the contract wasm hash.\n    pub const fn new(value: HashAddr) -> ByteCodeHash {\n        ByteCodeHash(value)\n    }\n\n    /// Returns the raw bytes of the contract hash as an array.\n    pub fn value(&self) -> HashAddr {\n        self.0\n    }\n\n    /// Returns the raw bytes of the contract hash as a `slice`.\n    pub fn as_bytes(&self) -> &[u8] {\n        &self.0\n    }\n\n    /// Formats the `ByteCodeHash` for users getting and putting.\n    pub fn to_formatted_string(self) -> String {\n        format!(\"{}{}\", WASM_STRING_PREFIX, base16::encode_lower(&self.0),)\n    }\n\n    /// Parses a string formatted as per `Self::to_formatted_string()` into a\n    /// `ByteCodeHash`.\n    pub fn from_formatted_str(input: &str) -> 
Result<Self, FromStrError> {\n        let remainder = input\n            .strip_prefix(WASM_STRING_PREFIX)\n            .ok_or(FromStrError::InvalidPrefix)?;\n        let bytes = HashAddr::try_from(checksummed_hex::decode(remainder)?.as_ref())?;\n        Ok(ByteCodeHash(bytes))\n    }\n}\n\nimpl Default for ByteCodeHash {\n    fn default() -> Self {\n        Self::new([0u8; KEY_HASH_LENGTH])\n    }\n}\n\nimpl Display for ByteCodeHash {\n    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {\n        write!(f, \"{}\", base16::encode_lower(&self.0))\n    }\n}\n\nimpl Debug for ByteCodeHash {\n    fn fmt(&self, f: &mut Formatter) -> core::fmt::Result {\n        write!(f, \"ByteCodeHash({})\", base16::encode_lower(&self.0))\n    }\n}\n\nimpl CLTyped for ByteCodeHash {\n    fn cl_type() -> CLType {\n        CLType::ByteArray(KEY_HASH_LENGTH as u32)\n    }\n}\n\nimpl ToBytes for ByteCodeHash {\n    #[inline(always)]\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        self.0.to_bytes()\n    }\n\n    #[inline(always)]\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n\n    #[inline(always)]\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        self.0.write_bytes(writer)\n    }\n}\n\nimpl FromBytes for ByteCodeHash {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (bytes, rem) = FromBytes::from_bytes(bytes)?;\n        Ok((ByteCodeHash::new(bytes), rem))\n    }\n}\n\nimpl From<[u8; KEY_HASH_LENGTH]> for ByteCodeHash {\n    fn from(bytes: [u8; KEY_HASH_LENGTH]) -> Self {\n        ByteCodeHash(bytes)\n    }\n}\n\nimpl Serialize for ByteCodeHash {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            self.to_formatted_string().serialize(serializer)\n        } else {\n            self.0.serialize(serializer)\n        }\n    }\n}\n\nimpl<'de> Deserialize<'de> for ByteCodeHash {\n  
  fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        if deserializer.is_human_readable() {\n            let formatted_string = String::deserialize(deserializer)?;\n            ByteCodeHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom)\n        } else {\n            let bytes = HashAddr::deserialize(deserializer)?;\n            Ok(ByteCodeHash(bytes))\n        }\n    }\n}\n\nimpl AsRef<[u8]> for ByteCodeHash {\n    fn as_ref(&self) -> &[u8] {\n        self.0.as_ref()\n    }\n}\n\nimpl TryFrom<&[u8]> for ByteCodeHash {\n    type Error = TryFromSliceForContractHashError;\n\n    fn try_from(bytes: &[u8]) -> Result<Self, TryFromSliceForContractHashError> {\n        HashAddr::try_from(bytes)\n            .map(ByteCodeHash::new)\n            .map_err(|_| TryFromSliceForContractHashError(()))\n    }\n}\n\nimpl TryFrom<&Vec<u8>> for ByteCodeHash {\n    type Error = TryFromSliceForContractHashError;\n\n    fn try_from(bytes: &Vec<u8>) -> Result<Self, Self::Error> {\n        HashAddr::try_from(bytes as &[u8])\n            .map(ByteCodeHash::new)\n            .map_err(|_| TryFromSliceForContractHashError(()))\n    }\n}\n\n#[cfg(feature = \"json-schema\")]\nimpl JsonSchema for ByteCodeHash {\n    fn schema_name() -> String {\n        String::from(\"ByteCodeHash\")\n    }\n\n    fn json_schema(gen: &mut SchemaGenerator) -> Schema {\n        let schema = gen.subschema_for::<String>();\n        let mut schema_object = schema.into_object();\n        schema_object.metadata().description =\n            Some(\"The hash address of the contract wasm\".to_string());\n        schema_object.into()\n    }\n}\n\n/// The type of Byte code.\n#[repr(u8)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[derive(PartialEq, Eq, Clone, Copy, PartialOrd, Ord, Hash, Serialize, Deserialize)]\npub enum ByteCodeKind {\n    /// Empty byte code.\n    Empty = 0,\n    /// Byte 
code to be executed with the version 1 Casper execution engine.\n    V1CasperWasm = 1,\n    /// Byte code to be executed with the version 2 Casper execution engine.\n    V2CasperWasm = 2,\n}\n\nimpl ToBytes for ByteCodeKind {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        (*self as u8).to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        (*self as u8).write_bytes(writer)\n    }\n}\n\nimpl FromBytes for ByteCodeKind {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (byte_code_kind, remainder) = u8::from_bytes(bytes)?;\n        match byte_code_kind {\n            byte_code_kind if byte_code_kind == ByteCodeKind::Empty as u8 => {\n                Ok((ByteCodeKind::Empty, remainder))\n            }\n            byte_code_kind if byte_code_kind == ByteCodeKind::V1CasperWasm as u8 => {\n                Ok((ByteCodeKind::V1CasperWasm, remainder))\n            }\n            byte_code_kind if byte_code_kind == ByteCodeKind::V2CasperWasm as u8 => {\n                Ok((ByteCodeKind::V2CasperWasm, remainder))\n            }\n            _ => Err(Error::Formatting),\n        }\n    }\n}\n\nimpl Display for ByteCodeKind {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            ByteCodeKind::Empty => {\n                write!(f, \"empty\")\n            }\n            ByteCodeKind::V1CasperWasm => {\n                write!(f, \"v1-casper-wasm\")\n            }\n            ByteCodeKind::V2CasperWasm => {\n                write!(f, \"v2-casper-wasm\")\n            }\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<ByteCodeKind> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> ByteCodeKind {\n        match rng.gen_range(0..=2) {\n            0 => ByteCodeKind::Empty,\n            1 => ByteCodeKind::V1CasperWasm,\n  
          2 => ByteCodeKind::V2CasperWasm,\n            _ => unreachable!(),\n        }\n    }\n}\n\n/// A container for contract's Wasm bytes.\n#[derive(PartialEq, Eq, Clone, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct ByteCode {\n    kind: ByteCodeKind,\n    bytes: Bytes,\n}\n\nimpl Debug for ByteCode {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        if self.bytes.len() > BYTE_CODE_MAX_DISPLAY_LEN {\n            write!(\n                f,\n                \"ByteCode(0x{}...)\",\n                base16::encode_lower(&self.bytes[..BYTE_CODE_MAX_DISPLAY_LEN])\n            )\n        } else {\n            write!(f, \"ByteCode(0x{})\", base16::encode_lower(&self.bytes))\n        }\n    }\n}\n\nimpl ByteCode {\n    /// Creates new Wasm object from bytes.\n    pub fn new(kind: ByteCodeKind, bytes: Vec<u8>) -> Self {\n        ByteCode {\n            kind,\n            bytes: bytes.into(),\n        }\n    }\n\n    /// Consumes instance of [`ByteCode`] and returns its bytes.\n    pub fn take_bytes(self) -> Vec<u8> {\n        self.bytes.into()\n    }\n\n    /// Returns a slice of contained Wasm bytes.\n    pub fn bytes(&self) -> &[u8] {\n        self.bytes.as_ref()\n    }\n\n    /// Return the type of byte code.\n    pub fn kind(&self) -> ByteCodeKind {\n        self.kind\n    }\n}\n\nimpl ToBytes for ByteCode {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.kind.serialized_length() + self.bytes.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        self.kind.write_bytes(writer)?;\n        self.bytes.write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for ByteCode {\n    fn 
from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (kind, remainder) = ByteCodeKind::from_bytes(bytes)?;\n        let (bytes, remainder) = Bytes::from_bytes(remainder)?;\n        Ok((ByteCode { kind, bytes }, remainder))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use rand::RngCore;\n\n    use super::*;\n    use crate::testing::TestRng;\n\n    #[test]\n    fn debug_repr_of_short_wasm() {\n        const SIZE: usize = 8;\n        let wasm_bytes = vec![0; SIZE];\n        let byte_code = ByteCode::new(ByteCodeKind::V1CasperWasm, wasm_bytes);\n        assert_eq!(format!(\"{:?}\", byte_code), \"ByteCode(0x0000000000000000)\");\n    }\n\n    #[test]\n    fn debug_repr_of_long_wasm() {\n        const SIZE: usize = 65;\n        let wasm_bytes = vec![0; SIZE];\n        let byte_code = ByteCode::new(ByteCodeKind::V1CasperWasm, wasm_bytes);\n        // String output is less than the bytes itself\n        assert_eq!(\n            format!(\"{:?}\", byte_code),\n            \"ByteCode(0x00000000000000000000000000000000...)\"\n        );\n    }\n\n    #[test]\n    fn byte_code_bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        let byte_code = ByteCode::new(rng.gen(), vec![]);\n        bytesrepr::test_serialization_roundtrip(&byte_code);\n\n        let mut buffer = vec![0u8; rng.gen_range(1..100)];\n        rng.fill_bytes(buffer.as_mut());\n        let byte_code = ByteCode::new(rng.gen(), buffer);\n        bytesrepr::test_serialization_roundtrip(&byte_code);\n    }\n\n    #[test]\n    fn contract_wasm_hash_from_slice() {\n        let bytes: Vec<u8> = (0..32).collect();\n        let byte_code_hash = HashAddr::try_from(&bytes[..]).expect(\"should create byte code hash\");\n        let contract_hash = ByteCodeHash::new(byte_code_hash);\n        assert_eq!(&bytes, &contract_hash.as_bytes());\n    }\n\n    #[test]\n    fn contract_wasm_hash_from_str() {\n        let byte_code_hash = ByteCodeHash([3; KEY_HASH_LENGTH]);\n        let encoded = 
byte_code_hash.to_formatted_string();\n        let decoded = ByteCodeHash::from_formatted_str(&encoded).unwrap();\n        assert_eq!(byte_code_hash, decoded);\n\n        let invalid_prefix =\n            \"contractwasm-0000000000000000000000000000000000000000000000000000000000000000\";\n        assert!(ByteCodeHash::from_formatted_str(invalid_prefix).is_err());\n\n        let short_addr =\n            \"contract-wasm-00000000000000000000000000000000000000000000000000000000000000\";\n        assert!(ByteCodeHash::from_formatted_str(short_addr).is_err());\n\n        let long_addr =\n            \"contract-wasm-000000000000000000000000000000000000000000000000000000000000000000\";\n        assert!(ByteCodeHash::from_formatted_str(long_addr).is_err());\n\n        let invalid_hex =\n            \"contract-wasm-000000000000000000000000000000000000000000000000000000000000000g\";\n        assert!(ByteCodeHash::from_formatted_str(invalid_hex).is_err());\n    }\n\n    #[test]\n    fn byte_code_addr_from_str() {\n        let empty_addr = ByteCodeAddr::Empty;\n        let encoded = empty_addr.to_formatted_string();\n        let decoded = ByteCodeAddr::from_formatted_string(&encoded).unwrap();\n        assert_eq!(empty_addr, decoded);\n\n        let wasm_addr = ByteCodeAddr::V1CasperWasm([3; 32]);\n        let encoded = wasm_addr.to_formatted_string();\n        let decoded = ByteCodeAddr::from_formatted_string(&encoded).unwrap();\n        assert_eq!(wasm_addr, decoded);\n    }\n\n    #[test]\n    fn byte_code_serialization_roundtrip() {\n        let rng = &mut TestRng::new();\n        let wasm_addr = ByteCodeAddr::V1CasperWasm(rng.gen());\n        bytesrepr::test_serialization_roundtrip(&wasm_addr);\n\n        let empty_addr = ByteCodeAddr::Empty;\n        bytesrepr::test_serialization_roundtrip(&empty_addr);\n    }\n\n    #[test]\n    fn contract_wasm_hash_bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        let byte_code_hash = ByteCodeHash(rng.gen());\n   
     bytesrepr::test_serialization_roundtrip(&byte_code_hash);\n    }\n\n    #[test]\n    fn contract_wasm_hash_bincode_roundtrip() {\n        let rng = &mut TestRng::new();\n        let byte_code_hash = ByteCodeHash(rng.gen());\n        let serialized = bincode::serialize(&byte_code_hash).unwrap();\n        let deserialized = bincode::deserialize(&serialized).unwrap();\n        assert_eq!(byte_code_hash, deserialized)\n    }\n\n    #[test]\n    fn contract_wasm_hash_json_roundtrip() {\n        let rng = &mut TestRng::new();\n        let byte_code_hash = ByteCodeHash(rng.gen());\n        let json_string = serde_json::to_string_pretty(&byte_code_hash).unwrap();\n        let decoded = serde_json::from_str(&json_string).unwrap();\n        assert_eq!(byte_code_hash, decoded)\n    }\n}\n"
  },
  {
    "path": "types/src/bytesrepr/bytes.rs",
    "content": "use alloc::{\n    string::String,\n    vec::{IntoIter, Vec},\n};\nuse core::{\n    cmp, fmt,\n    iter::FromIterator,\n    ops::{Deref, Index, Range, RangeFrom, RangeFull, RangeTo},\n    slice,\n};\n\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{\n    de::{Error as SerdeError, SeqAccess, Visitor},\n    Deserialize, Deserializer, Serialize, Serializer,\n};\n\nuse super::{Error, FromBytes, ToBytes};\nuse crate::{checksummed_hex, CLType, CLTyped};\n\n/// A newtype wrapper for bytes that has efficient serialization routines.\n#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Debug, Default, Hash)]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"Hex-encoded bytes.\")\n)]\n#[rustfmt::skip]\npub struct Bytes(\n    #[cfg_attr(feature = \"json-schema\", schemars(skip, with = \"String\"))]\n    Vec<u8>\n);\n\nimpl Bytes {\n    /// Constructs a new, empty vector of bytes.\n    pub fn new() -> Bytes {\n        Bytes::default()\n    }\n\n    /// Returns reference to inner container.\n    #[inline]\n    pub fn inner_bytes(&self) -> &Vec<u8> {\n        &self.0\n    }\n\n    /// Extracts a slice containing the entire vector.\n    pub fn as_slice(&self) -> &[u8] {\n        self\n    }\n\n    /// Consumes self and returns the inner bytes.\n    pub fn take_inner(self) -> Vec<u8> {\n        self.0\n    }\n}\n\nimpl Deref for Bytes {\n    type Target = [u8];\n\n    fn deref(&self) -> &Self::Target {\n        self.0.deref()\n    }\n}\n\nimpl From<Vec<u8>> for Bytes {\n    fn from(vec: Vec<u8>) -> Self {\n        Self(vec)\n    }\n}\n\nimpl From<Bytes> for Vec<u8> {\n    fn from(bytes: Bytes) -> Self {\n        bytes.0\n    }\n}\n\nimpl From<&[u8]> for Bytes {\n    fn from(bytes: &[u8]) -> Self {\n        Self(bytes.to_vec())\n    }\n}\n\nimpl CLTyped for Bytes {\n    fn cl_type() -> CLType {\n        
<Vec<u8>>::cl_type()\n    }\n}\n\nimpl AsRef<[u8]> for Bytes {\n    fn as_ref(&self) -> &[u8] {\n        self.0.as_ref()\n    }\n}\n\nimpl ToBytes for Bytes {\n    #[inline(always)]\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        super::vec_u8_to_bytes(&self.0)\n    }\n\n    #[inline(always)]\n    fn into_bytes(self) -> Result<Vec<u8>, Error> {\n        super::vec_u8_to_bytes(&self.0)\n    }\n\n    #[inline(always)]\n    fn serialized_length(&self) -> usize {\n        super::vec_u8_serialized_length(&self.0)\n    }\n\n    #[inline(always)]\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        super::write_u8_slice(self.as_slice(), writer)\n    }\n}\n\nimpl FromBytes for Bytes {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), super::Error> {\n        let (size, remainder) = u32::from_bytes(bytes)?;\n        let (result, remainder) = super::safe_split_at(remainder, size as usize)?;\n        Ok((Bytes(result.to_vec()), remainder))\n    }\n\n    fn from_vec(stream: Vec<u8>) -> Result<(Self, Vec<u8>), Error> {\n        let (size, mut stream) = u32::from_vec(stream)?;\n\n        if size as usize > stream.len() {\n            Err(Error::EarlyEndOfStream)\n        } else {\n            let remainder = stream.split_off(size as usize);\n            Ok((Bytes(stream), remainder))\n        }\n    }\n}\n\nimpl Index<usize> for Bytes {\n    type Output = u8;\n\n    fn index(&self, index: usize) -> &u8 {\n        let Bytes(ref dat) = self;\n        &dat[index]\n    }\n}\n\nimpl Index<Range<usize>> for Bytes {\n    type Output = [u8];\n\n    fn index(&self, index: Range<usize>) -> &[u8] {\n        let Bytes(dat) = self;\n        &dat[index]\n    }\n}\n\nimpl Index<RangeTo<usize>> for Bytes {\n    type Output = [u8];\n\n    fn index(&self, index: RangeTo<usize>) -> &[u8] {\n        let Bytes(dat) = self;\n        &dat[index]\n    }\n}\n\nimpl Index<RangeFrom<usize>> for Bytes {\n    type Output = [u8];\n\n    fn index(&self, index: 
RangeFrom<usize>) -> &[u8] {\n        let Bytes(dat) = self;\n        &dat[index]\n    }\n}\n\nimpl Index<RangeFull> for Bytes {\n    type Output = [u8];\n\n    fn index(&self, _: RangeFull) -> &[u8] {\n        let Bytes(dat) = self;\n        &dat[..]\n    }\n}\n\nimpl FromIterator<u8> for Bytes {\n    #[inline]\n    fn from_iter<I: IntoIterator<Item = u8>>(iter: I) -> Bytes {\n        let vec = Vec::from_iter(iter);\n        Bytes(vec)\n    }\n}\n\nimpl<'a> IntoIterator for &'a Bytes {\n    type Item = &'a u8;\n\n    type IntoIter = slice::Iter<'a, u8>;\n\n    fn into_iter(self) -> Self::IntoIter {\n        self.0.iter()\n    }\n}\n\nimpl IntoIterator for Bytes {\n    type Item = u8;\n\n    type IntoIter = IntoIter<u8>;\n\n    fn into_iter(self) -> Self::IntoIter {\n        self.0.into_iter()\n    }\n}\n\n#[cfg(feature = \"datasize\")]\nimpl datasize::DataSize for Bytes {\n    const IS_DYNAMIC: bool = true;\n\n    const STATIC_HEAP_SIZE: usize = 0;\n\n    fn estimate_heap_size(&self) -> usize {\n        self.0.capacity() * size_of::<u8>()\n    }\n}\n\nconst RANDOM_BYTES_MAX_LENGTH: usize = 100;\n\nimpl Distribution<Bytes> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Bytes {\n        let len = rng.gen_range(0..RANDOM_BYTES_MAX_LENGTH);\n        let mut result = Vec::with_capacity(len);\n        for _ in 0..len {\n            result.push(rng.gen());\n        }\n        result.into()\n    }\n}\n\nstruct BytesVisitor;\n\nimpl<'de> Visitor<'de> for BytesVisitor {\n    type Value = Bytes;\n\n    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {\n        formatter.write_str(\"byte array\")\n    }\n\n    fn visit_seq<V>(self, mut visitor: V) -> Result<Bytes, V::Error>\n    where\n        V: SeqAccess<'de>,\n    {\n        let len = cmp::min(visitor.size_hint().unwrap_or(0), 4096);\n        let mut bytes = Vec::with_capacity(len);\n\n        while let Some(b) = visitor.next_element()? 
{\n            bytes.push(b);\n        }\n\n        Ok(Bytes::from(bytes))\n    }\n\n    fn visit_bytes<E>(self, v: &[u8]) -> Result<Bytes, E>\n    where\n        E: SerdeError,\n    {\n        Ok(Bytes::from(v))\n    }\n\n    fn visit_byte_buf<E>(self, v: Vec<u8>) -> Result<Bytes, E>\n    where\n        E: SerdeError,\n    {\n        Ok(Bytes::from(v))\n    }\n\n    fn visit_str<E>(self, v: &str) -> Result<Bytes, E>\n    where\n        E: SerdeError,\n    {\n        Ok(Bytes::from(v.as_bytes()))\n    }\n\n    fn visit_string<E>(self, v: String) -> Result<Bytes, E>\n    where\n        E: SerdeError,\n    {\n        Ok(Bytes::from(v.into_bytes()))\n    }\n}\n\nimpl<'de> Deserialize<'de> for Bytes {\n    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>\n    where\n        D: Deserializer<'de>,\n    {\n        if deserializer.is_human_readable() {\n            let hex_string = String::deserialize(deserializer)?;\n            checksummed_hex::decode(hex_string)\n                .map(Bytes)\n                .map_err(SerdeError::custom)\n        } else {\n            let bytes = deserializer.deserialize_byte_buf(BytesVisitor)?;\n            Ok(bytes)\n        }\n    }\n}\n\nimpl Serialize for Bytes {\n    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>\n    where\n        S: Serializer,\n    {\n        if serializer.is_human_readable() {\n            base16::encode_lower(&self.0).serialize(serializer)\n        } else {\n            serializer.serialize_bytes(&self.0)\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::bytesrepr::{self, Error, FromBytes, ToBytes, U32_SERIALIZED_LENGTH};\n    use alloc::vec::Vec;\n\n    use serde_json::json;\n    use serde_test::{assert_tokens, Configure, Token};\n\n    use super::Bytes;\n\n    const TRUTH: &[u8] = &[0xde, 0xad, 0xbe, 0xef];\n\n    #[test]\n    fn vec_u8_from_bytes() {\n        let data: Bytes = vec![1, 2, 3, 4, 5].into();\n        let data_bytes = data.to_bytes().unwrap();\n      
  assert!(Bytes::from_bytes(&data_bytes[..U32_SERIALIZED_LENGTH / 2]).is_err());\n        assert!(Bytes::from_bytes(&data_bytes[..U32_SERIALIZED_LENGTH]).is_err());\n        assert!(Bytes::from_bytes(&data_bytes[..U32_SERIALIZED_LENGTH + 2]).is_err());\n    }\n\n    #[test]\n    fn should_serialize_deserialize_bytes() {\n        let data: Bytes = vec![1, 2, 3, 4, 5].into();\n        bytesrepr::test_serialization_roundtrip(&data);\n    }\n\n    #[test]\n    fn should_fail_to_serialize_deserialize_malicious_bytes() {\n        let data: Bytes = vec![1, 2, 3, 4, 5].into();\n        let mut serialized = data.to_bytes().expect(\"should serialize data\");\n        serialized = serialized[..serialized.len() - 1].to_vec();\n        let res: Result<(_, &[u8]), Error> = Bytes::from_bytes(&serialized);\n        assert_eq!(res.unwrap_err(), Error::EarlyEndOfStream);\n    }\n\n    #[test]\n    fn should_serialize_deserialize_bytes_and_keep_rem() {\n        let data: Bytes = vec![1, 2, 3, 4, 5].into();\n        let expected_rem: Vec<u8> = vec![6, 7, 8, 9, 10];\n        let mut serialized = data.to_bytes().expect(\"should serialize data\");\n        serialized.extend(&expected_rem);\n        let (deserialized, rem): (Bytes, &[u8]) =\n            FromBytes::from_bytes(&serialized).expect(\"should deserialize data\");\n        assert_eq!(data, deserialized);\n        assert_eq!(&rem, &expected_rem);\n    }\n\n    #[test]\n    fn should_ser_de_human_readable() {\n        let truth = vec![0xde, 0xad, 0xbe, 0xef];\n\n        let bytes_ser: Bytes = truth.clone().into();\n\n        let json_object = serde_json::to_value(bytes_ser).unwrap();\n        assert_eq!(json_object, json!(\"deadbeef\"));\n\n        let bytes_de: Bytes = serde_json::from_value(json_object).unwrap();\n        assert_eq!(bytes_de, Bytes::from(truth));\n    }\n\n    #[test]\n    fn should_ser_de_readable() {\n        let truth: Bytes = TRUTH.into();\n        assert_tokens(&truth.readable(), 
&[Token::Str(\"deadbeef\")]);\n    }\n\n    #[test]\n    fn should_ser_de_compact() {\n        let truth: Bytes = TRUTH.into();\n        assert_tokens(&truth.compact(), &[Token::Bytes(TRUTH)]);\n    }\n}\n\n#[cfg(test)]\npub mod gens {\n    use super::Bytes;\n    use proptest::{\n        collection::{vec, SizeRange},\n        prelude::*,\n    };\n\n    pub fn bytes_arb(size: impl Into<SizeRange>) -> impl Strategy<Value = Bytes> {\n        vec(any::<u8>(), size).prop_map(Bytes::from)\n    }\n}\n"
  },
  {
    "path": "types/src/bytesrepr.rs",
    "content": "//! Contains serialization and deserialization code for types used throughout the system.\nmod bytes;\n\nuse alloc::{\n    boxed::Box,\n    collections::{BTreeMap, BTreeSet, VecDeque},\n    str,\n    string::String,\n    vec,\n    vec::Vec,\n};\n#[cfg(debug_assertions)]\nuse core::any;\nuse core::{\n    convert::TryInto,\n    fmt::{self, Display, Formatter},\n};\n#[cfg(feature = \"std\")]\nuse std::error::Error as StdError;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse num_integer::Integer;\nuse num_rational::Ratio;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\npub use bytes::Bytes;\n\n/// The number of bytes in a serialized `()`.\npub const UNIT_SERIALIZED_LENGTH: usize = 0;\n/// The number of bytes in a serialized `bool`.\npub const BOOL_SERIALIZED_LENGTH: usize = 1;\n/// The number of bytes in a serialized `i32`.\npub const I32_SERIALIZED_LENGTH: usize = size_of::<i32>();\n/// The number of bytes in a serialized `i64`.\npub const I64_SERIALIZED_LENGTH: usize = size_of::<i64>();\n/// The number of bytes in a serialized `u8`.\npub const U8_SERIALIZED_LENGTH: usize = size_of::<u8>();\n/// The number of bytes in a serialized `u16`.\npub const U16_SERIALIZED_LENGTH: usize = size_of::<u16>();\n/// The number of bytes in a serialized `u32`.\npub const U32_SERIALIZED_LENGTH: usize = size_of::<u32>();\n/// The number of bytes in a serialized `u64`.\npub const U64_SERIALIZED_LENGTH: usize = size_of::<u64>();\n/// The number of bytes in a serialized [`U128`](crate::U128).\npub const U128_SERIALIZED_LENGTH: usize = size_of::<u128>();\n/// The number of bytes in a serialized [`U256`](crate::U256).\npub const U256_SERIALIZED_LENGTH: usize = U128_SERIALIZED_LENGTH * 2;\n/// The number of bytes in a serialized [`U512`](crate::U512).\npub const U512_SERIALIZED_LENGTH: usize = U256_SERIALIZED_LENGTH * 2;\n/// The tag representing a `None` value.\npub const OPTION_NONE_TAG: u8 = 0;\n/// The 
tag representing a `Some` value.\npub const OPTION_SOME_TAG: u8 = 1;\n/// The tag representing an `Err` value.\npub const RESULT_ERR_TAG: u8 = 0;\n/// The tag representing an `Ok` value.\npub const RESULT_OK_TAG: u8 = 1;\n\n/// A type which can be serialized to a `Vec<u8>`.\npub trait ToBytes {\n    /// Serializes `&self` to a `Vec<u8>`.\n    fn to_bytes(&self) -> Result<Vec<u8>, Error>;\n    /// Consumes `self` and serializes to a `Vec<u8>`.\n    fn into_bytes(self) -> Result<Vec<u8>, Error>\n    where\n        Self: Sized,\n    {\n        self.to_bytes()\n    }\n    /// Returns the length of the `Vec<u8>` which would be returned from a successful call to\n    /// `to_bytes()` or `into_bytes()`.  The data is not actually serialized, so this call is\n    /// relatively cheap.\n    fn serialized_length(&self) -> usize;\n\n    /// Writes `&self` into a mutable `writer`.\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        writer.extend(self.to_bytes()?);\n        Ok(())\n    }\n}\n\n/// A type which can be deserialized from a `Vec<u8>`.\npub trait FromBytes: Sized {\n    /// Deserializes the slice into `Self`.\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error>;\n\n    /// Deserializes the `Vec<u8>` into `Self`.\n    fn from_vec(bytes: Vec<u8>) -> Result<(Self, Vec<u8>), Error> {\n        Self::from_bytes(bytes.as_slice()).map(|(x, remainder)| (x, Vec::from(remainder)))\n    }\n}\n\n/// Returns a `Vec<u8>` initialized with sufficient capacity to hold `to_be_serialized` after\n/// serialization.\npub fn unchecked_allocate_buffer<T: ToBytes>(to_be_serialized: &T) -> Vec<u8> {\n    let serialized_length = to_be_serialized.serialized_length();\n    Vec::with_capacity(serialized_length)\n}\n\n/// Returns a `Vec<u8>` initialized with sufficient capacity to hold `to_be_serialized` after\n/// serialization, or an error if the capacity would exceed `u32::MAX`.\npub fn allocate_buffer<T: ToBytes>(to_be_serialized: &T) -> Result<Vec<u8>, 
Error> {\n    let serialized_length = to_be_serialized.serialized_length();\n    allocate_buffer_for_size(serialized_length)\n}\n\n/// Returns a `Vec<u8>` initialized with sufficient capacity to hold `expected_size` bytes,\n/// or an error if the capacity would exceed `u32::MAX`.\npub fn allocate_buffer_for_size(expected_size: usize) -> Result<Vec<u8>, Error> {\n    if expected_size > u32::MAX as usize {\n        return Err(Error::OutOfMemory);\n    }\n    Ok(Vec::with_capacity(expected_size))\n}\n\n/// Serialization and deserialization errors.\n#[derive(Copy, Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(rename = \"BytesreprError\")\n)]\n//Default is needed only in testing to meet EnumIter needs\n#[cfg_attr(any(feature = \"testing\", test), derive(Default))]\n#[repr(u8)]\n#[non_exhaustive]\npub enum Error {\n    /// Early end of stream while deserializing.\n    #[cfg_attr(any(feature = \"testing\", test), default)]\n    EarlyEndOfStream = 0,\n    /// Formatting error while deserializing.\n    Formatting,\n    /// Not all input bytes were consumed in [`deserialize`].\n    LeftOverBytes,\n    /// Out of memory error.\n    OutOfMemory,\n    /// No serialized representation is available for a value.\n    NotRepresentable,\n    /// Exceeded a recursion depth limit.\n    ExceededRecursionDepth,\n}\n\nimpl Display for Error {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            Error::EarlyEndOfStream => {\n                formatter.write_str(\"Deserialization error: early end of stream\")\n            }\n            Error::Formatting => formatter.write_str(\"Deserialization error: formatting\"),\n            Error::LeftOverBytes => formatter.write_str(\"Deserialization error: left-over bytes\"),\n            Error::OutOfMemory => formatter.write_str(\"Serialization error: out of 
memory\"),\n            Error::NotRepresentable => {\n                formatter.write_str(\"Serialization error: value is not representable.\")\n            }\n            Error::ExceededRecursionDepth => formatter.write_str(\"exceeded recursion depth\"),\n        }\n    }\n}\n\nimpl ToBytes for Error {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        (*self as u8).to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        (*self as u8).write_bytes(writer)\n    }\n}\n\nimpl FromBytes for Error {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (value, remainder) = u8::from_bytes(bytes)?;\n        match value {\n            value if value == Error::EarlyEndOfStream as u8 => {\n                Ok((Error::EarlyEndOfStream, remainder))\n            }\n            value if value == Error::Formatting as u8 => Ok((Error::Formatting, remainder)),\n            value if value == Error::LeftOverBytes as u8 => Ok((Error::LeftOverBytes, remainder)),\n            value if value == Error::OutOfMemory as u8 => Ok((Error::OutOfMemory, remainder)),\n            value if value == Error::NotRepresentable as u8 => {\n                Ok((Error::NotRepresentable, remainder))\n            }\n            value if value == Error::ExceededRecursionDepth as u8 => {\n                Ok((Error::ExceededRecursionDepth, remainder))\n            }\n            _ => Err(Error::Formatting),\n        }\n    }\n}\n\n#[cfg(feature = \"std\")]\nimpl StdError for Error {}\n\n/// Deserializes `bytes` into an instance of `T`.\n///\n/// Returns an error if the bytes cannot be deserialized into `T` or if not all of the input bytes\n/// are consumed in the operation.\npub fn deserialize<T: FromBytes>(bytes: Vec<u8>) -> Result<T, Error> {\n    let (t, remainder) = T::from_bytes(&bytes)?;\n    if remainder.is_empty() {\n        Ok(t)\n    } else 
{\n        Err(Error::LeftOverBytes)\n    }\n}\n\n/// Deserializes a slice of bytes into an instance of `T`.\n///\n/// Returns an error if the bytes cannot be deserialized into `T` or if not all of the input bytes\n/// are consumed in the operation.\npub fn deserialize_from_slice<I: AsRef<[u8]>, O: FromBytes>(bytes: I) -> Result<O, Error> {\n    let (t, remainder) = O::from_bytes(bytes.as_ref())?;\n    if remainder.is_empty() {\n        Ok(t)\n    } else {\n        Err(Error::LeftOverBytes)\n    }\n}\n\n/// Serializes `t` into a `Vec<u8>`.\npub fn serialize(t: impl ToBytes) -> Result<Vec<u8>, Error> {\n    t.into_bytes()\n}\n\n/// Safely splits the slice at the given point.\npub(crate) fn safe_split_at(bytes: &[u8], n: usize) -> Result<(&[u8], &[u8]), Error> {\n    if n > bytes.len() {\n        Err(Error::EarlyEndOfStream)\n    } else {\n        Ok(bytes.split_at(n))\n    }\n}\n\nimpl ToBytes for () {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        Ok(Vec::new())\n    }\n\n    fn serialized_length(&self) -> usize {\n        UNIT_SERIALIZED_LENGTH\n    }\n}\n\nimpl FromBytes for () {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        Ok(((), bytes))\n    }\n}\n\nimpl ToBytes for bool {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        u8::from(*self).to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        BOOL_SERIALIZED_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        writer.push(*self as u8);\n        Ok(())\n    }\n}\n\nimpl FromBytes for bool {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        match bytes.split_first() {\n            None => Err(Error::EarlyEndOfStream),\n            Some((byte, rem)) => match byte {\n                1 => Ok((true, rem)),\n                0 => Ok((false, rem)),\n                _ => Err(Error::Formatting),\n            },\n        }\n    }\n}\n\nimpl ToBytes for u8 {\n    fn to_bytes(&self) 
-> Result<Vec<u8>, Error> {\n        Ok(vec![*self])\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        writer.push(*self);\n        Ok(())\n    }\n}\n\nimpl FromBytes for u8 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        match bytes.split_first() {\n            None => Err(Error::EarlyEndOfStream),\n            Some((byte, rem)) => Ok((*byte, rem)),\n        }\n    }\n}\n\nimpl ToBytes for i32 {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        Ok(self.to_le_bytes().to_vec())\n    }\n\n    fn serialized_length(&self) -> usize {\n        I32_SERIALIZED_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        writer.extend_from_slice(&self.to_le_bytes());\n        Ok(())\n    }\n}\n\nimpl FromBytes for i32 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let mut result = [0u8; I32_SERIALIZED_LENGTH];\n        let (bytes, remainder) = safe_split_at(bytes, I32_SERIALIZED_LENGTH)?;\n        result.copy_from_slice(bytes);\n        Ok((<i32>::from_le_bytes(result), remainder))\n    }\n}\n\nimpl ToBytes for i64 {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        Ok(self.to_le_bytes().to_vec())\n    }\n\n    fn serialized_length(&self) -> usize {\n        I64_SERIALIZED_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        writer.extend_from_slice(&self.to_le_bytes());\n        Ok(())\n    }\n}\n\nimpl FromBytes for i64 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let mut result = [0u8; I64_SERIALIZED_LENGTH];\n        let (bytes, remainder) = safe_split_at(bytes, I64_SERIALIZED_LENGTH)?;\n        result.copy_from_slice(bytes);\n        Ok((<i64>::from_le_bytes(result), remainder))\n    }\n}\n\nimpl ToBytes for u16 {\n    fn to_bytes(&self) -> Result<Vec<u8>, 
Error> {\n        Ok(self.to_le_bytes().to_vec())\n    }\n\n    fn serialized_length(&self) -> usize {\n        U16_SERIALIZED_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        writer.extend_from_slice(&self.to_le_bytes());\n        Ok(())\n    }\n}\n\nimpl FromBytes for u16 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let mut result = [0u8; U16_SERIALIZED_LENGTH];\n        let (bytes, remainder) = safe_split_at(bytes, U16_SERIALIZED_LENGTH)?;\n        result.copy_from_slice(bytes);\n        Ok((<u16>::from_le_bytes(result), remainder))\n    }\n}\n\nimpl ToBytes for u32 {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        Ok(self.to_le_bytes().to_vec())\n    }\n\n    fn serialized_length(&self) -> usize {\n        U32_SERIALIZED_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        writer.extend_from_slice(&self.to_le_bytes());\n        Ok(())\n    }\n}\n\nimpl FromBytes for u32 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let mut result = [0u8; U32_SERIALIZED_LENGTH];\n        let (bytes, remainder) = safe_split_at(bytes, U32_SERIALIZED_LENGTH)?;\n        result.copy_from_slice(bytes);\n        Ok((<u32>::from_le_bytes(result), remainder))\n    }\n}\n\nimpl ToBytes for u64 {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        Ok(self.to_le_bytes().to_vec())\n    }\n\n    fn serialized_length(&self) -> usize {\n        U64_SERIALIZED_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        writer.extend_from_slice(&self.to_le_bytes());\n        Ok(())\n    }\n}\n\nimpl FromBytes for u64 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let mut result = [0u8; U64_SERIALIZED_LENGTH];\n        let (bytes, remainder) = safe_split_at(bytes, U64_SERIALIZED_LENGTH)?;\n        result.copy_from_slice(bytes);\n        
Ok((<u64>::from_le_bytes(result), remainder))\n    }\n}\n\nimpl ToBytes for u128 {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        Ok(self.to_le_bytes().to_vec())\n    }\n\n    fn serialized_length(&self) -> usize {\n        U128_SERIALIZED_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        writer.extend_from_slice(&self.to_le_bytes());\n        Ok(())\n    }\n}\n\nimpl FromBytes for u128 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let mut result = [0u8; U128_SERIALIZED_LENGTH];\n        let (bytes, remainder) = safe_split_at(bytes, U128_SERIALIZED_LENGTH)?;\n        result.copy_from_slice(bytes);\n        Ok((<u128>::from_le_bytes(result), remainder))\n    }\n}\n\nimpl ToBytes for String {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let bytes = self.as_bytes();\n        u8_slice_to_bytes(bytes)\n    }\n\n    fn serialized_length(&self) -> usize {\n        u8_slice_serialized_length(self.as_bytes())\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        write_u8_slice(self.as_bytes(), writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for String {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (size, remainder) = u32::from_bytes(bytes)?;\n        let (str_bytes, remainder) = safe_split_at(remainder, size as usize)?;\n        let result = String::from_utf8(str_bytes.to_vec()).map_err(|_| Error::Formatting)?;\n        Ok((result, remainder))\n    }\n}\n\nfn ensure_efficient_serialization<T>() {\n    #[cfg(debug_assertions)]\n    debug_assert_ne!(\n        any::type_name::<T>(),\n        any::type_name::<u8>(),\n        \"You should use `casper_types::bytesrepr::Bytes` newtype wrapper instead of `Vec<u8>` for efficiency\"\n    );\n}\n\nfn iterator_serialized_length<'a, T: 'a + ToBytes>(ts: impl Iterator<Item = &'a T>) -> usize {\n    U32_SERIALIZED_LENGTH + 
ts.map(ToBytes::serialized_length).sum::<usize>()\n}\n\nimpl<T: ToBytes> ToBytes for Vec<T> {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        ensure_efficient_serialization::<T>();\n\n        let mut result = Vec::with_capacity(self.serialized_length());\n        let length_32: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?;\n        result.append(&mut length_32.to_bytes()?);\n\n        for item in self.iter() {\n            result.append(&mut item.to_bytes()?);\n        }\n\n        Ok(result)\n    }\n\n    fn into_bytes(self) -> Result<Vec<u8>, Error> {\n        ensure_efficient_serialization::<T>();\n\n        let mut result = allocate_buffer(&self)?;\n        let length_32: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?;\n        result.append(&mut length_32.to_bytes()?);\n\n        for item in self {\n            result.append(&mut item.into_bytes()?);\n        }\n\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        iterator_serialized_length(self.iter())\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        let length_32: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?;\n        writer.extend_from_slice(&length_32.to_le_bytes());\n        for item in self.iter() {\n            item.write_bytes(writer)?;\n        }\n        Ok(())\n    }\n}\n\nfn vec_from_vec<T: FromBytes>(bytes: Vec<u8>) -> Result<(Vec<T>, Vec<u8>), Error> {\n    ensure_efficient_serialization::<T>();\n\n    Vec::<T>::from_bytes(bytes.as_slice()).map(|(x, remainder)| (x, Vec::from(remainder)))\n}\n\n/// Returns a conservative estimate for the preallocated number of elements for a new `Vec<T>`.\n///\n/// `hint` indicates the desired upper limit in heap size (in bytes), which is itself bounded by\n/// 4096 bytes. 
This function will never return less than 1.\n#[inline]\nfn cautious<T>(hint: usize) -> usize {\n    let el_size = size_of::<T>();\n    core::cmp::max(core::cmp::min(hint, 4096 / el_size), 1)\n}\n\nimpl<T: FromBytes> FromBytes for Vec<T> {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        ensure_efficient_serialization::<T>();\n\n        let (count, mut stream) = u32::from_bytes(bytes)?;\n\n        if count == 0 {\n            return Ok((Vec::new(), stream));\n        }\n\n        let mut result = Vec::with_capacity(cautious::<T>(count as usize));\n\n        for _ in 0..count {\n            let (value, remainder) = T::from_bytes(stream)?;\n            result.push(value);\n            stream = remainder;\n        }\n\n        Ok((result, stream))\n    }\n\n    fn from_vec(bytes: Vec<u8>) -> Result<(Self, Vec<u8>), Error> {\n        vec_from_vec(bytes)\n    }\n}\n\nimpl<T: ToBytes> ToBytes for VecDeque<T> {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let (slice1, slice2) = self.as_slices();\n        let mut result = allocate_buffer(self)?;\n        let length_32: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?;\n        result.append(&mut length_32.to_bytes()?);\n        for item in slice1.iter().chain(slice2.iter()) {\n            result.append(&mut item.to_bytes()?);\n        }\n        Ok(result)\n    }\n\n    fn into_bytes(self) -> Result<Vec<u8>, Error> {\n        let vec: Vec<T> = self.into();\n        vec.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        let (slice1, slice2) = self.as_slices();\n        iterator_serialized_length(slice1.iter().chain(slice2.iter()))\n    }\n}\n\nimpl<T: FromBytes> FromBytes for VecDeque<T> {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (vec, bytes) = Vec::from_bytes(bytes)?;\n        Ok((VecDeque::from(vec), bytes))\n    }\n\n    fn from_vec(bytes: Vec<u8>) -> Result<(Self, Vec<u8>), Error> {\n        let (vec, 
bytes) = vec_from_vec(bytes)?;\n        Ok((VecDeque::from(vec), bytes))\n    }\n}\n\nimpl<const COUNT: usize> ToBytes for [u8; COUNT] {\n    #[inline(always)]\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        Ok(self.to_vec())\n    }\n\n    #[inline(always)]\n    fn serialized_length(&self) -> usize {\n        COUNT\n    }\n\n    #[inline(always)]\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        writer.extend_from_slice(self);\n        Ok(())\n    }\n}\n\nimpl<const COUNT: usize> FromBytes for [u8; COUNT] {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (bytes, rem) = safe_split_at(bytes, COUNT)?;\n        // SAFETY: safe_split_at makes sure `bytes` is exactly `COUNT` bytes.\n        let ptr = bytes.as_ptr() as *const [u8; COUNT];\n        let result = unsafe { *ptr };\n        Ok((result, rem))\n    }\n}\n\nimpl<V: ToBytes> ToBytes for BTreeSet<V> {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut result = allocate_buffer(self)?;\n\n        let num_keys: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?;\n        result.append(&mut num_keys.to_bytes()?);\n\n        for value in self.iter() {\n            result.append(&mut value.to_bytes()?);\n        }\n\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U32_SERIALIZED_LENGTH + self.iter().map(|v| v.serialized_length()).sum::<usize>()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        let length_32: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?;\n        writer.extend_from_slice(&length_32.to_le_bytes());\n        for value in self.iter() {\n            value.write_bytes(writer)?;\n        }\n        Ok(())\n    }\n}\n\nimpl<V: FromBytes + Ord> FromBytes for BTreeSet<V> {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (num_keys, mut stream) = u32::from_bytes(bytes)?;\n        let 
mut result = BTreeSet::new();\n        for _ in 0..num_keys {\n            let (v, rem) = V::from_bytes(stream)?;\n            result.insert(v);\n            stream = rem;\n        }\n        Ok((result, stream))\n    }\n}\n\nimpl<K, V> ToBytes for BTreeMap<K, V>\nwhere\n    K: ToBytes,\n    V: ToBytes,\n{\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut result = allocate_buffer(self)?;\n\n        let num_keys: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?;\n        result.append(&mut num_keys.to_bytes()?);\n\n        for (key, value) in self.iter() {\n            result.append(&mut key.to_bytes()?);\n            result.append(&mut value.to_bytes()?);\n        }\n\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U32_SERIALIZED_LENGTH\n            + self\n                .iter()\n                .map(|(key, value)| key.serialized_length() + value.serialized_length())\n                .sum::<usize>()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        let length_32: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?;\n        writer.extend_from_slice(&length_32.to_le_bytes());\n        for (key, value) in self.iter() {\n            key.write_bytes(writer)?;\n            value.write_bytes(writer)?;\n        }\n        Ok(())\n    }\n}\n\nimpl<K, V> FromBytes for BTreeMap<K, V>\nwhere\n    K: FromBytes + Ord,\n    V: FromBytes,\n{\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (num_keys, mut stream) = u32::from_bytes(bytes)?;\n        let mut result = BTreeMap::new();\n        for _ in 0..num_keys {\n            let (k, rem) = K::from_bytes(stream)?;\n            let (v, rem) = V::from_bytes(rem)?;\n            result.insert(k, v);\n            stream = rem;\n        }\n        Ok((result, stream))\n    }\n}\n\nimpl<T: ToBytes> ToBytes for Option<T> {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        
match self {\n            None => Ok(vec![OPTION_NONE_TAG]),\n            Some(v) => {\n                let mut result = allocate_buffer(self)?;\n                result.push(OPTION_SOME_TAG);\n\n                let mut value = v.to_bytes()?;\n                result.append(&mut value);\n\n                Ok(result)\n            }\n        }\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                Some(v) => v.serialized_length(),\n                None => 0,\n            }\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        match self {\n            None => writer.push(OPTION_NONE_TAG),\n            Some(v) => {\n                writer.push(OPTION_SOME_TAG);\n                v.write_bytes(writer)?;\n            }\n        };\n        Ok(())\n    }\n}\n\nimpl<T: FromBytes> FromBytes for Option<T> {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (tag, rem) = u8::from_bytes(bytes)?;\n        match tag {\n            OPTION_NONE_TAG => Ok((None, rem)),\n            OPTION_SOME_TAG => {\n                let (t, rem) = T::from_bytes(rem)?;\n                Ok((Some(t), rem))\n            }\n            _ => Err(Error::Formatting),\n        }\n    }\n}\n\nimpl<T: ToBytes, E: ToBytes> ToBytes for Result<T, E> {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut result = allocate_buffer(self)?;\n        let (variant, mut value) = match self {\n            Err(error) => (RESULT_ERR_TAG, error.to_bytes()?),\n            Ok(result) => (RESULT_OK_TAG, result.to_bytes()?),\n        };\n        result.push(variant);\n        result.append(&mut value);\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                Ok(ok) => ok.serialized_length(),\n                Err(error) => error.serialized_length(),\n            }\n    }\n\n    fn 
write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        match self {\n            Err(error) => {\n                writer.push(RESULT_ERR_TAG);\n                error.write_bytes(writer)?;\n            }\n            Ok(result) => {\n                writer.push(RESULT_OK_TAG);\n                result.write_bytes(writer)?;\n            }\n        };\n        Ok(())\n    }\n}\n\nimpl<T: FromBytes, E: FromBytes> FromBytes for Result<T, E> {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (variant, rem) = u8::from_bytes(bytes)?;\n        match variant {\n            RESULT_ERR_TAG => {\n                let (value, rem) = E::from_bytes(rem)?;\n                Ok((Err(value), rem))\n            }\n            RESULT_OK_TAG => {\n                let (value, rem) = T::from_bytes(rem)?;\n                Ok((Ok(value), rem))\n            }\n            _ => Err(Error::Formatting),\n        }\n    }\n}\n\nimpl<T1: ToBytes> ToBytes for (T1,) {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n}\n\nimpl<T1: FromBytes> FromBytes for (T1,) {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (t1, remainder) = T1::from_bytes(bytes)?;\n        Ok(((t1,), remainder))\n    }\n}\n\nimpl<T1: ToBytes, T2: ToBytes> ToBytes for (T1, T2) {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut result = allocate_buffer(self)?;\n        result.append(&mut self.0.to_bytes()?);\n        result.append(&mut self.1.to_bytes()?);\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length() + self.1.serialized_length()\n    }\n}\n\nimpl<T1: FromBytes, T2: FromBytes> FromBytes for (T1, T2) {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (t1, remainder) = T1::from_bytes(bytes)?;\n        let (t2, 
remainder) = T2::from_bytes(remainder)?;\n        Ok(((t1, t2), remainder))\n    }\n}\n\nimpl<T1: ToBytes, T2: ToBytes, T3: ToBytes> ToBytes for (T1, T2, T3) {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut result = allocate_buffer(self)?;\n        result.append(&mut self.0.to_bytes()?);\n        result.append(&mut self.1.to_bytes()?);\n        result.append(&mut self.2.to_bytes()?);\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length() + self.1.serialized_length() + self.2.serialized_length()\n    }\n}\n\nimpl<T1: FromBytes, T2: FromBytes, T3: FromBytes> FromBytes for (T1, T2, T3) {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (t1, remainder) = T1::from_bytes(bytes)?;\n        let (t2, remainder) = T2::from_bytes(remainder)?;\n        let (t3, remainder) = T3::from_bytes(remainder)?;\n        Ok(((t1, t2, t3), remainder))\n    }\n}\n\nimpl<T1: ToBytes, T2: ToBytes, T3: ToBytes, T4: ToBytes> ToBytes for (T1, T2, T3, T4) {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut result = allocate_buffer(self)?;\n        result.append(&mut self.0.to_bytes()?);\n        result.append(&mut self.1.to_bytes()?);\n        result.append(&mut self.2.to_bytes()?);\n        result.append(&mut self.3.to_bytes()?);\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n            + self.1.serialized_length()\n            + self.2.serialized_length()\n            + self.3.serialized_length()\n    }\n}\n\nimpl<T1: FromBytes, T2: FromBytes, T3: FromBytes, T4: FromBytes> FromBytes for (T1, T2, T3, T4) {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (t1, remainder) = T1::from_bytes(bytes)?;\n        let (t2, remainder) = T2::from_bytes(remainder)?;\n        let (t3, remainder) = T3::from_bytes(remainder)?;\n        let (t4, remainder) = T4::from_bytes(remainder)?;\n        
Ok(((t1, t2, t3, t4), remainder))\n    }\n}\n\nimpl<T1: ToBytes, T2: ToBytes, T3: ToBytes, T4: ToBytes, T5: ToBytes> ToBytes\n    for (T1, T2, T3, T4, T5)\n{\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut result = allocate_buffer(self)?;\n        result.append(&mut self.0.to_bytes()?);\n        result.append(&mut self.1.to_bytes()?);\n        result.append(&mut self.2.to_bytes()?);\n        result.append(&mut self.3.to_bytes()?);\n        result.append(&mut self.4.to_bytes()?);\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n            + self.1.serialized_length()\n            + self.2.serialized_length()\n            + self.3.serialized_length()\n            + self.4.serialized_length()\n    }\n}\n\nimpl<T1: FromBytes, T2: FromBytes, T3: FromBytes, T4: FromBytes, T5: FromBytes> FromBytes\n    for (T1, T2, T3, T4, T5)\n{\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (t1, remainder) = T1::from_bytes(bytes)?;\n        let (t2, remainder) = T2::from_bytes(remainder)?;\n        let (t3, remainder) = T3::from_bytes(remainder)?;\n        let (t4, remainder) = T4::from_bytes(remainder)?;\n        let (t5, remainder) = T5::from_bytes(remainder)?;\n        Ok(((t1, t2, t3, t4, t5), remainder))\n    }\n}\n\nimpl<T1: ToBytes, T2: ToBytes, T3: ToBytes, T4: ToBytes, T5: ToBytes, T6: ToBytes> ToBytes\n    for (T1, T2, T3, T4, T5, T6)\n{\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut result = allocate_buffer(self)?;\n        result.append(&mut self.0.to_bytes()?);\n        result.append(&mut self.1.to_bytes()?);\n        result.append(&mut self.2.to_bytes()?);\n        result.append(&mut self.3.to_bytes()?);\n        result.append(&mut self.4.to_bytes()?);\n        result.append(&mut self.5.to_bytes()?);\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n            + 
self.1.serialized_length()\n            + self.2.serialized_length()\n            + self.3.serialized_length()\n            + self.4.serialized_length()\n            + self.5.serialized_length()\n    }\n}\n\nimpl<T1: FromBytes, T2: FromBytes, T3: FromBytes, T4: FromBytes, T5: FromBytes, T6: FromBytes>\n    FromBytes for (T1, T2, T3, T4, T5, T6)\n{\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (t1, remainder) = T1::from_bytes(bytes)?;\n        let (t2, remainder) = T2::from_bytes(remainder)?;\n        let (t3, remainder) = T3::from_bytes(remainder)?;\n        let (t4, remainder) = T4::from_bytes(remainder)?;\n        let (t5, remainder) = T5::from_bytes(remainder)?;\n        let (t6, remainder) = T6::from_bytes(remainder)?;\n        Ok(((t1, t2, t3, t4, t5, t6), remainder))\n    }\n}\n\nimpl<T1: ToBytes, T2: ToBytes, T3: ToBytes, T4: ToBytes, T5: ToBytes, T6: ToBytes, T7: ToBytes>\n    ToBytes for (T1, T2, T3, T4, T5, T6, T7)\n{\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut result = allocate_buffer(self)?;\n        result.append(&mut self.0.to_bytes()?);\n        result.append(&mut self.1.to_bytes()?);\n        result.append(&mut self.2.to_bytes()?);\n        result.append(&mut self.3.to_bytes()?);\n        result.append(&mut self.4.to_bytes()?);\n        result.append(&mut self.5.to_bytes()?);\n        result.append(&mut self.6.to_bytes()?);\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n            + self.1.serialized_length()\n            + self.2.serialized_length()\n            + self.3.serialized_length()\n            + self.4.serialized_length()\n            + self.5.serialized_length()\n            + self.6.serialized_length()\n    }\n}\n\nimpl<\n        T1: FromBytes,\n        T2: FromBytes,\n        T3: FromBytes,\n        T4: FromBytes,\n        T5: FromBytes,\n        T6: FromBytes,\n        T7: FromBytes,\n    > FromBytes for (T1, T2, 
T3, T4, T5, T6, T7)\n{\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (t1, remainder) = T1::from_bytes(bytes)?;\n        let (t2, remainder) = T2::from_bytes(remainder)?;\n        let (t3, remainder) = T3::from_bytes(remainder)?;\n        let (t4, remainder) = T4::from_bytes(remainder)?;\n        let (t5, remainder) = T5::from_bytes(remainder)?;\n        let (t6, remainder) = T6::from_bytes(remainder)?;\n        let (t7, remainder) = T7::from_bytes(remainder)?;\n        Ok(((t1, t2, t3, t4, t5, t6, t7), remainder))\n    }\n}\n\nimpl<\n        T1: ToBytes,\n        T2: ToBytes,\n        T3: ToBytes,\n        T4: ToBytes,\n        T5: ToBytes,\n        T6: ToBytes,\n        T7: ToBytes,\n        T8: ToBytes,\n    > ToBytes for (T1, T2, T3, T4, T5, T6, T7, T8)\n{\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut result = allocate_buffer(self)?;\n        result.append(&mut self.0.to_bytes()?);\n        result.append(&mut self.1.to_bytes()?);\n        result.append(&mut self.2.to_bytes()?);\n        result.append(&mut self.3.to_bytes()?);\n        result.append(&mut self.4.to_bytes()?);\n        result.append(&mut self.5.to_bytes()?);\n        result.append(&mut self.6.to_bytes()?);\n        result.append(&mut self.7.to_bytes()?);\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n            + self.1.serialized_length()\n            + self.2.serialized_length()\n            + self.3.serialized_length()\n            + self.4.serialized_length()\n            + self.5.serialized_length()\n            + self.6.serialized_length()\n            + self.7.serialized_length()\n    }\n}\n\nimpl<\n        T1: FromBytes,\n        T2: FromBytes,\n        T3: FromBytes,\n        T4: FromBytes,\n        T5: FromBytes,\n        T6: FromBytes,\n        T7: FromBytes,\n        T8: FromBytes,\n    > FromBytes for (T1, T2, T3, T4, T5, T6, T7, T8)\n{\n    fn from_bytes(bytes: 
&[u8]) -> Result<(Self, &[u8]), Error> {\n        let (t1, remainder) = T1::from_bytes(bytes)?;\n        let (t2, remainder) = T2::from_bytes(remainder)?;\n        let (t3, remainder) = T3::from_bytes(remainder)?;\n        let (t4, remainder) = T4::from_bytes(remainder)?;\n        let (t5, remainder) = T5::from_bytes(remainder)?;\n        let (t6, remainder) = T6::from_bytes(remainder)?;\n        let (t7, remainder) = T7::from_bytes(remainder)?;\n        let (t8, remainder) = T8::from_bytes(remainder)?;\n        Ok(((t1, t2, t3, t4, t5, t6, t7, t8), remainder))\n    }\n}\n\nimpl<\n        T1: ToBytes,\n        T2: ToBytes,\n        T3: ToBytes,\n        T4: ToBytes,\n        T5: ToBytes,\n        T6: ToBytes,\n        T7: ToBytes,\n        T8: ToBytes,\n        T9: ToBytes,\n    > ToBytes for (T1, T2, T3, T4, T5, T6, T7, T8, T9)\n{\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut result = allocate_buffer(self)?;\n        result.append(&mut self.0.to_bytes()?);\n        result.append(&mut self.1.to_bytes()?);\n        result.append(&mut self.2.to_bytes()?);\n        result.append(&mut self.3.to_bytes()?);\n        result.append(&mut self.4.to_bytes()?);\n        result.append(&mut self.5.to_bytes()?);\n        result.append(&mut self.6.to_bytes()?);\n        result.append(&mut self.7.to_bytes()?);\n        result.append(&mut self.8.to_bytes()?);\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n            + self.1.serialized_length()\n            + self.2.serialized_length()\n            + self.3.serialized_length()\n            + self.4.serialized_length()\n            + self.5.serialized_length()\n            + self.6.serialized_length()\n            + self.7.serialized_length()\n            + self.8.serialized_length()\n    }\n}\n\nimpl<\n        T1: FromBytes,\n        T2: FromBytes,\n        T3: FromBytes,\n        T4: FromBytes,\n        T5: FromBytes,\n        T6: FromBytes,\n    
    T7: FromBytes,\n        T8: FromBytes,\n        T9: FromBytes,\n    > FromBytes for (T1, T2, T3, T4, T5, T6, T7, T8, T9)\n{\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (t1, remainder) = T1::from_bytes(bytes)?;\n        let (t2, remainder) = T2::from_bytes(remainder)?;\n        let (t3, remainder) = T3::from_bytes(remainder)?;\n        let (t4, remainder) = T4::from_bytes(remainder)?;\n        let (t5, remainder) = T5::from_bytes(remainder)?;\n        let (t6, remainder) = T6::from_bytes(remainder)?;\n        let (t7, remainder) = T7::from_bytes(remainder)?;\n        let (t8, remainder) = T8::from_bytes(remainder)?;\n        let (t9, remainder) = T9::from_bytes(remainder)?;\n        Ok(((t1, t2, t3, t4, t5, t6, t7, t8, t9), remainder))\n    }\n}\n\nimpl<\n        T1: ToBytes,\n        T2: ToBytes,\n        T3: ToBytes,\n        T4: ToBytes,\n        T5: ToBytes,\n        T6: ToBytes,\n        T7: ToBytes,\n        T8: ToBytes,\n        T9: ToBytes,\n        T10: ToBytes,\n    > ToBytes for (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10)\n{\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut result = allocate_buffer(self)?;\n        result.append(&mut self.0.to_bytes()?);\n        result.append(&mut self.1.to_bytes()?);\n        result.append(&mut self.2.to_bytes()?);\n        result.append(&mut self.3.to_bytes()?);\n        result.append(&mut self.4.to_bytes()?);\n        result.append(&mut self.5.to_bytes()?);\n        result.append(&mut self.6.to_bytes()?);\n        result.append(&mut self.7.to_bytes()?);\n        result.append(&mut self.8.to_bytes()?);\n        result.append(&mut self.9.to_bytes()?);\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n            + self.1.serialized_length()\n            + self.2.serialized_length()\n            + self.3.serialized_length()\n            + self.4.serialized_length()\n            + 
self.5.serialized_length()\n            + self.6.serialized_length()\n            + self.7.serialized_length()\n            + self.8.serialized_length()\n            + self.9.serialized_length()\n    }\n}\n\nimpl<\n        T1: FromBytes,\n        T2: FromBytes,\n        T3: FromBytes,\n        T4: FromBytes,\n        T5: FromBytes,\n        T6: FromBytes,\n        T7: FromBytes,\n        T8: FromBytes,\n        T9: FromBytes,\n        T10: FromBytes,\n    > FromBytes for (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10)\n{\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (t1, remainder) = T1::from_bytes(bytes)?;\n        let (t2, remainder) = T2::from_bytes(remainder)?;\n        let (t3, remainder) = T3::from_bytes(remainder)?;\n        let (t4, remainder) = T4::from_bytes(remainder)?;\n        let (t5, remainder) = T5::from_bytes(remainder)?;\n        let (t6, remainder) = T6::from_bytes(remainder)?;\n        let (t7, remainder) = T7::from_bytes(remainder)?;\n        let (t8, remainder) = T8::from_bytes(remainder)?;\n        let (t9, remainder) = T9::from_bytes(remainder)?;\n        let (t10, remainder) = T10::from_bytes(remainder)?;\n        Ok(((t1, t2, t3, t4, t5, t6, t7, t8, t9, t10), remainder))\n    }\n}\n\nimpl ToBytes for str {\n    #[inline]\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        u8_slice_to_bytes(self.as_bytes())\n    }\n\n    #[inline]\n    fn serialized_length(&self) -> usize {\n        u8_slice_serialized_length(self.as_bytes())\n    }\n\n    #[inline]\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        write_u8_slice(self.as_bytes(), writer)?;\n        Ok(())\n    }\n}\n\nimpl ToBytes for &str {\n    #[inline(always)]\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        (*self).to_bytes()\n    }\n\n    #[inline(always)]\n    fn serialized_length(&self) -> usize {\n        (*self).serialized_length()\n    }\n\n    #[inline]\n    fn write_bytes(&self, writer: &mut 
Vec<u8>) -> Result<(), Error> {\n        write_u8_slice(self.as_bytes(), writer)?;\n        Ok(())\n    }\n}\n\nimpl<T> ToBytes for &T\nwhere\n    T: ToBytes,\n{\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        (*self).to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        (*self).serialized_length()\n    }\n}\n\nimpl<T> ToBytes for Box<T>\nwhere\n    T: ToBytes,\n{\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        self.as_ref().to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.as_ref().serialized_length()\n    }\n}\n\nimpl<T> ToBytes for Ratio<T>\nwhere\n    T: Clone + Integer + ToBytes,\n{\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        if self.denom().is_zero() {\n            return Err(Error::Formatting);\n        }\n        (self.numer().clone(), self.denom().clone()).into_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        (self.numer().clone(), self.denom().clone()).serialized_length()\n    }\n}\n\nimpl<T> FromBytes for Ratio<T>\nwhere\n    T: Clone + FromBytes + Integer,\n{\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let ((numer, denom), rem): ((T, T), &[u8]) = FromBytes::from_bytes(bytes)?;\n        if denom.is_zero() {\n            return Err(Error::Formatting);\n        }\n        Ok((Ratio::new(numer, denom), rem))\n    }\n}\n\n/// Serializes a slice of bytes with a length prefix.\n///\n/// This function is serializing a slice of bytes with an addition of a 4 byte length prefix.\n///\n/// For safety you should prefer to use [`vec_u8_to_bytes`]. 
For efficiency reasons you should also\n/// avoid using serializing Vec<u8>.\nfn u8_slice_to_bytes(bytes: &[u8]) -> Result<Vec<u8>, Error> {\n    let serialized_length = u8_slice_serialized_length(bytes);\n    let mut vec = Vec::with_capacity(serialized_length);\n    let length_prefix: u32 = bytes\n        .len()\n        .try_into()\n        .map_err(|_| Error::NotRepresentable)?;\n    let length_prefix_bytes = length_prefix.to_le_bytes();\n    vec.extend_from_slice(&length_prefix_bytes);\n    vec.extend_from_slice(bytes);\n    Ok(vec)\n}\n\nfn write_u8_slice(bytes: &[u8], writer: &mut Vec<u8>) -> Result<(), Error> {\n    let length_32: u32 = bytes\n        .len()\n        .try_into()\n        .map_err(|_| Error::NotRepresentable)?;\n    writer.extend_from_slice(&length_32.to_le_bytes());\n    writer.extend_from_slice(bytes);\n    Ok(())\n}\n\n/// Serializes a vector of bytes with a length prefix.\n///\n/// For efficiency you should avoid serializing Vec<u8>.\n#[allow(clippy::ptr_arg)]\n#[inline]\npub(crate) fn vec_u8_to_bytes(vec: &Vec<u8>) -> Result<Vec<u8>, Error> {\n    u8_slice_to_bytes(vec.as_slice())\n}\n\n/// Returns serialized length of serialized slice of bytes.\n///\n/// This function adds a length prefix in the beginning.\n#[inline(always)]\nfn u8_slice_serialized_length(bytes: &[u8]) -> usize {\n    U32_SERIALIZED_LENGTH + bytes.len()\n}\n\n#[allow(clippy::ptr_arg)]\n#[inline]\npub(crate) fn vec_u8_serialized_length(vec: &Vec<u8>) -> usize {\n    u8_slice_serialized_length(vec.as_slice())\n}\n\n/// Asserts that `t` can be serialized and when deserialized back into an instance `T` compares\n/// equal to `t`.\n///\n/// Also asserts that `t.serialized_length()` is the same as the actual number of bytes of the\n/// serialized `t` instance.\n#[cfg(any(feature = \"testing\", test))]\n#[track_caller]\npub fn test_serialization_roundtrip<T>(t: &T)\nwhere\n    T: fmt::Debug + ToBytes + FromBytes + PartialEq,\n{\n    let serialized = 
ToBytes::to_bytes(t).expect(\"Unable to serialize data\");\n    assert_eq!(\n        serialized.len(),\n        t.serialized_length(),\n        \"\\nLength of serialized data: {},\\nserialized_length() yielded: {},\\n t is {:?}\",\n        serialized.len(),\n        t.serialized_length(),\n        t\n    );\n    let mut written_bytes = vec![];\n    t.write_bytes(&mut written_bytes)\n        .expect(\"Unable to serialize data via write_bytes\");\n    assert_eq!(serialized, written_bytes);\n\n    let deserialized_from_slice = deserialize_from_slice(&serialized)\n        .unwrap_or_else(|error| panic!(\"Unable to deserialize data: {error:?} ({t:?})\"));\n    assert_eq!(*t, deserialized_from_slice);\n\n    let deserialized = deserialize::<T>(serialized).expect(\"Unable to deserialize data\");\n    assert_eq!(*t, deserialized);\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::U128;\n\n    use super::*;\n\n    #[test]\n    fn should_not_serialize_zero_denominator() {\n        let malicious = Ratio::new_raw(1, 0);\n        assert_eq!(malicious.to_bytes().unwrap_err(), Error::Formatting);\n    }\n\n    #[test]\n    fn should_not_deserialize_zero_denominator() {\n        let malicious_bytes = (1u64, 0u64).to_bytes().unwrap();\n        let result: Result<Ratio<u64>, Error> = deserialize(malicious_bytes);\n        assert_eq!(result.unwrap_err(), Error::Formatting);\n    }\n\n    #[test]\n    fn should_have_generic_tobytes_impl_for_borrowed_types() {\n        struct NonCopyable;\n\n        impl ToBytes for NonCopyable {\n            fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n                Ok(vec![1, 2, 3])\n            }\n\n            fn serialized_length(&self) -> usize {\n                3\n            }\n        }\n\n        let noncopyable: &NonCopyable = &NonCopyable;\n\n        assert_eq!(noncopyable.to_bytes().unwrap(), vec![1, 2, 3]);\n        assert_eq!(noncopyable.serialized_length(), 3);\n        assert_eq!(noncopyable.into_bytes().unwrap(), vec![1, 2, 3]);\n 
   }\n\n    #[cfg(debug_assertions)]\n    #[test]\n    #[should_panic(\n        expected = \"You should use `casper_types::bytesrepr::Bytes` newtype wrapper instead of `Vec<u8>` for efficiency\"\n    )]\n    fn should_fail_to_serialize_slice_of_u8() {\n        let bytes = b\"0123456789\".to_vec();\n        bytes.to_bytes().unwrap();\n    }\n\n    #[test]\n    fn should_calculate_capacity() {\n        #[allow(dead_code)]\n        struct CustomStruct {\n            u8_field: u8,\n            u16_field: u16,\n            u32_field: u32,\n            u64_field: u64,\n            // Here we're using U128 type that represents u128 with a two u64s which is what the\n            // compiler is doing for x86_64. On 64-bit ARM architecture u128 is aligned\n            // to 16 bytes, but on x86_64 it's aligned to 8 bytes. This changes the\n            // memory layout of the struct and affects the results of function `cautious`.\n            // The expected behaviour of u128 alignment is 8 bytes instead of 16,\n            // and there is a bug in the rust compiler for this: https://github.com/rust-lang/rust/issues/54341\n            u128_field: U128,\n            str_field: String,\n        }\n        assert_eq!(\n            cautious::<usize>(u32::MAX as usize),\n            512,\n            \"hint is 2^32-1 and we can only preallocate 512 elements\"\n        );\n        assert_eq!(\n            cautious::<u8>(usize::MAX),\n            4096,\n            \"hint is usize::MAX and we can only preallocate 4096 elements\"\n        );\n        assert_eq!(\n            cautious::<u16>(usize::MAX),\n            2048,\n            \"hint is usize::MAX and we can only preallocate 2048 elements\"\n        );\n        assert_eq!(\n            cautious::<CustomStruct>(usize::MAX),\n            73,\n            \"hint is usize::MAX and we can only preallocate 73 elements\"\n        );\n    }\n\n    #[test]\n    fn deserializing_empty_vec_has_no_capacity() {\n        let bytes = 
ToBytes::to_bytes(&(0u32, b\"123\")).unwrap();\n        let (vec, rem): (Vec<u32>, _) = FromBytes::from_bytes(&bytes).unwrap();\n        assert!(vec.is_empty());\n        assert_eq!(vec.capacity(), 0);\n        assert_eq!(rem, b\"123\");\n    }\n}\n\n#[cfg(test)]\nmod proptests {\n    use std::collections::VecDeque;\n\n    use proptest::{collection::vec, prelude::*};\n\n    use crate::{\n        bytesrepr::{self, bytes::gens::bytes_arb, ToBytes},\n        gens::*,\n    };\n\n    proptest! {\n        #[test]\n        fn test_bool(u in any::<bool>()) {\n            bytesrepr::test_serialization_roundtrip(&u);\n        }\n\n        #[test]\n        fn test_u8(u in any::<u8>()) {\n            bytesrepr::test_serialization_roundtrip(&u);\n        }\n\n        #[test]\n        fn test_u16(u in any::<u16>()) {\n            bytesrepr::test_serialization_roundtrip(&u);\n        }\n\n        #[test]\n        fn test_u32(u in any::<u32>()) {\n            bytesrepr::test_serialization_roundtrip(&u);\n        }\n\n        #[test]\n        fn test_i32(u in any::<i32>()) {\n            bytesrepr::test_serialization_roundtrip(&u);\n        }\n\n        #[test]\n        fn test_u64(u in any::<u64>()) {\n            bytesrepr::test_serialization_roundtrip(&u);\n        }\n\n        #[test]\n        fn test_i64(u in any::<i64>()) {\n            bytesrepr::test_serialization_roundtrip(&u);\n        }\n\n        #[test]\n        fn test_u8_slice_32(s in u8_slice_32()) {\n            bytesrepr::test_serialization_roundtrip(&s);\n        }\n\n        #[test]\n        fn test_vec_u8(u in bytes_arb(1..100)) {\n            bytesrepr::test_serialization_roundtrip(&u);\n        }\n\n        #[test]\n        fn test_vec_i32(u in vec(any::<i32>(), 1..100)) {\n            bytesrepr::test_serialization_roundtrip(&u);\n        }\n\n        #[test]\n        fn test_vecdeque_i32((front, back) in (vec(any::<i32>(), 1..100), vec(any::<i32>(), 1..100))) {\n            let mut vec_deque = 
VecDeque::new();\n            for f in front {\n                vec_deque.push_front(f);\n            }\n            for f in back {\n                vec_deque.push_back(f);\n            }\n            bytesrepr::test_serialization_roundtrip(&vec_deque);\n        }\n\n        #[test]\n        fn test_vec_vec_u8(u in vec(bytes_arb(1..100), 10)) {\n            bytesrepr::test_serialization_roundtrip(&u);\n        }\n\n        #[test]\n        fn test_uref_map(m in named_keys_arb(20)) {\n            bytesrepr::test_serialization_roundtrip(&m);\n        }\n\n        #[test]\n        fn test_array_u8_32(arr in any::<[u8; 32]>()) {\n            bytesrepr::test_serialization_roundtrip(&arr);\n        }\n\n        #[test]\n        fn test_string(s in \"\\\\PC*\") {\n            bytesrepr::test_serialization_roundtrip(&s);\n        }\n\n        #[test]\n        fn test_str(s in \"\\\\PC*\") {\n            let not_a_string_object = s.as_str();\n            not_a_string_object.to_bytes().expect(\"should serialize a str\");\n        }\n\n        #[test]\n        fn test_option(o in proptest::option::of(key_arb())) {\n            bytesrepr::test_serialization_roundtrip(&o);\n        }\n\n        #[test]\n        fn test_unit(unit in Just(())) {\n            bytesrepr::test_serialization_roundtrip(&unit);\n        }\n\n        #[test]\n        fn test_u128_serialization(u in u128_arb()) {\n            bytesrepr::test_serialization_roundtrip(&u);\n        }\n\n        #[test]\n        fn test_u256_serialization(u in u256_arb()) {\n            bytesrepr::test_serialization_roundtrip(&u);\n        }\n\n        #[test]\n        fn test_u512_serialization(u in u512_arb()) {\n            bytesrepr::test_serialization_roundtrip(&u);\n        }\n\n        #[test]\n        fn test_key_serialization(key in key_arb()) {\n            bytesrepr::test_serialization_roundtrip(&key);\n        }\n\n        #[test]\n        fn test_cl_value_serialization(cl_value in cl_value_arb()) {\n            
bytesrepr::test_serialization_roundtrip(&cl_value);\n        }\n\n        #[test]\n        fn test_access_rights(access_right in access_rights_arb()) {\n            bytesrepr::test_serialization_roundtrip(&access_right);\n        }\n\n        #[test]\n        fn test_uref(uref in uref_arb()) {\n            bytesrepr::test_serialization_roundtrip(&uref);\n        }\n\n        #[test]\n        fn test_account_hash(pk in account_hash_arb()) {\n            bytesrepr::test_serialization_roundtrip(&pk);\n        }\n\n        #[test]\n        fn test_result(result in result_arb()) {\n            bytesrepr::test_serialization_roundtrip(&result);\n        }\n\n        #[test]\n        fn test_phase_serialization(phase in phase_arb()) {\n            bytesrepr::test_serialization_roundtrip(&phase);\n        }\n\n        #[test]\n        fn test_protocol_version(protocol_version in protocol_version_arb()) {\n            bytesrepr::test_serialization_roundtrip(&protocol_version);\n        }\n\n        #[test]\n        fn test_sem_ver(sem_ver in sem_ver_arb()) {\n            bytesrepr::test_serialization_roundtrip(&sem_ver);\n        }\n\n        #[test]\n        fn test_tuple1(t in (any::<u8>(),)) {\n            bytesrepr::test_serialization_roundtrip(&t);\n        }\n\n        #[test]\n        fn test_tuple2(t in (any::<u8>(),any::<u32>())) {\n            bytesrepr::test_serialization_roundtrip(&t);\n        }\n\n        #[test]\n        fn test_tuple3(t in (any::<u8>(),any::<u32>(),any::<i32>())) {\n            bytesrepr::test_serialization_roundtrip(&t);\n        }\n\n        #[test]\n        fn test_tuple4(t in (any::<u8>(),any::<u32>(),any::<i32>(), any::<i32>())) {\n            bytesrepr::test_serialization_roundtrip(&t);\n        }\n        #[test]\n        fn test_tuple5(t in (any::<u8>(),any::<u32>(),any::<i32>(), any::<i32>(), any::<i32>())) {\n            bytesrepr::test_serialization_roundtrip(&t);\n        }\n        #[test]\n        fn test_tuple6(t in 
(any::<u8>(),any::<u32>(),any::<i32>(), any::<i32>(), any::<i32>(), any::<i32>())) {\n            bytesrepr::test_serialization_roundtrip(&t);\n        }\n        #[test]\n        fn test_tuple7(t in (any::<u8>(),any::<u32>(),any::<i32>(), any::<i32>(), any::<i32>(), any::<i32>(), any::<i32>())) {\n            bytesrepr::test_serialization_roundtrip(&t);\n        }\n        #[test]\n        fn test_tuple8(t in (any::<u8>(),any::<u32>(),any::<i32>(), any::<i32>(), any::<i32>(), any::<i32>(), any::<i32>(), any::<i32>())) {\n            bytesrepr::test_serialization_roundtrip(&t);\n        }\n        #[test]\n        fn test_tuple9(t in (any::<u8>(),any::<u32>(),any::<i32>(), any::<i32>(), any::<i32>(), any::<i32>(), any::<i32>(), any::<i32>(), any::<i32>())) {\n            bytesrepr::test_serialization_roundtrip(&t);\n        }\n        #[test]\n        fn test_tuple10(t in (any::<u8>(),any::<u32>(),any::<i32>(), any::<i32>(), any::<i32>(), any::<i32>(), any::<i32>(), any::<i32>(), any::<i32>(), any::<i32>())) {\n            bytesrepr::test_serialization_roundtrip(&t);\n        }\n        #[test]\n        fn test_ratio_u64(t in (any::<u64>(), 1..u64::MAX)) {\n            bytesrepr::test_serialization_roundtrip(&t);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/accounts_config/account_config.rs",
    "content": "#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{distributions::Standard, prelude::*};\n\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    GenesisAccount, Motes, PublicKey,\n};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\n#[cfg(any(feature = \"testing\", test))]\nuse crate::{SecretKey, U512};\n\nuse super::ValidatorConfig;\n\n/// Configuration of an individial account in accounts.toml\n#[derive(PartialEq, Ord, PartialOrd, Eq, Serialize, Deserialize, Debug, Clone)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct AccountConfig {\n    /// Public Key.\n    pub public_key: PublicKey,\n    /// Balance.\n    pub balance: Motes,\n    /// Validator config.\n    pub validator: Option<ValidatorConfig>,\n}\n\nimpl AccountConfig {\n    /// Creates a new `AccountConfig`.\n    pub fn new(public_key: PublicKey, balance: Motes, validator: Option<ValidatorConfig>) -> Self {\n        Self {\n            public_key,\n            balance,\n            validator,\n        }\n    }\n\n    /// Public key.\n    pub fn public_key(&self) -> PublicKey {\n        self.public_key.clone()\n    }\n\n    /// Balance.\n    pub fn balance(&self) -> Motes {\n        self.balance\n    }\n\n    /// Bonded amount.\n    pub fn bonded_amount(&self) -> Motes {\n        match self.validator {\n            Some(validator_config) => validator_config.bonded_amount(),\n            None => Motes::zero(),\n        }\n    }\n\n    /// Is this a genesis validator?\n    pub fn is_genesis_validator(&self) -> bool {\n        self.validator.is_some()\n    }\n\n    #[cfg(any(feature = \"testing\", test))]\n    /// Generates a random instance using a `TestRng`.\n    pub fn random(rng: &mut TestRng) -> Self {\n        let public_key =\n            PublicKey::from(&SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap());\n        let 
balance = Motes::new(rng.gen::<u64>());\n        let validator = rng.gen();\n\n        AccountConfig {\n            public_key,\n            balance,\n            validator,\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<AccountConfig> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> AccountConfig {\n        let secret_key = SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap();\n        let public_key = PublicKey::from(&secret_key);\n\n        let mut u512_array = [0u8; 64];\n        rng.fill_bytes(u512_array.as_mut());\n        let balance = Motes::new(U512::from(u512_array));\n\n        let validator = rng.gen();\n\n        AccountConfig::new(public_key, balance, validator)\n    }\n}\n\nimpl ToBytes for AccountConfig {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        buffer.extend(self.public_key.to_bytes()?);\n        buffer.extend(self.balance.to_bytes()?);\n        buffer.extend(self.validator.to_bytes()?);\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.public_key.serialized_length()\n            + self.balance.serialized_length()\n            + self.validator.serialized_length()\n    }\n}\n\nimpl FromBytes for AccountConfig {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (public_key, remainder) = FromBytes::from_bytes(bytes)?;\n        let (balance, remainder) = FromBytes::from_bytes(remainder)?;\n        let (validator, remainder) = FromBytes::from_bytes(remainder)?;\n        let account_config = AccountConfig {\n            public_key,\n            balance,\n            validator,\n        };\n        Ok((account_config, remainder))\n    }\n}\n\nimpl From<AccountConfig> for GenesisAccount {\n    fn from(account_config: AccountConfig) -> Self {\n        let genesis_validator = account_config.validator.map(Into::into);\n        
GenesisAccount::account(\n            account_config.public_key,\n            account_config.balance,\n            genesis_validator,\n        )\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/accounts_config/delegator_config.rs",
    "content": "#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{distributions::Standard, prelude::*};\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    GenesisAccount, Motes, PublicKey,\n};\n#[cfg(any(feature = \"testing\", test))]\nuse crate::{SecretKey, U512};\n\n/// Configuration values related to a delegator.\n#[derive(PartialEq, Ord, PartialOrd, Eq, Serialize, Deserialize, Debug, Clone)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct DelegatorConfig {\n    /// Validator public key.\n    pub validator_public_key: PublicKey,\n    /// Delegator public key.\n    pub delegator_public_key: PublicKey,\n    /// Balance for this delegator in Motes.\n    pub balance: Motes,\n    /// Delegated amount in Motes.\n    pub delegated_amount: Motes,\n}\n\nimpl DelegatorConfig {\n    /// Creates a new DelegatorConfig.\n    pub fn new(\n        validator_public_key: PublicKey,\n        delegator_public_key: PublicKey,\n        balance: Motes,\n        delegated_amount: Motes,\n    ) -> Self {\n        Self {\n            validator_public_key,\n            delegator_public_key,\n            balance,\n            delegated_amount,\n        }\n    }\n\n    #[cfg(any(feature = \"testing\", test))]\n    /// Generates a random instance using a `TestRng`.\n    pub fn random(rng: &mut TestRng) -> Self {\n        let validator_public_key =\n            PublicKey::from(&SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap());\n        let delegator_public_key =\n            PublicKey::from(&SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap());\n        let balance = Motes::new(rng.gen::<u64>());\n        let delegated_amount = Motes::new(rng.gen::<u64>());\n\n        DelegatorConfig {\n            validator_public_key,\n            delegator_public_key,\n            
balance,\n            delegated_amount,\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<DelegatorConfig> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> DelegatorConfig {\n        let validator_secret_key = SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap();\n        let delegator_secret_key = SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap();\n\n        let validator_public_key = PublicKey::from(&validator_secret_key);\n        let delegator_public_key = PublicKey::from(&delegator_secret_key);\n\n        let mut u512_array = [0u8; 64];\n        rng.fill_bytes(u512_array.as_mut());\n        let balance = Motes::new(U512::from(u512_array));\n\n        rng.fill_bytes(u512_array.as_mut());\n        let delegated_amount = Motes::new(U512::from(u512_array));\n\n        DelegatorConfig::new(\n            validator_public_key,\n            delegator_public_key,\n            balance,\n            delegated_amount,\n        )\n    }\n}\n\nimpl ToBytes for DelegatorConfig {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        buffer.extend(self.validator_public_key.to_bytes()?);\n        buffer.extend(self.delegator_public_key.to_bytes()?);\n        buffer.extend(self.balance.to_bytes()?);\n        buffer.extend(self.delegated_amount.to_bytes()?);\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.validator_public_key.serialized_length()\n            + self.delegator_public_key.serialized_length()\n            + self.balance.serialized_length()\n            + self.delegated_amount.serialized_length()\n    }\n}\n\nimpl FromBytes for DelegatorConfig {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (validator_public_key, remainder) = FromBytes::from_bytes(bytes)?;\n        let (delegator_public_key, remainder) = 
FromBytes::from_bytes(remainder)?;\n        let (balance, remainder) = FromBytes::from_bytes(remainder)?;\n        let (delegated_amount, remainder) = FromBytes::from_bytes(remainder)?;\n        let delegator_config = DelegatorConfig {\n            validator_public_key,\n            delegator_public_key,\n            balance,\n            delegated_amount,\n        };\n        Ok((delegator_config, remainder))\n    }\n}\n\nimpl From<DelegatorConfig> for GenesisAccount {\n    fn from(delegator_config: DelegatorConfig) -> Self {\n        GenesisAccount::delegator(\n            delegator_config.validator_public_key,\n            delegator_config.delegator_public_key,\n            delegator_config.balance,\n            delegator_config.delegated_amount,\n        )\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/accounts_config/genesis.rs",
    "content": "#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::SecretKey;\nuse crate::{\n    account::AccountHash,\n    bytesrepr,\n    bytesrepr::{FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    system::auction::DelegationRate,\n    Motes, PublicKey,\n};\n\nconst TAG_LENGTH: usize = U8_SERIALIZED_LENGTH;\n\n#[repr(u8)]\nenum GenesisAccountTag {\n    System = 0,\n    Account = 1,\n    Delegator = 2,\n    Administrator = 3,\n    SustainAccount = 4,\n}\n\n/// Represents details about genesis account's validator status.\n#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct GenesisValidator {\n    /// Stake of a genesis validator.\n    bonded_amount: Motes,\n    /// Delegation rate in the range of 0-100.\n    delegation_rate: DelegationRate,\n}\n\nimpl ToBytes for GenesisValidator {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        buffer.extend(self.bonded_amount.to_bytes()?);\n        buffer.extend(self.delegation_rate.to_bytes()?);\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.bonded_amount.serialized_length() + self.delegation_rate.serialized_length()\n    }\n}\n\nimpl FromBytes for GenesisValidator {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (bonded_amount, remainder) = FromBytes::from_bytes(bytes)?;\n        let (delegation_rate, remainder) = FromBytes::from_bytes(remainder)?;\n        let genesis_validator = GenesisValidator {\n            bonded_amount,\n            delegation_rate,\n        };\n        Ok((genesis_validator, remainder))\n    }\n}\n\nimpl GenesisValidator {\n    /// Creates new 
[`GenesisValidator`].\n    pub fn new(bonded_amount: Motes, delegation_rate: DelegationRate) -> Self {\n        Self {\n            bonded_amount,\n            delegation_rate,\n        }\n    }\n\n    /// Returns the bonded amount of a genesis validator.\n    pub fn bonded_amount(&self) -> Motes {\n        self.bonded_amount\n    }\n\n    /// Returns the delegation rate of a genesis validator.\n    pub fn delegation_rate(&self) -> DelegationRate {\n        self.delegation_rate\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<GenesisValidator> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> GenesisValidator {\n        let bonded_amount = Motes::new(rng.gen::<u64>());\n        let delegation_rate = rng.gen();\n\n        GenesisValidator::new(bonded_amount, delegation_rate)\n    }\n}\n\n/// Special account in the system that is useful only for some private chains.\n#[derive(Debug, Clone, PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct AdministratorAccount {\n    public_key: PublicKey,\n    balance: Motes,\n}\n\nimpl AdministratorAccount {\n    /// Creates new special account.\n    pub fn new(public_key: PublicKey, balance: Motes) -> Self {\n        Self {\n            public_key,\n            balance,\n        }\n    }\n\n    /// Gets a reference to the administrator account's public key.\n    pub fn public_key(&self) -> &PublicKey {\n        &self.public_key\n    }\n}\n\nimpl ToBytes for AdministratorAccount {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let AdministratorAccount {\n            public_key,\n            balance,\n        } = self;\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        buffer.extend(public_key.to_bytes()?);\n        buffer.extend(balance.to_bytes()?);\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        let AdministratorAccount {\n            
public_key,\n            balance,\n        } = self;\n        public_key.serialized_length() + balance.serialized_length()\n    }\n}\n\nimpl FromBytes for AdministratorAccount {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (public_key, remainder) = FromBytes::from_bytes(bytes)?;\n        let (balance, remainder) = FromBytes::from_bytes(remainder)?;\n        let administrator_account = AdministratorAccount {\n            public_key,\n            balance,\n        };\n        Ok((administrator_account, remainder))\n    }\n}\n\n/// This enum represents possible states of a genesis account.\n#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub enum GenesisAccount {\n    /// This variant is for internal use only - genesis process will create a virtual system\n    /// account and use it to call system contracts.\n    System,\n    /// Genesis account that will be created.\n    Account {\n        /// Public key of a genesis account.\n        public_key: PublicKey,\n        /// Starting balance of a genesis account.\n        balance: Motes,\n        /// If set, it will make this account a genesis validator.\n        validator: Option<GenesisValidator>,\n    },\n    /// The genesis delegator is a special account that will be created as a delegator.\n    /// It does not have any stake of its own, but will create a real account in the system\n    /// which will delegate to a genesis validator.\n    Delegator {\n        /// Validator's public key that has to refer to other instance of\n        /// [`GenesisAccount::Account`] with a `validator` field set.\n        validator_public_key: PublicKey,\n        /// Public key of the genesis account that will be created as part of this entry.\n        delegator_public_key: PublicKey,\n        /// Starting balance of the account.\n        balance: Motes,\n        /// Delegated amount for given `validator_public_key`.\n     
   delegated_amount: Motes,\n    },\n    /// An administrative account in the genesis process.\n    ///\n    /// This variant makes sense for some private chains.\n    Administrator(AdministratorAccount),\n    /// An account to associate for the sustain purse\n    SustainAccount { public_key: PublicKey },\n}\n\nimpl From<AdministratorAccount> for GenesisAccount {\n    fn from(v: AdministratorAccount) -> Self {\n        Self::Administrator(v)\n    }\n}\n\nimpl GenesisAccount {\n    /// Create a system account variant.\n    pub fn system() -> Self {\n        Self::System\n    }\n\n    /// Create a standard account variant.\n    pub fn account(\n        public_key: PublicKey,\n        balance: Motes,\n        validator: Option<GenesisValidator>,\n    ) -> Self {\n        Self::Account {\n            public_key,\n            balance,\n            validator,\n        }\n    }\n\n    /// Create a delegator account variant.\n    pub fn delegator(\n        validator_public_key: PublicKey,\n        delegator_public_key: PublicKey,\n        balance: Motes,\n        delegated_amount: Motes,\n    ) -> Self {\n        Self::Delegator {\n            validator_public_key,\n            delegator_public_key,\n            balance,\n            delegated_amount,\n        }\n    }\n\n    /// Create a new sustain account.\n    pub fn sustain(public_key: PublicKey) -> Self {\n        Self::SustainAccount { public_key }\n    }\n\n    /// The public key (if any) associated with the account.\n    pub fn public_key(&self) -> PublicKey {\n        match self {\n            GenesisAccount::System => PublicKey::System,\n            GenesisAccount::Account { public_key, .. } => public_key.clone(),\n            GenesisAccount::Delegator {\n                delegator_public_key,\n                ..\n            } => delegator_public_key.clone(),\n            GenesisAccount::Administrator(AdministratorAccount { public_key, .. 
}) => {\n                public_key.clone()\n            }\n            GenesisAccount::SustainAccount { public_key } => public_key.clone(),\n        }\n    }\n\n    /// The account hash for the account.\n    pub fn account_hash(&self) -> AccountHash {\n        match self {\n            GenesisAccount::System => PublicKey::System.to_account_hash(),\n            GenesisAccount::Account { public_key, .. } => public_key.to_account_hash(),\n            GenesisAccount::Delegator {\n                delegator_public_key,\n                ..\n            } => delegator_public_key.to_account_hash(),\n            GenesisAccount::Administrator(AdministratorAccount { public_key, .. }) => {\n                public_key.to_account_hash()\n            }\n            GenesisAccount::SustainAccount { public_key } => public_key.to_account_hash(),\n        }\n    }\n\n    /// How many motes are to be deposited in the account's main purse.\n    pub fn balance(&self) -> Motes {\n        match self {\n            GenesisAccount::System => Motes::zero(),\n            GenesisAccount::Account { balance, .. } => *balance,\n            GenesisAccount::Delegator { balance, .. } => *balance,\n            GenesisAccount::Administrator(AdministratorAccount { balance, .. }) => *balance,\n            GenesisAccount::SustainAccount { .. } => Motes::zero(),\n        }\n    }\n\n    /// How many motes are to be staked.\n    ///\n    /// Staked accounts are either validators with some amount of bonded stake or delgators with\n    /// some amount of delegated stake.\n    pub fn staked_amount(&self) -> Motes {\n        match self {\n            GenesisAccount::System { .. 
}\n            | GenesisAccount::Account {\n                validator: None, ..\n            } => Motes::zero(),\n            GenesisAccount::Account {\n                validator: Some(genesis_validator),\n                ..\n            } => genesis_validator.bonded_amount(),\n            GenesisAccount::Delegator {\n                delegated_amount, ..\n            } => *delegated_amount,\n            GenesisAccount::Administrator(AdministratorAccount {\n                public_key: _,\n                balance: _,\n            }) => {\n                // This is defaulted to zero because administrator accounts are filtered out before\n                // validator set is created at the genesis.\n                Motes::zero()\n            }\n            GenesisAccount::SustainAccount { .. } => Motes::zero(),\n        }\n    }\n\n    /// What is the delegation rate of a validator.\n    pub fn delegation_rate(&self) -> DelegationRate {\n        match self {\n            GenesisAccount::Account {\n                validator: Some(genesis_validator),\n                ..\n            } => genesis_validator.delegation_rate(),\n            GenesisAccount::System\n            | GenesisAccount::Account {\n                validator: None, ..\n            }\n            | GenesisAccount::Delegator { .. } => {\n                // This value represents a delegation rate in invalid state that system is supposed\n                // to reject if used.\n                DelegationRate::MAX\n            }\n            GenesisAccount::Administrator(AdministratorAccount { .. }) => DelegationRate::MAX,\n            GenesisAccount::SustainAccount { .. } => DelegationRate::MAX,\n        }\n    }\n\n    /// Is this a virtual system account.\n    pub fn is_system_account(&self) -> bool {\n        matches!(self, GenesisAccount::System { .. 
})\n    }\n\n    /// Is this a validator account.\n    pub fn is_validator(&self) -> bool {\n        match self {\n            GenesisAccount::Account {\n                validator: Some(_), ..\n            } => true,\n            GenesisAccount::System { .. }\n            | GenesisAccount::Account {\n                validator: None, ..\n            }\n            | GenesisAccount::Delegator { .. }\n            | GenesisAccount::Administrator(AdministratorAccount { .. })\n            | GenesisAccount::SustainAccount { .. } => false,\n        }\n    }\n\n    /// Details about the genesis validator.\n    pub fn validator(&self) -> Option<&GenesisValidator> {\n        match self {\n            GenesisAccount::Account {\n                validator: Some(genesis_validator),\n                ..\n            } => Some(genesis_validator),\n            _ => None,\n        }\n    }\n\n    /// Is this a delegator account.\n    pub fn is_delegator(&self) -> bool {\n        matches!(self, GenesisAccount::Delegator { .. 
})\n    }\n\n    /// Details about the genesis delegator.\n    pub fn as_delegator(&self) -> Option<(&PublicKey, &PublicKey, &Motes, &Motes)> {\n        match self {\n            GenesisAccount::Delegator {\n                validator_public_key,\n                delegator_public_key,\n                balance,\n                delegated_amount,\n            } => Some((\n                validator_public_key,\n                delegator_public_key,\n                balance,\n                delegated_amount,\n            )),\n            _ => None,\n        }\n    }\n\n    /// Gets the administrator account variant.\n    pub fn as_administrator_account(&self) -> Option<&AdministratorAccount> {\n        if let Self::Administrator(v) = self {\n            Some(v)\n        } else {\n            None\n        }\n    }\n\n    /// Set validator.\n    pub fn try_set_validator(&mut self, genesis_validator: GenesisValidator) -> bool {\n        match self {\n            GenesisAccount::Account { validator, .. } => {\n                *validator = Some(genesis_validator);\n                true\n            }\n            GenesisAccount::System\n            | GenesisAccount::Delegator { .. }\n            | GenesisAccount::Administrator(_)\n            | GenesisAccount::SustainAccount { .. } => false,\n        }\n    }\n\n    pub fn is_sustain_account(&self) -> bool {\n        matches!(self, Self::SustainAccount { .. 
})\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<GenesisAccount> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> GenesisAccount {\n        let mut bytes = [0u8; 32];\n        rng.fill_bytes(&mut bytes[..]);\n        let secret_key = SecretKey::ed25519_from_bytes(bytes).unwrap();\n        let public_key = PublicKey::from(&secret_key);\n        let balance = Motes::new(rng.gen::<u64>());\n        let validator = rng.gen();\n\n        GenesisAccount::account(public_key, balance, validator)\n    }\n}\n\nimpl ToBytes for GenesisAccount {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        match self {\n            GenesisAccount::System => {\n                buffer.push(GenesisAccountTag::System as u8);\n            }\n            GenesisAccount::Account {\n                public_key,\n                balance,\n                validator,\n            } => {\n                buffer.push(GenesisAccountTag::Account as u8);\n                buffer.extend(public_key.to_bytes()?);\n                buffer.extend(balance.value().to_bytes()?);\n                buffer.extend(validator.to_bytes()?);\n            }\n            GenesisAccount::Delegator {\n                validator_public_key,\n                delegator_public_key,\n                balance,\n                delegated_amount,\n            } => {\n                buffer.push(GenesisAccountTag::Delegator as u8);\n                buffer.extend(validator_public_key.to_bytes()?);\n                buffer.extend(delegator_public_key.to_bytes()?);\n                buffer.extend(balance.to_bytes()?);\n                buffer.extend(delegated_amount.to_bytes()?);\n            }\n            GenesisAccount::Administrator(administrator_account) => {\n                buffer.push(GenesisAccountTag::Administrator as u8);\n                buffer.extend(administrator_account.to_bytes()?);\n            }\n  
          GenesisAccount::SustainAccount { public_key } => {\n                buffer.push(GenesisAccountTag::SustainAccount as u8);\n                buffer.extend(public_key.to_bytes()?);\n            }\n        }\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        match self {\n            GenesisAccount::System => TAG_LENGTH,\n            GenesisAccount::Account {\n                public_key,\n                balance,\n                validator,\n            } => {\n                public_key.serialized_length()\n                    + balance.value().serialized_length()\n                    + validator.serialized_length()\n                    + TAG_LENGTH\n            }\n            GenesisAccount::Delegator {\n                validator_public_key,\n                delegator_public_key,\n                balance,\n                delegated_amount,\n            } => {\n                validator_public_key.serialized_length()\n                    + delegator_public_key.serialized_length()\n                    + balance.value().serialized_length()\n                    + delegated_amount.value().serialized_length()\n                    + TAG_LENGTH\n            }\n            GenesisAccount::Administrator(administrator_account) => {\n                administrator_account.serialized_length() + TAG_LENGTH\n            }\n            GenesisAccount::SustainAccount { public_key } => {\n                public_key.serialized_length() + TAG_LENGTH\n            }\n        }\n    }\n}\n\nimpl FromBytes for GenesisAccount {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            tag if tag == GenesisAccountTag::System as u8 => {\n                let genesis_account = GenesisAccount::system();\n                Ok((genesis_account, remainder))\n            }\n            tag if tag == GenesisAccountTag::Account as u8 => {\n                
let (public_key, remainder) = FromBytes::from_bytes(remainder)?;\n                let (balance, remainder) = FromBytes::from_bytes(remainder)?;\n                let (validator, remainder) = FromBytes::from_bytes(remainder)?;\n                let genesis_account = GenesisAccount::account(public_key, balance, validator);\n                Ok((genesis_account, remainder))\n            }\n            tag if tag == GenesisAccountTag::Delegator as u8 => {\n                let (validator_public_key, remainder) = FromBytes::from_bytes(remainder)?;\n                let (delegator_public_key, remainder) = FromBytes::from_bytes(remainder)?;\n                let (balance, remainder) = FromBytes::from_bytes(remainder)?;\n                let (delegated_amount_value, remainder) = FromBytes::from_bytes(remainder)?;\n                let genesis_account = GenesisAccount::delegator(\n                    validator_public_key,\n                    delegator_public_key,\n                    balance,\n                    delegated_amount_value,\n                );\n                Ok((genesis_account, remainder))\n            }\n            tag if tag == GenesisAccountTag::Administrator as u8 => {\n                let (administrator_account, remainder) =\n                    AdministratorAccount::from_bytes(remainder)?;\n                let genesis_account = GenesisAccount::Administrator(administrator_account);\n                Ok((genesis_account, remainder))\n            }\n            tag if tag == GenesisAccountTag::SustainAccount as u8 => {\n                let (public_key, remainder) = FromBytes::from_bytes(remainder)?;\n                Ok((GenesisAccount::SustainAccount { public_key }, remainder))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/accounts_config/validator_config.rs",
    "content": "#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse num::Zero;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{distributions::Standard, prelude::*};\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    system::auction::DelegationRate,\n    GenesisValidator, Motes,\n};\n#[cfg(any(feature = \"testing\", test))]\nuse crate::{testing::TestRng, U512};\n\n/// Validator account configuration.\n#[derive(PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize, Debug, Copy, Clone)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct ValidatorConfig {\n    bonded_amount: Motes,\n    #[serde(default = \"DelegationRate::zero\")]\n    delegation_rate: DelegationRate,\n}\n\nimpl ValidatorConfig {\n    /// Creates a new `ValidatorConfig`.\n    pub fn new(bonded_amount: Motes, delegation_rate: DelegationRate) -> Self {\n        Self {\n            bonded_amount,\n            delegation_rate,\n        }\n    }\n\n    /// Delegation rate.\n    pub fn delegation_rate(&self) -> DelegationRate {\n        self.delegation_rate\n    }\n\n    /// Bonded amount.\n    pub fn bonded_amount(&self) -> Motes {\n        self.bonded_amount\n    }\n\n    /// Returns a random `ValidatorConfig`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        let bonded_amount = Motes::new(rng.gen::<u64>());\n        let delegation_rate = rng.gen();\n\n        ValidatorConfig {\n            bonded_amount,\n            delegation_rate,\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<ValidatorConfig> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> ValidatorConfig {\n        let mut u512_array = [0; 64];\n        rng.fill_bytes(u512_array.as_mut());\n        let bonded_amount = Motes::new(U512::from(u512_array));\n\n        let delegation_rate = rng.gen();\n\n        ValidatorConfig::new(bonded_amount, 
delegation_rate)\n    }\n}\n\nimpl ToBytes for ValidatorConfig {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        buffer.extend(self.bonded_amount.to_bytes()?);\n        buffer.extend(self.delegation_rate.to_bytes()?);\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.bonded_amount.serialized_length() + self.delegation_rate.serialized_length()\n    }\n}\n\nimpl FromBytes for ValidatorConfig {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (bonded_amount, remainder) = FromBytes::from_bytes(bytes)?;\n        let (delegation_rate, remainder) = FromBytes::from_bytes(remainder)?;\n        let account_config = ValidatorConfig {\n            bonded_amount,\n            delegation_rate,\n        };\n        Ok((account_config, remainder))\n    }\n}\n\nimpl From<ValidatorConfig> for GenesisValidator {\n    fn from(account_config: ValidatorConfig) -> Self {\n        GenesisValidator::new(\n            account_config.bonded_amount(),\n            account_config.delegation_rate,\n        )\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/accounts_config.rs",
    "content": "//! The accounts config is a set of configuration options that is used to create accounts at\n//! genesis, and set up auction contract with validators and delegators.\nmod account_config;\nmod delegator_config;\nmod genesis;\nmod validator_config;\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse serde::{Deserialize, Deserializer, Serialize};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    PublicKey,\n};\n\npub use account_config::AccountConfig;\npub use delegator_config::DelegatorConfig;\npub use genesis::{AdministratorAccount, GenesisAccount, GenesisValidator};\npub use validator_config::ValidatorConfig;\n\nfn sorted_vec_deserializer<'de, T, D>(deserializer: D) -> Result<Vec<T>, D::Error>\nwhere\n    T: Deserialize<'de> + Ord,\n    D: Deserializer<'de>,\n{\n    let mut vec = Vec::<T>::deserialize(deserializer)?;\n    vec.sort_unstable();\n    Ok(vec)\n}\n\n/// Configuration values associated with accounts.toml\n#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, Clone, Default)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct AccountsConfig {\n    #[serde(deserialize_with = \"sorted_vec_deserializer\")]\n    accounts: Vec<AccountConfig>,\n    #[serde(default, deserialize_with = \"sorted_vec_deserializer\")]\n    delegators: Vec<DelegatorConfig>,\n    #[serde(\n        default,\n        deserialize_with = \"sorted_vec_deserializer\",\n        skip_serializing_if = \"Vec::is_empty\"\n    )]\n    administrators: Vec<AdministratorAccount>,\n}\n\nimpl AccountsConfig {\n    /// Create new accounts config instance.\n    pub fn new(\n        accounts: Vec<AccountConfig>,\n        delegators: Vec<DelegatorConfig>,\n        administrators: Vec<AdministratorAccount>,\n    ) -> Self {\n        Self {\n            accounts,\n            delegators,\n            administrators,\n        }\n    }\n\n    /// Accounts.\n    pub fn 
accounts(&self) -> &[AccountConfig] {\n        &self.accounts\n    }\n\n    /// Delegators.\n    pub fn delegators(&self) -> &[DelegatorConfig] {\n        &self.delegators\n    }\n\n    /// Administrators.\n    pub fn administrators(&self) -> &[AdministratorAccount] {\n        &self.administrators\n    }\n\n    /// Account.\n    pub fn account(&self, public_key: &PublicKey) -> Option<&AccountConfig> {\n        self.accounts\n            .iter()\n            .find(|account| &account.public_key == public_key)\n    }\n\n    /// All of the validators.\n    pub fn validators(&self) -> impl Iterator<Item = &AccountConfig> {\n        self.accounts\n            .iter()\n            .filter(|account| account.validator.is_some())\n    }\n\n    /// Is the provided public key in the set of genesis validator public keys.\n    pub fn is_genesis_validator(&self, public_key: &PublicKey) -> bool {\n        match self.account(public_key) {\n            None => false,\n            Some(account_config) => account_config.is_genesis_validator(),\n        }\n    }\n\n    #[cfg(any(feature = \"testing\", test))]\n    /// Generates a random instance using a `TestRng`.\n    pub fn random(rng: &mut TestRng) -> Self {\n        use rand::Rng;\n\n        use crate::Motes;\n\n        let alpha = AccountConfig::random(rng);\n        let accounts = vec![\n            alpha.clone(),\n            AccountConfig::random(rng),\n            AccountConfig::random(rng),\n            AccountConfig::random(rng),\n        ];\n\n        let mut delegator = DelegatorConfig::random(rng);\n        delegator.validator_public_key = alpha.public_key;\n\n        let delegators = vec![delegator];\n\n        let admin_balance: u32 = rng.gen();\n        let administrators = vec![AdministratorAccount::new(\n            PublicKey::random(rng),\n            Motes::new(admin_balance),\n        )];\n\n        AccountsConfig {\n            accounts,\n            delegators,\n            administrators,\n        }\n    
}\n}\n\nimpl ToBytes for AccountsConfig {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        buffer.extend(self.accounts.to_bytes()?);\n        buffer.extend(self.delegators.to_bytes()?);\n        buffer.extend(self.administrators.to_bytes()?);\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.accounts.serialized_length()\n            + self.delegators.serialized_length()\n            + self.administrators.serialized_length()\n    }\n}\n\nimpl FromBytes for AccountsConfig {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (accounts, remainder) = FromBytes::from_bytes(bytes)?;\n        let (delegators, remainder) = FromBytes::from_bytes(remainder)?;\n        let (administrators, remainder) = FromBytes::from_bytes(remainder)?;\n        let accounts_config = AccountsConfig::new(accounts, delegators, administrators);\n        Ok((accounts_config, remainder))\n    }\n}\n\nimpl From<AccountsConfig> for Vec<GenesisAccount> {\n    fn from(accounts_config: AccountsConfig) -> Self {\n        let mut genesis_accounts = Vec::with_capacity(accounts_config.accounts.len());\n        for account_config in accounts_config.accounts {\n            let genesis_account = account_config.into();\n            genesis_accounts.push(genesis_account);\n        }\n        for delegator_config in accounts_config.delegators {\n            let genesis_account = delegator_config.into();\n            genesis_accounts.push(genesis_account);\n        }\n\n        for administrator_config in accounts_config.administrators {\n            let administrator_account = administrator_config.into();\n            genesis_accounts.push(administrator_account);\n        }\n\n        genesis_accounts\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nmod tests {\n    #[cfg(test)]\n    use crate::{bytesrepr, testing::TestRng, AccountsConfig};\n\n    
#[test]\n    fn serialization_roundtrip() {\n        let mut rng = TestRng::new();\n        let accounts_config = AccountsConfig::random(&mut rng);\n        bytesrepr::test_serialization_roundtrip(&accounts_config);\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/activation_point.rs",
    "content": "use std::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    EraId, TimeDiff, Timestamp,\n};\n\nconst ERA_ID_TAG: u8 = 0;\nconst GENESIS_TAG: u8 = 1;\n\n/// The first era to which the associated protocol version applies.\n#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(untagged)]\npub enum ActivationPoint {\n    /// Era id.\n    EraId(EraId),\n    /// Genesis timestamp.\n    Genesis(Timestamp),\n}\n\nimpl ActivationPoint {\n    /// Returns whether we should upgrade the node due to the next era being the upgrade activation\n    /// point.\n    pub fn should_upgrade(&self, era_being_deactivated: &EraId) -> bool {\n        match self {\n            ActivationPoint::EraId(era_id) => era_being_deactivated.successor() >= *era_id,\n            ActivationPoint::Genesis(_) => false,\n        }\n    }\n\n    /// Returns the Era ID if `self` is of `EraId` variant, or else 0 if `Genesis`.\n    pub fn era_id(&self) -> EraId {\n        match self {\n            ActivationPoint::EraId(era_id) => *era_id,\n            ActivationPoint::Genesis(_) => EraId::from(0),\n        }\n    }\n\n    /// Returns the timestamp if `self` is of `Genesis` variant, or else `None`.\n    pub fn genesis_timestamp(&self) -> Option<Timestamp> {\n        match self {\n            ActivationPoint::EraId(_) => None,\n            ActivationPoint::Genesis(timestamp) => Some(*timestamp),\n        }\n    }\n\n    /// Returns a random `ActivationPoint`.\n    #[cfg(any(feature = \"testing\", test))]\n   
 pub fn random(rng: &mut TestRng) -> Self {\n        if rng.gen() {\n            ActivationPoint::EraId(EraId::random(rng))\n        } else {\n            ActivationPoint::Genesis(Timestamp::random(rng))\n        }\n    }\n}\n\nimpl Default for ActivationPoint {\n    fn default() -> Self {\n        ActivationPoint::Genesis(Timestamp::now().saturating_add(TimeDiff::from_seconds(15)))\n    }\n}\n\nimpl Display for ActivationPoint {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            ActivationPoint::EraId(era_id) => write!(formatter, \"activation point {}\", era_id),\n            ActivationPoint::Genesis(timestamp) => {\n                write!(formatter, \"activation point {}\", timestamp)\n            }\n        }\n    }\n}\n\nimpl ToBytes for ActivationPoint {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        match self {\n            ActivationPoint::EraId(era_id) => {\n                let mut buffer = vec![ERA_ID_TAG];\n                buffer.extend(era_id.to_bytes()?);\n                Ok(buffer)\n            }\n            ActivationPoint::Genesis(timestamp) => {\n                let mut buffer = vec![GENESIS_TAG];\n                buffer.extend(timestamp.to_bytes()?);\n                Ok(buffer)\n            }\n        }\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                ActivationPoint::EraId(era_id) => era_id.serialized_length(),\n                ActivationPoint::Genesis(timestamp) => timestamp.serialized_length(),\n            }\n    }\n}\n\nimpl FromBytes for ActivationPoint {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            ERA_ID_TAG => {\n                let (era_id, remainder) = EraId::from_bytes(remainder)?;\n                Ok((ActivationPoint::EraId(era_id), remainder))\n            }\n 
           GENESIS_TAG => {\n                let (timestamp, remainder) = Timestamp::from_bytes(remainder)?;\n                Ok((ActivationPoint::Genesis(timestamp), remainder))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/chainspec_raw_bytes.rs",
    "content": "use core::fmt::{self, Debug, Display, Formatter};\n\nuse crate::bytesrepr::{self, Bytes, FromBytes, ToBytes};\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n/// The raw bytes of the chainspec.toml, genesis accounts.toml, and global_state.toml files.\n#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct ChainspecRawBytes {\n    /// Raw bytes of the current chainspec.toml file.\n    chainspec_bytes: Bytes,\n    /// Raw bytes of the current genesis accounts.toml file.\n    maybe_genesis_accounts_bytes: Option<Bytes>,\n    /// Raw bytes of the current global_state.toml file.\n    maybe_global_state_bytes: Option<Bytes>,\n}\n\nimpl ChainspecRawBytes {\n    /// Create an instance from parts.\n    pub fn new(\n        chainspec_bytes: Bytes,\n        maybe_genesis_accounts_bytes: Option<Bytes>,\n        maybe_global_state_bytes: Option<Bytes>,\n    ) -> Self {\n        ChainspecRawBytes {\n            chainspec_bytes,\n            maybe_genesis_accounts_bytes,\n            maybe_global_state_bytes,\n        }\n    }\n\n    /// The bytes of the chainspec file.\n    pub fn chainspec_bytes(&self) -> &[u8] {\n        self.chainspec_bytes.as_slice()\n    }\n\n    /// The bytes of global state account entries, when present for a protocol version.\n    pub fn maybe_genesis_accounts_bytes(&self) -> Option<&[u8]> {\n        match self.maybe_genesis_accounts_bytes.as_ref() {\n            Some(bytes) => Some(bytes.as_slice()),\n            None => None,\n        }\n    }\n\n    /// The bytes of global state update entries, when present for a protocol version.\n    pub fn maybe_global_state_bytes(&self) -> Option<&[u8]> {\n        match 
self.maybe_global_state_bytes.as_ref() {\n            Some(bytes) => Some(bytes.as_slice()),\n            None => None,\n        }\n    }\n\n    /// Returns a random `ChainspecRawBytes`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        use rand::Rng;\n\n        let chainspec_bytes = Bytes::from(rng.random_vec(0..1024));\n        let maybe_genesis_accounts_bytes = rng\n            .gen::<bool>()\n            .then(|| Bytes::from(rng.random_vec(0..1024)));\n        let maybe_global_state_bytes = rng\n            .gen::<bool>()\n            .then(|| Bytes::from(rng.random_vec(0..1024)));\n        ChainspecRawBytes {\n            chainspec_bytes,\n            maybe_genesis_accounts_bytes,\n            maybe_global_state_bytes,\n        }\n    }\n}\n\nimpl Debug for ChainspecRawBytes {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        let genesis_accounts_bytes_owned: Bytes;\n        let global_state_bytes_owned: Bytes;\n        f.debug_struct(\"ChainspecRawBytes\")\n            .field(\n                \"chainspec_bytes\",\n                &self.chainspec_bytes[0..16].to_ascii_uppercase(),\n            )\n            .field(\n                \"maybe_genesis_accounts_bytes\",\n                match self.maybe_genesis_accounts_bytes.as_ref() {\n                    Some(genesis_accounts_bytes) => {\n                        genesis_accounts_bytes_owned =\n                            genesis_accounts_bytes[0..16].to_ascii_uppercase().into();\n                        &genesis_accounts_bytes_owned\n                    }\n                    None => &self.maybe_genesis_accounts_bytes,\n                },\n            )\n            .field(\n                \"maybe_global_state_bytes\",\n                match self.maybe_global_state_bytes.as_ref() {\n                    Some(global_state_bytes) => {\n                        global_state_bytes_owned =\n                            
global_state_bytes[0..16].to_ascii_uppercase().into();\n                        &global_state_bytes_owned\n                    }\n                    None => &self.maybe_global_state_bytes,\n                },\n            )\n            .finish()\n    }\n}\n\nimpl Display for ChainspecRawBytes {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(\n            formatter,\n            \"{}\",\n            String::from_utf8_lossy(&self.chainspec_bytes)\n        )?;\n        if let Some(genesis_accounts_bytes) = &self.maybe_genesis_accounts_bytes {\n            write!(\n                formatter,\n                \"{}\",\n                String::from_utf8_lossy(genesis_accounts_bytes)\n            )?;\n        }\n        if let Some(global_state_bytes) = &self.maybe_global_state_bytes {\n            write!(formatter, \"{}\", String::from_utf8_lossy(global_state_bytes))?;\n        }\n        Ok(())\n    }\n}\n\nimpl ToBytes for ChainspecRawBytes {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        let ChainspecRawBytes {\n            chainspec_bytes,\n            maybe_genesis_accounts_bytes,\n            maybe_global_state_bytes,\n        } = self;\n\n        chainspec_bytes.write_bytes(writer)?;\n        maybe_genesis_accounts_bytes.write_bytes(writer)?;\n        maybe_global_state_bytes.write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        let ChainspecRawBytes {\n            chainspec_bytes,\n            maybe_genesis_accounts_bytes,\n            maybe_global_state_bytes,\n        } = self;\n        chainspec_bytes.serialized_length()\n            + maybe_genesis_accounts_bytes.serialized_length()\n            + maybe_global_state_bytes.serialized_length()\n    }\n}\n\nimpl 
FromBytes for ChainspecRawBytes {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (chainspec_bytes, remainder) = FromBytes::from_bytes(bytes)?;\n        let (maybe_genesis_accounts_bytes, remainder) = FromBytes::from_bytes(remainder)?;\n        let (maybe_global_state_bytes, remainder) = FromBytes::from_bytes(remainder)?;\n\n        Ok((\n            ChainspecRawBytes {\n                chainspec_bytes,\n                maybe_genesis_accounts_bytes,\n                maybe_global_state_bytes,\n            },\n            remainder,\n        ))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::testing::TestRng;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = ChainspecRawBytes::random(rng);\n        bytesrepr::test_serialization_roundtrip(&val);\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/core_config.rs",
    "content": "use alloc::collections::BTreeSet;\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse num::rational::Ratio;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\n\nuse serde::{\n    de::{Deserializer, Error as DeError},\n    Deserialize, Serialize, Serializer,\n};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    chainspec::rewards_handling::RewardsHandling,\n    ProtocolVersion, PublicKey, TimeDiff, U512,\n};\n\nuse super::{\n    fee_handling::FeeHandling, hold_balance_handling::HoldBalanceHandling,\n    pricing_handling::PricingHandling, refund_handling::RefundHandling,\n};\n\n/// Default value for maximum associated keys configuration option.\npub const DEFAULT_MAX_ASSOCIATED_KEYS: u32 = 100;\n\n/// Default value for maximum runtime call stack height configuration option.\npub const DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT: u32 = 12;\n\n/// Default refund handling.\npub const DEFAULT_REFUND_HANDLING: RefundHandling = RefundHandling::NoRefund;\n\n/// Default pricing handling.\npub const DEFAULT_PRICING_HANDLING: PricingHandling = PricingHandling::Fixed;\n\n/// Default fee handling.\npub const DEFAULT_FEE_HANDLING: FeeHandling = FeeHandling::NoFee;\n\n/// Default allow prepaid.\npub const DEFAULT_ALLOW_PREPAID: bool = false;\n\n/// Default value for minimum bid amount in motes.\npub const DEFAULT_MINIMUM_BID_AMOUNT: u64 = 2;\n\n/// Default processing hold balance handling.\n#[allow(unused)]\npub const DEFAULT_PROCESSING_HOLD_BALANCE_HANDLING: HoldBalanceHandling =\n    HoldBalanceHandling::Accrued;\n\n/// Default gas hold balance handling.\npub const DEFAULT_GAS_HOLD_BALANCE_HANDLING: HoldBalanceHandling = HoldBalanceHandling::Amortized;\n\n/// Default gas hold interval.\npub const DEFAULT_GAS_HOLD_INTERVAL: TimeDiff = TimeDiff::from_seconds(24 * 60 * 60);\n\n/// Default enable entity 
setting.\npub const DEFAULT_ENABLE_ENTITY: bool = false;\n\n/// Default baseline motes amount.\npub const DEFAULT_BASELINE_MOTES_AMOUNT: u64 = 2_500_000_000;\n\n/// Configuration values associated with the core protocol.\n#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n// Disallow unknown fields to ensure config files and command-line overrides contain valid keys.\n#[serde(deny_unknown_fields)]\npub struct CoreConfig {\n    /// Duration of an era.\n    pub era_duration: TimeDiff,\n\n    /// Minimum era height.\n    pub minimum_era_height: u64,\n\n    /// Minimum block time.\n    pub minimum_block_time: TimeDiff,\n\n    /// Validator slots.\n    pub validator_slots: u32,\n\n    /// Finality threshold fraction.\n    #[cfg_attr(feature = \"datasize\", data_size(skip))]\n    pub finality_threshold_fraction: Ratio<u64>,\n\n    /// Protocol version from which nodes are required to hold strict finality signatures.\n    pub start_protocol_version_with_strict_finality_signatures_required: ProtocolVersion,\n\n    /// Which finality is required for legacy blocks.\n    /// Used to determine finality sufficiency for new joiners syncing blocks created\n    /// in a protocol version before\n    /// `start_protocol_version_with_strict_finality_signatures_required`.\n    pub legacy_required_finality: LegacyRequiredFinality,\n\n    /// Number of eras before an auction actually defines the set of validators.\n    /// If you bond with a sufficient bid in era N, you will be a validator in era N +\n    /// auction_delay + 1\n    pub auction_delay: u64,\n\n    /// The period after genesis during which a genesis validator's bid is locked.\n    pub locked_funds_period: TimeDiff,\n\n    /// The period in which genesis validator's bid is released over time after it's unlocked.\n    pub vesting_schedule_period: TimeDiff,\n\n    /// The delay in number of eras for paying out the unbonding amount.\n    pub unbonding_delay: 
u64,\n\n    /// Round seigniorage rate represented as a fractional number.\n    #[cfg_attr(feature = \"datasize\", data_size(skip))]\n    pub round_seigniorage_rate: Ratio<u64>,\n\n    /// Maximum number of associated keys for a single account.\n    pub max_associated_keys: u32,\n\n    /// Maximum height of contract runtime call stack.\n    pub max_runtime_call_stack_height: u32,\n\n    /// The minimum bound of motes that can be delegated to a validator.\n    pub minimum_delegation_amount: u64,\n\n    /// The maximum bound of motes that can be delegated to a validator.\n    pub maximum_delegation_amount: u64,\n\n    /// The minimum bound of motes that can be bid for a validator.\n    pub minimum_bid_amount: u64,\n\n    /// Global state prune batch size (0 means the feature is off in the current protocol version).\n    pub prune_batch_size: u64,\n\n    /// Enables strict arguments checking when calling a contract.\n    pub strict_argument_checking: bool,\n\n    /// How many peers to simultaneously ask when sync leaping.\n    pub simultaneous_peer_requests: u8,\n\n    /// Which consensus protocol to use.\n    pub consensus_protocol: ConsensusProtocolName,\n\n    /// The maximum amount of delegators per validator.\n    /// if the value is 0, there is no maximum capacity.\n    pub max_delegators_per_validator: u32,\n\n    /// The split in finality signature rewards between block producer and participating signers.\n    #[cfg_attr(feature = \"datasize\", data_size(skip))]\n    pub finders_fee: Ratio<u64>,\n\n    /// The proportion of baseline rewards going to reward finality signatures specifically.\n    #[cfg_attr(feature = \"datasize\", data_size(skip))]\n    pub finality_signature_proportion: Ratio<u64>,\n\n    /// The cap for validator credits based upon a proportion of a receiving validator's total\n    /// stake.\n    #[cfg_attr(feature = \"datasize\", data_size(skip))]\n    pub validator_credit_cap: Ratio<u64>,\n\n    /// Lookback interval indicating which past 
block we are looking at to reward.\n    pub signature_rewards_max_delay: u64,\n    /// Auction entrypoints such as \"add_bid\" or \"delegate\" are disabled if this flag is set to\n    /// `false`. Setting up this option makes sense only for private chains where validator set\n    /// rotation is unnecessary.\n    pub allow_auction_bids: bool,\n    /// Allows unrestricted transfers between users.\n    pub allow_unrestricted_transfers: bool,\n    /// If set to false then consensus doesn't compute rewards and always uses 0.\n    pub compute_rewards: bool,\n    /// Refund handling.\n    #[cfg_attr(feature = \"datasize\", data_size(skip))]\n    pub refund_handling: RefundHandling,\n    /// Fee handling.\n    pub fee_handling: FeeHandling,\n    /// Pricing handling.\n    pub pricing_handling: PricingHandling,\n    /// Allow prepaid.\n    pub allow_prepaid: bool,\n    /// How do gas holds affect available balance calculations?\n    pub gas_hold_balance_handling: HoldBalanceHandling,\n    /// How long does it take for a gas hold to expire?\n    pub gas_hold_interval: TimeDiff,\n    /// Administrative accounts are a valid option for a private chain only.\n    //#[serde(default, skip_serializing_if = \"BTreeSet::is_empty\")]\n    pub administrators: BTreeSet<PublicKey>,\n    /// Turn on migration to addressable entity behavior.\n    pub enable_addressable_entity: bool,\n    /// This value is used as the penalty payment amount, the minimum balance amount,\n    /// and the minimum consumed amount.\n    pub baseline_motes_amount: u64,\n    /// The flag on whether the engine will return an error for multiple\n    /// entity versions.\n    pub trap_on_ambiguous_entity_version: bool,\n    #[cfg_attr(feature = \"datasize\", data_size(skip))]\n    pub rewards_handling: RewardsHandling,\n    /// Minimum delegation rate a validator can specify.\n    /// Also applies to delegation reservations.\n    pub minimum_delegation_rate: u8,\n}\n\nimpl CoreConfig {\n    /// Turn on migration to 
addressable entity behavior.\n    pub fn enable_addressable_entity(&self) -> bool {\n        self.enable_addressable_entity\n    }\n\n    /// The number of eras that have already started and whose validators are still bonded.\n    pub fn recent_era_count(&self) -> u64 {\n        // Safe to use naked `-` operation assuming `CoreConfig::is_valid()` has been checked.\n        self.unbonding_delay - self.auction_delay\n    }\n\n    /// The proportion of the total rewards going to block production.\n    pub fn production_rewards_proportion(&self) -> Ratio<u64> {\n        Ratio::new(1, 1) - self.finality_signature_proportion\n    }\n\n    /// The finder's fee, *i.e.* the proportion of the total rewards going to the validator\n    /// collecting the finality signatures which is the validator producing the block.\n    pub fn collection_rewards_proportion(&self) -> Ratio<u64> {\n        self.finders_fee * self.finality_signature_proportion\n    }\n\n    /// The proportion of the total rewards going to finality signatures collection.\n    pub fn contribution_rewards_proportion(&self) -> Ratio<u64> {\n        (Ratio::new(1, 1) - self.finders_fee) * self.finality_signature_proportion\n    }\n\n    /// The baseline motes amount as a U512.\n    pub fn baseline_motes_amount_u512(&self) -> U512 {\n        U512::from(self.baseline_motes_amount)\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl CoreConfig {\n    /// Generates a random instance using a `TestRng`.\n    pub fn random(rng: &mut TestRng) -> Self {\n        let era_duration = TimeDiff::from_seconds(rng.gen_range(600..604_800));\n        let minimum_era_height = rng.gen_range(5..100);\n        let minimum_block_time = TimeDiff::from_seconds(rng.gen_range(1..60));\n        let validator_slots = rng.gen_range(1..10_000);\n        let finality_threshold_fraction = Ratio::new(rng.gen_range(1..100), 100);\n        let start_protocol_version_with_strict_finality_signatures_required =\n            
ProtocolVersion::from_parts(1, rng.gen_range(5..10), rng.gen_range(0..100));\n        let legacy_required_finality = rng.gen();\n        let auction_delay = rng.gen_range(1..5);\n        let locked_funds_period = TimeDiff::from_seconds(rng.gen_range(600..604_800));\n        let vesting_schedule_period = TimeDiff::from_seconds(rng.gen_range(600..604_800));\n        let unbonding_delay = rng.gen_range((auction_delay + 1)..1_000_000_000);\n        let round_seigniorage_rate = Ratio::new(\n            rng.gen_range(1..1_000_000_000),\n            rng.gen_range(1..1_000_000_000),\n        );\n        let max_associated_keys = rng.gen();\n        let max_runtime_call_stack_height = rng.gen();\n        let minimum_delegation_amount = rng.gen::<u32>() as u64;\n        // `maximum_delegation_amount` must be greater than `minimum_delegation_amount`.\n        let maximum_delegation_amount = rng.gen_range(minimum_delegation_amount..u32::MAX as u64);\n        let minimum_bid_amount = DEFAULT_MINIMUM_BID_AMOUNT;\n        let prune_batch_size = rng.gen_range(0..100);\n        let strict_argument_checking = rng.gen();\n        let simultaneous_peer_requests = rng.gen_range(3..100);\n        let consensus_protocol = rng.gen();\n        let finders_fee = Ratio::new(rng.gen_range(1..100), 100);\n        let finality_signature_proportion = Ratio::new(rng.gen_range(1..100), 100);\n        let signature_rewards_max_delay = rng.gen_range(1..10);\n        let allow_auction_bids = rng.gen();\n        let allow_unrestricted_transfers = rng.gen();\n        let compute_rewards = rng.gen();\n        let administrators = (0..rng.gen_range(0..=10u32))\n            .map(|_| PublicKey::random(rng))\n            .collect();\n        let refund_handling = {\n            let numer = rng.gen_range(0..=100);\n            let refund_ratio = Ratio::new(numer, 100);\n            RefundHandling::Refund { refund_ratio }\n        };\n\n        let pricing_handling = if rng.gen() {\n            
PricingHandling::PaymentLimited\n        } else {\n            PricingHandling::Fixed\n        };\n\n        let allow_prepaid = DEFAULT_ALLOW_PREPAID;\n\n        let fee_handling = if rng.gen() {\n            FeeHandling::PayToProposer\n        } else {\n            FeeHandling::NoFee\n        };\n\n        let gas_hold_balance_handling = if rng.gen() {\n            HoldBalanceHandling::Accrued\n        } else {\n            HoldBalanceHandling::Amortized\n        };\n\n        let gas_hold_interval = TimeDiff::from_seconds(rng.gen_range(600..604_800));\n\n        let validator_credit_cap = Ratio::new(rng.gen_range(1..100), 100);\n\n        CoreConfig {\n            era_duration,\n            minimum_era_height,\n            minimum_block_time,\n            validator_slots,\n            finality_threshold_fraction,\n            start_protocol_version_with_strict_finality_signatures_required,\n            legacy_required_finality,\n            auction_delay,\n            locked_funds_period,\n            vesting_schedule_period,\n            unbonding_delay,\n            round_seigniorage_rate,\n            max_associated_keys,\n            max_runtime_call_stack_height,\n            minimum_delegation_amount,\n            maximum_delegation_amount,\n            minimum_bid_amount,\n            prune_batch_size,\n            strict_argument_checking,\n            simultaneous_peer_requests,\n            consensus_protocol,\n            max_delegators_per_validator: 0,\n            finders_fee,\n            finality_signature_proportion,\n            signature_rewards_max_delay,\n            allow_auction_bids,\n            administrators,\n            allow_unrestricted_transfers,\n            compute_rewards,\n            refund_handling,\n            pricing_handling,\n            allow_prepaid,\n            fee_handling,\n            gas_hold_balance_handling,\n            gas_hold_interval,\n            validator_credit_cap,\n            
enable_addressable_entity: DEFAULT_ENABLE_ENTITY,\n            baseline_motes_amount: DEFAULT_BASELINE_MOTES_AMOUNT,\n            trap_on_ambiguous_entity_version: false,\n            rewards_handling: RewardsHandling::Standard,\n            minimum_delegation_rate: 0,\n        }\n    }\n}\n\nimpl Default for CoreConfig {\n    fn default() -> Self {\n        Self {\n            era_duration: TimeDiff::from_seconds(41),\n            minimum_era_height: 5,\n            minimum_block_time: TimeDiff::from_millis(4096),\n            validator_slots: 7,\n            finality_threshold_fraction: Ratio::new(1, 3),\n            start_protocol_version_with_strict_finality_signatures_required:\n                ProtocolVersion::from_parts(1, 5, 0),\n            legacy_required_finality: LegacyRequiredFinality::Weak,\n            auction_delay: 1,\n            locked_funds_period: Default::default(),\n            vesting_schedule_period: Default::default(),\n            unbonding_delay: 7,\n            round_seigniorage_rate: Ratio::new(1, 4_200_000_000_000_000_000),\n            max_associated_keys: DEFAULT_MAX_ASSOCIATED_KEYS,\n            max_runtime_call_stack_height: DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT,\n            minimum_delegation_amount: 500_000_000_000,\n            maximum_delegation_amount: 1_000_000_000_000_000_000,\n            minimum_bid_amount: DEFAULT_MINIMUM_BID_AMOUNT,\n            prune_batch_size: 0,\n            strict_argument_checking: false,\n            simultaneous_peer_requests: 5,\n            consensus_protocol: ConsensusProtocolName::Zug,\n            max_delegators_per_validator: 1200,\n            finders_fee: Ratio::new(1, 5),\n            finality_signature_proportion: Ratio::new(1, 2),\n            signature_rewards_max_delay: 3,\n            allow_auction_bids: true,\n            allow_unrestricted_transfers: true,\n            compute_rewards: true,\n            administrators: Default::default(),\n            refund_handling: 
DEFAULT_REFUND_HANDLING,\n            pricing_handling: DEFAULT_PRICING_HANDLING,\n            fee_handling: DEFAULT_FEE_HANDLING,\n            allow_prepaid: DEFAULT_ALLOW_PREPAID,\n            gas_hold_balance_handling: DEFAULT_GAS_HOLD_BALANCE_HANDLING,\n            gas_hold_interval: DEFAULT_GAS_HOLD_INTERVAL,\n            validator_credit_cap: Ratio::new(1, 5),\n            enable_addressable_entity: DEFAULT_ENABLE_ENTITY,\n            baseline_motes_amount: DEFAULT_BASELINE_MOTES_AMOUNT,\n            trap_on_ambiguous_entity_version: false,\n            rewards_handling: RewardsHandling::Standard,\n            minimum_delegation_rate: 0,\n        }\n    }\n}\n\nimpl ToBytes for CoreConfig {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        buffer.extend(self.era_duration.to_bytes()?);\n        buffer.extend(self.minimum_era_height.to_bytes()?);\n        buffer.extend(self.minimum_block_time.to_bytes()?);\n        buffer.extend(self.validator_slots.to_bytes()?);\n        buffer.extend(self.finality_threshold_fraction.to_bytes()?);\n        buffer.extend(\n            self.start_protocol_version_with_strict_finality_signatures_required\n                .to_bytes()?,\n        );\n        buffer.extend(self.legacy_required_finality.to_bytes()?);\n        buffer.extend(self.auction_delay.to_bytes()?);\n        buffer.extend(self.locked_funds_period.to_bytes()?);\n        buffer.extend(self.vesting_schedule_period.to_bytes()?);\n        buffer.extend(self.unbonding_delay.to_bytes()?);\n        buffer.extend(self.round_seigniorage_rate.to_bytes()?);\n        buffer.extend(self.max_associated_keys.to_bytes()?);\n        buffer.extend(self.max_runtime_call_stack_height.to_bytes()?);\n        buffer.extend(self.minimum_delegation_amount.to_bytes()?);\n        buffer.extend(self.maximum_delegation_amount.to_bytes()?);\n        buffer.extend(self.minimum_bid_amount.to_bytes()?);\n        
buffer.extend(self.prune_batch_size.to_bytes()?);\n        buffer.extend(self.strict_argument_checking.to_bytes()?);\n        buffer.extend(self.simultaneous_peer_requests.to_bytes()?);\n        buffer.extend(self.consensus_protocol.to_bytes()?);\n        buffer.extend(self.max_delegators_per_validator.to_bytes()?);\n        buffer.extend(self.finders_fee.to_bytes()?);\n        buffer.extend(self.finality_signature_proportion.to_bytes()?);\n        buffer.extend(self.signature_rewards_max_delay.to_bytes()?);\n        buffer.extend(self.allow_auction_bids.to_bytes()?);\n        buffer.extend(self.allow_unrestricted_transfers.to_bytes()?);\n        buffer.extend(self.compute_rewards.to_bytes()?);\n        buffer.extend(self.administrators.to_bytes()?);\n        buffer.extend(self.refund_handling.to_bytes()?);\n        buffer.extend(self.pricing_handling.to_bytes()?);\n        buffer.extend(self.fee_handling.to_bytes()?);\n        buffer.extend(self.allow_prepaid.to_bytes()?);\n        buffer.extend(self.gas_hold_balance_handling.to_bytes()?);\n        buffer.extend(self.gas_hold_interval.to_bytes()?);\n        buffer.extend(self.validator_credit_cap.to_bytes()?);\n        buffer.extend(self.enable_addressable_entity.to_bytes()?);\n        buffer.extend(self.baseline_motes_amount.to_bytes()?);\n        buffer.extend(self.trap_on_ambiguous_entity_version.to_bytes()?);\n        buffer.extend(self.rewards_handling.to_bytes()?);\n        buffer.extend(self.minimum_delegation_rate.to_bytes()?);\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.era_duration.serialized_length()\n            + self.minimum_era_height.serialized_length()\n            + self.minimum_block_time.serialized_length()\n            + self.validator_slots.serialized_length()\n            + self.finality_threshold_fraction.serialized_length()\n            + self\n                .start_protocol_version_with_strict_finality_signatures_required\n                
.serialized_length()\n            + self.legacy_required_finality.serialized_length()\n            + self.auction_delay.serialized_length()\n            + self.locked_funds_period.serialized_length()\n            + self.vesting_schedule_period.serialized_length()\n            + self.unbonding_delay.serialized_length()\n            + self.round_seigniorage_rate.serialized_length()\n            + self.max_associated_keys.serialized_length()\n            + self.max_runtime_call_stack_height.serialized_length()\n            + self.minimum_delegation_amount.serialized_length()\n            + self.maximum_delegation_amount.serialized_length()\n            + self.minimum_bid_amount.serialized_length()\n            + self.prune_batch_size.serialized_length()\n            + self.strict_argument_checking.serialized_length()\n            + self.simultaneous_peer_requests.serialized_length()\n            + self.consensus_protocol.serialized_length()\n            + self.max_delegators_per_validator.serialized_length()\n            + self.finders_fee.serialized_length()\n            + self.finality_signature_proportion.serialized_length()\n            + self.signature_rewards_max_delay.serialized_length()\n            + self.allow_auction_bids.serialized_length()\n            + self.allow_unrestricted_transfers.serialized_length()\n            + self.compute_rewards.serialized_length()\n            + self.administrators.serialized_length()\n            + self.refund_handling.serialized_length()\n            + self.pricing_handling.serialized_length()\n            + self.fee_handling.serialized_length()\n            + self.allow_prepaid.serialized_length()\n            + self.gas_hold_balance_handling.serialized_length()\n            + self.gas_hold_interval.serialized_length()\n            + self.validator_credit_cap.serialized_length()\n            + self.enable_addressable_entity.serialized_length()\n            + self.baseline_motes_amount.serialized_length()\n            + 
self.trap_on_ambiguous_entity_version.serialized_length()\n            + self.rewards_handling.serialized_length()\n            + self.minimum_delegation_rate.serialized_length()\n    }\n}\n\nimpl FromBytes for CoreConfig {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (era_duration, remainder) = TimeDiff::from_bytes(bytes)?;\n        let (minimum_era_height, remainder) = u64::from_bytes(remainder)?;\n        let (minimum_block_time, remainder) = TimeDiff::from_bytes(remainder)?;\n        let (validator_slots, remainder) = u32::from_bytes(remainder)?;\n        let (finality_threshold_fraction, remainder) = Ratio::<u64>::from_bytes(remainder)?;\n        let (start_protocol_version_with_strict_finality_signatures_required, remainder) =\n            ProtocolVersion::from_bytes(remainder)?;\n        let (legacy_required_finality, remainder) = LegacyRequiredFinality::from_bytes(remainder)?;\n        let (auction_delay, remainder) = u64::from_bytes(remainder)?;\n        let (locked_funds_period, remainder) = TimeDiff::from_bytes(remainder)?;\n        let (vesting_schedule_period, remainder) = TimeDiff::from_bytes(remainder)?;\n        let (unbonding_delay, remainder) = u64::from_bytes(remainder)?;\n        let (round_seigniorage_rate, remainder) = Ratio::<u64>::from_bytes(remainder)?;\n        let (max_associated_keys, remainder) = u32::from_bytes(remainder)?;\n        let (max_runtime_call_stack_height, remainder) = u32::from_bytes(remainder)?;\n        let (minimum_delegation_amount, remainder) = u64::from_bytes(remainder)?;\n        let (maximum_delegation_amount, remainder) = u64::from_bytes(remainder)?;\n        let (minimum_bid_amount, remainder) = u64::from_bytes(remainder)?;\n        let (prune_batch_size, remainder) = u64::from_bytes(remainder)?;\n        let (strict_argument_checking, remainder) = bool::from_bytes(remainder)?;\n        let (simultaneous_peer_requests, remainder) = u8::from_bytes(remainder)?;\n        
let (consensus_protocol, remainder) = ConsensusProtocolName::from_bytes(remainder)?;\n        let (max_delegators_per_validator, remainder) = FromBytes::from_bytes(remainder)?;\n        let (finders_fee, remainder) = Ratio::from_bytes(remainder)?;\n        let (finality_signature_proportion, remainder) = Ratio::from_bytes(remainder)?;\n        let (signature_rewards_max_delay, remainder) = u64::from_bytes(remainder)?;\n        let (allow_auction_bids, remainder) = FromBytes::from_bytes(remainder)?;\n        let (allow_unrestricted_transfers, remainder) = FromBytes::from_bytes(remainder)?;\n        let (compute_rewards, remainder) = bool::from_bytes(remainder)?;\n        let (administrative_accounts, remainder) = FromBytes::from_bytes(remainder)?;\n        let (refund_handling, remainder) = FromBytes::from_bytes(remainder)?;\n        let (pricing_handling, remainder) = FromBytes::from_bytes(remainder)?;\n        let (fee_handling, remainder) = FromBytes::from_bytes(remainder)?;\n        let (allow_prepaid, remainder) = FromBytes::from_bytes(remainder)?;\n        let (gas_hold_balance_handling, remainder) = FromBytes::from_bytes(remainder)?;\n        let (gas_hold_interval, remainder) = TimeDiff::from_bytes(remainder)?;\n        let (validator_credit_cap, remainder) = Ratio::from_bytes(remainder)?;\n        let (enable_addressable_entity, remainder) = FromBytes::from_bytes(remainder)?;\n        let (baseline_motes_amount, remainder) = u64::from_bytes(remainder)?;\n        let (trap_on_ambiguous_entity_version, remainder) = bool::from_bytes(remainder)?;\n        let (rewards_handling, remainder) = RewardsHandling::from_bytes(remainder)?;\n        let (minimum_delegation_rate, remainder) = u8::from_bytes(remainder)?;\n        let config = CoreConfig {\n            era_duration,\n            minimum_era_height,\n            minimum_block_time,\n            validator_slots,\n            finality_threshold_fraction,\n            
start_protocol_version_with_strict_finality_signatures_required,\n            legacy_required_finality,\n            auction_delay,\n            locked_funds_period,\n            vesting_schedule_period,\n            unbonding_delay,\n            round_seigniorage_rate,\n            max_associated_keys,\n            max_runtime_call_stack_height,\n            minimum_delegation_amount,\n            maximum_delegation_amount,\n            minimum_bid_amount,\n            prune_batch_size,\n            strict_argument_checking,\n            simultaneous_peer_requests,\n            consensus_protocol,\n            max_delegators_per_validator,\n            finders_fee,\n            finality_signature_proportion,\n            signature_rewards_max_delay,\n            allow_auction_bids,\n            allow_unrestricted_transfers,\n            compute_rewards,\n            administrators: administrative_accounts,\n            refund_handling,\n            pricing_handling,\n            fee_handling,\n            allow_prepaid,\n            gas_hold_balance_handling,\n            gas_hold_interval,\n            validator_credit_cap,\n            enable_addressable_entity,\n            baseline_motes_amount,\n            trap_on_ambiguous_entity_version,\n            rewards_handling,\n            minimum_delegation_rate,\n        };\n        Ok((config, remainder))\n    }\n}\n\n/// Consensus protocol name.\n#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub enum ConsensusProtocolName {\n    /// Highway.\n    Highway,\n    /// Zug.\n    #[default]\n    Zug,\n}\n\nimpl Serialize for ConsensusProtocolName {\n    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>\n    where\n        S: Serializer,\n    {\n        match self {\n            ConsensusProtocolName::Highway => \"Highway\",\n            ConsensusProtocolName::Zug => \"Zug\",\n        }\n        .serialize(serializer)\n    }\n}\n\nimpl<'de> 
Deserialize<'de> for ConsensusProtocolName {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        match String::deserialize(deserializer)?.to_lowercase().as_str() {\n            \"highway\" => Ok(ConsensusProtocolName::Highway),\n            \"zug\" => Ok(ConsensusProtocolName::Zug),\n            _ => Err(DeError::custom(\"unknown consensus protocol name\")),\n        }\n    }\n}\n\nconst CONSENSUS_HIGHWAY_TAG: u8 = 0;\nconst CONSENSUS_ZUG_TAG: u8 = 1;\n\nimpl ToBytes for ConsensusProtocolName {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let tag = match self {\n            ConsensusProtocolName::Highway => CONSENSUS_HIGHWAY_TAG,\n            ConsensusProtocolName::Zug => CONSENSUS_ZUG_TAG,\n        };\n        Ok(vec![tag])\n    }\n\n    fn serialized_length(&self) -> usize {\n        1\n    }\n}\n\nimpl FromBytes for ConsensusProtocolName {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        let name = match tag {\n            CONSENSUS_HIGHWAY_TAG => ConsensusProtocolName::Highway,\n            CONSENSUS_ZUG_TAG => ConsensusProtocolName::Zug,\n            _ => return Err(bytesrepr::Error::Formatting),\n        };\n        Ok((name, remainder))\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<ConsensusProtocolName> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> ConsensusProtocolName {\n        if rng.gen() {\n            ConsensusProtocolName::Highway\n        } else {\n            ConsensusProtocolName::Zug\n        }\n    }\n}\n\n/// Which finality a legacy block needs during a fast sync.\n#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub enum LegacyRequiredFinality {\n    /// Strict finality: more than 2/3rd of validators.\n    Strict,\n    /// Weak finality: more than 1/3rd of validators.\n  
  Weak,\n    /// Finality always valid.\n    #[default]\n    Any,\n}\n\nimpl Serialize for LegacyRequiredFinality {\n    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>\n    where\n        S: Serializer,\n    {\n        match self {\n            LegacyRequiredFinality::Strict => \"Strict\",\n            LegacyRequiredFinality::Weak => \"Weak\",\n            LegacyRequiredFinality::Any => \"Any\",\n        }\n        .serialize(serializer)\n    }\n}\n\nimpl<'de> Deserialize<'de> for LegacyRequiredFinality {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        match String::deserialize(deserializer)?.to_lowercase().as_str() {\n            \"strict\" => Ok(LegacyRequiredFinality::Strict),\n            \"weak\" => Ok(LegacyRequiredFinality::Weak),\n            \"any\" => Ok(LegacyRequiredFinality::Any),\n            _ => Err(DeError::custom(\"unknown legacy required finality\")),\n        }\n    }\n}\n\nconst LEGACY_REQUIRED_FINALITY_STRICT_TAG: u8 = 0;\nconst LEGACY_REQUIRED_FINALITY_WEAK_TAG: u8 = 1;\nconst LEGACY_REQUIRED_FINALITY_ANY_TAG: u8 = 2;\n\nimpl ToBytes for LegacyRequiredFinality {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let tag = match self {\n            LegacyRequiredFinality::Strict => LEGACY_REQUIRED_FINALITY_STRICT_TAG,\n            LegacyRequiredFinality::Weak => LEGACY_REQUIRED_FINALITY_WEAK_TAG,\n            LegacyRequiredFinality::Any => LEGACY_REQUIRED_FINALITY_ANY_TAG,\n        };\n        Ok(vec![tag])\n    }\n\n    fn serialized_length(&self) -> usize {\n        1\n    }\n}\n\nimpl FromBytes for LegacyRequiredFinality {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            LEGACY_REQUIRED_FINALITY_STRICT_TAG => Ok((LegacyRequiredFinality::Strict, remainder)),\n            LEGACY_REQUIRED_FINALITY_WEAK_TAG => 
Ok((LegacyRequiredFinality::Weak, remainder)),\n            LEGACY_REQUIRED_FINALITY_ANY_TAG => Ok((LegacyRequiredFinality::Any, remainder)),\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<LegacyRequiredFinality> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> LegacyRequiredFinality {\n        match rng.gen_range(0..3) {\n            0 => LegacyRequiredFinality::Strict,\n            1 => LegacyRequiredFinality::Weak,\n            2 => LegacyRequiredFinality::Any,\n            _not_in_range => unreachable!(),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use rand::SeedableRng;\n\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let mut rng = TestRng::from_entropy();\n        let config = CoreConfig::random(&mut rng);\n        bytesrepr::test_serialization_roundtrip(&config);\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/fee_handling.rs",
    "content": "#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse crate::bytesrepr::{self, FromBytes, ToBytes};\n\nconst FEE_HANDLING_PROPOSER_TAG: u8 = 0;\nconst FEE_HANDLING_ACCUMULATE_TAG: u8 = 1;\nconst FEE_HANDLING_BURN_TAG: u8 = 2;\nconst FEE_HANDLING_NONE_TAG: u8 = 3;\n\n/// Defines how fees are handled in the system.\n#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Default)]\n#[serde(tag = \"type\", rename_all = \"snake_case\")]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub enum FeeHandling {\n    /// Transaction fees are paid to the block proposer.\n    ///\n    /// This is the default option for public chains.\n    PayToProposer,\n    /// Transaction fees are accumulated in a special purse and then distributed during end of era\n    /// processing evenly among all administrator accounts.\n    ///\n    /// This setting is applicable for some private chains (but not all).\n    Accumulate,\n    /// Burn the fees.\n    Burn,\n    /// No fees.\n    // in 1.x the (implicit) default was PayToProposer\n    // FeeHandling::PayToProposer\n    // in 2.x the default is NoFee as there are no fees.\n    #[default]\n    NoFee,\n}\n\nimpl FeeHandling {\n    /// Is the Accumulate variant selected?\n    pub fn is_accumulate(&self) -> bool {\n        matches!(self, FeeHandling::Accumulate)\n    }\n\n    /// Returns true if configured for no fees.\n    pub fn is_no_fee(&self) -> bool {\n        matches!(self, FeeHandling::NoFee)\n    }\n}\n\nimpl ToBytes for FeeHandling {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        match self {\n            FeeHandling::PayToProposer => Ok(vec![FEE_HANDLING_PROPOSER_TAG]),\n            FeeHandling::Accumulate => Ok(vec![FEE_HANDLING_ACCUMULATE_TAG]),\n            FeeHandling::Burn => Ok(vec![FEE_HANDLING_BURN_TAG]),\n            FeeHandling::NoFee => Ok(vec![FEE_HANDLING_NONE_TAG]),\n        }\n    }\n\n    fn 
serialized_length(&self) -> usize {\n        1\n    }\n}\n\nimpl FromBytes for FeeHandling {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, rem) = u8::from_bytes(bytes)?;\n        match tag {\n            FEE_HANDLING_PROPOSER_TAG => Ok((FeeHandling::PayToProposer, rem)),\n            FEE_HANDLING_ACCUMULATE_TAG => Ok((FeeHandling::Accumulate, rem)),\n            FEE_HANDLING_BURN_TAG => Ok((FeeHandling::Burn, rem)),\n            FEE_HANDLING_NONE_TAG => Ok((FeeHandling::NoFee, rem)),\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip_for_refund() {\n        let fee_config = FeeHandling::PayToProposer;\n        bytesrepr::test_serialization_roundtrip(&fee_config);\n    }\n\n    #[test]\n    fn bytesrepr_roundtrip_for_accumulate() {\n        let fee_config = FeeHandling::Accumulate;\n        bytesrepr::test_serialization_roundtrip(&fee_config);\n    }\n\n    #[test]\n    fn bytesrepr_roundtrip_for_burn() {\n        let fee_config = FeeHandling::Burn;\n        bytesrepr::test_serialization_roundtrip(&fee_config);\n    }\n\n    #[test]\n    fn bytesrepr_roundtrip_for_no_fee() {\n        let fee_config = FeeHandling::NoFee;\n        bytesrepr::test_serialization_roundtrip(&fee_config);\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/genesis_config.rs",
    "content": "//! Contains genesis configuration settings.\n\n#[cfg(any(feature = \"testing\", test))]\nuse std::iter;\n\nuse num_rational::Ratio;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    system::auction::DelegationRate, AdministratorAccount, Chainspec, GenesisAccount,\n    GenesisValidator, HoldBalanceHandling, Motes, PublicKey, RewardsHandling, SystemConfig,\n    WasmConfig,\n};\n\nuse super::StorageCosts;\n\n/// Represents the details of a genesis process.\n#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]\npub struct GenesisConfig {\n    accounts: Vec<GenesisAccount>,\n    wasm_config: WasmConfig,\n    system_config: SystemConfig,\n    validator_slots: u32,\n    auction_delay: u64,\n    locked_funds_period_millis: u64,\n    round_seigniorage_rate: Ratio<u64>,\n    unbonding_delay: u64,\n    genesis_timestamp_millis: u64,\n    gas_hold_balance_handling: HoldBalanceHandling,\n    gas_hold_interval_millis: u64,\n    enable_addressable_entity: bool,\n    rewards_ratio: Option<Ratio<u64>>,\n    storage_costs: StorageCosts,\n    minimum_delegation_rate: DelegationRate,\n}\n\nimpl GenesisConfig {\n    /// Creates a new genesis configuration.\n    #[allow(clippy::too_many_arguments)]\n    pub fn new(\n        accounts: Vec<GenesisAccount>,\n        wasm_config: WasmConfig,\n        system_config: SystemConfig,\n        validator_slots: u32,\n        auction_delay: u64,\n        locked_funds_period_millis: u64,\n        round_seigniorage_rate: Ratio<u64>,\n        unbonding_delay: u64,\n        genesis_timestamp_millis: u64,\n        gas_hold_balance_handling: HoldBalanceHandling,\n        gas_hold_interval_millis: u64,\n        enable_addressable_entity: bool,\n        rewards_handling: Option<Ratio<u64>>,\n        storage_costs: StorageCosts,\n        minimum_delegation_rate: DelegationRate,\n    ) -> GenesisConfig {\n     
   GenesisConfig {\n            accounts,\n            wasm_config,\n            system_config,\n            validator_slots,\n            auction_delay,\n            locked_funds_period_millis,\n            round_seigniorage_rate,\n            unbonding_delay,\n            genesis_timestamp_millis,\n            gas_hold_balance_handling,\n            gas_hold_interval_millis,\n            enable_addressable_entity,\n            rewards_ratio: rewards_handling,\n            storage_costs,\n            minimum_delegation_rate,\n        }\n    }\n\n    /// Returns WASM config.\n    pub fn wasm_config(&self) -> &WasmConfig {\n        &self.wasm_config\n    }\n\n    /// Returns system config.\n    pub fn system_config(&self) -> &SystemConfig {\n        &self.system_config\n    }\n\n    /// Returns all bonded genesis validators.\n    pub fn get_bonded_validators(&self) -> impl Iterator<Item = &GenesisAccount> {\n        self.accounts_iter()\n            .filter(|&genesis_account| genesis_account.is_validator())\n    }\n\n    /// Returns all bonded genesis delegators.\n    pub fn get_bonded_delegators(\n        &self,\n    ) -> impl Iterator<Item = (&PublicKey, &PublicKey, &Motes, &Motes)> {\n        self.accounts\n            .iter()\n            .filter_map(|genesis_account| genesis_account.as_delegator())\n    }\n\n    /// Returns all genesis accounts.\n    pub fn accounts(&self) -> &[GenesisAccount] {\n        self.accounts.as_slice()\n    }\n\n    /// Returns an iterator over all genesis accounts.\n    pub fn accounts_iter(&self) -> impl Iterator<Item = &GenesisAccount> {\n        self.accounts.iter()\n    }\n\n    /// Returns an iterator over all administrative accounts.\n    pub fn administrative_accounts(&self) -> impl Iterator<Item = &AdministratorAccount> {\n        self.accounts\n            .iter()\n            .filter_map(GenesisAccount::as_administrator_account)\n    }\n\n    /// Adds new genesis account to the config.\n    pub fn push_account(&mut self, 
account: GenesisAccount) {\n        self.accounts.push(account)\n    }\n\n    /// Returns validator slots.\n    pub fn validator_slots(&self) -> u32 {\n        self.validator_slots\n    }\n\n    /// Returns auction delay.\n    pub fn auction_delay(&self) -> u64 {\n        self.auction_delay\n    }\n\n    /// Returns locked funds period expressed in milliseconds.\n    pub fn locked_funds_period_millis(&self) -> u64 {\n        self.locked_funds_period_millis\n    }\n\n    /// Returns round seigniorage rate.\n    pub fn round_seigniorage_rate(&self) -> Ratio<u64> {\n        self.round_seigniorage_rate\n    }\n\n    /// Returns unbonding delay in eras.\n    pub fn unbonding_delay(&self) -> u64 {\n        self.unbonding_delay\n    }\n\n    /// Returns genesis timestamp expressed in milliseconds.\n    pub fn genesis_timestamp_millis(&self) -> u64 {\n        self.genesis_timestamp_millis\n    }\n\n    /// Returns gas hold balance handling.\n    pub fn gas_hold_balance_handling(&self) -> HoldBalanceHandling {\n        self.gas_hold_balance_handling\n    }\n\n    /// Returns gas hold interval expressed in milliseconds.\n    pub fn gas_hold_interval_millis(&self) -> u64 {\n        self.gas_hold_interval_millis\n    }\n\n    /// Enable entity.\n    pub fn enable_entity(&self) -> bool {\n        self.enable_addressable_entity\n    }\n\n    /// Set enable entity.\n    pub fn set_enable_entity(&mut self, enable: bool) {\n        self.enable_addressable_entity = enable\n    }\n\n    /// Push genesis validator.\n    pub fn push_genesis_validator(\n        &mut self,\n        public_key: &PublicKey,\n        genesis_validator: GenesisValidator,\n    ) {\n        if let Some(genesis_account) = self\n            .accounts\n            .iter_mut()\n            .find(|x| &x.public_key() == public_key)\n        {\n            genesis_account.try_set_validator(genesis_validator);\n        }\n    }\n\n    pub fn rewards_ratio(&self) -> Option<Ratio<u64>> {\n        self.rewards_ratio\n    
}\n    pub fn push_rewards_ratio(&mut self, rewards_ratio: Ratio<u64>) {\n        self.rewards_ratio = Some(rewards_ratio);\n    }\n\n    pub fn minimum_delegation_rate(&self) -> DelegationRate {\n        self.minimum_delegation_rate\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<GenesisConfig> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> GenesisConfig {\n        let count = rng.gen_range(1..10);\n\n        let accounts = iter::repeat(()).map(|_| rng.gen()).take(count).collect();\n\n        let wasm_config = rng.gen();\n\n        let system_config = rng.gen();\n\n        let validator_slots = rng.gen();\n\n        let auction_delay = rng.gen();\n\n        let locked_funds_period_millis = rng.gen();\n\n        let round_seigniorage_rate = Ratio::new(\n            rng.gen_range(1..1_000_000_000),\n            rng.gen_range(1..1_000_000_000),\n        );\n\n        let unbonding_delay = rng.gen();\n\n        let genesis_timestamp_millis = rng.gen();\n        let gas_hold_balance_handling = rng.gen();\n        let gas_hold_interval_millis = rng.gen();\n        let storage_costs = rng.gen();\n        let minimum_delegation_rate = rng.gen();\n\n        GenesisConfig {\n            accounts,\n            wasm_config,\n            system_config,\n            validator_slots,\n            auction_delay,\n            locked_funds_period_millis,\n            round_seigniorage_rate,\n            unbonding_delay,\n            genesis_timestamp_millis,\n            gas_hold_balance_handling,\n            gas_hold_interval_millis,\n            enable_addressable_entity: false,\n            rewards_ratio: None,\n            storage_costs,\n            minimum_delegation_rate,\n        }\n    }\n}\n\nimpl From<&Chainspec> for GenesisConfig {\n    fn from(chainspec: &Chainspec) -> Self {\n        let genesis_timestamp_millis = chainspec\n            .protocol_config\n            .activation_point\n            .genesis_timestamp()\n  
          .map_or(0, |timestamp| timestamp.millis());\n        let gas_hold_interval_millis = chainspec.core_config.gas_hold_interval.millis();\n        let gas_hold_balance_handling = chainspec.core_config.gas_hold_balance_handling;\n        let rewards_ratio = match chainspec.core_config.rewards_handling {\n            RewardsHandling::Standard => None,\n            RewardsHandling::Sustain { ratio, .. } => Some(ratio),\n        };\n        let storage_costs = chainspec.storage_costs;\n        GenesisConfig {\n            accounts: chainspec.network_config.accounts_config.clone().into(),\n            wasm_config: chainspec.wasm_config,\n            system_config: chainspec.system_costs_config,\n            validator_slots: chainspec.core_config.validator_slots,\n            auction_delay: chainspec.core_config.auction_delay,\n            locked_funds_period_millis: chainspec.core_config.locked_funds_period.millis(),\n            round_seigniorage_rate: chainspec.core_config.round_seigniorage_rate,\n            unbonding_delay: chainspec.core_config.unbonding_delay,\n            genesis_timestamp_millis,\n            gas_hold_balance_handling,\n            gas_hold_interval_millis,\n            enable_addressable_entity: chainspec.core_config.enable_addressable_entity,\n            rewards_ratio,\n            storage_costs,\n            minimum_delegation_rate: chainspec.core_config.minimum_delegation_rate,\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/global_state_update.rs",
    "content": "#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\nuse serde::{Deserialize, Serialize};\nuse std::{collections::BTreeMap, convert::TryFrom};\nuse thiserror::Error;\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, Bytes, FromBytes, ToBytes},\n    AsymmetricType, Key, PublicKey, U512,\n};\n\n#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, Clone)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct GlobalStateUpdateEntry {\n    key: String,\n    value: String,\n}\n\n#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, Clone)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct GlobalStateUpdateValidatorInfo {\n    public_key: String,\n    weight: String,\n}\n\n/// Type storing global state update entries.\n#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, Clone)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct GlobalStateUpdateConfig {\n    validators: Option<Vec<GlobalStateUpdateValidatorInfo>>,\n    entries: Vec<GlobalStateUpdateEntry>,\n}\n\n/// Type storing the information about modifications to be applied to the global state.\n///\n/// It stores the serialized `StoredValue`s corresponding to keys to be modified, and for the case\n/// where the validator set is being modified in any way, the full set of post-upgrade validators.\n#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct GlobalStateUpdate {\n    /// Some with all validators (including pre-existent), if any change to the set is made.\n    pub validators: Option<BTreeMap<PublicKey, U512>>,\n    /// Global state key value pairs, which will be directly upserted into global state against\n    /// the root hash of the final block of the era 
before the upgrade.\n    pub entries: BTreeMap<Key, Bytes>,\n}\n\nimpl GlobalStateUpdate {\n    /// Returns a random `GlobalStateUpdate`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        let mut validators = BTreeMap::new();\n        if rng.gen() {\n            let count = rng.gen_range(5..10);\n            for _ in 0..count {\n                validators.insert(PublicKey::random(rng), rng.gen::<U512>());\n            }\n        }\n\n        let count = rng.gen_range(0..10);\n        let mut entries = BTreeMap::new();\n        for _ in 0..count {\n            entries.insert(rng.gen(), rng.gen());\n        }\n\n        Self {\n            validators: Some(validators),\n            entries,\n        }\n    }\n}\n\nimpl ToBytes for GlobalStateUpdate {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.validators.write_bytes(writer)?;\n        self.entries.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.validators.serialized_length() + self.entries.serialized_length()\n    }\n}\n\nimpl FromBytes for GlobalStateUpdate {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (validators, remainder) = Option::<BTreeMap<PublicKey, U512>>::from_bytes(bytes)?;\n        let (entries, remainder) = BTreeMap::<Key, Bytes>::from_bytes(remainder)?;\n        let global_state_update = GlobalStateUpdate {\n            entries,\n            validators,\n        };\n        Ok((global_state_update, remainder))\n    }\n}\n\n/// Error loading global state update file.\n#[derive(Debug, Error)]\npub enum GlobalStateUpdateError {\n    /// Error while decoding a key from a prefix formatted string.\n    #[error(\"decoding key from 
formatted string error: {0}\")]\n    DecodingKeyFromStr(String),\n    /// Error while decoding a key from a hex formatted string.\n    #[error(\"decoding key from hex string error: {0}\")]\n    DecodingKeyFromHex(String),\n    /// Error while decoding a public key weight from formatted string.\n    #[error(\"decoding weight from decimal string error: {0}\")]\n    DecodingWeightFromStr(String),\n    /// Error while decoding a serialized value from a base64 encoded string.\n    #[error(\"decoding from base64 error: {0}\")]\n    DecodingFromBase64(#[from] base64::DecodeError),\n}\n\nimpl TryFrom<GlobalStateUpdateConfig> for GlobalStateUpdate {\n    type Error = GlobalStateUpdateError;\n\n    fn try_from(config: GlobalStateUpdateConfig) -> Result<Self, Self::Error> {\n        let mut validators: Option<BTreeMap<PublicKey, U512>> = None;\n        if let Some(config_validators) = config.validators {\n            let mut new_validators = BTreeMap::new();\n            for (index, validator) in config_validators.into_iter().enumerate() {\n                let public_key = PublicKey::from_hex(&validator.public_key).map_err(|error| {\n                    GlobalStateUpdateError::DecodingKeyFromHex(format!(\n                        \"failed to decode validator public key {}: {:?}\",\n                        index, error\n                    ))\n                })?;\n                let weight = U512::from_dec_str(&validator.weight).map_err(|error| {\n                    GlobalStateUpdateError::DecodingWeightFromStr(format!(\n                        \"failed to decode validator weight {}: {}\",\n                        index, error\n                    ))\n                })?;\n                let _ = new_validators.insert(public_key, weight);\n            }\n            validators = Some(new_validators);\n        }\n\n        let mut entries = BTreeMap::new();\n        for (index, entry) in config.entries.into_iter().enumerate() {\n            let key = 
Key::from_formatted_str(&entry.key).map_err(|error| {\n                GlobalStateUpdateError::DecodingKeyFromStr(format!(\n                    \"failed to decode entry key {}: {}\",\n                    index, error\n                ))\n            })?;\n            let value = base64::decode(&entry.value)?.into();\n            let _ = entries.insert(key, value);\n        }\n\n        Ok(GlobalStateUpdate {\n            validators,\n            entries,\n        })\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use rand::SeedableRng;\n\n    #[test]\n    fn global_state_update_bytesrepr_roundtrip() {\n        let mut rng = TestRng::from_entropy();\n        let update = GlobalStateUpdate::random(&mut rng);\n        bytesrepr::test_serialization_roundtrip(&update);\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/highway_config.rs",
    "content": "#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    TimeDiff,\n};\n\n/// Configuration values relevant to Highway consensus.\n#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug, Default)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n// Disallow unknown fields to ensure config files and command-line overrides contain valid keys.\n#[serde(deny_unknown_fields)]\npub struct HighwayConfig {\n    /// The upper limit for Highway round lengths.\n    pub maximum_round_length: TimeDiff,\n}\n\nimpl HighwayConfig {\n    /// Checks whether the values set in the config make sense and returns `false` if they don't.\n    pub fn is_valid(&self) -> Result<(), String> {\n        Ok(())\n    }\n\n    /// Returns a random `HighwayConfig`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        let maximum_round_length = TimeDiff::from_seconds(rng.gen_range(60..600));\n\n        HighwayConfig {\n            maximum_round_length,\n        }\n    }\n}\n\nimpl ToBytes for HighwayConfig {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        buffer.extend(self.maximum_round_length.to_bytes()?);\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.maximum_round_length.serialized_length()\n    }\n}\n\nimpl FromBytes for HighwayConfig {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (maximum_round_length, remainder) = TimeDiff::from_bytes(bytes)?;\n        let config = HighwayConfig {\n            maximum_round_length,\n        };\n        Ok((config, remainder))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use 
rand::SeedableRng;\n\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let mut rng = TestRng::from_entropy();\n        let config = HighwayConfig::random(&mut rng);\n        bytesrepr::test_serialization_roundtrip(&config);\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/hold_balance_handling.rs",
    "content": "use crate::{\n    bytesrepr,\n    bytesrepr::{FromBytes, ToBytes},\n};\nuse core::fmt::{Display, Formatter};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\n\nconst HOLD_BALANCE_ACCRUED_TAG: u8 = 0;\nconst HOLD_BALANCE_AMORTIZED_TAG: u8 = 1;\nconst HOLD_BALANCE_HANDLING_TAG_LENGTH: u8 = 1;\n\n/// Defines how a given network handles holds when calculating available balances. There may be\n/// multiple types of holds (such as Processing and Gas currently, and potentially other kinds in\n/// the future), and each type of hold can differ on how it applies to available\n/// balance calculation.\n#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Default)]\n#[serde(tag = \"type\", rename_all = \"snake_case\")]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub enum HoldBalanceHandling {\n    /// The sum of full value of all non-expired holds is used.\n    // in 2.0 the default hold balance handling is Accrued,\n    // which means a non-expired hold is applied in full to\n    // available balance calculations\n    #[default]\n    Accrued,\n    /// The sum of each hold is amortized over the time remaining until expiry.\n    /// For instance, if 12 hours remain on a 24 hour hold, half the hold amount is applied.\n    Amortized,\n}\n\nimpl HoldBalanceHandling {\n    /// Returns variant for tag, if able.\n    #[allow(clippy::result_unit_err)]\n    pub fn from_tag(tag: u8) -> Result<HoldBalanceHandling, ()> {\n        if tag == HOLD_BALANCE_ACCRUED_TAG {\n            Ok(HoldBalanceHandling::Accrued)\n        } else if tag == HOLD_BALANCE_AMORTIZED_TAG {\n            Ok(HoldBalanceHandling::Amortized)\n        } else {\n            Err(())\n        }\n    }\n\n    /// Returns the tag for the variant.\n    pub fn tag(&self) -> u8 {\n        match self {\n            
HoldBalanceHandling::Accrued => HOLD_BALANCE_ACCRUED_TAG,\n            HoldBalanceHandling::Amortized => HOLD_BALANCE_AMORTIZED_TAG,\n        }\n    }\n}\n\nimpl Display for HoldBalanceHandling {\n    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {\n        match self {\n            HoldBalanceHandling::Accrued => {\n                write!(f, \"HoldBalanceHandling::Accrued\")\n            }\n            HoldBalanceHandling::Amortized => {\n                write!(f, \"HoldBalanceHandling::Amortized\")\n            }\n        }\n    }\n}\n\nimpl ToBytes for HoldBalanceHandling {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n\n        match self {\n            HoldBalanceHandling::Accrued => {\n                buffer.push(HOLD_BALANCE_ACCRUED_TAG);\n            }\n            HoldBalanceHandling::Amortized => {\n                buffer.push(HOLD_BALANCE_AMORTIZED_TAG);\n            }\n        }\n\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        HOLD_BALANCE_HANDLING_TAG_LENGTH as usize\n    }\n}\n\nimpl FromBytes for HoldBalanceHandling {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, rem) = u8::from_bytes(bytes)?;\n        match tag {\n            HOLD_BALANCE_ACCRUED_TAG => Ok((HoldBalanceHandling::Accrued, rem)),\n            HOLD_BALANCE_AMORTIZED_TAG => Ok((HoldBalanceHandling::Amortized, rem)),\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<HoldBalanceHandling> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> HoldBalanceHandling {\n        match rng.gen_range(HOLD_BALANCE_ACCRUED_TAG..=HOLD_BALANCE_AMORTIZED_TAG) {\n            HOLD_BALANCE_ACCRUED_TAG => HoldBalanceHandling::Accrued,\n            HOLD_BALANCE_AMORTIZED_TAG => HoldBalanceHandling::Amortized,\n            _ => 
unreachable!(),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip_for_accrued() {\n        let handling = HoldBalanceHandling::Accrued;\n        bytesrepr::test_serialization_roundtrip(&handling);\n    }\n\n    #[test]\n    fn bytesrepr_roundtrip_for_amortized() {\n        let handling = HoldBalanceHandling::Amortized;\n        bytesrepr::test_serialization_roundtrip(&handling);\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/network_config.rs",
    "content": "#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\nuse serde::Serialize;\n\nuse crate::bytesrepr::{self, FromBytes, ToBytes};\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\n\nuse super::AccountsConfig;\n\n/// Configuration values associated with the network.\n#[derive(Clone, PartialEq, Eq, Serialize, Debug, Default)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct NetworkConfig {\n    /// The network name.\n    pub name: String,\n    /// The maximum size of an accepted network message, in bytes.\n    pub maximum_net_message_size: u32,\n    /// Validator accounts specified in the chainspec.\n    // Note: `accounts_config` must be the last field on this struct due to issues in the TOML\n    // crate - see <https://github.com/alexcrichton/toml-rs/search?q=ValueAfterTable&type=issues>.\n    pub accounts_config: AccountsConfig,\n}\n\nimpl NetworkConfig {\n    /// Returns a random `NetworkConfig`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        let name = rng.gen::<char>().to_string();\n        let maximum_net_message_size = 4 + rng.gen_range(0..4);\n        let accounts_config = AccountsConfig::random(rng);\n\n        NetworkConfig {\n            name,\n            maximum_net_message_size,\n            accounts_config,\n        }\n    }\n}\n\nimpl ToBytes for NetworkConfig {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        buffer.extend(self.name.to_bytes()?);\n        buffer.extend(self.accounts_config.to_bytes()?);\n        buffer.extend(self.maximum_net_message_size.to_bytes()?);\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.name.serialized_length()\n            + self.accounts_config.serialized_length()\n            + 
self.maximum_net_message_size.serialized_length()\n    }\n}\n\nimpl FromBytes for NetworkConfig {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (name, remainder) = String::from_bytes(bytes)?;\n        let (accounts_config, remainder) = FromBytes::from_bytes(remainder)?;\n        let (maximum_net_message_size, remainder) = FromBytes::from_bytes(remainder)?;\n        let config = NetworkConfig {\n            name,\n            maximum_net_message_size,\n            accounts_config,\n        };\n        Ok((config, remainder))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use rand::SeedableRng;\n\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let mut rng = TestRng::from_entropy();\n        let config = NetworkConfig::random(&mut rng);\n        bytesrepr::test_serialization_roundtrip(&config);\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/next_upgrade.rs",
    "content": "use std::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    ActivationPoint, ProtocolConfig, ProtocolVersion,\n};\n\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\n\n/// Information about the next protocol upgrade.\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Clone, Copy)]\npub struct NextUpgrade {\n    activation_point: ActivationPoint,\n    protocol_version: ProtocolVersion,\n}\n\nimpl NextUpgrade {\n    /// Creates a new `NextUpgrade`.\n    pub fn new(activation_point: ActivationPoint, protocol_version: ProtocolVersion) -> Self {\n        NextUpgrade {\n            activation_point,\n            protocol_version,\n        }\n    }\n\n    /// Returns the activation point of the next upgrade.\n    pub fn activation_point(&self) -> ActivationPoint {\n        self.activation_point\n    }\n\n    /// Random.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        Self {\n            activation_point: ActivationPoint::random(rng),\n            protocol_version: ProtocolVersion::from_parts(rng.gen(), rng.gen(), rng.gen()),\n        }\n    }\n}\n\nimpl From<ProtocolConfig> for NextUpgrade {\n    fn from(protocol_config: ProtocolConfig) -> Self {\n        NextUpgrade {\n            activation_point: protocol_config.activation_point,\n            protocol_version: protocol_config.version,\n        }\n    }\n}\n\nimpl Display for NextUpgrade {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            formatter,\n            \"next upgrade to {} at start of era 
{}\",\n            self.protocol_version,\n            self.activation_point.era_id()\n        )\n    }\n}\n\nimpl ToBytes for NextUpgrade {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.activation_point.write_bytes(writer)?;\n        self.protocol_version.write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.activation_point.serialized_length() + self.protocol_version.serialized_length()\n    }\n}\n\nimpl FromBytes for NextUpgrade {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (activation_point, remainder) = ActivationPoint::from_bytes(bytes)?;\n        let (protocol_version, remainder) = ProtocolVersion::from_bytes(remainder)?;\n        Ok((\n            NextUpgrade {\n                activation_point,\n                protocol_version,\n            },\n            remainder,\n        ))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::testing::TestRng;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = NextUpgrade::random(rng);\n        bytesrepr::test_serialization_roundtrip(&val);\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/pricing_handling.rs",
    "content": "use crate::{\n    bytesrepr,\n    bytesrepr::{FromBytes, ToBytes},\n};\nuse core::fmt::{Display, Formatter};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nconst PRICING_HANDLING_TAG_LENGTH: u8 = 1;\n\nconst PRICING_HANDLING_PAYMENT_LIMITED_TAG: u8 = 0;\nconst PRICING_HANDLING_FIXED_TAG: u8 = 1;\n\n/// Defines what pricing mode a network allows. Correlates to the PricingMode of a\n/// [`crate::Transaction`]. Nodes will not accept transactions whose pricing mode does not match.\n#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Default)]\n#[serde(tag = \"type\", rename_all = \"snake_case\")]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub enum PricingHandling {\n    #[default]\n    /// The transaction sender self-specifies how much token they pay, which becomes their gas\n    /// limit.\n    PaymentLimited,\n    /// The costs are fixed, per the cost tables.\n    Fixed,\n}\n\nimpl Display for PricingHandling {\n    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {\n        match self {\n            PricingHandling::PaymentLimited => {\n                write!(f, \"PricingHandling::PaymentLimited\")\n            }\n            PricingHandling::Fixed => {\n                write!(f, \"PricingHandling::Fixed\")\n            }\n        }\n    }\n}\n\nimpl ToBytes for PricingHandling {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n\n        match self {\n            PricingHandling::PaymentLimited => {\n                buffer.push(PRICING_HANDLING_PAYMENT_LIMITED_TAG);\n            }\n            PricingHandling::Fixed => {\n                buffer.push(PRICING_HANDLING_FIXED_TAG);\n            }\n        }\n\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        PRICING_HANDLING_TAG_LENGTH as usize\n    }\n}\n\nimpl FromBytes for PricingHandling {\n    fn 
from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, rem) = u8::from_bytes(bytes)?;\n        match tag {\n            PRICING_HANDLING_PAYMENT_LIMITED_TAG => Ok((PricingHandling::PaymentLimited, rem)),\n            PRICING_HANDLING_FIXED_TAG => Ok((PricingHandling::Fixed, rem)),\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip_for_payment_limited() {\n        let handling = PricingHandling::PaymentLimited;\n        bytesrepr::test_serialization_roundtrip(&handling);\n    }\n\n    #[test]\n    fn bytesrepr_roundtrip_for_fixed() {\n        let handling = PricingHandling::Fixed;\n        bytesrepr::test_serialization_roundtrip(&handling);\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/protocol_config.rs",
    "content": "#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\nuse serde::{Deserialize, Serialize};\nuse std::{collections::BTreeMap, str::FromStr};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    Key, ProtocolVersion, StoredValue, Timestamp,\n};\n\nuse crate::{ActivationPoint, GlobalStateUpdate};\n\n/// Configuration values associated with the protocol.\n#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct ProtocolConfig {\n    /// Protocol version.\n    #[cfg_attr(feature = \"datasize\", data_size(skip))]\n    pub version: ProtocolVersion,\n    /// Whether we need to clear latest blocks back to the switch block just before the activation\n    /// point or not.\n    pub hard_reset: bool,\n    /// This protocol config applies starting at the era specified in the activation point.\n    pub activation_point: ActivationPoint,\n    /// Any arbitrary updates we might want to make to the global state at the start of the era\n    /// specified in the activation point.\n    pub global_state_update: Option<GlobalStateUpdate>,\n}\n\nimpl ProtocolConfig {\n    /// The mapping of [`Key`]s to [`StoredValue`]s we will use to update global storage in the\n    /// event of an emergency update.\n    pub(crate) fn get_update_mapping(\n        &self,\n    ) -> Result<BTreeMap<Key, StoredValue>, bytesrepr::Error> {\n        let state_update = match &self.global_state_update {\n            Some(GlobalStateUpdate { entries, .. 
}) => entries,\n            None => return Ok(BTreeMap::default()),\n        };\n        let mut update_mapping = BTreeMap::new();\n        for (key, stored_value_bytes) in state_update {\n            let stored_value = bytesrepr::deserialize(stored_value_bytes.clone().into())?;\n            update_mapping.insert(*key, stored_value);\n        }\n        Ok(update_mapping)\n    }\n\n    /// Returns a random `ProtocolConfig`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        let protocol_version = ProtocolVersion::from_parts(\n            rng.gen_range(0..10),\n            rng.gen::<u8>() as u32,\n            rng.gen::<u8>() as u32,\n        );\n        let activation_point = ActivationPoint::random(rng);\n\n        ProtocolConfig {\n            version: protocol_version,\n            hard_reset: rng.gen(),\n            activation_point,\n            global_state_update: None,\n        }\n    }\n}\n\nimpl ToBytes for ProtocolConfig {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        buffer.extend(self.version.to_string().to_bytes()?);\n        buffer.extend(self.hard_reset.to_bytes()?);\n        buffer.extend(self.activation_point.to_bytes()?);\n        buffer.extend(self.global_state_update.to_bytes()?);\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.version.to_string().serialized_length()\n            + self.hard_reset.serialized_length()\n            + self.activation_point.serialized_length()\n            + self.global_state_update.serialized_length()\n    }\n}\n\nimpl FromBytes for ProtocolConfig {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (protocol_version_string, remainder) = String::from_bytes(bytes)?;\n        let version = ProtocolVersion::from_str(&protocol_version_string)\n            .map_err(|_| bytesrepr::Error::Formatting)?;\n     
   let (hard_reset, remainder) = bool::from_bytes(remainder)?;\n        let (activation_point, remainder) = ActivationPoint::from_bytes(remainder)?;\n        let (global_state_update, remainder) = Option::<GlobalStateUpdate>::from_bytes(remainder)?;\n        let protocol_config = ProtocolConfig {\n            version,\n            hard_reset,\n            activation_point,\n            global_state_update,\n        };\n        Ok((protocol_config, remainder))\n    }\n}\n\nimpl Default for ProtocolConfig {\n    fn default() -> Self {\n        ProtocolConfig {\n            activation_point: ActivationPoint::Genesis(Timestamp::now()),\n            global_state_update: None,\n            hard_reset: true,\n            version: ProtocolVersion::V2_0_0,\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use rand::SeedableRng;\n\n    #[test]\n    fn activation_point_bytesrepr_roundtrip() {\n        let mut rng = TestRng::from_entropy();\n        let activation_point = ActivationPoint::random(&mut rng);\n        bytesrepr::test_serialization_roundtrip(&activation_point);\n    }\n\n    #[test]\n    fn protocol_config_bytesrepr_roundtrip() {\n        let mut rng = TestRng::from_entropy();\n        let config = ProtocolConfig::random(&mut rng);\n        bytesrepr::test_serialization_roundtrip(&config);\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/refund_handling.rs",
    "content": "/// Configuration options of refund handling that are executed as part of handle payment\n/// finalization.\nuse num_rational::Ratio;\nuse num_traits::Zero;\nuse serde::{Deserialize, Serialize};\n\nuse crate::bytesrepr::{self, FromBytes, ToBytes};\n\nconst REFUND_HANDLING_REFUND_TAG: u8 = 0;\nconst REFUND_HANDLING_BURN_TAG: u8 = 1;\nconst REFUND_HANDLING_NONE_TAG: u8 = 2;\n\n/// Defines how refunds are calculated.\n#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Default)]\n#[serde(tag = \"type\", rename_all = \"snake_case\")]\npub enum RefundHandling {\n    /// Refund of excess payment amount goes to either a pre-defined purse, or back to the sender\n    /// and the rest of the payment amount goes to the block proposer.\n    Refund {\n        /// Computes how much refund goes back to the user after deducting gas spent from the paid\n        /// amount.\n        ///\n        /// user_part = (payment_amount - gas_spent_amount) * refund_ratio\n        /// validator_part = payment_amount - user_part\n        ///\n        /// Any dust amount that was a result of multiplying by refund_ratio goes back to user.\n        refund_ratio: Ratio<u64>,\n    },\n    /// Burns the refund amount.\n    Burn {\n        /// Computes how much of the refund amount is burned after deducting gas spent from the\n        /// paid amount.\n        refund_ratio: Ratio<u64>,\n    },\n    /// No refunds.\n    // in 1.x the default was Refund\n    // RefundHandling::Refund {\n    //     refund_ratio: Ratio::new(99, 100),\n    // }\n    // in 2.0 the default payment mode is Fixed with Fee Elimination on,\n    // thus there is nothing to refund.\n    #[default]\n    NoRefund,\n}\n\nimpl RefundHandling {\n    /// Returns true if we don't need to process a refund.\n    pub fn skip_refund(&self) -> bool {\n        match self {\n            RefundHandling::NoRefund => true,\n            RefundHandling::Refund { refund_ratio } => refund_ratio.is_zero(),\n            
RefundHandling::Burn { .. } => false,\n        }\n    }\n\n    /// Returns refund ratio.\n    pub fn refund_ratio(&self) -> Ratio<u64> {\n        match self {\n            RefundHandling::Refund { refund_ratio } | RefundHandling::Burn { refund_ratio } => {\n                *refund_ratio\n            }\n            RefundHandling::NoRefund => Ratio::zero(),\n        }\n    }\n}\n\nimpl ToBytes for RefundHandling {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n\n        match self {\n            RefundHandling::Refund { refund_ratio } => {\n                buffer.push(REFUND_HANDLING_REFUND_TAG);\n                buffer.extend(refund_ratio.to_bytes()?);\n            }\n            RefundHandling::Burn { refund_ratio } => {\n                buffer.push(REFUND_HANDLING_BURN_TAG);\n                buffer.extend(refund_ratio.to_bytes()?);\n            }\n            RefundHandling::NoRefund => {\n                buffer.push(REFUND_HANDLING_NONE_TAG);\n            }\n        }\n\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        1 + match self {\n            RefundHandling::Refund { refund_ratio } => refund_ratio.serialized_length(),\n            RefundHandling::Burn { refund_ratio } => refund_ratio.serialized_length(),\n            RefundHandling::NoRefund => 0,\n        }\n    }\n}\n\nimpl FromBytes for RefundHandling {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, rem) = u8::from_bytes(bytes)?;\n        match tag {\n            REFUND_HANDLING_REFUND_TAG => {\n                let (refund_ratio, rem) = FromBytes::from_bytes(rem)?;\n                Ok((RefundHandling::Refund { refund_ratio }, rem))\n            }\n            REFUND_HANDLING_BURN_TAG => {\n                let (refund_ratio, rem) = FromBytes::from_bytes(rem)?;\n                Ok((RefundHandling::Burn { refund_ratio }, rem))\n            }\n       
     REFUND_HANDLING_NONE_TAG => Ok((RefundHandling::NoRefund, rem)),\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip_for_refund() {\n        let refund_config = RefundHandling::Refund {\n            refund_ratio: Ratio::new(49, 313),\n        };\n        bytesrepr::test_serialization_roundtrip(&refund_config);\n    }\n\n    #[test]\n    fn bytesrepr_roundtrip_for_burn() {\n        let refund_config = RefundHandling::Burn {\n            refund_ratio: Ratio::new(49, 313),\n        };\n        bytesrepr::test_serialization_roundtrip(&refund_config);\n    }\n\n    #[test]\n    fn bytesrepr_roundtrip_for_no_refund() {\n        let refund_config = RefundHandling::NoRefund;\n        bytesrepr::test_serialization_roundtrip(&refund_config);\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/rewards_handling.rs",
    "content": "/// Configuration options of reward handling that are executed as part of rewards distribution.\nuse num_rational::Ratio;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, Error, FromBytes, ToBytes},\n    uref::FromStrError,\n    URef,\n};\n\npub const REWARDS_HANDLING_RATIO_TAG: u8 = 0;\n\nconst REWARDS_HANDLING_STANDARD_TAG: u8 = 0;\n\nconst REWARDS_HANDLING_SUSTAIN_TAG: u8 = 1;\n\n#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Default)]\n#[serde(tag = \"type\", rename_all = \"snake_case\")]\npub enum RewardsHandling {\n    #[default]\n    Standard,\n\n    Sustain {\n        ratio: Ratio<u64>,\n        purse_address: String,\n    },\n}\n\nimpl RewardsHandling {\n    pub fn purse_address(&self) -> Option<Result<URef, FromStrError>> {\n        match self {\n            Self::Standard => None,\n            Self::Sustain { purse_address, .. } => Some(URef::from_formatted_str(purse_address)),\n        }\n    }\n\n    pub fn maybe_ratio(&self) -> Option<Ratio<u64>> {\n        match self {\n            Self::Standard => None,\n            Self::Sustain { ratio, .. 
} => Some(*ratio),\n        }\n    }\n\n    pub fn is_valid_configuration(&self) -> bool {\n        match self {\n            Self::Standard => true,\n            Self::Sustain {\n                ratio,\n                purse_address,\n            } => {\n                if *ratio.numer() > *ratio.denom() {\n                    return false;\n                }\n\n                if URef::from_formatted_str(purse_address).is_err() {\n                    return false;\n                }\n\n                true\n            }\n        }\n    }\n}\n\nimpl ToBytes for RewardsHandling {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n\n        match self {\n            Self::Standard => {\n                buffer.push(REWARDS_HANDLING_STANDARD_TAG);\n            }\n            Self::Sustain {\n                ratio,\n                purse_address,\n            } => {\n                buffer.push(REWARDS_HANDLING_SUSTAIN_TAG);\n                buffer.extend(ratio.to_bytes()?);\n                buffer.extend(purse_address.to_bytes()?);\n            }\n        }\n\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        1 + match self {\n            RewardsHandling::Standard => 0,\n            RewardsHandling::Sustain {\n                ratio,\n                purse_address,\n            } => ratio.serialized_length() + purse_address.serialized_length(),\n        }\n    }\n}\n\nimpl FromBytes for RewardsHandling {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (tag, rem) = u8::from_bytes(bytes)?;\n\n        match tag {\n            REWARDS_HANDLING_STANDARD_TAG => Ok((RewardsHandling::Standard, rem)),\n            REWARDS_HANDLING_SUSTAIN_TAG => {\n                let (ratio, rem) = FromBytes::from_bytes(rem)?;\n                let (purse_address, rem) = String::from_bytes(rem)?;\n                Ok((\n                    RewardsHandling::Sustain {\n    
                    ratio,\n                        purse_address,\n                    },\n                    rem,\n                ))\n            }\n            _ => Err(Error::Formatting),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::URef;\n\n    #[test]\n    fn bytesrepr_roundtrip_for_sustain() {\n        let rewards_handling = RewardsHandling::Sustain {\n            ratio: Ratio::new(49, 313),\n            purse_address: URef::default().to_formatted_string(),\n        };\n        bytesrepr::test_serialization_roundtrip(&rewards_handling);\n    }\n\n    #[test]\n    fn bytesrepr_roundtrip_for_standard() {\n        let rewards_handling = RewardsHandling::Standard;\n        bytesrepr::test_serialization_roundtrip(&rewards_handling);\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/transaction_config/deploy_config.rs",
    "content": "#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    Motes,\n};\n\n/// The default  maximum number of motes that payment code execution can cost.\npub const DEFAULT_MAX_PAYMENT_MOTES: u64 = 2_500_000_000;\n\n/// Configuration values associated with deploys.\n#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n// Disallow unknown fields to ensure config files and command-line overrides contain valid keys.\n#[serde(deny_unknown_fields)]\npub struct DeployConfig {\n    /// Maximum amount any deploy can pay.\n    pub max_payment_cost: Motes,\n    /// Maximum length in bytes of payment args per deploy.\n    pub payment_args_max_length: u32,\n    /// Maximum length in bytes of session args per deploy.\n    pub session_args_max_length: u32,\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl DeployConfig {\n    /// Generates a random instance using a `TestRng`.\n    pub fn random(rng: &mut TestRng) -> Self {\n        let max_payment_cost = Motes::new(rng.gen_range(1_000_000..1_000_000_000));\n        let payment_args_max_length = rng.gen();\n        let session_args_max_length = rng.gen();\n\n        DeployConfig {\n            max_payment_cost,\n            payment_args_max_length,\n            session_args_max_length,\n        }\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl Default for DeployConfig {\n    fn default() -> Self {\n        DeployConfig {\n            max_payment_cost: Motes::new(DEFAULT_MAX_PAYMENT_MOTES),\n            payment_args_max_length: 1024,\n            session_args_max_length: 1024,\n        }\n    }\n}\n\nimpl ToBytes for DeployConfig {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> 
{\n        self.max_payment_cost.write_bytes(writer)?;\n        self.payment_args_max_length.write_bytes(writer)?;\n        self.session_args_max_length.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.max_payment_cost.value().serialized_length()\n            + self.payment_args_max_length.serialized_length()\n            + self.session_args_max_length.serialized_length()\n    }\n}\n\nimpl FromBytes for DeployConfig {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (max_payment_cost, remainder) = Motes::from_bytes(bytes)?;\n        let (payment_args_max_length, remainder) = u32::from_bytes(remainder)?;\n        let (session_args_max_length, remainder) = u32::from_bytes(remainder)?;\n        let config = DeployConfig {\n            max_payment_cost,\n            payment_args_max_length,\n            session_args_max_length,\n        };\n        Ok((config, remainder))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let mut rng = TestRng::new();\n        let config = DeployConfig::random(&mut rng);\n        bytesrepr::test_serialization_roundtrip(&config);\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/transaction_config/runtime_config.rs",
    "content": "use crate::bytesrepr::{self, FromBytes, ToBytes};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"testing\", test))]\nuse {crate::testing::TestRng, rand::Rng};\n\n/// Configuration values associated with deploys.\n#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n// Disallow unknown fields to ensure config files and command-line overrides contain valid keys.\n#[serde(deny_unknown_fields)]\npub struct RuntimeConfig {\n    /// Whether the chain is using the Casper v1 runtime.\n    pub vm_casper_v1: bool,\n    /// Whether the chain is using the Casper v2 runtime.\n    pub vm_casper_v2: bool,\n}\n\nimpl RuntimeConfig {\n    #[cfg(any(feature = \"testing\", test))]\n    /// Generates a random instance using a `TestRng`.\n    pub fn random(rng: &mut TestRng) -> Self {\n        Self {\n            vm_casper_v1: rng.gen(),\n            vm_casper_v2: rng.gen(),\n        }\n    }\n}\n\nimpl FromBytes for RuntimeConfig {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), crate::bytesrepr::Error> {\n        let (vm_casper_v1, rem) = bool::from_bytes(bytes)?;\n        let (vm_casper_v2, rem) = bool::from_bytes(rem)?;\n        Ok((\n            RuntimeConfig {\n                vm_casper_v1,\n                vm_casper_v2,\n            },\n            rem,\n        ))\n    }\n}\n\nimpl ToBytes for RuntimeConfig {\n    fn to_bytes(&self) -> Result<Vec<u8>, crate::bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.vm_casper_v1.serialized_length() + self.vm_casper_v2.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), crate::bytesrepr::Error> {\n        self.vm_casper_v1.write_bytes(writer)?;\n        
self.vm_casper_v2.write_bytes(writer)\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/transaction_config/transaction_v1_config.rs",
    "content": "use core::cmp;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"once_cell\", test))]\nuse once_cell::sync::OnceCell;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\nuse serde::{\n    de::{Error, Unexpected},\n    ser::SerializeSeq,\n    Deserialize, Deserializer, Serialize, Serializer,\n};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID,\n};\n\n/// Default gas limit of standard transactions\npub const DEFAULT_LARGE_TRANSACTION_GAS_LIMIT: u64 = 6_000_000_000_000;\n\nconst DEFAULT_NATIVE_MINT_LANE: [u64; 5] = [0, 1_048_576, 1024, 2_500_000_000, 650];\nconst DEFAULT_NATIVE_AUCTION_LANE: [u64; 5] = [1, 1_048_576, 1024, 5_000_000_000_000, 145];\nconst DEFAULT_INSTALL_UPGRADE_LANE: [u64; 5] = [2, 1_048_576, 2048, 3_500_000_000_000, 2];\n\nconst TRANSACTION_ID_INDEX: usize = 0;\nconst TRANSACTION_LENGTH_INDEX: usize = 1;\nconst TRANSACTION_ARGS_LENGTH_INDEX: usize = 2;\nconst TRANSACTION_GAS_LIMIT_INDEX: usize = 3;\nconst TRANSACTION_COUNT_INDEX: usize = 4;\n\n/// Structured limits imposed on a transaction lane\n#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct TransactionLaneDefinition {\n    /// The lane identifier\n    #[deprecated(\n        note = \"Use TransactionLaneDefinition::id() or TransactionLaneDefinition::set_id() instead.\"\n    )]\n    pub id: u8,\n    /// The maximum length of a transaction in bytes\n    #[deprecated(\n        note = \"Use TransactionLaneDefinition::max_transaction_length() or TransactionLaneDefinition::set_max_transaction_length() instead.\"\n    )]\n    pub max_transaction_length: u64,\n    /// The max args length size in bytes\n    #[deprecated(\n        note = \"Use TransactionLaneDefinition::max_transaction_args_length() or 
TransactionLaneDefinition::set_max_transaction_args_length() instead.\"\n    )]\n    pub max_transaction_args_length: u64,\n    /// The maximum gas limit\n    #[deprecated(\n        note = \"Use TransactionLaneDefinition::max_transaction_gas_limit() or TransactionLaneDefinition::set_max_transaction_gas_limit() instead.\"\n    )]\n    pub max_transaction_gas_limit: u64,\n    /// The maximum number of transactions\n    #[deprecated(\n        note = \"Use TransactionLaneDefinition::max_transaction_count() or TransactionLaneDefinition::set_max_transaction_count() instead.\"\n    )]\n    pub max_transaction_count: u64,\n}\n\n#[allow(deprecated)]\nimpl TryFrom<Vec<u64>> for TransactionLaneDefinition {\n    type Error = TransactionConfigError;\n\n    fn try_from(v: Vec<u64>) -> Result<Self, Self::Error> {\n        if v.len() != 5 {\n            return Err(TransactionConfigError::InvalidArgsProvided);\n        }\n        Ok(TransactionLaneDefinition {\n            id: v[TRANSACTION_ID_INDEX] as u8,\n            max_transaction_length: v[TRANSACTION_LENGTH_INDEX],\n            max_transaction_args_length: v[TRANSACTION_ARGS_LENGTH_INDEX],\n            max_transaction_gas_limit: v[TRANSACTION_GAS_LIMIT_INDEX],\n            max_transaction_count: v[TRANSACTION_COUNT_INDEX],\n        })\n    }\n}\n\n#[allow(deprecated)]\nimpl TransactionLaneDefinition {\n    /// Creates a new instance of TransactionLimitsDefinition\n    pub fn new(\n        id: u8,\n        max_transaction_length: u64,\n        max_transaction_args_length: u64,\n        max_transaction_gas_limit: u64,\n        max_transaction_count: u64,\n    ) -> Self {\n        Self {\n            id,\n            max_transaction_length,\n            max_transaction_args_length,\n            max_transaction_gas_limit,\n            max_transaction_count,\n        }\n    }\n\n    fn as_vec(&self) -> Vec<u64> {\n        vec![\n            self.id as u64,\n            self.max_transaction_length,\n            
self.max_transaction_args_length,\n            self.max_transaction_gas_limit,\n            self.max_transaction_count,\n        ]\n    }\n\n    /// Returns max_transaction_length\n    pub fn max_transaction_length(&self) -> u64 {\n        self.max_transaction_length\n    }\n\n    /// Returns max_transaction_args_length\n    pub fn max_transaction_args_length(&self) -> u64 {\n        self.max_transaction_args_length\n    }\n\n    /// Returns max_transaction_gas_limit\n    pub fn max_transaction_gas_limit(&self) -> u64 {\n        self.max_transaction_gas_limit\n    }\n\n    /// Returns max_transaction_count\n    pub fn max_transaction_count(&self) -> u64 {\n        self.max_transaction_count\n    }\n\n    /// Returns id\n    pub fn id(&self) -> u8 {\n        self.id\n    }\n\n    pub fn set_id(&mut self, id: u8) {\n        self.id = id;\n    }\n\n    pub fn set_max_transaction_count(&mut self, max_transaction_count: u64) {\n        self.max_transaction_count = max_transaction_count;\n    }\n\n    pub fn set_max_transaction_gas_limit(&mut self, max_transaction_gas_limit: u64) {\n        self.max_transaction_gas_limit = max_transaction_gas_limit;\n    }\n\n    pub fn set_max_transaction_args_length(&mut self, max_transaction_args_length: u64) {\n        self.max_transaction_args_length = max_transaction_args_length;\n    }\n\n    pub fn set_max_transaction_length(&mut self, max_transaction_length: u64) {\n        self.max_transaction_length = max_transaction_length;\n    }\n}\n\n#[derive(Debug, Clone)]\npub enum TransactionConfigError {\n    InvalidArgsProvided,\n}\n\n/// Configuration values associated with V1 Transactions.\n#[derive(Clone, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n// Disallow unknown fields to ensure config files and command-line overrides contain valid keys.\n#[serde(deny_unknown_fields)]\npub struct TransactionV1Config {\n    #[serde(\n        serialize_with = \"limit_definition_to_vec\",\n        
deserialize_with = \"vec_to_limit_definition\"\n    )]\n    /// Lane configuration of the native mint interaction.\n    pub native_mint_lane: TransactionLaneDefinition,\n    #[serde(\n        serialize_with = \"limit_definition_to_vec\",\n        deserialize_with = \"vec_to_limit_definition\"\n    )]\n    /// Lane configuration for the native auction interaction.\n    pub native_auction_lane: TransactionLaneDefinition,\n    #[serde(\n        serialize_with = \"limit_definition_to_vec\",\n        deserialize_with = \"vec_to_limit_definition\"\n    )]\n    /// Lane configuration for the install/upgrade interaction.\n    pub install_upgrade_lane: TransactionLaneDefinition,\n    #[serde(\n        serialize_with = \"wasm_definitions_to_vec\",\n        deserialize_with = \"definition_to_wasms\"\n    )]\n    /// Lane configurations for Wasm based lanes that are not declared as install/upgrade.\n    wasm_lanes: Vec<TransactionLaneDefinition>,\n    #[cfg_attr(any(all(feature = \"std\", feature = \"once_cell\"), test), serde(skip))]\n    #[cfg_attr(\n        all(any(feature = \"once_cell\", test), feature = \"datasize\"),\n        data_size(skip)\n    )]\n    #[cfg(any(feature = \"once_cell\", test))]\n    wasm_lanes_ordered_by_transaction_size: OnceCell<Vec<TransactionLaneDefinition>>,\n    #[cfg_attr(any(all(feature = \"std\", feature = \"once_cell\"), test), serde(skip))]\n    #[cfg_attr(\n        all(any(feature = \"once_cell\", test), feature = \"datasize\"),\n        data_size(skip)\n    )]\n    #[cfg(any(feature = \"once_cell\", test))]\n    wasm_lanes_ordered_by_transaction_gas_limit_transaction_size_args_length:\n        OnceCell<Vec<TransactionLaneDefinition>>,\n}\n\nimpl PartialEq for TransactionV1Config {\n    fn eq(&self, other: &TransactionV1Config) -> bool {\n        // Destructure to make sure we don't accidentally omit fields.\n        let TransactionV1Config {\n            native_mint_lane,\n            native_auction_lane,\n            
install_upgrade_lane,\n            wasm_lanes,\n            #[cfg(any(feature = \"once_cell\", test))]\n                wasm_lanes_ordered_by_transaction_size: _,\n            #[cfg(any(feature = \"once_cell\", test))]\n                wasm_lanes_ordered_by_transaction_gas_limit_transaction_size_args_length: _,\n        } = self;\n        *native_mint_lane == other.native_mint_lane\n            && *native_auction_lane == other.native_auction_lane\n            && *install_upgrade_lane == other.install_upgrade_lane\n            && *wasm_lanes == other.wasm_lanes\n    }\n}\n\nimpl TransactionV1Config {\n    /// Cretaes a new instance of TransactionV1Config\n    pub fn new(\n        native_mint_lane: TransactionLaneDefinition,\n        native_auction_lane: TransactionLaneDefinition,\n        install_upgrade_lane: TransactionLaneDefinition,\n        wasm_lanes: Vec<TransactionLaneDefinition>,\n    ) -> Self {\n        #[cfg(any(feature = \"once_cell\", test))]\n        let wasm_lanes_ordered_by_transaction_size = OnceCell::with_value(\n            Self::build_wasm_lanes_ordered_by_transaction_size(wasm_lanes.clone()),\n        );\n        #[cfg(any(feature = \"once_cell\", test))]\n        let wasm_lanes_ordered_by_transaction_gas_limit =\n            OnceCell::with_value(Self::build_wasm_lanes_ordered(wasm_lanes.clone()));\n        TransactionV1Config {\n            native_mint_lane,\n            native_auction_lane,\n            install_upgrade_lane,\n            wasm_lanes,\n            #[cfg(any(feature = \"once_cell\", test))]\n            wasm_lanes_ordered_by_transaction_size,\n            #[cfg(any(feature = \"once_cell\", test))]\n            wasm_lanes_ordered_by_transaction_gas_limit_transaction_size_args_length:\n                wasm_lanes_ordered_by_transaction_gas_limit,\n        }\n    }\n\n    #[cfg(any(feature = \"testing\", test))]\n    /// Generates a random instance using a `TestRng`.\n    pub fn random(rng: &mut TestRng) -> Self {\n        let 
native_mint_lane = DEFAULT_NATIVE_MINT_LANE.to_vec();\n        let native_auction_lane = DEFAULT_NATIVE_AUCTION_LANE.to_vec();\n        let install_upgrade_lane = DEFAULT_INSTALL_UPGRADE_LANE.to_vec();\n        let mut wasm_lanes = vec![];\n        for kind in 2..7 {\n            let lane = vec![\n                kind as u64,\n                rng.gen_range(0..=1_048_576),\n                rng.gen_range(0..=1024),\n                rng.gen_range(0..=2_500_000_000),\n                rng.gen_range(5..=150),\n            ];\n            wasm_lanes.push(lane.try_into().unwrap())\n        }\n\n        TransactionV1Config::new(\n            native_mint_lane.try_into().unwrap(),\n            native_auction_lane.try_into().unwrap(),\n            install_upgrade_lane.try_into().unwrap(),\n            wasm_lanes,\n        )\n    }\n\n    /// Returns the max serialized length of a transaction for the given lane.\n    pub fn get_max_serialized_length(&self, lane_id: u8) -> u64 {\n        match lane_id {\n            MINT_LANE_ID => self.native_mint_lane.max_transaction_length(),\n            AUCTION_LANE_ID => self.native_auction_lane.max_transaction_length(),\n            INSTALL_UPGRADE_LANE_ID => self.install_upgrade_lane.max_transaction_length(),\n            _ => match self.wasm_lanes.iter().find(|lane| lane.id() == lane_id) {\n                Some(wasm_lane) => wasm_lane.max_transaction_length(),\n                None => 0,\n            },\n        }\n    }\n\n    /// Returns the max number of runtime args\n    pub fn get_max_args_length(&self, lane_id: u8) -> u64 {\n        match lane_id {\n            MINT_LANE_ID => self.native_mint_lane.max_transaction_args_length(),\n            AUCTION_LANE_ID => self.native_auction_lane.max_transaction_args_length(),\n            INSTALL_UPGRADE_LANE_ID => self.install_upgrade_lane.max_transaction_args_length(),\n            _ => match self.wasm_lanes.iter().find(|lane| lane.id() == lane_id) {\n                Some(wasm_lane) => 
wasm_lane.max_transaction_args_length(),\n                None => 0,\n            },\n        }\n    }\n\n    /// Returns the max gas limit of a transaction for the given lane.\n    pub fn get_max_transaction_gas_limit(&self, lane_id: u8) -> u64 {\n        match lane_id {\n            MINT_LANE_ID => self.native_mint_lane.max_transaction_gas_limit(),\n            AUCTION_LANE_ID => self.native_auction_lane.max_transaction_gas_limit(),\n            INSTALL_UPGRADE_LANE_ID => self.install_upgrade_lane.max_transaction_gas_limit(),\n            _ => match self.wasm_lanes.iter().find(|lane| lane.id() == lane_id) {\n                Some(wasm_lane) => wasm_lane.max_transaction_gas_limit(),\n                None => 0,\n            },\n        }\n    }\n\n    /// Returns the max transactions count for the given lane.\n    pub fn get_max_transaction_count(&self, lane_id: u8) -> u64 {\n        match lane_id {\n            MINT_LANE_ID => self.native_mint_lane.max_transaction_count(),\n            AUCTION_LANE_ID => self.native_auction_lane.max_transaction_count(),\n            INSTALL_UPGRADE_LANE_ID => self.install_upgrade_lane.max_transaction_count(),\n            _ => match self.wasm_lanes.iter().find(|lane| lane.id() == lane_id) {\n                Some(wasm_lane) => wasm_lane.max_transaction_count(),\n                None => 0,\n            },\n        }\n    }\n\n    /// Returns the maximum number of Wasm based transactions across wasm lanes.\n    pub fn get_max_wasm_transaction_count(&self) -> u64 {\n        let mut ret = 0;\n        for lane in self.wasm_lanes.iter() {\n            ret += lane.max_transaction_count();\n        }\n        ret\n    }\n\n    /// Are the given transaction parameters supported.\n    pub fn is_supported(&self, lane_id: u8) -> bool {\n        if !self.is_predefined_lane(lane_id) {\n            return self.wasm_lanes.iter().any(|lane| lane.id() == lane_id);\n        }\n        true\n    }\n\n    /// Returns the list of currently supported lane 
identifiers.\n    pub fn get_supported_lanes(&self) -> Vec<u8> {\n        let mut ret = vec![0, 1, 2];\n        for lane in self.wasm_lanes.iter() {\n            ret.push(lane.id());\n        }\n        ret\n    }\n\n    /// Returns the transaction v1 configuration with the specified lane limits.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn with_count_limits(\n        mut self,\n        mint: Option<u64>,\n        auction: Option<u64>,\n        install: Option<u64>,\n        large: Option<u64>,\n    ) -> Self {\n        if let Some(mint_count) = mint {\n            self.native_mint_lane.set_max_transaction_count(mint_count);\n        }\n        if let Some(auction_count) = auction {\n            self.native_auction_lane\n                .set_max_transaction_count(auction_count);\n        }\n        if let Some(install_upgrade) = install {\n            self.install_upgrade_lane\n                .set_max_transaction_count(install_upgrade);\n        }\n        if let Some(large_limit) = large {\n            let mut wasm_lanes = self.wasm_lanes.clone();\n            for lane in wasm_lanes.iter_mut() {\n                if lane.id() == 3 {\n                    lane.set_max_transaction_count(large_limit);\n                }\n            }\n            self.set_wasm_lanes(wasm_lanes);\n        }\n        self\n    }\n\n    /// Returns the max total count for all transactions across all lanes allowed in a block.\n    pub fn get_max_block_count(&self) -> u64 {\n        self.native_mint_lane.max_transaction_count()\n            + self.native_auction_lane.max_transaction_count()\n            + self.install_upgrade_lane.max_transaction_count()\n            + self\n                .wasm_lanes\n                .iter()\n                .map(TransactionLaneDefinition::max_transaction_count)\n                .sum::<u64>()\n    }\n\n    /// Returns true if the lane identifier is for one of the predefined lanes.\n    pub fn is_predefined_lane(&self, lane: u8) -> bool {\n  
      lane == AUCTION_LANE_ID || lane == MINT_LANE_ID || lane == INSTALL_UPGRADE_LANE_ID\n    }\n\n    /// Returns a wasm lane id based on the transaction size adjusted by\n    /// maybe_additional_computation_factor if necessary.\n    pub fn get_wasm_lane_id_by_size(\n        &self,\n        transaction_size: u64,\n        additional_computation_factor: u8,\n        runtime_args_size: u64,\n    ) -> Option<u8> {\n        let mut maybe_adequate_lane_index = None;\n        let buckets = self.get_wasm_lanes_ordered_by_transaction_size();\n        let number_of_lanes = buckets.len();\n        for (i, lane) in buckets.iter().enumerate() {\n            let max_transaction_size = lane.max_transaction_length();\n            let max_runtime_args_size = lane.max_transaction_args_length();\n            if max_transaction_size >= transaction_size\n                && max_runtime_args_size >= runtime_args_size\n            {\n                maybe_adequate_lane_index = Some(i);\n                break;\n            }\n        }\n        if let Some(adequate_lane_index) = maybe_adequate_lane_index {\n            maybe_adequate_lane_index = Some(cmp::min(\n                adequate_lane_index + additional_computation_factor as usize,\n                number_of_lanes - 1,\n            ));\n        }\n        maybe_adequate_lane_index.map(|index| buckets[index].id())\n    }\n\n    pub fn get_lane_by_id(&self, lane_id: u8) -> Option<&TransactionLaneDefinition> {\n        if lane_id == MINT_LANE_ID {\n            return Some(&self.native_mint_lane);\n        }\n        if lane_id == AUCTION_LANE_ID {\n            return Some(&self.native_auction_lane);\n        }\n        if lane_id == INSTALL_UPGRADE_LANE_ID {\n            return Some(&self.install_upgrade_lane);\n        }\n        self.wasm_lanes.iter().find(|el| el.id() == lane_id)\n    }\n\n    pub fn get_wasm_lane_id_by_payment_limited(\n        &self,\n        gas_limit: u64,\n        transaction_size: u64,\n        
runtime_args_size: u64,\n    ) -> Option<u8> {\n        let mut maybe_adequate_lane_index = None;\n        let lanes = self.get_wasm_lanes_ordered();\n        for (i, lane) in lanes.iter().enumerate() {\n            let max_transaction_gas = lane.max_transaction_gas_limit();\n            let max_transaction_size = lane.max_transaction_length();\n            let max_runtime_args_size = lane.max_transaction_args_length();\n            if gas_limit <= max_transaction_gas\n                && transaction_size <= max_transaction_size\n                && runtime_args_size <= max_runtime_args_size\n            {\n                maybe_adequate_lane_index = Some(i);\n                break;\n            }\n        }\n        maybe_adequate_lane_index.map(|index| lanes[index].id())\n    }\n\n    #[allow(unreachable_code)]\n    //We're allowing unreachable code here because there's a possibility that someone might\n    // want to use the types crate without once_cell\n    fn get_wasm_lanes_ordered_by_transaction_size(&self) -> &Vec<TransactionLaneDefinition> {\n        #[cfg(any(feature = \"once_cell\", test))]\n        return self.wasm_lanes_ordered_by_transaction_size.get_or_init(|| {\n            Self::build_wasm_lanes_ordered_by_transaction_size(self.wasm_lanes.clone())\n        });\n        &Self::build_wasm_lanes_ordered_by_transaction_size(self.wasm_lanes.clone())\n    }\n\n    #[allow(unreachable_code)]\n    //We're allowing unreachable code here because there's a possibility that someone might\n    // want to use the types crate without once_cell\n    // This function will take the wasm lanes ordered by:\n    //   - firstly gas limit\n    //   - secondly max_transaction_length\n    //   - thirdly max runtime args\n    //   - fourthly lane id (this has no \"business\" value, but it ensures that the ordering is\n    //     always reproducible since ids should be unique)\n    fn get_wasm_lanes_ordered(&self) -> &Vec<TransactionLaneDefinition> {\n        #[cfg(any(feature 
= \"once_cell\", test))]\n        return self\n            .wasm_lanes_ordered_by_transaction_gas_limit_transaction_size_args_length\n            .get_or_init(|| Self::build_wasm_lanes_ordered(self.wasm_lanes.clone()));\n        &Self::build_wasm_lanes_ordered(self.wasm_lanes.clone())\n    }\n\n    fn build_wasm_lanes_ordered(\n        wasm_lanes: Vec<TransactionLaneDefinition>,\n    ) -> Vec<TransactionLaneDefinition> {\n        let mut ordered = wasm_lanes;\n        ordered.sort_by_key(|item| {\n            (\n                item.max_transaction_gas_limit(),\n                item.max_transaction_length(),\n                item.max_transaction_args_length(),\n                item.id(),\n            )\n        });\n        ordered\n    }\n\n    fn build_wasm_lanes_ordered_by_transaction_size(\n        wasm_lanes: Vec<TransactionLaneDefinition>,\n    ) -> Vec<TransactionLaneDefinition> {\n        let mut ordered = wasm_lanes;\n        ordered.sort_by_key(TransactionLaneDefinition::max_transaction_length);\n        ordered\n    }\n\n    pub fn wasm_lanes(&self) -> &Vec<TransactionLaneDefinition> {\n        &self.wasm_lanes\n    }\n\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn set_wasm_lanes(&mut self, wasm_lanes: Vec<TransactionLaneDefinition>) {\n        self.wasm_lanes = wasm_lanes;\n        #[cfg(any(feature = \"once_cell\", test))]\n        {\n            let wasm_lanes_ordered_by_transaction_size = OnceCell::with_value(\n                Self::build_wasm_lanes_ordered_by_transaction_size(self.wasm_lanes.clone()),\n            );\n            self.wasm_lanes_ordered_by_transaction_size = wasm_lanes_ordered_by_transaction_size;\n            let wasm_lanes_ordered_by_transaction_gas_limit =\n                OnceCell::with_value(Self::build_wasm_lanes_ordered(self.wasm_lanes.clone()));\n            self.wasm_lanes_ordered_by_transaction_gas_limit_transaction_size_args_length =\n                wasm_lanes_ordered_by_transaction_gas_limit;\n        }\n    
}\n\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn get_max_wasm_lane_by_gas_limit(&self) -> Option<TransactionLaneDefinition> {\n        self.wasm_lanes\n            .iter()\n            .max_by_key(|lane| lane.max_transaction_gas_limit())\n            .cloned()\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl Default for TransactionV1Config {\n    fn default() -> Self {\n        let wasm_lane = vec![\n            3_u64, //large lane id\n            1_048_576,\n            1024,\n            DEFAULT_LARGE_TRANSACTION_GAS_LIMIT,\n            10,\n        ];\n\n        let native_mint_lane = DEFAULT_NATIVE_MINT_LANE.to_vec();\n        let native_auction_lane = DEFAULT_NATIVE_AUCTION_LANE.to_vec();\n        let install_upgrade_lane = DEFAULT_INSTALL_UPGRADE_LANE.to_vec();\n        let raw_wasm_lanes = vec![wasm_lane];\n        let wasm_lanes: Result<Vec<TransactionLaneDefinition>, _> =\n            raw_wasm_lanes.into_iter().map(|v| v.try_into()).collect();\n\n        TransactionV1Config::new(\n            native_mint_lane.try_into().unwrap(),\n            native_auction_lane.try_into().unwrap(),\n            install_upgrade_lane.try_into().unwrap(),\n            wasm_lanes.unwrap(),\n        )\n    }\n}\n\nimpl ToBytes for TransactionV1Config {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.native_mint_lane.as_vec().write_bytes(writer)?;\n        self.native_auction_lane.as_vec().write_bytes(writer)?;\n        self.install_upgrade_lane.as_vec().write_bytes(writer)?;\n        let wasm_lanes_as_vecs: Vec<Vec<u64>> = self\n            .wasm_lanes\n            .iter()\n            .map(TransactionLaneDefinition::as_vec)\n            .collect();\n        wasm_lanes_as_vecs.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn 
serialized_length(&self) -> usize {\n        let wasm_lanes_as_vecs: Vec<Vec<u64>> = self\n            .wasm_lanes\n            .iter()\n            .map(TransactionLaneDefinition::as_vec)\n            .collect();\n        self.native_mint_lane.as_vec().serialized_length()\n            + self.native_auction_lane.as_vec().serialized_length()\n            + self.install_upgrade_lane.as_vec().serialized_length()\n            + wasm_lanes_as_vecs.serialized_length()\n    }\n}\n\nimpl FromBytes for TransactionV1Config {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (raw_native_mint_lane, remainder): (Vec<u64>, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (raw_native_auction_lane, remainder): (Vec<u64>, &[u8]) =\n            FromBytes::from_bytes(remainder)?;\n        let (raw_install_upgrade_lane, remainder): (Vec<u64>, &[u8]) =\n            FromBytes::from_bytes(remainder)?;\n        let (raw_wasm_lanes, remainder): (Vec<Vec<u64>>, &[u8]) = FromBytes::from_bytes(remainder)?;\n        let native_mint_lane = raw_native_mint_lane\n            .try_into()\n            .map_err(|_| bytesrepr::Error::Formatting)?;\n        let native_auction_lane = raw_native_auction_lane\n            .try_into()\n            .map_err(|_| bytesrepr::Error::Formatting)?;\n        let install_upgrade_lane = raw_install_upgrade_lane\n            .try_into()\n            .map_err(|_| bytesrepr::Error::Formatting)?;\n        let wasm_lanes: Result<Vec<TransactionLaneDefinition>, _> =\n            raw_wasm_lanes.into_iter().map(|v| v.try_into()).collect();\n        let config = TransactionV1Config::new(\n            native_mint_lane,\n            native_auction_lane,\n            install_upgrade_lane,\n            wasm_lanes.map_err(|_| bytesrepr::Error::Formatting)?,\n        );\n        Ok((config, remainder))\n    }\n}\n\nfn vec_to_limit_definition<'de, D>(deserializer: D) -> Result<TransactionLaneDefinition, D::Error>\nwhere\n    D: 
Deserializer<'de>,\n{\n    let vec = Vec::<u64>::deserialize(deserializer)?;\n    let limits = TransactionLaneDefinition::try_from(vec).map_err(|_| {\n        D::Error::invalid_value(\n            Unexpected::Seq,\n            &\"expected 5 u64 compliant numbers to create a TransactionLimitsDefinition\",\n        )\n    })?;\n    Ok(limits)\n}\n\nfn limit_definition_to_vec<S>(\n    limits: &TransactionLaneDefinition,\n    serializer: S,\n) -> Result<S::Ok, S::Error>\nwhere\n    S: Serializer,\n{\n    let vec = limits.as_vec();\n    let mut seq = serializer.serialize_seq(Some(vec.len()))?;\n    for element in vec {\n        seq.serialize_element(&element)?;\n    }\n    seq.end()\n}\n\nfn definition_to_wasms<'de, D>(deserializer: D) -> Result<Vec<TransactionLaneDefinition>, D::Error>\nwhere\n    D: Deserializer<'de>,\n{\n    let vec = Vec::<Vec<u64>>::deserialize(deserializer)?;\n    let result: Result<Vec<TransactionLaneDefinition>, TransactionConfigError> =\n        vec.into_iter().map(|v| v.try_into()).collect();\n    result.map_err(|_| {\n        D::Error::invalid_value(\n            Unexpected::Seq,\n            &\"sequence of sequences to assemble wasm definitions\",\n        )\n    })\n}\n\nfn wasm_definitions_to_vec<S>(\n    limits: &[TransactionLaneDefinition],\n    serializer: S,\n) -> Result<S::Ok, S::Error>\nwhere\n    S: Serializer,\n{\n    let vec_of_vecs: Vec<Vec<u64>> = limits.iter().map(|v| v.as_vec()).collect();\n    let mut seq = serializer.serialize_seq(Some(vec_of_vecs.len()))?;\n    for element in vec_of_vecs {\n        seq.serialize_element(&element)?;\n    }\n    seq.end()\n}\n\n#[cfg(test)]\nmod tests {\n    use serde_json::Value;\n\n    use super::*;\n    const EXAMPLE_JSON: &str = r#\"{\n        \"native_mint_lane\": [0,1,2,3,4],\n        \"native_auction_lane\": [1,5,6,7,8],\n        \"install_upgrade_lane\": [2,9,10,11,12],\n        \"wasm_lanes\": [[3,13,14,15,16], [4,17,18,19,20], [5,21,22,23,24]]\n        }\"#;\n    #[test]\n    fn 
bytesrepr_roundtrip() {\n        let mut rng = TestRng::new();\n        let config = TransactionV1Config::random(&mut rng);\n        bytesrepr::test_serialization_roundtrip(&config);\n    }\n\n    #[test]\n    fn should_correctly_track_supported() {\n        let config = TransactionV1Config::default();\n        assert!(config.is_supported(0));\n        assert!(config.is_supported(1));\n        assert!(config.is_supported(2));\n        assert!(config.is_supported(3));\n        assert!(!config.is_supported(10));\n    }\n\n    #[test]\n    fn should_get_configuration_for_wasm() {\n        let config = build_example_transaction_config();\n        let got = config.get_wasm_lane_id_by_size(100, 0, 0);\n        assert_eq!(got, Some(3));\n        let config = build_example_transaction_config_reverse_wasm_ids();\n        let got = config.get_wasm_lane_id_by_size(100, 0, 0);\n        assert_eq!(got, Some(5));\n    }\n\n    #[test]\n    fn given_too_big_transaction_should_return_none() {\n        let config = build_example_transaction_config();\n        let got = config.get_wasm_lane_id_by_size(100000000, 0, 0);\n        assert!(got.is_none());\n        let config = build_example_transaction_config_reverse_wasm_ids();\n        let got = config.get_wasm_lane_id_by_size(100000000, 0, 0);\n        assert!(got.is_none());\n        let config = build_example_transaction_config_reverse_wasm_ids();\n        let got = config.get_wasm_lane_id_by_size(1, 0, 100000);\n        assert!(got.is_none());\n    }\n\n    #[test]\n    fn given_wasm_should_return_first_fit() {\n        let config = build_example_transaction_config();\n\n        let got = config.get_wasm_lane_id_by_size(660, 0, 0);\n        assert_eq!(got, Some(4));\n\n        let got = config.get_wasm_lane_id_by_size(800, 0, 0);\n        assert_eq!(got, Some(5));\n\n        let got = config.get_wasm_lane_id_by_size(1, 0, 0);\n        assert_eq!(got, Some(3));\n\n        let got = config.get_wasm_lane_id_by_size(800, 0, 6024);\n   
     assert_eq!(got, Some(5));\n\n        let config = build_example_transaction_config_reverse_wasm_ids();\n\n        let got = config.get_wasm_lane_id_by_size(660, 0, 0);\n        assert_eq!(got, Some(4));\n\n        let got = config.get_wasm_lane_id_by_size(800, 0, 0);\n        assert_eq!(got, Some(3));\n\n        let got = config.get_wasm_lane_id_by_size(1, 0, 0);\n        assert_eq!(got, Some(5));\n\n        let got = config.get_wasm_lane_id_by_size(800, 0, 6024);\n        assert_eq!(got, Some(3));\n    }\n\n    #[test]\n    fn given_additional_computation_factor_should_be_applied() {\n        let config = build_example_transaction_config();\n        let got = config.get_wasm_lane_id_by_size(660, 1, 0);\n        assert_eq!(got, Some(5));\n\n        let config = build_example_transaction_config_reverse_wasm_ids();\n        let got = config.get_wasm_lane_id_by_size(660, 1, 0);\n        assert_eq!(got, Some(3));\n    }\n\n    #[test]\n    fn given_additional_computation_factor_should_not_overflow() {\n        let config = build_example_transaction_config();\n        let got = config.get_wasm_lane_id_by_size(660, 2, 0);\n        assert_eq!(got, Some(5));\n        let got_2 = config.get_wasm_lane_id_by_size(660, 20, 0);\n        assert_eq!(got_2, Some(5));\n\n        let config = build_example_transaction_config_reverse_wasm_ids();\n        let got = config.get_wasm_lane_id_by_size(660, 2, 0);\n        assert_eq!(got, Some(3));\n        let got_2 = config.get_wasm_lane_id_by_size(660, 20, 0);\n        assert_eq!(got_2, Some(3));\n    }\n\n    #[test]\n    fn given_no_wasm_lanes_should_return_none() {\n        let config = build_example_transaction_config_no_wasms();\n        let got = config.get_wasm_lane_id_by_size(660, 2, 0);\n        assert!(got.is_none());\n        let got = config.get_wasm_lane_id_by_size(660, 0, 0);\n        assert!(got.is_none());\n        let got = config.get_wasm_lane_id_by_size(660, 20, 0);\n        assert!(got.is_none());\n\n        let 
got = config.get_wasm_lane_id_by_payment_limited(100, 1, 0);\n        assert!(got.is_none());\n    }\n\n    #[test]\n    fn given_wasm_when_by_payment_should_find_smallest_lane() {\n        let config = TransactionV1Config::new(\n            example_native(),\n            example_auction(),\n            example_install_upgrade(),\n            vec![\n                TransactionLaneDefinition::new(3, 10, 1, 5, 1),\n                TransactionLaneDefinition::new(4, 11, 1, 55, 1),\n                TransactionLaneDefinition::new(5, 12, 5, 155, 1),\n            ],\n        );\n        let got = config.get_wasm_lane_id_by_payment_limited(54, 1, 0);\n        assert_eq!(got, Some(4));\n        let got = config.get_wasm_lane_id_by_payment_limited(54, 10, 3);\n        assert_eq!(got, Some(5));\n    }\n\n    #[test]\n    fn given_wasm_when_by_payment_should_take_size_into_consideration() {\n        let config = TransactionV1Config::new(\n            example_native(),\n            example_auction(),\n            example_install_upgrade(),\n            vec![\n                TransactionLaneDefinition::new(3, 10, 1, 5, 1),\n                TransactionLaneDefinition::new(4, 11, 1, 55, 1),\n                TransactionLaneDefinition::new(5, 12, 1, 155, 1),\n            ],\n        );\n        let got = config.get_wasm_lane_id_by_payment_limited(54, 12, 0);\n        assert_eq!(got, Some(5));\n    }\n\n    #[test]\n    fn given_wasm_when_by_payment_should_return_none_if_no_size_fits() {\n        let config = TransactionV1Config::new(\n            example_native(),\n            example_auction(),\n            example_install_upgrade(),\n            vec![\n                TransactionLaneDefinition::new(3, 10, 1, 5, 1),\n                TransactionLaneDefinition::new(4, 11, 1, 55, 1),\n                TransactionLaneDefinition::new(5, 12, 5, 155, 1),\n            ],\n        );\n        let got = config.get_wasm_lane_id_by_payment_limited(54, 120, 0);\n        assert_eq!(got, None);\n    
    let got = config.get_wasm_lane_id_by_payment_limited(54, 10, 1000);\n        assert_eq!(got, None);\n    }\n\n    #[test]\n    fn should_deserialize() {\n        let got: TransactionV1Config = serde_json::from_str(EXAMPLE_JSON).unwrap();\n        let expected = TransactionV1Config::new(\n            TransactionLaneDefinition::new(0, 1, 2, 3, 4),\n            TransactionLaneDefinition::new(1, 5, 6, 7, 8),\n            TransactionLaneDefinition::new(2, 9, 10, 11, 12),\n            vec![\n                TransactionLaneDefinition::new(3, 13, 14, 15, 16),\n                TransactionLaneDefinition::new(4, 17, 18, 19, 20),\n                TransactionLaneDefinition::new(5, 21, 22, 23, 24),\n            ],\n        );\n        assert_eq!(got, expected);\n    }\n\n    #[test]\n    fn should_serialize() {\n        let input = TransactionV1Config::new(\n            TransactionLaneDefinition::new(0, 1, 2, 3, 4),\n            TransactionLaneDefinition::new(1, 5, 6, 7, 8),\n            TransactionLaneDefinition::new(2, 9, 10, 11, 12),\n            vec![\n                TransactionLaneDefinition::new(3, 13, 14, 15, 16),\n                TransactionLaneDefinition::new(4, 17, 18, 19, 20),\n                TransactionLaneDefinition::new(5, 21, 22, 23, 24),\n            ],\n        );\n        let raw = serde_json::to_string(&input).unwrap();\n        let got = serde_json::from_str::<Value>(&raw).unwrap();\n        let expected: Value = serde_json::from_str::<Value>(EXAMPLE_JSON).unwrap();\n        assert_eq!(got, expected);\n    }\n\n    #[test]\n    fn should_order_by_nested_predicates() {\n        // Firstly, order by max_transaction_gas_limit\n        let definition_1 = TransactionLaneDefinition::new(0, 0, 0, 4, 0);\n        let definition_2 = TransactionLaneDefinition::new(1, 0, 0, 3, 0);\n        let definition_3 = TransactionLaneDefinition::new(2, 0, 0, 2, 0);\n        let res = TransactionV1Config::build_wasm_lanes_ordered(vec![\n            definition_1.clone(),\n     
       definition_2.clone(),\n            definition_3.clone(),\n        ]);\n        assert_eq!(res, vec![definition_3, definition_2, definition_1,]);\n\n        // If max_transaction_gas_limit equal, order by\n        let definition_1 = TransactionLaneDefinition::new(0, 3, 0, 1, 0);\n        let definition_2 = TransactionLaneDefinition::new(1, 4, 0, 1, 0);\n        let definition_3 = TransactionLaneDefinition::new(2, 2, 0, 1, 0);\n        let res = TransactionV1Config::build_wasm_lanes_ordered(vec![\n            definition_1.clone(),\n            definition_2.clone(),\n            definition_3.clone(),\n        ]);\n        assert_eq!(res, vec![definition_3, definition_1, definition_2,]);\n\n        // If max_transaction_gas_limit and max_transaction_length equal, order by\n        // max_transaction_args_length\n        let definition_1 = TransactionLaneDefinition::new(0, 2, 4, 1, 0);\n        let definition_2 = TransactionLaneDefinition::new(1, 2, 2, 1, 0);\n        let definition_3 = TransactionLaneDefinition::new(2, 2, 3, 1, 0);\n        let res = TransactionV1Config::build_wasm_lanes_ordered(vec![\n            definition_1.clone(),\n            definition_2.clone(),\n            definition_3.clone(),\n        ]);\n        assert_eq!(res, vec![definition_2, definition_3, definition_1,]);\n\n        // If max_transaction_gas_limit and max_transaction_length equal and\n        // max_transaction_args_length, order by id\n        let definition_1 = TransactionLaneDefinition::new(2, 2, 3, 1, 0);\n        let definition_2 = TransactionLaneDefinition::new(0, 2, 3, 1, 0);\n        let definition_3 = TransactionLaneDefinition::new(1, 2, 3, 1, 0);\n        let res = TransactionV1Config::build_wasm_lanes_ordered(vec![\n            definition_2.clone(),\n            definition_3.clone(),\n            definition_1.clone(),\n        ]);\n        assert_eq!(res, vec![definition_2, definition_3, definition_1,]);\n\n        // Should apply those rules mixed\n        let 
definition_1 = TransactionLaneDefinition::new(10, 0, 2, 2, 0);\n        let definition_2 = TransactionLaneDefinition::new(1, 2, 3, 1, 0);\n        let definition_3 = TransactionLaneDefinition::new(2, 4, 3, 1, 0);\n        let definition_4 = TransactionLaneDefinition::new(3, 4, 2, 1, 0);\n        let definition_5 = TransactionLaneDefinition::new(4, 0, 0, 2, 0);\n        let definition_6 = TransactionLaneDefinition::new(5, 4, 3, 1, 0);\n\n        let res = TransactionV1Config::build_wasm_lanes_ordered(vec![\n            definition_1.clone(),\n            definition_2.clone(),\n            definition_3.clone(),\n            definition_4.clone(),\n            definition_5.clone(),\n            definition_6.clone(),\n        ]);\n        assert_eq!(\n            res,\n            vec![\n                definition_2,\n                definition_4,\n                definition_3,\n                definition_6,\n                definition_5,\n                definition_1\n            ]\n        );\n    }\n\n    fn example_native() -> TransactionLaneDefinition {\n        TransactionLaneDefinition::new(0, 1500, 1024, 1_500_000_000, 150)\n    }\n\n    fn example_auction() -> TransactionLaneDefinition {\n        TransactionLaneDefinition::new(1, 500, 3024, 3_500_000_000, 350)\n    }\n\n    fn example_install_upgrade() -> TransactionLaneDefinition {\n        TransactionLaneDefinition::new(2, 10000, 2024, 2_500_000_000, 250)\n    }\n\n    fn wasm_small(id: u8) -> TransactionLaneDefinition {\n        TransactionLaneDefinition::new(id, 600, 4024, 4_500_000_000, 450)\n    }\n\n    fn wasm_medium(id: u8) -> TransactionLaneDefinition {\n        TransactionLaneDefinition::new(id, 700, 5024, 5_500_000_000, 550)\n    }\n\n    fn wasm_large(id: u8) -> TransactionLaneDefinition {\n        TransactionLaneDefinition::new(id, 800, 6024, 6_500_000_000, 650)\n    }\n\n    fn example_wasm() -> Vec<TransactionLaneDefinition> {\n        vec![wasm_small(3), wasm_medium(4), wasm_large(5)]\n    }\n\n 
   fn example_wasm_reversed_ids() -> Vec<TransactionLaneDefinition> {\n        vec![wasm_small(5), wasm_medium(4), wasm_large(3)]\n    }\n\n    fn build_example_transaction_config_no_wasms() -> TransactionV1Config {\n        TransactionV1Config::new(\n            example_native(),\n            example_auction(),\n            example_install_upgrade(),\n            vec![],\n        )\n    }\n\n    fn build_example_transaction_config() -> TransactionV1Config {\n        TransactionV1Config::new(\n            example_native(),\n            example_auction(),\n            example_install_upgrade(),\n            example_wasm(),\n        )\n    }\n\n    fn build_example_transaction_config_reverse_wasm_ids() -> TransactionV1Config {\n        TransactionV1Config::new(\n            example_native(),\n            example_auction(),\n            example_install_upgrade(),\n            example_wasm_reversed_ids(),\n        )\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/transaction_config.rs",
    "content": "mod deploy_config;\nmod runtime_config;\nmod transaction_v1_config;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\nuse runtime_config::RuntimeConfig;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    TimeDiff,\n};\n\npub use deploy_config::DeployConfig;\n#[cfg(any(feature = \"testing\", test))]\npub use deploy_config::DEFAULT_MAX_PAYMENT_MOTES;\n#[cfg(any(feature = \"testing\", test))]\npub use transaction_v1_config::DEFAULT_LARGE_TRANSACTION_GAS_LIMIT;\npub use transaction_v1_config::{TransactionLaneDefinition, TransactionV1Config};\n\n/// The default minimum number of motes that can be transferred.\npub const DEFAULT_MIN_TRANSFER_MOTES: u64 = 2_500_000_000;\n\n/// Configuration values associated with Transactions.\n#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n// Disallow unknown fields to ensure config files and command-line overrides contain valid keys.\n#[serde(deny_unknown_fields)]\npub struct TransactionConfig {\n    /// Maximum time to live any transaction can specify.\n    pub max_ttl: TimeDiff,\n    /// Maximum number of approvals (signatures) allowed in a block across all transactions.\n    pub block_max_approval_count: u32,\n    /// Maximum possible size in bytes of a block.\n    pub max_block_size: u32,\n    /// Maximum sum of payment across all transactions included in a block.\n    pub block_gas_limit: u64,\n    /// Minimum token amount for a native transfer deploy or transaction (a transfer deploy or\n    /// transaction received with an transfer amount less than this will be rejected upon receipt).\n    pub native_transfer_minimum_motes: u64,\n    /// Maximum value to which `transaction_acceptor.timestamp_leeway` can be set in the\n    /// config.toml file.\n    pub 
max_timestamp_leeway: TimeDiff,\n    /// Configuration values specific to Deploy transactions.\n    #[serde(rename = \"deploy\")]\n    pub deploy_config: DeployConfig,\n    /// Configuration of the transaction runtime.\n    /// Configuration values specific to V1 transactions.\n    #[serde(rename = \"v1\")]\n    pub transaction_v1_config: TransactionV1Config,\n    /// Configuration values specific to the runtime.\n    ///\n    /// This is where we specify which runtimes are available.\n    #[serde(rename = \"enabled_runtime\")]\n    pub runtime_config: RuntimeConfig,\n}\n\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\nimpl TransactionConfig {\n    /// Generates a random instance using a `TestRng`.\n    pub fn random(rng: &mut TestRng) -> Self {\n        let max_ttl = TimeDiff::from_seconds(rng.gen_range(60..3_600));\n        let block_max_approval_count = rng.gen();\n        let max_block_size = rng.gen_range(1_000_000..1_000_000_000);\n        let block_gas_limit = rng.gen_range(100_000_000_000..1_000_000_000_000_000);\n        let native_transfer_minimum_motes =\n            rng.gen_range(DEFAULT_MIN_TRANSFER_MOTES..1_000_000_000_000_000);\n        let max_timestamp_leeway = TimeDiff::from_seconds(rng.gen_range(0..6));\n        let deploy_config = DeployConfig::random(rng);\n        let transaction_v1_config: TransactionV1Config = TransactionV1Config::random(rng);\n        let runtime_config = RuntimeConfig::random(rng);\n\n        TransactionConfig {\n            max_ttl,\n            block_max_approval_count,\n            max_block_size,\n            block_gas_limit,\n            native_transfer_minimum_motes,\n            max_timestamp_leeway,\n            deploy_config,\n            transaction_v1_config,\n            runtime_config,\n        }\n    }\n}\n\nimpl Default for TransactionConfig {\n    fn default() -> Self {\n        let two_hours = TimeDiff::from_seconds(2 * 60 * 60);\n        TransactionConfig {\n            max_ttl: 
two_hours,\n            block_max_approval_count: 2600,\n            max_block_size: 10_485_760,\n            block_gas_limit: 10_000_000_000_000,\n            native_transfer_minimum_motes: DEFAULT_MIN_TRANSFER_MOTES,\n            max_timestamp_leeway: TimeDiff::from_seconds(5),\n            deploy_config: DeployConfig::default(),\n            runtime_config: RuntimeConfig {\n                vm_casper_v1: true,\n                vm_casper_v2: false,\n            },\n            transaction_v1_config: TransactionV1Config::default(),\n        }\n    }\n}\n\nimpl ToBytes for TransactionConfig {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.max_ttl.write_bytes(writer)?;\n        self.block_max_approval_count.write_bytes(writer)?;\n        self.max_block_size.write_bytes(writer)?;\n        self.block_gas_limit.write_bytes(writer)?;\n        self.native_transfer_minimum_motes.write_bytes(writer)?;\n        self.max_timestamp_leeway.write_bytes(writer)?;\n        self.deploy_config.write_bytes(writer)?;\n        self.runtime_config.write_bytes(writer)?;\n        self.transaction_v1_config.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.max_ttl.serialized_length()\n            + self.block_max_approval_count.serialized_length()\n            + self.max_block_size.serialized_length()\n            + self.block_gas_limit.serialized_length()\n            + self.native_transfer_minimum_motes.serialized_length()\n            + self.max_timestamp_leeway.serialized_length()\n            + self.deploy_config.serialized_length()\n            + self.runtime_config.serialized_length()\n            + self.transaction_v1_config.serialized_length()\n    }\n}\n\nimpl FromBytes for TransactionConfig {\n    fn 
from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (max_ttl, remainder) = TimeDiff::from_bytes(bytes)?;\n        let (block_max_approval_count, remainder) = u32::from_bytes(remainder)?;\n        let (max_block_size, remainder) = u32::from_bytes(remainder)?;\n        let (block_gas_limit, remainder) = u64::from_bytes(remainder)?;\n        let (native_transfer_minimum_motes, remainder) = u64::from_bytes(remainder)?;\n        let (max_timestamp_leeway, remainder) = TimeDiff::from_bytes(remainder)?;\n        let (deploy_config, remainder) = DeployConfig::from_bytes(remainder)?;\n        let (runtime_config, remainder) = RuntimeConfig::from_bytes(remainder)?;\n        let (transaction_v1_config, remainder) = TransactionV1Config::from_bytes(remainder)?;\n\n        let config = TransactionConfig {\n            max_ttl,\n            block_max_approval_count,\n            max_block_size,\n            block_gas_limit,\n            native_transfer_minimum_motes,\n            max_timestamp_leeway,\n            deploy_config,\n            runtime_config,\n            transaction_v1_config,\n        };\n        Ok((config, remainder))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let mut rng = TestRng::new();\n        let config = TransactionConfig::random(&mut rng);\n        bytesrepr::test_serialization_roundtrip(&config);\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/upgrade_config.rs",
    "content": "use num_rational::Ratio;\nuse serde::Serialize;\nuse std::collections::BTreeMap;\n\nuse crate::{\n    system::auction::DelegationRate, ChainspecRegistry, Digest, EraId, FeeHandling,\n    HoldBalanceHandling, Key, ProtocolVersion, RewardsHandling, StoredValue,\n};\n\n/// Represents the configuration of a protocol upgrade.\n#[derive(Debug, Clone, PartialEq, Eq, Serialize)]\npub struct ProtocolUpgradeConfig {\n    pre_state_hash: Digest,\n    current_protocol_version: ProtocolVersion,\n    new_protocol_version: ProtocolVersion,\n    activation_point: Option<EraId>,\n    new_gas_hold_handling: Option<HoldBalanceHandling>,\n    new_gas_hold_interval: Option<u64>,\n    new_validator_slots: Option<u32>,\n    new_auction_delay: Option<u64>,\n    new_locked_funds_period_millis: Option<u64>,\n    new_round_seigniorage_rate: Option<Ratio<u64>>,\n    new_unbonding_delay: Option<u64>,\n    global_state_update: BTreeMap<Key, StoredValue>,\n    chainspec_registry: ChainspecRegistry,\n    fee_handling: FeeHandling,\n    validator_minimum_bid_amount: u64,\n    maximum_delegation_amount: u64,\n    minimum_delegation_amount: u64,\n    enable_addressable_entity: bool,\n    rewards_handling: RewardsHandling,\n    minimum_delegation_rate: Option<DelegationRate>,\n}\n\nimpl ProtocolUpgradeConfig {\n    /// Create new upgrade config.\n    #[allow(clippy::too_many_arguments)]\n    pub fn new(\n        pre_state_hash: Digest,\n        current_protocol_version: ProtocolVersion,\n        new_protocol_version: ProtocolVersion,\n        activation_point: Option<EraId>,\n        new_gas_hold_handling: Option<HoldBalanceHandling>,\n        new_gas_hold_interval: Option<u64>,\n        new_validator_slots: Option<u32>,\n        new_auction_delay: Option<u64>,\n        new_locked_funds_period_millis: Option<u64>,\n        new_round_seigniorage_rate: Option<Ratio<u64>>,\n        new_unbonding_delay: Option<u64>,\n        global_state_update: BTreeMap<Key, StoredValue>,\n        
chainspec_registry: ChainspecRegistry,\n        fee_handling: FeeHandling,\n        validator_minimum_bid_amount: u64,\n        maximum_delegation_amount: u64,\n        minimum_delegation_amount: u64,\n        enable_addressable_entity: bool,\n        rewards_handling: RewardsHandling,\n        minimum_delegation_rate: Option<DelegationRate>,\n    ) -> Self {\n        ProtocolUpgradeConfig {\n            pre_state_hash,\n            current_protocol_version,\n            new_protocol_version,\n            activation_point,\n            new_gas_hold_handling,\n            new_gas_hold_interval,\n            new_validator_slots,\n            new_auction_delay,\n            new_locked_funds_period_millis,\n            new_round_seigniorage_rate,\n            new_unbonding_delay,\n            global_state_update,\n            chainspec_registry,\n            fee_handling,\n            validator_minimum_bid_amount,\n            maximum_delegation_amount,\n            minimum_delegation_amount,\n            enable_addressable_entity,\n            rewards_handling,\n            minimum_delegation_rate,\n        }\n    }\n\n    /// Returns the current state root state hash\n    pub fn pre_state_hash(&self) -> Digest {\n        self.pre_state_hash\n    }\n\n    /// Returns current protocol version of this upgrade.\n    pub fn current_protocol_version(&self) -> ProtocolVersion {\n        self.current_protocol_version\n    }\n\n    /// Returns new protocol version of this upgrade.\n    pub fn new_protocol_version(&self) -> ProtocolVersion {\n        self.new_protocol_version\n    }\n\n    /// Returns activation point in eras.\n    pub fn activation_point(&self) -> Option<EraId> {\n        self.activation_point\n    }\n\n    /// Returns new gas hold handling if specified.\n    pub fn new_gas_hold_handling(&self) -> Option<HoldBalanceHandling> {\n        self.new_gas_hold_handling\n    }\n\n    /// Returns new auction delay if specified.\n    pub fn new_gas_hold_interval(&self) 
-> Option<u64> {\n        self.new_gas_hold_interval\n    }\n\n    /// Returns new validator slots if specified.\n    pub fn new_validator_slots(&self) -> Option<u32> {\n        self.new_validator_slots\n    }\n\n    /// Returns new auction delay if specified.\n    pub fn new_auction_delay(&self) -> Option<u64> {\n        self.new_auction_delay\n    }\n\n    /// Returns new locked funds period if specified.\n    pub fn new_locked_funds_period_millis(&self) -> Option<u64> {\n        self.new_locked_funds_period_millis\n    }\n\n    /// Returns new round seigniorage rate if specified.\n    pub fn new_round_seigniorage_rate(&self) -> Option<Ratio<u64>> {\n        self.new_round_seigniorage_rate\n    }\n\n    /// Returns new unbonding delay if specified.\n    pub fn new_unbonding_delay(&self) -> Option<u64> {\n        self.new_unbonding_delay\n    }\n\n    /// Returns new map of emergency global state updates.\n    pub fn global_state_update(&self) -> &BTreeMap<Key, StoredValue> {\n        &self.global_state_update\n    }\n\n    /// Returns a reference to the chainspec registry.\n    pub fn chainspec_registry(&self) -> &ChainspecRegistry {\n        &self.chainspec_registry\n    }\n\n    /// Sets new pre state hash.\n    pub fn with_pre_state_hash(&mut self, pre_state_hash: Digest) {\n        self.pre_state_hash = pre_state_hash;\n    }\n\n    /// Fee handling setting.\n    pub fn fee_handling(&self) -> FeeHandling {\n        self.fee_handling\n    }\n\n    /// Validator minimum bid amount\n    pub fn validator_minimum_bid_amount(&self) -> u64 {\n        self.validator_minimum_bid_amount\n    }\n\n    /// Maximum delegation amount for validator.\n    pub fn maximum_delegation_amount(&self) -> u64 {\n        self.maximum_delegation_amount\n    }\n\n    /// Minimum delegation amount for validator.\n    pub fn minimum_delegation_amount(&self) -> u64 {\n        self.minimum_delegation_amount\n    }\n\n    pub fn enable_addressable_entity(&self) -> bool {\n        
self.enable_addressable_entity\n    }\n\n    pub fn rewards_handling(&self) -> RewardsHandling {\n        self.rewards_handling.clone()\n    }\n\n    /// Returns minimum_delegation_rate.\n    pub fn new_minimum_delegation_rate(&self) -> Option<DelegationRate> {\n        self.minimum_delegation_rate\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/vacancy_config.rs",
    "content": "#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr,\n    bytesrepr::{Error, FromBytes, ToBytes},\n};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\nuse serde::{Deserialize, Serialize};\n\n/// The configuration to determine gas price based on block vacancy.\n#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct VacancyConfig {\n    /// The upper threshold to determine an increment in gas price\n    pub upper_threshold: u64,\n    /// The lower threshold to determine a decrement in gas price\n    pub lower_threshold: u64,\n    /// The upper limit of the gas price.\n    pub max_gas_price: u8,\n    /// The lower limit of the gas price.\n    pub min_gas_price: u8,\n}\n\nimpl Default for VacancyConfig {\n    fn default() -> Self {\n        Self {\n            upper_threshold: 90,\n            lower_threshold: 50,\n            max_gas_price: 3,\n            min_gas_price: 1,\n        }\n    }\n}\n\nimpl VacancyConfig {\n    /// Returns a random [`VacancyConfig`]\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        Self {\n            upper_threshold: rng.gen_range(49..100),\n            lower_threshold: rng.gen_range(0..50),\n            max_gas_price: rng.gen_range(3..5),\n            min_gas_price: rng.gen_range(1..3),\n        }\n    }\n}\n\nimpl ToBytes for VacancyConfig {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        self.upper_threshold.write_bytes(writer)?;\n        self.lower_threshold.write_bytes(writer)?;\n        self.max_gas_price.write_bytes(writer)?;\n        self.min_gas_price.write_bytes(writer)\n    }\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        
self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n    fn serialized_length(&self) -> usize {\n        self.upper_threshold.serialized_length()\n            + self.lower_threshold.serialized_length()\n            + self.max_gas_price.serialized_length()\n            + self.min_gas_price.serialized_length()\n    }\n}\n\nimpl FromBytes for VacancyConfig {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (upper_threshold, remainder) = u64::from_bytes(bytes)?;\n        let (lower_threshold, remainder) = u64::from_bytes(remainder)?;\n        let (max_gas_price, remainder) = u8::from_bytes(remainder)?;\n        let (min_gas_price, remainder) = u8::from_bytes(remainder)?;\n        Ok((\n            Self {\n                upper_threshold,\n                lower_threshold,\n                max_gas_price,\n                min_gas_price,\n            },\n            remainder,\n        ))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let mut rng = TestRng::new();\n        let config = VacancyConfig::random(&mut rng);\n        bytesrepr::test_serialization_roundtrip(&config);\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/vm_config/auction_costs.rs",
    "content": "//! Costs of the auction system contract.\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\nuse serde::{Deserialize, Serialize};\n\nuse crate::bytesrepr::{self, FromBytes, ToBytes};\n\n/// Default cost of the `get_era_validators` auction entry point.\npub const DEFAULT_GET_ERA_VALIDATORS_COST: u64 = 2_500_000_000;\n/// Default cost of the `read_seigniorage_recipients` auction entry point.\npub const DEFAULT_READ_SEIGNIORAGE_RECIPIENTS_COST: u64 = 5_000_000_000;\n/// Default cost of the `add_bid` auction entry point.\npub const DEFAULT_ADD_BID_COST: u64 = 2_500_000_000;\n/// Default cost of the `withdraw_bid` auction entry point.\npub const DEFAULT_WITHDRAW_BID_COST: u64 = 2_500_000_000;\n/// Default cost of the `delegate` auction entry point.\npub const DEFAULT_DELEGATE_COST: u64 = DEFAULT_WITHDRAW_BID_COST;\n/// Default cost of the `redelegate` auction entry point.\npub const DEFAULT_REDELEGATE_COST: u64 = DEFAULT_WITHDRAW_BID_COST;\n/// Default cost of the `undelegate` auction entry point.\npub const DEFAULT_UNDELEGATE_COST: u64 = DEFAULT_WITHDRAW_BID_COST;\n/// Default cost of the `run_auction` auction entry point.\npub const DEFAULT_RUN_AUCTION_COST: u64 = 2_500_000_000;\n/// Default cost of the `slash` auction entry point.\npub const DEFAULT_SLASH_COST: u64 = 2_500_000_000;\n/// Default cost of the `distribute` auction entry point.\npub const DEFAULT_DISTRIBUTE_COST: u64 = 2_500_000_000;\n/// Default cost of the `withdraw_delegator_reward` auction entry point.\npub const DEFAULT_WITHDRAW_DELEGATOR_REWARD_COST: u64 = 5_000_000_000;\n/// Default cost of the `withdraw_validator_reward` auction entry point.\npub const DEFAULT_WITHDRAW_VALIDATOR_REWARD_COST: u64 = 5_000_000_000;\n/// Default cost of the `read_era_id` auction entry point.\npub const DEFAULT_READ_ERA_ID_COST: u64 = 2_500_000_000;\n/// Default cost of the 
`activate_bid` auction entry point.\npub const DEFAULT_ACTIVATE_BID_COST: u64 = 2_500_000_000;\n/// Default cost of the `change_bid_public_key` auction entry point.\npub const DEFAULT_CHANGE_BID_PUBLIC_KEY_COST: u64 = 5_000_000_000;\n/// Default cost of the `add_reservations` auction entry point.\npub const DEFAULT_ADD_RESERVATIONS_COST: u64 = DEFAULT_WITHDRAW_BID_COST;\n/// Default cost of the `cancel_reservations` auction entry point.\npub const DEFAULT_CANCEL_RESERVATIONS_COST: u64 = DEFAULT_WITHDRAW_BID_COST;\n\n/// Description of the costs of calling auction entrypoints.\n#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct AuctionCosts {\n    /// Cost of calling the `get_era_validators` entry point.\n    pub get_era_validators: u64,\n    /// Cost of calling the `read_seigniorage_recipients` entry point.\n    pub read_seigniorage_recipients: u64,\n    /// Cost of calling the `add_bid` entry point.\n    pub add_bid: u64,\n    /// Cost of calling the `withdraw_bid` entry point.\n    pub withdraw_bid: u64,\n    /// Cost of calling the `delegate` entry point.\n    pub delegate: u64,\n    /// Cost of calling the `undelegate` entry point.\n    pub undelegate: u64,\n    /// Cost of calling the `run_auction` entry point.\n    pub run_auction: u64,\n    /// Cost of calling the `slash` entry point.\n    pub slash: u64,\n    /// Cost of calling the `distribute` entry point.\n    pub distribute: u64,\n    /// Cost of calling the `withdraw_delegator_reward` entry point.\n    pub withdraw_delegator_reward: u64,\n    /// Cost of calling the `withdraw_validator_reward` entry point.\n    pub withdraw_validator_reward: u64,\n    /// Cost of calling the `read_era_id` entry point.\n    pub read_era_id: u64,\n    /// Cost of calling the `activate_bid` entry point.\n    pub activate_bid: u64,\n    /// Cost of calling the `redelegate` entry point.\n    pub redelegate: 
u64,\n    /// Cost of calling the `change_bid_public_key` entry point.\n    pub change_bid_public_key: u64,\n    /// Cost of calling the `add_reservations` entry point.\n    pub add_reservations: u64,\n    /// Cost of calling the `cancel_reservations` entry point.\n    pub cancel_reservations: u64,\n}\n\nimpl Default for AuctionCosts {\n    fn default() -> Self {\n        Self {\n            get_era_validators: DEFAULT_GET_ERA_VALIDATORS_COST,\n            read_seigniorage_recipients: DEFAULT_READ_SEIGNIORAGE_RECIPIENTS_COST,\n            add_bid: DEFAULT_ADD_BID_COST,\n            withdraw_bid: DEFAULT_WITHDRAW_BID_COST,\n            delegate: DEFAULT_DELEGATE_COST,\n            undelegate: DEFAULT_UNDELEGATE_COST,\n            run_auction: DEFAULT_RUN_AUCTION_COST,\n            slash: DEFAULT_SLASH_COST,\n            distribute: DEFAULT_DISTRIBUTE_COST,\n            withdraw_delegator_reward: DEFAULT_WITHDRAW_DELEGATOR_REWARD_COST,\n            withdraw_validator_reward: DEFAULT_WITHDRAW_VALIDATOR_REWARD_COST,\n            read_era_id: DEFAULT_READ_ERA_ID_COST,\n            activate_bid: DEFAULT_ACTIVATE_BID_COST,\n            redelegate: DEFAULT_REDELEGATE_COST,\n            change_bid_public_key: DEFAULT_CHANGE_BID_PUBLIC_KEY_COST,\n            add_reservations: DEFAULT_ADD_RESERVATIONS_COST,\n            cancel_reservations: DEFAULT_CANCEL_RESERVATIONS_COST,\n        }\n    }\n}\n\nimpl ToBytes for AuctionCosts {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut ret = bytesrepr::unchecked_allocate_buffer(self);\n\n        let Self {\n            get_era_validators,\n            read_seigniorage_recipients,\n            add_bid,\n            withdraw_bid,\n            delegate,\n            undelegate,\n            run_auction,\n            slash,\n            distribute,\n            withdraw_delegator_reward,\n            withdraw_validator_reward,\n            read_era_id,\n            activate_bid,\n            redelegate,\n    
        change_bid_public_key,\n            add_reservations,\n            cancel_reservations,\n        } = self;\n\n        ret.append(&mut get_era_validators.to_bytes()?);\n        ret.append(&mut read_seigniorage_recipients.to_bytes()?);\n        ret.append(&mut add_bid.to_bytes()?);\n        ret.append(&mut withdraw_bid.to_bytes()?);\n        ret.append(&mut delegate.to_bytes()?);\n        ret.append(&mut undelegate.to_bytes()?);\n        ret.append(&mut run_auction.to_bytes()?);\n        ret.append(&mut slash.to_bytes()?);\n        ret.append(&mut distribute.to_bytes()?);\n        ret.append(&mut withdraw_delegator_reward.to_bytes()?);\n        ret.append(&mut withdraw_validator_reward.to_bytes()?);\n        ret.append(&mut read_era_id.to_bytes()?);\n        ret.append(&mut activate_bid.to_bytes()?);\n        ret.append(&mut redelegate.to_bytes()?);\n        ret.append(&mut change_bid_public_key.to_bytes()?);\n        ret.append(&mut add_reservations.to_bytes()?);\n        ret.append(&mut cancel_reservations.to_bytes()?);\n\n        Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize {\n        let Self {\n            get_era_validators,\n            read_seigniorage_recipients,\n            add_bid,\n            withdraw_bid,\n            delegate,\n            undelegate,\n            run_auction,\n            slash,\n            distribute,\n            withdraw_delegator_reward,\n            withdraw_validator_reward,\n            read_era_id,\n            activate_bid,\n            redelegate,\n            change_bid_public_key,\n            add_reservations,\n            cancel_reservations,\n        } = self;\n\n        get_era_validators.serialized_length()\n            + read_seigniorage_recipients.serialized_length()\n            + add_bid.serialized_length()\n            + withdraw_bid.serialized_length()\n            + delegate.serialized_length()\n            + undelegate.serialized_length()\n            + run_auction.serialized_length()\n 
           + slash.serialized_length()\n            + distribute.serialized_length()\n            + withdraw_delegator_reward.serialized_length()\n            + withdraw_validator_reward.serialized_length()\n            + read_era_id.serialized_length()\n            + activate_bid.serialized_length()\n            + redelegate.serialized_length()\n            + change_bid_public_key.serialized_length()\n            + add_reservations.serialized_length()\n            + cancel_reservations.serialized_length()\n    }\n}\n\nimpl FromBytes for AuctionCosts {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (get_era_validators, rem) = FromBytes::from_bytes(bytes)?;\n        let (read_seigniorage_recipients, rem) = FromBytes::from_bytes(rem)?;\n        let (add_bid, rem) = FromBytes::from_bytes(rem)?;\n        let (withdraw_bid, rem) = FromBytes::from_bytes(rem)?;\n        let (delegate, rem) = FromBytes::from_bytes(rem)?;\n        let (undelegate, rem) = FromBytes::from_bytes(rem)?;\n        let (run_auction, rem) = FromBytes::from_bytes(rem)?;\n        let (slash, rem) = FromBytes::from_bytes(rem)?;\n        let (distribute, rem) = FromBytes::from_bytes(rem)?;\n        let (withdraw_delegator_reward, rem) = FromBytes::from_bytes(rem)?;\n        let (withdraw_validator_reward, rem) = FromBytes::from_bytes(rem)?;\n        let (read_era_id, rem) = FromBytes::from_bytes(rem)?;\n        let (activate_bid, rem) = FromBytes::from_bytes(rem)?;\n        let (redelegate, rem) = FromBytes::from_bytes(rem)?;\n        let (change_bid_public_key, rem) = FromBytes::from_bytes(rem)?;\n        let (add_reservations, rem) = FromBytes::from_bytes(rem)?;\n        let (cancel_reservations, rem) = FromBytes::from_bytes(rem)?;\n        Ok((\n            Self {\n                get_era_validators,\n                read_seigniorage_recipients,\n                add_bid,\n                withdraw_bid,\n                delegate,\n                
undelegate,\n                run_auction,\n                slash,\n                distribute,\n                withdraw_delegator_reward,\n                withdraw_validator_reward,\n                read_era_id,\n                activate_bid,\n                redelegate,\n                change_bid_public_key,\n                add_reservations,\n                cancel_reservations,\n            },\n            rem,\n        ))\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<AuctionCosts> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> AuctionCosts {\n        AuctionCosts {\n            get_era_validators: rng.gen_range(0..i64::MAX) as u64,\n            read_seigniorage_recipients: rng.gen_range(0..i64::MAX) as u64,\n            add_bid: rng.gen_range(0..i64::MAX) as u64,\n            withdraw_bid: rng.gen_range(0..i64::MAX) as u64,\n            delegate: rng.gen_range(0..i64::MAX) as u64,\n            undelegate: rng.gen_range(0..i64::MAX) as u64,\n            run_auction: rng.gen_range(0..i64::MAX) as u64,\n            slash: rng.gen_range(0..i64::MAX) as u64,\n            distribute: rng.gen_range(0..i64::MAX) as u64,\n            withdraw_delegator_reward: rng.gen_range(0..i64::MAX) as u64,\n            withdraw_validator_reward: rng.gen_range(0..i64::MAX) as u64,\n            read_era_id: rng.gen_range(0..i64::MAX) as u64,\n            activate_bid: rng.gen_range(0..i64::MAX) as u64,\n            redelegate: rng.gen_range(0..i64::MAX) as u64,\n            change_bid_public_key: rng.gen_range(0..i64::MAX) as u64,\n            add_reservations: rng.gen_range(0..i64::MAX) as u64,\n            cancel_reservations: rng.gen_range(0..i64::MAX) as u64,\n        }\n    }\n}\n\n#[doc(hidden)]\n#[cfg(any(feature = \"gens\", test))]\npub mod gens {\n    use proptest::prelude::*;\n\n    use super::AuctionCosts;\n\n    prop_compose! 
{\n        pub fn auction_costs_arb()(\n            get_era_validators in 0..=(i64::MAX as u64),\n            read_seigniorage_recipients in 0..=(i64::MAX as u64),\n            add_bid in 0..=(i64::MAX as u64),\n            withdraw_bid in 0..=(i64::MAX as u64),\n            delegate in 0..=(i64::MAX as u64),\n            undelegate in 0..=(i64::MAX as u64),\n            run_auction in 0..=(i64::MAX as u64),\n            slash in 0..=(i64::MAX as u64),\n            distribute in 0..=(i64::MAX as u64),\n            withdraw_delegator_reward in 0..=(i64::MAX as u64),\n            withdraw_validator_reward in 0..=(i64::MAX as u64),\n            read_era_id in 0..=(i64::MAX as u64),\n            activate_bid in 0..=(i64::MAX as u64),\n            redelegate in 0..=(i64::MAX as u64),\n            change_bid_public_key in 0..=(i64::MAX as u64),\n            add_reservations in 0..=(i64::MAX as u64),\n            cancel_reservations in 0..=(i64::MAX as u64),\n        ) -> AuctionCosts {\n            AuctionCosts {\n                get_era_validators,\n                read_seigniorage_recipients,\n                add_bid,\n                withdraw_bid,\n                delegate,\n                undelegate,\n                run_auction,\n                slash,\n                distribute,\n                withdraw_delegator_reward,\n                withdraw_validator_reward,\n                read_era_id,\n                activate_bid,\n                redelegate,\n                change_bid_public_key,\n                add_reservations,\n                cancel_reservations,\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/vm_config/chainspec_registry.rs",
    "content": "//! The registry of chainspec hash digests.\n\nuse std::{collections::BTreeMap, convert::TryFrom};\n\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\n\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    CLType, CLTyped, Digest,\n};\n\ntype BytesreprChainspecRegistry = BTreeMap<String, Digest>;\n\n/// The chainspec registry.\n#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Debug)]\npub struct ChainspecRegistry {\n    chainspec_raw_hash: Digest,\n    genesis_accounts_raw_hash: Option<Digest>,\n    global_state_raw_hash: Option<Digest>,\n}\n\nimpl ChainspecRegistry {\n    const CHAINSPEC_RAW_MAP_KEY: &'static str = \"chainspec_raw\";\n    const GENESIS_ACCOUNTS_RAW_MAP_KEY: &'static str = \"genesis_accounts_raw\";\n    const GLOBAL_STATE_RAW_MAP_KEY: &'static str = \"global_state_raw\";\n\n    /// Returns a `ChainspecRegistry` constructed at genesis.\n    pub fn new_with_genesis(\n        chainspec_file_bytes: &[u8],\n        genesis_accounts_file_bytes: &[u8],\n    ) -> Self {\n        ChainspecRegistry {\n            chainspec_raw_hash: Digest::hash(chainspec_file_bytes),\n            genesis_accounts_raw_hash: Some(Digest::hash(genesis_accounts_file_bytes)),\n            global_state_raw_hash: None,\n        }\n    }\n\n    /// Returns a `ChainspecRegistry` constructed at node upgrade.\n    pub fn new_with_optional_global_state(\n        chainspec_file_bytes: &[u8],\n        global_state_file_bytes: Option<&[u8]>,\n    ) -> Self {\n        ChainspecRegistry {\n            chainspec_raw_hash: Digest::hash(chainspec_file_bytes),\n            genesis_accounts_raw_hash: None,\n            global_state_raw_hash: global_state_file_bytes.map(Digest::hash),\n        }\n    }\n\n    /// Returns the hash of the raw bytes of the chainspec.toml file.\n    pub fn chainspec_raw_hash(&self) -> &Digest {\n        
&self.chainspec_raw_hash\n    }\n\n    /// Returns the hash of the raw bytes of the genesis accounts.toml file if it exists.\n    pub fn genesis_accounts_raw_hash(&self) -> Option<&Digest> {\n        self.genesis_accounts_raw_hash.as_ref()\n    }\n\n    /// Returns the hash of the raw bytes of the global_state.toml file if it exists.\n    pub fn global_state_raw_hash(&self) -> Option<&Digest> {\n        self.global_state_raw_hash.as_ref()\n    }\n\n    fn as_map(&self) -> BytesreprChainspecRegistry {\n        let mut map = BTreeMap::new();\n        map.insert(\n            Self::CHAINSPEC_RAW_MAP_KEY.to_string(),\n            self.chainspec_raw_hash,\n        );\n        if let Some(genesis_accounts_raw_hash) = self.genesis_accounts_raw_hash {\n            map.insert(\n                Self::GENESIS_ACCOUNTS_RAW_MAP_KEY.to_string(),\n                genesis_accounts_raw_hash,\n            );\n        }\n        if let Some(global_state_raw_hash) = self.global_state_raw_hash {\n            map.insert(\n                Self::GLOBAL_STATE_RAW_MAP_KEY.to_string(),\n                global_state_raw_hash,\n            );\n        }\n        map\n    }\n}\n\nimpl TryFrom<BytesreprChainspecRegistry> for ChainspecRegistry {\n    type Error = bytesrepr::Error;\n\n    fn try_from(map: BytesreprChainspecRegistry) -> Result<Self, Self::Error> {\n        let chainspec_raw_hash = *map\n            .get(Self::CHAINSPEC_RAW_MAP_KEY)\n            .ok_or(bytesrepr::Error::Formatting)?;\n        let genesis_accounts_raw_hash = map.get(Self::GENESIS_ACCOUNTS_RAW_MAP_KEY).copied();\n        let global_state_raw_hash = map.get(Self::GLOBAL_STATE_RAW_MAP_KEY).copied();\n        Ok(ChainspecRegistry {\n            chainspec_raw_hash,\n            genesis_accounts_raw_hash,\n            global_state_raw_hash,\n        })\n    }\n}\n\nimpl ToBytes for ChainspecRegistry {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.as_map().to_bytes()\n    }\n\n    fn 
serialized_length(&self) -> usize {\n        self.as_map().serialized_length()\n    }\n}\n\nimpl FromBytes for ChainspecRegistry {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (map, remainder) = BytesreprChainspecRegistry::from_bytes(bytes)?;\n        let chainspec_registry = ChainspecRegistry::try_from(map)?;\n        Ok((chainspec_registry, remainder))\n    }\n}\n\nimpl CLTyped for ChainspecRegistry {\n    fn cl_type() -> CLType {\n        BytesreprChainspecRegistry::cl_type()\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<ChainspecRegistry> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> ChainspecRegistry {\n        ChainspecRegistry {\n            chainspec_raw_hash: rng.gen(),\n            genesis_accounts_raw_hash: rng.gen(),\n            global_state_raw_hash: rng.gen(),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use rand::Rng;\n\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let mut rng = rand::thread_rng();\n\n        let chainspec_file_bytes: [u8; 10] = rng.gen();\n\n        let genesis_account_file_bytes: [u8; 10] = rng.gen();\n        let chainspec_registry =\n            ChainspecRegistry::new_with_genesis(&chainspec_file_bytes, &genesis_account_file_bytes);\n        bytesrepr::test_serialization_roundtrip(&chainspec_registry);\n\n        let global_state_file_bytes: [u8; 10] = rng.gen();\n        let chainspec_registry = ChainspecRegistry::new_with_optional_global_state(\n            &chainspec_file_bytes,\n            Some(&global_state_file_bytes),\n        );\n        bytesrepr::test_serialization_roundtrip(&chainspec_registry);\n\n        let chainspec_registry =\n            ChainspecRegistry::new_with_optional_global_state(&chainspec_file_bytes, None);\n        bytesrepr::test_serialization_roundtrip(&chainspec_registry);\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/vm_config/handle_payment_costs.rs",
    "content": "//! Costs of the `handle_payment` system contract.\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\nuse serde::{Deserialize, Serialize};\n\nuse crate::bytesrepr::{self, FromBytes, ToBytes};\n\n/// Default cost of the `get_payment_purse` `handle_payment` entry point.\npub const DEFAULT_GET_PAYMENT_PURSE_COST: u32 = 10_000;\n/// Default cost of the `set_refund_purse` `handle_payment` entry point.\npub const DEFAULT_SET_REFUND_PURSE_COST: u32 = 10_000;\n/// Default cost of the `get_refund_purse` `handle_payment` entry point.\npub const DEFAULT_GET_REFUND_PURSE_COST: u32 = 10_000;\n/// Default cost of the `finalize_payment` `handle_payment` entry point.\npub const DEFAULT_FINALIZE_PAYMENT_COST: u32 = 2_500_000_000;\n\n/// Description of the costs of calling `handle_payment` entrypoints.\n#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct HandlePaymentCosts {\n    /// Cost of calling the `get_payment_purse` entry point.\n    pub get_payment_purse: u32,\n    /// Cost of calling the `set_refund_purse` entry point.\n    pub set_refund_purse: u32,\n    /// Cost of calling the `get_refund_purse` entry point.\n    pub get_refund_purse: u32,\n    /// Cost of calling the `finalize_payment` entry point.\n    pub finalize_payment: u32,\n}\n\nimpl Default for HandlePaymentCosts {\n    fn default() -> Self {\n        Self {\n            get_payment_purse: DEFAULT_GET_PAYMENT_PURSE_COST,\n            set_refund_purse: DEFAULT_SET_REFUND_PURSE_COST,\n            get_refund_purse: DEFAULT_GET_REFUND_PURSE_COST,\n            finalize_payment: DEFAULT_FINALIZE_PAYMENT_COST,\n        }\n    }\n}\n\nimpl ToBytes for HandlePaymentCosts {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut ret = 
bytesrepr::unchecked_allocate_buffer(self);\n\n        ret.append(&mut self.get_payment_purse.to_bytes()?);\n        ret.append(&mut self.set_refund_purse.to_bytes()?);\n        ret.append(&mut self.get_refund_purse.to_bytes()?);\n        ret.append(&mut self.finalize_payment.to_bytes()?);\n\n        Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.get_payment_purse.serialized_length()\n            + self.set_refund_purse.serialized_length()\n            + self.get_refund_purse.serialized_length()\n            + self.finalize_payment.serialized_length()\n    }\n}\n\nimpl FromBytes for HandlePaymentCosts {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (get_payment_purse, rem) = FromBytes::from_bytes(bytes)?;\n        let (set_refund_purse, rem) = FromBytes::from_bytes(rem)?;\n        let (get_refund_purse, rem) = FromBytes::from_bytes(rem)?;\n        let (finalize_payment, rem) = FromBytes::from_bytes(rem)?;\n\n        Ok((\n            Self {\n                get_payment_purse,\n                set_refund_purse,\n                get_refund_purse,\n                finalize_payment,\n            },\n            rem,\n        ))\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<HandlePaymentCosts> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> HandlePaymentCosts {\n        HandlePaymentCosts {\n            get_payment_purse: rng.gen(),\n            set_refund_purse: rng.gen(),\n            get_refund_purse: rng.gen(),\n            finalize_payment: rng.gen(),\n        }\n    }\n}\n\n#[doc(hidden)]\n#[cfg(any(feature = \"gens\", test))]\npub mod gens {\n    use proptest::{num, prop_compose};\n\n    use super::HandlePaymentCosts;\n\n    prop_compose! 
{\n        pub fn handle_payment_costs_arb()(\n            get_payment_purse in num::u32::ANY,\n            set_refund_purse in num::u32::ANY,\n            get_refund_purse in num::u32::ANY,\n            finalize_payment in num::u32::ANY,\n        ) -> HandlePaymentCosts {\n            HandlePaymentCosts {\n                get_payment_purse,\n                set_refund_purse,\n                get_refund_purse,\n                finalize_payment,\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/vm_config/host_function_costs.rs",
    "content": "//! Support for host function gas cost tables.\nuse core::ops::Add;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse derive_more::Add;\nuse num_traits::Zero;\nuse rand::{distributions::Standard, prelude::Distribution, Rng};\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes, U32_SERIALIZED_LENGTH},\n    Gas,\n};\n\n/// Representation of argument's cost.\npub type Cost = u32;\n\nconst COST_SERIALIZED_LENGTH: usize = U32_SERIALIZED_LENGTH;\n\n/// An identifier that represents an unused argument.\nconst NOT_USED: Cost = 0;\n\n/// An arbitrary default fixed cost for host functions that were not researched yet.\nconst DEFAULT_FIXED_COST: Cost = 200;\n\nconst DEFAULT_ADD_COST: u32 = 5_800;\nconst DEFAULT_ADD_ASSOCIATED_KEY_COST: u32 = 1_200_000;\n\nconst DEFAULT_CALL_CONTRACT_COST: u32 = 300_000_000;\n\nconst DEFAULT_CREATE_PURSE_COST: u32 = 2_500_000_000;\nconst DEFAULT_GET_BALANCE_COST: u32 = 3_000_000;\nconst DEFAULT_GET_BLOCKTIME_COST: u32 = 330;\nconst DEFAULT_GET_CALLER_COST: u32 = 380;\nconst DEFAULT_GET_KEY_COST: u32 = 2_000;\nconst DEFAULT_GET_KEY_NAME_SIZE_WEIGHT: u32 = 440;\nconst DEFAULT_GET_MAIN_PURSE_COST: u32 = 1_300;\nconst DEFAULT_GET_PHASE_COST: u32 = 710;\nconst DEFAULT_GET_SYSTEM_CONTRACT_COST: u32 = 1_100;\nconst DEFAULT_HAS_KEY_COST: u32 = 1_500;\nconst DEFAULT_HAS_KEY_NAME_SIZE_WEIGHT: u32 = 840;\nconst DEFAULT_IS_VALID_UREF_COST: u32 = 760;\nconst DEFAULT_LOAD_NAMED_KEYS_COST: u32 = 42_000;\nconst DEFAULT_NEW_UREF_COST: u32 = 17_000;\nconst DEFAULT_NEW_UREF_VALUE_SIZE_WEIGHT: u32 = 590;\n\nconst DEFAULT_PRINT_COST: u32 = 20_000;\nconst DEFAULT_PRINT_TEXT_SIZE_WEIGHT: u32 = 4_600;\n\nconst DEFAULT_PUT_KEY_COST: u32 = 100_000_000;\nconst DEFAULT_PUT_KEY_NAME_SIZE_WEIGHT: u32 = 120_000;\n\nconst DEFAULT_READ_HOST_BUFFER_COST: u32 = 3_500;\nconst DEFAULT_READ_HOST_BUFFER_DEST_SIZE_WEIGHT: u32 = 310;\n\nconst DEFAULT_READ_VALUE_COST: u32 = 60_000;\nconst 
DEFAULT_DICTIONARY_GET_COST: u32 = 5_500;\nconst DEFAULT_DICTIONARY_GET_KEY_SIZE_WEIGHT: u32 = 590;\n\nconst DEFAULT_REMOVE_ASSOCIATED_KEY_COST: u32 = 4_200;\n\nconst DEFAULT_REMOVE_KEY_COST: u32 = 61_000;\nconst DEFAULT_REMOVE_KEY_NAME_SIZE_WEIGHT: u32 = 3_200;\n\nconst DEFAULT_RET_COST: u32 = 23_000;\nconst DEFAULT_RET_VALUE_SIZE_WEIGHT: u32 = 420_000;\n\nconst DEFAULT_REVERT_COST: u32 = 500;\nconst DEFAULT_SET_ACTION_THRESHOLD_COST: u32 = 74_000;\nconst DEFAULT_TRANSFER_FROM_PURSE_TO_ACCOUNT_COST: u32 = 2_500_000_000;\nconst DEFAULT_TRANSFER_FROM_PURSE_TO_PURSE_COST: u32 = 82_000_000;\nconst DEFAULT_TRANSFER_TO_ACCOUNT_COST: u32 = 2_500_000_000;\nconst DEFAULT_UPDATE_ASSOCIATED_KEY_COST: u32 = 4_200;\n\nconst DEFAULT_WRITE_COST: u32 = 14_000;\nconst DEFAULT_WRITE_VALUE_SIZE_WEIGHT: u32 = 980;\n\nconst DEFAULT_ARG_CHARGE: u32 = 120_000;\n\nconst DEFAULT_DICTIONARY_PUT_COST: u32 = 9_500;\nconst DEFAULT_DICTIONARY_PUT_KEY_BYTES_SIZE_WEIGHT: u32 = 1_800;\nconst DEFAULT_DICTIONARY_PUT_VALUE_SIZE_WEIGHT: u32 = 520;\n\n/// Default cost for a new dictionary.\npub const DEFAULT_NEW_DICTIONARY_COST: u32 = DEFAULT_NEW_UREF_COST;\n\n/// Host function cost unit for a new dictionary.\n#[allow(unused)]\npub const DEFAULT_HOST_FUNCTION_NEW_DICTIONARY: HostFunction<[Cost; 1]> =\n    HostFunction::new(DEFAULT_NEW_DICTIONARY_COST, [NOT_USED]);\nconst DEFAULT_BLAKE2B_COST: u32 = 1_200_000;\n\n/// Default value that the cost of calling `casper_emit_message` increases by for every new message\n/// emitted within an execution.\npub const DEFAULT_COST_INCREASE_PER_MESSAGE_EMITTED: u32 = 50;\n\nconst DEFAULT_MESSAGE_TOPIC_NAME_SIZE_WEIGHT: u32 = 30_000;\nconst DEFAULT_MESSAGE_PAYLOAD_SIZE_WEIGHT: u32 = 120_000;\n\nconst DEFAULT_CONTRACT_VERSION_ARG_SIZE_WEIGHT: u32 = 120_000;\n\nconst DEFAULT_GENERIC_HASH_COST: u32 = 1_200_000;\n\nconst DEFAULT_GENERIC_HASH_INPUT_COST: u32 = 120_000;\n\nconst DEFAULT_RECOVER_SECP256K1_COST: u32 = 1_300_000;\nconst DEFAULT_RECOVER_SECP256K1_SIZE_WEIGHT: 
u32 = 120_000;\n\nconst DEFAULT_VERIFY_SIGNATURE_COST: u32 = 1_300_000;\nconst DEFAULT_VERIFY_SIGNATURE_SIZE_WEIGHT: u32 = 120_000;\n\n/// Representation of a host function cost.\n///\n/// The total gas cost is equal to `cost` + sum of each argument weight multiplied by the byte size\n/// of the data.\n#[derive(Copy, Clone, PartialEq, Eq, Deserialize, Serialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct HostFunction<T> {\n    /// How much the user is charged for calling the host function.\n    cost: Cost,\n    /// Weights of the function arguments.\n    arguments: T,\n}\n\nimpl<T> Default for HostFunction<T>\nwhere\n    T: Default,\n{\n    fn default() -> Self {\n        HostFunction::new(DEFAULT_FIXED_COST, Default::default())\n    }\n}\n\nimpl<T> HostFunction<T> {\n    /// Creates a new instance of `HostFunction` with a fixed call cost and argument weights.\n    pub const fn new(cost: Cost, arguments: T) -> Self {\n        Self { cost, arguments }\n    }\n\n    pub fn with_new_static_cost(self, cost: Cost) -> Self {\n        Self {\n            cost,\n            arguments: self.arguments,\n        }\n    }\n\n    /// Returns the base gas fee for calling the host function.\n    pub fn cost(&self) -> Cost {\n        self.cost\n    }\n}\n\nimpl<T> HostFunction<T>\nwhere\n    T: Default,\n{\n    /// Creates a new fixed host function cost with argument weights of zero.\n    pub fn fixed(cost: Cost) -> Self {\n        Self {\n            cost,\n            ..Default::default()\n        }\n    }\n}\n\nimpl<T> HostFunction<T>\nwhere\n    T: AsRef<[Cost]>,\n{\n    /// Returns a slice containing the argument weights.\n    pub fn arguments(&self) -> &[Cost] {\n        self.arguments.as_ref()\n    }\n\n    /// Calculate gas cost for a host function\n    pub fn calculate_gas_cost(&self, weights: T) -> Option<Gas> {\n        let mut gas = Gas::new(self.cost);\n        for (argument, weight) in 
self.arguments.as_ref().iter().zip(weights.as_ref()) {\n            let lhs = Gas::new(*argument);\n            let rhs = Gas::new(*weight);\n            let product = lhs.checked_mul(rhs)?;\n            gas = gas.checked_add(product)?;\n        }\n        Some(gas)\n    }\n}\n\nimpl<const COUNT: usize> Add for HostFunction<[Cost; COUNT]> {\n    type Output = HostFunction<[Cost; COUNT]>;\n\n    fn add(self, rhs: Self) -> Self::Output {\n        let mut result = HostFunction::new(self.cost + rhs.cost, [0; COUNT]);\n        for i in 0..COUNT {\n            result.arguments[i] = self.arguments[i] + rhs.arguments[i];\n        }\n        result\n    }\n}\n\nimpl<const COUNT: usize> Zero for HostFunction<[Cost; COUNT]> {\n    fn zero() -> Self {\n        HostFunction::new(0, [0; COUNT])\n    }\n\n    fn is_zero(&self) -> bool {\n        !self.arguments.iter().any(|cost| *cost != 0) && self.cost.is_zero()\n    }\n}\n\nimpl<T> Distribution<HostFunction<T>> for Standard\nwhere\n    Standard: Distribution<T>,\n    T: AsRef<[Cost]>,\n{\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> HostFunction<T> {\n        let cost = rng.gen::<Cost>();\n        let arguments = rng.gen();\n        HostFunction::new(cost, arguments)\n    }\n}\n\nimpl<T> ToBytes for HostFunction<T>\nwhere\n    T: AsRef<[Cost]>,\n{\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut ret = bytesrepr::unchecked_allocate_buffer(self);\n        ret.append(&mut self.cost.to_bytes()?);\n        for value in self.arguments.as_ref().iter() {\n            ret.append(&mut value.to_bytes()?);\n        }\n        Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.cost.serialized_length() + (COST_SERIALIZED_LENGTH * self.arguments.as_ref().len())\n    }\n}\n\nimpl<T> FromBytes for HostFunction<T>\nwhere\n    T: Default + AsMut<[Cost]>,\n{\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (cost, mut bytes) = 
FromBytes::from_bytes(bytes)?;\n        let mut arguments = T::default();\n        let arguments_mut = arguments.as_mut();\n        for ith_argument in arguments_mut {\n            let (cost, rem) = FromBytes::from_bytes(bytes)?;\n            *ith_argument = cost;\n            bytes = rem;\n        }\n        Ok((Self { cost, arguments }, bytes))\n    }\n}\n\n/// Definition of a host function cost table.\n#[derive(Add, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct HostFunctionCostsV1 {\n    /// Cost increase for successive calls to `casper_emit_message` within an execution.\n    pub cost_increase_per_message: u32,\n    /// Cost of calling the `read_value` host function.\n    pub read_value: HostFunction<[Cost; 3]>,\n    /// Cost of calling the `dictionary_get` host function.\n    pub dictionary_get: HostFunction<[Cost; 3]>,\n    /// Cost of calling the `write` host function.\n    pub write: HostFunction<[Cost; 4]>,\n    /// Cost of calling the `dictionary_put` host function.\n    pub dictionary_put: HostFunction<[Cost; 4]>,\n    /// Cost of calling the `add` host function.\n    pub add: HostFunction<[Cost; 4]>,\n    /// Cost of calling the `new_uref` host function.\n    pub new_uref: HostFunction<[Cost; 3]>,\n    /// Cost of calling the `load_named_keys` host function.\n    pub load_named_keys: HostFunction<[Cost; 2]>,\n    /// Cost of calling the `ret` host function.\n    pub ret: HostFunction<[Cost; 2]>,\n    /// Cost of calling the `get_key` host function.\n    pub get_key: HostFunction<[Cost; 5]>,\n    /// Cost of calling the `has_key` host function.\n    pub has_key: HostFunction<[Cost; 2]>,\n    /// Cost of calling the `put_key` host function.\n    pub put_key: HostFunction<[Cost; 4]>,\n    /// Cost of calling the `remove_key` host function.\n    pub remove_key: HostFunction<[Cost; 2]>,\n    /// Cost of calling the `revert` host function.\n    pub 
revert: HostFunction<[Cost; 1]>,\n    /// Cost of calling the `is_valid_uref` host function.\n    pub is_valid_uref: HostFunction<[Cost; 2]>,\n    /// Cost of calling the `add_associated_key` host function.\n    pub add_associated_key: HostFunction<[Cost; 3]>,\n    /// Cost of calling the `remove_associated_key` host function.\n    pub remove_associated_key: HostFunction<[Cost; 2]>,\n    /// Cost of calling the `update_associated_key` host function.\n    pub update_associated_key: HostFunction<[Cost; 3]>,\n    /// Cost of calling the `set_action_threshold` host function.\n    pub set_action_threshold: HostFunction<[Cost; 2]>,\n    /// Cost of calling the `get_caller` host function.\n    pub get_caller: HostFunction<[Cost; 1]>,\n    /// Cost of calling the `get_blocktime` host function.\n    pub get_blocktime: HostFunction<[Cost; 1]>,\n    /// Cost of calling the `create_purse` host function.\n    pub create_purse: HostFunction<[Cost; 2]>,\n    /// Cost of calling the `transfer_to_account` host function.\n    pub transfer_to_account: HostFunction<[Cost; 7]>,\n    /// Cost of calling the `transfer_from_purse_to_account` host function.\n    pub transfer_from_purse_to_account: HostFunction<[Cost; 9]>,\n    /// Cost of calling the `transfer_from_purse_to_purse` host function.\n    pub transfer_from_purse_to_purse: HostFunction<[Cost; 8]>,\n    /// Cost of calling the `get_balance` host function.\n    pub get_balance: HostFunction<[Cost; 3]>,\n    /// Cost of calling the `get_phase` host function.\n    pub get_phase: HostFunction<[Cost; 1]>,\n    /// Cost of calling the `get_system_contract` host function.\n    pub get_system_contract: HostFunction<[Cost; 3]>,\n    /// Cost of calling the `get_main_purse` host function.\n    pub get_main_purse: HostFunction<[Cost; 1]>,\n    /// Cost of calling the `read_host_buffer` host function.\n    pub read_host_buffer: HostFunction<[Cost; 3]>,\n    /// Cost of calling the `create_contract_package_at_hash` host function.\n    pub 
create_contract_package_at_hash: HostFunction<[Cost; 2]>,\n    /// Cost of calling the `create_contract_user_group` host function.\n    pub create_contract_user_group: HostFunction<[Cost; 8]>,\n    /// Cost of calling the `add_contract_version` host function.\n    pub add_contract_version: HostFunction<[Cost; 10]>,\n    /// Cost of calling the `add_contract_version_with_message_topics` host function.\n    pub add_contract_version_with_message_topics: HostFunction<[Cost; 11]>,\n    /// Cost of calling the `add_package_version` host function.\n    pub add_package_version_with_message_topics: HostFunction<[Cost; 11]>,\n    /// Cost of calling the `disable_contract_version` host function.\n    pub disable_contract_version: HostFunction<[Cost; 4]>,\n    /// Cost of calling the `call_contract` host function.\n    pub call_contract: HostFunction<[Cost; 7]>,\n    /// Cost of calling the `call_versioned_contract` host function.\n    pub call_versioned_contract: HostFunction<[Cost; 9]>,\n    /// Cost of calling the `get_named_arg_size` host function.\n    pub get_named_arg_size: HostFunction<[Cost; 3]>,\n    /// Cost of calling the `get_named_arg` host function.\n    pub get_named_arg: HostFunction<[Cost; 4]>,\n    /// Cost of calling the `remove_contract_user_group` host function.\n    pub remove_contract_user_group: HostFunction<[Cost; 4]>,\n    /// Cost of calling the `provision_contract_user_group_uref` host function.\n    pub provision_contract_user_group_uref: HostFunction<[Cost; 5]>,\n    /// Cost of calling the `remove_contract_user_group_urefs` host function.\n    pub remove_contract_user_group_urefs: HostFunction<[Cost; 6]>,\n    /// Cost of calling the `print` host function.\n    pub print: HostFunction<[Cost; 2]>,\n    /// Cost of calling the `blake2b` host function.\n    pub blake2b: HostFunction<[Cost; 4]>,\n    /// Cost of calling the `next address` host function.\n    pub random_bytes: HostFunction<[Cost; 2]>,\n    /// Cost of calling the 
`enable_contract_version` host function.\n    pub enable_contract_version: HostFunction<[Cost; 4]>,\n    /// Cost of calling the `casper_manage_message_topic` host function.\n    pub manage_message_topic: HostFunction<[Cost; 4]>,\n    /// Cost of calling the `casper_emit_message` host function.\n    pub emit_message: HostFunction<[Cost; 4]>,\n    /// Cost of calling the `get_block_info` host function.\n    pub get_block_info: HostFunction<[Cost; 2]>,\n    /// Cost of calling the `generic_hash` host function.\n    pub generic_hash: HostFunction<[Cost; 5]>,\n    /// Cost of calling the 'recover_secp256k1' host function.\n    pub recover_secp256k1: HostFunction<[Cost; 6]>,\n    /// Cost of calling the 'recover_secp256k1' host function.\n    pub verify_signature: HostFunction<[Cost; 6]>,\n    /// Cost of calling the 'call_package_version' host function.\n    pub call_package_version: HostFunction<[Cost; 11]>,\n}\n\nimpl Zero for HostFunctionCostsV1 {\n    fn zero() -> Self {\n        Self {\n            read_value: HostFunction::zero(),\n            dictionary_get: HostFunction::zero(),\n            write: HostFunction::zero(),\n            dictionary_put: HostFunction::zero(),\n            add: HostFunction::zero(),\n            new_uref: HostFunction::zero(),\n            load_named_keys: HostFunction::zero(),\n            ret: HostFunction::zero(),\n            get_key: HostFunction::zero(),\n            has_key: HostFunction::zero(),\n            put_key: HostFunction::zero(),\n            remove_key: HostFunction::zero(),\n            revert: HostFunction::zero(),\n            is_valid_uref: HostFunction::zero(),\n            add_associated_key: HostFunction::zero(),\n            remove_associated_key: HostFunction::zero(),\n            update_associated_key: HostFunction::zero(),\n            set_action_threshold: HostFunction::zero(),\n            get_caller: HostFunction::zero(),\n            get_blocktime: HostFunction::zero(),\n            create_purse: 
HostFunction::zero(),\n            transfer_to_account: HostFunction::zero(),\n            transfer_from_purse_to_account: HostFunction::zero(),\n            transfer_from_purse_to_purse: HostFunction::zero(),\n            get_balance: HostFunction::zero(),\n            get_phase: HostFunction::zero(),\n            get_system_contract: HostFunction::zero(),\n            get_main_purse: HostFunction::zero(),\n            read_host_buffer: HostFunction::zero(),\n            create_contract_package_at_hash: HostFunction::zero(),\n            create_contract_user_group: HostFunction::zero(),\n            add_contract_version_with_message_topics: HostFunction::zero(),\n            add_contract_version: HostFunction::zero(),\n            add_package_version_with_message_topics: HostFunction::zero(),\n            disable_contract_version: HostFunction::zero(),\n            call_contract: HostFunction::zero(),\n            call_versioned_contract: HostFunction::zero(),\n            get_named_arg_size: HostFunction::zero(),\n            get_named_arg: HostFunction::zero(),\n            remove_contract_user_group: HostFunction::zero(),\n            provision_contract_user_group_uref: HostFunction::zero(),\n            remove_contract_user_group_urefs: HostFunction::zero(),\n            print: HostFunction::zero(),\n            blake2b: HostFunction::zero(),\n            random_bytes: HostFunction::zero(),\n            enable_contract_version: HostFunction::zero(),\n            manage_message_topic: HostFunction::zero(),\n            emit_message: HostFunction::zero(),\n            cost_increase_per_message: Zero::zero(),\n            get_block_info: HostFunction::zero(),\n            generic_hash: HostFunction::zero(),\n            recover_secp256k1: HostFunction::zero(),\n            verify_signature: HostFunction::zero(),\n            call_package_version: HostFunction::zero(),\n        }\n    }\n\n    fn is_zero(&self) -> bool {\n        let HostFunctionCostsV1 {\n        
    cost_increase_per_message,\n            read_value,\n            dictionary_get,\n            write,\n            dictionary_put,\n            add,\n            new_uref,\n            load_named_keys,\n            ret,\n            get_key,\n            has_key,\n            put_key,\n            remove_key,\n            revert,\n            is_valid_uref,\n            add_associated_key,\n            remove_associated_key,\n            update_associated_key,\n            set_action_threshold,\n            get_caller,\n            get_blocktime,\n            create_purse,\n            transfer_to_account,\n            transfer_from_purse_to_account,\n            transfer_from_purse_to_purse,\n            get_balance,\n            get_phase,\n            get_system_contract,\n            get_main_purse,\n            read_host_buffer,\n            create_contract_package_at_hash,\n            create_contract_user_group,\n            add_contract_version_with_message_topics,\n            add_contract_version,\n            add_package_version_with_message_topics: add_package_version,\n            disable_contract_version,\n            call_contract,\n            call_versioned_contract,\n            get_named_arg_size,\n            get_named_arg,\n            remove_contract_user_group,\n            provision_contract_user_group_uref,\n            remove_contract_user_group_urefs,\n            print,\n            blake2b,\n            random_bytes,\n            enable_contract_version,\n            manage_message_topic,\n            emit_message,\n            get_block_info,\n            generic_hash,\n            recover_secp256k1,\n            verify_signature,\n            call_package_version,\n        } = self;\n        read_value.is_zero()\n            && dictionary_get.is_zero()\n            && write.is_zero()\n            && dictionary_put.is_zero()\n            && add.is_zero()\n            && new_uref.is_zero()\n            && load_named_keys.is_zero()\n  
          && ret.is_zero()\n            && get_key.is_zero()\n            && has_key.is_zero()\n            && put_key.is_zero()\n            && remove_key.is_zero()\n            && revert.is_zero()\n            && is_valid_uref.is_zero()\n            && add_associated_key.is_zero()\n            && remove_associated_key.is_zero()\n            && update_associated_key.is_zero()\n            && set_action_threshold.is_zero()\n            && get_caller.is_zero()\n            && get_blocktime.is_zero()\n            && create_purse.is_zero()\n            && transfer_to_account.is_zero()\n            && transfer_from_purse_to_account.is_zero()\n            && transfer_from_purse_to_purse.is_zero()\n            && get_balance.is_zero()\n            && get_phase.is_zero()\n            && get_system_contract.is_zero()\n            && get_main_purse.is_zero()\n            && read_host_buffer.is_zero()\n            && create_contract_package_at_hash.is_zero()\n            && create_contract_user_group.is_zero()\n            && add_contract_version.is_zero()\n            && disable_contract_version.is_zero()\n            && call_contract.is_zero()\n            && call_versioned_contract.is_zero()\n            && get_named_arg_size.is_zero()\n            && get_named_arg.is_zero()\n            && remove_contract_user_group.is_zero()\n            && provision_contract_user_group_uref.is_zero()\n            && remove_contract_user_group_urefs.is_zero()\n            && print.is_zero()\n            && blake2b.is_zero()\n            && random_bytes.is_zero()\n            && enable_contract_version.is_zero()\n            && manage_message_topic.is_zero()\n            && emit_message.is_zero()\n            && cost_increase_per_message.is_zero()\n            && add_package_version.is_zero()\n            && get_block_info.is_zero()\n            && add_contract_version_with_message_topics.is_zero()\n            && generic_hash.is_zero()\n            && recover_secp256k1.is_zero()\n       
     && verify_signature.is_zero()\n            && call_package_version.is_zero()\n    }\n}\n\nimpl Default for HostFunctionCostsV1 {\n    fn default() -> Self {\n        Self {\n            read_value: HostFunction::new(\n                DEFAULT_READ_VALUE_COST,\n                [NOT_USED, DEFAULT_ARG_CHARGE, NOT_USED],\n            ),\n            dictionary_get: HostFunction::new(\n                DEFAULT_DICTIONARY_GET_COST,\n                [NOT_USED, DEFAULT_DICTIONARY_GET_KEY_SIZE_WEIGHT, NOT_USED],\n            ),\n            write: HostFunction::new(\n                DEFAULT_WRITE_COST,\n                [\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    DEFAULT_WRITE_VALUE_SIZE_WEIGHT,\n                ],\n            ),\n            dictionary_put: HostFunction::new(\n                DEFAULT_DICTIONARY_PUT_COST,\n                [\n                    NOT_USED,\n                    DEFAULT_DICTIONARY_PUT_KEY_BYTES_SIZE_WEIGHT,\n                    NOT_USED,\n                    DEFAULT_DICTIONARY_PUT_VALUE_SIZE_WEIGHT,\n                ],\n            ),\n            add: HostFunction::fixed(DEFAULT_ADD_COST),\n            new_uref: HostFunction::new(\n                DEFAULT_NEW_UREF_COST,\n                [NOT_USED, NOT_USED, DEFAULT_NEW_UREF_VALUE_SIZE_WEIGHT],\n            ),\n            load_named_keys: HostFunction::fixed(DEFAULT_LOAD_NAMED_KEYS_COST),\n            ret: HostFunction::new(DEFAULT_RET_COST, [NOT_USED, DEFAULT_RET_VALUE_SIZE_WEIGHT]),\n            get_key: HostFunction::new(\n                DEFAULT_GET_KEY_COST,\n                [\n                    NOT_USED,\n                    DEFAULT_GET_KEY_NAME_SIZE_WEIGHT,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                ],\n            ),\n            has_key: HostFunction::new(\n                DEFAULT_HAS_KEY_COST,\n                [NOT_USED, 
DEFAULT_HAS_KEY_NAME_SIZE_WEIGHT],\n            ),\n            put_key: HostFunction::new(\n                DEFAULT_PUT_KEY_COST,\n                [\n                    NOT_USED,\n                    DEFAULT_PUT_KEY_NAME_SIZE_WEIGHT,\n                    NOT_USED,\n                    DEFAULT_ARG_CHARGE,\n                ],\n            ),\n            remove_key: HostFunction::new(\n                DEFAULT_REMOVE_KEY_COST,\n                [NOT_USED, DEFAULT_REMOVE_KEY_NAME_SIZE_WEIGHT],\n            ),\n            revert: HostFunction::fixed(DEFAULT_REVERT_COST),\n            is_valid_uref: HostFunction::fixed(DEFAULT_IS_VALID_UREF_COST),\n            add_associated_key: HostFunction::fixed(DEFAULT_ADD_ASSOCIATED_KEY_COST),\n            remove_associated_key: HostFunction::fixed(DEFAULT_REMOVE_ASSOCIATED_KEY_COST),\n            update_associated_key: HostFunction::fixed(DEFAULT_UPDATE_ASSOCIATED_KEY_COST),\n            set_action_threshold: HostFunction::fixed(DEFAULT_SET_ACTION_THRESHOLD_COST),\n            get_caller: HostFunction::fixed(DEFAULT_GET_CALLER_COST),\n            get_blocktime: HostFunction::fixed(DEFAULT_GET_BLOCKTIME_COST),\n            create_purse: HostFunction::fixed(DEFAULT_CREATE_PURSE_COST),\n            transfer_to_account: HostFunction::fixed(DEFAULT_TRANSFER_TO_ACCOUNT_COST),\n            transfer_from_purse_to_account: HostFunction::fixed(\n                DEFAULT_TRANSFER_FROM_PURSE_TO_ACCOUNT_COST,\n            ),\n            transfer_from_purse_to_purse: HostFunction::fixed(\n                DEFAULT_TRANSFER_FROM_PURSE_TO_PURSE_COST,\n            ),\n            get_balance: HostFunction::fixed(DEFAULT_GET_BALANCE_COST),\n            get_phase: HostFunction::fixed(DEFAULT_GET_PHASE_COST),\n            get_system_contract: HostFunction::fixed(DEFAULT_GET_SYSTEM_CONTRACT_COST),\n            get_main_purse: HostFunction::fixed(DEFAULT_GET_MAIN_PURSE_COST),\n            read_host_buffer: HostFunction::new(\n                
DEFAULT_READ_HOST_BUFFER_COST,\n                [\n                    NOT_USED,\n                    DEFAULT_READ_HOST_BUFFER_DEST_SIZE_WEIGHT,\n                    NOT_USED,\n                ],\n            ),\n            create_contract_package_at_hash: HostFunction::default(),\n            create_contract_user_group: HostFunction::default(),\n            add_package_version_with_message_topics: HostFunction::new(\n                DEFAULT_FIXED_COST,\n                [\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    DEFAULT_CONTRACT_VERSION_ARG_SIZE_WEIGHT,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    DEFAULT_MESSAGE_TOPIC_NAME_SIZE_WEIGHT,\n                    NOT_USED,\n                    NOT_USED,\n                ],\n            ),\n            add_contract_version: HostFunction::new(\n                DEFAULT_FIXED_COST,\n                [\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    DEFAULT_CONTRACT_VERSION_ARG_SIZE_WEIGHT,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                ],\n            ),\n            add_contract_version_with_message_topics: HostFunction::new(\n                DEFAULT_FIXED_COST,\n                [\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    DEFAULT_CONTRACT_VERSION_ARG_SIZE_WEIGHT,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    DEFAULT_MESSAGE_TOPIC_NAME_SIZE_WEIGHT,\n                    NOT_USED,\n                    NOT_USED,\n                ],\n            ),\n            
disable_contract_version: HostFunction::default(),\n            call_contract: HostFunction::new(\n                DEFAULT_CALL_CONTRACT_COST,\n                [\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    DEFAULT_ARG_CHARGE,\n                    NOT_USED,\n                    DEFAULT_ARG_CHARGE,\n                    NOT_USED,\n                ],\n            ),\n            call_versioned_contract: HostFunction::new(\n                DEFAULT_CALL_CONTRACT_COST,\n                [\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    DEFAULT_ARG_CHARGE,\n                    NOT_USED,\n                    DEFAULT_ARG_CHARGE,\n                    NOT_USED,\n                ],\n            ),\n            get_named_arg_size: HostFunction::default(),\n            get_named_arg: HostFunction::new(\n                200,\n                [NOT_USED, DEFAULT_ARG_CHARGE, NOT_USED, DEFAULT_ARG_CHARGE],\n            ),\n            remove_contract_user_group: HostFunction::default(),\n            provision_contract_user_group_uref: HostFunction::default(),\n            remove_contract_user_group_urefs: HostFunction::new(\n                200,\n                [\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    DEFAULT_ARG_CHARGE,\n                ],\n            ),\n            print: HostFunction::new(\n                DEFAULT_PRINT_COST,\n                [NOT_USED, DEFAULT_PRINT_TEXT_SIZE_WEIGHT],\n            ),\n            blake2b: HostFunction::new(\n                DEFAULT_BLAKE2B_COST,\n                [NOT_USED, DEFAULT_ARG_CHARGE, NOT_USED, NOT_USED],\n            ),\n            random_bytes: HostFunction::default(),\n            enable_contract_version: 
HostFunction::default(),\n            manage_message_topic: HostFunction::new(\n                DEFAULT_FIXED_COST,\n                [\n                    NOT_USED,\n                    DEFAULT_MESSAGE_TOPIC_NAME_SIZE_WEIGHT,\n                    NOT_USED,\n                    NOT_USED,\n                ],\n            ),\n            emit_message: HostFunction::new(\n                DEFAULT_FIXED_COST,\n                [\n                    NOT_USED,\n                    DEFAULT_MESSAGE_TOPIC_NAME_SIZE_WEIGHT,\n                    NOT_USED,\n                    DEFAULT_MESSAGE_PAYLOAD_SIZE_WEIGHT,\n                ],\n            ),\n            generic_hash: HostFunction::new(\n                DEFAULT_GENERIC_HASH_COST,\n                [\n                    NOT_USED,\n                    DEFAULT_GENERIC_HASH_INPUT_COST,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                ],\n            ),\n            cost_increase_per_message: DEFAULT_COST_INCREASE_PER_MESSAGE_EMITTED,\n            get_block_info: HostFunction::new(DEFAULT_GET_BLOCKTIME_COST, [NOT_USED, NOT_USED]),\n            recover_secp256k1: HostFunction::new(\n                DEFAULT_RECOVER_SECP256K1_COST,\n                [\n                    NOT_USED,\n                    DEFAULT_RECOVER_SECP256K1_SIZE_WEIGHT,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                ],\n            ),\n            verify_signature: HostFunction::new(\n                DEFAULT_VERIFY_SIGNATURE_COST,\n                [\n                    NOT_USED,\n                    DEFAULT_VERIFY_SIGNATURE_SIZE_WEIGHT,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                ],\n            ),\n            call_package_version: HostFunction::new(\n                DEFAULT_CALL_CONTRACT_COST,\n               
 [\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    DEFAULT_ARG_CHARGE,\n                    NOT_USED,\n                    DEFAULT_ARG_CHARGE,\n                    NOT_USED,\n                ],\n            ),\n        }\n    }\n}\n\nimpl ToBytes for HostFunctionCostsV1 {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut ret = bytesrepr::unchecked_allocate_buffer(self);\n        ret.append(&mut self.read_value.to_bytes()?);\n        ret.append(&mut self.dictionary_get.to_bytes()?);\n        ret.append(&mut self.write.to_bytes()?);\n        ret.append(&mut self.dictionary_put.to_bytes()?);\n        ret.append(&mut self.add.to_bytes()?);\n        ret.append(&mut self.new_uref.to_bytes()?);\n        ret.append(&mut self.load_named_keys.to_bytes()?);\n        ret.append(&mut self.ret.to_bytes()?);\n        ret.append(&mut self.get_key.to_bytes()?);\n        ret.append(&mut self.has_key.to_bytes()?);\n        ret.append(&mut self.put_key.to_bytes()?);\n        ret.append(&mut self.remove_key.to_bytes()?);\n        ret.append(&mut self.revert.to_bytes()?);\n        ret.append(&mut self.is_valid_uref.to_bytes()?);\n        ret.append(&mut self.add_associated_key.to_bytes()?);\n        ret.append(&mut self.remove_associated_key.to_bytes()?);\n        ret.append(&mut self.update_associated_key.to_bytes()?);\n        ret.append(&mut self.set_action_threshold.to_bytes()?);\n        ret.append(&mut self.get_caller.to_bytes()?);\n        ret.append(&mut self.get_blocktime.to_bytes()?);\n        ret.append(&mut self.create_purse.to_bytes()?);\n        ret.append(&mut self.transfer_to_account.to_bytes()?);\n        ret.append(&mut self.transfer_from_purse_to_account.to_bytes()?);\n        ret.append(&mut self.transfer_from_purse_to_purse.to_bytes()?);\n        
ret.append(&mut self.get_balance.to_bytes()?);\n        ret.append(&mut self.get_phase.to_bytes()?);\n        ret.append(&mut self.get_system_contract.to_bytes()?);\n        ret.append(&mut self.get_main_purse.to_bytes()?);\n        ret.append(&mut self.read_host_buffer.to_bytes()?);\n        ret.append(&mut self.create_contract_package_at_hash.to_bytes()?);\n        ret.append(&mut self.create_contract_user_group.to_bytes()?);\n        ret.append(&mut self.add_contract_version_with_message_topics.to_bytes()?);\n        ret.append(&mut self.add_contract_version.to_bytes()?);\n        ret.append(&mut self.add_package_version_with_message_topics.to_bytes()?);\n        ret.append(&mut self.disable_contract_version.to_bytes()?);\n        ret.append(&mut self.call_contract.to_bytes()?);\n        ret.append(&mut self.call_versioned_contract.to_bytes()?);\n        ret.append(&mut self.get_named_arg_size.to_bytes()?);\n        ret.append(&mut self.get_named_arg.to_bytes()?);\n        ret.append(&mut self.remove_contract_user_group.to_bytes()?);\n        ret.append(&mut self.provision_contract_user_group_uref.to_bytes()?);\n        ret.append(&mut self.remove_contract_user_group_urefs.to_bytes()?);\n        ret.append(&mut self.print.to_bytes()?);\n        ret.append(&mut self.blake2b.to_bytes()?);\n        ret.append(&mut self.random_bytes.to_bytes()?);\n        ret.append(&mut self.enable_contract_version.to_bytes()?);\n        ret.append(&mut self.manage_message_topic.to_bytes()?);\n        ret.append(&mut self.emit_message.to_bytes()?);\n        ret.append(&mut self.cost_increase_per_message.to_bytes()?);\n        ret.append(&mut self.get_block_info.to_bytes()?);\n        ret.append(&mut self.generic_hash.to_bytes()?);\n        ret.append(&mut self.recover_secp256k1.to_bytes()?);\n        ret.append(&mut self.verify_signature.to_bytes()?);\n        ret.append(&mut self.call_package_version.to_bytes()?);\n        Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize 
{\n        self.read_value.serialized_length()\n            + self.dictionary_get.serialized_length()\n            + self.write.serialized_length()\n            + self.dictionary_put.serialized_length()\n            + self.add.serialized_length()\n            + self.new_uref.serialized_length()\n            + self.load_named_keys.serialized_length()\n            + self.ret.serialized_length()\n            + self.get_key.serialized_length()\n            + self.has_key.serialized_length()\n            + self.put_key.serialized_length()\n            + self.remove_key.serialized_length()\n            + self.revert.serialized_length()\n            + self.is_valid_uref.serialized_length()\n            + self.add_associated_key.serialized_length()\n            + self.remove_associated_key.serialized_length()\n            + self.update_associated_key.serialized_length()\n            + self.set_action_threshold.serialized_length()\n            + self.get_caller.serialized_length()\n            + self.get_blocktime.serialized_length()\n            + self.create_purse.serialized_length()\n            + self.transfer_to_account.serialized_length()\n            + self.transfer_from_purse_to_account.serialized_length()\n            + self.transfer_from_purse_to_purse.serialized_length()\n            + self.get_balance.serialized_length()\n            + self.get_phase.serialized_length()\n            + self.get_system_contract.serialized_length()\n            + self.get_main_purse.serialized_length()\n            + self.read_host_buffer.serialized_length()\n            + self.create_contract_package_at_hash.serialized_length()\n            + self.create_contract_user_group.serialized_length()\n            + self\n                .add_contract_version_with_message_topics\n                .serialized_length()\n            + self.add_contract_version.serialized_length()\n            + self\n                .add_package_version_with_message_topics\n                
.serialized_length()\n            + self.disable_contract_version.serialized_length()\n            + self.call_contract.serialized_length()\n            + self.call_versioned_contract.serialized_length()\n            + self.get_named_arg_size.serialized_length()\n            + self.get_named_arg.serialized_length()\n            + self.remove_contract_user_group.serialized_length()\n            + self.provision_contract_user_group_uref.serialized_length()\n            + self.remove_contract_user_group_urefs.serialized_length()\n            + self.print.serialized_length()\n            + self.blake2b.serialized_length()\n            + self.random_bytes.serialized_length()\n            + self.enable_contract_version.serialized_length()\n            + self.manage_message_topic.serialized_length()\n            + self.emit_message.serialized_length()\n            + self.cost_increase_per_message.serialized_length()\n            + self.get_block_info.serialized_length()\n            + self.generic_hash.serialized_length()\n            + self.recover_secp256k1.serialized_length()\n            + self.verify_signature.serialized_length()\n            + self.call_package_version.serialized_length()\n    }\n}\n\nimpl FromBytes for HostFunctionCostsV1 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (read_value, rem) = FromBytes::from_bytes(bytes)?;\n        let (dictionary_get, rem) = FromBytes::from_bytes(rem)?;\n        let (write, rem) = FromBytes::from_bytes(rem)?;\n        let (dictionary_put, rem) = FromBytes::from_bytes(rem)?;\n        let (add, rem) = FromBytes::from_bytes(rem)?;\n        let (new_uref, rem) = FromBytes::from_bytes(rem)?;\n        let (load_named_keys, rem) = FromBytes::from_bytes(rem)?;\n        let (ret, rem) = FromBytes::from_bytes(rem)?;\n        let (get_key, rem) = FromBytes::from_bytes(rem)?;\n        let (has_key, rem) = FromBytes::from_bytes(rem)?;\n        let (put_key, rem) = 
FromBytes::from_bytes(rem)?;\n        let (remove_key, rem) = FromBytes::from_bytes(rem)?;\n        let (revert, rem) = FromBytes::from_bytes(rem)?;\n        let (is_valid_uref, rem) = FromBytes::from_bytes(rem)?;\n        let (add_associated_key, rem) = FromBytes::from_bytes(rem)?;\n        let (remove_associated_key, rem) = FromBytes::from_bytes(rem)?;\n        let (update_associated_key, rem) = FromBytes::from_bytes(rem)?;\n        let (set_action_threshold, rem) = FromBytes::from_bytes(rem)?;\n        let (get_caller, rem) = FromBytes::from_bytes(rem)?;\n        let (get_blocktime, rem) = FromBytes::from_bytes(rem)?;\n        let (create_purse, rem) = FromBytes::from_bytes(rem)?;\n        let (transfer_to_account, rem) = FromBytes::from_bytes(rem)?;\n        let (transfer_from_purse_to_account, rem) = FromBytes::from_bytes(rem)?;\n        let (transfer_from_purse_to_purse, rem) = FromBytes::from_bytes(rem)?;\n        let (get_balance, rem) = FromBytes::from_bytes(rem)?;\n        let (get_phase, rem) = FromBytes::from_bytes(rem)?;\n        let (get_system_contract, rem) = FromBytes::from_bytes(rem)?;\n        let (get_main_purse, rem) = FromBytes::from_bytes(rem)?;\n        let (read_host_buffer, rem) = FromBytes::from_bytes(rem)?;\n        let (create_contract_package_at_hash, rem) = FromBytes::from_bytes(rem)?;\n        let (create_contract_user_group, rem) = FromBytes::from_bytes(rem)?;\n        let (add_contract_version_with_message_topics, rem) = FromBytes::from_bytes(rem)?;\n        let (add_contract_version, rem) = FromBytes::from_bytes(rem)?;\n        let (add_package_version_with_message_topics, rem) = FromBytes::from_bytes(rem)?;\n        let (disable_contract_version, rem) = FromBytes::from_bytes(rem)?;\n        let (call_contract, rem) = FromBytes::from_bytes(rem)?;\n        let (call_versioned_contract, rem) = FromBytes::from_bytes(rem)?;\n        let (get_named_arg_size, rem) = FromBytes::from_bytes(rem)?;\n        let (get_named_arg, rem) = 
FromBytes::from_bytes(rem)?;\n        let (remove_contract_user_group, rem) = FromBytes::from_bytes(rem)?;\n        let (provision_contract_user_group_uref, rem) = FromBytes::from_bytes(rem)?;\n        let (remove_contract_user_group_urefs, rem) = FromBytes::from_bytes(rem)?;\n        let (print, rem) = FromBytes::from_bytes(rem)?;\n        let (blake2b, rem) = FromBytes::from_bytes(rem)?;\n        let (random_bytes, rem) = FromBytes::from_bytes(rem)?;\n        let (enable_contract_version, rem) = FromBytes::from_bytes(rem)?;\n        let (manage_message_topic, rem) = FromBytes::from_bytes(rem)?;\n        let (emit_message, rem) = FromBytes::from_bytes(rem)?;\n        let (cost_increase_per_message, rem) = FromBytes::from_bytes(rem)?;\n        let (get_block_info, rem) = FromBytes::from_bytes(rem)?;\n        let (generic_hash, rem) = FromBytes::from_bytes(rem)?;\n        let (recover_secp256k1, rem) = FromBytes::from_bytes(rem)?;\n        let (verify_signature, rem) = FromBytes::from_bytes(rem)?;\n        let (call_package_version, rem) = FromBytes::from_bytes(rem)?;\n        Ok((\n            HostFunctionCostsV1 {\n                read_value,\n                dictionary_get,\n                write,\n                dictionary_put,\n                add,\n                new_uref,\n                load_named_keys,\n                ret,\n                get_key,\n                has_key,\n                put_key,\n                remove_key,\n                revert,\n                is_valid_uref,\n                add_associated_key,\n                remove_associated_key,\n                update_associated_key,\n                set_action_threshold,\n                get_caller,\n                get_blocktime,\n                create_purse,\n                transfer_to_account,\n                transfer_from_purse_to_account,\n                transfer_from_purse_to_purse,\n                get_balance,\n                get_phase,\n                
get_system_contract,\n                get_main_purse,\n                read_host_buffer,\n                create_contract_package_at_hash,\n                create_contract_user_group,\n                add_contract_version_with_message_topics,\n                add_contract_version,\n                add_package_version_with_message_topics,\n                disable_contract_version,\n                call_contract,\n                call_versioned_contract,\n                get_named_arg_size,\n                get_named_arg,\n                remove_contract_user_group,\n                provision_contract_user_group_uref,\n                remove_contract_user_group_urefs,\n                print,\n                blake2b,\n                random_bytes,\n                enable_contract_version,\n                manage_message_topic,\n                emit_message,\n                cost_increase_per_message,\n                get_block_info,\n                generic_hash,\n                recover_secp256k1,\n                verify_signature,\n                call_package_version,\n            },\n            rem,\n        ))\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<HostFunctionCostsV1> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> HostFunctionCostsV1 {\n        HostFunctionCostsV1 {\n            read_value: rng.gen(),\n            dictionary_get: rng.gen(),\n            write: rng.gen(),\n            dictionary_put: rng.gen(),\n            add: rng.gen(),\n            new_uref: rng.gen(),\n            load_named_keys: rng.gen(),\n            ret: rng.gen(),\n            get_key: rng.gen(),\n            has_key: rng.gen(),\n            put_key: rng.gen(),\n            remove_key: rng.gen(),\n            revert: rng.gen(),\n            is_valid_uref: rng.gen(),\n            add_associated_key: rng.gen(),\n            remove_associated_key: rng.gen(),\n            update_associated_key: rng.gen(),\n            
set_action_threshold: rng.gen(),\n            get_caller: rng.gen(),\n            get_blocktime: rng.gen(),\n            create_purse: rng.gen(),\n            transfer_to_account: rng.gen(),\n            transfer_from_purse_to_account: rng.gen(),\n            transfer_from_purse_to_purse: rng.gen(),\n            get_balance: rng.gen(),\n            get_phase: rng.gen(),\n            get_system_contract: rng.gen(),\n            get_main_purse: rng.gen(),\n            read_host_buffer: rng.gen(),\n            create_contract_package_at_hash: rng.gen(),\n            create_contract_user_group: rng.gen(),\n            add_contract_version_with_message_topics: rng.gen(),\n            add_contract_version: rng.gen(),\n            add_package_version_with_message_topics: rng.gen(),\n            disable_contract_version: rng.gen(),\n            call_contract: rng.gen(),\n            call_versioned_contract: rng.gen(),\n            get_named_arg_size: rng.gen(),\n            get_named_arg: rng.gen(),\n            remove_contract_user_group: rng.gen(),\n            provision_contract_user_group_uref: rng.gen(),\n            remove_contract_user_group_urefs: rng.gen(),\n            print: rng.gen(),\n            blake2b: rng.gen(),\n            random_bytes: rng.gen(),\n            enable_contract_version: rng.gen(),\n            manage_message_topic: rng.gen(),\n            emit_message: rng.gen(),\n            cost_increase_per_message: rng.gen(),\n            get_block_info: rng.gen(),\n            generic_hash: rng.gen(),\n            recover_secp256k1: rng.gen(),\n            verify_signature: rng.gen(),\n            call_package_version: rng.gen(),\n        }\n    }\n}\n\n#[doc(hidden)]\n#[cfg(any(feature = \"gens\", test))]\npub mod gens {\n    use proptest::{num, prelude::*};\n\n    use crate::{HostFunction, HostFunctionCost, HostFunctionCostsV1};\n\n    #[allow(unused)]\n    pub fn host_function_cost_arb<T: Copy + Arbitrary>() -> impl Strategy<Value = 
HostFunction<T>> {\n        (any::<HostFunctionCost>(), any::<T>())\n            .prop_map(|(cost, arguments)| HostFunction::new(cost, arguments))\n    }\n\n    prop_compose! {\n        pub fn host_function_costs_arb() (\n            read_value in host_function_cost_arb(),\n            dictionary_get in host_function_cost_arb(),\n            write in host_function_cost_arb(),\n            dictionary_put in host_function_cost_arb(),\n            add in host_function_cost_arb(),\n            new_uref in host_function_cost_arb(),\n            load_named_keys in host_function_cost_arb(),\n            ret in host_function_cost_arb(),\n            get_key in host_function_cost_arb(),\n            has_key in host_function_cost_arb(),\n            put_key in host_function_cost_arb(),\n            remove_key in host_function_cost_arb(),\n            revert in host_function_cost_arb(),\n            is_valid_uref in host_function_cost_arb(),\n            add_associated_key in host_function_cost_arb(),\n            remove_associated_key in host_function_cost_arb(),\n            update_associated_key in host_function_cost_arb(),\n            set_action_threshold in host_function_cost_arb(),\n            get_caller in host_function_cost_arb(),\n            get_blocktime in host_function_cost_arb(),\n            create_purse in host_function_cost_arb(),\n            transfer_to_account in host_function_cost_arb(),\n            transfer_from_purse_to_account in host_function_cost_arb(),\n            transfer_from_purse_to_purse in host_function_cost_arb(),\n            get_balance in host_function_cost_arb(),\n            get_phase in host_function_cost_arb(),\n            get_system_contract in host_function_cost_arb(),\n            get_main_purse in host_function_cost_arb(),\n            read_host_buffer in host_function_cost_arb(),\n            create_contract_package_at_hash in host_function_cost_arb(),\n            create_contract_user_group in host_function_cost_arb(),\n     
       add_contract_version_with_message_topics in host_function_cost_arb(),\n            add_contract_version in host_function_cost_arb(),\n            add_package_version_with_message_topics in host_function_cost_arb(),\n            disable_contract_version in host_function_cost_arb(),\n            call_contract in host_function_cost_arb(),\n            call_versioned_contract in host_function_cost_arb(),\n            get_named_arg_size in host_function_cost_arb(),\n            get_named_arg in host_function_cost_arb(),\n            remove_contract_user_group in host_function_cost_arb(),\n            provision_contract_user_group_uref in host_function_cost_arb(),\n            remove_contract_user_group_urefs in host_function_cost_arb(),\n            print in host_function_cost_arb(),\n            blake2b in host_function_cost_arb(),\n            random_bytes in host_function_cost_arb(),\n            enable_contract_version in host_function_cost_arb(),\n            manage_message_topic in host_function_cost_arb(),\n            emit_message in host_function_cost_arb(),\n            cost_increase_per_message in num::u32::ANY,\n            get_block_info in host_function_cost_arb(),\n            generic_hash in host_function_cost_arb(),\n            recover_secp256k1 in host_function_cost_arb(),\n            verify_signature in host_function_cost_arb(),\n            call_package_version in host_function_cost_arb(),\n        ) -> HostFunctionCostsV1 {\n            HostFunctionCostsV1 {\n                read_value,\n                dictionary_get,\n                write,\n                dictionary_put,\n                add,\n                new_uref,\n                load_named_keys,\n                ret,\n                get_key,\n                has_key,\n                put_key,\n                remove_key,\n                revert,\n                is_valid_uref,\n                add_associated_key,\n                remove_associated_key,\n                
update_associated_key,\n                set_action_threshold,\n                get_caller,\n                get_blocktime,\n                create_purse,\n                transfer_to_account,\n                transfer_from_purse_to_account,\n                transfer_from_purse_to_purse,\n                get_balance,\n                get_phase,\n                get_system_contract,\n                get_main_purse,\n                read_host_buffer,\n                create_contract_package_at_hash,\n                create_contract_user_group,\n                add_contract_version_with_message_topics,\n                add_contract_version,\n                add_package_version_with_message_topics,\n                disable_contract_version,\n                call_contract,\n                call_versioned_contract,\n                get_named_arg_size,\n                get_named_arg,\n                remove_contract_user_group,\n                provision_contract_user_group_uref,\n                remove_contract_user_group_urefs,\n                print,\n                blake2b,\n                random_bytes,\n                enable_contract_version,\n                manage_message_topic,\n                emit_message,\n                cost_increase_per_message,\n                get_block_info,\n                generic_hash,\n                recover_secp256k1,\n                verify_signature,\n                call_package_version,\n            }\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::U512;\n\n    use super::*;\n\n    const COST: Cost = 42;\n    const ARGUMENT_COSTS: [Cost; 3] = [123, 456, 789];\n    const WEIGHTS: [Cost; 3] = [1000, 1100, 1200];\n\n    #[test]\n    fn calculate_gas_cost_for_host_function() {\n        let host_function = HostFunction::new(COST, ARGUMENT_COSTS);\n        let expected_cost = COST\n            + (ARGUMENT_COSTS[0] * WEIGHTS[0])\n            + (ARGUMENT_COSTS[1] * WEIGHTS[1])\n            + (ARGUMENT_COSTS[2] * 
WEIGHTS[2]);\n        assert_eq!(\n            host_function.calculate_gas_cost(WEIGHTS),\n            Some(Gas::new(expected_cost))\n        );\n    }\n\n    #[test]\n    fn calculate_gas_cost_would_overflow() {\n        let large_value = Cost::MAX;\n\n        let host_function = HostFunction::new(\n            large_value,\n            [large_value, large_value, large_value, large_value],\n        );\n\n        let lhs =\n            host_function.calculate_gas_cost([large_value, large_value, large_value, large_value]);\n\n        let large_value = U512::from(large_value);\n        let rhs = large_value + (U512::from(4) * large_value * large_value);\n\n        assert_eq!(lhs, Some(Gas::new(rhs)));\n    }\n}\n\n#[cfg(test)]\nmod proptests {\n    use proptest::prelude::*;\n\n    use crate::bytesrepr;\n\n    use super::*;\n\n    type Signature = [Cost; 10];\n\n    proptest! {\n        #[test]\n        fn test_host_function(host_function in gens::host_function_cost_arb::<Signature>()) {\n            bytesrepr::test_serialization_roundtrip(&host_function);\n        }\n\n        #[test]\n        fn test_host_function_costs(host_function_costs in gens::host_function_costs_arb()) {\n            bytesrepr::test_serialization_roundtrip(&host_function_costs);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/vm_config/host_function_costs_v2.rs",
    "content": "#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{distributions::Standard, prelude::Distribution, Rng};\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes, U64_SERIALIZED_LENGTH},\n    Gas,\n};\n\n/// Representation of argument's cost.\npub type Cost = u64;\n\n/// Representation of a host function cost.\n///\n/// The total gas cost is equal to `cost` + sum of each argument weight multiplied by the byte size\n/// of the data.\n///\n/// NOTE: This is duplicating the `HostFunction` struct from the `casper-types` crate\n/// but to avoid changing the public API of that crate, we are creating a new struct\n/// with the same name and fields.\n///\n/// There is some opportunity to unify the code to turn `HostFunction` into a generic struct\n/// that generalizes over the cost type, but that would require a lot of work and\n/// is not worth it at this time.\n#[derive(Copy, Clone, PartialEq, Eq, Deserialize, Serialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct HostFunctionV2<T> {\n    /// How much the user is charged for calling the host function.\n    cost: Cost,\n    /// Weights of the function arguments.\n    arguments: T,\n}\n\nimpl<T> Default for HostFunctionV2<T>\nwhere\n    T: Default,\n{\n    fn default() -> Self {\n        Self {\n            cost: DEFAULT_FIXED_COST,\n            arguments: T::default(),\n        }\n    }\n}\n\nimpl<T> HostFunctionV2<T> {\n    /// Creates a new instance of `HostFunction` with a fixed call cost and argument weights.\n    pub const fn new(cost: Cost, arguments: T) -> Self {\n        Self { cost, arguments }\n    }\n\n    pub fn with_new_static_cost(self, cost: Cost) -> Self {\n        Self {\n            cost,\n            arguments: self.arguments,\n        }\n    }\n\n    /// Returns the base gas fee for calling the host function.\n    pub fn 
cost(&self) -> Cost {\n        self.cost\n    }\n}\n\nimpl<T> HostFunctionV2<T>\nwhere\n    T: Default,\n{\n    /// Creates a new fixed host function cost with argument weights of zero.\n    pub fn fixed(cost: Cost) -> Self {\n        Self {\n            cost,\n            ..Default::default()\n        }\n    }\n\n    pub fn zero() -> Self {\n        Self {\n            cost: Default::default(),\n            arguments: Default::default(),\n        }\n    }\n}\n\nimpl<T> HostFunctionV2<T>\nwhere\n    T: AsRef<[Cost]>,\n{\n    /// Returns a slice containing the argument weights.\n    pub fn arguments(&self) -> &[Cost] {\n        self.arguments.as_ref()\n    }\n\n    /// Calculate gas cost for a host function\n    pub fn calculate_gas_cost(&self, weights: T) -> Option<Gas> {\n        let mut gas = Gas::new(self.cost);\n        for (argument, weight) in self.arguments.as_ref().iter().zip(weights.as_ref()) {\n            let lhs = Gas::new(*argument);\n            let rhs = Gas::new(*weight);\n            let product = lhs.checked_mul(rhs)?;\n            gas = gas.checked_add(product)?;\n        }\n        Some(gas)\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl<T> Distribution<HostFunctionV2<T>> for Standard\nwhere\n    Standard: Distribution<T>,\n    T: AsMut<[Cost]> + Default,\n{\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> HostFunctionV2<T> {\n        let cost = rng.gen::<u32>() as u64;\n        let mut arguments = T::default();\n        for arg in arguments.as_mut() {\n            *arg = rng.gen::<u32>() as u64;\n        }\n\n        HostFunctionV2::new(cost, arguments)\n    }\n}\n\nimpl<T> ToBytes for HostFunctionV2<T>\nwhere\n    T: AsRef<[Cost]>,\n{\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut ret = bytesrepr::unchecked_allocate_buffer(self);\n        ret.append(&mut self.cost.to_bytes()?);\n        for value in self.arguments.as_ref().iter() {\n            ret.append(&mut value.to_bytes()?);\n        }\n 
       Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.cost.serialized_length() + (U64_SERIALIZED_LENGTH * self.arguments.as_ref().len())\n    }\n}\n\nimpl<T> FromBytes for HostFunctionV2<T>\nwhere\n    T: Default + AsMut<[Cost]>,\n{\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (cost, mut bytes) = FromBytes::from_bytes(bytes)?;\n        let mut arguments = T::default();\n        let arguments_mut = arguments.as_mut();\n        for ith_argument in arguments_mut {\n            let (cost, rem) = FromBytes::from_bytes(bytes)?;\n            *ith_argument = cost;\n            bytes = rem;\n        }\n        Ok((Self { cost, arguments }, bytes))\n    }\n}\n/// An identifier that represents an unused argument.\nconst NOT_USED: Cost = 0;\n\n/// An arbitrary default fixed cost for host functions that were not researched yet.\nconst DEFAULT_FIXED_COST: Cost = 200;\n\nconst DEFAULT_CALL_COST: u64 = 10_000;\nconst DEFAULT_ENV_BALANCE_COST: u64 = 100;\n\nconst DEFAULT_PRINT_COST: Cost = 100;\n\nconst DEFAULT_READ_COST: Cost = 1_000;\nconst DEFAULT_READ_KEY_SIZE_WEIGHT: Cost = 100;\n\nconst DEFAULT_RET_COST: Cost = 300;\nconst DEFAULT_RET_VALUE_SIZE_WEIGHT: Cost = 100;\n\nconst DEFAULT_TRANSFER_COST: Cost = 2_500_000_000;\n\nconst DEFAULT_WRITE_COST: Cost = 25_000;\nconst DEFAULT_WRITE_SIZE_WEIGHT: Cost = 100_000;\n\nconst DEFAULT_REMOVE_COST: Cost = 15_000;\n\nconst DEFAULT_COPY_INPUT_COST: Cost = 300;\nconst DEFAULT_COPY_INPUT_VALUE_SIZE_WEIGHT: Cost = 0;\n\nconst DEFAULT_CREATE_COST: Cost = 0;\nconst DEFAULT_CREATE_CODE_SIZE_WEIGHT: Cost = 0;\nconst DEFAULT_CREATE_ENTRYPOINT_SIZE_WEIGHT: Cost = 0;\nconst DEFAULT_CREATE_INPUT_SIZE_WEIGHT: Cost = 0;\nconst DEFAULT_CREATE_SEED_SIZE_WEIGHT: Cost = 0;\n\nconst DEFAULT_EMIT_COST: Cost = 200;\nconst DEFAULT_EMIT_TOPIC_SIZE_WEIGHT: Cost = 100;\nconst DEFAULT_EMIT_PAYLOAD_SIZE_HEIGHT: Cost = 100;\n\nconst DEFAULT_ENV_INFO_COST: Cost = 10_000;\n\n/// Definition of 
a host function cost table.\n#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct HostFunctionCostsV2 {\n    /// Cost of calling the `read` host function.\n    pub read: HostFunctionV2<[Cost; 6]>,\n    /// Cost of calling the `write` host function.\n    pub write: HostFunctionV2<[Cost; 5]>,\n    /// Cost of calling the `remove` host function.\n    pub remove: HostFunctionV2<[Cost; 3]>,\n    /// Cost of calling the `copy_input` host function.\n    pub copy_input: HostFunctionV2<[Cost; 2]>,\n    /// Cost of calling the `ret` host function.\n    pub ret: HostFunctionV2<[Cost; 2]>,\n    /// Cost of calling the `create` host function.\n    pub create: HostFunctionV2<[Cost; 10]>,\n    /// Cost of calling the `transfer` host function.\n    pub transfer: HostFunctionV2<[Cost; 3]>,\n    /// Cost of calling the `env_balance` host function.\n    pub env_balance: HostFunctionV2<[Cost; 4]>,\n    /// Cost of calling the `upgrade` host function.\n    pub upgrade: HostFunctionV2<[Cost; 6]>,\n    /// Cost of calling the `call` host function.\n    pub call: HostFunctionV2<[Cost; 9]>,\n    /// Cost of calling the `print` host function.\n    pub print: HostFunctionV2<[Cost; 2]>,\n    /// Cost of calling the `emit` host function.\n    pub emit: HostFunctionV2<[Cost; 4]>,\n    /// Cost of calling the `env_info` host function.\n    pub env_info: HostFunctionV2<[Cost; 2]>,\n}\n\nimpl HostFunctionCostsV2 {\n    pub fn zero() -> Self {\n        Self {\n            read: HostFunctionV2::zero(),\n            write: HostFunctionV2::zero(),\n            remove: HostFunctionV2::zero(),\n            copy_input: HostFunctionV2::zero(),\n            ret: HostFunctionV2::zero(),\n            create: HostFunctionV2::zero(),\n            transfer: HostFunctionV2::zero(),\n            env_balance: HostFunctionV2::zero(),\n            upgrade: HostFunctionV2::zero(),\n            call: 
HostFunctionV2::zero(),\n            print: HostFunctionV2::zero(),\n            emit: HostFunctionV2::zero(),\n            env_info: HostFunctionV2::zero(),\n        }\n    }\n}\n\nimpl Default for HostFunctionCostsV2 {\n    fn default() -> Self {\n        Self {\n            read: HostFunctionV2::new(\n                DEFAULT_READ_COST,\n                [\n                    NOT_USED,\n                    NOT_USED,\n                    DEFAULT_READ_KEY_SIZE_WEIGHT,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                ],\n            ),\n            write: HostFunctionV2::new(\n                DEFAULT_WRITE_COST,\n                [\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    NOT_USED,\n                    DEFAULT_WRITE_SIZE_WEIGHT,\n                ],\n            ),\n            remove: HostFunctionV2::new(DEFAULT_REMOVE_COST, [NOT_USED, NOT_USED, NOT_USED]),\n            copy_input: HostFunctionV2::new(\n                DEFAULT_COPY_INPUT_COST,\n                [NOT_USED, DEFAULT_COPY_INPUT_VALUE_SIZE_WEIGHT],\n            ),\n            ret: HostFunctionV2::new(DEFAULT_RET_COST, [NOT_USED, DEFAULT_RET_VALUE_SIZE_WEIGHT]),\n            create: HostFunctionV2::new(\n                DEFAULT_CREATE_COST,\n                [\n                    NOT_USED,\n                    DEFAULT_CREATE_CODE_SIZE_WEIGHT,\n                    NOT_USED,\n                    NOT_USED,\n                    DEFAULT_CREATE_ENTRYPOINT_SIZE_WEIGHT,\n                    NOT_USED,\n                    DEFAULT_CREATE_INPUT_SIZE_WEIGHT,\n                    NOT_USED,\n                    DEFAULT_CREATE_SEED_SIZE_WEIGHT,\n                    NOT_USED,\n                ],\n            ),\n            env_balance: HostFunctionV2::fixed(DEFAULT_ENV_BALANCE_COST),\n            transfer: HostFunctionV2::new(DEFAULT_TRANSFER_COST, [NOT_USED, NOT_USED, NOT_USED]),\n        
    upgrade: HostFunctionV2::new(\n                DEFAULT_FIXED_COST,\n                [NOT_USED, NOT_USED, NOT_USED, NOT_USED, NOT_USED, NOT_USED],\n            ),\n            call: HostFunctionV2::new(\n                DEFAULT_CALL_COST,\n                [\n                    NOT_USED, NOT_USED, NOT_USED, NOT_USED, NOT_USED, NOT_USED, NOT_USED, NOT_USED,\n                    NOT_USED,\n                ],\n            ),\n            print: HostFunctionV2::new(DEFAULT_PRINT_COST, [NOT_USED, NOT_USED]),\n            emit: HostFunctionV2::new(\n                DEFAULT_EMIT_COST,\n                [\n                    NOT_USED,\n                    DEFAULT_EMIT_TOPIC_SIZE_WEIGHT,\n                    NOT_USED,\n                    DEFAULT_EMIT_PAYLOAD_SIZE_HEIGHT,\n                ],\n            ),\n            env_info: HostFunctionV2::new(DEFAULT_ENV_INFO_COST, [NOT_USED, NOT_USED]),\n        }\n    }\n}\n\nimpl ToBytes for HostFunctionCostsV2 {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut ret = bytesrepr::unchecked_allocate_buffer(self);\n        ret.append(&mut self.read.to_bytes()?);\n        ret.append(&mut self.write.to_bytes()?);\n        ret.append(&mut self.remove.to_bytes()?);\n        ret.append(&mut self.copy_input.to_bytes()?);\n        ret.append(&mut self.ret.to_bytes()?);\n        ret.append(&mut self.create.to_bytes()?);\n        ret.append(&mut self.transfer.to_bytes()?);\n        ret.append(&mut self.env_balance.to_bytes()?);\n        ret.append(&mut self.upgrade.to_bytes()?);\n        ret.append(&mut self.call.to_bytes()?);\n        ret.append(&mut self.print.to_bytes()?);\n        ret.append(&mut self.emit.to_bytes()?);\n        ret.append(&mut self.env_info.to_bytes()?);\n        Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.read.serialized_length()\n            + self.write.serialized_length()\n            + self.remove.serialized_length()\n            + 
self.copy_input.serialized_length()\n            + self.ret.serialized_length()\n            + self.create.serialized_length()\n            + self.transfer.serialized_length()\n            + self.env_balance.serialized_length()\n            + self.upgrade.serialized_length()\n            + self.call.serialized_length()\n            + self.print.serialized_length()\n            + self.emit.serialized_length()\n            + self.env_info.serialized_length()\n    }\n}\n\nimpl FromBytes for HostFunctionCostsV2 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (read, rem) = FromBytes::from_bytes(bytes)?;\n        let (write, rem) = FromBytes::from_bytes(rem)?;\n        let (remove, rem) = FromBytes::from_bytes(rem)?;\n        let (copy_input, rem) = FromBytes::from_bytes(rem)?;\n        let (ret, rem) = FromBytes::from_bytes(rem)?;\n        let (create, rem) = FromBytes::from_bytes(rem)?;\n        let (transfer, rem) = FromBytes::from_bytes(rem)?;\n        let (env_balance, rem) = FromBytes::from_bytes(rem)?;\n        let (upgrade, rem) = FromBytes::from_bytes(rem)?;\n        let (call, rem) = FromBytes::from_bytes(rem)?;\n        let (print, rem) = FromBytes::from_bytes(rem)?;\n        let (emit, rem) = FromBytes::from_bytes(rem)?;\n        let (env_info, rem) = FromBytes::from_bytes(rem)?;\n        Ok((\n            HostFunctionCostsV2 {\n                read,\n                write,\n                remove,\n                copy_input,\n                ret,\n                create,\n                transfer,\n                env_balance,\n                upgrade,\n                call,\n                print,\n                emit,\n                env_info,\n            },\n            rem,\n        ))\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<HostFunctionCostsV2> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> HostFunctionCostsV2 {\n        HostFunctionCostsV2 {\n       
     read: rng.gen(),\n            write: rng.gen(),\n            remove: rng.gen(),\n            copy_input: rng.gen(),\n            ret: rng.gen(),\n            create: rng.gen(),\n            transfer: rng.gen(),\n            env_balance: rng.gen(),\n            upgrade: rng.gen(),\n            call: rng.gen(),\n            print: rng.gen(),\n            emit: rng.gen(),\n            env_info: rng.gen(),\n        }\n    }\n}\n\n#[doc(hidden)]\n#[cfg(any(feature = \"gens\", test))]\npub mod gens {\n    use proptest::prelude::*;\n\n    use super::*;\n\n    #[allow(unused)]\n    pub fn host_function_cost_v2_arb<const N: usize>(\n    ) -> impl Strategy<Value = HostFunctionV2<[Cost; N]>> {\n        (any::<u64>(), any::<[u64; N]>())\n            .prop_map(|(cost, arguments)| HostFunctionV2::new(cost, arguments))\n    }\n\n    prop_compose! {\n        pub fn host_function_costs_v2_arb() (\n            read in host_function_cost_v2_arb(),\n            write in host_function_cost_v2_arb(),\n            remove in host_function_cost_v2_arb(),\n            copy_input in host_function_cost_v2_arb(),\n            ret in host_function_cost_v2_arb(),\n            create in host_function_cost_v2_arb(),\n            transfer in host_function_cost_v2_arb(),\n            env_balance in host_function_cost_v2_arb(),\n            upgrade in host_function_cost_v2_arb(),\n            call in host_function_cost_v2_arb(),\n            print in host_function_cost_v2_arb(),\n            emit in host_function_cost_v2_arb(),\n            env_info in host_function_cost_v2_arb(),\n        ) -> HostFunctionCostsV2 {\n            HostFunctionCostsV2 {\n                read,\n                write,\n                remove,\n                copy_input,\n                ret,\n                create,\n                transfer,\n                env_balance,\n                upgrade,\n                call,\n                print,\n                emit,\n                env_info\n            }\n        
}\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{Gas, U512};\n\n    use super::*;\n\n    const COST: Cost = 42;\n    const ARGUMENT_COSTS: [Cost; 3] = [123, 456, 789];\n    const WEIGHTS: [u64; 3] = [1000, 1000, 1000];\n\n    #[test]\n    fn calculate_gas_cost_for_host_function() {\n        let host_function = HostFunctionV2::new(COST, ARGUMENT_COSTS);\n        let expected_cost = COST\n            + (ARGUMENT_COSTS[0] * Cost::from(WEIGHTS[0]))\n            + (ARGUMENT_COSTS[1] * Cost::from(WEIGHTS[1]))\n            + (ARGUMENT_COSTS[2] * Cost::from(WEIGHTS[2]));\n        assert_eq!(\n            host_function.calculate_gas_cost(WEIGHTS),\n            Some(Gas::new(expected_cost))\n        );\n    }\n\n    #[test]\n    fn calculate_gas_cost_would_overflow() {\n        let large_value = Cost::MAX;\n\n        let host_function = HostFunctionV2::new(\n            large_value,\n            [large_value, large_value, large_value, large_value],\n        );\n\n        let lhs =\n            host_function.calculate_gas_cost([large_value, large_value, large_value, large_value]);\n\n        let large_value = U512::from(large_value);\n        let rhs = large_value + (U512::from(4) * large_value * large_value);\n\n        assert_eq!(lhs, Some(Gas::new(rhs)));\n    }\n    #[test]\n    fn calculate_large_gas_cost() {\n        let hf = HostFunctionV2::new(1, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);\n        assert_eq!(\n            hf.calculate_gas_cost([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),\n            Some(Gas::new(\n                1 + (1 + 2 * 2 + 3 * 3 + 4 * 4 + 5 * 5 + 6 * 6 + 7 * 7 + 8 * 8 + 9 * 9 + 10 * 10)\n            ))\n        );\n    }\n}\n\n#[cfg(test)]\nmod proptests {\n    use proptest::prelude::*;\n\n    use crate::bytesrepr;\n\n    use super::*;\n\n    proptest! 
{\n        #[test]\n        fn test_host_function(host_function in gens::host_function_cost_v2_arb::<10>()) {\n            bytesrepr::test_serialization_roundtrip(&host_function);\n        }\n\n        #[test]\n        fn test_host_function_costs(host_function_costs in gens::host_function_costs_v2_arb()) {\n            bytesrepr::test_serialization_roundtrip(&host_function_costs);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/vm_config/message_limits.rs",
    "content": "#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\nuse serde::{Deserialize, Serialize};\n\nuse crate::bytesrepr::{self, FromBytes, ToBytes};\n\n/// Configuration for messages limits.\n#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct MessageLimits {\n    /// Maximum size (in bytes) of a topic name string.\n    pub max_topic_name_size: u32,\n    /// Maximum message size in bytes.\n    pub max_message_size: u32,\n    /// Maximum number of topics that a contract can register.\n    pub max_topics_per_contract: u32,\n}\n\nimpl MessageLimits {\n    /// Returns the max number of topics a contract can register.\n    pub fn max_topics_per_contract(&self) -> u32 {\n        self.max_topics_per_contract\n    }\n\n    /// Returns the maximum allowed size for the topic name string.\n    pub fn max_topic_name_size(&self) -> u32 {\n        self.max_topic_name_size\n    }\n\n    /// Returns the maximum allowed size (in bytes) of the serialized message payload.\n    pub fn max_message_size(&self) -> u32 {\n        self.max_message_size\n    }\n}\n\nimpl Default for MessageLimits {\n    fn default() -> Self {\n        Self {\n            max_topic_name_size: 256,\n            max_message_size: 1024,\n            max_topics_per_contract: 128,\n        }\n    }\n}\n\nimpl ToBytes for MessageLimits {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut ret = bytesrepr::unchecked_allocate_buffer(self);\n\n        ret.append(&mut self.max_topic_name_size.to_bytes()?);\n        ret.append(&mut self.max_message_size.to_bytes()?);\n        ret.append(&mut self.max_topics_per_contract.to_bytes()?);\n\n        Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize {\n        
self.max_topic_name_size.serialized_length()\n            + self.max_message_size.serialized_length()\n            + self.max_topics_per_contract.serialized_length()\n    }\n}\n\nimpl FromBytes for MessageLimits {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (max_topic_name_size, rem) = FromBytes::from_bytes(bytes)?;\n        let (max_message_size, rem) = FromBytes::from_bytes(rem)?;\n        let (max_topics_per_contract, rem) = FromBytes::from_bytes(rem)?;\n\n        Ok((\n            MessageLimits {\n                max_topic_name_size,\n                max_message_size,\n                max_topics_per_contract,\n            },\n            rem,\n        ))\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<MessageLimits> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> MessageLimits {\n        MessageLimits {\n            max_topic_name_size: rng.gen(),\n            max_message_size: rng.gen(),\n            max_topics_per_contract: rng.gen(),\n        }\n    }\n}\n\n#[doc(hidden)]\n#[cfg(any(feature = \"gens\", test))]\npub mod gens {\n    use proptest::{num, prop_compose};\n\n    use super::MessageLimits;\n\n    prop_compose! {\n        pub fn message_limits_arb()(\n            max_topic_name_size in num::u32::ANY,\n            max_message_size in num::u32::ANY,\n            max_topics_per_contract in num::u32::ANY,\n        ) -> MessageLimits {\n            MessageLimits {\n                max_topic_name_size,\n                max_message_size,\n                max_topics_per_contract,\n            }\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use proptest::proptest;\n\n    use crate::bytesrepr;\n\n    use super::gens;\n\n    proptest! 
{\n        #[test]\n        fn should_serialize_and_deserialize_with_arbitrary_values(\n            message_limits in gens::message_limits_arb()\n        ) {\n            bytesrepr::test_serialization_roundtrip(&message_limits);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/vm_config/mint_costs.rs",
    "content": "//! Costs of the mint system contract.\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\nuse serde::{Deserialize, Serialize};\n\nuse crate::bytesrepr::{self, FromBytes, ToBytes};\n\n/// Default cost of the `mint` mint entry point.\npub const DEFAULT_MINT_COST: u32 = 2_500_000_000;\n/// Default cost of the `reduce_total_supply` mint entry point.\npub const DEFAULT_REDUCE_TOTAL_SUPPLY_COST: u32 = 2_500_000_000;\n/// Default cost of the `burn` mint entry point.\npub const DEFAULT_BURN_COST: u32 = 100_000_000;\n/// Default cost of the `create` mint entry point.\npub const DEFAULT_CREATE_COST: u32 = 2_500_000_000;\n/// Default cost of the `balance` mint entry point.\npub const DEFAULT_BALANCE_COST: u32 = 100_000_000;\n/// Default cost of the `transfer` mint entry point.\npub const DEFAULT_TRANSFER_COST: u32 = 100_000_000;\n/// Default cost of the `read_base_round_reward` mint entry point.\npub const DEFAULT_READ_BASE_ROUND_REWARD_COST: u32 = 2_500_000_000;\n/// Default cost of the `mint_into_existing_purse` mint entry point.\npub const DEFAULT_MINT_INTO_EXISTING_PURSE_COST: u32 = 2_500_000_000;\n\n/// Description of the costs of calling mint entry points.\n#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct MintCosts {\n    /// Cost of calling the `mint` entry point.\n    pub mint: u32,\n    /// Cost of calling the `reduce_total_supply` entry point.\n    pub reduce_total_supply: u32,\n    /// Cost of calling the `burn` entry point.\n    pub burn: u32,\n    /// Cost of calling the `create` entry point.\n    pub create: u32,\n    /// Cost of calling the `balance` entry point.\n    pub balance: u32,\n    /// Cost of calling the `transfer` entry point.\n    pub transfer: u32,\n    /// Cost of calling the 
`read_base_round_reward` entry point.\n    pub read_base_round_reward: u32,\n    /// Cost of calling the `mint_into_existing_purse` entry point.\n    pub mint_into_existing_purse: u32,\n}\n\nimpl Default for MintCosts {\n    fn default() -> Self {\n        Self {\n            mint: DEFAULT_MINT_COST,\n            reduce_total_supply: DEFAULT_REDUCE_TOTAL_SUPPLY_COST,\n            burn: DEFAULT_BURN_COST,\n            create: DEFAULT_CREATE_COST,\n            balance: DEFAULT_BALANCE_COST,\n            transfer: DEFAULT_TRANSFER_COST,\n            read_base_round_reward: DEFAULT_READ_BASE_ROUND_REWARD_COST,\n            mint_into_existing_purse: DEFAULT_MINT_INTO_EXISTING_PURSE_COST,\n        }\n    }\n}\n\nimpl ToBytes for MintCosts {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut ret = bytesrepr::unchecked_allocate_buffer(self);\n\n        let Self {\n            mint,\n            reduce_total_supply,\n            burn,\n            create,\n            balance,\n            transfer,\n            read_base_round_reward,\n            mint_into_existing_purse,\n        } = self;\n\n        ret.append(&mut mint.to_bytes()?);\n        ret.append(&mut reduce_total_supply.to_bytes()?);\n        ret.append(&mut create.to_bytes()?);\n        ret.append(&mut balance.to_bytes()?);\n        ret.append(&mut transfer.to_bytes()?);\n        ret.append(&mut read_base_round_reward.to_bytes()?);\n        ret.append(&mut mint_into_existing_purse.to_bytes()?);\n        ret.append(&mut burn.to_bytes()?);\n\n        Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize {\n        let Self {\n            mint,\n            reduce_total_supply,\n            burn,\n            create,\n            balance,\n            transfer,\n            read_base_round_reward,\n            mint_into_existing_purse,\n        } = self;\n\n        mint.serialized_length()\n            + reduce_total_supply.serialized_length()\n            + 
burn.serialized_length()\n            + create.serialized_length()\n            + balance.serialized_length()\n            + transfer.serialized_length()\n            + read_base_round_reward.serialized_length()\n            + mint_into_existing_purse.serialized_length()\n    }\n}\n\nimpl FromBytes for MintCosts {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (mint, rem) = FromBytes::from_bytes(bytes)?;\n        let (reduce_total_supply, rem) = FromBytes::from_bytes(rem)?;\n        let (create, rem) = FromBytes::from_bytes(rem)?;\n        let (balance, rem) = FromBytes::from_bytes(rem)?;\n        let (transfer, rem) = FromBytes::from_bytes(rem)?;\n        let (read_base_round_reward, rem) = FromBytes::from_bytes(rem)?;\n        let (mint_into_existing_purse, rem) = FromBytes::from_bytes(rem)?;\n        let (burn, rem) = FromBytes::from_bytes(rem)?;\n\n        Ok((\n            Self {\n                mint,\n                reduce_total_supply,\n                burn,\n                create,\n                balance,\n                transfer,\n                read_base_round_reward,\n                mint_into_existing_purse,\n            },\n            rem,\n        ))\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<MintCosts> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> MintCosts {\n        MintCosts {\n            mint: rng.gen(),\n            burn: rng.gen(),\n            reduce_total_supply: rng.gen(),\n            create: rng.gen(),\n            balance: rng.gen(),\n            transfer: rng.gen(),\n            read_base_round_reward: rng.gen(),\n            mint_into_existing_purse: rng.gen(),\n        }\n    }\n}\n\n#[doc(hidden)]\n#[cfg(any(feature = \"gens\", test))]\npub mod gens {\n    use proptest::{num, prop_compose};\n\n    use super::MintCosts;\n\n    prop_compose! 
{\n        pub fn mint_costs_arb()(\n            mint in num::u32::ANY,\n            reduce_total_supply in num::u32::ANY,\n            burn in num::u32::ANY,\n            create in num::u32::ANY,\n            balance in num::u32::ANY,\n            transfer in num::u32::ANY,\n            read_base_round_reward in num::u32::ANY,\n            mint_into_existing_purse in num::u32::ANY,\n        ) -> MintCosts {\n            MintCosts {\n                mint,\n                reduce_total_supply,\n                burn,\n                create,\n                balance,\n                transfer,\n                read_base_round_reward,\n                mint_into_existing_purse,\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/vm_config/opcode_costs.rs",
    "content": "//! Support for Wasm opcode costs.\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse derive_more::Add;\nuse num_traits::Zero;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\nuse serde::{Deserialize, Serialize};\n\nuse crate::bytesrepr::{self, FromBytes, ToBytes};\n\n/// Default cost of the `bit` Wasm opcode.\npub const DEFAULT_BIT_COST: u32 = 105;\n/// Default cost of the `add` Wasm opcode.\npub const DEFAULT_ADD_COST: u32 = 105;\n/// Default cost of the `mul` Wasm opcode.\npub const DEFAULT_MUL_COST: u32 = 105;\n/// Default cost of the `div` Wasm opcode.\npub const DEFAULT_DIV_COST: u32 = 105;\n/// Default cost of the `load` Wasm opcode.\npub const DEFAULT_LOAD_COST: u32 = 105;\n/// Default cost of the `store` Wasm opcode.\npub const DEFAULT_STORE_COST: u32 = 105;\n/// Default cost of the `const` Wasm opcode.\npub const DEFAULT_CONST_COST: u32 = 105;\n/// Default cost of the `local` Wasm opcode.\npub const DEFAULT_LOCAL_COST: u32 = 105;\n/// Default cost of the `global` Wasm opcode.\npub const DEFAULT_GLOBAL_COST: u32 = 105;\n/// Default cost of the `integer_comparison` Wasm opcode.\npub const DEFAULT_INTEGER_COMPARISON_COST: u32 = 105;\n/// Default cost of the `conversion` Wasm opcode.\npub const DEFAULT_CONVERSION_COST: u32 = 105;\n/// Default cost of the `unreachable` Wasm opcode.\npub const DEFAULT_UNREACHABLE_COST: u32 = 105;\n/// Default cost of the `nop` Wasm opcode.\npub const DEFAULT_NOP_COST: u32 = 105;\n/// Default cost of the `current_memory` Wasm opcode.\npub const DEFAULT_CURRENT_MEMORY_COST: u32 = 105;\n/// Default cost of the `grow_memory` Wasm opcode.\npub const DEFAULT_GROW_MEMORY_COST: u32 = 900;\n/// Default cost of the `block` Wasm opcode.\npub const DEFAULT_CONTROL_FLOW_BLOCK_OPCODE: u32 = 255;\n/// Default cost of the `loop` Wasm opcode.\npub const DEFAULT_CONTROL_FLOW_LOOP_OPCODE: u32 = 255;\n/// Default cost of the `if` Wasm opcode.\npub const 
DEFAULT_CONTROL_FLOW_IF_OPCODE: u32 = 105;\n/// Default cost of the `else` Wasm opcode.\npub const DEFAULT_CONTROL_FLOW_ELSE_OPCODE: u32 = 105;\n/// Default cost of the `end` Wasm opcode.\npub const DEFAULT_CONTROL_FLOW_END_OPCODE: u32 = 105;\n/// Default cost of the `br` Wasm opcode.\npub const DEFAULT_CONTROL_FLOW_BR_OPCODE: u32 = 1665;\n/// Default cost of the `br_if` Wasm opcode.\npub const DEFAULT_CONTROL_FLOW_BR_IF_OPCODE: u32 = 510;\n/// Default cost of the `return` Wasm opcode.\npub const DEFAULT_CONTROL_FLOW_RETURN_OPCODE: u32 = 105;\n/// Default cost of the `select` Wasm opcode.\npub const DEFAULT_CONTROL_FLOW_SELECT_OPCODE: u32 = 105;\n/// Default cost of the `call` Wasm opcode.\npub const DEFAULT_CONTROL_FLOW_CALL_OPCODE: u32 = 225;\n/// Default cost of the `call_indirect` Wasm opcode.\npub const DEFAULT_CONTROL_FLOW_CALL_INDIRECT_OPCODE: u32 = 270;\n/// Default cost of the `drop` Wasm opcode.\npub const DEFAULT_CONTROL_FLOW_DROP_OPCODE: u32 = 105;\n/// Default fixed cost of the `br_table` Wasm opcode.\npub const DEFAULT_CONTROL_FLOW_BR_TABLE_OPCODE: u32 = 150;\n/// Default multiplier for the size of targets in `br_table` Wasm opcode.\npub const DEFAULT_CONTROL_FLOW_BR_TABLE_MULTIPLIER: u32 = 100;\n/// Default cost of the sign extension opcodes\npub const DEFAULT_SIGN_COST: u32 = 105;\n\n/// Definition of a cost table for a Wasm `br_table` opcode.\n///\n/// Charge of a `br_table` opcode is calculated as follows:\n///\n/// ```text\n/// cost + (len(br_table.targets) * size_multiplier)\n/// ```\n// This is done to encourage users to avoid writing code with very long `br_table`s.\n#[derive(Add, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct BrTableCost {\n    /// Fixed cost charge for `br_table` opcode.\n    pub cost: u32,\n    /// Multiplier for size of target labels in the `br_table` opcode.\n    pub size_multiplier: u32,\n}\n\nimpl Default for 
BrTableCost {\n    fn default() -> Self {\n        Self {\n            cost: DEFAULT_CONTROL_FLOW_BR_TABLE_OPCODE,\n            size_multiplier: DEFAULT_CONTROL_FLOW_BR_TABLE_MULTIPLIER,\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<BrTableCost> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BrTableCost {\n        BrTableCost {\n            cost: rng.gen(),\n            size_multiplier: rng.gen(),\n        }\n    }\n}\n\nimpl ToBytes for BrTableCost {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let Self {\n            cost,\n            size_multiplier,\n        } = self;\n\n        let mut ret = bytesrepr::unchecked_allocate_buffer(self);\n\n        ret.append(&mut cost.to_bytes()?);\n        ret.append(&mut size_multiplier.to_bytes()?);\n\n        Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize {\n        let Self {\n            cost,\n            size_multiplier,\n        } = self;\n\n        cost.serialized_length() + size_multiplier.serialized_length()\n    }\n}\n\nimpl FromBytes for BrTableCost {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (cost, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (size_multiplier, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        Ok((\n            Self {\n                cost,\n                size_multiplier,\n            },\n            bytes,\n        ))\n    }\n}\n\nimpl Zero for BrTableCost {\n    fn zero() -> Self {\n        BrTableCost {\n            cost: 0,\n            size_multiplier: 0,\n        }\n    }\n\n    fn is_zero(&self) -> bool {\n        let BrTableCost {\n            cost,\n            size_multiplier,\n        } = self;\n        cost.is_zero() && size_multiplier.is_zero()\n    }\n}\n\n/// Definition of a cost table for a Wasm control flow opcodes.\n#[derive(Add, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, 
Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct ControlFlowCosts {\n    /// Cost for `block` opcode.\n    pub block: u32,\n    /// Cost for `loop` opcode.\n    #[serde(rename = \"loop\")]\n    pub op_loop: u32,\n    /// Cost for `if` opcode.\n    #[serde(rename = \"if\")]\n    pub op_if: u32,\n    /// Cost for `else` opcode.\n    #[serde(rename = \"else\")]\n    pub op_else: u32,\n    /// Cost for `end` opcode.\n    pub end: u32,\n    /// Cost for `br` opcode.\n    pub br: u32,\n    /// Cost for `br_if` opcode.\n    pub br_if: u32,\n    /// Cost for `return` opcode.\n    #[serde(rename = \"return\")]\n    pub op_return: u32,\n    /// Cost for `call` opcode.\n    pub call: u32,\n    /// Cost for `call_indirect` opcode.\n    pub call_indirect: u32,\n    /// Cost for `drop` opcode.\n    pub drop: u32,\n    /// Cost for `select` opcode.\n    pub select: u32,\n    /// Cost for `br_table` opcode.\n    pub br_table: BrTableCost,\n}\n\nimpl Default for ControlFlowCosts {\n    fn default() -> Self {\n        Self {\n            block: DEFAULT_CONTROL_FLOW_BLOCK_OPCODE,\n            op_loop: DEFAULT_CONTROL_FLOW_LOOP_OPCODE,\n            op_if: DEFAULT_CONTROL_FLOW_IF_OPCODE,\n            op_else: DEFAULT_CONTROL_FLOW_ELSE_OPCODE,\n            end: DEFAULT_CONTROL_FLOW_END_OPCODE,\n            br: DEFAULT_CONTROL_FLOW_BR_OPCODE,\n            br_if: DEFAULT_CONTROL_FLOW_BR_IF_OPCODE,\n            op_return: DEFAULT_CONTROL_FLOW_RETURN_OPCODE,\n            call: DEFAULT_CONTROL_FLOW_CALL_OPCODE,\n            call_indirect: DEFAULT_CONTROL_FLOW_CALL_INDIRECT_OPCODE,\n            drop: DEFAULT_CONTROL_FLOW_DROP_OPCODE,\n            select: DEFAULT_CONTROL_FLOW_SELECT_OPCODE,\n            br_table: Default::default(),\n        }\n    }\n}\n\nimpl ToBytes for ControlFlowCosts {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut ret = bytesrepr::unchecked_allocate_buffer(self);\n\n        let 
Self {\n            block,\n            op_loop,\n            op_if,\n            op_else,\n            end,\n            br,\n            br_if,\n            op_return,\n            call,\n            call_indirect,\n            drop,\n            select,\n            br_table,\n        } = self;\n        ret.append(&mut block.to_bytes()?);\n        ret.append(&mut op_loop.to_bytes()?);\n        ret.append(&mut op_if.to_bytes()?);\n        ret.append(&mut op_else.to_bytes()?);\n        ret.append(&mut end.to_bytes()?);\n        ret.append(&mut br.to_bytes()?);\n        ret.append(&mut br_if.to_bytes()?);\n        ret.append(&mut op_return.to_bytes()?);\n        ret.append(&mut call.to_bytes()?);\n        ret.append(&mut call_indirect.to_bytes()?);\n        ret.append(&mut drop.to_bytes()?);\n        ret.append(&mut select.to_bytes()?);\n        ret.append(&mut br_table.to_bytes()?);\n\n        Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize {\n        let Self {\n            block,\n            op_loop,\n            op_if,\n            op_else,\n            end,\n            br,\n            br_if,\n            op_return,\n            call,\n            call_indirect,\n            drop,\n            select,\n            br_table,\n        } = self;\n        block.serialized_length()\n            + op_loop.serialized_length()\n            + op_if.serialized_length()\n            + op_else.serialized_length()\n            + end.serialized_length()\n            + br.serialized_length()\n            + br_if.serialized_length()\n            + op_return.serialized_length()\n            + call.serialized_length()\n            + call_indirect.serialized_length()\n            + drop.serialized_length()\n            + select.serialized_length()\n            + br_table.serialized_length()\n    }\n}\n\nimpl FromBytes for ControlFlowCosts {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (block, bytes): (_, &[u8]) = 
FromBytes::from_bytes(bytes)?;\n        let (op_loop, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (op_if, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (op_else, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (end, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (br, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (br_if, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (op_return, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (call, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (call_indirect, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (drop, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (select, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (br_table, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n\n        let control_flow_cost = ControlFlowCosts {\n            block,\n            op_loop,\n            op_if,\n            op_else,\n            end,\n            br,\n            br_if,\n            op_return,\n            call,\n            call_indirect,\n            drop,\n            select,\n            br_table,\n        };\n        Ok((control_flow_cost, bytes))\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<ControlFlowCosts> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> ControlFlowCosts {\n        ControlFlowCosts {\n            block: rng.gen(),\n            op_loop: rng.gen(),\n            op_if: rng.gen(),\n            op_else: rng.gen(),\n            end: rng.gen(),\n            br: rng.gen(),\n            br_if: rng.gen(),\n            op_return: rng.gen(),\n            call: rng.gen(),\n            call_indirect: rng.gen(),\n            drop: rng.gen(),\n            select: rng.gen(),\n            br_table: rng.gen(),\n        }\n    }\n}\n\nimpl Zero for ControlFlowCosts {\n    fn zero() -> 
Self {\n        ControlFlowCosts {\n            block: 0,\n            op_loop: 0,\n            op_if: 0,\n            op_else: 0,\n            end: 0,\n            br: 0,\n            br_if: 0,\n            op_return: 0,\n            call: 0,\n            call_indirect: 0,\n            drop: 0,\n            select: 0,\n            br_table: BrTableCost::zero(),\n        }\n    }\n\n    fn is_zero(&self) -> bool {\n        let ControlFlowCosts {\n            block,\n            op_loop,\n            op_if,\n            op_else,\n            end,\n            br,\n            br_if,\n            op_return,\n            call,\n            call_indirect,\n            drop,\n            select,\n            br_table,\n        } = self;\n        block.is_zero()\n            && op_loop.is_zero()\n            && op_if.is_zero()\n            && op_else.is_zero()\n            && end.is_zero()\n            && br.is_zero()\n            && br_if.is_zero()\n            && op_return.is_zero()\n            && call.is_zero()\n            && call_indirect.is_zero()\n            && drop.is_zero()\n            && select.is_zero()\n            && br_table.is_zero()\n    }\n}\n\n/// Definition of a cost table for Wasm opcodes.\n///\n/// This is taken (partially) from parity-ethereum.\n#[derive(Add, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct OpcodeCosts {\n    /// Bit operations multiplier.\n    pub bit: u32,\n    /// Arithmetic add operations multiplier.\n    pub add: u32,\n    /// Mul operations multiplier.\n    pub mul: u32,\n    /// Div operations multiplier.\n    pub div: u32,\n    /// Memory load operation multiplier.\n    pub load: u32,\n    /// Memory store operation multiplier.\n    pub store: u32,\n    /// Const operation multiplier.\n    #[serde(rename = \"const\")]\n    pub op_const: u32,\n    /// Local operations multiplier.\n    pub local: u32,\n    /// Global 
operations multiplier.\n    pub global: u32,\n    /// Integer operations multiplier.\n    pub integer_comparison: u32,\n    /// Conversion operations multiplier.\n    pub conversion: u32,\n    /// Unreachable operation multiplier.\n    pub unreachable: u32,\n    /// Nop operation multiplier.\n    pub nop: u32,\n    /// Get current memory operation multiplier.\n    pub current_memory: u32,\n    /// Grow memory cost, per page (64kb)\n    pub grow_memory: u32,\n    /// Control flow operations multiplier.\n    pub control_flow: ControlFlowCosts,\n    /// Sign ext operations costs\n    pub sign: u32,\n}\n\nimpl Default for OpcodeCosts {\n    fn default() -> Self {\n        OpcodeCosts {\n            bit: DEFAULT_BIT_COST,\n            add: DEFAULT_ADD_COST,\n            mul: DEFAULT_MUL_COST,\n            div: DEFAULT_DIV_COST,\n            load: DEFAULT_LOAD_COST,\n            store: DEFAULT_STORE_COST,\n            op_const: DEFAULT_CONST_COST,\n            local: DEFAULT_LOCAL_COST,\n            global: DEFAULT_GLOBAL_COST,\n            integer_comparison: DEFAULT_INTEGER_COMPARISON_COST,\n            conversion: DEFAULT_CONVERSION_COST,\n            unreachable: DEFAULT_UNREACHABLE_COST,\n            nop: DEFAULT_NOP_COST,\n            current_memory: DEFAULT_CURRENT_MEMORY_COST,\n            grow_memory: DEFAULT_GROW_MEMORY_COST,\n            control_flow: ControlFlowCosts::default(),\n            sign: DEFAULT_SIGN_COST,\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<OpcodeCosts> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> OpcodeCosts {\n        OpcodeCosts {\n            bit: rng.gen(),\n            add: rng.gen(),\n            mul: rng.gen(),\n            div: rng.gen(),\n            load: rng.gen(),\n            store: rng.gen(),\n            op_const: rng.gen(),\n            local: rng.gen(),\n            global: rng.gen(),\n            integer_comparison: rng.gen(),\n            conversion: 
rng.gen(),\n            unreachable: rng.gen(),\n            nop: rng.gen(),\n            current_memory: rng.gen(),\n            grow_memory: rng.gen(),\n            control_flow: rng.gen(),\n            sign: rng.gen(),\n        }\n    }\n}\n\nimpl ToBytes for OpcodeCosts {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut ret = bytesrepr::unchecked_allocate_buffer(self);\n\n        let Self {\n            bit,\n            add,\n            mul,\n            div,\n            load,\n            store,\n            op_const,\n            local,\n            global,\n            integer_comparison,\n            conversion,\n            unreachable,\n            nop,\n            current_memory,\n            grow_memory,\n            control_flow,\n            sign,\n        } = self;\n\n        ret.append(&mut bit.to_bytes()?);\n        ret.append(&mut add.to_bytes()?);\n        ret.append(&mut mul.to_bytes()?);\n        ret.append(&mut div.to_bytes()?);\n        ret.append(&mut load.to_bytes()?);\n        ret.append(&mut store.to_bytes()?);\n        ret.append(&mut op_const.to_bytes()?);\n        ret.append(&mut local.to_bytes()?);\n        ret.append(&mut global.to_bytes()?);\n        ret.append(&mut integer_comparison.to_bytes()?);\n        ret.append(&mut conversion.to_bytes()?);\n        ret.append(&mut unreachable.to_bytes()?);\n        ret.append(&mut nop.to_bytes()?);\n        ret.append(&mut current_memory.to_bytes()?);\n        ret.append(&mut grow_memory.to_bytes()?);\n        ret.append(&mut control_flow.to_bytes()?);\n        ret.append(&mut sign.to_bytes()?);\n\n        Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize {\n        let Self {\n            bit,\n            add,\n            mul,\n            div,\n            load,\n            store,\n            op_const,\n            local,\n            global,\n            integer_comparison,\n            conversion,\n            unreachable,\n            
nop,\n            current_memory,\n            grow_memory,\n            control_flow,\n            sign,\n        } = self;\n        bit.serialized_length()\n            + add.serialized_length()\n            + mul.serialized_length()\n            + div.serialized_length()\n            + load.serialized_length()\n            + store.serialized_length()\n            + op_const.serialized_length()\n            + local.serialized_length()\n            + global.serialized_length()\n            + integer_comparison.serialized_length()\n            + conversion.serialized_length()\n            + unreachable.serialized_length()\n            + nop.serialized_length()\n            + current_memory.serialized_length()\n            + grow_memory.serialized_length()\n            + control_flow.serialized_length()\n            + sign.serialized_length()\n    }\n}\n\nimpl FromBytes for OpcodeCosts {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (bit, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (add, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (mul, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (div, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (load, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (store, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (const_, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (local, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (global, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (integer_comparison, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (conversion, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (unreachable, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (nop, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (current_memory, bytes): (_, &[u8]) = 
FromBytes::from_bytes(bytes)?;\n        let (grow_memory, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (control_flow, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (sign, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?;\n\n        let opcode_costs = OpcodeCosts {\n            bit,\n            add,\n            mul,\n            div,\n            load,\n            store,\n            op_const: const_,\n            local,\n            global,\n            integer_comparison,\n            conversion,\n            unreachable,\n            nop,\n            current_memory,\n            grow_memory,\n            control_flow,\n            sign,\n        };\n        Ok((opcode_costs, bytes))\n    }\n}\n\nimpl Zero for OpcodeCosts {\n    fn zero() -> Self {\n        Self {\n            bit: 0,\n            add: 0,\n            mul: 0,\n            div: 0,\n            load: 0,\n            store: 0,\n            op_const: 0,\n            local: 0,\n            global: 0,\n            integer_comparison: 0,\n            conversion: 0,\n            unreachable: 0,\n            nop: 0,\n            current_memory: 0,\n            grow_memory: 0,\n            control_flow: ControlFlowCosts::zero(),\n            sign: 0,\n        }\n    }\n\n    fn is_zero(&self) -> bool {\n        let OpcodeCosts {\n            bit,\n            add,\n            mul,\n            div,\n            load,\n            store,\n            op_const,\n            local,\n            global,\n            integer_comparison,\n            conversion,\n            unreachable,\n            nop,\n            current_memory,\n            grow_memory,\n            control_flow,\n            sign,\n        } = self;\n        bit.is_zero()\n            && add.is_zero()\n            && mul.is_zero()\n            && div.is_zero()\n            && load.is_zero()\n            && store.is_zero()\n            && op_const.is_zero()\n            && local.is_zero()\n      
      && global.is_zero()\n            && integer_comparison.is_zero()\n            && conversion.is_zero()\n            && unreachable.is_zero()\n            && nop.is_zero()\n            && current_memory.is_zero()\n            && grow_memory.is_zero()\n            && control_flow.is_zero()\n            && sign.is_zero()\n    }\n}\n\n#[doc(hidden)]\n#[cfg(any(feature = \"gens\", test))]\npub mod gens {\n    use proptest::{num, prop_compose};\n\n    use crate::{BrTableCost, ControlFlowCosts, OpcodeCosts};\n\n    prop_compose! {\n        pub fn br_table_cost_arb()(\n            cost in num::u32::ANY,\n            size_multiplier in num::u32::ANY,\n        ) -> BrTableCost {\n            BrTableCost { cost, size_multiplier }\n        }\n    }\n\n    prop_compose! {\n        pub fn control_flow_cost_arb()(\n            block in num::u32::ANY,\n            op_loop in num::u32::ANY,\n            op_if in num::u32::ANY,\n            op_else in num::u32::ANY,\n            end in num::u32::ANY,\n            br in num::u32::ANY,\n            br_if in num::u32::ANY,\n            br_table in br_table_cost_arb(),\n            op_return in num::u32::ANY,\n            call in num::u32::ANY,\n            call_indirect in num::u32::ANY,\n            drop in num::u32::ANY,\n            select in num::u32::ANY,\n        ) -> ControlFlowCosts {\n            ControlFlowCosts {\n                block,\n                op_loop,\n                op_if,\n                op_else,\n                end,\n                br,\n                br_if,\n                br_table,\n                op_return,\n                call,\n                call_indirect,\n                drop,\n                select\n            }\n        }\n\n    }\n\n    prop_compose! 
{\n        pub fn opcode_costs_arb()(\n            bit in num::u32::ANY,\n            add in num::u32::ANY,\n            mul in num::u32::ANY,\n            div in num::u32::ANY,\n            load in num::u32::ANY,\n            store in num::u32::ANY,\n            op_const in num::u32::ANY,\n            local in num::u32::ANY,\n            global in num::u32::ANY,\n            integer_comparison in num::u32::ANY,\n            conversion in num::u32::ANY,\n            unreachable in num::u32::ANY,\n            nop in num::u32::ANY,\n            current_memory in num::u32::ANY,\n            grow_memory in num::u32::ANY,\n            control_flow in control_flow_cost_arb(),\n            sign in num::u32::ANY,\n        ) -> OpcodeCosts {\n            OpcodeCosts {\n                bit,\n                add,\n                mul,\n                div,\n                load,\n                store,\n                op_const,\n                local,\n                global,\n                integer_comparison,\n                conversion,\n                unreachable,\n                nop,\n                current_memory,\n                grow_memory,\n                control_flow,\n                sign,\n            }\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use proptest::proptest;\n\n    use crate::bytesrepr;\n\n    use super::gens;\n\n    proptest! {\n        #[test]\n        fn should_serialize_and_deserialize_with_arbitrary_values(\n            opcode_costs in gens::opcode_costs_arb()\n        ) {\n            bytesrepr::test_serialization_roundtrip(&opcode_costs);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/vm_config/standard_payment_costs.rs",
    "content": "//! Costs of the standard payment system contract.\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\nuse serde::{Deserialize, Serialize};\n\nuse crate::bytesrepr::{self, FromBytes, ToBytes};\n\n/// Default cost of the `pay` standard payment entry point.\nconst DEFAULT_PAY_COST: u32 = 10_000;\n\n/// Description of the costs of calling standard payment entry points.\n#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct StandardPaymentCosts {\n    /// Cost of calling the `pay` entry point.\n    pub pay: u32,\n}\n\nimpl Default for StandardPaymentCosts {\n    fn default() -> Self {\n        Self {\n            pay: DEFAULT_PAY_COST,\n        }\n    }\n}\n\nimpl ToBytes for StandardPaymentCosts {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut ret = bytesrepr::unchecked_allocate_buffer(self);\n        ret.append(&mut self.pay.to_bytes()?);\n        Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.pay.serialized_length()\n    }\n}\n\nimpl FromBytes for StandardPaymentCosts {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (pay, rem) = FromBytes::from_bytes(bytes)?;\n        Ok((Self { pay }, rem))\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<StandardPaymentCosts> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> StandardPaymentCosts {\n        StandardPaymentCosts { pay: rng.gen() }\n    }\n}\n\n#[doc(hidden)]\n#[cfg(any(feature = \"gens\", test))]\npub mod gens {\n    use proptest::{num, prop_compose};\n\n    use super::StandardPaymentCosts;\n\n    prop_compose! 
{\n        pub fn standard_payment_costs_arb()(\n            pay in num::u32::ANY,\n        ) -> StandardPaymentCosts {\n            StandardPaymentCosts {\n                pay,\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/vm_config/storage_costs.rs",
    "content": "//! Support for storage costs.\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse derive_more::Add;\nuse num_traits::Zero;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    Gas, U512,\n};\n\n/// Default gas cost per byte stored.\npub const DEFAULT_GAS_PER_BYTE_COST: u32 = 1_117_587;\n\n/// Represents a cost table for storage costs.\n#[derive(Add, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct StorageCosts {\n    /// Gas charged per byte stored in the global state.\n    gas_per_byte: u32,\n}\n\nimpl StorageCosts {\n    /// Creates new `StorageCosts`.\n    pub const fn new(gas_per_byte: u32) -> Self {\n        Self { gas_per_byte }\n    }\n\n    /// Returns amount of gas per byte stored.\n    pub fn gas_per_byte(&self) -> u32 {\n        self.gas_per_byte\n    }\n\n    /// Calculates gas cost for storing `bytes`.\n    pub fn calculate_gas_cost(&self, bytes: usize) -> Gas {\n        let value = U512::from(self.gas_per_byte) * U512::from(bytes);\n        Gas::new(value)\n    }\n}\n\nimpl Default for StorageCosts {\n    fn default() -> Self {\n        Self {\n            gas_per_byte: DEFAULT_GAS_PER_BYTE_COST,\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<StorageCosts> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> StorageCosts {\n        StorageCosts {\n            gas_per_byte: rng.gen(),\n        }\n    }\n}\n\nimpl ToBytes for StorageCosts {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut ret = bytesrepr::unchecked_allocate_buffer(self);\n\n        ret.append(&mut self.gas_per_byte.to_bytes()?);\n\n        Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize {\n        
self.gas_per_byte.serialized_length()\n    }\n}\n\nimpl FromBytes for StorageCosts {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (gas_per_byte, rem) = FromBytes::from_bytes(bytes)?;\n\n        Ok((StorageCosts { gas_per_byte }, rem))\n    }\n}\n\nimpl Zero for StorageCosts {\n    fn zero() -> Self {\n        StorageCosts { gas_per_byte: 0 }\n    }\n\n    fn is_zero(&self) -> bool {\n        self.gas_per_byte.is_zero()\n    }\n}\n\n#[cfg(test)]\npub mod tests {\n    use crate::U512;\n\n    use super::*;\n    use proptest::prelude::*;\n\n    const SMALL_WEIGHT: usize = 123456789;\n    const LARGE_WEIGHT: usize = usize::MAX;\n\n    #[test]\n    fn should_calculate_gas_cost() {\n        let storage_costs = StorageCosts::default();\n\n        let cost = storage_costs.calculate_gas_cost(SMALL_WEIGHT);\n\n        let expected_cost = U512::from(DEFAULT_GAS_PER_BYTE_COST) * U512::from(SMALL_WEIGHT);\n        assert_eq!(cost, Gas::new(expected_cost));\n    }\n\n    #[test]\n    fn should_calculate_big_gas_cost() {\n        let storage_costs = StorageCosts::default();\n\n        let cost = storage_costs.calculate_gas_cost(LARGE_WEIGHT);\n\n        let expected_cost = U512::from(DEFAULT_GAS_PER_BYTE_COST) * U512::from(LARGE_WEIGHT);\n        assert_eq!(cost, Gas::new(expected_cost));\n    }\n\n    proptest! {\n        #[test]\n        fn bytesrepr_roundtrip(storage_costs in super::gens::storage_costs_arb()) {\n            bytesrepr::test_serialization_roundtrip(&storage_costs);\n        }\n    }\n}\n\n#[doc(hidden)]\n#[cfg(test)]\npub mod gens {\n    use crate::gens::example_u32_arb;\n\n    use super::StorageCosts;\n    use proptest::prelude::*;\n\n    pub(super) fn storage_costs_arb() -> impl Strategy<Value = StorageCosts> {\n        example_u32_arb().prop_map(StorageCosts::new)\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/vm_config/system_config.rs",
    "content": "#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    chainspec::vm_config::{AuctionCosts, HandlePaymentCosts, MintCosts, StandardPaymentCosts},\n};\n\n/// Default cost for calls not a non-existent entrypoint.\npub const DEFAULT_NO_SUCH_ENTRYPOINT_COST: u64 = 2_500_000_000;\n\n/// Definition of costs in the system.\n///\n/// This structure contains the costs of all the system contract's entry points and, additionally,\n/// it defines a wasmless mint cost.\n#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct SystemConfig {\n    /// The cost of the calling non-existing system contract entry point.\n    no_such_entrypoint: u64,\n\n    /// Configuration of auction entrypoint costs.\n    auction_costs: AuctionCosts,\n\n    /// Configuration of mint entrypoint costs.\n    mint_costs: MintCosts,\n\n    /// Configuration of handle payment entrypoint costs.\n    handle_payment_costs: HandlePaymentCosts,\n\n    /// Configuration of standard payment costs.\n    standard_payment_costs: StandardPaymentCosts,\n}\n\nimpl Default for SystemConfig {\n    /// Implements Default for SystemConfig.\n    fn default() -> Self {\n        Self {\n            no_such_entrypoint: DEFAULT_NO_SUCH_ENTRYPOINT_COST,\n            auction_costs: Default::default(),\n            handle_payment_costs: Default::default(),\n            mint_costs: Default::default(),\n            standard_payment_costs: Default::default(),\n        }\n    }\n}\n\nimpl SystemConfig {\n    /// Creates new system config instance.\n    pub fn new(\n        no_such_entrypoint: u64,\n        auction_costs: 
AuctionCosts,\n        mint_costs: MintCosts,\n        handle_payment_costs: HandlePaymentCosts,\n        standard_payment_costs: StandardPaymentCosts,\n    ) -> Self {\n        Self {\n            no_such_entrypoint,\n            auction_costs,\n            mint_costs,\n            handle_payment_costs,\n            standard_payment_costs,\n        }\n    }\n\n    /// Returns the cost of calling a non-existing system contract entry point.\n    pub fn no_such_entrypoint(&self) -> u64 {\n        self.no_such_entrypoint\n    }\n\n    /// Returns the costs of executing auction entry points.\n    pub fn auction_costs(&self) -> &AuctionCosts {\n        &self.auction_costs\n    }\n\n    /// Returns the costs of executing mint entry points.\n    pub fn mint_costs(&self) -> &MintCosts {\n        &self.mint_costs\n    }\n\n    /// Sets mint costs.\n    pub fn with_mint_costs(mut self, mint_costs: MintCosts) -> Self {\n        self.mint_costs = mint_costs;\n        self\n    }\n\n    /// Returns the costs of executing `handle_payment` entry points.\n    pub fn handle_payment_costs(&self) -> &HandlePaymentCosts {\n        &self.handle_payment_costs\n    }\n\n    /// Returns the costs of executing `standard_payment` entry points.\n    pub fn standard_payment_costs(&self) -> &StandardPaymentCosts {\n        &self.standard_payment_costs\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl SystemConfig {\n    /// Generates a random instance using a `TestRng`.\n    pub fn random(rng: &mut TestRng) -> Self {\n        // there's a bug in toml...under the hood it uses an i64 when it should use a u64\n        // this causes flaky test failures if the random result exceeds i64::MAX\n        let no_such_entrypoint = rng.gen_range(0..i64::MAX as u64);\n        let auction_costs = rng.gen();\n        let mint_costs = rng.gen();\n        let handle_payment_costs = rng.gen();\n        let standard_payment_costs = rng.gen();\n\n        SystemConfig {\n            no_such_entrypoint,\n 
           auction_costs,\n            mint_costs,\n            handle_payment_costs,\n            standard_payment_costs,\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<SystemConfig> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> SystemConfig {\n        SystemConfig {\n            no_such_entrypoint: rng.gen_range(0..i64::MAX) as u64,\n            auction_costs: rng.gen(),\n            mint_costs: rng.gen(),\n            handle_payment_costs: rng.gen(),\n            standard_payment_costs: rng.gen(),\n        }\n    }\n}\n\nimpl ToBytes for SystemConfig {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut ret = bytesrepr::unchecked_allocate_buffer(self);\n\n        ret.append(&mut self.no_such_entrypoint.to_bytes()?);\n        ret.append(&mut self.auction_costs.to_bytes()?);\n        ret.append(&mut self.mint_costs.to_bytes()?);\n        ret.append(&mut self.handle_payment_costs.to_bytes()?);\n        ret.append(&mut self.standard_payment_costs.to_bytes()?);\n\n        Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.no_such_entrypoint.serialized_length()\n            + self.auction_costs.serialized_length()\n            + self.mint_costs.serialized_length()\n            + self.handle_payment_costs.serialized_length()\n            + self.standard_payment_costs.serialized_length()\n    }\n}\n\nimpl FromBytes for SystemConfig {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (no_such_entrypoint, rem) = FromBytes::from_bytes(bytes)?;\n        let (auction_costs, rem) = FromBytes::from_bytes(rem)?;\n        let (mint_costs, rem) = FromBytes::from_bytes(rem)?;\n        let (handle_payment_costs, rem) = FromBytes::from_bytes(rem)?;\n        let (standard_payment_costs, rem) = FromBytes::from_bytes(rem)?;\n        Ok((\n            SystemConfig {\n                no_such_entrypoint,\n                auction_costs,\n 
               mint_costs,\n                handle_payment_costs,\n                standard_payment_costs,\n            },\n            rem,\n        ))\n    }\n}\n\n#[doc(hidden)]\n#[cfg(any(feature = \"gens\", test))]\npub mod gens {\n    use proptest::prelude::*;\n\n    use crate::{\n        chainspec::vm_config::{\n            auction_costs::gens::auction_costs_arb,\n            handle_payment_costs::gens::handle_payment_costs_arb, mint_costs::gens::mint_costs_arb,\n            standard_payment_costs::gens::standard_payment_costs_arb,\n        },\n        SystemConfig,\n    };\n\n    prop_compose! {\n        pub fn system_config_arb()(\n            no_such_entrypoint in 0..i64::MAX as u64,\n            auction_costs in auction_costs_arb(),\n            mint_costs in mint_costs_arb(),\n            handle_payment_costs in handle_payment_costs_arb(),\n            standard_payment_costs in standard_payment_costs_arb(),\n        ) -> SystemConfig {\n            SystemConfig {\n                no_such_entrypoint,\n                auction_costs,\n                mint_costs,\n                handle_payment_costs,\n                standard_payment_costs,\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/vm_config/wasm_config.rs",
    "content": "//! Configuration of the Wasm execution engine.\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    chainspec::vm_config::MessageLimits,\n};\n\nuse super::{wasm_v1_config::WasmV1Config, wasm_v2_config::WasmV2Config};\n\n/// Configuration of the Wasm execution environment.\n///\n/// This structure contains various Wasm execution configuration options, such as memory limits,\n/// stack limits and costs.\n#[derive(Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct WasmConfig {\n    /// Messages limits.\n    messages_limits: MessageLimits,\n    /// Configuration for wasms in v1 execution engine.\n    v1: WasmV1Config,\n    /// Configuration for wasms in v2 execution engine.\n    v2: WasmV2Config,\n}\n\nimpl WasmConfig {\n    /// Creates new Wasm config.\n    pub const fn new(messages_limits: MessageLimits, v1: WasmV1Config, v2: WasmV2Config) -> Self {\n        Self {\n            messages_limits,\n            v1,\n            v2,\n        }\n    }\n\n    /// Returns the limits config for messages.\n    pub fn messages_limits(&self) -> MessageLimits {\n        self.messages_limits\n    }\n\n    /// Returns the config for v1 wasms.\n    pub fn v1(&self) -> &WasmV1Config {\n        &self.v1\n    }\n\n    /// Returns mutable v1 reference\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn v1_mut(&mut self) -> &mut WasmV1Config {\n        &mut self.v1\n    }\n\n    /// Returns the config for v2 wasms.\n    pub fn v2(&self) -> &WasmV2Config {\n        &self.v2\n    }\n\n    /// Returns mutable v2 reference\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn v2_mut(&mut self) -> &mut WasmV2Config {\n        &mut 
self.v2\n    }\n}\n\nimpl ToBytes for WasmConfig {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut ret = bytesrepr::unchecked_allocate_buffer(self);\n        ret.append(&mut self.messages_limits.to_bytes()?);\n        ret.append(&mut self.v1.to_bytes()?);\n        ret.append(&mut self.v2.to_bytes()?);\n        Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.messages_limits.serialized_length()\n            + self.v1.serialized_length()\n            + self.v2.serialized_length()\n    }\n}\n\nimpl FromBytes for WasmConfig {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (messages_limits, rem) = FromBytes::from_bytes(bytes)?;\n        let (v1, rem) = FromBytes::from_bytes(rem)?;\n        let (v2, rem) = FromBytes::from_bytes(rem)?;\n\n        Ok((\n            WasmConfig {\n                messages_limits,\n                v1,\n                v2,\n            },\n            rem,\n        ))\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<WasmConfig> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> WasmConfig {\n        WasmConfig {\n            messages_limits: rng.gen(),\n            v1: rng.gen(),\n            v2: rng.gen(),\n        }\n    }\n}\n\n#[cfg(test)]\npub mod tests {\n    use super::*;\n    use proptest::prelude::*;\n    proptest! {\n        #[test]\n        fn bytesrepr_roundtrip(wasm_config in super::gens::wasm_config_arb()) {\n            bytesrepr::test_serialization_roundtrip(&wasm_config);\n        }\n    }\n}\n\n#[doc(hidden)]\n#[cfg(any(feature = \"gens\", test))]\npub mod gens {\n    use proptest::prop_compose;\n\n    use crate::{\n        chainspec::vm_config::{\n            message_limits::gens::message_limits_arb, wasm_v1_config::gens::wasm_v1_config_arb,\n            wasm_v2_config::gens::wasm_v2_config_arb,\n        },\n        WasmConfig,\n    };\n\n    prop_compose! 
{\n        pub fn wasm_config_arb() (\n            v1 in wasm_v1_config_arb(),\n            v2 in wasm_v2_config_arb(),\n            messages_limits in message_limits_arb(),\n        ) -> WasmConfig {\n            WasmConfig {\n                messages_limits,\n                v1,\n                v2\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/vm_config/wasm_v1_config.rs",
    "content": "use crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    chainspec::vm_config::{HostFunctionCostsV1, OpcodeCosts},\n};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\nuse serde::{Deserialize, Serialize};\n\n/// Default maximum number of pages of the Wasm memory.\npub const DEFAULT_WASM_MAX_MEMORY: u32 = 64;\n/// Default maximum stack height.\npub const DEFAULT_MAX_STACK_HEIGHT: u32 = 500;\n\n/// Configuration of the Wasm execution environment for V1 execution machine.\n///\n/// This structure contains various Wasm execution configuration options, such as memory limits,\n/// stack limits and costs.\n#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct WasmV1Config {\n    /// Maximum amount of heap memory (represented in 64kB pages) each contract can use.\n    max_memory: u32,\n    /// Max stack height (native WebAssembly stack limiter).\n    max_stack_height: u32,\n    /// Wasm opcode costs table.\n    opcode_costs: OpcodeCosts,\n    /// Host function costs table.\n    host_function_costs: HostFunctionCostsV1,\n}\n\nimpl WasmV1Config {\n    /// ctor\n    pub fn new(\n        max_memory: u32,\n        max_stack_height: u32,\n        opcode_costs: OpcodeCosts,\n        host_function_costs: HostFunctionCostsV1,\n    ) -> Self {\n        WasmV1Config {\n            max_memory,\n            max_stack_height,\n            opcode_costs,\n            host_function_costs,\n        }\n    }\n\n    /// Returns opcode costs.\n    pub fn opcode_costs(&self) -> OpcodeCosts {\n        self.opcode_costs\n    }\n\n    /// Returns host function costs and consumes this object.\n    pub fn take_host_function_costs(self) -> HostFunctionCostsV1 {\n        self.host_function_costs\n    }\n\n    /// Returns max_memory.\n    pub fn 
max_memory(&self) -> u32 {\n        self.max_memory\n    }\n\n    /// Returns mutable max_memory reference\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn max_memory_mut(&mut self) -> &mut u32 {\n        &mut self.max_memory\n    }\n\n    /// Returns mutable max_stack_height reference\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn max_stack_height_mut(&mut self) -> &mut u32 {\n        &mut self.max_stack_height\n    }\n\n    /// Returns max_stack_height.\n    pub fn max_stack_height(&self) -> u32 {\n        self.max_stack_height\n    }\n}\n\nimpl Default for WasmV1Config {\n    fn default() -> Self {\n        Self {\n            max_memory: DEFAULT_WASM_MAX_MEMORY,\n            max_stack_height: DEFAULT_MAX_STACK_HEIGHT,\n            opcode_costs: OpcodeCosts::default(),\n            host_function_costs: HostFunctionCostsV1::default(),\n        }\n    }\n}\n\nimpl ToBytes for WasmV1Config {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut ret = bytesrepr::unchecked_allocate_buffer(self);\n        ret.append(&mut self.max_memory.to_bytes()?);\n        ret.append(&mut self.max_stack_height.to_bytes()?);\n        ret.append(&mut self.opcode_costs.to_bytes()?);\n        ret.append(&mut self.host_function_costs.to_bytes()?);\n        Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.max_memory.serialized_length()\n            + self.max_stack_height.serialized_length()\n            + self.opcode_costs.serialized_length()\n            + self.host_function_costs.serialized_length()\n    }\n}\n\nimpl FromBytes for WasmV1Config {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (max_memory, rem) = FromBytes::from_bytes(bytes)?;\n        let (max_stack_height, rem) = FromBytes::from_bytes(rem)?;\n        let (opcode_costs, rem) = FromBytes::from_bytes(rem)?;\n        let (host_function_costs, rem) = FromBytes::from_bytes(rem)?;\n        Ok((\n            
WasmV1Config {\n                max_memory,\n                max_stack_height,\n                opcode_costs,\n                host_function_costs,\n            },\n            rem,\n        ))\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<WasmV1Config> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> WasmV1Config {\n        WasmV1Config {\n            max_memory: rng.gen(),\n            max_stack_height: rng.gen(),\n            opcode_costs: rng.gen(),\n            host_function_costs: rng.gen(),\n        }\n    }\n}\n\n#[doc(hidden)]\n#[cfg(any(feature = \"gens\", test))]\npub mod gens {\n    use crate::{\n        chainspec::vm_config::{\n            host_function_costs::gens::host_function_costs_arb,\n            opcode_costs::gens::opcode_costs_arb,\n        },\n        gens::example_u32_arb,\n    };\n    use proptest::prop_compose;\n\n    use super::WasmV1Config;\n\n    prop_compose! {\n        pub fn wasm_v1_config_arb() (\n            max_memory in example_u32_arb(),\n            max_stack_height in example_u32_arb(),\n            opcode_costs in opcode_costs_arb(),\n            host_function_costs in host_function_costs_arb(),\n        ) -> WasmV1Config {\n            WasmV1Config {\n                max_memory,\n                max_stack_height,\n                opcode_costs,\n                host_function_costs,\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/vm_config/wasm_v2_config.rs",
    "content": "use crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    chainspec::vm_config::OpcodeCosts,\n};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\nuse serde::{Deserialize, Serialize};\n\nuse super::HostFunctionCostsV2;\n\n/// Default maximum number of pages of the Wasm memory.\npub const DEFAULT_V2_WASM_MAX_MEMORY: u32 = 64;\n\n/// Configuration of the Wasm execution environment for V2 execution machine.\n///\n/// This structure contains various Wasm execution configuration options, such as memory limits and\n/// costs.\n#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct WasmV2Config {\n    /// Maximum amount of heap memory each contract can use.\n    max_memory: u32,\n    /// Wasm opcode costs table.\n    opcode_costs: OpcodeCosts,\n    /// Host function costs table.\n    host_function_costs: HostFunctionCostsV2,\n}\n\nimpl WasmV2Config {\n    /// ctor\n    pub fn new(\n        max_memory: u32,\n        opcode_costs: OpcodeCosts,\n        host_function_costs: HostFunctionCostsV2,\n    ) -> Self {\n        WasmV2Config {\n            max_memory,\n            opcode_costs,\n            host_function_costs,\n        }\n    }\n\n    /// Returns opcode costs.\n    pub fn opcode_costs(&self) -> OpcodeCosts {\n        self.opcode_costs\n    }\n\n    /// Returns a reference to host function costs\n    pub fn host_function_costs(&self) -> &HostFunctionCostsV2 {\n        &self.host_function_costs\n    }\n\n    /// Returns host function costs and consumes this object.\n    pub fn take_host_function_costs(self) -> HostFunctionCostsV2 {\n        self.host_function_costs\n    }\n\n    /// Returns max_memory.\n    pub fn max_memory(&self) -> u32 {\n        self.max_memory\n    }\n\n    /// Returns mutable max_memory 
reference\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn max_memory_mut(&mut self) -> &mut u32 {\n        &mut self.max_memory\n    }\n}\n\nimpl Default for WasmV2Config {\n    fn default() -> Self {\n        Self {\n            max_memory: DEFAULT_V2_WASM_MAX_MEMORY,\n            opcode_costs: OpcodeCosts::default(),\n            host_function_costs: HostFunctionCostsV2::default(),\n        }\n    }\n}\n\nimpl ToBytes for WasmV2Config {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut ret = bytesrepr::unchecked_allocate_buffer(self);\n        ret.append(&mut self.max_memory.to_bytes()?);\n        ret.append(&mut self.opcode_costs.to_bytes()?);\n        ret.append(&mut self.host_function_costs.to_bytes()?);\n        Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.max_memory.serialized_length()\n            + self.opcode_costs.serialized_length()\n            + self.host_function_costs.serialized_length()\n    }\n}\n\nimpl FromBytes for WasmV2Config {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (max_memory, rem) = FromBytes::from_bytes(bytes)?;\n        let (opcode_costs, rem) = FromBytes::from_bytes(rem)?;\n        let (host_function_costs, rem) = FromBytes::from_bytes(rem)?;\n        Ok((\n            WasmV2Config {\n                max_memory,\n                opcode_costs,\n                host_function_costs,\n            },\n            rem,\n        ))\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<WasmV2Config> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> WasmV2Config {\n        WasmV2Config {\n            max_memory: rng.gen(),\n            opcode_costs: rng.gen(),\n            host_function_costs: rng.gen(),\n        }\n    }\n}\n\n#[doc(hidden)]\n#[cfg(any(feature = \"gens\", test))]\npub mod gens {\n    use crate::{\n        chainspec::vm_config::{\n            
host_function_costs_v2::gens::host_function_costs_v2_arb,\n            opcode_costs::gens::opcode_costs_arb,\n        },\n        gens::example_u32_arb,\n    };\n    use proptest::prop_compose;\n\n    use super::WasmV2Config;\n\n    prop_compose! {\n        pub fn wasm_v2_config_arb() (\n            max_memory in example_u32_arb(),\n            opcode_costs in opcode_costs_arb(),\n            host_function_costs in host_function_costs_v2_arb(),\n        ) -> WasmV2Config {\n            WasmV2Config {\n                max_memory,\n                opcode_costs,\n                host_function_costs,\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/chainspec/vm_config.rs",
    "content": "mod auction_costs;\nmod chainspec_registry;\nmod handle_payment_costs;\nmod host_function_costs;\nmod host_function_costs_v2;\nmod message_limits;\nmod mint_costs;\nmod opcode_costs;\nmod standard_payment_costs;\nmod storage_costs;\nmod system_config;\nmod wasm_config;\nmod wasm_v1_config;\nmod wasm_v2_config;\n\npub use auction_costs::AuctionCosts;\n#[cfg(any(feature = \"testing\", test))]\npub use auction_costs::{DEFAULT_ADD_BID_COST, DEFAULT_DELEGATE_COST};\npub use chainspec_registry::ChainspecRegistry;\npub use handle_payment_costs::HandlePaymentCosts;\n#[cfg(any(feature = \"testing\", test))]\npub use host_function_costs::DEFAULT_NEW_DICTIONARY_COST;\npub use host_function_costs::{\n    Cost as HostFunctionCost, HostFunction, HostFunctionCostsV1,\n    DEFAULT_HOST_FUNCTION_NEW_DICTIONARY,\n};\npub use host_function_costs_v2::{HostFunctionCostsV2, HostFunctionV2};\npub use message_limits::MessageLimits;\npub use mint_costs::MintCosts;\n#[cfg(any(feature = \"testing\", test))]\npub use mint_costs::DEFAULT_TRANSFER_COST;\npub use opcode_costs::{BrTableCost, ControlFlowCosts, OpcodeCosts};\n#[cfg(any(feature = \"testing\", test))]\npub use opcode_costs::{\n    DEFAULT_ADD_COST, DEFAULT_BIT_COST, DEFAULT_CONST_COST, DEFAULT_CONTROL_FLOW_BLOCK_OPCODE,\n    DEFAULT_CONTROL_FLOW_BR_IF_OPCODE, DEFAULT_CONTROL_FLOW_BR_OPCODE,\n    DEFAULT_CONTROL_FLOW_BR_TABLE_MULTIPLIER, DEFAULT_CONTROL_FLOW_BR_TABLE_OPCODE,\n    DEFAULT_CONTROL_FLOW_CALL_INDIRECT_OPCODE, DEFAULT_CONTROL_FLOW_CALL_OPCODE,\n    DEFAULT_CONTROL_FLOW_DROP_OPCODE, DEFAULT_CONTROL_FLOW_ELSE_OPCODE,\n    DEFAULT_CONTROL_FLOW_END_OPCODE, DEFAULT_CONTROL_FLOW_IF_OPCODE,\n    DEFAULT_CONTROL_FLOW_LOOP_OPCODE, DEFAULT_CONTROL_FLOW_RETURN_OPCODE,\n    DEFAULT_CONTROL_FLOW_SELECT_OPCODE, DEFAULT_CONVERSION_COST, DEFAULT_CURRENT_MEMORY_COST,\n    DEFAULT_DIV_COST, DEFAULT_GLOBAL_COST, DEFAULT_GROW_MEMORY_COST,\n    DEFAULT_INTEGER_COMPARISON_COST, DEFAULT_LOAD_COST, DEFAULT_LOCAL_COST, 
DEFAULT_MUL_COST,\n    DEFAULT_NOP_COST, DEFAULT_STORE_COST, DEFAULT_UNREACHABLE_COST,\n};\npub use standard_payment_costs::StandardPaymentCosts;\npub use storage_costs::StorageCosts;\npub use system_config::SystemConfig;\npub use wasm_config::WasmConfig;\npub use wasm_v1_config::WasmV1Config;\n#[cfg(any(feature = \"testing\", test))]\npub use wasm_v1_config::{DEFAULT_MAX_STACK_HEIGHT, DEFAULT_WASM_MAX_MEMORY};\npub use wasm_v2_config::WasmV2Config;\n"
  },
  {
    "path": "types/src/chainspec.rs",
    "content": "//! The chainspec is a set of configuration options for the network.  All validators must apply the\n//! same set of options in order to join and act as a peer in a given network.\n\nmod accounts_config;\nmod activation_point;\nmod chainspec_raw_bytes;\nmod core_config;\nmod fee_handling;\npub mod genesis_config;\nmod global_state_update;\nmod highway_config;\nmod hold_balance_handling;\nmod network_config;\nmod next_upgrade;\nmod pricing_handling;\nmod protocol_config;\nmod refund_handling;\nmod rewards_handling;\nmod transaction_config;\nmod upgrade_config;\nmod vacancy_config;\nmod vm_config;\n\n#[cfg(any(feature = \"std\", test))]\nuse std::{fmt::Debug, sync::Arc};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\nuse serde::Serialize;\nuse tracing::error;\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    ChainNameDigest, Digest, EraId, ProtocolVersion, Timestamp,\n};\npub use accounts_config::{\n    AccountConfig, AccountsConfig, AdministratorAccount, DelegatorConfig, GenesisAccount,\n    GenesisValidator, ValidatorConfig,\n};\npub use activation_point::ActivationPoint;\npub use chainspec_raw_bytes::ChainspecRawBytes;\npub use core_config::{\n    ConsensusProtocolName, CoreConfig, LegacyRequiredFinality, DEFAULT_GAS_HOLD_INTERVAL,\n    DEFAULT_MINIMUM_BID_AMOUNT,\n};\n#[cfg(any(feature = \"std\", test))]\npub use core_config::{\n    DEFAULT_BASELINE_MOTES_AMOUNT, DEFAULT_FEE_HANDLING, DEFAULT_REFUND_HANDLING,\n};\npub use fee_handling::FeeHandling;\n#[cfg(any(feature = \"std\", test))]\npub use genesis_config::GenesisConfig;\npub use global_state_update::{GlobalStateUpdate, GlobalStateUpdateConfig, GlobalStateUpdateError};\npub use highway_config::HighwayConfig;\npub use hold_balance_handling::HoldBalanceHandling;\npub use network_config::NetworkConfig;\npub use 
next_upgrade::NextUpgrade;\npub use pricing_handling::PricingHandling;\npub use protocol_config::ProtocolConfig;\npub use refund_handling::RefundHandling;\npub use rewards_handling::{RewardsHandling, REWARDS_HANDLING_RATIO_TAG};\npub use transaction_config::{\n    DeployConfig, TransactionConfig, TransactionLaneDefinition, TransactionV1Config,\n};\n#[cfg(any(feature = \"testing\", test))]\npub use transaction_config::{\n    DEFAULT_LARGE_TRANSACTION_GAS_LIMIT, DEFAULT_MAX_PAYMENT_MOTES, DEFAULT_MIN_TRANSFER_MOTES,\n};\npub use upgrade_config::ProtocolUpgradeConfig;\npub use vacancy_config::VacancyConfig;\npub use vm_config::{\n    AuctionCosts, BrTableCost, ChainspecRegistry, ControlFlowCosts, HandlePaymentCosts,\n    HostFunction, HostFunctionCost, HostFunctionCostsV1, HostFunctionCostsV2, HostFunctionV2,\n    MessageLimits, MintCosts, OpcodeCosts, StandardPaymentCosts, StorageCosts, SystemConfig,\n    WasmConfig, WasmV1Config, WasmV2Config, DEFAULT_HOST_FUNCTION_NEW_DICTIONARY,\n};\n#[cfg(any(feature = \"testing\", test))]\npub use vm_config::{\n    DEFAULT_ADD_BID_COST, DEFAULT_ADD_COST, DEFAULT_BIT_COST, DEFAULT_CONST_COST,\n    DEFAULT_CONTROL_FLOW_BLOCK_OPCODE, DEFAULT_CONTROL_FLOW_BR_IF_OPCODE,\n    DEFAULT_CONTROL_FLOW_BR_OPCODE, DEFAULT_CONTROL_FLOW_BR_TABLE_MULTIPLIER,\n    DEFAULT_CONTROL_FLOW_BR_TABLE_OPCODE, DEFAULT_CONTROL_FLOW_CALL_INDIRECT_OPCODE,\n    DEFAULT_CONTROL_FLOW_CALL_OPCODE, DEFAULT_CONTROL_FLOW_DROP_OPCODE,\n    DEFAULT_CONTROL_FLOW_ELSE_OPCODE, DEFAULT_CONTROL_FLOW_END_OPCODE,\n    DEFAULT_CONTROL_FLOW_IF_OPCODE, DEFAULT_CONTROL_FLOW_LOOP_OPCODE,\n    DEFAULT_CONTROL_FLOW_RETURN_OPCODE, DEFAULT_CONTROL_FLOW_SELECT_OPCODE,\n    DEFAULT_CONVERSION_COST, DEFAULT_CURRENT_MEMORY_COST, DEFAULT_DELEGATE_COST, DEFAULT_DIV_COST,\n    DEFAULT_GLOBAL_COST, DEFAULT_GROW_MEMORY_COST, DEFAULT_INTEGER_COMPARISON_COST,\n    DEFAULT_LOAD_COST, DEFAULT_LOCAL_COST, DEFAULT_MAX_STACK_HEIGHT, DEFAULT_MUL_COST,\n    DEFAULT_NEW_DICTIONARY_COST, 
DEFAULT_NOP_COST, DEFAULT_STORE_COST, DEFAULT_TRANSFER_COST,\n    DEFAULT_UNREACHABLE_COST, DEFAULT_WASM_MAX_MEMORY,\n};\n\n/// A collection of configuration settings describing the state of the system at genesis and after\n/// upgrades to basic system functionality occurring after genesis.\n#[derive(Clone, PartialEq, Eq, Serialize, Debug, Default)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct Chainspec {\n    /// Protocol config.\n    #[serde(rename = \"protocol\")]\n    pub protocol_config: ProtocolConfig,\n\n    /// Network config.\n    #[serde(rename = \"network\")]\n    pub network_config: NetworkConfig,\n\n    /// Core config.\n    #[serde(rename = \"core\")]\n    pub core_config: CoreConfig,\n\n    /// Highway config.\n    #[serde(rename = \"highway\")]\n    pub highway_config: HighwayConfig,\n\n    /// Transaction Config.\n    #[serde(rename = \"transactions\")]\n    pub transaction_config: TransactionConfig,\n\n    /// Wasm config.\n    #[serde(rename = \"wasm\")]\n    pub wasm_config: WasmConfig,\n\n    /// System costs config.\n    #[serde(rename = \"system_costs\")]\n    pub system_costs_config: SystemConfig,\n\n    /// Vacancy behavior config\n    #[serde(rename = \"vacancy\")]\n    pub vacancy_config: VacancyConfig,\n\n    /// Storage costs.\n    pub storage_costs: StorageCosts,\n}\n\nimpl Chainspec {\n    /// Returns the hash of the chainspec's name.\n    pub fn name_hash(&self) -> ChainNameDigest {\n        ChainNameDigest::from_chain_name(&self.network_config.name)\n    }\n\n    /// Serializes `self` and hashes the resulting bytes.\n    pub fn hash(&self) -> Digest {\n        let serialized_chainspec = self.to_bytes().unwrap_or_else(|error| {\n            error!(%error, \"failed to serialize chainspec\");\n            vec![]\n        });\n        Digest::hash(serialized_chainspec)\n    }\n\n    /// Serializes `self` and hashes the resulting bytes, if able.\n    pub fn try_hash(&self) -> 
Result<Digest, String> {\n        let arr = self\n            .to_bytes()\n            .map_err(|_| \"failed to serialize chainspec\".to_string())?;\n        Ok(Digest::hash(arr))\n    }\n\n    /// Returns the protocol version of the chainspec.\n    pub fn protocol_version(&self) -> ProtocolVersion {\n        self.protocol_config.version\n    }\n\n    /// Returns the era ID of where we should reset back to.  This means stored blocks in that and\n    /// subsequent eras are deleted from storage.\n    pub fn hard_reset_to_start_of_era(&self) -> Option<EraId> {\n        self.protocol_config\n            .hard_reset\n            .then(|| self.protocol_config.activation_point.era_id())\n    }\n\n    /// Creates an upgrade config instance from parts.\n    pub fn upgrade_config_from_parts(\n        &self,\n        pre_state_hash: Digest,\n        current_protocol_version: ProtocolVersion,\n        era_id: EraId,\n        chainspec_raw_bytes: Arc<ChainspecRawBytes>,\n    ) -> Result<ProtocolUpgradeConfig, String> {\n        let chainspec_registry = ChainspecRegistry::new_with_optional_global_state(\n            chainspec_raw_bytes.chainspec_bytes(),\n            chainspec_raw_bytes.maybe_global_state_bytes(),\n        );\n        let global_state_update = match self.protocol_config.get_update_mapping() {\n            Ok(global_state_update) => global_state_update,\n            Err(err) => {\n                return Err(format!(\"failed to generate global state update: {}\", err));\n            }\n        };\n        let fee_handling = self.core_config.fee_handling;\n        let validator_minimum_bid_amount = self.core_config.minimum_bid_amount;\n        let maximum_delegation_amount = self.core_config.maximum_delegation_amount;\n        let minimum_delegation_amount = self.core_config.minimum_delegation_amount;\n        let enable_addressable_entity = self.core_config.enable_addressable_entity;\n        let rewards_handling = self.core_config.rewards_handling.clone();\n     
   let minimum_delegation_rate = Some(self.core_config.minimum_delegation_rate);\n\n        Ok(ProtocolUpgradeConfig::new(\n            pre_state_hash,\n            current_protocol_version,\n            self.protocol_config.version,\n            Some(era_id),\n            Some(self.core_config.gas_hold_balance_handling),\n            Some(self.core_config.gas_hold_interval.millis()),\n            Some(self.core_config.validator_slots),\n            Some(self.core_config.auction_delay),\n            Some(self.core_config.locked_funds_period.millis()),\n            Some(self.core_config.round_seigniorage_rate),\n            Some(self.core_config.unbonding_delay),\n            global_state_update,\n            chainspec_registry,\n            fee_handling,\n            validator_minimum_bid_amount,\n            maximum_delegation_amount,\n            minimum_delegation_amount,\n            enable_addressable_entity,\n            rewards_handling,\n            minimum_delegation_rate,\n        ))\n    }\n\n    /// Returns balance hold epoch based upon configured hold interval, calculated from the imputed\n    /// timestamp.\n    pub fn balance_holds_epoch(&self, timestamp: Timestamp) -> u64 {\n        timestamp\n            .millis()\n            .saturating_sub(self.core_config.gas_hold_interval.millis())\n    }\n\n    /// Is the given transaction lane supported.\n    pub fn is_supported(&self, lane: u8) -> bool {\n        self.transaction_config\n            .transaction_v1_config\n            .is_supported(lane)\n    }\n\n    /// Returns the max serialized for the given category.\n    pub fn get_max_serialized_length_by_category(&self, lane: u8) -> u64 {\n        self.transaction_config\n            .transaction_v1_config\n            .get_max_serialized_length(lane)\n    }\n\n    /// Returns the max args length for the given category.\n    pub fn get_max_args_length_by_category(&self, lane: u8) -> u64 {\n        self.transaction_config\n            
.transaction_v1_config\n            .get_max_args_length(lane)\n    }\n\n    /// Returns the max gas limit for the given category.\n    pub fn get_max_gas_limit_by_category(&self, lane: u8) -> u64 {\n        self.transaction_config\n            .transaction_v1_config\n            .get_max_transaction_gas_limit(lane)\n    }\n\n    /// Returns the max transaction count for the given category.\n    pub fn get_max_transaction_count_by_category(&self, lane: u8) -> u64 {\n        self.transaction_config\n            .transaction_v1_config\n            .get_max_transaction_count(lane)\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Chainspec {\n    /// Generates a random instance using a `TestRng`.\n    pub fn random(rng: &mut TestRng) -> Self {\n        let protocol_config = ProtocolConfig::random(rng);\n        let network_config = NetworkConfig::random(rng);\n        let core_config = CoreConfig::random(rng);\n        let highway_config = HighwayConfig::random(rng);\n        let transaction_config = TransactionConfig::random(rng);\n        let wasm_config = rng.gen();\n        let system_costs_config = SystemConfig::random(rng);\n        let vacancy_config = VacancyConfig::random(rng);\n\n        Chainspec {\n            protocol_config,\n            network_config,\n            core_config,\n            highway_config,\n            transaction_config,\n            wasm_config,\n            system_costs_config,\n            vacancy_config,\n            storage_costs: rng.gen(),\n        }\n    }\n\n    /// Set the chain name;\n    pub fn with_chain_name(&mut self, chain_name: String) -> &mut Self {\n        self.network_config.name = chain_name;\n        self\n    }\n\n    /// Set max associated keys.\n    pub fn with_max_associated_keys(&mut self, max_associated_keys: u32) -> &mut Self {\n        self.core_config.max_associated_keys = max_associated_keys;\n        self\n    }\n\n    /// Set pricing handling.\n    pub fn with_pricing_handling(&mut self, 
pricing_handling: PricingHandling) -> &mut Self {\n        self.core_config.pricing_handling = pricing_handling;\n        self\n    }\n\n    /// Set allow prepaid.\n    pub fn with_allow_prepaid(&mut self, allow_prepaid: bool) -> &mut Self {\n        self.core_config.allow_prepaid = allow_prepaid;\n        self\n    }\n\n    /// Set block gas limit.\n    pub fn with_block_gas_limit(&mut self, block_gas_limit: u64) -> &mut Self {\n        self.transaction_config.block_gas_limit = block_gas_limit;\n        self\n    }\n\n    /// Set vm2 casper wasm.\n    pub fn with_vm_casper_v2(&mut self, vm_casper_v2: bool) -> &mut Self {\n        self.transaction_config.runtime_config.vm_casper_v2 = vm_casper_v2;\n        self\n    }\n}\n\nimpl ToBytes for Chainspec {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.protocol_config.write_bytes(writer)?;\n        self.network_config.write_bytes(writer)?;\n        self.core_config.write_bytes(writer)?;\n        self.highway_config.write_bytes(writer)?;\n        self.transaction_config.write_bytes(writer)?;\n        self.wasm_config.write_bytes(writer)?;\n        self.system_costs_config.write_bytes(writer)?;\n        self.vacancy_config.write_bytes(writer)?;\n        self.storage_costs.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.protocol_config.serialized_length()\n            + self.network_config.serialized_length()\n            + self.core_config.serialized_length()\n            + self.highway_config.serialized_length()\n            + self.transaction_config.serialized_length()\n            + self.wasm_config.serialized_length()\n            + self.system_costs_config.serialized_length()\n            + self.vacancy_config.serialized_length()\n        
    + self.storage_costs.serialized_length()\n    }\n}\n\nimpl FromBytes for Chainspec {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (protocol_config, remainder) = ProtocolConfig::from_bytes(bytes)?;\n        let (network_config, remainder) = NetworkConfig::from_bytes(remainder)?;\n        let (core_config, remainder) = CoreConfig::from_bytes(remainder)?;\n        let (highway_config, remainder) = HighwayConfig::from_bytes(remainder)?;\n        let (transaction_config, remainder) = TransactionConfig::from_bytes(remainder)?;\n        let (wasm_config, remainder) = WasmConfig::from_bytes(remainder)?;\n        let (system_costs_config, remainder) = SystemConfig::from_bytes(remainder)?;\n        let (vacancy_config, remainder) = VacancyConfig::from_bytes(remainder)?;\n        let (storage_costs, remainder) = FromBytes::from_bytes(remainder)?;\n        let chainspec = Chainspec {\n            protocol_config,\n            network_config,\n            core_config,\n            highway_config,\n            transaction_config,\n            wasm_config,\n            system_costs_config,\n            vacancy_config,\n            storage_costs,\n        };\n        Ok((chainspec, remainder))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use rand::SeedableRng;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let mut rng = TestRng::from_entropy();\n        let chainspec = Chainspec::random(&mut rng);\n        bytesrepr::test_serialization_roundtrip(&chainspec);\n    }\n}\n"
  },
  {
    "path": "types/src/checksummed_hex.rs",
    "content": "//! Checksummed hex encoding following an [EIP-55][1]-like scheme.\n//!\n//! [1]: https://eips.ethereum.org/EIPS/eip-55\n\nuse alloc::vec::Vec;\nuse core::ops::RangeInclusive;\n\nuse base16;\n\nuse crate::crypto;\n\n/// The number of input bytes, at or below which [`decode`] will checksum-decode the output.\npub const SMALL_BYTES_COUNT: usize = 75;\n\nconst HEX_CHARS: [char; 22] = [\n    '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'A', 'B', 'C',\n    'D', 'E', 'F',\n];\n\n/// Takes a slice of bytes and breaks it up into a vector of *nibbles* (ie, 4-bit values)\n/// represented as `u8`s.\nfn bytes_to_nibbles<'a, T: 'a + AsRef<[u8]>>(input: &'a T) -> impl Iterator<Item = u8> + 'a {\n    input\n        .as_ref()\n        .iter()\n        .flat_map(move |byte| [4, 0].iter().map(move |offset| (byte >> offset) & 0x0f))\n}\n\n/// Takes a slice of bytes and outputs an infinite cyclic stream of bits for those bytes.\nfn bytes_to_bits_cycle(bytes: Vec<u8>) -> impl Iterator<Item = bool> {\n    bytes\n        .into_iter()\n        .cycle()\n        .flat_map(move |byte| (0..8usize).map(move |offset| ((byte >> offset) & 0x01) == 0x01))\n}\n\n/// Returns the bytes encoded as hexadecimal with mixed-case based checksums following a scheme\n/// similar to [EIP-55](https://eips.ethereum.org/EIPS/eip-55).\n///\n/// Key differences:\n///   - Works on any length of data, not just 20-byte addresses\n///   - Uses Blake2b hashes rather than Keccak\n///   - Uses hash bits rather than nibbles\nfn encode_iter<'a, T: 'a + AsRef<[u8]>>(input: &'a T) -> impl Iterator<Item = char> + 'a {\n    let nibbles = bytes_to_nibbles(input);\n    let mut hash_bits = bytes_to_bits_cycle(crypto::blake2b(input.as_ref()).to_vec());\n    nibbles.map(move |mut nibble| {\n        // Base 16 numbers greater than 10 are represented by the ascii characters a through f.\n        if nibble >= 10 && hash_bits.next().unwrap_or(true) {\n            // We are using 
nibble to index HEX_CHARS, so adding 6 to nibble gives us the index\n            // of the uppercase character. HEX_CHARS[10] == 'a', HEX_CHARS[16] == 'A'.\n            nibble += 6;\n        }\n        HEX_CHARS[nibble as usize]\n    })\n}\n\n/// Returns true if all chars in a string are uppercase or lowercase.\n/// Returns false if the string is mixed case or if there are no alphabetic chars.\nfn string_is_same_case<T: AsRef<[u8]>>(s: T) -> bool {\n    const LOWER_RANGE: RangeInclusive<u8> = b'a'..=b'f';\n    const UPPER_RANGE: RangeInclusive<u8> = b'A'..=b'F';\n\n    let mut chars = s\n        .as_ref()\n        .iter()\n        .filter(|c| LOWER_RANGE.contains(c) || UPPER_RANGE.contains(c));\n\n    match chars.next() {\n        Some(first) => {\n            let is_upper = UPPER_RANGE.contains(first);\n            chars.all(|c| UPPER_RANGE.contains(c) == is_upper)\n        }\n        None => {\n            // String has no actual characters.\n            true\n        }\n    }\n}\n\n/// Decodes a mixed-case hexadecimal string, verifying that it conforms to the checksum scheme\n/// similar to scheme in [EIP-55][1].\n///\n/// Key differences:\n///   - Works on any length of (decoded) data up to `SMALL_BYTES_COUNT`, not just 20-byte addresses\n///   - Uses Blake2b hashes rather than Keccak\n///   - Uses hash bits rather than nibbles\n///\n/// For backward compatibility: if the hex string is all uppercase or all lowercase, the check is\n/// skipped.\n///\n/// [1]: https://eips.ethereum.org/EIPS/eip-55\npub fn decode<T: AsRef<[u8]>>(input: T) -> Result<Vec<u8>, base16::DecodeError> {\n    let bytes = base16::decode(input.as_ref())?;\n\n    // If the string was not small or not mixed case, don't verify the checksum.\n    if bytes.len() > SMALL_BYTES_COUNT || string_is_same_case(input.as_ref()) {\n        return Ok(bytes);\n    }\n\n    encode_iter(&bytes)\n        .zip(input.as_ref().iter())\n        .enumerate()\n        .try_for_each(|(index, (expected_case_hex_char, 
&input_hex_char))| {\n            if expected_case_hex_char as u8 == input_hex_char {\n                Ok(())\n            } else {\n                Err(base16::DecodeError::InvalidByte {\n                    index,\n                    byte: expected_case_hex_char as u8,\n                })\n            }\n        })?;\n    Ok(bytes)\n}\n\n#[cfg(test)]\nmod tests {\n    use alloc::string::String;\n\n    use proptest::{\n        collection::vec,\n        prelude::{any, prop_assert, prop_assert_eq},\n    };\n    use proptest_attr_macro::proptest;\n\n    use super::*;\n\n    #[test]\n    fn should_decode_empty_input() {\n        let input = String::new();\n        let actual = decode(input).unwrap();\n        assert!(actual.is_empty());\n    }\n\n    #[test]\n    fn string_is_same_case_true_when_same_case() {\n        let input = \"aaaaaaaaaaa\";\n        assert!(string_is_same_case(input));\n\n        let input = \"AAAAAAAAAAA\";\n        assert!(string_is_same_case(input));\n    }\n\n    #[test]\n    fn string_is_same_case_false_when_mixed_case() {\n        let input = \"aAaAaAaAaAa\";\n        assert!(!string_is_same_case(input));\n    }\n\n    #[test]\n    fn string_is_same_case_no_alphabetic_chars_in_string() {\n        let input = \"424242424242\";\n        assert!(string_is_same_case(input));\n    }\n\n    #[test]\n    fn should_checksum_decode_only_if_small() {\n        let input = [255; SMALL_BYTES_COUNT];\n        let small_encoded: String = encode_iter(&input).collect();\n        assert_eq!(input.to_vec(), decode(&small_encoded).unwrap());\n\n        assert!(decode(\"A1a2\").is_err());\n\n        let large_encoded = format!(\"A1{}\", small_encoded);\n        assert!(decode(large_encoded).is_ok());\n    }\n\n    #[proptest]\n    fn hex_roundtrip(input: Vec<u8>) {\n        prop_assert_eq!(\n            input.clone(),\n            decode(encode_iter(&input).collect::<String>()).expect(\"Failed to decode input.\")\n        );\n    }\n\n    proptest::proptest! 
{\n        #[test]\n        fn should_fail_on_invalid_checksum(input in vec(any::<u8>(), 0..75)) {\n            let encoded: String = encode_iter(&input).collect();\n\n            // Swap the case of the first letter in the checksum hex-encoded value.\n            let mut expected_error = None;\n            let mutated: String = encoded\n                .char_indices()\n                .map(|(index, mut c)| {\n                    if expected_error.is_some() || c.is_ascii_digit() {\n                        return c;\n                    }\n                    expected_error = Some(base16::DecodeError::InvalidByte {\n                        index,\n                        byte: c as u8,\n                    });\n                    if c.is_ascii_uppercase() {\n                        c.make_ascii_lowercase();\n                    } else {\n                        c.make_ascii_uppercase();\n                    }\n                    c\n                })\n                .collect();\n\n            // If the encoded form is now all the same case or digits, just return.\n            if string_is_same_case(&mutated) {\n                return Ok(());\n            }\n\n            // Assert we can still decode to original input using `base16::decode`.\n            prop_assert_eq!(\n                input,\n                base16::decode(&mutated).expect(\"Failed to decode input.\")\n            );\n\n            // Assert decoding using `checksummed_hex::decode` returns the expected error.\n            prop_assert_eq!(expected_error.unwrap(), decode(&mutated).unwrap_err())\n        }\n    }\n\n    #[proptest]\n    fn hex_roundtrip_sanity(input: Vec<u8>) {\n        prop_assert!(decode(encode_iter(&input).collect::<String>()).is_ok())\n    }\n\n    #[proptest]\n    fn is_same_case_uppercase(input: String) {\n        let input = input.to_uppercase();\n        prop_assert!(string_is_same_case(input));\n    }\n\n    #[proptest]\n    fn is_same_case_lowercase(input: String) {\n   
     let input = input.to_lowercase();\n        prop_assert!(string_is_same_case(input));\n    }\n\n    #[proptest]\n    fn is_not_same_case(input: String) {\n        let input = format!(\"aA{}\", input);\n        prop_assert!(!string_is_same_case(input));\n    }\n}\n"
  },
  {
    "path": "types/src/cl_type.rs",
    "content": "use alloc::{\n    boxed::Box,\n    collections::{BTreeMap, BTreeSet, VecDeque},\n    string::String,\n    vec::Vec,\n};\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse num_rational::Ratio;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    Key, URef, U128, U256, U512,\n};\n\n// This must be less than 300 in order to avoid a stack overflow when deserializing.\npub(crate) const CL_TYPE_RECURSION_DEPTH: u8 = 50;\n\nconst CL_TYPE_TAG_BOOL: u8 = 0;\nconst CL_TYPE_TAG_I32: u8 = 1;\nconst CL_TYPE_TAG_I64: u8 = 2;\nconst CL_TYPE_TAG_U8: u8 = 3;\nconst CL_TYPE_TAG_U32: u8 = 4;\nconst CL_TYPE_TAG_U64: u8 = 5;\nconst CL_TYPE_TAG_U128: u8 = 6;\nconst CL_TYPE_TAG_U256: u8 = 7;\nconst CL_TYPE_TAG_U512: u8 = 8;\nconst CL_TYPE_TAG_UNIT: u8 = 9;\nconst CL_TYPE_TAG_STRING: u8 = 10;\nconst CL_TYPE_TAG_KEY: u8 = 11;\nconst CL_TYPE_TAG_UREF: u8 = 12;\nconst CL_TYPE_TAG_OPTION: u8 = 13;\nconst CL_TYPE_TAG_LIST: u8 = 14;\nconst CL_TYPE_TAG_BYTE_ARRAY: u8 = 15;\nconst CL_TYPE_TAG_RESULT: u8 = 16;\nconst CL_TYPE_TAG_MAP: u8 = 17;\nconst CL_TYPE_TAG_TUPLE1: u8 = 18;\nconst CL_TYPE_TAG_TUPLE2: u8 = 19;\nconst CL_TYPE_TAG_TUPLE3: u8 = 20;\nconst CL_TYPE_TAG_ANY: u8 = 21;\nconst CL_TYPE_TAG_PUBLIC_KEY: u8 = 22;\n\n/// Casper types, i.e. 
types which can be stored and manipulated by smart contracts.\n///\n/// Provides a description of the underlying data type of a [`CLValue`](crate::CLValue).\n#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub enum CLType {\n    /// `bool` primitive.\n    Bool,\n    /// `i32` primitive.\n    I32,\n    /// `i64` primitive.\n    I64,\n    /// `u8` primitive.\n    U8,\n    /// `u32` primitive.\n    U32,\n    /// `u64` primitive.\n    U64,\n    /// [`U128`] large unsigned integer type.\n    U128,\n    /// [`U256`] large unsigned integer type.\n    U256,\n    /// [`U512`] large unsigned integer type.\n    U512,\n    /// `()` primitive.\n    Unit,\n    /// `String` primitive.\n    String,\n    /// [`Key`] system type.\n    Key,\n    /// [`URef`] system type.\n    URef,\n    /// [`PublicKey`](crate::PublicKey) system type.\n    PublicKey,\n    /// `Option` of a `CLType`.\n    #[cfg_attr(feature = \"datasize\", data_size(skip))]\n    Option(Box<CLType>),\n    /// Variable-length list of a single `CLType` (comparable to a `Vec`).\n    #[cfg_attr(feature = \"datasize\", data_size(skip))]\n    List(Box<CLType>),\n    /// Fixed-length list of a single `CLType` (comparable to a Rust array).\n    ByteArray(u32),\n    /// `Result` with `Ok` and `Err` variants of `CLType`s.\n    #[allow(missing_docs)] // generated docs are explicit enough.\n    #[cfg_attr(feature = \"datasize\", data_size(skip))]\n    Result { ok: Box<CLType>, err: Box<CLType> },\n    /// Map with keys of a single `CLType` and values of a single `CLType`.\n    #[allow(missing_docs)] // generated docs are explicit enough.\n    #[cfg_attr(feature = \"datasize\", data_size(skip))]\n    Map {\n        key: Box<CLType>,\n        value: Box<CLType>,\n    },\n    /// 1-ary tuple of a `CLType`.\n    #[cfg_attr(feature = \"datasize\", 
data_size(skip))]\n    Tuple1([Box<CLType>; 1]),\n    /// 2-ary tuple of `CLType`s.\n    #[cfg_attr(feature = \"datasize\", data_size(skip))]\n    Tuple2([Box<CLType>; 2]),\n    /// 3-ary tuple of `CLType`s.\n    #[cfg_attr(feature = \"datasize\", data_size(skip))]\n    Tuple3([Box<CLType>; 3]),\n    /// Unspecified type.\n    Any,\n}\n\nimpl CLType {\n    /// The `len()` of the `Vec<u8>` resulting from `self.to_bytes()`.\n    pub fn serialized_length(&self) -> usize {\n        size_of::<u8>()\n            + match self {\n                CLType::Bool\n                | CLType::I32\n                | CLType::I64\n                | CLType::U8\n                | CLType::U32\n                | CLType::U64\n                | CLType::U128\n                | CLType::U256\n                | CLType::U512\n                | CLType::Unit\n                | CLType::String\n                | CLType::Key\n                | CLType::URef\n                | CLType::PublicKey\n                | CLType::Any => 0,\n                CLType::Option(cl_type) | CLType::List(cl_type) => cl_type.serialized_length(),\n                CLType::ByteArray(list_len) => list_len.serialized_length(),\n                CLType::Result { ok, err } => ok.serialized_length() + err.serialized_length(),\n                CLType::Map { key, value } => key.serialized_length() + value.serialized_length(),\n                CLType::Tuple1(cl_type_array) => serialized_length_of_cl_tuple_type(cl_type_array),\n                CLType::Tuple2(cl_type_array) => serialized_length_of_cl_tuple_type(cl_type_array),\n                CLType::Tuple3(cl_type_array) => serialized_length_of_cl_tuple_type(cl_type_array),\n            }\n    }\n\n    /// Returns `true` if the [`CLType`] is [`Option`].\n    pub fn is_option(&self) -> bool {\n        matches!(self, Self::Option(..))\n    }\n\n    /// Creates a `CLType::Map`.\n    pub fn map(key: CLType, value: CLType) -> Self {\n        CLType::Map {\n            key: 
Box::new(key),\n            value: Box::new(value),\n        }\n    }\n}\n\n/// Returns the `CLType` describing a \"named key\" on the system, i.e. a `(String, Key)`.\npub fn named_key_type() -> CLType {\n    CLType::Tuple2([Box::new(CLType::String), Box::new(CLType::Key)])\n}\n\nimpl CLType {\n    pub(crate) fn append_bytes(&self, stream: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            CLType::Bool => stream.push(CL_TYPE_TAG_BOOL),\n            CLType::I32 => stream.push(CL_TYPE_TAG_I32),\n            CLType::I64 => stream.push(CL_TYPE_TAG_I64),\n            CLType::U8 => stream.push(CL_TYPE_TAG_U8),\n            CLType::U32 => stream.push(CL_TYPE_TAG_U32),\n            CLType::U64 => stream.push(CL_TYPE_TAG_U64),\n            CLType::U128 => stream.push(CL_TYPE_TAG_U128),\n            CLType::U256 => stream.push(CL_TYPE_TAG_U256),\n            CLType::U512 => stream.push(CL_TYPE_TAG_U512),\n            CLType::Unit => stream.push(CL_TYPE_TAG_UNIT),\n            CLType::String => stream.push(CL_TYPE_TAG_STRING),\n            CLType::Key => stream.push(CL_TYPE_TAG_KEY),\n            CLType::URef => stream.push(CL_TYPE_TAG_UREF),\n            CLType::PublicKey => stream.push(CL_TYPE_TAG_PUBLIC_KEY),\n            CLType::Option(cl_type) => {\n                stream.push(CL_TYPE_TAG_OPTION);\n                cl_type.append_bytes(stream)?;\n            }\n            CLType::List(cl_type) => {\n                stream.push(CL_TYPE_TAG_LIST);\n                cl_type.append_bytes(stream)?;\n            }\n            CLType::ByteArray(len) => {\n                stream.push(CL_TYPE_TAG_BYTE_ARRAY);\n                stream.append(&mut len.to_bytes()?);\n            }\n            CLType::Result { ok, err } => {\n                stream.push(CL_TYPE_TAG_RESULT);\n                ok.append_bytes(stream)?;\n                err.append_bytes(stream)?;\n            }\n            CLType::Map { key, value } => {\n                
stream.push(CL_TYPE_TAG_MAP);\n                key.append_bytes(stream)?;\n                value.append_bytes(stream)?;\n            }\n            CLType::Tuple1(cl_type_array) => {\n                serialize_cl_tuple_type(CL_TYPE_TAG_TUPLE1, cl_type_array, stream)?\n            }\n            CLType::Tuple2(cl_type_array) => {\n                serialize_cl_tuple_type(CL_TYPE_TAG_TUPLE2, cl_type_array, stream)?\n            }\n            CLType::Tuple3(cl_type_array) => {\n                serialize_cl_tuple_type(CL_TYPE_TAG_TUPLE3, cl_type_array, stream)?\n            }\n            CLType::Any => stream.push(CL_TYPE_TAG_ANY),\n        }\n        Ok(())\n    }\n}\n\nimpl Display for CLType {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            CLType::Bool => write!(formatter, \"bool\"),\n            CLType::I32 => write!(formatter, \"i32\"),\n            CLType::I64 => write!(formatter, \"i64\"),\n            CLType::U8 => write!(formatter, \"u8\"),\n            CLType::U32 => write!(formatter, \"u32\"),\n            CLType::U64 => write!(formatter, \"u64\"),\n            CLType::U128 => write!(formatter, \"u128\"),\n            CLType::U256 => write!(formatter, \"u256\"),\n            CLType::U512 => write!(formatter, \"u512\"),\n            CLType::Unit => write!(formatter, \"unit\"),\n            CLType::String => write!(formatter, \"string\"),\n            CLType::Key => write!(formatter, \"key\"),\n            CLType::URef => write!(formatter, \"uref\"),\n            CLType::PublicKey => write!(formatter, \"public-key\"),\n            CLType::Option(t) => write!(formatter, \"option<{t}>\"),\n            CLType::List(t) => write!(formatter, \"list<{t}>\"),\n            CLType::ByteArray(len) => write!(formatter, \"byte-array[{len}]\"),\n            CLType::Result { ok, err } => write!(formatter, \"result<{ok}, {err}>\"),\n            CLType::Map { key, value } => write!(formatter, \"map<{key}, {value}>\"),\n   
         CLType::Tuple1([t1]) => write!(formatter, \"({t1},)\"),\n            CLType::Tuple2([t1, t2]) => write!(formatter, \"({t1}, {t2})\"),\n            CLType::Tuple3([t1, t2, t3]) => write!(formatter, \"({t1}, {t2}, {t3})\"),\n            CLType::Any => write!(formatter, \"any\"),\n        }\n    }\n}\n\nimpl FromBytes for CLType {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        depth_limited_from_bytes(0, bytes)\n    }\n}\n\nfn depth_limited_from_bytes(depth: u8, bytes: &[u8]) -> Result<(CLType, &[u8]), bytesrepr::Error> {\n    if depth >= CL_TYPE_RECURSION_DEPTH {\n        return Err(bytesrepr::Error::ExceededRecursionDepth);\n    }\n    let depth = depth + 1;\n    let (tag, remainder) = u8::from_bytes(bytes)?;\n    match tag {\n        CL_TYPE_TAG_BOOL => Ok((CLType::Bool, remainder)),\n        CL_TYPE_TAG_I32 => Ok((CLType::I32, remainder)),\n        CL_TYPE_TAG_I64 => Ok((CLType::I64, remainder)),\n        CL_TYPE_TAG_U8 => Ok((CLType::U8, remainder)),\n        CL_TYPE_TAG_U32 => Ok((CLType::U32, remainder)),\n        CL_TYPE_TAG_U64 => Ok((CLType::U64, remainder)),\n        CL_TYPE_TAG_U128 => Ok((CLType::U128, remainder)),\n        CL_TYPE_TAG_U256 => Ok((CLType::U256, remainder)),\n        CL_TYPE_TAG_U512 => Ok((CLType::U512, remainder)),\n        CL_TYPE_TAG_UNIT => Ok((CLType::Unit, remainder)),\n        CL_TYPE_TAG_STRING => Ok((CLType::String, remainder)),\n        CL_TYPE_TAG_KEY => Ok((CLType::Key, remainder)),\n        CL_TYPE_TAG_UREF => Ok((CLType::URef, remainder)),\n        CL_TYPE_TAG_PUBLIC_KEY => Ok((CLType::PublicKey, remainder)),\n        CL_TYPE_TAG_OPTION => {\n            let (inner_type, remainder) = depth_limited_from_bytes(depth, remainder)?;\n            let cl_type = CLType::Option(Box::new(inner_type));\n            Ok((cl_type, remainder))\n        }\n        CL_TYPE_TAG_LIST => {\n            let (inner_type, remainder) = depth_limited_from_bytes(depth, remainder)?;\n            let 
cl_type = CLType::List(Box::new(inner_type));\n            Ok((cl_type, remainder))\n        }\n        CL_TYPE_TAG_BYTE_ARRAY => {\n            let (len, remainder) = u32::from_bytes(remainder)?;\n            let cl_type = CLType::ByteArray(len);\n            Ok((cl_type, remainder))\n        }\n        CL_TYPE_TAG_RESULT => {\n            let (ok_type, remainder) = depth_limited_from_bytes(depth, remainder)?;\n            let (err_type, remainder) = depth_limited_from_bytes(depth, remainder)?;\n            let cl_type = CLType::Result {\n                ok: Box::new(ok_type),\n                err: Box::new(err_type),\n            };\n            Ok((cl_type, remainder))\n        }\n        CL_TYPE_TAG_MAP => {\n            let (key_type, remainder) = depth_limited_from_bytes(depth, remainder)?;\n            let (value_type, remainder) = depth_limited_from_bytes(depth, remainder)?;\n            let cl_type = CLType::Map {\n                key: Box::new(key_type),\n                value: Box::new(value_type),\n            };\n            Ok((cl_type, remainder))\n        }\n        CL_TYPE_TAG_TUPLE1 => {\n            let (mut inner_types, remainder) = parse_cl_tuple_types(depth, 1, remainder)?;\n            // NOTE: Assumed safe as `parse_cl_tuple_types` is expected to have exactly 1\n            // element\n            let cl_type = CLType::Tuple1([inner_types.pop_front().unwrap()]);\n            Ok((cl_type, remainder))\n        }\n        CL_TYPE_TAG_TUPLE2 => {\n            let (mut inner_types, remainder) = parse_cl_tuple_types(depth, 2, remainder)?;\n            // NOTE: Assumed safe as `parse_cl_tuple_types` is expected to have exactly 2\n            // elements\n            let cl_type = CLType::Tuple2([\n                inner_types.pop_front().unwrap(),\n                inner_types.pop_front().unwrap(),\n            ]);\n            Ok((cl_type, remainder))\n        }\n        CL_TYPE_TAG_TUPLE3 => {\n            let (mut inner_types, remainder) = 
parse_cl_tuple_types(depth, 3, remainder)?;\n            // NOTE: Assumed safe as `parse_cl_tuple_types` is expected to have exactly 3\n            // elements\n            let cl_type = CLType::Tuple3([\n                inner_types.pop_front().unwrap(),\n                inner_types.pop_front().unwrap(),\n                inner_types.pop_front().unwrap(),\n            ]);\n            Ok((cl_type, remainder))\n        }\n        CL_TYPE_TAG_ANY => Ok((CLType::Any, remainder)),\n        _ => Err(bytesrepr::Error::Formatting),\n    }\n}\n\nfn serialize_cl_tuple_type<'a, T: IntoIterator<Item = &'a Box<CLType>>>(\n    tag: u8,\n    cl_type_array: T,\n    stream: &mut Vec<u8>,\n) -> Result<(), bytesrepr::Error> {\n    stream.push(tag);\n    for cl_type in cl_type_array {\n        cl_type.append_bytes(stream)?;\n    }\n    Ok(())\n}\n\nfn parse_cl_tuple_types(\n    depth: u8,\n    count: usize,\n    mut bytes: &[u8],\n) -> Result<(VecDeque<Box<CLType>>, &[u8]), bytesrepr::Error> {\n    let mut cl_types = VecDeque::with_capacity(count);\n    for _ in 0..count {\n        let (cl_type, remainder) = depth_limited_from_bytes(depth, bytes)?;\n        cl_types.push_back(Box::new(cl_type));\n        bytes = remainder;\n    }\n\n    Ok((cl_types, bytes))\n}\n\nfn serialized_length_of_cl_tuple_type<'a, T: IntoIterator<Item = &'a Box<CLType>>>(\n    cl_type_array: T,\n) -> usize {\n    cl_type_array\n        .into_iter()\n        .map(|cl_type| cl_type.serialized_length())\n        .sum()\n}\n\n/// A type which can be described as a [`CLType`].\npub trait CLTyped {\n    /// The `CLType` of `Self`.\n    fn cl_type() -> CLType;\n}\n\nimpl CLTyped for bool {\n    fn cl_type() -> CLType {\n        CLType::Bool\n    }\n}\n\nimpl CLTyped for i32 {\n    fn cl_type() -> CLType {\n        CLType::I32\n    }\n}\n\nimpl CLTyped for i64 {\n    fn cl_type() -> CLType {\n        CLType::I64\n    }\n}\n\nimpl CLTyped for u8 {\n    fn cl_type() -> CLType {\n        CLType::U8\n    }\n}\n\nimpl 
CLTyped for u32 {\n    fn cl_type() -> CLType {\n        CLType::U32\n    }\n}\n\nimpl CLTyped for u64 {\n    fn cl_type() -> CLType {\n        CLType::U64\n    }\n}\n\nimpl CLTyped for U128 {\n    fn cl_type() -> CLType {\n        CLType::U128\n    }\n}\n\nimpl CLTyped for U256 {\n    fn cl_type() -> CLType {\n        CLType::U256\n    }\n}\n\nimpl CLTyped for U512 {\n    fn cl_type() -> CLType {\n        CLType::U512\n    }\n}\n\nimpl CLTyped for () {\n    fn cl_type() -> CLType {\n        CLType::Unit\n    }\n}\n\nimpl CLTyped for String {\n    fn cl_type() -> CLType {\n        CLType::String\n    }\n}\n\nimpl CLTyped for &str {\n    fn cl_type() -> CLType {\n        CLType::String\n    }\n}\n\nimpl CLTyped for Key {\n    fn cl_type() -> CLType {\n        CLType::Key\n    }\n}\n\nimpl CLTyped for URef {\n    fn cl_type() -> CLType {\n        CLType::URef\n    }\n}\n\nimpl<T: CLTyped> CLTyped for Option<T> {\n    fn cl_type() -> CLType {\n        CLType::Option(Box::new(T::cl_type()))\n    }\n}\n\nimpl<T: CLTyped> CLTyped for Vec<T> {\n    fn cl_type() -> CLType {\n        CLType::List(Box::new(T::cl_type()))\n    }\n}\n\nimpl<T: CLTyped> CLTyped for BTreeSet<T> {\n    fn cl_type() -> CLType {\n        CLType::List(Box::new(T::cl_type()))\n    }\n}\n\nimpl<T: CLTyped> CLTyped for &T {\n    fn cl_type() -> CLType {\n        T::cl_type()\n    }\n}\n\nimpl<const COUNT: usize> CLTyped for [u8; COUNT] {\n    fn cl_type() -> CLType {\n        CLType::ByteArray(COUNT as u32)\n    }\n}\n\nimpl<T: CLTyped, E: CLTyped> CLTyped for Result<T, E> {\n    fn cl_type() -> CLType {\n        let ok = Box::new(T::cl_type());\n        let err = Box::new(E::cl_type());\n        CLType::Result { ok, err }\n    }\n}\n\nimpl<K: CLTyped, V: CLTyped> CLTyped for BTreeMap<K, V> {\n    fn cl_type() -> CLType {\n        let key = Box::new(K::cl_type());\n        let value = Box::new(V::cl_type());\n        CLType::Map { key, value }\n    }\n}\n\nimpl<T1: CLTyped> CLTyped for (T1,) {\n    fn 
cl_type() -> CLType {\n        CLType::Tuple1([Box::new(T1::cl_type())])\n    }\n}\n\nimpl<T1: CLTyped, T2: CLTyped> CLTyped for (T1, T2) {\n    fn cl_type() -> CLType {\n        CLType::Tuple2([Box::new(T1::cl_type()), Box::new(T2::cl_type())])\n    }\n}\n\nimpl<T1: CLTyped, T2: CLTyped, T3: CLTyped> CLTyped for (T1, T2, T3) {\n    fn cl_type() -> CLType {\n        CLType::Tuple3([\n            Box::new(T1::cl_type()),\n            Box::new(T2::cl_type()),\n            Box::new(T3::cl_type()),\n        ])\n    }\n}\n\nimpl<T: CLTyped> CLTyped for Ratio<T> {\n    fn cl_type() -> CLType {\n        <(T, T)>::cl_type()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::{fmt::Debug, iter, string::ToString};\n\n    use super::*;\n    use crate::{\n        bytesrepr::{FromBytes, ToBytes},\n        AccessRights, CLValue,\n    };\n\n    fn round_trip<T: CLTyped + FromBytes + ToBytes + PartialEq + Debug>(value: &T) {\n        let cl_value = CLValue::from_t(value).unwrap();\n\n        let serialized_cl_value = cl_value.to_bytes().unwrap();\n        assert_eq!(serialized_cl_value.len(), cl_value.serialized_length());\n        let parsed_cl_value: CLValue = bytesrepr::deserialize(serialized_cl_value).unwrap();\n        assert_eq!(cl_value, parsed_cl_value);\n\n        let parsed_value = cl_value.into_t().unwrap();\n        assert_eq!(*value, parsed_value);\n    }\n\n    #[test]\n    fn bool_should_work() {\n        round_trip(&true);\n        round_trip(&false);\n    }\n\n    #[test]\n    fn u8_should_work() {\n        round_trip(&1u8);\n    }\n\n    #[test]\n    fn u32_should_work() {\n        round_trip(&1u32);\n    }\n\n    #[test]\n    fn i32_should_work() {\n        round_trip(&-1i32);\n    }\n\n    #[test]\n    fn u64_should_work() {\n        round_trip(&1u64);\n    }\n\n    #[test]\n    fn i64_should_work() {\n        round_trip(&-1i64);\n    }\n\n    #[test]\n    fn u128_should_work() {\n        round_trip(&U128::one());\n    }\n\n    #[test]\n    fn 
u256_should_work() {\n        round_trip(&U256::one());\n    }\n\n    #[test]\n    fn u512_should_work() {\n        round_trip(&U512::one());\n    }\n\n    #[test]\n    fn unit_should_work() {\n        round_trip(&());\n    }\n\n    #[test]\n    fn string_should_work() {\n        round_trip(&String::from(\"abc\"));\n    }\n\n    #[test]\n    fn key_should_work() {\n        let key = Key::URef(URef::new([0u8; 32], AccessRights::READ_ADD_WRITE));\n        round_trip(&key);\n    }\n\n    #[test]\n    fn uref_should_work() {\n        let uref = URef::new([0u8; 32], AccessRights::READ_ADD_WRITE);\n        round_trip(&uref);\n    }\n\n    #[test]\n    fn option_of_cl_type_should_work() {\n        let x: Option<i32> = Some(-1);\n        let y: Option<i32> = None;\n\n        round_trip(&x);\n        round_trip(&y);\n    }\n\n    #[test]\n    fn vec_of_cl_type_should_work() {\n        let vec = vec![String::from(\"a\"), String::from(\"b\")];\n        round_trip(&vec);\n    }\n\n    #[test]\n    #[allow(clippy::cognitive_complexity)]\n    fn small_array_of_u8_should_work() {\n        macro_rules! test_small_array {\n            ($($N:literal)+) => {\n                $(\n                    let mut array: [u8; $N] = Default::default();\n                    for i in 0..$N {\n                        array[i] = i as u8;\n                    }\n                    round_trip(&array);\n                )+\n            }\n        }\n\n        test_small_array! {\n                 1  2  3  4  5  6  7  8  9\n             10 11 12 13 14 15 16 17 18 19\n             20 21 22 23 24 25 26 27 28 29\n             30 31 32\n        }\n    }\n\n    #[test]\n    fn large_array_of_cl_type_should_work() {\n        macro_rules! 
test_large_array {\n            ($($N:literal)+) => {\n                $(\n                    let array = {\n                        let mut tmp = [0u8; $N];\n                        for i in 0..$N {\n                            tmp[i] = i as u8;\n                        }\n                        tmp\n                    };\n\n                    let cl_value = CLValue::from_t(array.clone()).unwrap();\n\n                    let serialized_cl_value = cl_value.to_bytes().unwrap();\n                    let parsed_cl_value: CLValue = bytesrepr::deserialize(serialized_cl_value).unwrap();\n                    assert_eq!(cl_value, parsed_cl_value);\n\n                    let parsed_value: [u8; $N] = CLValue::into_t(cl_value).unwrap();\n                    for i in 0..$N {\n                        assert_eq!(array[i], parsed_value[i]);\n                    }\n                )+\n            }\n        }\n\n        test_large_array! { 64 128 256 512 }\n    }\n\n    #[test]\n    fn result_of_cl_type_should_work() {\n        let x: Result<(), String> = Ok(());\n        let y: Result<(), String> = Err(String::from(\"Hello, world!\"));\n\n        round_trip(&x);\n        round_trip(&y);\n    }\n\n    #[test]\n    fn map_of_cl_type_should_work() {\n        let mut map: BTreeMap<String, u64> = BTreeMap::new();\n        map.insert(String::from(\"abc\"), 1);\n        map.insert(String::from(\"xyz\"), 2);\n\n        round_trip(&map);\n    }\n\n    #[test]\n    fn tuple_1_should_work() {\n        let x = (-1i32,);\n\n        round_trip(&x);\n    }\n\n    #[test]\n    fn tuple_2_should_work() {\n        let x = (-1i32, String::from(\"a\"));\n\n        round_trip(&x);\n    }\n\n    #[test]\n    fn tuple_3_should_work() {\n        let x = (-1i32, 1u32, String::from(\"a\"));\n\n        round_trip(&x);\n    }\n\n    #[test]\n    fn parsing_nested_tuple_1_cltype_should_not_stack_overflow() {\n        // The bytesrepr representation of the CLType for a\n        // nested 
(((...((),),...),),) looks like:\n        // [18, 18, 18, ..., 9]\n\n        for i in 1..1000 {\n            let bytes = iter::repeat(CL_TYPE_TAG_TUPLE1)\n                .take(i)\n                .chain(iter::once(CL_TYPE_TAG_UNIT))\n                .collect();\n            match bytesrepr::deserialize(bytes) {\n                Ok(parsed_cltype) => assert!(matches!(parsed_cltype, CLType::Tuple1(_))),\n                Err(error) => assert_eq!(error, bytesrepr::Error::ExceededRecursionDepth),\n            }\n        }\n    }\n\n    #[test]\n    fn parsing_nested_tuple_1_value_should_not_stack_overflow() {\n        // The bytesrepr representation of the CLValue for a\n        // nested (((...((),),...),),) looks like:\n        // [0, 0, 0, 0, 18, 18, 18, ..., 18, 9]\n\n        for i in 1..1000 {\n            let bytes = iter::repeat(0)\n                .take(4)\n                .chain(iter::repeat(CL_TYPE_TAG_TUPLE1).take(i))\n                .chain(iter::once(CL_TYPE_TAG_UNIT))\n                .collect();\n            match bytesrepr::deserialize::<CLValue>(bytes) {\n                Ok(parsed_clvalue) => {\n                    assert!(matches!(parsed_clvalue.cl_type(), CLType::Tuple1(_)))\n                }\n                Err(error) => assert_eq!(error, bytesrepr::Error::ExceededRecursionDepth),\n            }\n        }\n    }\n\n    #[test]\n    fn any_should_work() {\n        #[derive(PartialEq, Debug, Clone)]\n        struct Any(String);\n\n        impl CLTyped for Any {\n            fn cl_type() -> CLType {\n                CLType::Any\n            }\n        }\n\n        impl ToBytes for Any {\n            fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n                self.0.to_bytes()\n            }\n\n            fn serialized_length(&self) -> usize {\n                self.0.serialized_length()\n            }\n        }\n\n        impl FromBytes for Any {\n            fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n   
             let (inner, remainder) = String::from_bytes(bytes)?;\n                Ok((Any(inner), remainder))\n            }\n        }\n\n        let any = Any(\"Any test\".to_string());\n        round_trip(&any);\n    }\n\n    #[test]\n    fn should_have_cltype_of_ref_to_cltyped() {\n        assert_eq!(<Vec<&u64>>::cl_type(), <Vec<u64>>::cl_type())\n    }\n}\n"
  },
  {
    "path": "types/src/cl_value/checksum_registry.rs",
    "content": "//! The registry of checksums.\n\nuse alloc::{\n    collections::BTreeMap,\n    string::{String, ToString},\n    vec::Vec,\n};\n\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    CLType, CLTyped, Digest,\n};\n\n/// The checksum registry.\n#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Debug, Default)]\npub struct ChecksumRegistry(BTreeMap<String, Digest>);\n\nimpl ChecksumRegistry {\n    /// Returns a new `ChecksumRegistry`.\n    pub fn new() -> Self {\n        ChecksumRegistry(BTreeMap::new())\n    }\n\n    /// Inserts a checksum into the registry.\n    pub fn insert(&mut self, checksum_name: &str, checksum: Digest) {\n        self.0.insert(checksum_name.to_string(), checksum);\n    }\n\n    /// Gets a checksum from the registry.\n    pub fn get(&self, checksum_name: &str) -> Option<&Digest> {\n        self.0.get(checksum_name)\n    }\n}\n\nimpl ToBytes for ChecksumRegistry {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n}\n\nimpl FromBytes for ChecksumRegistry {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (inner, remainder) = BTreeMap::from_bytes(bytes)?;\n        Ok((ChecksumRegistry(inner), remainder))\n    }\n}\n\nimpl CLTyped for ChecksumRegistry {\n    fn cl_type() -> CLType {\n        BTreeMap::<String, Digest>::cl_type()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let mut checksum_registry = ChecksumRegistry::new();\n        checksum_registry.insert(\"a\", Digest::hash([9; 100]));\n        bytesrepr::test_serialization_roundtrip(&checksum_registry);\n    }\n}\n"
  },
  {
    "path": "types/src/cl_value/dictionary.rs",
    "content": "use alloc::vec::Vec;\n\nuse crate::{\n    bytesrepr::{self, Bytes, FromBytes, ToBytes},\n    CLType, CLTyped, CLValue, CLValueError, Key, StoredValue,\n};\n\n/// Wraps a [`CLValue`] for storage in a dictionary.\n///\n/// Note that we include the dictionary [`super::super::URef`] and key used to create the\n/// `Key::Dictionary` under which this value is stored.  This is to allow migration to a different\n/// key representation in the future.\n#[derive(Clone)]\npub struct DictionaryValue {\n    /// Actual [`CLValue`] written to global state.\n    cl_value: CLValue,\n    /// [`URef`] seed bytes.\n    seed_uref_addr: Bytes,\n    /// Original key bytes.\n    dictionary_item_key_bytes: Bytes,\n}\n\nimpl DictionaryValue {\n    /// Constructor.\n    pub fn new(\n        cl_value: CLValue,\n        seed_uref_addr: Vec<u8>,\n        dictionary_item_key_bytes: Vec<u8>,\n    ) -> Self {\n        Self {\n            cl_value,\n            seed_uref_addr: seed_uref_addr.into(),\n            dictionary_item_key_bytes: dictionary_item_key_bytes.into(),\n        }\n    }\n\n    /// Get a reference to the [`DictionaryValue`]'s wrapper's cl value.\n    pub fn into_cl_value(self) -> CLValue {\n        self.cl_value\n    }\n}\n\nimpl CLTyped for DictionaryValue {\n    fn cl_type() -> CLType {\n        CLType::Any\n    }\n}\n\nimpl FromBytes for DictionaryValue {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (cl_value, remainder) = FromBytes::from_bytes(bytes)?;\n        let (uref_addr, remainder) = FromBytes::from_bytes(remainder)?;\n        let (key_bytes, remainder) = FromBytes::from_bytes(remainder)?;\n        let dictionary_value = DictionaryValue {\n            cl_value,\n            seed_uref_addr: uref_addr,\n            dictionary_item_key_bytes: key_bytes,\n        };\n        Ok((dictionary_value, remainder))\n    }\n}\n\nimpl ToBytes for DictionaryValue {\n    fn to_bytes(&self) -> Result<Vec<u8>, 
bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        buffer.extend(self.cl_value.to_bytes()?);\n        buffer.extend(self.seed_uref_addr.to_bytes()?);\n        buffer.extend(self.dictionary_item_key_bytes.to_bytes()?);\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.cl_value.serialized_length()\n            + self.seed_uref_addr.serialized_length()\n            + self.dictionary_item_key_bytes.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.cl_value.write_bytes(writer)?;\n        self.seed_uref_addr.write_bytes(writer)?;\n        self.dictionary_item_key_bytes.write_bytes(writer)?;\n        Ok(())\n    }\n}\n\n/// Inspects `key` argument whether it contains a dictionary variant, and checks if `stored_value`\n/// contains a [`CLValue`], then it will attempt a conversion from the held clvalue into\n/// [`DictionaryValue`] and returns the real [`CLValue`] held by it.\n///\n/// For any other combination of `key` and `stored_value` it returns its unmodified value.\npub fn handle_stored_dictionary_value(\n    key: Key,\n    stored_value: StoredValue,\n) -> Result<StoredValue, CLValueError> {\n    match (key, stored_value) {\n        (Key::Dictionary(_), StoredValue::CLValue(cl_value)) => {\n            let wrapped_cl_value: DictionaryValue = cl_value.into_t()?;\n            let cl_value = wrapped_cl_value.into_cl_value();\n            Ok(StoredValue::CLValue(cl_value))\n        }\n        (_, stored_value) => Ok(stored_value),\n    }\n}\n"
  },
  {
    "path": "types/src/cl_value/jsonrepr.rs",
    "content": "use alloc::{string::String, vec::Vec};\n\nuse serde::Serialize;\nuse serde_json::{json, Value};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, OPTION_NONE_TAG, OPTION_SOME_TAG, RESULT_ERR_TAG, RESULT_OK_TAG},\n    cl_type::CL_TYPE_RECURSION_DEPTH,\n    CLType, CLValue, Key, PublicKey, URef, U128, U256, U512,\n};\n\n/// Returns a best-effort attempt to convert the `CLValue` into a meaningful JSON value.\npub fn cl_value_to_json(cl_value: &CLValue) -> Option<Value> {\n    depth_limited_to_json(0, cl_value.cl_type(), cl_value.inner_bytes()).and_then(\n        |(json_value, remainder)| {\n            if remainder.is_empty() {\n                Some(json_value)\n            } else {\n                None\n            }\n        },\n    )\n}\n\nfn depth_limited_to_json<'a>(\n    depth: u8,\n    cl_type: &CLType,\n    bytes: &'a [u8],\n) -> Option<(Value, &'a [u8])> {\n    if depth >= CL_TYPE_RECURSION_DEPTH {\n        return None;\n    }\n    let depth = depth + 1;\n\n    match cl_type {\n        CLType::Bool => simple_type_to_json::<bool>(bytes),\n        CLType::I32 => simple_type_to_json::<i32>(bytes),\n        CLType::I64 => simple_type_to_json::<i64>(bytes),\n        CLType::U8 => simple_type_to_json::<u8>(bytes),\n        CLType::U32 => simple_type_to_json::<u32>(bytes),\n        CLType::U64 => simple_type_to_json::<u64>(bytes),\n        CLType::U128 => simple_type_to_json::<U128>(bytes),\n        CLType::U256 => simple_type_to_json::<U256>(bytes),\n        CLType::U512 => simple_type_to_json::<U512>(bytes),\n        CLType::Unit => simple_type_to_json::<()>(bytes),\n        CLType::String => simple_type_to_json::<String>(bytes),\n        CLType::Key => simple_type_to_json::<Key>(bytes),\n        CLType::URef => simple_type_to_json::<URef>(bytes),\n        CLType::PublicKey => simple_type_to_json::<PublicKey>(bytes),\n        CLType::Option(inner_cl_type) => {\n            let (variant, remainder) = u8::from_bytes(bytes).ok()?;\n            match 
variant {\n                OPTION_NONE_TAG => Some((Value::Null, remainder)),\n                OPTION_SOME_TAG => Some(depth_limited_to_json(depth, inner_cl_type, remainder)?),\n                _ => None,\n            }\n        }\n        CLType::List(inner_cl_type) => {\n            let (count, mut stream) = u32::from_bytes(bytes).ok()?;\n            let mut result: Vec<Value> = Vec::new();\n            for _ in 0..count {\n                let (value, remainder) = depth_limited_to_json(depth, inner_cl_type, stream)?;\n                result.push(value);\n                stream = remainder;\n            }\n            Some((json!(result), stream))\n        }\n        CLType::ByteArray(length) => {\n            let (bytes, remainder) = bytesrepr::safe_split_at(bytes, *length as usize).ok()?;\n            let hex_encoded_bytes = base16::encode_lower(&bytes);\n            Some((json![hex_encoded_bytes], remainder))\n        }\n        CLType::Result { ok, err } => {\n            let (variant, remainder) = u8::from_bytes(bytes).ok()?;\n            match variant {\n                RESULT_ERR_TAG => {\n                    let (value, remainder) = depth_limited_to_json(depth, err, remainder)?;\n                    Some((json!({ \"Err\": value }), remainder))\n                }\n                RESULT_OK_TAG => {\n                    let (value, remainder) = depth_limited_to_json(depth, ok, remainder)?;\n                    Some((json!({ \"Ok\": value }), remainder))\n                }\n                _ => None,\n            }\n        }\n        CLType::Map { key, value } => {\n            let (num_keys, mut stream) = u32::from_bytes(bytes).ok()?;\n            let mut result: Vec<Value> = Vec::new();\n            for _ in 0..num_keys {\n                let (k, remainder) = depth_limited_to_json(depth, key, stream)?;\n                let (v, remainder) = depth_limited_to_json(depth, value, remainder)?;\n                result.push(json!({\"key\": k, \"value\": v}));\n    
            stream = remainder;\n            }\n            Some((json!(result), stream))\n        }\n        CLType::Tuple1(arr) => {\n            let (t1, remainder) = depth_limited_to_json(depth, &arr[0], bytes)?;\n            Some((json!([t1]), remainder))\n        }\n        CLType::Tuple2(arr) => {\n            let (t1, remainder) = depth_limited_to_json(depth, &arr[0], bytes)?;\n            let (t2, remainder) = depth_limited_to_json(depth, &arr[1], remainder)?;\n            Some((json!([t1, t2]), remainder))\n        }\n        CLType::Tuple3(arr) => {\n            let (t1, remainder) = depth_limited_to_json(depth, &arr[0], bytes)?;\n            let (t2, remainder) = depth_limited_to_json(depth, &arr[1], remainder)?;\n            let (t3, remainder) = depth_limited_to_json(depth, &arr[2], remainder)?;\n            Some((json!([t1, t2, t3]), remainder))\n        }\n        CLType::Any => None,\n    }\n}\n\nfn simple_type_to_json<T: FromBytes + Serialize>(bytes: &[u8]) -> Option<(Value, &[u8])> {\n    let (value, remainder) = T::from_bytes(bytes).ok()?;\n    Some((json!(value), remainder))\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::{bytesrepr::ToBytes, AsymmetricType, CLTyped, SecretKey};\n    use alloc::collections::BTreeMap;\n\n    fn test_value<T: ToBytes + Serialize + Clone + CLTyped>(value: T) {\n        let cl_value = CLValue::from_t(value.clone()).unwrap();\n        let cl_value_as_json: Value = cl_value_to_json(&cl_value).unwrap();\n        let expected = json!(value);\n        assert_eq!(cl_value_as_json, expected);\n    }\n\n    #[test]\n    fn list_of_ints_to_json_value() {\n        test_value::<Vec<i32>>(vec![]);\n        test_value(vec![10u32, 12u32]);\n    }\n\n    #[test]\n    fn list_of_bools_to_json_value() {\n        test_value(vec![true, false]);\n    }\n\n    #[test]\n    fn list_of_string_to_json_value() {\n        test_value(vec![\"rust\", \"python\"]);\n    }\n\n    #[test]\n    fn 
list_of_public_keys_to_json_value() {\n        let a = PublicKey::from(\n            &SecretKey::secp256k1_from_bytes([3; SecretKey::SECP256K1_LENGTH]).unwrap(),\n        );\n        let b = PublicKey::from(\n            &SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap(),\n        );\n        let a_hex = a.to_hex();\n        let b_hex = b.to_hex();\n        let cl_value = CLValue::from_t(vec![a, b]).unwrap();\n        let cl_value_as_json: Value = cl_value_to_json(&cl_value).unwrap();\n        let expected = json!([a_hex, b_hex]);\n        assert_eq!(cl_value_as_json, expected);\n    }\n\n    #[test]\n    fn list_of_list_of_public_keys_to_json_value() {\n        let a = PublicKey::from(\n            &SecretKey::secp256k1_from_bytes([3; SecretKey::SECP256K1_LENGTH]).unwrap(),\n        );\n        let b = PublicKey::from(\n            &SecretKey::ed25519_from_bytes([3; PublicKey::ED25519_LENGTH]).unwrap(),\n        );\n        let c = PublicKey::from(\n            &SecretKey::ed25519_from_bytes([6; PublicKey::ED25519_LENGTH]).unwrap(),\n        );\n        let a_hex = a.to_hex();\n        let b_hex = b.to_hex();\n        let c_hex = c.to_hex();\n        let cl_value = CLValue::from_t(vec![vec![a, b], vec![c]]).unwrap();\n        let cl_value_as_json: Value = cl_value_to_json(&cl_value).unwrap();\n        let expected = json!([[a_hex, b_hex], [c_hex]]);\n        assert_eq!(cl_value_as_json, expected);\n    }\n\n    #[test]\n    fn map_of_string_to_list_of_ints_to_json_value() {\n        let key1 = String::from(\"first\");\n        let key2 = String::from(\"second\");\n        let value1 = vec![];\n        let value2 = vec![1, 2, 3];\n        let mut map: BTreeMap<String, Vec<i32>> = BTreeMap::new();\n        map.insert(key1.clone(), value1.clone());\n        map.insert(key2.clone(), value2.clone());\n        let cl_value = CLValue::from_t(map).unwrap();\n        let cl_value_as_json: Value = cl_value_to_json(&cl_value).unwrap();\n        let 
expected = json!([\n            { \"key\": key1, \"value\": value1 },\n            { \"key\": key2, \"value\": value2 }\n        ]);\n        assert_eq!(cl_value_as_json, expected);\n    }\n\n    #[test]\n    fn option_some_of_lists_to_json_value() {\n        test_value(Some(vec![1, 2, 3]));\n    }\n\n    #[test]\n    fn option_none_to_json_value() {\n        test_value(Option::<i32>::None);\n    }\n\n    #[test]\n    fn bytes_to_json_value() {\n        let bytes = [1_u8, 2];\n        let cl_value = CLValue::from_t(bytes).unwrap();\n        let cl_value_as_json = cl_value_to_json(&cl_value).unwrap();\n        let expected = json!(base16::encode_lower(&bytes));\n        assert_eq!(cl_value_as_json, expected);\n    }\n\n    #[test]\n    fn result_ok_to_json_value() {\n        test_value(Result::<Vec<i32>, String>::Ok(vec![1, 2, 3]));\n    }\n\n    #[test]\n    fn result_error_to_json_value() {\n        test_value(Result::<Vec<i32>, String>::Err(String::from(\"Upsss\")));\n    }\n\n    #[test]\n    fn tuples_to_json_value() {\n        let v1 = String::from(\"Hello\");\n        let v2 = vec![1, 2, 3];\n        let v3 = 1u8;\n\n        test_value((v1.clone(),));\n        test_value((v1.clone(), v2.clone()));\n        test_value((v1, v2, v3));\n    }\n\n    #[test]\n    fn json_encoding_nested_tuple_1_value_should_not_stack_overflow() {\n        // Returns a CLType corresponding to (((...(cl_type,),...),),) nested in tuples to\n        // `depth_limit`.\n        fn wrap_in_tuple1(cl_type: CLType, current_depth: usize, depth_limit: usize) -> CLType {\n            if current_depth == depth_limit {\n                return cl_type;\n            }\n            wrap_in_tuple1(\n                CLType::Tuple1([Box::new(cl_type)]),\n                current_depth + 1,\n                depth_limit,\n            )\n        }\n\n        for depth_limit in &[1, CL_TYPE_RECURSION_DEPTH as usize] {\n            let cl_type = wrap_in_tuple1(CLType::Unit, 1, *depth_limit);\n            
let cl_value = CLValue::from_components(cl_type, vec![]);\n            assert!(cl_value_to_json(&cl_value).is_some());\n        }\n\n        for depth_limit in &[CL_TYPE_RECURSION_DEPTH as usize + 1, 1000] {\n            let cl_type = wrap_in_tuple1(CLType::Unit, 1, *depth_limit);\n            let cl_value = CLValue::from_components(cl_type, vec![]);\n            assert!(cl_value_to_json(&cl_value).is_none());\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/cl_value/system_entity_registry.rs",
    "content": "//! The registry of system contracts.\n\nuse alloc::{collections::BTreeMap, string::String, vec::Vec};\n\n// #[cfg(feature = \"datasize\")]\n// use datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    system::STANDARD_PAYMENT,\n    AddressableEntityHash, CLType, CLTyped, HashAddr,\n};\n\n/// The system entity registry.\n#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Debug)]\npub struct SystemHashRegistry(BTreeMap<String, HashAddr>);\n\nimpl SystemHashRegistry {\n    /// Returns a new `SystemEntityRegistry`.\n    #[allow(clippy::new_without_default)] // This empty `new()` will be replaced in the future.\n    pub fn new() -> Self {\n        SystemHashRegistry(BTreeMap::new())\n    }\n\n    /// Inserts a contract's details into the registry.\n    pub fn insert(&mut self, contract_name: String, contract_hash: HashAddr) {\n        self.0.insert(contract_name, contract_hash);\n    }\n\n    /// Gets a contract's hash from the registry.\n    pub fn get(&self, contract_name: &str) -> Option<&HashAddr> {\n        self.0.get(contract_name)\n    }\n\n    /// Returns `true` if the given hash_addr exists as a value in the registry.\n    pub fn exists(&self, hash_addr: &HashAddr) -> bool {\n        self.0\n            .values()\n            .any(|system_contract_hash| system_contract_hash == hash_addr)\n    }\n\n    /// Remove standard payment from the contract registry.\n    pub fn remove_standard_payment(&mut self) -> Option<HashAddr> {\n        self.0.remove(STANDARD_PAYMENT)\n    }\n\n    pub fn inner(self) -> BTreeMap<String, HashAddr> {\n        self.0\n    }\n}\n\nimpl ToBytes for SystemHashRegistry {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n}\n\nimpl FromBytes for SystemHashRegistry {\n    fn from_bytes(bytes: 
&[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (inner, remainder) = BTreeMap::from_bytes(bytes)?;\n        Ok((SystemHashRegistry(inner), remainder))\n    }\n}\n\nimpl CLTyped for SystemHashRegistry {\n    fn cl_type() -> CLType {\n        BTreeMap::<String, AddressableEntityHash>::cl_type()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let mut system_entity_registry = SystemHashRegistry::new();\n        system_entity_registry.insert(\"a\".to_string(), [9; 32]);\n        bytesrepr::test_serialization_roundtrip(&system_entity_registry);\n    }\n\n    #[test]\n    fn bytesrepr_transparent() {\n        // this test ensures that the serialization is not affected by the wrapper, because\n        // this data is deserialized in other places as a BTree, e.g. GetAuctionInfo in the sidecar\n        let mut system_entity_registry = SystemHashRegistry::new();\n        system_entity_registry.insert(\"a\".to_string(), [9; 32]);\n        let serialized =\n            ToBytes::to_bytes(&system_entity_registry).expect(\"Unable to serialize data\");\n        let deserialized: BTreeMap<String, HashAddr> =\n            bytesrepr::deserialize_from_slice(serialized).expect(\"Unable to deserialize data\");\n        assert_eq!(system_entity_registry, SystemHashRegistry(deserialized));\n    }\n}\n"
  },
  {
    "path": "types/src/cl_value.rs",
    "content": "use alloc::{string::String, vec::Vec};\nuse core::fmt::{self, Display, Formatter};\n\nuse crate::{\n    bytesrepr::{self, Bytes, FromBytes, ToBytes, U32_SERIALIZED_LENGTH},\n    checksummed_hex, CLType, CLTyped,\n};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema};\nuse serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer};\n#[cfg(feature = \"json-schema\")]\nuse serde_json::Value;\n\nmod checksum_registry;\nmod dictionary;\n#[cfg(feature = \"json-schema\")]\npub use jsonrepr::cl_value_to_json;\n#[cfg(feature = \"json-schema\")]\nmod jsonrepr;\nmod system_entity_registry;\n\npub use checksum_registry::ChecksumRegistry;\npub use dictionary::{handle_stored_dictionary_value, DictionaryValue};\npub use system_entity_registry::SystemHashRegistry;\n\n/// Error while converting a [`CLValue`] into a given type.\n#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct CLTypeMismatch {\n    /// The [`CLType`] into which the `CLValue` was being converted.\n    pub expected: CLType,\n    /// The actual underlying [`CLType`] of this `CLValue`, i.e. 
the type from which it was\n    /// constructed.\n    pub found: CLType,\n}\n\nimpl Display for CLTypeMismatch {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        write!(\n            f,\n            \"Expected {:?} but found {:?}.\",\n            self.expected, self.found\n        )\n    }\n}\n\n/// Error relating to [`CLValue`] operations.\n#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub enum CLValueError {\n    /// An error while serializing or deserializing the underlying data.\n    Serialization(bytesrepr::Error),\n    /// A type mismatch while trying to convert a [`CLValue`] into a given type.\n    Type(CLTypeMismatch),\n}\n\nimpl From<bytesrepr::Error> for CLValueError {\n    fn from(error: bytesrepr::Error) -> Self {\n        CLValueError::Serialization(error)\n    }\n}\n\nimpl Display for CLValueError {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            CLValueError::Serialization(error) => write!(formatter, \"CLValue error: {}\", error),\n            CLValueError::Type(error) => write!(formatter, \"Type mismatch: {}\", error),\n        }\n    }\n}\n\n/// A Casper value, i.e. 
a value which can be stored and manipulated by smart contracts.\n///\n/// It holds the underlying data as a type-erased, serialized `Vec<u8>` and also holds the\n/// [`CLType`] of the underlying data as a separate member.\n#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct CLValue {\n    cl_type: CLType,\n    bytes: Bytes,\n}\n\nimpl CLValue {\n    /// Constructs a `CLValue` from `t`.\n    pub fn from_t<T: CLTyped + ToBytes>(t: T) -> Result<CLValue, CLValueError> {\n        let bytes = t.into_bytes()?;\n\n        Ok(CLValue {\n            cl_type: T::cl_type(),\n            bytes: bytes.into(),\n        })\n    }\n\n    /// Converts `self` into its underlying type.\n    pub fn to_t<T: CLTyped + FromBytes>(&self) -> Result<T, CLValueError> {\n        let expected = T::cl_type();\n\n        if self.cl_type == expected {\n            Ok(bytesrepr::deserialize_from_slice(&self.bytes)?)\n        } else {\n            Err(CLValueError::Type(CLTypeMismatch {\n                expected,\n                found: self.cl_type.clone(),\n            }))\n        }\n    }\n\n    /// Consumes and converts `self` back into its underlying type.\n    pub fn into_t<T: CLTyped + FromBytes>(self) -> Result<T, CLValueError> {\n        let expected = T::cl_type();\n\n        if self.cl_type == expected {\n            Ok(bytesrepr::deserialize_from_slice(&self.bytes)?)\n        } else {\n            Err(CLValueError::Type(CLTypeMismatch {\n                expected,\n                found: self.cl_type,\n            }))\n        }\n    }\n\n    /// A convenience method to create CLValue for a unit.\n    pub fn unit() -> Self {\n        CLValue::from_components(CLType::Unit, Vec::new())\n    }\n\n    // This is only required in order to implement `TryFrom<state::CLValue> for CLValue` (i.e. 
the\n    // conversion from the Protobuf `CLValue`) in a separate module to this one.\n    #[doc(hidden)]\n    pub fn from_components(cl_type: CLType, bytes: Vec<u8>) -> Self {\n        Self {\n            cl_type,\n            bytes: bytes.into(),\n        }\n    }\n\n    // This is only required in order to implement `From<CLValue> for state::CLValue` (i.e. the\n    // conversion to the Protobuf `CLValue`) in a separate module to this one.\n    #[doc(hidden)]\n    pub fn destructure(self) -> (CLType, Bytes) {\n        (self.cl_type, self.bytes)\n    }\n\n    /// The [`CLType`] of the underlying data.\n    pub fn cl_type(&self) -> &CLType {\n        &self.cl_type\n    }\n\n    /// Returns a reference to the serialized form of the underlying value held in this `CLValue`.\n    pub fn inner_bytes(&self) -> &Vec<u8> {\n        self.bytes.inner_bytes()\n    }\n\n    /// Returns the length of the `Vec<u8>` yielded after calling `self.to_bytes()`.\n    ///\n    /// Note, this method doesn't actually serialize `self`, and hence is relatively cheap.\n    pub fn serialized_length(&self) -> usize {\n        self.cl_type.serialized_length() + U32_SERIALIZED_LENGTH + self.bytes.len()\n    }\n}\n\nimpl ToBytes for CLValue {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.clone().into_bytes()\n    }\n\n    fn into_bytes(self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = self.bytes.into_bytes()?;\n        self.cl_type.append_bytes(&mut result)?;\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.bytes.serialized_length() + self.cl_type.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.bytes.write_bytes(writer)?;\n        self.cl_type.append_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for CLValue {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (bytes, remainder) 
= FromBytes::from_bytes(bytes)?;\n        let (cl_type, remainder) = FromBytes::from_bytes(remainder)?;\n        let cl_value = CLValue { cl_type, bytes };\n        Ok((cl_value, remainder))\n    }\n}\n\n/// We need to implement `JsonSchema` for `CLValue` as though it is a `CLValueJson`.\n#[cfg(feature = \"json-schema\")]\nimpl JsonSchema for CLValue {\n    fn schema_name() -> String {\n        \"CLValue\".to_string()\n    }\n\n    fn json_schema(gen: &mut SchemaGenerator) -> Schema {\n        <CLValueJson>::json_schema(gen)\n    }\n}\n\n/// A Casper value, i.e. a value which can be stored and manipulated by smart contracts.\n///\n/// It holds the underlying data as a type-erased, serialized `Vec<u8>` and also holds the CLType of\n/// the underlying data as a separate member.\n///\n/// The `parsed` field, representing the original value, is a convenience only available when a\n/// CLValue is encoded to JSON, and can always be set to null if preferred.\n#[derive(Serialize, Deserialize)]\n#[serde(deny_unknown_fields)]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[cfg_attr(feature = \"json-schema\", schemars(rename = \"CLValue\"))]\nstruct CLValueJson {\n    cl_type: CLType,\n    bytes: String,\n    #[cfg(feature = \"json-schema\")]\n    parsed: Option<Value>,\n}\n\nimpl Serialize for CLValue {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            CLValueJson {\n                cl_type: self.cl_type.clone(),\n                bytes: base16::encode_lower(&self.bytes),\n                #[cfg(feature = \"json-schema\")]\n                parsed: jsonrepr::cl_value_to_json(self),\n            }\n            .serialize(serializer)\n        } else {\n            (&self.cl_type, &self.bytes).serialize(serializer)\n        }\n    }\n}\n\nimpl<'de> Deserialize<'de> for CLValue {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        
let (cl_type, bytes) = if deserializer.is_human_readable() {\n            let json = CLValueJson::deserialize(deserializer)?;\n            (\n                json.cl_type.clone(),\n                checksummed_hex::decode(&json.bytes).map_err(D::Error::custom)?,\n            )\n        } else {\n            <(CLType, Vec<u8>)>::deserialize(deserializer)?\n        };\n        Ok(CLValue {\n            cl_type,\n            bytes: bytes.into(),\n        })\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use alloc::string::ToString;\n\n    #[cfg(feature = \"json-schema\")]\n    use schemars::schema_for;\n\n    use super::*;\n    use crate::{\n        account::{AccountHash, ACCOUNT_HASH_LENGTH},\n        key::KEY_HASH_LENGTH,\n        AccessRights, DeployHash, Digest, Key, PublicKey, TransferAddr, URef, TRANSFER_ADDR_LENGTH,\n        U128, U256, U512, UREF_ADDR_LENGTH,\n    };\n\n    #[cfg(feature = \"json-schema\")]\n    #[test]\n    fn json_schema() {\n        let json_clvalue_schema = schema_for!(CLValueJson);\n        let clvalue_schema = schema_for!(CLValue);\n        assert_eq!(json_clvalue_schema, clvalue_schema);\n    }\n\n    #[test]\n    fn serde_roundtrip() {\n        let cl_value = CLValue::from_t(true).unwrap();\n        let serialized = bincode::serialize(&cl_value).unwrap();\n        let decoded = bincode::deserialize(&serialized).unwrap();\n        assert_eq!(cl_value, decoded);\n    }\n\n    #[test]\n    fn json_roundtrip() {\n        let cl_value = CLValue::from_t(true).unwrap();\n        let json_string = serde_json::to_string_pretty(&cl_value).unwrap();\n        let decoded = serde_json::from_str(&json_string).unwrap();\n        assert_eq!(cl_value, decoded);\n    }\n\n    fn check_to_json<T: CLTyped + ToBytes + FromBytes>(value: T, expected: &str) {\n        let cl_value = CLValue::from_t(value).unwrap();\n        let cl_value_as_json = serde_json::to_string(&cl_value).unwrap();\n        // Remove the `serialized_bytes` field:\n        // Split the 
string at `,\"serialized_bytes\":`.\n        let pattern = r#\",\"bytes\":\"\"#;\n        let start_index = cl_value_as_json.find(pattern).unwrap();\n        let (start, end) = cl_value_as_json.split_at(start_index);\n        // Find the end of the value of the `bytes` field, and split there.\n        let mut json_without_serialize_bytes = start.to_string();\n        for (index, char) in end.char_indices().skip(pattern.len()) {\n            if char == '\"' {\n                let (_to_remove, to_keep) = end.split_at(index + 1);\n                json_without_serialize_bytes.push_str(to_keep);\n                break;\n            }\n        }\n        assert_eq!(json_without_serialize_bytes, expected);\n    }\n\n    mod simple_types {\n        use super::*;\n        use crate::crypto::SecretKey;\n\n        #[test]\n        fn bool_cl_value_should_encode_to_json() {\n            check_to_json(true, r#\"{\"cl_type\":\"Bool\",\"parsed\":true}\"#);\n            check_to_json(false, r#\"{\"cl_type\":\"Bool\",\"parsed\":false}\"#);\n        }\n\n        #[test]\n        fn i32_cl_value_should_encode_to_json() {\n            check_to_json(i32::MIN, r#\"{\"cl_type\":\"I32\",\"parsed\":-2147483648}\"#);\n            check_to_json(0_i32, r#\"{\"cl_type\":\"I32\",\"parsed\":0}\"#);\n            check_to_json(i32::MAX, r#\"{\"cl_type\":\"I32\",\"parsed\":2147483647}\"#);\n        }\n\n        #[test]\n        fn i64_cl_value_should_encode_to_json() {\n            check_to_json(\n                i64::MIN,\n                r#\"{\"cl_type\":\"I64\",\"parsed\":-9223372036854775808}\"#,\n            );\n            check_to_json(0_i64, r#\"{\"cl_type\":\"I64\",\"parsed\":0}\"#);\n            check_to_json(\n                i64::MAX,\n                r#\"{\"cl_type\":\"I64\",\"parsed\":9223372036854775807}\"#,\n            );\n        }\n\n        #[test]\n        fn u8_cl_value_should_encode_to_json() {\n            check_to_json(0_u8, r#\"{\"cl_type\":\"U8\",\"parsed\":0}\"#);\n      
      check_to_json(u8::MAX, r#\"{\"cl_type\":\"U8\",\"parsed\":255}\"#);\n        }\n\n        #[test]\n        fn u32_cl_value_should_encode_to_json() {\n            check_to_json(0_u32, r#\"{\"cl_type\":\"U32\",\"parsed\":0}\"#);\n            check_to_json(u32::MAX, r#\"{\"cl_type\":\"U32\",\"parsed\":4294967295}\"#);\n        }\n\n        #[test]\n        fn u64_cl_value_should_encode_to_json() {\n            check_to_json(0_u64, r#\"{\"cl_type\":\"U64\",\"parsed\":0}\"#);\n            check_to_json(\n                u64::MAX,\n                r#\"{\"cl_type\":\"U64\",\"parsed\":18446744073709551615}\"#,\n            );\n        }\n\n        #[test]\n        fn u128_cl_value_should_encode_to_json() {\n            check_to_json(U128::zero(), r#\"{\"cl_type\":\"U128\",\"parsed\":\"0\"}\"#);\n            check_to_json(\n                U128::MAX,\n                r#\"{\"cl_type\":\"U128\",\"parsed\":\"340282366920938463463374607431768211455\"}\"#,\n            );\n        }\n\n        #[test]\n        fn u256_cl_value_should_encode_to_json() {\n            check_to_json(U256::zero(), r#\"{\"cl_type\":\"U256\",\"parsed\":\"0\"}\"#);\n            check_to_json(\n                U256::MAX,\n                r#\"{\"cl_type\":\"U256\",\"parsed\":\"115792089237316195423570985008687907853269984665640564039457584007913129639935\"}\"#,\n            );\n        }\n\n        #[test]\n        fn u512_cl_value_should_encode_to_json() {\n            check_to_json(U512::zero(), r#\"{\"cl_type\":\"U512\",\"parsed\":\"0\"}\"#);\n            check_to_json(\n                U512::MAX,\n                r#\"{\"cl_type\":\"U512\",\"parsed\":\"13407807929942597099574024998205846127479365820592393377723561443721764030073546976801874298166903427690031858186486050853753882811946569946433649006084095\"}\"#,\n            );\n        }\n\n        #[test]\n        fn unit_cl_value_should_encode_to_json() {\n            check_to_json((), r#\"{\"cl_type\":\"Unit\",\"parsed\":null}\"#);\n        
}\n\n        #[test]\n        fn string_cl_value_should_encode_to_json() {\n            check_to_json(String::new(), r#\"{\"cl_type\":\"String\",\"parsed\":\"\"}\"#);\n            check_to_json(\n                \"test string\".to_string(),\n                r#\"{\"cl_type\":\"String\",\"parsed\":\"test string\"}\"#,\n            );\n        }\n\n        #[test]\n        fn key_cl_value_should_encode_to_json() {\n            let key_account = Key::Account(AccountHash::new([1; ACCOUNT_HASH_LENGTH]));\n            check_to_json(\n                key_account,\n                r#\"{\"cl_type\":\"Key\",\"parsed\":\"account-hash-0101010101010101010101010101010101010101010101010101010101010101\"}\"#,\n            );\n\n            let key_hash = Key::Hash([2; KEY_HASH_LENGTH]);\n            check_to_json(\n                key_hash,\n                r#\"{\"cl_type\":\"Key\",\"parsed\":\"hash-0202020202020202020202020202020202020202020202020202020202020202\"}\"#,\n            );\n\n            let key_uref = Key::URef(URef::new([3; UREF_ADDR_LENGTH], AccessRights::READ));\n            check_to_json(\n                key_uref,\n                r#\"{\"cl_type\":\"Key\",\"parsed\":\"uref-0303030303030303030303030303030303030303030303030303030303030303-001\"}\"#,\n            );\n\n            let key_transfer = Key::Transfer(TransferAddr::new([4; TRANSFER_ADDR_LENGTH]));\n            check_to_json(\n                key_transfer,\n                r#\"{\"cl_type\":\"Key\",\"parsed\":\"transfer-0404040404040404040404040404040404040404040404040404040404040404\"}\"#,\n            );\n\n            let key_deploy_info = Key::DeployInfo(DeployHash::from_raw([5; Digest::LENGTH]));\n            check_to_json(\n                key_deploy_info,\n                r#\"{\"cl_type\":\"Key\",\"parsed\":\"deploy-0505050505050505050505050505050505050505050505050505050505050505\"}\"#,\n            );\n        }\n\n        #[test]\n        fn uref_cl_value_should_encode_to_json() {\n            let 
uref = URef::new([6; UREF_ADDR_LENGTH], AccessRights::READ_ADD_WRITE);\n            check_to_json(\n                uref,\n                r#\"{\"cl_type\":\"URef\",\"parsed\":\"uref-0606060606060606060606060606060606060606060606060606060606060606-007\"}\"#,\n            );\n        }\n\n        #[test]\n        fn public_key_cl_value_should_encode_to_json() {\n            check_to_json(\n                PublicKey::from(\n                    &SecretKey::ed25519_from_bytes([7; SecretKey::ED25519_LENGTH]).unwrap(),\n                ),\n                r#\"{\"cl_type\":\"PublicKey\",\"parsed\":\"01ea4a6c63e29c520abef5507b132ec5f9954776aebebe7b92421eea691446d22c\"}\"#,\n            );\n            check_to_json(\n                PublicKey::from(\n                    &SecretKey::secp256k1_from_bytes([8; SecretKey::SECP256K1_LENGTH]).unwrap(),\n                ),\n                r#\"{\"cl_type\":\"PublicKey\",\"parsed\":\"0203f991f944d1e1954a7fc8b9bf62e0d78f015f4c07762d505e20e6c45260a3661b\"}\"#,\n            );\n        }\n    }\n\n    mod option {\n        use super::*;\n        use crate::crypto::SecretKey;\n\n        #[test]\n        fn bool_cl_value_should_encode_to_json() {\n            check_to_json(Some(true), r#\"{\"cl_type\":{\"Option\":\"Bool\"},\"parsed\":true}\"#);\n            check_to_json(\n                Some(false),\n                r#\"{\"cl_type\":{\"Option\":\"Bool\"},\"parsed\":false}\"#,\n            );\n            check_to_json(\n                Option::<bool>::None,\n                r#\"{\"cl_type\":{\"Option\":\"Bool\"},\"parsed\":null}\"#,\n            );\n        }\n\n        #[test]\n        fn i32_cl_value_should_encode_to_json() {\n            check_to_json(\n                Some(i32::MIN),\n                r#\"{\"cl_type\":{\"Option\":\"I32\"},\"parsed\":-2147483648}\"#,\n            );\n            check_to_json(Some(0_i32), r#\"{\"cl_type\":{\"Option\":\"I32\"},\"parsed\":0}\"#);\n            check_to_json(\n                
Some(i32::MAX),\n                r#\"{\"cl_type\":{\"Option\":\"I32\"},\"parsed\":2147483647}\"#,\n            );\n            check_to_json(\n                Option::<i32>::None,\n                r#\"{\"cl_type\":{\"Option\":\"I32\"},\"parsed\":null}\"#,\n            );\n        }\n\n        #[test]\n        fn i64_cl_value_should_encode_to_json() {\n            check_to_json(\n                Some(i64::MIN),\n                r#\"{\"cl_type\":{\"Option\":\"I64\"},\"parsed\":-9223372036854775808}\"#,\n            );\n            check_to_json(Some(0_i64), r#\"{\"cl_type\":{\"Option\":\"I64\"},\"parsed\":0}\"#);\n            check_to_json(\n                Some(i64::MAX),\n                r#\"{\"cl_type\":{\"Option\":\"I64\"},\"parsed\":9223372036854775807}\"#,\n            );\n            check_to_json(\n                Option::<i64>::None,\n                r#\"{\"cl_type\":{\"Option\":\"I64\"},\"parsed\":null}\"#,\n            );\n        }\n\n        #[test]\n        fn u8_cl_value_should_encode_to_json() {\n            check_to_json(Some(0_u8), r#\"{\"cl_type\":{\"Option\":\"U8\"},\"parsed\":0}\"#);\n            check_to_json(Some(u8::MAX), r#\"{\"cl_type\":{\"Option\":\"U8\"},\"parsed\":255}\"#);\n            check_to_json(\n                Option::<u8>::None,\n                r#\"{\"cl_type\":{\"Option\":\"U8\"},\"parsed\":null}\"#,\n            );\n        }\n\n        #[test]\n        fn u32_cl_value_should_encode_to_json() {\n            check_to_json(Some(0_u32), r#\"{\"cl_type\":{\"Option\":\"U32\"},\"parsed\":0}\"#);\n            check_to_json(\n                Some(u32::MAX),\n                r#\"{\"cl_type\":{\"Option\":\"U32\"},\"parsed\":4294967295}\"#,\n            );\n            check_to_json(\n                Option::<u32>::None,\n                r#\"{\"cl_type\":{\"Option\":\"U32\"},\"parsed\":null}\"#,\n            );\n        }\n\n        #[test]\n        fn u64_cl_value_should_encode_to_json() {\n            check_to_json(Some(0_u64), 
r#\"{\"cl_type\":{\"Option\":\"U64\"},\"parsed\":0}\"#);\n            check_to_json(\n                Some(u64::MAX),\n                r#\"{\"cl_type\":{\"Option\":\"U64\"},\"parsed\":18446744073709551615}\"#,\n            );\n            check_to_json(\n                Option::<u64>::None,\n                r#\"{\"cl_type\":{\"Option\":\"U64\"},\"parsed\":null}\"#,\n            );\n        }\n\n        #[test]\n        fn u128_cl_value_should_encode_to_json() {\n            check_to_json(\n                Some(U128::zero()),\n                r#\"{\"cl_type\":{\"Option\":\"U128\"},\"parsed\":\"0\"}\"#,\n            );\n            check_to_json(\n                Some(U128::MAX),\n                r#\"{\"cl_type\":{\"Option\":\"U128\"},\"parsed\":\"340282366920938463463374607431768211455\"}\"#,\n            );\n            check_to_json(\n                Option::<U128>::None,\n                r#\"{\"cl_type\":{\"Option\":\"U128\"},\"parsed\":null}\"#,\n            );\n        }\n\n        #[test]\n        fn u256_cl_value_should_encode_to_json() {\n            check_to_json(\n                Some(U256::zero()),\n                r#\"{\"cl_type\":{\"Option\":\"U256\"},\"parsed\":\"0\"}\"#,\n            );\n            check_to_json(\n                Some(U256::MAX),\n                r#\"{\"cl_type\":{\"Option\":\"U256\"},\"parsed\":\"115792089237316195423570985008687907853269984665640564039457584007913129639935\"}\"#,\n            );\n            check_to_json(\n                Option::<U256>::None,\n                r#\"{\"cl_type\":{\"Option\":\"U256\"},\"parsed\":null}\"#,\n            );\n        }\n\n        #[test]\n        fn u512_cl_value_should_encode_to_json() {\n            check_to_json(\n                Some(U512::zero()),\n                r#\"{\"cl_type\":{\"Option\":\"U512\"},\"parsed\":\"0\"}\"#,\n            );\n            check_to_json(\n                Some(U512::MAX),\n                
r#\"{\"cl_type\":{\"Option\":\"U512\"},\"parsed\":\"13407807929942597099574024998205846127479365820592393377723561443721764030073546976801874298166903427690031858186486050853753882811946569946433649006084095\"}\"#,\n            );\n            check_to_json(\n                Option::<U512>::None,\n                r#\"{\"cl_type\":{\"Option\":\"U512\"},\"parsed\":null}\"#,\n            );\n        }\n\n        #[test]\n        fn unit_cl_value_should_encode_to_json() {\n            check_to_json(Some(()), r#\"{\"cl_type\":{\"Option\":\"Unit\"},\"parsed\":null}\"#);\n            check_to_json(\n                Option::<()>::None,\n                r#\"{\"cl_type\":{\"Option\":\"Unit\"},\"parsed\":null}\"#,\n            );\n        }\n\n        #[test]\n        fn string_cl_value_should_encode_to_json() {\n            check_to_json(\n                Some(String::new()),\n                r#\"{\"cl_type\":{\"Option\":\"String\"},\"parsed\":\"\"}\"#,\n            );\n            check_to_json(\n                Some(\"test string\".to_string()),\n                r#\"{\"cl_type\":{\"Option\":\"String\"},\"parsed\":\"test string\"}\"#,\n            );\n            check_to_json(\n                Option::<String>::None,\n                r#\"{\"cl_type\":{\"Option\":\"String\"},\"parsed\":null}\"#,\n            );\n        }\n\n        #[test]\n        fn key_cl_value_should_encode_to_json() {\n            let key_account = Key::Account(AccountHash::new([1; ACCOUNT_HASH_LENGTH]));\n            check_to_json(\n                Some(key_account),\n                r#\"{\"cl_type\":{\"Option\":\"Key\"},\"parsed\":\"account-hash-0101010101010101010101010101010101010101010101010101010101010101\"}\"#,\n            );\n\n            let key_hash = Key::Hash([2; KEY_HASH_LENGTH]);\n            check_to_json(\n                Some(key_hash),\n                r#\"{\"cl_type\":{\"Option\":\"Key\"},\"parsed\":\"hash-0202020202020202020202020202020202020202020202020202020202020202\"}\"#,\n   
         );\n\n            let key_uref = Key::URef(URef::new([3; UREF_ADDR_LENGTH], AccessRights::READ));\n            check_to_json(\n                Some(key_uref),\n                r#\"{\"cl_type\":{\"Option\":\"Key\"},\"parsed\":\"uref-0303030303030303030303030303030303030303030303030303030303030303-001\"}\"#,\n            );\n\n            let key_transfer = Key::Transfer(TransferAddr::new([4; TRANSFER_ADDR_LENGTH]));\n            check_to_json(\n                Some(key_transfer),\n                r#\"{\"cl_type\":{\"Option\":\"Key\"},\"parsed\":\"transfer-0404040404040404040404040404040404040404040404040404040404040404\"}\"#,\n            );\n\n            let key_deploy_info = Key::DeployInfo(DeployHash::from_raw([5; Digest::LENGTH]));\n            check_to_json(\n                Some(key_deploy_info),\n                r#\"{\"cl_type\":{\"Option\":\"Key\"},\"parsed\":\"deploy-0505050505050505050505050505050505050505050505050505050505050505\"}\"#,\n            );\n\n            check_to_json(\n                Option::<Key>::None,\n                r#\"{\"cl_type\":{\"Option\":\"Key\"},\"parsed\":null}\"#,\n            )\n        }\n\n        #[test]\n        fn uref_cl_value_should_encode_to_json() {\n            let uref = URef::new([6; UREF_ADDR_LENGTH], AccessRights::READ_ADD_WRITE);\n            check_to_json(\n                Some(uref),\n                r#\"{\"cl_type\":{\"Option\":\"URef\"},\"parsed\":\"uref-0606060606060606060606060606060606060606060606060606060606060606-007\"}\"#,\n            );\n            check_to_json(\n                Option::<URef>::None,\n                r#\"{\"cl_type\":{\"Option\":\"URef\"},\"parsed\":null}\"#,\n            )\n        }\n\n        #[test]\n        fn public_key_cl_value_should_encode_to_json() {\n            check_to_json(\n                Some(PublicKey::from(\n                    &SecretKey::ed25519_from_bytes([7; SecretKey::ED25519_LENGTH]).unwrap(),\n                )),\n                
r#\"{\"cl_type\":{\"Option\":\"PublicKey\"},\"parsed\":\"01ea4a6c63e29c520abef5507b132ec5f9954776aebebe7b92421eea691446d22c\"}\"#,\n            );\n            check_to_json(\n                Some(PublicKey::from(\n                    &SecretKey::secp256k1_from_bytes([8; SecretKey::SECP256K1_LENGTH]).unwrap(),\n                )),\n                r#\"{\"cl_type\":{\"Option\":\"PublicKey\"},\"parsed\":\"0203f991f944d1e1954a7fc8b9bf62e0d78f015f4c07762d505e20e6c45260a3661b\"}\"#,\n            );\n            check_to_json(\n                Option::<PublicKey>::None,\n                r#\"{\"cl_type\":{\"Option\":\"PublicKey\"},\"parsed\":null}\"#,\n            )\n        }\n    }\n\n    mod result {\n        use super::*;\n        use crate::crypto::SecretKey;\n\n        #[test]\n        fn bool_cl_value_should_encode_to_json() {\n            check_to_json(\n                Result::<bool, i32>::Ok(true),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Bool\",\"err\":\"I32\"}},\"parsed\":{\"Ok\":true}}\"#,\n            );\n            check_to_json(\n                Result::<bool, u32>::Ok(true),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Bool\",\"err\":\"U32\"}},\"parsed\":{\"Ok\":true}}\"#,\n            );\n            check_to_json(\n                Result::<bool, ()>::Ok(true),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Bool\",\"err\":\"Unit\"}},\"parsed\":{\"Ok\":true}}\"#,\n            );\n            check_to_json(\n                Result::<bool, String>::Ok(true),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Bool\",\"err\":\"String\"}},\"parsed\":{\"Ok\":true}}\"#,\n            );\n            check_to_json(\n                Result::<bool, i32>::Err(-1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Bool\",\"err\":\"I32\"}},\"parsed\":{\"Err\":-1}}\"#,\n            );\n            check_to_json(\n                Result::<bool, u32>::Err(1),\n                
r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Bool\",\"err\":\"U32\"}},\"parsed\":{\"Err\":1}}\"#,\n            );\n            check_to_json(\n                Result::<bool, ()>::Err(()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Bool\",\"err\":\"Unit\"}},\"parsed\":{\"Err\":null}}\"#,\n            );\n            check_to_json(\n                Result::<bool, String>::Err(\"e\".to_string()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Bool\",\"err\":\"String\"}},\"parsed\":{\"Err\":\"e\"}}\"#,\n            );\n        }\n\n        #[test]\n        fn i32_cl_value_should_encode_to_json() {\n            check_to_json(\n                Result::<i32, i32>::Ok(-1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"I32\",\"err\":\"I32\"}},\"parsed\":{\"Ok\":-1}}\"#,\n            );\n            check_to_json(\n                Result::<i32, u32>::Ok(-1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"I32\",\"err\":\"U32\"}},\"parsed\":{\"Ok\":-1}}\"#,\n            );\n            check_to_json(\n                Result::<i32, ()>::Ok(-1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"I32\",\"err\":\"Unit\"}},\"parsed\":{\"Ok\":-1}}\"#,\n            );\n            check_to_json(\n                Result::<i32, String>::Ok(-1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"I32\",\"err\":\"String\"}},\"parsed\":{\"Ok\":-1}}\"#,\n            );\n            check_to_json(\n                Result::<i32, i32>::Err(-1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"I32\",\"err\":\"I32\"}},\"parsed\":{\"Err\":-1}}\"#,\n            );\n            check_to_json(\n                Result::<i32, u32>::Err(1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"I32\",\"err\":\"U32\"}},\"parsed\":{\"Err\":1}}\"#,\n            );\n            check_to_json(\n                Result::<i32, ()>::Err(()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"I32\",\"err\":\"Unit\"}},\"parsed\":{\"Err\":null}}\"#,\n       
     );\n            check_to_json(\n                Result::<i32, String>::Err(\"e\".to_string()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"I32\",\"err\":\"String\"}},\"parsed\":{\"Err\":\"e\"}}\"#,\n            );\n        }\n\n        #[test]\n        fn i64_cl_value_should_encode_to_json() {\n            check_to_json(\n                Result::<i64, i32>::Ok(-1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"I64\",\"err\":\"I32\"}},\"parsed\":{\"Ok\":-1}}\"#,\n            );\n            check_to_json(\n                Result::<i64, u32>::Ok(-1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"I64\",\"err\":\"U32\"}},\"parsed\":{\"Ok\":-1}}\"#,\n            );\n            check_to_json(\n                Result::<i64, ()>::Ok(-1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"I64\",\"err\":\"Unit\"}},\"parsed\":{\"Ok\":-1}}\"#,\n            );\n            check_to_json(\n                Result::<i64, String>::Ok(-1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"I64\",\"err\":\"String\"}},\"parsed\":{\"Ok\":-1}}\"#,\n            );\n            check_to_json(\n                Result::<i64, i32>::Err(-1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"I64\",\"err\":\"I32\"}},\"parsed\":{\"Err\":-1}}\"#,\n            );\n            check_to_json(\n                Result::<i64, u32>::Err(1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"I64\",\"err\":\"U32\"}},\"parsed\":{\"Err\":1}}\"#,\n            );\n            check_to_json(\n                Result::<i64, ()>::Err(()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"I64\",\"err\":\"Unit\"}},\"parsed\":{\"Err\":null}}\"#,\n            );\n            check_to_json(\n                Result::<i64, String>::Err(\"e\".to_string()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"I64\",\"err\":\"String\"}},\"parsed\":{\"Err\":\"e\"}}\"#,\n            );\n        }\n\n        #[test]\n        fn 
u8_cl_value_should_encode_to_json() {\n            check_to_json(\n                Result::<u8, i32>::Ok(1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U8\",\"err\":\"I32\"}},\"parsed\":{\"Ok\":1}}\"#,\n            );\n            check_to_json(\n                Result::<u8, u32>::Ok(1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U8\",\"err\":\"U32\"}},\"parsed\":{\"Ok\":1}}\"#,\n            );\n            check_to_json(\n                Result::<u8, ()>::Ok(1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U8\",\"err\":\"Unit\"}},\"parsed\":{\"Ok\":1}}\"#,\n            );\n            check_to_json(\n                Result::<u8, String>::Ok(1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U8\",\"err\":\"String\"}},\"parsed\":{\"Ok\":1}}\"#,\n            );\n            check_to_json(\n                Result::<u8, i32>::Err(-1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U8\",\"err\":\"I32\"}},\"parsed\":{\"Err\":-1}}\"#,\n            );\n            check_to_json(\n                Result::<u8, u32>::Err(1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U8\",\"err\":\"U32\"}},\"parsed\":{\"Err\":1}}\"#,\n            );\n            check_to_json(\n                Result::<u8, ()>::Err(()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U8\",\"err\":\"Unit\"}},\"parsed\":{\"Err\":null}}\"#,\n            );\n            check_to_json(\n                Result::<u8, String>::Err(\"e\".to_string()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U8\",\"err\":\"String\"}},\"parsed\":{\"Err\":\"e\"}}\"#,\n            );\n        }\n\n        #[test]\n        fn u32_cl_value_should_encode_to_json() {\n            check_to_json(\n                Result::<u32, i32>::Ok(1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U32\",\"err\":\"I32\"}},\"parsed\":{\"Ok\":1}}\"#,\n            );\n            check_to_json(\n                Result::<u32, u32>::Ok(1),\n                
r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U32\",\"err\":\"U32\"}},\"parsed\":{\"Ok\":1}}\"#,\n            );\n            check_to_json(\n                Result::<u32, ()>::Ok(1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U32\",\"err\":\"Unit\"}},\"parsed\":{\"Ok\":1}}\"#,\n            );\n            check_to_json(\n                Result::<u32, String>::Ok(1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U32\",\"err\":\"String\"}},\"parsed\":{\"Ok\":1}}\"#,\n            );\n            check_to_json(\n                Result::<u32, i32>::Err(-1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U32\",\"err\":\"I32\"}},\"parsed\":{\"Err\":-1}}\"#,\n            );\n            check_to_json(\n                Result::<u32, u32>::Err(1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U32\",\"err\":\"U32\"}},\"parsed\":{\"Err\":1}}\"#,\n            );\n            check_to_json(\n                Result::<u32, ()>::Err(()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U32\",\"err\":\"Unit\"}},\"parsed\":{\"Err\":null}}\"#,\n            );\n            check_to_json(\n                Result::<u32, String>::Err(\"e\".to_string()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U32\",\"err\":\"String\"}},\"parsed\":{\"Err\":\"e\"}}\"#,\n            );\n        }\n\n        #[test]\n        fn u64_cl_value_should_encode_to_json() {\n            check_to_json(\n                Result::<u64, i32>::Ok(1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U64\",\"err\":\"I32\"}},\"parsed\":{\"Ok\":1}}\"#,\n            );\n            check_to_json(\n                Result::<u64, u32>::Ok(1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U64\",\"err\":\"U32\"}},\"parsed\":{\"Ok\":1}}\"#,\n            );\n            check_to_json(\n                Result::<u64, ()>::Ok(1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U64\",\"err\":\"Unit\"}},\"parsed\":{\"Ok\":1}}\"#,\n            );\n           
 check_to_json(\n                Result::<u64, String>::Ok(1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U64\",\"err\":\"String\"}},\"parsed\":{\"Ok\":1}}\"#,\n            );\n            check_to_json(\n                Result::<u64, i32>::Err(-1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U64\",\"err\":\"I32\"}},\"parsed\":{\"Err\":-1}}\"#,\n            );\n            check_to_json(\n                Result::<u64, u32>::Err(1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U64\",\"err\":\"U32\"}},\"parsed\":{\"Err\":1}}\"#,\n            );\n            check_to_json(\n                Result::<u64, ()>::Err(()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U64\",\"err\":\"Unit\"}},\"parsed\":{\"Err\":null}}\"#,\n            );\n            check_to_json(\n                Result::<u64, String>::Err(\"e\".to_string()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U64\",\"err\":\"String\"}},\"parsed\":{\"Err\":\"e\"}}\"#,\n            );\n        }\n\n        #[test]\n        fn u128_cl_value_should_encode_to_json() {\n            check_to_json(\n                Result::<U128, i32>::Ok(1.into()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U128\",\"err\":\"I32\"}},\"parsed\":{\"Ok\":\"1\"}}\"#,\n            );\n            check_to_json(\n                Result::<U128, u32>::Ok(1.into()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U128\",\"err\":\"U32\"}},\"parsed\":{\"Ok\":\"1\"}}\"#,\n            );\n            check_to_json(\n                Result::<U128, ()>::Ok(1.into()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U128\",\"err\":\"Unit\"}},\"parsed\":{\"Ok\":\"1\"}}\"#,\n            );\n            check_to_json(\n                Result::<U128, String>::Ok(1.into()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U128\",\"err\":\"String\"}},\"parsed\":{\"Ok\":\"1\"}}\"#,\n            );\n            check_to_json(\n                Result::<U128, 
i32>::Err(-1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U128\",\"err\":\"I32\"}},\"parsed\":{\"Err\":-1}}\"#,\n            );\n            check_to_json(\n                Result::<U128, u32>::Err(1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U128\",\"err\":\"U32\"}},\"parsed\":{\"Err\":1}}\"#,\n            );\n            check_to_json(\n                Result::<U128, ()>::Err(()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U128\",\"err\":\"Unit\"}},\"parsed\":{\"Err\":null}}\"#,\n            );\n            check_to_json(\n                Result::<U128, String>::Err(\"e\".to_string()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U128\",\"err\":\"String\"}},\"parsed\":{\"Err\":\"e\"}}\"#,\n            );\n        }\n\n        #[test]\n        fn u256_cl_value_should_encode_to_json() {\n            check_to_json(\n                Result::<U256, i32>::Ok(1.into()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U256\",\"err\":\"I32\"}},\"parsed\":{\"Ok\":\"1\"}}\"#,\n            );\n            check_to_json(\n                Result::<U256, u32>::Ok(1.into()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U256\",\"err\":\"U32\"}},\"parsed\":{\"Ok\":\"1\"}}\"#,\n            );\n            check_to_json(\n                Result::<U256, ()>::Ok(1.into()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U256\",\"err\":\"Unit\"}},\"parsed\":{\"Ok\":\"1\"}}\"#,\n            );\n            check_to_json(\n                Result::<U256, String>::Ok(1.into()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U256\",\"err\":\"String\"}},\"parsed\":{\"Ok\":\"1\"}}\"#,\n            );\n            check_to_json(\n                Result::<U256, i32>::Err(-1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U256\",\"err\":\"I32\"}},\"parsed\":{\"Err\":-1}}\"#,\n            );\n            check_to_json(\n                Result::<U256, u32>::Err(1),\n                
r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U256\",\"err\":\"U32\"}},\"parsed\":{\"Err\":1}}\"#,\n            );\n            check_to_json(\n                Result::<U256, ()>::Err(()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U256\",\"err\":\"Unit\"}},\"parsed\":{\"Err\":null}}\"#,\n            );\n            check_to_json(\n                Result::<U256, String>::Err(\"e\".to_string()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U256\",\"err\":\"String\"}},\"parsed\":{\"Err\":\"e\"}}\"#,\n            );\n        }\n\n        #[test]\n        fn u512_cl_value_should_encode_to_json() {\n            check_to_json(\n                Result::<U512, i32>::Ok(1.into()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U512\",\"err\":\"I32\"}},\"parsed\":{\"Ok\":\"1\"}}\"#,\n            );\n            check_to_json(\n                Result::<U512, u32>::Ok(1.into()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U512\",\"err\":\"U32\"}},\"parsed\":{\"Ok\":\"1\"}}\"#,\n            );\n            check_to_json(\n                Result::<U512, ()>::Ok(1.into()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U512\",\"err\":\"Unit\"}},\"parsed\":{\"Ok\":\"1\"}}\"#,\n            );\n            check_to_json(\n                Result::<U512, String>::Ok(1.into()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U512\",\"err\":\"String\"}},\"parsed\":{\"Ok\":\"1\"}}\"#,\n            );\n            check_to_json(\n                Result::<U512, i32>::Err(-1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U512\",\"err\":\"I32\"}},\"parsed\":{\"Err\":-1}}\"#,\n            );\n            check_to_json(\n                Result::<U512, u32>::Err(1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U512\",\"err\":\"U32\"}},\"parsed\":{\"Err\":1}}\"#,\n            );\n            check_to_json(\n                Result::<U512, ()>::Err(()),\n                
r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U512\",\"err\":\"Unit\"}},\"parsed\":{\"Err\":null}}\"#,\n            );\n            check_to_json(\n                Result::<U512, String>::Err(\"e\".to_string()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"U512\",\"err\":\"String\"}},\"parsed\":{\"Err\":\"e\"}}\"#,\n            );\n        }\n\n        #[test]\n        fn unit_cl_value_should_encode_to_json() {\n            check_to_json(\n                Result::<(), i32>::Ok(()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Unit\",\"err\":\"I32\"}},\"parsed\":{\"Ok\":null}}\"#,\n            );\n            check_to_json(\n                Result::<(), u32>::Ok(()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Unit\",\"err\":\"U32\"}},\"parsed\":{\"Ok\":null}}\"#,\n            );\n            check_to_json(\n                Result::<(), ()>::Ok(()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Unit\",\"err\":\"Unit\"}},\"parsed\":{\"Ok\":null}}\"#,\n            );\n            check_to_json(\n                Result::<(), String>::Ok(()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Unit\",\"err\":\"String\"}},\"parsed\":{\"Ok\":null}}\"#,\n            );\n            check_to_json(\n                Result::<(), i32>::Err(-1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Unit\",\"err\":\"I32\"}},\"parsed\":{\"Err\":-1}}\"#,\n            );\n            check_to_json(\n                Result::<(), u32>::Err(1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Unit\",\"err\":\"U32\"}},\"parsed\":{\"Err\":1}}\"#,\n            );\n            check_to_json(\n                Result::<(), ()>::Err(()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Unit\",\"err\":\"Unit\"}},\"parsed\":{\"Err\":null}}\"#,\n            );\n            check_to_json(\n                Result::<(), String>::Err(\"e\".to_string()),\n                
r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Unit\",\"err\":\"String\"}},\"parsed\":{\"Err\":\"e\"}}\"#,\n            );\n        }\n\n        #[test]\n        fn string_cl_value_should_encode_to_json() {\n            check_to_json(\n                Result::<String, i32>::Ok(\"test string\".to_string()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"String\",\"err\":\"I32\"}},\"parsed\":{\"Ok\":\"test string\"}}\"#,\n            );\n            check_to_json(\n                Result::<String, u32>::Ok(\"test string\".to_string()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"String\",\"err\":\"U32\"}},\"parsed\":{\"Ok\":\"test string\"}}\"#,\n            );\n            check_to_json(\n                Result::<String, ()>::Ok(\"test string\".to_string()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"String\",\"err\":\"Unit\"}},\"parsed\":{\"Ok\":\"test string\"}}\"#,\n            );\n            check_to_json(\n                Result::<String, String>::Ok(\"test string\".to_string()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"String\",\"err\":\"String\"}},\"parsed\":{\"Ok\":\"test string\"}}\"#,\n            );\n            check_to_json(\n                Result::<String, i32>::Err(-1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"String\",\"err\":\"I32\"}},\"parsed\":{\"Err\":-1}}\"#,\n            );\n            check_to_json(\n                Result::<String, u32>::Err(1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"String\",\"err\":\"U32\"}},\"parsed\":{\"Err\":1}}\"#,\n            );\n            check_to_json(\n                Result::<String, ()>::Err(()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"String\",\"err\":\"Unit\"}},\"parsed\":{\"Err\":null}}\"#,\n            );\n            check_to_json(\n                Result::<String, String>::Err(\"e\".to_string()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"String\",\"err\":\"String\"}},\"parsed\":{\"Err\":\"e\"}}\"#,\n  
          );\n        }\n\n        #[test]\n        fn key_cl_value_should_encode_to_json() {\n            let key = Key::Hash([2; KEY_HASH_LENGTH]);\n            check_to_json(\n                Result::<Key, i32>::Ok(key),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Key\",\"err\":\"I32\"}},\"parsed\":{\"Ok\":\"hash-0202020202020202020202020202020202020202020202020202020202020202\"}}\"#,\n            );\n            check_to_json(\n                Result::<Key, u32>::Ok(key),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Key\",\"err\":\"U32\"}},\"parsed\":{\"Ok\":\"hash-0202020202020202020202020202020202020202020202020202020202020202\"}}\"#,\n            );\n            check_to_json(\n                Result::<Key, ()>::Ok(key),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Key\",\"err\":\"Unit\"}},\"parsed\":{\"Ok\":\"hash-0202020202020202020202020202020202020202020202020202020202020202\"}}\"#,\n            );\n            check_to_json(\n                Result::<Key, String>::Ok(key),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Key\",\"err\":\"String\"}},\"parsed\":{\"Ok\":\"hash-0202020202020202020202020202020202020202020202020202020202020202\"}}\"#,\n            );\n            check_to_json(\n                Result::<Key, i32>::Err(-1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Key\",\"err\":\"I32\"}},\"parsed\":{\"Err\":-1}}\"#,\n            );\n            check_to_json(\n                Result::<Key, u32>::Err(1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Key\",\"err\":\"U32\"}},\"parsed\":{\"Err\":1}}\"#,\n            );\n            check_to_json(\n                Result::<Key, ()>::Err(()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Key\",\"err\":\"Unit\"}},\"parsed\":{\"Err\":null}}\"#,\n            );\n            check_to_json(\n                Result::<Key, String>::Err(\"e\".to_string()),\n                
r#\"{\"cl_type\":{\"Result\":{\"ok\":\"Key\",\"err\":\"String\"}},\"parsed\":{\"Err\":\"e\"}}\"#,\n            );\n        }\n\n        #[test]\n        fn uref_cl_value_should_encode_to_json() {\n            let uref = URef::new([6; UREF_ADDR_LENGTH], AccessRights::READ_ADD_WRITE);\n            check_to_json(\n                Result::<URef, i32>::Ok(uref),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"URef\",\"err\":\"I32\"}},\"parsed\":{\"Ok\":\"uref-0606060606060606060606060606060606060606060606060606060606060606-007\"}}\"#,\n            );\n            check_to_json(\n                Result::<URef, u32>::Ok(uref),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"URef\",\"err\":\"U32\"}},\"parsed\":{\"Ok\":\"uref-0606060606060606060606060606060606060606060606060606060606060606-007\"}}\"#,\n            );\n            check_to_json(\n                Result::<URef, ()>::Ok(uref),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"URef\",\"err\":\"Unit\"}},\"parsed\":{\"Ok\":\"uref-0606060606060606060606060606060606060606060606060606060606060606-007\"}}\"#,\n            );\n            check_to_json(\n                Result::<URef, String>::Ok(uref),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"URef\",\"err\":\"String\"}},\"parsed\":{\"Ok\":\"uref-0606060606060606060606060606060606060606060606060606060606060606-007\"}}\"#,\n            );\n            check_to_json(\n                Result::<URef, i32>::Err(-1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"URef\",\"err\":\"I32\"}},\"parsed\":{\"Err\":-1}}\"#,\n            );\n            check_to_json(\n                Result::<URef, u32>::Err(1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"URef\",\"err\":\"U32\"}},\"parsed\":{\"Err\":1}}\"#,\n            );\n            check_to_json(\n                Result::<URef, ()>::Err(()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"URef\",\"err\":\"Unit\"}},\"parsed\":{\"Err\":null}}\"#,\n            );\n  
          check_to_json(\n                Result::<URef, String>::Err(\"e\".to_string()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"URef\",\"err\":\"String\"}},\"parsed\":{\"Err\":\"e\"}}\"#,\n            );\n        }\n\n        #[test]\n        fn public_key_cl_value_should_encode_to_json() {\n            let secret_key =\n                SecretKey::secp256k1_from_bytes([8; SecretKey::SECP256K1_LENGTH]).unwrap();\n            let public_key = PublicKey::from(&secret_key);\n            check_to_json(\n                Result::<PublicKey, i32>::Ok(public_key.clone()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"PublicKey\",\"err\":\"I32\"}},\"parsed\":{\"Ok\":\"0203f991f944d1e1954a7fc8b9bf62e0d78f015f4c07762d505e20e6c45260a3661b\"}}\"#,\n            );\n            check_to_json(\n                Result::<PublicKey, u32>::Ok(public_key.clone()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"PublicKey\",\"err\":\"U32\"}},\"parsed\":{\"Ok\":\"0203f991f944d1e1954a7fc8b9bf62e0d78f015f4c07762d505e20e6c45260a3661b\"}}\"#,\n            );\n            check_to_json(\n                Result::<PublicKey, ()>::Ok(public_key.clone()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"PublicKey\",\"err\":\"Unit\"}},\"parsed\":{\"Ok\":\"0203f991f944d1e1954a7fc8b9bf62e0d78f015f4c07762d505e20e6c45260a3661b\"}}\"#,\n            );\n            check_to_json(\n                Result::<PublicKey, String>::Ok(public_key),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"PublicKey\",\"err\":\"String\"}},\"parsed\":{\"Ok\":\"0203f991f944d1e1954a7fc8b9bf62e0d78f015f4c07762d505e20e6c45260a3661b\"}}\"#,\n            );\n            check_to_json(\n                Result::<PublicKey, i32>::Err(-1),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"PublicKey\",\"err\":\"I32\"}},\"parsed\":{\"Err\":-1}}\"#,\n            );\n            check_to_json(\n                Result::<PublicKey, u32>::Err(1),\n                
r#\"{\"cl_type\":{\"Result\":{\"ok\":\"PublicKey\",\"err\":\"U32\"}},\"parsed\":{\"Err\":1}}\"#,\n            );\n            check_to_json(\n                Result::<PublicKey, ()>::Err(()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"PublicKey\",\"err\":\"Unit\"}},\"parsed\":{\"Err\":null}}\"#,\n            );\n            check_to_json(\n                Result::<PublicKey, String>::Err(\"e\".to_string()),\n                r#\"{\"cl_type\":{\"Result\":{\"ok\":\"PublicKey\",\"err\":\"String\"}},\"parsed\":{\"Err\":\"e\"}}\"#,\n            );\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/contract_messages/error.rs",
    "content": "use core::array::TryFromSliceError;\n\nuse alloc::string::String;\nuse core::{\n    fmt::{self, Debug, Display, Formatter},\n    num::ParseIntError,\n};\n\n/// Error while parsing message hashes from string.\n#[derive(Debug)]\n#[non_exhaustive]\npub enum FromStrError {\n    /// The prefix is invalid.\n    InvalidPrefix,\n    /// No message index at the end of the string.\n    MissingMessageIndex,\n    /// String not formatted correctly.\n    Formatting,\n    /// Cannot parse entity hash.\n    EntityAddrParseError(crate::addressable_entity::FromStrError),\n    /// Cannot parse message topic hash.\n    MessageTopicParseError(String),\n    /// Failed to decode address portion of URef.\n    Hex(base16::DecodeError),\n    /// Failed to parse an int.\n    Int(ParseIntError),\n    /// The slice is the wrong length.\n    Length(TryFromSliceError),\n}\n\nimpl From<base16::DecodeError> for FromStrError {\n    fn from(error: base16::DecodeError) -> Self {\n        FromStrError::Hex(error)\n    }\n}\n\nimpl From<ParseIntError> for FromStrError {\n    fn from(error: ParseIntError) -> Self {\n        FromStrError::Int(error)\n    }\n}\n\nimpl From<TryFromSliceError> for FromStrError {\n    fn from(error: TryFromSliceError) -> Self {\n        FromStrError::Length(error)\n    }\n}\n\nimpl Display for FromStrError {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        match self {\n            FromStrError::InvalidPrefix => {\n                write!(f, \"prefix is invalid\")\n            }\n            FromStrError::MissingMessageIndex => {\n                write!(f, \"no message index found at the end of the string\")\n            }\n            FromStrError::Formatting => {\n                write!(f, \"string not properly formatted\")\n            }\n            FromStrError::EntityAddrParseError(err) => {\n                write!(f, \"could not parse entity addr: {}\", err)\n            }\n            FromStrError::MessageTopicParseError(err) => {\n     
           write!(f, \"could not parse topic hash: {}\", err)\n            }\n            FromStrError::Hex(error) => {\n                write!(f, \"failed to decode address portion from hex: {}\", error)\n            }\n            FromStrError::Int(error) => write!(f, \"failed to parse an int: {}\", error),\n            FromStrError::Length(error) => write!(f, \"address portion is wrong length: {}\", error),\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/contract_messages/messages.rs",
    "content": "use crate::{\n    bytesrepr::{self, Bytes, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    checksummed_hex, crypto, EntityAddr, Key,\n};\n\nuse alloc::{string::String, vec::Vec};\nuse core::{convert::TryFrom, fmt::Debug};\n#[cfg(any(feature = \"std\", test))]\nuse thiserror::Error;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Alphanumeric, DistString, Distribution, Standard},\n    Rng,\n};\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\n\nuse super::{FromStrError, TopicNameHash};\n\n/// Collection of multiple messages.\npub type Messages = Vec<Message>;\n\n/// The length of a message digest\npub const MESSAGE_CHECKSUM_LENGTH: usize = 32;\n\nconst MESSAGE_CHECKSUM_STRING_PREFIX: &str = \"message-checksum-\";\n\n/// A newtype wrapping an array which contains the raw bytes of\n/// the hash of the message emitted.\n#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Clone, Copy, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"Message checksum as a formatted string.\")\n)]\npub struct MessageChecksum(\n    #[cfg_attr(feature = \"json-schema\", schemars(skip, with = \"String\"))]\n    pub  [u8; MESSAGE_CHECKSUM_LENGTH],\n);\n\nimpl MessageChecksum {\n    /// Returns inner value of the message checksum.\n    pub fn value(&self) -> [u8; MESSAGE_CHECKSUM_LENGTH] {\n        self.0\n    }\n\n    /// Formats the `MessageChecksum` as a human readable string.\n    pub fn to_formatted_string(self) -> String {\n        format!(\n            \"{}{}\",\n            MESSAGE_CHECKSUM_STRING_PREFIX,\n            base16::encode_lower(&self.0),\n        )\n    }\n\n    /// Parses a string formatted 
as per `Self::to_formatted_string()` into a\n    /// `MessageChecksum`.\n    pub fn from_formatted_str(input: &str) -> Result<Self, FromStrError> {\n        let hex_addr = input\n            .strip_prefix(MESSAGE_CHECKSUM_STRING_PREFIX)\n            .ok_or(FromStrError::InvalidPrefix)?;\n\n        let bytes =\n            <[u8; MESSAGE_CHECKSUM_LENGTH]>::try_from(checksummed_hex::decode(hex_addr)?.as_ref())?;\n        Ok(MessageChecksum(bytes))\n    }\n}\n\nimpl ToBytes for MessageChecksum {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        buffer.append(&mut self.0.to_bytes()?);\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n}\n\nimpl FromBytes for MessageChecksum {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (checksum, rem) = FromBytes::from_bytes(bytes)?;\n        Ok((MessageChecksum(checksum), rem))\n    }\n}\n\nimpl Serialize for MessageChecksum {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            self.to_formatted_string().serialize(serializer)\n        } else {\n            self.0.serialize(serializer)\n        }\n    }\n}\n\nimpl<'de> Deserialize<'de> for MessageChecksum {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        if deserializer.is_human_readable() {\n            let formatted_string = String::deserialize(deserializer)?;\n            MessageChecksum::from_formatted_str(&formatted_string).map_err(SerdeError::custom)\n        } else {\n            let bytes = <[u8; MESSAGE_CHECKSUM_LENGTH]>::deserialize(deserializer)?;\n            Ok(MessageChecksum(bytes))\n        }\n    }\n}\n\nconst MESSAGE_PAYLOAD_TAG_LENGTH: usize = U8_SERIALIZED_LENGTH;\n\n/// Tag for a message payload that contains a human readable string.\npub 
const MESSAGE_PAYLOAD_STRING_TAG: u8 = 0;\n/// Tag for a message payload that contains raw bytes.\npub const MESSAGE_PAYLOAD_BYTES_TAG: u8 = 1;\n\n/// The payload of the message emitted by an addressable entity during execution.\n#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub enum MessagePayload {\n    /// Human readable string message.\n    String(String),\n    /// Message represented as raw bytes.\n    Bytes(Bytes),\n}\n\nimpl MessagePayload {\n    #[cfg(any(feature = \"testing\", test))]\n    /// Returns a random `MessagePayload`.\n    pub fn random(rng: &mut TestRng) -> Self {\n        let count = rng.gen_range(16..128);\n        if rng.gen() {\n            MessagePayload::String(Alphanumeric.sample_string(rng, count))\n        } else {\n            MessagePayload::Bytes(\n                std::iter::repeat_with(|| rng.gen())\n                    .take(count)\n                    .collect::<Vec<u8>>()\n                    .into(),\n            )\n        }\n    }\n}\n\nimpl<T> From<T> for MessagePayload\nwhere\n    T: Into<String>,\n{\n    fn from(value: T) -> Self {\n        Self::String(value.into())\n    }\n}\n\nimpl From<Bytes> for MessagePayload {\n    fn from(bytes: Bytes) -> Self {\n        Self::Bytes(bytes)\n    }\n}\n\nimpl ToBytes for MessagePayload {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        match self {\n            MessagePayload::String(message_string) => {\n                buffer.insert(0, MESSAGE_PAYLOAD_STRING_TAG);\n                buffer.extend(message_string.to_bytes()?);\n            }\n            MessagePayload::Bytes(message_bytes) => {\n                buffer.insert(0, MESSAGE_PAYLOAD_BYTES_TAG);\n                buffer.extend(message_bytes.to_bytes()?);\n            }\n        }\n        Ok(buffer)\n    }\n\n    
fn serialized_length(&self) -> usize {\n        MESSAGE_PAYLOAD_TAG_LENGTH\n            + match self {\n                MessagePayload::String(message_string) => message_string.serialized_length(),\n                MessagePayload::Bytes(message_bytes) => message_bytes.serialized_length(),\n            }\n    }\n}\n\nimpl FromBytes for MessagePayload {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            MESSAGE_PAYLOAD_STRING_TAG => {\n                let (message, remainder): (String, _) = FromBytes::from_bytes(remainder)?;\n                Ok((Self::String(message), remainder))\n            }\n            MESSAGE_PAYLOAD_BYTES_TAG => {\n                let (message_bytes, remainder): (Bytes, _) = FromBytes::from_bytes(remainder)?;\n                Ok((Self::Bytes(message_bytes), remainder))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n/// Message that was emitted by an addressable entity during execution.\n#[derive(Clone, Eq, PartialEq, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct Message {\n    /// The identity of the entity that produced the message.\n    entity_addr: EntityAddr,\n    /// The payload of the message.\n    message: MessagePayload,\n    /// The name of the topic on which the message was emitted on.\n    topic_name: String,\n    /// The hash of the name of the topic.\n    topic_name_hash: TopicNameHash,\n    /// Message index in the topic.\n    topic_index: u32,\n    /// Message index in the block.\n    block_index: u64,\n}\n\n#[cfg(any(feature = \"std\", test))]\n#[derive(Serialize, Deserialize)]\nstruct HumanReadableMessage {\n    entity_addr: String,\n    message: MessagePayload,\n    topic_name: String,\n    topic_name_hash: TopicNameHash,\n    topic_index: u32,\n    block_index: 
u64,\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl From<&Message> for HumanReadableMessage {\n    fn from(message: &Message) -> Self {\n        Self {\n            entity_addr: message.entity_addr.to_formatted_string(),\n            message: message.message.clone(),\n            topic_name: message.topic_name.clone(),\n            topic_name_hash: message.topic_name_hash,\n            topic_index: message.topic_index,\n            block_index: message.block_index,\n        }\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl From<&Message> for NonHumanReadableMessage {\n    fn from(message: &Message) -> Self {\n        Self {\n            entity_addr: message.entity_addr,\n            message: message.message.clone(),\n            topic_name: message.topic_name.clone(),\n            topic_name_hash: message.topic_name_hash,\n            topic_index: message.topic_index,\n            block_index: message.block_index,\n        }\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl From<NonHumanReadableMessage> for Message {\n    fn from(message: NonHumanReadableMessage) -> Self {\n        Self {\n            entity_addr: message.entity_addr,\n            message: message.message,\n            topic_name: message.topic_name,\n            topic_name_hash: message.topic_name_hash,\n            topic_index: message.topic_index,\n            block_index: message.block_index,\n        }\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\n#[derive(Error, Debug)]\nenum MessageDeserializationError {\n    #[error(\"{0}\")]\n    FailedToParseEntityAddr(crate::addressable_entity::FromStrError),\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl TryFrom<HumanReadableMessage> for Message {\n    type Error = MessageDeserializationError;\n    fn try_from(message: HumanReadableMessage) -> Result<Self, Self::Error> {\n        let entity_addr = EntityAddr::from_formatted_str(&message.entity_addr)\n            .map_err(Self::Error::FailedToParseEntityAddr)?;\n\n        Ok(Self 
{\n            entity_addr,\n            message: message.message,\n            topic_name: message.topic_name,\n            topic_name_hash: message.topic_name_hash,\n            topic_index: message.topic_index,\n            block_index: message.block_index,\n        })\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\n#[derive(Serialize, Deserialize)]\nstruct NonHumanReadableMessage {\n    entity_addr: EntityAddr,\n    message: MessagePayload,\n    topic_name: String,\n    topic_name_hash: TopicNameHash,\n    topic_index: u32,\n    block_index: u64,\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl Serialize for Message {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            HumanReadableMessage::from(self).serialize(serializer)\n        } else {\n            NonHumanReadableMessage::from(self).serialize(serializer)\n        }\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl<'de> Deserialize<'de> for Message {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        if deserializer.is_human_readable() {\n            let human_readable = HumanReadableMessage::deserialize(deserializer)?;\n            Message::try_from(human_readable)\n                .map_err(|error| SerdeError::custom(format!(\"{:?}\", error)))\n        } else {\n            let non_human_readable = NonHumanReadableMessage::deserialize(deserializer)?;\n            Ok(Message::from(non_human_readable))\n        }\n    }\n}\n\nimpl Message {\n    /// Creates new instance of [`Message`] with the specified source and message payload.\n    pub fn new(\n        source: EntityAddr,\n        message: MessagePayload,\n        topic_name: String,\n        topic_name_hash: TopicNameHash,\n        topic_index: u32,\n        block_index: u64,\n    ) -> Self {\n        Self {\n            entity_addr: source,\n            message,\n            topic_name,\n            
topic_name_hash,\n            topic_index,\n            block_index,\n        }\n    }\n\n    /// Returns a reference to the identity of the entity that produced the message.\n    pub fn entity_addr(&self) -> &EntityAddr {\n        &self.entity_addr\n    }\n\n    /// Returns a reference to the payload of the message.\n    pub fn payload(&self) -> &MessagePayload {\n        &self.message\n    }\n\n    /// Returns a reference to the name of the topic on which the message was emitted on.\n    pub fn topic_name(&self) -> &str {\n        &self.topic_name\n    }\n\n    /// Returns a reference to the hash of the name of the topic.\n    pub fn topic_name_hash(&self) -> &TopicNameHash {\n        &self.topic_name_hash\n    }\n\n    /// Returns the index of the message in the topic.\n    pub fn topic_index(&self) -> u32 {\n        self.topic_index\n    }\n\n    /// Returns the index of the message relative to other messages emitted in the block.\n    pub fn block_index(&self) -> u64 {\n        self.block_index\n    }\n\n    /// Returns a new [`Key::Message`] based on the information in the message.\n    /// This key can be used to query the checksum record for the message in global state.\n    pub fn message_key(&self) -> Key {\n        Key::message(self.entity_addr, self.topic_name_hash, self.topic_index)\n    }\n\n    /// Returns a new [`Key::Message`] based on the information in the message.\n    /// This key can be used to query the control record for the topic of this message in global\n    /// state.\n    pub fn topic_key(&self) -> Key {\n        Key::message_topic(self.entity_addr, self.topic_name_hash)\n    }\n\n    /// Returns the checksum of the message.\n    pub fn checksum(&self) -> Result<MessageChecksum, bytesrepr::Error> {\n        let input = (&self.block_index, &self.message).to_bytes()?;\n        let checksum = crypto::blake2b(input);\n\n        Ok(MessageChecksum(checksum))\n    }\n\n    /// Returns a random `Message`.\n    #[cfg(any(feature = \"testing\", 
test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        let count = rng.gen_range(16..128);\n        Self {\n            entity_addr: rng.gen(),\n            message: MessagePayload::random(rng),\n            topic_name: Alphanumeric.sample_string(rng, count),\n            topic_name_hash: rng.gen(),\n            topic_index: rng.gen(),\n            block_index: rng.gen(),\n        }\n    }\n}\n\nimpl ToBytes for Message {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        buffer.append(&mut self.entity_addr.to_bytes()?);\n        buffer.append(&mut self.message.to_bytes()?);\n        buffer.append(&mut self.topic_name.to_bytes()?);\n        buffer.append(&mut self.topic_name_hash.to_bytes()?);\n        buffer.append(&mut self.topic_index.to_bytes()?);\n        buffer.append(&mut self.block_index.to_bytes()?);\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.entity_addr.serialized_length()\n            + self.message.serialized_length()\n            + self.topic_name.serialized_length()\n            + self.topic_name_hash.serialized_length()\n            + self.topic_index.serialized_length()\n            + self.block_index.serialized_length()\n    }\n}\n\nimpl FromBytes for Message {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (entity_addr, rem) = FromBytes::from_bytes(bytes)?;\n        let (message, rem) = FromBytes::from_bytes(rem)?;\n        let (topic_name, rem) = FromBytes::from_bytes(rem)?;\n        let (topic_name_hash, rem) = FromBytes::from_bytes(rem)?;\n        let (topic_index, rem) = FromBytes::from_bytes(rem)?;\n        let (block_index, rem) = FromBytes::from_bytes(rem)?;\n        Ok((\n            Message {\n                entity_addr,\n                message,\n                topic_name,\n                topic_name_hash,\n                topic_index,\n                
block_index,\n            },\n            rem,\n        ))\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<Message> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Message {\n        let topic_name = Alphanumeric.sample_string(rng, 32);\n        let topic_name_hash = crypto::blake2b(&topic_name).into();\n        let message = Alphanumeric.sample_string(rng, 64).into();\n\n        Message {\n            entity_addr: rng.gen(),\n            message,\n            topic_name,\n            topic_name_hash,\n            topic_index: rng.gen(),\n            block_index: rng.gen(),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::bytesrepr;\n\n    use super::*;\n\n    #[test]\n    fn serialization_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let message_checksum = MessageChecksum([1; MESSAGE_CHECKSUM_LENGTH]);\n        bytesrepr::test_serialization_roundtrip(&message_checksum);\n\n        let message_payload = MessagePayload::random(rng);\n        bytesrepr::test_serialization_roundtrip(&message_payload);\n\n        let message = Message::random(rng);\n        bytesrepr::test_serialization_roundtrip(&message);\n    }\n\n    #[test]\n    fn json_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let message_payload = MessagePayload::random(rng);\n        let json_string = serde_json::to_string_pretty(&message_payload).unwrap();\n        let decoded: MessagePayload = serde_json::from_str(&json_string).unwrap();\n        assert_eq!(decoded, message_payload);\n    }\n\n    #[test]\n    fn message_json_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let message = Message::random(rng);\n        let json_string = serde_json::to_string_pretty(&message).unwrap();\n        let decoded: Message = serde_json::from_str(&json_string).unwrap();\n        assert_eq!(decoded, message);\n    }\n}\n"
  },
  {
    "path": "types/src/contract_messages/topics.rs",
    "content": "use crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    checksummed_hex, BlockTime,\n};\n\nuse core::convert::TryFrom;\n\nuse alloc::{string::String, vec::Vec};\nuse core::fmt::{Debug, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer};\n\nuse super::error::FromStrError;\n\n/// The length in bytes of a topic name hash.\npub const TOPIC_NAME_HASH_LENGTH: usize = 32;\n\n/// The hash of the name of the message topic.\n#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Clone, Copy, Hash)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"The hash of the name of the message topic.\")\n)]\npub struct TopicNameHash(\n    #[cfg_attr(feature = \"json-schema\", schemars(skip, with = \"String\"))]\n    pub  [u8; TOPIC_NAME_HASH_LENGTH],\n);\n\nimpl TopicNameHash {\n    /// Returns a new [`TopicNameHash`] based on the specified value.\n    pub const fn new(topic_name_hash: [u8; TOPIC_NAME_HASH_LENGTH]) -> TopicNameHash {\n        TopicNameHash(topic_name_hash)\n    }\n\n    /// Returns inner value of the topic hash.\n    pub fn value(&self) -> [u8; TOPIC_NAME_HASH_LENGTH] {\n        self.0\n    }\n\n    /// Formats the [`TopicNameHash`] as a prefixed, hex-encoded string.\n    pub fn to_formatted_string(self) -> String {\n        base16::encode_lower(&self.0)\n    }\n\n    /// Parses a string formatted as per `Self::to_formatted_string()` into a [`TopicNameHash`].\n    pub fn from_formatted_str(input: &str) -> Result<Self, FromStrError> {\n        let bytes =\n            <[u8; TOPIC_NAME_HASH_LENGTH]>::try_from(checksummed_hex::decode(input)?.as_ref())?;\n        
Ok(TopicNameHash(bytes))\n    }\n}\n\nimpl ToBytes for TopicNameHash {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        buffer.append(&mut self.0.to_bytes()?);\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n}\n\nimpl FromBytes for TopicNameHash {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (hash, rem) = FromBytes::from_bytes(bytes)?;\n        Ok((TopicNameHash(hash), rem))\n    }\n}\n\nimpl Serialize for TopicNameHash {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            self.to_formatted_string().serialize(serializer)\n        } else {\n            self.0.serialize(serializer)\n        }\n    }\n}\n\nimpl<'de> Deserialize<'de> for TopicNameHash {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        if deserializer.is_human_readable() {\n            let formatted_string = String::deserialize(deserializer)?;\n            TopicNameHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom)\n        } else {\n            let bytes = <[u8; TOPIC_NAME_HASH_LENGTH]>::deserialize(deserializer)?;\n            Ok(TopicNameHash(bytes))\n        }\n    }\n}\n\nimpl Display for TopicNameHash {\n    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {\n        write!(f, \"{}\", base16::encode_lower(&self.0))\n    }\n}\n\nimpl Debug for TopicNameHash {\n    fn fmt(&self, f: &mut Formatter) -> core::fmt::Result {\n        write!(f, \"MessageTopicHash({})\", base16::encode_lower(&self.0))\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<TopicNameHash> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> TopicNameHash {\n        TopicNameHash(rng.gen())\n    }\n}\n\nimpl From<[u8; 
TOPIC_NAME_HASH_LENGTH]> for TopicNameHash {\n    fn from(value: [u8; TOPIC_NAME_HASH_LENGTH]) -> Self {\n        TopicNameHash(value)\n    }\n}\n\n/// Summary of a message topic that will be stored in global state.\n#[derive(Eq, PartialEq, Clone, Debug, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct MessageTopicSummary {\n    /// Number of messages in this topic.\n    pub(crate) message_count: u32,\n    /// Block timestamp in which these messages were emitted.\n    pub(crate) blocktime: BlockTime,\n    /// Name of the topic.\n    pub(crate) topic_name: String,\n}\n\nimpl MessageTopicSummary {\n    /// Creates a new topic summary.\n    pub fn new(message_count: u32, blocktime: BlockTime, topic_name: String) -> Self {\n        Self {\n            message_count,\n            blocktime,\n            topic_name,\n        }\n    }\n\n    /// Returns the number of messages that were sent on this topic.\n    pub fn message_count(&self) -> u32 {\n        self.message_count\n    }\n\n    /// Returns the block time.\n    pub fn blocktime(&self) -> BlockTime {\n        self.blocktime\n    }\n\n    /// Returns the topic name.\n    pub fn topic_name(&self) -> &str {\n        &self.topic_name\n    }\n}\n\nimpl ToBytes for MessageTopicSummary {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        buffer.append(&mut self.message_count.to_bytes()?);\n        buffer.append(&mut self.blocktime.to_bytes()?);\n        buffer.append(&mut self.topic_name.to_bytes()?);\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.message_count.serialized_length()\n            + self.blocktime.serialized_length()\n            + self.topic_name.serialized_length()\n    }\n}\n\nimpl FromBytes for MessageTopicSummary {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), 
bytesrepr::Error> {\n        let (message_count, rem) = FromBytes::from_bytes(bytes)?;\n        let (blocktime, rem) = FromBytes::from_bytes(rem)?;\n        let (topic_name, rem) = FromBytes::from_bytes(rem)?;\n        Ok((\n            MessageTopicSummary {\n                message_count,\n                blocktime,\n                topic_name,\n            },\n            rem,\n        ))\n    }\n}\n\nconst TOPIC_OPERATION_ADD_TAG: u8 = 0;\nconst OPERATION_MAX_SERIALIZED_LEN: usize = 1;\n\n/// Operations that can be performed on message topics.\n#[derive(Debug, PartialEq)]\npub enum MessageTopicOperation {\n    /// Add a new message topic.\n    Add,\n}\n\nimpl MessageTopicOperation {\n    /// Maximum serialized length of a message topic operation.\n    pub const fn max_serialized_len() -> usize {\n        OPERATION_MAX_SERIALIZED_LEN\n    }\n}\n\nimpl ToBytes for MessageTopicOperation {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        match self {\n            MessageTopicOperation::Add => buffer.push(TOPIC_OPERATION_ADD_TAG),\n        }\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        match self {\n            MessageTopicOperation::Add => 1,\n        }\n    }\n}\n\nimpl FromBytes for MessageTopicOperation {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?;\n        match tag {\n            TOPIC_OPERATION_ADD_TAG => Ok((MessageTopicOperation::Add, remainder)),\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::bytesrepr;\n\n    use super::*;\n\n    #[test]\n    fn serialization_roundtrip() {\n        let topic_name_hash = TopicNameHash::new([0x4du8; TOPIC_NAME_HASH_LENGTH]);\n        bytesrepr::test_serialization_roundtrip(&topic_name_hash);\n\n        let topic_summary =\n     
       MessageTopicSummary::new(10, BlockTime::new(100), \"topic_name\".to_string());\n        bytesrepr::test_serialization_roundtrip(&topic_summary);\n\n        let topic_operation = MessageTopicOperation::Add;\n        bytesrepr::test_serialization_roundtrip(&topic_operation);\n    }\n\n    #[test]\n    fn json_roundtrip() {\n        let topic_name_hash = TopicNameHash::new([0x4du8; TOPIC_NAME_HASH_LENGTH]);\n        let json_string = serde_json::to_string_pretty(&topic_name_hash).unwrap();\n        let decoded: TopicNameHash = serde_json::from_str(&json_string).unwrap();\n        assert_eq!(decoded, topic_name_hash);\n\n        let topic_summary =\n            MessageTopicSummary::new(10, BlockTime::new(100), \"topic_name\".to_string());\n        let json_string = serde_json::to_string_pretty(&topic_summary).unwrap();\n        let decoded: MessageTopicSummary = serde_json::from_str(&json_string).unwrap();\n        assert_eq!(decoded, topic_summary);\n    }\n}\n"
  },
  {
    "path": "types/src/contract_messages.rs",
    "content": "//! Data types for interacting with contract level messages.\n\nmod error;\nmod messages;\nmod topics;\n\npub use error::FromStrError;\npub use messages::{Message, MessageChecksum, MessagePayload, Messages};\npub use topics::{\n    MessageTopicOperation, MessageTopicSummary, TopicNameHash, TOPIC_NAME_HASH_LENGTH,\n};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    EntityAddr,\n};\n\nuse alloc::{string::String, vec::Vec};\nuse core::fmt::{Debug, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nconst TOPIC_FORMATTED_STRING_PREFIX: &str = \"topic-\";\nconst MESSAGE_ADDR_PREFIX: &str = \"message-\";\n\n/// MessageTopicAddr\n#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct MessageAddr {\n    /// The entity addr.\n    entity_addr: EntityAddr,\n    /// The hash of the name of the message topic.\n    topic_name_hash: TopicNameHash,\n    /// The message index.\n    message_index: Option<u32>,\n}\n\nimpl MessageAddr {\n    /// Constructs a new topic address based on the addressable entity addr and the hash of the\n    /// message topic name.\n    pub const fn new_topic_addr(entity_addr: EntityAddr, topic_name_hash: TopicNameHash) -> Self {\n        Self {\n            entity_addr,\n            topic_name_hash,\n            message_index: None,\n        }\n    }\n\n    /// Constructs a new message address based on the addressable entity addr, the hash of the\n    /// message topic name and the message index in the topic.\n    pub const fn new_message_addr(\n        entity_addr: EntityAddr,\n        topic_name_hash: TopicNameHash,\n  
      message_index: u32,\n    ) -> Self {\n        Self {\n            entity_addr,\n            topic_name_hash,\n            message_index: Some(message_index),\n        }\n    }\n\n    /// Formats the [`MessageAddr`] as a prefixed, hex-encoded string.\n    pub fn to_formatted_string(self) -> String {\n        match self.message_index {\n            Some(index) => {\n                format!(\n                    \"{}{}-{}-{:x}\",\n                    MESSAGE_ADDR_PREFIX,\n                    self.entity_addr,\n                    self.topic_name_hash.to_formatted_string(),\n                    index,\n                )\n            }\n            None => {\n                format!(\n                    \"{}{}{}-{}\",\n                    MESSAGE_ADDR_PREFIX,\n                    TOPIC_FORMATTED_STRING_PREFIX,\n                    self.entity_addr,\n                    self.topic_name_hash.to_formatted_string(),\n                )\n            }\n        }\n    }\n\n    /// Parses a formatted string into a [`MessageAddr`].\n    pub fn from_formatted_str(input: &str) -> Result<Self, FromStrError> {\n        let remainder = input\n            .strip_prefix(MESSAGE_ADDR_PREFIX)\n            .ok_or(FromStrError::InvalidPrefix)?;\n\n        let (remainder, message_index) = match remainder.strip_prefix(TOPIC_FORMATTED_STRING_PREFIX)\n        {\n            Some(topic_string) => (topic_string, None),\n            None => {\n                let (remainder, message_index_str) = remainder\n                    .rsplit_once('-')\n                    .ok_or(FromStrError::MissingMessageIndex)?;\n                (remainder, Some(u32::from_str_radix(message_index_str, 16)?))\n            }\n        };\n\n        let (entity_addr_str, topic_name_hash_str) = remainder\n            .rsplit_once('-')\n            .ok_or(FromStrError::MissingMessageIndex)?;\n\n        let entity_addr = EntityAddr::from_formatted_str(entity_addr_str)\n            
.map_err(FromStrError::EntityAddrParseError)?;\n\n        let topic_name_hash = TopicNameHash::from_formatted_str(topic_name_hash_str)?;\n        Ok(MessageAddr {\n            entity_addr,\n            topic_name_hash,\n            message_index,\n        })\n    }\n\n    /// Returns the entity addr of this message topic.\n    pub fn entity_addr(&self) -> EntityAddr {\n        self.entity_addr\n    }\n\n    /// Returns the topic name hash of this message topic.\n    pub fn topic_name_hash(&self) -> TopicNameHash {\n        self.topic_name_hash\n    }\n\n    /// Returns None in the case of the key for a message topic summary,\n    /// else Some with the sequential index of the underlying message within the topic.\n    pub fn message_index(&self) -> Option<u32> {\n        self.message_index\n    }\n}\n\nimpl Display for MessageAddr {\n    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {\n        match self.message_index {\n            Some(index) => {\n                write!(\n                    f,\n                    \"{}-{}-{:x}\",\n                    self.entity_addr, self.topic_name_hash, index,\n                )\n            }\n            None => {\n                write!(f, \"{}-{}\", self.entity_addr, self.topic_name_hash)\n            }\n        }\n    }\n}\n\nimpl ToBytes for MessageAddr {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        buffer.append(&mut self.entity_addr.to_bytes()?);\n        buffer.append(&mut self.topic_name_hash.to_bytes()?);\n        buffer.append(&mut self.message_index.to_bytes()?);\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.entity_addr.serialized_length()\n            + self.topic_name_hash.serialized_length()\n            + self.message_index.serialized_length()\n    }\n}\n\nimpl FromBytes for MessageAddr {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n     
   let (entity_addr, rem) = FromBytes::from_bytes(bytes)?;\n        let (topic_hash, rem) = FromBytes::from_bytes(rem)?;\n        let (message_index, rem) = FromBytes::from_bytes(rem)?;\n        Ok((\n            MessageAddr {\n                entity_addr,\n                topic_name_hash: topic_hash,\n                message_index,\n            },\n            rem,\n        ))\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<MessageAddr> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> MessageAddr {\n        MessageAddr {\n            entity_addr: rng.gen(),\n            topic_name_hash: rng.gen(),\n            message_index: rng.gen(),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{bytesrepr, KEY_HASH_LENGTH};\n\n    use super::{topics::TOPIC_NAME_HASH_LENGTH, *};\n\n    #[test]\n    fn serialization_roundtrip() {\n        let topic_addr = MessageAddr::new_topic_addr(\n            EntityAddr::SmartContract([1; KEY_HASH_LENGTH]),\n            [2; TOPIC_NAME_HASH_LENGTH].into(),\n        );\n        bytesrepr::test_serialization_roundtrip(&topic_addr);\n\n        let message_addr = MessageAddr::new_message_addr(\n            EntityAddr::SmartContract([1; KEY_HASH_LENGTH]),\n            [2; TOPIC_NAME_HASH_LENGTH].into(),\n            3,\n        );\n        bytesrepr::test_serialization_roundtrip(&message_addr);\n    }\n}\n"
  },
  {
    "path": "types/src/contract_wasm.rs",
    "content": "use alloc::{format, string::String, vec::Vec};\nuse core::{\n    array::TryFromSliceError,\n    convert::TryFrom,\n    fmt::{self, Debug, Display, Formatter},\n};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema};\nuse serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer};\n\nuse crate::{\n    account,\n    addressable_entity::TryFromSliceForAccountHashError,\n    bytesrepr::{Bytes, Error, FromBytes, ToBytes},\n    checksummed_hex, uref, ByteCode, ByteCodeKind, CLType, CLTyped, HashAddr,\n};\n\nconst CONTRACT_WASM_MAX_DISPLAY_LEN: usize = 16;\nconst KEY_HASH_LENGTH: usize = 32;\nconst WASM_STRING_PREFIX: &str = \"contract-wasm-\";\n\n/// Associated error type of `TryFrom<&[u8]>` for `ContractWasmHash`.\n#[derive(Debug)]\npub struct TryFromSliceForContractHashError(());\n\n#[derive(Debug)]\n#[non_exhaustive]\npub enum FromStrError {\n    InvalidPrefix,\n    Hex(base16::DecodeError),\n    Account(TryFromSliceForAccountHashError),\n    Hash(TryFromSliceError),\n    AccountHash(account::FromStrError),\n    URef(uref::FromStrError),\n}\n\nimpl From<base16::DecodeError> for FromStrError {\n    fn from(error: base16::DecodeError) -> Self {\n        FromStrError::Hex(error)\n    }\n}\n\nimpl From<TryFromSliceForAccountHashError> for FromStrError {\n    fn from(error: TryFromSliceForAccountHashError) -> Self {\n        FromStrError::Account(error)\n    }\n}\n\nimpl From<TryFromSliceError> for FromStrError {\n    fn from(error: TryFromSliceError) -> Self {\n        FromStrError::Hash(error)\n    }\n}\n\nimpl From<account::FromStrError> for FromStrError {\n    fn from(error: account::FromStrError) -> Self {\n        FromStrError::AccountHash(error)\n    }\n}\n\nimpl From<uref::FromStrError> for FromStrError {\n    fn from(error: uref::FromStrError) -> Self {\n        FromStrError::URef(error)\n    }\n}\n\nimpl Display for 
FromStrError {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        match self {\n            FromStrError::InvalidPrefix => write!(f, \"invalid prefix\"),\n            FromStrError::Hex(error) => write!(f, \"decode from hex: {}\", error),\n            FromStrError::Account(error) => write!(f, \"account from string error: {:?}\", error),\n            FromStrError::Hash(error) => write!(f, \"hash from string error: {}\", error),\n            FromStrError::AccountHash(error) => {\n                write!(f, \"account hash from string error: {:?}\", error)\n            }\n            FromStrError::URef(error) => write!(f, \"uref from string error: {:?}\", error),\n        }\n    }\n}\n\n/// A newtype wrapping a `HashAddr` which is the raw bytes of\n/// the ContractWasmHash\n#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct ContractWasmHash(HashAddr);\n\nimpl ContractWasmHash {\n    /// Constructs a new `ContractWasmHash` from the raw bytes of the contract wasm hash.\n    pub const fn new(value: HashAddr) -> ContractWasmHash {\n        ContractWasmHash(value)\n    }\n\n    /// Returns the raw bytes of the contract hash as an array.\n    pub fn value(&self) -> HashAddr {\n        self.0\n    }\n\n    /// Returns the raw bytes of the contract hash as a `slice`.\n    pub fn as_bytes(&self) -> &[u8] {\n        &self.0\n    }\n\n    /// Formats the `ContractWasmHash` for users getting and putting.\n    pub fn to_formatted_string(self) -> String {\n        format!(\"{}{}\", WASM_STRING_PREFIX, base16::encode_lower(&self.0),)\n    }\n\n    /// Parses a string formatted as per `Self::to_formatted_string()` into a\n    /// `ContractWasmHash`.\n    pub fn from_formatted_str(input: &str) -> Result<Self, FromStrError> {\n        let remainder = input\n            .strip_prefix(WASM_STRING_PREFIX)\n            .ok_or(FromStrError::InvalidPrefix)?;\n        let bytes = 
HashAddr::try_from(checksummed_hex::decode(remainder)?.as_ref())?;\n        Ok(ContractWasmHash(bytes))\n    }\n}\n\nimpl Default for ContractWasmHash {\n    fn default() -> Self {\n        ContractWasmHash::new([0; 32])\n    }\n}\n\nimpl Display for ContractWasmHash {\n    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {\n        write!(f, \"{}\", base16::encode_lower(&self.0))\n    }\n}\n\nimpl Debug for ContractWasmHash {\n    fn fmt(&self, f: &mut Formatter) -> core::fmt::Result {\n        write!(f, \"ContractWasmHash({})\", base16::encode_lower(&self.0))\n    }\n}\n\nimpl CLTyped for ContractWasmHash {\n    fn cl_type() -> CLType {\n        CLType::ByteArray(KEY_HASH_LENGTH as u32)\n    }\n}\n\nimpl ToBytes for ContractWasmHash {\n    #[inline(always)]\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        self.0.to_bytes()\n    }\n\n    #[inline(always)]\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n\n    #[inline(always)]\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        self.0.write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for ContractWasmHash {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (bytes, rem) = FromBytes::from_bytes(bytes)?;\n        Ok((ContractWasmHash::new(bytes), rem))\n    }\n}\n\nimpl From<[u8; 32]> for ContractWasmHash {\n    fn from(bytes: [u8; 32]) -> Self {\n        ContractWasmHash(bytes)\n    }\n}\n\nimpl Serialize for ContractWasmHash {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            self.to_formatted_string().serialize(serializer)\n        } else {\n            self.0.serialize(serializer)\n        }\n    }\n}\n\nimpl<'de> Deserialize<'de> for ContractWasmHash {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        if deserializer.is_human_readable() {\n      
      let formatted_string = String::deserialize(deserializer)?;\n            ContractWasmHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom)\n        } else {\n            let bytes = HashAddr::deserialize(deserializer)?;\n            Ok(ContractWasmHash(bytes))\n        }\n    }\n}\n\nimpl AsRef<[u8]> for ContractWasmHash {\n    fn as_ref(&self) -> &[u8] {\n        self.0.as_ref()\n    }\n}\n\nimpl TryFrom<&[u8]> for ContractWasmHash {\n    type Error = TryFromSliceForContractHashError;\n\n    fn try_from(bytes: &[u8]) -> Result<Self, TryFromSliceForContractHashError> {\n        HashAddr::try_from(bytes)\n            .map(ContractWasmHash::new)\n            .map_err(|_| TryFromSliceForContractHashError(()))\n    }\n}\n\nimpl TryFrom<&Vec<u8>> for ContractWasmHash {\n    type Error = TryFromSliceForContractHashError;\n\n    fn try_from(bytes: &Vec<u8>) -> Result<Self, Self::Error> {\n        HashAddr::try_from(bytes as &[u8])\n            .map(ContractWasmHash::new)\n            .map_err(|_| TryFromSliceForContractHashError(()))\n    }\n}\n\n#[cfg(feature = \"json-schema\")]\nimpl JsonSchema for ContractWasmHash {\n    fn schema_name() -> String {\n        String::from(\"ContractWasmHash\")\n    }\n\n    fn json_schema(gen: &mut SchemaGenerator) -> Schema {\n        let schema = gen.subschema_for::<String>();\n        let mut schema_object = schema.into_object();\n        schema_object.metadata().description =\n            Some(\"The hash address of the contract wasm\".to_string());\n        schema_object.into()\n    }\n}\n\n/// A container for contract's WASM bytes.\n#[derive(PartialEq, Eq, Clone, Serialize, Deserialize)]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct ContractWasm {\n    bytes: Bytes,\n}\n\nimpl ContractWasm {\n    /// Creates a new `ContractWasm`.\n    pub fn new(bytes: Vec<u8>) -> Self {\n        Self {\n            bytes: bytes.into(),\n        
}\n    }\n\n    pub fn take_bytes(self) -> Vec<u8> {\n        self.bytes.into()\n    }\n}\n\nimpl Debug for ContractWasm {\n    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {\n        if self.bytes.len() > CONTRACT_WASM_MAX_DISPLAY_LEN {\n            write!(\n                f,\n                \"ContractWasm(0x{}...)\",\n                base16::encode_lower(&self.bytes[..CONTRACT_WASM_MAX_DISPLAY_LEN])\n            )\n        } else {\n            write!(f, \"ContractWasm(0x{})\", base16::encode_lower(&self.bytes))\n        }\n    }\n}\n\nimpl ToBytes for ContractWasm {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        self.bytes.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.bytes.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        self.bytes.write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for ContractWasm {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (bytes, rem1) = FromBytes::from_bytes(bytes)?;\n        Ok((ContractWasm { bytes }, rem1))\n    }\n}\n\nimpl From<ContractWasm> for ByteCode {\n    fn from(value: ContractWasm) -> Self {\n        ByteCode::new(ByteCodeKind::V1CasperWasm, value.take_bytes())\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    #[test]\n    fn test_debug_repr_of_short_wasm() {\n        const SIZE: usize = 8;\n        let wasm_bytes = vec![0; SIZE];\n        let contract_wasm = ContractWasm::new(wasm_bytes);\n        // String output is less than the bytes itself\n        assert_eq!(\n            format!(\"{:?}\", contract_wasm),\n            \"ContractWasm(0x0000000000000000)\"\n        );\n    }\n\n    #[test]\n    fn test_debug_repr_of_long_wasm() {\n        const SIZE: usize = 65;\n        let wasm_bytes = vec![0; SIZE];\n        let contract_wasm = ContractWasm::new(wasm_bytes);\n        // String output is less than the bytes itself\n        
assert_eq!(\n            format!(\"{:?}\", contract_wasm),\n            \"ContractWasm(0x00000000000000000000000000000000...)\"\n        );\n    }\n\n    #[test]\n    fn contract_wasm_hash_from_slice() {\n        let bytes: Vec<u8> = (0..32).collect();\n        let contract_hash =\n            HashAddr::try_from(&bytes[..]).expect(\"should create contract wasm hash\");\n        let contract_hash = ContractWasmHash::new(contract_hash);\n        assert_eq!(&bytes, &contract_hash.as_bytes());\n    }\n\n    #[test]\n    fn contract_wasm_hash_from_str() {\n        let contract_hash = ContractWasmHash([3; 32]);\n        let encoded = contract_hash.to_formatted_string();\n        let decoded = ContractWasmHash::from_formatted_str(&encoded).unwrap();\n        assert_eq!(contract_hash, decoded);\n\n        let invalid_prefix =\n            \"contractwasm-0000000000000000000000000000000000000000000000000000000000000000\";\n        assert!(ContractWasmHash::from_formatted_str(invalid_prefix).is_err());\n\n        let short_addr =\n            \"contract-wasm-00000000000000000000000000000000000000000000000000000000000000\";\n        assert!(ContractWasmHash::from_formatted_str(short_addr).is_err());\n\n        let long_addr =\n            \"contract-wasm-000000000000000000000000000000000000000000000000000000000000000000\";\n        assert!(ContractWasmHash::from_formatted_str(long_addr).is_err());\n\n        let invalid_hex =\n            \"contract-wasm-000000000000000000000000000000000000000000000000000000000000000g\";\n        assert!(ContractWasmHash::from_formatted_str(invalid_hex).is_err());\n    }\n\n    #[test]\n    fn contract_wasm_hash_serde_roundtrip() {\n        let contract_hash = ContractWasmHash([255; 32]);\n        let serialized = bincode::serialize(&contract_hash).unwrap();\n        let deserialized = bincode::deserialize(&serialized).unwrap();\n        assert_eq!(contract_hash, deserialized)\n    }\n\n    #[test]\n    fn contract_wasm_hash_json_roundtrip() 
{\n        let contract_hash = ContractWasmHash([255; 32]);\n        let json_string = serde_json::to_string_pretty(&contract_hash).unwrap();\n        let decoded = serde_json::from_str(&json_string).unwrap();\n        assert_eq!(contract_hash, decoded)\n    }\n}\n"
  },
  {
    "path": "types/src/contracts/named_keys.rs",
    "content": "use alloc::{collections::BTreeMap, string::String, vec::Vec};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\nuse serde_map_to_array::{BTreeMapToArray, KeyValueLabels};\n\n#[cfg(feature = \"json-schema\")]\nuse crate::execution::execution_result_v1::NamedKey;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    CLType, CLTyped, Key,\n};\n\n/// A collection of named keys.\n#[derive(Clone, Eq, PartialEq, Default, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\n#[rustfmt::skip]\npub struct NamedKeys(\n    #[serde(with = \"BTreeMapToArray::<String, Key, Labels>\")]\n    #[cfg_attr(feature = \"json-schema\", schemars(with = \"Vec<NamedKey>\"))]\n    BTreeMap<String, Key>,\n);\n\nimpl NamedKeys {\n    /// Constructs a new, empty `NamedKeys`.\n    pub const fn new() -> Self {\n        NamedKeys(BTreeMap::new())\n    }\n\n    /// Consumes `self`, returning the wrapped map.\n    pub fn into_inner(self) -> BTreeMap<String, Key> {\n        self.0\n    }\n\n    /// Inserts a named key.\n    ///\n    /// If the map did not have this name present, `None` is returned.  
If the map did have this\n    /// name present, the `Key` is updated, and the old `Key` is returned.\n    pub fn insert(&mut self, name: String, key: Key) -> Option<Key> {\n        self.0.insert(name, key)\n    }\n\n    /// Moves all elements from `other` into `self`.\n    pub fn append(&mut self, mut other: Self) {\n        self.0.append(&mut other.0)\n    }\n\n    /// Removes a named `Key`, returning the `Key` if it existed in the collection.\n    pub fn remove(&mut self, name: &str) -> Option<Key> {\n        self.0.remove(name)\n    }\n\n    /// Returns a reference to the `Key` under the given `name` if any.\n    pub fn get(&self, name: &str) -> Option<&Key> {\n        self.0.get(name)\n    }\n\n    /// Returns `true` if the named `Key` exists in the collection.\n    pub fn contains(&self, name: &str) -> bool {\n        self.0.contains_key(name)\n    }\n\n    /// Returns an iterator over the names.\n    pub fn names(&self) -> impl Iterator<Item = &String> {\n        self.0.keys()\n    }\n\n    /// Returns an iterator over the `Key`s (i.e. the map's values).\n    pub fn keys(&self) -> impl Iterator<Item = &Key> {\n        self.0.values()\n    }\n\n    /// Returns a mutable iterator over the `Key`s (i.e. 
the map's values).\n    pub fn keys_mut(&mut self) -> impl Iterator<Item = &mut Key> {\n        self.0.values_mut()\n    }\n\n    /// Returns an iterator over the name-key pairs.\n    pub fn iter(&self) -> impl Iterator<Item = (&String, &Key)> {\n        self.0.iter()\n    }\n\n    /// Returns the number of named `Key`s.\n    pub fn len(&self) -> usize {\n        self.0.len()\n    }\n\n    /// Returns `true` if there are no named `Key`s.\n    pub fn is_empty(&self) -> bool {\n        self.0.is_empty()\n    }\n}\n\nimpl From<BTreeMap<String, Key>> for NamedKeys {\n    fn from(value: BTreeMap<String, Key>) -> Self {\n        NamedKeys(value)\n    }\n}\n\nimpl ToBytes for NamedKeys {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.0.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n}\n\nimpl FromBytes for NamedKeys {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (named_keys, remainder) = BTreeMap::<String, Key>::from_bytes(bytes)?;\n        Ok((NamedKeys(named_keys), remainder))\n    }\n}\n\nimpl CLTyped for NamedKeys {\n    fn cl_type() -> CLType {\n        BTreeMap::<String, Key>::cl_type()\n    }\n}\n\nstruct Labels;\n\nimpl KeyValueLabels for Labels {\n    const KEY: &'static str = \"name\";\n    const VALUE: &'static str = \"key\";\n}\n\n#[cfg(test)]\nmod tests {\n    use rand::Rng;\n\n    use super::*;\n    use crate::testing::TestRng;\n\n    /// `NamedKeys` was previously (pre node v2.0.0) just an alias for `BTreeMap<String, Key>`.\n    /// Check if we serialize as the old form, that can deserialize to the new.\n    #[test]\n    fn should_be_backwards_compatible() {\n        let rng = &mut TestRng::new();\n       
 let mut named_keys = NamedKeys::new();\n        assert!(named_keys.insert(\"a\".to_string(), rng.gen()).is_none());\n        assert!(named_keys.insert(\"bb\".to_string(), rng.gen()).is_none());\n        assert!(named_keys.insert(\"ccc\".to_string(), rng.gen()).is_none());\n\n        let serialized_old = bincode::serialize(&named_keys.0).unwrap();\n        let parsed_new = bincode::deserialize(&serialized_old).unwrap();\n        assert_eq!(named_keys, parsed_new);\n\n        let serialized_old = bytesrepr::serialize(&named_keys.0).unwrap();\n        let parsed_new = bytesrepr::deserialize(serialized_old).unwrap();\n        assert_eq!(named_keys, parsed_new);\n    }\n\n    #[test]\n    fn should_match_field_names() {\n        // this test was written to ensure that the schema generated by schemars matches the serde\n        // encoding, both are configured using attributes and they can get out of sync\n        let mut named_keys = NamedKeys::new();\n        named_keys.insert(\"key\".to_string(), Key::Hash([0u8; 32]));\n        assert_eq!(\n            serde_json::to_value(&named_keys).expect(\"should serialize\"),\n            serde_json::json!([{\n                        Labels::KEY: \"key\",\n                        Labels::VALUE: \"hash-0000000000000000000000000000000000000000000000000000000000000000\"\n            }])\n        );\n    }\n}\n"
  },
  {
    "path": "types/src/contracts.rs",
    "content": "//! Data types for supporting contract headers feature.\n// TODO - remove once schemars stops causing warning.\n#![allow(clippy::field_reassign_with_default)]\n\nmod named_keys;\n\nuse alloc::{\n    collections::{BTreeMap, BTreeSet},\n    format,\n    string::{String, ToString},\n    vec::Vec,\n};\nuse core::{\n    array::TryFromSliceError,\n    convert::{TryFrom, TryInto},\n    fmt::{self, Debug, Display, Formatter},\n};\nuse serde_bytes::ByteBuf;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema};\nuse serde::{\n    de::{self, Error as SerdeError},\n    ser, Deserialize, Deserializer, Serialize, Serializer,\n};\n\npub use self::named_keys::NamedKeys;\n\nuse crate::{\n    account,\n    addressable_entity::TryFromSliceForAccountHashError,\n    bytesrepr::{self, FromBytes, ToBytes, U32_SERIALIZED_LENGTH},\n    checksummed_hex,\n    contract_wasm::ContractWasmHash,\n    package::PackageStatus,\n    serde_helpers::contract_package::HumanReadableContractPackage,\n    uref::{self, URef},\n    AddressableEntityHash, CLType, CLTyped, EntityAddr, EntityEntryPoint, EntityVersionKey,\n    EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints as EntityEntryPoints, Group,\n    Groups, HashAddr, Key, Package, PackageHash, Parameter, Parameters, ProtocolVersion,\n    KEY_HASH_LENGTH,\n};\n\nconst CONTRACT_STRING_PREFIX: &str = \"contract-\";\nconst CONTRACT_PACKAGE_STRING_PREFIX: &str = \"contract-package-\";\n// We need to support the legacy prefix of \"contract-package-wasm\".\nconst CONTRACT_PACKAGE_STRING_LEGACY_EXTRA_PREFIX: &str = \"wasm\";\n\n/// Set of errors which may happen when working with contract headers.\n#[derive(Debug, PartialEq, Eq)]\n#[repr(u8)]\n#[non_exhaustive]\npub enum Error {\n    /// Attempt to override an existing or previously existing version with a\n    /// new header (this is not allowed to ensure immutability of a 
given\n    /// version).\n    /// ```\n    /// # use casper_types::contracts::Error;\n    /// assert_eq!(1, Error::PreviouslyUsedVersion as u8);\n    /// ```\n    PreviouslyUsedVersion = 1,\n    /// Attempted to disable a contract that does not exist.\n    /// ```\n    /// # use casper_types::contracts::Error;\n    /// assert_eq!(2, Error::ContractNotFound as u8);\n    /// ```\n    ContractNotFound = 2,\n    /// Attempted to create a user group which already exists (use the update\n    /// function to change an existing user group).\n    /// ```\n    /// # use casper_types::contracts::Error;\n    /// assert_eq!(3, Error::GroupAlreadyExists as u8);\n    /// ```\n    GroupAlreadyExists = 3,\n    /// Attempted to add a new user group which exceeds the allowed maximum\n    /// number of groups.\n    /// ```\n    /// # use casper_types::contracts::Error;\n    /// assert_eq!(4, Error::MaxGroupsExceeded as u8);\n    /// ```\n    MaxGroupsExceeded = 4,\n    /// Attempted to add a new URef to a group, which resulted in the total\n    /// number of URefs across all user groups to exceed the allowed maximum.\n    /// ```\n    /// # use casper_types::contracts::Error;\n    /// assert_eq!(5, Error::MaxTotalURefsExceeded as u8);\n    /// ```\n    MaxTotalURefsExceeded = 5,\n    /// Attempted to remove a URef from a group, which does not exist in the\n    /// group.\n    /// ```\n    /// # use casper_types::contracts::Error;\n    /// assert_eq!(6, Error::GroupDoesNotExist as u8);\n    /// ```\n    GroupDoesNotExist = 6,\n    /// Attempted to remove unknown URef from the group.\n    /// ```\n    /// # use casper_types::contracts::Error;\n    /// assert_eq!(7, Error::UnableToRemoveURef as u8);\n    /// ```\n    UnableToRemoveURef = 7,\n    /// Group is use by at least one active contract.\n    /// ```\n    /// # use casper_types::contracts::Error;\n    /// assert_eq!(8, Error::GroupInUse as u8);\n    /// ```\n    GroupInUse = 8,\n    /// URef already exists in given group.\n    /// 
```\n    /// # use casper_types::contracts::Error;\n    /// assert_eq!(9, Error::URefAlreadyExists as u8);\n    /// ```\n    URefAlreadyExists = 9,\n}\n\nimpl TryFrom<u8> for Error {\n    type Error = ();\n\n    fn try_from(value: u8) -> Result<Self, Self::Error> {\n        let error = match value {\n            v if v == Self::PreviouslyUsedVersion as u8 => Self::PreviouslyUsedVersion,\n            v if v == Self::ContractNotFound as u8 => Self::ContractNotFound,\n            v if v == Self::GroupAlreadyExists as u8 => Self::GroupAlreadyExists,\n            v if v == Self::MaxGroupsExceeded as u8 => Self::MaxGroupsExceeded,\n            v if v == Self::MaxTotalURefsExceeded as u8 => Self::MaxTotalURefsExceeded,\n            v if v == Self::GroupDoesNotExist as u8 => Self::GroupDoesNotExist,\n            v if v == Self::UnableToRemoveURef as u8 => Self::UnableToRemoveURef,\n            v if v == Self::GroupInUse as u8 => Self::GroupInUse,\n            v if v == Self::URefAlreadyExists as u8 => Self::URefAlreadyExists,\n            _ => return Err(()),\n        };\n        Ok(error)\n    }\n}\n\n/// Associated error type of `TryFrom<&[u8]>` for `ContractHash`.\n#[derive(Debug)]\npub struct TryFromSliceForContractHashError(());\n\nimpl Display for TryFromSliceForContractHashError {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        write!(f, \"failed to retrieve from slice\")\n    }\n}\n\n/// An error from parsing a formatted contract string\n#[derive(Debug)]\n#[non_exhaustive]\npub enum FromStrError {\n    /// Invalid formatted string prefix.\n    InvalidPrefix,\n    /// Error when decoding a hex string\n    Hex(base16::DecodeError),\n    /// Error when parsing an account\n    Account(TryFromSliceForAccountHashError),\n    /// Error when parsing the hash.\n    Hash(TryFromSliceError),\n    /// Error when parsing an account hash.\n    AccountHash(account::FromStrError),\n    /// Error when parsing an uref.\n    URef(uref::FromStrError),\n}\n\nimpl 
From<base16::DecodeError> for FromStrError {\n    fn from(error: base16::DecodeError) -> Self {\n        FromStrError::Hex(error)\n    }\n}\n\nimpl From<TryFromSliceForAccountHashError> for FromStrError {\n    fn from(error: TryFromSliceForAccountHashError) -> Self {\n        FromStrError::Account(error)\n    }\n}\n\nimpl From<TryFromSliceError> for FromStrError {\n    fn from(error: TryFromSliceError) -> Self {\n        FromStrError::Hash(error)\n    }\n}\n\nimpl From<account::FromStrError> for FromStrError {\n    fn from(error: account::FromStrError) -> Self {\n        FromStrError::AccountHash(error)\n    }\n}\n\nimpl From<uref::FromStrError> for FromStrError {\n    fn from(error: uref::FromStrError) -> Self {\n        FromStrError::URef(error)\n    }\n}\n\nimpl Display for FromStrError {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        match self {\n            FromStrError::InvalidPrefix => write!(f, \"invalid prefix\"),\n            FromStrError::Hex(error) => write!(f, \"decode from hex: {}\", error),\n            FromStrError::Account(error) => write!(f, \"account from string error: {:?}\", error),\n            FromStrError::Hash(error) => write!(f, \"hash from string error: {}\", error),\n            FromStrError::AccountHash(error) => {\n                write!(f, \"account hash from string error: {:?}\", error)\n            }\n            FromStrError::URef(error) => write!(f, \"uref from string error: {:?}\", error),\n        }\n    }\n}\n\n/// Automatically incremented value for a contract version within a major `ProtocolVersion`.\npub type ContractVersion = u32;\n\n/// Within each discrete major `ProtocolVersion`, contract version resets to this value.\npub const CONTRACT_INITIAL_VERSION: ContractVersion = 1;\n\n/// Major element of `ProtocolVersion` a `ContractVersion` is compatible with.\npub type ProtocolVersionMajor = u32;\n\n/// Major element of `ProtocolVersion` combined with `ContractVersion`.\n#[derive(Debug, Clone, Copy, 
PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, Hash)]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct ContractVersionKey(ProtocolVersionMajor, ContractVersion);\n\nimpl ContractVersionKey {\n    /// Returns a new instance of ContractVersionKey with provided values.\n    pub fn new(\n        protocol_version_major: ProtocolVersionMajor,\n        contract_version: ContractVersion,\n    ) -> Self {\n        Self(protocol_version_major, contract_version)\n    }\n\n    /// Returns the major element of the protocol version this contract is compatible with.\n    pub fn protocol_version_major(self) -> ProtocolVersionMajor {\n        self.0\n    }\n\n    /// Returns the contract version within the protocol major version.\n    pub fn contract_version(self) -> ContractVersion {\n        self.1\n    }\n}\n\nimpl From<ContractVersionKey> for (ProtocolVersionMajor, ContractVersion) {\n    fn from(contract_version_key: ContractVersionKey) -> Self {\n        (contract_version_key.0, contract_version_key.1)\n    }\n}\n\n/// Serialized length of `ContractVersionKey`.\npub const CONTRACT_VERSION_KEY_SERIALIZED_LENGTH: usize =\n    U32_SERIALIZED_LENGTH + U32_SERIALIZED_LENGTH;\n\nimpl ToBytes for ContractVersionKey {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut ret = bytesrepr::unchecked_allocate_buffer(self);\n        ret.append(&mut self.0.to_bytes()?);\n        ret.append(&mut self.1.to_bytes()?);\n        Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize {\n        CONTRACT_VERSION_KEY_SERIALIZED_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.0.write_bytes(writer)?;\n        self.1.write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for ContractVersionKey {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (major, rem): (u32, 
&[u8]) = FromBytes::from_bytes(bytes)?;\n        let (contract, rem): (ContractVersion, &[u8]) = FromBytes::from_bytes(rem)?;\n        Ok((ContractVersionKey::new(major, contract), rem))\n    }\n}\n\nimpl fmt::Display for ContractVersionKey {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"{}.{}\", self.0, self.1)\n    }\n}\n\n/// Collection of contract versions.\npub type ContractVersions = BTreeMap<ContractVersionKey, ContractHash>;\n\n/// Collection of disabled contract versions. The runtime will not permit disabled\n/// contract versions to be executed.\npub type DisabledVersions = BTreeSet<ContractVersionKey>;\n\n/// A newtype wrapping a `HashAddr` which references a [`Contract`] in the global state.\n#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct ContractHash(HashAddr);\n\nimpl ContractHash {\n    /// Constructs a new `ContractHash` from the raw bytes of the contract hash.\n    pub const fn new(value: HashAddr) -> ContractHash {\n        ContractHash(value)\n    }\n\n    /// Returns the raw bytes of the contract hash as an array.\n    pub fn value(&self) -> HashAddr {\n        self.0\n    }\n\n    /// Returns the raw bytes of the contract hash as a `slice`.\n    pub fn as_bytes(&self) -> &[u8] {\n        &self.0\n    }\n\n    /// Formats the `ContractHash` for users getting and putting.\n    pub fn to_formatted_string(self) -> String {\n        format!(\n            \"{}{}\",\n            CONTRACT_STRING_PREFIX,\n            base16::encode_lower(&self.0),\n        )\n    }\n\n    /// Parses a string formatted as per `Self::to_formatted_string()` into a\n    /// `ContractHash`.\n    pub fn from_formatted_str(input: &str) -> Result<Self, FromStrError> {\n        let remainder = input\n            .strip_prefix(CONTRACT_STRING_PREFIX)\n            .ok_or(FromStrError::InvalidPrefix)?;\n        let bytes = 
HashAddr::try_from(checksummed_hex::decode(remainder)?.as_ref())?;\n        Ok(ContractHash(bytes))\n    }\n}\n\nimpl Display for ContractHash {\n    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {\n        write!(f, \"{}\", base16::encode_lower(&self.0))\n    }\n}\n\nimpl Debug for ContractHash {\n    fn fmt(&self, f: &mut Formatter) -> core::fmt::Result {\n        write!(f, \"ContractHash({})\", base16::encode_lower(&self.0))\n    }\n}\n\nimpl CLTyped for ContractHash {\n    fn cl_type() -> CLType {\n        CLType::ByteArray(KEY_HASH_LENGTH as u32)\n    }\n}\n\nimpl From<AddressableEntityHash> for ContractHash {\n    fn from(entity_hash: AddressableEntityHash) -> Self {\n        ContractHash::new(entity_hash.value())\n    }\n}\n\nimpl ToBytes for ContractHash {\n    #[inline(always)]\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    #[inline(always)]\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n\n    #[inline(always)]\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        writer.extend_from_slice(&self.0);\n        Ok(())\n    }\n}\n\nimpl FromBytes for ContractHash {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (bytes, rem) = FromBytes::from_bytes(bytes)?;\n        Ok((ContractHash::new(bytes), rem))\n    }\n}\n\nimpl From<[u8; 32]> for ContractHash {\n    fn from(bytes: [u8; 32]) -> Self {\n        ContractHash(bytes)\n    }\n}\n\nimpl Serialize for ContractHash {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            self.to_formatted_string().serialize(serializer)\n        } else {\n            self.0.serialize(serializer)\n        }\n    }\n}\n\nimpl<'de> Deserialize<'de> for ContractHash {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n       
 if deserializer.is_human_readable() {\n            let formatted_string = String::deserialize(deserializer)?;\n            ContractHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom)\n        } else {\n            let bytes = HashAddr::deserialize(deserializer)?;\n            Ok(ContractHash(bytes))\n        }\n    }\n}\n\nimpl AsRef<[u8]> for ContractHash {\n    fn as_ref(&self) -> &[u8] {\n        self.0.as_ref()\n    }\n}\n\nimpl TryFrom<&[u8]> for ContractHash {\n    type Error = TryFromSliceForContractHashError;\n\n    fn try_from(bytes: &[u8]) -> Result<Self, TryFromSliceForContractHashError> {\n        HashAddr::try_from(bytes)\n            .map(ContractHash::new)\n            .map_err(|_| TryFromSliceForContractHashError(()))\n    }\n}\n\nimpl TryFrom<&Vec<u8>> for ContractHash {\n    type Error = TryFromSliceForContractHashError;\n\n    fn try_from(bytes: &Vec<u8>) -> Result<Self, Self::Error> {\n        HashAddr::try_from(bytes as &[u8])\n            .map(ContractHash::new)\n            .map_err(|_| TryFromSliceForContractHashError(()))\n    }\n}\n\n#[cfg(feature = \"json-schema\")]\nimpl JsonSchema for ContractHash {\n    fn schema_name() -> String {\n        String::from(\"ContractHash\")\n    }\n\n    fn json_schema(gen: &mut SchemaGenerator) -> Schema {\n        let schema = gen.subschema_for::<String>();\n        let mut schema_object = schema.into_object();\n        schema_object.metadata().description = Some(\"The hash address of the contract\".to_string());\n        schema_object.into()\n    }\n}\n\n/// A newtype wrapping a `HashAddr` which references a [`ContractPackage`] in the global state.\n#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct ContractPackageHash(HashAddr);\n\nimpl ContractPackageHash {\n    /// Constructs a new `ContractPackageHash` from the raw bytes of the contract package hash.\n    pub const fn new(value: HashAddr) -> 
ContractPackageHash {\n        ContractPackageHash(value)\n    }\n\n    /// Returns the raw bytes of the contract hash as an array.\n    pub fn value(&self) -> HashAddr {\n        self.0\n    }\n\n    /// Returns the raw bytes of the contract hash as a `slice`.\n    pub fn as_bytes(&self) -> &[u8] {\n        &self.0\n    }\n\n    /// Formats the `ContractPackageHash` for users getting and putting.\n    pub fn to_formatted_string(self) -> String {\n        format!(\n            \"{}{}\",\n            CONTRACT_PACKAGE_STRING_PREFIX,\n            base16::encode_lower(&self.0),\n        )\n    }\n\n    /// Parses a string formatted as per `Self::to_formatted_string()` into a\n    /// `ContractPackageHash`.\n    pub fn from_formatted_str(input: &str) -> Result<Self, FromStrError> {\n        let remainder = input\n            .strip_prefix(CONTRACT_PACKAGE_STRING_PREFIX)\n            .ok_or(FromStrError::InvalidPrefix)?;\n\n        let hex_addr = remainder\n            .strip_prefix(CONTRACT_PACKAGE_STRING_LEGACY_EXTRA_PREFIX)\n            .unwrap_or(remainder);\n\n        let bytes = HashAddr::try_from(checksummed_hex::decode(hex_addr)?.as_ref())?;\n        Ok(ContractPackageHash(bytes))\n    }\n}\n\nimpl From<PackageHash> for ContractPackageHash {\n    fn from(value: PackageHash) -> Self {\n        ContractPackageHash::new(value.value())\n    }\n}\n\nimpl Display for ContractPackageHash {\n    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {\n        write!(f, \"{}\", base16::encode_lower(&self.0))\n    }\n}\n\nimpl Debug for ContractPackageHash {\n    fn fmt(&self, f: &mut Formatter) -> core::fmt::Result {\n        write!(f, \"ContractPackageHash({})\", base16::encode_lower(&self.0))\n    }\n}\n\nimpl CLTyped for ContractPackageHash {\n    fn cl_type() -> CLType {\n        CLType::ByteArray(KEY_HASH_LENGTH as u32)\n    }\n}\n\nimpl ToBytes for ContractPackageHash {\n    #[inline(always)]\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n       
        self.0.to_bytes()
    }

    #[inline(always)]
    fn serialized_length(&self) -> usize {
        self.0.serialized_length()
    }

    #[inline(always)]
    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {
        writer.extend_from_slice(&self.0);
        Ok(())
    }
}

impl FromBytes for ContractPackageHash {
    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
        let (bytes, rem) = FromBytes::from_bytes(bytes)?;
        Ok((ContractPackageHash::new(bytes), rem))
    }
}

impl From<[u8; 32]> for ContractPackageHash {
    fn from(bytes: [u8; 32]) -> Self {
        ContractPackageHash(bytes)
    }
}

impl Serialize for ContractPackageHash {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        // Human-readable formats (e.g. JSON) get the prefixed hex string;
        // binary formats get the raw 32-byte array.
        if serializer.is_human_readable() {
            self.to_formatted_string().serialize(serializer)
        } else {
            self.0.serialize(serializer)
        }
    }
}

impl<'de> Deserialize<'de> for ContractPackageHash {
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        // Mirror of `Serialize` above: prefixed string for human-readable
        // formats, raw bytes otherwise.
        if deserializer.is_human_readable() {
            let formatted_string = String::deserialize(deserializer)?;
            ContractPackageHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom)
        } else {
            let bytes = HashAddr::deserialize(deserializer)?;
            Ok(ContractPackageHash(bytes))
        }
    }
}

impl AsRef<[u8]> for ContractPackageHash {
    fn as_ref(&self) -> &[u8] {
        self.0.as_ref()
    }
}

impl TryFrom<&[u8]> for ContractPackageHash {
    type Error = TryFromSliceForContractHashError;

    // Fails unless the slice is exactly `KEY_HASH_LENGTH` bytes long.
    fn try_from(bytes: &[u8]) -> Result<Self, TryFromSliceForContractHashError> {
        HashAddr::try_from(bytes)
            .map(ContractPackageHash::new)
            .map_err(|_| TryFromSliceForContractHashError(()))
    }
}

impl TryFrom<&Vec<u8>>
for ContractPackageHash {
    type Error = TryFromSliceForContractHashError;

    fn try_from(bytes: &Vec<u8>) -> Result<Self, Self::Error> {
        HashAddr::try_from(bytes as &[u8])
            .map(ContractPackageHash::new)
            .map_err(|_| TryFromSliceForContractHashError(()))
    }
}

#[cfg(feature = "json-schema")]
impl JsonSchema for ContractPackageHash {
    fn schema_name() -> String {
        String::from("ContractPackageHash")
    }

    fn json_schema(gen: &mut SchemaGenerator) -> Schema {
        // Schema is just a string (the formatted prefixed-hex form).
        let schema = gen.subschema_for::<String>();
        let mut schema_object = schema.into_object();
        schema_object.metadata().description =
            Some("The hash address of the contract package".to_string());
        schema_object.into()
    }
}

/// An enum to determine the lock status of the contract package.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "datasize", derive(DataSize))]
#[cfg_attr(feature = "json-schema", derive(JsonSchema))]
pub enum ContractPackageStatus {
    /// The package is locked and cannot be versioned.
    Locked,
    /// The package is unlocked and can be versioned.
    Unlocked,
}

impl ContractPackageStatus {
    /// Create a new status flag based on a boolean value.
    pub fn new(is_locked: bool) -> Self {
        if is_locked {
            ContractPackageStatus::Locked
        } else {
            ContractPackageStatus::Unlocked
        }
    }
}

impl Default for ContractPackageStatus {
    fn default() -> Self {
        Self::Unlocked
    }
}

impl ToBytes for ContractPackageStatus {
    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
        let mut result = bytesrepr::allocate_buffer(self)?;
        // Serialized exactly like a bool: Locked <=> true, Unlocked <=> false.
        match self {
            ContractPackageStatus::Unlocked => result.append(&mut false.to_bytes()?),
            ContractPackageStatus::Locked => result.append(&mut true.to_bytes()?),
        }
        Ok(result)
    }

    fn serialized_length(&self) -> usize {
        match self {
            ContractPackageStatus::Unlocked => false.serialized_length(),
            ContractPackageStatus::Locked => true.serialized_length(),
        }
    }

    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {
        // Must match `to_bytes` above: one bool-encoded byte.
        match self {
            ContractPackageStatus::Locked => writer.push(u8::from(true)),
            ContractPackageStatus::Unlocked => writer.push(u8::from(false)),
        }
        Ok(())
    }
}

impl FromBytes for ContractPackageStatus {
    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
        let (val, bytes) = bool::from_bytes(bytes)?;
        let status = ContractPackageStatus::new(val);
        Ok((status, bytes))
    }
}

/// Contract definition, metadata, and security container.
#[derive(Debug, Clone, PartialEq, Eq, Default)]
#[cfg_attr(feature = "json-schema", derive(JsonSchema))]
#[cfg_attr(feature = "datasize", derive(DataSize))]
pub struct ContractPackage {
    /// Key used to add or disable versions
    access_key: URef,
    /// All versions (enabled & disabled)
    #[cfg_attr(
        feature = "json-schema",
        schemars(
            with = "Vec<crate::serde_helpers::contract_package::HumanReadableContractVersion>"
        )
    )]
    versions: ContractVersions,
    /// Disabled versions
    disabled_versions: DisabledVersions,
    /// Mapping maintaining the set of URefs associated with each "user
    /// group". This can be used to control access to methods in a particular
    /// version of the contract.
    /// A method is callable by any context which
    /// "knows" any of the URefs associated with the method's user group.
    groups: Groups,
    /// A flag that determines whether a contract is locked
    lock_status: ContractPackageStatus,
}

impl CLTyped for ContractPackage {
    fn cl_type() -> CLType {
        CLType::Any
    }
}

impl ContractPackage {
    /// Creates a new `ContractPackage` from its constituent parts.
    pub fn new(
        access_key: URef,
        versions: ContractVersions,
        disabled_versions: DisabledVersions,
        groups: Groups,
        lock_status: ContractPackageStatus,
    ) -> Self {
        ContractPackage {
            access_key,
            versions,
            disabled_versions,
            groups,
            lock_status,
        }
    }

    /// Get the access key for this contract.
    pub fn access_key(&self) -> URef {
        self.access_key
    }

    /// Get the group definitions for this contract.
    pub fn groups(&self) -> &Groups {
        &self.groups
    }

    /// Returns reference to all of this contract's versions.
    pub fn versions(&self) -> &ContractVersions {
        &self.versions
    }

    /// Returns mutable reference to all of this contract's versions (enabled and disabled).
    pub fn versions_mut(&mut self) -> &mut ContractVersions {
        &mut self.versions
    }

    /// Consumes the object and returns all of this contract's versions (enabled and disabled).
    pub fn take_versions(self) -> ContractVersions {
        self.versions
    }

    /// Consumes the object and returns all the groups of the contract package.
    pub fn take_groups(self) -> Groups {
        self.groups
    }

    /// Returns all of this contract's disabled versions.
    pub fn disabled_versions(&self) -> &DisabledVersions {
        &self.disabled_versions
    }

    /// Returns mut reference to all of this contract's disabled versions.
  pub fn disabled_versions_mut(&mut self) -> &mut DisabledVersions {
        &mut self.disabled_versions
    }

    /// Returns lock_status of the contract package.
    pub fn lock_status(&self) -> ContractPackageStatus {
        self.lock_status.clone()
    }

    /// Returns `true` if the package is locked (i.e. cannot be versioned).
    pub fn is_locked(&self) -> bool {
        match self.lock_status {
            ContractPackageStatus::Locked => true,
            ContractPackageStatus::Unlocked => false,
        }
    }

    /// Disable the contract version corresponding to the given hash (if it exists).
    pub fn disable_contract_version(&mut self, contract_hash: ContractHash) -> Result<(), Error> {
        let contract_version_key = self
            .find_contract_version_key_by_hash(&contract_hash)
            .copied()
            .ok_or(Error::ContractNotFound)?;

        // Idempotent: disabling an already-disabled version is a no-op.
        if !self.disabled_versions.contains(&contract_version_key) {
            self.disabled_versions.insert(contract_version_key);
        }

        Ok(())
    }

    /// Enable the contract version corresponding to the given hash (if it exists).
    pub fn enable_contract_version(&mut self, contract_hash: ContractHash) -> Result<(), Error> {
        let contract_version_key = self
            .find_contract_version_key_by_hash(&contract_hash)
            .copied()
            .ok_or(Error::ContractNotFound)?;

        self.disabled_versions.remove(&contract_version_key);

        Ok(())
    }

    // Looks up the version key under which `contract_hash` is registered,
    // if any (first match in key order).
    fn find_contract_version_key_by_hash(
        &self,
        contract_hash: &ContractHash,
    ) -> Option<&ContractVersionKey> {
        self.versions
            .iter()
            .filter_map(|(k, v)| if v == contract_hash { Some(k) } else { None })
            .next()
    }

    /// Removes a group from this entity (if it exists).
    pub fn remove_group(&mut self, group: &Group) -> bool {
        self.groups.0.remove(group).is_some()
    }
    // Computes the next contract version number for the given major protocol
    // version: one past the highest existing version, or 1 if none exist yet.
    fn next_contract_version_for(&self, protocol_version:
 ProtocolVersionMajor) -> ContractVersion {
        let current_version = self
            .versions
            .keys()
            .rev()
            .find_map(|&contract_version_key| {
                if contract_version_key.protocol_version_major() == protocol_version {
                    Some(contract_version_key.contract_version())
                } else {
                    None
                }
            })
            .unwrap_or(0);

        current_version + 1
    }

    /// Returns `true` if the given contract version exists and is enabled.
    pub fn is_version_enabled(&self, contract_version_key: ContractVersionKey) -> bool {
        !self.disabled_versions.contains(&contract_version_key)
            && self.versions.contains_key(&contract_version_key)
    }

    /// Returns all of this contract's enabled contract versions.
    pub fn enabled_versions(&self) -> ContractVersions {
        let mut ret = ContractVersions::new();
        for version in &self.versions {
            if !self.is_version_enabled(*version.0) {
                continue;
            }
            ret.insert(*version.0, *version.1);
        }
        ret
    }

    /// Return the contract version key for the newest enabled contract version.
    pub fn current_contract_version(&self) -> Option<ContractVersionKey> {
        self.enabled_versions().keys().next_back().copied()
    }

    /// Return the contract hash for the newest enabled contract version.
    pub fn current_contract_hash(&self) -> Option<ContractHash> {
        self.enabled_versions().values().next_back().copied()
    }

    /// Registers `contract_hash` as the next version for the given major
    /// protocol version, returning the key under which it was inserted.
    pub fn insert_contract_version(
        &mut self,
        protocol_version_major: ProtocolVersionMajor,
        contract_hash: ContractHash,
    ) -> ContractVersionKey {
        let contract_version = self.next_contract_version_for(protocol_version_major);
        let key = ContractVersionKey::new(protocol_version_major,
 contract_version);
        self.versions.insert(key, contract_hash);
        key
    }

    /// Returns a mutable reference to the groups of this contract package.
    pub fn groups_mut(&mut self) -> &mut Groups {
        &mut self.groups
    }
}

impl Serialize for ContractPackage {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        // Human-readable formats use the helper struct; binary formats embed
        // the bytesrepr encoding in a byte buffer.
        if serializer.is_human_readable() {
            HumanReadableContractPackage::from(self).serialize(serializer)
        } else {
            let bytes = self
                .to_bytes()
                .map_err(|error| ser::Error::custom(format!("{:?}", error)))?;
            ByteBuf::from(bytes).serialize(serializer)
        }
    }
}

impl<'de> Deserialize<'de> for ContractPackage {
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        // Mirror of `Serialize` above.
        if deserializer.is_human_readable() {
            let json_helper = HumanReadableContractPackage::deserialize(deserializer)?;
            json_helper.try_into().map_err(de::Error::custom)
        } else {
            let bytes = ByteBuf::deserialize(deserializer)?.into_vec();
            bytesrepr::deserialize::<ContractPackage>(bytes)
                .map_err(|error| de::Error::custom(format!("{:?}", error)))
        }
    }
}

impl ToBytes for ContractPackage {
    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
        let mut result = bytesrepr::allocate_buffer(self)?;
        // Field order must match `FromBytes::from_bytes` below.
        self.access_key().write_bytes(&mut result)?;
        self.versions().write_bytes(&mut result)?;
        self.disabled_versions().write_bytes(&mut result)?;
        self.groups().write_bytes(&mut result)?;
        self.lock_status.write_bytes(&mut result)?;
        Ok(result)
    }

    fn serialized_length(&self) -> usize {
        self.access_key.serialized_length()
            + self.versions.serialized_length()
            + self.disabled_versions.serialized_length()
            + self.groups.serialized_length()
            +
 self.lock_status.serialized_length()
    }

    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {
        // Field order must match `to_bytes` / `from_bytes`.
        self.access_key().write_bytes(writer)?;
        self.versions().write_bytes(writer)?;
        self.disabled_versions().write_bytes(writer)?;
        self.groups().write_bytes(writer)?;
        self.lock_status.write_bytes(writer)?;
        Ok(())
    }
}

impl FromBytes for ContractPackage {
    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
        let (access_key, bytes) = URef::from_bytes(bytes)?;
        let (versions, bytes) = ContractVersions::from_bytes(bytes)?;
        let (disabled_versions, bytes) = DisabledVersions::from_bytes(bytes)?;
        let (groups, bytes) = Groups::from_bytes(bytes)?;
        let (lock_status, bytes) = ContractPackageStatus::from_bytes(bytes)?;
        let result = ContractPackage {
            access_key,
            versions,
            disabled_versions,
            groups,
            lock_status,
        };

        Ok((result, bytes))
    }
}

impl From<ContractPackage> for Package {
    fn from(value: ContractPackage) -> Self {
        // Re-key every version as an entity version pointing at a
        // smart-contract entity address.
        let versions: BTreeMap<EntityVersionKey, EntityAddr> = value
            .versions
            .into_iter()
            .map(|(version, contract_hash)| {
                let entity_version = EntityVersionKey::new(
                    version.protocol_version_major(),
                    version.contract_version(),
                );
                let entity_hash = EntityAddr::SmartContract(contract_hash.value());
                (entity_version, entity_hash)
            })
            .collect();

        let disabled_versions = value
            .disabled_versions
            .into_iter()
            .map(|contract_versions| {
                EntityVersionKey::new(
                    contract_versions.protocol_version_major(),
 contract_versions.contract_version(),
                )
            })
            .collect();

        let lock_status = if value.lock_status == ContractPackageStatus::Locked {
            PackageStatus::Locked
        } else {
            PackageStatus::Unlocked
        };

        Package::new(
            versions.into(),
            disabled_versions,
            value.groups,
            lock_status,
        )
    }
}

/// Type signature of a method. Order of arguments matters since they can be
/// referenced by index as well as by name.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "datasize", derive(DataSize))]
#[cfg_attr(feature = "json-schema", derive(JsonSchema))]
pub struct EntryPoint {
    name: String,
    args: Parameters,
    ret: CLType,
    access: EntryPointAccess,
    entry_point_type: EntryPointType,
}

impl From<EntryPoint> for (String, Parameters, CLType, EntryPointAccess, EntryPointType) {
    fn from(entry_point: EntryPoint) -> Self {
        (
            entry_point.name,
            entry_point.args,
            entry_point.ret,
            entry_point.access,
            entry_point.entry_point_type,
        )
    }
}

impl EntryPoint {
    /// `EntryPoint` constructor.
    pub fn new<T: Into<String>>(
        name: T,
        args: Parameters,
        ret: CLType,
        access: EntryPointAccess,
        entry_point_type: EntryPointType,
    ) -> Self {
        EntryPoint {
            name: name.into(),
            args,
            ret,
            access,
            entry_point_type,
        }
    }

    /// Create a default [`EntryPoint`] with specified name.
    pub fn default_with_name<T: Into<String>>(name: T) -> Self {
        EntryPoint {
            name: name.into(),
            ..Default::default()
        }
    }

    /// Get name.
    pub fn name(&self) -> &str {
        &self.name
    }
    /// Get access enum.
    pub fn access(&self) -> &EntryPointAccess {
        &self.access
    }

    /// Get the arguments for this method.
    pub fn args(&self) -> &[Parameter] {
        self.args.as_slice()
    }

    /// Get the return type.
    pub fn ret(&self) -> &CLType {
        &self.ret
    }

    /// Obtains the entry point type.
    pub fn entry_point_type(&self) -> EntryPointType {
        self.entry_point_type
    }
}

impl Default for EntryPoint {
    /// constructor for a public session `EntryPoint` that takes no args and returns `Unit`
    fn default() -> Self {
        EntryPoint {
            name: DEFAULT_ENTRY_POINT_NAME.to_string(),
            args: Vec::new(),
            ret: CLType::Unit,
            access: EntryPointAccess::Public,
            entry_point_type: EntryPointType::Caller,
        }
    }
}

impl ToBytes for EntryPoint {
    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
        let mut buffer = bytesrepr::allocate_buffer(self)?;
        self.write_bytes(&mut buffer)?;
        Ok(buffer)
    }

    fn serialized_length(&self) -> usize {
        self.name.serialized_length()
            + self.args.serialized_length()
            + self.ret.serialized_length()
            + self.access.serialized_length()
            + self.entry_point_type.serialized_length()
    }

    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {
        self.name.write_bytes(writer)?;
        self.args.write_bytes(writer)?;
        self.ret.append_bytes(writer)?;
        self.access.write_bytes(writer)?;
        self.entry_point_type.write_bytes(writer)?;
        Ok(())
    }
}

impl FromBytes for EntryPoint {
    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
        let (name, bytes) = String::from_bytes(bytes)?;
        let (args, bytes) = Vec::<Parameter>::from_bytes(bytes)?;
        let (ret, bytes) = CLType::from_bytes(bytes)?;
        let
 (access, bytes) = EntryPointAccess::from_bytes(bytes)?;
        let (entry_point_type, bytes) = EntryPointType::from_bytes(bytes)?;

        Ok((
            EntryPoint {
                name,
                args,
                ret,
                access,
                entry_point_type,
            },
            bytes,
        ))
    }
}

/// Collection of named entry points.
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)]
#[cfg_attr(feature = "datasize", derive(DataSize))]
#[serde(transparent, deny_unknown_fields)]
pub struct EntryPoints(BTreeMap<String, EntryPoint>);

impl From<crate::addressable_entity::EntryPoints> for EntryPoints {
    fn from(value: EntityEntryPoints) -> Self {
        // Converts each entity entry point into the legacy contract form,
        // keyed by name.
        let mut ret = EntryPoints::new();
        for entity_entry_point in value.take_entry_points() {
            let entry_point = EntryPoint::new(
                entity_entry_point.name(),
                Parameters::from(entity_entry_point.args()),
                entity_entry_point.ret().clone(),
                entity_entry_point.access().clone(),
                entity_entry_point.entry_point_type(),
            );
            ret.add_entry_point(entry_point);
        }
        ret
    }
}

impl ToBytes for EntryPoints {
    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
        self.0.to_bytes()
    }

    fn serialized_length(&self) -> usize {
        self.0.serialized_length()
    }

    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {
        self.0.write_bytes(writer)
    }
}

impl FromBytes for EntryPoints {
    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
        let (entry_points_map, remainder) = BTreeMap::<String, EntryPoint>::from_bytes(bytes)?;
        Ok((EntryPoints(entry_points_map), remainder))
    }
}

impl Default for EntryPoints {
    fn default() -> Self {
        let mut entry_points =
 EntryPoints::new();
        let entry_point = EntryPoint::default();
        entry_points.add_entry_point(entry_point);
        entry_points
    }
}

impl From<EntryPoint> for EntityEntryPoint {
    fn from(value: EntryPoint) -> Self {
        EntityEntryPoint::from(&value)
    }
}

impl From<&EntryPoint> for EntityEntryPoint {
    fn from(value: &EntryPoint) -> Self {
        EntityEntryPoint::new(
            value.name.clone(),
            value.args.clone(),
            value.ret.clone(),
            value.access.clone(),
            value.entry_point_type,
            EntryPointPayment::Caller,
        )
    }
}

impl EntryPoints {
    /// Constructs a new, empty `EntryPoints`.
    pub const fn new() -> EntryPoints {
        EntryPoints(BTreeMap::<String, EntryPoint>::new())
    }

    /// Constructs a new `EntryPoints` with a single entry for the default `EntryPoint`.
    pub fn new_with_default_entry_point() -> Self {
        let mut entry_points = EntryPoints::new();
        let entry_point = EntryPoint::default();
        entry_points.add_entry_point(entry_point);
        entry_points
    }

    /// Adds new [`EntryPoint`].
    // Returns the previous entry point stored under the same name, if any.
    pub fn add_entry_point(&mut self, entry_point: EntryPoint) -> Option<EntryPoint> {
        self.0.insert(entry_point.name().to_string(), entry_point)
    }

    /// Checks if given [`EntryPoint`] exists.
    pub fn has_entry_point(&self, entry_point_name: &str) -> bool {
        self.0.contains_key(entry_point_name)
    }

    /// Gets an existing [`EntryPoint`] by its name.
    pub fn get(&self, entry_point_name: &str) -> Option<&EntryPoint> {
        self.0.get(entry_point_name)
    }

    /// Returns iterator for existing entry point names.
    pub fn keys(&self) -> impl Iterator<Item = &String> {
        self.0.keys()
    }

    /// Takes all entry points.
    pub fn take_entry_points(self) -> Vec<EntryPoint> {
        self.0.into_values().collect()
    }
    /// Returns the length of the entry points
    pub fn len(&self) -> usize {
        self.0.len()
    }

    /// Checks if the `EntryPoints` is empty.
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    /// Checks if any of the entry points are of the type Session.
    // NOTE(review): this tests `EntryPointType::Caller`; presumably `Caller`
    // is the current name for the former "Session" type — confirm.
    pub fn contains_stored_session(&self) -> bool {
        self.0
            .values()
            .any(|entry_point| entry_point.entry_point_type == EntryPointType::Caller)
    }
}

impl From<Vec<EntryPoint>> for EntryPoints {
    fn from(entry_points: Vec<EntryPoint>) -> EntryPoints {
        let entries = entry_points
            .into_iter()
            .map(|entry_point| (String::from(entry_point.name()), entry_point))
            .collect();
        EntryPoints(entries)
    }
}

impl From<EntryPoints> for EntityEntryPoints {
    fn from(value: EntryPoints) -> Self {
        let mut entry_points = EntityEntryPoints::new();
        for contract_entry_point in value.take_entry_points() {
            entry_points.add_entry_point(EntityEntryPoint::from(contract_entry_point));
        }
        entry_points
    }
}

/// Methods and type signatures supported by a contract.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "datasize", derive(DataSize))]
#[cfg_attr(feature = "json-schema", derive(JsonSchema))]
pub struct Contract {
    contract_package_hash: ContractPackageHash,
    contract_wasm_hash: ContractWasmHash,
    named_keys: NamedKeys,
    #[cfg_attr(feature = "json-schema", schemars(with = "Vec<EntryPoint>"))]
    entry_points: EntryPoints,
    protocol_version: ProtocolVersion,
}

impl Contract {
    /// `Contract` constructor.
    pub fn new(
        contract_package_hash: ContractPackageHash,
        contract_wasm_hash: ContractWasmHash,
        named_keys: NamedKeys,
        entry_points: EntryPoints,
        protocol_version: ProtocolVersion,
    ) -> Self {
        Contract {
            contract_package_hash,
            contract_wasm_hash,
            named_keys,
            entry_points,
            protocol_version,
        }
    }

    /// Hash for accessing contract package
    pub fn contract_package_hash(&self) -> ContractPackageHash {
        self.contract_package_hash
    }

    /// Hash for accessing contract WASM
    pub fn contract_wasm_hash(&self) -> ContractWasmHash {
        self.contract_wasm_hash
    }

    /// Checks whether there is a method with the given name
    pub fn has_entry_point(&self, name: &str) -> bool {
        self.entry_points.has_entry_point(name)
    }

    /// Returns the type signature for the given `method`.
    pub fn entry_point(&self, method: &str) -> Option<&EntryPoint> {
        self.entry_points.get(method)
    }

    /// Get the protocol version this header is targeting.
    pub fn protocol_version(&self) -> ProtocolVersion {
        self.protocol_version
    }

    /// Adds new entry point
    // NOTE(review): the type parameter `T` is unused in this signature —
    // presumably a leftover from a variant that took `name: T`. Removing it
    // would break callers using turbofish, so it is only flagged here; confirm
    // before changing.
    pub fn add_entry_point<T: Into<String>>(&mut self, entry_point: EntryPoint) {
        self.entry_points.add_entry_point(entry_point);
    }

    /// Hash for accessing contract bytes
    pub fn contract_wasm_key(&self) -> Key {
        self.contract_wasm_hash.into()
    }

    /// Returns immutable reference to methods
    pub fn entry_points(&self) -> &EntryPoints {
        &self.entry_points
    }

    /// Takes `named_keys`
    pub fn take_named_keys(self) -> NamedKeys {
        self.named_keys
    }

    /// Returns a reference to `named_keys`
    pub fn named_keys(&self) -> &NamedKeys {
        &self.named_keys
    }

    /// Appends `keys` to `named_keys`
    pub fn named_keys_append(&mut self, keys: NamedKeys) {
        self.named_keys.append(keys);
    }

    /// Removes given named key.
    pub fn remove_named_key(&mut self, key: &str) -> Option<Key> {
        self.named_keys.remove(key)
    }
    /// Set protocol_version.
    pub fn set_protocol_version(&mut self, protocol_version: ProtocolVersion) {
        self.protocol_version = protocol_version;
    }

    /// Determines if `Contract` is compatible with a given `ProtocolVersion`.
    pub fn is_compatible_protocol_version(&self, protocol_version: ProtocolVersion) -> bool {
        // Only the major component is compared.
        self.protocol_version.value().major == protocol_version.value().major
    }
}

impl ToBytes for Contract {
    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
        let mut result = bytesrepr::allocate_buffer(self)?;
        // Field order must match `FromBytes::from_bytes` below.
        self.contract_package_hash().write_bytes(&mut result)?;
        self.contract_wasm_hash().write_bytes(&mut result)?;
        self.named_keys().write_bytes(&mut result)?;
        self.entry_points().write_bytes(&mut result)?;
        self.protocol_version().write_bytes(&mut result)?;
        Ok(result)
    }

    fn serialized_length(&self) -> usize {
        ToBytes::serialized_length(&self.entry_points)
            + ToBytes::serialized_length(&self.contract_package_hash)
            + ToBytes::serialized_length(&self.contract_wasm_hash)
            + ToBytes::serialized_length(&self.protocol_version)
            + ToBytes::serialized_length(&self.named_keys)
    }

    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {
        self.contract_package_hash().write_bytes(writer)?;
        self.contract_wasm_hash().write_bytes(writer)?;
        self.named_keys().write_bytes(writer)?;
        self.entry_points().write_bytes(writer)?;
        self.protocol_version().write_bytes(writer)?;
        Ok(())
    }
}

impl FromBytes for Contract {
    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
        let (contract_package_hash, bytes) = FromBytes::from_bytes(bytes)?;
        let (contract_wasm_hash, bytes) = FromBytes::from_bytes(bytes)?;
        let (named_keys, bytes) = NamedKeys::from_bytes(bytes)?;
        let
 (entry_points, bytes) = EntryPoints::from_bytes(bytes)?;
        let (protocol_version, bytes) = ProtocolVersion::from_bytes(bytes)?;
        Ok((
            Contract {
                contract_package_hash,
                contract_wasm_hash,
                named_keys,
                entry_points,
                protocol_version,
            },
            bytes,
        ))
    }
}

impl Default for Contract {
    fn default() -> Self {
        Contract {
            named_keys: NamedKeys::default(),
            entry_points: EntryPoints::default(),
            contract_wasm_hash: [0; KEY_HASH_LENGTH].into(),
            contract_package_hash: [0; KEY_HASH_LENGTH].into(),
            protocol_version: ProtocolVersion::V1_0_0,
        }
    }
}

/// Default name for an entry point
pub const DEFAULT_ENTRY_POINT_NAME: &str = "call";

/// Default name for an installer entry point
pub const ENTRY_POINT_NAME_INSTALL: &str = "install";

/// Default name for an upgrade entry point
pub const UPGRADE_ENTRY_POINT_NAME: &str = "upgrade";

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{AccessRights, EntryPointAccess, EntryPointType, Group, Parameter, URef};
    use alloc::borrow::ToOwned;

    const CONTRACT_HASH_V1: ContractHash = ContractHash::new([42; 32]);
    const CONTRACT_HASH_V2: ContractHash = ContractHash::new([84; 32]);

    // Builds a package with two user groups and two enabled versions; used as
    // the fixture for the round-trip serialization test.
    fn make_contract_package() -> ContractPackage {
        let mut contract_package = ContractPackage::new(
            URef::new([0; 32], AccessRights::NONE),
            ContractVersions::default(),
            DisabledVersions::default(),
            Groups::default(),
            ContractPackageStatus::default(),
        );

        // add groups
        {
            let group_urefs = {
                let mut ret = BTreeSet::new();
                ret.insert(URef::new([1; 32], AccessRights::READ));
                ret
            };
 contract_package
                .groups_mut()
                .insert(Group::new("Group 1"), group_urefs.clone());

            contract_package
                .groups_mut()
                .insert(Group::new("Group 2"), group_urefs);
        }

        // add entry_points
        // NOTE(review): this map is built but never attached to the package
        // (bound to `_entry_points`), matching the other unused `_`-prefixed
        // locals below.
        let _entry_points = {
            let mut ret = BTreeMap::new();
            let entrypoint = EntryPoint::new(
                "method0".to_string(),
                vec![],
                CLType::U32,
                EntryPointAccess::groups(&["Group 2"]),
                EntryPointType::Caller,
            );
            ret.insert(entrypoint.name().to_owned(), entrypoint);
            let entrypoint = EntryPoint::new(
                "method1".to_string(),
                vec![Parameter::new("Foo", CLType::U32)],
                CLType::U32,
                EntryPointAccess::groups(&["Group 1"]),
                EntryPointType::Caller,
            );
            ret.insert(entrypoint.name().to_owned(), entrypoint);
            ret
        };

        let _contract_package_hash = [41; 32];
        let _contract_wasm_hash = [43; 32];
        let _named_keys = NamedKeys::new();
        let protocol_version = ProtocolVersion::V1_0_0;

        let v1 = contract_package
            .insert_contract_version(protocol_version.value().major, CONTRACT_HASH_V1);
        let v2 = contract_package
            .insert_contract_version(protocol_version.value().major, CONTRACT_HASH_V2);

        assert!(v2 > v1);

        contract_package
    }

    #[test]
    fn roundtrip_serialization() {
        let contract_package = make_contract_package();
        let bytes = contract_package.to_bytes().expect("should serialize");
        let (decoded_package, rem) =
            ContractPackage::from_bytes(&bytes).expect("should deserialize");
        assert_eq!(contract_package, decoded_package);
        assert_eq!(rem.len(), 0);
}\n\n    #[test]\n    fn contract_hash_from_slice() {\n        let bytes: Vec<u8> = (0..32).collect();\n        let contract_hash = HashAddr::try_from(&bytes[..]).expect(\"should create contract hash\");\n        let contract_hash = ContractHash::new(contract_hash);\n        assert_eq!(&bytes, &contract_hash.as_bytes());\n    }\n\n    #[test]\n    fn contract_package_hash_from_slice() {\n        let bytes: Vec<u8> = (0..32).collect();\n        let contract_hash = HashAddr::try_from(&bytes[..]).expect(\"should create contract hash\");\n        let contract_hash = ContractPackageHash::new(contract_hash);\n        assert_eq!(&bytes, &contract_hash.as_bytes());\n    }\n\n    #[test]\n    fn contract_hash_from_str() {\n        let contract_hash = ContractHash([3; 32]);\n        let encoded = contract_hash.to_formatted_string();\n        let decoded = ContractHash::from_formatted_str(&encoded).unwrap();\n        assert_eq!(contract_hash, decoded);\n\n        let invalid_prefix =\n            \"contract--0000000000000000000000000000000000000000000000000000000000000000\";\n        assert!(ContractHash::from_formatted_str(invalid_prefix).is_err());\n\n        let short_addr = \"contract-00000000000000000000000000000000000000000000000000000000000000\";\n        assert!(ContractHash::from_formatted_str(short_addr).is_err());\n\n        let long_addr =\n            \"contract-000000000000000000000000000000000000000000000000000000000000000000\";\n        assert!(ContractHash::from_formatted_str(long_addr).is_err());\n\n        let invalid_hex =\n            \"contract-000000000000000000000000000000000000000000000000000000000000000g\";\n        assert!(ContractHash::from_formatted_str(invalid_hex).is_err());\n    }\n\n    #[test]\n    fn contract_package_hash_from_str() {\n        let contract_package_hash = ContractPackageHash([3; 32]);\n        let encoded = contract_package_hash.to_formatted_string();\n        let decoded = 
ContractPackageHash::from_formatted_str(&encoded).unwrap();\n        assert_eq!(contract_package_hash, decoded);\n\n        let invalid_prefix =\n            \"contract-package0000000000000000000000000000000000000000000000000000000000000000\";\n        assert!(matches!(\n            ContractPackageHash::from_formatted_str(invalid_prefix).unwrap_err(),\n            FromStrError::InvalidPrefix\n        ));\n\n        let short_addr =\n            \"contract-package-00000000000000000000000000000000000000000000000000000000000000\";\n        assert!(matches!(\n            ContractPackageHash::from_formatted_str(short_addr).unwrap_err(),\n            FromStrError::Hash(_)\n        ));\n\n        let long_addr =\n            \"contract-package-000000000000000000000000000000000000000000000000000000000000000000\";\n        assert!(matches!(\n            ContractPackageHash::from_formatted_str(long_addr).unwrap_err(),\n            FromStrError::Hash(_)\n        ));\n\n        let invalid_hex =\n            \"contract-package-000000000000000000000000000000000000000000000000000000000000000g\";\n        assert!(matches!(\n            ContractPackageHash::from_formatted_str(invalid_hex).unwrap_err(),\n            FromStrError::Hex(_)\n        ));\n    }\n\n    #[test]\n    fn contract_package_hash_from_legacy_str() {\n        let contract_package_hash = ContractPackageHash([3; 32]);\n        let hex_addr = contract_package_hash.to_string();\n        let legacy_encoded = format!(\"contract-package-wasm{}\", hex_addr);\n        let decoded_from_legacy = ContractPackageHash::from_formatted_str(&legacy_encoded)\n            .expect(\"should accept legacy prefixed string\");\n        assert_eq!(\n            contract_package_hash, decoded_from_legacy,\n            \"decoded_from_legacy should equal decoded\"\n        );\n\n        let invalid_prefix =\n            \"contract-packagewasm0000000000000000000000000000000000000000000000000000000000000000\";\n        assert!(matches!(\n    
        ContractPackageHash::from_formatted_str(invalid_prefix).unwrap_err(),\n            FromStrError::InvalidPrefix\n        ));\n\n        let short_addr =\n            \"contract-package-wasm00000000000000000000000000000000000000000000000000000000000000\";\n        assert!(matches!(\n            ContractPackageHash::from_formatted_str(short_addr).unwrap_err(),\n            FromStrError::Hash(_)\n        ));\n\n        let long_addr =\n            \"contract-package-wasm000000000000000000000000000000000000000000000000000000000000000000\";\n        assert!(matches!(\n            ContractPackageHash::from_formatted_str(long_addr).unwrap_err(),\n            FromStrError::Hash(_)\n        ));\n\n        let invalid_hex =\n            \"contract-package-wasm000000000000000000000000000000000000000000000000000000000000000g\";\n        assert!(matches!(\n            ContractPackageHash::from_formatted_str(invalid_hex).unwrap_err(),\n            FromStrError::Hex(_)\n        ));\n    }\n\n    #[test]\n    fn contract_hash_serde_roundtrip() {\n        let contract_hash = ContractHash([255; 32]);\n        let serialized = bincode::serialize(&contract_hash).unwrap();\n        let deserialized = bincode::deserialize(&serialized).unwrap();\n        assert_eq!(contract_hash, deserialized)\n    }\n\n    #[test]\n    fn contract_hash_json_roundtrip() {\n        let contract_hash = ContractHash([255; 32]);\n        let json_string = serde_json::to_string_pretty(&contract_hash).unwrap();\n        let decoded = serde_json::from_str(&json_string).unwrap();\n        assert_eq!(contract_hash, decoded)\n    }\n\n    #[test]\n    fn contract_package_hash_serde_roundtrip() {\n        let contract_hash = ContractPackageHash([255; 32]);\n        let serialized = bincode::serialize(&contract_hash).unwrap();\n        let deserialized = bincode::deserialize(&serialized).unwrap();\n        assert_eq!(contract_hash, deserialized)\n    }\n\n    #[test]\n    fn 
contract_package_hash_json_roundtrip() {\n        let contract_hash = ContractPackageHash([255; 32]);\n        let json_string = serde_json::to_string_pretty(&contract_hash).unwrap();\n        let decoded = serde_json::from_str(&json_string).unwrap();\n        assert_eq!(contract_hash, decoded)\n    }\n\n    #[test]\n    fn package_hash_from_legacy_str() {\n        let package_hash = ContractPackageHash([3; 32]);\n        let hex_addr = package_hash.to_string();\n        let legacy_encoded = format!(\"contract-package-wasm{}\", hex_addr);\n        let decoded_from_legacy = ContractPackageHash::from_formatted_str(&legacy_encoded)\n            .expect(\"should accept legacy prefixed string\");\n        assert_eq!(\n            package_hash, decoded_from_legacy,\n            \"decoded_from_legacy should equal decoded\"\n        );\n\n        let invalid_prefix =\n            \"contract-packagewasm0000000000000000000000000000000000000000000000000000000000000000\";\n        assert!(matches!(\n            ContractPackageHash::from_formatted_str(invalid_prefix).unwrap_err(),\n            FromStrError::InvalidPrefix\n        ));\n\n        let short_addr =\n            \"contract-package-wasm00000000000000000000000000000000000000000000000000000000000000\";\n        assert!(matches!(\n            ContractPackageHash::from_formatted_str(short_addr).unwrap_err(),\n            FromStrError::Hash(_)\n        ));\n\n        let long_addr =\n            \"contract-package-wasm000000000000000000000000000000000000000000000000000000000000000000\";\n        assert!(matches!(\n            ContractPackageHash::from_formatted_str(long_addr).unwrap_err(),\n            FromStrError::Hash(_)\n        ));\n\n        let invalid_hex =\n            \"contract-package-wasm000000000000000000000000000000000000000000000000000000000000000g\";\n        assert!(matches!(\n            ContractPackageHash::from_formatted_str(invalid_hex).unwrap_err(),\n            FromStrError::Hex(_)\n        ));\n   
 }\n}\n\n#[cfg(test)]\nmod prop_tests {\n    use proptest::prelude::*;\n\n    use crate::{bytesrepr, contracts::ContractPackage, gens};\n\n    proptest! {\n        #![proptest_config(ProptestConfig {\n            cases: 1024,\n            .. ProptestConfig::default()\n        })]\n\n        #[test]\n        fn test_value_contract(contract in gens::contract_arb()) {\n            bytesrepr::test_serialization_roundtrip(&contract);\n        }\n\n        #[test]\n        fn test_value_contract_package(contract_pkg in gens::contract_package_arb()) {\n            bytesrepr::test_serialization_roundtrip(&contract_pkg);\n        }\n\n        #[test]\n        fn test_json_contract_package(v in gens::contract_package_arb()) {\n            let json_str = serde_json::to_string(&v).unwrap();\n            let deserialized = serde_json::from_str::<ContractPackage>(&json_str).unwrap();\n            assert_eq!(v, deserialized);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/crypto/asymmetric_key/gens.rs",
    "content": "//! Generators for asymmetric key types\n\nuse core::convert::TryInto;\n\nuse proptest::{\n    collection,\n    prelude::{Arbitrary, Just, Strategy},\n    prop_oneof,\n};\n\nuse crate::{crypto::SecretKey, PublicKey};\n\n/// Creates an arbitrary [`PublicKey`]\npub fn public_key_arb() -> impl Strategy<Value = PublicKey> {\n    prop_oneof![\n        Just(PublicKey::System),\n        collection::vec(<u8>::arbitrary(), SecretKey::ED25519_LENGTH).prop_map(|bytes| {\n            let byte_array: [u8; SecretKey::ED25519_LENGTH] = bytes.try_into().unwrap();\n            let secret_key = SecretKey::ed25519_from_bytes(byte_array).unwrap();\n            PublicKey::from(&secret_key)\n        }),\n        collection::vec(<u8>::arbitrary(), SecretKey::SECP256K1_LENGTH).prop_map(|bytes| {\n            let bytes_array: [u8; SecretKey::SECP256K1_LENGTH] = bytes.try_into().unwrap();\n            let secret_key = SecretKey::secp256k1_from_bytes(bytes_array).unwrap();\n            PublicKey::from(&secret_key)\n        })\n    ]\n}\n\n/// Returns a strategy for creating random [`PublicKey`] instances but NOT system variant.\npub fn public_key_arb_no_system() -> impl Strategy<Value = PublicKey> {\n    prop_oneof![\n        collection::vec(<u8>::arbitrary(), SecretKey::ED25519_LENGTH).prop_map(|bytes| {\n            let byte_array: [u8; SecretKey::ED25519_LENGTH] = bytes.try_into().unwrap();\n            let secret_key = SecretKey::ed25519_from_bytes(byte_array).unwrap();\n            PublicKey::from(&secret_key)\n        }),\n        collection::vec(<u8>::arbitrary(), SecretKey::SECP256K1_LENGTH).prop_map(|bytes| {\n            let bytes_array: [u8; SecretKey::SECP256K1_LENGTH] = bytes.try_into().unwrap();\n            let secret_key = SecretKey::secp256k1_from_bytes(bytes_array).unwrap();\n            PublicKey::from(&secret_key)\n        })\n    ]\n}\n\n/// Returns a strategy for creating random [`SecretKey`] instances but NOT system variant.\npub fn 
secret_key_arb_no_system() -> impl Strategy<Value = SecretKey> {\n    prop_oneof![\n        collection::vec(<u8>::arbitrary(), SecretKey::ED25519_LENGTH).prop_map(|bytes| {\n            let byte_array: [u8; SecretKey::ED25519_LENGTH] = bytes.try_into().unwrap();\n            SecretKey::ed25519_from_bytes(byte_array).unwrap()\n        }),\n        collection::vec(<u8>::arbitrary(), SecretKey::SECP256K1_LENGTH).prop_map(|bytes| {\n            let bytes_array: [u8; SecretKey::SECP256K1_LENGTH] = bytes.try_into().unwrap();\n            SecretKey::secp256k1_from_bytes(bytes_array).unwrap()\n        })\n    ]\n}\n"
  },
  {
    "path": "types/src/crypto/asymmetric_key/tests.rs",
    "content": "use std::{\n    cmp::Ordering,\n    collections::hash_map::DefaultHasher,\n    hash::{Hash, Hasher},\n    iter,\n};\n\nuse rand::RngCore;\n\nuse k256::elliptic_curve::sec1::ToEncodedPoint;\nuse openssl::pkey::{PKey, Private, Public};\n\nuse super::*;\nuse crate::{\n    bytesrepr, checksummed_hex, crypto::SecretKey, testing::TestRng, AsymmetricType, PublicKey,\n    Tagged,\n};\n\n#[test]\nfn can_construct_ed25519_keypair_from_zeroes() {\n    let bytes = [0; SecretKey::ED25519_LENGTH];\n    let secret_key = SecretKey::ed25519_from_bytes(bytes).unwrap();\n    let _public_key: PublicKey = (&secret_key).into();\n}\n\n#[test]\n#[should_panic]\nfn cannot_construct_secp256k1_keypair_from_zeroes() {\n    let bytes = [0; SecretKey::SECP256K1_LENGTH];\n    let secret_key = SecretKey::secp256k1_from_bytes(bytes).unwrap();\n    let _public_key: PublicKey = (&secret_key).into();\n}\n\n#[test]\nfn can_construct_ed25519_keypair_from_ones() {\n    let bytes = [1; SecretKey::ED25519_LENGTH];\n    let secret_key = SecretKey::ed25519_from_bytes(bytes).unwrap();\n    let _public_key: PublicKey = (&secret_key).into();\n}\n\n#[test]\nfn can_construct_secp256k1_keypair_from_ones() {\n    let bytes = [1; SecretKey::SECP256K1_LENGTH];\n    let secret_key = SecretKey::secp256k1_from_bytes(bytes).unwrap();\n    let _public_key: PublicKey = (&secret_key).into();\n}\n\ntype OpenSSLSecretKey = PKey<Private>;\ntype OpenSSLPublicKey = PKey<Public>;\n\n// `SecretKey` does not implement `PartialEq`, so just compare derived `PublicKey`s.\nfn assert_secret_keys_equal(lhs: &SecretKey, rhs: &SecretKey) {\n    assert_eq!(PublicKey::from(lhs), PublicKey::from(rhs));\n}\n\nfn secret_key_der_roundtrip(secret_key: SecretKey) {\n    let der_encoded = secret_key.to_der().unwrap();\n    let decoded = SecretKey::from_der(&der_encoded).unwrap();\n    assert_secret_keys_equal(&secret_key, &decoded);\n    assert_eq!(secret_key.tag(), decoded.tag());\n\n    // Ensure malformed encoded version fails 
to decode.\n    SecretKey::from_der(&der_encoded[1..]).unwrap_err();\n}\n\nfn secret_key_pem_roundtrip(secret_key: SecretKey) {\n    let pem_encoded = secret_key.to_pem().unwrap();\n    let decoded = SecretKey::from_pem(pem_encoded.as_bytes()).unwrap();\n    assert_secret_keys_equal(&secret_key, &decoded);\n    assert_eq!(secret_key.tag(), decoded.tag());\n\n    // Check PEM-encoded can be decoded by openssl.\n    let _ = OpenSSLSecretKey::private_key_from_pem(pem_encoded.as_bytes()).unwrap();\n\n    // Ensure malformed encoded version fails to decode.\n    SecretKey::from_pem(&pem_encoded[1..]).unwrap_err();\n}\n\nfn known_secret_key_to_pem(expected_key: &SecretKey, known_key_pem: &str, expected_tag: u8) {\n    let decoded = SecretKey::from_pem(known_key_pem.as_bytes()).unwrap();\n    assert_secret_keys_equal(expected_key, &decoded);\n    assert_eq!(expected_tag, decoded.tag());\n}\n\n#[cfg(any(feature = \"std-fs-io\", test))]\nfn secret_key_file_roundtrip(secret_key: SecretKey) {\n    let tempdir = tempfile::tempdir().unwrap();\n    let path = tempdir.path().join(\"test_secret_key.pem\");\n\n    secret_key.to_file(&path).unwrap();\n    let decoded = SecretKey::from_file(&path).unwrap();\n    assert_secret_keys_equal(&secret_key, &decoded);\n    assert_eq!(secret_key.tag(), decoded.tag());\n}\n\nfn public_key_serialization_roundtrip(public_key: PublicKey) {\n    // Try to/from bincode.\n    let serialized = bincode::serialize(&public_key).unwrap();\n    let deserialized = bincode::deserialize(&serialized).unwrap();\n    assert_eq!(public_key, deserialized);\n    assert_eq!(public_key.tag(), deserialized.tag());\n\n    // Try to/from JSON.\n    let serialized = serde_json::to_vec_pretty(&public_key).unwrap();\n    let deserialized = serde_json::from_slice(&serialized).unwrap();\n    assert_eq!(public_key, deserialized);\n    assert_eq!(public_key.tag(), deserialized.tag());\n\n    // Using bytesrepr.\n    
bytesrepr::test_serialization_roundtrip(&public_key);\n}\n\nfn public_key_der_roundtrip(public_key: PublicKey) {\n    let der_encoded = public_key.to_der().unwrap();\n    let decoded = PublicKey::from_der(&der_encoded).unwrap();\n    assert_eq!(public_key, decoded);\n\n    // Check DER-encoded can be decoded by openssl.\n    let _ = OpenSSLPublicKey::public_key_from_der(&der_encoded).unwrap();\n\n    // Ensure malformed encoded version fails to decode.\n    PublicKey::from_der(&der_encoded[1..]).unwrap_err();\n}\n\nfn public_key_pem_roundtrip(public_key: PublicKey) {\n    let pem_encoded = public_key.to_pem().unwrap();\n    let decoded = PublicKey::from_pem(pem_encoded.as_bytes()).unwrap();\n    assert_eq!(public_key, decoded);\n    assert_eq!(public_key.tag(), decoded.tag());\n\n    // Check PEM-encoded can be decoded by openssl.\n    let _ = OpenSSLPublicKey::public_key_from_pem(pem_encoded.as_bytes()).unwrap();\n\n    // Ensure malformed encoded version fails to decode.\n    PublicKey::from_pem(&pem_encoded[1..]).unwrap_err();\n}\n\nfn known_public_key_to_pem(known_key_hex: &str, known_key_pem: &str) {\n    let key_bytes = checksummed_hex::decode(known_key_hex).unwrap();\n    let decoded = PublicKey::from_pem(known_key_pem.as_bytes()).unwrap();\n    assert_eq!(key_bytes, Into::<Vec<u8>>::into(decoded));\n}\n\n#[cfg(any(feature = \"std-fs-io\", test))]\nfn public_key_file_roundtrip(public_key: PublicKey) {\n    let tempdir = tempfile::tempdir().unwrap();\n    let path = tempdir.path().join(\"test_public_key.pem\");\n\n    public_key.to_file(&path).unwrap();\n    let decoded = PublicKey::from_file(&path).unwrap();\n    assert_eq!(public_key, decoded);\n}\n\nfn public_key_hex_roundtrip(public_key: PublicKey) {\n    let hex_encoded = public_key.to_hex();\n    let decoded = PublicKey::from_hex(&hex_encoded).unwrap();\n    assert_eq!(public_key, decoded);\n    assert_eq!(public_key.tag(), decoded.tag());\n\n    // Ensure malformed encoded version fails to decode.\n    
PublicKey::from_hex(&hex_encoded[..1]).unwrap_err();\n    PublicKey::from_hex(&hex_encoded[1..]).unwrap_err();\n}\n\nfn signature_serialization_roundtrip(signature: Signature) {\n    // Try to/from bincode.\n    let serialized = bincode::serialize(&signature).unwrap();\n    let deserialized: Signature = bincode::deserialize(&serialized).unwrap();\n    assert_eq!(signature, deserialized);\n    assert_eq!(signature.tag(), deserialized.tag());\n\n    // Try to/from JSON.\n    let serialized = serde_json::to_vec_pretty(&signature).unwrap();\n    let deserialized = serde_json::from_slice(&serialized).unwrap();\n    assert_eq!(signature, deserialized);\n    assert_eq!(signature.tag(), deserialized.tag());\n\n    // Try to/from using bytesrepr.\n    let serialized = bytesrepr::serialize(signature).unwrap();\n    let deserialized = bytesrepr::deserialize(serialized).unwrap();\n    assert_eq!(signature, deserialized);\n    assert_eq!(signature.tag(), deserialized.tag())\n}\n\nfn signature_hex_roundtrip(signature: Signature) {\n    let hex_encoded = signature.to_hex();\n    let decoded = Signature::from_hex(hex_encoded.as_bytes()).unwrap();\n    assert_eq!(signature, decoded);\n    assert_eq!(signature.tag(), decoded.tag());\n\n    // Ensure malformed encoded version fails to decode.\n    Signature::from_hex(&hex_encoded[..1]).unwrap_err();\n    Signature::from_hex(&hex_encoded[1..]).unwrap_err();\n}\n\nfn hash<T: Hash>(data: &T) -> u64 {\n    let mut hasher = DefaultHasher::new();\n    data.hash(&mut hasher);\n    hasher.finish()\n}\n\nfn check_ord_and_hash<T: Clone + Ord + PartialOrd + Hash>(low: T, high: T) {\n    let low_copy = low.clone();\n\n    assert_eq!(hash(&low), hash(&low_copy));\n    assert_ne!(hash(&low), hash(&high));\n\n    assert_eq!(Ordering::Less, low.cmp(&high));\n    assert_eq!(Some(Ordering::Less), low.partial_cmp(&high));\n\n    assert_eq!(Ordering::Greater, high.cmp(&low));\n    assert_eq!(Some(Ordering::Greater), high.partial_cmp(&low));\n\n    
assert_eq!(Ordering::Equal, low.cmp(&low_copy));\n    assert_eq!(Some(Ordering::Equal), low.partial_cmp(&low_copy));\n}\n\nmod system {\n    #[cfg(any(feature = \"std-fs-io\", test))]\n    use std::path::Path;\n\n    use super::{sign, verify};\n    use crate::crypto::{AsymmetricType, PublicKey, SecretKey, Signature};\n\n    #[test]\n    fn secret_key_to_der_should_error() {\n        assert!(SecretKey::system().to_der().is_err());\n    }\n\n    #[test]\n    fn secret_key_to_pem_should_error() {\n        assert!(SecretKey::system().to_pem().is_err());\n    }\n\n    #[cfg(any(feature = \"std-fs-io\", test))]\n    #[test]\n    fn secret_key_to_file_should_error() {\n        assert!(SecretKey::system().to_file(Path::new(\"/dev/null\")).is_err());\n    }\n\n    #[test]\n    fn public_key_serialization_roundtrip() {\n        super::public_key_serialization_roundtrip(PublicKey::system());\n    }\n\n    #[test]\n    fn public_key_to_der_should_error() {\n        assert!(PublicKey::system().to_der().is_err());\n    }\n\n    #[test]\n    fn public_key_to_pem_should_error() {\n        assert!(PublicKey::system().to_pem().is_err());\n    }\n\n    #[cfg(any(feature = \"std-fs-io\", test))]\n    #[test]\n    fn public_key_to_file_should_error() {\n        assert!(PublicKey::system().to_file(Path::new(\"/dev/null\")).is_err());\n    }\n\n    #[test]\n    fn public_key_to_and_from_hex() {\n        super::public_key_hex_roundtrip(PublicKey::system());\n    }\n\n    #[test]\n    #[should_panic]\n    fn sign_should_panic() {\n        sign([], &SecretKey::system(), &PublicKey::system());\n    }\n\n    #[test]\n    fn signature_to_and_from_hex() {\n        super::signature_hex_roundtrip(Signature::system());\n    }\n\n    #[test]\n    fn public_key_to_account_hash() {\n        assert_ne!(\n            PublicKey::system().to_account_hash().as_ref(),\n            Into::<Vec<u8>>::into(PublicKey::system())\n        );\n    }\n\n    #[test]\n    fn verify_should_error() {\n        
assert!(verify([], &Signature::system(), &PublicKey::system()).is_err());\n    }\n\n    #[test]\n    fn bytesrepr_roundtrip_signature() {\n        crate::bytesrepr::test_serialization_roundtrip(&Signature::system());\n    }\n}\n\nmod ed25519 {\n    use rand::Rng;\n\n    use super::*;\n    use crate::ED25519_TAG;\n\n    const SECRET_KEY_LENGTH: usize = SecretKey::ED25519_LENGTH;\n    const PUBLIC_KEY_LENGTH: usize = PublicKey::ED25519_LENGTH;\n    const SIGNATURE_LENGTH: usize = Signature::ED25519_LENGTH;\n\n    #[test]\n    fn secret_key_from_bytes() {\n        // Secret key should be `SecretKey::ED25519_LENGTH` bytes.\n        let bytes = [0; SECRET_KEY_LENGTH + 1];\n        assert!(SecretKey::ed25519_from_bytes(&bytes[..]).is_err());\n        assert!(SecretKey::ed25519_from_bytes(&bytes[2..]).is_err());\n\n        // Check the same bytes but of the right length succeeds.\n        assert!(SecretKey::ed25519_from_bytes(&bytes[1..]).is_ok());\n    }\n\n    #[test]\n    fn secret_key_to_and_from_der() {\n        let mut rng = TestRng::new();\n        let secret_key = SecretKey::random_ed25519(&mut rng);\n        let der_encoded = secret_key.to_der().unwrap();\n        secret_key_der_roundtrip(secret_key);\n\n        // Check DER-encoded can be decoded by openssl.\n        let _ = OpenSSLSecretKey::private_key_from_der(&der_encoded).unwrap();\n    }\n\n    #[test]\n    fn secret_key_to_and_from_pem() {\n        let mut rng = TestRng::new();\n        let secret_key = SecretKey::random_ed25519(&mut rng);\n        secret_key_pem_roundtrip(secret_key);\n    }\n\n    #[test]\n    fn known_secret_key_to_pem() {\n        // Example values taken from https://tools.ietf.org/html/rfc8410#section-10.3\n        const KNOWN_KEY_PEM: &str = r#\"-----BEGIN PRIVATE KEY-----\nMC4CAQAwBQYDK2VwBCIEINTuctv5E1hK1bbY8fdp+K06/nwoy/HU++CXqI9EdVhC\n-----END PRIVATE KEY-----\"#;\n        let key_bytes =\n            
base16::decode(\"d4ee72dbf913584ad5b6d8f1f769f8ad3afe7c28cbf1d4fbe097a88f44755842\")\n                .unwrap();\n        let expected_key = SecretKey::ed25519_from_bytes(key_bytes).unwrap();\n        super::known_secret_key_to_pem(&expected_key, KNOWN_KEY_PEM, ED25519_TAG);\n    }\n\n    #[cfg(any(feature = \"std-fs-io\", test))]\n    #[test]\n    fn secret_key_to_and_from_file() {\n        let mut rng = TestRng::new();\n        let secret_key = SecretKey::random_ed25519(&mut rng);\n        secret_key_file_roundtrip(secret_key);\n    }\n\n    #[test]\n    fn public_key_serialization_roundtrip() {\n        let mut rng = TestRng::new();\n        let public_key = PublicKey::random_ed25519(&mut rng);\n        super::public_key_serialization_roundtrip(public_key);\n    }\n\n    #[test]\n    fn public_key_from_bytes() {\n        // Public key should be `PublicKey::ED25519_LENGTH` bytes.  Create vec with an extra\n        // byte.\n        let mut rng = TestRng::new();\n        let public_key = PublicKey::random_ed25519(&mut rng);\n        let bytes: Vec<u8> = iter::once(rng.gen())\n            .chain(Into::<Vec<u8>>::into(public_key))\n            .collect::<Vec<u8>>();\n\n        assert!(PublicKey::ed25519_from_bytes(&bytes[..]).is_err());\n        assert!(PublicKey::ed25519_from_bytes(&bytes[2..]).is_err());\n\n        // Check the same bytes but of the right length succeeds.\n        assert!(PublicKey::ed25519_from_bytes(&bytes[1..]).is_ok());\n    }\n\n    #[test]\n    fn public_key_to_and_from_der() {\n        let mut rng = TestRng::new();\n        let public_key = PublicKey::random_ed25519(&mut rng);\n        public_key_der_roundtrip(public_key);\n    }\n\n    #[test]\n    fn public_key_to_and_from_pem() {\n        let mut rng = TestRng::new();\n        let public_key = PublicKey::random_ed25519(&mut rng);\n        public_key_pem_roundtrip(public_key);\n    }\n\n    #[test]\n    fn known_public_key_to_pem() {\n        // Example values taken from 
https://tools.ietf.org/html/rfc8410#section-10.1\n        const KNOWN_KEY_HEX: &str =\n            \"19bf44096984cdfe8541bac167dc3b96c85086aa30b6b6cb0c5c38ad703166e1\";\n        const KNOWN_KEY_PEM: &str = r#\"-----BEGIN PUBLIC KEY-----\nMCowBQYDK2VwAyEAGb9ECWmEzf6FQbrBZ9w7lshQhqowtrbLDFw4rXAxZuE=\n-----END PUBLIC KEY-----\"#;\n        super::known_public_key_to_pem(KNOWN_KEY_HEX, KNOWN_KEY_PEM);\n    }\n\n    #[cfg(any(feature = \"std-fs-io\", test))]\n    #[test]\n    fn public_key_to_and_from_file() {\n        let mut rng = TestRng::new();\n        let public_key = PublicKey::random_ed25519(&mut rng);\n        public_key_file_roundtrip(public_key);\n    }\n\n    #[test]\n    fn public_key_to_and_from_hex() {\n        let mut rng = TestRng::new();\n        let public_key = PublicKey::random_ed25519(&mut rng);\n        public_key_hex_roundtrip(public_key);\n    }\n\n    #[test]\n    fn signature_serialization_roundtrip() {\n        let mut rng = TestRng::new();\n        let secret_key = SecretKey::random_ed25519(&mut rng);\n        let public_key = PublicKey::from(&secret_key);\n        let data = b\"data\";\n        let signature = sign(data, &secret_key, &public_key);\n        super::signature_serialization_roundtrip(signature);\n    }\n\n    #[test]\n    fn signature_from_bytes() {\n        // Signature should be `Signature::ED25519_LENGTH` bytes.\n        let bytes = [2; SIGNATURE_LENGTH + 1];\n        assert!(Signature::ed25519_from_bytes(&bytes[..]).is_err());\n        assert!(Signature::ed25519_from_bytes(&bytes[2..]).is_err());\n\n        // Check the same bytes but of the right length succeeds.\n        assert!(Signature::ed25519_from_bytes(&bytes[1..]).is_ok());\n    }\n\n    #[test]\n    fn signature_key_to_and_from_hex() {\n        let mut rng = TestRng::new();\n        let secret_key = SecretKey::random_ed25519(&mut rng);\n        let public_key = PublicKey::from(&secret_key);\n        let data = b\"data\";\n        let signature = sign(data, 
&secret_key, &public_key);\n        signature_hex_roundtrip(signature);\n    }\n\n    #[test]\n    fn public_key_traits() {\n        let public_key_low = PublicKey::ed25519_from_bytes([1; PUBLIC_KEY_LENGTH]).unwrap();\n        let public_key_high = PublicKey::ed25519_from_bytes([3; PUBLIC_KEY_LENGTH]).unwrap();\n        check_ord_and_hash(public_key_low, public_key_high)\n    }\n\n    #[test]\n    fn public_key_to_account_hash() {\n        let public_key_high = PublicKey::ed25519_from_bytes([255; PUBLIC_KEY_LENGTH]).unwrap();\n        assert_ne!(\n            public_key_high.to_account_hash().as_ref(),\n            Into::<Vec<u8>>::into(public_key_high)\n        );\n    }\n\n    #[test]\n    fn signature_traits() {\n        let signature_low = Signature::ed25519([1; SIGNATURE_LENGTH]).unwrap();\n        let signature_high = Signature::ed25519([3; SIGNATURE_LENGTH]).unwrap();\n        check_ord_and_hash(signature_low, signature_high)\n    }\n\n    #[test]\n    fn sign_and_verify() {\n        let mut rng = TestRng::new();\n        let secret_key = SecretKey::random_ed25519(&mut rng);\n\n        let public_key = PublicKey::from(&secret_key);\n        let other_public_key = PublicKey::random_ed25519(&mut rng);\n        let wrong_type_public_key = PublicKey::random_secp256k1(&mut rng);\n\n        let message = b\"message\";\n        let signature = sign(message, &secret_key, &public_key);\n\n        assert!(verify(message, &signature, &public_key).is_ok());\n        assert!(verify(message, &signature, &other_public_key).is_err());\n        assert!(verify(message, &signature, &wrong_type_public_key).is_err());\n        assert!(verify(&message[1..], &signature, &public_key).is_err());\n    }\n\n    #[test]\n    fn bytesrepr_roundtrip_signature() {\n        let mut rng = TestRng::new();\n        let ed25519_secret_key = SecretKey::random_ed25519(&mut rng);\n        let public_key = PublicKey::from(&ed25519_secret_key);\n        let data = b\"data\";\n        let signature 
= sign(data, &ed25519_secret_key, &public_key);\n        bytesrepr::test_serialization_roundtrip(&signature);\n    }\n\n    #[test]\n    fn validate_known_signature() {\n        // In the event that this test fails, we need to consider pinning the version of the\n        // `ed25519-dalek` crate to maintain backwards compatibility with existing data on the\n        // Casper network.\n\n        // Values taken from:\n        // https://github.com/dalek-cryptography/ed25519-dalek/blob/925eb9ea56192053c9eb93b9d30d1b9419eee128/TESTVECTORS#L62\n        let secret_key_hex = \"bf5ba5d6a49dd5ef7b4d5d7d3e4ecc505c01f6ccee4c54b5ef7b40af6a454140\";\n        let public_key_hex = \"1be034f813017b900d8990af45fad5b5214b573bd303ef7a75ef4b8c5c5b9842\";\n        let message_hex =\n            \"16152c2e037b1c0d3219ced8e0674aee6b57834b55106c5344625322da638ecea2fc9a424a05ee9512\\\n                d48fcf75dd8bd4691b3c10c28ec98ee1afa5b863d1c36795ed18105db3a9aabd9d2b4c1747adbaf1a56\\\n                ffcc0c533c1c0faef331cdb79d961fa39f880a1b8b1164741822efb15a7259a465bef212855751fab66\\\n                a897bfa211abe0ea2f2e1cd8a11d80e142cde1263eec267a3138ae1fcf4099db0ab53d64f336f4bcd7a\\\n                363f6db112c0a2453051a0006f813aaf4ae948a2090619374fa58052409c28ef76225687df3cb2d1b0b\\\n                fb43b09f47f1232f790e6d8dea759e57942099f4c4bd3390f28afc2098244961465c643fc8b29766af2\\\n                bcbc5440b86e83608cfc937be98bb4827fd5e6b689adc2e26513db531076a6564396255a09975b7034d\\\n                ac06461b255642e3a7ed75fa9fc265011f5f6250382a84ac268d63ba64\";\n        let signature_hex =\n            \"279cace6fdaf3945e3837df474b28646143747632bede93e7a66f5ca291d2c24978512ca0cb8827c8c\\\n                322685bd605503a5ec94dbae61bbdcae1e49650602bc07\";\n\n        let secret_key_bytes = base16::decode(secret_key_hex).unwrap();\n        let public_key_bytes = base16::decode(public_key_hex).unwrap();\n        let message_bytes = base16::decode(message_hex).unwrap();\n        let 
signature_bytes = base16::decode(signature_hex).unwrap();\n\n        let secret_key = SecretKey::ed25519_from_bytes(secret_key_bytes).unwrap();\n        let public_key = PublicKey::ed25519_from_bytes(public_key_bytes).unwrap();\n        assert_eq!(public_key, PublicKey::from(&secret_key));\n\n        let signature = Signature::ed25519_from_bytes(signature_bytes).unwrap();\n        assert_eq!(sign(&message_bytes, &secret_key, &public_key), signature);\n        assert!(verify(&message_bytes, &signature, &public_key).is_ok());\n    }\n}\n\nmod secp256k1 {\n    use rand::Rng;\n\n    use super::*;\n    use crate::SECP256K1_TAG;\n\n    const SECRET_KEY_LENGTH: usize = SecretKey::SECP256K1_LENGTH;\n    const SIGNATURE_LENGTH: usize = Signature::SECP256K1_LENGTH;\n\n    #[test]\n    fn secret_key_from_bytes() {\n        // Secret key should be `SecretKey::SECP256K1_LENGTH` bytes.\n        // The k256 library will ensure that a byte stream of a length not equal to\n        // `SECP256K1_LENGTH` will fail due to an assertion internal to the library.\n        // We can check that invalid byte streams e.g [0;32] does not generate a valid key.\n        let bytes = [0; SECRET_KEY_LENGTH];\n        assert!(SecretKey::secp256k1_from_bytes(&bytes[..]).is_err());\n\n        // Check that a valid byte stream produces a valid key\n        let bytes = [1; SECRET_KEY_LENGTH];\n        assert!(SecretKey::secp256k1_from_bytes(&bytes[..]).is_ok());\n    }\n\n    #[test]\n    fn secret_key_to_and_from_der() {\n        let mut rng = TestRng::new();\n        let secret_key = SecretKey::random_secp256k1(&mut rng);\n        secret_key_der_roundtrip(secret_key);\n    }\n\n    #[test]\n    fn secret_key_to_and_from_pem() {\n        let mut rng = TestRng::new();\n        let secret_key = SecretKey::random_secp256k1(&mut rng);\n        secret_key_pem_roundtrip(secret_key);\n    }\n\n    #[test]\n    fn known_secret_key_to_pem() {\n        // Example values taken from Python client.\n        const 
KNOWN_KEY_PEM: &str = r#\"-----BEGIN EC PRIVATE KEY-----\nMHQCAQEEIL3fqaMKAfXSK1D2PnVVbZlZ7jTv133nukq4+95s6kmcoAcGBSuBBAAK\noUQDQgAEQI6VJjFv0fje9IDdRbLMcv/XMnccnOtdkv+kBR5u4ISEAkuc2TFWQHX0\nYj9oTB9fx9+vvQdxJOhMtu46kGo0Uw==\n-----END EC PRIVATE KEY-----\"#;\n        let key_bytes =\n            base16::decode(\"bddfa9a30a01f5d22b50f63e75556d9959ee34efd77de7ba4ab8fbde6cea499c\")\n                .unwrap();\n        let expected_key = SecretKey::secp256k1_from_bytes(key_bytes).unwrap();\n        super::known_secret_key_to_pem(&expected_key, KNOWN_KEY_PEM, SECP256K1_TAG);\n    }\n\n    #[test]\n    #[cfg(any(feature = \"std-fs-io\", test))]\n    fn secret_key_to_and_from_file() {\n        let mut rng = TestRng::new();\n        let secret_key = SecretKey::random_secp256k1(&mut rng);\n        secret_key_file_roundtrip(secret_key);\n    }\n\n    #[test]\n    fn public_key_serialization_roundtrip() {\n        let mut rng = TestRng::new();\n        let public_key = PublicKey::random_secp256k1(&mut rng);\n        super::public_key_serialization_roundtrip(public_key);\n    }\n\n    #[test]\n    fn public_key_from_bytes() {\n        // Public key should be `PublicKey::SECP256K1_LENGTH` bytes.  
Create vec with an extra\n        // byte.\n        let mut rng = TestRng::new();\n        let public_key = PublicKey::random_secp256k1(&mut rng);\n        let bytes: Vec<u8> = iter::once(rng.gen())\n            .chain(Into::<Vec<u8>>::into(public_key))\n            .collect::<Vec<u8>>();\n\n        assert!(PublicKey::secp256k1_from_bytes(&bytes[..]).is_err());\n        assert!(PublicKey::secp256k1_from_bytes(&bytes[2..]).is_err());\n\n        // Check the same bytes but of the right length succeeds.\n        assert!(PublicKey::secp256k1_from_bytes(&bytes[1..]).is_ok());\n    }\n\n    #[test]\n    fn public_key_to_and_from_der() {\n        let mut rng = TestRng::new();\n        let public_key = PublicKey::random_secp256k1(&mut rng);\n        public_key_der_roundtrip(public_key);\n    }\n\n    #[test]\n    fn public_key_to_and_from_pem() {\n        let mut rng = TestRng::new();\n        let public_key = PublicKey::random_secp256k1(&mut rng);\n        public_key_pem_roundtrip(public_key);\n    }\n\n    #[test]\n    fn known_public_key_to_pem() {\n        // Example values taken from Python client.\n        const KNOWN_KEY_HEX: &str =\n            \"03408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084\";\n        const KNOWN_KEY_PEM: &str = r#\"-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEQI6VJjFv0fje9IDdRbLMcv/XMnccnOtd\nkv+kBR5u4ISEAkuc2TFWQHX0Yj9oTB9fx9+vvQdxJOhMtu46kGo0Uw==\n-----END PUBLIC KEY-----\"#;\n        super::known_public_key_to_pem(KNOWN_KEY_HEX, KNOWN_KEY_PEM);\n    }\n\n    #[cfg(any(feature = \"std-fs-io\", test))]\n    #[test]\n    fn public_key_to_and_from_file() {\n        let mut rng = TestRng::new();\n        let public_key = PublicKey::random_secp256k1(&mut rng);\n        public_key_file_roundtrip(public_key);\n    }\n\n    #[test]\n    fn public_key_to_and_from_hex() {\n        let mut rng = TestRng::new();\n        let public_key = PublicKey::random_secp256k1(&mut rng);\n        
public_key_hex_roundtrip(public_key);\n    }\n\n    #[test]\n    fn signature_serialization_roundtrip() {\n        let mut rng = TestRng::new();\n        let secret_key = SecretKey::random_secp256k1(&mut rng);\n        let public_key = PublicKey::from(&secret_key);\n        let data = b\"data\";\n        let signature = sign(data, &secret_key, &public_key);\n        super::signature_serialization_roundtrip(signature);\n    }\n\n    #[test]\n    fn bytesrepr_roundtrip_signature() {\n        let mut rng = TestRng::new();\n        let secret_key = SecretKey::random_secp256k1(&mut rng);\n        let public_key = PublicKey::from(&secret_key);\n        let data = b\"data\";\n        let signature = sign(data, &secret_key, &public_key);\n        bytesrepr::test_serialization_roundtrip(&signature);\n    }\n\n    #[test]\n    fn signature_from_bytes() {\n        // Signature should be `Signature::SECP256K1_LENGTH` bytes.\n        let bytes = [2; SIGNATURE_LENGTH + 1];\n        assert!(Signature::secp256k1_from_bytes(&bytes[..]).is_err());\n        assert!(Signature::secp256k1_from_bytes(&bytes[2..]).is_err());\n\n        // Check the same bytes but of the right length succeeds.\n        assert!(Signature::secp256k1_from_bytes(&bytes[1..]).is_ok());\n    }\n\n    #[test]\n    fn signature_key_to_and_from_hex() {\n        let mut rng = TestRng::new();\n        let secret_key = SecretKey::random_secp256k1(&mut rng);\n        let public_key = PublicKey::from(&secret_key);\n        let data = b\"data\";\n        let signature = sign(data, &secret_key, &public_key);\n        signature_hex_roundtrip(signature);\n    }\n\n    #[test]\n    fn public_key_traits() {\n        let mut rng = TestRng::new();\n        let public_key1 = PublicKey::random_secp256k1(&mut rng);\n        let public_key2 = PublicKey::random_secp256k1(&mut rng);\n        if Into::<Vec<u8>>::into(public_key1.clone()) < Into::<Vec<u8>>::into(public_key2.clone()) {\n            check_ord_and_hash(public_key1, 
public_key2)\n        } else {\n            check_ord_and_hash(public_key2, public_key1)\n        }\n    }\n\n    #[test]\n    fn public_key_to_account_hash() {\n        let mut rng = TestRng::new();\n        let public_key = PublicKey::random_secp256k1(&mut rng);\n        assert_ne!(\n            public_key.to_account_hash().as_ref(),\n            Into::<Vec<u8>>::into(public_key)\n        );\n    }\n\n    #[test]\n    fn signature_traits() {\n        let signature_low = Signature::secp256k1([1; SIGNATURE_LENGTH]).unwrap();\n        let signature_high = Signature::secp256k1([3; SIGNATURE_LENGTH]).unwrap();\n        check_ord_and_hash(signature_low, signature_high)\n    }\n\n    #[test]\n    fn validate_known_signature() {\n        // In the event that this test fails, we need to consider pinning the version of the\n        // `k256` crate to maintain backwards compatibility with existing data on the Casper\n        // network.\n        let secret_key_hex = \"833fe62409237b9d62ec77587520911e9a759cec1d19755b7da901b96dca3d42\";\n        let public_key_hex = \"028e24fd9654f12c793d3d376c15f7abe53e0fbd537884a3a98d10d2dc6d513b4e\";\n        let message_hex = \"616263\";\n        let signature_hex = \"8016162860f0795154643d15c5ab5bb840d8c695d6de027421755579ea7f2a4629b7e0c88fc3428669a6a89496f426181b73f10c6c8a05ac8f49d6cb5032eb89\";\n\n        let secret_key_bytes = base16::decode(secret_key_hex).unwrap();\n        let public_key_bytes = base16::decode(public_key_hex).unwrap();\n        let message_bytes = base16::decode(message_hex).unwrap();\n        let signature_bytes = base16::decode(signature_hex).unwrap();\n\n        let secret_key = SecretKey::secp256k1_from_bytes(secret_key_bytes).unwrap();\n        let public_key = PublicKey::secp256k1_from_bytes(public_key_bytes).unwrap();\n        assert_eq!(public_key, PublicKey::from(&secret_key));\n\n        let signature = Signature::secp256k1_from_bytes(signature_bytes).unwrap();\n        assert_eq!(sign(&message_bytes, 
&secret_key, &public_key), signature);\n        assert!(verify(&message_bytes, &signature, &public_key).is_ok());\n    }\n}\n\n#[test]\nfn public_key_traits() {\n    let system_key = PublicKey::system();\n    let mut rng = TestRng::new();\n    let ed25519_public_key = PublicKey::random_ed25519(&mut rng);\n    let secp256k1_public_key = PublicKey::random_secp256k1(&mut rng);\n    check_ord_and_hash(ed25519_public_key.clone(), secp256k1_public_key.clone());\n    check_ord_and_hash(system_key.clone(), ed25519_public_key);\n    check_ord_and_hash(system_key, secp256k1_public_key);\n}\n\n#[test]\nfn signature_traits() {\n    let system_sig = Signature::system();\n    let ed25519_sig = Signature::ed25519([3; Signature::ED25519_LENGTH]).unwrap();\n    let secp256k1_sig = Signature::secp256k1([1; Signature::SECP256K1_LENGTH]).unwrap();\n    check_ord_and_hash(ed25519_sig, secp256k1_sig);\n    check_ord_and_hash(system_sig, ed25519_sig);\n    check_ord_and_hash(system_sig, secp256k1_sig);\n}\n\n#[test]\nfn sign_and_verify() {\n    let mut rng = TestRng::new();\n    let ed25519_secret_key = SecretKey::random_ed25519(&mut rng);\n    let secp256k1_secret_key = SecretKey::random_secp256k1(&mut rng);\n\n    let ed25519_public_key = PublicKey::from(&ed25519_secret_key);\n    let secp256k1_public_key = PublicKey::from(&secp256k1_secret_key);\n\n    let other_ed25519_public_key = PublicKey::random_ed25519(&mut rng);\n    let other_secp256k1_public_key = PublicKey::random_secp256k1(&mut rng);\n\n    let message = b\"message\";\n    let ed25519_signature = sign(message, &ed25519_secret_key, &ed25519_public_key);\n    let secp256k1_signature = sign(message, &secp256k1_secret_key, &secp256k1_public_key);\n\n    assert!(verify(message, &ed25519_signature, &ed25519_public_key).is_ok());\n    assert!(verify(message, &secp256k1_signature, &secp256k1_public_key).is_ok());\n\n    assert!(verify(message, &ed25519_signature, &other_ed25519_public_key).is_err());\n    assert!(verify(message, 
&secp256k1_signature, &other_secp256k1_public_key).is_err());\n\n    assert!(verify(message, &ed25519_signature, &secp256k1_public_key).is_err());\n    assert!(verify(message, &secp256k1_signature, &ed25519_public_key).is_err());\n\n    assert!(verify(&message[1..], &ed25519_signature, &ed25519_public_key).is_err());\n    assert!(verify(&message[1..], &secp256k1_signature, &secp256k1_public_key).is_err());\n}\n\n#[test]\nfn should_construct_secp256k1_from_uncompressed_bytes() {\n    let mut rng = TestRng::new();\n\n    let mut secret_key_bytes = [0u8; SecretKey::SECP256K1_LENGTH];\n    rng.fill_bytes(&mut secret_key_bytes[..]);\n\n    // Construct a secp256k1 secret key and use that to construct a public key.\n    let secp256k1_secret_key = k256::SecretKey::from_slice(&secret_key_bytes).unwrap();\n    let secp256k1_public_key = secp256k1_secret_key.public_key();\n\n    // Construct a CL secret key and public key from that (which will be a compressed key).\n    let secret_key = SecretKey::secp256k1_from_bytes(secret_key_bytes).unwrap();\n    let public_key = PublicKey::from(&secret_key);\n    assert_eq!(\n        Into::<Vec<u8>>::into(public_key.clone()).len(),\n        PublicKey::SECP256K1_LENGTH\n    );\n    assert_ne!(\n        secp256k1_public_key\n            .to_encoded_point(false)\n            .as_bytes()\n            .len(),\n        PublicKey::SECP256K1_LENGTH\n    );\n\n    // Construct a CL public key from uncompressed public key bytes and ensure it's compressed.\n    let from_uncompressed_bytes =\n        PublicKey::secp256k1_from_bytes(secp256k1_public_key.to_encoded_point(false).as_bytes())\n            .unwrap();\n    assert_eq!(public_key, from_uncompressed_bytes);\n\n    // Construct a CL public key from the uncompressed one's hex representation and ensure it's\n    // compressed.\n    let uncompressed_hex = {\n        let tag_bytes = vec![0x02u8];\n        base16::encode_lower(&tag_bytes)\n            + 
&base16::encode_lower(&secp256k1_public_key.to_encoded_point(false).as_bytes())\n    };\n\n    let from_uncompressed_hex = PublicKey::from_hex(uncompressed_hex).unwrap();\n    assert_eq!(public_key, from_uncompressed_hex);\n}\n\n#[test]\nfn generate_ed25519_should_generate_an_ed25519_key() {\n    let secret_key = SecretKey::generate_ed25519().unwrap();\n    assert!(matches!(secret_key, SecretKey::Ed25519(_)))\n}\n\n#[test]\nfn generate_secp256k1_should_generate_an_secp256k1_key() {\n    let secret_key = SecretKey::generate_secp256k1().unwrap();\n    assert!(matches!(secret_key, SecretKey::Secp256k1(_)))\n}\n"
  },
  {
    "path": "types/src/crypto/asymmetric_key.rs",
    "content": "//! Asymmetric key types and methods on them\n\nuse alloc::{\n    format,\n    string::{String, ToString},\n    vec::Vec,\n};\nuse core::{\n    cmp::Ordering,\n    convert::TryFrom,\n    fmt::{self, Debug, Display, Formatter},\n    hash::{Hash, Hasher},\n    iter,\n    marker::Copy,\n};\n#[cfg(any(feature = \"testing\", test))]\nuse rand::distributions::{Distribution, Standard};\n#[cfg(any(feature = \"std-fs-io\", test))]\nuse std::path::Path;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"std\", test))]\nuse derp::{Der, Tag};\nuse ed25519_dalek::{\n    Signature as Ed25519Signature, SigningKey as Ed25519SecretKey,\n    VerifyingKey as Ed25519PublicKey, PUBLIC_KEY_LENGTH as ED25519_PUBLIC_KEY_LENGTH,\n    SECRET_KEY_LENGTH as ED25519_SECRET_KEY_LENGTH, SIGNATURE_LENGTH as ED25519_SIGNATURE_LENGTH,\n};\nuse hex_fmt::HexFmt;\nuse k256::ecdsa::{\n    signature::{Signer, Verifier},\n    RecoveryId, Signature as Secp256k1Signature, SigningKey as Secp256k1SecretKey, VerifyingKey,\n    VerifyingKey as Secp256k1PublicKey,\n};\n#[cfg(feature = \"json-schema\")]\nuse once_cell::sync::Lazy;\n#[cfg(any(feature = \"std\", test))]\nuse pem::Pem;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{Rng, RngCore};\n#[cfg(feature = \"json-schema\")]\nuse schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema};\nuse serde::{Deserialize, Deserializer, Serialize, Serializer};\n#[cfg(feature = \"json-schema\")]\nuse serde_json::json;\n#[cfg(any(feature = \"std\", test))]\nuse untrusted::Input;\n\n#[cfg(any(feature = \"std\", feature = \"testing\", test))]\nuse crate::crypto::ErrorExt;\n#[cfg(any(feature = \"std-fs-io\", test))]\nuse crate::file_utils::{read_file, write_file, write_private_file};\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    account::AccountHash,\n    bytesrepr,\n    bytesrepr::{FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    checksummed_hex,\n    crypto::Error,\n    
CLType, CLTyped, Tagged,\n};\n\n#[cfg(any(feature = \"testing\", test))]\npub mod gens;\n#[cfg(test)]\nmod tests;\n\nconst TAG_LENGTH: usize = U8_SERIALIZED_LENGTH;\n\n/// Tag for system variant.\npub const SYSTEM_TAG: u8 = 0;\nconst SYSTEM: &str = \"System\";\n\n/// Tag for ed25519 variant.\npub const ED25519_TAG: u8 = 1;\nconst ED25519: &str = \"Ed25519\";\n\n/// Tag for secp256k1 variant.\npub const SECP256K1_TAG: u8 = 2;\nconst SECP256K1: &str = \"Secp256k1\";\n\nconst SECP256K1_SECRET_KEY_LENGTH: usize = 32;\nconst SECP256K1_COMPRESSED_PUBLIC_KEY_LENGTH: usize = 33;\nconst SECP256K1_SIGNATURE_LENGTH: usize = 64;\n\n/// Public key for system account.\npub const SYSTEM_ACCOUNT: PublicKey = PublicKey::System;\n\n// See https://www.secg.org/sec1-v2.pdf#subsection.C.4\n#[cfg(any(feature = \"std\", test))]\nconst EC_PUBLIC_KEY_OBJECT_IDENTIFIER: [u8; 7] = [42, 134, 72, 206, 61, 2, 1];\n\n// See https://tools.ietf.org/html/rfc8410#section-10.3\n#[cfg(any(feature = \"std\", test))]\nconst ED25519_OBJECT_IDENTIFIER: [u8; 3] = [43, 101, 112];\n#[cfg(any(feature = \"std\", test))]\nconst ED25519_PEM_SECRET_KEY_TAG: &str = \"PRIVATE KEY\";\n#[cfg(any(feature = \"std\", test))]\nconst ED25519_PEM_PUBLIC_KEY_TAG: &str = \"PUBLIC KEY\";\n\n// Ref?\n#[cfg(any(feature = \"std\", test))]\nconst SECP256K1_OBJECT_IDENTIFIER: [u8; 5] = [43, 129, 4, 0, 10];\n#[cfg(any(feature = \"std\", test))]\nconst SECP256K1_PEM_SECRET_KEY_TAG: &str = \"EC PRIVATE KEY\";\n#[cfg(any(feature = \"std\", test))]\nconst SECP256K1_PEM_PUBLIC_KEY_TAG: &str = \"PUBLIC KEY\";\n\n#[cfg(feature = \"json-schema\")]\nstatic ED25519_SECRET_KEY: Lazy<SecretKey> = Lazy::new(|| {\n    let bytes = [15u8; SecretKey::ED25519_LENGTH];\n    SecretKey::ed25519_from_bytes(bytes).unwrap()\n});\n\n#[cfg(feature = \"json-schema\")]\nstatic ED25519_PUBLIC_KEY: Lazy<PublicKey> = Lazy::new(|| {\n    let bytes = [15u8; SecretKey::ED25519_LENGTH];\n    let secret_key = SecretKey::ed25519_from_bytes(bytes).unwrap();\n    
PublicKey::from(&secret_key)\n});\n\n/// Operations on asymmetric cryptographic type.\npub trait AsymmetricType<'a>\nwhere\n    Self: 'a + Sized + Tagged<u8>,\n    Vec<u8>: From<&'a Self>,\n{\n    /// Converts `self` to hex, where the first byte represents the algorithm tag.\n    fn to_hex(&'a self) -> String {\n        let bytes = iter::once(self.tag())\n            .chain(Vec::<u8>::from(self))\n            .collect::<Vec<u8>>();\n        base16::encode_lower(&bytes)\n    }\n\n    /// Tries to decode `Self` from its hex-representation.  The hex format should be as produced\n    /// by `AsymmetricType::to_hex()`.\n    fn from_hex<A: AsRef<[u8]>>(input: A) -> Result<Self, Error> {\n        if input.as_ref().len() < 2 {\n            return Err(Error::AsymmetricKey(\n                \"failed to decode from hex: too short\".to_string(),\n            ));\n        }\n\n        let (tag_hex, key_hex) = input.as_ref().split_at(2);\n\n        let tag = checksummed_hex::decode(tag_hex)?;\n        let key_bytes = checksummed_hex::decode(key_hex)?;\n\n        match tag[0] {\n            SYSTEM_TAG => {\n                if key_bytes.is_empty() {\n                    Ok(Self::system())\n                } else {\n                    Err(Error::AsymmetricKey(\n                        \"failed to decode from hex: invalid system variant\".to_string(),\n                    ))\n                }\n            }\n            ED25519_TAG => Self::ed25519_from_bytes(&key_bytes),\n            SECP256K1_TAG => Self::secp256k1_from_bytes(&key_bytes),\n            _ => Err(Error::AsymmetricKey(format!(\n                \"failed to decode from hex: invalid tag.  
Expected {}, {} or {}, got {}\",\n                SYSTEM_TAG, ED25519_TAG, SECP256K1_TAG, tag[0]\n            ))),\n        }\n    }\n\n    /// Constructs a new system variant.\n    fn system() -> Self;\n\n    /// Constructs a new ed25519 variant from a byte slice.\n    fn ed25519_from_bytes<T: AsRef<[u8]>>(bytes: T) -> Result<Self, Error>;\n\n    /// Constructs a new secp256k1 variant from a byte slice.\n    fn secp256k1_from_bytes<T: AsRef<[u8]>>(bytes: T) -> Result<Self, Error>;\n}\n\n/// A secret or private asymmetric key.\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[non_exhaustive]\npub enum SecretKey {\n    /// System secret key.\n    System,\n    /// Ed25519 secret key.\n    #[cfg_attr(feature = \"datasize\", data_size(skip))]\n    // Manually verified to have no data on the heap.\n    Ed25519(Ed25519SecretKey),\n    /// secp256k1 secret key.\n    #[cfg_attr(feature = \"datasize\", data_size(skip))]\n    Secp256k1(Secp256k1SecretKey),\n}\n\nimpl SecretKey {\n    /// The length in bytes of a system secret key.\n    pub const SYSTEM_LENGTH: usize = 0;\n\n    /// The length in bytes of an Ed25519 secret key.\n    pub const ED25519_LENGTH: usize = ED25519_SECRET_KEY_LENGTH;\n\n    /// The length in bytes of a secp256k1 secret key.\n    pub const SECP256K1_LENGTH: usize = SECP256K1_SECRET_KEY_LENGTH;\n\n    /// Constructs a new system variant.\n    pub fn system() -> Self {\n        SecretKey::System\n    }\n\n    /// Constructs a new ed25519 variant from a byte slice.\n    pub fn ed25519_from_bytes<T: AsRef<[u8]>>(bytes: T) -> Result<Self, Error> {\n        Ok(SecretKey::Ed25519(Ed25519SecretKey::try_from(\n            bytes.as_ref(),\n        )?))\n    }\n\n    /// Constructs a new secp256k1 variant from a byte slice.\n    pub fn secp256k1_from_bytes<T: AsRef<[u8]>>(bytes: T) -> Result<Self, Error> {\n        Ok(SecretKey::Secp256k1(Secp256k1SecretKey::from_slice(\n            bytes.as_ref(),\n        )?))\n    }\n\n    /// Generates a new ed25519 
variant using the system's secure random number generator.\n    #[cfg(any(feature = \"std\", feature = \"testing\", test))]\n    pub fn generate_ed25519() -> Result<Self, ErrorExt> {\n        let mut bytes = [0u8; Self::ED25519_LENGTH];\n        getrandom::getrandom(&mut bytes[..])?;\n        SecretKey::ed25519_from_bytes(bytes).map_err(Into::into)\n    }\n\n    /// Generates a new secp256k1 variant using the system's secure random number generator.\n    #[cfg(any(feature = \"std\", feature = \"testing\", test))]\n    pub fn generate_secp256k1() -> Result<Self, ErrorExt> {\n        let mut bytes = [0u8; Self::SECP256K1_LENGTH];\n        getrandom::getrandom(&mut bytes[..])?;\n        SecretKey::secp256k1_from_bytes(bytes).map_err(Into::into)\n    }\n\n    /// Attempts to write the key bytes to the configured file path.\n    #[cfg(any(feature = \"std-fs-io\", test))]\n    pub fn to_file<P: AsRef<Path>>(&self, file: P) -> Result<(), ErrorExt> {\n        write_private_file(file, self.to_pem()?).map_err(ErrorExt::SecretKeySave)\n    }\n\n    /// Attempts to read the key bytes from configured file path.\n    #[cfg(any(feature = \"std-fs-io\", test))]\n    pub fn from_file<P: AsRef<Path>>(file: P) -> Result<Self, ErrorExt> {\n        let data = read_file(file).map_err(ErrorExt::SecretKeyLoad)?;\n        Self::from_pem(data)\n    }\n\n    /// DER encodes a key.\n    #[cfg(any(feature = \"std\", test))]\n    #[allow(deprecated)]\n    pub fn to_der(&self) -> Result<Vec<u8>, ErrorExt> {\n        match self {\n            SecretKey::System => Err(Error::System(String::from(\"to_der\")).into()),\n            SecretKey::Ed25519(secret_key) => {\n                // See https://tools.ietf.org/html/rfc8410#section-10.3\n                let mut key_bytes = vec![];\n                let mut der = Der::new(&mut key_bytes);\n                der.octet_string(&secret_key.to_bytes())?;\n\n                let mut encoded = vec![];\n                der = Der::new(&mut encoded);\n            
    der.sequence(|der| {\n                    der.integer(&[0])?;\n                    der.sequence(|der| der.oid(&ED25519_OBJECT_IDENTIFIER))?;\n                    der.octet_string(&key_bytes)\n                })?;\n                Ok(encoded)\n            }\n            SecretKey::Secp256k1(secret_key) => {\n                // See https://www.secg.org/sec1-v2.pdf#subsection.C.4\n                let mut oid_bytes = vec![];\n                let mut der = Der::new(&mut oid_bytes);\n                der.oid(&SECP256K1_OBJECT_IDENTIFIER)?;\n\n                let mut encoded = vec![];\n                der = Der::new(&mut encoded);\n                der.sequence(|der| {\n                    der.integer(&[1])?;\n                    der.octet_string(secret_key.to_bytes().as_slice())?;\n                    der.element(Tag::ContextSpecificConstructed0, &oid_bytes)\n                })?;\n                Ok(encoded)\n            }\n        }\n    }\n\n    /// Decodes a key from a DER-encoded slice.\n    #[cfg(any(feature = \"std\", test))]\n    pub fn from_der<T: AsRef<[u8]>>(input: T) -> Result<Self, ErrorExt> {\n        let input = Input::from(input.as_ref());\n\n        let (key_type_tag, raw_bytes) = input.read_all(derp::Error::Read, |input| {\n            derp::nested(input, Tag::Sequence, |input| {\n                // Safe to ignore the first value which should be an integer.\n                let version_slice =\n                    derp::expect_tag_and_get_value(input, Tag::Integer)?.as_slice_less_safe();\n                if version_slice.len() != 1 {\n                    return Err(derp::Error::NonZeroUnusedBits);\n                }\n                let version = version_slice[0];\n\n                // Read the next value.\n                let (tag, value) = derp::read_tag_and_get_value(input)?;\n                if tag == Tag::Sequence as u8 {\n                    // Expecting an Ed25519 key.\n                    if version != 0 {\n                        return 
Err(derp::Error::WrongValue);\n                    }\n\n                    // The sequence should have one element: an object identifier defining Ed25519.\n                    let object_identifier = value.read_all(derp::Error::Read, |input| {\n                        derp::expect_tag_and_get_value(input, Tag::Oid)\n                    })?;\n                    if object_identifier.as_slice_less_safe() != ED25519_OBJECT_IDENTIFIER {\n                        return Err(derp::Error::WrongValue);\n                    }\n\n                    // The third and final value should be the raw bytes of the secret key as an\n                    // octet string in an octet string.\n                    let raw_bytes = derp::nested(input, Tag::OctetString, |input| {\n                        derp::expect_tag_and_get_value(input, Tag::OctetString)\n                    })?\n                    .as_slice_less_safe();\n\n                    return Ok((ED25519_TAG, raw_bytes));\n                } else if tag == Tag::OctetString as u8 {\n                    // Expecting a secp256k1 key.\n                    if version != 1 {\n                        return Err(derp::Error::WrongValue);\n                    }\n\n                    // The octet string is the secret key.\n                    let raw_bytes = value.as_slice_less_safe();\n\n                    // The object identifier is next.\n                    let parameter0 =\n                        derp::expect_tag_and_get_value(input, Tag::ContextSpecificConstructed0)?;\n                    let object_identifier = parameter0.read_all(derp::Error::Read, |input| {\n                        derp::expect_tag_and_get_value(input, Tag::Oid)\n                    })?;\n                    if object_identifier.as_slice_less_safe() != SECP256K1_OBJECT_IDENTIFIER {\n                        return Err(derp::Error::WrongValue);\n                    }\n\n                    // There might be an optional public key as the final value, but we're 
not\n                    // interested in parsing that.  Read it to ensure `input.read_all` doesn't fail\n                    // with unused bytes error.\n                    let _ = derp::read_tag_and_get_value(input);\n\n                    return Ok((SECP256K1_TAG, raw_bytes));\n                }\n\n                Err(derp::Error::WrongValue)\n            })\n        })?;\n\n        match key_type_tag {\n            SYSTEM_TAG => Err(Error::AsymmetricKey(\"cannot construct variant\".to_string()).into()),\n            ED25519_TAG => SecretKey::ed25519_from_bytes(raw_bytes).map_err(Into::into),\n            SECP256K1_TAG => SecretKey::secp256k1_from_bytes(raw_bytes).map_err(Into::into),\n            _ => Err(Error::AsymmetricKey(\"unknown type tag\".to_string()).into()),\n        }\n    }\n\n    /// PEM encodes a key.\n    #[cfg(any(feature = \"std\", test))]\n    pub fn to_pem(&self) -> Result<String, ErrorExt> {\n        let tag = match self {\n            SecretKey::System => return Err(Error::System(String::from(\"to_pem\")).into()),\n            SecretKey::Ed25519(_) => ED25519_PEM_SECRET_KEY_TAG.to_string(),\n            SecretKey::Secp256k1(_) => SECP256K1_PEM_SECRET_KEY_TAG.to_string(),\n        };\n        let contents = self.to_der()?;\n        let pem = Pem { tag, contents };\n        Ok(pem::encode(&pem))\n    }\n\n    /// Decodes a key from a PEM-encoded slice.\n    #[cfg(any(feature = \"std\", test))]\n    pub fn from_pem<T: AsRef<[u8]>>(input: T) -> Result<Self, ErrorExt> {\n        let pem = pem::parse(input)?;\n\n        let secret_key = Self::from_der(&pem.contents)?;\n\n        let bad_tag = |expected_tag: &str| {\n            ErrorExt::FromPem(format!(\n                \"invalid tag: expected {}, got {}\",\n                expected_tag, pem.tag\n            ))\n        };\n\n        match secret_key {\n            SecretKey::System => return Err(Error::System(String::from(\"from_pem\")).into()),\n            SecretKey::Ed25519(_) => {\n        
        if pem.tag != ED25519_PEM_SECRET_KEY_TAG {\n                    return Err(bad_tag(ED25519_PEM_SECRET_KEY_TAG));\n                }\n            }\n            SecretKey::Secp256k1(_) => {\n                if pem.tag != SECP256K1_PEM_SECRET_KEY_TAG {\n                    return Err(bad_tag(SECP256K1_PEM_SECRET_KEY_TAG));\n                }\n            }\n        }\n\n        Ok(secret_key)\n    }\n\n    /// Returns a random `SecretKey`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        if rng.gen() {\n            Self::random_ed25519(rng)\n        } else {\n            Self::random_secp256k1(rng)\n        }\n    }\n\n    /// Returns a random Ed25519 variant of `SecretKey`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random_ed25519(rng: &mut TestRng) -> Self {\n        let mut bytes = [0u8; Self::ED25519_LENGTH];\n        rng.fill_bytes(&mut bytes[..]);\n        SecretKey::ed25519_from_bytes(bytes).unwrap()\n    }\n\n    /// Returns a random secp256k1 variant of `SecretKey`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random_secp256k1(rng: &mut TestRng) -> Self {\n        let mut bytes = [0u8; Self::SECP256K1_LENGTH];\n        rng.fill_bytes(&mut bytes[..]);\n        SecretKey::secp256k1_from_bytes(bytes).unwrap()\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    #[cfg(feature = \"json-schema\")]\n    pub fn example() -> &'static Self {\n        &ED25519_SECRET_KEY\n    }\n\n    fn variant_name(&self) -> &str {\n        match self {\n            SecretKey::System => SYSTEM,\n            SecretKey::Ed25519(_) => ED25519,\n            SecretKey::Secp256k1(_) => SECP256K1,\n        }\n    }\n}\n\nimpl Debug for SecretKey {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(formatter, \"SecretKey::{}\", self.variant_name())\n    }\n}\n\nimpl Display for SecretKey {\n    fn fmt(&self, formatter: 
&mut Formatter<'_>) -> fmt::Result {\n        <Self as Debug>::fmt(self, formatter)\n    }\n}\n\nimpl Tagged<u8> for SecretKey {\n    fn tag(&self) -> u8 {\n        match self {\n            SecretKey::System => SYSTEM_TAG,\n            SecretKey::Ed25519(_) => ED25519_TAG,\n            SecretKey::Secp256k1(_) => SECP256K1_TAG,\n        }\n    }\n}\n\n/// A public asymmetric key.\n#[derive(Clone, Eq, PartialEq)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[non_exhaustive]\npub enum PublicKey {\n    /// System public key.\n    System,\n    /// Ed25519 public key.\n    #[cfg_attr(feature = \"datasize\", data_size(skip))]\n    Ed25519(Ed25519PublicKey),\n    /// secp256k1 public key.\n    #[cfg_attr(feature = \"datasize\", data_size(skip))]\n    Secp256k1(Secp256k1PublicKey),\n}\n\nimpl PublicKey {\n    /// The length in bytes of a system public key.\n    pub const SYSTEM_LENGTH: usize = 0;\n\n    /// The length in bytes of an Ed25519 public key.\n    pub const ED25519_LENGTH: usize = ED25519_PUBLIC_KEY_LENGTH;\n\n    /// The length in bytes of a secp256k1 public key.\n    pub const SECP256K1_LENGTH: usize = SECP256K1_COMPRESSED_PUBLIC_KEY_LENGTH;\n\n    /// Creates an `AccountHash` from a given `PublicKey` instance.\n    pub fn to_account_hash(&self) -> AccountHash {\n        AccountHash::from(self)\n    }\n\n    /// Hexadecimal representation of the key.\n    pub fn to_hex_string(&self) -> String {\n        self.to_hex()\n    }\n\n    /// Returns `true` if this public key is of the `System` variant.\n    pub fn is_system(&self) -> bool {\n        matches!(self, PublicKey::System)\n    }\n\n    /// Attempts to write the key bytes to the configured file path.\n    #[cfg(any(feature = \"std-fs-io\", test))]\n    pub fn to_file<P: AsRef<Path>>(&self, file: P) -> Result<(), ErrorExt> {\n        write_file(file, self.to_pem()?).map_err(ErrorExt::PublicKeySave)\n    }\n\n    /// Attempts to read the key bytes from configured file path.\n    #[cfg(any(feature = 
\"std-fs-io\", test))]\n    pub fn from_file<P: AsRef<Path>>(file: P) -> Result<Self, ErrorExt> {\n        let data = read_file(file).map_err(ErrorExt::PublicKeyLoad)?;\n        Self::from_pem(data)\n    }\n\n    /// DER encodes a key.\n    #[cfg(any(feature = \"std\", test))]\n    pub fn to_der(&self) -> Result<Vec<u8>, ErrorExt> {\n        match self {\n            PublicKey::System => Err(Error::System(String::from(\"to_der\")).into()),\n            PublicKey::Ed25519(public_key) => {\n                // See https://tools.ietf.org/html/rfc8410#section-10.1\n                let mut encoded = vec![];\n                let mut der = Der::new(&mut encoded);\n                der.sequence(|der| {\n                    der.sequence(|der| der.oid(&ED25519_OBJECT_IDENTIFIER))?;\n                    der.bit_string(0, public_key.as_ref())\n                })?;\n                Ok(encoded)\n            }\n            PublicKey::Secp256k1(public_key) => {\n                // See https://www.secg.org/sec1-v2.pdf#subsection.C.3\n                let mut encoded = vec![];\n                let mut der = Der::new(&mut encoded);\n                der.sequence(|der| {\n                    der.sequence(|der| {\n                        der.oid(&EC_PUBLIC_KEY_OBJECT_IDENTIFIER)?;\n                        der.oid(&SECP256K1_OBJECT_IDENTIFIER)\n                    })?;\n                    der.bit_string(0, public_key.to_encoded_point(true).as_ref())\n                })?;\n                Ok(encoded)\n            }\n        }\n    }\n\n    /// Decodes a key from a DER-encoded slice.\n    #[cfg(any(feature = \"std\", test))]\n    pub fn from_der<T: AsRef<[u8]>>(input: T) -> Result<Self, ErrorExt> {\n        let input = Input::from(input.as_ref());\n\n        let mut key_type_tag = ED25519_TAG;\n        let raw_bytes = input.read_all(derp::Error::Read, |input| {\n            derp::nested(input, Tag::Sequence, |input| {\n                derp::nested(input, Tag::Sequence, |input| {\n            
        // Read the first value.\n                    let object_identifier =\n                        derp::expect_tag_and_get_value(input, Tag::Oid)?.as_slice_less_safe();\n                    if object_identifier == ED25519_OBJECT_IDENTIFIER {\n                        key_type_tag = ED25519_TAG;\n                        Ok(())\n                    } else if object_identifier == EC_PUBLIC_KEY_OBJECT_IDENTIFIER {\n                        // Assert the next object identifier is the secp256k1 ID.\n                        let next_object_identifier =\n                            derp::expect_tag_and_get_value(input, Tag::Oid)?.as_slice_less_safe();\n                        if next_object_identifier != SECP256K1_OBJECT_IDENTIFIER {\n                            return Err(derp::Error::WrongValue);\n                        }\n\n                        key_type_tag = SECP256K1_TAG;\n                        Ok(())\n                    } else {\n                        Err(derp::Error::WrongValue)\n                    }\n                })?;\n                Ok(derp::bit_string_with_no_unused_bits(input)?.as_slice_less_safe())\n            })\n        })?;\n\n        match key_type_tag {\n            ED25519_TAG => PublicKey::ed25519_from_bytes(raw_bytes).map_err(Into::into),\n            SECP256K1_TAG => PublicKey::secp256k1_from_bytes(raw_bytes).map_err(Into::into),\n            _ => unreachable!(),\n        }\n    }\n\n    /// PEM encodes a key.\n    #[cfg(any(feature = \"std\", test))]\n    pub fn to_pem(&self) -> Result<String, ErrorExt> {\n        let tag = match self {\n            PublicKey::System => return Err(Error::System(String::from(\"to_pem\")).into()),\n            PublicKey::Ed25519(_) => ED25519_PEM_PUBLIC_KEY_TAG.to_string(),\n            PublicKey::Secp256k1(_) => SECP256K1_PEM_PUBLIC_KEY_TAG.to_string(),\n        };\n        let contents = self.to_der()?;\n        let pem = Pem { tag, contents };\n        Ok(pem::encode(&pem))\n    }\n\n    /// Decodes 
a key from a PEM-encoded slice.\n    #[cfg(any(feature = \"std\", test))]\n    pub fn from_pem<T: AsRef<[u8]>>(input: T) -> Result<Self, ErrorExt> {\n        let pem = pem::parse(input)?;\n        let public_key = Self::from_der(&pem.contents)?;\n        let bad_tag = |expected_tag: &str| {\n            ErrorExt::FromPem(format!(\n                \"invalid tag: expected {}, got {}\",\n                expected_tag, pem.tag\n            ))\n        };\n        match public_key {\n            PublicKey::System => return Err(Error::System(String::from(\"from_pem\")).into()),\n            PublicKey::Ed25519(_) => {\n                if pem.tag != ED25519_PEM_PUBLIC_KEY_TAG {\n                    return Err(bad_tag(ED25519_PEM_PUBLIC_KEY_TAG));\n                }\n            }\n            PublicKey::Secp256k1(_) => {\n                if pem.tag != SECP256K1_PEM_PUBLIC_KEY_TAG {\n                    return Err(bad_tag(SECP256K1_PEM_PUBLIC_KEY_TAG));\n                }\n            }\n        }\n        Ok(public_key)\n    }\n\n    /// Returns a random `PublicKey`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        let secret_key = SecretKey::random(rng);\n        PublicKey::from(&secret_key)\n    }\n\n    /// Returns a random Ed25519 variant of `PublicKey`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random_ed25519(rng: &mut TestRng) -> Self {\n        let secret_key = SecretKey::random_ed25519(rng);\n        PublicKey::from(&secret_key)\n    }\n\n    /// Returns a random secp256k1 variant of `PublicKey`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random_secp256k1(rng: &mut TestRng) -> Self {\n        let secret_key = SecretKey::random_secp256k1(rng);\n        PublicKey::from(&secret_key)\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    #[cfg(feature = \"json-schema\")]\n    pub fn example() -> &'static Self {\n        
&ED25519_PUBLIC_KEY\n    }\n\n    fn variant_name(&self) -> &str {\n        match self {\n            PublicKey::System => SYSTEM,\n            PublicKey::Ed25519(_) => ED25519,\n            PublicKey::Secp256k1(_) => SECP256K1,\n        }\n    }\n}\n\nimpl AsymmetricType<'_> for PublicKey {\n    fn system() -> Self {\n        PublicKey::System\n    }\n\n    fn ed25519_from_bytes<T: AsRef<[u8]>>(bytes: T) -> Result<Self, Error> {\n        Ok(PublicKey::Ed25519(Ed25519PublicKey::try_from(\n            bytes.as_ref(),\n        )?))\n    }\n\n    fn secp256k1_from_bytes<T: AsRef<[u8]>>(bytes: T) -> Result<Self, Error> {\n        Ok(PublicKey::Secp256k1(\n            Secp256k1PublicKey::from_sec1_bytes(bytes.as_ref())\n                .map_err(|_| Error::SignatureError)?,\n        ))\n    }\n}\n\nimpl From<&SecretKey> for PublicKey {\n    fn from(secret_key: &SecretKey) -> PublicKey {\n        match secret_key {\n            SecretKey::System => PublicKey::System,\n            SecretKey::Ed25519(secret_key) => PublicKey::Ed25519(secret_key.into()),\n            SecretKey::Secp256k1(secret_key) => PublicKey::Secp256k1(secret_key.into()),\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<PublicKey> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> PublicKey {\n        let secret_key = if rng.gen() {\n            SecretKey::generate_ed25519().unwrap()\n        } else {\n            SecretKey::generate_secp256k1().unwrap()\n        };\n        PublicKey::from(&secret_key)\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl PartialEq for SecretKey {\n    fn eq(&self, other: &Self) -> bool {\n        match (self, other) {\n            (Self::System, Self::System) => true,\n            (Self::Ed25519(k1), Self::Ed25519(k2)) => k1.to_bytes() == k2.to_bytes(),\n            (Self::Secp256k1(k1), Self::Secp256k1(k2)) => k1.to_bytes() == k2.to_bytes(),\n            _ => false,\n        }\n    }\n}\n#[cfg(any(feature = 
\"testing\", test))]\nimpl Eq for SecretKey {}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Ord for SecretKey {\n    fn cmp(&self, other: &Self) -> Ordering {\n        match (self, other) {\n            (Self::System, Self::System) => Ordering::Equal,\n            (Self::Ed25519(k1), Self::Ed25519(k2)) => k1.to_bytes().cmp(&k2.to_bytes()),\n            (Self::Secp256k1(k1), Self::Secp256k1(k2)) => k1.to_bytes().cmp(&k2.to_bytes()),\n            (k1, k2) => k1.variant_name().cmp(k2.variant_name()),\n        }\n    }\n}\n#[cfg(any(feature = \"testing\", test))]\nimpl PartialOrd for SecretKey {\n    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {\n        Some(self.cmp(other))\n    }\n}\n\nimpl From<&PublicKey> for Vec<u8> {\n    fn from(public_key: &PublicKey) -> Self {\n        match public_key {\n            PublicKey::System => Vec::new(),\n            PublicKey::Ed25519(key) => key.to_bytes().into(),\n            PublicKey::Secp256k1(key) => key.to_encoded_point(true).as_ref().into(),\n        }\n    }\n}\n\nimpl From<PublicKey> for Vec<u8> {\n    fn from(public_key: PublicKey) -> Self {\n        Vec::<u8>::from(&public_key)\n    }\n}\n\nimpl Debug for PublicKey {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            formatter,\n            \"PublicKey::{}({})\",\n            self.variant_name(),\n            base16::encode_lower(&Into::<Vec<u8>>::into(self))\n        )\n    }\n}\n\nimpl Display for PublicKey {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            formatter,\n            \"PubKey::{}({:10})\",\n            self.variant_name(),\n            HexFmt(Into::<Vec<u8>>::into(self))\n        )\n    }\n}\n\nimpl PartialOrd for PublicKey {\n    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {\n        Some(self.cmp(other))\n    }\n}\n\nimpl Ord for PublicKey {\n    fn cmp(&self, other: &Self) -> Ordering {\n        let self_tag = self.tag();\n      
  let other_tag = other.tag();\n        if self_tag == other_tag {\n            Into::<Vec<u8>>::into(self).cmp(&Into::<Vec<u8>>::into(other))\n        } else {\n            self_tag.cmp(&other_tag)\n        }\n    }\n}\n\n// This implementation of `Hash` agrees with the derived `PartialEq`.  It's required since\n// `ed25519_dalek::PublicKey` doesn't implement `Hash`.\n#[allow(clippy::derived_hash_with_manual_eq)]\nimpl Hash for PublicKey {\n    fn hash<H: Hasher>(&self, state: &mut H) {\n        self.tag().hash(state);\n        Into::<Vec<u8>>::into(self).hash(state);\n    }\n}\n\nimpl Tagged<u8> for PublicKey {\n    fn tag(&self) -> u8 {\n        match self {\n            PublicKey::System => SYSTEM_TAG,\n            PublicKey::Ed25519(_) => ED25519_TAG,\n            PublicKey::Secp256k1(_) => SECP256K1_TAG,\n        }\n    }\n}\n\nimpl ToBytes for PublicKey {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        TAG_LENGTH\n            + match self {\n                PublicKey::System => Self::SYSTEM_LENGTH,\n                PublicKey::Ed25519(_) => Self::ED25519_LENGTH,\n                PublicKey::Secp256k1(_) => Self::SECP256K1_LENGTH,\n            }\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            PublicKey::System => writer.push(SYSTEM_TAG),\n            PublicKey::Ed25519(public_key) => {\n                writer.push(ED25519_TAG);\n                writer.extend_from_slice(public_key.as_bytes());\n            }\n            PublicKey::Secp256k1(public_key) => {\n                writer.push(SECP256K1_TAG);\n                writer.extend_from_slice(public_key.to_encoded_point(true).as_ref());\n            }\n        }\n        Ok(())\n    }\n}\n\nimpl FromBytes for PublicKey {\n    fn 
from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            SYSTEM_TAG => Ok((PublicKey::System, remainder)),\n            ED25519_TAG => {\n                let (raw_bytes, remainder): ([u8; Self::ED25519_LENGTH], _) =\n                    FromBytes::from_bytes(remainder)?;\n                let public_key = Self::ed25519_from_bytes(raw_bytes)\n                    .map_err(|_error| bytesrepr::Error::Formatting)?;\n                Ok((public_key, remainder))\n            }\n            SECP256K1_TAG => {\n                let (raw_bytes, remainder): ([u8; Self::SECP256K1_LENGTH], _) =\n                    FromBytes::from_bytes(remainder)?;\n                let public_key = Self::secp256k1_from_bytes(raw_bytes)\n                    .map_err(|_error| bytesrepr::Error::Formatting)?;\n                Ok((public_key, remainder))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\nimpl Serialize for PublicKey {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        detail::serialize(self, serializer)\n    }\n}\n\nimpl<'de> Deserialize<'de> for PublicKey {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        detail::deserialize(deserializer)\n    }\n}\n\n#[cfg(feature = \"json-schema\")]\nimpl JsonSchema for PublicKey {\n    fn schema_name() -> String {\n        String::from(\"PublicKey\")\n    }\n\n    fn json_schema(gen: &mut SchemaGenerator) -> Schema {\n        let schema = gen.subschema_for::<String>();\n        let mut schema_object = schema.into_object();\n        schema_object.metadata().description = Some(\n            \"Hex-encoded cryptographic public key, including the algorithm tag prefix.\".to_string(),\n        );\n        schema_object.metadata().examples = vec![\n            json!({\n                \"name\": \"SystemPublicKey\",\n           
     \"description\": \"A pseudo public key, used for example when the system proposes an \\\n                immediate switch block after a network upgrade rather than a specific validator. \\\n                Its hex-encoded value is always '00', as is the corresponding pseudo signature's\",\n                \"value\": \"00\"\n            }),\n            json!({\n                \"name\": \"Ed25519PublicKey\",\n                \"description\": \"An Ed25519 public key. Its hex-encoded value begins '01' and is \\\n                followed by 64 characters\",\n                \"value\": \"018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c\"\n            }),\n            json!({\n                \"name\": \"Secp256k1PublicKey\",\n                \"description\": \"A secp256k1 public key. Its hex-encoded value begins '02' and is \\\n                followed by 66 characters\",\n                \"value\": \"0203408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084\"\n            }),\n        ];\n        schema_object.into()\n    }\n}\n\nimpl CLTyped for PublicKey {\n    fn cl_type() -> CLType {\n        CLType::PublicKey\n    }\n}\n\n/// A signature of given data.\n#[derive(Clone, Copy)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[non_exhaustive]\npub enum Signature {\n    /// System signature.  
Cannot be verified.\n    System,\n    /// Ed25519 signature.\n    #[cfg_attr(feature = \"datasize\", data_size(skip))]\n    Ed25519(Ed25519Signature),\n    /// Secp256k1 signature.\n    #[cfg_attr(feature = \"datasize\", data_size(skip))]\n    Secp256k1(Secp256k1Signature),\n}\n\nimpl Signature {\n    /// The length in bytes of a system signature,\n    pub const SYSTEM_LENGTH: usize = 0;\n\n    /// The length in bytes of an Ed25519 signature,\n    pub const ED25519_LENGTH: usize = ED25519_SIGNATURE_LENGTH;\n\n    /// The length in bytes of a secp256k1 signature\n    pub const SECP256K1_LENGTH: usize = SECP256K1_SIGNATURE_LENGTH;\n\n    /// Constructs a new Ed25519 variant from a byte array.\n    pub fn ed25519(bytes: [u8; Self::ED25519_LENGTH]) -> Result<Self, Error> {\n        let signature = Ed25519Signature::from_bytes(&bytes);\n        Ok(Signature::Ed25519(signature))\n    }\n\n    /// Constructs a new secp256k1 variant from a byte array.\n    pub fn secp256k1(bytes: [u8; Self::SECP256K1_LENGTH]) -> Result<Self, Error> {\n        let signature = Secp256k1Signature::try_from(&bytes[..]).map_err(|_| {\n            Error::AsymmetricKey(format!(\n                \"failed to construct secp256k1 signature from {:?}\",\n                &bytes[..]\n            ))\n        })?;\n\n        Ok(Signature::Secp256k1(signature))\n    }\n\n    fn variant_name(&self) -> &str {\n        match self {\n            Signature::System => SYSTEM,\n            Signature::Ed25519(_) => ED25519,\n            Signature::Secp256k1(_) => SECP256K1,\n        }\n    }\n\n    /// Hexadecimal representation of the signature.\n    pub fn to_hex_string(&self) -> String {\n        self.to_hex()\n    }\n}\n\nimpl AsymmetricType<'_> for Signature {\n    fn system() -> Self {\n        Signature::System\n    }\n\n    fn ed25519_from_bytes<T: AsRef<[u8]>>(bytes: T) -> Result<Self, Error> {\n        let signature = Ed25519Signature::try_from(bytes.as_ref()).map_err(|_| {\n            
Error::AsymmetricKey(format!(\n                \"failed to construct Ed25519 signature from {:?}\",\n                bytes.as_ref()\n            ))\n        })?;\n        Ok(Signature::Ed25519(signature))\n    }\n\n    fn secp256k1_from_bytes<T: AsRef<[u8]>>(bytes: T) -> Result<Self, Error> {\n        let signature = Secp256k1Signature::try_from(bytes.as_ref()).map_err(|_| {\n            Error::AsymmetricKey(format!(\n                \"failed to construct secp256k1 signature from {:?}\",\n                bytes.as_ref()\n            ))\n        })?;\n        Ok(Signature::Secp256k1(signature))\n    }\n}\n\nimpl Debug for Signature {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            formatter,\n            \"Signature::{}({})\",\n            self.variant_name(),\n            base16::encode_lower(&Into::<Vec<u8>>::into(*self))\n        )\n    }\n}\n\nimpl Display for Signature {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(\n            formatter,\n            \"Sig::{}({:10})\",\n            self.variant_name(),\n            HexFmt(Into::<Vec<u8>>::into(*self))\n        )\n    }\n}\n\nimpl PartialOrd for Signature {\n    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {\n        Some(self.cmp(other))\n    }\n}\n\nimpl Ord for Signature {\n    fn cmp(&self, other: &Self) -> Ordering {\n        let self_tag = self.tag();\n        let other_tag = other.tag();\n        if self_tag == other_tag {\n            Into::<Vec<u8>>::into(*self).cmp(&Into::<Vec<u8>>::into(*other))\n        } else {\n            self_tag.cmp(&other_tag)\n        }\n    }\n}\n\nimpl PartialEq for Signature {\n    fn eq(&self, other: &Self) -> bool {\n        self.tag() == other.tag() && Into::<Vec<u8>>::into(*self) == Into::<Vec<u8>>::into(*other)\n    }\n}\n\nimpl Eq for Signature {}\n\nimpl Hash for Signature {\n    fn hash<H: Hasher>(&self, state: &mut H) {\n        self.tag().hash(state);\n        
Into::<Vec<u8>>::into(*self).hash(state);\n    }\n}\n\nimpl Tagged<u8> for Signature {\n    fn tag(&self) -> u8 {\n        match self {\n            Signature::System => SYSTEM_TAG,\n            Signature::Ed25519(_) => ED25519_TAG,\n            Signature::Secp256k1(_) => SECP256K1_TAG,\n        }\n    }\n}\n\nimpl ToBytes for Signature {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        TAG_LENGTH\n            + match self {\n                Signature::System => Self::SYSTEM_LENGTH,\n                Signature::Ed25519(_) => Self::ED25519_LENGTH,\n                Signature::Secp256k1(_) => Self::SECP256K1_LENGTH,\n            }\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            Signature::System => {\n                writer.push(SYSTEM_TAG);\n            }\n            Signature::Ed25519(signature) => {\n                writer.push(ED25519_TAG);\n                writer.extend(signature.to_bytes());\n            }\n            Signature::Secp256k1(signature) => {\n                writer.push(SECP256K1_TAG);\n                writer.extend_from_slice(&signature.to_bytes());\n            }\n        }\n        Ok(())\n    }\n}\n\nimpl FromBytes for Signature {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            SYSTEM_TAG => Ok((Signature::System, remainder)),\n            ED25519_TAG => {\n                let (raw_bytes, remainder): ([u8; Self::ED25519_LENGTH], _) =\n                    FromBytes::from_bytes(remainder)?;\n                let public_key =\n                    Self::ed25519(raw_bytes).map_err(|_error| bytesrepr::Error::Formatting)?;\n                Ok((public_key, 
remainder))\n            }\n            SECP256K1_TAG => {\n                let (raw_bytes, remainder): ([u8; Self::SECP256K1_LENGTH], _) =\n                    FromBytes::from_bytes(remainder)?;\n                let public_key =\n                    Self::secp256k1(raw_bytes).map_err(|_error| bytesrepr::Error::Formatting)?;\n                Ok((public_key, remainder))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\nimpl Serialize for Signature {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        detail::serialize(self, serializer)\n    }\n}\n\nimpl<'de> Deserialize<'de> for Signature {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        detail::deserialize(deserializer)\n    }\n}\n\nimpl From<&Signature> for Vec<u8> {\n    fn from(signature: &Signature) -> Self {\n        match signature {\n            Signature::System => Vec::new(),\n            Signature::Ed25519(signature) => signature.to_bytes().into(),\n            Signature::Secp256k1(signature) => (*signature.to_bytes()).into(),\n        }\n    }\n}\n\nimpl From<Signature> for Vec<u8> {\n    fn from(signature: Signature) -> Self {\n        Vec::<u8>::from(&signature)\n    }\n}\n\n#[cfg(feature = \"json-schema\")]\nimpl JsonSchema for Signature {\n    fn schema_name() -> String {\n        String::from(\"Signature\")\n    }\n\n    fn json_schema(gen: &mut SchemaGenerator) -> Schema {\n        let schema = gen.subschema_for::<String>();\n        let mut schema_object = schema.into_object();\n        schema_object.metadata().description = Some(\n            \"Hex-encoded cryptographic signature, including the algorithm tag prefix.\".to_string(),\n        );\n        schema_object.into()\n    }\n}\n\n/// Signs the given message using the given key pair.\npub fn sign<T: AsRef<[u8]>>(\n    message: T,\n    secret_key: &SecretKey,\n    public_key: &PublicKey,\n) -> Signature {\n    
match (secret_key, public_key) {\n        (SecretKey::System, PublicKey::System) => {\n            panic!(\"cannot create signature with system keys\",)\n        }\n        (SecretKey::Ed25519(secret_key), PublicKey::Ed25519(_public_key)) => {\n            let signature = secret_key.sign(message.as_ref());\n            Signature::Ed25519(signature)\n        }\n        (SecretKey::Secp256k1(secret_key), PublicKey::Secp256k1(_public_key)) => {\n            let signer = secret_key;\n            let signature: Secp256k1Signature = signer\n                .try_sign(message.as_ref())\n                .expect(\"should create signature\");\n            Signature::Secp256k1(signature)\n        }\n        _ => panic!(\"secret and public key types must match\"),\n    }\n}\n\n/// Attempts to recover a Secp256k1 [`PublicKey`] from a message and a signature over it.\npub fn recover_secp256k1<T: AsRef<[u8]>>(\n    message: T,\n    signature: &Signature,\n    recovery_id: u8,\n) -> Result<PublicKey, Error> {\n    let Signature::Secp256k1(signature) = signature else {\n        return Err(Error::AsymmetricKey(String::from(\n            \"public keys can only be recovered from Secp256k1 signatures\",\n        )));\n    };\n\n    let Ok(key) = VerifyingKey::recover_from_msg(\n        message.as_ref(),\n        signature,\n        RecoveryId::try_from(recovery_id)?,\n    ) else {\n        return Err(Error::AsymmetricKey(String::from(\"Key extraction failed\")));\n    };\n\n    Ok(PublicKey::Secp256k1(key))\n}\n\n/// Verifies the signature of the given message against the given public key.\npub fn verify<T: AsRef<[u8]>>(\n    message: T,\n    signature: &Signature,\n    public_key: &PublicKey,\n) -> Result<(), Error> {\n    match (signature, public_key) {\n        (Signature::System, _) => Err(Error::AsymmetricKey(String::from(\n            \"signatures based on the system key cannot be verified\",\n        ))),\n        (Signature::Ed25519(signature), PublicKey::Ed25519(public_key)) => 
public_key\n            .verify_strict(message.as_ref(), signature)\n            .map_err(|_| Error::AsymmetricKey(String::from(\"failed to verify Ed25519 signature\"))),\n        (Signature::Secp256k1(signature), PublicKey::Secp256k1(public_key)) => {\n            let verifier: &Secp256k1PublicKey = public_key;\n            verifier\n                .verify(message.as_ref(), signature)\n                .map_err(|error| {\n                    Error::AsymmetricKey(format!(\"failed to verify secp256k1 signature: {}\", error))\n                })\n        }\n        _ => Err(Error::AsymmetricKey(format!(\n            \"type mismatch between {} and {}\",\n            signature, public_key\n        ))),\n    }\n}\n\n/// Generates an Ed25519 keypair using the operating system's cryptographically secure random number\n/// generator.\n#[cfg(any(feature = \"std\", test))]\npub fn generate_ed25519_keypair() -> (SecretKey, PublicKey) {\n    let secret_key = SecretKey::generate_ed25519().unwrap();\n    let public_key = PublicKey::from(&secret_key);\n    (secret_key, public_key)\n}\n\nmod detail {\n    use alloc::{string::String, vec::Vec};\n\n    use serde::{de::Error as _deError, Deserialize, Deserializer, Serialize, Serializer};\n\n    use super::{PublicKey, Signature};\n    use crate::AsymmetricType;\n\n    /// Used to serialize and deserialize asymmetric key types where the (de)serializer is not a\n    /// human-readable type.\n    ///\n    /// The wrapped contents are the result of calling `t_as_ref()` on the type.\n    #[derive(Serialize, Deserialize)]\n    pub(super) enum AsymmetricTypeAsBytes {\n        System,\n        Ed25519(Vec<u8>),\n        Secp256k1(Vec<u8>),\n    }\n\n    impl From<&PublicKey> for AsymmetricTypeAsBytes {\n        fn from(public_key: &PublicKey) -> Self {\n            match public_key {\n                PublicKey::System => AsymmetricTypeAsBytes::System,\n                key @ PublicKey::Ed25519(_) => 
AsymmetricTypeAsBytes::Ed25519(key.into()),\n                key @ PublicKey::Secp256k1(_) => AsymmetricTypeAsBytes::Secp256k1(key.into()),\n            }\n        }\n    }\n\n    impl From<&Signature> for AsymmetricTypeAsBytes {\n        fn from(signature: &Signature) -> Self {\n            match signature {\n                Signature::System => AsymmetricTypeAsBytes::System,\n                key @ Signature::Ed25519(_) => AsymmetricTypeAsBytes::Ed25519(key.into()),\n                key @ Signature::Secp256k1(_) => AsymmetricTypeAsBytes::Secp256k1(key.into()),\n            }\n        }\n    }\n\n    pub(super) fn serialize<'a, T, S>(value: &'a T, serializer: S) -> Result<S::Ok, S::Error>\n    where\n        T: AsymmetricType<'a>,\n        Vec<u8>: From<&'a T>,\n        S: Serializer,\n        AsymmetricTypeAsBytes: From<&'a T>,\n    {\n        if serializer.is_human_readable() {\n            return value.to_hex().serialize(serializer);\n        }\n\n        AsymmetricTypeAsBytes::from(value).serialize(serializer)\n    }\n\n    pub(super) fn deserialize<'a, 'de, T, D>(deserializer: D) -> Result<T, D::Error>\n    where\n        T: AsymmetricType<'a>,\n        Vec<u8>: From<&'a T>,\n        D: Deserializer<'de>,\n    {\n        if deserializer.is_human_readable() {\n            let hex_string = String::deserialize(deserializer)?;\n            let value = T::from_hex(hex_string.as_bytes()).map_err(D::Error::custom)?;\n            return Ok(value);\n        }\n\n        let as_bytes = AsymmetricTypeAsBytes::deserialize(deserializer)?;\n        match as_bytes {\n            AsymmetricTypeAsBytes::System => Ok(T::system()),\n            AsymmetricTypeAsBytes::Ed25519(raw_bytes) => {\n                T::ed25519_from_bytes(raw_bytes).map_err(D::Error::custom)\n            }\n            AsymmetricTypeAsBytes::Secp256k1(raw_bytes) => {\n                T::secp256k1_from_bytes(raw_bytes).map_err(D::Error::custom)\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/crypto/error.rs",
    "content": "use alloc::string::String;\nuse core::fmt::{self, Display, Formatter};\n#[cfg(any(feature = \"std\", feature = \"testing\", test))]\nuse std::error::Error as StdError;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse ed25519_dalek::ed25519::Error as SignatureError;\n#[cfg(any(feature = \"std\", test))]\nuse pem::PemError;\nuse serde::Serialize;\n#[cfg(any(feature = \"std\", feature = \"testing\", test))]\nuse thiserror::Error;\n\n#[cfg(any(feature = \"std-fs-io\", test))]\nuse crate::file_utils::{ReadFileError, WriteFileError};\n\n/// Cryptographic errors.\n#[derive(Clone, Eq, PartialEq, Debug, Serialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[non_exhaustive]\n//Default is needed only in testing to meet EnumIter needs\n#[cfg_attr(any(feature = \"testing\", test), derive(Default))]\npub enum Error {\n    /// Error resulting from creating or using asymmetric key types.\n    AsymmetricKey(String),\n\n    /// Error resulting when decoding a type from a hex-encoded representation.\n    #[serde(with = \"serde_helpers::Base16DecodeError\")]\n    #[cfg_attr(feature = \"datasize\", data_size(skip))]\n    FromHex(base16::DecodeError),\n\n    /// Error resulting when decoding a type from a base64 representation.\n    #[serde(with = \"serde_helpers::Base64DecodeError\")]\n    #[cfg_attr(feature = \"datasize\", data_size(skip))]\n    FromBase64(base64::DecodeError),\n\n    /// Signature error.\n    #[cfg_attr(any(feature = \"testing\", test), default)]\n    SignatureError,\n\n    /// Error trying to manipulate the system key.\n    System(String),\n}\n\nimpl Display for Error {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            Error::AsymmetricKey(error_msg) => {\n                write!(formatter, \"asymmetric key error: {}\", error_msg)\n            }\n            Error::FromHex(error) => {\n                write!(formatter, \"decoding from hex: {}\", error)\n            }\n   
         Error::FromBase64(error) => {\n                write!(formatter, \"decoding from base 64: {}\", error)\n            }\n            Error::SignatureError => {\n                write!(formatter, \"error in signature\")\n            }\n            Error::System(error_msg) => {\n                write!(formatter, \"invalid operation on system key: {}\", error_msg)\n            }\n        }\n    }\n}\n\nimpl From<base16::DecodeError> for Error {\n    fn from(error: base16::DecodeError) -> Self {\n        Error::FromHex(error)\n    }\n}\n\nimpl From<SignatureError> for Error {\n    fn from(_error: SignatureError) -> Self {\n        Error::SignatureError\n    }\n}\n\n#[cfg(any(feature = \"std\", feature = \"testing\", test))]\nimpl StdError for Error {\n    fn source(&self) -> Option<&(dyn StdError + 'static)> {\n        match self {\n            Error::FromHex(error) => Some(error),\n            Error::FromBase64(error) => Some(error),\n            Error::AsymmetricKey(_) | Error::SignatureError | Error::System(_) => None,\n        }\n    }\n}\n\n/// Cryptographic errors extended with some additional variants.\n#[cfg(any(feature = \"std\", feature = \"testing\", test))]\n#[derive(Debug, Error)]\n#[non_exhaustive]\npub enum ErrorExt {\n    /// A basic crypto error.\n    #[error(\"crypto error: {0:?}\")]\n    CryptoError(#[from] Error),\n\n    /// Error trying to read a secret key.\n    #[cfg(any(feature = \"std-fs-io\", test))]\n    #[error(\"secret key load failed: {0}\")]\n    SecretKeyLoad(ReadFileError),\n\n    /// Error trying to read a public key.\n    #[cfg(any(feature = \"std-fs-io\", test))]\n    #[error(\"public key load failed: {0}\")]\n    PublicKeyLoad(ReadFileError),\n\n    /// Error trying to write a secret key.\n    #[cfg(any(feature = \"std-fs-io\", test))]\n    #[error(\"secret key save failed: {0}\")]\n    SecretKeySave(WriteFileError),\n\n    /// Error trying to write a public key.\n    #[cfg(any(feature = \"std-fs-io\", test))]\n    
#[error(\"public key save failed: {0}\")]\n    PublicKeySave(WriteFileError),\n\n    /// Pem format error.\n    #[error(\"pem error: {0}\")]\n    FromPem(String),\n\n    /// DER format error.\n    #[error(\"der error: {0}\")]\n    FromDer(#[from] derp::Error),\n\n    /// Error in getting random bytes from the system's preferred random number source.\n    #[error(\"failed to get random bytes: {0}\")]\n    GetRandomBytes(#[from] getrandom::Error),\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl From<PemError> for ErrorExt {\n    fn from(error: PemError) -> Self {\n        ErrorExt::FromPem(error.to_string())\n    }\n}\n\n/// This module allows us to derive `Serialize` for the third party error types which don't\n/// themselves derive it.\n///\n/// See <https://serde.rs/remote-derive.html> for more info.\n#[allow(clippy::enum_variant_names)]\nmod serde_helpers {\n    use serde::Serialize;\n\n    #[derive(Serialize)]\n    #[serde(remote = \"base16::DecodeError\")]\n    pub(super) enum Base16DecodeError {\n        InvalidByte { index: usize, byte: u8 },\n        InvalidLength { length: usize },\n    }\n\n    #[derive(Serialize)]\n    #[serde(remote = \"base64::DecodeError\")]\n    pub(super) enum Base64DecodeError {\n        #[allow(dead_code)]\n        InvalidByte(usize, u8),\n        InvalidLength,\n        #[allow(dead_code)]\n        InvalidLastSymbol(usize, u8),\n    }\n}\n"
  },
  {
    "path": "types/src/crypto.rs",
    "content": "//! Cryptographic types and operations on them\n\nmod asymmetric_key;\nmod error;\n\nuse blake2::{\n    digest::{Update, VariableOutput},\n    VarBlake2b,\n};\n\nuse num::FromPrimitive;\nuse num_derive::FromPrimitive;\n\npub use crate::key::BLAKE2B_DIGEST_LENGTH;\n#[cfg(any(feature = \"std\", test))]\npub use asymmetric_key::generate_ed25519_keypair;\n#[cfg(any(feature = \"testing\", feature = \"gens\", test))]\npub use asymmetric_key::gens;\npub use asymmetric_key::{\n    recover_secp256k1, sign, verify, AsymmetricType, PublicKey, SecretKey, Signature, ED25519_TAG,\n    SECP256K1_TAG, SYSTEM_ACCOUNT, SYSTEM_TAG,\n};\npub use error::Error;\n#[cfg(any(feature = \"std\", feature = \"testing\", test))]\npub use error::ErrorExt;\n\npub(crate) fn blake2b<T: AsRef<[u8]>>(data: T) -> [u8; BLAKE2B_DIGEST_LENGTH] {\n    let mut result = [0; BLAKE2B_DIGEST_LENGTH];\n    // NOTE: Assumed safe as `BLAKE2B_DIGEST_LENGTH` is a valid value for a hasher\n    let mut hasher = VarBlake2b::new(BLAKE2B_DIGEST_LENGTH).expect(\"should create hasher\");\n\n    hasher.update(data);\n    hasher.finalize_variable(|slice| {\n        result.copy_from_slice(slice);\n    });\n    result\n}\n\n/// A type of hashing algorithm.\n#[repr(u8)]\n#[derive(Debug, Copy, Clone, PartialEq, Eq, FromPrimitive)]\npub enum HashAlgorithm {\n    /// Blake2b\n    Blake2b = 0,\n    /// Blake3\n    Blake3 = 1,\n    /// Sha256,\n    Sha256 = 2,\n}\n\nimpl TryFrom<u8> for HashAlgorithm {\n    type Error = ();\n\n    fn try_from(value: u8) -> Result<Self, Self::Error> {\n        FromPrimitive::from_u8(value).ok_or(())\n    }\n}\n"
  },
  {
    "path": "types/src/deploy_info.rs",
    "content": "use alloc::vec::Vec;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    account::AccountHash,\n    bytesrepr::{self, FromBytes, ToBytes},\n    serde_helpers, DeployHash, TransferAddr, URef, U512,\n};\n\n/// Information relating to the given Deploy.\n#[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct DeployInfo {\n    /// The relevant Deploy.\n    #[serde(with = \"serde_helpers::deploy_hash_as_array\")]\n    #[cfg_attr(\n        feature = \"json-schema\",\n        schemars(with = \"DeployHash\", description = \"Hex-encoded Deploy hash.\")\n    )]\n    pub deploy_hash: DeployHash,\n    /// Version 1 transfers performed by the Deploy.\n    pub transfers: Vec<TransferAddr>,\n    /// Account identifier of the creator of the Deploy.\n    pub from: AccountHash,\n    /// Source purse used for payment of the Deploy.\n    pub source: URef,\n    /// Gas cost of executing the Deploy.\n    pub gas: U512,\n}\n\nimpl DeployInfo {\n    /// Creates a [`DeployInfo`].\n    pub fn new(\n        deploy_hash: DeployHash,\n        transfers: &[TransferAddr],\n        from: AccountHash,\n        source: URef,\n        gas: U512,\n    ) -> Self {\n        let transfers = transfers.to_vec();\n        DeployInfo {\n            deploy_hash,\n            transfers,\n            from,\n            source,\n            gas,\n        }\n    }\n}\n\nimpl FromBytes for DeployInfo {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (deploy_hash, rem) = DeployHash::from_bytes(bytes)?;\n        let (transfers, rem) = Vec::<TransferAddr>::from_bytes(rem)?;\n        let (from, rem) = AccountHash::from_bytes(rem)?;\n        let (source, 
rem) = URef::from_bytes(rem)?;\n        let (gas, rem) = U512::from_bytes(rem)?;\n        Ok((\n            DeployInfo {\n                deploy_hash,\n                transfers,\n                from,\n                source,\n                gas,\n            },\n            rem,\n        ))\n    }\n}\n\nimpl ToBytes for DeployInfo {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        self.deploy_hash.write_bytes(&mut result)?;\n        self.transfers.write_bytes(&mut result)?;\n        self.from.write_bytes(&mut result)?;\n        self.source.write_bytes(&mut result)?;\n        self.gas.write_bytes(&mut result)?;\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.deploy_hash.serialized_length()\n            + self.transfers.serialized_length()\n            + self.from.serialized_length()\n            + self.source.serialized_length()\n            + self.gas.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.deploy_hash.write_bytes(writer)?;\n        self.transfers.write_bytes(writer)?;\n        self.from.write_bytes(writer)?;\n        self.source.write_bytes(writer)?;\n        self.gas.write_bytes(writer)?;\n        Ok(())\n    }\n}\n\n/// Generators for a `DeployInfo`\n#[cfg(any(feature = \"testing\", feature = \"gens\", test))]\npub(crate) mod gens {\n    use crate::{\n        gens::{account_hash_arb, u512_arb, uref_arb},\n        transaction::gens::deploy_hash_arb,\n        transfer::gens::transfer_v1_addr_arb,\n        DeployInfo,\n    };\n    use proptest::{collection, prelude::Strategy};\n\n    pub fn deploy_info_arb() -> impl Strategy<Value = DeployInfo> {\n        let transfers_length_range = 0..5;\n        (\n            deploy_hash_arb(),\n            collection::vec(transfer_v1_addr_arb(), transfers_length_range),\n            account_hash_arb(),\n            
uref_arb(),\n            u512_arb(),\n        )\n            .prop_map(|(deploy_hash, transfers, from, source, gas)| DeployInfo {\n                deploy_hash,\n                transfers,\n                from,\n                source,\n                gas,\n            })\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use proptest::prelude::*;\n\n    use crate::bytesrepr;\n\n    use super::gens;\n\n    proptest! {\n        #[test]\n        fn test_serialization_roundtrip(deploy_info in gens::deploy_info_arb()) {\n            bytesrepr::test_serialization_roundtrip(&deploy_info)\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/digest/chunk_with_proof.rs",
    "content": "//! Chunks with Merkle proofs.\n\nuse alloc::vec::Vec;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse super::{ChunkWithProofVerificationError, Digest, IndexedMerkleProof, MerkleConstructionError};\nuse crate::bytesrepr::{self, Bytes, FromBytes, ToBytes};\n\n/// Represents a chunk of data with attached proof.\n#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct ChunkWithProof {\n    proof: IndexedMerkleProof,\n    chunk: Bytes,\n}\n\nimpl ToBytes for ChunkWithProof {\n    fn write_bytes(&self, buf: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        buf.append(&mut self.proof.to_bytes()?);\n        buf.append(&mut self.chunk.to_bytes()?);\n\n        Ok(())\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut result)?;\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.proof.serialized_length() + self.chunk.serialized_length()\n    }\n}\n\nimpl FromBytes for ChunkWithProof {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (proof, remainder) = FromBytes::from_bytes(bytes)?;\n        let (chunk, remainder) = FromBytes::from_bytes(remainder)?;\n\n        Ok((ChunkWithProof { proof, chunk }, remainder))\n    }\n}\n\nimpl ChunkWithProof {\n    #[cfg(test)]\n    /// 10 bytes for testing purposes.\n    pub const CHUNK_SIZE_BYTES: usize = 10;\n\n    #[cfg(not(test))]\n    /// 8 MiB\n    pub const CHUNK_SIZE_BYTES: usize = 8 * 1024 * 1024;\n\n    /// Constructs the [`ChunkWithProof`] that contains the chunk of data with the appropriate index\n    /// and the cryptographic proof.\n    
///\n    /// Empty data is always represented as single, empty chunk and not as zero chunks.\n    pub fn new(data: &[u8], index: u64) -> Result<Self, MerkleConstructionError> {\n        Ok(if data.is_empty() {\n            ChunkWithProof {\n                proof: IndexedMerkleProof::new([Digest::blake2b_hash([])], index)?,\n                chunk: Bytes::new(),\n            }\n        } else {\n            ChunkWithProof {\n                proof: IndexedMerkleProof::new(\n                    data.chunks(Self::CHUNK_SIZE_BYTES)\n                        .map(Digest::blake2b_hash),\n                    index,\n                )?,\n                chunk: Bytes::from(\n                    data.chunks(Self::CHUNK_SIZE_BYTES)\n                        .nth(index as usize)\n                        .ok_or_else(|| MerkleConstructionError::IndexOutOfBounds {\n                            count: data.chunks(Self::CHUNK_SIZE_BYTES).len() as u64,\n                            index,\n                        })?,\n                ),\n            }\n        })\n    }\n\n    /// Get a reference to the `ChunkWithProof`'s chunk.\n    pub fn chunk(&self) -> &[u8] {\n        self.chunk.as_slice()\n    }\n\n    /// Convert a chunk with proof into the underlying chunk.\n    pub fn into_chunk(self) -> Bytes {\n        self.chunk\n    }\n\n    /// Returns the `IndexedMerkleProof`.\n    pub fn proof(&self) -> &IndexedMerkleProof {\n        &self.proof\n    }\n\n    /// Verify the integrity of this chunk with indexed Merkle proof.\n    pub fn verify(&self) -> Result<(), ChunkWithProofVerificationError> {\n        self.proof().verify()?;\n        let first_digest_in_indexed_merkle_proof =\n            self.proof().merkle_proof().first().ok_or_else(|| {\n                ChunkWithProofVerificationError::ChunkWithProofHasEmptyMerkleProof {\n                    chunk_with_proof: self.clone(),\n                }\n            })?;\n        let hash_of_chunk = Digest::hash(self.chunk());\n        if 
*first_digest_in_indexed_merkle_proof != hash_of_chunk {\n            return Err(\n                ChunkWithProofVerificationError::FirstDigestInMerkleProofDidNotMatchHashOfChunk {\n                    first_digest_in_indexed_merkle_proof: *first_digest_in_indexed_merkle_proof,\n                    hash_of_chunk,\n                },\n            );\n        }\n        Ok(())\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use proptest::{\n        arbitrary::Arbitrary,\n        strategy::{BoxedStrategy, Strategy},\n    };\n    use proptest_attr_macro::proptest;\n    use rand::Rng;\n\n    use crate::{\n        bytesrepr::{self, FromBytes, ToBytes},\n        ChunkWithProof, Digest, MerkleConstructionError,\n    };\n\n    fn prepare_bytes(length: usize) -> Vec<u8> {\n        let mut rng = rand::thread_rng();\n\n        (0..length).map(|_| rng.gen()).collect()\n    }\n\n    fn random_chunk_with_proof() -> ChunkWithProof {\n        let mut rng = rand::thread_rng();\n        let data: Vec<u8> = prepare_bytes(rng.gen_range(1..1024));\n        let index = rng.gen_range(0..data.chunks(ChunkWithProof::CHUNK_SIZE_BYTES).len() as u64);\n\n        ChunkWithProof::new(&data, index).unwrap()\n    }\n\n    impl ChunkWithProof {\n        fn replace_first_proof(self) -> Self {\n            let mut rng = rand::thread_rng();\n            let ChunkWithProof { mut proof, chunk } = self;\n\n            // Keep the same number of proofs, but replace the first one with some random hash\n            let mut merkle_proof: Vec<_> = proof.merkle_proof().to_vec();\n            merkle_proof.pop();\n            merkle_proof.insert(0, Digest::hash(rng.gen::<usize>().to_string()));\n            proof.inject_merkle_proof(merkle_proof);\n\n            ChunkWithProof { proof, chunk }\n        }\n    }\n\n    #[derive(Debug)]\n    pub struct TestDataSize(usize);\n    impl Arbitrary for TestDataSize {\n        type Parameters = ();\n        type Strategy = BoxedStrategy<Self>;\n\n        fn arbitrary_with(_: 
Self::Parameters) -> Self::Strategy {\n            (0usize..32usize)\n                .prop_map(|chunk_count| {\n                    TestDataSize(chunk_count * ChunkWithProof::CHUNK_SIZE_BYTES)\n                })\n                .boxed()\n        }\n    }\n\n    #[derive(Debug)]\n    pub struct TestDataSizeAtLeastTwoChunks(usize);\n    impl Arbitrary for TestDataSizeAtLeastTwoChunks {\n        type Parameters = ();\n        type Strategy = BoxedStrategy<Self>;\n\n        fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {\n            (2usize..32usize)\n                .prop_map(|chunk_count| {\n                    TestDataSizeAtLeastTwoChunks(chunk_count * ChunkWithProof::CHUNK_SIZE_BYTES)\n                })\n                .boxed()\n        }\n    }\n\n    #[proptest]\n    fn generates_valid_proof(test_data: TestDataSize) {\n        for data in [prepare_bytes(test_data.0), vec![0u8; test_data.0]] {\n            let number_of_chunks: u64 = data\n                .chunks(ChunkWithProof::CHUNK_SIZE_BYTES)\n                .len()\n                .try_into()\n                .unwrap();\n\n            assert!((0..number_of_chunks)\n                .map(|chunk_index| { ChunkWithProof::new(data.as_slice(), chunk_index).unwrap() })\n                .all(|chunk_with_proof| chunk_with_proof.verify().is_ok()));\n        }\n    }\n\n    #[proptest]\n    fn validate_chunks_against_hash_merkle_tree(test_data: TestDataSizeAtLeastTwoChunks) {\n        // This test requires at least two chunks\n        assert!(test_data.0 >= ChunkWithProof::CHUNK_SIZE_BYTES * 2);\n\n        for data in [prepare_bytes(test_data.0), vec![0u8; test_data.0]] {\n            let expected_root = Digest::hash_merkle_tree(\n                data.chunks(ChunkWithProof::CHUNK_SIZE_BYTES)\n                    .map(Digest::hash),\n            );\n\n            // Calculate proof with `ChunkWithProof`\n            let ChunkWithProof {\n                proof: proof_0,\n                chunk: _,\n       
     } = ChunkWithProof::new(data.as_slice(), 0).unwrap();\n            let ChunkWithProof {\n                proof: proof_1,\n                chunk: _,\n            } = ChunkWithProof::new(data.as_slice(), 1).unwrap();\n\n            assert_eq!(proof_0.root_hash(), expected_root);\n            assert_eq!(proof_1.root_hash(), expected_root);\n        }\n    }\n\n    #[proptest]\n    fn verifies_chunk_with_proofs(test_data: TestDataSize) {\n        for data in [prepare_bytes(test_data.0), vec![0u8; test_data.0]] {\n            let chunk_with_proof = ChunkWithProof::new(data.as_slice(), 0).unwrap();\n            assert!(chunk_with_proof.verify().is_ok());\n\n            let chunk_with_incorrect_proof = chunk_with_proof.replace_first_proof();\n            assert!(chunk_with_incorrect_proof.verify().is_err());\n        }\n    }\n\n    #[proptest]\n    fn serde_deserialization_of_malformed_chunk_should_work(test_data: TestDataSize) {\n        for data in [prepare_bytes(test_data.0), vec![0u8; test_data.0]] {\n            let chunk_with_proof = ChunkWithProof::new(data.as_slice(), 0).unwrap();\n\n            let json = serde_json::to_string(&chunk_with_proof).unwrap();\n            assert_eq!(\n                chunk_with_proof,\n                serde_json::from_str::<ChunkWithProof>(&json)\n                    .expect(\"should deserialize correctly\")\n            );\n\n            let chunk_with_incorrect_proof = chunk_with_proof.replace_first_proof();\n            let json = serde_json::to_string(&chunk_with_incorrect_proof).unwrap();\n            serde_json::from_str::<ChunkWithProof>(&json).expect(\"should deserialize correctly\");\n        }\n    }\n\n    #[proptest]\n    fn bytesrepr_deserialization_of_malformed_chunk_should_work(test_data: TestDataSize) {\n        for data in [prepare_bytes(test_data.0), vec![0u8; test_data.0]] {\n            let chunk_with_proof = ChunkWithProof::new(data.as_slice(), 0).unwrap();\n\n            let bytes = chunk_with_proof\n      
          .to_bytes()\n                .expect(\"should serialize correctly\");\n\n            let (deserialized_chunk_with_proof, _) =\n                ChunkWithProof::from_bytes(&bytes).expect(\"should deserialize correctly\");\n\n            assert_eq!(chunk_with_proof, deserialized_chunk_with_proof);\n\n            let chunk_with_incorrect_proof = chunk_with_proof.replace_first_proof();\n            let bytes = chunk_with_incorrect_proof\n                .to_bytes()\n                .expect(\"should serialize correctly\");\n\n            ChunkWithProof::from_bytes(&bytes).expect(\"should deserialize correctly\");\n        }\n    }\n\n    #[test]\n    fn returns_error_on_incorrect_index() {\n        // This test needs specific data sizes, hence it doesn't use the proptest\n\n        let chunk_with_proof = ChunkWithProof::new(&[], 0).expect(\"should create with empty data\");\n        assert!(chunk_with_proof.verify().is_ok());\n\n        let chunk_with_proof =\n            ChunkWithProof::new(&[], 1).expect_err(\"should error with empty data and index > 0\");\n        if let MerkleConstructionError::IndexOutOfBounds { count, index } = chunk_with_proof {\n            assert_eq!(count, 1);\n            assert_eq!(index, 1);\n        } else {\n            panic!(\"expected MerkleConstructionError::IndexOutOfBounds\");\n        }\n\n        let data_larger_than_single_chunk = vec![0u8; ChunkWithProof::CHUNK_SIZE_BYTES * 10];\n        ChunkWithProof::new(data_larger_than_single_chunk.as_slice(), 9).unwrap();\n\n        let chunk_with_proof =\n            ChunkWithProof::new(data_larger_than_single_chunk.as_slice(), 10).unwrap_err();\n        if let MerkleConstructionError::IndexOutOfBounds { count, index } = chunk_with_proof {\n            assert_eq!(count, 10);\n            assert_eq!(index, 10);\n        } else {\n            panic!(\"expected MerkleConstructionError::IndexOutOfBounds\");\n        }\n    }\n\n    #[test]\n    fn bytesrepr_serialization() {\n        
let chunk_with_proof = random_chunk_with_proof();\n        bytesrepr::test_serialization_roundtrip(&chunk_with_proof);\n    }\n\n    #[test]\n    fn chunk_with_empty_data_contains_a_single_proof() {\n        let chunk_with_proof = ChunkWithProof::new(&[], 0).unwrap();\n        assert_eq!(chunk_with_proof.proof.merkle_proof().len(), 1)\n    }\n}\n"
  },
  {
    "path": "types/src/digest/error.rs",
    "content": "//! Errors in constructing and validating indexed Merkle proofs, chunks with indexed Merkle proofs.\n\nuse alloc::string::String;\nuse core::fmt::{self, Display, Formatter};\n#[cfg(feature = \"std\")]\nuse std::error::Error as StdError;\n\nuse super::{ChunkWithProof, Digest};\nuse crate::bytesrepr;\n\n/// Possible hashing errors.\n#[derive(Debug)]\n#[non_exhaustive]\npub enum Error {\n    /// The digest length was an incorrect size.\n    IncorrectDigestLength(usize),\n    /// There was a decoding error.\n    Base16DecodeError(base16::DecodeError),\n}\n\nimpl Display for Error {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            Error::IncorrectDigestLength(length) => {\n                write!(\n                    formatter,\n                    \"incorrect digest length {}, expected length {}.\",\n                    length,\n                    Digest::LENGTH\n                )\n            }\n            Error::Base16DecodeError(error) => {\n                write!(formatter, \"base16 decode error: {}\", error)\n            }\n        }\n    }\n}\n\n#[cfg(feature = \"std\")]\nimpl StdError for Error {\n    fn source(&self) -> Option<&(dyn StdError + 'static)> {\n        match self {\n            Error::IncorrectDigestLength(_) => None,\n            Error::Base16DecodeError(error) => Some(error),\n        }\n    }\n}\n\n/// Error validating a Merkle proof of a chunk.\n#[derive(Debug, PartialEq, Eq)]\n#[non_exhaustive]\npub enum MerkleVerificationError {\n    /// Index out of bounds.\n    IndexOutOfBounds {\n        /// Count.\n        count: u64,\n        /// Index.\n        index: u64,\n    },\n\n    /// Unexpected proof length.\n    UnexpectedProofLength {\n        /// Count.\n        count: u64,\n        /// Index.\n        index: u64,\n        /// Expected proof length.\n        expected_proof_length: u8,\n        /// Actual proof length.\n        actual_proof_length: usize,\n    },\n}\n\nimpl 
Display for MerkleVerificationError {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            MerkleVerificationError::IndexOutOfBounds { count, index } => {\n                write!(\n                    formatter,\n                    \"index out of bounds - count: {}, index: {}\",\n                    count, index\n                )\n            }\n            MerkleVerificationError::UnexpectedProofLength {\n                count,\n                index,\n                expected_proof_length,\n                actual_proof_length,\n            } => {\n                write!(\n                    formatter,\n                    \"unexpected proof length - count: {}, index: {}, expected length: {}, actual \\\n                    length: {}\",\n                    count, index, expected_proof_length, actual_proof_length\n                )\n            }\n        }\n    }\n}\n\n#[cfg(feature = \"std\")]\nimpl StdError for MerkleVerificationError {}\n\n/// Error validating a chunk with proof.\n#[derive(Debug)]\n#[non_exhaustive]\npub enum ChunkWithProofVerificationError {\n    /// Indexed Merkle proof verification error.\n    MerkleVerificationError(MerkleVerificationError),\n\n    /// Empty Merkle proof for trie with chunk.\n    ChunkWithProofHasEmptyMerkleProof {\n        /// Chunk with empty Merkle proof.\n        chunk_with_proof: ChunkWithProof,\n    },\n    /// Unexpected Merkle root hash.\n    UnexpectedRootHash,\n    /// Bytesrepr error.\n    Bytesrepr(bytesrepr::Error),\n\n    /// First digest in indexed Merkle proof did not match hash of chunk.\n    FirstDigestInMerkleProofDidNotMatchHashOfChunk {\n        /// First digest in indexed Merkle proof.\n        first_digest_in_indexed_merkle_proof: Digest,\n        /// Hash of chunk.\n        hash_of_chunk: Digest,\n    },\n}\n\nimpl Display for ChunkWithProofVerificationError {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n        
    ChunkWithProofVerificationError::MerkleVerificationError(error) => {\n                write!(formatter, \"{}\", error)\n            }\n            ChunkWithProofVerificationError::ChunkWithProofHasEmptyMerkleProof {\n                chunk_with_proof,\n            } => {\n                write!(\n                    formatter,\n                    \"chunk with proof has empty merkle proof: {:?}\",\n                    chunk_with_proof\n                )\n            }\n            ChunkWithProofVerificationError::UnexpectedRootHash => {\n                write!(formatter, \"merkle proof has an unexpected root hash\")\n            }\n            ChunkWithProofVerificationError::Bytesrepr(error) => {\n                write!(\n                    formatter,\n                    \"bytesrepr error computing chunkable hash: {}\",\n                    error\n                )\n            }\n            ChunkWithProofVerificationError::FirstDigestInMerkleProofDidNotMatchHashOfChunk {\n                first_digest_in_indexed_merkle_proof,\n                hash_of_chunk,\n            } => {\n                write!(\n                    formatter,\n                    \"first digest in merkle proof did not match hash of chunk - first digest: \\\n                    {:?}, hash of chunk: {:?}\",\n                    first_digest_in_indexed_merkle_proof, hash_of_chunk\n                )\n            }\n        }\n    }\n}\n\nimpl From<MerkleVerificationError> for ChunkWithProofVerificationError {\n    fn from(error: MerkleVerificationError) -> Self {\n        ChunkWithProofVerificationError::MerkleVerificationError(error)\n    }\n}\n\n#[cfg(feature = \"std\")]\nimpl StdError for ChunkWithProofVerificationError {\n    fn source(&self) -> Option<&(dyn StdError + 'static)> {\n        match self {\n            ChunkWithProofVerificationError::MerkleVerificationError(error) => Some(error),\n            ChunkWithProofVerificationError::Bytesrepr(error) => Some(error),\n            
ChunkWithProofVerificationError::ChunkWithProofHasEmptyMerkleProof { .. }\n            | ChunkWithProofVerificationError::UnexpectedRootHash\n            | ChunkWithProofVerificationError::FirstDigestInMerkleProofDidNotMatchHashOfChunk {\n                ..\n            } => None,\n        }\n    }\n}\n\n/// Error during the construction of a Merkle proof.\n#[derive(Debug, Eq, PartialEq, Clone)]\n#[non_exhaustive]\npub enum MerkleConstructionError {\n    /// Chunk index was out of bounds.\n    IndexOutOfBounds {\n        /// Total chunks count.\n        count: u64,\n        /// Requested index.\n        index: u64,\n    },\n    /// Too many Merkle tree leaves.\n    TooManyLeaves {\n        /// Total chunks count.\n        count: String,\n    },\n}\n\nimpl Display for MerkleConstructionError {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            MerkleConstructionError::IndexOutOfBounds { count, index } => {\n                write!(\n                    formatter,\n                    \"could not construct merkle proof - index out of bounds - count: {}, index: {}\",\n                    count, index\n                )\n            }\n            MerkleConstructionError::TooManyLeaves { count } => {\n                write!(\n                    formatter,\n                    \"could not construct merkle proof - too many leaves - count: {}, max: {} \\\n                    (u64::MAX)\",\n                    count,\n                    u64::MAX\n                )\n            }\n        }\n    }\n}\n\n#[cfg(feature = \"std\")]\nimpl StdError for MerkleConstructionError {}\n"
  },
  {
    "path": "types/src/digest/indexed_merkle_proof.rs",
    "content": "//! Constructing and validating indexed Merkle proofs.\nuse alloc::{string::ToString, vec::Vec};\nuse core::convert::TryInto;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse itertools::Itertools;\n#[cfg(any(feature = \"once_cell\", test))]\nuse once_cell::sync::OnceCell;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse super::{Digest, MerkleConstructionError, MerkleVerificationError};\nuse crate::bytesrepr::{self, FromBytes, ToBytes};\n\n/// A Merkle proof of the given chunk.\n#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct IndexedMerkleProof {\n    index: u64,\n    count: u64,\n    merkle_proof: Vec<Digest>,\n    #[cfg_attr(any(feature = \"once_cell\", test), serde(skip))]\n    #[cfg_attr(\n        all(any(feature = \"once_cell\", test), feature = \"datasize\"),\n        data_size(skip)\n    )]\n    #[cfg(any(feature = \"once_cell\", test))]\n    root_hash: OnceCell<Digest>,\n}\n\nimpl ToBytes for IndexedMerkleProof {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        result.append(&mut self.index.to_bytes()?);\n        result.append(&mut self.count.to_bytes()?);\n        result.append(&mut self.merkle_proof.to_bytes()?);\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.index.serialized_length()\n            + self.count.serialized_length()\n            + self.merkle_proof.serialized_length()\n    }\n}\n\nimpl FromBytes for IndexedMerkleProof {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (index, remainder) = FromBytes::from_bytes(bytes)?;\n        let (count, remainder) = FromBytes::from_bytes(remainder)?;\n        let (merkle_proof, 
remainder) = FromBytes::from_bytes(remainder)?;\n\n        Ok((\n            IndexedMerkleProof {\n                index,\n                count,\n                merkle_proof,\n                #[cfg(any(feature = \"once_cell\", test))]\n                root_hash: OnceCell::new(),\n            },\n            remainder,\n        ))\n    }\n}\n\nimpl IndexedMerkleProof {\n    /// Attempts to construct a new instance.\n    pub fn new<I>(leaves: I, index: u64) -> Result<IndexedMerkleProof, MerkleConstructionError>\n    where\n        I: IntoIterator<Item = Digest>,\n        I::IntoIter: ExactSizeIterator,\n    {\n        use HashOrProof::{Hash as H, Proof as P};\n\n        enum HashOrProof {\n            Hash(Digest),\n            Proof(Vec<Digest>),\n        }\n\n        let leaves = leaves.into_iter();\n        let count: u64 =\n            leaves\n                .len()\n                .try_into()\n                .map_err(|_| MerkleConstructionError::TooManyLeaves {\n                    count: leaves.len().to_string(),\n                })?;\n\n        let maybe_proof = leaves\n            .enumerate()\n            .map(|(i, hash)| {\n                if i as u64 == index {\n                    P(vec![hash])\n                } else {\n                    H(hash)\n                }\n            })\n            .tree_fold1(|x, y| match (x, y) {\n                (H(hash_x), H(hash_y)) => H(Digest::hash_pair(hash_x, hash_y)),\n                (H(hash), P(mut proof)) | (P(mut proof), H(hash)) => {\n                    proof.push(hash);\n                    P(proof)\n                }\n                (P(_), P(_)) => unreachable!(),\n            });\n\n        match maybe_proof {\n            None | Some(H(_)) => Err(MerkleConstructionError::IndexOutOfBounds { count, index }),\n            Some(P(merkle_proof)) => Ok(IndexedMerkleProof {\n                index,\n                count,\n                merkle_proof,\n                #[cfg(any(feature = \"once_cell\", 
test))]\n                root_hash: OnceCell::new(),\n            }),\n        }\n    }\n\n    /// Returns the index.\n    pub fn index(&self) -> u64 {\n        self.index\n    }\n\n    /// Returns the total count of chunks.\n    pub fn count(&self) -> u64 {\n        self.count\n    }\n\n    /// Returns the root hash of this proof (i.e. the index hashed with the Merkle root hash).\n    ///\n    /// Note that with the `once_cell` feature enabled (generally done by enabling the `std`\n    /// feature), the root hash is memoized, and hence calling this method is cheap after the first\n    /// call.  Without `once_cell` enabled, every call to this method calculates the root hash.\n    pub fn root_hash(&self) -> Digest {\n        #[cfg(any(feature = \"once_cell\", test))]\n        return *self.root_hash.get_or_init(|| self.compute_root_hash());\n\n        #[cfg(not(any(feature = \"once_cell\", test)))]\n        self.compute_root_hash()\n    }\n\n    /// Returns the full collection of hash digests of the proof.\n    pub fn merkle_proof(&self) -> &[Digest] {\n        &self.merkle_proof\n    }\n\n    /// Attempts to verify self.\n    pub fn verify(&self) -> Result<(), MerkleVerificationError> {\n        if self.index >= self.count {\n            return Err(MerkleVerificationError::IndexOutOfBounds {\n                count: self.count,\n                index: self.index,\n            });\n        }\n        let expected_proof_length = self.compute_expected_proof_length();\n        if self.merkle_proof.len() != expected_proof_length as usize {\n            return Err(MerkleVerificationError::UnexpectedProofLength {\n                count: self.count,\n                index: self.index,\n                expected_proof_length,\n                actual_proof_length: self.merkle_proof.len(),\n            });\n        }\n        Ok(())\n    }\n\n    fn compute_root_hash(&self) -> Digest {\n        let IndexedMerkleProof {\n            count,\n            merkle_proof,\n            
..\n        } = self;\n\n        let mut hashes = merkle_proof.iter();\n        let raw_root = if let Some(leaf_hash) = hashes.next().cloned() {\n            // Compute whether to hash left or right for the elements of the Merkle proof.\n            // This gives a path to the value with the specified index.\n            // We represent this path as a sequence of 64 bits. 1 here means \"hash right\".\n            let mut path: u64 = 0;\n            let mut n = self.count;\n            let mut i = self.index;\n            while n > 1 {\n                path <<= 1;\n                let pivot = 1u64 << (63 - (n - 1).leading_zeros());\n                if i < pivot {\n                    n = pivot;\n                } else {\n                    path |= 1;\n                    n -= pivot;\n                    i -= pivot;\n                }\n            }\n\n            // Compute the raw Merkle root by hashing the proof from leaf hash up.\n            hashes.fold(leaf_hash, |acc, hash| {\n                let digest = if (path & 1) == 1 {\n                    Digest::hash_pair(hash, acc)\n                } else {\n                    Digest::hash_pair(acc, hash)\n                };\n                path >>= 1;\n                digest\n            })\n        } else {\n            Digest::SENTINEL_MERKLE_TREE\n        };\n\n        // The Merkle root is the hash of the count with the raw root.\n        Digest::hash_merkle_root(*count, raw_root)\n    }\n\n    // Proof lengths are never bigger than 65 is because we are using 64 bit counts\n    fn compute_expected_proof_length(&self) -> u8 {\n        if self.count == 0 {\n            return 0;\n        }\n        let mut l = 1;\n        let mut n = self.count;\n        let mut i = self.index;\n        while n > 1 {\n            let pivot = 1u64 << (63 - (n - 1).leading_zeros());\n            if i < pivot {\n                n = pivot;\n            } else {\n                n -= pivot;\n                i -= pivot;\n            
}\n            l += 1;\n        }\n        l\n    }\n\n    #[cfg(test)]\n    pub fn inject_merkle_proof(&mut self, merkle_proof: Vec<Digest>) {\n        self.merkle_proof = merkle_proof;\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use once_cell::sync::OnceCell;\n    use proptest::prelude::{prop_assert, prop_assert_eq};\n    use proptest_attr_macro::proptest;\n    use rand::{distributions::Standard, Rng};\n\n    use crate::{\n        bytesrepr::{self, FromBytes, ToBytes},\n        Digest, IndexedMerkleProof, MerkleVerificationError,\n    };\n\n    fn random_indexed_merkle_proof() -> IndexedMerkleProof {\n        let mut rng = rand::thread_rng();\n        let leaf_count: u64 = rng.gen_range(1..100);\n        let index = rng.gen_range(0..leaf_count);\n        let leaves: Vec<Digest> = (0..leaf_count)\n            .map(|i| Digest::hash(i.to_le_bytes()))\n            .collect();\n        IndexedMerkleProof::new(leaves.iter().cloned(), index)\n            .expect(\"should create indexed Merkle proof\")\n    }\n\n    #[test]\n    fn test_merkle_proofs() {\n        let mut rng = rand::thread_rng();\n        for _ in 0..20 {\n            let leaf_count: u64 = rng.gen_range(1..100);\n            let index = rng.gen_range(0..leaf_count);\n            let leaves: Vec<Digest> = (0..leaf_count)\n                .map(|i| Digest::hash(i.to_le_bytes()))\n                .collect();\n            let root = Digest::hash_merkle_tree(leaves.clone());\n            let indexed_merkle_proof = IndexedMerkleProof::new(leaves.clone(), index).unwrap();\n            assert_eq!(\n                indexed_merkle_proof.compute_expected_proof_length(),\n                indexed_merkle_proof.merkle_proof().len() as u8\n            );\n            assert_eq!(indexed_merkle_proof.verify(), Ok(()));\n            assert_eq!(leaf_count, indexed_merkle_proof.count);\n            assert_eq!(leaves[index as usize], indexed_merkle_proof.merkle_proof[0]);\n            assert_eq!(root, 
indexed_merkle_proof.root_hash());\n        }\n    }\n\n    #[test]\n    fn out_of_bounds_index() {\n        let out_of_bounds_indexed_merkle_proof = IndexedMerkleProof {\n            index: 23,\n            count: 4,\n            merkle_proof: vec![Digest([0u8; 32]); 3],\n            root_hash: OnceCell::new(),\n        };\n        assert_eq!(\n            out_of_bounds_indexed_merkle_proof.verify(),\n            Err(MerkleVerificationError::IndexOutOfBounds {\n                count: 4,\n                index: 23\n            })\n        )\n    }\n\n    #[test]\n    fn unexpected_proof_length() {\n        let out_of_bounds_indexed_merkle_proof = IndexedMerkleProof {\n            index: 1235,\n            count: 5647,\n            merkle_proof: vec![Digest([0u8; 32]); 13],\n            root_hash: OnceCell::new(),\n        };\n        assert_eq!(\n            out_of_bounds_indexed_merkle_proof.verify(),\n            Err(MerkleVerificationError::UnexpectedProofLength {\n                count: 5647,\n                index: 1235,\n                expected_proof_length: 14,\n                actual_proof_length: 13\n            })\n        )\n    }\n\n    #[test]\n    fn empty_unexpected_proof_length() {\n        let out_of_bounds_indexed_merkle_proof = IndexedMerkleProof {\n            index: 0,\n            count: 0,\n            merkle_proof: vec![Digest([0u8; 32]); 3],\n            root_hash: OnceCell::new(),\n        };\n        assert_eq!(\n            out_of_bounds_indexed_merkle_proof.verify(),\n            Err(MerkleVerificationError::IndexOutOfBounds { count: 0, index: 0 })\n        )\n    }\n\n    #[test]\n    fn empty_out_of_bounds_index() {\n        let out_of_bounds_indexed_merkle_proof = IndexedMerkleProof {\n            index: 23,\n            count: 0,\n            merkle_proof: vec![],\n            root_hash: OnceCell::new(),\n        };\n        assert_eq!(\n            out_of_bounds_indexed_merkle_proof.verify(),\n            
Err(MerkleVerificationError::IndexOutOfBounds {\n                count: 0,\n                index: 23\n            })\n        )\n    }\n\n    #[test]\n    fn deep_proof_doesnt_kill_stack() {\n        const PROOF_LENGTH: usize = 63;\n        let indexed_merkle_proof = IndexedMerkleProof {\n            index: 42,\n            count: 1 << (PROOF_LENGTH - 1),\n            merkle_proof: vec![Digest([0u8; Digest::LENGTH]); PROOF_LENGTH],\n            root_hash: OnceCell::new(),\n        };\n        let _hash = indexed_merkle_proof.root_hash();\n    }\n\n    #[test]\n    fn empty_proof() {\n        let empty_merkle_root = Digest::hash_merkle_tree(vec![]);\n        assert_eq!(empty_merkle_root, Digest::SENTINEL_MERKLE_TREE);\n        let indexed_merkle_proof = IndexedMerkleProof {\n            index: 0,\n            count: 0,\n            merkle_proof: vec![],\n            root_hash: OnceCell::new(),\n        };\n        assert!(indexed_merkle_proof.verify().is_err());\n    }\n\n    #[proptest]\n    fn expected_proof_length_le_65(index: u64, count: u64) {\n        let indexed_merkle_proof = IndexedMerkleProof {\n            index,\n            count,\n            merkle_proof: vec![],\n            root_hash: OnceCell::new(),\n        };\n        prop_assert!(indexed_merkle_proof.compute_expected_proof_length() <= 65);\n    }\n\n    fn reference_root_from_proof(index: u64, count: u64, proof: &[Digest]) -> Digest {\n        fn compute_raw_root_from_proof(index: u64, leaf_count: u64, proof: &[Digest]) -> Digest {\n            if leaf_count == 0 {\n                return Digest::SENTINEL_MERKLE_TREE;\n            }\n            if leaf_count == 1 {\n                return proof[0];\n            }\n            let half = 1u64 << (63 - (leaf_count - 1).leading_zeros());\n            let last = proof.len() - 1;\n            if index < half {\n                let left = compute_raw_root_from_proof(index, half, &proof[..last]);\n                Digest::hash_pair(left, 
proof[last])\n            } else {\n                let right =\n                    compute_raw_root_from_proof(index - half, leaf_count - half, &proof[..last]);\n                Digest::hash_pair(proof[last], right)\n            }\n        }\n\n        let raw_root = compute_raw_root_from_proof(index, count, proof);\n        Digest::hash_merkle_root(count, raw_root)\n    }\n\n    /// Construct an `IndexedMerkleProof` with a proof of zero digests.\n    fn test_indexed_merkle_proof(index: u64, count: u64) -> IndexedMerkleProof {\n        let mut indexed_merkle_proof = IndexedMerkleProof {\n            index,\n            count,\n            merkle_proof: vec![],\n            root_hash: OnceCell::new(),\n        };\n        let expected_proof_length = indexed_merkle_proof.compute_expected_proof_length();\n        indexed_merkle_proof.merkle_proof = rand::thread_rng()\n            .sample_iter(Standard)\n            .take(expected_proof_length as usize)\n            .collect();\n        indexed_merkle_proof\n    }\n\n    #[proptest]\n    fn root_from_proof_agrees_with_recursion(index: u64, count: u64) {\n        let indexed_merkle_proof = test_indexed_merkle_proof(index, count);\n        prop_assert_eq!(\n            indexed_merkle_proof.root_hash(),\n            reference_root_from_proof(\n                indexed_merkle_proof.index,\n                indexed_merkle_proof.count,\n                indexed_merkle_proof.merkle_proof(),\n            ),\n            \"Result did not agree with reference implementation.\",\n        );\n    }\n\n    #[test]\n    fn root_from_proof_agrees_with_recursion_2147483648_4294967297() {\n        let indexed_merkle_proof = test_indexed_merkle_proof(2147483648, 4294967297);\n        assert_eq!(\n            indexed_merkle_proof.root_hash(),\n            reference_root_from_proof(\n                indexed_merkle_proof.index,\n                indexed_merkle_proof.count,\n                indexed_merkle_proof.merkle_proof(),\n            
),\n            \"Result did not agree with reference implementation.\",\n        );\n    }\n\n    #[test]\n    fn serde_deserialization_of_malformed_proof_should_work() {\n        let indexed_merkle_proof = test_indexed_merkle_proof(10, 10);\n\n        let json = serde_json::to_string(&indexed_merkle_proof).unwrap();\n        assert_eq!(\n            indexed_merkle_proof,\n            serde_json::from_str::<IndexedMerkleProof>(&json)\n                .expect(\"should deserialize correctly\")\n        );\n\n        // Check that proof with index greater than count deserializes correctly\n        let mut indexed_merkle_proof = test_indexed_merkle_proof(10, 10);\n        indexed_merkle_proof.index += 1;\n        let json = serde_json::to_string(&indexed_merkle_proof).unwrap();\n        serde_json::from_str::<IndexedMerkleProof>(&json).expect(\"should deserialize correctly\");\n\n        // Check that proof with incorrect length deserializes correctly\n        let mut indexed_merkle_proof = test_indexed_merkle_proof(10, 10);\n        indexed_merkle_proof.merkle_proof.push(Digest::hash(\"XXX\"));\n        let json = serde_json::to_string(&indexed_merkle_proof).unwrap();\n        serde_json::from_str::<IndexedMerkleProof>(&json).expect(\"should deserialize correctly\");\n    }\n\n    #[test]\n    fn bytesrepr_deserialization_of_malformed_proof_should_work() {\n        let indexed_merkle_proof = test_indexed_merkle_proof(10, 10);\n\n        let bytes = indexed_merkle_proof\n            .to_bytes()\n            .expect(\"should serialize correctly\");\n        IndexedMerkleProof::from_bytes(&bytes).expect(\"should deserialize correctly\");\n\n        // Check that proof with index greater than count deserializes correctly\n        let mut indexed_merkle_proof = test_indexed_merkle_proof(10, 10);\n        indexed_merkle_proof.index += 1;\n        let bytes = indexed_merkle_proof\n            .to_bytes()\n            .expect(\"should serialize correctly\");\n        
IndexedMerkleProof::from_bytes(&bytes).expect(\"should deserialize correctly\");\n\n        // Check that proof with incorrect length deserializes correctly\n        let mut indexed_merkle_proof = test_indexed_merkle_proof(10, 10);\n        indexed_merkle_proof.merkle_proof.push(Digest::hash(\"XXX\"));\n        let bytes = indexed_merkle_proof\n            .to_bytes()\n            .expect(\"should serialize correctly\");\n        IndexedMerkleProof::from_bytes(&bytes).expect(\"should deserialize correctly\");\n    }\n\n    #[test]\n    fn bytesrepr_serialization() {\n        let indexed_merkle_proof = random_indexed_merkle_proof();\n        bytesrepr::test_serialization_roundtrip(&indexed_merkle_proof);\n    }\n}\n"
  },
  {
    "path": "types/src/digest.rs",
    "content": "//! Contains digest and merkle chunking used throughout the system.\n\nmod chunk_with_proof;\nmod error;\nmod indexed_merkle_proof;\n\nuse alloc::{collections::BTreeMap, string::String, vec::Vec};\nuse core::{\n    array::TryFromSliceError,\n    convert::{TryFrom, TryInto},\n    fmt::{self, Debug, Display, Formatter, LowerHex, UpperHex},\n};\n\nuse blake2::{\n    digest::{Update, VariableOutput},\n    VarBlake2b,\n};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse hex_fmt::HexFmt;\nuse itertools::Itertools;\n#[cfg(feature = \"once_cell\")]\nuse once_cell::sync::OnceCell;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    checksummed_hex, CLType, CLTyped,\n};\npub use chunk_with_proof::ChunkWithProof;\npub use error::{\n    ChunkWithProofVerificationError, Error as DigestError, MerkleConstructionError,\n    MerkleVerificationError,\n};\npub use indexed_merkle_proof::IndexedMerkleProof;\n\n/// The output of the hash function.\n#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Default)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"Hex-encoded hash digest.\")\n)]\npub struct Digest(\n    #[cfg_attr(feature = \"json-schema\", schemars(skip, with = \"String\"))]\n    pub(super)  [u8; Digest::LENGTH],\n);\n\nconst CHUNK_DATA_ZEROED: &[u8] = &[0u8; ChunkWithProof::CHUNK_SIZE_BYTES];\n\nimpl Digest {\n    /// The number of bytes in a `Digest`.\n    pub const LENGTH: usize = 32;\n\n    /// Sentinel hash to be used for hashing options in the case of `None`.\n    pub const 
SENTINEL_NONE: Digest = Digest([0u8; Digest::LENGTH]);\n    /// Sentinel hash to be used by `hash_slice_rfold`. Terminates the fold.\n    pub const SENTINEL_RFOLD: Digest = Digest([1u8; Digest::LENGTH]);\n    /// Sentinel hash to be used by `hash_merkle_tree` in the case of an empty list.\n    pub const SENTINEL_MERKLE_TREE: Digest = Digest([2u8; Digest::LENGTH]);\n\n    /// Creates a 32-byte BLAKE2b hash digest from a given a piece of data.\n    #[inline(always)]\n    pub fn hash<T: AsRef<[u8]>>(data: T) -> Digest {\n        Self::blake2b_hash(data)\n    }\n\n    /// Creates a 32-byte BLAKE2b hash digest from a given a piece of data\n    pub(crate) fn blake2b_hash<T: AsRef<[u8]>>(data: T) -> Digest {\n        let mut ret = [0u8; Digest::LENGTH];\n        // NOTE: Safe to unwrap here because our digest length is constant and valid\n        let mut hasher = VarBlake2b::new(Digest::LENGTH).unwrap();\n        hasher.update(data);\n        hasher.finalize_variable(|hash| ret.clone_from_slice(hash));\n        Digest(ret)\n    }\n\n    /// Hashes a pair of byte slices.\n    pub fn hash_pair<T: AsRef<[u8]>, U: AsRef<[u8]>>(data1: T, data2: U) -> Digest {\n        let mut result = [0; Digest::LENGTH];\n        let mut hasher = VarBlake2b::new(Digest::LENGTH).unwrap();\n        hasher.update(data1);\n        hasher.update(data2);\n        hasher.finalize_variable(|slice| {\n            result.copy_from_slice(slice);\n        });\n        Digest(result)\n    }\n\n    /// Hashes a raw Merkle root and leaf count to firm the final Merkle hash.\n    ///\n    /// To avoid pre-image attacks, the final hash that is based upon the number of leaves in the\n    /// Merkle tree and the root hash is prepended with a padding to ensure it is longer than the\n    /// actual chunk size.\n    ///\n    /// Without this feature, an attacker could construct an item that is only a few bytes long but\n    /// hashes to the same value as a much longer, chunked item by hashing `(len || root hash 
of\n    /// longer item's Merkle tree root)`.\n    ///\n    /// This function computes the correct final hash by ensuring the hasher used has been\n    /// initialized with padding before.\n    ///\n    /// With `once_cell` feature enabled (generally done by enabling `std` feature), for efficiency\n    /// reasons it uses a memoized hasher state computed on first run and cloned afterwards.\n    fn hash_merkle_root(leaf_count: u64, root: Digest) -> Digest {\n        #[cfg(feature = \"once_cell\")]\n        static PAIR_PREFIX_HASHER: OnceCell<VarBlake2b> = OnceCell::new();\n\n        let mut result = [0; Digest::LENGTH];\n        let get_hasher = || {\n            let mut hasher = VarBlake2b::new(Digest::LENGTH).unwrap();\n            hasher.update(CHUNK_DATA_ZEROED);\n            hasher\n        };\n        #[cfg(feature = \"once_cell\")]\n        let mut hasher = PAIR_PREFIX_HASHER.get_or_init(get_hasher).clone();\n        #[cfg(not(feature = \"once_cell\"))]\n        let mut hasher = get_hasher();\n\n        hasher.update(leaf_count.to_le_bytes());\n        hasher.update(root);\n        hasher.finalize_variable(|slice| {\n            result.copy_from_slice(slice);\n        });\n        Digest(result)\n    }\n\n    /// Returns the underlying BLAKE2b hash bytes\n    pub fn value(&self) -> [u8; Digest::LENGTH] {\n        self.0\n    }\n\n    /// Converts the underlying BLAKE2b hash digest array to a `Vec`\n    pub fn into_vec(self) -> Vec<u8> {\n        self.0.to_vec()\n    }\n\n    /// Hashes an `impl IntoIterator` of [`Digest`]s into a single [`Digest`] by\n    /// constructing a [Merkle tree][1]. Reduces pairs of elements in the collection by repeatedly\n    /// calling [Digest::hash_pair].\n    ///\n    /// The pattern of hashing is as follows. 
It is akin to [graph reduction][2]:\n    ///\n    /// ```text\n    /// 1 2 4 5 8 9\n    /// │ │ │ │ │ │\n    /// └─3 └─6 └─10\n    ///   │   │   │\n    ///   └───7   │\n    ///       │   │\n    ///       └───11\n    /// ```\n    ///\n    /// Finally hashes the number of elements with the resulting hash. In the example above the\n    /// final output would be `hash_pair(6_u64.to_le_bytes(), l)`.\n    ///\n    /// Returns [`Digest::SENTINEL_MERKLE_TREE`] when the input is empty.\n    ///\n    /// [1]: https://en.wikipedia.org/wiki/Merkle_tree\n    /// [2]: https://en.wikipedia.org/wiki/Graph_reduction\n    pub fn hash_merkle_tree<I>(leaves: I) -> Digest\n    where\n        I: IntoIterator<Item = Digest>,\n        I::IntoIter: ExactSizeIterator,\n    {\n        let leaves = leaves.into_iter();\n        let leaf_count = leaves.len() as u64;\n\n        leaves.tree_fold1(Digest::hash_pair).map_or_else(\n            || Digest::SENTINEL_MERKLE_TREE,\n            |raw_root| Digest::hash_merkle_root(leaf_count, raw_root),\n        )\n    }\n\n    /// Hashes a `BTreeMap`.\n    pub fn hash_btree_map<K, V>(btree_map: &BTreeMap<K, V>) -> Result<Digest, bytesrepr::Error>\n    where\n        K: ToBytes,\n        V: ToBytes,\n    {\n        let mut kv_hashes: Vec<Digest> = Vec::with_capacity(btree_map.len());\n        for (key, value) in btree_map.iter() {\n            kv_hashes.push(Digest::hash_pair(\n                Digest::hash(key.to_bytes()?),\n                Digest::hash(value.to_bytes()?),\n            ))\n        }\n        Ok(Self::hash_merkle_tree(kv_hashes))\n    }\n\n    /// Hashes a `&[Digest]` using a [right fold][1].\n    ///\n    /// This pattern of hashing is as follows:\n    ///\n    /// ```text\n    /// hash_pair(a, &hash_pair(b, &hash_pair(c, &SENTINEL_RFOLD)))\n    /// ```\n    ///\n    /// Unlike Merkle trees, this is suited to hashing heterogeneous lists we may wish to extend in\n    /// the future (ie, hashes of data structures that may undergo 
revision).\n    ///\n    /// Returns [`Digest::SENTINEL_RFOLD`] when given an empty slice as input.\n    ///\n    /// [1]: https://en.wikipedia.org/wiki/Fold_(higher-order_function)#Linear_folds\n    pub fn hash_slice_rfold(slice: &[Digest]) -> Digest {\n        Self::hash_slice_with_proof(slice, Self::SENTINEL_RFOLD)\n    }\n\n    /// Hashes a `&[Digest]` using a [right fold][1]. Uses `proof` as a Merkle proof for the\n    /// missing tail of the slice.\n    ///\n    /// [1]: https://en.wikipedia.org/wiki/Fold_(higher-order_function)#Linear_folds\n    pub fn hash_slice_with_proof(slice: &[Digest], proof: Digest) -> Digest {\n        slice\n            .iter()\n            .rfold(proof, |prev, next| Digest::hash_pair(next, prev))\n    }\n\n    /// Returns a `Digest` parsed from a hex-encoded `Digest`.\n    pub fn from_hex<T: AsRef<[u8]>>(hex_input: T) -> Result<Self, DigestError> {\n        let bytes = checksummed_hex::decode(&hex_input).map_err(DigestError::Base16DecodeError)?;\n        let slice: [u8; Self::LENGTH] = bytes\n            .try_into()\n            .map_err(|_| DigestError::IncorrectDigestLength(hex_input.as_ref().len()))?;\n        Ok(Digest(slice))\n    }\n\n    /// Hash data into chunks if necessary.\n    pub fn hash_into_chunks_if_necessary(bytes: &[u8]) -> Digest {\n        if bytes.len() <= ChunkWithProof::CHUNK_SIZE_BYTES {\n            Digest::blake2b_hash(bytes)\n        } else {\n            Digest::hash_merkle_tree(\n                bytes\n                    .chunks(ChunkWithProof::CHUNK_SIZE_BYTES)\n                    .map(Digest::blake2b_hash),\n            )\n        }\n    }\n\n    /// Returns a new `Digest` directly initialized with the provided bytes; no hashing is done.\n    ///\n    /// This is equivalent to `Digest::from`, but is a const function.\n    pub const fn from_raw(raw_digest: [u8; Self::LENGTH]) -> Self {\n        Digest(raw_digest)\n    }\n\n    /// Returns a random `Digest`.\n    #[cfg(any(feature = \"testing\", 
test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        Digest(rng.gen())\n    }\n}\n\nimpl CLTyped for Digest {\n    fn cl_type() -> CLType {\n        CLType::ByteArray(Digest::LENGTH as u32)\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<Digest> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Digest {\n        Digest(rng.gen())\n    }\n}\n\nimpl LowerHex for Digest {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        let hex_string = base16::encode_lower(&self.value());\n        if f.alternate() {\n            write!(f, \"0x{}\", hex_string)\n        } else {\n            write!(f, \"{}\", hex_string)\n        }\n    }\n}\n\nimpl UpperHex for Digest {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        let hex_string = base16::encode_upper(&self.value());\n        if f.alternate() {\n            write!(f, \"0x{}\", hex_string)\n        } else {\n            write!(f, \"{}\", hex_string)\n        }\n    }\n}\n\nimpl Display for Digest {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        write!(f, \"{:10}\", HexFmt(&self.0))\n    }\n}\n\nimpl Debug for Digest {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        write!(f, \"{}\", base16::encode_lower(&self.0))\n    }\n}\n\nimpl From<[u8; Digest::LENGTH]> for Digest {\n    fn from(arr: [u8; Digest::LENGTH]) -> Self {\n        Digest(arr)\n    }\n}\n\nimpl TryFrom<&[u8]> for Digest {\n    type Error = TryFromSliceError;\n\n    fn try_from(slice: &[u8]) -> Result<Digest, Self::Error> {\n        <[u8; Digest::LENGTH]>::try_from(slice).map(Digest)\n    }\n}\n\nimpl AsRef<[u8]> for Digest {\n    fn as_ref(&self) -> &[u8] {\n        self.0.as_ref()\n    }\n}\n\nimpl From<Digest> for [u8; Digest::LENGTH] {\n    fn from(hash: Digest) -> Self {\n        hash.0\n    }\n}\n\nimpl ToBytes for Digest {\n    #[inline(always)]\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n 
   #[inline(always)]\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n\n    #[inline(always)]\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        writer.extend_from_slice(&self.0);\n        Ok(())\n    }\n}\n\nimpl FromBytes for Digest {\n    #[inline(always)]\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        FromBytes::from_bytes(bytes).map(|(arr, rem)| (Digest(arr), rem))\n    }\n}\n\nimpl Serialize for Digest {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            base16::encode_lower(&self.0).serialize(serializer)\n        } else {\n            // This is to keep backwards compatibility with how HexForm encodes\n            // byte arrays. HexForm treats this like a slice.\n            self.0[..].serialize(serializer)\n        }\n    }\n}\n\nimpl<'de> Deserialize<'de> for Digest {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        if deserializer.is_human_readable() {\n            let hex_string = String::deserialize(deserializer)?;\n            let bytes =\n                checksummed_hex::decode(hex_string.as_bytes()).map_err(SerdeError::custom)?;\n            let data =\n                <[u8; Digest::LENGTH]>::try_from(bytes.as_ref()).map_err(SerdeError::custom)?;\n            Ok(Digest::from(data))\n        } else {\n            let data = <Vec<u8>>::deserialize(deserializer)?;\n            Digest::try_from(data.as_slice()).map_err(D::Error::custom)\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::{collections::BTreeMap, iter};\n\n    use proptest_attr_macro::proptest;\n\n    use super::Digest;\n\n    use crate::{\n        bytesrepr::{self, ToBytes},\n        ChunkWithProof,\n    };\n\n    #[proptest]\n    fn bytesrepr_roundtrip(hash: [u8; Digest::LENGTH]) {\n        let digest = Digest(hash);\n    
    bytesrepr::test_serialization_roundtrip(&digest);\n    }\n\n    #[proptest]\n    fn serde_roundtrip(hash: [u8; Digest::LENGTH]) {\n        let preser_digest = Digest(hash);\n        let serialized = serde_json::to_string(&preser_digest).unwrap();\n        let deser_digest: Digest = serde_json::from_str(&serialized).unwrap();\n        assert_eq!(preser_digest, deser_digest);\n    }\n\n    #[test]\n    fn serde_custom_serialization() {\n        let serialized = serde_json::to_string(&Digest::SENTINEL_RFOLD).unwrap();\n        let expected = format!(\"\\\"{:?}\\\"\", Digest::SENTINEL_RFOLD);\n        assert_eq!(expected, serialized);\n    }\n\n    #[test]\n    fn hash_known() {\n        // Data of length less or equal to [ChunkWithProof::CHUNK_SIZE_BYTES]\n        // are hashed using Blake2B algorithm.\n        // Larger data are chunked and Merkle tree hash is calculated.\n        //\n        // Please note that [ChunkWithProof::CHUNK_SIZE_BYTES] is `test` configuration\n        // is smaller than in production, to allow testing with more chunks\n        // with still reasonable time and memory consumption.\n        //\n        // See: [Digest::hash]\n        let inputs_and_digests = [\n            (\n                \"\",\n                \"0e5751c026e543b2e8ab2eb06099daa1d1e5df47778f7787faab45cdf12fe3a8\",\n            ),\n            (\n                \"abc\",\n                \"bddd813c634239723171ef3fee98579b94964e3bb1cb3e427262c8c068d52319\",\n            ),\n            (\n                \"0123456789\",\n                \"7b6cb8d374484e221785288b035dc53fc9ddf000607f473fc2a3258d89a70398\",\n            ),\n            (\n                \"01234567890\",\n                \"3d199478c18b7fe3ca1f4f2a9b3e07f708ff66ed52eb345db258abe8a812ed5c\",\n            ),\n            (\n                \"The quick brown fox jumps over the lazy dog\",\n                \"01718cec35cd3d796dd00020e0bfecb473ad23457d063b75eff29c0ffa2e58a9\",\n            ),\n        ];\n        
for (known_input, expected_digest) in &inputs_and_digests {\n            let known_input: &[u8] = known_input.as_ref();\n            assert_eq!(*expected_digest, format!(\"{:?}\", Digest::hash(known_input)));\n        }\n    }\n\n    #[test]\n    fn from_valid_hex_should_succeed() {\n        for char in \"abcdefABCDEF0123456789\".chars() {\n            let input: String = iter::repeat(char).take(64).collect();\n            assert!(Digest::from_hex(input).is_ok());\n        }\n    }\n\n    #[test]\n    fn from_hex_invalid_length_should_fail() {\n        for len in &[2_usize, 62, 63, 65, 66] {\n            let input: String = \"f\".repeat(*len);\n            assert!(Digest::from_hex(input).is_err());\n        }\n    }\n\n    #[test]\n    fn from_hex_invalid_char_should_fail() {\n        for char in \"g %-\".chars() {\n            let input: String = iter::repeat('f').take(63).chain(iter::once(char)).collect();\n            assert!(Digest::from_hex(input).is_err());\n        }\n    }\n\n    #[test]\n    fn should_display_digest_in_hex() {\n        let hash = Digest([0u8; 32]);\n        let hash_hex = format!(\"{:?}\", hash);\n        assert_eq!(\n            hash_hex,\n            \"0000000000000000000000000000000000000000000000000000000000000000\"\n        );\n    }\n\n    #[test]\n    fn should_print_digest_lower_hex() {\n        let hash = Digest([10u8; 32]);\n        let hash_lower_hex = format!(\"{:x}\", hash);\n        assert_eq!(\n            hash_lower_hex,\n            \"0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a\"\n        )\n    }\n\n    #[test]\n    fn should_print_digest_upper_hex() {\n        let hash = Digest([10u8; 32]);\n        let hash_upper_hex = format!(\"{:X}\", hash);\n        assert_eq!(\n            hash_upper_hex,\n            \"0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A\"\n        )\n    }\n\n    #[test]\n    fn alternate_should_prepend_0x() {\n        let hash = Digest([0u8; 32]);\n        let 
hash_hex_alt = format!(\"{:#x}\", hash);\n        assert_eq!(\n            hash_hex_alt,\n            \"0x0000000000000000000000000000000000000000000000000000000000000000\"\n        )\n    }\n\n    #[test]\n    fn test_hash_pair() {\n        let hash1 = Digest([1u8; 32]);\n        let hash2 = Digest([2u8; 32]);\n\n        let hash = Digest::hash_pair(hash1, hash2);\n        let hash_lower_hex = format!(\"{:x}\", hash);\n\n        assert_eq!(\n            hash_lower_hex,\n            \"30b600fb1f0cc0b3f0fc28cdcb7389405a6659be81c7d5c5905725aa3a5119ce\"\n        );\n    }\n\n    #[test]\n    fn test_hash_rfold() {\n        let hashes = [\n            Digest([1u8; 32]),\n            Digest([2u8; 32]),\n            Digest([3u8; 32]),\n            Digest([4u8; 32]),\n            Digest([5u8; 32]),\n        ];\n\n        let hash = Digest::hash_slice_rfold(&hashes[..]);\n        let hash_lower_hex = format!(\"{:x}\", hash);\n\n        assert_eq!(\n            hash_lower_hex,\n            \"e137f4eb94d2387065454eecfe2cdb5584e3dbd5f1ca07fc511fffd13d234e8e\"\n        );\n\n        let proof = Digest::hash_slice_rfold(&hashes[2..]);\n        let hash_proof = Digest::hash_slice_with_proof(&hashes[..2], proof);\n\n        assert_eq!(hash, hash_proof);\n    }\n\n    #[test]\n    fn test_hash_merkle_odd() {\n        let hashes = vec![\n            Digest([1u8; 32]),\n            Digest([2u8; 32]),\n            Digest([3u8; 32]),\n            Digest([4u8; 32]),\n            Digest([5u8; 32]),\n        ];\n\n        let hash = Digest::hash_merkle_tree(hashes);\n        let hash_lower_hex = format!(\"{:x}\", hash);\n\n        assert_eq!(\n            hash_lower_hex,\n            \"775cec8133b97b0e8d4e97659025d5bac4ed7c8927d1bd99cf62114df57f3e74\"\n        );\n    }\n\n    #[test]\n    fn test_hash_merkle_even() {\n        let hashes = vec![\n            Digest([1u8; 32]),\n            Digest([2u8; 32]),\n            Digest([3u8; 32]),\n            Digest([4u8; 32]),\n            
Digest([5u8; 32]),\n            Digest([6u8; 32]),\n        ];\n\n        let hash = Digest::hash_merkle_tree(hashes);\n        let hash_lower_hex = format!(\"{:x}\", hash);\n\n        assert_eq!(\n            hash_lower_hex,\n            \"4bd50b08a8366b28c35bc831b95d147123bad01c29ffbf854b659c4b3ea4086c\"\n        );\n    }\n\n    #[test]\n    fn test_hash_btreemap() {\n        let mut map = BTreeMap::new();\n        let _ = map.insert(Digest([1u8; 32]), Digest([2u8; 32]));\n        let _ = map.insert(Digest([3u8; 32]), Digest([4u8; 32]));\n        let _ = map.insert(Digest([5u8; 32]), Digest([6u8; 32]));\n        let _ = map.insert(Digest([7u8; 32]), Digest([8u8; 32]));\n        let _ = map.insert(Digest([9u8; 32]), Digest([10u8; 32]));\n\n        let hash = Digest::hash_btree_map(&map).unwrap();\n        let hash_lower_hex = format!(\"{:x}\", hash);\n\n        assert_eq!(\n            hash_lower_hex,\n            \"fd1214a627473ffc6d6cc97e7012e6344d74abbf987b48cde5d0642049a0db98\"\n        );\n    }\n\n    #[test]\n    fn digest_deserialize_regression() {\n        let input = Digest([0; 32]);\n        let serialized = bincode::serialize(&input).expect(\"failed to serialize.\");\n\n        let expected = vec![\n            32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n        ];\n\n        assert_eq!(expected, serialized);\n    }\n\n    #[test]\n    fn should_assert_simple_digest_serialization_format() {\n        let digest_bytes = [0; 32];\n\n        assert_eq!(\n            Digest(digest_bytes).to_bytes().unwrap(),\n            digest_bytes.to_vec()\n        );\n    }\n\n    #[test]\n    fn merkle_roots_are_preimage_resistent() {\n        // Input data is two chunks long.\n        //\n        // The resulting tree will look like this:\n        //\n        // 1..0  a..j\n        // │     │\n        // └─────── R\n        //\n        // The Merkle root is thus: R = h( h(1..0) 
|| h(a..j) )\n        //\n        // h(1..0) = 807f1ba73147c3a96c2d63b38dd5a5f514f66290a1436bb9821e9f2a72eff263\n        // h(a..j) = 499e1cdb476523fedafc9d9db31125e2744f271578ea95b16ab4bd1905f05fea\n        // R=h(h(1..0)||h(a..j)) = 1319394a98d0cb194f960e3748baeb2045a9ec28aa51e0d42011be43f4a91f5f\n        // h(2u64le || R) = c31f0bb6ef569354d1a26c3a51f1ad4b6d87cef7f73a290ab6be8db6a9c7d4ee\n        //\n        // The final step is to hash h(2u64le || R), which is the length as little endian\n        // concatenated with the root.\n\n        // Constants used here assume a chunk size of 10 bytes.\n        assert_eq!(ChunkWithProof::CHUNK_SIZE_BYTES, 10);\n\n        let long_data = b\"1234567890abcdefghij\";\n        assert_eq!(long_data.len(), ChunkWithProof::CHUNK_SIZE_BYTES * 2);\n\n        // The `long_data_hash` is constructed manually here, as `Digest::hash` still had\n        // deactivated chunking code at the time this test was written.\n        let long_data_hash = Digest::hash_merkle_tree(\n            long_data\n                .as_ref()\n                .chunks(ChunkWithProof::CHUNK_SIZE_BYTES)\n                .map(Digest::blake2b_hash),\n        );\n\n        // The concatenation of `2u64` in little endian + the Merkle root hash `R`. Note that this\n        // is a valid hashable object on its own.\n        let maybe_colliding_short_data = [\n            2, 0, 0, 0, 0, 0, 0, 0, 19, 25, 57, 74, 152, 208, 203, 25, 79, 150, 14, 55, 72, 186,\n            235, 32, 69, 169, 236, 40, 170, 81, 224, 212, 32, 17, 190, 67, 244, 169, 31, 95,\n        ];\n\n        // Use `blake2b_hash` to work around the issue of the chunk size being shorter than the\n        // digest length.\n        let short_data_hash = Digest::blake2b_hash(maybe_colliding_short_data);\n\n        // Ensure there is no collision. 
You can verify this test is correct by temporarily changing\n        // the `Digest::hash_merkle_tree` function to use the unpadded `hash_pair` function, instead\n        // of `hash_merkle_root`.\n        assert_ne!(long_data_hash, short_data_hash);\n\n        // The expected input for the root hash is the colliding data, but prefixed with a full\n        // chunk of zeros.\n        let expected_final_hash_input = [\n            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 19, 25, 57, 74, 152, 208, 203,\n            25, 79, 150, 14, 55, 72, 186, 235, 32, 69, 169, 236, 40, 170, 81, 224, 212, 32, 17,\n            190, 67, 244, 169, 31, 95,\n        ];\n        assert_eq!(\n            Digest::blake2b_hash(expected_final_hash_input),\n            long_data_hash\n        );\n\n        // Another way to specify this sanity check is to say that the short and long data should\n        // hash differently.\n        //\n        // Note: This condition is true at the time of writing this test, where chunk hashing is\n        //       disabled. It should still hold true once enabled.\n        assert_ne!(\n            Digest::hash(maybe_colliding_short_data),\n            Digest::hash(long_data)\n        );\n\n        // In a similar manner, the internal padded data should also not hash equal to either, as it\n        // should be hashed using the chunking function.\n        assert_ne!(\n            Digest::hash(maybe_colliding_short_data),\n            Digest::hash(expected_final_hash_input)\n        );\n        assert_ne!(\n            Digest::hash(long_data),\n            Digest::hash(expected_final_hash_input)\n        );\n    }\n}\n"
  },
  {
    "path": "types/src/display_iter.rs",
    "content": "use core::{\n    cell::RefCell,\n    fmt::{self, Display, Formatter},\n};\n\n/// A helper to allow `Display` printing the items of an iterator with a comma and space between\n/// each.\n#[derive(Debug)]\npub struct DisplayIter<T>(RefCell<Option<T>>);\n\nimpl<T> DisplayIter<T> {\n    /// Returns a new `DisplayIter`.\n    pub fn new(item: T) -> Self {\n        DisplayIter(RefCell::new(Some(item)))\n    }\n}\n\nimpl<I, T> Display for DisplayIter<I>\nwhere\n    I: IntoIterator<Item = T>,\n    T: Display,\n{\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        if let Some(src) = self.0.borrow_mut().take() {\n            let mut first = true;\n            for item in src.into_iter().take(f.width().unwrap_or(usize::MAX)) {\n                if first {\n                    first = false;\n                    write!(f, \"{}\", item)?;\n                } else {\n                    write!(f, \", {}\", item)?;\n                }\n            }\n\n            Ok(())\n        } else {\n            write!(f, \"DisplayIter:GONE\")\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/era_id.rs",
    "content": "use alloc::vec::Vec;\nuse core::{\n    fmt::{self, Debug, Display, Formatter},\n    num::ParseIntError,\n    ops::{Add, AddAssign, Sub},\n    str::FromStr,\n};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    CLType, CLTyped,\n};\n\n/// Era ID newtype.\n#[derive(\n    Debug, Default, Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize,\n)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[cfg_attr(feature = \"testing\", derive(proptest_derive::Arbitrary))]\n#[serde(deny_unknown_fields)]\npub struct EraId(u64);\n\nimpl EraId {\n    /// Maximum possible value an [`EraId`] can hold.\n    pub const MAX: EraId = EraId(u64::MAX);\n\n    /// Creates new [`EraId`] instance.\n    pub const fn new(value: u64) -> EraId {\n        EraId(value)\n    }\n\n    /// Returns an iterator over era IDs of `num_eras` future eras starting from current.\n    pub fn iter(&self, num_eras: u64) -> impl Iterator<Item = EraId> {\n        let current_era_id = self.0;\n        (current_era_id..current_era_id + num_eras).map(EraId)\n    }\n\n    /// Returns an iterator over era IDs of `num_eras` future eras starting from current, plus the\n    /// provided one.\n    pub fn iter_inclusive(&self, num_eras: u64) -> impl Iterator<Item = EraId> {\n        let current_era_id = self.0;\n        (current_era_id..=current_era_id + num_eras).map(EraId)\n    }\n\n    /// Returns an iterator over a range of era IDs, starting from `start` and ending at `end`,\n    /// inclusive.\n    pub fn iter_range_inclusive(\n        start: EraId,\n        end: EraId,\n    ) -> impl DoubleEndedIterator<Item = EraId> 
{\n        (start.0..=end.0).map(EraId)\n    }\n\n    /// Increments the era.\n    ///\n    /// For `u64::MAX`, this returns `u64::MAX` again: We want to make sure this doesn't panic, and\n    /// that era number will never be reached in practice.\n    pub fn increment(&mut self) {\n        self.0 = self.0.saturating_add(1);\n    }\n\n    /// Returns a successor to current era.\n    ///\n    /// For `u64::MAX`, this returns `u64::MAX` again: We want to make sure this doesn't panic, and\n    /// that era number will never be reached in practice.\n    #[must_use]\n    pub fn successor(self) -> EraId {\n        EraId::from(self.0.saturating_add(1))\n    }\n\n    /// Returns the predecessor to current era, or `None` if genesis.\n    #[must_use]\n    pub fn predecessor(self) -> Option<EraId> {\n        self.0.checked_sub(1).map(EraId)\n    }\n\n    /// Returns the current era plus `x`, or `None` if that would overflow\n    pub fn checked_add(&self, x: u64) -> Option<EraId> {\n        self.0.checked_add(x).map(EraId)\n    }\n\n    /// Returns the current era minus `x`, or `None` if that would be less than `0`.\n    pub fn checked_sub(&self, x: u64) -> Option<EraId> {\n        self.0.checked_sub(x).map(EraId)\n    }\n\n    /// Returns the current era minus `x`, or `0` if that would be less than `0`.\n    #[must_use]\n    pub fn saturating_sub(&self, x: u64) -> EraId {\n        EraId::from(self.0.saturating_sub(x))\n    }\n\n    /// Returns the current era plus `x`, or [`EraId::MAX`] if overflow would occur.\n    #[must_use]\n    pub fn saturating_add(self, rhs: u64) -> EraId {\n        EraId(self.0.saturating_add(rhs))\n    }\n\n    /// Returns the current era times `x`, or [`EraId::MAX`] if overflow would occur.\n    #[must_use]\n    pub fn saturating_mul(&self, x: u64) -> EraId {\n        EraId::from(self.0.saturating_mul(x))\n    }\n\n    /// Returns whether this is era 0.\n    pub fn is_genesis(&self) -> bool {\n        self.0 == 0\n    }\n\n    /// Returns little 
endian bytes.\n    pub fn to_le_bytes(self) -> [u8; 8] {\n        self.0.to_le_bytes()\n    }\n\n    /// Returns a raw value held by this [`EraId`] instance.\n    ///\n    /// You should prefer [`From`] trait implementations over this method where possible.\n    pub fn value(self) -> u64 {\n        self.0\n    }\n\n    /// Returns a random `EraId`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        EraId(rng.gen_range(0..1_000_000))\n    }\n}\n\nimpl FromStr for EraId {\n    type Err = ParseIntError;\n\n    fn from_str(s: &str) -> Result<Self, Self::Err> {\n        u64::from_str(s).map(EraId)\n    }\n}\n\nimpl Add<u64> for EraId {\n    type Output = EraId;\n\n    #[allow(clippy::arithmetic_side_effects)] // The caller must make sure this doesn't overflow.\n    fn add(self, x: u64) -> EraId {\n        EraId::from(self.0 + x)\n    }\n}\n\nimpl AddAssign<u64> for EraId {\n    fn add_assign(&mut self, x: u64) {\n        self.0 += x;\n    }\n}\n\nimpl Sub<u64> for EraId {\n    type Output = EraId;\n\n    #[allow(clippy::arithmetic_side_effects)] // The caller must make sure this doesn't overflow.\n    fn sub(self, x: u64) -> EraId {\n        EraId::from(self.0 - x)\n    }\n}\n\nimpl Display for EraId {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"era {}\", self.0)\n    }\n}\n\nimpl From<EraId> for u64 {\n    fn from(era_id: EraId) -> Self {\n        era_id.value()\n    }\n}\n\nimpl From<u64> for EraId {\n    fn from(era_id: u64) -> Self {\n        EraId(era_id)\n    }\n}\n\nimpl ToBytes for EraId {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n\n    #[inline(always)]\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.0.write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for EraId {\n    
fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (id_value, remainder) = u64::from_bytes(bytes)?;\n        let era_id = EraId::from(id_value);\n        Ok((era_id, remainder))\n    }\n}\n\nimpl CLTyped for EraId {\n    fn cl_type() -> CLType {\n        CLType::U64\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use proptest::prelude::*;\n\n    use super::*;\n    use crate::gens::era_id_arb;\n\n    #[test]\n    fn should_calculate_correct_inclusive_future_eras() {\n        let auction_delay = 3;\n\n        let current_era = EraId::from(42);\n\n        let window: Vec<EraId> = current_era.iter_inclusive(auction_delay).collect();\n        assert_eq!(window.len(), auction_delay as usize + 1);\n        assert_eq!(window.first(), Some(&current_era));\n        assert_eq!(\n            window.iter().next_back(),\n            Some(&(current_era + auction_delay))\n        );\n    }\n\n    #[test]\n    fn should_have_valid_genesis_era_id() {\n        let expected_initial_era_id = EraId::from(0);\n        assert!(expected_initial_era_id.is_genesis());\n        assert!(!expected_initial_era_id.successor().is_genesis())\n    }\n\n    #[test]\n    fn should_increment_era_id() {\n        let mut era = EraId::from(0);\n        assert!(era.is_genesis());\n        era.increment();\n        assert_eq!(era.value(), 1, \"should have incremented to 1\");\n    }\n\n    proptest! {\n        #[test]\n        fn bytesrepr_roundtrip(era_id in era_id_arb()) {\n            bytesrepr::test_serialization_roundtrip(&era_id);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/execution/effects.rs",
    "content": "use alloc::vec::Vec;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"testing\", test))]\nuse super::TransformKindV2;\nuse super::TransformV2;\nuse crate::bytesrepr::{self, FromBytes, ToBytes};\n\n/// A log of all transforms produced during execution.\n#[derive(Debug, Clone, Eq, Default, PartialEq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct Effects(Vec<TransformV2>);\n\nimpl Effects {\n    /// Constructs a new, empty `Effects`.\n    pub const fn new() -> Self {\n        Effects(vec![])\n    }\n\n    /// Returns a reference to the transforms.\n    pub fn transforms(&self) -> &[TransformV2] {\n        &self.0\n    }\n\n    /// Appends a transform.\n    pub fn push(&mut self, transform: TransformV2) {\n        self.0.push(transform)\n    }\n\n    /// Moves all elements from `other` into `self`.\n    pub fn append(&mut self, mut other: Self) {\n        self.0.append(&mut other.0);\n    }\n\n    /// Returns `true` if there are no transforms recorded.\n    pub fn is_empty(&self) -> bool {\n        self.0.is_empty()\n    }\n\n    /// Returns the number of transforms recorded.\n    pub fn len(&self) -> usize {\n        self.0.len()\n    }\n\n    /// Consumes `self`, returning the wrapped vec.\n    pub fn value(self) -> Vec<TransformV2> {\n        self.0\n    }\n\n    /// Returns a random `Effects`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random<R: Rng + ?Sized>(rng: &mut R) -> Self {\n        let mut effects = Effects::new();\n        let transform_count = rng.gen_range(0..6);\n        for _ in 0..transform_count {\n            effects.push(TransformV2::new(rng.gen(), TransformKindV2::random(rng)));\n        }\n        effects\n    
}\n}\n\nimpl ToBytes for Effects {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.0.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n}\n\nimpl FromBytes for Effects {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (transforms, remainder) = Vec::<TransformV2>::from_bytes(bytes)?;\n        Ok((Effects(transforms), remainder))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::testing::TestRng;\n\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        let effects = Effects::random(rng);\n        bytesrepr::test_serialization_roundtrip(&effects);\n    }\n}\n"
  },
  {
    "path": "types/src/execution/execution_result.rs",
    "content": "use alloc::{boxed::Box, string::String, vec::Vec};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::distributions::Distribution;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\nuse tracing::error;\n\nuse super::{ExecutionResultV1, ExecutionResultV2};\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    Transfer, U512,\n};\n\nconst V1_TAG: u8 = 0;\nconst V2_TAG: u8 = 1;\n\n/// The versioned result of executing a single deploy.\n#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub enum ExecutionResult {\n    /// Version 1 of execution result type.\n    #[serde(rename = \"Version1\")]\n    V1(ExecutionResultV1),\n    /// Version 2 of execution result type.\n    #[serde(rename = \"Version2\")]\n    V2(Box<ExecutionResultV2>),\n}\n\nimpl ExecutionResult {\n    /// Returns cost.\n    pub fn cost(&self) -> U512 {\n        match self {\n            ExecutionResult::V1(result) => result.cost(),\n            ExecutionResult::V2(result) => result.cost,\n        }\n    }\n\n    /// Returns consumed amount.\n    pub fn consumed(&self) -> U512 {\n        match self {\n            ExecutionResult::V1(result) => result.cost(),\n            ExecutionResult::V2(result) => result.consumed.value(),\n        }\n    }\n\n    /// Returns refund amount.\n    pub fn refund(&self) -> Option<U512> {\n        match self {\n            ExecutionResult::V1(_) => None,\n            ExecutionResult::V2(result) => Some(result.refund),\n        }\n    }\n\n    /// Returns a random ExecutionResult.\n    #[cfg(any(feature = \"testing\", test))]\n    pub 
fn random(rng: &mut TestRng) -> Self {\n        if rng.gen_bool(0.5) {\n            Self::V1(rand::distributions::Standard.sample(rng))\n        } else {\n            Self::V2(Box::new(ExecutionResultV2::random(rng)))\n        }\n    }\n\n    /// Returns the error message, if any.\n    pub fn error_message(&self) -> Option<String> {\n        match self {\n            ExecutionResult::V1(v1) => match v1 {\n                ExecutionResultV1::Failure { error_message, .. } => Some(error_message.clone()),\n                ExecutionResultV1::Success { .. } => None,\n            },\n            ExecutionResult::V2(v2) => v2.error_message.clone(),\n        }\n    }\n\n    /// Returns transfers, if any.\n    pub fn transfers(&self) -> Vec<Transfer> {\n        match self {\n            ExecutionResult::V1(_) => {\n                vec![]\n            }\n            ExecutionResult::V2(execution_result) => execution_result.transfers.clone(),\n        }\n    }\n}\n\nimpl From<ExecutionResultV1> for ExecutionResult {\n    fn from(value: ExecutionResultV1) -> Self {\n        ExecutionResult::V1(value)\n    }\n}\n\nimpl From<ExecutionResultV2> for ExecutionResult {\n    fn from(value: ExecutionResultV2) -> Self {\n        ExecutionResult::V2(Box::new(value))\n    }\n}\n\nimpl ToBytes for ExecutionResult {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                ExecutionResult::V1(result) => result.serialized_length(),\n                ExecutionResult::V2(result) => result.serialized_length(),\n            }\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            ExecutionResult::V1(result) => {\n                V1_TAG.write_bytes(writer)?;\n                
result.write_bytes(writer)\n            }\n            ExecutionResult::V2(result) => {\n                V2_TAG.write_bytes(writer)?;\n                result.write_bytes(writer)\n            }\n        }\n    }\n}\n\nimpl FromBytes for ExecutionResult {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        if bytes.is_empty() {\n            error!(\"FromBytes for ExecutionResult: bytes length should not be 0\");\n        }\n        let (tag, remainder) = match u8::from_bytes(bytes) {\n            Ok((tag, rem)) => (tag, rem),\n            Err(err) => {\n                error!(%err, \"FromBytes for ExecutionResult\");\n                return Err(err);\n            }\n        };\n        match tag {\n            V1_TAG => {\n                let (result, remainder) = ExecutionResultV1::from_bytes(remainder)?;\n                Ok((ExecutionResult::V1(result), remainder))\n            }\n            V2_TAG => {\n                let (result, remainder) = ExecutionResultV2::from_bytes(remainder)?;\n                Ok((ExecutionResult::V2(Box::new(result)), remainder))\n            }\n            _ => {\n                error!(%tag, rem_len = remainder.len(), \"FromBytes for ExecutionResult: unknown tag\");\n                Err(bytesrepr::Error::Formatting)\n            }\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use rand::Rng;\n\n    use super::*;\n    use crate::testing::TestRng;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        let execution_result = ExecutionResult::V1(rng.gen());\n        bytesrepr::test_serialization_roundtrip(&execution_result);\n        let execution_result = ExecutionResult::from(ExecutionResultV2::random(rng));\n        bytesrepr::test_serialization_roundtrip(&execution_result);\n    }\n\n    #[test]\n    fn bincode_roundtrip() {\n        let rng = &mut TestRng::new();\n        let execution_result = ExecutionResult::V1(rng.gen());\n        let serialized 
= bincode::serialize(&execution_result).unwrap();\n        let deserialized = bincode::deserialize(&serialized).unwrap();\n        assert_eq!(execution_result, deserialized);\n\n        let execution_result = ExecutionResult::from(ExecutionResultV2::random(rng));\n        let serialized = bincode::serialize(&execution_result).unwrap();\n        let deserialized = bincode::deserialize(&serialized).unwrap();\n        assert_eq!(execution_result, deserialized);\n    }\n\n    #[test]\n    fn json_roundtrip() {\n        let rng = &mut TestRng::new();\n        let execution_result = ExecutionResult::V1(rng.gen());\n        let serialized = serde_json::to_string(&execution_result).unwrap();\n        let deserialized = serde_json::from_str(&serialized).unwrap();\n        assert_eq!(execution_result, deserialized);\n\n        let execution_result = ExecutionResult::from(ExecutionResultV2::random(rng));\n        let serialized = serde_json::to_string(&execution_result).unwrap();\n        println!(\"{:#}\", serialized);\n        let deserialized = serde_json::from_str(&serialized).unwrap();\n        assert_eq!(execution_result, deserialized);\n    }\n}\n"
  },
  {
    "path": "types/src/execution/execution_result_v1.rs",
    "content": "//! Types for reporting results of execution pre `casper-node` v2.0.0.\n\nuse core::convert::TryFrom;\n\nuse alloc::{boxed::Box, string::String, vec::Vec};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse num::{FromPrimitive, ToPrimitive};\nuse num_derive::{FromPrimitive, ToPrimitive};\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    seq::SliceRandom,\n    Rng,\n};\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    account::AccountHash,\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    system::auction::{Bid, BidKind, EraInfo, UnbondingPurse, WithdrawPurse},\n    CLValue, DeployInfo, Key, TransferAddr, TransferV1, U128, U256, U512,\n};\n\n#[derive(FromPrimitive, ToPrimitive, Debug)]\n#[repr(u8)]\nenum ExecutionResultTag {\n    Failure = 0,\n    Success = 1,\n}\n\nimpl TryFrom<u8> for ExecutionResultTag {\n    type Error = bytesrepr::Error;\n\n    fn try_from(value: u8) -> Result<Self, Self::Error> {\n        FromPrimitive::from_u8(value).ok_or(bytesrepr::Error::Formatting)\n    }\n}\n\n#[derive(FromPrimitive, ToPrimitive, Debug)]\n#[repr(u8)]\nenum OpTag {\n    Read = 0,\n    Write = 1,\n    Add = 2,\n    NoOp = 3,\n    Prune = 4,\n}\n\nimpl TryFrom<u8> for OpTag {\n    type Error = bytesrepr::Error;\n\n    fn try_from(value: u8) -> Result<Self, Self::Error> {\n        FromPrimitive::from_u8(value).ok_or(bytesrepr::Error::Formatting)\n    }\n}\n\n#[derive(FromPrimitive, ToPrimitive, Debug)]\n#[repr(u8)]\nenum TransformTag {\n    Identity = 0,\n    WriteCLValue = 1,\n    WriteAccount = 2,\n    WriteByteCode = 3,\n    WriteContract = 4,\n    WritePackage = 5,\n    WriteDeployInfo = 6,\n    WriteTransfer = 7,\n    WriteEraInfo = 8,\n    WriteBid = 9,\n    WriteWithdraw = 10,\n    AddInt32 = 11,\n    AddUInt64 = 12,\n    AddUInt128 = 13,\n    AddUInt256 = 14,\n    AddUInt512 = 15,\n    AddKeys 
= 16,\n    Failure = 17,\n    WriteUnbonding = 18,\n    WriteAddressableEntity = 19,\n    Prune = 20,\n    WriteBidKind = 21,\n}\n\nimpl TryFrom<u8> for TransformTag {\n    type Error = bytesrepr::Error;\n\n    fn try_from(value: u8) -> Result<Self, Self::Error> {\n        FromPrimitive::from_u8(value).ok_or(bytesrepr::Error::Formatting)\n    }\n}\n\n/// The result of executing a single deploy.\n#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub enum ExecutionResultV1 {\n    /// The result of a failed execution.\n    Failure {\n        /// The effect of executing the deploy.\n        effect: ExecutionEffect,\n        /// A record of version 1 Transfers performed while executing the deploy.\n        transfers: Vec<TransferAddr>,\n        /// The cost of executing the deploy.\n        cost: U512,\n        /// The error message associated with executing the deploy.\n        error_message: String,\n    },\n    /// The result of a successful execution.\n    Success {\n        /// The effect of executing the deploy.\n        effect: ExecutionEffect,\n        /// A record of Transfers performed while executing the deploy.\n        transfers: Vec<TransferAddr>,\n        /// The cost of executing the deploy.\n        cost: U512,\n    },\n}\n\nimpl ExecutionResultV1 {\n    /// Returns cost amount.\n    pub fn cost(&self) -> U512 {\n        match self {\n            ExecutionResultV1::Failure { cost, .. } | ExecutionResultV1::Success { cost, .. 
} => {\n                *cost\n            }\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<ExecutionResultV1> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> ExecutionResultV1 {\n        let op_count = rng.gen_range(0..6);\n        let mut operations = Vec::new();\n        for _ in 0..op_count {\n            let op = [OpKind::Read, OpKind::Add, OpKind::NoOp, OpKind::Write]\n                .choose(rng)\n                .unwrap();\n            operations.push(Operation {\n                key: rng.gen::<u64>().to_string(),\n                kind: *op,\n            });\n        }\n\n        let transform_count = rng.gen_range(0..6);\n        let mut transforms = Vec::new();\n        for _ in 0..transform_count {\n            transforms.push(TransformV1 {\n                key: rng.gen::<u64>().to_string(),\n                transform: rng.gen(),\n            });\n        }\n\n        let execution_effect = ExecutionEffect {\n            operations,\n            transforms,\n        };\n\n        let transfer_count = rng.gen_range(0..6);\n        let mut transfers = Vec::new();\n        for _ in 0..transfer_count {\n            transfers.push(TransferAddr::new(rng.gen()))\n        }\n\n        if rng.gen() {\n            ExecutionResultV1::Failure {\n                effect: execution_effect,\n                transfers,\n                cost: rng.gen::<u64>().into(),\n                error_message: format!(\"Error message {}\", rng.gen::<u64>()),\n            }\n        } else {\n            ExecutionResultV1::Success {\n                effect: execution_effect,\n                transfers,\n                cost: rng.gen::<u64>().into(),\n            }\n        }\n    }\n}\n\nimpl ToBytes for ExecutionResultV1 {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            ExecutionResultV1::Failure {\n                effect,\n                transfers,\n      
          cost,\n                error_message,\n            } => {\n                (ExecutionResultTag::Failure as u8).write_bytes(writer)?;\n                effect.write_bytes(writer)?;\n                transfers.write_bytes(writer)?;\n                cost.write_bytes(writer)?;\n                error_message.write_bytes(writer)\n            }\n            ExecutionResultV1::Success {\n                effect,\n                transfers,\n                cost,\n            } => {\n                (ExecutionResultTag::Success as u8).write_bytes(writer)?;\n                effect.write_bytes(writer)?;\n                transfers.write_bytes(writer)?;\n                cost.write_bytes(writer)\n            }\n        }\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                ExecutionResultV1::Failure {\n                    effect,\n                    transfers,\n                    cost,\n                    error_message,\n                } => {\n                    effect.serialized_length()\n                        + transfers.serialized_length()\n                        + cost.serialized_length()\n                        + error_message.serialized_length()\n                }\n                ExecutionResultV1::Success {\n                    effect,\n                    transfers,\n                    cost,\n                } => {\n                    effect.serialized_length()\n                        + transfers.serialized_length()\n                        + cost.serialized_length()\n                }\n            }\n    }\n}\n\nimpl FromBytes for ExecutionResultV1 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n    
    match TryFrom::try_from(tag)? {\n            ExecutionResultTag::Failure => {\n                let (effect, remainder) = ExecutionEffect::from_bytes(remainder)?;\n                let (transfers, remainder) = Vec::<TransferAddr>::from_bytes(remainder)?;\n                let (cost, remainder) = U512::from_bytes(remainder)?;\n                let (error_message, remainder) = String::from_bytes(remainder)?;\n                let execution_result = ExecutionResultV1::Failure {\n                    effect,\n                    transfers,\n                    cost,\n                    error_message,\n                };\n                Ok((execution_result, remainder))\n            }\n            ExecutionResultTag::Success => {\n                let (execution_effect, remainder) = ExecutionEffect::from_bytes(remainder)?;\n                let (transfers, remainder) = Vec::<TransferAddr>::from_bytes(remainder)?;\n                let (cost, remainder) = U512::from_bytes(remainder)?;\n                let execution_result = ExecutionResultV1::Success {\n                    effect: execution_effect,\n                    transfers,\n                    cost,\n                };\n                Ok((execution_result, remainder))\n            }\n        }\n    }\n}\n\n/// The sequence of execution transforms from a single deploy.\n#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Default, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct ExecutionEffect {\n    /// The resulting operations.\n    pub operations: Vec<Operation>,\n    /// The sequence of execution transforms.\n    pub transforms: Vec<TransformV1>,\n}\n\nimpl ToBytes for ExecutionEffect {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.operations.write_bytes(writer)?;\n        self.transforms.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> 
Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.operations.serialized_length() + self.transforms.serialized_length()\n    }\n}\n\nimpl FromBytes for ExecutionEffect {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (operations, remainder) = Vec::<Operation>::from_bytes(bytes)?;\n        let (transforms, remainder) = Vec::<TransformV1>::from_bytes(remainder)?;\n        let json_effects = ExecutionEffect {\n            operations,\n            transforms,\n        };\n        Ok((json_effects, remainder))\n    }\n}\n\n/// An operation performed while executing a deploy.\n#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct Operation {\n    /// The formatted string of the `Key`.\n    pub key: String,\n    /// The type of operation.\n    pub kind: OpKind,\n}\n\nimpl ToBytes for Operation {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.key.write_bytes(writer)?;\n        self.kind.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.key.serialized_length() + self.kind.serialized_length()\n    }\n}\n\nimpl FromBytes for Operation {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (key, remainder) = String::from_bytes(bytes)?;\n        let (kind, remainder) = OpKind::from_bytes(remainder)?;\n        let operation = Operation { key, kind };\n        Ok((operation, remainder))\n  
  }\n}\n\n/// The type of operation performed while executing a deploy.\n#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub enum OpKind {\n    /// A read operation.\n    Read,\n    /// A write operation.\n    Write,\n    /// An addition.\n    Add,\n    /// An operation which has no effect.\n    NoOp,\n    /// A prune operation.\n    Prune,\n}\n\nimpl OpKind {\n    fn tag(&self) -> OpTag {\n        match self {\n            OpKind::Read => OpTag::Read,\n            OpKind::Write => OpTag::Write,\n            OpKind::Add => OpTag::Add,\n            OpKind::NoOp => OpTag::NoOp,\n            OpKind::Prune => OpTag::Prune,\n        }\n    }\n}\n\nimpl ToBytes for OpKind {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        let tag_byte = self.tag().to_u8().ok_or(bytesrepr::Error::Formatting)?;\n        tag_byte.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n    }\n}\n\nimpl FromBytes for OpKind {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match TryFrom::try_from(tag)? 
{\n            OpTag::Read => Ok((OpKind::Read, remainder)),\n            OpTag::Write => Ok((OpKind::Write, remainder)),\n            OpTag::Add => Ok((OpKind::Add, remainder)),\n            OpTag::NoOp => Ok((OpKind::NoOp, remainder)),\n            OpTag::Prune => Ok((OpKind::Prune, remainder)),\n        }\n    }\n}\n\n/// A transformation performed while executing a deploy.\n#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct TransformV1 {\n    /// The formatted string of the `Key`.\n    pub key: String,\n    /// The transformation.\n    pub transform: TransformKindV1,\n}\n\nimpl ToBytes for TransformV1 {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.key.write_bytes(writer)?;\n        self.transform.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.key.serialized_length() + self.transform.serialized_length()\n    }\n}\n\nimpl FromBytes for TransformV1 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (key, remainder) = String::from_bytes(bytes)?;\n        let (transform, remainder) = TransformKindV1::from_bytes(remainder)?;\n        let transform_entry = TransformV1 { key, transform };\n        Ok((transform_entry, remainder))\n    }\n}\n\n/// The actual transformation performed while executing a deploy.\n#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub enum TransformKindV1 {\n    /// A transform having no effect.\n 
   Identity,\n    /// Writes the given CLValue to global state.\n    WriteCLValue(CLValue),\n    /// Writes the given Account to global state.\n    WriteAccount(AccountHash),\n    /// Writes a smart contract as Wasm to global state.\n    WriteContractWasm,\n    /// Writes a smart contract to global state.\n    WriteContract,\n    /// Writes a smart contract package to global state.\n    WriteContractPackage,\n    /// Writes the given DeployInfo to global state.\n    WriteDeployInfo(DeployInfo),\n    /// Writes the given EraInfo to global state.\n    WriteEraInfo(EraInfo),\n    /// Writes the given version 1 Transfer to global state.\n    WriteTransfer(TransferV1),\n    /// Writes the given Bid to global state.\n    WriteBid(Box<Bid>),\n    /// Writes the given Withdraw to global state.\n    WriteWithdraw(Vec<WithdrawPurse>),\n    /// Adds the given `i32`.\n    AddInt32(i32),\n    /// Adds the given `u64`.\n    AddUInt64(u64),\n    /// Adds the given `U128`.\n    AddUInt128(U128),\n    /// Adds the given `U256`.\n    AddUInt256(U256),\n    /// Adds the given `U512`.\n    AddUInt512(U512),\n    /// Adds the given collection of named keys.\n    AddKeys(Vec<NamedKey>),\n    /// A failed transformation, containing an error message.\n    Failure(String),\n    /// Writes the given Unbonding to global state.\n    WriteUnbonding(Vec<UnbondingPurse>),\n    /// Writes the addressable entity to global state.\n    WriteAddressableEntity,\n    /// Removes pathing to keyed value within global state. 
This is a form of soft delete; the\n    /// underlying value remains in global state and is reachable from older global state root\n    /// hashes where it was included in the hash up.\n    Prune(Key),\n    /// Writes the given BidKind to global state.\n    WriteBidKind(BidKind),\n}\n\nimpl ToBytes for TransformKindV1 {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            TransformKindV1::Identity => (TransformTag::Identity as u8).write_bytes(writer),\n            TransformKindV1::WriteCLValue(value) => {\n                (TransformTag::WriteCLValue as u8).write_bytes(writer)?;\n                value.write_bytes(writer)\n            }\n            TransformKindV1::WriteAccount(account_hash) => {\n                (TransformTag::WriteAccount as u8).write_bytes(writer)?;\n                account_hash.write_bytes(writer)\n            }\n            TransformKindV1::WriteContractWasm => {\n                (TransformTag::WriteByteCode as u8).write_bytes(writer)\n            }\n            TransformKindV1::WriteContract => {\n                (TransformTag::WriteContract as u8).write_bytes(writer)\n            }\n            TransformKindV1::WriteContractPackage => {\n                (TransformTag::WritePackage as u8).write_bytes(writer)\n            }\n            TransformKindV1::WriteDeployInfo(deploy_info) => {\n                (TransformTag::WriteDeployInfo as u8).write_bytes(writer)?;\n                deploy_info.write_bytes(writer)\n            }\n            TransformKindV1::WriteEraInfo(era_info) => {\n                (TransformTag::WriteEraInfo as u8).write_bytes(writer)?;\n                era_info.write_bytes(writer)\n            }\n            TransformKindV1::WriteTransfer(transfer) => {\n                (TransformTag::WriteTransfer as u8).write_bytes(writer)?;\n                transfer.write_bytes(writer)\n            }\n            TransformKindV1::WriteBid(bid) => {\n                
(TransformTag::WriteBid as u8).write_bytes(writer)?;\n                bid.write_bytes(writer)\n            }\n            TransformKindV1::WriteWithdraw(unbonding_purses) => {\n                (TransformTag::WriteWithdraw as u8).write_bytes(writer)?;\n                unbonding_purses.write_bytes(writer)\n            }\n            TransformKindV1::AddInt32(value) => {\n                (TransformTag::AddInt32 as u8).write_bytes(writer)?;\n                value.write_bytes(writer)\n            }\n            TransformKindV1::AddUInt64(value) => {\n                (TransformTag::AddUInt64 as u8).write_bytes(writer)?;\n                value.write_bytes(writer)\n            }\n            TransformKindV1::AddUInt128(value) => {\n                (TransformTag::AddUInt128 as u8).write_bytes(writer)?;\n                value.write_bytes(writer)\n            }\n            TransformKindV1::AddUInt256(value) => {\n                (TransformTag::AddUInt256 as u8).write_bytes(writer)?;\n                value.write_bytes(writer)\n            }\n            TransformKindV1::AddUInt512(value) => {\n                (TransformTag::AddUInt512 as u8).write_bytes(writer)?;\n                value.write_bytes(writer)\n            }\n            TransformKindV1::AddKeys(value) => {\n                (TransformTag::AddKeys as u8).write_bytes(writer)?;\n                value.write_bytes(writer)\n            }\n            TransformKindV1::Failure(value) => {\n                (TransformTag::Failure as u8).write_bytes(writer)?;\n                value.write_bytes(writer)\n            }\n            TransformKindV1::WriteUnbonding(value) => {\n                (TransformTag::WriteUnbonding as u8).write_bytes(writer)?;\n                value.write_bytes(writer)\n            }\n            TransformKindV1::WriteAddressableEntity => {\n                (TransformTag::WriteAddressableEntity as u8).write_bytes(writer)\n            }\n            TransformKindV1::Prune(value) => {\n                
(TransformTag::Prune as u8).write_bytes(writer)?;\n                value.write_bytes(writer)\n            }\n            TransformKindV1::WriteBidKind(value) => {\n                (TransformTag::WriteBidKind as u8).write_bytes(writer)?;\n                value.write_bytes(writer)\n            }\n        }\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        let body_len = match self {\n            TransformKindV1::Prune(key) => key.serialized_length(),\n            TransformKindV1::WriteCLValue(value) => value.serialized_length(),\n            TransformKindV1::WriteAccount(value) => value.serialized_length(),\n            TransformKindV1::WriteDeployInfo(value) => value.serialized_length(),\n            TransformKindV1::WriteEraInfo(value) => value.serialized_length(),\n            TransformKindV1::WriteTransfer(value) => value.serialized_length(),\n            TransformKindV1::AddInt32(value) => value.serialized_length(),\n            TransformKindV1::AddUInt64(value) => value.serialized_length(),\n            TransformKindV1::AddUInt128(value) => value.serialized_length(),\n            TransformKindV1::AddUInt256(value) => value.serialized_length(),\n            TransformKindV1::AddUInt512(value) => value.serialized_length(),\n            TransformKindV1::AddKeys(value) => value.serialized_length(),\n            TransformKindV1::Failure(value) => value.serialized_length(),\n            TransformKindV1::Identity\n            | TransformKindV1::WriteContractWasm\n            | TransformKindV1::WriteContract\n            | TransformKindV1::WriteContractPackage\n            | TransformKindV1::WriteAddressableEntity => 0,\n            TransformKindV1::WriteBid(value) => value.serialized_length(),\n            TransformKindV1::WriteBidKind(value) => 
value.serialized_length(),\n            TransformKindV1::WriteWithdraw(value) => value.serialized_length(),\n            TransformKindV1::WriteUnbonding(value) => value.serialized_length(),\n        };\n        U8_SERIALIZED_LENGTH + body_len\n    }\n}\n\nimpl FromBytes for TransformKindV1 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match TryFrom::try_from(tag)? {\n            TransformTag::Identity => Ok((TransformKindV1::Identity, remainder)),\n            TransformTag::WriteCLValue => {\n                let (cl_value, remainder) = CLValue::from_bytes(remainder)?;\n                Ok((TransformKindV1::WriteCLValue(cl_value), remainder))\n            }\n            TransformTag::WriteAccount => {\n                let (account_hash, remainder) = AccountHash::from_bytes(remainder)?;\n                Ok((TransformKindV1::WriteAccount(account_hash), remainder))\n            }\n            TransformTag::WriteByteCode => Ok((TransformKindV1::WriteContractWasm, remainder)),\n            TransformTag::WriteContract => Ok((TransformKindV1::WriteContract, remainder)),\n            TransformTag::WritePackage => Ok((TransformKindV1::WriteContractPackage, remainder)),\n            TransformTag::WriteDeployInfo => {\n                let (deploy_info, remainder) = DeployInfo::from_bytes(remainder)?;\n                Ok((TransformKindV1::WriteDeployInfo(deploy_info), remainder))\n            }\n            TransformTag::WriteEraInfo => {\n                let (era_info, remainder) = EraInfo::from_bytes(remainder)?;\n                Ok((TransformKindV1::WriteEraInfo(era_info), remainder))\n            }\n            TransformTag::WriteTransfer => {\n                let (transfer, remainder) = TransferV1::from_bytes(remainder)?;\n                Ok((TransformKindV1::WriteTransfer(transfer), remainder))\n            }\n            TransformTag::AddInt32 => {\n                let 
(value_i32, remainder) = i32::from_bytes(remainder)?;\n                Ok((TransformKindV1::AddInt32(value_i32), remainder))\n            }\n            TransformTag::AddUInt64 => {\n                let (value_u64, remainder) = u64::from_bytes(remainder)?;\n                Ok((TransformKindV1::AddUInt64(value_u64), remainder))\n            }\n            TransformTag::AddUInt128 => {\n                let (value_u128, remainder) = U128::from_bytes(remainder)?;\n                Ok((TransformKindV1::AddUInt128(value_u128), remainder))\n            }\n            TransformTag::AddUInt256 => {\n                let (value_u256, remainder) = U256::from_bytes(remainder)?;\n                Ok((TransformKindV1::AddUInt256(value_u256), remainder))\n            }\n            TransformTag::AddUInt512 => {\n                let (value_u512, remainder) = U512::from_bytes(remainder)?;\n                Ok((TransformKindV1::AddUInt512(value_u512), remainder))\n            }\n            TransformTag::AddKeys => {\n                let (value, remainder) = Vec::<NamedKey>::from_bytes(remainder)?;\n                Ok((TransformKindV1::AddKeys(value), remainder))\n            }\n            TransformTag::Failure => {\n                let (value, remainder) = String::from_bytes(remainder)?;\n                Ok((TransformKindV1::Failure(value), remainder))\n            }\n            TransformTag::WriteBid => {\n                let (bid, remainder) = Bid::from_bytes(remainder)?;\n                Ok((TransformKindV1::WriteBid(Box::new(bid)), remainder))\n            }\n            TransformTag::WriteWithdraw => {\n                let (withdraw_purses, remainder) =\n                    <Vec<WithdrawPurse> as FromBytes>::from_bytes(remainder)?;\n                Ok((TransformKindV1::WriteWithdraw(withdraw_purses), remainder))\n            }\n            TransformTag::WriteUnbonding => {\n                let (unbonding_purses, remainder) =\n                    <Vec<UnbondingPurse> as 
FromBytes>::from_bytes(remainder)?;\n                Ok((TransformKindV1::WriteUnbonding(unbonding_purses), remainder))\n            }\n            TransformTag::WriteAddressableEntity => {\n                Ok((TransformKindV1::WriteAddressableEntity, remainder))\n            }\n            TransformTag::Prune => {\n                let (key, remainder) = Key::from_bytes(remainder)?;\n                Ok((TransformKindV1::Prune(key), remainder))\n            }\n            TransformTag::WriteBidKind => {\n                let (value, remainder) = BidKind::from_bytes(remainder)?;\n                Ok((TransformKindV1::WriteBidKind(value), remainder))\n            }\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<TransformKindV1> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> TransformKindV1 {\n        // TODO - cover all options\n        match rng.gen_range(0..13) {\n            0 => TransformKindV1::Identity,\n            1 => TransformKindV1::WriteCLValue(CLValue::from_t(true).unwrap()),\n            2 => TransformKindV1::WriteAccount(AccountHash::new(rng.gen())),\n            3 => TransformKindV1::WriteContractWasm,\n            4 => TransformKindV1::WriteContract,\n            5 => TransformKindV1::WriteContractPackage,\n            6 => TransformKindV1::AddInt32(rng.gen()),\n            7 => TransformKindV1::AddUInt64(rng.gen()),\n            8 => TransformKindV1::AddUInt128(rng.gen::<u64>().into()),\n            9 => TransformKindV1::AddUInt256(rng.gen::<u64>().into()),\n            10 => TransformKindV1::AddUInt512(rng.gen::<u64>().into()),\n            11 => {\n                let mut named_keys = Vec::new();\n                for _ in 0..rng.gen_range(1..6) {\n                    named_keys.push(NamedKey {\n                        name: rng.gen::<u64>().to_string(),\n                        key: rng.gen::<u64>().to_string(),\n                    });\n                }\n                
TransformKindV1::AddKeys(named_keys)\n            }\n            12 => TransformKindV1::Failure(rng.gen::<u64>().to_string()),\n            13 => TransformKindV1::WriteAddressableEntity,\n            _ => unreachable!(),\n        }\n    }\n}\n\n/// A key with a name.\n#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Default, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct NamedKey {\n    /// The name of the entry.\n    pub name: String,\n    /// The value of the entry: a casper `Key` type.\n    #[cfg_attr(feature = \"json-schema\", schemars(with = \"Key\"))]\n    pub key: String,\n}\n\nimpl ToBytes for NamedKey {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.name.write_bytes(writer)?;\n        self.key.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.name.serialized_length() + self.key.serialized_length()\n    }\n}\n\nimpl FromBytes for NamedKey {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (name, remainder) = String::from_bytes(bytes)?;\n        let (key, remainder) = String::from_bytes(remainder)?;\n        let named_key = NamedKey { name, key };\n        Ok((named_key, remainder))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::testing::TestRng;\n\n    #[test]\n    fn bytesrepr_test_transform() {\n        let mut rng = TestRng::new();\n        let transform: TransformKindV1 = rng.gen();\n        bytesrepr::test_serialization_roundtrip(&transform);\n    }\n\n    #[test]\n    fn bytesrepr_test_execution_result() {\n        let mut rng = TestRng::new();\n        let execution_result: 
ExecutionResultV1 = rng.gen();\n        bytesrepr::test_serialization_roundtrip(&execution_result);\n    }\n}\n"
  },
  {
    "path": "types/src/execution/execution_result_v2.rs",
    "content": "//! This file provides types to allow conversion from an EE `ExecutionResult` into a similar type\n//! which can be serialized to a valid binary or JSON representation.\n//!\n//! It is stored as metadata related to a given transaction, and made available to clients via the\n//! JSON-RPC API.\n\n#[cfg(any(feature = \"testing\", test))]\nuse alloc::format;\nuse alloc::{string::String, vec::Vec};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse once_cell::sync::Lazy;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse super::Effects;\n#[cfg(feature = \"json-schema\")]\nuse super::{TransformKindV2, TransformV2};\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\n#[cfg(feature = \"json-schema\")]\nuse crate::Key;\nuse crate::{\n    bytesrepr::{self, Error, FromBytes, ToBytes},\n    Gas, InitiatorAddr, Transfer, U512,\n};\n\n#[cfg(feature = \"json-schema\")]\nstatic EXECUTION_RESULT: Lazy<ExecutionResultV2> = Lazy::new(|| {\n    let key1 = Key::from_formatted_str(\n        \"account-hash-2c4a11c062a8a337bfc97e27fd66291caeb2c65865dcb5d3ef3759c4c97efecb\",\n    )\n    .unwrap();\n    let key2 = Key::from_formatted_str(\n        \"deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1\",\n    )\n    .unwrap();\n    let mut effects = Effects::new();\n    effects.push(TransformV2::new(key1, TransformKindV2::AddUInt64(8u64)));\n    effects.push(TransformV2::new(key2, TransformKindV2::Identity));\n\n    let transfers = vec![Transfer::example().clone()];\n\n    // NOTE: these are arbitrary values for schema and type demonstration,\n    // they are not properly derived actual values. Depending on current chainspec\n    // settings on a given chain, we may or may not be issuing a refund and if we are\n    // the percentage can vary. 
And the cost is affected by dynamic gas pricing\n    // for a given era, within an inclusive range defined in the chainspec.\n    // Thus, real values cannot be calculated in a vacuum.\n    const LIMIT: u64 = 123_456;\n    const CONSUMED: u64 = 100_000;\n    const COST: u64 = 246_912;\n\n    const PRICE: u8 = 2;\n\n    let refund = COST.saturating_sub(CONSUMED);\n\n    ExecutionResultV2 {\n        initiator: InitiatorAddr::from(crate::PublicKey::example().clone()),\n        error_message: None,\n        current_price: PRICE,\n        limit: Gas::new(LIMIT),\n        consumed: Gas::new(CONSUMED),\n        cost: U512::from(COST),\n        refund: U512::from(refund),\n        size_estimate: Transfer::example().serialized_length() as u64,\n        transfers,\n        effects,\n    }\n});\n\n/// The result of executing a single transaction.\n#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct ExecutionResultV2 {\n    /// Who initiated this transaction.\n    pub initiator: InitiatorAddr,\n    /// If there is no error message, this execution was processed successfully.\n    /// If there is an error message, this execution failed to fully process for the stated reason.\n    pub error_message: Option<String>,\n    /// The current gas price. I.e. 
how many motes are charged for each unit of computation.\n    pub current_price: u8,\n    /// The maximum allowed gas limit for this transaction\n    pub limit: Gas,\n    /// How much gas was consumed executing this transaction.\n    pub consumed: Gas,\n    /// How much was paid for this transaction.\n    pub cost: U512,\n    /// How much unconsumed gas was refunded (if any)?\n    pub refund: U512,\n    /// A record of transfers performed while executing this transaction.\n    pub transfers: Vec<Transfer>,\n    /// The size estimate of the transaction\n    pub size_estimate: u64,\n    /// The effects of executing this transaction.\n    pub effects: Effects,\n}\n\nimpl ExecutionResultV2 {\n    /// The refunded amount, if any.\n    pub fn refund(&self) -> U512 {\n        self.refund\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    #[cfg(feature = \"json-schema\")]\n    pub fn example() -> &'static Self {\n        &EXECUTION_RESULT\n    }\n\n    /// Returns a random `ExecutionResultV2`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        let effects = Effects::random(rng);\n\n        let transfer_count = rng.gen_range(0..6);\n        let mut transfers = vec![];\n        for _ in 0..transfer_count {\n            transfers.push(Transfer::random(rng))\n        }\n\n        let limit = Gas::new(rng.gen::<u64>());\n        let gas_price = rng.gen_range(1..6);\n        // cost = the limit * the price\n        let cost = limit.value() * U512::from(gas_price);\n        let range = limit.value().as_u64();\n\n        // can range from 0 to limit\n        let consumed = limit\n            .checked_sub(Gas::new(rng.gen_range(0..=range)))\n            .expect(\"consumed\");\n\n        // this assumes 100% refund ratio\n        let refund = cost.saturating_sub(consumed.value());\n\n        let size_estimate = rng.gen();\n\n        ExecutionResultV2 {\n            initiator: 
InitiatorAddr::random(rng),\n            effects,\n            transfers,\n            current_price: gas_price,\n            cost,\n            limit,\n            consumed,\n            refund,\n            size_estimate,\n            error_message: if rng.gen() {\n                Some(format!(\"Error message {}\", rng.gen::<u64>()))\n            } else {\n                None\n            },\n        }\n    }\n}\n\nimpl ToBytes for ExecutionResultV2 {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.initiator.serialized_length()\n            + self.error_message.serialized_length()\n            + self.limit.serialized_length()\n            + self.consumed.serialized_length()\n            + self.cost.serialized_length()\n            + self.transfers.serialized_length()\n            + self.size_estimate.serialized_length()\n            + self.effects.serialized_length()\n            + self.refund.serialized_length()\n            + self.current_price.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        self.initiator.write_bytes(writer)?; // initiator should logically be first\n        self.error_message.write_bytes(writer)?;\n        self.limit.write_bytes(writer)?;\n        self.consumed.write_bytes(writer)?;\n        self.cost.write_bytes(writer)?;\n        self.transfers.write_bytes(writer)?;\n        self.size_estimate.write_bytes(writer)?;\n        self.effects.write_bytes(writer)?;\n        self.refund.write_bytes(writer)?;\n        self.current_price.write_bytes(writer)\n    }\n}\n\nimpl FromBytes for ExecutionResultV2 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (initiator, remainder) = InitiatorAddr::from_bytes(bytes)?;\n        let (error_message, remainder) = 
Option::<String>::from_bytes(remainder)?;\n        let (limit, remainder) = Gas::from_bytes(remainder)?;\n        let (consumed, remainder) = Gas::from_bytes(remainder)?;\n        let (cost, remainder) = U512::from_bytes(remainder)?;\n        let (transfers, remainder) = Vec::<Transfer>::from_bytes(remainder)?;\n        let (size_estimate, remainder) = FromBytes::from_bytes(remainder)?;\n        let (effects, remainder) = Effects::from_bytes(remainder)?;\n        // refund && current_price were added after 2.0 was upgraded into on\n        // DevNet and IntegrationNet, thus the bytes repr must be appended and optional\n        let (refund, remainder) = match U512::from_bytes(remainder) {\n            Ok((ret, rem)) => (ret, rem),\n            Err(_) => {\n                let rem: &[u8] = &[];\n                (U512::zero(), rem)\n            }\n        };\n        let (current_price, remainder) = match u8::from_bytes(remainder) {\n            Ok((ret, rem)) => (ret, rem),\n            Err(_) => {\n                let ret = {\n                    let div = cost.checked_div(limit.value()).unwrap_or_default();\n                    if div > U512::from(u8::MAX) {\n                        u8::MAX\n                    } else {\n                        div.as_u32() as u8\n                    }\n                };\n\n                let rem: &[u8] = &[];\n                (ret, rem)\n            }\n        };\n        let execution_result = ExecutionResultV2 {\n            initiator,\n            error_message,\n            current_price,\n            limit,\n            consumed,\n            cost,\n            refund,\n            transfers,\n            size_estimate,\n            effects,\n        };\n        Ok((execution_result, remainder))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        for _ in 0..10 {\n            let execution_result = 
ExecutionResultV2::random(rng);\n            bytesrepr::test_serialization_roundtrip(&execution_result);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/execution/transform.rs",
    "content": "use alloc::vec::Vec;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\nuse tracing::error;\n\nuse super::TransformKindV2;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    Key,\n};\n\n/// A transformation performed while executing a deploy.\n#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct TransformV2 {\n    key: Key,\n    kind: TransformKindV2,\n}\n\nimpl TransformV2 {\n    /// Constructs a new `Transform`.\n    pub fn new(key: Key, kind: TransformKindV2) -> Self {\n        TransformV2 { key, kind }\n    }\n\n    /// Returns the key whose value was transformed.\n    pub fn key(&self) -> &Key {\n        &self.key\n    }\n\n    /// Returns the transformation kind.\n    pub fn kind(&self) -> &TransformKindV2 {\n        &self.kind\n    }\n\n    /// Consumes `self`, returning its constituent parts.\n    pub fn destructure(self) -> (Key, TransformKindV2) {\n        (self.key, self.kind)\n    }\n}\n\nimpl ToBytes for TransformV2 {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.key.serialized_length() + self.kind.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.key.write_bytes(writer)?;\n        if let Err(err) = self.kind.write_bytes(writer) {\n            error!(%err, \"ToBytes for TransformV2\");\n            Err(err)\n        } else {\n            Ok(())\n        }\n    }\n}\n\nimpl FromBytes for TransformV2 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> 
{\n        let (key, remainder) = match Key::from_bytes(bytes) {\n            Ok((k, rem)) => (k, rem),\n            Err(err) => {\n                error!(%err, \"FromBytes for TransformV2: key\");\n                return Err(err);\n            }\n        };\n        let (transform, remainder) = match TransformKindV2::from_bytes(remainder) {\n            Ok((tk, rem)) => (tk, rem),\n            Err(err) => {\n                error!(%err, \"FromBytes for TransformV2: transform\");\n                return Err(err);\n            }\n        };\n        let transform_entry = TransformV2 {\n            key,\n            kind: transform,\n        };\n        Ok((transform_entry, remainder))\n    }\n}\n"
  },
  {
    "path": "types/src/execution/transform_error.rs",
    "content": "use alloc::vec::Vec;\nuse core::fmt::{self, Display, Formatter};\n#[cfg(feature = \"std\")]\nuse std::error::Error as StdError;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    CLValueError, StoredValueTypeMismatch,\n};\n\n/// Error type for applying and combining transforms.\n///\n/// A `TypeMismatch` occurs when a transform cannot be applied because the types are not compatible\n/// (e.g. trying to add a number to a string).\n#[derive(PartialEq, Eq, Clone, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[non_exhaustive]\npub enum TransformError {\n    /// Error while (de)serializing data.\n    Serialization(bytesrepr::Error),\n    /// Type mismatch error.\n    TypeMismatch(StoredValueTypeMismatch),\n    /// Type no longer supported.\n    Deprecated,\n}\n\nimpl Display for TransformError {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            TransformError::Serialization(error) => {\n                write!(formatter, \"{}\", error)\n            }\n            TransformError::TypeMismatch(error) => {\n                write!(formatter, \"{}\", error)\n            }\n            TransformError::Deprecated => {\n                write!(formatter, \"type no longer supported\")\n            }\n        }\n    }\n}\n\nimpl From<StoredValueTypeMismatch> for TransformError {\n    fn from(error: StoredValueTypeMismatch) -> Self {\n        TransformError::TypeMismatch(error)\n    }\n}\n\nimpl From<CLValueError> for TransformError {\n    fn from(cl_value_error: CLValueError) -> TransformError {\n        match cl_value_error {\n            CLValueError::Serialization(error) => TransformError::Serialization(error),\n            
CLValueError::Type(cl_type_mismatch) => {\n                let expected = format!(\"{:?}\", cl_type_mismatch.expected);\n                let found = format!(\"{:?}\", cl_type_mismatch.found);\n                let type_mismatch = StoredValueTypeMismatch::new(expected, found);\n                TransformError::TypeMismatch(type_mismatch)\n            }\n        }\n    }\n}\n\nimpl ToBytes for TransformError {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            TransformError::Serialization(error) => {\n                (TransformErrorTag::Serialization as u8).write_bytes(writer)?;\n                error.write_bytes(writer)\n            }\n            TransformError::TypeMismatch(error) => {\n                (TransformErrorTag::TypeMismatch as u8).write_bytes(writer)?;\n                error.write_bytes(writer)\n            }\n            TransformError::Deprecated => (TransformErrorTag::Deprecated as u8).write_bytes(writer),\n        }\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                TransformError::Serialization(error) => error.serialized_length(),\n                TransformError::TypeMismatch(error) => error.serialized_length(),\n                TransformError::Deprecated => 0,\n            }\n    }\n}\n\nimpl FromBytes for TransformError {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            tag if tag == TransformErrorTag::Serialization as u8 => {\n                let (error, remainder) = bytesrepr::Error::from_bytes(remainder)?;\n                Ok((TransformError::Serialization(error), remainder))\n            }\n     
       tag if tag == TransformErrorTag::TypeMismatch as u8 => {\n                let (error, remainder) = StoredValueTypeMismatch::from_bytes(remainder)?;\n                Ok((TransformError::TypeMismatch(error), remainder))\n            }\n            tag if tag == TransformErrorTag::Deprecated as u8 => {\n                Ok((TransformError::Deprecated, remainder))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n#[cfg(feature = \"std\")]\nimpl StdError for TransformError {\n    fn source(&self) -> Option<&(dyn StdError + 'static)> {\n        match self {\n            TransformError::Serialization(error) => Some(error),\n            TransformError::TypeMismatch(_) | TransformError::Deprecated => None,\n        }\n    }\n}\n\n#[repr(u8)]\nenum TransformErrorTag {\n    Serialization = 0,\n    TypeMismatch = 1,\n    Deprecated = 2,\n}\n"
  },
  {
    "path": "types/src/execution/transform_kind.rs",
    "content": "use alloc::{string::ToString, vec::Vec};\nuse core::{any, convert::TryFrom};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse num::traits::{AsPrimitive, WrappingAdd};\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\nuse tracing::error;\n\nuse super::TransformError;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    contracts::NamedKeys,\n    CLType, CLTyped, CLValue, Key, StoredValue, StoredValueTypeMismatch, U128, U256, U512,\n};\n\n/// Taxonomy of Transform.\n#[derive(PartialEq, Eq, Debug, Clone)]\n#[allow(clippy::large_enum_variant)]\npub enum TransformInstruction {\n    /// Store a StoredValue.\n    Store(StoredValue),\n    /// Prune a StoredValue by Key.\n    Prune(Key),\n}\n\nimpl TransformInstruction {\n    /// Store instruction.\n    pub fn store(stored_value: StoredValue) -> Self {\n        Self::Store(stored_value)\n    }\n\n    /// Prune instruction.\n    pub fn prune(key: Key) -> Self {\n        Self::Prune(key)\n    }\n}\n\nimpl From<StoredValue> for TransformInstruction {\n    fn from(value: StoredValue) -> Self {\n        TransformInstruction::Store(value)\n    }\n}\n\n/// Representation of a single transformation occurring during execution.\n///\n/// Note that all arithmetic variants of `TransformKindV2` are commutative which means that a given\n/// collection of them can be executed in any order to produce the same end result.\n#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug, Default)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\n#[allow(clippy::large_enum_variant)]\npub enum TransformKindV2 {\n    /// An identity transformation that does not modify a value in the global state.\n    ///\n    /// Created as a result of reading from the global state.\n    
#[default]\n    Identity,\n    /// Writes a new value in the global state.\n    Write(StoredValue),\n    /// A wrapping addition of an `i32` to an existing numeric value (not necessarily an `i32`) in\n    /// the global state.\n    AddInt32(i32),\n    /// A wrapping addition of a `u64` to an existing numeric value (not necessarily an `u64`) in\n    /// the global state.\n    AddUInt64(u64),\n    /// A wrapping addition of a `U128` to an existing numeric value (not necessarily an `U128`) in\n    /// the global state.\n    AddUInt128(U128),\n    /// A wrapping addition of a `U256` to an existing numeric value (not necessarily an `U256`) in\n    /// the global state.\n    AddUInt256(U256),\n    /// A wrapping addition of a `U512` to an existing numeric value (not necessarily an `U512`) in\n    /// the global state.\n    AddUInt512(U512),\n    /// Adds new named keys to an existing entry in the global state.\n    ///\n    /// This transform assumes that the existing stored value is either an Account or a Contract.\n    AddKeys(NamedKeys),\n    /// Removes the pathing to the global state entry of the specified key. 
The pruned element\n    /// remains reachable from previously generated global state root hashes, but will not be\n    /// included in the next generated global state root hash and subsequent state accumulated\n    /// from it.\n    Prune(Key),\n    /// Represents the case where applying a transform would cause an error.\n    Failure(TransformError),\n}\n\nimpl TransformKindV2 {\n    /// Applies the transformation on a specified stored value instance.\n    ///\n    /// This method produces a new `StoredValue` instance based on the `TransformKind` variant.\n    pub fn apply(self, stored_value: StoredValue) -> Result<TransformInstruction, TransformError> {\n        fn store(sv: StoredValue) -> TransformInstruction {\n            TransformInstruction::Store(sv)\n        }\n        match self {\n            TransformKindV2::Identity => Ok(store(stored_value)),\n            TransformKindV2::Write(new_value) => Ok(store(new_value)),\n            TransformKindV2::Prune(key) => Ok(TransformInstruction::prune(key)),\n            TransformKindV2::AddInt32(to_add) => wrapping_addition(stored_value, to_add),\n            TransformKindV2::AddUInt64(to_add) => wrapping_addition(stored_value, to_add),\n            TransformKindV2::AddUInt128(to_add) => wrapping_addition(stored_value, to_add),\n            TransformKindV2::AddUInt256(to_add) => wrapping_addition(stored_value, to_add),\n            TransformKindV2::AddUInt512(to_add) => wrapping_addition(stored_value, to_add),\n            TransformKindV2::AddKeys(keys) => match stored_value {\n                StoredValue::Contract(mut contract) => {\n                    contract.named_keys_append(keys);\n                    Ok(store(StoredValue::Contract(contract)))\n                }\n                StoredValue::Account(mut account) => {\n                    account.named_keys_append(keys);\n                    Ok(store(StoredValue::Account(account)))\n                }\n                StoredValue::AddressableEntity(_) => 
Err(TransformError::Deprecated),\n                StoredValue::CLValue(cl_value) => {\n                    let expected = \"Contract or Account\".to_string();\n                    let found = format!(\"{:?}\", cl_value.cl_type());\n                    Err(StoredValueTypeMismatch::new(expected, found).into())\n                }\n                StoredValue::SmartContract(_) => {\n                    let expected = \"Contract or Account\".to_string();\n                    let found = \"ContractPackage\".to_string();\n                    Err(StoredValueTypeMismatch::new(expected, found).into())\n                }\n                StoredValue::ByteCode(_) => {\n                    let expected = \"Contract or Account\".to_string();\n                    let found = \"ByteCode\".to_string();\n                    Err(StoredValueTypeMismatch::new(expected, found).into())\n                }\n                StoredValue::Transfer(_) => {\n                    let expected = \"Contract or Account\".to_string();\n                    let found = \"Transfer\".to_string();\n                    Err(StoredValueTypeMismatch::new(expected, found).into())\n                }\n                StoredValue::DeployInfo(_) => {\n                    let expected = \"Contract or Account\".to_string();\n                    let found = \"DeployInfo\".to_string();\n                    Err(StoredValueTypeMismatch::new(expected, found).into())\n                }\n                StoredValue::EraInfo(_) => {\n                    let expected = \"Contract or Account\".to_string();\n                    let found = \"EraInfo\".to_string();\n                    Err(StoredValueTypeMismatch::new(expected, found).into())\n                }\n                StoredValue::Bid(_) => {\n                    let expected = \"Contract or Account\".to_string();\n                    let found = \"Bid\".to_string();\n                    Err(StoredValueTypeMismatch::new(expected, found).into())\n                }\n    
            StoredValue::BidKind(_) => {\n                    let expected = \"Contract or Account\".to_string();\n                    let found = \"BidKind\".to_string();\n                    Err(StoredValueTypeMismatch::new(expected, found).into())\n                }\n                StoredValue::Withdraw(_) => {\n                    let expected = \"Contract or Account\".to_string();\n                    let found = \"Withdraw\".to_string();\n                    Err(StoredValueTypeMismatch::new(expected, found).into())\n                }\n                StoredValue::Unbonding(_) => {\n                    let expected = \"Contract or Account\".to_string();\n                    let found = \"Unbonding\".to_string();\n                    Err(StoredValueTypeMismatch::new(expected, found).into())\n                }\n                StoredValue::ContractWasm(_) => {\n                    let expected = \"Contract or Account\".to_string();\n                    let found = \"ContractWasm\".to_string();\n                    Err(StoredValueTypeMismatch::new(expected, found).into())\n                }\n                StoredValue::ContractPackage(_) => {\n                    let expected = \"Contract or Account\".to_string();\n                    let found = \"ContractPackage\".to_string();\n                    Err(StoredValueTypeMismatch::new(expected, found).into())\n                }\n                StoredValue::NamedKey(_) => {\n                    let expected = \"Contract or Account\".to_string();\n                    let found = \"NamedKeyValue\".to_string();\n                    Err(StoredValueTypeMismatch::new(expected, found).into())\n                }\n                StoredValue::MessageTopic(_) => {\n                    let expected = \"Contract or Account\".to_string();\n                    let found = \"MessageTopic\".to_string();\n                    Err(StoredValueTypeMismatch::new(expected, found).into())\n                }\n                
StoredValue::Message(_) => {\n                    let expected = \"Contract or Account\".to_string();\n                    let found = \"Message\".to_string();\n                    Err(StoredValueTypeMismatch::new(expected, found).into())\n                }\n                StoredValue::RawBytes(_) => {\n                    let expected = \"Contract or Account\".to_string();\n                    let found = \"RawBytes\".to_string();\n                    Err(StoredValueTypeMismatch::new(expected, found).into())\n                }\n                StoredValue::Prepayment(_) => {\n                    let expected = \"Contract or Account\".to_string();\n                    let found = \"Prepayment\".to_string();\n                    Err(StoredValueTypeMismatch::new(expected, found).into())\n                }\n                StoredValue::EntryPoint(_) => {\n                    let expected = \"Contract or Account\".to_string();\n                    let found = \"EntryPoint\".to_string();\n                    Err(StoredValueTypeMismatch::new(expected, found).into())\n                }\n            },\n            TransformKindV2::Failure(error) => Err(error),\n        }\n    }\n\n    /// Returns a random `TransformKind`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random<R: Rng + ?Sized>(rng: &mut R) -> Self {\n        match rng.gen_range(0..10) {\n            0 => TransformKindV2::Identity,\n            1 => TransformKindV2::Write(StoredValue::CLValue(CLValue::from_t(true).unwrap())),\n            2 => TransformKindV2::AddInt32(rng.gen()),\n            3 => TransformKindV2::AddUInt64(rng.gen()),\n            4 => TransformKindV2::AddUInt128(rng.gen::<u64>().into()),\n            5 => TransformKindV2::AddUInt256(rng.gen::<u64>().into()),\n            6 => TransformKindV2::AddUInt512(rng.gen::<u64>().into()),\n            7 => {\n                let mut named_keys = NamedKeys::new();\n                for _ in 0..rng.gen_range(1..6) {\n                    
named_keys.insert(rng.gen::<u64>().to_string(), rng.gen());\n                }\n                TransformKindV2::AddKeys(named_keys)\n            }\n            8 => TransformKindV2::Failure(TransformError::Serialization(\n                bytesrepr::Error::EarlyEndOfStream,\n            )),\n            9 => TransformKindV2::Prune(rng.gen::<Key>()),\n            _ => unreachable!(),\n        }\n    }\n}\n\nimpl ToBytes for TransformKindV2 {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                TransformKindV2::Identity => 0,\n                TransformKindV2::Write(stored_value) => stored_value.serialized_length(),\n                TransformKindV2::AddInt32(value) => value.serialized_length(),\n                TransformKindV2::AddUInt64(value) => value.serialized_length(),\n                TransformKindV2::AddUInt128(value) => value.serialized_length(),\n                TransformKindV2::AddUInt256(value) => value.serialized_length(),\n                TransformKindV2::AddUInt512(value) => value.serialized_length(),\n                TransformKindV2::AddKeys(named_keys) => named_keys.serialized_length(),\n                TransformKindV2::Failure(error) => error.serialized_length(),\n                TransformKindV2::Prune(value) => value.serialized_length(),\n            }\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            TransformKindV2::Identity => (TransformTag::Identity as u8).write_bytes(writer),\n            TransformKindV2::Write(stored_value) => {\n                (TransformTag::Write as u8).write_bytes(writer)?;\n                stored_value.write_bytes(writer)\n            }\n            TransformKindV2::AddInt32(value) => {\n     
           (TransformTag::AddInt32 as u8).write_bytes(writer)?;\n                value.write_bytes(writer)\n            }\n            TransformKindV2::AddUInt64(value) => {\n                (TransformTag::AddUInt64 as u8).write_bytes(writer)?;\n                value.write_bytes(writer)\n            }\n            TransformKindV2::AddUInt128(value) => {\n                (TransformTag::AddUInt128 as u8).write_bytes(writer)?;\n                value.write_bytes(writer)\n            }\n            TransformKindV2::AddUInt256(value) => {\n                (TransformTag::AddUInt256 as u8).write_bytes(writer)?;\n                value.write_bytes(writer)\n            }\n            TransformKindV2::AddUInt512(value) => {\n                (TransformTag::AddUInt512 as u8).write_bytes(writer)?;\n                value.write_bytes(writer)\n            }\n            TransformKindV2::AddKeys(named_keys) => {\n                (TransformTag::AddKeys as u8).write_bytes(writer)?;\n                named_keys.write_bytes(writer)\n            }\n            TransformKindV2::Failure(error) => {\n                (TransformTag::Failure as u8).write_bytes(writer)?;\n                error.write_bytes(writer)\n            }\n            TransformKindV2::Prune(value) => {\n                (TransformTag::Prune as u8).write_bytes(writer)?;\n                value.write_bytes(writer)\n            }\n        }\n    }\n}\n\nimpl FromBytes for TransformKindV2 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        if bytes.is_empty() {\n            error!(\"FromBytes for TransformKindV2: bytes length should not be 0\");\n        }\n        let (tag, remainder) = match u8::from_bytes(bytes) {\n            Ok((tag, rem)) => (tag, rem),\n            Err(err) => {\n                error!(%err, \"FromBytes for TransformKindV2\");\n                return Err(err);\n            }\n        };\n        match tag {\n            tag if tag == TransformTag::Identity as u8 => {\n  
              Ok((TransformKindV2::Identity, remainder))\n            }\n            tag if tag == TransformTag::Write as u8 => {\n                let (stored_value, remainder) = StoredValue::from_bytes(remainder)?;\n                Ok((TransformKindV2::Write(stored_value), remainder))\n            }\n            tag if tag == TransformTag::AddInt32 as u8 => {\n                let (value, remainder) = i32::from_bytes(remainder)?;\n                Ok((TransformKindV2::AddInt32(value), remainder))\n            }\n            tag if tag == TransformTag::AddUInt64 as u8 => {\n                let (value, remainder) = u64::from_bytes(remainder)?;\n                Ok((TransformKindV2::AddUInt64(value), remainder))\n            }\n            tag if tag == TransformTag::AddUInt128 as u8 => {\n                let (value, remainder) = U128::from_bytes(remainder)?;\n                Ok((TransformKindV2::AddUInt128(value), remainder))\n            }\n            tag if tag == TransformTag::AddUInt256 as u8 => {\n                let (value, remainder) = U256::from_bytes(remainder)?;\n                Ok((TransformKindV2::AddUInt256(value), remainder))\n            }\n            tag if tag == TransformTag::AddUInt512 as u8 => {\n                let (value, remainder) = U512::from_bytes(remainder)?;\n                Ok((TransformKindV2::AddUInt512(value), remainder))\n            }\n            tag if tag == TransformTag::AddKeys as u8 => {\n                let (named_keys, remainder) = NamedKeys::from_bytes(remainder)?;\n                Ok((TransformKindV2::AddKeys(named_keys), remainder))\n            }\n            tag if tag == TransformTag::Failure as u8 => {\n                let (error, remainder) = TransformError::from_bytes(remainder)?;\n                Ok((TransformKindV2::Failure(error), remainder))\n            }\n            tag if tag == TransformTag::Prune as u8 => {\n                let (key, remainder) = Key::from_bytes(remainder)?;\n                
Ok((TransformKindV2::Prune(key), remainder))\n            }\n            _ => {\n                error!(%tag, rem_len = remainder.len(), \"FromBytes for TransformKindV2: unknown tag\");\n                Err(bytesrepr::Error::Formatting)\n            }\n        }\n    }\n}\n\n/// Attempts a wrapping addition of `to_add` to `stored_value`, assuming `stored_value` is\n/// compatible with type `Y`.\nfn wrapping_addition<Y>(\n    stored_value: StoredValue,\n    to_add: Y,\n) -> Result<TransformInstruction, TransformError>\nwhere\n    Y: AsPrimitive<i32>\n        + AsPrimitive<i64>\n        + AsPrimitive<u8>\n        + AsPrimitive<u32>\n        + AsPrimitive<u64>\n        + AsPrimitive<U128>\n        + AsPrimitive<U256>\n        + AsPrimitive<U512>,\n{\n    let cl_value = CLValue::try_from(stored_value)?;\n\n    match cl_value.cl_type() {\n        CLType::I32 => do_wrapping_addition::<i32, _>(cl_value, to_add),\n        CLType::I64 => do_wrapping_addition::<i64, _>(cl_value, to_add),\n        CLType::U8 => do_wrapping_addition::<u8, _>(cl_value, to_add),\n        CLType::U32 => do_wrapping_addition::<u32, _>(cl_value, to_add),\n        CLType::U64 => do_wrapping_addition::<u64, _>(cl_value, to_add),\n        CLType::U128 => do_wrapping_addition::<U128, _>(cl_value, to_add),\n        CLType::U256 => do_wrapping_addition::<U256, _>(cl_value, to_add),\n        CLType::U512 => do_wrapping_addition::<U512, _>(cl_value, to_add),\n        other => {\n            let expected = format!(\"integral type compatible with {}\", any::type_name::<Y>());\n            let found = format!(\"{:?}\", other);\n            Err(StoredValueTypeMismatch::new(expected, found).into())\n        }\n    }\n}\n\n/// Attempts a wrapping addition of `to_add` to the value represented by `cl_value`.\nfn do_wrapping_addition<X, Y>(\n    cl_value: CLValue,\n    to_add: Y,\n) -> Result<TransformInstruction, TransformError>\nwhere\n    X: WrappingAdd + CLTyped + ToBytes + FromBytes + Copy + 'static,\n    Y: 
AsPrimitive<X>,\n{\n    let x: X = cl_value.into_t()?;\n    let result = x.wrapping_add(&(to_add.as_()));\n    let stored_value = StoredValue::CLValue(CLValue::from_t(result)?);\n    Ok(TransformInstruction::store(stored_value))\n}\n\n#[derive(Debug)]\n#[repr(u8)]\nenum TransformTag {\n    Identity = 0,\n    Write = 1,\n    AddInt32 = 2,\n    AddUInt64 = 3,\n    AddUInt128 = 4,\n    AddUInt256 = 5,\n    AddUInt512 = 6,\n    AddKeys = 7,\n    Failure = 8,\n    Prune = 9,\n}\n\n#[cfg(test)]\nmod tests {\n    use std::{collections::BTreeMap, fmt};\n\n    use num::{Bounded, Num};\n\n    use crate::{\n        byte_code::ByteCodeKind, bytesrepr::Bytes, testing::TestRng, AccessRights, ByteCode, Key,\n        URef, U128, U256, U512,\n    };\n\n    use super::*;\n\n    const ZERO_ARRAY: [u8; 32] = [0; 32];\n    const TEST_STR: &str = \"a\";\n    const TEST_BOOL: bool = true;\n\n    const ZERO_I32: i32 = 0;\n    const ONE_I32: i32 = 1;\n    const NEG_ONE_I32: i32 = -1;\n    const NEG_TWO_I32: i32 = -2;\n    const MIN_I32: i32 = i32::MIN;\n    const MAX_I32: i32 = i32::MAX;\n\n    const ZERO_I64: i64 = 0;\n    const ONE_I64: i64 = 1;\n    const NEG_ONE_I64: i64 = -1;\n    const NEG_TWO_I64: i64 = -2;\n    const MIN_I64: i64 = i64::MIN;\n    const MAX_I64: i64 = i64::MAX;\n\n    const ZERO_U8: u8 = 0;\n    const ONE_U8: u8 = 1;\n    const MAX_U8: u8 = u8::MAX;\n\n    const ZERO_U32: u32 = 0;\n    const ONE_U32: u32 = 1;\n    const MAX_U32: u32 = u32::MAX;\n\n    const ZERO_U64: u64 = 0;\n    const ONE_U64: u64 = 1;\n    const MAX_U64: u64 = u64::MAX;\n\n    const ZERO_U128: U128 = U128([0; 2]);\n    const ONE_U128: U128 = U128([1, 0]);\n    const MAX_U128: U128 = U128([MAX_U64; 2]);\n\n    const ZERO_U256: U256 = U256([0; 4]);\n    const ONE_U256: U256 = U256([1, 0, 0, 0]);\n    const MAX_U256: U256 = U256([MAX_U64; 4]);\n\n    const ZERO_U512: U512 = U512([0; 8]);\n    const ONE_U512: U512 = U512([1, 0, 0, 0, 0, 0, 0, 0]);\n    const MAX_U512: U512 = U512([MAX_U64; 8]);\n\n   
 impl From<U128> for TransformKindV2 {\n        fn from(x: U128) -> Self {\n            TransformKindV2::AddUInt128(x)\n        }\n    }\n    impl From<U256> for TransformKindV2 {\n        fn from(x: U256) -> Self {\n            TransformKindV2::AddUInt256(x)\n        }\n    }\n    impl From<U512> for TransformKindV2 {\n        fn from(x: U512) -> Self {\n            TransformKindV2::AddUInt512(x)\n        }\n    }\n\n    #[test]\n    fn i32_overflow() {\n        let max = i32::MAX;\n        let min = i32::MIN;\n\n        let max_value = StoredValue::CLValue(CLValue::from_t(max).unwrap());\n        let min_value = StoredValue::CLValue(CLValue::from_t(min).unwrap());\n\n        let apply_overflow = TransformKindV2::AddInt32(1).apply(max_value.clone());\n        let apply_underflow = TransformKindV2::AddInt32(-1).apply(min_value.clone());\n\n        assert_eq!(\n            apply_overflow.expect(\"Unexpected overflow\"),\n            TransformInstruction::store(min_value)\n        );\n        assert_eq!(\n            apply_underflow.expect(\"Unexpected underflow\"),\n            TransformInstruction::store(max_value)\n        );\n    }\n\n    fn uint_overflow_test<T>()\n    where\n        T: Num + Bounded + CLTyped + ToBytes + Into<TransformKindV2> + Copy,\n    {\n        let max = T::max_value();\n        let min = T::min_value();\n        let one = T::one();\n        let zero = T::zero();\n\n        let max_value = StoredValue::CLValue(CLValue::from_t(max).unwrap());\n        let min_value = StoredValue::CLValue(CLValue::from_t(min).unwrap());\n        let zero_value = StoredValue::CLValue(CLValue::from_t(zero).unwrap());\n\n        let one_transform: TransformKindV2 = one.into();\n\n        let apply_overflow = TransformKindV2::AddInt32(1).apply(max_value.clone());\n\n        let apply_overflow_uint = one_transform.apply(max_value.clone());\n        let apply_underflow = TransformKindV2::AddInt32(-1).apply(min_value);\n\n        assert_eq!(apply_overflow, 
Ok(zero_value.clone().into()));\n        assert_eq!(apply_overflow_uint, Ok(zero_value.into()));\n        assert_eq!(apply_underflow, Ok(max_value.into()));\n    }\n\n    #[test]\n    fn u128_overflow() {\n        uint_overflow_test::<U128>();\n    }\n\n    #[test]\n    fn u256_overflow() {\n        uint_overflow_test::<U256>();\n    }\n\n    #[test]\n    fn u512_overflow() {\n        uint_overflow_test::<U512>();\n    }\n\n    #[test]\n    fn addition_between_mismatched_types_should_fail() {\n        fn assert_yields_type_mismatch_error(stored_value: StoredValue) {\n            match wrapping_addition(stored_value, ZERO_I32) {\n                Err(TransformError::TypeMismatch(_)) => (),\n                _ => panic!(\"wrapping addition should yield TypeMismatch error\"),\n            };\n        }\n\n        let byte_code = StoredValue::ByteCode(ByteCode::new(ByteCodeKind::V1CasperWasm, vec![]));\n        assert_yields_type_mismatch_error(byte_code);\n\n        let uref = URef::new(ZERO_ARRAY, AccessRights::READ);\n\n        let cl_bool =\n            StoredValue::CLValue(CLValue::from_t(TEST_BOOL).expect(\"should create CLValue\"));\n        assert_yields_type_mismatch_error(cl_bool);\n\n        let cl_unit = StoredValue::CLValue(CLValue::from_t(()).expect(\"should create CLValue\"));\n        assert_yields_type_mismatch_error(cl_unit);\n\n        let cl_string =\n            StoredValue::CLValue(CLValue::from_t(TEST_STR).expect(\"should create CLValue\"));\n        assert_yields_type_mismatch_error(cl_string);\n\n        let cl_key = StoredValue::CLValue(\n            CLValue::from_t(Key::Hash(ZERO_ARRAY)).expect(\"should create CLValue\"),\n        );\n        assert_yields_type_mismatch_error(cl_key);\n\n        let cl_uref = StoredValue::CLValue(CLValue::from_t(uref).expect(\"should create CLValue\"));\n        assert_yields_type_mismatch_error(cl_uref);\n\n        let cl_option =\n            StoredValue::CLValue(CLValue::from_t(Some(ZERO_U8)).expect(\"should 
create CLValue\"));\n        assert_yields_type_mismatch_error(cl_option);\n\n        let cl_list = StoredValue::CLValue(\n            CLValue::from_t(Bytes::from(vec![ZERO_U8])).expect(\"should create CLValue\"),\n        );\n        assert_yields_type_mismatch_error(cl_list);\n\n        let cl_fixed_list =\n            StoredValue::CLValue(CLValue::from_t([ZERO_U8]).expect(\"should create CLValue\"));\n        assert_yields_type_mismatch_error(cl_fixed_list);\n\n        let cl_result: Result<(), u8> = Err(ZERO_U8);\n        let cl_result =\n            StoredValue::CLValue(CLValue::from_t(cl_result).expect(\"should create CLValue\"));\n        assert_yields_type_mismatch_error(cl_result);\n\n        let cl_map = StoredValue::CLValue(\n            CLValue::from_t(BTreeMap::<u8, u8>::new()).expect(\"should create CLValue\"),\n        );\n        assert_yields_type_mismatch_error(cl_map);\n\n        let cl_tuple1 =\n            StoredValue::CLValue(CLValue::from_t((ZERO_U8,)).expect(\"should create CLValue\"));\n        assert_yields_type_mismatch_error(cl_tuple1);\n\n        let cl_tuple2 = StoredValue::CLValue(\n            CLValue::from_t((ZERO_U8, ZERO_U8)).expect(\"should create CLValue\"),\n        );\n        assert_yields_type_mismatch_error(cl_tuple2);\n\n        let cl_tuple3 = StoredValue::CLValue(\n            CLValue::from_t((ZERO_U8, ZERO_U8, ZERO_U8)).expect(\"should create CLValue\"),\n        );\n        assert_yields_type_mismatch_error(cl_tuple3);\n    }\n\n    #[test]\n    #[allow(clippy::cognitive_complexity)]\n    fn wrapping_addition_should_succeed() {\n        fn add<X, Y>(current_value: X, to_add: Y) -> X\n        where\n            X: CLTyped + ToBytes + FromBytes + PartialEq + fmt::Debug,\n            Y: AsPrimitive<i32>\n                + AsPrimitive<i64>\n                + AsPrimitive<u8>\n                + AsPrimitive<u32>\n                + AsPrimitive<u64>\n                + AsPrimitive<U128>\n                + AsPrimitive<U256>\n     
           + AsPrimitive<U512>,\n        {\n            let current = StoredValue::CLValue(\n                CLValue::from_t(current_value).expect(\"should create CLValue\"),\n            );\n            if let TransformInstruction::Store(result) =\n                wrapping_addition(current, to_add).expect(\"wrapping addition should succeed\")\n            {\n                CLValue::try_from(result)\n                    .expect(\"should be CLValue\")\n                    .into_t()\n                    .expect(\"should parse to X\")\n            } else {\n                panic!(\"expected TransformInstruction::Store\");\n            }\n        }\n\n        // Adding to i32\n        assert_eq!(ONE_I32, add(ZERO_I32, ONE_I32));\n        assert_eq!(MIN_I32, add(MAX_I32, ONE_I32));\n        assert_eq!(NEG_TWO_I32, add(MAX_I32, MAX_I32));\n        assert_eq!(ZERO_I32, add(ONE_I32, NEG_ONE_I32));\n        assert_eq!(NEG_ONE_I32, add(ZERO_I32, NEG_ONE_I32));\n        assert_eq!(MAX_I32, add(NEG_ONE_I32, MIN_I32));\n\n        assert_eq!(ONE_I32, add(ZERO_I32, ONE_U64));\n        assert_eq!(MIN_I32, add(MAX_I32, ONE_U64));\n        assert_eq!(NEG_TWO_I32, add(MAX_I32, MAX_I32 as u64));\n\n        assert_eq!(ONE_I32, add(ZERO_I32, ONE_U128));\n        assert_eq!(MIN_I32, add(MAX_I32, ONE_U128));\n        assert_eq!(NEG_TWO_I32, add(MAX_I32, U128::from(MAX_I32)));\n\n        assert_eq!(ONE_I32, add(ZERO_I32, ONE_U256));\n        assert_eq!(MIN_I32, add(MAX_I32, ONE_U256));\n        assert_eq!(NEG_TWO_I32, add(MAX_I32, U256::from(MAX_I32)));\n\n        assert_eq!(ONE_I32, add(ZERO_I32, ONE_U512));\n        assert_eq!(MIN_I32, add(MAX_I32, ONE_U512));\n        assert_eq!(NEG_TWO_I32, add(MAX_I32, U512::from(MAX_I32)));\n\n        // Adding to i64\n        assert_eq!(ONE_I64, add(ZERO_I64, ONE_I32));\n        assert_eq!(MIN_I64, add(MAX_I64, ONE_I32));\n        assert_eq!(ZERO_I64, add(ONE_I64, NEG_ONE_I32));\n        assert_eq!(NEG_ONE_I64, add(ZERO_I64, NEG_ONE_I32));\n        
assert_eq!(MAX_I64, add(MIN_I64, NEG_ONE_I32));\n\n        assert_eq!(ONE_I64, add(ZERO_I64, ONE_U64));\n        assert_eq!(MIN_I64, add(MAX_I64, ONE_U64));\n        assert_eq!(NEG_TWO_I64, add(MAX_I64, MAX_I64 as u64));\n\n        assert_eq!(ONE_I64, add(ZERO_I64, ONE_U128));\n        assert_eq!(MIN_I64, add(MAX_I64, ONE_U128));\n        assert_eq!(NEG_TWO_I64, add(MAX_I64, U128::from(MAX_I64)));\n\n        assert_eq!(ONE_I64, add(ZERO_I64, ONE_U256));\n        assert_eq!(MIN_I64, add(MAX_I64, ONE_U256));\n        assert_eq!(NEG_TWO_I64, add(MAX_I64, U256::from(MAX_I64)));\n\n        assert_eq!(ONE_I64, add(ZERO_I64, ONE_U512));\n        assert_eq!(MIN_I64, add(MAX_I64, ONE_U512));\n        assert_eq!(NEG_TWO_I64, add(MAX_I64, U512::from(MAX_I64)));\n\n        // Adding to u8\n        assert_eq!(ONE_U8, add(ZERO_U8, ONE_I32));\n        assert_eq!(ZERO_U8, add(MAX_U8, ONE_I32));\n        assert_eq!(MAX_U8, add(MAX_U8, 256_i32));\n        assert_eq!(ZERO_U8, add(MAX_U8, 257_i32));\n        assert_eq!(ZERO_U8, add(ONE_U8, NEG_ONE_I32));\n        assert_eq!(MAX_U8, add(ZERO_U8, NEG_ONE_I32));\n        assert_eq!(ZERO_U8, add(ZERO_U8, -256_i32));\n        assert_eq!(MAX_U8, add(ZERO_U8, -257_i32));\n        assert_eq!(MAX_U8, add(ZERO_U8, MAX_I32));\n        assert_eq!(ZERO_U8, add(ZERO_U8, MIN_I32));\n\n        assert_eq!(ONE_U8, add(ZERO_U8, ONE_U64));\n        assert_eq!(ZERO_U8, add(MAX_U8, ONE_U64));\n        assert_eq!(ONE_U8, add(ZERO_U8, u64::from(MAX_U8) + 2));\n        assert_eq!(MAX_U8, add(ZERO_U8, MAX_U64));\n\n        assert_eq!(ONE_U8, add(ZERO_U8, ONE_U128));\n        assert_eq!(ZERO_U8, add(MAX_U8, ONE_U128));\n        assert_eq!(ONE_U8, add(ZERO_U8, U128::from(MAX_U8) + 2));\n        assert_eq!(MAX_U8, add(ZERO_U8, MAX_U128));\n\n        assert_eq!(ONE_U8, add(ZERO_U8, ONE_U256));\n        assert_eq!(ZERO_U8, add(MAX_U8, ONE_U256));\n        assert_eq!(ONE_U8, add(ZERO_U8, U256::from(MAX_U8) + 2));\n        assert_eq!(MAX_U8, add(ZERO_U8, 
MAX_U256));\n\n        assert_eq!(ONE_U8, add(ZERO_U8, ONE_U512));\n        assert_eq!(ZERO_U8, add(MAX_U8, ONE_U512));\n        assert_eq!(ONE_U8, add(ZERO_U8, U512::from(MAX_U8) + 2));\n        assert_eq!(MAX_U8, add(ZERO_U8, MAX_U512));\n\n        // Adding to u32\n        assert_eq!(ONE_U32, add(ZERO_U32, ONE_I32));\n        assert_eq!(ZERO_U32, add(MAX_U32, ONE_I32));\n        assert_eq!(ZERO_U32, add(ONE_U32, NEG_ONE_I32));\n        assert_eq!(MAX_U32, add(ZERO_U32, NEG_ONE_I32));\n        assert_eq!(MAX_I32 as u32 + 1, add(ZERO_U32, MIN_I32));\n\n        assert_eq!(ONE_U32, add(ZERO_U32, ONE_U64));\n        assert_eq!(ZERO_U32, add(MAX_U32, ONE_U64));\n        assert_eq!(ONE_U32, add(ZERO_U32, u64::from(MAX_U32) + 2));\n        assert_eq!(MAX_U32, add(ZERO_U32, MAX_U64));\n\n        assert_eq!(ONE_U32, add(ZERO_U32, ONE_U128));\n        assert_eq!(ZERO_U32, add(MAX_U32, ONE_U128));\n        assert_eq!(ONE_U32, add(ZERO_U32, U128::from(MAX_U32) + 2));\n        assert_eq!(MAX_U32, add(ZERO_U32, MAX_U128));\n\n        assert_eq!(ONE_U32, add(ZERO_U32, ONE_U256));\n        assert_eq!(ZERO_U32, add(MAX_U32, ONE_U256));\n        assert_eq!(ONE_U32, add(ZERO_U32, U256::from(MAX_U32) + 2));\n        assert_eq!(MAX_U32, add(ZERO_U32, MAX_U256));\n\n        assert_eq!(ONE_U32, add(ZERO_U32, ONE_U512));\n        assert_eq!(ZERO_U32, add(MAX_U32, ONE_U512));\n        assert_eq!(ONE_U32, add(ZERO_U32, U512::from(MAX_U32) + 2));\n        assert_eq!(MAX_U32, add(ZERO_U32, MAX_U512));\n\n        // Adding to u64\n        assert_eq!(ONE_U64, add(ZERO_U64, ONE_I32));\n        assert_eq!(ZERO_U64, add(MAX_U64, ONE_I32));\n        assert_eq!(ZERO_U64, add(ONE_U64, NEG_ONE_I32));\n        assert_eq!(MAX_U64, add(ZERO_U64, NEG_ONE_I32));\n\n        assert_eq!(ONE_U64, add(ZERO_U64, ONE_U64));\n        assert_eq!(ZERO_U64, add(MAX_U64, ONE_U64));\n        assert_eq!(MAX_U64 - 1, add(MAX_U64, MAX_U64));\n\n        assert_eq!(ONE_U64, add(ZERO_U64, ONE_U128));\n        
assert_eq!(ZERO_U64, add(MAX_U64, ONE_U128));\n        assert_eq!(ONE_U64, add(ZERO_U64, U128::from(MAX_U64) + 2));\n        assert_eq!(MAX_U64, add(ZERO_U64, MAX_U128));\n\n        assert_eq!(ONE_U64, add(ZERO_U64, ONE_U256));\n        assert_eq!(ZERO_U64, add(MAX_U64, ONE_U256));\n        assert_eq!(ONE_U64, add(ZERO_U64, U256::from(MAX_U64) + 2));\n        assert_eq!(MAX_U64, add(ZERO_U64, MAX_U256));\n\n        assert_eq!(ONE_U64, add(ZERO_U64, ONE_U512));\n        assert_eq!(ZERO_U64, add(MAX_U64, ONE_U512));\n        assert_eq!(ONE_U64, add(ZERO_U64, U512::from(MAX_U64) + 2));\n        assert_eq!(MAX_U64, add(ZERO_U64, MAX_U512));\n\n        // Adding to U128\n        assert_eq!(ONE_U128, add(ZERO_U128, ONE_I32));\n        assert_eq!(ZERO_U128, add(MAX_U128, ONE_I32));\n        assert_eq!(ZERO_U128, add(ONE_U128, NEG_ONE_I32));\n        assert_eq!(MAX_U128, add(ZERO_U128, NEG_ONE_I32));\n\n        assert_eq!(ONE_U128, add(ZERO_U128, ONE_U64));\n        assert_eq!(ZERO_U128, add(MAX_U128, ONE_U64));\n\n        assert_eq!(ONE_U128, add(ZERO_U128, ONE_U128));\n        assert_eq!(ZERO_U128, add(MAX_U128, ONE_U128));\n        assert_eq!(MAX_U128 - 1, add(MAX_U128, MAX_U128));\n\n        assert_eq!(ONE_U128, add(ZERO_U128, ONE_U256));\n        assert_eq!(ZERO_U128, add(MAX_U128, ONE_U256));\n        assert_eq!(\n            ONE_U128,\n            add(\n                ZERO_U128,\n                U256::from_dec_str(&MAX_U128.to_string()).unwrap() + 2,\n            )\n        );\n        assert_eq!(MAX_U128, add(ZERO_U128, MAX_U256));\n\n        assert_eq!(ONE_U128, add(ZERO_U128, ONE_U512));\n        assert_eq!(ZERO_U128, add(MAX_U128, ONE_U512));\n        assert_eq!(\n            ONE_U128,\n            add(\n                ZERO_U128,\n                U512::from_dec_str(&MAX_U128.to_string()).unwrap() + 2,\n            )\n        );\n        assert_eq!(MAX_U128, add(ZERO_U128, MAX_U512));\n\n        // Adding to U256\n        assert_eq!(ONE_U256, add(ZERO_U256, 
ONE_I32));\n        assert_eq!(ZERO_U256, add(MAX_U256, ONE_I32));\n        assert_eq!(ZERO_U256, add(ONE_U256, NEG_ONE_I32));\n        assert_eq!(MAX_U256, add(ZERO_U256, NEG_ONE_I32));\n\n        assert_eq!(ONE_U256, add(ZERO_U256, ONE_U64));\n        assert_eq!(ZERO_U256, add(MAX_U256, ONE_U64));\n\n        assert_eq!(ONE_U256, add(ZERO_U256, ONE_U128));\n        assert_eq!(ZERO_U256, add(MAX_U256, ONE_U128));\n\n        assert_eq!(ONE_U256, add(ZERO_U256, ONE_U256));\n        assert_eq!(ZERO_U256, add(MAX_U256, ONE_U256));\n        assert_eq!(MAX_U256 - 1, add(MAX_U256, MAX_U256));\n\n        assert_eq!(ONE_U256, add(ZERO_U256, ONE_U512));\n        assert_eq!(ZERO_U256, add(MAX_U256, ONE_U512));\n        assert_eq!(\n            ONE_U256,\n            add(\n                ZERO_U256,\n                U512::from_dec_str(&MAX_U256.to_string()).unwrap() + 2,\n            )\n        );\n        assert_eq!(MAX_U256, add(ZERO_U256, MAX_U512));\n\n        // Adding to U512\n        assert_eq!(ONE_U512, add(ZERO_U512, ONE_I32));\n        assert_eq!(ZERO_U512, add(MAX_U512, ONE_I32));\n        assert_eq!(ZERO_U512, add(ONE_U512, NEG_ONE_I32));\n        assert_eq!(MAX_U512, add(ZERO_U512, NEG_ONE_I32));\n\n        assert_eq!(ONE_U512, add(ZERO_U512, ONE_U64));\n        assert_eq!(ZERO_U512, add(MAX_U512, ONE_U64));\n\n        assert_eq!(ONE_U512, add(ZERO_U512, ONE_U128));\n        assert_eq!(ZERO_U512, add(MAX_U512, ONE_U128));\n\n        assert_eq!(ONE_U512, add(ZERO_U512, ONE_U256));\n        assert_eq!(ZERO_U512, add(MAX_U512, ONE_U256));\n\n        assert_eq!(ONE_U512, add(ZERO_U512, ONE_U512));\n        assert_eq!(ZERO_U512, add(MAX_U512, ONE_U512));\n        assert_eq!(MAX_U512 - 1, add(MAX_U512, MAX_U512));\n    }\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        for _ in 0..11 {\n            let execution_result = TransformKindV2::random(rng);\n            bytesrepr::test_serialization_roundtrip(&execution_result);\n 
       }\n    }\n}\n"
  },
  {
    "path": "types/src/execution.rs",
    "content": "//! Types related to execution of deploys.\n\nmod effects;\nmod execution_result;\npub mod execution_result_v1;\nmod execution_result_v2;\nmod transform;\nmod transform_error;\nmod transform_kind;\n\npub use effects::Effects;\npub use execution_result::ExecutionResult;\npub use execution_result_v1::ExecutionResultV1;\npub use execution_result_v2::ExecutionResultV2;\npub use transform::TransformV2;\npub use transform_error::TransformError;\npub use transform_kind::{TransformInstruction, TransformKindV2};\n"
  },
  {
    "path": "types/src/file_utils.rs",
    "content": "//! Utilities for handling reading from and writing to files.\n\nuse std::{\n    fs,\n    io::{self, Write},\n    os::unix::fs::OpenOptionsExt,\n    path::{Path, PathBuf},\n};\n\nuse thiserror::Error;\n\n/// Error reading a file.\n#[derive(Debug, Error)]\n#[error(\"could not read '{0}': {error}\", .path.display())]\npub struct ReadFileError {\n    /// Path that failed to be read.\n    path: PathBuf,\n    /// The underlying OS error.\n    #[source]\n    error: io::Error,\n}\n\n/// Error writing a file\n#[derive(Debug, Error)]\n#[error(\"could not write to '{0}': {error}\", .path.display())]\npub struct WriteFileError {\n    /// Path that failed to be written to.\n    path: PathBuf,\n    /// The underlying OS error.\n    #[source]\n    error: io::Error,\n}\n\n/// Read complete at `path` into memory.\n///\n/// Wraps `fs::read`, but preserves the filename for better error printing.\npub fn read_file<P: AsRef<Path>>(filename: P) -> Result<Vec<u8>, ReadFileError> {\n    let path = filename.as_ref();\n    fs::read(path).map_err(|error| ReadFileError {\n        path: path.to_owned(),\n        error,\n    })\n}\n\n/// Write data to `path`.\n///\n/// Wraps `fs::write`, but preserves the filename for better error printing.\npub(crate) fn write_file<P: AsRef<Path>, B: AsRef<[u8]>>(\n    filename: P,\n    data: B,\n) -> Result<(), WriteFileError> {\n    let path = filename.as_ref();\n    fs::write(path, data.as_ref()).map_err(|error| WriteFileError {\n        path: path.to_owned(),\n        error,\n    })\n}\n\n/// Writes data to `path`, ensuring only the owner can read or write it.\n///\n/// Otherwise functions like [`write_file`].\npub(crate) fn write_private_file<P: AsRef<Path>, B: AsRef<[u8]>>(\n    filename: P,\n    data: B,\n) -> Result<(), WriteFileError> {\n    let path = filename.as_ref();\n    fs::OpenOptions::new()\n        .write(true)\n        .create(true)\n        .truncate(true)\n        .mode(0o600)\n        .open(path)\n        .and_then(|mut 
file| file.write_all(data.as_ref()))\n        .map_err(|error| WriteFileError {\n            path: path.to_owned(),\n            error,\n        })\n}\n"
  },
  {
    "path": "types/src/gas.rs",
    "content": "//! The `gas` module is used for working with Gas including converting to and from Motes.\n\nuse alloc::vec::Vec;\nuse core::fmt;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    Motes, U512,\n};\n\n/// The `Gas` struct represents a `U512` amount of gas.\n#[derive(\n    Debug, Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize,\n)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct Gas(U512);\n\nimpl Gas {\n    /// The maximum value of `Gas`.\n    pub const MAX: Gas = Gas(U512::MAX);\n\n    /// Constructs a new `Gas`.\n    pub fn new<T: Into<U512>>(value: T) -> Self {\n        Gas(value.into())\n    }\n\n    /// Constructs a new `Gas` with value `0`.\n    pub const fn zero() -> Self {\n        Gas(U512::zero())\n    }\n\n    /// Returns the inner `U512` value.\n    pub fn value(&self) -> U512 {\n        self.0\n    }\n\n    /// Converts the given `motes` to `Gas` by dividing them by `conv_rate`.\n    ///\n    /// Returns `None` if `motes_per_unit_of_gas == 0`.\n    pub fn from_motes(motes: Motes, motes_per_unit_of_gas: u8) -> Option<Self> {\n        motes\n            .value()\n            .checked_div(U512::from(motes_per_unit_of_gas))\n            .map(Self::new)\n    }\n\n    /// Converts the given `U512` to `Gas` by dividing it by `gas_price`.\n    ///\n    /// Returns `None` if `gas_price == 0`.\n    pub fn from_price(base_amount: U512, gas_price: u8) -> Option<Self> {\n        base_amount\n            .checked_div(U512::from(gas_price))\n            .map(Self::new)\n    }\n\n    /// Checked integer addition. 
Computes `self + rhs`, returning `None` if overflow occurred.\n    pub fn checked_add(&self, rhs: Self) -> Option<Self> {\n        self.0.checked_add(rhs.value()).map(Self::new)\n    }\n\n    /// Saturating integer addition. Computes `self + rhs`, returning max if overflow occurred.\n    pub fn saturating_add(self, rhs: Self) -> Self {\n        Gas(self.0.saturating_add(rhs.value()))\n    }\n\n    /// Saturating integer subtraction. Computes `self + rhs`, returning min if overflow occurred.\n    pub fn saturating_sub(self, rhs: Self) -> Self {\n        Gas(self.0.saturating_sub(rhs.value()))\n    }\n\n    /// Checked integer subtraction. Computes `self - rhs`, returning `None` if overflow occurred.\n    pub fn checked_sub(&self, rhs: Self) -> Option<Self> {\n        self.0.checked_sub(rhs.value()).map(Self::new)\n    }\n\n    /// Checked integer subtraction. Computes `self * rhs`, returning `None` if overflow occurred.\n    pub fn checked_mul(&self, rhs: Self) -> Option<Self> {\n        self.0.checked_mul(rhs.value()).map(Self::new)\n    }\n\n    /// Checked integer division. 
Computes `self / rhs`, returning `None` if overflow occurred.\n    pub fn checked_div(&self, rhs: Self) -> Option<Self> {\n        self.0.checked_div(rhs.value()).map(Self::new)\n    }\n\n    /// Returns a random `Gas`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        Self(rng.gen::<u128>().into())\n    }\n}\n\nimpl ToBytes for Gas {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.0.write_bytes(writer)\n    }\n}\n\nimpl FromBytes for Gas {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (value, remainder) = U512::from_bytes(bytes)?;\n        Ok((Gas(value), remainder))\n    }\n}\n\nimpl fmt::Display for Gas {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"{:?}\", self.0)\n    }\n}\n\nimpl From<u32> for Gas {\n    fn from(gas: u32) -> Self {\n        let gas_u512: U512 = gas.into();\n        Gas::new(gas_u512)\n    }\n}\n\nimpl From<u64> for Gas {\n    fn from(gas: u64) -> Self {\n        let gas_u512: U512 = gas.into();\n        Gas::new(gas_u512)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::U512;\n\n    use crate::{Gas, Motes};\n\n    #[test]\n    fn should_be_able_to_get_instance_of_gas() {\n        let initial_value = 1;\n        let gas = Gas::new(U512::from(initial_value));\n        assert_eq!(\n            initial_value,\n            gas.value().as_u64(),\n            \"should have equal value\"\n        )\n    }\n\n    #[test]\n    fn should_be_able_to_compare_two_instances_of_gas() {\n        let left_gas = Gas::new(U512::from(1));\n        let right_gas = Gas::new(U512::from(1));\n        assert_eq!(left_gas, right_gas, \"should be equal\");\n        let right_gas = 
Gas::new(U512::from(2));\n        assert_ne!(left_gas, right_gas, \"should not be equal\")\n    }\n\n    #[test]\n    fn should_be_able_to_add_two_instances_of_gas() {\n        let left_gas = Gas::new(U512::from(1));\n        let right_gas = Gas::new(U512::from(1));\n        let expected_gas = Gas::new(U512::from(2));\n        assert_eq!(\n            left_gas.checked_add(right_gas),\n            Some(expected_gas),\n            \"should be equal\"\n        )\n    }\n\n    #[test]\n    fn should_be_able_to_subtract_two_instances_of_gas() {\n        let left_gas = Gas::new(U512::from(1));\n        let right_gas = Gas::new(U512::from(1));\n        let expected_gas = Gas::new(U512::from(0));\n        assert_eq!(\n            left_gas.checked_sub(right_gas),\n            Some(expected_gas),\n            \"should be equal\"\n        )\n    }\n\n    #[test]\n    fn should_be_able_to_multiply_two_instances_of_gas() {\n        let left_gas = Gas::new(U512::from(100));\n        let right_gas = Gas::new(U512::from(10));\n        let expected_gas = Gas::new(U512::from(1000));\n        assert_eq!(\n            left_gas.checked_mul(right_gas),\n            Some(expected_gas),\n            \"should be equal\"\n        )\n    }\n\n    #[test]\n    fn should_be_able_to_divide_two_instances_of_gas() {\n        let left_gas = Gas::new(U512::from(1000));\n        let right_gas = Gas::new(U512::from(100));\n        let expected_gas = Gas::new(U512::from(10));\n        assert_eq!(\n            left_gas.checked_div(right_gas),\n            Some(expected_gas),\n            \"should be equal\"\n        )\n    }\n\n    #[test]\n    fn should_be_able_to_convert_from_mote() {\n        let mote = Motes::new(U512::from(100));\n        let gas = Gas::from_motes(mote, 10).expect(\"should have gas\");\n        let expected_gas = Gas::new(U512::from(10));\n        assert_eq!(gas, expected_gas, \"should be equal\")\n    }\n\n    #[test]\n    fn should_be_able_to_default() {\n        let gas = 
Gas::default();\n        let expected_gas = Gas::zero();\n        assert_eq!(gas, expected_gas, \"should be equal\")\n    }\n\n    #[test]\n    fn should_be_able_to_compare_relative_value() {\n        let left_gas = Gas::new(U512::from(100));\n        let right_gas = Gas::new(U512::from(10));\n        assert!(left_gas > right_gas, \"should be gt\");\n        let right_gas = Gas::new(U512::from(100));\n        assert!(left_gas >= right_gas, \"should be gte\");\n        assert!(left_gas <= right_gas, \"should be lte\");\n        let left_gas = Gas::new(U512::from(10));\n        assert!(left_gas < right_gas, \"should be lt\");\n    }\n\n    #[test]\n    fn should_support_checked_div_from_motes() {\n        let motes = Motes::zero();\n        let conv_rate = 0;\n        let maybe = Gas::from_motes(motes, conv_rate);\n        assert!(maybe.is_none(), \"should be none due to divide by zero\");\n    }\n}\n"
  },
  {
    "path": "types/src/gens.rs",
    "content": "//! Contains functions for generating arbitrary values for use by\n//! [`Proptest`](https://crates.io/crates/proptest).\n#![allow(missing_docs)]\nuse alloc::{\n    boxed::Box,\n    collections::{BTreeMap, BTreeSet},\n    string::String,\n    vec,\n};\n\nuse crate::{\n    account::{\n        self, action_thresholds::gens::account_action_thresholds_arb,\n        associated_keys::gens::account_associated_keys_arb, Account, AccountHash,\n    },\n    addressable_entity::{\n        action_thresholds::gens::action_thresholds_arb, associated_keys::gens::associated_keys_arb,\n        ContractRuntimeTag, MessageTopics, NamedKeyAddr, NamedKeyValue, Parameters, Weight,\n    },\n    block::BlockGlobalAddr,\n    byte_code::ByteCodeKind,\n    bytesrepr::Bytes,\n    contract_messages::{MessageAddr, MessageChecksum, MessageTopicSummary, TopicNameHash},\n    contracts::{\n        Contract, ContractHash, ContractPackage, ContractPackageStatus, ContractVersionKey,\n        ContractVersions, EntryPoint as ContractEntryPoint, EntryPoints as ContractEntryPoints,\n        NamedKeys,\n    },\n    crypto::{\n        self,\n        gens::{public_key_arb_no_system, secret_key_arb_no_system},\n    },\n    deploy_info::gens::deploy_info_arb,\n    global_state::{Pointer, TrieMerkleProof, TrieMerkleProofStep},\n    package::{EntityVersionKey, EntityVersions, Groups, PackageStatus},\n    system::{\n        auction::{\n            gens::era_info_arb, Bid, BidAddr, BidKind, DelegationRate, Delegator, DelegatorBid,\n            DelegatorKind, Reservation, UnbondingPurse, ValidatorBid, ValidatorCredit,\n            WithdrawPurse, DELEGATION_RATE_DENOMINATOR,\n        },\n        mint::BalanceHoldAddr,\n        SystemEntityType,\n    },\n    transaction::{\n        gens::deploy_hash_arb, FieldsContainer, InitiatorAddrAndSecretKey, TransactionArgs,\n        TransactionRuntimeParams, TransactionV1Payload,\n    },\n    transfer::{\n        gens::{transfer_v1_addr_arb, transfer_v1_arb},\n   
     TransferAddr,\n    },\n    AccessRights, AddressableEntity, AddressableEntityHash, BlockTime, ByteCode, ByteCodeAddr,\n    CLType, CLValue, Digest, EntityAddr, EntityEntryPoint, EntityKind, EntryPointAccess,\n    EntryPointAddr, EntryPointPayment, EntryPointType, EntryPoints, EraId, Group, InitiatorAddr,\n    Key, NamedArg, Package, Parameter, Phase, PricingMode, ProtocolVersion, PublicKey, RuntimeArgs,\n    SemVer, StoredValue, TimeDiff, Timestamp, Transaction, TransactionEntryPoint,\n    TransactionInvocationTarget, TransactionScheduling, TransactionTarget, TransactionV1, URef,\n    U128, U256, U512,\n};\nuse proptest::{\n    array, bits, bool,\n    collection::{self, vec, SizeRange},\n    option,\n    prelude::*,\n    result,\n};\n\npub fn u8_slice_32() -> impl Strategy<Value = [u8; 32]> {\n    collection::vec(any::<u8>(), 32).prop_map(|b| {\n        let mut res = [0u8; 32];\n        res.clone_from_slice(b.as_slice());\n        res\n    })\n}\n\npub fn u2_slice_32() -> impl Strategy<Value = [u8; 32]> {\n    array::uniform32(any::<u8>()).prop_map(|mut arr| {\n        for byte in arr.iter_mut() {\n            *byte &= 0b11;\n        }\n        arr\n    })\n}\n\npub(crate) fn named_keys_arb(depth: usize) -> impl Strategy<Value = NamedKeys> {\n    collection::btree_map(\"\\\\PC*\", key_arb(), depth).prop_map(NamedKeys::from)\n}\n\npub fn access_rights_arb() -> impl Strategy<Value = AccessRights> {\n    prop_oneof![\n        Just(AccessRights::NONE),\n        Just(AccessRights::READ),\n        Just(AccessRights::ADD),\n        Just(AccessRights::WRITE),\n        Just(AccessRights::READ_ADD),\n        Just(AccessRights::READ_WRITE),\n        Just(AccessRights::ADD_WRITE),\n        Just(AccessRights::READ_ADD_WRITE),\n    ]\n}\n\npub fn phase_arb() -> impl Strategy<Value = Phase> {\n    prop_oneof![\n        Just(Phase::Payment),\n        Just(Phase::Session),\n        Just(Phase::FinalizePayment),\n    ]\n}\n\npub fn uref_arb() -> impl Strategy<Value = URef> {\n  
  (array::uniform32(bits::u8::ANY), access_rights_arb())\n        .prop_map(|(id, access_rights)| URef::new(id, access_rights))\n}\n\npub fn era_id_arb() -> impl Strategy<Value = EraId> {\n    any::<u64>().prop_map(EraId::from)\n}\n\npub fn named_key_addr_arb() -> impl Strategy<Value = NamedKeyAddr> {\n    (entity_addr_arb(), u8_slice_32())\n        .prop_map(|(entity_addr, b)| NamedKeyAddr::new_named_key_entry(entity_addr, b))\n}\n\npub fn message_addr_arb() -> impl Strategy<Value = MessageAddr> {\n    prop_oneof![\n        (entity_addr_arb(), u8_slice_32()).prop_map(|(hash_addr, topic_name_hash)| {\n            MessageAddr::new_topic_addr(hash_addr, TopicNameHash::new(topic_name_hash))\n        }),\n        (entity_addr_arb(), u8_slice_32(), example_u32_arb()).prop_map(\n            |(hash_addr, topic_name_hash, index)| MessageAddr::new_message_addr(\n                hash_addr,\n                TopicNameHash::new(topic_name_hash),\n                index\n            )\n        ),\n    ]\n}\n\npub fn entry_point_addr_arb() -> impl Strategy<Value = EntryPointAddr> {\n    (entity_addr_arb(), any::<String>()).prop_map(|(entity_addr, b)| {\n        EntryPointAddr::new_v1_entry_point_addr(entity_addr, &b).unwrap()\n    })\n}\n\npub fn byte_code_addr_arb() -> impl Strategy<Value = ByteCodeAddr> {\n    prop_oneof![\n        Just(ByteCodeAddr::Empty),\n        u8_slice_32().prop_map(ByteCodeAddr::V1CasperWasm),\n        u8_slice_32().prop_map(ByteCodeAddr::V2CasperWasm),\n    ]\n}\n\npub fn key_arb() -> impl Strategy<Value = Key> {\n    prop_oneof![\n        account_hash_arb().prop_map(Key::Account),\n        u8_slice_32().prop_map(Key::Hash),\n        uref_arb().prop_map(Key::URef),\n        transfer_v1_addr_arb().prop_map(Key::Transfer),\n        deploy_hash_arb().prop_map(Key::DeployInfo),\n        era_id_arb().prop_map(Key::EraInfo),\n        uref_arb().prop_map(|uref| Key::Balance(uref.addr())),\n        bid_addr_validator_arb().prop_map(Key::BidAddr),\n        
bid_addr_delegator_arb().prop_map(Key::BidAddr),\n        account_hash_arb().prop_map(Key::Withdraw),\n        u8_slice_32().prop_map(Key::Dictionary),\n        balance_hold_addr_arb().prop_map(Key::BalanceHold),\n        Just(Key::EraSummary)\n    ]\n}\n\npub fn all_keys_arb() -> impl Strategy<Value = Key> {\n    prop_oneof![\n        account_hash_arb().prop_map(Key::Account),\n        u8_slice_32().prop_map(Key::Hash),\n        uref_arb().prop_map(Key::URef),\n        transfer_v1_addr_arb().prop_map(Key::Transfer),\n        deploy_hash_arb().prop_map(Key::DeployInfo),\n        era_id_arb().prop_map(Key::EraInfo),\n        uref_arb().prop_map(|uref| Key::Balance(uref.addr())),\n        account_hash_arb().prop_map(Key::Withdraw),\n        u8_slice_32().prop_map(Key::Dictionary),\n        balance_hold_addr_arb().prop_map(Key::BalanceHold),\n        Just(Key::EraSummary),\n        Just(Key::SystemEntityRegistry),\n        Just(Key::ChainspecRegistry),\n        Just(Key::ChecksumRegistry),\n        bid_addr_arb().prop_map(Key::BidAddr),\n        account_hash_arb().prop_map(Key::Bid),\n        account_hash_arb().prop_map(Key::Unbond),\n        u8_slice_32().prop_map(Key::SmartContract),\n        byte_code_addr_arb().prop_map(Key::ByteCode),\n        entity_addr_arb().prop_map(Key::AddressableEntity),\n        block_global_addr_arb().prop_map(Key::BlockGlobal),\n        message_addr_arb().prop_map(Key::Message),\n        named_key_addr_arb().prop_map(Key::NamedKey),\n        balance_hold_addr_arb().prop_map(Key::BalanceHold),\n        entry_point_addr_arb().prop_map(Key::EntryPoint),\n        entity_addr_arb().prop_map(Key::State),\n    ]\n}\n\npub fn colliding_key_arb() -> impl Strategy<Value = Key> {\n    prop_oneof![\n        u2_slice_32().prop_map(|bytes| Key::Account(AccountHash::new(bytes))),\n        u2_slice_32().prop_map(Key::Hash),\n        u2_slice_32().prop_map(|bytes| Key::URef(URef::new(bytes, AccessRights::NONE))),\n        u2_slice_32().prop_map(|bytes| 
Key::Transfer(TransferAddr::new(bytes))),\n        u2_slice_32().prop_map(Key::Dictionary),\n    ]\n}\n\npub fn account_hash_arb() -> impl Strategy<Value = AccountHash> {\n    u8_slice_32().prop_map(AccountHash::new)\n}\n\npub fn entity_addr_arb() -> impl Strategy<Value = EntityAddr> {\n    prop_oneof![\n        u8_slice_32().prop_map(EntityAddr::System),\n        u8_slice_32().prop_map(EntityAddr::Account),\n        u8_slice_32().prop_map(EntityAddr::SmartContract),\n    ]\n}\n\npub fn topic_name_hash_arb() -> impl Strategy<Value = TopicNameHash> {\n    u8_slice_32().prop_map(TopicNameHash::new)\n}\n\npub fn bid_addr_validator_arb() -> impl Strategy<Value = BidAddr> {\n    u8_slice_32().prop_map(BidAddr::new_validator_addr)\n}\n\npub fn bid_addr_delegator_arb() -> impl Strategy<Value = BidAddr> {\n    let x = u8_slice_32();\n    let y = u8_slice_32();\n    (x, y).prop_map(BidAddr::new_delegator_account_addr)\n}\n\npub fn bid_legacy_arb() -> impl Strategy<Value = BidAddr> {\n    u8_slice_32().prop_map(BidAddr::legacy)\n}\n\npub fn bid_addr_delegated_arb() -> impl Strategy<Value = BidAddr> {\n    (public_key_arb_no_system(), delegator_kind_arb()).prop_map(|(validator, delegator_kind)| {\n        BidAddr::new_delegator_kind(&validator, &delegator_kind)\n    })\n}\n\npub fn bid_addr_credit_arb() -> impl Strategy<Value = BidAddr> {\n    (public_key_arb_no_system(), era_id_arb())\n        .prop_map(|(validator, era_id)| BidAddr::new_credit(&validator, era_id))\n}\n\npub fn bid_addr_reservation_account_arb() -> impl Strategy<Value = BidAddr> {\n    (public_key_arb_no_system(), public_key_arb_no_system())\n        .prop_map(|(validator, delegator)| BidAddr::new_reservation_account(&validator, &delegator))\n}\n\npub fn bid_addr_reservation_purse_arb() -> impl Strategy<Value = BidAddr> {\n    (public_key_arb_no_system(), u8_slice_32())\n        .prop_map(|(validator, uref)| BidAddr::new_reservation_purse(&validator, uref))\n}\n\npub fn bid_addr_new_unbond_account_arb() -> 
impl Strategy<Value = BidAddr> {\n    (public_key_arb_no_system(), public_key_arb_no_system())\n        .prop_map(|(validator, unbonder)| BidAddr::new_unbond_account(validator, unbonder))\n}\n\npub fn bid_addr_arb() -> impl Strategy<Value = BidAddr> {\n    prop_oneof![\n        bid_addr_validator_arb(),\n        bid_addr_delegator_arb(),\n        bid_legacy_arb(),\n        bid_addr_delegated_arb(),\n        bid_addr_credit_arb(),\n        bid_addr_reservation_account_arb(),\n        bid_addr_reservation_purse_arb(),\n        bid_addr_new_unbond_account_arb(),\n    ]\n}\n\npub fn balance_hold_addr_arb() -> impl Strategy<Value = BalanceHoldAddr> {\n    let x = uref_arb().prop_map(|uref| uref.addr());\n    let y = any::<u64>();\n    (x, y).prop_map(|(x, y)| BalanceHoldAddr::new_gas(x, BlockTime::new(y)))\n}\n\npub fn block_global_addr_arb() -> impl Strategy<Value = BlockGlobalAddr> {\n    prop_oneof![\n        0 => Just(BlockGlobalAddr::BlockTime),\n        1 => Just(BlockGlobalAddr::MessageCount)\n    ]\n}\n\npub fn weight_arb() -> impl Strategy<Value = Weight> {\n    any::<u8>().prop_map(Weight::new)\n}\n\npub fn account_weight_arb() -> impl Strategy<Value = account::Weight> {\n    any::<u8>().prop_map(account::Weight::new)\n}\n\npub fn sem_ver_arb() -> impl Strategy<Value = SemVer> {\n    (any::<u32>(), any::<u32>(), any::<u32>())\n        .prop_map(|(major, minor, patch)| SemVer::new(major, minor, patch))\n}\n\npub fn protocol_version_arb() -> impl Strategy<Value = ProtocolVersion> {\n    sem_ver_arb().prop_map(ProtocolVersion::new)\n}\n\npub fn u128_arb() -> impl Strategy<Value = U128> {\n    collection::vec(any::<u8>(), 0..16).prop_map(|b| U128::from_little_endian(b.as_slice()))\n}\n\npub fn u256_arb() -> impl Strategy<Value = U256> {\n    collection::vec(any::<u8>(), 0..32).prop_map(|b| U256::from_little_endian(b.as_slice()))\n}\n\npub fn u512_arb() -> impl Strategy<Value = U512> {\n    prop_oneof![\n        1 => Just(U512::zero()),\n        8 => 
collection::vec(any::<u8>(), 0..64).prop_map(|b| U512::from_little_endian(b.as_slice())),\n        1 => Just(U512::MAX),\n    ]\n}\n\npub fn cl_simple_type_arb() -> impl Strategy<Value = CLType> {\n    prop_oneof![\n        Just(CLType::Bool),\n        Just(CLType::I32),\n        Just(CLType::I64),\n        Just(CLType::U8),\n        Just(CLType::U32),\n        Just(CLType::U64),\n        Just(CLType::U128),\n        Just(CLType::U256),\n        Just(CLType::U512),\n        Just(CLType::Unit),\n        Just(CLType::String),\n        Just(CLType::Key),\n        Just(CLType::URef),\n    ]\n}\n\npub fn cl_type_arb() -> impl Strategy<Value = CLType> {\n    cl_simple_type_arb().prop_recursive(4, 16, 8, |element| {\n        prop_oneof![\n            // We want to produce basic types too\n            element.clone(),\n            // For complex type\n            element\n                .clone()\n                .prop_map(|val| CLType::Option(Box::new(val))),\n            element.clone().prop_map(|val| CLType::List(Box::new(val))),\n            // Realistic Result type generator: ok is anything recursive, err is simple type\n            (element.clone(), cl_simple_type_arb()).prop_map(|(ok, err)| CLType::Result {\n                ok: Box::new(ok),\n                err: Box::new(err)\n            }),\n            // Realistic Map type generator: key is simple type, value is complex recursive type\n            (cl_simple_type_arb(), element.clone()).prop_map(|(key, value)| CLType::Map {\n                key: Box::new(key),\n                value: Box::new(value)\n            }),\n            // Various tuples\n            element\n                .clone()\n                .prop_map(|cl_type| CLType::Tuple1([Box::new(cl_type)])),\n            (element.clone(), element.clone()).prop_map(|(cl_type1, cl_type2)| CLType::Tuple2([\n                Box::new(cl_type1),\n                Box::new(cl_type2)\n            ])),\n            (element.clone(), element.clone(), 
element).prop_map(\n                |(cl_type1, cl_type2, cl_type3)| CLType::Tuple3([\n                    Box::new(cl_type1),\n                    Box::new(cl_type2),\n                    Box::new(cl_type3)\n                ])\n            ),\n        ]\n    })\n}\n\npub fn cl_value_arb() -> impl Strategy<Value = CLValue> {\n    // If compiler brings you here it most probably means you've added a variant to `CLType` enum\n    // but forgot to add generator for it.\n    let stub: Option<CLType> = None;\n    if let Some(cl_type) = stub {\n        match cl_type {\n            CLType::Bool\n            | CLType::I32\n            | CLType::I64\n            | CLType::U8\n            | CLType::U32\n            | CLType::U64\n            | CLType::U128\n            | CLType::U256\n            | CLType::U512\n            | CLType::Unit\n            | CLType::String\n            | CLType::Key\n            | CLType::URef\n            | CLType::PublicKey\n            | CLType::Option(_)\n            | CLType::List(_)\n            | CLType::ByteArray(..)\n            | CLType::Result { .. }\n            | CLType::Map { .. 
}\n            | CLType::Tuple1(_)\n            | CLType::Tuple2(_)\n            | CLType::Tuple3(_)\n            | CLType::Any => (),\n        }\n    };\n\n    prop_oneof![\n        Just(CLValue::from_t(()).expect(\"should create CLValue\")),\n        any::<bool>().prop_map(|x| CLValue::from_t(x).expect(\"should create CLValue\")),\n        any::<i32>().prop_map(|x| CLValue::from_t(x).expect(\"should create CLValue\")),\n        any::<i64>().prop_map(|x| CLValue::from_t(x).expect(\"should create CLValue\")),\n        any::<u8>().prop_map(|x| CLValue::from_t(x).expect(\"should create CLValue\")),\n        any::<u32>().prop_map(|x| CLValue::from_t(x).expect(\"should create CLValue\")),\n        any::<u64>().prop_map(|x| CLValue::from_t(x).expect(\"should create CLValue\")),\n        u128_arb().prop_map(|x| CLValue::from_t(x).expect(\"should create CLValue\")),\n        u256_arb().prop_map(|x| CLValue::from_t(x).expect(\"should create CLValue\")),\n        u512_arb().prop_map(|x| CLValue::from_t(x).expect(\"should create CLValue\")),\n        key_arb().prop_map(|x| CLValue::from_t(x).expect(\"should create CLValue\")),\n        uref_arb().prop_map(|x| CLValue::from_t(x).expect(\"should create CLValue\")),\n        \".*\".prop_map(|x: String| CLValue::from_t(x).expect(\"should create CLValue\")),\n        option::of(any::<u64>()).prop_map(|x| CLValue::from_t(x).expect(\"should create CLValue\")),\n        collection::vec(uref_arb(), 0..100)\n            .prop_map(|x| CLValue::from_t(x).expect(\"should create CLValue\")),\n        result::maybe_err(key_arb(), \".*\")\n            .prop_map(|x| CLValue::from_t(x).expect(\"should create CLValue\")),\n        collection::btree_map(\".*\", u512_arb(), 0..100)\n            .prop_map(|x| CLValue::from_t(x).expect(\"should create CLValue\")),\n        any::<bool>().prop_map(|x| CLValue::from_t(x).expect(\"should create CLValue\")),\n        (any::<bool>(), any::<i32>())\n            .prop_map(|x| 
CLValue::from_t(x).expect(\"should create CLValue\")),\n        (any::<bool>(), any::<i32>(), any::<i64>())\n            .prop_map(|x| CLValue::from_t(x).expect(\"should create CLValue\")),\n        // Fixed lists of any size\n        any::<u8>().prop_map(|len| CLValue::from_t([len; 32]).expect(\"should create CLValue\")),\n    ]\n}\n\npub fn result_arb() -> impl Strategy<Value = Result<u32, u32>> {\n    result::maybe_ok(any::<u32>(), any::<u32>())\n}\n\npub fn named_args_arb() -> impl Strategy<Value = NamedArg> {\n    (\".*\", cl_value_arb()).prop_map(|(name, value)| NamedArg::new(name, value))\n}\n\npub fn group_arb() -> impl Strategy<Value = Group> {\n    \".*\".prop_map(Group::new)\n}\n\npub fn entry_point_access_arb() -> impl Strategy<Value = EntryPointAccess> {\n    prop_oneof![\n        Just(EntryPointAccess::Public),\n        collection::vec(group_arb(), 0..32).prop_map(EntryPointAccess::Groups),\n        Just(EntryPointAccess::Template),\n    ]\n}\n\npub fn entry_point_type_arb() -> impl Strategy<Value = EntryPointType> {\n    prop_oneof![\n        Just(EntryPointType::Caller),\n        Just(EntryPointType::Called),\n        Just(EntryPointType::Factory),\n    ]\n}\n\npub fn entry_point_payment_arb() -> impl Strategy<Value = EntryPointPayment> {\n    prop_oneof![\n        Just(EntryPointPayment::Caller),\n        Just(EntryPointPayment::DirectInvocationOnly),\n        Just(EntryPointPayment::SelfOnward),\n    ]\n}\n\npub fn parameter_arb() -> impl Strategy<Value = Parameter> {\n    (\".*\", cl_type_arb()).prop_map(|(name, cl_type)| Parameter::new(name, cl_type))\n}\n\npub fn parameters_arb() -> impl Strategy<Value = Parameters> {\n    collection::vec(parameter_arb(), 0..10)\n}\n\npub fn entry_point_arb() -> impl Strategy<Value = EntityEntryPoint> {\n    (\n        \".*\",\n        parameters_arb(),\n        entry_point_type_arb(),\n        entry_point_access_arb(),\n        entry_point_payment_arb(),\n        cl_type_arb(),\n    )\n        .prop_map(\n     
       |(name, parameters, entry_point_type, entry_point_access, entry_point_payment, ret)| {\n                EntityEntryPoint::new(\n                    name,\n                    parameters,\n                    ret,\n                    entry_point_access,\n                    entry_point_type,\n                    entry_point_payment,\n                )\n            },\n        )\n}\n\npub fn contract_entry_point_arb() -> impl Strategy<Value = ContractEntryPoint> {\n    (\n        \".*\",\n        parameters_arb(),\n        entry_point_type_arb(),\n        entry_point_access_arb(),\n        cl_type_arb(),\n    )\n        .prop_map(\n            |(name, parameters, entry_point_type, entry_point_access, ret)| {\n                ContractEntryPoint::new(name, parameters, ret, entry_point_access, entry_point_type)\n            },\n        )\n}\n\npub fn entry_points_arb() -> impl Strategy<Value = EntryPoints> {\n    collection::vec(entry_point_arb(), 1..10).prop_map(EntryPoints::from)\n}\n\npub fn contract_entry_points_arb() -> impl Strategy<Value = ContractEntryPoints> {\n    collection::vec(contract_entry_point_arb(), 1..10).prop_map(ContractEntryPoints::from)\n}\n\npub fn message_topics_arb() -> impl Strategy<Value = MessageTopics> {\n    collection::vec(any::<String>(), 1..100).prop_map(|topic_names| {\n        MessageTopics::from(\n            topic_names\n                .into_iter()\n                .map(|name| {\n                    let name_hash = crypto::blake2b(&name).into();\n                    (name, name_hash)\n                })\n                .collect::<BTreeMap<String, TopicNameHash>>(),\n        )\n    })\n}\n\npub fn account_arb() -> impl Strategy<Value = Account> {\n    (\n        account_hash_arb(),\n        named_keys_arb(20),\n        uref_arb(),\n        account_associated_keys_arb(),\n        account_action_thresholds_arb(),\n    )\n        .prop_map(\n            |(account_hash, named_keys, main_purse, associated_keys, 
action_thresholds)| {\n                Account::new(\n                    account_hash,\n                    named_keys,\n                    main_purse,\n                    associated_keys,\n                    action_thresholds,\n                )\n            },\n        )\n}\n\npub fn contract_package_arb() -> impl Strategy<Value = ContractPackage> {\n    (\n        uref_arb(),\n        contract_versions_arb(),\n        disabled_contract_versions_arb(),\n        groups_arb(),\n    )\n        .prop_map(|(access_key, versions, disabled_versions, groups)| {\n            ContractPackage::new(\n                access_key,\n                versions,\n                disabled_versions,\n                groups,\n                ContractPackageStatus::default(),\n            )\n        })\n}\n\npub fn contract_arb() -> impl Strategy<Value = Contract> {\n    (\n        protocol_version_arb(),\n        contract_entry_points_arb(),\n        u8_slice_32(),\n        u8_slice_32(),\n        named_keys_arb(20),\n    )\n        .prop_map(\n            |(\n                protocol_version,\n                entry_points,\n                contract_package_hash_arb,\n                contract_wasm_hash,\n                named_keys,\n            )| {\n                Contract::new(\n                    contract_package_hash_arb.into(),\n                    contract_wasm_hash.into(),\n                    named_keys,\n                    entry_points,\n                    protocol_version,\n                )\n            },\n        )\n}\n\npub fn system_entity_type_arb() -> impl Strategy<Value = SystemEntityType> {\n    prop_oneof![\n        Just(SystemEntityType::Mint),\n        Just(SystemEntityType::HandlePayment),\n        Just(SystemEntityType::StandardPayment),\n        Just(SystemEntityType::Auction),\n    ]\n}\n\npub fn contract_runtime_arb() -> impl Strategy<Value = ContractRuntimeTag> {\n    prop_oneof![\n        Just(ContractRuntimeTag::VmCasperV1),\n        
Just(ContractRuntimeTag::VmCasperV2),\n    ]\n}\n\npub fn entity_kind_arb() -> impl Strategy<Value = EntityKind> {\n    prop_oneof![\n        system_entity_type_arb().prop_map(EntityKind::System),\n        account_hash_arb().prop_map(EntityKind::Account),\n        contract_runtime_arb().prop_map(EntityKind::SmartContract),\n    ]\n}\n\npub fn addressable_entity_hash_arb() -> impl Strategy<Value = AddressableEntityHash> {\n    u8_slice_32().prop_map(AddressableEntityHash::new)\n}\n\npub fn addressable_entity_arb() -> impl Strategy<Value = AddressableEntity> {\n    (\n        protocol_version_arb(),\n        u8_slice_32(),\n        u8_slice_32(),\n        uref_arb(),\n        associated_keys_arb(),\n        action_thresholds_arb(),\n        entity_kind_arb(),\n    )\n        .prop_map(\n            |(\n                protocol_version,\n                contract_package_hash_arb,\n                contract_wasm_hash,\n                main_purse,\n                associated_keys,\n                action_thresholds,\n                entity_kind,\n            )| {\n                AddressableEntity::new(\n                    contract_package_hash_arb.into(),\n                    contract_wasm_hash.into(),\n                    protocol_version,\n                    main_purse,\n                    associated_keys,\n                    action_thresholds,\n                    entity_kind,\n                )\n            },\n        )\n}\n\npub fn byte_code_arb() -> impl Strategy<Value = ByteCode> {\n    collection::vec(any::<u8>(), 1..1000)\n        .prop_map(|byte_code| ByteCode::new(ByteCodeKind::V1CasperWasm, byte_code))\n}\n\npub fn contract_version_key_arb() -> impl Strategy<Value = ContractVersionKey> {\n    (1..32u32, 1..1000u32)\n        .prop_map(|(major, contract_ver)| ContractVersionKey::new(major, contract_ver))\n}\n\npub fn entity_version_key_arb() -> impl Strategy<Value = EntityVersionKey> {\n    (1..32u32, 1..1000u32)\n        .prop_map(|(major, contract_ver)| 
EntityVersionKey::new(major, contract_ver))\n}\n\npub fn contract_versions_arb() -> impl Strategy<Value = ContractVersions> {\n    collection::btree_map(\n        contract_version_key_arb(),\n        u8_slice_32().prop_map(ContractHash::new),\n        1..5,\n    )\n}\n\npub fn entity_versions_arb() -> impl Strategy<Value = EntityVersions> {\n    collection::btree_map(entity_version_key_arb(), entity_addr_arb(), 1..5)\n        .prop_map(EntityVersions::from)\n}\n\npub fn disabled_versions_arb() -> impl Strategy<Value = BTreeSet<EntityVersionKey>> {\n    collection::btree_set(entity_version_key_arb(), 0..5)\n}\n\npub fn disabled_contract_versions_arb() -> impl Strategy<Value = BTreeSet<ContractVersionKey>> {\n    collection::btree_set(contract_version_key_arb(), 0..5)\n}\n\npub fn groups_arb() -> impl Strategy<Value = Groups> {\n    collection::btree_map(group_arb(), collection::btree_set(uref_arb(), 1..10), 0..5)\n        .prop_map(Groups::from)\n}\n\npub fn package_arb() -> impl Strategy<Value = Package> {\n    (entity_versions_arb(), disabled_versions_arb(), groups_arb()).prop_map(\n        |(versions, disabled_versions, groups)| {\n            Package::new(\n                versions,\n                disabled_versions,\n                groups,\n                PackageStatus::default(),\n            )\n        },\n    )\n}\n\npub(crate) fn delegator_arb() -> impl Strategy<Value = Delegator> {\n    (\n        public_key_arb_no_system(),\n        u512_arb(),\n        uref_arb(),\n        public_key_arb_no_system(),\n    )\n        .prop_map(\n            |(delegator_pk, staked_amount, bonding_purse, validator_pk)| {\n                Delegator::unlocked(delegator_pk, staked_amount, bonding_purse, validator_pk)\n            },\n        )\n}\n\npub(crate) fn delegator_kind_arb() -> impl Strategy<Value = DelegatorKind> {\n    prop_oneof![\n        public_key_arb_no_system().prop_map(DelegatorKind::PublicKey),\n        
array::uniform32(bits::u8::ANY).prop_map(DelegatorKind::Purse)\n    ]\n}\n\npub(crate) fn delegator_bid_arb() -> impl Strategy<Value = DelegatorBid> {\n    (\n        public_key_arb_no_system(),\n        u512_arb(),\n        uref_arb(),\n        public_key_arb_no_system(),\n    )\n        .prop_map(\n            |(delegator_pk, staked_amount, bonding_purse, validator_pk)| {\n                DelegatorBid::unlocked(\n                    delegator_pk.into(),\n                    staked_amount,\n                    bonding_purse,\n                    validator_pk,\n                )\n            },\n        )\n}\n\nfn delegation_rate_arb() -> impl Strategy<Value = DelegationRate> {\n    0..=DELEGATION_RATE_DENOMINATOR // Maximum, allowed value for delegation rate.\n}\n\npub(crate) fn reservation_bid_arb() -> impl Strategy<Value = BidKind> {\n    reservation_arb().prop_map(|reservation| BidKind::Reservation(Box::new(reservation)))\n}\n\npub(crate) fn reservation_arb() -> impl Strategy<Value = Reservation> {\n    (\n        public_key_arb_no_system(),\n        delegator_kind_arb(),\n        delegation_rate_arb(),\n    )\n        .prop_map(|(validator_pk, delegator_kind, delegation_rate)| {\n            Reservation::new(validator_pk, delegator_kind, delegation_rate)\n        })\n}\n\npub(crate) fn unified_bid_arb(\n    delegations_len: impl Into<SizeRange>,\n) -> impl Strategy<Value = BidKind> {\n    (\n        public_key_arb_no_system(),\n        uref_arb(),\n        u512_arb(),\n        delegation_rate_arb(),\n        bool::ANY,\n        collection::vec(delegator_arb(), delegations_len),\n    )\n        .prop_map(\n            |(\n                validator_public_key,\n                bonding_purse,\n                staked_amount,\n                delegation_rate,\n                is_locked,\n                new_delegators,\n            )| {\n                let mut bid = if is_locked {\n                    Bid::locked(\n                        validator_public_key,\n   
                     bonding_purse,\n                        staked_amount,\n                        delegation_rate,\n                        1u64,\n                    )\n                } else {\n                    Bid::unlocked(\n                        validator_public_key,\n                        bonding_purse,\n                        staked_amount,\n                        delegation_rate,\n                    )\n                };\n                let delegators = bid.delegators_mut();\n                new_delegators.into_iter().for_each(|delegator| {\n                    assert!(delegators\n                        .insert(delegator.delegator_public_key().clone(), delegator)\n                        .is_none());\n                });\n                BidKind::Unified(Box::new(bid))\n            },\n        )\n}\n\npub(crate) fn delegator_bid_kind_arb() -> impl Strategy<Value = BidKind> {\n    delegator_bid_arb().prop_map(|delegator| BidKind::Delegator(Box::new(delegator)))\n}\n\npub(crate) fn validator_bid_arb() -> impl Strategy<Value = BidKind> {\n    (\n        public_key_arb_no_system(),\n        uref_arb(),\n        u512_arb(),\n        delegation_rate_arb(),\n        bool::ANY,\n    )\n        .prop_map(\n            |(validator_public_key, bonding_purse, staked_amount, delegation_rate, is_locked)| {\n                let validator_bid = if is_locked {\n                    ValidatorBid::locked(\n                        validator_public_key,\n                        bonding_purse,\n                        staked_amount,\n                        delegation_rate,\n                        1u64,\n                        0,\n                        u64::MAX,\n                        0,\n                    )\n                } else {\n                    ValidatorBid::unlocked(\n                        validator_public_key,\n                        bonding_purse,\n                        staked_amount,\n                        delegation_rate,\n             
           0,\n                        u64::MAX,\n                        0,\n                    )\n                };\n                BidKind::Validator(Box::new(validator_bid))\n            },\n        )\n}\n\npub(crate) fn credit_bid_arb() -> impl Strategy<Value = BidKind> {\n    (public_key_arb_no_system(), era_id_arb(), u512_arb()).prop_map(\n        |(validator_public_key, era_id, amount)| {\n            BidKind::Credit(Box::new(ValidatorCredit::new(\n                validator_public_key,\n                era_id,\n                amount,\n            )))\n        },\n    )\n}\n\nfn withdraw_arb() -> impl Strategy<Value = WithdrawPurse> {\n    (\n        uref_arb(),\n        public_key_arb_no_system(),\n        public_key_arb_no_system(),\n        era_id_arb(),\n        u512_arb(),\n    )\n        .prop_map(|(bonding_purse, validator_pk, unbonder_pk, era, amount)| {\n            WithdrawPurse::new(bonding_purse, validator_pk, unbonder_pk, era, amount)\n        })\n}\n\nfn withdraws_arb(size: impl Into<SizeRange>) -> impl Strategy<Value = Vec<WithdrawPurse>> {\n    collection::vec(withdraw_arb(), size)\n}\n\nfn unbonding_arb() -> impl Strategy<Value = UnbondingPurse> {\n    (\n        uref_arb(),\n        public_key_arb_no_system(),\n        public_key_arb_no_system(),\n        era_id_arb(),\n        u512_arb(),\n        option::of(public_key_arb_no_system()),\n    )\n        .prop_map(\n            |(\n                bonding_purse,\n                validator_public_key,\n                unbonder_public_key,\n                era,\n                amount,\n                new_validator,\n            )| {\n                UnbondingPurse::new(\n                    bonding_purse,\n                    validator_public_key,\n                    unbonder_public_key,\n                    era,\n                    amount,\n                    new_validator,\n                )\n            },\n        )\n}\n\nfn unbondings_arb(size: impl Into<SizeRange>) -> impl 
Strategy<Value = Vec<UnbondingPurse>> {\n    collection::vec(unbonding_arb(), size)\n}\n\nfn message_topic_summary_arb() -> impl Strategy<Value = MessageTopicSummary> {\n    (any::<u32>(), any::<u64>(), \"test\").prop_map(|(message_count, blocktime, topic_name)| {\n        MessageTopicSummary {\n            message_count,\n            blocktime: BlockTime::new(blocktime),\n            topic_name,\n        }\n    })\n}\n\nfn message_summary_arb() -> impl Strategy<Value = MessageChecksum> {\n    u8_slice_32().prop_map(MessageChecksum)\n}\n\npub fn named_key_value_arb() -> impl Strategy<Value = NamedKeyValue> {\n    (key_arb(), \"test\").prop_map(|(key, string)| {\n        let cl_key = CLValue::from_t(key).unwrap();\n        let cl_string = CLValue::from_t(string).unwrap();\n        NamedKeyValue::new(cl_key, cl_string)\n    })\n}\n\npub fn stored_value_arb() -> impl Strategy<Value = StoredValue> {\n    prop_oneof![\n        cl_value_arb().prop_map(StoredValue::CLValue),\n        account_arb().prop_map(StoredValue::Account),\n        byte_code_arb().prop_map(StoredValue::ByteCode),\n        contract_arb().prop_map(StoredValue::Contract),\n        contract_package_arb().prop_map(StoredValue::ContractPackage),\n        addressable_entity_arb().prop_map(StoredValue::AddressableEntity),\n        package_arb().prop_map(StoredValue::SmartContract),\n        transfer_v1_arb().prop_map(StoredValue::Transfer),\n        deploy_info_arb().prop_map(StoredValue::DeployInfo),\n        era_info_arb(1..10).prop_map(StoredValue::EraInfo),\n        unified_bid_arb(0..3).prop_map(StoredValue::BidKind),\n        validator_bid_arb().prop_map(StoredValue::BidKind),\n        delegator_bid_kind_arb().prop_map(StoredValue::BidKind),\n        reservation_bid_arb().prop_map(StoredValue::BidKind),\n        credit_bid_arb().prop_map(StoredValue::BidKind),\n        withdraws_arb(1..50).prop_map(StoredValue::Withdraw),\n        unbondings_arb(1..50).prop_map(StoredValue::Unbonding),\n        
message_topic_summary_arb().prop_map(StoredValue::MessageTopic),\n        message_summary_arb().prop_map(StoredValue::Message),\n        named_key_value_arb().prop_map(StoredValue::NamedKey),\n        collection::vec(any::<u8>(), 0..1000).prop_map(StoredValue::RawBytes),\n    ]\n    .prop_map(|stored_value|\n            // The following match statement is here only to make sure\n            // we don't forget to update the generator when a new variant is added.\n            match stored_value {\n                StoredValue::CLValue(_) => stored_value,\n                StoredValue::Account(_) => stored_value,\n                StoredValue::ContractWasm(_) => stored_value,\n                StoredValue::Contract(_) => stored_value,\n                StoredValue::ContractPackage(_) => stored_value,\n                StoredValue::Transfer(_) => stored_value,\n                StoredValue::DeployInfo(_) => stored_value,\n                StoredValue::EraInfo(_) => stored_value,\n                StoredValue::Bid(_) => stored_value,\n                StoredValue::Withdraw(_) => stored_value,\n                StoredValue::Unbonding(_) => stored_value,\n                StoredValue::AddressableEntity(_) => stored_value,\n                StoredValue::BidKind(_) => stored_value,\n                StoredValue::SmartContract(_) => stored_value,\n                StoredValue::ByteCode(_) => stored_value,\n                StoredValue::MessageTopic(_) => stored_value,\n                StoredValue::Message(_) => stored_value,\n                StoredValue::NamedKey(_) => stored_value,\n                StoredValue::Prepayment(_) => stored_value,\n                StoredValue::EntryPoint(_) => stored_value,\n                StoredValue::RawBytes(_) => stored_value,\n        })\n}\n\npub fn blake2b_hash_arb() -> impl Strategy<Value = Digest> {\n    vec(any::<u8>(), 0..1000).prop_map(Digest::hash)\n}\n\npub fn trie_pointer_arb() -> impl Strategy<Value = Pointer> {\n    prop_oneof![\n        
blake2b_hash_arb().prop_map(Pointer::LeafPointer),\n        blake2b_hash_arb().prop_map(Pointer::NodePointer)\n    ]\n}\n\npub fn trie_merkle_proof_step_arb() -> impl Strategy<Value = TrieMerkleProofStep> {\n    const POINTERS_SIZE: usize = 32;\n    const AFFIX_SIZE: usize = 6;\n\n    prop_oneof![\n        (\n            <u8>::arbitrary(),\n            vec((<u8>::arbitrary(), trie_pointer_arb()), POINTERS_SIZE)\n        )\n            .prop_map(|(hole_index, indexed_pointers_with_hole)| {\n                TrieMerkleProofStep::Node {\n                    hole_index,\n                    indexed_pointers_with_hole,\n                }\n            }),\n        vec(<u8>::arbitrary(), AFFIX_SIZE).prop_map(|affix| {\n            TrieMerkleProofStep::Extension {\n                affix: affix.into(),\n            }\n        })\n    ]\n}\n\npub fn trie_merkle_proof_arb() -> impl Strategy<Value = TrieMerkleProof<Key, StoredValue>> {\n    const STEPS_SIZE: usize = 6;\n\n    (\n        key_arb(),\n        stored_value_arb(),\n        vec(trie_merkle_proof_step_arb(), STEPS_SIZE),\n    )\n        .prop_map(|(key, value, proof_steps)| TrieMerkleProof::new(key, value, proof_steps.into()))\n}\n\npub fn transaction_scheduling_arb() -> impl Strategy<Value = TransactionScheduling> {\n    prop_oneof![Just(TransactionScheduling::Standard),]\n}\n\npub fn json_compliant_transaction_scheduling_arb() -> impl Strategy<Value = TransactionScheduling> {\n    prop_oneof![Just(TransactionScheduling::Standard),]\n}\n\npub fn transaction_invocation_target_arb() -> impl Strategy<Value = TransactionInvocationTarget> {\n    prop_oneof![\n        addressable_entity_hash_arb().prop_map(TransactionInvocationTarget::new_invocable_entity),\n        Just(TransactionInvocationTarget::new_invocable_entity_alias(\n            \"abcd\".to_string()\n        )),\n        Just(TransactionInvocationTarget::new_package_alias_with_major(\n            \"abcd\".to_string(),\n            None,\n            None\n       
 )),\n        Just(TransactionInvocationTarget::new_package_alias_with_major(\n            \"abcd\".to_string(),\n            Some(1),\n            None\n        )),\n        Just(TransactionInvocationTarget::new_package_alias_with_major(\n            \"abcd\".to_string(),\n            Some(1),\n            Some(1)\n        )),\n        Just(TransactionInvocationTarget::new_package_alias_with_major(\n            \"abcd\".to_string(),\n            None,\n            Some(1)\n        )),\n        u8_slice_32().prop_map(|addr| {\n            TransactionInvocationTarget::new_package_with_major(addr.into(), None, None)\n        }),\n        u8_slice_32().prop_map(|addr| {\n            TransactionInvocationTarget::new_package_with_major(addr.into(), Some(1), Some(2))\n        }),\n        u8_slice_32().prop_map(|addr| {\n            TransactionInvocationTarget::new_package_with_major(addr.into(), None, Some(2))\n        }),\n        u8_slice_32().prop_map(|addr| {\n            TransactionInvocationTarget::new_package_with_major(addr.into(), Some(1), None)\n        })\n    ]\n}\n\npub fn stored_transaction_target() -> impl Strategy<Value = TransactionTarget> {\n    (\n        transaction_invocation_target_arb(),\n        transaction_stored_runtime_params_arb(),\n    )\n        .prop_map(|(id, runtime)| TransactionTarget::Stored { id, runtime })\n}\n\nfn transferred_value_arb() -> impl Strategy<Value = u64> {\n    any::<u64>()\n}\n\nfn seed_arb() -> impl Strategy<Value = Option<[u8; 32]>> {\n    option::of(array::uniform32(any::<u8>()))\n}\n\npub fn session_transaction_target() -> impl Strategy<Value = TransactionTarget> {\n    (\n        any::<bool>(),\n        Just(Bytes::from(vec![1; 10])),\n        transaction_session_runtime_params_arb(),\n    )\n        .prop_map(\n            |(is_install_upgrade, module_bytes, runtime)| TransactionTarget::Session {\n                is_install_upgrade,\n                module_bytes,\n                runtime,\n            },\n        
)\n}\n\npub(crate) fn transaction_stored_runtime_params_arb(\n) -> impl Strategy<Value = TransactionRuntimeParams> {\n    prop_oneof![\n        Just(TransactionRuntimeParams::VmCasperV1),\n        transferred_value_arb().prop_map(|transferred_value| {\n            TransactionRuntimeParams::VmCasperV2 {\n                transferred_value,\n                seed: None,\n            }\n        }),\n    ]\n}\n\npub(crate) fn transaction_session_runtime_params_arb(\n) -> impl Strategy<Value = TransactionRuntimeParams> {\n    prop_oneof![\n        Just(TransactionRuntimeParams::VmCasperV1),\n        (transferred_value_arb(), seed_arb()).prop_map(|(transferred_value, seed)| {\n            TransactionRuntimeParams::VmCasperV2 {\n                transferred_value,\n                seed,\n            }\n        })\n    ]\n}\n\npub fn transaction_target_arb() -> impl Strategy<Value = TransactionTarget> {\n    prop_oneof![\n        Just(TransactionTarget::Native),\n        (\n            transaction_invocation_target_arb(),\n            transaction_stored_runtime_params_arb(),\n        )\n            .prop_map(|(id, runtime)| TransactionTarget::Stored { id, runtime }),\n        (\n            any::<bool>(),\n            Just(Bytes::from(vec![1; 10])),\n            transaction_session_runtime_params_arb(),\n        )\n            .prop_map(|(is_install_upgrade, module_bytes, runtime)| {\n                TransactionTarget::Session {\n                    is_install_upgrade,\n                    module_bytes,\n                    runtime,\n                }\n            })\n    ]\n}\n\npub fn legal_target_entry_point_calls_arb(\n) -> impl Strategy<Value = (TransactionTarget, TransactionEntryPoint)> {\n    prop_oneof![\n        native_entry_point_arb().prop_map(|s| (TransactionTarget::Native, s)),\n        stored_transaction_target()\n            .prop_map(|s| (s, TransactionEntryPoint::Custom(\"ABC\".to_string()))),\n        session_transaction_target().prop_map(|s| (s, 
TransactionEntryPoint::Call)),\n    ]\n}\n\npub fn native_entry_point_arb() -> impl Strategy<Value = TransactionEntryPoint> {\n    prop_oneof![\n        Just(TransactionEntryPoint::Transfer),\n        Just(TransactionEntryPoint::AddBid),\n        Just(TransactionEntryPoint::WithdrawBid),\n        Just(TransactionEntryPoint::Delegate),\n        Just(TransactionEntryPoint::Undelegate),\n        Just(TransactionEntryPoint::Redelegate),\n        Just(TransactionEntryPoint::ActivateBid),\n        Just(TransactionEntryPoint::ChangeBidPublicKey),\n        Just(TransactionEntryPoint::AddReservations),\n        Just(TransactionEntryPoint::CancelReservations),\n    ]\n}\npub fn transaction_entry_point_arb() -> impl Strategy<Value = TransactionEntryPoint> {\n    prop_oneof![\n        native_entry_point_arb(),\n        Just(TransactionEntryPoint::Call),\n        Just(TransactionEntryPoint::Custom(\"custom\".to_string())),\n    ]\n}\n\npub fn runtime_args_arb() -> impl Strategy<Value = RuntimeArgs> {\n    let mut runtime_args_1 = RuntimeArgs::new();\n    let semi_random_string_pairs = [\n        (\"977837db-8dba-48c2-86f1-32f9740631db\", \"b7b3b3b3-8b3b-48c2-86f1-32f9740631db\"),\n        (\"5de3eecc-b9c8-477f-bebe-937c3a16df85\", \"2ffd7939-34e5-4660-af9f-772a83011ce0\"),\n        (\"036db036-8b7b-4009-a0d4-c9ce\", \"515f4fe6-06c8-45c5-8554-f07e727a842d036db036-8b7b-4009-a0d4-c9ce036db036-8b7b-4009-a0d4-c9ce\"),\n    ];\n    for (key, val_str) in semi_random_string_pairs.iter() {\n        let _ = runtime_args_1.insert(key.to_string(), Bytes::from(val_str.as_bytes()));\n    }\n    prop_oneof![Just(runtime_args_1)]\n}\n\nfn transaction_args_bytes_arbitrary() -> impl Strategy<Value = TransactionArgs> {\n    prop::collection::vec(any::<u8>(), 0..100)\n        .prop_map(|bytes| TransactionArgs::Bytesrepr(bytes.into()))\n}\n\npub fn transaction_args_arb() -> impl Strategy<Value = TransactionArgs> {\n    prop_oneof![\n        runtime_args_arb().prop_map(TransactionArgs::Named),\n     
   transaction_args_bytes_arbitrary()\n    ]\n}\n\npub fn fields_arb() -> impl Strategy<Value = BTreeMap<u16, Bytes>> {\n    collection::btree_map(\n        any::<u16>(),\n        any::<String>().prop_map(|s| Bytes::from(s.as_bytes())),\n        3..30,\n    )\n}\npub fn v1_transaction_payload_arb() -> impl Strategy<Value = TransactionV1Payload> {\n    (\n        any::<String>(),\n        timestamp_arb(),\n        any::<u64>(),\n        pricing_mode_arb(),\n        initiator_addr_arb(),\n        fields_arb(),\n    )\n        .prop_map(\n            |(chain_name, timestamp, ttl_millis, pricing_mode, initiator_addr, fields)| {\n                TransactionV1Payload::new(\n                    chain_name,\n                    timestamp,\n                    TimeDiff::from_millis(ttl_millis),\n                    pricing_mode,\n                    initiator_addr,\n                    fields,\n                )\n            },\n        )\n}\n\npub fn fixed_pricing_mode_arb() -> impl Strategy<Value = PricingMode> {\n    (any::<u8>(), any::<u8>()).prop_map(|(gas_price_tolerance, additional_computation_factor)| {\n        PricingMode::Fixed {\n            gas_price_tolerance,\n            additional_computation_factor,\n        }\n    })\n}\n\npub fn pricing_mode_arb() -> impl Strategy<Value = PricingMode> {\n    prop_oneof![\n        (any::<u64>(), any::<u8>(), any::<bool>()).prop_map(\n            |(payment_amount, gas_price_tolerance, standard_payment)| {\n                PricingMode::PaymentLimited {\n                    payment_amount,\n                    gas_price_tolerance,\n                    standard_payment,\n                }\n            }\n        ),\n        fixed_pricing_mode_arb(),\n    ]\n}\n\npub fn initiator_addr_arb() -> impl Strategy<Value = InitiatorAddr> {\n    prop_oneof![\n        public_key_arb_no_system().prop_map(InitiatorAddr::PublicKey),\n        u8_slice_32().prop_map(|hash| InitiatorAddr::AccountHash(AccountHash::new(hash))),\n    ]\n}\n\npub 
fn timestamp_arb() -> impl Strategy<Value = Timestamp> {\n    //The weird u64 value is the max milliseconds that are before year 10000. 5 digit years are\n    // not rfc3339 compliant and will cause an error when trying to serialize to json.\n    prop_oneof![Just(0_u64), Just(1_u64), Just(253_402_300_799_999_u64)].prop_map(Timestamp::from)\n}\n\npub fn legal_v1_transaction_arb() -> impl Strategy<Value = TransactionV1> {\n    (\n        any::<String>(),\n        timestamp_arb(),\n        any::<u32>(),\n        pricing_mode_arb(),\n        secret_key_arb_no_system(),\n        transaction_args_arb(),\n        json_compliant_transaction_scheduling_arb(),\n        legal_target_entry_point_calls_arb(),\n    )\n        .prop_map(\n            |(\n                chain_name,\n                timestamp,\n                ttl,\n                pricing_mode,\n                secret_key,\n                args,\n                scheduling,\n                (target, entry_point),\n            )| {\n                let public_key = PublicKey::from(&secret_key);\n                let initiator_addr = InitiatorAddr::PublicKey(public_key);\n                let initiator_addr_with_secret = InitiatorAddrAndSecretKey::Both {\n                    initiator_addr,\n                    secret_key: &secret_key,\n                };\n                let container = FieldsContainer::new(args, target, entry_point, scheduling);\n                TransactionV1::build(\n                    chain_name,\n                    timestamp,\n                    TimeDiff::from_seconds(ttl),\n                    pricing_mode,\n                    container.to_map().unwrap(),\n                    initiator_addr_with_secret,\n                )\n            },\n        )\n}\npub fn v1_transaction_arb() -> impl Strategy<Value = TransactionV1> {\n    (\n        any::<String>(),\n        timestamp_arb(),\n        any::<u32>(),\n        pricing_mode_arb(),\n        secret_key_arb_no_system(),\n        
runtime_args_arb(),\n        transaction_target_arb(),\n        transaction_entry_point_arb(),\n        transaction_scheduling_arb(),\n    )\n        .prop_map(\n            |(\n                chain_name,\n                timestamp,\n                ttl,\n                pricing_mode,\n                secret_key,\n                args,\n                target,\n                entry_point,\n                scheduling,\n            )| {\n                let public_key = PublicKey::from(&secret_key);\n                let initiator_addr = InitiatorAddr::PublicKey(public_key);\n                let initiator_addr_with_secret = InitiatorAddrAndSecretKey::Both {\n                    initiator_addr,\n                    secret_key: &secret_key,\n                };\n                let container = FieldsContainer::new(\n                    TransactionArgs::Named(args),\n                    target,\n                    entry_point,\n                    scheduling,\n                );\n                TransactionV1::build(\n                    chain_name,\n                    timestamp,\n                    TimeDiff::from_seconds(ttl),\n                    pricing_mode,\n                    container.to_map().unwrap(),\n                    initiator_addr_with_secret,\n                )\n            },\n        )\n}\n\npub fn transaction_arb() -> impl Strategy<Value = Transaction> {\n    (v1_transaction_arb()).prop_map(Transaction::V1)\n}\n\npub fn legal_transaction_arb() -> impl Strategy<Value = Transaction> {\n    (legal_v1_transaction_arb()).prop_map(Transaction::V1)\n}\npub fn example_u32_arb() -> impl Strategy<Value = u32> {\n    prop_oneof![Just(0), Just(1), Just(u32::MAX / 2), Just(u32::MAX)]\n}\n"
  },
  {
    "path": "types/src/global_state/merkle_proof.rs",
    "content": "use alloc::{collections::VecDeque, vec::Vec};\nuse serde::{Deserialize, Serialize};\n\nuse crate::bytesrepr::{self, Bytes, FromBytes, ToBytes};\n\nuse super::pointer::Pointer;\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\n\nconst TRIE_MERKLE_PROOF_STEP_NODE_ID: u8 = 0;\nconst TRIE_MERKLE_PROOF_STEP_EXTENSION_ID: u8 = 1;\n\n/// A component of a proof that an entry exists in the Merkle trie.\n#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]\npub enum TrieMerkleProofStep {\n    /// Corresponds to a trie node.\n    Node {\n        /// Hole index.\n        hole_index: u8,\n        /// Indexed pointers with hole.\n        indexed_pointers_with_hole: Vec<(u8, Pointer)>,\n    },\n    /// Corresponds to a trie extension.\n    Extension {\n        /// Affix bytes.\n        affix: Bytes,\n    },\n}\n\nimpl TrieMerkleProofStep {\n    /// Constructor for  [`TrieMerkleProofStep::Node`]\n    pub fn node(hole_index: u8, indexed_pointers_with_hole: Vec<(u8, Pointer)>) -> Self {\n        Self::Node {\n            hole_index,\n            indexed_pointers_with_hole,\n        }\n    }\n\n    /// Constructor for  [`TrieMerkleProofStep::Extension`]\n    pub fn extension(affix: Vec<u8>) -> Self {\n        Self::Extension {\n            affix: affix.into(),\n        }\n    }\n\n    /// Returns a random `TrieMerkleProofStep`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        use rand::Rng;\n\n        match rng.gen_range(0..2) {\n            0 => {\n                let hole_index = rng.gen();\n                let indexed_pointers_with_hole = (0..rng.gen_range(0..10))\n                    .map(|_| (rng.gen(), Pointer::random(rng)))\n                    .collect();\n                Self::node(hole_index, indexed_pointers_with_hole)\n            }\n            1 => {\n                let affix = (0..rng.gen_range(0..10)).map(|_| rng.gen()).collect();\n                
Self::extension(affix)\n            }\n            _ => unreachable!(),\n        }\n    }\n}\n\nimpl ToBytes for TrieMerkleProofStep {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut ret: Vec<u8> = bytesrepr::allocate_buffer(self)?;\n        match self {\n            TrieMerkleProofStep::Node {\n                hole_index,\n                indexed_pointers_with_hole,\n            } => {\n                ret.push(TRIE_MERKLE_PROOF_STEP_NODE_ID);\n                ret.push(*hole_index);\n                ret.append(&mut indexed_pointers_with_hole.to_bytes()?)\n            }\n            TrieMerkleProofStep::Extension { affix } => {\n                ret.push(TRIE_MERKLE_PROOF_STEP_EXTENSION_ID);\n                ret.append(&mut affix.to_bytes()?)\n            }\n        };\n        Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize {\n        size_of::<u8>()\n            + match self {\n                TrieMerkleProofStep::Node {\n                    hole_index,\n                    indexed_pointers_with_hole,\n                } => {\n                    (*hole_index).serialized_length()\n                        + (*indexed_pointers_with_hole).serialized_length()\n                }\n                TrieMerkleProofStep::Extension { affix } => affix.serialized_length(),\n            }\n    }\n}\n\nimpl FromBytes for TrieMerkleProofStep {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, rem): (u8, &[u8]) = FromBytes::from_bytes(bytes)?;\n        match tag {\n            TRIE_MERKLE_PROOF_STEP_NODE_ID => {\n                let (hole_index, rem): (u8, &[u8]) = FromBytes::from_bytes(rem)?;\n                let (indexed_pointers_with_hole, rem): (Vec<(u8, Pointer)>, &[u8]) =\n                    FromBytes::from_bytes(rem)?;\n                Ok((\n                    TrieMerkleProofStep::Node {\n                        hole_index,\n                        indexed_pointers_with_hole,\n        
            },\n                    rem,\n                ))\n            }\n            TRIE_MERKLE_PROOF_STEP_EXTENSION_ID => {\n                let (affix, rem): (_, &[u8]) = FromBytes::from_bytes(rem)?;\n                Ok((TrieMerkleProofStep::Extension { affix }, rem))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n/// A proof that a node with a specified `key` and `value` is present in the Merkle trie.\n/// Given a state hash `x`, one can validate a proof `p` by checking `x == p.compute_state_hash()`.\n#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]\npub struct TrieMerkleProof<K, V> {\n    key: K,\n    value: V,\n    proof_steps: VecDeque<TrieMerkleProofStep>,\n}\n\nimpl<K, V> TrieMerkleProof<K, V> {\n    /// Constructor for [`TrieMerkleProof`]\n    pub fn new(key: K, value: V, proof_steps: VecDeque<TrieMerkleProofStep>) -> Self {\n        TrieMerkleProof {\n            key,\n            value,\n            proof_steps,\n        }\n    }\n\n    /// Getter for the key in [`TrieMerkleProof`]\n    pub fn key(&self) -> &K {\n        &self.key\n    }\n\n    /// Getter for the value in [`TrieMerkleProof`]\n    pub fn value(&self) -> &V {\n        &self.value\n    }\n\n    /// Getter for the proof steps in [`TrieMerkleProof`]\n    pub fn proof_steps(&self) -> &VecDeque<TrieMerkleProofStep> {\n        &self.proof_steps\n    }\n\n    /// Transforms a [`TrieMerkleProof`] into the value it contains\n    pub fn into_value(self) -> V {\n        self.value\n    }\n}\n\nimpl<K, V> ToBytes for TrieMerkleProof<K, V>\nwhere\n    K: ToBytes,\n    V: ToBytes,\n{\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut ret: Vec<u8> = bytesrepr::allocate_buffer(self)?;\n        ret.append(&mut self.key.to_bytes()?);\n        ret.append(&mut self.value.to_bytes()?);\n        ret.append(&mut self.proof_steps.to_bytes()?);\n        Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize {\n        
self.key.serialized_length()\n            + self.value.serialized_length()\n            + self.proof_steps.serialized_length()\n    }\n}\n\nimpl<K, V> FromBytes for TrieMerkleProof<K, V>\nwhere\n    K: FromBytes,\n    V: FromBytes,\n{\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (key, rem): (K, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (value, rem): (V, &[u8]) = FromBytes::from_bytes(rem)?;\n        let (proof_steps, rem): (VecDeque<TrieMerkleProofStep>, &[u8]) =\n            FromBytes::from_bytes(rem)?;\n        Ok((\n            TrieMerkleProof {\n                key,\n                value,\n                proof_steps,\n            },\n            rem,\n        ))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use proptest::prelude::*;\n\n    use crate::{bytesrepr, gens};\n\n    proptest! {\n        #[test]\n        fn trie_merkle_proof_step_serialization_is_correct(\n            step in gens::trie_merkle_proof_step_arb()\n        ) {\n            bytesrepr::test_serialization_roundtrip(&step)\n        }\n\n        #[test]\n        fn trie_merkle_proof_serialization_is_correct(\n            proof in gens::trie_merkle_proof_arb()\n        ) {\n            bytesrepr::test_serialization_roundtrip(&proof)\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/global_state/pointer.rs",
    "content": "use core::fmt::Debug;\n\nuse alloc::vec::Vec;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    Digest,\n};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\n\n/// Represents a pointer to the next object in a Merkle Trie\n#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]\npub enum Pointer {\n    /// Leaf pointer.\n    LeafPointer(Digest),\n    /// Node pointer.\n    NodePointer(Digest),\n}\n\nimpl Pointer {\n    /// Borrows the inner hash from a `Pointer`.\n    pub fn hash(&self) -> &Digest {\n        match self {\n            Pointer::LeafPointer(hash) => hash,\n            Pointer::NodePointer(hash) => hash,\n        }\n    }\n\n    /// Takes ownership of the hash, consuming this `Pointer`.\n    pub fn into_hash(self) -> Digest {\n        match self {\n            Pointer::LeafPointer(hash) => hash,\n            Pointer::NodePointer(hash) => hash,\n        }\n    }\n\n    /// Creates a new owned `Pointer` with a new `Digest`.\n    pub fn update(&self, hash: Digest) -> Self {\n        match self {\n            Pointer::LeafPointer(_) => Pointer::LeafPointer(hash),\n            Pointer::NodePointer(_) => Pointer::NodePointer(hash),\n        }\n    }\n\n    /// Returns the `tag` value for a variant of `Pointer`.\n    fn tag(&self) -> u8 {\n        match self {\n            Pointer::LeafPointer(_) => 0,\n            Pointer::NodePointer(_) => 1,\n        }\n    }\n\n    /// Returns a random `Pointer`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        use rand::Rng;\n\n        match rng.gen_range(0..2) {\n            0 => Pointer::LeafPointer(Digest::random(rng)),\n            1 => Pointer::NodePointer(Digest::random(rng)),\n            _ => unreachable!(),\n        }\n    }\n}\n\nimpl ToBytes for Pointer {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let 
mut ret = bytesrepr::unchecked_allocate_buffer(self);\n        self.write_bytes(&mut ret)?;\n        Ok(ret)\n    }\n\n    #[inline(always)]\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH + Digest::LENGTH\n    }\n\n    #[inline]\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        writer.push(self.tag());\n        writer.extend_from_slice(self.hash().as_ref());\n        Ok(())\n    }\n}\n\nimpl FromBytes for Pointer {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, rem) = u8::from_bytes(bytes)?;\n        match tag {\n            0 => {\n                let (hash, rem) = Digest::from_bytes(rem)?;\n                Ok((Pointer::LeafPointer(hash), rem))\n            }\n            1 => {\n                let (hash, rem) = Digest::from_bytes(rem)?;\n                Ok((Pointer::NodePointer(hash), rem))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/global_state.rs",
    "content": "//! Types for global state.\nmod merkle_proof;\nmod pointer;\n\npub use merkle_proof::{TrieMerkleProof, TrieMerkleProofStep};\npub use pointer::Pointer;\n"
  },
  {
    "path": "types/src/json_pretty_printer.rs",
    "content": "extern crate alloc;\n\nuse alloc::{format, string::String, vec::Vec};\n\nuse serde::Serialize;\n\nuse serde_json::{json, Value};\n\nconst MAX_STRING_LEN: usize = 150;\n\n/// Represents the information about a substring found in a string.\n#[derive(Debug)]\nstruct SubstringSpec {\n    /// Index of the first character.\n    start_index: usize,\n    /// Length of the substring.\n    length: usize,\n}\n\nimpl SubstringSpec {\n    /// Constructs a new StringSpec with the given start index and length.\n    fn new(start_index: usize, length: usize) -> Self {\n        Self {\n            start_index,\n            length,\n        }\n    }\n}\n\n/// Serializes the given data structure as a pretty-printed `String` of JSON using\n/// `serde_json::to_string_pretty()`, but after first reducing any large hex-string values.\n///\n/// A large hex-string is one containing only hex characters and which is over `MAX_STRING_LEN`.\n/// Such hex-strings will be replaced by an indication of the number of chars redacted, for example\n/// `[130 hex chars]`.\npub fn json_pretty_print<T>(value: &T) -> serde_json::Result<String>\nwhere\n    T: ?Sized + Serialize,\n{\n    let mut json_value = json!(value);\n    shorten_string_field(&mut json_value);\n\n    serde_json::to_string_pretty(&json_value)\n}\n\n/// Searches the given string for all occurrences of hex substrings\n/// that are longer than the specified `max_len`.\nfn find_hex_strings_longer_than(string: &str, max_len: usize) -> Vec<SubstringSpec> {\n    let mut ranges_to_remove = Vec::new();\n    let mut start_index = 0;\n    let mut contiguous_hex_count = 0;\n\n    // Record all large hex-strings' start positions and lengths.\n    for (index, char) in string.char_indices() {\n        if char.is_ascii_hexdigit() {\n            if contiguous_hex_count == 0 {\n                // This is the start of a new hex-string.\n                start_index = index;\n            }\n            contiguous_hex_count += 1;\n        } 
else if contiguous_hex_count != 0 {\n            // This is the end of a hex-string: if it's too long, record it.\n            if contiguous_hex_count > max_len {\n                ranges_to_remove.push(SubstringSpec::new(start_index, contiguous_hex_count));\n            }\n            contiguous_hex_count = 0;\n        }\n    }\n    // If the string contains a large hex-string at the end, record it now.\n    if contiguous_hex_count > max_len {\n        ranges_to_remove.push(SubstringSpec::new(start_index, contiguous_hex_count));\n    }\n    ranges_to_remove\n}\n\nfn shorten_string_field(value: &mut Value) {\n    match value {\n        Value::String(string) => {\n            // Iterate over the ranges to remove from last to first so each\n            // replacement start index remains valid.\n            find_hex_strings_longer_than(string, MAX_STRING_LEN)\n                .into_iter()\n                .rev()\n                .for_each(\n                    |SubstringSpec {\n                         start_index,\n                         length,\n                     }| {\n                        let range = start_index..(start_index + length);\n                        string.replace_range(range, &format!(\"[{} hex chars]\", length));\n                    },\n                )\n        }\n        Value::Array(values) => {\n            for value in values {\n                shorten_string_field(value);\n            }\n        }\n        Value::Object(map) => {\n            for map_value in map.values_mut() {\n                shorten_string_field(map_value);\n            }\n        }\n        Value::Null | Value::Bool(_) | Value::Number(_) => {}\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    fn hex_string(length: usize) -> String {\n        \"0123456789abcdef\".chars().cycle().take(length).collect()\n    }\n\n    impl PartialEq<(usize, usize)> for SubstringSpec {\n        fn eq(&self, other: &(usize, usize)) -> bool {\n            self.start_index == 
other.0 && self.length == other.1\n        }\n    }\n\n    #[test]\n    fn finds_hex_strings_longer_than() {\n        const TESTING_LEN: usize = 3;\n\n        let input = \"01234\";\n        let expected = vec![(0, 5)];\n        let actual = find_hex_strings_longer_than(input, TESTING_LEN);\n        assert_eq!(actual, expected);\n\n        let input = \"01234-0123\";\n        let expected = vec![(0, 5), (6, 4)];\n        let actual = find_hex_strings_longer_than(input, TESTING_LEN);\n        assert_eq!(actual, expected);\n\n        let input = \"012-34-0123\";\n        let expected = vec![(7, 4)];\n        let actual = find_hex_strings_longer_than(input, TESTING_LEN);\n        assert_eq!(actual, expected);\n\n        let input = \"012-34-01-23\";\n        let expected: Vec<(usize, usize)> = vec![];\n        let actual = find_hex_strings_longer_than(input, TESTING_LEN);\n        assert_eq!(actual, expected);\n\n        let input = \"0\";\n        let expected: Vec<(usize, usize)> = vec![];\n        let actual = find_hex_strings_longer_than(input, TESTING_LEN);\n        assert_eq!(actual, expected);\n\n        let input = \"\";\n        let expected: Vec<(usize, usize)> = vec![];\n        let actual = find_hex_strings_longer_than(input, TESTING_LEN);\n        assert_eq!(actual, expected);\n    }\n\n    #[test]\n    fn respects_length() {\n        let input = \"I like beef\";\n        let expected = vec![(7, 4)];\n        let actual = find_hex_strings_longer_than(input, 3);\n        assert_eq!(actual, expected);\n\n        let input = \"I like beef\";\n        let expected: Vec<(usize, usize)> = vec![];\n        let actual = find_hex_strings_longer_than(input, 1000);\n        assert_eq!(actual, expected);\n    }\n\n    #[test]\n    fn should_shorten_long_strings() {\n        let max_unshortened_hex_string = hex_string(MAX_STRING_LEN);\n        let long_hex_string = hex_string(MAX_STRING_LEN + 1);\n        let long_non_hex_string: String = \"g\".repeat(MAX_STRING_LEN + 
1);\n        let long_hex_substring = format!(\"a-{}-b\", hex_string(MAX_STRING_LEN + 1));\n        let multiple_long_hex_substrings =\n            format!(\"a: {0}, b: {0}, c: {0}\", hex_string(MAX_STRING_LEN + 1));\n\n        let mut long_strings: Vec<String> = vec![];\n        for i in 1..=5 {\n            long_strings.push(\"a\".repeat(MAX_STRING_LEN + i));\n        }\n        let value = json!({\n            \"field_1\": Option::<usize>::None,\n            \"field_2\": true,\n            \"field_3\": 123,\n            \"field_4\": max_unshortened_hex_string,\n            \"field_5\": [\"short string value\", long_hex_string],\n            \"field_6\": {\n                \"f1\": Option::<usize>::None,\n                \"f2\": false,\n                \"f3\": -123,\n                \"f4\": long_non_hex_string,\n                \"f5\": [\"short string value\", long_hex_substring],\n                \"f6\": {\n                    \"final long string\": multiple_long_hex_substrings\n                }\n            }\n        });\n\n        let expected = r#\"{\n  \"field_1\": null,\n  \"field_2\": true,\n  \"field_3\": 123,\n  \"field_4\": \"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef012345\",\n  \"field_5\": [\n    \"short string value\",\n    \"[151 hex chars]\"\n  ],\n  \"field_6\": {\n    \"f1\": null,\n    \"f2\": false,\n    \"f3\": -123,\n    \"f4\": \"ggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg\",\n    \"f5\": [\n      \"short string value\",\n      \"a-[151 hex chars]-b\"\n    ],\n    \"f6\": {\n      \"final long string\": \"a: [151 hex chars], b: [151 hex chars], c: [151 hex chars]\"\n    }\n  }\n}\"#;\n\n        let output = json_pretty_print(&value).unwrap();\n        assert_eq!(\n            output, expected,\n            
\"Actual:\\n{}\\nExpected:\\n{}\\n\",\n            output, expected\n        );\n    }\n\n    #[test]\n    fn should_not_modify_short_strings() {\n        let max_string: String = \"a\".repeat(MAX_STRING_LEN);\n        let value = json!({\n            \"field_1\": Option::<usize>::None,\n            \"field_2\": true,\n            \"field_3\": 123,\n            \"field_4\": max_string,\n            \"field_5\": [\n                \"short string value\",\n                \"another short string\"\n            ],\n            \"field_6\": {\n                \"f1\": Option::<usize>::None,\n                \"f2\": false,\n                \"f3\": -123,\n                \"f4\": \"short\",\n                \"f5\": [\n                    \"short string value\",\n                    \"another short string\"\n                ],\n                \"f6\": {\n                    \"final string\": \"the last short string\"\n                }\n            }\n        });\n\n        let expected = serde_json::to_string_pretty(&value).unwrap();\n        let output = json_pretty_print(&value).unwrap();\n        assert_eq!(\n            output, expected,\n            \"Actual:\\n{}\\nExpected:\\n{}\\n\",\n            output, expected\n        );\n    }\n\n    #[test]\n    /// Ref: https://github.com/casper-network/casper-node/issues/1456\n    fn regression_1456() {\n        let long_string = r#\"state query failed: ValueNotFound(\"Failed to find base key at path: Key::Account(72698d4dc715a28347b15920b09b4f0f1d633be5a33f4686d06992415b0825e2)\")\"#;\n        assert_eq!(long_string.len(), 148);\n\n        let value = json!({\n            \"code\": -32003,\n            \"message\": long_string,\n        });\n\n        let expected = r#\"{\n  \"code\": -32003,\n  \"message\": \"state query failed: ValueNotFound(\\\"Failed to find base key at path: Key::Account(72698d4dc715a28347b15920b09b4f0f1d633be5a33f4686d06992415b0825e2)\\\")\"\n}\"#;\n\n        let output = 
json_pretty_print(&value).unwrap();\n        assert_eq!(\n            output, expected,\n            \"Actual:\\n{}\\nExpected:\\n{}\\n\",\n            output, expected\n        );\n    }\n}\n"
  },
  {
    "path": "types/src/key.rs",
    "content": "//! Key types.\n\nuse alloc::{\n    format,\n    string::{String, ToString},\n    vec::Vec,\n};\n\nuse core::{\n    convert::TryFrom,\n    fmt::{self, Debug, Display, Formatter},\n    str::FromStr,\n};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\n\n#[cfg(doc)]\nuse crate::CLValue;\nuse blake2::{\n    digest::{Update, VariableOutput},\n    VarBlake2b,\n};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer};\nuse tracing::{error, warn};\n\nuse crate::{\n    account::{AccountHash, ACCOUNT_HASH_LENGTH},\n    addressable_entity::{\n        self, AddressableEntityHash, EntityAddr, EntityKindTag, EntryPointAddr, NamedKeyAddr,\n    },\n    block::BlockGlobalAddr,\n    byte_code,\n    bytesrepr::{\n        self, Error, FromBytes, ToBytes, U32_SERIALIZED_LENGTH, U64_SERIALIZED_LENGTH,\n        U8_SERIALIZED_LENGTH,\n    },\n    checksummed_hex,\n    contract_messages::{self, MessageAddr, TopicNameHash, TOPIC_NAME_HASH_LENGTH},\n    contract_wasm::ContractWasmHash,\n    contracts::{ContractHash, ContractPackageHash},\n    package::PackageHash,\n    system::{\n        auction::{BidAddr, BidAddrTag},\n        mint::BalanceHoldAddr,\n    },\n    uref::{self, URef, URefAddr, UREF_SERIALIZED_LENGTH},\n    ByteCodeAddr, DeployHash, Digest, EraId, Tagged, TransferAddr, TransferFromStrError,\n    TRANSFER_ADDR_LENGTH, UREF_ADDR_LENGTH,\n};\n\nconst HASH_PREFIX: &str = \"hash-\";\nconst DEPLOY_INFO_PREFIX: &str = \"deploy-\";\nconst TRANSFER_PREFIX: &str = \"transfer-\";\nconst ERA_INFO_PREFIX: &str = \"era-\";\nconst BALANCE_PREFIX: &str = \"balance-\";\nconst BALANCE_HOLD_PREFIX: &str = \"balance-hold-\";\nconst BID_PREFIX: &str = \"bid-\";\nconst WITHDRAW_PREFIX: &str = 
\"withdraw-\";\nconst DICTIONARY_PREFIX: &str = \"dictionary-\";\nconst UNBOND_PREFIX: &str = \"unbond-\";\nconst SYSTEM_ENTITY_REGISTRY_PREFIX: &str = \"system-entity-registry-\";\nconst ERA_SUMMARY_PREFIX: &str = \"era-summary-\";\nconst CHAINSPEC_REGISTRY_PREFIX: &str = \"chainspec-registry-\";\nconst CHECKSUM_REGISTRY_PREFIX: &str = \"checksum-registry-\";\nconst BID_ADDR_PREFIX: &str = \"bid-addr-\";\nconst PACKAGE_PREFIX: &str = \"package-\";\nconst BLOCK_GLOBAL_TIME_PREFIX: &str = \"block-time-\";\nconst BLOCK_GLOBAL_MESSAGE_COUNT_PREFIX: &str = \"block-message-count-\";\nconst BLOCK_GLOBAL_PROTOCOL_VERSION_PREFIX: &str = \"block-protocol-version-\";\nconst BLOCK_GLOBAL_ADDRESSABLE_ENTITY_PREFIX: &str = \"block-addressable-entity-\";\nconst STATE_PREFIX: &str = \"state-\";\nconst REWARDS_HANDLING_PREFIX: &str = \"rewards-handling-\";\n\n/// The number of bytes in a Blake2b hash\npub const BLAKE2B_DIGEST_LENGTH: usize = 32;\n/// The number of bytes in a [`Key::Hash`].\npub const KEY_HASH_LENGTH: usize = 32;\n/// The number of bytes in a [`Key::Transfer`].\npub const KEY_TRANSFER_LENGTH: usize = TRANSFER_ADDR_LENGTH;\n/// The number of bytes in a [`Key::DeployInfo`].\npub const KEY_DEPLOY_INFO_LENGTH: usize = DeployHash::LENGTH;\n/// The number of bytes in a [`Key::Dictionary`].\npub const KEY_DICTIONARY_LENGTH: usize = 32;\n/// The maximum length for a `dictionary_item_key`.\npub const DICTIONARY_ITEM_KEY_MAX_LENGTH: usize = 128;\n/// The maximum length for an `Addr`.\npub const ADDR_LENGTH: usize = 32;\nconst PADDING_BYTES: [u8; 32] = [0u8; 32];\nconst BLOCK_GLOBAL_PADDING_BYTES: [u8; 31] = [0u8; 31];\nconst KEY_ID_SERIALIZED_LENGTH: usize = 1;\n// u8 used to determine the ID\nconst KEY_HASH_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_HASH_LENGTH;\nconst KEY_UREF_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + UREF_SERIALIZED_LENGTH;\nconst KEY_TRANSFER_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_TRANSFER_LENGTH;\nconst 
KEY_DEPLOY_INFO_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_DEPLOY_INFO_LENGTH;\nconst KEY_ERA_INFO_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + U64_SERIALIZED_LENGTH;\nconst KEY_BALANCE_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + UREF_ADDR_LENGTH;\nconst KEY_BID_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_HASH_LENGTH;\nconst KEY_WITHDRAW_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_HASH_LENGTH;\nconst KEY_UNBOND_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_HASH_LENGTH;\nconst KEY_DICTIONARY_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_DICTIONARY_LENGTH;\nconst KEY_SYSTEM_ENTITY_REGISTRY_SERIALIZED_LENGTH: usize =\n    KEY_ID_SERIALIZED_LENGTH + PADDING_BYTES.len();\nconst KEY_ERA_SUMMARY_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + PADDING_BYTES.len();\nconst KEY_CHAINSPEC_REGISTRY_SERIALIZED_LENGTH: usize =\n    KEY_ID_SERIALIZED_LENGTH + PADDING_BYTES.len();\nconst KEY_CHECKSUM_REGISTRY_SERIALIZED_LENGTH: usize =\n    KEY_ID_SERIALIZED_LENGTH + PADDING_BYTES.len();\nconst KEY_REWARDS_HANDLING_SERIALIZED_LENGTH: usize =\n    KEY_ID_SERIALIZED_LENGTH + PADDING_BYTES.len();\nconst KEY_PACKAGE_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + 32;\nconst KEY_MESSAGE_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH\n    + U8_SERIALIZED_LENGTH\n    + KEY_HASH_LENGTH\n    + TOPIC_NAME_HASH_LENGTH\n    + U8_SERIALIZED_LENGTH\n    + U32_SERIALIZED_LENGTH;\n\nconst MAX_SERIALIZED_LENGTH: usize = KEY_MESSAGE_SERIALIZED_LENGTH;\n\n/// An alias for [`Key`]s hash variant.\npub type HashAddr = [u8; KEY_HASH_LENGTH];\n\n/// An alias for [`Key`]s package variant.\npub type PackageAddr = [u8; ADDR_LENGTH];\n\n/// An alias for [`Key`]s dictionary variant.\npub type DictionaryAddr = [u8; KEY_DICTIONARY_LENGTH];\n\n#[allow(missing_docs)]\n#[derive(Debug, Copy, Clone, PartialOrd, Ord, PartialEq, Eq, Hash)]\n#[repr(u8)]\npub enum KeyTag {\n    Account = 0,\n    Hash = 1,\n    URef = 
2,\n    Transfer = 3,\n    DeployInfo = 4,\n    EraInfo = 5,\n    Balance = 6,\n    Bid = 7,\n    Withdraw = 8,\n    Dictionary = 9,\n    SystemEntityRegistry = 10,\n    EraSummary = 11,\n    Unbond = 12,\n    ChainspecRegistry = 13,\n    ChecksumRegistry = 14,\n    BidAddr = 15,\n    Package = 16,\n    AddressableEntity = 17,\n    ByteCode = 18,\n    Message = 19,\n    NamedKey = 20,\n    BlockGlobal = 21,\n    BalanceHold = 22,\n    EntryPoint = 23,\n    State = 24,\n    RewardsHandling = 25,\n}\n\nimpl KeyTag {\n    /// Returns a random `KeyTag`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..=23) {\n            0 => KeyTag::Account,\n            1 => KeyTag::Hash,\n            2 => KeyTag::URef,\n            3 => KeyTag::Transfer,\n            4 => KeyTag::DeployInfo,\n            5 => KeyTag::EraInfo,\n            6 => KeyTag::Balance,\n            7 => KeyTag::Bid,\n            8 => KeyTag::Withdraw,\n            9 => KeyTag::Dictionary,\n            10 => KeyTag::SystemEntityRegistry,\n            11 => KeyTag::EraSummary,\n            12 => KeyTag::Unbond,\n            13 => KeyTag::ChainspecRegistry,\n            14 => KeyTag::ChecksumRegistry,\n            15 => KeyTag::BidAddr,\n            16 => KeyTag::Package,\n            17 => KeyTag::AddressableEntity,\n            18 => KeyTag::ByteCode,\n            19 => KeyTag::Message,\n            20 => KeyTag::NamedKey,\n            21 => KeyTag::BlockGlobal,\n            22 => KeyTag::BalanceHold,\n            23 => KeyTag::EntryPoint,\n            24 => KeyTag::State,\n            _ => panic!(),\n        }\n    }\n}\n\nimpl Display for KeyTag {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            KeyTag::Account => write!(f, \"Account\"),\n            KeyTag::Hash => write!(f, \"Hash\"),\n            KeyTag::URef => write!(f, \"URef\"),\n            KeyTag::Transfer => write!(f, 
\"Transfer\"),\n            KeyTag::DeployInfo => write!(f, \"DeployInfo\"),\n            KeyTag::EraInfo => write!(f, \"EraInfo\"),\n            KeyTag::Balance => write!(f, \"Balance\"),\n            KeyTag::Bid => write!(f, \"Bid\"),\n            KeyTag::Withdraw => write!(f, \"Withdraw\"),\n            KeyTag::Dictionary => write!(f, \"Dictionary\"),\n            KeyTag::SystemEntityRegistry => write!(f, \"SystemEntityRegistry\"),\n            KeyTag::EraSummary => write!(f, \"EraSummary\"),\n            KeyTag::Unbond => write!(f, \"Unbond\"),\n            KeyTag::ChainspecRegistry => write!(f, \"ChainspecRegistry\"),\n            KeyTag::ChecksumRegistry => write!(f, \"ChecksumRegistry\"),\n            KeyTag::BidAddr => write!(f, \"BidAddr\"),\n            KeyTag::Package => write!(f, \"Package\"),\n            KeyTag::AddressableEntity => write!(f, \"AddressableEntity\"),\n            KeyTag::ByteCode => write!(f, \"ByteCode\"),\n            KeyTag::Message => write!(f, \"Message\"),\n            KeyTag::NamedKey => write!(f, \"NamedKey\"),\n            KeyTag::BlockGlobal => write!(f, \"BlockGlobal\"),\n            KeyTag::BalanceHold => write!(f, \"BalanceHold\"),\n            KeyTag::State => write!(f, \"State\"),\n            KeyTag::EntryPoint => write!(f, \"EntryPoint\"),\n            KeyTag::RewardsHandling => write!(f, \"RewardsHandling\"),\n        }\n    }\n}\n\nimpl ToBytes for KeyTag {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut result = bytesrepr::unchecked_allocate_buffer(self);\n        self.write_bytes(&mut result)?;\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        KEY_ID_SERIALIZED_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        writer.push(*self as u8);\n        Ok(())\n    }\n}\n\nimpl FromBytes for KeyTag {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (id, rem) = u8::from_bytes(bytes)?;\n        
let tag = match id {\n            tag if tag == KeyTag::Account as u8 => KeyTag::Account,\n            tag if tag == KeyTag::Hash as u8 => KeyTag::Hash,\n            tag if tag == KeyTag::URef as u8 => KeyTag::URef,\n            tag if tag == KeyTag::Transfer as u8 => KeyTag::Transfer,\n            tag if tag == KeyTag::DeployInfo as u8 => KeyTag::DeployInfo,\n            tag if tag == KeyTag::EraInfo as u8 => KeyTag::EraInfo,\n            tag if tag == KeyTag::Balance as u8 => KeyTag::Balance,\n            tag if tag == KeyTag::Bid as u8 => KeyTag::Bid,\n            tag if tag == KeyTag::Withdraw as u8 => KeyTag::Withdraw,\n            tag if tag == KeyTag::Dictionary as u8 => KeyTag::Dictionary,\n            tag if tag == KeyTag::SystemEntityRegistry as u8 => KeyTag::SystemEntityRegistry,\n            tag if tag == KeyTag::EraSummary as u8 => KeyTag::EraSummary,\n            tag if tag == KeyTag::Unbond as u8 => KeyTag::Unbond,\n            tag if tag == KeyTag::ChainspecRegistry as u8 => KeyTag::ChainspecRegistry,\n            tag if tag == KeyTag::ChecksumRegistry as u8 => KeyTag::ChecksumRegistry,\n            tag if tag == KeyTag::BidAddr as u8 => KeyTag::BidAddr,\n            tag if tag == KeyTag::Package as u8 => KeyTag::Package,\n            tag if tag == KeyTag::AddressableEntity as u8 => KeyTag::AddressableEntity,\n            tag if tag == KeyTag::ByteCode as u8 => KeyTag::ByteCode,\n            tag if tag == KeyTag::Message as u8 => KeyTag::Message,\n            tag if tag == KeyTag::NamedKey as u8 => KeyTag::NamedKey,\n            tag if tag == KeyTag::BlockGlobal as u8 => KeyTag::BlockGlobal,\n            tag if tag == KeyTag::BalanceHold as u8 => KeyTag::BalanceHold,\n            tag if tag == KeyTag::EntryPoint as u8 => KeyTag::EntryPoint,\n            tag if tag == KeyTag::State as u8 => KeyTag::State,\n            tag if tag == KeyTag::RewardsHandling as u8 => KeyTag::RewardsHandling,\n            _ => return Err(Error::Formatting),\n        };\n 
       Ok((tag, rem))\n    }\n}\n\n/// The key under which data (e.g. [`CLValue`]s, smart contracts, user accounts) are stored in\n/// global state.\n#[repr(C)]\n#[derive(PartialEq, Eq, Clone, Copy, PartialOrd, Ord, Hash)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub enum Key {\n    /// A `Key` under which a user account is stored.\n    Account(AccountHash),\n    /// A `Key` under which a smart contract is stored and which is the pseudo-hash of the\n    /// contract.\n    Hash(HashAddr),\n    /// A `Key` which is a [`URef`], under which most types of data can be stored.\n    URef(URef),\n    /// A `Key` under which a transfer is stored.\n    Transfer(TransferAddr),\n    /// A `Key` under which a deploy info is stored.\n    DeployInfo(DeployHash),\n    /// A `Key` under which an era info is stored.\n    EraInfo(EraId),\n    /// A `Key` under which a purse balance is stored.\n    Balance(URefAddr),\n    /// A `Key` under which bid information is stored.\n    Bid(AccountHash),\n    /// A `Key` under which withdraw information is stored.\n    Withdraw(AccountHash),\n    /// A `Key` whose value is derived by hashing a [`URef`] address and arbitrary data, under\n    /// which a dictionary is stored.\n    Dictionary(DictionaryAddr),\n    /// A `Key` under which system entity hashes are stored.\n    SystemEntityRegistry,\n    /// A `Key` under which current era info is stored.\n    EraSummary,\n    /// A `Key` under which unbond information is stored.\n    Unbond(AccountHash),\n    /// A `Key` under which chainspec and other hashes are stored.\n    ChainspecRegistry,\n    /// A `Key` under which a registry of checksums is stored.\n    ChecksumRegistry,\n    /// A `Key` under which bid information is stored.\n    BidAddr(BidAddr),\n    /// A `Key` under which package information is stored.\n    SmartContract(PackageAddr),\n    /// A `Key` under which an addressable entity is stored.\n    AddressableEntity(EntityAddr),\n    /// A `Key` under which a byte code 
record is stored.\n    ByteCode(ByteCodeAddr),\n    /// A `Key` under which a message is stored.\n    Message(MessageAddr),\n    /// A `Key` under which a single named key entry is stored.\n    NamedKey(NamedKeyAddr),\n    /// A `Key` under which per-block details are stored to global state.\n    BlockGlobal(BlockGlobalAddr),\n    /// A `Key` under which a hold on a purse balance is stored.\n    BalanceHold(BalanceHoldAddr),\n    /// A `Key` under which a entrypoint record is written.\n    EntryPoint(EntryPointAddr),\n    /// A `Key` under which a contract's state lives.\n    State(EntityAddr),\n    /// A `Key` under which we store rewards handling information\n    RewardsHandling,\n}\n\n#[cfg(feature = \"json-schema\")]\nimpl JsonSchema for Key {\n    fn schema_name() -> String {\n        String::from(\"Key\")\n    }\n\n    fn json_schema(gen: &mut schemars::gen::SchemaGenerator) -> schemars::schema::Schema {\n        let schema = gen.subschema_for::<String>();\n        let mut schema_object = schema.into_object();\n        schema_object.metadata().description = Some(\n            \"The key as a formatted string, under which data (e.g. 
`CLValue`s, smart contracts, \\\n                user accounts) are stored in global state.\"\n                .to_string(),\n        );\n        schema_object.into()\n    }\n}\n\n/// Errors produced when converting a `String` into a `Key`.\n#[derive(Debug)]\n#[non_exhaustive]\npub enum FromStrError {\n    /// Account parse error.\n    Account(addressable_entity::FromStrError),\n    /// Hash parse error.\n    Hash(String),\n    /// URef parse error.\n    URef(uref::FromStrError),\n    /// Transfer parse error.\n    Transfer(TransferFromStrError),\n    /// DeployInfo parse error.\n    DeployInfo(String),\n    /// EraInfo parse error.\n    EraInfo(String),\n    /// Balance parse error.\n    Balance(String),\n    /// Bid parse error.\n    Bid(String),\n    /// Withdraw parse error.\n    Withdraw(String),\n    /// Dictionary parse error.\n    Dictionary(String),\n    /// System entity registry parse error.\n    SystemEntityRegistry(String),\n    /// Era summary parse error.\n    EraSummary(String),\n    /// Unbond parse error.\n    Unbond(String),\n    /// Chainspec registry error.\n    ChainspecRegistry(String),\n    /// Checksum registry error.\n    ChecksumRegistry(String),\n    /// Bid parse error.\n    BidAddr(String),\n    /// Package parse error.\n    Package(String),\n    /// Entity parse error.\n    AddressableEntity(String),\n    /// Byte code parse error.\n    ByteCode(String),\n    /// Message parse error.\n    Message(contract_messages::FromStrError),\n    /// Named key parse error.\n    NamedKey(String),\n    /// BlockGlobal key parse error.\n    BlockGlobal(String),\n    /// Balance hold parse error.\n    BalanceHold(String),\n    /// Entry point parse error.\n    EntryPoint(String),\n    /// State key parse error.\n    State(String),\n    RewardsHandling(String),\n    /// Unknown prefix.\n    UnknownPrefix,\n}\n\nimpl From<addressable_entity::FromStrError> for FromStrError {\n    fn from(error: addressable_entity::FromStrError) -> Self {\n        
FromStrError::Account(error)\n    }\n}\n\nimpl From<uref::FromStrError> for FromStrError {\n    fn from(error: uref::FromStrError) -> Self {\n        FromStrError::URef(error)\n    }\n}\n\nimpl From<contract_messages::FromStrError> for FromStrError {\n    fn from(error: contract_messages::FromStrError) -> Self {\n        FromStrError::Message(error)\n    }\n}\n\nimpl Display for FromStrError {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        match self {\n            FromStrError::Account(error) => write!(f, \"account-key from string error: {}\", error),\n            FromStrError::Hash(error) => write!(f, \"hash-key from string error: {}\", error),\n            FromStrError::URef(error) => write!(f, \"uref-key from string error: {}\", error),\n            FromStrError::Transfer(error) => {\n                write!(f, \"legacy-transfer-key from string error: {}\", error)\n            }\n            FromStrError::DeployInfo(error) => {\n                write!(f, \"deploy-info-key from string error: {}\", error)\n            }\n            FromStrError::EraInfo(error) => write!(f, \"era-info-key from string error: {}\", error),\n            FromStrError::Balance(error) => write!(f, \"balance-key from string error: {}\", error),\n            FromStrError::Bid(error) => write!(f, \"bid-key from string error: {}\", error),\n            FromStrError::Withdraw(error) => write!(f, \"withdraw-key from string error: {}\", error),\n            FromStrError::Dictionary(error) => {\n                write!(f, \"dictionary-key from string error: {}\", error)\n            }\n            FromStrError::SystemEntityRegistry(error) => {\n                write!(\n                    f,\n                    \"system-contract-registry-key from string error: {}\",\n                    error\n                )\n            }\n            FromStrError::EraSummary(error) => {\n                write!(f, \"era-summary-key from string error: {}\", error)\n            }\n            
FromStrError::Unbond(error) => {\n                write!(f, \"unbond-key from string error: {}\", error)\n            }\n            FromStrError::ChainspecRegistry(error) => {\n                write!(f, \"chainspec-registry-key from string error: {}\", error)\n            }\n            FromStrError::ChecksumRegistry(error) => {\n                write!(f, \"checksum-registry-key from string error: {}\", error)\n            }\n            FromStrError::BidAddr(error) => write!(f, \"bid-addr-key from string error: {}\", error),\n            FromStrError::Package(error) => write!(f, \"package-key from string error: {}\", error),\n            FromStrError::AddressableEntity(error) => {\n                write!(f, \"addressable-entity-key from string error: {}\", error)\n            }\n            FromStrError::ByteCode(error) => {\n                write!(f, \"byte-code-key from string error: {}\", error)\n            }\n            FromStrError::Message(error) => {\n                write!(f, \"message-key from string error: {}\", error)\n            }\n            FromStrError::NamedKey(error) => {\n                write!(f, \"named-key from string error: {}\", error)\n            }\n            FromStrError::BlockGlobal(error) => {\n                write!(f, \"block-message-count-key form string error: {}\", error)\n            }\n            FromStrError::BalanceHold(error) => {\n                write!(f, \"balance-hold from string error: {}\", error)\n            }\n            FromStrError::EntryPoint(error) => {\n                write!(f, \"entry-point from string error: {}\", error)\n            }\n            FromStrError::UnknownPrefix => write!(f, \"unknown prefix for key\"),\n            FromStrError::State(error) => write!(f, \"state-key from string error: {}\", error),\n\n            FromStrError::RewardsHandling(error) => {\n                write!(f, \"rewards-handling-key from string error: {}\", error)\n            }\n        }\n    }\n}\n\nimpl Key {\n  
  // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    pub fn type_string(&self) -> String {\n        match self {\n            Key::Account(_) => String::from(\"Key::Account\"),\n            Key::Hash(_) => String::from(\"Key::Hash\"),\n            Key::URef(_) => String::from(\"Key::URef\"),\n            Key::Transfer(_) => String::from(\"Key::Transfer\"),\n            Key::DeployInfo(_) => String::from(\"Key::DeployInfo\"),\n            Key::EraInfo(_) => String::from(\"Key::EraInfo\"),\n            Key::Balance(_) => String::from(\"Key::Balance\"),\n            Key::Bid(_) => String::from(\"Key::Bid\"),\n            Key::Withdraw(_) => String::from(\"Key::Unbond\"),\n            Key::Dictionary(_) => String::from(\"Key::Dictionary\"),\n            Key::SystemEntityRegistry => String::from(\"Key::SystemEntityRegistry\"),\n            Key::EraSummary => String::from(\"Key::EraSummary\"),\n            Key::Unbond(_) => String::from(\"Key::Unbond\"),\n            Key::ChainspecRegistry => String::from(\"Key::ChainspecRegistry\"),\n            Key::ChecksumRegistry => String::from(\"Key::ChecksumRegistry\"),\n            Key::BidAddr(_) => String::from(\"Key::BidAddr\"),\n            Key::SmartContract(_) => String::from(\"Key::SmartContract\"),\n            Key::AddressableEntity(_) => String::from(\"Key::AddressableEntity\"),\n            Key::ByteCode(_) => String::from(\"Key::ByteCode\"),\n            Key::Message(_) => String::from(\"Key::Message\"),\n            Key::NamedKey(_) => String::from(\"Key::NamedKey\"),\n            Key::BlockGlobal(_) => String::from(\"Key::BlockGlobal\"),\n            Key::BalanceHold(_) => String::from(\"Key::BalanceHold\"),\n            Key::EntryPoint(_) => String::from(\"Key::EntryPoint\"),\n            Key::State(_) => String::from(\"Key::State\"),\n            Key::RewardsHandling => String::from(\"Key::RewardsHandling\"),\n        }\n    }\n\n    /// Returns the maximum size a [`Key`] 
can be serialized into.\n    pub const fn max_serialized_length() -> usize {\n        MAX_SERIALIZED_LENGTH\n    }\n\n    /// If `self` is of type [`Key::URef`], returns `self` with the\n    /// [`AccessRights`](crate::AccessRights) stripped from the wrapped [`URef`], otherwise\n    /// returns `self` unmodified.\n    #[must_use]\n    pub fn normalize(self) -> Key {\n        match self {\n            Key::URef(uref) => Key::URef(uref.remove_access_rights()),\n            other => other,\n        }\n    }\n\n    /// Returns a human-readable version of `self`, with the inner bytes encoded to Base16.\n    pub fn to_formatted_string(self) -> String {\n        match self {\n            Key::Account(account_hash) => account_hash.to_formatted_string(),\n            Key::Hash(addr) => format!(\"{}{}\", HASH_PREFIX, base16::encode_lower(&addr)),\n            Key::URef(uref) => uref.to_formatted_string(),\n            Key::Transfer(transfer_v1_addr) => {\n                format!(\n                    \"{}{}\",\n                    TRANSFER_PREFIX,\n                    base16::encode_lower(&transfer_v1_addr.value())\n                )\n            }\n            Key::DeployInfo(deploy_hash) => {\n                format!(\n                    \"{}{}\",\n                    DEPLOY_INFO_PREFIX,\n                    base16::encode_lower(deploy_hash.as_ref())\n                )\n            }\n            Key::EraInfo(era_id) => {\n                format!(\"{}{}\", ERA_INFO_PREFIX, era_id.value())\n            }\n            Key::Balance(uref_addr) => {\n                format!(\"{}{}\", BALANCE_PREFIX, base16::encode_lower(&uref_addr))\n            }\n            Key::Bid(account_hash) => {\n                format!(\"{}{}\", BID_PREFIX, base16::encode_lower(&account_hash))\n            }\n            Key::Withdraw(account_hash) => {\n                format!(\"{}{}\", WITHDRAW_PREFIX, base16::encode_lower(&account_hash))\n            }\n            Key::Dictionary(dictionary_addr) 
=> {\n                format!(\n                    \"{}{}\",\n                    DICTIONARY_PREFIX,\n                    base16::encode_lower(&dictionary_addr)\n                )\n            }\n            Key::SystemEntityRegistry => {\n                format!(\n                    \"{}{}\",\n                    SYSTEM_ENTITY_REGISTRY_PREFIX,\n                    base16::encode_lower(&PADDING_BYTES)\n                )\n            }\n            Key::EraSummary => {\n                format!(\n                    \"{}{}\",\n                    ERA_SUMMARY_PREFIX,\n                    base16::encode_lower(&PADDING_BYTES)\n                )\n            }\n            Key::Unbond(account_hash) => {\n                format!(\"{}{}\", UNBOND_PREFIX, base16::encode_lower(&account_hash))\n            }\n            Key::ChainspecRegistry => {\n                format!(\n                    \"{}{}\",\n                    CHAINSPEC_REGISTRY_PREFIX,\n                    base16::encode_lower(&PADDING_BYTES)\n                )\n            }\n            Key::ChecksumRegistry => {\n                format!(\n                    \"{}{}\",\n                    CHECKSUM_REGISTRY_PREFIX,\n                    base16::encode_lower(&PADDING_BYTES)\n                )\n            }\n            Key::BidAddr(bid_addr) => {\n                format!(\"{}{}\", BID_ADDR_PREFIX, bid_addr)\n            }\n            Key::Message(message_addr) => message_addr.to_formatted_string(),\n            Key::SmartContract(package_addr) => {\n                format!(\"{}{}\", PACKAGE_PREFIX, base16::encode_lower(&package_addr))\n            }\n            Key::AddressableEntity(entity_addr) => {\n                format!(\"{}\", entity_addr)\n            }\n            Key::ByteCode(byte_code_addr) => {\n                format!(\"{}\", byte_code_addr)\n            }\n            Key::NamedKey(named_key) => {\n                format!(\"{}\", named_key)\n            }\n            
Key::BlockGlobal(addr) => {\n                let prefix = match addr {\n                    BlockGlobalAddr::BlockTime => BLOCK_GLOBAL_TIME_PREFIX,\n                    BlockGlobalAddr::MessageCount => BLOCK_GLOBAL_MESSAGE_COUNT_PREFIX,\n                    BlockGlobalAddr::ProtocolVersion => BLOCK_GLOBAL_PROTOCOL_VERSION_PREFIX,\n                    BlockGlobalAddr::AddressableEntity => BLOCK_GLOBAL_ADDRESSABLE_ENTITY_PREFIX,\n                };\n                format!(\n                    \"{}{}\",\n                    prefix,\n                    base16::encode_lower(&BLOCK_GLOBAL_PADDING_BYTES)\n                )\n            }\n            Key::BalanceHold(balance_hold_addr) => {\n                let tail = BalanceHoldAddr::to_formatted_string(&balance_hold_addr);\n                format!(\"{}{}\", BALANCE_HOLD_PREFIX, tail)\n            }\n            Key::State(entity_addr) => {\n                format!(\"{}{}\", STATE_PREFIX, entity_addr)\n            }\n            Key::EntryPoint(entry_point_addr) => {\n                format!(\"{}\", entry_point_addr)\n            }\n            Key::RewardsHandling => {\n                format!(\n                    \"{}{}\",\n                    REWARDS_HANDLING_PREFIX,\n                    base16::encode_lower(&PADDING_BYTES)\n                )\n            }\n        }\n    }\n\n    /// Parses a string formatted as per `Self::to_formatted_string()` into a `Key`.\n    pub fn from_formatted_str(input: &str) -> Result<Key, FromStrError> {\n        match AccountHash::from_formatted_str(input) {\n            Ok(account_hash) => return Ok(Key::Account(account_hash)),\n            Err(addressable_entity::FromStrError::InvalidPrefix) => {}\n            Err(error) => return Err(error.into()),\n        }\n\n        if let Some(hex) = input.strip_prefix(HASH_PREFIX) {\n            let addr = checksummed_hex::decode(hex)\n                .map_err(|error| FromStrError::Hash(error.to_string()))?;\n            let hash_addr = 
HashAddr::try_from(addr.as_ref())\n                .map_err(|error| FromStrError::Hash(error.to_string()))?;\n            return Ok(Key::Hash(hash_addr));\n        }\n\n        if let Some(hex) = input.strip_prefix(DEPLOY_INFO_PREFIX) {\n            let hash = checksummed_hex::decode(hex)\n                .map_err(|error| FromStrError::DeployInfo(error.to_string()))?;\n            let hash_array = <[u8; DeployHash::LENGTH]>::try_from(hash.as_ref())\n                .map_err(|error| FromStrError::DeployInfo(error.to_string()))?;\n            return Ok(Key::DeployInfo(DeployHash::new(Digest::from(hash_array))));\n        }\n\n        if let Some(hex) = input.strip_prefix(TRANSFER_PREFIX) {\n            let addr = checksummed_hex::decode(hex)\n                .map_err(|error| FromStrError::Transfer(TransferFromStrError::from(error)))?;\n            let addr_array = <[u8; TRANSFER_ADDR_LENGTH]>::try_from(addr.as_ref())\n                .map_err(|error| FromStrError::Transfer(TransferFromStrError::from(error)))?;\n            return Ok(Key::Transfer(TransferAddr::new(addr_array)));\n        }\n\n        match URef::from_formatted_str(input) {\n            Ok(uref) => return Ok(Key::URef(uref)),\n            Err(uref::FromStrError::InvalidPrefix) => {}\n            Err(error) => return Err(error.into()),\n        }\n\n        if let Some(era_summary_padding) = input.strip_prefix(ERA_SUMMARY_PREFIX) {\n            let padded_bytes = checksummed_hex::decode(era_summary_padding)\n                .map_err(|error| FromStrError::EraSummary(error.to_string()))?;\n            let _padding: [u8; 32] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| {\n                FromStrError::EraSummary(\"Failed to deserialize era summary key\".to_string())\n            })?;\n            return Ok(Key::EraSummary);\n        }\n\n        if let Some(era_id_str) = input.strip_prefix(ERA_INFO_PREFIX) {\n            let era_id = EraId::from_str(era_id_str)\n                .map_err(|error| 
FromStrError::EraInfo(error.to_string()))?;\n            return Ok(Key::EraInfo(era_id));\n        }\n\n        // note: BALANCE_HOLD must come before BALANCE due to overlapping head (balance-)\n        if let Some(hex) = input.strip_prefix(BALANCE_HOLD_PREFIX) {\n            let balance_hold_addr = BalanceHoldAddr::from_formatted_string(hex)?;\n            return Ok(Key::BalanceHold(balance_hold_addr));\n        }\n\n        if let Some(hex) = input.strip_prefix(BALANCE_PREFIX) {\n            let addr = checksummed_hex::decode(hex)\n                .map_err(|error| FromStrError::Balance(error.to_string()))?;\n            let uref_addr = URefAddr::try_from(addr.as_ref())\n                .map_err(|error| FromStrError::Balance(error.to_string()))?;\n            return Ok(Key::Balance(uref_addr));\n        }\n\n        // note: BID_ADDR must come before BID as their heads overlap (bid- / bid-addr-)\n        if let Some(hex) = input.strip_prefix(BID_ADDR_PREFIX) {\n            let bytes = checksummed_hex::decode(hex)\n                .map_err(|error| FromStrError::BidAddr(error.to_string()))?;\n            if bytes.is_empty() {\n                return Err(FromStrError::BidAddr(\n                    \"bytes should not be 0 len\".to_string(),\n                ));\n            }\n            let tag_bytes = <[u8; BidAddrTag::BID_ADDR_TAG_LENGTH]>::try_from(bytes[0..1].as_ref())\n                .map_err(|err| FromStrError::BidAddr(err.to_string()))?;\n            let tag = BidAddrTag::try_from_u8(tag_bytes[0])\n                .ok_or_else(|| FromStrError::BidAddr(\"failed to parse bid addr tag\".to_string()))?;\n            let validator_bytes = <[u8; ACCOUNT_HASH_LENGTH]>::try_from(\n                bytes[1..BidAddr::VALIDATOR_BID_ADDR_LENGTH].as_ref(),\n            )\n            .map_err(|err| FromStrError::BidAddr(err.to_string()))?;\n\n            let bid_addr = match tag {\n                BidAddrTag::Unified => BidAddr::legacy(validator_bytes),\n                
BidAddrTag::Validator => BidAddr::new_validator_addr(validator_bytes),\n                BidAddrTag::ValidatorRev => BidAddr::new_validator_rev_addr(validator_bytes),\n                BidAddrTag::DelegatedAccount => {\n                    let delegator_bytes = <[u8; ACCOUNT_HASH_LENGTH]>::try_from(\n                        bytes[BidAddr::VALIDATOR_BID_ADDR_LENGTH..].as_ref(),\n                    )\n                    .map_err(|err| FromStrError::BidAddr(err.to_string()))?;\n                    BidAddr::new_delegator_account_addr((validator_bytes, delegator_bytes))\n                }\n                BidAddrTag::DelegatedPurse => {\n                    let uref = <[u8; UREF_ADDR_LENGTH]>::try_from(\n                        bytes[BidAddr::VALIDATOR_BID_ADDR_LENGTH..].as_ref(),\n                    )\n                    .map_err(|err| FromStrError::BidAddr(err.to_string()))?;\n                    BidAddr::DelegatedPurse {\n                        validator: AccountHash::new(validator_bytes),\n                        delegator: uref,\n                    }\n                }\n                BidAddrTag::Credit => {\n                    let era_id = bytesrepr::deserialize_from_slice(\n                        &bytes[BidAddr::VALIDATOR_BID_ADDR_LENGTH..],\n                    )\n                    .map_err(|err| FromStrError::BidAddr(err.to_string()))?;\n                    BidAddr::Credit {\n                        validator: AccountHash::new(validator_bytes),\n                        era_id,\n                    }\n                }\n                BidAddrTag::ReservedDelegationAccount => {\n                    let delegator_bytes = <[u8; ACCOUNT_HASH_LENGTH]>::try_from(\n                        bytes[BidAddr::VALIDATOR_BID_ADDR_LENGTH..].as_ref(),\n                    )\n                    .map_err(|err| FromStrError::BidAddr(err.to_string()))?;\n                    BidAddr::new_reservation_account_addr((validator_bytes, delegator_bytes))\n                }\n      
          BidAddrTag::ReservedDelegationPurse => {\n                    let uref = <[u8; UREF_ADDR_LENGTH]>::try_from(\n                        bytes[BidAddr::VALIDATOR_BID_ADDR_LENGTH..].as_ref(),\n                    )\n                    .map_err(|err| FromStrError::BidAddr(err.to_string()))?;\n                    BidAddr::ReservedDelegationPurse {\n                        validator: AccountHash::new(validator_bytes),\n                        delegator: uref,\n                    }\n                }\n                BidAddrTag::UnbondAccount => {\n                    let unbonder_bytes = <[u8; ACCOUNT_HASH_LENGTH]>::try_from(\n                        bytes[BidAddr::VALIDATOR_BID_ADDR_LENGTH..].as_ref(),\n                    )\n                    .map_err(|err| FromStrError::BidAddr(err.to_string()))?;\n                    BidAddr::UnbondAccount {\n                        validator: AccountHash::new(validator_bytes),\n                        unbonder: AccountHash::new(unbonder_bytes),\n                    }\n                }\n                BidAddrTag::UnbondPurse => {\n                    let uref = <[u8; UREF_ADDR_LENGTH]>::try_from(\n                        bytes[BidAddr::VALIDATOR_BID_ADDR_LENGTH..].as_ref(),\n                    )\n                    .map_err(|err| FromStrError::BidAddr(err.to_string()))?;\n                    BidAddr::UnbondPurse {\n                        validator: AccountHash::new(validator_bytes),\n                        unbonder: uref,\n                    }\n                }\n            };\n            return Ok(Key::BidAddr(bid_addr));\n        }\n\n        if let Some(hex) = input.strip_prefix(BID_PREFIX) {\n            let hash = checksummed_hex::decode(hex)\n                .map_err(|error| FromStrError::Bid(error.to_string()))?;\n            let account_hash = <[u8; ACCOUNT_HASH_LENGTH]>::try_from(hash.as_ref())\n                .map_err(|error| FromStrError::Bid(error.to_string()))?;\n            return 
Ok(Key::Bid(AccountHash::new(account_hash)));\n        }\n\n        if let Some(hex) = input.strip_prefix(WITHDRAW_PREFIX) {\n            let hash = checksummed_hex::decode(hex)\n                .map_err(|error| FromStrError::Withdraw(error.to_string()))?;\n            let account_hash = <[u8; ACCOUNT_HASH_LENGTH]>::try_from(hash.as_ref())\n                .map_err(|error| FromStrError::Withdraw(error.to_string()))?;\n            return Ok(Key::Withdraw(AccountHash::new(account_hash)));\n        }\n\n        if let Some(hex) = input.strip_prefix(UNBOND_PREFIX) {\n            let hash = checksummed_hex::decode(hex)\n                .map_err(|error| FromStrError::Unbond(error.to_string()))?;\n            let account_hash = <[u8; ACCOUNT_HASH_LENGTH]>::try_from(hash.as_ref())\n                .map_err(|error| FromStrError::Unbond(error.to_string()))?;\n            return Ok(Key::Unbond(AccountHash::new(account_hash)));\n        }\n\n        if let Some(dictionary_addr) = input.strip_prefix(DICTIONARY_PREFIX) {\n            let dictionary_addr_bytes = checksummed_hex::decode(dictionary_addr)\n                .map_err(|error| FromStrError::Dictionary(error.to_string()))?;\n            let addr = DictionaryAddr::try_from(dictionary_addr_bytes.as_ref())\n                .map_err(|error| FromStrError::Dictionary(error.to_string()))?;\n            return Ok(Key::Dictionary(addr));\n        }\n\n        if let Some(registry_address) = input.strip_prefix(SYSTEM_ENTITY_REGISTRY_PREFIX) {\n            let padded_bytes = checksummed_hex::decode(registry_address)\n                .map_err(|error| FromStrError::SystemEntityRegistry(error.to_string()))?;\n            let _padding: [u8; 32] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| {\n                FromStrError::SystemEntityRegistry(\n                    \"Failed to deserialize system registry key\".to_string(),\n                )\n            })?;\n            return Ok(Key::SystemEntityRegistry);\n        }\n\n      
  if let Some(registry_address) = input.strip_prefix(CHAINSPEC_REGISTRY_PREFIX) {\n            let padded_bytes = checksummed_hex::decode(registry_address)\n                .map_err(|error| FromStrError::ChainspecRegistry(error.to_string()))?;\n            let _padding: [u8; 32] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| {\n                FromStrError::ChainspecRegistry(\n                    \"Failed to deserialize chainspec registry key\".to_string(),\n                )\n            })?;\n            return Ok(Key::ChainspecRegistry);\n        }\n\n        if let Some(registry_address) = input.strip_prefix(CHECKSUM_REGISTRY_PREFIX) {\n            let padded_bytes = checksummed_hex::decode(registry_address)\n                .map_err(|error| FromStrError::ChecksumRegistry(error.to_string()))?;\n            let _padding: [u8; 32] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| {\n                FromStrError::ChecksumRegistry(\n                    \"Failed to deserialize checksum registry key\".to_string(),\n                )\n            })?;\n            return Ok(Key::ChecksumRegistry);\n        }\n\n        if let Some(package_addr) = input.strip_prefix(PACKAGE_PREFIX) {\n            let package_addr_bytes = checksummed_hex::decode(package_addr)\n                .map_err(|error| FromStrError::Dictionary(error.to_string()))?;\n            let addr = PackageAddr::try_from(package_addr_bytes.as_ref())\n                .map_err(|error| FromStrError::Package(error.to_string()))?;\n            return Ok(Key::SmartContract(addr));\n        }\n\n        match EntityAddr::from_formatted_str(input) {\n            Ok(entity_addr) => return Ok(Key::AddressableEntity(entity_addr)),\n            Err(addressable_entity::FromStrError::InvalidPrefix) => {}\n            Err(error) => {\n                return Err(FromStrError::AddressableEntity(error.to_string()));\n            }\n        }\n\n        match ByteCodeAddr::from_formatted_string(input) {\n         
   Ok(byte_code_addr) => return Ok(Key::ByteCode(byte_code_addr)),\n            Err(byte_code::FromStrError::InvalidPrefix) => {}\n            Err(error) => return Err(FromStrError::ByteCode(error.to_string())),\n        }\n\n        match MessageAddr::from_formatted_str(input) {\n            Ok(message_addr) => return Ok(Key::Message(message_addr)),\n            Err(contract_messages::FromStrError::InvalidPrefix) => {}\n            Err(error) => return Err(error.into()),\n        }\n\n        match NamedKeyAddr::from_formatted_str(input) {\n            Ok(named_key) => return Ok(Key::NamedKey(named_key)),\n            Err(addressable_entity::FromStrError::InvalidPrefix) => {}\n            Err(error) => return Err(FromStrError::NamedKey(error.to_string())),\n        }\n\n        if let Some(block_time) = input.strip_prefix(BLOCK_GLOBAL_TIME_PREFIX) {\n            let padded_bytes = checksummed_hex::decode(block_time)\n                .map_err(|error| FromStrError::BlockGlobal(error.to_string()))?;\n            let _padding: [u8; 31] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| {\n                FromStrError::BlockGlobal(\"Failed to deserialize global block time key\".to_string())\n            })?;\n            return Ok(BlockGlobalAddr::BlockTime.into());\n        }\n\n        if let Some(message_count) = input.strip_prefix(BLOCK_GLOBAL_MESSAGE_COUNT_PREFIX) {\n            let padded_bytes = checksummed_hex::decode(message_count)\n                .map_err(|error| FromStrError::BlockGlobal(error.to_string()))?;\n            let _padding: [u8; 31] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| {\n                FromStrError::BlockGlobal(\n                    \"Failed to deserialize global block message count key\".to_string(),\n                )\n            })?;\n            return Ok(BlockGlobalAddr::MessageCount.into());\n        }\n\n        if let Some(protocol_version) = input.strip_prefix(BLOCK_GLOBAL_PROTOCOL_VERSION_PREFIX) {\n            
let padded_bytes = checksummed_hex::decode(protocol_version)\n                .map_err(|error| FromStrError::BlockGlobal(error.to_string()))?;\n            let _padding: [u8; 31] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| {\n                FromStrError::BlockGlobal(\n                    \"Failed to deserialize global block protocol version key\".to_string(),\n                )\n            })?;\n            return Ok(BlockGlobalAddr::ProtocolVersion.into());\n        }\n\n        if let Some(addressable_entity) = input.strip_prefix(BLOCK_GLOBAL_ADDRESSABLE_ENTITY_PREFIX)\n        {\n            let padded_bytes = checksummed_hex::decode(addressable_entity)\n                .map_err(|error| FromStrError::BlockGlobal(error.to_string()))?;\n            let _padding: [u8; 31] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| {\n                FromStrError::BlockGlobal(\n                    \"Failed to deserialize global block addressable entity key\".to_string(),\n                )\n            })?;\n            return Ok(BlockGlobalAddr::AddressableEntity.into());\n        }\n\n        match EntryPointAddr::from_formatted_str(input) {\n            Ok(entry_point_addr) => return Ok(Key::EntryPoint(entry_point_addr)),\n            Err(addressable_entity::FromStrError::InvalidPrefix) => {}\n            Err(error) => return Err(FromStrError::EntryPoint(error.to_string())),\n        }\n\n        if let Some(entity_addr_formatted) = input.strip_prefix(STATE_PREFIX) {\n            match EntityAddr::from_formatted_str(entity_addr_formatted) {\n                Ok(entity_addr) => return Ok(Key::State(entity_addr)),\n                Err(addressable_entity::FromStrError::InvalidPrefix) => {}\n                Err(error) => {\n                    return Err(FromStrError::State(error.to_string()));\n                }\n            }\n        }\n\n        if let Some(rewards_handling_padding) = input.strip_prefix(REWARDS_HANDLING_PREFIX) {\n            let 
padded_bytes = checksummed_hex::decode(rewards_handling_padding)\n                .map_err(|error| FromStrError::RewardsHandling(error.to_string()))?;\n            let _padding: [u8; 32] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| {\n                FromStrError::RewardsHandling(\"Failed to deserialize era summary key\".to_string())\n            })?;\n            return Ok(Key::RewardsHandling);\n        }\n\n        Err(FromStrError::UnknownPrefix)\n    }\n\n    /// Returns the inner bytes of `self` if `self` is of type [`Key::Account`], otherwise returns\n    /// `None`.\n    pub fn into_account(self) -> Option<AccountHash> {\n        match self {\n            Key::Account(bytes) => Some(bytes),\n            _ => None,\n        }\n    }\n\n    /// Returns the inner bytes of `self` if `self` is of type [`Key::Hash`], otherwise returns\n    /// `None`.\n    pub fn into_hash_addr(self) -> Option<HashAddr> {\n        match self {\n            Key::Hash(hash) => Some(hash),\n            _ => None,\n        }\n    }\n\n    /// Returns the inner bytes of `self` if `self` is of type [`Key::AddressableEntity`], otherwise\n    /// returns `None`.\n    pub fn into_entity_hash_addr(self) -> Option<HashAddr> {\n        match self {\n            Key::AddressableEntity(entity_addr) => Some(entity_addr.value()),\n            Key::Account(account_hash) => Some(account_hash.value()),\n            Key::Hash(hash) => Some(hash),\n            _ => None,\n        }\n    }\n\n    /// Returns the inner bytes of `self` if `self` is of type [`Key::SmartContract`], otherwise\n    /// returns `None`.\n    pub fn into_package_addr(self) -> Option<PackageAddr> {\n        match self {\n            Key::Hash(hash) => Some(hash),\n            Key::SmartContract(package_addr) => Some(package_addr),\n            _ => None,\n        }\n    }\n\n    /// Returns [`AddressableEntityHash`] of `self` if `self` is of type [`Key::AddressableEntity`],\n    /// otherwise returns `None`.\n    pub 
fn into_entity_hash(self) -> Option<AddressableEntityHash> {\n        let entity_addr = self.into_entity_hash_addr()?;\n        Some(AddressableEntityHash::new(entity_addr))\n    }\n\n    /// Returns [`PackageHash`] of `self` if `self` is of type [`Key::SmartContract`], otherwise\n    /// returns `None`.\n    pub fn into_package_hash(self) -> Option<PackageHash> {\n        let package_addr = self.into_package_addr()?;\n        Some(PackageHash::new(package_addr))\n    }\n\n    /// Returns [`NamedKeyAddr`] of `self` if `self` is of type [`Key::NamedKey`], otherwise\n    /// returns `None`.\n    pub fn into_named_key_addr(self) -> Option<NamedKeyAddr> {\n        match self {\n            Key::NamedKey(addr) => Some(addr),\n            _ => None,\n        }\n    }\n\n    /// Returns the inner [`URef`] if `self` is of type [`Key::URef`], otherwise returns `None`.\n    pub fn into_uref(self) -> Option<URef> {\n        match self {\n            Key::URef(uref) => Some(uref),\n            _ => None,\n        }\n    }\n\n    /// Returns a reference to the inner [`URef`] if `self` is of type [`Key::URef`], otherwise\n    /// returns `None`.\n    pub fn as_uref(&self) -> Option<&URef> {\n        match self {\n            Key::URef(uref) => Some(uref),\n            _ => None,\n        }\n    }\n\n    /// Returns a reference to the inner [`URef`] if `self` is of type [`Key::URef`], otherwise\n    /// returns `None`.\n    pub fn as_uref_mut(&mut self) -> Option<&mut URef> {\n        match self {\n            Key::URef(uref) => Some(uref),\n            _ => None,\n        }\n    }\n\n    /// Returns a reference to the inner `URefAddr` if `self` is of type [`Key::Balance`],\n    /// otherwise returns `None`.\n    pub fn as_balance(&self) -> Option<&URefAddr> {\n        if let Self::Balance(v) = self {\n            Some(v)\n        } else {\n            None\n        }\n    }\n\n    /// Returns a reference to the inner `BalanceHoldAddr` if `self` is of type\n    /// 
[`Key::BalanceHold`], otherwise returns `None`.\n    pub fn as_balance_hold(&self) -> Option<&BalanceHoldAddr> {\n        if let Self::BalanceHold(addr) = self {\n            Some(addr)\n        } else {\n            None\n        }\n    }\n\n    /// Returns a reference to the inner [`DictionaryAddr`] if `self` is of type\n    /// [`Key::Dictionary`], otherwise returns `None`.\n    pub fn as_dictionary(&self) -> Option<&DictionaryAddr> {\n        match self {\n            Key::Dictionary(v) => Some(v),\n            _ => None,\n        }\n    }\n\n    /// Returns a reference to the inner `BidAddr` if `self` is of type [`Key::Bid`],\n    /// otherwise returns `None`.\n    pub fn as_bid_addr(&self) -> Option<&BidAddr> {\n        if let Self::BidAddr(addr) = self {\n            Some(addr)\n        } else {\n            None\n        }\n    }\n\n    /// Returns a reference to the inner `TopicNameHash` if `self` is of the type [`Key::Message`]\n    /// otherwise returns `None`.\n    pub fn as_message_topic_name_hash(&self) -> Option<TopicNameHash> {\n        if let Self::Message(addr) = self {\n            Some(addr.topic_name_hash())\n        } else {\n            None\n        }\n    }\n\n    /// Casts a [`Key::URef`] to a [`Key::Hash`]\n    pub fn uref_to_hash(&self) -> Option<Key> {\n        let uref = self.as_uref()?;\n        let addr = uref.addr();\n        Some(Key::Hash(addr))\n    }\n\n    /// Casts a [`Key::Withdraw`] to a [`Key::Unbond`]\n    pub fn withdraw_to_unbond(&self) -> Option<Key> {\n        if let Key::Withdraw(account_hash) = self {\n            return Some(Key::Unbond(*account_hash));\n        }\n        None\n    }\n\n    /// Creates a new [`Key::Dictionary`] variant based on a `seed_uref` and a `dictionary_item_key`\n    /// bytes.\n    pub fn dictionary(seed_uref: URef, dictionary_item_key: &[u8]) -> Key {\n        // NOTE: Expect below is safe because the length passed is supported.\n        let mut hasher = 
VarBlake2b::new(BLAKE2B_DIGEST_LENGTH).expect(\"should create hasher\");\n        hasher.update(seed_uref.addr().as_ref());\n        hasher.update(dictionary_item_key);\n        // NOTE: Assumed safe as size of `HashAddr` equals to the output provided by hasher.\n        let mut addr = HashAddr::default();\n        hasher.finalize_variable(|hash| addr.clone_from_slice(hash));\n        Key::Dictionary(addr)\n    }\n\n    /// Creates a new [`Key::AddressableEntity`] variant from a package kind and an entity\n    /// hash.\n    pub fn addressable_entity_key(\n        entity_kind_tag: EntityKindTag,\n        entity_hash: AddressableEntityHash,\n    ) -> Self {\n        let entity_addr = match entity_kind_tag {\n            EntityKindTag::System => EntityAddr::new_system(entity_hash.value()),\n            EntityKindTag::Account => EntityAddr::new_account(entity_hash.value()),\n            EntityKindTag::SmartContract => EntityAddr::new_smart_contract(entity_hash.value()),\n        };\n\n        Key::AddressableEntity(entity_addr)\n    }\n\n    /// Creates a new [`Key::AddressableEntity`] for a Smart contract.\n    pub fn contract_entity_key(entity_hash: AddressableEntityHash) -> Key {\n        Self::addressable_entity_key(EntityKindTag::SmartContract, entity_hash)\n    }\n\n    /// Creates a new [`Key::ByteCode`] variant from a byte code kind and an byte code addr.\n    pub fn byte_code_key(byte_code_addr: ByteCodeAddr) -> Self {\n        Key::ByteCode(byte_code_addr)\n    }\n\n    /// Creates a new [`Key::Message`] variant that identifies an indexed message based on an\n    /// `hash_addr`, `topic_name_hash` and message `index`.\n    pub fn message(entity_addr: EntityAddr, topic_name_hash: TopicNameHash, index: u32) -> Key {\n        Key::Message(MessageAddr::new_message_addr(\n            entity_addr,\n            topic_name_hash,\n            index,\n        ))\n    }\n\n    /// Creates a new [`Key::Message`] variant that identifies a message topic based on an\n    
/// `hash_addr` and a hash of the topic name.\n    pub fn message_topic(entity_addr: EntityAddr, topic_name_hash: TopicNameHash) -> Key {\n        Key::Message(MessageAddr::new_topic_addr(entity_addr, topic_name_hash))\n    }\n\n    /// Creates a new [`Key::EntryPoint`] variant from an entrypoint addr.\n    pub fn entry_point(entry_point_addr: EntryPointAddr) -> Self {\n        Key::EntryPoint(entry_point_addr)\n    }\n\n    /// Returns true if the key is of type [`Key::Dictionary`].\n    pub fn is_dictionary_key(&self) -> bool {\n        if let Key::Dictionary(_) = self {\n            return true;\n        }\n        false\n    }\n\n    /// Returns true if the key is of type [`Key::Bid`].\n    pub fn is_balance_key(&self) -> bool {\n        if let Key::Balance(_) = self {\n            return true;\n        }\n        false\n    }\n\n    /// Returns true if the key is of type [`Key::BidAddr`].\n    pub fn is_bid_addr_key(&self) -> bool {\n        if let Key::BidAddr(_) = self {\n            return true;\n        }\n        false\n    }\n\n    /// Returns true if the key is of type [`Key::NamedKey`].\n    pub fn is_named_key(&self) -> bool {\n        if let Key::NamedKey(_) = self {\n            return true;\n        }\n\n        false\n    }\n\n    /// Returns if the inner address is for a system contract entity.\n    pub fn is_system_key(&self) -> bool {\n        if let Self::AddressableEntity(entity_addr) = self {\n            return match entity_addr.tag() {\n                EntityKindTag::System => true,\n                EntityKindTag::SmartContract | EntityKindTag::Account => false,\n            };\n        }\n        false\n    }\n\n    /// Return true if the inner Key is of the smart contract type.\n    pub fn is_smart_contract_key(&self) -> bool {\n        matches!(\n            self,\n            Self::AddressableEntity(EntityAddr::SmartContract(_)) | Self::Hash(_)\n        )\n    }\n\n    /// Returns true if the key is of type [`Key::NamedKey`] and its 
Entry variant.\n    pub fn is_named_key_entry(&self) -> bool {\n        matches!(self, Self::NamedKey(_))\n    }\n\n    /// Returns true if the key is of type [`Key::NamedKey`] and the variants have the\n    /// same [`EntityAddr`].\n    pub fn is_entry_for_base(&self, entity_addr: &EntityAddr) -> bool {\n        if let Self::NamedKey(named_key_addr) = self {\n            named_key_addr.entity_addr() == *entity_addr\n        } else {\n            false\n        }\n    }\n\n    /// Is the record under this key readable by the entity corresponding to the imputed address?\n    pub fn is_readable(&self, entity_addr: &EntityAddr) -> bool {\n        if entity_addr.is_system() {\n            // the system can read everything\n            return true;\n        }\n        let ret = match self {\n            Key::BidAddr(_) => {\n                // all bids are public information\n                true\n            }\n            Key::URef(uref) => {\n                // uref's require explicit permissions\n                uref.is_readable()\n            }\n            Key::SystemEntityRegistry | Key::SmartContract(_) => {\n                // the system entities and all packages are public info\n                true\n            }\n            Key::Unbond(account_hash) => {\n                // and an account holder can read their own account record\n                entity_addr.tag() == EntityKindTag::Account\n                    && entity_addr.value() == account_hash.value()\n            }\n            Key::NamedKey(named_key_addr) => {\n                // an entity can read its own named keys\n                &named_key_addr.entity_addr() == entity_addr\n            }\n            Key::ByteCode(_)\n            | Key::Account(_)\n            | Key::Hash(_)\n            | Key::AddressableEntity(_)\n            | Key::Balance(_)\n            | Key::BalanceHold(_)\n            | Key::Dictionary(_)\n            | Key::Message(_)\n            | Key::BlockGlobal(_)\n            | 
Key::EntryPoint(_) => true,\n            _ => false,\n        };\n        if !ret {\n            let reading_entity_key = Key::AddressableEntity(*entity_addr);\n            warn!(?reading_entity_key, attempted_key=?self,  \"attempt to read without permission\")\n        }\n        ret\n    }\n\n    /// Is the record under this key addable by the entity corresponding to the imputed address?\n    pub fn is_addable(&self, entity_addr: &EntityAddr) -> bool {\n        // unlike readable / writeable which are universally supported,\n        //  only some data types support commutative add / extension\n        let ret = match self {\n            Key::URef(uref) => uref.is_addable(),\n            Key::AddressableEntity(addr_entity_addr) => {\n                // an entity can extend itself (only associated keys, currently)\n                entity_addr == addr_entity_addr\n            }\n            Key::NamedKey(named_key_addr) => {\n                // an entity can extend its own named keys\n                &named_key_addr.entity_addr() == entity_addr\n            }\n            _ => {\n                // other data types do not support commutative addition / extension\n                let adding_entity_key = Key::AddressableEntity(*entity_addr);\n                warn!(?adding_entity_key, attempted_key=?self,  \"attempt to add on an unsupported data type\");\n                return false; // we want the above more explicit warn message, not both messages.\n            }\n        };\n        if !ret {\n            let adding_entity_key = Key::AddressableEntity(*entity_addr);\n            warn!(?adding_entity_key, attempted_key=?self,  \"attempt to add without permission\");\n        }\n        ret\n    }\n\n    /// Is the record under this key writeable by the entity corresponding to the imputed address?\n    pub fn is_writeable(&self, entity_addr: &EntityAddr) -> bool {\n        if entity_addr.is_system() {\n            // the system can write everything\n            
return true;\n        }\n        let ret = match self {\n            Key::URef(uref) => uref.is_writeable(),\n            Key::NamedKey(named_key_addr) => {\n                // an entity can write to its own named keys\n                &named_key_addr.entity_addr() == entity_addr\n            }\n            _ => {\n                // only the system can write other kinds of records\n                false\n            }\n        };\n        if !ret {\n            let writing_entity_key = Key::AddressableEntity(*entity_addr);\n            warn!(?writing_entity_key, attempted_key=?self,  \"attempt to write without permission\")\n        }\n        ret\n    }\n\n    /// Returns an entity addr for a [`Key::AddressableEntity`].\n    pub fn into_entity_addr(self) -> Option<EntityAddr> {\n        match self {\n            Key::AddressableEntity(entity_addr) => Some(entity_addr),\n            _ => None,\n        }\n    }\n}\n\nimpl Display for Key {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        match self {\n            Key::Account(account_hash) => write!(f, \"Key::Account({})\", account_hash),\n            Key::Hash(addr) => write!(f, \"Key::Hash({})\", base16::encode_lower(&addr)),\n            Key::URef(uref) => write!(f, \"Key::{}\", uref), /* Display impl for URef will append */\n            Key::Transfer(transfer_v1_addr) => {\n                write!(f, \"Key::Transfer({})\", transfer_v1_addr)\n            }\n            Key::DeployInfo(addr) => write!(\n                f,\n                \"Key::DeployInfo({})\",\n                base16::encode_lower(addr.as_ref())\n            ),\n            Key::EraInfo(era_id) => write!(f, \"Key::EraInfo({})\", era_id),\n            Key::Balance(uref_addr) => {\n                write!(f, \"Key::Balance({})\", base16::encode_lower(uref_addr))\n            }\n            Key::Bid(account_hash) => write!(f, \"Key::Bid({})\", account_hash),\n            Key::Withdraw(account_hash) => write!(f, 
\"Key::Withdraw({})\", account_hash),\n            Key::Dictionary(addr) => {\n                write!(f, \"Key::Dictionary({})\", base16::encode_lower(addr))\n            }\n            Key::SystemEntityRegistry => write!(\n                f,\n                \"Key::SystemEntityRegistry({})\",\n                base16::encode_lower(&PADDING_BYTES)\n            ),\n            Key::EraSummary => write!(\n                f,\n                \"Key::EraSummary({})\",\n                base16::encode_lower(&PADDING_BYTES),\n            ),\n            Key::Unbond(account_hash) => write!(f, \"Key::Unbond({})\", account_hash),\n            Key::ChainspecRegistry => write!(\n                f,\n                \"Key::ChainspecRegistry({})\",\n                base16::encode_lower(&PADDING_BYTES)\n            ),\n            Key::ChecksumRegistry => {\n                write!(\n                    f,\n                    \"Key::ChecksumRegistry({})\",\n                    base16::encode_lower(&PADDING_BYTES)\n                )\n            }\n            Key::BidAddr(bid_addr) => write!(f, \"Key::BidAddr({})\", bid_addr),\n            Key::Message(message_addr) => {\n                write!(f, \"Key::Message({})\", message_addr)\n            }\n            Key::SmartContract(package_addr) => {\n                write!(f, \"Key::Package({})\", base16::encode_lower(package_addr))\n            }\n            Key::AddressableEntity(entity_addr) => write!(\n                f,\n                \"Key::AddressableEntity({}-{})\",\n                entity_addr.tag(),\n                base16::encode_lower(&entity_addr.value())\n            ),\n            Key::ByteCode(byte_code_addr) => {\n                write!(f, \"Key::ByteCode({})\", byte_code_addr)\n            }\n            Key::NamedKey(named_key_addr) => {\n                write!(f, \"Key::NamedKey({})\", named_key_addr)\n            }\n            Key::BlockGlobal(addr) => {\n                write!(\n                    f,\n      
              \"Key::BlockGlobal({}-{})\",\n                    addr,\n                    base16::encode_lower(&BLOCK_GLOBAL_PADDING_BYTES)\n                )\n            }\n            Key::BalanceHold(balance_hold_addr) => {\n                write!(f, \"Key::BalanceHold({})\", balance_hold_addr)\n            }\n            Key::EntryPoint(entry_point_addr) => {\n                write!(f, \"Key::EntryPointAddr({})\", entry_point_addr)\n            }\n            Key::State(entity_addr) => {\n                write!(f, \"Key::State({})\", entity_addr)\n            }\n            Key::RewardsHandling => write!(\n                f,\n                \"Key::RewardsHandling({})\",\n                base16::encode_lower(&PADDING_BYTES),\n            ),\n        }\n    }\n}\n\nimpl Debug for Key {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        write!(f, \"{}\", self)\n    }\n}\n\nimpl Tagged<KeyTag> for Key {\n    fn tag(&self) -> KeyTag {\n        match self {\n            Key::Account(_) => KeyTag::Account,\n            Key::Hash(_) => KeyTag::Hash,\n            Key::URef(_) => KeyTag::URef,\n            Key::Transfer(_) => KeyTag::Transfer,\n            Key::DeployInfo(_) => KeyTag::DeployInfo,\n            Key::EraInfo(_) => KeyTag::EraInfo,\n            Key::Balance(_) => KeyTag::Balance,\n            Key::Bid(_) => KeyTag::Bid,\n            Key::Withdraw(_) => KeyTag::Withdraw,\n            Key::Dictionary(_) => KeyTag::Dictionary,\n            Key::SystemEntityRegistry => KeyTag::SystemEntityRegistry,\n            Key::EraSummary => KeyTag::EraSummary,\n            Key::Unbond(_) => KeyTag::Unbond,\n            Key::ChainspecRegistry => KeyTag::ChainspecRegistry,\n            Key::ChecksumRegistry => KeyTag::ChecksumRegistry,\n            Key::BidAddr(_) => KeyTag::BidAddr,\n            Key::SmartContract(_) => KeyTag::Package,\n            Key::AddressableEntity(..) => KeyTag::AddressableEntity,\n            Key::ByteCode(..) 
=> KeyTag::ByteCode,\n            Key::Message(_) => KeyTag::Message,\n            Key::NamedKey(_) => KeyTag::NamedKey,\n            Key::BlockGlobal(_) => KeyTag::BlockGlobal,\n            Key::BalanceHold(_) => KeyTag::BalanceHold,\n            Key::EntryPoint(_) => KeyTag::EntryPoint,\n            Key::State(_) => KeyTag::State,\n            Key::RewardsHandling => KeyTag::RewardsHandling,\n        }\n    }\n}\n\nimpl Tagged<u8> for Key {\n    fn tag(&self) -> u8 {\n        let key_tag: KeyTag = self.tag();\n        key_tag as u8\n    }\n}\n\nimpl From<URef> for Key {\n    fn from(uref: URef) -> Key {\n        Key::URef(uref)\n    }\n}\n\nimpl From<AccountHash> for Key {\n    fn from(account_hash: AccountHash) -> Key {\n        Key::Account(account_hash)\n    }\n}\n\nimpl From<PackageHash> for Key {\n    fn from(package_hash: PackageHash) -> Key {\n        Key::SmartContract(package_hash.value())\n    }\n}\n\nimpl From<ContractWasmHash> for Key {\n    fn from(wasm_hash: ContractWasmHash) -> Self {\n        Key::Hash(wasm_hash.value())\n    }\n}\n\nimpl From<ContractPackageHash> for Key {\n    fn from(contract_package_hash: ContractPackageHash) -> Self {\n        Key::Hash(contract_package_hash.value())\n    }\n}\n\nimpl From<ContractHash> for Key {\n    fn from(contract_hash: ContractHash) -> Self {\n        Key::Hash(contract_hash.value())\n    }\n}\n\nimpl From<EntityAddr> for Key {\n    fn from(entity_addr: EntityAddr) -> Self {\n        Key::AddressableEntity(entity_addr)\n    }\n}\n\nimpl From<NamedKeyAddr> for Key {\n    fn from(value: NamedKeyAddr) -> Self {\n        Key::NamedKey(value)\n    }\n}\n\nimpl From<ByteCodeAddr> for Key {\n    fn from(value: ByteCodeAddr) -> Self {\n        Key::ByteCode(value)\n    }\n}\n\nimpl ToBytes for Key {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut result = bytesrepr::unchecked_allocate_buffer(self);\n        self.write_bytes(&mut result)?;\n        Ok(result)\n    }\n\n    fn 
serialized_length(&self) -> usize {\n        match self {\n            Key::Account(account_hash) => {\n                KEY_ID_SERIALIZED_LENGTH + account_hash.serialized_length()\n            }\n            Key::Hash(_) => KEY_HASH_SERIALIZED_LENGTH,\n            Key::URef(_) => KEY_UREF_SERIALIZED_LENGTH,\n            Key::Transfer(_) => KEY_TRANSFER_SERIALIZED_LENGTH,\n            Key::DeployInfo(_) => KEY_DEPLOY_INFO_SERIALIZED_LENGTH,\n            Key::EraInfo(_) => KEY_ERA_INFO_SERIALIZED_LENGTH,\n            Key::Balance(_) => KEY_BALANCE_SERIALIZED_LENGTH,\n            Key::Bid(_) => KEY_BID_SERIALIZED_LENGTH,\n            Key::Withdraw(_) => KEY_WITHDRAW_SERIALIZED_LENGTH,\n            Key::Dictionary(_) => KEY_DICTIONARY_SERIALIZED_LENGTH,\n            Key::SystemEntityRegistry => KEY_SYSTEM_ENTITY_REGISTRY_SERIALIZED_LENGTH,\n            Key::EraSummary => KEY_ERA_SUMMARY_SERIALIZED_LENGTH,\n            Key::Unbond(_) => KEY_UNBOND_SERIALIZED_LENGTH,\n            Key::ChainspecRegistry => KEY_CHAINSPEC_REGISTRY_SERIALIZED_LENGTH,\n            Key::ChecksumRegistry => KEY_CHECKSUM_REGISTRY_SERIALIZED_LENGTH,\n            Key::BidAddr(bid_addr) => KEY_ID_SERIALIZED_LENGTH + bid_addr.serialized_length(),\n            Key::SmartContract(_) => KEY_PACKAGE_SERIALIZED_LENGTH,\n            Key::AddressableEntity(entity_addr) => {\n                KEY_ID_SERIALIZED_LENGTH + entity_addr.serialized_length()\n            }\n            Key::ByteCode(byte_code_addr) => {\n                KEY_ID_SERIALIZED_LENGTH + byte_code_addr.serialized_length()\n            }\n            Key::Message(message_addr) => {\n                KEY_ID_SERIALIZED_LENGTH + message_addr.serialized_length()\n            }\n            Key::NamedKey(named_key_addr) => {\n                KEY_ID_SERIALIZED_LENGTH + named_key_addr.serialized_length()\n            }\n            Key::BlockGlobal(addr) => {\n                KEY_ID_SERIALIZED_LENGTH\n                    + addr.serialized_length()\n 
                   + BLOCK_GLOBAL_PADDING_BYTES.len()\n            }\n            Key::BalanceHold(balance_hold_addr) => {\n                KEY_ID_SERIALIZED_LENGTH + balance_hold_addr.serialized_length()\n            }\n            Key::EntryPoint(entry_point_addr) => {\n                U8_SERIALIZED_LENGTH + entry_point_addr.serialized_length()\n            }\n            Key::State(entity_addr) => KEY_ID_SERIALIZED_LENGTH + entity_addr.serialized_length(),\n            Key::RewardsHandling => KEY_REWARDS_HANDLING_SERIALIZED_LENGTH,\n        }\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        writer.push(self.tag());\n        match self {\n            Key::Account(account_hash) => account_hash.write_bytes(writer),\n            Key::Hash(hash) => hash.write_bytes(writer),\n            Key::URef(uref) => uref.write_bytes(writer),\n            Key::Transfer(addr) => addr.write_bytes(writer),\n            Key::DeployInfo(deploy_hash) => deploy_hash.write_bytes(writer),\n            Key::EraInfo(era_id) => era_id.write_bytes(writer),\n            Key::Balance(uref_addr) => uref_addr.write_bytes(writer),\n            Key::Bid(account_hash) => account_hash.write_bytes(writer),\n            Key::Withdraw(account_hash) => account_hash.write_bytes(writer),\n            Key::Dictionary(addr) => addr.write_bytes(writer),\n            Key::Unbond(account_hash) => account_hash.write_bytes(writer),\n            Key::SystemEntityRegistry\n            | Key::EraSummary\n            | Key::ChainspecRegistry\n            | Key::ChecksumRegistry\n            | Key::RewardsHandling => PADDING_BYTES.write_bytes(writer),\n            Key::BlockGlobal(addr) => {\n                addr.write_bytes(writer)?;\n                BLOCK_GLOBAL_PADDING_BYTES.write_bytes(writer)\n            }\n            Key::BidAddr(bid_addr) => bid_addr.write_bytes(writer),\n            Key::SmartContract(package_addr) => package_addr.write_bytes(writer),\n            
Key::AddressableEntity(entity_addr) => entity_addr.write_bytes(writer),\n            Key::ByteCode(byte_code_addr) => byte_code_addr.write_bytes(writer),\n            Key::Message(message_addr) => message_addr.write_bytes(writer),\n            Key::NamedKey(named_key_addr) => named_key_addr.write_bytes(writer),\n            Key::BalanceHold(balance_hold_addr) => balance_hold_addr.write_bytes(writer),\n            Key::EntryPoint(entry_point_addr) => entry_point_addr.write_bytes(writer),\n            Key::State(entity_addr) => entity_addr.write_bytes(writer),\n        }\n    }\n}\n\nimpl FromBytes for Key {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        if bytes.is_empty() {\n            error!(\"FromBytes for Key: bytes length should not be 0\");\n        }\n        let (tag, remainder) = match KeyTag::from_bytes(bytes) {\n            Ok((tag, rem)) => (tag, rem),\n            Err(err) => {\n                error!(%err, \"FromBytes for Key\");\n                return Err(err);\n            }\n        };\n        match tag {\n            KeyTag::Account => {\n                let (account_hash, rem) = AccountHash::from_bytes(remainder)?;\n                Ok((Key::Account(account_hash), rem))\n            }\n            KeyTag::Hash => {\n                let (hash, rem) = HashAddr::from_bytes(remainder)?;\n                Ok((Key::Hash(hash), rem))\n            }\n            KeyTag::URef => {\n                let (uref, rem) = URef::from_bytes(remainder)?;\n                Ok((Key::URef(uref), rem))\n            }\n            KeyTag::Transfer => {\n                let (transfer_v1_addr, rem) = TransferAddr::from_bytes(remainder)?;\n                Ok((Key::Transfer(transfer_v1_addr), rem))\n            }\n            KeyTag::DeployInfo => {\n                let (deploy_hash, rem) = DeployHash::from_bytes(remainder)?;\n                Ok((Key::DeployInfo(deploy_hash), rem))\n            }\n            KeyTag::EraInfo => {\n                
let (era_id, rem) = EraId::from_bytes(remainder)?;\n                Ok((Key::EraInfo(era_id), rem))\n            }\n            KeyTag::Balance => {\n                let (uref_addr, rem) = URefAddr::from_bytes(remainder)?;\n                Ok((Key::Balance(uref_addr), rem))\n            }\n            KeyTag::Bid => {\n                let (account_hash, rem) = AccountHash::from_bytes(remainder)?;\n                Ok((Key::Bid(account_hash), rem))\n            }\n            KeyTag::Withdraw => {\n                let (account_hash, rem) = AccountHash::from_bytes(remainder)?;\n                Ok((Key::Withdraw(account_hash), rem))\n            }\n            KeyTag::Dictionary => {\n                let (addr, rem) = DictionaryAddr::from_bytes(remainder)?;\n                Ok((Key::Dictionary(addr), rem))\n            }\n            KeyTag::SystemEntityRegistry => {\n                let (_, rem) = <[u8; 32]>::from_bytes(remainder)?;\n                Ok((Key::SystemEntityRegistry, rem))\n            }\n            KeyTag::EraSummary => {\n                let (_, rem) = <[u8; 32]>::from_bytes(remainder)?;\n                Ok((Key::EraSummary, rem))\n            }\n            KeyTag::Unbond => {\n                let (account_hash, rem) = AccountHash::from_bytes(remainder)?;\n                Ok((Key::Unbond(account_hash), rem))\n            }\n            KeyTag::ChainspecRegistry => {\n                let (_, rem) = <[u8; 32]>::from_bytes(remainder)?;\n                Ok((Key::ChainspecRegistry, rem))\n            }\n            KeyTag::ChecksumRegistry => {\n                let (_, rem) = <[u8; 32]>::from_bytes(remainder)?;\n                Ok((Key::ChecksumRegistry, rem))\n            }\n            KeyTag::BidAddr => {\n                let (bid_addr, rem) = BidAddr::from_bytes(remainder)?;\n                Ok((Key::BidAddr(bid_addr), rem))\n            }\n            KeyTag::Package => {\n                let (package_addr, rem) = 
PackageAddr::from_bytes(remainder)?;\n                Ok((Key::SmartContract(package_addr), rem))\n            }\n            KeyTag::AddressableEntity => {\n                let (entity_addr, rem) = EntityAddr::from_bytes(remainder)?;\n                Ok((Key::AddressableEntity(entity_addr), rem))\n            }\n            KeyTag::ByteCode => {\n                let (byte_code_addr, rem) = ByteCodeAddr::from_bytes(remainder)?;\n                Ok((Key::ByteCode(byte_code_addr), rem))\n            }\n            KeyTag::Message => {\n                let (message_addr, rem) = MessageAddr::from_bytes(remainder)?;\n                Ok((Key::Message(message_addr), rem))\n            }\n            KeyTag::NamedKey => {\n                let (named_key_addr, rem) = NamedKeyAddr::from_bytes(remainder)?;\n                Ok((Key::NamedKey(named_key_addr), rem))\n            }\n            KeyTag::BlockGlobal => {\n                let (addr, rem) = BlockGlobalAddr::from_bytes(remainder)?;\n                let (_, rem) = <[u8; 31]>::from_bytes(rem)?; // strip padding\n                Ok((Key::BlockGlobal(addr), rem))\n            }\n            KeyTag::BalanceHold => {\n                let (balance_hold_addr, rem) = BalanceHoldAddr::from_bytes(remainder)?;\n                Ok((Key::BalanceHold(balance_hold_addr), rem))\n            }\n            KeyTag::EntryPoint => {\n                let (entry_point_addr, rem) = EntryPointAddr::from_bytes(remainder)?;\n                Ok((Key::EntryPoint(entry_point_addr), rem))\n            }\n            KeyTag::State => {\n                let (entity_addr, rem) = EntityAddr::from_bytes(remainder)?;\n                Ok((Key::State(entity_addr), rem))\n            }\n            KeyTag::RewardsHandling => {\n                let (_, rem) = <[u8; 32]>::from_bytes(remainder)?;\n                Ok((Key::RewardsHandling, rem))\n            }\n        }\n    }\n}\n\n#[allow(dead_code)]\nfn please_add_to_distribution_impl(key: Key) {\n    // If 
you've been forced to come here, you likely need to add your variant to the\n    // `Distribution` impl for `Key`.\n    match key {\n        Key::Account(_) => unimplemented!(),\n        Key::Hash(_) => unimplemented!(),\n        Key::URef(_) => unimplemented!(),\n        Key::Transfer(_) => unimplemented!(),\n        Key::DeployInfo(_) => unimplemented!(),\n        Key::EraInfo(_) => unimplemented!(),\n        Key::Balance(_) => unimplemented!(),\n        Key::Bid(_) => unimplemented!(),\n        Key::Withdraw(_) => unimplemented!(),\n        Key::Dictionary(_) => unimplemented!(),\n        Key::SystemEntityRegistry => unimplemented!(),\n        Key::EraSummary => unimplemented!(),\n        Key::Unbond(_) => unimplemented!(),\n        Key::ChainspecRegistry => unimplemented!(),\n        Key::ChecksumRegistry => unimplemented!(),\n        Key::BidAddr(_) => unimplemented!(),\n        Key::SmartContract(_) => unimplemented!(),\n        Key::AddressableEntity(..) => unimplemented!(),\n        Key::ByteCode(..) 
=> unimplemented!(),\n        Key::Message(_) => unimplemented!(),\n        Key::NamedKey(_) => unimplemented!(),\n        Key::BlockGlobal(_) => unimplemented!(),\n        Key::BalanceHold(_) => unimplemented!(),\n        Key::EntryPoint(_) => unimplemented!(),\n        Key::State(_) => unimplemented!(),\n        Key::RewardsHandling => unimplemented!(),\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<Key> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Key {\n        match rng.gen_range(0..=24) {\n            0 => Key::Account(rng.gen()),\n            1 => Key::Hash(rng.gen()),\n            2 => Key::URef(rng.gen()),\n            3 => Key::Transfer(TransferAddr::new(rng.gen())),\n            4 => Key::DeployInfo(DeployHash::from_raw(rng.gen())),\n            5 => Key::EraInfo(EraId::new(rng.gen())),\n            6 => Key::Balance(rng.gen()),\n            7 => Key::Bid(rng.gen()),\n            8 => Key::Withdraw(rng.gen()),\n            9 => Key::Dictionary(rng.gen()),\n            10 => Key::SystemEntityRegistry,\n            11 => Key::EraSummary,\n            12 => Key::Unbond(rng.gen()),\n            13 => Key::ChainspecRegistry,\n            14 => Key::ChecksumRegistry,\n            15 => Key::BidAddr(rng.gen()),\n            16 => Key::SmartContract(rng.gen()),\n            17 => Key::AddressableEntity(rng.gen()),\n            18 => Key::ByteCode(rng.gen()),\n            19 => Key::Message(rng.gen()),\n            20 => Key::NamedKey(NamedKeyAddr::new_named_key_entry(rng.gen(), rng.gen())),\n            21 => Key::BlockGlobal(rng.gen()),\n            22 => Key::BalanceHold(rng.gen()),\n            23 => Key::EntryPoint(rng.gen()),\n            24 => Key::State(rng.gen()),\n            _ => unreachable!(),\n        }\n    }\n}\n\nmod serde_helpers {\n    use super::*;\n\n    #[derive(Serialize)]\n    pub(super) enum BinarySerHelper<'a> {\n        Account(&'a AccountHash),\n        Hash(&'a HashAddr),\n        
URef(&'a URef),\n        Transfer(&'a TransferAddr),\n        #[serde(with = \"crate::serde_helpers::deploy_hash_as_array\")]\n        DeployInfo(&'a DeployHash),\n        EraInfo(&'a EraId),\n        Balance(&'a URefAddr),\n        Bid(&'a AccountHash),\n        Withdraw(&'a AccountHash),\n        Dictionary(&'a HashAddr),\n        SystemEntityRegistry,\n        EraSummary,\n        Unbond(&'a AccountHash),\n        ChainspecRegistry,\n        ChecksumRegistry,\n        BidAddr(&'a BidAddr),\n        Package(&'a PackageAddr),\n        AddressableEntity(&'a EntityAddr),\n        ByteCode(&'a ByteCodeAddr),\n        Message(&'a MessageAddr),\n        NamedKey(&'a NamedKeyAddr),\n        BlockGlobal(&'a BlockGlobalAddr),\n        BalanceHold(&'a BalanceHoldAddr),\n        EntryPoint(&'a EntryPointAddr),\n        State(&'a EntityAddr),\n        RewardsHandling,\n    }\n\n    #[derive(Deserialize)]\n    pub(super) enum BinaryDeserHelper {\n        Account(AccountHash),\n        Hash(HashAddr),\n        URef(URef),\n        Transfer(TransferAddr),\n        #[serde(with = \"crate::serde_helpers::deploy_hash_as_array\")]\n        DeployInfo(DeployHash),\n        EraInfo(EraId),\n        Balance(URefAddr),\n        Bid(AccountHash),\n        Withdraw(AccountHash),\n        Dictionary(DictionaryAddr),\n        SystemEntityRegistry,\n        EraSummary,\n        Unbond(AccountHash),\n        ChainspecRegistry,\n        ChecksumRegistry,\n        BidAddr(BidAddr),\n        Package(PackageAddr),\n        AddressableEntity(EntityAddr),\n        ByteCode(ByteCodeAddr),\n        Message(MessageAddr),\n        NamedKey(NamedKeyAddr),\n        BlockGlobal(BlockGlobalAddr),\n        BalanceHold(BalanceHoldAddr),\n        EntryPoint(EntryPointAddr),\n        State(EntityAddr),\n        RewardsHandling,\n    }\n\n    impl<'a> From<&'a Key> for BinarySerHelper<'a> {\n        fn from(key: &'a Key) -> Self {\n            match key {\n                Key::Account(account_hash) => 
BinarySerHelper::Account(account_hash),\n                Key::Hash(hash_addr) => BinarySerHelper::Hash(hash_addr),\n                Key::URef(uref) => BinarySerHelper::URef(uref),\n                Key::Transfer(transfer_v1_addr) => BinarySerHelper::Transfer(transfer_v1_addr),\n                Key::DeployInfo(deploy_hash) => BinarySerHelper::DeployInfo(deploy_hash),\n                Key::EraInfo(era_id) => BinarySerHelper::EraInfo(era_id),\n                Key::Balance(uref_addr) => BinarySerHelper::Balance(uref_addr),\n                Key::Bid(account_hash) => BinarySerHelper::Bid(account_hash),\n                Key::Withdraw(account_hash) => BinarySerHelper::Withdraw(account_hash),\n                Key::Dictionary(addr) => BinarySerHelper::Dictionary(addr),\n                Key::SystemEntityRegistry => BinarySerHelper::SystemEntityRegistry,\n                Key::EraSummary => BinarySerHelper::EraSummary,\n                Key::Unbond(account_hash) => BinarySerHelper::Unbond(account_hash),\n                Key::ChainspecRegistry => BinarySerHelper::ChainspecRegistry,\n                Key::ChecksumRegistry => BinarySerHelper::ChecksumRegistry,\n                Key::BidAddr(bid_addr) => BinarySerHelper::BidAddr(bid_addr),\n                Key::Message(message_addr) => BinarySerHelper::Message(message_addr),\n                Key::SmartContract(package_addr) => BinarySerHelper::Package(package_addr),\n                Key::AddressableEntity(entity_addr) => {\n                    BinarySerHelper::AddressableEntity(entity_addr)\n                }\n                Key::ByteCode(byte_code_addr) => BinarySerHelper::ByteCode(byte_code_addr),\n                Key::NamedKey(named_key_addr) => BinarySerHelper::NamedKey(named_key_addr),\n                Key::BlockGlobal(addr) => BinarySerHelper::BlockGlobal(addr),\n                Key::BalanceHold(balance_hold_addr) => {\n                    BinarySerHelper::BalanceHold(balance_hold_addr)\n                }\n                
Key::EntryPoint(entry_point_addr) => BinarySerHelper::EntryPoint(entry_point_addr),\n                Key::State(entity_addr) => BinarySerHelper::State(entity_addr),\n                Key::RewardsHandling => BinarySerHelper::RewardsHandling,\n            }\n        }\n    }\n\n    impl From<BinaryDeserHelper> for Key {\n        fn from(helper: BinaryDeserHelper) -> Self {\n            match helper {\n                BinaryDeserHelper::Account(account_hash) => Key::Account(account_hash),\n                BinaryDeserHelper::Hash(hash_addr) => Key::Hash(hash_addr),\n                BinaryDeserHelper::URef(uref) => Key::URef(uref),\n                BinaryDeserHelper::Transfer(transfer_v1_addr) => Key::Transfer(transfer_v1_addr),\n                BinaryDeserHelper::DeployInfo(deploy_hash) => Key::DeployInfo(deploy_hash),\n                BinaryDeserHelper::EraInfo(era_id) => Key::EraInfo(era_id),\n                BinaryDeserHelper::Balance(uref_addr) => Key::Balance(uref_addr),\n                BinaryDeserHelper::Bid(account_hash) => Key::Bid(account_hash),\n                BinaryDeserHelper::Withdraw(account_hash) => Key::Withdraw(account_hash),\n                BinaryDeserHelper::Dictionary(addr) => Key::Dictionary(addr),\n                BinaryDeserHelper::SystemEntityRegistry => Key::SystemEntityRegistry,\n                BinaryDeserHelper::EraSummary => Key::EraSummary,\n                BinaryDeserHelper::Unbond(account_hash) => Key::Unbond(account_hash),\n                BinaryDeserHelper::ChainspecRegistry => Key::ChainspecRegistry,\n                BinaryDeserHelper::ChecksumRegistry => Key::ChecksumRegistry,\n                BinaryDeserHelper::BidAddr(bid_addr) => Key::BidAddr(bid_addr),\n                BinaryDeserHelper::Message(message_addr) => Key::Message(message_addr),\n                BinaryDeserHelper::Package(package_addr) => Key::SmartContract(package_addr),\n                BinaryDeserHelper::AddressableEntity(entity_addr) => {\n                    
Key::AddressableEntity(entity_addr)\n                }\n                BinaryDeserHelper::ByteCode(byte_code_addr) => Key::ByteCode(byte_code_addr),\n                BinaryDeserHelper::NamedKey(named_key_addr) => Key::NamedKey(named_key_addr),\n                BinaryDeserHelper::BlockGlobal(addr) => Key::BlockGlobal(addr),\n                BinaryDeserHelper::BalanceHold(balance_hold_addr) => {\n                    Key::BalanceHold(balance_hold_addr)\n                }\n                BinaryDeserHelper::EntryPoint(entry_point_addr) => {\n                    Key::EntryPoint(entry_point_addr)\n                }\n                BinaryDeserHelper::State(entity_addr) => Key::State(entity_addr),\n                BinaryDeserHelper::RewardsHandling => Key::RewardsHandling,\n            }\n        }\n    }\n}\n\nimpl Serialize for Key {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            self.to_formatted_string().serialize(serializer)\n        } else {\n            serde_helpers::BinarySerHelper::from(self).serialize(serializer)\n        }\n    }\n}\n\nimpl<'de> Deserialize<'de> for Key {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        if deserializer.is_human_readable() {\n            let formatted_key = String::deserialize(deserializer)?;\n            Key::from_formatted_str(&formatted_key).map_err(SerdeError::custom)\n        } else {\n            let binary_helper = serde_helpers::BinaryDeserHelper::deserialize(deserializer)?;\n            Ok(Key::from(binary_helper))\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::string::ToString;\n\n    use super::*;\n    use crate::{\n        account::ACCOUNT_HASH_FORMATTED_STRING_PREFIX,\n        bytesrepr::{Error, FromBytes},\n        uref::UREF_FORMATTED_STRING_PREFIX,\n        AccessRights, BlockTime, URef,\n    };\n\n    const TRANSFER_ADDR_FORMATTED_STRING_PREFIX: &str = 
\"transfer-\";\n    const ENTITY_PREFIX: &str = \"entity-\";\n    const ACCOUNT_ENTITY_PREFIX: &str = \"account-\";\n\n    const BYTE_CODE_PREFIX: &str = \"byte-code-\";\n    const EMPTY_PREFIX: &str = \"empty-\";\n\n    const ACCOUNT_KEY: Key = Key::Account(AccountHash::new([42; 32]));\n    const HASH_KEY: Key = Key::Hash([42; 32]);\n    const UREF_KEY: Key = Key::URef(URef::new([42; 32], AccessRights::READ));\n    const TRANSFER_KEY: Key = Key::Transfer(TransferAddr::new([42; 32]));\n    const DEPLOY_INFO_KEY: Key = Key::DeployInfo(DeployHash::from_raw([42; 32]));\n    const ERA_INFO_KEY: Key = Key::EraInfo(EraId::new(42));\n    const BALANCE_KEY: Key = Key::Balance([42; 32]);\n    const BID_KEY: Key = Key::Bid(AccountHash::new([42; 32]));\n    const UNIFIED_BID_KEY: Key = Key::BidAddr(BidAddr::legacy([42; 32]));\n    const VALIDATOR_BID_KEY: Key = Key::BidAddr(BidAddr::new_validator_addr([2; 32]));\n    const DELEGATOR_BID_KEY: Key =\n        Key::BidAddr(BidAddr::new_delegator_account_addr(([2; 32], [9; 32])));\n    const WITHDRAW_KEY: Key = Key::Withdraw(AccountHash::new([42; 32]));\n    const DICTIONARY_KEY: Key = Key::Dictionary([42; 32]);\n    const SYSTEM_ENTITY_REGISTRY_KEY: Key = Key::SystemEntityRegistry;\n    const ERA_SUMMARY_KEY: Key = Key::EraSummary;\n    const UNBOND_KEY: Key = Key::Unbond(AccountHash::new([42; 32]));\n    const CHAINSPEC_REGISTRY_KEY: Key = Key::ChainspecRegistry;\n    const CHECKSUM_REGISTRY_KEY: Key = Key::ChecksumRegistry;\n    const PACKAGE_KEY: Key = Key::SmartContract([42; 32]);\n    const ADDRESSABLE_ENTITY_SYSTEM_KEY: Key =\n        Key::AddressableEntity(EntityAddr::new_system([42; 32]));\n    const ADDRESSABLE_ENTITY_ACCOUNT_KEY: Key =\n        Key::AddressableEntity(EntityAddr::new_account([42; 32]));\n    const ADDRESSABLE_ENTITY_SMART_CONTRACT_KEY: Key =\n        Key::AddressableEntity(EntityAddr::new_smart_contract([42; 32]));\n    const BYTE_CODE_EMPTY_KEY: Key = Key::ByteCode(ByteCodeAddr::Empty);\n    const 
BYTE_CODE_V1_WASM_KEY: Key = Key::ByteCode(ByteCodeAddr::V1CasperWasm([42; 32]));\n    const MESSAGE_TOPIC_KEY: Key = Key::Message(MessageAddr::new_topic_addr(\n        EntityAddr::SmartContract([42; 32]),\n        TopicNameHash::new([42; 32]),\n    ));\n    const MESSAGE_KEY: Key = Key::Message(MessageAddr::new_message_addr(\n        EntityAddr::SmartContract([42; 32]),\n        TopicNameHash::new([2; 32]),\n        15,\n    ));\n    const NAMED_KEY: Key = Key::NamedKey(NamedKeyAddr::new_named_key_entry(\n        EntityAddr::new_smart_contract([42; 32]),\n        [43; 32],\n    ));\n    const BLOCK_TIME_KEY: Key = Key::BlockGlobal(BlockGlobalAddr::BlockTime);\n    const BLOCK_MESSAGE_COUNT_KEY: Key = Key::BlockGlobal(BlockGlobalAddr::MessageCount);\n    // const STATE_KEY: Key = Key::State(EntityAddr::new_contract_entity_addr([42; 32]));\n    const BALANCE_HOLD: Key =\n        Key::BalanceHold(BalanceHoldAddr::new_gas([42; 32], BlockTime::new(100)));\n    const STATE_KEY: Key = Key::State(EntityAddr::new_smart_contract([42; 32]));\n    const KEYS: &[Key] = &[\n        ACCOUNT_KEY,\n        HASH_KEY,\n        UREF_KEY,\n        TRANSFER_KEY,\n        DEPLOY_INFO_KEY,\n        ERA_INFO_KEY,\n        BALANCE_KEY,\n        BID_KEY,\n        WITHDRAW_KEY,\n        DICTIONARY_KEY,\n        SYSTEM_ENTITY_REGISTRY_KEY,\n        ERA_SUMMARY_KEY,\n        UNBOND_KEY,\n        CHAINSPEC_REGISTRY_KEY,\n        CHECKSUM_REGISTRY_KEY,\n        UNIFIED_BID_KEY,\n        VALIDATOR_BID_KEY,\n        DELEGATOR_BID_KEY,\n        PACKAGE_KEY,\n        ADDRESSABLE_ENTITY_SYSTEM_KEY,\n        ADDRESSABLE_ENTITY_ACCOUNT_KEY,\n        ADDRESSABLE_ENTITY_SMART_CONTRACT_KEY,\n        BYTE_CODE_EMPTY_KEY,\n        BYTE_CODE_V1_WASM_KEY,\n        MESSAGE_TOPIC_KEY,\n        MESSAGE_KEY,\n        NAMED_KEY,\n        BLOCK_TIME_KEY,\n        BLOCK_MESSAGE_COUNT_KEY,\n        BALANCE_HOLD,\n        STATE_KEY,\n    ];\n    const HEX_STRING: &str = 
\"2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\";\n    const TOPIC_NAME_HEX_STRING: &str =\n        \"0202020202020202020202020202020202020202020202020202020202020202\";\n    const MESSAGE_INDEX_HEX_STRING: &str = \"f\";\n    const UNIFIED_HEX_STRING: &str =\n        \"002a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\";\n    const VALIDATOR_HEX_STRING: &str =\n        \"010202020202020202020202020202020202020202020202020202020202020202\";\n    const DELEGATOR_HEX_STRING: &str =\n        \"0202020202020202020202020202020202020202020202020202020202020202020909090909090909090909090909090909090909090909090909090909090909\";\n\n    fn test_readable(right: AccessRights, is_true: bool) {\n        assert_eq!(right.is_readable(), is_true)\n    }\n\n    #[test]\n    fn test_is_readable() {\n        test_readable(AccessRights::READ, true);\n        test_readable(AccessRights::READ_ADD, true);\n        test_readable(AccessRights::READ_WRITE, true);\n        test_readable(AccessRights::READ_ADD_WRITE, true);\n        test_readable(AccessRights::ADD, false);\n        test_readable(AccessRights::ADD_WRITE, false);\n        test_readable(AccessRights::WRITE, false);\n    }\n\n    fn test_writable(right: AccessRights, is_true: bool) {\n        assert_eq!(right.is_writeable(), is_true)\n    }\n\n    #[test]\n    fn test_is_writable() {\n        test_writable(AccessRights::WRITE, true);\n        test_writable(AccessRights::READ_WRITE, true);\n        test_writable(AccessRights::ADD_WRITE, true);\n        test_writable(AccessRights::READ, false);\n        test_writable(AccessRights::ADD, false);\n        test_writable(AccessRights::READ_ADD, false);\n        test_writable(AccessRights::READ_ADD_WRITE, true);\n    }\n\n    fn test_addable(right: AccessRights, is_true: bool) {\n        assert_eq!(right.is_addable(), is_true)\n    }\n\n    #[test]\n    fn test_is_addable() {\n        test_addable(AccessRights::ADD, true);\n        
test_addable(AccessRights::READ_ADD, true);\n        test_addable(AccessRights::READ_WRITE, false);\n        test_addable(AccessRights::ADD_WRITE, true);\n        test_addable(AccessRights::READ, false);\n        test_addable(AccessRights::WRITE, false);\n        test_addable(AccessRights::READ_ADD_WRITE, true);\n    }\n\n    #[test]\n    fn should_display_key() {\n        assert_eq!(\n            format!(\"{}\", ACCOUNT_KEY),\n            format!(\"Key::Account({})\", HEX_STRING)\n        );\n        assert_eq!(\n            format!(\"{}\", HASH_KEY),\n            format!(\"Key::Hash({})\", HEX_STRING)\n        );\n        assert_eq!(\n            format!(\"{}\", UREF_KEY),\n            format!(\"Key::URef({}, READ)\", HEX_STRING)\n        );\n        assert_eq!(\n            format!(\"{}\", TRANSFER_KEY),\n            format!(\"Key::Transfer({})\", HEX_STRING)\n        );\n        assert_eq!(\n            format!(\"{}\", DEPLOY_INFO_KEY),\n            format!(\"Key::DeployInfo({})\", HEX_STRING)\n        );\n        assert_eq!(\n            format!(\"{}\", ERA_INFO_KEY),\n            \"Key::EraInfo(era 42)\".to_string()\n        );\n        assert_eq!(\n            format!(\"{}\", BALANCE_KEY),\n            format!(\"Key::Balance({})\", HEX_STRING)\n        );\n        assert_eq!(format!(\"{}\", BID_KEY), format!(\"Key::Bid({})\", HEX_STRING));\n        assert_eq!(\n            format!(\"{}\", UNIFIED_BID_KEY),\n            format!(\"Key::BidAddr({})\", UNIFIED_HEX_STRING)\n        );\n        assert_eq!(\n            format!(\"{}\", VALIDATOR_BID_KEY),\n            format!(\"Key::BidAddr({})\", VALIDATOR_HEX_STRING)\n        );\n        assert_eq!(\n            format!(\"{}\", DELEGATOR_BID_KEY),\n            format!(\"Key::BidAddr({})\", DELEGATOR_HEX_STRING)\n        );\n        assert_eq!(\n            format!(\"{}\", WITHDRAW_KEY),\n            format!(\"Key::Withdraw({})\", HEX_STRING)\n        );\n        assert_eq!(\n            format!(\"{}\", 
DICTIONARY_KEY),\n            format!(\"Key::Dictionary({})\", HEX_STRING)\n        );\n        assert_eq!(\n            format!(\"{}\", SYSTEM_ENTITY_REGISTRY_KEY),\n            format!(\n                \"Key::SystemEntityRegistry({})\",\n                base16::encode_lower(&PADDING_BYTES)\n            )\n        );\n        assert_eq!(\n            format!(\"{}\", ERA_SUMMARY_KEY),\n            format!(\"Key::EraSummary({})\", base16::encode_lower(&PADDING_BYTES))\n        );\n        assert_eq!(\n            format!(\"{}\", UNBOND_KEY),\n            format!(\"Key::Unbond({})\", HEX_STRING)\n        );\n        assert_eq!(\n            format!(\"{}\", CHAINSPEC_REGISTRY_KEY),\n            format!(\n                \"Key::ChainspecRegistry({})\",\n                base16::encode_lower(&PADDING_BYTES)\n            )\n        );\n        assert_eq!(\n            format!(\"{}\", CHECKSUM_REGISTRY_KEY),\n            format!(\n                \"Key::ChecksumRegistry({})\",\n                base16::encode_lower(&PADDING_BYTES),\n            )\n        );\n        assert_eq!(\n            format!(\"{}\", PACKAGE_KEY),\n            format!(\"Key::Package({})\", HEX_STRING)\n        );\n        assert_eq!(\n            format!(\"{}\", ADDRESSABLE_ENTITY_SYSTEM_KEY),\n            format!(\"Key::AddressableEntity(system-{})\", HEX_STRING)\n        );\n        assert_eq!(\n            format!(\"{}\", ADDRESSABLE_ENTITY_ACCOUNT_KEY),\n            format!(\"Key::AddressableEntity(account-{})\", HEX_STRING)\n        );\n        assert_eq!(\n            format!(\"{}\", ADDRESSABLE_ENTITY_SMART_CONTRACT_KEY),\n            format!(\"Key::AddressableEntity(contract-{})\", HEX_STRING)\n        );\n        assert_eq!(\n            format!(\"{}\", BYTE_CODE_EMPTY_KEY),\n            format!(\n                \"Key::ByteCode(byte-code-empty-{})\",\n                base16::encode_lower(&[0u8; 32])\n            )\n        );\n        assert_eq!(\n            format!(\"{}\", 
BYTE_CODE_V1_WASM_KEY),\n            format!(\"Key::ByteCode(byte-code-v1-wasm-{})\", HEX_STRING)\n        );\n        assert_eq!(\n            format!(\"{}\", MESSAGE_TOPIC_KEY),\n            format!(\n                \"Key::Message(entity-contract-{}-{})\",\n                HEX_STRING, HEX_STRING\n            )\n        );\n        assert_eq!(\n            format!(\"{}\", MESSAGE_KEY),\n            format!(\n                \"Key::Message(entity-contract-{}-{}-{})\",\n                HEX_STRING, TOPIC_NAME_HEX_STRING, MESSAGE_INDEX_HEX_STRING\n            )\n        );\n\n        assert_eq!(\n            format!(\"{}\", STATE_KEY),\n            format!(\n                \"Key::State(entity-contract-{})\",\n                base16::encode_lower(&[42; 32])\n            )\n        );\n        assert_eq!(\n            format!(\"{}\", BLOCK_TIME_KEY),\n            format!(\n                \"Key::BlockGlobal({}-{})\",\n                BlockGlobalAddr::BlockTime,\n                base16::encode_lower(&BLOCK_GLOBAL_PADDING_BYTES)\n            )\n        );\n        assert_eq!(\n            format!(\"{}\", BLOCK_MESSAGE_COUNT_KEY),\n            format!(\n                \"Key::BlockGlobal({}-{})\",\n                BlockGlobalAddr::MessageCount,\n                base16::encode_lower(&BLOCK_GLOBAL_PADDING_BYTES)\n            )\n        );\n    }\n\n    #[test]\n    fn abuse_vec_key() {\n        // Prefix is 2^32-1 = shouldn't allocate that much\n        let bytes: Vec<u8> = vec![255, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9];\n        let res: Result<(Vec<Key>, &[u8]), _> = FromBytes::from_bytes(&bytes);\n        assert_eq!(\n            res.expect_err(\"should fail\"),\n            Error::EarlyEndOfStream,\n            \"length prefix says 2^32-1, but there's not enough data in the stream\"\n        );\n\n        // Prefix is 2^32-2 = shouldn't allocate that much\n        let bytes: Vec<u8> = vec![255, 255, 255, 254, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9];\n        let res: 
Result<(Vec<Key>, &[u8]), _> = FromBytes::from_bytes(&bytes);\n        assert_eq!(\n            res.expect_err(\"should fail\"),\n            Error::EarlyEndOfStream,\n            \"length prefix says 2^32-2, but there's not enough data in the stream\"\n        );\n\n        // Valid prefix but not enough data in the stream\n        let bytes: Vec<u8> = vec![0, 0, 0, 254, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9];\n        let res: Result<(Vec<Key>, &[u8]), _> = FromBytes::from_bytes(&bytes);\n        assert_eq!(\n            res.expect_err(\"should fail\"),\n            Error::EarlyEndOfStream,\n            \"length prefix says 254, but there's not enough data in the stream\"\n        );\n    }\n\n    #[test]\n    fn check_key_account_getters() {\n        let account = [42; 32];\n        let account_hash = AccountHash::new(account);\n        let key1 = Key::Account(account_hash);\n        assert_eq!(key1.into_account(), Some(account_hash));\n        assert!(key1.into_entity_hash_addr().is_some());\n        assert!(key1.as_uref().is_none());\n    }\n\n    #[test]\n    fn check_key_hash_getters() {\n        let hash = [42; KEY_HASH_LENGTH];\n        let key1 = Key::Hash(hash);\n        assert!(key1.into_account().is_none());\n        assert_eq!(key1.into_hash_addr(), Some(hash));\n        assert!(key1.as_uref().is_none());\n    }\n\n    #[test]\n    fn check_entity_key_getters() {\n        let hash = [42; KEY_HASH_LENGTH];\n        let key1 = Key::contract_entity_key(AddressableEntityHash::new(hash));\n        assert!(key1.into_account().is_none());\n        assert_eq!(key1.into_entity_hash_addr(), Some(hash));\n        assert!(key1.as_uref().is_none());\n    }\n\n    #[test]\n    fn check_package_key_getters() {\n        let hash = [42; KEY_HASH_LENGTH];\n        let key1 = Key::SmartContract(hash);\n        assert!(key1.into_account().is_none());\n        assert_eq!(key1.into_package_addr(), Some(hash));\n        assert!(key1.as_uref().is_none());\n    }\n\n    #[test]\n    
fn check_key_uref_getters() {\n        let uref = URef::new([42; 32], AccessRights::READ_ADD_WRITE);\n        let key1 = Key::URef(uref);\n        assert!(key1.into_account().is_none());\n        assert!(key1.into_entity_hash_addr().is_none());\n        assert_eq!(key1.as_uref(), Some(&uref));\n    }\n\n    #[test]\n    fn key_max_serialized_length() {\n        let mut got_max = false;\n        for key in KEYS {\n            let expected = Key::max_serialized_length();\n            let actual = key.serialized_length();\n            assert!(\n                actual <= expected,\n                \"key too long {} expected {} actual {}\",\n                key,\n                expected,\n                actual\n            );\n            if actual == Key::max_serialized_length() {\n                got_max = true;\n            }\n        }\n        assert!(\n            got_max,\n            \"None of the Key variants has a serialized_length equal to \\\n            Key::max_serialized_length(), so Key::max_serialized_length() should be reduced\"\n        );\n    }\n\n    #[test]\n    fn should_parse_legacy_bid_key_from_string() {\n        let account_hash = AccountHash([1; 32]);\n        let legacy_bid_key = Key::Bid(account_hash);\n        let original_string = legacy_bid_key.to_formatted_string();\n\n        let parsed_bid_key =\n            Key::from_formatted_str(&original_string).expect(\"{string} (key = {key:?})\");\n        if let Key::Bid(parsed_account_hash) = parsed_bid_key {\n            assert_eq!(parsed_account_hash, account_hash,);\n            assert_eq!(legacy_bid_key, parsed_bid_key);\n\n            let translated_string = parsed_bid_key.to_formatted_string();\n            assert_eq!(original_string, translated_string);\n        } else {\n            panic!(\"should have account hash\");\n        }\n    }\n\n    #[test]\n    fn should_parse_legacy_unified_bid_key_from_string() {\n        let legacy_bid_addr = BidAddr::legacy([1; 32]);\n        let 
legacy_bid_key = Key::BidAddr(legacy_bid_addr);\n        assert_eq!(legacy_bid_addr.tag(), BidAddrTag::Unified,);\n\n        let original_string = legacy_bid_key.to_formatted_string();\n        let parsed_key =\n            Key::from_formatted_str(&original_string).expect(\"{string} (key = {key:?})\");\n        let parsed_bid_addr = parsed_key.as_bid_addr().expect(\"must have bid addr\");\n        assert!(parsed_key.is_bid_addr_key());\n        assert_eq!(parsed_bid_addr.tag(), legacy_bid_addr.tag(),);\n        assert_eq!(*parsed_bid_addr, legacy_bid_addr);\n\n        let translated_string = parsed_key.to_formatted_string();\n        assert_eq!(original_string, translated_string);\n        assert_eq!(parsed_key.as_bid_addr(), legacy_bid_key.as_bid_addr(),);\n    }\n\n    #[test]\n    fn should_parse_validator_bid_key_from_string() {\n        let validator_bid_addr = BidAddr::new_validator_addr([1; 32]);\n        let validator_bid_key = Key::BidAddr(validator_bid_addr);\n        assert_eq!(validator_bid_addr.tag(), BidAddrTag::Validator,);\n\n        let original_string = validator_bid_key.to_formatted_string();\n        let parsed_key =\n            Key::from_formatted_str(&original_string).expect(\"{string} (key = {key:?})\");\n        let parsed_bid_addr = parsed_key.as_bid_addr().expect(\"must have bid addr\");\n        assert!(parsed_key.is_bid_addr_key());\n        assert_eq!(parsed_bid_addr.tag(), validator_bid_addr.tag(),);\n        assert_eq!(*parsed_bid_addr, validator_bid_addr,);\n\n        let translated_string = parsed_key.to_formatted_string();\n        assert_eq!(original_string, translated_string);\n        assert_eq!(parsed_key.as_bid_addr(), validator_bid_key.as_bid_addr(),);\n    }\n\n    #[test]\n    fn should_parse_delegator_bid_key_from_string() {\n        let delegator_bid_addr = BidAddr::new_delegator_account_addr(([1; 32], [9; 32]));\n        let delegator_bid_key = Key::BidAddr(delegator_bid_addr);\n        
assert_eq!(delegator_bid_addr.tag(), BidAddrTag::DelegatedAccount);\n\n        let original_string = delegator_bid_key.to_formatted_string();\n\n        let parsed_key =\n            Key::from_formatted_str(&original_string).expect(\"{string} (key = {key:?})\");\n        let parsed_bid_addr = parsed_key.as_bid_addr().expect(\"must have bid addr\");\n        assert!(parsed_key.is_bid_addr_key());\n        assert_eq!(parsed_bid_addr.tag(), delegator_bid_addr.tag(),);\n        assert_eq!(*parsed_bid_addr, delegator_bid_addr,);\n\n        let translated_string = parsed_key.to_formatted_string();\n        assert_eq!(original_string, translated_string);\n        assert_eq!(parsed_key.as_bid_addr(), delegator_bid_key.as_bid_addr(),);\n    }\n\n    #[test]\n    fn should_parse_credit_bid_key_from_string() {\n        let credit_bid_addr = BidAddr::Credit {\n            validator: AccountHash::new([1; 32]),\n            era_id: 1.into(),\n        };\n        let delegator_bid_key = Key::BidAddr(credit_bid_addr);\n        assert_eq!(credit_bid_addr.tag(), BidAddrTag::Credit);\n\n        let original_string = delegator_bid_key.to_formatted_string();\n\n        let parsed_key =\n            Key::from_formatted_str(&original_string).expect(\"{string} (key = {key:?})\");\n        let parsed_bid_addr = parsed_key.as_bid_addr().expect(\"must have bid addr\");\n        assert!(parsed_key.is_bid_addr_key());\n        assert_eq!(parsed_bid_addr.tag(), credit_bid_addr.tag(),);\n        assert_eq!(*parsed_bid_addr, credit_bid_addr,);\n\n        let translated_string = parsed_key.to_formatted_string();\n        assert_eq!(original_string, translated_string);\n        assert_eq!(parsed_key.as_bid_addr(), delegator_bid_key.as_bid_addr());\n    }\n\n    #[test]\n    fn should_parse_key_from_str() {\n        for key in KEYS {\n            let string = key.to_formatted_string();\n            let parsed_key = Key::from_formatted_str(&string).expect(\"{string} (key = {key:?})\");\n            
assert_eq!(parsed_key, *key, \"{string} (key = {key:?})\");\n        }\n    }\n\n    #[test]\n    fn should_fail_to_parse_key_from_str() {\n        assert!(\n            Key::from_formatted_str(ACCOUNT_HASH_FORMATTED_STRING_PREFIX)\n                .unwrap_err()\n                .to_string()\n                .starts_with(\"account-key from string error: \")\n        );\n        assert!(Key::from_formatted_str(HASH_PREFIX)\n            .unwrap_err()\n            .to_string()\n            .starts_with(\"hash-key from string error: \"));\n        assert!(Key::from_formatted_str(UREF_FORMATTED_STRING_PREFIX)\n            .unwrap_err()\n            .to_string()\n            .starts_with(\"uref-key from string error: \"));\n        assert!(\n            Key::from_formatted_str(TRANSFER_ADDR_FORMATTED_STRING_PREFIX)\n                .unwrap_err()\n                .to_string()\n                .starts_with(\"legacy-transfer-key from string error: \")\n        );\n        assert!(Key::from_formatted_str(DEPLOY_INFO_PREFIX)\n            .unwrap_err()\n            .to_string()\n            .starts_with(\"deploy-info-key from string error: \"));\n        assert!(Key::from_formatted_str(ERA_INFO_PREFIX)\n            .unwrap_err()\n            .to_string()\n            .starts_with(\"era-info-key from string error: \"));\n        assert!(Key::from_formatted_str(BALANCE_PREFIX)\n            .unwrap_err()\n            .to_string()\n            .starts_with(\"balance-key from string error: \"));\n        assert!(Key::from_formatted_str(BID_PREFIX)\n            .unwrap_err()\n            .to_string()\n            .starts_with(\"bid-key from string error: \"));\n        assert!(Key::from_formatted_str(WITHDRAW_PREFIX)\n            .unwrap_err()\n            .to_string()\n            .starts_with(\"withdraw-key from string error: \"));\n        assert!(Key::from_formatted_str(DICTIONARY_PREFIX)\n            .unwrap_err()\n            .to_string()\n            
.starts_with(\"dictionary-key from string error: \"));\n        assert!(Key::from_formatted_str(SYSTEM_ENTITY_REGISTRY_PREFIX)\n            .unwrap_err()\n            .to_string()\n            .starts_with(\"system-contract-registry-key from string error: \"));\n        assert!(Key::from_formatted_str(ERA_SUMMARY_PREFIX)\n            .unwrap_err()\n            .to_string()\n            .starts_with(\"era-summary-key from string error\"));\n        assert!(Key::from_formatted_str(UNBOND_PREFIX)\n            .unwrap_err()\n            .to_string()\n            .starts_with(\"unbond-key from string error: \"));\n        assert!(Key::from_formatted_str(CHAINSPEC_REGISTRY_PREFIX)\n            .unwrap_err()\n            .to_string()\n            .starts_with(\"chainspec-registry-key from string error: \"));\n        assert!(Key::from_formatted_str(CHECKSUM_REGISTRY_PREFIX)\n            .unwrap_err()\n            .to_string()\n            .starts_with(\"checksum-registry-key from string error: \"));\n        let bid_addr_err = Key::from_formatted_str(BID_ADDR_PREFIX)\n            .unwrap_err()\n            .to_string();\n        assert!(\n            bid_addr_err.starts_with(\"bid-addr-key from string error: \"),\n            \"{}\",\n            bid_addr_err\n        );\n        assert!(Key::from_formatted_str(PACKAGE_PREFIX)\n            .unwrap_err()\n            .to_string()\n            .starts_with(\"package-key from string error: \"));\n\n        let error_string =\n            Key::from_formatted_str(&format!(\"{}{}\", ENTITY_PREFIX, ACCOUNT_ENTITY_PREFIX))\n                .unwrap_err()\n                .to_string();\n        assert!(error_string.starts_with(\"addressable-entity-key from string error: \"));\n        assert!(\n            Key::from_formatted_str(&format!(\"{}{}\", BYTE_CODE_PREFIX, EMPTY_PREFIX))\n                .unwrap_err()\n                .to_string()\n                .starts_with(\"byte-code-key from string error: \")\n        );\n        
let invalid_prefix = \"a-0000000000000000000000000000000000000000000000000000000000000000\";\n        assert_eq!(\n            Key::from_formatted_str(invalid_prefix)\n                .unwrap_err()\n                .to_string(),\n            \"unknown prefix for key\"\n        );\n\n        let missing_hyphen_prefix =\n            \"hash0000000000000000000000000000000000000000000000000000000000000000\";\n        assert_eq!(\n            Key::from_formatted_str(missing_hyphen_prefix)\n                .unwrap_err()\n                .to_string(),\n            \"unknown prefix for key\"\n        );\n\n        let no_prefix = \"0000000000000000000000000000000000000000000000000000000000000000\";\n        assert_eq!(\n            Key::from_formatted_str(no_prefix).unwrap_err().to_string(),\n            \"unknown prefix for key\"\n        );\n\n        let balance_hold_err = Key::from_formatted_str(BALANCE_HOLD_PREFIX)\n            .unwrap_err()\n            .to_string();\n        assert!(\n            balance_hold_err.starts_with(\"balance-hold from string error: \"),\n            \"{}\",\n            bid_addr_err\n        );\n    }\n\n    #[test]\n    fn key_to_json() {\n        for key in KEYS.iter() {\n            assert_eq!(\n                serde_json::to_string(key).unwrap(),\n                format!(\"\\\"{}\\\"\", key.to_formatted_string())\n            );\n        }\n    }\n\n    #[test]\n    fn serialization_roundtrip_bincode() {\n        for key in KEYS {\n            let encoded = bincode::serialize(key).unwrap();\n            let decoded = bincode::deserialize(&encoded).unwrap();\n            assert_eq!(key, &decoded);\n        }\n    }\n\n    #[test]\n    fn key_tag_bytes_roundtrip() {\n        for key in KEYS {\n            let tag: KeyTag = key.tag();\n            bytesrepr::test_serialization_roundtrip(&tag);\n        }\n    }\n\n    #[test]\n    fn bytesrepr_serialization_roundtrip() {\n        bytesrepr::test_serialization_roundtrip(&ACCOUNT_KEY);\n     
   bytesrepr::test_serialization_roundtrip(&HASH_KEY);\n        bytesrepr::test_serialization_roundtrip(&UREF_KEY);\n        bytesrepr::test_serialization_roundtrip(&TRANSFER_KEY);\n        bytesrepr::test_serialization_roundtrip(&DEPLOY_INFO_KEY);\n        bytesrepr::test_serialization_roundtrip(&ERA_INFO_KEY);\n        bytesrepr::test_serialization_roundtrip(&BALANCE_KEY);\n        bytesrepr::test_serialization_roundtrip(&BID_KEY);\n        bytesrepr::test_serialization_roundtrip(&WITHDRAW_KEY);\n        bytesrepr::test_serialization_roundtrip(&DICTIONARY_KEY);\n        // bytesrepr::test_serialization_roundtrip(&SYSTEM_CONTRACT_REGISTRY_KEY);\n        bytesrepr::test_serialization_roundtrip(&ERA_SUMMARY_KEY);\n        bytesrepr::test_serialization_roundtrip(&UNBOND_KEY);\n        bytesrepr::test_serialization_roundtrip(&CHAINSPEC_REGISTRY_KEY);\n        bytesrepr::test_serialization_roundtrip(&CHECKSUM_REGISTRY_KEY);\n        // bytesrepr::test_serialization_roundtrip(&UNIFIED_BID_KEY);\n        bytesrepr::test_serialization_roundtrip(&VALIDATOR_BID_KEY);\n        bytesrepr::test_serialization_roundtrip(&DELEGATOR_BID_KEY);\n        bytesrepr::test_serialization_roundtrip(&PACKAGE_KEY);\n        bytesrepr::test_serialization_roundtrip(&ADDRESSABLE_ENTITY_SYSTEM_KEY);\n        bytesrepr::test_serialization_roundtrip(&ADDRESSABLE_ENTITY_ACCOUNT_KEY);\n        bytesrepr::test_serialization_roundtrip(&ADDRESSABLE_ENTITY_SMART_CONTRACT_KEY);\n        bytesrepr::test_serialization_roundtrip(&BYTE_CODE_EMPTY_KEY);\n        bytesrepr::test_serialization_roundtrip(&BYTE_CODE_V1_WASM_KEY);\n        bytesrepr::test_serialization_roundtrip(&MESSAGE_TOPIC_KEY);\n        bytesrepr::test_serialization_roundtrip(&MESSAGE_KEY);\n        bytesrepr::test_serialization_roundtrip(&NAMED_KEY);\n        bytesrepr::test_serialization_roundtrip(&STATE_KEY);\n    }\n\n    #[test]\n    fn serialization_roundtrip_json() {\n        for key in KEYS {\n            round_trip(key);\n        
}\n\n        let zeros = [0; BLAKE2B_DIGEST_LENGTH];\n        let nines = [9; BLAKE2B_DIGEST_LENGTH];\n\n        round_trip(&Key::Account(AccountHash::new(zeros)));\n        round_trip(&Key::Hash(zeros));\n        round_trip(&Key::URef(URef::new(zeros, AccessRights::READ)));\n        round_trip(&Key::Transfer(TransferAddr::new(zeros)));\n        round_trip(&Key::DeployInfo(DeployHash::from_raw(zeros)));\n        round_trip(&Key::EraInfo(EraId::from(0)));\n        round_trip(&Key::Balance(URef::new(zeros, AccessRights::READ).addr()));\n        round_trip(&Key::Bid(AccountHash::new(zeros)));\n        round_trip(&Key::BidAddr(BidAddr::legacy(zeros)));\n        round_trip(&Key::BidAddr(BidAddr::new_validator_addr(zeros)));\n        round_trip(&Key::BidAddr(BidAddr::new_delegator_account_addr((\n            zeros, nines,\n        ))));\n        round_trip(&Key::Withdraw(AccountHash::new(zeros)));\n        round_trip(&Key::Dictionary(zeros));\n        round_trip(&Key::Unbond(AccountHash::new(zeros)));\n        round_trip(&Key::SmartContract(zeros));\n        round_trip(&Key::AddressableEntity(EntityAddr::new_system(zeros)));\n        round_trip(&Key::AddressableEntity(EntityAddr::new_account(zeros)));\n        round_trip(&Key::AddressableEntity(EntityAddr::new_smart_contract(\n            zeros,\n        )));\n        round_trip(&Key::ByteCode(ByteCodeAddr::Empty));\n        round_trip(&Key::ByteCode(ByteCodeAddr::V1CasperWasm(zeros)));\n        round_trip(&Key::Message(MessageAddr::new_topic_addr(\n            EntityAddr::new_smart_contract(zeros),\n            nines.into(),\n        )));\n        round_trip(&Key::Message(MessageAddr::new_message_addr(\n            EntityAddr::new_smart_contract(zeros),\n            nines.into(),\n            1,\n        )));\n        round_trip(&Key::NamedKey(NamedKeyAddr::default()));\n        round_trip(&Key::BlockGlobal(BlockGlobalAddr::BlockTime));\n        round_trip(&Key::BlockGlobal(BlockGlobalAddr::MessageCount));\n        
round_trip(&Key::BlockGlobal(BlockGlobalAddr::ProtocolVersion));\n        round_trip(&Key::BlockGlobal(BlockGlobalAddr::AddressableEntity));\n        round_trip(&Key::BalanceHold(BalanceHoldAddr::default()));\n        round_trip(&Key::State(EntityAddr::new_system(zeros)));\n    }\n\n    #[test]\n    fn state_json_deserialization() {\n        let mut test_rng = TestRng::new();\n        let state_key = Key::State(EntityAddr::new_account(test_rng.gen()));\n        round_trip(&state_key);\n\n        let state_key = Key::State(EntityAddr::new_system(test_rng.gen()));\n        round_trip(&state_key);\n\n        let state_key = Key::State(EntityAddr::new_smart_contract(test_rng.gen()));\n        round_trip(&state_key);\n    }\n\n    #[test]\n    fn roundtrip() {\n        bytesrepr::test_serialization_roundtrip(&ACCOUNT_KEY);\n        bytesrepr::test_serialization_roundtrip(&HASH_KEY);\n        bytesrepr::test_serialization_roundtrip(&UREF_KEY);\n        bytesrepr::test_serialization_roundtrip(&TRANSFER_KEY);\n        bytesrepr::test_serialization_roundtrip(&DEPLOY_INFO_KEY);\n        bytesrepr::test_serialization_roundtrip(&ERA_INFO_KEY);\n        bytesrepr::test_serialization_roundtrip(&BALANCE_KEY);\n        bytesrepr::test_serialization_roundtrip(&BID_KEY);\n        bytesrepr::test_serialization_roundtrip(&WITHDRAW_KEY);\n        bytesrepr::test_serialization_roundtrip(&DICTIONARY_KEY);\n        bytesrepr::test_serialization_roundtrip(&SYSTEM_ENTITY_REGISTRY_KEY);\n        bytesrepr::test_serialization_roundtrip(&ERA_SUMMARY_KEY);\n        bytesrepr::test_serialization_roundtrip(&UNBOND_KEY);\n        bytesrepr::test_serialization_roundtrip(&CHAINSPEC_REGISTRY_KEY);\n        bytesrepr::test_serialization_roundtrip(&CHECKSUM_REGISTRY_KEY);\n        bytesrepr::test_serialization_roundtrip(&UNIFIED_BID_KEY);\n        bytesrepr::test_serialization_roundtrip(&VALIDATOR_BID_KEY);\n        bytesrepr::test_serialization_roundtrip(&DELEGATOR_BID_KEY);\n        
bytesrepr::test_serialization_roundtrip(&PACKAGE_KEY);\n        bytesrepr::test_serialization_roundtrip(&ADDRESSABLE_ENTITY_SYSTEM_KEY);\n        bytesrepr::test_serialization_roundtrip(&ADDRESSABLE_ENTITY_ACCOUNT_KEY);\n        bytesrepr::test_serialization_roundtrip(&ADDRESSABLE_ENTITY_SMART_CONTRACT_KEY);\n        bytesrepr::test_serialization_roundtrip(&BYTE_CODE_EMPTY_KEY);\n        bytesrepr::test_serialization_roundtrip(&BYTE_CODE_V1_WASM_KEY);\n        bytesrepr::test_serialization_roundtrip(&MESSAGE_TOPIC_KEY);\n        bytesrepr::test_serialization_roundtrip(&MESSAGE_KEY);\n        bytesrepr::test_serialization_roundtrip(&NAMED_KEY);\n    }\n\n    fn round_trip(key: &Key) {\n        let encoded = serde_json::to_value(key).unwrap();\n        let decoded = serde_json::from_value(encoded.clone())\n            .unwrap_or_else(|_| panic!(\"{} {}\", key, encoded));\n        assert_eq!(key, &decoded);\n    }\n}\n\n#[cfg(test)]\nmod proptest {\n    use crate::gens;\n    use proptest::prelude::*;\n\n    proptest! {\n        #[test]\n        fn test_json_roundtrip_for_bidaddr_key(key in gens::all_keys_arb()) {\n            let json_string = serde_json::to_string_pretty(&key).unwrap();\n            let decoded = serde_json::from_str(&json_string).unwrap();\n            assert_eq!(key, decoded);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/lib.rs",
    "content": "//! Types used to allow creation of Wasm contracts and tests for use on the Casper Platform.\n\n#![cfg_attr(\n    not(any(\n        feature = \"json-schema\",\n        feature = \"datasize\",\n        feature = \"std\",\n        feature = \"testing\",\n        test,\n    )),\n    no_std\n)]\n#![doc(html_root_url = \"https://docs.rs/casper-types/7.0.0\")]\n#![doc(\n    html_favicon_url = \"https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon_48.png\",\n    html_logo_url = \"https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon.png\"\n)]\n#![cfg_attr(docsrs, feature(doc_auto_cfg))]\n\n#[cfg_attr(not(test), macro_use)]\nextern crate alloc;\n\nextern crate core;\n\nmod access_rights;\npub mod account;\npub mod addressable_entity;\npub mod api_error;\nmod auction_state;\nmod block;\nmod block_time;\nmod byte_code;\npub mod bytesrepr;\n#[cfg(any(feature = \"std\", test))]\nmod chainspec;\npub mod checksummed_hex;\nmod cl_type;\nmod cl_value;\npub mod contract_messages;\nmod contract_wasm;\npub mod contracts;\npub mod crypto;\nmod deploy_info;\nmod digest;\nmod display_iter;\nmod era_id;\npub mod execution;\n#[cfg(any(feature = \"std-fs-io\", test))]\npub mod file_utils;\nmod gas;\n#[cfg(any(feature = \"testing\", feature = \"gens\", test))]\npub mod gens;\npub mod global_state;\n#[cfg(feature = \"json-schema\")]\nmod json_pretty_printer;\nmod key;\nmod motes;\nmod package;\nmod peers_map;\nmod phase;\nmod protocol_version;\npub mod runtime_footprint;\nmod semver;\npub(crate) mod serde_helpers;\nmod stored_value;\npub mod system;\nmod tagged;\n#[cfg(any(feature = \"testing\", test))]\npub mod testing;\nmod timestamp;\nmod transaction;\nmod transfer;\nmod transfer_result;\nmod uint;\nmod uref;\nmod validator_change;\n\n#[cfg(all(feature = \"std\", any(feature = \"std-fs-io\", test)))]\nuse libc::{c_long, sysconf, _SC_PAGESIZE};\n#[cfg(feature = \"std\")]\nuse 
once_cell::sync::Lazy;\n\npub use crate::uint::{UIntParseError, U128, U256, U512};\n\npub use access_rights::{\n    AccessRights, ContextAccessRights, GrantedAccess, ACCESS_RIGHTS_SERIALIZED_LENGTH,\n};\npub use account::Account;\n#[doc(inline)]\npub use addressable_entity::{\n    AddressableEntity, AddressableEntityHash, ContractRuntimeTag, EntityAddr, EntityEntryPoint,\n    EntityKind, EntryPointAccess, EntryPointAddr, EntryPointPayment, EntryPointType,\n    EntryPointValue, EntryPoints, Parameter, Parameters, DEFAULT_ENTRY_POINT_NAME,\n};\n#[doc(inline)]\npub use api_error::ApiError;\n#[allow(deprecated)]\npub use auction_state::{AuctionState, JsonEraValidators, JsonValidatorWeights};\n#[cfg(all(feature = \"std\", feature = \"json-schema\"))]\npub use block::JsonBlockWithSignatures;\npub use block::{\n    AvailableBlockRange, Block, BlockBody, BlockBodyV1, BlockBodyV2, BlockGlobalAddr,\n    BlockGlobalAddrTag, BlockHash, BlockHashAndHeight, BlockHeader, BlockHeaderV1, BlockHeaderV2,\n    BlockHeaderWithSignatures, BlockHeaderWithSignaturesValidationError, BlockIdentifier,\n    BlockSignatures, BlockSignaturesMergeError, BlockSignaturesV1, BlockSignaturesV2,\n    BlockSyncStatus, BlockSynchronizerStatus, BlockV1, BlockV2, BlockValidationError,\n    BlockWithSignatures, ChainNameDigest, EraEnd, EraEndV1, EraEndV2, EraReport, FinalitySignature,\n    FinalitySignatureId, FinalitySignatureV1, FinalitySignatureV2, RewardedSignatures, Rewards,\n    SingleBlockRewardedSignatures,\n};\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\npub use block::{TestBlockBuilder, TestBlockV1Builder};\npub use block_time::{BlockTime, HoldsEpoch, BLOCKTIME_SERIALIZED_LENGTH};\npub use byte_code::{ByteCode, ByteCodeAddr, ByteCodeHash, ByteCodeKind};\npub use cl_type::{named_key_type, CLType, CLTyped};\n#[cfg(feature = \"json-schema\")]\npub use cl_value::cl_value_to_json;\npub use cl_value::{\n    handle_stored_dictionary_value, CLTypeMismatch, CLValue, CLValueError, 
ChecksumRegistry,\n    DictionaryValue as CLValueDictionary, SystemHashRegistry,\n};\npub use global_state::Pointer;\n\n#[cfg(any(feature = \"std\", test))]\npub use chainspec::{\n    AccountConfig, AccountsConfig, ActivationPoint, AdministratorAccount, AuctionCosts,\n    BrTableCost, Chainspec, ChainspecRawBytes, ChainspecRegistry, ConsensusProtocolName,\n    ControlFlowCosts, CoreConfig, DelegatorConfig, DeployConfig, FeeHandling, GenesisAccount,\n    GenesisConfig, GenesisValidator, GlobalStateUpdate, GlobalStateUpdateConfig,\n    GlobalStateUpdateError, HandlePaymentCosts, HighwayConfig, HoldBalanceHandling, HostFunction,\n    HostFunctionCost, HostFunctionCostsV1, HostFunctionCostsV2, HostFunctionV2,\n    LegacyRequiredFinality, MessageLimits, MintCosts, NetworkConfig, NextUpgrade, OpcodeCosts,\n    PricingHandling, ProtocolConfig, ProtocolUpgradeConfig, RefundHandling, RewardsHandling,\n    StandardPaymentCosts, StorageCosts, SystemConfig, TransactionConfig, TransactionLaneDefinition,\n    TransactionV1Config, VacancyConfig, ValidatorConfig, WasmConfig, WasmV1Config, WasmV2Config,\n    DEFAULT_BASELINE_MOTES_AMOUNT, DEFAULT_GAS_HOLD_INTERVAL, DEFAULT_HOST_FUNCTION_NEW_DICTIONARY,\n    DEFAULT_MINIMUM_BID_AMOUNT, DEFAULT_REFUND_HANDLING, REWARDS_HANDLING_RATIO_TAG,\n};\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\npub use chainspec::{\n    DEFAULT_ADD_BID_COST, DEFAULT_ADD_COST, DEFAULT_BIT_COST, DEFAULT_CONST_COST,\n    DEFAULT_CONTROL_FLOW_BLOCK_OPCODE, DEFAULT_CONTROL_FLOW_BR_IF_OPCODE,\n    DEFAULT_CONTROL_FLOW_BR_OPCODE, DEFAULT_CONTROL_FLOW_BR_TABLE_MULTIPLIER,\n    DEFAULT_CONTROL_FLOW_BR_TABLE_OPCODE, DEFAULT_CONTROL_FLOW_CALL_INDIRECT_OPCODE,\n    DEFAULT_CONTROL_FLOW_CALL_OPCODE, DEFAULT_CONTROL_FLOW_DROP_OPCODE,\n    DEFAULT_CONTROL_FLOW_ELSE_OPCODE, DEFAULT_CONTROL_FLOW_END_OPCODE,\n    DEFAULT_CONTROL_FLOW_IF_OPCODE, DEFAULT_CONTROL_FLOW_LOOP_OPCODE,\n    DEFAULT_CONTROL_FLOW_RETURN_OPCODE, 
DEFAULT_CONTROL_FLOW_SELECT_OPCODE,\n    DEFAULT_CONVERSION_COST, DEFAULT_CURRENT_MEMORY_COST, DEFAULT_DELEGATE_COST, DEFAULT_DIV_COST,\n    DEFAULT_FEE_HANDLING, DEFAULT_GLOBAL_COST, DEFAULT_GROW_MEMORY_COST,\n    DEFAULT_INTEGER_COMPARISON_COST, DEFAULT_LARGE_TRANSACTION_GAS_LIMIT, DEFAULT_LOAD_COST,\n    DEFAULT_LOCAL_COST, DEFAULT_MAX_PAYMENT_MOTES, DEFAULT_MAX_STACK_HEIGHT,\n    DEFAULT_MIN_TRANSFER_MOTES, DEFAULT_MUL_COST, DEFAULT_NEW_DICTIONARY_COST, DEFAULT_NOP_COST,\n    DEFAULT_STORE_COST, DEFAULT_TRANSFER_COST, DEFAULT_UNREACHABLE_COST, DEFAULT_WASM_MAX_MEMORY,\n};\npub use contract_wasm::{ContractWasm, ContractWasmHash};\n#[doc(inline)]\npub use contracts::{Contract, NamedKeys};\npub use crypto::*;\npub use deploy_info::DeployInfo;\npub use digest::{\n    ChunkWithProof, ChunkWithProofVerificationError, Digest, DigestError, IndexedMerkleProof,\n    MerkleConstructionError, MerkleVerificationError,\n};\npub use display_iter::DisplayIter;\npub use era_id::EraId;\npub use gas::Gas;\n#[cfg(feature = \"json-schema\")]\npub use json_pretty_printer::json_pretty_print;\n#[doc(inline)]\npub use key::{\n    DictionaryAddr, FromStrError as KeyFromStrError, HashAddr, Key, KeyTag, PackageAddr,\n    BLAKE2B_DIGEST_LENGTH, DICTIONARY_ITEM_KEY_MAX_LENGTH, KEY_DICTIONARY_LENGTH, KEY_HASH_LENGTH,\n};\npub use motes::Motes;\n#[doc(inline)]\npub use package::{\n    EntityVersion, EntityVersionKey, EntityVersions, Group, Groups, Package, PackageHash,\n    PackageStatus, ENTITY_INITIAL_VERSION,\n};\npub use peers_map::{PeerEntry, Peers};\npub use phase::{Phase, PHASE_SERIALIZED_LENGTH};\npub use protocol_version::{ProtocolVersion, VersionCheckResult};\npub use runtime_footprint::RuntimeFootprint;\npub use semver::{ParseSemVerError, SemVer, SEM_VER_SERIALIZED_LENGTH};\npub use stored_value::{\n    GlobalStateIdentifier, StoredValue, StoredValueTag, TypeMismatch as StoredValueTypeMismatch,\n};\npub use system::mint::METHOD_TRANSFER;\npub use tagged::Tagged;\n#[cfg(any(feature 
= \"std\", test))]\npub use timestamp::serde_option_time_diff;\npub use timestamp::{TimeDiff, Timestamp};\n#[cfg(any(feature = \"std\", test))]\npub use transaction::{calculate_lane_id_for_deploy, calculate_transaction_lane, GasLimited};\npub use transaction::{\n    AddressableEntityIdentifier, Approval, ApprovalsHash, Deploy, DeployDecodeFromJsonError,\n    DeployError, DeployExcessiveSizeError, DeployHash, DeployHeader, DeployId,\n    ExecutableDeployItem, ExecutableDeployItemIdentifier, ExecutionInfo, InitiatorAddr,\n    InvalidDeploy, InvalidTransaction, InvalidTransactionV1, NamedArg, PackageIdentifier,\n    PricingMode, PricingModeError, RuntimeArgs, Transaction, TransactionArgs,\n    TransactionEntryPoint, TransactionHash, TransactionId, TransactionInvocationTarget,\n    TransactionRuntimeParams, TransactionScheduling, TransactionTarget, TransactionV1,\n    TransactionV1DecodeFromJsonError, TransactionV1Error, TransactionV1ExcessiveSizeError,\n    TransactionV1Hash, TransactionV1Payload, TransferTarget,\n};\npub use transfer::{\n    Transfer, TransferAddr, TransferFromStrError, TransferV1, TransferV2, TRANSFER_ADDR_LENGTH,\n};\npub use transfer_result::{TransferResult, TransferredTo};\npub use uref::{\n    FromStrError as URefFromStrError, URef, URefAddr, UREF_ADDR_LENGTH, UREF_SERIALIZED_LENGTH,\n};\npub use validator_change::ValidatorChange;\n/// The lane identifier for the native mint interaction.\npub const MINT_LANE_ID: u8 = 0;\n/// The lane identifier for the native auction interaction.\npub const AUCTION_LANE_ID: u8 = 1;\n/// The lane identifier for the install/upgrade auction interaction.\npub const INSTALL_UPGRADE_LANE_ID: u8 = 2;\n/// The lane identifier for large wasms.\npub(crate) const LARGE_WASM_LANE_ID: u8 = 3;\n/// The lane identifier for medium wasms.\npub(crate) const MEDIUM_WASM_LANE_ID: u8 = 4;\n/// The lane identifier for small wasms.\npub(crate) const SMALL_WASM_LANE_ID: u8 = 5;\n\n/// OS page size.\n#[cfg(feature = \"std\")]\npub 
static OS_PAGE_SIZE: Lazy<usize> = Lazy::new(|| {\n    /// Sensible default for many if not all systems.\n    const DEFAULT_PAGE_SIZE: usize = 4096;\n\n    #[cfg(any(feature = \"std-fs-io\", test))]\n    // https://www.gnu.org/software/libc/manual/html_node/Sysconf.html\n    let value: c_long = unsafe { sysconf(_SC_PAGESIZE) };\n\n    #[cfg(not(any(feature = \"std-fs-io\", test)))]\n    let value = 0;\n\n    if value <= 0 {\n        DEFAULT_PAGE_SIZE\n    } else {\n        value as usize\n    }\n});\n"
  },
  {
    "path": "types/src/motes.rs",
    "content": "//! The `motes` module is used for working with Motes.\n\nuse alloc::vec::Vec;\nuse core::fmt;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    Gas, U512,\n};\n\n/// A struct representing a number of `Motes`.\n#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct Motes(U512);\n\nimpl Motes {\n    /// The maximum value of `Motes`.\n    pub const MAX: Motes = Motes(U512::MAX);\n\n    /// Constructs a new `Motes`.\n    pub fn new<T: Into<U512>>(value: T) -> Self {\n        Motes(value.into())\n    }\n\n    /// Constructs a new `Motes` with value `0`.\n    pub const fn zero() -> Self {\n        Motes(U512::zero())\n    }\n\n    /// Checked integer addition. Computes `self + rhs`, returning `None` if overflow occurred.\n    pub fn checked_add(&self, rhs: Self) -> Option<Self> {\n        self.0.checked_add(rhs.value()).map(Self::new)\n    }\n\n    /// Checked integer subtraction. Computes `self - rhs`, returning `None` if underflow occurred.\n    pub fn checked_sub(&self, rhs: Self) -> Option<Self> {\n        self.0.checked_sub(rhs.value()).map(Self::new)\n    }\n\n    /// Checked integer multiplication. Computes `self * rhs`, returning `None` if overflow\n    /// occurred.\n    pub fn checked_mul(&self, rhs: Self) -> Option<Self> {\n        self.0.checked_mul(rhs.value()).map(Self::new)\n    }\n\n    /// Checked integer division. 
Computes `self / rhs`, returning `None` if `rhs == 0`.\n    pub fn checked_div(&self, rhs: Self) -> Option<Self> {\n        self.0.checked_div(rhs.value()).map(Self::new)\n    }\n\n    /// Returns the inner `U512` value.\n    pub fn value(&self) -> U512 {\n        self.0\n    }\n\n    /// Converts the given `gas` to `Motes` by multiplying them by `conv_rate`.\n    ///\n    /// Returns `None` if an arithmetic overflow occurred.\n    pub fn from_gas(gas: Gas, conv_rate: u8) -> Option<Self> {\n        gas.value()\n            .checked_mul(U512::from(conv_rate))\n            .map(Self::new)\n    }\n\n    /// Converts the given `amount` to `Motes` by multiplying them by `price`.\n    ///\n    /// Returns `None` if an arithmetic overflow occurred.\n    pub fn from_price(amount: U512, price: u8) -> Option<Self> {\n        amount.checked_mul(U512::from(price)).map(Self::new)\n    }\n}\n\nimpl fmt::Display for Motes {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"{:?}\", self.0)\n    }\n}\n\nimpl ToBytes for Motes {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n}\n\nimpl FromBytes for Motes {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (value, remainder) = FromBytes::from_bytes(bytes)?;\n        Ok((Motes(value), remainder))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::U512;\n\n    use crate::{Gas, Motes};\n\n    #[test]\n    fn should_be_able_to_get_instance_of_motes() {\n        let initial_value = 1;\n        let motes = Motes::new(initial_value);\n        assert_eq!(\n            initial_value,\n            motes.value().as_u64(),\n            \"should have equal value\"\n        )\n    }\n\n    #[test]\n    fn should_be_able_to_compare_two_instances_of_motes() {\n        let left_motes = Motes::new(1);\n        let right_motes = 
Motes::new(1);\n        assert_eq!(left_motes, right_motes, \"should be equal\");\n        let right_motes = Motes::new(2);\n        assert_ne!(left_motes, right_motes, \"should not be equal\")\n    }\n\n    #[test]\n    fn should_be_able_to_add_two_instances_of_motes() {\n        let left_motes = Motes::new(1);\n        let right_motes = Motes::new(1);\n        let expected_motes = Motes::new(2);\n        assert_eq!(\n            left_motes.checked_add(right_motes),\n            Some(expected_motes),\n            \"should be equal\"\n        )\n    }\n\n    #[test]\n    fn should_be_able_to_subtract_two_instances_of_motes() {\n        let left_motes = Motes::new(1);\n        let right_motes = Motes::new(1);\n        let expected_motes = Motes::new(0);\n        assert_eq!(\n            left_motes.checked_sub(right_motes),\n            Some(expected_motes),\n            \"should be equal\"\n        )\n    }\n\n    #[test]\n    fn should_be_able_to_multiply_two_instances_of_motes() {\n        let left_motes = Motes::new(100);\n        let right_motes = Motes::new(10);\n        let expected_motes = Motes::new(1000);\n        assert_eq!(\n            left_motes.checked_mul(right_motes),\n            Some(expected_motes),\n            \"should be equal\"\n        )\n    }\n\n    #[test]\n    fn should_be_able_to_divide_two_instances_of_motes() {\n        let left_motes = Motes::new(1000);\n        let right_motes = Motes::new(100);\n        let expected_motes = Motes::new(10);\n        assert_eq!(\n            left_motes.checked_div(right_motes),\n            Some(expected_motes),\n            \"should be equal\"\n        )\n    }\n\n    #[test]\n    fn should_be_able_to_convert_from_motes() {\n        let gas = Gas::new(100);\n        let motes = Motes::from_gas(gas, 10).expect(\"should have value\");\n        let expected_motes = Motes::new(1000);\n        assert_eq!(motes, expected_motes, \"should be equal\")\n    }\n\n    #[test]\n    fn should_be_able_to_default() 
{\n        let motes = Motes::default();\n        let expected_motes = Motes::new(0);\n        assert_eq!(motes, expected_motes, \"should be equal\")\n    }\n\n    #[test]\n    fn should_be_able_to_compare_relative_value() {\n        let left_motes = Motes::new(100);\n        let right_motes = Motes::new(10);\n        assert!(left_motes > right_motes, \"should be gt\");\n        let right_motes = Motes::new(100);\n        assert!(left_motes >= right_motes, \"should be gte\");\n        assert!(left_motes <= right_motes, \"should be lte\");\n        let left_motes = Motes::new(10);\n        assert!(left_motes < right_motes, \"should be lt\");\n    }\n\n    #[test]\n    fn should_default() {\n        let left_motes = Motes::new(0);\n        let right_motes = Motes::default();\n        assert_eq!(left_motes, right_motes, \"should be equal\");\n        let u512 = U512::zero();\n        assert_eq!(left_motes.value(), u512, \"should be equal\");\n    }\n\n    #[test]\n    fn should_support_checked_mul_from_gas() {\n        let gas = Gas::new(U512::MAX);\n        let conv_rate = 10;\n        let maybe = Motes::from_gas(gas, conv_rate);\n        assert!(maybe.is_none(), \"should be none due to overflow\");\n    }\n}\n"
  },
  {
    "path": "types/src/package.rs",
    "content": "//! Module containing the Package and associated types for addressable entities.\n\nuse alloc::{\n    collections::{BTreeMap, BTreeSet},\n    format,\n    string::String,\n    vec::Vec,\n};\nuse core::{\n    convert::TryFrom,\n    fmt::{self, Debug, Display, Formatter},\n};\n\n#[cfg(any(feature = \"testing\", feature = \"gens\", test))]\nuse rand::{distributions::Standard, prelude::Distribution, Rng};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer};\n#[cfg(feature = \"json-schema\")]\nuse serde_map_to_array::KeyValueJsonSchema;\nuse serde_map_to_array::{BTreeMapToArray, KeyValueLabels};\n\nuse crate::{\n    addressable_entity::{Error, FromStrError},\n    bytesrepr::{self, FromBytes, ToBytes, U32_SERIALIZED_LENGTH},\n    checksummed_hex,\n    crypto::{self, PublicKey},\n    uref::URef,\n    CLType, CLTyped, EntityAddr, HashAddr, BLAKE2B_DIGEST_LENGTH, KEY_HASH_LENGTH,\n};\n\nconst PACKAGE_STRING_PREFIX: &str = \"package-\";\n\n/// Associated error type of `TryFrom<&[u8]>` for `ContractHash`.\n#[derive(Debug)]\npub struct TryFromSliceForPackageHashError(());\n\nimpl Display for TryFromSliceForPackageHashError {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        write!(f, \"failed to retrieve from slice\")\n    }\n}\n\n/// A (labelled) \"user group\". 
Each method of a versioned contract may be\n/// associated with one or more user groups which are allowed to call it.\n#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct Group(String);\n\nimpl Group {\n    /// Basic constructor\n    pub fn new<T: Into<String>>(s: T) -> Self {\n        Group(s.into())\n    }\n\n    /// Retrieves underlying name.\n    pub fn value(&self) -> &str {\n        &self.0\n    }\n}\n\nimpl From<Group> for String {\n    fn from(group: Group) -> Self {\n        group.0\n    }\n}\n\nimpl ToBytes for Group {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.value().write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for Group {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        String::from_bytes(bytes).map(|(label, bytes)| (Group(label), bytes))\n    }\n}\n\n/// Automatically incremented value for a contract version within a major `ProtocolVersion`.\npub type EntityVersion = u32;\n\n/// Within each discrete major `ProtocolVersion`, entity version resets to this value.\npub const ENTITY_INITIAL_VERSION: EntityVersion = 1;\n\n/// Major element of `ProtocolVersion` a `EntityVersion` is compatible with.\npub type ProtocolVersionMajor = u32;\n\n/// Major element of `ProtocolVersion` combined with `EntityVersion`.\n#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, Hash)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct EntityVersionKey {\n    /// Major element of `ProtocolVersion` a `ContractVersion` is 
compatible with.\n    protocol_version_major: ProtocolVersionMajor,\n    /// Automatically incremented value for a contract version within a major `ProtocolVersion`.\n    entity_version: EntityVersion,\n}\n\nimpl EntityVersionKey {\n    /// Returns a new instance of ContractVersionKey with provided values.\n    pub fn new(\n        protocol_version_major: ProtocolVersionMajor,\n        entity_version: EntityVersion,\n    ) -> Self {\n        Self {\n            protocol_version_major,\n            entity_version,\n        }\n    }\n\n    /// Returns the major element of the protocol version this contract is compatible with.\n    pub fn protocol_version_major(self) -> ProtocolVersionMajor {\n        self.protocol_version_major\n    }\n\n    /// Returns the contract version within the protocol major version.\n    pub fn entity_version(self) -> EntityVersion {\n        self.entity_version\n    }\n}\n\nimpl From<EntityVersionKey> for (ProtocolVersionMajor, EntityVersion) {\n    fn from(entity_version_key: EntityVersionKey) -> Self {\n        (\n            entity_version_key.protocol_version_major,\n            entity_version_key.entity_version,\n        )\n    }\n}\n\nimpl ToBytes for EntityVersionKey {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        ENTITY_VERSION_KEY_SERIALIZED_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.protocol_version_major.write_bytes(writer)?;\n        self.entity_version.write_bytes(writer)\n    }\n}\n\nimpl FromBytes for EntityVersionKey {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (protocol_version_major, remainder) = ProtocolVersionMajor::from_bytes(bytes)?;\n        let (entity_version, remainder) = 
EntityVersion::from_bytes(remainder)?;\n        Ok((\n            EntityVersionKey {\n                protocol_version_major,\n                entity_version,\n            },\n            remainder,\n        ))\n    }\n}\n\nimpl Display for EntityVersionKey {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        write!(f, \"{}.{}\", self.protocol_version_major, self.entity_version)\n    }\n}\n\n#[cfg(any(feature = \"testing\", feature = \"gens\", test))]\nimpl Distribution<EntityVersionKey> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> EntityVersionKey {\n        EntityVersionKey {\n            protocol_version_major: rng.gen(),\n            entity_version: rng.gen(),\n        }\n    }\n}\n\n/// Serialized length of `EntityVersionKey`.\npub const ENTITY_VERSION_KEY_SERIALIZED_LENGTH: usize =\n    U32_SERIALIZED_LENGTH + U32_SERIALIZED_LENGTH;\n\n/// Collection of entity versions.\n#[derive(Clone, PartialEq, Eq, Default, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(transparent, deny_unknown_fields)]\npub struct EntityVersions(\n    #[serde(with = \"BTreeMapToArray::<EntityVersionKey, EntityAddr, EntityVersionLabels>\")]\n    BTreeMap<EntityVersionKey, EntityAddr>,\n);\n\nimpl EntityVersions {\n    /// Constructs a new, empty `EntityVersions`.\n    pub const fn new() -> Self {\n        EntityVersions(BTreeMap::new())\n    }\n\n    /// Returns an iterator over the `AddressableEntityHash`s (i.e. 
the map's values).\n    pub fn contract_hashes(&self) -> impl Iterator<Item = &EntityAddr> {\n        self.0.values()\n    }\n\n    /// Returns the `AddressableEntityHash` under the key\n    pub fn get(&self, key: &EntityVersionKey) -> Option<&EntityAddr> {\n        self.0.get(key)\n    }\n\n    /// Retrieve the first entity version key if it exists\n    pub fn maybe_first(&mut self) -> Option<(EntityVersionKey, EntityAddr)> {\n        if let Some((entity_version_key, entity_hash)) = self.0.iter().next() {\n            Some((*entity_version_key, *entity_hash))\n        } else {\n            None\n        }\n    }\n\n    /// The number of versions present in the package.\n    pub fn version_count(&self) -> usize {\n        self.0.len()\n    }\n\n    /// Returns the latest entity version key if it exists.\n    pub fn latest(&self) -> Option<&EntityAddr> {\n        let (_, value) = self.0.last_key_value()?;\n        Some(value)\n    }\n\n    /// Returns an iterator over the `AddressableEntityHash`s (i.e. 
the map's values).\n    pub fn iter_entries(&self) -> impl Iterator<Item = (&EntityVersionKey, &EntityAddr)> {\n        self.0.iter()\n    }\n}\n\nimpl ToBytes for EntityVersions {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.0.write_bytes(writer)\n    }\n}\n\nimpl FromBytes for EntityVersions {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (versions, remainder) = BTreeMap::<EntityVersionKey, EntityAddr>::from_bytes(bytes)?;\n        Ok((EntityVersions(versions), remainder))\n    }\n}\n\nimpl From<BTreeMap<EntityVersionKey, EntityAddr>> for EntityVersions {\n    fn from(value: BTreeMap<EntityVersionKey, EntityAddr>) -> Self {\n        EntityVersions(value)\n    }\n}\n\nstruct EntityVersionLabels;\n\nimpl KeyValueLabels for EntityVersionLabels {\n    const KEY: &'static str = \"entity_version_key\";\n    const VALUE: &'static str = \"entity_addr\";\n}\n\n#[cfg(feature = \"json-schema\")]\nimpl KeyValueJsonSchema for EntityVersionLabels {\n    const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some(\"EntityVersionAndEntityAddr\");\n}\n\n/// Collection of named groups.\n#[derive(Clone, PartialEq, Eq, Default, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(transparent, deny_unknown_fields)]\npub struct Groups(\n    #[serde(with = \"BTreeMapToArray::<Group, BTreeSet::<URef>, GroupLabels>\")]\n    pub(crate)  BTreeMap<Group, BTreeSet<URef>>,\n);\n\nimpl Groups {\n    /// Constructs a new, empty `Groups`.\n    pub const fn new() -> Self {\n        Groups(BTreeMap::new())\n    }\n\n    /// Inserts a named group.\n    ///\n    /// If the map did not have this name present, `None` is 
returned.  If the map did have this\n    /// name present, its collection of `URef`s is overwritten, and the collection is returned.\n    pub fn insert(&mut self, name: Group, urefs: BTreeSet<URef>) -> Option<BTreeSet<URef>> {\n        self.0.insert(name, urefs)\n    }\n\n    /// Returns `true` if the named group exists in the collection.\n    pub fn contains(&self, name: &Group) -> bool {\n        self.0.contains_key(name)\n    }\n\n    /// Returns a reference to the collection of `URef`s under the given `name` if any.\n    pub fn get(&self, name: &Group) -> Option<&BTreeSet<URef>> {\n        self.0.get(name)\n    }\n\n    /// Returns a mutable reference to the collection of `URef`s under the given `name` if any.\n    pub fn get_mut(&mut self, name: &Group) -> Option<&mut BTreeSet<URef>> {\n        self.0.get_mut(name)\n    }\n\n    /// Returns the number of named groups.\n    pub fn len(&self) -> usize {\n        self.0.len()\n    }\n\n    /// Returns `true` if there are no named groups.\n    pub fn is_empty(&self) -> bool {\n        self.0.is_empty()\n    }\n\n    /// Returns an iterator over the `Key`s (i.e. 
the map's values).\n    pub fn keys(&self) -> impl Iterator<Item = &BTreeSet<URef>> {\n        self.0.values()\n    }\n\n    /// Returns the total number of `URef`s contained in all the groups.\n    pub fn total_urefs(&self) -> usize {\n        self.0.values().map(|urefs| urefs.len()).sum()\n    }\n}\n\nimpl ToBytes for Groups {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.0.write_bytes(writer)\n    }\n}\n\nimpl FromBytes for Groups {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (groups, remainder) = BTreeMap::<Group, BTreeSet<URef>>::from_bytes(bytes)?;\n        Ok((Groups(groups), remainder))\n    }\n}\n\nstruct GroupLabels;\n\nimpl KeyValueLabels for GroupLabels {\n    const KEY: &'static str = \"group_name\";\n    const VALUE: &'static str = \"group_users\";\n}\n\n#[cfg(feature = \"json-schema\")]\nimpl KeyValueJsonSchema for GroupLabels {\n    const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some(\"NamedUserGroup\");\n}\n\n#[cfg(any(feature = \"testing\", feature = \"gens\", test))]\nimpl From<BTreeMap<Group, BTreeSet<URef>>> for Groups {\n    fn from(value: BTreeMap<Group, BTreeSet<URef>>) -> Self {\n        Groups(value)\n    }\n}\n\n/// A newtype wrapping a `HashAddr` which references a [`Package`] in the global state.\n#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"The hex-encoded address of the Package.\")\n)]\npub struct PackageHash(\n    #[cfg_attr(feature = \"json-schema\", schemars(skip, with = \"String\"))] HashAddr,\n);\n\nimpl PackageHash {\n    /// Constructs a new `PackageHash` from the raw 
bytes of the package hash.\n    pub const fn new(value: HashAddr) -> PackageHash {\n        PackageHash(value)\n    }\n\n    /// Returns the raw bytes of the entity hash as an array.\n    pub fn value(&self) -> HashAddr {\n        self.0\n    }\n\n    /// Returns the raw bytes of the entity hash as a `slice`.\n    pub fn as_bytes(&self) -> &[u8] {\n        &self.0\n    }\n\n    /// Formats the `PackageHash` for users getting and putting.\n    pub fn to_formatted_string(self) -> String {\n        format!(\"{}{}\", PACKAGE_STRING_PREFIX, base16::encode_lower(&self.0),)\n    }\n\n    /// Parses a string formatted as per `Self::to_formatted_string()` into a\n    /// `PackageHash`.\n    pub fn from_formatted_str(input: &str) -> Result<Self, FromStrError> {\n        let hex_addr = input\n            .strip_prefix(PACKAGE_STRING_PREFIX)\n            .ok_or(FromStrError::InvalidPrefix)?;\n\n        let bytes = HashAddr::try_from(checksummed_hex::decode(hex_addr)?.as_ref())?;\n        Ok(PackageHash(bytes))\n    }\n\n    /// Parses a `PublicKey` and outputs the corresponding account hash.\n    pub fn from_public_key(\n        public_key: &PublicKey,\n        blake2b_hash_fn: impl Fn(Vec<u8>) -> [u8; BLAKE2B_DIGEST_LENGTH],\n    ) -> Self {\n        const SYSTEM_LOWERCASE: &str = \"system\";\n        const ED25519_LOWERCASE: &str = \"ed25519\";\n        const SECP256K1_LOWERCASE: &str = \"secp256k1\";\n\n        let algorithm_name = match public_key {\n            PublicKey::System => SYSTEM_LOWERCASE,\n            PublicKey::Ed25519(_) => ED25519_LOWERCASE,\n            PublicKey::Secp256k1(_) => SECP256K1_LOWERCASE,\n        };\n        let public_key_bytes: Vec<u8> = public_key.into();\n\n        // Prepare preimage based on the public key parameters.\n        let preimage = {\n            let mut data = Vec::with_capacity(algorithm_name.len() + public_key_bytes.len() + 1);\n            data.extend(algorithm_name.as_bytes());\n            data.push(0);\n            
data.extend(public_key_bytes);\n            data\n        };\n        // Hash the preimage data using blake2b256 and return it.\n        let digest = blake2b_hash_fn(preimage);\n        Self::new(digest)\n    }\n}\n\nimpl Display for PackageHash {\n    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {\n        write!(f, \"{}\", base16::encode_lower(&self.0))\n    }\n}\n\nimpl Debug for PackageHash {\n    fn fmt(&self, f: &mut Formatter) -> core::fmt::Result {\n        write!(f, \"PackageHash({})\", base16::encode_lower(&self.0))\n    }\n}\n\nimpl CLTyped for PackageHash {\n    fn cl_type() -> CLType {\n        CLType::ByteArray(KEY_HASH_LENGTH as u32)\n    }\n}\n\nimpl ToBytes for PackageHash {\n    #[inline(always)]\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    #[inline(always)]\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n\n    #[inline(always)]\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        writer.extend_from_slice(&self.0);\n        Ok(())\n    }\n}\n\nimpl FromBytes for PackageHash {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (bytes, rem) = FromBytes::from_bytes(bytes)?;\n        Ok((PackageHash::new(bytes), rem))\n    }\n}\n\nimpl From<[u8; 32]> for PackageHash {\n    fn from(bytes: [u8; 32]) -> Self {\n        PackageHash(bytes)\n    }\n}\n\nimpl Serialize for PackageHash {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            self.to_formatted_string().serialize(serializer)\n        } else {\n            self.0.serialize(serializer)\n        }\n    }\n}\n\nimpl<'de> Deserialize<'de> for PackageHash {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        if deserializer.is_human_readable() {\n            let formatted_string = 
String::deserialize(deserializer)?;\n            PackageHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom)\n        } else {\n            let bytes = HashAddr::deserialize(deserializer)?;\n            Ok(PackageHash(bytes))\n        }\n    }\n}\n\nimpl AsRef<[u8]> for PackageHash {\n    fn as_ref(&self) -> &[u8] {\n        self.0.as_ref()\n    }\n}\n\nimpl TryFrom<&[u8]> for PackageHash {\n    type Error = TryFromSliceForPackageHashError;\n\n    fn try_from(bytes: &[u8]) -> Result<Self, TryFromSliceForPackageHashError> {\n        HashAddr::try_from(bytes)\n            .map(PackageHash::new)\n            .map_err(|_| TryFromSliceForPackageHashError(()))\n    }\n}\n\nimpl TryFrom<&Vec<u8>> for PackageHash {\n    type Error = TryFromSliceForPackageHashError;\n\n    fn try_from(bytes: &Vec<u8>) -> Result<Self, Self::Error> {\n        HashAddr::try_from(bytes as &[u8])\n            .map(PackageHash::new)\n            .map_err(|_| TryFromSliceForPackageHashError(()))\n    }\n}\n\nimpl From<&PublicKey> for PackageHash {\n    fn from(public_key: &PublicKey) -> Self {\n        PackageHash::from_public_key(public_key, crypto::blake2b)\n    }\n}\n\n/// A enum to determine the lock status of the package.\n#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub enum PackageStatus {\n    /// The package is locked and cannot be versioned.\n    Locked,\n    /// The package is unlocked and can be versioned.\n    Unlocked,\n}\n\nimpl PackageStatus {\n    /// Create a new status flag based on a boolean value\n    pub fn new(is_locked: bool) -> Self {\n        if is_locked {\n            PackageStatus::Locked\n        } else {\n            PackageStatus::Unlocked\n        }\n    }\n}\n\nimpl Default for PackageStatus {\n    fn default() -> Self {\n        Self::Unlocked\n    }\n}\n\nimpl ToBytes for PackageStatus {\n    fn to_bytes(&self) 
-> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        match self {\n            PackageStatus::Unlocked => result.append(&mut false.to_bytes()?),\n            PackageStatus::Locked => result.append(&mut true.to_bytes()?),\n        }\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        match self {\n            PackageStatus::Unlocked => false.serialized_length(),\n            PackageStatus::Locked => true.serialized_length(),\n        }\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            PackageStatus::Locked => writer.push(u8::from(true)),\n            PackageStatus::Unlocked => writer.push(u8::from(false)),\n        }\n        Ok(())\n    }\n}\n\nimpl FromBytes for PackageStatus {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (val, bytes) = bool::from_bytes(bytes)?;\n        let status = PackageStatus::new(val);\n        Ok((status, bytes))\n    }\n}\n\n/// Entity definition, metadata, and security container.\n#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct Package {\n    /// All versions (enabled & disabled).\n    versions: EntityVersions,\n    /// Collection of disabled entity versions. The runtime will not permit disabled entity\n    /// versions to be executed.\n    disabled_versions: BTreeSet<EntityVersionKey>,\n    /// Mapping maintaining the set of URefs associated with each \"user group\". This can be used to\n    /// control access to methods in a particular version of the entity. 
A method is callable by\n    /// any context which \"knows\" any of the URefs associated with the method's user group.\n    groups: Groups,\n    /// A flag that determines whether a entity is locked\n    lock_status: PackageStatus,\n}\n\nimpl CLTyped for Package {\n    fn cl_type() -> CLType {\n        CLType::Any\n    }\n}\n\nimpl Package {\n    /// Create new `Package` (with no versions) from given access key.\n    pub fn new(\n        versions: EntityVersions,\n        disabled_versions: BTreeSet<EntityVersionKey>,\n        groups: Groups,\n        lock_status: PackageStatus,\n    ) -> Self {\n        Package {\n            versions,\n            disabled_versions,\n            groups,\n            lock_status,\n        }\n    }\n\n    /// Enable the entity version corresponding to the given hash (if it exists).\n    pub fn enable_version(&mut self, entity_addr: EntityAddr) -> Result<(), Error> {\n        let entity_version_key = self\n            .find_entity_version_key_by_hash(&entity_addr)\n            .copied()\n            .ok_or(Error::EntityNotFound)?;\n\n        self.disabled_versions.remove(&entity_version_key);\n\n        Ok(())\n    }\n\n    /// Get the mutable group definitions for this entity.\n    pub fn groups_mut(&mut self) -> &mut Groups {\n        &mut self.groups\n    }\n\n    /// Get the group definitions for this entity.\n    pub fn groups(&self) -> &Groups {\n        &self.groups\n    }\n\n    /// Adds new group to this entity.\n    pub fn add_group(&mut self, group: Group, urefs: BTreeSet<URef>) {\n        let v = self.groups.0.entry(group).or_default();\n        v.extend(urefs)\n    }\n\n    /// Lookup the entity hash for a given entity version (if present)\n    pub fn lookup_entity_hash(&self, entity_version_key: EntityVersionKey) -> Option<&EntityAddr> {\n        self.versions.0.get(&entity_version_key)\n    }\n\n    /// Checks if the given entity version exists.\n    pub fn is_version_missing(&self, entity_version_key: 
EntityVersionKey) -> bool {\n        !self.versions.0.contains_key(&entity_version_key)\n    }\n\n    /// Checks if the given entity version exists and is available for use.\n    pub fn is_version_enabled(&self, entity_version_key: EntityVersionKey) -> bool {\n        !self.is_version_missing(entity_version_key)\n            && !self.disabled_versions.contains(&entity_version_key)\n    }\n\n    /// Returns `true` if the given entity hash exists and is enabled.\n    pub fn is_entity_enabled(&self, entity_hash: &EntityAddr) -> bool {\n        match self.find_entity_version_key_by_hash(entity_hash) {\n            Some(version_key) => !self.disabled_versions.contains(version_key),\n            None => false,\n        }\n    }\n\n    /// Insert a new entity version; the next sequential version number will be issued.\n    pub fn insert_entity_version(\n        &mut self,\n        protocol_version_major: ProtocolVersionMajor,\n        entity_hash: EntityAddr,\n    ) -> EntityVersionKey {\n        let contract_version = self.next_entity_version_for(protocol_version_major);\n        let key = EntityVersionKey::new(protocol_version_major, contract_version);\n        self.versions.0.insert(key, entity_hash);\n        key\n    }\n\n    /// Disable the entity version corresponding to the given hash (if it exists).\n    pub fn disable_entity_version(&mut self, entity_hash: EntityAddr) -> Result<(), Error> {\n        let entity_version_key = self\n            .versions\n            .0\n            .iter()\n            .filter_map(|(k, v)| if *v == entity_hash { Some(*k) } else { None })\n            .next()\n            .ok_or(Error::EntityNotFound)?;\n\n        if !self.disabled_versions.contains(&entity_version_key) {\n            self.disabled_versions.insert(entity_version_key);\n        }\n\n        Ok(())\n    }\n\n    fn find_entity_version_key_by_hash(\n        &self,\n        entity_hash: &EntityAddr,\n    ) -> Option<&EntityVersionKey> {\n        self.versions\n         
   .0\n            .iter()\n            .filter_map(|(k, v)| if v == entity_hash { Some(k) } else { None })\n            .next()\n    }\n\n    /// Returns reference to all of this entity's versions.\n    pub fn versions(&self) -> &EntityVersions {\n        &self.versions\n    }\n\n    /// Returns all of this entity's enabled entity versions.\n    pub fn enabled_versions(&self) -> EntityVersions {\n        let mut ret = EntityVersions::new();\n        for version in &self.versions.0 {\n            if !self.is_version_enabled(*version.0) {\n                continue;\n            }\n            ret.0.insert(*version.0, *version.1);\n        }\n        ret\n    }\n\n    /// Returns mutable reference to all of this entity's versions (enabled and disabled).\n    pub fn versions_mut(&mut self) -> &mut EntityVersions {\n        &mut self.versions\n    }\n\n    /// Consumes the object and returns all of this entity's versions (enabled and disabled).\n    pub fn take_versions(self) -> EntityVersions {\n        self.versions\n    }\n\n    /// Returns all of this entity's disabled versions.\n    pub fn disabled_versions(&self) -> &BTreeSet<EntityVersionKey> {\n        &self.disabled_versions\n    }\n\n    /// Returns mut reference to all of this entity's disabled versions.\n    pub fn disabled_versions_mut(&mut self) -> &mut BTreeSet<EntityVersionKey> {\n        &mut self.disabled_versions\n    }\n\n    /// Removes a group from this entity (if it exists).\n    pub fn remove_group(&mut self, group: &Group) -> bool {\n        self.groups.0.remove(group).is_some()\n    }\n\n    /// Gets the next available entity version for the given protocol version\n    pub fn next_entity_version_for(&self, protocol_version: ProtocolVersionMajor) -> EntityVersion {\n        let current_version = self\n            .versions\n            .0\n            .keys()\n            .rev()\n            .find_map(|&entity_version_key| {\n                if entity_version_key.protocol_version_major() == 
protocol_version {\n                    Some(entity_version_key.entity_version())\n                } else {\n                    None\n                }\n            })\n            .unwrap_or(0);\n\n        current_version + 1\n    }\n\n    pub fn current_entity_version_for(\n        &self,\n        protocol_version: ProtocolVersionMajor,\n    ) -> EntityVersionKey {\n        let current_version = self\n            .enabled_versions()\n            .0\n            .keys()\n            .rev()\n            .find_map(|&entity_version_key| {\n                if entity_version_key.protocol_version_major() == protocol_version {\n                    Some(entity_version_key.entity_version())\n                } else {\n                    None\n                }\n            })\n            .unwrap_or(0);\n\n        EntityVersionKey::new(protocol_version, current_version)\n    }\n\n    /// Return the entity version key for the newest enabled entity version.\n    pub fn current_entity_version(&self) -> Option<EntityVersionKey> {\n        self.enabled_versions().0.keys().next_back().copied()\n    }\n\n    /// Return the entity hash for the newest enabled entity version.\n    pub fn current_entity_hash(&self) -> Option<EntityAddr> {\n        self.enabled_versions().0.values().next_back().copied()\n    }\n\n    /// Return the lock status of the entity package.\n    pub fn is_locked(&self) -> bool {\n        if self.versions.0.is_empty() {\n            return false;\n        }\n\n        match self.lock_status {\n            PackageStatus::Unlocked => false,\n            PackageStatus::Locked => true,\n        }\n    }\n\n    /// Return the package status itself\n    pub fn get_lock_status(&self) -> PackageStatus {\n        self.lock_status.clone()\n    }\n}\n\nimpl ToBytes for Package {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    
}\n\n    fn serialized_length(&self) -> usize {\n        self.versions.serialized_length()\n            + self.disabled_versions.serialized_length()\n            + self.groups.serialized_length()\n            + self.lock_status.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.versions().write_bytes(writer)?;\n        self.disabled_versions().write_bytes(writer)?;\n        self.groups().write_bytes(writer)?;\n        self.lock_status.write_bytes(writer)?;\n\n        Ok(())\n    }\n}\n\nimpl FromBytes for Package {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (versions, bytes) = EntityVersions::from_bytes(bytes)?;\n        let (disabled_versions, bytes) = BTreeSet::<EntityVersionKey>::from_bytes(bytes)?;\n        let (groups, bytes) = Groups::from_bytes(bytes)?;\n        let (lock_status, bytes) = PackageStatus::from_bytes(bytes)?;\n\n        let result = Package {\n            versions,\n            disabled_versions,\n            groups,\n            lock_status,\n        };\n\n        Ok((result, bytes))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use core::iter::FromIterator;\n\n    use super::*;\n    use crate::{\n        AccessRights, EntityEntryPoint, EntityVersionKey, EntryPointAccess, EntryPointPayment,\n        EntryPointType, Parameter, ProtocolVersion, URef,\n    };\n    use alloc::borrow::ToOwned;\n\n    const ENTITY_HASH_V1: EntityAddr = EntityAddr::new_smart_contract([42; 32]);\n    const ENTITY_HASH_V2: EntityAddr = EntityAddr::new_smart_contract([84; 32]);\n\n    fn make_package_with_two_versions() -> Package {\n        let mut package = Package::new(\n            EntityVersions::default(),\n            BTreeSet::new(),\n            Groups::default(),\n            PackageStatus::default(),\n        );\n\n        // add groups\n        {\n            let group_urefs = {\n                let mut ret = BTreeSet::new();\n             
   ret.insert(URef::new([1; 32], AccessRights::READ));\n                ret\n            };\n\n            package\n                .groups_mut()\n                .insert(Group::new(\"Group 1\"), group_urefs.clone());\n\n            package\n                .groups_mut()\n                .insert(Group::new(\"Group 2\"), group_urefs);\n        }\n\n        // add entry_points\n        let _entry_points = {\n            let mut ret = BTreeMap::new();\n            let entrypoint = EntityEntryPoint::new(\n                \"method0\".to_string(),\n                vec![],\n                CLType::U32,\n                EntryPointAccess::groups(&[\"Group 2\"]),\n                EntryPointType::Caller,\n                EntryPointPayment::Caller,\n            );\n            ret.insert(entrypoint.name().to_owned(), entrypoint);\n            let entrypoint = EntityEntryPoint::new(\n                \"method1\".to_string(),\n                vec![Parameter::new(\"Foo\", CLType::U32)],\n                CLType::U32,\n                EntryPointAccess::groups(&[\"Group 1\"]),\n                EntryPointType::Caller,\n                EntryPointPayment::Caller,\n            );\n            ret.insert(entrypoint.name().to_owned(), entrypoint);\n            ret\n        };\n\n        let protocol_version = ProtocolVersion::V1_0_0;\n\n        let v1 = package.insert_entity_version(protocol_version.value().major, ENTITY_HASH_V1);\n        let v2 = package.insert_entity_version(protocol_version.value().major, ENTITY_HASH_V2);\n        assert!(v2 > v1);\n\n        package\n    }\n\n    #[test]\n    fn next_entity_version() {\n        let major = 1;\n        let mut package = Package::new(\n            EntityVersions::default(),\n            BTreeSet::default(),\n            Groups::default(),\n            PackageStatus::default(),\n        );\n        assert_eq!(package.next_entity_version_for(major), 1);\n\n        let next_version =\n            package.insert_entity_version(major, 
EntityAddr::SmartContract([123; 32]));\n        assert_eq!(next_version, EntityVersionKey::new(major, 1));\n        assert_eq!(package.next_entity_version_for(major), 2);\n        let next_version_2 =\n            package.insert_entity_version(major, EntityAddr::SmartContract([124; 32]));\n        assert_eq!(next_version_2, EntityVersionKey::new(major, 2));\n\n        let major = 2;\n        assert_eq!(package.next_entity_version_for(major), 1);\n        let next_version_3 =\n            package.insert_entity_version(major, EntityAddr::SmartContract([42; 32]));\n        assert_eq!(next_version_3, EntityVersionKey::new(major, 1));\n    }\n\n    #[test]\n    fn roundtrip_serialization() {\n        let package = make_package_with_two_versions();\n        let bytes = package.to_bytes().expect(\"should serialize\");\n        let (decoded_package, rem) = Package::from_bytes(&bytes).expect(\"should deserialize\");\n        assert_eq!(package, decoded_package);\n        assert_eq!(rem.len(), 0);\n    }\n\n    #[test]\n    fn should_remove_group() {\n        let mut package = make_package_with_two_versions();\n\n        assert!(!package.remove_group(&Group::new(\"Non-existent group\")));\n        assert!(package.remove_group(&Group::new(\"Group 1\")));\n        assert!(!package.remove_group(&Group::new(\"Group 1\"))); // Group no longer exists\n    }\n\n    #[test]\n    fn should_disable_and_enable_entity_version() {\n        const ENTITY_HASH: EntityAddr = EntityAddr::new_smart_contract([123; 32]);\n\n        let mut package = make_package_with_two_versions();\n\n        assert!(\n            !package.is_entity_enabled(&ENTITY_HASH),\n            \"nonexisting entity should return false\"\n        );\n\n        assert_eq!(\n            package.current_entity_version(),\n            Some(EntityVersionKey::new(1, 2))\n        );\n        assert_eq!(package.current_entity_hash(), Some(ENTITY_HASH_V2));\n\n        assert_eq!(\n            package.versions(),\n            
&EntityVersions::from(BTreeMap::from_iter([\n                (EntityVersionKey::new(1, 1), ENTITY_HASH_V1),\n                (EntityVersionKey::new(1, 2), ENTITY_HASH_V2)\n            ])),\n        );\n        assert_eq!(\n            package.enabled_versions(),\n            EntityVersions::from(BTreeMap::from_iter([\n                (EntityVersionKey::new(1, 1), ENTITY_HASH_V1),\n                (EntityVersionKey::new(1, 2), ENTITY_HASH_V2)\n            ])),\n        );\n\n        assert!(!package.is_entity_enabled(&ENTITY_HASH));\n\n        assert_eq!(\n            package.disable_entity_version(ENTITY_HASH),\n            Err(Error::EntityNotFound),\n            \"should return entity not found error\"\n        );\n\n        assert!(\n            !package.is_entity_enabled(&ENTITY_HASH),\n            \"disabling missing entity shouldnt change outcome\"\n        );\n\n        let next_version = package.insert_entity_version(1, ENTITY_HASH);\n        assert!(\n            package.is_version_enabled(next_version),\n            \"version should exist and be enabled\"\n        );\n        assert!(package.is_entity_enabled(&ENTITY_HASH));\n\n        assert!(\n            package.is_entity_enabled(&ENTITY_HASH),\n            \"entity should be enabled\"\n        );\n\n        assert_eq!(\n            package.disable_entity_version(ENTITY_HASH),\n            Ok(()),\n            \"should be able to disable version\"\n        );\n        assert!(!package.is_entity_enabled(&ENTITY_HASH));\n\n        assert!(\n            !package.is_entity_enabled(&ENTITY_HASH),\n            \"entity should be disabled\"\n        );\n        // This was once true, but look up vs disable checking have been decoupled in 2.0\n        // assert_eq!(\n        //     package.lookup_entity_hash(next_version),\n        //     None,\n        //     \"should not return disabled entity version\"\n        // );\n        assert!(\n            !package.is_version_enabled(next_version),\n            
\"version should not be enabled\"\n        );\n\n        assert_eq!(\n            package.current_entity_version(),\n            Some(EntityVersionKey::new(1, 2))\n        );\n        assert_eq!(package.current_entity_hash(), Some(ENTITY_HASH_V2));\n        assert_eq!(\n            package.versions(),\n            &EntityVersions::from(BTreeMap::from_iter([\n                (EntityVersionKey::new(1, 1), ENTITY_HASH_V1),\n                (EntityVersionKey::new(1, 2), ENTITY_HASH_V2),\n                (next_version, ENTITY_HASH),\n            ])),\n        );\n        assert_eq!(\n            package.enabled_versions(),\n            EntityVersions::from(BTreeMap::from_iter([\n                (EntityVersionKey::new(1, 1), ENTITY_HASH_V1),\n                (EntityVersionKey::new(1, 2), ENTITY_HASH_V2),\n            ])),\n        );\n        assert_eq!(\n            package.disabled_versions(),\n            &BTreeSet::from_iter([next_version]),\n        );\n\n        assert_eq!(\n            package.current_entity_version(),\n            Some(EntityVersionKey::new(1, 2))\n        );\n        assert_eq!(package.current_entity_hash(), Some(ENTITY_HASH_V2));\n\n        assert_eq!(\n            package.disable_entity_version(ENTITY_HASH_V2),\n            Ok(()),\n            \"should be able to disable version 2\"\n        );\n\n        assert_eq!(\n            package.enabled_versions(),\n            EntityVersions::from(BTreeMap::from_iter([(\n                EntityVersionKey::new(1, 1),\n                ENTITY_HASH_V1\n            ),])),\n        );\n\n        assert_eq!(\n            package.current_entity_version(),\n            Some(EntityVersionKey::new(1, 1))\n        );\n        assert_eq!(package.current_entity_hash(), Some(ENTITY_HASH_V1));\n\n        assert_eq!(\n            package.disabled_versions(),\n            &BTreeSet::from_iter([next_version, EntityVersionKey::new(1, 2)]),\n        );\n\n        assert_eq!(package.enable_version(ENTITY_HASH_V2), 
Ok(()),);\n\n        assert_eq!(\n            package.enabled_versions(),\n            EntityVersions::from(BTreeMap::from_iter([\n                (EntityVersionKey::new(1, 1), ENTITY_HASH_V1),\n                (EntityVersionKey::new(1, 2), ENTITY_HASH_V2),\n            ])),\n        );\n\n        assert_eq!(\n            package.disabled_versions(),\n            &BTreeSet::from_iter([next_version])\n        );\n\n        assert_eq!(package.current_entity_hash(), Some(ENTITY_HASH_V2));\n\n        assert_eq!(package.enable_version(ENTITY_HASH), Ok(()),);\n\n        assert_eq!(\n            package.enable_version(ENTITY_HASH),\n            Ok(()),\n            \"enabling a entity twice should be a noop\"\n        );\n\n        assert_eq!(\n            package.enabled_versions(),\n            EntityVersions::from(BTreeMap::from_iter([\n                (EntityVersionKey::new(1, 1), ENTITY_HASH_V1),\n                (EntityVersionKey::new(1, 2), ENTITY_HASH_V2),\n                (next_version, ENTITY_HASH),\n            ])),\n        );\n\n        assert_eq!(package.disabled_versions(), &BTreeSet::new(),);\n\n        assert_eq!(package.current_entity_hash(), Some(ENTITY_HASH));\n    }\n\n    #[test]\n    fn should_not_allow_to_enable_non_existing_version() {\n        let mut package = make_package_with_two_versions();\n\n        assert_eq!(\n            package.enable_version(EntityAddr::SmartContract(HashAddr::default())),\n            Err(Error::EntityNotFound),\n        );\n    }\n\n    #[test]\n    fn package_hash_from_slice() {\n        let bytes: Vec<u8> = (0..32).collect();\n        let package_hash = HashAddr::try_from(&bytes[..]).expect(\"should create package hash\");\n        let package_hash = PackageHash::new(package_hash);\n        assert_eq!(&bytes, &package_hash.as_bytes());\n    }\n\n    #[test]\n    fn package_hash_from_str() {\n        let package_hash = PackageHash::new([3; 32]);\n        let encoded = package_hash.to_formatted_string();\n        let 
decoded = PackageHash::from_formatted_str(&encoded).unwrap();\n        assert_eq!(package_hash, decoded);\n\n        let invalid_prefix =\n            \"package0000000000000000000000000000000000000000000000000000000000000000\";\n        assert!(matches!(\n            PackageHash::from_formatted_str(invalid_prefix).unwrap_err(),\n            FromStrError::InvalidPrefix\n        ));\n\n        let short_addr = \"package-00000000000000000000000000000000000000000000000000000000000000\";\n        assert!(matches!(\n            PackageHash::from_formatted_str(short_addr).unwrap_err(),\n            FromStrError::Hash(_)\n        ));\n\n        let long_addr =\n            \"package-000000000000000000000000000000000000000000000000000000000000000000\";\n        assert!(matches!(\n            PackageHash::from_formatted_str(long_addr).unwrap_err(),\n            FromStrError::Hash(_)\n        ));\n\n        let invalid_hex =\n            \"package-000000000000000000000000000000000000000000000000000000000000000g\";\n        assert!(matches!(\n            PackageHash::from_formatted_str(invalid_hex).unwrap_err(),\n            FromStrError::Hex(_)\n        ));\n    }\n}\n\n#[cfg(test)]\nmod prop_tests {\n    use proptest::prelude::*;\n\n    use crate::{bytesrepr, gens};\n\n    proptest! {\n        #[test]\n        fn test_value_contract_package(contract_pkg in gens::package_arb()) {\n            bytesrepr::test_serialization_roundtrip(&contract_pkg);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/peers_map.rs",
    "content": "use alloc::collections::BTreeMap;\n\nuse crate::bytesrepr::{self, FromBytes, ToBytes};\nuse alloc::{\n    string::{String, ToString},\n    vec::Vec,\n};\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\n#[cfg(any(feature = \"testing\", test))]\nuse core::iter;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n\n/// Node peer entry.\n#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct PeerEntry {\n    /// Node id.\n    pub node_id: String,\n    /// Node address.\n    pub address: String,\n}\n\nimpl PeerEntry {\n    #[cfg(any(feature = \"testing\", test))]\n    pub(crate) fn random(rng: &mut TestRng) -> Self {\n        Self {\n            node_id: rng.random_string(10..20),\n            address: rng.random_string(10..20),\n        }\n    }\n}\n\nimpl ToBytes for PeerEntry {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.node_id.write_bytes(writer)?;\n        self.address.write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.node_id.serialized_length() + self.address.serialized_length()\n    }\n}\n\nimpl FromBytes for PeerEntry {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (node_id, remainder) = String::from_bytes(bytes)?;\n        let (address, remainder) = String::from_bytes(remainder)?;\n        Ok((PeerEntry { node_id, address }, remainder))\n    }\n}\n\n/// Map of peer IDs to network addresses.\n#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = 
\"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct Peers(Vec<PeerEntry>);\n\nimpl Peers {\n    /// Retrieve collection of `PeerEntry` records.\n    pub fn into_inner(self) -> Vec<PeerEntry> {\n        self.0\n    }\n\n    /// Random.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        let count = rng.gen_range(0..10);\n        let peers = iter::repeat(())\n            .map(|_| PeerEntry::random(rng))\n            .take(count)\n            .collect();\n        Self(peers)\n    }\n}\n\nimpl<Id: ToString> From<BTreeMap<Id, String>> for Peers {\n    fn from(input: BTreeMap<Id, String>) -> Self {\n        let ret = input\n            .into_iter()\n            .map(|(node_id, address)| PeerEntry {\n                node_id: node_id.to_string(),\n                address,\n            })\n            .collect();\n        Peers(ret)\n    }\n}\n\nimpl ToBytes for Peers {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.0.write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n}\n\nimpl FromBytes for Peers {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (inner, remainder) = Vec::<PeerEntry>::from_bytes(bytes)?;\n        Ok((Peers(inner), remainder))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::testing::TestRng;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = Peers::random(rng);\n        bytesrepr::test_serialization_roundtrip(&val);\n    }\n\n    #[test]\n    fn bytesrepr_empty_roundtrip() {\n        let val = Peers(vec![]);\n        
bytesrepr::test_serialization_roundtrip(&val);\n    }\n\n    #[test]\n    fn bytesrepr_empty_vec_should_have_count_0() {\n        let val = Peers(vec![]);\n        let x = Peers::to_bytes(&val).expect(\"should have vec\");\n        let (count, _) = u32::from_bytes(&x).expect(\"should have count\");\n        assert!(count == 0, \"count should be 0\");\n    }\n}\n"
  },
  {
    "path": "types/src/phase.rs",
    "content": "// Can be removed once https://github.com/rust-lang/rustfmt/issues/3362 is resolved.\n#[rustfmt::skip]\nuse alloc::vec;\nuse alloc::vec::Vec;\n\nuse num_derive::{FromPrimitive, ToPrimitive};\nuse num_traits::{FromPrimitive, ToPrimitive};\n\nuse crate::{\n    bytesrepr::{Error, FromBytes, ToBytes},\n    CLType, CLTyped,\n};\n\n/// The number of bytes in a serialized [`Phase`].\npub const PHASE_SERIALIZED_LENGTH: usize = 1;\n\n/// The phase in which a given contract is executing.\n#[derive(Debug, PartialEq, Eq, Clone, Copy, FromPrimitive, ToPrimitive)]\n#[repr(u8)]\npub enum Phase {\n    /// Set while committing the genesis or upgrade configurations.\n    System = 0,\n    /// Set while executing the payment code of a deploy.\n    Payment = 1,\n    /// Set while executing the session code of a deploy.\n    Session = 2,\n    /// Set while finalizing payment at the end of a deploy.\n    FinalizePayment = 3,\n}\n\nimpl ToBytes for Phase {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        // NOTE: Assumed safe as [`Phase`] is represented as u8.\n        let id = self.to_u8().expect(\"Phase is represented as a u8\");\n\n        Ok(vec![id])\n    }\n\n    fn serialized_length(&self) -> usize {\n        PHASE_SERIALIZED_LENGTH\n    }\n}\n\nimpl FromBytes for Phase {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (id, rest) = u8::from_bytes(bytes)?;\n        let phase = FromPrimitive::from_u8(id).ok_or(Error::Formatting)?;\n        Ok((phase, rest))\n    }\n}\n\nimpl CLTyped for Phase {\n    fn cl_type() -> CLType {\n        CLType::U8\n    }\n}\n"
  },
  {
    "path": "types/src/protocol_version.rs",
    "content": "use alloc::{format, string::String, vec::Vec};\nuse core::{convert::TryFrom, fmt, str::FromStr};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer};\n\nuse crate::{\n    bytesrepr::{Error, FromBytes, ToBytes},\n    ParseSemVerError, SemVer,\n};\n\n/// A newtype wrapping a [`SemVer`] which represents a Casper Platform protocol version.\n#[derive(Copy, Clone, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct ProtocolVersion(SemVer);\n\n/// The result of [`ProtocolVersion::check_next_version`].\n#[derive(Debug, PartialEq, Eq)]\npub enum VersionCheckResult {\n    /// Upgrade possible.\n    Valid {\n        /// Is this a major protocol version upgrade?\n        is_major_version: bool,\n    },\n    /// Upgrade is invalid.\n    Invalid,\n}\n\nimpl VersionCheckResult {\n    /// Checks if given version result is invalid.\n    ///\n    /// Invalid means that a given version can not be followed.\n    pub fn is_invalid(&self) -> bool {\n        matches!(self, VersionCheckResult::Invalid)\n    }\n\n    /// Checks if given version is a major protocol version upgrade.\n    pub fn is_major_version(&self) -> bool {\n        match self {\n            VersionCheckResult::Valid { is_major_version } => *is_major_version,\n            VersionCheckResult::Invalid => false,\n        }\n    }\n}\n\nimpl ProtocolVersion {\n    /// Version 1.0.0.\n    pub const V1_0_0: ProtocolVersion = ProtocolVersion(SemVer {\n        major: 1,\n        minor: 0,\n        patch: 0,\n    });\n\n    /// Version 2.0.0.\n    pub const V2_0_0: ProtocolVersion = ProtocolVersion(SemVer {\n        major: 2,\n        minor: 0,\n        patch: 0,\n    });\n\n    /// Constructs a new `ProtocolVersion` from `version`.\n    pub const fn new(version: SemVer) -> ProtocolVersion 
{\n        ProtocolVersion(version)\n    }\n\n    /// Constructs a new `ProtocolVersion` from the given semver parts.\n    pub const fn from_parts(major: u32, minor: u32, patch: u32) -> ProtocolVersion {\n        let sem_ver = SemVer::new(major, minor, patch);\n        Self::new(sem_ver)\n    }\n\n    /// Returns the inner [`SemVer`].\n    pub const fn value(&self) -> SemVer {\n        self.0\n    }\n\n    /// Returns the inner [`SemVer`] destructed into a tuple of (major, minor, patch).\n    pub const fn destructure(&self) -> (u32, u32, u32) {\n        (self.0.major, self.0.minor, self.0.patch)\n    }\n\n    /// Checks if next version can be followed.\n    pub fn check_next_version(&self, next: &ProtocolVersion) -> VersionCheckResult {\n        // Protocol major versions should increase monotonically by 1.\n        let major_bumped = self.0.major.saturating_add(1);\n        if next.0.major < self.0.major || next.0.major > major_bumped {\n            return VersionCheckResult::Invalid;\n        }\n\n        if next.0.major == major_bumped {\n            return VersionCheckResult::Valid {\n                is_major_version: true,\n            };\n        }\n\n        // Covers the equal major versions\n        debug_assert_eq!(next.0.major, self.0.major);\n\n        if next.0.minor < self.0.minor {\n            // Protocol minor versions within the same major version should not go backwards.\n            return VersionCheckResult::Invalid;\n        }\n\n        if next.0.minor > self.0.minor {\n            return VersionCheckResult::Valid {\n                is_major_version: false,\n            };\n        }\n\n        // Code belows covers equal minor versions\n        debug_assert_eq!(next.0.minor, self.0.minor);\n\n        // Protocol patch versions should increase monotonically but can be skipped.\n        if next.0.patch <= self.0.patch {\n            return VersionCheckResult::Invalid;\n        }\n\n        VersionCheckResult::Valid {\n            
is_major_version: false,\n        }\n    }\n\n    /// Checks if given protocol version is compatible with current one.\n    ///\n    /// Two protocol versions with different major version are considered to be incompatible.\n    pub fn is_compatible_with(&self, version: &ProtocolVersion) -> bool {\n        self.0.major == version.0.major\n    }\n}\n\nimpl ToBytes for ProtocolVersion {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        self.value().to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.value().serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        writer.extend(self.0.major.to_le_bytes());\n        writer.extend(self.0.minor.to_le_bytes());\n        writer.extend(self.0.patch.to_le_bytes());\n        Ok(())\n    }\n}\n\nimpl FromBytes for ProtocolVersion {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (version, rem) = SemVer::from_bytes(bytes)?;\n        let protocol_version = ProtocolVersion::new(version);\n        Ok((protocol_version, rem))\n    }\n}\n\nimpl FromStr for ProtocolVersion {\n    type Err = ParseSemVerError;\n\n    fn from_str(s: &str) -> Result<Self, ParseSemVerError> {\n        let version = SemVer::try_from(s)?;\n        Ok(ProtocolVersion::new(version))\n    }\n}\n\nimpl Serialize for ProtocolVersion {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            let str = format!(\"{}.{}.{}\", self.0.major, self.0.minor, self.0.patch);\n            String::serialize(&str, serializer)\n        } else {\n            self.0.serialize(serializer)\n        }\n    }\n}\n\nimpl<'de> Deserialize<'de> for ProtocolVersion {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        let semver = if deserializer.is_human_readable() {\n            let value_as_string = String::deserialize(deserializer)?;\n   
         SemVer::try_from(value_as_string.as_str()).map_err(SerdeError::custom)?\n        } else {\n            SemVer::deserialize(deserializer)?\n        };\n        Ok(ProtocolVersion(semver))\n    }\n}\n\n#[cfg(feature = \"json-schema\")]\nimpl JsonSchema for ProtocolVersion {\n    fn schema_name() -> String {\n        String::from(\"ProtocolVersion\")\n    }\n\n    fn json_schema(gen: &mut schemars::gen::SchemaGenerator) -> schemars::schema::Schema {\n        let schema = gen.subschema_for::<String>();\n        let mut schema_object = schema.into_object();\n        schema_object.metadata().description = Some(\"Casper Platform protocol version\".to_string());\n        schema_object.into()\n    }\n}\n\nimpl fmt::Display for ProtocolVersion {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        self.0.fmt(f)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::SemVer;\n\n    #[test]\n    fn should_follow_version_with_optional_code() {\n        let value = VersionCheckResult::Valid {\n            is_major_version: false,\n        };\n        assert!(!value.is_invalid());\n        assert!(!value.is_major_version());\n    }\n\n    #[test]\n    fn should_follow_version_with_required_code() {\n        let value = VersionCheckResult::Valid {\n            is_major_version: true,\n        };\n        assert!(!value.is_invalid());\n        assert!(value.is_major_version());\n    }\n\n    #[test]\n    fn should_not_follow_version_with_invalid_code() {\n        let value = VersionCheckResult::Invalid;\n        assert!(value.is_invalid());\n        assert!(!value.is_major_version());\n    }\n\n    #[test]\n    fn should_be_able_to_get_instance() {\n        let initial_value = SemVer::new(1, 0, 0);\n        let item = ProtocolVersion::new(initial_value);\n        assert_eq!(initial_value, item.value(), \"should have equal value\")\n    }\n\n    #[test]\n    fn should_be_able_to_compare_two_instances() {\n        let lhs = 
ProtocolVersion::new(SemVer::new(1, 0, 0));\n        let rhs = ProtocolVersion::new(SemVer::new(1, 0, 0));\n        assert_eq!(lhs, rhs, \"should be equal\");\n        let rhs = ProtocolVersion::new(SemVer::new(2, 0, 0));\n        assert_ne!(lhs, rhs, \"should not be equal\")\n    }\n\n    #[test]\n    fn should_be_able_to_default() {\n        let defaulted = ProtocolVersion::default();\n        let expected = ProtocolVersion::new(SemVer::new(0, 0, 0));\n        assert_eq!(defaulted, expected, \"should be equal\")\n    }\n\n    #[test]\n    fn should_be_able_to_compare_relative_value() {\n        let lhs = ProtocolVersion::new(SemVer::new(2, 0, 0));\n        let rhs = ProtocolVersion::new(SemVer::new(1, 0, 0));\n        assert!(lhs > rhs, \"should be gt\");\n        let rhs = ProtocolVersion::new(SemVer::new(2, 0, 0));\n        assert!(lhs >= rhs, \"should be gte\");\n        assert!(lhs <= rhs, \"should be lte\");\n        let lhs = ProtocolVersion::new(SemVer::new(1, 0, 0));\n        assert!(lhs < rhs, \"should be lt\");\n    }\n\n    #[test]\n    fn should_follow_major_version_upgrade() {\n        // If the upgrade protocol version is lower than or the same as EE's current in-use protocol\n        // version the upgrade is rejected and an error is returned; this includes the special case\n        // of a defaulted protocol version ( 0.0.0 ).\n        let prev = ProtocolVersion::new(SemVer::new(1, 0, 0));\n        let next = ProtocolVersion::new(SemVer::new(2, 0, 0));\n        assert!(\n            prev.check_next_version(&next).is_major_version(),\n            \"should be major version\"\n        );\n    }\n\n    #[test]\n    fn should_reject_if_major_version_decreases() {\n        let prev = ProtocolVersion::new(SemVer::new(10, 0, 0));\n        let next = ProtocolVersion::new(SemVer::new(9, 0, 0));\n        // Major version must not decrease ...\n        assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid);\n    }\n\n    #[test]\n    fn 
should_check_follows_minor_version_upgrade() {\n        // [major version] may remain the same in the case of a minor or patch version increase.\n\n        // Minor version must not decrease within the same major version\n        let prev = ProtocolVersion::new(SemVer::new(1, 1, 0));\n        let next = ProtocolVersion::new(SemVer::new(1, 2, 0));\n\n        let value = prev.check_next_version(&next);\n        assert!(!value.is_invalid(), \"should be valid\");\n        assert!(!value.is_major_version(), \"should not be a major version\");\n    }\n\n    #[test]\n    fn should_not_care_if_minor_bump_resets_patch() {\n        let prev = ProtocolVersion::new(SemVer::new(1, 2, 0));\n        let next = ProtocolVersion::new(SemVer::new(1, 3, 1));\n        assert_eq!(\n            prev.check_next_version(&next),\n            VersionCheckResult::Valid {\n                is_major_version: false\n            }\n        );\n\n        let prev = ProtocolVersion::new(SemVer::new(1, 20, 42));\n        let next = ProtocolVersion::new(SemVer::new(1, 30, 43));\n        assert_eq!(\n            prev.check_next_version(&next),\n            VersionCheckResult::Valid {\n                is_major_version: false\n            }\n        );\n    }\n\n    #[test]\n    fn should_not_care_if_major_bump_resets_minor_or_patch() {\n        // A major version increase resets both the minor and patch versions to ( 0.0 ).\n        let prev = ProtocolVersion::new(SemVer::new(1, 0, 0));\n        let next = ProtocolVersion::new(SemVer::new(2, 1, 0));\n        assert_eq!(\n            prev.check_next_version(&next),\n            VersionCheckResult::Valid {\n                is_major_version: true\n            }\n        );\n\n        let next = ProtocolVersion::new(SemVer::new(2, 0, 1));\n        assert_eq!(\n            prev.check_next_version(&next),\n            VersionCheckResult::Valid {\n                is_major_version: true\n            }\n        );\n\n        let next = 
ProtocolVersion::new(SemVer::new(2, 1, 1));\n        assert_eq!(\n            prev.check_next_version(&next),\n            VersionCheckResult::Valid {\n                is_major_version: true\n            }\n        );\n    }\n\n    #[test]\n    fn should_reject_patch_version_rollback() {\n        // Patch version must not decrease or remain the same within the same major and minor\n        // version pair, but may skip.\n        let prev = ProtocolVersion::new(SemVer::new(1, 0, 42));\n        let next = ProtocolVersion::new(SemVer::new(1, 0, 41));\n        assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid);\n        let next = ProtocolVersion::new(SemVer::new(1, 0, 13));\n        assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid);\n    }\n\n    #[test]\n    fn should_accept_patch_version_update_with_optional_code() {\n        let prev = ProtocolVersion::new(SemVer::new(1, 0, 0));\n        let next = ProtocolVersion::new(SemVer::new(1, 0, 1));\n        let value = prev.check_next_version(&next);\n        assert!(!value.is_invalid(), \"should be valid\");\n        assert!(!value.is_major_version(), \"should not be a major version\");\n\n        let prev = ProtocolVersion::new(SemVer::new(1, 0, 8));\n        let next = ProtocolVersion::new(SemVer::new(1, 0, 42));\n        let value = prev.check_next_version(&next);\n        assert!(!value.is_invalid(), \"should be valid\");\n        assert!(!value.is_major_version(), \"should not be a major version\");\n    }\n\n    #[test]\n    fn should_accept_minor_version_update_with_optional_code() {\n        // installer is optional for minor bump\n        let prev = ProtocolVersion::new(SemVer::new(1, 0, 0));\n        let next = ProtocolVersion::new(SemVer::new(1, 1, 0));\n        let value = prev.check_next_version(&next);\n        assert!(!value.is_invalid(), \"should be valid\");\n        assert!(!value.is_major_version(), \"should not be a major version\");\n\n        let prev = 
ProtocolVersion::new(SemVer::new(3, 98, 0));\n        let next = ProtocolVersion::new(SemVer::new(3, 99, 0));\n        let value = prev.check_next_version(&next);\n        assert!(!value.is_invalid(), \"should be valid\");\n        assert!(!value.is_major_version(), \"should not be a major version\");\n    }\n\n    #[test]\n    fn should_allow_skip_minor_version_within_major_version() {\n        let prev = ProtocolVersion::new(SemVer::new(1, 1, 0));\n\n        let next = ProtocolVersion::new(SemVer::new(1, 3, 0));\n        assert_eq!(\n            prev.check_next_version(&next),\n            VersionCheckResult::Valid {\n                is_major_version: false\n            }\n        );\n\n        let next = ProtocolVersion::new(SemVer::new(1, 7, 0));\n        assert_eq!(\n            prev.check_next_version(&next),\n            VersionCheckResult::Valid {\n                is_major_version: false\n            }\n        );\n    }\n\n    #[test]\n    fn should_allow_skip_patch_version_within_minor_version() {\n        let prev = ProtocolVersion::new(SemVer::new(1, 1, 0));\n\n        let next = ProtocolVersion::new(SemVer::new(1, 1, 2));\n        assert_eq!(\n            prev.check_next_version(&next),\n            VersionCheckResult::Valid {\n                is_major_version: false\n            }\n        );\n    }\n\n    #[test]\n    fn should_allow_skipped_minor_and_patch_on_major_bump() {\n        // skip minor\n        let prev = ProtocolVersion::new(SemVer::new(1, 0, 0));\n        let next = ProtocolVersion::new(SemVer::new(2, 1, 0));\n        assert_eq!(\n            prev.check_next_version(&next),\n            VersionCheckResult::Valid {\n                is_major_version: true\n            }\n        );\n\n        // skip patch\n        let prev = ProtocolVersion::new(SemVer::new(1, 0, 0));\n        let next = ProtocolVersion::new(SemVer::new(2, 0, 1));\n        assert_eq!(\n            prev.check_next_version(&next),\n            VersionCheckResult::Valid {\n 
               is_major_version: true\n            }\n        );\n\n        // skip many minors and patches\n        let prev = ProtocolVersion::new(SemVer::new(1, 0, 0));\n        let next = ProtocolVersion::new(SemVer::new(2, 3, 10));\n        assert_eq!(\n            prev.check_next_version(&next),\n            VersionCheckResult::Valid {\n                is_major_version: true\n            }\n        );\n    }\n\n    #[test]\n    fn should_allow_code_on_major_update() {\n        // major upgrade requires installer to be present\n        let prev = ProtocolVersion::new(SemVer::new(1, 0, 0));\n        let next = ProtocolVersion::new(SemVer::new(2, 0, 0));\n        assert!(\n            prev.check_next_version(&next).is_major_version(),\n            \"should be major version\"\n        );\n\n        let prev = ProtocolVersion::new(SemVer::new(2, 99, 99));\n        let next = ProtocolVersion::new(SemVer::new(3, 0, 0));\n        assert!(\n            prev.check_next_version(&next).is_major_version(),\n            \"should be major version\"\n        );\n    }\n\n    #[test]\n    fn should_not_skip_major_version() {\n        // can bump only by 1\n        let prev = ProtocolVersion::new(SemVer::new(1, 0, 0));\n        let next = ProtocolVersion::new(SemVer::new(3, 0, 0));\n        assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid);\n    }\n\n    #[test]\n    fn should_reject_major_version_rollback() {\n        // can bump forward\n        let prev = ProtocolVersion::new(SemVer::new(2, 0, 0));\n        let next = ProtocolVersion::new(SemVer::new(0, 0, 0));\n        assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid);\n    }\n\n    #[test]\n    fn should_check_same_version_is_invalid() {\n        for ver in &[\n            ProtocolVersion::from_parts(1, 0, 0),\n            ProtocolVersion::from_parts(1, 2, 0),\n            ProtocolVersion::from_parts(1, 2, 3),\n        ] {\n            assert_eq!(ver.check_next_version(ver), 
VersionCheckResult::Invalid);\n        }\n    }\n\n    #[test]\n    fn should_not_be_compatible_with_different_major_version() {\n        let current = ProtocolVersion::from_parts(1, 2, 3);\n        let other = ProtocolVersion::from_parts(2, 5, 6);\n        assert!(!current.is_compatible_with(&other));\n\n        let current = ProtocolVersion::from_parts(1, 0, 0);\n        let other = ProtocolVersion::from_parts(2, 0, 0);\n        assert!(!current.is_compatible_with(&other));\n    }\n\n    #[test]\n    fn should_be_compatible_with_equal_major_version_backwards() {\n        let current = ProtocolVersion::from_parts(1, 99, 99);\n        let other = ProtocolVersion::from_parts(1, 0, 0);\n        assert!(current.is_compatible_with(&other));\n    }\n\n    #[test]\n    fn should_be_compatible_with_equal_major_version_forwards() {\n        let current = ProtocolVersion::from_parts(1, 0, 0);\n        let other = ProtocolVersion::from_parts(1, 99, 99);\n        assert!(current.is_compatible_with(&other));\n    }\n\n    #[test]\n    fn should_serialize_to_json_properly() {\n        let protocol_version = ProtocolVersion::from_parts(1, 1, 1);\n        let json = serde_json::to_string(&protocol_version).unwrap();\n        let expected = \"\\\"1.1.1\\\"\";\n        assert_eq!(json, expected);\n    }\n\n    #[test]\n    fn serialize_roundtrip() {\n        let protocol_version = ProtocolVersion::from_parts(1, 1, 1);\n        let serialized_json = serde_json::to_string(&protocol_version).unwrap();\n        assert_eq!(\n            protocol_version,\n            serde_json::from_str(&serialized_json).unwrap()\n        );\n\n        let serialized_bincode = bincode::serialize(&protocol_version).unwrap();\n        assert_eq!(\n            protocol_version,\n            bincode::deserialize(&serialized_bincode).unwrap()\n        );\n    }\n}\n"
  },
  {
    "path": "types/src/runtime_footprint.rs",
    "content": "use crate::{\n    account::AccountHash,\n    addressable_entity::{AssociatedKeys, ContractRuntimeTag, Weight},\n    contracts::{ContractHash, NamedKeys},\n    system::SystemEntityType,\n    Account, AddressableEntity, ContextAccessRights, Contract, EntityAddr, EntityKind, EntryPoints,\n    HashAddr, Key, ProtocolVersion, URef,\n};\nuse alloc::{\n    collections::{BTreeMap, BTreeSet},\n    string::String,\n};\nuse core::{fmt::Debug, iter};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n/// Runtime Address.\n#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub enum RuntimeAddress {\n    /// Account address\n    Hash(HashAddr),\n    /// Runtime executable address.\n    StoredContract {\n        /// The hash addr of the runtime entity\n        hash_addr: HashAddr,\n        /// The package hash\n        package_hash_addr: HashAddr,\n        /// The wasm hash\n        wasm_hash_addr: HashAddr,\n        /// protocol version\n        protocol_version: ProtocolVersion,\n    },\n}\n\nimpl RuntimeAddress {\n    /// Returns a new hash\n    pub fn new_hash(hash_addr: HashAddr) -> Self {\n        Self::Hash(hash_addr)\n    }\n\n    /// Returns new stored contract\n    pub fn new_stored_contract(\n        hash_addr: HashAddr,\n        package_hash_addr: HashAddr,\n        wasm_hash_addr: HashAddr,\n        protocol_version: ProtocolVersion,\n    ) -> Self {\n        Self::StoredContract {\n            hash_addr,\n            package_hash_addr,\n            wasm_hash_addr,\n            protocol_version,\n        }\n    }\n\n    /// The hash addr for the runtime.\n    pub fn hash_addr(&self) -> HashAddr {\n        match self {\n            RuntimeAddress::Hash(hash_addr) => *hash_addr,\n            
RuntimeAddress::StoredContract { hash_addr, .. } => *hash_addr,\n        }\n    }\n}\n\n#[repr(u8)]\n#[allow(clippy::enum_variant_names)]\npub(crate) enum Action {\n    KeyManagement = 0,\n    DeployManagement,\n    UpgradeManagement,\n}\n\n#[derive(Debug, Clone, PartialEq, Eq)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct RuntimeFootprint {\n    named_keys: NamedKeys,\n    action_thresholds: BTreeMap<u8, Weight>,\n    associated_keys: AssociatedKeys,\n    entry_points: EntryPoints,\n    entity_kind: EntityKind,\n\n    main_purse: Option<URef>,\n    runtime_address: RuntimeAddress,\n}\n\nimpl RuntimeFootprint {\n    pub fn new(\n        named_keys: NamedKeys,\n        action_thresholds: BTreeMap<u8, Weight>,\n        associated_keys: AssociatedKeys,\n        entry_points: EntryPoints,\n        entity_kind: EntityKind,\n        main_purse: Option<URef>,\n        runtime_address: RuntimeAddress,\n    ) -> Self {\n        Self {\n            named_keys,\n            action_thresholds,\n            associated_keys,\n            entry_points,\n            entity_kind,\n            main_purse,\n            runtime_address,\n        }\n    }\n\n    pub fn new_account_footprint(account: Account) -> Self {\n        let named_keys = account.named_keys().clone();\n        let action_thresholds = {\n            let mut ret = BTreeMap::new();\n            ret.insert(\n                Action::KeyManagement as u8,\n                Weight::new(account.action_thresholds().key_management.value()),\n            );\n            ret.insert(\n                Action::DeployManagement as u8,\n                Weight::new(account.action_thresholds().deployment.value()),\n            );\n            ret\n        };\n        let associated_keys = account.associated_keys().clone().into();\n        let entry_points = EntryPoints::new();\n        let entity_kind = EntityKind::Account(account.account_hash());\n        let main_purse = Some(account.main_purse());\n        let 
runtime_address = RuntimeAddress::new_hash(account.account_hash().value());\n\n        Self::new(\n            named_keys,\n            action_thresholds,\n            associated_keys,\n            entry_points,\n            entity_kind,\n            main_purse,\n            runtime_address,\n        )\n    }\n\n    pub fn new_contract_footprint(\n        contract_hash: ContractHash,\n        contract: Contract,\n        system_entity_type: Option<SystemEntityType>,\n    ) -> Self {\n        let contract_package_hash = contract.contract_package_hash();\n        let contract_wasm_hash = contract.contract_wasm_hash();\n        let entry_points = contract.entry_points().clone().into();\n        let protocol_version = contract.protocol_version();\n        let named_keys = contract.take_named_keys();\n\n        let runtime_address = RuntimeAddress::new_stored_contract(\n            contract_hash.value(),\n            contract_package_hash.value(),\n            contract_wasm_hash.value(),\n            protocol_version,\n        );\n\n        let main_purse = None;\n        let action_thresholds = BTreeMap::new();\n        let associated_keys = AssociatedKeys::empty_keys();\n\n        let entity_kind = match system_entity_type {\n            None => EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1),\n            Some(kind) => EntityKind::System(kind),\n        };\n\n        Self::new(\n            named_keys,\n            action_thresholds,\n            associated_keys,\n            entry_points,\n            entity_kind,\n            main_purse,\n            runtime_address,\n        )\n    }\n\n    pub fn new_entity_footprint(\n        entity_addr: EntityAddr,\n        entity: AddressableEntity,\n        named_keys: NamedKeys,\n        entry_points: EntryPoints,\n    ) -> Self {\n        let runtime_address = RuntimeAddress::new_stored_contract(\n            entity_addr.value(),\n            entity.package_hash().value(),\n            
entity.byte_code_hash().value(),\n            entity.protocol_version(),\n        );\n        let action_thresholds = {\n            let mut ret = BTreeMap::new();\n            ret.insert(\n                Action::KeyManagement as u8,\n                entity.action_thresholds().key_management,\n            );\n            ret.insert(\n                Action::DeployManagement as u8,\n                entity.action_thresholds().deployment,\n            );\n            ret.insert(\n                Action::UpgradeManagement as u8,\n                entity.action_thresholds().upgrade_management,\n            );\n            ret\n        };\n        Self::new(\n            named_keys,\n            action_thresholds,\n            entity.associated_keys().clone(),\n            entry_points,\n            entity.entity_kind(),\n            Some(entity.main_purse()),\n            runtime_address,\n        )\n    }\n\n    pub fn package_hash(&self) -> Option<HashAddr> {\n        match &self.runtime_address {\n            RuntimeAddress::Hash(_) => None,\n            RuntimeAddress::StoredContract {\n                package_hash_addr, ..\n            } => Some(*package_hash_addr),\n        }\n    }\n\n    pub fn associated_keys(&self) -> &AssociatedKeys {\n        &self.associated_keys\n    }\n\n    pub fn wasm_hash(&self) -> Option<HashAddr> {\n        match &self.runtime_address {\n            RuntimeAddress::Hash(_) => None,\n            RuntimeAddress::StoredContract { wasm_hash_addr, .. } => Some(*wasm_hash_addr),\n        }\n    }\n\n    pub fn hash_addr(&self) -> HashAddr {\n        match &self.runtime_address {\n            RuntimeAddress::Hash(hash_addr) => *hash_addr,\n            RuntimeAddress::StoredContract { hash_addr, .. 
} => *hash_addr,\n        }\n    }\n\n    pub fn named_keys(&self) -> &NamedKeys {\n        &self.named_keys\n    }\n\n    pub fn insert_into_named_keys(&mut self, name: String, key: Key) {\n        self.named_keys.insert(name, key);\n    }\n\n    pub fn named_keys_mut(&mut self) -> &mut NamedKeys {\n        &mut self.named_keys\n    }\n\n    pub fn take_named_keys(self) -> NamedKeys {\n        self.named_keys\n    }\n\n    pub fn main_purse(&self) -> Option<URef> {\n        self.main_purse\n    }\n\n    pub fn set_main_purse(&mut self, purse: URef) {\n        self.main_purse = Some(purse);\n    }\n\n    pub fn entry_points(&self) -> &EntryPoints {\n        &self.entry_points\n    }\n\n    pub fn entity_kind(&self) -> EntityKind {\n        self.entity_kind\n    }\n\n    /// Checks whether all authorization keys are associated with this addressable entity.\n    pub fn can_authorize(&self, authorization_keys: &BTreeSet<AccountHash>) -> bool {\n        !authorization_keys.is_empty()\n            && authorization_keys\n                .iter()\n                .any(|e| self.associated_keys.contains_key(e))\n    }\n\n    /// Checks whether the sum of the weights of all authorization keys is\n    /// greater or equal to key management threshold.\n    pub fn can_manage_keys_with(&self, authorization_keys: &BTreeSet<AccountHash>) -> bool {\n        let total_weight = self\n            .associated_keys\n            .calculate_keys_weight(authorization_keys);\n\n        match self.action_thresholds.get(&(Action::KeyManagement as u8)) {\n            None => false,\n            Some(weight) => total_weight >= *weight,\n        }\n    }\n\n    /// Checks whether the sum of the weights of all authorization keys is\n    /// greater or equal to deploy threshold.\n    pub fn can_deploy_with(&self, authorization_keys: &BTreeSet<AccountHash>) -> bool {\n        let total_weight = self\n            .associated_keys\n            .calculate_keys_weight(authorization_keys);\n\n        
match self\n            .action_thresholds\n            .get(&(Action::DeployManagement as u8))\n        {\n            None => false,\n            Some(weight) => total_weight >= *weight,\n        }\n    }\n\n    pub fn can_upgrade_with(&self, authorization_keys: &BTreeSet<AccountHash>) -> bool {\n        let total_weight = self\n            .associated_keys\n            .calculate_keys_weight(authorization_keys);\n\n        match self\n            .action_thresholds\n            .get(&(Action::UpgradeManagement as u8))\n        {\n            None => false,\n            Some(weight) => total_weight >= *weight,\n        }\n    }\n\n    /// Extracts the access rights from the named keys of the addressable entity.\n    pub fn extract_access_rights(&self, hash_addr: HashAddr) -> ContextAccessRights {\n        match self.main_purse {\n            Some(purse) => {\n                let urefs_iter = self\n                    .named_keys\n                    .keys()\n                    .filter_map(|key| key.as_uref().copied())\n                    .chain(iter::once(purse));\n                ContextAccessRights::new(hash_addr, urefs_iter)\n            }\n            None => {\n                let urefs_iter = self\n                    .named_keys\n                    .keys()\n                    .filter_map(|key| key.as_uref().copied());\n                ContextAccessRights::new(hash_addr, urefs_iter)\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/semver.rs",
    "content": "use alloc::vec::Vec;\nuse core::{\n    convert::TryFrom,\n    fmt::{self, Display, Formatter},\n    num::ParseIntError,\n};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\nuse crate::bytesrepr::{self, Error, FromBytes, ToBytes, U32_SERIALIZED_LENGTH};\n\n/// Length of SemVer when serialized\npub const SEM_VER_SERIALIZED_LENGTH: usize = 3 * U32_SERIALIZED_LENGTH;\n\n/// A struct for semantic versioning.\n#[derive(\n    Copy, Clone, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize,\n)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct SemVer {\n    /// Major version.\n    pub major: u32,\n    /// Minor version.\n    pub minor: u32,\n    /// Patch version.\n    pub patch: u32,\n}\n\nimpl SemVer {\n    /// Version 1.0.0.\n    pub const V1_0_0: SemVer = SemVer {\n        major: 1,\n        minor: 0,\n        patch: 0,\n    };\n\n    /// Constructs a new `SemVer` from the given semver parts.\n    pub const fn new(major: u32, minor: u32, patch: u32) -> SemVer {\n        SemVer {\n            major,\n            minor,\n            patch,\n        }\n    }\n}\n\nimpl ToBytes for SemVer {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut ret = bytesrepr::unchecked_allocate_buffer(self);\n        ret.append(&mut self.major.to_bytes()?);\n        ret.append(&mut self.minor.to_bytes()?);\n        ret.append(&mut self.patch.to_bytes()?);\n        Ok(ret)\n    }\n\n    fn serialized_length(&self) -> usize {\n        SEM_VER_SERIALIZED_LENGTH\n    }\n}\n\nimpl FromBytes for SemVer {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (major, rem): (u32, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let (minor, rem): (u32, &[u8]) = FromBytes::from_bytes(rem)?;\n        let (patch, rem): (u32, &[u8]) = FromBytes::from_bytes(rem)?;\n        Ok((SemVer::new(major, minor, patch), rem))\n    }\n}\n\nimpl Display for SemVer 
{\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        write!(f, \"{}.{}.{}\", self.major, self.minor, self.patch)\n    }\n}\n\n/// Parsing error when creating a SemVer.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub enum ParseSemVerError {\n    /// Invalid version format.\n    InvalidVersionFormat,\n    /// Error parsing an integer.\n    ParseIntError(ParseIntError),\n}\n\nimpl Display for ParseSemVerError {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            ParseSemVerError::InvalidVersionFormat => formatter.write_str(\"invalid version format\"),\n            ParseSemVerError::ParseIntError(error) => error.fmt(formatter),\n        }\n    }\n}\n\nimpl From<ParseIntError> for ParseSemVerError {\n    fn from(error: ParseIntError) -> ParseSemVerError {\n        ParseSemVerError::ParseIntError(error)\n    }\n}\n\nimpl TryFrom<&str> for SemVer {\n    type Error = ParseSemVerError;\n    fn try_from(value: &str) -> Result<SemVer, Self::Error> {\n        let tokens: Vec<&str> = value.split('.').collect();\n        if tokens.len() != 3 {\n            return Err(ParseSemVerError::InvalidVersionFormat);\n        }\n\n        Ok(SemVer {\n            major: tokens[0].parse()?,\n            minor: tokens[1].parse()?,\n            patch: tokens[2].parse()?,\n        })\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use core::convert::TryInto;\n\n    #[test]\n    fn should_compare_semver_versions() {\n        assert!(SemVer::new(0, 0, 0) < SemVer::new(1, 2, 3));\n        assert!(SemVer::new(1, 1, 0) < SemVer::new(1, 2, 0));\n        assert!(SemVer::new(1, 0, 0) < SemVer::new(1, 2, 0));\n        assert!(SemVer::new(1, 0, 0) < SemVer::new(1, 2, 3));\n        assert!(SemVer::new(1, 2, 0) < SemVer::new(1, 2, 3));\n        assert!(SemVer::new(1, 2, 3) == SemVer::new(1, 2, 3));\n        assert!(SemVer::new(1, 2, 3) >= SemVer::new(1, 2, 3));\n        assert!(SemVer::new(1, 2, 3) <= SemVer::new(1, 2, 3));\n        
assert!(SemVer::new(2, 0, 0) >= SemVer::new(1, 99, 99));\n        assert!(SemVer::new(2, 0, 0) > SemVer::new(1, 99, 99));\n    }\n\n    #[test]\n    fn parse_from_string() {\n        let ver1: SemVer = \"100.20.3\".try_into().expect(\"should parse\");\n        assert_eq!(ver1, SemVer::new(100, 20, 3));\n        let ver2: SemVer = \"0.0.1\".try_into().expect(\"should parse\");\n        assert_eq!(ver2, SemVer::new(0, 0, 1));\n\n        assert!(SemVer::try_from(\"1.a.2.3\").is_err());\n        assert!(SemVer::try_from(\"1. 2.3\").is_err());\n        assert!(SemVer::try_from(\"12345124361461.0.1\").is_err());\n        assert!(SemVer::try_from(\"1.2.3.4\").is_err());\n        assert!(SemVer::try_from(\"1.2\").is_err());\n        assert!(SemVer::try_from(\"1\").is_err());\n        assert!(SemVer::try_from(\"0\").is_err());\n    }\n}\n"
  },
  {
    "path": "types/src/serde_helpers.rs",
    "content": "use alloc::{string::String, vec::Vec};\nuse core::convert::TryFrom;\n\nuse serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer};\n\nuse crate::Digest;\n\npub(crate) mod raw_32_byte_array {\n    use super::*;\n\n    pub(crate) fn serialize<S: Serializer>(\n        array: &[u8; 32],\n        serializer: S,\n    ) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            base16::encode_lower(array).serialize(serializer)\n        } else {\n            array.serialize(serializer)\n        }\n    }\n\n    pub(crate) fn deserialize<'de, D: Deserializer<'de>>(\n        deserializer: D,\n    ) -> Result<[u8; 32], D::Error> {\n        if deserializer.is_human_readable() {\n            let hex_string = String::deserialize(deserializer)?;\n            let bytes = base16::decode(hex_string.as_bytes()).map_err(SerdeError::custom)?;\n            <[u8; 32]>::try_from(bytes.as_ref()).map_err(SerdeError::custom)\n        } else {\n            <[u8; 32]>::deserialize(deserializer)\n        }\n    }\n}\n\npub(crate) mod contract_hash_as_digest {\n    use super::*;\n    use crate::contracts::ContractHash;\n\n    pub(crate) fn serialize<S: Serializer>(\n        contract_hash: &ContractHash,\n        serializer: S,\n    ) -> Result<S::Ok, S::Error> {\n        Digest::from(contract_hash.value()).serialize(serializer)\n    }\n\n    pub(crate) fn deserialize<'de, D: Deserializer<'de>>(\n        deserializer: D,\n    ) -> Result<ContractHash, D::Error> {\n        let digest = Digest::deserialize(deserializer)?;\n        Ok(ContractHash::new(digest.value()))\n    }\n}\n\npub(crate) mod contract_package_hash_as_digest {\n    use super::*;\n    use crate::contracts::ContractPackageHash;\n\n    pub(crate) fn serialize<S: Serializer>(\n        contract_package_hash: &ContractPackageHash,\n        serializer: S,\n    ) -> Result<S::Ok, S::Error> {\n        Digest::from(contract_package_hash.value()).serialize(serializer)\n  
  }\n\n    pub(crate) fn deserialize<'de, D: Deserializer<'de>>(\n        deserializer: D,\n    ) -> Result<ContractPackageHash, D::Error> {\n        let digest = Digest::deserialize(deserializer)?;\n        Ok(ContractPackageHash::new(digest.value()))\n    }\n}\n\n/// This module allows `DeployHash`es to be serialized and deserialized using the underlying\n/// `[u8; 32]` rather than delegating to the wrapped `Digest`, which in turn delegates to a\n/// `Vec<u8>` for legacy reasons.\n///\n/// This is required as the `DeployHash` defined in `casper-types` up until v4.0.0 used the array\n/// form, while the `DeployHash` defined in `casper-node` during this period delegated to `Digest`.\n///\n/// We use this module in places where the old `casper_types::DeployHash` was held as a member of a\n/// type which implements `Serialize` and/or `Deserialize`.\npub(crate) mod deploy_hash_as_array {\n    use super::*;\n    use crate::DeployHash;\n\n    pub(crate) fn serialize<S: Serializer>(\n        deploy_hash: &DeployHash,\n        serializer: S,\n    ) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            base16::encode_lower(&deploy_hash.inner().value()).serialize(serializer)\n        } else {\n            deploy_hash.inner().value().serialize(serializer)\n        }\n    }\n\n    pub(crate) fn deserialize<'de, D: Deserializer<'de>>(\n        deserializer: D,\n    ) -> Result<DeployHash, D::Error> {\n        let bytes = if deserializer.is_human_readable() {\n            let hex_string = String::deserialize(deserializer)?;\n            let vec_bytes = base16::decode(hex_string.as_bytes()).map_err(SerdeError::custom)?;\n            <[u8; DeployHash::LENGTH]>::try_from(vec_bytes.as_ref()).map_err(SerdeError::custom)?\n        } else {\n            <[u8; DeployHash::LENGTH]>::deserialize(deserializer)?\n        };\n        Ok(DeployHash::new(Digest::from(bytes)))\n    }\n}\n\npub(crate) mod contract {\n    use super::*;\n    use crate::{\n        
contracts::{ContractPackageHash, EntryPoint, EntryPoints},\n        Contract, ContractWasmHash, NamedKeys, ProtocolVersion,\n    };\n    use core::fmt::Display;\n    #[cfg(feature = \"json-schema\")]\n    use schemars::JsonSchema;\n    use serde::{Deserialize, Serialize};\n\n    #[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)]\n    #[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n    #[cfg_attr(\n        feature = \"json-schema\",\n        schemars(\n            rename = \"Contract\",\n            description = \"Methods and type signatures supported by a contract.\",\n        )\n    )]\n    pub(crate) struct HumanReadableContract {\n        contract_package_hash: ContractPackageHash,\n        contract_wasm_hash: ContractWasmHash,\n        named_keys: NamedKeys,\n        entry_points: Vec<EntryPoint>,\n        protocol_version: ProtocolVersion,\n    }\n\n    impl From<&Contract> for HumanReadableContract {\n        fn from(value: &Contract) -> Self {\n            Self {\n                contract_package_hash: value.contract_package_hash(),\n                contract_wasm_hash: value.contract_wasm_hash(),\n                named_keys: value.named_keys().clone(),\n                protocol_version: value.protocol_version(),\n                entry_points: value.entry_points().clone().take_entry_points(),\n            }\n        }\n    }\n\n    /// Parsing error when deserializing StoredValue.\n    #[derive(Debug, Clone)]\n    pub(crate) enum ContractDeserializationError {\n        /// Contract not deserializable.\n        NonUniqueEntryPointName,\n    }\n\n    impl Display for ContractDeserializationError {\n        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {\n            match self {\n                ContractDeserializationError::NonUniqueEntryPointName => {\n                    write!(f, \"Non unique `entry_points.name`\")\n                }\n            }\n        }\n    }\n\n    impl 
TryFrom<HumanReadableContract> for Contract {\n        type Error = ContractDeserializationError;\n        fn try_from(value: HumanReadableContract) -> Result<Self, Self::Error> {\n            let HumanReadableContract {\n                contract_package_hash,\n                contract_wasm_hash,\n                named_keys,\n                entry_points,\n                protocol_version,\n            } = value;\n            let mut entry_points_map = EntryPoints::new();\n            for entry_point in entry_points {\n                if entry_points_map.add_entry_point(entry_point).is_some() {\n                    //There were duplicate entries in regards to 'name'\n                    return Err(ContractDeserializationError::NonUniqueEntryPointName);\n                }\n            }\n\n            Ok(Contract::new(\n                contract_package_hash,\n                contract_wasm_hash,\n                named_keys,\n                entry_points_map,\n                protocol_version,\n            ))\n        }\n    }\n}\n\npub(crate) mod contract_package {\n    use core::convert::TryFrom;\n\n    use super::*;\n    #[cfg(feature = \"json-schema\")]\n    use schemars::JsonSchema;\n    use serde::{Deserialize, Serialize};\n\n    use crate::{\n        contracts::{\n            ContractHash, ContractPackage, ContractPackageStatus, ContractVersion,\n            ContractVersionKey, ContractVersions, DisabledVersions, ProtocolVersionMajor,\n        },\n        Groups, URef,\n    };\n\n    #[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)]\n    #[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n    #[cfg_attr(feature = \"json-schema\", schemars(rename = \"ContractVersion\"))]\n    pub(crate) struct HumanReadableContractVersion {\n        protocol_version_major: ProtocolVersionMajor,\n        contract_version: ContractVersion,\n        contract_hash: ContractHash,\n    }\n\n    /// Helper struct for deserializing/serializing 
`ContractPackage` from and to JSON.\n    #[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)]\n    #[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n    #[cfg_attr(feature = \"json-schema\", schemars(rename = \"ContractPackage\"))]\n    pub(crate) struct HumanReadableContractPackage {\n        access_key: URef,\n        versions: Vec<HumanReadableContractVersion>,\n        disabled_versions: DisabledVersions,\n        groups: Groups,\n        lock_status: ContractPackageStatus,\n    }\n\n    impl From<&ContractPackage> for HumanReadableContractPackage {\n        fn from(package: &ContractPackage) -> Self {\n            let mut versions = vec![];\n            for (key, hash) in package.versions() {\n                versions.push(HumanReadableContractVersion {\n                    protocol_version_major: key.protocol_version_major(),\n                    contract_version: key.contract_version(),\n                    contract_hash: *hash,\n                });\n            }\n            HumanReadableContractPackage {\n                access_key: package.access_key(),\n                versions,\n                disabled_versions: package.disabled_versions().clone(),\n                groups: package.groups().clone(),\n                lock_status: package.lock_status(),\n            }\n        }\n    }\n\n    impl TryFrom<HumanReadableContractPackage> for ContractPackage {\n        type Error = String;\n\n        fn try_from(value: HumanReadableContractPackage) -> Result<Self, Self::Error> {\n            let mut versions = ContractVersions::default();\n            for version in value.versions.iter() {\n                let key = ContractVersionKey::new(\n                    version.protocol_version_major,\n                    version.contract_version,\n                );\n                if versions.contains_key(&key) {\n                    return Err(format!(\"duplicate contract version: {:?}\", key));\n                }\n                
versions.insert(key, version.contract_hash);\n            }\n            Ok(ContractPackage::new(\n                value.access_key,\n                versions,\n                value.disabled_versions,\n                value.groups,\n                value.lock_status,\n            ))\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/stored_value/global_state_identifier.rs",
    "content": "use alloc::vec::Vec;\n\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    BlockHash, BlockIdentifier, Digest,\n};\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n\nconst BLOCK_HASH_TAG: u8 = 0;\nconst BLOCK_HEIGHT_TAG: u8 = 1;\nconst STATE_ROOT_HASH_TAG: u8 = 2;\n\n/// Identifier for possible ways to query Global State\n#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub enum GlobalStateIdentifier {\n    /// Query using a block hash.\n    BlockHash(BlockHash),\n    /// Query using a block height.\n    BlockHeight(u64),\n    /// Query using the state root hash.\n    StateRootHash(Digest),\n}\n\nimpl GlobalStateIdentifier {\n    /// Random.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..3) {\n            0 => Self::BlockHash(BlockHash::random(rng)),\n            1 => Self::BlockHeight(rng.gen()),\n            2 => Self::StateRootHash(Digest::random(rng)),\n            _ => panic!(),\n        }\n    }\n}\n\nimpl From<BlockIdentifier> for GlobalStateIdentifier {\n    fn from(block_identifier: BlockIdentifier) -> Self {\n        match block_identifier {\n            BlockIdentifier::Hash(block_hash) => GlobalStateIdentifier::BlockHash(block_hash),\n            BlockIdentifier::Height(block_height) => {\n                GlobalStateIdentifier::BlockHeight(block_height)\n            }\n        }\n    }\n}\n\nimpl FromBytes for GlobalStateIdentifier {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        match bytes.split_first() {\n            Some((&BLOCK_HASH_TAG, rem)) => {\n                let (block_hash, rem) = 
FromBytes::from_bytes(rem)?;\n                Ok((GlobalStateIdentifier::BlockHash(block_hash), rem))\n            }\n            Some((&BLOCK_HEIGHT_TAG, rem)) => {\n                let (block_height, rem) = FromBytes::from_bytes(rem)?;\n                Ok((GlobalStateIdentifier::BlockHeight(block_height), rem))\n            }\n            Some((&STATE_ROOT_HASH_TAG, rem)) => {\n                let (state_root_hash, rem) = FromBytes::from_bytes(rem)?;\n                Ok((GlobalStateIdentifier::StateRootHash(state_root_hash), rem))\n            }\n            Some(_) | None => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\nimpl ToBytes for GlobalStateIdentifier {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            GlobalStateIdentifier::BlockHash(block_hash) => {\n                writer.push(BLOCK_HASH_TAG);\n                block_hash.write_bytes(writer)?;\n            }\n            GlobalStateIdentifier::BlockHeight(block_height) => {\n                writer.push(BLOCK_HEIGHT_TAG);\n                block_height.write_bytes(writer)?;\n            }\n            GlobalStateIdentifier::StateRootHash(state_root_hash) => {\n                writer.push(STATE_ROOT_HASH_TAG);\n                state_root_hash.write_bytes(writer)?;\n            }\n        }\n        Ok(())\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                GlobalStateIdentifier::BlockHash(block_hash) => block_hash.serialized_length(),\n                GlobalStateIdentifier::BlockHeight(block_height) => {\n                    block_height.serialized_length()\n                }\n                GlobalStateIdentifier::StateRootHash(state_root_hash) => {\n           
         state_root_hash.serialized_length()\n                }\n            }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::testing::TestRng;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = GlobalStateIdentifier::random(rng);\n        bytesrepr::test_serialization_roundtrip(&val);\n    }\n}\n"
  },
  {
    "path": "types/src/stored_value/type_mismatch.rs",
    "content": "use alloc::{string::String, vec::Vec};\nuse core::fmt::{self, Display, Formatter};\n#[cfg(feature = \"std\")]\nuse std::error::Error as StdError;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::bytesrepr::{self, FromBytes, ToBytes};\n\n/// An error struct representing a type mismatch in [`StoredValue`](crate::StoredValue) operations.\n#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct TypeMismatch {\n    /// The name of the expected type.\n    expected: String,\n    /// The actual type found.\n    found: String,\n}\n\nimpl TypeMismatch {\n    /// Creates a new `TypeMismatch`.\n    pub fn new(expected: String, found: String) -> TypeMismatch {\n        TypeMismatch { expected, found }\n    }\n}\n\nimpl Display for TypeMismatch {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(\n            formatter,\n            \"Type mismatch. 
Expected {} but found {}.\",\n            self.expected, self.found\n        )\n    }\n}\n\nimpl ToBytes for TypeMismatch {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.expected.write_bytes(writer)?;\n        self.found.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.expected.serialized_length() + self.found.serialized_length()\n    }\n}\n\nimpl FromBytes for TypeMismatch {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (expected, remainder) = String::from_bytes(bytes)?;\n        let (found, remainder) = String::from_bytes(remainder)?;\n        Ok((TypeMismatch { expected, found }, remainder))\n    }\n}\n\n#[cfg(feature = \"std\")]\nimpl StdError for TypeMismatch {}\n"
  },
  {
    "path": "types/src/stored_value.rs",
    "content": "mod global_state_identifier;\nmod type_mismatch;\n\nuse alloc::{\n    boxed::Box,\n    string::{String, ToString},\n    vec::Vec,\n};\nuse core::{convert::TryFrom, fmt::Debug};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{de, ser, Deserialize, Deserializer, Serialize, Serializer};\nuse serde_bytes::ByteBuf;\n\nuse crate::{\n    account::Account,\n    addressable_entity::NamedKeyValue,\n    bytesrepr::{self, Bytes, Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    contract_messages::{MessageChecksum, MessageTopicSummary},\n    contract_wasm::ContractWasm,\n    contracts::{Contract, ContractPackage},\n    package::Package,\n    system::{\n        auction::{Bid, BidKind, EraInfo, Unbond, UnbondingPurse, WithdrawPurse},\n        prepayment::PrepaymentKind,\n    },\n    AddressableEntity, ByteCode, CLValue, DeployInfo, EntryPointValue, TransferV1,\n};\npub use global_state_identifier::GlobalStateIdentifier;\npub use type_mismatch::TypeMismatch;\n\n/// Tag used to discriminate between different variants of `StoredValue`.\n#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]\n#[repr(u8)]\npub enum StoredValueTag {\n    /// A CLValue.\n    CLValue = 0,\n    /// An account.\n    Account = 1,\n    /// Contract wasm.\n    ContractWasm = 2,\n    /// A contract.\n    Contract = 3,\n    /// A contract package.\n    ContractPackage = 4,\n    /// A version 1 transfer.\n    Transfer = 5,\n    /// Info about a deploy.\n    DeployInfo = 6,\n    /// Info about an era.\n    EraInfo = 7,\n    /// A bid.\n    Bid = 8,\n    /// Withdraw information.\n    Withdraw = 9,\n    /// Unbonding information.\n    Unbonding = 10,\n    /// An `AddressableEntity`.\n    BidKind = 11,\n    /// A `Package`.\n    Package = 12,\n    /// A record of byte code.\n    AddressableEntity = 13,\n    /// A record of byte code.\n    ByteCode = 14,\n    /// A message topic.\n    MessageTopic = 
15,\n    /// A message digest.\n    Message = 16,\n    /// A NamedKey record.\n    NamedKey = 17,\n    /// A prepayment record.\n    Prepayment = 18,\n    /// An entrypoint record.\n    EntryPoint = 19,\n    /// Raw bytes.\n    RawBytes = 20,\n}\n\n/// A value stored in Global State.\n#[allow(clippy::large_enum_variant)]\n#[derive(Eq, PartialEq, Clone, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(with = \"serde_helpers::HumanReadableDeserHelper\")\n)]\npub enum StoredValue {\n    /// A CLValue.\n    CLValue(CLValue),\n    /// An account.\n    Account(Account),\n    /// Contract wasm.\n    ContractWasm(ContractWasm),\n    /// A contract.\n    Contract(Contract),\n    /// A contract package.\n    ContractPackage(ContractPackage),\n    /// A version 1 transfer.\n    Transfer(TransferV1),\n    /// Info about a deploy.\n    DeployInfo(DeployInfo),\n    /// Info about an era.\n    EraInfo(EraInfo),\n    /// Variant that stores [`Bid`].\n    Bid(Box<Bid>),\n    /// Variant that stores withdraw information.\n    Withdraw(Vec<WithdrawPurse>),\n    /// Unbonding information.\n    Unbonding(Vec<UnbondingPurse>),\n    /// An `AddressableEntity`.\n    AddressableEntity(AddressableEntity),\n    /// Variant that stores [`BidKind`].\n    BidKind(BidKind),\n    /// A smart contract `Package`.\n    SmartContract(Package),\n    /// A record of byte code.\n    ByteCode(ByteCode),\n    /// Variant that stores a message topic.\n    MessageTopic(MessageTopicSummary),\n    /// Variant that stores a message digest.\n    Message(MessageChecksum),\n    /// A NamedKey record.\n    NamedKey(NamedKeyValue),\n    /// A prepayment record.\n    Prepayment(PrepaymentKind),\n    /// An entrypoint record.\n    EntryPoint(EntryPointValue),\n    /// Raw bytes. 
Similar to a [`crate::StoredValue::CLValue`] but does not incur overhead of a\n    /// [`crate::CLValue`] and [`crate::CLType`].\n    RawBytes(#[cfg_attr(feature = \"json-schema\", schemars(with = \"String\"))] Vec<u8>),\n}\n\nimpl StoredValue {\n    /// Returns a reference to the wrapped `CLValue` if this is a `CLValue` variant.\n    pub fn as_cl_value(&self) -> Option<&CLValue> {\n        match self {\n            StoredValue::CLValue(cl_value) => Some(cl_value),\n            _ => None,\n        }\n    }\n\n    /// Returns a reference to the wrapped `Account` if this is an `Account` variant.\n    pub fn as_account(&self) -> Option<&Account> {\n        match self {\n            StoredValue::Account(account) => Some(account),\n            _ => None,\n        }\n    }\n\n    /// Returns a reference to the wrapped `ByteCode` if this is a `ByteCode` variant.\n    pub fn as_byte_code(&self) -> Option<&ByteCode> {\n        match self {\n            StoredValue::ByteCode(byte_code) => Some(byte_code),\n            _ => None,\n        }\n    }\n\n    pub fn as_contract_wasm(&self) -> Option<&ContractWasm> {\n        match self {\n            StoredValue::ContractWasm(contract_wasm) => Some(contract_wasm),\n            _ => None,\n        }\n    }\n\n    /// Returns a reference to the wrapped `Contract` if this is a `Contract` variant.\n    pub fn as_contract(&self) -> Option<&Contract> {\n        match self {\n            StoredValue::Contract(contract) => Some(contract),\n            _ => None,\n        }\n    }\n\n    /// Returns a reference to the wrapped `Package` if this is a `Package` variant.\n    pub fn as_package(&self) -> Option<&Package> {\n        match self {\n            StoredValue::SmartContract(package) => Some(package),\n            _ => None,\n        }\n    }\n\n    /// Returns a reference to the wrapped `ContractPackage` if this is a `ContractPackage` variant.\n    pub fn as_contract_package(&self) -> Option<&ContractPackage> {\n        match self {\n 
           StoredValue::ContractPackage(package) => Some(package),\n            _ => None,\n        }\n    }\n\n    /// Returns a reference to the wrapped `TransferV1` if this is a `Transfer` variant.\n    pub fn as_transfer(&self) -> Option<&TransferV1> {\n        match self {\n            StoredValue::Transfer(transfer_v1) => Some(transfer_v1),\n            _ => None,\n        }\n    }\n\n    /// Returns a reference to the wrapped `DeployInfo` if this is a `DeployInfo` variant.\n    pub fn as_deploy_info(&self) -> Option<&DeployInfo> {\n        match self {\n            StoredValue::DeployInfo(deploy_info) => Some(deploy_info),\n            _ => None,\n        }\n    }\n\n    /// Returns a reference to the wrapped `EraInfo` if this is an `EraInfo` variant.\n    pub fn as_era_info(&self) -> Option<&EraInfo> {\n        match self {\n            StoredValue::EraInfo(era_info) => Some(era_info),\n            _ => None,\n        }\n    }\n\n    /// Returns a reference to the wrapped `Bid` if this is a `Bid` variant.\n    pub fn as_bid(&self) -> Option<&Bid> {\n        match self {\n            StoredValue::Bid(bid) => Some(bid),\n            _ => None,\n        }\n    }\n\n    /// Returns a reference to the wrapped list of `WithdrawPurse`s if this is a `Withdraw` variant.\n    pub fn as_withdraw(&self) -> Option<&Vec<WithdrawPurse>> {\n        match self {\n            StoredValue::Withdraw(withdraw_purses) => Some(withdraw_purses),\n            _ => None,\n        }\n    }\n\n    /// Returns a reference to the wrapped list of `UnbondingPurse`s if this is an `Unbonding`\n    /// variant.\n    pub fn as_unbonding(&self) -> Option<&Vec<UnbondingPurse>> {\n        match self {\n            StoredValue::Unbonding(unbonding_purses) => Some(unbonding_purses),\n            _ => None,\n        }\n    }\n\n    /// Returns a reference to the wrapped list of `UnbondingPurse`s if this is an `Unbonding`\n    /// variant.\n    pub fn as_unbond(&self) -> Option<&Unbond> {\n        
match self {\n            StoredValue::BidKind(BidKind::Unbond(unbond)) => Some(unbond),\n            _ => None,\n        }\n    }\n\n    /// Returns a reference to the wrapped `AddressableEntity` if this is an `AddressableEntity`\n    /// variant.\n    pub fn as_addressable_entity(&self) -> Option<&AddressableEntity> {\n        match self {\n            StoredValue::AddressableEntity(entity) => Some(entity),\n            _ => None,\n        }\n    }\n\n    /// Returns a reference to the wrapped `MessageTopicSummary` if this is a `MessageTopic`\n    /// variant.\n    pub fn as_message_topic_summary(&self) -> Option<&MessageTopicSummary> {\n        match self {\n            StoredValue::MessageTopic(summary) => Some(summary),\n            _ => None,\n        }\n    }\n\n    /// Returns a reference to the wrapped `MessageChecksum` if this is a `Message`\n    /// variant.\n    pub fn as_message_checksum(&self) -> Option<&MessageChecksum> {\n        match self {\n            StoredValue::Message(checksum) => Some(checksum),\n            _ => None,\n        }\n    }\n\n    /// Returns a reference to the wrapped `BidKind` if this is a `BidKind` variant.\n    pub fn as_bid_kind(&self) -> Option<&BidKind> {\n        match self {\n            StoredValue::BidKind(bid_kind) => Some(bid_kind),\n            _ => None,\n        }\n    }\n\n    /// Returns raw bytes if this is a `RawBytes` variant.\n    pub fn as_raw_bytes(&self) -> Option<&[u8]> {\n        match self {\n            StoredValue::RawBytes(bytes) => Some(bytes),\n            _ => None,\n        }\n    }\n\n    /// Returns a reference to the wrapped `EntryPointValue` if this is a `EntryPointValue` variant.\n    pub fn as_entry_point_value(&self) -> Option<&EntryPointValue> {\n        match self {\n            StoredValue::EntryPoint(entry_point) => Some(entry_point),\n            _ => None,\n        }\n    }\n\n    /// Returns the `CLValue` if this is a `CLValue` variant.\n    pub fn into_cl_value(self) -> 
Option<CLValue> {\n        match self {\n            StoredValue::CLValue(cl_value) => Some(cl_value),\n            _ => None,\n        }\n    }\n\n    /// Returns the `Account` if this is an `Account` variant.\n    pub fn into_account(self) -> Option<Account> {\n        match self {\n            StoredValue::Account(account) => Some(account),\n            _ => None,\n        }\n    }\n\n    /// Returns the `ContractWasm` if this is a `ContractWasm` variant.\n    pub fn into_contract_wasm(self) -> Option<ContractWasm> {\n        match self {\n            StoredValue::ContractWasm(contract_wasm) => Some(contract_wasm),\n            _ => None,\n        }\n    }\n\n    /// Returns the `Contract` if this is a `Contract` variant.\n    pub fn into_contract(self) -> Option<Contract> {\n        match self {\n            StoredValue::Contract(contract) => Some(contract),\n            _ => None,\n        }\n    }\n\n    /// Returns the `ContractPackage` if this is a `ContractPackage` variant.\n    pub fn into_contract_package(self) -> Option<ContractPackage> {\n        match self {\n            StoredValue::ContractPackage(contract_package) => Some(contract_package),\n            _ => None,\n        }\n    }\n\n    /// Returns the `Package` if this is a `Package` variant.\n    pub fn into_package(self) -> Option<Package> {\n        match self {\n            StoredValue::SmartContract(package) => Some(package),\n            _ => None,\n        }\n    }\n\n    /// Returns the `TransferV1` if this is a `Transfer` variant.\n    pub fn into_legacy_transfer(self) -> Option<TransferV1> {\n        match self {\n            StoredValue::Transfer(transfer_v1) => Some(transfer_v1),\n            _ => None,\n        }\n    }\n\n    /// Returns the `DeployInfo` if this is a `DeployInfo` variant.\n    pub fn into_deploy_info(self) -> Option<DeployInfo> {\n        match self {\n            StoredValue::DeployInfo(deploy_info) => Some(deploy_info),\n            _ => None,\n        }\n    
}\n\n    /// Returns the `EraInfo` if this is an `EraInfo` variant.\n    pub fn into_era_info(self) -> Option<EraInfo> {\n        match self {\n            StoredValue::EraInfo(era_info) => Some(era_info),\n            _ => None,\n        }\n    }\n\n    /// Returns the `Bid` if this is a `Bid` variant.\n    pub fn into_bid(self) -> Option<Bid> {\n        match self {\n            StoredValue::Bid(bid) => Some(*bid),\n            _ => None,\n        }\n    }\n\n    /// Returns the list of `WithdrawPurse`s if this is a `Withdraw` variant.\n    pub fn into_withdraw(self) -> Option<Vec<WithdrawPurse>> {\n        match self {\n            StoredValue::Withdraw(withdraw_purses) => Some(withdraw_purses),\n            _ => None,\n        }\n    }\n\n    /// Returns the list of `UnbondingPurse`s if this is an `Unbonding` variant.\n    pub fn into_unbonding(self) -> Option<Vec<UnbondingPurse>> {\n        match self {\n            StoredValue::Unbonding(unbonding_purses) => Some(unbonding_purses),\n            _ => None,\n        }\n    }\n\n    /// Returns the `AddressableEntity` if this is an `AddressableEntity` variant.\n    pub fn into_addressable_entity(self) -> Option<AddressableEntity> {\n        match self {\n            StoredValue::AddressableEntity(entity) => Some(entity),\n            _ => None,\n        }\n    }\n\n    /// Returns the `BidKind` if this is a `BidKind` variant.\n    pub fn into_bid_kind(self) -> Option<BidKind> {\n        match self {\n            StoredValue::BidKind(bid_kind) => Some(bid_kind),\n            _ => None,\n        }\n    }\n\n    /// Returns the `EntryPointValue` if this is a `EntryPointValue` variant.\n    pub fn into_entry_point_value(self) -> Option<EntryPointValue> {\n        match self {\n            StoredValue::EntryPoint(value) => Some(value),\n            _ => None,\n        }\n    }\n\n    /// Returns the type name of the [`StoredValue`] enum variant.\n    ///\n    /// For [`CLValue`] variants it will return the name of 
the [`CLType`](crate::cl_type::CLType)\n    pub fn type_name(&self) -> String {\n        match self {\n            StoredValue::CLValue(cl_value) => format!(\"{:?}\", cl_value.cl_type()),\n            StoredValue::Account(_) => \"Account\".to_string(),\n            StoredValue::ContractWasm(_) => \"ContractWasm\".to_string(),\n            StoredValue::Contract(_) => \"Contract\".to_string(),\n            StoredValue::ContractPackage(_) => \"ContractPackage\".to_string(),\n            StoredValue::Transfer(_) => \"Transfer\".to_string(),\n            StoredValue::DeployInfo(_) => \"DeployInfo\".to_string(),\n            StoredValue::EraInfo(_) => \"EraInfo\".to_string(),\n            StoredValue::Bid(_) => \"Bid\".to_string(),\n            StoredValue::Withdraw(_) => \"Withdraw\".to_string(),\n            StoredValue::Unbonding(_) => \"Unbonding\".to_string(),\n            StoredValue::AddressableEntity(_) => \"AddressableEntity\".to_string(),\n            StoredValue::BidKind(_) => \"BidKind\".to_string(),\n            StoredValue::ByteCode(_) => \"ByteCode\".to_string(),\n            StoredValue::SmartContract(_) => \"SmartContract\".to_string(),\n            StoredValue::MessageTopic(_) => \"MessageTopic\".to_string(),\n            StoredValue::Message(_) => \"Message\".to_string(),\n            StoredValue::NamedKey(_) => \"NamedKey\".to_string(),\n            StoredValue::Prepayment(_) => \"Prepayment\".to_string(),\n            StoredValue::EntryPoint(_) => \"EntryPoint\".to_string(),\n            StoredValue::RawBytes(_) => \"RawBytes\".to_string(),\n        }\n    }\n\n    /// Returns the tag of the `StoredValue`.\n    pub fn tag(&self) -> StoredValueTag {\n        match self {\n            StoredValue::CLValue(_) => StoredValueTag::CLValue,\n            StoredValue::Account(_) => StoredValueTag::Account,\n            StoredValue::ContractWasm(_) => StoredValueTag::ContractWasm,\n            StoredValue::ContractPackage(_) => 
StoredValueTag::ContractPackage,\n            StoredValue::Contract(_) => StoredValueTag::Contract,\n            StoredValue::Transfer(_) => StoredValueTag::Transfer,\n            StoredValue::DeployInfo(_) => StoredValueTag::DeployInfo,\n            StoredValue::EraInfo(_) => StoredValueTag::EraInfo,\n            StoredValue::Bid(_) => StoredValueTag::Bid,\n            StoredValue::Withdraw(_) => StoredValueTag::Withdraw,\n            StoredValue::Unbonding(_) => StoredValueTag::Unbonding,\n            StoredValue::AddressableEntity(_) => StoredValueTag::AddressableEntity,\n            StoredValue::BidKind(_) => StoredValueTag::BidKind,\n            StoredValue::SmartContract(_) => StoredValueTag::Package,\n            StoredValue::ByteCode(_) => StoredValueTag::ByteCode,\n            StoredValue::MessageTopic(_) => StoredValueTag::MessageTopic,\n            StoredValue::Message(_) => StoredValueTag::Message,\n            StoredValue::NamedKey(_) => StoredValueTag::NamedKey,\n            StoredValue::Prepayment(_) => StoredValueTag::Prepayment,\n            StoredValue::EntryPoint(_) => StoredValueTag::EntryPoint,\n            StoredValue::RawBytes(_) => StoredValueTag::RawBytes,\n        }\n    }\n\n    /// Returns the serialized length of the `StoredValue`.\n    pub fn into_byte_code(self) -> Option<ByteCode> {\n        match self {\n            StoredValue::ByteCode(byte_code) => Some(byte_code),\n            _ => None,\n        }\n    }\n\n    /// Returns the serialized length of the `StoredValue`.\n    pub fn into_named_key(self) -> Option<NamedKeyValue> {\n        match self {\n            StoredValue::NamedKey(named_key_value) => Some(named_key_value),\n            _ => None,\n        }\n    }\n}\n\nimpl From<CLValue> for StoredValue {\n    fn from(value: CLValue) -> StoredValue {\n        StoredValue::CLValue(value)\n    }\n}\n\nimpl From<Account> for StoredValue {\n    fn from(value: Account) -> StoredValue {\n        StoredValue::Account(value)\n    
}\n}\n\nimpl From<ContractWasm> for StoredValue {\n    fn from(value: ContractWasm) -> Self {\n        StoredValue::ContractWasm(value)\n    }\n}\n\nimpl From<ContractPackage> for StoredValue {\n    fn from(value: ContractPackage) -> Self {\n        StoredValue::ContractPackage(value)\n    }\n}\n\nimpl From<Contract> for StoredValue {\n    fn from(value: Contract) -> Self {\n        StoredValue::Contract(value)\n    }\n}\n\nimpl From<AddressableEntity> for StoredValue {\n    fn from(value: AddressableEntity) -> StoredValue {\n        StoredValue::AddressableEntity(value)\n    }\n}\n\nimpl From<Package> for StoredValue {\n    fn from(value: Package) -> StoredValue {\n        StoredValue::SmartContract(value)\n    }\n}\n\nimpl From<Bid> for StoredValue {\n    fn from(bid: Bid) -> StoredValue {\n        StoredValue::Bid(Box::new(bid))\n    }\n}\n\nimpl From<BidKind> for StoredValue {\n    fn from(bid_kind: BidKind) -> StoredValue {\n        StoredValue::BidKind(bid_kind)\n    }\n}\n\nimpl From<ByteCode> for StoredValue {\n    fn from(value: ByteCode) -> StoredValue {\n        StoredValue::ByteCode(value)\n    }\n}\n\nimpl From<EntryPointValue> for StoredValue {\n    fn from(value: EntryPointValue) -> Self {\n        StoredValue::EntryPoint(value)\n    }\n}\n\nimpl TryFrom<StoredValue> for CLValue {\n    type Error = TypeMismatch;\n\n    fn try_from(stored_value: StoredValue) -> Result<Self, Self::Error> {\n        let type_name = stored_value.type_name();\n        match stored_value {\n            StoredValue::CLValue(cl_value) => Ok(cl_value),\n            StoredValue::BidKind(bid_kind) => Ok(CLValue::from_t(bid_kind)\n                .map_err(|_| TypeMismatch::new(\"BidKind\".to_string(), type_name))?),\n            StoredValue::ContractPackage(contract_package) => Ok(CLValue::from_t(contract_package)\n                .map_err(|_error| TypeMismatch::new(\"ContractPackage\".to_string(), type_name))?),\n            _ => 
Err(TypeMismatch::new(\"StoredValue\".to_string(), type_name)),\n        }\n    }\n}\n\nimpl TryFrom<StoredValue> for Account {\n    type Error = TypeMismatch;\n\n    fn try_from(stored_value: StoredValue) -> Result<Self, Self::Error> {\n        match stored_value {\n            StoredValue::Account(account) => Ok(account),\n            _ => Err(TypeMismatch::new(\n                \"Account\".to_string(),\n                stored_value.type_name(),\n            )),\n        }\n    }\n}\n\nimpl TryFrom<StoredValue> for ContractWasm {\n    type Error = TypeMismatch;\n\n    fn try_from(stored_value: StoredValue) -> Result<Self, Self::Error> {\n        match stored_value {\n            StoredValue::ContractWasm(contract_wasm) => Ok(contract_wasm),\n            _ => Err(TypeMismatch::new(\n                \"ContractWasm\".to_string(),\n                stored_value.type_name(),\n            )),\n        }\n    }\n}\n\nimpl TryFrom<StoredValue> for ByteCode {\n    type Error = TypeMismatch;\n\n    fn try_from(stored_value: StoredValue) -> Result<Self, Self::Error> {\n        match stored_value {\n            StoredValue::ByteCode(byte_code) => Ok(byte_code),\n            _ => Err(TypeMismatch::new(\n                \"ByteCode\".to_string(),\n                stored_value.type_name(),\n            )),\n        }\n    }\n}\n\nimpl TryFrom<StoredValue> for ContractPackage {\n    type Error = TypeMismatch;\n\n    fn try_from(value: StoredValue) -> Result<Self, Self::Error> {\n        match value {\n            StoredValue::ContractPackage(contract_package) => Ok(contract_package),\n            _ => Err(TypeMismatch::new(\n                \"ContractPackage\".to_string(),\n                value.type_name(),\n            )),\n        }\n    }\n}\n\nimpl TryFrom<StoredValue> for Contract {\n    type Error = TypeMismatch;\n\n    fn try_from(stored_value: StoredValue) -> Result<Self, Self::Error> {\n        match stored_value {\n            StoredValue::Contract(contract) => 
Ok(contract),\n            _ => Err(TypeMismatch::new(\n                \"Contract\".to_string(),\n                stored_value.type_name(),\n            )),\n        }\n    }\n}\n\nimpl TryFrom<StoredValue> for Package {\n    type Error = TypeMismatch;\n\n    fn try_from(stored_value: StoredValue) -> Result<Self, Self::Error> {\n        match stored_value {\n            StoredValue::SmartContract(contract_package) => Ok(contract_package),\n            StoredValue::ContractPackage(contract_package) => Ok(contract_package.into()),\n            _ => Err(TypeMismatch::new(\n                \"ContractPackage\".to_string(),\n                stored_value.type_name(),\n            )),\n        }\n    }\n}\n\nimpl TryFrom<StoredValue> for AddressableEntity {\n    type Error = TypeMismatch;\n\n    fn try_from(stored_value: StoredValue) -> Result<Self, Self::Error> {\n        match stored_value {\n            StoredValue::AddressableEntity(contract) => Ok(contract),\n            _ => Err(TypeMismatch::new(\n                \"AddressableEntity\".to_string(),\n                stored_value.type_name(),\n            )),\n        }\n    }\n}\n\nimpl TryFrom<StoredValue> for TransferV1 {\n    type Error = TypeMismatch;\n\n    fn try_from(value: StoredValue) -> Result<Self, Self::Error> {\n        match value {\n            StoredValue::Transfer(transfer_v1) => Ok(transfer_v1),\n            _ => Err(TypeMismatch::new(\"Transfer\".to_string(), value.type_name())),\n        }\n    }\n}\n\nimpl TryFrom<StoredValue> for DeployInfo {\n    type Error = TypeMismatch;\n\n    fn try_from(value: StoredValue) -> Result<Self, Self::Error> {\n        match value {\n            StoredValue::DeployInfo(deploy_info) => Ok(deploy_info),\n            _ => Err(TypeMismatch::new(\n                \"DeployInfo\".to_string(),\n                value.type_name(),\n            )),\n        }\n    }\n}\n\nimpl TryFrom<StoredValue> for EraInfo {\n    type Error = TypeMismatch;\n\n    fn try_from(value: 
StoredValue) -> Result<Self, Self::Error> {\n        match value {\n            StoredValue::EraInfo(era_info) => Ok(era_info),\n            _ => Err(TypeMismatch::new(\"EraInfo\".to_string(), value.type_name())),\n        }\n    }\n}\n\nimpl TryFrom<StoredValue> for Bid {\n    type Error = TypeMismatch;\n\n    fn try_from(value: StoredValue) -> Result<Self, Self::Error> {\n        match value {\n            StoredValue::Bid(bid) => Ok(*bid),\n            _ => Err(TypeMismatch::new(\"Bid\".to_string(), value.type_name())),\n        }\n    }\n}\n\nimpl TryFrom<StoredValue> for BidKind {\n    type Error = TypeMismatch;\n\n    fn try_from(value: StoredValue) -> Result<Self, Self::Error> {\n        match value {\n            StoredValue::BidKind(bid_kind) => Ok(bid_kind),\n            _ => Err(TypeMismatch::new(\"BidKind\".to_string(), value.type_name())),\n        }\n    }\n}\n\nimpl TryFrom<StoredValue> for NamedKeyValue {\n    type Error = TypeMismatch;\n\n    fn try_from(value: StoredValue) -> Result<Self, Self::Error> {\n        match value {\n            StoredValue::NamedKey(named_key_value) => Ok(named_key_value),\n            _ => Err(TypeMismatch::new(\n                \"NamedKeyValue\".to_string(),\n                value.type_name(),\n            )),\n        }\n    }\n}\n\nimpl ToBytes for StoredValue {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                StoredValue::CLValue(cl_value) => cl_value.serialized_length(),\n                StoredValue::Account(account) => account.serialized_length(),\n                StoredValue::ContractWasm(contract_wasm) => contract_wasm.serialized_length(),\n                StoredValue::Contract(contract_header) => contract_header.serialized_length(),\n                
StoredValue::ContractPackage(contract_package) => {\n                    contract_package.serialized_length()\n                }\n                StoredValue::Transfer(transfer_v1) => transfer_v1.serialized_length(),\n                StoredValue::DeployInfo(deploy_info) => deploy_info.serialized_length(),\n                StoredValue::EraInfo(era_info) => era_info.serialized_length(),\n                StoredValue::Bid(bid) => bid.serialized_length(),\n                StoredValue::Withdraw(withdraw_purses) => withdraw_purses.serialized_length(),\n                StoredValue::Unbonding(unbonding_purses) => unbonding_purses.serialized_length(),\n                StoredValue::AddressableEntity(entity) => entity.serialized_length(),\n                StoredValue::BidKind(bid_kind) => bid_kind.serialized_length(),\n                StoredValue::SmartContract(package) => package.serialized_length(),\n                StoredValue::ByteCode(byte_code) => byte_code.serialized_length(),\n                StoredValue::MessageTopic(message_topic_summary) => {\n                    message_topic_summary.serialized_length()\n                }\n                StoredValue::Message(message_digest) => message_digest.serialized_length(),\n                StoredValue::NamedKey(named_key_value) => named_key_value.serialized_length(),\n                StoredValue::Prepayment(prepayment_kind) => prepayment_kind.serialized_length(),\n                StoredValue::EntryPoint(entry_point_value) => entry_point_value.serialized_length(),\n                StoredValue::RawBytes(bytes) => bytes.serialized_length(),\n            }\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        writer.push(self.tag() as u8);\n        match self {\n            StoredValue::CLValue(cl_value) => cl_value.write_bytes(writer),\n            StoredValue::Account(account) => account.write_bytes(writer),\n            StoredValue::ContractWasm(contract_wasm) => 
contract_wasm.write_bytes(writer),\n            StoredValue::Contract(contract_header) => contract_header.write_bytes(writer),\n            StoredValue::ContractPackage(contract_package) => contract_package.write_bytes(writer),\n            StoredValue::Transfer(transfer_v1) => transfer_v1.write_bytes(writer),\n            StoredValue::DeployInfo(deploy_info) => deploy_info.write_bytes(writer),\n            StoredValue::EraInfo(era_info) => era_info.write_bytes(writer),\n            StoredValue::Bid(bid) => bid.write_bytes(writer),\n            StoredValue::Withdraw(unbonding_purses) => unbonding_purses.write_bytes(writer),\n            StoredValue::Unbonding(unbonding_purses) => unbonding_purses.write_bytes(writer),\n            StoredValue::AddressableEntity(entity) => entity.write_bytes(writer),\n            StoredValue::BidKind(bid_kind) => bid_kind.write_bytes(writer),\n            StoredValue::SmartContract(package) => package.write_bytes(writer),\n            StoredValue::ByteCode(byte_code) => byte_code.write_bytes(writer),\n            StoredValue::MessageTopic(message_topic_summary) => {\n                message_topic_summary.write_bytes(writer)\n            }\n            StoredValue::Message(message_digest) => message_digest.write_bytes(writer),\n            StoredValue::NamedKey(named_key_value) => named_key_value.write_bytes(writer),\n            StoredValue::Prepayment(prepayment_kind) => prepayment_kind.write_bytes(writer),\n            StoredValue::EntryPoint(entry_point_value) => entry_point_value.write_bytes(writer),\n            StoredValue::RawBytes(bytes) => bytes.write_bytes(writer),\n        }\n    }\n}\n\nimpl FromBytes for StoredValue {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            tag if tag == StoredValueTag::CLValue as u8 => CLValue::from_bytes(remainder)\n                .map(|(cl_value, remainder)| 
(StoredValue::CLValue(cl_value), remainder)),\n            tag if tag == StoredValueTag::Account as u8 => Account::from_bytes(remainder)\n                .map(|(account, remainder)| (StoredValue::Account(account), remainder)),\n            tag if tag == StoredValueTag::ContractWasm as u8 => ContractWasm::from_bytes(remainder)\n                .map(|(contract_wasm, remainder)| {\n                    (StoredValue::ContractWasm(contract_wasm), remainder)\n                }),\n            tag if tag == StoredValueTag::ContractPackage as u8 => {\n                ContractPackage::from_bytes(remainder).map(|(contract_package, remainder)| {\n                    (StoredValue::ContractPackage(contract_package), remainder)\n                })\n            }\n            tag if tag == StoredValueTag::Contract as u8 => Contract::from_bytes(remainder)\n                .map(|(contract, remainder)| (StoredValue::Contract(contract), remainder)),\n            tag if tag == StoredValueTag::Transfer as u8 => TransferV1::from_bytes(remainder)\n                .map(|(transfer_v1, remainder)| (StoredValue::Transfer(transfer_v1), remainder)),\n            tag if tag == StoredValueTag::DeployInfo as u8 => DeployInfo::from_bytes(remainder)\n                .map(|(deploy_info, remainder)| (StoredValue::DeployInfo(deploy_info), remainder)),\n            tag if tag == StoredValueTag::EraInfo as u8 => EraInfo::from_bytes(remainder)\n                .map(|(deploy_info, remainder)| (StoredValue::EraInfo(deploy_info), remainder)),\n            tag if tag == StoredValueTag::Bid as u8 => Bid::from_bytes(remainder)\n                .map(|(bid, remainder)| (StoredValue::Bid(Box::new(bid)), remainder)),\n            tag if tag == StoredValueTag::BidKind as u8 => BidKind::from_bytes(remainder)\n                .map(|(bid_kind, remainder)| (StoredValue::BidKind(bid_kind), remainder)),\n            tag if tag == StoredValueTag::Withdraw as u8 => {\n                
Vec::<WithdrawPurse>::from_bytes(remainder).map(|(withdraw_purses, remainder)| {\n                    (StoredValue::Withdraw(withdraw_purses), remainder)\n                })\n            }\n            tag if tag == StoredValueTag::Unbonding as u8 => {\n                Vec::<UnbondingPurse>::from_bytes(remainder).map(|(unbonding_purses, remainder)| {\n                    (StoredValue::Unbonding(unbonding_purses), remainder)\n                })\n            }\n            tag if tag == StoredValueTag::AddressableEntity as u8 => {\n                AddressableEntity::from_bytes(remainder)\n                    .map(|(entity, remainder)| (StoredValue::AddressableEntity(entity), remainder))\n            }\n            tag if tag == StoredValueTag::Package as u8 => Package::from_bytes(remainder)\n                .map(|(package, remainder)| (StoredValue::SmartContract(package), remainder)),\n            tag if tag == StoredValueTag::ByteCode as u8 => ByteCode::from_bytes(remainder)\n                .map(|(byte_code, remainder)| (StoredValue::ByteCode(byte_code), remainder)),\n            tag if tag == StoredValueTag::MessageTopic as u8 => {\n                MessageTopicSummary::from_bytes(remainder).map(|(message_summary, remainder)| {\n                    (StoredValue::MessageTopic(message_summary), remainder)\n                })\n            }\n            tag if tag == StoredValueTag::Message as u8 => MessageChecksum::from_bytes(remainder)\n                .map(|(checksum, remainder)| (StoredValue::Message(checksum), remainder)),\n            tag if tag == StoredValueTag::NamedKey as u8 => NamedKeyValue::from_bytes(remainder)\n                .map(|(named_key_value, remainder)| {\n                    (StoredValue::NamedKey(named_key_value), remainder)\n                }),\n            tag if tag == StoredValueTag::EntryPoint as u8 => {\n                EntryPointValue::from_bytes(remainder).map(|(entry_point, remainder)| {\n                    
(StoredValue::EntryPoint(entry_point), remainder)\n                })\n            }\n            tag if tag == StoredValueTag::RawBytes as u8 => {\n                let (bytes, remainder) = Bytes::from_bytes(remainder)?;\n                Ok((StoredValue::RawBytes(bytes.into()), remainder))\n            }\n            _ => Err(Error::Formatting),\n        }\n    }\n}\n\npub mod serde_helpers {\n    use core::fmt::Display;\n\n    use crate::serde_helpers::contract::HumanReadableContract;\n\n    use super::*;\n\n    #[derive(Serialize)]\n    #[cfg_attr(\n        feature = \"json-schema\",\n        derive(JsonSchema),\n        schemars(\n            rename = \"StoredValue\",\n            description = \"A value stored in Global State.\"\n        )\n    )]\n    pub(crate) enum HumanReadableSerHelper<'a> {\n        CLValue(&'a CLValue),\n        Account(&'a Account),\n        ContractWasm(&'a ContractWasm),\n        Contract(HumanReadableContract),\n        ContractPackage(&'a ContractPackage),\n        Transfer(&'a TransferV1),\n        DeployInfo(&'a DeployInfo),\n        EraInfo(&'a EraInfo),\n        Bid(&'a Bid),\n        Withdraw(&'a Vec<WithdrawPurse>),\n        Unbonding(&'a Vec<UnbondingPurse>),\n        AddressableEntity(&'a AddressableEntity),\n        BidKind(&'a BidKind),\n        SmartContract(&'a Package),\n        ByteCode(&'a ByteCode),\n        MessageTopic(&'a MessageTopicSummary),\n        Message(&'a MessageChecksum),\n        NamedKey(&'a NamedKeyValue),\n        Prepayment(&'a PrepaymentKind),\n        EntryPoint(&'a EntryPointValue),\n        RawBytes(Bytes),\n    }\n\n    /// A value stored in Global State.\n    #[derive(Deserialize)]\n    #[cfg_attr(\n        feature = \"json-schema\",\n        derive(JsonSchema),\n        schemars(\n            rename = \"StoredValue\",\n            description = \"A value stored in Global State.\"\n        )\n    )]\n    pub(crate) enum HumanReadableDeserHelper {\n        /// A CLValue.\n        
CLValue(CLValue),\n        /// An account.\n        Account(Account),\n        /// Contract wasm.\n        ContractWasm(ContractWasm),\n        /// A contract.\n        Contract(HumanReadableContract),\n        /// A contract package.\n        ContractPackage(ContractPackage),\n        /// A version 1 transfer.\n        Transfer(TransferV1),\n        /// Info about a deploy.\n        DeployInfo(DeployInfo),\n        /// Info about an era.\n        EraInfo(EraInfo),\n        /// Variant that stores [`Bid`].\n        Bid(Box<Bid>),\n        /// Variant that stores withdraw information.\n        Withdraw(Vec<WithdrawPurse>),\n        /// Unbonding information.\n        Unbonding(Vec<UnbondingPurse>),\n        /// An `AddressableEntity`.\n        AddressableEntity(AddressableEntity),\n        /// Variant that stores [`BidKind`].\n        BidKind(BidKind),\n        /// A smart contract `Package`.\n        SmartContract(Package),\n        /// A record of byte code.\n        ByteCode(ByteCode),\n        /// Variant that stores a message topic.\n        MessageTopic(MessageTopicSummary),\n        /// Variant that stores a message digest.\n        Message(MessageChecksum),\n        /// A NamedKey record.\n        NamedKey(NamedKeyValue),\n        /// A prepayment record.\n        EntryPoint(EntryPointValue),\n        /// An entrypoint record.\n        Prepayment(PrepaymentKind),\n        /// Raw bytes. 
Similar to a [`crate::StoredValue::CLValue`] but does not incur overhead of\n        /// a [`crate::CLValue`] and [`crate::CLType`].\n        RawBytes(Bytes),\n    }\n\n    impl<'a> From<&'a StoredValue> for HumanReadableSerHelper<'a> {\n        fn from(stored_value: &'a StoredValue) -> Self {\n            match stored_value {\n                StoredValue::CLValue(payload) => HumanReadableSerHelper::CLValue(payload),\n                StoredValue::Account(payload) => HumanReadableSerHelper::Account(payload),\n                StoredValue::ContractWasm(payload) => HumanReadableSerHelper::ContractWasm(payload),\n                StoredValue::Contract(payload) => HumanReadableSerHelper::Contract(payload.into()),\n                StoredValue::ContractPackage(payload) => {\n                    HumanReadableSerHelper::ContractPackage(payload)\n                }\n                StoredValue::Transfer(payload) => HumanReadableSerHelper::Transfer(payload),\n                StoredValue::DeployInfo(payload) => HumanReadableSerHelper::DeployInfo(payload),\n                StoredValue::EraInfo(payload) => HumanReadableSerHelper::EraInfo(payload),\n                StoredValue::Bid(payload) => HumanReadableSerHelper::Bid(payload),\n                StoredValue::Withdraw(payload) => HumanReadableSerHelper::Withdraw(payload),\n                StoredValue::Unbonding(payload) => HumanReadableSerHelper::Unbonding(payload),\n                StoredValue::AddressableEntity(payload) => {\n                    HumanReadableSerHelper::AddressableEntity(payload)\n                }\n                StoredValue::BidKind(payload) => HumanReadableSerHelper::BidKind(payload),\n                StoredValue::SmartContract(payload) => {\n                    HumanReadableSerHelper::SmartContract(payload)\n                }\n                StoredValue::ByteCode(payload) => HumanReadableSerHelper::ByteCode(payload),\n                StoredValue::MessageTopic(message_topic_summary) => {\n                    
HumanReadableSerHelper::MessageTopic(message_topic_summary)\n                }\n                StoredValue::Message(message_digest) => {\n                    HumanReadableSerHelper::Message(message_digest)\n                }\n                StoredValue::NamedKey(payload) => HumanReadableSerHelper::NamedKey(payload),\n                StoredValue::Prepayment(payload) => HumanReadableSerHelper::Prepayment(payload),\n                StoredValue::EntryPoint(payload) => HumanReadableSerHelper::EntryPoint(payload),\n                StoredValue::RawBytes(bytes) => {\n                    HumanReadableSerHelper::RawBytes(bytes.as_slice().into())\n                }\n            }\n        }\n    }\n\n    /// Parsing error when deserializing StoredValue.\n    #[derive(Debug, Clone)]\n    pub enum StoredValueDeserializationError {\n        /// Contract not deserializable.\n        CouldNotDeserializeContract(String),\n    }\n\n    impl Display for StoredValueDeserializationError {\n        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {\n            match self {\n                StoredValueDeserializationError::CouldNotDeserializeContract(reason) => {\n                    write!(\n                        f,\n                        \"Could not deserialize StoredValue::Contract. 
Reason: {reason}\"\n                    )\n                }\n            }\n        }\n    }\n\n    impl TryFrom<HumanReadableDeserHelper> for StoredValue {\n        type Error = StoredValueDeserializationError;\n        fn try_from(helper: HumanReadableDeserHelper) -> Result<Self, Self::Error> {\n            Ok(match helper {\n                HumanReadableDeserHelper::CLValue(payload) => StoredValue::CLValue(payload),\n                HumanReadableDeserHelper::Account(payload) => StoredValue::Account(payload),\n                HumanReadableDeserHelper::ContractWasm(payload) => {\n                    StoredValue::ContractWasm(payload)\n                }\n                HumanReadableDeserHelper::Contract(payload) => {\n                    StoredValue::Contract(Contract::try_from(payload).map_err(|e| {\n                        StoredValueDeserializationError::CouldNotDeserializeContract(e.to_string())\n                    })?)\n                }\n                HumanReadableDeserHelper::ContractPackage(payload) => {\n                    StoredValue::ContractPackage(payload)\n                }\n                HumanReadableDeserHelper::Transfer(payload) => StoredValue::Transfer(payload),\n                HumanReadableDeserHelper::DeployInfo(payload) => StoredValue::DeployInfo(payload),\n                HumanReadableDeserHelper::EraInfo(payload) => StoredValue::EraInfo(payload),\n                HumanReadableDeserHelper::Bid(bid) => StoredValue::Bid(bid),\n                HumanReadableDeserHelper::Withdraw(payload) => StoredValue::Withdraw(payload),\n                HumanReadableDeserHelper::Unbonding(payload) => StoredValue::Unbonding(payload),\n                HumanReadableDeserHelper::AddressableEntity(payload) => {\n                    StoredValue::AddressableEntity(payload)\n                }\n                HumanReadableDeserHelper::BidKind(payload) => StoredValue::BidKind(payload),\n                HumanReadableDeserHelper::ByteCode(payload) => 
StoredValue::ByteCode(payload),\n                HumanReadableDeserHelper::SmartContract(payload) => {\n                    StoredValue::SmartContract(payload)\n                }\n                HumanReadableDeserHelper::MessageTopic(message_topic_summary) => {\n                    StoredValue::MessageTopic(message_topic_summary)\n                }\n                HumanReadableDeserHelper::Message(message_digest) => {\n                    StoredValue::Message(message_digest)\n                }\n                HumanReadableDeserHelper::NamedKey(payload) => StoredValue::NamedKey(payload),\n                HumanReadableDeserHelper::EntryPoint(payload) => StoredValue::EntryPoint(payload),\n                HumanReadableDeserHelper::RawBytes(bytes) => StoredValue::RawBytes(bytes.into()),\n                HumanReadableDeserHelper::Prepayment(prepayment_kind) => {\n                    StoredValue::Prepayment(prepayment_kind)\n                }\n            })\n        }\n    }\n}\n\nimpl Serialize for StoredValue {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            serde_helpers::HumanReadableSerHelper::from(self).serialize(serializer)\n        } else {\n            let bytes = self\n                .to_bytes()\n                .map_err(|error| ser::Error::custom(format!(\"{:?}\", error)))?;\n            ByteBuf::from(bytes).serialize(serializer)\n        }\n    }\n}\n\nimpl<'de> Deserialize<'de> for StoredValue {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        if deserializer.is_human_readable() {\n            let json_helper = serde_helpers::HumanReadableDeserHelper::deserialize(deserializer)?;\n            StoredValue::try_from(json_helper).map_err(de::Error::custom)\n        } else {\n            let bytes = ByteBuf::deserialize(deserializer)?.into_vec();\n            bytesrepr::deserialize::<StoredValue>(bytes)\n                
.map_err(|error| de::Error::custom(format!(\"{:?}\", error)))\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{bytesrepr, gens, StoredValue};\n    use proptest::proptest;\n    use serde_json::Value;\n\n    const STORED_VALUE_CONTRACT_RAW: &str = r#\"{\n    \"Contract\": {\n            \"contract_package_hash\": \"contract-package-e26c7f95890f99b4d476609649939910e636e175c428add9b403ebe597673005\",\n            \"contract_wasm_hash\": \"contract-wasm-8447a228c6055df42fcedb18804786abcab0e7aed00e94ad0fc0a34cd09509fb\",\n            \"named_keys\": [\n                {\n                    \"name\": \"count_v2.0\",\n                    \"key\": \"uref-53834a8313fa5eda357a75ef8eb017e1ed30bc64e6dbaa81a41abd0ffd761586-007\"\n                }\n            ],\n            \"entry_points\": [\n                {\n                    \"name\": \"counter_get\",\n                    \"args\": [],\n                    \"ret\": \"I32\",\n                    \"access\": \"Public\",\n                    \"entry_point_type\": \"Caller\"\n                },\n                {\n                    \"name\": \"counter_inc\",\n                    \"args\": [],\n                    \"ret\": \"Unit\",\n                    \"access\": \"Public\",\n                    \"entry_point_type\": \"Called\"\n                },\n                {\n                    \"name\": \"counter_zero\",\n                    \"args\": [],\n                    \"ret\": \"Unit\",\n                    \"access\": \"Public\",\n                    \"entry_point_type\": \"Factory\"\n                }\n                \n            ],\n            \"protocol_version\": \"2.0.0\"\n        }\n}\n    \"#;\n\n    const JSON_CONTRACT_NON_UNIQUE_ENTRYPOINT_NAMES_RAW: &str = r#\"{\n        \"Contract\": {\n                \"contract_package_hash\": \"contract-package-e26c7f95890f99b4d476609649939910e636e175c428add9b403ebe597673005\",\n                \"contract_wasm_hash\": 
\"contract-wasm-8447a228c6055df42fcedb18804786abcab0e7aed00e94ad0fc0a34cd09509fb\",\n                \"named_keys\": [\n                    {\n                        \"name\": \"count_v2.0\",\n                        \"key\": \"uref-53834a8313fa5eda357a75ef8eb017e1ed30bc64e6dbaa81a41abd0ffd761586-007\"\n                    }\n                ],\n                \"entry_points\": [\n                    {\n                        \"name\": \"counter_get\",\n                        \"args\": [],\n                        \"ret\": \"I32\",\n                        \"access\": \"Public\",\n                        \"entry_point_type\": \"Caller\"\n                    },\n                    {\n                        \"name\": \"counter_get\",\n                        \"args\": [],\n                        \"ret\": \"Unit\",\n                        \"access\": \"Public\",\n                        \"entry_point_type\": \"Called\"\n                    },\n                    {\n                        \"name\": \"counter_inc\",\n                        \"args\": [],\n                        \"ret\": \"Unit\",\n                        \"access\": \"Public\",\n                        \"entry_point_type\": \"Factory\"\n                    }\n                ],\n                \"protocol_version\": \"2.0.0\"\n            }\n    }\n        \"#;\n\n    const STORED_VALUE_CONTRACT_PACKAGE_RAW: &str = r#\"\n    {\n        \"ContractPackage\": {\n          \"access_key\": \"uref-024d69e50a458f337817d3d11ba95bdbdd6258ba8f2dc980644c9efdbd64945d-007\",\n          \"versions\": [\n            {\n              \"protocol_version_major\": 1,\n              \"contract_version\": 1,\n              \"contract_hash\": \"contract-1b301b49505ec5eaec1787686c54818bd60836b9301cce3f5c0237560e5a4bfd\"\n            }\n          ],\n          \"disabled_versions\": [],\n          \"groups\": [],\n          \"lock_status\": \"Unlocked\"\n        }\n    }\"#;\n\n    const 
INCORRECT_STORED_VALUE_CONTRACT_PACKAGE_RAW: &str = r#\"\n    {\n        \"ContractPackage\": {\n          \"access_key\": \"uref-024d69e50a458f337817d3d11ba95bdbdd6258ba8f2dc980644c9efdbd64945d-007\",\n          \"versions\": [\n            {\n              \"protocol_version_major\": 1,\n              \"contract_version\": 1,\n              \"contract_hash\": \"contract-1b301b49505ec5eaec1787686c54818bd60836b9301cce3f5c0237560e5a4bfd\"\n            },\n            {\n              \"protocol_version_major\": 1,\n              \"contract_version\": 1,\n              \"contract_hash\": \"contract-1b301b49505ec5eaec1787686c54818bd60836b9301cce3f5c0237560e5a4bfe\"\n            }\n          ],\n          \"disabled_versions\": [],\n          \"groups\": [],\n          \"lock_status\": \"Unlocked\"\n        }\n    }\n    \"#;\n\n    #[test]\n    fn cannot_deserialize_contract_with_non_unique_entry_point_names() {\n        let res =\n            serde_json::from_str::<StoredValue>(JSON_CONTRACT_NON_UNIQUE_ENTRYPOINT_NAMES_RAW);\n        assert!(res.is_err());\n        assert_eq!(\n            res.err().unwrap().to_string(),\n            \"Could not deserialize StoredValue::Contract. 
Reason: Non unique `entry_points.name`\"\n        )\n    }\n\n    #[test]\n    fn contract_stored_value_serializes_entry_points_to_flat_array() {\n        let value_from_raw_json = serde_json::from_str::<Value>(STORED_VALUE_CONTRACT_RAW).unwrap();\n        let deserialized = serde_json::from_str::<StoredValue>(STORED_VALUE_CONTRACT_RAW).unwrap();\n        let roundtrip_value = serde_json::to_value(&deserialized).unwrap();\n        assert_eq!(value_from_raw_json, roundtrip_value);\n    }\n\n    #[test]\n    fn contract_package_stored_value_serializes_versions_to_flat_array() {\n        let value_from_raw_json =\n            serde_json::from_str::<Value>(STORED_VALUE_CONTRACT_PACKAGE_RAW).unwrap();\n        let deserialized =\n            serde_json::from_str::<StoredValue>(STORED_VALUE_CONTRACT_PACKAGE_RAW).unwrap();\n        let roundtrip_value = serde_json::to_value(&deserialized).unwrap();\n        assert_eq!(value_from_raw_json, roundtrip_value);\n    }\n\n    #[test]\n    fn contract_package_stored_value_should_fail_on_duplicate_keys() {\n        let deserialization_res =\n            serde_json::from_str::<StoredValue>(INCORRECT_STORED_VALUE_CONTRACT_PACKAGE_RAW);\n        assert!(deserialization_res.is_err());\n        assert!(deserialization_res\n            .unwrap_err()\n            .to_string()\n            .contains(\"duplicate contract version: ContractVersionKey(1, 1)\"));\n    }\n\n    #[test]\n    fn json_serialization_of_raw_bytes() {\n        let stored_value = StoredValue::RawBytes(vec![1, 2, 3, 4]);\n        assert_eq!(\n            serde_json::to_string(&stored_value).unwrap(),\n            r#\"{\"RawBytes\":\"01020304\"}\"#\n        );\n    }\n\n    proptest! 
{\n\n        #[test]\n        fn json_serialization_roundtrip(v in gens::stored_value_arb()) {\n            let json_str = serde_json::to_string(&v).unwrap();\n            let deserialized = serde_json::from_str::<StoredValue>(&json_str).unwrap();\n            assert_eq!(v, deserialized);\n        }\n\n        #[test]\n        fn serialization_roundtrip(v in gens::stored_value_arb()) {\n            bytesrepr::test_serialization_roundtrip(&v);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/system/auction/bid/vesting.rs",
    "content": "use alloc::vec::Vec;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, Error, FromBytes, ToBytes},\n    U512,\n};\n\nconst DAY_MILLIS: usize = 24 * 60 * 60 * 1000;\nconst DAYS_IN_WEEK: usize = 7;\nconst WEEK_MILLIS: usize = DAYS_IN_WEEK * DAY_MILLIS;\n\n/// Length of total vesting schedule in days.\nconst VESTING_SCHEDULE_LENGTH_DAYS: usize = 91;\n/// Length of total vesting schedule expressed in days.\npub const VESTING_SCHEDULE_LENGTH_MILLIS: u64 =\n    VESTING_SCHEDULE_LENGTH_DAYS as u64 * DAY_MILLIS as u64;\n/// 91 days / 7 days in a week = 13 weeks\nconst LOCKED_AMOUNTS_MAX_LENGTH: usize = (VESTING_SCHEDULE_LENGTH_DAYS / DAYS_IN_WEEK) + 1;\n\n#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct VestingSchedule {\n    initial_release_timestamp_millis: u64,\n    locked_amounts: Option<[U512; LOCKED_AMOUNTS_MAX_LENGTH]>,\n}\n\nfn vesting_schedule_period_to_weeks(vesting_schedule_period_millis: u64) -> usize {\n    debug_assert_ne!(DAY_MILLIS, 0);\n    debug_assert_ne!(DAYS_IN_WEEK, 0);\n    vesting_schedule_period_millis as usize / DAY_MILLIS / DAYS_IN_WEEK\n}\n\nimpl VestingSchedule {\n    pub fn new(initial_release_timestamp_millis: u64) -> Self {\n        let locked_amounts = None;\n        VestingSchedule {\n            initial_release_timestamp_millis,\n            locked_amounts,\n        }\n    }\n\n    /// Initializes vesting schedule with a configured amount of weekly releases.\n    ///\n    /// Returns `false` if already initialized.\n    ///\n    /// # Panics\n    ///\n    /// Panics if `vesting_schedule_period_millis` represents more than 13 weeks.\n    pub fn initialize_with_schedule(\n        &mut self,\n        
staked_amount: U512,\n        vesting_schedule_period_millis: u64,\n    ) -> bool {\n        if self.locked_amounts.is_some() {\n            return false;\n        }\n\n        let locked_amounts_length =\n            vesting_schedule_period_to_weeks(vesting_schedule_period_millis);\n\n        assert!(\n            locked_amounts_length < LOCKED_AMOUNTS_MAX_LENGTH,\n            \"vesting schedule period must be less than {} weeks\",\n            LOCKED_AMOUNTS_MAX_LENGTH,\n        );\n\n        if locked_amounts_length == 0 || vesting_schedule_period_millis == 0 {\n            // Zero weeks means instant unlock of staked amount.\n            self.locked_amounts = Some(Default::default());\n            return true;\n        }\n\n        let release_period: U512 = U512::from(locked_amounts_length + 1);\n        let weekly_release = staked_amount / release_period;\n\n        let mut locked_amounts = [U512::zero(); LOCKED_AMOUNTS_MAX_LENGTH];\n        let mut remaining_locked = staked_amount;\n\n        for locked_amount in locked_amounts.iter_mut().take(locked_amounts_length) {\n            remaining_locked -= weekly_release;\n            *locked_amount = remaining_locked;\n        }\n\n        assert_eq!(\n            locked_amounts.get(locked_amounts_length),\n            Some(&U512::zero()),\n            \"first element after the schedule should be zero\"\n        );\n\n        self.locked_amounts = Some(locked_amounts);\n        true\n    }\n\n    /// Initializes weekly release for a fixed amount of 14 weeks period.\n    ///\n    /// Returns `false` if already initialized.\n    pub fn initialize(&mut self, staked_amount: U512) -> bool {\n        self.initialize_with_schedule(staked_amount, VESTING_SCHEDULE_LENGTH_MILLIS)\n    }\n\n    pub fn initial_release_timestamp_millis(&self) -> u64 {\n        self.initial_release_timestamp_millis\n    }\n\n    pub fn locked_amounts(&self) -> Option<&[U512]> {\n        let locked_amounts = self.locked_amounts.as_ref()?;\n     
   Some(locked_amounts.as_slice())\n    }\n\n    pub fn locked_amount(&self, timestamp_millis: u64) -> Option<U512> {\n        let locked_amounts = self.locked_amounts()?;\n\n        let index = {\n            let index_timestamp =\n                timestamp_millis.checked_sub(self.initial_release_timestamp_millis)?;\n            (index_timestamp as usize).checked_div(WEEK_MILLIS)?\n        };\n\n        let locked_amount = locked_amounts.get(index).cloned().unwrap_or_default();\n\n        Some(locked_amount)\n    }\n\n    /// Checks if this vesting schedule is still under the vesting\n    pub(crate) fn is_vesting(\n        &self,\n        timestamp_millis: u64,\n        vesting_schedule_period_millis: u64,\n    ) -> bool {\n        let vested_period = match self.locked_amounts() {\n            Some(locked_amounts) => {\n                let vesting_weeks = locked_amounts\n                    .iter()\n                    .position(|amount| amount.is_zero())\n                    .expect(\"vesting schedule should always have zero at the end\"); // SAFETY: at least one zero is guaranteed by `initialize_with_schedule` method\n\n                let vesting_weeks_millis =\n                    (vesting_weeks as u64).saturating_mul(WEEK_MILLIS as u64);\n\n                self.initial_release_timestamp_millis()\n                    .saturating_add(vesting_weeks_millis)\n            }\n            None => {\n                // Uninitialized yet but we know this will be the configured period of time.\n                self.initial_release_timestamp_millis()\n                    .saturating_add(vesting_schedule_period_millis)\n            }\n        };\n        timestamp_millis < vested_period\n    }\n}\n\nimpl ToBytes for [U512; LOCKED_AMOUNTS_MAX_LENGTH] {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut result)?;\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize 
{\n        self.iter().map(ToBytes::serialized_length).sum::<usize>()\n    }\n\n    #[inline]\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        for amount in self {\n            amount.write_bytes(writer)?;\n        }\n        Ok(())\n    }\n}\n\nimpl FromBytes for [U512; LOCKED_AMOUNTS_MAX_LENGTH] {\n    fn from_bytes(mut bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let mut result = [U512::zero(); LOCKED_AMOUNTS_MAX_LENGTH];\n        for value in &mut result {\n            let (amount, rem) = FromBytes::from_bytes(bytes)?;\n            *value = amount;\n            bytes = rem;\n        }\n        Ok((result, bytes))\n    }\n}\n\nimpl ToBytes for VestingSchedule {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        result.append(&mut self.initial_release_timestamp_millis.to_bytes()?);\n        result.append(&mut self.locked_amounts.to_bytes()?);\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.initial_release_timestamp_millis.serialized_length()\n            + self.locked_amounts.serialized_length()\n    }\n}\n\nimpl FromBytes for VestingSchedule {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (initial_release_timestamp_millis, bytes) = FromBytes::from_bytes(bytes)?;\n        let (locked_amounts, bytes) = FromBytes::from_bytes(bytes)?;\n        Ok((\n            VestingSchedule {\n                initial_release_timestamp_millis,\n                locked_amounts,\n            },\n            bytes,\n        ))\n    }\n}\n\n/// Generators for [`VestingSchedule`]\n#[cfg(test)]\nmod gens {\n    use proptest::{\n        array, option,\n        prelude::{Arbitrary, Strategy},\n    };\n\n    use super::VestingSchedule;\n    use crate::gens::u512_arb;\n\n    pub fn vesting_schedule_arb() -> impl Strategy<Value = VestingSchedule> {\n        
(<u64>::arbitrary(), option::of(array::uniform14(u512_arb()))).prop_map(\n            |(initial_release_timestamp_millis, locked_amounts)| VestingSchedule {\n                initial_release_timestamp_millis,\n                locked_amounts,\n            },\n        )\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use proptest::{prop_assert, proptest};\n\n    use crate::{\n        bytesrepr,\n        gens::u512_arb,\n        system::auction::bid::{\n            vesting::{gens::vesting_schedule_arb, vesting_schedule_period_to_weeks, WEEK_MILLIS},\n            VestingSchedule,\n        },\n        U512,\n    };\n\n    use super::*;\n\n    /// Default lock-in period of 90 days\n    const DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS: u64 = 90 * DAY_MILLIS as u64;\n    const RELEASE_TIMESTAMP: u64 = DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS;\n    const STAKE: u64 = 140;\n\n    const DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS: u64 = 91 * DAY_MILLIS as u64;\n    const LOCKED_AMOUNTS_LENGTH: usize =\n        (DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS as usize / WEEK_MILLIS) + 1;\n\n    #[test]\n    #[should_panic = \"vesting schedule period must be less than\"]\n    fn test_vesting_schedule_exceeding_the_maximum_should_not_panic() {\n        let future_date = 98 * DAY_MILLIS as u64;\n        let mut vesting_schedule = VestingSchedule::new(RELEASE_TIMESTAMP);\n        vesting_schedule.initialize_with_schedule(U512::from(STAKE), future_date);\n\n        assert_eq!(vesting_schedule.locked_amount(0), None);\n        assert_eq!(vesting_schedule.locked_amount(RELEASE_TIMESTAMP - 1), None);\n    }\n\n    #[test]\n    fn test_locked_amount_check_should_not_panic() {\n        let mut vesting_schedule = VestingSchedule::new(RELEASE_TIMESTAMP);\n        vesting_schedule.initialize(U512::from(STAKE));\n\n        assert_eq!(vesting_schedule.locked_amount(0), None);\n        assert_eq!(vesting_schedule.locked_amount(RELEASE_TIMESTAMP - 1), None);\n    }\n\n    #[test]\n    fn 
test_locked_with_zero_length_schedule_should_not_panic() {\n        let mut vesting_schedule = VestingSchedule::new(RELEASE_TIMESTAMP);\n        vesting_schedule.initialize_with_schedule(U512::from(STAKE), 0);\n\n        assert_eq!(vesting_schedule.locked_amount(0), None);\n        assert_eq!(vesting_schedule.locked_amount(RELEASE_TIMESTAMP - 1), None);\n    }\n\n    #[test]\n    fn test_locked_amount() {\n        let mut vesting_schedule = VestingSchedule::new(RELEASE_TIMESTAMP);\n        vesting_schedule.initialize(U512::from(STAKE));\n\n        let mut timestamp = RELEASE_TIMESTAMP;\n\n        assert_eq!(\n            vesting_schedule.locked_amount(timestamp),\n            Some(U512::from(130))\n        );\n\n        timestamp = RELEASE_TIMESTAMP + WEEK_MILLIS as u64 - 1;\n        assert_eq!(\n            vesting_schedule.locked_amount(timestamp),\n            Some(U512::from(130))\n        );\n\n        timestamp = RELEASE_TIMESTAMP + WEEK_MILLIS as u64;\n        assert_eq!(\n            vesting_schedule.locked_amount(timestamp),\n            Some(U512::from(120))\n        );\n\n        timestamp = RELEASE_TIMESTAMP + WEEK_MILLIS as u64 + 1;\n        assert_eq!(\n            vesting_schedule.locked_amount(timestamp),\n            Some(U512::from(120))\n        );\n\n        timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 2) - 1;\n        assert_eq!(\n            vesting_schedule.locked_amount(timestamp),\n            Some(U512::from(120))\n        );\n\n        timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 2);\n        assert_eq!(\n            vesting_schedule.locked_amount(timestamp),\n            Some(U512::from(110))\n        );\n\n        timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 2) + 1;\n        assert_eq!(\n            vesting_schedule.locked_amount(timestamp),\n            Some(U512::from(110))\n        );\n\n        timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 3) - 1;\n        assert_eq!(\n            
vesting_schedule.locked_amount(timestamp),\n            Some(U512::from(110))\n        );\n\n        timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 3);\n        assert_eq!(\n            vesting_schedule.locked_amount(timestamp),\n            Some(U512::from(100))\n        );\n\n        timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 3) + 1;\n        assert_eq!(\n            vesting_schedule.locked_amount(timestamp),\n            Some(U512::from(100))\n        );\n\n        timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 12) - 1;\n        assert_eq!(\n            vesting_schedule.locked_amount(timestamp),\n            Some(U512::from(20))\n        );\n\n        timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 12);\n        assert_eq!(\n            vesting_schedule.locked_amount(timestamp),\n            Some(U512::from(10))\n        );\n\n        timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 12) + 1;\n        assert_eq!(\n            vesting_schedule.locked_amount(timestamp),\n            Some(U512::from(10))\n        );\n\n        timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 13) - 1;\n        assert_eq!(\n            vesting_schedule.locked_amount(timestamp),\n            Some(U512::from(10))\n        );\n\n        timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 13);\n        assert_eq!(\n            vesting_schedule.locked_amount(timestamp),\n            Some(U512::from(0))\n        );\n\n        timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 13) + 1;\n        assert_eq!(\n            vesting_schedule.locked_amount(timestamp),\n            Some(U512::from(0))\n        );\n\n        timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 14) - 1;\n        assert_eq!(\n            vesting_schedule.locked_amount(timestamp),\n            Some(U512::from(0))\n        );\n\n        timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 14);\n        assert_eq!(\n            vesting_schedule.locked_amount(timestamp),\n    
        Some(U512::from(0))\n        );\n\n        timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 14) + 1;\n        assert_eq!(\n            vesting_schedule.locked_amount(timestamp),\n            Some(U512::from(0))\n        );\n    }\n\n    fn vested_amounts_match_initial_stake(\n        initial_stake: U512,\n        release_timestamp: u64,\n        vesting_schedule_length: u64,\n    ) -> bool {\n        let mut vesting_schedule = VestingSchedule::new(release_timestamp);\n        vesting_schedule.initialize_with_schedule(initial_stake, vesting_schedule_length);\n\n        let mut total_vested_amounts = U512::zero();\n\n        for i in 0..LOCKED_AMOUNTS_LENGTH {\n            let timestamp = release_timestamp + (WEEK_MILLIS * i) as u64;\n            if let Some(locked_amount) = vesting_schedule.locked_amount(timestamp) {\n                let current_vested_amount = initial_stake - locked_amount - total_vested_amounts;\n                total_vested_amounts += current_vested_amount\n            }\n        }\n\n        total_vested_amounts == initial_stake\n    }\n\n    #[test]\n    fn vested_amounts_conserve_stake() {\n        let stake = U512::from(1000);\n        assert!(vested_amounts_match_initial_stake(\n            stake,\n            DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS,\n            DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS,\n        ))\n    }\n\n    #[test]\n    fn is_vesting_with_default_schedule() {\n        let initial_stake = U512::from(1000u64);\n        let release_timestamp = DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS;\n        let mut vesting_schedule = VestingSchedule::new(release_timestamp);\n\n        let is_vesting_before: Vec<bool> = (0..LOCKED_AMOUNTS_LENGTH + 1)\n            .map(|i| {\n                vesting_schedule.is_vesting(\n                    release_timestamp + (WEEK_MILLIS * i) as u64,\n                    DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS,\n                )\n            })\n            .collect();\n\n        assert_eq!(\n          
  is_vesting_before,\n            vec![\n                true, true, true, true, true, true, true, true, true, true, true, true, true,\n                false, // week after is always set to zero\n                false\n            ]\n        );\n        vesting_schedule.initialize(initial_stake);\n\n        let is_vesting_after: Vec<bool> = (0..LOCKED_AMOUNTS_LENGTH + 1)\n            .map(|i| {\n                vesting_schedule.is_vesting(\n                    release_timestamp + (WEEK_MILLIS * i) as u64,\n                    DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS,\n                )\n            })\n            .collect();\n\n        assert_eq!(\n            is_vesting_after,\n            vec![\n                true, true, true, true, true, true, true, true, true, true, true, true, true,\n                false, // week after is always set to zero\n                false,\n            ]\n        );\n    }\n\n    #[test]\n    fn should_calculate_vesting_schedule_period_to_weeks() {\n        let thirteen_weeks_millis = 13 * 7 * DAY_MILLIS as u64;\n        assert_eq!(vesting_schedule_period_to_weeks(thirteen_weeks_millis), 13,);\n\n        assert_eq!(vesting_schedule_period_to_weeks(0), 0);\n        assert_eq!(\n            vesting_schedule_period_to_weeks(u64::MAX),\n            30_500_568_904usize\n        );\n    }\n\n    proptest! {\n        #[test]\n        fn prop_total_vested_amounts_conserve_stake(stake in u512_arb()) {\n            prop_assert!(vested_amounts_match_initial_stake(\n                stake,\n                DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS,\n                DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS,\n            ))\n        }\n\n        #[test]\n        fn prop_serialization_roundtrip(vesting_schedule in vesting_schedule_arb()) {\n            bytesrepr::test_serialization_roundtrip(&vesting_schedule)\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/system/auction/bid.rs",
    "content": "mod vesting;\n\nuse alloc::{collections::BTreeMap, vec::Vec};\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n#[cfg(feature = \"json-schema\")]\nuse serde_map_to_array::KeyValueJsonSchema;\nuse serde_map_to_array::{BTreeMapToArray, KeyValueLabels};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    system::auction::{\n        DelegationRate, Delegator, DelegatorBid, DelegatorKind, Error, ValidatorBid,\n    },\n    CLType, CLTyped, PublicKey, URef, U512,\n};\n\npub use vesting::{VestingSchedule, VESTING_SCHEDULE_LENGTH_MILLIS};\n\n/// An entry in the validator map.\n#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct Bid {\n    /// Validator public key.\n    validator_public_key: PublicKey,\n    /// The purse that was used for bonding.\n    bonding_purse: URef,\n    /// The amount of tokens staked by a validator (not including delegators).\n    staked_amount: U512,\n    /// Delegation rate.\n    delegation_rate: DelegationRate,\n    /// Vesting schedule for a genesis validator. 
`None` if non-genesis validator.\n    vesting_schedule: Option<VestingSchedule>,\n    /// This validator's delegators, indexed by their public keys.\n    #[serde(with = \"BTreeMapToArray::<PublicKey, Delegator, DelegatorLabels>\")]\n    delegators: BTreeMap<PublicKey, Delegator>,\n    /// `true` if validator has been \"evicted\".\n    inactive: bool,\n}\n\nimpl Bid {\n    #[allow(missing_docs)]\n    pub fn from_non_unified(\n        validator_bid: ValidatorBid,\n        delegators: BTreeMap<DelegatorKind, DelegatorBid>,\n    ) -> Self {\n        let mut map = BTreeMap::new();\n        for (kind, bid) in delegators {\n            if let DelegatorKind::PublicKey(pk) = kind {\n                let delegator = Delegator::unlocked(\n                    pk.clone(),\n                    bid.staked_amount(),\n                    *bid.bonding_purse(),\n                    bid.validator_public_key().clone(),\n                );\n                map.insert(pk, delegator);\n            }\n        }\n        Self {\n            validator_public_key: validator_bid.validator_public_key().clone(),\n            bonding_purse: *validator_bid.bonding_purse(),\n            staked_amount: validator_bid.staked_amount(),\n            delegation_rate: *validator_bid.delegation_rate(),\n            vesting_schedule: validator_bid.vesting_schedule().cloned(),\n            delegators: map,\n            inactive: validator_bid.inactive(),\n        }\n    }\n\n    /// Creates new instance of a bid with locked funds.\n    pub fn locked(\n        validator_public_key: PublicKey,\n        bonding_purse: URef,\n        staked_amount: U512,\n        delegation_rate: DelegationRate,\n        release_timestamp_millis: u64,\n    ) -> Self {\n        let vesting_schedule = Some(VestingSchedule::new(release_timestamp_millis));\n        let delegators = BTreeMap::new();\n        let inactive = false;\n        Self {\n            validator_public_key,\n            bonding_purse,\n            
staked_amount,\n            delegation_rate,\n            vesting_schedule,\n            delegators,\n            inactive,\n        }\n    }\n\n    /// Creates new instance of a bid with unlocked funds.\n    pub fn unlocked(\n        validator_public_key: PublicKey,\n        bonding_purse: URef,\n        staked_amount: U512,\n        delegation_rate: DelegationRate,\n    ) -> Self {\n        let vesting_schedule = None;\n        let delegators = BTreeMap::new();\n        let inactive = false;\n        Self {\n            validator_public_key,\n            bonding_purse,\n            staked_amount,\n            delegation_rate,\n            vesting_schedule,\n            delegators,\n            inactive,\n        }\n    }\n\n    /// Creates a new inactive instance of a bid with 0 staked amount.\n    pub fn empty(validator_public_key: PublicKey, bonding_purse: URef) -> Self {\n        let vesting_schedule = None;\n        let delegators = BTreeMap::new();\n        let inactive = true;\n        let staked_amount = 0.into();\n        let delegation_rate = Default::default();\n        Self {\n            validator_public_key,\n            bonding_purse,\n            staked_amount,\n            delegation_rate,\n            vesting_schedule,\n            delegators,\n            inactive,\n        }\n    }\n\n    /// Gets the validator public key of the provided bid\n    pub fn validator_public_key(&self) -> &PublicKey {\n        &self.validator_public_key\n    }\n\n    /// Gets the bonding purse of the provided bid\n    pub fn bonding_purse(&self) -> &URef {\n        &self.bonding_purse\n    }\n\n    /// Checks if a bid is still locked under a vesting schedule.\n    ///\n    /// Returns true if a timestamp falls below the initial lockup period + 91 days release\n    /// schedule, otherwise false.\n    pub fn is_locked(&self, timestamp_millis: u64) -> bool {\n        self.is_locked_with_vesting_schedule(timestamp_millis, VESTING_SCHEDULE_LENGTH_MILLIS)\n    }\n\n    
/// Checks if a bid is still locked under a vesting schedule.\n    ///\n    /// Returns true if a timestamp falls below the initial lockup period + 91 days release\n    /// schedule, otherwise false.\n    pub fn is_locked_with_vesting_schedule(\n        &self,\n        timestamp_millis: u64,\n        vesting_schedule_period_millis: u64,\n    ) -> bool {\n        match &self.vesting_schedule {\n            Some(vesting_schedule) => {\n                vesting_schedule.is_vesting(timestamp_millis, vesting_schedule_period_millis)\n            }\n            None => false,\n        }\n    }\n\n    /// Gets the staked amount of the provided bid\n    pub fn staked_amount(&self) -> &U512 {\n        &self.staked_amount\n    }\n\n    /// Gets the staked amount of the provided bid\n    pub fn staked_amount_mut(&mut self) -> &mut U512 {\n        &mut self.staked_amount\n    }\n\n    /// Gets the delegation rate of the provided bid\n    pub fn delegation_rate(&self) -> &DelegationRate {\n        &self.delegation_rate\n    }\n\n    /// Returns a reference to the vesting schedule of the provided bid.  `None` if a non-genesis\n    /// validator.\n    pub fn vesting_schedule(&self) -> Option<&VestingSchedule> {\n        self.vesting_schedule.as_ref()\n    }\n\n    /// Returns a mutable reference to the vesting schedule of the provided bid.  
`None` if a\n    /// non-genesis validator.\n    pub fn vesting_schedule_mut(&mut self) -> Option<&mut VestingSchedule> {\n        self.vesting_schedule.as_mut()\n    }\n\n    /// Returns a reference to the delegators of the provided bid\n    pub fn delegators(&self) -> &BTreeMap<PublicKey, Delegator> {\n        &self.delegators\n    }\n\n    /// Returns a mutable reference to the delegators of the provided bid\n    pub fn delegators_mut(&mut self) -> &mut BTreeMap<PublicKey, Delegator> {\n        &mut self.delegators\n    }\n\n    /// Returns `true` if validator is inactive\n    pub fn inactive(&self) -> bool {\n        self.inactive\n    }\n\n    /// Decreases the stake of the provided bid\n    pub fn decrease_stake(\n        &mut self,\n        amount: U512,\n        era_end_timestamp_millis: u64,\n    ) -> Result<U512, Error> {\n        let updated_staked_amount = self\n            .staked_amount\n            .checked_sub(amount)\n            .ok_or(Error::UnbondTooLarge)?;\n\n        let vesting_schedule = match self.vesting_schedule.as_ref() {\n            Some(vesting_schedule) => vesting_schedule,\n            None => {\n                self.staked_amount = updated_staked_amount;\n                return Ok(updated_staked_amount);\n            }\n        };\n\n        match vesting_schedule.locked_amount(era_end_timestamp_millis) {\n            Some(locked_amount) if updated_staked_amount < locked_amount => {\n                Err(Error::ValidatorFundsLocked)\n            }\n            None => {\n                // If `None`, then the locked amounts table has yet to be initialized (likely\n                // pre-90 day mark)\n                Err(Error::ValidatorFundsLocked)\n            }\n            Some(_) => {\n                self.staked_amount = updated_staked_amount;\n                Ok(updated_staked_amount)\n            }\n        }\n    }\n\n    /// Increases the stake of the provided bid\n    pub fn increase_stake(&mut self, amount: U512) -> 
Result<U512, Error> {\n        let updated_staked_amount = self\n            .staked_amount\n            .checked_add(amount)\n            .ok_or(Error::InvalidAmount)?;\n\n        self.staked_amount = updated_staked_amount;\n\n        Ok(updated_staked_amount)\n    }\n\n    /// Updates the delegation rate of the provided bid\n    pub fn with_delegation_rate(&mut self, delegation_rate: DelegationRate) -> &mut Self {\n        self.delegation_rate = delegation_rate;\n        self\n    }\n\n    /// Initializes the vesting schedule of provided bid if the provided timestamp is greater than\n    /// or equal to the bid's initial release timestamp and the bid is owned by a genesis\n    /// validator. This method initializes with default 14 week vesting schedule.\n    ///\n    /// Returns `true` if the provided bid's vesting schedule was initialized.\n    pub fn process(&mut self, timestamp_millis: u64) -> bool {\n        self.process_with_vesting_schedule(timestamp_millis, VESTING_SCHEDULE_LENGTH_MILLIS)\n    }\n\n    /// Initializes the vesting schedule of provided bid if the provided timestamp is greater than\n    /// or equal to the bid's initial release timestamp and the bid is owned by a genesis\n    /// validator.\n    ///\n    /// Returns `true` if the provided bid's vesting schedule was initialized.\n    pub fn process_with_vesting_schedule(\n        &mut self,\n        timestamp_millis: u64,\n        vesting_schedule_period_millis: u64,\n    ) -> bool {\n        // Put timestamp-sensitive processing logic in here\n        let staked_amount = self.staked_amount;\n        let vesting_schedule = match self.vesting_schedule_mut() {\n            Some(vesting_schedule) => vesting_schedule,\n            None => return false,\n        };\n        if timestamp_millis < vesting_schedule.initial_release_timestamp_millis() {\n            return false;\n        }\n\n        let mut initialized = false;\n\n        if vesting_schedule.initialize_with_schedule(staked_amount, 
vesting_schedule_period_millis)\n        {\n            initialized = true;\n        }\n\n        for delegator in self.delegators_mut().values_mut() {\n            let staked_amount = delegator.staked_amount();\n            if let Some(vesting_schedule) = delegator.vesting_schedule_mut() {\n                if timestamp_millis >= vesting_schedule.initial_release_timestamp_millis()\n                    && vesting_schedule\n                        .initialize_with_schedule(staked_amount, vesting_schedule_period_millis)\n                {\n                    initialized = true;\n                }\n            }\n        }\n\n        initialized\n    }\n\n    /// Sets given bid's `inactive` field to `false`\n    pub fn activate(&mut self) -> bool {\n        self.inactive = false;\n        false\n    }\n\n    /// Sets given bid's `inactive` field to `true`\n    pub fn deactivate(&mut self) -> bool {\n        self.inactive = true;\n        true\n    }\n\n    /// Returns the total staked amount of validator + all delegators\n    pub fn total_staked_amount(&self) -> Result<U512, Error> {\n        self.delegators\n            .iter()\n            .try_fold(U512::zero(), |a, (_, b)| a.checked_add(b.staked_amount()))\n            .and_then(|delegators_sum| delegators_sum.checked_add(*self.staked_amount()))\n            .ok_or(Error::InvalidAmount)\n    }\n}\n\nimpl CLTyped for Bid {\n    fn cl_type() -> CLType {\n        CLType::Any\n    }\n}\n\nimpl ToBytes for Bid {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.validator_public_key.serialized_length()\n            + self.bonding_purse.serialized_length()\n            + self.staked_amount.serialized_length()\n            + self.delegation_rate.serialized_length()\n            + self.vesting_schedule.serialized_length()\n  
          + self.delegators.serialized_length()\n            + self.inactive.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.validator_public_key.write_bytes(writer)?;\n        self.bonding_purse.write_bytes(writer)?;\n        self.staked_amount.write_bytes(writer)?;\n        self.delegation_rate.write_bytes(writer)?;\n        self.vesting_schedule.write_bytes(writer)?;\n        self.delegators.write_bytes(writer)?;\n        self.inactive.write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for Bid {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (validator_public_key, bytes) = FromBytes::from_bytes(bytes)?;\n        let (bonding_purse, bytes) = FromBytes::from_bytes(bytes)?;\n        let (staked_amount, bytes) = FromBytes::from_bytes(bytes)?;\n        let (delegation_rate, bytes) = FromBytes::from_bytes(bytes)?;\n        let (vesting_schedule, bytes) = FromBytes::from_bytes(bytes)?;\n        let (delegators, bytes) = FromBytes::from_bytes(bytes)?;\n        let (inactive, bytes) = FromBytes::from_bytes(bytes)?;\n        Ok((\n            Bid {\n                validator_public_key,\n                bonding_purse,\n                staked_amount,\n                delegation_rate,\n                vesting_schedule,\n                delegators,\n                inactive,\n            },\n            bytes,\n        ))\n    }\n}\n\nimpl Display for Bid {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(\n            formatter,\n            \"bid {{ bonding purse {}, staked {}, delegation rate {}, delegators {{\",\n            self.bonding_purse, self.staked_amount, self.delegation_rate\n        )?;\n\n        let count = self.delegators.len();\n        for (index, delegator) in self.delegators.values().enumerate() {\n            write!(\n                formatter,\n                \"{}{}\",\n                
delegator,\n                if index + 1 == count { \"\" } else { \", \" }\n            )?;\n        }\n\n        write!(\n            formatter,\n            \"}}, is {}inactive }}\",\n            if self.inactive { \"\" } else { \"not \" }\n        )\n    }\n}\n\nstruct DelegatorLabels;\n\nimpl KeyValueLabels for DelegatorLabels {\n    const KEY: &'static str = \"delegator_public_key\";\n    const VALUE: &'static str = \"delegator\";\n}\n\n#[cfg(feature = \"json-schema\")]\nimpl KeyValueJsonSchema for DelegatorLabels {\n    const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some(\"PublicKeyAndDelegator\");\n    const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> =\n        Some(\"A delegator associated with the given validator.\");\n    const JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> =\n        Some(\"The public key of the delegator.\");\n    const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> = Some(\"The delegator details.\");\n}\n\n#[cfg(test)]\nmod tests {\n    use alloc::collections::BTreeMap;\n\n    use crate::{\n        bytesrepr,\n        system::auction::{bid::VestingSchedule, Bid, DelegationRate, Delegator},\n        AccessRights, PublicKey, SecretKey, URef, U512,\n    };\n\n    const WEEK_MILLIS: u64 = 7 * 24 * 60 * 60 * 1000;\n    const TEST_VESTING_SCHEDULE_LENGTH_MILLIS: u64 = 7 * WEEK_MILLIS;\n\n    #[test]\n    fn serialization_roundtrip() {\n        let founding_validator = Bid {\n            validator_public_key: PublicKey::from(\n                &SecretKey::ed25519_from_bytes([0u8; SecretKey::ED25519_LENGTH]).unwrap(),\n            ),\n            bonding_purse: URef::new([42; 32], AccessRights::READ_ADD_WRITE),\n            staked_amount: U512::one(),\n            delegation_rate: DelegationRate::MAX,\n            vesting_schedule: Some(VestingSchedule::default()),\n            delegators: BTreeMap::default(),\n            inactive: true,\n        };\n        bytesrepr::test_serialization_roundtrip(&founding_validator);\n    
}\n\n    #[test]\n    fn should_immediately_initialize_unlock_amounts() {\n        const TIMESTAMP_MILLIS: u64 = 0;\n\n        let validator_pk: PublicKey = (&SecretKey::ed25519_from_bytes([42; 32]).unwrap()).into();\n\n        let validator_release_timestamp = TIMESTAMP_MILLIS;\n        let vesting_schedule_period_millis = TIMESTAMP_MILLIS;\n        let validator_bonding_purse = URef::new([42; 32], AccessRights::ADD);\n        let validator_staked_amount = U512::from(1000);\n        let validator_delegation_rate = 0;\n\n        let mut bid = Bid::locked(\n            validator_pk,\n            validator_bonding_purse,\n            validator_staked_amount,\n            validator_delegation_rate,\n            validator_release_timestamp,\n        );\n\n        assert!(bid.process_with_vesting_schedule(\n            validator_release_timestamp,\n            vesting_schedule_period_millis,\n        ));\n        assert!(!bid.is_locked_with_vesting_schedule(\n            validator_release_timestamp,\n            vesting_schedule_period_millis\n        ));\n    }\n\n    #[test]\n    fn should_initialize_delegators_different_timestamps() {\n        const TIMESTAMP_MILLIS: u64 = WEEK_MILLIS;\n\n        let validator_pk: PublicKey = (&SecretKey::ed25519_from_bytes([42; 32]).unwrap()).into();\n\n        let delegator_1_pk: PublicKey = (&SecretKey::ed25519_from_bytes([43; 32]).unwrap()).into();\n        let delegator_2_pk: PublicKey = (&SecretKey::ed25519_from_bytes([44; 32]).unwrap()).into();\n\n        let validator_release_timestamp = TIMESTAMP_MILLIS;\n        let validator_bonding_purse = URef::new([42; 32], AccessRights::ADD);\n        let validator_staked_amount = U512::from(1000);\n        let validator_delegation_rate = 0;\n\n        let delegator_1_release_timestamp = TIMESTAMP_MILLIS + 1;\n        let delegator_1_bonding_purse = URef::new([52; 32], AccessRights::ADD);\n        let delegator_1_staked_amount = U512::from(2000);\n\n        let 
delegator_2_release_timestamp = TIMESTAMP_MILLIS + 2;\n        let delegator_2_bonding_purse = URef::new([62; 32], AccessRights::ADD);\n        let delegator_2_staked_amount = U512::from(3000);\n\n        let delegator_1 = Delegator::locked(\n            delegator_1_pk.clone(),\n            delegator_1_staked_amount,\n            delegator_1_bonding_purse,\n            validator_pk.clone(),\n            delegator_1_release_timestamp,\n        );\n\n        let delegator_2 = Delegator::locked(\n            delegator_2_pk.clone(),\n            delegator_2_staked_amount,\n            delegator_2_bonding_purse,\n            validator_pk.clone(),\n            delegator_2_release_timestamp,\n        );\n\n        let mut bid = Bid::locked(\n            validator_pk,\n            validator_bonding_purse,\n            validator_staked_amount,\n            validator_delegation_rate,\n            validator_release_timestamp,\n        );\n\n        assert!(!bid.process_with_vesting_schedule(\n            validator_release_timestamp - 1,\n            TEST_VESTING_SCHEDULE_LENGTH_MILLIS\n        ));\n\n        {\n            let delegators = bid.delegators_mut();\n\n            delegators.insert(delegator_1_pk.clone(), delegator_1);\n            delegators.insert(delegator_2_pk.clone(), delegator_2);\n        }\n\n        assert!(bid.process_with_vesting_schedule(\n            delegator_1_release_timestamp,\n            TEST_VESTING_SCHEDULE_LENGTH_MILLIS\n        ));\n\n        let delegator_1_updated_1 = bid\n            .delegators()\n            .get(&delegator_1_pk.clone())\n            .cloned()\n            .unwrap();\n        assert!(delegator_1_updated_1\n            .vesting_schedule()\n            .unwrap()\n            .locked_amounts()\n            .is_some());\n\n        let delegator_2_updated_1 = bid\n            .delegators()\n            .get(&delegator_2_pk.clone())\n            .cloned()\n            .unwrap();\n        assert!(delegator_2_updated_1\n        
    .vesting_schedule()\n            .unwrap()\n            .locked_amounts()\n            .is_none());\n\n        assert!(bid.process_with_vesting_schedule(\n            delegator_2_release_timestamp,\n            TEST_VESTING_SCHEDULE_LENGTH_MILLIS\n        ));\n\n        let delegator_1_updated_2 = bid\n            .delegators()\n            .get(&delegator_1_pk.clone())\n            .cloned()\n            .unwrap();\n        assert!(delegator_1_updated_2\n            .vesting_schedule()\n            .unwrap()\n            .locked_amounts()\n            .is_some());\n        // Delegator 1 is already initialized and did not change after 2nd Bid::process\n        assert_eq!(delegator_1_updated_1, delegator_1_updated_2);\n\n        let delegator_2_updated_2 = bid\n            .delegators()\n            .get(&delegator_2_pk.clone())\n            .cloned()\n            .unwrap();\n        assert!(delegator_2_updated_2\n            .vesting_schedule()\n            .unwrap()\n            .locked_amounts()\n            .is_some());\n\n        // Delegator 2 is different compared to first Bid::process\n        assert_ne!(delegator_2_updated_1, delegator_2_updated_2);\n\n        // Validator initialized, and all delegators initialized\n        assert!(!bid.process_with_vesting_schedule(\n            delegator_2_release_timestamp + 1,\n            TEST_VESTING_SCHEDULE_LENGTH_MILLIS\n        ));\n    }\n}\n\n#[cfg(test)]\nmod prop_tests {\n    use proptest::prelude::*;\n\n    use crate::{bytesrepr, gens};\n\n    proptest! {\n        #[test]\n        fn test_unified_bid(bid in gens::unified_bid_arb(0..3)) {\n            bytesrepr::test_serialization_roundtrip(&bid);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/system/auction/bid_addr.rs",
    "content": "use crate::{\n    account::{AccountHash, ACCOUNT_HASH_LENGTH},\n    bytesrepr,\n    bytesrepr::{FromBytes, ToBytes},\n    system::auction::{error::Error, DelegatorKind},\n    EraId, Key, KeyTag, PublicKey, URefAddr,\n};\nuse alloc::vec::Vec;\nuse core::fmt::{Debug, Display, Formatter};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nconst UNIFIED_TAG: u8 = 0;\nconst VALIDATOR_TAG: u8 = 1;\nconst DELEGATED_ACCOUNT_TAG: u8 = 2;\nconst DELEGATED_PURSE_TAG: u8 = 3;\nconst CREDIT_TAG: u8 = 4;\nconst RESERVATION_ACCOUNT_TAG: u8 = 5;\nconst RESERVATION_PURSE_TAG: u8 = 6;\nconst UNBOND_ACCOUNT_TAG: u8 = 7;\nconst UNBOND_PURSE_TAG: u8 = 8;\nconst VALIDATOR_REV_PURSE_TAG: u8 = 9;\n\n/// Serialization tag for BidAddr variants.\n#[derive(\n    Debug, Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize,\n)]\n#[repr(u8)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub enum BidAddrTag {\n    /// BidAddr for legacy unified bid.\n    Unified = UNIFIED_TAG,\n    /// BidAddr for validator bid.\n    #[default]\n    Validator = VALIDATOR_TAG,\n    /// BidAddr for delegated account bid.\n    DelegatedAccount = DELEGATED_ACCOUNT_TAG,\n    /// BidAddr for delegated purse bid.\n    DelegatedPurse = DELEGATED_PURSE_TAG,\n\n    /// BidAddr for auction credit.\n    Credit = CREDIT_TAG,\n\n    /// BidAddr for reserved delegation account bid.\n    ReservedDelegationAccount = RESERVATION_ACCOUNT_TAG,\n    /// BidAddr for reserved delegation purse bid.\n    ReservedDelegationPurse = RESERVATION_PURSE_TAG,\n    /// BidAddr for unbonding accounts.\n    UnbondAccount = UNBOND_ACCOUNT_TAG,\n    /// BidAddr for unbonding purses.\n    UnbondPurse = 
UNBOND_PURSE_TAG,\n    /// BidAddr for reverse validator look up.\n    ValidatorRev = VALIDATOR_REV_PURSE_TAG,\n}\n\nimpl Display for BidAddrTag {\n    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {\n        let tag = match self {\n            BidAddrTag::Unified => UNIFIED_TAG,\n            BidAddrTag::Validator => VALIDATOR_TAG,\n            BidAddrTag::DelegatedAccount => DELEGATED_ACCOUNT_TAG,\n            BidAddrTag::DelegatedPurse => DELEGATED_PURSE_TAG,\n\n            BidAddrTag::Credit => CREDIT_TAG,\n            BidAddrTag::ReservedDelegationAccount => RESERVATION_ACCOUNT_TAG,\n            BidAddrTag::ReservedDelegationPurse => RESERVATION_PURSE_TAG,\n            BidAddrTag::UnbondAccount => UNBOND_ACCOUNT_TAG,\n            BidAddrTag::UnbondPurse => UNBOND_PURSE_TAG,\n            BidAddrTag::ValidatorRev => VALIDATOR_REV_PURSE_TAG,\n        };\n        write!(f, \"{}\", base16::encode_lower(&[tag]))\n    }\n}\n\nimpl BidAddrTag {\n    /// The length in bytes of a [`BidAddrTag`].\n    pub const BID_ADDR_TAG_LENGTH: usize = 1;\n\n    /// Attempts to map `BidAddrTag` from a u8.\n    pub fn try_from_u8(value: u8) -> Option<Self> {\n        // TryFrom requires std, so doing this instead.\n        if value == UNIFIED_TAG {\n            return Some(BidAddrTag::Unified);\n        }\n        if value == VALIDATOR_TAG {\n            return Some(BidAddrTag::Validator);\n        }\n        if value == DELEGATED_ACCOUNT_TAG {\n            return Some(BidAddrTag::DelegatedAccount);\n        }\n        if value == DELEGATED_PURSE_TAG {\n            return Some(BidAddrTag::DelegatedPurse);\n        }\n\n        if value == CREDIT_TAG {\n            return Some(BidAddrTag::Credit);\n        }\n        if value == RESERVATION_ACCOUNT_TAG {\n            return Some(BidAddrTag::ReservedDelegationAccount);\n        }\n        if value == RESERVATION_PURSE_TAG {\n            return Some(BidAddrTag::ReservedDelegationPurse);\n        }\n        if value == 
UNBOND_ACCOUNT_TAG {\n            return Some(BidAddrTag::UnbondAccount);\n        }\n        if value == UNBOND_PURSE_TAG {\n            return Some(BidAddrTag::UnbondPurse);\n        }\n        if value == VALIDATOR_REV_PURSE_TAG {\n            return Some(BidAddrTag::ValidatorRev);\n        }\n        None\n    }\n}\n\n/// Bid Address\n#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub enum BidAddr {\n    /// Unified BidAddr.\n    Unified(AccountHash),\n    /// Validator BidAddr.\n    Validator(AccountHash),\n    /// Delegated account BidAddr.\n    DelegatedAccount {\n        /// The validator addr.\n        validator: AccountHash,\n        /// The delegator addr.\n        delegator: AccountHash,\n    },\n    /// Delegated purse BidAddr.\n    DelegatedPurse {\n        /// The validator addr.\n        validator: AccountHash,\n        /// The delegated purse addr.\n        delegator: URefAddr,\n    },\n    /// Validator credit BidAddr.\n    Credit {\n        /// The validator addr.\n        validator: AccountHash,\n        /// The era id.\n        era_id: EraId,\n    },\n    /// Reserved delegation account BidAddr\n    ReservedDelegationAccount {\n        /// The validator addr.\n        validator: AccountHash,\n        /// The delegator addr.\n        delegator: AccountHash,\n    },\n    /// Reserved delegation purse BidAddr\n    ReservedDelegationPurse {\n        /// The validator addr.\n        validator: AccountHash,\n        /// The delegated purse addr.\n        delegator: URefAddr,\n    },\n    UnbondAccount {\n        /// The validator.\n        validator: AccountHash,\n        /// The unbonder.\n        unbonder: AccountHash,\n    },\n    UnbondPurse {\n        /// The validator.\n        validator: AccountHash,\n        /// The unbonder.\n        unbonder: URefAddr,\n    },\n    /// Validator BidAddr 
for reverse look up.\n    /// For instance, in the case of a changed public key.\n    ValidatorRev(AccountHash),\n}\n\nimpl BidAddr {\n    /// The length in bytes of a [`BidAddr`] for a validator bid.\n    pub const VALIDATOR_BID_ADDR_LENGTH: usize =\n        ACCOUNT_HASH_LENGTH + BidAddrTag::BID_ADDR_TAG_LENGTH;\n\n    /// The length in bytes of a [`BidAddr`] for a delegator bid.\n    pub const DELEGATOR_BID_ADDR_LENGTH: usize =\n        (ACCOUNT_HASH_LENGTH * 2) + BidAddrTag::BID_ADDR_TAG_LENGTH;\n\n    /// Constructs a new [`BidAddr`] instance from a validator's [`AccountHash`].\n    pub const fn new_validator_addr(validator: [u8; ACCOUNT_HASH_LENGTH]) -> Self {\n        BidAddr::Validator(AccountHash::new(validator))\n    }\n\n    /// Constructs a new [`BidAddr`] instance from a validator's [`AccountHash`].\n    pub const fn new_validator_rev_addr(validator: [u8; ACCOUNT_HASH_LENGTH]) -> Self {\n        BidAddr::ValidatorRev(AccountHash::new(validator))\n    }\n\n    /// Constructs a new [`BidAddr`] instance from a validator's [`PublicKey`].\n    pub fn new_validator_addr_from_public_key(validator_public_key: PublicKey) -> Self {\n        BidAddr::Validator(validator_public_key.to_account_hash())\n    }\n\n    /// Constructs a new [`BidAddr`] instance from a validator's [`PublicKey`].\n    pub fn new_validator_rev_addr_from_public_key(validator_public_key: PublicKey) -> Self {\n        BidAddr::ValidatorRev(validator_public_key.to_account_hash())\n    }\n\n    /// Constructs a new [`BidAddr::DelegatedAccount`] instance from the [`AccountHash`] pair of a\n    /// validator and a delegator.\n    pub const fn new_delegator_account_addr(\n        pair: ([u8; ACCOUNT_HASH_LENGTH], [u8; ACCOUNT_HASH_LENGTH]),\n    ) -> Self {\n        BidAddr::DelegatedAccount {\n            validator: AccountHash::new(pair.0),\n            delegator: AccountHash::new(pair.1),\n        }\n    }\n\n    /// Constructs a new [`BidAddr::ReservedDelegationAccount`] instance from the 
[`AccountHash`]\n    /// pair of a validator and a delegator.\n    pub const fn new_reservation_account_addr(\n        pair: ([u8; ACCOUNT_HASH_LENGTH], [u8; ACCOUNT_HASH_LENGTH]),\n    ) -> Self {\n        BidAddr::ReservedDelegationAccount {\n            validator: AccountHash::new(pair.0),\n            delegator: AccountHash::new(pair.1),\n        }\n    }\n\n    #[allow(missing_docs)]\n    pub const fn legacy(validator: [u8; ACCOUNT_HASH_LENGTH]) -> Self {\n        BidAddr::Unified(AccountHash::new(validator))\n    }\n\n    /// Create a new instance of a [`BidAddr`].\n    pub fn new_delegator_kind_relaxed(\n        validator: AccountHash,\n        delegator_kind: &DelegatorKind,\n    ) -> Self {\n        match delegator_kind {\n            DelegatorKind::PublicKey(pk) => BidAddr::DelegatedAccount {\n                validator,\n                delegator: pk.to_account_hash(),\n            },\n            DelegatorKind::Purse(addr) => BidAddr::DelegatedPurse {\n                validator,\n                delegator: *addr,\n            },\n        }\n    }\n\n    /// Create a new instance of a [`BidAddr`].\n    pub fn new_delegator_kind(validator: &PublicKey, delegator_kind: &DelegatorKind) -> Self {\n        Self::new_delegator_kind_relaxed(validator.to_account_hash(), delegator_kind)\n    }\n\n    /// Create a new instance of a [`BidAddr`] for delegator unbonds.\n    pub fn new_delegator_unbond_relaxed(\n        validator: AccountHash,\n        delegator_kind: &DelegatorKind,\n    ) -> Self {\n        match &delegator_kind {\n            DelegatorKind::PublicKey(pk) => BidAddr::UnbondAccount {\n                validator,\n                unbonder: pk.to_account_hash(),\n            },\n            DelegatorKind::Purse(addr) => BidAddr::UnbondPurse {\n                validator,\n                unbonder: *addr,\n            },\n        }\n    }\n\n    /// Create a new instance of a [`BidAddr`] for delegator unbonds.\n    pub fn new_delegator_unbond(validator: 
&PublicKey, delegator_kind: &DelegatorKind) -> Self {\n        Self::new_delegator_unbond_relaxed(validator.to_account_hash(), delegator_kind)\n    }\n\n    /// Create a new instance of a [`BidAddr`].\n    pub fn new_from_public_keys(\n        validator: &PublicKey,\n        maybe_delegator: Option<&PublicKey>,\n    ) -> Self {\n        if let Some(delegator) = maybe_delegator {\n            BidAddr::DelegatedAccount {\n                validator: AccountHash::from(validator),\n                delegator: AccountHash::from(delegator),\n            }\n        } else {\n            BidAddr::Validator(AccountHash::from(validator))\n        }\n    }\n\n    /// Create a new instance of a [`BidAddr`].\n    pub fn new_purse_delegation(validator: &PublicKey, delegator: URefAddr) -> Self {\n        BidAddr::DelegatedPurse {\n            validator: validator.to_account_hash(),\n            delegator,\n        }\n    }\n\n    /// Create a new instance of a [`BidAddr`].\n    pub fn new_credit(validator: &PublicKey, era_id: EraId) -> Self {\n        BidAddr::Credit {\n            validator: AccountHash::from(validator),\n            era_id,\n        }\n    }\n\n    /// Create a new instance of a [`BidAddr`].\n    pub fn new_reservation_account(validator: &PublicKey, delegator: &PublicKey) -> Self {\n        BidAddr::ReservedDelegationAccount {\n            validator: validator.into(),\n            delegator: delegator.into(),\n        }\n    }\n\n    /// Create a new instance of a [`BidAddr`].\n    pub fn new_reservation_purse(validator: &PublicKey, delegator: URefAddr) -> Self {\n        BidAddr::ReservedDelegationPurse {\n            validator: validator.to_account_hash(),\n            delegator,\n        }\n    }\n\n    /// Create a new instance of a [`BidAddr`].\n    pub fn new_unbond_account(validator: PublicKey, unbonder: PublicKey) -> Self {\n        BidAddr::UnbondAccount {\n            validator: validator.to_account_hash(),\n            unbonder: 
unbonder.to_account_hash(),\n        }\n    }\n\n    /// Returns the common prefix of all delegated accounts to the cited validator.\n    pub fn delegated_account_prefix(&self) -> Result<Vec<u8>, Error> {\n        let validator = self.validator_account_hash();\n        let mut ret = Vec::with_capacity(validator.serialized_length() + 2);\n        ret.push(KeyTag::BidAddr as u8);\n        ret.push(BidAddrTag::DelegatedAccount as u8);\n        validator.write_bytes(&mut ret)?;\n        Ok(ret)\n    }\n\n    /// Returns the common prefix of all delegated purses to the cited validator.\n    pub fn delegated_purse_prefix(&self) -> Result<Vec<u8>, Error> {\n        let validator = self.validator_account_hash();\n        let mut ret = Vec::with_capacity(validator.serialized_length() + 2);\n        ret.push(KeyTag::BidAddr as u8);\n        ret.push(BidAddrTag::DelegatedPurse as u8);\n        validator.write_bytes(&mut ret)?;\n        Ok(ret)\n    }\n\n    /// Returns the common prefix of all reservations for accounts to the cited validator.\n    pub fn reserved_account_prefix(&self) -> Result<Vec<u8>, Error> {\n        let validator = self.validator_account_hash();\n        let mut ret = Vec::with_capacity(validator.serialized_length() + 2);\n        ret.push(KeyTag::BidAddr as u8);\n        ret.push(BidAddrTag::ReservedDelegationAccount as u8);\n        validator.write_bytes(&mut ret)?;\n        Ok(ret)\n    }\n\n    /// Returns the common prefix of all reservations for purses to the cited validator.\n    pub fn reserved_purse_prefix(&self) -> Result<Vec<u8>, Error> {\n        let validator = self.validator_account_hash();\n        let mut ret = Vec::with_capacity(validator.serialized_length() + 2);\n        ret.push(KeyTag::BidAddr as u8);\n        ret.push(BidAddrTag::ReservedDelegationPurse as u8);\n        validator.write_bytes(&mut ret)?;\n        Ok(ret)\n    }\n\n    /// Validator account hash.\n    pub fn validator_account_hash(&self) -> AccountHash {\n        
match self {\n            BidAddr::Unified(account_hash)\n            | BidAddr::Validator(account_hash)\n            | BidAddr::ValidatorRev(account_hash) => *account_hash,\n            BidAddr::DelegatedAccount { validator, .. }\n            | BidAddr::DelegatedPurse { validator, .. }\n            | BidAddr::Credit { validator, .. }\n            | BidAddr::ReservedDelegationAccount { validator, .. }\n            | BidAddr::ReservedDelegationPurse { validator, .. }\n            | BidAddr::UnbondAccount { validator, .. }\n            | BidAddr::UnbondPurse { validator, .. } => *validator,\n        }\n    }\n\n    /// Delegator account hash or none.\n    pub fn maybe_delegator_account_hash(&self) -> Option<AccountHash> {\n        match self {\n            BidAddr::Unified(_)\n            | BidAddr::Validator(_)\n            | BidAddr::ValidatorRev(_)\n            | BidAddr::Credit { .. }\n            | BidAddr::DelegatedPurse { .. }\n            | BidAddr::ReservedDelegationPurse { .. }\n            | BidAddr::UnbondPurse { .. } => None,\n            BidAddr::DelegatedAccount { delegator, .. }\n            | BidAddr::ReservedDelegationAccount { delegator, .. } => Some(*delegator),\n            BidAddr::UnbondAccount { unbonder, .. } => Some(*unbonder),\n        }\n    }\n\n    /// Delegator purse addr or none.\n    pub fn maybe_delegator_purse(&self) -> Option<URefAddr> {\n        match self {\n            BidAddr::Unified(_)\n            | BidAddr::Validator(_)\n            | BidAddr::ValidatorRev(_)\n            | BidAddr::Credit { .. }\n            | BidAddr::DelegatedAccount { .. }\n            | BidAddr::ReservedDelegationAccount { .. }\n            | BidAddr::UnbondAccount { .. } => None,\n            BidAddr::DelegatedPurse { delegator, .. }\n            | BidAddr::ReservedDelegationPurse { delegator, .. } => Some(*delegator),\n            BidAddr::UnbondPurse { unbonder, .. 
} => Some(*unbonder),\n        }\n    }\n\n    /// Era id or none.\n    pub fn maybe_era_id(&self) -> Option<EraId> {\n        match self {\n            BidAddr::Unified(_)\n            | BidAddr::Validator(_)\n            | BidAddr::ValidatorRev(_)\n            | BidAddr::DelegatedAccount { .. }\n            | BidAddr::DelegatedPurse { .. }\n            | BidAddr::ReservedDelegationAccount { .. }\n            | BidAddr::ReservedDelegationPurse { .. }\n            | BidAddr::UnbondPurse { .. }\n            | BidAddr::UnbondAccount { .. } => None,\n            BidAddr::Credit { era_id, .. } => Some(*era_id),\n        }\n    }\n\n    /// If true, this instance is the key for a delegator bid record.\n    /// Else, it is the key for a validator bid record.\n    pub fn is_delegator_bid_addr(&self) -> bool {\n        match self {\n            BidAddr::Unified(_)\n            | BidAddr::Validator(_)\n            | BidAddr::ValidatorRev(_)\n            | BidAddr::Credit { .. }\n            | BidAddr::ReservedDelegationAccount { .. }\n            | BidAddr::ReservedDelegationPurse { .. }\n            | BidAddr::UnbondPurse { .. }\n            | BidAddr::UnbondAccount { .. } => false,\n            BidAddr::DelegatedAccount { .. } | BidAddr::DelegatedPurse { .. 
} => true,\n        }\n    }\n\n    /// How long will be the serialized value for this instance.\n    pub fn serialized_length(&self) -> usize {\n        match self {\n            BidAddr::Unified(account_hash)\n            | BidAddr::Validator(account_hash)\n            | BidAddr::ValidatorRev(account_hash) => ToBytes::serialized_length(account_hash) + 1,\n            BidAddr::DelegatedAccount {\n                validator,\n                delegator,\n            } => ToBytes::serialized_length(validator) + ToBytes::serialized_length(delegator) + 1,\n            BidAddr::DelegatedPurse {\n                validator,\n                delegator,\n            } => ToBytes::serialized_length(validator) + ToBytes::serialized_length(delegator) + 1,\n            BidAddr::Credit { validator, era_id } => {\n                ToBytes::serialized_length(validator) + ToBytes::serialized_length(era_id) + 1\n            }\n            BidAddr::ReservedDelegationAccount {\n                validator,\n                delegator,\n            } => ToBytes::serialized_length(validator) + ToBytes::serialized_length(delegator) + 1,\n            BidAddr::ReservedDelegationPurse {\n                validator,\n                delegator,\n            } => ToBytes::serialized_length(validator) + ToBytes::serialized_length(delegator) + 1,\n            BidAddr::UnbondAccount {\n                validator,\n                unbonder,\n            } => ToBytes::serialized_length(validator) + ToBytes::serialized_length(unbonder) + 1,\n            BidAddr::UnbondPurse {\n                validator,\n                unbonder,\n            } => ToBytes::serialized_length(validator) + ToBytes::serialized_length(unbonder) + 1,\n        }\n    }\n\n    /// Returns the BiddAddrTag of this instance.\n    pub fn tag(&self) -> BidAddrTag {\n        match self {\n            BidAddr::Unified(_) => BidAddrTag::Unified,\n            BidAddr::Validator(_) => BidAddrTag::Validator,\n            
BidAddr::ValidatorRev(_) => BidAddrTag::ValidatorRev,\n            BidAddr::DelegatedAccount { .. } => BidAddrTag::DelegatedAccount,\n            BidAddr::DelegatedPurse { .. } => BidAddrTag::DelegatedPurse,\n\n            BidAddr::Credit { .. } => BidAddrTag::Credit,\n            BidAddr::ReservedDelegationAccount { .. } => BidAddrTag::ReservedDelegationAccount,\n            BidAddr::ReservedDelegationPurse { .. } => BidAddrTag::ReservedDelegationPurse,\n            BidAddr::UnbondAccount { .. } => BidAddrTag::UnbondAccount,\n            BidAddr::UnbondPurse { .. } => BidAddrTag::UnbondPurse,\n        }\n    }\n}\n\nimpl ToBytes for BidAddr {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        buffer.push(self.tag() as u8);\n        buffer.append(&mut self.validator_account_hash().to_bytes()?);\n        if let Some(delegator) = self.maybe_delegator_purse() {\n            buffer.append(&mut delegator.to_bytes()?);\n        }\n        if let Some(delegator) = self.maybe_delegator_account_hash() {\n            buffer.append(&mut delegator.to_bytes()?);\n        }\n        if let Some(era_id) = self.maybe_era_id() {\n            buffer.append(&mut era_id.to_bytes()?);\n        }\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.serialized_length()\n    }\n}\n\nimpl FromBytes for BidAddr {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?;\n        match tag {\n            tag if tag == BidAddrTag::Unified as u8 => AccountHash::from_bytes(remainder)\n                .map(|(account_hash, remainder)| (BidAddr::Unified(account_hash), remainder)),\n            tag if tag == BidAddrTag::Validator as u8 => AccountHash::from_bytes(remainder)\n                .map(|(account_hash, remainder)| (BidAddr::Validator(account_hash), remainder)),\n            tag if 
tag == BidAddrTag::ValidatorRev as u8 => AccountHash::from_bytes(remainder)\n                .map(|(account_hash, remainder)| (BidAddr::ValidatorRev(account_hash), remainder)),\n            tag if tag == BidAddrTag::DelegatedAccount as u8 => {\n                let (validator, remainder) = AccountHash::from_bytes(remainder)?;\n                let (delegator, remainder) = AccountHash::from_bytes(remainder)?;\n                Ok((\n                    BidAddr::DelegatedAccount {\n                        validator,\n                        delegator,\n                    },\n                    remainder,\n                ))\n            }\n            tag if tag == BidAddrTag::DelegatedPurse as u8 => {\n                let (validator, remainder) = AccountHash::from_bytes(remainder)?;\n                let (delegator, remainder) = URefAddr::from_bytes(remainder)?;\n                Ok((\n                    BidAddr::DelegatedPurse {\n                        validator,\n                        delegator,\n                    },\n                    remainder,\n                ))\n            }\n\n            tag if tag == BidAddrTag::Credit as u8 => {\n                let (validator, remainder) = AccountHash::from_bytes(remainder)?;\n                let (era_id, remainder) = EraId::from_bytes(remainder)?;\n                Ok((BidAddr::Credit { validator, era_id }, remainder))\n            }\n            tag if tag == BidAddrTag::ReservedDelegationAccount as u8 => {\n                let (validator, remainder) = AccountHash::from_bytes(remainder)?;\n                let (delegator, remainder) = AccountHash::from_bytes(remainder)?;\n                Ok((\n                    BidAddr::ReservedDelegationAccount {\n                        validator,\n                        delegator,\n                    },\n                    remainder,\n                ))\n            }\n            tag if tag == BidAddrTag::ReservedDelegationPurse as u8 => {\n                let (validator, 
remainder) = AccountHash::from_bytes(remainder)?;\n                let (delegator, remainder) = URefAddr::from_bytes(remainder)?;\n                Ok((\n                    BidAddr::ReservedDelegationPurse {\n                        validator,\n                        delegator,\n                    },\n                    remainder,\n                ))\n            }\n            tag if tag == BidAddrTag::UnbondAccount as u8 => {\n                let (validator, remainder) = AccountHash::from_bytes(remainder)?;\n                let (unbonder, remainder) = AccountHash::from_bytes(remainder)?;\n                Ok((\n                    BidAddr::UnbondAccount {\n                        validator,\n                        unbonder,\n                    },\n                    remainder,\n                ))\n            }\n            tag if tag == BidAddrTag::UnbondPurse as u8 => {\n                let (validator, remainder) = AccountHash::from_bytes(remainder)?;\n                let (unbonder, remainder) = URefAddr::from_bytes(remainder)?;\n                Ok((\n                    BidAddr::UnbondPurse {\n                        validator,\n                        unbonder,\n                    },\n                    remainder,\n                ))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\nimpl Default for BidAddr {\n    fn default() -> Self {\n        BidAddr::Validator(AccountHash::default())\n    }\n}\n\nimpl From<BidAddr> for Key {\n    fn from(bid_addr: BidAddr) -> Self {\n        Key::BidAddr(bid_addr)\n    }\n}\n\nimpl From<AccountHash> for BidAddr {\n    fn from(account_hash: AccountHash) -> Self {\n        BidAddr::Validator(account_hash)\n    }\n}\n\nimpl From<PublicKey> for BidAddr {\n    fn from(public_key: PublicKey) -> Self {\n        BidAddr::Validator(public_key.to_account_hash())\n    }\n}\n\nimpl Display for BidAddr {\n    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {\n        let tag = 
self.tag();\n        match self {\n            BidAddr::Unified(account_hash)\n            | BidAddr::Validator(account_hash)\n            | BidAddr::ValidatorRev(account_hash) => {\n                write!(f, \"{}{}\", tag, account_hash)\n            }\n            BidAddr::DelegatedAccount {\n                validator,\n                delegator,\n            } => write!(f, \"{}{}{}\", tag, validator, delegator),\n            BidAddr::DelegatedPurse {\n                validator,\n                delegator,\n            } => write!(\n                f,\n                \"{}{}{}\",\n                tag,\n                validator,\n                base16::encode_lower(&delegator),\n            ),\n\n            BidAddr::Credit { validator, era_id } => write!(\n                f,\n                \"{}{}{}\",\n                tag,\n                validator,\n                base16::encode_lower(&era_id.to_le_bytes())\n            ),\n            BidAddr::ReservedDelegationAccount {\n                validator,\n                delegator,\n            } => write!(f, \"{}{}{}\", tag, validator, delegator),\n            BidAddr::ReservedDelegationPurse {\n                validator,\n                delegator,\n            } => write!(\n                f,\n                \"{}{}{}\",\n                tag,\n                validator,\n                base16::encode_lower(&delegator),\n            ),\n            BidAddr::UnbondAccount {\n                validator,\n                unbonder,\n            } => write!(f, \"{}{}{}\", tag, validator, unbonder,),\n            BidAddr::UnbondPurse {\n                validator,\n                unbonder,\n            } => write!(f, \"{}{}{}\", tag, validator, base16::encode_lower(&unbonder),),\n        }\n    }\n}\n\nimpl Debug for BidAddr {\n    fn fmt(&self, f: &mut Formatter) -> core::fmt::Result {\n        match self {\n            BidAddr::Unified(validator) => write!(f, \"BidAddr::Unified({:?})\", validator),\n            
BidAddr::Validator(validator) => write!(f, \"BidAddr::Validator({:?})\", validator),\n            BidAddr::ValidatorRev(validator) => write!(f, \"BidAddr::ValidatorRev({:?})\", validator),\n            BidAddr::DelegatedAccount {\n                validator,\n                delegator,\n            } => {\n                write!(\n                    f,\n                    \"BidAddr::DelegatedAccount({:?}{:?})\",\n                    validator, delegator\n                )\n            }\n            BidAddr::DelegatedPurse {\n                validator,\n                delegator,\n            } => {\n                write!(f, \"BidAddr::DelegatedPurse({:?}{:?})\", validator, delegator)\n            }\n            BidAddr::Credit { validator, era_id } => {\n                write!(f, \"BidAddr::Credit({:?}{:?})\", validator, era_id)\n            }\n            BidAddr::ReservedDelegationAccount {\n                validator,\n                delegator,\n            } => {\n                write!(\n                    f,\n                    \"BidAddr::ReservedDelegationAccount({:?}{:?})\",\n                    validator, delegator\n                )\n            }\n            BidAddr::ReservedDelegationPurse {\n                validator,\n                delegator,\n            } => {\n                write!(\n                    f,\n                    \"BidAddr::ReservedDelegationPurse({:?}{:?})\",\n                    validator, delegator\n                )\n            }\n            BidAddr::UnbondAccount {\n                validator,\n                unbonder,\n            } => {\n                write!(f, \"BidAddr::UnbondAccount({:?}{:?})\", validator, unbonder)\n            }\n            BidAddr::UnbondPurse {\n                validator,\n                unbonder,\n            } => {\n                write!(f, \"BidAddr::UnbondPurse({:?}{:?})\", validator, unbonder)\n            }\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl 
Distribution<BidAddr> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BidAddr {\n        BidAddr::Validator(AccountHash::new(rng.gen()))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{bytesrepr, system::auction::BidAddr, EraId, PublicKey, SecretKey};\n\n    #[test]\n    fn serialization_roundtrip() {\n        let bid_addr = BidAddr::legacy([1; 32]);\n        bytesrepr::test_serialization_roundtrip(&bid_addr);\n        let bid_addr = BidAddr::new_validator_addr([1; 32]);\n        bytesrepr::test_serialization_roundtrip(&bid_addr);\n        let bid_addr = BidAddr::new_delegator_account_addr(([1; 32], [2; 32]));\n        bytesrepr::test_serialization_roundtrip(&bid_addr);\n        let bid_addr = BidAddr::new_credit(\n            &PublicKey::from(\n                &SecretKey::ed25519_from_bytes([0u8; SecretKey::ED25519_LENGTH]).unwrap(),\n            ),\n            EraId::new(0),\n        );\n        bytesrepr::test_serialization_roundtrip(&bid_addr);\n        let bid_addr = BidAddr::new_reservation_account_addr(([1; 32], [2; 32]));\n        bytesrepr::test_serialization_roundtrip(&bid_addr);\n    }\n}\n\n#[cfg(test)]\nmod proptest {\n    use proptest::prelude::*;\n\n    use crate::{bytesrepr, gens};\n\n    proptest! {\n        #[test]\n        fn test_value_bid_addr_validator(bid_addr in gens::bid_addr_arb()) {\n            bytesrepr::test_serialization_roundtrip(&bid_addr);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/system/auction/bid_kind.rs",
    "content": "use crate::{\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    system::auction::{\n        bid::VestingSchedule, Bid, BidAddr, DelegatorBid, ValidatorBid, ValidatorCredit,\n    },\n    CLType, CLTyped, EraId, PublicKey, URef, U512,\n};\n\nuse crate::system::auction::{\n    delegator_kind::DelegatorKind,\n    unbond::{Unbond, UnbondKind},\n    Bridge,\n};\nuse alloc::{boxed::Box, vec::Vec};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse super::Reservation;\n\n/// BidKindTag variants.\n#[allow(clippy::large_enum_variant)]\n#[repr(u8)]\n#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)]\npub enum BidKindTag {\n    /// Unified bid.\n    Unified = 0,\n    /// Validator bid.\n    Validator = 1,\n    /// Delegator bid.\n    Delegator = 2,\n    /// Bridge record.\n    Bridge = 3,\n    /// Validator credit bid.\n    Credit = 4,\n    /// Reservation bid.\n    Reservation = 5,\n    /// Unbond.\n    Unbond = 6,\n}\n\n/// Auction bid variants.\n#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub enum BidKind {\n    /// A unified record indexed on validator data, with an embedded collection of all delegator\n    /// bids assigned to that validator. 
The Unified variant is for legacy retrograde support, new\n    /// instances will not be created going forward.\n    Unified(Box<Bid>),\n    /// A bid record containing only validator data.\n    Validator(Box<ValidatorBid>),\n    /// A bid record containing only delegator data.\n    Delegator(Box<DelegatorBid>),\n    /// A bridge record pointing to a new `ValidatorBid` after the public key was changed.\n    Bridge(Box<Bridge>),\n    /// Credited amount.\n    Credit(Box<ValidatorCredit>),\n    /// Reservation\n    Reservation(Box<Reservation>),\n    /// Unbond\n    Unbond(Box<Unbond>),\n}\n\nimpl BidKind {\n    /// Returns validator public key.\n    pub fn validator_public_key(&self) -> PublicKey {\n        match self {\n            BidKind::Unified(bid) => bid.validator_public_key().clone(),\n            BidKind::Validator(validator_bid) => validator_bid.validator_public_key().clone(),\n            BidKind::Delegator(delegator_bid) => delegator_bid.validator_public_key().clone(),\n            BidKind::Bridge(bridge) => bridge.old_validator_public_key().clone(),\n            BidKind::Credit(validator_credit) => validator_credit.validator_public_key().clone(),\n            BidKind::Reservation(reservation) => reservation.validator_public_key().clone(),\n            BidKind::Unbond(unbond) => unbond.validator_public_key().clone(),\n        }\n    }\n\n    /// Returns new validator public key if it was changed.\n    pub fn new_validator_public_key(&self) -> Option<PublicKey> {\n        match self {\n            BidKind::Bridge(bridge) => Some(bridge.new_validator_public_key().clone()),\n            BidKind::Unified(_)\n            | BidKind::Validator(_)\n            | BidKind::Delegator(_)\n            | BidKind::Credit(_)\n            | BidKind::Reservation(_)\n            | BidKind::Unbond(_) => None,\n        }\n    }\n\n    /// Returns BidAddr.\n    pub fn bid_addr(&self) -> BidAddr {\n        match self {\n            BidKind::Unified(bid) => 
BidAddr::Unified(bid.validator_public_key().to_account_hash()),\n            BidKind::Validator(validator_bid) => {\n                BidAddr::Validator(validator_bid.validator_public_key().to_account_hash())\n            }\n            BidKind::Delegator(delegator_bid) => {\n                let validator = delegator_bid.validator_public_key().to_account_hash();\n                match delegator_bid.delegator_kind() {\n                    DelegatorKind::PublicKey(pk) => BidAddr::DelegatedAccount {\n                        validator,\n                        delegator: pk.to_account_hash(),\n                    },\n                    DelegatorKind::Purse(addr) => BidAddr::DelegatedPurse {\n                        validator,\n                        delegator: *addr,\n                    },\n                }\n            }\n            BidKind::Bridge(bridge) => {\n                BidAddr::Validator(bridge.old_validator_public_key().to_account_hash())\n            }\n            BidKind::Credit(credit) => {\n                let validator = credit.validator_public_key().to_account_hash();\n                let era_id = credit.era_id();\n                BidAddr::Credit { validator, era_id }\n            }\n            BidKind::Reservation(reservation_bid) => {\n                let validator = reservation_bid.validator_public_key().to_account_hash();\n                match reservation_bid.delegator_kind() {\n                    DelegatorKind::PublicKey(pk) => BidAddr::ReservedDelegationAccount {\n                        validator,\n                        delegator: pk.to_account_hash(),\n                    },\n                    DelegatorKind::Purse(addr) => BidAddr::ReservedDelegationPurse {\n                        validator,\n                        delegator: *addr,\n                    },\n                }\n            }\n            BidKind::Unbond(unbond) => {\n                let validator = unbond.validator_public_key().to_account_hash();\n                
let unbond_kind = unbond.unbond_kind();\n                match unbond_kind {\n                    UnbondKind::Validator(pk) | UnbondKind::DelegatedPublicKey(pk) => {\n                        BidAddr::UnbondAccount {\n                            validator,\n                            unbonder: pk.to_account_hash(),\n                        }\n                    }\n                    UnbondKind::DelegatedPurse(addr) => BidAddr::UnbondPurse {\n                        validator,\n                        unbonder: *addr,\n                    },\n                }\n            }\n        }\n    }\n\n    /// Is this instance a unified bid?\n    pub fn is_unified(&self) -> bool {\n        matches!(self, BidKind::Unified(_))\n    }\n\n    /// Is this instance a validator bid?\n    pub fn is_validator(&self) -> bool {\n        matches!(self, BidKind::Validator(_))\n    }\n\n    /// Is this instance a delegator bid?\n    pub fn is_delegator(&self) -> bool {\n        matches!(self, BidKind::Delegator(_))\n    }\n\n    /// Is this instance a bridge record?\n    pub fn is_bridge(&self) -> bool {\n        matches!(self, BidKind::Bridge(_))\n    }\n\n    /// Is this instance a validator credit?\n    pub fn is_credit(&self) -> bool {\n        matches!(self, BidKind::Credit(_))\n    }\n\n    /// Is this instance a reservation?\n    pub fn is_reservation(&self) -> bool {\n        matches!(self, BidKind::Reservation(_))\n    }\n\n    /// Is this instance a unbond record?\n    pub fn is_unbond(&self) -> bool {\n        matches!(self, BidKind::Unbond(_))\n    }\n\n    /// The staked amount.\n    pub fn staked_amount(&self) -> Option<U512> {\n        match self {\n            BidKind::Unified(bid) => Some(*bid.staked_amount()),\n            BidKind::Validator(validator_bid) => Some(validator_bid.staked_amount()),\n            BidKind::Delegator(delegator) => Some(delegator.staked_amount()),\n            BidKind::Credit(credit) => Some(credit.amount()),\n            BidKind::Bridge(_) 
| BidKind::Reservation(_) | BidKind::Unbond(_) => None,\n        }\n    }\n\n    /// The bonding purse.\n    pub fn bonding_purse(&self) -> Option<URef> {\n        match self {\n            BidKind::Unified(bid) => Some(*bid.bonding_purse()),\n            BidKind::Validator(validator_bid) => Some(*validator_bid.bonding_purse()),\n            BidKind::Delegator(delegator) => Some(*delegator.bonding_purse()),\n            BidKind::Unbond(_)\n            | BidKind::Bridge(_)\n            | BidKind::Credit(_)\n            | BidKind::Reservation(_) => None,\n        }\n    }\n\n    /// The delegator kind, if relevant.\n    pub fn delegator_kind(&self) -> Option<DelegatorKind> {\n        match self {\n            BidKind::Unified(_)\n            | BidKind::Validator(_)\n            | BidKind::Bridge(_)\n            | BidKind::Credit(_) => None,\n            BidKind::Unbond(unbond) => match unbond.unbond_kind() {\n                UnbondKind::Validator(_) => None,\n                UnbondKind::DelegatedPublicKey(pk) => Some(DelegatorKind::PublicKey(pk.clone())),\n                UnbondKind::DelegatedPurse(addr) => Some(DelegatorKind::Purse(*addr)),\n            },\n            BidKind::Delegator(bid) => Some(bid.delegator_kind().clone()),\n            BidKind::Reservation(bid) => Some(bid.delegator_kind().clone()),\n        }\n    }\n\n    /// The unbond kind, if relevant.\n    pub fn unbond_kind(&self) -> Option<UnbondKind> {\n        match self {\n            BidKind::Unified(_)\n            | BidKind::Validator(_)\n            | BidKind::Bridge(_)\n            | BidKind::Credit(_)\n            | BidKind::Delegator(_)\n            | BidKind::Reservation(_) => None,\n            BidKind::Unbond(unbond) => Some(unbond.unbond_kind().clone()),\n        }\n    }\n\n    /// Is this bid inactive?\n    pub fn inactive(&self) -> bool {\n        match self {\n            BidKind::Unified(bid) => bid.inactive(),\n            BidKind::Validator(validator_bid) => 
validator_bid.inactive(),\n            BidKind::Delegator(delegator) => delegator.staked_amount().is_zero(),\n            BidKind::Credit(credit) => credit.amount().is_zero(),\n            BidKind::Bridge(_) | BidKind::Reservation(_) | BidKind::Unbond(_) => false,\n        }\n    }\n\n    /// Checks if a bid is still locked under a vesting schedule.\n    ///\n    /// Returns true if a timestamp falls below the initial lockup period + 91 days release\n    /// schedule, otherwise false.\n    pub fn is_locked(&self, timestamp_millis: u64) -> bool {\n        match self {\n            BidKind::Unified(bid) => bid.is_locked(timestamp_millis),\n            BidKind::Validator(validator_bid) => validator_bid.is_locked(timestamp_millis),\n            BidKind::Delegator(delegator) => delegator.is_locked(timestamp_millis),\n            BidKind::Bridge(_)\n            | BidKind::Credit(_)\n            | BidKind::Reservation(_)\n            | BidKind::Unbond(_) => false,\n        }\n    }\n\n    /// Checks if a bid is still locked under a vesting schedule.\n    ///\n    /// Returns true if a timestamp falls below the initial lockup period + 91 days release\n    /// schedule, otherwise false.\n    pub fn is_locked_with_vesting_schedule(\n        &self,\n        timestamp_millis: u64,\n        vesting_schedule_period_millis: u64,\n    ) -> bool {\n        match self {\n            BidKind::Unified(bid) => bid\n                .is_locked_with_vesting_schedule(timestamp_millis, vesting_schedule_period_millis),\n            BidKind::Validator(validator_bid) => validator_bid\n                .is_locked_with_vesting_schedule(timestamp_millis, vesting_schedule_period_millis),\n            BidKind::Delegator(delegator) => delegator\n                .is_locked_with_vesting_schedule(timestamp_millis, vesting_schedule_period_millis),\n            BidKind::Bridge(_)\n            | BidKind::Credit(_)\n            | BidKind::Reservation(_)\n            | BidKind::Unbond(_) => false,\n        
}\n    }\n\n    /// Returns a reference to the vesting schedule of the provided bid.  `None` if a non-genesis\n    /// validator.\n    pub fn vesting_schedule(&self) -> Option<&VestingSchedule> {\n        match self {\n            BidKind::Unified(bid) => bid.vesting_schedule(),\n            BidKind::Validator(validator_bid) => validator_bid.vesting_schedule(),\n            BidKind::Delegator(delegator) => delegator.vesting_schedule(),\n            BidKind::Bridge(_)\n            | BidKind::Credit(_)\n            | BidKind::Reservation(_)\n            | BidKind::Unbond(_) => None,\n        }\n    }\n\n    /// BidKindTag.\n    pub fn tag(&self) -> BidKindTag {\n        match self {\n            BidKind::Unified(_) => BidKindTag::Unified,\n            BidKind::Validator(_) => BidKindTag::Validator,\n            BidKind::Delegator(_) => BidKindTag::Delegator,\n            BidKind::Bridge(_) => BidKindTag::Bridge,\n            BidKind::Credit(_) => BidKindTag::Credit,\n            BidKind::Reservation(_) => BidKindTag::Reservation,\n            BidKind::Unbond(_) => BidKindTag::Unbond,\n        }\n    }\n\n    /// The `[EraId]` associated with this `[BidKind]`, if any.\n    pub fn era_id(&self) -> Option<EraId> {\n        match self {\n            BidKind::Bridge(bridge) => Some(*bridge.era_id()),\n            BidKind::Credit(credit) => Some(credit.era_id()),\n            BidKind::Unified(_)\n            | BidKind::Validator(_)\n            | BidKind::Delegator(_)\n            | BidKind::Reservation(_)\n            | BidKind::Unbond(_) => None,\n        }\n    }\n\n    /// Returns a cloned validator bid.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn as_validator_bid(&self) -> Option<ValidatorBid> {\n        if let Self::Validator(bid) = self {\n            return Some(*bid.clone());\n        }\n\n        None\n    }\n}\n\nimpl CLTyped for BidKind {\n    fn cl_type() -> CLType {\n        CLType::Any\n    }\n}\n\nimpl ToBytes for BidKind {\n    fn 
to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        let (tag, mut serialized_data) = match self {\n            BidKind::Unified(bid) => (BidKindTag::Unified, bid.to_bytes()?),\n            BidKind::Validator(validator_bid) => (BidKindTag::Validator, validator_bid.to_bytes()?),\n            BidKind::Delegator(delegator_bid) => (BidKindTag::Delegator, delegator_bid.to_bytes()?),\n            BidKind::Bridge(bridge) => (BidKindTag::Bridge, bridge.to_bytes()?),\n            BidKind::Credit(credit) => (BidKindTag::Credit, credit.to_bytes()?),\n            BidKind::Reservation(reservation) => (BidKindTag::Reservation, reservation.to_bytes()?),\n            BidKind::Unbond(unbond) => (BidKindTag::Unbond, unbond.to_bytes()?),\n        };\n        result.push(tag as u8);\n        result.append(&mut serialized_data);\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                BidKind::Unified(bid) => bid.serialized_length(),\n                BidKind::Validator(validator_bid) => validator_bid.serialized_length(),\n                BidKind::Delegator(delegator_bid) => delegator_bid.serialized_length(),\n                BidKind::Bridge(bridge) => bridge.serialized_length(),\n                BidKind::Credit(credit) => credit.serialized_length(),\n                BidKind::Reservation(reservation) => reservation.serialized_length(),\n                BidKind::Unbond(unbond) => unbond.serialized_length(),\n            }\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        writer.push(self.tag() as u8);\n        match self {\n            BidKind::Unified(bid) => bid.write_bytes(writer)?,\n            BidKind::Validator(validator_bid) => validator_bid.write_bytes(writer)?,\n            BidKind::Delegator(delegator_bid) => delegator_bid.write_bytes(writer)?,\n            
BidKind::Bridge(bridge) => bridge.write_bytes(writer)?,\n            BidKind::Credit(credit) => credit.write_bytes(writer)?,\n            BidKind::Reservation(reservation) => reservation.write_bytes(writer)?,\n            BidKind::Unbond(unbond) => unbond.write_bytes(writer)?,\n        };\n        Ok(())\n    }\n}\n\nimpl FromBytes for BidKind {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?;\n        match tag {\n            tag if tag == BidKindTag::Unified as u8 => Bid::from_bytes(remainder)\n                .map(|(bid, remainder)| (BidKind::Unified(Box::new(bid)), remainder)),\n            tag if tag == BidKindTag::Validator as u8 => {\n                ValidatorBid::from_bytes(remainder).map(|(validator_bid, remainder)| {\n                    (BidKind::Validator(Box::new(validator_bid)), remainder)\n                })\n            }\n            tag if tag == BidKindTag::Delegator as u8 => {\n                DelegatorBid::from_bytes(remainder).map(|(delegator_bid, remainder)| {\n                    (BidKind::Delegator(Box::new(delegator_bid)), remainder)\n                })\n            }\n            tag if tag == BidKindTag::Bridge as u8 => Bridge::from_bytes(remainder)\n                .map(|(bridge, remainder)| (BidKind::Bridge(Box::new(bridge)), remainder)),\n            tag if tag == BidKindTag::Credit as u8 => ValidatorCredit::from_bytes(remainder)\n                .map(|(credit, remainder)| (BidKind::Credit(Box::new(credit)), remainder)),\n            tag if tag == BidKindTag::Reservation as u8 => {\n                Reservation::from_bytes(remainder).map(|(reservation, remainder)| {\n                    (BidKind::Reservation(Box::new(reservation)), remainder)\n                })\n            }\n            tag if tag == BidKindTag::Unbond as u8 => Unbond::from_bytes(remainder)\n                .map(|(unbond, remainder)| 
(BidKind::Unbond(Box::new(unbond)), remainder)),\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::{BidKind, *};\n    use crate::{\n        bytesrepr,\n        system::auction::{delegator_kind::DelegatorKind, DelegationRate},\n        AccessRights, SecretKey,\n    };\n\n    #[test]\n    fn serialization_roundtrip() {\n        let validator_public_key = PublicKey::from(\n            &SecretKey::ed25519_from_bytes([0u8; SecretKey::ED25519_LENGTH]).unwrap(),\n        );\n        let bonding_purse = URef::new([42; 32], AccessRights::READ_ADD_WRITE);\n        let bid = Bid::unlocked(\n            validator_public_key.clone(),\n            bonding_purse,\n            U512::one(),\n            DelegationRate::MAX,\n        );\n        let unified_bid = BidKind::Unified(Box::new(bid.clone()));\n        let validator_bid = ValidatorBid::from(bid.clone());\n\n        let delegator_public_key = PublicKey::from(\n            &SecretKey::ed25519_from_bytes([1u8; SecretKey::ED25519_LENGTH]).unwrap(),\n        );\n\n        let delegator_kind = DelegatorKind::PublicKey(delegator_public_key);\n\n        let delegator = DelegatorBid::unlocked(\n            delegator_kind,\n            U512::one(),\n            bonding_purse,\n            validator_public_key.clone(),\n        );\n        let delegator_bid = BidKind::Delegator(Box::new(delegator));\n\n        let credit = ValidatorCredit::new(validator_public_key, EraId::new(0), U512::one());\n        let credit_bid = BidKind::Credit(Box::new(credit));\n\n        bytesrepr::test_serialization_roundtrip(&bid);\n        bytesrepr::test_serialization_roundtrip(&unified_bid);\n        bytesrepr::test_serialization_roundtrip(&validator_bid);\n        bytesrepr::test_serialization_roundtrip(&delegator_bid);\n        bytesrepr::test_serialization_roundtrip(&credit_bid);\n    }\n}\n\n#[cfg(test)]\nmod prop_test_bid_kind_unified {\n    use proptest::prelude::*;\n\n    use 
crate::{bytesrepr, gens};\n\n    proptest! {\n        #[test]\n        fn test_value_bid_kind_unified(bid_kind in gens::unified_bid_arb(0..3)) {\n            bytesrepr::test_serialization_roundtrip(&bid_kind);\n        }\n    }\n}\n\n#[cfg(test)]\nmod prop_test_bid_kind_validator {\n    use proptest::prelude::*;\n\n    use crate::{bytesrepr, gens};\n\n    proptest! {\n        #[test]\n        fn test_value_bid_kind_validator(bid_kind in gens::validator_bid_arb()) {\n            bytesrepr::test_serialization_roundtrip(&bid_kind);\n        }\n    }\n}\n\n#[cfg(test)]\nmod prop_test_bid_kind_delegator {\n    use proptest::prelude::*;\n\n    use crate::{bytesrepr, gens};\n\n    proptest! {\n        #[test]\n        fn test_value_bid_kind_delegator(bid_kind in gens::delegator_bid_kind_arb()) {\n            bytesrepr::test_serialization_roundtrip(&bid_kind);\n        }\n    }\n}\n\n#[cfg(test)]\nmod prop_test_bid_kind_validator_credit {\n    use proptest::prelude::*;\n\n    use crate::{bytesrepr, gens};\n\n    proptest! {\n        #[test]\n        fn test_value_bid_kind_validator_credit(bid_kind in gens::credit_bid_arb()) {\n            bytesrepr::test_serialization_roundtrip(&bid_kind);\n        }\n    }\n}\n\n#[cfg(test)]\nmod prop_test_bid_kind_reservation {\n    use proptest::prelude::*;\n\n    use crate::{bytesrepr, gens};\n\n    proptest! {\n        #[test]\n        fn test_value_bid_kind_reservation(bid_kind in gens::reservation_bid_arb()) {\n            bytesrepr::test_serialization_roundtrip(&bid_kind);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/system/auction/bridge.rs",
    "content": "use alloc::vec::Vec;\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    CLType, CLTyped, EraId, PublicKey,\n};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n/// A bridge record pointing to a new `ValidatorBid` after the public key was changed.\n#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct Bridge {\n    /// Previous validator public key associated with the bid.\n    old_validator_public_key: PublicKey,\n    /// New validator public key associated with the bid.\n    new_validator_public_key: PublicKey,\n    /// Era when bridge record was created.\n    era_id: EraId,\n}\n\nimpl Bridge {\n    /// Creates new instance of a bridge record.\n    pub fn new(\n        old_validator_public_key: PublicKey,\n        new_validator_public_key: PublicKey,\n        era_id: EraId,\n    ) -> Self {\n        Self {\n            old_validator_public_key,\n            new_validator_public_key,\n            era_id,\n        }\n    }\n\n    /// Gets the old validator public key\n    pub fn old_validator_public_key(&self) -> &PublicKey {\n        &self.old_validator_public_key\n    }\n\n    /// Gets the new validator public key\n    pub fn new_validator_public_key(&self) -> &PublicKey {\n        &self.new_validator_public_key\n    }\n\n    /// Gets the era when key change happened\n    pub fn era_id(&self) -> &EraId {\n        &self.era_id\n    }\n}\n\nimpl CLTyped for Bridge {\n    fn cl_type() -> CLType {\n        CLType::Any\n    }\n}\n\nimpl ToBytes for Bridge {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut result)?;\n        Ok(result)\n    }\n\n    fn 
serialized_length(&self) -> usize {\n        self.old_validator_public_key.serialized_length()\n            + self.new_validator_public_key.serialized_length()\n            + self.era_id.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.old_validator_public_key.write_bytes(writer)?;\n        self.new_validator_public_key.write_bytes(writer)?;\n        self.era_id.write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for Bridge {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (old_validator_public_key, bytes) = FromBytes::from_bytes(bytes)?;\n        let (new_validator_public_key, bytes) = FromBytes::from_bytes(bytes)?;\n        let (era_id, bytes) = FromBytes::from_bytes(bytes)?;\n        Ok((\n            Bridge {\n                old_validator_public_key,\n                new_validator_public_key,\n                era_id,\n            },\n            bytes,\n        ))\n    }\n}\n"
  },
  {
    "path": "types/src/system/auction/constants.rs",
    "content": "use crate::EraId;\n\nuse super::DelegationRate;\n\n/// Initial value of era id we start at genesis.\npub const INITIAL_ERA_ID: EraId = EraId::new(0);\n\n/// Initial value of era end timestamp.\npub const INITIAL_ERA_END_TIMESTAMP_MILLIS: u64 = 0;\n\n/// Delegation rate is a fraction between 0-1. Validator sets the delegation rate\n/// in integer terms, which is then divided by the denominator to obtain the fraction.\npub const DELEGATION_RATE_DENOMINATOR: DelegationRate = 100;\n\n/// We use one trillion as a block reward unit because it's large enough to allow precise\n/// fractions, and small enough for many block rewards to fit into a u64.\npub const BLOCK_REWARD: u64 = 1_000_000_000_000;\n\n/// Named constant for `amount`.\npub const ARG_AMOUNT: &str = \"amount\";\n/// Named constant for `delegation_rate`.\npub const ARG_DELEGATION_RATE: &str = \"delegation_rate\";\n/// Named constant for `public_key`.\npub const ARG_PUBLIC_KEY: &str = \"public_key\";\n/// Named constant for `new_public_key`.\npub const ARG_NEW_PUBLIC_KEY: &str = \"new_public_key\";\n/// Named constant for `validator`.\npub const ARG_VALIDATOR: &str = \"validator\";\n/// Named constant for `delegator`.\npub const ARG_DELEGATOR: &str = \"delegator\";\n/// Named constant for `delegator_purse`.\npub const ARG_DELEGATOR_PURSE: &str = \"delegator_purse\";\n/// Named constant for `delegators`.\npub const ARG_DELEGATORS: &str = \"delegators\";\n/// Named constant for `reservations`.\npub const ARG_RESERVATIONS: &str = \"reservations\";\n/// Named constant for `validator_purse`.\npub const ARG_VALIDATOR_PURSE: &str = \"validator_purse\";\n/// Named constant for `validator_keys`.\npub const ARG_VALIDATOR_KEYS: &str = \"validator_keys\";\n/// Named constant for `validator_public_keys`.\npub const ARG_VALIDATOR_PUBLIC_KEYS: &str = \"validator_public_keys\";\n/// Named constant for `new_validator`.\npub const ARG_NEW_VALIDATOR: &str = \"new_validator\";\n/// Named constant for `era_id`.\npub 
const ARG_ERA_ID: &str = \"era_id\";\n/// Named constant for `validator_slots` argument.\npub const ARG_VALIDATOR_SLOTS: &str = VALIDATOR_SLOTS_KEY;\n/// Named constant for `mint_contract_package_hash`\npub const ARG_MINT_CONTRACT_PACKAGE_HASH: &str = \"mint_contract_package_hash\";\n/// Named constant for `genesis_validators`\npub const ARG_GENESIS_VALIDATORS: &str = \"genesis_validators\";\n/// Named constant of `auction_delay`\npub const ARG_AUCTION_DELAY: &str = \"auction_delay\";\n/// Named constant for `locked_funds_period`\npub const ARG_LOCKED_FUNDS_PERIOD: &str = \"locked_funds_period\";\n/// Named constant for `unbonding_delay`\npub const ARG_UNBONDING_DELAY: &str = \"unbonding_delay\";\n/// Named constant for `era_end_timestamp_millis`;\npub const ARG_ERA_END_TIMESTAMP_MILLIS: &str = \"era_end_timestamp_millis\";\n/// Named constant for `evicted_validators`;\npub const ARG_EVICTED_VALIDATORS: &str = \"evicted_validators\";\n/// Named constant for `rewards_map`;\npub const ARG_REWARDS_MAP: &str = \"rewards_map\";\n/// Named constant for `entry_point`;\npub const ARG_ENTRY_POINT: &str = \"entry_point\";\n/// Named constant for `minimum_delegation_amount`.\npub const ARG_MINIMUM_DELEGATION_AMOUNT: &str = \"minimum_delegation_amount\";\n/// Named constant for `maximum_delegation_amount`.\npub const ARG_MAXIMUM_DELEGATION_AMOUNT: &str = \"maximum_delegation_amount\";\n/// Named constant for `reserved_slots`.\npub const ARG_RESERVED_SLOTS: &str = \"reserved_slots\";\n\n/// Named constant for method `get_era_validators`.\npub const METHOD_GET_ERA_VALIDATORS: &str = \"get_era_validators\";\n/// Named constant for method `add_bid`.\npub const METHOD_ADD_BID: &str = \"add_bid\";\n/// Named constant for method `withdraw_bid`.\npub const METHOD_WITHDRAW_BID: &str = \"withdraw_bid\";\n/// Named constant for method `delegate`.\npub const METHOD_DELEGATE: &str = \"delegate\";\n/// Named constant for method `undelegate`.\npub const METHOD_UNDELEGATE: &str = 
\"undelegate\";\n/// Named constant for method `redelegate`.\npub const METHOD_REDELEGATE: &str = \"redelegate\";\n/// Named constant for method `run_auction`.\npub const METHOD_RUN_AUCTION: &str = \"run_auction\";\n/// Named constant for method `slash`.\npub const METHOD_SLASH: &str = \"slash\";\n/// Named constant for method `distribute`.\npub const METHOD_DISTRIBUTE: &str = \"distribute\";\n/// Named constant for method `read_era_id`.\npub const METHOD_READ_ERA_ID: &str = \"read_era_id\";\n/// Named constant for method `activate_bid`.\npub const METHOD_ACTIVATE_BID: &str = \"activate_bid\";\n/// Named constant for method `change_bid_public_key`.\npub const METHOD_CHANGE_BID_PUBLIC_KEY: &str = \" change_bid_public_key\";\n/// Named constant for method `add_reservations`.\npub const METHOD_ADD_RESERVATIONS: &str = \"add_reservations\";\n/// Named constant for method `cancel_reservations`.\npub const METHOD_CANCEL_RESERVATIONS: &str = \"cancel_reservations\";\n\n/// Storage for `EraId`.\npub const ERA_ID_KEY: &str = \"era_id\";\n/// Storage for era-end timestamp.\npub const ERA_END_TIMESTAMP_MILLIS_KEY: &str = \"era_end_timestamp_millis\";\n/// Storage for `SeigniorageRecipientsSnapshot`.\npub const SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY: &str = \"seigniorage_recipients_snapshot\";\n/// Storage for a flag determining current version of `SeigniorageRecipientsSnapshot`.\npub const SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY: &str =\n    \"seigniorage_recipients_snapshot_version\";\n/// Default value for the current version of `SeigniorageRecipientsSnapshot`.\npub const DEFAULT_SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION: u8 = 2;\n\n/// Total validator slots allowed.\npub const VALIDATOR_SLOTS_KEY: &str = \"validator_slots\";\n/// Amount of auction delay.\npub const AUCTION_DELAY_KEY: &str = \"auction_delay\";\n/// Default lock period for new bid entries represented in eras.\npub const LOCKED_FUNDS_PERIOD_KEY: &str = \"locked_funds_period\";\n/// Unbonding delay expressed in 
eras.\npub const UNBONDING_DELAY_KEY: &str = \"unbonding_delay\";\n/// Key under which minimum delegation rate is stored in the auction contracts named keys\npub const MINIMUM_DELEGATION_RATE_KEY: &str = \"minimum_delegation_rate\";\n"
  },
  {
    "path": "types/src/system/auction/delegator.rs",
    "content": "use alloc::vec::Vec;\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    system::auction::{bid::VestingSchedule, Error, VESTING_SCHEDULE_LENGTH_MILLIS},\n    CLType, CLTyped, PublicKey, URef, U512,\n};\n\n/// Represents a party delegating their stake to a validator (or \"delegatee\")\n#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct Delegator {\n    delegator_public_key: PublicKey,\n    staked_amount: U512,\n    bonding_purse: URef,\n    validator_public_key: PublicKey,\n    vesting_schedule: Option<VestingSchedule>,\n}\n\nimpl Delegator {\n    /// Creates a new [`Delegator`]\n    pub fn unlocked(\n        delegator_public_key: PublicKey,\n        staked_amount: U512,\n        bonding_purse: URef,\n        validator_public_key: PublicKey,\n    ) -> Self {\n        let vesting_schedule = None;\n        Delegator {\n            delegator_public_key,\n            staked_amount,\n            bonding_purse,\n            validator_public_key,\n            vesting_schedule,\n        }\n    }\n\n    /// Creates new instance of a [`Delegator`] with locked funds.\n    pub fn locked(\n        delegator_public_key: PublicKey,\n        staked_amount: U512,\n        bonding_purse: URef,\n        validator_public_key: PublicKey,\n        release_timestamp_millis: u64,\n    ) -> Self {\n        let vesting_schedule = Some(VestingSchedule::new(release_timestamp_millis));\n        Delegator {\n            delegator_public_key,\n            staked_amount,\n            bonding_purse,\n            validator_public_key,\n            vesting_schedule,\n        }\n    }\n\n    /// Returns 
public key of the delegator.\n    pub fn delegator_public_key(&self) -> &PublicKey {\n        &self.delegator_public_key\n    }\n\n    /// Checks if a bid is still locked under a vesting schedule.\n    ///\n    /// Returns true if a timestamp falls below the initial lockup period + 91 days release\n    /// schedule, otherwise false.\n    pub fn is_locked(&self, timestamp_millis: u64) -> bool {\n        self.is_locked_with_vesting_schedule(timestamp_millis, VESTING_SCHEDULE_LENGTH_MILLIS)\n    }\n\n    /// Checks if a bid is still locked under a vesting schedule.\n    ///\n    /// Returns true if a timestamp falls below the initial lockup period + 91 days release\n    /// schedule, otherwise false.\n    pub fn is_locked_with_vesting_schedule(\n        &self,\n        timestamp_millis: u64,\n        vesting_schedule_period_millis: u64,\n    ) -> bool {\n        match &self.vesting_schedule {\n            Some(vesting_schedule) => {\n                vesting_schedule.is_vesting(timestamp_millis, vesting_schedule_period_millis)\n            }\n            None => false,\n        }\n    }\n\n    /// Returns the staked amount\n    pub fn staked_amount(&self) -> U512 {\n        self.staked_amount\n    }\n\n    /// Returns the mutable staked amount\n    pub fn staked_amount_mut(&mut self) -> &mut U512 {\n        &mut self.staked_amount\n    }\n\n    /// Returns the bonding purse\n    pub fn bonding_purse(&self) -> &URef {\n        &self.bonding_purse\n    }\n\n    /// Returns delegatee\n    pub fn validator_public_key(&self) -> &PublicKey {\n        &self.validator_public_key\n    }\n\n    /// Decreases the stake of the provided bid\n    pub fn decrease_stake(\n        &mut self,\n        amount: U512,\n        era_end_timestamp_millis: u64,\n    ) -> Result<U512, Error> {\n        let updated_staked_amount = self\n            .staked_amount\n            .checked_sub(amount)\n            .ok_or(Error::InvalidAmount)?;\n\n        let vesting_schedule = match 
self.vesting_schedule.as_ref() {\n            Some(vesting_schedule) => vesting_schedule,\n            None => {\n                self.staked_amount = updated_staked_amount;\n                return Ok(updated_staked_amount);\n            }\n        };\n\n        match vesting_schedule.locked_amount(era_end_timestamp_millis) {\n            Some(locked_amount) if updated_staked_amount < locked_amount => {\n                Err(Error::DelegatorFundsLocked)\n            }\n            None => {\n                // If `None`, then the locked amounts table has yet to be initialized (likely\n                // pre-90 day mark)\n                Err(Error::DelegatorFundsLocked)\n            }\n            Some(_) => {\n                self.staked_amount = updated_staked_amount;\n                Ok(updated_staked_amount)\n            }\n        }\n    }\n\n    /// Increases the stake of the provided bid\n    pub fn increase_stake(&mut self, amount: U512) -> Result<U512, Error> {\n        let updated_staked_amount = self\n            .staked_amount\n            .checked_add(amount)\n            .ok_or(Error::InvalidAmount)?;\n\n        self.staked_amount = updated_staked_amount;\n\n        Ok(updated_staked_amount)\n    }\n\n    /// Returns a reference to the vesting schedule of the provided\n    /// delegator bid.  `None` if a non-genesis validator.\n    pub fn vesting_schedule(&self) -> Option<&VestingSchedule> {\n        self.vesting_schedule.as_ref()\n    }\n\n    /// Returns a mutable reference to the vesting schedule of the provided\n    /// delegator bid.  
`None` if a non-genesis validator.\n    pub fn vesting_schedule_mut(&mut self) -> Option<&mut VestingSchedule> {\n        self.vesting_schedule.as_mut()\n    }\n\n    /// Creates a new inactive instance of a bid with 0 staked amount.\n    pub fn empty(\n        validator_public_key: PublicKey,\n        delegator_public_key: PublicKey,\n        bonding_purse: URef,\n    ) -> Self {\n        let vesting_schedule = None;\n        let staked_amount = 0.into();\n        Self {\n            validator_public_key,\n            delegator_public_key,\n            bonding_purse,\n            staked_amount,\n            vesting_schedule,\n        }\n    }\n\n    /// Sets validator public key\n    pub fn with_validator_public_key(&mut self, validator_public_key: PublicKey) -> &mut Self {\n        self.validator_public_key = validator_public_key;\n        self\n    }\n}\n\nimpl CLTyped for Delegator {\n    fn cl_type() -> CLType {\n        CLType::Any\n    }\n}\n\nimpl ToBytes for Delegator {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        buffer.extend(self.delegator_public_key.to_bytes()?);\n        buffer.extend(self.staked_amount.to_bytes()?);\n        buffer.extend(self.bonding_purse.to_bytes()?);\n        buffer.extend(self.validator_public_key.to_bytes()?);\n        buffer.extend(self.vesting_schedule.to_bytes()?);\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.delegator_public_key.serialized_length()\n            + self.staked_amount.serialized_length()\n            + self.bonding_purse.serialized_length()\n            + self.validator_public_key.serialized_length()\n            + self.vesting_schedule.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.delegator_public_key.write_bytes(writer)?;\n        self.staked_amount.write_bytes(writer)?;\n        
self.bonding_purse.write_bytes(writer)?;\n        self.validator_public_key.write_bytes(writer)?;\n        self.vesting_schedule.write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for Delegator {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (delegator_public_key, bytes) = PublicKey::from_bytes(bytes)?;\n        let (staked_amount, bytes) = U512::from_bytes(bytes)?;\n        let (bonding_purse, bytes) = URef::from_bytes(bytes)?;\n        let (validator_public_key, bytes) = PublicKey::from_bytes(bytes)?;\n        let (vesting_schedule, bytes) = FromBytes::from_bytes(bytes)?;\n        Ok((\n            Delegator {\n                delegator_public_key,\n                staked_amount,\n                bonding_purse,\n                validator_public_key,\n                vesting_schedule,\n            },\n            bytes,\n        ))\n    }\n}\n\nimpl Display for Delegator {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(\n            formatter,\n            \"delegator {{ {} {} motes, bonding purse {}, validator {} }}\",\n            self.delegator_public_key,\n            self.staked_amount,\n            self.bonding_purse,\n            self.validator_public_key\n        )\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{\n        bytesrepr, system::auction::Delegator, AccessRights, PublicKey, SecretKey, URef, U512,\n    };\n\n    #[test]\n    fn serialization_roundtrip() {\n        let staked_amount = U512::one();\n        let bonding_purse = URef::new([42; 32], AccessRights::READ_ADD_WRITE);\n        let delegator_public_key: PublicKey = PublicKey::from(\n            &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(),\n        );\n\n        let validator_public_key: PublicKey = PublicKey::from(\n            &SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(),\n        );\n        let unlocked_delegator = 
Delegator::unlocked(\n            delegator_public_key.clone(),\n            staked_amount,\n            bonding_purse,\n            validator_public_key.clone(),\n        );\n        bytesrepr::test_serialization_roundtrip(&unlocked_delegator);\n\n        let release_timestamp_millis = 42;\n        let locked_delegator = Delegator::locked(\n            delegator_public_key,\n            staked_amount,\n            bonding_purse,\n            validator_public_key,\n            release_timestamp_millis,\n        );\n        bytesrepr::test_serialization_roundtrip(&locked_delegator);\n    }\n}\n\n#[cfg(test)]\nmod prop_tests {\n    use proptest::prelude::*;\n\n    use crate::{bytesrepr, gens};\n\n    proptest! {\n        #[test]\n        fn test_value_bid(bid in gens::delegator_arb()) {\n            bytesrepr::test_serialization_roundtrip(&bid);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/system/auction/delegator_bid.rs",
    "content": "// TODO - remove once schemars stops causing warning.\n#![allow(clippy::field_reassign_with_default)]\n\nuse alloc::vec::Vec;\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    system::auction::{\n        bid::VestingSchedule, delegator_kind::DelegatorKind, BidAddr, Delegator, Error, UnbondKind,\n        VESTING_SCHEDULE_LENGTH_MILLIS,\n    },\n    CLType, CLTyped, PublicKey, URef, U512,\n};\n\n/// Represents a party delegating their stake to a validator (or \"delegatee\")\n#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct DelegatorBid {\n    delegator_kind: DelegatorKind,\n    staked_amount: U512,\n    bonding_purse: URef,\n    validator_public_key: PublicKey,\n    vesting_schedule: Option<VestingSchedule>,\n}\n\nimpl DelegatorBid {\n    /// Creates a new [`DelegatorBid`]\n    pub fn unlocked(\n        delegator_kind: DelegatorKind,\n        staked_amount: U512,\n        bonding_purse: URef,\n        validator_public_key: PublicKey,\n    ) -> Self {\n        let vesting_schedule = None;\n        DelegatorBid {\n            delegator_kind,\n            staked_amount,\n            bonding_purse,\n            validator_public_key,\n            vesting_schedule,\n        }\n    }\n\n    /// Creates new instance of a [`DelegatorBid`] with locked funds.\n    pub fn locked(\n        delegator_kind: DelegatorKind,\n        staked_amount: U512,\n        bonding_purse: URef,\n        validator_public_key: PublicKey,\n        release_timestamp_millis: u64,\n    ) -> Self {\n        let vesting_schedule = Some(VestingSchedule::new(release_timestamp_millis));\n        DelegatorBid 
{\n            delegator_kind,\n            staked_amount,\n            bonding_purse,\n            validator_public_key,\n            vesting_schedule,\n        }\n    }\n\n    /// Returns the delegator kind.\n    pub fn delegator_kind(&self) -> &DelegatorKind {\n        &self.delegator_kind\n    }\n\n    /// Checks if a bid is still locked under a vesting schedule.\n    ///\n    /// Returns true if a timestamp falls below the initial lockup period + 91 days release\n    /// schedule, otherwise false.\n    pub fn is_locked(&self, timestamp_millis: u64) -> bool {\n        self.is_locked_with_vesting_schedule(timestamp_millis, VESTING_SCHEDULE_LENGTH_MILLIS)\n    }\n\n    /// Checks if a bid is still locked under a vesting schedule.\n    ///\n    /// Returns true if a timestamp falls below the initial lockup period + 91 days release\n    /// schedule, otherwise false.\n    pub fn is_locked_with_vesting_schedule(\n        &self,\n        timestamp_millis: u64,\n        vesting_schedule_period_millis: u64,\n    ) -> bool {\n        match &self.vesting_schedule {\n            Some(vesting_schedule) => {\n                vesting_schedule.is_vesting(timestamp_millis, vesting_schedule_period_millis)\n            }\n            None => false,\n        }\n    }\n\n    /// Returns the staked amount\n    pub fn staked_amount(&self) -> U512 {\n        self.staked_amount\n    }\n\n    /// Returns the mutable staked amount\n    pub fn staked_amount_mut(&mut self) -> &mut U512 {\n        &mut self.staked_amount\n    }\n\n    /// Returns the bonding purse\n    pub fn bonding_purse(&self) -> &URef {\n        &self.bonding_purse\n    }\n\n    /// Returns delegatee\n    pub fn validator_public_key(&self) -> &PublicKey {\n        &self.validator_public_key\n    }\n\n    /// Decreases the stake of the provided bid\n    pub fn decrease_stake(\n        &mut self,\n        amount: U512,\n        era_end_timestamp_millis: u64,\n    ) -> Result<U512, Error> {\n        let 
updated_staked_amount = self\n            .staked_amount\n            .checked_sub(amount)\n            .ok_or(Error::InvalidAmount)?;\n\n        let vesting_schedule = match self.vesting_schedule.as_ref() {\n            Some(vesting_schedule) => vesting_schedule,\n            None => {\n                self.staked_amount = updated_staked_amount;\n                return Ok(updated_staked_amount);\n            }\n        };\n\n        match vesting_schedule.locked_amount(era_end_timestamp_millis) {\n            Some(locked_amount) if updated_staked_amount < locked_amount => {\n                Err(Error::DelegatorFundsLocked)\n            }\n            None => {\n                // If `None`, then the locked amounts table has yet to be initialized (likely\n                // pre-90 day mark)\n                Err(Error::DelegatorFundsLocked)\n            }\n            Some(_) => {\n                self.staked_amount = updated_staked_amount;\n                Ok(updated_staked_amount)\n            }\n        }\n    }\n\n    /// Increases the stake of the provided bid\n    pub fn increase_stake(&mut self, amount: U512) -> Result<U512, Error> {\n        let updated_staked_amount = self\n            .staked_amount\n            .checked_add(amount)\n            .ok_or(Error::InvalidAmount)?;\n\n        self.staked_amount = updated_staked_amount;\n\n        Ok(updated_staked_amount)\n    }\n\n    /// Returns a reference to the vesting schedule of the provided\n    /// delegator bid.  `None` if a non-genesis validator.\n    pub fn vesting_schedule(&self) -> Option<&VestingSchedule> {\n        self.vesting_schedule.as_ref()\n    }\n\n    /// Returns a mutable reference to the vesting schedule of the provided\n    /// delegator bid.  
`None` if a non-genesis validator.\n    pub fn vesting_schedule_mut(&mut self) -> Option<&mut VestingSchedule> {\n        self.vesting_schedule.as_mut()\n    }\n\n    /// Creates a new inactive instance of a bid with 0 staked amount.\n    pub fn empty(\n        validator_public_key: PublicKey,\n        delegator_kind: DelegatorKind,\n        bonding_purse: URef,\n    ) -> Self {\n        let vesting_schedule = None;\n        let staked_amount = 0.into();\n        Self {\n            validator_public_key,\n            delegator_kind,\n            bonding_purse,\n            staked_amount,\n            vesting_schedule,\n        }\n    }\n\n    /// Sets validator public key\n    pub fn with_validator_public_key(&mut self, validator_public_key: PublicKey) -> &mut Self {\n        self.validator_public_key = validator_public_key;\n        self\n    }\n\n    /// BidAddr for this instance.\n    pub fn bid_addr(&self) -> BidAddr {\n        match &self.delegator_kind {\n            DelegatorKind::PublicKey(pk) => BidAddr::DelegatedAccount {\n                validator: self.validator_public_key.clone().to_account_hash(),\n                delegator: pk.clone().to_account_hash(),\n            },\n            DelegatorKind::Purse(addr) => BidAddr::DelegatedPurse {\n                validator: self.validator_public_key.clone().to_account_hash(),\n                delegator: *addr,\n            },\n        }\n    }\n\n    /// BidAddr for this instance.\n    pub fn unbond_kind(&self) -> UnbondKind {\n        match &self.delegator_kind {\n            DelegatorKind::PublicKey(pk) => UnbondKind::DelegatedPublicKey(pk.clone()),\n            DelegatorKind::Purse(addr) => UnbondKind::DelegatedPurse(*addr),\n        }\n    }\n}\n\nimpl CLTyped for DelegatorBid {\n    fn cl_type() -> CLType {\n        CLType::Any\n    }\n}\n\nimpl ToBytes for DelegatorBid {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        
buffer.extend(self.delegator_kind.to_bytes()?);\n        buffer.extend(self.staked_amount.to_bytes()?);\n        buffer.extend(self.bonding_purse.to_bytes()?);\n        buffer.extend(self.validator_public_key.to_bytes()?);\n        buffer.extend(self.vesting_schedule.to_bytes()?);\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.delegator_kind.serialized_length()\n            + self.staked_amount.serialized_length()\n            + self.bonding_purse.serialized_length()\n            + self.validator_public_key.serialized_length()\n            + self.vesting_schedule.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.delegator_kind.write_bytes(writer)?;\n        self.staked_amount.write_bytes(writer)?;\n        self.bonding_purse.write_bytes(writer)?;\n        self.validator_public_key.write_bytes(writer)?;\n        self.vesting_schedule.write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for DelegatorBid {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (delegator_kind, bytes) = DelegatorKind::from_bytes(bytes)?;\n        let (staked_amount, bytes) = U512::from_bytes(bytes)?;\n        let (bonding_purse, bytes) = URef::from_bytes(bytes)?;\n        let (validator_public_key, bytes) = PublicKey::from_bytes(bytes)?;\n        let (vesting_schedule, bytes) = FromBytes::from_bytes(bytes)?;\n        Ok((\n            DelegatorBid {\n                delegator_kind,\n                staked_amount,\n                bonding_purse,\n                validator_public_key,\n                vesting_schedule,\n            },\n            bytes,\n        ))\n    }\n}\n\nimpl Display for DelegatorBid {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(\n            formatter,\n            \"delegator {{ {} {} motes, bonding purse {}, validator {} }}\",\n            self.delegator_kind, 
self.staked_amount, self.bonding_purse, self.validator_public_key\n        )\n    }\n}\n\nimpl From<Delegator> for DelegatorBid {\n    fn from(value: Delegator) -> Self {\n        DelegatorBid {\n            delegator_kind: value.delegator_public_key().clone().into(),\n            validator_public_key: value.validator_public_key().clone(),\n            staked_amount: value.staked_amount(),\n            bonding_purse: *value.bonding_purse(),\n            vesting_schedule: value.vesting_schedule().cloned(),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{\n        bytesrepr,\n        system::auction::{delegator_kind::DelegatorKind, DelegatorBid},\n        AccessRights, PublicKey, SecretKey, URef, U512,\n    };\n\n    #[test]\n    fn serialization_roundtrip() {\n        let staked_amount = U512::one();\n        let bonding_purse = URef::new([42; 32], AccessRights::READ_ADD_WRITE);\n        let delegator_public_key: PublicKey = PublicKey::from(\n            &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(),\n        );\n\n        let delegator_kind = DelegatorKind::PublicKey(delegator_public_key);\n\n        let validator_public_key: PublicKey = PublicKey::from(\n            &SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(),\n        );\n        let unlocked_delegator = DelegatorBid::unlocked(\n            delegator_kind.clone(),\n            staked_amount,\n            bonding_purse,\n            validator_public_key.clone(),\n        );\n        bytesrepr::test_serialization_roundtrip(&unlocked_delegator);\n\n        let release_timestamp_millis = 42;\n        let locked_delegator = DelegatorBid::locked(\n            delegator_kind,\n            staked_amount,\n            bonding_purse,\n            validator_public_key,\n            release_timestamp_millis,\n        );\n        bytesrepr::test_serialization_roundtrip(&locked_delegator);\n    }\n}\n\n#[cfg(test)]\nmod prop_tests {\n    use 
proptest::prelude::*;\n\n    use crate::{bytesrepr, gens};\n\n    proptest! {\n        #[test]\n        fn test_value_bid(bid in gens::delegator_bid_arb()) {\n            bytesrepr::test_serialization_roundtrip(&bid);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/system/auction/delegator_kind.rs",
    "content": "use crate::{\n    bytesrepr,\n    bytesrepr::{FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    checksummed_hex, CLType, CLTyped, PublicKey, URef, URefAddr,\n};\nuse alloc::{string::String, vec::Vec};\nuse core::{\n    fmt,\n    fmt::{Display, Formatter},\n};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer};\nuse serde_helpers::{HumanReadableDelegatorKind, NonHumanReadableDelegatorKind};\n\n/// DelegatorKindTag variants.\n#[repr(u8)]\n#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)]\npub enum DelegatorKindTag {\n    /// Public key.\n    PublicKey = 0,\n    /// Purse.\n    Purse = 1,\n}\n\n/// Auction bid variants.\n#[derive(Debug, PartialEq, Eq, Clone, PartialOrd, Ord)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n/// Kinds of delegation bids.\npub enum DelegatorKind {\n    /// Delegation from public key.\n    PublicKey(PublicKey),\n    /// Delegation from purse.\n    Purse(#[cfg_attr(feature = \"json-schema\", schemars(with = \"String\"))] URefAddr),\n}\n\nimpl DelegatorKind {\n    /// DelegatorKindTag.\n    pub fn tag(&self) -> DelegatorKindTag {\n        match self {\n            DelegatorKind::PublicKey(_) => DelegatorKindTag::PublicKey,\n            DelegatorKind::Purse(_) => DelegatorKindTag::Purse,\n        }\n    }\n\n    /// Returns true if the kind is a purse.\n    pub fn is_purse(&self) -> bool {\n        matches!(self, DelegatorKind::Purse(_))\n    }\n}\n\nimpl ToBytes for DelegatorKind {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        let (tag, mut serialized_data) = match self {\n            
DelegatorKind::PublicKey(public_key) => {\n                (DelegatorKindTag::PublicKey, public_key.to_bytes()?)\n            }\n            DelegatorKind::Purse(uref_addr) => (DelegatorKindTag::Purse, uref_addr.to_bytes()?),\n        };\n        result.push(tag as u8);\n        result.append(&mut serialized_data);\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                DelegatorKind::PublicKey(pk) => pk.serialized_length(),\n                DelegatorKind::Purse(addr) => addr.serialized_length(),\n            }\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        writer.push(self.tag() as u8);\n        match self {\n            DelegatorKind::PublicKey(pk) => pk.write_bytes(writer)?,\n            DelegatorKind::Purse(addr) => addr.write_bytes(writer)?,\n        };\n        Ok(())\n    }\n}\n\nimpl FromBytes for DelegatorKind {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?;\n        match tag {\n            tag if tag == DelegatorKindTag::PublicKey as u8 => PublicKey::from_bytes(remainder)\n                .map(|(pk, remainder)| (DelegatorKind::PublicKey(pk), remainder)),\n            tag if tag == DelegatorKindTag::Purse as u8 => URefAddr::from_bytes(remainder)\n                .map(|(addr, remainder)| (DelegatorKind::Purse(addr), remainder)),\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\nimpl Display for DelegatorKind {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            DelegatorKind::PublicKey(pk) => {\n                write!(formatter, \"{}\", pk)\n            }\n            DelegatorKind::Purse(addr) => {\n                write!(formatter, \"{}\", base16::encode_lower(addr))\n            }\n        }\n    }\n}\n\nimpl From<PublicKey> for 
DelegatorKind {\n    fn from(value: PublicKey) -> Self {\n        DelegatorKind::PublicKey(value)\n    }\n}\n\nimpl From<&PublicKey> for DelegatorKind {\n    fn from(value: &PublicKey) -> Self {\n        DelegatorKind::PublicKey(value.clone())\n    }\n}\n\nimpl From<URef> for DelegatorKind {\n    fn from(value: URef) -> Self {\n        DelegatorKind::Purse(value.addr())\n    }\n}\n\nimpl From<URefAddr> for DelegatorKind {\n    fn from(value: URefAddr) -> Self {\n        DelegatorKind::Purse(value)\n    }\n}\n\nimpl CLTyped for DelegatorKind {\n    fn cl_type() -> CLType {\n        CLType::Any\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<DelegatorKind> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> DelegatorKind {\n        if rng.gen() {\n            DelegatorKind::PublicKey(rng.gen())\n        } else {\n            DelegatorKind::Purse(rng.gen())\n        }\n    }\n}\n\nimpl Serialize for DelegatorKind {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            HumanReadableDelegatorKind::from(self).serialize(serializer)\n        } else {\n            NonHumanReadableDelegatorKind::from(self).serialize(serializer)\n        }\n    }\n}\n\n#[derive(Debug)]\nenum DelegatorKindError {\n    DeserializationError(String),\n}\n\nimpl Display for DelegatorKindError {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        match self {\n            DelegatorKindError::DeserializationError(error) => {\n                write!(f, \"Error when deserializing DelegatorKind: {}\", error)\n            }\n        }\n    }\n}\n\nimpl TryFrom<HumanReadableDelegatorKind> for DelegatorKind {\n    type Error = DelegatorKindError;\n\n    fn try_from(value: HumanReadableDelegatorKind) -> Result<Self, Self::Error> {\n        match value {\n            HumanReadableDelegatorKind::PublicKey(public_key) => {\n                
Ok(DelegatorKind::PublicKey(public_key))\n            }\n            HumanReadableDelegatorKind::Purse(encoded) => {\n                let decoded = checksummed_hex::decode(encoded).map_err(|e| {\n                    DelegatorKindError::DeserializationError(format!(\n                        \"Failed to decode encoded URefAddr: {}\",\n                        e\n                    ))\n                })?;\n                let uref_addr = URefAddr::try_from(decoded.as_ref()).map_err(|e| {\n                    DelegatorKindError::DeserializationError(format!(\n                        \"Failed to build uref address: {}\",\n                        e\n                    ))\n                })?;\n                Ok(DelegatorKind::Purse(uref_addr))\n            }\n        }\n    }\n}\n\nimpl From<NonHumanReadableDelegatorKind> for DelegatorKind {\n    fn from(value: NonHumanReadableDelegatorKind) -> Self {\n        match value {\n            NonHumanReadableDelegatorKind::PublicKey(public_key) => {\n                DelegatorKind::PublicKey(public_key)\n            }\n            NonHumanReadableDelegatorKind::Purse(addr) => DelegatorKind::Purse(addr),\n        }\n    }\n}\n\nimpl<'de> Deserialize<'de> for DelegatorKind {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        if deserializer.is_human_readable() {\n            let human_readable = HumanReadableDelegatorKind::deserialize(deserializer)?;\n            DelegatorKind::try_from(human_readable)\n                .map_err(|error| SerdeError::custom(format!(\"{:?}\", error)))\n        } else {\n            let non_human_readable = NonHumanReadableDelegatorKind::deserialize(deserializer)?;\n            Ok(DelegatorKind::from(non_human_readable))\n        }\n    }\n}\n\nmod serde_helpers {\n    use super::DelegatorKind;\n    use crate::{PublicKey, URefAddr};\n    use alloc::string::String;\n    use serde::{Deserialize, Serialize};\n\n    #[derive(Serialize, Deserialize)]\n    
pub(super) enum HumanReadableDelegatorKind {\n        PublicKey(PublicKey),\n        Purse(String),\n    }\n\n    #[derive(Serialize, Deserialize)]\n    pub(super) enum NonHumanReadableDelegatorKind {\n        PublicKey(PublicKey),\n        Purse(URefAddr),\n    }\n\n    impl From<&DelegatorKind> for HumanReadableDelegatorKind {\n        fn from(delegator_kind: &DelegatorKind) -> Self {\n            match delegator_kind {\n                DelegatorKind::PublicKey(public_key) => {\n                    HumanReadableDelegatorKind::PublicKey(public_key.clone())\n                }\n                DelegatorKind::Purse(uref_addr) => {\n                    HumanReadableDelegatorKind::Purse(base16::encode_lower(uref_addr))\n                }\n            }\n        }\n    }\n\n    impl From<&DelegatorKind> for NonHumanReadableDelegatorKind {\n        fn from(delegator_kind: &DelegatorKind) -> Self {\n            match delegator_kind {\n                DelegatorKind::PublicKey(public_key) => {\n                    NonHumanReadableDelegatorKind::PublicKey(public_key.clone())\n                }\n                DelegatorKind::Purse(uref_addr) => NonHumanReadableDelegatorKind::Purse(*uref_addr),\n            }\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use rand::Rng;\n\n    use crate::{\n        bytesrepr, system::auction::delegator_kind::DelegatorKind, testing::TestRng, PublicKey,\n        SecretKey,\n    };\n\n    #[test]\n    fn purse_serialized_as_string() {\n        let delegator_kind_payload = DelegatorKind::Purse([1; 32]);\n        let serialized = serde_json::to_string(&delegator_kind_payload).unwrap();\n        assert_eq!(\n            serialized,\n            \"{\\\"Purse\\\":\\\"0101010101010101010101010101010101010101010101010101010101010101\\\"}\"\n        );\n    }\n\n    #[test]\n    fn given_broken_address_purse_deserialziation_fails() {\n        let failing =\n            
\"{\\\"Purse\\\":\\\"Z101010101010101010101010101010101010101010101010101010101010101\\\"}\";\n        let ret = serde_json::from_str::<DelegatorKind>(failing);\n        assert!(ret.is_err());\n        let failing = \"{\\\"Purse\\\":\\\"01010101010101010101010101010101010101010101010101010101\\\"}\";\n        let ret = serde_json::from_str::<DelegatorKind>(failing);\n        assert!(ret.is_err());\n    }\n\n    #[test]\n    fn json_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let delegator_kind_payload = DelegatorKind::PublicKey(PublicKey::random(rng));\n        let json_string = serde_json::to_string_pretty(&delegator_kind_payload).unwrap();\n        let decoded: DelegatorKind = serde_json::from_str(&json_string).unwrap();\n        assert_eq!(decoded, delegator_kind_payload);\n\n        let delegator_kind_payload = DelegatorKind::Purse(rng.gen());\n        let json_string = serde_json::to_string_pretty(&delegator_kind_payload).unwrap();\n        let decoded: DelegatorKind = serde_json::from_str(&json_string).unwrap();\n        assert_eq!(decoded, delegator_kind_payload);\n    }\n\n    #[test]\n    fn serialization_roundtrip() {\n        let delegator_kind = DelegatorKind::PublicKey(PublicKey::from(\n            &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(),\n        ));\n\n        bytesrepr::test_serialization_roundtrip(&delegator_kind);\n\n        let delegator_kind = DelegatorKind::Purse([43; 32]);\n\n        bytesrepr::test_serialization_roundtrip(&delegator_kind);\n    }\n}\n\n#[cfg(test)]\nmod prop_tests {\n    use proptest::prelude::*;\n\n    use crate::{bytesrepr, gens};\n\n    proptest! {\n        #[test]\n        fn test_value_bid(kind in gens::delegator_kind_arb()) {\n            bytesrepr::test_serialization_roundtrip(&kind);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/system/auction/entry_points.rs",
    "content": "use crate::{\n    system::auction::{\n        DelegationRate, ValidatorWeights, ARG_AMOUNT, ARG_DELEGATION_RATE, ARG_DELEGATOR,\n        ARG_DELEGATORS, ARG_ERA_END_TIMESTAMP_MILLIS, ARG_NEW_VALIDATOR, ARG_PUBLIC_KEY,\n        ARG_VALIDATOR, METHOD_ACTIVATE_BID, METHOD_ADD_BID, METHOD_DELEGATE, METHOD_DISTRIBUTE,\n        METHOD_GET_ERA_VALIDATORS, METHOD_READ_ERA_ID, METHOD_REDELEGATE, METHOD_RUN_AUCTION,\n        METHOD_SLASH, METHOD_UNDELEGATE, METHOD_WITHDRAW_BID,\n    },\n    CLType, CLTyped, EntityEntryPoint, EntryPointAccess, EntryPointPayment, EntryPointType,\n    EntryPoints, Parameter, PublicKey, U512,\n};\nuse alloc::boxed::Box;\n\nuse super::{\n    DelegatorKind, Reservation, ARG_MAXIMUM_DELEGATION_AMOUNT, ARG_MINIMUM_DELEGATION_AMOUNT,\n    ARG_NEW_PUBLIC_KEY, ARG_RESERVATIONS, ARG_REWARDS_MAP, METHOD_ADD_RESERVATIONS,\n    METHOD_CANCEL_RESERVATIONS, METHOD_CHANGE_BID_PUBLIC_KEY,\n};\n\n/// Creates auction contract entry points.\npub fn auction_entry_points() -> EntryPoints {\n    let mut entry_points = EntryPoints::new();\n\n    let entry_point = EntityEntryPoint::new(\n        METHOD_GET_ERA_VALIDATORS,\n        vec![],\n        Option::<ValidatorWeights>::cl_type(),\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(entry_point);\n\n    let entry_point = EntityEntryPoint::new(\n        METHOD_ADD_BID,\n        vec![\n            Parameter::new(ARG_PUBLIC_KEY, PublicKey::cl_type()),\n            Parameter::new(ARG_DELEGATION_RATE, DelegationRate::cl_type()),\n            Parameter::new(ARG_AMOUNT, U512::cl_type()),\n            Parameter::new(ARG_MINIMUM_DELEGATION_AMOUNT, u64::cl_type()),\n            Parameter::new(ARG_MAXIMUM_DELEGATION_AMOUNT, u64::cl_type()),\n        ],\n        U512::cl_type(),\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    
entry_points.add_entry_point(entry_point);\n\n    let entry_point = EntityEntryPoint::new(\n        METHOD_WITHDRAW_BID,\n        vec![\n            Parameter::new(ARG_PUBLIC_KEY, PublicKey::cl_type()),\n            Parameter::new(ARG_AMOUNT, U512::cl_type()),\n        ],\n        U512::cl_type(),\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(entry_point);\n\n    let entry_point = EntityEntryPoint::new(\n        METHOD_DELEGATE,\n        vec![\n            Parameter::new(ARG_DELEGATOR, PublicKey::cl_type()),\n            Parameter::new(ARG_VALIDATOR, PublicKey::cl_type()),\n            Parameter::new(ARG_AMOUNT, U512::cl_type()),\n        ],\n        U512::cl_type(),\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(entry_point);\n\n    let entry_point = EntityEntryPoint::new(\n        METHOD_UNDELEGATE,\n        vec![\n            Parameter::new(ARG_DELEGATOR, PublicKey::cl_type()),\n            Parameter::new(ARG_VALIDATOR, PublicKey::cl_type()),\n            Parameter::new(ARG_AMOUNT, U512::cl_type()),\n        ],\n        U512::cl_type(),\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(entry_point);\n\n    let entry_point = EntityEntryPoint::new(\n        METHOD_REDELEGATE,\n        vec![\n            Parameter::new(ARG_DELEGATOR, PublicKey::cl_type()),\n            Parameter::new(ARG_VALIDATOR, PublicKey::cl_type()),\n            Parameter::new(ARG_AMOUNT, U512::cl_type()),\n            Parameter::new(ARG_NEW_VALIDATOR, PublicKey::cl_type()),\n        ],\n        U512::cl_type(),\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(entry_point);\n\n    let entry_point = 
EntityEntryPoint::new(\n        METHOD_RUN_AUCTION,\n        vec![Parameter::new(ARG_ERA_END_TIMESTAMP_MILLIS, u64::cl_type())],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(entry_point);\n\n    let entry_point = EntityEntryPoint::new(\n        METHOD_SLASH,\n        vec![],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(entry_point);\n\n    let entry_point = EntityEntryPoint::new(\n        METHOD_DISTRIBUTE,\n        vec![Parameter::new(\n            ARG_REWARDS_MAP,\n            CLType::map(CLType::PublicKey, CLType::U512),\n        )],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(entry_point);\n\n    let entry_point = EntityEntryPoint::new(\n        METHOD_READ_ERA_ID,\n        vec![],\n        CLType::U64,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(entry_point);\n\n    let entry_point = EntityEntryPoint::new(\n        METHOD_ACTIVATE_BID,\n        vec![Parameter::new(ARG_VALIDATOR, CLType::PublicKey)],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(entry_point);\n\n    let entry_point = EntityEntryPoint::new(\n        METHOD_CHANGE_BID_PUBLIC_KEY,\n        vec![\n            Parameter::new(ARG_PUBLIC_KEY, PublicKey::cl_type()),\n            Parameter::new(ARG_NEW_PUBLIC_KEY, PublicKey::cl_type()),\n        ],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    
entry_points.add_entry_point(entry_point);\n\n    let entry_point = EntityEntryPoint::new(\n        METHOD_ADD_RESERVATIONS,\n        vec![Parameter::new(\n            ARG_RESERVATIONS,\n            CLType::List(Box::new(Reservation::cl_type())),\n        )],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(entry_point);\n\n    let entry_point = EntityEntryPoint::new(\n        METHOD_CANCEL_RESERVATIONS,\n        vec![\n            Parameter::new(ARG_VALIDATOR, PublicKey::cl_type()),\n            Parameter::new(\n                ARG_DELEGATORS,\n                CLType::List(Box::new(DelegatorKind::cl_type())),\n            ),\n        ],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(entry_point);\n\n    entry_points\n}\n"
  },
  {
    "path": "types/src/system/auction/era_info.rs",
    "content": "use alloc::{boxed::Box, vec::Vec};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    system::auction::DelegatorKind,\n    CLType, CLTyped, PublicKey, U512,\n};\n\nconst SEIGNIORAGE_ALLOCATION_VALIDATOR_TAG: u8 = 0;\nconst SEIGNIORAGE_ALLOCATION_DELEGATOR_TAG: u8 = 1;\nconst SEIGNIORAGE_ALLOCATION_DELEGATOR_KIND_TAG: u8 = 2;\n\n/// Information about a seigniorage allocation\n#[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub enum SeigniorageAllocation {\n    /// Info about a seigniorage allocation for a validator\n    Validator {\n        /// Validator's public key\n        validator_public_key: PublicKey,\n        /// Allocated amount\n        amount: U512,\n    },\n    /// Info about a seigniorage allocation for a delegator\n    Delegator {\n        /// Delegator's public key\n        delegator_public_key: PublicKey,\n        /// Validator's public key\n        validator_public_key: PublicKey,\n        /// Allocated amount\n        amount: U512,\n    },\n    /// Info about a seigniorage allocation for a delegator\n    DelegatorKind {\n        /// Delegator kind\n        delegator_kind: DelegatorKind,\n        /// Validator's public key\n        validator_public_key: PublicKey,\n        /// Allocated amount\n        amount: U512,\n    },\n}\n\nimpl SeigniorageAllocation {\n    /// Constructs a [`SeigniorageAllocation::Validator`]\n    pub const fn validator(validator_public_key: PublicKey, amount: U512) -> Self {\n        SeigniorageAllocation::Validator {\n            validator_public_key,\n            amount,\n        }\n    }\n\n    /// Constructs a [`SeigniorageAllocation::Delegator`]\n    pub 
const fn delegator(\n        delegator_public_key: PublicKey,\n        validator_public_key: PublicKey,\n        amount: U512,\n    ) -> Self {\n        SeigniorageAllocation::Delegator {\n            delegator_public_key,\n            validator_public_key,\n            amount,\n        }\n    }\n\n    /// Constructs a [`SeigniorageAllocation::DelegatorKind`]\n    pub const fn delegator_kind(\n        delegator_kind: DelegatorKind,\n        validator_public_key: PublicKey,\n        amount: U512,\n    ) -> Self {\n        SeigniorageAllocation::DelegatorKind {\n            delegator_kind,\n            validator_public_key,\n            amount,\n        }\n    }\n\n    /// Returns the amount for a given seigniorage allocation\n    pub fn amount(&self) -> &U512 {\n        match self {\n            SeigniorageAllocation::Validator { amount, .. }\n            | SeigniorageAllocation::Delegator { amount, .. }\n            | SeigniorageAllocation::DelegatorKind { amount, .. } => amount,\n        }\n    }\n\n    fn tag(&self) -> u8 {\n        match self {\n            SeigniorageAllocation::Validator { .. } => SEIGNIORAGE_ALLOCATION_VALIDATOR_TAG,\n            SeigniorageAllocation::Delegator { .. } => SEIGNIORAGE_ALLOCATION_DELEGATOR_TAG,\n            SeigniorageAllocation::DelegatorKind { .. 
} => {\n                SEIGNIORAGE_ALLOCATION_DELEGATOR_KIND_TAG\n            }\n        }\n    }\n}\n\nimpl ToBytes for SeigniorageAllocation {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.tag().serialized_length()\n            + match self {\n                SeigniorageAllocation::Validator {\n                    validator_public_key,\n                    amount,\n                } => validator_public_key.serialized_length() + amount.serialized_length(),\n                SeigniorageAllocation::Delegator {\n                    delegator_public_key,\n                    validator_public_key,\n                    amount,\n                } => {\n                    delegator_public_key.serialized_length()\n                        + validator_public_key.serialized_length()\n                        + amount.serialized_length()\n                }\n                SeigniorageAllocation::DelegatorKind {\n                    delegator_kind,\n                    validator_public_key,\n                    amount,\n                } => {\n                    delegator_kind.serialized_length()\n                        + validator_public_key.serialized_length()\n                        + amount.serialized_length()\n                }\n            }\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        writer.push(self.tag());\n        match self {\n            SeigniorageAllocation::Validator {\n                validator_public_key,\n                amount,\n            } => {\n                validator_public_key.write_bytes(writer)?;\n                amount.write_bytes(writer)?;\n            }\n            SeigniorageAllocation::Delegator {\n                delegator_public_key,\n                validator_public_key,\n        
        amount,\n            } => {\n                delegator_public_key.write_bytes(writer)?;\n                validator_public_key.write_bytes(writer)?;\n                amount.write_bytes(writer)?;\n            }\n            SeigniorageAllocation::DelegatorKind {\n                delegator_kind,\n                validator_public_key,\n                amount,\n            } => {\n                delegator_kind.write_bytes(writer)?;\n                validator_public_key.write_bytes(writer)?;\n                amount.write_bytes(writer)?;\n            }\n        }\n        Ok(())\n    }\n}\n\nimpl FromBytes for SeigniorageAllocation {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, rem) = <u8>::from_bytes(bytes)?;\n        match tag {\n            SEIGNIORAGE_ALLOCATION_VALIDATOR_TAG => {\n                let (validator_public_key, rem) = PublicKey::from_bytes(rem)?;\n                let (amount, rem) = U512::from_bytes(rem)?;\n                Ok((\n                    SeigniorageAllocation::validator(validator_public_key, amount),\n                    rem,\n                ))\n            }\n            SEIGNIORAGE_ALLOCATION_DELEGATOR_TAG => {\n                let (delegator_public_key, rem) = PublicKey::from_bytes(rem)?;\n                let (validator_public_key, rem) = PublicKey::from_bytes(rem)?;\n                let (amount, rem) = U512::from_bytes(rem)?;\n                Ok((\n                    SeigniorageAllocation::delegator(\n                        delegator_public_key,\n                        validator_public_key,\n                        amount,\n                    ),\n                    rem,\n                ))\n            }\n            SEIGNIORAGE_ALLOCATION_DELEGATOR_KIND_TAG => {\n                let (delegator_kind, rem) = DelegatorKind::from_bytes(rem)?;\n                let (validator_public_key, rem) = PublicKey::from_bytes(rem)?;\n                let (amount, rem) = 
U512::from_bytes(rem)?;\n                Ok((\n                    SeigniorageAllocation::delegator_kind(\n                        delegator_kind,\n                        validator_public_key,\n                        amount,\n                    ),\n                    rem,\n                ))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\nimpl CLTyped for SeigniorageAllocation {\n    fn cl_type() -> CLType {\n        CLType::Any\n    }\n}\n\n/// Auction metadata.  Intended to be recorded at each era.\n#[derive(Debug, Default, Clone, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct EraInfo {\n    seigniorage_allocations: Vec<SeigniorageAllocation>,\n}\n\nimpl EraInfo {\n    /// Constructs a [`EraInfo`].\n    pub fn new() -> Self {\n        let seigniorage_allocations = Vec::new();\n        EraInfo {\n            seigniorage_allocations,\n        }\n    }\n\n    /// Returns a reference to the seigniorage allocations collection\n    pub fn seigniorage_allocations(&self) -> &Vec<SeigniorageAllocation> {\n        &self.seigniorage_allocations\n    }\n\n    /// Returns a mutable reference to the seigniorage allocations collection\n    pub fn seigniorage_allocations_mut(&mut self) -> &mut Vec<SeigniorageAllocation> {\n        &mut self.seigniorage_allocations\n    }\n\n    /// Returns all seigniorage allocations that match the provided public key\n    /// using the following criteria:\n    /// * If the match candidate is a validator allocation, the provided public key is matched\n    ///   against the validator public key.\n    /// * If the match candidate is a delegator allocation, the provided public key is matched\n    ///   against the delegator public key if any.\n    pub fn select(&self, public_key: PublicKey) -> impl Iterator<Item = 
&SeigniorageAllocation> {\n        self.seigniorage_allocations\n            .iter()\n            .filter(move |allocation| match allocation {\n                SeigniorageAllocation::Validator {\n                    validator_public_key,\n                    ..\n                } => public_key == *validator_public_key,\n                SeigniorageAllocation::Delegator {\n                    delegator_public_key,\n                    ..\n                } => public_key == *delegator_public_key,\n                SeigniorageAllocation::DelegatorKind { delegator_kind, .. } => {\n                    if let DelegatorKind::PublicKey(delegator_public_key) = delegator_kind {\n                        public_key == *delegator_public_key\n                    } else {\n                        false\n                    }\n                }\n            })\n    }\n}\n\nimpl ToBytes for EraInfo {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        self.seigniorage_allocations().write_bytes(&mut result)?;\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.seigniorage_allocations.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.seigniorage_allocations().write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for EraInfo {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (seigniorage_allocations, rem) = Vec::<SeigniorageAllocation>::from_bytes(bytes)?;\n        Ok((\n            EraInfo {\n                seigniorage_allocations,\n            },\n            rem,\n        ))\n    }\n}\n\nimpl CLTyped for EraInfo {\n    fn cl_type() -> CLType {\n        CLType::List(Box::new(SeigniorageAllocation::cl_type()))\n    }\n}\n\n/// Generators for [`SeigniorageAllocation`] and [`EraInfo`]\n#[cfg(any(feature = \"testing\", feature = \"gens\", 
test))]\npub mod gens {\n    use proptest::{\n        collection::{self, SizeRange},\n        prelude::Strategy,\n        prop_oneof,\n    };\n\n    use crate::{\n        crypto::gens::public_key_arb,\n        gens::{delegator_kind_arb, u512_arb},\n        system::auction::{EraInfo, SeigniorageAllocation},\n    };\n\n    fn seigniorage_allocation_validator_arb() -> impl Strategy<Value = SeigniorageAllocation> {\n        (public_key_arb(), u512_arb()).prop_map(|(validator_public_key, amount)| {\n            SeigniorageAllocation::validator(validator_public_key, amount)\n        })\n    }\n\n    fn seigniorage_allocation_delegator_arb() -> impl Strategy<Value = SeigniorageAllocation> {\n        (delegator_kind_arb(), public_key_arb(), u512_arb()).prop_map(\n            |(delegator_kind, validator_public_key, amount)| {\n                SeigniorageAllocation::delegator_kind(delegator_kind, validator_public_key, amount)\n            },\n        )\n    }\n\n    /// Creates an arbitrary [`SeignorageAllocation`](SeigniorageAllocation)\n    pub fn seigniorage_allocation_arb() -> impl Strategy<Value = SeigniorageAllocation> {\n        prop_oneof![\n            seigniorage_allocation_validator_arb(),\n            seigniorage_allocation_delegator_arb()\n        ]\n    }\n\n    /// Creates an arbitrary [`EraInfo`]\n    pub fn era_info_arb(size: impl Into<SizeRange>) -> impl Strategy<Value = EraInfo> {\n        collection::vec(seigniorage_allocation_arb(), size).prop_map(|allocations| {\n            let mut era_info = EraInfo::new();\n            *era_info.seigniorage_allocations_mut() = allocations;\n            era_info\n        })\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use proptest::prelude::*;\n\n    use crate::bytesrepr;\n\n    use super::gens;\n\n    proptest! {\n        #[test]\n        fn test_serialization_roundtrip(era_info in gens::era_info_arb(0..32)) {\n            bytesrepr::test_serialization_roundtrip(&era_info)\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/system/auction/error.rs",
    "content": "//! Home of the Auction contract's [`enum@Error`] type.\nuse alloc::vec::Vec;\nuse core::{\n    convert::{TryFrom, TryInto},\n    fmt::{self, Display, Formatter},\n    result,\n};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    CLType, CLTyped,\n};\n\n/// Errors which can occur while executing the Auction contract.\n#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]\n#[cfg_attr(test, derive(strum::EnumIter))]\n#[repr(u8)]\n#[non_exhaustive]\npub enum Error {\n    /// Unable to find named key in the contract's named keys.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(0, Error::MissingKey as u8);\n    /// ```\n    MissingKey = 0,\n    /// Invalid key variant.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(1, Error::InvalidKeyVariant as u8);\n    /// ```\n    InvalidKeyVariant = 1,\n    /// Value under an uref does not exist. This means the installer contract didn't work properly.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(2, Error::MissingValue as u8);\n    /// ```\n    MissingValue = 2,\n    /// ABI serialization issue while reading or writing.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(3, Error::Serialization as u8);\n    /// ```\n    Serialization = 3,\n    /// Triggered when contract was unable to transfer desired amount of tokens into a bid purse.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(4, Error::TransferToBidPurse as u8);\n    /// ```\n    TransferToBidPurse = 4,\n    /// User passed invalid amount of tokens which might result in wrong values after calculation.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(5, Error::InvalidAmount as u8);\n    /// ```\n    InvalidAmount = 5,\n    /// Unable to find a bid by account hash in `active_bids` map.\n    /// 
```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(6, Error::BidNotFound as u8);\n    /// ```\n    BidNotFound = 6,\n    /// Validator was not found in the map.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(7, Error::ValidatorNotFound as u8);\n    /// ```\n    ValidatorNotFound = 7,\n    /// Delegator was not found in the map.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(8, Error::DelegatorNotFound as u8);\n    /// ```\n    DelegatorNotFound = 8,\n    /// Storage problem.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(9, Error::Storage as u8);\n    /// ```\n    Storage = 9,\n    /// Raised when system is unable to bond.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(10, Error::Bonding as u8);\n    /// ```\n    Bonding = 10,\n    /// Raised when system is unable to unbond.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(11, Error::Unbonding as u8);\n    /// ```\n    Unbonding = 11,\n    /// Raised when Mint contract is unable to release founder stake.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(12, Error::ReleaseFounderStake as u8);\n    /// ```\n    ReleaseFounderStake = 12,\n    /// Raised when the system is unable to determine purse balance.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(13, Error::GetBalance as u8);\n    /// ```\n    GetBalance = 13,\n    /// Raised when an entry point is called from invalid account context.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(14, Error::InvalidContext as u8);\n    /// ```\n    InvalidContext = 14,\n    /// Raised whenever a validator's funds are still locked in but an attempt to withdraw was\n    /// made.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// 
assert_eq!(15, Error::ValidatorFundsLocked as u8);\n    /// ```\n    ValidatorFundsLocked = 15,\n    /// Raised when caller is not the system account.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(16, Error::InvalidCaller as u8);\n    /// ```\n    InvalidCaller = 16,\n    /// Raised when function is supplied a public key that does match the caller's or does not have\n    /// an associated account.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(17, Error::InvalidPublicKey as u8);\n    /// ```\n    InvalidPublicKey = 17,\n    /// Validator is not bonded.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(18, Error::BondNotFound as u8);\n    /// ```\n    BondNotFound = 18,\n    /// Unable to create purse.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(19, Error::CreatePurseFailed as u8);\n    /// ```\n    CreatePurseFailed = 19,\n    /// Attempted to unbond an amount which was too large.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(20, Error::UnbondTooLarge as u8);\n    /// ```\n    UnbondTooLarge = 20,\n    /// Attempted to bond with a stake which was too small.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(21, Error::BondTooSmall as u8);\n    /// ```\n    BondTooSmall = 21,\n    /// Raised when rewards are to be distributed to delegators, but the validator has no\n    /// delegations.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(22, Error::MissingDelegations as u8);\n    /// ```\n    MissingDelegations = 22,\n    /// The validators returned by the consensus component should match\n    /// current era validators when distributing rewards.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(23, Error::MismatchedEraValidators as u8);\n    /// ```\n    
MismatchedEraValidators = 23,\n    /// Failed to mint reward tokens.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(24, Error::MintReward as u8);\n    /// ```\n    MintReward = 24,\n    /// Invalid number of validator slots.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(25, Error::InvalidValidatorSlotsValue as u8);\n    /// ```\n    InvalidValidatorSlotsValue = 25,\n    /// Failed to reduce total supply.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(26, Error::MintReduceTotalSupply as u8);\n    /// ```\n    MintReduceTotalSupply = 26,\n    /// Triggered when contract was unable to transfer desired amount of tokens into a delegators\n    /// purse.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(27, Error::TransferToDelegatorPurse as u8);\n    /// ```\n    TransferToDelegatorPurse = 27,\n    /// Triggered when contract was unable to perform a transfer to distribute validators reward.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(28, Error::ValidatorRewardTransfer as u8);\n    /// ```\n    ValidatorRewardTransfer = 28,\n    /// Triggered when contract was unable to perform a transfer to distribute delegators rewards.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(29, Error::DelegatorRewardTransfer as u8);\n    /// ```\n    DelegatorRewardTransfer = 29,\n    /// Failed to transfer desired amount while withdrawing delegators reward.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(30, Error::WithdrawDelegatorReward as u8);\n    /// ```\n    WithdrawDelegatorReward = 30,\n    /// Failed to transfer desired amount while withdrawing validators reward.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(31, Error::WithdrawValidatorReward as u8);\n    /// ```\n    
WithdrawValidatorReward = 31,\n    /// Failed to transfer desired amount into unbonding purse.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(32, Error::TransferToUnbondingPurse as u8);\n    /// ```\n    TransferToUnbondingPurse = 32,\n    /// Failed to record era info.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(33, Error::RecordEraInfo as u8);\n    /// ```\n    RecordEraInfo = 33,\n    /// Failed to create a [`crate::CLValue`].\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(34, Error::CLValue as u8);\n    /// ```\n    CLValue = 34,\n    /// Missing seigniorage recipients for given era.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(35, Error::MissingSeigniorageRecipients as u8);\n    /// ```\n    MissingSeigniorageRecipients = 35,\n    /// Failed to transfer funds.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(36, Error::Transfer as u8);\n    /// ```\n    Transfer = 36,\n    /// Delegation rate exceeds rate.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(37, Error::DelegationRateTooLarge as u8);\n    /// ```\n    DelegationRateTooLarge = 37,\n    /// Raised whenever a delegator's funds are still locked in but an attempt to undelegate was\n    /// made.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(38, Error::DelegatorFundsLocked as u8);\n    /// ```\n    DelegatorFundsLocked = 38,\n    /// An arithmetic overflow has occurred.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(39, Error::ArithmeticOverflow as u8);\n    /// ```\n    ArithmeticOverflow = 39,\n    /// Execution exceeded the gas limit.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(40, Error::GasLimit as u8);\n    /// ```\n    GasLimit = 40,\n    /// Too 
many frames on the runtime stack.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(41, Error::RuntimeStackOverflow as u8);\n    /// ```\n    RuntimeStackOverflow = 41,\n    /// An error that is raised when there is an error in the mint contract that cannot\n    /// be mapped to a specific auction error.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(42, Error::MintError as u8);\n    /// ```\n    MintError = 42,\n    /// The validator has exceeded the maximum amount of delegators allowed.\n    /// NOTE: This variant is no longer in use.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(43, Error::ExceededDelegatorSizeLimit as u8);\n    /// ```\n    ExceededDelegatorSizeLimit = 43,\n    /// The global delegator capacity for the auction has been reached.\n    /// NOTE: This variant is no longer in use.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(44, Error::GlobalDelegatorCapacityReached as u8);\n    /// ```\n    GlobalDelegatorCapacityReached = 44,\n    /// The delegated amount is below the minimum allowed.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(45, Error::DelegationAmountTooSmall as u8);\n    /// ```\n    DelegationAmountTooSmall = 45,\n    /// Runtime stack error.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(46, Error::RuntimeStack as u8);\n    /// ```\n    RuntimeStack = 46,\n    /// An error that is raised on private chain only when a `disable_auction_bids` flag is set to\n    /// `true`.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(47, Error::AuctionBidsDisabled as u8);\n    /// ```\n    AuctionBidsDisabled = 47,\n    /// Error getting accumulation purse.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(48, Error::GetAccumulationPurse as u8);\n    
/// ```\n    GetAccumulationPurse = 48,\n    /// Failed to transfer desired amount into administrators account.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(49, Error::TransferToAdministrator as u8);\n    /// ```\n    TransferToAdministrator = 49,\n    /// A forged reference was detected.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(50, Error::ForgedReference as u8);\n    /// ```\n    ForgedReference = 50,\n    /// Unable to find purse.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(51, Error::MissingPurse as u8);\n    /// ```\n    MissingPurse = 51,\n    /// Failed to transfer validator bid to new public key.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(52, Error::ValidatorBidExistsAlready as u8);\n    /// ```\n    ValidatorBidExistsAlready = 52,\n    /// Failed to look up current validator bid\n    /// because its public key has been changed\n    /// and bridge record chain is too long to follow.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(53, Error::BridgeRecordChainTooLong as u8);\n    /// ```\n    BridgeRecordChainTooLong = 53,\n    /// Unexpected bid variant.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(54, Error::UnexpectedBidVariant as u8);\n    /// ```\n    UnexpectedBidVariant = 54,\n    /// The delegated amount is above the maximum allowed.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(55, Error::DelegationAmountTooLarge as u8);\n    /// ```\n    DelegationAmountTooLarge = 55,\n    /// Reservation was not found in the map.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(56, Error::ReservationNotFound as u8);\n    /// ```\n    ReservationNotFound = 56,\n    /// Validator exceeded allowed number of reserved 
delegator slots.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(57, Error::ExceededReservationSlotsLimit as u8);\n    /// ```\n    ExceededReservationSlotsLimit = 57,\n    /// All reserved slots for validator are already occupied.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(58, Error::ExceededReservationsLimit as u8);\n    /// ```\n    ExceededReservationsLimit = 58,\n    /// Reserved slots count is less than number of existing reservations.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(59, Error::ReservationSlotsCountTooSmall as u8);\n    /// ```\n    ReservationSlotsCountTooSmall = 59,\n    /// Unexpected unbond variant.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(60, Error::UnexpectedUnbondVariant as u8);\n    /// ```\n    UnexpectedUnbondVariant = 60,\n    /// Unexpected stored value variant.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(61, Error::UnexpectedStoredValueVariant as u8);\n    /// ```\n    UnexpectedStoredValueVariant = 61,\n    /// Redelegation validator was not found in the map.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(62, Error::RedelegationValidatorNotFound as u8);\n    /// ```\n    RedelegationValidatorNotFound = 62,\n    /// Certain operations are not permitted on bid records during vesting periods.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(63, Error::VestingLockout as u8);\n    /// ```\n    VestingLockout = 63,\n    /// The minimum delegation rate is not met.\n    /// ```\n    /// # use casper_types::system::auction::Error;\n    /// assert_eq!(64, Error::DelegationRateTooSmall as u8);\n    /// ```\n    DelegationRateTooSmall = 64,\n}\n\nimpl Display for Error {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n          
  Error::MissingKey => formatter.write_str(\"Missing key\"),\n            Error::InvalidKeyVariant => formatter.write_str(\"Invalid key variant\"),\n            Error::MissingValue => formatter.write_str(\"Missing value\"),\n            Error::Serialization => formatter.write_str(\"Serialization error\"),\n            Error::TransferToBidPurse => formatter.write_str(\"Transfer to bid purse error\"),\n            Error::InvalidAmount => formatter.write_str(\"Invalid amount\"),\n            Error::BidNotFound => formatter.write_str(\"Bid not found\"),\n            Error::ValidatorNotFound => formatter.write_str(\"Validator not found\"),\n            Error::DelegatorNotFound => formatter.write_str(\"Delegator not found\"),\n            Error::Storage => formatter.write_str(\"Storage error\"),\n            Error::Bonding => formatter.write_str(\"Bonding error\"),\n            Error::Unbonding => formatter.write_str(\"Unbonding error\"),\n            Error::ReleaseFounderStake => formatter.write_str(\"Unable to release founder stake\"),\n            Error::GetBalance => formatter.write_str(\"Unable to get purse balance\"),\n            Error::InvalidContext => formatter.write_str(\"Invalid context\"),\n            Error::ValidatorFundsLocked => formatter.write_str(\"Validator's funds are locked\"),\n            Error::InvalidCaller => formatter.write_str(\"Function must be called by system account\"),\n            Error::InvalidPublicKey => formatter.write_str(\"Supplied public key does not match caller's public key or has no associated account\"),\n            Error::BondNotFound => formatter.write_str(\"Validator's bond not found\"),\n            Error::CreatePurseFailed => formatter.write_str(\"Unable to create purse\"),\n            Error::UnbondTooLarge => formatter.write_str(\"Unbond is too large\"),\n            Error::BondTooSmall => formatter.write_str(\"Bond is too small\"),\n            Error::MissingDelegations => formatter.write_str(\"Validators has not 
received any delegations\"),\n            Error::MismatchedEraValidators => formatter.write_str(\"Mismatched era validator sets to distribute rewards\"),\n            Error::MintReward => formatter.write_str(\"Failed to mint rewards\"),\n            Error::InvalidValidatorSlotsValue => formatter.write_str(\"Invalid number of validator slots\"),\n            Error::MintReduceTotalSupply => formatter.write_str(\"Failed to reduce total supply\"),\n            Error::TransferToDelegatorPurse => formatter.write_str(\"Transfer to delegators purse error\"),\n            Error::ValidatorRewardTransfer => formatter.write_str(\"Reward transfer to validator error\"),\n            Error::DelegatorRewardTransfer => formatter.write_str(\"Rewards transfer to delegator error\"),\n            Error::WithdrawDelegatorReward => formatter.write_str(\"Withdraw delegator reward error\"),\n            Error::WithdrawValidatorReward => formatter.write_str(\"Withdraw validator reward error\"),\n            Error::TransferToUnbondingPurse => formatter.write_str(\"Transfer to unbonding purse error\"),\n            Error::RecordEraInfo => formatter.write_str(\"Record era info error\"),\n            Error::CLValue => formatter.write_str(\"CLValue error\"),\n            Error::MissingSeigniorageRecipients => formatter.write_str(\"Missing seigniorage recipients for given era\"),\n            Error::Transfer => formatter.write_str(\"Transfer error\"),\n            Error::DelegationRateTooLarge => formatter.write_str(\"Delegation rate too large\"),\n            Error::DelegatorFundsLocked => formatter.write_str(\"Delegator's funds are locked\"),\n            Error::ArithmeticOverflow => formatter.write_str(\"Arithmetic overflow\"),\n            Error::GasLimit => formatter.write_str(\"Execution exceeded the gas limit\"),\n            Error::RuntimeStackOverflow => formatter.write_str(\"Runtime stack overflow\"),\n            Error::MintError => formatter.write_str(\"An error in the mint contract 
execution\"),\n            Error::ExceededDelegatorSizeLimit => formatter.write_str(\"The amount of delegators per validator has been exceeded\"),\n            Error::GlobalDelegatorCapacityReached => formatter.write_str(\"The global delegator capacity has been reached\"),\n            Error::DelegationAmountTooSmall => formatter.write_str(\"The delegated amount is below the minimum allowed\"),\n            Error::RuntimeStack => formatter.write_str(\"Runtime stack error\"),\n            Error::AuctionBidsDisabled => formatter.write_str(\"Auction bids are disabled\"),\n            Error::GetAccumulationPurse => formatter.write_str(\"Get accumulation purse error\"),\n            Error::TransferToAdministrator => formatter.write_str(\"Transfer to administrator error\"),\n            Error::ForgedReference => formatter.write_str(\"Forged reference\"),\n            Error::MissingPurse => formatter.write_str(\"Missing purse\"),\n            Error::ValidatorBidExistsAlready => formatter.write_str(\"Validator bid with given public key already exists\"),\n            Error::BridgeRecordChainTooLong => formatter.write_str(\"Bridge record chain is too long to find current validator bid\"),\n            Error::UnexpectedBidVariant => formatter.write_str(\"Unexpected bid variant\"),\n            Error::DelegationAmountTooLarge => formatter.write_str(\"The delegated amount is above the maximum allowed\"),\n            Error::ReservationNotFound => formatter.write_str(\"Reservation not found\"),\n            Error::ExceededReservationSlotsLimit => formatter.write_str(\"Validator exceeded allowed number of reserved delegator slots\"),\n            Error::ExceededReservationsLimit => formatter.write_str(\"All reserved slots for validator are already occupied\"),\n            Error::ReservationSlotsCountTooSmall => formatter.write_str(\"Reserved slots count is less than number of existing reservations\"),\n            Error::UnexpectedUnbondVariant => 
formatter.write_str(\"Unexpected unbond variant\"),\n            Error::UnexpectedStoredValueVariant => formatter.write_str(\"Unexpected stored value variant\"),\n            Error::RedelegationValidatorNotFound => formatter.write_str(\"Redelegation validator not found\"),\n            Error::VestingLockout => formatter.write_str(\"Cannot perform attempted action during vesting periods\"),\n            Error::DelegationRateTooSmall => formatter.write_str(\"Delegation rate too small\"),\n        }\n    }\n}\n\nimpl CLTyped for Error {\n    fn cl_type() -> CLType {\n        CLType::U8\n    }\n}\n\n// This error type is not intended to be used by third party crates.\n#[doc(hidden)]\n#[derive(Debug, PartialEq, Eq)]\npub struct TryFromU8ForError(());\n\n// This conversion is not intended to be used by third party crates.\n#[doc(hidden)]\nimpl TryFrom<u8> for Error {\n    type Error = TryFromU8ForError;\n\n    fn try_from(value: u8) -> result::Result<Self, Self::Error> {\n        match value {\n            d if d == Error::MissingKey as u8 => Ok(Error::MissingKey),\n            d if d == Error::InvalidKeyVariant as u8 => Ok(Error::InvalidKeyVariant),\n            d if d == Error::MissingValue as u8 => Ok(Error::MissingValue),\n            d if d == Error::Serialization as u8 => Ok(Error::Serialization),\n            d if d == Error::TransferToBidPurse as u8 => Ok(Error::TransferToBidPurse),\n            d if d == Error::InvalidAmount as u8 => Ok(Error::InvalidAmount),\n            d if d == Error::BidNotFound as u8 => Ok(Error::BidNotFound),\n            d if d == Error::ValidatorNotFound as u8 => Ok(Error::ValidatorNotFound),\n            d if d == Error::DelegatorNotFound as u8 => Ok(Error::DelegatorNotFound),\n            d if d == Error::Storage as u8 => Ok(Error::Storage),\n            d if d == Error::Bonding as u8 => Ok(Error::Bonding),\n            d if d == Error::Unbonding as u8 => Ok(Error::Unbonding),\n            d if d == Error::ReleaseFounderStake as u8 => 
Ok(Error::ReleaseFounderStake),\n            d if d == Error::GetBalance as u8 => Ok(Error::GetBalance),\n            d if d == Error::InvalidContext as u8 => Ok(Error::InvalidContext),\n            d if d == Error::ValidatorFundsLocked as u8 => Ok(Error::ValidatorFundsLocked),\n            d if d == Error::InvalidCaller as u8 => Ok(Error::InvalidCaller),\n            d if d == Error::InvalidPublicKey as u8 => Ok(Error::InvalidPublicKey),\n            d if d == Error::BondNotFound as u8 => Ok(Error::BondNotFound),\n            d if d == Error::CreatePurseFailed as u8 => Ok(Error::CreatePurseFailed),\n            d if d == Error::UnbondTooLarge as u8 => Ok(Error::UnbondTooLarge),\n            d if d == Error::BondTooSmall as u8 => Ok(Error::BondTooSmall),\n            d if d == Error::MissingDelegations as u8 => Ok(Error::MissingDelegations),\n            d if d == Error::MismatchedEraValidators as u8 => Ok(Error::MismatchedEraValidators),\n            d if d == Error::MintReward as u8 => Ok(Error::MintReward),\n            d if d == Error::InvalidValidatorSlotsValue as u8 => {\n                Ok(Error::InvalidValidatorSlotsValue)\n            }\n            d if d == Error::MintReduceTotalSupply as u8 => Ok(Error::MintReduceTotalSupply),\n            d if d == Error::TransferToDelegatorPurse as u8 => Ok(Error::TransferToDelegatorPurse),\n            d if d == Error::ValidatorRewardTransfer as u8 => Ok(Error::ValidatorRewardTransfer),\n            d if d == Error::DelegatorRewardTransfer as u8 => Ok(Error::DelegatorRewardTransfer),\n            d if d == Error::WithdrawDelegatorReward as u8 => Ok(Error::WithdrawDelegatorReward),\n            d if d == Error::WithdrawValidatorReward as u8 => Ok(Error::WithdrawValidatorReward),\n            d if d == Error::TransferToUnbondingPurse as u8 => Ok(Error::TransferToUnbondingPurse),\n\n            d if d == Error::RecordEraInfo as u8 => Ok(Error::RecordEraInfo),\n            d if d == Error::CLValue as u8 => 
Ok(Error::CLValue),\n            d if d == Error::MissingSeigniorageRecipients as u8 => {\n                Ok(Error::MissingSeigniorageRecipients)\n            }\n            d if d == Error::Transfer as u8 => Ok(Error::Transfer),\n            d if d == Error::DelegationRateTooLarge as u8 => Ok(Error::DelegationRateTooLarge),\n            d if d == Error::DelegatorFundsLocked as u8 => Ok(Error::DelegatorFundsLocked),\n            d if d == Error::ArithmeticOverflow as u8 => Ok(Error::ArithmeticOverflow),\n            d if d == Error::GasLimit as u8 => Ok(Error::GasLimit),\n            d if d == Error::RuntimeStackOverflow as u8 => Ok(Error::RuntimeStackOverflow),\n            d if d == Error::MintError as u8 => Ok(Error::MintError),\n            d if d == Error::ExceededDelegatorSizeLimit as u8 => {\n                Ok(Error::ExceededDelegatorSizeLimit)\n            }\n            d if d == Error::GlobalDelegatorCapacityReached as u8 => {\n                Ok(Error::GlobalDelegatorCapacityReached)\n            }\n            d if d == Error::DelegationAmountTooSmall as u8 => Ok(Error::DelegationAmountTooSmall),\n            d if d == Error::RuntimeStack as u8 => Ok(Error::RuntimeStack),\n            d if d == Error::AuctionBidsDisabled as u8 => Ok(Error::AuctionBidsDisabled),\n            d if d == Error::GetAccumulationPurse as u8 => Ok(Error::GetAccumulationPurse),\n            d if d == Error::TransferToAdministrator as u8 => Ok(Error::TransferToAdministrator),\n            d if d == Error::ForgedReference as u8 => Ok(Error::ForgedReference),\n            d if d == Error::MissingPurse as u8 => Ok(Error::MissingPurse),\n            d if d == Error::ValidatorBidExistsAlready as u8 => {\n                Ok(Error::ValidatorBidExistsAlready)\n            }\n            d if d == Error::BridgeRecordChainTooLong as u8 => Ok(Error::BridgeRecordChainTooLong),\n            d if d == Error::UnexpectedBidVariant as u8 => Ok(Error::UnexpectedBidVariant),\n            d if d 
== Error::DelegationAmountTooLarge as u8 => Ok(Error::DelegationAmountTooLarge),\n            d if d == Error::ReservationNotFound as u8 => Ok(Error::ReservationNotFound),\n            d if d == Error::ExceededReservationSlotsLimit as u8 => {\n                Ok(Error::ExceededReservationSlotsLimit)\n            }\n            d if d == Error::ExceededReservationsLimit as u8 => {\n                Ok(Error::ExceededReservationsLimit)\n            }\n            d if d == Error::ReservationSlotsCountTooSmall as u8 => {\n                Ok(Error::ReservationSlotsCountTooSmall)\n            }\n            d if d == Error::UnexpectedUnbondVariant as u8 => Ok(Error::UnexpectedUnbondVariant),\n            d if d == Error::UnexpectedStoredValueVariant as u8 => {\n                Ok(Error::UnexpectedStoredValueVariant)\n            }\n            d if d == Error::RedelegationValidatorNotFound as u8 => {\n                Ok(Error::RedelegationValidatorNotFound)\n            }\n            d if d == Error::VestingLockout as u8 => Ok(Error::VestingLockout),\n            d if d == Error::DelegationRateTooSmall as u8 => Ok(Error::DelegationRateTooSmall),\n            _ => Err(TryFromU8ForError(())),\n        }\n    }\n}\n\nimpl ToBytes for Error {\n    fn to_bytes(&self) -> result::Result<Vec<u8>, bytesrepr::Error> {\n        let value = *self as u8;\n        value.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n    }\n}\n\nimpl FromBytes for Error {\n    fn from_bytes(bytes: &[u8]) -> result::Result<(Self, &[u8]), bytesrepr::Error> {\n        let (value, rem): (u8, _) = FromBytes::from_bytes(bytes)?;\n        let error: Error = value\n            .try_into()\n            // In case an Error variant is unable to be determined it would return an\n            // Error::Formatting as if it's unable to be correctly deserialized.\n            .map_err(|_| bytesrepr::Error::Formatting)?;\n        Ok((error, rem))\n    }\n}\n\nimpl 
From<bytesrepr::Error> for Error {\n    fn from(_: bytesrepr::Error) -> Self {\n        Error::Serialization\n    }\n}\n\n// This error type is not intended to be used by third party crates.\n#[doc(hidden)]\npub enum PurseLookupError {\n    KeyNotFound,\n    KeyUnexpectedType,\n}\n\nimpl From<PurseLookupError> for Error {\n    fn from(error: PurseLookupError) -> Self {\n        match error {\n            PurseLookupError::KeyNotFound => Error::MissingPurse,\n            PurseLookupError::KeyUnexpectedType => Error::InvalidKeyVariant,\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use strum::IntoEnumIterator;\n\n    use super::Error;\n\n    #[test]\n    fn error_forward_trips() {\n        for expected_error_variant in Error::iter() {\n            assert_eq!(\n                Error::try_from(expected_error_variant as u8),\n                Ok(expected_error_variant)\n            )\n        }\n    }\n\n    #[test]\n    fn error_backward_trips() {\n        for u8 in 0..=u8::MAX {\n            match Error::try_from(u8) {\n                Ok(error_variant) => {\n                    assert_eq!(u8, error_variant as u8, \"Error code mismatch\")\n                }\n                Err(_) => continue,\n            };\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/system/auction/reservation.rs",
    "content": "use alloc::vec::Vec;\nuse core::fmt::{self, Display, Formatter};\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    CLType, CLTyped, PublicKey,\n};\n\nuse super::{DelegationRate, DelegatorKind};\n\n/// Represents a validator reserving a slot for specific delegator\n#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct Reservation {\n    /// Delegator kind.\n    delegator_kind: DelegatorKind,\n    /// Validator public key.\n    validator_public_key: PublicKey,\n    /// Individual delegation rate.\n    delegation_rate: DelegationRate,\n}\n\nimpl Reservation {\n    /// Creates a new [`Reservation`]\n    pub fn new(\n        validator_public_key: PublicKey,\n        delegator_kind: DelegatorKind,\n        delegation_rate: DelegationRate,\n    ) -> Self {\n        Self {\n            delegator_kind,\n            validator_public_key,\n            delegation_rate,\n        }\n    }\n\n    /// Returns kind of delegator.\n    pub fn delegator_kind(&self) -> &DelegatorKind {\n        &self.delegator_kind\n    }\n\n    /// Returns delegatee\n    pub fn validator_public_key(&self) -> &PublicKey {\n        &self.validator_public_key\n    }\n\n    /// Gets the delegation rate of the provided bid\n    pub fn delegation_rate(&self) -> &DelegationRate {\n        &self.delegation_rate\n    }\n}\n\nimpl CLTyped for Reservation {\n    fn cl_type() -> CLType {\n        CLType::Any\n    }\n}\n\nimpl ToBytes for Reservation {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = 
bytesrepr::allocate_buffer(self)?;\n        buffer.extend(self.delegator_kind.to_bytes()?);\n        buffer.extend(self.validator_public_key.to_bytes()?);\n        buffer.extend(self.delegation_rate.to_bytes()?);\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.delegator_kind.serialized_length()\n            + self.validator_public_key.serialized_length()\n            + self.delegation_rate.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.delegator_kind.write_bytes(writer)?;\n        self.validator_public_key.write_bytes(writer)?;\n        self.delegation_rate.write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for Reservation {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (delegator_kind, bytes) = DelegatorKind::from_bytes(bytes)?;\n        let (validator_public_key, bytes) = PublicKey::from_bytes(bytes)?;\n        let (delegation_rate, bytes) = FromBytes::from_bytes(bytes)?;\n        Ok((\n            Self {\n                delegator_kind,\n                validator_public_key,\n                delegation_rate,\n            },\n            bytes,\n        ))\n    }\n}\n\nimpl Display for Reservation {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(\n            formatter,\n            \"Reservation {{ delegator {}, validator {} }}\",\n            self.delegator_kind, self.validator_public_key\n        )\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<Reservation> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Reservation {\n        Reservation {\n            delegator_kind: rng.gen(),\n            validator_public_key: rng.gen(),\n            delegation_rate: rng.gen(),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{bytesrepr, system::auction::Reservation, PublicKey, SecretKey};\n\n    
#[test]\n    fn serialization_roundtrip() {\n        let delegator_kind = PublicKey::from(\n            &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(),\n        )\n        .into();\n\n        let validator_public_key: PublicKey = PublicKey::from(\n            &SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(),\n        );\n        let entry = Reservation::new(validator_public_key, delegator_kind, 0);\n        bytesrepr::test_serialization_roundtrip(&entry);\n    }\n}\n\n#[cfg(test)]\nmod prop_tests {\n    use proptest::prelude::*;\n\n    use crate::{bytesrepr, gens};\n\n    proptest! {\n        #[test]\n        fn test_value_bid(bid in gens::reservation_arb()) {\n            bytesrepr::test_serialization_roundtrip(&bid);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/system/auction/seigniorage_recipient.rs",
    "content": "use alloc::{collections::BTreeMap, vec::Vec};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    system::auction::{Bid, DelegationRate, DelegatorKind},\n    CLType, CLTyped, PublicKey, U512,\n};\n\n/// The seigniorage recipient details.\n/// Legacy version required to deserialize old records.\n#[derive(Default, PartialEq, Eq, Clone, Debug)]\npub struct SeigniorageRecipientV1 {\n    /// Validator stake (not including delegators)\n    stake: U512,\n    /// Delegation rate of a seigniorage recipient.\n    delegation_rate: DelegationRate,\n    /// Delegators and their bids.\n    delegator_stake: BTreeMap<PublicKey, U512>,\n}\n\nimpl SeigniorageRecipientV1 {\n    /// Creates a new SeigniorageRecipient\n    pub fn new(\n        stake: U512,\n        delegation_rate: DelegationRate,\n        delegator_stake: BTreeMap<PublicKey, U512>,\n    ) -> Self {\n        Self {\n            stake,\n            delegation_rate,\n            delegator_stake,\n        }\n    }\n\n    /// Returns stake of the provided recipient\n    pub fn stake(&self) -> &U512 {\n        &self.stake\n    }\n\n    /// Returns delegation rate of the provided recipient\n    pub fn delegation_rate(&self) -> &DelegationRate {\n        &self.delegation_rate\n    }\n\n    /// Returns delegators of the provided recipient and their stake\n    pub fn delegator_stake(&self) -> &BTreeMap<PublicKey, U512> {\n        &self.delegator_stake\n    }\n\n    /// Calculates total stake, including delegators' total stake\n    pub fn total_stake(&self) -> Option<U512> {\n        self.delegator_total_stake()?.checked_add(self.stake)\n    }\n\n    /// Calculates total stake for all delegators\n    pub fn delegator_total_stake(&self) -> Option<U512> {\n        let mut total_stake: U512 = U512::zero();\n        for stake in self.delegator_stake.values() {\n            total_stake = total_stake.checked_add(*stake)?;\n        }\n        Some(total_stake)\n    }\n}\n\nimpl CLTyped for 
SeigniorageRecipientV1 {\n    fn cl_type() -> CLType {\n        CLType::Any\n    }\n}\n\nimpl ToBytes for SeigniorageRecipientV1 {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        result.extend(self.stake.to_bytes()?);\n        result.extend(self.delegation_rate.to_bytes()?);\n        result.extend(self.delegator_stake.to_bytes()?);\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.stake.serialized_length()\n            + self.delegation_rate.serialized_length()\n            + self.delegator_stake.serialized_length()\n    }\n}\n\nimpl FromBytes for SeigniorageRecipientV1 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (stake, bytes) = FromBytes::from_bytes(bytes)?;\n        let (delegation_rate, bytes) = FromBytes::from_bytes(bytes)?;\n        let (delegator_stake, bytes) = FromBytes::from_bytes(bytes)?;\n        Ok((\n            Self {\n                stake,\n                delegation_rate,\n                delegator_stake,\n            },\n            bytes,\n        ))\n    }\n}\n\nimpl From<&Bid> for SeigniorageRecipientV1 {\n    fn from(bid: &Bid) -> Self {\n        let delegator_stake = bid\n            .delegators()\n            .iter()\n            .map(|(delegator_public_key, delegator)| {\n                (delegator_public_key.clone(), delegator.staked_amount())\n            })\n            .collect();\n        Self {\n            stake: *bid.staked_amount(),\n            delegation_rate: *bid.delegation_rate(),\n            delegator_stake,\n        }\n    }\n}\n\n/// The seigniorage recipient details with delegation rates for reservations.\n#[derive(Default, PartialEq, Eq, Clone, Debug)]\npub struct SeigniorageRecipientV2 {\n    /// Validator stake (not including delegators)\n    stake: U512,\n    /// Delegation rate of a seigniorage recipient.\n    delegation_rate: DelegationRate,\n    
/// Delegators and their bids.\n    delegator_stake: BTreeMap<DelegatorKind, U512>,\n    /// Delegation rates for reserved slots\n    reservation_delegation_rates: BTreeMap<DelegatorKind, DelegationRate>,\n}\n\nimpl SeigniorageRecipientV2 {\n    /// Creates a new SeigniorageRecipient\n    pub fn new(\n        stake: U512,\n        delegation_rate: DelegationRate,\n        delegator_stake: BTreeMap<DelegatorKind, U512>,\n        reservation_delegation_rates: BTreeMap<DelegatorKind, DelegationRate>,\n    ) -> Self {\n        Self {\n            stake,\n            delegation_rate,\n            delegator_stake,\n            reservation_delegation_rates,\n        }\n    }\n\n    /// Returns stake of the provided recipient\n    pub fn stake(&self) -> &U512 {\n        &self.stake\n    }\n\n    /// Returns delegation rate of the provided recipient\n    pub fn delegation_rate(&self) -> &DelegationRate {\n        &self.delegation_rate\n    }\n\n    /// Returns delegators of the provided recipient and their stake\n    pub fn delegator_stake(&self) -> &BTreeMap<DelegatorKind, U512> {\n        &self.delegator_stake\n    }\n\n    /// Calculates total stake, including delegators' total stake\n    pub fn total_stake(&self) -> Option<U512> {\n        self.delegator_total_stake()?.checked_add(self.stake)\n    }\n\n    /// Calculates total stake for all delegators\n    pub fn delegator_total_stake(&self) -> Option<U512> {\n        let mut total_stake: U512 = U512::zero();\n        for stake in self.delegator_stake.values() {\n            total_stake = total_stake.checked_add(*stake)?;\n        }\n        Some(total_stake)\n    }\n\n    /// Returns delegation rates for reservations of the provided recipient\n    pub fn reservation_delegation_rates(&self) -> &BTreeMap<DelegatorKind, DelegationRate> {\n        &self.reservation_delegation_rates\n    }\n}\n\nimpl CLTyped for SeigniorageRecipientV2 {\n    fn cl_type() -> CLType {\n        CLType::Any\n    }\n}\n\nimpl ToBytes for 
SeigniorageRecipientV2 {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        result.extend(self.stake.to_bytes()?);\n        result.extend(self.delegation_rate.to_bytes()?);\n        result.extend(self.delegator_stake.to_bytes()?);\n        result.extend(self.reservation_delegation_rates.to_bytes()?);\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.stake.serialized_length()\n            + self.delegation_rate.serialized_length()\n            + self.delegator_stake.serialized_length()\n            + self.reservation_delegation_rates.serialized_length()\n    }\n}\n\nimpl FromBytes for SeigniorageRecipientV2 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (stake, bytes) = FromBytes::from_bytes(bytes)?;\n        let (delegation_rate, bytes) = FromBytes::from_bytes(bytes)?;\n        let (delegator_stake, bytes) = FromBytes::from_bytes(bytes)?;\n        let (reservation_delegation_rates, bytes) = FromBytes::from_bytes(bytes)?;\n        Ok((\n            Self {\n                stake,\n                delegation_rate,\n                delegator_stake,\n                reservation_delegation_rates,\n            },\n            bytes,\n        ))\n    }\n}\n\nimpl From<&Bid> for SeigniorageRecipientV2 {\n    fn from(bid: &Bid) -> Self {\n        let delegator_stake = bid\n            .delegators()\n            .iter()\n            .map(|(delegator_public_key, delegator)| {\n                (\n                    DelegatorKind::PublicKey(delegator_public_key.clone()),\n                    delegator.staked_amount(),\n                )\n            })\n            .collect();\n        Self {\n            stake: *bid.staked_amount(),\n            delegation_rate: *bid.delegation_rate(),\n            delegator_stake,\n            reservation_delegation_rates: BTreeMap::new(),\n        }\n    }\n}\n\nimpl 
From<SeigniorageRecipientV1> for SeigniorageRecipientV2 {\n    fn from(snapshot: SeigniorageRecipientV1) -> Self {\n        let mut delegator_stake = BTreeMap::new();\n        for (kind, amount) in snapshot.delegator_stake {\n            delegator_stake.insert(DelegatorKind::PublicKey(kind), amount);\n        }\n\n        Self {\n            stake: snapshot.stake,\n            delegation_rate: snapshot.delegation_rate,\n            delegator_stake,\n            reservation_delegation_rates: Default::default(),\n        }\n    }\n}\n\n/// Wrapper enum for all variants of `SeigniorageRecipient`.\n#[allow(missing_docs)]\npub enum SeigniorageRecipient {\n    V1(SeigniorageRecipientV1),\n    V2(SeigniorageRecipientV2),\n}\n\nimpl SeigniorageRecipient {\n    /// Returns stake of the provided recipient\n    pub fn stake(&self) -> &U512 {\n        match self {\n            Self::V1(recipient) => &recipient.stake,\n            Self::V2(recipient) => &recipient.stake,\n        }\n    }\n\n    /// Returns delegation rate of the provided recipient\n    pub fn delegation_rate(&self) -> &DelegationRate {\n        match self {\n            Self::V1(recipient) => &recipient.delegation_rate,\n            Self::V2(recipient) => &recipient.delegation_rate,\n        }\n    }\n\n    /// Returns delegators of the provided recipient and their stake\n    pub fn delegator_stake(&self) -> BTreeMap<DelegatorKind, U512> {\n        let recipient = match self {\n            Self::V1(recipient) => {\n                let ret: SeigniorageRecipientV2 = recipient.clone().into();\n                ret\n            }\n            Self::V2(recipient) => recipient.clone(),\n        };\n        recipient.delegator_stake\n    }\n\n    /// Calculates total stake, including delegators' total stake\n    pub fn total_stake(&self) -> Option<U512> {\n        match self {\n            Self::V1(recipient) => recipient.total_stake(),\n            Self::V2(recipient) => recipient.total_stake(),\n        }\n    }\n\n 
   /// Calculates total stake for all delegators\n    pub fn delegator_total_stake(&self) -> Option<U512> {\n        match self {\n            Self::V1(recipient) => recipient.delegator_total_stake(),\n            Self::V2(recipient) => recipient.delegator_total_stake(),\n        }\n    }\n\n    /// Returns delegation rates for reservations of the provided recipient\n    pub fn reservation_delegation_rates(&self) -> Option<&BTreeMap<DelegatorKind, DelegationRate>> {\n        match self {\n            Self::V1(_recipient) => None,\n            Self::V2(recipient) => Some(&recipient.reservation_delegation_rates),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use alloc::collections::BTreeMap;\n    use core::iter::FromIterator;\n\n    use super::SeigniorageRecipientV2;\n    use crate::{\n        bytesrepr,\n        system::auction::{DelegationRate, DelegatorKind, SeigniorageRecipientV1},\n        PublicKey, SecretKey, U512,\n    };\n\n    #[test]\n    fn serialization_roundtrip() {\n        let delegator_1_kind: DelegatorKind = PublicKey::from(\n            &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(),\n        )\n        .into();\n        let delegator_2_kind = PublicKey::from(\n            &SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(),\n        )\n        .into();\n        let delegator_3_kind = PublicKey::from(\n            &SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]).unwrap(),\n        )\n        .into();\n        let seigniorage_recipient = SeigniorageRecipientV2 {\n            stake: U512::max_value(),\n            delegation_rate: DelegationRate::MAX,\n            delegator_stake: BTreeMap::from_iter(vec![\n                (delegator_1_kind.clone(), U512::max_value()),\n                (delegator_2_kind, U512::max_value()),\n                (delegator_3_kind, U512::zero()),\n            ]),\n            reservation_delegation_rates: BTreeMap::from_iter(vec![(\n                
delegator_1_kind,\n                DelegationRate::MIN,\n            )]),\n        };\n        bytesrepr::test_serialization_roundtrip(&seigniorage_recipient);\n    }\n\n    #[test]\n    fn serialization_roundtrip_legacy_version() {\n        let delegator_1_key = PublicKey::from(\n            &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(),\n        );\n        let delegator_2_key = PublicKey::from(\n            &SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(),\n        );\n        let delegator_3_key = PublicKey::from(\n            &SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]).unwrap(),\n        );\n        let legacy_seigniorage_recipient = SeigniorageRecipientV1 {\n            stake: U512::max_value(),\n            delegation_rate: DelegationRate::MAX,\n            delegator_stake: BTreeMap::from_iter(vec![\n                (delegator_1_key.clone(), U512::max_value()),\n                (delegator_2_key.clone(), U512::max_value()),\n                (delegator_3_key.clone(), U512::zero()),\n            ]),\n        };\n\n        bytesrepr::test_serialization_roundtrip(&legacy_seigniorage_recipient);\n    }\n\n    #[test]\n    fn test_overflow_in_delegation_rate() {\n        let delegator_1_kind = DelegatorKind::PublicKey(PublicKey::from(\n            &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(),\n        ));\n        let delegator_2_kind = DelegatorKind::PublicKey(PublicKey::from(\n            &SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(),\n        ));\n        let delegator_3_kind = DelegatorKind::PublicKey(PublicKey::from(\n            &SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]).unwrap(),\n        ));\n        let seigniorage_recipient = SeigniorageRecipientV2 {\n            stake: U512::max_value(),\n            delegation_rate: DelegationRate::MAX,\n            delegator_stake: BTreeMap::from_iter(vec![\n                
(delegator_1_kind.clone(), U512::max_value()),\n                (delegator_2_kind, U512::max_value()),\n                (delegator_3_kind, U512::zero()),\n            ]),\n            reservation_delegation_rates: BTreeMap::from_iter(vec![(\n                delegator_1_kind,\n                DelegationRate::MIN,\n            )]),\n        };\n        assert_eq!(seigniorage_recipient.total_stake(), None)\n    }\n\n    #[test]\n    fn test_overflow_in_delegation_total_stake() {\n        let delegator_1_kind = PublicKey::from(\n            &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(),\n        )\n        .into();\n        let delegator_2_kind = PublicKey::from(\n            &SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(),\n        )\n        .into();\n        let delegator_3_kind = PublicKey::from(\n            &SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]).unwrap(),\n        )\n        .into();\n        let seigniorage_recipient = SeigniorageRecipientV2 {\n            stake: U512::max_value(),\n            delegation_rate: DelegationRate::MAX,\n            delegator_stake: BTreeMap::from_iter(vec![\n                (delegator_1_kind, U512::MAX),\n                (delegator_2_kind, U512::MAX),\n                (delegator_3_kind, U512::MAX),\n            ]),\n            reservation_delegation_rates: BTreeMap::new(),\n        };\n        assert_eq!(seigniorage_recipient.delegator_total_stake(), None)\n    }\n}\n"
  },
  {
    "path": "types/src/system/auction/unbond.rs",
    "content": "use alloc::{string::String, vec::Vec};\nuse core::fmt::{self, Display, Formatter};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\n\nuse super::{BidAddr, DelegatorKind, UnbondingPurse, WithdrawPurse};\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    checksummed_hex, CLType, CLTyped, EraId, PublicKey, URef, URefAddr, U512,\n};\nuse serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer};\nuse serde_helpers::{HumanReadableUnbondKind, NonHumanReadableUnbondKind};\n\n/// UnbondKindTag variants.\n#[allow(clippy::large_enum_variant)]\n#[repr(u8)]\n#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)]\npub enum UnbondKindTag {\n    /// Validator bid.\n    Validator = 0,\n    /// Validator bid.\n    DelegatedAccount = 1,\n    /// Delegator bid.\n    DelegatedPurse = 2,\n}\n\n/// Unbond variants.\n#[derive(Debug, PartialEq, Eq, Clone, Ord, PartialOrd)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub enum UnbondKind {\n    Validator(PublicKey),\n    DelegatedPublicKey(PublicKey),\n    DelegatedPurse(#[cfg_attr(feature = \"json-schema\", schemars(with = \"String\"))] URefAddr),\n}\n\nimpl UnbondKind {\n    /// Returns UnbondKindTag.\n    pub fn tag(&self) -> UnbondKindTag {\n        match self {\n            UnbondKind::Validator(_) => UnbondKindTag::Validator,\n            UnbondKind::DelegatedPublicKey(_) => UnbondKindTag::DelegatedAccount,\n            UnbondKind::DelegatedPurse(_) => UnbondKindTag::DelegatedPurse,\n        }\n    }\n\n    /// Returns PublicKey, if any.\n    pub fn maybe_public_key(&self) -> Option<PublicKey> {\n        match self {\n            UnbondKind::Validator(pk) | UnbondKind::DelegatedPublicKey(pk) => Some(pk.clone()),\n            UnbondKind::DelegatedPurse(_) => None,\n        }\n    }\n\n    /// Is this a validator 
unbond?\n    pub fn is_validator(&self) -> bool {\n        match self {\n            UnbondKind::Validator(_) => true,\n            UnbondKind::DelegatedPublicKey(_) | UnbondKind::DelegatedPurse(_) => false,\n        }\n    }\n\n    /// Is this a delegator unbond?\n    pub fn is_delegator(&self) -> bool {\n        !self.is_validator()\n    }\n\n    /// The correct bid addr for this instance.\n    pub fn bid_addr(&self, validator_public_key: &PublicKey) -> BidAddr {\n        match self {\n            UnbondKind::Validator(pk) => BidAddr::UnbondAccount {\n                validator: validator_public_key.to_account_hash(),\n                unbonder: pk.to_account_hash(),\n            },\n            UnbondKind::DelegatedPublicKey(pk) => BidAddr::DelegatedAccount {\n                delegator: pk.to_account_hash(),\n                validator: validator_public_key.to_account_hash(),\n            },\n            UnbondKind::DelegatedPurse(addr) => BidAddr::DelegatedPurse {\n                validator: validator_public_key.to_account_hash(),\n                delegator: *addr,\n            },\n        }\n    }\n}\n\nimpl ToBytes for UnbondKind {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        let (tag, mut serialized_data) = match self {\n            UnbondKind::Validator(pk) => (UnbondKindTag::Validator, pk.to_bytes()?),\n            UnbondKind::DelegatedPublicKey(pk) => (UnbondKindTag::DelegatedAccount, pk.to_bytes()?),\n            UnbondKind::DelegatedPurse(addr) => (UnbondKindTag::DelegatedPurse, addr.to_bytes()?),\n        };\n        result.push(tag as u8);\n        result.append(&mut serialized_data);\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                UnbondKind::Validator(pk) => pk.serialized_length(),\n                UnbondKind::DelegatedPublicKey(pk) => pk.serialized_length(),\n        
        UnbondKind::DelegatedPurse(addr) => addr.serialized_length(),\n            }\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        writer.push(self.tag() as u8);\n        match self {\n            UnbondKind::Validator(pk) => pk.write_bytes(writer)?,\n            UnbondKind::DelegatedPublicKey(pk) => pk.write_bytes(writer)?,\n            UnbondKind::DelegatedPurse(addr) => addr.write_bytes(writer)?,\n        };\n        Ok(())\n    }\n}\n\nimpl FromBytes for UnbondKind {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?;\n        match tag {\n            tag if tag == UnbondKindTag::Validator as u8 => PublicKey::from_bytes(remainder)\n                .map(|(pk, remainder)| (UnbondKind::Validator(pk), remainder)),\n            tag if tag == UnbondKindTag::DelegatedAccount as u8 => PublicKey::from_bytes(remainder)\n                .map(|(pk, remainder)| (UnbondKind::DelegatedPublicKey(pk), remainder)),\n            tag if tag == UnbondKindTag::DelegatedPurse as u8 => URefAddr::from_bytes(remainder)\n                .map(|(delegator_bid, remainder)| {\n                    (UnbondKind::DelegatedPurse(delegator_bid), remainder)\n                }),\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\nimpl From<DelegatorKind> for UnbondKind {\n    fn from(value: DelegatorKind) -> Self {\n        match value {\n            DelegatorKind::PublicKey(pk) => UnbondKind::DelegatedPublicKey(pk),\n            DelegatorKind::Purse(addr) => UnbondKind::DelegatedPurse(addr),\n        }\n    }\n}\n\nimpl Serialize for UnbondKind {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            HumanReadableUnbondKind::from(self).serialize(serializer)\n        } else {\n            
NonHumanReadableUnbondKind::from(self).serialize(serializer)\n        }\n    }\n}\n\n#[derive(Debug)]\nenum UnbondKindError {\n    DeserializationError(String),\n}\n\nimpl Display for UnbondKindError {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            UnbondKindError::DeserializationError(msg) => {\n                write!(f, \"Error when deserializing UnbondKind: {}\", msg)\n            }\n        }\n    }\n}\n\nimpl TryFrom<HumanReadableUnbondKind> for UnbondKind {\n    type Error = UnbondKindError;\n\n    fn try_from(value: HumanReadableUnbondKind) -> Result<Self, Self::Error> {\n        match value {\n            HumanReadableUnbondKind::Validator(public_key) => Ok(UnbondKind::Validator(public_key)),\n            HumanReadableUnbondKind::DelegatedPublicKey(public_key) => {\n                Ok(UnbondKind::DelegatedPublicKey(public_key))\n            }\n            HumanReadableUnbondKind::DelegatedPurse(encoded) => {\n                let decoded = checksummed_hex::decode(encoded).map_err(|e| {\n                    UnbondKindError::DeserializationError(format!(\n                        \"Failed to decode encoded URefAddr: {}\",\n                        e\n                    ))\n                })?;\n                let uref_addr = URefAddr::try_from(decoded.as_ref()).map_err(|e| {\n                    UnbondKindError::DeserializationError(format!(\n                        \"Failed to build uref address: {}\",\n                        e\n                    ))\n                })?;\n                Ok(UnbondKind::DelegatedPurse(uref_addr))\n            }\n        }\n    }\n}\n\nimpl From<NonHumanReadableUnbondKind> for UnbondKind {\n    fn from(value: NonHumanReadableUnbondKind) -> Self {\n        match value {\n            NonHumanReadableUnbondKind::Validator(public_key) => UnbondKind::Validator(public_key),\n            NonHumanReadableUnbondKind::DelegatedPublicKey(public_key) => {\n                
UnbondKind::DelegatedPublicKey(public_key)\n            }\n            NonHumanReadableUnbondKind::DelegatedPurse(uref_addr) => {\n                UnbondKind::DelegatedPurse(uref_addr)\n            }\n        }\n    }\n}\n\nimpl<'de> Deserialize<'de> for UnbondKind {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        if deserializer.is_human_readable() {\n            let human_readable = HumanReadableUnbondKind::deserialize(deserializer)?;\n            UnbondKind::try_from(human_readable)\n                .map_err(|error| SerdeError::custom(format!(\"{:?}\", error)))\n        } else {\n            let non_human_readable = NonHumanReadableUnbondKind::deserialize(deserializer)?;\n            Ok(UnbondKind::from(non_human_readable))\n        }\n    }\n}\n\n#[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Clone)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct Unbond {\n    /// Validators public key.\n    validator_public_key: PublicKey,\n    /// Unbond kind.\n    unbond_kind: UnbondKind,\n    /// Unbond amounts per era.\n    eras: Vec<UnbondEra>,\n}\n\nimpl Unbond {\n    /// Creates [`Unbond`] instance for an unbonding request.\n    pub const fn new(\n        validator_public_key: PublicKey,\n        unbond_kind: UnbondKind,\n        eras: Vec<UnbondEra>,\n    ) -> Self {\n        Self {\n            validator_public_key,\n            unbond_kind,\n            eras,\n        }\n    }\n\n    /// Creates [`Unbond`] instance for an unbonding request.\n    pub fn new_validator_unbond(validator_public_key: PublicKey, eras: Vec<UnbondEra>) -> Self {\n        Self {\n            validator_public_key: validator_public_key.clone(),\n            unbond_kind: UnbondKind::Validator(validator_public_key),\n            eras,\n        }\n    }\n\n    /// Creates [`Unbond`] instance for an unbonding request.\n    pub const fn 
new_delegated_account_unbond(\n        validator_public_key: PublicKey,\n        delegator_public_key: PublicKey,\n        eras: Vec<UnbondEra>,\n    ) -> Self {\n        Self {\n            validator_public_key,\n            unbond_kind: UnbondKind::DelegatedPublicKey(delegator_public_key),\n            eras,\n        }\n    }\n\n    /// Creates [`Unbond`] instance for an unbonding request.\n    pub const fn new_delegated_purse_unbond(\n        validator_public_key: PublicKey,\n        delegator_purse_addr: URefAddr,\n        eras: Vec<UnbondEra>,\n    ) -> Self {\n        Self {\n            validator_public_key,\n            unbond_kind: UnbondKind::DelegatedPurse(delegator_purse_addr),\n            eras,\n        }\n    }\n\n    /// Checks if given request is made by a validator by checking if public key of unbonder is same\n    /// as a key owned by validator.\n    pub fn is_validator(&self) -> bool {\n        match self.unbond_kind.maybe_public_key() {\n            Some(pk) => pk == self.validator_public_key,\n            None => false,\n        }\n    }\n\n    /// Returns public key of validator.\n    pub fn validator_public_key(&self) -> &PublicKey {\n        &self.validator_public_key\n    }\n\n    /// Returns unbond kind.\n    pub fn unbond_kind(&self) -> &UnbondKind {\n        &self.unbond_kind\n    }\n\n    /// Returns eras unbond items.\n    pub fn eras(&self) -> &Vec<UnbondEra> {\n        &self.eras\n    }\n\n    /// Returns eras unbond items.\n    pub fn eras_mut(&mut self) -> &mut Vec<UnbondEra> {\n        &mut self.eras\n    }\n\n    /// Takes eras unbond items.\n    pub fn take_eras(mut self) -> Vec<UnbondEra> {\n        let eras = self.eras;\n        self.eras = vec![];\n        eras\n    }\n\n    /// Splits instance into eras that are not expired, and eras that are expired (if any).\n    pub fn expired(self, era_id: EraId, unbonding_delay: u64) -> (Unbond, Option<Vec<UnbondEra>>) {\n        let mut retained = vec![];\n        let mut expired = 
vec![];\n        for era in self.eras {\n            let threshold = era\n                .era_of_creation()\n                .value()\n                .saturating_add(unbonding_delay);\n            if era_id.value() >= threshold {\n                expired.push(era);\n            } else {\n                retained.push(era)\n            }\n        }\n        let ret = Unbond::new(self.validator_public_key, self.unbond_kind, retained);\n        if !expired.is_empty() {\n            (ret, Some(expired))\n        } else {\n            (ret, None)\n        }\n    }\n\n    /// Returns the unbond era with the highest era of creation.\n    pub fn target_unbond_era(&self) -> Option<UnbondEra> {\n        self.eras()\n            .iter()\n            .max_by(|x, y| x.era_of_creation().cmp(&y.era_of_creation()))\n            .cloned()\n    }\n\n    /// Returns a mutable reference to the unbond era with the highest era of creation.\n    pub fn target_unbond_era_mut(&mut self) -> Option<&mut UnbondEra> {\n        self.eras_mut()\n            .iter_mut()\n            .max_by(|x, y| x.era_of_creation().cmp(&y.era_of_creation()))\n    }\n}\n\nimpl ToBytes for Unbond {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        result.extend(&self.validator_public_key.to_bytes()?);\n        result.extend(&self.unbond_kind.to_bytes()?);\n        result.extend(&self.eras.to_bytes()?);\n        Ok(result)\n    }\n    fn serialized_length(&self) -> usize {\n        self.validator_public_key.serialized_length()\n            + self.unbond_kind.serialized_length()\n            + self.eras.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.validator_public_key.write_bytes(writer)?;\n        self.unbond_kind.write_bytes(writer)?;\n        self.eras.write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for Unbond {\n    fn 
from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (validator_public_key, remainder) = FromBytes::from_bytes(bytes)?;\n        let (unbond_kind, remainder) = FromBytes::from_bytes(remainder)?;\n        let (eras, remainder) = FromBytes::from_bytes(remainder)?;\n\n        Ok((\n            Unbond {\n                validator_public_key,\n                unbond_kind,\n                eras,\n            },\n            remainder,\n        ))\n    }\n}\n\nimpl CLTyped for Unbond {\n    fn cl_type() -> CLType {\n        CLType::Any\n    }\n}\n\nimpl Default for Unbond {\n    fn default() -> Self {\n        Self {\n            unbond_kind: UnbondKind::Validator(PublicKey::System),\n            validator_public_key: PublicKey::System,\n            eras: vec![],\n        }\n    }\n}\n\nimpl From<UnbondingPurse> for Unbond {\n    fn from(unbonding_purse: UnbondingPurse) -> Self {\n        let unbond_kind =\n            if unbonding_purse.validator_public_key() == unbonding_purse.unbonder_public_key() {\n                UnbondKind::Validator(unbonding_purse.validator_public_key().clone())\n            } else {\n                UnbondKind::DelegatedPublicKey(unbonding_purse.unbonder_public_key().clone())\n            };\n        Unbond::new(\n            unbonding_purse.validator_public_key().clone(),\n            unbond_kind,\n            vec![UnbondEra::new(\n                *unbonding_purse.bonding_purse(),\n                unbonding_purse.era_of_creation(),\n                *unbonding_purse.amount(),\n                None,\n            )],\n        )\n    }\n}\n\nimpl From<WithdrawPurse> for Unbond {\n    fn from(withdraw_purse: WithdrawPurse) -> Self {\n        let unbond_kind =\n            if withdraw_purse.validator_public_key == withdraw_purse.unbonder_public_key {\n                UnbondKind::Validator(withdraw_purse.validator_public_key.clone())\n            } else {\n                
UnbondKind::DelegatedPublicKey(withdraw_purse.unbonder_public_key.clone())\n            };\n        Unbond::new(\n            withdraw_purse.validator_public_key,\n            unbond_kind,\n            vec![UnbondEra::new(\n                withdraw_purse.bonding_purse,\n                withdraw_purse.era_of_creation,\n                withdraw_purse.amount,\n                None,\n            )],\n        )\n    }\n}\n\n/// Unbond amounts per era.\n#[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Clone, Default)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct UnbondEra {\n    /// Bonding Purse\n    bonding_purse: URef,\n    /// Era in which this unbonding request was created.\n    era_of_creation: EraId,\n    /// Unbonding Amount.\n    amount: U512,\n    /// The validator public key to re-delegate to.\n    new_validator: Option<PublicKey>,\n}\n\nimpl UnbondEra {\n    /// Creates [`UnbondEra`] instance for an unbonding request.\n    pub const fn new(\n        bonding_purse: URef,\n        era_of_creation: EraId,\n        amount: U512,\n        new_validator: Option<PublicKey>,\n    ) -> Self {\n        Self {\n            bonding_purse,\n            era_of_creation,\n            amount,\n            new_validator,\n        }\n    }\n\n    /// Returns bonding purse used to make this unbonding request.\n    pub fn bonding_purse(&self) -> &URef {\n        &self.bonding_purse\n    }\n\n    /// Returns era which was used to create this unbonding request.\n    pub fn era_of_creation(&self) -> EraId {\n        self.era_of_creation\n    }\n\n    /// Returns unbonding amount.\n    pub fn amount(&self) -> &U512 {\n        &self.amount\n    }\n\n    /// Returns the public key for the new validator.\n    pub fn new_validator(&self) -> &Option<PublicKey> {\n        &self.new_validator\n    }\n\n    /// Sets amount to provided value.\n    pub fn with_amount(&mut 
self, amount: U512) {\n        self.amount = amount;\n    }\n}\n\nimpl ToBytes for UnbondEra {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        result.extend(&self.bonding_purse.to_bytes()?);\n        result.extend(&self.era_of_creation.to_bytes()?);\n        result.extend(&self.amount.to_bytes()?);\n        result.extend(&self.new_validator.to_bytes()?);\n        Ok(result)\n    }\n    fn serialized_length(&self) -> usize {\n        self.bonding_purse.serialized_length()\n            + self.era_of_creation.serialized_length()\n            + self.amount.serialized_length()\n            + self.new_validator.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.bonding_purse.write_bytes(writer)?;\n        self.era_of_creation.write_bytes(writer)?;\n        self.amount.write_bytes(writer)?;\n        self.new_validator.write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for UnbondEra {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (bonding_purse, remainder) = FromBytes::from_bytes(bytes)?;\n        let (era_of_creation, remainder) = FromBytes::from_bytes(remainder)?;\n        let (amount, remainder) = FromBytes::from_bytes(remainder)?;\n        let (new_validator, remainder) = Option::<PublicKey>::from_bytes(remainder)?;\n\n        Ok((\n            UnbondEra {\n                bonding_purse,\n                era_of_creation,\n                amount,\n                new_validator,\n            },\n            remainder,\n        ))\n    }\n}\n\nimpl CLTyped for UnbondEra {\n    fn cl_type() -> CLType {\n        CLType::Any\n    }\n}\n\nmod serde_helpers {\n    use super::UnbondKind;\n    use crate::{PublicKey, URefAddr};\n    use alloc::string::String;\n    use serde::{Deserialize, Serialize};\n\n    #[derive(Serialize, Deserialize)]\n    pub(super) enum 
HumanReadableUnbondKind {\n        Validator(PublicKey),\n        DelegatedPublicKey(PublicKey),\n        DelegatedPurse(String),\n    }\n\n    #[derive(Serialize, Deserialize)]\n    pub(super) enum NonHumanReadableUnbondKind {\n        Validator(PublicKey),\n        DelegatedPublicKey(PublicKey),\n        DelegatedPurse(URefAddr),\n    }\n\n    impl From<&UnbondKind> for HumanReadableUnbondKind {\n        fn from(unbond_source: &UnbondKind) -> Self {\n            match unbond_source {\n                UnbondKind::Validator(public_key) => {\n                    HumanReadableUnbondKind::Validator(public_key.clone())\n                }\n                UnbondKind::DelegatedPublicKey(public_key) => {\n                    HumanReadableUnbondKind::DelegatedPublicKey(public_key.clone())\n                }\n                UnbondKind::DelegatedPurse(uref_addr) => {\n                    HumanReadableUnbondKind::DelegatedPurse(base16::encode_lower(uref_addr))\n                }\n            }\n        }\n    }\n\n    impl From<&UnbondKind> for NonHumanReadableUnbondKind {\n        fn from(unbond_kind: &UnbondKind) -> Self {\n            match unbond_kind {\n                UnbondKind::Validator(public_key) => {\n                    NonHumanReadableUnbondKind::Validator(public_key.clone())\n                }\n                UnbondKind::DelegatedPublicKey(public_key) => {\n                    NonHumanReadableUnbondKind::DelegatedPublicKey(public_key.clone())\n                }\n                UnbondKind::DelegatedPurse(uref_addr) => {\n                    NonHumanReadableUnbondKind::DelegatedPurse(*uref_addr)\n                }\n            }\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use rand::Rng;\n\n    use crate::{\n        bytesrepr,\n        system::auction::{\n            unbond::{Unbond, UnbondKind},\n            UnbondEra,\n        },\n        testing::TestRng,\n        AccessRights, EraId, PublicKey, SecretKey, URef, U512,\n    };\n\n    const 
BONDING_PURSE: URef = URef::new([14; 32], AccessRights::READ_ADD_WRITE);\n    const ERA_OF_WITHDRAWAL: EraId = EraId::MAX;\n\n    fn validator_public_key() -> PublicKey {\n        let secret_key = SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap();\n        PublicKey::from(&secret_key)\n    }\n\n    fn delegated_account_unbond_kind() -> UnbondKind {\n        let secret_key = SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap();\n        UnbondKind::DelegatedPublicKey(PublicKey::from(&secret_key))\n    }\n\n    fn amount() -> U512 {\n        U512::max_value() - 1\n    }\n\n    #[test]\n    fn serialization_roundtrip_for_unbond() {\n        let unbond_era = UnbondEra {\n            bonding_purse: BONDING_PURSE,\n            era_of_creation: ERA_OF_WITHDRAWAL,\n            amount: amount(),\n            new_validator: None,\n        };\n\n        let unbond = Unbond {\n            validator_public_key: validator_public_key(),\n            unbond_kind: delegated_account_unbond_kind(),\n            eras: vec![unbond_era],\n        };\n\n        bytesrepr::test_serialization_roundtrip(&unbond);\n    }\n\n    #[test]\n    fn should_be_validator_condition_for_unbond() {\n        let validator_pk = validator_public_key();\n        let validator_unbond = Unbond::new(\n            validator_pk.clone(),\n            UnbondKind::Validator(validator_pk),\n            vec![],\n        );\n        assert!(validator_unbond.is_validator());\n    }\n\n    #[test]\n    fn should_be_delegator_condition_for_unbond() {\n        let delegator_unbond = Unbond::new(\n            validator_public_key(),\n            delegated_account_unbond_kind(),\n            vec![],\n        );\n        assert!(!delegator_unbond.is_validator());\n    }\n\n    #[test]\n    fn purse_serialized_as_string() {\n        let delegator_kind_payload = UnbondKind::DelegatedPurse([1; 32]);\n        let serialized = serde_json::to_string(&delegator_kind_payload).unwrap();\n     
   assert_eq!(\n            serialized,\n            \"{\\\"DelegatedPurse\\\":\\\"0101010101010101010101010101010101010101010101010101010101010101\\\"}\"\n        );\n    }\n\n    #[test]\n    fn given_broken_address_purse_deserialization_fails() {\n        let failing =\n            \"{\\\"DelegatedPurse\\\":\\\"Z101010101010101010101010101010101010101010101010101010101010101\\\"}\";\n        let ret = serde_json::from_str::<UnbondKind>(failing);\n        assert!(ret.is_err());\n        let failing =\n            \"{\\\"DelegatedPurse\\\":\\\"01010101010101010101010101010101010101010101010101010101\\\"}\";\n        let ret = serde_json::from_str::<UnbondKind>(failing);\n        assert!(ret.is_err());\n    }\n\n    #[test]\n    fn json_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let entity = UnbondKind::Validator(PublicKey::random(rng));\n        let json_string = serde_json::to_string_pretty(&entity).unwrap();\n        let decoded: UnbondKind = serde_json::from_str(&json_string).unwrap();\n        assert_eq!(decoded, entity);\n\n        let entity = UnbondKind::DelegatedPublicKey(PublicKey::random(rng));\n        let json_string = serde_json::to_string_pretty(&entity).unwrap();\n        let decoded: UnbondKind = serde_json::from_str(&json_string).unwrap();\n        assert_eq!(decoded, entity);\n\n        let entity = UnbondKind::DelegatedPurse(rng.gen());\n        let json_string = serde_json::to_string_pretty(&entity).unwrap();\n        let decoded: UnbondKind = serde_json::from_str(&json_string).unwrap();\n        assert_eq!(decoded, entity);\n    }\n}\n"
  },
  {
    "path": "types/src/system/auction/unbonding_purse.rs",
    "content": "use alloc::vec::Vec;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    CLType, CLTyped, EraId, PublicKey, URef, U512,\n};\n\nuse super::WithdrawPurse;\n\n/// Unbonding purse.\n#[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Clone)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct UnbondingPurse {\n    /// Bonding Purse\n    bonding_purse: URef,\n    /// Validators public key.\n    validator_public_key: PublicKey,\n    /// Unbonders public key.\n    unbonder_public_key: PublicKey,\n    /// Era in which this unbonding request was created.\n    era_of_creation: EraId,\n    /// Unbonding Amount.\n    amount: U512,\n    /// The validator public key to re-delegate to.\n    new_validator: Option<PublicKey>,\n}\n\nimpl UnbondingPurse {\n    /// Creates [`UnbondingPurse`] instance for an unbonding request.\n    pub const fn new(\n        bonding_purse: URef,\n        validator_public_key: PublicKey,\n        unbonder_public_key: PublicKey,\n        era_of_creation: EraId,\n        amount: U512,\n        new_validator: Option<PublicKey>,\n    ) -> Self {\n        Self {\n            bonding_purse,\n            validator_public_key,\n            unbonder_public_key,\n            era_of_creation,\n            amount,\n            new_validator,\n        }\n    }\n\n    /// Checks if given request is made by a validator by checking if public key of unbonder is same\n    /// as a key owned by validator.\n    pub fn is_validator(&self) -> bool {\n        self.validator_public_key == self.unbonder_public_key\n    }\n\n    /// Returns bonding purse used to make this unbonding request.\n    pub fn bonding_purse(&self) -> &URef {\n        &self.bonding_purse\n    }\n\n    /// Returns 
public key of validator.\n    pub fn validator_public_key(&self) -> &PublicKey {\n        &self.validator_public_key\n    }\n\n    /// Returns public key of unbonder.\n    ///\n    /// For withdrawal requests that originated from validator's public key through `withdraw_bid`\n    /// entrypoint this is equal to [`UnbondingPurse::validator_public_key`] and\n    /// [`UnbondingPurse::is_validator`] is `true`.\n    pub fn unbonder_public_key(&self) -> &PublicKey {\n        &self.unbonder_public_key\n    }\n\n    /// Returns era which was used to create this unbonding request.\n    pub fn era_of_creation(&self) -> EraId {\n        self.era_of_creation\n    }\n\n    /// Returns unbonding amount.\n    pub fn amount(&self) -> &U512 {\n        &self.amount\n    }\n\n    /// Returns the public key for the new validator.\n    pub fn new_validator(&self) -> &Option<PublicKey> {\n        &self.new_validator\n    }\n\n    /// Sets amount to provided value.\n    pub fn with_amount(&mut self, amount: U512) {\n        self.amount = amount;\n    }\n}\n\nimpl ToBytes for UnbondingPurse {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        result.extend(&self.bonding_purse.to_bytes()?);\n        result.extend(&self.validator_public_key.to_bytes()?);\n        result.extend(&self.unbonder_public_key.to_bytes()?);\n        result.extend(&self.era_of_creation.to_bytes()?);\n        result.extend(&self.amount.to_bytes()?);\n        result.extend(&self.new_validator.to_bytes()?);\n        Ok(result)\n    }\n    fn serialized_length(&self) -> usize {\n        self.bonding_purse.serialized_length()\n            + self.validator_public_key.serialized_length()\n            + self.unbonder_public_key.serialized_length()\n            + self.era_of_creation.serialized_length()\n            + self.amount.serialized_length()\n            + self.new_validator.serialized_length()\n    }\n\n    fn write_bytes(&self, 
writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.bonding_purse.write_bytes(writer)?;\n        self.validator_public_key.write_bytes(writer)?;\n        self.unbonder_public_key.write_bytes(writer)?;\n        self.era_of_creation.write_bytes(writer)?;\n        self.amount.write_bytes(writer)?;\n        self.new_validator.write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for UnbondingPurse {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (bonding_purse, remainder) = FromBytes::from_bytes(bytes)?;\n        let (validator_public_key, remainder) = FromBytes::from_bytes(remainder)?;\n        let (unbonder_public_key, remainder) = FromBytes::from_bytes(remainder)?;\n        let (era_of_creation, remainder) = FromBytes::from_bytes(remainder)?;\n        let (amount, remainder) = FromBytes::from_bytes(remainder)?;\n        let (new_validator, remainder) = Option::<PublicKey>::from_bytes(remainder)?;\n\n        Ok((\n            UnbondingPurse {\n                bonding_purse,\n                validator_public_key,\n                unbonder_public_key,\n                era_of_creation,\n                amount,\n                new_validator,\n            },\n            remainder,\n        ))\n    }\n}\n\nimpl CLTyped for UnbondingPurse {\n    fn cl_type() -> CLType {\n        CLType::Any\n    }\n}\n\nimpl From<WithdrawPurse> for UnbondingPurse {\n    fn from(withdraw_purse: WithdrawPurse) -> Self {\n        UnbondingPurse::new(\n            withdraw_purse.bonding_purse,\n            withdraw_purse.validator_public_key,\n            withdraw_purse.unbonder_public_key,\n            withdraw_purse.era_of_creation,\n            withdraw_purse.amount,\n            None,\n        )\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{\n        bytesrepr, system::auction::UnbondingPurse, AccessRights, EraId, PublicKey, SecretKey,\n        URef, U512,\n    };\n\n    const BONDING_PURSE: URef = 
URef::new([14; 32], AccessRights::READ_ADD_WRITE);\n    const ERA_OF_WITHDRAWAL: EraId = EraId::MAX;\n\n    fn validator_public_key() -> PublicKey {\n        let secret_key = SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap();\n        PublicKey::from(&secret_key)\n    }\n\n    fn unbonder_public_key() -> PublicKey {\n        let secret_key = SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap();\n        PublicKey::from(&secret_key)\n    }\n\n    fn amount() -> U512 {\n        U512::max_value() - 1\n    }\n\n    #[test]\n    fn serialization_roundtrip_for_unbonding_purse() {\n        let unbonding_purse = UnbondingPurse {\n            bonding_purse: BONDING_PURSE,\n            validator_public_key: validator_public_key(),\n            unbonder_public_key: unbonder_public_key(),\n            era_of_creation: ERA_OF_WITHDRAWAL,\n            amount: amount(),\n            new_validator: None,\n        };\n\n        bytesrepr::test_serialization_roundtrip(&unbonding_purse);\n    }\n\n    #[test]\n    fn should_be_validator_condition_for_unbonding_purse() {\n        let validator_unbonding_purse = UnbondingPurse::new(\n            BONDING_PURSE,\n            validator_public_key(),\n            validator_public_key(),\n            ERA_OF_WITHDRAWAL,\n            amount(),\n            None,\n        );\n        assert!(validator_unbonding_purse.is_validator());\n    }\n\n    #[test]\n    fn should_be_delegator_condition_for_unbonding_purse() {\n        let delegator_unbonding_purse = UnbondingPurse::new(\n            BONDING_PURSE,\n            validator_public_key(),\n            unbonder_public_key(),\n            ERA_OF_WITHDRAWAL,\n            amount(),\n            None,\n        );\n        assert!(!delegator_unbonding_purse.is_validator());\n    }\n}\n"
  },
  {
    "path": "types/src/system/auction/validator_bid.rs",
    "content": "// TODO - remove once schemars stops causing warning.\n#![allow(clippy::field_reassign_with_default)]\n\nuse alloc::vec::Vec;\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    system::auction::{\n        bid::VestingSchedule, Bid, DelegationRate, Error, VESTING_SCHEDULE_LENGTH_MILLIS,\n    },\n    CLType, CLTyped, PublicKey, URef, U512,\n};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n/// An entry in the validator map.\n#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct ValidatorBid {\n    /// Validator public key\n    validator_public_key: PublicKey,\n    /// The purse that was used for bonding.\n    bonding_purse: URef,\n    /// The amount of tokens staked by a validator (not including delegators).\n    staked_amount: U512,\n    /// Delegation rate\n    delegation_rate: DelegationRate,\n    /// Vesting schedule for a genesis validator. 
`None` if non-genesis validator.\n    vesting_schedule: Option<VestingSchedule>,\n    /// `true` if validator has been \"evicted\"\n    inactive: bool,\n    /// Minimum allowed delegation amount in motes\n    minimum_delegation_amount: u64,\n    /// Maximum allowed delegation amount in motes\n    maximum_delegation_amount: u64,\n    /// Slots reserved for specific delegators\n    reserved_slots: u32,\n}\n\nimpl ValidatorBid {\n    /// Sets the maximum and minimum delegation amounts for a validators bid.\n    pub fn with_min_max_delegation_amount(\n        mut self,\n        maximum_delegation_amount: u64,\n        minimum_delegation_amount: u64,\n    ) -> Self {\n        self.maximum_delegation_amount = maximum_delegation_amount;\n        self.minimum_delegation_amount = minimum_delegation_amount;\n        self\n    }\n\n    pub fn with_inactive(mut self, inactive: bool) -> Self {\n        self.inactive = inactive;\n        self\n    }\n}\n\nimpl ValidatorBid {\n    /// Creates new instance of a bid with locked funds.\n    #[allow(clippy::too_many_arguments)]\n    pub fn locked(\n        validator_public_key: PublicKey,\n        bonding_purse: URef,\n        staked_amount: U512,\n        delegation_rate: DelegationRate,\n        release_timestamp_millis: u64,\n        minimum_delegation_amount: u64,\n        maximum_delegation_amount: u64,\n        reserved_slots: u32,\n    ) -> Self {\n        let vesting_schedule = Some(VestingSchedule::new(release_timestamp_millis));\n        let inactive = false;\n        Self {\n            validator_public_key,\n            bonding_purse,\n            staked_amount,\n            delegation_rate,\n            vesting_schedule,\n            inactive,\n            minimum_delegation_amount,\n            maximum_delegation_amount,\n            reserved_slots,\n        }\n    }\n\n    /// Creates new instance of a bid with unlocked funds.\n    pub fn unlocked(\n        validator_public_key: PublicKey,\n        bonding_purse: 
URef,\n        staked_amount: U512,\n        delegation_rate: DelegationRate,\n        minimum_delegation_amount: u64,\n        maximum_delegation_amount: u64,\n        reserved_slots: u32,\n    ) -> Self {\n        let vesting_schedule = None;\n        let inactive = false;\n        Self {\n            validator_public_key,\n            bonding_purse,\n            staked_amount,\n            delegation_rate,\n            vesting_schedule,\n            inactive,\n            minimum_delegation_amount,\n            maximum_delegation_amount,\n            reserved_slots,\n        }\n    }\n\n    /// Creates a new inactive instance of a bid with 0 staked amount.\n    pub fn empty(validator_public_key: PublicKey, bonding_purse: URef) -> Self {\n        let vesting_schedule = None;\n        let inactive = true;\n        let staked_amount = U512::zero();\n        let delegation_rate = Default::default();\n        Self {\n            validator_public_key,\n            bonding_purse,\n            staked_amount,\n            delegation_rate,\n            vesting_schedule,\n            inactive,\n            minimum_delegation_amount: 0,\n            maximum_delegation_amount: u64::MAX,\n            reserved_slots: 0,\n        }\n    }\n\n    /// Gets the validator public key of the provided bid\n    pub fn validator_public_key(&self) -> &PublicKey {\n        &self.validator_public_key\n    }\n\n    /// Gets the bonding purse of the provided bid\n    pub fn bonding_purse(&self) -> &URef {\n        &self.bonding_purse\n    }\n\n    /// Checks if a bid is still locked under a vesting schedule.\n    ///\n    /// Returns true if a timestamp falls below the initial lockup period + 91 days release\n    /// schedule, otherwise false.\n    pub fn is_locked(&self, timestamp_millis: u64) -> bool {\n        self.is_locked_with_vesting_schedule(timestamp_millis, VESTING_SCHEDULE_LENGTH_MILLIS)\n    }\n\n    /// Checks if a bid is still locked under a vesting schedule.\n    ///\n    /// 
Returns true if a timestamp falls below the initial lockup period + 91 days release\n    /// schedule, otherwise false.\n    pub fn is_locked_with_vesting_schedule(\n        &self,\n        timestamp_millis: u64,\n        vesting_schedule_period_millis: u64,\n    ) -> bool {\n        match &self.vesting_schedule {\n            Some(vesting_schedule) => {\n                vesting_schedule.is_vesting(timestamp_millis, vesting_schedule_period_millis)\n            }\n            None => false,\n        }\n    }\n\n    /// Gets the staked amount of the provided bid\n    pub fn staked_amount(&self) -> U512 {\n        self.staked_amount\n    }\n\n    /// Gets the staked amount of the provided bid\n    pub fn staked_amount_mut(&mut self) -> &mut U512 {\n        &mut self.staked_amount\n    }\n\n    /// Gets the delegation rate of the provided bid\n    pub fn delegation_rate(&self) -> &DelegationRate {\n        &self.delegation_rate\n    }\n\n    /// Returns a reference to the vesting schedule of the provided bid.  `None` if a non-genesis\n    /// validator.\n    pub fn vesting_schedule(&self) -> Option<&VestingSchedule> {\n        self.vesting_schedule.as_ref()\n    }\n\n    /// Returns a mutable reference to the vesting schedule of the provided bid.  
`None` if a\n    /// non-genesis validator.\n    pub fn vesting_schedule_mut(&mut self) -> Option<&mut VestingSchedule> {\n        self.vesting_schedule.as_mut()\n    }\n\n    /// Gets the reserved slots of the provided bid\n    pub fn reserved_slots(&self) -> u32 {\n        self.reserved_slots\n    }\n\n    /// Returns `true` if validator is inactive\n    pub fn inactive(&self) -> bool {\n        self.inactive\n    }\n\n    /// Decreases the stake of the provided bid\n    pub fn decrease_stake(\n        &mut self,\n        amount: U512,\n        era_end_timestamp_millis: u64,\n    ) -> Result<U512, Error> {\n        let updated_staked_amount = self\n            .staked_amount\n            .checked_sub(amount)\n            .ok_or(Error::UnbondTooLarge)?;\n\n        let vesting_schedule = match self.vesting_schedule.as_ref() {\n            Some(vesting_schedule) => vesting_schedule,\n            None => {\n                self.staked_amount = updated_staked_amount;\n                return Ok(updated_staked_amount);\n            }\n        };\n\n        match vesting_schedule.locked_amount(era_end_timestamp_millis) {\n            Some(locked_amount) if updated_staked_amount < locked_amount => {\n                Err(Error::ValidatorFundsLocked)\n            }\n            None => {\n                // If `None`, then the locked amounts table has yet to be initialized (likely\n                // pre-90 day mark)\n                Err(Error::ValidatorFundsLocked)\n            }\n            Some(_) => {\n                self.staked_amount = updated_staked_amount;\n                Ok(updated_staked_amount)\n            }\n        }\n    }\n\n    /// Increases the stake of the provided bid\n    pub fn increase_stake(&mut self, amount: U512) -> Result<U512, Error> {\n        let updated_staked_amount = self\n            .staked_amount\n            .checked_add(amount)\n            .ok_or(Error::InvalidAmount)?;\n\n        self.staked_amount = updated_staked_amount;\n\n      
  Ok(updated_staked_amount)\n    }\n\n    /// Updates the delegation rate of the provided bid\n    pub fn with_delegation_rate(&mut self, delegation_rate: DelegationRate) -> &mut Self {\n        self.delegation_rate = delegation_rate;\n        self\n    }\n\n    /// Updates the reserved slots of the provided bid\n    pub fn with_reserved_slots(&mut self, reserved_slots: u32) -> &mut Self {\n        self.reserved_slots = reserved_slots;\n        self\n    }\n\n    /// Sets given bid's `inactive` field to `false`\n    pub fn activate(&mut self) {\n        self.inactive = false;\n    }\n\n    /// Sets given bid's `inactive` field to `true`\n    pub fn deactivate(&mut self) {\n        self.inactive = true;\n    }\n\n    /// Sets validator public key\n    pub fn with_validator_public_key(&mut self, validator_public_key: PublicKey) -> &mut Self {\n        self.validator_public_key = validator_public_key;\n        self\n    }\n\n    /// Returns minimum allowed delegation amount in motes.\n    pub fn minimum_delegation_amount(&self) -> u64 {\n        self.minimum_delegation_amount\n    }\n\n    /// Returns maximum allowed delegation amount in motes.\n    pub fn maximum_delegation_amount(&self) -> u64 {\n        self.maximum_delegation_amount\n    }\n\n    /// Sets minimum and maximum delegation amounts in motes.\n    pub fn set_delegation_amount_boundaries(\n        &mut self,\n        minimum_delegation_amount: u64,\n        maximum_delegation_amount: u64,\n    ) {\n        self.minimum_delegation_amount = minimum_delegation_amount;\n        self.maximum_delegation_amount = maximum_delegation_amount;\n    }\n}\n\nimpl CLTyped for ValidatorBid {\n    fn cl_type() -> CLType {\n        CLType::Any\n    }\n}\n\nimpl ToBytes for ValidatorBid {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        self.validator_public_key.write_bytes(&mut result)?;\n        self.bonding_purse.write_bytes(&mut 
result)?;\n        self.staked_amount.write_bytes(&mut result)?;\n        self.delegation_rate.write_bytes(&mut result)?;\n        self.vesting_schedule.write_bytes(&mut result)?;\n        self.inactive.write_bytes(&mut result)?;\n        self.minimum_delegation_amount.write_bytes(&mut result)?;\n        self.maximum_delegation_amount.write_bytes(&mut result)?;\n        self.reserved_slots.write_bytes(&mut result)?;\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.validator_public_key.serialized_length()\n            + self.bonding_purse.serialized_length()\n            + self.staked_amount.serialized_length()\n            + self.delegation_rate.serialized_length()\n            + self.vesting_schedule.serialized_length()\n            + self.inactive.serialized_length()\n            + self.minimum_delegation_amount.serialized_length()\n            + self.maximum_delegation_amount.serialized_length()\n            + self.reserved_slots.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.validator_public_key.write_bytes(writer)?;\n        self.bonding_purse.write_bytes(writer)?;\n        self.staked_amount.write_bytes(writer)?;\n        self.delegation_rate.write_bytes(writer)?;\n        self.vesting_schedule.write_bytes(writer)?;\n        self.inactive.write_bytes(writer)?;\n        self.minimum_delegation_amount.write_bytes(writer)?;\n        self.maximum_delegation_amount.write_bytes(writer)?;\n        self.reserved_slots.write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for ValidatorBid {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (validator_public_key, bytes) = FromBytes::from_bytes(bytes)?;\n        let (bonding_purse, bytes) = FromBytes::from_bytes(bytes)?;\n        let (staked_amount, bytes) = FromBytes::from_bytes(bytes)?;\n        let (delegation_rate, bytes) = 
FromBytes::from_bytes(bytes)?;\n        let (vesting_schedule, bytes) = FromBytes::from_bytes(bytes)?;\n        let (inactive, bytes) = FromBytes::from_bytes(bytes)?;\n        let (minimum_delegation_amount, bytes) = FromBytes::from_bytes(bytes)?;\n        let (maximum_delegation_amount, bytes) = FromBytes::from_bytes(bytes)?;\n        let (reserved_slots, bytes) = FromBytes::from_bytes(bytes)?;\n        Ok((\n            ValidatorBid {\n                validator_public_key,\n                bonding_purse,\n                staked_amount,\n                delegation_rate,\n                vesting_schedule,\n                inactive,\n                minimum_delegation_amount,\n                maximum_delegation_amount,\n                reserved_slots,\n            },\n            bytes,\n        ))\n    }\n}\n\nimpl From<Bid> for ValidatorBid {\n    fn from(bid: Bid) -> Self {\n        ValidatorBid {\n            validator_public_key: bid.validator_public_key().clone(),\n            bonding_purse: *bid.bonding_purse(),\n            staked_amount: *bid.staked_amount(),\n            delegation_rate: *bid.delegation_rate(),\n            vesting_schedule: bid.vesting_schedule().cloned(),\n            inactive: bid.inactive(),\n            minimum_delegation_amount: 0,\n            maximum_delegation_amount: u64::MAX,\n            reserved_slots: 0,\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{\n        bytesrepr,\n        system::auction::{bid::VestingSchedule, DelegationRate, ValidatorBid},\n        AccessRights, PublicKey, SecretKey, URef, U512,\n    };\n\n    #[test]\n    fn serialization_roundtrip_active() {\n        let founding_validator = ValidatorBid {\n            validator_public_key: PublicKey::from(\n                &SecretKey::ed25519_from_bytes([0u8; SecretKey::ED25519_LENGTH]).unwrap(),\n            ),\n            bonding_purse: URef::new([42; 32], AccessRights::READ_ADD_WRITE),\n            staked_amount: U512::one(),\n            
delegation_rate: DelegationRate::MAX,\n            vesting_schedule: Some(VestingSchedule::default()),\n            inactive: false,\n            minimum_delegation_amount: 0,\n            maximum_delegation_amount: u64::MAX,\n            reserved_slots: 0,\n        };\n        bytesrepr::test_serialization_roundtrip(&founding_validator);\n    }\n\n    #[test]\n    fn serialization_roundtrip_inactive() {\n        let founding_validator = ValidatorBid {\n            validator_public_key: PublicKey::from(\n                &SecretKey::ed25519_from_bytes([0u8; SecretKey::ED25519_LENGTH]).unwrap(),\n            ),\n            bonding_purse: URef::new([42; 32], AccessRights::READ_ADD_WRITE),\n            staked_amount: U512::one(),\n            delegation_rate: DelegationRate::MAX,\n            vesting_schedule: Some(VestingSchedule::default()),\n            inactive: true,\n            minimum_delegation_amount: 0,\n            maximum_delegation_amount: u64::MAX,\n            reserved_slots: 0,\n        };\n        bytesrepr::test_serialization_roundtrip(&founding_validator);\n    }\n\n    #[test]\n    fn should_immediately_initialize_unlock_amounts() {\n        const TIMESTAMP_MILLIS: u64 = 0;\n\n        let validator_pk: PublicKey = (&SecretKey::ed25519_from_bytes([42; 32]).unwrap()).into();\n\n        let validator_release_timestamp = TIMESTAMP_MILLIS;\n        let vesting_schedule_period_millis = TIMESTAMP_MILLIS;\n        let validator_bonding_purse = URef::new([42; 32], AccessRights::ADD);\n        let validator_staked_amount = U512::from(1000);\n        let validator_delegation_rate = 0;\n\n        let bid = ValidatorBid::locked(\n            validator_pk,\n            validator_bonding_purse,\n            validator_staked_amount,\n            validator_delegation_rate,\n            validator_release_timestamp,\n            0,\n            u64::MAX,\n            0,\n        );\n\n        assert!(!bid.is_locked_with_vesting_schedule(\n            
validator_release_timestamp,\n            vesting_schedule_period_millis,\n        ));\n    }\n}\n\n#[cfg(test)]\nmod prop_tests {\n    use proptest::prelude::*;\n\n    use crate::{bytesrepr, gens};\n\n    proptest! {\n        #[test]\n        fn test_value_bid(bid in gens::validator_bid_arb()) {\n            bytesrepr::test_serialization_roundtrip(&bid);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/system/auction/validator_credit.rs",
    "content": "use crate::{\n    bytesrepr,\n    bytesrepr::{FromBytes, ToBytes},\n    CLType, CLTyped, EraId, PublicKey, U512,\n};\nuse alloc::vec::Vec;\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n/// Validator credit record.\n#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct ValidatorCredit {\n    /// Validator public key\n    validator_public_key: PublicKey,\n    /// The era id the credit was created.\n    era_id: EraId,\n    /// The credit amount.\n    amount: U512,\n}\n\nimpl ValidatorCredit {\n    /// Returns a new instance of `[ValidatorCredit]`.\n    pub fn new(validator_public_key: PublicKey, era_id: EraId, amount: U512) -> Self {\n        ValidatorCredit {\n            validator_public_key,\n            era_id,\n            amount,\n        }\n    }\n\n    /// Gets the validator public key of this instance.\n    pub fn validator_public_key(&self) -> &PublicKey {\n        &self.validator_public_key\n    }\n\n    /// Gets the era_id of this instance.\n    pub fn era_id(&self) -> EraId {\n        self.era_id\n    }\n\n    /// Gets the era_id of this instance.\n    pub fn amount(&self) -> U512 {\n        self.amount\n    }\n\n    /// Increase the credit amount.\n    pub fn increase(&mut self, additional_amount: U512) -> U512 {\n        self.amount.saturating_add(additional_amount);\n        self.amount\n    }\n\n    /// Creates a new empty instance of a credit, with amount 0.\n    pub fn empty(validator_public_key: PublicKey, era_id: EraId) -> Self {\n        Self {\n            validator_public_key,\n            era_id,\n            amount: U512::zero(),\n        }\n    }\n}\n\nimpl CLTyped for ValidatorCredit {\n    fn cl_type() -> CLType {\n        CLType::Any\n    
}\n}\n\nimpl ToBytes for ValidatorCredit {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.validator_public_key.serialized_length()\n            + self.era_id.serialized_length()\n            + self.amount.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.validator_public_key.write_bytes(writer)?;\n        self.era_id.write_bytes(writer)?;\n        self.amount.write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for ValidatorCredit {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (validator_public_key, remainder) = FromBytes::from_bytes(bytes)?;\n        let (era_id, remainder) = FromBytes::from_bytes(remainder)?;\n        let (amount, remainder) = FromBytes::from_bytes(remainder)?;\n        Ok((\n            ValidatorCredit {\n                validator_public_key,\n                era_id,\n                amount,\n            },\n            remainder,\n        ))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{\n        bytesrepr, system::auction::validator_credit::ValidatorCredit, EraId, PublicKey, SecretKey,\n        U512,\n    };\n\n    #[test]\n    fn serialization_roundtrip() {\n        let credit = ValidatorCredit {\n            validator_public_key: PublicKey::from(\n                &SecretKey::ed25519_from_bytes([0u8; SecretKey::ED25519_LENGTH]).unwrap(),\n            ),\n            era_id: EraId::new(0),\n            amount: U512::one(),\n        };\n        bytesrepr::test_serialization_roundtrip(&credit);\n    }\n}\n\n#[cfg(test)]\nmod prop_tests {\n    use proptest::prelude::*;\n\n    use crate::{bytesrepr, gens};\n\n    proptest! 
{\n        #[test]\n        fn test_value_bid(bid in gens::credit_bid_arb()) {\n            bytesrepr::test_serialization_roundtrip(&bid);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/system/auction/withdraw_purse.rs",
    "content": "use alloc::vec::Vec;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    CLType, CLTyped, EraId, PublicKey, URef, U512,\n};\n\n/// A withdraw purse, a legacy structure.\n#[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Clone)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct WithdrawPurse {\n    /// Bonding Purse\n    pub(crate) bonding_purse: URef,\n    /// Validators public key.\n    pub(crate) validator_public_key: PublicKey,\n    /// Unbonders public key.\n    pub(crate) unbonder_public_key: PublicKey,\n    /// Era in which this unbonding request was created.\n    pub(crate) era_of_creation: EraId,\n    /// Unbonding Amount.\n    pub(crate) amount: U512,\n}\n\nimpl WithdrawPurse {\n    /// Creates [`WithdrawPurse`] instance for an unbonding request.\n    pub const fn new(\n        bonding_purse: URef,\n        validator_public_key: PublicKey,\n        unbonder_public_key: PublicKey,\n        era_of_creation: EraId,\n        amount: U512,\n    ) -> Self {\n        Self {\n            bonding_purse,\n            validator_public_key,\n            unbonder_public_key,\n            era_of_creation,\n            amount,\n        }\n    }\n\n    /// Checks if given request is made by a validator by checking if public key of unbonder is same\n    /// as a key owned by validator.\n    pub fn is_validator(&self) -> bool {\n        self.validator_public_key == self.unbonder_public_key\n    }\n\n    /// Returns bonding purse used to make this unbonding request.\n    pub fn bonding_purse(&self) -> &URef {\n        &self.bonding_purse\n    }\n\n    /// Returns public key of validator.\n    pub fn validator_public_key(&self) -> &PublicKey {\n        &self.validator_public_key\n 
   }\n\n    /// Returns public key of unbonder.\n    ///\n    /// For withdrawal requests that originated from validator's public key through `withdraw_bid`\n    /// entrypoint this is equal to [`WithdrawPurse::validator_public_key`] and\n    /// [`WithdrawPurse::is_validator`] is `true`.\n    pub fn unbonder_public_key(&self) -> &PublicKey {\n        &self.unbonder_public_key\n    }\n\n    /// Returns era which was used to create this unbonding request.\n    pub fn era_of_creation(&self) -> EraId {\n        self.era_of_creation\n    }\n\n    /// Returns unbonding amount.\n    pub fn amount(&self) -> &U512 {\n        &self.amount\n    }\n}\n\nimpl ToBytes for WithdrawPurse {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        result.extend(&self.bonding_purse.to_bytes()?);\n        result.extend(&self.validator_public_key.to_bytes()?);\n        result.extend(&self.unbonder_public_key.to_bytes()?);\n        result.extend(&self.era_of_creation.to_bytes()?);\n        result.extend(&self.amount.to_bytes()?);\n\n        Ok(result)\n    }\n    fn serialized_length(&self) -> usize {\n        self.bonding_purse.serialized_length()\n            + self.validator_public_key.serialized_length()\n            + self.unbonder_public_key.serialized_length()\n            + self.era_of_creation.serialized_length()\n            + self.amount.serialized_length()\n    }\n}\n\nimpl FromBytes for WithdrawPurse {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (bonding_purse, remainder) = FromBytes::from_bytes(bytes)?;\n        let (validator_public_key, remainder) = FromBytes::from_bytes(remainder)?;\n        let (unbonder_public_key, remainder) = FromBytes::from_bytes(remainder)?;\n        let (era_of_creation, remainder) = FromBytes::from_bytes(remainder)?;\n        let (amount, remainder) = FromBytes::from_bytes(remainder)?;\n\n        Ok((\n            
WithdrawPurse {\n                bonding_purse,\n                validator_public_key,\n                unbonder_public_key,\n                era_of_creation,\n                amount,\n            },\n            remainder,\n        ))\n    }\n}\n\nimpl CLTyped for WithdrawPurse {\n    fn cl_type() -> CLType {\n        CLType::Any\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{bytesrepr, AccessRights, EraId, PublicKey, SecretKey, URef, U512};\n\n    use super::WithdrawPurse;\n\n    const BONDING_PURSE: URef = URef::new([41; 32], AccessRights::READ_ADD_WRITE);\n    const ERA_OF_WITHDRAWAL: EraId = EraId::MAX;\n\n    fn validator_public_key() -> PublicKey {\n        let secret_key = SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]).unwrap();\n        PublicKey::from(&secret_key)\n    }\n\n    fn unbonder_public_key() -> PublicKey {\n        let secret_key = SecretKey::ed25519_from_bytes([45; SecretKey::ED25519_LENGTH]).unwrap();\n        PublicKey::from(&secret_key)\n    }\n\n    fn amount() -> U512 {\n        U512::max_value() - 1\n    }\n\n    #[test]\n    fn serialization_roundtrip_for_withdraw_purse() {\n        let withdraw_purse = WithdrawPurse {\n            bonding_purse: BONDING_PURSE,\n            validator_public_key: validator_public_key(),\n            unbonder_public_key: unbonder_public_key(),\n            era_of_creation: ERA_OF_WITHDRAWAL,\n            amount: amount(),\n        };\n\n        bytesrepr::test_serialization_roundtrip(&withdraw_purse);\n    }\n\n    #[test]\n    fn should_be_validator_condition_for_withdraw_purse() {\n        let validator_withdraw_purse = WithdrawPurse::new(\n            BONDING_PURSE,\n            validator_public_key(),\n            validator_public_key(),\n            ERA_OF_WITHDRAWAL,\n            amount(),\n        );\n        assert!(validator_withdraw_purse.is_validator());\n    }\n\n    #[test]\n    fn should_be_delegator_condition_for_withdraw_purse() {\n        let 
delegator_withdraw_purse = WithdrawPurse::new(\n            BONDING_PURSE,\n            validator_public_key(),\n            unbonder_public_key(),\n            ERA_OF_WITHDRAWAL,\n            amount(),\n        );\n        assert!(!delegator_withdraw_purse.is_validator());\n    }\n}\n"
  },
  {
    "path": "types/src/system/auction.rs",
    "content": "//! Contains implementation of the Auction contract functionality.\nmod bid;\nmod bid_addr;\nmod bid_kind;\nmod bridge;\nmod constants;\nmod delegator;\nmod delegator_bid;\nmod delegator_kind;\nmod entry_points;\nmod era_info;\nmod error;\nmod reservation;\nmod seigniorage_recipient;\nmod unbond;\nmod unbonding_purse;\nmod validator_bid;\nmod validator_credit;\nmod withdraw_purse;\n\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\nuse alloc::collections::btree_map::Entry;\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\nuse itertools::Itertools;\n\nuse alloc::{boxed::Box, collections::BTreeMap, vec::Vec};\n\npub use bid::{Bid, VESTING_SCHEDULE_LENGTH_MILLIS};\npub use bid_addr::{BidAddr, BidAddrTag};\npub use bid_kind::{BidKind, BidKindTag};\npub use bridge::Bridge;\npub use constants::*;\npub use delegator::Delegator;\npub use delegator_bid::DelegatorBid;\npub use delegator_kind::DelegatorKind;\npub use entry_points::auction_entry_points;\npub use era_info::{EraInfo, SeigniorageAllocation};\npub use error::Error;\npub use reservation::Reservation;\npub use seigniorage_recipient::{\n    SeigniorageRecipient, SeigniorageRecipientV1, SeigniorageRecipientV2,\n};\npub use unbond::{Unbond, UnbondEra, UnbondKind};\npub use unbonding_purse::UnbondingPurse;\npub use validator_bid::ValidatorBid;\npub use validator_credit::ValidatorCredit;\npub use withdraw_purse::WithdrawPurse;\n\n#[cfg(any(feature = \"testing\", test))]\npub(crate) mod gens {\n    pub use super::era_info::gens::*;\n}\n\nuse crate::{account::AccountHash, EraId, PublicKey, U512};\n\n/// Representation of delegation rate of tokens. 
Range from 0..=100.\npub type DelegationRate = u8;\n\n/// Validators mapped to their bids.\npub type ValidatorBids = BTreeMap<PublicKey, Box<ValidatorBid>>;\n\n/// Delegator bids mapped to their validator.\npub type DelegatorBids = BTreeMap<PublicKey, Vec<Box<DelegatorBid>>>;\n\n/// Reservations mapped to their validator.\npub type Reservations = BTreeMap<PublicKey, Vec<Box<Reservation>>>;\n\n/// Validators mapped to their credits by era.\npub type ValidatorCredits = BTreeMap<PublicKey, BTreeMap<EraId, Box<ValidatorCredit>>>;\n\n/// Weights of validators. \"Weight\" in this context means a sum of their stakes.\npub type ValidatorWeights = BTreeMap<PublicKey, U512>;\n\n#[derive(Debug)]\npub struct WeightsBreakout {\n    locked: ValidatorWeights,\n    unlocked_meets_min: ValidatorWeights,\n    unlocked_below_min: ValidatorWeights,\n}\n\nimpl WeightsBreakout {\n    pub fn new() -> Self {\n        WeightsBreakout {\n            locked: BTreeMap::default(),\n            unlocked_meets_min: BTreeMap::default(),\n            unlocked_below_min: BTreeMap::default(),\n        }\n    }\n\n    pub fn register(\n        &mut self,\n        public_key: PublicKey,\n        weight: U512,\n        locked: bool,\n        meets_minimum: bool,\n    ) {\n        if locked {\n            self.locked.insert(public_key, weight);\n        } else if meets_minimum {\n            self.unlocked_meets_min.insert(public_key, weight);\n        } else {\n            self.unlocked_below_min.insert(public_key, weight);\n        }\n    }\n\n    /// The count of locked weights.\n    pub fn locked_count(&self) -> usize {\n        self.locked.len()\n    }\n\n    /// The count of unlocked weights with at least minimum bid amount.\n    pub fn unlocked_meets_min_count(&self) -> usize {\n        self.unlocked_meets_min.len()\n    }\n\n    /// The count of unlocked weights that do not meet minimum bid amount.\n    pub fn unlocked_below_min_count(&self) -> usize {\n        self.unlocked_below_min.len()\n    
}\n\n    /// Takes all locked and remaining slots number of unlocked meets min.\n    pub fn take(self, validator_slots: usize, threshold: usize) -> ValidatorWeights {\n        let locked_count = self.locked.len();\n        if locked_count >= validator_slots {\n            // locked validators are taken even if exceeding validator_slots count\n            // they are literally locked in\n            return self.locked;\n        }\n        let remaining_auction_slots = validator_slots.saturating_sub(locked_count);\n        let mut unlocked_hi = self\n            .unlocked_meets_min\n            .iter()\n            .map(|(public_key, validator_bid)| (public_key.clone(), *validator_bid))\n            .collect::<Vec<(PublicKey, U512)>>();\n        // sort highest to lowest (rhs to lhs)\n        unlocked_hi.sort_by(|(_, lhs), (_, rhs)| rhs.cmp(lhs));\n        let unlocked_hi_count = unlocked_hi.len();\n        let combined_count = unlocked_hi_count.saturating_add(locked_count);\n        let unlocked_low_count = self.unlocked_below_min.len();\n        if unlocked_low_count == 0\n            || unlocked_hi_count >= remaining_auction_slots\n            || combined_count >= threshold\n        {\n            return self\n                .locked\n                .into_iter()\n                .chain(unlocked_hi.into_iter().take(remaining_auction_slots))\n                .collect();\n        }\n\n        // we have fewer locked bids and bids >= min bid than the safety threshold,\n        // so we will attempt to backfill slots up to the safety threshold from otherwise\n        // valid bids that have less than the min bid\n        let backfill_count = threshold.saturating_sub(combined_count);\n        let mut unlocked_low = self\n            .unlocked_below_min\n            .iter()\n            .map(|(public_key, validator_bid)| (public_key.clone(), *validator_bid))\n            .collect::<Vec<(PublicKey, U512)>>();\n        // sort highest to lowest (rhs to lhs)\n        
unlocked_low.sort_by(|(_, lhs), (_, rhs)| rhs.cmp(lhs));\n        self.locked\n            .into_iter()\n            .chain(unlocked_hi.into_iter().take(remaining_auction_slots))\n            .chain(unlocked_low.into_iter().take(backfill_count))\n            .collect()\n    }\n}\n\nimpl Default for WeightsBreakout {\n    fn default() -> Self {\n        Self::new()\n    }\n}\n\n/// List of era validators\npub type EraValidators = BTreeMap<EraId, ValidatorWeights>;\n\n/// Collection of seigniorage recipients. Legacy version.\npub type SeigniorageRecipientsV1 = BTreeMap<PublicKey, SeigniorageRecipientV1>;\n/// Collection of seigniorage recipients.\npub type SeigniorageRecipientsV2 = BTreeMap<PublicKey, SeigniorageRecipientV2>;\n/// Wrapper enum for all variants of `SeigniorageRecipients`.\n#[allow(missing_docs)]\npub enum SeigniorageRecipients {\n    V1(SeigniorageRecipientsV1),\n    V2(SeigniorageRecipientsV2),\n}\n\n/// Snapshot of `SeigniorageRecipients` for a given era. Legacy version.\npub type SeigniorageRecipientsSnapshotV1 = BTreeMap<EraId, SeigniorageRecipientsV1>;\n/// Snapshot of `SeigniorageRecipients` for a given era.\npub type SeigniorageRecipientsSnapshotV2 = BTreeMap<EraId, SeigniorageRecipientsV2>;\n/// Wrapper enum for all variants of `SeigniorageRecipientsSnapshot`.\n#[derive(Debug)]\n#[allow(missing_docs)]\npub enum SeigniorageRecipientsSnapshot {\n    V1(SeigniorageRecipientsSnapshotV1),\n    V2(SeigniorageRecipientsSnapshotV2),\n}\n\nimpl SeigniorageRecipientsSnapshot {\n    /// Returns rewards for given validator in a specified era\n    pub fn get_seignorage_recipient(\n        &self,\n        era_id: &EraId,\n        validator_public_key: &PublicKey,\n    ) -> Option<SeigniorageRecipient> {\n        match self {\n            Self::V1(snapshot) => snapshot.get(era_id).and_then(|era| {\n                era.get(validator_public_key)\n                    .map(|recipient| SeigniorageRecipient::V1(recipient.clone()))\n            }),\n            
Self::V2(snapshot) => snapshot.get(era_id).and_then(|era| {\n                era.get(validator_public_key)\n                    .map(|recipient| SeigniorageRecipient::V2(recipient.clone()))\n            }),\n        }\n    }\n}\n\n/// Validators and delegators mapped to their withdraw purses.\npub type WithdrawPurses = BTreeMap<AccountHash, Vec<WithdrawPurse>>;\n\n/// Aggregated representation of validator and associated delegator bids.\npub type Staking = BTreeMap<PublicKey, (ValidatorBid, BTreeMap<DelegatorKind, DelegatorBid>)>;\n\n/// Utils for working with a vector of BidKind.\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\npub trait BidsExt {\n    /// Returns Bid matching public_key, if present.\n    fn unified_bid(&self, public_key: &PublicKey) -> Option<Bid>;\n\n    /// Returns ValidatorBid matching public_key, if present.\n    fn validator_bid(&self, public_key: &PublicKey) -> Option<ValidatorBid>;\n\n    /// Returns a bridge record matching old and new public key, if present.\n    fn bridge(\n        &self,\n        public_key: &PublicKey,\n        new_public_key: &PublicKey,\n        era_id: &EraId,\n    ) -> Option<Bridge>;\n\n    /// Returns ValidatorCredit matching public_key, if present.\n    fn credit(&self, public_key: &PublicKey) -> Option<ValidatorCredit>;\n\n    /// Returns total validator stake, if present.\n    fn validator_total_stake(&self, public_key: &PublicKey) -> Option<U512>;\n\n    /// Returns Delegator entries matching validator public key, if present.\n    fn delegators_by_validator_public_key(\n        &self,\n        public_key: &PublicKey,\n    ) -> Option<Vec<DelegatorBid>>;\n\n    /// Returns Delegator entry, if present.\n    fn delegator_by_kind(\n        &self,\n        validator_public_key: &PublicKey,\n        delegator_kind: &DelegatorKind,\n    ) -> Option<DelegatorBid>;\n\n    /// Returns Reservation entries matching validator public key, if present.\n    fn reservations_by_validator_public_key(\n        
&self,\n        public_key: &PublicKey,\n    ) -> Option<Vec<Reservation>>;\n\n    /// Returns Reservation entry, if present.\n    fn reservation_by_kind(\n        &self,\n        validator_public_key: &PublicKey,\n        delegator_kind: &DelegatorKind,\n    ) -> Option<Reservation>;\n\n    /// Returns Unbond entry, if present.\n    fn unbond_by_kind(\n        &self,\n        validator_public_key: &PublicKey,\n        unbond_kind: &UnbondKind,\n    ) -> Option<Unbond>;\n\n    /// Returns true if containing any elements matching the provided validator public key.\n    fn contains_validator_public_key(&self, public_key: &PublicKey) -> bool;\n\n    /// Removes any items with a public key matching the provided validator public key.\n    fn remove_by_validator_public_key(&mut self, public_key: &PublicKey);\n\n    /// Creates a map of Validator public keys to associated Delegators.\n    fn delegator_map(&self) -> BTreeMap<PublicKey, Vec<DelegatorKind>>;\n\n    /// Inserts if bid_kind does not exist, otherwise replaces.\n    fn upsert(&mut self, bid_kind: BidKind);\n}\n\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\nimpl BidsExt for Vec<BidKind> {\n    fn unified_bid(&self, public_key: &PublicKey) -> Option<Bid> {\n        if let BidKind::Unified(bid) = self\n            .iter()\n            .find(|x| x.is_validator() && &x.validator_public_key() == public_key)?\n        {\n            Some(*bid.clone())\n        } else {\n            None\n        }\n    }\n\n    fn validator_bid(&self, public_key: &PublicKey) -> Option<ValidatorBid> {\n        if let BidKind::Validator(validator_bid) = self\n            .iter()\n            .find(|x| x.is_validator() && &x.validator_public_key() == public_key)?\n        {\n            Some(*validator_bid.clone())\n        } else {\n            None\n        }\n    }\n\n    fn bridge(\n        &self,\n        public_key: &PublicKey,\n        new_public_key: &PublicKey,\n        era_id: &EraId,\n    ) -> 
Option<Bridge> {\n        self.iter().find_map(|x| match x {\n            BidKind::Bridge(bridge)\n                if bridge.old_validator_public_key() == public_key\n                    && bridge.new_validator_public_key() == new_public_key\n                    && bridge.era_id() == era_id =>\n            {\n                Some(*bridge.clone())\n            }\n            _ => None,\n        })\n    }\n\n    fn credit(&self, public_key: &PublicKey) -> Option<ValidatorCredit> {\n        if let BidKind::Credit(credit) = self\n            .iter()\n            .find(|x| x.is_credit() && &x.validator_public_key() == public_key)?\n        {\n            Some(*credit.clone())\n        } else {\n            None\n        }\n    }\n\n    fn validator_total_stake(&self, public_key: &PublicKey) -> Option<U512> {\n        if let Some(validator_bid) = self.validator_bid(public_key) {\n            let delegator_stake = {\n                match self.delegators_by_validator_public_key(validator_bid.validator_public_key())\n                {\n                    None => U512::zero(),\n                    Some(delegators) => delegators.iter().map(|x| x.staked_amount()).sum(),\n                }\n            };\n            return Some(validator_bid.staked_amount() + delegator_stake);\n        }\n\n        if let BidKind::Unified(bid) = self\n            .iter()\n            .find(|x| x.is_validator() && &x.validator_public_key() == public_key)?\n        {\n            return Some(*bid.staked_amount());\n        }\n\n        None\n    }\n\n    fn delegators_by_validator_public_key(\n        &self,\n        public_key: &PublicKey,\n    ) -> Option<Vec<DelegatorBid>> {\n        let mut ret = vec![];\n        for delegator in self\n            .iter()\n            .filter(|x| x.is_delegator() && &x.validator_public_key() == public_key)\n        {\n            if let BidKind::Delegator(delegator) = delegator {\n                ret.push(*delegator.clone());\n            }\n        }\n\n 
       if ret.is_empty() {\n            None\n        } else {\n            Some(ret)\n        }\n    }\n\n    fn delegator_by_kind(\n        &self,\n        validator_public_key: &PublicKey,\n        delegator_kind: &DelegatorKind,\n    ) -> Option<DelegatorBid> {\n        if let BidKind::Delegator(delegator) = self.iter().find(|x| {\n            x.is_delegator()\n                && &x.validator_public_key() == validator_public_key\n                && x.delegator_kind() == Some(delegator_kind.clone())\n        })? {\n            Some(*delegator.clone())\n        } else {\n            None\n        }\n    }\n\n    fn reservations_by_validator_public_key(\n        &self,\n        validator_public_key: &PublicKey,\n    ) -> Option<Vec<Reservation>> {\n        let mut ret = vec![];\n        for reservation in self\n            .iter()\n            .filter(|x| x.is_reservation() && &x.validator_public_key() == validator_public_key)\n        {\n            if let BidKind::Reservation(reservation) = reservation {\n                ret.push(*reservation.clone());\n            }\n        }\n\n        if ret.is_empty() {\n            None\n        } else {\n            Some(ret)\n        }\n    }\n\n    fn reservation_by_kind(\n        &self,\n        validator_public_key: &PublicKey,\n        delegator_kind: &DelegatorKind,\n    ) -> Option<Reservation> {\n        if let BidKind::Reservation(reservation) = self.iter().find(|x| {\n            x.is_reservation()\n                && &x.validator_public_key() == validator_public_key\n                && x.delegator_kind() == Some(delegator_kind.clone())\n        })? 
{\n            Some(*reservation.clone())\n        } else {\n            None\n        }\n    }\n\n    fn unbond_by_kind(\n        &self,\n        validator_public_key: &PublicKey,\n        unbond_kind: &UnbondKind,\n    ) -> Option<Unbond> {\n        if let BidKind::Unbond(unbond) = self.iter().find(|x| {\n            x.is_unbond()\n                && &x.validator_public_key() == validator_public_key\n                && x.unbond_kind() == Some(unbond_kind.clone())\n        })? {\n            Some(*unbond.clone())\n        } else {\n            None\n        }\n    }\n\n    fn contains_validator_public_key(&self, public_key: &PublicKey) -> bool {\n        self.iter().any(|x| &x.validator_public_key() == public_key)\n    }\n\n    fn remove_by_validator_public_key(&mut self, public_key: &PublicKey) {\n        self.retain(|x| &x.validator_public_key() != public_key)\n    }\n\n    fn delegator_map(&self) -> BTreeMap<PublicKey, Vec<DelegatorKind>> {\n        let mut ret = BTreeMap::new();\n        let validators = self\n            .iter()\n            .filter(|x| x.is_validator())\n            .cloned()\n            .collect_vec();\n        for bid_kind in validators {\n            ret.insert(bid_kind.validator_public_key().clone(), vec![]);\n        }\n        let delegators = self\n            .iter()\n            .filter(|x| x.is_delegator())\n            .cloned()\n            .collect_vec();\n        for bid_kind in delegators {\n            if let BidKind::Delegator(delegator) = bid_kind {\n                match ret.entry(delegator.validator_public_key().clone()) {\n                    Entry::Vacant(ve) => {\n                        ve.insert(vec![delegator.delegator_kind().clone()]);\n                    }\n                    Entry::Occupied(mut oe) => {\n                        let delegators = oe.get_mut();\n                        delegators.push(delegator.delegator_kind().clone())\n                    }\n                }\n            }\n        }\n        
let unified = self\n            .iter()\n            .filter(|x| x.is_unified())\n            .cloned()\n            .collect_vec();\n        for bid_kind in unified {\n            if let BidKind::Unified(unified) = bid_kind {\n                let delegators = unified\n                    .delegators()\n                    .iter()\n                    .map(|(_, y)| DelegatorKind::PublicKey(y.delegator_public_key().clone()))\n                    .collect();\n                ret.insert(unified.validator_public_key().clone(), delegators);\n            }\n        }\n        ret\n    }\n\n    fn upsert(&mut self, bid_kind: BidKind) {\n        let maybe_index = match bid_kind {\n            BidKind::Unified(_) | BidKind::Validator(_) => self\n                .iter()\n                .find_position(|x| {\n                    x.validator_public_key() == bid_kind.validator_public_key()\n                        && x.tag() == bid_kind.tag()\n                })\n                .map(|(idx, _)| idx),\n            BidKind::Delegator(_) => self\n                .iter()\n                .find_position(|x| {\n                    x.is_delegator()\n                        && x.validator_public_key() == bid_kind.validator_public_key()\n                        && x.delegator_kind() == bid_kind.delegator_kind()\n                })\n                .map(|(idx, _)| idx),\n            BidKind::Bridge(_) => self\n                .iter()\n                .find_position(|x| {\n                    x.is_bridge()\n                        && x.validator_public_key() == bid_kind.validator_public_key()\n                        && x.new_validator_public_key() == bid_kind.new_validator_public_key()\n                        && x.era_id() == bid_kind.era_id()\n                })\n                .map(|(idx, _)| idx),\n            BidKind::Credit(_) => self\n                .iter()\n                .find_position(|x| {\n                    x.validator_public_key() == bid_kind.validator_public_key()\n    
                    && x.tag() == bid_kind.tag()\n                        && x.era_id() == bid_kind.era_id()\n                })\n                .map(|(idx, _)| idx),\n            BidKind::Reservation(_) => self\n                .iter()\n                .find_position(|x| {\n                    x.is_reservation()\n                        && x.validator_public_key() == bid_kind.validator_public_key()\n                        && x.delegator_kind() == bid_kind.delegator_kind()\n                })\n                .map(|(idx, _)| idx),\n            BidKind::Unbond(_) => self\n                .iter()\n                .find_position(|x| {\n                    x.is_unbond()\n                        && x.validator_public_key() == bid_kind.validator_public_key()\n                        && x.unbond_kind() == bid_kind.unbond_kind()\n                })\n                .map(|(idx, _)| idx),\n        };\n\n        match maybe_index {\n            Some(index) => {\n                self.insert(index, bid_kind);\n            }\n            None => {\n                self.push(bid_kind);\n            }\n        }\n    }\n}\n\n#[cfg(test)]\nmod prop_test_delegator {\n    use proptest::prelude::*;\n\n    use crate::{bytesrepr, gens};\n\n    proptest! {\n        #[test]\n        fn test_value_bid(bid in gens::delegator_arb()) {\n            bytesrepr::test_serialization_roundtrip(&bid);\n        }\n    }\n}\n\n#[cfg(test)]\nmod prop_test_reservation {\n    use proptest::prelude::*;\n\n    use crate::{bytesrepr, gens};\n\n    proptest! {\n        #[test]\n        fn test_value_bid(bid in gens::reservation_arb()) {\n            bytesrepr::test_serialization_roundtrip(&bid);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/system/caller/call_stack_elements.rs",
    "content": "use alloc::vec::Vec;\n\nuse num_derive::{FromPrimitive, ToPrimitive};\nuse num_traits::FromPrimitive;\n\nuse crate::{\n    account::AccountHash,\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    contracts::{ContractHash, ContractPackageHash},\n    CLType, CLTyped,\n};\n\n/// Tag representing variants of CallStackElement for purposes of serialization.\n#[derive(FromPrimitive, ToPrimitive)]\n#[repr(u8)]\npub enum CallStackElementTag {\n    /// Session tag.\n    Session = 0,\n    /// StoredSession tag.\n    StoredSession,\n    /// StoredContract tag.\n    StoredContract,\n}\n\n/// Represents the origin of a sub-call.\n#[derive(Clone, Debug, PartialEq, Eq)]\npub enum CallStackElement {\n    /// Session\n    Session {\n        /// The account hash of the caller\n        account_hash: AccountHash,\n    },\n    /// Effectively an EntryPointType::Session - stored access to a session.\n    StoredSession {\n        /// The account hash of the caller\n        account_hash: AccountHash,\n        /// The contract package hash\n        contract_package_hash: ContractPackageHash,\n        /// The contract hash\n        contract_hash: ContractHash,\n    },\n    /// Contract\n    StoredContract {\n        /// The contract package hash\n        contract_package_hash: ContractPackageHash,\n        /// The contract hash\n        contract_hash: ContractHash,\n    },\n}\n\nimpl CallStackElement {\n    /// Creates a [`CallStackElement::Session`]. This represents a call into session code, and\n    /// should only ever happen once in a call stack.\n    pub fn session(account_hash: AccountHash) -> Self {\n        CallStackElement::Session { account_hash }\n    }\n\n    /// Creates a [`'CallStackElement::StoredContract`]. 
This represents a call into a contract with\n    /// `EntryPointType::Contract`.\n    pub fn stored_contract(\n        contract_package_hash: ContractPackageHash,\n        contract_hash: ContractHash,\n    ) -> Self {\n        CallStackElement::StoredContract {\n            contract_package_hash,\n            contract_hash,\n        }\n    }\n\n    /// Creates a [`'CallStackElement::StoredSession`]. This represents a call into a contract with\n    /// `EntryPointType::Session`.\n    pub fn stored_session(\n        account_hash: AccountHash,\n        contract_package_hash: ContractPackageHash,\n        contract_hash: ContractHash,\n    ) -> Self {\n        CallStackElement::StoredSession {\n            account_hash,\n            contract_package_hash,\n            contract_hash,\n        }\n    }\n\n    /// Gets the tag from self.\n    pub fn tag(&self) -> CallStackElementTag {\n        match self {\n            CallStackElement::Session { .. } => CallStackElementTag::Session,\n            CallStackElement::StoredSession { .. } => CallStackElementTag::StoredSession,\n            CallStackElement::StoredContract { .. } => CallStackElementTag::StoredContract,\n        }\n    }\n\n    /// Gets the [`ContractHash`] for both stored session and stored contract variants.\n    pub fn contract_hash(&self) -> Option<&ContractHash> {\n        match self {\n            CallStackElement::Session { .. } => None,\n            CallStackElement::StoredSession { contract_hash, .. }\n            | CallStackElement::StoredContract { contract_hash, .. 
} => Some(contract_hash),\n        }\n    }\n}\n\nimpl ToBytes for CallStackElement {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        result.push(self.tag() as u8);\n        match self {\n            CallStackElement::Session { account_hash } => {\n                result.append(&mut account_hash.to_bytes()?)\n            }\n            CallStackElement::StoredSession {\n                account_hash,\n                contract_package_hash,\n                contract_hash,\n            } => {\n                result.append(&mut account_hash.to_bytes()?);\n                result.append(&mut contract_package_hash.to_bytes()?);\n                result.append(&mut contract_hash.to_bytes()?);\n            }\n            CallStackElement::StoredContract {\n                contract_package_hash,\n                contract_hash,\n            } => {\n                result.append(&mut contract_package_hash.to_bytes()?);\n                result.append(&mut contract_hash.to_bytes()?);\n            }\n        };\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                CallStackElement::Session { account_hash } => account_hash.serialized_length(),\n                CallStackElement::StoredSession {\n                    account_hash,\n                    contract_package_hash,\n                    contract_hash,\n                } => {\n                    account_hash.serialized_length()\n                        + contract_package_hash.serialized_length()\n                        + contract_hash.serialized_length()\n                }\n                CallStackElement::StoredContract {\n                    contract_package_hash,\n                    contract_hash,\n                } => contract_package_hash.serialized_length() + contract_hash.serialized_length(),\n            }\n    }\n}\n\nimpl FromBytes for 
CallStackElement {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let tag = CallStackElementTag::from_u8(tag).ok_or(bytesrepr::Error::Formatting)?;\n        match tag {\n            CallStackElementTag::Session => {\n                let (account_hash, remainder) = AccountHash::from_bytes(remainder)?;\n                Ok((CallStackElement::Session { account_hash }, remainder))\n            }\n            CallStackElementTag::StoredSession => {\n                let (account_hash, remainder) = AccountHash::from_bytes(remainder)?;\n                let (contract_package_hash, remainder) =\n                    ContractPackageHash::from_bytes(remainder)?;\n                let (contract_hash, remainder) = ContractHash::from_bytes(remainder)?;\n                Ok((\n                    CallStackElement::StoredSession {\n                        account_hash,\n                        contract_package_hash,\n                        contract_hash,\n                    },\n                    remainder,\n                ))\n            }\n            CallStackElementTag::StoredContract => {\n                let (contract_package_hash, remainder) =\n                    ContractPackageHash::from_bytes(remainder)?;\n                let (contract_hash, remainder) = ContractHash::from_bytes(remainder)?;\n                Ok((\n                    CallStackElement::StoredContract {\n                        contract_package_hash,\n                        contract_hash,\n                    },\n                    remainder,\n                ))\n            }\n        }\n    }\n}\n\nimpl CLTyped for CallStackElement {\n    fn cl_type() -> CLType {\n        CLType::Any\n    }\n}\n"
  },
  {
    "path": "types/src/system/caller.rs",
    "content": "pub mod call_stack_elements;\n\nuse alloc::{collections::BTreeMap, vec::Vec};\n\nuse num_derive::{FromPrimitive, ToPrimitive};\nuse num_traits::FromPrimitive;\n\nuse crate::{\n    account::AccountHash,\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    package::PackageHash,\n    CLType, CLTyped, CLValue, CLValueError, EntityAddr, HashAddr,\n};\n\nuse crate::{\n    bytesrepr::Error,\n    contracts::{ContractHash, ContractPackageHash},\n};\npub use call_stack_elements::CallStackElement;\n\n/// Tag representing variants of CallerTag for purposes of serialization.\n#[derive(FromPrimitive, ToPrimitive)]\n#[repr(u8)]\npub enum CallerTag {\n    /// Initiator tag.\n    Initiator = 0,\n    /// Entity tag.\n    Entity,\n    /// Smart contract tag.\n    SmartContract,\n}\n\nconst ACCOUNT: u8 = 0;\nconst PACKAGE: u8 = 1;\nconst CONTRACT_PACKAGE: u8 = 2;\nconst ENTITY: u8 = 3;\nconst CONTRACT: u8 = 4;\n\n#[derive(Clone, Debug, PartialEq, Eq)]\npub struct CallerInfo {\n    kind: u8,\n    fields: BTreeMap<u8, CLValue>,\n}\n\nimpl CLTyped for CallerInfo {\n    fn cl_type() -> CLType {\n        CLType::Any\n    }\n}\n\nimpl CallerInfo {\n    pub fn kind(&self) -> u8 {\n        self.kind\n    }\n\n    pub fn get_field_by_index(&self, index: u8) -> Option<&CLValue> {\n        self.fields.get(&index)\n    }\n}\n\nimpl ToBytes for CallerInfo {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        result.append(&mut self.kind.to_bytes()?);\n        result.append(&mut self.fields.to_bytes()?);\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH + self.fields.serialized_length()\n    }\n}\n\nimpl FromBytes for CallerInfo {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (kind, remainder) = u8::from_bytes(bytes)?;\n        let (fields, remainder) = BTreeMap::from_bytes(remainder)?;\n        Ok((CallerInfo 
{ kind, fields }, remainder))\n    }\n}\n\nimpl TryFrom<Caller> for CallerInfo {\n    type Error = CLValueError;\n\n    fn try_from(value: Caller) -> Result<Self, Self::Error> {\n        match value {\n            Caller::Initiator { account_hash } => {\n                let kind = ACCOUNT;\n\n                let mut ret = BTreeMap::new();\n                ret.insert(ACCOUNT, CLValue::from_t(Some(account_hash))?);\n                ret.insert(PACKAGE, CLValue::from_t(Option::<PackageHash>::None)?);\n                ret.insert(\n                    CONTRACT_PACKAGE,\n                    CLValue::from_t(Option::<ContractPackageHash>::None)?,\n                );\n                ret.insert(ENTITY, CLValue::from_t(Option::<EntityAddr>::None)?);\n                ret.insert(CONTRACT, CLValue::from_t(Option::<ContractHash>::None)?);\n                Ok(CallerInfo { kind, fields: ret })\n            }\n            Caller::Entity {\n                package_hash,\n                entity_addr,\n            } => {\n                let kind = ENTITY;\n\n                let mut ret = BTreeMap::new();\n                ret.insert(ACCOUNT, CLValue::from_t(Option::<AccountHash>::None)?);\n                ret.insert(PACKAGE, CLValue::from_t(Some(package_hash))?);\n                ret.insert(\n                    CONTRACT_PACKAGE,\n                    CLValue::from_t(Option::<ContractPackageHash>::None)?,\n                );\n                ret.insert(ENTITY, CLValue::from_t(Some(entity_addr))?);\n                ret.insert(CONTRACT, CLValue::from_t(Option::<ContractHash>::None)?);\n                Ok(CallerInfo { kind, fields: ret })\n            }\n            Caller::SmartContract {\n                contract_package_hash,\n                contract_hash,\n            } => {\n                let kind = CONTRACT;\n\n                let mut ret = BTreeMap::new();\n                ret.insert(ACCOUNT, CLValue::from_t(Option::<AccountHash>::None)?);\n                ret.insert(PACKAGE, 
CLValue::from_t(Option::<PackageHash>::None)?);\n                ret.insert(\n                    CONTRACT_PACKAGE,\n                    CLValue::from_t(Some(contract_package_hash))?,\n                );\n\n                ret.insert(ENTITY, CLValue::from_t(Option::<EntityAddr>::None)?);\n                ret.insert(CONTRACT, CLValue::from_t(Some(contract_hash))?);\n                Ok(CallerInfo { kind, fields: ret })\n            }\n        }\n    }\n}\n\n/// Identity of a calling entity.\n#[derive(Clone, Copy, Debug, PartialEq, Eq)]\npub enum Caller {\n    /// Initiator (calling account)\n    Initiator {\n        /// The account hash of the caller\n        account_hash: AccountHash,\n    },\n    /// Entity (smart contract / system contract)\n    Entity {\n        /// The package hash\n        package_hash: PackageHash,\n        /// The entity addr.\n        entity_addr: EntityAddr,\n    },\n    SmartContract {\n        /// The contract package hash.\n        contract_package_hash: ContractPackageHash,\n        /// The contract hash.\n        contract_hash: ContractHash,\n    },\n}\n\nimpl Caller {\n    /// Creates a [`Caller::Initiator`]. This represents a call into session code, and\n    /// should only ever happen once in a call stack.\n    pub fn initiator(account_hash: AccountHash) -> Self {\n        Caller::Initiator { account_hash }\n    }\n\n    /// Creates a [`'Caller::Entity`]. 
This represents a call into a contract with\n    /// `EntryPointType::Called`.\n    pub fn entity(package_hash: PackageHash, entity_addr: EntityAddr) -> Self {\n        Caller::Entity {\n            package_hash,\n            entity_addr,\n        }\n    }\n\n    pub fn smart_contract(\n        contract_package_hash: ContractPackageHash,\n        contract_hash: ContractHash,\n    ) -> Self {\n        Caller::SmartContract {\n            contract_package_hash,\n            contract_hash,\n        }\n    }\n\n    /// Gets the tag from self.\n    pub fn tag(&self) -> CallerTag {\n        match self {\n            Caller::Initiator { .. } => CallerTag::Initiator,\n            Caller::Entity { .. } => CallerTag::Entity,\n            Caller::SmartContract { .. } => CallerTag::SmartContract,\n        }\n    }\n\n    /// Gets the [`HashAddr`] for both stored session and stored contract variants.\n    pub fn contract_hash(&self) -> Option<HashAddr> {\n        match self {\n            Caller::Initiator { .. } => None,\n            Caller::Entity { entity_addr, .. } => Some(entity_addr.value()),\n            Caller::SmartContract { contract_hash, .. 
} => Some(contract_hash.value()),\n        }\n    }\n}\n\nimpl ToBytes for Caller {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        result.push(self.tag() as u8);\n        match self {\n            Caller::Initiator { account_hash } => result.append(&mut account_hash.to_bytes()?),\n\n            Caller::Entity {\n                package_hash,\n                entity_addr,\n            } => {\n                result.append(&mut package_hash.to_bytes()?);\n                result.append(&mut entity_addr.to_bytes()?);\n            }\n            Caller::SmartContract {\n                contract_package_hash,\n                contract_hash,\n            } => {\n                result.append(&mut contract_package_hash.to_bytes()?);\n                result.append(&mut contract_hash.to_bytes()?);\n            }\n        };\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                Caller::Initiator { account_hash } => account_hash.serialized_length(),\n                Caller::Entity {\n                    package_hash,\n                    entity_addr,\n                } => package_hash.serialized_length() + entity_addr.serialized_length(),\n                Caller::SmartContract {\n                    contract_package_hash,\n                    contract_hash,\n                } => contract_package_hash.serialized_length() + contract_hash.serialized_length(),\n            }\n    }\n}\n\nimpl FromBytes for Caller {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?;\n        let tag = CallerTag::from_u8(tag).ok_or(bytesrepr::Error::Formatting)?;\n        match tag {\n            CallerTag::Initiator => {\n                let (account_hash, remainder) = AccountHash::from_bytes(remainder)?;\n               
 Ok((Caller::Initiator { account_hash }, remainder))\n            }\n            CallerTag::Entity => {\n                let (package_hash, remainder) = PackageHash::from_bytes(remainder)?;\n                let (entity_addr, remainder) = EntityAddr::from_bytes(remainder)?;\n                Ok((\n                    Caller::Entity {\n                        package_hash,\n                        entity_addr,\n                    },\n                    remainder,\n                ))\n            }\n            CallerTag::SmartContract => {\n                let (contract_package_hash, remainder) =\n                    ContractPackageHash::from_bytes(remainder)?;\n                let (contract_hash, remainder) = ContractHash::from_bytes(remainder)?;\n                Ok((\n                    Caller::SmartContract {\n                        contract_package_hash,\n                        contract_hash,\n                    },\n                    remainder,\n                ))\n            }\n        }\n    }\n}\n\nimpl CLTyped for Caller {\n    fn cl_type() -> CLType {\n        CLType::Any\n    }\n}\n\nimpl From<&Caller> for CallStackElement {\n    fn from(caller: &Caller) -> Self {\n        match caller {\n            Caller::Initiator { account_hash } => CallStackElement::Session {\n                account_hash: *account_hash,\n            },\n            Caller::Entity {\n                package_hash,\n                entity_addr: entity_hash,\n            } => CallStackElement::StoredContract {\n                contract_package_hash: ContractPackageHash::new(package_hash.value()),\n                contract_hash: ContractHash::new(entity_hash.value()),\n            },\n            Caller::SmartContract {\n                contract_package_hash,\n                contract_hash,\n            } => CallStackElement::StoredContract {\n                contract_package_hash: *contract_package_hash,\n                contract_hash: *contract_hash,\n            },\n        }\n 
   }\n}\n"
  },
  {
    "path": "types/src/system/error.rs",
    "content": "use core::fmt::{self, Display, Formatter};\n\nuse crate::system::{auction, handle_payment, mint};\n\n/// An aggregate enum error with variants for each system contract's error.\n#[derive(Debug, Copy, Clone)]\n#[non_exhaustive]\npub enum Error {\n    /// Contains a [`mint::Error`].\n    Mint(mint::Error),\n    /// Contains a [`handle_payment::Error`].\n    HandlePayment(handle_payment::Error),\n    /// Contains a [`auction::Error`].\n    Auction(auction::Error),\n}\n\nimpl From<mint::Error> for Error {\n    fn from(error: mint::Error) -> Error {\n        Error::Mint(error)\n    }\n}\n\nimpl From<handle_payment::Error> for Error {\n    fn from(error: handle_payment::Error) -> Error {\n        Error::HandlePayment(error)\n    }\n}\n\nimpl From<auction::Error> for Error {\n    fn from(error: auction::Error) -> Error {\n        Error::Auction(error)\n    }\n}\n\nimpl Display for Error {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            Error::Mint(error) => write!(formatter, \"Mint error: {}\", error),\n            Error::HandlePayment(error) => write!(formatter, \"HandlePayment error: {}\", error),\n            Error::Auction(error) => write!(formatter, \"Auction error: {}\", error),\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/system/handle_payment/constants.rs",
    "content": "/// Named constant for `purse`.\npub const ARG_PURSE: &str = \"purse\";\n/// Named constant for `amount`.\npub const ARG_AMOUNT: &str = \"amount\";\n/// Named constant for `source`.\npub const ARG_ACCOUNT: &str = \"account\";\n/// Named constant for `target`.\npub const ARG_TARGET: &str = \"target\";\n\n/// Named constant for method `get_payment_purse`.\npub const METHOD_GET_PAYMENT_PURSE: &str = \"get_payment_purse\";\n/// Named constant for method `set_refund_purse`.\npub const METHOD_SET_REFUND_PURSE: &str = \"set_refund_purse\";\n/// Named constant for method `get_refund_purse`.\npub const METHOD_GET_REFUND_PURSE: &str = \"get_refund_purse\";\n\n/// Storage for handle payment contract hash.\npub const CONTRACT_HASH_KEY: &str = \"contract_hash\";\n\n/// Storage for handle payment access key.\npub const CONTRACT_ACCESS_KEY: &str = \"access_key\";\n\n/// The uref name where the Handle Payment accepts payment for computation on behalf of validators.\npub const PAYMENT_PURSE_KEY: &str = \"payment_purse\";\n\n/// The uref name where the Handle Payment will refund unused payment back to the user. The uref\n/// this name corresponds to is set by the user.\npub const REFUND_PURSE_KEY: &str = \"refund_purse\";\n/// Storage for handle payment accumulation purse key.\n///\n/// This purse is used when `fee_elimination` config is set to `Accumulate` which makes sense for\n/// some private chains.\npub const ACCUMULATION_PURSE_KEY: &str = \"accumulation_purse\";\n"
  },
  {
    "path": "types/src/system/handle_payment/entry_points.rs",
    "content": "use alloc::boxed::Box;\n\nuse crate::{\n    system::handle_payment::{\n        ARG_PURSE, METHOD_GET_PAYMENT_PURSE, METHOD_GET_REFUND_PURSE, METHOD_SET_REFUND_PURSE,\n    },\n    CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints,\n    Parameter,\n};\n\n/// Creates handle payment contract entry points.\npub fn handle_payment_entry_points() -> EntryPoints {\n    let mut entry_points = EntryPoints::new();\n\n    let get_payment_purse = EntityEntryPoint::new(\n        METHOD_GET_PAYMENT_PURSE,\n        vec![],\n        CLType::URef,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(get_payment_purse);\n\n    let set_refund_purse = EntityEntryPoint::new(\n        METHOD_SET_REFUND_PURSE,\n        vec![Parameter::new(ARG_PURSE, CLType::URef)],\n        CLType::Unit,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(set_refund_purse);\n\n    let get_refund_purse = EntityEntryPoint::new(\n        METHOD_GET_REFUND_PURSE,\n        vec![],\n        CLType::Option(Box::new(CLType::URef)),\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(get_refund_purse);\n\n    entry_points\n}\n"
  },
  {
    "path": "types/src/system/handle_payment/error.rs",
    "content": "//! Home of the Handle Payment contract's [`enum@Error`] type.\nuse alloc::vec::Vec;\nuse core::{\n    convert::TryFrom,\n    fmt::{self, Display, Formatter},\n};\n\nuse crate::{\n    bytesrepr::{self, ToBytes, U8_SERIALIZED_LENGTH},\n    CLType, CLTyped,\n};\n\n/// Errors which can occur while executing the Handle Payment contract.\n#[derive(Debug, Copy, Clone, PartialEq, Eq)]\n#[repr(u8)]\n#[non_exhaustive]\npub enum Error {\n    // ===== User errors =====\n    /// The given validator is not bonded.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(0, Error::NotBonded as u8);\n    /// ```\n    NotBonded = 0,\n    /// There are too many bonding or unbonding attempts already enqueued to allow more.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(1, Error::TooManyEventsInQueue as u8);\n    /// ```\n    TooManyEventsInQueue = 1,\n    /// At least one validator must remain bonded.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(2, Error::CannotUnbondLastValidator as u8);\n    /// ```\n    CannotUnbondLastValidator = 2,\n    /// Failed to bond or unbond as this would have resulted in exceeding the maximum allowed\n    /// difference between the largest and smallest stakes.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(3, Error::SpreadTooHigh as u8);\n    /// ```\n    SpreadTooHigh = 3,\n    /// The given validator already has a bond or unbond attempt enqueued.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(4, Error::MultipleRequests as u8);\n    /// ```\n    MultipleRequests = 4,\n    /// Attempted to bond with a stake which was too small.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(5, Error::BondTooSmall as u8);\n    /// ```\n    BondTooSmall = 5,\n    /// Attempted to bond with a 
stake which was too large.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(6, Error::BondTooLarge as u8);\n    /// ```\n    BondTooLarge = 6,\n    /// Attempted to unbond an amount which was too large.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(7, Error::UnbondTooLarge as u8);\n    /// ```\n    UnbondTooLarge = 7,\n    /// While bonding, the transfer from source purse to the Handle Payment internal purse failed.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(8, Error::BondTransferFailed as u8);\n    /// ```\n    BondTransferFailed = 8,\n    /// While unbonding, the transfer from the Handle Payment internal purse to the destination\n    /// purse failed.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(9, Error::UnbondTransferFailed as u8);\n    /// ```\n    UnbondTransferFailed = 9,\n    // ===== System errors =====\n    /// Internal error: a [`BlockTime`](crate::BlockTime) was unexpectedly out of sequence.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(10, Error::TimeWentBackwards as u8);\n    /// ```\n    TimeWentBackwards = 10,\n    /// Internal error: stakes were unexpectedly empty.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(11, Error::StakesNotFound as u8);\n    /// ```\n    StakesNotFound = 11,\n    /// Internal error: the Handle Payment contract's payment purse wasn't found.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(12, Error::PaymentPurseNotFound as u8);\n    /// ```\n    PaymentPurseNotFound = 12,\n    /// Internal error: the Handle Payment contract's payment purse key was the wrong type.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(13, Error::PaymentPurseKeyUnexpectedType as u8);\n  
  /// ```\n    PaymentPurseKeyUnexpectedType = 13,\n    /// Internal error: couldn't retrieve the balance for the Handle Payment contract's payment\n    /// purse.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(14, Error::PaymentPurseBalanceNotFound as u8);\n    /// ```\n    PaymentPurseBalanceNotFound = 14,\n    /// Internal error: the Handle Payment contract's bonding purse wasn't found.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(15, Error::BondingPurseNotFound as u8);\n    /// ```\n    BondingPurseNotFound = 15,\n    /// Internal error: the Handle Payment contract's bonding purse key was the wrong type.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(16, Error::BondingPurseKeyUnexpectedType as u8);\n    /// ```\n    BondingPurseKeyUnexpectedType = 16,\n    /// Internal error: the Handle Payment contract's refund purse key was the wrong type.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(17, Error::RefundPurseKeyUnexpectedType as u8);\n    /// ```\n    RefundPurseKeyUnexpectedType = 17,\n    /// Internal error: the Handle Payment contract's rewards purse wasn't found.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(18, Error::RewardsPurseNotFound as u8);\n    /// ```\n    RewardsPurseNotFound = 18,\n    /// Internal error: the Handle Payment contract's rewards purse key was the wrong type.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(19, Error::RewardsPurseKeyUnexpectedType as u8);\n    /// ```\n    RewardsPurseKeyUnexpectedType = 19,\n    /// Internal error: failed to deserialize the stake's key.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(20, Error::StakesKeyDeserializationFailed as u8);\n    /// ```\n    StakesKeyDeserializationFailed = 
20,\n    /// Internal error: failed to deserialize the stake's balance.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(21, Error::StakesDeserializationFailed as u8);\n    /// ```\n    StakesDeserializationFailed = 21,\n    /// Raised when caller is not the system account.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(22, Error::InvalidCaller as u8);\n    /// ```\n    InvalidCaller = 22,\n    /// Internal error: while finalizing payment, the amount spent exceeded the amount available.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(23, Error::InsufficientPaymentForAmountSpent as u8);\n    /// ```\n    InsufficientPaymentForAmountSpent = 23,\n    /// Internal error: while finalizing payment, failed to pay the validators (the transfer from\n    /// the Handle Payment contract's payment purse to rewards purse failed).\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(24, Error::FailedTransferToRewardsPurse as u8);\n    /// ```\n    FailedTransferToRewardsPurse = 24,\n    /// Internal error: while finalizing payment, failed to refund the caller's purse (the transfer\n    /// from the Handle Payment contract's payment purse to refund purse or account's main purse\n    /// failed).\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(25, Error::FailedTransferToAccountPurse as u8);\n    /// ```\n    FailedTransferToAccountPurse = 25,\n    /// Handle Payment contract's \"set_refund_purse\" method can only be called by the payment code\n    /// of a deploy, but was called by the session code.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(26, Error::SetRefundPurseCalledOutsidePayment as u8);\n    /// ```\n    SetRefundPurseCalledOutsidePayment = 26,\n    /// Raised when the system is unable to determine purse 
balance.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(27, Error::GetBalance as u8);\n    /// ```\n    GetBalance = 27,\n    /// Raised when the system is unable to put named key.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(28, Error::PutKey as u8);\n    /// ```\n    PutKey = 28,\n    /// Raised when the system is unable to remove given named key.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(29, Error::RemoveKey as u8);\n    /// ```\n    RemoveKey = 29,\n    /// Failed to transfer funds.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(30, Error::Transfer as u8);\n    /// ```\n    Transfer = 30,\n    /// An arithmetic overflow occurred\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(31, Error::ArithmeticOverflow as u8);\n    /// ```\n    ArithmeticOverflow = 31,\n    // NOTE: These variants below will be removed once support for WASM system contracts will be\n    // dropped.\n    #[doc(hidden)]\n    GasLimit = 32,\n    /// Refund purse is a payment purse.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(33, Error::RefundPurseIsPaymentPurse as u8);\n    /// ```\n    RefundPurseIsPaymentPurse = 33,\n    /// Error raised while reducing total supply on the mint system contract.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(34, Error::ReduceTotalSupply as u8);\n    /// ```\n    ReduceTotalSupply = 34,\n    /// Error writing to a storage.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(35, Error::Storage as u8);\n    /// ```\n    Storage = 35,\n    /// Internal error: the Handle Payment contract's accumulation purse wasn't found.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    
/// assert_eq!(36, Error::AccumulationPurseNotFound as u8);\n    /// ```\n    AccumulationPurseNotFound = 36,\n    /// Internal error: the Handle Payment contract's accumulation purse key was the wrong type.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(37, Error::AccumulationPurseKeyUnexpectedType as u8);\n    /// ```\n    AccumulationPurseKeyUnexpectedType = 37,\n    /// Internal error: invalid fee and / or refund settings encountered during payment processing.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(38, Error::IncompatiblePaymentSettings as u8);\n    /// ```\n    IncompatiblePaymentSettings = 38,\n    /// Unexpected key variant.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(39, Error::UnexpectedKeyVariant as u8);\n    /// ```\n    UnexpectedKeyVariant = 39,\n    /// Attempt to persist payment purse.\n    /// ```\n    /// # use casper_types::system::handle_payment::Error;\n    /// assert_eq!(40, Error::AttemptToPersistPaymentPurse as u8);\n    /// ```\n    AttemptToPersistPaymentPurse = 40,\n}\n\nimpl Display for Error {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            Error::NotBonded => formatter.write_str(\"Not bonded\"),\n            Error::TooManyEventsInQueue => formatter.write_str(\"Too many events in queue\"),\n            Error::CannotUnbondLastValidator => formatter.write_str(\"Cannot unbond last validator\"),\n            Error::SpreadTooHigh => formatter.write_str(\"Spread is too high\"),\n            Error::MultipleRequests => formatter.write_str(\"Multiple requests\"),\n            Error::BondTooSmall => formatter.write_str(\"Bond is too small\"),\n            Error::BondTooLarge => formatter.write_str(\"Bond is too large\"),\n            Error::UnbondTooLarge => formatter.write_str(\"Unbond is too large\"),\n            Error::BondTransferFailed => 
formatter.write_str(\"Bond transfer failed\"),\n            Error::UnbondTransferFailed => formatter.write_str(\"Unbond transfer failed\"),\n            Error::TimeWentBackwards => formatter.write_str(\"Time went backwards\"),\n            Error::StakesNotFound => formatter.write_str(\"Stakes not found\"),\n            Error::PaymentPurseNotFound => formatter.write_str(\"Payment purse not found\"),\n            Error::PaymentPurseKeyUnexpectedType => {\n                formatter.write_str(\"Payment purse has unexpected type\")\n            }\n            Error::PaymentPurseBalanceNotFound => {\n                formatter.write_str(\"Payment purse balance not found\")\n            }\n            Error::BondingPurseNotFound => formatter.write_str(\"Bonding purse not found\"),\n            Error::BondingPurseKeyUnexpectedType => {\n                formatter.write_str(\"Bonding purse key has unexpected type\")\n            }\n            Error::RefundPurseKeyUnexpectedType => {\n                formatter.write_str(\"Refund purse key has unexpected type\")\n            }\n            Error::RewardsPurseNotFound => formatter.write_str(\"Rewards purse not found\"),\n            Error::RewardsPurseKeyUnexpectedType => {\n                formatter.write_str(\"Rewards purse has unexpected type\")\n            }\n            Error::StakesKeyDeserializationFailed => {\n                formatter.write_str(\"Failed to deserialize stake's key\")\n            }\n            Error::StakesDeserializationFailed => {\n                formatter.write_str(\"Failed to deserialize stake's balance\")\n            }\n            Error::InvalidCaller => {\n                formatter.write_str(\"System function was called by user account\")\n            }\n            Error::InsufficientPaymentForAmountSpent => {\n                formatter.write_str(\"Insufficient payment for amount spent\")\n            }\n            Error::FailedTransferToRewardsPurse => {\n                
formatter.write_str(\"Transfer to rewards purse has failed\")\n            }\n            Error::FailedTransferToAccountPurse => {\n                formatter.write_str(\"Transfer to account's purse failed\")\n            }\n            Error::SetRefundPurseCalledOutsidePayment => {\n                formatter.write_str(\"Set refund purse was called outside payment\")\n            }\n            Error::GetBalance => formatter.write_str(\"Unable to get purse balance\"),\n            Error::PutKey => formatter.write_str(\"Unable to put named key\"),\n            Error::RemoveKey => formatter.write_str(\"Unable to remove named key\"),\n            Error::Transfer => formatter.write_str(\"Failed to transfer funds\"),\n            Error::ArithmeticOverflow => formatter.write_str(\"Arithmetic overflow\"),\n            Error::GasLimit => formatter.write_str(\"GasLimit\"),\n            Error::RefundPurseIsPaymentPurse => {\n                formatter.write_str(\"Refund purse is a payment purse.\")\n            }\n            Error::ReduceTotalSupply => formatter.write_str(\"Failed to reduce total supply.\"),\n            Error::Storage => formatter.write_str(\"Failed to write to storage.\"),\n            Error::AccumulationPurseNotFound => formatter.write_str(\"Accumulation purse not found\"),\n            Error::AccumulationPurseKeyUnexpectedType => {\n                formatter.write_str(\"Accumulation purse has unexpected type\")\n            }\n            Error::IncompatiblePaymentSettings => {\n                formatter.write_str(\"Incompatible payment settings\")\n            }\n            Error::UnexpectedKeyVariant => formatter.write_str(\"Unexpected key variant\"),\n            Error::AttemptToPersistPaymentPurse => {\n                formatter.write_str(\"Attempt to persist payment purse\")\n            }\n        }\n    }\n}\n\nimpl TryFrom<u8> for Error {\n    type Error = ();\n\n    fn try_from(value: u8) -> Result<Self, Self::Error> {\n        let error = match 
value {\n            v if v == Error::NotBonded as u8 => Error::NotBonded,\n            v if v == Error::TooManyEventsInQueue as u8 => Error::TooManyEventsInQueue,\n            v if v == Error::CannotUnbondLastValidator as u8 => Error::CannotUnbondLastValidator,\n            v if v == Error::SpreadTooHigh as u8 => Error::SpreadTooHigh,\n            v if v == Error::MultipleRequests as u8 => Error::MultipleRequests,\n            v if v == Error::BondTooSmall as u8 => Error::BondTooSmall,\n            v if v == Error::BondTooLarge as u8 => Error::BondTooLarge,\n            v if v == Error::UnbondTooLarge as u8 => Error::UnbondTooLarge,\n            v if v == Error::BondTransferFailed as u8 => Error::BondTransferFailed,\n            v if v == Error::UnbondTransferFailed as u8 => Error::UnbondTransferFailed,\n            v if v == Error::TimeWentBackwards as u8 => Error::TimeWentBackwards,\n            v if v == Error::StakesNotFound as u8 => Error::StakesNotFound,\n            v if v == Error::PaymentPurseNotFound as u8 => Error::PaymentPurseNotFound,\n            v if v == Error::PaymentPurseKeyUnexpectedType as u8 => {\n                Error::PaymentPurseKeyUnexpectedType\n            }\n            v if v == Error::PaymentPurseBalanceNotFound as u8 => {\n                Error::PaymentPurseBalanceNotFound\n            }\n            v if v == Error::BondingPurseNotFound as u8 => Error::BondingPurseNotFound,\n            v if v == Error::BondingPurseKeyUnexpectedType as u8 => {\n                Error::BondingPurseKeyUnexpectedType\n            }\n            v if v == Error::RefundPurseKeyUnexpectedType as u8 => {\n                Error::RefundPurseKeyUnexpectedType\n            }\n            v if v == Error::RewardsPurseNotFound as u8 => Error::RewardsPurseNotFound,\n            v if v == Error::RewardsPurseKeyUnexpectedType as u8 => {\n                Error::RewardsPurseKeyUnexpectedType\n            }\n            v if v == Error::StakesKeyDeserializationFailed 
as u8 => {\n                Error::StakesKeyDeserializationFailed\n            }\n            v if v == Error::StakesDeserializationFailed as u8 => {\n                Error::StakesDeserializationFailed\n            }\n            v if v == Error::InvalidCaller as u8 => Error::InvalidCaller,\n            v if v == Error::InsufficientPaymentForAmountSpent as u8 => {\n                Error::InsufficientPaymentForAmountSpent\n            }\n            v if v == Error::FailedTransferToRewardsPurse as u8 => {\n                Error::FailedTransferToRewardsPurse\n            }\n            v if v == Error::FailedTransferToAccountPurse as u8 => {\n                Error::FailedTransferToAccountPurse\n            }\n            v if v == Error::SetRefundPurseCalledOutsidePayment as u8 => {\n                Error::SetRefundPurseCalledOutsidePayment\n            }\n\n            v if v == Error::GetBalance as u8 => Error::GetBalance,\n            v if v == Error::PutKey as u8 => Error::PutKey,\n            v if v == Error::RemoveKey as u8 => Error::RemoveKey,\n            v if v == Error::Transfer as u8 => Error::Transfer,\n            v if v == Error::ArithmeticOverflow as u8 => Error::ArithmeticOverflow,\n            v if v == Error::GasLimit as u8 => Error::GasLimit,\n            v if v == Error::RefundPurseIsPaymentPurse as u8 => Error::RefundPurseIsPaymentPurse,\n            v if v == Error::ReduceTotalSupply as u8 => Error::ReduceTotalSupply,\n            v if v == Error::Storage as u8 => Error::Storage,\n            v if v == Error::AccumulationPurseNotFound as u8 => Error::AccumulationPurseNotFound,\n            v if v == Error::AccumulationPurseKeyUnexpectedType as u8 => {\n                Error::AccumulationPurseKeyUnexpectedType\n            }\n            v if v == Error::IncompatiblePaymentSettings as u8 => {\n                Error::IncompatiblePaymentSettings\n            }\n            v if v == Error::UnexpectedKeyVariant as u8 => 
Error::UnexpectedKeyVariant,\n            v if v == Error::AttemptToPersistPaymentPurse as u8 => {\n                Error::AttemptToPersistPaymentPurse\n            }\n            _ => return Err(()),\n        };\n        Ok(error)\n    }\n}\n\nimpl CLTyped for Error {\n    fn cl_type() -> CLType {\n        CLType::U8\n    }\n}\n\nimpl ToBytes for Error {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let value = *self as u8;\n        value.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n    }\n}\n"
  },
  {
    "path": "types/src/system/handle_payment.rs",
    "content": "//! Contains implementation of a Handle Payment contract functionality.\nmod constants;\nmod entry_points;\nmod error;\n\npub use constants::*;\npub use entry_points::handle_payment_entry_points;\npub use error::Error;\n"
  },
  {
    "path": "types/src/system/mint/balance_hold.rs",
    "content": "use alloc::{\n    format,\n    string::{String, ToString},\n    vec::Vec,\n};\n\nuse core::{\n    convert::TryFrom,\n    fmt::{Debug, Display, Formatter},\n};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::distributions::{Distribution, Standard};\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    checksummed_hex,\n    key::FromStrError,\n    BlockTime, Key, Timestamp, URefAddr, BLOCKTIME_SERIALIZED_LENGTH, UREF_ADDR_LENGTH,\n};\n\nconst GAS_TAG: u8 = 0;\nconst PROCESSING_TAG: u8 = 1;\n\n/// Serialization tag for BalanceHold variants.\n#[derive(\n    Debug, Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize,\n)]\n#[repr(u8)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub enum BalanceHoldAddrTag {\n    #[default]\n    /// Tag for gas variant.\n    Gas = GAS_TAG,\n    /// Tag for processing variant.\n    Processing = PROCESSING_TAG,\n}\n\nimpl BalanceHoldAddrTag {\n    /// The length in bytes of a [`BalanceHoldAddrTag`].\n    pub const BALANCE_HOLD_ADDR_TAG_LENGTH: usize = 1;\n\n    /// Attempts to map `BalanceHoldAddrTag` from a u8.\n    pub fn try_from_u8(value: u8) -> Option<Self> {\n        // TryFrom requires std, so doing this instead.\n        if value == GAS_TAG {\n            return Some(BalanceHoldAddrTag::Gas);\n        }\n        if value == PROCESSING_TAG {\n            return Some(BalanceHoldAddrTag::Processing);\n        }\n        None\n    }\n}\n\nimpl Display for BalanceHoldAddrTag {\n    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {\n        let tag = match self {\n            BalanceHoldAddrTag::Gas => GAS_TAG,\n            BalanceHoldAddrTag::Processing => PROCESSING_TAG,\n        
};\n        write!(f, \"{}\", base16::encode_lower(&[tag]))\n    }\n}\n\nimpl ToBytes for BalanceHoldAddrTag {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        Self::BALANCE_HOLD_ADDR_TAG_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        writer.push(*self as u8);\n        Ok(())\n    }\n}\n\nimpl FromBytes for BalanceHoldAddrTag {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        if let Some((byte, rem)) = bytes.split_first() {\n            let tag = BalanceHoldAddrTag::try_from_u8(*byte).ok_or(bytesrepr::Error::Formatting)?;\n            Ok((tag, rem))\n        } else {\n            Err(bytesrepr::Error::Formatting)\n        }\n    }\n}\n\n/// Balance hold address.\n#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub enum BalanceHoldAddr {\n    /// Gas hold variant.\n    Gas {\n        /// The address of the purse this hold is on.\n        purse_addr: URefAddr,\n        /// The block time this hold was placed.\n        block_time: BlockTime,\n    },\n    /// Processing variant\n    Processing {\n        /// The address of the purse this hold is on.\n        purse_addr: URefAddr,\n        /// The block time this hold was placed.\n        block_time: BlockTime,\n    },\n}\n\nimpl BalanceHoldAddr {\n    /// The length in bytes of a [`BalanceHoldAddr`] for a gas hold address.\n    pub const GAS_HOLD_ADDR_LENGTH: usize = UREF_ADDR_LENGTH\n        + BalanceHoldAddrTag::BALANCE_HOLD_ADDR_TAG_LENGTH\n        + BLOCKTIME_SERIALIZED_LENGTH;\n\n    /// Creates a Gas variant instance of [`BalanceHoldAddr`].\n    pub const fn 
new_gas(purse_addr: URefAddr, block_time: BlockTime) -> BalanceHoldAddr {\n        BalanceHoldAddr::Gas {\n            purse_addr,\n            block_time,\n        }\n    }\n\n    /// Creates a Processing variant instance of [`BalanceHoldAddr`].\n    pub const fn new_processing(purse_addr: URefAddr, block_time: BlockTime) -> BalanceHoldAddr {\n        BalanceHoldAddr::Processing {\n            purse_addr,\n            block_time,\n        }\n    }\n\n    /// How long is be the serialized value for this instance.\n    pub fn serialized_length(&self) -> usize {\n        match self {\n            BalanceHoldAddr::Gas {\n                purse_addr,\n                block_time,\n            } => {\n                BalanceHoldAddrTag::BALANCE_HOLD_ADDR_TAG_LENGTH\n                    + ToBytes::serialized_length(purse_addr)\n                    + ToBytes::serialized_length(block_time)\n            }\n            BalanceHoldAddr::Processing {\n                purse_addr,\n                block_time,\n            } => {\n                BalanceHoldAddrTag::BALANCE_HOLD_ADDR_TAG_LENGTH\n                    + ToBytes::serialized_length(purse_addr)\n                    + ToBytes::serialized_length(block_time)\n            }\n        }\n    }\n\n    /// Returns the tag of this instance.\n    pub fn tag(&self) -> BalanceHoldAddrTag {\n        match self {\n            BalanceHoldAddr::Gas { .. } => BalanceHoldAddrTag::Gas,\n            BalanceHoldAddr::Processing { .. } => BalanceHoldAddrTag::Processing,\n        }\n    }\n\n    /// Returns the `[URefAddr]` for the purse associated with this hold.\n    pub fn purse_addr(&self) -> URefAddr {\n        match self {\n            BalanceHoldAddr::Gas { purse_addr, .. } => *purse_addr,\n            BalanceHoldAddr::Processing { purse_addr, .. 
} => *purse_addr,\n        }\n    }\n\n    /// Returns the `[BlockTime]` when this hold was written.\n    pub fn block_time(&self) -> BlockTime {\n        match self {\n            BalanceHoldAddr::Gas { block_time, .. } => *block_time,\n            BalanceHoldAddr::Processing { block_time, .. } => *block_time,\n        }\n    }\n\n    /// To formatted string.\n    pub fn to_formatted_string(&self) -> String {\n        match self {\n            BalanceHoldAddr::Gas {\n                purse_addr,\n                block_time,\n            } => {\n                format!(\n                    \"{}{}{}\",\n                    // also, put the tag in readable form\n                    base16::encode_lower(&GAS_TAG.to_le_bytes()),\n                    base16::encode_lower(purse_addr),\n                    base16::encode_lower(&block_time.value().to_le_bytes())\n                )\n            }\n            BalanceHoldAddr::Processing {\n                purse_addr,\n                block_time,\n            } => {\n                format!(\n                    \"{}{}{}\",\n                    // also, put the tag in readable form\n                    base16::encode_lower(&PROCESSING_TAG.to_le_bytes()),\n                    base16::encode_lower(purse_addr),\n                    base16::encode_lower(&block_time.value().to_le_bytes())\n                )\n            }\n        }\n    }\n\n    /// From formatted string.\n    pub fn from_formatted_string(hex: &str) -> Result<Self, FromStrError> {\n        let bytes = checksummed_hex::decode(hex)\n            .map_err(|error| FromStrError::BalanceHold(error.to_string()))?;\n        if bytes.is_empty() {\n            return Err(FromStrError::BalanceHold(\n                \"bytes should not be 0 len\".to_string(),\n            ));\n        }\n        let tag_bytes = <[u8; BalanceHoldAddrTag::BALANCE_HOLD_ADDR_TAG_LENGTH]>::try_from(\n            bytes[0..BalanceHoldAddrTag::BALANCE_HOLD_ADDR_TAG_LENGTH].as_ref(),\n        )\n      
  .map_err(|err| FromStrError::BalanceHold(err.to_string()))?;\n        let tag = <u8>::from_le_bytes(tag_bytes);\n        let tag = BalanceHoldAddrTag::try_from_u8(tag).ok_or_else(|| {\n            FromStrError::BalanceHold(\"failed to parse balance hold addr tag\".to_string())\n        })?;\n\n        let uref_addr = URefAddr::try_from(bytes[1..=UREF_ADDR_LENGTH].as_ref())\n            .map_err(|err| FromStrError::BalanceHold(err.to_string()))?;\n\n        // if more tags are added, extend the below logic to handle every case.\n        // it is possible that it will turn out that all further tags include blocktime\n        // in which case it can be pulled up out of the tag guard condition.\n        // however, im erring on the side of future tolerance and guarding it for now.\n        match tag {\n            BalanceHoldAddrTag::Gas => {\n                let block_time_bytes =\n                    <[u8; BLOCKTIME_SERIALIZED_LENGTH]>::try_from(bytes[33..].as_ref())\n                        .map_err(|err| FromStrError::BalanceHold(err.to_string()))?;\n\n                let block_time_millis = <u64>::from_le_bytes(block_time_bytes);\n                let block_time = BlockTime::new(block_time_millis);\n                Ok(BalanceHoldAddr::new_gas(uref_addr, block_time))\n            }\n            BalanceHoldAddrTag::Processing => {\n                let block_time_bytes =\n                    <[u8; BLOCKTIME_SERIALIZED_LENGTH]>::try_from(bytes[33..].as_ref())\n                        .map_err(|err| FromStrError::BalanceHold(err.to_string()))?;\n\n                let block_time_millis = <u64>::from_le_bytes(block_time_bytes);\n                let block_time = BlockTime::new(block_time_millis);\n                Ok(BalanceHoldAddr::new_processing(uref_addr, block_time))\n            }\n        }\n    }\n}\n\nimpl ToBytes for BalanceHoldAddr {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n     
   buffer.push(self.tag() as u8);\n        match self {\n            BalanceHoldAddr::Gas {\n                purse_addr,\n                block_time,\n            }\n            | BalanceHoldAddr::Processing {\n                purse_addr,\n                block_time,\n            } => {\n                buffer.append(&mut purse_addr.to_bytes()?);\n                buffer.append(&mut block_time.to_bytes()?)\n            }\n        }\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.serialized_length()\n    }\n}\n\nimpl FromBytes for BalanceHoldAddr {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?;\n        match tag {\n            tag if tag == BalanceHoldAddrTag::Gas as u8 => {\n                let (purse_addr, rem) = URefAddr::from_bytes(remainder)?;\n                let (block_time, rem) = BlockTime::from_bytes(rem)?;\n                Ok((\n                    BalanceHoldAddr::Gas {\n                        purse_addr,\n                        block_time,\n                    },\n                    rem,\n                ))\n            }\n            tag if tag == BalanceHoldAddrTag::Processing as u8 => {\n                let (purse_addr, rem) = URefAddr::from_bytes(remainder)?;\n                let (block_time, rem) = BlockTime::from_bytes(rem)?;\n                Ok((\n                    BalanceHoldAddr::Processing {\n                        purse_addr,\n                        block_time,\n                    },\n                    rem,\n                ))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\nimpl Default for BalanceHoldAddr {\n    fn default() -> Self {\n        BalanceHoldAddr::Gas {\n            purse_addr: URefAddr::default(),\n            block_time: BlockTime::default(),\n        }\n    }\n}\n\nimpl From<BalanceHoldAddr> for Key {\n    fn 
from(balance_hold_addr: BalanceHoldAddr) -> Self {\n        Key::BalanceHold(balance_hold_addr)\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl TryFrom<Key> for BalanceHoldAddr {\n    type Error = ();\n\n    fn try_from(value: Key) -> Result<Self, Self::Error> {\n        if let Key::BalanceHold(balance_hold_addr) = value {\n            Ok(balance_hold_addr)\n        } else {\n            Err(())\n        }\n    }\n}\n\nimpl Display for BalanceHoldAddr {\n    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {\n        let tag = self.tag();\n        match self {\n            BalanceHoldAddr::Gas {\n                purse_addr,\n                block_time,\n            }\n            | BalanceHoldAddr::Processing {\n                purse_addr,\n                block_time,\n            } => {\n                write!(\n                    f,\n                    \"{}-{}-{}\",\n                    tag,\n                    base16::encode_lower(&purse_addr),\n                    Timestamp::from(block_time.value())\n                )\n            }\n        }\n    }\n}\n\nimpl Debug for BalanceHoldAddr {\n    fn fmt(&self, f: &mut Formatter) -> core::fmt::Result {\n        match self {\n            BalanceHoldAddr::Gas {\n                purse_addr,\n                block_time,\n            } => write!(\n                f,\n                \"BidAddr::Gas({}, {})\",\n                base16::encode_lower(&purse_addr),\n                Timestamp::from(block_time.value())\n            ),\n            BalanceHoldAddr::Processing {\n                purse_addr,\n                block_time,\n            } => write!(\n                f,\n                \"BidAddr::Processing({}, {})\",\n                base16::encode_lower(&purse_addr),\n                Timestamp::from(block_time.value())\n            ),\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<BalanceHoldAddr> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut 
R) -> BalanceHoldAddr {\n        BalanceHoldAddr::new_gas(rng.gen(), BlockTime::new(rng.gen()))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{bytesrepr, system::mint::BalanceHoldAddr, BlockTime, Timestamp};\n\n    #[test]\n    fn serialization_roundtrip() {\n        let addr = BalanceHoldAddr::new_gas([1; 32], BlockTime::new(Timestamp::now().millis()));\n        bytesrepr::test_serialization_roundtrip(&addr);\n        let addr =\n            BalanceHoldAddr::new_processing([1; 32], BlockTime::new(Timestamp::now().millis()));\n        bytesrepr::test_serialization_roundtrip(&addr);\n    }\n}\n\n#[cfg(test)]\nmod prop_test_gas {\n    use proptest::prelude::*;\n\n    use crate::{bytesrepr, gens};\n\n    proptest! {\n        #[test]\n        fn test_variant_gas(addr in gens::balance_hold_addr_arb()) {\n            bytesrepr::test_serialization_roundtrip(&addr);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/system/mint/constants.rs",
    "content": "/// Named constant for `purse`.\npub const ARG_PURSE: &str = \"purse\";\n/// Named constant for `amount`.\npub const ARG_AMOUNT: &str = \"amount\";\n/// Named constant for `id`.\npub const ARG_ID: &str = \"id\";\n/// Named constant for `to`.\npub const ARG_TO: &str = \"to\";\n/// Named constant for `source`.\npub const ARG_SOURCE: &str = \"source\";\n/// Named constant for `target`.\npub const ARG_TARGET: &str = \"target\";\n/// Named constant for `round_seigniorage_rate` used in installer.\npub const ARG_ROUND_SEIGNIORAGE_RATE: &str = \"round_seigniorage_rate\";\n\n/// Named constant for method `mint`.\npub const METHOD_MINT: &str = \"mint\";\n/// Named constant for method `reduce_total_supply`.\npub const METHOD_REDUCE_TOTAL_SUPPLY: &str = \"reduce_total_supply\";\n/// Named constant for method `burn`.\npub const METHOD_BURN: &str = \"burn\";\n/// Named constant for (synthetic) method `create`\npub const METHOD_CREATE: &str = \"create\";\n/// Named constant for method `balance`.\npub const METHOD_BALANCE: &str = \"balance\";\n/// Named constant for method `transfer`.\npub const METHOD_TRANSFER: &str = \"transfer\";\n/// Named constant for method `read_base_round_reward`.\npub const METHOD_READ_BASE_ROUND_REWARD: &str = \"read_base_round_reward\";\n/// Named constant for method `mint_into_existing_purse`.\npub const METHOD_MINT_INTO_EXISTING_PURSE: &str = \"mint_into_existing_purse\";\n\n/// Storage for mint contract hash.\npub const HASH_KEY: &str = \"mint_hash\";\n/// Storage for mint access key.\npub const ACCESS_KEY: &str = \"mint_access\";\n/// Storage for base round reward key.\npub const BASE_ROUND_REWARD_KEY: &str = \"mint_base_round_reward\";\n/// Storage for mint total supply key.\npub const TOTAL_SUPPLY_KEY: &str = \"total_supply\";\n/// Storage for mint round seigniorage rate.\npub const ROUND_SEIGNIORAGE_RATE_KEY: &str = \"round_seigniorage_rate\";\n/// Storage for gas hold handling.\npub const MINT_GAS_HOLD_HANDLING_KEY: &str = 
\"gas_hold_handling\";\n/// Storage for gas hold interval.\npub const MINT_GAS_HOLD_INTERVAL_KEY: &str = \"gas_hold_interval\";\n/// Named key for sustain purse\npub const MINT_SUSTAIN_PURSE_KEY: &str = \"sustain_purse\";\n"
  },
  {
    "path": "types/src/system/mint/entry_points.rs",
    "content": "use alloc::boxed::Box;\n\nuse crate::{\n    addressable_entity::Parameters,\n    system::mint::{\n        ARG_AMOUNT, ARG_ID, ARG_PURSE, ARG_SOURCE, ARG_TARGET, ARG_TO, METHOD_BALANCE, METHOD_BURN,\n        METHOD_CREATE, METHOD_MINT, METHOD_MINT_INTO_EXISTING_PURSE, METHOD_READ_BASE_ROUND_REWARD,\n        METHOD_REDUCE_TOTAL_SUPPLY, METHOD_TRANSFER,\n    },\n    CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints,\n    Parameter,\n};\n\n/// Returns entry points for a mint system contract.\npub fn mint_entry_points() -> EntryPoints {\n    let mut entry_points = EntryPoints::new();\n\n    let entry_point = EntityEntryPoint::new(\n        METHOD_MINT,\n        vec![Parameter::new(ARG_AMOUNT, CLType::U512)],\n        CLType::Result {\n            ok: Box::new(CLType::URef),\n            err: Box::new(CLType::U8),\n        },\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(entry_point);\n\n    let entry_point = EntityEntryPoint::new(\n        METHOD_REDUCE_TOTAL_SUPPLY,\n        vec![Parameter::new(ARG_AMOUNT, CLType::U512)],\n        CLType::Result {\n            ok: Box::new(CLType::Unit),\n            err: Box::new(CLType::U8),\n        },\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(entry_point);\n\n    let entry_point = EntityEntryPoint::new(\n        METHOD_BURN,\n        vec![\n            Parameter::new(ARG_PURSE, CLType::URef),\n            Parameter::new(ARG_AMOUNT, CLType::U512),\n        ],\n        CLType::Result {\n            ok: Box::new(CLType::Unit),\n            err: Box::new(CLType::U8),\n        },\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(entry_point);\n\n    let entry_point = 
EntityEntryPoint::new(\n        METHOD_CREATE,\n        Parameters::new(),\n        CLType::URef,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(entry_point);\n\n    let entry_point = EntityEntryPoint::new(\n        METHOD_BALANCE,\n        vec![Parameter::new(ARG_PURSE, CLType::URef)],\n        CLType::Option(Box::new(CLType::U512)),\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(entry_point);\n\n    let entry_point = EntityEntryPoint::new(\n        METHOD_TRANSFER,\n        vec![\n            Parameter::new(ARG_TO, CLType::Option(Box::new(CLType::ByteArray(32)))),\n            Parameter::new(ARG_SOURCE, CLType::URef),\n            Parameter::new(ARG_TARGET, CLType::URef),\n            Parameter::new(ARG_AMOUNT, CLType::U512),\n            Parameter::new(ARG_ID, CLType::Option(Box::new(CLType::U64))),\n        ],\n        CLType::Result {\n            ok: Box::new(CLType::Unit),\n            err: Box::new(CLType::U8),\n        },\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(entry_point);\n\n    let entry_point = EntityEntryPoint::new(\n        METHOD_READ_BASE_ROUND_REWARD,\n        Parameters::new(),\n        CLType::U512,\n        EntryPointAccess::Public,\n        EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(entry_point);\n\n    let entry_point = EntityEntryPoint::new(\n        METHOD_MINT_INTO_EXISTING_PURSE,\n        vec![\n            Parameter::new(ARG_AMOUNT, CLType::U512),\n            Parameter::new(ARG_PURSE, CLType::URef),\n        ],\n        CLType::Result {\n            ok: Box::new(CLType::Unit),\n            err: Box::new(CLType::U8),\n        },\n        EntryPointAccess::Public,\n        
EntryPointType::Called,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(entry_point);\n\n    entry_points\n}\n"
  },
  {
    "path": "types/src/system/mint/error.rs",
    "content": "//! Home of the Mint contract's [`enum@Error`] type.\n\nuse alloc::vec::Vec;\nuse core::{\n    convert::{TryFrom, TryInto},\n    fmt::{self, Display, Formatter},\n};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    CLType, CLTyped,\n};\n\n/// Errors which can occur while executing the Mint contract.\n#[derive(Debug, Copy, Clone, PartialEq, Eq)]\n#[repr(u8)]\n#[non_exhaustive]\npub enum Error {\n    /// Insufficient funds to complete the transfer.\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(0, Error::InsufficientFunds as u8);\n    /// ```\n    InsufficientFunds = 0,\n    /// Source purse not found.\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(1, Error::SourceNotFound as u8);\n    /// ```\n    SourceNotFound = 1,\n    /// Destination purse not found.\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(2, Error::DestNotFound as u8);\n    /// ```\n    DestNotFound = 2,\n    /// The given [`URef`](crate::URef) does not reference the account holder's purse, or such a\n    /// `URef` does not have the required [`AccessRights`](crate::AccessRights).\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(3, Error::InvalidURef as u8);\n    /// ```\n    InvalidURef = 3,\n    /// The source purse is not writeable (see [`URef::is_writeable`](crate::URef::is_writeable)),\n    /// or the destination purse is not addable (see\n    /// [`URef::is_addable`](crate::URef::is_addable)).\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(4, Error::InvalidAccessRights as u8);\n    /// ```\n    InvalidAccessRights = 4,\n    /// Tried to create a new purse with a non-zero initial balance.\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(5, Error::InvalidNonEmptyPurseCreation as u8);\n    /// ```\n    InvalidNonEmptyPurseCreation = 5,\n    
/// Failed to read from local or global storage.\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(6, Error::Storage as u8);\n    /// ```\n    Storage = 6,\n    /// Purse not found while trying to get balance.\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(7, Error::PurseNotFound as u8);\n    /// ```\n    PurseNotFound = 7,\n    /// Unable to obtain a key by its name.\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(8, Error::MissingKey as u8);\n    /// ```\n    MissingKey = 8,\n    /// Total supply not found.\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(9, Error::TotalSupplyNotFound as u8);\n    /// ```\n    TotalSupplyNotFound = 9,\n    /// Failed to record transfer.\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(10, Error::RecordTransferFailure as u8);\n    /// ```\n    RecordTransferFailure = 10,\n    /// Invalid attempt to reduce total supply.\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(11, Error::InvalidTotalSupplyReductionAttempt as u8);\n    /// ```\n    InvalidTotalSupplyReductionAttempt = 11,\n    /// Failed to create new uref.\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(12, Error::NewURef as u8);\n    /// ```\n    NewURef = 12,\n    /// Failed to put key.\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(13, Error::PutKey as u8);\n    /// ```\n    PutKey = 13,\n    /// Failed to write to dictionary.\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(14, Error::WriteDictionary as u8);\n    /// ```\n    WriteDictionary = 14,\n    /// Failed to create a [`crate::CLValue`].\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(15, Error::CLValue as u8);\n    /// ```\n    CLValue = 15,\n    /// Failed to serialize 
data.\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(16, Error::Serialize as u8);\n    /// ```\n    Serialize = 16,\n    /// Source and target purse [`crate::URef`]s are equal.\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(17, Error::EqualSourceAndTarget as u8);\n    /// ```\n    EqualSourceAndTarget = 17,\n    /// An arithmetic overflow has occurred.\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(18, Error::ArithmeticOverflow as u8);\n    /// ```\n    ArithmeticOverflow = 18,\n\n    // NOTE: These variants below will be removed once support for WASM system contracts will be\n    // dropped.\n    #[doc(hidden)]\n    GasLimit = 19,\n\n    /// Raised when an entry point is called from invalid account context.\n    InvalidContext = 20,\n\n    /// Session code tried to transfer more CSPR than user approved.\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(21, Error::UnapprovedSpendingAmount as u8);\n    /// ```\n    UnapprovedSpendingAmount = 21,\n\n    /// Failed to transfer tokens on a private chain.\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(22, Error::DisabledUnrestrictedTransfers as u8);\n    /// ```\n    DisabledUnrestrictedTransfers = 22,\n\n    /// Attempt to access a record using forged permissions.\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(23, Error::ForgedReference as u8);\n    /// ```\n    ForgedReference = 23,\n    /// Available balance can never be greater than total balance.\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(24, Error::InconsistentBalances as u8);\n    /// ```\n    InconsistentBalances = 24,\n    /// Unable to get the system registry.\n    /// ```\n    /// # use casper_types::system::mint::Error;\n    /// assert_eq!(25, Error::UnableToGetSystemRegistry as u8);\n    /// ```\n    UnableToGetSystemRegistry = 25,\n\n    #[cfg(test)]\n    #[doc(hidden)]\n 
   Sentinel,\n}\n\n/// Used for testing; this should be guaranteed to be the maximum valid value of [`Error`] enum.\n#[cfg(test)]\nconst MAX_ERROR_VALUE: u8 = Error::Sentinel as u8;\n\nimpl CLTyped for Error {\n    fn cl_type() -> CLType {\n        CLType::U8\n    }\n}\n\n// This error type is not intended to be used by third party crates.\n#[doc(hidden)]\npub struct TryFromU8ForError(());\n\n// This conversion is not intended to be used by third party crates.\n#[doc(hidden)]\nimpl TryFrom<u8> for Error {\n    type Error = TryFromU8ForError;\n\n    fn try_from(value: u8) -> Result<Self, Self::Error> {\n        match value {\n            d if d == Error::InsufficientFunds as u8 => Ok(Error::InsufficientFunds),\n            d if d == Error::SourceNotFound as u8 => Ok(Error::SourceNotFound),\n            d if d == Error::DestNotFound as u8 => Ok(Error::DestNotFound),\n            d if d == Error::InvalidURef as u8 => Ok(Error::InvalidURef),\n            d if d == Error::InvalidAccessRights as u8 => Ok(Error::InvalidAccessRights),\n            d if d == Error::InvalidNonEmptyPurseCreation as u8 => {\n                Ok(Error::InvalidNonEmptyPurseCreation)\n            }\n            d if d == Error::Storage as u8 => Ok(Error::Storage),\n            d if d == Error::PurseNotFound as u8 => Ok(Error::PurseNotFound),\n            d if d == Error::MissingKey as u8 => Ok(Error::MissingKey),\n            d if d == Error::TotalSupplyNotFound as u8 => Ok(Error::TotalSupplyNotFound),\n            d if d == Error::RecordTransferFailure as u8 => Ok(Error::RecordTransferFailure),\n            d if d == Error::InvalidTotalSupplyReductionAttempt as u8 => {\n                Ok(Error::InvalidTotalSupplyReductionAttempt)\n            }\n            d if d == Error::NewURef as u8 => Ok(Error::NewURef),\n            d if d == Error::PutKey as u8 => Ok(Error::PutKey),\n            d if d == Error::WriteDictionary as u8 => Ok(Error::WriteDictionary),\n            d if d == Error::CLValue as 
u8 => Ok(Error::CLValue),\n            d if d == Error::Serialize as u8 => Ok(Error::Serialize),\n            d if d == Error::EqualSourceAndTarget as u8 => Ok(Error::EqualSourceAndTarget),\n            d if d == Error::ArithmeticOverflow as u8 => Ok(Error::ArithmeticOverflow),\n            d if d == Error::GasLimit as u8 => Ok(Error::GasLimit),\n            d if d == Error::InvalidContext as u8 => Ok(Error::InvalidContext),\n            d if d == Error::UnapprovedSpendingAmount as u8 => Ok(Error::UnapprovedSpendingAmount),\n            d if d == Error::DisabledUnrestrictedTransfers as u8 => {\n                Ok(Error::DisabledUnrestrictedTransfers)\n            }\n            d if d == Error::ForgedReference as u8 => Ok(Error::ForgedReference),\n            d if d == Error::InconsistentBalances as u8 => Ok(Error::InconsistentBalances),\n            d if d == Error::UnableToGetSystemRegistry as u8 => {\n                Ok(Error::UnableToGetSystemRegistry)\n            }\n            _ => Err(TryFromU8ForError(())),\n        }\n    }\n}\n\nimpl ToBytes for Error {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let value = *self as u8;\n        value.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n    }\n}\n\nimpl FromBytes for Error {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (value, rem): (u8, _) = FromBytes::from_bytes(bytes)?;\n        let error: Error = value\n            .try_into()\n            // In case an Error variant is unable to be determined it would return an\n            // Error::Formatting as if its unable to be correctly deserialized.\n            .map_err(|_| bytesrepr::Error::Formatting)?;\n        Ok((error, rem))\n    }\n}\n\nimpl Display for Error {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            Error::InsufficientFunds => formatter.write_str(\"Insufficient funds\"),\n      
      Error::SourceNotFound => formatter.write_str(\"Source not found\"),\n            Error::DestNotFound => formatter.write_str(\"Destination not found\"),\n            Error::InvalidURef => formatter.write_str(\"Invalid URef\"),\n            Error::InvalidAccessRights => formatter.write_str(\"Invalid AccessRights\"),\n            Error::InvalidNonEmptyPurseCreation => {\n                formatter.write_str(\"Invalid non-empty purse creation\")\n            }\n            Error::Storage => formatter.write_str(\"Storage error\"),\n            Error::PurseNotFound => formatter.write_str(\"Purse not found\"),\n            Error::MissingKey => formatter.write_str(\"Missing key\"),\n            Error::TotalSupplyNotFound => formatter.write_str(\"Total supply not found\"),\n            Error::RecordTransferFailure => formatter.write_str(\"Failed to record transfer\"),\n            Error::InvalidTotalSupplyReductionAttempt => {\n                formatter.write_str(\"Invalid attempt to reduce total supply\")\n            }\n            Error::NewURef => formatter.write_str(\"Failed to create new uref\"),\n            Error::PutKey => formatter.write_str(\"Failed to put key\"),\n            Error::WriteDictionary => formatter.write_str(\"Failed to write dictionary\"),\n            Error::CLValue => formatter.write_str(\"Failed to create a CLValue\"),\n            Error::Serialize => formatter.write_str(\"Failed to serialize data\"),\n            Error::EqualSourceAndTarget => formatter.write_str(\"Invalid target purse\"),\n            Error::ArithmeticOverflow => formatter.write_str(\"Arithmetic overflow has occurred\"),\n            Error::GasLimit => formatter.write_str(\"GasLimit\"),\n            Error::InvalidContext => formatter.write_str(\"Invalid context\"),\n            Error::UnapprovedSpendingAmount => formatter.write_str(\"Unapproved spending amount\"),\n            Error::DisabledUnrestrictedTransfers => {\n                formatter.write_str(\"Disabled 
unrestricted transfers\")\n            }\n            Error::ForgedReference => formatter.write_str(\"Forged reference\"),\n            Error::InconsistentBalances => {\n                formatter.write_str(\"Available balance can never be greater than total balance\")\n            }\n            Error::UnableToGetSystemRegistry => {\n                formatter.write_str(\"Unable to get the system registry\")\n            }\n            #[cfg(test)]\n            Error::Sentinel => formatter.write_str(\"Sentinel error\"),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::{Error, TryFromU8ForError, MAX_ERROR_VALUE};\n\n    #[test]\n    fn error_round_trips() {\n        for i in 0..=u8::MAX {\n            match Error::try_from(i) {\n                Ok(error) if i < MAX_ERROR_VALUE => assert_eq!(error as u8, i),\n                Ok(error) => panic!(\n                    \"value of variant {:?} ({}) exceeds MAX_ERROR_VALUE ({})\",\n                    error, i, MAX_ERROR_VALUE\n                ),\n                Err(TryFromU8ForError(())) if i >= MAX_ERROR_VALUE => (),\n                Err(TryFromU8ForError(())) => {\n                    panic!(\"missing conversion from u8 to error value: {}\", i)\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/system/mint.rs",
    "content": "//! Contains implementation of a Mint contract functionality.\nmod balance_hold;\nmod constants;\nmod entry_points;\nmod error;\n\npub use balance_hold::{BalanceHoldAddr, BalanceHoldAddrTag};\npub use constants::*;\npub use entry_points::mint_entry_points;\npub use error::Error;\n"
  },
  {
    "path": "types/src/system/prepayment/prepayment_kind.rs",
    "content": "use crate::{\n    bytesrepr,\n    bytesrepr::{Bytes, ToBytes, U8_SERIALIZED_LENGTH},\n    Digest,\n};\nuse alloc::vec::Vec;\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n/// Container for bytes recording location, type and data for a gas pre payment\n#[derive(Eq, PartialEq, Debug, Clone, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct PrepaymentKind {\n    receipt: Digest,\n    prepayment_kind: u8,\n    prepayment_data: Bytes,\n}\n\nimpl ToBytes for PrepaymentKind {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.receipt.serialized_length()\n            + U8_SERIALIZED_LENGTH\n            + self.prepayment_data.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.receipt.write_bytes(writer)?;\n        self.prepayment_kind.write_bytes(writer)?;\n        self.prepayment_data.write_bytes(writer)?;\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "types/src/system/prepayment.rs",
    "content": "//! Contains implementation of the gas prepayment system\nmod prepayment_kind;\n\npub use prepayment_kind::PrepaymentKind;\n"
  },
  {
    "path": "types/src/system/standard_payment/constants.rs",
    "content": "/// Named constant for `amount`.\npub const ARG_AMOUNT: &str = \"amount\";\n\n/// Named constant for method `pay`.\npub const METHOD_PAY: &str = \"pay\";\n\n/// Storage for standard payment contract hash.\npub const HASH_KEY: &str = \"standard_payment_hash\";\n/// Storage for standard payment access key.\npub const ACCESS_KEY: &str = \"standard_payment_access\";\n"
  },
  {
    "path": "types/src/system/standard_payment/entry_points.rs",
    "content": "use alloc::{boxed::Box, string::ToString};\n\nuse crate::{\n    system::standard_payment::{ARG_AMOUNT, METHOD_PAY},\n    CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints,\n    Parameter,\n};\n\n/// Creates standard payment contract entry points.\npub fn standard_payment_entry_points() -> EntryPoints {\n    let mut entry_points = EntryPoints::new();\n\n    let entry_point = EntityEntryPoint::new(\n        METHOD_PAY.to_string(),\n        vec![Parameter::new(ARG_AMOUNT, CLType::U512)],\n        CLType::Result {\n            ok: Box::new(CLType::Unit),\n            err: Box::new(CLType::U32),\n        },\n        EntryPointAccess::Public,\n        EntryPointType::Caller,\n        EntryPointPayment::Caller,\n    );\n    entry_points.add_entry_point(entry_point);\n\n    entry_points\n}\n"
  },
  {
    "path": "types/src/system/standard_payment.rs",
    "content": "//! Contains implementation of a standard payment contract implementation.\nmod constants;\nmod entry_points;\n\npub use constants::*;\npub use entry_points::standard_payment_entry_points;\n"
  },
  {
    "path": "types/src/system/system_contract_type.rs",
    "content": "//! Home of system contract type enum.\n\nuse alloc::{\n    string::{String, ToString},\n    vec::Vec,\n};\nuse core::{\n    convert::TryFrom,\n    fmt::{self, Display, Formatter},\n};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    ApiError, EntryPoints,\n};\n\nconst MINT_TAG: u8 = 0;\nconst HANDLE_PAYMENT_TAG: u8 = 1;\nconst STANDARD_PAYMENT_TAG: u8 = 2;\nconst AUCTION_TAG: u8 = 3;\n\nuse super::{\n    auction::auction_entry_points, handle_payment::handle_payment_entry_points,\n    mint::mint_entry_points, standard_payment::standard_payment_entry_points,\n};\n\n/// System contract types.\n///\n/// Used by converting to a `u32` and passing as the `system_contract_index` argument of\n/// `ext_ffi::casper_get_system_contract()`.\n#[derive(\n    Debug, Clone, PartialEq, Eq, Default, PartialOrd, Ord, Hash, Serialize, Deserialize, Copy,\n)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub enum SystemEntityType {\n    /// Mint contract.\n    #[default]\n    Mint,\n    /// Handle Payment contract.\n    HandlePayment,\n    /// Standard Payment contract.\n    StandardPayment,\n    /// Auction contract.\n    Auction,\n}\n\nimpl ToBytes for SystemEntityType {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        match self {\n            SystemEntityType::Mint => {\n                
writer.push(MINT_TAG);\n            }\n            SystemEntityType::HandlePayment => {\n                writer.push(HANDLE_PAYMENT_TAG);\n            }\n            SystemEntityType::StandardPayment => {\n                writer.push(STANDARD_PAYMENT_TAG);\n            }\n            SystemEntityType::Auction => writer.push(AUCTION_TAG),\n        }\n        Ok(())\n    }\n}\n\nimpl FromBytes for SystemEntityType {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            MINT_TAG => Ok((SystemEntityType::Mint, remainder)),\n            HANDLE_PAYMENT_TAG => Ok((SystemEntityType::HandlePayment, remainder)),\n            STANDARD_PAYMENT_TAG => Ok((SystemEntityType::StandardPayment, remainder)),\n            AUCTION_TAG => Ok((SystemEntityType::Auction, remainder)),\n            _ => Err(Error::Formatting),\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<SystemEntityType> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> SystemEntityType {\n        match rng.gen_range(0..=3) {\n            0 => SystemEntityType::Mint,\n            1 => SystemEntityType::Auction,\n            2 => SystemEntityType::StandardPayment,\n            3 => SystemEntityType::HandlePayment,\n            _ => unreachable!(),\n        }\n    }\n}\n\n/// Name of mint system contract\npub const MINT: &str = \"mint\";\n/// Name of handle payment system contract\npub const HANDLE_PAYMENT: &str = \"handle payment\";\n/// Name of standard payment system contract\npub const STANDARD_PAYMENT: &str = \"standard payment\";\n/// Name of auction system contract\npub const AUCTION: &str = \"auction\";\n\nimpl SystemEntityType {\n    /// Returns the name of the system contract.\n    pub fn entity_name(&self) -> String {\n        match self {\n            SystemEntityType::Mint => MINT.to_string(),\n            SystemEntityType::HandlePayment => 
HANDLE_PAYMENT.to_string(),\n            SystemEntityType::StandardPayment => STANDARD_PAYMENT.to_string(),\n            SystemEntityType::Auction => AUCTION.to_string(),\n        }\n    }\n\n    /// Returns the entrypoint of the system contract.\n    pub fn entry_points(&self) -> EntryPoints {\n        match self {\n            SystemEntityType::Mint => mint_entry_points(),\n            SystemEntityType::HandlePayment => handle_payment_entry_points(),\n            SystemEntityType::StandardPayment => standard_payment_entry_points(),\n            SystemEntityType::Auction => auction_entry_points(),\n        }\n    }\n}\n\nimpl From<SystemEntityType> for u32 {\n    fn from(system_contract_type: SystemEntityType) -> u32 {\n        match system_contract_type {\n            SystemEntityType::Mint => 0,\n            SystemEntityType::HandlePayment => 1,\n            SystemEntityType::StandardPayment => 2,\n            SystemEntityType::Auction => 3,\n        }\n    }\n}\n\n// This conversion is not intended to be used by third party crates.\n#[doc(hidden)]\nimpl TryFrom<u32> for SystemEntityType {\n    type Error = ApiError;\n    fn try_from(value: u32) -> Result<SystemEntityType, Self::Error> {\n        match value {\n            0 => Ok(SystemEntityType::Mint),\n            1 => Ok(SystemEntityType::HandlePayment),\n            2 => Ok(SystemEntityType::StandardPayment),\n            3 => Ok(SystemEntityType::Auction),\n            _ => Err(ApiError::InvalidSystemContract),\n        }\n    }\n}\n\nimpl Display for SystemEntityType {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        match *self {\n            SystemEntityType::Mint => write!(f, \"{}\", MINT),\n            SystemEntityType::HandlePayment => write!(f, \"{}\", HANDLE_PAYMENT),\n            SystemEntityType::StandardPayment => write!(f, \"{}\", STANDARD_PAYMENT),\n            SystemEntityType::Auction => write!(f, \"{}\", AUCTION),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use 
std::string::ToString;\n\n    use super::*;\n\n    #[test]\n    fn get_index_of_mint_contract() {\n        let index: u32 = SystemEntityType::Mint.into();\n        assert_eq!(index, 0u32);\n        assert_eq!(SystemEntityType::Mint.to_string(), MINT);\n    }\n\n    #[test]\n    fn get_index_of_handle_payment_contract() {\n        let index: u32 = SystemEntityType::HandlePayment.into();\n        assert_eq!(index, 1u32);\n        assert_eq!(SystemEntityType::HandlePayment.to_string(), HANDLE_PAYMENT);\n    }\n\n    #[test]\n    fn get_index_of_standard_payment_contract() {\n        let index: u32 = SystemEntityType::StandardPayment.into();\n        assert_eq!(index, 2u32);\n        assert_eq!(\n            SystemEntityType::StandardPayment.to_string(),\n            STANDARD_PAYMENT\n        );\n    }\n\n    #[test]\n    fn get_index_of_auction_contract() {\n        let index: u32 = SystemEntityType::Auction.into();\n        assert_eq!(index, 3u32);\n        assert_eq!(SystemEntityType::Auction.to_string(), AUCTION);\n    }\n\n    #[test]\n    fn create_mint_variant_from_int() {\n        let mint = SystemEntityType::try_from(0).ok().unwrap();\n        assert_eq!(mint, SystemEntityType::Mint);\n    }\n\n    #[test]\n    fn create_handle_payment_variant_from_int() {\n        let handle_payment = SystemEntityType::try_from(1).ok().unwrap();\n        assert_eq!(handle_payment, SystemEntityType::HandlePayment);\n    }\n\n    #[test]\n    fn create_standard_payment_variant_from_int() {\n        let handle_payment = SystemEntityType::try_from(2).ok().unwrap();\n        assert_eq!(handle_payment, SystemEntityType::StandardPayment);\n    }\n\n    #[test]\n    fn create_auction_variant_from_int() {\n        let auction = SystemEntityType::try_from(3).ok().unwrap();\n        assert_eq!(auction, SystemEntityType::Auction);\n    }\n\n    #[test]\n    fn create_unknown_system_contract_variant() {\n        assert!(SystemEntityType::try_from(4).is_err());\n        
assert!(SystemEntityType::try_from(5).is_err());\n        assert!(SystemEntityType::try_from(10).is_err());\n        assert!(SystemEntityType::try_from(u32::MAX).is_err());\n    }\n}\n"
  },
  {
    "path": "types/src/system.rs",
    "content": "//! System modules, formerly known as \"system contracts\"\npub mod auction;\nmod caller;\nmod error;\npub mod handle_payment;\npub mod mint;\npub mod prepayment;\npub mod standard_payment;\nmod system_contract_type;\n\npub use caller::{CallStackElement, Caller, CallerInfo, CallerTag};\npub use error::Error;\npub use system_contract_type::{SystemEntityType, AUCTION, HANDLE_PAYMENT, MINT, STANDARD_PAYMENT};\n"
  },
  {
    "path": "types/src/tagged.rs",
    "content": "/// The quality of having a tag\npub trait Tagged<T> {\n    /// Returns the tag of a given object\n    fn tag(&self) -> T;\n}\n"
  },
  {
    "path": "types/src/testing.rs",
    "content": "//! An RNG for testing purposes.\nuse std::{\n    cell::RefCell,\n    cmp, env,\n    fmt::{self, Debug, Display, Formatter},\n    iter, thread,\n};\n\nuse rand::{\n    self,\n    distributions::{uniform::SampleRange, Distribution, Standard},\n    CryptoRng, Error, Rng, RngCore, SeedableRng,\n};\nuse rand_pcg::Pcg64Mcg;\n\nthread_local! {\n    static THIS_THREAD_HAS_RNG: RefCell<bool> = const { RefCell::new(false) };\n}\n\nconst CL_TEST_SEED: &str = \"CL_TEST_SEED\";\n\ntype Seed = <Pcg64Mcg as SeedableRng>::Seed; // [u8; 16]\n\n/// A fast, seedable pseudorandom number generator for use in tests which prints the seed if the\n/// thread in which it is created panics.\n///\n/// Only one `TestRng` is permitted per thread.\npub struct TestRng {\n    seed: Seed,\n    rng: Pcg64Mcg,\n}\n\nimpl TestRng {\n    /// Constructs a new `TestRng` using a seed generated from the env var `CL_TEST_SEED` if set or\n    /// from cryptographically secure random data if not.\n    ///\n    /// Note that `new()` or `default()` should only be called once per test.  If a test needs to\n    /// spawn multiple threads each with their own `TestRng`, then use `new()` to create a single,\n    /// master `TestRng`, then use it to create a seed per child thread.  
The child `TestRng`s can\n    /// then be constructed in their own threads via `from_seed()`.\n    ///\n    /// # Panics\n    ///\n    /// Panics if a `TestRng` has already been created on this thread.\n    pub fn new() -> Self {\n        Self::set_flag_or_panic();\n\n        let mut seed = Seed::default();\n        match env::var(CL_TEST_SEED) {\n            Ok(seed_as_hex) => {\n                base16::decode_slice(&seed_as_hex, &mut seed).unwrap_or_else(|error| {\n                    THIS_THREAD_HAS_RNG.with(|flag| {\n                        *flag.borrow_mut() = false;\n                    });\n                    panic!(\"can't parse '{}' as a TestRng seed: {}\", seed_as_hex, error)\n                });\n            }\n            Err(_) => {\n                rand::thread_rng().fill(&mut seed);\n            }\n        };\n\n        let rng = Pcg64Mcg::from_seed(seed);\n\n        TestRng { seed, rng }\n    }\n\n    /// Constructs a new `TestRng` using `seed`.  This should be used in cases where a test needs to\n    /// spawn multiple threads each with their own `TestRng`.  
A single, master `TestRng` should be\n    /// constructed before any child threads are spawned, and that one should be used to create\n    /// seeds for the child threads' `TestRng`s.\n    ///\n    /// # Panics\n    ///\n    /// Panics if a `TestRng` has already been created on this thread.\n    pub fn from_seed(seed: Seed) -> Self {\n        Self::set_flag_or_panic();\n        let rng = Pcg64Mcg::from_seed(seed);\n        TestRng { seed, rng }\n    }\n\n    /// Returns a random `String` of length within the range specified by `length_range`.\n    pub fn random_string<R: SampleRange<usize>>(&mut self, length_range: R) -> String {\n        let count = self.gen_range(length_range);\n        iter::repeat_with(|| self.gen::<char>())\n            .take(count)\n            .collect()\n    }\n\n    /// Returns a random `Vec` of length within the range specified by `length_range`.\n    pub fn random_vec<R: SampleRange<usize>, T>(&mut self, length_range: R) -> Vec<T>\n    where\n        Standard: Distribution<T>,\n    {\n        let count = self.gen_range(length_range);\n        iter::repeat_with(|| self.gen::<T>()).take(count).collect()\n    }\n\n    fn set_flag_or_panic() {\n        THIS_THREAD_HAS_RNG.with(|flag| {\n            if *flag.borrow() {\n                panic!(\"cannot create multiple TestRngs on the same thread\");\n            }\n            *flag.borrow_mut() = true;\n        });\n    }\n\n    /// Creates a child RNG.\n    ///\n    /// The resulting RNG is seeded from `self` deterministically.\n    pub fn create_child(&mut self) -> Self {\n        let seed = self.gen();\n        let rng = Pcg64Mcg::from_seed(seed);\n        TestRng { seed, rng }\n    }\n}\n\nimpl Default for TestRng {\n    fn default() -> Self {\n        TestRng::new()\n    }\n}\n\nimpl Display for TestRng {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(\n            formatter,\n            \"TestRng seed: {}\",\n            
base16::encode_lower(&self.seed)\n        )\n    }\n}\n\nimpl Debug for TestRng {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        Display::fmt(self, formatter)\n    }\n}\n\nimpl Drop for TestRng {\n    fn drop(&mut self) {\n        if thread::panicking() {\n            let line_1 = format!(\"Thread: {}\", thread::current().name().unwrap_or(\"unnamed\"));\n            let line_2 = \"To reproduce failure, try running with env var:\";\n            let line_3 = format!(\"{}={}\", CL_TEST_SEED, base16::encode_lower(&self.seed));\n            let max_length = cmp::max(line_1.len(), line_2.len());\n            let border = \"=\".repeat(max_length);\n            println!(\n                \"\\n{}\\n{}\\n{}\\n{}\\n{}\\n\",\n                border, line_1, line_2, line_3, border\n            );\n        }\n    }\n}\n\nimpl SeedableRng for TestRng {\n    type Seed = <Pcg64Mcg as SeedableRng>::Seed;\n\n    fn from_seed(seed: Self::Seed) -> Self {\n        Self::from_seed(seed)\n    }\n}\n\nimpl RngCore for TestRng {\n    fn next_u32(&mut self) -> u32 {\n        self.rng.next_u32()\n    }\n\n    fn next_u64(&mut self) -> u64 {\n        self.rng.next_u64()\n    }\n\n    fn fill_bytes(&mut self, dest: &mut [u8]) {\n        self.rng.fill_bytes(dest)\n    }\n\n    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {\n        self.rng.try_fill_bytes(dest)\n    }\n}\n\nimpl CryptoRng for TestRng {}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    #[should_panic(expected = \"cannot create multiple TestRngs on the same thread\")]\n    fn second_test_rng_in_thread_should_panic() {\n        let _test_rng1 = TestRng::new();\n        let seed = [1; 16];\n        let _test_rng2 = TestRng::from_seed(seed);\n    }\n}\n"
  },
  {
    "path": "types/src/timestamp.rs",
    "content": "use alloc::vec::Vec;\nuse core::{\n    fmt::{self, Display, Formatter},\n    ops::{Add, AddAssign, Div, Mul, Rem, Shl, Shr, Sub, SubAssign},\n    time::Duration,\n};\n#[cfg(any(feature = \"std\", test))]\nuse std::{str::FromStr, time::SystemTime};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"std\", test))]\nuse humantime::{DurationError, TimestampError};\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\n#[cfg(any(feature = \"std\", test))]\nuse serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer};\n\nuse crate::bytesrepr::{self, FromBytes, ToBytes};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\n\n/// Example timestamp equal to 2020-11-17T00:39:24.072Z.\n#[cfg(feature = \"json-schema\")]\nconst TIMESTAMP: Timestamp = Timestamp(1_605_573_564_072);\n\n/// A timestamp type, representing a concrete moment in time.\n#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"Timestamp formatted as per RFC 3339\")\n)]\n//Default is needed only in testing to meet EnumIter needs\n#[cfg_attr(any(feature = \"testing\", test), derive(Default))]\npub struct Timestamp(#[cfg_attr(feature = \"json-schema\", schemars(with = \"String\"))] u64);\n\nimpl Timestamp {\n    /// The maximum value a timestamp can have.\n    pub const MAX: Timestamp = Timestamp(u64::MAX);\n\n    #[cfg(any(feature = \"std\", test))]\n    /// Returns the timestamp of the current moment.\n    pub fn now() -> Self {\n        let millis = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_millis() as u64;\n        Timestamp(millis)\n    }\n\n    #[cfg(any(feature = \"std\", test))]\n    /// Returns the time that has elapsed since this timestamp.\n    pub fn elapsed(&self) -> 
TimeDiff {\n        TimeDiff(Timestamp::now().0.saturating_sub(self.0))\n    }\n\n    /// Returns a zero timestamp.\n    pub fn zero() -> Self {\n        Timestamp(0)\n    }\n\n    /// Returns the timestamp as the number of milliseconds since the Unix epoch\n    pub fn millis(&self) -> u64 {\n        self.0\n    }\n\n    /// Returns the difference between `self` and `other`, or `0` if `self` is earlier than `other`.\n    pub fn saturating_diff(self, other: Timestamp) -> TimeDiff {\n        TimeDiff(self.0.saturating_sub(other.0))\n    }\n\n    /// Returns the difference between `self` and `other`, or `0` if that would be before the epoch.\n    #[must_use]\n    pub fn saturating_sub(self, other: TimeDiff) -> Timestamp {\n        Timestamp(self.0.saturating_sub(other.0))\n    }\n\n    /// Returns the sum of `self` and `other`, or the maximum possible value if that would be\n    /// exceeded.\n    #[must_use]\n    pub fn saturating_add(self, other: TimeDiff) -> Timestamp {\n        Timestamp(self.0.saturating_add(other.0))\n    }\n\n    /// Returns the number of trailing zeros in the number of milliseconds since the epoch.\n    pub fn trailing_zeros(&self) -> u8 {\n        self.0.trailing_zeros() as u8\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    #[cfg(feature = \"json-schema\")]\n    pub fn example() -> &'static Self {\n        &TIMESTAMP\n    }\n\n    /// Returns a random `Timestamp`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        Timestamp(1_596_763_000_000 + rng.gen_range(200_000..1_000_000))\n    }\n\n    /// Checked subtraction for timestamps\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn checked_sub(self, other: TimeDiff) -> Option<Timestamp> {\n        self.0.checked_sub(other.0).map(Timestamp)\n    }\n}\n\nimpl Display for Timestamp {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        #[cfg(any(feature = \"std\", test))]\n 
       return match SystemTime::UNIX_EPOCH.checked_add(Duration::from_millis(self.0)) {\n            Some(system_time) => write!(f, \"{}\", humantime::format_rfc3339_millis(system_time))\n                .or_else(|e| write!(f, \"Invalid timestamp: {}: {}\", e, self.0)),\n            None => write!(f, \"invalid Timestamp: {} ms after the Unix epoch\", self.0),\n        };\n\n        #[cfg(not(any(feature = \"std\", test)))]\n        write!(f, \"timestamp({}ms)\", self.0)\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl FromStr for Timestamp {\n    type Err = TimestampError;\n\n    fn from_str(value: &str) -> Result<Self, Self::Err> {\n        let system_time = humantime::parse_rfc3339_weak(value)?;\n        let inner = system_time\n            .duration_since(SystemTime::UNIX_EPOCH)\n            .map_err(|_| TimestampError::OutOfRange)?\n            .as_millis() as u64;\n        Ok(Timestamp(inner))\n    }\n}\n\nimpl Add<TimeDiff> for Timestamp {\n    type Output = Timestamp;\n\n    fn add(self, diff: TimeDiff) -> Timestamp {\n        Timestamp(self.0 + diff.0)\n    }\n}\n\nimpl AddAssign<TimeDiff> for Timestamp {\n    fn add_assign(&mut self, rhs: TimeDiff) {\n        self.0 += rhs.0;\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Sub<TimeDiff> for Timestamp {\n    type Output = Timestamp;\n\n    fn sub(self, diff: TimeDiff) -> Timestamp {\n        Timestamp(self.0 - diff.0)\n    }\n}\n\nimpl Rem<TimeDiff> for Timestamp {\n    type Output = TimeDiff;\n\n    fn rem(self, diff: TimeDiff) -> TimeDiff {\n        TimeDiff(self.0 % diff.0)\n    }\n}\n\nimpl<T> Shl<T> for Timestamp\nwhere\n    u64: Shl<T, Output = u64>,\n{\n    type Output = Timestamp;\n\n    fn shl(self, rhs: T) -> Timestamp {\n        Timestamp(self.0 << rhs)\n    }\n}\n\nimpl<T> Shr<T> for Timestamp\nwhere\n    u64: Shr<T, Output = u64>,\n{\n    type Output = Timestamp;\n\n    fn shr(self, rhs: T) -> Timestamp {\n        Timestamp(self.0 >> rhs)\n    }\n}\n\n#[cfg(any(feature = 
\"std\", test))]\nimpl Serialize for Timestamp {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            self.to_string().serialize(serializer)\n        } else {\n            self.0.serialize(serializer)\n        }\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl<'de> Deserialize<'de> for Timestamp {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        if deserializer.is_human_readable() {\n            let value_as_string = String::deserialize(deserializer)?;\n            Timestamp::from_str(&value_as_string).map_err(SerdeError::custom)\n        } else {\n            let inner = u64::deserialize(deserializer)?;\n            Ok(Timestamp(inner))\n        }\n    }\n}\n\nimpl ToBytes for Timestamp {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n}\n\nimpl FromBytes for Timestamp {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        u64::from_bytes(bytes).map(|(inner, remainder)| (Timestamp(inner), remainder))\n    }\n}\n\nimpl From<u64> for Timestamp {\n    fn from(milliseconds_since_epoch: u64) -> Timestamp {\n        Timestamp(milliseconds_since_epoch)\n    }\n}\n\n/// A time difference between two timestamps.\n#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"Human-readable duration.\")\n)]\npub struct TimeDiff(#[cfg_attr(feature = \"json-schema\", schemars(with = \"String\"))] u64);\n\nimpl Display for TimeDiff {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        #[cfg(any(feature = \"std\", test))]\n        return write!(f, \"{}\", 
humantime::format_duration(Duration::from(*self)));\n\n        #[cfg(not(any(feature = \"std\", test)))]\n        write!(f, \"time diff({}ms)\", self.0)\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl FromStr for TimeDiff {\n    type Err = DurationError;\n\n    fn from_str(value: &str) -> Result<Self, Self::Err> {\n        let inner = humantime::parse_duration(value)?.as_millis() as u64;\n        Ok(TimeDiff(inner))\n    }\n}\n\nimpl TimeDiff {\n    /// Zero diff.\n    pub const ZERO: TimeDiff = TimeDiff(0);\n\n    /// Returns the time difference as the number of milliseconds since the Unix epoch\n    pub const fn millis(&self) -> u64 {\n        self.0\n    }\n\n    /// Creates a new time difference from seconds.\n    pub const fn from_seconds(seconds: u32) -> Self {\n        TimeDiff(seconds as u64 * 1_000)\n    }\n\n    /// Creates a new time difference from milliseconds.\n    pub const fn from_millis(millis: u64) -> Self {\n        TimeDiff(millis)\n    }\n\n    /// Returns the sum, or `TimeDiff(u64::MAX)` if it would overflow.\n    #[must_use]\n    pub fn saturating_add(self, rhs: u64) -> Self {\n        TimeDiff(self.0.saturating_add(rhs))\n    }\n\n    /// Returns the product, or `TimeDiff(u64::MAX)` if it would overflow.\n    #[must_use]\n    pub fn saturating_mul(self, rhs: u64) -> Self {\n        TimeDiff(self.0.saturating_mul(rhs))\n    }\n\n    /// Returns the product, or `None` if it would overflow.\n    #[must_use]\n    pub fn checked_mul(self, rhs: u64) -> Option<Self> {\n        Some(TimeDiff(self.0.checked_mul(rhs)?))\n    }\n}\n\nimpl Add<TimeDiff> for TimeDiff {\n    type Output = TimeDiff;\n\n    fn add(self, rhs: TimeDiff) -> TimeDiff {\n        TimeDiff(self.0 + rhs.0)\n    }\n}\n\nimpl AddAssign<TimeDiff> for TimeDiff {\n    fn add_assign(&mut self, rhs: TimeDiff) {\n        self.0 += rhs.0;\n    }\n}\n\nimpl Sub<TimeDiff> for TimeDiff {\n    type Output = TimeDiff;\n\n    fn sub(self, rhs: TimeDiff) -> TimeDiff {\n        
TimeDiff(self.0 - rhs.0)\n    }\n}\n\nimpl SubAssign<TimeDiff> for TimeDiff {\n    fn sub_assign(&mut self, rhs: TimeDiff) {\n        self.0 -= rhs.0;\n    }\n}\n\nimpl Mul<u64> for TimeDiff {\n    type Output = TimeDiff;\n\n    fn mul(self, rhs: u64) -> TimeDiff {\n        TimeDiff(self.0 * rhs)\n    }\n}\n\nimpl Div<u64> for TimeDiff {\n    type Output = TimeDiff;\n\n    fn div(self, rhs: u64) -> TimeDiff {\n        TimeDiff(self.0 / rhs)\n    }\n}\n\nimpl Div<TimeDiff> for TimeDiff {\n    type Output = u64;\n\n    fn div(self, rhs: TimeDiff) -> u64 {\n        self.0 / rhs.0\n    }\n}\n\nimpl From<TimeDiff> for Duration {\n    fn from(diff: TimeDiff) -> Duration {\n        Duration::from_millis(diff.0)\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl Serialize for TimeDiff {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            self.to_string().serialize(serializer)\n        } else {\n            self.0.serialize(serializer)\n        }\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl<'de> Deserialize<'de> for TimeDiff {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        if deserializer.is_human_readable() {\n            let value_as_string = String::deserialize(deserializer)?;\n            TimeDiff::from_str(&value_as_string).map_err(SerdeError::custom)\n        } else {\n            let inner = u64::deserialize(deserializer)?;\n            Ok(TimeDiff(inner))\n        }\n    }\n}\n\nimpl ToBytes for TimeDiff {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n}\n\nimpl FromBytes for TimeDiff {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        u64::from_bytes(bytes).map(|(inner, remainder)| (TimeDiff(inner), remainder))\n    }\n}\n\nimpl 
From<Duration> for TimeDiff {\n    fn from(duration: Duration) -> TimeDiff {\n        TimeDiff(duration.as_millis() as u64)\n    }\n}\n\n/// A module for the `[serde(with = serde_option_time_diff)]` attribute, to serialize and\n/// deserialize `Option<TimeDiff>` treating `None` as 0.\n#[cfg(any(feature = \"std\", test))]\npub mod serde_option_time_diff {\n    use super::*;\n\n    /// Serializes an `Option<TimeDiff>`, using `0` if the value is `None`.\n    pub fn serialize<S: Serializer>(\n        maybe_td: &Option<TimeDiff>,\n        serializer: S,\n    ) -> Result<S::Ok, S::Error> {\n        maybe_td\n            .unwrap_or_else(|| TimeDiff::from_millis(0))\n            .serialize(serializer)\n    }\n\n    /// Deserializes an `Option<TimeDiff>`, returning `None` if the value is `0`.\n    pub fn deserialize<'de, D: Deserializer<'de>>(\n        deserializer: D,\n    ) -> Result<Option<TimeDiff>, D::Error> {\n        let td = TimeDiff::deserialize(deserializer)?;\n        if td.0 == 0 {\n            Ok(None)\n        } else {\n            Ok(Some(td))\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn timestamp_serialization_roundtrip() {\n        let timestamp = Timestamp::now();\n\n        let timestamp_as_string = timestamp.to_string();\n        assert_eq!(\n            timestamp,\n            Timestamp::from_str(&timestamp_as_string).unwrap()\n        );\n\n        let serialized_json = serde_json::to_string(&timestamp).unwrap();\n        assert_eq!(timestamp, serde_json::from_str(&serialized_json).unwrap());\n\n        let serialized_bincode = bincode::serialize(&timestamp).unwrap();\n        assert_eq!(\n            timestamp,\n            bincode::deserialize(&serialized_bincode).unwrap()\n        );\n\n        bytesrepr::test_serialization_roundtrip(&timestamp);\n    }\n\n    #[test]\n    fn timediff_serialization_roundtrip() {\n        let mut rng = TestRng::new();\n        let timediff = TimeDiff(rng.gen());\n\n      
  let timediff_as_string = timediff.to_string();\n        assert_eq!(timediff, TimeDiff::from_str(&timediff_as_string).unwrap());\n\n        let serialized_json = serde_json::to_string(&timediff).unwrap();\n        assert_eq!(timediff, serde_json::from_str(&serialized_json).unwrap());\n\n        let serialized_bincode = bincode::serialize(&timediff).unwrap();\n        assert_eq!(timediff, bincode::deserialize(&serialized_bincode).unwrap());\n\n        bytesrepr::test_serialization_roundtrip(&timediff);\n    }\n\n    #[test]\n    fn does_not_crash_for_big_timestamp_value() {\n        assert!(Timestamp::MAX.to_string().starts_with(\"Invalid timestamp:\"));\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/addressable_entity_identifier.rs",
    "content": "use alloc::{string::String, vec::Vec};\nuse core::fmt::{self, Debug, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(doc)]\nuse super::{ExecutableDeployItem, TransactionTarget};\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    AddressableEntityHash, EntityAddr,\n};\n\nconst HASH_TAG: u8 = 0;\nconst NAME_TAG: u8 = 1;\nconst ADDR_TAG: u8 = 2;\n\n/// Identifier for the contract object within a [`TransactionTarget::Stored`] or an\n/// [`ExecutableDeployItem`].\n#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(\n        description = \"Identifier for the contract object within a `Stored` transaction target \\\n        or an `ExecutableDeployItem`.\"\n    )\n)]\n#[serde(deny_unknown_fields)]\npub enum AddressableEntityIdentifier {\n    /// The hash identifying the addressable entity.\n    Hash(AddressableEntityHash),\n    /// The name identifying the addressable entity.\n    Name(String),\n    /// The entity address\n    Addr(EntityAddr),\n}\n\nimpl AddressableEntityIdentifier {\n    /// Returns a random `AddressableEntityIdentifier`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        if rng.gen() {\n            if rng.gen() {\n                AddressableEntityIdentifier::Hash(AddressableEntityHash::new(rng.gen()))\n            } else {\n                AddressableEntityIdentifier::Addr(EntityAddr::new_of_kind(rng.gen(), rng.gen()))\n            }\n        } else {\n            
AddressableEntityIdentifier::Name(rng.random_string(1..21))\n        }\n    }\n}\n\nimpl Display for AddressableEntityIdentifier {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            AddressableEntityIdentifier::Hash(hash) => write!(formatter, \"entity-hash({})\", hash),\n            AddressableEntityIdentifier::Name(name) => write!(formatter, \"entity-name({})\", name),\n            AddressableEntityIdentifier::Addr(entity_addr) => {\n                write!(formatter, \"entity-addr({})\", entity_addr)\n            }\n        }\n    }\n}\n\nimpl ToBytes for AddressableEntityIdentifier {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            AddressableEntityIdentifier::Hash(hash) => {\n                HASH_TAG.write_bytes(writer)?;\n                hash.write_bytes(writer)\n            }\n            AddressableEntityIdentifier::Name(name) => {\n                NAME_TAG.write_bytes(writer)?;\n                name.write_bytes(writer)\n            }\n            AddressableEntityIdentifier::Addr(entity_addr) => {\n                ADDR_TAG.write_bytes(writer)?;\n                entity_addr.write_bytes(writer)\n            }\n        }\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                AddressableEntityIdentifier::Hash(hash) => hash.serialized_length(),\n                AddressableEntityIdentifier::Name(name) => name.serialized_length(),\n                AddressableEntityIdentifier::Addr(addr) => addr.serialized_length(),\n            }\n    }\n}\n\nimpl FromBytes for AddressableEntityIdentifier {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, 
remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            HASH_TAG => {\n                let (hash, remainder) = AddressableEntityHash::from_bytes(remainder)?;\n                Ok((AddressableEntityIdentifier::Hash(hash), remainder))\n            }\n            NAME_TAG => {\n                let (name, remainder) = String::from_bytes(remainder)?;\n                Ok((AddressableEntityIdentifier::Name(name), remainder))\n            }\n            ADDR_TAG => {\n                let (addr, remainder) = EntityAddr::from_bytes(remainder)?;\n                Ok((AddressableEntityIdentifier::Addr(addr), remainder))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        for _ in 0..10 {\n            bytesrepr::test_serialization_roundtrip(&AddressableEntityIdentifier::random(rng));\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/approval.rs",
    "content": "use alloc::vec::Vec;\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    crypto, PublicKey, SecretKey, Signature,\n};\n\nuse super::TransactionHash;\n\n/// A struct containing a signature of a transaction hash and the public key of the signer.\n#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct Approval {\n    signer: PublicKey,\n    signature: Signature,\n}\n\nimpl Approval {\n    /// Creates an approval by signing the given transaction hash using the given secret key.\n    pub fn create(hash: &TransactionHash, secret_key: &SecretKey) -> Self {\n        let signer = PublicKey::from(secret_key);\n        let signature = crypto::sign(hash, secret_key, &signer);\n        Self { signer, signature }\n    }\n\n    /// Returns a new approval.\n    pub fn new(signer: PublicKey, signature: Signature) -> Self {\n        Self { signer, signature }\n    }\n\n    /// Returns the public key of the approval's signer.\n    pub fn signer(&self) -> &PublicKey {\n        &self.signer\n    }\n\n    /// Returns the approval signature.\n    pub fn signature(&self) -> &Signature {\n        &self.signature\n    }\n\n    /// Returns a random `Approval`.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        let hash = TransactionHash::random(rng);\n        let secret_key = SecretKey::random(rng);\n        Approval::create(&hash, &secret_key)\n    }\n}\n\nimpl Display for Approval {\n    fn fmt(&self, 
formatter: &mut Formatter) -> fmt::Result {\n        write!(formatter, \"approval({})\", self.signer)\n    }\n}\n\nimpl ToBytes for Approval {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.signer.write_bytes(writer)?;\n        self.signature.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.signer.serialized_length() + self.signature.serialized_length()\n    }\n}\n\nimpl FromBytes for Approval {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (signer, remainder) = PublicKey::from_bytes(bytes)?;\n        let (signature, remainder) = Signature::from_bytes(remainder)?;\n        let approval = Approval { signer, signature };\n        Ok((approval, remainder))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        let approval = Approval::random(rng);\n        bytesrepr::test_serialization_roundtrip(&approval);\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/approvals_hash.rs",
    "content": "use alloc::{collections::BTreeSet, vec::Vec};\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\nuse serde::{Deserialize, Serialize};\n\nuse super::Approval;\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    Digest,\n};\n\n/// The cryptographic hash of the bytesrepr-encoded set of approvals for a single [``].\n#[derive(\n    Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, Default,\n)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct ApprovalsHash(pub Digest);\n\nimpl ApprovalsHash {\n    /// The number of bytes in a `ApprovalsHash` digest.\n    pub const LENGTH: usize = Digest::LENGTH;\n\n    /// Constructs a new `ApprovalsHash` by bytesrepr-encoding `approvals` and creating\n    /// a [`Digest`] of this.\n    pub fn compute(approvals: &BTreeSet<Approval>) -> Result<Self, bytesrepr::Error> {\n        let digest = Digest::hash(approvals.to_bytes()?);\n        Ok(ApprovalsHash(digest))\n    }\n\n    /// Returns the wrapped inner digest.\n    pub fn inner(&self) -> &Digest {\n        &self.0\n    }\n\n    /// Returns a new `ApprovalsHash` directly initialized with the provided bytes; no\n    /// hashing is done.\n    #[cfg(any(feature = \"testing\", test))]\n    pub const fn from_raw(raw_digest: [u8; Self::LENGTH]) -> Self {\n        ApprovalsHash(Digest::from_raw(raw_digest))\n    }\n\n    /// Returns a random `ApprovalsHash`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        let hash = rng.gen::<[u8; Digest::LENGTH]>().into();\n        ApprovalsHash(hash)\n    }\n}\n\nimpl From<ApprovalsHash> for Digest {\n    fn from(hash: ApprovalsHash) -> Self {\n        hash.0\n    }\n}\n\nimpl From<Digest> for ApprovalsHash {\n    fn 
from(digest: Digest) -> Self {\n        Self(digest)\n    }\n}\n\nimpl Display for ApprovalsHash {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(formatter, \"transaction-v1-approvals-hash({})\", self.0,)\n    }\n}\n\nimpl AsRef<[u8]> for ApprovalsHash {\n    fn as_ref(&self) -> &[u8] {\n        self.0.as_ref()\n    }\n}\n\nimpl ToBytes for ApprovalsHash {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.0.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n}\n\nimpl FromBytes for ApprovalsHash {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        Digest::from_bytes(bytes).map(|(inner, remainder)| (ApprovalsHash(inner), remainder))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        let hash = ApprovalsHash::random(rng);\n        bytesrepr::test_serialization_roundtrip(&hash);\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/deploy/deploy_category.rs",
    "content": "use core::fmt::{self, Formatter};\n\nuse crate::Deploy;\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n/// The category of a [`Transaction`].\n#[derive(\n    Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, Default,\n)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"Session kind of legacy Deploy.\")\n)]\n#[serde(deny_unknown_fields)]\n#[repr(u8)]\npub enum DeployCategory {\n    /// Standard transaction (the default).\n    #[default]\n    Standard = 0,\n    /// Native transfer interaction.\n    Transfer = 1,\n}\n\nimpl fmt::Display for DeployCategory {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            DeployCategory::Standard => write!(f, \"Standard\"),\n            DeployCategory::Transfer => write!(f, \"Transfer\"),\n        }\n    }\n}\n\nimpl From<Deploy> for DeployCategory {\n    fn from(value: Deploy) -> Self {\n        if value.is_transfer() {\n            DeployCategory::Transfer\n        } else {\n            DeployCategory::Standard\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/deploy/deploy_hash.rs",
    "content": "use alloc::{string::String, vec::Vec};\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(doc)]\nuse super::Deploy;\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    Digest,\n};\n\n/// The cryptographic hash of a [`Deploy`].\n#[derive(\n    Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, Default,\n)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"Hex-encoded deploy hash.\")\n)]\n#[serde(deny_unknown_fields)]\npub struct DeployHash(Digest);\n\nimpl DeployHash {\n    /// The number of bytes in a `DeployHash` digest.\n    pub const LENGTH: usize = Digest::LENGTH;\n\n    /// Constructs a new `DeployHash`.\n    pub const fn new(hash: Digest) -> Self {\n        DeployHash(hash)\n    }\n\n    /// Returns the wrapped inner digest.\n    pub fn inner(&self) -> &Digest {\n        &self.0\n    }\n\n    /// Hexadecimal representation of the hash.\n    pub fn to_hex_string(&self) -> String {\n        base16::encode_lower(self.inner())\n    }\n\n    /// Returns a new `DeployHash` directly initialized with the provided bytes; no hashing is done.\n    #[cfg(any(feature = \"testing\", test))]\n    pub const fn from_raw(raw_digest: [u8; Self::LENGTH]) -> Self {\n        DeployHash(Digest::from_raw(raw_digest))\n    }\n\n    /// Returns a random `DeployHash`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        let hash = rng.gen::<[u8; Digest::LENGTH]>().into();\n        DeployHash(hash)\n    }\n}\n\nimpl From<Digest> for DeployHash {\n    fn from(digest: Digest) -> Self {\n        
DeployHash(digest)\n    }\n}\n\nimpl From<DeployHash> for Digest {\n    fn from(deploy_hash: DeployHash) -> Self {\n        deploy_hash.0\n    }\n}\n\nimpl Display for DeployHash {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(formatter, \"deploy-hash({})\", self.0,)\n    }\n}\n\nimpl AsRef<[u8]> for DeployHash {\n    fn as_ref(&self) -> &[u8] {\n        self.0.as_ref()\n    }\n}\n\nimpl ToBytes for DeployHash {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.0.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n}\n\nimpl FromBytes for DeployHash {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        Digest::from_bytes(bytes).map(|(inner, remainder)| (DeployHash(inner), remainder))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        let hash = DeployHash::random(rng);\n        bytesrepr::test_serialization_roundtrip(&hash);\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/deploy/deploy_header.rs",
    "content": "use alloc::{string::String, vec::Vec};\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\n#[cfg(any(feature = \"std\", test))]\nuse serde::{Deserialize, Serialize};\n#[cfg(any(feature = \"std\", test))]\nuse tracing::debug;\n\n#[cfg(doc)]\nuse super::Deploy;\nuse super::DeployHash;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    Digest, DisplayIter, PublicKey, TimeDiff, Timestamp,\n};\n#[cfg(any(feature = \"std\", test))]\nuse crate::{InvalidDeploy, TransactionConfig};\n\n/// The header portion of a [`Deploy`].\n#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)]\n#[cfg_attr(\n    any(feature = \"std\", test),\n    derive(Serialize, Deserialize),\n    serde(deny_unknown_fields)\n)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct DeployHeader {\n    account: PublicKey,\n    timestamp: Timestamp,\n    ttl: TimeDiff,\n    gas_price: u64,\n    body_hash: Digest,\n    dependencies: Vec<DeployHash>,\n    chain_name: String,\n}\n\nimpl DeployHeader {\n    #[cfg(any(feature = \"std\", feature = \"json-schema\", test))]\n    pub fn new(\n        account: PublicKey,\n        timestamp: Timestamp,\n        ttl: TimeDiff,\n        gas_price: u64,\n        body_hash: Digest,\n        dependencies: Vec<DeployHash>,\n        chain_name: String,\n    ) -> Self {\n        DeployHeader {\n            account,\n            timestamp,\n            ttl,\n            gas_price,\n            body_hash,\n            dependencies,\n            chain_name,\n        }\n    }\n\n    /// Returns the public key of the account providing the context in which to run the `Deploy`.\n    pub fn account(&self) -> &PublicKey {\n        &self.account\n    }\n\n    /// Returns the creation timestamp of the `Deploy`.\n    pub fn timestamp(&self) -> Timestamp {\n        
self.timestamp\n    }\n\n    /// Returns the duration after the creation timestamp for which the `Deploy` will stay valid.\n    ///\n    /// After this duration has ended, the `Deploy` will be considered expired.\n    pub fn ttl(&self) -> TimeDiff {\n        self.ttl\n    }\n\n    /// Returns `true` if the `Deploy` has expired.\n    pub fn expired(&self, current_instant: Timestamp) -> bool {\n        self.expires() < current_instant\n    }\n\n    /// Returns the sender's gas price tolerance for block inclusion.\n    pub fn gas_price(&self) -> u64 {\n        // in the original implementation, we did not have dynamic gas pricing\n        // but the sender of the deploy could specify a higher gas price,\n        // and the payment amount would be multiplied by that number\n        // for settlement purposes. This did not increase their computation limit,\n        // only how much they were charged. The intent was, the total cost\n        // would be a consideration for block proposal but in the end we shipped\n        // with an egalitarian subjective fifo proposer. Thus, there was no\n        // functional reason / no benefit to a sender setting gas price to\n        // anything higher than 1.\n        //\n        // As of 2.0 we have dynamic gas prices, this vestigial field has been\n        // repurposed, interpreted to indicate a gas price tolerance.\n        // If this deploy is buffered and the current gas price is higher than this\n        // value, it will not be included in a proposed block.\n        //\n        // This allowing the sender to opt out of block inclusion if the gas price is\n        // higher than they want to pay for.\n        self.gas_price\n    }\n\n    /// Returns the hash of the body (i.e. 
the Wasm code) of the `Deploy`.\n    pub fn body_hash(&self) -> &Digest {\n        &self.body_hash\n    }\n\n    /// Returns the list of other `Deploy`s that have to be executed before this one.\n    pub fn dependencies(&self) -> &Vec<DeployHash> {\n        &self.dependencies\n    }\n\n    /// Returns the name of the chain the `Deploy` should be executed on.\n    pub fn chain_name(&self) -> &str {\n        &self.chain_name\n    }\n\n    /// Returns `Ok` if and only if the dependencies count and TTL are within limits, and the\n    /// timestamp is not later than `at + timestamp_leeway`.  Does NOT check for expiry.\n    #[cfg(any(feature = \"std\", test))]\n    pub fn is_valid(\n        &self,\n        config: &TransactionConfig,\n        timestamp_leeway: TimeDiff,\n        at: Timestamp,\n        deploy_hash: &DeployHash,\n    ) -> Result<(), InvalidDeploy> {\n        // as of 2.0.0 deploy dependencies are not supported.\n        // a legacy deploy citing dependencies should be rejected\n        if !self.dependencies.is_empty() {\n            debug!(\n                %deploy_hash,\n                \"deploy dependencies no longer supported\"\n            );\n            return Err(InvalidDeploy::DependenciesNoLongerSupported);\n        }\n\n        if self.ttl() > config.max_ttl {\n            debug!(\n                %deploy_hash,\n                deploy_header = %self,\n                max_ttl = %config.max_ttl,\n                \"deploy ttl excessive\"\n            );\n            return Err(InvalidDeploy::ExcessiveTimeToLive {\n                max_ttl: config.max_ttl,\n                got: self.ttl(),\n            });\n        }\n\n        if self.timestamp() > at + timestamp_leeway {\n            debug!(%deploy_hash, deploy_header = %self, %at, \"deploy timestamp in the future\");\n            return Err(InvalidDeploy::TimestampInFuture {\n                validation_timestamp: at,\n                timestamp_leeway,\n                got: self.timestamp(),\n      
      });\n        }\n\n        Ok(())\n    }\n\n    /// Returns the timestamp of when the `Deploy` expires, i.e. `self.timestamp + self.ttl`.\n    pub fn expires(&self) -> Timestamp {\n        self.timestamp.saturating_add(self.ttl)\n    }\n\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub(super) fn invalidate(&mut self) {\n        self.chain_name.clear();\n    }\n}\n\nimpl ToBytes for DeployHeader {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.account.write_bytes(writer)?;\n        self.timestamp.write_bytes(writer)?;\n        self.ttl.write_bytes(writer)?;\n        self.gas_price.write_bytes(writer)?;\n        self.body_hash.write_bytes(writer)?;\n        self.dependencies.write_bytes(writer)?;\n        self.chain_name.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.account.serialized_length()\n            + self.timestamp.serialized_length()\n            + self.ttl.serialized_length()\n            + self.gas_price.serialized_length()\n            + self.body_hash.serialized_length()\n            + self.dependencies.serialized_length()\n            + self.chain_name.serialized_length()\n    }\n}\n\nimpl FromBytes for DeployHeader {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (account, remainder) = PublicKey::from_bytes(bytes)?;\n        let (timestamp, remainder) = Timestamp::from_bytes(remainder)?;\n        let (ttl, remainder) = TimeDiff::from_bytes(remainder)?;\n        let (gas_price, remainder) = u64::from_bytes(remainder)?;\n        let (body_hash, remainder) = Digest::from_bytes(remainder)?;\n        let (dependencies, remainder) = Vec::<DeployHash>::from_bytes(remainder)?;\n        let (chain_name, 
remainder) = String::from_bytes(remainder)?;\n        let deploy_header = DeployHeader {\n            account,\n            timestamp,\n            ttl,\n            gas_price,\n            body_hash,\n            dependencies,\n            chain_name,\n        };\n        Ok((deploy_header, remainder))\n    }\n}\n\nimpl Display for DeployHeader {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(\n            formatter,\n            \"deploy-header[account: {}, timestamp: {}, ttl: {}, gas_price: {}, body_hash: {}, \\\n            dependencies: [{}], chain_name: {}]\",\n            self.account,\n            self.timestamp,\n            self.ttl,\n            self.gas_price,\n            self.body_hash,\n            DisplayIter::new(self.dependencies.iter()),\n            self.chain_name,\n        )\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/deploy/deploy_id.rs",
    "content": "use alloc::vec::Vec;\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(doc)]\nuse super::Deploy;\nuse super::DeployHash;\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    transaction::{ApprovalsHash, TransactionHash, TransactionId},\n};\n\n/// The unique identifier of a [`Deploy`], comprising its [`DeployHash`] and\n/// [`ApprovalsHash`].\n#[derive(\n    Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, Default,\n)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct DeployId {\n    deploy_hash: DeployHash,\n    approvals_hash: ApprovalsHash,\n}\n\nimpl DeployId {\n    /// Returns a new `DeployId`.\n    pub fn new(deploy_hash: DeployHash, approvals_hash: ApprovalsHash) -> Self {\n        DeployId {\n            deploy_hash,\n            approvals_hash,\n        }\n    }\n\n    /// Returns the deploy hash.\n    pub fn deploy_hash(&self) -> &DeployHash {\n        &self.deploy_hash\n    }\n\n    /// Returns the approvals hash.\n    pub fn approvals_hash(&self) -> &ApprovalsHash {\n        &self.approvals_hash\n    }\n\n    /// Consumes `self`, returning a tuple of the constituent parts.\n    pub fn destructure(self) -> (DeployHash, ApprovalsHash) {\n        (self.deploy_hash, self.approvals_hash)\n    }\n\n    /// Returns a random `DeployId`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        DeployId::new(DeployHash::random(rng), ApprovalsHash::random(rng))\n    }\n}\n\nimpl Display for DeployId {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(\n            formatter,\n            \"deploy-id({}, {})\",\n            self.deploy_hash, self.approvals_hash\n        )\n    }\n}\n\nimpl ToBytes for DeployId {\n    
fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.deploy_hash.write_bytes(writer)?;\n        self.approvals_hash.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.deploy_hash.serialized_length() + self.approvals_hash.serialized_length()\n    }\n}\n\nimpl FromBytes for DeployId {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (deploy_hash, remainder) = DeployHash::from_bytes(bytes)?;\n        let (approvals_hash, remainder) = ApprovalsHash::from_bytes(remainder)?;\n        let id = DeployId::new(deploy_hash, approvals_hash);\n        Ok((id, remainder))\n    }\n}\n\nimpl From<DeployId> for TransactionId {\n    fn from(id: DeployId) -> Self {\n        TransactionId::new(TransactionHash::Deploy(id.deploy_hash), id.approvals_hash)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        let id = DeployId::random(rng);\n        bytesrepr::test_serialization_roundtrip(&id);\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/deploy/error.rs",
    "content": "use alloc::{boxed::Box, string::String};\nuse core::{\n    array::TryFromSliceError,\n    fmt::{self, Display, Formatter},\n};\n#[cfg(feature = \"std\")]\nuse std::error::Error as StdError;\n#[cfg(any(feature = \"testing\", test))]\nuse strum::EnumIter;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse serde::Serialize;\n\nuse crate::{crypto, TimeDiff, Timestamp, U512};\n\n/// A representation of the way in which a deploy failed validation checks.\n#[derive(Clone, Eq, PartialEq, Debug)]\n#[cfg_attr(feature = \"std\", derive(Serialize))]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[non_exhaustive]\n// This derive should not be removed due to a completeness\n// test that we have in binary-port. It checks if all variants\n// of this error have corresponding binary port error codes\n#[cfg_attr(any(feature = \"testing\", test), derive(EnumIter))]\npub enum InvalidDeploy {\n    /// Invalid chain name.\n    InvalidChainName {\n        /// The expected chain name.\n        expected: String,\n        /// The received chain name.\n        got: String,\n    },\n\n    /// Deploy dependencies are no longer supported.\n    DependenciesNoLongerSupported,\n\n    /// Deploy is too large.\n    ExcessiveSize(ExcessiveSizeError),\n\n    /// Excessive time-to-live.\n    ExcessiveTimeToLive {\n        /// The time-to-live limit.\n        max_ttl: TimeDiff,\n        /// The received time-to-live.\n        got: TimeDiff,\n    },\n\n    /// Deploy's timestamp is in the future.\n    TimestampInFuture {\n        /// The node's timestamp when validating the deploy.\n        validation_timestamp: Timestamp,\n        /// Any configured leeway added to `validation_timestamp`.\n        timestamp_leeway: TimeDiff,\n        /// The deploy's timestamp.\n        got: Timestamp,\n    },\n\n    /// The provided body hash does not match the actual hash of the body.\n    InvalidBodyHash,\n\n    /// The provided deploy hash does not match the actual hash of the 
deploy.\n    InvalidDeployHash,\n\n    /// The deploy has no approvals.\n    EmptyApprovals,\n\n    /// Invalid approval.\n    InvalidApproval {\n        /// The index of the approval at fault.\n        index: usize,\n        /// The approval verification error.\n        error: crypto::Error,\n    },\n\n    /// Excessive length of deploy's session args.\n    ExcessiveSessionArgsLength {\n        /// The byte size limit of session arguments.\n        max_length: usize,\n        /// The received length of session arguments.\n        got: usize,\n    },\n\n    /// Excessive length of deploy's payment args.\n    ExcessivePaymentArgsLength {\n        /// The byte size limit of payment arguments.\n        max_length: usize,\n        /// The received length of payment arguments.\n        got: usize,\n    },\n\n    /// Missing payment \"amount\" runtime argument.\n    MissingPaymentAmount,\n\n    /// Failed to parse payment \"amount\" runtime argument.\n    FailedToParsePaymentAmount,\n\n    /// The payment amount associated with the deploy exceeds the block gas limit.\n    ExceededBlockGasLimit {\n        /// Configured block gas limit.\n        block_gas_limit: u64,\n        /// The payment amount received.\n        got: Box<U512>,\n    },\n\n    /// Missing payment \"amount\" runtime argument\n    MissingTransferAmount,\n\n    /// Failed to parse transfer \"amount\" runtime argument.\n    FailedToParseTransferAmount,\n\n    /// Insufficient transfer amount.\n    InsufficientTransferAmount {\n        /// The minimum transfer amount.\n        minimum: Box<U512>,\n        /// The attempted transfer amount.\n        attempted: Box<U512>,\n    },\n\n    /// The amount of approvals on the deploy exceeds the max_associated_keys limit.\n    ExcessiveApprovals {\n        /// Number of approvals on the deploy.\n        got: u32,\n        /// The chainspec limit for max_associated_keys.\n        max_associated_keys: u32,\n    },\n\n    /// Unable to calculate gas limit.\n    
UnableToCalculateGasLimit,\n\n    /// Unable to calculate gas cost.\n    UnableToCalculateGasCost,\n\n    /// Gas limit is not supported in legacy deploys.\n    GasLimitNotSupported,\n\n    /// Gas price tolerance too low.\n    GasPriceToleranceTooLow {\n        /// The minimum gas price tolerance.\n        min_gas_price_tolerance: u8,\n        /// The provided gas price tolerance.\n        provided_gas_price_tolerance: u8,\n    },\n\n    /// Invalid runtime.\n    InvalidRuntime,\n\n    /// Could not match deploy with transaction lane\n    NoLaneMatch,\n\n    /// The payment amount associated with the deploy exceeds the lane gas limit.\n    ExceededLaneGasLimit {\n        /// Configured lane gas limit.\n        lane_gas_limit: u64,\n        /// The payment amount received.\n        got: Box<U512>,\n    },\n\n    /// Invalid payment amount.\n    InvalidPaymentAmount,\n\n    /// Pricing mode not supported\n    PricingModeNotSupported,\n}\n\nimpl Display for InvalidDeploy {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            InvalidDeploy::InvalidChainName { expected, got } => {\n                                write!(\n                                    formatter,\n                                    \"invalid chain name: expected {}, got {}\",\n                                    expected, got\n                                )\n                            }\n            InvalidDeploy::DependenciesNoLongerSupported => {\n                                write!(formatter, \"dependencies no longer supported\",)\n                            }\n            InvalidDeploy::ExcessiveSize(error) => {\n                                write!(formatter, \"deploy size too large: {}\", error)\n                            }\n            InvalidDeploy::ExcessiveTimeToLive { max_ttl, got } => {\n                                write!(\n                                    formatter,\n                                    \"time-to-live of 
{} exceeds limit of {}\",\n                                    got, max_ttl\n                                )\n                            }\n            InvalidDeploy::TimestampInFuture {\n                                validation_timestamp,\n                                timestamp_leeway,\n                                got,\n                            } => {\n                                write!(\n                                    formatter,\n                                    \"timestamp of {} is later than node's timestamp of {} plus leeway of {}\",\n                                    got, validation_timestamp, timestamp_leeway\n                                )\n                            }\n            InvalidDeploy::InvalidBodyHash => {\n                                write!(\n                                    formatter,\n                                    \"the provided body hash does not match the actual hash of the body\"\n                                )\n                            }\n            InvalidDeploy::InvalidDeployHash => {\n                                write!(\n                                    formatter,\n                                    \"the provided hash does not match the actual hash of the deploy\"\n                                )\n                            }\n            InvalidDeploy::EmptyApprovals => {\n                                write!(formatter, \"the deploy has no approvals\")\n                            }\n            InvalidDeploy::InvalidApproval { index, error } => {\n                                write!(\n                                    formatter,\n                                    \"the approval at index {} is invalid: {}\",\n                                    index, error\n                                )\n                            }\n            InvalidDeploy::ExcessiveSessionArgsLength { max_length, got } => {\n                                write!(\n                      
              formatter,\n                                    \"serialized session code runtime args of {} exceeds limit of {}\",\n                                    got, max_length\n                                )\n                            }\n            InvalidDeploy::ExcessivePaymentArgsLength { max_length, got } => {\n                                write!(\n                                    formatter,\n                                    \"serialized payment code runtime args of {} exceeds limit of {}\",\n                                    got, max_length\n                                )\n                            }\n            InvalidDeploy::MissingPaymentAmount => {\n                                write!(formatter, \"missing payment 'amount' runtime argument\")\n                            }\n            InvalidDeploy::FailedToParsePaymentAmount => {\n                                write!(formatter, \"failed to parse payment 'amount' as U512\")\n                            }\n            InvalidDeploy::ExceededBlockGasLimit {\n                                block_gas_limit,\n                                got,\n                            } => {\n                                write!(\n                                    formatter,\n                                    \"payment amount of {} exceeds the block gas limit of {}\",\n                                    got, block_gas_limit\n                                )\n                            }\n            InvalidDeploy::MissingTransferAmount => {\n                                write!(formatter, \"missing transfer 'amount' runtime argument\")\n                            }\n            InvalidDeploy::FailedToParseTransferAmount => {\n                                write!(formatter, \"failed to parse transfer 'amount' as U512\")\n                            }\n            InvalidDeploy::InsufficientTransferAmount { minimum, attempted } => {\n                                write!(\n 
                                   formatter,\n                                    \"insufficient transfer amount; minimum: {} attempted: {}\",\n                                    minimum, attempted\n                                )\n                            }\n            InvalidDeploy::ExcessiveApprovals {\n                                got,\n                                max_associated_keys,\n                            } => {\n                                write!(\n                                    formatter,\n                                    \"number of approvals {} exceeds the maximum number of associated keys {}\",\n                                    got, max_associated_keys\n                                )\n                            }\n            InvalidDeploy::UnableToCalculateGasLimit => {\n                                write!(formatter, \"unable to calculate gas limit\",)\n                            }\n            InvalidDeploy::UnableToCalculateGasCost => {\n                                write!(formatter, \"unable to calculate gas cost\",)\n                            }\n            InvalidDeploy::GasLimitNotSupported => {\n                                write!(formatter, \"gas limit is not supported in legacy deploys\",)\n                            }\n            InvalidDeploy::GasPriceToleranceTooLow { min_gas_price_tolerance, provided_gas_price_tolerance } => write!(\n                                formatter,\n                                \"received a deploy with gas price tolerance {} but this chain will only go as low as {}\",\n                                provided_gas_price_tolerance, min_gas_price_tolerance\n                            ),\n            InvalidDeploy::InvalidRuntime => {\n                                write!(formatter, \"invalid runtime\",)\n                            }\n            InvalidDeploy::NoLaneMatch => write!(formatter, \"chainspec didnt have any wasm lanes defined which is required 
for wasm based deploys\",),\n            InvalidDeploy::ExceededLaneGasLimit {\n                                lane_gas_limit: wasm_lane_gas_limit,\n                                got,\n                            } => {\n                                write!(\n                                    formatter,\n                                    \"payment amount of {} exceeds the largest wasm lane gas limit of {}\",\n                                    got, wasm_lane_gas_limit\n                                )\n                            }\n            InvalidDeploy::InvalidPaymentAmount => write!(formatter, \"invalid payment amount\",),\n            InvalidDeploy::PricingModeNotSupported => write!(formatter, \"pricing mode not supported\",),\n        }\n    }\n}\n\nimpl From<ExcessiveSizeError> for InvalidDeploy {\n    fn from(error: ExcessiveSizeError) -> Self {\n        InvalidDeploy::ExcessiveSize(error)\n    }\n}\n\n#[cfg(feature = \"std\")]\nimpl StdError for InvalidDeploy {\n    fn source(&self) -> Option<&(dyn StdError + 'static)> {\n        match self {\n            InvalidDeploy::InvalidApproval { error, .. } => Some(error),\n            InvalidDeploy::InvalidChainName { .. }\n            | InvalidDeploy::DependenciesNoLongerSupported { .. }\n            | InvalidDeploy::ExcessiveSize(_)\n            | InvalidDeploy::ExcessiveTimeToLive { .. }\n            | InvalidDeploy::TimestampInFuture { .. }\n            | InvalidDeploy::InvalidBodyHash\n            | InvalidDeploy::InvalidDeployHash\n            | InvalidDeploy::EmptyApprovals\n            | InvalidDeploy::ExcessiveSessionArgsLength { .. }\n            | InvalidDeploy::ExcessivePaymentArgsLength { .. }\n            | InvalidDeploy::MissingPaymentAmount\n            | InvalidDeploy::FailedToParsePaymentAmount\n            | InvalidDeploy::ExceededBlockGasLimit { .. 
}\n            | InvalidDeploy::MissingTransferAmount\n            | InvalidDeploy::FailedToParseTransferAmount\n            | InvalidDeploy::InsufficientTransferAmount { .. }\n            | InvalidDeploy::ExcessiveApprovals { .. }\n            | InvalidDeploy::UnableToCalculateGasLimit\n            | InvalidDeploy::GasLimitNotSupported\n            | InvalidDeploy::UnableToCalculateGasCost\n            | InvalidDeploy::GasPriceToleranceTooLow { .. }\n            | InvalidDeploy::InvalidRuntime\n            | InvalidDeploy::NoLaneMatch\n            | InvalidDeploy::ExceededLaneGasLimit { .. }\n            | InvalidDeploy::InvalidPaymentAmount\n            | InvalidDeploy::PricingModeNotSupported => None,\n        }\n    }\n}\n\n/// Error returned when a Deploy is too large.\n#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug, Serialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n//Default is needed only in testing to meet EnumIter needs\n#[cfg_attr(any(feature = \"testing\", test), derive(Default))]\npub struct ExcessiveSizeError {\n    /// The maximum permitted serialized deploy size, in bytes.\n    pub max_transaction_size: u32,\n    /// The serialized size of the deploy provided, in bytes.\n    pub actual_deploy_size: usize,\n}\n\nimpl Display for ExcessiveSizeError {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(\n            formatter,\n            \"deploy size of {} bytes exceeds limit of {}\",\n            self.actual_deploy_size, self.max_transaction_size\n        )\n    }\n}\n\n#[cfg(feature = \"std\")]\nimpl StdError for ExcessiveSizeError {}\n/// Errors other than validation failures relating to `Deploy`s.\n#[derive(Debug)]\n#[non_exhaustive]\npub enum Error {\n    /// Error while encoding to JSON.\n    EncodeToJson(serde_json::Error),\n\n    /// Error while decoding from JSON.\n    DecodeFromJson(DecodeFromJsonError),\n\n    /// Failed to get \"amount\" from `payment()`'s runtime args.\n    
InvalidPayment,\n}\n\nimpl From<serde_json::Error> for Error {\n    fn from(error: serde_json::Error) -> Self {\n        Error::EncodeToJson(error)\n    }\n}\n\nimpl From<DecodeFromJsonError> for Error {\n    fn from(error: DecodeFromJsonError) -> Self {\n        Error::DecodeFromJson(error)\n    }\n}\n\nimpl Display for Error {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            Error::EncodeToJson(error) => {\n                write!(formatter, \"encoding to json: {}\", error)\n            }\n            Error::DecodeFromJson(error) => {\n                write!(formatter, \"decoding from json: {}\", error)\n            }\n            Error::InvalidPayment => {\n                write!(formatter, \"invalid payment: missing 'amount' arg\")\n            }\n        }\n    }\n}\n\n#[cfg(feature = \"std\")]\nimpl StdError for Error {\n    fn source(&self) -> Option<&(dyn StdError + 'static)> {\n        match self {\n            Error::EncodeToJson(error) => Some(error),\n            Error::DecodeFromJson(error) => Some(error),\n            Error::InvalidPayment => None,\n        }\n    }\n}\n\n/// Error while decoding a `Deploy` from JSON.\n#[derive(Debug)]\n#[non_exhaustive]\npub enum DecodeFromJsonError {\n    /// Failed to decode from base 16.\n    FromHex(base16::DecodeError),\n\n    /// Failed to convert slice to array.\n    TryFromSlice(TryFromSliceError),\n}\n\nimpl From<base16::DecodeError> for DecodeFromJsonError {\n    fn from(error: base16::DecodeError) -> Self {\n        DecodeFromJsonError::FromHex(error)\n    }\n}\n\nimpl From<TryFromSliceError> for DecodeFromJsonError {\n    fn from(error: TryFromSliceError) -> Self {\n        DecodeFromJsonError::TryFromSlice(error)\n    }\n}\n\nimpl Display for DecodeFromJsonError {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            DecodeFromJsonError::FromHex(error) => {\n                write!(formatter, \"{}\", error)\n       
     }\n            DecodeFromJsonError::TryFromSlice(error) => {\n                write!(formatter, \"{}\", error)\n            }\n        }\n    }\n}\n\n#[cfg(feature = \"std\")]\nimpl StdError for DecodeFromJsonError {\n    fn source(&self) -> Option<&(dyn StdError + 'static)> {\n        match self {\n            DecodeFromJsonError::FromHex(error) => Some(error),\n            DecodeFromJsonError::TryFromSlice(error) => Some(error),\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/deploy/executable_deploy_item.rs",
    "content": "use alloc::{string::String, vec::Vec};\nuse core::fmt::{self, Debug, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse hex_fmt::HexFmt;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Alphanumeric, Distribution, Standard},\n    Rng,\n};\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(doc)]\nuse super::Deploy;\nuse crate::{\n    addressable_entity::DEFAULT_ENTRY_POINT_NAME,\n    bytesrepr::{self, Bytes, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    contracts::{ContractHash, ContractPackageHash, ContractVersion},\n    package::PackageHash,\n    runtime_args, serde_helpers,\n    system::mint::ARG_AMOUNT,\n    transaction::{RuntimeArgs, TransferTarget},\n    AddressableEntityHash, AddressableEntityIdentifier, Gas, Motes, PackageIdentifier, Phase, URef,\n    METHOD_TRANSFER, U512,\n};\n#[cfg(any(feature = \"testing\", test))]\nuse crate::{testing::TestRng, CLValue};\n\nconst TAG_LENGTH: usize = U8_SERIALIZED_LENGTH;\nconst MODULE_BYTES_TAG: u8 = 0;\nconst STORED_CONTRACT_BY_HASH_TAG: u8 = 1;\nconst STORED_CONTRACT_BY_NAME_TAG: u8 = 2;\nconst STORED_VERSIONED_CONTRACT_BY_HASH_TAG: u8 = 3;\nconst STORED_VERSIONED_CONTRACT_BY_NAME_TAG: u8 = 4;\nconst TRANSFER_TAG: u8 = 5;\nconst TRANSFER_ARG_AMOUNT: &str = \"amount\";\nconst TRANSFER_ARG_SOURCE: &str = \"source\";\nconst TRANSFER_ARG_TARGET: &str = \"target\";\nconst TRANSFER_ARG_ID: &str = \"id\";\n\n/// Identifier for an [`ExecutableDeployItem`].\n#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]\npub enum ExecutableDeployItemIdentifier {\n    /// The deploy item is of the type [`ExecutableDeployItem::ModuleBytes`]\n    Module,\n    /// The deploy item is a variation of a stored contract.\n    AddressableEntity(AddressableEntityIdentifier),\n    /// The deploy item is a variation of a stored contract package.\n    Package(PackageIdentifier),\n    /// The deploy item is 
a native transfer.\n    Transfer,\n}\n\n/// The executable component of a [`Deploy`].\n#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub enum ExecutableDeployItem {\n    /// Executable specified as raw bytes that represent Wasm code and an instance of\n    /// [`RuntimeArgs`].\n    ModuleBytes {\n        /// Raw Wasm module bytes with 'call' exported as an entrypoint.\n        #[cfg_attr(\n            feature = \"json-schema\",\n            schemars(description = \"Hex-encoded raw Wasm bytes.\")\n        )]\n        module_bytes: Bytes,\n        /// Runtime arguments.\n        args: RuntimeArgs,\n    },\n    /// Stored contract referenced by its [`AddressableEntityHash`], entry point and an instance of\n    /// [`RuntimeArgs`].\n    StoredContractByHash {\n        /// Contract hash.\n        #[serde(with = \"serde_helpers::contract_hash_as_digest\")]\n        #[cfg_attr(\n            feature = \"json-schema\",\n            schemars(\n                // this attribute is necessary due to a bug: https://github.com/GREsau/schemars/issues/89\n                with = \"ContractHash\",\n                description = \"Hex-encoded contract hash.\"\n            )\n        )]\n        hash: ContractHash,\n        /// Name of an entry point.\n        entry_point: String,\n        /// Runtime arguments.\n        args: RuntimeArgs,\n    },\n    /// Stored contract referenced by a named key existing in the signer's account context, entry\n    /// point and an instance of [`RuntimeArgs`].\n    StoredContractByName {\n        /// Named key.\n        name: String,\n        /// Name of an entry point.\n        entry_point: String,\n        /// Runtime arguments.\n        args: RuntimeArgs,\n    },\n    /// Stored versioned contract referenced by its [`PackageHash`], entry point and an\n    /// instance of 
[`RuntimeArgs`].\n    StoredVersionedContractByHash {\n        /// Contract package hash\n        #[serde(with = \"serde_helpers::contract_package_hash_as_digest\")]\n        #[cfg_attr(\n            feature = \"json-schema\",\n            schemars(\n                // this attribute is necessary due to a bug: https://github.com/GREsau/schemars/issues/89\n                with = \"ContractPackageHash\",\n                description = \"Hex-encoded contract package hash.\"\n            )\n        )]\n        hash: ContractPackageHash,\n        /// An optional version of the contract to call. It will default to the highest enabled\n        /// version if no value is specified.\n        version: Option<ContractVersion>,\n        /// Entry point name.\n        entry_point: String,\n        /// Runtime arguments.\n        args: RuntimeArgs,\n    },\n    /// Stored versioned contract referenced by a named key existing in the signer's account\n    /// context, entry point and an instance of [`RuntimeArgs`].\n    StoredVersionedContractByName {\n        /// Named key.\n        name: String,\n        /// An optional version of the contract to call. 
It will default to the highest enabled\n        /// version if no value is specified.\n        version: Option<ContractVersion>,\n        /// Entry point name.\n        entry_point: String,\n        /// Runtime arguments.\n        args: RuntimeArgs,\n    },\n    /// A native transfer which does not contain or reference a Wasm code.\n    Transfer {\n        /// Runtime arguments.\n        args: RuntimeArgs,\n    },\n}\n\nimpl ExecutableDeployItem {\n    /// Returns a new `ExecutableDeployItem::ModuleBytes`.\n    pub fn new_module_bytes(module_bytes: Bytes, args: RuntimeArgs) -> Self {\n        ExecutableDeployItem::ModuleBytes { module_bytes, args }\n    }\n\n    /// Returns a new `ExecutableDeployItem::ModuleBytes` suitable for use as standard payment code\n    /// of a `Deploy`.\n    pub fn new_standard_payment<A: Into<U512>>(amount: A) -> Self {\n        ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: runtime_args! {\n                ARG_AMOUNT => amount.into(),\n            },\n        }\n    }\n\n    /// Returns a new `ExecutableDeployItem::StoredContractByHash`.\n    pub fn new_stored_contract_by_hash(\n        hash: ContractHash,\n        entry_point: String,\n        args: RuntimeArgs,\n    ) -> Self {\n        ExecutableDeployItem::StoredContractByHash {\n            hash,\n            entry_point,\n            args,\n        }\n    }\n\n    /// Returns a new `ExecutableDeployItem::StoredContractByName`.\n    pub fn new_stored_contract_by_name(\n        name: String,\n        entry_point: String,\n        args: RuntimeArgs,\n    ) -> Self {\n        ExecutableDeployItem::StoredContractByName {\n            name,\n            entry_point,\n            args,\n        }\n    }\n\n    /// Returns a new `ExecutableDeployItem::StoredVersionedContractByHash`.\n    pub fn new_stored_versioned_contract_by_hash(\n        hash: ContractPackageHash,\n        version: Option<ContractVersion>,\n        entry_point: 
String,\n        args: RuntimeArgs,\n    ) -> Self {\n        ExecutableDeployItem::StoredVersionedContractByHash {\n            hash,\n            version,\n            entry_point,\n            args,\n        }\n    }\n\n    /// Returns a new `ExecutableDeployItem::StoredVersionedKeyContractByName`.\n    pub fn new_stored_versioned_contract_by_name(\n        name: String,\n        version: Option<ContractVersion>,\n        entry_point: String,\n        args: RuntimeArgs,\n    ) -> Self {\n        ExecutableDeployItem::StoredVersionedContractByName {\n            name,\n            version,\n            entry_point,\n            args,\n        }\n    }\n\n    /// Returns a new `ExecutableDeployItem` suitable for use as session code for a transfer.\n    ///\n    /// If `maybe_source` is None, the account's main purse is used as the source.\n    pub fn new_transfer<A: Into<U512>, T: Into<TransferTarget>>(\n        amount: A,\n        maybe_source: Option<URef>,\n        target: T,\n        maybe_transfer_id: Option<u64>,\n    ) -> Self {\n        let mut args = RuntimeArgs::new();\n        args.insert(TRANSFER_ARG_AMOUNT, amount.into())\n            .expect(\"should serialize amount arg\");\n\n        if let Some(source) = maybe_source {\n            args.insert(TRANSFER_ARG_SOURCE, source)\n                .expect(\"should serialize source arg\");\n        }\n\n        match target.into() {\n            TransferTarget::PublicKey(public_key) => args\n                .insert(TRANSFER_ARG_TARGET, public_key)\n                .expect(\"should serialize public key target arg\"),\n            TransferTarget::AccountHash(account_hash) => args\n                .insert(TRANSFER_ARG_TARGET, account_hash)\n                .expect(\"should serialize account hash target arg\"),\n            TransferTarget::URef(uref) => args\n                .insert(TRANSFER_ARG_TARGET, uref)\n                .expect(\"should serialize uref target arg\"),\n        }\n\n        
args.insert(TRANSFER_ARG_ID, maybe_transfer_id)\n            .expect(\"should serialize transfer id arg\");\n\n        ExecutableDeployItem::Transfer { args }\n    }\n\n    /// Returns the entry point name.\n    pub fn entry_point_name(&self) -> &str {\n        match self {\n            ExecutableDeployItem::ModuleBytes { .. } => DEFAULT_ENTRY_POINT_NAME,\n            ExecutableDeployItem::Transfer { .. } => METHOD_TRANSFER,\n            ExecutableDeployItem::StoredVersionedContractByName { entry_point, .. }\n            | ExecutableDeployItem::StoredVersionedContractByHash { entry_point, .. }\n            | ExecutableDeployItem::StoredContractByHash { entry_point, .. }\n            | ExecutableDeployItem::StoredContractByName { entry_point, .. } => entry_point,\n        }\n    }\n\n    /// Returns the identifier of the `ExecutableDeployItem`.\n    pub fn identifier(&self) -> ExecutableDeployItemIdentifier {\n        match self {\n            ExecutableDeployItem::ModuleBytes { .. } => ExecutableDeployItemIdentifier::Module,\n            ExecutableDeployItem::StoredContractByHash { hash, .. } => {\n                ExecutableDeployItemIdentifier::AddressableEntity(\n                    AddressableEntityIdentifier::Hash(AddressableEntityHash::new(hash.value())),\n                )\n            }\n            ExecutableDeployItem::StoredContractByName { name, .. } => {\n                ExecutableDeployItemIdentifier::AddressableEntity(\n                    AddressableEntityIdentifier::Name(name.clone()),\n                )\n            }\n            ExecutableDeployItem::StoredVersionedContractByHash { hash, version, .. } => {\n                ExecutableDeployItemIdentifier::Package(PackageIdentifier::Hash {\n                    package_hash: PackageHash::new(hash.value()),\n                    version: *version,\n                })\n            }\n            ExecutableDeployItem::StoredVersionedContractByName { name, version, .. 
} => {\n                ExecutableDeployItemIdentifier::Package(PackageIdentifier::Name {\n                    name: name.clone(),\n                    version: *version,\n                })\n            }\n            ExecutableDeployItem::Transfer { .. } => ExecutableDeployItemIdentifier::Transfer,\n        }\n    }\n\n    /// Returns the identifier of the contract in the deploy item, if present.\n    pub fn contract_identifier(&self) -> Option<AddressableEntityIdentifier> {\n        match self {\n            ExecutableDeployItem::ModuleBytes { .. }\n            | ExecutableDeployItem::StoredVersionedContractByHash { .. }\n            | ExecutableDeployItem::StoredVersionedContractByName { .. }\n            | ExecutableDeployItem::Transfer { .. } => None,\n            ExecutableDeployItem::StoredContractByHash { hash, .. } => Some(\n                AddressableEntityIdentifier::Hash(AddressableEntityHash::new(hash.value())),\n            ),\n            ExecutableDeployItem::StoredContractByName { name, .. } => {\n                Some(AddressableEntityIdentifier::Name(name.clone()))\n            }\n        }\n    }\n\n    /// Returns the identifier of the contract package in the deploy item, if present.\n    pub fn contract_package_identifier(&self) -> Option<PackageIdentifier> {\n        match self {\n            ExecutableDeployItem::ModuleBytes { .. }\n            | ExecutableDeployItem::StoredContractByHash { .. }\n            | ExecutableDeployItem::StoredContractByName { .. }\n            | ExecutableDeployItem::Transfer { .. } => None,\n\n            ExecutableDeployItem::StoredVersionedContractByHash { hash, version, .. 
} => {\n                Some(PackageIdentifier::HashWithMajorVersion {\n                    package_hash: PackageHash::new(hash.value()),\n                    version: *version,\n                    protocol_version_major: None,\n                })\n            }\n            ExecutableDeployItem::StoredVersionedContractByName { name, version, .. } => {\n                Some(PackageIdentifier::NameWithMajorVersion {\n                    name: name.clone(),\n                    version: *version,\n                    protocol_version_major: None,\n                })\n            }\n        }\n    }\n\n    /// Returns the runtime arguments.\n    pub fn args(&self) -> &RuntimeArgs {\n        match self {\n            ExecutableDeployItem::ModuleBytes { args, .. }\n            | ExecutableDeployItem::StoredContractByHash { args, .. }\n            | ExecutableDeployItem::StoredContractByName { args, .. }\n            | ExecutableDeployItem::StoredVersionedContractByHash { args, .. }\n            | ExecutableDeployItem::StoredVersionedContractByName { args, .. }\n            | ExecutableDeployItem::Transfer { args } => args,\n        }\n    }\n\n    /// Returns the payment amount from args (if any) as Gas.\n    pub fn payment_amount(&self, conv_rate: u8) -> Option<Gas> {\n        let cl_value = self.args().get(ARG_AMOUNT)?;\n        let motes = cl_value.clone().into_t::<U512>().ok()?;\n        Gas::from_motes(Motes::new(motes), conv_rate)\n    }\n\n    /// Returns `true` if this deploy item is a native transfer.\n    pub fn is_transfer(&self) -> bool {\n        matches!(self, ExecutableDeployItem::Transfer { .. })\n    }\n\n    /// Returns `true` if this deploy item is a standard payment.\n    pub fn is_standard_payment(&self, phase: Phase) -> bool {\n        if phase != Phase::Payment {\n            return false;\n        }\n\n        if let ExecutableDeployItem::ModuleBytes { module_bytes, .. 
} = self {\n            return module_bytes.is_empty();\n        }\n\n        false\n    }\n\n    /// Returns `true` if the deploy item is a contract identified by its name.\n    pub fn is_by_name(&self) -> bool {\n        matches!(\n            self,\n            ExecutableDeployItem::StoredVersionedContractByName { .. }\n        ) || matches!(self, ExecutableDeployItem::StoredContractByName { .. })\n    }\n\n    /// Returns the name of the contract or contract package, if the deploy item is identified by\n    /// name.\n    pub fn by_name(&self) -> Option<String> {\n        match self {\n            ExecutableDeployItem::StoredContractByName { name, .. }\n            | ExecutableDeployItem::StoredVersionedContractByName { name, .. } => {\n                Some(name.clone())\n            }\n            ExecutableDeployItem::ModuleBytes { .. }\n            | ExecutableDeployItem::StoredContractByHash { .. }\n            | ExecutableDeployItem::StoredVersionedContractByHash { .. }\n            | ExecutableDeployItem::Transfer { .. } => None,\n        }\n    }\n\n    /// Returns `true` if the deploy item is a stored contract.\n    pub fn is_stored_contract(&self) -> bool {\n        matches!(self, ExecutableDeployItem::StoredContractByHash { .. })\n            || matches!(self, ExecutableDeployItem::StoredContractByName { .. })\n    }\n\n    /// Returns `true` if the deploy item is a stored contract package.\n    pub fn is_stored_contract_package(&self) -> bool {\n        matches!(\n            self,\n            ExecutableDeployItem::StoredVersionedContractByHash { .. }\n        ) || matches!(\n            self,\n            ExecutableDeployItem::StoredVersionedContractByName { .. }\n        )\n    }\n\n    /// Returns `true` if the deploy item is [`ModuleBytes`].\n    ///\n    /// [`ModuleBytes`]: ExecutableDeployItem::ModuleBytes\n    pub fn is_module_bytes(&self) -> bool {\n        matches!(self, Self::ModuleBytes { .. 
})\n    }\n\n    /// Returns a random `ExecutableDeployItem`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        rng.gen()\n    }\n}\n\nimpl ToBytes for ExecutableDeployItem {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            ExecutableDeployItem::ModuleBytes { module_bytes, args } => {\n                writer.push(MODULE_BYTES_TAG);\n                module_bytes.write_bytes(writer)?;\n                args.write_bytes(writer)\n            }\n            ExecutableDeployItem::StoredContractByHash {\n                hash,\n                entry_point,\n                args,\n            } => {\n                writer.push(STORED_CONTRACT_BY_HASH_TAG);\n                hash.write_bytes(writer)?;\n                entry_point.write_bytes(writer)?;\n                args.write_bytes(writer)\n            }\n            ExecutableDeployItem::StoredContractByName {\n                name,\n                entry_point,\n                args,\n            } => {\n                writer.push(STORED_CONTRACT_BY_NAME_TAG);\n                name.write_bytes(writer)?;\n                entry_point.write_bytes(writer)?;\n                args.write_bytes(writer)\n            }\n            ExecutableDeployItem::StoredVersionedContractByHash {\n                hash,\n                version,\n                entry_point,\n                args,\n            } => {\n                writer.push(STORED_VERSIONED_CONTRACT_BY_HASH_TAG);\n                hash.write_bytes(writer)?;\n                version.write_bytes(writer)?;\n                entry_point.write_bytes(writer)?;\n                args.write_bytes(writer)\n            }\n            ExecutableDeployItem::StoredVersionedContractByName {\n                name,\n                version,\n                entry_point,\n                args,\n            } => {\n                
writer.push(STORED_VERSIONED_CONTRACT_BY_NAME_TAG);\n                name.write_bytes(writer)?;\n                version.write_bytes(writer)?;\n                entry_point.write_bytes(writer)?;\n                args.write_bytes(writer)\n            }\n            ExecutableDeployItem::Transfer { args } => {\n                writer.push(TRANSFER_TAG);\n                args.write_bytes(writer)\n            }\n        }\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        TAG_LENGTH\n            + match self {\n                ExecutableDeployItem::ModuleBytes { module_bytes, args } => {\n                    module_bytes.serialized_length() + args.serialized_length()\n                }\n                ExecutableDeployItem::StoredContractByHash {\n                    hash,\n                    entry_point,\n                    args,\n                } => {\n                    hash.serialized_length()\n                        + entry_point.serialized_length()\n                        + args.serialized_length()\n                }\n                ExecutableDeployItem::StoredContractByName {\n                    name,\n                    entry_point,\n                    args,\n                } => {\n                    name.serialized_length()\n                        + entry_point.serialized_length()\n                        + args.serialized_length()\n                }\n                ExecutableDeployItem::StoredVersionedContractByHash {\n                    hash,\n                    version,\n                    entry_point,\n                    args,\n                } => {\n                    hash.serialized_length()\n                        + version.serialized_length()\n                        + entry_point.serialized_length()\n                        + 
args.serialized_length()\n                }\n                ExecutableDeployItem::StoredVersionedContractByName {\n                    name,\n                    version,\n                    entry_point,\n                    args,\n                } => {\n                    name.serialized_length()\n                        + version.serialized_length()\n                        + entry_point.serialized_length()\n                        + args.serialized_length()\n                }\n                ExecutableDeployItem::Transfer { args } => args.serialized_length(),\n            }\n    }\n}\n\nimpl FromBytes for ExecutableDeployItem {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            MODULE_BYTES_TAG => {\n                let (module_bytes, remainder) = Bytes::from_bytes(remainder)?;\n                let (args, remainder) = RuntimeArgs::from_bytes(remainder)?;\n                Ok((\n                    ExecutableDeployItem::ModuleBytes { module_bytes, args },\n                    remainder,\n                ))\n            }\n            STORED_CONTRACT_BY_HASH_TAG => {\n                let (hash, remainder) = ContractHash::from_bytes(remainder)?;\n                let (entry_point, remainder) = String::from_bytes(remainder)?;\n                let (args, remainder) = RuntimeArgs::from_bytes(remainder)?;\n                Ok((\n                    ExecutableDeployItem::StoredContractByHash {\n                        hash,\n                        entry_point,\n                        args,\n                    },\n                    remainder,\n                ))\n            }\n            STORED_CONTRACT_BY_NAME_TAG => {\n                let (name, remainder) = String::from_bytes(remainder)?;\n                let (entry_point, remainder) = String::from_bytes(remainder)?;\n                let (args, remainder) = 
RuntimeArgs::from_bytes(remainder)?;\n                Ok((\n                    ExecutableDeployItem::StoredContractByName {\n                        name,\n                        entry_point,\n                        args,\n                    },\n                    remainder,\n                ))\n            }\n            STORED_VERSIONED_CONTRACT_BY_HASH_TAG => {\n                let (hash, remainder) = ContractPackageHash::from_bytes(remainder)?;\n                let (version, remainder) = Option::<ContractVersion>::from_bytes(remainder)?;\n                let (entry_point, remainder) = String::from_bytes(remainder)?;\n                let (args, remainder) = RuntimeArgs::from_bytes(remainder)?;\n                Ok((\n                    ExecutableDeployItem::StoredVersionedContractByHash {\n                        hash,\n                        version,\n                        entry_point,\n                        args,\n                    },\n                    remainder,\n                ))\n            }\n            STORED_VERSIONED_CONTRACT_BY_NAME_TAG => {\n                let (name, remainder) = String::from_bytes(remainder)?;\n                let (version, remainder) = Option::<ContractVersion>::from_bytes(remainder)?;\n                let (entry_point, remainder) = String::from_bytes(remainder)?;\n                let (args, remainder) = RuntimeArgs::from_bytes(remainder)?;\n                Ok((\n                    ExecutableDeployItem::StoredVersionedContractByName {\n                        name,\n                        version,\n                        entry_point,\n                        args,\n                    },\n                    remainder,\n                ))\n            }\n            TRANSFER_TAG => {\n                let (args, remainder) = RuntimeArgs::from_bytes(remainder)?;\n                Ok((ExecutableDeployItem::Transfer { args }, remainder))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        
}\n    }\n}\n\nimpl Display for ExecutableDeployItem {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            ExecutableDeployItem::ModuleBytes { module_bytes, .. } => {\n                write!(f, \"module-bytes [{} bytes]\", module_bytes.len())\n            }\n            ExecutableDeployItem::StoredContractByHash {\n                hash, entry_point, ..\n            } => write!(\n                f,\n                \"stored-contract-by-hash: {:10}, entry-point: {}\",\n                HexFmt(hash),\n                entry_point,\n            ),\n            ExecutableDeployItem::StoredContractByName {\n                name, entry_point, ..\n            } => write!(\n                f,\n                \"stored-contract-by-name: {}, entry-point: {}\",\n                name, entry_point,\n            ),\n            ExecutableDeployItem::StoredVersionedContractByHash {\n                hash,\n                version: Some(ver),\n                entry_point,\n                ..\n            } => write!(\n                f,\n                \"stored-versioned-contract-by-hash: {:10}, version: {}, entry-point: {}\",\n                HexFmt(hash),\n                ver,\n                entry_point,\n            ),\n            ExecutableDeployItem::StoredVersionedContractByHash {\n                hash, entry_point, ..\n            } => write!(\n                f,\n                \"stored-versioned-contract-by-hash: {:10}, version: latest, entry-point: {}\",\n                HexFmt(hash),\n                entry_point,\n            ),\n            ExecutableDeployItem::StoredVersionedContractByName {\n                name,\n                version: Some(ver),\n                entry_point,\n                ..\n            } => write!(\n                f,\n                \"stored-versioned-contract: {}, version: {}, entry-point: {}\",\n                name, ver, entry_point,\n            ),\n            
ExecutableDeployItem::StoredVersionedContractByName {\n                name, entry_point, ..\n            } => write!(\n                f,\n                \"stored-versioned-contract: {}, version: latest, entry-point: {}\",\n                name, entry_point,\n            ),\n            ExecutableDeployItem::Transfer { .. } => write!(f, \"transfer\"),\n        }\n    }\n}\n\nimpl Debug for ExecutableDeployItem {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            ExecutableDeployItem::ModuleBytes { module_bytes, args } => f\n                .debug_struct(\"ModuleBytes\")\n                .field(\"module_bytes\", &format!(\"[{} bytes]\", module_bytes.len()))\n                .field(\"args\", args)\n                .finish(),\n            ExecutableDeployItem::StoredContractByHash {\n                hash,\n                entry_point,\n                args,\n            } => f\n                .debug_struct(\"StoredContractByHash\")\n                .field(\"hash\", &base16::encode_lower(hash))\n                .field(\"entry_point\", &entry_point)\n                .field(\"args\", args)\n                .finish(),\n            ExecutableDeployItem::StoredContractByName {\n                name,\n                entry_point,\n                args,\n            } => f\n                .debug_struct(\"StoredContractByName\")\n                .field(\"name\", &name)\n                .field(\"entry_point\", &entry_point)\n                .field(\"args\", args)\n                .finish(),\n            ExecutableDeployItem::StoredVersionedContractByHash {\n                hash,\n                version,\n                entry_point,\n                args,\n            } => f\n                .debug_struct(\"StoredVersionedContractByHash\")\n                .field(\"hash\", &base16::encode_lower(hash))\n                .field(\"version\", version)\n                .field(\"entry_point\", &entry_point)\n                
.field(\"args\", args)\n                .finish(),\n            ExecutableDeployItem::StoredVersionedContractByName {\n                name,\n                version,\n                entry_point,\n                args,\n            } => f\n                .debug_struct(\"StoredVersionedContractByName\")\n                .field(\"name\", &name)\n                .field(\"version\", version)\n                .field(\"entry_point\", &entry_point)\n                .field(\"args\", args)\n                .finish(),\n            ExecutableDeployItem::Transfer { args } => {\n                f.debug_struct(\"Transfer\").field(\"args\", args).finish()\n            }\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<ExecutableDeployItem> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> ExecutableDeployItem {\n        fn random_bytes<R: Rng + ?Sized>(rng: &mut R) -> Vec<u8> {\n            let mut bytes = vec![0u8; rng.gen_range(0..100)];\n            rng.fill_bytes(bytes.as_mut());\n            bytes\n        }\n\n        fn random_string<R: Rng + ?Sized>(rng: &mut R) -> String {\n            rng.sample_iter(&Alphanumeric)\n                .take(20)\n                .map(char::from)\n                .collect()\n        }\n\n        let mut args = RuntimeArgs::new();\n        let _ = args.insert(random_string(rng), Bytes::from(random_bytes(rng)));\n\n        match rng.gen_range(0..5) {\n            0 => ExecutableDeployItem::ModuleBytes {\n                module_bytes: random_bytes(rng).into(),\n                args,\n            },\n            1 => ExecutableDeployItem::StoredContractByHash {\n                hash: ContractHash::new(rng.gen()),\n                entry_point: random_string(rng),\n                args,\n            },\n            2 => ExecutableDeployItem::StoredContractByName {\n                name: random_string(rng),\n                entry_point: random_string(rng),\n                args,\n            
},\n            3 => ExecutableDeployItem::StoredVersionedContractByHash {\n                hash: ContractPackageHash::new(rng.gen()),\n                version: rng.gen(),\n                entry_point: random_string(rng),\n                args,\n            },\n            4 => ExecutableDeployItem::StoredVersionedContractByName {\n                name: random_string(rng),\n                version: rng.gen(),\n                entry_point: random_string(rng),\n                args,\n            },\n            5 => {\n                let amount = rng.gen_range(2_500_000_000_u64..1_000_000_000_000_000);\n                let mut transfer_args = RuntimeArgs::new();\n                transfer_args.insert_cl_value(\n                    ARG_AMOUNT,\n                    CLValue::from_t(U512::from(amount)).expect(\"should get CLValue from U512\"),\n                );\n                ExecutableDeployItem::Transfer {\n                    args: transfer_args,\n                }\n            }\n            _ => unreachable!(),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn serialization_roundtrip() {\n        let rng = &mut TestRng::new();\n        for _ in 0..10 {\n            let executable_deploy_item = ExecutableDeployItem::random(rng);\n            bytesrepr::test_serialization_roundtrip(&executable_deploy_item);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/deploy.rs",
    "content": "pub mod deploy_category;\nmod deploy_hash;\nmod deploy_header;\nmod deploy_id;\nmod error;\nmod executable_deploy_item;\n\nuse alloc::{collections::BTreeSet, vec::Vec};\nuse core::{\n    cmp,\n    fmt::{self, Debug, Display, Formatter},\n    hash,\n};\n\n#[cfg(any(feature = \"std\", test))]\nuse std::convert::TryFrom;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"std\", test))]\nuse itertools::Itertools;\n#[cfg(feature = \"json-schema\")]\nuse once_cell::sync::Lazy;\n#[cfg(any(feature = \"once_cell\", test))]\nuse once_cell::sync::OnceCell;\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\n#[cfg(any(feature = \"std\", test))]\nuse serde::{Deserialize, Serialize};\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\nuse tracing::{debug, warn};\n\n#[cfg(any(feature = \"std\", test))]\nuse super::{get_lane_for_non_install_wasm, InitiatorAddr, InitiatorAddrAndSecretKey, PricingMode};\n#[cfg(any(\n    all(feature = \"std\", feature = \"testing\"),\n    feature = \"json-schema\",\n    test\n))]\nuse crate::runtime_args;\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\nuse crate::{\n    bytesrepr::Bytes,\n    system::auction::{\n        ARG_AMOUNT as ARG_AUCTION_AMOUNT, ARG_DELEGATION_RATE, ARG_DELEGATOR, ARG_NEW_VALIDATOR,\n        ARG_PUBLIC_KEY as ARG_AUCTION_PUBLIC_KEY, ARG_VALIDATOR, METHOD_ADD_BID, METHOD_DELEGATE,\n        METHOD_REDELEGATE, METHOD_UNDELEGATE, METHOD_WITHDRAW_BID,\n    },\n    testing::TestRng,\n    transaction::RuntimeArgs,\n    AddressableEntityHash, URef, DEFAULT_MAX_PAYMENT_MOTES, DEFAULT_MIN_TRANSFER_MOTES,\n};\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    crypto,\n    transaction::{Approval, ApprovalsHash},\n    Digest, DisplayIter, PublicKey, SecretKey, TimeDiff, Timestamp,\n};\n\n#[cfg(any(feature = \"std\", test))]\nuse 
crate::{chainspec::PricingHandling, Chainspec, Phase, TransactionV1Config, MINT_LANE_ID};\n#[cfg(any(feature = \"std\", test))]\nuse crate::{system::auction::ARG_AMOUNT, transaction::GasLimited, Gas, Motes, U512};\npub use deploy_hash::DeployHash;\npub use deploy_header::DeployHeader;\npub use deploy_id::DeployId;\npub use error::{\n    DecodeFromJsonError as DeployDecodeFromJsonError, Error as DeployError,\n    ExcessiveSizeError as DeployExcessiveSizeError, InvalidDeploy,\n};\npub use executable_deploy_item::{ExecutableDeployItem, ExecutableDeployItemIdentifier};\n\n#[cfg(feature = \"json-schema\")]\nstatic DEPLOY: Lazy<Deploy> = Lazy::new(|| {\n    let payment_args = runtime_args! {\n        \"amount\" => 1000\n    };\n    let payment = ExecutableDeployItem::StoredContractByName {\n        name: String::from(\"casper-example\"),\n        entry_point: String::from(\"example-entry-point\"),\n        args: payment_args,\n    };\n    let session_args = runtime_args! {\n        \"amount\" => 1000\n    };\n    let session = ExecutableDeployItem::Transfer { args: session_args };\n    let serialized_body = serialize_body(&payment, &session);\n    let body_hash = Digest::hash(serialized_body);\n\n    let secret_key = SecretKey::example();\n    let timestamp = *Timestamp::example();\n    let header = DeployHeader::new(\n        PublicKey::from(secret_key),\n        timestamp,\n        TimeDiff::from_seconds(3_600),\n        1,\n        body_hash,\n        vec![DeployHash::new(Digest::from([1u8; Digest::LENGTH]))],\n        String::from(\"casper-example\"),\n    );\n    let serialized_header = serialize_header(&header);\n    let hash = DeployHash::new(Digest::hash(serialized_header));\n\n    let mut approvals = BTreeSet::new();\n    let approval = Approval::create(&hash.into(), secret_key);\n    approvals.insert(approval);\n\n    Deploy {\n        hash,\n        header,\n        payment,\n        session,\n        approvals,\n        is_valid: OnceCell::new(),\n    
}\n});\n\n/// A signed smart contract.\n#[derive(Clone, Eq, Debug)]\n#[cfg_attr(\n    any(feature = \"std\", test),\n    derive(Serialize, Deserialize),\n    serde(deny_unknown_fields)\n)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"A signed smart contract.\")\n)]\npub struct Deploy {\n    hash: DeployHash,\n    header: DeployHeader,\n    payment: ExecutableDeployItem,\n    session: ExecutableDeployItem,\n    approvals: BTreeSet<Approval>,\n    #[cfg_attr(any(all(feature = \"std\", feature = \"once_cell\"), test), serde(skip))]\n    #[cfg_attr(\n        all(any(feature = \"once_cell\", test), feature = \"datasize\"),\n        data_size(skip)\n    )]\n    #[cfg(any(feature = \"once_cell\", test))]\n    is_valid: OnceCell<Result<(), InvalidDeploy>>,\n}\n\nimpl Deploy {\n    /// Constructs a new `Deploy`.\n    pub fn new(\n        hash: DeployHash,\n        header: DeployHeader,\n        payment: ExecutableDeployItem,\n        session: ExecutableDeployItem,\n    ) -> Deploy {\n        Deploy {\n            hash,\n            header,\n            payment,\n            session,\n            approvals: BTreeSet::new(),\n            #[cfg(any(feature = \"once_cell\", test))]\n            is_valid: OnceCell::new(),\n        }\n    }\n    /// Constructs a new signed `Deploy`.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    #[allow(clippy::too_many_arguments)]\n    pub fn new_signed(\n        timestamp: Timestamp,\n        ttl: TimeDiff,\n        gas_price: u64,\n        dependencies: Vec<DeployHash>,\n        chain_name: String,\n        payment: ExecutableDeployItem,\n        session: ExecutableDeployItem,\n        secret_key: &SecretKey,\n        account: Option<PublicKey>,\n    ) -> Deploy {\n        let account_and_secret_key = match account {\n            Some(account) => InitiatorAddrAndSecretKey::Both {\n                
initiator_addr: InitiatorAddr::PublicKey(account),\n                secret_key,\n            },\n            None => InitiatorAddrAndSecretKey::SecretKey(secret_key),\n        };\n\n        Deploy::build(\n            timestamp,\n            ttl,\n            gas_price,\n            dependencies,\n            chain_name,\n            payment,\n            session,\n            account_and_secret_key,\n        )\n    }\n\n    #[cfg(any(feature = \"std\", test))]\n    #[allow(clippy::too_many_arguments)]\n    fn build(\n        timestamp: Timestamp,\n        ttl: TimeDiff,\n        gas_price: u64,\n        dependencies: Vec<DeployHash>,\n        chain_name: String,\n        payment: ExecutableDeployItem,\n        session: ExecutableDeployItem,\n        initiator_addr_and_secret_key: InitiatorAddrAndSecretKey,\n    ) -> Deploy {\n        let serialized_body = serialize_body(&payment, &session);\n        let body_hash = Digest::hash(serialized_body);\n\n        let account = match initiator_addr_and_secret_key.initiator_addr() {\n            InitiatorAddr::PublicKey(public_key) => public_key,\n            InitiatorAddr::AccountHash(_) => unreachable!(),\n        };\n\n        let dependencies = dependencies.into_iter().unique().collect();\n        let header = DeployHeader::new(\n            account,\n            timestamp,\n            ttl,\n            gas_price,\n            body_hash,\n            dependencies,\n            chain_name,\n        );\n        let serialized_header = serialize_header(&header);\n        let hash = DeployHash::new(Digest::hash(serialized_header));\n\n        let mut deploy = Deploy {\n            hash,\n            header,\n            payment,\n            session,\n            approvals: BTreeSet::new(),\n            #[cfg(any(feature = \"once_cell\", test))]\n            is_valid: OnceCell::new(),\n        };\n\n        if let Some(secret_key) = initiator_addr_and_secret_key.secret_key() {\n            deploy.sign(secret_key);\n       
 }\n        deploy\n    }\n\n    /// Returns the `DeployHash` identifying this `Deploy`.\n    pub fn hash(&self) -> &DeployHash {\n        &self.hash\n    }\n\n    /// Returns the public key of the account providing the context in which to run the `Deploy`.\n    pub fn account(&self) -> &PublicKey {\n        self.header.account()\n    }\n\n    /// Returns the creation timestamp of the `Deploy`.\n    pub fn timestamp(&self) -> Timestamp {\n        self.header.timestamp()\n    }\n\n    /// Returns the duration after the creation timestamp for which the `Deploy` will stay valid.\n    ///\n    /// After this duration has ended, the `Deploy` will be considered expired.\n    pub fn ttl(&self) -> TimeDiff {\n        self.header.ttl()\n    }\n\n    /// Returns `true` if the `Deploy` has expired.\n    pub fn expired(&self, current_instant: Timestamp) -> bool {\n        self.header.expired(current_instant)\n    }\n\n    /// Returns the sender's gas price tolerance for block inclusion.\n    pub fn gas_price(&self) -> u64 {\n        self.header.gas_price()\n    }\n\n    /// Returns the hash of the body (i.e. 
the Wasm code) of the `Deploy`.\n    pub fn body_hash(&self) -> &Digest {\n        self.header.body_hash()\n    }\n\n    /// Returns the name of the chain the `Deploy` should be executed on.\n    pub fn chain_name(&self) -> &str {\n        self.header.chain_name()\n    }\n\n    /// Returns a reference to the `DeployHeader` of this `Deploy`.\n    pub fn header(&self) -> &DeployHeader {\n        &self.header\n    }\n\n    /// Consumes `self`, returning the `DeployHeader` of this `Deploy`.\n    pub fn take_header(self) -> DeployHeader {\n        self.header\n    }\n\n    /// Returns the `ExecutableDeployItem` for payment code.\n    pub fn payment(&self) -> &ExecutableDeployItem {\n        &self.payment\n    }\n\n    /// Returns the `ExecutableDeployItem` for session code.\n    pub fn session(&self) -> &ExecutableDeployItem {\n        &self.session\n    }\n\n    /// Returns the `Approval`s for this deploy.\n    pub fn approvals(&self) -> &BTreeSet<Approval> {\n        &self.approvals\n    }\n\n    /// Consumes `self`, returning a tuple of its constituent parts.\n    pub fn destructure(\n        self,\n    ) -> (\n        DeployHash,\n        DeployHeader,\n        ExecutableDeployItem,\n        ExecutableDeployItem,\n        BTreeSet<Approval>,\n    ) {\n        (\n            self.hash,\n            self.header,\n            self.payment,\n            self.session,\n            self.approvals,\n        )\n    }\n\n    /// Adds a signature of this `Deploy`'s hash to its approvals.\n    pub fn sign(&mut self, secret_key: &SecretKey) {\n        let approval = Approval::create(&self.hash.into(), secret_key);\n        self.approvals.insert(approval);\n    }\n\n    /// Returns the `ApprovalsHash` of this `Deploy`'s approvals.\n    pub fn compute_approvals_hash(&self) -> Result<ApprovalsHash, bytesrepr::Error> {\n        ApprovalsHash::compute(&self.approvals)\n    }\n\n    /// Returns `true` if the serialized size of the deploy is not greater than\n    /// 
`max_transaction_size`.\n    #[cfg(any(feature = \"std\", test))]\n    pub fn is_valid_size(&self, max_transaction_size: u32) -> Result<(), DeployExcessiveSizeError> {\n        let deploy_size = self.serialized_length();\n        if deploy_size > max_transaction_size as usize {\n            return Err(DeployExcessiveSizeError {\n                max_transaction_size,\n                actual_deploy_size: deploy_size,\n            });\n        }\n        Ok(())\n    }\n\n    /// Returns `Ok` if and only if this `Deploy`'s body hashes to the value of `body_hash()`, and\n    /// if this `Deploy`'s header hashes to the value claimed as the deploy hash.\n    pub fn has_valid_hash(&self) -> Result<(), InvalidDeploy> {\n        let serialized_body = serialize_body(&self.payment, &self.session);\n        let body_hash = Digest::hash(serialized_body);\n        if body_hash != *self.header.body_hash() {\n            #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n            warn!(?self, ?body_hash, \"invalid deploy body hash\");\n            return Err(InvalidDeploy::InvalidBodyHash);\n        }\n\n        let serialized_header = serialize_header(&self.header);\n        let hash = DeployHash::new(Digest::hash(serialized_header));\n        if hash != self.hash {\n            #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n            warn!(?self, ?hash, \"invalid deploy hash\");\n            return Err(InvalidDeploy::InvalidDeployHash);\n        }\n        Ok(())\n    }\n\n    /// Returns `Ok` if and only if:\n    ///   * the deploy hash is correct (should be the hash of the header), and\n    ///   * the body hash is correct (should be the hash of the body), and\n    ///   * approvals are non empty, and\n    ///   * all approvals are valid signatures of the deploy hash\n    pub fn is_valid(&self) -> Result<(), InvalidDeploy> {\n        #[cfg(any(feature = \"once_cell\", test))]\n        return self.is_valid.get_or_init(|| 
validate_deploy(self)).clone();\n\n        #[cfg(not(any(feature = \"once_cell\", test)))]\n        validate_deploy(self)\n    }\n\n    /// Returns `true` if this deploy is a native transfer.\n    pub fn is_transfer(&self) -> bool {\n        self.session.is_transfer()\n    }\n\n    /// Should this transaction start in the initiating accounts context?\n    pub fn is_account_session(&self) -> bool {\n        // legacy deploys are always initiated by an account\n        true\n    }\n\n    /// Returns `Ok` if and only if:\n    ///   * the chain_name is correct,\n    ///   * the configured parameters are complied with at the given timestamp\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn is_config_compliant(\n        &self,\n        chainspec: &Chainspec,\n        timestamp_leeway: TimeDiff,\n        at: Timestamp,\n    ) -> Result<(), InvalidDeploy> {\n        let config = &chainspec.transaction_config;\n\n        if !config.runtime_config.vm_casper_v1 {\n            // Not config compliant if V1 runtime is disabled.\n            return Err(InvalidDeploy::InvalidRuntime);\n        }\n        let pricing_handling = chainspec.core_config.pricing_handling;\n        let v1_config = &chainspec.transaction_config.transaction_v1_config;\n        let lane_id = calculate_lane_id_for_deploy(self, pricing_handling, v1_config)?;\n        let lane_definition = v1_config\n            .get_lane_by_id(lane_id)\n            .ok_or(InvalidDeploy::NoLaneMatch)?;\n\n        self.is_valid_size(lane_definition.max_transaction_length() as u32)?;\n\n        let header = self.header();\n        let chain_name = &chainspec.network_config.name;\n\n        if header.chain_name() != chain_name {\n            debug!(\n                deploy_hash = %self.hash(),\n                deploy_header = %header,\n                chain_name = %header.chain_name(),\n                \"invalid chain identifier\"\n            );\n            return 
Err(InvalidDeploy::InvalidChainName {\n                expected: chain_name.to_string(),\n                got: header.chain_name().to_string(),\n            });\n        }\n\n        let min_gas_price = chainspec.vacancy_config.min_gas_price;\n        let gas_price_tolerance = self.gas_price_tolerance()?;\n        if gas_price_tolerance < min_gas_price {\n            return Err(InvalidDeploy::GasPriceToleranceTooLow {\n                min_gas_price_tolerance: min_gas_price,\n                provided_gas_price_tolerance: gas_price_tolerance,\n            });\n        }\n\n        header.is_valid(config, timestamp_leeway, at, &self.hash)?;\n\n        let max_associated_keys = chainspec.core_config.max_associated_keys;\n        if self.approvals.len() > max_associated_keys as usize {\n            debug!(\n                deploy_hash = %self.hash(),\n                number_of_associated_keys = %self.approvals.len(),\n                max_associated_keys = %max_associated_keys,\n                \"number of associated keys exceeds the maximum limit\"\n            );\n            return Err(InvalidDeploy::ExcessiveApprovals {\n                got: self.approvals.len() as u32,\n                max_associated_keys,\n            });\n        }\n\n        let gas_limit = self.gas_limit(chainspec)?;\n        if gas_limit == Gas::zero() {\n            return Err(InvalidDeploy::InvalidPaymentAmount);\n        }\n\n        let block_gas_limit = Gas::new(config.block_gas_limit);\n        if gas_limit > block_gas_limit {\n            debug!(\n                payment_amount = %gas_limit,\n                %block_gas_limit,\n                    \"transaction gas limit exceeds block gas limit\"\n            );\n            return Err(InvalidDeploy::ExceededBlockGasLimit {\n                block_gas_limit: config.block_gas_limit,\n                got: Box::new(gas_limit.value()),\n            });\n        }\n        let lane_limit = lane_definition.max_transaction_gas_limit();\n        
let lane_limit_as_gas = Gas::new(lane_limit);\n        if gas_limit > lane_limit_as_gas {\n            debug!(\n                calculated_lane = lane_definition.id(),\n                payment_amount = %gas_limit,\n                %block_gas_limit,\n                    \"transaction gas limit exceeds lane limit\"\n            );\n            return Err(InvalidDeploy::ExceededLaneGasLimit {\n                lane_gas_limit: lane_limit,\n                got: Box::new(gas_limit.value()),\n            });\n        }\n\n        let payment_args_length = self.payment().args().serialized_length();\n        if payment_args_length > config.deploy_config.payment_args_max_length as usize {\n            debug!(\n                payment_args_length,\n                payment_args_max_length = config.deploy_config.payment_args_max_length,\n                \"payment args excessive\"\n            );\n            return Err(InvalidDeploy::ExcessivePaymentArgsLength {\n                max_length: config.deploy_config.payment_args_max_length as usize,\n                got: payment_args_length,\n            });\n        }\n\n        let session_args_length = self.session().args().serialized_length();\n        if session_args_length > config.deploy_config.session_args_max_length as usize {\n            debug!(\n                session_args_length,\n                session_args_max_length = config.deploy_config.session_args_max_length,\n                \"session args excessive\"\n            );\n            return Err(InvalidDeploy::ExcessiveSessionArgsLength {\n                max_length: config.deploy_config.session_args_max_length as usize,\n                got: session_args_length,\n            });\n        }\n\n        if self.session().is_transfer() {\n            let item = self.session().clone();\n            let attempted = item\n                .args()\n                .get(ARG_AMOUNT)\n                .ok_or_else(|| {\n                    debug!(\"missing transfer 'amount' 
runtime argument\");\n                    InvalidDeploy::MissingTransferAmount\n                })?\n                .clone()\n                .into_t::<U512>()\n                .map_err(|_| {\n                    debug!(\"failed to parse transfer 'amount' runtime argument as a U512\");\n                    InvalidDeploy::FailedToParseTransferAmount\n                })?;\n            let minimum = U512::from(config.native_transfer_minimum_motes);\n            if attempted < minimum {\n                debug!(\n                    minimum = %config.native_transfer_minimum_motes,\n                    amount = %attempted,\n                    \"insufficient transfer amount\"\n                );\n                return Err(InvalidDeploy::InsufficientTransferAmount {\n                    minimum: Box::new(minimum),\n                    attempted: Box::new(attempted),\n                });\n            }\n        } else {\n            let payment_args = self.payment().args();\n            let payment_amount = payment_args\n                .get(ARG_AMOUNT)\n                .ok_or_else(|| {\n                    debug!(\"missing transfer 'amount' runtime argument\");\n                    InvalidDeploy::MissingTransferAmount\n                })?\n                .clone()\n                .into_t::<U512>()\n                .map_err(|_| {\n                    debug!(\"failed to parse transfer 'amount' runtime argument as a U512\");\n                    InvalidDeploy::FailedToParseTransferAmount\n                })?;\n            if payment_amount < U512::from(chainspec.core_config.baseline_motes_amount) {\n                return Err(InvalidDeploy::InvalidPaymentAmount);\n            }\n        }\n\n        Ok(())\n    }\n\n    // This method is not intended to be used by third party crates.\n    //\n    // It is required to allow finalized approvals to be injected after reading a `Deploy` from\n    // storage.\n    #[doc(hidden)]\n    pub fn with_approvals(mut self, approvals: 
BTreeSet<Approval>) -> Self {\n        self.approvals = approvals;\n        self\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    #[cfg(feature = \"json-schema\")]\n    pub fn example() -> &'static Self {\n        &DEPLOY\n    }\n\n    /// Returns a random `Deploy`.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        let timestamp = Timestamp::random(rng);\n        let ttl = TimeDiff::from_seconds(rng.gen_range(60..300));\n        Deploy::random_with_timestamp_and_ttl(rng, timestamp, ttl)\n    }\n\n    /// Returns a random `Deploy` but using the specified `timestamp` and `ttl`.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_timestamp_and_ttl(\n        rng: &mut TestRng,\n        timestamp: Timestamp,\n        ttl: TimeDiff,\n    ) -> Self {\n        let gas_price = rng.gen_range(1..100);\n\n        let dependencies = vec![];\n        let chain_name = String::from(\"casper-example\");\n\n        // We need \"amount\" in order to be able to get correct info via `deploy_info()`.\n        let payment_args = runtime_args! 
{\n            \"amount\" => U512::from(DEFAULT_MAX_PAYMENT_MOTES),\n        };\n        let payment = ExecutableDeployItem::StoredContractByName {\n            name: String::from(\"casper-example\"),\n            entry_point: String::from(\"example-entry-point\"),\n            args: payment_args,\n        };\n\n        let session = rng.gen();\n\n        let secret_key = SecretKey::random(rng);\n\n        Deploy::new_signed(\n            timestamp,\n            ttl,\n            gas_price,\n            dependencies,\n            chain_name,\n            payment,\n            session,\n            &secret_key,\n            None,\n        )\n    }\n\n    /// Turns `self` into an invalid `Deploy` by clearing the `chain_name`, invalidating the deploy\n    /// hash.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn invalidate(&mut self) {\n        self.header.invalidate();\n    }\n\n    /// Returns a random `Deploy` for a native transfer.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_valid_native_transfer(rng: &mut TestRng) -> Self {\n        let timestamp = Timestamp::now();\n        let ttl = TimeDiff::from_seconds(rng.gen_range(60..300));\n        Self::random_valid_native_transfer_with_timestamp_and_ttl(rng, timestamp, ttl)\n    }\n\n    /// Returns a random `Deploy` for a native transfer with timestamp and ttl.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_valid_native_transfer_with_timestamp_and_ttl(\n        rng: &mut TestRng,\n        timestamp: Timestamp,\n        ttl: TimeDiff,\n    ) -> Self {\n        let deploy = Self::random_with_timestamp_and_ttl(rng, timestamp, ttl);\n        let transfer_args = runtime_args! 
{\n            \"amount\" => U512::from(DEFAULT_MIN_TRANSFER_MOTES),\n            \"source\" => PublicKey::random(rng).to_account_hash(),\n            \"target\" => PublicKey::random(rng).to_account_hash(),\n        };\n        let payment_amount = 10_000_000_000u64;\n        let payment_args = runtime_args! {\n            \"amount\" => U512::from(payment_amount),\n        };\n        let session = ExecutableDeployItem::Transfer {\n            args: transfer_args,\n        };\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: payment_args,\n        };\n        let secret_key = SecretKey::random(rng);\n        Deploy::new_signed(\n            timestamp,\n            ttl,\n            deploy.header.gas_price(),\n            deploy.header.dependencies().clone(),\n            deploy.header.chain_name().to_string(),\n            payment,\n            session,\n            &secret_key,\n            None,\n        )\n    }\n\n    /// Returns a random `Deploy` for a native transfer with no dependencies.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_valid_native_transfer_without_deps(rng: &mut TestRng) -> Self {\n        let deploy = Self::random(rng);\n        let transfer_args = runtime_args! {\n            \"amount\" => U512::from(DEFAULT_MIN_TRANSFER_MOTES),\n            \"source\" => PublicKey::random(rng).to_account_hash(),\n            \"target\" => PublicKey::random(rng).to_account_hash(),\n        };\n        let payment_args = runtime_args! 
{\n            \"amount\" => U512::from(10),\n        };\n        let session = ExecutableDeployItem::Transfer {\n            args: transfer_args,\n        };\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: payment_args,\n        };\n        let secret_key = SecretKey::random(rng);\n        Deploy::new_signed(\n            Timestamp::now(),\n            deploy.header.ttl(),\n            deploy.header.gas_price(),\n            vec![],\n            deploy.header.chain_name().to_string(),\n            payment,\n            session,\n            &secret_key,\n            None,\n        )\n    }\n\n    /// Returns a random invalid `Deploy` without a payment amount specified.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_without_payment_amount(rng: &mut TestRng) -> Self {\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: RuntimeArgs::default(),\n        };\n        Self::random_transfer_with_payment(rng, payment)\n    }\n\n    /// Returns a random invalid `Deploy` with an invalid value for the payment amount.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_mangled_payment_amount(rng: &mut TestRng) -> Self {\n        let payment_args = runtime_args! 
{\n            \"amount\" => \"invalid-argument\"\n        };\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: payment_args,\n        };\n        Self::random_transfer_with_payment(rng, payment)\n    }\n\n    /// Returns a random invalid `Deploy` with insufficient payment amount.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_payment_one(rng: &mut TestRng) -> Self {\n        let timestamp = Timestamp::now();\n        let ttl = TimeDiff::from_seconds(rng.gen_range(60..3600));\n        let payment_args = runtime_args! {\n            \"amount\" => U512::one()\n        };\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: payment_args,\n        };\n        let gas_price = rng.gen_range(1..4);\n\n        let dependencies = vec![];\n        let chain_name = String::from(\"casper-example\");\n        let session = rng.gen();\n\n        let secret_key = SecretKey::random(rng);\n\n        Deploy::new_signed(\n            timestamp,\n            ttl,\n            gas_price,\n            dependencies,\n            chain_name,\n            payment,\n            session,\n            &secret_key,\n            None,\n        )\n    }\n\n    /// Returns a random invalid `Deploy` with insufficient payment amount.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_insufficient_payment_amount(\n        rng: &mut TestRng,\n        payment_amount: U512,\n    ) -> Self {\n        let payment_args = runtime_args! 
{\n            \"amount\" => payment_amount\n        };\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: payment_args,\n        };\n        Self::random_transfer_with_payment(rng, payment)\n    }\n\n    /// Returns a random invalid `Deploy` with an invalid value for the payment amount.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_oversized_payment_amount(rng: &mut TestRng) -> Self {\n        let payment_args = runtime_args! {\n            \"amount\" => U512::from(1_000_000_000_001u64)\n        };\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: payment_args,\n        };\n\n        let session = ExecutableDeployItem::StoredContractByName {\n            name: \"Test\".to_string(),\n            entry_point: \"call\".to_string(),\n            args: Default::default(),\n        };\n\n        let deploy = Self::random_valid_native_transfer(rng);\n        let secret_key = SecretKey::random(rng);\n\n        Deploy::new_signed(\n            deploy.header.timestamp(),\n            deploy.header.ttl(),\n            deploy.header.gas_price(),\n            deploy.header.dependencies().clone(),\n            deploy.header.chain_name().to_string(),\n            payment,\n            session,\n            &secret_key,\n            None,\n        )\n    }\n\n    /// Returns a random `Deploy` with custom payment specified as a stored contract by name.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_valid_custom_payment_contract_by_name(rng: &mut TestRng) -> Self {\n        let payment = ExecutableDeployItem::StoredContractByName {\n            name: \"Test\".to_string(),\n            entry_point: \"call\".to_string(),\n            args: Default::default(),\n        };\n        Self::random_transfer_with_payment(rng, payment)\n    }\n\n    /// Returns a 
random invalid `Deploy` with custom payment specified as a stored contract by\n    /// hash, but missing the runtime args.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_missing_payment_contract_by_hash(rng: &mut TestRng) -> Self {\n        let payment = ExecutableDeployItem::StoredContractByHash {\n            hash: [19; 32].into(),\n            entry_point: \"call\".to_string(),\n            args: Default::default(),\n        };\n        Self::random_transfer_with_payment(rng, payment)\n    }\n\n    /// Returns a random invalid `Deploy` with custom payment specified as a stored contract by\n    /// hash, but calling an invalid entry point.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_missing_entry_point_in_payment_contract(rng: &mut TestRng) -> Self {\n        let payment = ExecutableDeployItem::StoredContractByHash {\n            hash: [19; 32].into(),\n            entry_point: \"non-existent-entry-point\".to_string(),\n            args: Default::default(),\n        };\n        Self::random_transfer_with_payment(rng, payment)\n    }\n\n    /// Returns a random `Deploy` with custom payment specified as a stored versioned contract by\n    /// name.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_versioned_payment_package_by_name(\n        version: Option<u32>,\n        rng: &mut TestRng,\n    ) -> Self {\n        let payment = ExecutableDeployItem::StoredVersionedContractByName {\n            name: \"Test\".to_string(),\n            version,\n            entry_point: \"call\".to_string(),\n            args: Default::default(),\n        };\n        Self::random_transfer_with_payment(rng, payment)\n    }\n\n    /// Returns a random `Deploy` with custom payment specified as a stored versioned contract by\n    /// name.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn 
random_with_valid_custom_payment_package_by_name(rng: &mut TestRng) -> Self {\n        Self::random_with_versioned_payment_package_by_name(None, rng)\n    }\n\n    /// Returns a random invalid `Deploy` with custom payment specified as a stored versioned\n    /// contract by hash, but missing the runtime args.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_missing_payment_package_by_hash(rng: &mut TestRng) -> Self {\n        Self::random_with_payment_package_version_by_hash(None, rng)\n    }\n\n    /// Returns a random invalid `Deploy` with custom payment specified as a stored versioned\n    /// contract by hash, but calling an invalid entry point.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_nonexistent_contract_version_in_payment_package(rng: &mut TestRng) -> Self {\n        let payment = ExecutableDeployItem::StoredVersionedContractByHash {\n            hash: [19; 32].into(),\n            version: Some(6u32),\n            entry_point: \"non-existent-entry-point\".to_string(),\n            args: Default::default(),\n        };\n        Self::random_transfer_with_payment(rng, payment)\n    }\n\n    /// Returns a random invalid `Deploy` with custom payment specified as a stored versioned\n    /// contract by hash, but missing the runtime args.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_payment_package_version_by_hash(\n        version: Option<u32>,\n        rng: &mut TestRng,\n    ) -> Self {\n        let payment = ExecutableDeployItem::StoredVersionedContractByHash {\n            hash: Default::default(),\n            version,\n            entry_point: \"call\".to_string(),\n            args: Default::default(),\n        };\n        Self::random_transfer_with_payment(rng, payment)\n    }\n\n    /// Returns a random `Deploy` with custom session specified as a stored contract by name.\n    #[cfg(any(all(feature = \"std\", 
feature = \"testing\"), test))]\n    pub fn random_with_valid_session_contract_by_name(rng: &mut TestRng) -> Self {\n        let session = ExecutableDeployItem::StoredContractByName {\n            name: \"Test\".to_string(),\n            entry_point: \"call\".to_string(),\n            args: Default::default(),\n        };\n        Self::random_transfer_with_session(rng, session)\n    }\n\n    /// Returns a random invalid `Deploy` with custom session specified as a stored contract by\n    /// hash, but missing the runtime args.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_missing_session_contract_by_hash(rng: &mut TestRng) -> Self {\n        let session = ExecutableDeployItem::StoredContractByHash {\n            hash: Default::default(),\n            entry_point: \"call\".to_string(),\n            args: Default::default(),\n        };\n        Self::random_transfer_with_session(rng, session)\n    }\n\n    /// Returns a random invalid `Deploy` with custom session specified as a stored contract by\n    /// hash, but calling an invalid entry point.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_missing_entry_point_in_session_contract(rng: &mut TestRng) -> Self {\n        let timestamp = Timestamp::now();\n        let ttl = TimeDiff::from_seconds(rng.gen_range(60..3600));\n        let session = ExecutableDeployItem::StoredContractByHash {\n            hash: [19; 32].into(),\n            entry_point: \"non-existent-entry-point\".to_string(),\n            args: Default::default(),\n        };\n\n        let payment_amount = 10_000_000_000u64;\n        let payment_args = runtime_args! 
{\n            \"amount\" => U512::from(payment_amount)\n        };\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: payment_args,\n        };\n        let gas_price = rng.gen_range(1..4);\n\n        let dependencies = vec![];\n        let chain_name = String::from(\"casper-example\");\n\n        let secret_key = SecretKey::random(rng);\n\n        Deploy::new_signed(\n            timestamp,\n            ttl,\n            gas_price,\n            dependencies,\n            chain_name,\n            payment,\n            session,\n            &secret_key,\n            None,\n        )\n    }\n\n    /// Returns a random `Deploy` with custom session specified as a stored versioned contract by\n    /// name.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_valid_session_package_by_name(rng: &mut TestRng) -> Self {\n        Self::random_with_versioned_session_package_by_name(None, rng)\n    }\n\n    /// Returns a random `Deploy` with custom session specified as a stored versioned contract by\n    /// name.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_versioned_session_package_by_name(\n        version: Option<u32>,\n        rng: &mut TestRng,\n    ) -> Self {\n        let session = ExecutableDeployItem::StoredVersionedContractByName {\n            name: \"Test\".to_string(),\n            version,\n            entry_point: \"call\".to_string(),\n            args: Default::default(),\n        };\n        Self::random_transfer_with_session(rng, session)\n    }\n\n    /// Returns a random deploy with custom session specified as a stored versioned contract by\n    /// name.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_contract_by_name(\n        rng: &mut TestRng,\n        maybe_secret_key: Option<SecretKey>,\n        maybe_contract_name: Option<String>,\n        
maybe_entry_point_name: Option<String>,\n        maybe_timestamp: Option<Timestamp>,\n        maybe_ttl: Option<TimeDiff>,\n    ) -> Self {\n        let payment_args = runtime_args! {\n            \"amount\" => U512::from(10),\n        };\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: payment_args,\n        };\n        let contract_name = maybe_contract_name.unwrap_or_else(|| \"Test\".to_string());\n        let entry_point_name = maybe_entry_point_name.unwrap_or_else(|| \"Test\".to_string());\n        let session = ExecutableDeployItem::StoredVersionedContractByName {\n            name: contract_name,\n            version: None,\n            entry_point: entry_point_name,\n            args: Default::default(),\n        };\n        let secret_key = match maybe_secret_key {\n            None => SecretKey::random(rng),\n            Some(secret_key) => secret_key,\n        };\n        let timestamp = maybe_timestamp.unwrap_or_else(Timestamp::now);\n        let ttl = match maybe_ttl {\n            None => TimeDiff::from_seconds(rng.gen_range(60..3600)),\n            Some(ttl) => ttl,\n        };\n        Deploy::new_signed(\n            timestamp,\n            ttl,\n            1,\n            vec![],\n            \"test_chain\".to_string(),\n            payment,\n            session,\n            &secret_key,\n            None,\n        )\n    }\n\n    /// Returns a random invalid `Deploy` with custom session specified as a stored versioned\n    /// contract by hash, but missing the runtime args.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_missing_session_package_by_hash(rng: &mut TestRng) -> Self {\n        Self::random_with_versioned_session_package_by_hash(None, rng)\n    }\n\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_versioned_session_package_by_hash(\n        version: Option<u32>,\n        rng: 
&mut TestRng,\n    ) -> Self {\n        let session = ExecutableDeployItem::StoredVersionedContractByHash {\n            hash: Default::default(),\n            version,\n            entry_point: \"call\".to_string(),\n            args: Default::default(),\n        };\n        Self::random_transfer_with_session(rng, session)\n    }\n\n    /// Returns a random invalid transfer `Deploy` with the \"target\" runtime arg missing.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_without_transfer_target(rng: &mut TestRng) -> Self {\n        let transfer_args = runtime_args! {\n            \"amount\" => U512::from(DEFAULT_MIN_TRANSFER_MOTES),\n            \"source\" => PublicKey::random(rng).to_account_hash(),\n        };\n        let session = ExecutableDeployItem::Transfer {\n            args: transfer_args,\n        };\n        Self::random_transfer_with_session(rng, session)\n    }\n\n    /// Returns a random invalid transfer `Deploy` with the \"amount\" runtime arg missing.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_without_transfer_amount(rng: &mut TestRng) -> Self {\n        let transfer_args = runtime_args! {\n            \"source\" => PublicKey::random(rng).to_account_hash(),\n            \"target\" => PublicKey::random(rng).to_account_hash(),\n        };\n        let session = ExecutableDeployItem::Transfer {\n            args: transfer_args,\n        };\n        Self::random_transfer_with_session(rng, session)\n    }\n\n    /// Returns a random invalid transfer `Deploy` with an invalid \"amount\" runtime arg.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_mangled_transfer_amount(rng: &mut TestRng) -> Self {\n        let transfer_args = runtime_args! 
{\n            \"amount\" => \"mangled-transfer-amount\",\n            \"source\" => PublicKey::random(rng).to_account_hash(),\n            \"target\" => PublicKey::random(rng).to_account_hash(),\n        };\n        let session = ExecutableDeployItem::Transfer {\n            args: transfer_args,\n        };\n        Self::random_transfer_with_session(rng, session)\n    }\n\n    /// Returns a random invalid `Deploy` with empty session bytes.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_empty_session_module_bytes(rng: &mut TestRng) -> Self {\n        let session = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: Default::default(),\n        };\n        let timestamp = Timestamp::now();\n        let ttl = TimeDiff::from_seconds(rng.gen_range(60..3600));\n        let amount = 10_000_000_000u64;\n        let payment_args = runtime_args! {\n            \"amount\" => U512::from(amount)\n        };\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: payment_args,\n        };\n        let gas_price = 1;\n\n        let dependencies = vec![];\n        let chain_name = String::from(\"casper-example\");\n\n        let secret_key = SecretKey::random(rng);\n\n        Deploy::new_signed(\n            timestamp,\n            ttl,\n            gas_price,\n            dependencies,\n            chain_name,\n            payment,\n            session,\n            &secret_key,\n            None,\n        )\n    }\n\n    /// Returns a random invalid `Deploy` with an expired TTL.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_expired_deploy(rng: &mut TestRng) -> Self {\n        let deploy = Self::random_valid_native_transfer(rng);\n        let secret_key = SecretKey::random(rng);\n\n        Deploy::new_signed(\n            Timestamp::zero(),\n            TimeDiff::from_seconds(1u32),\n 
           deploy.header.gas_price(),\n            deploy.header.dependencies().clone(),\n            deploy.header.chain_name().to_string(),\n            deploy.payment,\n            deploy.session,\n            &secret_key,\n            None,\n        )\n    }\n\n    /// Returns a random `Deploy` with native transfer as payment code.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_native_transfer_in_payment_logic(rng: &mut TestRng) -> Self {\n        let transfer_args = runtime_args! {\n            \"amount\" => U512::from(DEFAULT_MIN_TRANSFER_MOTES),\n            \"source\" => PublicKey::random(rng).to_account_hash(),\n            \"target\" => PublicKey::random(rng).to_account_hash(),\n        };\n        let payment = ExecutableDeployItem::Transfer {\n            args: transfer_args,\n        };\n        Self::random_transfer_with_payment(rng, payment)\n    }\n\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    fn random_transfer_with_payment(rng: &mut TestRng, payment: ExecutableDeployItem) -> Self {\n        let deploy = Self::random_valid_native_transfer(rng);\n        let secret_key = SecretKey::random(rng);\n\n        Deploy::new_signed(\n            deploy.header.timestamp(),\n            deploy.header.ttl(),\n            deploy.header.gas_price(),\n            deploy.header.dependencies().clone(),\n            deploy.header.chain_name().to_string(),\n            payment,\n            deploy.session,\n            &secret_key,\n            None,\n        )\n    }\n\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    fn random_transfer_with_session(rng: &mut TestRng, session: ExecutableDeployItem) -> Self {\n        let deploy = Self::random_valid_native_transfer(rng);\n        let secret_key = SecretKey::random(rng);\n\n        Deploy::new_signed(\n            deploy.header.timestamp(),\n            deploy.header.ttl(),\n            deploy.header.gas_price(),\n    
        deploy.header.dependencies().clone(),\n            deploy.header.chain_name().to_string(),\n            deploy.payment,\n            session,\n            &secret_key,\n            None,\n        )\n    }\n\n    /// Returns a random valid `Deploy` with specified gas price.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_gas_price(rng: &mut TestRng, gas_price: u64) -> Self {\n        let deploy = Self::random(rng);\n        let secret_key = SecretKey::random(rng);\n\n        Deploy::new_signed(\n            deploy.header.timestamp(),\n            deploy.header.ttl(),\n            gas_price,\n            deploy.header.dependencies().clone(),\n            deploy.header.chain_name().to_string(),\n            deploy.payment,\n            deploy.session,\n            &secret_key,\n            None,\n        )\n    }\n\n    /// Creates an add bid deploy, for testing.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn add_bid(\n        chain_name: String,\n        auction_contract_hash: AddressableEntityHash,\n        public_key: PublicKey,\n        bid_amount: U512,\n        delegation_rate: u8,\n        timestamp: Timestamp,\n        ttl: TimeDiff,\n    ) -> Self {\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: runtime_args! { ARG_AMOUNT => U512::from(100_000_000_000u64) },\n        };\n        let args = runtime_args! 
{\n            ARG_AUCTION_AMOUNT => bid_amount,\n            ARG_AUCTION_PUBLIC_KEY => public_key.clone(),\n            ARG_DELEGATION_RATE => delegation_rate,\n        };\n        let session = ExecutableDeployItem::StoredContractByHash {\n            hash: auction_contract_hash.into(),\n            entry_point: METHOD_ADD_BID.to_string(),\n            args,\n        };\n\n        Deploy::build(\n            timestamp,\n            ttl,\n            1,\n            vec![],\n            chain_name,\n            payment,\n            session,\n            InitiatorAddrAndSecretKey::InitiatorAddr(InitiatorAddr::PublicKey(public_key)),\n        )\n    }\n\n    /// Creates a withdraw bid deploy, for testing.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn withdraw_bid(\n        chain_name: String,\n        auction_contract_hash: AddressableEntityHash,\n        public_key: PublicKey,\n        amount: U512,\n        timestamp: Timestamp,\n        ttl: TimeDiff,\n    ) -> Self {\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: runtime_args! { ARG_AMOUNT => U512::from(3_000_000_000_u64) },\n        };\n        let args = runtime_args! 
{\n            ARG_AUCTION_AMOUNT => amount,\n            ARG_AUCTION_PUBLIC_KEY => public_key.clone(),\n        };\n        let session = ExecutableDeployItem::StoredContractByHash {\n            hash: auction_contract_hash.into(),\n            entry_point: METHOD_WITHDRAW_BID.to_string(),\n            args,\n        };\n\n        Deploy::build(\n            timestamp,\n            ttl,\n            1,\n            vec![],\n            chain_name,\n            payment,\n            session,\n            InitiatorAddrAndSecretKey::InitiatorAddr(InitiatorAddr::PublicKey(public_key)),\n        )\n    }\n\n    /// Creates a delegate deploy, for testing.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn delegate(\n        chain_name: String,\n        auction_contract_hash: AddressableEntityHash,\n        validator_public_key: PublicKey,\n        delegator_public_key: PublicKey,\n        amount: U512,\n        timestamp: Timestamp,\n        ttl: TimeDiff,\n    ) -> Self {\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: runtime_args! { ARG_AMOUNT => U512::from(3_000_000_000_u64) },\n        };\n        let args = runtime_args! 
{\n            ARG_DELEGATOR => delegator_public_key.clone(),\n            ARG_VALIDATOR => validator_public_key,\n            ARG_AUCTION_AMOUNT => amount,\n        };\n        let session = ExecutableDeployItem::StoredContractByHash {\n            hash: auction_contract_hash.into(),\n            entry_point: METHOD_DELEGATE.to_string(),\n            args,\n        };\n\n        Deploy::build(\n            timestamp,\n            ttl,\n            1,\n            vec![],\n            chain_name,\n            payment,\n            session,\n            InitiatorAddrAndSecretKey::InitiatorAddr(InitiatorAddr::PublicKey(\n                delegator_public_key,\n            )),\n        )\n    }\n\n    /// Creates an undelegate deploy, for testing.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn undelegate(\n        chain_name: String,\n        auction_contract_hash: AddressableEntityHash,\n        validator_public_key: PublicKey,\n        delegator_public_key: PublicKey,\n        amount: U512,\n        timestamp: Timestamp,\n        ttl: TimeDiff,\n    ) -> Self {\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: runtime_args! { ARG_AMOUNT => U512::from(3_000_000_000_u64) },\n        };\n        let args = runtime_args! 
{\n            ARG_DELEGATOR => delegator_public_key.clone(),\n            ARG_VALIDATOR => validator_public_key,\n            ARG_AUCTION_AMOUNT => amount,\n        };\n        let session = ExecutableDeployItem::StoredContractByHash {\n            hash: auction_contract_hash.into(),\n            entry_point: METHOD_UNDELEGATE.to_string(),\n            args,\n        };\n\n        Deploy::build(\n            timestamp,\n            ttl,\n            1,\n            vec![],\n            chain_name,\n            payment,\n            session,\n            InitiatorAddrAndSecretKey::InitiatorAddr(InitiatorAddr::PublicKey(\n                delegator_public_key,\n            )),\n        )\n    }\n\n    /// Creates an redelegate deploy, for testing.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    #[allow(clippy::too_many_arguments)]\n    pub fn redelegate(\n        chain_name: String,\n        auction_contract_hash: AddressableEntityHash,\n        validator_public_key: PublicKey,\n        delegator_public_key: PublicKey,\n        redelegate_validator_public_key: PublicKey,\n        amount: U512,\n        timestamp: Timestamp,\n        ttl: TimeDiff,\n    ) -> Self {\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: runtime_args! { ARG_AMOUNT => U512::from(3_000_000_000_u64) },\n        };\n        let args = runtime_args! 
{\n            ARG_DELEGATOR => delegator_public_key.clone(),\n            ARG_VALIDATOR => validator_public_key,\n            ARG_NEW_VALIDATOR => redelegate_validator_public_key,\n            ARG_AUCTION_AMOUNT => amount,\n        };\n        let session = ExecutableDeployItem::StoredContractByHash {\n            hash: auction_contract_hash.into(),\n            entry_point: METHOD_REDELEGATE.to_string(),\n            args,\n        };\n\n        Deploy::build(\n            timestamp,\n            ttl,\n            1,\n            vec![],\n            chain_name,\n            payment,\n            session,\n            InitiatorAddrAndSecretKey::InitiatorAddr(InitiatorAddr::PublicKey(\n                delegator_public_key,\n            )),\n        )\n    }\n\n    /// Creates a native transfer, for testing.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    #[allow(clippy::too_many_arguments)]\n    pub fn native_transfer(\n        chain_name: String,\n        source_purse: Option<URef>,\n        sender_public_key: PublicKey,\n        receiver_public_key: PublicKey,\n        amount: Option<U512>,\n        timestamp: Timestamp,\n        ttl: TimeDiff,\n        gas_price: u64,\n    ) -> Self {\n        let amount = amount.unwrap_or_else(|| U512::from(DEFAULT_MIN_TRANSFER_MOTES));\n\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: runtime_args! { ARG_AMOUNT => U512::from(3_000_000_000_u64) },\n        };\n\n        let mut transfer_args = runtime_args! 
{\n            \"amount\" => amount,\n            \"target\" => receiver_public_key.to_account_hash(),\n        };\n\n        if let Some(source) = source_purse {\n            transfer_args\n                .insert(\"source\", source)\n                .expect(\"should serialize source arg\");\n        }\n\n        let session = ExecutableDeployItem::Transfer {\n            args: transfer_args,\n        };\n\n        Deploy::build(\n            timestamp,\n            ttl,\n            gas_price,\n            vec![],\n            chain_name,\n            payment,\n            session,\n            InitiatorAddrAndSecretKey::InitiatorAddr(InitiatorAddr::PublicKey(sender_public_key)),\n        )\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl GasLimited for Deploy {\n    type Error = InvalidDeploy;\n\n    fn gas_cost(&self, chainspec: &Chainspec, gas_price: u8) -> Result<Motes, Self::Error> {\n        let gas_limit = self.gas_limit(chainspec)?;\n        let motes =\n            Motes::from_gas(gas_limit, gas_price).ok_or(InvalidDeploy::UnableToCalculateGasCost)?;\n        Ok(motes)\n    }\n\n    fn gas_limit(&self, chainspec: &Chainspec) -> Result<Gas, Self::Error> {\n        let pricing_handling = chainspec.core_config.pricing_handling;\n        let costs = &chainspec.system_costs_config;\n        let gas_limit = match pricing_handling {\n            PricingHandling::PaymentLimited => {\n                // in the original implementation, for standard deploys the payment amount\n                // specified by the sender is the gas limit (up to the max block limit).\n                if self.is_transfer() {\n                    Gas::new(costs.mint_costs().transfer)\n                } else {\n                    let value = self\n                        .payment()\n                        .args()\n                        .get(ARG_AMOUNT)\n                        .ok_or(InvalidDeploy::MissingPaymentAmount)?;\n                    let payment_amount = value\n       
                 .clone()\n                        .into_t::<U512>()\n                        .map_err(|_| InvalidDeploy::FailedToParsePaymentAmount)?;\n                    Gas::new(payment_amount)\n                }\n            }\n            PricingHandling::Fixed => {\n                let v1_config = &chainspec.transaction_config.transaction_v1_config;\n                let lane_id = calculate_lane_id_for_deploy(self, pricing_handling, v1_config)?;\n                let lane_definition = v1_config\n                    .get_lane_by_id(lane_id)\n                    .ok_or(InvalidDeploy::NoLaneMatch)?;\n                let computation_limit = lane_definition.max_transaction_gas_limit();\n                Gas::new(computation_limit)\n            } // legacy deploys do not support prepaid\n        };\n        Ok(gas_limit)\n    }\n\n    fn gas_price_tolerance(&self) -> Result<u8, Self::Error> {\n        u8::try_from(self.gas_price()).map_err(|_| Self::Error::UnableToCalculateGasLimit)\n    }\n}\n\nimpl hash::Hash for Deploy {\n    fn hash<H: hash::Hasher>(&self, state: &mut H) {\n        // Destructure to make sure we don't accidentally omit fields.\n        #[cfg(any(feature = \"once_cell\", test))]\n        let Deploy {\n            hash,\n            header,\n            payment,\n            session,\n            approvals,\n            is_valid: _,\n        } = self;\n        #[cfg(not(any(feature = \"once_cell\", test)))]\n        let Deploy {\n            hash,\n            header,\n            payment,\n            session,\n            approvals,\n        } = self;\n        hash.hash(state);\n        header.hash(state);\n        payment.hash(state);\n        session.hash(state);\n        approvals.hash(state);\n    }\n}\n\nimpl PartialEq for Deploy {\n    fn eq(&self, other: &Deploy) -> bool {\n        // Destructure to make sure we don't accidentally omit fields.\n        #[cfg(any(feature = \"once_cell\", test))]\n        let Deploy {\n            hash,\n    
        header,\n            payment,\n            session,\n            approvals,\n            is_valid: _,\n        } = self;\n        #[cfg(not(any(feature = \"once_cell\", test)))]\n        let Deploy {\n            hash,\n            header,\n            payment,\n            session,\n            approvals,\n        } = self;\n        *hash == other.hash\n            && *header == other.header\n            && *payment == other.payment\n            && *session == other.session\n            && *approvals == other.approvals\n    }\n}\n\nimpl Ord for Deploy {\n    fn cmp(&self, other: &Deploy) -> cmp::Ordering {\n        // Destructure to make sure we don't accidentally omit fields.\n        #[cfg(any(feature = \"once_cell\", test))]\n        let Deploy {\n            hash,\n            header,\n            payment,\n            session,\n            approvals,\n            is_valid: _,\n        } = self;\n        #[cfg(not(any(feature = \"once_cell\", test)))]\n        let Deploy {\n            hash,\n            header,\n            payment,\n            session,\n            approvals,\n        } = self;\n        hash.cmp(&other.hash)\n            .then_with(|| header.cmp(&other.header))\n            .then_with(|| payment.cmp(&other.payment))\n            .then_with(|| session.cmp(&other.session))\n            .then_with(|| approvals.cmp(&other.approvals))\n    }\n}\n\nimpl PartialOrd for Deploy {\n    fn partial_cmp(&self, other: &Deploy) -> Option<cmp::Ordering> {\n        Some(self.cmp(other))\n    }\n}\n\nimpl ToBytes for Deploy {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.header.serialized_length()\n            + self.hash.serialized_length()\n            + self.payment.serialized_length()\n            + self.session.serialized_length()\n            
+ self.approvals.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.header.write_bytes(writer)?;\n        self.hash.write_bytes(writer)?;\n        self.payment.write_bytes(writer)?;\n        self.session.write_bytes(writer)?;\n        self.approvals.write_bytes(writer)\n    }\n}\n\nimpl FromBytes for Deploy {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (header, remainder) = DeployHeader::from_bytes(bytes)?;\n        let (hash, remainder) = DeployHash::from_bytes(remainder)?;\n        let (payment, remainder) = ExecutableDeployItem::from_bytes(remainder)?;\n        let (session, remainder) = ExecutableDeployItem::from_bytes(remainder)?;\n        let (approvals, remainder) = BTreeSet::<Approval>::from_bytes(remainder)?;\n        let maybe_valid_deploy = Deploy {\n            header,\n            hash,\n            payment,\n            session,\n            approvals,\n            #[cfg(any(feature = \"once_cell\", test))]\n            is_valid: OnceCell::new(),\n        };\n        Ok((maybe_valid_deploy, remainder))\n    }\n}\n\nimpl Display for Deploy {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(\n            formatter,\n            \"deploy[{}, {}, payment_code: {}, session_code: {}, approvals: {}]\",\n            self.hash,\n            self.header,\n            self.payment,\n            self.session,\n            DisplayIter::new(self.approvals.iter())\n        )\n    }\n}\n\nfn serialize_header(header: &DeployHeader) -> Vec<u8> {\n    header\n        .to_bytes()\n        .unwrap_or_else(|error| panic!(\"should serialize deploy header: {}\", error))\n}\n\nfn serialize_body(payment: &ExecutableDeployItem, session: &ExecutableDeployItem) -> Vec<u8> {\n    let mut buffer = Vec::with_capacity(payment.serialized_length() + session.serialized_length());\n    payment\n        .write_bytes(&mut buffer)\n        
.unwrap_or_else(|error| panic!(\"should serialize payment code: {}\", error));\n    session\n        .write_bytes(&mut buffer)\n        .unwrap_or_else(|error| panic!(\"should serialize session code: {}\", error));\n    buffer\n}\n\n/// Computationally expensive validity check for a given deploy instance, including asymmetric_key\n/// signing verification.\nfn validate_deploy(deploy: &Deploy) -> Result<(), InvalidDeploy> {\n    if deploy.approvals.is_empty() {\n        #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n        warn!(?deploy, \"deploy has no approvals\");\n        return Err(InvalidDeploy::EmptyApprovals);\n    }\n\n    deploy.has_valid_hash()?;\n\n    for (index, approval) in deploy.approvals.iter().enumerate() {\n        if let Err(error) = crypto::verify(deploy.hash, approval.signature(), approval.signer()) {\n            #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n            warn!(?deploy, \"failed to verify approval {}: {}\", index, error);\n            return Err(InvalidDeploy::InvalidApproval { index, error });\n        }\n    }\n\n    Ok(())\n}\n\n#[cfg(any(feature = \"std\", test))]\n/// Calculate lane id for deploy\npub fn calculate_lane_id_for_deploy(\n    deploy: &Deploy,\n    pricing_handling: PricingHandling,\n    config: &TransactionV1Config,\n) -> Result<u8, InvalidDeploy> {\n    if deploy.is_transfer() {\n        return Ok(MINT_LANE_ID);\n    }\n    let size_estimation = deploy.serialized_length() as u64;\n    let runtime_args_size = (deploy.payment().args().serialized_length()\n        + deploy.session().args().serialized_length()) as u64;\n\n    let gas_price_tolerance = deploy.gas_price_tolerance()?;\n    let pricing_mode = match pricing_handling {\n        PricingHandling::PaymentLimited => {\n            let is_standard_payment = deploy.payment().is_standard_payment(Phase::Payment);\n            let value = deploy\n                .payment()\n                .args()\n                
.get(ARG_AMOUNT)\n                .ok_or(InvalidDeploy::MissingPaymentAmount)?;\n            let payment_amount = value\n                .clone()\n                .into_t::<U512>()\n                .map_err(|_| InvalidDeploy::FailedToParsePaymentAmount)?\n                .as_u64();\n            PricingMode::PaymentLimited {\n                payment_amount,\n                gas_price_tolerance,\n                standard_payment: is_standard_payment,\n            }\n        }\n        PricingHandling::Fixed => PricingMode::Fixed {\n            gas_price_tolerance,\n            // additional_computation_factor is not representable for Deploys, we default to 0\n            additional_computation_factor: 0,\n        },\n    };\n\n    get_lane_for_non_install_wasm(config, &pricing_mode, size_estimation, runtime_args_size)\n        .map_err(Into::into)\n}\n\n#[cfg(test)]\nmod tests {\n    use std::{iter, time::Duration};\n\n    use super::*;\n    use crate::{CLValue, TransactionConfig};\n\n    #[test]\n    fn json_roundtrip() {\n        let mut rng = TestRng::new();\n        let deploy = Deploy::random(&mut rng);\n        let json_string = serde_json::to_string_pretty(&deploy).unwrap();\n        let decoded = serde_json::from_str(&json_string).unwrap();\n        assert_eq!(deploy, decoded);\n    }\n\n    #[test]\n    fn bincode_roundtrip() {\n        let mut rng = TestRng::new();\n        let deploy = Deploy::random(&mut rng);\n        let serialized = bincode::serialize(&deploy).unwrap();\n        let deserialized = bincode::deserialize(&serialized).unwrap();\n        assert_eq!(deploy, deserialized);\n    }\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let mut rng = TestRng::new();\n        let deploy = Deploy::random(&mut rng);\n        bytesrepr::test_serialization_roundtrip(deploy.header());\n        bytesrepr::test_serialization_roundtrip(&deploy);\n    }\n\n    fn create_deploy(\n        rng: &mut TestRng,\n        ttl: TimeDiff,\n        
dependency_count: usize,\n        chain_name: &str,\n        gas_price: u64,\n    ) -> Deploy {\n        let secret_key = SecretKey::random(rng);\n        let dependencies = iter::repeat_with(|| DeployHash::random(rng))\n            .take(dependency_count)\n            .collect();\n        let transfer_args = {\n            let mut transfer_args = RuntimeArgs::new();\n            let value = CLValue::from_t(U512::from(DEFAULT_MIN_TRANSFER_MOTES))\n                .expect(\"should create CLValue\");\n            transfer_args.insert_cl_value(\"amount\", value);\n            transfer_args\n        };\n        Deploy::new_signed(\n            Timestamp::now(),\n            ttl,\n            gas_price,\n            dependencies,\n            chain_name.to_string(),\n            ExecutableDeployItem::ModuleBytes {\n                module_bytes: Bytes::new(),\n                args: RuntimeArgs::new(),\n            },\n            ExecutableDeployItem::Transfer {\n                args: transfer_args,\n            },\n            &secret_key,\n            None,\n        )\n    }\n\n    #[test]\n    fn is_valid() {\n        const GAS_PRICE_TOLERANCE: u8 = u8::MAX;\n\n        let mut rng = TestRng::new();\n\n        let deploy = create_deploy(\n            &mut rng,\n            TransactionConfig::default().max_ttl,\n            0,\n            \"net-1\",\n            GAS_PRICE_TOLERANCE as u64,\n        );\n        assert_eq!(\n            deploy.is_valid.get(),\n            None,\n            \"is valid should initially be None\"\n        );\n        deploy.is_valid().expect(\"should be valid\");\n        assert_eq!(\n            deploy.is_valid.get(),\n            Some(&Ok(())),\n            \"is valid should be true\"\n        );\n    }\n\n    fn check_is_not_valid(invalid_deploy: Deploy, expected_error: InvalidDeploy) {\n        assert!(\n            invalid_deploy.is_valid.get().is_none(),\n            \"is valid should initially be None\"\n        );\n        let 
actual_error = invalid_deploy.is_valid().unwrap_err();\n\n        // Ignore the `error_msg` field of `InvalidApproval` when comparing to expected error, as\n        // this makes the test too fragile.  Otherwise expect the actual error should exactly match\n        // the expected error.\n        match expected_error {\n            InvalidDeploy::InvalidApproval {\n                index: expected_index,\n                ..\n            } => match actual_error {\n                InvalidDeploy::InvalidApproval {\n                    index: actual_index,\n                    ..\n                } => {\n                    assert_eq!(actual_index, expected_index);\n                }\n                _ => panic!(\"expected {}, got: {}\", expected_error, actual_error),\n            },\n            _ => {\n                assert_eq!(actual_error, expected_error,);\n            }\n        }\n\n        // The actual error should have been lazily initialized correctly.\n        assert_eq!(\n            invalid_deploy.is_valid.get(),\n            Some(&Err(actual_error)),\n            \"is valid should now be Some\"\n        );\n    }\n\n    #[test]\n    fn not_valid_due_to_invalid_body_hash() {\n        const GAS_PRICE_TOLERANCE: u8 = u8::MAX;\n\n        let mut rng = TestRng::new();\n        let mut deploy = create_deploy(\n            &mut rng,\n            TransactionConfig::default().max_ttl,\n            0,\n            \"net-1\",\n            GAS_PRICE_TOLERANCE as u64,\n        );\n\n        deploy.session = ExecutableDeployItem::Transfer {\n            args: runtime_args! 
{\n                \"amount\" => 1\n            },\n        };\n        check_is_not_valid(deploy, InvalidDeploy::InvalidBodyHash);\n    }\n\n    #[test]\n    fn not_valid_due_to_invalid_deploy_hash() {\n        const GAS_PRICE_TOLERANCE: u8 = u8::MAX;\n\n        let mut rng = TestRng::new();\n        let mut deploy = create_deploy(\n            &mut rng,\n            TransactionConfig::default().max_ttl,\n            0,\n            \"net-1\",\n            GAS_PRICE_TOLERANCE as u64,\n        );\n\n        // deploy.header.gas_price = 2;\n        deploy.invalidate();\n        check_is_not_valid(deploy, InvalidDeploy::InvalidDeployHash);\n    }\n\n    #[test]\n    fn not_valid_due_to_empty_approvals() {\n        const GAS_PRICE_TOLERANCE: u8 = u8::MAX;\n\n        let mut rng = TestRng::new();\n        let mut deploy = create_deploy(\n            &mut rng,\n            TransactionConfig::default().max_ttl,\n            0,\n            \"net-1\",\n            GAS_PRICE_TOLERANCE as u64,\n        );\n        deploy.approvals = BTreeSet::new();\n        assert!(deploy.approvals.is_empty());\n        check_is_not_valid(deploy, InvalidDeploy::EmptyApprovals)\n    }\n\n    #[test]\n    fn not_valid_due_to_invalid_approval() {\n        const GAS_PRICE_TOLERANCE: u8 = u8::MAX;\n\n        let mut rng = TestRng::new();\n        let mut deploy = create_deploy(\n            &mut rng,\n            TransactionConfig::default().max_ttl,\n            0,\n            \"net-1\",\n            GAS_PRICE_TOLERANCE as u64,\n        );\n\n        let deploy2 = Deploy::random(&mut rng);\n\n        deploy.approvals.extend(deploy2.approvals.clone());\n        // the expected index for the invalid approval will be the first index at which there is an\n        // approval coming from deploy2\n        let expected_index = deploy\n            .approvals\n            .iter()\n            .enumerate()\n            .find(|(_, approval)| deploy2.approvals.contains(approval))\n            
.map(|(index, _)| index)\n            .unwrap();\n        check_is_not_valid(\n            deploy,\n            InvalidDeploy::InvalidApproval {\n                index: expected_index,\n                error: crypto::Error::SignatureError, // This field is ignored in the check.\n            },\n        );\n    }\n\n    #[test]\n    fn is_acceptable() {\n        const GAS_PRICE_TOLERANCE: u8 = u8::MAX;\n\n        let mut rng = TestRng::new();\n        let chain_name = \"net-1\".to_string();\n        let mut chainspec = Chainspec::default();\n        chainspec.with_chain_name(chain_name.to_string());\n        let config = chainspec.transaction_config.clone();\n\n        let deploy = create_deploy(\n            &mut rng,\n            config.max_ttl,\n            0,\n            &chain_name,\n            GAS_PRICE_TOLERANCE as u64,\n        );\n        let current_timestamp = deploy.header().timestamp();\n        deploy\n            .is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp)\n            .expect(\"should be acceptable\");\n    }\n\n    #[test]\n    fn not_acceptable_due_to_invalid_chain_name() {\n        const GAS_PRICE_TOLERANCE: u8 = u8::MAX;\n\n        let mut rng = TestRng::new();\n        let expected_chain_name = \"net-1\";\n        let wrong_chain_name = \"net-2\".to_string();\n\n        let mut chainspec = Chainspec::default();\n        chainspec.with_chain_name(expected_chain_name.to_string());\n        let config = chainspec.transaction_config.clone();\n\n        let deploy = create_deploy(\n            &mut rng,\n            config.max_ttl,\n            0,\n            &wrong_chain_name,\n            GAS_PRICE_TOLERANCE as u64,\n        );\n\n        let expected_error = InvalidDeploy::InvalidChainName {\n            expected: expected_chain_name.to_string(),\n            got: wrong_chain_name,\n        };\n\n        let current_timestamp = deploy.header().timestamp();\n        assert_eq!(\n            
deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp),\n            Err(expected_error)\n        );\n        assert!(\n            deploy.is_valid.get().is_none(),\n            \"deploy should not have run expensive `is_valid` call\"\n        );\n    }\n\n    #[test]\n    fn not_acceptable_due_to_excessive_dependencies() {\n        const GAS_PRICE_TOLERANCE: u8 = u8::MAX;\n\n        let mut rng = TestRng::new();\n        let chain_name = \"net-1\";\n\n        let mut chainspec = Chainspec::default();\n        chainspec.with_chain_name(chain_name.to_string());\n        let config = chainspec.transaction_config.clone();\n\n        let deploy = create_deploy(\n            &mut rng,\n            config.max_ttl,\n            1,\n            chain_name,\n            GAS_PRICE_TOLERANCE as u64,\n        );\n\n        let expected_error = InvalidDeploy::DependenciesNoLongerSupported;\n\n        let current_timestamp = deploy.header().timestamp();\n        assert_eq!(\n            deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp),\n            Err(expected_error)\n        );\n        assert!(\n            deploy.is_valid.get().is_none(),\n            \"deploy should not have run expensive `is_valid` call\"\n        );\n    }\n\n    #[test]\n    fn not_acceptable_due_to_excessive_ttl() {\n        const GAS_PRICE_TOLERANCE: u8 = u8::MAX;\n\n        let mut rng = TestRng::new();\n        let chain_name = \"net-1\";\n\n        let mut chainspec = Chainspec::default();\n        chainspec.with_chain_name(chain_name.to_string());\n        let config = chainspec.transaction_config.clone();\n\n        let ttl = config.max_ttl + TimeDiff::from(Duration::from_secs(1));\n\n        let deploy = create_deploy(&mut rng, ttl, 0, chain_name, GAS_PRICE_TOLERANCE as u64);\n\n        let expected_error = InvalidDeploy::ExcessiveTimeToLive {\n            max_ttl: config.max_ttl,\n            got: ttl,\n        };\n\n        let 
current_timestamp = deploy.header().timestamp();\n        assert_eq!(\n            deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp),\n            Err(expected_error)\n        );\n        assert!(\n            deploy.is_valid.get().is_none(),\n            \"deploy should not have run expensive `is_valid` call\"\n        );\n    }\n\n    #[test]\n    fn not_acceptable_due_to_timestamp_in_future() {\n        const GAS_PRICE_TOLERANCE: u8 = u8::MAX;\n\n        let mut rng = TestRng::new();\n        let chain_name = \"net-1\";\n\n        let mut chainspec = Chainspec::default();\n        chainspec.with_chain_name(chain_name.to_string());\n        let config = chainspec.transaction_config.clone();\n        let leeway = TimeDiff::from_seconds(2);\n\n        let deploy = create_deploy(\n            &mut rng,\n            config.max_ttl,\n            0,\n            chain_name,\n            GAS_PRICE_TOLERANCE as u64,\n        );\n        let current_timestamp = deploy.header.timestamp() - leeway - TimeDiff::from_seconds(1);\n\n        let expected_error = InvalidDeploy::TimestampInFuture {\n            validation_timestamp: current_timestamp,\n            timestamp_leeway: leeway,\n            got: deploy.header.timestamp(),\n        };\n\n        assert_eq!(\n            deploy.is_config_compliant(&chainspec, leeway, current_timestamp),\n            Err(expected_error)\n        );\n        assert!(\n            deploy.is_valid.get().is_none(),\n            \"deploy should not have run expensive `is_valid` call\"\n        );\n    }\n\n    #[test]\n    fn acceptable_if_timestamp_slightly_in_future() {\n        const GAS_PRICE_TOLERANCE: u8 = u8::MAX;\n\n        let mut rng = TestRng::new();\n        let chain_name = \"net-1\";\n\n        let mut chainspec = Chainspec::default();\n        chainspec.with_chain_name(chain_name.to_string());\n        let config = chainspec.transaction_config.clone();\n        let leeway = 
TimeDiff::from_seconds(2);\n\n        let deploy = create_deploy(\n            &mut rng,\n            config.max_ttl,\n            0,\n            chain_name,\n            GAS_PRICE_TOLERANCE as u64,\n        );\n        let current_timestamp = deploy.header.timestamp() - (leeway / 2);\n        deploy\n            .is_config_compliant(&chainspec, leeway, current_timestamp)\n            .expect(\"should be acceptable\");\n    }\n\n    #[test]\n    fn not_acceptable_due_to_missing_payment_amount() {\n        const GAS_PRICE_TOLERANCE: u8 = u8::MAX;\n\n        let mut rng = TestRng::new();\n        let chain_name = \"net-1\";\n\n        let mut chainspec = Chainspec::default();\n        chainspec.with_chain_name(chain_name.to_string());\n        chainspec.with_pricing_handling(PricingHandling::PaymentLimited);\n        let config = chainspec.transaction_config.clone();\n\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: RuntimeArgs::default(),\n        };\n\n        // Create an empty session object that is not transfer to ensure\n        // that the payment amount is checked.\n        let session = ExecutableDeployItem::StoredContractByName {\n            name: \"\".to_string(),\n            entry_point: \"\".to_string(),\n            args: Default::default(),\n        };\n\n        let mut deploy = create_deploy(\n            &mut rng,\n            config.max_ttl,\n            0,\n            chain_name,\n            GAS_PRICE_TOLERANCE as u64,\n        );\n\n        deploy.payment = payment;\n        deploy.session = session;\n\n        let current_timestamp = deploy.header().timestamp();\n        assert_eq!(\n            deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp),\n            Err(InvalidDeploy::MissingPaymentAmount)\n        );\n        assert!(\n            deploy.is_valid.get().is_none(),\n            \"deploy should not have run expensive `is_valid` 
call\"\n        );\n    }\n\n    #[test]\n    fn not_acceptable_due_to_mangled_payment_amount() {\n        const GAS_PRICE_TOLERANCE: u8 = u8::MAX;\n\n        let mut rng = TestRng::new();\n        let chain_name = \"net-1\";\n\n        let mut chainspec = Chainspec::default();\n        chainspec.with_chain_name(chain_name.to_string());\n        chainspec.with_pricing_handling(PricingHandling::PaymentLimited);\n        let config = chainspec.transaction_config.clone();\n\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: runtime_args! {\n                \"amount\" => \"mangled-amount\"\n            },\n        };\n\n        // Create an empty session object that is not transfer to ensure\n        // that the payment amount is checked.\n        let session = ExecutableDeployItem::StoredContractByName {\n            name: \"\".to_string(),\n            entry_point: \"\".to_string(),\n            args: Default::default(),\n        };\n\n        let mut deploy = create_deploy(\n            &mut rng,\n            config.max_ttl,\n            0,\n            chain_name,\n            GAS_PRICE_TOLERANCE as u64,\n        );\n\n        deploy.payment = payment;\n        deploy.session = session;\n\n        let current_timestamp = deploy.header().timestamp();\n        assert_eq!(\n            deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp),\n            Err(InvalidDeploy::FailedToParsePaymentAmount)\n        );\n        assert!(\n            deploy.is_valid.get().is_none(),\n            \"deploy should not have run expensive `is_valid` call\"\n        );\n    }\n\n    #[test]\n    fn not_acceptable_if_doesnt_fit_in_any_lane() {\n        const GAS_PRICE_TOLERANCE: u8 = u8::MAX;\n\n        let mut rng = TestRng::new();\n        let chain_name = \"net-1\";\n\n        let mut chainspec = Chainspec::default();\n        chainspec.with_chain_name(chain_name.to_string());\n        
chainspec.with_pricing_handling(PricingHandling::PaymentLimited);\n        let config = chainspec.transaction_config.clone();\n        let max_lane = chainspec\n            .transaction_config\n            .transaction_v1_config\n            .get_max_wasm_lane_by_gas_limit()\n            .unwrap();\n        let amount = U512::from(max_lane.max_transaction_gas_limit() + 1);\n\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: runtime_args! {\n                \"amount\" => amount\n            },\n        };\n\n        // Create an empty session object that is not transfer to ensure\n        // that the payment amount is checked.\n        let session = ExecutableDeployItem::StoredContractByName {\n            name: \"\".to_string(),\n            entry_point: \"\".to_string(),\n            args: Default::default(),\n        };\n\n        let mut deploy = create_deploy(\n            &mut rng,\n            config.max_ttl,\n            0,\n            chain_name,\n            GAS_PRICE_TOLERANCE as u64,\n        );\n\n        deploy.payment = payment;\n        deploy.session = session;\n\n        let expected_error = InvalidDeploy::NoLaneMatch;\n\n        let current_timestamp = deploy.header().timestamp();\n        assert_eq!(\n            deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp),\n            Err(expected_error)\n        );\n        assert!(\n            deploy.is_valid.get().is_none(),\n            \"deploy should not have run expensive `is_valid` call\"\n        );\n    }\n\n    #[test]\n    fn not_acceptable_due_to_transaction_bigger_than_block_limit() {\n        //TODO we should consider validating on startup if the\n        // chainspec doesn't defined wasm lanes that are bigger than\n        // the block limit\n        const GAS_PRICE_TOLERANCE: u8 = u8::MAX;\n\n        let mut rng = TestRng::new();\n        let chain_name = \"net-1\";\n\n        let mut 
chainspec = Chainspec::default();\n        chainspec.with_block_gas_limit(100); // The default wasm lane is much bigger than\n        chainspec.with_chain_name(chain_name.to_string());\n        chainspec.with_pricing_handling(PricingHandling::PaymentLimited);\n        let config = chainspec.transaction_config.clone();\n        let max_lane = chainspec\n            .transaction_config\n            .transaction_v1_config\n            .get_max_wasm_lane_by_gas_limit()\n            .unwrap();\n        let amount = U512::from(max_lane.max_transaction_gas_limit());\n\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: runtime_args! {\n                \"amount\" => amount\n            },\n        };\n\n        // Create an empty session object that is not transfer to ensure\n        // that the payment amount is checked.\n        let session = ExecutableDeployItem::StoredContractByName {\n            name: \"\".to_string(),\n            entry_point: \"\".to_string(),\n            args: Default::default(),\n        };\n\n        let mut deploy = create_deploy(\n            &mut rng,\n            config.max_ttl,\n            0,\n            chain_name,\n            GAS_PRICE_TOLERANCE as u64,\n        );\n\n        deploy.payment = payment;\n        deploy.session = session;\n\n        let expected_error = InvalidDeploy::ExceededBlockGasLimit {\n            block_gas_limit: config.block_gas_limit,\n            got: Box::new(amount),\n        };\n\n        let current_timestamp = deploy.header().timestamp();\n        assert_eq!(\n            deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp),\n            Err(expected_error)\n        );\n        assert!(\n            deploy.is_valid.get().is_none(),\n            \"deploy should not have run expensive `is_valid` call\"\n        );\n    }\n\n    #[test]\n    fn transfer_acceptable_regardless_of_excessive_payment_amount() {\n        
const GAS_PRICE_TOLERANCE: u8 = u8::MAX;\n\n        let mut rng = TestRng::new();\n        let secret_key = SecretKey::random(&mut rng);\n        let chain_name = \"net-1\";\n\n        let mut chainspec = Chainspec::default();\n        chainspec.with_chain_name(chain_name.to_string());\n        let config = chainspec.transaction_config.clone();\n        let amount = U512::from(config.block_gas_limit + 1);\n\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: runtime_args! {\n                \"amount\" => amount\n            },\n        };\n\n        let transfer_args = {\n            let mut transfer_args = RuntimeArgs::new();\n            let value = CLValue::from_t(U512::from(DEFAULT_MIN_TRANSFER_MOTES))\n                .expect(\"should create CLValue\");\n            transfer_args.insert_cl_value(\"amount\", value);\n            transfer_args\n        };\n\n        let deploy = Deploy::new_signed(\n            Timestamp::now(),\n            config.max_ttl,\n            GAS_PRICE_TOLERANCE as u64,\n            vec![],\n            chain_name.to_string(),\n            payment,\n            ExecutableDeployItem::Transfer {\n                args: transfer_args,\n            },\n            &secret_key,\n            None,\n        );\n\n        let current_timestamp = deploy.header().timestamp();\n        assert_eq!(\n            Ok(()),\n            deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp)\n        )\n    }\n\n    #[test]\n    fn not_acceptable_due_to_excessive_approvals() {\n        const GAS_PRICE_TOLERANCE: u8 = u8::MAX;\n\n        let mut rng = TestRng::new();\n        let chain_name = \"net-1\";\n\n        let mut chainspec = Chainspec::default();\n        chainspec.with_chain_name(chain_name.to_string());\n        let config = chainspec.transaction_config.clone();\n        let deploy = create_deploy(\n            &mut rng,\n            config.max_ttl,\n    
        0,\n            chain_name,\n            GAS_PRICE_TOLERANCE as u64,\n        );\n        // This test is to ensure a given limit is being checked.\n        // Therefore, set the limit to one less than the approvals in the deploy.\n        let max_associated_keys = (deploy.approvals.len() - 1) as u32;\n        chainspec.with_max_associated_keys(max_associated_keys);\n        let current_timestamp = deploy.header().timestamp();\n        assert_eq!(\n            Err(InvalidDeploy::ExcessiveApprovals {\n                got: deploy.approvals.len() as u32,\n                max_associated_keys,\n            }),\n            deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp)\n        )\n    }\n\n    #[test]\n    fn not_acceptable_due_to_missing_transfer_amount() {\n        const GAS_PRICE_TOLERANCE: u8 = u8::MAX;\n\n        let mut rng = TestRng::new();\n        let chain_name = \"net-1\";\n        let mut chainspec = Chainspec::default();\n        chainspec.with_chain_name(chain_name.to_string());\n\n        let config = chainspec.transaction_config.clone();\n        let mut deploy = create_deploy(\n            &mut rng,\n            config.max_ttl,\n            0,\n            chain_name,\n            GAS_PRICE_TOLERANCE as u64,\n        );\n\n        let transfer_args = RuntimeArgs::default();\n        let session = ExecutableDeployItem::Transfer {\n            args: transfer_args,\n        };\n        deploy.session = session;\n\n        let current_timestamp = deploy.header().timestamp();\n        assert_eq!(\n            Err(InvalidDeploy::MissingTransferAmount),\n            deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp)\n        )\n    }\n\n    #[test]\n    fn not_acceptable_due_to_mangled_transfer_amount() {\n        const GAS_PRICE_TOLERANCE: u8 = u8::MAX;\n\n        let mut rng = TestRng::new();\n        let chain_name = \"net-1\";\n        let mut chainspec = Chainspec::default();\n        
chainspec.with_chain_name(chain_name.to_string());\n\n        let config = chainspec.transaction_config.clone();\n        let mut deploy = create_deploy(\n            &mut rng,\n            config.max_ttl,\n            0,\n            chain_name,\n            GAS_PRICE_TOLERANCE as u64,\n        );\n\n        let transfer_args = runtime_args! {\n            \"amount\" => \"mangled-amount\",\n            \"source\" => PublicKey::random(&mut rng).to_account_hash(),\n            \"target\" => PublicKey::random(&mut rng).to_account_hash(),\n        };\n        let session = ExecutableDeployItem::Transfer {\n            args: transfer_args,\n        };\n        deploy.session = session;\n\n        let current_timestamp = deploy.header().timestamp();\n        assert_eq!(\n            Err(InvalidDeploy::FailedToParseTransferAmount),\n            deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp)\n        )\n    }\n\n    #[test]\n    fn not_acceptable_due_to_too_low_gas_price_tolerance() {\n        const GAS_PRICE_TOLERANCE: u8 = 0;\n\n        let mut rng = TestRng::new();\n        let chain_name = \"net-1\";\n        let mut chainspec = Chainspec::default();\n        chainspec.with_chain_name(chain_name.to_string());\n\n        let config = chainspec.transaction_config.clone();\n        let deploy = create_deploy(\n            &mut rng,\n            config.max_ttl,\n            0,\n            chain_name,\n            GAS_PRICE_TOLERANCE as u64,\n        );\n\n        let current_timestamp = deploy.header().timestamp();\n        assert!(matches!(\n            deploy.is_config_compliant(\n                &chainspec,\n                TimeDiff::default(),\n                current_timestamp\n            ),\n            Err(InvalidDeploy::GasPriceToleranceTooLow { min_gas_price_tolerance, provided_gas_price_tolerance })\n                if min_gas_price_tolerance == chainspec.vacancy_config.min_gas_price && provided_gas_price_tolerance == 
GAS_PRICE_TOLERANCE\n        ))\n    }\n\n    #[test]\n    fn not_acceptable_due_to_insufficient_transfer_amount() {\n        const GAS_PRICE_TOLERANCE: u8 = u8::MAX;\n\n        let mut rng = TestRng::new();\n        let chain_name = \"net-1\";\n        let mut chainspec = Chainspec::default();\n        chainspec.with_chain_name(chain_name.to_string());\n\n        let config = chainspec.transaction_config.clone();\n        let mut deploy = create_deploy(\n            &mut rng,\n            config.max_ttl,\n            0,\n            chain_name,\n            GAS_PRICE_TOLERANCE as u64,\n        );\n\n        let amount = config.native_transfer_minimum_motes - 1;\n        let insufficient_amount = U512::from(amount);\n\n        let transfer_args = runtime_args! {\n            \"amount\" => insufficient_amount,\n            \"source\" => PublicKey::random(&mut rng).to_account_hash(),\n            \"target\" => PublicKey::random(&mut rng).to_account_hash(),\n        };\n        let session = ExecutableDeployItem::Transfer {\n            args: transfer_args,\n        };\n        deploy.session = session;\n\n        let current_timestamp = deploy.header().timestamp();\n        assert_eq!(\n            Err(InvalidDeploy::InsufficientTransferAmount {\n                minimum: Box::new(U512::from(config.native_transfer_minimum_motes)),\n                attempted: Box::new(insufficient_amount),\n            }),\n            deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp,)\n        )\n    }\n\n    #[test]\n    fn should_use_payment_amount_for_payment_limited_payment() {\n        const GAS_PRICE_TOLERANCE: u8 = u8::MAX;\n\n        let payment_amount = 500u64;\n        let mut rng = TestRng::new();\n        let chain_name = \"net-1\";\n        let mut chainspec = Chainspec::default();\n        chainspec\n            .with_chain_name(chain_name.to_string())\n            .with_pricing_handling(PricingHandling::PaymentLimited);\n\n        let config 
= chainspec.transaction_config.clone();\n\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: runtime_args! {\n                \"amount\" => U512::from(payment_amount)\n            },\n        };\n\n        // Create an empty session object that is not transfer to ensure\n        // that the payment amount is checked.\n        let session = ExecutableDeployItem::StoredContractByName {\n            name: \"\".to_string(),\n            entry_point: \"\".to_string(),\n            args: Default::default(),\n        };\n\n        let mut deploy = create_deploy(\n            &mut rng,\n            config.max_ttl,\n            0,\n            chain_name,\n            GAS_PRICE_TOLERANCE as u64,\n        );\n        deploy.payment = payment;\n        deploy.session = session;\n\n        let mut gas_price = 1;\n        let cost = deploy\n            .gas_cost(&chainspec, gas_price)\n            .expect(\"should cost\")\n            .value();\n        assert_eq!(\n            cost,\n            U512::from(payment_amount),\n            \"in payment limited pricing, the user selected amount should be the cost if gas price is 1\"\n        );\n        gas_price += 1;\n        let cost = deploy\n            .gas_cost(&chainspec, gas_price)\n            .expect(\"should cost\")\n            .value();\n        assert_eq!(\n            cost,\n            U512::from(payment_amount) * gas_price,\n            \"in payment limited pricing, the cost should == user selected amount * gas_price\"\n        );\n    }\n\n    #[test]\n    fn should_use_cost_table_for_fixed_payment() {\n        const GAS_PRICE_TOLERANCE: u8 = u8::MAX;\n\n        let payment_amount = 500u64;\n        let mut rng = TestRng::new();\n        let chain_name = \"net-1\";\n        let mut chainspec = Chainspec::default();\n        chainspec\n            .with_chain_name(chain_name.to_string())\n            
.with_pricing_handling(PricingHandling::PaymentLimited);\n\n        let config = chainspec.transaction_config.clone();\n\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: runtime_args! {\n                \"amount\" => U512::from(payment_amount)\n            },\n        };\n\n        // Create an empty session object that is not transfer to ensure\n        // that the payment amount is checked.\n        let session = ExecutableDeployItem::StoredContractByName {\n            name: \"\".to_string(),\n            entry_point: \"\".to_string(),\n            args: Default::default(),\n        };\n\n        let mut deploy = create_deploy(\n            &mut rng,\n            config.max_ttl,\n            0,\n            chain_name,\n            GAS_PRICE_TOLERANCE as u64,\n        );\n        deploy.payment = payment;\n        deploy.session = session;\n\n        let mut gas_price = 1;\n        let limit = deploy.gas_limit(&chainspec).expect(\"should limit\").value();\n        let cost = deploy\n            .gas_cost(&chainspec, gas_price)\n            .expect(\"should cost\")\n            .value();\n        assert_eq!(\n            cost, limit,\n            \"in fixed pricing, the cost & limit should == if gas price is 1\"\n        );\n        gas_price += 1;\n        let cost = deploy\n            .gas_cost(&chainspec, gas_price)\n            .expect(\"should cost\")\n            .value();\n        assert_eq!(\n            cost,\n            limit * gas_price,\n            \"in fixed pricing, the cost should == limit * gas_price\"\n        );\n    }\n\n    #[test]\n    fn should_use_lane_specific_size_constraints() {\n        let mut rng = TestRng::new();\n        // Deploy is a transfer; should select MINT_LANE_ID\n        // and apply size limitations appropriate to that\n        const GAS_PRICE_TOLERANCE: u8 = u8::MAX;\n        let chain_name = \"net-1\";\n        let mut chainspec = 
Chainspec::default();\n        chainspec\n            .with_chain_name(chain_name.to_string())\n            .with_pricing_handling(PricingHandling::PaymentLimited);\n\n        let config = chainspec.transaction_config.clone();\n\n        let transfer_args = runtime_args! {\n            \"amount\" => U512::from(DEFAULT_MIN_TRANSFER_MOTES),\n            \"source\" => PublicKey::random(&mut rng).to_account_hash(),\n            \"target\" => PublicKey::random(&mut rng).to_account_hash(),\n            \"some_other\" => vec![1; 1_000_000], //pumping a big runtime arg to make sure that we don't fit in the mint lane\n        };\n        let payment_amount = 10_000_000_000u64;\n        let payment_args = runtime_args! {\n            \"amount\" => U512::from(payment_amount),\n        };\n        let session = ExecutableDeployItem::Transfer {\n            args: transfer_args,\n        };\n        let payment = ExecutableDeployItem::ModuleBytes {\n            module_bytes: Bytes::new(),\n            args: payment_args,\n        };\n\n        let mut deploy = create_deploy(\n            &mut rng,\n            config.max_ttl,\n            0,\n            chain_name,\n            GAS_PRICE_TOLERANCE as u64,\n        );\n        deploy.payment = payment;\n        deploy.session = session;\n        assert_eq!(\n            calculate_lane_id_for_deploy(\n                &deploy,\n                chainspec.core_config.pricing_handling,\n                &config.transaction_v1_config,\n            ),\n            Ok(MINT_LANE_ID)\n        );\n        let current_timestamp = deploy.header().timestamp();\n        let ret = deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp);\n        assert!(ret.is_err());\n        let err = ret.err().unwrap();\n        assert!(matches!(\n            err,\n            InvalidDeploy::ExcessiveSize(DeployExcessiveSizeError { .. })\n        ))\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/error.rs",
    "content": "use crate::InvalidDeploy;\nuse core::fmt::{Display, Formatter};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n\n#[cfg(feature = \"std\")]\nuse serde::Serialize;\n#[cfg(feature = \"std\")]\nuse std::error::Error as StdError;\n\npub use crate::transaction::transaction_v1::InvalidTransactionV1;\n\n/// A representation of the way in which a transaction failed validation checks.\n#[derive(Clone, Eq, PartialEq, Debug)]\n#[cfg_attr(feature = \"std\", derive(Serialize))]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[non_exhaustive]\npub enum InvalidTransaction {\n    /// Deploys.\n    Deploy(InvalidDeploy),\n    /// V1 transactions.\n    V1(InvalidTransactionV1),\n}\n\nimpl From<InvalidDeploy> for InvalidTransaction {\n    fn from(value: InvalidDeploy) -> Self {\n        Self::Deploy(value)\n    }\n}\n\nimpl From<InvalidTransactionV1> for InvalidTransaction {\n    fn from(value: InvalidTransactionV1) -> Self {\n        Self::V1(value)\n    }\n}\n\n#[cfg(feature = \"std\")]\nimpl StdError for InvalidTransaction {\n    fn source(&self) -> Option<&(dyn StdError + 'static)> {\n        match self {\n            InvalidTransaction::Deploy(deploy) => deploy.source(),\n            InvalidTransaction::V1(v1) => v1.source(),\n        }\n    }\n}\n\nimpl Display for InvalidTransaction {\n    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {\n        match self {\n            InvalidTransaction::Deploy(inner) => Display::fmt(inner, f),\n            InvalidTransaction::V1(inner) => Display::fmt(inner, f),\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/execution_info.rs",
    "content": "use alloc::vec::Vec;\n\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    execution::ExecutionResult,\n    BlockHash,\n};\n\n/// The block hash and height in which a given deploy was executed, along with the execution result\n/// if known.\n#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct ExecutionInfo {\n    /// The hash of the block in which the deploy was executed.\n    pub block_hash: BlockHash,\n    /// The height of the block in which the deploy was executed.\n    pub block_height: u64,\n    /// The execution result if known.\n    pub execution_result: Option<ExecutionResult>,\n}\n\nimpl FromBytes for ExecutionInfo {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (block_hash, bytes) = FromBytes::from_bytes(bytes)?;\n        let (block_height, bytes) = FromBytes::from_bytes(bytes)?;\n        let (execution_result, bytes) = FromBytes::from_bytes(bytes)?;\n        Ok((\n            ExecutionInfo {\n                block_hash,\n                block_height,\n                execution_result,\n            },\n            bytes,\n        ))\n    }\n}\n\nimpl ToBytes for ExecutionInfo {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut result)?;\n        Ok(result)\n    }\n\n    fn write_bytes(&self, bytes: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.block_hash.write_bytes(bytes)?;\n        self.block_height.write_bytes(bytes)?;\n        self.execution_result.write_bytes(bytes)?;\n        Ok(())\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.block_hash.serialized_length()\n            + self.block_height.serialized_length()\n            + 
self.execution_result.serialized_length()\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/initiator_addr.rs",
    "content": "use super::serialization::CalltableSerializationEnvelope;\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    account::AccountHash,\n    bytesrepr::{\n        Error::{self, Formatting},\n        FromBytes, ToBytes,\n    },\n    transaction::serialization::CalltableSerializationEnvelopeBuilder,\n    AsymmetricType, PublicKey,\n};\nuse alloc::vec::Vec;\nuse core::fmt::{self, Debug, Display, Formatter};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nconst TAG_FIELD_INDEX: u16 = 0;\n\nconst PUBLIC_KEY_VARIANT_TAG: u8 = 0;\nconst PUBLIC_KEY_FIELD_INDEX: u16 = 1;\n\nconst ACCOUNT_HASH_VARIANT_TAG: u8 = 1;\nconst ACCOUNT_HASH_FIELD_INDEX: u16 = 1;\n\n/// The address of the initiator of a [`crate::Transaction`].\n#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"The address of the initiator of a TransactionV1.\")\n)]\n#[serde(deny_unknown_fields)]\npub enum InitiatorAddr {\n    /// The public key of the initiator.\n    PublicKey(PublicKey),\n    /// The account hash derived from the public key of the initiator.\n    AccountHash(AccountHash),\n}\n\nimpl InitiatorAddr {\n    /// Gets the account hash.\n    pub fn account_hash(&self) -> AccountHash {\n        match self {\n            InitiatorAddr::PublicKey(public_key) => public_key.to_account_hash(),\n            InitiatorAddr::AccountHash(hash) => *hash,\n        }\n    }\n\n    /// Returns a random `InitiatorAddr`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..=1) {\n            0 => InitiatorAddr::PublicKey(PublicKey::random(rng)),\n       
     1 => InitiatorAddr::AccountHash(rng.gen()),\n            _ => unreachable!(),\n        }\n    }\n\n    fn serialized_field_lengths(&self) -> Vec<usize> {\n        match self {\n            InitiatorAddr::PublicKey(pub_key) => {\n                vec![\n                    crate::bytesrepr::U8_SERIALIZED_LENGTH,\n                    pub_key.serialized_length(),\n                ]\n            }\n            InitiatorAddr::AccountHash(hash) => {\n                vec![\n                    crate::bytesrepr::U8_SERIALIZED_LENGTH,\n                    hash.serialized_length(),\n                ]\n            }\n        }\n    }\n}\n\nimpl ToBytes for InitiatorAddr {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        match self {\n            InitiatorAddr::PublicKey(pub_key) => {\n                CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                    .add_field(TAG_FIELD_INDEX, &PUBLIC_KEY_VARIANT_TAG)?\n                    .add_field(PUBLIC_KEY_FIELD_INDEX, &pub_key)?\n                    .binary_payload_bytes()\n            }\n            InitiatorAddr::AccountHash(hash) => {\n                CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                    .add_field(TAG_FIELD_INDEX, &ACCOUNT_HASH_VARIANT_TAG)?\n                    .add_field(ACCOUNT_HASH_FIELD_INDEX, &hash)?\n                    .binary_payload_bytes()\n            }\n        }\n    }\n    fn serialized_length(&self) -> usize {\n        CalltableSerializationEnvelope::estimate_size(self.serialized_field_lengths())\n    }\n}\n\nimpl FromBytes for InitiatorAddr {\n    fn from_bytes(bytes: &[u8]) -> Result<(InitiatorAddr, &[u8]), Error> {\n        let (binary_payload, remainder) = CalltableSerializationEnvelope::from_bytes(2, bytes)?;\n        let window = binary_payload.start_consuming()?.ok_or(Formatting)?;\n        window.verify_index(TAG_FIELD_INDEX)?;\n        let (tag, window) = 
window.deserialize_and_maybe_next::<u8>()?;\n        let to_ret = match tag {\n            PUBLIC_KEY_VARIANT_TAG => {\n                let window = window.ok_or(Formatting)?;\n                window.verify_index(PUBLIC_KEY_FIELD_INDEX)?;\n                let (pub_key, window) = window.deserialize_and_maybe_next::<PublicKey>()?;\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(InitiatorAddr::PublicKey(pub_key))\n            }\n            ACCOUNT_HASH_VARIANT_TAG => {\n                let window = window.ok_or(Formatting)?;\n                window.verify_index(ACCOUNT_HASH_FIELD_INDEX)?;\n                let (hash, window) = window.deserialize_and_maybe_next::<AccountHash>()?;\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(InitiatorAddr::AccountHash(hash))\n            }\n            _ => Err(Formatting),\n        };\n        to_ret.map(|endpoint| (endpoint, remainder))\n    }\n}\n\nimpl From<PublicKey> for InitiatorAddr {\n    fn from(public_key: PublicKey) -> Self {\n        InitiatorAddr::PublicKey(public_key)\n    }\n}\n\nimpl From<AccountHash> for InitiatorAddr {\n    fn from(account_hash: AccountHash) -> Self {\n        InitiatorAddr::AccountHash(account_hash)\n    }\n}\n\nimpl Display for InitiatorAddr {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            InitiatorAddr::PublicKey(public_key) => {\n                write!(formatter, \"public key {}\", public_key.to_hex())\n            }\n            InitiatorAddr::AccountHash(account_hash) => {\n                write!(formatter, \"account hash {}\", account_hash)\n            }\n        }\n    }\n}\n\nimpl Debug for InitiatorAddr {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            InitiatorAddr::PublicKey(public_key) => formatter\n                
.debug_tuple(\"PublicKey\")\n                .field(public_key)\n                .finish(),\n            InitiatorAddr::AccountHash(account_hash) => formatter\n                .debug_tuple(\"AccountHash\")\n                .field(account_hash)\n                .finish(),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::{bytesrepr, gens::initiator_addr_arb};\n    use proptest::prelude::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        for _ in 0..10 {\n            bytesrepr::test_serialization_roundtrip(&InitiatorAddr::random(rng));\n        }\n    }\n\n    proptest! {\n        #[test]\n        fn generative_bytesrepr_roundtrip(val in initiator_addr_arb()) {\n            bytesrepr::test_serialization_roundtrip(&val);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/initiator_addr_and_secret_key.rs",
    "content": "use crate::{InitiatorAddr, PublicKey, SecretKey};\n\n/// Used when constructing a deploy or transaction.\n#[derive(Debug)]\npub(crate) enum InitiatorAddrAndSecretKey<'a> {\n    /// Provides both the initiator address and the secret key (not necessarily for the same\n    /// initiator address) used to sign the deploy or transaction.\n    Both {\n        /// The initiator address of the account.\n        initiator_addr: InitiatorAddr,\n        /// The secret key used to sign the deploy or transaction.\n        secret_key: &'a SecretKey,\n    },\n    /// The initiator address only (no secret key).  The deploy or transaction will be created\n    /// unsigned.\n    #[allow(unused)]\n    InitiatorAddr(InitiatorAddr),\n    /// The initiator address will be derived from the provided secret key, and the deploy or\n    /// transaction will be signed by the same secret key.\n    #[allow(unused)]\n    SecretKey(&'a SecretKey),\n}\n\nimpl InitiatorAddrAndSecretKey<'_> {\n    /// The address of the initiator of a `TransactionV1`.\n    pub fn initiator_addr(&self) -> InitiatorAddr {\n        match self {\n            InitiatorAddrAndSecretKey::Both { initiator_addr, .. }\n            | InitiatorAddrAndSecretKey::InitiatorAddr(initiator_addr) => initiator_addr.clone(),\n            InitiatorAddrAndSecretKey::SecretKey(secret_key) => {\n                InitiatorAddr::PublicKey(PublicKey::from(*secret_key))\n            }\n        }\n    }\n\n    /// The secret key of the initiator of a `TransactionV1`.\n    pub fn secret_key(&self) -> Option<&SecretKey> {\n        match self {\n            InitiatorAddrAndSecretKey::Both { secret_key, .. }\n            | InitiatorAddrAndSecretKey::SecretKey(secret_key) => Some(secret_key),\n            InitiatorAddrAndSecretKey::InitiatorAddr(_) => None,\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/package_identifier.rs",
    "content": "use alloc::{string::String, vec::Vec};\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse hex_fmt::HexFmt;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    contracts::ProtocolVersionMajor,\n    EntityVersion, PackageHash,\n};\n#[cfg(doc)]\nuse crate::{ExecutableDeployItem, TransactionTarget};\n\nconst HASH_TAG: u8 = 0;\nconst NAME_TAG: u8 = 1;\nconst HASH_WITH_VERSION_TAG: u8 = 2;\nconst NAME_WITH_VERSION_TAG: u8 = 3;\n\n/// Identifier for the package object within a [`TransactionTarget::Stored`] or an\n/// [`ExecutableDeployItem`].\n#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(\n        description = \"Identifier for the package object within a `Stored` transaction target or \\\n        an `ExecutableDeployItem`.\"\n    )\n)]\npub enum PackageIdentifier {\n    /// The hash and optional version identifying the contract package.\n    Hash {\n        /// The hash of the contract package.\n        package_hash: PackageHash,\n        /// The version of the contract package.\n        ///\n        /// `None` implies latest version.\n        version: Option<EntityVersion>,\n    },\n    /// The name and optional version identifying the contract package.\n    Name {\n        /// The name of the contract package.\n        name: String,\n        /// The version of the contract package.\n        ///\n        /// `None` implies latest version.\n        version: Option<EntityVersion>,\n    },\n    /// The hash and optional version key identifying the contract 
package.\n    HashWithMajorVersion {\n        /// The hash of the contract package.\n        package_hash: PackageHash,\n        /// The major protocol version of the contract package.\n        ///\n        /// `None` implies latest major protocol version.\n        protocol_version_major: Option<ProtocolVersionMajor>,\n        /// The version of the contract package.\n        ///\n        /// `None` implies latest version.\n        version: Option<EntityVersion>,\n    },\n    /// The name and optional version key identifying the contract package.\n    NameWithMajorVersion {\n        /// The name of the contract package.\n        name: String,\n        /// The major protocol version of the contract package.\n        ///\n        /// `None` implies latest major protocol version.\n        protocol_version_major: Option<ProtocolVersionMajor>,\n        /// The version of the contract package.\n        ///\n        /// `None` implies latest version.\n        version: Option<EntityVersion>,\n    },\n}\n\nimpl PackageIdentifier {\n    /// Returns the optional version of the contract package.\n    ///\n    /// `None` implies latest version.\n    pub fn version(&self) -> Option<EntityVersion> {\n        match self {\n            PackageIdentifier::HashWithMajorVersion { version, .. }\n            | PackageIdentifier::NameWithMajorVersion { version, .. }\n            | PackageIdentifier::Hash { version, .. }\n            | PackageIdentifier::Name { version, .. 
} => *version,\n        }\n    }\n\n    /// Returns the optional version key of the contract package.\n    ///\n    /// `None` implies latest version.\n    pub fn protocol_version_major(&self) -> Option<ProtocolVersionMajor> {\n        match self {\n            PackageIdentifier::HashWithMajorVersion {\n                protocol_version_major,\n                ..\n            }\n            | PackageIdentifier::NameWithMajorVersion {\n                protocol_version_major,\n                ..\n            } => *protocol_version_major,\n            PackageIdentifier::Hash { .. } | PackageIdentifier::Name { .. } => None,\n        }\n    }\n\n    /// Returns a random `PackageIdentifier`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..4) {\n            0 => PackageIdentifier::Hash {\n                package_hash: PackageHash::new(rng.gen()),\n                version: rng.gen(),\n            },\n            1 => PackageIdentifier::Name {\n                name: rng.random_string(1..21),\n                version: rng.gen(),\n            },\n            2 => PackageIdentifier::HashWithMajorVersion {\n                package_hash: PackageHash::new(rng.gen()),\n                protocol_version_major: rng.gen(),\n                version: rng.gen(),\n            },\n            3 => PackageIdentifier::NameWithMajorVersion {\n                name: rng.random_string(1..21),\n                protocol_version_major: rng.gen(),\n                version: rng.gen(),\n            },\n            _ => unreachable!(\"Unexpected tag\"),\n        }\n    }\n}\n\nimpl Display for PackageIdentifier {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            PackageIdentifier::Hash {\n                package_hash: contract_package_hash,\n                version: Some(ver),\n            } => write!(\n                formatter,\n                \"package-id({}, version {})\",\n 
               HexFmt(contract_package_hash),\n                ver\n            ),\n            PackageIdentifier::Hash {\n                package_hash: contract_package_hash,\n                ..\n            } => write!(\n                formatter,\n                \"package-id({}, latest)\",\n                HexFmt(contract_package_hash),\n            ),\n            PackageIdentifier::Name {\n                name,\n                version: Some(ver),\n            } => write!(formatter, \"package-id({}, version {})\", name, ver),\n            PackageIdentifier::Name { name, .. } => {\n                write!(formatter, \"package-id({}, latest)\", name)\n            }\n            PackageIdentifier::HashWithMajorVersion {\n                package_hash,\n                protocol_version_major,\n                version,\n            } => {\n                write!(\n                    formatter,\n                    \"package-id-HashWithVersion({}, protocol_version_major: {:?}, version: {:?})\",\n                    HexFmt(package_hash),\n                    protocol_version_major,\n                    version,\n                )\n            }\n            PackageIdentifier::NameWithMajorVersion {\n                name,\n                protocol_version_major,\n                version,\n            } => {\n                write!(\n                    formatter,\n                    \"package-id-NameWithVersion({},protocol_version_major: {:?}, version: {:?})\",\n                    name, protocol_version_major, version,\n                )\n            }\n        }\n    }\n}\n\nimpl ToBytes for PackageIdentifier {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            PackageIdentifier::Hash {\n                package_hash,\n                version,\n            } => {\n                HASH_TAG.write_bytes(writer)?;\n                package_hash.write_bytes(writer)?;\n                
version.write_bytes(writer)\n            }\n            PackageIdentifier::Name { name, version } => {\n                NAME_TAG.write_bytes(writer)?;\n                name.write_bytes(writer)?;\n                version.write_bytes(writer)\n            }\n            PackageIdentifier::HashWithMajorVersion {\n                package_hash,\n                protocol_version_major,\n                version,\n            } => {\n                HASH_WITH_VERSION_TAG.write_bytes(writer)?;\n                package_hash.write_bytes(writer)?;\n                protocol_version_major.write_bytes(writer)?;\n                version.write_bytes(writer)\n            }\n            PackageIdentifier::NameWithMajorVersion {\n                name,\n                protocol_version_major,\n                version,\n            } => {\n                NAME_WITH_VERSION_TAG.write_bytes(writer)?;\n                name.write_bytes(writer)?;\n                protocol_version_major.write_bytes(writer)?;\n                version.write_bytes(writer)\n            }\n        }\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                PackageIdentifier::Hash {\n                    package_hash,\n                    version,\n                } => package_hash.serialized_length() + version.serialized_length(),\n                PackageIdentifier::Name { name, version } => {\n                    name.serialized_length() + version.serialized_length()\n                }\n                PackageIdentifier::HashWithMajorVersion {\n                    package_hash,\n                    protocol_version_major,\n                    version,\n                } => {\n                    package_hash.serialized_length()\n                        + 
protocol_version_major.serialized_length()\n                        + version.serialized_length()\n                }\n                PackageIdentifier::NameWithMajorVersion {\n                    name,\n                    protocol_version_major,\n                    version,\n                } => {\n                    name.serialized_length()\n                        + protocol_version_major.serialized_length()\n                        + version.serialized_length()\n                }\n            }\n    }\n}\n\nimpl FromBytes for PackageIdentifier {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            HASH_TAG => {\n                let (package_hash, remainder) = PackageHash::from_bytes(remainder)?;\n                let (version, remainder) = Option::<EntityVersion>::from_bytes(remainder)?;\n                let id = PackageIdentifier::Hash {\n                    package_hash,\n                    version,\n                };\n                Ok((id, remainder))\n            }\n            NAME_TAG => {\n                let (name, remainder) = String::from_bytes(remainder)?;\n                let (version, remainder) = Option::<EntityVersion>::from_bytes(remainder)?;\n                let id = PackageIdentifier::Name { name, version };\n                Ok((id, remainder))\n            }\n            HASH_WITH_VERSION_TAG => {\n                let (package_hash, remainder) = PackageHash::from_bytes(remainder)?;\n                let (protocol_version_major, remainder) = Option::from_bytes(remainder)?;\n                let (version, remainder) = Option::from_bytes(remainder)?;\n                let id = PackageIdentifier::HashWithMajorVersion {\n                    package_hash,\n                    protocol_version_major,\n                    version,\n                };\n                Ok((id, remainder))\n            }\n            
NAME_WITH_VERSION_TAG => {\n                let (name, remainder) = String::from_bytes(remainder)?;\n                let (protocol_version_major, remainder) = Option::from_bytes(remainder)?;\n                let (version, remainder) = Option::from_bytes(remainder)?;\n                let id = PackageIdentifier::NameWithMajorVersion {\n                    name,\n                    protocol_version_major,\n                    version,\n                };\n                Ok((id, remainder))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        bytesrepr::test_serialization_roundtrip(&PackageIdentifier::random(rng));\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/pricing_mode.rs",
    "content": "use alloc::vec::Vec;\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(doc)]\nuse super::Transaction;\nuse super::{\n    serialization::CalltableSerializationEnvelope, InvalidTransaction, InvalidTransactionV1,\n    TransactionEntryPoint,\n};\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{\n        Error::{self, Formatting},\n        FromBytes, ToBytes,\n    },\n    transaction::serialization::CalltableSerializationEnvelopeBuilder,\n    Digest,\n};\n#[cfg(any(feature = \"std\", test))]\nuse crate::{Chainspec, Gas, Motes};\n\n/// The pricing mode of a [`Transaction`].\n#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"Pricing mode of a Transaction.\")\n)]\n#[serde(deny_unknown_fields)]\npub enum PricingMode {\n    /// The original payment model, where the creator of the transaction\n    /// specifies how much they will pay, at what gas price.\n    PaymentLimited {\n        /// User-specified payment amount.\n        payment_amount: u64,\n        /// User-specified gas_price tolerance (minimum 1).\n        /// This is interpreted to mean \"do not include this transaction in a block\n        /// if the current gas price is greater than this number\"\n        gas_price_tolerance: u8,\n        /// Standard payment.\n        standard_payment: bool,\n    },\n    /// The cost of the transaction is determined by the cost table, per the\n    /// transaction category.\n    Fixed {\n        /// User-specified additional computation factor (minimum 0). 
If \"0\" is provided,\n        ///  no additional logic is applied to the computation limit. Each value above \"0\"\n        ///  tells the node that it needs to treat the transaction as if it uses more gas\n        ///  than it's serialized size indicates. Each \"1\" will increase the \"wasm lane\"\n        ///  size bucket for this transaction by 1. So if the size of the transaction\n        ///  indicates bucket \"0\" and \"additional_computation_factor = 2\", the transaction\n        ///  will be treated as a \"2\".\n        additional_computation_factor: u8,\n        /// User-specified gas_price tolerance (minimum 1).\n        /// This is interpreted to mean \"do not include this transaction in a block\n        /// if the current gas price is greater than this number\"\n        gas_price_tolerance: u8,\n    },\n    /// The payment for this transaction was previously paid, as proven by\n    /// the receipt hash (this is for future use, not currently implemented).\n    Prepaid {\n        /// Pre-paid receipt.\n        receipt: Digest,\n    },\n}\n\nimpl PricingMode {\n    /// Returns a random `PricingMode.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..=2) {\n            0 => PricingMode::PaymentLimited {\n                payment_amount: rng.gen(),\n                gas_price_tolerance: 1,\n                standard_payment: true,\n            },\n            1 => PricingMode::Fixed {\n                gas_price_tolerance: rng.gen(),\n                additional_computation_factor: 1,\n            },\n            2 => PricingMode::Prepaid { receipt: rng.gen() },\n            _ => unreachable!(),\n        }\n    }\n\n    /// Returns standard payment flag, if it is a `PaymentLimited` variant.\n    pub fn is_standard_payment(&self) -> bool {\n        match self {\n            PricingMode::PaymentLimited {\n                standard_payment, ..\n            } => *standard_payment,\n            
PricingMode::Fixed { .. } => true,\n            PricingMode::Prepaid { .. } => true,\n        }\n    }\n\n    fn serialized_field_lengths(&self) -> Vec<usize> {\n        match self {\n            PricingMode::PaymentLimited {\n                payment_amount,\n                gas_price_tolerance,\n                standard_payment,\n            } => {\n                vec![\n                    crate::bytesrepr::U8_SERIALIZED_LENGTH,\n                    payment_amount.serialized_length(),\n                    gas_price_tolerance.serialized_length(),\n                    standard_payment.serialized_length(),\n                ]\n            }\n            PricingMode::Fixed {\n                gas_price_tolerance,\n                additional_computation_factor,\n            } => {\n                vec![\n                    crate::bytesrepr::U8_SERIALIZED_LENGTH,\n                    gas_price_tolerance.serialized_length(),\n                    additional_computation_factor.serialized_length(),\n                ]\n            }\n            PricingMode::Prepaid { receipt } => {\n                vec![\n                    crate::bytesrepr::U8_SERIALIZED_LENGTH,\n                    receipt.serialized_length(),\n                ]\n            }\n        }\n    }\n\n    #[cfg(any(feature = \"std\", test))]\n    /// Returns the gas limit.\n    pub fn gas_limit(&self, chainspec: &Chainspec, lane_id: u8) -> Result<Gas, PricingModeError> {\n        let gas = match self {\n            PricingMode::PaymentLimited { payment_amount, .. } => Gas::new(*payment_amount),\n            PricingMode::Fixed { .. 
} => {\n                //The lane_id should already include additional_computation_factor in case of wasm\n                Gas::new(chainspec.get_max_gas_limit_by_category(lane_id))\n            }\n            PricingMode::Prepaid { receipt } => {\n                return Err(PricingModeError::InvalidPricingMode {\n                    price_mode: PricingMode::Prepaid { receipt: *receipt },\n                });\n            }\n        };\n        Ok(gas)\n    }\n\n    #[cfg(any(feature = \"std\", test))]\n    /// Returns gas cost.\n    pub fn gas_cost(\n        &self,\n        chainspec: &Chainspec,\n        lane_id: u8,\n        gas_price: u8,\n    ) -> Result<Motes, PricingModeError> {\n        let gas_limit = self.gas_limit(chainspec, lane_id)?;\n        let motes = match self {\n            PricingMode::PaymentLimited { payment_amount, .. } => {\n                Motes::from_gas(Gas::from(*payment_amount), gas_price)\n                    .ok_or(PricingModeError::UnableToCalculateGasCost)?\n            }\n            PricingMode::Fixed { .. } => Motes::from_gas(gas_limit, gas_price)\n                .ok_or(PricingModeError::UnableToCalculateGasCost)?,\n            PricingMode::Prepaid { .. } => {\n                Motes::zero() // prepaid\n            }\n        };\n        Ok(motes)\n    }\n\n    /// Returns gas cost.\n    pub fn additional_computation_factor(&self) -> u8 {\n        match self {\n            PricingMode::PaymentLimited { .. } => 0,\n            PricingMode::Fixed {\n                additional_computation_factor,\n                ..\n            } => *additional_computation_factor,\n            PricingMode::Prepaid { .. } => 0,\n        }\n    }\n}\n\n// This impl is provided due to a completeness test that we\n// have in binary-port. 
It checks if all variants of this\n// error have corresponding binary port error codes\n#[cfg(any(feature = \"testing\", test))]\nimpl Default for PricingMode {\n    fn default() -> Self {\n        Self::PaymentLimited {\n            payment_amount: 1,\n            gas_price_tolerance: 1,\n            standard_payment: true,\n        }\n    }\n}\n\n///Errors that can occur when calling PricingMode functions\n#[derive(Debug)]\npub enum PricingModeError {\n    /// The entry point for this transaction target cannot be `call`.\n    EntryPointCannotBeCall,\n    /// The entry point for this transaction target cannot be `TransactionEntryPoint::Custom`.\n    EntryPointCannotBeCustom {\n        /// The invalid entry point.\n        entry_point: TransactionEntryPoint,\n    },\n    /// Invalid combination of pricing handling and pricing mode.\n    InvalidPricingMode {\n        /// The pricing mode as specified by the transaction.\n        price_mode: PricingMode,\n    },\n    /// Unable to calculate gas cost.\n    UnableToCalculateGasCost,\n    /// Unexpected entry point.\n    UnexpectedEntryPoint {\n        entry_point: TransactionEntryPoint,\n        lane_id: u8,\n    },\n}\n\nimpl From<PricingModeError> for InvalidTransaction {\n    fn from(err: PricingModeError) -> Self {\n        InvalidTransaction::V1(err.into())\n    }\n}\n\nimpl From<PricingModeError> for InvalidTransactionV1 {\n    fn from(err: PricingModeError) -> Self {\n        match err {\n            PricingModeError::EntryPointCannotBeCall => {\n                InvalidTransactionV1::EntryPointCannotBeCall\n            }\n            PricingModeError::EntryPointCannotBeCustom { entry_point } => {\n                InvalidTransactionV1::EntryPointCannotBeCustom { entry_point }\n            }\n            PricingModeError::InvalidPricingMode { price_mode } => {\n                InvalidTransactionV1::InvalidPricingMode { price_mode }\n            }\n            PricingModeError::UnableToCalculateGasCost => {\n       
         InvalidTransactionV1::UnableToCalculateGasCost\n            }\n            PricingModeError::UnexpectedEntryPoint {\n                entry_point,\n                lane_id,\n            } => InvalidTransactionV1::UnexpectedEntryPoint {\n                entry_point,\n                lane_id,\n            },\n        }\n    }\n}\nconst TAG_FIELD_INDEX: u16 = 0;\n\nconst PAYMENT_LIMITED_VARIANT_TAG: u8 = 0;\nconst PAYMENT_LIMITED_PAYMENT_AMOUNT_INDEX: u16 = 1;\nconst PAYMENT_LIMITED_GAS_PRICE_TOLERANCE_INDEX: u16 = 2;\nconst PAYMENT_LIMITED_STANDARD_PAYMENT_INDEX: u16 = 3;\n\nconst FIXED_VARIANT_TAG: u8 = 1;\nconst FIXED_GAS_PRICE_TOLERANCE_INDEX: u16 = 1;\nconst FIXED_ADDITIONAL_COMPUTATION_FACTOR_INDEX: u16 = 2;\n\nconst RESERVED_VARIANT_TAG: u8 = 2;\nconst RESERVED_RECEIPT_INDEX: u16 = 1;\n\nimpl ToBytes for PricingMode {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        match self {\n            PricingMode::PaymentLimited {\n                payment_amount,\n                gas_price_tolerance,\n                standard_payment,\n            } => CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                .add_field(TAG_FIELD_INDEX, &PAYMENT_LIMITED_VARIANT_TAG)?\n                .add_field(PAYMENT_LIMITED_PAYMENT_AMOUNT_INDEX, &payment_amount)?\n                .add_field(\n                    PAYMENT_LIMITED_GAS_PRICE_TOLERANCE_INDEX,\n                    &gas_price_tolerance,\n                )?\n                .add_field(PAYMENT_LIMITED_STANDARD_PAYMENT_INDEX, &standard_payment)?\n                .binary_payload_bytes(),\n            PricingMode::Fixed {\n                gas_price_tolerance,\n                additional_computation_factor,\n            } => CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                .add_field(TAG_FIELD_INDEX, &FIXED_VARIANT_TAG)?\n                .add_field(FIXED_GAS_PRICE_TOLERANCE_INDEX, &gas_price_tolerance)?\n                
.add_field(\n                    FIXED_ADDITIONAL_COMPUTATION_FACTOR_INDEX,\n                    &additional_computation_factor,\n                )?\n                .binary_payload_bytes(),\n            PricingMode::Prepaid { receipt } => {\n                CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                    .add_field(TAG_FIELD_INDEX, &RESERVED_VARIANT_TAG)?\n                    .add_field(RESERVED_RECEIPT_INDEX, &receipt)?\n                    .binary_payload_bytes()\n            }\n        }\n    }\n    fn serialized_length(&self) -> usize {\n        CalltableSerializationEnvelope::estimate_size(self.serialized_field_lengths())\n    }\n}\n\nimpl FromBytes for PricingMode {\n    fn from_bytes(bytes: &[u8]) -> Result<(PricingMode, &[u8]), Error> {\n        let (binary_payload, remainder) = CalltableSerializationEnvelope::from_bytes(4, bytes)?;\n        let window = binary_payload.start_consuming()?.ok_or(Formatting)?;\n        window.verify_index(TAG_FIELD_INDEX)?;\n        let (tag, window) = window.deserialize_and_maybe_next::<u8>()?;\n        let to_ret = match tag {\n            PAYMENT_LIMITED_VARIANT_TAG => {\n                let window = window.ok_or(Formatting)?;\n                window.verify_index(PAYMENT_LIMITED_PAYMENT_AMOUNT_INDEX)?;\n                let (payment_amount, window) = window.deserialize_and_maybe_next::<u64>()?;\n                let window = window.ok_or(Formatting)?;\n                window.verify_index(PAYMENT_LIMITED_GAS_PRICE_TOLERANCE_INDEX)?;\n                let (gas_price_tolerance, window) = window.deserialize_and_maybe_next::<u8>()?;\n                let window = window.ok_or(Formatting)?;\n                window.verify_index(PAYMENT_LIMITED_STANDARD_PAYMENT_INDEX)?;\n                let (standard_payment, window) = window.deserialize_and_maybe_next::<bool>()?;\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                
Ok(PricingMode::PaymentLimited {\n                    payment_amount,\n                    gas_price_tolerance,\n                    standard_payment,\n                })\n            }\n            FIXED_VARIANT_TAG => {\n                let window = window.ok_or(Formatting)?;\n                window.verify_index(FIXED_GAS_PRICE_TOLERANCE_INDEX)?;\n                let (gas_price_tolerance, window) = window.deserialize_and_maybe_next::<u8>()?;\n                let window = window.ok_or(Formatting)?;\n                window.verify_index(FIXED_ADDITIONAL_COMPUTATION_FACTOR_INDEX)?;\n                let (additional_computation_factor, window) =\n                    window.deserialize_and_maybe_next::<u8>()?;\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(PricingMode::Fixed {\n                    gas_price_tolerance,\n                    additional_computation_factor,\n                })\n            }\n            RESERVED_VARIANT_TAG => {\n                let window = window.ok_or(Formatting)?;\n                window.verify_index(RESERVED_RECEIPT_INDEX)?;\n                let (receipt, window) = window.deserialize_and_maybe_next::<Digest>()?;\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(PricingMode::Prepaid { receipt })\n            }\n            _ => Err(Formatting),\n        };\n        to_ret.map(|endpoint| (endpoint, remainder))\n    }\n}\n\nimpl Display for PricingMode {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            PricingMode::PaymentLimited {\n                payment_amount,\n                gas_price_tolerance: gas_price,\n                standard_payment,\n            } => {\n                write!(\n                    formatter,\n                    \"payment amount {}, gas price multiplier {} standard_payment {}\",\n                    
payment_amount, gas_price, standard_payment\n                )\n            }\n            PricingMode::Prepaid { receipt } => write!(formatter, \"prepaid: {}\", receipt),\n            PricingMode::Fixed {\n                gas_price_tolerance,\n                additional_computation_factor,\n            } => write!(\n                formatter,\n                \"fixed pricing {} {}\",\n                gas_price_tolerance, additional_computation_factor\n            ),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::bytesrepr;\n\n    #[test]\n    fn test_to_bytes_and_from_bytes() {\n        bytesrepr::test_serialization_roundtrip(&PricingMode::PaymentLimited {\n            payment_amount: 100,\n            gas_price_tolerance: 1,\n            standard_payment: true,\n        });\n        bytesrepr::test_serialization_roundtrip(&PricingMode::Fixed {\n            gas_price_tolerance: 2,\n            additional_computation_factor: 1,\n        });\n        bytesrepr::test_serialization_roundtrip(&PricingMode::Prepaid {\n            receipt: Digest::hash(b\"prepaid\"),\n        });\n    }\n\n    use crate::gens::pricing_mode_arb;\n    use proptest::prelude::*;\n    proptest! {\n        #[test]\n        fn generative_bytesrepr_roundtrip(val in pricing_mode_arb()) {\n            bytesrepr::test_serialization_roundtrip(&val);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/runtime_args.rs",
    "content": "//! Home of RuntimeArgs for calling contracts\n\nuse alloc::{collections::BTreeMap, string::String, vec::Vec};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{Rng, RngCore};\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::{bytesrepr::Bytes, testing::TestRng};\nuse crate::{\n    bytesrepr::{self, Error, FromBytes, ToBytes},\n    CLType, CLTyped, CLValue, CLValueError, U512,\n};\n/// Named arguments to a contract.\n#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct NamedArg(String, CLValue);\n\nimpl NamedArg {\n    /// Returns a new `NamedArg`.\n    pub fn new(name: String, value: CLValue) -> Self {\n        NamedArg(name, value)\n    }\n\n    /// Returns the name of the named arg.\n    pub fn name(&self) -> &str {\n        &self.0\n    }\n\n    /// Returns the value of the named arg.\n    pub fn cl_value(&self) -> &CLValue {\n        &self.1\n    }\n\n    /// Returns a mutable reference to the value of the named arg.\n    pub fn cl_value_mut(&mut self) -> &mut CLValue {\n        &mut self.1\n    }\n}\n\nimpl From<(String, CLValue)> for NamedArg {\n    fn from((name, value): (String, CLValue)) -> NamedArg {\n        NamedArg(name, value)\n    }\n}\n\nimpl ToBytes for NamedArg {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut result = bytesrepr::allocate_buffer(self)?;\n        result.append(&mut self.0.to_bytes()?);\n        result.append(&mut self.1.to_bytes()?);\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length() + self.1.serialized_length()\n    }\n}\n\nimpl FromBytes for NamedArg {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), 
Error> {\n        let (name, remainder) = String::from_bytes(bytes)?;\n        let (cl_value, remainder) = CLValue::from_bytes(remainder)?;\n        Ok((NamedArg(name, cl_value), remainder))\n    }\n}\n\n/// Represents a collection of arguments passed to a smart contract.\n#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize, Debug, Default)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub struct RuntimeArgs(Vec<NamedArg>);\n\nimpl RuntimeArgs {\n    /// Create an empty [`RuntimeArgs`] instance.\n    pub fn new() -> RuntimeArgs {\n        RuntimeArgs::default()\n    }\n\n    /// A wrapper that lets you easily and safely create runtime arguments.\n    ///\n    /// This method is useful when you have to construct a [`RuntimeArgs`] with multiple entries,\n    /// but error handling at given call site would require to have a match statement for each\n    /// [`RuntimeArgs::insert`] call. With this method you can use ? operator inside the closure and\n    /// then handle single result. 
When `try_block` will be stabilized this method could be\n    /// deprecated in favor of using those blocks.\n    pub fn try_new<F>(func: F) -> Result<RuntimeArgs, CLValueError>\n    where\n        F: FnOnce(&mut RuntimeArgs) -> Result<(), CLValueError>,\n    {\n        let mut runtime_args = RuntimeArgs::new();\n        func(&mut runtime_args)?;\n        Ok(runtime_args)\n    }\n\n    /// Gets an argument by its name.\n    pub fn get(&self, name: &str) -> Option<&CLValue> {\n        self.0.iter().find_map(|NamedArg(named_name, named_value)| {\n            if named_name == name {\n                Some(named_value)\n            } else {\n                None\n            }\n        })\n    }\n\n    /// Gets the length of the collection.\n    pub fn len(&self) -> usize {\n        self.0.len()\n    }\n\n    /// Returns `true` if the collection of arguments is empty.\n    pub fn is_empty(&self) -> bool {\n        self.0.is_empty()\n    }\n\n    /// Inserts a new named argument into the collection.\n    pub fn insert<K, V>(&mut self, key: K, value: V) -> Result<(), CLValueError>\n    where\n        K: Into<String>,\n        V: CLTyped + ToBytes,\n    {\n        let cl_value = CLValue::from_t(value)?;\n        self.0.push(NamedArg(key.into(), cl_value));\n        Ok(())\n    }\n\n    /// Inserts a new named argument into the collection.\n    pub fn insert_cl_value<K>(&mut self, key: K, cl_value: CLValue)\n    where\n        K: Into<String>,\n    {\n        self.0.push(NamedArg(key.into(), cl_value));\n    }\n\n    /// Returns all the values of the named args.\n    pub fn to_values(&self) -> Vec<&CLValue> {\n        self.0.iter().map(|NamedArg(_name, value)| value).collect()\n    }\n\n    /// Returns an iterator of references over all arguments in insertion order.\n    pub fn named_args(&self) -> impl Iterator<Item = &NamedArg> {\n        self.0.iter()\n    }\n\n    /// Returns an iterator of mutable references over all arguments in insertion order.\n    pub fn 
named_args_mut(&mut self) -> impl Iterator<Item = &mut NamedArg> {\n        self.0.iter_mut()\n    }\n\n    /// Returns the numeric value of `name` arg from the runtime arguments or defaults to\n    /// 0 if that arg doesn't exist or is not an integer type.\n    ///\n    /// Supported [`CLType`]s for numeric conversions are U64, and U512.\n    ///\n    /// Returns an error if parsing the arg fails.\n    pub fn try_get_number(&self, name: &str) -> Result<U512, CLValueError> {\n        let amount_arg = match self.get(name) {\n            None => return Ok(U512::zero()),\n            Some(arg) => arg,\n        };\n        match amount_arg.cl_type() {\n            CLType::U512 => amount_arg.clone().into_t::<U512>(),\n            CLType::U64 => amount_arg.clone().into_t::<u64>().map(U512::from),\n            _ => Ok(U512::zero()),\n        }\n    }\n\n    /// Returns a random `RuntimeArgs`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        fn random_bytes(rng: &mut TestRng) -> Bytes {\n            let mut buffer = vec![0u8; rng.gen_range(0..100)];\n            rng.fill_bytes(buffer.as_mut());\n            Bytes::from(buffer)\n        }\n\n        let count = rng.gen_range(0..6);\n        let mut args = RuntimeArgs::new();\n        for _ in 0..count {\n            let key = rng.random_string(1..21);\n            let value = random_bytes(rng);\n            let _ = args.insert(key, value);\n        }\n        args\n    }\n}\n\nimpl From<Vec<NamedArg>> for RuntimeArgs {\n    fn from(values: Vec<NamedArg>) -> Self {\n        RuntimeArgs(values)\n    }\n}\n\nimpl From<BTreeMap<String, CLValue>> for RuntimeArgs {\n    fn from(cl_values: BTreeMap<String, CLValue>) -> RuntimeArgs {\n        RuntimeArgs(cl_values.into_iter().map(NamedArg::from).collect())\n    }\n}\n\nimpl From<RuntimeArgs> for BTreeMap<String, CLValue> {\n    fn from(args: RuntimeArgs) -> BTreeMap<String, CLValue> {\n        let mut map = BTreeMap::new();\n    
    for named in args.0 {\n            map.insert(named.0, named.1);\n        }\n        map\n    }\n}\n\nimpl ToBytes for RuntimeArgs {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n}\n\nimpl FromBytes for RuntimeArgs {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (args, remainder) = Vec::<NamedArg>::from_bytes(bytes)?;\n        Ok((RuntimeArgs(args), remainder))\n    }\n}\n\n/// Macro that makes it easier to construct named arguments.\n///\n/// NOTE: This macro does not propagate possible errors that could occur while creating a\n/// [`CLValue`]. For such cases creating [`RuntimeArgs`] manually is recommended.\n///\n/// # Example usage\n/// ```\n/// use casper_types::runtime_args;\n/// let _named_args = runtime_args! {\n///   \"foo\" => 42,\n///   \"bar\" => \"Hello, world!\"\n/// };\n/// ```\n#[macro_export]\nmacro_rules! 
runtime_args {\n    () => ($crate::RuntimeArgs::new());\n    ( $($key:expr => $value:expr,)+ ) => (runtime_args!($($key => $value),+));\n    ( $($key:expr => $value:expr),* ) => {\n        {\n            let mut named_args = $crate::RuntimeArgs::new();\n            $(\n                named_args.insert($key, $value).unwrap();\n            )*\n            named_args\n        }\n    };\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    const ARG_AMOUNT: &str = \"amount\";\n\n    #[test]\n    fn test_runtime_args() {\n        let arg1 = CLValue::from_t(1).unwrap();\n        let arg2 = CLValue::from_t(\"Foo\").unwrap();\n        let arg3 = CLValue::from_t(Some(1)).unwrap();\n        let args = {\n            let mut map = BTreeMap::new();\n            map.insert(\"bar\".into(), arg2.clone());\n            map.insert(\"foo\".into(), arg1.clone());\n            map.insert(\"qwer\".into(), arg3.clone());\n            map\n        };\n        let runtime_args = RuntimeArgs::from(args);\n        assert_eq!(runtime_args.get(\"qwer\"), Some(&arg3));\n        assert_eq!(runtime_args.get(\"foo\"), Some(&arg1));\n        assert_eq!(runtime_args.get(\"bar\"), Some(&arg2));\n        assert_eq!(runtime_args.get(\"aaa\"), None);\n\n        // Ensure macro works\n\n        let runtime_args_2 = runtime_args! {\n            \"bar\" => \"Foo\",\n            \"foo\" => 1i32,\n            \"qwer\" => Some(1i32),\n        };\n        assert_eq!(runtime_args, runtime_args_2);\n    }\n\n    #[test]\n    fn empty_macro() {\n        assert_eq!(runtime_args! {}, RuntimeArgs::new());\n    }\n\n    #[test]\n    fn btreemap_compat() {\n        // This test assumes same serialization format as BTreeMap\n        let runtime_args_1 = runtime_args! 
{\n            \"bar\" => \"Foo\",\n            \"foo\" => 1i32,\n            \"qwer\" => Some(1i32),\n        };\n        let tagless = runtime_args_1.to_bytes().unwrap().to_vec();\n\n        let mut runtime_args_2 = BTreeMap::new();\n        runtime_args_2.insert(String::from(\"bar\"), CLValue::from_t(\"Foo\").unwrap());\n        runtime_args_2.insert(String::from(\"foo\"), CLValue::from_t(1i32).unwrap());\n        runtime_args_2.insert(String::from(\"qwer\"), CLValue::from_t(Some(1i32)).unwrap());\n\n        assert_eq!(tagless, runtime_args_2.to_bytes().unwrap());\n    }\n\n    #[test]\n    fn named_serialization_roundtrip() {\n        let args = runtime_args! {\n            \"foo\" => 1i32,\n        };\n        bytesrepr::test_serialization_roundtrip(&args);\n    }\n\n    #[test]\n    fn should_create_args_with() {\n        let res = RuntimeArgs::try_new(|runtime_args| {\n            runtime_args.insert(String::from(\"foo\"), 123)?;\n            runtime_args.insert(String::from(\"bar\"), 456)?;\n            Ok(())\n        });\n\n        let expected = runtime_args! 
{\n            \"foo\" => 123,\n            \"bar\" => 456,\n        };\n        assert!(matches!(res, Ok(args) if expected == args));\n    }\n\n    #[test]\n    fn try_get_number_should_work() {\n        let mut args = RuntimeArgs::new();\n        args.insert(ARG_AMOUNT, 0u64).expect(\"is ok\");\n        assert_eq!(args.try_get_number(ARG_AMOUNT).unwrap(), U512::zero());\n\n        let mut args = RuntimeArgs::new();\n        args.insert(ARG_AMOUNT, U512::zero()).expect(\"is ok\");\n        assert_eq!(args.try_get_number(ARG_AMOUNT).unwrap(), U512::zero());\n\n        let args = RuntimeArgs::new();\n        assert_eq!(args.try_get_number(ARG_AMOUNT).unwrap(), U512::zero());\n\n        let hundred = 100u64;\n\n        let mut args = RuntimeArgs::new();\n        let input = U512::from(hundred);\n        args.insert(ARG_AMOUNT, input).expect(\"is ok\");\n        assert_eq!(args.try_get_number(ARG_AMOUNT).unwrap(), input);\n\n        let mut args = RuntimeArgs::new();\n        args.insert(ARG_AMOUNT, hundred).expect(\"is ok\");\n        assert_eq!(\n            args.try_get_number(ARG_AMOUNT).unwrap(),\n            U512::from(hundred)\n        );\n    }\n\n    #[test]\n    fn try_get_number_should_return_zero_for_non_numeric_type() {\n        let mut args = RuntimeArgs::new();\n        args.insert(ARG_AMOUNT, \"Non-numeric-string\").unwrap();\n        assert_eq!(\n            args.try_get_number(ARG_AMOUNT).expect(\"should get amount\"),\n            U512::zero()\n        );\n    }\n\n    #[test]\n    fn try_get_number_should_return_zero_if_amount_is_missing() {\n        let args = RuntimeArgs::new();\n        assert_eq!(\n            args.try_get_number(ARG_AMOUNT).expect(\"should get amount\"),\n            U512::zero()\n        );\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/serialization/field.rs",
    "content": "use crate::bytesrepr::{\n    self, Error, FromBytes, ToBytes, U16_SERIALIZED_LENGTH, U32_SERIALIZED_LENGTH,\n};\nuse alloc::vec::Vec;\n\n#[derive(Eq, PartialEq, Debug)]\npub(crate) struct Field {\n    pub index: u16,\n    pub offset: u32,\n}\n\nimpl Field {\n    pub(crate) fn new(index: u16, offset: u32) -> Self {\n        Field { index, offset }\n    }\n}\n\nimpl ToBytes for Field {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), Error> {\n        self.index.write_bytes(writer)?;\n        self.offset.write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U16_SERIALIZED_LENGTH + U32_SERIALIZED_LENGTH\n    }\n}\n\nimpl FromBytes for Field {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (index, remainder) = u16::from_bytes(bytes)?;\n        let (offset, remainder) = u32::from_bytes(remainder)?;\n        Ok((Field::new(index, offset), remainder))\n    }\n}\n\nimpl Field {\n    pub fn serialized_vec_size(number_of_fields: usize) -> usize {\n        let mut size = U32_SERIALIZED_LENGTH; // Overhead of the vec itself\n        size += number_of_fields * Field::serialized_length();\n        size\n    }\n\n    pub fn serialized_length() -> usize {\n        U16_SERIALIZED_LENGTH + U32_SERIALIZED_LENGTH\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/serialization/mod.rs",
    "content": "mod field;\nuse alloc::vec::Vec;\nuse field::Field;\n\nuse crate::bytesrepr::{\n    self, Bytes, Error, FromBytes, ToBytes, U32_SERIALIZED_LENGTH, U8_SERIALIZED_LENGTH,\n};\n\npub struct CalltableSerializationEnvelopeBuilder {\n    fields: Vec<Field>,\n    expected_payload_sizes: Vec<usize>,\n    bytes: Vec<u8>,\n    current_field_index: usize,\n    current_offset: usize,\n}\n\nimpl CalltableSerializationEnvelopeBuilder {\n    pub fn new(\n        expected_payload_sizes: Vec<usize>,\n    ) -> Result<CalltableSerializationEnvelopeBuilder, Error> {\n        let number_of_fields = expected_payload_sizes.len();\n        let fields_size = Field::serialized_vec_size(number_of_fields);\n        let bytes_of_payload_size = expected_payload_sizes.iter().sum::<usize>();\n        let payload_and_vec_overhead: usize = U32_SERIALIZED_LENGTH + bytes_of_payload_size; // u32 for the overhead of serializing a vec\n        let mut payload_buffer =\n            bytesrepr::allocate_buffer_for_size(fields_size + payload_and_vec_overhead)?;\n        payload_buffer.extend(vec![0; fields_size]); // Making room for the call table\n        payload_buffer.extend((bytes_of_payload_size as u32).to_bytes()?); // Writing down number of bytes that are in the payload\n        Ok(CalltableSerializationEnvelopeBuilder {\n            fields: Vec::with_capacity(number_of_fields),\n            expected_payload_sizes,\n            bytes: payload_buffer,\n            current_field_index: 0,\n            current_offset: 0,\n        })\n    }\n\n    pub fn add_field<T: ToBytes + ?Sized>(\n        mut self,\n        field_index: u16,\n        value: &T,\n    ) -> Result<Self, Error> {\n        let current_field_index = self.current_field_index;\n        if current_field_index >= self.expected_payload_sizes.len() {\n            //We wrote more fields than expected\n            return Err(Error::NotRepresentable);\n        }\n        let fields = &mut self.fields;\n        if 
current_field_index > 0 && fields[current_field_index - 1].index >= field_index {\n            //Need to make sure we write fields in ascending order of tab index\n            return Err(Error::NotRepresentable);\n        }\n        let size = self.expected_payload_sizes[current_field_index];\n        let bytes_before = self.bytes.len();\n        value.write_bytes(&mut self.bytes)?;\n        fields.push(Field::new(field_index, self.current_offset as u32));\n        self.current_field_index += 1;\n        self.current_offset += size;\n        let bytes_after = self.bytes.len();\n        let wrote_bytes = bytes_after - bytes_before;\n        if wrote_bytes == 0 {\n            //We don't allow writing empty fields\n            return Err(Error::NotRepresentable);\n        }\n        if wrote_bytes != size {\n            //The written field occupied different amount of bytes than declared\n            return Err(Error::NotRepresentable);\n        }\n        Ok(self)\n    }\n\n    pub fn binary_payload_bytes(mut self) -> Result<Vec<u8>, Error> {\n        if self.current_field_index != self.expected_payload_sizes.len() {\n            //We didn't write all the fields we expected\n            return Err(Error::NotRepresentable);\n        }\n        let write_at_slice = &mut self.bytes[0..];\n        let calltable_bytes = self.fields.to_bytes()?;\n        for (pos, byte) in calltable_bytes.into_iter().enumerate() {\n            write_at_slice[pos] = byte;\n        }\n        Ok(self.bytes)\n    }\n}\n\npub struct CalltableSerializationEnvelope {\n    fields: Vec<Field>,\n    bytes: Bytes,\n}\n\nimpl CalltableSerializationEnvelope {\n    pub fn estimate_size(field_sizes: Vec<usize>) -> usize {\n        let number_of_fields = field_sizes.len();\n        let payload_in_bytes: usize = field_sizes.iter().sum();\n        let mut size = U32_SERIALIZED_LENGTH + U32_SERIALIZED_LENGTH; // Overhead of the fields vec and bytes vec\n        size += number_of_fields * 
Field::serialized_length();\n        size += payload_in_bytes * U8_SERIALIZED_LENGTH;\n        size\n    }\n\n    pub fn start_consuming(&self) -> Result<Option<CalltableFieldsIterator>, Error> {\n        if self.fields.is_empty() {\n            return Ok(None);\n        }\n        let field = &self.fields[0];\n        let expected_size = if self.fields.len() == 1 {\n            self.bytes.len()\n        } else {\n            self.fields[1].offset as usize\n        };\n        Ok(Some(CalltableFieldsIterator {\n            index_in_fields_vec: 0,\n            expected_size,\n            field,\n            bytes: &self.bytes,\n            parent: self,\n        }))\n    }\n\n    pub fn from_bytes(\n        max_expected_fields: u32,\n        input_bytes: &[u8],\n    ) -> Result<(CalltableSerializationEnvelope, &[u8]), Error> {\n        if input_bytes.len() < U32_SERIALIZED_LENGTH {\n            //The first \"thing\" in the bytes of the payload should be a `fields` vector. We want to\n            // check the number of entries in that vector to avoid field pumping. 
If the\n            // payload doesn't have u32 size of bytes in it, then it's malformed.\n            return Err(Error::Formatting);\n        }\n        let (number_of_fields, _) = u32::from_bytes(input_bytes)?;\n        if number_of_fields > max_expected_fields {\n            return Err(Error::Formatting);\n        }\n        let (fields, remainder) = Vec::<Field>::from_bytes(input_bytes)?;\n        let (bytes, remainder) = Bytes::from_bytes(remainder)?;\n        Ok((CalltableSerializationEnvelope { fields, bytes }, remainder))\n    }\n}\n\npub struct CalltableFieldsIterator<'a> {\n    index_in_fields_vec: usize,\n    expected_size: usize,\n    field: &'a Field,\n    bytes: &'a [u8],\n    parent: &'a CalltableSerializationEnvelope,\n}\n\nimpl CalltableFieldsIterator<'_> {\n    pub fn verify_index(&self, expected_index: u16) -> Result<(), Error> {\n        let field = self.field;\n        if field.index != expected_index {\n            return Err(Error::Formatting);\n        }\n        Ok(())\n    }\n\n    pub fn deserialize_and_maybe_next<T: FromBytes>(\n        &self,\n    ) -> Result<(T, Option<CalltableFieldsIterator>), Error> {\n        let (t, maybe_window) = self.step()?;\n        Ok((t, maybe_window))\n    }\n\n    fn step<T: FromBytes>(&self) -> Result<(T, Option<CalltableFieldsIterator>), Error> {\n        let (t, remainder) = T::from_bytes(self.bytes)?;\n        let parent_fields = &self.parent.fields;\n        let parent_fields_len = parent_fields.len();\n        let is_last_field = self.index_in_fields_vec == parent_fields_len - 1;\n        if remainder.len() + self.expected_size != self.bytes.len() {\n            //The field occupied different amount of bytes than expected\n            return Err(Error::Formatting);\n        }\n        if !is_last_field {\n            let next_field_index = self.index_in_fields_vec + 1;\n            let next_field = &parent_fields[next_field_index]; // We already checked that this index exists\n            let 
is_next_field_last = next_field_index == parent_fields_len - 1;\n            let expected_size = if is_next_field_last {\n                remainder.len()\n            } else {\n                (parent_fields[next_field_index + 1].offset\n                    - parent_fields[next_field_index].offset) as usize\n            };\n            let next_window = CalltableFieldsIterator {\n                index_in_fields_vec: next_field_index,\n                expected_size,\n                field: next_field,\n                bytes: remainder,\n                parent: self.parent,\n            };\n            Ok((t, Some(next_window)))\n        } else {\n            if !remainder.is_empty() {\n                //The payload of BinaryPayload should contain only the serialized, there should be\n                // no trailing bytes after consuming all the fields.\n                return Err(Error::Formatting);\n            }\n            Ok((t, None))\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::{CalltableSerializationEnvelope, CalltableSerializationEnvelopeBuilder, Field};\n    use crate::bytesrepr::*;\n\n    #[test]\n    fn binary_payload_should_serialize_fields() {\n        let binary_payload = CalltableSerializationEnvelopeBuilder::new(vec![\n            U8_SERIALIZED_LENGTH,\n            U32_SERIALIZED_LENGTH,\n            U16_SERIALIZED_LENGTH,\n        ])\n        .unwrap();\n        let bytes = binary_payload\n            .add_field(0, &(254_u8))\n            .unwrap()\n            .add_field(1, &(u32::MAX))\n            .unwrap()\n            .add_field(2, &(555_u16))\n            .unwrap()\n            .binary_payload_bytes()\n            .unwrap();\n        let (payload, remainder) = CalltableSerializationEnvelope::from_bytes(3, &bytes).unwrap();\n        assert!(remainder.is_empty());\n        assert_eq!(\n            payload.fields,\n            vec![Field::new(0, 0), Field::new(1, 1), Field::new(2, 5)]\n        );\n        let bytes = 
&payload.bytes;\n        let (first_value, remainder) = u8::from_bytes(bytes).unwrap();\n        let (second_value, remainder) = u32::from_bytes(remainder).unwrap();\n        let (third_value, remainder) = u16::from_bytes(remainder).unwrap();\n        assert!(remainder.is_empty());\n        assert_eq!(first_value, 254);\n        assert_eq!(second_value, u32::MAX);\n        assert_eq!(third_value, 555);\n    }\n\n    #[test]\n    fn binary_payload_should_fail_to_deserialzie_if_more_then_expected_fields() {\n        let binary_payload = CalltableSerializationEnvelopeBuilder::new(vec![\n            U8_SERIALIZED_LENGTH,\n            U32_SERIALIZED_LENGTH,\n            U16_SERIALIZED_LENGTH,\n        ])\n        .unwrap();\n        let bytes = binary_payload\n            .add_field(0, &(254_u8))\n            .unwrap()\n            .add_field(1, &(u32::MAX))\n            .unwrap()\n            .add_field(2, &(555_u16))\n            .unwrap()\n            .binary_payload_bytes()\n            .unwrap();\n        let res = CalltableSerializationEnvelope::from_bytes(2, &bytes);\n        assert!(res.is_err())\n    }\n\n    #[test]\n    fn binary_payload_should_fail_to_serialize_if_field_indexes_not_unique() {\n        let binary_payload = CalltableSerializationEnvelopeBuilder::new(vec![\n            U8_SERIALIZED_LENGTH,\n            U32_SERIALIZED_LENGTH,\n            U16_SERIALIZED_LENGTH,\n        ])\n        .unwrap();\n        let res = binary_payload\n            .add_field(0, &(254_u8))\n            .unwrap()\n            .add_field(0, &(u32::MAX));\n        assert!(res.is_err())\n    }\n\n    #[test]\n    fn binary_payload_should_fail_to_serialize_if_field_indexes_not_sequential() {\n        let binary_payload = CalltableSerializationEnvelopeBuilder::new(vec![\n            U8_SERIALIZED_LENGTH,\n            U32_SERIALIZED_LENGTH,\n            U16_SERIALIZED_LENGTH,\n        ])\n        .unwrap();\n        let res = binary_payload\n            .add_field(1, 
&(254_u8))\n            .unwrap()\n            .add_field(0, &(u32::MAX));\n        assert!(res.is_err())\n    }\n\n    #[test]\n    fn binary_payload_should_fail_to_serialize_if_proposed_fields_size_is_smaller_than_declaration()\n    {\n        let binary_payload = CalltableSerializationEnvelopeBuilder::new(vec![\n            U8_SERIALIZED_LENGTH,\n            U32_SERIALIZED_LENGTH,\n            U16_SERIALIZED_LENGTH,\n        ])\n        .unwrap();\n        let res = binary_payload\n            .add_field(0, &(254_u8))\n            .unwrap()\n            .add_field(1, &(u16::MAX));\n        assert!(res.is_err())\n    }\n\n    #[test]\n    fn binary_payload_should_fail_to_serialize_if_proposed_fields_size_is_greater_than_declaration()\n    {\n        let binary_payload = CalltableSerializationEnvelopeBuilder::new(vec![\n            U8_SERIALIZED_LENGTH,\n            U16_SERIALIZED_LENGTH,\n        ])\n        .unwrap();\n        let res = binary_payload\n            .add_field(0, &(254_u8))\n            .unwrap()\n            .add_field(1, &(u32::MAX));\n        assert!(res.is_err())\n    }\n\n    #[test]\n    fn binary_payload_should_fail_to_serialize_if_proposed_a_field_with_zero_bytes() {\n        struct FunkyType {}\n        impl ToBytes for FunkyType {\n            fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n                Ok(Vec::new())\n            }\n\n            fn serialized_length(&self) -> usize {\n                0\n            }\n        }\n        let funky_instance = FunkyType {};\n        let binary_payload = CalltableSerializationEnvelopeBuilder::new(vec![\n            U8_SERIALIZED_LENGTH,\n            funky_instance.serialized_length(),\n        ])\n        .unwrap();\n        let res = binary_payload\n            .add_field(0, &(254_u8))\n            .unwrap()\n            .add_field(1, &(funky_instance));\n        assert!(res.is_err())\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/transaction_entry_point.rs",
    "content": "use alloc::{string::String, vec::Vec};\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    alloc::string::ToString,\n    bytesrepr::{\n        Error::{self, Formatting},\n        FromBytes, ToBytes,\n    },\n    system::{auction, mint},\n    transaction::serialization::{\n        CalltableSerializationEnvelope, CalltableSerializationEnvelopeBuilder,\n    },\n};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n/// The entry point of a [`crate::Transaction`].\n#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"Entry point of a Transaction.\")\n)]\n#[serde(deny_unknown_fields)]\n//Default is needed only in testing to meet EnumIter needs\n#[cfg_attr(any(feature = \"testing\", test), derive(Default))]\npub enum TransactionEntryPoint {\n    /// The default entry point name.\n    #[cfg_attr(any(feature = \"testing\", test), default)]\n    Call,\n\n    /// A non-native, arbitrary entry point.\n    Custom(String),\n\n    /// The `transfer` native entry point, used to transfer `Motes` from a source purse to a target\n    /// purse.\n    ///\n    /// Requires the following runtime args:\n    ///   * \"source\": `URef`\n    ///   * \"target\": `URef`\n    ///   * \"amount\": `U512`\n    ///\n    /// The following optional runtime args can also be provided:\n    ///   * \"to\": `Option<AccountHash>`\n    ///   * \"id\": `Option<u64>`\n    #[cfg_attr(\n        feature = \"json-schema\",\n        schemars(\n            description = \"The `transfer` native entry point, used to transfer `Motes` from a \\\n            source purse 
to a target purse.\"\n        )\n    )]\n    Transfer,\n\n    /// The `burn` native entry point, used to burn `Motes` from a source purse.\n    ///\n    /// Requires the following runtime args:\n    ///   * \"source\": `URef`\n    ///   * \"amount\": `U512`\n    #[cfg_attr(\n        feature = \"json-schema\",\n        schemars(\n            description = \"The `burn` native entry point, used to burn `Motes` from a \\\n            source purse.\"\n        )\n    )]\n    Burn,\n\n    /// The `add_bid` native entry point, used to create or top off a bid purse.\n    ///\n    /// Requires the following runtime args:\n    ///   * \"public_key\": `PublicKey`\n    ///   * \"delegation_rate\": `u8`\n    ///   * \"amount\": `U512`\n    ///   * \"minimum_delegation_amount\": `Option<u64>`\n    ///   * \"maximum_delegation_amount\": `Option<u64>`\n    ///   * \"reserved_slots\": `Option<u32>`\n    #[cfg_attr(\n        feature = \"json-schema\",\n        schemars(\n            description = \"The `add_bid` native entry point, used to create or top off a bid purse.\"\n        )\n    )]\n    AddBid,\n\n    /// The `withdraw_bid` native entry point, used to decrease a stake.\n    ///\n    /// Requires the following runtime args:\n    ///   * \"public_key\": `PublicKey`\n    ///   * \"amount\": `U512`\n    #[cfg_attr(\n        feature = \"json-schema\",\n        schemars(description = \"The `withdraw_bid` native entry point, used to decrease a stake.\")\n    )]\n    WithdrawBid,\n\n    /// The `delegate` native entry point, used to add a new delegator or increase an existing\n    /// delegator's stake.\n    ///\n    /// Requires the following runtime args:\n    ///   * \"delegator\": `PublicKey`\n    ///   * \"validator\": `PublicKey`\n    ///   * \"amount\": `U512`\n    #[cfg_attr(\n        feature = \"json-schema\",\n        schemars(\n            description = \"The `delegate` native entry point, used to add a new delegator or \\\n            increase an existing delegator's 
 stake.\"\n        )\n    )]\n    Delegate,\n\n    /// The `undelegate` native entry point, used to reduce a delegator's stake or remove the\n    /// delegator if the remaining stake is 0.\n    ///\n    /// Requires the following runtime args:\n    ///   * \"delegator\": `PublicKey`\n    ///   * \"validator\": `PublicKey`\n    ///   * \"amount\": `U512`\n    #[cfg_attr(\n        feature = \"json-schema\",\n        schemars(\n            description = \"The `undelegate` native entry point, used to reduce a delegator's \\\n            stake or remove the delegator if the remaining stake is 0.\"\n        )\n    )]\n    Undelegate,\n\n    /// The `redelegate` native entry point, used to reduce a delegator's stake or remove the\n    /// delegator if the remaining stake is 0, and after the unbonding delay, automatically\n    /// delegate to a new validator.\n    ///\n    /// Requires the following runtime args:\n    ///   * \"delegator\": `PublicKey`\n    ///   * \"validator\": `PublicKey`\n    ///   * \"amount\": `U512`\n    ///   * \"new_validator\": `PublicKey`\n    #[cfg_attr(\n        feature = \"json-schema\",\n        schemars(\n            description = \"The `redelegate` native entry point, used to reduce a delegator's stake \\\n            or remove the delegator if the remaining stake is 0, and after the unbonding delay, \\\n            automatically delegate to a new validator.\"\n        )\n    )]\n    Redelegate,\n\n    /// The `activate_bid` native entry point, used to reactivate an inactive bid.\n    ///\n    /// Requires the following runtime args:\n    ///   * \"validator_public_key\": `PublicKey`\n    #[cfg_attr(\n        feature = \"json-schema\",\n        schemars(\n            description = \"The `activate_bid` native entry point, used to reactivate an \\\n            inactive bid.\"\n        )\n    )]\n    ActivateBid,\n\n    /// The `change_bid_public_key` native entry point, used to change a bid's public key.\n    ///\n    /// Requires the 
following runtime args:\n    ///   * \"public_key\": `PublicKey`\n    ///   * \"new_public_key\": `PublicKey`\n    #[cfg_attr(\n        feature = \"json-schema\",\n        schemars(\n            description = \"The `change_bid_public_key` native entry point, used to change a bid's public key.\"\n        )\n    )]\n    ChangeBidPublicKey,\n\n    /// The `add_reservations` native entry point, used to add delegators to validator's reserve\n    /// list.\n    ///\n    /// Requires the following runtime args:\n    ///   * \"reservations\": `Vec<Reservation>`\n    #[cfg_attr(\n        feature = \"json-schema\",\n        schemars(\n            description = \"The `add_reservations` native entry point, used to add delegator to \\\n            validator's reserve list\"\n        )\n    )]\n    AddReservations,\n\n    /// The `cancel_reservations` native entry point, used to remove delegators from validator's\n    /// reserve list.\n    ///\n    /// Requires the following runtime args:\n    ///   * \"validator\": `PublicKey`\n    ///   * \"delegators\": `Vec<DelegatorKind>`\n    #[cfg_attr(\n        feature = \"json-schema\",\n        schemars(\n            description = \"The `cancel_reservations` native entry point, used to remove delegator \\\n            from validator's reserve list\"\n        )\n    )]\n    CancelReservations,\n}\n\nimpl TransactionEntryPoint {\n    /// Returns a random `TransactionEntryPoint`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..13) {\n            0 => TransactionEntryPoint::Custom(rng.random_string(1..21)),\n            1 => TransactionEntryPoint::Transfer,\n            2 => TransactionEntryPoint::AddBid,\n            3 => TransactionEntryPoint::WithdrawBid,\n            4 => TransactionEntryPoint::Delegate,\n            5 => TransactionEntryPoint::Undelegate,\n            6 => TransactionEntryPoint::Redelegate,\n            7 => 
TransactionEntryPoint::ActivateBid,\n            8 => TransactionEntryPoint::ChangeBidPublicKey,\n            9 => TransactionEntryPoint::Call,\n            10 => TransactionEntryPoint::AddReservations,\n            11 => TransactionEntryPoint::CancelReservations,\n            12 => TransactionEntryPoint::Burn,\n            _ => unreachable!(),\n        }\n    }\n\n    fn serialized_field_lengths(&self) -> Vec<usize> {\n        match self {\n            TransactionEntryPoint::Custom(custom) => {\n                vec![\n                    crate::bytesrepr::U8_SERIALIZED_LENGTH,\n                    custom.serialized_length(),\n                ]\n            }\n            TransactionEntryPoint::Call\n            | TransactionEntryPoint::Transfer\n            | TransactionEntryPoint::Burn\n            | TransactionEntryPoint::AddBid\n            | TransactionEntryPoint::WithdrawBid\n            | TransactionEntryPoint::Delegate\n            | TransactionEntryPoint::Undelegate\n            | TransactionEntryPoint::Redelegate\n            | TransactionEntryPoint::ActivateBid\n            | TransactionEntryPoint::ChangeBidPublicKey\n            | TransactionEntryPoint::AddReservations\n            | TransactionEntryPoint::CancelReservations => {\n                vec![crate::bytesrepr::U8_SERIALIZED_LENGTH]\n            }\n        }\n    }\n\n    /// Returns custom entry point name if relevant.\n    pub fn custom_entry_point(&self) -> Option<String> {\n        if let TransactionEntryPoint::Custom(entry_point) = self {\n            Some(entry_point.clone())\n        } else {\n            None\n        }\n    }\n}\n\nconst TAG_FIELD_INDEX: u16 = 0;\n\nconst CALL_VARIANT_TAG: u8 = 0;\n\nconst CUSTOM_VARIANT_TAG: u8 = 1;\nconst CUSTOM_CUSTOM_INDEX: u16 = 1;\n\nconst TRANSFER_VARIANT_TAG: u8 = 2;\nconst ADD_BID_VARIANT_TAG: u8 = 3;\nconst WITHDRAW_BID_VARIANT_TAG: u8 = 4;\nconst DELEGATE_VARIANT_TAG: u8 = 5;\nconst UNDELEGATE_VARIANT_TAG: u8 = 6;\nconst 
REDELEGATE_VARIANT_TAG: u8 = 7;\nconst ACTIVATE_BID_VARIANT_TAG: u8 = 8;\nconst CHANGE_BID_PUBLIC_KEY_VARIANT_TAG: u8 = 9;\nconst ADD_RESERVATIONS_VARIANT_TAG: u8 = 10;\nconst CANCEL_RESERVATIONS_VARIANT_TAG: u8 = 11;\nconst BURN_VARIANT_TAG: u8 = 12;\n\nimpl ToBytes for TransactionEntryPoint {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        match self {\n            TransactionEntryPoint::Call => {\n                CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                    .add_field(TAG_FIELD_INDEX, &CALL_VARIANT_TAG)?\n                    .binary_payload_bytes()\n            }\n            TransactionEntryPoint::Custom(custom) => {\n                CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                    .add_field(TAG_FIELD_INDEX, &CUSTOM_VARIANT_TAG)?\n                    .add_field(CUSTOM_CUSTOM_INDEX, &custom)?\n                    .binary_payload_bytes()\n            }\n            TransactionEntryPoint::Transfer => {\n                CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                    .add_field(TAG_FIELD_INDEX, &TRANSFER_VARIANT_TAG)?\n                    .binary_payload_bytes()\n            }\n            TransactionEntryPoint::Burn => {\n                CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                    .add_field(TAG_FIELD_INDEX, &BURN_VARIANT_TAG)?\n                    .binary_payload_bytes()\n            }\n            TransactionEntryPoint::AddBid => {\n                CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                    .add_field(TAG_FIELD_INDEX, &ADD_BID_VARIANT_TAG)?\n                    .binary_payload_bytes()\n            }\n            TransactionEntryPoint::WithdrawBid => {\n                CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                    .add_field(TAG_FIELD_INDEX, 
&WITHDRAW_BID_VARIANT_TAG)?\n                    .binary_payload_bytes()\n            }\n            TransactionEntryPoint::Delegate => {\n                CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                    .add_field(TAG_FIELD_INDEX, &DELEGATE_VARIANT_TAG)?\n                    .binary_payload_bytes()\n            }\n            TransactionEntryPoint::Undelegate => {\n                CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                    .add_field(TAG_FIELD_INDEX, &UNDELEGATE_VARIANT_TAG)?\n                    .binary_payload_bytes()\n            }\n            TransactionEntryPoint::Redelegate => {\n                CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                    .add_field(TAG_FIELD_INDEX, &REDELEGATE_VARIANT_TAG)?\n                    .binary_payload_bytes()\n            }\n            TransactionEntryPoint::ActivateBid => {\n                CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                    .add_field(TAG_FIELD_INDEX, &ACTIVATE_BID_VARIANT_TAG)?\n                    .binary_payload_bytes()\n            }\n            TransactionEntryPoint::ChangeBidPublicKey => {\n                CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                    .add_field(TAG_FIELD_INDEX, &CHANGE_BID_PUBLIC_KEY_VARIANT_TAG)?\n                    .binary_payload_bytes()\n            }\n            TransactionEntryPoint::AddReservations => {\n                CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                    .add_field(TAG_FIELD_INDEX, &ADD_RESERVATIONS_VARIANT_TAG)?\n                    .binary_payload_bytes()\n            }\n            TransactionEntryPoint::CancelReservations => {\n                CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                    
.add_field(TAG_FIELD_INDEX, &CANCEL_RESERVATIONS_VARIANT_TAG)?\n                    .binary_payload_bytes()\n            }\n        }\n    }\n    fn serialized_length(&self) -> usize {\n        CalltableSerializationEnvelope::estimate_size(self.serialized_field_lengths())\n    }\n}\n\nimpl FromBytes for TransactionEntryPoint {\n    fn from_bytes(bytes: &[u8]) -> Result<(TransactionEntryPoint, &[u8]), Error> {\n        let (binary_payload, remainder) = CalltableSerializationEnvelope::from_bytes(2u32, bytes)?;\n        let window = binary_payload.start_consuming()?.ok_or(Formatting)?;\n        window.verify_index(TAG_FIELD_INDEX)?;\n        let (tag, window) = window.deserialize_and_maybe_next::<u8>()?;\n        let to_ret = match tag {\n            CALL_VARIANT_TAG => {\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(TransactionEntryPoint::Call)\n            }\n            CUSTOM_VARIANT_TAG => {\n                let window = window.ok_or(Formatting)?;\n                window.verify_index(CUSTOM_CUSTOM_INDEX)?;\n                let (custom, window) = window.deserialize_and_maybe_next::<String>()?;\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(TransactionEntryPoint::Custom(custom))\n            }\n            TRANSFER_VARIANT_TAG => {\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(TransactionEntryPoint::Transfer)\n            }\n            BURN_VARIANT_TAG => {\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(TransactionEntryPoint::Burn)\n            }\n            ADD_BID_VARIANT_TAG => {\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(TransactionEntryPoint::AddBid)\n            }\n            
WITHDRAW_BID_VARIANT_TAG => {\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(TransactionEntryPoint::WithdrawBid)\n            }\n            DELEGATE_VARIANT_TAG => {\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(TransactionEntryPoint::Delegate)\n            }\n            UNDELEGATE_VARIANT_TAG => {\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(TransactionEntryPoint::Undelegate)\n            }\n            REDELEGATE_VARIANT_TAG => {\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(TransactionEntryPoint::Redelegate)\n            }\n            ACTIVATE_BID_VARIANT_TAG => {\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(TransactionEntryPoint::ActivateBid)\n            }\n            CHANGE_BID_PUBLIC_KEY_VARIANT_TAG => {\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(TransactionEntryPoint::ChangeBidPublicKey)\n            }\n            ADD_RESERVATIONS_VARIANT_TAG => {\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(TransactionEntryPoint::AddReservations)\n            }\n            CANCEL_RESERVATIONS_VARIANT_TAG => {\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(TransactionEntryPoint::CancelReservations)\n            }\n            _ => Err(Formatting),\n        };\n        to_ret.map(|endpoint| (endpoint, remainder))\n    }\n}\n\nimpl Display for TransactionEntryPoint {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n           
 TransactionEntryPoint::Call => write!(formatter, \"call\"),\n            TransactionEntryPoint::Custom(entry_point) => {\n                write!(formatter, \"custom({entry_point})\")\n            }\n            TransactionEntryPoint::Transfer => write!(formatter, \"transfer\"),\n            TransactionEntryPoint::Burn => write!(formatter, \"burn\"),\n            TransactionEntryPoint::AddBid => write!(formatter, \"add_bid\"),\n            TransactionEntryPoint::WithdrawBid => write!(formatter, \"withdraw_bid\"),\n            TransactionEntryPoint::Delegate => write!(formatter, \"delegate\"),\n            TransactionEntryPoint::Undelegate => write!(formatter, \"undelegate\"),\n            TransactionEntryPoint::Redelegate => write!(formatter, \"redelegate\"),\n            TransactionEntryPoint::ActivateBid => write!(formatter, \"activate_bid\"),\n            TransactionEntryPoint::ChangeBidPublicKey => write!(formatter, \"change_bid_public_key\"),\n            TransactionEntryPoint::AddReservations => write!(formatter, \"add_reservations\"),\n            TransactionEntryPoint::CancelReservations => write!(formatter, \"cancel_reservations\"),\n        }\n    }\n}\n\nimpl From<&str> for TransactionEntryPoint {\n    fn from(value: &str) -> Self {\n        if value.to_lowercase() == mint::METHOD_TRANSFER {\n            return TransactionEntryPoint::Transfer;\n        }\n        if value.to_lowercase() == mint::METHOD_BURN {\n            return TransactionEntryPoint::Burn;\n        }\n        if value.to_lowercase() == auction::METHOD_ACTIVATE_BID {\n            return TransactionEntryPoint::ActivateBid;\n        }\n        if value.to_lowercase() == auction::METHOD_ADD_BID {\n            return TransactionEntryPoint::AddBid;\n        }\n        if value.to_lowercase() == auction::METHOD_WITHDRAW_BID {\n            return TransactionEntryPoint::WithdrawBid;\n        }\n        if value.to_lowercase() == auction::METHOD_DELEGATE {\n            return 
TransactionEntryPoint::Delegate;\n        }\n        if value.to_lowercase() == auction::METHOD_UNDELEGATE {\n            return TransactionEntryPoint::Undelegate;\n        }\n        if value.to_lowercase() == auction::METHOD_REDELEGATE {\n            return TransactionEntryPoint::Redelegate;\n        }\n        if value.to_lowercase() == auction::METHOD_CHANGE_BID_PUBLIC_KEY {\n            return TransactionEntryPoint::ChangeBidPublicKey;\n        }\n\n        TransactionEntryPoint::Custom(value.to_string())\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::{bytesrepr::test_serialization_roundtrip, gens::transaction_entry_point_arb};\n    use proptest::prelude::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        for _ in 0..10 {\n            test_serialization_roundtrip(&TransactionEntryPoint::random(rng));\n        }\n    }\n\n    proptest! {\n        #[test]\n        fn bytesrepr_roundtrip_from_arb(entry_point in transaction_entry_point_arb()) {\n            test_serialization_roundtrip(&entry_point);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/transaction_hash.rs",
    "content": "use alloc::{string::String, vec::Vec};\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse super::{DeployHash, TransactionV1Hash};\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    Digest,\n};\n\nconst DEPLOY_TAG: u8 = 0;\nconst V1_TAG: u8 = 1;\nconst TAG_LENGTH: u8 = 1;\n\n/// A versioned wrapper for a transaction hash or deploy hash.\n#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub enum TransactionHash {\n    /// A deploy hash.\n    Deploy(DeployHash),\n    /// A version 1 transaction hash.\n    #[serde(rename = \"Version1\")]\n    V1(TransactionV1Hash),\n}\n\nimpl TransactionHash {\n    /// The number of bytes in a `DeployHash` digest.\n    pub const LENGTH: usize = TAG_LENGTH as usize + Digest::LENGTH;\n    /// Digest representation of hash.\n    pub fn digest(&self) -> Digest {\n        match self {\n            TransactionHash::Deploy(deploy_hash) => *deploy_hash.inner(),\n            TransactionHash::V1(transaction_hash) => *transaction_hash.inner(),\n        }\n    }\n\n    /// Hexadecimal representation of the hash.\n    pub fn to_hex_string(&self) -> String {\n        base16::encode_lower(&self.digest())\n    }\n\n    /// Returns a random `TransactionHash`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..2) {\n            0 => TransactionHash::from(DeployHash::random(rng)),\n            1 => TransactionHash::from(TransactionV1Hash::random(rng)),\n         
   _ => panic!(),\n        }\n    }\n\n    /// Returns a new `TransactionHash` directly initialized with the provided bytes; no hashing\n    /// is done.\n    pub const fn from_raw(raw_digest: [u8; TransactionV1Hash::LENGTH]) -> Self {\n        TransactionHash::V1(TransactionV1Hash::from_raw(raw_digest))\n    }\n}\n\nimpl From<DeployHash> for TransactionHash {\n    fn from(hash: DeployHash) -> Self {\n        Self::Deploy(hash)\n    }\n}\n\nimpl From<&DeployHash> for TransactionHash {\n    fn from(hash: &DeployHash) -> Self {\n        Self::from(*hash)\n    }\n}\n\nimpl From<TransactionV1Hash> for TransactionHash {\n    fn from(hash: TransactionV1Hash) -> Self {\n        Self::V1(hash)\n    }\n}\n\nimpl From<&TransactionV1Hash> for TransactionHash {\n    fn from(hash: &TransactionV1Hash) -> Self {\n        Self::from(*hash)\n    }\n}\n\nimpl Default for TransactionHash {\n    fn default() -> Self {\n        TransactionHash::V1(TransactionV1Hash::default())\n    }\n}\n\nimpl Display for TransactionHash {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            TransactionHash::Deploy(hash) => Display::fmt(hash, formatter),\n            TransactionHash::V1(hash) => Display::fmt(hash, formatter),\n        }\n    }\n}\n\nimpl AsRef<[u8]> for TransactionHash {\n    fn as_ref(&self) -> &[u8] {\n        match self {\n            TransactionHash::Deploy(hash) => hash.as_ref(),\n            TransactionHash::V1(hash) => hash.as_ref(),\n        }\n    }\n}\n\nimpl ToBytes for TransactionHash {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                TransactionHash::Deploy(hash) => hash.serialized_length(),\n                TransactionHash::V1(hash) => hash.serialized_length(),\n      
      }\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            TransactionHash::Deploy(hash) => {\n                DEPLOY_TAG.write_bytes(writer)?;\n                hash.write_bytes(writer)\n            }\n            TransactionHash::V1(hash) => {\n                V1_TAG.write_bytes(writer)?;\n                hash.write_bytes(writer)\n            }\n        }\n    }\n}\n\nimpl FromBytes for TransactionHash {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            DEPLOY_TAG => {\n                let (hash, remainder) = DeployHash::from_bytes(remainder)?;\n                Ok((TransactionHash::Deploy(hash), remainder))\n            }\n            V1_TAG => {\n                let (hash, remainder) = TransactionV1Hash::from_bytes(remainder)?;\n                Ok((TransactionHash::V1(hash), remainder))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::testing::TestRng;\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let hash = TransactionHash::from(DeployHash::random(rng));\n        bytesrepr::test_serialization_roundtrip(&hash);\n\n        let hash = TransactionHash::from(TransactionV1Hash::random(rng));\n        bytesrepr::test_serialization_roundtrip(&hash);\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/transaction_id.rs",
    "content": "use alloc::vec::Vec;\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(doc)]\nuse super::Transaction;\nuse super::{ApprovalsHash, TransactionHash};\nuse crate::bytesrepr::{self, FromBytes, ToBytes};\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\n\n/// The unique identifier of a [`Transaction`], comprising its [`TransactionHash`] and\n/// [`ApprovalsHash`].\n#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[serde(deny_unknown_fields)]\npub struct TransactionId {\n    /// The transaction hash.\n    transaction_hash: TransactionHash,\n    /// The approvals hash.\n    approvals_hash: ApprovalsHash,\n}\n\nimpl TransactionId {\n    /// Returns a new `TransactionId`.\n    pub fn new(transaction_hash: TransactionHash, approvals_hash: ApprovalsHash) -> Self {\n        TransactionId {\n            transaction_hash,\n            approvals_hash,\n        }\n    }\n\n    /// Returns the transaction hash.\n    pub fn transaction_hash(&self) -> TransactionHash {\n        self.transaction_hash\n    }\n\n    /// Returns the approvals hash.\n    pub fn approvals_hash(&self) -> ApprovalsHash {\n        self.approvals_hash\n    }\n\n    /// Returns a random `TransactionId`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        TransactionId::new(TransactionHash::random(rng), ApprovalsHash::random(rng))\n    }\n}\n\nimpl Display for TransactionId {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(\n            formatter,\n            \"transaction-id({}, {})\",\n            self.transaction_hash(),\n            self.approvals_hash()\n        )\n    }\n}\n\nimpl ToBytes for TransactionId {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> 
{\n        self.transaction_hash.write_bytes(writer)?;\n        self.approvals_hash.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.transaction_hash.serialized_length() + self.approvals_hash.serialized_length()\n    }\n}\n\nimpl FromBytes for TransactionId {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (transaction_hash, rem) = TransactionHash::from_bytes(bytes)?;\n        let (approvals_hash, rem) = ApprovalsHash::from_bytes(rem)?;\n        let transaction_id = TransactionId::new(transaction_hash, approvals_hash);\n        Ok((transaction_id, rem))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        let id = TransactionId::random(rng);\n        bytesrepr::test_serialization_roundtrip(&id);\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/transaction_invocation_target.rs",
    "content": "use alloc::{string::String, vec::Vec};\nuse core::fmt::{self, Debug, Display, Formatter};\n\nuse super::{serialization::CalltableSerializationEnvelope, AddressableEntityIdentifier};\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{\n        Error::{self, Formatting},\n        FromBytes, ToBytes,\n    },\n    contracts::ProtocolVersionMajor,\n    serde_helpers,\n    transaction::serialization::CalltableSerializationEnvelopeBuilder,\n    AddressableEntityHash, EntityVersion, HashAddr, PackageAddr, PackageHash, PackageIdentifier,\n};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse hex_fmt::HexFmt;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n/// The identifier of a [`crate::TransactionTarget::Stored`].\n#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"Identifier of a `Stored` transaction target.\")\n)]\n#[serde(deny_unknown_fields)]\npub enum TransactionInvocationTarget {\n    /// The address identifying the invocable entity.\n    #[serde(with = \"serde_helpers::raw_32_byte_array\")]\n    #[cfg_attr(\n        feature = \"json-schema\",\n        schemars(\n            with = \"String\",\n            description = \"Hex-encoded entity address identifying the invocable entity.\"\n        )\n    )]\n    ByHash(HashAddr), /* currently needs to be of contract tag\n                       * variant */\n    /// The alias identifying the invocable entity.\n    ByName(String),\n    /// The address and optional version identifying the package.\n    ByPackageHash {\n        /// The package address.\n        #[serde(with = \"serde_helpers::raw_32_byte_array\")]\n        #[cfg_attr(\n            feature = 
\"json-schema\",\n            schemars(with = \"String\", description = \"Hex-encoded address of the package.\")\n        )]\n        addr: PackageAddr,\n        /// The package version.\n        ///\n        /// If `None`, the latest enabled version is implied.\n        version: Option<EntityVersion>,\n        /// The major protocol version of the contract package.\n        ///\n        /// `None` implies latest major protocol version.\n        #[serde(skip_serializing_if = \"Option::is_none\")]\n        protocol_version_major: Option<ProtocolVersionMajor>,\n    },\n    /// The alias and optional version identifying the package.\n    ByPackageName {\n        /// The package name.\n        name: String,\n        /// The package version.\n        ///\n        /// If `None`, the latest enabled version is implied.\n        version: Option<EntityVersion>,\n        /// The major protocol version of the contract package.\n        ///\n        /// `None` implies latest major protocol version.\n        #[serde(skip_serializing_if = \"Option::is_none\")]\n        protocol_version_major: Option<ProtocolVersionMajor>,\n    },\n}\n\nimpl TransactionInvocationTarget {\n    /// Returns a new `TransactionInvocationTarget::InvocableEntity`.\n    pub fn new_invocable_entity(hash: AddressableEntityHash) -> Self {\n        TransactionInvocationTarget::ByHash(hash.value())\n    }\n\n    /// Returns a new `TransactionInvocationTarget::InvocableEntityAlias`.\n    pub fn new_invocable_entity_alias(alias: String) -> Self {\n        TransactionInvocationTarget::ByName(alias)\n    }\n\n    /// Returns a new `TransactionInvocationTarget::Package`.\n    #[deprecated(since = \"5.0.1\", note = \"please use `new_package_with_major` instead\")]\n    pub fn new_package(hash: PackageHash, version: Option<EntityVersion>) -> Self {\n        TransactionInvocationTarget::ByPackageHash {\n            addr: hash.value(),\n            version,\n            protocol_version_major: None,\n        }\n    
}\n\n    /// Returns a new `TransactionInvocationTarget::Package`.\n    pub fn new_package_with_major(\n        hash: PackageHash,\n        version: Option<EntityVersion>,\n        protocol_version_major: Option<ProtocolVersionMajor>,\n    ) -> Self {\n        TransactionInvocationTarget::ByPackageHash {\n            addr: hash.value(),\n            version,\n            protocol_version_major,\n        }\n    }\n\n    /// Returns a new `TransactionInvocationTarget::PackageAlias`.\n    #[deprecated(\n        since = \"5.0.1\",\n        note = \"please use `new_package_alias_with_major` instead\"\n    )]\n    pub fn new_package_alias(alias: String, version: Option<EntityVersion>) -> Self {\n        TransactionInvocationTarget::ByPackageName {\n            name: alias,\n            version,\n            protocol_version_major: None,\n        }\n    }\n\n    /// Returns a new `TransactionInvocationTarget::PackageAlias`.\n    pub fn new_package_alias_with_major(\n        alias: String,\n        version: Option<EntityVersion>,\n        protocol_version_major: Option<ProtocolVersionMajor>,\n    ) -> Self {\n        TransactionInvocationTarget::ByPackageName {\n            name: alias,\n            version,\n            protocol_version_major,\n        }\n    }\n\n    #[cfg(test)]\n    pub fn new_package_alias_with_major_and_entity(\n        hash: PackageHash,\n        version: Option<EntityVersion>,\n        protocol_version_major: Option<ProtocolVersionMajor>,\n    ) -> Self {\n        TransactionInvocationTarget::ByPackageHash {\n            addr: hash.value(),\n            version,\n            protocol_version_major,\n        }\n    }\n\n    /// Returns the contract `hash_addr`, if any.\n    pub fn contract_by_hash(&self) -> Option<HashAddr> {\n        if let TransactionInvocationTarget::ByHash(hash_addr) = self {\n            Some(*hash_addr)\n        } else {\n            None\n        }\n    }\n\n    /// Returns the identifier of the addressable entity, if 
present.\n    pub fn addressable_entity_identifier(&self) -> Option<AddressableEntityIdentifier> {\n        match self {\n            TransactionInvocationTarget::ByHash(addr) => Some(AddressableEntityIdentifier::Hash(\n                AddressableEntityHash::new(*addr),\n            )),\n            TransactionInvocationTarget::ByName(alias) => {\n                Some(AddressableEntityIdentifier::Name(alias.clone()))\n            }\n            TransactionInvocationTarget::ByPackageHash { .. }\n            | TransactionInvocationTarget::ByPackageName { .. } => None,\n        }\n    }\n\n    /// Returns the identifier of the contract package, if present.\n    pub fn package_identifier(&self) -> Option<PackageIdentifier> {\n        match self {\n            TransactionInvocationTarget::ByHash(_) | TransactionInvocationTarget::ByName(_) => None,\n            TransactionInvocationTarget::ByPackageHash {\n                addr,\n                version,\n                protocol_version_major,\n            } => Some(PackageIdentifier::HashWithMajorVersion {\n                package_hash: PackageHash::new(*addr),\n                version: *version,\n                protocol_version_major: *protocol_version_major,\n            }),\n            TransactionInvocationTarget::ByPackageName {\n                name: alias,\n                version,\n                protocol_version_major,\n            } => Some(PackageIdentifier::NameWithMajorVersion {\n                name: alias.clone(),\n                version: *version,\n                protocol_version_major: *protocol_version_major,\n            }),\n        }\n    }\n\n    fn serialized_field_lengths(&self) -> Vec<usize> {\n        match self {\n            TransactionInvocationTarget::ByHash(hash) => {\n                vec![\n                    crate::bytesrepr::U8_SERIALIZED_LENGTH,\n                    hash.serialized_length(),\n                ]\n            }\n            TransactionInvocationTarget::ByName(name) 
=> {\n                vec![\n                    crate::bytesrepr::U8_SERIALIZED_LENGTH,\n                    name.serialized_length(),\n                ]\n            }\n            TransactionInvocationTarget::ByPackageHash {\n                addr,\n                version,\n                protocol_version_major,\n            } => {\n                let mut field_sizes = vec![\n                    crate::bytesrepr::U8_SERIALIZED_LENGTH,\n                    addr.serialized_length(),\n                    version.serialized_length(),\n                ];\n                if let Some(protocol_version_major) = protocol_version_major {\n                    //When we serialize protocol_version_major we put the actual value,\n                    // if we want to denote `None` we don't put an entry in the calltable.\n                    field_sizes.push(protocol_version_major.serialized_length());\n                }\n                field_sizes\n            }\n            TransactionInvocationTarget::ByPackageName {\n                name,\n                version,\n                protocol_version_major,\n            } => {\n                let mut field_sizes = vec![\n                    crate::bytesrepr::U8_SERIALIZED_LENGTH,\n                    name.serialized_length(),\n                    version.serialized_length(),\n                ];\n                if let Some(protocol_version_major) = protocol_version_major {\n                    //When we serialize protocol_version_major we put the actual value,\n                    // if we want to denote `None` we don't put an entry in the calltable.\n                    field_sizes.push(protocol_version_major.serialized_length());\n                }\n                field_sizes\n            }\n        }\n    }\n\n    /// Returns a random `TransactionInvocationTarget`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..4) {\n            0 => 
TransactionInvocationTarget::ByHash(rng.gen()),\n            1 => TransactionInvocationTarget::ByName(rng.random_string(1..21)),\n            2 => TransactionInvocationTarget::ByPackageHash {\n                addr: rng.gen(),\n                version: rng.gen(),\n                protocol_version_major: rng.gen(),\n            },\n            3 => TransactionInvocationTarget::ByPackageName {\n                name: rng.random_string(1..21),\n                version: rng.gen(),\n                protocol_version_major: rng.gen(),\n            },\n            _ => unreachable!(),\n        }\n    }\n}\n\nconst TAG_FIELD_INDEX: u16 = 0;\n\nconst BY_HASH_VARIANT: u8 = 0;\nconst BY_HASH_HASH_INDEX: u16 = 1;\n\nconst BY_NAME_VARIANT: u8 = 1;\nconst BY_NAME_NAME_INDEX: u16 = 1;\n\nconst BY_PACKAGE_HASH_VARIANT: u8 = 2;\nconst BY_PACKAGE_HASH_ADDR_INDEX: u16 = 1;\nconst BY_PACKAGE_HASH_VERSION_INDEX: u16 = 2;\nconst BY_PACKAGE_HASH_PROTOCOL_VERSION_MAJOR_INDEX: u16 = 3;\n\nconst BY_PACKAGE_NAME_VARIANT: u8 = 3;\nconst BY_PACKAGE_NAME_NAME_INDEX: u16 = 1;\nconst BY_PACKAGE_NAME_VERSION_INDEX: u16 = 2;\nconst BY_PACKAGE_NAME_PROTOCOL_VERSION_MAJOR_INDEX: u16 = 3;\n\nimpl ToBytes for TransactionInvocationTarget {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        match self {\n            TransactionInvocationTarget::ByHash(hash) => {\n                CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                    .add_field(TAG_FIELD_INDEX, &BY_HASH_VARIANT)?\n                    .add_field(BY_HASH_HASH_INDEX, &hash)?\n                    .binary_payload_bytes()\n            }\n            TransactionInvocationTarget::ByName(name) => {\n                CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                    .add_field(TAG_FIELD_INDEX, &BY_NAME_VARIANT)?\n                    .add_field(BY_NAME_NAME_INDEX, &name)?\n                    .binary_payload_bytes()\n            }\n            
TransactionInvocationTarget::ByPackageHash {\n                addr,\n                version,\n                protocol_version_major,\n            } => {\n                let mut builder =\n                    CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                        .add_field(TAG_FIELD_INDEX, &BY_PACKAGE_HASH_VARIANT)?\n                        .add_field(BY_PACKAGE_HASH_ADDR_INDEX, &addr)?\n                        .add_field(BY_PACKAGE_HASH_VERSION_INDEX, &version)?;\n                if let Some(protocol_version_major) = protocol_version_major {\n                    //We do this to support transactions that were created before the\n                    // `protocol_version_major` fix. The pre-fix transactions\n                    // will not have a BY_PACKAGE_HASH_PROTOCOL_VERSION_MAJOR_INDEX\n                    // entry and we need to maintain ability to deserialize them.\n                    builder = builder.add_field(\n                        BY_PACKAGE_HASH_PROTOCOL_VERSION_MAJOR_INDEX,\n                        protocol_version_major,\n                    )?;\n                }\n                builder.binary_payload_bytes()\n            }\n            TransactionInvocationTarget::ByPackageName {\n                name,\n                version,\n                protocol_version_major,\n            } => {\n                let mut builder =\n                    CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                        .add_field(TAG_FIELD_INDEX, &BY_PACKAGE_NAME_VARIANT)?\n                        .add_field(BY_PACKAGE_NAME_NAME_INDEX, &name)?\n                        .add_field(BY_PACKAGE_NAME_VERSION_INDEX, &version)?;\n                if let Some(protocol_version_major) = protocol_version_major {\n                    //We do this to support transactions that were created before the\n                    // `protocol_version_major` fix. 
The pre-fix transactions\n                    // will not have a BY_PACKAGE_HASH_PROTOCOL_VERSION_MAJOR_INDEX\n                    // entry and we need to maintain ability to deserialize them.\n                    builder = builder.add_field(\n                        BY_PACKAGE_NAME_PROTOCOL_VERSION_MAJOR_INDEX,\n                        protocol_version_major,\n                    )?;\n                }\n                builder.binary_payload_bytes()\n            }\n        }\n    }\n    fn serialized_length(&self) -> usize {\n        CalltableSerializationEnvelope::estimate_size(self.serialized_field_lengths())\n    }\n}\n\nimpl FromBytes for TransactionInvocationTarget {\n    fn from_bytes(bytes: &[u8]) -> Result<(TransactionInvocationTarget, &[u8]), Error> {\n        let (binary_payload, remainder) = CalltableSerializationEnvelope::from_bytes(4, bytes)?;\n        let window = binary_payload.start_consuming()?.ok_or(Formatting)?;\n        window.verify_index(TAG_FIELD_INDEX)?;\n        let (tag, window) = window.deserialize_and_maybe_next::<u8>()?;\n        let to_ret = match tag {\n            BY_HASH_VARIANT => {\n                let window = window.ok_or(Formatting)?;\n                window.verify_index(BY_HASH_HASH_INDEX)?;\n                let (hash, window) = window.deserialize_and_maybe_next::<HashAddr>()?;\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(TransactionInvocationTarget::ByHash(hash))\n            }\n            BY_NAME_VARIANT => {\n                let window = window.ok_or(Formatting)?;\n                window.verify_index(BY_NAME_NAME_INDEX)?;\n                let (name, window) = window.deserialize_and_maybe_next::<String>()?;\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(TransactionInvocationTarget::ByName(name))\n            }\n            BY_PACKAGE_HASH_VARIANT => {\n                let 
window = window.ok_or(Formatting)?;\n                window.verify_index(BY_PACKAGE_HASH_ADDR_INDEX)?;\n                let (addr, window) = window.deserialize_and_maybe_next::<PackageAddr>()?;\n                let window = window.ok_or(Formatting)?;\n                window.verify_index(BY_PACKAGE_HASH_VERSION_INDEX)?;\n                let (version, window) =\n                    window.deserialize_and_maybe_next::<Option<EntityVersion>>()?;\n                let protocol_version_major = if let Some(window) = window {\n                    window.verify_index(BY_PACKAGE_HASH_PROTOCOL_VERSION_MAJOR_INDEX)?;\n                    let (protocol_version_major, window) =\n                        window.deserialize_and_maybe_next::<ProtocolVersionMajor>()?;\n                    if window.is_some() {\n                        return Err(Formatting);\n                    }\n                    Some(protocol_version_major)\n                } else {\n                    if window.is_some() {\n                        return Err(Formatting);\n                    }\n                    None\n                };\n\n                Ok(TransactionInvocationTarget::ByPackageHash {\n                    addr,\n                    version,\n                    protocol_version_major,\n                })\n            }\n            BY_PACKAGE_NAME_VARIANT => {\n                let window = window.ok_or(Formatting)?;\n                window.verify_index(BY_PACKAGE_NAME_NAME_INDEX)?;\n                let (name, window) = window.deserialize_and_maybe_next::<String>()?;\n                let window = window.ok_or(Formatting)?;\n                window.verify_index(BY_PACKAGE_NAME_VERSION_INDEX)?;\n                let (version, window) =\n                    window.deserialize_and_maybe_next::<Option<EntityVersion>>()?;\n                let protocol_version_major = if let Some(window) = window {\n                    window.verify_index(BY_PACKAGE_NAME_PROTOCOL_VERSION_MAJOR_INDEX)?;\n              
      let (protocol_version_major, window) =\n                        window.deserialize_and_maybe_next::<ProtocolVersionMajor>()?;\n                    if window.is_some() {\n                        return Err(Formatting);\n                    }\n                    Some(protocol_version_major)\n                } else {\n                    if window.is_some() {\n                        return Err(Formatting);\n                    }\n                    None\n                };\n                Ok(TransactionInvocationTarget::ByPackageName {\n                    name,\n                    version,\n                    protocol_version_major,\n                })\n            }\n            _ => Err(Formatting),\n        };\n        to_ret.map(|endpoint| (endpoint, remainder))\n    }\n}\n\nimpl Display for TransactionInvocationTarget {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            TransactionInvocationTarget::ByHash(addr) => {\n                write!(formatter, \"invocable-entity({:10})\", HexFmt(addr))\n            }\n            TransactionInvocationTarget::ByName(alias) => {\n                write!(formatter, \"invocable-entity({})\", alias)\n            }\n            TransactionInvocationTarget::ByPackageHash {\n                addr,\n                version,\n                protocol_version_major,\n            } => {\n                write!(\n                    formatter,\n                    \"package({:10}, version {:?}, protocol_version_major {:?})\",\n                    HexFmt(addr),\n                    version,\n                    protocol_version_major\n                )\n            }\n            TransactionInvocationTarget::ByPackageName {\n                name: alias,\n                version,\n                protocol_version_major,\n            } => {\n                write!(\n                    formatter,\n                    \"package({}, version {:?}, protocol_version_major {:?})\",\n     
               alias, version, protocol_version_major\n                )\n            }\n        }\n    }\n}\n\nimpl Debug for TransactionInvocationTarget {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            TransactionInvocationTarget::ByHash(addr) => formatter\n                .debug_tuple(\"InvocableEntity\")\n                .field(&HexFmt(addr))\n                .finish(),\n            TransactionInvocationTarget::ByName(alias) => formatter\n                .debug_tuple(\"InvocableEntityAlias\")\n                .field(alias)\n                .finish(),\n            TransactionInvocationTarget::ByPackageHash {\n                addr,\n                version,\n                protocol_version_major,\n            } => formatter\n                .debug_struct(\"Package\")\n                .field(\"addr\", &HexFmt(addr))\n                .field(\"version\", version)\n                .field(\"protocol_version_major\", protocol_version_major)\n                .finish(),\n            TransactionInvocationTarget::ByPackageName {\n                name: alias,\n                version,\n                protocol_version_major,\n            } => formatter\n                .debug_struct(\"PackageAlias\")\n                .field(\"alias\", alias)\n                .field(\"version\", version)\n                .field(\"protocol_version_major\", protocol_version_major)\n                .finish(),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::{bytesrepr, gens::transaction_invocation_target_arb};\n    use proptest::prelude::*;\n\n    #[test]\n    fn json_should_not_produce_version_key_if_none() {\n        let alias = TransactionInvocationTarget::new_package_alias_with_major(\n            \"abc\".to_owned(),\n            Some(111),\n            None,\n        );\n        assert!(!serde_json::to_string(&alias)\n            .unwrap()\n            .contains(\"\\\"protocol_version_major\\\"\"));\n\n 
       let alias = TransactionInvocationTarget::new_package_alias_with_major(\n            \"abc\".to_owned(),\n            Some(111),\n            Some(5),\n        );\n        assert!(serde_json::to_string(&alias)\n            .unwrap()\n            .contains(\"\\\"protocol_version_major\\\":5\"));\n\n        let package = TransactionInvocationTarget::new_package_with_major(\n            PackageHash::from([1; 32]),\n            Some(222),\n            None,\n        );\n        assert!(!serde_json::to_string(&package)\n            .unwrap()\n            .contains(\"\\\"protocol_version_major\\\"\"));\n\n        let package = TransactionInvocationTarget::new_package_with_major(\n            PackageHash::from([1; 32]),\n            Some(222),\n            Some(5),\n        );\n        assert!(serde_json::to_string(&package)\n            .unwrap()\n            .contains(\"\\\"protocol_version_major\\\":5\"));\n    }\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        for _ in 0..10 {\n            bytesrepr::test_serialization_roundtrip(&TransactionInvocationTarget::random(rng));\n        }\n    }\n\n    #[test]\n    fn by_package_hash_variant_without_version_key_should_serialize_exactly_as_before_the_version_key_change(\n    ) {\n        let addr = [1; 32];\n        let version = Some(1200);\n        let field_sizes = vec![\n            crate::bytesrepr::U8_SERIALIZED_LENGTH,\n            addr.serialized_length(),\n            version.serialized_length(),\n        ];\n        let builder = CalltableSerializationEnvelopeBuilder::new(field_sizes)\n            .unwrap()\n            .add_field(TAG_FIELD_INDEX, &BY_PACKAGE_HASH_VARIANT)\n            .unwrap()\n            .add_field(BY_PACKAGE_HASH_ADDR_INDEX, &addr)\n            .unwrap()\n            .add_field(BY_PACKAGE_HASH_VERSION_INDEX, &version)\n            .unwrap();\n        let bytes = builder.binary_payload_bytes().unwrap();\n        let expected = 
TransactionInvocationTarget::ByPackageHash {\n            addr,\n            version,\n            protocol_version_major: None,\n        };\n        let expected_bytes = expected.to_bytes().unwrap();\n        assert_eq!(bytes, expected_bytes); //We want the \"legacy\" binary representation and current representation without\n                                           // protocol_version_major equal\n\n        let (got, remainder) = TransactionInvocationTarget::from_bytes(&bytes).unwrap();\n        assert_eq!(expected, got);\n        assert!(remainder.is_empty());\n    }\n\n    #[test]\n    fn by_package_name_variant_without_version_key_should_serialize_exactly_as_before_the_version_key_change(\n    ) {\n        let name = \"some_name\".to_string();\n        let version = Some(1200);\n        let field_sizes = vec![\n            crate::bytesrepr::U8_SERIALIZED_LENGTH,\n            name.serialized_length(),\n            version.serialized_length(),\n        ];\n        let builder = CalltableSerializationEnvelopeBuilder::new(field_sizes)\n            .unwrap()\n            .add_field(TAG_FIELD_INDEX, &BY_PACKAGE_NAME_VARIANT)\n            .unwrap()\n            .add_field(BY_PACKAGE_NAME_NAME_INDEX, &name)\n            .unwrap()\n            .add_field(BY_PACKAGE_NAME_VERSION_INDEX, &version)\n            .unwrap();\n        let bytes = builder.binary_payload_bytes().unwrap();\n        let expected = TransactionInvocationTarget::ByPackageName {\n            name,\n            version,\n            protocol_version_major: None,\n        };\n        let expected_bytes = expected.to_bytes().unwrap();\n        assert_eq!(bytes, expected_bytes); //We want the \"legacy\" binary representation and current representation without\n                                           // protocol_version_major equal\n\n        let (got, remainder) = TransactionInvocationTarget::from_bytes(&bytes).unwrap();\n        assert_eq!(expected, got);\n        assert!(remainder.is_empty());\n    
}\n\n    #[test]\n    fn by_package_hash_variant_should_deserialize_bytes_that_have_both_version_and_key() {\n        let target = TransactionInvocationTarget::ByPackageHash {\n            addr: [1; 32],\n            version: Some(11),\n            protocol_version_major: Some(2),\n        };\n        let bytes = target.to_bytes().unwrap();\n        let (number_of_fields, _) = u32::from_bytes(&bytes).unwrap();\n        assert_eq!(number_of_fields, 4); //We want the enum tag, addr, version (even if it's None) and protocol_version_major to\n                                         // have been serialized\n        let (got, remainder) = TransactionInvocationTarget::from_bytes(&bytes).unwrap();\n        assert_eq!(target, got);\n        assert!(remainder.is_empty());\n    }\n\n    #[test]\n    fn by_package_name_variant_should_deserialize_bytes_that_have_both_version_and_key() {\n        let target = TransactionInvocationTarget::ByPackageName {\n            name: \"xyz\".to_string(),\n            version: Some(11),\n            protocol_version_major: Some(3),\n        };\n        let bytes = target.to_bytes().unwrap();\n        let (number_of_fields, _) = u32::from_bytes(&bytes).unwrap();\n        assert_eq!(number_of_fields, 4); //We want the enum tag, addr, version (even if it's None) and protocol_version_major to\n                                         // have been serialized\n        let (got, remainder) = TransactionInvocationTarget::from_bytes(&bytes).unwrap();\n        assert_eq!(target, got);\n        assert!(remainder.is_empty());\n    }\n\n    proptest! {\n        #[test]\n        fn generative_bytesrepr_roundtrip(val in transaction_invocation_target_arb()) {\n            bytesrepr::test_serialization_roundtrip(&val);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/transaction_scheduling.rs",
    "content": "use super::serialization::CalltableSerializationEnvelope;\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{\n        Error::{self, Formatting},\n        FromBytes, ToBytes,\n    },\n    transaction::serialization::CalltableSerializationEnvelopeBuilder,\n};\nuse alloc::vec::Vec;\nuse core::fmt::{self, Display, Formatter};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\n#[cfg(any(feature = \"std\", test))]\nuse serde::{Deserialize, Serialize};\n\n/// The scheduling mode of a [`crate::Transaction`].\n#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)]\n#[cfg_attr(\n    any(feature = \"std\", test),\n    derive(Serialize, Deserialize),\n    serde(deny_unknown_fields)\n)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"Scheduling mode of a Transaction.\")\n)]\npub enum TransactionScheduling {\n    /// No special scheduling applied.\n    Standard,\n}\n\nimpl TransactionScheduling {\n    fn serialized_field_lengths(&self) -> Vec<usize> {\n        match self {\n            TransactionScheduling::Standard => {\n                vec![crate::bytesrepr::U8_SERIALIZED_LENGTH]\n            }\n        }\n    }\n\n    /// Returns a random `TransactionScheduling`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..1) {\n            0 => TransactionScheduling::Standard,\n            _ => unreachable!(),\n        }\n    }\n}\n\nconst TAG_FIELD_INDEX: u16 = 0;\n\nconst STANDARD_VARIANT: u8 = 0;\n\nimpl ToBytes for TransactionScheduling {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        match self {\n            TransactionScheduling::Standard => {\n                
CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                    .add_field(TAG_FIELD_INDEX, &STANDARD_VARIANT)?\n                    .binary_payload_bytes()\n            }\n        }\n    }\n    fn serialized_length(&self) -> usize {\n        CalltableSerializationEnvelope::estimate_size(self.serialized_field_lengths())\n    }\n}\n\nimpl FromBytes for TransactionScheduling {\n    fn from_bytes(bytes: &[u8]) -> Result<(TransactionScheduling, &[u8]), Error> {\n        let (binary_payload, remainder) = CalltableSerializationEnvelope::from_bytes(2, bytes)?;\n        let window = binary_payload.start_consuming()?.ok_or(Formatting)?;\n        window.verify_index(0)?;\n        let (tag, window) = window.deserialize_and_maybe_next::<u8>()?;\n        let to_ret = match tag {\n            STANDARD_VARIANT => {\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(TransactionScheduling::Standard)\n            }\n            _ => Err(Formatting),\n        };\n        to_ret.map(|endpoint| (endpoint, remainder))\n    }\n}\n\nimpl Display for TransactionScheduling {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            TransactionScheduling::Standard => write!(formatter, \"schedule(standard)\"),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::{bytesrepr, gens::transaction_scheduling_arb};\n    use proptest::prelude::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        for _ in 0..10 {\n            bytesrepr::test_serialization_roundtrip(&TransactionScheduling::random(rng));\n        }\n    }\n\n    proptest! {\n        #[test]\n        fn generative_bytesrepr_roundtrip(val in transaction_scheduling_arb()) {\n            bytesrepr::test_serialization_roundtrip(&val);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/transaction_target.rs",
    "content": "use alloc::vec::Vec;\nuse core::fmt::{self, Debug, Display, Formatter};\n\nuse super::{serialization::CalltableSerializationEnvelope, TransactionInvocationTarget};\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{\n        Bytes,\n        Error::{self, Formatting},\n        FromBytes, ToBytes,\n    },\n    transaction::serialization::CalltableSerializationEnvelopeBuilder,\n    ContractRuntimeTag, HashAddr,\n};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{Rng, RngCore};\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nconst VM_CASPER_V1_TAG: u8 = 0;\nconst VM_CASPER_V2_TAG: u8 = 1;\nconst TRANSFERRED_VALUE_INDEX: u16 = 1;\nconst SEED_VALUE_INDEX: u16 = 2;\n\n#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"Session params of a TransactionTarget.\")\n)]\n#[serde(deny_unknown_fields)]\npub enum TransactionRuntimeParams {\n    VmCasperV1,\n    VmCasperV2 {\n        /// The amount of motes to transfer before code is executed.\n        ///\n        /// This is for protection against phishing attack where a malicious session code drains\n        /// the balance of the caller account. 
The amount stated here is the maximum amount\n        /// that can be transferred from the caller account to the session account.\n        transferred_value: u64,\n        /// The seed for the session code that is used for an installer.\n        seed: Option<[u8; 32]>,\n    },\n}\n\nimpl TransactionRuntimeParams {\n    /// Returns the contract runtime tag.\n    pub fn contract_runtime_tag(&self) -> ContractRuntimeTag {\n        match self {\n            TransactionRuntimeParams::VmCasperV1 => ContractRuntimeTag::VmCasperV1,\n            TransactionRuntimeParams::VmCasperV2 { .. } => ContractRuntimeTag::VmCasperV2,\n        }\n    }\n\n    pub fn seed(&self) -> Option<[u8; 32]> {\n        match self {\n            TransactionRuntimeParams::VmCasperV1 => None,\n            TransactionRuntimeParams::VmCasperV2 { seed, .. } => *seed,\n        }\n    }\n\n    pub fn serialized_field_lengths(&self) -> Vec<usize> {\n        match self {\n            TransactionRuntimeParams::VmCasperV1 => vec![crate::bytesrepr::U8_SERIALIZED_LENGTH],\n            TransactionRuntimeParams::VmCasperV2 {\n                transferred_value,\n                seed,\n            } => {\n                vec![\n                    crate::bytesrepr::U8_SERIALIZED_LENGTH,\n                    transferred_value.serialized_length(),\n                    seed.serialized_length(),\n                ]\n            }\n        }\n    }\n}\n\nimpl ToBytes for TransactionRuntimeParams {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        match self {\n            TransactionRuntimeParams::VmCasperV1 => {\n                CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                    .add_field(TAG_FIELD_INDEX, &VM_CASPER_V1_TAG)?\n                    .binary_payload_bytes()\n            }\n            TransactionRuntimeParams::VmCasperV2 {\n                transferred_value,\n                seed,\n            } => 
CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                .add_field(TAG_FIELD_INDEX, &VM_CASPER_V2_TAG)?\n                .add_field(TRANSFERRED_VALUE_INDEX, transferred_value)?\n                .add_field(SEED_VALUE_INDEX, seed)?\n                .binary_payload_bytes(),\n        }\n    }\n\n    fn serialized_length(&self) -> usize {\n        match self {\n            TransactionRuntimeParams::VmCasperV1 => {\n                CalltableSerializationEnvelope::estimate_size(vec![\n                    crate::bytesrepr::U8_SERIALIZED_LENGTH,\n                ])\n            }\n            TransactionRuntimeParams::VmCasperV2 {\n                transferred_value,\n                seed,\n            } => CalltableSerializationEnvelope::estimate_size(vec![\n                crate::bytesrepr::U8_SERIALIZED_LENGTH,\n                transferred_value.serialized_length(),\n                seed.serialized_length(),\n            ]),\n        }\n    }\n}\n\nimpl FromBytes for TransactionRuntimeParams {\n    fn from_bytes(bytes: &[u8]) -> Result<(TransactionRuntimeParams, &[u8]), Error> {\n        let (binary_payload, remainder) = CalltableSerializationEnvelope::from_bytes(3, bytes)?;\n        let window = binary_payload.start_consuming()?.ok_or(Formatting)?;\n        window.verify_index(TAG_FIELD_INDEX)?;\n        let (tag, window) = window.deserialize_and_maybe_next::<u8>()?;\n        let to_ret = match tag {\n            VM_CASPER_V1_TAG => {\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(TransactionRuntimeParams::VmCasperV1)\n            }\n            VM_CASPER_V2_TAG => {\n                let window = window.ok_or(Formatting)?;\n                window.verify_index(TRANSFERRED_VALUE_INDEX)?;\n                let (transferred_value, window) = window.deserialize_and_maybe_next::<u64>()?;\n                let window = window.ok_or(Formatting)?;\n                
window.verify_index(SEED_VALUE_INDEX)?;\n                let (seed, window) = window.deserialize_and_maybe_next::<Option<[u8; 32]>>()?;\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(TransactionRuntimeParams::VmCasperV2 {\n                    transferred_value,\n                    seed,\n                })\n            }\n            _ => Err(Formatting),\n        };\n        to_ret.map(|endpoint| (endpoint, remainder))\n    }\n}\n\nimpl Display for TransactionRuntimeParams {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            TransactionRuntimeParams::VmCasperV1 => write!(formatter, \"vm-casper-v1\"),\n            TransactionRuntimeParams::VmCasperV2 {\n                transferred_value,\n                seed,\n            } => write!(\n                formatter,\n                \"vm-casper-v2 {{ transferred_value: {}, seed: {:?} }}\",\n                transferred_value, seed\n            ),\n        }\n    }\n}\n\n/// The execution target of a [`crate::Transaction`].\n#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"Execution target of a Transaction.\")\n)]\n#[serde(deny_unknown_fields)]\npub enum TransactionTarget {\n    /// The execution target is a native operation (e.g. a transfer).\n    Native,\n    /// The execution target is a stored entity or package.\n    Stored {\n        /// The identifier of the stored execution target.\n        id: TransactionInvocationTarget,\n        /// The execution runtime to use.\n        runtime: TransactionRuntimeParams,\n    },\n    /// The execution target is the included module bytes, i.e. 
compiled Wasm.\n    Session {\n        /// Flag determining if the Wasm is an install/upgrade.\n        is_install_upgrade: bool,\n        /// The compiled Wasm.\n        module_bytes: Bytes,\n        /// The execution runtime to use.\n        runtime: TransactionRuntimeParams,\n    },\n}\n\nimpl TransactionTarget {\n    /// Returns a new `TransactionTarget::Native`.\n    pub fn new_native() -> Self {\n        TransactionTarget::Native\n    }\n\n    fn serialized_field_lengths(&self) -> Vec<usize> {\n        match self {\n            TransactionTarget::Native => {\n                vec![crate::bytesrepr::U8_SERIALIZED_LENGTH]\n            }\n            TransactionTarget::Stored { id, runtime } => {\n                vec![\n                    crate::bytesrepr::U8_SERIALIZED_LENGTH,\n                    id.serialized_length(),\n                    runtime.serialized_length(),\n                ]\n            }\n            TransactionTarget::Session {\n                is_install_upgrade,\n                module_bytes,\n                runtime,\n            } => {\n                vec![\n                    crate::bytesrepr::U8_SERIALIZED_LENGTH,\n                    is_install_upgrade.serialized_length(),\n                    runtime.serialized_length(),\n                    module_bytes.serialized_length(),\n                ]\n            }\n        }\n    }\n\n    /// Returns a `hash_addr` for a targeted contract, if known.\n    pub fn contract_hash_addr(&self) -> Option<HashAddr> {\n        if let Some(invocation_target) = self.invocation_target() {\n            invocation_target.contract_by_hash()\n        } else {\n            None\n        }\n    }\n\n    /// Returns the invocation target, if any.\n    pub fn invocation_target(&self) -> Option<TransactionInvocationTarget> {\n        match self {\n            TransactionTarget::Native | TransactionTarget::Session { .. } => None,\n            TransactionTarget::Stored { id, .. 
} => Some(id.clone()),\n        }\n    }\n\n    /// Returns a random `TransactionTarget`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..3) {\n            0 => TransactionTarget::Native,\n            1 => TransactionTarget::Stored {\n                id: TransactionInvocationTarget::random(rng),\n                runtime: TransactionRuntimeParams::VmCasperV1,\n            },\n            2 => {\n                let mut buffer = vec![0u8; rng.gen_range(0..100)];\n                rng.fill_bytes(buffer.as_mut());\n                let is_install_upgrade = rng.gen();\n                TransactionTarget::Session {\n                    is_install_upgrade,\n                    module_bytes: Bytes::from(buffer),\n                    runtime: TransactionRuntimeParams::VmCasperV1,\n                }\n            }\n            _ => unreachable!(),\n        }\n    }\n\n    /// Returns `true` if the transaction target is [`Session`].\n    ///\n    /// [`Session`]: TransactionTarget::Session\n    #[must_use]\n    pub fn is_session(&self) -> bool {\n        matches!(self, Self::Session { .. 
})\n    }\n}\n\nconst TAG_FIELD_INDEX: u16 = 0;\n\nconst NATIVE_VARIANT: u8 = 0;\n\nconst STORED_VARIANT: u8 = 1;\nconst STORED_ID_INDEX: u16 = 1;\nconst STORED_RUNTIME_INDEX: u16 = 2;\n\nconst SESSION_VARIANT: u8 = 2;\nconst SESSION_IS_INSTALL_INDEX: u16 = 1;\nconst SESSION_RUNTIME_INDEX: u16 = 2;\nconst SESSION_MODULE_BYTES_INDEX: u16 = 3;\n\nimpl ToBytes for TransactionTarget {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        match self {\n            TransactionTarget::Native => {\n                CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                    .add_field(TAG_FIELD_INDEX, &NATIVE_VARIANT)?\n                    .binary_payload_bytes()\n            }\n            TransactionTarget::Stored { id, runtime } => {\n                CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                    .add_field(TAG_FIELD_INDEX, &STORED_VARIANT)?\n                    .add_field(STORED_ID_INDEX, &id)?\n                    .add_field(STORED_RUNTIME_INDEX, &runtime)?\n                    .binary_payload_bytes()\n            }\n            TransactionTarget::Session {\n                is_install_upgrade,\n                module_bytes,\n                runtime,\n            } => CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())?\n                .add_field(TAG_FIELD_INDEX, &SESSION_VARIANT)?\n                .add_field(SESSION_IS_INSTALL_INDEX, &is_install_upgrade)?\n                .add_field(SESSION_RUNTIME_INDEX, &runtime)?\n                .add_field(SESSION_MODULE_BYTES_INDEX, &module_bytes)?\n                .binary_payload_bytes(),\n        }\n    }\n\n    fn serialized_length(&self) -> usize {\n        CalltableSerializationEnvelope::estimate_size(self.serialized_field_lengths())\n    }\n}\n\nimpl FromBytes for TransactionTarget {\n    fn from_bytes(bytes: &[u8]) -> Result<(TransactionTarget, &[u8]), Error> {\n        let (binary_payload, remainder) = 
CalltableSerializationEnvelope::from_bytes(6, bytes)?;\n        let window = binary_payload.start_consuming()?.ok_or(Formatting)?;\n        window.verify_index(TAG_FIELD_INDEX)?;\n        let (tag, window) = window.deserialize_and_maybe_next::<u8>()?;\n        let to_ret = match tag {\n            NATIVE_VARIANT => {\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(TransactionTarget::Native)\n            }\n            STORED_VARIANT => {\n                let window = window.ok_or(Formatting)?;\n                window.verify_index(STORED_ID_INDEX)?;\n                let (id, window) =\n                    window.deserialize_and_maybe_next::<TransactionInvocationTarget>()?;\n                let window = window.ok_or(Formatting)?;\n                window.verify_index(STORED_RUNTIME_INDEX)?;\n                let (runtime, window) =\n                    window.deserialize_and_maybe_next::<TransactionRuntimeParams>()?;\n                if window.is_some() {\n                    return Err(Formatting);\n                }\n                Ok(TransactionTarget::Stored { id, runtime })\n            }\n            SESSION_VARIANT => {\n                let window = window.ok_or(Formatting)?;\n                window.verify_index(SESSION_IS_INSTALL_INDEX)?;\n                let (is_install_upgrade, window) = window.deserialize_and_maybe_next::<bool>()?;\n                let window = window.ok_or(Formatting)?;\n                window.verify_index(SESSION_RUNTIME_INDEX)?;\n                let (runtime, window) =\n                    window.deserialize_and_maybe_next::<TransactionRuntimeParams>()?;\n                let window = window.ok_or(Formatting)?;\n                window.verify_index(SESSION_MODULE_BYTES_INDEX)?;\n                let (module_bytes, window) = window.deserialize_and_maybe_next::<Bytes>()?;\n\n                if window.is_some() {\n                    return Err(Formatting);\n            
    }\n                Ok(TransactionTarget::Session {\n                    is_install_upgrade,\n                    module_bytes,\n                    runtime,\n                })\n            }\n            _ => Err(Formatting),\n        };\n        to_ret.map(|endpoint| (endpoint, remainder))\n    }\n}\n\nimpl Display for TransactionTarget {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            TransactionTarget::Native => write!(formatter, \"native\"),\n            TransactionTarget::Stored { id, runtime } => {\n                write!(formatter, \"stored({}, {})\", id, runtime,)\n            }\n            TransactionTarget::Session {\n                is_install_upgrade,\n                module_bytes,\n                runtime,\n            } => write!(\n                formatter,\n                \"session({} module bytes, runtime: {}, is_install_upgrade: {})\",\n                module_bytes.len(),\n                runtime,\n                is_install_upgrade,\n            ),\n        }\n    }\n}\n\nimpl Debug for TransactionTarget {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            TransactionTarget::Native => formatter.debug_struct(\"Native\").finish(),\n            TransactionTarget::Stored { id, runtime } => formatter\n                .debug_struct(\"Stored\")\n                .field(\"id\", id)\n                .field(\"runtime\", runtime)\n                .finish(),\n            TransactionTarget::Session {\n                is_install_upgrade,\n                module_bytes,\n                runtime,\n            } => {\n                struct BytesLen(usize);\n                impl Debug for BytesLen {\n                    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n                        write!(formatter, \"{} bytes\", self.0)\n                    }\n                }\n\n                formatter\n                    
.debug_struct(\"Session\")\n                    .field(\"module_bytes\", &BytesLen(module_bytes.len()))\n                    .field(\"is_install_upgrade\", is_install_upgrade)\n                    .field(\"runtime\", runtime)\n                    .finish()\n            }\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{bytesrepr, gens::transaction_target_arb};\n    use proptest::prelude::*;\n\n    proptest! {\n        #[test]\n        fn generative_bytesrepr_roundtrip(val in transaction_target_arb()) {\n            bytesrepr::test_serialization_roundtrip(&val);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/transaction_v1/arg_handling.rs",
    "content": "use core::marker::PhantomData;\n\nuse crate::TransferTarget;\n\nuse crate::{bytesrepr::ToBytes, CLTyped, CLValueError, PublicKey, RuntimeArgs, URef, U512};\n\nconst TRANSFER_ARG_AMOUNT: RequiredArg<U512> = RequiredArg::new(\"amount\");\n\nconst TRANSFER_ARG_SOURCE: OptionalArg<URef> = OptionalArg::new(\"source\");\nconst TRANSFER_ARG_TARGET: &str = \"target\";\n// \"id\" for legacy reasons, if the argument is passed it is [Option]\nconst TRANSFER_ARG_ID: OptionalArg<Option<u64>> = OptionalArg::new(\"id\");\n\nconst ADD_BID_ARG_PUBLIC_KEY: RequiredArg<PublicKey> = RequiredArg::new(\"public_key\");\nconst ADD_BID_ARG_DELEGATION_RATE: RequiredArg<u8> = RequiredArg::new(\"delegation_rate\");\nconst ADD_BID_ARG_AMOUNT: RequiredArg<U512> = RequiredArg::new(\"amount\");\n\nconst ADD_BID_ARG_MINIMUM_DELEGATION_AMOUNT: OptionalArg<u64> =\n    OptionalArg::new(\"minimum_delegation_amount\");\n\nconst ADD_BID_ARG_MAXIMUM_DELEGATION_AMOUNT: OptionalArg<u64> =\n    OptionalArg::new(\"maximum_delegation_amount\");\n\nconst ADD_BID_ARG_RESERVED_SLOTS: OptionalArg<u32> = OptionalArg::new(\"reserved_slots\");\n\nconst WITHDRAW_BID_ARG_PUBLIC_KEY: RequiredArg<PublicKey> = RequiredArg::new(\"public_key\");\nconst WITHDRAW_BID_ARG_AMOUNT: RequiredArg<U512> = RequiredArg::new(\"amount\");\n\nconst DELEGATE_ARG_DELEGATOR: RequiredArg<PublicKey> = RequiredArg::new(\"delegator\");\nconst DELEGATE_ARG_VALIDATOR: RequiredArg<PublicKey> = RequiredArg::new(\"validator\");\nconst DELEGATE_ARG_AMOUNT: RequiredArg<U512> = RequiredArg::new(\"amount\");\n\nconst UNDELEGATE_ARG_DELEGATOR: RequiredArg<PublicKey> = RequiredArg::new(\"delegator\");\nconst UNDELEGATE_ARG_VALIDATOR: RequiredArg<PublicKey> = RequiredArg::new(\"validator\");\nconst UNDELEGATE_ARG_AMOUNT: RequiredArg<U512> = RequiredArg::new(\"amount\");\n\nconst REDELEGATE_ARG_DELEGATOR: RequiredArg<PublicKey> = RequiredArg::new(\"delegator\");\nconst REDELEGATE_ARG_VALIDATOR: RequiredArg<PublicKey> = 
RequiredArg::new(\"validator\");\nconst REDELEGATE_ARG_AMOUNT: RequiredArg<U512> = RequiredArg::new(\"amount\");\nconst REDELEGATE_ARG_NEW_VALIDATOR: RequiredArg<PublicKey> = RequiredArg::new(\"new_validator\");\n\nstruct RequiredArg<T> {\n    name: &'static str,\n    _phantom: PhantomData<T>,\n}\n\nimpl<T> RequiredArg<T> {\n    const fn new(name: &'static str) -> Self {\n        Self {\n            name,\n            _phantom: PhantomData,\n        }\n    }\n\n    fn insert(&self, args: &mut RuntimeArgs, value: T) -> Result<(), CLValueError>\n    where\n        T: CLTyped + ToBytes,\n    {\n        args.insert(self.name, value)\n    }\n}\n\nstruct OptionalArg<T> {\n    name: &'static str,\n    _phantom: PhantomData<T>,\n}\n\nimpl<T> OptionalArg<T> {\n    const fn new(name: &'static str) -> Self {\n        Self {\n            name,\n            _phantom: PhantomData,\n        }\n    }\n\n    fn insert(&self, args: &mut RuntimeArgs, value: T) -> Result<(), CLValueError>\n    where\n        T: CLTyped + ToBytes,\n    {\n        args.insert(self.name, value)\n    }\n}\n\n/// Creates a `RuntimeArgs` suitable for use in a transfer transaction.\npub(crate) fn new_transfer_args<A: Into<U512>, T: Into<TransferTarget>>(\n    amount: A,\n    maybe_source: Option<URef>,\n    target: T,\n    maybe_id: Option<u64>,\n) -> Result<RuntimeArgs, CLValueError> {\n    let mut args = RuntimeArgs::new();\n    if let Some(source) = maybe_source {\n        TRANSFER_ARG_SOURCE.insert(&mut args, source)?;\n    }\n    match target.into() {\n        TransferTarget::PublicKey(public_key) => args.insert(TRANSFER_ARG_TARGET, public_key)?,\n        TransferTarget::AccountHash(account_hash) => {\n            args.insert(TRANSFER_ARG_TARGET, account_hash)?\n        }\n        TransferTarget::URef(uref) => args.insert(TRANSFER_ARG_TARGET, uref)?,\n    }\n    TRANSFER_ARG_AMOUNT.insert(&mut args, amount.into())?;\n    if maybe_id.is_some() {\n        TRANSFER_ARG_ID.insert(&mut args, maybe_id)?;\n    
}\n    Ok(args)\n}\n\n/// Creates a `RuntimeArgs` suitable for use in an add_bid transaction.\npub(crate) fn new_add_bid_args<A: Into<U512>>(\n    public_key: PublicKey,\n    delegation_rate: u8,\n    amount: A,\n    maybe_minimum_delegation_amount: Option<u64>,\n    maybe_maximum_delegation_amount: Option<u64>,\n    maybe_reserved_slots: Option<u32>,\n) -> Result<RuntimeArgs, CLValueError> {\n    let mut args = RuntimeArgs::new();\n    ADD_BID_ARG_PUBLIC_KEY.insert(&mut args, public_key)?;\n    ADD_BID_ARG_DELEGATION_RATE.insert(&mut args, delegation_rate)?;\n    ADD_BID_ARG_AMOUNT.insert(&mut args, amount.into())?;\n    if let Some(minimum_delegation_amount) = maybe_minimum_delegation_amount {\n        ADD_BID_ARG_MINIMUM_DELEGATION_AMOUNT.insert(&mut args, minimum_delegation_amount)?;\n    };\n    if let Some(maximum_delegation_amount) = maybe_maximum_delegation_amount {\n        ADD_BID_ARG_MAXIMUM_DELEGATION_AMOUNT.insert(&mut args, maximum_delegation_amount)?;\n    };\n    if let Some(reserved_slots) = maybe_reserved_slots {\n        ADD_BID_ARG_RESERVED_SLOTS.insert(&mut args, reserved_slots)?;\n    };\n    Ok(args)\n}\n\n/// Creates a `RuntimeArgs` suitable for use in a withdraw_bid transaction.\npub fn new_withdraw_bid_args<A: Into<U512>>(\n    public_key: PublicKey,\n    amount: A,\n) -> Result<RuntimeArgs, CLValueError> {\n    let mut args = RuntimeArgs::new();\n    WITHDRAW_BID_ARG_PUBLIC_KEY.insert(&mut args, public_key)?;\n    WITHDRAW_BID_ARG_AMOUNT.insert(&mut args, amount.into())?;\n    Ok(args)\n}\n\n/// Creates a `RuntimeArgs` suitable for use in a delegate transaction.\npub(crate) fn new_delegate_args<A: Into<U512>>(\n    delegator: PublicKey,\n    validator: PublicKey,\n    amount: A,\n) -> Result<RuntimeArgs, CLValueError> {\n    let mut args = RuntimeArgs::new();\n    DELEGATE_ARG_DELEGATOR.insert(&mut args, delegator)?;\n    DELEGATE_ARG_VALIDATOR.insert(&mut args, validator)?;\n    DELEGATE_ARG_AMOUNT.insert(&mut args, amount.into())?;\n    
Ok(args)\n}\n\n/// Creates a `RuntimeArgs` suitable for use in an undelegate transaction.\npub(crate) fn new_undelegate_args<A: Into<U512>>(\n    delegator: PublicKey,\n    validator: PublicKey,\n    amount: A,\n) -> Result<RuntimeArgs, CLValueError> {\n    let mut args = RuntimeArgs::new();\n    UNDELEGATE_ARG_DELEGATOR.insert(&mut args, delegator)?;\n    UNDELEGATE_ARG_VALIDATOR.insert(&mut args, validator)?;\n    UNDELEGATE_ARG_AMOUNT.insert(&mut args, amount.into())?;\n    Ok(args)\n}\n\n/// Creates a `RuntimeArgs` suitable for use in a redelegate transaction.\npub(crate) fn new_redelegate_args<A: Into<U512>>(\n    delegator: PublicKey,\n    validator: PublicKey,\n    amount: A,\n    new_validator: PublicKey,\n) -> Result<RuntimeArgs, CLValueError> {\n    let mut args = RuntimeArgs::new();\n    REDELEGATE_ARG_DELEGATOR.insert(&mut args, delegator)?;\n    REDELEGATE_ARG_VALIDATOR.insert(&mut args, validator)?;\n    REDELEGATE_ARG_AMOUNT.insert(&mut args, amount.into())?;\n    REDELEGATE_ARG_NEW_VALIDATOR.insert(&mut args, new_validator)?;\n    Ok(args)\n}\n"
  },
  {
    "path": "types/src/transaction/transaction_v1/errors_v1.rs",
    "content": "use alloc::{boxed::Box, string::String, vec::Vec};\nuse core::{\n    array::TryFromSliceError,\n    fmt::{self, Display, Formatter},\n};\n#[cfg(feature = \"std\")]\nuse std::error::Error as StdError;\n#[cfg(any(feature = \"testing\", test))]\nuse strum::EnumIter;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse serde::Serialize;\n\n#[cfg(doc)]\nuse super::TransactionV1;\nuse crate::{\n    addressable_entity::ContractRuntimeTag, bytesrepr, crypto, CLType, DisplayIter, PricingMode,\n    TimeDiff, Timestamp, TransactionEntryPoint, TransactionInvocationTarget, U512,\n};\n\n#[derive(Clone, Eq, PartialEq, Debug)]\n#[cfg_attr(feature = \"std\", derive(Serialize))]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub enum FieldDeserializationError {\n    IndexNotExists { index: u16 },\n    FromBytesError { index: u16, error: bytesrepr::Error },\n    LingeringBytesInField { index: u16 },\n}\n\n// This impl is provided due to a completeness test that we\n// have in binary-port. It checks if all variants of this\n// error have corresponding binary port error codes\n#[cfg(any(feature = \"testing\", test))]\nimpl Default for FieldDeserializationError {\n    fn default() -> Self {\n        Self::IndexNotExists { index: 0 }\n    }\n}\n\n/// Returned when a [`TransactionV1`] fails validation.\n#[derive(Clone, Eq, PartialEq, Debug)]\n#[cfg_attr(feature = \"std\", derive(Serialize))]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[non_exhaustive]\n// This derive should not be removed due to a completeness\n// test that we have in binary-port. 
It checks if all variants\n// of this error have corresponding binary port error codes\n#[cfg_attr(any(feature = \"testing\", test), derive(EnumIter))]\npub enum InvalidTransaction {\n    /// Invalid chain name.\n    InvalidChainName {\n        /// The expected chain name.\n        expected: String,\n        /// The transaction's chain name.\n        got: String,\n    },\n\n    /// Transaction is too large.\n    ExcessiveSize(ExcessiveSizeErrorV1),\n\n    /// Excessive time-to-live.\n    ExcessiveTimeToLive {\n        /// The time-to-live limit.\n        max_ttl: TimeDiff,\n        /// The transaction's time-to-live.\n        got: TimeDiff,\n    },\n\n    /// Transaction's timestamp is in the future.\n    TimestampInFuture {\n        /// The node's timestamp when validating the transaction.\n        validation_timestamp: Timestamp,\n        /// Any configured leeway added to `validation_timestamp`.\n        timestamp_leeway: TimeDiff,\n        /// The transaction's timestamp.\n        got: Timestamp,\n    },\n\n    /// The provided body hash does not match the actual hash of the body.\n    InvalidBodyHash,\n\n    /// The provided transaction hash does not match the actual hash of the transaction.\n    InvalidTransactionHash,\n\n    /// The transaction has no approvals.\n    EmptyApprovals,\n\n    /// Invalid approval.\n    InvalidApproval {\n        /// The index of the approval at fault.\n        index: usize,\n        /// The approval verification error.\n        error: crypto::Error,\n    },\n\n    /// Excessive length of transaction's runtime args.\n    ExcessiveArgsLength {\n        /// The byte size limit of runtime arguments.\n        max_length: usize,\n        /// The length of the transaction's runtime arguments.\n        got: usize,\n    },\n\n    /// The amount of approvals on the transaction exceeds the configured limit.\n    ExcessiveApprovals {\n        /// The chainspec limit for max_associated_keys.\n        max_associated_keys: u32,\n        /// 
Number of approvals on the transaction.\n        got: u32,\n    },\n\n    /// The payment amount associated with the transaction exceeds the block gas limit.\n    ExceedsBlockGasLimit {\n        /// Configured block gas limit.\n        block_gas_limit: u64,\n        /// The transaction's calculated gas limit.\n        got: Box<U512>,\n    },\n\n    /// Missing a required runtime arg.\n    MissingArg {\n        /// The name of the missing arg.\n        arg_name: String,\n    },\n\n    /// Given runtime arg is not one of the expected types.\n    UnexpectedArgType {\n        /// The name of the invalid arg.\n        arg_name: String,\n        /// The choice of valid types for the given runtime arg.\n        expected: Vec<String>,\n        /// The provided type of the given runtime arg.\n        got: String,\n    },\n\n    /// Failed to deserialize the given runtime arg.\n    InvalidArg {\n        /// The name of the invalid arg.\n        arg_name: String,\n        /// The deserialization error.\n        error: bytesrepr::Error,\n    },\n\n    /// Insufficient transfer amount.\n    InsufficientTransferAmount {\n        /// The minimum transfer amount.\n        minimum: u64,\n        /// The attempted transfer amount.\n        attempted: U512,\n    },\n\n    /// Insufficient burn amount.\n    InsufficientBurnAmount {\n        /// The minimum burn amount.\n        minimum: u64,\n        /// The attempted burn amount.\n        attempted: U512,\n    },\n\n    /// The entry point for this transaction target cannot be `call`.\n    EntryPointCannotBeCall,\n    /// The entry point for this transaction target cannot be `TransactionEntryPoint::Custom`.\n    EntryPointCannotBeCustom {\n        /// The invalid entry point.\n        entry_point: TransactionEntryPoint,\n    },\n    /// The entry point for this transaction target must be `TransactionEntryPoint::Custom`.\n    EntryPointMustBeCustom {\n        /// The invalid entry point.\n        entry_point: TransactionEntryPoint,\n  
  },\n    /// The entry point for this transaction target must be `TransactionEntryPoint::Call`.\n    EntryPointMustBeCall {\n        /// The invalid entry point.\n        entry_point: TransactionEntryPoint,\n    },\n    /// The transaction has empty module bytes.\n    EmptyModuleBytes,\n    /// Attempt to factor the amount over the gas_price failed.\n    GasPriceConversion {\n        /// The base amount.\n        amount: u64,\n        /// The attempted gas price.\n        gas_price: u8,\n    },\n    /// Unable to calculate gas limit.\n    UnableToCalculateGasLimit,\n    /// Unable to calculate gas cost.\n    UnableToCalculateGasCost,\n    /// Invalid combination of pricing handling and pricing mode.\n    InvalidPricingMode {\n        /// The pricing mode as specified by the transaction.\n        price_mode: PricingMode,\n    },\n    /// The transaction provided is not supported.\n    InvalidTransactionLane(u8),\n    /// Could not match v1 with transaction lane\n    NoLaneMatch,\n    /// Gas price tolerance too low.\n    GasPriceToleranceTooLow {\n        /// The minimum gas price tolerance.\n        min_gas_price_tolerance: u8,\n        /// The provided gas price tolerance.\n        provided_gas_price_tolerance: u8,\n    },\n    /// Error when trying to deserialize one of the transactionV1 payload fields.\n    CouldNotDeserializeField {\n        /// Underlying reason why the deserialization failed\n        error: FieldDeserializationError,\n    },\n\n    /// Unable to calculate hash for payloads transaction.\n    CannotCalculateFieldsHash,\n\n    /// The transactions field map had entries that were unexpected\n    UnexpectedTransactionFieldEntries,\n    /// The transaction requires named arguments.\n    ExpectedNamedArguments,\n    /// The transaction required bytes arguments.\n    ExpectedBytesArguments,\n    /// The transaction runtime is invalid.\n    InvalidTransactionRuntime {\n        /// The expected runtime as specified by the chainspec.\n        expected: 
ContractRuntimeTag,\n    },\n    /// The transaction is missing a seed field.\n    MissingSeed,\n    // Pricing mode not implemented yet\n    PricingModeNotSupported,\n    // Invalid payment amount.\n    InvalidPaymentAmount,\n    /// Unexpected entry point detected.\n    UnexpectedEntryPoint {\n        entry_point: TransactionEntryPoint,\n        lane_id: u8,\n    },\n    /// Could not serialize transaction\n    CouldNotSerializeTransaction,\n\n    /// Insufficient value for amount argument.\n    InsufficientAmount {\n        /// The attempted amount.\n        attempted: U512,\n    },\n\n    /// Invalid minimum delegation amount.\n    InvalidMinimumDelegationAmount {\n        /// The lowest allowed amount.\n        floor: u64,\n        /// The attempted amount.\n        attempted: u64,\n    },\n\n    /// Invalid maximum delegation amount.\n    InvalidMaximumDelegationAmount {\n        /// The highest allowed amount.\n        ceiling: u64,\n        /// The attempted amount.\n        attempted: u64,\n    },\n\n    /// Invalid reserved slots.\n    InvalidReservedSlots {\n        /// The highest allowed amount.\n        ceiling: u32,\n        /// The attempted amount.\n        attempted: u64,\n    },\n\n    /// Invalid delegation amount.\n    InvalidDelegationAmount {\n        /// The highest allowed amount.\n        ceiling: u64,\n        /// The attempted amount.\n        attempted: U512,\n    },\n    /// The transaction invocation target is unsupported under V2 runtime.\n    ///\n    /// This error is returned when the transaction invocation target is not supported by the\n    /// current runtime version.\n    UnsupportedInvocationTarget {\n        id: Option<TransactionInvocationTarget>,\n    },\n}\n\nimpl Display for InvalidTransaction {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            InvalidTransaction::InvalidChainName { expected, got } => {\n                                                write!(\n              
                                      formatter,\n                                                    \"invalid chain name: expected {expected}, got {got}\"\n                                                )\n                                            }\n            InvalidTransaction::ExcessiveSize(error) => {\n                                                write!(formatter, \"transaction size too large: {error}\")\n                                            }\n            InvalidTransaction::ExcessiveTimeToLive { max_ttl, got } => {\n                                                write!(\n                                                    formatter,\n                                                    \"time-to-live of {got} exceeds limit of {max_ttl}\"\n                                                )\n                                            }\n            InvalidTransaction::TimestampInFuture {\n                                                validation_timestamp,\n                                                timestamp_leeway,\n                                                got,\n                                            } => {\n                                                write!(\n                                                    formatter,\n                                                    \"timestamp of {got} is later than node's validation timestamp of \\\n                    {validation_timestamp} plus leeway of {timestamp_leeway}\"\n                                                )\n                                            }\n            InvalidTransaction::InvalidBodyHash => {\n                                                write!(\n                                                    formatter,\n                                                    \"the provided hash does not match the actual hash of the transaction body\"\n                                                )\n                                            }\n         
   InvalidTransaction::InvalidTransactionHash => {\n                                                write!(\n                                                    formatter,\n                                                    \"the provided hash does not match the actual hash of the transaction\"\n                                                )\n                                            }\n            InvalidTransaction::EmptyApprovals => {\n                                                write!(formatter, \"the transaction has no approvals\")\n                                            }\n            InvalidTransaction::InvalidApproval { index, error } => {\n                                                write!(\n                                                    formatter,\n                                                    \"the transaction approval at index {index} is invalid: {error}\"\n                                                )\n                                            }\n            InvalidTransaction::ExcessiveArgsLength { max_length, got } => {\n                                                write!(\n                                                    formatter,\n                                                    \"serialized transaction runtime args of {got} bytes exceeds limit of \\\n                    {max_length} bytes\"\n                                                )\n                                            }\n            InvalidTransaction::ExcessiveApprovals {\n                                                max_associated_keys,\n                                                got,\n                                            } => {\n                                                write!(\n                                                    formatter,\n                                                    \"number of transaction approvals {got} exceeds the maximum number of \\\n                    associated keys 
{max_associated_keys}\",\n                                                )\n                                            }\n            InvalidTransaction::ExceedsBlockGasLimit {\n                                                block_gas_limit,\n                                                got,\n                                            } => {\n                                                write!(\n                                                    formatter,\n                                                    \"payment amount of {got} exceeds the block gas limit of {block_gas_limit}\"\n                                                )\n                                            }\n            InvalidTransaction::MissingArg { arg_name } => {\n                                                write!(formatter, \"missing required runtime argument '{arg_name}'\")\n                                            }\n            InvalidTransaction::UnexpectedArgType {\n                                                arg_name,\n                                                expected,\n                                                got,\n                                            } => {\n                                                write!(\n                                                    formatter,\n                                                    \"expected type of '{arg_name}' runtime argument to be one of {}, but got {got}\",\n                                                    DisplayIter::new(expected)\n                                                )\n                                            }\n            InvalidTransaction::InvalidArg { arg_name, error } => {\n                                                write!(formatter, \"invalid runtime argument '{arg_name}': {error}\")\n                                            }\n            InvalidTransaction::InsufficientTransferAmount { minimum, attempted } => {\n                                    
            write!(\n                                                    formatter,\n                                                    \"insufficient transfer amount; minimum: {minimum} attempted: {attempted}\"\n                                                )\n                                            }\n            InvalidTransaction::EntryPointCannotBeCall => {\n                                                write!(formatter, \"entry point cannot be call\")\n                                            }\n            InvalidTransaction::EntryPointCannotBeCustom { entry_point } => {\n                                                write!(formatter, \"entry point cannot be custom: {entry_point}\")\n                                            }\n            InvalidTransaction::EntryPointMustBeCustom { entry_point } => {\n                                                write!(formatter, \"entry point must be custom: {entry_point}\")\n                                            }\n            InvalidTransaction::EmptyModuleBytes => {\n                                                write!(formatter, \"the transaction has empty module bytes\")\n                                            }\n            InvalidTransaction::GasPriceConversion { amount, gas_price } => {\n                                                write!(\n                                                    formatter,\n                                                    \"failed to divide the amount {} by the gas price {}\",\n                                                    amount, gas_price\n                                                )\n                                            }\n            InvalidTransaction::UnableToCalculateGasLimit => {\n                                                write!(formatter, \"unable to calculate gas limit\", )\n                                            }\n            InvalidTransaction::UnableToCalculateGasCost => {\n                                
                write!(formatter, \"unable to calculate gas cost\", )\n                                            }\n            InvalidTransaction::InvalidPricingMode { price_mode } => {\n                                                write!(\n                                                    formatter,\n                                                    \"received a transaction with an invalid mode {price_mode}\"\n                                                )\n                                            }\n            InvalidTransaction::InvalidTransactionLane(kind) => {\n                                                write!(\n                                                    formatter,\n                                                    \"received a transaction with an invalid kind {kind}\"\n                                                )\n                                            }\n            InvalidTransaction::GasPriceToleranceTooLow {\n                                                min_gas_price_tolerance,\n                                                provided_gas_price_tolerance,\n                                            } => {\n                                                write!(\n                                                    formatter,\n                                                    \"received a transaction with gas price tolerance {} but this chain will only go as low as {}\",\n                                                    provided_gas_price_tolerance, min_gas_price_tolerance\n                                                )\n                                            }\n            InvalidTransaction::CouldNotDeserializeField { error } => {\n                                                match error {\n                                                    FieldDeserializationError::IndexNotExists { index } => write!(\n                                                        formatter,\n                       
                                 \"tried to deserialize a field under index {} but it is not present in the payload\",\n                                                        index\n                                                    ),\n                                                    FieldDeserializationError::FromBytesError { index, error } => write!(\n                                                        formatter,\n                                                        \"tried to deserialize a field under index {} but it failed with error: {}\",\n                                                        index,\n                                                        error\n                                                    ),\n                                                    FieldDeserializationError::LingeringBytesInField { index } => write!(\n                                                        formatter,\n                                                        \"tried to deserialize a field under index {} but after deserialization there were still bytes left\",\n                                                        index,\n                                                    ),\n                                                }\n                                            }\n            InvalidTransaction::CannotCalculateFieldsHash => write!(\n                                                formatter,\n                                                \"cannot calculate a hash digest for the transaction\"\n                                            ),\n            InvalidTransaction::EntryPointMustBeCall { entry_point } => {\n                                        write!(formatter, \"entry point must be call: {entry_point}\")\n                                    }\n            InvalidTransaction::NoLaneMatch => write!(formatter, \"Could not match any lane to the specified transaction\"),\n            
InvalidTransaction::UnexpectedTransactionFieldEntries => write!(formatter, \"There were entries in the fields map of the payload that could not be matched\"),\n            InvalidTransaction::ExpectedNamedArguments => {\n                                                write!(formatter, \"transaction requires named arguments\")\n                                            }\n            InvalidTransaction::ExpectedBytesArguments => {\n                                                write!(formatter, \"transaction requires bytes arguments\")\n                                            }\n            InvalidTransaction::InvalidTransactionRuntime { expected } => {\n                                                write!(\n                                                    formatter,\n                                                    \"invalid transaction runtime: expected {expected}\"\n                                                )\n                                            }\n            InvalidTransaction::MissingSeed => {\n                                                write!(formatter, \"missing seed for install or upgrade\")\n                                            }\n            InvalidTransaction::PricingModeNotSupported => {\n                                                write!(formatter, \"Pricing mode not supported\")\n                                            }\n            InvalidTransaction::InvalidPaymentAmount => {\n                                                write!(formatter, \"invalid payment amount\")\n                                            }\n            InvalidTransaction::UnexpectedEntryPoint {\n                                                entry_point, lane_id\n                                            } => {\n                                                write!(formatter, \"unexpected entry_point {} lane_id {}\", entry_point, lane_id)\n                                            }\n            
InvalidTransaction::InsufficientBurnAmount { minimum, attempted } => {\n                                                write!(formatter, \"insufficient burn amount: {minimum} {attempted}\")\n                                            }\n            InvalidTransaction::CouldNotSerializeTransaction => write!(formatter, \"Could not serialize transaction.\"),\n            InvalidTransaction::InsufficientAmount { attempted } => {\n                                        write!(\n                                            formatter,\n                                            \"the value provided for the argument ({attempted}) named amount is too low.\",\n                                        )\n                                    }\n            InvalidTransaction::InvalidMinimumDelegationAmount { floor, attempted } => {\n                                        write!(\n                                            formatter,\n                                            \"the value provided for the minimum delegation amount ({attempted}) cannot be lower than {floor}.\",\n                                        )}\n            InvalidTransaction::InvalidMaximumDelegationAmount { ceiling, attempted } => {\n                                        write!(\n                                            formatter,\n                                            \"the value provided for the maximum delegation amount ({ceiling}) cannot be higher than {attempted}.\",\n                                        )}\n            InvalidTransaction::InvalidReservedSlots { ceiling, attempted } => {\n                                        write!(\n                                            formatter,\n                                            \"the value provided for reserved slots ({ceiling}) cannot be higher than {attempted}.\",\n                                        )}\n            InvalidTransaction::InvalidDelegationAmount { ceiling, attempted } => {\n                            
    write!(\n                                formatter,\n                                \"the value provided for the delegation amount ({attempted}) cannot be higher than {ceiling}.\",\n                                )}\n            InvalidTransaction::UnsupportedInvocationTarget { id: Some(target) } => {\n                        write!(\n                            formatter,\n                            \"the transaction invocation target is unsupported under V2 runtime: {target:?}\",\n                        )\n                    }\n            InvalidTransaction::UnsupportedInvocationTarget { id :None} => {\n                        write!(\n                            formatter,\n                            \"the transaction invocation target is unsupported under V2 runtime\",\n                        )\n                    }\n        }\n    }\n}\n\nimpl From<ExcessiveSizeErrorV1> for InvalidTransaction {\n    fn from(error: ExcessiveSizeErrorV1) -> Self {\n        InvalidTransaction::ExcessiveSize(error)\n    }\n}\n\n#[cfg(feature = \"std\")]\nimpl StdError for InvalidTransaction {\n    fn source(&self) -> Option<&(dyn StdError + 'static)> {\n        match self {\n            InvalidTransaction::InvalidApproval { error, .. } => Some(error),\n            InvalidTransaction::InvalidArg { error, .. } => Some(error),\n            InvalidTransaction::InvalidChainName { .. }\n            | InvalidTransaction::ExcessiveSize(_)\n            | InvalidTransaction::ExcessiveTimeToLive { .. }\n            | InvalidTransaction::TimestampInFuture { .. }\n            | InvalidTransaction::InvalidBodyHash\n            | InvalidTransaction::InvalidTransactionHash\n            | InvalidTransaction::EmptyApprovals\n            | InvalidTransaction::ExcessiveArgsLength { .. }\n            | InvalidTransaction::ExcessiveApprovals { .. }\n            | InvalidTransaction::ExceedsBlockGasLimit { .. }\n            | InvalidTransaction::MissingArg { .. 
}\n            | InvalidTransaction::UnexpectedArgType { .. }\n            | InvalidTransaction::InsufficientTransferAmount { .. }\n            | InvalidTransaction::EntryPointCannotBeCall\n            | InvalidTransaction::EntryPointCannotBeCustom { .. }\n            | InvalidTransaction::EntryPointMustBeCustom { .. }\n            | InvalidTransaction::EntryPointMustBeCall { .. }\n            | InvalidTransaction::EmptyModuleBytes\n            | InvalidTransaction::GasPriceConversion { .. }\n            | InvalidTransaction::UnableToCalculateGasLimit\n            | InvalidTransaction::UnableToCalculateGasCost\n            | InvalidTransaction::InvalidPricingMode { .. }\n            | InvalidTransaction::GasPriceToleranceTooLow { .. }\n            | InvalidTransaction::InvalidTransactionLane(_)\n            | InvalidTransaction::CannotCalculateFieldsHash\n            | InvalidTransaction::NoLaneMatch\n            | InvalidTransaction::UnexpectedTransactionFieldEntries => None,\n            InvalidTransaction::CouldNotDeserializeField { error } => match error {\n                FieldDeserializationError::IndexNotExists { .. }\n                | FieldDeserializationError::LingeringBytesInField { .. } => None,\n                FieldDeserializationError::FromBytesError { error, .. } => Some(error),\n            },\n            InvalidTransaction::ExpectedNamedArguments\n            | InvalidTransaction::ExpectedBytesArguments\n            | InvalidTransaction::InvalidTransactionRuntime { .. }\n            | InvalidTransaction::MissingSeed\n            | InvalidTransaction::PricingModeNotSupported\n            | InvalidTransaction::InvalidPaymentAmount\n            | InvalidTransaction::InsufficientBurnAmount { .. }\n            | InvalidTransaction::UnexpectedEntryPoint { .. }\n            | InvalidTransaction::CouldNotSerializeTransaction\n            | InvalidTransaction::InsufficientAmount { .. }\n            | InvalidTransaction::InvalidMinimumDelegationAmount { .. 
}\n            | InvalidTransaction::InvalidMaximumDelegationAmount { .. }\n            | InvalidTransaction::InvalidReservedSlots { .. }\n            | InvalidTransaction::InvalidDelegationAmount { .. }\n            | InvalidTransaction::UnsupportedInvocationTarget { .. } => None,\n        }\n    }\n}\n\nimpl InvalidTransaction {\n    pub fn unexpected_arg_type(arg_name: String, expected: Vec<CLType>, got: CLType) -> Self {\n        let expected = expected.iter().map(|el| format!(\"{}\", el)).collect();\n        InvalidTransaction::UnexpectedArgType {\n            arg_name,\n            expected,\n            got: format!(\"{}\", got),\n        }\n    }\n}\n/// Error returned when a transaction is too large.\n#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug, Serialize)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n//Default is needed only in testing to meet EnumIter needs\n#[cfg_attr(any(feature = \"testing\", test), derive(Default))]\npub struct ExcessiveSizeErrorV1 {\n    /// The maximum permitted serialized transaction size, in bytes.\n    pub max_transaction_size: u32,\n    /// The serialized size of the transaction provided, in bytes.\n    pub actual_transaction_size: usize,\n}\n\nimpl Display for ExcessiveSizeErrorV1 {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(\n            formatter,\n            \"transaction size of {} bytes exceeds limit of {}\",\n            self.actual_transaction_size, self.max_transaction_size\n        )\n    }\n}\n\n#[cfg(feature = \"std\")]\nimpl StdError for ExcessiveSizeErrorV1 {}\n\n/// Errors other than validation failures relating to Transactions.\n#[derive(Debug)]\n#[non_exhaustive]\npub enum ErrorV1 {\n    /// Error while encoding to JSON.\n    EncodeToJson(serde_json::Error),\n\n    /// Error while decoding from JSON.\n    DecodeFromJson(DecodeFromJsonErrorV1),\n\n    /// Unable to calculate payment.\n    InvalidPayment,\n}\n\nimpl From<serde_json::Error> for 
ErrorV1 {\n    fn from(error: serde_json::Error) -> Self {\n        ErrorV1::EncodeToJson(error)\n    }\n}\n\nimpl From<DecodeFromJsonErrorV1> for ErrorV1 {\n    fn from(error: DecodeFromJsonErrorV1) -> Self {\n        ErrorV1::DecodeFromJson(error)\n    }\n}\n\nimpl Display for ErrorV1 {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            ErrorV1::EncodeToJson(error) => {\n                write!(formatter, \"encoding to json: {}\", error)\n            }\n            ErrorV1::DecodeFromJson(error) => {\n                write!(formatter, \"decoding from json: {}\", error)\n            }\n            ErrorV1::InvalidPayment => write!(formatter, \"invalid payment\"),\n        }\n    }\n}\n\n#[cfg(feature = \"std\")]\nimpl StdError for ErrorV1 {\n    fn source(&self) -> Option<&(dyn StdError + 'static)> {\n        match self {\n            ErrorV1::EncodeToJson(error) => Some(error),\n            ErrorV1::DecodeFromJson(error) => Some(error),\n            ErrorV1::InvalidPayment => None,\n        }\n    }\n}\n\n/// Error while decoding a `TransactionV1` from JSON.\n#[derive(Debug)]\n#[non_exhaustive]\npub enum DecodeFromJsonErrorV1 {\n    /// Failed to decode from base 16.\n    FromHex(base16::DecodeError),\n\n    /// Failed to convert slice to array.\n    TryFromSlice(TryFromSliceError),\n}\n\nimpl From<base16::DecodeError> for DecodeFromJsonErrorV1 {\n    fn from(error: base16::DecodeError) -> Self {\n        DecodeFromJsonErrorV1::FromHex(error)\n    }\n}\n\nimpl From<TryFromSliceError> for DecodeFromJsonErrorV1 {\n    fn from(error: TryFromSliceError) -> Self {\n        DecodeFromJsonErrorV1::TryFromSlice(error)\n    }\n}\n\nimpl Display for DecodeFromJsonErrorV1 {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            DecodeFromJsonErrorV1::FromHex(error) => {\n                write!(formatter, \"{}\", error)\n            }\n            
DecodeFromJsonErrorV1::TryFromSlice(error) => {\n                write!(formatter, \"{}\", error)\n            }\n        }\n    }\n}\n\n#[cfg(feature = \"std\")]\nimpl StdError for DecodeFromJsonErrorV1 {\n    fn source(&self) -> Option<&(dyn StdError + 'static)> {\n        match self {\n            DecodeFromJsonErrorV1::FromHex(error) => Some(error),\n            DecodeFromJsonErrorV1::TryFromSlice(error) => Some(error),\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/transaction_v1/fields_container.rs",
    "content": "#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\n#[cfg(any(feature = \"std\", feature = \"testing\", test))]\nuse crate::{\n    bytesrepr::{Bytes, ToBytes},\n    transaction::transaction_v1::*,\n    TransactionEntryPoint, TransactionScheduling, TransactionTarget,\n};\n#[cfg(any(feature = \"testing\", test))]\nuse crate::{\n    PublicKey, RuntimeArgs, TransactionInvocationTarget, TransferTarget, AUCTION_LANE_ID,\n    INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID,\n};\n#[cfg(any(feature = \"std\", feature = \"testing\", test))]\nuse alloc::collections::BTreeMap;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{Rng, RngCore};\n\n#[cfg(any(feature = \"std\", feature = \"testing\", feature = \"gens\", test))]\npub(crate) const ARGS_MAP_KEY: u16 = 0;\n#[cfg(any(feature = \"std\", feature = \"testing\", feature = \"gens\", test))]\npub(crate) const TARGET_MAP_KEY: u16 = 1;\n#[cfg(any(feature = \"std\", feature = \"testing\", feature = \"gens\", test))]\npub(crate) const ENTRY_POINT_MAP_KEY: u16 = 2;\n#[cfg(any(feature = \"std\", feature = \"testing\", feature = \"gens\", test))]\npub(crate) const SCHEDULING_MAP_KEY: u16 = 3;\n\n#[cfg(any(feature = \"std\", feature = \"testing\", feature = \"gens\", test))]\n#[derive(Clone, Eq, PartialEq, Debug)]\npub(crate) enum FieldsContainerError {\n    CouldNotSerializeField { field_index: u16 },\n}\n\n#[cfg(any(feature = \"std\", feature = \"testing\", feature = \"gens\", test))]\npub(crate) struct FieldsContainer {\n    pub(super) args: TransactionArgs,\n    pub(super) target: TransactionTarget,\n    pub(super) entry_point: TransactionEntryPoint,\n    pub(super) scheduling: TransactionScheduling,\n}\n\n#[cfg(any(feature = \"std\", feature = \"testing\", feature = \"gens\", test))]\nimpl FieldsContainer {\n    pub(crate) fn new(\n        args: TransactionArgs,\n        target: TransactionTarget,\n        entry_point: TransactionEntryPoint,\n        scheduling: TransactionScheduling,\n    ) -> Self 
{\n        FieldsContainer {\n            args,\n            target,\n            entry_point,\n            scheduling,\n        }\n    }\n\n    pub(crate) fn to_map(&self) -> Result<BTreeMap<u16, Bytes>, FieldsContainerError> {\n        let mut map: BTreeMap<u16, Bytes> = BTreeMap::new();\n        map.insert(\n            ARGS_MAP_KEY,\n            self.args.to_bytes().map(Into::into).map_err(|_| {\n                FieldsContainerError::CouldNotSerializeField {\n                    field_index: ARGS_MAP_KEY,\n                }\n            })?,\n        );\n        map.insert(\n            TARGET_MAP_KEY,\n            self.target.to_bytes().map(Into::into).map_err(|_| {\n                FieldsContainerError::CouldNotSerializeField {\n                    field_index: TARGET_MAP_KEY,\n                }\n            })?,\n        );\n        map.insert(\n            ENTRY_POINT_MAP_KEY,\n            self.entry_point.to_bytes().map(Into::into).map_err(|_| {\n                FieldsContainerError::CouldNotSerializeField {\n                    field_index: ENTRY_POINT_MAP_KEY,\n                }\n            })?,\n        );\n        map.insert(\n            SCHEDULING_MAP_KEY,\n            self.scheduling.to_bytes().map(Into::into).map_err(|_| {\n                FieldsContainerError::CouldNotSerializeField {\n                    field_index: SCHEDULING_MAP_KEY,\n                }\n            })?,\n        );\n        Ok(map)\n    }\n\n    /// Returns a random `FieldsContainer`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub(crate) fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..8) {\n            0 => {\n                let amount = rng.gen_range(2_500_000_000..=u64::MAX);\n                let maybe_source = if rng.gen() { Some(rng.gen()) } else { None };\n                let target = TransferTarget::random(rng);\n                let maybe_id = rng.gen::<bool>().then(|| rng.gen());\n                let args = 
arg_handling::new_transfer_args(amount, maybe_source, target, maybe_id)\n                    .unwrap();\n                FieldsContainer::new(\n                    TransactionArgs::Named(args),\n                    TransactionTarget::Native,\n                    TransactionEntryPoint::Transfer,\n                    TransactionScheduling::random(rng),\n                )\n            }\n            1 => {\n                let public_key = PublicKey::random(rng);\n                let delegation_rate = rng.gen();\n                let amount = rng.gen::<u64>();\n                let minimum_delegation_amount = rng.gen::<bool>().then(|| rng.gen());\n                let maximum_delegation_amount =\n                    minimum_delegation_amount.map(|minimum_delegation_amount| {\n                        minimum_delegation_amount + rng.gen::<u32>() as u64\n                    });\n                let reserved_slots = rng.gen::<bool>().then(|| rng.gen::<u32>());\n                let args = arg_handling::new_add_bid_args(\n                    public_key,\n                    delegation_rate,\n                    amount,\n                    minimum_delegation_amount,\n                    maximum_delegation_amount,\n                    reserved_slots,\n                )\n                .unwrap();\n                FieldsContainer::new(\n                    TransactionArgs::Named(args),\n                    TransactionTarget::Native,\n                    TransactionEntryPoint::AddBid,\n                    TransactionScheduling::random(rng),\n                )\n            }\n            2 => {\n                let public_key = PublicKey::random(rng);\n                let amount = rng.gen::<u64>();\n                let args = arg_handling::new_withdraw_bid_args(public_key, amount).unwrap();\n                FieldsContainer::new(\n                    TransactionArgs::Named(args),\n                    TransactionTarget::Native,\n                    
TransactionEntryPoint::WithdrawBid,\n                    TransactionScheduling::random(rng),\n                )\n            }\n            3 => {\n                let delegator = PublicKey::random(rng);\n                let validator = PublicKey::random(rng);\n                let amount = rng.gen::<u64>();\n                let args = arg_handling::new_delegate_args(delegator, validator, amount).unwrap();\n                FieldsContainer::new(\n                    TransactionArgs::Named(args),\n                    TransactionTarget::Native,\n                    TransactionEntryPoint::Delegate,\n                    TransactionScheduling::random(rng),\n                )\n            }\n            4 => {\n                let delegator = PublicKey::random(rng);\n                let validator = PublicKey::random(rng);\n                let amount = rng.gen::<u64>();\n                let args = arg_handling::new_undelegate_args(delegator, validator, amount).unwrap();\n                FieldsContainer::new(\n                    TransactionArgs::Named(args),\n                    TransactionTarget::Native,\n                    TransactionEntryPoint::Undelegate,\n                    TransactionScheduling::random(rng),\n                )\n            }\n            5 => {\n                let delegator = PublicKey::random(rng);\n                let validator = PublicKey::random(rng);\n                let amount = rng.gen::<u64>();\n                let new_validator = PublicKey::random(rng);\n                let args =\n                    arg_handling::new_redelegate_args(delegator, validator, amount, new_validator)\n                        .unwrap();\n                FieldsContainer::new(\n                    TransactionArgs::Named(args),\n                    TransactionTarget::Native,\n                    TransactionEntryPoint::Redelegate,\n                    TransactionScheduling::random(rng),\n                )\n            }\n            6 => 
Self::random_standard(rng),\n            7 => {\n                let mut buffer = vec![0u8; rng.gen_range(1..100)];\n                rng.fill_bytes(buffer.as_mut());\n                let is_install_upgrade = rng.gen();\n                let target = TransactionTarget::Session {\n                    is_install_upgrade,\n                    module_bytes: Bytes::from(buffer),\n                    runtime: crate::TransactionRuntimeParams::VmCasperV1,\n                };\n                FieldsContainer::new(\n                    TransactionArgs::Named(RuntimeArgs::random(rng)),\n                    target,\n                    TransactionEntryPoint::Call,\n                    TransactionScheduling::random(rng),\n                )\n            }\n            _ => unreachable!(),\n        }\n    }\n\n    /// Returns a random `FieldsContainer`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random_of_lane(rng: &mut TestRng, lane_id: u8) -> Self {\n        match lane_id {\n            MINT_LANE_ID => Self::random_transfer(rng),\n            AUCTION_LANE_ID => Self::random_staking(rng),\n            INSTALL_UPGRADE_LANE_ID => Self::random_install_upgrade(rng),\n            _ => Self::random_standard(rng),\n        }\n    }\n\n    #[cfg(any(feature = \"testing\", test))]\n    fn random_transfer(rng: &mut TestRng) -> Self {\n        let amount = rng.gen_range(2_500_000_000..=u64::MAX);\n        let maybe_source = if rng.gen() { Some(rng.gen()) } else { None };\n        let target = TransferTarget::random(rng);\n        let maybe_id = rng.gen::<bool>().then(|| rng.gen());\n        let args = arg_handling::new_transfer_args(amount, maybe_source, target, maybe_id).unwrap();\n        FieldsContainer::new(\n            TransactionArgs::Named(args),\n            TransactionTarget::Native,\n            TransactionEntryPoint::Transfer,\n            TransactionScheduling::random(rng),\n        )\n    }\n\n    #[cfg(any(feature = \"testing\", test))]\n    fn 
random_install_upgrade(rng: &mut TestRng) -> Self {\n        let target = TransactionTarget::Session {\n            module_bytes: Bytes::from(rng.random_vec(0..100)),\n            runtime: crate::TransactionRuntimeParams::VmCasperV1,\n            is_install_upgrade: true,\n        };\n        FieldsContainer::new(\n            TransactionArgs::Named(RuntimeArgs::random(rng)),\n            target,\n            TransactionEntryPoint::Call,\n            TransactionScheduling::random(rng),\n        )\n    }\n\n    #[cfg(any(feature = \"testing\", test))]\n    fn random_staking(rng: &mut TestRng) -> Self {\n        let public_key = PublicKey::random(rng);\n        let delegation_rate = rng.gen();\n        let amount = rng.gen::<u64>();\n        let minimum_delegation_amount = rng.gen::<bool>().then(|| rng.gen());\n        let maximum_delegation_amount = minimum_delegation_amount\n            .map(|minimum_delegation_amount| minimum_delegation_amount + rng.gen::<u32>() as u64);\n        let reserved_slots = rng.gen::<bool>().then(|| rng.gen::<u32>());\n        let args = arg_handling::new_add_bid_args(\n            public_key,\n            delegation_rate,\n            amount,\n            minimum_delegation_amount,\n            maximum_delegation_amount,\n            reserved_slots,\n        )\n        .unwrap();\n        FieldsContainer::new(\n            TransactionArgs::Named(args),\n            TransactionTarget::Native,\n            TransactionEntryPoint::AddBid,\n            TransactionScheduling::random(rng),\n        )\n    }\n\n    #[cfg(any(feature = \"testing\", test))]\n    fn random_standard(rng: &mut TestRng) -> Self {\n        let target = TransactionTarget::Stored {\n            id: TransactionInvocationTarget::random(rng),\n            runtime: crate::transaction::transaction_target::TransactionRuntimeParams::VmCasperV1,\n        };\n        FieldsContainer::new(\n            TransactionArgs::Named(RuntimeArgs::random(rng)),\n            target,\n       
     TransactionEntryPoint::Custom(rng.random_string(1..11)),\n            TransactionScheduling::random(rng),\n        )\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/transaction_v1/transaction_args.rs",
    "content": "use crate::{\n    bytesrepr::{self, Bytes, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    CLTyped, CLValueError, RuntimeArgs,\n};\nuse alloc::{string::String, vec::Vec};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\n#[cfg(any(feature = \"std\", test))]\nuse serde::{Deserialize, Serialize};\n\n/// The arguments of a transaction, which can be either a named set of runtime arguments or a\n/// chunked bytes.\n#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)]\n#[cfg_attr(\n    any(feature = \"std\", test),\n    derive(Serialize, Deserialize),\n    serde(deny_unknown_fields)\n)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"Body of a `TransactionArgs`.\")\n)]\npub enum TransactionArgs {\n    /// Named runtime arguments.\n    Named(RuntimeArgs),\n    /// Bytesrepr bytes.\n    Bytesrepr(Bytes),\n}\n\nimpl TransactionArgs {\n    /// Returns `RuntimeArgs` if the transaction arguments are named.\n    pub fn as_named(&self) -> Option<&RuntimeArgs> {\n        match self {\n            TransactionArgs::Named(args) => Some(args),\n            TransactionArgs::Bytesrepr(_) => None,\n        }\n    }\n\n    /// Returns `RuntimeArgs` if the transaction arguments are mnamed.\n    pub fn into_named(self) -> Option<RuntimeArgs> {\n        match self {\n            TransactionArgs::Named(args) => Some(args),\n            TransactionArgs::Bytesrepr(_) => None,\n        }\n    }\n\n    /// Returns `Bytes` if the transaction arguments are chunked.\n    pub fn into_bytesrepr(self) -> Option<Bytes> {\n        match self {\n            TransactionArgs::Named(_) => None,\n            TransactionArgs::Bytesrepr(bytes) => Some(bytes),\n        }\n    }\n\n    /// Returns `Bytes` if the transaction arguments are bytes.\n    pub fn as_bytesrepr(&self) -> Option<&Bytes> {\n        match 
self {\n            TransactionArgs::Named(_) => None,\n            TransactionArgs::Bytesrepr(bytes) => Some(bytes),\n        }\n    }\n\n    /// Inserts a key-value pair into the named runtime arguments.\n    pub fn insert<K, V>(&mut self, key: K, value: V) -> Result<(), CLValueError>\n    where\n        K: Into<String>,\n        V: CLTyped + ToBytes,\n    {\n        match self {\n            TransactionArgs::Named(args) => {\n                args.insert(key, value)?;\n                Ok(())\n            }\n            TransactionArgs::Bytesrepr(_) => {\n                Err(CLValueError::Serialization(bytesrepr::Error::Formatting))\n            }\n        }\n    }\n\n    /// Returns `true` if the transaction args is [`Named`].\n    ///\n    /// [`Named`]: TransactionArgs::Named\n    #[must_use]\n    pub fn is_named(&self) -> bool {\n        matches!(self, Self::Named(..))\n    }\n\n    /// Returns `true` if the transaction args is [`Bytesrepr`].\n    ///\n    /// [`Bytesrepr`]: TransactionArgs::Bytesrepr\n    #[must_use]\n    pub fn is_bytesrepr(&self) -> bool {\n        matches!(self, Self::Bytesrepr(..))\n    }\n}\n\nimpl FromBytes for TransactionArgs {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            0 => {\n                let (args, remainder) = RuntimeArgs::from_bytes(remainder)?;\n                Ok((TransactionArgs::Named(args), remainder))\n            }\n            1 => {\n                let (bytes, remainder) = Bytes::from_bytes(remainder)?;\n                Ok((TransactionArgs::Bytesrepr(bytes), remainder))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\nimpl ToBytes for TransactionArgs {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn 
serialized_length(&self) -> usize {\n        match self {\n            TransactionArgs::Named(args) => args.serialized_length() + U8_SERIALIZED_LENGTH,\n            TransactionArgs::Bytesrepr(bytes) => bytes.serialized_length() + U8_SERIALIZED_LENGTH,\n        }\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            TransactionArgs::Named(args) => {\n                writer.push(0);\n                args.write_bytes(writer)\n            }\n            TransactionArgs::Bytesrepr(bytes) => {\n                writer.push(1);\n                bytes.write_bytes(writer)\n            }\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use proptest::prelude::*;\n\n    use crate::{bytesrepr, gens::transaction_args_arb};\n\n    proptest! {\n        #[test]\n        fn serialization_roundtrip(args in transaction_args_arb()) {\n            bytesrepr::test_serialization_roundtrip(&args);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/transaction_v1/transaction_v1_hash.rs",
    "content": "use alloc::vec::Vec;\nuse core::fmt::{self, Display, Formatter};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(doc)]\nuse super::TransactionV1;\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    Digest,\n};\n\n/// The cryptographic hash of a [`TransactionV1`].\n#[derive(\n    Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, Default,\n)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"Hex-encoded TransactionV1 hash.\")\n)]\n#[serde(deny_unknown_fields)]\npub struct TransactionV1Hash(Digest);\n\nimpl TransactionV1Hash {\n    /// The number of bytes in a `TransactionV1Hash` digest.\n    pub const LENGTH: usize = Digest::LENGTH;\n\n    /// Constructs a new `TransactionV1Hash`.\n    pub const fn new(hash: Digest) -> Self {\n        TransactionV1Hash(hash)\n    }\n\n    /// Returns the wrapped inner digest.\n    pub fn inner(&self) -> &Digest {\n        &self.0\n    }\n\n    /// Returns a new `TransactionV1Hash` directly initialized with the provided bytes; no hashing\n    /// is done.\n    pub const fn from_raw(raw_digest: [u8; Self::LENGTH]) -> Self {\n        TransactionV1Hash(Digest::from_raw(raw_digest))\n    }\n\n    /// Returns a random `TransactionV1Hash`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        let hash = rng.gen::<[u8; Digest::LENGTH]>().into();\n        TransactionV1Hash(hash)\n    }\n}\n\nimpl From<Digest> for TransactionV1Hash {\n    fn from(digest: Digest) -> Self {\n        TransactionV1Hash(digest)\n    }\n}\n\nimpl From<TransactionV1Hash> for Digest {\n    fn 
from(transaction_hash: TransactionV1Hash) -> Self {\n        transaction_hash.0\n    }\n}\n\nimpl Display for TransactionV1Hash {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(formatter, \"transaction-v1-hash({})\", self.0)\n    }\n}\n\nimpl AsRef<[u8]> for TransactionV1Hash {\n    fn as_ref(&self) -> &[u8] {\n        self.0.as_ref()\n    }\n}\n\nimpl ToBytes for TransactionV1Hash {\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.0.write_bytes(writer)\n    }\n\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n}\n\nimpl FromBytes for TransactionV1Hash {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        Digest::from_bytes(bytes).map(|(inner, remainder)| (TransactionV1Hash(inner), remainder))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        let hash = TransactionV1Hash::random(rng);\n        bytesrepr::test_serialization_roundtrip(&hash);\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/transaction_v1/transaction_v1_payload.rs",
    "content": "use core::fmt::{self, Debug, Display, Formatter};\n\nuse super::{errors_v1::FieldDeserializationError, PricingMode};\nuse crate::{\n    bytesrepr::{\n        Bytes,\n        Error::{self, Formatting},\n        FromBytes, ToBytes,\n    },\n    transaction::serialization::{\n        CalltableSerializationEnvelope, CalltableSerializationEnvelopeBuilder,\n    },\n    DisplayIter, InitiatorAddr, TimeDiff, Timestamp,\n};\n#[cfg(any(feature = \"std\", test))]\nuse crate::{TransactionArgs, TransactionEntryPoint, TransactionScheduling, TransactionTarget};\nuse alloc::{collections::BTreeMap, string::String, vec::Vec};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\n#[cfg(any(feature = \"std\", test))]\nuse serde::{de::DeserializeOwned, Deserialize, Serialize};\n#[cfg(any(feature = \"std\", test))]\nuse serde_json::Value;\n#[cfg(any(feature = \"std\", test))]\nuse thiserror::Error;\n\nconst INITIATOR_ADDR_FIELD_INDEX: u16 = 0;\nconst TIMESTAMP_FIELD_INDEX: u16 = 1;\nconst TTL_FIELD_INDEX: u16 = 2;\nconst CHAIN_NAME_FIELD_INDEX: u16 = 3;\nconst PRICING_MODE_FIELD_INDEX: u16 = 4;\nconst FIELDS_FIELD_INDEX: u16 = 5;\n\nconst ARGS_MAP_KEY: u16 = 0;\nconst TARGET_MAP_KEY: u16 = 1;\nconst ENTRY_POINT_MAP_KEY: u16 = 2;\nconst SCHEDULING_MAP_KEY: u16 = 3;\n#[cfg(any(feature = \"std\", test))]\nconst ARGS_MAP_HUMAN_READABLE_KEY: &str = \"args\";\n#[cfg(any(feature = \"std\", test))]\nconst TARGET_MAP_HUMAN_READABLE_KEY: &str = \"target\";\n#[cfg(any(feature = \"std\", test))]\nconst ENTRY_POINT_MAP_HUMAN_READABLE_KEY: &str = \"entry_point\";\n#[cfg(any(feature = \"std\", test))]\nconst SCHEDULING_MAP_HUMAN_READABLE_KEY: &str = \"scheduling\";\n\nconst EXPECTED_FIELD_KEYS: [u16; 4] = [\n    ARGS_MAP_KEY,\n    TARGET_MAP_KEY,\n    ENTRY_POINT_MAP_KEY,\n    SCHEDULING_MAP_KEY,\n];\n\n/// Structure aggregating internal data of V1 transaction.\n#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, 
Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    any(feature = \"std\", test),\n    derive(Serialize, Deserialize),\n    serde(deny_unknown_fields)\n)]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(with = \"TransactionV1PayloadJson\")\n)]\npub struct TransactionV1Payload {\n    initiator_addr: InitiatorAddr,\n    timestamp: Timestamp,\n    ttl: TimeDiff,\n    chain_name: String,\n    pricing_mode: PricingMode,\n    fields: BTreeMap<u16, Bytes>,\n}\n\nimpl TransactionV1Payload {\n    // ctor\n    pub fn new(\n        chain_name: String,\n        timestamp: Timestamp,\n        ttl: TimeDiff,\n        pricing_mode: PricingMode,\n        initiator_addr: InitiatorAddr,\n        fields: BTreeMap<u16, Bytes>,\n    ) -> TransactionV1Payload {\n        TransactionV1Payload {\n            chain_name,\n            timestamp,\n            ttl,\n            pricing_mode,\n            initiator_addr,\n            fields,\n        }\n    }\n\n    fn serialized_field_lengths(&self) -> Vec<usize> {\n        vec![\n            self.initiator_addr.serialized_length(),\n            self.timestamp.serialized_length(),\n            self.ttl.serialized_length(),\n            self.chain_name.serialized_length(),\n            self.pricing_mode.serialized_length(),\n            self.fields.serialized_length(),\n        ]\n    }\n\n    /// Returns the chain name of the transaction.\n    pub fn chain_name(&self) -> &str {\n        &self.chain_name\n    }\n\n    /// Returns the timestamp of the transaction.\n    pub fn timestamp(&self) -> Timestamp {\n        self.timestamp\n    }\n\n    /// Returns the time-to-live of the transaction.\n    pub fn ttl(&self) -> TimeDiff {\n        self.ttl\n    }\n\n    /// Returns the pricing mode of the transaction.\n    pub fn pricing_mode(&self) -> &PricingMode {\n        &self.pricing_mode\n    }\n\n    /// Returns the initiator address of the transaction.\n    pub fn 
initiator_addr(&self) -> &InitiatorAddr {\n        &self.initiator_addr\n    }\n\n    /// Returns the fields of the transaction.\n    pub fn fields(&self) -> &BTreeMap<u16, Bytes> {\n        &self.fields\n    }\n\n    /// Returns the timestamp of when the transaction expires, i.e. `self.timestamp + self.ttl`.\n    pub fn expires(&self) -> Timestamp {\n        self.timestamp.saturating_add(self.ttl)\n    }\n\n    /// Returns `true` if the transaction has expired.\n    pub fn expired(&self, current_instant: Timestamp) -> bool {\n        self.expires() < current_instant\n    }\n\n    /// Fetches field from the amorphic `field` map and attempts to deserialize it into a type `T`.\n    /// The deserialization is done using the `FromBytes` trait.\n    pub fn deserialize_field<T: FromBytes>(\n        &self,\n        index: u16,\n    ) -> Result<T, FieldDeserializationError> {\n        let field = self\n            .fields\n            .get(&index)\n            .ok_or(FieldDeserializationError::IndexNotExists { index })?;\n        let (value, remainder) = T::from_bytes(field)\n            .map_err(|error| FieldDeserializationError::FromBytesError { index, error })?;\n        if !remainder.is_empty() {\n            return Err(FieldDeserializationError::LingeringBytesInField { index });\n        }\n        Ok(value)\n    }\n\n    /// Helper method to return size of `fields`.\n    pub fn number_of_fields(&self) -> usize {\n        self.fields.len()\n    }\n\n    /// Makes transaction payload invalid.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn invalidate(&mut self) {\n        self.chain_name.clear();\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl TryFrom<TransactionV1PayloadJson> for TransactionV1Payload {\n    type Error = TransactionV1PayloadJsonError;\n    fn try_from(transaction_v1_json: TransactionV1PayloadJson) -> Result<Self, Self::Error> {\n        Ok(TransactionV1Payload {\n            initiator_addr: 
transaction_v1_json.initiator_addr,\n            timestamp: transaction_v1_json.timestamp,\n            ttl: transaction_v1_json.ttl,\n            chain_name: transaction_v1_json.chain_name,\n            pricing_mode: transaction_v1_json.pricing_mode,\n            fields: from_human_readable_fields(&transaction_v1_json.fields)?,\n        })\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\n#[derive(Clone, Serialize, Deserialize, PartialEq, Eq)]\n#[serde(deny_unknown_fields)]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(\n        description = \"Internal payload of the transaction. The actual data over which the signing is done.\",\n        rename = \"TransactionV1Payload\",\n    )\n)]\npub(super) struct TransactionV1PayloadJson {\n    initiator_addr: InitiatorAddr,\n    timestamp: Timestamp,\n    ttl: TimeDiff,\n    chain_name: String,\n    pricing_mode: PricingMode,\n    fields: BTreeMap<String, Value>,\n}\n\n#[cfg(any(feature = \"std\", test))]\n#[derive(Error, Debug)]\n\npub(super) enum TransactionV1PayloadJsonError {\n    #[error(\"{0}\")]\n    FailedToMap(String),\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl TryFrom<TransactionV1Payload> for TransactionV1PayloadJson {\n    type Error = TransactionV1PayloadJsonError;\n\n    fn try_from(value: TransactionV1Payload) -> Result<Self, Self::Error> {\n        Ok(TransactionV1PayloadJson {\n            initiator_addr: value.initiator_addr,\n            timestamp: value.timestamp,\n            ttl: value.ttl,\n            chain_name: value.chain_name,\n            pricing_mode: value.pricing_mode,\n            fields: to_human_readable_fields(&value.fields)?,\n        })\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\nfn from_human_readable_fields(\n    fields: &BTreeMap<String, Value>,\n) -> Result<BTreeMap<u16, Bytes>, TransactionV1PayloadJsonError> {\n    let number_of_expected_fields = EXPECTED_FIELD_KEYS.len();\n    if fields.len() != number_of_expected_fields {\n     
   return Err(TransactionV1PayloadJsonError::FailedToMap(format!(\n            \"Expected exactly {} fields\",\n            number_of_expected_fields\n        )));\n    }\n    let args_bytes = to_bytesrepr::<TransactionArgs>(fields, ARGS_MAP_HUMAN_READABLE_KEY)?;\n    let target_bytes = to_bytesrepr::<TransactionTarget>(fields, TARGET_MAP_HUMAN_READABLE_KEY)?;\n    let entry_point_bytes =\n        to_bytesrepr::<TransactionEntryPoint>(fields, ENTRY_POINT_MAP_HUMAN_READABLE_KEY)?;\n    let schedule_bytes =\n        to_bytesrepr::<TransactionScheduling>(fields, SCHEDULING_MAP_HUMAN_READABLE_KEY)?;\n    Ok(BTreeMap::from_iter(vec![\n        (ARGS_MAP_KEY, args_bytes),\n        (TARGET_MAP_KEY, target_bytes),\n        (ENTRY_POINT_MAP_KEY, entry_point_bytes),\n        (SCHEDULING_MAP_KEY, schedule_bytes),\n    ]))\n}\n\n#[cfg(any(feature = \"std\", test))]\nfn to_human_readable_fields(\n    fields: &BTreeMap<u16, Bytes>,\n) -> Result<BTreeMap<String, Value>, TransactionV1PayloadJsonError> {\n    let args_value =\n        extract_and_deserialize_field::<TransactionArgs>(fields, ARGS_MAP_KEY, \"args\")?;\n    let target_value =\n        extract_and_deserialize_field::<TransactionTarget>(fields, TARGET_MAP_KEY, \"target\")?;\n    let entry_point_value = extract_and_deserialize_field::<TransactionEntryPoint>(\n        fields,\n        ENTRY_POINT_MAP_KEY,\n        \"entry_point\",\n    )?;\n    let scheduling_value = extract_and_deserialize_field::<TransactionScheduling>(\n        fields,\n        SCHEDULING_MAP_KEY,\n        \"scheduling\",\n    )?;\n\n    Ok(BTreeMap::from_iter(vec![\n        (ARGS_MAP_HUMAN_READABLE_KEY.to_string(), args_value),\n        (TARGET_MAP_HUMAN_READABLE_KEY.to_string(), target_value),\n        (\n            ENTRY_POINT_MAP_HUMAN_READABLE_KEY.to_string(),\n            entry_point_value,\n        ),\n        (\n            SCHEDULING_MAP_HUMAN_READABLE_KEY.to_string(),\n            scheduling_value,\n        ),\n    ]))\n}\n\n#[cfg(any(feature 
= \"std\", test))]\nfn to_bytesrepr<T: ToBytes + DeserializeOwned>(\n    fields: &BTreeMap<String, Value>,\n    field_name: &str,\n) -> Result<Bytes, TransactionV1PayloadJsonError> {\n    let value_json = fields\n        .get(field_name)\n        .ok_or(TransactionV1PayloadJsonError::FailedToMap(format!(\n            \"Could not find {field_name} field\"\n        )))?;\n    let deserialized = serde_json::from_value::<T>(value_json.clone())\n        .map_err(|e| TransactionV1PayloadJsonError::FailedToMap(format!(\"{:?}\", e)))?;\n    deserialized\n        .to_bytes()\n        .map(|bytes| bytes.into())\n        .map_err(|e| TransactionV1PayloadJsonError::FailedToMap(format!(\"{:?}\", e)))\n}\n\n#[cfg(any(feature = \"std\", test))]\nfn extract_and_deserialize_field<T: FromBytes + Serialize>(\n    fields: &BTreeMap<u16, Bytes>,\n    key: u16,\n    field_name: &str,\n) -> Result<Value, TransactionV1PayloadJsonError> {\n    let value_bytes = fields\n        .get(&key)\n        .ok_or(TransactionV1PayloadJsonError::FailedToMap(format!(\n            \"Could not find {field_name} field\"\n        )))?;\n    let (from_bytes, remainder) = T::from_bytes(value_bytes)\n        .map_err(|e| TransactionV1PayloadJsonError::FailedToMap(format!(\"{:?}\", e)))?;\n    if !remainder.is_empty() {\n        return Err(TransactionV1PayloadJsonError::FailedToMap(format!(\n            \"Unexpexcted bytes in {field_name} field\"\n        )));\n    }\n    let value = serde_json::to_value(from_bytes)\n        .map_err(|e| TransactionV1PayloadJsonError::FailedToMap(format!(\"{:?}\", e)))?;\n    Ok(value)\n}\n\nimpl ToBytes for TransactionV1Payload {\n    fn to_bytes(&self) -> Result<Vec<u8>, crate::bytesrepr::Error> {\n        let expected_payload_sizes = self.serialized_field_lengths();\n        CalltableSerializationEnvelopeBuilder::new(expected_payload_sizes)?\n            .add_field(INITIATOR_ADDR_FIELD_INDEX, &self.initiator_addr)?\n            .add_field(TIMESTAMP_FIELD_INDEX, 
&self.timestamp)?\n            .add_field(TTL_FIELD_INDEX, &self.ttl)?\n            .add_field(CHAIN_NAME_FIELD_INDEX, &self.chain_name)?\n            .add_field(PRICING_MODE_FIELD_INDEX, &self.pricing_mode)?\n            .add_field(FIELDS_FIELD_INDEX, &self.fields)?\n            .binary_payload_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        CalltableSerializationEnvelope::estimate_size(self.serialized_field_lengths())\n    }\n}\n\nimpl FromBytes for TransactionV1Payload {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (binary_payload, remainder) = CalltableSerializationEnvelope::from_bytes(6, bytes)?;\n        let window = binary_payload.start_consuming()?.ok_or(Formatting)?;\n\n        window.verify_index(INITIATOR_ADDR_FIELD_INDEX)?;\n        let (initiator_addr, window) = window.deserialize_and_maybe_next::<InitiatorAddr>()?;\n        let window = window.ok_or(Formatting)?;\n        window.verify_index(TIMESTAMP_FIELD_INDEX)?;\n        let (timestamp, window) = window.deserialize_and_maybe_next::<Timestamp>()?;\n        let window = window.ok_or(Formatting)?;\n        window.verify_index(TTL_FIELD_INDEX)?;\n        let (ttl, window) = window.deserialize_and_maybe_next::<TimeDiff>()?;\n        let window = window.ok_or(Formatting)?;\n        window.verify_index(CHAIN_NAME_FIELD_INDEX)?;\n        let (chain_name, window) = window.deserialize_and_maybe_next::<String>()?;\n        let window = window.ok_or(Formatting)?;\n        window.verify_index(PRICING_MODE_FIELD_INDEX)?;\n        let (pricing_mode, window) = window.deserialize_and_maybe_next::<PricingMode>()?;\n        let window = window.ok_or(Formatting)?;\n        window.verify_index(FIELDS_FIELD_INDEX)?;\n        let (fields_as_vec, window) = window.deserialize_and_maybe_next::<Vec<(u16, Bytes)>>()?;\n        let fields = build_map(fields_as_vec)?;\n        if window.is_some() {\n            return Err(Formatting);\n        }\n        if fields.len() 
!= EXPECTED_FIELD_KEYS.len()\n            || EXPECTED_FIELD_KEYS\n                .iter()\n                .any(|expected_key| !fields.contains_key(expected_key))\n        {\n            return Err(Formatting);\n        }\n        let from_bytes = TransactionV1Payload {\n            chain_name,\n            timestamp,\n            ttl,\n            pricing_mode,\n            initiator_addr,\n            fields,\n        };\n\n        Ok((from_bytes, remainder))\n    }\n}\n\n// We need to make sure that the bytes of the `fields` field are serialized in the correct order.\n// A BTreeMap is serialized the same as Vec<(K, V)> and it actually, on deserialization, doesn't\n// check if the keys are in ascending order. We need to make sure that the incoming transaction\n// payload is serialized in a strict way, otherwise we would have trouble with verifying the\n// signature(s).\nfn build_map(fields_as_vec: Vec<(u16, Bytes)>) -> Result<BTreeMap<u16, Bytes>, Error> {\n    let mut ret = BTreeMap::new();\n    let mut max_idx: i32 = -1;\n    for (key, value) in fields_as_vec {\n        let key_signed = key as i32;\n        if key_signed <= max_idx {\n            return Err(Formatting);\n        }\n        max_idx = key_signed;\n        ret.insert(key, value);\n    }\n\n    Ok(ret)\n}\n\nimpl Display for TransactionV1Payload {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(\n            formatter,\n            \"transaction-v1-payload[{}, {}, {}, {}, {}, fields: {}]\",\n            self.chain_name,\n            self.timestamp,\n            self.ttl,\n            self.pricing_mode,\n            self.initiator_addr,\n            DisplayIter::new(self.fields.keys())\n        )\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::{\n        testing::TestRng, RuntimeArgs, TransactionEntryPoint, TransactionScheduling,\n        TransactionTarget,\n    };\n    use std::collections::BTreeMap;\n\n    #[test]\n    fn 
reserialize_should_work_with_ascending_ids() {\n        let input = vec![\n            (0, Bytes::from(vec![1])),\n            (1, Bytes::from(vec![2])),\n            (4, Bytes::from(vec![3])),\n        ];\n        let map = build_map(input).expect(\"Should not fail\");\n        assert_eq!(\n            map,\n            BTreeMap::from_iter(vec![\n                (0, Bytes::from(vec![1])),\n                (1, Bytes::from(vec![2])),\n                (4, Bytes::from(vec![3]))\n            ])\n        );\n    }\n\n    #[test]\n    fn reserialize_should_fail_when_ids_not_unique() {\n        let input = vec![\n            (0, Bytes::from(vec![1])),\n            (0, Bytes::from(vec![2])),\n            (4, Bytes::from(vec![3])),\n        ];\n        let map_ret = build_map(input);\n        assert!(map_ret.is_err());\n    }\n\n    #[test]\n    fn reserialize_should_fail_when_ids_not_ascending() {\n        let input = vec![\n            (0, Bytes::from(vec![1])),\n            (2, Bytes::from(vec![2])),\n            (1, Bytes::from(vec![3])),\n        ];\n        assert!(build_map(input).is_err());\n        let input = vec![\n            (0, Bytes::from(vec![1])),\n            (2, Bytes::from(vec![2])),\n            (0, Bytes::from(vec![3])),\n        ];\n        assert!(build_map(input).is_err());\n        let input = vec![\n            (0, Bytes::from(vec![1])),\n            (1, Bytes::from(vec![2])),\n            (2, Bytes::from(vec![3])),\n            (3, Bytes::from(vec![4])),\n            (2, Bytes::from(vec![5])),\n        ];\n        assert!(build_map(input).is_err());\n    }\n\n    #[test]\n    fn should_fail_if_deserialized_payload_has_too_many_fields() {\n        let rng = &mut TestRng::new();\n        let (\n            args,\n            target,\n            entry_point,\n            scheduling,\n            initiator_addr,\n            timestamp,\n            ttl,\n            chain_name,\n            pricing_mode,\n        ) = random_payload_data(rng);\n      
  let mut fields = BTreeMap::new();\n        fields.insert(ARGS_MAP_KEY, args.to_bytes().unwrap().into());\n        fields.insert(TARGET_MAP_KEY, target.to_bytes().unwrap().into());\n        fields.insert(ENTRY_POINT_MAP_KEY, entry_point.to_bytes().unwrap().into());\n        fields.insert(SCHEDULING_MAP_KEY, scheduling.to_bytes().unwrap().into());\n        fields.insert(4, 111_u64.to_bytes().unwrap().into());\n\n        let bytes = TransactionV1Payload::new(\n            chain_name,\n            timestamp,\n            ttl,\n            pricing_mode,\n            initiator_addr,\n            fields,\n        )\n        .to_bytes()\n        .unwrap();\n        let result = TransactionV1Payload::from_bytes(&bytes);\n        assert!(result.is_err());\n    }\n\n    #[test]\n    fn should_fail_if_deserialized_payload_has_unrecognized_fields() {\n        let rng = &mut TestRng::new();\n        let (\n            args,\n            target,\n            entry_point,\n            scheduling,\n            initiator_addr,\n            timestamp,\n            ttl,\n            chain_name,\n            pricing_mode,\n        ) = random_payload_data(rng);\n        let mut fields = BTreeMap::new();\n        fields.insert(ARGS_MAP_KEY, args.to_bytes().unwrap().into());\n        fields.insert(TARGET_MAP_KEY, target.to_bytes().unwrap().into());\n        fields.insert(100, entry_point.to_bytes().unwrap().into());\n        fields.insert(SCHEDULING_MAP_KEY, scheduling.to_bytes().unwrap().into());\n\n        let bytes = TransactionV1Payload::new(\n            chain_name,\n            timestamp,\n            ttl,\n            pricing_mode,\n            initiator_addr,\n            fields,\n        )\n        .to_bytes()\n        .unwrap();\n        let result = TransactionV1Payload::from_bytes(&bytes);\n        assert!(result.is_err());\n    }\n\n    #[test]\n    fn should_fail_if_serialized_payoad_has_fields_out_of_order() {\n        let rng = &mut TestRng::new();\n        let (\n       
     args,\n            target,\n            entry_point,\n            scheduling,\n            initiator_addr,\n            timestamp,\n            ttl,\n            chain_name,\n            pricing_mode,\n        ) = random_payload_data(rng);\n        let fields: Vec<(u16, Bytes)> = vec![\n            (SCHEDULING_MAP_KEY, scheduling.to_bytes().unwrap().into()),\n            (TARGET_MAP_KEY, target.to_bytes().unwrap().into()),\n            (ENTRY_POINT_MAP_KEY, entry_point.to_bytes().unwrap().into()),\n            (ARGS_MAP_KEY, args.to_bytes().unwrap().into()),\n        ];\n\n        let expected_payload_sizes = vec![\n            initiator_addr.serialized_length(),\n            timestamp.serialized_length(),\n            ttl.serialized_length(),\n            chain_name.serialized_length(),\n            pricing_mode.serialized_length(),\n            fields.serialized_length(),\n        ];\n\n        let bytes = CalltableSerializationEnvelopeBuilder::new(expected_payload_sizes)\n            .unwrap()\n            .add_field(INITIATOR_ADDR_FIELD_INDEX, &initiator_addr)\n            .unwrap()\n            .add_field(TIMESTAMP_FIELD_INDEX, &timestamp)\n            .unwrap()\n            .add_field(TTL_FIELD_INDEX, &ttl)\n            .unwrap()\n            .add_field(CHAIN_NAME_FIELD_INDEX, &chain_name)\n            .unwrap()\n            .add_field(PRICING_MODE_FIELD_INDEX, &pricing_mode)\n            .unwrap()\n            .add_field(FIELDS_FIELD_INDEX, &fields)\n            .unwrap()\n            .binary_payload_bytes()\n            .unwrap();\n        let payload_res = TransactionV1Payload::from_bytes(&bytes);\n        assert!(payload_res.is_err());\n    }\n\n    fn random_payload_data(\n        rng: &mut TestRng,\n    ) -> (\n        RuntimeArgs,\n        TransactionTarget,\n        TransactionEntryPoint,\n        TransactionScheduling,\n        InitiatorAddr,\n        Timestamp,\n        TimeDiff,\n        String,\n        PricingMode,\n    ) {\n        let args 
= RuntimeArgs::random(rng);\n        let target = TransactionTarget::random(rng);\n        let entry_point = TransactionEntryPoint::random(rng);\n        let scheduling = TransactionScheduling::random(rng);\n        let initiator_addr = InitiatorAddr::random(rng);\n        let timestamp = Timestamp::now();\n        let ttl = TimeDiff::from_millis(1000);\n        let chain_name = \"chain-name\".to_string();\n        let pricing_mode = PricingMode::random(rng);\n        (\n            args,\n            target,\n            entry_point,\n            scheduling,\n            initiator_addr,\n            timestamp,\n            ttl,\n            chain_name,\n            pricing_mode,\n        )\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/transaction_v1.rs",
    "content": "#[cfg(any(feature = \"testing\", test, feature = \"json-schema\"))]\npub(crate) mod arg_handling;\nmod errors_v1;\npub mod fields_container;\nmod transaction_args;\nmod transaction_v1_hash;\npub mod transaction_v1_payload;\n\n#[cfg(any(feature = \"std\", feature = \"testing\", test))]\nuse super::InitiatorAddrAndSecretKey;\nuse crate::{\n    bytesrepr::{self, Error, FromBytes, ToBytes},\n    crypto,\n};\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\nuse crate::{testing::TestRng, TransactionConfig, LARGE_WASM_LANE_ID};\n#[cfg(any(feature = \"std\", test))]\nuse crate::{\n    TransactionEntryPoint, TransactionTarget, TransactionV1Config, AUCTION_LANE_ID,\n    INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID,\n};\n#[cfg(any(feature = \"std\", test, feature = \"testing\"))]\nuse alloc::collections::BTreeMap;\nuse alloc::{collections::BTreeSet, vec::Vec};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\nuse errors_v1::FieldDeserializationError;\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\nuse fields_container::FieldsContainer;\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\nuse fields_container::{ENTRY_POINT_MAP_KEY, TARGET_MAP_KEY};\n#[cfg(any(feature = \"once_cell\", test))]\nuse once_cell::sync::OnceCell;\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\n#[cfg(any(feature = \"std\", test))]\nuse serde::{Deserialize, Serialize};\n#[cfg(any(feature = \"std\", test))]\nuse thiserror::Error;\nuse tracing::{error, trace};\npub use transaction_v1_payload::TransactionV1Payload;\n#[cfg(any(feature = \"std\", test))]\nuse transaction_v1_payload::TransactionV1PayloadJson;\n\nuse super::{\n    serialization::{CalltableSerializationEnvelope, CalltableSerializationEnvelopeBuilder},\n    Approval, ApprovalsHash, InitiatorAddr, PricingMode,\n};\n#[cfg(any(feature = \"std\", feature = \"testing\", test))]\nuse 
crate::bytesrepr::Bytes;\nuse crate::{Digest, DisplayIter, SecretKey, TimeDiff, Timestamp};\n\npub use errors_v1::{\n    DecodeFromJsonErrorV1 as TransactionV1DecodeFromJsonError, ErrorV1 as TransactionV1Error,\n    ExcessiveSizeErrorV1 as TransactionV1ExcessiveSizeError,\n    InvalidTransaction as InvalidTransactionV1,\n};\npub use transaction_args::TransactionArgs;\npub use transaction_v1_hash::TransactionV1Hash;\n\nuse core::{\n    cmp,\n    fmt::{self, Debug, Display, Formatter},\n    hash,\n};\n\nconst HASH_FIELD_INDEX: u16 = 0;\nconst PAYLOAD_FIELD_INDEX: u16 = 1;\nconst APPROVALS_FIELD_INDEX: u16 = 2;\n\n/// A unit of work sent by a client to the network, which when executed can cause global state to\n/// be altered.\n#[derive(Clone, Eq, Debug)]\n#[cfg_attr(any(feature = \"std\", test), derive(Serialize, Deserialize))]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(with = \"TransactionV1Json\")\n)]\npub struct TransactionV1 {\n    hash: TransactionV1Hash,\n    payload: TransactionV1Payload,\n    approvals: BTreeSet<Approval>,\n    #[cfg_attr(any(all(feature = \"std\", feature = \"once_cell\"), test), serde(skip))]\n    #[cfg_attr(\n        all(any(feature = \"once_cell\", test), feature = \"datasize\"),\n        data_size(skip)\n    )]\n    #[cfg(any(feature = \"once_cell\", test))]\n    is_verified: OnceCell<Result<(), InvalidTransactionV1>>,\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl TryFrom<TransactionV1Json> for TransactionV1 {\n    type Error = TransactionV1JsonError;\n    fn try_from(transaction_v1_json: TransactionV1Json) -> Result<Self, Self::Error> {\n        Ok(TransactionV1 {\n            hash: transaction_v1_json.hash,\n            payload: transaction_v1_json.payload.try_into().map_err(|error| {\n                TransactionV1JsonError::FailedToMap(format!(\n                    \"Failed to map TransactionJson::V1 to Transaction::V1, err: {}\",\n      
              error\n                ))\n            })?,\n            approvals: transaction_v1_json.approvals,\n            #[cfg(any(feature = \"once_cell\", test))]\n            is_verified: OnceCell::new(),\n        })\n    }\n}\n\n/// A helper struct to represent the transaction as json.\n#[cfg(any(feature = \"std\", test))]\n#[derive(Clone, Serialize, Deserialize, PartialEq, Eq)]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(\n        description = \"A unit of work sent by a client to the network, which when executed can \\\n        cause global state to be altered.\",\n        rename = \"TransactionV1\",\n    )\n)]\npub(super) struct TransactionV1Json {\n    hash: TransactionV1Hash,\n    payload: TransactionV1PayloadJson,\n    approvals: BTreeSet<Approval>,\n}\n\n#[cfg(any(feature = \"std\", test))]\n#[derive(Error, Debug)]\npub(super) enum TransactionV1JsonError {\n    #[error(\"{0}\")]\n    FailedToMap(String),\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl TryFrom<TransactionV1> for TransactionV1Json {\n    type Error = TransactionV1JsonError;\n    fn try_from(transaction: TransactionV1) -> Result<Self, Self::Error> {\n        Ok(TransactionV1Json {\n            hash: transaction.hash,\n            payload: transaction.payload.try_into().map_err(|error| {\n                TransactionV1JsonError::FailedToMap(format!(\n                    \"Failed to map Transaction::V1 to TransactionJson::V1, err: {}\",\n                    error\n                ))\n            })?,\n            approvals: transaction.approvals,\n        })\n    }\n}\n\nimpl TransactionV1 {\n    /// ctor\n    pub fn new(\n        hash: TransactionV1Hash,\n        payload: TransactionV1Payload,\n        approvals: BTreeSet<Approval>,\n    ) -> Self {\n        Self {\n            hash,\n            payload,\n            approvals,\n            #[cfg(any(feature = \"once_cell\", test))]\n            is_verified: OnceCell::new(),\n        }\n    }\n\n  
  #[cfg(any(feature = \"std\", test, feature = \"testing\"))]\n    pub(crate) fn build(\n        chain_name: String,\n        timestamp: Timestamp,\n        ttl: TimeDiff,\n        pricing_mode: PricingMode,\n        fields: BTreeMap<u16, Bytes>,\n        initiator_addr_and_secret_key: InitiatorAddrAndSecretKey,\n    ) -> TransactionV1 {\n        let initiator_addr = initiator_addr_and_secret_key.initiator_addr();\n        let transaction_v1_payload = TransactionV1Payload::new(\n            chain_name,\n            timestamp,\n            ttl,\n            pricing_mode,\n            initiator_addr,\n            fields,\n        );\n        let hash = Digest::hash(\n            transaction_v1_payload\n                .to_bytes()\n                .unwrap_or_else(|error| panic!(\"should serialize body: {}\", error)),\n        );\n        let mut transaction =\n            TransactionV1::new(hash.into(), transaction_v1_payload, BTreeSet::new());\n\n        if let Some(secret_key) = initiator_addr_and_secret_key.secret_key() {\n            transaction.sign(secret_key);\n        }\n        transaction\n    }\n\n    /// Adds a signature of this transaction's hash to its approvals.\n    pub fn sign(&mut self, secret_key: &SecretKey) {\n        let approval = Approval::create(&self.hash.into(), secret_key);\n        self.approvals.insert(approval);\n    }\n\n    /// Returns the `ApprovalsHash` of this transaction's approvals.\n    pub fn hash(&self) -> &TransactionV1Hash {\n        &self.hash\n    }\n\n    /// Returns the internal payload of this transaction.\n    pub fn payload(&self) -> &TransactionV1Payload {\n        &self.payload\n    }\n\n    /// Returns transactions approvals.\n    pub fn approvals(&self) -> &BTreeSet<Approval> {\n        &self.approvals\n    }\n\n    /// Returns the address of the initiator of the transaction.\n    pub fn initiator_addr(&self) -> &InitiatorAddr {\n        self.payload.initiator_addr()\n    }\n\n    /// Returns the name of the chain 
the transaction should be executed on.\n    pub fn chain_name(&self) -> &str {\n        self.payload.chain_name()\n    }\n\n    /// Returns the creation timestamp of the transaction.\n    pub fn timestamp(&self) -> Timestamp {\n        self.payload.timestamp()\n    }\n\n    /// Returns the duration after the creation timestamp for which the transaction will stay valid.\n    ///\n    /// After this duration has ended, the transaction will be considered expired.\n    pub fn ttl(&self) -> TimeDiff {\n        self.payload.ttl()\n    }\n\n    /// Returns `true` if the transaction has expired.\n    pub fn expired(&self, current_instant: Timestamp) -> bool {\n        self.payload.expired(current_instant)\n    }\n\n    /// Returns the pricing mode for the transaction.\n    pub fn pricing_mode(&self) -> &PricingMode {\n        self.payload.pricing_mode()\n    }\n\n    /// Returns the `ApprovalsHash` of this transaction's approvals.\n    pub fn compute_approvals_hash(&self) -> Result<ApprovalsHash, bytesrepr::Error> {\n        ApprovalsHash::compute(&self.approvals)\n    }\n\n    #[doc(hidden)]\n    pub fn with_approvals(mut self, approvals: BTreeSet<Approval>) -> Self {\n        self.approvals = approvals;\n        self\n    }\n\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn apply_approvals(&mut self, approvals: Vec<Approval>) {\n        self.approvals.extend(approvals);\n    }\n\n    /// Returns the payment amount if the txn is using payment limited mode.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn payment_amount(&self) -> Option<u64> {\n        if let PricingMode::PaymentLimited { payment_amount, .. 
} = self.pricing_mode() {\n            Some(*payment_amount)\n        } else {\n            None\n        }\n    }\n\n    /// Returns a random, valid but possibly expired transaction.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        let secret_key = SecretKey::random(rng);\n        let ttl_millis = rng.gen_range(60_000..TransactionConfig::default().max_ttl.millis());\n        let timestamp = Timestamp::random(rng);\n        let container = FieldsContainer::random(rng);\n        let initiator_addr_and_secret_key = InitiatorAddrAndSecretKey::SecretKey(&secret_key);\n        let pricing_mode = PricingMode::Fixed {\n            gas_price_tolerance: 5,\n            additional_computation_factor: 0,\n        };\n        TransactionV1::build(\n            rng.random_string(5..10),\n            timestamp,\n            TimeDiff::from_millis(ttl_millis),\n            pricing_mode,\n            container.to_map().unwrap(),\n            initiator_addr_and_secret_key,\n        )\n    }\n\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_lane_and_timestamp_and_ttl(\n        rng: &mut TestRng,\n        lane: u8,\n        maybe_timestamp: Option<Timestamp>,\n        ttl: Option<TimeDiff>,\n    ) -> Self {\n        let secret_key = SecretKey::random(rng);\n        let timestamp = maybe_timestamp.unwrap_or_else(Timestamp::now);\n        let ttl_millis = ttl.map_or(\n            rng.gen_range(60_000..TransactionConfig::default().max_ttl.millis()),\n            |ttl| ttl.millis(),\n        );\n        let container = FieldsContainer::random_of_lane(rng, lane);\n        let initiator_addr_and_secret_key = InitiatorAddrAndSecretKey::SecretKey(&secret_key);\n        let pricing_mode = PricingMode::Fixed {\n            gas_price_tolerance: 5,\n            additional_computation_factor: 0,\n        };\n        TransactionV1::build(\n            
rng.random_string(5..10),\n            timestamp,\n            TimeDiff::from_millis(ttl_millis),\n            pricing_mode,\n            container.to_map().unwrap(),\n            initiator_addr_and_secret_key,\n        )\n    }\n\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_with_timestamp_and_ttl(\n        rng: &mut TestRng,\n        maybe_timestamp: Option<Timestamp>,\n        ttl: Option<TimeDiff>,\n    ) -> Self {\n        Self::random_with_lane_and_timestamp_and_ttl(\n            rng,\n            INSTALL_UPGRADE_LANE_ID,\n            maybe_timestamp,\n            ttl,\n        )\n    }\n\n    /// Returns a random transaction with \"transfer\" category.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_transfer(\n        rng: &mut TestRng,\n        timestamp: Option<Timestamp>,\n        ttl: Option<TimeDiff>,\n    ) -> Self {\n        TransactionV1::random_with_lane_and_timestamp_and_ttl(rng, MINT_LANE_ID, timestamp, ttl)\n    }\n\n    /// Returns a random transaction with \"standard\" category.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_wasm(\n        rng: &mut TestRng,\n        timestamp: Option<Timestamp>,\n        ttl: Option<TimeDiff>,\n    ) -> Self {\n        TransactionV1::random_with_lane_and_timestamp_and_ttl(\n            rng,\n            LARGE_WASM_LANE_ID,\n            timestamp,\n            ttl,\n        )\n    }\n\n    /// Returns a random transaction with \"auction\" category.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random_auction(\n        rng: &mut TestRng,\n        timestamp: Option<Timestamp>,\n        ttl: Option<TimeDiff>,\n    ) -> Self {\n        TransactionV1::random_with_lane_and_timestamp_and_ttl(rng, AUCTION_LANE_ID, timestamp, ttl)\n    }\n\n    /// Returns a random transaction with \"install/upgrade\" category.\n    #[cfg(any(all(feature = \"std\", 
feature = \"testing\"), test))]\n    pub fn random_install_upgrade(\n        rng: &mut TestRng,\n        timestamp: Option<Timestamp>,\n        ttl: Option<TimeDiff>,\n    ) -> Self {\n        TransactionV1::random_with_lane_and_timestamp_and_ttl(\n            rng,\n            INSTALL_UPGRADE_LANE_ID,\n            timestamp,\n            ttl,\n        )\n    }\n\n    /// Returns result of attempting to deserialize a field from the amorphic `fields` container.\n    pub fn deserialize_field<T: FromBytes>(\n        &self,\n        index: u16,\n    ) -> Result<T, FieldDeserializationError> {\n        self.payload.deserialize_field(index)\n    }\n\n    /// Returns number of fields in the amorphic `fields` container.\n    pub fn number_of_fields(&self) -> usize {\n        self.payload.number_of_fields()\n    }\n\n    /// Checks if the declared hash of the transaction matches calculated hash.\n    pub fn has_valid_hash(&self) -> Result<(), InvalidTransactionV1> {\n        let computed_hash = Digest::hash(self.payload.to_bytes().map_err(|error| {\n            error!(\n                ?error,\n                \"Could not serialize transaction for purpose of calculating hash.\"\n            );\n            InvalidTransactionV1::CouldNotSerializeTransaction\n        })?);\n        if TransactionV1Hash::new(computed_hash) != self.hash {\n            trace!(?self, ?computed_hash, \"invalid transaction hash\");\n            return Err(InvalidTransactionV1::InvalidTransactionHash);\n        }\n        Ok(())\n    }\n\n    /// Returns `Ok` if and only if:\n    ///   * the transaction hash is correct (see [`TransactionV1::has_valid_hash`] for details)\n    ///   * approvals are non-empty, and\n    ///   * all approvals are valid signatures of the signed hash\n    pub fn verify(&self) -> Result<(), InvalidTransactionV1> {\n        #[cfg(any(feature = \"once_cell\", test))]\n        return self.is_verified.get_or_init(|| self.do_verify()).clone();\n\n        #[cfg(not(any(feature = 
\"once_cell\", test)))]\n        self.do_verify()\n    }\n\n    fn do_verify(&self) -> Result<(), InvalidTransactionV1> {\n        if self.approvals.is_empty() {\n            trace!(?self, \"transaction has no approvals\");\n            return Err(InvalidTransactionV1::EmptyApprovals);\n        }\n\n        self.has_valid_hash()?;\n\n        for (index, approval) in self.approvals.iter().enumerate() {\n            if let Err(error) = crypto::verify(self.hash, approval.signature(), approval.signer()) {\n                trace!(\n                    ?self,\n                    \"failed to verify transaction approval {}: {}\",\n                    index,\n                    error\n                );\n                return Err(InvalidTransactionV1::InvalidApproval { index, error });\n            }\n        }\n\n        Ok(())\n    }\n\n    /// Returns the hash of the transaction's payload.\n    pub fn payload_hash(&self) -> Result<Digest, InvalidTransactionV1> {\n        let bytes = self\n            .payload\n            .fields()\n            .to_bytes()\n            .map_err(|_| InvalidTransactionV1::CannotCalculateFieldsHash)?;\n        Ok(Digest::hash(bytes))\n    }\n\n    fn serialized_field_lengths(&self) -> Vec<usize> {\n        vec![\n            self.hash.serialized_length(),\n            self.payload.serialized_length(),\n            self.approvals.serialized_length(),\n        ]\n    }\n\n    /// Turns `self` into an invalid `TransactionV1` by clearing the `chain_name`, invalidating the\n    /// transaction hash\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn invalidate(&mut self) {\n        self.payload.invalidate();\n    }\n\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub(crate) fn get_transaction_target(&self) -> Result<TransactionTarget, InvalidTransactionV1> {\n        self.deserialize_field::<TransactionTarget>(TARGET_MAP_KEY)\n            .map_err(|error| 
InvalidTransactionV1::CouldNotDeserializeField { error })\n    }\n\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub(crate) fn get_transaction_entry_point(\n        &self,\n    ) -> Result<TransactionEntryPoint, InvalidTransactionV1> {\n        self.deserialize_field::<TransactionEntryPoint>(ENTRY_POINT_MAP_KEY)\n            .map_err(|error| InvalidTransactionV1::CouldNotDeserializeField { error })\n    }\n\n    /// Returns the gas price tolerance for the given transaction.\n    pub fn gas_price_tolerance(&self) -> u8 {\n        match self.pricing_mode() {\n            PricingMode::PaymentLimited {\n                gas_price_tolerance,\n                ..\n            } => *gas_price_tolerance,\n            PricingMode::Fixed {\n                gas_price_tolerance,\n                ..\n            } => *gas_price_tolerance,\n            PricingMode::Prepaid { .. } => {\n                // TODO: Change this when reserve gets implemented.\n                0u8\n            }\n        }\n    }\n}\n\nimpl ToBytes for TransactionV1 {\n    fn to_bytes(&self) -> Result<Vec<u8>, crate::bytesrepr::Error> {\n        let expected_payload_sizes = self.serialized_field_lengths();\n        CalltableSerializationEnvelopeBuilder::new(expected_payload_sizes)?\n            .add_field(HASH_FIELD_INDEX, &self.hash)?\n            .add_field(PAYLOAD_FIELD_INDEX, &self.payload)?\n            .add_field(APPROVALS_FIELD_INDEX, &self.approvals)?\n            .binary_payload_bytes()\n    }\n\n    fn serialized_length(&self) -> usize {\n        CalltableSerializationEnvelope::estimate_size(self.serialized_field_lengths())\n    }\n}\n\nimpl FromBytes for TransactionV1 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (binary_payload, remainder) = CalltableSerializationEnvelope::from_bytes(3, bytes)?;\n        let window = binary_payload.start_consuming()?.ok_or(Error::Formatting)?;\n        window.verify_index(HASH_FIELD_INDEX)?;\n    
    let (hash, window) = window.deserialize_and_maybe_next::<TransactionV1Hash>()?;\n        let window = window.ok_or(Error::Formatting)?;\n        window.verify_index(PAYLOAD_FIELD_INDEX)?;\n        let (payload, window) = window.deserialize_and_maybe_next::<TransactionV1Payload>()?;\n        let window = window.ok_or(Error::Formatting)?;\n        window.verify_index(APPROVALS_FIELD_INDEX)?;\n        let (approvals, window) = window.deserialize_and_maybe_next::<BTreeSet<Approval>>()?;\n        if window.is_some() {\n            return Err(Error::Formatting);\n        }\n        let from_bytes = TransactionV1 {\n            hash,\n            payload,\n            approvals,\n            #[cfg(any(feature = \"once_cell\", test))]\n            is_verified: OnceCell::new(),\n        };\n        Ok((from_bytes, remainder))\n    }\n}\n\nimpl Display for TransactionV1 {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        write!(\n            formatter,\n            \"transaction-v1[{}, {}, approvals: {}]\",\n            self.hash,\n            self.payload,\n            DisplayIter::new(self.approvals.iter())\n        )\n    }\n}\n\nimpl hash::Hash for TransactionV1 {\n    fn hash<H: hash::Hasher>(&self, state: &mut H) {\n        // Destructure to make sure we don't accidentally omit fields.\n        let TransactionV1 {\n            hash,\n            payload,\n            approvals,\n            #[cfg(any(feature = \"once_cell\", test))]\n                is_verified: _,\n        } = self;\n        hash.hash(state);\n        payload.hash(state);\n        approvals.hash(state);\n    }\n}\n\nimpl PartialEq for TransactionV1 {\n    fn eq(&self, other: &TransactionV1) -> bool {\n        // Destructure to make sure we don't accidentally omit fields.\n        let TransactionV1 {\n            hash,\n            payload,\n            approvals,\n            #[cfg(any(feature = \"once_cell\", test))]\n                is_verified: _,\n        } = self;\n      
  *hash == other.hash && *payload == other.payload && *approvals == other.approvals\n    }\n}\n\nimpl Ord for TransactionV1 {\n    fn cmp(&self, other: &TransactionV1) -> cmp::Ordering {\n        // Destructure to make sure we don't accidentally omit fields.\n        let TransactionV1 {\n            hash,\n            payload,\n            approvals,\n            #[cfg(any(feature = \"once_cell\", test))]\n                is_verified: _,\n        } = self;\n        hash.cmp(&other.hash)\n            .then_with(|| payload.cmp(&other.payload))\n            .then_with(|| approvals.cmp(&other.approvals))\n    }\n}\n\nimpl PartialOrd for TransactionV1 {\n    fn partial_cmp(&self, other: &TransactionV1) -> Option<cmp::Ordering> {\n        Some(self.cmp(other))\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\n/// Calculates the lane based on properties of the transaction\npub fn calculate_transaction_lane(\n    entry_point: &TransactionEntryPoint,\n    target: &TransactionTarget,\n    pricing_mode: &PricingMode,\n    config: &TransactionV1Config,\n    size_estimation: u64,\n    runtime_args_size: u64,\n) -> Result<u8, InvalidTransactionV1> {\n    use crate::TransactionRuntimeParams;\n\n    use super::get_lane_for_non_install_wasm;\n\n    match target {\n        TransactionTarget::Native => match entry_point {\n            TransactionEntryPoint::Transfer | TransactionEntryPoint::Burn => Ok(MINT_LANE_ID),\n            TransactionEntryPoint::AddBid\n            | TransactionEntryPoint::WithdrawBid\n            | TransactionEntryPoint::Delegate\n            | TransactionEntryPoint::Undelegate\n            | TransactionEntryPoint::Redelegate\n            | TransactionEntryPoint::ActivateBid\n            | TransactionEntryPoint::ChangeBidPublicKey\n            | TransactionEntryPoint::AddReservations\n            | TransactionEntryPoint::CancelReservations => Ok(AUCTION_LANE_ID),\n            TransactionEntryPoint::Call => Err(InvalidTransactionV1::EntryPointCannotBeCall),\n 
           TransactionEntryPoint::Custom(_) => {\n                Err(InvalidTransactionV1::EntryPointCannotBeCustom {\n                    entry_point: entry_point.clone(),\n                })\n            }\n        },\n        TransactionTarget::Stored { .. } => match entry_point {\n            TransactionEntryPoint::Custom(_) => get_lane_for_non_install_wasm(\n                config,\n                pricing_mode,\n                size_estimation,\n                runtime_args_size,\n            )\n            .map_err(Into::into),\n            TransactionEntryPoint::Call\n            | TransactionEntryPoint::Transfer\n            | TransactionEntryPoint::Burn\n            | TransactionEntryPoint::AddBid\n            | TransactionEntryPoint::WithdrawBid\n            | TransactionEntryPoint::Delegate\n            | TransactionEntryPoint::Undelegate\n            | TransactionEntryPoint::Redelegate\n            | TransactionEntryPoint::ActivateBid\n            | TransactionEntryPoint::ChangeBidPublicKey\n            | TransactionEntryPoint::AddReservations\n            | TransactionEntryPoint::CancelReservations => {\n                Err(InvalidTransactionV1::EntryPointMustBeCustom {\n                    entry_point: entry_point.clone(),\n                })\n            }\n        },\n        TransactionTarget::Session {\n            is_install_upgrade,\n            runtime: TransactionRuntimeParams::VmCasperV1,\n            ..\n        } => match entry_point {\n            TransactionEntryPoint::Call => {\n                if *is_install_upgrade {\n                    Ok(INSTALL_UPGRADE_LANE_ID)\n                } else {\n                    get_lane_for_non_install_wasm(\n                        config,\n                        pricing_mode,\n                        size_estimation,\n                        runtime_args_size,\n                    )\n                    .map_err(Into::into)\n                }\n            }\n            
TransactionEntryPoint::Custom(_)\n            | TransactionEntryPoint::Transfer\n            | TransactionEntryPoint::Burn\n            | TransactionEntryPoint::AddBid\n            | TransactionEntryPoint::WithdrawBid\n            | TransactionEntryPoint::Delegate\n            | TransactionEntryPoint::Undelegate\n            | TransactionEntryPoint::Redelegate\n            | TransactionEntryPoint::ActivateBid\n            | TransactionEntryPoint::ChangeBidPublicKey\n            | TransactionEntryPoint::AddReservations\n            | TransactionEntryPoint::CancelReservations => {\n                Err(InvalidTransactionV1::EntryPointMustBeCall {\n                    entry_point: entry_point.clone(),\n                })\n            }\n        },\n        TransactionTarget::Session {\n            is_install_upgrade,\n            runtime: TransactionRuntimeParams::VmCasperV2 { .. },\n            ..\n        } => match entry_point {\n            TransactionEntryPoint::Call | TransactionEntryPoint::Custom(_) => {\n                if *is_install_upgrade {\n                    Ok(INSTALL_UPGRADE_LANE_ID)\n                } else {\n                    get_lane_for_non_install_wasm(\n                        config,\n                        pricing_mode,\n                        size_estimation,\n                        runtime_args_size,\n                    )\n                    .map_err(Into::into)\n                }\n            }\n            TransactionEntryPoint::Transfer\n            | TransactionEntryPoint::Burn\n            | TransactionEntryPoint::AddBid\n            | TransactionEntryPoint::WithdrawBid\n            | TransactionEntryPoint::Delegate\n            | TransactionEntryPoint::Undelegate\n            | TransactionEntryPoint::Redelegate\n            | TransactionEntryPoint::ActivateBid\n            | TransactionEntryPoint::ChangeBidPublicKey\n            | TransactionEntryPoint::AddReservations\n            | TransactionEntryPoint::CancelReservations => 
{\n                Err(InvalidTransactionV1::EntryPointMustBeCall {\n                    entry_point: entry_point.clone(),\n                })\n            }\n        },\n    }\n}\n"
  },
  {
    "path": "types/src/transaction/transfer_target.rs",
    "content": "#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{account::AccountHash, PublicKey, URef};\n\n/// The various types which can be used as the `target` runtime argument of a native transfer.\n#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Debug)]\npub enum TransferTarget {\n    /// A public key.\n    PublicKey(PublicKey),\n    /// An account hash.\n    AccountHash(AccountHash),\n    /// A URef.\n    URef(URef),\n}\n\nimpl TransferTarget {\n    /// Returns a random `TransferTarget`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        match rng.gen_range(0..3) {\n            0 => TransferTarget::PublicKey(PublicKey::random(rng)),\n            1 => TransferTarget::AccountHash(rng.gen()),\n            2 => TransferTarget::URef(rng.gen()),\n            _ => unreachable!(),\n        }\n    }\n}\n\nimpl From<PublicKey> for TransferTarget {\n    fn from(public_key: PublicKey) -> Self {\n        Self::PublicKey(public_key)\n    }\n}\n\nimpl From<AccountHash> for TransferTarget {\n    fn from(account_hash: AccountHash) -> Self {\n        Self::AccountHash(account_hash)\n    }\n}\n\nimpl From<URef> for TransferTarget {\n    fn from(uref: URef) -> Self {\n        Self::URef(uref)\n    }\n}\n"
  },
  {
    "path": "types/src/transaction.rs",
    "content": "mod addressable_entity_identifier;\nmod approval;\nmod approvals_hash;\nmod deploy;\nmod error;\nmod execution_info;\nmod initiator_addr;\n#[cfg(any(feature = \"std\", test, feature = \"testing\"))]\nmod initiator_addr_and_secret_key;\nmod package_identifier;\nmod pricing_mode;\nmod runtime_args;\nmod serialization;\nmod transaction_entry_point;\nmod transaction_hash;\nmod transaction_id;\nmod transaction_invocation_target;\nmod transaction_scheduling;\nmod transaction_target;\nmod transaction_v1;\nmod transfer_target;\n\n#[cfg(feature = \"json-schema\")]\nuse crate::URef;\nuse alloc::{\n    collections::BTreeSet,\n    string::{String, ToString},\n    vec::Vec,\n};\nuse core::fmt::{self, Debug, Display, Formatter};\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse once_cell::sync::Lazy;\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\n#[cfg(any(feature = \"std\", test))]\nuse serde::{de, ser, Deserializer, Serializer};\n#[cfg(any(feature = \"std\", test))]\nuse serde::{Deserialize, Serialize};\n#[cfg(any(feature = \"std\", test))]\nuse serde_bytes::ByteBuf;\n#[cfg(any(feature = \"std\", test))]\nuse std::hash::Hash;\n#[cfg(any(feature = \"std\", test))]\nuse thiserror::Error;\nuse tracing::error;\n#[cfg(any(feature = \"std\", test))]\npub use transaction_v1::calculate_transaction_lane;\n#[cfg(any(feature = \"std\", test))]\nuse transaction_v1::TransactionV1Json;\n\n#[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\nuse crate::testing::TestRng;\nuse crate::{\n    account::AccountHash,\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    Digest, Phase, SecretKey, TimeDiff, Timestamp,\n};\n#[cfg(any(feature = \"std\", test))]\nuse crate::{Chainspec, Gas, Motes, TransactionV1Config};\npub use addressable_entity_identifier::AddressableEntityIdentifier;\npub use 
approval::Approval;\npub use approvals_hash::ApprovalsHash;\n#[cfg(any(feature = \"std\", test))]\npub use deploy::calculate_lane_id_for_deploy;\npub use deploy::{\n    Deploy, DeployDecodeFromJsonError, DeployError, DeployExcessiveSizeError, DeployHash,\n    DeployHeader, DeployId, ExecutableDeployItem, ExecutableDeployItemIdentifier, InvalidDeploy,\n};\npub use error::InvalidTransaction;\npub use execution_info::ExecutionInfo;\npub use initiator_addr::InitiatorAddr;\n#[cfg(any(feature = \"std\", feature = \"testing\", test))]\npub(crate) use initiator_addr_and_secret_key::InitiatorAddrAndSecretKey;\npub use package_identifier::PackageIdentifier;\npub use pricing_mode::{PricingMode, PricingModeError};\npub use runtime_args::{NamedArg, RuntimeArgs};\npub use transaction_entry_point::TransactionEntryPoint;\npub use transaction_hash::TransactionHash;\npub use transaction_id::TransactionId;\npub use transaction_invocation_target::TransactionInvocationTarget;\npub use transaction_scheduling::TransactionScheduling;\npub use transaction_target::{TransactionRuntimeParams, TransactionTarget};\n#[cfg(feature = \"json-schema\")]\npub(crate) use transaction_v1::arg_handling;\n#[cfg(any(feature = \"std\", feature = \"testing\", feature = \"gens\", test))]\npub(crate) use transaction_v1::fields_container::FieldsContainer;\npub use transaction_v1::{\n    InvalidTransactionV1, TransactionArgs, TransactionV1, TransactionV1DecodeFromJsonError,\n    TransactionV1Error, TransactionV1ExcessiveSizeError, TransactionV1Hash, TransactionV1Payload,\n};\npub use transfer_target::TransferTarget;\n\nconst DEPLOY_TAG: u8 = 0;\nconst V1_TAG: u8 = 1;\n\n#[cfg(feature = \"json-schema\")]\npub(super) static TRANSACTION: Lazy<Transaction> = Lazy::new(|| {\n    let secret_key = SecretKey::example();\n    let source = URef::from_formatted_str(\n        \"uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007\",\n    )\n    .unwrap();\n    let target = URef::from_formatted_str(\n    
    \"uref-1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b-000\",\n    )\n    .unwrap();\n    let id = Some(999);\n    let amount = 30_000_000_000_u64;\n    let args = arg_handling::new_transfer_args(amount, Some(source), target, id).unwrap();\n    let container = FieldsContainer::new(\n        TransactionArgs::Named(args),\n        TransactionTarget::Native,\n        TransactionEntryPoint::Transfer,\n        TransactionScheduling::Standard,\n    );\n    let pricing_mode = PricingMode::Fixed {\n        gas_price_tolerance: 5,\n        additional_computation_factor: 0,\n    };\n    let initiator_addr_and_secret_key = InitiatorAddrAndSecretKey::SecretKey(secret_key);\n    let v1_txn = TransactionV1::build(\n        \"casper-example\".to_string(),\n        *Timestamp::example(),\n        TimeDiff::from_seconds(3_600),\n        pricing_mode,\n        container.to_map().unwrap(),\n        initiator_addr_and_secret_key,\n    );\n    Transaction::V1(v1_txn)\n});\n\n/// A versioned wrapper for a transaction or deploy.\n#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub enum Transaction {\n    /// A deploy.\n    Deploy(Deploy),\n    /// A version 1 transaction.\n    #[cfg_attr(\n        feature = \"json-schema\",\n        serde(rename = \"Version1\"),\n        schemars(with = \"TransactionV1Json\")\n    )]\n    V1(TransactionV1),\n}\n\nimpl Transaction {\n    // Deploy variant ctor\n    pub fn from_deploy(deploy: Deploy) -> Self {\n        Transaction::Deploy(deploy)\n    }\n\n    // V1 variant ctor\n    pub fn from_v1(v1: TransactionV1) -> Self {\n        Transaction::V1(v1)\n    }\n\n    /// Returns the `TransactionHash` identifying this transaction.\n    pub fn hash(&self) -> TransactionHash {\n        match self {\n            Transaction::Deploy(deploy) => TransactionHash::from(*deploy.hash()),\n            
Transaction::V1(txn) => TransactionHash::from(*txn.hash()),\n        }\n    }\n\n    /// Size estimate.\n    pub fn size_estimate(&self) -> usize {\n        match self {\n            Transaction::Deploy(deploy) => deploy.serialized_length(),\n            Transaction::V1(v1) => v1.serialized_length(),\n        }\n    }\n\n    /// Timestamp.\n    pub fn timestamp(&self) -> Timestamp {\n        match self {\n            Transaction::Deploy(deploy) => deploy.header().timestamp(),\n            Transaction::V1(v1) => v1.payload().timestamp(),\n        }\n    }\n\n    /// Time to live.\n    pub fn ttl(&self) -> TimeDiff {\n        match self {\n            Transaction::Deploy(deploy) => deploy.header().ttl(),\n            Transaction::V1(v1) => v1.payload().ttl(),\n        }\n    }\n\n    /// Returns `Ok` if the given transaction is valid. Verification procedure is delegated to the\n    /// implementation of the particular variant of the transaction.\n    pub fn verify(&self) -> Result<(), InvalidTransaction> {\n        match self {\n            Transaction::Deploy(deploy) => deploy.is_valid().map_err(Into::into),\n            Transaction::V1(v1) => v1.verify().map_err(Into::into),\n        }\n    }\n\n    /// Adds a signature of this transaction's hash to its approvals.\n    pub fn sign(&mut self, secret_key: &SecretKey) {\n        match self {\n            Transaction::Deploy(deploy) => deploy.sign(secret_key),\n            Transaction::V1(v1) => v1.sign(secret_key),\n        }\n    }\n\n    /// Returns the `Approval`s for this transaction.\n    pub fn approvals(&self) -> BTreeSet<Approval> {\n        match self {\n            Transaction::Deploy(deploy) => deploy.approvals().clone(),\n            Transaction::V1(v1) => v1.approvals().clone(),\n        }\n    }\n\n    /// Returns the computed approvals hash identifying this transaction's approvals.\n    pub fn compute_approvals_hash(&self) -> Result<ApprovalsHash, bytesrepr::Error> {\n        let approvals_hash = match 
self {\n            Transaction::Deploy(deploy) => deploy.compute_approvals_hash()?,\n            Transaction::V1(txn) => txn.compute_approvals_hash()?,\n        };\n        Ok(approvals_hash)\n    }\n\n    /// Returns the chain name for the transaction, whether it's a `Deploy` or `V1` transaction.\n    pub fn chain_name(&self) -> String {\n        match self {\n            Transaction::Deploy(txn) => txn.chain_name().to_string(),\n            Transaction::V1(txn) => txn.chain_name().to_string(),\n        }\n    }\n\n    /// Checks if the transaction is a standard payment.\n    ///\n    /// For `Deploy` transactions, it checks if the session is a standard payment\n    /// in the payment phase. For `V1` transactions, it returns the value of\n    /// `standard_payment` if the pricing mode is `PaymentLimited`, otherwise it returns `true`.\n    pub fn is_standard_payment(&self) -> bool {\n        match self {\n            Transaction::Deploy(txn) => txn.session().is_standard_payment(Phase::Payment),\n            Transaction::V1(txn) => match txn.pricing_mode() {\n                PricingMode::PaymentLimited {\n                    standard_payment, ..\n                } => *standard_payment,\n                _ => true,\n            },\n        }\n    }\n\n    /// Returns the computed `TransactionId` uniquely identifying this transaction and its\n    /// approvals.\n    pub fn compute_id(&self) -> TransactionId {\n        match self {\n            Transaction::Deploy(deploy) => {\n                let deploy_hash = *deploy.hash();\n                let approvals_hash = deploy.compute_approvals_hash().unwrap_or_else(|error| {\n                    error!(%error, \"failed to serialize deploy approvals\");\n                    ApprovalsHash::from(Digest::default())\n                });\n                TransactionId::new(TransactionHash::Deploy(deploy_hash), approvals_hash)\n            }\n            Transaction::V1(txn) => {\n                let txn_hash = *txn.hash();\n      
          let approvals_hash = txn.compute_approvals_hash().unwrap_or_else(|error| {\n                    error!(%error, \"failed to serialize transaction approvals\");\n                    ApprovalsHash::from(Digest::default())\n                });\n                TransactionId::new(TransactionHash::V1(txn_hash), approvals_hash)\n            }\n        }\n    }\n\n    /// Returns the address of the initiator of the transaction.\n    pub fn initiator_addr(&self) -> InitiatorAddr {\n        match self {\n            Transaction::Deploy(deploy) => InitiatorAddr::PublicKey(deploy.account().clone()),\n            Transaction::V1(txn) => txn.initiator_addr().clone(),\n        }\n    }\n\n    /// Returns `true` if the transaction has expired.\n    pub fn expired(&self, current_instant: Timestamp) -> bool {\n        match self {\n            Transaction::Deploy(deploy) => deploy.expired(current_instant),\n            Transaction::V1(txn) => txn.expired(current_instant),\n        }\n    }\n\n    /// Returns the timestamp of when the transaction expires, i.e. 
`self.timestamp + self.ttl`.\n    pub fn expires(&self) -> Timestamp {\n        match self {\n            Transaction::Deploy(deploy) => deploy.header().expires(),\n            Transaction::V1(txn) => txn.payload().expires(),\n        }\n    }\n\n    /// Returns the set of account hashes corresponding to the public keys of the approvals.\n    pub fn signers(&self) -> BTreeSet<AccountHash> {\n        match self {\n            Transaction::Deploy(deploy) => deploy\n                .approvals()\n                .iter()\n                .map(|approval| approval.signer().to_account_hash())\n                .collect(),\n            Transaction::V1(txn) => txn\n                .approvals()\n                .iter()\n                .map(|approval| approval.signer().to_account_hash())\n                .collect(),\n        }\n    }\n\n    // This method is not intended to be used by third party crates.\n    //\n    // It is required to allow finalized approvals to be injected after reading a `Deploy` from\n    // storage.\n    #[doc(hidden)]\n    pub fn with_approvals(self, approvals: BTreeSet<Approval>) -> Self {\n        match self {\n            Transaction::Deploy(deploy) => Transaction::Deploy(deploy.with_approvals(approvals)),\n            Transaction::V1(transaction_v1) => {\n                Transaction::V1(transaction_v1.with_approvals(approvals))\n            }\n        }\n    }\n\n    /// Get [`TransactionV1`]\n    pub fn as_transaction_v1(&self) -> Option<&TransactionV1> {\n        match self {\n            Transaction::Deploy(_) => None,\n            Transaction::V1(v1) => Some(v1),\n        }\n    }\n\n    /// Authorization keys.\n    pub fn authorization_keys(&self) -> BTreeSet<AccountHash> {\n        match self {\n            Transaction::Deploy(deploy) => deploy\n                .approvals()\n                .iter()\n                .map(|approval| approval.signer().to_account_hash())\n                .collect(),\n            Transaction::V1(transaction_v1) 
=> transaction_v1\n                .approvals()\n                .iter()\n                .map(|approval| approval.signer().to_account_hash())\n                .collect(),\n        }\n    }\n\n    /// Is the transaction the legacy deploy variant.\n    pub fn is_legacy_transaction(&self) -> bool {\n        match self {\n            Transaction::Deploy(_) => true,\n            Transaction::V1(_) => false,\n        }\n    }\n\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    /// Calcualates the gas limit for the transaction.\n    pub fn gas_limit(&self, chainspec: &Chainspec, lane_id: u8) -> Result<Gas, InvalidTransaction> {\n        match self {\n            Transaction::Deploy(deploy) => {\n                match deploy\n                    .gas_limit(chainspec)\n                    .map_err(InvalidTransaction::from)\n                {\n                    Ok(gas) => {\n                        if gas.value() == crate::U512::zero() {\n                            Err(InvalidTransaction::Deploy(\n                                InvalidDeploy::InvalidPaymentAmount,\n                            ))\n                        } else {\n                            Ok(gas)\n                        }\n                    }\n                    Err(err) => Err(err),\n                }\n            }\n            Transaction::V1(v1) => {\n                if let Ok(TransactionTarget::Native) = v1.get_transaction_target() {\n                    // retro-compatibility for incentivized native transfer cost\n                    if let Ok(TransactionEntryPoint::Transfer) = v1.get_transaction_entry_point() {\n                        let gas = Gas::new(chainspec.system_costs_config.mint_costs().transfer);\n                        return Ok(gas);\n                    };\n                }\n\n                let pricing_mode = v1.pricing_mode();\n                match pricing_mode\n                    .gas_limit(chainspec, lane_id)\n                    
.map_err(InvalidTransaction::from)\n                {\n                    Ok(gas) => {\n                        // the transaction acceptor enforces this on an actual network,\n                        // rejecting 0 payment txn's right away.\n                        // however, direct tests don't engage the acceptor.\n                        // so, also checking here so those tests are consistent\n                        // and also defense in depth\n                        if gas.value() == crate::U512::zero() {\n                            Err(InvalidTransaction::V1(\n                                InvalidTransactionV1::InvalidPaymentAmount,\n                            ))\n                        } else {\n                            Ok(gas)\n                        }\n                    }\n                    Err(err) => Err(err),\n                }\n            }\n        }\n    }\n\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    /// Returns a gas cost based upon the gas_limit, the gas price,\n    /// and the chainspec settings.\n    pub fn gas_cost(\n        &self,\n        chainspec: &Chainspec,\n        lane_id: u8,\n        gas_price: u8,\n    ) -> Result<Motes, InvalidTransaction> {\n        match self {\n            Transaction::Deploy(deploy) => deploy\n                .gas_cost(chainspec, gas_price)\n                .map_err(InvalidTransaction::from),\n            Transaction::V1(v1) => {\n                if let Ok(TransactionTarget::Native) = v1.get_transaction_target() {\n                    // retro-compatibility for incentivized native transfer cost\n                    if let Ok(TransactionEntryPoint::Transfer) = v1.get_transaction_entry_point() {\n                        return Ok(Motes::new(\n                            chainspec.system_costs_config.mint_costs().transfer,\n                        ));\n                    };\n                }\n                let pricing_mode = v1.pricing_mode();\n                
pricing_mode\n                    .gas_cost(chainspec, lane_id, gas_price)\n                    .map_err(InvalidTransaction::from)\n            }\n        }\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    #[cfg(feature = \"json-schema\")]\n    pub fn example() -> &'static Self {\n        &TRANSACTION\n    }\n\n    /// Returns a random, valid but possibly expired transaction.\n    #[cfg(any(all(feature = \"std\", feature = \"testing\"), test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        if rng.gen() {\n            Transaction::Deploy(Deploy::random_valid_native_transfer(rng))\n        } else {\n            Transaction::V1(TransactionV1::random(rng))\n        }\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl Serialize for Transaction {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            TransactionJson::try_from(self.clone())\n                .map_err(|error| ser::Error::custom(format!(\"{:?}\", error)))?\n                .serialize(serializer)\n        } else {\n            let bytes = self\n                .to_bytes()\n                .map_err(|error| ser::Error::custom(format!(\"{:?}\", error)))?;\n            ByteBuf::from(bytes).serialize(serializer)\n        }\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl<'de> Deserialize<'de> for Transaction {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        if deserializer.is_human_readable() {\n            let json_helper = TransactionJson::deserialize(deserializer)?;\n            Transaction::try_from(json_helper)\n                .map_err(|error| de::Error::custom(format!(\"{:?}\", error)))\n        } else {\n            let bytes = ByteBuf::deserialize(deserializer)?.into_vec();\n            bytesrepr::deserialize::<Transaction>(bytes)\n                .map_err(|error| 
de::Error::custom(format!(\"{:?}\", error)))\n        }\n    }\n}\n\n/// A util structure to json-serialize a transaction.\n#[cfg(any(feature = \"std\", test))]\n#[derive(Clone, Serialize, Deserialize, PartialEq, Eq)]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\nenum TransactionJson {\n    /// A deploy.\n    Deploy(Deploy),\n    /// A version 1 transaction.\n    #[serde(rename = \"Version1\")]\n    V1(TransactionV1Json),\n}\n\n#[cfg(any(feature = \"std\", test))]\n#[derive(Error, Debug)]\nenum TransactionJsonError {\n    #[error(\"{0}\")]\n    FailedToMap(String),\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl TryFrom<TransactionJson> for Transaction {\n    type Error = TransactionJsonError;\n    fn try_from(transaction: TransactionJson) -> Result<Self, Self::Error> {\n        match transaction {\n            TransactionJson::Deploy(deploy) => Ok(Transaction::Deploy(deploy)),\n            TransactionJson::V1(v1) => {\n                TransactionV1::try_from(v1)\n                    .map(Transaction::V1)\n                    .map_err(|error| {\n                        TransactionJsonError::FailedToMap(format!(\n                            \"Failed to map TransactionJson::V1 to Transaction::V1, err: {}\",\n                            error\n                        ))\n                    })\n            }\n        }\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl TryFrom<Transaction> for TransactionJson {\n    type Error = TransactionJsonError;\n    fn try_from(transaction: Transaction) -> Result<Self, Self::Error> {\n        match transaction {\n            Transaction::Deploy(deploy) => Ok(TransactionJson::Deploy(deploy)),\n            Transaction::V1(v1) => TransactionV1Json::try_from(v1)\n                .map(TransactionJson::V1)\n                .map_err(|error| {\n                    TransactionJsonError::FailedToMap(format!(\n                        \"Failed to map Transaction::V1 to TransactionJson::V1, 
err: {}\",\n                        error\n                    ))\n                }),\n        }\n    }\n}\n/// Calculates gas limit.\n#[cfg(any(feature = \"std\", test))]\npub trait GasLimited {\n    /// The error type.\n    type Error;\n\n    /// The minimum allowed gas price (aka the floor).\n    const GAS_PRICE_FLOOR: u8 = 1;\n\n    /// Returns a gas cost based upon the gas_limit, the gas price,\n    /// and the chainspec settings.\n    fn gas_cost(&self, chainspec: &Chainspec, gas_price: u8) -> Result<Motes, Self::Error>;\n\n    /// Returns the gas / computation limit prior to execution.\n    fn gas_limit(&self, chainspec: &Chainspec) -> Result<Gas, Self::Error>;\n\n    /// Returns the gas price tolerance.\n    fn gas_price_tolerance(&self) -> Result<u8, Self::Error>;\n}\n\nimpl From<Deploy> for Transaction {\n    fn from(deploy: Deploy) -> Self {\n        Self::Deploy(deploy)\n    }\n}\n\nimpl From<TransactionV1> for Transaction {\n    fn from(txn: TransactionV1) -> Self {\n        Self::V1(txn)\n    }\n}\n\nimpl ToBytes for Transaction {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                Transaction::Deploy(deploy) => deploy.serialized_length(),\n                Transaction::V1(txn) => txn.serialized_length(),\n            }\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            Transaction::Deploy(deploy) => {\n                DEPLOY_TAG.write_bytes(writer)?;\n                deploy.write_bytes(writer)\n            }\n            Transaction::V1(txn) => {\n                V1_TAG.write_bytes(writer)?;\n                txn.write_bytes(writer)\n            }\n        }\n    }\n}\n\nimpl FromBytes for Transaction {\n    fn 
from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            DEPLOY_TAG => {\n                let (deploy, remainder) = Deploy::from_bytes(remainder)?;\n                Ok((Transaction::Deploy(deploy), remainder))\n            }\n            V1_TAG => {\n                let (txn, remainder) = TransactionV1::from_bytes(remainder)?;\n                Ok((Transaction::V1(txn), remainder))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\nimpl Display for Transaction {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            Transaction::Deploy(deploy) => Display::fmt(deploy, formatter),\n            Transaction::V1(txn) => Display::fmt(txn, formatter),\n        }\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\npub(crate) enum GetLaneError {\n    NoLaneMatch,\n    PricingModeNotSupported,\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl From<GetLaneError> for InvalidTransactionV1 {\n    fn from(value: GetLaneError) -> Self {\n        match value {\n            GetLaneError::NoLaneMatch => InvalidTransactionV1::NoLaneMatch,\n            GetLaneError::PricingModeNotSupported => InvalidTransactionV1::PricingModeNotSupported,\n        }\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\nimpl From<GetLaneError> for InvalidDeploy {\n    fn from(value: GetLaneError) -> Self {\n        match value {\n            GetLaneError::NoLaneMatch => InvalidDeploy::NoLaneMatch,\n            GetLaneError::PricingModeNotSupported => InvalidDeploy::PricingModeNotSupported,\n        }\n    }\n}\n\n#[cfg(any(feature = \"std\", test))]\npub(crate) fn get_lane_for_non_install_wasm(\n    config: &TransactionV1Config,\n    pricing_mode: &PricingMode,\n    transaction_size: u64,\n    runtime_args_size: u64,\n) -> Result<u8, GetLaneError> {\n    match pricing_mode {\n        PricingMode::PaymentLimited { 
payment_amount, .. } => config\n            .get_wasm_lane_id_by_payment_limited(\n                *payment_amount,\n                transaction_size,\n                runtime_args_size,\n            )\n            .ok_or(GetLaneError::NoLaneMatch),\n        PricingMode::Fixed {\n            additional_computation_factor,\n            ..\n        } => config\n            .get_wasm_lane_id_by_size(\n                transaction_size,\n                *additional_computation_factor,\n                runtime_args_size,\n            )\n            .ok_or(GetLaneError::NoLaneMatch),\n        PricingMode::Prepaid { .. } => Err(GetLaneError::PricingModeNotSupported),\n    }\n}\n\n/// Proptest generators for [`Transaction`].\n#[cfg(any(feature = \"testing\", feature = \"gens\", test))]\npub mod gens {\n    use super::*;\n    use proptest::{\n        array,\n        prelude::{Arbitrary, Strategy},\n    };\n\n    /// Generates a random `DeployHash` for testing purposes.\n    ///\n    /// This function is used to generate random `DeployHash` values for testing purposes.\n    /// It produces a proptest `Strategy` that can be used to generate arbitrary `DeployHash`\n    /// values.\n    pub fn deploy_hash_arb() -> impl Strategy<Value = DeployHash> {\n        array::uniform32(<u8>::arbitrary()).prop_map(DeployHash::from_raw)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::testing::TestRng;\n\n    #[test]\n    fn json_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let transaction = Transaction::from(Deploy::random(rng));\n        let json_string = serde_json::to_string_pretty(&transaction).unwrap();\n        let decoded = serde_json::from_str(&json_string).unwrap();\n        assert_eq!(transaction, decoded);\n\n        let transaction = Transaction::from(TransactionV1::random(rng));\n        let json_string = serde_json::to_string_pretty(&transaction).unwrap();\n        let decoded = serde_json::from_str(&json_string).unwrap();\n        
assert_eq!(transaction, decoded);\n    }\n\n    #[test]\n    fn bincode_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let transaction = Transaction::from(Deploy::random(rng));\n        let serialized = bincode::serialize(&transaction).unwrap();\n        let deserialized = bincode::deserialize(&serialized).unwrap();\n        assert_eq!(transaction, deserialized);\n\n        let transaction = Transaction::from(TransactionV1::random(rng));\n        let serialized = bincode::serialize(&transaction).unwrap();\n        let deserialized = bincode::deserialize(&serialized).unwrap();\n        assert_eq!(transaction, deserialized);\n    }\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let transaction = Transaction::from(Deploy::random(rng));\n        bytesrepr::test_serialization_roundtrip(&transaction);\n\n        let transaction = Transaction::from(TransactionV1::random(rng));\n        bytesrepr::test_serialization_roundtrip(&transaction);\n    }\n}\n\n#[cfg(test)]\nmod proptests {\n    use super::*;\n    use crate::{\n        bytesrepr,\n        gens::{legal_transaction_arb, transaction_arb},\n    };\n    use proptest::prelude::*;\n\n    proptest! {\n        #[test]\n        fn bytesrepr_roundtrip(transaction in transaction_arb()) {\n            bytesrepr::test_serialization_roundtrip(&transaction);\n        }\n\n        #[test]\n        fn json_roundtrip(transaction in legal_transaction_arb()) {\n            let json_string = serde_json::to_string_pretty(&transaction).unwrap();\n            let decoded = serde_json::from_str::<Transaction>(&json_string).unwrap();\n            assert_eq!(transaction, decoded);\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/transfer/error.rs",
    "content": "use core::{\n    array::TryFromSliceError,\n    fmt::{self, Debug, Display, Formatter},\n};\n#[cfg(feature = \"std\")]\nuse std::error::Error as StdError;\n\n/// Error returned when decoding a `TransferAddr` from a formatted string.\n#[derive(Debug, Clone)]\n#[non_exhaustive]\npub enum TransferFromStrError {\n    /// The prefix is invalid.\n    InvalidPrefix,\n    /// The address is not valid hex.\n    Hex(base16::DecodeError),\n    /// The slice is the wrong length.\n    Length(TryFromSliceError),\n}\n\nimpl From<base16::DecodeError> for TransferFromStrError {\n    fn from(error: base16::DecodeError) -> Self {\n        TransferFromStrError::Hex(error)\n    }\n}\n\nimpl From<TryFromSliceError> for TransferFromStrError {\n    fn from(error: TryFromSliceError) -> Self {\n        TransferFromStrError::Length(error)\n    }\n}\n\nimpl Display for TransferFromStrError {\n    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {\n        match self {\n            TransferFromStrError::InvalidPrefix => {\n                write!(formatter, \"transfer addr prefix is invalid\",)\n            }\n            TransferFromStrError::Hex(error) => {\n                write!(\n                    formatter,\n                    \"failed to decode address portion of transfer addr from hex: {}\",\n                    error\n                )\n            }\n            TransferFromStrError::Length(error) => write!(\n                formatter,\n                \"address portion of transfer addr is wrong length: {}\",\n                error\n            ),\n        }\n    }\n}\n\n#[cfg(feature = \"std\")]\nimpl StdError for TransferFromStrError {\n    fn source(&self) -> Option<&(dyn StdError + 'static)> {\n        match self {\n            TransferFromStrError::InvalidPrefix => None,\n            TransferFromStrError::Hex(error) => Some(error),\n            TransferFromStrError::Length(error) => Some(error),\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/transfer/transfer_v1/transfer_v1_addr.rs",
    "content": "use alloc::{format, string::String, vec::Vec};\nuse core::{\n    convert::TryFrom,\n    fmt::{self, Debug, Display, Formatter},\n};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer};\n\nuse super::super::TransferFromStrError;\npub(super) const TRANSFER_ADDR_FORMATTED_STRING_PREFIX: &str = \"transfer-\";\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes},\n    checksummed_hex, CLType, CLTyped,\n};\n\n/// The length of a version 1 transfer address.\npub const TRANSFER_ADDR_LENGTH: usize = 32;\n\n/// A newtype wrapping a <code>[u8; [TRANSFER_ADDR_LENGTH]]</code> which is the raw bytes of the\n/// transfer address.\n#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Default)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(\n    feature = \"json-schema\",\n    derive(JsonSchema),\n    schemars(description = \"Hex-encoded version 1 transfer address.\")\n)]\npub struct TransferAddr(\n    #[cfg_attr(feature = \"json-schema\", schemars(skip, with = \"String\"))]\n    [u8; TRANSFER_ADDR_LENGTH],\n);\n\nimpl TransferAddr {\n    /// Constructs a new `TransferV1Addr` instance from the raw bytes.\n    pub const fn new(value: [u8; TRANSFER_ADDR_LENGTH]) -> TransferAddr {\n        TransferAddr(value)\n    }\n\n    /// Returns the raw bytes of the transfer address as an array.\n    pub fn value(&self) -> [u8; TRANSFER_ADDR_LENGTH] {\n        self.0\n    }\n\n    /// Returns the raw bytes of the transfer address as a `slice`.\n    pub fn as_bytes(&self) -> &[u8] {\n        &self.0\n    }\n\n    /// Formats the `TransferV1Addr` as a prefixed, hex-encoded string.\n    pub fn to_formatted_string(self) -> String {\n        format!(\n            
\"{}{}\",\n            TRANSFER_ADDR_FORMATTED_STRING_PREFIX,\n            base16::encode_lower(&self.0),\n        )\n    }\n\n    /// Parses a string formatted as per `Self::to_formatted_string()` into a `TransferV1Addr`.\n    pub fn from_formatted_str(input: &str) -> Result<Self, TransferFromStrError> {\n        let remainder = input\n            .strip_prefix(TRANSFER_ADDR_FORMATTED_STRING_PREFIX)\n            .ok_or(TransferFromStrError::InvalidPrefix)?;\n        let bytes =\n            <[u8; TRANSFER_ADDR_LENGTH]>::try_from(checksummed_hex::decode(remainder)?.as_ref())?;\n        Ok(TransferAddr(bytes))\n    }\n\n    /// Returns a random `TransferV1Addr`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        TransferAddr(rng.gen())\n    }\n}\n\nimpl Serialize for TransferAddr {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            self.to_formatted_string().serialize(serializer)\n        } else {\n            self.0.serialize(serializer)\n        }\n    }\n}\n\nimpl<'de> Deserialize<'de> for TransferAddr {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        if deserializer.is_human_readable() {\n            let formatted_string = String::deserialize(deserializer)?;\n            TransferAddr::from_formatted_str(&formatted_string).map_err(SerdeError::custom)\n        } else {\n            let bytes = <[u8; TRANSFER_ADDR_LENGTH]>::deserialize(deserializer)?;\n            Ok(TransferAddr(bytes))\n        }\n    }\n}\n\nimpl Display for TransferAddr {\n    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {\n        write!(formatter, \"{}\", base16::encode_lower(&self.0))\n    }\n}\n\nimpl Debug for TransferAddr {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        write!(f, \"TransferV1Addr({})\", base16::encode_lower(&self.0))\n    }\n}\n\nimpl CLTyped for 
TransferAddr {\n    fn cl_type() -> CLType {\n        CLType::ByteArray(TRANSFER_ADDR_LENGTH as u32)\n    }\n}\n\nimpl ToBytes for TransferAddr {\n    #[inline(always)]\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        self.0.to_bytes()\n    }\n\n    #[inline(always)]\n    fn serialized_length(&self) -> usize {\n        self.0.serialized_length()\n    }\n\n    #[inline(always)]\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.0.write_bytes(writer)\n    }\n}\n\nimpl FromBytes for TransferAddr {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (bytes, remainder) = <[u8; TRANSFER_ADDR_LENGTH]>::from_bytes(bytes)?;\n        Ok((TransferAddr(bytes), remainder))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{bytesrepr, testing::TestRng};\n\n    use super::*;\n\n    #[test]\n    fn transfer_addr_from_str() {\n        let transfer_address = TransferAddr([4; 32]);\n        let encoded = transfer_address.to_formatted_string();\n        let decoded = TransferAddr::from_formatted_str(&encoded).unwrap();\n        assert_eq!(transfer_address, decoded);\n\n        let invalid_prefix =\n            \"transferv-0000000000000000000000000000000000000000000000000000000000000000\";\n        assert!(matches!(\n            TransferAddr::from_formatted_str(invalid_prefix),\n            Err(TransferFromStrError::InvalidPrefix)\n        ));\n\n        let invalid_prefix =\n            \"transfer0000000000000000000000000000000000000000000000000000000000000000\";\n        assert!(matches!(\n            TransferAddr::from_formatted_str(invalid_prefix),\n            Err(TransferFromStrError::InvalidPrefix)\n        ));\n\n        let short_addr = \"transfer-00000000000000000000000000000000000000000000000000000000000000\";\n        assert!(matches!(\n            TransferAddr::from_formatted_str(short_addr),\n            Err(TransferFromStrError::Length(_))\n        ));\n\n     
   let long_addr =\n            \"transfer-000000000000000000000000000000000000000000000000000000000000000000\";\n        assert!(matches!(\n            TransferAddr::from_formatted_str(long_addr),\n            Err(TransferFromStrError::Length(_))\n        ));\n\n        let invalid_hex =\n            \"transfer-000000000000000000000000000000000000000000000000000000000000000g\";\n        assert!(matches!(\n            TransferAddr::from_formatted_str(invalid_hex),\n            Err(TransferFromStrError::Hex(_))\n        ));\n    }\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n        let transfer_address = TransferAddr::random(rng);\n        bytesrepr::test_serialization_roundtrip(&transfer_address)\n    }\n\n    #[test]\n    fn bincode_roundtrip() {\n        let rng = &mut TestRng::new();\n        let transfer_address = TransferAddr::random(rng);\n        let serialized = bincode::serialize(&transfer_address).unwrap();\n        let decoded = bincode::deserialize(&serialized).unwrap();\n        assert_eq!(transfer_address, decoded);\n    }\n\n    #[test]\n    fn json_roundtrip() {\n        let rng = &mut TestRng::new();\n        let transfer_address = TransferAddr::random(rng);\n        let json_string = serde_json::to_string_pretty(&transfer_address).unwrap();\n        let decoded = serde_json::from_str(&json_string).unwrap();\n        assert_eq!(transfer_address, decoded);\n    }\n}\n"
  },
  {
    "path": "types/src/transfer/transfer_v1.rs",
    "content": "mod transfer_v1_addr;\n\nuse alloc::vec::Vec;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    account::AccountHash,\n    bytesrepr::{self, FromBytes, ToBytes},\n    serde_helpers, DeployHash, URef, U512,\n};\npub use transfer_v1_addr::{TransferAddr, TRANSFER_ADDR_LENGTH};\n\n/// Represents a version 1 transfer from one purse to another.\n#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Default, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct TransferV1 {\n    /// Deploy that created the transfer\n    #[serde(with = \"serde_helpers::deploy_hash_as_array\")]\n    #[cfg_attr(\n        feature = \"json-schema\",\n        schemars(\n            with = \"DeployHash\",\n            description = \"Hex-encoded Deploy hash of Deploy that created the transfer.\"\n        )\n    )]\n    pub deploy_hash: DeployHash,\n    /// Account from which transfer was executed\n    pub from: AccountHash,\n    /// Account to which funds are transferred\n    pub to: Option<AccountHash>,\n    /// Source purse\n    pub source: URef,\n    /// Target purse\n    pub target: URef,\n    /// Transfer amount\n    pub amount: U512,\n    /// Gas\n    pub gas: U512,\n    /// User-defined id\n    pub id: Option<u64>,\n}\n\nimpl TransferV1 {\n    /// Creates a [`TransferV1`].\n    #[allow(clippy::too_many_arguments)]\n    pub fn new(\n        deploy_hash: DeployHash,\n        from: AccountHash,\n        to: Option<AccountHash>,\n        source: URef,\n        target: URef,\n        amount: U512,\n        gas: U512,\n        id: Option<u64>,\n    ) -> Self {\n        TransferV1 {\n            deploy_hash,\n            from,\n            to,\n            source,\n            target,\n            amount,\n            
gas,\n            id,\n        }\n    }\n}\n\nimpl ToBytes for TransferV1 {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        self.deploy_hash.serialized_length()\n            + self.from.serialized_length()\n            + self.to.serialized_length()\n            + self.source.serialized_length()\n            + self.target.serialized_length()\n            + self.amount.serialized_length()\n            + self.gas.serialized_length()\n            + self.id.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.deploy_hash.write_bytes(writer)?;\n        self.from.write_bytes(writer)?;\n        self.to.write_bytes(writer)?;\n        self.source.write_bytes(writer)?;\n        self.target.write_bytes(writer)?;\n        self.amount.write_bytes(writer)?;\n        self.gas.write_bytes(writer)?;\n        self.id.write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for TransferV1 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (deploy_hash, rem) = FromBytes::from_bytes(bytes)?;\n        let (from, rem) = AccountHash::from_bytes(rem)?;\n        let (to, rem) = <Option<AccountHash>>::from_bytes(rem)?;\n        let (source, rem) = URef::from_bytes(rem)?;\n        let (target, rem) = URef::from_bytes(rem)?;\n        let (amount, rem) = U512::from_bytes(rem)?;\n        let (gas, rem) = U512::from_bytes(rem)?;\n        let (id, rem) = <Option<u64>>::from_bytes(rem)?;\n        Ok((\n            TransferV1 {\n                deploy_hash,\n                from,\n                to,\n                source,\n                target,\n                amount,\n                gas,\n                id,\n            },\n            rem,\n        ))\n    }\n}\n"
  },
  {
    "path": "types/src/transfer/transfer_v2.rs",
    "content": "use alloc::vec::Vec;\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    account::AccountHash,\n    bytesrepr::{self, FromBytes, ToBytes},\n    transaction::TransactionHash,\n    Gas, InitiatorAddr, URef, U512,\n};\n\n/// Represents a version 2 transfer from one purse to another.\n#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[serde(deny_unknown_fields)]\npub struct TransferV2 {\n    /// Transaction that created the transfer.\n    pub transaction_hash: TransactionHash,\n    /// Entity from which transfer was executed.\n    pub from: InitiatorAddr,\n    /// Account to which funds are transferred.\n    pub to: Option<AccountHash>,\n    /// Source purse.\n    pub source: URef,\n    /// Target purse.\n    pub target: URef,\n    /// Transfer amount.\n    pub amount: U512,\n    /// Gas.\n    pub gas: Gas,\n    /// User-defined ID.\n    pub id: Option<u64>,\n}\n\nimpl TransferV2 {\n    /// Creates a [`TransferV2`].\n    #[allow(clippy::too_many_arguments)]\n    pub fn new(\n        transaction_hash: TransactionHash,\n        from: InitiatorAddr,\n        to: Option<AccountHash>,\n        source: URef,\n        target: URef,\n        amount: U512,\n        gas: Gas,\n        id: Option<u64>,\n    ) -> Self {\n        TransferV2 {\n            transaction_hash,\n            from,\n            to,\n            source,\n            target,\n            amount,\n            gas,\n            id,\n        }\n    }\n}\n\nimpl ToBytes for TransferV2 {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buf = Vec::new();\n        self.write_bytes(&mut buf)?;\n        Ok(buf)\n    }\n\n    fn serialized_length(&self) -> usize {\n        
self.transaction_hash.serialized_length()\n            + self.from.serialized_length()\n            + self.to.serialized_length()\n            + self.source.serialized_length()\n            + self.target.serialized_length()\n            + self.amount.serialized_length()\n            + self.gas.serialized_length()\n            + self.id.serialized_length()\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        self.transaction_hash.write_bytes(writer)?;\n        self.from.write_bytes(writer)?;\n        self.to.write_bytes(writer)?;\n        self.source.write_bytes(writer)?;\n        self.target.write_bytes(writer)?;\n        self.amount.write_bytes(writer)?;\n        self.gas.write_bytes(writer)?;\n        self.id.write_bytes(writer)\n    }\n}\n\nimpl FromBytes for TransferV2 {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (transaction_hash, remainder) = TransactionHash::from_bytes(bytes)?;\n        let (from, remainder) = InitiatorAddr::from_bytes(remainder)?;\n        let (to, remainder) = <Option<AccountHash>>::from_bytes(remainder)?;\n        let (source, remainder) = URef::from_bytes(remainder)?;\n        let (target, remainder) = URef::from_bytes(remainder)?;\n        let (amount, remainder) = U512::from_bytes(remainder)?;\n        let (gas, remainder) = Gas::from_bytes(remainder)?;\n        let (id, remainder) = <Option<u64>>::from_bytes(remainder)?;\n        Ok((\n            TransferV2 {\n                transaction_hash,\n                from,\n                to,\n                source,\n                target,\n                amount,\n                gas,\n                id,\n            },\n            remainder,\n        ))\n    }\n}\n"
  },
  {
    "path": "types/src/transfer.rs",
    "content": "mod error;\nmod transfer_v1;\nmod transfer_v2;\n\nuse alloc::vec::Vec;\n\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse once_cell::sync::Lazy;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::Rng;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n#[cfg(feature = \"json-schema\")]\nuse crate::{account::AccountHash, transaction::TransactionV1Hash, URef};\nuse crate::{\n    bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},\n    U512,\n};\n#[cfg(any(feature = \"testing\", feature = \"json-schema\", test))]\nuse crate::{transaction::TransactionHash, Gas, InitiatorAddr};\npub use error::TransferFromStrError;\npub use transfer_v1::{TransferAddr, TransferV1, TRANSFER_ADDR_LENGTH};\npub use transfer_v2::TransferV2;\n\nconst V1_TAG: u8 = 0;\nconst V2_TAG: u8 = 1;\n\n#[cfg(feature = \"json-schema\")]\npub(super) static TRANSFER: Lazy<Transfer> = Lazy::new(|| {\n    let transaction_hash = TransactionHash::V1(TransactionV1Hash::from_raw([1; 32]));\n    let from = InitiatorAddr::AccountHash(AccountHash::new([2; 32]));\n    let to = Some(AccountHash::new([3; 32]));\n    let source = URef::from_formatted_str(\n        \"uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007\",\n    )\n    .unwrap();\n    let target = URef::from_formatted_str(\n        \"uref-1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b-000\",\n    )\n    .unwrap();\n    let amount = U512::from(1_000_000_000_000_u64);\n    let gas = Gas::new(2_500_000_000_u64);\n    let id = Some(999);\n    Transfer::V2(TransferV2::new(\n        transaction_hash,\n        from,\n        to,\n        source,\n        target,\n        amount,\n        gas,\n        id,\n    ))\n});\n\n/// A versioned wrapper for a transfer.\n#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, 
Deserialize, Debug)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\npub enum Transfer {\n    /// A version 1 transfer.\n    #[serde(rename = \"Version1\")]\n    V1(TransferV1),\n    /// A version 2 transfer.\n    #[serde(rename = \"Version2\")]\n    V2(TransferV2),\n}\n\nimpl Transfer {\n    /// Transfer amount.\n    pub fn amount(&self) -> U512 {\n        match self {\n            Transfer::V1(transfer_v1) => transfer_v1.amount,\n            Transfer::V2(transfer_v2) => transfer_v2.amount,\n        }\n    }\n\n    // This method is not intended to be used by third party crates.\n    #[doc(hidden)]\n    #[cfg(feature = \"json-schema\")]\n    pub fn example() -> &'static Self {\n        &TRANSFER\n    }\n\n    /// Returns a random `Transfer`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        use crate::DeployHash;\n\n        if rng.gen() {\n            Transfer::V1(TransferV1::new(\n                DeployHash::random(rng),\n                rng.gen(),\n                rng.gen(),\n                rng.gen(),\n                rng.gen(),\n                rng.gen(),\n                rng.gen(),\n                rng.gen(),\n            ))\n        } else {\n            Transfer::V2(TransferV2::new(\n                TransactionHash::random(rng),\n                InitiatorAddr::random(rng),\n                rng.gen(),\n                rng.gen(),\n                rng.gen(),\n                rng.gen(),\n                Gas::new(rng.gen::<u64>()),\n                rng.gen(),\n            ))\n        }\n    }\n}\n\nimpl From<TransferV1> for Transfer {\n    fn from(v1_transfer: TransferV1) -> Self {\n        Transfer::V1(v1_transfer)\n    }\n}\n\nimpl From<TransferV2> for Transfer {\n    fn from(v2_transfer: TransferV2) -> Self {\n        Transfer::V2(v2_transfer)\n    }\n}\n\nimpl ToBytes for Transfer {\n    fn to_bytes(&self) -> Result<Vec<u8>, 
bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        U8_SERIALIZED_LENGTH\n            + match self {\n                Transfer::V1(transfer) => transfer.serialized_length(),\n                Transfer::V2(transfer) => transfer.serialized_length(),\n            }\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {\n        match self {\n            Transfer::V1(transfer) => {\n                V1_TAG.write_bytes(writer)?;\n                transfer.write_bytes(writer)\n            }\n            Transfer::V2(transfer) => {\n                V2_TAG.write_bytes(writer)?;\n                transfer.write_bytes(writer)\n            }\n        }\n    }\n}\n\nimpl FromBytes for Transfer {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        match tag {\n            V1_TAG => {\n                let (transfer, remainder) = TransferV1::from_bytes(remainder)?;\n                Ok((Transfer::V1(transfer), remainder))\n            }\n            V2_TAG => {\n                let (transfer, remainder) = TransferV2::from_bytes(remainder)?;\n                Ok((Transfer::V2(transfer), remainder))\n            }\n            _ => Err(bytesrepr::Error::Formatting),\n        }\n    }\n}\n\n/// Proptest generators for [`Transfer`].\n#[cfg(any(feature = \"testing\", feature = \"gens\", test))]\npub mod gens {\n    use proptest::{\n        array,\n        prelude::{prop::option, Arbitrary, Strategy},\n    };\n\n    use super::*;\n    use crate::{\n        gens::{account_hash_arb, u512_arb, uref_arb},\n        transaction::gens::deploy_hash_arb,\n    };\n\n    pub fn transfer_v1_addr_arb() -> impl Strategy<Value = TransferAddr> {\n        array::uniform32(<u8>::arbitrary()).prop_map(TransferAddr::new)\n    }\n\n    
pub fn transfer_v1_arb() -> impl Strategy<Value = TransferV1> {\n        (\n            deploy_hash_arb(),\n            account_hash_arb(),\n            option::of(account_hash_arb()),\n            uref_arb(),\n            uref_arb(),\n            u512_arb(),\n            u512_arb(),\n            option::of(<u64>::arbitrary()),\n        )\n            .prop_map(|(deploy_hash, from, to, source, target, amount, gas, id)| {\n                TransferV1 {\n                    deploy_hash,\n                    from,\n                    to,\n                    source,\n                    target,\n                    amount,\n                    gas,\n                    id,\n                }\n            })\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::bytesrepr;\n\n    use super::*;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let transfer = Transfer::random(rng);\n        bytesrepr::test_serialization_roundtrip(&transfer);\n    }\n}\n"
  },
  {
    "path": "types/src/transfer_result.rs",
    "content": "use core::fmt::Debug;\n\nuse crate::ApiError;\n\n/// The result of an attempt to transfer between purses.\npub type TransferResult = Result<TransferredTo, ApiError>;\n\n/// The result of a successful transfer between purses.\n#[derive(Debug, Copy, Clone, PartialEq, Eq)]\n#[repr(i32)]\npub enum TransferredTo {\n    /// The destination account already existed.\n    ExistingAccount = 0,\n    /// The destination account was created.\n    NewAccount = 1,\n}\n\nimpl TransferredTo {\n    /// Converts an `i32` to a [`TransferResult`], where:\n    /// * `0` represents `Ok(TransferredTo::ExistingAccount)`,\n    /// * `1` represents `Ok(TransferredTo::NewAccount)`,\n    /// * all other inputs are mapped to `Err(ApiError::Transfer)`.\n    pub fn result_from(value: i32) -> TransferResult {\n        match value {\n            x if x == TransferredTo::ExistingAccount as i32 => Ok(TransferredTo::ExistingAccount),\n            x if x == TransferredTo::NewAccount as i32 => Ok(TransferredTo::NewAccount),\n            _ => Err(ApiError::Transfer),\n        }\n    }\n\n    // This conversion is not intended to be used by third party crates.\n    #[doc(hidden)]\n    pub fn i32_from(result: TransferResult) -> i32 {\n        match result {\n            Ok(transferred_to) => transferred_to as i32,\n            Err(_) => 2,\n        }\n    }\n}\n"
  },
  {
    "path": "types/src/uint.rs",
    "content": "use alloc::{\n    format,\n    string::{String, ToString},\n    vec::Vec,\n};\nuse core::{\n    fmt::{self, Formatter},\n    iter::Sum,\n    ops::Add,\n};\n\nuse num_integer::Integer;\nuse num_traits::{\n    AsPrimitive, Bounded, CheckedAdd, CheckedMul, CheckedSub, Num, One, Unsigned, WrappingAdd,\n    WrappingSub, Zero,\n};\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\nuse serde::{\n    de::{self, Deserialize, Deserializer, MapAccess, SeqAccess, Visitor},\n    ser::{Serialize, SerializeStruct, Serializer},\n};\n\nuse crate::bytesrepr::{self, Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH};\n\n#[allow(\n    clippy::assign_op_pattern,\n    clippy::ptr_offset_with_cast,\n    clippy::manual_range_contains,\n    clippy::range_plus_one,\n    clippy::transmute_ptr_to_ptr,\n    clippy::reversed_empty_ranges,\n    clippy::manual_div_ceil\n)]\nmod macro_code {\n    #[cfg(feature = \"datasize\")]\n    use datasize::DataSize;\n    use uint::construct_uint;\n\n    construct_uint! {\n        #[cfg_attr(feature = \"datasize\", derive(DataSize))]\n        pub struct U512(8);\n    }\n    construct_uint! {\n        #[cfg_attr(feature = \"datasize\", derive(DataSize))]\n        pub struct U256(4);\n    }\n    construct_uint! {\n        #[cfg_attr(feature = \"datasize\", derive(DataSize))]\n        pub struct U128(2);\n    }\n}\n\npub use self::macro_code::{U128, U256, U512};\n\n/// Error type for parsing [`U128`], [`U256`], [`U512`] from a string.\n#[derive(Debug)]\n#[non_exhaustive]\npub enum UIntParseError {\n    /// Contains the parsing error from the `uint` crate, which only supports base-10 parsing.\n    FromDecStr(uint::FromDecStrErr),\n    /// Parsing was attempted on a string representing the number in some base other than 10.\n    ///\n    /// Note: a general radix may be supported in the future.\n    InvalidRadix,\n}\n\nmacro_rules! 
impl_traits_for_uint {\n    ($type:ident, $total_bytes:expr, $test_mod:ident) => {\n        impl $type {\n            /// The smallest value that can be represented by this type.\n            pub const MIN: $type = $type([0; $total_bytes / 8]);\n        }\n\n        impl Serialize for $type {\n            fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n                if serializer.is_human_readable() {\n                    return self.to_string().serialize(serializer);\n                }\n\n                let mut buffer = [0u8; $total_bytes];\n                self.to_little_endian(&mut buffer);\n                let non_zero_bytes: Vec<u8> = buffer\n                    .iter()\n                    .rev()\n                    .skip_while(|b| **b == 0)\n                    .cloned()\n                    .collect();\n                let num_bytes = non_zero_bytes.len();\n\n                let mut state = serializer.serialize_struct(\"bigint\", num_bytes + 1)?;\n                state.serialize_field(\"\", &(num_bytes as u8))?;\n\n                for byte in non_zero_bytes.into_iter().rev() {\n                    state.serialize_field(\"\", &byte)?;\n                }\n                state.end()\n            }\n        }\n\n        impl<'de> Deserialize<'de> for $type {\n            fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n                struct BigNumVisitor;\n\n                impl<'de> Visitor<'de> for BigNumVisitor {\n                    type Value = $type;\n\n                    fn expecting(&self, formatter: &mut Formatter) -> fmt::Result {\n                        formatter.write_str(\"bignum struct\")\n                    }\n\n                    fn visit_seq<V: SeqAccess<'de>>(\n                        self,\n                        mut sequence: V,\n                    ) -> Result<$type, V::Error> {\n                        let length: u8 = sequence\n                            
.next_element()?\n                            .ok_or_else(|| de::Error::invalid_length(0, &self))?;\n                        let mut buffer = [0u8; $total_bytes];\n                        for index in 0..length as usize {\n                            let value = sequence\n                                .next_element()?\n                                .ok_or_else(|| de::Error::invalid_length(index + 1, &self))?;\n                            buffer[index as usize] = value;\n                        }\n                        let result = $type::from_little_endian(&buffer);\n                        Ok(result)\n                    }\n\n                    fn visit_map<V: MapAccess<'de>>(self, mut map: V) -> Result<$type, V::Error> {\n                        let _length_key: u8 = map\n                            .next_key()?\n                            .ok_or_else(|| de::Error::missing_field(\"length\"))?;\n                        let length: u8 = map\n                            .next_value()\n                            .map_err(|_| de::Error::invalid_length(0, &self))?;\n                        let mut buffer = [0u8; $total_bytes];\n                        for index in 0..length {\n                            let _byte_key: u8 = map\n                                .next_key()?\n                                .ok_or_else(|| de::Error::missing_field(\"byte\"))?;\n                            let value = map.next_value().map_err(|_| {\n                                de::Error::invalid_length(index as usize + 1, &self)\n                            })?;\n                            buffer[index as usize] = value;\n                        }\n                        let result = $type::from_little_endian(&buffer);\n                        Ok(result)\n                    }\n                }\n\n                const FIELDS: &'static [&'static str] = &[\n                    \"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"11\", \"12\", 
\"13\", \"14\",\n                    \"15\", \"16\", \"17\", \"18\", \"19\", \"20\", \"21\", \"22\", \"23\", \"24\", \"25\", \"26\", \"27\",\n                    \"28\", \"29\", \"30\", \"31\", \"32\", \"33\", \"34\", \"35\", \"36\", \"37\", \"38\", \"39\", \"40\",\n                    \"41\", \"42\", \"43\", \"44\", \"45\", \"46\", \"47\", \"48\", \"49\", \"50\", \"51\", \"52\", \"53\",\n                    \"54\", \"55\", \"56\", \"57\", \"58\", \"59\", \"60\", \"61\", \"62\", \"63\", \"64\",\n                ];\n\n                if deserializer.is_human_readable() {\n                    let decimal_string = String::deserialize(deserializer)?;\n                    return Self::from_dec_str(&decimal_string)\n                        .map_err(|error| de::Error::custom(format!(\"{:?}\", error)));\n                }\n\n                deserializer.deserialize_struct(\"bigint\", FIELDS, BigNumVisitor)\n            }\n        }\n\n        impl ToBytes for $type {\n            fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n                let mut buf = [0u8; $total_bytes];\n                self.to_little_endian(&mut buf);\n                let mut non_zero_bytes: Vec<u8> =\n                    buf.iter().rev().skip_while(|b| **b == 0).cloned().collect();\n                let num_bytes = non_zero_bytes.len() as u8;\n                non_zero_bytes.push(num_bytes);\n                non_zero_bytes.reverse();\n                Ok(non_zero_bytes)\n            }\n\n            fn serialized_length(&self) -> usize {\n                let mut buf = [0u8; $total_bytes];\n                self.to_little_endian(&mut buf);\n                let non_zero_bytes = buf.iter().rev().skip_while(|b| **b == 0).count();\n                U8_SERIALIZED_LENGTH + non_zero_bytes\n            }\n        }\n\n        impl FromBytes for $type {\n            fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n                let (num_bytes, rem): (u8, &[u8]) = 
FromBytes::from_bytes(bytes)?;\n\n                if num_bytes > $total_bytes {\n                    Err(Error::Formatting)\n                } else {\n                    let (value, rem) = bytesrepr::safe_split_at(rem, num_bytes as usize)?;\n                    let result = $type::from_little_endian(value);\n                    Ok((result, rem))\n                }\n            }\n        }\n\n        // Trait implementations for unifying U* as numeric types\n        impl Zero for $type {\n            fn zero() -> Self {\n                $type::zero()\n            }\n\n            fn is_zero(&self) -> bool {\n                self.is_zero()\n            }\n        }\n\n        impl One for $type {\n            fn one() -> Self {\n                $type::one()\n            }\n        }\n\n        // Requires Zero and One to be implemented\n        impl Num for $type {\n            type FromStrRadixErr = UIntParseError;\n            fn from_str_radix(str: &str, radix: u32) -> Result<Self, Self::FromStrRadixErr> {\n                if radix == 10 {\n                    $type::from_dec_str(str).map_err(UIntParseError::FromDecStr)\n                } else {\n                    // TODO: other radix parsing\n                    Err(UIntParseError::InvalidRadix)\n                }\n            }\n        }\n\n        // Requires Num to be implemented\n        impl Unsigned for $type {}\n\n        // Additional numeric trait, which also holds for these types\n        impl Bounded for $type {\n            fn min_value() -> Self {\n                $type::zero()\n            }\n\n            fn max_value() -> Self {\n                $type::MAX\n            }\n        }\n\n        // Instead of implementing arbitrary methods we can use existing traits from num_trait\n        // crate.\n        impl WrappingAdd for $type {\n            fn wrapping_add(&self, other: &$type) -> $type {\n                self.overflowing_add(*other).0\n            }\n        }\n\n        impl 
WrappingSub for $type {\n            fn wrapping_sub(&self, other: &$type) -> $type {\n                self.overflowing_sub(*other).0\n            }\n        }\n\n        impl CheckedMul for $type {\n            fn checked_mul(&self, v: &$type) -> Option<$type> {\n                $type::checked_mul(*self, *v)\n            }\n        }\n\n        impl CheckedSub for $type {\n            fn checked_sub(&self, v: &$type) -> Option<$type> {\n                $type::checked_sub(*self, *v)\n            }\n        }\n\n        impl CheckedAdd for $type {\n            fn checked_add(&self, v: &$type) -> Option<$type> {\n                $type::checked_add(*self, *v)\n            }\n        }\n\n        impl Integer for $type {\n            /// Unsigned integer division. Returns the same result as `div` (`/`).\n            #[inline]\n            fn div_floor(&self, other: &Self) -> Self {\n                *self / *other\n            }\n\n            /// Unsigned integer modulo operation. Returns the same result as `rem` (`%`).\n            #[inline]\n            fn mod_floor(&self, other: &Self) -> Self {\n                *self % *other\n            }\n\n            /// Calculates the Greatest Common Divisor (GCD) of the number and `other`\n            #[inline]\n            fn gcd(&self, other: &Self) -> Self {\n                let zero = Self::zero();\n                // Use Stein's algorithm\n                let mut m = *self;\n                let mut n = *other;\n                if m == zero || n == zero {\n                    return m | n;\n                }\n\n                // find common factors of 2\n                let shift = (m | n).trailing_zeros();\n\n                // divide n and m by 2 until odd\n                m >>= m.trailing_zeros();\n                n >>= n.trailing_zeros();\n\n                while m != n {\n                    if m > n {\n                        m -= n;\n                        m >>= m.trailing_zeros();\n                    } else 
{\n                        n -= m;\n                        n >>= n.trailing_zeros();\n                    }\n                }\n                m << shift\n            }\n\n            /// Calculates the Lowest Common Multiple (LCM) of the number and `other`.\n            #[inline]\n            fn lcm(&self, other: &Self) -> Self {\n                self.gcd_lcm(other).1\n            }\n\n            /// Calculates the Greatest Common Divisor (GCD) and\n            /// Lowest Common Multiple (LCM) of the number and `other`.\n            #[inline]\n            fn gcd_lcm(&self, other: &Self) -> (Self, Self) {\n                if self.is_zero() && other.is_zero() {\n                    return (Self::zero(), Self::zero());\n                }\n                let gcd = self.gcd(other);\n                let lcm = *self * (*other / gcd);\n                (gcd, lcm)\n            }\n\n            /// Deprecated, use `is_multiple_of` instead.\n            #[inline]\n            fn divides(&self, other: &Self) -> bool {\n                self.is_multiple_of(other)\n            }\n\n            /// Returns `true` if the number is a multiple of `other`.\n            #[inline]\n            fn is_multiple_of(&self, other: &Self) -> bool {\n                *self % *other == $type::zero()\n            }\n\n            /// Returns `true` if the number is divisible by `2`.\n            #[inline]\n            fn is_even(&self) -> bool {\n                (self.0[0]) & 1 == 0\n            }\n\n            /// Returns `true` if the number is not divisible by `2`.\n            #[inline]\n            fn is_odd(&self) -> bool {\n                !self.is_even()\n            }\n\n            /// Simultaneous truncated integer division and modulus.\n            #[inline]\n            fn div_rem(&self, other: &Self) -> (Self, Self) {\n                (*self / *other, *self % *other)\n            }\n        }\n\n        impl AsPrimitive<$type> for i32 {\n            fn as_(self) -> $type {\n     
           if self >= 0 {\n                    $type::from(self as u32)\n                } else {\n                    let abs = 0u32.wrapping_sub(self as u32);\n                    $type::zero().wrapping_sub(&$type::from(abs))\n                }\n            }\n        }\n\n        impl AsPrimitive<$type> for i64 {\n            fn as_(self) -> $type {\n                if self >= 0 {\n                    $type::from(self as u64)\n                } else {\n                    let abs = 0u64.wrapping_sub(self as u64);\n                    $type::zero().wrapping_sub(&$type::from(abs))\n                }\n            }\n        }\n\n        impl AsPrimitive<$type> for u8 {\n            fn as_(self) -> $type {\n                $type::from(self)\n            }\n        }\n\n        impl AsPrimitive<$type> for u32 {\n            fn as_(self) -> $type {\n                $type::from(self)\n            }\n        }\n\n        impl AsPrimitive<$type> for u64 {\n            fn as_(self) -> $type {\n                $type::from(self)\n            }\n        }\n\n        impl AsPrimitive<i32> for $type {\n            fn as_(self) -> i32 {\n                self.0[0] as i32\n            }\n        }\n\n        impl AsPrimitive<i64> for $type {\n            fn as_(self) -> i64 {\n                self.0[0] as i64\n            }\n        }\n\n        impl AsPrimitive<u8> for $type {\n            fn as_(self) -> u8 {\n                self.0[0] as u8\n            }\n        }\n\n        impl AsPrimitive<u32> for $type {\n            fn as_(self) -> u32 {\n                self.0[0] as u32\n            }\n        }\n\n        impl AsPrimitive<u64> for $type {\n            fn as_(self) -> u64 {\n                self.0[0]\n            }\n        }\n\n        impl Sum for $type {\n            fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {\n                iter.fold($type::zero(), Add::add)\n            }\n        }\n\n        impl Distribution<$type> for Standard {\n            fn 
sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $type {\n                let mut raw_bytes = [0u8; $total_bytes];\n                rng.fill_bytes(raw_bytes.as_mut());\n                $type::from(raw_bytes)\n            }\n        }\n\n        #[cfg(feature = \"json-schema\")]\n        impl schemars::JsonSchema for $type {\n            fn schema_name() -> String {\n                format!(\"U{}\", $total_bytes * 8)\n            }\n\n            fn json_schema(gen: &mut schemars::gen::SchemaGenerator) -> schemars::schema::Schema {\n                let schema = gen.subschema_for::<String>();\n                let mut schema_object = schema.into_object();\n                schema_object.metadata().description = Some(format!(\n                    \"Decimal representation of a {}-bit integer.\",\n                    $total_bytes * 8\n                ));\n                schema_object.into()\n            }\n        }\n\n        #[cfg(test)]\n        mod $test_mod {\n            use super::*;\n\n            #[test]\n            fn test_div_mod_floor() {\n                assert_eq!($type::from(10).div_floor(&$type::from(3)), $type::from(3));\n                assert_eq!($type::from(10).mod_floor(&$type::from(3)), $type::from(1));\n                assert_eq!(\n                    $type::from(10).div_mod_floor(&$type::from(3)),\n                    ($type::from(3), $type::from(1))\n                );\n                assert_eq!($type::from(5).div_floor(&$type::from(5)), $type::from(1));\n                assert_eq!($type::from(5).mod_floor(&$type::from(5)), $type::from(0));\n                assert_eq!(\n                    $type::from(5).div_mod_floor(&$type::from(5)),\n                    ($type::from(1), $type::from(0))\n                );\n                assert_eq!($type::from(3).div_floor(&$type::from(7)), $type::from(0));\n                assert_eq!($type::from(3).mod_floor(&$type::from(7)), $type::from(3));\n                assert_eq!(\n                    
$type::from(3).div_mod_floor(&$type::from(7)),\n                    ($type::from(0), $type::from(3))\n                );\n            }\n\n            #[test]\n            fn test_gcd() {\n                assert_eq!($type::from(10).gcd(&$type::from(2)), $type::from(2));\n                assert_eq!($type::from(10).gcd(&$type::from(3)), $type::from(1));\n                assert_eq!($type::from(0).gcd(&$type::from(3)), $type::from(3));\n                assert_eq!($type::from(3).gcd(&$type::from(3)), $type::from(3));\n                assert_eq!($type::from(56).gcd(&$type::from(42)), $type::from(14));\n                assert_eq!(\n                    $type::MAX.gcd(&($type::MAX / $type::from(2))),\n                    $type::from(1)\n                );\n                assert_eq!($type::from(15).gcd(&$type::from(17)), $type::from(1));\n            }\n\n            #[test]\n            fn test_lcm() {\n                assert_eq!($type::from(1).lcm(&$type::from(0)), $type::from(0));\n                assert_eq!($type::from(0).lcm(&$type::from(1)), $type::from(0));\n                assert_eq!($type::from(1).lcm(&$type::from(1)), $type::from(1));\n                assert_eq!($type::from(8).lcm(&$type::from(9)), $type::from(72));\n                assert_eq!($type::from(11).lcm(&$type::from(5)), $type::from(55));\n                assert_eq!($type::from(15).lcm(&$type::from(17)), $type::from(255));\n                assert_eq!($type::from(4).lcm(&$type::from(8)), $type::from(8));\n            }\n\n            #[test]\n            fn test_is_multiple_of() {\n                assert!($type::from(6).is_multiple_of(&$type::from(6)));\n                assert!($type::from(6).is_multiple_of(&$type::from(3)));\n                assert!($type::from(6).is_multiple_of(&$type::from(1)));\n                assert!(!$type::from(3).is_multiple_of(&$type::from(5)))\n            }\n\n            #[test]\n            fn is_even() {\n                assert_eq!($type::from(0).is_even(), true);\n         
       assert_eq!($type::from(1).is_even(), false);\n                assert_eq!($type::from(2).is_even(), true);\n                assert_eq!($type::from(3).is_even(), false);\n                assert_eq!($type::from(4).is_even(), true);\n            }\n\n            #[test]\n            fn is_odd() {\n                assert_eq!($type::from(0).is_odd(), false);\n                assert_eq!($type::from(1).is_odd(), true);\n                assert_eq!($type::from(2).is_odd(), false);\n                assert_eq!($type::from(3).is_odd(), true);\n                assert_eq!($type::from(4).is_odd(), false);\n            }\n\n            #[test]\n            #[should_panic]\n            fn overflow_mul_test() {\n                let _ = $type::MAX * $type::from(2);\n            }\n\n            #[test]\n            #[should_panic]\n            fn overflow_add_test() {\n                let _ = $type::MAX + $type::from(1);\n            }\n\n            #[test]\n            #[should_panic]\n            fn underflow_sub_test() {\n                let _ = $type::zero() - $type::from(1);\n            }\n        }\n    };\n}\n\nimpl_traits_for_uint!(U128, 16, u128_test);\nimpl_traits_for_uint!(U256, 32, u256_test);\nimpl_traits_for_uint!(U512, 64, u512_test);\n\nimpl AsPrimitive<U128> for U128 {\n    fn as_(self) -> U128 {\n        self\n    }\n}\n\nimpl AsPrimitive<U256> for U128 {\n    fn as_(self) -> U256 {\n        let mut result = U256::zero();\n        result.0[..2].clone_from_slice(&self.0[..2]);\n        result\n    }\n}\n\nimpl AsPrimitive<U512> for U128 {\n    fn as_(self) -> U512 {\n        let mut result = U512::zero();\n        result.0[..2].clone_from_slice(&self.0[..2]);\n        result\n    }\n}\n\nimpl AsPrimitive<U128> for U256 {\n    fn as_(self) -> U128 {\n        let mut result = U128::zero();\n        result.0[..2].clone_from_slice(&self.0[..2]);\n        result\n    }\n}\n\nimpl AsPrimitive<U256> for U256 {\n    fn as_(self) -> U256 {\n        self\n    
}\n}\n\nimpl AsPrimitive<U512> for U256 {\n    fn as_(self) -> U512 {\n        let mut result = U512::zero();\n        result.0[..4].clone_from_slice(&self.0[..4]);\n        result\n    }\n}\n\nimpl AsPrimitive<U128> for U512 {\n    fn as_(self) -> U128 {\n        let mut result = U128::zero();\n        result.0[..2].clone_from_slice(&self.0[..2]);\n        result\n    }\n}\n\nimpl AsPrimitive<U256> for U512 {\n    fn as_(self) -> U256 {\n        let mut result = U256::zero();\n        result.0[..4].clone_from_slice(&self.0[..4]);\n        result\n    }\n}\n\nimpl AsPrimitive<U512> for U512 {\n    fn as_(self) -> U512 {\n        self\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::fmt::Debug;\n\n    use serde::de::DeserializeOwned;\n\n    use super::*;\n\n    fn check_as_i32<T: AsPrimitive<i32>>(expected: i32, input: T) {\n        assert_eq!(expected, input.as_());\n    }\n\n    fn check_as_i64<T: AsPrimitive<i64>>(expected: i64, input: T) {\n        assert_eq!(expected, input.as_());\n    }\n\n    fn check_as_u8<T: AsPrimitive<u8>>(expected: u8, input: T) {\n        assert_eq!(expected, input.as_());\n    }\n\n    fn check_as_u32<T: AsPrimitive<u32>>(expected: u32, input: T) {\n        assert_eq!(expected, input.as_());\n    }\n\n    fn check_as_u64<T: AsPrimitive<u64>>(expected: u64, input: T) {\n        assert_eq!(expected, input.as_());\n    }\n\n    fn check_as_u128<T: AsPrimitive<U128>>(expected: U128, input: T) {\n        assert_eq!(expected, input.as_());\n    }\n\n    fn check_as_u256<T: AsPrimitive<U256>>(expected: U256, input: T) {\n        assert_eq!(expected, input.as_());\n    }\n\n    fn check_as_u512<T: AsPrimitive<U512>>(expected: U512, input: T) {\n        assert_eq!(expected, input.as_());\n    }\n\n    #[test]\n    fn as_primitive_from_i32() {\n        let mut input = 0_i32;\n        check_as_i32(0, input);\n        check_as_i64(0, input);\n        check_as_u8(0, input);\n        check_as_u32(0, input);\n        check_as_u64(0, input);\n    
    check_as_u128(U128::zero(), input);\n        check_as_u256(U256::zero(), input);\n        check_as_u512(U512::zero(), input);\n\n        input = i32::MAX - 1;\n        check_as_i32(input, input);\n        check_as_i64(i64::from(input), input);\n        check_as_u8(input as u8, input);\n        check_as_u32(input as u32, input);\n        check_as_u64(input as u64, input);\n        check_as_u128(U128::from(input), input);\n        check_as_u256(U256::from(input), input);\n        check_as_u512(U512::from(input), input);\n\n        input = i32::MIN + 1;\n        check_as_i32(input, input);\n        check_as_i64(i64::from(input), input);\n        check_as_u8(input as u8, input);\n        check_as_u32(input as u32, input);\n        check_as_u64(input as u64, input);\n        // i32::MIN is -1 - i32::MAX\n        check_as_u128(U128::zero().wrapping_sub(&U128::from(i32::MAX)), input);\n        check_as_u256(U256::zero().wrapping_sub(&U256::from(i32::MAX)), input);\n        check_as_u512(U512::zero().wrapping_sub(&U512::from(i32::MAX)), input);\n    }\n\n    #[test]\n    fn as_primitive_from_i64() {\n        let mut input = 0_i64;\n        check_as_i32(0, input);\n        check_as_i64(0, input);\n        check_as_u8(0, input);\n        check_as_u32(0, input);\n        check_as_u64(0, input);\n        check_as_u128(U128::zero(), input);\n        check_as_u256(U256::zero(), input);\n        check_as_u512(U512::zero(), input);\n\n        input = i64::MAX - 1;\n        check_as_i32(input as i32, input);\n        check_as_i64(input, input);\n        check_as_u8(input as u8, input);\n        check_as_u32(input as u32, input);\n        check_as_u64(input as u64, input);\n        check_as_u128(U128::from(input), input);\n        check_as_u256(U256::from(input), input);\n        check_as_u512(U512::from(input), input);\n\n        input = i64::MIN + 1;\n        check_as_i32(input as i32, input);\n        check_as_i64(input, input);\n        check_as_u8(input as u8, input);\n     
   check_as_u32(input as u32, input);\n        check_as_u64(input as u64, input);\n        // i64::MIN is (-1 - i64::MAX)\n        check_as_u128(U128::zero().wrapping_sub(&U128::from(i64::MAX)), input);\n        check_as_u256(U256::zero().wrapping_sub(&U256::from(i64::MAX)), input);\n        check_as_u512(U512::zero().wrapping_sub(&U512::from(i64::MAX)), input);\n    }\n\n    #[test]\n    fn as_primitive_from_u8() {\n        let mut input = 0_u8;\n        check_as_i32(0, input);\n        check_as_i64(0, input);\n        check_as_u8(0, input);\n        check_as_u32(0, input);\n        check_as_u64(0, input);\n        check_as_u128(U128::zero(), input);\n        check_as_u256(U256::zero(), input);\n        check_as_u512(U512::zero(), input);\n\n        input = u8::MAX - 1;\n        check_as_i32(i32::from(input), input);\n        check_as_i64(i64::from(input), input);\n        check_as_u8(input, input);\n        check_as_u32(u32::from(input), input);\n        check_as_u64(u64::from(input), input);\n        check_as_u128(U128::from(input), input);\n        check_as_u256(U256::from(input), input);\n        check_as_u512(U512::from(input), input);\n    }\n\n    #[test]\n    fn as_primitive_from_u32() {\n        let mut input = 0_u32;\n        check_as_i32(0, input);\n        check_as_i64(0, input);\n        check_as_u8(0, input);\n        check_as_u32(0, input);\n        check_as_u64(0, input);\n        check_as_u128(U128::zero(), input);\n        check_as_u256(U256::zero(), input);\n        check_as_u512(U512::zero(), input);\n\n        input = u32::MAX - 1;\n        check_as_i32(input as i32, input);\n        check_as_i64(i64::from(input), input);\n        check_as_u8(input as u8, input);\n        check_as_u32(input, input);\n        check_as_u64(u64::from(input), input);\n        check_as_u128(U128::from(input), input);\n        check_as_u256(U256::from(input), input);\n        check_as_u512(U512::from(input), input);\n    }\n\n    #[test]\n    fn 
as_primitive_from_u64() {\n        let mut input = 0_u64;\n        check_as_i32(0, input);\n        check_as_i64(0, input);\n        check_as_u8(0, input);\n        check_as_u32(0, input);\n        check_as_u64(0, input);\n        check_as_u128(U128::zero(), input);\n        check_as_u256(U256::zero(), input);\n        check_as_u512(U512::zero(), input);\n\n        input = u64::MAX - 1;\n        check_as_i32(input as i32, input);\n        check_as_i64(input as i64, input);\n        check_as_u8(input as u8, input);\n        check_as_u32(input as u32, input);\n        check_as_u64(input, input);\n        check_as_u128(U128::from(input), input);\n        check_as_u256(U256::from(input), input);\n        check_as_u512(U512::from(input), input);\n    }\n\n    fn make_little_endian_arrays(little_endian_bytes: &[u8]) -> ([u8; 4], [u8; 8]) {\n        let le_32 = {\n            let mut le_32 = [0; 4];\n            le_32.copy_from_slice(&little_endian_bytes[..4]);\n            le_32\n        };\n\n        let le_64 = {\n            let mut le_64 = [0; 8];\n            le_64.copy_from_slice(&little_endian_bytes[..8]);\n            le_64\n        };\n\n        (le_32, le_64)\n    }\n\n    #[test]\n    fn as_primitive_from_u128() {\n        let mut input = U128::zero();\n        check_as_i32(0, input);\n        check_as_i64(0, input);\n        check_as_u8(0, input);\n        check_as_u32(0, input);\n        check_as_u64(0, input);\n        check_as_u128(U128::zero(), input);\n        check_as_u256(U256::zero(), input);\n        check_as_u512(U512::zero(), input);\n\n        input = U128::MAX - 1;\n\n        let mut little_endian_bytes = [0_u8; 64];\n        input.to_little_endian(&mut little_endian_bytes[..16]);\n        let (le_32, le_64) = make_little_endian_arrays(&little_endian_bytes);\n\n        check_as_i32(i32::from_le_bytes(le_32), input);\n        check_as_i64(i64::from_le_bytes(le_64), input);\n        check_as_u8(little_endian_bytes[0], input);\n        
check_as_u32(u32::from_le_bytes(le_32), input);\n        check_as_u64(u64::from_le_bytes(le_64), input);\n        check_as_u128(U128::from_little_endian(&little_endian_bytes[..16]), input);\n        check_as_u256(U256::from_little_endian(&little_endian_bytes[..32]), input);\n        check_as_u512(U512::from_little_endian(&little_endian_bytes), input);\n    }\n\n    #[test]\n    fn as_primitive_from_u256() {\n        let mut input = U256::zero();\n        check_as_i32(0, input);\n        check_as_i64(0, input);\n        check_as_u8(0, input);\n        check_as_u32(0, input);\n        check_as_u64(0, input);\n        check_as_u128(U128::zero(), input);\n        check_as_u256(U256::zero(), input);\n        check_as_u512(U512::zero(), input);\n\n        input = U256::MAX - 1;\n\n        let mut little_endian_bytes = [0_u8; 64];\n        input.to_little_endian(&mut little_endian_bytes[..32]);\n        let (le_32, le_64) = make_little_endian_arrays(&little_endian_bytes);\n\n        check_as_i32(i32::from_le_bytes(le_32), input);\n        check_as_i64(i64::from_le_bytes(le_64), input);\n        check_as_u8(little_endian_bytes[0], input);\n        check_as_u32(u32::from_le_bytes(le_32), input);\n        check_as_u64(u64::from_le_bytes(le_64), input);\n        check_as_u128(U128::from_little_endian(&little_endian_bytes[..16]), input);\n        check_as_u256(U256::from_little_endian(&little_endian_bytes[..32]), input);\n        check_as_u512(U512::from_little_endian(&little_endian_bytes), input);\n    }\n\n    #[test]\n    fn as_primitive_from_u512() {\n        let mut input = U512::zero();\n        check_as_i32(0, input);\n        check_as_i64(0, input);\n        check_as_u8(0, input);\n        check_as_u32(0, input);\n        check_as_u64(0, input);\n        check_as_u128(U128::zero(), input);\n        check_as_u256(U256::zero(), input);\n        check_as_u512(U512::zero(), input);\n\n        input = U512::MAX - 1;\n\n        let mut little_endian_bytes = [0_u8; 64];\n     
   input.to_little_endian(&mut little_endian_bytes);\n        let (le_32, le_64) = make_little_endian_arrays(&little_endian_bytes);\n\n        check_as_i32(i32::from_le_bytes(le_32), input);\n        check_as_i64(i64::from_le_bytes(le_64), input);\n        check_as_u8(little_endian_bytes[0], input);\n        check_as_u32(u32::from_le_bytes(le_32), input);\n        check_as_u64(u64::from_le_bytes(le_64), input);\n        check_as_u128(U128::from_little_endian(&little_endian_bytes[..16]), input);\n        check_as_u256(U256::from_little_endian(&little_endian_bytes[..32]), input);\n        check_as_u512(U512::from_little_endian(&little_endian_bytes), input);\n    }\n\n    #[test]\n    fn wrapping_test_u512() {\n        let max = U512::MAX;\n        let value = max.wrapping_add(&1.into());\n        assert_eq!(value, 0.into());\n\n        let min = U512::MIN;\n        let value = min.wrapping_sub(&1.into());\n        assert_eq!(value, U512::MAX);\n    }\n\n    #[test]\n    fn wrapping_test_u256() {\n        let max = U256::MAX;\n        let value = max.wrapping_add(&1.into());\n        assert_eq!(value, 0.into());\n\n        let min = U256::MIN;\n        let value = min.wrapping_sub(&1.into());\n        assert_eq!(value, U256::MAX);\n    }\n\n    #[test]\n    fn wrapping_test_u128() {\n        let max = U128::MAX;\n        let value = max.wrapping_add(&1.into());\n        assert_eq!(value, 0.into());\n\n        let min = U128::MIN;\n        let value = min.wrapping_sub(&1.into());\n        assert_eq!(value, U128::MAX);\n    }\n\n    fn serde_roundtrip<T: Serialize + DeserializeOwned + Eq + Debug>(value: T) {\n        {\n            let serialized = bincode::serialize(&value).unwrap();\n            let deserialized = bincode::deserialize(serialized.as_slice()).unwrap();\n            assert_eq!(value, deserialized);\n        }\n        {\n            let serialized = serde_json::to_string_pretty(&value).unwrap();\n            let deserialized = 
serde_json::from_str(&serialized).unwrap();\n            assert_eq!(value, deserialized);\n        }\n    }\n\n    #[test]\n    fn serde_roundtrip_u512() {\n        serde_roundtrip(U512::MIN);\n        serde_roundtrip(U512::from(1));\n        serde_roundtrip(U512::from(u64::MAX));\n        serde_roundtrip(U512::MAX);\n    }\n\n    #[test]\n    fn serde_roundtrip_u256() {\n        serde_roundtrip(U256::MIN);\n        serde_roundtrip(U256::from(1));\n        serde_roundtrip(U256::from(u64::MAX));\n        serde_roundtrip(U256::MAX);\n    }\n\n    #[test]\n    fn serde_roundtrip_u128() {\n        serde_roundtrip(U128::MIN);\n        serde_roundtrip(U128::from(1));\n        serde_roundtrip(U128::from(u64::MAX));\n        serde_roundtrip(U128::MAX);\n    }\n\n    #[test]\n    fn safe_conversion_from_u512_to_u64() {\n        let mut value = U512::from(u64::MAX);\n        assert_eq!(value.try_into(), Ok(u64::MAX));\n        value += U512::one();\n        assert!(\n            matches!(value.try_into(), Result::<u64, _>::Err(_)),\n            \"integer overflow when casting to u64\"\n        );\n    }\n}\n"
  },
  {
    "path": "types/src/uref.rs",
    "content": "use alloc::{format, string::String, vec::Vec};\nuse core::{\n    array::TryFromSliceError,\n    convert::TryFrom,\n    fmt::{self, Debug, Display, Formatter},\n    num::ParseIntError,\n};\n\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(any(feature = \"testing\", test))]\nuse rand::{\n    distributions::{Distribution, Standard},\n    Rng,\n};\n#[cfg(feature = \"json-schema\")]\nuse schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema};\nuse serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer};\n\nuse crate::{\n    bytesrepr,\n    bytesrepr::{Error, FromBytes},\n    checksummed_hex, AccessRights, ApiError, Key, ACCESS_RIGHTS_SERIALIZED_LENGTH,\n};\n\n/// The number of bytes in a [`URef`] address.\npub const UREF_ADDR_LENGTH: usize = 32;\n\n/// The number of bytes in a serialized [`URef`] where the [`AccessRights`] are not `None`.\npub const UREF_SERIALIZED_LENGTH: usize = UREF_ADDR_LENGTH + ACCESS_RIGHTS_SERIALIZED_LENGTH;\n\npub(super) const UREF_FORMATTED_STRING_PREFIX: &str = \"uref-\";\n\n/// The address of a `URef` (unforgeable reference) on the network.\npub type URefAddr = [u8; UREF_ADDR_LENGTH];\n\n/// Error while parsing a URef from a formatted string.\n#[derive(Debug)]\n#[non_exhaustive]\npub enum FromStrError {\n    /// Prefix is not \"uref-\".\n    InvalidPrefix,\n    /// No access rights as suffix.\n    MissingSuffix,\n    /// Access rights are invalid.\n    InvalidAccessRights,\n    /// Failed to decode address portion of URef.\n    Hex(base16::DecodeError),\n    /// Failed to parse an int.\n    Int(ParseIntError),\n    /// The address portion is the wrong length.\n    Address(TryFromSliceError),\n}\n\nimpl From<base16::DecodeError> for FromStrError {\n    fn from(error: base16::DecodeError) -> Self {\n        FromStrError::Hex(error)\n    }\n}\n\nimpl From<ParseIntError> for FromStrError {\n    fn from(error: ParseIntError) -> Self {\n        FromStrError::Int(error)\n    
}\n}\n\nimpl From<TryFromSliceError> for FromStrError {\n    fn from(error: TryFromSliceError) -> Self {\n        FromStrError::Address(error)\n    }\n}\n\nimpl Display for FromStrError {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        match self {\n            FromStrError::InvalidPrefix => write!(f, \"prefix is not 'uref-'\"),\n            FromStrError::MissingSuffix => write!(f, \"no access rights as suffix\"),\n            FromStrError::InvalidAccessRights => write!(f, \"invalid access rights\"),\n            FromStrError::Hex(error) => {\n                write!(f, \"failed to decode address portion from hex: {}\", error)\n            }\n            FromStrError::Int(error) => write!(f, \"failed to parse an int: {}\", error),\n            FromStrError::Address(error) => {\n                write!(f, \"address portion is the wrong length: {}\", error)\n            }\n        }\n    }\n}\n\n/// Represents an unforgeable reference, containing an address in the network's global storage and\n/// the [`AccessRights`] of the reference.\n///\n/// A `URef` can be used to index entities such as [`CLValue`](crate::CLValue)s, or smart contracts.\n#[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Default)]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub struct URef(URefAddr, AccessRights);\n\nimpl URef {\n    /// Constructs a [`URef`] from an address and access rights.\n    pub const fn new(address: URefAddr, access_rights: AccessRights) -> Self {\n        URef(address, access_rights)\n    }\n\n    /// Returns the address of this [`URef`].\n    pub fn addr(&self) -> URefAddr {\n        self.0\n    }\n\n    /// Returns the access rights of this [`URef`].\n    pub fn access_rights(&self) -> AccessRights {\n        self.1\n    }\n\n    /// Returns a new [`URef`] with the same address and updated access rights.\n    #[must_use]\n    pub fn with_access_rights(self, access_rights: AccessRights) -> Self {\n        URef(self.0, access_rights)\n  
  }\n\n    /// Removes the access rights from this [`URef`].\n    #[must_use]\n    pub fn remove_access_rights(self) -> Self {\n        URef(self.0, AccessRights::NONE)\n    }\n\n    /// Returns `true` if the access rights are `Some` and\n    /// [`is_readable`](AccessRights::is_readable) is `true` for them.\n    #[must_use]\n    pub fn is_readable(self) -> bool {\n        self.1.is_readable()\n    }\n\n    /// Returns a new [`URef`] with the same address and [`AccessRights::READ`] permission.\n    #[must_use]\n    pub fn into_read(self) -> URef {\n        URef(self.0, AccessRights::READ)\n    }\n\n    /// Returns a new [`URef`] with the same address and [`AccessRights::WRITE`] permission.\n    #[must_use]\n    pub fn into_write(self) -> URef {\n        URef(self.0, AccessRights::WRITE)\n    }\n\n    /// Returns a new [`URef`] with the same address and [`AccessRights::ADD`] permission.\n    #[must_use]\n    pub fn into_add(self) -> URef {\n        URef(self.0, AccessRights::ADD)\n    }\n\n    /// Returns a new [`URef`] with the same address and [`AccessRights::READ_ADD_WRITE`]\n    /// permission.\n    #[must_use]\n    pub fn into_read_add_write(self) -> URef {\n        URef(self.0, AccessRights::READ_ADD_WRITE)\n    }\n\n    /// Returns a new [`URef`] with the same address and [`AccessRights::READ_WRITE`]\n    /// permission.\n    #[must_use]\n    pub fn into_read_write(self) -> URef {\n        URef(self.0, AccessRights::READ_WRITE)\n    }\n\n    /// Returns `true` if the access rights are `Some` and\n    /// [`is_writeable`](AccessRights::is_writeable) is `true` for them.\n    pub fn is_writeable(self) -> bool {\n        self.1.is_writeable()\n    }\n\n    /// Returns `true` if the access rights are `Some` and [`is_addable`](AccessRights::is_addable)\n    /// is `true` for them.\n    pub fn is_addable(self) -> bool {\n        self.1.is_addable()\n    }\n\n    /// Formats the address and access rights of the [`URef`] in a unique way that could be used as\n    /// 
a name when storing the given `URef` in a global state.\n    pub fn to_formatted_string(self) -> String {\n        // Extract bits as numerical value, with no flags marked as 0.\n        let access_rights_bits = self.access_rights().bits();\n        // Access rights is represented as octal, which means that max value of u8 can\n        // be represented as maximum of 3 octal digits.\n        format!(\n            \"{}{}-{:03o}\",\n            UREF_FORMATTED_STRING_PREFIX,\n            base16::encode_lower(&self.addr()),\n            access_rights_bits\n        )\n    }\n\n    /// Parses a string formatted as per `Self::to_formatted_string()` into a `URef`.\n    pub fn from_formatted_str(input: &str) -> Result<Self, FromStrError> {\n        let remainder = input\n            .strip_prefix(UREF_FORMATTED_STRING_PREFIX)\n            .ok_or(FromStrError::InvalidPrefix)?;\n        let parts = remainder.splitn(2, '-').collect::<Vec<_>>();\n        if parts.len() != 2 {\n            return Err(FromStrError::MissingSuffix);\n        }\n        let addr = URefAddr::try_from(checksummed_hex::decode(parts[0])?.as_ref())?;\n        let access_rights_value = u8::from_str_radix(parts[1], 8)?;\n        let access_rights = AccessRights::from_bits(access_rights_value)\n            .ok_or(FromStrError::InvalidAccessRights)?;\n        Ok(URef(addr, access_rights))\n    }\n\n    /// Removes specific access rights from this URef if present.\n    pub fn disable_access_rights(&mut self, access_rights: AccessRights) {\n        self.1.remove(access_rights);\n    }\n}\n\n#[cfg(feature = \"json-schema\")]\nimpl JsonSchema for URef {\n    fn schema_name() -> String {\n        String::from(\"URef\")\n    }\n\n    fn json_schema(gen: &mut SchemaGenerator) -> Schema {\n        let schema = gen.subschema_for::<String>();\n        let mut schema_object = schema.into_object();\n        schema_object.metadata().description = Some(String::from(\"Hex-encoded, formatted URef.\"));\n        
schema_object.into()\n    }\n}\n\nimpl Display for URef {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        let addr = self.addr();\n        let access_rights = self.access_rights();\n        write!(\n            f,\n            \"URef({}, {})\",\n            base16::encode_lower(&addr),\n            access_rights\n        )\n    }\n}\n\nimpl Debug for URef {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        write!(f, \"{}\", self)\n    }\n}\n\nimpl bytesrepr::ToBytes for URef {\n    fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut result = bytesrepr::unchecked_allocate_buffer(self);\n        result.append(&mut self.0.to_bytes()?);\n        result.append(&mut self.1.to_bytes()?);\n        Ok(result)\n    }\n\n    fn serialized_length(&self) -> usize {\n        UREF_SERIALIZED_LENGTH\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), self::Error> {\n        writer.extend_from_slice(&self.0);\n        self.1.write_bytes(writer)?;\n        Ok(())\n    }\n}\n\nimpl FromBytes for URef {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> {\n        let (id, rem) = FromBytes::from_bytes(bytes)?;\n        let (access_rights, rem) = FromBytes::from_bytes(rem)?;\n        Ok((URef(id, access_rights), rem))\n    }\n}\n\nimpl Serialize for URef {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        if serializer.is_human_readable() {\n            self.to_formatted_string().serialize(serializer)\n        } else {\n            (self.0, self.1).serialize(serializer)\n        }\n    }\n}\n\nimpl<'de> Deserialize<'de> for URef {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        if deserializer.is_human_readable() {\n            let formatted_string = String::deserialize(deserializer)?;\n            URef::from_formatted_str(&formatted_string).map_err(D::Error::custom)\n        } else {\n            let (address, 
access_rights) = <(URefAddr, AccessRights)>::deserialize(deserializer)?;\n            Ok(URef(address, access_rights))\n        }\n    }\n}\n\nimpl TryFrom<Key> for URef {\n    type Error = ApiError;\n\n    fn try_from(key: Key) -> Result<Self, Self::Error> {\n        if let Key::URef(uref) = key {\n            Ok(uref)\n        } else {\n            Err(ApiError::UnexpectedKeyVariant)\n        }\n    }\n}\n\n#[cfg(any(feature = \"testing\", test))]\nimpl Distribution<URef> for Standard {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> URef {\n        URef::new(rng.gen(), rng.gen())\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn uref_as_string() {\n        // Since we are putting URefs to named_keys map keyed by the label that\n        // `as_string()` returns, any changes to the string representation of\n        // that type cannot break the format.\n        let addr_array = [0u8; 32];\n        let uref_a = URef::new(addr_array, AccessRights::READ);\n        assert_eq!(\n            uref_a.to_formatted_string(),\n            \"uref-0000000000000000000000000000000000000000000000000000000000000000-001\"\n        );\n        let uref_b = URef::new(addr_array, AccessRights::WRITE);\n        assert_eq!(\n            uref_b.to_formatted_string(),\n            \"uref-0000000000000000000000000000000000000000000000000000000000000000-002\"\n        );\n\n        let uref_c = uref_b.remove_access_rights();\n        assert_eq!(\n            uref_c.to_formatted_string(),\n            \"uref-0000000000000000000000000000000000000000000000000000000000000000-000\"\n        );\n    }\n\n    fn round_trip(uref: URef) {\n        let string = uref.to_formatted_string();\n        let parsed_uref = URef::from_formatted_str(&string).unwrap();\n        assert_eq!(uref, parsed_uref);\n    }\n\n    #[test]\n    fn uref_from_str() {\n        round_trip(URef::new([0; 32], AccessRights::NONE));\n        round_trip(URef::new([255; 32], 
AccessRights::READ_ADD_WRITE));\n\n        let invalid_prefix =\n            \"ref-0000000000000000000000000000000000000000000000000000000000000000-000\";\n        assert!(URef::from_formatted_str(invalid_prefix).is_err());\n\n        let invalid_prefix =\n            \"uref0000000000000000000000000000000000000000000000000000000000000000-000\";\n        assert!(URef::from_formatted_str(invalid_prefix).is_err());\n\n        let short_addr = \"uref-00000000000000000000000000000000000000000000000000000000000000-000\";\n        assert!(URef::from_formatted_str(short_addr).is_err());\n\n        let long_addr =\n            \"uref-000000000000000000000000000000000000000000000000000000000000000000-000\";\n        assert!(URef::from_formatted_str(long_addr).is_err());\n\n        let invalid_hex =\n            \"uref-000000000000000000000000000000000000000000000000000000000000000g-000\";\n        assert!(URef::from_formatted_str(invalid_hex).is_err());\n\n        let invalid_suffix_separator =\n            \"uref-0000000000000000000000000000000000000000000000000000000000000000:000\";\n        assert!(URef::from_formatted_str(invalid_suffix_separator).is_err());\n\n        let invalid_suffix =\n            \"uref-0000000000000000000000000000000000000000000000000000000000000000-abc\";\n        assert!(URef::from_formatted_str(invalid_suffix).is_err());\n\n        let invalid_access_rights =\n            \"uref-0000000000000000000000000000000000000000000000000000000000000000-200\";\n        assert!(URef::from_formatted_str(invalid_access_rights).is_err());\n    }\n\n    #[test]\n    fn serde_roundtrip() {\n        let uref = URef::new([255; 32], AccessRights::READ_ADD_WRITE);\n        let serialized = bincode::serialize(&uref).unwrap();\n        let decoded = bincode::deserialize(&serialized).unwrap();\n        assert_eq!(uref, decoded);\n    }\n\n    #[test]\n    fn json_roundtrip() {\n        let uref = URef::new([255; 32], AccessRights::READ_ADD_WRITE);\n        let 
json_string = serde_json::to_string_pretty(&uref).unwrap();\n        let decoded = serde_json::from_str(&json_string).unwrap();\n        assert_eq!(uref, decoded);\n    }\n\n    #[test]\n    fn should_disable_access_rights() {\n        let mut uref = URef::new([255; 32], AccessRights::READ_ADD_WRITE);\n        assert!(uref.is_writeable());\n        uref.disable_access_rights(AccessRights::WRITE);\n        assert_eq!(uref.access_rights(), AccessRights::READ_ADD);\n\n        uref.disable_access_rights(AccessRights::WRITE);\n        assert!(\n            !uref.is_writeable(),\n            \"Disabling access bit twice should be a noop\"\n        );\n\n        assert_eq!(uref.access_rights(), AccessRights::READ_ADD);\n\n        uref.disable_access_rights(AccessRights::READ_ADD);\n        assert_eq!(uref.access_rights(), AccessRights::NONE);\n\n        uref.disable_access_rights(AccessRights::READ_ADD);\n        assert_eq!(uref.access_rights(), AccessRights::NONE);\n\n        uref.disable_access_rights(AccessRights::NONE);\n        assert_eq!(uref.access_rights(), AccessRights::NONE);\n    }\n}\n"
  },
  {
    "path": "types/src/validator_change.rs",
    "content": "use crate::bytesrepr::{self, FromBytes, ToBytes};\n#[cfg(any(feature = \"testing\", test))]\nuse crate::testing::TestRng;\nuse alloc::vec::Vec;\n#[cfg(feature = \"datasize\")]\nuse datasize::DataSize;\n#[cfg(feature = \"json-schema\")]\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\n/// A change to a validator's status between two eras.\n#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Ord, PartialOrd)]\n#[cfg_attr(feature = \"json-schema\", derive(JsonSchema))]\n#[cfg_attr(feature = \"datasize\", derive(DataSize))]\npub enum ValidatorChange {\n    /// The validator got newly added to the validator set.\n    Added,\n    /// The validator was removed from the validator set.\n    Removed,\n    /// The validator was banned from this era.\n    Banned,\n    /// The validator was excluded from proposing new blocks in this era.\n    CannotPropose,\n    /// We saw the validator misbehave in this era.\n    SeenAsFaulty,\n}\n\nimpl ValidatorChange {\n    /// Returns a random `ValidatorChange`.\n    #[cfg(any(feature = \"testing\", test))]\n    pub fn random(rng: &mut TestRng) -> Self {\n        use rand::Rng;\n\n        match rng.gen_range(0..5) {\n            ADDED_TAG => ValidatorChange::Added,\n            REMOVED_TAG => ValidatorChange::Removed,\n            BANNED_TAG => ValidatorChange::Banned,\n            CANNOT_PROPOSE_TAG => ValidatorChange::CannotPropose,\n            SEEN_AS_FAULTY_TAG => ValidatorChange::SeenAsFaulty,\n            _ => unreachable!(),\n        }\n    }\n}\n\nconst ADDED_TAG: u8 = 0;\nconst REMOVED_TAG: u8 = 1;\nconst BANNED_TAG: u8 = 2;\nconst CANNOT_PROPOSE_TAG: u8 = 3;\nconst SEEN_AS_FAULTY_TAG: u8 = 4;\n\nimpl ToBytes for ValidatorChange {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        let mut buffer = bytesrepr::allocate_buffer(self)?;\n        self.write_bytes(&mut buffer)?;\n        Ok(buffer)\n    }\n\n    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), 
bytesrepr::Error> {\n        match self {\n            ValidatorChange::Added => ADDED_TAG,\n            ValidatorChange::Removed => REMOVED_TAG,\n            ValidatorChange::Banned => BANNED_TAG,\n            ValidatorChange::CannotPropose => CANNOT_PROPOSE_TAG,\n            ValidatorChange::SeenAsFaulty => SEEN_AS_FAULTY_TAG,\n        }\n        .write_bytes(writer)\n    }\n\n    fn serialized_length(&self) -> usize {\n        bytesrepr::U8_SERIALIZED_LENGTH\n    }\n}\n\nimpl FromBytes for ValidatorChange {\n    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {\n        let (tag, remainder) = u8::from_bytes(bytes)?;\n        let id = match tag {\n            ADDED_TAG => ValidatorChange::Added,\n            REMOVED_TAG => ValidatorChange::Removed,\n            BANNED_TAG => ValidatorChange::Banned,\n            CANNOT_PROPOSE_TAG => ValidatorChange::CannotPropose,\n            SEEN_AS_FAULTY_TAG => ValidatorChange::SeenAsFaulty,\n            _ => return Err(bytesrepr::Error::NotRepresentable),\n        };\n        Ok((id, remainder))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::testing::TestRng;\n\n    #[test]\n    fn bytesrepr_roundtrip() {\n        let rng = &mut TestRng::new();\n\n        let val = ValidatorChange::random(rng);\n        bytesrepr::test_serialization_roundtrip(&val);\n    }\n}\n"
  },
  {
    "path": "types/tests/version_numbers.rs",
    "content": "#[cfg(feature = \"version-sync\")]\n#[test]\nfn test_html_root_url() {\n    version_sync::assert_html_root_url_updated!(\"src/lib.rs\");\n}\n"
  },
  {
    "path": "utils/accounts_toml/encode_account_toml.py",
    "content": "import argparse\nfrom pathlib import Path\ntry:\n    import toml\nexcept ImportError:\n    print(\"toml package not available.\")\n    print(\"Install with `python3 -m pip install toml`.\")\n    exit(1)\n\n# This file takes two .csv files and encodes into one accounts.toml.\n# accounts.csv    <public_key_hex>,<balance>,<bonded amt>,<delegation rate>\n# delegators.csv  <delegator_key_hex>,<balance>,<validator_key_hex>,<delegation_amt>\n\n\ndef parse_args():\n\n    def file_path(path):\n        if not Path(path).is_file():\n            raise argparse.ArgumentTypeError(f'{path} is not a valid file.')\n        if not Path(path).exists():\n            raise argparse.ArgumentTypeError(f'{path} does not exist.')\n        return Path(path)\n\n    def valid_dir(path):\n        if not Path(path).is_dir():\n            raise argparse.ArgumentTypeError(f'{path} is not a valid directory.')\n        if not Path(path).exists():\n            raise argparse.ArgumentTypeError(f'{path} does not exist.')\n        return Path(path)\n\n    parser = argparse.ArgumentParser(description='Convert csv files to accounts.toml.')\n    parser.add_argument('-a', type=file_path, help=\"Path to accounts.csv file\", required=True)\n    parser.add_argument('-d', type=file_path, help=\"Path to delegators.csv file\", required=False)\n\n    return parser.parse_args()\n\n\ndef load_accounts_csv(csv_path: Path):\n    accounts = []\n    for line in csv_path.read_text().strip().splitlines():\n        columns = line.split(',')\n        if len(columns) != 4:\n            print(f\"Expected 4 pieces of data: {line}\")\n            exit(1)\n        account_dict = {\"public_key\": columns[0],\n                        \"balance\": columns[1]}\n        if columns[2] != \"\" or columns[3] != \"\":\n            account_dict[\"validator\"] = {\"bonded_amount\": columns[2],\n                                         \"delegation_rate\": int(columns[3])}\n        accounts.append(account_dict)\n    return 
accounts\n\n\ndef load_delegators_csv(csv_path: Path):\n    delegators = []\n    for line in csv_path.read_text().strip().splitlines():\n        columns = line.split(',')\n        if len(columns) != 4:\n            print(f\"Expected 4 pieces of data: {line}\")\n            exit(1)\n        delegators.append({\"delegator_public_key\": columns[0],\n                           \"balance\": columns[1],\n                           \"validator_public_key\": columns[2],\n                           \"delegated_amount\": columns[3]})\n    return delegators\n\n\nif __name__ == '__main__':\n    args = parse_args()\n\n    doc_dict = {\"accounts\": load_accounts_csv(args.a)}\n    if args.d:\n        doc_dict[\"delegators\"] = load_delegators_csv(args.d)\n\n    toml_output = toml.dumps(doc_dict)\n    print(toml_output)\n"
  },
  {
    "path": "utils/accounts_toml/test_validate.py",
    "content": "from validate import is_valid_key\n\ngood_keys = \"\"\"01c1beefebddc0179d55899d689ab2fb7083fab9ee59070a98c941f80798a57680\n01f2f5492263be29234647f026c69861aad349afed181b603fc2b6cf837901a231\n01b04a13decfaab9fddadafc44ff735233a848dd38b38482a394a07cc4ed819e6b\n020317bba9fc02817f9c84e83028bb3ffe167fdd1e27eaaa9526521ba1308303b923\n0203f075a57445b293587aa6742f94e4b1aaf344cf6f8c87797ddbb6fe1a6df7daef\n020322a29bb39b43e8100b9e4aa467e067af612ffc4b1f5b48b4053b8c35a5dc45c1\"\"\".splitlines()\n\nbad_keys = \"\"\"039f63d614f098be9bd30265d5e4c00767afb49e8a3dd6237b8becf6ba5245744b\n040317bba9fc02817f9c84e83028bb3ffe167fdd1e27eaaa9526521ba1308303b923\n0168f4f25gf641623277e47613e7bec1ec384bb0debfd1c33c91714fb350567526\n019f204cb4f04382745ffd327954ab3e80dbbd71b3ef2e98d15cf8af5837eadcb\n0203f075a57445b293587aa6742f94e4b1aaf344cf6f8c87797ddbb6fe1a6df7daef2\n0203f075a57445b293587aa6742f94e4b1aaf344cf6f8c87797ddbb6fe1a6df7df2\"\"\".splitlines()\n\nfor key in good_keys:\n    assert(is_valid_key(key))\n\nfor key in bad_keys:\n    assert(not is_valid_key(key))\n"
  },
  {
    "path": "utils/accounts_toml/validate.py",
    "content": "\n\ndef is_valid_key(key_hex: str) -> bool:\n    key_length = {\"01\": 66,\n                  \"02\": 68}\n    start = key_hex[:2]\n    try:\n        length = key_length[start]\n        if len(key_hex) != length:\n            return False\n        _ = int(key_hex, 16)\n    except KeyError:\n        # Key doesn't start with \"01\" or \"02\"\n        return False\n    except ValueError:\n        # Key isn't hex as int() failed\n        return False\n    return True\n"
  },
  {
    "path": "utils/accounts_toml/validate_account_toml.py",
    "content": "import argparse\nfrom pathlib import Path\ntry:\n    import toml\nexcept ImportError:\n    print(\"toml package not available.\")\n    print(\"Install with `python3 -m pip install toml`.\")\n    exit(1)\nfrom validate import is_valid_key\n\n\ndef parse_args():\n\n    def file_path(path):\n        if not Path(path).is_file():\n            raise argparse.ArgumentTypeError(f'{path} is not a valid file.')\n        if not Path(path).exists():\n            raise argparse.ArgumentTypeError(f'{path} does not exist.')\n        return Path(path)\n\n    parser = argparse.ArgumentParser(description='Convert csv files to accounts.toml.')\n    parser.add_argument('accounts_toml_path', type=file_path, help=\"Path to accounts.toml file\")\n    return parser.parse_args()\n\n\ndef is_int(value):\n    try:\n        _ = int(value)\n    except ValueError:\n        return False\n    else:\n        return True\n\n\ndef is_valid(toml_obj):\n    is_good = True\n    for key in toml_obj.keys():\n        if key not in (\"accounts\", \"delegators\"):\n            print(f\"ERR: Unknown root level group: {key}\")\n            is_good = False\n    accounts = toml_obj.get(\"accounts\", None)\n    delegators = toml_obj.get(\"delegators\", None)\n    if accounts is None:\n        print(\"ERR: No accounts found.\")\n        is_good = False\n    if delegators is None:\n        print(\"WARN: No delegators found.\")\n\n    account_keys = []\n    validator_keys = []\n    total_amt = 0\n    for account in accounts:\n        account_key = account.get(\"public_key\", None)\n        account_balance = account.get(\"balance\", None)\n        validator = account.get(\"validator\", None)\n        if validator is None:\n            account_bonded_amount, account_delegation_rate = None, None\n        else:\n            account_bonded_amount = validator.get(\"bonded_amount\", None)\n            account_delegation_rate = validator.get(\"delegation_rate\", None)\n\n        if account_key is None:\n    
        print(\"ERR: Missing accounts.public_key\")\n            is_good = False\n        elif not is_valid_key(account_key):\n            print(f\"ERR: Invalid accounts.public_key: {account_key}\")\n            is_good = False\n        elif account_key in account_keys:\n            print(f\"ERR: Duplicated account_key: {account_key}\")\n            is_good = False\n        else:\n            account_keys.append(account_key)\n\n        if account_balance is None:\n            print(\"ERR: Missing accounts.balance\")\n            is_good = False\n        elif not is_int(account_balance):\n            print(f\"ERR: Invalid accounts.balance: {account_balance} for {account_key}\")\n            is_good = False\n        else:\n            total_amt += int(account_balance)\n\n        if account_bonded_amount is not None or account_delegation_rate is not None:\n            validator_keys.append(account_key)\n\n            if account_bonded_amount is None:\n                print(f\"ERR: No bonded_amount with delegation_rate for {account_key}\")\n                is_good = False\n            elif not is_int(account_bonded_amount):\n                print(f\"ERR: Invalid bonded_amount for {account_key}\")\n                is_good = False\n            else:\n                total_amt += int(account_bonded_amount)\n\n            if account_delegation_rate is None:\n                print(f\"ERR: No delegation_rate with bonded_amount for {account_key}\")\n                is_good = False\n            elif not is_int(account_delegation_rate):\n                print(f\"ERR: Invalid delegation_rate for {account_key}\")\n                is_good = False\n            elif int(account_delegation_rate) < 0 or int(account_delegation_rate) > 100:\n                print(f\"ERR: delegation_rate not between 0 and 100 for {account_key}\")\n                is_good = False\n\n    delegator_keys = []\n    for delegator in delegators:\n        validator_key = delegator.get(\"validator_public_key\", 
None)\n        delegator_key = delegator.get(\"delegator_public_key\", None)\n        balance = delegator.get(\"balance\", None)\n        delegated_amount = delegator.get(\"delegated_amount\", None)\n\n        if validator_key is None:\n            print(f\"ERR: Missing validator_key for delegator_key: {delegator_key}\")\n            is_good = False\n        elif not is_valid_key(validator_key):\n            print(f\"ERR: Invalid validator_key: {validator_key}\")\n            is_good = False\n        elif validator_key not in validator_keys:\n            print(f\"ERR: Cannot delegate to unknown validator_key: {validator_key}\")\n            is_good = False\n\n        if delegator_key is None:\n            print(f\"ERR: Missing delegator_key with validator_key: {validator_key}\")\n            is_good = False\n        elif not is_valid_key(delegator_key):\n            print(f\"ERR: Invalid delegator_key: {delegator_key}\")\n            is_good = False\n        elif delegator_key in delegator_keys:\n            print(f\"ERR: Duplicate delegator_key: {delegator_key}\")\n            is_good = False\n        else:\n            delegator_keys.append(delegator_key)\n\n        if balance is None:\n            print(f\"ERR: No balance with delegator_key: {delegator_key}\")\n            is_good = False\n        elif not is_int(balance):\n            print(f\"ERR: Invalid balance with delegator_key: {delegator_key}\")\n            is_good = False\n        else:\n            total_amt += int(balance)\n\n        if delegated_amount is None:\n            print(f\"ERR: No delegated_amount with delegator_key: {delegator_key}\")\n            is_good = False\n        elif not is_int(delegated_amount):\n            print(f\"ERR: Invalid delegated_amount with delegator_key: {delegator_key}\")\n            is_good = False\n        else:\n            total_amt += int(delegated_amount)\n\n    if is_good:\n        print(f\"Total Amt: {total_amt}\")\n    return is_good\n\n\nif __name__ == 
'__main__':\n    args = parse_args()\n\n    toml_obj = toml.load(args.accounts_toml_path)\n    if is_valid(toml_obj):\n        print(f\"{args.accounts_toml_path} is valid.\")\n\n"
  },
  {
    "path": "utils/casper-tool/casper-tool.py",
    "content": "#!/usr/bin/env python3\n\nfrom datetime import datetime, timedelta, timezone\nimport os\nimport subprocess\n\nimport click\nimport shutil\nimport toml\n\n#: List of WASM blobs required to be set up in chainspec.\nCONTRACTS = [\"mint\", \"pos\", \"standard_payment\", \"auction\"]\n\n\n#: Relative directory to be appended to basedir in case WASM dir is not specified.\nDEFAULT_WASM_SUBDIR = [\"target\", \"wasm32-unknown-unknown\", \"release\"]\n\n\n#: The port the node is reachable on.\nNODE_PORT = 34553\n\n\n@click.group()\n@click.option(\n    \"-b\",\n    \"--basedir\",\n    help=\"casper-node source code base directory\",\n    type=click.Path(exists=True, dir_okay=True, file_okay=False, readable=True),\n    default=os.path.join(os.path.dirname(__file__), \"..\", \"..\"),\n)\n@click.option(\n    \"--casper-client\",\n    help=\"path to casper client binary (compiled from basedir by default)\",\n    type=click.Path(exists=True, dir_okay=False, readable=True),\n)\n@click.option(\n    \"-p\",\n    \"--production\",\n    is_flag=True,\n    help=\"Use production chainspec template instead of dev/local\",\n)\n@click.option(\n    \"-c\",\n    \"--config-template\",\n    type=click.Path(exists=True, dir_okay=False, readable=True),\n    help=\"Node configuration template to use\",\n)\n@click.option(\n    \"-C\",\n    \"--chainspec-template\",\n    type=click.Path(exists=True, dir_okay=False, readable=True),\n    help=\"Chainspec template to use\",\n)\n@click.option(\n    \"-w\",\n    \"--wasm-dir\",\n    type=click.Path(exists=True, dir_okay=False, readable=True),\n    help=\"directory containing compiled wasm contracts (defaults to `BASEDIR/{}`\".format(\n        os.path.join(*DEFAULT_WASM_SUBDIR)\n    ),\n)\n@click.pass_context\ndef cli(\n    ctx,\n    basedir,\n    production,\n    chainspec_template,\n    config_template,\n    wasm_dir,\n    casper_client,\n):\n    \"\"\"Casper Network creation tool\n\n    Can be used to create new casper-labs chains with 
automatic validator setups. Useful for testing.\"\"\"\n    obj = {}\n    if chainspec_template:\n        obj[\"chainspec_template\"] = chainspec_template\n    elif production:\n        obj[\"chainspec_template\"] = os.path.join(\n            basedir, \"resources\", \"production\", \"chainspec.toml\"\n        )\n    else:\n        obj[\"chainspec_template\"] = os.path.join(\n            basedir, \"resources\", \"local\", \"chainspec.toml.in\"\n        )\n    obj[\"wasm_dir\"] = wasm_dir or os.path.join(basedir, *DEFAULT_WASM_SUBDIR)\n\n    if config_template:\n        obj[\"config_template\"] = chainspec_template\n    elif production:\n        obj[\"config_template\"] = os.path.join(\n            basedir, \"resources\", \"production\", \"config.toml\"\n        )\n    else:\n        obj[\"config_template\"] = os.path.join(\n            basedir, \"resources\", \"local\", \"config.toml\"\n        )\n\n    if casper_client:\n        obj[\"casper_client_argv0\"] = [casper_client]\n    else:\n        obj[\"casper_client_argv0\"] = [\n            \"cargo\",\n            \"run\",\n            \"--quiet\",\n            \"--manifest-path={}\".format(os.path.join(basedir, \"client\", \"Cargo.toml\")),\n            \"--\",\n        ]\n\n    ctx.obj = obj\n    return\n\n\n@cli.command(\"create-network\")\n@click.pass_obj\n@click.argument(\"target-path\", type=click.Path(exists=False, writable=True))\n@click.option(\n    \"-n\",\n    \"--network-name\",\n    help=\"The network name (also set in chainspec), defaults to output directory name\",\n)\n@click.option(\n    \"-g\",\n    \"--genesis-in\",\n    help=\"Number of seconds from now until Genesis\",\n    default=300,\n    type=int,\n)\n@click.option(\n    \"-c/-C\",\n    \"--cluster/--no-cluster\",\n    help=\"Setup networking suitable for cluster operation (default: enabled).\",\n    default=True,\n    is_flag=True,\n)\n@click.option(\n    \"-N\", \"--number-of-nodes\", help=\"Number of nodes to create data for\", 
default=5\n)\n@click.option(\n    \"-d\",\n    \"--discovery-strategy\",\n    help=\"The discovery strategy to use (only valid with `--cluster`)\",\n    default=\"root\",\n    type=click.Choice([\"root\"]),\n)\ndef create_network(\n    obj,\n    target_path,\n    network_name,\n    genesis_in,\n    number_of_nodes,\n    cluster,\n    discovery_strategy,\n):\n    if network_name is None:\n        network_name = os.path.basename(target_path)\n\n    # Create the network output directories.\n    show_val(\"Output path\", target_path)\n    os.mkdir(target_path)\n    chain_path = os.path.join(target_path, \"chain\")\n    os.mkdir(chain_path)\n\n    # Prepare paths and copy over all contracts.\n    show_val(\"WASM contracts\", obj[\"wasm_dir\"])\n    contract_paths = {}\n    for contract in CONTRACTS:\n        key = \"{}_installer_path\".format(contract)\n        basename = \"{}.wasm\".format(contract)\n        source = os.path.join(obj[\"wasm_dir\"], \"{}_install.wasm\".format(contract))\n        target = os.path.join(chain_path, \"{}.wasm\".format(contract))\n        shutil.copy(source, target)\n\n        # We use relative paths when creating a self-contained network.\n        contract_paths[contract] = basename\n\n    # Update chainspec values.\n    chainspec = create_chainspec(\n        obj[\"chainspec_template\"], network_name, genesis_in, contract_paths\n    )\n\n    chainspec_path = os.path.join(chain_path, \"chainspec.toml\")\n    toml.dump(chainspec, open(chainspec_path, \"w\"))\n    show_val(\"Chainspec\", chainspec_path)\n\n    # Setup each node, collecting all pubkey hashes.\n    show_val(\"Node config template\", obj[\"config_template\"])\n    show_val(\"Number of nodes\", number_of_nodes)\n    show_val(\"Discovery strategy\", discovery_strategy)\n    pubkeys = {}\n    for n in range(number_of_nodes):\n        if discovery_strategy == \"root\":\n            known_nodes = [0]\n        else:\n            raise ValueError(\n                \"unknown discovery 
strategy: {}\".format(discovery_strategy)\n            )\n\n        node_path = os.path.join(target_path, \"node-{}\".format(n))\n        os.mkdir(node_path)\n        pubkey_hex = create_node(\n            n,\n            obj[\"casper_client_argv0\"],\n            network_name,\n            obj[\"config_template\"],\n            node_path,\n            cluster,\n            known_nodes,\n        )\n        pubkeys[n] = pubkey_hex\n\n    accounts_path = os.path.join(chain_path, \"accounts.toml\")\n    show_val(\"accounts file\", accounts_path)\n    create_accounts_toml(open(accounts_path, \"w\"), pubkeys)\n\n\ndef create_chainspec(template, network_name, genesis_in, contract_paths):\n    \"\"\"Creates a new chainspec from a template.\n\n    `contract_path` must be a dictionary mapping the keys of `CONTRACTS` to relative or absolute\n    paths to be put into the new chainspec.\n\n    Returns a dictionary that can be serialized using `toml`.\n    \"\"\"\n    show_val(\"Chainspec template\", template)\n    chainspec = toml.load(open(template))\n\n    show_val(\"Chain name\", network_name)\n    genesis_timestamp = (datetime.now(timezone.utc).replace(tzinfo=None) + timedelta(seconds=genesis_in)).isoformat(\n        \"T\"\n    ) + \"Z\"\n\n    # Update the chainspec.\n    show_val(\"Genesis\", \"{} (in {} seconds)\".format(genesis_timestamp, genesis_in))\n    chainspec[\"protocol\"][\"activation_point\"] = genesis_timestamp\n    chainspec[\"network\"][\"name\"] = network_name\n\n    # Setup WASM contracts.\n    for contract in CONTRACTS:\n        key = \"{}_installer_path\".format(contract)\n        chainspec[\"network\"][key] = contract_paths[contract]\n\n    return chainspec\n\n\ndef create_node(\n    n, client_argv0, network_name, config_template, node_path, cluster, known_nodes\n):\n    \"\"\"Create a node configuration inside a network.\n\n    Paths are assumed to be set up using `create_chainspec`.\n\n    Returns the nodes public key as a string.\"\"\"\n\n    # 
Generate a key\n    key_path = os.path.join(node_path, \"keys\")\n    run_client(client_argv0, \"keygen\", key_path)\n\n    config = toml.load(open(config_template))\n    config[\"node\"][\"chainspec_config_path\"] = \"../chain/chainspec.toml\"\n\n    config[\"consensus\"][\"secret_key_path\"] = os.path.join(\n        os.path.relpath(key_path, node_path), \"secret_key.pem\"\n    )\n\n    # All the different state/storage paths\n    config[\"storage\"][\"path\"] = \"state/storage\"\n    config[\"consensus\"][\"unit_hashes_folder\"] = \"state/unit_hashes\"\n\n    config[\"logging\"][\"format\"] = \"json\"\n\n    # Cluster-specific configuration\n    if cluster:\n        # Set the public address to `casper-node-XX`, which will resolve to the internal\n        # network IP, and use the automatic port detection by setting `:0`.\n        config[\"network\"][\"public_address\"] = \"casper-node-{}:{}\".format(n, NODE_PORT)\n        config[\"network\"][\"bind_address\"] = \"casper-node-{}:{}\".format(n, NODE_PORT)\n\n        config[\"network\"][\"known_addresses\"] = [\n            \"casper-node-{}.casper-node.casper-{}:{}\".format(n, network_name, NODE_PORT)\n            for n in known_nodes\n        ]\n\n        # Setup for volume operation.\n        config[\"storage\"][\"path\"] = \"/storage\"\n        config[\"consensus\"][\"unit_hashes_folder\"] = \"/storage\"\n\n    toml.dump(config, open(os.path.join(node_path, \"config.toml\"), \"w\"))\n\n    return open(os.path.join(key_path, \"public_key_hex\")).read().strip()\n\n\ndef create_accounts_toml(output_file, pubkeys):\n    items = list(pubkeys.items())\n    items.sort()\n\n    accounts = []\n    for id, key_hex in items:\n        motes = 1000000000000000\n        weight = 10000000000000\n        account = {\n            'public_key': key_hex,\n            'balance': f'{motes}',\n            'bonded_amount': f'{weight}'\n        }\n        accounts += [account]\n\n    toml.dump(accounts, output_file)\n\n\ndef 
run_client(argv0, *args):\n    \"\"\"Run the casper client, compiling it if necessary, with the given command-line args\"\"\"\n    return subprocess.check_output(argv0 + list(args))\n\n\ndef show_val(key, value):\n    \"\"\"Auxiliary function to display a value on the terminal.\"\"\"\n\n    key = \"{:>20s}\".format(key)\n    click.echo(\"{}:  {}\".format(click.style(key, fg=\"blue\"), value))\n\n\nif __name__ == \"__main__\":\n    cli()\n"
  },
  {
    "path": "utils/dump-cpu-features.sh",
    "content": "#!/bin/sh\n\n# Dumps a list of X86 extensions found to be in use by the given binary, in alphabetical order.\n\nset -e\n\nif [ $# -ne 1 ]; then\n  echo \"usage: $(basename $0) binary\"\nfi;\n\nBINARY=$1\n\nexport PATH=\"$HOME/.cargo/bin:$PATH\"\n\nelfx86exts $BINARY | grep -v 'CPU Generation' | cut -f1 -d ' ' | sort\n"
  },
  {
    "path": "utils/global-state-update-gen/Cargo.toml",
    "content": "[package]\nname = \"global-state-update-gen\"\nversion = \"0.3.0\"\nauthors = [\"Bartłomiej Kamiński <bart@casperlabs.io>\"]\nedition = \"2021\"\nlicense-file = \"../../LICENSE\"\ndescription = \"A tool used to make changes to casper-node's global state\"\nreadme = \"README.md\"\n\n[dependencies]\nitertools = \"0.10.3\"\nbase16 = \"0.2.1\"\nbase64 = \"0.13\"\ncasper-engine-test-support = { path = \"../../execution_engine_testing/test_support\" }\ncasper-execution-engine = { path = \"../../execution_engine\" }\ncasper-storage = { path = \"../../storage\" }\ncasper-types = { path = \"../../types\" }\nclap = \"2.33\"\nlmdb-rkv = \"0.14\"\nrand = \"0.8\"\nserde = \"1\"\ntoml = \"0.5\"\n\n[package.metadata.deb]\nrevision = \"0\"\ndepends = \"$auto\"\nassets = [\n    # binary\n    [\"../../target/release/global-state-update-gen\", \"/usr/bin/\", \"755\"],\n]\n\n[package.metadata.deb.variants.bionic]\nname = \"global-state-update-gen\"\nrevision = \"0+bionic\"\n\n[package.metadata.deb.variants.focal]\nname = \"global-state-update-gen\"\nrevision = \"0+focal\"\n"
  },
  {
    "path": "utils/global-state-update-gen/README.md",
    "content": "# global-state-update-gen\n\nIf the network experiences a catastrophic failure, it might become impossible to make changes to the global state required for fixing the situation via normal channels (i.e. executing deploys on the network), and we might instead need to resort to social consensus outside the blockchain and applying the changes manually. This tool facilitates generating files specifying such changes, which can then be applied during an emergency upgrade.\n\nThe tool consists of 1 main subcommand and 3 legacy subcommands:\n- `generic` - a generic update based on a config file,\n- `change-validators` (legacy) - updating the set of validators on the network,\n- `balances` (legacy) - performing some transfers between accounts,\n- `migrate-into-system-contract-registry` (legacy) - this was a single-use subcommand intended to introduce some changes to the system structures in the global state that couldn't be made otherwise.\n\n## A detailed description of the subcommands\n\nAll subcommands share 3 parameters:\n\n- `-h`, `--help` - prints help information about the subcommand,\n- `-d`, `--data-dir` - path to the data directory of a node, containing its storage and global state database,\n- `-s`, `--state-hash` - the root hash of the global state to be used as the base for the update - usually the state root hash from the last block before the planned upgrade.\n\n### `generic`\n\nUsage: `global-state-update-gen generic -d DATA-DIRECTORY -s STATE-ROOT-HASH CONFIG-FILE`\n\nThe config file should be a TOML file, which can contain the following values:\n\n```toml\n# can be true or false, optional, false if not present; more detailed description below\n# *must* be listed before all [[accounts]] and [[transfers]] entries\nonly_listed_validators = false\n\n# can be true or false, optional, false if not present; more detailed description below\n# *must* be listed before all [[accounts]] and [[transfers]] entries\nslash_instead_of_unbonding = false\n\n# 
multiple [[accounts]] definitions are possible\n[[accounts]]\npublic_key = \"...\" # the public key of the account owner\nbalance = \"...\"    # account balance, in motes (optional)\n\n# if the account is supposed to be validator, define the section below\n[accounts.validator]\nbonded_amount = \"...\"      # the staked amount for this account, in motes\ndelegation_rate = ...      # the delegation rate for this validator (optional)\n\n# define delegators as entries in accounts.validator.delegators\n# multiple definitions per validator are possible\n[[accounts.validator.delegators]]\npublic_key = \"...\"         # the delegator's public key\ndelegated_amount = \"...\"   # the amount delegated to the validator, in motes\n\n# multiple [[transfers]] definitions are possible\n[[transfers]]\nfrom = \"account-hash-...\" # the account hash to transfer funds from\nto = \"account-hash-...\"   # the account hash to transfer funds to\namount = \"...\"            # the amount to be transferred, in motes\n```\n\nThe `[[accounts]]` definitions control the balances and stakes of accounts on the network. It is possible to change the set of validators using these definitions, by changing the staked amounts.\n\nFor every such definition, if the `balance` key is present, the balance of the account will be updated. The account will be created if it didn't exist previously. If the `balance` key is not present, the pre-existing balance (if any) will be left untouched.\n\nUpdating the validator properties (stake, delegators) behaves differently based on the value of `only_listed_validators`. If it is false, the existing list of validators is treated as a base, and validator properties are modified based on the entries in the config. If the `validator` key is present, the stake and delegators are set to the configured values. 
If it is not present, the pre-existing properties are left untouched.\n\nIf `only_listed_validators` is true, pre-existing validators are discarded, and only the accounts with non-zero stakes configured in the config file will be validators after the update. This option exists to match the behavior of the legacy `change-validators` subcommand and to cater to some use cases in testing.\n\nIf `slash_instead_of_unbonding` is true, pre-existing validators which are being discarded and their delegators have their staked amounts slashed rather than unbonded.\n\nSo, for example, if the network has 100 validators and we want to only change the stake of a single one:\n- with `only_listed_validators` set to false, we need only a single `[[accounts]]` entry for the validator we want to change,\n- with `only_listed_validators` set to true, we need 100 `[[accounts]]` entries, one per each account that is supposed to be a validator after the upgrade.\n\nOn the other hand, replacing 100 validators with 5 different ones (a use case in testing setups) would require:\n- just 5 entries for the new validators if `only_listed_validators` is true,\n- 105 entries - 100 to remove the old validators, and 5 to add the new ones - if `only_listed_validators` is false.\n\nThe `[[transfers]]` definitions simply transfer funds from one account to another. Every definition requires a source account, a target account and an amount to be defined. If the source account doesn't contain enough funds for the transfer, it won't be executed. If the target account doesn't exist, it will be created.\n\n**Note:** transfers are executed before the `[[accounts]]` definitions. 
This means that it is possible to overwrite the effects of a transfer if the source or target account is also specified among `[[accounts]]`.\n\nAfter the transfers have been executed and account balances and stakes have been updated, the tool also updates the auction contract state in the form of the snapshot of validator sets for the next few eras, as well as bids and withdraws:\n- bids of accounts with zero stake are set to empty, and bids of accounts with nonzero stake are created or updated to reflect the configured amounts,\n- if `only_listed_validators` is true, any bid larger than the smallest stake among the new set of validators is reset to zero,\n- the withdraws of validators that are being removed from the set are cancelled.\n\nThe tool also takes care to update the total supply in the network to reflect the changes in balances resulting from the configured modifications to the state.\n\n### Legacy commands\n\n#### `change-validators`\n\nUsage: `global-state-update-gen change-validators -d DATA-DIRECTORY -s STATE-ROOT-HASH -v VALIDATOR-KEY,STAKE,BALANCE -v VALIDATOR-KEY,STAKE,BALANCE ...`\n\nApart from the common `-d` and `-s` parameters, the subcommand has one additional parameter, `-v` or `--validator`. Multiple such parameters can be supplied. Also note that the third field, `BALANCE`, is optional (i.e., the definition can be just `-v VALIDATOR-KEY,STAKE`).\n\nEvery `-v` instance configures a single validator to be included in the set after the upgrade. 
A `-v KEY,STAKE,BALANCE` corresponds to an `[[accounts]]` entry in the config file:\n\n```toml\n[[accounts]]\npublic_key = \"KEY\"\nbalance = \"BALANCE\"\n\n[accounts.validator]\nbonded_amount = \"STAKE\"\n```\n\nThe command as a whole works just like a config file with only `[[accounts]]` entries, `only_listed_validators` set to `true` and `slash_instead_of_unbonding` set to `false` or omitted.\n\n#### `balances`\n\nUsage: `global-state-update-gen balances -d DATA_DIRECTORY -s STATE_ROOT_HASH -f FROM-ACCOUNT -t TO-ACCOUNT -a AMOUNT`\n\nThis functions exactly like a config file with just a single transfer configured:\n\n```toml\n[[transfers]]\nfrom = \"FROM-ACCOUNT\"\nto = \"TO-ACCOUNT\"\namount = \"AMOUNT\"\n```\n\n#### `migrate-into-system-contract-registry`\n\nUsage: `global-state-update-gen migrate-into-system-contract-registry -d DATA_DIRECTORY -s STATE_ROOT_HASH`\n\nThis subcommand doesn't take any additional parameters. It adds a registry of system contracts to the global state, based either on the data contained within the state (if `-s` is present), or based on the protocol data in storage (if `-s` is not present).\n\nIt has been used to add the registry to the global state during the upgrade to 1.4.0 and will most likely never be needed again.\n"
  },
  {
    "path": "utils/global-state-update-gen/src/admins.rs",
    "content": "use casper_engine_test_support::LmdbWasmTestBuilder;\nuse casper_execution_engine::engine_state::engine_config::DEFAULT_PROTOCOL_VERSION;\nuse casper_types::{\n    account::Account, bytesrepr::ToBytes, contracts::NamedKeys, system::mint, AccessRights,\n    AsymmetricType, CLTyped, CLValue, EntityAddr, Key, PublicKey, StoredValue, URef, U512,\n};\nuse clap::ArgMatches;\nuse rand::Rng;\n\nuse crate::utils::{hash_from_str, print_entry};\n\nconst DEFAULT_MAIN_PURSE_ACCESS_RIGHTS: AccessRights = AccessRights::READ_ADD_WRITE;\n\nfn create_purse() -> URef {\n    URef::new(rand::thread_rng().gen(), DEFAULT_MAIN_PURSE_ACCESS_RIGHTS)\n}\n\nfn make_stored_clvalue<T: CLTyped + ToBytes>(value: T) -> StoredValue {\n    let cl = CLValue::from_t(value).unwrap();\n    StoredValue::CLValue(cl)\n}\n\npub(crate) fn generate_admins(matches: &ArgMatches<'_>) {\n    let data_dir = matches.value_of(\"data_dir\").unwrap_or(\".\");\n    let state_hash = matches.value_of(\"hash\").unwrap();\n\n    // Open the global state that should be in the supplied directory.\n    let post_state_hash = hash_from_str(state_hash);\n    let test_builder = LmdbWasmTestBuilder::open_raw(\n        data_dir,\n        Default::default(),\n        DEFAULT_PROTOCOL_VERSION,\n        post_state_hash,\n    );\n\n    let admin_values = matches.values_of(\"admin\").expect(\"at least one argument\");\n    let protocol_version = DEFAULT_PROTOCOL_VERSION;\n    let mut total_supply = test_builder.total_supply(protocol_version, Some(post_state_hash));\n    let total_supply_before = total_supply;\n\n    for value in admin_values {\n        let mut fields = value.split(',').peekable();\n        let field1 = fields.next().unwrap();\n        let field2 = fields.next().unwrap();\n        if fields.peek().is_some() {\n            panic!(\"correct syntax for --admin parameter is [PUBLIC_KEY,BALANCE]\")\n        }\n        let pub_key = PublicKey::from_hex(field1.as_bytes()).expect(\"valid public key\");\n        
let balance = U512::from_dec_str(field2).expect(\"valid balance amount\");\n\n        let main_purse = create_purse();\n\n        let purse_balance_key = Key::Balance(main_purse.addr());\n        let purse_balance_value = make_stored_clvalue(balance);\n        print_entry(&purse_balance_key, &purse_balance_value);\n\n        let purse_uref_key = Key::URef(main_purse);\n        let purse_uref_value = make_stored_clvalue(());\n        print_entry(&purse_uref_key, &purse_uref_value);\n\n        let account_key = Key::Account(pub_key.to_account_hash());\n        let account_value = {\n            let account = {\n                let account_hash = pub_key.to_account_hash();\n                let named_keys = NamedKeys::default();\n                Account::create(account_hash, named_keys, main_purse)\n            };\n            StoredValue::Account(account)\n        };\n        print_entry(&account_key, &account_value);\n\n        total_supply = total_supply.checked_add(balance).expect(\"no overflow\");\n    }\n\n    if total_supply == total_supply_before {\n        // Don't update total supply if it did not change\n        return;\n    }\n\n    println!(\n        \"# total supply increases from {} to {}\",\n        total_supply_before, total_supply\n    );\n\n    let total_supply_key = {\n        let mint_contract_hash = test_builder.get_mint_contract_hash();\n\n        let mint_named_keys =\n            test_builder.get_named_keys(EntityAddr::new_system(mint_contract_hash.value()));\n\n        mint_named_keys\n            .get(mint::TOTAL_SUPPLY_KEY)\n            .cloned()\n            .expect(\"valid key in mint named keys\")\n    };\n    let total_supply_value = make_stored_clvalue(total_supply);\n    print_entry(&total_supply_key, &total_supply_value);\n}\n"
  },
  {
    "path": "utils/global-state-update-gen/src/balances.rs",
    "content": "use clap::ArgMatches;\n\nuse casper_engine_test_support::LmdbWasmTestBuilder;\nuse casper_execution_engine::engine_state::engine_config::DEFAULT_PROTOCOL_VERSION;\nuse casper_types::{account::AccountHash, U512};\n\nuse crate::{\n    generic::{\n        config::{Config, Transfer},\n        update_from_config,\n    },\n    utils::{hash_from_str, protocol_version_from_matches},\n};\n\npub(crate) fn generate_balances_update(matches: &ArgMatches<'_>) {\n    let data_dir = matches.value_of(\"data_dir\").unwrap_or(\".\");\n    let state_hash = hash_from_str(matches.value_of(\"hash\").unwrap());\n\n    let from_account = AccountHash::from_formatted_str(matches.value_of(\"from\").unwrap()).unwrap();\n    let to_account = AccountHash::from_formatted_str(matches.value_of(\"to\").unwrap()).unwrap();\n    let amount = U512::from_str_radix(matches.value_of(\"amount\").unwrap(), 10).unwrap();\n\n    let protocol_version = protocol_version_from_matches(matches);\n\n    let config = Config {\n        accounts: vec![],\n        transfers: vec![Transfer {\n            from: from_account,\n            to: to_account,\n            amount,\n        }],\n        only_listed_validators: false,\n        slash_instead_of_unbonding: false,\n        protocol_version,\n    };\n\n    let builder = LmdbWasmTestBuilder::open_raw(\n        data_dir,\n        Default::default(),\n        DEFAULT_PROTOCOL_VERSION,\n        state_hash,\n    );\n    update_from_config(builder, config);\n}\n"
  },
  {
    "path": "utils/global-state-update-gen/src/decode.rs",
    "content": "use std::{collections::BTreeMap, fmt, fs::File, io::Read};\n\nuse clap::ArgMatches;\n\nuse casper_types::{\n    bytesrepr::FromBytes, system::auction::SeigniorageRecipientsSnapshotV2, CLType,\n    GlobalStateUpdate, GlobalStateUpdateConfig, Key, StoredValue,\n};\n\nstruct Entries(BTreeMap<Key, StoredValue>);\n\nimpl fmt::Debug for Entries {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        let mut map = f.debug_map();\n        for (k, v) in &self.0 {\n            let debug_v: Box<dyn fmt::Debug> = match v {\n                StoredValue::CLValue(clv) => match clv.cl_type() {\n                    CLType::Map { key, value: _ } if **key == CLType::U64 => {\n                        // this should be the seigniorage recipient snapshot\n                        let snapshot: SeigniorageRecipientsSnapshotV2 =\n                            clv.clone().into_t().unwrap();\n                        Box::new(snapshot)\n                    }\n                    _ => Box::new(clv),\n                },\n                _ => Box::new(v),\n            };\n            map.key(k).value(&debug_v);\n        }\n        map.finish()\n    }\n}\n\npub(crate) fn decode_file(matches: &ArgMatches<'_>) {\n    let file_name = matches.value_of(\"file\").unwrap();\n    let mut file = File::open(file_name).unwrap();\n\n    let mut contents = String::new();\n    file.read_to_string(&mut contents).unwrap();\n\n    let config: GlobalStateUpdateConfig = toml::from_str(&contents).unwrap();\n    let update_data: GlobalStateUpdate = config.try_into().unwrap();\n\n    println!(\"validators = {:#?}\", &update_data.validators);\n    let entries: BTreeMap<_, _> = update_data\n        .entries\n        .iter()\n        .map(|(key, bytes)| (*key, StoredValue::from_bytes(bytes).unwrap().0))\n        .collect();\n    println!(\"entries = {:#?}\", Entries(entries));\n}\n"
  },
  {
    "path": "utils/global-state-update-gen/src/generic/config.rs",
    "content": "use std::collections::BTreeMap;\n\nuse serde::{Deserialize, Serialize};\n\nuse casper_types::{\n    account::AccountHash,\n    system::auction::{DelegationRate, DelegatorKind},\n    ProtocolVersion, PublicKey, U512,\n};\n\n#[derive(Debug, Clone, Serialize, Deserialize, Default)]\npub struct Config {\n    #[serde(default)]\n    pub transfers: Vec<Transfer>,\n    #[serde(default)]\n    pub accounts: Vec<AccountConfig>,\n    #[serde(default)]\n    pub only_listed_validators: bool,\n    #[serde(default)]\n    pub slash_instead_of_unbonding: bool,\n    #[serde(default)]\n    pub protocol_version: ProtocolVersion,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Transfer {\n    pub from: AccountHash,\n    pub to: AccountHash,\n    pub amount: U512,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct AccountConfig {\n    pub public_key: PublicKey,\n    pub balance: Option<U512>,\n    pub validator: Option<ValidatorConfig>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, Default)]\npub struct ValidatorConfig {\n    pub bonded_amount: U512,\n    pub delegation_rate: Option<u8>,\n    pub delegators: Option<Vec<DelegatorConfig>>,\n    pub reservations: Option<Vec<ReservationConfig>>,\n}\n\nimpl ValidatorConfig {\n    pub fn delegators_map(&self) -> Option<BTreeMap<DelegatorKind, U512>> {\n        self.delegators.as_ref().map(|delegators| {\n            delegators\n                .iter()\n                .map(|delegator| {\n                    (\n                        DelegatorKind::PublicKey(delegator.public_key.clone()),\n                        delegator.delegated_amount,\n                    )\n                })\n                .collect()\n        })\n    }\n\n    pub fn reservations_map(&self) -> Option<BTreeMap<DelegatorKind, DelegationRate>> {\n        self.reservations.as_ref().map(|reservations| {\n            reservations\n                .iter()\n                .map(|reservation| {\n                    (\n    
                    DelegatorKind::PublicKey(reservation.public_key.clone()),\n                        reservation.delegation_rate,\n                    )\n                })\n                .collect()\n        })\n    }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct DelegatorConfig {\n    pub public_key: PublicKey,\n    pub delegated_amount: U512,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ReservationConfig {\n    pub public_key: PublicKey,\n    pub delegation_rate: DelegationRate,\n}\n"
  },
  {
    "path": "utils/global-state-update-gen/src/generic/state_reader.rs",
    "content": "use casper_engine_test_support::LmdbWasmTestBuilder;\nuse casper_types::{\n    account::AccountHash,\n    contracts::ContractHash,\n    system::{\n        auction::{\n            BidKind, Unbond, UnbondKind, UnbondingPurse, WithdrawPurses,\n            SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY,\n        },\n        mint::TOTAL_SUPPLY_KEY,\n    },\n    AddressableEntity, Key, StoredValue,\n};\nuse std::collections::BTreeMap;\n\npub trait StateReader {\n    fn query(&mut self, key: Key) -> Option<StoredValue>;\n\n    fn get_total_supply_key(&mut self) -> Key;\n\n    fn get_seigniorage_recipients_key(&mut self) -> Key;\n\n    fn get_account(&mut self, account_hash: AccountHash) -> Option<AddressableEntity>;\n\n    fn get_bids(&mut self) -> Vec<BidKind>;\n\n    #[deprecated(note = \"superseded by get_unbonding_purses\")]\n    fn get_withdraws(&mut self) -> WithdrawPurses;\n\n    #[deprecated(note = \"superseded by get_unbonds\")]\n    fn get_unbonding_purses(&mut self) -> BTreeMap<AccountHash, Vec<UnbondingPurse>>;\n\n    fn get_unbonds(&mut self) -> BTreeMap<UnbondKind, Vec<Unbond>>;\n}\n\nimpl<T> StateReader for &mut T\nwhere\n    T: StateReader,\n{\n    fn query(&mut self, key: Key) -> Option<StoredValue> {\n        T::query(self, key)\n    }\n\n    fn get_total_supply_key(&mut self) -> Key {\n        T::get_total_supply_key(self)\n    }\n\n    fn get_seigniorage_recipients_key(&mut self) -> Key {\n        T::get_seigniorage_recipients_key(self)\n    }\n\n    fn get_account(&mut self, account_hash: AccountHash) -> Option<AddressableEntity> {\n        T::get_account(self, account_hash)\n    }\n\n    fn get_bids(&mut self) -> Vec<BidKind> {\n        T::get_bids(self)\n    }\n\n    #[allow(deprecated)]\n    fn get_withdraws(&mut self) -> WithdrawPurses {\n        T::get_withdraws(self)\n    }\n\n    #[allow(deprecated)]\n    fn get_unbonding_purses(&mut self) -> BTreeMap<AccountHash, Vec<UnbondingPurse>> {\n        T::get_unbonding_purses(self)\n    }\n\n    
fn get_unbonds(&mut self) -> BTreeMap<UnbondKind, Vec<Unbond>> {\n        T::get_unbonds(self)\n    }\n}\n\nimpl StateReader for LmdbWasmTestBuilder {\n    fn query(&mut self, key: Key) -> Option<StoredValue> {\n        LmdbWasmTestBuilder::query(self, None, key, &[]).ok()\n    }\n\n    fn get_total_supply_key(&mut self) -> Key {\n        // Find the hash of the mint contract.\n        let mint_contract_hash = self.get_system_mint_hash();\n\n        if let Some(entity) = self.get_entity_with_named_keys_by_entity_hash(mint_contract_hash) {\n            entity\n                .named_keys()\n                .get(TOTAL_SUPPLY_KEY)\n                .copied()\n                .expect(\"total_supply should exist in mint named keys\")\n        } else {\n            let mint_legacy_contract_hash: ContractHash =\n                ContractHash::new(mint_contract_hash.value());\n\n            self.get_contract(mint_legacy_contract_hash)\n                .expect(\"mint should exist\")\n                .named_keys()\n                .get(TOTAL_SUPPLY_KEY)\n                .copied()\n                .expect(\"total_supply should exist in mint named keys\")\n        }\n    }\n\n    fn get_seigniorage_recipients_key(&mut self) -> Key {\n        // Find the hash of the auction contract.\n        let auction_contract_hash = self.get_system_auction_hash();\n\n        if let Some(entity) = self.get_entity_with_named_keys_by_entity_hash(auction_contract_hash)\n        {\n            entity\n                .named_keys()\n                .get(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY)\n                .copied()\n                .expect(\"seigniorage_recipients_snapshot should exist in auction named keys\")\n        } else {\n            let auction_legacy_contract_hash = ContractHash::new(auction_contract_hash.value());\n\n            self.get_contract(auction_legacy_contract_hash)\n                .expect(\"auction should exist\")\n                .named_keys()\n                
.get(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY)\n                .copied()\n                .expect(\"seigniorage_recipients_snapshot should exist in auction named keys\")\n        }\n    }\n\n    fn get_account(&mut self, account_hash: AccountHash) -> Option<AddressableEntity> {\n        LmdbWasmTestBuilder::get_entity_by_account_hash(self, account_hash)\n    }\n\n    fn get_bids(&mut self) -> Vec<BidKind> {\n        LmdbWasmTestBuilder::get_bids(self)\n    }\n\n    fn get_withdraws(&mut self) -> WithdrawPurses {\n        LmdbWasmTestBuilder::get_withdraw_purses(self)\n    }\n\n    fn get_unbonding_purses(&mut self) -> BTreeMap<AccountHash, Vec<UnbondingPurse>> {\n        LmdbWasmTestBuilder::get_unbonding_purses(self)\n    }\n\n    fn get_unbonds(&mut self) -> BTreeMap<UnbondKind, Vec<Unbond>> {\n        LmdbWasmTestBuilder::get_unbonds(self)\n    }\n}\n"
  },
  {
    "path": "utils/global-state-update-gen/src/generic/state_tracker.rs",
    "content": "use std::{\n    cmp::Ordering,\n    collections::{btree_map::Entry, BTreeMap, BTreeSet},\n    convert::TryFrom,\n};\n\nuse rand::Rng;\n\nuse casper_types::{\n    account::AccountHash,\n    addressable_entity::{ActionThresholds, AssociatedKeys, Weight},\n    system::auction::{\n        BidAddr, BidKind, BidsExt, DelegatorKind, SeigniorageRecipientsSnapshotV2, Unbond,\n        UnbondEra, UnbondKind, UnbondingPurse, WithdrawPurse, WithdrawPurses,\n    },\n    AccessRights, AddressableEntity, AddressableEntityHash, ByteCodeHash, CLValue, EntityAddr,\n    EntityKind, EntityVersions, Groups, Key, Package, PackageHash, PackageStatus, ProtocolVersion,\n    PublicKey, StoredValue, URef, U512,\n};\n\nuse super::{config::Transfer, state_reader::StateReader};\n\n/// A struct tracking changes to be made to the global state.\npub struct StateTracker<T> {\n    reader: T,\n    entries_to_write: BTreeMap<Key, StoredValue>,\n    total_supply: U512,\n    total_supply_key: Key,\n    accounts_cache: BTreeMap<AccountHash, AddressableEntity>,\n    withdraws_cache: BTreeMap<AccountHash, Vec<WithdrawPurse>>,\n    unbonding_purses_cache: BTreeMap<AccountHash, Vec<UnbondingPurse>>,\n    unbonds_cache: BTreeMap<UnbondKind, Vec<Unbond>>,\n    purses_cache: BTreeMap<URef, U512>,\n    staking: Option<Vec<BidKind>>,\n    seigniorage_recipients: Option<(Key, SeigniorageRecipientsSnapshotV2)>,\n    protocol_version: ProtocolVersion,\n}\n\nimpl<T: StateReader> StateTracker<T> {\n    /// Creates a new `StateTracker`.\n    pub fn new(mut reader: T, protocol_version: ProtocolVersion) -> Self {\n        // Read the URef under which total supply is stored.\n        let total_supply_key = reader.get_total_supply_key();\n\n        // Read the total supply.\n        let total_supply_sv = reader.query(total_supply_key).expect(\"should query\");\n        let total_supply = total_supply_sv.into_cl_value().expect(\"should be cl value\");\n\n        Self {\n            reader,\n            
entries_to_write: Default::default(),\n            total_supply_key,\n            total_supply: total_supply.into_t().expect(\"should be U512\"),\n            accounts_cache: BTreeMap::new(),\n            withdraws_cache: BTreeMap::new(),\n            unbonding_purses_cache: BTreeMap::new(),\n            unbonds_cache: BTreeMap::new(),\n            purses_cache: BTreeMap::new(),\n            staking: None,\n            seigniorage_recipients: None,\n            protocol_version,\n        }\n    }\n\n    /// Returns all the entries to be written to the global state\n    pub fn get_entries(&self) -> BTreeMap<Key, StoredValue> {\n        self.entries_to_write.clone()\n    }\n\n    /// Stores a write of an entry in the global state.\n    pub fn write_entry(&mut self, key: Key, value: StoredValue) {\n        let _ = self.entries_to_write.insert(key, value);\n    }\n\n    pub fn write_bid(&mut self, bid_kind: BidKind) {\n        let bid_addr = bid_kind.bid_addr();\n\n        let _ = self\n            .entries_to_write\n            .insert(bid_addr.into(), bid_kind.into());\n    }\n\n    /// Increases the total supply of the tokens in the network.\n    pub fn increase_supply(&mut self, to_add: U512) {\n        self.total_supply += to_add;\n        self.write_entry(\n            self.total_supply_key,\n            StoredValue::CLValue(CLValue::from_t(self.total_supply).unwrap()),\n        );\n    }\n\n    /// Decreases the total supply of the tokens in the network.\n    pub fn decrease_supply(&mut self, to_sub: U512) {\n        self.total_supply -= to_sub;\n        self.write_entry(\n            self.total_supply_key,\n            StoredValue::CLValue(CLValue::from_t(self.total_supply).unwrap()),\n        );\n    }\n\n    /// Creates a new purse containing the given amount of motes and returns its URef.\n    pub fn create_purse(&mut self, amount: U512) -> URef {\n        let mut rng = rand::thread_rng();\n        let new_purse = URef::new(rng.gen(), 
AccessRights::READ_ADD_WRITE);\n\n        // Purse URef pointing to `()` so that the owner cannot modify the purse directly.\n        self.write_entry(Key::URef(new_purse), StoredValue::CLValue(CLValue::unit()));\n\n        self.set_purse_balance(new_purse, amount);\n\n        new_purse\n    }\n\n    /// Gets the balance of the purse, taking into account changes made during the update.\n    pub fn get_purse_balance(&mut self, purse: URef) -> U512 {\n        match self.purses_cache.get(&purse).cloned() {\n            Some(amount) => amount,\n            None => {\n                let base_key = Key::Balance(purse.addr());\n                let amount = self\n                    .reader\n                    .query(base_key)\n                    .map(|v| CLValue::try_from(v).expect(\"purse balance should be a CLValue\"))\n                    .map(|cl_value| cl_value.into_t().expect(\"purse balance should be a U512\"))\n                    .unwrap_or_else(U512::zero);\n                self.purses_cache.insert(purse, amount);\n                amount\n            }\n        }\n    }\n\n    /// Sets the balance of the purse.\n    pub fn set_purse_balance(&mut self, purse: URef, balance: U512) {\n        let current_balance = self.get_purse_balance(purse);\n\n        match balance.cmp(&current_balance) {\n            Ordering::Greater => self.increase_supply(balance - current_balance),\n            Ordering::Less => self.decrease_supply(current_balance - balance),\n            Ordering::Equal => return,\n        }\n\n        self.write_entry(\n            Key::Balance(purse.addr()),\n            StoredValue::CLValue(CLValue::from_t(balance).unwrap()),\n        );\n        self.purses_cache.insert(purse, balance);\n    }\n\n    /// Creates a new account for the given public key and seeds it with the given amount of\n    /// tokens.\n    pub fn create_addressable_entity_for_account(\n        &mut self,\n        account_hash: AccountHash,\n        amount: U512,\n    ) -> 
AddressableEntity {\n        let main_purse = self.create_purse(amount);\n\n        let mut rng = rand::thread_rng();\n\n        let entity_hash = AddressableEntityHash::new(account_hash.value());\n        let package_hash = PackageHash::new(rng.gen());\n        let contract_wasm_hash = ByteCodeHash::new([0u8; 32]);\n\n        let associated_keys = AssociatedKeys::new(account_hash, Weight::new(1));\n\n        let addressable_entity = AddressableEntity::new(\n            package_hash,\n            contract_wasm_hash,\n            self.protocol_version,\n            main_purse,\n            associated_keys,\n            ActionThresholds::default(),\n            EntityKind::Account(account_hash),\n        );\n\n        let mut contract_package = Package::new(\n            EntityVersions::default(),\n            BTreeSet::default(),\n            Groups::default(),\n            PackageStatus::Locked,\n        );\n\n        contract_package.insert_entity_version(\n            self.protocol_version.value().major,\n            EntityAddr::Account(account_hash.value()),\n        );\n        self.write_entry(\n            package_hash.into(),\n            StoredValue::SmartContract(contract_package.clone()),\n        );\n\n        let entity_key = addressable_entity.entity_key(entity_hash);\n\n        self.write_entry(\n            entity_key,\n            StoredValue::AddressableEntity(addressable_entity.clone()),\n        );\n\n        let addressable_entity_by_account_hash =\n            { CLValue::from_t(entity_key).expect(\"must convert to cl_value\") };\n\n        self.accounts_cache\n            .insert(account_hash, addressable_entity.clone());\n\n        self.write_entry(\n            Key::Account(account_hash),\n            StoredValue::CLValue(addressable_entity_by_account_hash),\n        );\n\n        addressable_entity\n    }\n\n    /// Gets the account for the given public key.\n    pub fn get_account(&mut self, account_hash: &AccountHash) -> 
Option<AddressableEntity> {\n        match self.accounts_cache.entry(*account_hash) {\n            Entry::Vacant(vac) => self\n                .reader\n                .get_account(*account_hash)\n                .map(|account| vac.insert(account).clone()),\n            Entry::Occupied(occupied) => Some(occupied.into_mut().clone()),\n        }\n    }\n\n    pub fn execute_transfer(&mut self, transfer: &Transfer) {\n        let from_account = if let Some(account) = self.get_account(&transfer.from) {\n            account\n        } else {\n            eprintln!(\"\\\"from\\\" account doesn't exist; transfer: {:?}\", transfer);\n            return;\n        };\n\n        let to_account = if let Some(account) = self.get_account(&transfer.to) {\n            account\n        } else {\n            self.create_addressable_entity_for_account(transfer.to, U512::zero())\n        };\n\n        let from_balance = self.get_purse_balance(from_account.main_purse());\n\n        if from_balance < transfer.amount {\n            eprintln!(\n                \"\\\"from\\\" account balance insufficient; balance = {}, transfer = {:?}\",\n                from_balance, transfer\n            );\n            return;\n        }\n\n        let to_balance = self.get_purse_balance(to_account.main_purse());\n\n        self.set_purse_balance(from_account.main_purse(), from_balance - transfer.amount);\n        self.set_purse_balance(to_account.main_purse(), to_balance + transfer.amount);\n    }\n\n    /// Reads the `SeigniorageRecipientsSnapshot` stored in the global state.\n    pub fn read_snapshot(&mut self) -> (Key, SeigniorageRecipientsSnapshotV2) {\n        if let Some(key_and_snapshot) = &self.seigniorage_recipients {\n            return key_and_snapshot.clone();\n        }\n        // Read the key under which the snapshot is stored.\n        let validators_key = self.reader.get_seigniorage_recipients_key();\n\n        // Decode the old snapshot.\n        let stored_value = 
self.reader.query(validators_key).expect(\"should query\");\n        let cl_value = stored_value.into_cl_value().expect(\"should be cl value\");\n        let snapshot: SeigniorageRecipientsSnapshotV2 = cl_value.into_t().expect(\"should convert\");\n        self.seigniorage_recipients = Some((validators_key, snapshot.clone()));\n        (validators_key, snapshot)\n    }\n\n    /// Reads the bids from the global state.\n    pub fn get_bids(&mut self) -> Vec<BidKind> {\n        if let Some(ref staking) = self.staking {\n            staking.clone()\n        } else {\n            let staking = self.reader.get_bids();\n            self.staking = Some(staking.clone());\n            staking\n        }\n    }\n\n    fn existing_bid(&mut self, bid_kind: &BidKind, existing_bids: Vec<BidKind>) -> Option<BidKind> {\n        match bid_kind.clone() {\n            BidKind::Unified(bid) => existing_bids\n                .unified_bid(bid.validator_public_key())\n                .map(|existing_bid| BidKind::Unified(Box::new(existing_bid))),\n            BidKind::Validator(validator_bid) => existing_bids\n                .validator_bid(validator_bid.validator_public_key())\n                .map(|existing_validator| BidKind::Validator(Box::new(existing_validator))),\n            BidKind::Delegator(delegator_bid) => {\n                // this one is a little tricky due to legacy issues.\n                match existing_bids.delegator_by_kind(\n                    delegator_bid.validator_public_key(),\n                    delegator_bid.delegator_kind(),\n                ) {\n                    Some(existing_delegator) => {\n                        Some(BidKind::Delegator(Box::new(existing_delegator)))\n                    }\n                    None => match existing_bids.unified_bid(delegator_bid.validator_public_key()) {\n                        Some(existing_bid) => {\n                            if let BidKind::Delegator(delegator_bid) = bid_kind {\n                                
for delegator in existing_bid.delegators().values() {\n                                    if let DelegatorKind::PublicKey(dpk) =\n                                        delegator_bid.delegator_kind()\n                                    {\n                                        if delegator.delegator_public_key() != dpk {\n                                            continue;\n                                        }\n                                        return Some(BidKind::Delegator(delegator_bid.clone()));\n                                    }\n                                }\n                            }\n                            None\n                        }\n                        None => None,\n                    },\n                }\n            }\n            // dont modify bridge records\n            BidKind::Bridge(_) => None,\n            BidKind::Credit(credit) => existing_bids\n                .credit(credit.validator_public_key())\n                .map(|existing_credit| BidKind::Credit(Box::new(existing_credit))),\n            BidKind::Reservation(reservation) => existing_bids\n                .reservation_by_kind(\n                    reservation.validator_public_key(),\n                    reservation.delegator_kind(),\n                )\n                .map(|exisiting_reservation| BidKind::Reservation(Box::new(exisiting_reservation))),\n            BidKind::Unbond(unbond) => existing_bids\n                .unbond_by_kind(unbond.validator_public_key(), unbond.unbond_kind())\n                .map(|existing_unbond| BidKind::Unbond(Box::new(existing_unbond))),\n        }\n    }\n\n    /// Sets the bid for the given account.\n    pub fn set_bid(&mut self, bid_kind: BidKind, slash_instead_of_unbonding: bool) {\n        // skip bridge records since they shouldn't need to be overwritten\n        if let BidKind::Bridge(_) = bid_kind {\n            return;\n        }\n\n        let bids = self.get_bids();\n        let maybe_existing_bid 
= self.existing_bid(&bid_kind, bids);\n\n        // since we skip bridge records optional values should be present\n        let new_stake = bid_kind.staked_amount().expect(\"should have staked amount\");\n        let bonding_purse = bid_kind.bonding_purse().expect(\"should have bonding purse\");\n\n        let previous_stake = match maybe_existing_bid {\n            None => U512::zero(),\n            Some(existing_bid) => {\n                let previously_bonded =\n                    self.get_purse_balance(existing_bid.bonding_purse().unwrap());\n                if existing_bid\n                    .bonding_purse()\n                    .expect(\"should have bonding purse\")\n                    != bonding_purse\n                {\n                    self.set_purse_balance(existing_bid.bonding_purse().unwrap(), U512::zero());\n                    self.set_purse_balance(bonding_purse, previously_bonded);\n                    // the old bonding purse gets zeroed - the unbonds will get invalid, anyway\n                    self.remove_withdraws_and_unbonds_with_bonding_purse(\n                        &existing_bid.bonding_purse().unwrap(),\n                    );\n                }\n\n                previously_bonded\n            }\n        };\n\n        // we called `get_bids` above, so `staking` will be `Some`\n        self.staking.as_mut().unwrap().upsert(bid_kind.clone());\n\n        // Replace the bid (overwrite the previous bid, if any):\n        self.write_bid(bid_kind.clone());\n\n        // Remove all the relevant unbonds if we're slashing\n        if slash_instead_of_unbonding {\n            self.remove_withdraws_and_unbonds_with_bonding_purse(&bonding_purse);\n        }\n\n        let unbond_kind = match bid_kind.delegator_kind() {\n            None => UnbondKind::Validator(bid_kind.validator_public_key()),\n            Some(kind) => match kind {\n                DelegatorKind::PublicKey(pk) => UnbondKind::DelegatedPublicKey(pk),\n                
DelegatorKind::Purse(addr) => UnbondKind::DelegatedPurse(addr),\n            },\n        };\n\n        // This will be zero if the unbonds got removed above.\n        let already_unbonded = self.already_unbonding_amount(&bid_kind);\n\n        // This is the amount that should be in the bonding purse.\n        let new_stake = new_stake + already_unbonded;\n\n        if (slash_instead_of_unbonding && new_stake != previous_stake) || new_stake > previous_stake\n        {\n            self.set_purse_balance(bonding_purse, new_stake);\n        } else if new_stake < previous_stake {\n            let amount = previous_stake - new_stake;\n            self.create_unbond(\n                bonding_purse,\n                &bid_kind.validator_public_key(),\n                &unbond_kind,\n                amount,\n            );\n        }\n    }\n\n    #[allow(deprecated)]\n    fn get_withdraws(&mut self) -> WithdrawPurses {\n        let mut result = self.reader.get_withdraws();\n        for (acc, purses) in &self.withdraws_cache {\n            result.insert(*acc, purses.clone());\n        }\n        result\n    }\n\n    #[allow(deprecated)]\n    fn get_unbonding_purses(&mut self) -> BTreeMap<AccountHash, Vec<UnbondingPurse>> {\n        let mut result = self.reader.get_unbonding_purses();\n        for (acc, purses) in &self.unbonding_purses_cache {\n            result.insert(*acc, purses.clone());\n        }\n        result\n    }\n\n    fn get_unbonds(&mut self) -> BTreeMap<UnbondKind, Vec<Unbond>> {\n        let mut result = self.reader.get_unbonds();\n        for (kind, unbond) in &self.unbonds_cache {\n            match result.get_mut(kind) {\n                None => {\n                    result.insert(kind.clone(), unbond.clone());\n                }\n                Some(unbonds) => {\n                    unbonds.append(&mut unbond.clone());\n                }\n            }\n        }\n        result\n    }\n\n    fn write_withdraws(&mut self, account_hash: AccountHash, 
withdraws: Vec<WithdrawPurse>) {\n        self.withdraws_cache.insert(account_hash, withdraws.clone());\n        self.write_entry(\n            Key::Withdraw(account_hash),\n            StoredValue::Withdraw(withdraws),\n        );\n    }\n\n    fn write_unbonding_purses(&mut self, account_hash: AccountHash, unbonds: Vec<UnbondingPurse>) {\n        self.unbonding_purses_cache\n            .insert(account_hash, unbonds.clone());\n        self.write_entry(Key::Unbond(account_hash), StoredValue::Unbonding(unbonds));\n    }\n\n    fn write_unbond(&mut self, unbond_kind: UnbondKind, unbond: Unbond) {\n        match self.unbonds_cache.get_mut(&unbond_kind) {\n            Some(unbonds) => unbonds.push(unbond.clone()),\n            None => {\n                let _ = self\n                    .unbonds_cache\n                    .insert(unbond_kind.clone(), vec![unbond.clone()]);\n            }\n        }\n\n        let bid_addr = unbond_kind.bid_addr(unbond.validator_public_key());\n        self.write_entry(\n            Key::BidAddr(bid_addr),\n            StoredValue::BidKind(BidKind::Unbond(Box::new(unbond))),\n        );\n    }\n\n    /// Returns the sum of already unbonding purses for the given validator account & unbonder.\n    fn already_unbonding_amount(&mut self, bid_kind: &BidKind) -> U512 {\n        let unbonds = self.get_unbonds();\n        let validator_public_key = bid_kind.validator_public_key();\n        if let Some(unbond) = unbonds.get(&UnbondKind::Validator(validator_public_key.clone())) {\n            return unbond\n                .iter()\n                .map(|unbond| {\n                    if unbond.is_validator() {\n                        if let Some(unbond_era) = unbond\n                            .eras()\n                            .iter()\n                            .max_by(|x, y| x.era_of_creation().cmp(&y.era_of_creation()))\n                        {\n                            *unbond_era.amount()\n                        } else {\n       
                     U512::zero()\n                        }\n                    } else {\n                        U512::zero()\n                    }\n                })\n                .sum();\n        }\n\n        if let BidKind::Unbond(unbond) = bid_kind {\n            match unbond.unbond_kind() {\n                UnbondKind::Validator(unbonder_public_key)\n                | UnbondKind::DelegatedPublicKey(unbonder_public_key) => {\n                    let unbonding_purses = self.get_unbonding_purses();\n                    let account_hash = validator_public_key.to_account_hash();\n                    if let Some(purses) = unbonding_purses.get(&account_hash) {\n                        if let Some(purse) = purses\n                            .iter()\n                            .find(|x| x.unbonder_public_key() == unbonder_public_key)\n                        {\n                            return *purse.amount();\n                        }\n                    }\n                }\n                UnbondKind::DelegatedPurse(_) => {\n                    // noop\n                }\n            }\n        }\n\n        let withdrawals = self.get_withdraws();\n        if let Some(withdraws) = withdrawals.get(&validator_public_key.to_account_hash()) {\n            if let Some(withdraw) = withdraws\n                .iter()\n                .find(|x| x.unbonder_public_key() == &validator_public_key)\n            {\n                return *withdraw.amount();\n            }\n        }\n\n        U512::zero()\n    }\n\n    pub fn remove_withdraws_and_unbonds_with_bonding_purse(&mut self, affected_purse: &URef) {\n        let withdraws = self.get_withdraws();\n        let unbonding_purses = self.get_unbonding_purses();\n        let unbonds = self.get_unbonds();\n        for (acc, mut purses) in withdraws {\n            let old_len = purses.len();\n            purses.retain(|purse| purse.bonding_purse().addr() != affected_purse.addr());\n            if purses.len() != 
old_len {\n                self.write_withdraws(acc, purses);\n            }\n        }\n\n        for (acc, mut purses) in unbonding_purses {\n            let old_len = purses.len();\n            purses.retain(|purse| purse.bonding_purse().addr() != affected_purse.addr());\n            if purses.len() != old_len {\n                self.write_unbonding_purses(acc, purses);\n            }\n        }\n\n        for (unbond_kind, mut unbonds) in unbonds {\n            for unbond in unbonds.iter_mut() {\n                let old_len = unbond.eras().len();\n                unbond\n                    .eras_mut()\n                    .retain(|purse| purse.bonding_purse().addr() != affected_purse.addr());\n                if unbond.eras().len() != old_len {\n                    self.write_unbond(unbond_kind.clone(), unbond.clone());\n                }\n            }\n        }\n    }\n\n    pub fn create_unbond(\n        &mut self,\n        bonding_purse: URef,\n        validator_key: &PublicKey,\n        unbond_kind: &UnbondKind,\n        amount: U512,\n    ) {\n        let era_id = &self.read_snapshot().1.keys().next().copied().unwrap();\n        let unbond_era = UnbondEra::new(bonding_purse, *era_id, amount, None);\n        let unbonds = match self.unbonds_cache.entry(unbond_kind.clone()) {\n            Entry::Occupied(ref entry) => entry.get().clone(),\n            Entry::Vacant(entry) => {\n                // Fill the cache with the information from the reader when the cache is empty:\n                let rec = match self.reader.get_unbonds().get(unbond_kind).cloned() {\n                    Some(rec) => rec,\n                    None => vec![Unbond::new(\n                        validator_key.clone(),\n                        unbond_kind.clone(),\n                        vec![unbond_era.clone()],\n                    )],\n                };\n\n                entry.insert(rec.clone());\n                rec\n            }\n        };\n\n        if amount == 
U512::zero() {\n            return;\n        }\n\n        for mut unbond in unbonds {\n            if !unbond.eras().contains(&unbond_era.clone()) {\n                unbond.eras_mut().push(unbond_era.clone());\n            }\n\n            let bid_addr = match unbond_kind {\n                UnbondKind::Validator(pk) | UnbondKind::DelegatedPublicKey(pk) => {\n                    BidAddr::UnbondAccount {\n                        validator: validator_key.to_account_hash(),\n                        unbonder: pk.to_account_hash(),\n                    }\n                }\n                UnbondKind::DelegatedPurse(addr) => BidAddr::UnbondPurse {\n                    validator: validator_key.to_account_hash(),\n                    unbonder: *addr,\n                },\n            };\n\n            // This doesn't actually transfer or create any funds - the funds will be transferred\n            // from the bonding purse to the unbonder's main purse later by the auction\n            // contract.\n            self.write_entry(\n                Key::BidAddr(bid_addr),\n                StoredValue::BidKind(BidKind::Unbond(Box::new(unbond.clone()))),\n            );\n        }\n    }\n}\n"
  },
  {
    "path": "utils/global-state-update-gen/src/generic/testing.rs",
    "content": "use std::collections::BTreeMap;\n\nuse itertools::Itertools;\nuse rand::Rng;\n\nuse casper_types::{\n    account::AccountHash,\n    addressable_entity::{ActionThresholds, AssociatedKeys, Weight},\n    system::auction::{\n        BidKind, BidsExt, DelegatorBid, DelegatorKind, SeigniorageRecipientV2,\n        SeigniorageRecipientsSnapshotV2, SeigniorageRecipientsV2, Unbond, UnbondEra, UnbondKind,\n        UnbondingPurse, ValidatorBid, WithdrawPurse, WithdrawPurses,\n    },\n    testing::TestRng,\n    AccessRights, AddressableEntity, ByteCodeHash, CLValue, EntityKind, EraId, Key, PackageHash,\n    ProtocolVersion, PublicKey, StoredValue, URef, URefAddr, U512,\n};\n\n#[cfg(test)]\nuse crate::utils::ValidatorInfo;\n\nuse super::{\n    config::{AccountConfig, Config, DelegatorConfig, Transfer, ValidatorConfig},\n    get_update,\n    state_reader::StateReader,\n};\n\nconst TOTAL_SUPPLY_KEY: URef = URef::new([1; 32], AccessRights::READ_ADD_WRITE);\nconst SEIGNIORAGE_RECIPIENTS_KEY: URef = URef::new([2; 32], AccessRights::READ_ADD_WRITE);\n\nstruct MockStateReader {\n    accounts: BTreeMap<AccountHash, AddressableEntity>,\n    purses: BTreeMap<URefAddr, U512>,\n    total_supply: U512,\n    seigniorage_recipients: SeigniorageRecipientsSnapshotV2,\n    bids: Vec<BidKind>,\n    withdraws: WithdrawPurses,\n    unbonding_purses: BTreeMap<AccountHash, Vec<UnbondingPurse>>,\n    unbonds: BTreeMap<UnbondKind, Vec<Unbond>>,\n    protocol_version: ProtocolVersion,\n    last_bonding_purse: Option<URef>,\n}\n\nimpl MockStateReader {\n    fn new() -> Self {\n        Self {\n            accounts: BTreeMap::new(),\n            purses: BTreeMap::new(),\n            total_supply: U512::zero(),\n            seigniorage_recipients: SeigniorageRecipientsSnapshotV2::new(),\n            bids: vec![],\n            withdraws: WithdrawPurses::new(),\n            unbonding_purses: BTreeMap::new(),\n            unbonds: BTreeMap::new(),\n            protocol_version: 
ProtocolVersion::V1_0_0,\n            last_bonding_purse: None,\n        }\n    }\n\n    fn with_account<R: Rng>(\n        mut self,\n        account_hash: AccountHash,\n        balance: U512,\n        rng: &mut R,\n    ) -> Self {\n        let main_purse = URef::new(rng.gen(), AccessRights::READ_ADD_WRITE);\n        let entity = AddressableEntity::new(\n            PackageHash::new(rng.gen()),\n            ByteCodeHash::new(rng.gen()),\n            self.protocol_version,\n            main_purse,\n            AssociatedKeys::new(account_hash, Weight::new(1)),\n            ActionThresholds::default(),\n            EntityKind::Account(account_hash),\n        );\n\n        self.purses.insert(main_purse.addr(), balance);\n        // If `insert` returns `Some()`, it means we used the same account hash twice, which is\n        // a programmer error and the function will panic.\n        assert!(self.accounts.insert(account_hash, entity).is_none());\n        self.total_supply += balance;\n        self\n    }\n\n    fn with_validators<R: Rng>(\n        mut self,\n        validators: Vec<(PublicKey, U512, ValidatorConfig)>,\n        rng: &mut R,\n    ) -> Self {\n        let mut recipients = SeigniorageRecipientsV2::new();\n        for (public_key, balance, validator_cfg) in validators {\n            let stake = validator_cfg.bonded_amount;\n            let delegation_rate = validator_cfg.delegation_rate.unwrap_or_default();\n            let delegators = validator_cfg.delegators_map().unwrap_or_default();\n            let reservation_delegation_rates = validator_cfg.reservations_map().unwrap_or_default();\n            // add an entry to the recipients snapshot\n            let recipient = SeigniorageRecipientV2::new(\n                stake,\n                delegation_rate,\n                delegators.clone(),\n                reservation_delegation_rates,\n            );\n            recipients.insert(public_key.clone(), recipient);\n\n            // create the account if 
it doesn't exist\n            let account_hash = public_key.to_account_hash();\n            if !self.accounts.contains_key(&account_hash) {\n                self = self.with_account(account_hash, balance, rng);\n            }\n\n            let bonding_purse = URef::new(rng.gen(), AccessRights::READ_ADD_WRITE);\n            self.last_bonding_purse = Some(bonding_purse);\n            self.purses.insert(bonding_purse.addr(), stake);\n            self.total_supply += stake;\n\n            for delegator_kind in delegators.keys() {\n                match delegator_kind {\n                    DelegatorKind::PublicKey(delegator_pub_key) => {\n                        let account_hash = delegator_pub_key.to_account_hash();\n\n                        if !self.accounts.contains_key(&account_hash) {\n                            self = self.with_account(account_hash, U512::zero(), rng);\n                        }\n                    }\n                    DelegatorKind::Purse(_) => {\n                        continue;\n                    }\n                }\n            }\n\n            // create the bid\n            for (delegator_kind, delegator_stake) in &delegators {\n                let bonding_purse = URef::new(rng.gen(), AccessRights::READ_ADD_WRITE);\n                self.last_bonding_purse = Some(bonding_purse);\n                self.purses.insert(bonding_purse.addr(), *delegator_stake);\n                self.total_supply += *delegator_stake;\n\n                let delegator = DelegatorBid::unlocked(\n                    delegator_kind.clone(),\n                    *delegator_stake,\n                    bonding_purse,\n                    public_key.clone(),\n                );\n\n                self.bids.push(BidKind::Delegator(Box::new(delegator)));\n            }\n\n            let validator_bid = ValidatorBid::unlocked(\n                public_key.clone(),\n                bonding_purse,\n                stake,\n                delegation_rate,\n                
0,\n                u64::MAX,\n                0,\n            );\n\n            self.bids.push(BidKind::Validator(Box::new(validator_bid)));\n        }\n\n        for era_id in 0..5 {\n            self.seigniorage_recipients\n                .insert(era_id.into(), recipients.clone());\n        }\n\n        self\n    }\n\n    /// Returns the bonding purse if the unbonder exists in `self.bids`.\n    fn unbonder_bonding_purse(\n        &self,\n        validator_public_key: &PublicKey,\n        unbond_kind: &UnbondKind,\n    ) -> Option<URef> {\n        let bid = self.bids.validator_bid(validator_public_key)?;\n        if unbond_kind.is_validator() {\n            return Some(*bid.bonding_purse());\n        }\n\n        match self.bids.iter().find(|x| {\n            &x.validator_public_key() == validator_public_key\n                && x.unbond_kind() == Some(unbond_kind.clone())\n        }) {\n            Some(x) => x.bonding_purse(),\n            None => None,\n        }\n    }\n\n    /// Returns the bonding purse if the unbonder exists in `self.bids`, or creates a new account\n    /// with a nominal stake with the given validator and returns the new unbonder's bonding purse.\n    fn create_or_get_unbonder_bonding_purse<R: Rng>(\n        mut self,\n        validator_public_key: &PublicKey,\n        unbond_kind: &UnbondKind,\n        rng: &mut R,\n    ) -> Self {\n        if let Some(purse) = self.unbonder_bonding_purse(validator_public_key, unbond_kind) {\n            self.last_bonding_purse = Some(purse);\n            return self;\n        }\n\n        let bonding_purse = URef::new(rng.gen(), AccessRights::READ_ADD_WRITE);\n        self.purses.insert(bonding_purse.addr(), U512::zero());\n        // it is not clear to me why this method would increment stake here? 
-Ed\n        // let stake = U512::from(10);\n        // self.purses.insert(bonding_purse.addr(), stake);\n        // self.total_supply += stake;\n        self.last_bonding_purse = Some(bonding_purse);\n        self\n    }\n\n    /// Creates a `WithdrawPurse` for 1 mote.  If the validator or delegator don't exist in\n    /// `self.bids`, a random bonding purse is assigned.\n    fn with_withdraw<R: Rng>(\n        mut self,\n        validator_public_key: PublicKey,\n        unbond_kind: UnbondKind,\n        era_of_creation: EraId,\n        amount: U512,\n        rng: &mut R,\n    ) -> Self {\n        self = self.create_or_get_unbonder_bonding_purse(&validator_public_key, &unbond_kind, rng);\n        let bonding_purse = self.last_bonding_purse.expect(\"should have bonding purse\");\n\n        let unbonder_public_key = unbond_kind\n            .maybe_public_key()\n            .expect(\"withdraw purses is legacy tech\");\n\n        let withdraw = WithdrawPurse::new(\n            bonding_purse,\n            validator_public_key,\n            unbonder_public_key,\n            era_of_creation,\n            amount,\n        );\n\n        let withdraws = self\n            .withdraws\n            .entry(withdraw.validator_public_key().to_account_hash())\n            .or_default();\n        withdraws.push(withdraw);\n        self\n    }\n\n    /// Creates an `Unbond` for 1 mote.  
If the validator or delegator don't exist in\n    /// `self.bids`, a random bonding purse is assigned.\n    fn with_unbond<R: Rng>(\n        mut self,\n        validator_public_key: PublicKey,\n        unbond_kind: UnbondKind,\n        amount: U512,\n        rng: &mut R,\n    ) -> Self {\n        self = self.create_or_get_unbonder_bonding_purse(&validator_public_key, &unbond_kind, rng);\n        let purse_uref = self.last_bonding_purse.expect(\"should have bonding purse\");\n        let unbond_era = UnbondEra::new(purse_uref, EraId::new(10), amount, None);\n\n        match self.unbonds.get_mut(&unbond_kind) {\n            None => {\n                let unbond =\n                    Unbond::new(validator_public_key, unbond_kind.clone(), vec![unbond_era]);\n                self.unbonds.insert(unbond_kind, vec![unbond]);\n            }\n            Some(existing_unbond) => {\n                for unbond in existing_unbond {\n                    if !unbond.eras().contains(&unbond_era) {\n                        unbond.eras_mut().push(unbond_era.clone());\n                    }\n                }\n            }\n        }\n        self\n    }\n\n    fn total_supply(&self) -> U512 {\n        self.total_supply\n    }\n}\n\nimpl StateReader for MockStateReader {\n    fn query(&mut self, key: Key) -> Option<StoredValue> {\n        match key {\n            Key::URef(uref) if uref == TOTAL_SUPPLY_KEY => Some(StoredValue::from(\n                CLValue::from_t(self.total_supply).expect(\"should convert to CLValue\"),\n            )),\n            Key::URef(uref) if uref == SEIGNIORAGE_RECIPIENTS_KEY => Some(StoredValue::from(\n                CLValue::from_t(self.seigniorage_recipients.clone())\n                    .expect(\"should convert seigniorage recipients to CLValue\"),\n            )),\n            Key::Account(acc_hash) => self\n                .accounts\n                .get(&acc_hash)\n                .map(|account| StoredValue::from(account.clone())),\n            
Key::Balance(purse_addr) => self.purses.get(&purse_addr).map(|balance| {\n                StoredValue::from(CLValue::from_t(*balance).expect(\"should convert to CLValue\"))\n            }),\n            key => unimplemented!(\n                \"Querying a key of type {:?} is not handled\",\n                key.type_string()\n            ),\n        }\n    }\n\n    fn get_total_supply_key(&mut self) -> Key {\n        Key::URef(TOTAL_SUPPLY_KEY)\n    }\n\n    fn get_seigniorage_recipients_key(&mut self) -> Key {\n        Key::URef(SEIGNIORAGE_RECIPIENTS_KEY)\n    }\n\n    fn get_account(&mut self, account_hash: AccountHash) -> Option<AddressableEntity> {\n        self.accounts.get(&account_hash).cloned()\n    }\n\n    fn get_bids(&mut self) -> Vec<BidKind> {\n        self.bids.clone()\n    }\n\n    fn get_withdraws(&mut self) -> WithdrawPurses {\n        self.withdraws.clone()\n    }\n\n    fn get_unbonding_purses(&mut self) -> BTreeMap<AccountHash, Vec<UnbondingPurse>> {\n        self.unbonding_purses.clone()\n    }\n\n    fn get_unbonds(&mut self) -> BTreeMap<UnbondKind, Vec<Unbond>> {\n        self.unbonds.clone()\n    }\n}\n\nimpl ValidatorInfo {\n    pub fn new(public_key: &PublicKey, weight: U512) -> Self {\n        ValidatorInfo {\n            public_key: public_key.clone(),\n            weight,\n        }\n    }\n}\n\n#[test]\nfn should_transfer_funds() {\n    let mut rng = TestRng::new();\n\n    let public_key1 = PublicKey::random(&mut rng);\n    let public_key2 = PublicKey::random(&mut rng);\n    let account1 = public_key1.to_account_hash();\n    let account2 = public_key2.to_account_hash();\n\n    let mut reader = MockStateReader::new().with_validators(\n        vec![\n            (\n                public_key1,\n                U512::from(1_000_000_000),\n                ValidatorConfig {\n                    bonded_amount: U512::from(1),\n                    ..Default::default()\n                },\n            ),\n            (\n                
public_key2,\n                U512::zero(),\n                ValidatorConfig {\n                    bonded_amount: U512::zero(),\n                    ..Default::default()\n                },\n            ),\n        ],\n        &mut rng,\n    );\n\n    let config = Config {\n        transfers: vec![Transfer {\n            from: account1,\n            to: account2,\n            amount: U512::from(300_000_000),\n        }],\n        ..Default::default()\n    };\n\n    let update = get_update(&mut reader, config);\n\n    // check that the update contains the correct list of validators\n    update.assert_validators_unchanged();\n\n    // should write decreased balance to the first purse\n    let account1 = reader.get_account(account1).expect(\"should have account\");\n    update.assert_written_balance(account1.main_purse(), 700_000_000);\n\n    // should write increased balance to the second purse\n    let account2 = reader.get_account(account2).expect(\"should have account\");\n    update.assert_written_balance(account2.main_purse(), 300_000_000);\n\n    // total supply is written on every purse balance change, so we'll have a write to this key\n    // even though the changes cancel each other out\n    update.assert_total_supply(&mut reader, 1_000_000_001);\n\n    // 3 keys should be written:\n    // - balance of account 1\n    // - balance of account 2\n    // - total supply\n    assert_eq!(update.len(), 3);\n}\n\n#[test]\nfn should_create_account_when_transferring_funds() {\n    let mut rng = TestRng::new();\n\n    let public_key1 = PublicKey::random(&mut rng);\n    let public_key2 = PublicKey::random(&mut rng);\n    let account1 = public_key1.to_account_hash();\n    let account2 = public_key2.to_account_hash();\n\n    let mut reader = MockStateReader::new().with_validators(\n        vec![(\n            public_key1,\n            U512::from(1_000_000_000),\n            ValidatorConfig {\n                bonded_amount: U512::from(1),\n                
..Default::default()\n            },\n        )],\n        &mut rng,\n    );\n\n    let config = Config {\n        transfers: vec![Transfer {\n            from: account1,\n            to: account2,\n            amount: U512::from(300_000_000),\n        }],\n        ..Default::default()\n    };\n\n    let update = get_update(&mut reader, config);\n\n    // check that the update contains the correct list of validators\n    update.assert_validators_unchanged();\n\n    let account1 = reader.get_account(account1).expect(\"should have account\");\n    // account2 shouldn't exist in the reader itself, only the update should be creating it\n    assert!(reader.get_account(account2).is_none());\n    let account2 = update.get_written_addressable_entity(account2);\n\n    // should write decreased balance to the first purse\n    update.assert_written_balance(account1.main_purse(), 700_000_000);\n\n    // check that the main purse for the new account has been created with the correct amount\n    update.assert_written_balance(account2.main_purse(), 300_000_000);\n    update.assert_written_purse_is_unit(account2.main_purse());\n\n    // total supply is written on every purse balance change, so we'll have a write to this key\n    // even though the changes cancel each other out\n    update.assert_total_supply(&mut reader, 1_000_000_001);\n\n    // 7 keys should be written:\n    // - balance of account 1\n    // - account indirection for account 2\n    // - the package for the addressable entity associated with account 2\n    // - the addressable entity associated with account 2.\n    // - main purse of account 2\n    // - balance of account 2\n    // - total supply\n    assert_eq!(update.len(), 7);\n}\n\nfn validator_config(\n    public_key: &PublicKey,\n    balance: U512,\n    staked: U512,\n) -> (PublicKey, U512, ValidatorConfig) {\n    (\n        public_key.clone(),\n        balance,\n        ValidatorConfig {\n            bonded_amount: staked,\n            
..Default::default()\n        },\n    )\n}\n\n#[test]\nfn should_change_one_validator() {\n    let mut rng = TestRng::new();\n\n    let validator1 = PublicKey::random(&mut rng);\n    let validator1_staked = U512::from(1);\n    let validator2 = PublicKey::random(&mut rng);\n    let validator2_staked = U512::from(2);\n    let validator3 = PublicKey::random(&mut rng);\n    let validator3_staked = U512::from(3);\n\n    let liquid = U512::from(5);\n\n    let validators = vec![\n        validator_config(&validator1, liquid, validator1_staked),\n        validator_config(&validator2, liquid, validator2_staked),\n        validator_config(&validator3, liquid, validator3_staked),\n    ];\n    let mut reader = MockStateReader::new().with_validators(validators, &mut rng);\n\n    let mut total_supply: U512 =\n        (liquid * 3) + validator1_staked + validator2_staked + validator3_staked;\n\n    assert_eq!(\n        reader.total_supply(),\n        total_supply,\n        \"initial total supply mismatch\"\n    );\n\n    let validator3_new_balance = liquid.saturating_add(1.into());\n    let validator3_new_staked = validator3_staked.saturating_add(1.into());\n    total_supply = total_supply.saturating_add(2.into());\n\n    // we'll be increasing the stake and balance of validator 3\n    let config = Config {\n        accounts: vec![AccountConfig {\n            public_key: validator3.clone(),\n            balance: Some(validator3_new_balance),\n            validator: Some(ValidatorConfig {\n                bonded_amount: validator3_new_staked,\n                delegation_rate: None,\n                delegators: None,\n                reservations: None,\n            }),\n        }],\n        ..Default::default()\n    };\n\n    let update = get_update(&mut reader, config);\n\n    // check that the update contains the correct list of validators\n    update.assert_validators(&[\n        ValidatorInfo::new(&validator1, validator1_staked),\n        ValidatorInfo::new(&validator2, 
validator2_staked),\n        ValidatorInfo::new(&validator3, validator3_new_staked),\n    ]);\n\n    update.assert_seigniorage_recipients_written(&mut reader);\n    update.assert_total_supply(&mut reader, total_supply.as_u64());\n\n    let account3_hash = validator3.to_account_hash();\n    let account3 = reader\n        .get_account(account3_hash)\n        .expect(\"should have account\");\n    update.assert_written_balance(account3.main_purse(), validator3_new_balance.as_u64());\n\n    let bids = reader.get_bids();\n\n    let old_bid3 = bids.validator_bid(&validator3).expect(\"should have bid\");\n    let bid_purse = *old_bid3.bonding_purse();\n    update.assert_written_balance(bid_purse, validator3_new_staked.as_u64());\n\n    // check bid overwrite\n    let expected_bid = ValidatorBid::unlocked(\n        validator3,\n        bid_purse,\n        validator3_new_staked,\n        Default::default(),\n        0,\n        u64::MAX,\n        0,\n    );\n    update.assert_written_bid(account3_hash, BidKind::Validator(Box::new(expected_bid)));\n\n    // 5 keys should be written:\n    // - seigniorage recipients\n    // - total supply\n    // - balance of bid purse of validator 3\n    // - balance of main purse of validator 3\n    // - bid of validator 3\n    assert_eq!(update.len(), 5);\n}\n\n#[test]\nfn should_change_only_stake_of_one_validator() {\n    let mut rng = TestRng::new();\n\n    let validator1 = PublicKey::random(&mut rng);\n    let validator2 = PublicKey::random(&mut rng);\n    let validator3 = PublicKey::random(&mut rng);\n\n    let mut reader = MockStateReader::new().with_validators(\n        vec![\n            (\n                validator1.clone(),\n                U512::from(101),\n                ValidatorConfig {\n                    bonded_amount: U512::from(101),\n                    ..Default::default()\n                },\n            ),\n            (\n                validator2.clone(),\n                U512::from(102),\n                
ValidatorConfig {\n                    bonded_amount: U512::from(102),\n                    ..Default::default()\n                },\n            ),\n            (\n                validator3.clone(),\n                U512::from(103),\n                ValidatorConfig {\n                    bonded_amount: U512::from(103),\n                    ..Default::default()\n                },\n            ),\n        ],\n        &mut rng,\n    );\n\n    // we'll be updating only the stake of validator 3\n    let config = Config {\n        accounts: vec![AccountConfig {\n            public_key: validator3.clone(),\n            balance: None,\n            validator: Some(ValidatorConfig {\n                bonded_amount: U512::from(104),\n                delegation_rate: None,\n                delegators: None,\n                reservations: None,\n            }),\n        }],\n        ..Default::default()\n    };\n\n    let update = get_update(&mut reader, config);\n\n    // check that the update contains the correct list of validators\n    update.assert_validators(&[\n        ValidatorInfo::new(&validator1, U512::from(101)),\n        ValidatorInfo::new(&validator2, U512::from(102)),\n        ValidatorInfo::new(&validator3, U512::from(104)),\n    ]);\n\n    update.assert_seigniorage_recipients_written(&mut reader);\n    update.assert_total_supply(&mut reader, 613);\n\n    // check purse writes\n    let account3_hash = validator3.to_account_hash();\n    let old_bid3 = reader\n        .get_bids()\n        .validator_bid(&validator3)\n        .expect(\"should have bid\");\n    let bid_purse = *old_bid3.bonding_purse();\n\n    update.assert_written_balance(bid_purse, 104);\n\n    // check bid overwrite\n    let expected_bid = ValidatorBid::unlocked(\n        validator3,\n        bid_purse,\n        U512::from(104),\n        Default::default(),\n        0,\n        u64::MAX,\n        0,\n    );\n    update.assert_written_bid(account3_hash, 
BidKind::Validator(Box::new(expected_bid)));\n\n    // 4 keys should be written:\n    // - seigniorage recipients\n    // - total supply\n    // - bid purse balance for validator 3\n    // - bid for validator 3\n    assert_eq!(update.len(), 4);\n}\n\n#[test]\nfn should_change_only_balance_of_one_validator() {\n    let mut rng = TestRng::new();\n\n    let validator1 = PublicKey::random(&mut rng);\n    let validator2 = PublicKey::random(&mut rng);\n    let validator3 = PublicKey::random(&mut rng);\n\n    let mut reader = MockStateReader::new().with_validators(\n        vec![\n            (\n                validator1,\n                U512::from(101),\n                ValidatorConfig {\n                    bonded_amount: U512::from(101),\n                    ..Default::default()\n                },\n            ),\n            (\n                validator2,\n                U512::from(102),\n                ValidatorConfig {\n                    bonded_amount: U512::from(102),\n                    ..Default::default()\n                },\n            ),\n            (\n                validator3.clone(),\n                U512::from(103),\n                ValidatorConfig {\n                    bonded_amount: U512::from(103),\n                    ..Default::default()\n                },\n            ),\n        ],\n        &mut rng,\n    );\n\n    // we'll be updating only the balance of validator 3\n    let config = Config {\n        accounts: vec![AccountConfig {\n            public_key: validator3.clone(),\n            balance: Some(U512::from(100)),\n            validator: None,\n        }],\n        ..Default::default()\n    };\n\n    let update = get_update(&mut reader, config);\n\n    // check that the update contains the correct list of validators\n    update.assert_validators_unchanged();\n\n    update.assert_total_supply(&mut reader, 609);\n\n    // check purse writes\n    let account3_hash = validator3.to_account_hash();\n    let account3 = reader\n        
.get_account(account3_hash)\n        .expect(\"should have account\");\n\n    update.assert_written_balance(account3.main_purse(), 100);\n\n    // 2 keys should be written:\n    // - total supply\n    // - balance for main purse of validator 3\n    assert_eq!(update.len(), 2);\n}\n\n#[test]\nfn should_replace_one_validator() {\n    let mut rng = TestRng::new();\n\n    let validator1 = PublicKey::random(&mut rng);\n    let validator2 = PublicKey::random(&mut rng);\n\n    let mut reader = MockStateReader::new().with_validators(\n        vec![(\n            validator1.clone(),\n            U512::from(101),\n            ValidatorConfig {\n                bonded_amount: U512::from(101),\n                ..Default::default()\n            },\n        )],\n        &mut rng,\n    );\n\n    // we'll be updating the validators set to only contain validator2\n    let config = Config {\n        accounts: vec![AccountConfig {\n            public_key: validator2.clone(),\n            balance: Some(U512::from(102)),\n            validator: Some(ValidatorConfig {\n                bonded_amount: U512::from(102),\n                delegation_rate: None,\n                delegators: None,\n                reservations: None,\n            }),\n        }],\n        only_listed_validators: true,\n        slash_instead_of_unbonding: true,\n        ..Default::default()\n    };\n\n    let update = get_update(&mut reader, config);\n\n    // check that the update contains the correct list of validators\n    update.assert_validators(&[ValidatorInfo::new(&validator2, U512::from(102))]);\n\n    update.assert_seigniorage_recipients_written(&mut reader);\n    update.assert_total_supply(&mut reader, 305);\n\n    // check purse write for validator1\n    let old_bid1 = reader\n        .get_bids()\n        .validator_bid(&validator1)\n        .expect(\"should have bid\");\n    let bid_purse = *old_bid1.bonding_purse();\n\n    update.assert_written_balance(bid_purse, 0);\n\n    // check bid overwrite\n  
  let account1_hash = validator1.to_account_hash();\n    let mut expected_bid_1 = ValidatorBid::unlocked(\n        validator1,\n        bid_purse,\n        U512::zero(),\n        Default::default(),\n        0,\n        u64::MAX,\n        0,\n    );\n    expected_bid_1.deactivate();\n    update.assert_written_bid(account1_hash, BidKind::Validator(Box::new(expected_bid_1)));\n\n    // check writes for validator2\n    let account2_hash = validator2.to_account_hash();\n\n    // the new account should be created\n    let account2 = update.get_written_addressable_entity(account2_hash);\n\n    // check that the main purse for the new account has been created with the correct amount\n    update.assert_written_purse_is_unit(account2.main_purse());\n    update.assert_written_balance(account2.main_purse(), 102);\n\n    let bid_write = update.get_written_bid(account2_hash);\n    assert_eq!(bid_write.validator_public_key(), validator2);\n    let total_stake = update\n        .get_total_stake(account2_hash)\n        .expect(\"should have total stake\");\n    assert_eq!(total_stake, U512::from(102));\n    assert!(!bid_write.inactive());\n\n    // check that the bid purse for the new validator has been created with the correct amount\n    update.assert_written_purse_is_unit(bid_write.bonding_purse().unwrap());\n    update.assert_written_balance(bid_write.bonding_purse().unwrap(), 102);\n\n    // 12 keys should be written:\n    // - seigniorage recipients\n    // - total supply\n    // - bid for validator 1\n    // - bonding purse balance for validator 1\n    // - account indirection for validator 2\n    // - the package for the addressable entity associated with validator 2\n    // - the addressable entity associated with validator 2.\n    // - main purse for account for validator 2\n    // - main purse balance for account for validator 2\n    // - bid for validator 2\n    // - bonding purse for validator 2\n    // - bonding purse balance for validator 2\n    
assert_eq!(update.len(), 12);\n}\n\n#[test]\nfn should_replace_one_validator_with_unbonding() {\n    let mut rng = TestRng::new();\n\n    let validator1 = PublicKey::random(&mut rng);\n    let validator2 = PublicKey::random(&mut rng);\n\n    let mut reader = MockStateReader::new().with_validators(\n        vec![(\n            validator1.clone(),\n            U512::from(101),\n            ValidatorConfig {\n                bonded_amount: U512::from(101),\n                ..Default::default()\n            },\n        )],\n        &mut rng,\n    );\n\n    // we'll be updating the validators set to only contain validator2\n    let config = Config {\n        accounts: vec![AccountConfig {\n            public_key: validator2.clone(),\n            balance: Some(U512::from(102)),\n            validator: Some(ValidatorConfig {\n                bonded_amount: U512::from(102),\n                ..Default::default()\n            }),\n        }],\n        only_listed_validators: true,\n        slash_instead_of_unbonding: false,\n        ..Default::default()\n    };\n\n    let update = get_update(&mut reader, config);\n\n    // check that the update contains the correct list of validators\n    update.assert_validators(&[ValidatorInfo::new(&validator2, U512::from(102))]);\n\n    update.assert_seigniorage_recipients_written(&mut reader);\n    update.assert_total_supply(&mut reader, 406);\n\n    // check purse write for validator1\n    let old_bid1 = reader\n        .get_bids()\n        .validator_bid(&validator1)\n        .expect(\"should have bid\");\n    let bid_purse = *old_bid1.bonding_purse();\n\n    // bid purse balance should be unchanged\n    update.assert_key_absent(&Key::Balance(bid_purse.addr()));\n\n    // should write an unbonding purse\n    update.assert_unbond_bid_kind(\n        bid_purse,\n        &validator1,\n        &UnbondKind::Validator(validator1.clone()),\n        101,\n    );\n\n    // check bid overwrite\n    let account1_hash = 
validator1.to_account_hash();\n    let mut expected_bid_1 = ValidatorBid::unlocked(\n        validator1,\n        bid_purse,\n        U512::zero(),\n        Default::default(),\n        0,\n        u64::MAX,\n        0,\n    );\n    expected_bid_1.deactivate();\n    update.assert_written_bid(account1_hash, BidKind::Validator(Box::new(expected_bid_1)));\n\n    // check writes for validator2\n    let account2_hash = validator2.to_account_hash();\n\n    // the new account should be created\n    let account2 = update.get_written_addressable_entity(account2_hash);\n\n    // check that the main purse for the new account has been created with the correct amount\n    update.assert_written_purse_is_unit(account2.main_purse());\n    update.assert_written_balance(account2.main_purse(), 102);\n\n    let bid_write = update.get_written_bid(account2_hash);\n    assert_eq!(bid_write.validator_public_key(), validator2);\n    let total_stake = update\n        .get_total_stake(account2_hash)\n        .expect(\"should have total stake\");\n    assert_eq!(total_stake, U512::from(102));\n    assert!(!bid_write.inactive());\n\n    // check that the bid purse for the new validator has been created with the correct amount\n    update.assert_written_purse_is_unit(bid_write.bonding_purse().unwrap());\n    update.assert_written_balance(bid_write.bonding_purse().unwrap(), 102);\n\n    // 12 keys should be written:\n    // - seigniorage recipients\n    // - total supply\n    // - bid for validator 1\n    // - unbonding purse for validator 1\n    // - account indirection for validator 2\n    // - the package for the addressable entity associated with validator 2\n    // - the addressable entity associated with validator 2.\n    // - main purse for account for validator 2\n    // - main purse balance for account for validator 2\n    // - bid for validator 2\n    // - bonding purse for validator 2\n    // - bonding purse balance for validator 2\n    assert_eq!(update.len(), 12);\n}\n\n#[test]\nfn 
should_add_one_validator() {\n    let mut rng = TestRng::new();\n\n    let mut validators = BTreeMap::new();\n    for index in 1..4 {\n        let balance = index * 10;\n        validators.insert(\n            PublicKey::random(&mut rng),\n            (U512::from(balance), U512::from(index)),\n        );\n    }\n\n    let initial_validators = validators\n        .iter()\n        .map(|(k, (b, s))| {\n            (\n                k.clone(),\n                *b,\n                ValidatorConfig {\n                    bonded_amount: *s,\n                    ..Default::default()\n                },\n            )\n        })\n        .collect();\n\n    let initial_supply: u64 = validators.iter().map(|(_, (b, s))| (b + s).as_u64()).sum();\n\n    let mut reader = MockStateReader::new().with_validators(initial_validators, &mut rng);\n\n    assert_eq!(\n        reader.total_supply().as_u64(),\n        initial_supply,\n        \"initial supply should equal\"\n    );\n\n    let validator4 = PublicKey::random(&mut rng);\n    let v4_balance = U512::from(40);\n    let v4_stake = U512::from(4);\n    validators.insert(validator4.clone(), (v4_balance, v4_stake));\n    let config = Config {\n        accounts: vec![AccountConfig {\n            public_key: validator4.clone(),\n            balance: Some(v4_balance),\n            validator: Some(ValidatorConfig {\n                bonded_amount: v4_stake,\n                delegation_rate: None,\n                delegators: None,\n                reservations: None,\n            }),\n        }],\n        only_listed_validators: false,\n        ..Default::default()\n    };\n\n    let update = get_update(&mut reader, config);\n    let expected_supply: u64 = validators.iter().map(|(_, (b, s))| (b + s).as_u64()).sum();\n    assert_eq!(\n        initial_supply + (v4_stake + v4_balance).as_u64(),\n        expected_supply,\n        \"should match\"\n    );\n\n    update.assert_total_supply(&mut reader, expected_supply);\n\n    let 
expected_staking = validators\n        .iter()\n        .map(|(k, (_, s))| ValidatorInfo::new(k, *s))\n        .collect_vec();\n\n    // check that the update contains the correct list of validators\n    update.assert_validators(&expected_staking);\n    update.assert_seigniorage_recipients_written(&mut reader);\n\n    // check writes for validator4\n    let account4_hash = validator4.to_account_hash();\n    // the new account should be created\n    let account4 = update.get_written_addressable_entity(account4_hash);\n    let total_stake = update\n        .get_total_stake(account4_hash)\n        .expect(\"should have total stake\");\n    assert_eq!(total_stake, v4_stake);\n    // check that the main purse for the new account has been created with the correct amount\n    update.assert_written_purse_is_unit(account4.main_purse());\n    update.assert_written_balance(account4.main_purse(), v4_balance.as_u64());\n    // check that the bid purse for the new validator has been created with the correct amount\n    let bid4 = update.get_written_bid(account4_hash);\n    assert_eq!(bid4.validator_public_key(), validator4);\n    update.assert_written_balance(bid4.bonding_purse().unwrap(), v4_stake.as_u64());\n    update.assert_written_purse_is_unit(bid4.bonding_purse().unwrap());\n\n    assert!(!bid4.inactive());\n\n    // 8 keys should be written:\n    // - seigniorage recipients snapshot\n    // - total supply\n    // - account indirection for validator 4\n    // - package for the addressable entity associated with validator 4\n    // - the addressable entity record associated with validator 4\n    // - main purse for account for validator 4\n    // - main purse balance for account for validator 4\n    // - bid for validator 4\n    // - bonding purse for validator 4\n    // - bonding purse balance for validator 4\n    assert_eq!(update.len(), 10);\n}\n\n#[test]\nfn should_add_one_validator_with_delegators() {\n    let mut rng = TestRng::new();\n\n    let validator1 = 
PublicKey::random(&mut rng);\n    let validator2 = PublicKey::random(&mut rng);\n    let delegator1 = PublicKey::random(&mut rng);\n\n    let mut reader = MockStateReader::new().with_validators(\n        vec![(\n            validator1.clone(),\n            U512::from(101),\n            ValidatorConfig {\n                bonded_amount: U512::from(101),\n                ..Default::default()\n            },\n        )],\n        &mut rng,\n    );\n\n    // we'll be adding validator 2\n    let config = Config {\n        accounts: vec![AccountConfig {\n            public_key: validator2.clone(),\n            balance: Some(U512::from(100)),\n            validator: Some(ValidatorConfig {\n                bonded_amount: U512::from(102),\n                delegation_rate: Some(5),\n                delegators: Some(vec![DelegatorConfig {\n                    public_key: delegator1.clone(),\n                    delegated_amount: U512::from(13),\n                }]),\n                reservations: None,\n            }),\n        }],\n        only_listed_validators: false,\n        ..Default::default()\n    };\n\n    let update = get_update(&mut reader, config);\n\n    // check that the update contains the correct list of validators\n    update.assert_validators(&[\n        ValidatorInfo::new(&validator1, U512::from(101)),\n        ValidatorInfo::new(&validator2, U512::from(115)),\n    ]);\n\n    update.assert_seigniorage_recipients_written(&mut reader);\n    update.assert_total_supply(&mut reader, 417);\n\n    // check writes for validator2\n    let account2_hash = validator2.to_account_hash();\n\n    // the new account should be created\n    let account2 = update.get_written_addressable_entity(account2_hash);\n\n    // check that the main purse for the new account has been created with the correct amount\n    update.assert_written_purse_is_unit(account2.main_purse());\n    update.assert_written_balance(account2.main_purse(), 100);\n\n    let bid2 = 
update.get_written_bid(account2_hash);\n    assert_eq!(bid2.validator_public_key(), validator2);\n    assert_eq!(bid2.staked_amount().unwrap(), U512::from(102));\n    let total_staked = update\n        .get_total_stake(account2_hash)\n        .expect(\"should have total stake\");\n    assert_eq!(total_staked, U512::from(115));\n    assert!(!bid2.inactive());\n\n    // check that the bid purse for the new validator has been created with the correct amount\n    update.assert_written_purse_is_unit(bid2.bonding_purse().unwrap());\n    update.assert_written_balance(bid2.bonding_purse().unwrap(), 102);\n\n    if let BidKind::Validator(validator_bid) = bid2 {\n        let bid_delegator_purse = *update\n            .delegator(&validator_bid, &delegator1.into())\n            .expect(\"should have delegator\")\n            .bonding_purse();\n        // check that the bid purse for the new delegator has been created with the correct amount\n        update.assert_written_purse_is_unit(bid_delegator_purse);\n        update.assert_written_balance(bid_delegator_purse, 13);\n    }\n\n    // 13 keys should be written:\n    // - seigniorage recipients\n    // - total supply\n    // - account indirection for validator 2\n    // - main purse for account for validator 2\n    // - main purse balance for account for validator 2\n    // - package for the addressable entity associated with validator 2\n    // - the addressable entity record associated with validator 2\n    // - bid for validator 2\n    // - bonding purse for validator 2\n    // - bonding purse balance for validator2\n    // - bid for delegator\n    // - bonding purse for delegator\n    // - bonding purse balance for delegator\n    assert_eq!(update.len(), 13);\n}\n\n#[test]\nfn should_replace_a_delegator() {\n    let mut rng = TestRng::new();\n\n    let validator1 = PublicKey::random(&mut rng);\n    let v1_stake = 1;\n    let v1_balance = 100;\n    let v1_updated_balance = 100;\n    let v1_updated_stake = 4;\n    let 
delegator1 = PublicKey::random(&mut rng);\n    let d1_stake = 2;\n    let delegator2 = PublicKey::random(&mut rng);\n    let d2_stake = 3;\n\n    let mut reader = MockStateReader::new().with_validators(\n        vec![(\n            validator1.clone(),\n            U512::from(v1_balance),\n            ValidatorConfig {\n                bonded_amount: U512::from(v1_stake),\n                delegation_rate: Some(5),\n                delegators: Some(vec![DelegatorConfig {\n                    public_key: delegator1.clone(),\n                    delegated_amount: U512::from(d1_stake),\n                }]),\n                reservations: None,\n            },\n        )],\n        &mut rng,\n    );\n\n    // we'll be replacing the delegator\n    let config = Config {\n        accounts: vec![AccountConfig {\n            public_key: validator1.clone(),\n            balance: Some(U512::from(v1_updated_balance)),\n            validator: Some(ValidatorConfig {\n                bonded_amount: U512::from(v1_updated_stake),\n                delegation_rate: None,\n                delegators: Some(vec![DelegatorConfig {\n                    public_key: delegator2.clone(),\n                    delegated_amount: U512::from(d2_stake),\n                }]),\n                reservations: None,\n            }),\n        }],\n        only_listed_validators: false,\n        slash_instead_of_unbonding: true,\n        ..Default::default()\n    };\n\n    let update = get_update(&mut reader, config);\n\n    // check that the update contains the correct list of validators\n    update.assert_validators(&[ValidatorInfo::new(\n        &validator1,\n        U512::from(v1_updated_stake + d2_stake),\n    )]);\n\n    update.assert_seigniorage_recipients_written(&mut reader);\n    update.assert_total_supply(\n        &mut reader,\n        v1_updated_balance + v1_updated_stake + d2_stake,\n    );\n\n    let account1_hash = validator1.to_account_hash();\n\n    let bid1 = 
update.get_written_bid(account1_hash);\n    assert_eq!(bid1.validator_public_key(), validator1);\n    assert_eq!(bid1.staked_amount().unwrap(), U512::from(v1_updated_stake));\n    let total_stake = update\n        .get_total_stake(account1_hash)\n        .expect(\"should have total stake\");\n    assert_eq!(total_stake, U512::from(v1_updated_stake + d2_stake));\n    assert!(!bid1.inactive());\n\n    let initial_bids = reader.get_bids();\n\n    let validator_bid = initial_bids\n        .validator_bid(&validator1)\n        .expect(\"should have old bid\");\n    let delegator1_bid_purse = *initial_bids\n        .delegator_by_kind(&validator1, &DelegatorKind::PublicKey(delegator1.clone()))\n        .expect(\"should have old delegator\")\n        .bonding_purse();\n\n    let delegator2_bid_purse = *update\n        .delegator(&validator_bid, &delegator2.into())\n        .expect(\"should have new delegator\")\n        .bonding_purse();\n\n    // check that the old delegator's bid purse got zeroed\n    update.assert_written_balance(delegator1_bid_purse, 0);\n\n    // check that the bid purse for the new delegator has been created with the correct amount\n    update.assert_written_purse_is_unit(delegator2_bid_purse);\n    update.assert_written_balance(delegator2_bid_purse, d2_stake);\n\n    // 9 keys should be written:\n    // - seigniorage recipients\n    // - total supply\n    // - main purse for validator 1\n    // - 3 bids, 3 balances\n    assert_eq!(update.len(), 9);\n}\n\n#[test]\nfn should_replace_a_delegator_with_unbonding() {\n    let mut rng = TestRng::new();\n\n    let validator1 = PublicKey::random(&mut rng);\n    let (v1_stake, v1_balance) = (1, 100);\n    let (v1_updated_stake, v1_updated_balance) = (4, 200);\n    let delegator1 = PublicKey::random(&mut rng);\n    let d1_stake = 2;\n    let delegator2 = PublicKey::random(&mut rng);\n    let d2_stake = 3;\n\n    let mut reader = MockStateReader::new().with_validators(\n        vec![(\n            
validator1.clone(),\n            U512::from(v1_balance),\n            ValidatorConfig {\n                bonded_amount: U512::from(v1_stake),\n                delegation_rate: Some(5),\n                delegators: Some(vec![DelegatorConfig {\n                    public_key: delegator1.clone(),\n                    delegated_amount: U512::from(d1_stake),\n                }]),\n                reservations: None,\n            },\n        )],\n        &mut rng,\n    );\n\n    // we'll be replacing the delegator\n    let config = Config {\n        accounts: vec![AccountConfig {\n            public_key: validator1.clone(),\n            balance: Some(U512::from(v1_updated_balance)),\n            validator: Some(ValidatorConfig {\n                bonded_amount: U512::from(v1_updated_stake),\n                delegation_rate: None,\n                delegators: Some(vec![DelegatorConfig {\n                    public_key: delegator2.clone(),\n                    delegated_amount: U512::from(d2_stake),\n                }]),\n                reservations: None,\n            }),\n        }],\n        only_listed_validators: false,\n        slash_instead_of_unbonding: false,\n        ..Default::default()\n    };\n\n    let update = get_update(&mut reader, config);\n\n    // check that the update contains the correct list of validators\n    update.assert_validators(&[ValidatorInfo::new(\n        &validator1,\n        U512::from(v1_updated_stake + d2_stake),\n    )]);\n\n    update.assert_seigniorage_recipients_written(&mut reader);\n    update.assert_total_supply(\n        &mut reader,\n        v1_updated_balance + v1_updated_stake + d1_stake + d2_stake,\n    );\n\n    let account1_hash = validator1.to_account_hash();\n\n    let bid1 = update.get_written_bid(account1_hash);\n    assert_eq!(bid1.validator_public_key(), validator1);\n    assert_eq!(bid1.staked_amount().unwrap(), U512::from(v1_updated_stake));\n    let total_stake = update\n        .get_total_stake(account1_hash)\n   
     .expect(\"should have total stake\");\n    assert_eq!(total_stake, U512::from(v1_updated_stake + d2_stake));\n    assert!(!bid1.inactive());\n\n    let initial_bids = reader.get_bids();\n\n    let validator_bid = initial_bids\n        .validator_bid(&validator1)\n        .expect(\"should have old bid\");\n    let delegator1_bid_purse = *initial_bids\n        .delegator_by_kind(&validator1, &DelegatorKind::PublicKey(delegator1.clone()))\n        .expect(\"should have old delegator\")\n        .bonding_purse();\n\n    let delegator2_bid_purse = *update\n        .delegator(&validator_bid, &delegator2.into())\n        .expect(\"should have new delegator\")\n        .bonding_purse();\n\n    // check that the old delegator's bid purse hasn't been updated\n    update.assert_key_absent(&Key::Balance(delegator1_bid_purse.addr()));\n\n    // check that the old delegator has been unbonded\n    update.assert_unbond_bid_kind(\n        delegator1_bid_purse,\n        &validator1,\n        &UnbondKind::DelegatedPublicKey(delegator1.clone()),\n        d1_stake,\n    );\n\n    // check that the bid purse for the new delegator has been created with the correct amount\n    update.assert_written_purse_is_unit(delegator2_bid_purse);\n    update.assert_written_balance(delegator2_bid_purse, d2_stake);\n\n    // 10 keys should be written:\n    // - seigniorage recipients\n    // - total supply\n    // - main purse for validator 1\n    // - 3 bids, 3 balances, 1 unbond\n    assert_eq!(update.len(), 10);\n}\n\n#[test]\nfn should_not_change_the_delegator() {\n    let mut rng = TestRng::new();\n\n    let validator1 = PublicKey::random(&mut rng);\n    let v1_balance = 100;\n    let v1_stake = 1;\n    let delegator1 = PublicKey::random(&mut rng);\n    let d1_stake = 2;\n    let v1_updated_stake = 3;\n    let v1_updated_balance = 200;\n\n    let mut reader = MockStateReader::new().with_validators(\n        vec![(\n            validator1.clone(),\n            U512::from(v1_balance),\n         
   ValidatorConfig {\n                bonded_amount: U512::from(v1_stake),\n                delegation_rate: Some(5),\n                delegators: Some(vec![DelegatorConfig {\n                    public_key: delegator1,\n                    delegated_amount: U512::from(d1_stake),\n                }]),\n                reservations: None,\n            },\n        )],\n        &mut rng,\n    );\n\n    // we'll be changing the validator's stake\n    let config = Config {\n        accounts: vec![AccountConfig {\n            public_key: validator1.clone(),\n            balance: Some(U512::from(v1_updated_balance)),\n            validator: Some(ValidatorConfig {\n                bonded_amount: U512::from(v1_updated_stake),\n                delegation_rate: None,\n                delegators: None,\n                reservations: None,\n            }),\n        }],\n        only_listed_validators: false,\n        ..Default::default()\n    };\n\n    let update = get_update(&mut reader, config);\n\n    // check that the update contains the correct list of validators\n    update.assert_validators(&[ValidatorInfo::new(\n        &validator1,\n        U512::from(d1_stake + v1_updated_stake),\n    )]);\n\n    update.assert_seigniorage_recipients_written(&mut reader);\n    update.assert_total_supply(\n        &mut reader,\n        v1_updated_balance + d1_stake + v1_updated_stake,\n    );\n\n    let account1_hash = validator1.to_account_hash();\n\n    let bid1 = update.get_written_bid(account1_hash);\n    assert_eq!(bid1.validator_public_key(), validator1);\n    assert_eq!(bid1.staked_amount().unwrap(), U512::from(v1_updated_stake));\n    let total_stake = update\n        .get_total_stake(account1_hash)\n        .expect(\"should have total stake\");\n    assert_eq!(total_stake, U512::from(v1_updated_stake));\n    assert!(!bid1.inactive());\n\n    // check that the validator's bid purse got updated\n    update.assert_written_balance(bid1.bonding_purse().unwrap(), 
v1_updated_stake);\n\n    // 5 keys should be written:\n    // - seigniorage recipients\n    // - total supply\n    // - bid for validator 1\n    // - bid for delegator 1\n    // - bonding purse balance for validator 1\n    assert_eq!(update.len(), 5);\n}\n\n#[test]\nfn should_remove_the_delegator() {\n    let mut rng = TestRng::new();\n\n    let validator1 = PublicKey::random(&mut rng);\n    let delegator1 = PublicKey::random(&mut rng);\n\n    let v_balance = U512::from(10);\n    let v_stake = U512::from(1);\n    let d_stake = U512::from(2);\n    let initial_supply = v_balance + v_stake + d_stake;\n\n    let mut reader = MockStateReader::new().with_validators(\n        vec![(\n            validator1.clone(),\n            v_balance,\n            ValidatorConfig {\n                bonded_amount: v_stake,\n                delegation_rate: Some(5),\n                delegators: Some(vec![DelegatorConfig {\n                    public_key: delegator1.clone(),\n                    delegated_amount: d_stake,\n                }]),\n                reservations: None,\n            },\n        )],\n        &mut rng,\n    );\n\n    assert_eq!(\n        reader.total_supply(),\n        initial_supply,\n        \"should match initial supply\"\n    );\n\n    /* validator and delegator bids should be present */\n    let original_bids = reader.get_bids();\n    let validator_bid = original_bids\n        .validator_bid(&validator1)\n        .expect(\"should have old bid\");\n    let validator_initial_stake = reader\n        .purses\n        .get(&validator_bid.bonding_purse().addr())\n        .expect(\"should have validator initial stake\");\n    assert_eq!(\n        *validator_initial_stake, v_stake,\n        \"validator initial balance should match\"\n    );\n    let delegator_bid = original_bids\n        .delegator_by_kind(&validator1, &DelegatorKind::PublicKey(delegator1.clone()))\n        .expect(\"should have delegator\");\n    let delegator_initial_stake = reader\n        
.purses\n        .get(&delegator_bid.bonding_purse().addr())\n        .expect(\"should have delegator initial stake\");\n    assert_eq!(\n        *delegator_initial_stake, d_stake,\n        \"delegator initial balance should match\"\n    );\n\n    let v_updated_balance = U512::from(20);\n    let v_updated_stake = U512::from(2);\n    let updated_supply = v_updated_balance + v_updated_stake;\n\n    /* make various changes to the bid, including removal of delegator */\n    let config = Config {\n        accounts: vec![AccountConfig {\n            public_key: validator1.clone(),\n            balance: Some(v_updated_balance),\n            validator: Some(ValidatorConfig {\n                bonded_amount: v_updated_stake,\n                delegation_rate: None,\n                delegators: Some(vec![]),\n                reservations: None,\n            }),\n        }],\n        only_listed_validators: false,\n        slash_instead_of_unbonding: true,\n        ..Default::default()\n    };\n\n    let update = get_update(&mut reader, config);\n\n    /* check high level details */\n    let expected_validator_set = &[ValidatorInfo::new(&validator1, v_updated_stake)];\n    update.assert_validators(expected_validator_set);\n    update.assert_seigniorage_recipients_written(&mut reader);\n    update.assert_total_supply(&mut reader, updated_supply.as_u64());\n\n    /* check validator bid details */\n    let account1_hash = validator1.to_account_hash();\n    let updated_validator_bid = update.get_written_bid(account1_hash);\n    update.assert_written_balance(\n        updated_validator_bid.bonding_purse().unwrap(),\n        v_updated_stake.as_u64(),\n    );\n    assert_eq!(updated_validator_bid.validator_public_key(), validator1);\n    assert_eq!(\n        updated_validator_bid.staked_amount().unwrap(),\n        v_updated_stake\n    );\n    let total_staked = update\n        .get_total_stake(account1_hash)\n        .expect(\"should have total stake\");\n    assert_eq!(total_staked, 
v_updated_stake);\n    assert!(!updated_validator_bid.inactive());\n    // The delegator's bonding purse should be 0'd\n    update.assert_written_balance(*delegator_bid.bonding_purse(), 0);\n\n    // 7 keys should be written:\n    // - seigniorage recipients\n    // - total supply\n    // - main purse for validator 1\n    // - bonding purse balance for validator 1\n    // - bonding purse balance for delegator 1\n    // - unbonding for delegator 1\n    // - bid for validator 1\n    assert_eq!(update.len(), 7);\n}\n\n#[test]\nfn should_remove_the_delegator_with_unbonding() {\n    let mut rng = TestRng::new();\n\n    let validator1 = PublicKey::random(&mut rng);\n    let delegator1 = PublicKey::random(&mut rng);\n\n    let mut reader = MockStateReader::new().with_validators(\n        vec![(\n            validator1.clone(),\n            U512::from(101),\n            ValidatorConfig {\n                bonded_amount: U512::from(101),\n                delegation_rate: Some(5),\n                delegators: Some(vec![DelegatorConfig {\n                    public_key: delegator1.clone(),\n                    delegated_amount: U512::from(13),\n                }]),\n                reservations: None,\n            },\n        )],\n        &mut rng,\n    );\n\n    // we'll be removing the delegator\n    let config = Config {\n        accounts: vec![AccountConfig {\n            public_key: validator1.clone(),\n            balance: Some(U512::from(101)),\n            validator: Some(ValidatorConfig {\n                bonded_amount: U512::from(111),\n                delegation_rate: None,\n                delegators: Some(vec![]),\n                reservations: None,\n            }),\n        }],\n        only_listed_validators: false,\n        slash_instead_of_unbonding: false,\n        ..Default::default()\n    };\n\n    let update = get_update(&mut reader, config);\n\n    // check that the update contains the correct list of validators\n    
update.assert_validators(&[ValidatorInfo::new(&validator1, U512::from(111))]);\n\n    update.assert_seigniorage_recipients_written(&mut reader);\n    update.assert_total_supply(&mut reader, 225);\n\n    let account1_hash = validator1.to_account_hash();\n\n    let expected = U512::from(111);\n    let bid1 = update.get_written_bid(account1_hash);\n    assert_eq!(bid1.validator_public_key(), validator1);\n    assert_eq!(bid1.staked_amount().unwrap(), expected);\n\n    let total_stake = update\n        .get_total_stake(account1_hash)\n        .expect(\"should have total stake\");\n\n    assert_eq!(total_stake, expected);\n    assert!(!bid1.inactive());\n\n    // check that the validator's bid purse got updated\n    update.assert_written_balance(bid1.bonding_purse().unwrap(), 111);\n\n    let old_bids1 = reader.get_bids();\n    let _ = old_bids1\n        .validator_bid(&validator1)\n        .expect(\"should have validator1\");\n\n    let delegator1_bid = old_bids1\n        .delegator_by_kind(&validator1, &DelegatorKind::PublicKey(delegator1.clone()))\n        .expect(\"should have delegator1\");\n\n    let delegator1_bid_purse = *delegator1_bid.bonding_purse();\n\n    // check that the old delegator's bid purse hasn't been updated\n    update.assert_key_absent(&Key::Balance(delegator1_bid_purse.addr()));\n\n    // check that the unbonding purse got created\n    update.assert_unbond_bid_kind(\n        delegator1_bid_purse,\n        &validator1,\n        &UnbondKind::DelegatedPublicKey(delegator1.clone()),\n        13,\n    );\n\n    // 6 keys should be written:\n    // - seigniorage recipients\n    // - total supply\n    // - bid for validator 1\n    // - bid for delegator 1\n    // - bonding purse balance for validator 1\n    // - unbonding purse for delegator\n    assert_eq!(update.len(), 6);\n}\n\n#[test]\nfn should_slash_a_validator_and_delegator_with_enqueued_withdraws() {\n    let mut rng = TestRng::new();\n\n    let validator1 = PublicKey::random(&mut rng);\n    
let validator2 = PublicKey::random(&mut rng);\n    let delegator1 = PublicKey::random(&mut rng);\n    let delegator2 = PublicKey::random(&mut rng);\n    let past_delegator1 = PublicKey::random(&mut rng);\n    let past_delegator2 = PublicKey::random(&mut rng);\n\n    let amount = U512::one();\n    let era_id = EraId::new(1);\n\n    let validator1_config = ValidatorConfig {\n        bonded_amount: amount,\n        delegation_rate: Some(5),\n        delegators: Some(vec![DelegatorConfig {\n            public_key: delegator1.clone(),\n            delegated_amount: amount,\n        }]),\n        reservations: None,\n    };\n\n    let mut reader = MockStateReader::new()\n        .with_validators(\n            vec![\n                (validator1.clone(), amount, validator1_config.clone()),\n                (\n                    validator2.clone(),\n                    amount,\n                    ValidatorConfig {\n                        bonded_amount: amount,\n                        delegation_rate: Some(5),\n                        delegators: Some(vec![DelegatorConfig {\n                            public_key: delegator2.clone(),\n                            delegated_amount: amount,\n                        }]),\n                        reservations: None,\n                    },\n                ),\n            ],\n            &mut rng,\n        )\n        .with_withdraw(\n            validator1.clone(),\n            UnbondKind::Validator(validator1.clone()),\n            era_id,\n            amount,\n            &mut rng,\n        )\n        .with_withdraw(\n            validator1.clone(),\n            UnbondKind::DelegatedPublicKey(delegator1),\n            era_id,\n            amount,\n            &mut rng,\n        )\n        .with_withdraw(\n            validator1.clone(),\n            UnbondKind::DelegatedPublicKey(past_delegator1),\n            era_id,\n            amount,\n            &mut rng,\n        )\n        .with_withdraw(\n            
validator2.clone(),\n            UnbondKind::Validator(validator2.clone()),\n            era_id,\n            amount,\n            &mut rng,\n        )\n        .with_withdraw(\n            validator2.clone(),\n            UnbondKind::DelegatedPublicKey(delegator2.clone()),\n            era_id,\n            amount,\n            &mut rng,\n        )\n        .with_withdraw(\n            validator2.clone(),\n            UnbondKind::DelegatedPublicKey(past_delegator2.clone()),\n            era_id,\n            amount,\n            &mut rng,\n        );\n\n    // we'll be removing validator2\n    let config = Config {\n        accounts: vec![AccountConfig {\n            public_key: validator1.clone(),\n            balance: Some(amount),\n            validator: Some(validator1_config),\n        }],\n        only_listed_validators: true,\n        slash_instead_of_unbonding: true,\n        ..Default::default()\n    };\n\n    let update = get_update(&mut reader, config);\n\n    // check that the update contains the correct list of validators\n    update.assert_validators(&[ValidatorInfo::new(&validator1, amount * 2)]);\n\n    update.assert_seigniorage_recipients_written(&mut reader);\n\n    // check validator2 slashed\n    let old_bids2 = reader.get_bids();\n    let old_bid2 = old_bids2\n        .validator_bid(&validator2)\n        .expect(\"should have validator2\");\n\n    update.assert_written_balance(*old_bid2.bonding_purse(), 0);\n    let delegator2_record = old_bids2\n        .delegator_by_kind(&validator2, &DelegatorKind::PublicKey(delegator2.clone()))\n        .expect(\"should have delegator 2\");\n\n    // check delegator2 slashed\n    update.assert_written_balance(*delegator2_record.bonding_purse(), 0);\n    // check past_delegator2 untouched\n    let past_delegator2_bid_purse = reader\n        .withdraws\n        .get(&validator2.to_account_hash())\n        .expect(\"should have withdraws for validator2\")\n        .iter()\n        .find(|withdraw| 
withdraw.unbonder_public_key() == &past_delegator2)\n        .expect(\"should have withdraw purses\")\n        .bonding_purse();\n    update.assert_key_absent(&Key::Balance(past_delegator2_bid_purse.addr()));\n\n    // check validator1 and its delegators not slashed\n    for withdraw in reader\n        .withdraws\n        .get(&validator1.to_account_hash())\n        .expect(\"should have withdraws for validator2\")\n    {\n        update.assert_key_absent(&Key::Balance(withdraw.bonding_purse().addr()));\n    }\n\n    // check the withdraws under validator 2 still contain the past delegator's withdraw\n    update.assert_withdraw_purse(*past_delegator2_bid_purse, &validator2, &past_delegator2, 1);\n\n    // check the withdraws under validator 1 are unchanged\n    update.assert_key_absent(&Key::Withdraw(validator1.to_account_hash()));\n\n    // 8 keys should be written:\n    // - seigniorage recipients\n    // - total supply\n    // - 2 balances, 2 bids, 1 withdraw\n    assert_eq!(update.len(), 7);\n}\n\n#[ignore]\n#[test]\nfn should_slash_a_validator_and_delegator_with_enqueued_unbonds() {\n    let mut rng = TestRng::new();\n\n    let (v1_balance, v2_balance) = (100u64, 200u64);\n    let (v1_stake, v2_stake, d1_stake, d2_stake) = (1u64, 2u64, 3u64, 4u64);\n    let (pd1_stake, pd2_stake) = (10u64, 11u64);\n\n    let validator1 = PublicKey::random(&mut rng);\n    let validator2 = PublicKey::random(&mut rng);\n    let delegator1 = PublicKey::random(&mut rng);\n    let delegator2 = PublicKey::random(&mut rng);\n\n    let past_delegator1 = PublicKey::random(&mut rng);\n    let past_delegator2 = PublicKey::random(&mut rng);\n\n    let validator1_config = ValidatorConfig {\n        bonded_amount: U512::from(v1_stake),\n        delegation_rate: Some(5),\n        delegators: Some(vec![DelegatorConfig {\n            public_key: delegator1.clone(),\n            delegated_amount: U512::from(d1_stake),\n        }]),\n        reservations: None,\n    };\n\n    let 
validator_2_config = ValidatorConfig {\n        bonded_amount: U512::from(v2_stake),\n        delegation_rate: Some(5),\n        delegators: Some(vec![DelegatorConfig {\n            public_key: delegator2.clone(),\n            delegated_amount: U512::from(d2_stake),\n        }]),\n        reservations: None,\n    };\n\n    let mut reader = MockStateReader::new()\n        .with_validators(\n            vec![\n                (\n                    validator1.clone(),\n                    v1_balance.into(),\n                    validator1_config.clone(),\n                ),\n                (\n                    validator2.clone(),\n                    v2_balance.into(),\n                    validator_2_config.clone(),\n                ),\n            ],\n            &mut rng,\n        )\n        .with_unbond(\n            validator1.clone(),\n            UnbondKind::Validator(validator1.clone()),\n            v1_stake.into(),\n            &mut rng,\n        )\n        .with_unbond(\n            validator1.clone(),\n            UnbondKind::DelegatedPublicKey(delegator1.clone()),\n            d1_stake.into(),\n            &mut rng,\n        )\n        .with_unbond(\n            validator1.clone(),\n            UnbondKind::DelegatedPublicKey(past_delegator1.clone()),\n            pd1_stake.into(),\n            &mut rng,\n        )\n        .with_unbond(\n            validator2.clone(),\n            UnbondKind::Validator(validator2.clone()),\n            v2_stake.into(),\n            &mut rng,\n        )\n        .with_unbond(\n            validator2.clone(),\n            UnbondKind::DelegatedPublicKey(delegator2.clone()),\n            d2_stake.into(),\n            &mut rng,\n        )\n        .with_unbond(\n            validator2.clone(),\n            UnbondKind::DelegatedPublicKey(past_delegator2.clone()),\n            pd2_stake.into(),\n            &mut rng,\n        );\n\n    // we'll be removing validator2\n    let config = Config {\n        accounts: 
vec![AccountConfig {\n            public_key: validator1.clone(),\n            balance: Some(v1_stake.into()),\n            validator: Some(validator1_config),\n        }],\n        only_listed_validators: true,\n        slash_instead_of_unbonding: true,\n        ..Default::default()\n    };\n\n    let update = get_update(&mut reader, config);\n\n    // check that the update contains the correct list of validators\n    update.assert_validators(&[ValidatorInfo::new(\n        &validator1,\n        U512::from(v1_stake + d1_stake),\n    )]);\n\n    update.assert_seigniorage_recipients_written(&mut reader);\n\n    let old_bids = reader.get_bids();\n    // check validator2 slashed\n    let old_bid2 = old_bids\n        .validator_bid(&validator2)\n        .expect(\"should have bid\");\n    update.assert_written_balance(*old_bid2.bonding_purse(), 0);\n\n    let delegator = old_bids\n        .delegator_by_kind(&validator2, &DelegatorKind::PublicKey(delegator2.clone()))\n        .expect(\"should have delegator\");\n\n    // check delegator2 slashed\n    update.assert_written_balance(*delegator.bonding_purse(), 0);\n    let unbond_kind = UnbondKind::DelegatedPublicKey(past_delegator2.clone());\n    // check past_delegator2 untouched\n    let past_delegator2_bid_purse = reader\n        .unbonds\n        .get(&unbond_kind)\n        .expect(\"should have unbonds for validator2\")\n        .first()\n        .expect(\"must have at least one entry\")\n        .eras()\n        .first()\n        .expect(\"should have unbonding purses\")\n        .bonding_purse();\n    update.assert_key_absent(&Key::Balance(past_delegator2_bid_purse.addr()));\n    let unbond_kind = UnbondKind::Validator(validator1.clone());\n    // check validator1 and its delegators not slashed\n    for unbond in reader\n        .unbonds\n        .get(&unbond_kind)\n        .expect(\"should have unbonds for validator2\")\n        .first()\n        .expect(\"must have at least one entry\")\n        .eras()\n    {\n    
    update.assert_key_absent(&Key::Balance(unbond.bonding_purse().addr()));\n    }\n\n    // check the unbonds under validator 1 are unchanged\n    update.assert_key_absent(&Key::Unbond(validator1.to_account_hash()));\n    update.assert_key_absent(&Key::Unbond(delegator1.to_account_hash()));\n    update.assert_key_absent(&Key::Unbond(past_delegator1.to_account_hash()));\n\n    // 8 keys should be written for validator1:\n    // - seigniorage recipients\n    // - total supply\n    // - 3 balances, 2 bids,\n    // - 1 unbonds\n    assert_eq!(update.len(), 7);\n}\n\n#[test]\nfn should_handle_unbonding_to_oneself_correctly() {\n    let rng = &mut TestRng::new();\n\n    let old_validator = PublicKey::random(rng);\n    let new_validator = PublicKey::random(rng);\n\n    const OLD_BALANCE: u64 = 31;\n    const NEW_BALANCE: u64 = 73;\n    const OLD_STAKE: u64 = 97;\n    const NEW_STAKE: u64 = 103;\n\n    let mut reader = MockStateReader::new()\n        .with_account(old_validator.to_account_hash(), OLD_BALANCE.into(), rng)\n        .with_validators(\n            vec![(\n                old_validator.clone(),\n                U512::from(OLD_BALANCE),\n                ValidatorConfig {\n                    bonded_amount: U512::from(OLD_STAKE),\n                    ..Default::default()\n                },\n            )],\n            rng,\n        )\n        // One token is being unbonded to the validator:\n        .with_unbond(\n            old_validator.clone(),\n            UnbondKind::Validator(old_validator.clone()),\n            OLD_STAKE.into(),\n            rng,\n        );\n\n    // We'll be updating the validators set to only contain new_validator:\n    let config = Config {\n        accounts: vec![AccountConfig {\n            public_key: new_validator.clone(),\n            balance: Some(U512::from(NEW_BALANCE)),\n            validator: Some(ValidatorConfig {\n                bonded_amount: U512::from(NEW_STAKE),\n                ..Default::default()\n            
}),\n        }],\n        only_listed_validators: true,\n        slash_instead_of_unbonding: false,\n        ..Default::default()\n    };\n\n    let update = get_update(&mut reader, config);\n\n    // Check that the update contains the correct list of validators\n    update.assert_validators(&[ValidatorInfo::new(&new_validator, U512::from(NEW_STAKE))]);\n\n    update.assert_seigniorage_recipients_written(&mut reader);\n    update.assert_total_supply(\n        &mut reader,\n        OLD_BALANCE + OLD_STAKE + NEW_BALANCE + NEW_STAKE,\n    );\n\n    // Check purse write for validator1\n    let bid_purse = *reader\n        .get_bids()\n        .validator_bid(&old_validator)\n        .expect(\"should have bid\")\n        .bonding_purse();\n\n    // Bid purse balance should be unchanged\n    update.assert_key_absent(&Key::Balance(bid_purse.addr()));\n\n    // Check bid overwrite\n    let account1_hash = old_validator.to_account_hash();\n    let mut expected_bid_1 = ValidatorBid::unlocked(\n        old_validator,\n        bid_purse,\n        U512::zero(),\n        Default::default(),\n        0,\n        u64::MAX,\n        0,\n    );\n    expected_bid_1.deactivate();\n    update.assert_written_bid(account1_hash, BidKind::Validator(Box::new(expected_bid_1)));\n\n    // Check writes for validator2\n    let account2_hash = new_validator.to_account_hash();\n\n    // The new account should be created\n    let account2 = update.get_written_addressable_entity(account2_hash);\n\n    // Check that the main purse for the new account has been created with the correct amount\n    update.assert_written_purse_is_unit(account2.main_purse());\n    update.assert_written_balance(account2.main_purse(), NEW_BALANCE);\n\n    let bid_write = update.get_written_bid(account2_hash);\n    assert_eq!(bid_write.validator_public_key(), new_validator);\n    let total = update\n        .get_total_stake(account2_hash)\n        .expect(\"should read total staked amount\");\n    assert_eq!(total, 
U512::from(NEW_STAKE));\n    assert!(!bid_write.inactive());\n\n    // Check that the bid purse for the new validator has been created with the correct amount\n    update.assert_written_purse_is_unit(bid_write.bonding_purse().unwrap());\n    update.assert_written_balance(bid_write.bonding_purse().unwrap(), NEW_STAKE);\n\n    // 11 keys should be written:\n    // - seigniorage recipients\n    // - total supply\n    // - bid for old validator\n    // - account for new validator\n    // - main purse for account for new validator\n    // - main purse balance for account for new validator\n    // - addressable entity for new validator\n    // - package for the newly created addressable entity\n    // - bid for new validator\n    // - bonding purse for new validator\n    // - bonding purse balance for new validator\n    assert_eq!(update.len(), 11);\n}\n\n#[test]\nfn should_handle_unbonding_to_a_delegator_correctly() {\n    let rng = &mut TestRng::new();\n\n    let old_validator = PublicKey::random(rng);\n    let new_validator = PublicKey::random(rng);\n    let delegator = PublicKey::random(rng);\n\n    const OLD_BALANCE: u64 = 100;\n    const NEW_BALANCE: u64 = 200;\n    const DELEGATOR_BALANCE: u64 = 50;\n    const OLD_STAKE: u64 = 1;\n    const DELEGATOR_STAKE: u64 = 2;\n    const NEW_STAKE: u64 = 3;\n\n    let mut reader = MockStateReader::new()\n        .with_account(delegator.to_account_hash(), DELEGATOR_BALANCE.into(), rng)\n        .with_account(old_validator.to_account_hash(), OLD_BALANCE.into(), rng)\n        .with_validators(\n            vec![(\n                old_validator.clone(),\n                U512::from(OLD_BALANCE),\n                ValidatorConfig {\n                    bonded_amount: U512::from(OLD_STAKE),\n                    delegators: Some(vec![DelegatorConfig {\n                        public_key: delegator.clone(),\n                        delegated_amount: DELEGATOR_STAKE.into(),\n                    }]),\n                    
..Default::default()\n                },\n            )],\n            rng,\n        )\n        // One token is being unbonded to the validator:\n        .with_unbond(\n            old_validator.clone(),\n            UnbondKind::Validator(old_validator.clone()),\n            OLD_STAKE.into(),\n            rng,\n        )\n        // One token is being unbonded to the delegator:\n        .with_unbond(\n            old_validator.clone(),\n            UnbondKind::DelegatedPublicKey(delegator.clone()),\n            OLD_STAKE.into(),\n            rng,\n        );\n\n    // We'll be updating the validators set to only contain new_validator:\n    let config = Config {\n        accounts: vec![AccountConfig {\n            public_key: new_validator.clone(),\n            balance: Some(U512::from(NEW_BALANCE)),\n            validator: Some(ValidatorConfig {\n                bonded_amount: U512::from(NEW_STAKE),\n                ..Default::default()\n            }),\n        }],\n        only_listed_validators: true,\n        slash_instead_of_unbonding: false,\n        ..Default::default()\n    };\n\n    let update = get_update(&mut reader, config);\n\n    // Check that the update contains the correct list of validators\n    update.assert_validators(&[ValidatorInfo::new(&new_validator, U512::from(NEW_STAKE))]);\n\n    update.assert_seigniorage_recipients_written(&mut reader);\n    update.assert_total_supply(\n        &mut reader,\n        OLD_BALANCE + OLD_STAKE + NEW_BALANCE + NEW_STAKE + DELEGATOR_BALANCE + DELEGATOR_STAKE,\n    );\n    let unbond_kind = UnbondKind::Validator(old_validator.clone());\n    let unbond = reader\n        .get_unbonds()\n        .get(&unbond_kind)\n        .cloned()\n        .expect(\"should have unbond purses\");\n    let validator_purse = unbond\n        .first()\n        .expect(\"must have unbond entry\")\n        .eras()\n        .first()\n        .map(|purse| *purse.bonding_purse())\n        .expect(\"A bonding purse for the validator\");\n   
 let unbond_kind = UnbondKind::DelegatedPublicKey(delegator.clone());\n    let unbonds = reader\n        .get_unbonds()\n        .get(&unbond_kind)\n        .cloned()\n        .expect(\"should have unbond purses\");\n    let unbonding_purses = unbonds.first().expect(\"must have at least one entry\");\n    let _ = unbonding_purses\n        .eras()\n        .first()\n        .map(|purse| *purse.bonding_purse())\n        .expect(\"A bonding purse for the delegator\");\n\n    // Bid purse balance should be unchanged\n    update.assert_key_absent(&Key::Balance(validator_purse.addr()));\n\n    // Check bid overwrite\n    let account1_hash = old_validator.to_account_hash();\n    let mut expected_bid_1 = ValidatorBid::unlocked(\n        old_validator,\n        validator_purse,\n        U512::zero(),\n        Default::default(),\n        0,\n        u64::MAX,\n        0,\n    );\n    expected_bid_1.deactivate();\n    update.assert_written_bid(account1_hash, BidKind::Validator(Box::new(expected_bid_1)));\n\n    // Check writes for validator2\n    let account2_hash = new_validator.to_account_hash();\n\n    // The new account should be created\n    let account2 = update.get_written_addressable_entity(account2_hash);\n\n    // Check that the main purse for the new account has been created with the correct amount\n    update.assert_written_purse_is_unit(account2.main_purse());\n    update.assert_written_balance(account2.main_purse(), NEW_BALANCE);\n\n    let bid_write = update.get_written_bid(account2_hash);\n    assert_eq!(bid_write.validator_public_key(), new_validator);\n    let total = update\n        .get_total_stake(account2_hash)\n        .expect(\"should read total staked amount\");\n    assert_eq!(total, U512::from(NEW_STAKE));\n    assert!(!bid_write.inactive());\n\n    // Check that the bid purse for the new validator has been created with the correct amount\n    update.assert_written_purse_is_unit(bid_write.bonding_purse().unwrap());\n    
update.assert_written_balance(bid_write.bonding_purse().unwrap(), NEW_STAKE);\n\n    // 13 keys should be written:\n    // - seigniorage recipients\n    // - total supply\n    // - bid for old validator\n    // - bid for delegator\n    // - unbonding purse for old validator\n    // - account for new validator\n    // - main purse for account for new validator\n    // - main purse balance for account for new validator\n    // - addressable entity for new validator\n    // - package for the newly created addressable entity\n    // - bid for new validator\n    // - bonding purse for new validator\n    // - bonding purse balance for new validator\n    assert_eq!(update.len(), 13);\n}\n\n#[test]\nfn should_handle_legacy_unbonding_to_oneself_correctly() {\n    let rng = &mut TestRng::new();\n\n    let old_validator = PublicKey::random(rng);\n    let new_validator = PublicKey::random(rng);\n\n    const OLD_BALANCE: u64 = 100;\n    const NEW_BALANCE: u64 = 200;\n    const OLD_STAKE: u64 = 1;\n    const NEW_STAKE: u64 = 2;\n\n    let mut reader = MockStateReader::new()\n        .with_account(old_validator.to_account_hash(), OLD_BALANCE.into(), rng)\n        .with_validators(\n            vec![(\n                old_validator.clone(),\n                U512::from(OLD_BALANCE),\n                ValidatorConfig {\n                    bonded_amount: U512::from(OLD_STAKE),\n                    ..Default::default()\n                },\n            )],\n            rng,\n        )\n        // Two tokens are being unbonded to the validator, one legacy, the other not:\n        .with_unbond(\n            old_validator.clone(),\n            UnbondKind::Validator(old_validator.clone()),\n            OLD_STAKE.into(),\n            rng,\n        )\n        .with_withdraw(\n            old_validator.clone(),\n            UnbondKind::Validator(old_validator.clone()),\n            EraId::new(1),\n            OLD_STAKE.into(),\n            rng,\n        );\n\n    // We'll be updating the 
validators set to only contain new_validator:\n    let config = Config {\n        accounts: vec![AccountConfig {\n            public_key: new_validator.clone(),\n            balance: Some(U512::from(NEW_BALANCE)),\n            validator: Some(ValidatorConfig {\n                bonded_amount: U512::from(NEW_STAKE),\n                ..Default::default()\n            }),\n        }],\n        only_listed_validators: true,\n        slash_instead_of_unbonding: false,\n        ..Default::default()\n    };\n\n    let update = get_update(&mut reader, config);\n\n    // Check that the update contains the correct list of validators\n    update.assert_validators(&[ValidatorInfo::new(&new_validator, U512::from(NEW_STAKE))]);\n\n    update.assert_seigniorage_recipients_written(&mut reader);\n    update.assert_total_supply(\n        &mut reader,\n        OLD_BALANCE + OLD_STAKE + NEW_BALANCE + NEW_STAKE,\n    );\n\n    // Check purse write for validator1\n    let bid_purse = *reader\n        .get_bids()\n        .validator_bid(&old_validator)\n        .expect(\"should have bid\")\n        .bonding_purse();\n\n    // Bid purse balance should be unchanged\n    update.assert_key_absent(&Key::Balance(bid_purse.addr()));\n\n    // Check bid overwrite\n    let account1_hash = old_validator.to_account_hash();\n    let mut expected_bid_1 = ValidatorBid::unlocked(\n        old_validator,\n        bid_purse,\n        U512::zero(),\n        Default::default(),\n        0,\n        u64::MAX,\n        0,\n    );\n    expected_bid_1.deactivate();\n    update.assert_written_bid(account1_hash, BidKind::Validator(Box::new(expected_bid_1)));\n\n    // Check writes for validator2\n    let account2_hash = new_validator.to_account_hash();\n\n    // The new account should be created\n    let account2 = update.get_written_addressable_entity(account2_hash);\n\n    // Check that the main purse for the new account has been created with the correct amount\n    
update.assert_written_purse_is_unit(account2.main_purse());\n    update.assert_written_balance(account2.main_purse(), NEW_BALANCE);\n\n    let bid_write = update.get_written_bid(account2_hash);\n    assert_eq!(bid_write.validator_public_key(), new_validator);\n    let total = update\n        .get_total_stake(account2_hash)\n        .expect(\"should read total staked amount\");\n    assert_eq!(total, U512::from(NEW_STAKE));\n    assert!(!bid_write.inactive());\n\n    // Check that the bid purse for the new validator has been created with the correct amount\n    update.assert_written_purse_is_unit(bid_write.bonding_purse().unwrap());\n    update.assert_written_balance(bid_write.bonding_purse().unwrap(), NEW_STAKE);\n\n    // 11 keys should be written:\n    // - seigniorage recipients\n    // - total supply\n    // - bid for old validator\n    // - account for new validator\n    // - main purse for account for new validator\n    // - main purse balance for account for new validator\n    // - addressable entity for new validator\n    // - package for the newly created addressable entity\n    // - bid for new validator\n    // - bonding purse for new validator\n    // - bonding purse balance for new validator\n    assert_eq!(update.len(), 11);\n}\n\n#[test]\nfn should_handle_legacy_unbonding_to_a_delegator_correctly() {\n    let rng = &mut TestRng::new();\n\n    let v1_public_key = PublicKey::random(rng);\n    let v2_public_key = PublicKey::random(rng);\n    let d1_public_key = PublicKey::random(rng);\n\n    const V1_INITIAL_BALANCE: u64 = 100;\n    const V1_INITIAL_STAKE: u64 = 1;\n    const V2_INITIAL_BALANCE: u64 = 200;\n    const V2_INITIAL_STAKE: u64 = 3;\n\n    const D1_INITIAL_BALANCE: u64 = 20;\n    const D1_INITIAL_STAKE: u64 = 2;\n\n    const WITHDRAW_ERA: EraId = EraId::new(0);\n\n    let mut reader = MockStateReader::new()\n        .with_account(\n            d1_public_key.to_account_hash(),\n            D1_INITIAL_BALANCE.into(),\n            rng,\n        
)\n        .with_account(\n            v1_public_key.to_account_hash(),\n            V1_INITIAL_BALANCE.into(),\n            rng,\n        )\n        .with_validators(\n            vec![(\n                v1_public_key.clone(),\n                U512::from(V1_INITIAL_BALANCE),\n                ValidatorConfig {\n                    bonded_amount: U512::from(V1_INITIAL_STAKE),\n                    delegators: Some(vec![DelegatorConfig {\n                        public_key: d1_public_key.clone(),\n                        delegated_amount: D1_INITIAL_STAKE.into(),\n                    }]),\n                    ..Default::default()\n                },\n            )],\n            rng,\n        )\n        .with_withdraw(\n            v1_public_key.clone(),\n            UnbondKind::Validator(v1_public_key.clone()),\n            WITHDRAW_ERA,\n            U512::from(V1_INITIAL_STAKE),\n            rng,\n        )\n        // Two tokens are being unbonded to the validator, one legacy, the other not:\n        .with_unbond(\n            v1_public_key.clone(),\n            UnbondKind::Validator(v1_public_key.clone()),\n            U512::from(V1_INITIAL_STAKE),\n            rng,\n        )\n        // Two tokens are being unbonded to the delegator, one legacy, the other not:\n        .with_withdraw(\n            v1_public_key.clone(),\n            UnbondKind::DelegatedPublicKey(d1_public_key.clone()),\n            WITHDRAW_ERA,\n            U512::from(D1_INITIAL_STAKE),\n            rng,\n        )\n        .with_unbond(\n            v1_public_key.clone(),\n            UnbondKind::DelegatedPublicKey(d1_public_key),\n            U512::from(D1_INITIAL_STAKE),\n            rng,\n        );\n\n    assert_eq!(\n        reader.total_supply.as_u64(),\n        V1_INITIAL_BALANCE + V1_INITIAL_STAKE + D1_INITIAL_BALANCE + D1_INITIAL_STAKE,\n        \"should equal\"\n    );\n\n    // We'll be updating the validators set to only contain new_validator:\n    let config = Config {\n        
accounts: vec![AccountConfig {\n            public_key: v2_public_key.clone(),\n            balance: Some(U512::from(V2_INITIAL_BALANCE)),\n            validator: Some(ValidatorConfig {\n                bonded_amount: U512::from(V2_INITIAL_STAKE),\n                ..Default::default()\n            }),\n        }],\n        only_listed_validators: true,\n        slash_instead_of_unbonding: false,\n        ..Default::default()\n    };\n\n    let update = get_update(&mut reader, config);\n\n    update.assert_total_supply(\n        &mut reader,\n        V1_INITIAL_BALANCE\n            + V1_INITIAL_STAKE\n            + D1_INITIAL_BALANCE\n            + D1_INITIAL_STAKE\n            + V2_INITIAL_BALANCE\n            + V2_INITIAL_STAKE,\n    );\n\n    // Check that the update contains the correct list of validators\n    update.assert_validators(&[ValidatorInfo::new(\n        &v2_public_key,\n        U512::from(V2_INITIAL_STAKE),\n    )]);\n\n    let unbond_kind = UnbondKind::Validator(v1_public_key.clone());\n    let unbonds = reader\n        .get_unbonds()\n        .get(&unbond_kind)\n        .cloned()\n        .expect(\"should have unbond purses\");\n    let unbonding_purses = unbonds.first().expect(\"must have at least one entry\");\n    let validator_purse = unbonding_purses\n        .eras()\n        .first()\n        .map(|purse| *purse.bonding_purse())\n        .expect(\"A bonding purse for the validator\");\n\n    // Bid purse balance should be unchanged\n    update.assert_key_absent(&Key::Balance(validator_purse.addr()));\n\n    // Check bid overwrite\n    let account1_hash = v1_public_key.to_account_hash();\n    let mut expected_bid_1 = ValidatorBid::unlocked(\n        v1_public_key,\n        validator_purse,\n        U512::zero(),\n        Default::default(),\n        0,\n        u64::MAX,\n        0,\n    );\n    expected_bid_1.deactivate();\n    update.assert_written_bid(account1_hash, BidKind::Validator(Box::new(expected_bid_1)));\n\n    // Check writes for 
validator2\n    let account2_hash = v2_public_key.to_account_hash();\n\n    // The new account should be created\n    let account2 = update.get_written_addressable_entity(account2_hash);\n\n    // Check that the main purse for the new account has been created with the correct amount\n    update.assert_written_purse_is_unit(account2.main_purse());\n    update.assert_written_balance(account2.main_purse(), V2_INITIAL_BALANCE);\n\n    let bid_write = update.get_written_bid(account2_hash);\n    assert_eq!(bid_write.validator_public_key(), v2_public_key);\n    let total = update\n        .get_total_stake(account2_hash)\n        .expect(\"should read total staked amount\");\n    assert_eq!(total, U512::from(V2_INITIAL_STAKE));\n    assert!(!bid_write.inactive());\n\n    // Check that the bid purse for the new validator has been created with the correct amount\n    update.assert_written_purse_is_unit(bid_write.bonding_purse().unwrap());\n    update.assert_written_balance(bid_write.bonding_purse().unwrap(), V2_INITIAL_STAKE);\n\n    // 13 keys should be written:\n    // - seigniorage recipients\n    // - total supply\n    // - bid for old validator\n    // - unbonding for old validator\n    // - unbond for delegator\n    // - account for new validator\n    // - main purse for account for new validator\n    // - main purse balance for account for new validator\n    // - addressable entity for new validator\n    // - package for the newly created addressable entity\n    // - bid for new validator\n    // - bonding purse for new validator\n    // - bonding purse balance for new validator\n    assert_eq!(update.len(), 13);\n}\n"
  },
  {
    "path": "utils/global-state-update-gen/src/generic/update.rs",
    "content": "use std::collections::BTreeMap;\n#[cfg(test)]\nuse std::collections::HashSet;\n\n#[cfg(test)]\nuse casper_types::{account::AccountHash, AddressableEntity, CLValue, PublicKey, URef, U512};\nuse casper_types::{Key, StoredValue};\n\n#[cfg(test)]\nuse casper_types::system::auction::{\n    BidAddr, BidKind, DelegatorBid, DelegatorKind, UnbondKind, ValidatorBid,\n};\n\n#[cfg(test)]\nuse super::state_reader::StateReader;\n\nuse crate::utils::{print_entry, print_validators, ValidatorInfo};\n\n#[derive(Debug)]\npub(crate) struct Update {\n    entries: BTreeMap<Key, StoredValue>,\n    // Holds the complete set of validators, only if the validator set changed\n    validators: Option<Vec<ValidatorInfo>>,\n}\n\nimpl Update {\n    pub(crate) fn new(\n        entries: BTreeMap<Key, StoredValue>,\n        validators: Option<Vec<ValidatorInfo>>,\n    ) -> Self {\n        Self {\n            entries,\n            validators,\n        }\n    }\n\n    pub(crate) fn print(&self) {\n        if let Some(validators) = &self.validators {\n            print_validators(validators);\n        }\n        for (key, value) in &self.entries {\n            print_entry(key, value);\n        }\n    }\n}\n\n#[cfg(test)]\nimpl Update {\n    pub(crate) fn len(&self) -> usize {\n        self.entries.len()\n    }\n\n    pub(crate) fn get_written_addressable_entity(\n        &self,\n        account_hash: AccountHash,\n    ) -> AddressableEntity {\n        let entity_key = self\n            .entries\n            .get(&Key::Account(account_hash))\n            .expect(\"must have Key for account hash\")\n            .as_cl_value()\n            .expect(\"must have underlying cl value\")\n            .to_owned()\n            .into_t::<Key>()\n            .expect(\"must convert to key\");\n\n        self.entries\n            .get(&entity_key)\n            .expect(\"must have addressable entity\")\n            .as_addressable_entity()\n            .expect(\"should be addressable entity\")\n        
    .clone()\n    }\n\n    pub(crate) fn get_written_bid(&self, account: AccountHash) -> BidKind {\n        self.entries\n            .get(&Key::BidAddr(BidAddr::from(account)))\n            .expect(\"stored value should exist\")\n            .as_bid_kind()\n            .expect(\"stored value should be be BidKind\")\n            .clone()\n    }\n\n    #[track_caller]\n    pub(crate) fn get_total_stake(&self, account: AccountHash) -> Option<U512> {\n        let bid_addr = BidAddr::from(account);\n\n        if let BidKind::Validator(validator_bid) = self\n            .entries\n            .get(&bid_addr.into())\n            .expect(\"should create bid\")\n            .as_bid_kind()\n            .expect(\"should be bid\")\n        {\n            let delegator_stake: U512 = self\n                .delegators(validator_bid)\n                .iter()\n                .map(|x| x.staked_amount())\n                .sum();\n\n            Some(validator_bid.staked_amount() + delegator_stake)\n        } else {\n            None\n        }\n    }\n\n    #[track_caller]\n    pub(crate) fn delegators(&self, validator_bid: &ValidatorBid) -> Vec<DelegatorBid> {\n        let mut ret = vec![];\n\n        for (_, v) in self.entries.clone() {\n            if let StoredValue::BidKind(BidKind::Delegator(delegator)) = v {\n                if delegator.validator_public_key() != validator_bid.validator_public_key() {\n                    continue;\n                }\n                ret.push(*delegator);\n            }\n        }\n\n        ret\n    }\n\n    #[track_caller]\n    pub(crate) fn delegator(\n        &self,\n        validator_bid: &ValidatorBid,\n        delegator_kind: &DelegatorKind,\n    ) -> Option<DelegatorBid> {\n        let delegators = self.delegators(validator_bid);\n        for delegator in delegators {\n            if delegator.delegator_kind() != delegator_kind {\n                continue;\n            }\n            return Some(delegator);\n        }\n        None\n   
 }\n\n    #[track_caller]\n    pub(crate) fn assert_written_balance(&self, purse: URef, balance: u64) {\n        if let StoredValue::CLValue(cl_value) = self\n            .entries\n            .get(&Key::Balance(purse.addr()))\n            .expect(\"must have balance\")\n        {\n            let actual = CLValue::to_t::<U512>(cl_value).expect(\"must get u512\");\n            assert_eq!(actual, U512::from(balance))\n        };\n    }\n\n    #[track_caller]\n    pub(crate) fn assert_total_supply<R: StateReader>(&self, reader: &mut R, supply: u64) {\n        let total = self\n            .entries\n            .get(&reader.get_total_supply_key())\n            .expect(\"should have total supply\")\n            .as_cl_value()\n            .expect(\"total supply should be CLValue\")\n            .clone()\n            .into_t::<U512>()\n            .expect(\"total supply should be a U512\");\n        let expected = U512::from(supply);\n        assert_eq!(\n            total, expected,\n            \"total supply ({total}) not as expected ({expected})\",\n        );\n    }\n\n    #[track_caller]\n    pub(crate) fn assert_written_purse_is_unit(&self, purse: URef) {\n        assert_eq!(\n            self.entries.get(&Key::URef(purse)),\n            Some(&StoredValue::from(\n                CLValue::from_t(()).expect(\"should convert unit to CLValue\")\n            ))\n        );\n    }\n\n    #[track_caller]\n    pub(crate) fn assert_seigniorage_recipients_written<R: StateReader>(&self, reader: &mut R) {\n        assert!(self\n            .entries\n            .contains_key(&reader.get_seigniorage_recipients_key()));\n    }\n\n    #[track_caller]\n    pub(crate) fn assert_written_bid(&self, account: AccountHash, bid: BidKind) {\n        assert_eq!(\n            self.entries.get(&Key::BidAddr(BidAddr::from(account))),\n            Some(&StoredValue::from(bid))\n        );\n    }\n\n    #[track_caller]\n    pub(crate) fn assert_withdraw_purse(\n        &self,\n        
bid_purse: URef,\n        validator_key: &PublicKey,\n        unbonder_key: &PublicKey,\n        amount: u64,\n    ) {\n        let account_hash = validator_key.to_account_hash();\n        let withdraws = self\n            .entries\n            .get(&Key::Withdraw(account_hash))\n            .expect(\"should have withdraws for the account\")\n            .as_withdraw()\n            .expect(\"should be withdraw purses\");\n        assert!(withdraws.iter().any(\n            |withdraw_purse| withdraw_purse.bonding_purse() == &bid_purse\n                && withdraw_purse.validator_public_key() == validator_key\n                && withdraw_purse.unbonder_public_key() == unbonder_key\n                && withdraw_purse.amount() == &U512::from(amount)\n        ))\n    }\n\n    #[track_caller]\n    #[allow(unused)]\n    pub(crate) fn assert_unbonding_purse(\n        &self,\n        bid_purse: URef,\n        validator_key: &PublicKey,\n        unbonder_key: &PublicKey,\n        amount: u64,\n    ) {\n        let account_hash = unbonder_key.to_account_hash();\n        let unbonds = self\n            .entries\n            .get(&Key::Unbond(account_hash))\n            .expect(\"should have unbonds for the account\")\n            .as_unbonding()\n            .expect(\"should be unbonding purses\");\n        assert!(unbonds.iter().any(\n            |unbonding_purse| unbonding_purse.bonding_purse() == &bid_purse\n                && unbonding_purse.validator_public_key() == validator_key\n                && unbonding_purse.unbonder_public_key() == unbonder_key\n                && unbonding_purse.amount() == &U512::from(amount)\n        ))\n    }\n\n    /// `expected`: (bid_purse, unbonder_key, amount)\n    #[track_caller]\n    #[allow(unused)]\n    pub(crate) fn assert_unbonding_purses<'a>(\n        &self,\n        account_hash: AccountHash,\n        expected: impl IntoIterator<Item = (URef, &'a PublicKey, u64)>,\n    ) {\n        let mut expected: Vec<_> = expected\n            
.into_iter()\n            .map(|(bid_purse, unbonder_key, amount)| {\n                (account_hash, bid_purse, unbonder_key, U512::from(amount))\n            })\n            .collect();\n        let mut data: Vec<_> = self\n            .entries\n            .get(&Key::Unbond(account_hash))\n            .expect(\"should have unbonds for the account\")\n            .as_unbonding()\n            .expect(\"should be unbonding purses\")\n            .iter()\n            .map(|unbonding_purse| {\n                (\n                    unbonding_purse.unbonder_public_key().to_account_hash(),\n                    *unbonding_purse.bonding_purse(),\n                    unbonding_purse.unbonder_public_key(),\n                    *unbonding_purse.amount(),\n                )\n            })\n            .collect();\n\n        expected.sort();\n        data.sort();\n\n        assert_eq!(\n            data, expected,\n            \"\\nThe data we got:\\n{data:#?}\\nExpected values:\\n{expected:#?}\"\n        );\n    }\n\n    #[track_caller]\n    pub(crate) fn assert_unbond_bid_kind(\n        &self,\n        bid_purse: URef,\n        validator_key: &PublicKey,\n        unbond_kind: &UnbondKind,\n        amount: u64,\n    ) {\n        println!(\n            \"assert_unbond_bid_kind {:?} {:?}\",\n            validator_key,\n            validator_key.to_account_hash()\n        );\n        println!(\"assert_unbond_bid_kind {:?}\", unbond_kind);\n        let bid_addr = match unbond_kind {\n            UnbondKind::Validator(pk) | UnbondKind::DelegatedPublicKey(pk) => {\n                BidAddr::UnbondAccount {\n                    validator: validator_key.to_account_hash(),\n                    unbonder: pk.to_account_hash(),\n                }\n            }\n            UnbondKind::DelegatedPurse(addr) => BidAddr::UnbondPurse {\n                validator: validator_key.to_account_hash(),\n                unbonder: *addr,\n            },\n        };\n\n        
println!(\"assert_unbond_bid_kind {:?}\", Key::BidAddr(bid_addr));\n\n        let entries = &self.entries;\n        let unbonds = entries\n            .get(&Key::BidAddr(bid_addr))\n            .expect(\"should have record\")\n            .as_unbond()\n            .expect(\"should be unbond\");\n\n        assert!(unbonds\n            .eras()\n            .iter()\n            .any(|unbond_era| unbond_era.bonding_purse() == &bid_purse\n                && unbond_era.amount() == &U512::from(amount)))\n    }\n\n    #[track_caller]\n    pub(crate) fn assert_key_absent(&self, key: &Key) {\n        assert!(!self.entries.contains_key(key))\n    }\n\n    #[track_caller]\n    pub(crate) fn assert_validators(&self, validators: &[ValidatorInfo]) {\n        let self_set: HashSet<_> = self.validators.as_ref().unwrap().iter().collect();\n        let other_set: HashSet<_> = validators.iter().collect();\n        assert_eq!(self_set, other_set);\n    }\n\n    #[track_caller]\n    pub(crate) fn assert_validators_unchanged(&self) {\n        assert!(self.validators.is_none());\n    }\n}\n"
  },
  {
    "path": "utils/global-state-update-gen/src/generic.rs",
    "content": "pub(crate) mod config;\nmod state_reader;\nmod state_tracker;\n#[cfg(test)]\nmod testing;\nmod update;\n\nuse std::{\n    collections::{BTreeMap, BTreeSet},\n    fs,\n};\n\nuse clap::ArgMatches;\nuse itertools::Itertools;\n\nuse casper_engine_test_support::LmdbWasmTestBuilder;\nuse casper_execution_engine::engine_state::engine_config::DEFAULT_PROTOCOL_VERSION;\n\nuse casper_types::{\n    system::auction::{\n        Bid, BidKind, BidsExt, DelegatorBid, DelegatorKind, Reservation, SeigniorageRecipientV2,\n        SeigniorageRecipientsSnapshotV2, Unbond, ValidatorBid, ValidatorCredit,\n    },\n    CLValue, EraId, PublicKey, StoredValue, U512,\n};\n\nuse crate::utils::{hash_from_str, validators_diff, ValidatorInfo, ValidatorsDiff};\n\nuse self::{\n    config::{AccountConfig, Config, Transfer},\n    state_reader::StateReader,\n    state_tracker::StateTracker,\n    update::Update,\n};\n\npub(crate) fn generate_generic_update(matches: &ArgMatches<'_>) {\n    let data_dir = matches.value_of(\"data_dir\").unwrap_or(\".\");\n    let state_hash = hash_from_str(matches.value_of(\"hash\").unwrap());\n    let config_path = matches.value_of(\"config_file\").unwrap();\n\n    let config_bytes = fs::read(config_path).expect(\"couldn't read the config file\");\n    let config: Config = toml::from_slice(&config_bytes).expect(\"couldn't parse the config file\");\n\n    let builder = LmdbWasmTestBuilder::open_raw(\n        data_dir,\n        Default::default(),\n        DEFAULT_PROTOCOL_VERSION,\n        state_hash,\n    );\n\n    update_from_config(builder, config);\n}\n\nfn get_update<T: StateReader>(reader: T, config: Config) -> Update {\n    let protocol_version = config.protocol_version;\n    let mut state_tracker = StateTracker::new(reader, protocol_version);\n\n    process_transfers(&mut state_tracker, &config.transfers);\n\n    update_account_balances(&mut state_tracker, &config.accounts);\n\n    let validators = update_auction_state(\n        &mut 
state_tracker,\n        &config.accounts,\n        config.only_listed_validators,\n        config.slash_instead_of_unbonding,\n    );\n\n    let entries = state_tracker.get_entries();\n\n    Update::new(entries, validators)\n}\n\npub(crate) fn update_from_config<T: StateReader>(reader: T, config: Config) {\n    let update = get_update(reader, config);\n    update.print();\n}\n\nfn process_transfers<T: StateReader>(state: &mut StateTracker<T>, transfers: &[Transfer]) {\n    for transfer in transfers {\n        state.execute_transfer(transfer);\n    }\n}\n\nfn update_account_balances<T: StateReader>(\n    state: &mut StateTracker<T>,\n    accounts: &[AccountConfig],\n) {\n    for account in accounts {\n        if let Some(target_balance) = account.balance {\n            let account_hash = account.public_key.to_account_hash();\n            if let Some(account) = state.get_account(&account_hash) {\n                state.set_purse_balance(account.main_purse(), target_balance);\n            } else {\n                state.create_addressable_entity_for_account(account_hash, target_balance);\n            }\n        }\n    }\n}\n\n/// Returns the complete set of validators immediately after the upgrade,\n/// if the validator set changed.\nfn update_auction_state<T: StateReader>(\n    state: &mut StateTracker<T>,\n    accounts: &[AccountConfig],\n    only_listed_validators: bool,\n    slash_instead_of_unbonding: bool,\n) -> Option<Vec<ValidatorInfo>> {\n    // Read the old SeigniorageRecipientsSnapshot\n    let (snapshot_key, old_snapshot) = state.read_snapshot();\n\n    // Create a new snapshot based on the old one and the supplied validators.\n    let new_snapshot = if only_listed_validators {\n        gen_snapshot_only_listed(\n            *old_snapshot.keys().next().unwrap(),\n            old_snapshot.len() as u64,\n            accounts,\n        )\n    } else {\n        gen_snapshot_from_old(old_snapshot.clone(), accounts)\n    };\n\n    if new_snapshot == old_snapshot 
{\n        return None;\n    }\n\n    // Save the write to the snapshot key.\n    state.write_entry(\n        snapshot_key,\n        StoredValue::from(CLValue::from_t(new_snapshot.clone()).unwrap()),\n    );\n\n    let validators_diff = validators_diff(&old_snapshot, &new_snapshot);\n\n    let bids = state.get_bids();\n    if slash_instead_of_unbonding {\n        // zero the unbonds for the removed validators independently of set_bid; set_bid will take\n        // care of zeroing the delegators if necessary\n        for bid_kind in bids {\n            if validators_diff\n                .removed\n                .contains(&bid_kind.validator_public_key())\n            {\n                if let Some(bonding_purse) = bid_kind.bonding_purse() {\n                    state.remove_withdraws_and_unbonds_with_bonding_purse(&bonding_purse);\n                }\n            }\n        }\n    }\n\n    add_and_remove_bids(\n        state,\n        &validators_diff,\n        &new_snapshot,\n        only_listed_validators,\n        slash_instead_of_unbonding,\n    );\n\n    // We need to output the validators for the next era, which are contained in the first entry\n    // in the snapshot.\n    Some(\n        new_snapshot\n            .values()\n            .next()\n            .expect(\"snapshot should have at least one entry\")\n            .iter()\n            .map(|(public_key, seigniorage_recipient)| ValidatorInfo {\n                public_key: public_key.clone(),\n                weight: seigniorage_recipient\n                    .total_stake()\n                    .expect(\"total validator stake too large\"),\n            })\n            .collect(),\n    )\n}\n\n/// Generates a new `SeigniorageRecipientsSnapshotV2` based on:\n/// - The starting era ID (the era ID at which the snapshot should start).\n/// - Count - the number of eras to be included in the snapshot.\n/// - The list of configured accounts.\nfn gen_snapshot_only_listed(\n    starting_era_id: EraId,\n    count: 
u64,\n    accounts: &[AccountConfig],\n) -> SeigniorageRecipientsSnapshotV2 {\n    let mut new_snapshot = BTreeMap::new();\n    let mut era_validators = BTreeMap::new();\n    for account in accounts {\n        // don't add validators with zero stake to the snapshot\n        let validator_cfg = match &account.validator {\n            Some(validator) if validator.bonded_amount != U512::zero() => validator,\n            _ => continue,\n        };\n        let seigniorage_recipient = SeigniorageRecipientV2::new(\n            validator_cfg.bonded_amount,\n            validator_cfg.delegation_rate.unwrap_or_default(),\n            validator_cfg.delegators_map().unwrap_or_default(),\n            validator_cfg.reservations_map().unwrap_or_default(),\n        );\n        let _ = era_validators.insert(account.public_key.clone(), seigniorage_recipient);\n    }\n    for era_id in starting_era_id.iter(count) {\n        let _ = new_snapshot.insert(era_id, era_validators.clone());\n    }\n    new_snapshot\n}\n\n/// Generates a new `SeigniorageRecipientsSnapshotV2` by modifying the stakes listed in the old\n/// snapshot according to the supplied list of configured accounts.\nfn gen_snapshot_from_old(\n    mut snapshot: SeigniorageRecipientsSnapshotV2,\n    accounts: &[AccountConfig],\n) -> SeigniorageRecipientsSnapshotV2 {\n    // Read the modifications to be applied to the validators set from the config.\n    let validators_map: BTreeMap<_, _> = accounts\n        .iter()\n        .filter_map(|acc| {\n            acc.validator\n                .as_ref()\n                .map(|validator| (acc.public_key.clone(), validator.clone()))\n        })\n        .collect();\n\n    // We will be modifying the entries in the old snapshot passed in as `snapshot` according to\n    // the config.\n    for recipients in snapshot.values_mut() {\n        // We use `retain` to drop some entries and modify some of the ones that will be retained.\n        recipients.retain(\n            |public_key, 
recipient| match validators_map.get(public_key) {\n                // If the validator's stake is configured to be zero, we drop them from the\n                // snapshot.\n                Some(validator) if validator.bonded_amount.is_zero() => false,\n                // Otherwise, we keep them, but modify the properties.\n                Some(validator) => {\n                    *recipient = SeigniorageRecipientV2::new(\n                        validator.bonded_amount,\n                        validator\n                            .delegation_rate\n                            // If the delegation rate wasn't specified in the config, keep the one\n                            // from the old snapshot.\n                            .unwrap_or(*recipient.delegation_rate()),\n                        validator\n                            .delegators_map()\n                            // If the delegators weren't specified in the config, keep the ones\n                            // from the old snapshot.\n                            .unwrap_or_else(|| recipient.delegator_stake().clone()),\n                        validator\n                            .reservations_map()\n                            // If the delegators weren't specified in the config, keep the ones\n                            // from the old snapshot.\n                            .unwrap_or_else(|| recipient.reservation_delegation_rates().clone()),\n                    );\n                    true\n                }\n                // Validators not present in the config will be kept unmodified.\n                None => true,\n            },\n        );\n\n        // Add the validators that weren't present in the old snapshot.\n        for (public_key, validator) in &validators_map {\n            if recipients.contains_key(public_key) {\n                continue;\n            }\n\n            if validator.bonded_amount != U512::zero() {\n                recipients.insert(\n                    
public_key.clone(),\n                    SeigniorageRecipientV2::new(\n                        validator.bonded_amount,\n                        // Unspecified delegation rate will be treated as 0.\n                        validator.delegation_rate.unwrap_or_default(),\n                        // Unspecified delegators will be treated as an empty list.\n                        validator.delegators_map().unwrap_or_default(),\n                        // Unspecified reservation delegation rates will be treated as an empty\n                        // list.\n                        validator.reservations_map().unwrap_or_default(),\n                    ),\n                );\n            }\n        }\n    }\n\n    // Return the modified snapshot.\n    snapshot\n}\n\n/// Generates a set of writes necessary to \"fix\" the bids, ie.:\n/// - set the bids of the new validators to their desired stakes,\n/// - remove the bids of the old validators that are no longer validators,\n/// - if `only_listed_validators` is true, remove all the bids that are larger than the smallest bid\n///   among the new validators (necessary, because such bidders would outbid the validators decided\n///   by the social consensus).\npub fn add_and_remove_bids<T: StateReader>(\n    state: &mut StateTracker<T>,\n    validators_diff: &ValidatorsDiff,\n    new_snapshot: &SeigniorageRecipientsSnapshotV2,\n    only_listed_validators: bool,\n    slash_instead_of_unbonding: bool,\n) {\n    let to_unbid = if only_listed_validators {\n        let large_bids = find_large_bids(state, new_snapshot);\n        validators_diff\n            .removed\n            .union(&large_bids)\n            .cloned()\n            .collect()\n    } else {\n        validators_diff.removed.clone()\n    };\n\n    for (pub_key, seigniorage_recipient) in new_snapshot.values().next_back().unwrap() {\n        create_or_update_bid(\n            state,\n            pub_key,\n            seigniorage_recipient,\n            
slash_instead_of_unbonding,\n        );\n    }\n\n    // Refresh the bids - we modified them above.\n    let bids = state.get_bids();\n    for public_key in to_unbid {\n        for bid_kind in bids\n            .iter()\n            .filter(|x| x.validator_public_key() == public_key)\n        {\n            let reset_bid = match bid_kind {\n                BidKind::Unified(bid) => BidKind::Unified(Box::new(Bid::empty(\n                    public_key.clone(),\n                    *bid.bonding_purse(),\n                ))),\n                BidKind::Validator(validator_bid) => {\n                    let mut new_bid =\n                        ValidatorBid::empty(public_key.clone(), *validator_bid.bonding_purse());\n                    new_bid.set_delegation_amount_boundaries(\n                        validator_bid.minimum_delegation_amount(),\n                        validator_bid.maximum_delegation_amount(),\n                    );\n                    BidKind::Validator(Box::new(new_bid))\n                }\n                BidKind::Delegator(delegator_bid) => {\n                    BidKind::Delegator(Box::new(DelegatorBid::empty(\n                        public_key.clone(),\n                        delegator_bid.delegator_kind().clone(),\n                        *delegator_bid.bonding_purse(),\n                    )))\n                }\n                // there should be no need to modify bridge records\n                // since they don't influence the bidding process\n                BidKind::Bridge(_) => continue,\n                BidKind::Credit(credit) => BidKind::Credit(Box::new(ValidatorCredit::empty(\n                    public_key.clone(),\n                    credit.era_id(),\n                ))),\n                BidKind::Reservation(reservation_bid) => {\n                    BidKind::Reservation(Box::new(Reservation::new(\n                        public_key.clone(),\n                        reservation_bid.delegator_kind().clone(),\n                     
   *reservation_bid.delegation_rate(),\n                    )))\n                }\n                BidKind::Unbond(unbond) => BidKind::Unbond(Box::new(Unbond::new(\n                    unbond.validator_public_key().clone(),\n                    unbond.unbond_kind().clone(),\n                    unbond.eras().clone(),\n                ))),\n            };\n            state.set_bid(reset_bid, slash_instead_of_unbonding);\n        }\n    }\n}\n\n/// Returns the set of public keys that have bids larger than the smallest bid among the new\n/// validators.\nfn find_large_bids<T: StateReader>(\n    state: &mut StateTracker<T>,\n    snapshot: &SeigniorageRecipientsSnapshotV2,\n) -> BTreeSet<PublicKey> {\n    let seigniorage_recipients = snapshot.values().next().unwrap();\n    let min_bid = seigniorage_recipients\n        .values()\n        .map(|recipient| {\n            recipient\n                .total_stake()\n                .expect(\"should have valid total stake\")\n        })\n        .min()\n        .unwrap();\n    let new_validators: BTreeSet<_> = seigniorage_recipients.keys().collect();\n\n    let mut ret = BTreeSet::new();\n\n    let validator_bids = state\n        .get_bids()\n        .iter()\n        .filter(|x| x.is_validator() || x.is_delegator())\n        .cloned()\n        .collect_vec();\n\n    for bid_kind in validator_bids {\n        if let BidKind::Unified(bid) = bid_kind {\n            if bid.total_staked_amount().unwrap_or_default() > min_bid\n                && !new_validators.contains(bid.validator_public_key())\n            {\n                ret.insert(bid.validator_public_key().clone());\n            }\n        } else if let BidKind::Validator(validator_bid) = bid_kind {\n            if new_validators.contains(validator_bid.validator_public_key()) {\n                // The validator is still going to be a validator - we don't remove their bid.\n                continue;\n            }\n            if validator_bid.staked_amount() > min_bid {\n 
               ret.insert(validator_bid.validator_public_key().clone());\n                continue;\n            }\n            let delegator_stake = state\n                .get_bids()\n                .iter()\n                .filter(|x| {\n                    x.validator_public_key() == *validator_bid.validator_public_key()\n                        && x.is_delegator()\n                })\n                .map(|x| x.staked_amount().unwrap())\n                .sum();\n\n            let total = validator_bid\n                .staked_amount()\n                .checked_add(delegator_stake)\n                .unwrap_or_default();\n            if total > min_bid {\n                ret.insert(validator_bid.validator_public_key().clone());\n            }\n        }\n    }\n    ret\n}\n\n/// Updates the amount of an existing bid for the given public key, or creates a new one.\nfn create_or_update_bid<T: StateReader>(\n    state: &mut StateTracker<T>,\n    validator_public_key: &PublicKey,\n    updated_recipient: &SeigniorageRecipientV2,\n    slash_instead_of_unbonding: bool,\n) {\n    let existing_bids = state.get_bids();\n\n    let maybe_existing_recipient = existing_bids\n        .iter()\n        .find(|x| {\n            (x.is_unified() || x.is_validator())\n                && &x.validator_public_key() == validator_public_key\n        })\n        .map(|existing_bid| {\n            let reservation_delegation_rates =\n                match existing_bids.reservations_by_validator_public_key(validator_public_key) {\n                    None => BTreeMap::new(),\n                    Some(reservations) => reservations\n                        .iter()\n                        .map(|reservation| {\n                            (\n                                reservation.delegator_kind().clone(),\n                                *reservation.delegation_rate(),\n                            )\n                        })\n                        .collect(),\n                };\n\n   
         match existing_bid {\n                BidKind::Unified(bid) => {\n                    let delegator_stake = bid\n                        .delegators()\n                        .iter()\n                        .map(|(k, d)| (DelegatorKind::PublicKey(k.clone()), d.staked_amount()))\n                        .collect();\n\n                    (\n                        bid.bonding_purse(),\n                        SeigniorageRecipientV2::new(\n                            *bid.staked_amount(),\n                            *bid.delegation_rate(),\n                            delegator_stake,\n                            reservation_delegation_rates,\n                        ),\n                        0,\n                        u64::MAX,\n                        0,\n                    )\n                }\n                BidKind::Validator(validator_bid) => {\n                    let delegator_stake = match existing_bids\n                        .delegators_by_validator_public_key(validator_public_key)\n                    {\n                        None => BTreeMap::new(),\n                        Some(delegators) => delegators\n                            .iter()\n                            .map(|d| (d.delegator_kind().clone(), d.staked_amount()))\n                            .collect(),\n                    };\n\n                    (\n                        validator_bid.bonding_purse(),\n                        SeigniorageRecipientV2::new(\n                            validator_bid.staked_amount(),\n                            *validator_bid.delegation_rate(),\n                            delegator_stake,\n                            reservation_delegation_rates,\n                        ),\n                        validator_bid.minimum_delegation_amount(),\n                        validator_bid.maximum_delegation_amount(),\n                        validator_bid.reserved_slots(),\n                    )\n                }\n                _ => 
unreachable!(),\n            }\n        });\n\n    // existing bid\n    if let Some((\n        bonding_purse,\n        existing_recipient,\n        min_delegation_amount,\n        max_delegation_amount,\n        reserved_slots,\n    )) = maybe_existing_recipient\n    {\n        if existing_recipient == *updated_recipient {\n            return; // noop\n        }\n\n        let delegators = existing_bids\n            .delegators_by_validator_public_key(validator_public_key)\n            .unwrap_or_default();\n\n        for delegator in delegators {\n            let delegator_bid = match updated_recipient\n                .delegator_stake()\n                .get(delegator.delegator_kind())\n            {\n                None => {\n                    // todo!() this is a remove; the global state update tool does not\n                    // yet support prune so in the meantime, setting the amount\n                    // to 0.\n                    DelegatorBid::empty(\n                        delegator.validator_public_key().clone(),\n                        delegator.delegator_kind().clone(),\n                        *delegator.bonding_purse(),\n                    )\n                }\n                Some(updated_delegator_stake) => DelegatorBid::unlocked(\n                    delegator.delegator_kind().clone(),\n                    *updated_delegator_stake,\n                    *delegator.bonding_purse(),\n                    validator_public_key.clone(),\n                ),\n            };\n            if delegator.staked_amount() == delegator_bid.staked_amount() {\n                continue; // effectively noop\n            }\n            state.set_bid(\n                BidKind::Delegator(Box::new(delegator_bid)),\n                slash_instead_of_unbonding,\n            );\n        }\n\n        for (delegator_pub_key, delegator_stake) in updated_recipient.delegator_stake() {\n            if existing_recipient\n                .delegator_stake()\n                
.contains_key(delegator_pub_key)\n            {\n                // we handled this scenario above\n                continue;\n            }\n            // this is a entirely new delegator\n            let delegator_bonding_purse = state.create_purse(*delegator_stake);\n            let delegator_bid = DelegatorBid::unlocked(\n                delegator_pub_key.clone(),\n                *delegator_stake,\n                delegator_bonding_purse,\n                validator_public_key.clone(),\n            );\n\n            state.set_bid(\n                BidKind::Delegator(Box::new(delegator_bid)),\n                slash_instead_of_unbonding,\n            );\n        }\n\n        if *existing_recipient.stake() == *updated_recipient.stake() {\n            // if the delegators changed, do the above, but if the validator's\n            // personal stake is unchanged their bid doesn't need to be modified.\n            return;\n        }\n\n        let updated_bid = ValidatorBid::unlocked(\n            validator_public_key.clone(),\n            *bonding_purse,\n            *updated_recipient.stake(),\n            *updated_recipient.delegation_rate(),\n            min_delegation_amount,\n            max_delegation_amount,\n            reserved_slots,\n        );\n\n        state.set_bid(\n            BidKind::Validator(Box::new(updated_bid)),\n            slash_instead_of_unbonding,\n        );\n        return;\n    }\n\n    // new bid\n    let stake = *updated_recipient.stake();\n    if stake.is_zero() {\n        return;\n    }\n\n    for (delegator_pub_key, delegator_stake) in updated_recipient.delegator_stake() {\n        let delegator_bonding_purse = state.create_purse(*delegator_stake);\n        let delegator_bid = DelegatorBid::unlocked(\n            delegator_pub_key.clone(),\n            *delegator_stake,\n            delegator_bonding_purse,\n            validator_public_key.clone(),\n        );\n\n        state.set_bid(\n            
BidKind::Delegator(Box::new(delegator_bid)),\n            slash_instead_of_unbonding,\n        );\n    }\n\n    let bonding_purse = state.create_purse(stake);\n    let validator_bid = ValidatorBid::unlocked(\n        validator_public_key.clone(),\n        bonding_purse,\n        stake,\n        *updated_recipient.delegation_rate(),\n        0,\n        u64::MAX,\n        0,\n    );\n    state.set_bid(\n        BidKind::Validator(Box::new(validator_bid)),\n        slash_instead_of_unbonding,\n    );\n}\n"
  },
  {
    "path": "utils/global-state-update-gen/src/main.rs",
    "content": "mod admins;\nmod balances;\nmod decode;\nmod generic;\nmod system_entity_registry;\nmod utils;\nmod validators;\n\nuse admins::generate_admins;\nuse clap::{crate_version, App, Arg, SubCommand};\n\nuse crate::{\n    balances::generate_balances_update, decode::decode_file, generic::generate_generic_update,\n    system_entity_registry::generate_system_entity_registry,\n    validators::generate_validators_update,\n};\n\nfn main() {\n    let matches = App::new(\"Global State Update Generator\")\n        .version(crate_version!())\n        .about(\"Generates a global state update file based on the supplied parameters\")\n        .subcommand(\n            SubCommand::with_name(\"change-validators\")\n                .about(\"Generates an update changing the validators set\")\n                .arg(\n                    Arg::with_name(\"data_dir\")\n                        .short(\"d\")\n                        .long(\"data-dir\")\n                        .value_name(\"PATH\")\n                        .help(\"Data storage directory containing the global state database file\")\n                        .takes_value(true)\n                        .required(true),\n                )\n                .arg(\n                    Arg::with_name(\"hash\")\n                        .short(\"s\")\n                        .long(\"state-hash\")\n                        .value_name(\"HEX_STRING\")\n                        .help(\"The global state hash to be used as the base\")\n                        .takes_value(true)\n                        .required(true),\n                )\n                .arg(\n                    Arg::with_name(\"validator\")\n                        .short(\"v\")\n                        .long(\"validator\")\n                        .value_name(\"KEY,STAKE[,BALANCE]\")\n                        .help(\"A validator config in the format 'public_key,stake[,balance]'\")\n                        .takes_value(true)\n                        
.required(true)\n                        .multiple(true)\n                        .number_of_values(1),\n                ),\n        )\n        .subcommand(\n            SubCommand::with_name(\"balances\")\n                .about(\"Generates an update changing account balances\")\n                .arg(\n                    Arg::with_name(\"data_dir\")\n                        .short(\"d\")\n                        .long(\"data-dir\")\n                        .value_name(\"PATH\")\n                        .help(\"Data storage directory containing the global state database file\")\n                        .takes_value(true)\n                        .required(true),\n                )\n                .arg(\n                    Arg::with_name(\"hash\")\n                        .short(\"s\")\n                        .long(\"state-hash\")\n                        .value_name(\"HEX_STRING\")\n                        .help(\"The global state hash to be used as the base\")\n                        .takes_value(true)\n                        .required(true),\n                )\n                .arg(\n                    Arg::with_name(\"from\")\n                        .short(\"f\")\n                        .long(\"from\")\n                        .value_name(\"ACCOUNT_HASH\")\n                        .help(\"Source account hash (with the account-hash- prefix)\")\n                        .takes_value(true)\n                        .required(true),\n                )\n                .arg(\n                    Arg::with_name(\"to\")\n                        .short(\"t\")\n                        .long(\"to\")\n                        .value_name(\"ACCOUNT_HASH\")\n                        .help(\"Target account hash (with the account-hash- prefix)\")\n                        .takes_value(true)\n                        .required(true),\n                )\n                .arg(\n                    Arg::with_name(\"amount\")\n                        .short(\"a\")\n              
          .long(\"amount\")\n                        .value_name(\"MOTES\")\n                        .help(\"Amount to be transferred\")\n                        .takes_value(true)\n                        .required(true),\n                ),\n        )\n        .subcommand(\n            SubCommand::with_name(\"migrate-into-system-contract-registry\")\n                .about(\"Generates an update creating the system entity registry\")\n                .arg(\n                    Arg::with_name(\"data_dir\")\n                        .short(\"d\")\n                        .long(\"data-dir\")\n                        .value_name(\"PATH\")\n                        .help(\"Data storage directory containing the global state database file\")\n                        .takes_value(true)\n                        .required(true),\n                )\n                .arg(\n                    Arg::with_name(\"hash\")\n                        .short(\"s\")\n                        .long(\"state-hash\")\n                        .value_name(\"HEX_STRING\")\n                        .help(\"The global state hash to be used as the base\")\n                        .takes_value(true)\n                        .required(false),\n                ),\n        )\n        .subcommand(\n            SubCommand::with_name(\"generic\")\n                .about(\"Generates a generic update based on a config file\")\n                .arg(\n                    Arg::with_name(\"data_dir\")\n                        .short(\"d\")\n                        .long(\"data-dir\")\n                        .value_name(\"PATH\")\n                        .help(\"Data storage directory containing the global state database file\")\n                        .takes_value(true)\n                        .required(true),\n                )\n                .arg(\n                    Arg::with_name(\"hash\")\n                        .short(\"s\")\n                        .long(\"state-hash\")\n                        
.value_name(\"HEX_STRING\")\n                        .help(\"The global state hash to be used as the base\")\n                        .takes_value(true)\n                        .required(true),\n                )\n                .arg(\n                    Arg::with_name(\"config_file\")\n                        .value_name(\"FILE\")\n                        .index(1)\n                        .required(true)\n                        .help(\"The config file to be used for generating the update\"),\n                ),\n        )\n        .subcommand(\n            SubCommand::with_name(\"generate-admins\")\n                .about(\"Generates entries to create new admin accounts on a private chain\")\n                .arg(\n                    Arg::with_name(\"data_dir\")\n                        .short(\"d\")\n                        .long(\"data-dir\")\n                        .value_name(\"PATH\")\n                        .help(\"Data storage directory containing the global state database file\")\n                        .takes_value(true)\n                        .required(true),\n                )\n                .arg(\n                    Arg::with_name(\"hash\")\n                        .short(\"s\")\n                        .long(\"state-hash\")\n                        .value_name(\"HEX_STRING\")\n                        .help(\"The global state hash to be used as the base\")\n                        .takes_value(true)\n                        .required(true),\n                )\n                .arg(\n                    Arg::with_name(\"admin\")\n                        .short(\"a\")\n                        .long(\"admin\")\n                        .value_name(\"PUBLIC_KEY,BALANCE\")\n                        .help(\"A new admin account\")\n                        .takes_value(true)\n                        .required(true)\n                        .multiple(true)\n                        .number_of_values(1),\n                ),\n        )\n        
.subcommand(\n            SubCommand::with_name(\"decode\")\n                .about(\"Decodes the global_state.toml file into a readable form\")\n                .arg(\n                    Arg::with_name(\"file\")\n                        .value_name(\"FILE\")\n                        .index(1)\n                        .required(true)\n                        .help(\"The file to be decoded\"),\n                ),\n        )\n        .get_matches();\n\n    match matches.subcommand() {\n        (\"change-validators\", Some(sub_matches)) => generate_validators_update(sub_matches),\n        (\"balances\", Some(sub_matches)) => generate_balances_update(sub_matches),\n        (\"migrate-into-system-contract-registry\", Some(sub_matches)) => {\n            generate_system_entity_registry(sub_matches)\n        }\n        (\"generic\", Some(sub_matches)) => generate_generic_update(sub_matches),\n        (\"generate-admins\", Some(sub_matches)) => generate_admins(sub_matches),\n        (\"decode\", Some(sub_matches)) => decode_file(sub_matches),\n        (subcommand, _) => {\n            println!(\"Unknown subcommand: \\\"{}\\\"\", subcommand);\n        }\n    }\n}\n"
  },
  {
    "path": "utils/global-state-update-gen/src/system_entity_registry.rs",
    "content": "use std::path::Path;\n\nuse clap::ArgMatches;\nuse lmdb::{self, Cursor, Environment, EnvironmentFlags, Transaction};\n\nuse casper_engine_test_support::LmdbWasmTestBuilder;\nuse casper_execution_engine::engine_state::engine_config::{\n    DEFAULT_ENABLE_ENTITY, DEFAULT_PROTOCOL_VERSION,\n};\nuse casper_storage::{\n    data_access_layer::{\n        SystemEntityRegistryPayload, SystemEntityRegistryRequest, SystemEntityRegistrySelector,\n    },\n    global_state::state::StateProvider,\n};\nuse casper_types::{\n    bytesrepr::FromBytes,\n    system::{AUCTION, HANDLE_PAYMENT, MINT, STANDARD_PAYMENT},\n    AddressableEntityHash, CLValue, Key, ProtocolVersion, StoredValue, SystemHashRegistry,\n    KEY_HASH_LENGTH,\n};\n\nuse crate::utils::{hash_from_str, print_entry};\n\nconst DATABASE_NAME: &str = \"PROTOCOL_DATA_STORE\";\n\npub(crate) fn generate_system_entity_registry(matches: &ArgMatches<'_>) {\n    let data_dir = Path::new(matches.value_of(\"data_dir\").unwrap_or(\".\"));\n    match matches.value_of(\"hash\") {\n        None => generate_system_entity_registry_using_protocol_data(data_dir),\n        Some(hash) => generate_system_entity_registry_using_global_state(data_dir, hash),\n    }\n}\n\nfn generate_system_entity_registry_using_protocol_data(data_dir: &Path) {\n    let database_path = data_dir.join(\"data.lmdb\");\n\n    let env = Environment::new()\n        .set_flags(EnvironmentFlags::READ_ONLY | EnvironmentFlags::NO_SUB_DIR)\n        .set_max_dbs(2)\n        .open(&database_path)\n        .unwrap_or_else(|error| {\n            panic!(\n                \"failed to initialize database environment at {}: {}\",\n                database_path.display(),\n                error\n            )\n        });\n\n    let protocol_data_db = env.open_db(Some(DATABASE_NAME)).unwrap_or_else(|error| {\n        panic!(\"failed to open database named {}: {}\", DATABASE_NAME, error)\n    });\n\n    let ro_transaction = env\n        .begin_ro_txn()\n        
.unwrap_or_else(|error| panic!(\"failed to initialize read-only transaction: {}\", error));\n    let mut cursor = ro_transaction\n        .open_ro_cursor(protocol_data_db)\n        .unwrap_or_else(|error| panic!(\"failed to open a read-only cursor: {}\", error));\n\n    let serialized_protocol_data = match cursor.iter().next().map(Result::unwrap) {\n        Some((_key, value)) => value,\n        None => {\n            println!(\"No protocol data found\");\n            return;\n        }\n    };\n\n    // The last four 32-byte chunks of the serialized data are the contract hashes.\n    let start_index = serialized_protocol_data\n        .len()\n        .saturating_sub(4 * KEY_HASH_LENGTH);\n    let remainder = &serialized_protocol_data[start_index..];\n    let (mint_hash, remainder) =\n        AddressableEntityHash::from_bytes(remainder).unwrap_or_else(|error| {\n            panic!(\n                \"failed to parse mint hash: {:?}\\nraw_bytes: {:?}\",\n                error, serialized_protocol_data\n            )\n        });\n    let (handle_payment_hash, remainder) = AddressableEntityHash::from_bytes(remainder)\n        .unwrap_or_else(|error| {\n            panic!(\n                \"failed to parse handle_payment hash: {:?}\\nraw_bytes: {:?}\",\n                error, serialized_protocol_data\n            )\n        });\n    let (standard_payment_hash, remainder) = AddressableEntityHash::from_bytes(remainder)\n        .unwrap_or_else(|error| {\n            panic!(\n                \"failed to parse standard_payment hash: {:?}\\nraw_bytes: {:?}\",\n                error, serialized_protocol_data\n            )\n        });\n    let (auction_hash, remainder) =\n        AddressableEntityHash::from_bytes(remainder).unwrap_or_else(|error| {\n            panic!(\n                \"failed to parse auction hash: {:?}\\nraw_bytes: {:?}\",\n                error, serialized_protocol_data\n            )\n        });\n    assert!(remainder.is_empty());\n\n    let mut 
registry = SystemHashRegistry::new();\n    registry.insert(MINT.to_string(), mint_hash.value());\n    registry.insert(HANDLE_PAYMENT.to_string(), handle_payment_hash.value());\n    registry.insert(STANDARD_PAYMENT.to_string(), standard_payment_hash.value());\n    registry.insert(AUCTION.to_string(), auction_hash.value());\n\n    print_entry(\n        &Key::SystemEntityRegistry,\n        &StoredValue::from(CLValue::from_t(registry).unwrap()),\n    );\n}\n\nfn generate_system_entity_registry_using_global_state(data_dir: &Path, state_hash: &str) {\n    let builder = LmdbWasmTestBuilder::open_raw(\n        data_dir,\n        Default::default(),\n        DEFAULT_PROTOCOL_VERSION,\n        hash_from_str(state_hash),\n    );\n\n    let registry_req = SystemEntityRegistryRequest::new(\n        builder.get_post_state_hash(),\n        ProtocolVersion::V2_0_0,\n        SystemEntityRegistrySelector::All,\n        DEFAULT_ENABLE_ENTITY,\n    );\n\n    let registry = match builder\n        .data_access_layer()\n        .system_entity_registry(registry_req)\n        .as_registry_payload()\n        .expect(\"should have payload\")\n    {\n        SystemEntityRegistryPayload::All(registry) => registry,\n        SystemEntityRegistryPayload::EntityKey(_) => {\n            panic!(\"expected registry\");\n        }\n    };\n\n    // make sure expected entries exist\n    let _ = *registry.get(MINT).expect(\"mint should exist\");\n    let _ = *registry.get(AUCTION).expect(\"auction should exist\");\n    let _ = *registry\n        .get(HANDLE_PAYMENT)\n        .expect(\"handle payment should exist\");\n\n    print_entry(\n        &Key::SystemEntityRegistry,\n        &StoredValue::from(CLValue::from_t(registry).unwrap()),\n    );\n}\n"
  },
  {
    "path": "utils/global-state-update-gen/src/utils.rs",
    "content": "use clap::ArgMatches;\nuse std::{\n    collections::{BTreeMap, BTreeSet},\n    convert::TryInto,\n};\n\nuse casper_types::{\n    bytesrepr::ToBytes, checksummed_hex, system::auction::SeigniorageRecipientsSnapshotV2,\n    AsymmetricType, Digest, Key, ProtocolVersion, PublicKey, StoredValue, U512,\n};\n\n/// Parses a Digest from a string. Panics if parsing fails.\npub fn hash_from_str(hex_str: &str) -> Digest {\n    (&checksummed_hex::decode(hex_str).unwrap()[..])\n        .try_into()\n        .unwrap()\n}\n\npub fn num_from_str(str: Option<&str>) -> Option<u32> {\n    match str {\n        Some(val) => val.parse::<u32>().ok(),\n        None => None,\n    }\n}\n\npub fn protocol_version_from_matches(matches: &ArgMatches<'_>) -> ProtocolVersion {\n    let major = num_from_str(matches.value_of(\"major\")).unwrap_or(2);\n    let minor = num_from_str(matches.value_of(\"minor\")).unwrap_or(0);\n    let patch = num_from_str(matches.value_of(\"patch\")).unwrap_or(0);\n    ProtocolVersion::from_parts(major, minor, patch)\n}\n\npub(crate) fn print_validators(validators: &[ValidatorInfo]) {\n    for validator in validators {\n        println!(\"[[validators]]\");\n        println!(\"public_key = \\\"{}\\\"\", validator.public_key.to_hex());\n        println!(\"weight = \\\"{}\\\"\", validator.weight);\n        println!();\n    }\n    println!();\n}\n\n/// Prints a global state update entry in a format ready for inclusion in a TOML file.\npub(crate) fn print_entry(key: &Key, value: &StoredValue) {\n    println!(\"[[entries]]\");\n    println!(\"key = \\\"{}\\\"\", key.to_formatted_string());\n    println!(\"value = \\\"{}\\\"\", base64::encode(value.to_bytes().unwrap()));\n    println!();\n}\n\n#[derive(Debug, PartialEq, Eq, Hash)]\npub(crate) struct ValidatorInfo {\n    pub public_key: PublicKey,\n    pub weight: U512,\n}\n\n#[derive(Debug, PartialEq, Eq, Hash)]\npub struct ValidatorsDiff {\n    pub added: BTreeSet<PublicKey>,\n    pub removed: 
BTreeSet<PublicKey>,\n}\n\n/// Calculates the sets of added and removed validators between the two snapshots.\npub fn validators_diff(\n    old_snapshot: &SeigniorageRecipientsSnapshotV2,\n    new_snapshot: &SeigniorageRecipientsSnapshotV2,\n) -> ValidatorsDiff {\n    let old_validators: BTreeSet<_> = old_snapshot\n        .values()\n        .flat_map(BTreeMap::keys)\n        .cloned()\n        .collect();\n    let new_validators: BTreeSet<_> = new_snapshot\n        .values()\n        .flat_map(BTreeMap::keys)\n        .cloned()\n        .collect();\n\n    ValidatorsDiff {\n        added: new_validators\n            .difference(&old_validators)\n            .cloned()\n            .collect(),\n        removed: old_validators\n            .difference(&new_validators)\n            .cloned()\n            .collect(),\n    }\n}\n"
  },
  {
    "path": "utils/global-state-update-gen/src/validators.rs",
    "content": "use casper_engine_test_support::LmdbWasmTestBuilder;\nuse casper_execution_engine::engine_state::engine_config::DEFAULT_PROTOCOL_VERSION;\nuse casper_types::{AsymmetricType, PublicKey, U512};\nuse clap::ArgMatches;\n\nuse crate::{\n    generic::{\n        config::{AccountConfig, Config, ValidatorConfig},\n        update_from_config,\n    },\n    utils::{hash_from_str, protocol_version_from_matches},\n};\n\npub(crate) fn generate_validators_update(matches: &ArgMatches<'_>) {\n    let data_dir = matches.value_of(\"data_dir\").unwrap_or(\".\");\n    let state_hash = hash_from_str(matches.value_of(\"hash\").unwrap());\n    let accounts = match matches.values_of(\"validator\") {\n        None => vec![],\n        Some(values) => values\n            .map(|validator_def| {\n                let mut fields = validator_def.split(',').map(str::to_owned);\n\n                let public_key_str = fields\n                    .next()\n                    .expect(\"validator config should contain a public key\");\n                let public_key = PublicKey::from_hex(public_key_str.as_bytes())\n                    .expect(\"validator config should have a valid public key\");\n\n                let stake_str = fields\n                    .next()\n                    .expect(\"validator config should contain a stake\");\n                let stake =\n                    U512::from_dec_str(&stake_str).expect(\"stake should be a valid decimal number\");\n\n                let maybe_new_balance_str = fields.next();\n                let maybe_new_balance = maybe_new_balance_str.as_ref().map(|balance_str| {\n                    U512::from_dec_str(balance_str)\n                        .expect(\"balance should be a valid decimal number\")\n                });\n\n                AccountConfig {\n                    public_key,\n                    balance: maybe_new_balance,\n                    validator: Some(ValidatorConfig {\n                        bonded_amount: stake,\n   
                     delegation_rate: None,\n                        delegators: None,\n                        reservations: None,\n                    }),\n                }\n            })\n            .collect(),\n    };\n    let protocol_version = protocol_version_from_matches(matches);\n\n    let config = Config {\n        accounts,\n        transfers: vec![],\n        only_listed_validators: true,\n        slash_instead_of_unbonding: false,\n        protocol_version,\n    };\n\n    let builder = LmdbWasmTestBuilder::open_raw(\n        data_dir,\n        Default::default(),\n        DEFAULT_PROTOCOL_VERSION,\n        state_hash,\n    );\n    update_from_config(builder, config);\n}\n"
  },
  {
    "path": "utils/highway-rewards-analysis/Cargo.toml",
    "content": "[package]\nname = \"highway-rewards-analysis\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[dependencies]\nbincode = \"1\"\nclap = { version = \"4\", features = [\"derive\"] }\ncasper-node = { path = \"../../node\" }\ncasper-types = { path = \"../../types\" }\nflate2 = \"1\"\nserde = \"1\"\n"
  },
  {
    "path": "utils/highway-rewards-analysis/README.md",
    "content": "# Highway State Analyzer\n\nThis tool analyzes a Highway protocol state dump and prints some information that may be helpful in explaining decreased reward amounts.\n\nUsage: `highway-rewards-analysis [-v] FILE`\n\n`FILE` should contain a Highway protocol state dump in the Bincode format, either plain or gzip-compressed. The `-v` flag causes the tool to print some additional information (see below).\n\nThe tool prints out 10 nodes that missed the most rounds in the era contained in the dump, as well as 10 nodes with the lowest average maximum level-1 summit quorum. The reward for a given block for a node is proportional to the maximum quorum of a level-1 summit containing that node in the round in which it was proposed - such quora for all the rounds in the era are what is averaged in the latter statistic.\n\nIf the `-v` flag is set, in addition to printing the 10 nodes with the most rounds missed, the tool also prints which rounds were missed by those nodes.\n"
  },
  {
    "path": "utils/highway-rewards-analysis/src/main.rs",
    "content": "use std::{\n    collections::{BTreeMap, HashSet},\n    fs::File,\n    io::Read,\n};\n\nuse clap::Parser;\nuse flate2::read::GzDecoder;\nuse serde::{Deserialize, Serialize};\n\nuse casper_node::consensus::{\n    highway_core::{\n        finality_detector::{\n            assigned_weight_and_latest_unit, find_max_quora, round_participation,\n            RoundParticipation,\n        },\n        State,\n    },\n    protocols::common::validators,\n    utils::Validators,\n    ClContext,\n};\nuse casper_types::{EraId, PublicKey, Timestamp, U512};\n\n#[derive(Parser, Debug)]\n#[command(author, version, about, long_about = None)]\nstruct Args {\n    filename: String,\n    #[arg(short, long)]\n    verbose: bool,\n}\n\n/// Debug dump of era used for serialization.\n#[derive(Debug, Serialize, Deserialize)]\npub(crate) struct EraDump {\n    /// The era that is being dumped.\n    pub id: EraId,\n\n    /// The scheduled starting time of this era.\n    pub start_time: Timestamp,\n    /// The height of this era's first block.\n    pub start_height: u64,\n\n    // omitted: pending blocks\n    /// Validators that have been faulty in any of the recent BONDED_ERAS switch blocks. 
This\n    /// includes `new_faulty`.\n    pub faulty: HashSet<PublicKey>,\n    /// Validators that are excluded from proposing new blocks.\n    pub cannot_propose: HashSet<PublicKey>,\n    /// Accusations collected in this era so far.\n    pub accusations: HashSet<PublicKey>,\n    /// The validator weights.\n    pub validators: BTreeMap<PublicKey, U512>,\n\n    /// The state of the highway instance associated with the era.\n    pub highway_state: State<ClContext>,\n}\n\nfn main() {\n    let args = Args::parse();\n\n    let mut data = vec![];\n    let mut file = File::open(&args.filename).unwrap();\n\n    if args.filename.ends_with(\".gz\") {\n        let mut gz = GzDecoder::new(file);\n        gz.read_to_end(&mut data).unwrap();\n    } else {\n        file.read_to_end(&mut data).unwrap();\n    }\n\n    let dump: EraDump = bincode::deserialize(&data).unwrap();\n\n    let validators =\n        validators::<ClContext>(&dump.faulty, &dump.cannot_propose, dump.validators.clone());\n\n    print_faults(&validators, &dump.highway_state);\n\n    print_skipped_rounds(&validators, &dump, args.verbose);\n\n    print_lowest_quorum_participation(&validators, &dump);\n}\n\nfn print_faults(validators: &Validators<PublicKey>, state: &State<ClContext>) {\n    if state.faulty_validators().count() == 0 {\n        return;\n    }\n\n    println!(\"Faults:\");\n    for vid in state.faulty_validators() {\n        let fault = state.maybe_fault(vid).unwrap();\n        println!(\"{}: {:?}\", validators.id(vid).unwrap(), fault);\n    }\n    println!();\n}\n\nconst TOP_TO_PRINT: usize = 10;\n\nfn round_num(dump: &EraDump, round_id: Timestamp) -> u64 {\n    let min_round_length = dump.highway_state.params().min_round_length();\n    (round_id.millis() - dump.start_time.millis()) / min_round_length.millis()\n}\n\nfn print_skipped_rounds(validators: &Validators<PublicKey>, dump: &EraDump, verbose: bool) {\n    let state = &dump.highway_state;\n    let highest_block = 
state.fork_choice(state.panorama()).unwrap();\n    let all_blocks = std::iter::once(highest_block).chain(state.ancestor_hashes(highest_block));\n    let mut skipped_rounds = vec![vec![]; validators.len()];\n\n    for block_hash in all_blocks {\n        let block_unit = state.unit(block_hash);\n        let round_id = block_unit.round_id();\n        for i in 0..validators.len() as u32 {\n            let observation = state.panorama().get(i.into()).unwrap();\n            let round_participation = round_participation(state, observation, round_id);\n            if matches!(round_participation, RoundParticipation::No) {\n                skipped_rounds[i as usize].push(round_id);\n            }\n        }\n    }\n\n    for rounds in skipped_rounds.iter_mut() {\n        rounds.sort();\n    }\n\n    let mut num_skipped_rounds: Vec<_> = skipped_rounds\n        .iter()\n        .enumerate()\n        .map(|(i, skipped)| (i as u32, skipped.len()))\n        .collect();\n    num_skipped_rounds.sort_by_key(|(_, len)| *len);\n\n    println!(\"{} validators who skipped the most rounds:\", TOP_TO_PRINT);\n    for (vid, len) in num_skipped_rounds.iter().rev().take(TOP_TO_PRINT) {\n        println!(\n            \"{}: skipped {} rounds\",\n            validators.id((*vid).into()).unwrap(),\n            len\n        );\n    }\n\n    if verbose {\n        println!();\n        for (vid, _) in num_skipped_rounds.iter().rev().take(TOP_TO_PRINT) {\n            let skipped_rounds: Vec<_> = skipped_rounds[*vid as usize]\n                .iter()\n                .map(|rid| format!(\"{}\", round_num(dump, *rid)))\n                .collect();\n            println!(\n                \"{} skipped rounds: {}\",\n                validators.id((*vid).into()).unwrap(),\n                skipped_rounds.join(\", \")\n            );\n        }\n    }\n\n    println!();\n}\n\nfn print_lowest_quorum_participation(validators: &Validators<PublicKey>, dump: &EraDump) {\n    let state = &dump.highway_state;\n    
let highest_block = state.fork_choice(state.panorama()).unwrap();\n    let mut quora_sum = vec![0.0; validators.len()];\n    let mut num_rounds = 0;\n\n    let hb_unit = state.unit(highest_block);\n    for bhash in state.ancestor_hashes(highest_block) {\n        let proposal_unit = state.unit(bhash);\n        let r_id = proposal_unit.round_id();\n\n        let (assigned_weight, latest) =\n            assigned_weight_and_latest_unit(state, &hb_unit.panorama, r_id);\n\n        let max_quora = find_max_quora(state, bhash, &latest);\n        let max_quora: Vec<f32> = max_quora\n            .into_iter()\n            .map(|quorum_w| quorum_w.0 as f32 / assigned_weight.0 as f32 * 100.0)\n            .collect();\n\n        for (q_sum, max_q) in quora_sum.iter_mut().zip(&max_quora) {\n            *q_sum += max_q;\n        }\n        num_rounds += 1;\n    }\n\n    let mut quora_avg: Vec<_> = quora_sum\n        .into_iter()\n        .enumerate()\n        .map(|(vid, q_sum)| (vid as u32, q_sum / num_rounds as f32))\n        .collect();\n    quora_avg.sort_by(|(_, q_avg1), (_, q_avg2)| q_avg1.partial_cmp(q_avg2).unwrap());\n\n    println!(\"{} validators with lowest average max quora:\", TOP_TO_PRINT);\n    for (vid, q_avg) in quora_avg.iter().take(TOP_TO_PRINT) {\n        println!(\n            \"{}: average max quorum {:3.1}%\",\n            validators.id((*vid).into()).unwrap(),\n            q_avg\n        );\n    }\n}\n"
  },
  {
    "path": "utils/highway-state-grapher/Cargo.toml",
    "content": "[package]\nname = \"highway-state-grapher\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html\n\n[dependencies]\nbincode = \"1\"\nclap = { version = \"4\", features = [\"derive\"] }\ncasper-node = { path = \"../../node\" }\ncasper-types = { path = \"../../types\" }\nflate2 = \"1\"\nfreetype-sys = \"0.13\"\nglium = \"0.26\"\nglium_text_rusttype = \"0.3\"\nlibc = \"0.2\"\nnalgebra = \"0.32\"\nserde = \"1\"\n"
  },
  {
    "path": "utils/highway-state-grapher/src/main.rs",
    "content": "mod renderer;\n\nuse std::{\n    collections::{BTreeMap, BTreeSet, HashMap, HashSet, VecDeque},\n    fmt::{self, Debug},\n    fs::File,\n    io::Read,\n    ops::RangeBounds,\n};\n\nuse casper_node::consensus::{\n    highway_core::{\n        finality_detector::{assigned_weight_and_latest_unit, find_max_quora},\n        Panorama, State,\n    },\n    utils::{ValidatorIndex, ValidatorMap},\n    ClContext,\n};\nuse casper_types::{Digest, EraId, PublicKey, Timestamp, U512};\n\nuse clap::Parser;\nuse flate2::read::GzDecoder;\nuse glium::{\n    glutin::{\n        event::{ElementState, Event, MouseButton, MouseScrollDelta, VirtualKeyCode, WindowEvent},\n        event_loop::{ControlFlow, EventLoop},\n        window::WindowBuilder,\n        ContextBuilder,\n    },\n    Display,\n};\nuse serde::{Deserialize, Serialize};\n\nuse crate::renderer::Renderer;\n\n#[derive(Parser, Debug)]\n#[command(author, version, about, long_about = None)]\nstruct Args {\n    filename: String,\n}\n\n/// Debug dump of era used for serialization.\n#[derive(Debug, Serialize, Deserialize)]\npub(crate) struct EraDump {\n    /// The era that is being dumped.\n    pub id: EraId,\n\n    /// The scheduled starting time of this era.\n    pub start_time: Timestamp,\n    /// The height of this era's first block.\n    pub start_height: u64,\n\n    // omitted: pending blocks\n    /// Validators that have been faulty in any of the recent BONDED_ERAS switch blocks. 
This\n    /// includes `new_faulty`.\n    pub faulty: HashSet<PublicKey>,\n    /// Validators that are excluded from proposing new blocks.\n    pub cannot_propose: HashSet<PublicKey>,\n    /// Accusations collected in this era so far.\n    pub accusations: HashSet<PublicKey>,\n    /// The validator weights.\n    pub validators: BTreeMap<PublicKey, U512>,\n\n    /// The state of the highway instance associated with the era.\n    pub highway_state: State<ClContext>,\n}\n\n/// Helper struct for sorting the units with regards to the implicit partial ordering in the DAG.\nstruct Units {\n    set: HashSet<Digest>,\n    order: Vec<Digest>,\n}\n\nimpl Units {\n    /// Collects all the unit hashes and orders them roughly from the newest to the oldest.\n    fn do_collect_ancestor_units(\n        &mut self,\n        state: &State<ClContext>,\n        panorama: &Panorama<ClContext>,\n    ) {\n        let hashes_to_add: Vec<_> = panorama.iter_correct_hashes().collect();\n        let mut hashes_to_proceed_with = vec![];\n        for hash in hashes_to_add {\n            if self.set.insert(*hash) {\n                self.order.push(*hash);\n                hashes_to_proceed_with.push(*hash);\n            }\n        }\n        for hash in hashes_to_proceed_with {\n            let unit = state.unit(&hash);\n            self.do_collect_ancestor_units(state, &unit.panorama);\n        }\n    }\n\n    /// Reorders the units in self.order so that every unit comes after all its dependencies.\n    fn reorder(&mut self, state: &State<ClContext>) {\n        let mut new_order_set = HashSet::new();\n        let mut new_order = vec![];\n        let mut queue: VecDeque<_> = std::mem::take(&mut self.order).into_iter().rev().collect();\n        loop {\n            if queue.is_empty() {\n                break;\n            }\n            let unit = queue.pop_front().unwrap();\n            if state\n                .unit(&unit)\n                .panorama\n                .iter_correct_hashes()\n      
          .all(|cited| new_order_set.contains(cited))\n            {\n                new_order_set.insert(unit);\n                new_order.push(unit)\n            } else {\n                queue.push_back(unit);\n            }\n        }\n        self.order = new_order;\n    }\n\n    /// Collects all the unit hashes and orders them so that every unit comes after all its\n    /// dependencies.\n    fn collect_ancestor_units(&mut self, state: &State<ClContext>) {\n        self.do_collect_ancestor_units(state, state.panorama());\n        self.reorder(state);\n    }\n}\n\n/// A more readable unit ID: the validator index together with the height in that validator's\n/// swimlane\n#[derive(Clone, Copy, PartialEq, Eq, Hash)]\npub struct UnitId(ValidatorIndex, usize);\n\nimpl Debug for UnitId {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"V{}_{}\", self.0 .0, self.1)\n    }\n}\n\n/// A more readable block id. The first field is the block height, the second is the number of the\n/// block among all the blocks at that height (if there are no orphan blocks, all the block IDs will\n/// have 0s in the second field).\n#[derive(Clone, Copy, PartialEq, Eq, Hash)]\npub struct BlockId(u64, u8);\n\nimpl Debug for BlockId {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"B{}\", self.0)?;\n        for _ in 0..self.1 {\n            write!(f, \"'\")?;\n        }\n        Ok(())\n    }\n}\n\n/// A helper struct for coloring units based on the validator's max quorum.\n/// `max_rank` is the number of distinct values of max quorum. `rank` is the index relative to the\n/// maximum value (ie. 
the largest max quorum has rank 0, the second largest has rank 1 etc.)\n#[derive(Clone, Copy)]\npub struct Quorum {\n    pub rank: usize,\n    pub max_rank: usize,\n    pub weight_percent: f32,\n}\n\nimpl Debug for Quorum {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"{:3.1}\", self.weight_percent)\n    }\n}\n\n/// A representation of the protocol state unit for the purpose of drawing it on the screen.\n/// `graph_height` is the maximum of graph heights of the cited units, plus 1 - drawing based on\n/// graph height guarantees that every unit will appear higher than all its dependencies.\n#[derive(Clone)]\npub struct GraphUnit {\n    pub id: UnitId,\n    pub creator: ValidatorIndex,\n    pub vote: BlockId,\n    pub is_proposal: bool,\n    pub cited_units: Vec<UnitId>,\n    pub height: usize,\n    pub graph_height: usize,\n    pub timestamp: u64,\n    pub round_num: u64,\n    pub round_id: Timestamp,\n    pub round_exp: u8,\n    pub max_quorum: Option<Quorum>,\n}\n\nimpl Debug for GraphUnit {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        f.debug_struct(\"GraphUnit\")\n            .field(\"id\", &self.id)\n            .field(\"creator\", &format!(\"V{}\", self.creator.0))\n            .field(\"height\", &self.height)\n            .field(\"graph_height\", &self.graph_height)\n            .field(\"vote\", &self.vote)\n            .field(\"is_proposal\", &self.is_proposal)\n            .field(\"timestamp\", &self.timestamp)\n            .field(\"round_num\", &self.round_num)\n            .field(\"round_id\", &self.round_id)\n            .field(\"round_exp\", &self.round_exp)\n            .field(\"max_quorum\", &self.max_quorum)\n            .field(\"cited_units\", &self.cited_units)\n            .finish()\n    }\n}\n\n/// A struct helping in assigning readable IDs to blocks.\n#[derive(Clone, Debug)]\nstruct BlockMapper {\n    hash_to_id: HashMap<Digest, BlockId>,\n    id_to_hash: HashMap<BlockId, Digest>,\n    
last_id_by_height: HashMap<u64, u8>,\n}\n\nimpl BlockMapper {\n    fn new() -> Self {\n        Self {\n            hash_to_id: HashMap::new(),\n            id_to_hash: HashMap::new(),\n            last_id_by_height: HashMap::new(),\n        }\n    }\n\n    /// Inserts the new ID, updating the mappings as necessary.\n    fn insert(&mut self, hash: Digest, id: BlockId) {\n        self.hash_to_id.insert(hash, id);\n        self.id_to_hash.insert(id, hash);\n        let entry = self.last_id_by_height.entry(id.0).or_insert(id.1);\n        *entry = (*entry).max(id.1);\n    }\n\n    /// Returns the block ID for the next block at the given height.\n    /// Usually, there will only be one block at a given height, but in some cases forks are\n    /// possible - in those cases, blocks at the same height will get sequential IDs.\n    fn next_id_for_height(&self, height: u64) -> BlockId {\n        BlockId(\n            height,\n            self.last_id_by_height\n                .get(&height)\n                .map(|idx| *idx + 1)\n                .unwrap_or(0),\n        )\n    }\n\n    /// Gets the readable block ID corresponding to the given hash.\n    fn get(&self, hash: &Digest) -> Option<BlockId> {\n        self.hash_to_id.get(hash).copied()\n    }\n\n    /// Gets the block hash corresponding to the given ID.\n    #[allow(unused)] // Will be useful if we add features related to the blocks\n    fn get_by_id(&self, id: &BlockId) -> Option<Digest> {\n        self.id_to_hash.get(id).copied()\n    }\n}\n\n/// All the data needed for drawing the unit DAG.\n#[derive(Clone, Debug)]\npub struct Graph {\n    units: ValidatorMap<Vec<GraphUnit>>,\n    reverse_edges: HashMap<UnitId, Vec<UnitId>>,\n    #[allow(unused)] // Will be useful if we add features related to the blocks\n    blocks: BlockMapper,\n    weight_percentages: ValidatorMap<f32>,\n}\n\nimpl Graph {\n    /// Creates a `Graph` based on the `state`.\n    fn new(state: &State<ClContext>, start_time: Timestamp) -> Self {\n     
   let mut units: BTreeMap<ValidatorIndex, Vec<GraphUnit>> = state\n            .weights()\n            .iter()\n            .enumerate()\n            .map(|(idx, _)| (ValidatorIndex::from(idx as u32), vec![]))\n            .collect();\n        let mut reverse_edges: HashMap<UnitId, Vec<UnitId>> = HashMap::new();\n        let mut unit_ids_by_hash: HashMap<Digest, UnitId> = HashMap::new();\n        let mut blocks = BlockMapper::new();\n\n        let mut units_set = Units {\n            set: HashSet::new(),\n            order: vec![],\n        };\n\n        units_set.collect_ancestor_units(state);\n\n        eprintln!(\"num units: {}\", units_set.order.len());\n\n        let max_round_exp = (state.params().max_round_length().millis()\n            / state.params().min_round_length().millis())\n        .trailing_zeros();\n        let max_round_length = state.params().min_round_length().millis() << max_round_exp;\n        let rounded_era_start =\n            Timestamp::from((start_time.millis() / max_round_length) * max_round_length);\n\n        let mut highest_block: Option<(u64, Digest)> = None;\n\n        for unit_hash in &units_set.order {\n            let unit = state.unit(unit_hash);\n            let block = state.block(&unit.block);\n            if highest_block.is_none_or( |(height, _)| height < block.height) {\n                highest_block = Some((block.height, unit.block));\n            }\n            let block_id = if let Some(b_id) = blocks.get(&unit.block) {\n                b_id\n            } else {\n                let b_id = blocks.next_id_for_height(block.height);\n                blocks.insert(unit.block, b_id);\n                b_id\n            };\n            let is_proposal = unit\n                .panorama\n                .iter_correct_hashes()\n                .all(|hash| state.unit(hash).block != unit.block);\n            let cited_units: Vec<UnitId> = unit\n                .panorama\n                .iter_correct_hashes()\n                
.map(|hash| *unit_ids_by_hash.get(hash).unwrap())\n                .collect();\n            let graph_height = cited_units\n                .iter()\n                .map(|unit_id| &units.get(&unit_id.0).unwrap()[unit_id.1])\n                .map(|g_unit| g_unit.graph_height)\n                .max()\n                .map(|max_height| max_height + 1)\n                .unwrap_or(0);\n            let unit_id = UnitId(unit.creator, units.get(&unit.creator).unwrap().len());\n\n            for cited_unit_id in &cited_units {\n                reverse_edges\n                    .entry(*cited_unit_id)\n                    .or_default()\n                    .push(unit_id);\n            }\n\n            let time_since_era_start = unit.timestamp.saturating_diff(rounded_era_start).millis();\n            let round_num = time_since_era_start / state.params().min_round_length().millis();\n\n            let graph_unit = GraphUnit {\n                id: unit_id,\n                creator: unit.creator,\n                vote: block_id,\n                is_proposal,\n                cited_units,\n                height: unit.seq_number as usize,\n                graph_height,\n                timestamp: time_since_era_start,\n                round_num,\n                round_id: unit.round_id(),\n                round_exp: (unit.round_len().millis() / state.params().min_round_length().millis())\n                    .trailing_zeros() as u8,\n                max_quorum: None,\n            };\n            unit_ids_by_hash.insert(*unit_hash, unit_id);\n            units.get_mut(&unit.creator).unwrap().push(graph_unit);\n        }\n\n        // fill in max quora\n        if let Some((_hb_height, hb_hash)) = highest_block {\n            let hb_unit = state.unit(&hb_hash);\n            for bhash in state.ancestor_hashes(&hb_hash) {\n                let proposal_unit = state.unit(bhash);\n                let r_id = proposal_unit.round_id();\n\n                let (assigned_weight, latest) =\n   
                 assigned_weight_and_latest_unit(state, &hb_unit.panorama, r_id);\n\n                let max_quora = find_max_quora(state, bhash, &latest);\n                // deduplicate and sort max quora\n                let max_quora_set: BTreeSet<_> = max_quora.iter().copied().collect();\n                let max_quora_rank_map: BTreeMap<_, _> = max_quora_set\n                    .into_iter()\n                    .rev()\n                    .enumerate()\n                    .map(|(rank, quorum)| (quorum, rank))\n                    .collect();\n\n                for unit in latest.iter().flatten() {\n                    let gunit_id = unit_ids_by_hash.get(*unit).unwrap();\n                    let gunit = &mut units.get_mut(&gunit_id.0).unwrap()[gunit_id.1];\n                    let quorum_w = max_quora[gunit.creator];\n                    let rank = max_quora_rank_map[&quorum_w];\n                    let weight_percent = quorum_w.0 as f32 / assigned_weight.0 as f32 * 100.0;\n                    gunit.max_quorum = Some(Quorum {\n                        rank,\n                        max_rank: max_quora_rank_map.len(),\n                        weight_percent,\n                    });\n                }\n            }\n        }\n\n        let weight_percentages: ValidatorMap<f32> = state\n            .weights()\n            .iter()\n            .map(|weight| weight.0 as f32 / state.total_weight().0 as f32 * 100.0)\n            .collect();\n\n        Self {\n            units: units.into_values().collect(),\n            reverse_edges,\n            blocks,\n            weight_percentages,\n        }\n    }\n\n    /// Returns the unit under the given `unit_id`.\n    pub fn get(&self, unit_id: &UnitId) -> Option<&GraphUnit> {\n        self.units\n            .get(unit_id.0)\n            .and_then(|swimlane| swimlane.get(unit_id.1))\n    }\n\n    /// Returns the validator weights.\n    pub fn validator_weights(&self) -> &ValidatorMap<f32> {\n        
&self.weight_percentages\n    }\n\n    /// Iterates over all the units created by validators within `range_vid` and with graph heights\n    /// within `range_graph_height`.\n    pub fn iter_range<R1, R2>(\n        &self,\n        range_vid: R1,\n        range_graph_height: R2,\n    ) -> impl Iterator<Item = &GraphUnit>\n    where\n        R1: RangeBounds<usize> + Clone,\n        R2: RangeBounds<usize> + Clone,\n    {\n        let range_vid_clone = range_vid.clone();\n        self.units\n            .iter()\n            .enumerate()\n            .skip_while(move |(vid, _)| !range_vid.contains(vid))\n            .take_while(move |(vid, _)| range_vid_clone.contains(vid))\n            .flat_map(move |(_, swimlane)| {\n                let range_graph_height_clone1 = range_graph_height.clone();\n                let range_graph_height_clone2 = range_graph_height.clone();\n                swimlane\n                    .iter()\n                    .skip_while(move |unit| !range_graph_height_clone1.contains(&unit.graph_height))\n                    .take_while(move |unit| range_graph_height_clone2.contains(&unit.graph_height))\n            })\n    }\n}\n\nfn main() {\n    let args = Args::parse();\n\n    let mut data = vec![];\n    let mut file = File::open(&args.filename).unwrap();\n\n    if args.filename.ends_with(\".gz\") {\n        let mut gz = GzDecoder::new(file);\n        gz.read_to_end(&mut data).unwrap();\n    } else {\n        file.read_to_end(&mut data).unwrap();\n    }\n\n    let dump: EraDump = bincode::deserialize(&data).unwrap();\n\n    eprintln!(\"{}\", dump.id);\n\n    let graph = Graph::new(&dump.highway_state, dump.start_time);\n\n    for (index, (pub_key, _)) in dump.validators.iter().enumerate() {\n        eprintln!(\"{}: {}\", index, pub_key);\n    }\n\n    start_rendering(graph);\n}\n\n/// Struct keeping the current state of some keys (the events only report the current state, so we\n/// need to store the old state to know when it 
changes).\n#[derive(Clone, Copy)]\nstruct KeyboardState {\n    /// State of the 'E' key.\n    e_state: bool,\n}\n\nimpl KeyboardState {\n    fn e_pressed(&mut self) -> bool {\n        let was_pressed = self.e_state;\n        self.e_state = true;\n        !was_pressed\n    }\n\n    fn e_released(&mut self) {\n        self.e_state = false;\n    }\n}\n\n/// Enum keeping the state of mouse input.\n#[derive(Clone, Copy)]\nenum MouseState {\n    /// Mouse is freely moving.\n    Free { position: (f64, f64) },\n    /// The user is dragging something.\n    Dragging { last_position: (f64, f64) },\n}\n\nimpl MouseState {\n    /// Handles a mouse move event.\n    /// Returns `Some(delta_x, delta_y)` if dragging is in progress.\n    fn handle_move(&mut self, new_position: (f64, f64)) -> Option<(f32, f32)> {\n        match self {\n            Self::Free { position } => {\n                *position = new_position;\n                None\n            }\n            Self::Dragging { last_position } => {\n                let delta_x = (new_position.0 - last_position.0) as f32;\n                let delta_y = (new_position.1 - last_position.1) as f32;\n                *last_position = new_position;\n                Some((delta_x, delta_y))\n            }\n        }\n    }\n\n    /// Switches between `Free` and `Dragging` based on the button presses.\n    fn handle_button(&mut self, button_down: bool) {\n        match (*self, button_down) {\n            (Self::Free { position }, true) => {\n                *self = Self::Dragging {\n                    last_position: position,\n                };\n            }\n            (Self::Dragging { last_position }, false) => {\n                *self = Self::Free {\n                    position: last_position,\n                };\n            }\n            _ => (),\n        }\n    }\n\n    /// Returns the current position of the cursor.\n    fn cursor(&self) -> (f32, f32) {\n        match self {\n            Self::Free { position } => 
(position.0 as f32, position.1 as f32),\n            Self::Dragging { last_position } => (last_position.0 as f32, last_position.1 as f32),\n        }\n    }\n}\n\n/// The main loop of the program.\nfn start_rendering(graph: Graph) {\n    let event_loop = EventLoop::new();\n\n    let wb = WindowBuilder::new()\n        .with_title(\"Consensus Graph Visualization\")\n        .with_maximized(true)\n        .with_resizable(true);\n    let cb = ContextBuilder::new();\n    let display = Display::new(wb, cb, &event_loop).unwrap();\n\n    let mut renderer = Renderer::new(&display);\n    let mut mouse_state = MouseState::Free {\n        position: (0.0, 0.0),\n    };\n    let mut keyboard_state = KeyboardState { e_state: false };\n\n    event_loop.run(move |ev, _, control_flow| {\n        match ev {\n            Event::WindowEvent { event, .. } => match event {\n                WindowEvent::CloseRequested => {\n                    *control_flow = ControlFlow::Exit;\n                    return;\n                }\n                WindowEvent::MouseWheel { delta, .. } => match delta {\n                    MouseScrollDelta::LineDelta(_, vertical) => {\n                        renderer.mouse_scroll(vertical);\n                    }\n                    MouseScrollDelta::PixelDelta(pixels) => {\n                        renderer.mouse_scroll(pixels.y as f32 / 30.0);\n                    }\n                },\n                WindowEvent::KeyboardInput { input, .. 
} => {\n                    match (input.virtual_keycode, input.state) {\n                        (Some(VirtualKeyCode::E), ElementState::Pressed) => {\n                            if keyboard_state.e_pressed() {\n                                renderer.toggle_edges();\n                            }\n                        }\n                        (Some(VirtualKeyCode::E), ElementState::Released) => {\n                            keyboard_state.e_released();\n                        }\n                        _ => (),\n                    }\n                }\n                WindowEvent::MouseInput { state, button, .. } => {\n                    if let (state, MouseButton::Left) = (state, button) {\n                        mouse_state.handle_button(matches!(state, ElementState::Pressed));\n                    }\n                }\n                WindowEvent::CursorMoved { position, .. } => {\n                    if let Some(delta) = mouse_state.handle_move((position.x, position.y)) {\n                        renderer.pan(delta.0, delta.1);\n                    }\n                }\n                _ => (),\n            },\n            Event::MainEventsCleared => {\n                let (cursor_x, cursor_y) = mouse_state.cursor();\n                renderer.draw(&display, &graph, cursor_x, cursor_y);\n            }\n            _ => (),\n        }\n        *control_flow = ControlFlow::Poll;\n    });\n}\n"
  },
  {
    "path": "utils/highway-state-grapher/src/renderer/matrix.rs",
    "content": "use std::ops;\n\n#[derive(Clone, Copy)]\npub struct Matrix {\n    coords: [[f32; 4]; 4],\n}\n\nimpl Matrix {\n    pub fn identity() -> Matrix {\n        Matrix {\n            coords: [\n                [1.0, 0.0, 0.0, 0.0],\n                [0.0, 1.0, 0.0, 0.0],\n                [0.0, 0.0, 1.0, 0.0],\n                [0.0, 0.0, 0.0, 1.0],\n            ],\n        }\n    }\n\n    pub fn inner(self) -> [[f32; 4]; 4] {\n        self.coords\n    }\n\n    pub fn translation(x: f32, y: f32) -> Matrix {\n        let mut result = Matrix::identity();\n        result.coords[3][0] = x;\n        result.coords[3][1] = y;\n        result\n    }\n\n    pub fn scale(x: f32, y: f32) -> Matrix {\n        Matrix {\n            coords: [\n                [x, 0.0, 0.0, 0.0],\n                [0.0, y, 0.0, 0.0],\n                [0.0, 0.0, 1.0, 0.0],\n                [0.0, 0.0, 0.0, 1.0],\n            ],\n        }\n    }\n}\n\nimpl ops::Add<Matrix> for Matrix {\n    type Output = Matrix;\n\n    fn add(mut self, other: Matrix) -> Matrix {\n        for i in 0..4 {\n            for j in 0..4 {\n                self.coords[i][j] += other.coords[i][j];\n            }\n        }\n        self\n    }\n}\n\nimpl ops::Sub<Matrix> for Matrix {\n    type Output = Matrix;\n\n    fn sub(mut self, other: Matrix) -> Matrix {\n        for i in 0..4 {\n            for j in 0..4 {\n                self.coords[i][j] -= other.coords[i][j];\n            }\n        }\n        self\n    }\n}\n\nimpl ops::Mul<Matrix> for Matrix {\n    type Output = Matrix;\n\n    #[allow(clippy::needless_range_loop)]\n    fn mul(self, other: Matrix) -> Matrix {\n        let mut new_coords = [[0.0; 4]; 4];\n        for i in 0..4 {\n            for j in 0..4 {\n                for k in 0..4 {\n                    new_coords[i][j] += self.coords[i][k] * other.coords[k][j];\n                }\n            }\n        }\n        Matrix { coords: new_coords }\n    }\n}\n"
  },
  {
    "path": "utils/highway-state-grapher/src/renderer.rs",
    "content": "mod matrix;\n\nuse std::{collections::HashSet, f32::consts::PI};\n\nuse casper_node::consensus::utils::ValidatorMap;\nuse glium::{\n    implement_vertex, index, uniform, Display, DrawParameters, Frame, Program, Surface,\n    VertexBuffer,\n};\nuse glium_text_rusttype::{self, FontTexture, TextDisplay, TextSystem};\nuse nalgebra::Vector2;\n\nuse crate::{renderer::matrix::Matrix, Graph, GraphUnit, UnitId};\n\nconst VERTEX_SHADER_SRC: &str = r#\"\n    #version 140\n\n    in vec2 position;\n\n    uniform mat4 matrix;\n    uniform vec3 color;\n    out vec3 in_color;\n\n    void main() {\n        gl_Position = matrix * vec4(position, 0.0, 1.0);\n        in_color = color;\n    }\n\"#;\n\nconst FRAGMENT_SHADER_SRC: &str = r#\"\n    #version 140\n\n    in vec3 in_color;\n    out vec4 color;\n\n    void main() {\n        color = vec4(in_color, 1.0);\n    }\n\"#;\n\nconst FONT_FILE: &[u8] = include_bytes!(\"../DejaVuSans.ttf\");\n\n#[derive(Debug, Clone, Copy)]\nstruct Vertex {\n    position: [f32; 2],\n}\n\nimplement_vertex!(Vertex, position);\n\n/// Rendering-specific data.\npub struct Renderer {\n    /// The coordinates at the center of the screen.\n    center: Vector2<f32>,\n    /// The width of the window, in pixels.\n    window_width: f32,\n    /// The current width of the viewport.\n    width: f32,\n    /// The shading program.\n    program: Program,\n    /// Stuff for rendering text.\n    text_system: TextSystem,\n    font: FontTexture,\n\n    /// Pre-generated vertices for a unit.\n    unit_vertex_buffer: VertexBuffer<Vertex>,\n    interior_indices: index::NoIndices,\n    frame_indices: index::IndexBuffer<u32>,\n\n    /// `True` if we're drawing edges.\n    edges_enabled: bool,\n}\n\nconst UNIT_WIDTH: f32 = 0.5;\nconst UNIT_HEIGHT: f32 = 0.4;\nconst CORNER_RADIUS: f32 = 0.05;\nconst LINE_WIDTH: f32 = 0.015;\n\nimpl Renderer {\n    pub fn new(display: &Display) -> Self {\n        let text_system = TextSystem::new(display);\n        let font =\n          
  FontTexture::new(display, FONT_FILE, 32, FontTexture::ascii_character_list()).unwrap();\n\n        let (unit_vertex_buffer, interior_indices, frame_indices) =\n            Self::unit_vertex_buffer(display);\n\n        Renderer {\n            center: Vector2::new(3.5, 2.5),\n            window_width: 3000.0, // will get updated on first frame draw\n            width: 8.0,\n            program: Program::from_source(display, VERTEX_SHADER_SRC, FRAGMENT_SHADER_SRC, None)\n                .unwrap(),\n            text_system,\n            font,\n\n            unit_vertex_buffer,\n            interior_indices,\n            frame_indices,\n            edges_enabled: true,\n        }\n    }\n\n    /// Creates vertices for a rounded rectangle.\n    fn unit_vertex_buffer(\n        display: &Display,\n    ) -> (\n        VertexBuffer<Vertex>,\n        index::NoIndices,\n        index::IndexBuffer<u32>,\n    ) {\n        let mut shape = vec![];\n        let n_vertices_corner = 8;\n\n        let corner_radius = CORNER_RADIUS;\n        let width = UNIT_WIDTH;\n        let height = UNIT_HEIGHT;\n\n        let corners = [\n            (\n                width / 2.0 - corner_radius,\n                height / 2.0 - corner_radius,\n                0.0,\n            ),\n            (\n                -width / 2.0 + corner_radius,\n                height / 2.0 - corner_radius,\n                PI * 0.5,\n            ),\n            (\n                -width / 2.0 + corner_radius,\n                -height / 2.0 + corner_radius,\n                PI,\n            ),\n            (\n                width / 2.0 - corner_radius,\n                -height / 2.0 + corner_radius,\n                PI * 1.5,\n            ),\n        ];\n\n        shape.push(Vertex {\n            position: [0.0, 0.0],\n        });\n        for (x, y, phase) in corners {\n            for i in 0..n_vertices_corner {\n                let ang = 0.5 * PI * (i as f32) / n_vertices_corner as f32 + phase;\n                
shape.push(Vertex {\n                    position: [corner_radius * ang.cos() + x, corner_radius * ang.sin() + y],\n                });\n            }\n        }\n        shape.push(shape[1]);\n\n        (\n            VertexBuffer::new(display, &shape).unwrap(),\n            index::NoIndices(index::PrimitiveType::TriangleFan),\n            index::IndexBuffer::new(\n                display,\n                index::PrimitiveType::LineLoop,\n                &(1..(shape.len() - 1) as u32).collect::<Vec<_>>(),\n            )\n            .unwrap(),\n        )\n    }\n\n    /// Draws the graph.\n    pub fn draw(&mut self, display: &Display, graph: &Graph, cursor_x: f32, cursor_y: f32) {\n        let mut target = display.draw();\n\n        let (size_x, size_y) = target.get_dimensions();\n        self.window_width = size_x as f32;\n\n        let (cursor_x, cursor_y) = self.convert_cursor(cursor_x, cursor_y, size_x, size_y);\n\n        let aspect = (size_y as f32) / (size_x as f32);\n\n        let height = self.width * aspect;\n\n        let max_graph_height = (self.center.y + height / 2.0 + 1.0) as usize;\n        let min_graph_height = (self.center.y - height / 2.0 - 1.0).max(0.0) as usize;\n\n        let max_validator_index = (self.center.x + self.width / 2.0 + 1.0) as usize;\n        let min_validator_index = (self.center.x - self.width / 2.0 - 1.0).max(0.0) as usize;\n\n        target.clear_color(0.0, 0.0, 0.2, 1.0);\n\n        let matrix = Matrix::translation(-self.center.x, -self.center.y)\n            * Matrix::scale(2.0 / self.width, 2.0 / height);\n\n        let mut edges_to_draw = HashSet::new();\n        let mut highlighted_edges_to_draw = HashSet::new();\n\n        for unit in graph.iter_range(\n            min_validator_index..=max_validator_index,\n            min_graph_height..=max_graph_height,\n        ) {\n            let set_to_insert = if Self::unit_contains_cursor(unit, cursor_x, cursor_y) {\n                &mut highlighted_edges_to_draw\n            
} else {\n                &mut edges_to_draw\n            };\n            for cited_unit in &unit.cited_units {\n                set_to_insert.insert((unit.id, *cited_unit));\n            }\n            for dependent_unit in graph.reverse_edges.get(&unit.id).into_iter().flatten() {\n                set_to_insert.insert((*dependent_unit, unit.id));\n            }\n        }\n\n        // draw edges first, so that the units are drawn over them\n        if self.edges_enabled {\n            self.draw_edges(display, &mut target, &matrix, graph, edges_to_draw, false);\n        }\n        self.draw_edges(\n            display,\n            &mut target,\n            &matrix,\n            graph,\n            highlighted_edges_to_draw,\n            true,\n        );\n\n        for unit in graph.iter_range(\n            min_validator_index..=max_validator_index,\n            min_graph_height..=max_graph_height,\n        ) {\n            self.draw_unit(&mut target, unit, graph.validator_weights(), &matrix);\n        }\n\n        target.finish().unwrap();\n    }\n\n    /// Converts the cursor coordinates in pixels into the scene coordinates.\n    fn convert_cursor(&self, cursor_x: f32, cursor_y: f32, size_x: u32, size_y: u32) -> (f32, f32) {\n        let size_x = size_x as f32;\n        let size_y = size_y as f32;\n        let delta_x = (cursor_x / size_x - 0.5) * self.width;\n        let delta_y = (0.5 - cursor_y / size_y) * self.width * size_y / size_x;\n        (self.center.x + delta_x, self.center.y + delta_y)\n    }\n\n    /// Checks whether the cursor hovers over a unit.\n    fn unit_contains_cursor(unit: &GraphUnit, cursor_x: f32, cursor_y: f32) -> bool {\n        let (unit_x, unit_y) = Self::unit_pos(unit);\n        (unit_x - cursor_x).abs() < UNIT_WIDTH / 2.0\n            && (unit_y - cursor_y).abs() < UNIT_HEIGHT / 2.0\n    }\n\n    /// Draws a unit.\n    fn draw_unit(\n        &mut self,\n        target: &mut Frame,\n        unit: &GraphUnit,\n        weights: 
&ValidatorMap<f32>,\n        view: &Matrix,\n    ) {\n        let (x, y) = Self::unit_pos(unit);\n\n        let matrix2 = Matrix::translation(x, y) * *view;\n\n        let color = match (unit.is_proposal, unit.max_quorum.as_ref()) {\n            (false, Some(quorum)) => {\n                if quorum.max_rank <= 1 {\n                    Self::quorum_color_spectrum(0.0)\n                } else {\n                    let frac = quorum.rank as f32 / (quorum.max_rank - 1) as f32;\n                    Self::quorum_color_spectrum(frac)\n                }\n            }\n            (true, _) => [0.0_f32, 0.5, 0.5],\n            _ => [0.0_f32, 0.0, 0.2],\n        };\n\n        let uniforms = uniform! {\n            matrix: matrix2.inner(),\n            color: color,\n        };\n\n        target\n            .draw(\n                &self.unit_vertex_buffer,\n                self.interior_indices,\n                &self.program,\n                &uniforms,\n                &Default::default(),\n            )\n            .unwrap();\n\n        let uniforms = uniform! 
{\n            matrix: matrix2.inner(),\n            color: [ 1.0_f32, 1.0, 0.0 ],\n        };\n\n        let draw_params = DrawParameters {\n            line_width: Some(LINE_WIDTH),\n            ..Default::default()\n        };\n\n        target\n            .draw(\n                &self.unit_vertex_buffer,\n                &self.frame_indices,\n                &self.program,\n                &uniforms,\n                &draw_params,\n            )\n            .unwrap();\n\n        if self.width < 10.0 {\n            let text1 = format!(\"{:?}\", unit.id);\n            let text2 = format!(\n                \"Creator weight: {:3.1}%\",\n                weights.get(unit.creator).unwrap()\n            );\n            let text3 = format!(\"Vote: {:?}\", unit.vote);\n            let text4 = format!(\"round_exp: {}\", unit.round_exp);\n            let text5 = format!(\"round_id: {}\", unit.round_id);\n            let text6 = format!(\"timestamp: {} (round {})\", unit.timestamp, unit.round_num);\n            let text7 = if let Some(quorum) = unit.max_quorum.as_ref() {\n                format!(\"max quorum: {:3.1}%\", quorum.weight_percent)\n            } else {\n                \"\".to_string()\n            };\n            self.draw_text(target, -0.4, 0.7, &text1, 1.3, &matrix2);\n            self.draw_text(target, -0.8, 0.46, &text2, 0.8, &matrix2);\n            self.draw_text(target, -0.8, 0.22, &text3, 0.8, &matrix2);\n            self.draw_text(target, -0.8, -0.02, &text4, 0.8, &matrix2);\n            self.draw_text(target, -0.8, -0.26, &text5, 0.8, &matrix2);\n            self.draw_text(target, -0.8, -0.5, &text6, 0.8, &matrix2);\n            self.draw_text(target, -0.8, -0.74, &text7, 0.8, &matrix2);\n        } else {\n            let text = format!(\"{:?}\", unit.id);\n            self.draw_text(target, -0.4, -0.15, &text, 3.0, &matrix2);\n        }\n    }\n\n    /// Renders a string.\n    fn draw_text(\n        &self,\n        target: &mut Frame,\n        x: 
f32,\n        y: f32,\n        text: &str,\n        scale: f32,\n        matrix: &Matrix,\n    ) {\n        let basic_scale = UNIT_HEIGHT / 12.0;\n        let scale = basic_scale * scale;\n        let matrix = Matrix::scale(scale, scale)\n            * Matrix::translation(x * UNIT_WIDTH / 2.0, y * UNIT_HEIGHT / 2.0)\n            * *matrix;\n        let text = TextDisplay::new(&self.text_system, &self.font, text);\n\n        glium_text_rusttype::draw(\n            &text,\n            &self.text_system,\n            target,\n            matrix.inner(),\n            (1.0, 1.0, 1.0, 1.0),\n        )\n        .unwrap();\n    }\n\n    /// Draws the edges between units.\n    fn draw_edges(\n        &mut self,\n        display: &Display,\n        target: &mut Frame,\n        view: &Matrix,\n        graph: &Graph,\n        edges: HashSet<(UnitId, UnitId)>,\n        highlight: bool,\n    ) {\n        let mut vertices = vec![];\n\n        for (unit1, unit2) in edges {\n            let pos1 = Self::unit_pos(graph.get(&unit1).unwrap());\n            let pos2 = Self::unit_pos(graph.get(&unit2).unwrap());\n\n            vertices.push(Vertex {\n                position: [pos1.0, pos1.1],\n            });\n            vertices.push(Vertex {\n                position: [pos2.0, pos2.1],\n            });\n        }\n\n        let vertex_buffer = VertexBuffer::new(display, &vertices).unwrap();\n        let indices = index::NoIndices(index::PrimitiveType::LinesList);\n\n        let color = if highlight {\n            [1.0_f32, 1.0, 1.0]\n        } else {\n            [1.0_f32, 1.0, 0.0]\n        };\n\n        let uniforms = uniform! 
{\n            matrix: view.inner(),\n            color: color,\n        };\n\n        let draw_parameters = DrawParameters {\n            line_width: Some(if highlight {\n                LINE_WIDTH * 2.0\n            } else {\n                LINE_WIDTH\n            }),\n            ..Default::default()\n        };\n\n        target\n            .draw(\n                &vertex_buffer,\n                indices,\n                &self.program,\n                &uniforms,\n                &draw_parameters,\n            )\n            .unwrap();\n    }\n\n    /// Returns the position of the units in scene coordinates.\n    fn unit_pos(unit: &GraphUnit) -> (f32, f32) {\n        let x = unit.creator.0 as f32;\n        let y = unit.graph_height as f32;\n        (x, y)\n    }\n\n    /// Handles a mouse scroll event (zooms in or out).\n    pub fn mouse_scroll(&mut self, lines: f32) {\n        self.width *= 2.0_f32.powf(lines / 3.0);\n    }\n\n    /// Handles a dragging event (pans the screen).\n    pub fn pan(&mut self, delta_x: f32, delta_y: f32) {\n        let scale = self.width / self.window_width;\n        self.center += Vector2::new(-delta_x * scale, delta_y * scale);\n    }\n\n    pub fn toggle_edges(&mut self) {\n        self.edges_enabled = !self.edges_enabled;\n    }\n\n    /// Returns a color for the max quorum based on its rank.\n    fn quorum_color_spectrum(frac: f32) -> [f32; 3] {\n        let r = if frac < 0.5 { frac } else { 1.0 };\n        let g = if frac < 0.5 { 1.0 } else { 1.0 - frac };\n        [r * 0.5, g * 0.5, 0.0]\n    }\n}\n"
  },
  {
    "path": "utils/validation/Cargo.toml",
    "content": "[package]\nname = \"casper-validation\"\nversion = \"0.1.0\"\nauthors = [\"Michał Papierski <michal@casper.network>\"]\nedition = \"2021\"\n\n[dependencies]\nanyhow = \"1\"\nbase16 = \"0.2.1\"\ncasper-types = { path = \"../../types\", features = [\"testing\", \"std\", \"json-schema\"] }\nclap = { version = \"3.0.0-rc.0\", features = [\"derive\"] }\nderive_more = \"0.99.13\"\nhex = { version = \"0.4.2\", features = [\"serde\"] }\nserde = \"1\"\nserde_json = \"1\"\nthiserror = \"1.0.18\"\n\n[[test]]\nname = \"validation_test\"\nharness = false\n\n[[bin]]\nname = \"casper-validation\"\ntest = false\ndoctest = false\n\n[lib]\ntest = false\ndoctest = false\n"
  },
  {
    "path": "utils/validation/README.md",
    "content": "validation\n===============\n\nAutomation tool to validate the code based on fixtures.\n\nWhat is casper-validation?\n--------------------------------------\n\nThis tool validates the code by loading fixtures JSON that contains an input, and the expected output by applying an operation.\n\nUsage\n--------------------------------------\n\nTo generate new fixtures with a generator run:\n\n```\ncargo run -p casper-validation -- generate --output utils/validation/tests/fixtures\n```\n\n**Important note**\n\nDo not use this with day to day development - for example to fix an error in serialization code by replacing the fixture with possibly invalid code.\n\nTo validate the implementation using all the fixtures:\n\n```\ncargo test -p casper-validation\n```\n"
  },
  {
    "path": "utils/validation/src/abi.rs",
    "content": "use std::collections::BTreeMap;\n\nuse serde::{Deserialize, Serialize};\n\nuse casper_types::{\n    bytesrepr::{self, ToBytes},\n    CLValue, Key, StoredValue, U512,\n};\n\nuse crate::test_case::{Error, TestCase};\n\n/// Representation of supported input value.\n#[derive(Serialize, Deserialize, Debug, From)]\n#[serde(tag = \"type\", content = \"value\")]\npub enum Input {\n    U8(u8),\n    U16(u16),\n    U32(u32),\n    U64(u64),\n    String(String),\n    Bool(bool),\n    U512(U512),\n    CLValue(CLValue),\n    Key(Key),\n    StoredValue(StoredValue),\n}\n\nimpl ToBytes for Input {\n    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {\n        match self {\n            Input::U8(value) => value.to_bytes(),\n            Input::U16(value) => value.to_bytes(),\n            Input::U32(value) => value.to_bytes(),\n            Input::U64(value) => value.to_bytes(),\n            Input::String(value) => value.to_bytes(),\n            Input::Bool(value) => value.to_bytes(),\n            Input::U512(value) => value.to_bytes(),\n            Input::CLValue(value) => value.to_bytes(),\n            Input::Key(value) => value.to_bytes(),\n            Input::StoredValue(value) => value.to_bytes(),\n        }\n    }\n\n    fn serialized_length(&self) -> usize {\n        match self {\n            Input::U8(value) => value.serialized_length(),\n            Input::U16(value) => value.serialized_length(),\n            Input::U32(value) => value.serialized_length(),\n            Input::U64(value) => value.serialized_length(),\n            Input::String(value) => value.serialized_length(),\n            Input::Bool(value) => value.serialized_length(),\n            Input::U512(value) => value.serialized_length(),\n            Input::CLValue(value) => value.serialized_length(),\n            Input::Key(value) => value.serialized_length(),\n            Input::StoredValue(value) => value.serialized_length(),\n        }\n    }\n}\n\n/// Test case defines a list of inputs 
and an output.\n#[derive(Serialize, Deserialize, Debug)]\npub struct ABITestCase {\n    input: Vec<serde_json::Value>,\n    output: String,\n}\n\nimpl ABITestCase {\n    pub fn from_inputs(inputs: Vec<Input>) -> Result<ABITestCase, Error> {\n        // This is manually going through each input passed as we can't use `ToBytes for Vec<T>` as\n        // the `output` would be a serialized collection.\n        let mut truth = Vec::new();\n        for input in &inputs {\n            // Input::to_bytes uses static dispatch to call into each raw value impl.\n            let mut generated_truth = input.to_bytes()?;\n            truth.append(&mut generated_truth);\n        }\n\n        let input_values = inputs\n            .into_iter()\n            .map(serde_json::to_value)\n            .collect::<Result<Vec<_>, _>>()?;\n\n        Ok(ABITestCase {\n            input: input_values,\n            output: hex::encode(truth),\n        })\n    }\n\n    pub fn input(&self) -> Result<Vec<Input>, Error> {\n        let mut res = Vec::new();\n        for input_value in &self.input {\n            let input: Input = serde_json::from_value(input_value.clone())?;\n            res.push(input);\n        }\n        Ok(res)\n    }\n\n    pub fn output(&self) -> Result<Vec<u8>, Error> {\n        let output = hex::decode(&self.output)?;\n        Ok(output)\n    }\n\n    pub fn to_bytes(&self) -> Result<Vec<u8>, Error> {\n        let mut res = Vec::with_capacity(self.serialized_length()?);\n\n        for input in self.input()? 
{\n            res.append(&mut input.to_bytes()?);\n        }\n\n        Ok(res)\n    }\n\n    pub fn serialized_length(&self) -> Result<usize, Error> {\n        Ok(self.input()?.iter().map(ToBytes::serialized_length).sum())\n    }\n}\n\nimpl TestCase for ABITestCase {\n    /// Compares input to output.\n    ///\n    /// This gets executed for each test case.\n    fn run_test(&self) -> Result<(), Error> {\n        let serialized_length = self.serialized_length()?;\n        let serialized_data = self.to_bytes()?;\n\n        let output = self.output()?;\n\n        // Serialized data should match the output\n        if serialized_data != output {\n            return Err(Error::DataMismatch {\n                actual: serialized_data,\n                expected: output.to_vec(),\n            });\n        }\n\n        // Output from serialized_length should match the output data length\n        if serialized_length != output.len() {\n            return Err(Error::LengthMismatch {\n                expected: serialized_length,\n                actual: output.len(),\n            });\n        }\n\n        Ok(())\n    }\n}\n\n/// A fixture consists of multiple test cases.\n#[derive(Serialize, Deserialize, Debug, From)]\npub struct ABIFixture(BTreeMap<String, ABITestCase>);\n\nimpl ABIFixture {\n    pub fn len(&self) -> usize {\n        self.0.len()\n    }\n\n    pub fn is_empty(&self) -> bool {\n        self.0.is_empty()\n    }\n\n    pub fn into_inner(self) -> BTreeMap<String, ABITestCase> {\n        self.0\n    }\n}\n"
  },
  {
    "path": "utils/validation/src/error.rs",
    "content": "use std::{io, path::PathBuf};\n\nuse thiserror::Error;\n\nuse casper_types::bytesrepr;\n\nuse crate::test_case;\n\n#[derive(Error, Debug)]\npub enum Error {\n    #[error(transparent)]\n    Io(#[from] io::Error),\n    #[error(transparent)]\n    Deserialize(#[from] serde_json::Error),\n    #[error(\"missing file stem in: {0}\")]\n    NoStem(PathBuf),\n    #[error(\"unsupported file format at {0}\")]\n    UnsupportedFormat(PathBuf),\n    #[error(\"file {0} lacks extension\")]\n    NoExtension(PathBuf),\n    #[error(\"{0}\")]\n    Bytesrepr(bytesrepr::Error),\n    #[error(transparent)]\n    TestCase(#[from] test_case::Error),\n}\n\nimpl From<bytesrepr::Error> for Error {\n    fn from(error: bytesrepr::Error) -> Self {\n        Error::Bytesrepr(error)\n    }\n}\n"
  },
  {
    "path": "utils/validation/src/generators.rs",
    "content": "use std::{\n    collections::{BTreeMap, BTreeSet},\n    iter::FromIterator,\n};\n\nuse casper_types::{\n    account::{\n        Account, AccountHash, ActionThresholds as AccountActionThresholds,\n        AssociatedKeys as AccountAssociatedKeys, Weight as AccountWeight,\n    },\n    addressable_entity::{ActionThresholds, AddressableEntity, AssociatedKeys, EntityKind},\n    contracts::NamedKeys,\n    system::{\n        auction::{\n            Bid, BidAddr, BidKind, Delegator, DelegatorBid, DelegatorKind, EraInfo,\n            SeigniorageAllocation, UnbondingPurse, ValidatorBid, WithdrawPurse,\n        },\n        mint::BalanceHoldAddr,\n    },\n    AccessRights, BlockTime, ByteCode, ByteCodeHash, ByteCodeKind, CLType, CLTyped, CLValue,\n    ContractRuntimeTag, DeployHash, DeployInfo, EntityAddr, EntityEntryPoint, EntityVersionKey,\n    EntityVersions, EntryPointAccess, EntryPointPayment, EntryPointType, EntryPointValue, EraId,\n    Group, Groups, Key, Package, PackageHash, PackageStatus, Parameter, ProtocolVersion, PublicKey,\n    SecretKey, StoredValue, TransferAddr, TransferV1, URef, U512,\n};\nuse casper_validation::{\n    abi::{ABIFixture, ABITestCase},\n    error::Error,\n    Fixture, TestFixtures,\n};\n\nconst DO_NOTHING_BYTES: &[u8] = b\"\\x00asm\\x01\\x00\\x00\\x00\\x01\\x04\\x01`\\x00\\x00\\x03\\x02\\x01\\x00\\x05\\x03\\x01\\x00\\x01\\x07\\x08\\x01\\x04call\\x00\\x00\\n\\x04\\x01\\x02\\x00\\x0b\";\n\npub fn make_abi_test_fixtures() -> Result<TestFixtures, Error> {\n    let basic_fixture = {\n        let mut basic = BTreeMap::new();\n        basic.insert(\n            \"SerializeU8\".to_string(),\n            ABITestCase::from_inputs(vec![254u8.into()])?,\n        );\n        basic.insert(\n            \"SerializeU16\".to_string(),\n            ABITestCase::from_inputs(vec![62356u16.into()])?,\n        );\n        basic.insert(\n            \"SerializeU32\".to_string(),\n            ABITestCase::from_inputs(vec![3490072870u32.into()])?,\n      
  );\n        basic.insert(\n            \"SerializeU64\".to_string(),\n            ABITestCase::from_inputs(vec![10829133186225377555u64.into()])?,\n        );\n        basic.insert(\n            \"SerializeEmptyString\".to_string(),\n            ABITestCase::from_inputs(vec![String::new().into()])?,\n        );\n        basic.insert(\n            \"SerializeString\".to_string(),\n            ABITestCase::from_inputs(vec![\"Hello, world!\".to_string().into()])?,\n        );\n        basic.insert(\n            \"SerializeBool\".to_string(),\n            ABITestCase::from_inputs(vec![true.into(), false.into()])?,\n        );\n        Fixture::ABI {\n            name: \"basic\".to_string(),\n            fixture: ABIFixture::from(basic),\n        }\n    };\n\n    let legacy_transfer = TransferV1::new(\n        DeployHash::from_raw([44; 32]),\n        AccountHash::new([100; 32]),\n        Some(AccountHash::new([101; 32])),\n        URef::new([10; 32], AccessRights::WRITE),\n        URef::new([11; 32], AccessRights::WRITE),\n        U512::from(15_000_000_000u64),\n        U512::from(2_500_000_000u64),\n        Some(1),\n    );\n    let deploy_info = DeployInfo::new(\n        DeployHash::from_raw([55; 32]),\n        &[TransferAddr::new([1; 32]), TransferAddr::new([2; 32])],\n        AccountHash::new([100; 32]),\n        URef::new([10; 32], AccessRights::READ_ADD_WRITE),\n        U512::from(2_500_000_000u64),\n    );\n\n    let validator_secret_key =\n        SecretKey::ed25519_from_bytes([42; 32]).expect(\"should create secret key\");\n    let delegator_secret_key =\n        SecretKey::secp256k1_from_bytes([43; 32]).expect(\"should create secret key\");\n\n    let era_info = {\n        let mut era_info = EraInfo::new();\n\n        era_info\n            .seigniorage_allocations_mut()\n            .push(SeigniorageAllocation::Validator {\n                validator_public_key: PublicKey::from(&validator_secret_key),\n                amount: U512::from(1_000_000_000),\n      
      });\n\n        era_info\n            .seigniorage_allocations_mut()\n            .push(SeigniorageAllocation::Delegator {\n                validator_public_key: PublicKey::from(&validator_secret_key),\n                delegator_public_key: PublicKey::from(&delegator_secret_key),\n                amount: U512::from(1_000_000_000),\n            });\n        era_info\n            .seigniorage_allocations_mut()\n            .push(SeigniorageAllocation::DelegatorKind {\n                validator_public_key: PublicKey::from(&validator_secret_key),\n                delegator_kind: PublicKey::from(&delegator_secret_key).into(),\n                amount: U512::from(1_000_000_000),\n            });\n        era_info\n    };\n\n    let validator_public_key = PublicKey::from(&validator_secret_key);\n    let validator_bid_key =\n        Key::BidAddr(BidAddr::new_from_public_keys(&validator_public_key, None));\n    let validator_bid = ValidatorBid::locked(\n        validator_public_key.clone(),\n        URef::new([10; 32], AccessRights::READ_ADD_WRITE),\n        U512::from(50_000_000_000u64),\n        100,\n        u64::MAX,\n        0,\n        u64::MAX,\n        0,\n    );\n    let validator_bid_kind = BidKind::Validator(Box::new(validator_bid));\n    let delegator_public_key = PublicKey::from(&delegator_secret_key);\n    let delegator_bid_key = Key::BidAddr(BidAddr::new_from_public_keys(\n        &validator_public_key,\n        Some(&delegator_public_key.clone()),\n    ));\n    let delegator = Delegator::locked(\n        delegator_public_key.clone(),\n        U512::from(1_000_000_000u64),\n        URef::new([11; 32], AccessRights::READ_ADD_WRITE),\n        validator_public_key.clone(),\n        u64::MAX,\n    );\n\n    let delegator_bid_kind = BidKind::Delegator(Box::new(DelegatorBid::locked(\n        DelegatorKind::PublicKey(delegator_public_key.clone()),\n        U512::from(1_000_000_000u64),\n        URef::new([11; 32], AccessRights::READ_ADD_WRITE),\n        
validator_public_key.clone(),\n        u64::MAX,\n    )));\n\n    let _delegator_bid = DelegatorBid::locked(\n        delegator_public_key.clone().into(),\n        U512::from(1_000_000_000u64),\n        URef::new([11; 32], AccessRights::READ_ADD_WRITE),\n        validator_public_key.clone(),\n        u64::MAX,\n    );\n\n    let unified_bid_key = Key::BidAddr(BidAddr::legacy(\n        validator_public_key.to_account_hash().value(),\n    ));\n    let unified_bid = {\n        let mut unified_bid = Bid::locked(\n            validator_public_key.clone(),\n            URef::new([10; 32], AccessRights::READ_ADD_WRITE),\n            U512::from(50_000_000_000u64),\n            100,\n            u64::MAX,\n        );\n        unified_bid\n            .delegators_mut()\n            .insert(delegator.delegator_public_key().clone(), delegator.clone());\n        unified_bid\n    };\n    let unified_bid_kind = BidKind::Unified(Box::new(unified_bid));\n\n    let original_bid_key = Key::Bid(validator_public_key.to_account_hash());\n    let original_bid = {\n        let mut bid = Bid::locked(\n            validator_public_key,\n            URef::new([10; 32], AccessRights::READ_ADD_WRITE),\n            U512::from(50_000_000_000u64),\n            100,\n            u64::MAX,\n        );\n        bid.delegators_mut()\n            .insert(delegator.delegator_public_key().clone(), delegator);\n        bid\n    };\n\n    let withdraw_purse_1 = WithdrawPurse::new(\n        URef::new([10; 32], AccessRights::READ),\n        PublicKey::from(&validator_secret_key),\n        PublicKey::from(&validator_secret_key),\n        EraId::new(41),\n        U512::from(60_000_000_000u64),\n    );\n    let withdraw_purse_2 = WithdrawPurse::new(\n        URef::new([11; 32], AccessRights::READ),\n        PublicKey::from(&validator_secret_key),\n        PublicKey::from(&delegator_secret_key),\n        EraId::new(42),\n        U512::from(50_000_000_000u64),\n    );\n    let unbonding_purse_1 = 
UnbondingPurse::new(\n        URef::new([10; 32], AccessRights::READ),\n        PublicKey::from(&validator_secret_key),\n        PublicKey::from(&validator_secret_key),\n        EraId::new(41),\n        U512::from(60_000_000_000u64),\n        None,\n    );\n    let unbonding_purse_2 = UnbondingPurse::new(\n        URef::new([11; 32], AccessRights::READ),\n        PublicKey::from(&validator_secret_key),\n        PublicKey::from(&delegator_secret_key),\n        EraId::new(42),\n        U512::from(50_000_000_000u64),\n        None,\n    );\n\n    let keys_fixture = {\n        const ACCOUNT_KEY: Key = Key::Account(AccountHash::new([42; 32]));\n        const HASH_KEY: Key = Key::Hash([42; 32]);\n        const UREF_KEY: Key = Key::URef(URef::new([42; 32], AccessRights::READ));\n        const TRANSFER_KEY: Key = Key::Transfer(TransferAddr::new([42; 32]));\n        const DEPLOY_INFO_KEY: Key = Key::DeployInfo(DeployHash::from_raw([42; 32]));\n        const ERA_INFO_KEY: Key = Key::EraInfo(EraId::new(42));\n        const BALANCE_KEY: Key = Key::Balance([42; 32]);\n        const BALANCE_HOLD_KEY: Key = Key::BalanceHold(BalanceHoldAddr::Gas {\n            purse_addr: [42; 32],\n            block_time: BlockTime::new(0),\n        });\n        const WITHDRAW_KEY: Key = Key::Withdraw(AccountHash::new([42; 32]));\n        const DICTIONARY_KEY: Key = Key::Dictionary([42; 32]);\n        const SYSTEM_ENTITY_REGISTRY_KEY: Key = Key::SystemEntityRegistry;\n        const ERA_SUMMARY_KEY: Key = Key::EraSummary;\n        const UNBOND_KEY: Key = Key::Unbond(AccountHash::new([42; 32]));\n        const CHAINSPEC_REGISTRY_KEY: Key = Key::ChainspecRegistry;\n        const CHECKSUM_REGISTRY_KEY: Key = Key::ChecksumRegistry;\n\n        let mut keys = BTreeMap::new();\n        keys.insert(\n            \"Account\".to_string(),\n            ABITestCase::from_inputs(vec![ACCOUNT_KEY.into()])?,\n        );\n        keys.insert(\n            \"Hash\".to_string(),\n            
ABITestCase::from_inputs(vec![HASH_KEY.into()])?,\n        );\n        keys.insert(\n            \"URef\".to_string(),\n            ABITestCase::from_inputs(vec![UREF_KEY.into()])?,\n        );\n        keys.insert(\n            \"Transfer\".to_string(),\n            ABITestCase::from_inputs(vec![TRANSFER_KEY.into()])?,\n        );\n        keys.insert(\n            \"DeployInfo\".to_string(),\n            ABITestCase::from_inputs(vec![DEPLOY_INFO_KEY.into()])?,\n        );\n        keys.insert(\n            \"EraInfo\".to_string(),\n            ABITestCase::from_inputs(vec![ERA_INFO_KEY.into()])?,\n        );\n        keys.insert(\n            \"Balance\".to_string(),\n            ABITestCase::from_inputs(vec![BALANCE_KEY.into()])?,\n        );\n        keys.insert(\n            \"BalanceHold\".to_string(),\n            ABITestCase::from_inputs(vec![BALANCE_HOLD_KEY.into()])?,\n        );\n        keys.insert(\n            \"WriteBid\".to_string(),\n            ABITestCase::from_inputs(vec![original_bid_key.into()])?,\n        );\n        keys.insert(\n            \"WriteUnifiedBid\".to_string(),\n            ABITestCase::from_inputs(vec![unified_bid_key.into()])?,\n        );\n        keys.insert(\n            \"WriteValidatorBid\".to_string(),\n            ABITestCase::from_inputs(vec![validator_bid_key.into()])?,\n        );\n        keys.insert(\n            \"WriteDelegatorBid\".to_string(),\n            ABITestCase::from_inputs(vec![delegator_bid_key.into()])?,\n        );\n\n        keys.insert(\n            \"Withdraw\".to_string(),\n            ABITestCase::from_inputs(vec![WITHDRAW_KEY.into()])?,\n        );\n        keys.insert(\n            \"Dictionary\".to_string(),\n            ABITestCase::from_inputs(vec![DICTIONARY_KEY.into()])?,\n        );\n        keys.insert(\n            \"SystemEntityRegistry\".to_string(),\n            ABITestCase::from_inputs(vec![SYSTEM_ENTITY_REGISTRY_KEY.into()])?,\n        );\n        keys.insert(\n            
\"EraSummary\".to_string(),\n            ABITestCase::from_inputs(vec![ERA_SUMMARY_KEY.into()])?,\n        );\n        keys.insert(\n            \"Unbond\".to_string(),\n            ABITestCase::from_inputs(vec![UNBOND_KEY.into()])?,\n        );\n        keys.insert(\n            \"ChainspecRegistry\".to_string(),\n            ABITestCase::from_inputs(vec![CHAINSPEC_REGISTRY_KEY.into()])?,\n        );\n        keys.insert(\n            \"ChecksumRegistry\".to_string(),\n            ABITestCase::from_inputs(vec![CHECKSUM_REGISTRY_KEY.into()])?,\n        );\n        Fixture::ABI {\n            name: \"key\".to_string(),\n            fixture: ABIFixture::from(keys),\n        }\n    };\n\n    let stored_value_fixture = {\n        let mut stored_value = BTreeMap::new();\n\n        let cl_value = CLValue::from_t(\"Hello, world!\").expect(\"should create cl value\");\n\n        stored_value.insert(\n            \"CLValue\".to_string(),\n            ABITestCase::from_inputs(vec![StoredValue::CLValue(cl_value).into()])?,\n        );\n\n        let account_secret_key =\n            SecretKey::ed25519_from_bytes([42; 32]).expect(\"should create secret key\");\n        let account_public_key = PublicKey::from(&account_secret_key);\n        let account_hash = account_public_key.to_account_hash();\n\n        let account_named_keys = {\n            let mut named_keys = NamedKeys::new();\n            named_keys.insert(\"hash\".to_string(), Key::Hash([42; 32]));\n            named_keys.insert(\n                \"uref\".to_string(),\n                Key::URef(URef::new([16; 32], AccessRights::READ_ADD_WRITE)),\n            );\n            named_keys\n        };\n\n        let associated_keys = AccountAssociatedKeys::new(account_hash, AccountWeight::new(1));\n\n        let account = Account::new(\n            account_hash,\n            account_named_keys,\n            URef::new([17; 32], AccessRights::WRITE),\n            associated_keys,\n            
AccountActionThresholds::new(AccountWeight::new(1), AccountWeight::new(1)).unwrap(),\n        );\n\n        stored_value.insert(\n            \"Account\".to_string(),\n            ABITestCase::from_inputs(vec![StoredValue::Account(account).into()])?,\n        );\n\n        let byte_code = ByteCode::new(ByteCodeKind::V1CasperWasm, DO_NOTHING_BYTES.to_vec());\n\n        stored_value.insert(\n            \"ByteCode\".to_string(),\n            ABITestCase::from_inputs(vec![StoredValue::ByteCode(byte_code).into()])?,\n        );\n\n        let public_contract_entry_point = EntityEntryPoint::new(\n            \"public_entry_point_func\",\n            vec![\n                Parameter::new(\"param1\", U512::cl_type()),\n                Parameter::new(\"param2\", String::cl_type()),\n            ],\n            CLType::Unit,\n            EntryPointAccess::Public,\n            EntryPointType::Called,\n            EntryPointPayment::Caller,\n        );\n\n        stored_value.insert(\n            \"EntryPoint\".to_string(),\n            ABITestCase::from_inputs(vec![StoredValue::EntryPoint(EntryPointValue::V1CasperVm(\n                public_contract_entry_point,\n            ))\n            .into()])?,\n        );\n\n        let entity = AddressableEntity::new(\n            PackageHash::new([100; 32]),\n            ByteCodeHash::new([101; 32]),\n            ProtocolVersion::V1_0_0,\n            URef::default(),\n            AssociatedKeys::default(),\n            ActionThresholds::default(),\n            EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1),\n        );\n        stored_value.insert(\n            \"AddressableEntity\".to_string(),\n            ABITestCase::from_inputs(vec![StoredValue::AddressableEntity(entity).into()])?,\n        );\n\n        let mut active_versions = BTreeMap::new();\n        let v1_hash = EntityAddr::SmartContract([99; 32]);\n        let v2_hash = EntityAddr::SmartContract([100; 32]);\n        
active_versions.insert(EntityVersionKey::new(1, 2), v1_hash);\n        let v1 = EntityVersionKey::new(1, 1);\n        active_versions.insert(v1, v2_hash);\n        let active_versions = EntityVersions::from(active_versions);\n\n        let mut disabled_versions = BTreeSet::new();\n        disabled_versions.insert(v1);\n\n        let mut groups = Groups::new();\n        groups.insert(Group::new(\"Empty\"), BTreeSet::new());\n        groups.insert(\n            Group::new(\"Single\"),\n            BTreeSet::from_iter(vec![URef::new([55; 32], AccessRights::READ)]),\n        );\n\n        let package = Package::new(\n            active_versions,\n            disabled_versions,\n            groups,\n            PackageStatus::Locked,\n        );\n\n        stored_value.insert(\n            \"SmartContract\".to_string(),\n            ABITestCase::from_inputs(vec![StoredValue::SmartContract(package).into()])?,\n        );\n\n        stored_value.insert(\n            \"Transfer\".to_string(),\n            ABITestCase::from_inputs(vec![StoredValue::Transfer(legacy_transfer).into()])?,\n        );\n        stored_value.insert(\n            \"DeployInfo\".to_string(),\n            ABITestCase::from_inputs(vec![StoredValue::DeployInfo(deploy_info).into()])?,\n        );\n        stored_value.insert(\n            \"EraInfo\".to_string(),\n            ABITestCase::from_inputs(vec![StoredValue::EraInfo(era_info).into()])?,\n        );\n\n        stored_value.insert(\n            \"Bid\".to_string(),\n            ABITestCase::from_inputs(vec![StoredValue::Bid(Box::new(original_bid)).into()])?,\n        );\n        stored_value.insert(\n            \"UnifiedBid\".to_string(),\n            ABITestCase::from_inputs(vec![StoredValue::BidKind(unified_bid_kind).into()])?,\n        );\n        stored_value.insert(\n            \"ValidatorBid\".to_string(),\n            ABITestCase::from_inputs(vec![StoredValue::BidKind(validator_bid_kind).into()])?,\n        );\n        
stored_value.insert(\n            \"DelegatorBid\".to_string(),\n            ABITestCase::from_inputs(vec![StoredValue::BidKind(delegator_bid_kind).into()])?,\n        );\n        stored_value.insert(\n            \"Withdraw\".to_string(),\n            ABITestCase::from_inputs(vec![StoredValue::Withdraw(vec![\n                withdraw_purse_1,\n                withdraw_purse_2,\n            ])\n            .into()])?,\n        );\n        stored_value.insert(\n            \"Unbonding\".to_string(),\n            ABITestCase::from_inputs(vec![StoredValue::Unbonding(vec![\n                unbonding_purse_1,\n                unbonding_purse_2,\n            ])\n            .into()])?,\n        );\n\n        Fixture::ABI {\n            name: \"stored_value\".to_string(),\n            fixture: ABIFixture::from(stored_value),\n        }\n    };\n\n    Ok(vec![basic_fixture, stored_value_fixture, keys_fixture])\n}\n"
  },
  {
    "path": "utils/validation/src/lib.rs",
    "content": "//! This crate contains types that contain the logic necessary to validate Casper implementation\n//! correctness using external test fixtures.\n//!\n//! Casper test fixtures can contain multiple directories at the root level, which corresponds to a\n//! test category. For example structure of files found inside `ABI` can differ from files in other\n//! directories.\n//!\n//! Currently supported test fixtures:\n//!\n//! * [ABI](abi)\n\n#[macro_use]\nextern crate derive_more;\n\npub mod abi;\npub mod error;\npub mod test_case;\npub mod utils;\n\nuse std::{\n    ffi::OsStr,\n    fs::{self, File},\n    io::BufReader,\n    path::{Path, PathBuf},\n};\n\nuse serde::de::DeserializeOwned;\n\nuse abi::ABIFixture;\nuse error::Error;\n\npub const ABI_TEST_FIXTURES: &str = \"ABI\";\nconst JSON_FILE_EXT: &str = \"json\";\n\n#[derive(Debug)]\npub enum Fixture {\n    /// ABI fixture.\n    ABI {\n        /// Name of the test fixture (taken from a file name).\n        name: String,\n        /// ABI fixture itself.\n        fixture: ABIFixture,\n    },\n}\n\n/// Loads a generic test fixture from a file with a reader based on a file extension.\n///\n/// Currently only JSON files are supported.\npub fn load_fixture<T: DeserializeOwned>(path: PathBuf) -> Result<T, Error> {\n    let file = File::open(&path)?;\n    let buffered_reader = BufReader::new(file);\n\n    let fixture = match path.extension().and_then(OsStr::to_str) {\n        Some(extension) if extension.to_ascii_lowercase() == JSON_FILE_EXT => {\n            serde_json::from_reader(buffered_reader)?\n        }\n        Some(_) => return Err(Error::UnsupportedFormat(path)),\n        None => return Err(Error::NoExtension(path)),\n    };\n    Ok(fixture)\n}\n\n/// A series of fixtures. 
One element represents a single structured file.\npub type TestFixtures = Vec<Fixture>;\n\n/// Loads fixtures from a directory.\npub fn load_fixtures(path: &Path) -> Result<TestFixtures, Error> {\n    let mut test_fixtures = TestFixtures::new();\n\n    for entry in fs::read_dir(path)? {\n        let entry = entry?;\n\n        if !entry.metadata()?.is_dir() {\n            continue;\n        }\n\n        let dir_entries = match entry.path().file_name() {\n            Some(file_name) if file_name == ABI_TEST_FIXTURES => {\n                utils::recursive_read_dir(&entry.path())?\n            }\n            None | Some(_) => continue,\n        };\n\n        for dir_entry in dir_entries {\n            let dir_entry_path = dir_entry.path();\n            let fixture = load_fixture(dir_entry_path.clone())?;\n            let filename = dir_entry_path\n                .file_stem()\n                .and_then(OsStr::to_str)\n                .ok_or_else(|| Error::NoStem(dir_entry_path.clone()))?;\n            test_fixtures.push(Fixture::ABI {\n                name: filename.to_string(),\n                fixture,\n            });\n        }\n    }\n    Ok(test_fixtures)\n}\n"
  },
  {
    "path": "utils/validation/src/main.rs",
    "content": "mod generators;\n\nuse std::{fs::File, io::BufWriter, path::PathBuf};\n\nuse anyhow::Context;\nuse clap::Parser;\n\nuse casper_validation::{Fixture, ABI_TEST_FIXTURES};\n\n#[derive(Parser)]\n#[clap(version = \"1.0\")]\nstruct Opts {\n    #[clap(subcommand)]\n    subcmd: SubCommand,\n}\n\n#[derive(Parser)]\nenum SubCommand {\n    Generate(Generate),\n}\n\n/// Generates example test fixtures from the code.\n///\n/// Do not use with day to day development - for example to fix an error in serialization code by\n/// replacing the fixture with possibly invalid code.\n#[derive(Parser)]\nstruct Generate {\n    /// Path to fixtures directory.\n    #[clap(short, long, parse(from_os_str))]\n    output: PathBuf,\n}\n\nimpl Generate {\n    fn run(self) -> anyhow::Result<()> {\n        let fixtures = generators::make_abi_test_fixtures()?;\n\n        for Fixture::ABI {\n            name: file_name,\n            fixture,\n        } in fixtures\n        {\n            let output_path = {\n                let mut output_path = self.output.clone();\n                output_path.push(ABI_TEST_FIXTURES);\n                output_path.push(file_name + \".json\");\n                output_path\n            };\n\n            let file = File::create(&output_path)\n                .context(format!(\"Unable to create output file {:?}\", output_path))?;\n            let buffered_writer = BufWriter::new(file);\n            serde_json::to_writer_pretty(buffered_writer, &fixture)?;\n        }\n\n        Ok(())\n    }\n}\n\nfn main() -> anyhow::Result<()> {\n    let opts: Opts = Opts::parse();\n    match opts.subcmd {\n        SubCommand::Generate(generate) => generate.run(),\n    }\n}\n"
  },
  {
    "path": "utils/validation/src/test_case.rs",
    "content": "use casper_types::bytesrepr;\nuse hex::FromHexError;\nuse thiserror::Error;\n\n#[derive(Debug, Error)]\npub enum Error {\n    #[error(\"{0}\")]\n    Bytesrepr(bytesrepr::Error),\n    #[error(\"data mismatch expected {} != actual {}\", base16::encode_lower(&.expected), base16::encode_lower(&.actual))]\n    DataMismatch { expected: Vec<u8>, actual: Vec<u8> },\n    #[error(\"length mismatch expected {expected} != actual {actual}\")]\n    LengthMismatch { expected: usize, actual: usize },\n    #[error(\"expected JSON string in output field\")]\n    WrongOutputType,\n    #[error(\"not a valid hex string\")]\n    Hex(#[from] FromHexError),\n    #[error(transparent)]\n    Json(#[from] serde_json::Error),\n}\n\nimpl From<bytesrepr::Error> for Error {\n    fn from(error: bytesrepr::Error) -> Self {\n        Error::Bytesrepr(error)\n    }\n}\n\npub trait TestCase {\n    fn run_test(&self) -> Result<(), Error>;\n}\n"
  },
  {
    "path": "utils/validation/src/utils.rs",
    "content": "use std::{\n    fs::{self, DirEntry},\n    io,\n    path::Path,\n};\n\n/// Like [`fs::read_dir]` but recursive.\npub fn recursive_read_dir(dir: &Path) -> io::Result<Vec<DirEntry>> {\n    let mut result = Vec::new();\n\n    for entry in fs::read_dir(dir)? {\n        let entry = entry?;\n        let path = entry.path();\n        if path.is_dir() {\n            result.append(&mut recursive_read_dir(&path)?);\n        } else {\n            result.push(entry);\n        }\n    }\n\n    Ok(result)\n}\n"
  },
  {
    "path": "utils/validation/tests/fixtures/ABI/basic.json",
    "content": "{\n  \"SerializeBool\": {\n    \"input\": [\n      {\n        \"type\": \"Bool\",\n        \"value\": true\n      },\n      {\n        \"type\": \"Bool\",\n        \"value\": false\n      }\n    ],\n    \"output\": \"0100\"\n  },\n  \"SerializeEmptyString\": {\n    \"input\": [\n      {\n        \"type\": \"String\",\n        \"value\": \"\"\n      }\n    ],\n    \"output\": \"00000000\"\n  },\n  \"SerializeString\": {\n    \"input\": [\n      {\n        \"type\": \"String\",\n        \"value\": \"Hello, world!\"\n      }\n    ],\n    \"output\": \"0d00000048656c6c6f2c20776f726c6421\"\n  },\n  \"SerializeU16\": {\n    \"input\": [\n      {\n        \"type\": \"U16\",\n        \"value\": 62356\n      }\n    ],\n    \"output\": \"94f3\"\n  },\n  \"SerializeU32\": {\n    \"input\": [\n      {\n        \"type\": \"U32\",\n        \"value\": 3490072870\n      }\n    ],\n    \"output\": \"264906d0\"\n  },\n  \"SerializeU64\": {\n    \"input\": [\n      {\n        \"type\": \"U64\",\n        \"value\": 10829133186225377555\n      }\n    ],\n    \"output\": \"13915bf641cf4896\"\n  },\n  \"SerializeU8\": {\n    \"input\": [\n      {\n        \"type\": \"U8\",\n        \"value\": 254\n      }\n    ],\n    \"output\": \"fe\"\n  }\n}"
  },
  {
    "path": "utils/validation/tests/fixtures/ABI/bignum.json",
    "content": "{\n    \"U512_zero\": {\n        \"input\": [\n            [\n                \"U512\",\n                \"0\"\n            ]\n        ],\n        \"output\": \"00\"\n    },\n    \"U512_one\": {\n        \"input\": [\n            [\n                \"U512\",\n                \"1\"\n            ]\n        ],\n        \"output\": \"0101\"\n    },\n    \"U512_u32_max_value\": {\n        \"input\": [\n            [\n                \"U512\",\n                \"4294967294\"\n            ]\n        ],\n        \"output\": \"04feffffff\"\n    },\n    \"U512_u64_max_value_plus_one\": {\n        \"input\": [\n            [\n                \"U512\",\n                \"18446744073709551616\"\n            ]\n        ],\n        \"output\": \"09000000000000000001\"\n    },\n    \"U512_max\": {\n        \"input\": [\n            [\n                \"U512\",\n                \"13407807929942597099574024998205846127479365820592393377723561443721764030073546976801874298166903427690031858186486050853753882811946569946433649006084095\"\n            ]\n        ],\n        \"output\": \"40ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"\n    }\n}"
  },
  {
    "path": "utils/validation/tests/fixtures/ABI/clvalue.json",
    "content": "{\n    \"String\": {\n        \"input\": [\n            [\n                \"CLValue\",\n                {\n                    \"cl_type\": \"String\",\n                    \"bytes\": \"0d00000048656c6c6f2c20776f726c6421\",\n                    \"parsed\": \"Hello, world!\"\n                }\n            ]\n        ],\n        \"output\": \"110000000d00000048656c6c6f2c20776f726c64210a\"\n    },\n    \"Map\": {\n        \"input\": [\n            [\n                \"CLValue\",\n                {\n                    \"cl_type\": {\n                        \"Map\": {\n                            \"key\": \"String\",\n                            \"value\": \"U64\"\n                        }\n                    },\n                    \"bytes\": \"020000000300000061626301000000000000000300000078797a0200000000000000\",\n                    \"parsed\": [\n                        {\n                            \"key\": \"abc\",\n                            \"value\": 1\n                        },\n                        {\n                            \"key\": \"xyz\",\n                            \"value\": 2\n                        }\n                    ]\n                }\n            ]\n        ],\n        \"output\": \"22000000020000000300000061626301000000000000000300000078797a0200000000000000110a05\"\n    }\n}"
  },
  {
    "path": "utils/validation/tests/fixtures/ABI/collections.json",
    "content": "{\n    \"VectorOfStrings\": {\n        \"input\": [\n            [\n                \"U32\",\n                3\n            ],\n            [\n                \"String\",\n                \"Hello\"\n            ],\n            [\n                \"String\",\n                \"world\"\n            ],\n            [\n                \"String\",\n                \"!\"\n            ]\n        ],\n        \"output\": \"030000000500000048656c6c6f05000000776f726c640100000021\"\n    }\n}"
  },
  {
    "path": "utils/validation/tests/fixtures/ABI/key.json",
    "content": "{\n  \"Account\": {\n    \"input\": [\n      {\n        \"type\": \"Key\",\n        \"value\": \"account-hash-2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\"\n      }\n    ],\n    \"output\": \"002a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\"\n  },\n  \"Balance\": {\n    \"input\": [\n      {\n        \"type\": \"Key\",\n        \"value\": \"balance-2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\"\n      }\n    ],\n    \"output\": \"062a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\"\n  },\n  \"BalanceHold\": {\n    \"input\": [\n      {\n        \"type\": \"Key\",\n        \"value\": \"balance-hold-002a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a0000000000000000\"\n      }\n    ],\n    \"output\": \"16002a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a0000000000000000\"\n  },\n  \"ChainspecRegistry\": {\n    \"input\": [\n      {\n        \"type\": \"Key\",\n        \"value\": \"chainspec-registry-0000000000000000000000000000000000000000000000000000000000000000\"\n      }\n    ],\n    \"output\": \"0d0000000000000000000000000000000000000000000000000000000000000000\"\n  },\n  \"ChecksumRegistry\": {\n    \"input\": [\n      {\n        \"type\": \"Key\",\n        \"value\": \"checksum-registry-0000000000000000000000000000000000000000000000000000000000000000\"\n      }\n    ],\n    \"output\": \"0e0000000000000000000000000000000000000000000000000000000000000000\"\n  },\n  \"DeployInfo\": {\n    \"input\": [\n      {\n        \"type\": \"Key\",\n        \"value\": \"deploy-2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\"\n      }\n    ],\n    \"output\": \"042a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\"\n  },\n  \"Dictionary\": {\n    \"input\": [\n      {\n        \"type\": \"Key\",\n        \"value\": \"dictionary-2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\"\n      }\n    ],\n    
\"output\": \"092a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\"\n  },\n  \"EraInfo\": {\n    \"input\": [\n      {\n        \"type\": \"Key\",\n        \"value\": \"era-42\"\n      }\n    ],\n    \"output\": \"052a00000000000000\"\n  },\n  \"EraSummary\": {\n    \"input\": [\n      {\n        \"type\": \"Key\",\n        \"value\": \"era-summary-0000000000000000000000000000000000000000000000000000000000000000\"\n      }\n    ],\n    \"output\": \"0b0000000000000000000000000000000000000000000000000000000000000000\"\n  },\n  \"Hash\": {\n    \"input\": [\n      {\n        \"type\": \"Key\",\n        \"value\": \"hash-2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\"\n      }\n    ],\n    \"output\": \"012a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\"\n  },\n  \"SystemEntityRegistry\": {\n    \"input\": [\n      {\n        \"type\": \"Key\",\n        \"value\": \"system-entity-registry-0000000000000000000000000000000000000000000000000000000000000000\"\n      }\n    ],\n    \"output\": \"0a0000000000000000000000000000000000000000000000000000000000000000\"\n  },\n  \"Transfer\": {\n    \"input\": [\n      {\n        \"type\": \"Key\",\n        \"value\": \"transfer-2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\"\n      }\n    ],\n    \"output\": \"032a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\"\n  },\n  \"URef\": {\n    \"input\": [\n      {\n        \"type\": \"Key\",\n        \"value\": \"uref-2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a-001\"\n      }\n    ],\n    \"output\": \"022a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a01\"\n  },\n  \"Unbond\": {\n    \"input\": [\n      {\n        \"type\": \"Key\",\n        \"value\": \"unbond-2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\"\n      }\n    ],\n    \"output\": \"0c2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\"\n  },\n  \"Withdraw\": {\n    
\"input\": [\n      {\n        \"type\": \"Key\",\n        \"value\": \"withdraw-2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\"\n      }\n    ],\n    \"output\": \"082a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\"\n  },\n  \"WriteBid\": {\n    \"input\": [\n      {\n        \"type\": \"Key\",\n        \"value\": \"bid-306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1\"\n      }\n    ],\n    \"output\": \"07306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1\"\n  },\n  \"WriteDelegatorBid\": {\n    \"input\": [\n      {\n        \"type\": \"Key\",\n        \"value\": \"bid-addr-02306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1b27960a454670985e9072683f779602413d6ebf738ba0dc4200534c57de17e12\"\n      }\n    ],\n    \"output\": \"0f02306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1b27960a454670985e9072683f779602413d6ebf738ba0dc4200534c57de17e12\"\n  },\n  \"WriteUnifiedBid\": {\n    \"input\": [\n      {\n        \"type\": \"Key\",\n        \"value\": \"bid-addr-00306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1\"\n      }\n    ],\n    \"output\": \"0f00306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1\"\n  },\n  \"WriteValidatorBid\": {\n    \"input\": [\n      {\n        \"type\": \"Key\",\n        \"value\": \"bid-addr-01306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1\"\n      }\n    ],\n    \"output\": \"0f01306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1\"\n  }\n}"
  },
  {
    "path": "utils/validation/tests/fixtures/ABI/stored_value.json",
    "content": "{\n  \"Account\": {\n    \"input\": [\n      {\n        \"type\": \"StoredValue\",\n        \"value\": {\n          \"Account\": {\n            \"account_hash\": \"account-hash-306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1\",\n            \"named_keys\": [\n              {\n                \"name\": \"hash\",\n                \"key\": \"hash-2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a\"\n              },\n              {\n                \"name\": \"uref\",\n                \"key\": \"uref-1010101010101010101010101010101010101010101010101010101010101010-007\"\n              }\n            ],\n            \"main_purse\": \"uref-1111111111111111111111111111111111111111111111111111111111111111-002\",\n            \"associated_keys\": [\n              {\n                \"account_hash\": \"account-hash-306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1\",\n                \"weight\": 1\n              }\n            ],\n            \"action_thresholds\": {\n              \"deployment\": 1,\n              \"key_management\": 1\n            }\n          }\n        }\n      }\n    ],\n    \"output\": \"01306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1020000000400000068617368012a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a04000000757265660210101010101010101010101010101010101010101010101010101010101010100711111111111111111111111111111111111111111111111111111111111111110201000000306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1010101\"\n  },\n  \"AddressableEntity\": {\n    \"input\": [\n      {\n        \"type\": \"StoredValue\",\n        \"value\": {\n          \"AddressableEntity\": {\n            \"protocol_version\": \"1.0.0\",\n            \"entity_kind\": {\n              \"SmartContract\": \"VmCasperV1\"\n            },\n            \"package_hash\": \"package-6464646464646464646464646464646464646464646464646464646464646464\",\n            
\"byte_code_hash\": \"byte-code-6565656565656565656565656565656565656565656565656565656565656565\",\n            \"main_purse\": \"uref-0000000000000000000000000000000000000000000000000000000000000000-000\",\n            \"associated_keys\": [],\n            \"action_thresholds\": {\n              \"deployment\": 1,\n              \"upgrade_management\": 1,\n              \"key_management\": 1\n            }\n          }\n        }\n      }\n    ],\n    \"output\": \"0d64646464646464646464646464646464646464646464646464646464646464646565656565656565656565656565656565656565656565656565656565656565010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000101010200\"\n  },\n  \"Bid\": {\n    \"input\": [\n      {\n        \"type\": \"StoredValue\",\n        \"value\": {\n          \"Bid\": {\n            \"validator_public_key\": \"01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61\",\n            \"bonding_purse\": \"uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007\",\n            \"staked_amount\": \"50000000000\",\n            \"delegation_rate\": 100,\n            \"vesting_schedule\": {\n              \"initial_release_timestamp_millis\": 18446744073709551615,\n              \"locked_amounts\": null\n            },\n            \"delegators\": [\n              {\n                \"delegator_public_key\": \"0202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f2\",\n                \"delegator\": {\n                  \"delegator_public_key\": \"0202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f2\",\n                  \"staked_amount\": \"1000000000\",\n                  \"bonding_purse\": \"uref-0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b-007\",\n                  \"validator_public_key\": \"01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61\",\n                  \"vesting_schedule\": {\n                    
\"initial_release_timestamp_millis\": 18446744073709551615,\n                    \"locked_amounts\": null\n                  }\n                }\n              }\n            ],\n            \"inactive\": false\n          }\n        }\n      }\n    ],\n    \"output\": \"0801197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d610a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a070500743ba40b6401ffffffffffffffff00010000000202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f20202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f20400ca9a3b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0701197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d6101ffffffffffffffff0000\"\n  },\n  \"ByteCode\": {\n    \"input\": [\n      {\n        \"type\": \"StoredValue\",\n        \"value\": {\n          \"ByteCode\": {\n            \"kind\": \"V1CasperWasm\",\n            \"bytes\": \"0061736d010000000104016000000302010005030100010708010463616c6c00000a040102000b\"\n          }\n        }\n      }\n    ],\n    \"output\": \"0e01270000000061736d010000000104016000000302010005030100010708010463616c6c00000a040102000b\"\n  },\n  \"CLValue\": {\n    \"input\": [\n      {\n        \"type\": \"StoredValue\",\n        \"value\": {\n          \"CLValue\": {\n            \"cl_type\": \"String\",\n            \"bytes\": \"0d00000048656c6c6f2c20776f726c6421\",\n            \"parsed\": \"Hello, world!\"\n          }\n        }\n      }\n    ],\n    \"output\": \"00110000000d00000048656c6c6f2c20776f726c64210a\"\n  },\n  \"DelegatorBid\": {\n    \"input\": [\n      {\n        \"type\": \"StoredValue\",\n        \"value\": {\n          \"BidKind\": {\n            \"Delegator\": {\n              \"delegator_kind\": {\n                \"PublicKey\": \"0202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f2\"\n              },\n              \"staked_amount\": \"1000000000\",\n              \"bonding_purse\": 
\"uref-0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b-007\",\n              \"validator_public_key\": \"01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61\",\n              \"vesting_schedule\": {\n                \"initial_release_timestamp_millis\": 18446744073709551615,\n                \"locked_amounts\": null\n              }\n            }\n          }\n        }\n      }\n    ],\n    \"output\": \"0b02000202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f20400ca9a3b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0701197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d6101ffffffffffffffff00\"\n  },\n  \"DeployInfo\": {\n    \"input\": [\n      {\n        \"type\": \"StoredValue\",\n        \"value\": {\n          \"DeployInfo\": {\n            \"deploy_hash\": \"3737373737373737373737373737373737373737373737373737373737373737\",\n            \"transfers\": [\n              \"transfer-0101010101010101010101010101010101010101010101010101010101010101\",\n              \"transfer-0202020202020202020202020202020202020202020202020202020202020202\"\n            ],\n            \"from\": \"account-hash-6464646464646464646464646464646464646464646464646464646464646464\",\n            \"source\": \"uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007\",\n            \"gas\": \"2500000000\"\n          }\n        }\n      }\n    ],\n    \"output\": \"063737373737373737373737373737373737373737373737373737373737373737020000000101010101010101010101010101010101010101010101010101010101010101020202020202020202020202020202020202020202020202020202020202020264646464646464646464646464646464646464646464646464646464646464640a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a070400f90295\"\n  },\n  \"EntryPoint\": {\n    \"input\": [\n      {\n        \"type\": \"StoredValue\",\n        \"value\": {\n          \"EntryPoint\": {\n            \"V1CasperVm\": {\n              
\"name\": \"public_entry_point_func\",\n              \"args\": [\n                {\n                  \"name\": \"param1\",\n                  \"cl_type\": \"U512\"\n                },\n                {\n                  \"name\": \"param2\",\n                  \"cl_type\": \"String\"\n                }\n              ],\n              \"ret\": \"Unit\",\n              \"access\": \"Public\",\n              \"entry_point_type\": \"Called\",\n              \"entry_point_payment\": \"Caller\"\n            }\n          }\n        }\n      }\n    ],\n    \"output\": \"1300170000007075626c69635f656e7472795f706f696e745f66756e630200000006000000706172616d310806000000706172616d320a09010100\"\n  },\n  \"EraInfo\": {\n    \"input\": [\n      {\n        \"type\": \"StoredValue\",\n        \"value\": {\n          \"EraInfo\": {\n            \"seigniorage_allocations\": [\n              {\n                \"Validator\": {\n                  \"validator_public_key\": \"01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61\",\n                  \"amount\": \"1000000000\"\n                }\n              },\n              {\n                \"Delegator\": {\n                  \"delegator_public_key\": \"0202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f2\",\n                  \"validator_public_key\": \"01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61\",\n                  \"amount\": \"1000000000\"\n                }\n              },\n              {\n                \"DelegatorKind\": {\n                  \"delegator_kind\": {\n                    \"PublicKey\": \"0202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f2\"\n                  },\n                  \"validator_public_key\": \"01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61\",\n                  \"amount\": \"1000000000\"\n                }\n              }\n            ]\n          }\n        }\n      }\n    ],\n    \"output\": 
\"07030000000001197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d610400ca9a3b010202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f201197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d610400ca9a3b02000202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f201197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d610400ca9a3b\"\n  },\n  \"SmartContract\": {\n    \"input\": [\n      {\n        \"type\": \"StoredValue\",\n        \"value\": {\n          \"SmartContract\": {\n            \"versions\": [\n              {\n                \"entity_version_key\": {\n                  \"protocol_version_major\": 1,\n                  \"entity_version\": 1\n                },\n                \"entity_addr\": \"entity-contract-6464646464646464646464646464646464646464646464646464646464646464\"\n              },\n              {\n                \"entity_version_key\": {\n                  \"protocol_version_major\": 1,\n                  \"entity_version\": 2\n                },\n                \"entity_addr\": \"entity-contract-6363636363636363636363636363636363636363636363636363636363636363\"\n              }\n            ],\n            \"disabled_versions\": [\n              {\n                \"protocol_version_major\": 1,\n                \"entity_version\": 1\n              }\n            ],\n            \"groups\": [\n              {\n                \"group_name\": \"Empty\",\n                \"group_users\": []\n              },\n              {\n                \"group_name\": \"Single\",\n                \"group_users\": [\n                  \"uref-3737373737373737373737373737373737373737373737373737373737373737-001\"\n                ]\n              }\n            ],\n            \"lock_status\": \"Locked\"\n          }\n        }\n      }\n    ],\n    \"output\": 
\"0c02000000010000000100000002646464646464646464646464646464646464646464646464646464646464646401000000020000000263636363636363636363636363636363636363636363636363636363636363630100000001000000010000000200000005000000456d707479000000000600000053696e676c650100000037373737373737373737373737373737373737373737373737373737373737370101\"\n  },\n  \"Transfer\": {\n    \"input\": [\n      {\n        \"type\": \"StoredValue\",\n        \"value\": {\n          \"Transfer\": {\n            \"deploy_hash\": \"2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c\",\n            \"from\": \"account-hash-6464646464646464646464646464646464646464646464646464646464646464\",\n            \"to\": \"account-hash-6565656565656565656565656565656565656565656565656565656565656565\",\n            \"source\": \"uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-002\",\n            \"target\": \"uref-0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b-002\",\n            \"amount\": \"15000000000\",\n            \"gas\": \"2500000000\",\n            \"id\": 1\n          }\n        }\n      }\n    ],\n    \"output\": \"052c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c64646464646464646464646464646464646464646464646464646464646464640165656565656565656565656565656565656565656565656565656565656565650a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a020b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b020500d6117e030400f90295010100000000000000\"\n  },\n  \"Unbonding\": {\n    \"input\": [\n      {\n        \"type\": \"StoredValue\",\n        \"value\": {\n          \"Unbonding\": [\n            {\n              \"bonding_purse\": \"uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-001\",\n              \"validator_public_key\": \"01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61\",\n              \"unbonder_public_key\": 
\"01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61\",\n              \"era_of_creation\": 41,\n              \"amount\": \"60000000000\",\n              \"new_validator\": null\n            },\n            {\n              \"bonding_purse\": \"uref-0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b-001\",\n              \"validator_public_key\": \"01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61\",\n              \"unbonder_public_key\": \"0202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f2\",\n              \"era_of_creation\": 42,\n              \"amount\": \"50000000000\",\n              \"new_validator\": null\n            }\n          ]\n        }\n      }\n    ],\n    \"output\": \"0a020000000a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0101197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d6101197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61290000000000000005005847f80d000b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0101197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d610202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f22a000000000000000500743ba40b00\"\n  },\n  \"UnifiedBid\": {\n    \"input\": [\n      {\n        \"type\": \"StoredValue\",\n        \"value\": {\n          \"BidKind\": {\n            \"Unified\": {\n              \"validator_public_key\": \"01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61\",\n              \"bonding_purse\": \"uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007\",\n              \"staked_amount\": \"50000000000\",\n              \"delegation_rate\": 100,\n              \"vesting_schedule\": {\n                \"initial_release_timestamp_millis\": 18446744073709551615,\n                \"locked_amounts\": null\n              },\n              \"delegators\": [\n                {\n                  \"delegator_public_key\": 
\"0202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f2\",\n                  \"delegator\": {\n                    \"delegator_public_key\": \"0202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f2\",\n                    \"staked_amount\": \"1000000000\",\n                    \"bonding_purse\": \"uref-0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b-007\",\n                    \"validator_public_key\": \"01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61\",\n                    \"vesting_schedule\": {\n                      \"initial_release_timestamp_millis\": 18446744073709551615,\n                      \"locked_amounts\": null\n                    }\n                  }\n                }\n              ],\n              \"inactive\": false\n            }\n          }\n        }\n      }\n    ],\n    \"output\": \"0b0001197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d610a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a070500743ba40b6401ffffffffffffffff00010000000202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f20202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f20400ca9a3b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0701197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d6101ffffffffffffffff0000\"\n  },\n  \"ValidatorBid\": {\n    \"input\": [\n      {\n        \"type\": \"StoredValue\",\n        \"value\": {\n          \"BidKind\": {\n            \"Validator\": {\n              \"validator_public_key\": \"01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61\",\n              \"bonding_purse\": \"uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007\",\n              \"staked_amount\": \"50000000000\",\n              \"delegation_rate\": 100,\n              \"vesting_schedule\": {\n                \"initial_release_timestamp_millis\": 18446744073709551615,\n                
\"locked_amounts\": null\n              },\n              \"inactive\": false,\n              \"minimum_delegation_amount\": 0,\n              \"maximum_delegation_amount\": 18446744073709551615,\n              \"reserved_slots\": 0\n            }\n          }\n        }\n      }\n    ],\n    \"output\": \"0b0101197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d610a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a070500743ba40b6401ffffffffffffffff00000000000000000000ffffffffffffffff00000000\"\n  },\n  \"Withdraw\": {\n    \"input\": [\n      {\n        \"type\": \"StoredValue\",\n        \"value\": {\n          \"Withdraw\": [\n            {\n              \"bonding_purse\": \"uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-001\",\n              \"validator_public_key\": \"01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61\",\n              \"unbonder_public_key\": \"01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61\",\n              \"era_of_creation\": 41,\n              \"amount\": \"60000000000\"\n            },\n            {\n              \"bonding_purse\": \"uref-0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b-001\",\n              \"validator_public_key\": \"01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61\",\n              \"unbonder_public_key\": \"0202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f2\",\n              \"era_of_creation\": 42,\n              \"amount\": \"50000000000\"\n            }\n          ]\n        }\n      }\n    ],\n    \"output\": 
\"09020000000a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0101197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d6101197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61290000000000000005005847f80d0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0101197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d610202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f22a000000000000000500743ba40b\"\n  }\n}"
  },
  {
    "path": "utils/validation/tests/validation_test.rs",
    "content": "use std::{\n    env,\n    path::{Path, PathBuf},\n};\n\nuse anyhow::bail;\n\nuse casper_validation::{abi::ABIFixture, error::Error, test_case::TestCase, Fixture};\n\ntype TestPair = (String, Box<dyn TestCase>);\n\nfn get_fixtures_path() -> PathBuf {\n    let mut path = Path::new(env!(\"CARGO_MANIFEST_DIR\")).to_path_buf();\n    path.push(\"tests\");\n    path.push(\"fixtures\");\n    path\n}\n\nfn prog() -> Option<String> {\n    let first_arg = env::args().next()?;\n    let path = Path::new(&first_arg);\n    let filename = path.file_name()?.to_str()?;\n    let prog_name = match filename.split('-').next() {\n        Some(name) => name,\n        None => filename,\n    };\n    Some(prog_name.to_string())\n}\n\nfn make_abi_tests(test_name: &str, test_fixture: ABIFixture) -> Vec<TestPair> {\n    let prog_name = prog().expect(\"should get exe\");\n\n    let mut tests = Vec::with_capacity(test_fixture.len());\n\n    for (test_case, data) in test_fixture.into_inner() {\n        // validation_test::fixture_file_name::test_case\n        let desc = format!(\"{}::{}::{}\", prog_name, test_name, test_case);\n\n        tests.push((desc, Box::new(data) as Box<dyn TestCase>));\n    }\n\n    tests\n}\n\nfn make_test_cases() -> Result<Vec<TestPair>, Error> {\n    let fixtures = get_fixtures_path();\n    let test_fixtures = casper_validation::load_fixtures(&fixtures)?;\n\n    let mut tests = Vec::new();\n\n    for test_fixture in test_fixtures {\n        match test_fixture {\n            Fixture::ABI {\n                name,\n                fixture: abi_test_case,\n            } => tests.append(&mut make_abi_tests(&name, abi_test_case)),\n        }\n    }\n\n    Ok(tests)\n}\n\nfn main() -> anyhow::Result<()> {\n    let mut failed_tests = Vec::new();\n\n    for (name, test_case) in make_test_cases()? {\n        print!(\"{}... 
\", name);\n        match test_case.run_test() {\n            Ok(()) => println!(\"OK\"),\n            Err(error) => {\n                println!(\"ERROR: {}\", error);\n                failed_tests.push(name);\n            }\n        }\n    }\n\n    if !failed_tests.is_empty() {\n        bail!(\"List of failed tests: {:?}\", failed_tests);\n    }\n\n    Ok(())\n}\n"
  },
  {
    "path": "vm2-build-contracts.sh",
    "content": "#!/bin/bash\nset -e\n\nVM2_BINS=(\n  \"vm2-harness\"\n  \"vm2-cep18-caller\"\n)\n\nVM2_LIBS=(\n  \"vm2-trait\"\n  \"vm2-cep18\"\n  \"vm2-flipper\"\n  \"vm2-upgradable\"\n  \"vm2-upgradable-v2\"\n  \"vm2-legacy-counter-proxy\"\n  \"vm2-host\"\n)\n\n\nfor contract in \"${VM2_LIBS[@]}\"\ndo\n  pushd smart_contracts/contracts/vm2/$contract/\n  pwd\n  cargo build --target wasm32-unknown-unknown -p $contract --release\n  popd\ndone\n\nfor contract in \"${VM2_BINS[@]}\"\ndo\n  pushd smart_contracts/contracts/vm2/$contract/\n  pwd\n  cargo build --target wasm32-unknown-unknown -p $contract --release\n  popd\ndone\n\necho \"Stripping linked wasm\"\nfor wasm in executor/wasm/*.wasm; do\n  echo \"Stripping $wasm\"\n  wasm-strip $wasm\ndone\n"
  }
]